Merge tag 'v3.10' into drm-intel-fixes
author Daniel Vetter <daniel.vetter@ffwll.ch>
Thu, 18 Jul 2013 10:03:29 +0000 (12:03 +0200)
committer Daniel Vetter <daniel.vetter@ffwll.ch>
Thu, 18 Jul 2013 10:03:29 +0000 (12:03 +0200)
Backmerge Linux 3.10 to get at

commit 19b2dbde5732170a03bd82cc8bd442cf88d856f7
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date:   Wed Jun 12 10:15:12 2013 +0100

    drm/i915: Restore fences after resume and GPU resets

That commit is not in my current -fixes pile since that's based on my
-next queue for 3.11. And the above-mentioned fix was merged really
late into 3.10 (and blew up, bad me), so it was on a diverging branch.

Option B would have been to rebase my current pile of fixes onto
Dave's drm-fixes branch. But since some of the patches here are a bit
tricky, I've decided not to void all the testing by moving over the
entire merge window.

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
MAINTAINERS
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_suspend.c

diff --combined MAINTAINERS
index 437dd12ab9cf990c2d75de576aae3eaf3c95d9ee,ad7e322ad17b8bb985c73617407792fdc92eb33d..79daefa2c76e25d42922adb4fe9129cdc5ebf87f
@@@ -2697,14 -2697,12 +2697,14 @@@ F:   include/drm/exynos
  F:    include/uapi/drm/exynos*
  
  DRM DRIVERS FOR NVIDIA TEGRA
 -M:    Thierry Reding <thierry.reding@avionic-design.de>
 +M:    Thierry Reding <thierry.reding@gmail.com>
 +M:    Terje Bergström <tbergstrom@nvidia.com>
  L:    dri-devel@lists.freedesktop.org
  L:    linux-tegra@vger.kernel.org
 -T:    git git://gitorious.org/thierryreding/linux.git
 +T:    git git://anongit.freedesktop.org/tegra/linux.git
  S:    Maintained
 -F:    drivers/gpu/drm/tegra/
 +F:    drivers/gpu/host1x/
 +F:    include/uapi/drm/tegra_drm.h
  F:    Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
  
  DSBR100 USB FM RADIO DRIVER
@@@ -3222,7 -3220,7 +3222,7 @@@ F:      lib/fault-inject.
  
  FCOE SUBSYSTEM (libfc, libfcoe, fcoe)
  M:    Robert Love <robert.w.love@intel.com>
- L:    devel@open-fcoe.org
+ L:    fcoe-devel@open-fcoe.org
  W:    www.Open-FCoE.org
  S:    Supported
  F:    drivers/scsi/libfc/
diff --combined drivers/gpu/drm/i915/i915_drv.h
index cc1d6056ab70238eb059d899be50efde6c4d05f6,9669a0b8b440384394f1d30cc2890add8525a8f7..a416645bcd23364bb9793100da286b7f74858794
@@@ -76,8 -76,6 +76,8 @@@ enum plane 
  };
  #define plane_name(p) ((p) + 'A')
  
 +#define sprite_name(p, s) ((p) * dev_priv->num_plane + (s) + 'A')
 +
  enum port {
        PORT_A = 0,
        PORT_B,
  };
  #define port_name(p) ((p) + 'A')
  
 +enum intel_display_power_domain {
 +      POWER_DOMAIN_PIPE_A,
 +      POWER_DOMAIN_PIPE_B,
 +      POWER_DOMAIN_PIPE_C,
 +      POWER_DOMAIN_PIPE_A_PANEL_FITTER,
 +      POWER_DOMAIN_PIPE_B_PANEL_FITTER,
 +      POWER_DOMAIN_PIPE_C_PANEL_FITTER,
 +      POWER_DOMAIN_TRANSCODER_A,
 +      POWER_DOMAIN_TRANSCODER_B,
 +      POWER_DOMAIN_TRANSCODER_C,
 +      POWER_DOMAIN_TRANSCODER_EDP = POWER_DOMAIN_TRANSCODER_A + 0xF,
 +};
 +
 +#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
 +#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
 +              ((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
 +#define POWER_DOMAIN_TRANSCODER(tran) ((tran) + POWER_DOMAIN_TRANSCODER_A)
 +
  enum hpd_pin {
        HPD_NONE = 0,
        HPD_PORT_A = HPD_NONE, /* PORT_A is internal */
        list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
                if ((intel_encoder)->base.crtc == (__crtc))
  
 -struct intel_pch_pll {
 +struct drm_i915_private;
 +
 +enum intel_dpll_id {
 +      DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
 +      /* real shared dpll ids must be >= 0 */
 +      DPLL_ID_PCH_PLL_A,
 +      DPLL_ID_PCH_PLL_B,
 +};
 +#define I915_NUM_PLLS 2
 +
 +struct intel_dpll_hw_state {
 +      uint32_t dpll;
 +      uint32_t fp0;
 +      uint32_t fp1;
 +};
 +
 +struct intel_shared_dpll {
        int refcount; /* count of number of CRTCs sharing this PLL */
        int active; /* count of number of active CRTCs (i.e. DPMS on) */
        bool on; /* is the PLL actually active? Disabled during modeset */
 -      int pll_reg;
 -      int fp0_reg;
 -      int fp1_reg;
 +      const char *name;
 +      /* should match the index in the dev_priv->shared_dplls array */
 +      enum intel_dpll_id id;
 +      struct intel_dpll_hw_state hw_state;
 +      void (*enable)(struct drm_i915_private *dev_priv,
 +                     struct intel_shared_dpll *pll);
 +      void (*disable)(struct drm_i915_private *dev_priv,
 +                      struct intel_shared_dpll *pll);
 +      bool (*get_hw_state)(struct drm_i915_private *dev_priv,
 +                           struct intel_shared_dpll *pll,
 +                           struct intel_dpll_hw_state *hw_state);
  };
 -#define I915_NUM_PLLS 2
  
  /* Used by dp and fdi links */
  struct intel_link_m_n {
@@@ -218,6 -175,7 +218,6 @@@ struct opregion_header
  struct opregion_acpi;
  struct opregion_swsci;
  struct opregion_asle;
 -struct drm_i915_private;
  
  struct intel_opregion {
        struct opregion_header __iomem *header;
@@@ -328,8 -286,6 +328,8 @@@ struct drm_i915_error_state 
  
  struct intel_crtc_config;
  struct intel_crtc;
 +struct intel_limit;
 +struct dpll;
  
  struct drm_i915_display_funcs {
        bool (*fbc_enabled)(struct drm_device *dev);
        void (*disable_fbc)(struct drm_device *dev);
        int (*get_display_clock_speed)(struct drm_device *dev);
        int (*get_fifo_size)(struct drm_device *dev, int plane);
 +      /**
 +       * find_dpll() - Find the best values for the PLL
 +       * @limit: limits for the PLL
 +       * @crtc: current CRTC
 +       * @target: target frequency in kHz
 +       * @refclk: reference clock frequency in kHz
 +       * @match_clock: if provided, @best_clock P divider must
 +       *               match the P divider from @match_clock
 +       *               used for LVDS downclocking
 +       * @best_clock: best PLL values found
 +       *
 +       * Returns true on success, false on failure.
 +       */
 +      bool (*find_dpll)(const struct intel_limit *limit,
 +                        struct drm_crtc *crtc,
 +                        int target, int refclk,
 +                        struct dpll *match_clock,
 +                        struct dpll *best_clock);
        void (*update_wm)(struct drm_device *dev);
        void (*update_sprite_wm)(struct drm_device *dev, int pipe,
 -                               uint32_t sprite_width, int pixel_size);
 -      void (*update_linetime_wm)(struct drm_device *dev, int pipe,
 -                               struct drm_display_mode *mode);
 +                               uint32_t sprite_width, int pixel_size,
 +                               bool enable);
        void (*modeset_global_resources)(struct drm_device *dev);
        /* Returns the active state of the crtc, and if the crtc is active,
         * fills out the pipe-config with the hw state. */
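
A simplified, self-contained sketch of the find_dpll() contract documented
above. The reduced struct dpll (a single P divider) and struct intel_limit
(a divider range) are assumptions for illustration only, not the driver's
real types:

#include <stdbool.h>
#include <stdlib.h>

struct dpll { int p; int dot; };          /* assumed: single P divider */
struct intel_limit { int p_min, p_max; }; /* assumed: p_min >= 1 */

static bool example_find_dpll(const struct intel_limit *limit,
			      int target, int refclk,
			      const struct dpll *match_clock,
			      struct dpll *best_clock)
{
	int p, best_err = -1;

	for (p = limit->p_min; p <= limit->p_max; p++) {
		int err = abs(refclk / p - target);

		/* LVDS downclocking: the P divider must match @match_clock */
		if (match_clock && match_clock->p != p)
			continue;

		if (best_err < 0 || err < best_err) {
			best_err = err;
			best_clock->p = p;
			best_clock->dot = refclk / p;
		}
	}

	return best_err >= 0; /* true on success, false on failure */
}
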
@@@ -392,56 -331,68 +392,56 @@@ struct drm_i915_gt_funcs 
        void (*force_wake_put)(struct drm_i915_private *dev_priv);
  };
  
 -#define DEV_INFO_FLAGS \
 -      DEV_INFO_FLAG(is_mobile) DEV_INFO_SEP \
 -      DEV_INFO_FLAG(is_i85x) DEV_INFO_SEP \
 -      DEV_INFO_FLAG(is_i915g) DEV_INFO_SEP \
 -      DEV_INFO_FLAG(is_i945gm) DEV_INFO_SEP \
 -      DEV_INFO_FLAG(is_g33) DEV_INFO_SEP \
 -      DEV_INFO_FLAG(need_gfx_hws) DEV_INFO_SEP \
 -      DEV_INFO_FLAG(is_g4x) DEV_INFO_SEP \
 -      DEV_INFO_FLAG(is_pineview) DEV_INFO_SEP \
 -      DEV_INFO_FLAG(is_broadwater) DEV_INFO_SEP \
 -      DEV_INFO_FLAG(is_crestline) DEV_INFO_SEP \
 -      DEV_INFO_FLAG(is_ivybridge) DEV_INFO_SEP \
 -      DEV_INFO_FLAG(is_valleyview) DEV_INFO_SEP \
 -      DEV_INFO_FLAG(is_haswell) DEV_INFO_SEP \
 -      DEV_INFO_FLAG(has_force_wake) DEV_INFO_SEP \
 -      DEV_INFO_FLAG(has_fbc) DEV_INFO_SEP \
 -      DEV_INFO_FLAG(has_pipe_cxsr) DEV_INFO_SEP \
 -      DEV_INFO_FLAG(has_hotplug) DEV_INFO_SEP \
 -      DEV_INFO_FLAG(cursor_needs_physical) DEV_INFO_SEP \
 -      DEV_INFO_FLAG(has_overlay) DEV_INFO_SEP \
 -      DEV_INFO_FLAG(overlay_needs_physical) DEV_INFO_SEP \
 -      DEV_INFO_FLAG(supports_tv) DEV_INFO_SEP \
 -      DEV_INFO_FLAG(has_bsd_ring) DEV_INFO_SEP \
 -      DEV_INFO_FLAG(has_blt_ring) DEV_INFO_SEP \
 -      DEV_INFO_FLAG(has_llc)
 +#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
 +      func(is_mobile) sep \
 +      func(is_i85x) sep \
 +      func(is_i915g) sep \
 +      func(is_i945gm) sep \
 +      func(is_g33) sep \
 +      func(need_gfx_hws) sep \
 +      func(is_g4x) sep \
 +      func(is_pineview) sep \
 +      func(is_broadwater) sep \
 +      func(is_crestline) sep \
 +      func(is_ivybridge) sep \
 +      func(is_valleyview) sep \
 +      func(is_haswell) sep \
 +      func(has_force_wake) sep \
 +      func(has_fbc) sep \
 +      func(has_pipe_cxsr) sep \
 +      func(has_hotplug) sep \
 +      func(cursor_needs_physical) sep \
 +      func(has_overlay) sep \
 +      func(overlay_needs_physical) sep \
 +      func(supports_tv) sep \
 +      func(has_bsd_ring) sep \
 +      func(has_blt_ring) sep \
 +      func(has_vebox_ring) sep \
 +      func(has_llc) sep \
 +      func(has_ddi) sep \
 +      func(has_fpga_dbg)
 +
 +#define DEFINE_FLAG(name) u8 name:1
 +#define SEP_SEMICOLON ;
  
  struct intel_device_info {
        u32 display_mmio_offset;
        u8 num_pipes:3;
        u8 gen;
 -      u8 is_mobile:1;
 -      u8 is_i85x:1;
 -      u8 is_i915g:1;
 -      u8 is_i945gm:1;
 -      u8 is_g33:1;
 -      u8 need_gfx_hws:1;
 -      u8 is_g4x:1;
 -      u8 is_pineview:1;
 -      u8 is_broadwater:1;
 -      u8 is_crestline:1;
 -      u8 is_ivybridge:1;
 -      u8 is_valleyview:1;
 -      u8 has_force_wake:1;
 -      u8 is_haswell:1;
 -      u8 has_fbc:1;
 -      u8 has_pipe_cxsr:1;
 -      u8 has_hotplug:1;
 -      u8 cursor_needs_physical:1;
 -      u8 has_overlay:1;
 -      u8 overlay_needs_physical:1;
 -      u8 supports_tv:1;
 -      u8 has_bsd_ring:1;
 -      u8 has_blt_ring:1;
 -      u8 has_llc:1;
 +      DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
  };
  
 +#undef DEFINE_FLAG
 +#undef SEP_SEMICOLON
 +
  enum i915_cache_level {
        I915_CACHE_NONE = 0,
        I915_CACHE_LLC,
        I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
  };
  
 +typedef uint32_t gen6_gtt_pte_t;
 +
  /* The Graphics Translation Table is the way in which GEN hardware translates a
   * Graphics Virtual Address into a Physical Address. In addition to the normal
   * collateral associated with any va->pa translations GEN hardware also has a
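
The DEV_INFO_FOR_EACH_FLAG rework above is the classic X-macro pattern: one
flag list, expanded once into struct bitfields and again (e.g. for debug
output) with a different per-flag macro. A standalone sketch, where
everything beyond the flag names is illustrative:

#include <stdio.h>

#define EXAMPLE_FOR_EACH_FLAG(func, sep) \
	func(is_mobile) sep \
	func(has_llc) sep \
	func(has_ddi)

#define DEFINE_FLAG(name) unsigned name:1
#define SEP_SEMICOLON ;

struct example_device_info {
	/* expands to: unsigned is_mobile:1; unsigned has_llc:1; ... */
	EXAMPLE_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
};

static void example_print_flags(const struct example_device_info *info)
{
	/* second expansion of the same list, different generated code */
#define PRINT_FLAG(name) printf(#name ": %u\n", (unsigned)info->name)
	EXAMPLE_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
#undef PRINT_FLAG
}
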
@@@ -477,9 -428,6 +477,9 @@@ struct i915_gtt 
                                   struct sg_table *st,
                                   unsigned int pg_start,
                                   enum i915_cache_level cache_level);
 +      gen6_gtt_pte_t (*pte_encode)(struct drm_device *dev,
 +                                   dma_addr_t addr,
 +                                   enum i915_cache_level level);
  };
  #define gtt_total_entries(gtt) ((gtt).total >> PAGE_SHIFT)
  
@@@ -501,31 -449,19 +501,31 @@@ struct i915_hw_ppgtt 
                               struct sg_table *st,
                               unsigned int pg_start,
                               enum i915_cache_level cache_level);
 +      gen6_gtt_pte_t (*pte_encode)(struct drm_device *dev,
 +                                   dma_addr_t addr,
 +                                   enum i915_cache_level level);
        int (*enable)(struct drm_device *dev);
        void (*cleanup)(struct i915_hw_ppgtt *ppgtt);
  };
  
 +struct i915_ctx_hang_stats {
 +      /* This context had batch pending when hang was declared */
 +      unsigned batch_pending;
 +
 +      /* This context had batch active when hang was declared */
 +      unsigned batch_active;
 +};
  
  /* This must match up with the value previously used for execbuf2.rsvd1. */
  #define DEFAULT_CONTEXT_ID 0
  struct i915_hw_context {
 +      struct kref ref;
        int id;
        bool is_initialized;
        struct drm_i915_file_private *file_priv;
        struct intel_ring_buffer *ring;
        struct drm_i915_gem_object *obj;
 +      struct i915_ctx_hang_stats hang_stats;
  };
  
  enum no_fbc_reason {
@@@ -722,7 -658,6 +722,7 @@@ struct i915_suspend_saved_registers 
  
  struct intel_gen6_power_mgmt {
        struct work_struct work;
 +      struct delayed_work vlv_work;
        u32 pm_iir;
        /* lock - irqsave spinlock that protects the work_struct and
         * pm_iir. */
        u8 cur_delay;
        u8 min_delay;
        u8 max_delay;
 +      u8 rpe_delay;
        u8 hw_max;
  
        struct delayed_work delayed_resume_work;
@@@ -770,15 -704,6 +770,15 @@@ struct intel_ilk_power_mgmt 
        struct drm_i915_gem_object *renderctx;
  };
  
 +/* Power well structure for haswell */
 +struct i915_power_well {
 +      struct drm_device *device;
 +      spinlock_t lock;
 +      /* power well enable/disable usage count */
 +      int count;
 +      int i915_request;
 +};
 +
  struct i915_dri1_state {
        unsigned allow_batchbuffer : 1;
        u32 __iomem *gfx_hws_cpu_addr;
@@@ -887,20 -812,14 +887,20 @@@ struct i915_gem_mm 
        u32 object_count;
  };
  
 +struct drm_i915_error_state_buf {
 +      unsigned bytes;
 +      unsigned size;
 +      int err;
 +      u8 *buf;
 +      loff_t start;
 +      loff_t pos;
 +};
 +
  struct i915_gpu_error {
        /* For hangcheck timer */
  #define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
  #define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
        struct timer_list hangcheck_timer;
 -      int hangcheck_count;
 -      uint32_t last_acthd[I915_NUM_RINGS];
 -      uint32_t prev_instdone[I915_NUM_INSTDONE_REG];
  
        /* For reset and error_state handling. */
        spinlock_t lock;
@@@ -956,37 -875,6 +956,37 @@@ enum modeset_restore 
        MODESET_SUSPENDED,
  };
  
 +struct intel_vbt_data {
 +      struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
 +      struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
 +
 +      /* Feature bits */
 +      unsigned int int_tv_support:1;
 +      unsigned int lvds_dither:1;
 +      unsigned int lvds_vbt:1;
 +      unsigned int int_crt_support:1;
 +      unsigned int lvds_use_ssc:1;
 +      unsigned int display_clock_mode:1;
 +      unsigned int fdi_rx_polarity_inverted:1;
 +      int lvds_ssc_freq;
 +      unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
 +
 +      /* eDP */
 +      int edp_rate;
 +      int edp_lanes;
 +      int edp_preemphasis;
 +      int edp_vswing;
 +      bool edp_initialized;
 +      bool edp_support;
 +      int edp_bpp;
 +      struct edp_power_seq edp_pps;
 +
 +      int crt_ddc_pin;
 +
 +      int child_dev_num;
 +      struct child_device_config *child_dev;
 +};
 +
  typedef struct drm_i915_private {
        struct drm_device *dev;
        struct kmem_cache *slab;
                        HPD_MARK_DISABLED = 2
                } hpd_mark;
        } hpd_stats[HPD_NUM_PINS];
 +      u32 hpd_event_bits;
        struct timer_list hotplug_reenable_timer;
  
 -      int num_pch_pll;
        int num_plane;
  
        unsigned long cfb_size;
        struct intel_fbc_work *fbc_work;
  
        struct intel_opregion opregion;
 +      struct intel_vbt_data vbt;
  
        /* overlay */
        struct intel_overlay *overlay;
        struct {
                int level;
                bool enabled;
 +              spinlock_t lock; /* bl registers and the above bl fields */
                struct backlight_device *device;
        } backlight;
  
        /* LVDS info */
        struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
        struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
 -
 -      /* Feature bits from the VBIOS */
 -      unsigned int int_tv_support:1;
 -      unsigned int lvds_dither:1;
 -      unsigned int lvds_vbt:1;
 -      unsigned int int_crt_support:1;
 -      unsigned int lvds_use_ssc:1;
 -      unsigned int display_clock_mode:1;
 -      unsigned int fdi_rx_polarity_inverted:1;
 -      int lvds_ssc_freq;
 -      unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
 -      struct {
 -              int rate;
 -              int lanes;
 -              int preemphasis;
 -              int vswing;
 -
 -              bool initialized;
 -              bool support;
 -              int bpp;
 -              struct edp_power_seq pps;
 -      } edp;
        bool no_aux_handshake;
  
 -      int crt_ddc_pin;
        struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
        int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
        int num_fence_regs; /* 8 on pre-965, 16 otherwise */
        /* Kernel Modesetting */
  
        struct sdvo_device_mapping sdvo_mappings[2];
 -      /* indicate whether the LVDS_BORDER should be enabled or not */
 -      unsigned int lvds_border_bits;
 -      /* Panel fitter placement and size for Ironlake+ */
 -      u32 pch_pf_pos, pch_pf_size;
  
        struct drm_crtc *plane_to_crtc_mapping[3];
        struct drm_crtc *pipe_to_crtc_mapping[3];
        wait_queue_head_t pending_flip_queue;
  
 -      struct intel_pch_pll pch_plls[I915_NUM_PLLS];
 +      int num_shared_dpll;
 +      struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
        struct intel_ddi_plls ddi_plls;
  
        /* Reclocking support */
        /* indicates the reduced downclock for LVDS*/
        int lvds_downclock;
        u16 orig_clock;
 -      int child_dev_num;
 -      struct child_device_config *child_dev;
  
        bool mchbar_need_disable;
  
         * mchdev_lock in intel_pm.c */
        struct intel_ilk_power_mgmt ips;
  
 +      /* Haswell power well */
 +      struct i915_power_well power_well;
 +
        enum no_fbc_reason no_fbc_reason;
  
        struct drm_mm_node *compressed_fb;
  
        struct i915_gpu_error gpu_error;
  
 +      struct drm_i915_gem_object *vlv_pctx;
 +
        /* list of fbdev register on this device */
        struct intel_fbdev *fbdev;
  
@@@ -1215,7 -1124,7 +1215,7 @@@ struct drm_i915_gem_object 
        struct drm_mm_node *gtt_space;
        /** Stolen memory for this object, instead of being backed by shmem. */
        struct drm_mm_node *stolen;
 -      struct list_head gtt_list;
 +      struct list_head global_list;
  
        /** This object's place on the active/inactive lists */
        struct list_head ring_list;
@@@ -1362,18 -1271,9 +1362,18 @@@ struct drm_i915_gem_request 
        /** GEM sequence number associated with this request. */
        uint32_t seqno;
  
 -      /** Postion in the ringbuffer of the end of the request */
 +      /** Position in the ringbuffer of the start of the request */
 +      u32 head;
 +
 +      /** Position in the ringbuffer of the end of the request */
        u32 tail;
  
 +      /** Context related to this request */
 +      struct i915_hw_context *ctx;
 +
 +      /** Batch buffer related to this request if any */
 +      struct drm_i915_gem_object *batch_obj;
 +
        /** Time at which this request was emitted, in jiffies. */
        unsigned long emitted_jiffies;
  
@@@ -1391,8 -1291,6 +1391,8 @@@ struct drm_i915_file_private 
                struct list_head request_list;
        } mm;
        struct idr context_idr;
 +
 +      struct i915_ctx_hang_stats hang_stats;
  };
  
  #define INTEL_INFO(dev)       (((struct drm_i915_private *) (dev)->dev_private)->info)
  
  #define HAS_BSD(dev)            (INTEL_INFO(dev)->has_bsd_ring)
  #define HAS_BLT(dev)            (INTEL_INFO(dev)->has_blt_ring)
 +#define HAS_VEBOX(dev)          (INTEL_INFO(dev)->has_vebox_ring)
  #define HAS_LLC(dev)            (INTEL_INFO(dev)->has_llc)
  #define I915_NEED_GFX_HWS(dev)        (INTEL_INFO(dev)->need_gfx_hws)
  
  #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
  #define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
  
 +#define HAS_IPS(dev)          (IS_ULT(dev))
 +
  #define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
  
 -#define HAS_DDI(dev)          (IS_HASWELL(dev))
 +#define HAS_DDI(dev)          (INTEL_INFO(dev)->has_ddi)
  #define HAS_POWER_WELL(dev)   (IS_HASWELL(dev))
 +#define HAS_FPGA_DBG_UNCLAIMED(dev)   (INTEL_INFO(dev)->has_fpga_dbg)
  
  #define INTEL_PCH_DEVICE_ID_MASK              0xff00
  #define INTEL_PCH_IBX_DEVICE_ID_TYPE          0x3b00
@@@ -1541,7 -1435,6 +1541,7 @@@ extern bool i915_enable_hangcheck __rea
  extern int i915_enable_ppgtt __read_mostly;
  extern unsigned int i915_preliminary_hw_support __read_mostly;
  extern int i915_disable_power_well __read_mostly;
 +extern int i915_enable_ips __read_mostly;
  
  extern int i915_suspend(struct drm_device *dev, pm_message_t state);
  extern int i915_resume(struct drm_device *dev);
@@@ -1593,6 -1486,8 +1593,6 @@@ i915_enable_pipestat(drm_i915_private_
  void
  i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
  
 -void intel_enable_asle(struct drm_device *dev);
 -
  #ifdef CONFIG_DEBUG_FS
  extern void i915_destroy_error_state(struct drm_device *dev);
  #else
@@@ -1731,7 -1626,6 +1731,7 @@@ i915_gem_object_unpin_fence(struct drm_
  {
        if (obj->fence_reg != I915_FENCE_REG_NONE) {
                struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
 +              WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
                dev_priv->fence_regs[obj->fence_reg].pin_count--;
        }
  }
@@@ -1764,12 -1658,9 +1764,12 @@@ void i915_gem_init_swizzling(struct drm
  void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
  int __must_check i915_gpu_idle(struct drm_device *dev);
  int __must_check i915_gem_idle(struct drm_device *dev);
 -int i915_add_request(struct intel_ring_buffer *ring,
 -                   struct drm_file *file,
 -                   u32 *seqno);
 +int __i915_add_request(struct intel_ring_buffer *ring,
 +                     struct drm_file *file,
 +                     struct drm_i915_gem_object *batch_obj,
 +                     u32 *seqno);
 +#define i915_add_request(ring, seqno) \
 +      __i915_add_request(ring, NULL, NULL, seqno)
  int __must_check i915_wait_seqno(struct intel_ring_buffer *ring,
                                 uint32_t seqno);
  int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
@@@ -1806,27 -1697,14 +1806,29 @@@ struct drm_gem_object *i915_gem_prime_i
  struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
                                struct drm_gem_object *gem_obj, int flags);
  
+ void i915_gem_restore_fences(struct drm_device *dev);
  /* i915_gem_context.c */
  void i915_gem_context_init(struct drm_device *dev);
  void i915_gem_context_fini(struct drm_device *dev);
  void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
  int i915_switch_context(struct intel_ring_buffer *ring,
                        struct drm_file *file, int to_id);
 +void i915_gem_context_free(struct kref *ctx_ref);
 +static inline void i915_gem_context_reference(struct i915_hw_context *ctx)
 +{
 +      kref_get(&ctx->ref);
 +}
 +
 +static inline void i915_gem_context_unreference(struct i915_hw_context *ctx)
 +{
 +      kref_put(&ctx->ref, i915_gem_context_free);
 +}
 +
 +struct i915_ctx_hang_stats * __must_check
 +i915_gem_context_get_hang_stats(struct intel_ring_buffer *ring,
 +                              struct drm_file *file,
 +                              u32 id);
  int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
                                  struct drm_file *file);
  int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
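
i915_hw_context now carries a struct kref, and the reference/unreference
helpers above wrap kref_get()/kref_put(). A minimal userspace analogue of
that lifetime scheme (the names and the stdatomic stand-in are illustrative,
not kernel API):

#include <stdatomic.h>
#include <stdlib.h>

struct example_ctx {
	atomic_int ref;
	/* ... context state ... */
};

static struct example_ctx *example_ctx_create(void)
{
	struct example_ctx *ctx = calloc(1, sizeof(*ctx));

	if (ctx)
		atomic_init(&ctx->ref, 1); /* creator owns the first ref */
	return ctx;
}

static void example_ctx_reference(struct example_ctx *ctx)
{
	atomic_fetch_add(&ctx->ref, 1);
}

static void example_ctx_unreference(struct example_ctx *ctx)
{
	/* last put frees, like kref_put() invoking i915_gem_context_free() */
	if (atomic_fetch_sub(&ctx->ref, 1) == 1)
		free(ctx);
}
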
@@@ -1908,8 -1786,6 +1910,8 @@@ void i915_gem_dump_object(struct drm_i9
  /* i915_debugfs.c */
  int i915_debugfs_init(struct drm_minor *minor);
  void i915_debugfs_cleanup(struct drm_minor *minor);
 +__printf(2, 3)
 +void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
  
  /* i915_suspend.c */
  extern int i915_save_state(struct drm_device *dev);
@@@ -1926,7 -1802,7 +1928,7 @@@ void i915_teardown_sysfs(struct drm_dev
  /* intel_i2c.c */
  extern int intel_setup_gmbus(struct drm_device *dev);
  extern void intel_teardown_gmbus(struct drm_device *dev);
 -extern inline bool intel_gmbus_is_port_valid(unsigned port)
 +static inline bool intel_gmbus_is_port_valid(unsigned port)
  {
        return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD);
  }
@@@ -1935,7 -1811,7 +1937,7 @@@ extern struct i2c_adapter *intel_gmbus_
                struct drm_i915_private *dev_priv, unsigned port);
  extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
  extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
 -extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
 +static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
  {
        return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
  }
@@@ -1947,10 -1823,14 +1949,10 @@@ extern int intel_opregion_setup(struct 
  extern void intel_opregion_init(struct drm_device *dev);
  extern void intel_opregion_fini(struct drm_device *dev);
  extern void intel_opregion_asle_intr(struct drm_device *dev);
 -extern void intel_opregion_gse_intr(struct drm_device *dev);
 -extern void intel_opregion_enable_asle(struct drm_device *dev);
  #else
  static inline void intel_opregion_init(struct drm_device *dev) { return; }
  static inline void intel_opregion_fini(struct drm_device *dev) { return; }
  static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
 -static inline void intel_opregion_gse_intr(struct drm_device *dev) { return; }
 -static inline void intel_opregion_enable_asle(struct drm_device *dev) { return; }
  #endif
  
  /* intel_acpi.c */
@@@ -1964,7 -1844,6 +1966,7 @@@ static inline void intel_unregister_dsm
  
  /* modesetting */
  extern void intel_modeset_init_hw(struct drm_device *dev);
 +extern void intel_modeset_suspend_hw(struct drm_device *dev);
  extern void intel_modeset_init(struct drm_device *dev);
  extern void intel_modeset_gem_init(struct drm_device *dev);
  extern void intel_modeset_cleanup(struct drm_device *dev);
@@@ -1977,9 -1856,6 +1979,9 @@@ extern void intel_disable_fbc(struct dr
  extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
  extern void intel_init_pch_refclk(struct drm_device *dev);
  extern void gen6_set_rps(struct drm_device *dev, u8 val);
 +extern void valleyview_set_rps(struct drm_device *dev, u8 val);
 +extern int valleyview_rps_max_freq(struct drm_i915_private *dev_priv);
 +extern int valleyview_rps_min_freq(struct drm_i915_private *dev_priv);
  extern void intel_detect_pch(struct drm_device *dev);
  extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
  extern int intel_enable_rc6(const struct drm_device *dev);
@@@ -1991,11 -1867,10 +1993,11 @@@ int i915_reg_read_ioctl(struct drm_devi
  /* overlay */
  #ifdef CONFIG_DEBUG_FS
  extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
 -extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error);
 +extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
 +                                          struct intel_overlay_error_state *error);
  
  extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev);
 -extern void intel_display_print_error_state(struct seq_file *m,
 +extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
                                            struct drm_device *dev,
                                            struct intel_display_error_state *error);
  #endif
@@@ -2010,20 -1885,8 +2012,20 @@@ int __gen6_gt_wait_for_fifo(struct drm_
  
  int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
  int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
 -int valleyview_punit_read(struct drm_i915_private *dev_priv, u8 addr, u32 *val);
 -int valleyview_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val);
 +
 +/* intel_sideband.c */
 +u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr);
 +void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val);
 +u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
 +u32 vlv_dpio_read(struct drm_i915_private *dev_priv, int reg);
 +void vlv_dpio_write(struct drm_i915_private *dev_priv, int reg, u32 val);
 +u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
 +                 enum intel_sbi_destination destination);
 +void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
 +                   enum intel_sbi_destination destination);
 +
 +int vlv_gpu_freq(int ddr_freq, int val);
 +int vlv_freq_opcode(int ddr_freq, int val);
  
  #define __i915_read(x, y) \
        u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
diff --combined drivers/gpu/drm/i915/i915_gem.c
index 06d66e09da172c42ce0b878fef26c96e10d908d1,9e35dafc580724da0f48db14c441f57db951b45d..97afd2639fb63a1e240fce2c6f97f53fd9f82e98
@@@ -176,7 -176,7 +176,7 @@@ i915_gem_get_aperture_ioctl(struct drm_
  
        pinned = 0;
        mutex_lock(&dev->struct_mutex);
 -      list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
 +      list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
                if (obj->pin_count)
                        pinned += obj->gtt_space->size;
        mutex_unlock(&dev->struct_mutex);
@@@ -956,7 -956,7 +956,7 @@@ i915_gem_check_olr(struct intel_ring_bu
  
        ret = 0;
        if (seqno == ring->outstanding_lazy_request)
 -              ret = i915_add_request(ring, NULL, NULL);
 +              ret = i915_add_request(ring, NULL);
  
        return ret;
  }
@@@ -1087,25 -1087,6 +1087,25 @@@ i915_wait_seqno(struct intel_ring_buffe
                            interruptible, NULL);
  }
  
 +static int
 +i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
 +                                   struct intel_ring_buffer *ring)
 +{
 +      i915_gem_retire_requests_ring(ring);
 +
 +      /* Manually manage the write flush as we may have not yet
 +       * retired the buffer.
 +       *
 +       * Note that the last_write_seqno is always the earlier of
 +       * the two (read/write) seqnos, so if we have successfully waited,
 +       * we know we have passed the last write.
 +       */
 +      obj->last_write_seqno = 0;
 +      obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
 +
 +      return 0;
 +}
 +
  /**
   * Ensures that all rendering to the object has completed and the object is
   * safe to unbind from the GTT or access from the CPU.
@@@ -1126,7 -1107,18 +1126,7 @@@ i915_gem_object_wait_rendering(struct d
        if (ret)
                return ret;
  
 -      i915_gem_retire_requests_ring(ring);
 -
 -      /* Manually manage the write flush as we may have not yet
 -       * retired the buffer.
 -       */
 -      if (obj->last_write_seqno &&
 -          i915_seqno_passed(seqno, obj->last_write_seqno)) {
 -              obj->last_write_seqno = 0;
 -              obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
 -      }
 -
 -      return 0;
 +      return i915_gem_object_wait_rendering__tail(obj, ring);
  }
  
  /* A nonblocking variant of the above wait. This is a highly dangerous routine
@@@ -1162,10 -1154,19 +1162,10 @@@ i915_gem_object_wait_rendering__nonbloc
        mutex_unlock(&dev->struct_mutex);
        ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
        mutex_lock(&dev->struct_mutex);
 +      if (ret)
 +              return ret;
  
 -      i915_gem_retire_requests_ring(ring);
 -
 -      /* Manually manage the write flush as we may have not yet
 -       * retired the buffer.
 -       */
 -      if (obj->last_write_seqno &&
 -          i915_seqno_passed(seqno, obj->last_write_seqno)) {
 -              obj->last_write_seqno = 0;
 -              obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
 -      }
 -
 -      return ret;
 +      return i915_gem_object_wait_rendering__tail(obj, ring);
  }
  
  /**
@@@ -1675,7 -1676,7 +1675,7 @@@ i915_gem_object_put_pages(struct drm_i9
        /* ->put_pages might need to allocate memory for the bit17 swizzle
         * array, hence protect them from being reaped by removing them from gtt
         * lists early. */
 -      list_del(&obj->gtt_list);
 +      list_del(&obj->global_list);
  
        ops->put_pages(obj);
        obj->pages = NULL;
@@@ -1695,7 -1696,7 +1695,7 @@@ __i915_gem_shrink(struct drm_i915_priva
  
        list_for_each_entry_safe(obj, next,
                                 &dev_priv->mm.unbound_list,
 -                               gtt_list) {
 +                               global_list) {
                if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
                    i915_gem_object_put_pages(obj) == 0) {
                        count += obj->base.size >> PAGE_SHIFT;
@@@ -1732,8 -1733,7 +1732,8 @@@ i915_gem_shrink_all(struct drm_i915_pri
  
        i915_gem_evict_everything(dev_priv->dev);
  
 -      list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list)
 +      list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
 +                               global_list)
                i915_gem_object_put_pages(obj);
  }
  
@@@ -1867,7 -1867,7 +1867,7 @@@ i915_gem_object_get_pages(struct drm_i9
        if (ret)
                return ret;
  
 -      list_add_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
 +      list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
        return 0;
  }
  
@@@ -1880,10 -1880,6 +1880,10 @@@ i915_gem_object_move_to_active(struct d
        u32 seqno = intel_ring_get_seqno(ring);
  
        BUG_ON(ring == NULL);
 +      if (obj->ring != ring && obj->last_write_seqno) {
 +              /* Keep the seqno relative to the current ring */
 +              obj->last_write_seqno = seqno;
 +      }
        obj->ring = ring;
  
        /* Add a reference if we're newly entering the active list. */
@@@ -2009,18 -2005,17 +2009,18 @@@ i915_gem_get_seqno(struct drm_device *d
        return 0;
  }
  
 -int
 -i915_add_request(struct intel_ring_buffer *ring,
 -               struct drm_file *file,
 -               u32 *out_seqno)
 +int __i915_add_request(struct intel_ring_buffer *ring,
 +                     struct drm_file *file,
 +                     struct drm_i915_gem_object *obj,
 +                     u32 *out_seqno)
  {
        drm_i915_private_t *dev_priv = ring->dev->dev_private;
        struct drm_i915_gem_request *request;
 -      u32 request_ring_position;
 +      u32 request_ring_position, request_start;
        int was_empty;
        int ret;
  
 +      request_start = intel_ring_get_tail(ring);
        /*
         * Emit any outstanding flushes - execbuf can fail to emit the flush
         * after having emitted the batchbuffer command. Hence we need to fix
  
        request->seqno = intel_ring_get_seqno(ring);
        request->ring = ring;
 +      request->head = request_start;
        request->tail = request_ring_position;
 +      request->ctx = ring->last_context;
 +      request->batch_obj = obj;
 +
 +      /* Whilst this request exists, batch_obj will be on the
 +       * active_list, and so will hold the active reference. Only when this
 +       * request is retired will the batch_obj be moved onto the
 +       * inactive_list and lose its active reference. Hence we do not need
 +       * to explicitly hold another reference here.
 +       */
 +
 +      if (request->ctx)
 +              i915_gem_context_reference(request->ctx);
 +
        request->emitted_jiffies = jiffies;
        was_empty = list_empty(&ring->request_list);
        list_add_tail(&request->list, &ring->request_list);
@@@ -2119,114 -2100,9 +2119,114 @@@ i915_gem_request_remove_from_client(str
        spin_unlock(&file_priv->mm.lock);
  }
  
 +static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj)
 +{
 +      if (acthd >= obj->gtt_offset &&
 +          acthd < obj->gtt_offset + obj->base.size)
 +              return true;
 +
 +      return false;
 +}
 +
 +static bool i915_head_inside_request(const u32 acthd_unmasked,
 +                                   const u32 request_start,
 +                                   const u32 request_end)
 +{
 +      const u32 acthd = acthd_unmasked & HEAD_ADDR;
 +
 +      if (request_start < request_end) {
 +              if (acthd >= request_start && acthd < request_end)
 +                      return true;
 +      } else if (request_start > request_end) {
 +              if (acthd >= request_start || acthd < request_end)
 +                      return true;
 +      }
 +
 +      return false;
 +}
 +
 +static bool i915_request_guilty(struct drm_i915_gem_request *request,
 +                              const u32 acthd, bool *inside)
 +{
 +      /* There is a possibility that the unmasked head address,
 +       * pointing inside the ring, matches the batch_obj address range.
 +       * However, this is extremely unlikely.
 +       */
 +
 +      if (request->batch_obj) {
 +              if (i915_head_inside_object(acthd, request->batch_obj)) {
 +                      *inside = true;
 +                      return true;
 +              }
 +      }
 +
 +      if (i915_head_inside_request(acthd, request->head, request->tail)) {
 +              *inside = false;
 +              return true;
 +      }
 +
 +      return false;
 +}
 +
 +static void i915_set_reset_status(struct intel_ring_buffer *ring,
 +                                struct drm_i915_gem_request *request,
 +                                u32 acthd)
 +{
 +      struct i915_ctx_hang_stats *hs = NULL;
 +      bool inside, guilty;
 +
 +      /* Innocent until proven guilty */
 +      guilty = false;
 +
 +      if (ring->hangcheck.action != wait &&
 +          i915_request_guilty(request, acthd, &inside)) {
 +              DRM_ERROR("%s hung %s bo (0x%x ctx %d) at 0x%x\n",
 +                        ring->name,
 +                        inside ? "inside" : "flushing",
 +                        request->batch_obj ?
 +                        request->batch_obj->gtt_offset : 0,
 +                        request->ctx ? request->ctx->id : 0,
 +                        acthd);
 +
 +              guilty = true;
 +      }
 +
 +      /* If contexts are disabled or this is the default context, use
 +       * file_priv->reset_state
 +       */
 +      if (request->ctx && request->ctx->id != DEFAULT_CONTEXT_ID)
 +              hs = &request->ctx->hang_stats;
 +      else if (request->file_priv)
 +              hs = &request->file_priv->hang_stats;
 +
 +      if (hs) {
 +              if (guilty)
 +                      hs->batch_active++;
 +              else
 +                      hs->batch_pending++;
 +      }
 +}
 +
 +static void i915_gem_free_request(struct drm_i915_gem_request *request)
 +{
 +      list_del(&request->list);
 +      i915_gem_request_remove_from_client(request);
 +
 +      if (request->ctx)
 +              i915_gem_context_unreference(request->ctx);
 +
 +      kfree(request);
 +}
 +
  static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
                                      struct intel_ring_buffer *ring)
  {
 +      u32 completed_seqno;
 +      u32 acthd;
 +
 +      acthd = intel_ring_get_active_head(ring);
 +      completed_seqno = ring->get_seqno(ring, false);
 +
        while (!list_empty(&ring->request_list)) {
                struct drm_i915_gem_request *request;
  
                                           struct drm_i915_gem_request,
                                           list);
  
 -              list_del(&request->list);
 -              i915_gem_request_remove_from_client(request);
 -              kfree(request);
 +              if (request->seqno > completed_seqno)
 +                      i915_set_reset_status(ring, request, acthd);
 +
 +              i915_gem_free_request(request);
        }
  
        while (!list_empty(&ring->active_list)) {
        }
  }
  
- static void i915_gem_reset_fences(struct drm_device *dev)
+ void i915_gem_restore_fences(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
        int i;
  
        for (i = 0; i < dev_priv->num_fence_regs; i++) {
                struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
-               if (reg->obj)
-                       i915_gem_object_fence_lost(reg->obj);
-               i915_gem_write_fence(dev, i, NULL);
-               reg->pin_count = 0;
-               reg->obj = NULL;
-               INIT_LIST_HEAD(&reg->lru_list);
+               i915_gem_write_fence(dev, i, reg->obj);
        }
-       INIT_LIST_HEAD(&dev_priv->mm.fence_list);
  }
  
  void i915_gem_reset(struct drm_device *dev)
                obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
        }
  
-       /* The fence registers are invalidated so clear them out */
-       i915_gem_reset_fences(dev);
+       i915_gem_restore_fences(dev);
  }
  
  /**
@@@ -2329,7 -2193,9 +2318,7 @@@ i915_gem_retire_requests_ring(struct in
                 */
                ring->last_retired_head = request->tail;
  
 -              list_del(&request->list);
 -              i915_gem_request_remove_from_client(request);
 -              kfree(request);
 +              i915_gem_free_request(request);
        }
  
        /* Move any buffers on the active list that are no longer referenced
@@@ -2396,7 -2262,7 +2385,7 @@@ i915_gem_retire_work_handler(struct wor
        idle = true;
        for_each_ring(ring, dev_priv, i) {
                if (ring->gpu_caches_dirty)
 -                      i915_add_request(ring, NULL, NULL);
 +                      i915_add_request(ring, NULL);
  
                idle &= list_empty(&ring->request_list);
        }
@@@ -2628,10 -2494,9 +2617,10 @@@ i915_gem_object_unbind(struct drm_i915_
                obj->has_aliasing_ppgtt_mapping = 0;
        }
        i915_gem_gtt_finish_object(obj);
 +      i915_gem_object_unpin_pages(obj);
  
        list_del(&obj->mm_list);
 -      list_move_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
 +      list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
        /* Avoid an unnecessary call to unbind on rebind. */
        obj->map_and_fenceable = true;
  
@@@ -2668,6 -2533,7 +2657,6 @@@ static void i965_write_fence_reg(struc
        drm_i915_private_t *dev_priv = dev->dev_private;
        int fence_reg;
        int fence_pitch_shift;
 -      uint64_t val;
  
        if (INTEL_INFO(dev)->gen >= 6) {
                fence_reg = FENCE_REG_SANDYBRIDGE_0;
                fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
        }
  
 +      fence_reg += reg * 8;
 +
 +      /* To w/a incoherency with non-atomic 64-bit register updates,
 +       * we split the 64-bit update into two 32-bit writes. In order
 +       * for a partial fence not to be evaluated between writes, we
 +       * precede the update with write to turn off the fence register,
 +       * and only enable the fence as the last step.
 +       *
 +       * For extra levels of paranoia, we make sure each step lands
 +       * before applying the next step.
 +       */
 +      I915_WRITE(fence_reg, 0);
 +      POSTING_READ(fence_reg);
 +
        if (obj) {
                u32 size = obj->gtt_space->size;
 +              uint64_t val;
  
                val = (uint64_t)((obj->gtt_offset + size - 4096) &
                                 0xfffff000) << 32;
                if (obj->tiling_mode == I915_TILING_Y)
                        val |= 1 << I965_FENCE_TILING_Y_SHIFT;
                val |= I965_FENCE_REG_VALID;
 -      } else
 -              val = 0;
  
 -      fence_reg += reg * 8;
 -      I915_WRITE64(fence_reg, val);
 -      POSTING_READ(fence_reg);
 +              I915_WRITE(fence_reg + 4, val >> 32);
 +              POSTING_READ(fence_reg + 4);
 +
 +              I915_WRITE(fence_reg + 0, val);
 +              POSTING_READ(fence_reg);
 +      } else {
 +              I915_WRITE(fence_reg + 4, 0);
 +              POSTING_READ(fence_reg + 4);
 +      }
  }
  
  static void i915_write_fence_reg(struct drm_device *dev, int reg,
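
The hunk above replaces one 64-bit fence write with an off/high/low sequence
so the hardware never samples a half-updated fence. A self-contained sketch
of that ordering; write32()/read32() are assumed MMIO helpers, not real
driver API:

#include <stdint.h>

void write32(uint32_t reg, uint32_t val);	/* assumed MMIO write */
uint32_t read32(uint32_t reg);			/* assumed posting read */

static void example_write_fence64(uint32_t fence_reg, uint64_t val)
{
	/* 1: disable the fence so a partial value is never evaluated */
	write32(fence_reg, 0);
	read32(fence_reg);

	/* 2: upper dword while the valid bit (in the lower dword) is clear */
	write32(fence_reg + 4, (uint32_t)(val >> 32));
	read32(fence_reg + 4);

	/* 3: lower dword last; this write re-arms the fence */
	write32(fence_reg, (uint32_t)val);
	read32(fence_reg);
}
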
@@@ -2829,17 -2676,35 +2818,17 @@@ static inline int fence_number(struct d
        return fence - dev_priv->fence_regs;
  }
  
 -static void i915_gem_write_fence__ipi(void *data)
 -{
 -      wbinvd();
 -}
 -
  static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
                                         struct drm_i915_fence_reg *fence,
                                         bool enable)
  {
 -      struct drm_device *dev = obj->base.dev;
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -      int fence_reg = fence_number(dev_priv, fence);
 -
 -      /* In order to fully serialize access to the fenced region and
 -       * the update to the fence register we need to take extreme
 -       * measures on SNB+. In theory, the write to the fence register
 -       * flushes all memory transactions before, and coupled with the
 -       * mb() placed around the register write we serialise all memory
 -       * operations with respect to the changes in the tiler. Yet, on
 -       * SNB+ we need to take a step further and emit an explicit wbinvd()
 -       * on each processor in order to manually flush all memory
 -       * transactions before updating the fence register.
 -       */
 -      if (HAS_LLC(obj->base.dev))
 -              on_each_cpu(i915_gem_write_fence__ipi, NULL, 1);
 -      i915_gem_write_fence(dev, fence_reg, enable ? obj : NULL);
 +      struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
 +      int reg = fence_number(dev_priv, fence);
 +
 +      i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
  
        if (enable) {
 -              obj->fence_reg = fence_reg;
 +              obj->fence_reg = reg;
                fence->obj = obj;
                list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
        } else {
@@@ -3018,7 -2883,7 +3007,7 @@@ static void i915_gem_verify_gtt(struct 
        struct drm_i915_gem_object *obj;
        int err = 0;
  
 -      list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
 +      list_for_each_entry(obj, &dev_priv->mm.gtt_list, global_list) {
                if (obj->gtt_space == NULL) {
                        printk(KERN_ERR "object found on GTT list with no space reserved\n");
                        err++;
@@@ -3065,8 -2930,6 +3054,8 @@@ i915_gem_object_bind_to_gtt(struct drm_
        struct drm_mm_node *node;
        u32 size, fence_size, fence_alignment, unfenced_alignment;
        bool mappable, fenceable;
 +      size_t gtt_max = map_and_fenceable ?
 +              dev_priv->gtt.mappable_end : dev_priv->gtt.total;
        int ret;
  
        fence_size = i915_gem_get_gtt_size(dev,
        /* If the object is bigger than the entire aperture, reject it early
         * before evicting everything in a vain attempt to find space.
         */
 -      if (obj->base.size >
 -          (map_and_fenceable ? dev_priv->gtt.mappable_end : dev_priv->gtt.total)) {
 -              DRM_ERROR("Attempting to bind an object larger than the aperture\n");
 +      if (obj->base.size > gtt_max) {
 +              DRM_ERROR("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
 +                        obj->base.size,
 +                        map_and_fenceable ? "mappable" : "total",
 +                        gtt_max);
                return -E2BIG;
        }
  
                return -ENOMEM;
        }
  
 - search_free:
 -      if (map_and_fenceable)
 -              ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
 -                                                        size, alignment, obj->cache_level,
 -                                                        0, dev_priv->gtt.mappable_end);
 -      else
 -              ret = drm_mm_insert_node_generic(&dev_priv->mm.gtt_space, node,
 -                                               size, alignment, obj->cache_level);
 +search_free:
 +      ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
 +                                                size, alignment,
 +                                                obj->cache_level, 0, gtt_max);
        if (ret) {
                ret = i915_gem_evict_something(dev, size, alignment,
                                               obj->cache_level,
                return ret;
        }
  
 -      list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
 +      list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
        list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
  
        obj->gtt_space = node;
  
        obj->map_and_fenceable = mappable && fenceable;
  
 -      i915_gem_object_unpin_pages(obj);
        trace_i915_gem_object_bind(obj, map_and_fenceable);
        i915_gem_verify_gtt(dev);
        return 0;
@@@ -3856,7 -3722,7 +3845,7 @@@ void i915_gem_object_init(struct drm_i9
                          const struct drm_i915_gem_object_ops *ops)
  {
        INIT_LIST_HEAD(&obj->mm_list);
 -      INIT_LIST_HEAD(&obj->gtt_list);
 +      INIT_LIST_HEAD(&obj->global_list);
        INIT_LIST_HEAD(&obj->ring_list);
        INIT_LIST_HEAD(&obj->exec_list);
  
@@@ -3956,13 -3822,7 +3945,13 @@@ void i915_gem_free_object(struct drm_ge
                dev_priv->mm.interruptible = was_interruptible;
        }
  
 -      obj->pages_pin_count = 0;
 +      /* Stolen objects don't hold a ref, but do hold pin count. Fix that up
 +       * before progressing. */
 +      if (obj->stolen)
 +              i915_gem_object_unpin_pages(obj);
 +
 +      if (WARN_ON(obj->pages_pin_count))
 +              obj->pages_pin_count = 0;
        i915_gem_object_put_pages(obj);
        i915_gem_object_free_mmap_offset(obj);
        i915_gem_object_release_stolen(obj);
@@@ -4003,8 -3863,6 +3992,6 @@@ i915_gem_idle(struct drm_device *dev
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                i915_gem_evict_everything(dev);
  
-       i915_gem_reset_fences(dev);
        /* Hack!  Don't let anybody do execbuf while we don't control the chip.
         * We need to replace this with a semaphore, or something.
         * And not confound mm.suspended!
@@@ -4115,21 -3973,12 +4102,21 @@@ static int i915_gem_init_rings(struct d
                        goto cleanup_bsd_ring;
        }
  
 +      if (HAS_VEBOX(dev)) {
 +              ret = intel_init_vebox_ring_buffer(dev);
 +              if (ret)
 +                      goto cleanup_blt_ring;
 +      }
 +
 +
        ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
        if (ret)
 -              goto cleanup_blt_ring;
 +              goto cleanup_vebox_ring;
  
        return 0;
  
 +cleanup_vebox_ring:
 +      intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
  cleanup_blt_ring:
        intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
  cleanup_bsd_ring:
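
The cleanup_* labels above follow the kernel's usual goto-unwind error
handling: acquire resources in order, release in reverse starting from the
label matching the last success. A minimal standalone sketch with stand-in
acquire()/release() helpers:

#include <stdlib.h>

struct res { int dummy; };

static struct res *acquire(void) { return malloc(sizeof(struct res)); }
static void release(struct res *r) { free(r); }

static int example_init_three(struct res **a, struct res **b, struct res **c)
{
	*a = acquire();
	if (!*a)
		return -1;
	*b = acquire();
	if (!*b)
		goto cleanup_a;
	*c = acquire();
	if (!*c)
		goto cleanup_b;

	return 0;

cleanup_b:
	release(*b);	/* unwind in reverse order of acquisition */
cleanup_a:
	release(*a);
	return -1;
}
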
@@@ -4340,7 -4189,8 +4327,8 @@@ i915_gem_load(struct drm_device *dev
                dev_priv->num_fence_regs = 8;
  
        /* Initialize fence registers to zero */
-       i915_gem_reset_fences(dev);
+       INIT_LIST_HEAD(&dev_priv->mm.fence_list);
+       i915_gem_restore_fences(dev);
  
        i915_gem_detect_bit_6_swizzle(dev);
        init_waitqueue_head(&dev_priv->pending_flip_queue);
@@@ -4603,10 -4453,10 +4591,10 @@@ i915_gem_inactive_shrink(struct shrinke
        }
  
        cnt = 0;
 -      list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list)
 +      list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
                if (obj->pages_pin_count == 0)
                        cnt += obj->base.size >> PAGE_SHIFT;
 -      list_for_each_entry(obj, &dev_priv->mm.inactive_list, gtt_list)
 +      list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list)
                if (obj->pin_count == 0 && obj->pages_pin_count == 0)
                        cnt += obj->base.size >> PAGE_SHIFT;
  
diff --combined drivers/gpu/drm/i915/i915_suspend.c
index 88b9a663944f0afce886e3070677753282cad516,369b3d8776ab42c6ef52dafa94174c3038ecebe0..70db618989c42a3bcd3a73df44593a368e2d66d0
@@@ -192,7 -192,6 +192,7 @@@ static void i915_restore_vga(struct drm
  static void i915_save_display(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
 +      unsigned long flags;
  
        /* Display arbitration control */
        if (INTEL_INFO(dev)->gen <= 4)
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                i915_save_display_reg(dev);
  
 +      spin_lock_irqsave(&dev_priv->backlight.lock, flags);
 +
        /* LVDS state */
        if (HAS_PCH_SPLIT(dev)) {
                dev_priv->regfile.savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
                        dev_priv->regfile.saveLVDS = I915_READ(LVDS);
        }
  
 +      spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
 +
        if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
                dev_priv->regfile.savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
  
@@@ -262,7 -257,6 +262,7 @@@ static void i915_restore_display(struc
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 mask = 0xffffffff;
 +      unsigned long flags;
  
        /* Display arbitration */
        if (INTEL_INFO(dev)->gen <= 4)
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                i915_restore_display_reg(dev);
  
 +      spin_lock_irqsave(&dev_priv->backlight.lock, flags);
 +
        /* LVDS state */
        if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
                I915_WRITE(BLC_PWM_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
                I915_WRITE(PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
        }
  
 +      spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
 +
        /* only restore FBC info on the platform that supports FBC*/
        intel_disable_fbc(dev);
        if (I915_HAS_FBC(dev)) {
@@@ -394,6 -384,7 +394,7 @@@ int i915_restore_state(struct drm_devic
  
        mutex_lock(&dev->struct_mutex);
  
+       i915_gem_restore_fences(dev);
        i915_restore_display(dev);
  
        if (!drm_core_check_feature(dev, DRIVER_MODESET)) {