Merge tag 'v3.5-rc4' into drm-intel-next-queued
drivers/gpu/drm/i915/intel_ringbuffer.c
index e5b84ff89ca58234b5c7e0d30fa8e97583d6357d..f30a53a8917e6f229b1bd986d23a4e65b41f28fc 100644
@@ -226,6 +226,7 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring,
         * impact.
         */
        flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+       flags |= PIPE_CONTROL_TLB_INVALIDATE;
        flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
        flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
        flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
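For context: gen6_render_ring_flush() emits the flags accumulated above as a single PIPE_CONTROL command on the ring. A minimal sketch of that emission step follows; it is not reproduced from this patch, and it assumes the intel_ring_begin()/intel_ring_emit()/intel_ring_advance() helpers and the scratch_addr pipe-control scratch page used elsewhere in this function.

	/* Sketch: emit the assembled flags as a 4-dword PIPE_CONTROL whose
	 * post-sync write targets the per-ring scratch page in the GGTT.
	 */
	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
	intel_ring_emit(ring, flags);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	return 0;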
@@ -433,11 +434,21 @@ static int init_render_ring(struct intel_ring_buffer *ring)
                 */
                I915_WRITE(CACHE_MODE_0,
                           _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
+
+               /* This is not explicitly set for GEN6, so read the register.
+                * see intel_ring_mi_set_context() for why we care.
+                * TODO: consider explicitly setting the bit for GEN5
+                */
+               ring->itlb_before_ctx_switch =
+                       !!(I915_READ(GFX_MODE) & GFX_TLB_INVALIDATE_ALWAYS);
        }
 
        if (INTEL_INFO(dev)->gen >= 6)
                I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
 
+       if (IS_IVYBRIDGE(dev))
+               I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);
+
        return ret;
 }
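The itlb_before_ctx_switch flag captured above is consumed when a context switch is emitted; the in-code comment points at intel_ring_mi_set_context() for the rationale. A hedged sketch of the shape of that check on the MI_SET_CONTEXT path (naming and placement follow the referenced helper and are not part of this hunk):

	/* Sketch: force a TLB-invalidating flush before MI_SET_CONTEXT on
	 * parts whose hardware does not invalidate the ITLB on its own,
	 * i.e. GFX_TLB_INVALIDATE_ALWAYS was not set in GFX_MODE.
	 */
	if (ring->itlb_before_ctx_switch) {
		ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, 0);
		if (ret)
			return ret;
	}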
 
@@ -825,7 +836,11 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
        if (ring->irq_refcount++ == 0) {
-               I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
+               if (IS_IVYBRIDGE(dev) && ring->id == RCS)
+                       I915_WRITE_IMR(ring, ~(ring->irq_enable_mask |
+                                               GEN6_RENDER_L3_PARITY_ERROR));
+               else
+                       I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
                dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
                I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
                POSTING_READ(GTIMR);
@@ -844,7 +859,10 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
        if (--ring->irq_refcount == 0) {
-               I915_WRITE_IMR(ring, ~0);
+               if (IS_IVYBRIDGE(dev) && ring->id == RCS)
+                       I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);
+               else
+                       I915_WRITE_IMR(ring, ~0);
                dev_priv->gt_irq_mask |= ring->irq_enable_mask;
                I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
                POSTING_READ(GTIMR);
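Both IRQ paths above preserve the same invariant: on Ivybridge's render ring, GEN6_RENDER_L3_PARITY_ERROR stays unmasked in IMR whether or not the user interrupt is currently enabled, matching the unconditional unmask added in init_render_ring(). A hypothetical helper, render_ring_imr(), not part of the patch, just to state in one place the IMR value both paths converge on:

	/* Sketch: IMR value written by gen6_ring_get_irq()/gen6_ring_put_irq()
	 * for a given user-interrupt enable state.
	 */
	static u32 render_ring_imr(struct intel_ring_buffer *ring,
				   bool user_irq_enabled)
	{
		u32 imr = ~0;

		if (user_irq_enabled)
			imr &= ~ring->irq_enable_mask;
		if (IS_IVYBRIDGE(ring->dev) && ring->id == RCS)
			imr &= ~GEN6_RENDER_L3_PARITY_ERROR;

		return imr;
	}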
@@ -969,6 +987,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
                                  struct intel_ring_buffer *ring)
 {
        struct drm_i915_gem_object *obj;
+       struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
 
        ring->dev = dev;
@@ -1002,8 +1021,9 @@ static int intel_init_ring_buffer(struct drm_device *dev,
        if (ret)
                goto err_unpin;
 
-       ring->virtual_start = ioremap_wc(dev->agp->base + obj->gtt_offset,
-                                        ring->size);
+       ring->virtual_start =
+               ioremap_wc(dev_priv->mm.gtt->gma_bus_addr + obj->gtt_offset,
+                          ring->size);
        if (ring->virtual_start == NULL) {
                DRM_ERROR("Failed to map ringbuffer.\n");
                ret = -EINVAL;
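The write-combining map now derives the aperture bus address from the GTT description (dev_priv->mm.gtt->gma_bus_addr) instead of dev->agp->base, so the ring ends up mapped at the aperture bus address plus the object's GTT offset. For symmetry, a hedged sketch of the matching teardown; the cleanup path simply iounmap()s the same pointer (in intel_cleanup_ring_buffer(), which is not shown in this hunk):

	/* Sketch: undo the write-combining mapping of the ring buffer. */
	if (ring->virtual_start)
		iounmap(ring->virtual_start);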
@@ -1100,7 +1120,7 @@ static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
        was_interruptible = dev_priv->mm.interruptible;
        dev_priv->mm.interruptible = false;
 
-       ret = i915_wait_request(ring, seqno);
+       ret = i915_wait_seqno(ring, seqno);
 
        dev_priv->mm.interruptible = was_interruptible;
        if (!ret)