/*
 * Copyright © 2010 Daniel Vetter
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

#define GEN6_PPGTT_PD_ENTRIES 512
#define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t))

#define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))
#define HSW_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0x7f0))

#define GEN6_PDE_VALID			(1 << 0)
/* gen6+ has bit 11-4 for physical addr bit 39-32 */
#define GEN6_PDE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
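/* For example, encoding the 40-bit address 0x1234567000 gives
 * 0x1234567000 | ((0x1234567000 >> 28) & 0xff0) = 0x1234567120:
 * address bits 39:32 (here 0x12) are folded into PDE/PTE bits 11:4.
 */
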
#define GEN6_PTE_VALID			(1 << 0)
#define GEN6_PTE_UNCACHED		(1 << 1)
#define HSW_PTE_UNCACHED		(0)
#define GEN6_PTE_CACHE_LLC		(2 << 1)
#define GEN6_PTE_CACHE_LLC_MLC		(3 << 1)
#define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
#define HSW_PTE_ADDR_ENCODE(addr)	HSW_GTT_ADDR_ENCODE(addr)

/* Cacheability Control is a 4-bit value. The low three bits are stored in
 * bits 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
 */
#define HSW_CACHEABILITY_CONTROL(bits)	((((bits) & 0x7) << 1) | \
					 (((bits) & 0x8) << (11 - 3)))
#define HSW_WB_LLC_AGE3			HSW_CACHEABILITY_CONTROL(0x2)
#define HSW_WB_LLC_AGE0			HSW_CACHEABILITY_CONTROL(0x3)
#define HSW_WB_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0xb)
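/* For example, HSW_WB_ELLC_LLC_AGE0 expands to
 * ((0xb & 0x7) << 1) | ((0xb & 0x8) << 8) = 0x006 | 0x800 = 0x806,
 * i.e. PTE bits 3:1 = 0b011 with bit 11 set.
 */
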
static gen6_gtt_pte_t gen6_pte_encode(dma_addr_t addr,
				      enum i915_cache_level level)
{
	gen6_gtt_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_LLC_MLC:
		pte |= GEN6_PTE_CACHE_LLC_MLC;
		break;
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		BUG();
	}

	return pte;
}

#define BYT_PTE_WRITEABLE		(1 << 1)
#define BYT_PTE_SNOOPED_BY_CPU_CACHES	(1 << 2)

static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
				     enum i915_cache_level level)
{
	gen6_gtt_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	/* Mark the page as writeable. Other platforms don't have a
	 * setting for read-only/writable, so this matches that behavior.
	 */
	pte |= BYT_PTE_WRITEABLE;

	if (level != I915_CACHE_NONE)
		pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;

	return pte;
}

static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
				     enum i915_cache_level level)
{
	gen6_gtt_pte_t pte = GEN6_PTE_VALID;
	pte |= HSW_PTE_ADDR_ENCODE(addr);

	if (level != I915_CACHE_NONE)
		pte |= HSW_WB_LLC_AGE3;

	return pte;
}

static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
				      enum i915_cache_level level)
{
	gen6_gtt_pte_t pte = GEN6_PTE_VALID;
	pte |= HSW_PTE_ADDR_ENCODE(addr);

	if (level != I915_CACHE_NONE)
		pte |= HSW_WB_ELLC_LLC_AGE0;

	return pte;
}

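/* Which encoder is used is selected per-platform in i915_gem_gtt_init():
 * gen6_pte_encode by default, byt_pte_encode on Valleyview, and
 * hsw_pte_encode or iris_pte_encode (with eLLC) on Haswell.
 */
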
static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)
{
	struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
	gen6_gtt_pte_t __iomem *pd_addr;
	uint32_t pd_entry;
	int i;

	WARN_ON(ppgtt->pd_offset & 0x3f);
	pd_addr = (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm +
		ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);
	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		dma_addr_t pt_addr;

		pt_addr = ppgtt->pt_dma_addr[i];
		pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
		pd_entry |= GEN6_PDE_VALID;

		writel(pd_entry, pd_addr + i);
	}
	readl(pd_addr);
}

static int gen6_ppgtt_enable(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t pd_offset;
	struct intel_ring_buffer *ring;
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
	int i;

	BUG_ON(ppgtt->pd_offset & 0x3f);

	gen6_write_pdes(ppgtt);

	pd_offset = ppgtt->pd_offset;
	pd_offset /= 64; /* in cachelines, */
	pd_offset <<= 16;

	if (INTEL_INFO(dev)->gen == 6) {
		uint32_t ecochk, gab_ctl, ecobits;

		ecobits = I915_READ(GAC_ECO_BITS);
		I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
					 ECOBITS_PPGTT_CACHE64B);

		gab_ctl = I915_READ(GAB_CTL);
		I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);

		ecochk = I915_READ(GAM_ECOCHK);
		I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
				       ECOCHK_PPGTT_CACHE64B);
		I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
	} else if (INTEL_INFO(dev)->gen >= 7) {
		uint32_t ecochk, ecobits;

		ecobits = I915_READ(GAC_ECO_BITS);
		I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);

		ecochk = I915_READ(GAM_ECOCHK);
		if (IS_HASWELL(dev)) {
			ecochk |= ECOCHK_PPGTT_WB_HSW;
		} else {
			ecochk |= ECOCHK_PPGTT_LLC_IVB;
			ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
		}
		I915_WRITE(GAM_ECOCHK, ecochk);
		/* GFX_MODE is per-ring on gen7+ */
	}

	for_each_ring(ring, dev_priv, i) {
		if (INTEL_INFO(dev)->gen >= 7)
			I915_WRITE(RING_MODE_GEN7(ring),
				   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));

		I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
		I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
	}
	return 0;
}

/* PPGTT support for Sandybridge/Gen6 and later */
static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
				   unsigned first_entry,
				   unsigned num_entries)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);
	gen6_gtt_pte_t *pt_vaddr, scratch_pte;
	unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
	unsigned last_pte, i;

	scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC);

	while (num_entries) {
		last_pte = first_pte + num_entries;
		if (last_pte > I915_PPGTT_PT_ENTRIES)
			last_pte = I915_PPGTT_PT_ENTRIES;

		pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);

		for (i = first_pte; i < last_pte; i++)
			pt_vaddr[i] = scratch_pte;

		kunmap_atomic(pt_vaddr);

		num_entries -= last_pte - first_pte;
		first_pte = 0;
		act_pt++;
	}
}

static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
				      struct sg_table *pages,
				      unsigned first_entry,
				      enum i915_cache_level cache_level)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);
	gen6_gtt_pte_t *pt_vaddr;
	unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
	unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES;
	struct sg_page_iter sg_iter;

	pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
	for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
		dma_addr_t page_addr;

		page_addr = sg_page_iter_dma_address(&sg_iter);
		pt_vaddr[act_pte] = vm->pte_encode(page_addr, cache_level);
		if (++act_pte == I915_PPGTT_PT_ENTRIES) {
			kunmap_atomic(pt_vaddr);
			act_pt++;
			pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
			act_pte = 0;
		}
	}
	kunmap_atomic(pt_vaddr);
}

static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);
	int i;

	drm_mm_takedown(&ppgtt->base.mm);

	if (ppgtt->pt_dma_addr) {
		for (i = 0; i < ppgtt->num_pd_entries; i++)
			pci_unmap_page(ppgtt->base.dev->pdev,
				       ppgtt->pt_dma_addr[i],
				       4096, PCI_DMA_BIDIRECTIONAL);
	}

	kfree(ppgtt->pt_dma_addr);
	for (i = 0; i < ppgtt->num_pd_entries; i++)
		__free_page(ppgtt->pt_pages[i]);
	kfree(ppgtt->pt_pages);
	kfree(ppgtt);
}

static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
	struct drm_device *dev = ppgtt->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned first_pd_entry_in_global_pt;
	int i;
	int ret = -ENOMEM;

	/* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
	 * entries. For aliasing ppgtt support we just steal them at the end for
	 * now. */
	first_pd_entry_in_global_pt = gtt_total_entries(dev_priv->gtt);

	ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode;
	ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES;
	ppgtt->enable = gen6_ppgtt_enable;
	ppgtt->base.clear_range = gen6_ppgtt_clear_range;
	ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
	ppgtt->base.cleanup = gen6_ppgtt_cleanup;
	ppgtt->base.scratch = dev_priv->gtt.base.scratch;
	ppgtt->pt_pages = kzalloc(sizeof(struct page *) * ppgtt->num_pd_entries,
				  GFP_KERNEL);
	if (!ppgtt->pt_pages)
		return -ENOMEM;

	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
		if (!ppgtt->pt_pages[i])
			goto err_pt_alloc;
	}

	ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t) * ppgtt->num_pd_entries,
				     GFP_KERNEL);
	if (!ppgtt->pt_dma_addr)
		goto err_pt_alloc;

	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		dma_addr_t pt_addr;

		pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i], 0, 4096,
				       PCI_DMA_BIDIRECTIONAL);

		if (pci_dma_mapping_error(dev->pdev, pt_addr)) {
			ret = -EIO;
			goto err_pd_pin;
		}
		ppgtt->pt_dma_addr[i] = pt_addr;
	}

	ppgtt->base.clear_range(&ppgtt->base, 0,
				ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES);

	ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t);

	return 0;

err_pd_pin:
	if (ppgtt->pt_dma_addr) {
		for (i--; i >= 0; i--)
			pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
				       4096, PCI_DMA_BIDIRECTIONAL);
	}
err_pt_alloc:
	kfree(ppgtt->pt_dma_addr);
	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		if (ppgtt->pt_pages[i])
			__free_page(ppgtt->pt_pages[i]);
	}
	kfree(ppgtt->pt_pages);

	return ret;
}

static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_hw_ppgtt *ppgtt;
	int ret;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return -ENOMEM;

	ppgtt->base.dev = dev;

	if (INTEL_INFO(dev)->gen < 8)
		ret = gen6_ppgtt_init(ppgtt);
	else
		BUG();

	if (ret)
		kfree(ppgtt);
	else {
		dev_priv->mm.aliasing_ppgtt = ppgtt;
		drm_mm_init(&ppgtt->base.mm, ppgtt->base.start,
			    ppgtt->base.total);
	}

	return ret;
}

void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

	if (!ppgtt)
		return;

	ppgtt->base.cleanup(&ppgtt->base);
	dev_priv->mm.aliasing_ppgtt = NULL;
}

void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
			    struct drm_i915_gem_object *obj,
			    enum i915_cache_level cache_level)
{
	ppgtt->base.insert_entries(&ppgtt->base, obj->pages,
				   i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
				   cache_level);
}

void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
			      struct drm_i915_gem_object *obj)
{
	ppgtt->base.clear_range(&ppgtt->base,
				i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
				obj->base.size >> PAGE_SHIFT);
}

extern int intel_iommu_gfx_mapped;
/* Certain Gen5 chipsets require idling the GPU before
 * unmapping anything from the GTT when VT-d is enabled.
 */
static inline bool needs_idle_maps(struct drm_device *dev)
{
#ifdef CONFIG_INTEL_IOMMU
	/* Query intel_iommu to see if we need the workaround. Presumably that
	 * was loaded first.
	 */
	if (IS_GEN5(dev) && IS_MOBILE(dev) && intel_iommu_gfx_mapped)
		return true;
#endif
	return false;
}

static bool do_idling(struct drm_i915_private *dev_priv)
{
	bool ret = dev_priv->mm.interruptible;

	if (unlikely(dev_priv->gtt.do_idle_maps)) {
		dev_priv->mm.interruptible = false;
		if (i915_gpu_idle(dev_priv->dev)) {
			DRM_ERROR("Couldn't idle GPU\n");
			/* Wait a bit, in hopes it avoids the hang */
			udelay(10);
		}
	}

	return ret;
}

static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
{
	if (unlikely(dev_priv->gtt.do_idle_maps))
		dev_priv->mm.interruptible = interruptible;
}

void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;

	/* First fill our portion of the GTT with scratch pages */
	dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
				       dev_priv->gtt.base.start / PAGE_SIZE,
				       dev_priv->gtt.base.total / PAGE_SIZE);

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		i915_gem_clflush_object(obj);
		i915_gem_gtt_bind_object(obj, obj->cache_level);
	}

	i915_gem_chipset_flush(dev);
}

int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
{
	if (obj->has_dma_mapping)
		return 0;

	if (!dma_map_sg(&obj->base.dev->pdev->dev,
			obj->pages->sgl, obj->pages->nents,
			PCI_DMA_BIDIRECTIONAL))
		return -ENOSPC;

	return 0;
}

/*
 * Binds an object into the global gtt with the specified cache level. The object
 * will be accessible to the GPU via commands whose operands reference offsets
 * within the global GTT as well as accessible by the GPU through the GMADR
 * mapped BAR (dev_priv->mm.gtt->gtt).
 */
static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
				     struct sg_table *st,
				     unsigned int first_entry,
				     enum i915_cache_level level)
{
	struct drm_i915_private *dev_priv = vm->dev->dev_private;
	gen6_gtt_pte_t __iomem *gtt_entries =
		(gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
	int i = 0;
	struct sg_page_iter sg_iter;
	dma_addr_t addr;

	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
		addr = sg_page_iter_dma_address(&sg_iter);
		iowrite32(vm->pte_encode(addr, level), &gtt_entries[i]);
		i++;
	}

	/* XXX: This serves as a posting read to make sure that the PTE has
	 * actually been updated. There is some concern that even though
	 * registers and PTEs are within the same BAR that they are potentially
	 * of NUMA access patterns. Therefore, even with the way we assume
	 * hardware should work, we must keep this posting read for paranoia.
	 */
	if (i != 0)
		WARN_ON(readl(&gtt_entries[i-1]) !=
			vm->pte_encode(addr, level));

	/* This next bit makes the above posting read even more important. We
	 * want to flush the TLBs only after we're certain all the PTE updates
	 * have finished.
	 */
	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	POSTING_READ(GFX_FLSH_CNTL_GEN6);
}

static void gen6_ggtt_clear_range(struct i915_address_space *vm,
				  unsigned int first_entry,
				  unsigned int num_entries)
{
	struct drm_i915_private *dev_priv = vm->dev->dev_private;
	gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
		(gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
	const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
	int i;

	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC);
	for (i = 0; i < num_entries; i++)
		iowrite32(scratch_pte, &gtt_base[i]);
	readl(gtt_base);
}

static void i915_ggtt_insert_entries(struct i915_address_space *vm,
				     struct sg_table *st,
				     unsigned int pg_start,
				     enum i915_cache_level cache_level)
{
	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;

	intel_gtt_insert_sg_entries(st, pg_start, flags);
}

static void i915_ggtt_clear_range(struct i915_address_space *vm,
				  unsigned int first_entry,
				  unsigned int num_entries)
{
	intel_gtt_clear_range(first_entry, num_entries);
}

void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
			      enum i915_cache_level cache_level)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT;

	dev_priv->gtt.base.insert_entries(&dev_priv->gtt.base, obj->pages,
					  entry,
					  cache_level);

	obj->has_global_gtt_mapping = 1;
}

void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT;

	dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
				       entry,
				       obj->base.size >> PAGE_SHIFT);

	obj->has_global_gtt_mapping = 0;
}

void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool interruptible;

	interruptible = do_idling(dev_priv);

	if (!obj->has_dma_mapping)
		dma_unmap_sg(&dev->pdev->dev,
			     obj->pages->sgl, obj->pages->nents,
			     PCI_DMA_BIDIRECTIONAL);

	undo_idling(dev_priv, interruptible);
}

static void i915_gtt_color_adjust(struct drm_mm_node *node,
				  unsigned long color,
				  unsigned long *start,
				  unsigned long *end)
{
	if (node->color != color)
		*start += 4096;

	if (!list_empty(&node->node_list)) {
		node = list_entry(node->node_list.next,
				  struct drm_mm_node,
				  node_list);
		if (node->allocated && node->color != color)
			*end -= 4096;
	}
}

void i915_gem_setup_global_gtt(struct drm_device *dev,
			       unsigned long start,
			       unsigned long mappable_end,
			       unsigned long end)
{
	/* Let GEM Manage all of the aperture.
	 *
	 * However, leave one page at the end still bound to the scratch page.
	 * There are a number of places where the hardware apparently prefetches
	 * past the end of the object, and we've seen multiple hangs with the
	 * GPU head pointer stuck in a batchbuffer bound at the last page of the
	 * aperture. One page should be enough to keep any prefetching inside
	 * of the aperture.
	 */
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_mm_node *entry;
	struct drm_i915_gem_object *obj;
	unsigned long hole_start, hole_end;

	BUG_ON(mappable_end > end);

	/* Subtract the guard page ... */
	drm_mm_init(&dev_priv->gtt.base.mm, start, end - start - PAGE_SIZE);
	if (!HAS_LLC(dev))
		dev_priv->gtt.base.mm.color_adjust = i915_gtt_color_adjust;

	/* Mark any preallocated objects as occupied */
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		struct i915_vma *vma = i915_gem_obj_to_vma(obj, &dev_priv->gtt.base);
		int ret;

		DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n",
			      i915_gem_obj_ggtt_offset(obj), obj->base.size);

		WARN_ON(i915_gem_obj_ggtt_bound(obj));
		ret = drm_mm_reserve_node(&dev_priv->gtt.base.mm, &vma->node);
		if (ret)
			DRM_DEBUG_KMS("Reservation failed\n");
		obj->has_global_gtt_mapping = 1;
		list_add(&vma->vma_link, &obj->vma_list);
	}

	dev_priv->gtt.base.start = start;
	dev_priv->gtt.base.total = end - start;

	/* Clear any non-preallocated blocks */
	drm_mm_for_each_hole(entry, &dev_priv->gtt.base.mm,
			     hole_start, hole_end) {
		const unsigned long count = (hole_end - hole_start) / PAGE_SIZE;

		DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
			      hole_start, hole_end);
		dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
					       hole_start / PAGE_SIZE,
					       count);
	}

	/* And finally clear the reserved guard page */
	dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
				       end / PAGE_SIZE - 1, 1);
}

static bool
intel_enable_ppgtt(struct drm_device *dev)
{
	if (i915_enable_ppgtt >= 0)
		return i915_enable_ppgtt;

#ifdef CONFIG_INTEL_IOMMU
	/* Disable ppgtt on SNB if VT-d is on. */
	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
		return false;
#endif

	return true;
}

void i915_gem_init_global_gtt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long gtt_size, mappable_size;

	gtt_size = dev_priv->gtt.base.total;
	mappable_size = dev_priv->gtt.mappable_end;

	if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
		int ret;

		if (INTEL_INFO(dev)->gen <= 7) {
			/* PPGTT pdes are stolen from global gtt ptes, so shrink the
			 * aperture accordingly when using aliasing ppgtt. */
			gtt_size -= GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE;
		}

		i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);

		ret = i915_gem_init_aliasing_ppgtt(dev);
		if (!ret)
			return;

		DRM_ERROR("Aliased PPGTT setup failed %d\n", ret);
		drm_mm_takedown(&dev_priv->gtt.base.mm);
		gtt_size += GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE;
	}
	i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
}

static int setup_scratch_page(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct page *page;
	dma_addr_t dma_addr;

	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
	if (page == NULL)
		return -ENOMEM;
	get_page(page);
	set_pages_uc(page, 1);

#ifdef CONFIG_INTEL_IOMMU
	dma_addr = pci_map_page(dev->pdev, page, 0, PAGE_SIZE,
				PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(dev->pdev, dma_addr))
		return -EINVAL;
#else
	dma_addr = page_to_phys(page);
#endif
	dev_priv->gtt.base.scratch.page = page;
	dev_priv->gtt.base.scratch.addr = dma_addr;

	return 0;
}

static void teardown_scratch_page(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct page *page = dev_priv->gtt.base.scratch.page;

	set_pages_wb(page, 1);
	pci_unmap_page(dev->pdev, dev_priv->gtt.base.scratch.addr,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	put_page(page);
	__free_page(page);
}

static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
{
	snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
	snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
	return snb_gmch_ctl << 20;
}

static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
{
	snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
	snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
	return snb_gmch_ctl << 25; /* 32 MB units */
}
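/* With the decodes above, for example, a GGMS field of 2 yields a 2MB GTT
 * (2 << 20) and a GMS field of 3 yields 96MB of stolen memory (3 << 25).
 */
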
static int gen6_gmch_probe(struct drm_device *dev,
			   size_t *gtt_total,
			   size_t *stolen,
			   phys_addr_t *mappable_base,
			   unsigned long *mappable_end)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	phys_addr_t gtt_bus_addr;
	unsigned int gtt_size;
	u16 snb_gmch_ctl;
	int ret;

	*mappable_base = pci_resource_start(dev->pdev, 2);
	*mappable_end = pci_resource_len(dev->pdev, 2);

	/* 64/512MB is the current min/max we actually know of, but this is just
	 * a coarse sanity check.
	 */
	if ((*mappable_end < (64<<20) || (*mappable_end > (512<<20)))) {
		DRM_ERROR("Unknown GMADR size (%lx)\n",
			  dev_priv->gtt.mappable_end);
		return -ENXIO;
	}

	if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
	pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
	gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);

	*stolen = gen6_get_stolen_size(snb_gmch_ctl);
	*gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT;

	/* For Modern GENs the PTEs and register space are split in the BAR */
	gtt_bus_addr = pci_resource_start(dev->pdev, 0) +
		(pci_resource_len(dev->pdev, 0) / 2);

	dev_priv->gtt.gsm = ioremap_wc(gtt_bus_addr, gtt_size);
	if (!dev_priv->gtt.gsm) {
		DRM_ERROR("Failed to map the gtt page table\n");
		return -ENOMEM;
	}

	ret = setup_scratch_page(dev);
	if (ret)
		DRM_ERROR("Scratch setup failed\n");

	dev_priv->gtt.base.clear_range = gen6_ggtt_clear_range;
	dev_priv->gtt.base.insert_entries = gen6_ggtt_insert_entries;

	return ret;
}

static void gen6_gmch_remove(struct i915_address_space *vm)
{
	struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);

	iounmap(gtt->gsm);
	teardown_scratch_page(vm->dev);
}

static int i915_gmch_probe(struct drm_device *dev,
			   size_t *gtt_total,
			   size_t *stolen,
			   phys_addr_t *mappable_base,
			   unsigned long *mappable_end)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->dev->pdev, NULL);
	if (!ret) {
		DRM_ERROR("failed to set up gmch\n");
		return -EIO;
	}

	intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end);

	dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
	dev_priv->gtt.base.clear_range = i915_ggtt_clear_range;
	dev_priv->gtt.base.insert_entries = i915_ggtt_insert_entries;

	return 0;
}

static void i915_gmch_remove(struct i915_address_space *vm)
{
	intel_gmch_remove();
}

int i915_gem_gtt_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_gtt *gtt = &dev_priv->gtt;
	int ret;

	if (INTEL_INFO(dev)->gen <= 5) {
		gtt->gtt_probe = i915_gmch_probe;
		gtt->base.cleanup = i915_gmch_remove;
	} else {
		gtt->gtt_probe = gen6_gmch_probe;
		gtt->base.cleanup = gen6_gmch_remove;
		if (IS_HASWELL(dev) && dev_priv->ellc_size)
			gtt->base.pte_encode = iris_pte_encode;
		else if (IS_HASWELL(dev))
			gtt->base.pte_encode = hsw_pte_encode;
		else if (IS_VALLEYVIEW(dev))
			gtt->base.pte_encode = byt_pte_encode;
		else
			gtt->base.pte_encode = gen6_pte_encode;
	}

	ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size,
			     &gtt->mappable_base, &gtt->mappable_end);
	if (ret)
		return ret;

	gtt->base.dev = dev;

	/* GMADR is the PCI mmio aperture into the global GTT. */
	DRM_INFO("Memory usable by graphics device = %zdM\n",
		 gtt->base.total >> 20);
	DRM_DEBUG_DRIVER("GMADR size = %ldM\n", gtt->mappable_end >> 20);
	DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20);

	return 0;
}