/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *	    Ben Skeggs   <darktama@iinet.net.au>
 *	    Jeremy Kolb  <jkolb@brandeis.edu>
 */

#include "drmP.h"

#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_mm.h"
#include "nouveau_vm.h"

#include <linux/log2.h>
#include <linux/slab.h>

static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	if (unlikely(nvbo->gem))
		DRM_ERROR("bo %p still attached to GEM object\n", bo);

	nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
	if (nvbo->vma.node) {
		nouveau_vm_unmap(&nvbo->vma);
		nouveau_vm_put(&nvbo->vma);
	}
	kfree(nvbo);
}

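/* Round the buffer size and alignment up to the constraints imposed by the
 * selected tiling mode (pre-NV50) or by the object's page size (NV50+). */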
static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
		       int *align, int *size)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);

	if (dev_priv->card_type < NV_50) {
		if (nvbo->tile_mode) {
			if (dev_priv->chipset >= 0x40) {
				*align = 65536;
				*size = roundup(*size, 64 * nvbo->tile_mode);
			} else if (dev_priv->chipset >= 0x30) {
				*align = 32768;
				*size = roundup(*size, 64 * nvbo->tile_mode);
			} else if (dev_priv->chipset >= 0x20) {
				*align = 16384;
				*size = roundup(*size, 64 * nvbo->tile_mode);
			} else if (dev_priv->chipset >= 0x10) {
				*align = 16384;
				*size = roundup(*size, 32 * nvbo->tile_mode);
			}
		}
	} else {
		*size = roundup(*size, (1 << nvbo->page_shift));
		*align = max((1 << nvbo->page_shift), *align);
	}

	*size = roundup(*size, PAGE_SIZE);
}

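/* Allocate a nouveau_bo, apply size/alignment fixups, reserve a channel VM
 * address range when one exists, then hand the object to TTM for backing
 * storage.  TTM calls nouveau_bo_del_ttm() to destroy it, even on failure. */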
int
nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
	       int size, int align, uint32_t flags, uint32_t tile_mode,
	       uint32_t tile_flags, struct nouveau_bo **pnvbo)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_bo *nvbo;
	int ret;

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	if (!nvbo)
		return -ENOMEM;
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	nvbo->tile_mode = tile_mode;
	nvbo->tile_flags = tile_flags;
	nvbo->bo.bdev = &dev_priv->ttm.bdev;

	nvbo->page_shift = 12;
	if (dev_priv->bar1_vm) {
		if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
			nvbo->page_shift = dev_priv->bar1_vm->lpg_shift;
	}

	nouveau_bo_fixup_align(nvbo, flags, &align, &size);
	align >>= PAGE_SHIFT;

	if (dev_priv->chan_vm) {
		ret = nouveau_vm_get(dev_priv->chan_vm, size, nvbo->page_shift,
				     NV_MEM_ACCESS_RW, &nvbo->vma);
		if (ret) {
			kfree(nvbo);
			return ret;
		}
	}

	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
	nouveau_bo_placement_set(nvbo, flags, 0);

	nvbo->channel = chan;
	ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
			  ttm_bo_type_device, &nvbo->placement, align, 0,
			  false, NULL, size, nouveau_bo_del_ttm);
	if (ret) {
		/* ttm will call nouveau_bo_del_ttm if it fails.. */
		return ret;
	}
	nvbo->channel = NULL;

	if (nvbo->vma.node)
		nvbo->bo.offset = nvbo->vma.offset;
	*pnvbo = nvbo;
	return 0;
}

static void
set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
{
	*n = 0;

	if (type & TTM_PL_FLAG_VRAM)
		pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
	if (type & TTM_PL_FLAG_TT)
		pl[(*n)++] = TTM_PL_FLAG_TT | flags;
	if (type & TTM_PL_FLAG_SYSTEM)
		pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
}

static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	int vram_pages = dev_priv->vram_size >> PAGE_SHIFT;

	if (dev_priv->card_type == NV_10 &&
	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
	    nvbo->bo.mem.num_pages < vram_pages / 2) {
		/*
		 * Make sure that the color and depth buffers are handled
		 * by independent memory controller units. Up to a 9x
		 * speed up when alpha-blending and depth-test are enabled
		 * at the same time.
		 */
		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
			nvbo->placement.fpfn = vram_pages / 2;
			nvbo->placement.lpfn = ~0;
		} else {
			nvbo->placement.fpfn = 0;
			nvbo->placement.lpfn = vram_pages / 2;
		}
	}
}

void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
	struct ttm_placement *pl = &nvbo->placement;
	uint32_t flags = TTM_PL_MASK_CACHING |
		(nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

	pl->placement = nvbo->placements;
	set_placement_list(nvbo->placements, &pl->num_placement,
			   type, flags);

	pl->busy_placement = nvbo->busy_placements;
	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
			   type | busy, flags);

	set_placement_range(nvbo, type);
}

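/* Pinning bumps a refcount and re-validates the buffer with the NO_EVICT
 * placement flag so TTM will not move it; unpinning drops the refcount and
 * allows eviction again.  Aperture usage counters are updated accordingly. */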
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
		NV_ERROR(nouveau_bdev(bo->bdev)->dev,
			 "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
			 1 << bo->mem.mem_type, memtype);
		return -EINVAL;
	}

	if (nvbo->pin_refcnt++)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		goto out;

	nouveau_bo_placement_set(nvbo, memtype, 0);

	ret = nouveau_bo_validate(nvbo, false, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			dev_priv->fb_aper_free -= bo->mem.size;
			break;
		case TTM_PL_TT:
			dev_priv->gart_info.aper_free -= bo->mem.size;
			break;
		default:
			break;
		}
	}
	ttm_bo_unreserve(bo);
out:
	if (ret)
		nvbo->pin_refcnt--;
	return ret;
}

int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (--nvbo->pin_refcnt)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		return ret;

	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

	ret = nouveau_bo_validate(nvbo, false, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			dev_priv->fb_aper_free += bo->mem.size;
			break;
		case TTM_PL_TT:
			dev_priv->gart_info.aper_free += bo->mem.size;
			break;
		default:
			break;
		}
	}

	ttm_bo_unreserve(bo);
	return ret;
}

int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
	int ret;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return ret;

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
	if (nvbo)
		ttm_bo_kunmap(&nvbo->kmap);
}

int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
		    bool no_wait_reserve, bool no_wait_gpu)
{
	int ret;

	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, interruptible,
			      no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	return 0;
}

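/* 16/32-bit accessors for a kmap'd buffer: the mapping may live in ordinary
 * system memory or in I/O memory, so check is_iomem and use the matching
 * access primitive. */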
u16
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread16_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite16_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread32_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite32_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

static struct ttm_backend *
nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	switch (dev_priv->gart_info.type) {
#if __OS_HAS_AGP
	case NOUVEAU_GART_AGP:
		return ttm_agp_backend_init(bdev, dev->agp->bridge);
#endif
	case NOUVEAU_GART_PDMA:
	case NOUVEAU_GART_HW:
		return nouveau_sgdma_init_ttm(dev);
	default:
		NV_ERROR(dev, "Unknown GART type %d\n",
			 dev_priv->gart_info.type);
		break;
	}

	return NULL;
}

static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	/* We'll do this from user space. */
	return 0;
}

static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			 struct ttm_mem_type_manager *man)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		if (dev_priv->card_type >= NV_50) {
			man->func = &nouveau_vram_manager;
			man->io_reserve_fastpath = false;
			man->use_io_reserve_lru = true;
		} else {
			man->func = &ttm_bo_manager_func;
		}
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	case TTM_PL_TT:
		if (dev_priv->card_type >= NV_50)
			man->func = &nouveau_gart_manager;
		else
			man->func = &ttm_bo_manager_func;
		switch (dev_priv->gart_info.type) {
		case NOUVEAU_GART_AGP:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
						 TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
			break;
		case NOUVEAU_GART_PDMA:
		case NOUVEAU_GART_HW:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_CMA;
			man->available_caching = TTM_PL_MASK_CACHING;
			man->default_caching = TTM_PL_FLAG_CACHED;
			man->gpu_offset = dev_priv->gart_info.aper_base;
			break;
		default:
			NV_ERROR(dev, "Unknown GART type: %d\n",
				 dev_priv->gart_info.type);
			return -EINVAL;
		}
		break;
	default:
		NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
					 TTM_PL_FLAG_SYSTEM);
		break;
	default:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
		break;
	}

	*pl = nvbo->placement;
}

/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
 * TTM_PL_{VRAM,TT} directly.
 */

static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
			      struct nouveau_bo *nvbo, bool evict,
			      bool no_wait_reserve, bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct nouveau_fence *fence = NULL;
	int ret;

	ret = nouveau_fence_new(chan, &fence, true);
	if (ret)
		return ret;

	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict,
					no_wait_reserve, no_wait_gpu, new_mem);
	nouveau_fence_unref(&fence);
	return ret;
}

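/* Per-generation M2MF copy paths.  Each variant below pushes copy commands
 * for the whole region onto the channel; nouveau_bo_move_m2mf() picks the
 * right one and then fences the move via nouveau_bo_move_accel_cleanup(). */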
static int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 12);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0238, 2);
		OUT_RING  (chan, upper_32_bits(dst_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x030c, 6);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* line_length */
		OUT_RING  (chan, line_count);
		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0300, 1);
		OUT_RING  (chan, 0x00100110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 length = (new_mem->num_pages << PAGE_SHIFT);
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	int ret;

	while (length) {
		u32 amount, stride, height;

		amount  = min(length, (u64)(4 * 1024 * 1024));
		stride  = 16 * 4;
		height  = amount / stride;

		if (new_mem->mem_type == TTM_PL_VRAM &&
		    nouveau_bo_tile_layout(nvbo)) {
			ret = RING_SPACE(chan, 8);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x0200, 7);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, stride);
			OUT_RING  (chan, height);
			OUT_RING  (chan, 1);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
		} else {
			ret = RING_SPACE(chan, 2);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
			OUT_RING  (chan, 1);
		}
		if (old_mem->mem_type == TTM_PL_VRAM &&
		    nouveau_bo_tile_layout(nvbo)) {
			ret = RING_SPACE(chan, 8);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x021c, 7);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, stride);
			OUT_RING  (chan, height);
			OUT_RING  (chan, 1);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
		} else {
			ret = RING_SPACE(chan, 2);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);
			OUT_RING  (chan, 1);
		}

		ret = RING_SPACE(chan, 14);
		if (ret)
			return ret;

		BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, upper_32_bits(dst_offset));
		BEGIN_RING(chan, NvSubM2MF, 0x030c, 8);
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		OUT_RING  (chan, stride);
		OUT_RING  (chan, stride);
		OUT_RING  (chan, stride);
		OUT_RING  (chan, height);
		OUT_RING  (chan, 0x00000101);
		OUT_RING  (chan, 0x00000000);
		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING  (chan, 0);

		length -= amount;
		src_offset += amount;
		dst_offset += amount;
	}

	return 0;
}

static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
		      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
{
	if (mem->mem_type == TTM_PL_TT)
		return chan->gart_handle;
	return chan->vram_handle;
}

static int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	u32 src_offset = old_mem->start << PAGE_SHIFT;
	u32 dst_offset = new_mem->start << PAGE_SHIFT;
	u32 page_count = new_mem->num_pages;
	int ret;

	ret = RING_SPACE(chan, 3);
	if (ret)
		return ret;

	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_RING(chan, NvSubM2MF,
			   NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
		OUT_RING  (chan, src_offset);
		OUT_RING  (chan, dst_offset);
		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* line_length */
		OUT_RING  (chan, line_count);
		OUT_RING  (chan, 0x00000101);
		OUT_RING  (chan, 0x00000000);
		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING  (chan, 0);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

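/* Map a ttm_mem_reg into the channel's address space so the NV50+ copy path
 * can reach it through the VM; the mapping is torn down with the temporary
 * nouveau_mem node once the move completes. */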
static int
nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
		   struct ttm_mem_reg *mem, struct nouveau_vma *vma)
{
	struct nouveau_mem *node = mem->mm_node;
	int ret;

	ret = nouveau_vm_get(chan->vm, mem->num_pages << PAGE_SHIFT,
			     node->page_shift, NV_MEM_ACCESS_RO, vma);
	if (ret)
		return ret;

	if (mem->mem_type == TTM_PL_VRAM)
		nouveau_vm_map(vma, node);
	else
		nouveau_vm_map_sg(vma, 0, mem->num_pages << PAGE_SHIFT,
				  node, node->pages);

	return 0;
}

static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
		     bool no_wait_reserve, bool no_wait_gpu,
		     struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_channel *chan;
	int ret;

	chan = nvbo->channel;
	if (!chan) {
		chan = dev_priv->channel;
		mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
	}

	/* create temporary vmas for the transfer and attach them to the
	 * old nouveau_mem node, these will get cleaned up after ttm has
	 * destroyed the ttm_mem_reg
	 */
	if (dev_priv->card_type >= NV_50) {
		struct nouveau_mem *node = old_mem->mm_node;

		ret = nouveau_vma_getmap(chan, nvbo, old_mem, &node->vma[0]);
		if (ret)
			goto out;

		ret = nouveau_vma_getmap(chan, nvbo, new_mem, &node->vma[1]);
		if (ret)
			goto out;
	}

	if (dev_priv->card_type < NV_50)
		ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
	else
	if (dev_priv->card_type < NV_C0)
		ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
	else
		ret = nvc0_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
	if (ret == 0) {
		ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
						    no_wait_reserve,
						    no_wait_gpu, new_mem);
	}

out:
	if (chan == dev_priv->channel)
		mutex_unlock(&chan->mutex);
	return ret;
}

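/* The copy engine can't touch pure system memory, so VRAM <-> SYSTEM moves
 * are staged through a temporary GART (TT) buffer: flipd evicts VRAM into TT
 * and then flips into system pages, flips does the reverse. */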
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_reserve, bool no_wait_gpu,
		      struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}

static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_reserve, bool no_wait_gpu,
		      struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, new_mem);
	if (ret)
		goto out;

out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}

static void
nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = new_mem->mm_node;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_vma *vma = &nvbo->vma;

	if (!vma->vm)
		return;

	if (new_mem->mem_type == TTM_PL_VRAM) {
		nouveau_vm_map(&nvbo->vma, new_mem->mm_node);
	} else
	if (new_mem->mem_type == TTM_PL_TT &&
	    nvbo->page_shift == nvbo->vma.vm->spg_shift) {
		nouveau_vm_map_sg(&nvbo->vma, 0,
				  new_mem->num_pages << PAGE_SHIFT,
				  node, node->pages);
	} else {
		nouveau_vm_unmap(&nvbo->vma);
	}
}

static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
		   struct nouveau_tile_reg **new_tile)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 offset = new_mem->start << PAGE_SHIFT;

	*new_tile = NULL;
	if (new_mem->mem_type != TTM_PL_VRAM)
		return 0;

	if (dev_priv->card_type >= NV_10) {
		*new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
						nvbo->tile_mode,
						nvbo->tile_flags);
	}

	return 0;
}

static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
		      struct nouveau_tile_reg *new_tile,
		      struct nouveau_tile_reg **old_tile)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;

	nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj);
	*old_tile = new_tile;
}

static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
		bool no_wait_reserve, bool no_wait_gpu,
		struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_tile_reg *new_tile = NULL;
	int ret = 0;

	if (dev_priv->card_type < NV_50) {
		ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
		if (ret)
			return ret;
	}

	/* Fake bo copy. */
	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		BUG_ON(bo->mem.mm_node != NULL);
		bo->mem = *new_mem;
		new_mem->mm_node = NULL;
		goto out;
	}

	/* Software copy if the card isn't up and running yet. */
	if (!dev_priv->channel) {
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
		goto out;
	}

	/* Hardware assisted copy. */
	if (new_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
	else if (old_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
	else
		ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);

	if (!ret)
		goto out;

	/* Fallback to software copy. */
	ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);

out:
	if (dev_priv->card_type < NV_50) {
		if (ret)
			nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
		else
			nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
	}

	return ret;
}

static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	return 0;
}

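/* Tell TTM how (and whether) a memory region can be mapped by the CPU: TT is
 * directly mappable only for AGP, and on boards with a BAR1 VM, VRAM first
 * gets a BAR1 virtual mapping before being exposed through the PCI BAR. */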
static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;
	int ret;

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		return 0;
	case TTM_PL_TT:
#if __OS_HAS_AGP
		if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = dev_priv->gart_info.aper_base;
			mem->bus.is_iomem = true;
		}
#endif
		break;
	case TTM_PL_VRAM:
	{
		struct nouveau_mem *node = mem->mm_node;
		u8 page_shift;

		if (!dev_priv->bar1_vm) {
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = pci_resource_start(dev->pdev, 1);
			mem->bus.is_iomem = true;
			break;
		}

		if (dev_priv->card_type == NV_C0)
			page_shift = node->page_shift;
		else
			page_shift = 12;

		ret = nouveau_vm_get(dev_priv->bar1_vm, mem->bus.size,
				     page_shift, NV_MEM_ACCESS_RW,
				     &node->bar_vma);
		if (ret)
			return ret;

		nouveau_vm_map(&node->bar_vma, node);
		if (ret) {
			nouveau_vm_put(&node->bar_vma);
			return ret;
		}

		mem->bus.offset = node->bar_vma.offset;
		if (dev_priv->card_type == NV_50) /*XXX*/
			mem->bus.offset -= 0x0020000000ULL;
		mem->bus.base = pci_resource_start(dev->pdev, 1);
		mem->bus.is_iomem = true;
	}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct nouveau_mem *node = mem->mm_node;

	if (!dev_priv->bar1_vm || mem->mem_type != TTM_PL_VRAM)
		return;

	if (!node->bar_vma.node)
		return;

	nouveau_vm_unmap(&node->bar_vma);
	nouveau_vm_put(&node->bar_vma);
}

static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	/* as long as the bo isn't in vram, and isn't tiled, we've got
	 * nothing to do here.
	 */
	if (bo->mem.mem_type != TTM_PL_VRAM) {
		if (dev_priv->card_type < NV_50 ||
		    !nouveau_bo_tile_layout(nvbo))
			return 0;
	}

	/* make sure bo is in mappable vram */
	if (bo->mem.start + bo->mem.num_pages < dev_priv->fb_mappable_pages)
		return 0;

	nvbo->placement.fpfn = 0;
	nvbo->placement.lpfn = dev_priv->fb_mappable_pages;
	nouveau_bo_placement_set(nvbo, TTM_PL_VRAM, 0);
	return nouveau_bo_validate(nvbo, false, true, false);
}

void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
{
	struct nouveau_fence *old_fence;

	if (likely(fence))
		nouveau_fence_ref(fence);

	spin_lock(&nvbo->bo.bdev->fence_lock);
	old_fence = nvbo->bo.sync_obj;
	nvbo->bo.sync_obj = fence;
	spin_unlock(&nvbo->bo.bdev->fence_lock);

	nouveau_fence_unref(&old_fence);
}

struct ttm_bo_driver nouveau_bo_driver = {
	.create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
	.invalidate_caches = nouveau_bo_invalidate_caches,
	.init_mem_type = nouveau_bo_init_mem_type,
	.evict_flags = nouveau_bo_evict_flags,
	.move_notify = nouveau_bo_move_ntfy,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	.sync_obj_signaled = __nouveau_fence_signalled,
	.sync_obj_wait = __nouveau_fence_wait,
	.sync_obj_flush = __nouveau_fence_flush,
	.sync_obj_unref = __nouveau_fence_unref,
	.sync_obj_ref = __nouveau_fence_ref,
	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
	.io_mem_free = &nouveau_ttm_io_mem_free,
};