 * Copyright 2007 Dave Airlied
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlied <airlied@linux.ie>
 *          Ben Skeggs <darktama@iinet.net.au>
 *          Jeremy Kolb <jkolb@brandeis.edu>
#include "ttm/ttm_page_alloc.h"

#include <nouveau_drm.h>
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
#include <core/ramht.h>

#include <linux/log2.h>
#include <linux/slab.h>
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	if (unlikely(nvbo->gem))
		DRM_ERROR("bo %p still attached to GEM object\n", bo);

	nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
		       int *align, int *size)
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);

	if (dev_priv->card_type < NV_50) {
		if (nvbo->tile_mode) {
			if (dev_priv->chipset >= 0x40) {
				*size = roundup(*size, 64 * nvbo->tile_mode);
			} else if (dev_priv->chipset >= 0x30) {
				*size = roundup(*size, 64 * nvbo->tile_mode);
			} else if (dev_priv->chipset >= 0x20) {
				*size = roundup(*size, 64 * nvbo->tile_mode);
			} else if (dev_priv->chipset >= 0x10) {
				*size = roundup(*size, 32 * nvbo->tile_mode);
		*size = roundup(*size, (1 << nvbo->page_shift));
		*align = max((1 << nvbo->page_shift), *align);

	*size = roundup(*size, PAGE_SIZE);
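
/*
 * Example of the rounding above (illustrative sketch, not part of the
 * original source): for a tiled buffer on an NV40-class chip with
 * tile_mode = 4 the granularity is 64 * 4 = 256 bytes, so a 1000-byte
 * request becomes roundup(1000, 256) = 1024 bytes.  On NV50 and later
 * the size and alignment are instead rounded to the buffer's own page
 * size (1 << page_shift), and every buffer ends up PAGE_SIZE-aligned.
 */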
nouveau_bo_new(struct drm_device *dev, int size, int align,
	       uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
	       struct nouveau_bo **pnvbo)
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_bo *nvbo;
	int type = ttm_bo_type_device;

		type = ttm_bo_type_sg;

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);

	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	INIT_LIST_HEAD(&nvbo->vma_list);
	nvbo->tile_mode = tile_mode;
	nvbo->tile_flags = tile_flags;
	nvbo->bo.bdev = &dev_priv->ttm.bdev;

	nvbo->page_shift = 12;
	if (dev_priv->chan_vm) {
		if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
			nvbo->page_shift = nvvm_lpg_shift(dev_priv->chan_vm);

	nouveau_bo_fixup_align(nvbo, flags, &align, &size);
	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
	nouveau_bo_placement_set(nvbo, flags, 0);

	acc_size = ttm_bo_dma_acc_size(&dev_priv->ttm.bdev, size,
				       sizeof(struct nouveau_bo));

	ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
			  type, &nvbo->placement,
			  align >> PAGE_SHIFT, 0, false, NULL, acc_size, sg,
		/* ttm will call nouveau_bo_del_ttm if it fails.. */
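
/*
 * Note on the error path above: once ttm_bo_init() has been called, TTM
 * owns the object and invokes the destroy callback (nouveau_bo_del_ttm)
 * itself on failure, so the caller must not free nvbo again in that case.
 */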
set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
	if (type & TTM_PL_FLAG_VRAM)
		pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
	if (type & TTM_PL_FLAG_TT)
		pl[(*n)++] = TTM_PL_FLAG_TT | flags;
	if (type & TTM_PL_FLAG_SYSTEM)
		pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	int vram_pages = nvfb_vram_size(dev_priv->dev) >> PAGE_SHIFT;

	if (dev_priv->card_type == NV_10 &&
	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
	    nvbo->bo.mem.num_pages < vram_pages / 4) {
		/* Make sure that the color and depth buffers are handled
		 * by independent memory controller units. Up to a 9x
		 * speed up when alpha-blending and depth-test are enabled
		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
			nvbo->placement.fpfn = vram_pages / 2;
			nvbo->placement.lpfn = ~0;
			nvbo->placement.fpfn = 0;
			nvbo->placement.lpfn = vram_pages / 2;
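
/*
 * Illustrative numbers for the split above (a sketch, not from the
 * original source): with 256 MiB of VRAM (65536 pages of 4 KiB), small
 * tiled ZETA buffers are constrained to the upper half of VRAM
 * (fpfn = 32768) while other buffers are constrained to the lower half
 * (lpfn = 32768), keeping depth and color on independent memory
 * controller units as the comment describes.
 */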
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
	struct ttm_placement *pl = &nvbo->placement;
	uint32_t flags = TTM_PL_MASK_CACHING |
			 (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

	pl->placement = nvbo->placements;
	set_placement_list(nvbo->placements, &pl->num_placement,

	pl->busy_placement = nvbo->busy_placements;
	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,

	set_placement_range(nvbo, type);
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;

	if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
		NV_ERROR(nouveau_bdev(bo->bdev)->dev,
			 "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
			 1 << bo->mem.mem_type, memtype);

	if (nvbo->pin_refcnt++)

	ret = ttm_bo_reserve(bo, false, false, false, 0);

	nouveau_bo_placement_set(nvbo, memtype, 0);

	ret = nouveau_bo_validate(nvbo, false, false, false);

		switch (bo->mem.mem_type) {
			dev_priv->fb_aper_free -= bo->mem.size;
			dev_priv->gart_info.aper_free -= bo->mem.size;

	ttm_bo_unreserve(bo);
nouveau_bo_unpin(struct nouveau_bo *nvbo)
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;

	if (--nvbo->pin_refcnt)

	ret = ttm_bo_reserve(bo, false, false, false, 0);

	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

	ret = nouveau_bo_validate(nvbo, false, false, false);

		switch (bo->mem.mem_type) {
			dev_priv->fb_aper_free += bo->mem.size;
			dev_priv->gart_info.aper_free += bo->mem.size;

	ttm_bo_unreserve(bo);
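
/*
 * nouveau_bo_pin()/nouveau_bo_unpin() are reference counted: only the
 * first pin changes the placement (and sets TTM_PL_FLAG_NO_EVICT via
 * nouveau_bo_placement_set), and only the last unpin makes the buffer
 * evictable again.  A minimal usage sketch, assuming a VRAM buffer:
 *
 *	ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM);
 *	if (ret == 0) {
 *		... use the buffer at a fixed VRAM address ...
 *		nouveau_bo_unpin(nvbo);
 *	}
 */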
nouveau_bo_map(struct nouveau_bo *nvbo)
	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
	ttm_bo_unreserve(&nvbo->bo);

nouveau_bo_unmap(struct nouveau_bo *nvbo)
	ttm_bo_kunmap(&nvbo->kmap);

nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
		    bool no_wait_reserve, bool no_wait_gpu)
	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, interruptible,
			      no_wait_reserve, no_wait_gpu);

nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
		return ioread16_native((void __force __iomem *)mem);

nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
		iowrite16_native(val, (void __force __iomem *)mem);

nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
		return ioread32_native((void __force __iomem *)mem);

nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
		iowrite32_native(val, (void __force __iomem *)mem);
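
/*
 * A minimal usage sketch for the accessors above (illustrative, not taken
 * from this file): map the buffer once, then read/write words by index.
 *
 *	ret = nouveau_bo_map(nvbo);
 *	if (ret == 0) {
 *		nouveau_bo_wr32(nvbo, 0, 0x12345678);
 *		val = nouveau_bo_rd32(nvbo, 0);
 *		nouveau_bo_unmap(nvbo);
 *	}
 *
 * The rd/wr helpers pick ioread/iowrite or plain memory accesses depending
 * on whether the kmap landed in I/O memory (is_iomem).
 */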
static struct ttm_tt *
nouveau_ttm_tt_create(struct ttm_bo_device *bdev,
		      unsigned long size, uint32_t page_flags,
		      struct page *dummy_read_page)
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	switch (dev_priv->gart_info.type) {
	case NOUVEAU_GART_AGP:
		return ttm_agp_tt_create(bdev, dev->agp->bridge,
					 size, page_flags, dummy_read_page);
	case NOUVEAU_GART_PDMA:
	case NOUVEAU_GART_HW:
		return nouveau_sgdma_create_ttm(bdev, size, page_flags,
		NV_ERROR(dev, "Unknown GART type %d\n",
			 dev_priv->gart_info.type);
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
	/* We'll do this from user space. */

nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			 struct ttm_mem_type_manager *man)
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		if (dev_priv->card_type >= NV_50) {
			man->func = &nouveau_vram_manager;
			man->io_reserve_fastpath = false;
			man->use_io_reserve_lru = true;
			man->func = &ttm_bo_manager_func;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
		man->default_caching = TTM_PL_FLAG_WC;
		if (dev_priv->card_type >= NV_50)
			man->func = &nouveau_gart_manager;
		if (dev_priv->gart_info.type != NOUVEAU_GART_AGP)
			man->func = &nv04_gart_manager;
			man->func = &ttm_bo_manager_func;
		switch (dev_priv->gart_info.type) {
		case NOUVEAU_GART_AGP:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
			man->default_caching = TTM_PL_FLAG_WC;
		case NOUVEAU_GART_PDMA:
		case NOUVEAU_GART_HW:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_CMA;
			man->available_caching = TTM_PL_MASK_CACHING;
			man->default_caching = TTM_PL_FLAG_CACHED;
			NV_ERROR(dev, "Unknown GART type: %d\n",
				 dev_priv->gart_info.type);
		NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	switch (bo->mem.mem_type) {
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);

	*pl = nvbo->placement;

/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
 * TTM_PL_{VRAM,TT} directly.
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
			      struct nouveau_bo *nvbo, bool evict,
			      bool no_wait_reserve, bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
	struct nouveau_fence *fence = NULL;

	ret = nouveau_fence_new(chan, &fence);

	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict,
					no_wait_reserve, no_wait_gpu, new_mem);
	nouveau_fence_unref(&fence);
nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
	struct nouveau_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 10);
		BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
		OUT_RING (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, new_mem->num_pages);
		BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386);
nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)
	int ret = RING_SPACE(chan, 2);
		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
		OUT_RING (chan, handle);

nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;

	page_count = new_mem->num_pages;
		int line_count = (page_count > 8191) ? 8191 : page_count;

		ret = RING_SPACE(chan, 11);

		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 8);
		OUT_RING (chan, upper_32_bits(src_offset));
		OUT_RING (chan, lower_32_bits(src_offset));
		OUT_RING (chan, upper_32_bits(dst_offset));
		OUT_RING (chan, lower_32_bits(dst_offset));
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, line_count);
		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
		OUT_RING (chan, 0x00000110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
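
/*
 * The copy above is submitted in chunks: each pass programs source and
 * destination addresses plus PAGE_SIZE pitches, then transfers up to
 * 8191 lines of one page each (apparently the per-submission limit of
 * this method), advancing both offsets by PAGE_SIZE * line_count until
 * page_count reaches zero.
 */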
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;

	page_count = new_mem->num_pages;
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 12);

		BEGIN_NVC0(chan, NvSubCopy, 0x0238, 2);
		OUT_RING (chan, upper_32_bits(dst_offset));
		OUT_RING (chan, lower_32_bits(dst_offset));
		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 6);
		OUT_RING (chan, upper_32_bits(src_offset));
		OUT_RING (chan, lower_32_bits(src_offset));
		OUT_RING (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING (chan, PAGE_SIZE); /* line_length */
		OUT_RING (chan, line_count);
		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
		OUT_RING (chan, 0x00100110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;

	page_count = new_mem->num_pages;
		int line_count = (page_count > 8191) ? 8191 : page_count;

		ret = RING_SPACE(chan, 11);

		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
		OUT_RING (chan, upper_32_bits(src_offset));
		OUT_RING (chan, lower_32_bits(src_offset));
		OUT_RING (chan, upper_32_bits(dst_offset));
		OUT_RING (chan, lower_32_bits(dst_offset));
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, line_count);
		BEGIN_NV04(chan, NvSubCopy, 0x0300, 1);
		OUT_RING (chan, 0x00000110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
	struct nouveau_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 7);
		BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
		OUT_RING (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING (chan, 0x00000000 /* COPY */);
		OUT_RING (chan, new_mem->num_pages << PAGE_SHIFT);

nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
	struct nouveau_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 7);
		BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
		OUT_RING (chan, new_mem->num_pages << PAGE_SHIFT);
		OUT_RING (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */);
nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
	int ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000,
		ret = RING_SPACE(chan, 6);
			BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
			OUT_RING (chan, handle);
			BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
			OUT_RING (chan, NvNotify0);
			OUT_RING (chan, NvDmaFB);
			OUT_RING (chan, NvDmaFB);
			nouveau_ramht_remove(chan, NvNotify0);
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
	struct nouveau_mem *node = old_mem->mm_node;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 length = (new_mem->num_pages << PAGE_SHIFT);
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;

		u32 amount, stride, height;

		amount = min(length, (u64)(4 * 1024 * 1024));
		height = amount / stride;

		if (new_mem->mem_type == TTM_PL_VRAM &&
		    nouveau_bo_tile_layout(nvbo)) {
			ret = RING_SPACE(chan, 8);

			BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);
			OUT_RING (chan, stride);
			OUT_RING (chan, height);
			ret = RING_SPACE(chan, 2);

			BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
		if (old_mem->mem_type == TTM_PL_VRAM &&
		    nouveau_bo_tile_layout(nvbo)) {
			ret = RING_SPACE(chan, 8);

			BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);
			OUT_RING (chan, stride);
			OUT_RING (chan, height);
			ret = RING_SPACE(chan, 2);

			BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);

		ret = RING_SPACE(chan, 14);

		BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
		OUT_RING (chan, upper_32_bits(src_offset));
		OUT_RING (chan, upper_32_bits(dst_offset));
		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
		OUT_RING (chan, lower_32_bits(src_offset));
		OUT_RING (chan, lower_32_bits(dst_offset));
		OUT_RING (chan, stride);
		OUT_RING (chan, stride);
		OUT_RING (chan, stride);
		OUT_RING (chan, height);
		OUT_RING (chan, 0x00000101);
		OUT_RING (chan, 0x00000000);
		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);

		src_offset += amount;
		dst_offset += amount;
nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
	int ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000,
		ret = RING_SPACE(chan, 4);
			BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
			OUT_RING (chan, handle);
			BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
			OUT_RING (chan, NvNotify0);

static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
		      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
	if (mem->mem_type == TTM_PL_TT)
		return chan->gart_handle;
	return chan->vram_handle;
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
	u32 src_offset = old_mem->start << PAGE_SHIFT;
	u32 dst_offset = new_mem->start << PAGE_SHIFT;
	u32 page_count = new_mem->num_pages;

	ret = RING_SPACE(chan, 3);

	BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
	OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
	OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

	page_count = new_mem->num_pages;
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 11);

		BEGIN_NV04(chan, NvSubCopy,
			   NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
		OUT_RING (chan, src_offset);
		OUT_RING (chan, dst_offset);
		OUT_RING (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING (chan, PAGE_SIZE); /* line_length */
		OUT_RING (chan, line_count);
		OUT_RING (chan, 0x00000101);
		OUT_RING (chan, 0x00000000);
		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
		   struct ttm_mem_reg *mem, struct nouveau_vma *vma)
	struct nouveau_mem *node = mem->mm_node;

	ret = nouveau_vm_get(chan->vm, mem->num_pages << PAGE_SHIFT,
			     node->page_shift, NV_MEM_ACCESS_RO, vma);

	if (mem->mem_type == TTM_PL_VRAM)
		nouveau_vm_map(vma, node);
		nouveau_vm_map_sg(vma, 0, mem->num_pages << PAGE_SHIFT, node);
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
		     bool no_wait_reserve, bool no_wait_gpu,
		     struct ttm_mem_reg *new_mem)
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_channel *chan = dev_priv->channel;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;

	mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);

	/* create temporary vmas for the transfer and attach them to the
	 * old nouveau_mem node, these will get cleaned up after ttm has
	 * destroyed the ttm_mem_reg
	if (dev_priv->card_type >= NV_50) {
		struct nouveau_mem *node = old_mem->mm_node;

		ret = nouveau_vma_getmap(chan, nvbo, old_mem, &node->vma[0]);

		ret = nouveau_vma_getmap(chan, nvbo, new_mem, &node->vma[1]);

	ret = dev_priv->ttm.move(chan, bo, &bo->mem, new_mem);
		ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
						    no_wait_gpu, new_mem);

	mutex_unlock(&chan->mutex);
nouveau_bo_move_init(struct nouveau_channel *chan)
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	static const struct {
		int (*exec)(struct nouveau_channel *,
			    struct ttm_buffer_object *,
			    struct ttm_mem_reg *, struct ttm_mem_reg *);
		int (*init)(struct nouveau_channel *, u32 handle);
		{ "COPY", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
		{ "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init },
		{ "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init },
		{ "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
		{ "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
		{ "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init },
	const char *name = "CPU";

		u32 handle = (mthd->engine << 16) | mthd->oclass;
		ret = nouveau_gpuobj_gr_new(chan, handle, mthd->oclass);
			ret = mthd->init(chan, handle);
				dev_priv->ttm.move = mthd->exec;
	} while ((++mthd)->exec);

	NV_INFO(chan->dev, "MM: using %s for buffer copies\n", name);
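
/*
 * The table above is walked in order and the first object class that can
 * be instantiated on the channel wins.  The composed handle is simply
 * (engine << 16) | oclass, e.g. 0x000490b5 for the "COPY0" entry
 * (engine 4, class 0x90b5).  If nothing in the table probes successfully,
 * ttm.move stays NULL and buffer moves fall back to the CPU memcpy path
 * ("CPU" in the log message above).
 */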
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_reserve, bool no_wait_gpu,
		      struct ttm_mem_reg *new_mem)
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);

	ret = ttm_tt_bind(bo->ttm, &tmp_mem);

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);

	ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);

	ttm_bo_mem_put(bo, &tmp_mem);
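
/*
 * nouveau_bo_move_flipd() handles moves into system memory that the copy
 * engine cannot do directly: it first bounces the bo into a temporary
 * GART (TT) placement using the accelerated m2mf copy, then lets
 * ttm_bo_move_ttm() finish the move into the final system placement.
 * The companion nouveau_bo_move_flips() below does the same in the
 * opposite direction.
 */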
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_reserve, bool no_wait_gpu,
		      struct ttm_mem_reg *new_mem)
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);

	ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, new_mem);

	ttm_bo_mem_put(bo, &tmp_mem);
nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_vma *vma;

	/* ttm can now (stupidly) pass the driver bos it didn't create... */
	if (bo->destroy != nouveau_bo_del_ttm)

	list_for_each_entry(vma, &nvbo->vma_list, head) {
		if (new_mem && new_mem->mem_type == TTM_PL_VRAM) {
			nouveau_vm_map(vma, new_mem->mm_node);
		if (new_mem && new_mem->mem_type == TTM_PL_TT &&
		    nvbo->page_shift == nvvm_spg_shift(vma->vm)) {
			if (((struct nouveau_mem *)new_mem->mm_node)->sg)
				nouveau_vm_map_sg_table(vma, 0, new_mem->
							num_pages << PAGE_SHIFT,
				nouveau_vm_map_sg(vma, 0, new_mem->
						  num_pages << PAGE_SHIFT,
			nouveau_vm_unmap(vma);
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
		   struct nouveau_tile_reg **new_tile)
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 offset = new_mem->start << PAGE_SHIFT;

	if (new_mem->mem_type != TTM_PL_VRAM)

	if (dev_priv->card_type >= NV_10) {
		*new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,

nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
		      struct nouveau_tile_reg *new_tile,
		      struct nouveau_tile_reg **old_tile)
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;

	nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj);
	*old_tile = new_tile;
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
		bool no_wait_reserve, bool no_wait_gpu,
		struct ttm_mem_reg *new_mem)
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_tile_reg *new_tile = NULL;

	if (dev_priv->card_type < NV_50) {
		ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);

	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		BUG_ON(bo->mem.mm_node != NULL);
		new_mem->mm_node = NULL;

	/* CPU copy if we have no accelerated method available */
	if (!dev_priv->ttm.move) {
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);

	/* Hardware assisted copy. */
	if (new_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
	else if (old_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
		ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);

	/* Fallback to software copy. */
	ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);

	if (dev_priv->card_type < NV_50) {
			nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
			nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)

nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
	switch (mem->mem_type) {
		if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = dev_priv->gart_info.aper_base;
			mem->bus.is_iomem = true;
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = pci_resource_start(dev->pdev, 1);
		mem->bus.is_iomem = true;
		if (dev_priv->card_type >= NV_50) {
			struct nouveau_mem *node = mem->mm_node;

			ret = nvbar_map(dev, node, NV_MEM_ACCESS_RW,
			mem->bus.offset = node->bar_vma.offset;

nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct nouveau_mem *node = mem->mm_node;

	if (mem->mem_type != TTM_PL_VRAM)

	if (!node->bar_vma.node)

	nvbar_unmap(dev_priv->dev, &node->bar_vma);
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	/* as long as the bo isn't in vram, and isn't tiled, we've got
	 * nothing to do here.
	if (bo->mem.mem_type != TTM_PL_VRAM) {
		if (dev_priv->card_type < NV_50 ||
		    !nouveau_bo_tile_layout(nvbo))

	/* make sure bo is in mappable vram */
	if (bo->mem.start + bo->mem.num_pages < dev_priv->fb_mappable_pages)

	nvbo->placement.fpfn = 0;
	nvbo->placement.lpfn = dev_priv->fb_mappable_pages;
	nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
	return nouveau_bo_validate(nvbo, false, true, false);
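
/*
 * On a CPU fault TTM calls the hook above before mapping the bo.  The
 * buffer may live in a part of VRAM that is not CPU-mappable through the
 * BAR; if so its placement is clamped to the mappable window
 * [0, fb_mappable_pages) and it is revalidated, which migrates it
 * somewhere the fault handler can actually map.
 */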
nouveau_ttm_tt_populate(struct ttm_tt *ttm)
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct drm_nouveau_private *dev_priv;
	struct drm_device *dev;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (ttm->state != tt_unpopulated)

	if (slave && ttm->sg) {
		/* make userspace faulting work */
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 ttm_dma->dma_address, ttm->num_pages);
		ttm->state = tt_unbound;

	dev_priv = nouveau_bdev(ttm->bdev);
	dev = dev_priv->dev;

	if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
		return ttm_agp_tt_populate(ttm);

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		return ttm_dma_populate((void *)ttm, dev->dev);

	r = ttm_pool_populate(ttm);

	for (i = 0; i < ttm->num_pages; i++) {
		ttm_dma->dma_address[i] = pci_map_page(dev->pdev, ttm->pages[i],
						       PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(dev->pdev, ttm_dma->dma_address[i])) {
			pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
			ttm_dma->dma_address[i] = 0;
			ttm_pool_unpopulate(ttm);
nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct drm_nouveau_private *dev_priv;
	struct drm_device *dev;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	dev_priv = nouveau_bdev(ttm->bdev);
	dev = dev_priv->dev;

	if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
		ttm_agp_tt_unpopulate(ttm);

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		ttm_dma_unpopulate((void *)ttm, dev->dev);

	for (i = 0; i < ttm->num_pages; i++) {
		if (ttm_dma->dma_address[i]) {
			pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);

	ttm_pool_unpopulate(ttm);
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
	struct nouveau_fence *old_fence = NULL;

		nouveau_fence_ref(fence);

	spin_lock(&nvbo->bo.bdev->fence_lock);
	old_fence = nvbo->bo.sync_obj;
	nvbo->bo.sync_obj = fence;
	spin_unlock(&nvbo->bo.bdev->fence_lock);

	nouveau_fence_unref(&old_fence);

nouveau_bo_fence_unref(void **sync_obj)
	nouveau_fence_unref((struct nouveau_fence **)sync_obj);

nouveau_bo_fence_ref(void *sync_obj)
	return nouveau_fence_ref(sync_obj);

nouveau_bo_fence_signalled(void *sync_obj, void *sync_arg)
	return nouveau_fence_done(sync_obj);

nouveau_bo_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
	return nouveau_fence_wait(sync_obj, lazy, intr);

nouveau_bo_fence_flush(void *sync_obj, void *sync_arg)
struct ttm_bo_driver nouveau_bo_driver = {
	.ttm_tt_create = &nouveau_ttm_tt_create,
	.ttm_tt_populate = &nouveau_ttm_tt_populate,
	.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
	.invalidate_caches = nouveau_bo_invalidate_caches,
	.init_mem_type = nouveau_bo_init_mem_type,
	.evict_flags = nouveau_bo_evict_flags,
	.move_notify = nouveau_bo_move_ntfy,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	.sync_obj_signaled = nouveau_bo_fence_signalled,
	.sync_obj_wait = nouveau_bo_fence_wait,
	.sync_obj_flush = nouveau_bo_fence_flush,
	.sync_obj_unref = nouveau_bo_fence_unref,
	.sync_obj_ref = nouveau_bo_fence_ref,
	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
	.io_mem_free = &nouveau_ttm_io_mem_free,
struct nouveau_vma *
nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nouveau_vm *vm)
	struct nouveau_vma *vma;
	list_for_each_entry(vma, &nvbo->vma_list, head) {

nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
		   struct nouveau_vma *vma)
	const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	struct nouveau_mem *node = nvbo->bo.mem.mm_node;

	ret = nouveau_vm_get(vm, size, nvbo->page_shift,
			     NV_MEM_ACCESS_RW, vma);

	if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
		nouveau_vm_map(vma, nvbo->bo.mem.mm_node);
	else if (nvbo->bo.mem.mem_type == TTM_PL_TT) {
			nouveau_vm_map_sg_table(vma, 0, size, node);
			nouveau_vm_map_sg(vma, 0, size, node);

	list_add_tail(&vma->head, &nvbo->vma_list);

nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
	if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM) {
		spin_lock(&nvbo->bo.bdev->fence_lock);
		ttm_bo_wait(&nvbo->bo, false, false, false);
		spin_unlock(&nvbo->bo.bdev->fence_lock);
		nouveau_vm_unmap(vma);

	nouveau_vm_put(vma);
	list_del(&vma->head);