/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *          Ben Skeggs   <darktama@iinet.net.au>
 *          Jeremy Kolb  <jkolb@brandeis.edu>
 */

#include <core/engine.h>
#include <linux/swiotlb.h>

#include <subdev/fb.h>
#include <subdev/vm.h>
#include <subdev/bar.h>

#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"

#include "nouveau_bo.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"

/*
 * NV10-NV40 tiling helpers
 */

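/* A tile "region" pairs a hardware tile-region slot in the framebuffer
 * subdev (pfb->tile.region[]) with a software tracking slot
 * (drm->tile.reg[]) that remembers the fence of the last user, so a slot
 * is only recycled once the GPU has actually finished with it.
 */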
static void
nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
                           u32 addr, u32 size, u32 pitch, u32 flags)
{
        struct nouveau_drm *drm = nouveau_drm(dev);
        int i = reg - drm->tile.reg;
        struct nouveau_fb *pfb = nouveau_fb(drm->device);
        struct nouveau_fb_tile *tile = &pfb->tile.region[i];
        struct nouveau_engine *engine;

        nouveau_fence_unref(&reg->fence);

        if (tile->pitch)
                pfb->tile.fini(pfb, i, tile);

        if (pitch)
                pfb->tile.init(pfb, i, addr, size, pitch, flags, tile);

        pfb->tile.prog(pfb, i, tile);

        if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_GR)))
                engine->tile_prog(engine, i);
        if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_MPEG)))
                engine->tile_prog(engine, i);
}

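/* Try to claim tile-region slot i for reuse; returns NULL if the slot is
 * still allocated or its previous user's fence hasn't signalled yet.
 */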
static struct nouveau_drm_tile *
nv10_bo_get_tile_region(struct drm_device *dev, int i)
{
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct nouveau_drm_tile *tile = &drm->tile.reg[i];

        spin_lock(&drm->tile.lock);

        if (!tile->used &&
            (!tile->fence || nouveau_fence_done(tile->fence)))
                tile->used = true;
        else
                tile = NULL;

        spin_unlock(&drm->tile.lock);
        return tile;
}

static void
nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
                        struct nouveau_fence *fence)
{
        struct nouveau_drm *drm = nouveau_drm(dev);

        if (tile) {
                spin_lock(&drm->tile.lock);
                if (fence) {
                        /* Mark it as pending. */
                        tile->fence = fence;
                        nouveau_fence_ref(fence);
                }

                tile->used = false;
                spin_unlock(&drm->tile.lock);
        }
}

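/* Scan all tile-region slots: claim the first available one for the new
 * allocation (when a pitch is requested), and opportunistically tear down
 * any other slots whose previous users have since finished.
 */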
static struct nouveau_drm_tile *
nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
                   u32 size, u32 pitch, u32 flags)
{
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct nouveau_fb *pfb = nouveau_fb(drm->device);
        struct nouveau_drm_tile *tile, *found = NULL;
        int i;

        for (i = 0; i < pfb->tile.regions; i++) {
                tile = nv10_bo_get_tile_region(dev, i);

                if (pitch && !found) {
                        found = tile;
                        continue;

                } else if (tile && pfb->tile.region[i].pitch) {
                        /* Kill an unused tile region. */
                        nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
                }

                nv10_bo_put_tile_region(dev, tile, NULL);
        }

        if (found)
                nv10_bo_update_tile_region(dev, found, addr, size,
                                           pitch, flags);
        return found;
}

static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
        struct drm_device *dev = drm->dev;
        struct nouveau_bo *nvbo = nouveau_bo(bo);

        if (unlikely(nvbo->gem.filp))
                DRM_ERROR("bo %p still attached to GEM object\n", bo);
        WARN_ON(nvbo->pin_refcnt > 0);
        nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
        kfree(nvbo);
}

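/* Pre-NV50 chips need chipset-specific base alignment and row rounding
 * for tiled buffers; NV50 and later only require the buffer to be sized
 * and aligned to the page size chosen for it (nvbo->page_shift).
 */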
static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
                       int *align, int *size)
{
        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
        struct nouveau_device *device = nv_device(drm->device);

        if (device->card_type < NV_50) {
                if (nvbo->tile_mode) {
                        if (device->chipset >= 0x40) {
                                *align = 65536;
                                *size = roundup(*size, 64 * nvbo->tile_mode);

                        } else if (device->chipset >= 0x30) {
                                *align = 32768;
                                *size = roundup(*size, 64 * nvbo->tile_mode);

                        } else if (device->chipset >= 0x20) {
                                *align = 16384;
                                *size = roundup(*size, 64 * nvbo->tile_mode);

                        } else if (device->chipset >= 0x10) {
                                *align = 16384;
                                *size = roundup(*size, 32 * nvbo->tile_mode);
                        }
                }
        } else {
                *size = roundup(*size, (1 << nvbo->page_shift));
                *align = max((1 << nvbo->page_shift), *align);
        }

        *size = roundup(*size, PAGE_SIZE);
}

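/* Allocate a nouveau_bo and register it with TTM.  max_size caps the
 * request so that rounding the size up to the VM's large-page
 * granularity cannot overflow the (int) size.
 */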
int
nouveau_bo_new(struct drm_device *dev, int size, int align,
               uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
               struct sg_table *sg,
               struct nouveau_bo **pnvbo)
{
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct nouveau_bo *nvbo;
        size_t acc_size;
        int ret;
        int type = ttm_bo_type_device;
        int lpg_shift = 12;
        int max_size;

        if (drm->client.base.vm)
                lpg_shift = drm->client.base.vm->vmm->lpg_shift;
        max_size = INT_MAX & ~((1 << lpg_shift) - 1);

        if (size <= 0 || size > max_size) {
                nv_warn(drm, "skipped size %x\n", (u32)size);
                return -EINVAL;
        }

        if (sg)
                type = ttm_bo_type_sg;

        nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
        if (!nvbo)
                return -ENOMEM;
        INIT_LIST_HEAD(&nvbo->head);
        INIT_LIST_HEAD(&nvbo->entry);
        INIT_LIST_HEAD(&nvbo->vma_list);
        nvbo->tile_mode = tile_mode;
        nvbo->tile_flags = tile_flags;
        nvbo->bo.bdev = &drm->ttm.bdev;

        nvbo->page_shift = 12;
        if (drm->client.base.vm) {
                if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
                        nvbo->page_shift = drm->client.base.vm->vmm->lpg_shift;
        }

        nouveau_bo_fixup_align(nvbo, flags, &align, &size);
        nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
        nouveau_bo_placement_set(nvbo, flags, 0);

        acc_size = ttm_bo_dma_acc_size(&drm->ttm.bdev, size,
                                       sizeof(struct nouveau_bo));

        ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size,
                          type, &nvbo->placement,
                          align >> PAGE_SHIFT, false, NULL, acc_size, sg,
                          nouveau_bo_del_ttm);
        if (ret) {
                /* ttm will call nouveau_bo_del_ttm if it fails. */
                return ret;
        }

        *pnvbo = nvbo;
        return 0;
}

static void
set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
{
        *n = 0;

        if (type & TTM_PL_FLAG_VRAM)
                pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
        if (type & TTM_PL_FLAG_TT)
                pl[(*n)++] = TTM_PL_FLAG_TT | flags;
        if (type & TTM_PL_FLAG_SYSTEM)
                pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
}

static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
        struct nouveau_fb *pfb = nouveau_fb(drm->device);
        u32 vram_pages = pfb->ram->size >> PAGE_SHIFT;

        if ((nv_device(drm->device)->card_type == NV_10 ||
             nv_device(drm->device)->card_type == NV_11) &&
            nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
            nvbo->bo.mem.num_pages < vram_pages / 4) {
                /*
                 * Make sure that the color and depth buffers are handled
                 * by independent memory controller units. Up to a 9x
                 * speed up when alpha-blending and depth-test are enabled
                 * at the same time.
                 */
                if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
                        nvbo->placement.fpfn = vram_pages / 2;
                        nvbo->placement.lpfn = ~0;
                } else {
                        nvbo->placement.fpfn = 0;
                        nvbo->placement.lpfn = vram_pages / 2;
                }
        }
}

void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
        struct ttm_placement *pl = &nvbo->placement;
        uint32_t flags = TTM_PL_MASK_CACHING |
                (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

        pl->placement = nvbo->placements;
        set_placement_list(nvbo->placements, &pl->num_placement,
                           type, flags);

        pl->busy_placement = nvbo->busy_placements;
        set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
                           type | busy, flags);

        set_placement_range(nvbo, type);
}

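/* Pinning is refcounted.  A buffer already pinned into one memory type
 * cannot be re-pinned somewhere else, and only the first pin actually
 * (re)validates the placement and adjusts the available-memory counters.
 */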
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
        struct ttm_buffer_object *bo = &nvbo->bo;
        int ret;

        ret = ttm_bo_reserve(bo, false, false, false, 0);
        if (ret)
                return ret;

        if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
                NV_ERROR(drm, "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
                         1 << bo->mem.mem_type, memtype);
                ret = -EINVAL;
                goto out;
        }

        if (nvbo->pin_refcnt++)
                goto out;

        nouveau_bo_placement_set(nvbo, memtype, 0);

        ret = nouveau_bo_validate(nvbo, false, false);
        if (ret == 0) {
                switch (bo->mem.mem_type) {
                case TTM_PL_VRAM:
                        drm->gem.vram_available -= bo->mem.size;
                        break;
                case TTM_PL_TT:
                        drm->gem.gart_available -= bo->mem.size;
                        break;
                default:
                        break;
                }
        }
out:
        ttm_bo_unreserve(bo);
        return ret;
}

int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
        struct ttm_buffer_object *bo = &nvbo->bo;
        int ret, ref;

        ret = ttm_bo_reserve(bo, false, false, false, 0);
        if (ret)
                return ret;

        ref = --nvbo->pin_refcnt;
        WARN_ON_ONCE(ref < 0);
        if (ref)
                goto out;

        nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

        ret = nouveau_bo_validate(nvbo, false, false);
        if (ret == 0) {
                switch (bo->mem.mem_type) {
                case TTM_PL_VRAM:
                        drm->gem.vram_available += bo->mem.size;
                        break;
                case TTM_PL_TT:
                        drm->gem.gart_available += bo->mem.size;
                        break;
                default:
                        break;
                }
        }

out:
        ttm_bo_unreserve(bo);
        return ret;
}

int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
        int ret;

        ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
        if (ret)
                return ret;

        ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
        ttm_bo_unreserve(&nvbo->bo);
        return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
        if (nvbo)
                ttm_bo_kunmap(&nvbo->kmap);
}

int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
                    bool no_wait_gpu)
{
        int ret;

        ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
                              interruptible, no_wait_gpu);
        if (ret)
                return ret;

        return 0;
}

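/* The accessors below go through the persistent kernel mapping set up by
 * nouveau_bo_map(), choosing I/O or plain memory accesses at runtime,
 * since the bo may be backed by VRAM (iomem) or by system pages.
 */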
u16
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
{
        bool is_iomem;
        u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
        mem = &mem[index];
        if (is_iomem)
                return ioread16_native((void __force __iomem *)mem);
        else
                return *mem;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
        bool is_iomem;
        u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
        mem = &mem[index];
        if (is_iomem)
                iowrite16_native(val, (void __force __iomem *)mem);
        else
                *mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
        bool is_iomem;
        u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
        mem = &mem[index];
        if (is_iomem)
                return ioread32_native((void __force __iomem *)mem);
        else
                return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
        bool is_iomem;
        u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
        mem = &mem[index];
        if (is_iomem)
                iowrite32_native(val, (void __force __iomem *)mem);
        else
                *mem = val;
}

static struct ttm_tt *
nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
                      uint32_t page_flags, struct page *dummy_read)
{
#if __OS_HAS_AGP
        struct nouveau_drm *drm = nouveau_bdev(bdev);
        struct drm_device *dev = drm->dev;

        if (drm->agp.stat == ENABLED) {
                return ttm_agp_tt_create(bdev, dev->agp->bridge, size,
                                         page_flags, dummy_read);
        }
#endif

        return nouveau_sgdma_create_ttm(bdev, size, page_flags, dummy_read);
}

static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
        /* We'll do this from user space. */
        return 0;
}

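/* Describe each TTM memory type to the core: which allocator backs it,
 * whether it's mappable, and the caching modes it supports.  NV50+ uses
 * nouveau's own VRAM/GART managers so placement goes through the card's
 * VM.
 */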
static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                         struct ttm_mem_type_manager *man)
{
        struct nouveau_drm *drm = nouveau_bdev(bdev);

        switch (type) {
        case TTM_PL_SYSTEM:
                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_MASK_CACHING;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case TTM_PL_VRAM:
                if (nv_device(drm->device)->card_type >= NV_50) {
                        man->func = &nouveau_vram_manager;
                        man->io_reserve_fastpath = false;
                        man->use_io_reserve_lru = true;
                } else {
                        man->func = &ttm_bo_manager_func;
                }
                man->flags = TTM_MEMTYPE_FLAG_FIXED |
                             TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_UNCACHED |
                                         TTM_PL_FLAG_WC;
                man->default_caching = TTM_PL_FLAG_WC;
                break;
        case TTM_PL_TT:
                if (nv_device(drm->device)->card_type >= NV_50)
                        man->func = &nouveau_gart_manager;
                else if (drm->agp.stat != ENABLED)
                        man->func = &nv04_gart_manager;
                else
                        man->func = &ttm_bo_manager_func;

                if (drm->agp.stat == ENABLED) {
                        man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                        man->available_caching = TTM_PL_FLAG_UNCACHED |
                                TTM_PL_FLAG_WC;
                        man->default_caching = TTM_PL_FLAG_WC;
                } else {
                        man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
                                     TTM_MEMTYPE_FLAG_CMA;
                        man->available_caching = TTM_PL_MASK_CACHING;
                        man->default_caching = TTM_PL_FLAG_CACHED;
                }

                break;
        default:
                return -EINVAL;
        }
        return 0;
}

static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
        struct nouveau_bo *nvbo = nouveau_bo(bo);

        switch (bo->mem.mem_type) {
        case TTM_PL_VRAM:
                nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
                                         TTM_PL_FLAG_SYSTEM);
                break;
        default:
                nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
                break;
        }

        *pl = nvbo->placement;
}

/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
 * TTM_PL_{VRAM,TT} directly.
 */

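/* Emit a fence on the copy channel and hand it to TTM, so the old
 * backing store is only released once the GPU copy has completed.
 */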
static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
                              struct nouveau_bo *nvbo, bool evict,
                              bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
        struct nouveau_fence *fence = NULL;
        int ret;

        ret = nouveau_fence_new(chan, false, &fence);
        if (ret)
                return ret;

        ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, evict,
                                        no_wait_gpu, new_mem);
        nouveau_fence_unref(&fence);
        return ret;
}

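/* One copy implementation per hardware generation follows; each is
 * paired with an init routine that binds the object class listed in the
 * method table in nouveau_bo_move_init() below.
 */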
static int
nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
        int ret = RING_SPACE(chan, 2);
        if (ret == 0) {
                BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
                OUT_RING  (chan, handle & 0x0000ffff);
                FIRE_RING (chan);
        }
        return ret;
}

static int
nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
        struct nouveau_mem *node = old_mem->mm_node;
        int ret = RING_SPACE(chan, 10);
        if (ret == 0) {
                BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
                OUT_RING  (chan, upper_32_bits(node->vma[0].offset));
                OUT_RING  (chan, lower_32_bits(node->vma[0].offset));
                OUT_RING  (chan, upper_32_bits(node->vma[1].offset));
                OUT_RING  (chan, lower_32_bits(node->vma[1].offset));
                OUT_RING  (chan, PAGE_SIZE);
                OUT_RING  (chan, PAGE_SIZE);
                OUT_RING  (chan, PAGE_SIZE);
                OUT_RING  (chan, new_mem->num_pages);
                BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386);
        }
        return ret;
}

static int
nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
        int ret = RING_SPACE(chan, 2);
        if (ret == 0) {
                BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
                OUT_RING  (chan, handle);
        }
        return ret;
}

static int
nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
        struct nouveau_mem *node = old_mem->mm_node;
        u64 src_offset = node->vma[0].offset;
        u64 dst_offset = node->vma[1].offset;
        u32 page_count = new_mem->num_pages;
        int ret;

        while (page_count) {
                int line_count = (page_count > 8191) ? 8191 : page_count;

                ret = RING_SPACE(chan, 11);
                if (ret)
                        return ret;

                BEGIN_NVC0(chan, NvSubCopy, 0x030c, 8);
                OUT_RING  (chan, upper_32_bits(src_offset));
                OUT_RING  (chan, lower_32_bits(src_offset));
                OUT_RING  (chan, upper_32_bits(dst_offset));
                OUT_RING  (chan, lower_32_bits(dst_offset));
                OUT_RING  (chan, PAGE_SIZE);
                OUT_RING  (chan, PAGE_SIZE);
                OUT_RING  (chan, PAGE_SIZE);
                OUT_RING  (chan, line_count);
                BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
                OUT_RING  (chan, 0x00000110);

                page_count -= line_count;
                src_offset += (PAGE_SIZE * line_count);
                dst_offset += (PAGE_SIZE * line_count);
        }

        return 0;
}

static int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
        struct nouveau_mem *node = old_mem->mm_node;
        u64 src_offset = node->vma[0].offset;
        u64 dst_offset = node->vma[1].offset;
        u32 page_count = new_mem->num_pages;
        int ret;

        while (page_count) {
                int line_count = (page_count > 2047) ? 2047 : page_count;

                ret = RING_SPACE(chan, 12);
                if (ret)
                        return ret;

                BEGIN_NVC0(chan, NvSubCopy, 0x0238, 2);
                OUT_RING  (chan, upper_32_bits(dst_offset));
                OUT_RING  (chan, lower_32_bits(dst_offset));
                BEGIN_NVC0(chan, NvSubCopy, 0x030c, 6);
                OUT_RING  (chan, upper_32_bits(src_offset));
                OUT_RING  (chan, lower_32_bits(src_offset));
                OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
                OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
                OUT_RING  (chan, PAGE_SIZE); /* line_length */
                OUT_RING  (chan, line_count);
                BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
                OUT_RING  (chan, 0x00100110);

                page_count -= line_count;
                src_offset += (PAGE_SIZE * line_count);
                dst_offset += (PAGE_SIZE * line_count);
        }

        return 0;
}

static int
nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
        struct nouveau_mem *node = old_mem->mm_node;
        u64 src_offset = node->vma[0].offset;
        u64 dst_offset = node->vma[1].offset;
        u32 page_count = new_mem->num_pages;
        int ret;

        while (page_count) {
                int line_count = (page_count > 8191) ? 8191 : page_count;

                ret = RING_SPACE(chan, 11);
                if (ret)
                        return ret;

                BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
                OUT_RING  (chan, upper_32_bits(src_offset));
                OUT_RING  (chan, lower_32_bits(src_offset));
                OUT_RING  (chan, upper_32_bits(dst_offset));
                OUT_RING  (chan, lower_32_bits(dst_offset));
                OUT_RING  (chan, PAGE_SIZE);
                OUT_RING  (chan, PAGE_SIZE);
                OUT_RING  (chan, PAGE_SIZE);
                OUT_RING  (chan, line_count);
                BEGIN_NV04(chan, NvSubCopy, 0x0300, 1);
                OUT_RING  (chan, 0x00000110);

                page_count -= line_count;
                src_offset += (PAGE_SIZE * line_count);
                dst_offset += (PAGE_SIZE * line_count);
        }

        return 0;
}

static int
nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
        struct nouveau_mem *node = old_mem->mm_node;
        int ret = RING_SPACE(chan, 7);
        if (ret == 0) {
                BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
                OUT_RING  (chan, upper_32_bits(node->vma[0].offset));
                OUT_RING  (chan, lower_32_bits(node->vma[0].offset));
                OUT_RING  (chan, upper_32_bits(node->vma[1].offset));
                OUT_RING  (chan, lower_32_bits(node->vma[1].offset));
                OUT_RING  (chan, 0x00000000 /* COPY */);
                OUT_RING  (chan, new_mem->num_pages << PAGE_SHIFT);
        }
        return ret;
}

static int
nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
        struct nouveau_mem *node = old_mem->mm_node;
        int ret = RING_SPACE(chan, 7);
        if (ret == 0) {
                BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
                OUT_RING  (chan, new_mem->num_pages << PAGE_SHIFT);
                OUT_RING  (chan, upper_32_bits(node->vma[0].offset));
                OUT_RING  (chan, lower_32_bits(node->vma[0].offset));
                OUT_RING  (chan, upper_32_bits(node->vma[1].offset));
                OUT_RING  (chan, lower_32_bits(node->vma[1].offset));
                OUT_RING  (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */);
        }
        return ret;
}

static int
nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
        int ret = RING_SPACE(chan, 6);
        if (ret == 0) {
                BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
                OUT_RING  (chan, handle);
                BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
                OUT_RING  (chan, NvNotify0);
                OUT_RING  (chan, NvDmaFB);
                OUT_RING  (chan, NvDmaFB);
        }

        return ret;
}

static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
        struct nouveau_mem *node = old_mem->mm_node;
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        u64 length = (new_mem->num_pages << PAGE_SHIFT);
        u64 src_offset = node->vma[0].offset;
        u64 dst_offset = node->vma[1].offset;
        int ret;

        while (length) {
                u32 amount, stride, height;

                amount  = min(length, (u64)(4 * 1024 * 1024));
                stride  = 16 * 4;
                height  = amount / stride;

                if (old_mem->mem_type == TTM_PL_VRAM &&
                    nouveau_bo_tile_layout(nvbo)) {
                        ret = RING_SPACE(chan, 8);
                        if (ret)
                                return ret;

                        BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);
                        OUT_RING  (chan, 0);
                        OUT_RING  (chan, 0);
                        OUT_RING  (chan, stride);
                        OUT_RING  (chan, height);
                        OUT_RING  (chan, 1);
                        OUT_RING  (chan, 0);
                        OUT_RING  (chan, 0);
                } else {
                        ret = RING_SPACE(chan, 2);
                        if (ret)
                                return ret;

                        BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
                        OUT_RING  (chan, 1);
                }
                if (new_mem->mem_type == TTM_PL_VRAM &&
                    nouveau_bo_tile_layout(nvbo)) {
                        ret = RING_SPACE(chan, 8);
                        if (ret)
                                return ret;

                        BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);
                        OUT_RING  (chan, 0);
                        OUT_RING  (chan, 0);
                        OUT_RING  (chan, stride);
                        OUT_RING  (chan, height);
                        OUT_RING  (chan, 1);
                        OUT_RING  (chan, 0);
                        OUT_RING  (chan, 0);
                } else {
                        ret = RING_SPACE(chan, 2);
                        if (ret)
                                return ret;

                        BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);
                        OUT_RING  (chan, 1);
                }

                ret = RING_SPACE(chan, 14);
                if (ret)
                        return ret;

                BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
                OUT_RING  (chan, upper_32_bits(src_offset));
                OUT_RING  (chan, upper_32_bits(dst_offset));
                BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
                OUT_RING  (chan, lower_32_bits(src_offset));
                OUT_RING  (chan, lower_32_bits(dst_offset));
                OUT_RING  (chan, stride);
                OUT_RING  (chan, stride);
                OUT_RING  (chan, stride);
                OUT_RING  (chan, height);
                OUT_RING  (chan, 0x00000101);
                OUT_RING  (chan, 0x00000000);
                BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
                OUT_RING  (chan, 0);

                length -= amount;
                src_offset += amount;
                dst_offset += amount;
        }

        return 0;
}

static int
nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
        int ret = RING_SPACE(chan, 4);
        if (ret == 0) {
                BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
                OUT_RING  (chan, handle);
                BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
                OUT_RING  (chan, NvNotify0);
        }

        return ret;
}

static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
                      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
{
        if (mem->mem_type == TTM_PL_TT)
                return NvDmaTT;
        return NvDmaFB;
}

static int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
        u32 src_offset = old_mem->start << PAGE_SHIFT;
        u32 dst_offset = new_mem->start << PAGE_SHIFT;
        u32 page_count = new_mem->num_pages;
        int ret;

        ret = RING_SPACE(chan, 3);
        if (ret)
                return ret;

        BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
        OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
        OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

        while (page_count) {
                int line_count = (page_count > 2047) ? 2047 : page_count;

                ret = RING_SPACE(chan, 11);
                if (ret)
                        return ret;

                BEGIN_NV04(chan, NvSubCopy,
                                 NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
                OUT_RING  (chan, src_offset);
                OUT_RING  (chan, dst_offset);
                OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
                OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
                OUT_RING  (chan, PAGE_SIZE); /* line_length */
                OUT_RING  (chan, line_count);
                OUT_RING  (chan, 0x00000101);
                OUT_RING  (chan, 0x00000000);
                BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
                OUT_RING  (chan, 0);

                page_count -= line_count;
                src_offset += (PAGE_SIZE * line_count);
                dst_offset += (PAGE_SIZE * line_count);
        }

        return 0;
}

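/* Allocate a temporary GPU virtual address range for one side of a copy
 * and map the memory node into it; on NV50+ the copy methods address
 * memory through the client's VM rather than physically.
 */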
static int
nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
                   struct ttm_mem_reg *mem, struct nouveau_vma *vma)
{
        struct nouveau_mem *node = mem->mm_node;
        int ret;

        ret = nouveau_vm_get(nv_client(chan->cli)->vm, mem->num_pages <<
                             PAGE_SHIFT, node->page_shift,
                             NV_MEM_ACCESS_RW, vma);
        if (ret)
                return ret;

        if (mem->mem_type == TTM_PL_VRAM)
                nouveau_vm_map(vma, node);
        else
                nouveau_vm_map_sg(vma, 0, mem->num_pages << PAGE_SHIFT, node);

        return 0;
}

static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
                     bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
        struct nouveau_channel *chan = drm->ttm.chan;
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct ttm_mem_reg *old_mem = &bo->mem;
        int ret;

        mutex_lock_nested(&chan->cli->mutex, SINGLE_DEPTH_NESTING);

        /* Create temporary VMAs for the transfer and attach them to the
         * old nouveau_mem node; they will be cleaned up once TTM has
         * destroyed the ttm_mem_reg.
         */
        if (nv_device(drm->device)->card_type >= NV_50) {
                struct nouveau_mem *node = old_mem->mm_node;

                ret = nouveau_vma_getmap(chan, nvbo, old_mem, &node->vma[0]);
                if (ret)
                        goto out;

                ret = nouveau_vma_getmap(chan, nvbo, new_mem, &node->vma[1]);
                if (ret)
                        goto out;
        }

        ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
        if (ret == 0) {
                ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
                                                    no_wait_gpu, new_mem);
        }

out:
        mutex_unlock(&chan->cli->mutex);
        return ret;
}

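/* Probe the method table from newest to oldest and bind the first copy
 * class the device will accept.  The empty sentinel entry stops the walk
 * before the (disabled) 0x88b4 CRYPT entry.  If nothing matches, buffer
 * moves fall back to CPU copies.
 */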
void
nouveau_bo_move_init(struct nouveau_drm *drm)
{
        static const struct {
                const char *name;
                int engine;
                u32 oclass;
                int (*exec)(struct nouveau_channel *,
                            struct ttm_buffer_object *,
                            struct ttm_mem_reg *, struct ttm_mem_reg *);
                int (*init)(struct nouveau_channel *, u32 handle);
        } _methods[] = {
                {  "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
                {  "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
                { "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
                { "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
                {  "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
                { "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init },
                {  "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init },
                {  "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
                {  "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
                {},
                { "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init },
        }, *mthd = _methods;
        const char *name = "CPU";
        int ret;

        do {
                struct nouveau_object *object;
                struct nouveau_channel *chan;
                u32 handle = (mthd->engine << 16) | mthd->oclass;

                if (mthd->engine)
                        chan = drm->cechan;
                else
                        chan = drm->channel;
                if (chan == NULL)
                        continue;

                ret = nouveau_object_new(nv_object(drm), chan->handle, handle,
                                         mthd->oclass, NULL, 0, &object);
                if (ret == 0) {
                        ret = mthd->init(chan, handle);
                        if (ret) {
                                nouveau_object_del(nv_object(drm),
                                                   chan->handle, handle);
                                continue;
                        }

                        drm->ttm.move = mthd->exec;
                        drm->ttm.chan = chan;
                        name = mthd->name;
                        break;
                }
        } while ((++mthd)->exec);

        NV_INFO(drm, "MM: using %s for buffer copies\n", name);
}

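/* A VRAM<->SYSTEM move can't be done in one hop: bounce the data through
 * a GART placement with the copy engine, then let TTM finish the move
 * into the final placement.
 */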
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
                      bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
        u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
        struct ttm_placement placement;
        struct ttm_mem_reg tmp_mem;
        int ret;

        placement.fpfn = placement.lpfn = 0;
        placement.num_placement = placement.num_busy_placement = 1;
        placement.placement = placement.busy_placement = &placement_memtype;

        tmp_mem = *new_mem;
        tmp_mem.mm_node = NULL;
        ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
        if (ret)
                return ret;

        ret = ttm_tt_bind(bo->ttm, &tmp_mem);
        if (ret)
                goto out;

        ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_mem);
        if (ret)
                goto out;

        ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
out:
        ttm_bo_mem_put(bo, &tmp_mem);
        return ret;
}

static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
                      bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
        u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
        struct ttm_placement placement;
        struct ttm_mem_reg tmp_mem;
        int ret;

        placement.fpfn = placement.lpfn = 0;
        placement.num_placement = placement.num_busy_placement = 1;
        placement.placement = placement.busy_placement = &placement_memtype;

        tmp_mem = *new_mem;
        tmp_mem.mm_node = NULL;
        ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
        if (ret)
                return ret;

        ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
        if (ret)
                goto out;

        ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_mem);
out:
        ttm_bo_mem_put(bo, &tmp_mem);
        return ret;
}

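/* Keep every GPU VM mapping of the buffer in sync with its new backing
 * store whenever TTM moves it; mappings that can no longer be expressed
 * are unmapped instead.
 */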
static void
nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
{
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct nouveau_vma *vma;

        /* ttm can now (stupidly) pass the driver bos it didn't create... */
        if (bo->destroy != nouveau_bo_del_ttm)
                return;

        list_for_each_entry(vma, &nvbo->vma_list, head) {
                if (new_mem && new_mem->mem_type == TTM_PL_VRAM) {
                        nouveau_vm_map(vma, new_mem->mm_node);
                } else if (new_mem && new_mem->mem_type == TTM_PL_TT &&
                           nvbo->page_shift == vma->vm->vmm->spg_shift) {
                        if (((struct nouveau_mem *)new_mem->mm_node)->sg)
                                nouveau_vm_map_sg_table(vma, 0,
                                        new_mem->num_pages << PAGE_SHIFT,
                                        new_mem->mm_node);
                        else
                                nouveau_vm_map_sg(vma, 0,
                                        new_mem->num_pages << PAGE_SHIFT,
                                        new_mem->mm_node);
                } else {
                        nouveau_vm_unmap(vma);
                }
        }
}

static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
                   struct nouveau_drm_tile **new_tile)
{
        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
        struct drm_device *dev = drm->dev;
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        u64 offset = new_mem->start << PAGE_SHIFT;

        *new_tile = NULL;
        if (new_mem->mem_type != TTM_PL_VRAM)
                return 0;

        if (nv_device(drm->device)->card_type >= NV_10) {
                *new_tile = nv10_bo_set_tiling(dev, offset, new_mem->size,
                                               nvbo->tile_mode,
                                               nvbo->tile_flags);
        }

        return 0;
}

static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
                      struct nouveau_drm_tile *new_tile,
                      struct nouveau_drm_tile **old_tile)
{
        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
        struct drm_device *dev = drm->dev;

        nv10_bo_put_tile_region(dev, *old_tile, bo->sync_obj);
        *old_tile = new_tile;
}

static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
                bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct ttm_mem_reg *old_mem = &bo->mem;
        struct nouveau_drm_tile *new_tile = NULL;
        int ret = 0;

        if (nv_device(drm->device)->card_type < NV_50) {
                ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
                if (ret)
                        return ret;
        }

        /* Fake bo copy. */
        if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
                BUG_ON(bo->mem.mm_node != NULL);
                bo->mem = *new_mem;
                new_mem->mm_node = NULL;
                goto out;
        }

        /* CPU copy if we have no accelerated method available */
        if (!drm->ttm.move) {
                ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
                goto out;
        }

        /* Hardware assisted copy. */
        if (new_mem->mem_type == TTM_PL_SYSTEM)
                ret = nouveau_bo_move_flipd(bo, evict, intr,
                                            no_wait_gpu, new_mem);
        else if (old_mem->mem_type == TTM_PL_SYSTEM)
                ret = nouveau_bo_move_flips(bo, evict, intr,
                                            no_wait_gpu, new_mem);
        else
                ret = nouveau_bo_move_m2mf(bo, evict, intr,
                                           no_wait_gpu, new_mem);

        if (!ret)
                goto out;

        /* Fallback to software copy. */
        ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);

out:
        if (nv_device(drm->device)->card_type < NV_50) {
                if (ret)
                        nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
                else
                        nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
        }

        return ret;
}

static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
        struct nouveau_bo *nvbo = nouveau_bo(bo);

        return drm_vma_node_verify_access(&nvbo->gem.vma_node, filp);
}

static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        struct nouveau_drm *drm = nouveau_bdev(bdev);
        struct drm_device *dev = drm->dev;
        int ret;

        mem->bus.addr = NULL;
        mem->bus.offset = 0;
        mem->bus.size = mem->num_pages << PAGE_SHIFT;
        mem->bus.base = 0;
        mem->bus.is_iomem = false;
        if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
                return -EINVAL;
        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:
                /* System memory */
                return 0;
        case TTM_PL_TT:
#if __OS_HAS_AGP
                if (drm->agp.stat == ENABLED) {
                        mem->bus.offset = mem->start << PAGE_SHIFT;
                        mem->bus.base = drm->agp.base;
                        mem->bus.is_iomem = !dev->agp->cant_use_aperture;
                }
#endif
                break;
        case TTM_PL_VRAM:
                mem->bus.offset = mem->start << PAGE_SHIFT;
                mem->bus.base = pci_resource_start(dev->pdev, 1);
                mem->bus.is_iomem = true;
                if (nv_device(drm->device)->card_type >= NV_50) {
                        struct nouveau_bar *bar = nouveau_bar(drm->device);
                        struct nouveau_mem *node = mem->mm_node;

                        ret = bar->umap(bar, node, NV_MEM_ACCESS_RW,
                                        &node->bar_vma);
                        if (ret)
                                return ret;

                        mem->bus.offset = node->bar_vma.offset;
                }
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
        struct nouveau_drm *drm = nouveau_bdev(bdev);
        struct nouveau_bar *bar = nouveau_bar(drm->device);
        struct nouveau_mem *node = mem->mm_node;

        if (!node->bar_vma.node)
                return;

        bar->unmap(bar, &node->bar_vma);
}

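/* Called when a CPU mapping of the bo is about to fault it in: anything
 * CPU-visible must live in the mappable slice of VRAM behind PCI
 * resource 1, so restrict the placement and revalidate if the bo
 * currently sits above that limit.
 */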
static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct nouveau_device *device = nv_device(drm->device);
        u32 mappable = pci_resource_len(device->pdev, 1) >> PAGE_SHIFT;

        /* as long as the bo isn't in vram, and isn't tiled, we've got
         * nothing to do here.
         */
        if (bo->mem.mem_type != TTM_PL_VRAM) {
                if (nv_device(drm->device)->card_type < NV_50 ||
                    !nouveau_bo_tile_layout(nvbo))
                        return 0;
        }

        /* make sure bo is in mappable vram */
        if (bo->mem.start + bo->mem.num_pages < mappable)
                return 0;

        nvbo->placement.fpfn = 0;
        nvbo->placement.lpfn = mappable;
        nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
        return nouveau_bo_validate(nvbo, false, false);
}

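/* Populate system pages for a ttm_tt and set up their DMA addresses,
 * preferring AGP or the swiotlb-aware DMA pool when available, and
 * otherwise mapping each page by hand through the PCI DMA API.
 */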
static int
nouveau_ttm_tt_populate(struct ttm_tt *ttm)
{
        struct ttm_dma_tt *ttm_dma = (void *)ttm;
        struct nouveau_drm *drm;
        struct drm_device *dev;
        unsigned i;
        int r;
        bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

        if (ttm->state != tt_unpopulated)
                return 0;

        if (slave && ttm->sg) {
                /* make userspace faulting work */
                drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
                                                 ttm_dma->dma_address, ttm->num_pages);
                ttm->state = tt_unbound;
                return 0;
        }

        drm = nouveau_bdev(ttm->bdev);
        dev = drm->dev;

#if __OS_HAS_AGP
        if (drm->agp.stat == ENABLED) {
                return ttm_agp_tt_populate(ttm);
        }
#endif

#ifdef CONFIG_SWIOTLB
        if (swiotlb_nr_tbl()) {
                return ttm_dma_populate((void *)ttm, dev->dev);
        }
#endif

        r = ttm_pool_populate(ttm);
        if (r) {
                return r;
        }

        for (i = 0; i < ttm->num_pages; i++) {
                ttm_dma->dma_address[i] = pci_map_page(dev->pdev, ttm->pages[i],
                                                   0, PAGE_SIZE,
                                                   PCI_DMA_BIDIRECTIONAL);
                if (pci_dma_mapping_error(dev->pdev, ttm_dma->dma_address[i])) {
                        /* unwind the mappings done so far (pages 0..i-1) */
                        while (i--) {
                                pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
                                               PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
                                ttm_dma->dma_address[i] = 0;
                        }
                        ttm_pool_unpopulate(ttm);
                        return -EFAULT;
                }
        }
        return 0;
}

static void
nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
        struct ttm_dma_tt *ttm_dma = (void *)ttm;
        struct nouveau_drm *drm;
        struct drm_device *dev;
        unsigned i;
        bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

        if (slave)
                return;

        drm = nouveau_bdev(ttm->bdev);
        dev = drm->dev;

#if __OS_HAS_AGP
        if (drm->agp.stat == ENABLED) {
                ttm_agp_tt_unpopulate(ttm);
                return;
        }
#endif

#ifdef CONFIG_SWIOTLB
        if (swiotlb_nr_tbl()) {
                ttm_dma_unpopulate((void *)ttm, dev->dev);
                return;
        }
#endif

        for (i = 0; i < ttm->num_pages; i++) {
                if (ttm_dma->dma_address[i]) {
                        pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
                                       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
                }
        }

        ttm_pool_unpopulate(ttm);
}

void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
{
        struct nouveau_fence *old_fence = NULL;

        if (likely(fence))
                nouveau_fence_ref(fence);

        spin_lock(&nvbo->bo.bdev->fence_lock);
        old_fence = nvbo->bo.sync_obj;
        nvbo->bo.sync_obj = fence;
        spin_unlock(&nvbo->bo.bdev->fence_lock);

        nouveau_fence_unref(&old_fence);
}

static void
nouveau_bo_fence_unref(void **sync_obj)
{
        nouveau_fence_unref((struct nouveau_fence **)sync_obj);
}

static void *
nouveau_bo_fence_ref(void *sync_obj)
{
        return nouveau_fence_ref(sync_obj);
}

static bool
nouveau_bo_fence_signalled(void *sync_obj)
{
        return nouveau_fence_done(sync_obj);
}

static int
nouveau_bo_fence_wait(void *sync_obj, bool lazy, bool intr)
{
        return nouveau_fence_wait(sync_obj, lazy, intr);
}

static int
nouveau_bo_fence_flush(void *sync_obj)
{
        return 0;
}

struct ttm_bo_driver nouveau_bo_driver = {
        .ttm_tt_create = &nouveau_ttm_tt_create,
        .ttm_tt_populate = &nouveau_ttm_tt_populate,
        .ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
        .invalidate_caches = nouveau_bo_invalidate_caches,
        .init_mem_type = nouveau_bo_init_mem_type,
        .evict_flags = nouveau_bo_evict_flags,
        .move_notify = nouveau_bo_move_ntfy,
        .move = nouveau_bo_move,
        .verify_access = nouveau_bo_verify_access,
        .sync_obj_signaled = nouveau_bo_fence_signalled,
        .sync_obj_wait = nouveau_bo_fence_wait,
        .sync_obj_flush = nouveau_bo_fence_flush,
        .sync_obj_unref = nouveau_bo_fence_unref,
        .sync_obj_ref = nouveau_bo_fence_ref,
        .fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
        .io_mem_reserve = &nouveau_ttm_io_mem_reserve,
        .io_mem_free = &nouveau_ttm_io_mem_free,
};

struct nouveau_vma *
nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nouveau_vm *vm)
{
        struct nouveau_vma *vma;
        list_for_each_entry(vma, &nvbo->vma_list, head) {
                if (vma->vm == vm)
                        return vma;
        }

        return NULL;
}

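/* Map the bo into the given VM and track the mapping on the bo's
 * vma_list so nouveau_bo_move_ntfy() can keep it up to date across
 * moves.
 */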
int
nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
                   struct nouveau_vma *vma)
{
        const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
        struct nouveau_mem *node = nvbo->bo.mem.mm_node;
        int ret;

        ret = nouveau_vm_get(vm, size, nvbo->page_shift,
                             NV_MEM_ACCESS_RW, vma);
        if (ret)
                return ret;

        if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
                nouveau_vm_map(vma, nvbo->bo.mem.mm_node);
        else if (nvbo->bo.mem.mem_type == TTM_PL_TT) {
                if (node->sg)
                        nouveau_vm_map_sg_table(vma, 0, size, node);
                else
                        nouveau_vm_map_sg(vma, 0, size, node);
        }

        list_add_tail(&vma->head, &nvbo->vma_list);
        vma->refcount = 1;
        return 0;
}

void
nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
        if (vma->node) {
                if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM)
                        nouveau_vm_unmap(vma);
                nouveau_vm_put(vma);
                list_del(&vma->head);
        }
}