1 /*
2  * Copyright 2007 Dave Airlied
3  * All Rights Reserved.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice (including the next
13  * paragraph) shall be included in all copies or substantial portions of the
14  * Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  */
24 /*
25  * Authors: Dave Airlied <airlied@linux.ie>
26  *          Ben Skeggs   <darktama@iinet.net.au>
27  *          Jeremy Kolb  <jkolb@brandeis.edu>
28  */
29
30 #include "drmP.h"
31 #include "ttm/ttm_page_alloc.h"
32
33 #include <nouveau_drm.h>
34 #include "nouveau_drv.h"
35 #include "nouveau_dma.h"
36 #include <core/mm.h>
37 #include "nouveau_fence.h"
38 #include <core/ramht.h>
39
40 #include <linux/log2.h>
41 #include <linux/slab.h>
42
43 static void
44 nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
45 {
46         struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
47         struct drm_device *dev = dev_priv->dev;
48         struct nouveau_bo *nvbo = nouveau_bo(bo);
49
50         if (unlikely(nvbo->gem))
51                 DRM_ERROR("bo %p still attached to GEM object\n", bo);
52
53         nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
54         kfree(nvbo);
55 }
56
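/* Round the requested size and alignment up to what the chip needs:
 * pre-NV50 tiled buffers use a chipset- and tile_mode-dependent size
 * granularity and alignment, NV50+ rounds to the bo's page size, and
 * the result is always padded to the CPU page size.
 */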
57 static void
58 nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
59                        int *align, int *size)
60 {
61         struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
62
63         if (dev_priv->card_type < NV_50) {
64                 if (nvbo->tile_mode) {
65                         if (dev_priv->chipset >= 0x40) {
66                                 *align = 65536;
67                                 *size = roundup(*size, 64 * nvbo->tile_mode);
68
69                         } else if (dev_priv->chipset >= 0x30) {
70                                 *align = 32768;
71                                 *size = roundup(*size, 64 * nvbo->tile_mode);
72
73                         } else if (dev_priv->chipset >= 0x20) {
74                                 *align = 16384;
75                                 *size = roundup(*size, 64 * nvbo->tile_mode);
76
77                         } else if (dev_priv->chipset >= 0x10) {
78                                 *align = 16384;
79                                 *size = roundup(*size, 32 * nvbo->tile_mode);
80                         }
81                 }
82         } else {
83                 *size = roundup(*size, (1 << nvbo->page_shift));
84                 *align = max((1 <<  nvbo->page_shift), *align);
85         }
86
87         *size = roundup(*size, PAGE_SIZE);
88 }
89
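/* Allocate and initialise a nouveau_bo.  Buffers larger than 256KiB
 * that are not destined for the GART get the VM's large page shift
 * when a channel VM is available; size and alignment are then fixed
 * up for the chipset and the object is handed to TTM.  On failure
 * ttm_bo_init() calls nouveau_bo_del_ttm() itself, so the error path
 * must not free nvbo again.
 */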
90 int
91 nouveau_bo_new(struct drm_device *dev, int size, int align,
92                uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
93                struct sg_table *sg,
94                struct nouveau_bo **pnvbo)
95 {
96         struct drm_nouveau_private *dev_priv = dev->dev_private;
97         struct nouveau_bo *nvbo;
98         size_t acc_size;
99         int ret;
100         int type = ttm_bo_type_device;
101
102         if (sg)
103                 type = ttm_bo_type_sg;
104
105         nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
106         if (!nvbo)
107                 return -ENOMEM;
108         INIT_LIST_HEAD(&nvbo->head);
109         INIT_LIST_HEAD(&nvbo->entry);
110         INIT_LIST_HEAD(&nvbo->vma_list);
111         nvbo->tile_mode = tile_mode;
112         nvbo->tile_flags = tile_flags;
113         nvbo->bo.bdev = &dev_priv->ttm.bdev;
114
115         nvbo->page_shift = 12;
116         if (dev_priv->chan_vm) {
117                 if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
118                         nvbo->page_shift = nvvm_lpg_shift(dev_priv->chan_vm);
119         }
120
121         nouveau_bo_fixup_align(nvbo, flags, &align, &size);
122         nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
123         nouveau_bo_placement_set(nvbo, flags, 0);
124
125         acc_size = ttm_bo_dma_acc_size(&dev_priv->ttm.bdev, size,
126                                        sizeof(struct nouveau_bo));
127
128         ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
129                           type, &nvbo->placement,
130                           align >> PAGE_SHIFT, 0, false, NULL, acc_size, sg,
131                           nouveau_bo_del_ttm);
132         if (ret) {
133                 /* ttm will call nouveau_bo_del_ttm if it fails.. */
134                 return ret;
135         }
136
137         *pnvbo = nvbo;
138         return 0;
139 }
140
141 static void
142 set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
143 {
144         *n = 0;
145
146         if (type & TTM_PL_FLAG_VRAM)
147                 pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
148         if (type & TTM_PL_FLAG_TT)
149                 pl[(*n)++] = TTM_PL_FLAG_TT | flags;
150         if (type & TTM_PL_FLAG_SYSTEM)
151                 pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
152 }
153
154 static void
155 set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
156 {
157         struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
158         int vram_pages = nvfb_vram_size(dev_priv->dev) >> PAGE_SHIFT;
159
160         if (dev_priv->card_type == NV_10 &&
161             nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
162             nvbo->bo.mem.num_pages < vram_pages / 4) {
163                 /*
164                  * Make sure that the color and depth buffers are handled
165                  * by independent memory controller units. Up to a 9x
166                  * speed up when alpha-blending and depth-test are enabled
167                  * at the same time.
168                  */
169                 if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
170                         nvbo->placement.fpfn = vram_pages / 2;
171                         nvbo->placement.lpfn = ~0;
172                 } else {
173                         nvbo->placement.fpfn = 0;
174                         nvbo->placement.lpfn = vram_pages / 2;
175                 }
176         }
177 }
178
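/* Build the primary and busy placement lists from the requested
 * domains, allowing any caching mode and setting TTM_PL_FLAG_NO_EVICT
 * while the bo holds pin references, then apply the NV10-family VRAM
 * split from set_placement_range().
 */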
179 void
180 nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
181 {
182         struct ttm_placement *pl = &nvbo->placement;
183         uint32_t flags = TTM_PL_MASK_CACHING |
184                 (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);
185
186         pl->placement = nvbo->placements;
187         set_placement_list(nvbo->placements, &pl->num_placement,
188                            type, flags);
189
190         pl->busy_placement = nvbo->busy_placements;
191         set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
192                            type | busy, flags);
193
194         set_placement_range(nvbo, type);
195 }
196
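/* Pin a bo into the given memory type.  Pinning is reference counted;
 * only the first pin actually validates the buffer (with NO_EVICT set
 * via the placement flags) and charges it against the aperture-free
 * counters.  Pinning into a different memory type than an existing
 * pin is rejected with -EINVAL.
 */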
197 int
198 nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
199 {
200         struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
201         struct ttm_buffer_object *bo = &nvbo->bo;
202         int ret;
203
204         if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
205                 NV_ERROR(nouveau_bdev(bo->bdev)->dev,
206                          "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
207                          1 << bo->mem.mem_type, memtype);
208                 return -EINVAL;
209         }
210
211         if (nvbo->pin_refcnt++)
212                 return 0;
213
214         ret = ttm_bo_reserve(bo, false, false, false, 0);
215         if (ret)
216                 goto out;
217
218         nouveau_bo_placement_set(nvbo, memtype, 0);
219
220         ret = nouveau_bo_validate(nvbo, false, false, false);
221         if (ret == 0) {
222                 switch (bo->mem.mem_type) {
223                 case TTM_PL_VRAM:
224                         dev_priv->fb_aper_free -= bo->mem.size;
225                         break;
226                 case TTM_PL_TT:
227                         dev_priv->gart_info.aper_free -= bo->mem.size;
228                         break;
229                 default:
230                         break;
231                 }
232         }
233         ttm_bo_unreserve(bo);
234 out:
235         if (unlikely(ret))
236                 nvbo->pin_refcnt--;
237         return ret;
238 }
239
240 int
241 nouveau_bo_unpin(struct nouveau_bo *nvbo)
242 {
243         struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
244         struct ttm_buffer_object *bo = &nvbo->bo;
245         int ret;
246
247         if (--nvbo->pin_refcnt)
248                 return 0;
249
250         ret = ttm_bo_reserve(bo, false, false, false, 0);
251         if (ret)
252                 return ret;
253
254         nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
255
256         ret = nouveau_bo_validate(nvbo, false, false, false);
257         if (ret == 0) {
258                 switch (bo->mem.mem_type) {
259                 case TTM_PL_VRAM:
260                         dev_priv->fb_aper_free += bo->mem.size;
261                         break;
262                 case TTM_PL_TT:
263                         dev_priv->gart_info.aper_free += bo->mem.size;
264                         break;
265                 default:
266                         break;
267                 }
268         }
269
270         ttm_bo_unreserve(bo);
271         return ret;
272 }
273
274 int
275 nouveau_bo_map(struct nouveau_bo *nvbo)
276 {
277         int ret;
278
279         ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
280         if (ret)
281                 return ret;
282
283         ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
284         ttm_bo_unreserve(&nvbo->bo);
285         return ret;
286 }
287
288 void
289 nouveau_bo_unmap(struct nouveau_bo *nvbo)
290 {
291         if (nvbo)
292                 ttm_bo_kunmap(&nvbo->kmap);
293 }
294
295 int
296 nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
297                     bool no_wait_reserve, bool no_wait_gpu)
298 {
299         int ret;
300
301         ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, interruptible,
302                               no_wait_reserve, no_wait_gpu);
303         if (ret)
304                 return ret;
305
306         return 0;
307 }
308
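/* CPU accessors for a kmapped bo.  ttm_bo_kmap() may return either an
 * ioremapped or a plain kernel mapping, so every access checks
 * is_iomem and goes through the io*_native() helpers when needed.
 * The index is in units of the access width, not bytes.
 */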
309 u16
310 nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
311 {
312         bool is_iomem;
313         u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
314         mem = &mem[index];
315         if (is_iomem)
316                 return ioread16_native((void __force __iomem *)mem);
317         else
318                 return *mem;
319 }
320
321 void
322 nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
323 {
324         bool is_iomem;
325         u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
326         mem = &mem[index];
327         if (is_iomem)
328                 iowrite16_native(val, (void __force __iomem *)mem);
329         else
330                 *mem = val;
331 }
332
333 u32
334 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
335 {
336         bool is_iomem;
337         u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
338         mem = &mem[index];
339         if (is_iomem)
340                 return ioread32_native((void __force __iomem *)mem);
341         else
342                 return *mem;
343 }
344
345 void
346 nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
347 {
348         bool is_iomem;
349         u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
350         mem = &mem[index];
351         if (is_iomem)
352                 iowrite32_native(val, (void __force __iomem *)mem);
353         else
354                 *mem = val;
355 }
356
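/* Create the TTM backing-store object for a bo: AGP GARTs use TTM's
 * AGP helper, PDMA/HW GARTs use nouveau's sgdma implementation.
 */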
357 static struct ttm_tt *
358 nouveau_ttm_tt_create(struct ttm_bo_device *bdev,
359                       unsigned long size, uint32_t page_flags,
360                       struct page *dummy_read_page)
361 {
362         struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
363         struct drm_device *dev = dev_priv->dev;
364
365         switch (dev_priv->gart_info.type) {
366 #if __OS_HAS_AGP
367         case NOUVEAU_GART_AGP:
368                 return ttm_agp_tt_create(bdev, dev->agp->bridge,
369                                          size, page_flags, dummy_read_page);
370 #endif
371         case NOUVEAU_GART_PDMA:
372         case NOUVEAU_GART_HW:
373                 return nouveau_sgdma_create_ttm(bdev, size, page_flags,
374                                                 dummy_read_page);
375         default:
376                 NV_ERROR(dev, "Unknown GART type %d\n",
377                          dev_priv->gart_info.type);
378                 break;
379         }
380
381         return NULL;
382 }
383
384 static int
385 nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
386 {
387         /* We'll do this from user space. */
388         return 0;
389 }
390
391 static int
392 nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
393                          struct ttm_mem_type_manager *man)
394 {
395         struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
396         struct drm_device *dev = dev_priv->dev;
397
398         switch (type) {
399         case TTM_PL_SYSTEM:
400                 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
401                 man->available_caching = TTM_PL_MASK_CACHING;
402                 man->default_caching = TTM_PL_FLAG_CACHED;
403                 break;
404         case TTM_PL_VRAM:
405                 if (dev_priv->card_type >= NV_50) {
406                         man->func = &nouveau_vram_manager;
407                         man->io_reserve_fastpath = false;
408                         man->use_io_reserve_lru = true;
409                 } else {
410                         man->func = &ttm_bo_manager_func;
411                 }
412                 man->flags = TTM_MEMTYPE_FLAG_FIXED |
413                              TTM_MEMTYPE_FLAG_MAPPABLE;
414                 man->available_caching = TTM_PL_FLAG_UNCACHED |
415                                          TTM_PL_FLAG_WC;
416                 man->default_caching = TTM_PL_FLAG_WC;
417                 break;
418         case TTM_PL_TT:
419                 if (dev_priv->card_type >= NV_50)
420                         man->func = &nouveau_gart_manager;
421                 else
422                 if (dev_priv->gart_info.type != NOUVEAU_GART_AGP)
423                         man->func = &nv04_gart_manager;
424                 else
425                         man->func = &ttm_bo_manager_func;
426                 switch (dev_priv->gart_info.type) {
427                 case NOUVEAU_GART_AGP:
428                         man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
429                         man->available_caching = TTM_PL_FLAG_UNCACHED |
430                                 TTM_PL_FLAG_WC;
431                         man->default_caching = TTM_PL_FLAG_WC;
432                         break;
433                 case NOUVEAU_GART_PDMA:
434                 case NOUVEAU_GART_HW:
435                         man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
436                                      TTM_MEMTYPE_FLAG_CMA;
437                         man->available_caching = TTM_PL_MASK_CACHING;
438                         man->default_caching = TTM_PL_FLAG_CACHED;
439                         break;
440                 default:
441                         NV_ERROR(dev, "Unknown GART type: %d\n",
442                                  dev_priv->gart_info.type);
443                         return -EINVAL;
444                 }
445                 break;
446         default:
447                 NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
448                 return -EINVAL;
449         }
450         return 0;
451 }
452
453 static void
454 nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
455 {
456         struct nouveau_bo *nvbo = nouveau_bo(bo);
457
458         switch (bo->mem.mem_type) {
459         case TTM_PL_VRAM:
460                 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
461                                          TTM_PL_FLAG_SYSTEM);
462                 break;
463         default:
464                 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
465                 break;
466         }
467
468         *pl = nvbo->placement;
469 }
470
471
472 /* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
473  * TTM_PL_{VRAM,TT} directly.
474  */
475
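/* Emit a fence on the copy channel and hand it to TTM so the old
 * backing store is only torn down once the GPU copy has completed.
 */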
476 static int
477 nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
478                               struct nouveau_bo *nvbo, bool evict,
479                               bool no_wait_reserve, bool no_wait_gpu,
480                               struct ttm_mem_reg *new_mem)
481 {
482         struct nouveau_fence *fence = NULL;
483         int ret;
484
485         ret = nouveau_fence_new(chan, &fence);
486         if (ret)
487                 return ret;
488
489         ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict,
490                                         no_wait_reserve, no_wait_gpu, new_mem);
491         nouveau_fence_unref(&fence);
492         return ret;
493 }
494
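/* The per-class copy back-ends below are selected by
 * nouveau_bo_move_init().  On NV50+ they read from the temporary VMA
 * at node->vma[0] and write to node->vma[1] (set up in
 * nouveau_bo_move_m2mf()); the NV04 path works on raw offsets and
 * context DMAs instead.
 */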
495 static int
496 nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
497                   struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
498 {
499         struct nouveau_mem *node = old_mem->mm_node;
500         int ret = RING_SPACE(chan, 10);
501         if (ret == 0) {
502                 BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
503                 OUT_RING  (chan, upper_32_bits(node->vma[0].offset));
504                 OUT_RING  (chan, lower_32_bits(node->vma[0].offset));
505                 OUT_RING  (chan, upper_32_bits(node->vma[1].offset));
506                 OUT_RING  (chan, lower_32_bits(node->vma[1].offset));
507                 OUT_RING  (chan, PAGE_SIZE);
508                 OUT_RING  (chan, PAGE_SIZE);
509                 OUT_RING  (chan, PAGE_SIZE);
510                 OUT_RING  (chan, new_mem->num_pages);
511                 BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386);
512         }
513         return ret;
514 }
515
516 static int
517 nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)
518 {
519         int ret = RING_SPACE(chan, 2);
520         if (ret == 0) {
521                 BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
522                 OUT_RING  (chan, handle);
523         }
524         return ret;
525 }
526
527 static int
528 nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
529                   struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
530 {
531         struct nouveau_mem *node = old_mem->mm_node;
532         u64 src_offset = node->vma[0].offset;
533         u64 dst_offset = node->vma[1].offset;
534         u32 page_count = new_mem->num_pages;
535         int ret;
536
538         while (page_count) {
539                 int line_count = (page_count > 8191) ? 8191 : page_count;
540
541                 ret = RING_SPACE(chan, 11);
542                 if (ret)
543                         return ret;
544
545                 BEGIN_NVC0(chan, NvSubCopy, 0x030c, 8);
546                 OUT_RING  (chan, upper_32_bits(src_offset));
547                 OUT_RING  (chan, lower_32_bits(src_offset));
548                 OUT_RING  (chan, upper_32_bits(dst_offset));
549                 OUT_RING  (chan, lower_32_bits(dst_offset));
550                 OUT_RING  (chan, PAGE_SIZE);
551                 OUT_RING  (chan, PAGE_SIZE);
552                 OUT_RING  (chan, PAGE_SIZE);
553                 OUT_RING  (chan, line_count);
554                 BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
555                 OUT_RING  (chan, 0x00000110);
556
557                 page_count -= line_count;
558                 src_offset += (PAGE_SIZE * line_count);
559                 dst_offset += (PAGE_SIZE * line_count);
560         }
561
562         return 0;
563 }
564
565 static int
566 nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
567                   struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
568 {
569         struct nouveau_mem *node = old_mem->mm_node;
570         u64 src_offset = node->vma[0].offset;
571         u64 dst_offset = node->vma[1].offset;
572         u32 page_count = new_mem->num_pages;
573         int ret;
574
576         while (page_count) {
577                 int line_count = (page_count > 2047) ? 2047 : page_count;
578
579                 ret = RING_SPACE(chan, 12);
580                 if (ret)
581                         return ret;
582
583                 BEGIN_NVC0(chan, NvSubCopy, 0x0238, 2);
584                 OUT_RING  (chan, upper_32_bits(dst_offset));
585                 OUT_RING  (chan, lower_32_bits(dst_offset));
586                 BEGIN_NVC0(chan, NvSubCopy, 0x030c, 6);
587                 OUT_RING  (chan, upper_32_bits(src_offset));
588                 OUT_RING  (chan, lower_32_bits(src_offset));
589                 OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
590                 OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
591                 OUT_RING  (chan, PAGE_SIZE); /* line_length */
592                 OUT_RING  (chan, line_count);
593                 BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
594                 OUT_RING  (chan, 0x00100110);
595
596                 page_count -= line_count;
597                 src_offset += (PAGE_SIZE * line_count);
598                 dst_offset += (PAGE_SIZE * line_count);
599         }
600
601         return 0;
602 }
603
604 static int
605 nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
606                   struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
607 {
608         struct nouveau_mem *node = old_mem->mm_node;
609         u64 src_offset = node->vma[0].offset;
610         u64 dst_offset = node->vma[1].offset;
611         u32 page_count = new_mem->num_pages;
612         int ret;
613
615         while (page_count) {
616                 int line_count = (page_count > 8191) ? 8191 : page_count;
617
618                 ret = RING_SPACE(chan, 11);
619                 if (ret)
620                         return ret;
621
622                 BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
623                 OUT_RING  (chan, upper_32_bits(src_offset));
624                 OUT_RING  (chan, lower_32_bits(src_offset));
625                 OUT_RING  (chan, upper_32_bits(dst_offset));
626                 OUT_RING  (chan, lower_32_bits(dst_offset));
627                 OUT_RING  (chan, PAGE_SIZE);
628                 OUT_RING  (chan, PAGE_SIZE);
629                 OUT_RING  (chan, PAGE_SIZE);
630                 OUT_RING  (chan, line_count);
631                 BEGIN_NV04(chan, NvSubCopy, 0x0300, 1);
632                 OUT_RING  (chan, 0x00000110);
633
634                 page_count -= line_count;
635                 src_offset += (PAGE_SIZE * line_count);
636                 dst_offset += (PAGE_SIZE * line_count);
637         }
638
639         return 0;
640 }
641
642 static int
643 nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
644                   struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
645 {
646         struct nouveau_mem *node = old_mem->mm_node;
647         int ret = RING_SPACE(chan, 7);
648         if (ret == 0) {
649                 BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
650                 OUT_RING  (chan, upper_32_bits(node->vma[0].offset));
651                 OUT_RING  (chan, lower_32_bits(node->vma[0].offset));
652                 OUT_RING  (chan, upper_32_bits(node->vma[1].offset));
653                 OUT_RING  (chan, lower_32_bits(node->vma[1].offset));
654                 OUT_RING  (chan, 0x00000000 /* COPY */);
655                 OUT_RING  (chan, new_mem->num_pages << PAGE_SHIFT);
656         }
657         return ret;
658 }
659
660 static int
661 nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
662                   struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
663 {
664         struct nouveau_mem *node = old_mem->mm_node;
665         int ret = RING_SPACE(chan, 7);
666         if (ret == 0) {
667                 BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
668                 OUT_RING  (chan, new_mem->num_pages << PAGE_SHIFT);
669                 OUT_RING  (chan, upper_32_bits(node->vma[0].offset));
670                 OUT_RING  (chan, lower_32_bits(node->vma[0].offset));
671                 OUT_RING  (chan, upper_32_bits(node->vma[1].offset));
672                 OUT_RING  (chan, lower_32_bits(node->vma[1].offset));
673                 OUT_RING  (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */);
674         }
675         return ret;
676 }
677
678 static int
679 nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
680 {
681         int ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000,
682                                          &chan->m2mf_ntfy);
683         if (ret == 0) {
684                 ret = RING_SPACE(chan, 6);
685                 if (ret == 0) {
686                         BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
687                         OUT_RING  (chan, handle);
688                         BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
689                         OUT_RING  (chan, NvNotify0);
690                         OUT_RING  (chan, NvDmaFB);
691                         OUT_RING  (chan, NvDmaFB);
692                 } else {
693                         nouveau_ramht_remove(chan, NvNotify0);
694                 }
695         }
696
697         return ret;
698 }
699
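/* NV50 M2MF copy.  Each side is programmed as either a linear (pitch)
 * or a tiled surface depending on whether it lives in VRAM with a
 * tiling layout, and data is moved in chunks of up to 4MiB using a
 * 64-byte line stride.
 */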
700 static int
701 nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
702                   struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
703 {
704         struct nouveau_mem *node = old_mem->mm_node;
705         struct nouveau_bo *nvbo = nouveau_bo(bo);
706         u64 length = (new_mem->num_pages << PAGE_SHIFT);
707         u64 src_offset = node->vma[0].offset;
708         u64 dst_offset = node->vma[1].offset;
709         int ret;
710
711         while (length) {
712                 u32 amount, stride, height;
713
714                 amount  = min(length, (u64)(4 * 1024 * 1024));
715                 stride  = 16 * 4;
716                 height  = amount / stride;
717
718                 if (new_mem->mem_type == TTM_PL_VRAM &&
719                     nouveau_bo_tile_layout(nvbo)) {
720                         ret = RING_SPACE(chan, 8);
721                         if (ret)
722                                 return ret;
723
724                         BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);
725                         OUT_RING  (chan, 0);
726                         OUT_RING  (chan, 0);
727                         OUT_RING  (chan, stride);
728                         OUT_RING  (chan, height);
729                         OUT_RING  (chan, 1);
730                         OUT_RING  (chan, 0);
731                         OUT_RING  (chan, 0);
732                 } else {
733                         ret = RING_SPACE(chan, 2);
734                         if (ret)
735                                 return ret;
736
737                         BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
738                         OUT_RING  (chan, 1);
739                 }
740                 if (old_mem->mem_type == TTM_PL_VRAM &&
741                     nouveau_bo_tile_layout(nvbo)) {
742                         ret = RING_SPACE(chan, 8);
743                         if (ret)
744                                 return ret;
745
746                         BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);
747                         OUT_RING  (chan, 0);
748                         OUT_RING  (chan, 0);
749                         OUT_RING  (chan, stride);
750                         OUT_RING  (chan, height);
751                         OUT_RING  (chan, 1);
752                         OUT_RING  (chan, 0);
753                         OUT_RING  (chan, 0);
754                 } else {
755                         ret = RING_SPACE(chan, 2);
756                         if (ret)
757                                 return ret;
758
759                         BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);
760                         OUT_RING  (chan, 1);
761                 }
762
763                 ret = RING_SPACE(chan, 14);
764                 if (ret)
765                         return ret;
766
767                 BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
768                 OUT_RING  (chan, upper_32_bits(src_offset));
769                 OUT_RING  (chan, upper_32_bits(dst_offset));
770                 BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
771                 OUT_RING  (chan, lower_32_bits(src_offset));
772                 OUT_RING  (chan, lower_32_bits(dst_offset));
773                 OUT_RING  (chan, stride);
774                 OUT_RING  (chan, stride);
775                 OUT_RING  (chan, stride);
776                 OUT_RING  (chan, height);
777                 OUT_RING  (chan, 0x00000101);
778                 OUT_RING  (chan, 0x00000000);
779                 BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
780                 OUT_RING  (chan, 0);
781
782                 length -= amount;
783                 src_offset += amount;
784                 dst_offset += amount;
785         }
786
787         return 0;
788 }
789
790 static int
791 nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
792 {
793         int ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000,
794                                          &chan->m2mf_ntfy);
795         if (ret == 0) {
796                 ret = RING_SPACE(chan, 4);
797                 if (ret == 0) {
798                         BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
799                         OUT_RING  (chan, handle);
800                         BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
801                         OUT_RING  (chan, NvNotify0);
802                 }
803         }
804
805         return ret;
806 }
807
808 static inline uint32_t
809 nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
810                       struct nouveau_channel *chan, struct ttm_mem_reg *mem)
811 {
812         if (mem->mem_type == TTM_PL_TT)
813                 return chan->gart_handle;
814         return chan->vram_handle;
815 }
816
817 static int
818 nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
819                   struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
820 {
821         u32 src_offset = old_mem->start << PAGE_SHIFT;
822         u32 dst_offset = new_mem->start << PAGE_SHIFT;
823         u32 page_count = new_mem->num_pages;
824         int ret;
825
826         ret = RING_SPACE(chan, 3);
827         if (ret)
828                 return ret;
829
830         BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
831         OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
832         OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));
833
835         while (page_count) {
836                 int line_count = (page_count > 2047) ? 2047 : page_count;
837
838                 ret = RING_SPACE(chan, 11);
839                 if (ret)
840                         return ret;
841
842                 BEGIN_NV04(chan, NvSubCopy,
843                                  NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
844                 OUT_RING  (chan, src_offset);
845                 OUT_RING  (chan, dst_offset);
846                 OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
847                 OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
848                 OUT_RING  (chan, PAGE_SIZE); /* line_length */
849                 OUT_RING  (chan, line_count);
850                 OUT_RING  (chan, 0x00000101);
851                 OUT_RING  (chan, 0x00000000);
852                 BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
853                 OUT_RING  (chan, 0);
854
855                 page_count -= line_count;
856                 src_offset += (PAGE_SIZE * line_count);
857                 dst_offset += (PAGE_SIZE * line_count);
858         }
859
860         return 0;
861 }
862
863 static int
864 nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
865                    struct ttm_mem_reg *mem, struct nouveau_vma *vma)
866 {
867         struct nouveau_mem *node = mem->mm_node;
868         int ret;
869
870         ret = nouveau_vm_get(chan->vm, mem->num_pages << PAGE_SHIFT,
871                              node->page_shift, NV_MEM_ACCESS_RO, vma);
872         if (ret)
873                 return ret;
874
875         if (mem->mem_type == TTM_PL_VRAM)
876                 nouveau_vm_map(vma, node);
877         else
878                 nouveau_vm_map_sg(vma, 0, mem->num_pages << PAGE_SHIFT, node);
879
880         return 0;
881 }
882
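/* Perform an accelerated move on the kernel channel: take the channel
 * mutex, set up the temporary VMAs described below on NV50+, dispatch
 * to the copy hook installed by nouveau_bo_move_init() and fence the
 * result via nouveau_bo_move_accel_cleanup().
 */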
883 static int
884 nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
885                      bool no_wait_reserve, bool no_wait_gpu,
886                      struct ttm_mem_reg *new_mem)
887 {
888         struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
889         struct nouveau_channel *chan = dev_priv->channel;
890         struct nouveau_bo *nvbo = nouveau_bo(bo);
891         struct ttm_mem_reg *old_mem = &bo->mem;
892         int ret;
893
894         mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
895
896         /* create temporary vmas for the transfer and attach them to the
897          * old nouveau_mem node, these will get cleaned up after ttm has
898          * destroyed the ttm_mem_reg
899          */
900         if (dev_priv->card_type >= NV_50) {
901                 struct nouveau_mem *node = old_mem->mm_node;
902
903                 ret = nouveau_vma_getmap(chan, nvbo, old_mem, &node->vma[0]);
904                 if (ret)
905                         goto out;
906
907                 ret = nouveau_vma_getmap(chan, nvbo, new_mem, &node->vma[1]);
908                 if (ret)
909                         goto out;
910         }
911
912         ret = dev_priv->ttm.move(chan, bo, &bo->mem, new_mem);
913         if (ret == 0) {
914                 ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
915                                                     no_wait_reserve,
916                                                     no_wait_gpu, new_mem);
917         }
918
919 out:
920         mutex_unlock(&chan->mutex);
921         return ret;
922 }
923
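/* Probe the candidate copy classes in listed order and install the
 * first one that can be created and initialised as the ttm.move hook;
 * if none works, moves fall back to CPU copies.  Note that the empty
 * initialiser terminates the probe loop, so the final 0x88b4 CRYPT
 * entry is never tried.
 */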
924 void
925 nouveau_bo_move_init(struct nouveau_channel *chan)
926 {
927         struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
928         static const struct {
929                 const char *name;
930                 int engine;
931                 u32 oclass;
932                 int (*exec)(struct nouveau_channel *,
933                             struct ttm_buffer_object *,
934                             struct ttm_mem_reg *, struct ttm_mem_reg *);
935                 int (*init)(struct nouveau_channel *, u32 handle);
936         } _methods[] = {
937                 {  "COPY", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
938                 { "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
939                 { "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
940                 {  "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
941                 { "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init },
942                 {  "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init },
943                 {  "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
944                 {  "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
945                 {},
946                 { "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init },
947         }, *mthd = _methods;
948         const char *name = "CPU";
949         int ret;
950
951         do {
952                 u32 handle = (mthd->engine << 16) | mthd->oclass;
953                 ret = nouveau_gpuobj_gr_new(chan, handle, mthd->oclass);
954                 if (ret == 0) {
955                         ret = mthd->init(chan, handle);
956                         if (ret == 0) {
957                                 dev_priv->ttm.move = mthd->exec;
958                                 name = mthd->name;
959                                 break;
960                         }
961                 }
962         } while ((++mthd)->exec);
963
964         NV_INFO(chan->dev, "MM: using %s for buffer copies\n", name);
965 }
966
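/* VRAM->SYSTEM moves are done in two hops: the bo is first copied into
 * a temporary GART (TT) placement with the accelerated path and then
 * moved to system memory with ttm_bo_move_ttm().
 * nouveau_bo_move_flips() below is the mirror image for SYSTEM->VRAM.
 */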
967 static int
968 nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
969                       bool no_wait_reserve, bool no_wait_gpu,
970                       struct ttm_mem_reg *new_mem)
971 {
972         u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
973         struct ttm_placement placement;
974         struct ttm_mem_reg tmp_mem;
975         int ret;
976
977         placement.fpfn = placement.lpfn = 0;
978         placement.num_placement = placement.num_busy_placement = 1;
979         placement.placement = placement.busy_placement = &placement_memtype;
980
981         tmp_mem = *new_mem;
982         tmp_mem.mm_node = NULL;
983         ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
984         if (ret)
985                 return ret;
986
987         ret = ttm_tt_bind(bo->ttm, &tmp_mem);
988         if (ret)
989                 goto out;
990
991         ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
992         if (ret)
993                 goto out;
994
995         ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
996 out:
997         ttm_bo_mem_put(bo, &tmp_mem);
998         return ret;
999 }
1000
1001 static int
1002 nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
1003                       bool no_wait_reserve, bool no_wait_gpu,
1004                       struct ttm_mem_reg *new_mem)
1005 {
1006         u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
1007         struct ttm_placement placement;
1008         struct ttm_mem_reg tmp_mem;
1009         int ret;
1010
1011         placement.fpfn = placement.lpfn = 0;
1012         placement.num_placement = placement.num_busy_placement = 1;
1013         placement.placement = placement.busy_placement = &placement_memtype;
1014
1015         tmp_mem = *new_mem;
1016         tmp_mem.mm_node = NULL;
1017         ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
1018         if (ret)
1019                 return ret;
1020
1021         ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
1022         if (ret)
1023                 goto out;
1024
1025         ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, new_mem);
1028
1029 out:
1030         ttm_bo_mem_put(bo, &tmp_mem);
1031         return ret;
1032 }
1033
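/* Move notification: keep every vma attached to the bo coherent with
 * its new backing store.  VRAM gets a direct map, GART backing with
 * matching small pages gets an sg-table or page-list map, anything
 * else is unmapped.
 */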
1034 static void
1035 nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
1036 {
1037         struct nouveau_bo *nvbo = nouveau_bo(bo);
1038         struct nouveau_vma *vma;
1039
1040         /* ttm can now (stupidly) pass the driver bos it didn't create... */
1041         if (bo->destroy != nouveau_bo_del_ttm)
1042                 return;
1043
1044         list_for_each_entry(vma, &nvbo->vma_list, head) {
1045                 if (new_mem && new_mem->mem_type == TTM_PL_VRAM) {
1046                         nouveau_vm_map(vma, new_mem->mm_node);
1047                 } else
1048                 if (new_mem && new_mem->mem_type == TTM_PL_TT &&
1049                     nvbo->page_shift == nvvm_spg_shift(vma->vm)) {
1050                         if (((struct nouveau_mem *)new_mem->mm_node)->sg)
1051                                 nouveau_vm_map_sg_table(vma, 0, new_mem->
1052                                                   num_pages << PAGE_SHIFT,
1053                                                   new_mem->mm_node);
1054                         else
1055                                 nouveau_vm_map_sg(vma, 0, new_mem->
1056                                                   num_pages << PAGE_SHIFT,
1057                                                   new_mem->mm_node);
1058                 } else {
1059                         nouveau_vm_unmap(vma);
1060                 }
1061         }
1062 }
1063
1064 static int
1065 nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
1066                    struct nouveau_tile_reg **new_tile)
1067 {
1068         struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
1069         struct drm_device *dev = dev_priv->dev;
1070         struct nouveau_bo *nvbo = nouveau_bo(bo);
1071         u64 offset = new_mem->start << PAGE_SHIFT;
1072
1073         *new_tile = NULL;
1074         if (new_mem->mem_type != TTM_PL_VRAM)
1075                 return 0;
1076
1077         if (dev_priv->card_type >= NV_10) {
1078                 *new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
1079                                                 nvbo->tile_mode,
1080                                                 nvbo->tile_flags);
1081         }
1082
1083         return 0;
1084 }
1085
1086 static void
1087 nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
1088                       struct nouveau_tile_reg *new_tile,
1089                       struct nouveau_tile_reg **old_tile)
1090 {
1091         struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
1092         struct drm_device *dev = dev_priv->dev;
1093
1094         nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj);
1095         *old_tile = new_tile;
1096 }
1097
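/* Top-level TTM move hook.  Pre-NV50 the VRAM tile region is updated
 * around the move; a system bo without populated ttm pages just adopts
 * the new ttm_mem_reg; otherwise the accelerated path is tried and
 * ttm_bo_move_memcpy() is the fallback.
 */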
1098 static int
1099 nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
1100                 bool no_wait_reserve, bool no_wait_gpu,
1101                 struct ttm_mem_reg *new_mem)
1102 {
1103         struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
1104         struct nouveau_bo *nvbo = nouveau_bo(bo);
1105         struct ttm_mem_reg *old_mem = &bo->mem;
1106         struct nouveau_tile_reg *new_tile = NULL;
1107         int ret = 0;
1108
1109         if (dev_priv->card_type < NV_50) {
1110                 ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
1111                 if (ret)
1112                         return ret;
1113         }
1114
1115         /* Fake bo copy. */
1116         if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
1117                 BUG_ON(bo->mem.mm_node != NULL);
1118                 bo->mem = *new_mem;
1119                 new_mem->mm_node = NULL;
1120                 goto out;
1121         }
1122
1123         /* CPU copy if we have no accelerated method available */
1124         if (!dev_priv->ttm.move) {
1125                 ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
1126                 goto out;
1127         }
1128
1129         /* Hardware assisted copy. */
1130         if (new_mem->mem_type == TTM_PL_SYSTEM)
1131                 ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
1132         else if (old_mem->mem_type == TTM_PL_SYSTEM)
1133                 ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
1134         else
1135                 ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
1136
1137         if (!ret)
1138                 goto out;
1139
1140         /* Fallback to software copy. */
1141         ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
1142
1143 out:
1144         if (dev_priv->card_type < NV_50) {
1145                 if (ret)
1146                         nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
1147                 else
1148                         nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
1149         }
1150
1151         return ret;
1152 }
1153
1154 static int
1155 nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
1156 {
1157         return 0;
1158 }
1159
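/* Describe how each memory type is reached by the CPU: system memory
 * needs no bus address, an AGP GART maps through its aperture, and
 * VRAM maps through BAR1 (on NV50+ via a BAR VMA from nvbar_map()).
 */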
1160 static int
1161 nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1162 {
1163         struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
1164         struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
1165         struct drm_device *dev = dev_priv->dev;
1166         int ret;
1167
1168         mem->bus.addr = NULL;
1169         mem->bus.offset = 0;
1170         mem->bus.size = mem->num_pages << PAGE_SHIFT;
1171         mem->bus.base = 0;
1172         mem->bus.is_iomem = false;
1173         if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
1174                 return -EINVAL;
1175         switch (mem->mem_type) {
1176         case TTM_PL_SYSTEM:
1177                 /* System memory */
1178                 return 0;
1179         case TTM_PL_TT:
1180 #if __OS_HAS_AGP
1181                 if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
1182                         mem->bus.offset = mem->start << PAGE_SHIFT;
1183                         mem->bus.base = dev_priv->gart_info.aper_base;
1184                         mem->bus.is_iomem = true;
1185                 }
1186 #endif
1187                 break;
1188         case TTM_PL_VRAM:
1189                 mem->bus.offset = mem->start << PAGE_SHIFT;
1190                 mem->bus.base = pci_resource_start(dev->pdev, 1);
1191                 mem->bus.is_iomem = true;
1192                 if (dev_priv->card_type >= NV_50) {
1193                         struct nouveau_mem *node = mem->mm_node;
1194
1195                         ret = nvbar_map(dev, node, NV_MEM_ACCESS_RW,
1196                                         &node->bar_vma);
1197                         if (ret)
1198                                 return ret;
1199
1200                         mem->bus.offset = node->bar_vma.offset;
1201                 }
1202                 break;
1203         default:
1204                 return -EINVAL;
1205         }
1206         return 0;
1207 }
1208
1209 static void
1210 nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1211 {
1212         struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
1213         struct nouveau_mem *node = mem->mm_node;
1214
1215         if (mem->mem_type != TTM_PL_VRAM)
1216                 return;
1217
1218         if (!node->bar_vma.node)
1219                 return;
1220
1221         nvbar_unmap(dev_priv->dev, &node->bar_vma);
1222 }
1223
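/* CPU fault hook: bos that are outside VRAM and either untiled or on
 * pre-NV50 hardware need no work; anything else not entirely inside
 * the CPU-mappable part of VRAM is revalidated with its placement
 * limited to the mappable range.
 */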
1224 static int
1225 nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
1226 {
1227         struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
1228         struct nouveau_bo *nvbo = nouveau_bo(bo);
1229
1230         /* as long as the bo isn't in vram, and isn't tiled, we've got
1231          * nothing to do here.
1232          */
1233         if (bo->mem.mem_type != TTM_PL_VRAM) {
1234                 if (dev_priv->card_type < NV_50 ||
1235                     !nouveau_bo_tile_layout(nvbo))
1236                         return 0;
1237         }
1238
1239         /* make sure bo is in mappable vram */
1240         if (bo->mem.start + bo->mem.num_pages < dev_priv->fb_mappable_pages)
1241                 return 0;
1242
1243
1244         nvbo->placement.fpfn = 0;
1245         nvbo->placement.lpfn = dev_priv->fb_mappable_pages;
1246         nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
1247         return nouveau_bo_validate(nvbo, false, true, false);
1248 }
1249
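/* Allocate and DMA-map the backing pages.  Imported (prime/SG) objects
 * only have their page and DMA-address arrays filled from the sg
 * table; AGP and SWIOTLB configurations use the matching TTM pool
 * helpers; the generic path maps each page with pci_map_page() and
 * unwinds on failure.
 */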
1250 static int
1251 nouveau_ttm_tt_populate(struct ttm_tt *ttm)
1252 {
1253         struct ttm_dma_tt *ttm_dma = (void *)ttm;
1254         struct drm_nouveau_private *dev_priv;
1255         struct drm_device *dev;
1256         unsigned i;
1257         int r;
1258         bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
1259
1260         if (ttm->state != tt_unpopulated)
1261                 return 0;
1262
1263         if (slave && ttm->sg) {
1264                 /* make userspace faulting work */
1265                 drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
1266                                                  ttm_dma->dma_address, ttm->num_pages);
1267                 ttm->state = tt_unbound;
1268                 return 0;
1269         }
1270
1271         dev_priv = nouveau_bdev(ttm->bdev);
1272         dev = dev_priv->dev;
1273
1274 #if __OS_HAS_AGP
1275         if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
1276                 return ttm_agp_tt_populate(ttm);
1277         }
1278 #endif
1279
1280 #ifdef CONFIG_SWIOTLB
1281         if (swiotlb_nr_tbl()) {
1282                 return ttm_dma_populate((void *)ttm, dev->dev);
1283         }
1284 #endif
1285
1286         r = ttm_pool_populate(ttm);
1287         if (r) {
1288                 return r;
1289         }
1290
1291         for (i = 0; i < ttm->num_pages; i++) {
1292                 ttm_dma->dma_address[i] = pci_map_page(dev->pdev, ttm->pages[i],
1293                                                    0, PAGE_SIZE,
1294                                                    PCI_DMA_BIDIRECTIONAL);
1295                 if (pci_dma_mapping_error(dev->pdev, ttm_dma->dma_address[i])) {
1296                         while (i--) {
1297                                 pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
1298                                                PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
1299                                 ttm_dma->dma_address[i] = 0;
1300                         }
1301                         ttm_pool_unpopulate(ttm);
1302                         return -EFAULT;
1303                 }
1304         }
1305         return 0;
1306 }
1307
1308 static void
1309 nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
1310 {
1311         struct ttm_dma_tt *ttm_dma = (void *)ttm;
1312         struct drm_nouveau_private *dev_priv;
1313         struct drm_device *dev;
1314         unsigned i;
1315         bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
1316
1317         if (slave)
1318                 return;
1319
1320         dev_priv = nouveau_bdev(ttm->bdev);
1321         dev = dev_priv->dev;
1322
1323 #if __OS_HAS_AGP
1324         if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
1325                 ttm_agp_tt_unpopulate(ttm);
1326                 return;
1327         }
1328 #endif
1329
1330 #ifdef CONFIG_SWIOTLB
1331         if (swiotlb_nr_tbl()) {
1332                 ttm_dma_unpopulate((void *)ttm, dev->dev);
1333                 return;
1334         }
1335 #endif
1336
1337         for (i = 0; i < ttm->num_pages; i++) {
1338                 if (ttm_dma->dma_address[i]) {
1339                         pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
1340                                        PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
1341                 }
1342         }
1343
1344         ttm_pool_unpopulate(ttm);
1345 }
1346
1347 void
1348 nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
1349 {
1350         struct nouveau_fence *old_fence = NULL;
1351
1352         if (likely(fence))
1353                 nouveau_fence_ref(fence);
1354
1355         spin_lock(&nvbo->bo.bdev->fence_lock);
1356         old_fence = nvbo->bo.sync_obj;
1357         nvbo->bo.sync_obj = fence;
1358         spin_unlock(&nvbo->bo.bdev->fence_lock);
1359
1360         nouveau_fence_unref(&old_fence);
1361 }
1362
1363 static void
1364 nouveau_bo_fence_unref(void **sync_obj)
1365 {
1366         nouveau_fence_unref((struct nouveau_fence **)sync_obj);
1367 }
1368
1369 static void *
1370 nouveau_bo_fence_ref(void *sync_obj)
1371 {
1372         return nouveau_fence_ref(sync_obj);
1373 }
1374
1375 static bool
1376 nouveau_bo_fence_signalled(void *sync_obj, void *sync_arg)
1377 {
1378         return nouveau_fence_done(sync_obj);
1379 }
1380
1381 static int
1382 nouveau_bo_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
1383 {
1384         return nouveau_fence_wait(sync_obj, lazy, intr);
1385 }
1386
1387 static int
1388 nouveau_bo_fence_flush(void *sync_obj, void *sync_arg)
1389 {
1390         return 0;
1391 }
1392
1393 struct ttm_bo_driver nouveau_bo_driver = {
1394         .ttm_tt_create = &nouveau_ttm_tt_create,
1395         .ttm_tt_populate = &nouveau_ttm_tt_populate,
1396         .ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
1397         .invalidate_caches = nouveau_bo_invalidate_caches,
1398         .init_mem_type = nouveau_bo_init_mem_type,
1399         .evict_flags = nouveau_bo_evict_flags,
1400         .move_notify = nouveau_bo_move_ntfy,
1401         .move = nouveau_bo_move,
1402         .verify_access = nouveau_bo_verify_access,
1403         .sync_obj_signaled = nouveau_bo_fence_signalled,
1404         .sync_obj_wait = nouveau_bo_fence_wait,
1405         .sync_obj_flush = nouveau_bo_fence_flush,
1406         .sync_obj_unref = nouveau_bo_fence_unref,
1407         .sync_obj_ref = nouveau_bo_fence_ref,
1408         .fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
1409         .io_mem_reserve = &nouveau_ttm_io_mem_reserve,
1410         .io_mem_free = &nouveau_ttm_io_mem_free,
1411 };
1412
1413 struct nouveau_vma *
1414 nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nouveau_vm *vm)
1415 {
1416         struct nouveau_vma *vma;
1417         list_for_each_entry(vma, &nvbo->vma_list, head) {
1418                 if (vma->vm == vm)
1419                         return vma;
1420         }
1421
1422         return NULL;
1423 }
1424
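/* Allocate GPU virtual address space for the bo in the given VM and,
 * if it currently has VRAM or GART backing, map it right away.  The
 * vma is added to the bo's list so nouveau_bo_move_ntfy() keeps it up
 * to date across moves.
 */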
1425 int
1426 nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
1427                    struct nouveau_vma *vma)
1428 {
1429         const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
1430         struct nouveau_mem *node = nvbo->bo.mem.mm_node;
1431         int ret;
1432
1433         ret = nouveau_vm_get(vm, size, nvbo->page_shift,
1434                              NV_MEM_ACCESS_RW, vma);
1435         if (ret)
1436                 return ret;
1437
1438         if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
1439                 nouveau_vm_map(vma, nvbo->bo.mem.mm_node);
1440         else if (nvbo->bo.mem.mem_type == TTM_PL_TT) {
1441                 if (node->sg)
1442                         nouveau_vm_map_sg_table(vma, 0, size, node);
1443                 else
1444                         nouveau_vm_map_sg(vma, 0, size, node);
1445         }
1446
1447         list_add_tail(&vma->head, &nvbo->vma_list);
1448         vma->refcount = 1;
1449         return 0;
1450 }
1451
1452 void
1453 nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
1454 {
1455         if (vma->node) {
1456                 if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM) {
1457                         spin_lock(&nvbo->bo.bdev->fence_lock);
1458                         ttm_bo_wait(&nvbo->bo, false, false, false);
1459                         spin_unlock(&nvbo->bo.bdev->fence_lock);
1460                         nouveau_vm_unmap(vma);
1461                 }
1462
1463                 nouveau_vm_put(vma);
1464                 list_del(&vma->head);
1465         }
1466 }