/*
 * Copyright (C) 2008 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/dma-buf.h>
#include <drm/drmP.h>

#include "nouveau_drv.h"
#include <drm/nouveau_drm.h>
#include "nouveau_dma.h"
#include "nouveau_fence.h"

#define nouveau_gem_pushbuf_sync(chan) 0

int
nouveau_gem_object_new(struct drm_gem_object *gem)
{
        return 0;
}

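/*
 * Destructor for a nouveau GEM object: drops any leftover pin reference,
 * tears down a PRIME import attachment if one exists, releases the
 * underlying TTM buffer object reference, and frees the GEM wrapper.
 */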
void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
        struct nouveau_bo *nvbo = gem->driver_private;
        struct ttm_buffer_object *bo;

        if (!nvbo)
                return;
        nvbo->gem = NULL;

        bo = &nvbo->bo;
        if (unlikely(nvbo->pin_refcnt)) {
                nvbo->pin_refcnt = 1;
                nouveau_bo_unpin(nvbo);
        }

        if (gem->import_attach)
                drm_prime_gem_destroy(gem, nvbo->bo.sg);

        ttm_bo_unref(&bo);

        drm_gem_object_release(gem);
        kfree(gem);
}

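/*
 * Called when a client opens a handle to this object.  On chips with
 * per-client virtual memory, look up (or create) the mapping of this
 * buffer into the client's address space and take a reference on it.
 */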
int
nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
        struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);
        struct nouveau_vma *vma;
        int ret;

        if (!fpriv->vm)
                return 0;

        ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
        if (ret)
                return ret;

        vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
        if (!vma) {
                vma = kzalloc(sizeof(*vma), GFP_KERNEL);
                if (!vma) {
                        ret = -ENOMEM;
                        goto out;
                }

                ret = nouveau_bo_vma_add(nvbo, fpriv->vm, vma);
                if (ret) {
                        kfree(vma);
                        goto out;
                }
        } else {
                vma->refcount++;
        }

out:
        ttm_bo_unreserve(&nvbo->bo);
        return ret;
}

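/*
 * Called when a client closes its handle to this object.  Drop the
 * client's reference on the per-VM mapping and remove the mapping once
 * the last reference goes away.
 */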
void
nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
        struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);
        struct nouveau_vma *vma;
        int ret;

        if (!fpriv->vm)
                return;

        ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
        if (ret)
                return;

        vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
        if (vma) {
                if (--vma->refcount == 0) {
                        nouveau_bo_vma_del(nvbo, vma);
                        kfree(vma);
                }
        }
        ttm_bo_unreserve(&nvbo->bo);
}

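/*
 * Allocate a new buffer object and wrap it in a GEM object.  The
 * requested NOUVEAU_GEM_DOMAIN_* mask is translated into TTM placement
 * flags; a CPU-only (or empty) domain falls back to system memory.
 */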
int
nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
                uint32_t tile_mode, uint32_t tile_flags,
                struct nouveau_bo **pnvbo)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_bo *nvbo;
        u32 flags = 0;
        int ret;

        if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
                flags |= TTM_PL_FLAG_VRAM;
        if (domain & NOUVEAU_GEM_DOMAIN_GART)
                flags |= TTM_PL_FLAG_TT;
        if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
                flags |= TTM_PL_FLAG_SYSTEM;

        ret = nouveau_bo_new(dev, size, align, flags, tile_mode,
                             tile_flags, NULL, pnvbo);
        if (ret)
                return ret;
        nvbo = *pnvbo;

        /* we restrict allowed domains on nv50+ to only the types
         * that were requested at creation time.  not possible on
         * earlier chips without busting the ABI.
         */
        nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
                              NOUVEAU_GEM_DOMAIN_GART;
        if (dev_priv->card_type >= NV_50)
                nvbo->valid_domains &= domain;

        nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
        if (!nvbo->gem) {
                nouveau_bo_ref(NULL, pnvbo);
                return -ENOMEM;
        }

        nvbo->bo.persistent_swap_storage = nvbo->gem->filp;
        nvbo->gem->driver_private = nvbo;
        return 0;
}

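/*
 * Fill in a drm_nouveau_gem_info reply for userspace: current placement
 * domain, GPU offset (the per-client virtual address when a VM is in
 * use), size, mmap handle and tiling parameters.
 */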
static int
nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
                 struct drm_nouveau_gem_info *rep)
{
        struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);
        struct nouveau_vma *vma;

        if (nvbo->bo.mem.mem_type == TTM_PL_TT)
                rep->domain = NOUVEAU_GEM_DOMAIN_GART;
        else
                rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;

        rep->offset = nvbo->bo.offset;
        if (fpriv->vm) {
                vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
                if (!vma)
                        return -EINVAL;

                rep->offset = vma->offset;
        }

        rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
        rep->map_handle = nvbo->bo.addr_space_offset;
        rep->tile_mode = nvbo->tile_mode;
        rep->tile_flags = nvbo->tile_flags;
        return 0;
}

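/*
 * DRM_NOUVEAU_GEM_NEW ioctl: validate the requested tile flags, allocate
 * the buffer, create a handle for the calling client and fill in the
 * buffer info.  The handle holds the only reference once this returns.
 *
 * Illustrative userspace sketch (not part of this file; assumes a file
 * descriptor opened on a nouveau DRM node and libdrm's drmIoctl()):
 *
 *        struct drm_nouveau_gem_new req = { 0 };
 *        req.info.size   = 1024 * 1024;
 *        req.info.domain = NOUVEAU_GEM_DOMAIN_VRAM;
 *        if (drmIoctl(fd, DRM_IOCTL_NOUVEAU_GEM_NEW, &req) == 0)
 *                printf("new bo handle %u\n", req.info.handle);
 */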
int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct drm_nouveau_gem_new *req = data;
        struct nouveau_bo *nvbo = NULL;
        int ret = 0;

        dev_priv->ttm.bdev.dev_mapping = dev->dev_mapping;

        if (!dev_priv->engine.vram.flags_valid(dev, req->info.tile_flags)) {
                NV_ERROR(dev, "bad page flags: 0x%08x\n", req->info.tile_flags);
                return -EINVAL;
        }

        ret = nouveau_gem_new(dev, req->info.size, req->align,
                              req->info.domain, req->info.tile_mode,
                              req->info.tile_flags, &nvbo);
        if (ret)
                return ret;

        ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
        if (ret == 0) {
                ret = nouveau_gem_info(file_priv, nvbo->gem, &req->info);
                if (ret)
                        drm_gem_handle_delete(file_priv, req->info.handle);
        }

        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(nvbo->gem);
        return ret;
}

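/*
 * Work out the TTM placement for a buffer on the validation list from
 * the domains userspace asked for.  Write domains take priority over
 * read domains, and the buffer's current placement is preferred when it
 * already satisfies the request, to avoid needless migration.
 */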
static int
nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
                       uint32_t write_domains, uint32_t valid_domains)
{
        struct nouveau_bo *nvbo = gem->driver_private;
        struct ttm_buffer_object *bo = &nvbo->bo;
        uint32_t domains = valid_domains & nvbo->valid_domains &
                (write_domains ? write_domains : read_domains);
        uint32_t pref_flags = 0, valid_flags = 0;

        if (!domains)
                return -EINVAL;

        if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
                valid_flags |= TTM_PL_FLAG_VRAM;

        if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
                valid_flags |= TTM_PL_FLAG_TT;

        if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
            bo->mem.mem_type == TTM_PL_VRAM)
                pref_flags |= TTM_PL_FLAG_VRAM;

        else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
                 bo->mem.mem_type == TTM_PL_TT)
                pref_flags |= TTM_PL_FLAG_TT;

        else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
                pref_flags |= TTM_PL_FLAG_VRAM;

        else
                pref_flags |= TTM_PL_FLAG_TT;

        nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);

        return 0;
}

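/*
 * Buffers being validated for a push buffer submission are kept on one
 * of three lists, depending on which domains they are allowed to live
 * in; validate_fini_list() fences, unmaps and unreserves each entry.
 */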
struct validate_op {
        struct list_head vram_list;
        struct list_head gart_list;
        struct list_head both_list;
};

static void
validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
{
        struct list_head *entry, *tmp;
        struct nouveau_bo *nvbo;

        list_for_each_safe(entry, tmp, list) {
                nvbo = list_entry(entry, struct nouveau_bo, entry);

                nouveau_bo_fence(nvbo, fence);

                if (unlikely(nvbo->validate_mapped)) {
                        ttm_bo_kunmap(&nvbo->kmap);
                        nvbo->validate_mapped = false;
                }

                list_del(&nvbo->entry);
                nvbo->reserved_by = NULL;
                ttm_bo_unreserve(&nvbo->bo);
                drm_gem_object_unreference_unlocked(nvbo->gem);
        }
}

static void
validate_fini(struct validate_op *op, struct nouveau_fence *fence)
{
        validate_fini_list(&op->vram_list, fence);
        validate_fini_list(&op->gart_list, fence);
        validate_fini_list(&op->both_list, fence);
}

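/*
 * Look up every buffer on the submission's buffer list and reserve it
 * against concurrent use.  If reservation would deadlock (-EAGAIN), all
 * reservations taken so far are dropped, we wait for the contended
 * buffer to become free and the whole pass is retried.  Each buffer is
 * sorted onto the VRAM, GART or "either" list for later validation.
 */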
static int
validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
              struct drm_nouveau_gem_pushbuf_bo *pbbo,
              int nr_buffers, struct validate_op *op)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        uint32_t sequence;
        int trycnt = 0;
        int ret, i;

        sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
retry:
        if (++trycnt > 100000) {
                NV_ERROR(dev, "%s failed and gave up.\n", __func__);
                return -EINVAL;
        }

        for (i = 0; i < nr_buffers; i++) {
                struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
                struct drm_gem_object *gem;
                struct nouveau_bo *nvbo;

                gem = drm_gem_object_lookup(dev, file_priv, b->handle);
                if (!gem) {
                        NV_ERROR(dev, "Unknown handle 0x%08x\n", b->handle);
                        validate_fini(op, NULL);
                        return -ENOENT;
                }
                nvbo = gem->driver_private;

                if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
                        NV_ERROR(dev, "multiple instances of buffer %d on "
                                      "validation list\n", b->handle);
                        drm_gem_object_unreference_unlocked(gem);
                        validate_fini(op, NULL);
                        return -EINVAL;
                }

                ret = ttm_bo_reserve(&nvbo->bo, true, false, true, sequence);
                if (ret) {
                        validate_fini(op, NULL);
                        if (unlikely(ret == -EAGAIN))
                                ret = ttm_bo_wait_unreserved(&nvbo->bo, true);
                        drm_gem_object_unreference_unlocked(gem);
                        if (unlikely(ret)) {
                                if (ret != -ERESTARTSYS)
                                        NV_ERROR(dev, "fail reserve\n");
                                return ret;
                        }
                        goto retry;
                }

                b->user_priv = (uint64_t)(unsigned long)nvbo;
                nvbo->reserved_by = file_priv;
                nvbo->pbbo_index = i;
                if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
                    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
                        list_add_tail(&nvbo->entry, &op->both_list);
                else
                if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
                        list_add_tail(&nvbo->entry, &op->vram_list);
                else
                if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
                        list_add_tail(&nvbo->entry, &op->gart_list);
                else {
                        NV_ERROR(dev, "invalid valid domains: 0x%08x\n",
                                 b->valid_domains);
                        list_add_tail(&nvbo->entry, &op->both_list);
                        validate_fini(op, NULL);
                        return -EINVAL;
                }
        }

        return 0;
}

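/*
 * Make the channel wait for any fence currently attached to the buffer,
 * so work queued on other channels completes before this one touches it.
 */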
static int
validate_sync(struct nouveau_channel *chan, struct nouveau_bo *nvbo)
{
        struct nouveau_fence *fence = NULL;
        int ret = 0;

        spin_lock(&nvbo->bo.bdev->fence_lock);
        if (nvbo->bo.sync_obj)
                fence = nouveau_fence_ref(nvbo->bo.sync_obj);
        spin_unlock(&nvbo->bo.bdev->fence_lock);

        if (fence) {
                ret = nouveau_fence_sync(fence, chan);
                nouveau_fence_unref(&fence);
        }

        return ret;
}

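/*
 * Validate every buffer on one list: sync with outstanding fences, apply
 * the requested placement and let TTM move the buffer if necessary.  On
 * pre-NV50 chips (no per-client VM) the buffer's real offset and domain
 * are copied back into the userspace buffer list whenever the presumed
 * values turned out to be wrong, and the number of such fixups is
 * returned so the caller knows relocations must be applied.
 */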
static int
validate_list(struct nouveau_channel *chan, struct list_head *list,
              struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr)
{
        struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
        struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
                                (void __force __user *)(uintptr_t)user_pbbo_ptr;
        struct drm_device *dev = chan->dev;
        struct nouveau_bo *nvbo;
        int ret, relocs = 0;

        list_for_each_entry(nvbo, list, entry) {
                struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];

                ret = validate_sync(chan, nvbo);
                if (unlikely(ret)) {
                        NV_ERROR(dev, "fail pre-validate sync\n");
                        return ret;
                }

                ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
                                             b->write_domains,
                                             b->valid_domains);
                if (unlikely(ret)) {
                        NV_ERROR(dev, "fail set_domain\n");
                        return ret;
                }

                ret = nouveau_bo_validate(nvbo, true, false, false);
                if (unlikely(ret)) {
                        if (ret != -ERESTARTSYS)
                                NV_ERROR(dev, "fail ttm_validate\n");
                        return ret;
                }

                ret = validate_sync(chan, nvbo);
                if (unlikely(ret)) {
                        NV_ERROR(dev, "fail post-validate sync\n");
                        return ret;
                }

                if (dev_priv->card_type < NV_50) {
                        if (nvbo->bo.offset == b->presumed.offset &&
                            ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
                              b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
                             (nvbo->bo.mem.mem_type == TTM_PL_TT &&
                              b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
                                continue;

                        if (nvbo->bo.mem.mem_type == TTM_PL_TT)
                                b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
                        else
                                b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
                        b->presumed.offset = nvbo->bo.offset;
                        b->presumed.valid = 0;
                        relocs++;

                        if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed,
                                             &b->presumed, sizeof(b->presumed)))
                                return -EFAULT;
                }
        }

        return relocs;
}

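/*
 * Reserve and validate the entire buffer list for a submission.  The
 * total number of buffers whose presumed offsets were updated is
 * returned through *apply_relocs so the caller knows whether the
 * relocation pass is needed.
 */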
static int
nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
                             struct drm_file *file_priv,
                             struct drm_nouveau_gem_pushbuf_bo *pbbo,
                             uint64_t user_buffers, int nr_buffers,
                             struct validate_op *op, int *apply_relocs)
{
        struct drm_device *dev = chan->dev;
        int ret, relocs = 0;

        INIT_LIST_HEAD(&op->vram_list);
        INIT_LIST_HEAD(&op->gart_list);
        INIT_LIST_HEAD(&op->both_list);

        if (nr_buffers == 0)
                return 0;

        ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
        if (unlikely(ret)) {
                if (ret != -ERESTARTSYS)
                        NV_ERROR(dev, "validate_init\n");
                return ret;
        }

        ret = validate_list(chan, &op->vram_list, pbbo, user_buffers);
        if (unlikely(ret < 0)) {
                if (ret != -ERESTARTSYS)
                        NV_ERROR(dev, "validate vram_list\n");
                validate_fini(op, NULL);
                return ret;
        }
        relocs += ret;

        ret = validate_list(chan, &op->gart_list, pbbo, user_buffers);
        if (unlikely(ret < 0)) {
                if (ret != -ERESTARTSYS)
                        NV_ERROR(dev, "validate gart_list\n");
                validate_fini(op, NULL);
                return ret;
        }
        relocs += ret;

        ret = validate_list(chan, &op->both_list, pbbo, user_buffers);
        if (unlikely(ret < 0)) {
                if (ret != -ERESTARTSYS)
                        NV_ERROR(dev, "validate both_list\n");
                validate_fini(op, NULL);
                return ret;
        }
        relocs += ret;

        *apply_relocs = relocs;
        return 0;
}

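/*
 * Copy a userspace array into a freshly allocated kernel buffer.  The
 * element counts passed in by the pushbuf ioctl are checked against the
 * NOUVEAU_GEM_MAX_* limits before this is called, which bounds the
 * nmemb * size multiplication.
 */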
static inline void *
u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
{
        void *mem;
        void __user *userptr = (void __force __user *)(uintptr_t)user;

        mem = kmalloc(nmemb * size, GFP_KERNEL);
        if (!mem)
                return ERR_PTR(-ENOMEM);

        if (DRM_COPY_FROM_USER(mem, userptr, nmemb * size)) {
                kfree(mem);
                return ERR_PTR(-EFAULT);
        }

        return mem;
}

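/*
 * Apply userspace-supplied relocations: for each reloc whose target
 * buffer's presumed offset was invalidated during validation, compute
 * the new value (low or high 32 bits of the address, optionally OR'd
 * with a domain-dependent constant), wait for the containing buffer to
 * go idle and patch the word in place through a kernel mapping.
 */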
static int
nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
                                struct drm_nouveau_gem_pushbuf *req,
                                struct drm_nouveau_gem_pushbuf_bo *bo)
{
        struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
        int ret = 0;
        unsigned i;

        reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
        if (IS_ERR(reloc))
                return PTR_ERR(reloc);

        for (i = 0; i < req->nr_relocs; i++) {
                struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
                struct drm_nouveau_gem_pushbuf_bo *b;
                struct nouveau_bo *nvbo;
                uint32_t data;

                if (unlikely(r->bo_index >= req->nr_buffers)) {
                        NV_ERROR(dev, "reloc bo index invalid\n");
                        ret = -EINVAL;
                        break;
                }

                b = &bo[r->bo_index];
                if (b->presumed.valid)
                        continue;

                if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
                        NV_ERROR(dev, "reloc container bo index invalid\n");
                        ret = -EINVAL;
                        break;
                }
                nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;

                if (unlikely(r->reloc_bo_offset + 4 >
                             nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
                        NV_ERROR(dev, "reloc outside of bo\n");
                        ret = -EINVAL;
                        break;
                }

                if (!nvbo->kmap.virtual) {
                        ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
                                          &nvbo->kmap);
                        if (ret) {
                                NV_ERROR(dev, "failed kmap for reloc\n");
                                break;
                        }
                        nvbo->validate_mapped = true;
                }

                if (r->flags & NOUVEAU_GEM_RELOC_LOW)
                        data = b->presumed.offset + r->data;
                else
                if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
                        data = (b->presumed.offset + r->data) >> 32;
                else
                        data = r->data;

                if (r->flags & NOUVEAU_GEM_RELOC_OR) {
                        if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
                                data |= r->tor;
                        else
                                data |= r->vor;
                }

                spin_lock(&nvbo->bo.bdev->fence_lock);
                ret = ttm_bo_wait(&nvbo->bo, false, false, false);
                spin_unlock(&nvbo->bo.bdev->fence_lock);
                if (ret) {
                        NV_ERROR(dev, "reloc wait_idle failed: %d\n", ret);
                        break;
                }

                nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
        }

        kfree(reloc);
        return ret;
}

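/*
 * DRM_NOUVEAU_GEM_PUSHBUF ioctl: the main command submission path.  The
 * buffer and push lists are copied in and sanity checked against the
 * NOUVEAU_GEM_MAX_* limits, every referenced buffer is reserved and
 * validated, relocations are applied if any presumed offsets changed,
 * and the pushes are then queued on the channel (via the IB ring on
 * chips that have one, or call/jump commands on older parts).  Finally a
 * fence is emitted and attached to all buffers before they are released.
 */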
int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct drm_nouveau_gem_pushbuf *req = data;
        struct drm_nouveau_gem_pushbuf_push *push;
        struct drm_nouveau_gem_pushbuf_bo *bo;
        struct nouveau_channel *chan;
        struct validate_op op;
        struct nouveau_fence *fence = NULL;
        int i, j, ret = 0, do_reloc = 0;

        chan = nouveau_channel_get(file_priv, req->channel);
        if (IS_ERR(chan))
                return PTR_ERR(chan);

        req->vram_available = dev_priv->fb_aper_free;
        req->gart_available = dev_priv->gart_info.aper_free;
        if (unlikely(req->nr_push == 0))
                goto out_next;

        if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
                NV_ERROR(dev, "pushbuf push count exceeds limit: %d max %d\n",
                         req->nr_push, NOUVEAU_GEM_MAX_PUSH);
                nouveau_channel_put(&chan);
                return -EINVAL;
        }

        if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
                NV_ERROR(dev, "pushbuf bo count exceeds limit: %d max %d\n",
                         req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
                nouveau_channel_put(&chan);
                return -EINVAL;
        }

        if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
                NV_ERROR(dev, "pushbuf reloc count exceeds limit: %d max %d\n",
                         req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
                nouveau_channel_put(&chan);
                return -EINVAL;
        }

        push = u_memcpya(req->push, req->nr_push, sizeof(*push));
        if (IS_ERR(push)) {
                nouveau_channel_put(&chan);
                return PTR_ERR(push);
        }

        bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
        if (IS_ERR(bo)) {
                kfree(push);
                nouveau_channel_put(&chan);
                return PTR_ERR(bo);
        }

        /* Ensure all push buffers are on validate list */
        for (i = 0; i < req->nr_push; i++) {
                if (push[i].bo_index >= req->nr_buffers) {
                        NV_ERROR(dev, "push %d buffer not in list\n", i);
                        ret = -EINVAL;
                        goto out_prevalid;
                }
        }

        /* Validate buffer list */
        ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
                                           req->nr_buffers, &op, &do_reloc);
        if (ret) {
                if (ret != -ERESTARTSYS)
                        NV_ERROR(dev, "validate: %d\n", ret);
                goto out_prevalid;
        }

        /* Apply any relocations that are required */
        if (do_reloc) {
                ret = nouveau_gem_pushbuf_reloc_apply(dev, req, bo);
                if (ret) {
                        NV_ERROR(dev, "reloc apply: %d\n", ret);
                        goto out;
                }
        }

        if (chan->dma.ib_max) {
                ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
                if (ret) {
                        NV_INFO(dev, "nv50cal_space: %d\n", ret);
                        goto out;
                }

                for (i = 0; i < req->nr_push; i++) {
                        struct nouveau_bo *nvbo = (void *)(unsigned long)
                                bo[push[i].bo_index].user_priv;

                        nv50_dma_push(chan, nvbo, push[i].offset,
                                      push[i].length);
                }
        } else
        if (dev_priv->chipset >= 0x25) {
                ret = RING_SPACE(chan, req->nr_push * 2);
                if (ret) {
                        NV_ERROR(dev, "cal_space: %d\n", ret);
                        goto out;
                }

                for (i = 0; i < req->nr_push; i++) {
                        struct nouveau_bo *nvbo = (void *)(unsigned long)
                                bo[push[i].bo_index].user_priv;
                        struct drm_mm_node *mem = nvbo->bo.mem.mm_node;

                        OUT_RING(chan, ((mem->start << PAGE_SHIFT) +
                                        push[i].offset) | 2);
                        OUT_RING(chan, 0);
                }
        } else {
                ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
                if (ret) {
                        NV_ERROR(dev, "jmp_space: %d\n", ret);
                        goto out;
                }

                for (i = 0; i < req->nr_push; i++) {
                        struct nouveau_bo *nvbo = (void *)(unsigned long)
                                bo[push[i].bo_index].user_priv;
                        struct drm_mm_node *mem = nvbo->bo.mem.mm_node;
                        uint32_t cmd;

                        cmd = chan->pushbuf_base + ((chan->dma.cur + 2) << 2);
                        cmd |= 0x20000000;
                        if (unlikely(cmd != req->suffix0)) {
                                if (!nvbo->kmap.virtual) {
                                        ret = ttm_bo_kmap(&nvbo->bo, 0,
                                                          nvbo->bo.mem.
                                                          num_pages,
                                                          &nvbo->kmap);
                                        if (ret) {
                                                WIND_RING(chan);
                                                goto out;
                                        }
                                        nvbo->validate_mapped = true;
                                }

                                nouveau_bo_wr32(nvbo, (push[i].offset +
                                                push[i].length - 8) / 4, cmd);
                        }

                        OUT_RING(chan, ((mem->start << PAGE_SHIFT) +
                                        push[i].offset) | 0x20000000);
                        OUT_RING(chan, 0);
                        for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
                                OUT_RING(chan, 0);
                }
        }

        ret = nouveau_fence_new(chan, &fence);
        if (ret) {
                NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
                WIND_RING(chan);
                goto out;
        }

out:
        validate_fini(&op, fence);
        nouveau_fence_unref(&fence);

out_prevalid:
        kfree(bo);
        kfree(push);

out_next:
        if (chan->dma.ib_max) {
                req->suffix0 = 0x00000000;
                req->suffix1 = 0x00000000;
        } else
        if (dev_priv->chipset >= 0x25) {
                req->suffix0 = 0x00020000;
                req->suffix1 = 0x00000000;
        } else {
                req->suffix0 = 0x20000000 |
                              (chan->pushbuf_base + ((chan->dma.cur + 2) << 2));
                req->suffix1 = 0x00000000;
        }

        nouveau_channel_put(&chan);
        return ret;
}

static inline uint32_t
domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
{
        uint32_t flags = 0;

        if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
                flags |= TTM_PL_FLAG_VRAM;
        if (domain & NOUVEAU_GEM_DOMAIN_GART)
                flags |= TTM_PL_FLAG_TT;

        return flags;
}

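/*
 * DRM_NOUVEAU_GEM_CPU_PREP ioctl: wait (or poll, with the NOWAIT flag)
 * for the GPU to finish with a buffer before the CPU accesses it.
 */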
int
nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct drm_nouveau_gem_cpu_prep *req = data;
        struct drm_gem_object *gem;
        struct nouveau_bo *nvbo;
        bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
        int ret = -EINVAL;

        gem = drm_gem_object_lookup(dev, file_priv, req->handle);
        if (!gem)
                return -ENOENT;
        nvbo = nouveau_gem_object(gem);

        spin_lock(&nvbo->bo.bdev->fence_lock);
        ret = ttm_bo_wait(&nvbo->bo, true, true, no_wait);
        spin_unlock(&nvbo->bo.bdev->fence_lock);
        drm_gem_object_unreference_unlocked(gem);
        return ret;
}

int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        return 0;
}

int
nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        struct drm_nouveau_gem_info *req = data;
        struct drm_gem_object *gem;
        int ret;

        gem = drm_gem_object_lookup(dev, file_priv, req->handle);
        if (!gem)
                return -ENOENT;

        ret = nouveau_gem_info(file_priv, gem, req);
        drm_gem_object_unreference_unlocked(gem);
        return ret;
}