/*
 * Copyright © 2008,2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/dma_remapping.h>

struct eb_vmas {
        struct list_head vmas;
        int and;
        union {
                struct i915_vma *lut[0];
                struct hlist_head buckets[0];
        };
};

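/*
 * Allocate the handle->vma lookup used while processing an execbuffer.
 * With I915_EXEC_HANDLE_LUT the handles are dense indices, so a flat
 * array is sufficient; otherwise fall back to a small hash table sized
 * relative to the buffer count.
 */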
static struct eb_vmas *
eb_create(struct drm_i915_gem_execbuffer2 *args)
{
        struct eb_vmas *eb = NULL;

        if (args->flags & I915_EXEC_HANDLE_LUT) {
                unsigned size = args->buffer_count;
                size *= sizeof(struct i915_vma *);
                size += sizeof(struct eb_vmas);
                eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
        }

        if (eb == NULL) {
                unsigned size = args->buffer_count;
                unsigned count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
                BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
                while (count > 2*size)
                        count >>= 1;
                eb = kzalloc(count*sizeof(struct hlist_head) +
                             sizeof(struct eb_vmas),
                             GFP_TEMPORARY);
                if (eb == NULL)
                        return eb;

                eb->and = count - 1;
        } else
                eb->and = -args->buffer_count;

        INIT_LIST_HEAD(&eb->vmas);
        return eb;
}

static void
eb_reset(struct eb_vmas *eb)
{
        if (eb->and >= 0)
                memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}

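/*
 * Resolve every userspace handle in the exec list to a GEM object and
 * then to a vma in the target address space, taking a reference on each
 * object and recording the vma for later lookup via eb_get_vma().
 */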
static int
eb_lookup_vmas(struct eb_vmas *eb,
               struct drm_i915_gem_exec_object2 *exec,
               const struct drm_i915_gem_execbuffer2 *args,
               struct i915_address_space *vm,
               struct drm_file *file)
{
        struct drm_i915_gem_object *obj;
        struct list_head objects;
        int i, ret = 0;

        INIT_LIST_HEAD(&objects);
        spin_lock(&file->table_lock);
        /* Grab a reference to the object and release the lock so we can lookup
         * or create the VMA without using GFP_ATOMIC */
        for (i = 0; i < args->buffer_count; i++) {
                obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
                if (obj == NULL) {
                        spin_unlock(&file->table_lock);
                        DRM_DEBUG("Invalid object handle %d at index %d\n",
                                   exec[i].handle, i);
                        ret = -ENOENT;
                        goto out;
                }

                if (!list_empty(&obj->obj_exec_link)) {
                        spin_unlock(&file->table_lock);
                        DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
                                   obj, exec[i].handle, i);
                        ret = -EINVAL;
                        goto out;
                }

                drm_gem_object_reference(&obj->base);
                list_add_tail(&obj->obj_exec_link, &objects);
        }
        spin_unlock(&file->table_lock);

        i = 0;
        list_for_each_entry(obj, &objects, obj_exec_link) {
                struct i915_vma *vma;

                /*
                 * NOTE: We can leak any vmas created here when something fails
                 * later on. But that's no issue since vma_unbind can deal with
                 * vmas which are not actually bound. And since only
                 * lookup_or_create exists as an interface to get at the vma
                 * from the (obj, vm) we don't run the risk of creating
                 * duplicated vmas for the same vm.
                 */
                vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
                if (IS_ERR(vma)) {
                        DRM_DEBUG("Failed to lookup VMA\n");
                        ret = PTR_ERR(vma);
                        goto out;
                }

                list_add_tail(&vma->exec_list, &eb->vmas);

                vma->exec_entry = &exec[i];
                if (eb->and < 0) {
                        eb->lut[i] = vma;
                } else {
                        uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
                        vma->exec_handle = handle;
                        hlist_add_head(&vma->exec_node,
                                       &eb->buckets[handle & eb->and]);
                }
                ++i;
        }

out:
        while (!list_empty(&objects)) {
                obj = list_first_entry(&objects,
                                       struct drm_i915_gem_object,
                                       obj_exec_link);
                list_del_init(&obj->obj_exec_link);
                if (ret)
                        drm_gem_object_unreference(&obj->base);
        }
        return ret;
}

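/* Look up a previously recorded vma by execbuffer handle (or LUT index). */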
static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
{
        if (eb->and < 0) {
                if (handle >= -eb->and)
                        return NULL;
                return eb->lut[handle];
        } else {
                struct hlist_head *head;
                struct hlist_node *node;

                head = &eb->buckets[handle & eb->and];
                hlist_for_each(node, head) {
                        struct i915_vma *vma;

                        vma = hlist_entry(node, struct i915_vma, exec_node);
                        if (vma->exec_handle == handle)
                                return vma;
                }
                return NULL;
        }
}

static void eb_destroy(struct eb_vmas *eb)
{
        while (!list_empty(&eb->vmas)) {
                struct i915_vma *vma;

                vma = list_first_entry(&eb->vmas,
                                       struct i915_vma,
                                       exec_list);
                list_del_init(&vma->exec_list);
                drm_gem_object_unreference(&vma->obj->base);
        }
        kfree(eb);
}

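/*
 * Decide whether a relocation should be written through a CPU mapping
 * rather than the GTT aperture, e.g. when the object is already
 * CPU-coherent or cannot be mapped and fenced in the aperture.
 */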
static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
{
        return (HAS_LLC(obj->base.dev) ||
                obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
                !obj->map_and_fenceable ||
                obj->cache_level != I915_CACHE_NONE);
}

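/* Write a relocation value into the object through a CPU kmap. */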
static int
relocate_entry_cpu(struct drm_i915_gem_object *obj,
                   struct drm_i915_gem_relocation_entry *reloc)
{
        struct drm_device *dev = obj->base.dev;
        uint32_t page_offset = offset_in_page(reloc->offset);
        char *vaddr;
        int ret = -EINVAL;

        ret = i915_gem_object_set_to_cpu_domain(obj, true);
        if (ret)
                return ret;

        vaddr = kmap_atomic(i915_gem_object_get_page(obj,
                                reloc->offset >> PAGE_SHIFT));
        *(uint32_t *)(vaddr + page_offset) = reloc->delta;

        if (INTEL_INFO(dev)->gen >= 8) {
                page_offset = offset_in_page(page_offset + sizeof(uint32_t));

                if (page_offset == 0) {
                        kunmap_atomic(vaddr);
                        vaddr = kmap_atomic(i915_gem_object_get_page(obj,
                            (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
                }

                *(uint32_t *)(vaddr + page_offset) = 0;
        }

        kunmap_atomic(vaddr);

        return 0;
}

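/* Write a relocation value through a write-combining GTT mapping. */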
static int
relocate_entry_gtt(struct drm_i915_gem_object *obj,
                   struct drm_i915_gem_relocation_entry *reloc)
{
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t __iomem *reloc_entry;
        void __iomem *reloc_page;
        int ret = -EINVAL;

        ret = i915_gem_object_set_to_gtt_domain(obj, true);
        if (ret)
                return ret;

        ret = i915_gem_object_put_fence(obj);
        if (ret)
                return ret;

        /* Map the page containing the relocation we're going to perform.  */
        reloc->offset += i915_gem_obj_ggtt_offset(obj);
        reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
                        reloc->offset & PAGE_MASK);
        reloc_entry = (uint32_t __iomem *)
                (reloc_page + offset_in_page(reloc->offset));
        iowrite32(reloc->delta, reloc_entry);

        if (INTEL_INFO(dev)->gen >= 8) {
                reloc_entry += 1;

                if (offset_in_page(reloc->offset + sizeof(uint32_t)) == 0) {
                        io_mapping_unmap_atomic(reloc_page);
                        reloc_page = io_mapping_map_atomic_wc(
                                        dev_priv->gtt.mappable,
                                        reloc->offset + sizeof(uint32_t));
                        reloc_entry = reloc_page;
                }

                iowrite32(0, reloc_entry);
        }

        io_mapping_unmap_atomic(reloc_page);

        return 0;
}

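/*
 * Apply a single relocation: validate the request, then patch the
 * target's offset (plus delta) into the object unless the presumed
 * offset already matches.
 */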
static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
                                   struct eb_vmas *eb,
                                   struct drm_i915_gem_relocation_entry *reloc,
                                   struct i915_address_space *vm)
{
        struct drm_device *dev = obj->base.dev;
        struct drm_gem_object *target_obj;
        struct drm_i915_gem_object *target_i915_obj;
        struct i915_vma *target_vma;
        uint32_t target_offset;
        int ret = -EINVAL;

        /* we already hold a reference to all valid objects */
        target_vma = eb_get_vma(eb, reloc->target_handle);
        if (unlikely(target_vma == NULL))
                return -ENOENT;
        target_i915_obj = target_vma->obj;
        target_obj = &target_vma->obj->base;

        target_offset = target_vma->node.start;

        /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
         * pipe_control writes because the gpu doesn't properly redirect them
         * through the ppgtt for non-secure batchbuffers. */
        if (unlikely(IS_GEN6(dev) &&
            reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
            !target_i915_obj->has_global_gtt_mapping)) {
                i915_gem_gtt_bind_object(target_i915_obj,
                                         target_i915_obj->cache_level);
        }

        /* Validate that the target is in a valid r/w GPU domain */
        if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
                DRM_DEBUG("reloc with multiple write domains: "
                          "obj %p target %d offset %d "
                          "read %08x write %08x",
                          obj, reloc->target_handle,
                          (int) reloc->offset,
                          reloc->read_domains,
                          reloc->write_domain);
                return ret;
        }
        if (unlikely((reloc->write_domain | reloc->read_domains)
                     & ~I915_GEM_GPU_DOMAINS)) {
                DRM_DEBUG("reloc with read/write non-GPU domains: "
                          "obj %p target %d offset %d "
                          "read %08x write %08x",
                          obj, reloc->target_handle,
                          (int) reloc->offset,
                          reloc->read_domains,
                          reloc->write_domain);
                return ret;
        }

        target_obj->pending_read_domains |= reloc->read_domains;
        target_obj->pending_write_domain |= reloc->write_domain;

        /* If the relocation already has the right value in it, no
         * more work needs to be done.
         */
        if (target_offset == reloc->presumed_offset)
                return 0;

        /* Check that the relocation address is valid... */
        if (unlikely(reloc->offset >
                obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
                DRM_DEBUG("Relocation beyond object bounds: "
                          "obj %p target %d offset %d size %d.\n",
                          obj, reloc->target_handle,
                          (int) reloc->offset,
                          (int) obj->base.size);
                return ret;
        }
        if (unlikely(reloc->offset & 3)) {
                DRM_DEBUG("Relocation not 4-byte aligned: "
                          "obj %p target %d offset %d.\n",
                          obj, reloc->target_handle,
                          (int) reloc->offset);
                return ret;
        }

        /* We can't wait for rendering with pagefaults disabled */
        if (obj->active && in_atomic())
                return -EFAULT;

        reloc->delta += target_offset;
        if (use_cpu_reloc(obj))
                ret = relocate_entry_cpu(obj, reloc);
        else
                ret = relocate_entry_gtt(obj, reloc);

        if (ret)
                return ret;

        /* and update the user's relocation entry */
        reloc->presumed_offset = target_offset;

        return 0;
}

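/*
 * Fast-path relocation for one vma: copy relocation entries from
 * userspace in small batches with the inatomic helpers (the caller has
 * disabled pagefaults) and write back any updated presumed offsets.
 */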
static int
i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
                                 struct eb_vmas *eb)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
        struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
        struct drm_i915_gem_relocation_entry __user *user_relocs;
        struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
        int remain, ret;

        user_relocs = to_user_ptr(entry->relocs_ptr);

        remain = entry->relocation_count;
        while (remain) {
                struct drm_i915_gem_relocation_entry *r = stack_reloc;
                int count = remain;
                if (count > ARRAY_SIZE(stack_reloc))
                        count = ARRAY_SIZE(stack_reloc);
                remain -= count;

                if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
                        return -EFAULT;

                do {
                        u64 offset = r->presumed_offset;

                        ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r,
                                                                 vma->vm);
                        if (ret)
                                return ret;

                        if (r->presumed_offset != offset &&
                            __copy_to_user_inatomic(&user_relocs->presumed_offset,
                                                    &r->presumed_offset,
                                                    sizeof(r->presumed_offset))) {
                                return -EFAULT;
                        }

                        user_relocs++;
                        r++;
                } while (--count);
        }

        return 0;
#undef N_RELOC
}

static int
i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
                                      struct eb_vmas *eb,
                                      struct drm_i915_gem_relocation_entry *relocs)
{
        const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
        int i, ret;

        for (i = 0; i < entry->relocation_count; i++) {
                ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i],
                                                         vma->vm);
                if (ret)
                        return ret;
        }

        return 0;
}

static int
i915_gem_execbuffer_relocate(struct eb_vmas *eb)
{
        struct i915_vma *vma;
        int ret = 0;

        /* This is the fast path and we cannot handle a pagefault whilst
         * holding the struct mutex lest the user pass in the relocations
         * contained within a mmaped bo. For in such a case, the page
         * fault handler would call i915_gem_fault() and we would try to
         * acquire the struct mutex again. Obviously this is bad and so
         * lockdep complains vehemently.
         */
        pagefault_disable();
        list_for_each_entry(vma, &eb->vmas, exec_list) {
                ret = i915_gem_execbuffer_relocate_vma(vma, eb);
                if (ret)
                        break;
        }
        pagefault_enable();

        return ret;
}

#define __EXEC_OBJECT_HAS_PIN (1<<31)
#define __EXEC_OBJECT_HAS_FENCE (1<<30)

static int
need_reloc_mappable(struct i915_vma *vma)
{
        struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
        return entry->relocation_count && !use_cpu_reloc(vma->obj) &&
                i915_is_ggtt(vma->vm);
}

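/* Pin a single vma for execution and set up its fence and PPGTT state. */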
static int
i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
                                struct intel_ring_buffer *ring,
                                bool *need_reloc)
{
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
        struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
        bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
        bool need_fence, need_mappable;
        struct drm_i915_gem_object *obj = vma->obj;
        int ret;

        need_fence =
                has_fenced_gpu_access &&
                entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
                obj->tiling_mode != I915_TILING_NONE;
        need_mappable = need_fence || need_reloc_mappable(vma);

        ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, need_mappable,
                                  false);
        if (ret)
                return ret;

        entry->flags |= __EXEC_OBJECT_HAS_PIN;

        if (has_fenced_gpu_access) {
                if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
                        ret = i915_gem_object_get_fence(obj);
                        if (ret)
                                return ret;

                        if (i915_gem_object_pin_fence(obj))
                                entry->flags |= __EXEC_OBJECT_HAS_FENCE;

                        obj->pending_fenced_gpu_access = true;
                }
        }

        /* Ensure ppgtt mapping exists if needed */
        if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
                i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
                                       obj, obj->cache_level);

                obj->has_aliasing_ppgtt_mapping = 1;
        }

        if (entry->offset != vma->node.start) {
                entry->offset = vma->node.start;
                *need_reloc = true;
        }

        if (entry->flags & EXEC_OBJECT_WRITE) {
                obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
                obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
        }

        if (entry->flags & EXEC_OBJECT_NEEDS_GTT &&
            !obj->has_global_gtt_mapping)
                i915_gem_gtt_bind_object(obj, obj->cache_level);

        return 0;
}

static void
i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
{
        struct drm_i915_gem_exec_object2 *entry;
        struct drm_i915_gem_object *obj = vma->obj;

        if (!drm_mm_node_allocated(&vma->node))
                return;

        entry = vma->exec_entry;

        if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
                i915_gem_object_unpin_fence(obj);

        if (entry->flags & __EXEC_OBJECT_HAS_PIN)
                i915_gem_object_unpin(obj);

        entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
}

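/*
 * Reserve GTT space for every vma in the execbuffer, binding and
 * evicting as required; see the three-phase description inside.
 */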
static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
                            struct list_head *vmas,
                            bool *need_relocs)
{
        struct drm_i915_gem_object *obj;
        struct i915_vma *vma;
        struct i915_address_space *vm;
        struct list_head ordered_vmas;
        bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
        int retry;

        if (list_empty(vmas))
                return 0;

        vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;

        INIT_LIST_HEAD(&ordered_vmas);
        while (!list_empty(vmas)) {
                struct drm_i915_gem_exec_object2 *entry;
                bool need_fence, need_mappable;

                vma = list_first_entry(vmas, struct i915_vma, exec_list);
                obj = vma->obj;
                entry = vma->exec_entry;

                need_fence =
                        has_fenced_gpu_access &&
                        entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
                        obj->tiling_mode != I915_TILING_NONE;
                need_mappable = need_fence || need_reloc_mappable(vma);

                if (need_mappable)
                        list_move(&vma->exec_list, &ordered_vmas);
                else
                        list_move_tail(&vma->exec_list, &ordered_vmas);

                obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
                obj->base.pending_write_domain = 0;
                obj->pending_fenced_gpu_access = false;
        }
        list_splice(&ordered_vmas, vmas);

        /* Attempt to pin all of the buffers into the GTT.
         * This is done in 3 phases:
         *
         * 1a. Unbind all objects that do not match the GTT constraints for
         *     the execbuffer (fenceable, mappable, alignment etc).
         * 1b. Increment pin count for already bound objects.
         * 2.  Bind new objects.
         * 3.  Decrement pin count.
         *
         * This avoids unnecessary unbinding of later objects in order to make
         * room for the earlier objects *unless* we need to defragment.
         */
        retry = 0;
        do {
                int ret = 0;

                /* Unbind any ill-fitting objects or pin. */
                list_for_each_entry(vma, vmas, exec_list) {
                        struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
                        bool need_fence, need_mappable;

                        obj = vma->obj;

                        if (!drm_mm_node_allocated(&vma->node))
                                continue;

                        need_fence =
                                has_fenced_gpu_access &&
                                entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
                                obj->tiling_mode != I915_TILING_NONE;
                        need_mappable = need_fence || need_reloc_mappable(vma);

                        WARN_ON((need_mappable || need_fence) &&
                               !i915_is_ggtt(vma->vm));

                        if ((entry->alignment &&
                             vma->node.start & (entry->alignment - 1)) ||
                            (need_mappable && !obj->map_and_fenceable))
                                ret = i915_vma_unbind(vma);
                        else
                                ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
                        if (ret)
                                goto err;
                }

                /* Bind fresh objects */
                list_for_each_entry(vma, vmas, exec_list) {
                        if (drm_mm_node_allocated(&vma->node))
                                continue;

                        ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
                        if (ret)
                                goto err;
                }

err:            /* Decrement pin count for bound objects */
                list_for_each_entry(vma, vmas, exec_list)
                        i915_gem_execbuffer_unreserve_vma(vma);

                if (ret != -ENOSPC || retry++)
                        return ret;

                ret = i915_gem_evict_vm(vm, true);
                if (ret)
                        return ret;
        } while (1);
}

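/*
 * Slow-path relocation: drop the struct mutex, copy all relocation
 * entries from userspace with pagefaults enabled, then retake the lock,
 * re-reserve the buffers and apply the relocations from the kernel copy.
 */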
static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
                                  struct drm_i915_gem_execbuffer2 *args,
                                  struct drm_file *file,
                                  struct intel_ring_buffer *ring,
                                  struct eb_vmas *eb,
                                  struct drm_i915_gem_exec_object2 *exec)
{
        struct drm_i915_gem_relocation_entry *reloc;
        struct i915_address_space *vm;
        struct i915_vma *vma;
        bool need_relocs;
        int *reloc_offset;
        int i, total, ret;
        unsigned count = args->buffer_count;

        if (WARN_ON(list_empty(&eb->vmas)))
                return 0;

        vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;

        /* We may process another execbuffer during the unlock... */
        while (!list_empty(&eb->vmas)) {
                vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
                list_del_init(&vma->exec_list);
                drm_gem_object_unreference(&vma->obj->base);
        }

        mutex_unlock(&dev->struct_mutex);

        total = 0;
        for (i = 0; i < count; i++)
                total += exec[i].relocation_count;

        reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
        reloc = drm_malloc_ab(total, sizeof(*reloc));
        if (reloc == NULL || reloc_offset == NULL) {
                drm_free_large(reloc);
                drm_free_large(reloc_offset);
                mutex_lock(&dev->struct_mutex);
                return -ENOMEM;
        }

        total = 0;
        for (i = 0; i < count; i++) {
                struct drm_i915_gem_relocation_entry __user *user_relocs;
                u64 invalid_offset = (u64)-1;
                int j;

                user_relocs = to_user_ptr(exec[i].relocs_ptr);

                if (copy_from_user(reloc+total, user_relocs,
                                   exec[i].relocation_count * sizeof(*reloc))) {
                        ret = -EFAULT;
                        mutex_lock(&dev->struct_mutex);
                        goto err;
                }

                /* As we do not update the known relocation offsets after
                 * relocating (due to the complexities in lock handling),
                 * we need to mark them as invalid now so that we force the
                 * relocation processing next time. Just in case the target
                 * object is evicted and then rebound into its old
                 * presumed_offset before the next execbuffer - if that
                 * happened we would make the mistake of assuming that the
                 * relocations were valid.
                 */
                for (j = 0; j < exec[i].relocation_count; j++) {
                        if (copy_to_user(&user_relocs[j].presumed_offset,
                                         &invalid_offset,
                                         sizeof(invalid_offset))) {
                                ret = -EFAULT;
                                mutex_lock(&dev->struct_mutex);
                                goto err;
                        }
                }

                reloc_offset[i] = total;
                total += exec[i].relocation_count;
        }

        ret = i915_mutex_lock_interruptible(dev);
        if (ret) {
                mutex_lock(&dev->struct_mutex);
                goto err;
        }

        /* reacquire the objects */
        eb_reset(eb);
        ret = eb_lookup_vmas(eb, exec, args, vm, file);
        if (ret)
                goto err;

        need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
        ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
        if (ret)
                goto err;

        list_for_each_entry(vma, &eb->vmas, exec_list) {
                int offset = vma->exec_entry - exec;
                ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb,
                                                            reloc + reloc_offset[offset]);
                if (ret)
                        goto err;
        }

        /* Leave the user relocations as are, this is the painfully slow path,
         * and we want to avoid the complication of dropping the lock whilst
         * having buffers reserved in the aperture and so causing spurious
         * ENOSPC for random operations.
         */

err:
        drm_free_large(reloc);
        drm_free_large(reloc_offset);
        return ret;
}

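/* Sync each object with the ring and flush CPU/chipset caches before dispatch. */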
static int
i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
                                struct list_head *vmas)
{
        struct i915_vma *vma;
        uint32_t flush_domains = 0;
        bool flush_chipset = false;
        int ret;

        list_for_each_entry(vma, vmas, exec_list) {
                struct drm_i915_gem_object *obj = vma->obj;
                ret = i915_gem_object_sync(obj, ring);
                if (ret)
                        return ret;

                if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
                        flush_chipset |= i915_gem_clflush_object(obj, false);

                flush_domains |= obj->base.write_domain;
        }

        if (flush_chipset)
                i915_gem_chipset_flush(ring->dev);

        if (flush_domains & I915_GEM_DOMAIN_GTT)
                wmb();

        /* Unconditionally invalidate gpu caches and ensure that we do flush
         * any residual writes from the previous batch.
         */
        return intel_ring_invalidate_all_caches(ring);
}

static bool
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
        if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
                return false;

        return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
}

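/*
 * Sanity-check the userspace exec list: reject unknown flags, guard
 * against relocation-count overflow and verify access to the relocation
 * arrays.
 */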
static int
validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
                   int count)
{
        int i;
        unsigned relocs_total = 0;
        unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);

        for (i = 0; i < count; i++) {
                char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
                int length; /* limited by fault_in_pages_readable() */

                if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS)
                        return -EINVAL;

                /* First check for malicious input causing overflow in
                 * the worst case where we need to allocate the entire
                 * relocation tree as a single array.
                 */
                if (exec[i].relocation_count > relocs_max - relocs_total)
                        return -EINVAL;
                relocs_total += exec[i].relocation_count;

                length = exec[i].relocation_count *
                        sizeof(struct drm_i915_gem_relocation_entry);
                /*
                 * We must check that the entire relocation array is safe
                 * to read, but since we may need to update the presumed
                 * offsets during execution, check for full write access.
                 */
                if (!access_ok(VERIFY_WRITE, ptr, length))
                        return -EFAULT;

                if (likely(!i915_prefault_disable)) {
                        if (fault_in_multipages_readable(ptr, length))
                                return -EFAULT;
                }
        }

        return 0;
}

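/* Mark every object as active on the ring and update its GPU domains. */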
static void
i915_gem_execbuffer_move_to_active(struct list_head *vmas,
                                   struct intel_ring_buffer *ring)
{
        struct i915_vma *vma;

        list_for_each_entry(vma, vmas, exec_list) {
                struct drm_i915_gem_object *obj = vma->obj;
                u32 old_read = obj->base.read_domains;
                u32 old_write = obj->base.write_domain;

                obj->base.write_domain = obj->base.pending_write_domain;
                if (obj->base.write_domain == 0)
                        obj->base.pending_read_domains |= obj->base.read_domains;
                obj->base.read_domains = obj->base.pending_read_domains;
                obj->fenced_gpu_access = obj->pending_fenced_gpu_access;

                i915_vma_move_to_active(vma, ring);
                if (obj->base.write_domain) {
                        obj->dirty = 1;
                        obj->last_write_seqno = intel_ring_get_seqno(ring);
                        if (obj->pin_count) /* check for potential scanout */
                                intel_mark_fb_busy(obj, ring);
                }

                trace_i915_gem_object_change_domain(obj, old_read, old_write);
        }
}

static void
i915_gem_execbuffer_retire_commands(struct drm_device *dev,
                                    struct drm_file *file,
                                    struct intel_ring_buffer *ring,
                                    struct drm_i915_gem_object *obj)
{
        /* Unconditionally force add_request to emit a full flush. */
        ring->gpu_caches_dirty = true;

        /* Add a breadcrumb for the completion of the batch buffer */
        (void)__i915_add_request(ring, file, obj, NULL);
}

static int
i915_reset_gen7_sol_offsets(struct drm_device *dev,
                            struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret, i;

        if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS])
                return 0;

        ret = intel_ring_begin(ring, 4 * 3);
        if (ret)
                return ret;

        for (i = 0; i < 4; i++) {
                intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
                intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
                intel_ring_emit(ring, 0);
        }

        intel_ring_advance(ring);

        return 0;
}

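/*
 * Core execbuffer implementation shared by the legacy execbuffer and
 * execbuffer2 ioctls.
 */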
static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                       struct drm_file *file,
                       struct drm_i915_gem_execbuffer2 *args,
                       struct drm_i915_gem_exec_object2 *exec,
                       struct i915_address_space *vm)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct eb_vmas *eb;
        struct drm_i915_gem_object *batch_obj;
        struct drm_clip_rect *cliprects = NULL;
        struct intel_ring_buffer *ring;
        struct i915_ctx_hang_stats *hs;
        u32 ctx_id = i915_execbuffer2_get_context_id(*args);
        u32 exec_start, exec_len;
        u32 mask, flags;
        int ret, mode, i;
        bool need_relocs;

        if (!i915_gem_check_execbuffer(args))
                return -EINVAL;

        ret = validate_exec_list(exec, args->buffer_count);
        if (ret)
                return ret;

        flags = 0;
        if (args->flags & I915_EXEC_SECURE) {
                if (!file->is_master || !capable(CAP_SYS_ADMIN))
                        return -EPERM;

                flags |= I915_DISPATCH_SECURE;
        }
        if (args->flags & I915_EXEC_IS_PINNED)
                flags |= I915_DISPATCH_PINNED;

        switch (args->flags & I915_EXEC_RING_MASK) {
        case I915_EXEC_DEFAULT:
        case I915_EXEC_RENDER:
                ring = &dev_priv->ring[RCS];
                break;
        case I915_EXEC_BSD:
                ring = &dev_priv->ring[VCS];
                if (ctx_id != DEFAULT_CONTEXT_ID) {
                        DRM_DEBUG("Ring %s doesn't support contexts\n",
                                  ring->name);
                        return -EPERM;
                }
                break;
        case I915_EXEC_BLT:
                ring = &dev_priv->ring[BCS];
                if (ctx_id != DEFAULT_CONTEXT_ID) {
                        DRM_DEBUG("Ring %s doesn't support contexts\n",
                                  ring->name);
                        return -EPERM;
                }
                break;
        case I915_EXEC_VEBOX:
                ring = &dev_priv->ring[VECS];
                if (ctx_id != DEFAULT_CONTEXT_ID) {
                        DRM_DEBUG("Ring %s doesn't support contexts\n",
                                  ring->name);
                        return -EPERM;
                }
                break;

        default:
                DRM_DEBUG("execbuf with unknown ring: %d\n",
                          (int)(args->flags & I915_EXEC_RING_MASK));
                return -EINVAL;
        }
        if (!intel_ring_initialized(ring)) {
                DRM_DEBUG("execbuf with invalid ring: %d\n",
                          (int)(args->flags & I915_EXEC_RING_MASK));
                return -EINVAL;
        }

        mode = args->flags & I915_EXEC_CONSTANTS_MASK;
        mask = I915_EXEC_CONSTANTS_MASK;
        switch (mode) {
        case I915_EXEC_CONSTANTS_REL_GENERAL:
        case I915_EXEC_CONSTANTS_ABSOLUTE:
        case I915_EXEC_CONSTANTS_REL_SURFACE:
                if (ring == &dev_priv->ring[RCS] &&
                    mode != dev_priv->relative_constants_mode) {
                        if (INTEL_INFO(dev)->gen < 4)
                                return -EINVAL;

                        if (INTEL_INFO(dev)->gen > 5 &&
                            mode == I915_EXEC_CONSTANTS_REL_SURFACE)
                                return -EINVAL;

                        /* The HW changed the meaning on this bit on gen6 */
                        if (INTEL_INFO(dev)->gen >= 6)
                                mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
                }
                break;
        default:
                DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
                return -EINVAL;
        }

        if (args->buffer_count < 1) {
                DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
                return -EINVAL;
        }

        if (args->num_cliprects != 0) {
                if (ring != &dev_priv->ring[RCS]) {
                        DRM_DEBUG("clip rectangles are only valid with the render ring\n");
                        return -EINVAL;
                }

                if (INTEL_INFO(dev)->gen >= 5) {
                        DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
                        return -EINVAL;
                }

                if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
                        DRM_DEBUG("execbuf with %u cliprects\n",
                                  args->num_cliprects);
                        return -EINVAL;
                }

                cliprects = kcalloc(args->num_cliprects,
                                    sizeof(*cliprects),
                                    GFP_KERNEL);
                if (cliprects == NULL) {
                        ret = -ENOMEM;
                        goto pre_mutex_err;
                }

                if (copy_from_user(cliprects,
                                   to_user_ptr(args->cliprects_ptr),
                                   sizeof(*cliprects)*args->num_cliprects)) {
                        ret = -EFAULT;
                        goto pre_mutex_err;
                }
        }

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                goto pre_mutex_err;

        if (dev_priv->ums.mm_suspended) {
                mutex_unlock(&dev->struct_mutex);
                ret = -EBUSY;
                goto pre_mutex_err;
        }

        eb = eb_create(args);
        if (eb == NULL) {
                mutex_unlock(&dev->struct_mutex);
                ret = -ENOMEM;
                goto pre_mutex_err;
        }

        /* Look up object handles */
        ret = eb_lookup_vmas(eb, exec, args, vm, file);
        if (ret)
                goto err;

        /* take note of the batch buffer before we might reorder the lists */
        batch_obj = list_entry(eb->vmas.prev, struct i915_vma, exec_list)->obj;

        /* Move the objects en-masse into the GTT, evicting if necessary. */
        need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
        ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
        if (ret)
                goto err;

        /* The objects are in their final locations, apply the relocations. */
        if (need_relocs)
                ret = i915_gem_execbuffer_relocate(eb);
        if (ret) {
                if (ret == -EFAULT) {
                        ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
                                                                eb, exec);
                        BUG_ON(!mutex_is_locked(&dev->struct_mutex));
                }
                if (ret)
                        goto err;
        }

        /* Set the pending read domains for the batch buffer to COMMAND */
        if (batch_obj->base.pending_write_domain) {
                DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
                ret = -EINVAL;
                goto err;
        }
        batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;

        /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
         * batch" bit. Hence we need to pin secure batches into the global gtt.
         * hsw should have this fixed, but bdw mucks it up again. */
        if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
                i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);

        ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->vmas);
        if (ret)
                goto err;

        hs = i915_gem_context_get_hang_stats(dev, file, ctx_id);
        if (IS_ERR(hs)) {
                ret = PTR_ERR(hs);
                goto err;
        }

        if (hs->banned) {
                ret = -EIO;
                goto err;
        }

        ret = i915_switch_context(ring, file, ctx_id);
        if (ret)
                goto err;

        if (ring == &dev_priv->ring[RCS] &&
            mode != dev_priv->relative_constants_mode) {
                ret = intel_ring_begin(ring, 4);
                if (ret)
                        goto err;

                intel_ring_emit(ring, MI_NOOP);
                intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
                intel_ring_emit(ring, INSTPM);
                intel_ring_emit(ring, mask << 16 | mode);
                intel_ring_advance(ring);

                dev_priv->relative_constants_mode = mode;
        }

        if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
                ret = i915_reset_gen7_sol_offsets(dev, ring);
                if (ret)
                        goto err;
        }

        exec_start = i915_gem_obj_offset(batch_obj, vm) +
                args->batch_start_offset;
        exec_len = args->batch_len;
        if (cliprects) {
                for (i = 0; i < args->num_cliprects; i++) {
                        ret = i915_emit_box(dev, &cliprects[i],
                                            args->DR1, args->DR4);
                        if (ret)
                                goto err;

                        ret = ring->dispatch_execbuffer(ring,
                                                        exec_start, exec_len,
                                                        flags);
                        if (ret)
                                goto err;
                }
        } else {
                ret = ring->dispatch_execbuffer(ring,
                                                exec_start, exec_len,
                                                flags);
                if (ret)
                        goto err;
        }

        trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);

        i915_gem_execbuffer_move_to_active(&eb->vmas, ring);
        i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);

err:
        eb_destroy(eb);

        mutex_unlock(&dev->struct_mutex);

pre_mutex_err:
        kfree(cliprects);
        return ret;
}

/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
                    struct drm_file *file)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_execbuffer *args = data;
        struct drm_i915_gem_execbuffer2 exec2;
        struct drm_i915_gem_exec_object *exec_list = NULL;
        struct drm_i915_gem_exec_object2 *exec2_list = NULL;
        int ret, i;

        if (args->buffer_count < 1) {
                DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
                return -EINVAL;
        }

        /* Copy in the exec list from userland */
        exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
        exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
        if (exec_list == NULL || exec2_list == NULL) {
                DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
                          args->buffer_count);
                drm_free_large(exec_list);
                drm_free_large(exec2_list);
                return -ENOMEM;
        }
        ret = copy_from_user(exec_list,
                             to_user_ptr(args->buffers_ptr),
                             sizeof(*exec_list) * args->buffer_count);
        if (ret != 0) {
                DRM_DEBUG("copy %d exec entries failed %d\n",
                          args->buffer_count, ret);
                drm_free_large(exec_list);
                drm_free_large(exec2_list);
                return -EFAULT;
        }

        for (i = 0; i < args->buffer_count; i++) {
                exec2_list[i].handle = exec_list[i].handle;
                exec2_list[i].relocation_count = exec_list[i].relocation_count;
                exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
                exec2_list[i].alignment = exec_list[i].alignment;
                exec2_list[i].offset = exec_list[i].offset;
                if (INTEL_INFO(dev)->gen < 4)
                        exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
                else
                        exec2_list[i].flags = 0;
        }

        exec2.buffers_ptr = args->buffers_ptr;
        exec2.buffer_count = args->buffer_count;
        exec2.batch_start_offset = args->batch_start_offset;
        exec2.batch_len = args->batch_len;
        exec2.DR1 = args->DR1;
        exec2.DR4 = args->DR4;
        exec2.num_cliprects = args->num_cliprects;
        exec2.cliprects_ptr = args->cliprects_ptr;
        exec2.flags = I915_EXEC_RENDER;
        i915_execbuffer2_set_context_id(exec2, 0);

        ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list,
                                     &dev_priv->gtt.base);
        if (!ret) {
                /* Copy the new buffer offsets back to the user's exec list. */
                for (i = 0; i < args->buffer_count; i++)
                        exec_list[i].offset = exec2_list[i].offset;
                /* ... and back out to userspace */
                ret = copy_to_user(to_user_ptr(args->buffers_ptr),
                                   exec_list,
                                   sizeof(*exec_list) * args->buffer_count);
                if (ret) {
                        ret = -EFAULT;
                        DRM_DEBUG("failed to copy %d exec entries "
                                  "back to user (%d)\n",
                                  args->buffer_count, ret);
                }
        }

        drm_free_large(exec_list);
        drm_free_large(exec2_list);
        return ret;
}

int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
                     struct drm_file *file)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_execbuffer2 *args = data;
        struct drm_i915_gem_exec_object2 *exec2_list = NULL;
        int ret;

        if (args->buffer_count < 1 ||
            args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
                DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
                return -EINVAL;
        }

        exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
                             GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
        if (exec2_list == NULL)
                exec2_list = drm_malloc_ab(sizeof(*exec2_list),
                                           args->buffer_count);
        if (exec2_list == NULL) {
                DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
                          args->buffer_count);
                return -ENOMEM;
        }
        ret = copy_from_user(exec2_list,
                             to_user_ptr(args->buffers_ptr),
                             sizeof(*exec2_list) * args->buffer_count);
        if (ret != 0) {
                DRM_DEBUG("copy %d exec entries failed %d\n",
                          args->buffer_count, ret);
                drm_free_large(exec2_list);
                return -EFAULT;
        }

        ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list,
                                     &dev_priv->gtt.base);
        if (!ret) {
                /* Copy the new buffer offsets back to the user's exec list. */
                ret = copy_to_user(to_user_ptr(args->buffers_ptr),
                                   exec2_list,
                                   sizeof(*exec2_list) * args->buffer_count);
                if (ret) {
                        ret = -EFAULT;
                        DRM_DEBUG("failed to copy %d exec entries "
                                  "back to user (%d)\n",
                                  args->buffer_count, ret);
                }
        }

        drm_free_large(exec2_list);
        return ret;
}