drivers/gpu/drm/i915/i915_gem_execbuffer.c (Linux 3.14)
1 /*
2  * Copyright © 2008,2010 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *    Chris Wilson <chris@chris-wilson.co.uk>
26  *
27  */
28
29 #include <drm/drmP.h>
30 #include <drm/i915_drm.h>
31 #include "i915_drv.h"
32 #include "i915_trace.h"
33 #include "intel_drv.h"
34 #include <linux/dma_remapping.h>
35
36 #define  __EXEC_OBJECT_HAS_PIN (1<<31)
37 #define  __EXEC_OBJECT_HAS_FENCE (1<<30)
38
39 struct eb_vmas {
40         struct list_head vmas;
41         int and;
42         union {
43                 struct i915_vma *lut[0];
44                 struct hlist_head buckets[0];
45         };
46 };
47
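/*
 * eb_create - allocate the per-execbuffer vma lookup structure.
 *
 * When userspace sets I915_EXEC_HANDLE_LUT the exec objects are referenced
 * by their position in the buffer list, so a flat lookup table sized to
 * buffer_count is tried first (opportunistically, hence __GFP_NORETRY).
 * Otherwise, or if that allocation fails, fall back to a hash table whose
 * power-of-two bucket count is scaled down towards 2*buffer_count.
 * eb->and encodes the mode: a non-negative bucket mask for the hash table,
 * or the negated buffer count for the direct LUT.
 */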
48 static struct eb_vmas *
49 eb_create(struct drm_i915_gem_execbuffer2 *args)
50 {
51         struct eb_vmas *eb = NULL;
52
53         if (args->flags & I915_EXEC_HANDLE_LUT) {
54                 unsigned size = args->buffer_count;
55                 size *= sizeof(struct i915_vma *);
56                 size += sizeof(struct eb_vmas);
57                 eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
58         }
59
60         if (eb == NULL) {
61                 unsigned size = args->buffer_count;
62                 unsigned count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
63                 BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
64                 while (count > 2*size)
65                         count >>= 1;
66                 eb = kzalloc(count*sizeof(struct hlist_head) +
67                              sizeof(struct eb_vmas),
68                              GFP_TEMPORARY);
69                 if (eb == NULL)
70                         return eb;
71
72                 eb->and = count - 1;
73         } else
74                 eb->and = -args->buffer_count;
75
76         INIT_LIST_HEAD(&eb->vmas);
77         return eb;
78 }
79
80 static void
81 eb_reset(struct eb_vmas *eb)
82 {
83         if (eb->and >= 0)
84                 memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
85 }
86
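/*
 * eb_lookup_vmas - resolve the user-supplied handles into vmas.
 *
 * Under the file's table_lock, translate each exec object handle into a
 * GEM object, take a reference and reject duplicate handles. Then, with
 * the spinlock dropped, look up (or create) the vma for each object in
 * the target address space and add it to the eb lookup structure.
 */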
87 static int
88 eb_lookup_vmas(struct eb_vmas *eb,
89                struct drm_i915_gem_exec_object2 *exec,
90                const struct drm_i915_gem_execbuffer2 *args,
91                struct i915_address_space *vm,
92                struct drm_file *file)
93 {
94         struct drm_i915_gem_object *obj;
95         struct list_head objects;
96         int i, ret;
97
98         INIT_LIST_HEAD(&objects);
99         spin_lock(&file->table_lock);
100         /* Grab a reference to the object and release the lock so we can look up
101          * or create the VMA without using GFP_ATOMIC */
102         for (i = 0; i < args->buffer_count; i++) {
103                 obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
104                 if (obj == NULL) {
105                         spin_unlock(&file->table_lock);
106                         DRM_DEBUG("Invalid object handle %d at index %d\n",
107                                    exec[i].handle, i);
108                         ret = -ENOENT;
109                         goto err;
110                 }
111
112                 if (!list_empty(&obj->obj_exec_link)) {
113                         spin_unlock(&file->table_lock);
114                         DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
115                                    obj, exec[i].handle, i);
116                         ret = -EINVAL;
117                         goto err;
118                 }
119
120                 drm_gem_object_reference(&obj->base);
121                 list_add_tail(&obj->obj_exec_link, &objects);
122         }
123         spin_unlock(&file->table_lock);
124
125         i = 0;
126         while (!list_empty(&objects)) {
127                 struct i915_vma *vma;
128
129                 obj = list_first_entry(&objects,
130                                        struct drm_i915_gem_object,
131                                        obj_exec_link);
132
133                 /*
134                  * NOTE: We can leak any vmas created here when something fails
135                  * later on. But that's no issue since vma_unbind can deal with
136                  * vmas which are not actually bound. And since only
137                  * lookup_or_create exists as an interface to get at the vma
138                  * from the (obj, vm) we don't run the risk of creating
139                  * duplicated vmas for the same vm.
140                  */
141                 vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
142                 if (IS_ERR(vma)) {
143                         DRM_DEBUG("Failed to lookup VMA\n");
144                         ret = PTR_ERR(vma);
145                         goto err;
146                 }
147
148                 /* Transfer ownership from the objects list to the vmas list. */
149                 list_add_tail(&vma->exec_list, &eb->vmas);
150                 list_del_init(&obj->obj_exec_link);
151
152                 vma->exec_entry = &exec[i];
153                 if (eb->and < 0) {
154                         eb->lut[i] = vma;
155                 } else {
156                         uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
157                         vma->exec_handle = handle;
158                         hlist_add_head(&vma->exec_node,
159                                        &eb->buckets[handle & eb->and]);
160                 }
161                 ++i;
162         }
163
164         return 0;
165
166
167 err:
168         while (!list_empty(&objects)) {
169                 obj = list_first_entry(&objects,
170                                        struct drm_i915_gem_object,
171                                        obj_exec_link);
172                 list_del_init(&obj->obj_exec_link);
173                 drm_gem_object_unreference(&obj->base);
174         }
175         /*
176          * Objects already transferred to the vmas list will be unreferenced by
177          * eb_destroy.
178          */
179
180         return ret;
181 }
182
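/*
 * eb_get_vma - map an exec handle back to its vma.
 *
 * In LUT mode (eb->and < 0) the handle is simply an index into eb->lut;
 * otherwise walk the hash bucket selected by the low bits of the handle
 * and compare against exec_handle.
 */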
183 static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
184 {
185         if (eb->and < 0) {
186                 if (handle >= -eb->and)
187                         return NULL;
188                 return eb->lut[handle];
189         } else {
190                 struct hlist_head *head;
191                 struct hlist_node *node;
192
193                 head = &eb->buckets[handle & eb->and];
194                 hlist_for_each(node, head) {
195                         struct i915_vma *vma;
196
197                         vma = hlist_entry(node, struct i915_vma, exec_node);
198                         if (vma->exec_handle == handle)
199                                 return vma;
200                 }
201                 return NULL;
202         }
203 }
204
205 static void
206 i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
207 {
208         struct drm_i915_gem_exec_object2 *entry;
209         struct drm_i915_gem_object *obj = vma->obj;
210
211         if (!drm_mm_node_allocated(&vma->node))
212                 return;
213
214         entry = vma->exec_entry;
215
216         if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
217                 i915_gem_object_unpin_fence(obj);
218
219         if (entry->flags & __EXEC_OBJECT_HAS_PIN)
220                 i915_gem_object_unpin(obj);
221
222         entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
223 }
224
225 static void eb_destroy(struct eb_vmas *eb)
226 {
227         while (!list_empty(&eb->vmas)) {
228                 struct i915_vma *vma;
229
230                 vma = list_first_entry(&eb->vmas,
231                                        struct i915_vma,
232                                        exec_list);
233                 list_del_init(&vma->exec_list);
234                 i915_gem_execbuffer_unreserve_vma(vma);
235                 drm_gem_object_unreference(&vma->obj->base);
236         }
237         kfree(eb);
238 }
239
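/*
 * Decide whether a relocation should be written through the CPU (kmap)
 * path rather than via the GTT aperture: used when the platform has an
 * LLC, when the object is already in the CPU write domain or uses a
 * cacheable mode, or when the object is not map-and-fenceable.
 */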
240 static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
241 {
242         return (HAS_LLC(obj->base.dev) ||
243                 obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
244                 !obj->map_and_fenceable ||
245                 obj->cache_level != I915_CACHE_NONE);
246 }
247
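/*
 * Write a relocation value into the object through a kmap'ed CPU mapping,
 * moving the object to the CPU domain first. On gen8+ the relocation is
 * 64 bits wide, so the upper dword (written as 0 here) may land on the
 * following page.
 */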
248 static int
249 relocate_entry_cpu(struct drm_i915_gem_object *obj,
250                    struct drm_i915_gem_relocation_entry *reloc)
251 {
252         struct drm_device *dev = obj->base.dev;
253         uint32_t page_offset = offset_in_page(reloc->offset);
254         char *vaddr;
255         int ret;
256
257         ret = i915_gem_object_set_to_cpu_domain(obj, true);
258         if (ret)
259                 return ret;
260
261         vaddr = kmap_atomic(i915_gem_object_get_page(obj,
262                                 reloc->offset >> PAGE_SHIFT));
263         *(uint32_t *)(vaddr + page_offset) = reloc->delta;
264
265         if (INTEL_INFO(dev)->gen >= 8) {
266                 page_offset = offset_in_page(page_offset + sizeof(uint32_t));
267
268                 if (page_offset == 0) {
269                         kunmap_atomic(vaddr);
270                         vaddr = kmap_atomic(i915_gem_object_get_page(obj,
271                             (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
272                 }
273
274                 *(uint32_t *)(vaddr + page_offset) = 0;
275         }
276
277         kunmap_atomic(vaddr);
278
279         return 0;
280 }
281
282 static int
283 relocate_entry_gtt(struct drm_i915_gem_object *obj,
284                    struct drm_i915_gem_relocation_entry *reloc)
285 {
286         struct drm_device *dev = obj->base.dev;
287         struct drm_i915_private *dev_priv = dev->dev_private;
288         uint32_t __iomem *reloc_entry;
289         void __iomem *reloc_page;
290         int ret;
291
292         ret = i915_gem_object_set_to_gtt_domain(obj, true);
293         if (ret)
294                 return ret;
295
296         ret = i915_gem_object_put_fence(obj);
297         if (ret)
298                 return ret;
299
300         /* Map the page containing the relocation we're going to perform.  */
301         reloc->offset += i915_gem_obj_ggtt_offset(obj);
302         reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
303                         reloc->offset & PAGE_MASK);
304         reloc_entry = (uint32_t __iomem *)
305                 (reloc_page + offset_in_page(reloc->offset));
306         iowrite32(reloc->delta, reloc_entry);
307
308         if (INTEL_INFO(dev)->gen >= 8) {
309                 reloc_entry += 1;
310
311                 if (offset_in_page(reloc->offset + sizeof(uint32_t)) == 0) {
312                         io_mapping_unmap_atomic(reloc_page);
313                         reloc_page = io_mapping_map_atomic_wc(
314                                         dev_priv->gtt.mappable,
315                                         reloc->offset + sizeof(uint32_t));
316                         reloc_entry = reloc_page;
317                 }
318
319                 iowrite32(0, reloc_entry);
320         }
321
322         io_mapping_unmap_atomic(reloc_page);
323
324         return 0;
325 }
326
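/*
 * Apply a single relocation: look up the target vma, validate the
 * requested read/write domains and the relocation offset, and if the
 * presumed offset recorded by userspace no longer matches the target's
 * actual offset, rewrite the location in the object (via the CPU or GTT
 * path) with target_offset + delta.
 */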
327 static int
328 i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
329                                    struct eb_vmas *eb,
330                                    struct drm_i915_gem_relocation_entry *reloc,
331                                    struct i915_address_space *vm)
332 {
333         struct drm_device *dev = obj->base.dev;
334         struct drm_gem_object *target_obj;
335         struct drm_i915_gem_object *target_i915_obj;
336         struct i915_vma *target_vma;
337         uint32_t target_offset;
338         int ret;
339
340         /* we already hold a reference to all valid objects */
341         target_vma = eb_get_vma(eb, reloc->target_handle);
342         if (unlikely(target_vma == NULL))
343                 return -ENOENT;
344         target_i915_obj = target_vma->obj;
345         target_obj = &target_vma->obj->base;
346
347         target_offset = target_vma->node.start;
348
349         /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
350          * pipe_control writes because the gpu doesn't properly redirect them
351          * through the ppgtt for non-secure batchbuffers. */
352         if (unlikely(IS_GEN6(dev) &&
353             reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
354             !target_i915_obj->has_global_gtt_mapping)) {
355                 i915_gem_gtt_bind_object(target_i915_obj,
356                                          target_i915_obj->cache_level);
357         }
358
359         /* Validate that the target is in a valid r/w GPU domain */
360         if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
361                 DRM_DEBUG("reloc with multiple write domains: "
362                           "obj %p target %d offset %d "
363                           "read %08x write %08x",
364                           obj, reloc->target_handle,
365                           (int) reloc->offset,
366                           reloc->read_domains,
367                           reloc->write_domain);
368                 return -EINVAL;
369         }
370         if (unlikely((reloc->write_domain | reloc->read_domains)
371                      & ~I915_GEM_GPU_DOMAINS)) {
372                 DRM_DEBUG("reloc with read/write non-GPU domains: "
373                           "obj %p target %d offset %d "
374                           "read %08x write %08x",
375                           obj, reloc->target_handle,
376                           (int) reloc->offset,
377                           reloc->read_domains,
378                           reloc->write_domain);
379                 return -EINVAL;
380         }
381
382         target_obj->pending_read_domains |= reloc->read_domains;
383         target_obj->pending_write_domain |= reloc->write_domain;
384
385         /* If the relocation already has the right value in it, no
386          * more work needs to be done.
387          */
388         if (target_offset == reloc->presumed_offset)
389                 return 0;
390
391         /* Check that the relocation address is valid... */
392         if (unlikely(reloc->offset >
393                 obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
394                 DRM_DEBUG("Relocation beyond object bounds: "
395                           "obj %p target %d offset %d size %d.\n",
396                           obj, reloc->target_handle,
397                           (int) reloc->offset,
398                           (int) obj->base.size);
399                 return -EINVAL;
400         }
401         if (unlikely(reloc->offset & 3)) {
402                 DRM_DEBUG("Relocation not 4-byte aligned: "
403                           "obj %p target %d offset %d.\n",
404                           obj, reloc->target_handle,
405                           (int) reloc->offset);
406                 return -EINVAL;
407         }
408
409         /* We can't wait for rendering with pagefaults disabled */
410         if (obj->active && in_atomic())
411                 return -EFAULT;
412
413         reloc->delta += target_offset;
414         if (use_cpu_reloc(obj))
415                 ret = relocate_entry_cpu(obj, reloc);
416         else
417                 ret = relocate_entry_gtt(obj, reloc);
418
419         if (ret)
420                 return ret;
421
422         /* and update the user's relocation entry */
423         reloc->presumed_offset = target_offset;
424
425         return 0;
426 }
427
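/*
 * Fast-path relocation processing: copy the user relocation entries in
 * chunks onto the stack with the non-faulting __copy_from_user_inatomic
 * (pagefaults are disabled by the caller), apply each one, and write any
 * updated presumed offsets back to userspace. Returns -EFAULT to trigger
 * the slow path if a user page is not resident.
 */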
428 static int
429 i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
430                                  struct eb_vmas *eb)
431 {
432 #define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
433         struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
434         struct drm_i915_gem_relocation_entry __user *user_relocs;
435         struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
436         int remain, ret;
437
438         user_relocs = to_user_ptr(entry->relocs_ptr);
439
440         remain = entry->relocation_count;
441         while (remain) {
442                 struct drm_i915_gem_relocation_entry *r = stack_reloc;
443                 int count = remain;
444                 if (count > ARRAY_SIZE(stack_reloc))
445                         count = ARRAY_SIZE(stack_reloc);
446                 remain -= count;
447
448                 if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
449                         return -EFAULT;
450
451                 do {
452                         u64 offset = r->presumed_offset;
453
454                         ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r,
455                                                                  vma->vm);
456                         if (ret)
457                                 return ret;
458
459                         if (r->presumed_offset != offset &&
460                             __copy_to_user_inatomic(&user_relocs->presumed_offset,
461                                                     &r->presumed_offset,
462                                                     sizeof(r->presumed_offset))) {
463                                 return -EFAULT;
464                         }
465
466                         user_relocs++;
467                         r++;
468                 } while (--count);
469         }
470
471         return 0;
472 #undef N_RELOC
473 }
474
475 static int
476 i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
477                                       struct eb_vmas *eb,
478                                       struct drm_i915_gem_relocation_entry *relocs)
479 {
480         const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
481         int i, ret;
482
483         for (i = 0; i < entry->relocation_count; i++) {
484                 ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i],
485                                                          vma->vm);
486                 if (ret)
487                         return ret;
488         }
489
490         return 0;
491 }
492
493 static int
494 i915_gem_execbuffer_relocate(struct eb_vmas *eb)
495 {
496         struct i915_vma *vma;
497         int ret = 0;
498
499         /* This is the fast path and we cannot handle a pagefault whilst
500          * holding the struct mutex lest the user pass in the relocations
501          * contained within an mmaped bo. In such a case the page
502          * fault handler would call i915_gem_fault() and we would try to
503          * acquire the struct mutex again. Obviously this is bad and so
504          * lockdep complains vehemently.
505          */
506         pagefault_disable();
507         list_for_each_entry(vma, &eb->vmas, exec_list) {
508                 ret = i915_gem_execbuffer_relocate_vma(vma, eb);
509                 if (ret)
510                         break;
511         }
512         pagefault_enable();
513
514         return ret;
515 }
516
517 static int
518 need_reloc_mappable(struct i915_vma *vma)
519 {
520         struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
521         return entry->relocation_count && !use_cpu_reloc(vma->obj) &&
522                 i915_is_ggtt(vma->vm);
523 }
524
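/*
 * Pin a single vma for execution: bind it into its address space (forcing
 * a mappable placement if a fence or GTT relocation is required), grab and
 * pin a fence register when requested, make sure the aliasing ppgtt and
 * global GTT bindings exist where needed, and note whether the object
 * moved so that its relocations must be rewritten.
 */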
525 static int
526 i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
527                                 struct intel_ring_buffer *ring,
528                                 bool *need_reloc)
529 {
530         struct drm_i915_private *dev_priv = ring->dev->dev_private;
531         struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
532         bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
533         bool need_fence, need_mappable;
534         struct drm_i915_gem_object *obj = vma->obj;
535         int ret;
536
537         need_fence =
538                 has_fenced_gpu_access &&
539                 entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
540                 obj->tiling_mode != I915_TILING_NONE;
541         need_mappable = need_fence || need_reloc_mappable(vma);
542
543         ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, need_mappable,
544                                   false);
545         if (ret)
546                 return ret;
547
548         entry->flags |= __EXEC_OBJECT_HAS_PIN;
549
550         if (has_fenced_gpu_access) {
551                 if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
552                         ret = i915_gem_object_get_fence(obj);
553                         if (ret)
554                                 return ret;
555
556                         if (i915_gem_object_pin_fence(obj))
557                                 entry->flags |= __EXEC_OBJECT_HAS_FENCE;
558
559                         obj->pending_fenced_gpu_access = true;
560                 }
561         }
562
563         /* Ensure ppgtt mapping exists if needed */
564         if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
565                 i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
566                                        obj, obj->cache_level);
567
568                 obj->has_aliasing_ppgtt_mapping = 1;
569         }
570
571         if (entry->offset != vma->node.start) {
572                 entry->offset = vma->node.start;
573                 *need_reloc = true;
574         }
575
576         if (entry->flags & EXEC_OBJECT_WRITE) {
577                 obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
578                 obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
579         }
580
581         if (entry->flags & EXEC_OBJECT_NEEDS_GTT &&
582             !obj->has_global_gtt_mapping)
583                 i915_gem_gtt_bind_object(obj, obj->cache_level);
584
585         return 0;
586 }
587
588 static int
589 i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
590                             struct list_head *vmas,
591                             bool *need_relocs)
592 {
593         struct drm_i915_gem_object *obj;
594         struct i915_vma *vma;
595         struct i915_address_space *vm;
596         struct list_head ordered_vmas;
597         bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
598         int retry;
599
600         if (list_empty(vmas))
601                 return 0;
602
603         vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
604
605         INIT_LIST_HEAD(&ordered_vmas);
606         while (!list_empty(vmas)) {
607                 struct drm_i915_gem_exec_object2 *entry;
608                 bool need_fence, need_mappable;
609
610                 vma = list_first_entry(vmas, struct i915_vma, exec_list);
611                 obj = vma->obj;
612                 entry = vma->exec_entry;
613
614                 need_fence =
615                         has_fenced_gpu_access &&
616                         entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
617                         obj->tiling_mode != I915_TILING_NONE;
618                 need_mappable = need_fence || need_reloc_mappable(vma);
619
620                 if (need_mappable)
621                         list_move(&vma->exec_list, &ordered_vmas);
622                 else
623                         list_move_tail(&vma->exec_list, &ordered_vmas);
624
625                 obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
626                 obj->base.pending_write_domain = 0;
627                 obj->pending_fenced_gpu_access = false;
628         }
629         list_splice(&ordered_vmas, vmas);
630
631         /* Attempt to pin all of the buffers into the GTT.
632          * This is done in 3 phases:
633          *
634          * 1a. Unbind all objects that do not match the GTT constraints for
635          *     the execbuffer (fenceable, mappable, alignment etc).
636          * 1b. Increment pin count for already bound objects.
637          * 2.  Bind new objects.
638          * 3.  Decrement pin count.
639          *
640          * This avoids unnecessary unbinding of later objects in order to make
641          * room for the earlier objects *unless* we need to defragment.
642          */
643         retry = 0;
644         do {
645                 int ret = 0;
646
647                 /* Unbind any ill-fitting objects or pin. */
648                 list_for_each_entry(vma, vmas, exec_list) {
649                         struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
650                         bool need_fence, need_mappable;
651
652                         obj = vma->obj;
653
654                         if (!drm_mm_node_allocated(&vma->node))
655                                 continue;
656
657                         need_fence =
658                                 has_fenced_gpu_access &&
659                                 entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
660                                 obj->tiling_mode != I915_TILING_NONE;
661                         need_mappable = need_fence || need_reloc_mappable(vma);
662
663                         WARN_ON((need_mappable || need_fence) &&
664                                !i915_is_ggtt(vma->vm));
665
666                         if ((entry->alignment &&
667                              vma->node.start & (entry->alignment - 1)) ||
668                             (need_mappable && !obj->map_and_fenceable))
669                                 ret = i915_vma_unbind(vma);
670                         else
671                                 ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
672                         if (ret)
673                                 goto err;
674                 }
675
676                 /* Bind fresh objects */
677                 list_for_each_entry(vma, vmas, exec_list) {
678                         if (drm_mm_node_allocated(&vma->node))
679                                 continue;
680
681                         ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
682                         if (ret)
683                                 goto err;
684                 }
685
686 err:
687                 if (ret != -ENOSPC || retry++)
688                         return ret;
689
690                 /* Decrement pin count for bound objects */
691                 list_for_each_entry(vma, vmas, exec_list)
692                         i915_gem_execbuffer_unreserve_vma(vma);
693
694                 ret = i915_gem_evict_vm(vm, true);
695                 if (ret)
696                         return ret;
697         } while (1);
698 }
699
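/*
 * Slow path for applying relocations when the atomic fast path faulted:
 * drop every reference and the struct mutex, copy all relocation entries
 * from userspace with pagefaults enabled (invalidating the presumed
 * offsets so they are always reprocessed), then retake the lock, look the
 * buffers up again, reserve them and apply the relocations from the
 * kernel copy.
 */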
700 static int
701 i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
702                                   struct drm_i915_gem_execbuffer2 *args,
703                                   struct drm_file *file,
704                                   struct intel_ring_buffer *ring,
705                                   struct eb_vmas *eb,
706                                   struct drm_i915_gem_exec_object2 *exec)
707 {
708         struct drm_i915_gem_relocation_entry *reloc;
709         struct i915_address_space *vm;
710         struct i915_vma *vma;
711         bool need_relocs;
712         int *reloc_offset;
713         int i, total, ret;
714         unsigned count = args->buffer_count;
715
716         if (WARN_ON(list_empty(&eb->vmas)))
717                 return 0;
718
719         vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;
720
721         /* We may process another execbuffer during the unlock... */
722         while (!list_empty(&eb->vmas)) {
723                 vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
724                 list_del_init(&vma->exec_list);
725                 i915_gem_execbuffer_unreserve_vma(vma);
726                 drm_gem_object_unreference(&vma->obj->base);
727         }
728
729         mutex_unlock(&dev->struct_mutex);
730
731         total = 0;
732         for (i = 0; i < count; i++)
733                 total += exec[i].relocation_count;
734
735         reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
736         reloc = drm_malloc_ab(total, sizeof(*reloc));
737         if (reloc == NULL || reloc_offset == NULL) {
738                 drm_free_large(reloc);
739                 drm_free_large(reloc_offset);
740                 mutex_lock(&dev->struct_mutex);
741                 return -ENOMEM;
742         }
743
744         total = 0;
745         for (i = 0; i < count; i++) {
746                 struct drm_i915_gem_relocation_entry __user *user_relocs;
747                 u64 invalid_offset = (u64)-1;
748                 int j;
749
750                 user_relocs = to_user_ptr(exec[i].relocs_ptr);
751
752                 if (copy_from_user(reloc+total, user_relocs,
753                                    exec[i].relocation_count * sizeof(*reloc))) {
754                         ret = -EFAULT;
755                         mutex_lock(&dev->struct_mutex);
756                         goto err;
757                 }
758
759                 /* As we do not update the known relocation offsets after
760                  * relocating (due to the complexities in lock handling),
761                  * we need to mark them as invalid now so that we force the
762                  * relocation processing next time. Just in case the target
763                  * object is evicted and then rebound into its old
764                  * presumed_offset before the next execbuffer - if that
765                  * happened we would make the mistake of assuming that the
766                  * relocations were valid.
767                  */
768                 for (j = 0; j < exec[i].relocation_count; j++) {
769                         if (copy_to_user(&user_relocs[j].presumed_offset,
770                                          &invalid_offset,
771                                          sizeof(invalid_offset))) {
772                                 ret = -EFAULT;
773                                 mutex_lock(&dev->struct_mutex);
774                                 goto err;
775                         }
776                 }
777
778                 reloc_offset[i] = total;
779                 total += exec[i].relocation_count;
780         }
781
782         ret = i915_mutex_lock_interruptible(dev);
783         if (ret) {
784                 mutex_lock(&dev->struct_mutex);
785                 goto err;
786         }
787
788         /* reacquire the objects */
789         eb_reset(eb);
790         ret = eb_lookup_vmas(eb, exec, args, vm, file);
791         if (ret)
792                 goto err;
793
794         need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
795         ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
796         if (ret)
797                 goto err;
798
799         list_for_each_entry(vma, &eb->vmas, exec_list) {
800                 int offset = vma->exec_entry - exec;
801                 ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb,
802                                                             reloc + reloc_offset[offset]);
803                 if (ret)
804                         goto err;
805         }
806
807         /* Leave the user relocations as they are; this is the painfully slow path,
808          * and we want to avoid the complication of dropping the lock whilst
809          * having buffers reserved in the aperture and so causing spurious
810          * ENOSPC for random operations.
811          */
812
813 err:
814         drm_free_large(reloc);
815         drm_free_large(reloc_offset);
816         return ret;
817 }
818
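/*
 * Flush and synchronise all objects before the batch runs: wait for any
 * outstanding rendering on other rings, clflush CPU-dirty objects, flush
 * the chipset and GTT write domains and invalidate the GPU caches.
 */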
819 static int
820 i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
821                                 struct list_head *vmas)
822 {
823         struct i915_vma *vma;
824         uint32_t flush_domains = 0;
825         bool flush_chipset = false;
826         int ret;
827
828         list_for_each_entry(vma, vmas, exec_list) {
829                 struct drm_i915_gem_object *obj = vma->obj;
830                 ret = i915_gem_object_sync(obj, ring);
831                 if (ret)
832                         return ret;
833
834                 if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
835                         flush_chipset |= i915_gem_clflush_object(obj, false);
836
837                 flush_domains |= obj->base.write_domain;
838         }
839
840         if (flush_chipset)
841                 i915_gem_chipset_flush(ring->dev);
842
843         if (flush_domains & I915_GEM_DOMAIN_GTT)
844                 wmb();
845
846         /* Unconditionally invalidate gpu caches and ensure that we do flush
847          * any residual writes from the previous batch.
848          */
849         return intel_ring_invalidate_all_caches(ring);
850 }
851
852 static bool
853 i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
854 {
855         if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
856                 return false;
857
858         return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
859 }
860
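/*
 * Sanity-check the user-supplied exec object array before taking any
 * locks: reject unknown per-object flags, guard against overflow of the
 * total relocation count, and verify (and optionally prefault) that each
 * relocation array is writable, since presumed offsets may be written
 * back during execution.
 */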
861 static int
862 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
863                    int count)
864 {
865         int i;
866         unsigned relocs_total = 0;
867         unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
868
869         for (i = 0; i < count; i++) {
870                 char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
871                 int length; /* limited by fault_in_pages_readable() */
872
873                 if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS)
874                         return -EINVAL;
875
876                 /* First check for malicious input causing overflow in
877                  * the worst case where we need to allocate the entire
878                  * relocation tree as a single array.
879                  */
880                 if (exec[i].relocation_count > relocs_max - relocs_total)
881                         return -EINVAL;
882                 relocs_total += exec[i].relocation_count;
883
884                 length = exec[i].relocation_count *
885                         sizeof(struct drm_i915_gem_relocation_entry);
886                 /*
887                  * We must check that the entire relocation array is safe
888                  * to read, but since we may need to update the presumed
889                  * offsets during execution, check for full write access.
890                  */
891                 if (!access_ok(VERIFY_WRITE, ptr, length))
892                         return -EFAULT;
893
894                 if (likely(!i915_prefault_disable)) {
895                         if (fault_in_multipages_readable(ptr, length))
896                                 return -EFAULT;
897                 }
898         }
899
900         return 0;
901 }
902
903 static int
904 i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
905                           const u32 ctx_id)
906 {
907         struct i915_ctx_hang_stats *hs;
908
909         hs = i915_gem_context_get_hang_stats(dev, file, ctx_id);
910         if (IS_ERR(hs))
911                 return PTR_ERR(hs);
912
913         if (hs->banned) {
914                 DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
915                 return -EIO;
916         }
917
918         return 0;
919 }
920
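/*
 * After a successful dispatch, commit the pending read/write domains,
 * move every vma onto the ring's active list and record the write seqno
 * for dirty objects (marking framebuffers busy for pinned scanouts).
 */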
921 static void
922 i915_gem_execbuffer_move_to_active(struct list_head *vmas,
923                                    struct intel_ring_buffer *ring)
924 {
925         struct i915_vma *vma;
926
927         list_for_each_entry(vma, vmas, exec_list) {
928                 struct drm_i915_gem_object *obj = vma->obj;
929                 u32 old_read = obj->base.read_domains;
930                 u32 old_write = obj->base.write_domain;
931
932                 obj->base.write_domain = obj->base.pending_write_domain;
933                 if (obj->base.write_domain == 0)
934                         obj->base.pending_read_domains |= obj->base.read_domains;
935                 obj->base.read_domains = obj->base.pending_read_domains;
936                 obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
937
938                 i915_vma_move_to_active(vma, ring);
939                 if (obj->base.write_domain) {
940                         obj->dirty = 1;
941                         obj->last_write_seqno = intel_ring_get_seqno(ring);
942                         if (obj->pin_count) /* check for potential scanout */
943                                 intel_mark_fb_busy(obj, ring);
944                 }
945
946                 trace_i915_gem_object_change_domain(obj, old_read, old_write);
947         }
948 }
949
950 static void
951 i915_gem_execbuffer_retire_commands(struct drm_device *dev,
952                                     struct drm_file *file,
953                                     struct intel_ring_buffer *ring,
954                                     struct drm_i915_gem_object *obj)
955 {
956         /* Unconditionally force add_request to emit a full flush. */
957         ring->gpu_caches_dirty = true;
958
959         /* Add a breadcrumb for the completion of the batch buffer */
960         (void)__i915_add_request(ring, file, obj, NULL);
961 }
962
963 static int
964 i915_reset_gen7_sol_offsets(struct drm_device *dev,
965                             struct intel_ring_buffer *ring)
966 {
967         drm_i915_private_t *dev_priv = dev->dev_private;
968         int ret, i;
969
970         if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS])
971                 return 0;
972
973         ret = intel_ring_begin(ring, 4 * 3);
974         if (ret)
975                 return ret;
976
977         for (i = 0; i < 4; i++) {
978                 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
979                 intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
980                 intel_ring_emit(ring, 0);
981         }
982
983         intel_ring_advance(ring);
984
985         return 0;
986 }
987
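/*
 * Main execbuffer path shared by both ioctls: validate the request,
 * select the target ring and constants mode, copy in any cliprects,
 * look up and reserve all buffers, apply relocations (falling back to
 * the slow path on -EFAULT), flush caches, switch context and dispatch
 * the batch, finally moving the buffers onto the active list and
 * emitting a request.
 */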
988 static int
989 i915_gem_do_execbuffer(struct drm_device *dev, void *data,
990                        struct drm_file *file,
991                        struct drm_i915_gem_execbuffer2 *args,
992                        struct drm_i915_gem_exec_object2 *exec,
993                        struct i915_address_space *vm)
994 {
995         drm_i915_private_t *dev_priv = dev->dev_private;
996         struct eb_vmas *eb;
997         struct drm_i915_gem_object *batch_obj;
998         struct drm_clip_rect *cliprects = NULL;
999         struct intel_ring_buffer *ring;
1000         const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
1001         u32 exec_start, exec_len;
1002         u32 mask, flags;
1003         int ret, mode, i;
1004         bool need_relocs;
1005
1006         if (!i915_gem_check_execbuffer(args))
1007                 return -EINVAL;
1008
1009         ret = validate_exec_list(exec, args->buffer_count);
1010         if (ret)
1011                 return ret;
1012
1013         flags = 0;
1014         if (args->flags & I915_EXEC_SECURE) {
1015                 if (!file->is_master || !capable(CAP_SYS_ADMIN))
1016                         return -EPERM;
1017
1018                 flags |= I915_DISPATCH_SECURE;
1019         }
1020         if (args->flags & I915_EXEC_IS_PINNED)
1021                 flags |= I915_DISPATCH_PINNED;
1022
1023         switch (args->flags & I915_EXEC_RING_MASK) {
1024         case I915_EXEC_DEFAULT:
1025         case I915_EXEC_RENDER:
1026                 ring = &dev_priv->ring[RCS];
1027                 break;
1028         case I915_EXEC_BSD:
1029                 ring = &dev_priv->ring[VCS];
1030                 if (ctx_id != DEFAULT_CONTEXT_ID) {
1031                         DRM_DEBUG("Ring %s doesn't support contexts\n",
1032                                   ring->name);
1033                         return -EPERM;
1034                 }
1035                 break;
1036         case I915_EXEC_BLT:
1037                 ring = &dev_priv->ring[BCS];
1038                 if (ctx_id != DEFAULT_CONTEXT_ID) {
1039                         DRM_DEBUG("Ring %s doesn't support contexts\n",
1040                                   ring->name);
1041                         return -EPERM;
1042                 }
1043                 break;
1044         case I915_EXEC_VEBOX:
1045                 ring = &dev_priv->ring[VECS];
1046                 if (ctx_id != DEFAULT_CONTEXT_ID) {
1047                         DRM_DEBUG("Ring %s doesn't support contexts\n",
1048                                   ring->name);
1049                         return -EPERM;
1050                 }
1051                 break;
1052
1053         default:
1054                 DRM_DEBUG("execbuf with unknown ring: %d\n",
1055                           (int)(args->flags & I915_EXEC_RING_MASK));
1056                 return -EINVAL;
1057         }
1058         if (!intel_ring_initialized(ring)) {
1059                 DRM_DEBUG("execbuf with invalid ring: %d\n",
1060                           (int)(args->flags & I915_EXEC_RING_MASK));
1061                 return -EINVAL;
1062         }
1063
1064         mode = args->flags & I915_EXEC_CONSTANTS_MASK;
1065         mask = I915_EXEC_CONSTANTS_MASK;
1066         switch (mode) {
1067         case I915_EXEC_CONSTANTS_REL_GENERAL:
1068         case I915_EXEC_CONSTANTS_ABSOLUTE:
1069         case I915_EXEC_CONSTANTS_REL_SURFACE:
1070                 if (ring == &dev_priv->ring[RCS] &&
1071                     mode != dev_priv->relative_constants_mode) {
1072                         if (INTEL_INFO(dev)->gen < 4)
1073                                 return -EINVAL;
1074
1075                         if (INTEL_INFO(dev)->gen > 5 &&
1076                             mode == I915_EXEC_CONSTANTS_REL_SURFACE)
1077                                 return -EINVAL;
1078
1079                         /* The HW changed the meaning of this bit on gen6 */
1080                         if (INTEL_INFO(dev)->gen >= 6)
1081                                 mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
1082                 }
1083                 break;
1084         default:
1085                 DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
1086                 return -EINVAL;
1087         }
1088
1089         if (args->buffer_count < 1) {
1090                 DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
1091                 return -EINVAL;
1092         }
1093
1094         if (args->num_cliprects != 0) {
1095                 if (ring != &dev_priv->ring[RCS]) {
1096                         DRM_DEBUG("clip rectangles are only valid with the render ring\n");
1097                         return -EINVAL;
1098                 }
1099
1100                 if (INTEL_INFO(dev)->gen >= 5) {
1101                         DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
1102                         return -EINVAL;
1103                 }
1104
1105                 if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
1106                         DRM_DEBUG("execbuf with %u cliprects\n",
1107                                   args->num_cliprects);
1108                         return -EINVAL;
1109                 }
1110
1111                 cliprects = kcalloc(args->num_cliprects,
1112                                     sizeof(*cliprects),
1113                                     GFP_KERNEL);
1114                 if (cliprects == NULL) {
1115                         ret = -ENOMEM;
1116                         goto pre_mutex_err;
1117                 }
1118
1119                 if (copy_from_user(cliprects,
1120                                    to_user_ptr(args->cliprects_ptr),
1121                                    sizeof(*cliprects)*args->num_cliprects)) {
1122                         ret = -EFAULT;
1123                         goto pre_mutex_err;
1124                 }
1125         }
1126
1127         intel_runtime_pm_get(dev_priv);
1128
1129         ret = i915_mutex_lock_interruptible(dev);
1130         if (ret)
1131                 goto pre_mutex_err;
1132
1133         if (dev_priv->ums.mm_suspended) {
1134                 mutex_unlock(&dev->struct_mutex);
1135                 ret = -EBUSY;
1136                 goto pre_mutex_err;
1137         }
1138
1139         ret = i915_gem_validate_context(dev, file, ctx_id);
1140         if (ret) {
1141                 mutex_unlock(&dev->struct_mutex);
1142                 goto pre_mutex_err;
1143         }
1144
1145         eb = eb_create(args);
1146         if (eb == NULL) {
1147                 mutex_unlock(&dev->struct_mutex);
1148                 ret = -ENOMEM;
1149                 goto pre_mutex_err;
1150         }
1151
1152         /* Look up object handles */
1153         ret = eb_lookup_vmas(eb, exec, args, vm, file);
1154         if (ret)
1155                 goto err;
1156
1157         /* take note of the batch buffer before we might reorder the lists */
1158         batch_obj = list_entry(eb->vmas.prev, struct i915_vma, exec_list)->obj;
1159
1160         /* Move the objects en-masse into the GTT, evicting if necessary. */
1161         need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
1162         ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
1163         if (ret)
1164                 goto err;
1165
1166         /* The objects are in their final locations, apply the relocations. */
1167         if (need_relocs)
1168                 ret = i915_gem_execbuffer_relocate(eb);
1169         if (ret) {
1170                 if (ret == -EFAULT) {
1171                         ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
1172                                                                 eb, exec);
1173                         BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1174                 }
1175                 if (ret)
1176                         goto err;
1177         }
1178
1179         /* Set the pending read domains for the batch buffer to COMMAND */
1180         if (batch_obj->base.pending_write_domain) {
1181                 DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
1182                 ret = -EINVAL;
1183                 goto err;
1184         }
1185         batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
1186
1187         /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
1188          * batch" bit. Hence we need to pin secure batches into the global gtt.
1189          * hsw should have this fixed, but bdw mucks it up again. */
1190         if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
1191                 i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);
1192
1193         ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->vmas);
1194         if (ret)
1195                 goto err;
1196
1197         ret = i915_switch_context(ring, file, ctx_id);
1198         if (ret)
1199                 goto err;
1200
1201         if (ring == &dev_priv->ring[RCS] &&
1202             mode != dev_priv->relative_constants_mode) {
1203                 ret = intel_ring_begin(ring, 4);
1204                 if (ret)
1205                         goto err;
1206
1207                 intel_ring_emit(ring, MI_NOOP);
1208                 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
1209                 intel_ring_emit(ring, INSTPM);
1210                 intel_ring_emit(ring, mask << 16 | mode);
1211                 intel_ring_advance(ring);
1212
1213                 dev_priv->relative_constants_mode = mode;
1214         }
1215
1216         if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
1217                 ret = i915_reset_gen7_sol_offsets(dev, ring);
1218                 if (ret)
1219                         goto err;
1220         }
1221
1222         exec_start = i915_gem_obj_offset(batch_obj, vm) +
1223                 args->batch_start_offset;
1224         exec_len = args->batch_len;
1225         if (cliprects) {
1226                 for (i = 0; i < args->num_cliprects; i++) {
1227                         ret = i915_emit_box(dev, &cliprects[i],
1228                                             args->DR1, args->DR4);
1229                         if (ret)
1230                                 goto err;
1231
1232                         ret = ring->dispatch_execbuffer(ring,
1233                                                         exec_start, exec_len,
1234                                                         flags);
1235                         if (ret)
1236                                 goto err;
1237                 }
1238         } else {
1239                 ret = ring->dispatch_execbuffer(ring,
1240                                                 exec_start, exec_len,
1241                                                 flags);
1242                 if (ret)
1243                         goto err;
1244         }
1245
1246         trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
1247
1248         i915_gem_execbuffer_move_to_active(&eb->vmas, ring);
1249         i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
1250
1251 err:
1252         eb_destroy(eb);
1253
1254         mutex_unlock(&dev->struct_mutex);
1255
1256 pre_mutex_err:
1257         kfree(cliprects);
1258
1259         /* intel_gpu_busy should also get a ref, so it will free when the device
1260          * is really idle. */
1261         intel_runtime_pm_put(dev_priv);
1262         return ret;
1263 }
1264
1265 /*
1266  * Legacy execbuffer just creates an exec2 list from the original exec object
1267  * list array and passes it to the real function.
1268  */
1269 int
1270 i915_gem_execbuffer(struct drm_device *dev, void *data,
1271                     struct drm_file *file)
1272 {
1273         struct drm_i915_private *dev_priv = dev->dev_private;
1274         struct drm_i915_gem_execbuffer *args = data;
1275         struct drm_i915_gem_execbuffer2 exec2;
1276         struct drm_i915_gem_exec_object *exec_list = NULL;
1277         struct drm_i915_gem_exec_object2 *exec2_list = NULL;
1278         int ret, i;
1279
1280         if (args->buffer_count < 1) {
1281                 DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
1282                 return -EINVAL;
1283         }
1284
1285         /* Copy in the exec list from userland */
1286         exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
1287         exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
1288         if (exec_list == NULL || exec2_list == NULL) {
1289                 DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
1290                           args->buffer_count);
1291                 drm_free_large(exec_list);
1292                 drm_free_large(exec2_list);
1293                 return -ENOMEM;
1294         }
1295         ret = copy_from_user(exec_list,
1296                              to_user_ptr(args->buffers_ptr),
1297                              sizeof(*exec_list) * args->buffer_count);
1298         if (ret != 0) {
1299                 DRM_DEBUG("copy %d exec entries failed %d\n",
1300                           args->buffer_count, ret);
1301                 drm_free_large(exec_list);
1302                 drm_free_large(exec2_list);
1303                 return -EFAULT;
1304         }
1305
1306         for (i = 0; i < args->buffer_count; i++) {
1307                 exec2_list[i].handle = exec_list[i].handle;
1308                 exec2_list[i].relocation_count = exec_list[i].relocation_count;
1309                 exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
1310                 exec2_list[i].alignment = exec_list[i].alignment;
1311                 exec2_list[i].offset = exec_list[i].offset;
1312                 if (INTEL_INFO(dev)->gen < 4)
1313                         exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
1314                 else
1315                         exec2_list[i].flags = 0;
1316         }
1317
1318         exec2.buffers_ptr = args->buffers_ptr;
1319         exec2.buffer_count = args->buffer_count;
1320         exec2.batch_start_offset = args->batch_start_offset;
1321         exec2.batch_len = args->batch_len;
1322         exec2.DR1 = args->DR1;
1323         exec2.DR4 = args->DR4;
1324         exec2.num_cliprects = args->num_cliprects;
1325         exec2.cliprects_ptr = args->cliprects_ptr;
1326         exec2.flags = I915_EXEC_RENDER;
1327         i915_execbuffer2_set_context_id(exec2, 0);
1328
1329         ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list,
1330                                      &dev_priv->gtt.base);
1331         if (!ret) {
1332                 /* Copy the new buffer offsets back to the user's exec list. */
1333                 for (i = 0; i < args->buffer_count; i++)
1334                         exec_list[i].offset = exec2_list[i].offset;
1335                 /* ... and back out to userspace */
1336                 ret = copy_to_user(to_user_ptr(args->buffers_ptr),
1337                                    exec_list,
1338                                    sizeof(*exec_list) * args->buffer_count);
1339                 if (ret) {
1340                         ret = -EFAULT;
1341                         DRM_DEBUG("failed to copy %d exec entries "
1342                                   "back to user (%d)\n",
1343                                   args->buffer_count, ret);
1344                 }
1345         }
1346
1347         drm_free_large(exec_list);
1348         drm_free_large(exec2_list);
1349         return ret;
1350 }
1351
1352 int
1353 i915_gem_execbuffer2(struct drm_device *dev, void *data,
1354                      struct drm_file *file)
1355 {
1356         struct drm_i915_private *dev_priv = dev->dev_private;
1357         struct drm_i915_gem_execbuffer2 *args = data;
1358         struct drm_i915_gem_exec_object2 *exec2_list = NULL;
1359         int ret;
1360
1361         if (args->buffer_count < 1 ||
1362             args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
1363                 DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
1364                 return -EINVAL;
1365         }
1366
1367         exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
1368                              GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
1369         if (exec2_list == NULL)
1370                 exec2_list = drm_malloc_ab(sizeof(*exec2_list),
1371                                            args->buffer_count);
1372         if (exec2_list == NULL) {
1373                 DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
1374                           args->buffer_count);
1375                 return -ENOMEM;
1376         }
1377         ret = copy_from_user(exec2_list,
1378                              to_user_ptr(args->buffers_ptr),
1379                              sizeof(*exec2_list) * args->buffer_count);
1380         if (ret != 0) {
1381                 DRM_DEBUG("copy %d exec entries failed %d\n",
1382                           args->buffer_count, ret);
1383                 drm_free_large(exec2_list);
1384                 return -EFAULT;
1385         }
1386
1387         ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list,
1388                                      &dev_priv->gtt.base);
1389         if (!ret) {
1390                 /* Copy the new buffer offsets back to the user's exec list. */
1391                 ret = copy_to_user(to_user_ptr(args->buffers_ptr),
1392                                    exec2_list,
1393                                    sizeof(*exec2_list) * args->buffer_count);
1394                 if (ret) {
1395                         ret = -EFAULT;
1396                         DRM_DEBUG("failed to copy %d exec entries "
1397                                   "back to user (%d)\n",
1398                                   args->buffer_count, ret);
1399                 }
1400         }
1401
1402         drm_free_large(exec2_list);
1403         return ret;
1404 }