1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *
26  */
27
28 #include <drm/drmP.h>
29 #include <drm/i915_drm.h>
30 #include "i915_drv.h"
31 #include "i915_trace.h"
32 #include "intel_drv.h"
33 #include <linux/shmem_fs.h>
34 #include <linux/slab.h>
35 #include <linux/swap.h>
36 #include <linux/pci.h>
37 #include <linux/dma-buf.h>
38
39 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
40 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
41 static __must_check int
42 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
43                            struct i915_address_space *vm,
44                            unsigned alignment,
45                            bool map_and_fenceable,
46                            bool nonblocking);
47 static int i915_gem_phys_pwrite(struct drm_device *dev,
48                                 struct drm_i915_gem_object *obj,
49                                 struct drm_i915_gem_pwrite *args,
50                                 struct drm_file *file);
51
52 static void i915_gem_write_fence(struct drm_device *dev, int reg,
53                                  struct drm_i915_gem_object *obj);
54 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
55                                          struct drm_i915_fence_reg *fence,
56                                          bool enable);
57
58 static int i915_gem_inactive_shrink(struct shrinker *shrinker,
59                                     struct shrink_control *sc);
60 static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
61 static void i915_gem_shrink_all(struct drm_i915_private *dev_priv);
62 static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
63
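/* CPU access to an object is coherent with the GPU either when the platform
 * has an LLC or when the object is not mapped uncached (I915_CACHE_NONE);
 * only uncached objects on non-LLC platforms need explicit clflushes. */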
64 static bool cpu_cache_is_coherent(struct drm_device *dev,
65                                   enum i915_cache_level level)
66 {
67         return HAS_LLC(dev) || level != I915_CACHE_NONE;
68 }
69
70 static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
71 {
72         if (obj->tiling_mode)
73                 i915_gem_release_mmap(obj);
74
75         /* As we do not have an associated fence register, we will force
76          * a tiling change if we ever need to acquire one.
77          */
78         obj->fence_dirty = false;
79         obj->fence_reg = I915_FENCE_REG_NONE;
80 }
81
82 /* some bookkeeping */
83 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
84                                   size_t size)
85 {
86         spin_lock(&dev_priv->mm.object_stat_lock);
87         dev_priv->mm.object_count++;
88         dev_priv->mm.object_memory += size;
89         spin_unlock(&dev_priv->mm.object_stat_lock);
90 }
91
92 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
93                                      size_t size)
94 {
95         spin_lock(&dev_priv->mm.object_stat_lock);
96         dev_priv->mm.object_count--;
97         dev_priv->mm.object_memory -= size;
98         spin_unlock(&dev_priv->mm.object_stat_lock);
99 }
100
101 static int
102 i915_gem_wait_for_error(struct i915_gpu_error *error)
103 {
104         int ret;
105
106 #define EXIT_COND (!i915_reset_in_progress(error) || \
107                    i915_terminally_wedged(error))
108         if (EXIT_COND)
109                 return 0;
110
111         /*
112          * Only wait 10 seconds for the gpu reset to complete to avoid hanging
113          * userspace. If it takes that long something really bad is going on and
114          * we should simply try to bail out and fail as gracefully as possible.
115          */
116         ret = wait_event_interruptible_timeout(error->reset_queue,
117                                                EXIT_COND,
118                                                10*HZ);
119         if (ret == 0) {
120                 DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
121                 return -EIO;
122         } else if (ret < 0) {
123                 return ret;
124         }
125 #undef EXIT_COND
126
127         return 0;
128 }
129
130 int i915_mutex_lock_interruptible(struct drm_device *dev)
131 {
132         struct drm_i915_private *dev_priv = dev->dev_private;
133         int ret;
134
135         ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
136         if (ret)
137                 return ret;
138
139         ret = mutex_lock_interruptible(&dev->struct_mutex);
140         if (ret)
141                 return ret;
142
143         WARN_ON(i915_verify_lists(dev));
144         return 0;
145 }
146
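/* An object is inactive when it is bound into some address space but is no
 * longer being used by the GPU (no outstanding rendering). */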
147 static inline bool
148 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
149 {
150         return i915_gem_obj_bound_any(obj) && !obj->active;
151 }
152
153 int
154 i915_gem_init_ioctl(struct drm_device *dev, void *data,
155                     struct drm_file *file)
156 {
157         struct drm_i915_private *dev_priv = dev->dev_private;
158         struct drm_i915_gem_init *args = data;
159
160         if (drm_core_check_feature(dev, DRIVER_MODESET))
161                 return -ENODEV;
162
163         if (args->gtt_start >= args->gtt_end ||
164             (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
165                 return -EINVAL;
166
167         /* GEM with user mode setting was never supported on ilk and later. */
168         if (INTEL_INFO(dev)->gen >= 5)
169                 return -ENODEV;
170
171         mutex_lock(&dev->struct_mutex);
172         i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
173                                   args->gtt_end);
174         dev_priv->gtt.mappable_end = args->gtt_end;
175         mutex_unlock(&dev->struct_mutex);
176
177         return 0;
178 }
179
180 int
181 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
182                             struct drm_file *file)
183 {
184         struct drm_i915_private *dev_priv = dev->dev_private;
185         struct drm_i915_gem_get_aperture *args = data;
186         struct drm_i915_gem_object *obj;
187         size_t pinned;
188
189         pinned = 0;
190         mutex_lock(&dev->struct_mutex);
191         list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
192                 if (obj->pin_count)
193                         pinned += i915_gem_obj_ggtt_size(obj);
194         mutex_unlock(&dev->struct_mutex);
195
196         args->aper_size = dev_priv->gtt.base.total;
197         args->aper_available_size = args->aper_size - pinned;
198
199         return 0;
200 }
201
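/* GEM object structs come from a driver-private slab cache and are returned
 * zeroed; i915_gem_object_free hands them back to the same cache. */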
202 void *i915_gem_object_alloc(struct drm_device *dev)
203 {
204         struct drm_i915_private *dev_priv = dev->dev_private;
205         return kmem_cache_alloc(dev_priv->slab, GFP_KERNEL | __GFP_ZERO);
206 }
207
208 void i915_gem_object_free(struct drm_i915_gem_object *obj)
209 {
210         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
211         kmem_cache_free(dev_priv->slab, obj);
212 }
213
214 static int
215 i915_gem_create(struct drm_file *file,
216                 struct drm_device *dev,
217                 uint64_t size,
218                 uint32_t *handle_p)
219 {
220         struct drm_i915_gem_object *obj;
221         int ret;
222         u32 handle;
223
224         size = roundup(size, PAGE_SIZE);
225         if (size == 0)
226                 return -EINVAL;
227
228         /* Allocate the new object */
229         obj = i915_gem_alloc_object(dev, size);
230         if (obj == NULL)
231                 return -ENOMEM;
232
233         ret = drm_gem_handle_create(file, &obj->base, &handle);
234         /* drop reference from allocate - handle holds it now */
235         drm_gem_object_unreference_unlocked(&obj->base);
236         if (ret)
237                 return ret;
238
239         *handle_p = handle;
240         return 0;
241 }
242
243 int
244 i915_gem_dumb_create(struct drm_file *file,
245                      struct drm_device *dev,
246                      struct drm_mode_create_dumb *args)
247 {
248         /* have to work out size/pitch and return them */
249         args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
250         args->size = args->pitch * args->height;
251         return i915_gem_create(file, dev,
252                                args->size, &args->handle);
253 }
254
255 int i915_gem_dumb_destroy(struct drm_file *file,
256                           struct drm_device *dev,
257                           uint32_t handle)
258 {
259         return drm_gem_handle_delete(file, handle);
260 }
261
262 /**
263  * Creates a new mm object and returns a handle to it.
264  */
265 int
266 i915_gem_create_ioctl(struct drm_device *dev, void *data,
267                       struct drm_file *file)
268 {
269         struct drm_i915_gem_create *args = data;
270
271         return i915_gem_create(file, dev,
272                                args->size, &args->handle);
273 }
274
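/* Copy helpers for bit-17-swizzled pages: on such pages the hardware flips
 * bit 6 of the address, so we copy cacheline by cacheline with the GPU-side
 * offset XORed with 64, swapping each pair of adjacent 64-byte cachelines to
 * undo the swizzle. */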
275 static inline int
276 __copy_to_user_swizzled(char __user *cpu_vaddr,
277                         const char *gpu_vaddr, int gpu_offset,
278                         int length)
279 {
280         int ret, cpu_offset = 0;
281
282         while (length > 0) {
283                 int cacheline_end = ALIGN(gpu_offset + 1, 64);
284                 int this_length = min(cacheline_end - gpu_offset, length);
285                 int swizzled_gpu_offset = gpu_offset ^ 64;
286
287                 ret = __copy_to_user(cpu_vaddr + cpu_offset,
288                                      gpu_vaddr + swizzled_gpu_offset,
289                                      this_length);
290                 if (ret)
291                         return ret + length;
292
293                 cpu_offset += this_length;
294                 gpu_offset += this_length;
295                 length -= this_length;
296         }
297
298         return 0;
299 }
300
301 static inline int
302 __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
303                           const char __user *cpu_vaddr,
304                           int length)
305 {
306         int ret, cpu_offset = 0;
307
308         while (length > 0) {
309                 int cacheline_end = ALIGN(gpu_offset + 1, 64);
310                 int this_length = min(cacheline_end - gpu_offset, length);
311                 int swizzled_gpu_offset = gpu_offset ^ 64;
312
313                 ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
314                                        cpu_vaddr + cpu_offset,
315                                        this_length);
316                 if (ret)
317                         return ret + length;
318
319                 cpu_offset += this_length;
320                 gpu_offset += this_length;
321                 length -= this_length;
322         }
323
324         return 0;
325 }
326
327 /* Per-page copy function for the shmem pread fastpath.
328  * Flushes invalid cachelines before reading the target if
329  * needs_clflush is set. */
330 static int
331 shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
332                  char __user *user_data,
333                  bool page_do_bit17_swizzling, bool needs_clflush)
334 {
335         char *vaddr;
336         int ret;
337
338         if (unlikely(page_do_bit17_swizzling))
339                 return -EINVAL;
340
341         vaddr = kmap_atomic(page);
342         if (needs_clflush)
343                 drm_clflush_virt_range(vaddr + shmem_page_offset,
344                                        page_length);
345         ret = __copy_to_user_inatomic(user_data,
346                                       vaddr + shmem_page_offset,
347                                       page_length);
348         kunmap_atomic(vaddr);
349
350         return ret ? -EFAULT : 0;
351 }
352
353 static void
354 shmem_clflush_swizzled_range(char *addr, unsigned long length,
355                              bool swizzled)
356 {
357         if (unlikely(swizzled)) {
358                 unsigned long start = (unsigned long) addr;
359                 unsigned long end = (unsigned long) addr + length;
360
361                 /* For swizzling simply ensure that we always flush both
362                  * channels. Lame, but simple and it works. Swizzled
363                  * pwrite/pread is far from a hotpath - current userspace
364                  * doesn't use it at all. */
365                 start = round_down(start, 128);
366                 end = round_up(end, 128);
367
368                 drm_clflush_virt_range((void *)start, end - start);
369         } else {
370                 drm_clflush_virt_range(addr, length);
371         }
372
373 }
374
375 /* Only difference to the fast-path function is that this can handle bit17
376  * and uses non-atomic copy and kmap functions. */
377 static int
378 shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
379                  char __user *user_data,
380                  bool page_do_bit17_swizzling, bool needs_clflush)
381 {
382         char *vaddr;
383         int ret;
384
385         vaddr = kmap(page);
386         if (needs_clflush)
387                 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
388                                              page_length,
389                                              page_do_bit17_swizzling);
390
391         if (page_do_bit17_swizzling)
392                 ret = __copy_to_user_swizzled(user_data,
393                                               vaddr, shmem_page_offset,
394                                               page_length);
395         else
396                 ret = __copy_to_user(user_data,
397                                      vaddr + shmem_page_offset,
398                                      page_length);
399         kunmap(page);
400
401         return ret ? -EFAULT : 0;
402 }
403
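/* Copy object contents out to userspace via the shmem backing pages. Each
 * page is tried with the atomic kmap fastpath first; on a fault or a
 * bit-17-swizzled page we drop struct_mutex and take the sleeping slowpath. */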
404 static int
405 i915_gem_shmem_pread(struct drm_device *dev,
406                      struct drm_i915_gem_object *obj,
407                      struct drm_i915_gem_pread *args,
408                      struct drm_file *file)
409 {
410         char __user *user_data;
411         ssize_t remain;
412         loff_t offset;
413         int shmem_page_offset, page_length, ret = 0;
414         int obj_do_bit17_swizzling, page_do_bit17_swizzling;
415         int prefaulted = 0;
416         int needs_clflush = 0;
417         struct sg_page_iter sg_iter;
418
419         user_data = to_user_ptr(args->data_ptr);
420         remain = args->size;
421
422         obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
423
424         if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
425                 /* If we're not in the cpu read domain, set ourselves into the gtt
426                  * read domain and manually flush cachelines (if required). This
427                  * optimizes for the case when the gpu will dirty the data
428                  * anyway again before the next pread happens. */
429                 needs_clflush = !cpu_cache_is_coherent(dev, obj->cache_level);
430                 if (i915_gem_obj_bound_any(obj)) {
431                         ret = i915_gem_object_set_to_gtt_domain(obj, false);
432                         if (ret)
433                                 return ret;
434                 }
435         }
436
437         ret = i915_gem_object_get_pages(obj);
438         if (ret)
439                 return ret;
440
441         i915_gem_object_pin_pages(obj);
442
443         offset = args->offset;
444
445         for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
446                          offset >> PAGE_SHIFT) {
447                 struct page *page = sg_page_iter_page(&sg_iter);
448
449                 if (remain <= 0)
450                         break;
451
452                 /* Operation in this page
453                  *
454                  * shmem_page_offset = offset within page in shmem file
455                  * page_length = bytes to copy for this page
456                  */
457                 shmem_page_offset = offset_in_page(offset);
458                 page_length = remain;
459                 if ((shmem_page_offset + page_length) > PAGE_SIZE)
460                         page_length = PAGE_SIZE - shmem_page_offset;
461
462                 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
463                         (page_to_phys(page) & (1 << 17)) != 0;
464
465                 ret = shmem_pread_fast(page, shmem_page_offset, page_length,
466                                        user_data, page_do_bit17_swizzling,
467                                        needs_clflush);
468                 if (ret == 0)
469                         goto next_page;
470
471                 mutex_unlock(&dev->struct_mutex);
472
473                 if (likely(!i915_prefault_disable) && !prefaulted) {
474                         ret = fault_in_multipages_writeable(user_data, remain);
475                         /* Userspace is tricking us, but we've already clobbered
476                          * its pages with the prefault and promised to write the
477                          * data up to the first fault. Hence ignore any errors
478                          * and just continue. */
479                         (void)ret;
480                         prefaulted = 1;
481                 }
482
483                 ret = shmem_pread_slow(page, shmem_page_offset, page_length,
484                                        user_data, page_do_bit17_swizzling,
485                                        needs_clflush);
486
487                 mutex_lock(&dev->struct_mutex);
488
489 next_page:
490                 mark_page_accessed(page);
491
492                 if (ret)
493                         goto out;
494
495                 remain -= page_length;
496                 user_data += page_length;
497                 offset += page_length;
498         }
499
500 out:
501         i915_gem_object_unpin_pages(obj);
502
503         return ret;
504 }
505
506 /**
507  * Reads data from the object referenced by handle.
508  *
509  * On error, the contents of *data are undefined.
510  */
511 int
512 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
513                      struct drm_file *file)
514 {
515         struct drm_i915_gem_pread *args = data;
516         struct drm_i915_gem_object *obj;
517         int ret = 0;
518
519         if (args->size == 0)
520                 return 0;
521
522         if (!access_ok(VERIFY_WRITE,
523                        to_user_ptr(args->data_ptr),
524                        args->size))
525                 return -EFAULT;
526
527         ret = i915_mutex_lock_interruptible(dev);
528         if (ret)
529                 return ret;
530
531         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
532         if (&obj->base == NULL) {
533                 ret = -ENOENT;
534                 goto unlock;
535         }
536
537         /* Bounds check source.  */
538         if (args->offset > obj->base.size ||
539             args->size > obj->base.size - args->offset) {
540                 ret = -EINVAL;
541                 goto out;
542         }
543
544         /* prime objects have no backing filp to GEM pread/pwrite
545          * pages from.
546          */
547         if (!obj->base.filp) {
548                 ret = -EINVAL;
549                 goto out;
550         }
551
552         trace_i915_gem_object_pread(obj, args->offset, args->size);
553
554         ret = i915_gem_shmem_pread(dev, obj, args, file);
555
556 out:
557         drm_gem_object_unreference(&obj->base);
558 unlock:
559         mutex_unlock(&dev->struct_mutex);
560         return ret;
561 }
562
563 /* This is the fast write path which cannot handle
564  * page faults in the source data
565  */
566
567 static inline int
568 fast_user_write(struct io_mapping *mapping,
569                 loff_t page_base, int page_offset,
570                 char __user *user_data,
571                 int length)
572 {
573         void __iomem *vaddr_atomic;
574         void *vaddr;
575         unsigned long unwritten;
576
577         vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
578         /* We can use the cpu mem copy function because this is X86. */
579         vaddr = (void __force*)vaddr_atomic + page_offset;
580         unwritten = __copy_from_user_inatomic_nocache(vaddr,
581                                                       user_data, length);
582         io_mapping_unmap_atomic(vaddr_atomic);
583         return unwritten;
584 }
585
586 /**
587  * This is the fast pwrite path, where we copy the data directly from the
588  * user into the GTT, uncached.
589  */
590 static int
591 i915_gem_gtt_pwrite_fast(struct drm_device *dev,
592                          struct drm_i915_gem_object *obj,
593                          struct drm_i915_gem_pwrite *args,
594                          struct drm_file *file)
595 {
596         drm_i915_private_t *dev_priv = dev->dev_private;
597         ssize_t remain;
598         loff_t offset, page_base;
599         char __user *user_data;
600         int page_offset, page_length, ret;
601
602         ret = i915_gem_obj_ggtt_pin(obj, 0, true, true);
603         if (ret)
604                 goto out;
605
606         ret = i915_gem_object_set_to_gtt_domain(obj, true);
607         if (ret)
608                 goto out_unpin;
609
610         ret = i915_gem_object_put_fence(obj);
611         if (ret)
612                 goto out_unpin;
613
614         user_data = to_user_ptr(args->data_ptr);
615         remain = args->size;
616
617         offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
618
619         while (remain > 0) {
620                 /* Operation in this page
621                  *
622                  * page_base = page offset within aperture
623                  * page_offset = offset within page
624                  * page_length = bytes to copy for this page
625                  */
626                 page_base = offset & PAGE_MASK;
627                 page_offset = offset_in_page(offset);
628                 page_length = remain;
629                 if ((page_offset + remain) > PAGE_SIZE)
630                         page_length = PAGE_SIZE - page_offset;
631
632                 /* If we get a fault while copying data, then (presumably) our
633                  * source page isn't available.  Return the error and we'll
634                  * retry in the slow path.
635                  */
636                 if (fast_user_write(dev_priv->gtt.mappable, page_base,
637                                     page_offset, user_data, page_length)) {
638                         ret = -EFAULT;
639                         goto out_unpin;
640                 }
641
642                 remain -= page_length;
643                 user_data += page_length;
644                 offset += page_length;
645         }
646
647 out_unpin:
648         i915_gem_object_unpin(obj);
649 out:
650         return ret;
651 }
652
653 /* Per-page copy function for the shmem pwrite fastpath.
654  * Flushes invalid cachelines before writing to the target if
655  * needs_clflush_before is set and flushes out any written cachelines after
656  * writing if needs_clflush_after is set. */
657 static int
658 shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
659                   char __user *user_data,
660                   bool page_do_bit17_swizzling,
661                   bool needs_clflush_before,
662                   bool needs_clflush_after)
663 {
664         char *vaddr;
665         int ret;
666
667         if (unlikely(page_do_bit17_swizzling))
668                 return -EINVAL;
669
670         vaddr = kmap_atomic(page);
671         if (needs_clflush_before)
672                 drm_clflush_virt_range(vaddr + shmem_page_offset,
673                                        page_length);
674         ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset,
675                                                 user_data,
676                                                 page_length);
677         if (needs_clflush_after)
678                 drm_clflush_virt_range(vaddr + shmem_page_offset,
679                                        page_length);
680         kunmap_atomic(vaddr);
681
682         return ret ? -EFAULT : 0;
683 }
684
685 /* Only difference to the fast-path function is that this can handle bit17
686  * and uses non-atomic copy and kmap functions. */
687 static int
688 shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
689                   char __user *user_data,
690                   bool page_do_bit17_swizzling,
691                   bool needs_clflush_before,
692                   bool needs_clflush_after)
693 {
694         char *vaddr;
695         int ret;
696
697         vaddr = kmap(page);
698         if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
699                 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
700                                              page_length,
701                                              page_do_bit17_swizzling);
702         if (page_do_bit17_swizzling)
703                 ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
704                                                 user_data,
705                                                 page_length);
706         else
707                 ret = __copy_from_user(vaddr + shmem_page_offset,
708                                        user_data,
709                                        page_length);
710         if (needs_clflush_after)
711                 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
712                                              page_length,
713                                              page_do_bit17_swizzling);
714         kunmap(page);
715
716         return ret ? -EFAULT : 0;
717 }
718
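/* Write path counterpart of the shmem pread above: per-page atomic fastpath
 * with a sleeping slowpath fallback, plus the clflushes needed before and/or
 * after the copy to keep partially written cachelines coherent. */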
719 static int
720 i915_gem_shmem_pwrite(struct drm_device *dev,
721                       struct drm_i915_gem_object *obj,
722                       struct drm_i915_gem_pwrite *args,
723                       struct drm_file *file)
724 {
725         ssize_t remain;
726         loff_t offset;
727         char __user *user_data;
728         int shmem_page_offset, page_length, ret = 0;
729         int obj_do_bit17_swizzling, page_do_bit17_swizzling;
730         int hit_slowpath = 0;
731         int needs_clflush_after = 0;
732         int needs_clflush_before = 0;
733         struct sg_page_iter sg_iter;
734
735         user_data = to_user_ptr(args->data_ptr);
736         remain = args->size;
737
738         obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
739
740         if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
741                 /* If we're not in the cpu write domain, set ourselves into the gtt
742                  * write domain and manually flush cachelines (if required). This
743                  * optimizes for the case when the gpu will use the data
744                  * right away and we therefore have to clflush anyway. */
745                 if (obj->cache_level == I915_CACHE_NONE)
746                         needs_clflush_after = 1;
747                 if (i915_gem_obj_bound_any(obj)) {
748                         ret = i915_gem_object_set_to_gtt_domain(obj, true);
749                         if (ret)
750                                 return ret;
751                 }
752         }
753         /* The same trick applies to invalidating partially written cachelines
754          * that are read before writing. */
755         if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
756                 needs_clflush_before =
757                         !cpu_cache_is_coherent(dev, obj->cache_level);
758
759         ret = i915_gem_object_get_pages(obj);
760         if (ret)
761                 return ret;
762
763         i915_gem_object_pin_pages(obj);
764
765         offset = args->offset;
766         obj->dirty = 1;
767
768         for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
769                          offset >> PAGE_SHIFT) {
770                 struct page *page = sg_page_iter_page(&sg_iter);
771                 int partial_cacheline_write;
772
773                 if (remain <= 0)
774                         break;
775
776                 /* Operation in this page
777                  *
778                  * shmem_page_offset = offset within page in shmem file
779                  * page_length = bytes to copy for this page
780                  */
781                 shmem_page_offset = offset_in_page(offset);
782
783                 page_length = remain;
784                 if ((shmem_page_offset + page_length) > PAGE_SIZE)
785                         page_length = PAGE_SIZE - shmem_page_offset;
786
787                 /* If we don't overwrite a cacheline completely we need to be
788                  * careful to have up-to-date data by first clflushing. Don't
789                  * overcomplicate things and flush the whole range written to this page. */
790                 partial_cacheline_write = needs_clflush_before &&
791                         ((shmem_page_offset | page_length)
792                                 & (boot_cpu_data.x86_clflush_size - 1));
793
794                 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
795                         (page_to_phys(page) & (1 << 17)) != 0;
796
797                 ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
798                                         user_data, page_do_bit17_swizzling,
799                                         partial_cacheline_write,
800                                         needs_clflush_after);
801                 if (ret == 0)
802                         goto next_page;
803
804                 hit_slowpath = 1;
805                 mutex_unlock(&dev->struct_mutex);
806                 ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
807                                         user_data, page_do_bit17_swizzling,
808                                         partial_cacheline_write,
809                                         needs_clflush_after);
810
811                 mutex_lock(&dev->struct_mutex);
812
813 next_page:
814                 set_page_dirty(page);
815                 mark_page_accessed(page);
816
817                 if (ret)
818                         goto out;
819
820                 remain -= page_length;
821                 user_data += page_length;
822                 offset += page_length;
823         }
824
825 out:
826         i915_gem_object_unpin_pages(obj);
827
828         if (hit_slowpath) {
829                 /*
830                  * Fixup: Flush cpu caches in case we didn't flush the dirty
831                  * cachelines in-line while writing and the object moved
832                  * out of the cpu write domain while we dropped the lock.
833                  */
834                 if (!needs_clflush_after &&
835                     obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
836                         i915_gem_clflush_object(obj);
837                         i915_gem_chipset_flush(dev);
838                 }
839         }
840
841         if (needs_clflush_after)
842                 i915_gem_chipset_flush(dev);
843
844         return ret;
845 }
846
847 /**
848  * Writes data to the object referenced by handle.
849  *
850  * On error, the contents of the buffer that were to be modified are undefined.
851  */
852 int
853 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
854                       struct drm_file *file)
855 {
856         struct drm_i915_gem_pwrite *args = data;
857         struct drm_i915_gem_object *obj;
858         int ret;
859
860         if (args->size == 0)
861                 return 0;
862
863         if (!access_ok(VERIFY_READ,
864                        to_user_ptr(args->data_ptr),
865                        args->size))
866                 return -EFAULT;
867
868         if (likely(!i915_prefault_disable)) {
869                 ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
870                                                    args->size);
871                 if (ret)
872                         return -EFAULT;
873         }
874
875         ret = i915_mutex_lock_interruptible(dev);
876         if (ret)
877                 return ret;
878
879         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
880         if (&obj->base == NULL) {
881                 ret = -ENOENT;
882                 goto unlock;
883         }
884
885         /* Bounds check destination. */
886         if (args->offset > obj->base.size ||
887             args->size > obj->base.size - args->offset) {
888                 ret = -EINVAL;
889                 goto out;
890         }
891
892         /* prime objects have no backing filp to GEM pread/pwrite
893          * pages from.
894          */
895         if (!obj->base.filp) {
896                 ret = -EINVAL;
897                 goto out;
898         }
899
900         trace_i915_gem_object_pwrite(obj, args->offset, args->size);
901
902         ret = -EFAULT;
903         /* We can only do the GTT pwrite on untiled buffers, as otherwise
904          * it would end up going through the fenced access, and we'll get
905          * different detiling behavior between reading and writing.
906          * pread/pwrite currently are reading and writing from the CPU
907          * perspective, requiring manual detiling by the client.
908          */
909         if (obj->phys_obj) {
910                 ret = i915_gem_phys_pwrite(dev, obj, args, file);
911                 goto out;
912         }
913
914         if (obj->cache_level == I915_CACHE_NONE &&
915             obj->tiling_mode == I915_TILING_NONE &&
916             obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
917                 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
918                 /* Note that the gtt paths might fail with non-page-backed user
919                  * pointers (e.g. gtt mappings when moving data between
920                  * textures). Fall back to the shmem path in that case. */
921         }
922
923         if (ret == -EFAULT || ret == -ENOSPC)
924                 ret = i915_gem_shmem_pwrite(dev, obj, args, file);
925
926 out:
927         drm_gem_object_unreference(&obj->base);
928 unlock:
929         mutex_unlock(&dev->struct_mutex);
930         return ret;
931 }
932
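/* Translate the current GPU error state into an errno: -EAGAIN while a reset
 * is in progress (-EIO instead for callers that cannot restart), and -EIO if
 * the GPU is terminally wedged. */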
933 int
934 i915_gem_check_wedge(struct i915_gpu_error *error,
935                      bool interruptible)
936 {
937         if (i915_reset_in_progress(error)) {
938                 /* Non-interruptible callers can't handle -EAGAIN, hence return
939                  * -EIO unconditionally for these. */
940                 if (!interruptible)
941                         return -EIO;
942
943                 /* Recovery complete, but the reset failed ... */
944                 if (i915_terminally_wedged(error))
945                         return -EIO;
946
947                 return -EAGAIN;
948         }
949
950         return 0;
951 }
952
953 /*
954  * Compare seqno against outstanding lazy request. Emit a request if they are
955  * equal.
956  */
957 static int
958 i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
959 {
960         int ret;
961
962         BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
963
964         ret = 0;
965         if (seqno == ring->outstanding_lazy_request)
966                 ret = i915_add_request(ring, NULL);
967
968         return ret;
969 }
970
971 /**
972  * __wait_seqno - wait until execution of seqno has finished
973  * @ring: the ring expected to report seqno
974  * @seqno: the sequence number to wait for
975  * @reset_counter: reset sequence associated with the given seqno
976  * @interruptible: do an interruptible wait (normally yes)
977  * @timeout: in - how long to wait (NULL forever); out - how much time remaining
978  *
979  * Note: It is of utmost importance that the passed in seqno and reset_counter
980  * values have been read by the caller in an smp safe manner. Where read-side
981  * locks are involved, it is sufficient to read the reset_counter before
982  * unlocking the lock that protects the seqno. For lockless tricks, the
983  * reset_counter _must_ be read before, and an appropriate smp_rmb must be
984  * inserted.
985  *
986  * Returns 0 if the seqno was found within the allotted time. Otherwise returns
987  * the errno, with the remaining time filled in the timeout argument.
988  */
989 static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
990                         unsigned reset_counter,
991                         bool interruptible, struct timespec *timeout)
992 {
993         drm_i915_private_t *dev_priv = ring->dev->dev_private;
994         struct timespec before, now, wait_time={1,0};
995         unsigned long timeout_jiffies;
996         long end;
997         bool wait_forever = true;
998         int ret;
999
1000         if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
1001                 return 0;
1002
1003         trace_i915_gem_request_wait_begin(ring, seqno);
1004
1005         if (timeout != NULL) {
1006                 wait_time = *timeout;
1007                 wait_forever = false;
1008         }
1009
1010         timeout_jiffies = timespec_to_jiffies_timeout(&wait_time);
1011
1012         if (WARN_ON(!ring->irq_get(ring)))
1013                 return -ENODEV;
1014
1015         /* Record current time in case interrupted by signal, or wedged */
1016         getrawmonotonic(&before);
1017
1018 #define EXIT_COND \
1019         (i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
1020          i915_reset_in_progress(&dev_priv->gpu_error) || \
1021          reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
1022         do {
1023                 if (interruptible)
1024                         end = wait_event_interruptible_timeout(ring->irq_queue,
1025                                                                EXIT_COND,
1026                                                                timeout_jiffies);
1027                 else
1028                         end = wait_event_timeout(ring->irq_queue, EXIT_COND,
1029                                                  timeout_jiffies);
1030
1031                 /* We need to check whether any gpu reset happened in between
1032                  * the caller grabbing the seqno and now ... */
1033                 if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
1034                         end = -EAGAIN;
1035
1036                 /* ... but upgrade the -EAGAIN to an -EIO if the gpu is truly
1037                  * gone. */
1038                 ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
1039                 if (ret)
1040                         end = ret;
1041         } while (end == 0 && wait_forever);
1042
1043         getrawmonotonic(&now);
1044
1045         ring->irq_put(ring);
1046         trace_i915_gem_request_wait_end(ring, seqno);
1047 #undef EXIT_COND
1048
1049         if (timeout) {
1050                 struct timespec sleep_time = timespec_sub(now, before);
1051                 *timeout = timespec_sub(*timeout, sleep_time);
1052                 if (!timespec_valid(timeout)) /* i.e. negative time remains */
1053                         set_normalized_timespec(timeout, 0, 0);
1054         }
1055
1056         switch (end) {
1057         case -EIO:
1058         case -EAGAIN: /* Wedged */
1059         case -ERESTARTSYS: /* Signal */
1060                 return (int)end;
1061         case 0: /* Timeout */
1062                 return -ETIME;
1063         default: /* Completed */
1064                 WARN_ON(end < 0); /* We're not aware of other errors */
1065                 return 0;
1066         }
1067 }
1068
1069 /**
1070  * Waits for a sequence number to be signaled, and cleans up the
1071  * request and object lists appropriately for that event.
1072  */
1073 int
1074 i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
1075 {
1076         struct drm_device *dev = ring->dev;
1077         struct drm_i915_private *dev_priv = dev->dev_private;
1078         bool interruptible = dev_priv->mm.interruptible;
1079         int ret;
1080
1081         BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1082         BUG_ON(seqno == 0);
1083
1084         ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
1085         if (ret)
1086                 return ret;
1087
1088         ret = i915_gem_check_olr(ring, seqno);
1089         if (ret)
1090                 return ret;
1091
1092         return __wait_seqno(ring, seqno,
1093                             atomic_read(&dev_priv->gpu_error.reset_counter),
1094                             interruptible, NULL);
1095 }
1096
1097 static int
1098 i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
1099                                      struct intel_ring_buffer *ring)
1100 {
1101         i915_gem_retire_requests_ring(ring);
1102
1103         /* Manually manage the write flush as we may have not yet
1104          * retired the buffer.
1105          *
1106          * Note that the last_write_seqno is always the earlier of
1107          * the two (read/write) seqnos, so if we have successfully waited,
1108          * we know we have passed the last write.
1109          */
1110         obj->last_write_seqno = 0;
1111         obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
1112
1113         return 0;
1114 }
1115
1116 /**
1117  * Ensures that all rendering to the object has completed and the object is
1118  * safe to unbind from the GTT or access from the CPU.
1119  */
1120 static __must_check int
1121 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
1122                                bool readonly)
1123 {
1124         struct intel_ring_buffer *ring = obj->ring;
1125         u32 seqno;
1126         int ret;
1127
1128         seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1129         if (seqno == 0)
1130                 return 0;
1131
1132         ret = i915_wait_seqno(ring, seqno);
1133         if (ret)
1134                 return ret;
1135
1136         return i915_gem_object_wait_rendering__tail(obj, ring);
1137 }
1138
1139 /* A nonblocking variant of the above wait. This is a highly dangerous routine
1140  * as the object state may change during this call.
1141  */
1142 static __must_check int
1143 i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
1144                                             bool readonly)
1145 {
1146         struct drm_device *dev = obj->base.dev;
1147         struct drm_i915_private *dev_priv = dev->dev_private;
1148         struct intel_ring_buffer *ring = obj->ring;
1149         unsigned reset_counter;
1150         u32 seqno;
1151         int ret;
1152
1153         BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1154         BUG_ON(!dev_priv->mm.interruptible);
1155
1156         seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1157         if (seqno == 0)
1158                 return 0;
1159
1160         ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
1161         if (ret)
1162                 return ret;
1163
1164         ret = i915_gem_check_olr(ring, seqno);
1165         if (ret)
1166                 return ret;
1167
1168         reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
1169         mutex_unlock(&dev->struct_mutex);
1170         ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
1171         mutex_lock(&dev->struct_mutex);
1172         if (ret)
1173                 return ret;
1174
1175         return i915_gem_object_wait_rendering__tail(obj, ring);
1176 }
1177
1178 /**
1179  * Called when user space prepares to use an object with the CPU, either
1180  * through the mmap ioctl's mapping or a GTT mapping.
1181  */
1182 int
1183 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1184                           struct drm_file *file)
1185 {
1186         struct drm_i915_gem_set_domain *args = data;
1187         struct drm_i915_gem_object *obj;
1188         uint32_t read_domains = args->read_domains;
1189         uint32_t write_domain = args->write_domain;
1190         int ret;
1191
1192         /* Only handle setting domains to types used by the CPU. */
1193         if (write_domain & I915_GEM_GPU_DOMAINS)
1194                 return -EINVAL;
1195
1196         if (read_domains & I915_GEM_GPU_DOMAINS)
1197                 return -EINVAL;
1198
1199         /* Having something in the write domain implies it's in the read
1200          * domain, and only that read domain.  Enforce that in the request.
1201          */
1202         if (write_domain != 0 && read_domains != write_domain)
1203                 return -EINVAL;
1204
1205         ret = i915_mutex_lock_interruptible(dev);
1206         if (ret)
1207                 return ret;
1208
1209         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1210         if (&obj->base == NULL) {
1211                 ret = -ENOENT;
1212                 goto unlock;
1213         }
1214
1215         /* Try to flush the object off the GPU without holding the lock.
1216          * We will repeat the flush holding the lock in the normal manner
1217          * to catch cases where we are gazumped.
1218          */
1219         ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain);
1220         if (ret)
1221                 goto unref;
1222
1223         if (read_domains & I915_GEM_DOMAIN_GTT) {
1224                 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1225
1226                 /* Silently promote "you're not bound, there was nothing to do"
1227                  * to success, since the client was just asking us to
1228                  * make sure everything was done.
1229                  */
1230                 if (ret == -EINVAL)
1231                         ret = 0;
1232         } else {
1233                 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1234         }
1235
1236 unref:
1237         drm_gem_object_unreference(&obj->base);
1238 unlock:
1239         mutex_unlock(&dev->struct_mutex);
1240         return ret;
1241 }
1242
1243 /**
1244  * Called when user space has done writes to this buffer
1245  */
1246 int
1247 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1248                          struct drm_file *file)
1249 {
1250         struct drm_i915_gem_sw_finish *args = data;
1251         struct drm_i915_gem_object *obj;
1252         int ret = 0;
1253
1254         ret = i915_mutex_lock_interruptible(dev);
1255         if (ret)
1256                 return ret;
1257
1258         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1259         if (&obj->base == NULL) {
1260                 ret = -ENOENT;
1261                 goto unlock;
1262         }
1263
1264         /* Pinned buffers may be scanout, so flush the cache */
1265         if (obj->pin_count)
1266                 i915_gem_object_flush_cpu_write_domain(obj);
1267
1268         drm_gem_object_unreference(&obj->base);
1269 unlock:
1270         mutex_unlock(&dev->struct_mutex);
1271         return ret;
1272 }
1273
1274 /**
1275  * Maps the contents of an object, returning the address it is mapped
1276  * into.
1277  *
1278  * While the mapping holds a reference on the contents of the object, it doesn't
1279  * imply a ref on the object itself.
1280  */
1281 int
1282 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1283                     struct drm_file *file)
1284 {
1285         struct drm_i915_gem_mmap *args = data;
1286         struct drm_gem_object *obj;
1287         unsigned long addr;
1288
1289         obj = drm_gem_object_lookup(dev, file, args->handle);
1290         if (obj == NULL)
1291                 return -ENOENT;
1292
1293         /* prime objects have no backing filp to GEM mmap
1294          * pages from.
1295          */
1296         if (!obj->filp) {
1297                 drm_gem_object_unreference_unlocked(obj);
1298                 return -EINVAL;
1299         }
1300
1301         addr = vm_mmap(obj->filp, 0, args->size,
1302                        PROT_READ | PROT_WRITE, MAP_SHARED,
1303                        args->offset);
1304         drm_gem_object_unreference_unlocked(obj);
1305         if (IS_ERR((void *)addr))
1306                 return addr;
1307
1308         args->addr_ptr = (uint64_t) addr;
1309
1310         return 0;
1311 }
1312
1313 /**
1314  * i915_gem_fault - fault a page into the GTT
1315  * @vma: VMA in question
1316  * @vmf: fault info
1317  *
1318  * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1319  * from userspace.  The fault handler takes care of binding the object to
1320  * the GTT (if needed), allocating and programming a fence register (again,
1321  * only if needed based on whether the old reg is still valid or the object
1322  * is tiled) and inserting a new PTE into the faulting process.
1323  *
1324  * Note that the faulting process may involve evicting existing objects
1325  * from the GTT and/or fence registers to make room.  So performance may
1326  * suffer if the GTT working set is large or there are few fence registers
1327  * left.
1328  */
1329 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1330 {
1331         struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
1332         struct drm_device *dev = obj->base.dev;
1333         drm_i915_private_t *dev_priv = dev->dev_private;
1334         pgoff_t page_offset;
1335         unsigned long pfn;
1336         int ret = 0;
1337         bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
1338
1339         /* We don't use vmf->pgoff since that has the fake offset */
1340         page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
1341                 PAGE_SHIFT;
1342
1343         ret = i915_mutex_lock_interruptible(dev);
1344         if (ret)
1345                 goto out;
1346
1347         trace_i915_gem_object_fault(obj, page_offset, true, write);
1348
1349         /* Access to snoopable pages through the GTT is incoherent. */
1350         if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
1351                 ret = -EINVAL;
1352                 goto unlock;
1353         }
1354
1355         /* Now bind it into the GTT if needed */
1356         ret = i915_gem_obj_ggtt_pin(obj,  0, true, false);
1357         if (ret)
1358                 goto unlock;
1359
1360         ret = i915_gem_object_set_to_gtt_domain(obj, write);
1361         if (ret)
1362                 goto unpin;
1363
1364         ret = i915_gem_object_get_fence(obj);
1365         if (ret)
1366                 goto unpin;
1367
1368         obj->fault_mappable = true;
1369
1370         pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
1371         pfn >>= PAGE_SHIFT;
1372         pfn += page_offset;
1373
1374         /* Finally, remap it using the new GTT offset */
1375         ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
1376 unpin:
1377         i915_gem_object_unpin(obj);
1378 unlock:
1379         mutex_unlock(&dev->struct_mutex);
1380 out:
1381         switch (ret) {
1382         case -EIO:
1383                 /* If this -EIO is due to a gpu hang, give the reset code a
1384                  * chance to clean up the mess. Otherwise return the proper
1385                  * SIGBUS. */
1386                 if (i915_terminally_wedged(&dev_priv->gpu_error))
1387                         return VM_FAULT_SIGBUS;
1388         case -EAGAIN:
1389                 /* Give the error handler a chance to run and move the
1390                  * objects off the GPU active list. Next time we service the
1391                  * fault, we should be able to transition the page into the
1392                  * GTT without touching the GPU (and so avoid further
1393                  * EIO/EAGAIN). If the GPU is wedged, then there is no issue
1394                  * with coherency, just lost writes.
1395                  */
1396                 set_need_resched();
1397         case 0:
1398         case -ERESTARTSYS:
1399         case -EINTR:
1400         case -EBUSY:
1401                 /*
1402                  * EBUSY is ok: this just means that another thread
1403                  * already did the job.
1404                  */
1405                 return VM_FAULT_NOPAGE;
1406         case -ENOMEM:
1407                 return VM_FAULT_OOM;
1408         case -ENOSPC:
1409                 return VM_FAULT_SIGBUS;
1410         default:
1411                 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
1412                 return VM_FAULT_SIGBUS;
1413         }
1414 }
1415
1416 /**
1417  * i915_gem_release_mmap - remove physical page mappings
1418  * @obj: obj in question
1419  *
1420  * Preserve the reservation of the mmapping with the DRM core code, but
1421  * relinquish ownership of the pages back to the system.
1422  *
1423  * It is vital that we remove the page mapping if we have mapped a tiled
1424  * object through the GTT and then lose the fence register due to
1425  * resource pressure. Similarly if the object has been moved out of the
1426          * aperture, then pages mapped into userspace must be revoked. Removing the
1427  * mapping will then trigger a page fault on the next user access, allowing
1428  * fixup by i915_gem_fault().
1429  */
1430 void
1431 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1432 {
1433         if (!obj->fault_mappable)
1434                 return;
1435
1436         if (obj->base.dev->dev_mapping)
1437                 unmap_mapping_range(obj->base.dev->dev_mapping,
1438                                     (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
1439                                     obj->base.size, 1);
1440
1441         obj->fault_mappable = false;
1442 }
1443
1444 uint32_t
1445 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
1446 {
1447         uint32_t gtt_size;
1448
1449         if (INTEL_INFO(dev)->gen >= 4 ||
1450             tiling_mode == I915_TILING_NONE)
1451                 return size;
1452
1453         /* Previous chips need a power-of-two fence region when tiling */
1454         if (INTEL_INFO(dev)->gen == 3)
1455                 gtt_size = 1024*1024;
1456         else
1457                 gtt_size = 512*1024;
1458
1459         while (gtt_size < size)
1460                 gtt_size <<= 1;
1461
1462         return gtt_size;
1463 }
1464
1465 /**
1466  * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1467  * @obj: object to check
1468  *
1469  * Return the required GTT alignment for an object, taking into account
1470  * potential fence register mapping.
1471  */
1472 uint32_t
1473 i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
1474                            int tiling_mode, bool fenced)
1475 {
1476         /*
1477          * Minimum alignment is 4k (GTT page size), but might be greater
1478          * if a fence register is needed for the object.
1479          */
1480         if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
1481             tiling_mode == I915_TILING_NONE)
1482                 return 4096;
1483
1484         /*
1485          * Previous chips need to be aligned to the size of the smallest
1486          * fence register that can contain the object.
1487          */
1488         return i915_gem_get_gtt_size(dev, size, tiling_mode);
1489 }
1490
1491 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
1492 {
1493         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1494         int ret;
1495
1496         if (obj->base.map_list.map)
1497                 return 0;
1498
1499         dev_priv->mm.shrinker_no_lock_stealing = true;
1500
1501         ret = drm_gem_create_mmap_offset(&obj->base);
1502         if (ret != -ENOSPC)
1503                 goto out;
1504
1505         /* Badly fragmented mmap space? The only way we can recover
1506          * space is by destroying unwanted objects. We can't randomly release
1507          * mmap_offsets as userspace expects them to be persistent for the
1508          * lifetime of the objects. The closest we can do is to release the
1509          * offsets on purgeable objects by truncating them and marking them
1510          * purged, which prevents userspace from ever using those objects again.
1511          */
1512         i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
1513         ret = drm_gem_create_mmap_offset(&obj->base);
1514         if (ret != -ENOSPC)
1515                 goto out;
1516
1517         i915_gem_shrink_all(dev_priv);
1518         ret = drm_gem_create_mmap_offset(&obj->base);
1519 out:
1520         dev_priv->mm.shrinker_no_lock_stealing = false;
1521
1522         return ret;
1523 }
1524
1525 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
1526 {
1527         if (!obj->base.map_list.map)
1528                 return;
1529
1530         drm_gem_free_mmap_offset(&obj->base);
1531 }
1532
1533 int
1534 i915_gem_mmap_gtt(struct drm_file *file,
1535                   struct drm_device *dev,
1536                   uint32_t handle,
1537                   uint64_t *offset)
1538 {
1539         struct drm_i915_private *dev_priv = dev->dev_private;
1540         struct drm_i915_gem_object *obj;
1541         int ret;
1542
1543         ret = i915_mutex_lock_interruptible(dev);
1544         if (ret)
1545                 return ret;
1546
1547         obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
1548         if (&obj->base == NULL) {
1549                 ret = -ENOENT;
1550                 goto unlock;
1551         }
1552
1553         if (obj->base.size > dev_priv->gtt.mappable_end) {
1554                 ret = -E2BIG;
1555                 goto out;
1556         }
1557
1558         if (obj->madv != I915_MADV_WILLNEED) {
1559                 DRM_ERROR("Attempting to mmap a purgeable buffer\n");
1560                 ret = -EINVAL;
1561                 goto out;
1562         }
1563
1564         ret = i915_gem_object_create_mmap_offset(obj);
1565         if (ret)
1566                 goto out;
1567
1568         *offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
1569
1570 out:
1571         drm_gem_object_unreference(&obj->base);
1572 unlock:
1573         mutex_unlock(&dev->struct_mutex);
1574         return ret;
1575 }
1576
1577 /**
1578  * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1579  * @dev: DRM device
1580  * @data: GTT mapping ioctl data
1581  * @file: DRM file pointer of the caller
1582  *
1583  * Simply returns the fake offset to userspace so it can mmap it.
1584  * The mmap call will end up in drm_gem_mmap(), which will set things
1585  * up so we can get faults in the handler above.
1586  *
1587  * The fault handler will take care of binding the object into the GTT
1588  * (since it may have been evicted to make room for something), allocating
1589  * a fence register, and mapping the appropriate aperture address into
1590  * userspace.
1591  */
1592 int
1593 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1594                         struct drm_file *file)
1595 {
1596         struct drm_i915_gem_mmap_gtt *args = data;
1597
1598         return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
1599 }
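
/* A rough sketch of the userspace side (libdrm performs the equivalent
 * steps internally; error handling omitted):
 *
 *      struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *      drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg);
 *      ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                 fd, arg.offset);
 *
 * Faults on ptr are then serviced by i915_gem_fault(), which binds the
 * object into the mappable aperture as described above.
 */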
1600
1601 /* Immediately discard the backing storage */
1602 static void
1603 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
1604 {
1605         struct inode *inode;
1606
1607         i915_gem_object_free_mmap_offset(obj);
1608
1609         if (obj->base.filp == NULL)
1610                 return;
1611
1612         /* Our goal here is to return as much of the memory as
1613          * possible back to the system, as we are called from the OOM path.
1614          * To do this we must instruct the shmfs to drop all of its
1615          * backing pages, *now*.
1616          */
1617         inode = file_inode(obj->base.filp);
1618         shmem_truncate_range(inode, 0, (loff_t)-1);
1619
1620         obj->madv = __I915_MADV_PURGED;
1621 }
1622
1623 static inline int
1624 i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
1625 {
1626         return obj->madv == I915_MADV_DONTNEED;
1627 }
1628
1629 static void
1630 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
1631 {
1632         struct sg_page_iter sg_iter;
1633         int ret;
1634
1635         BUG_ON(obj->madv == __I915_MADV_PURGED);
1636
1637         ret = i915_gem_object_set_to_cpu_domain(obj, true);
1638         if (ret) {
1639                 /* In the event of a disaster, abandon all caches and
1640                  * hope for the best.
1641                  */
1642                 WARN_ON(ret != -EIO);
1643                 i915_gem_clflush_object(obj);
1644                 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
1645         }
1646
1647         if (i915_gem_object_needs_bit17_swizzle(obj))
1648                 i915_gem_object_save_bit_17_swizzle(obj);
1649
1650         if (obj->madv == I915_MADV_DONTNEED)
1651                 obj->dirty = 0;
1652
1653         for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
1654                 struct page *page = sg_page_iter_page(&sg_iter);
1655
1656                 if (obj->dirty)
1657                         set_page_dirty(page);
1658
1659                 if (obj->madv == I915_MADV_WILLNEED)
1660                         mark_page_accessed(page);
1661
1662                 page_cache_release(page);
1663         }
1664         obj->dirty = 0;
1665
1666         sg_free_table(obj->pages);
1667         kfree(obj->pages);
1668 }
1669
1670 int
1671 i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
1672 {
1673         const struct drm_i915_gem_object_ops *ops = obj->ops;
1674
1675         if (obj->pages == NULL)
1676                 return 0;
1677
1678         if (obj->pages_pin_count)
1679                 return -EBUSY;
1680
1681         BUG_ON(i915_gem_obj_bound_any(obj));
1682
1683         /* ->put_pages might need to allocate memory for the bit17 swizzle
1684          * array, hence protect them from being reaped by removing them from gtt
1685          * lists early. */
1686         list_del(&obj->global_list);
1687
1688         ops->put_pages(obj);
1689         obj->pages = NULL;
1690
1691         if (i915_gem_object_is_purgeable(obj))
1692                 i915_gem_object_truncate(obj);
1693
1694         return 0;
1695 }
1696
1697 static long
1698 __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
1699                   bool purgeable_only)
1700 {
1701         struct drm_i915_gem_object *obj, *next;
1702         long count = 0;
1703
1704         list_for_each_entry_safe(obj, next,
1705                                  &dev_priv->mm.unbound_list,
1706                                  global_list) {
1707                 if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
1708                     i915_gem_object_put_pages(obj) == 0) {
1709                         count += obj->base.size >> PAGE_SHIFT;
1710                         if (count >= target)
1711                                 return count;
1712                 }
1713         }
1714
1715         list_for_each_entry_safe(obj, next, &dev_priv->mm.bound_list,
1716                                  global_list) {
1717                 struct i915_vma *vma, *v;
1718
1719                 if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
1720                         continue;
1721
1722                 list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
1723                         if (i915_vma_unbind(vma))
1724                                 break;
1725
1726                 if (!i915_gem_object_put_pages(obj)) {
1727                         count += obj->base.size >> PAGE_SHIFT;
1728                         if (count >= target)
1729                                 return count;
1730                 }
1731         }
1732
1733         return count;
1734 }
1735
1736 static long
1737 i915_gem_purge(struct drm_i915_private *dev_priv, long target)
1738 {
1739         return __i915_gem_shrink(dev_priv, target, true);
1740 }
1741
1742 static void
1743 i915_gem_shrink_all(struct drm_i915_private *dev_priv)
1744 {
1745         struct drm_i915_gem_object *obj, *next;
1746
1747         i915_gem_evict_everything(dev_priv->dev);
1748
1749         list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
1750                                  global_list)
1751                 i915_gem_object_put_pages(obj);
1752 }
1753
1754 static int
1755 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
1756 {
1757         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1758         int page_count, i;
1759         struct address_space *mapping;
1760         struct sg_table *st;
1761         struct scatterlist *sg;
1762         struct sg_page_iter sg_iter;
1763         struct page *page;
1764         unsigned long last_pfn = 0;     /* suppress gcc warning */
1765         gfp_t gfp;
1766
1767         /* Assert that the object is not currently in any GPU domain. As it
1768          * wasn't in the GTT, there shouldn't be any way it could have been in
1769          * a GPU cache.
1770          */
1771         BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
1772         BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
1773
1774         st = kmalloc(sizeof(*st), GFP_KERNEL);
1775         if (st == NULL)
1776                 return -ENOMEM;
1777
1778         page_count = obj->base.size / PAGE_SIZE;
1779         if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
1780                 sg_free_table(st);
1781                 kfree(st);
1782                 return -ENOMEM;
1783         }
1784
1785         /* Get the list of pages out of our struct file.  They'll be pinned
1786          * at this point until we release them.
1787          *
1788          * Fail silently without starting the shrinker
1789          */
1790         mapping = file_inode(obj->base.filp)->i_mapping;
1791         gfp = mapping_gfp_mask(mapping);
1792         gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
1793         gfp &= ~(__GFP_IO | __GFP_WAIT);
1794         sg = st->sgl;
1795         st->nents = 0;
1796         for (i = 0; i < page_count; i++) {
1797                 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1798                 if (IS_ERR(page)) {
1799                         i915_gem_purge(dev_priv, page_count);
1800                         page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1801                 }
1802                 if (IS_ERR(page)) {
1803                         /* We've tried hard to allocate the memory by reaping
1804                          * our own buffer, now let the real VM do its job and
1805                          * go down in flames if truly OOM.
1806                          */
1807                         gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD);
1808                         gfp |= __GFP_IO | __GFP_WAIT;
1809
1810                         i915_gem_shrink_all(dev_priv);
1811                         page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1812                         if (IS_ERR(page))
1813                                 goto err_pages;
1814
1815                         gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
1816                         gfp &= ~(__GFP_IO | __GFP_WAIT);
1817                 }
1818 #ifdef CONFIG_SWIOTLB
1819                 if (swiotlb_nr_tbl()) {
1820                         st->nents++;
1821                         sg_set_page(sg, page, PAGE_SIZE, 0);
1822                         sg = sg_next(sg);
1823                         continue;
1824                 }
1825 #endif
1826                 if (!i || page_to_pfn(page) != last_pfn + 1) {
1827                         if (i)
1828                                 sg = sg_next(sg);
1829                         st->nents++;
1830                         sg_set_page(sg, page, PAGE_SIZE, 0);
1831                 } else {
1832                         sg->length += PAGE_SIZE;
1833                 }
1834                 last_pfn = page_to_pfn(page);
1835         }
1836 #ifdef CONFIG_SWIOTLB
1837         if (!swiotlb_nr_tbl())
1838 #endif
1839                 sg_mark_end(sg);
1840         obj->pages = st;
1841
1842         if (i915_gem_object_needs_bit17_swizzle(obj))
1843                 i915_gem_object_do_bit_17_swizzle(obj);
1844
1845         return 0;
1846
1847 err_pages:
1848         sg_mark_end(sg);
1849         for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
1850                 page_cache_release(sg_page_iter_page(&sg_iter));
1851         sg_free_table(st);
1852         kfree(st);
1853         return PTR_ERR(page);
1854 }
1855
1856 /* Ensure that the associated pages are gathered from the backing storage
1857  * and pinned into our object. i915_gem_object_get_pages() may be called
1858  * multiple times before they are released by a single call to
1859  * i915_gem_object_put_pages() - once the pages are no longer referenced
1860  * either as a result of memory pressure (reaping pages under the shrinker)
1861  * or as the object is itself released.
1862  */
1863 int
1864 i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
1865 {
1866         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1867         const struct drm_i915_gem_object_ops *ops = obj->ops;
1868         int ret;
1869
1870         if (obj->pages)
1871                 return 0;
1872
1873         if (obj->madv != I915_MADV_WILLNEED) {
1874                 DRM_ERROR("Attempting to obtain a purgeable object\n");
1875                 return -EINVAL;
1876         }
1877
1878         BUG_ON(obj->pages_pin_count);
1879
1880         ret = ops->get_pages(obj);
1881         if (ret)
1882                 return ret;
1883
1884         list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
1885         return 0;
1886 }
1887
1888 void
1889 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
1890                                struct intel_ring_buffer *ring)
1891 {
1892         struct drm_device *dev = obj->base.dev;
1893         struct drm_i915_private *dev_priv = dev->dev_private;
1894         u32 seqno = intel_ring_get_seqno(ring);
1895
1896         BUG_ON(ring == NULL);
1897         if (obj->ring != ring && obj->last_write_seqno) {
1898                 /* Keep the seqno relative to the current ring */
1899                 obj->last_write_seqno = seqno;
1900         }
1901         obj->ring = ring;
1902
1903         /* Add a reference if we're newly entering the active list. */
1904         if (!obj->active) {
1905                 drm_gem_object_reference(&obj->base);
1906                 obj->active = 1;
1907         }
1908
1909         list_move_tail(&obj->ring_list, &ring->active_list);
1910
1911         obj->last_read_seqno = seqno;
1912
1913         if (obj->fenced_gpu_access) {
1914                 obj->last_fenced_seqno = seqno;
1915
1916                 /* Bump MRU to take account of the delayed flush */
1917                 if (obj->fence_reg != I915_FENCE_REG_NONE) {
1918                         struct drm_i915_fence_reg *reg;
1919
1920                         reg = &dev_priv->fence_regs[obj->fence_reg];
1921                         list_move_tail(&reg->lru_list,
1922                                        &dev_priv->mm.fence_list);
1923                 }
1924         }
1925 }
1926
1927 static void
1928 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
1929 {
1930         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1931         struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
1932         struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
1933
1934         BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
1935         BUG_ON(!obj->active);
1936
1937         list_move_tail(&vma->mm_list, &ggtt_vm->inactive_list);
1938
1939         list_del_init(&obj->ring_list);
1940         obj->ring = NULL;
1941
1942         obj->last_read_seqno = 0;
1943         obj->last_write_seqno = 0;
1944         obj->base.write_domain = 0;
1945
1946         obj->last_fenced_seqno = 0;
1947         obj->fenced_gpu_access = false;
1948
1949         obj->active = 0;
1950         drm_gem_object_unreference(&obj->base);
1951
1952         WARN_ON(i915_verify_lists(dev));
1953 }
1954
1955 static int
1956 i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
1957 {
1958         struct drm_i915_private *dev_priv = dev->dev_private;
1959         struct intel_ring_buffer *ring;
1960         int ret, i, j;
1961
1962         /* Carefully retire all requests without writing to the rings */
1963         for_each_ring(ring, dev_priv, i) {
1964                 ret = intel_ring_idle(ring);
1965                 if (ret)
1966                         return ret;
1967         }
1968         i915_gem_retire_requests(dev);
1969
1970         /* Finally reset hw state */
1971         for_each_ring(ring, dev_priv, i) {
1972                 intel_ring_init_seqno(ring, seqno);
1973
1974                 for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
1975                         ring->sync_seqno[j] = 0;
1976         }
1977
1978         return 0;
1979 }
1980
1981 int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
1982 {
1983         struct drm_i915_private *dev_priv = dev->dev_private;
1984         int ret;
1985
1986         if (seqno == 0)
1987                 return -EINVAL;
1988
1989         /* The seqno in the HWS page needs to be set to one less than
1990          * the value we will inject into the ring.
1991          */
1992         ret = i915_gem_init_seqno(dev, seqno - 1);
1993         if (ret)
1994                 return ret;
1995
1996         /* Carefully set the last_seqno value so that wrap
1997          * detection still works
1998          */
1999         dev_priv->next_seqno = seqno;
2000         dev_priv->last_seqno = seqno - 1;
2001         if (dev_priv->last_seqno == 0)
2002                 dev_priv->last_seqno--;
2003
2004         return 0;
2005 }
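
/* Worked example: i915_gem_set_seqno(dev, 0x100) retires everything, seeds
 * each ring with 0xff via i915_gem_init_seqno(), and then sets
 * next_seqno = 0x100 and last_seqno = 0xff, so the seqno-passed comparison
 * stays consistent across the forced jump.
 */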
2006
2007 int
2008 i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
2009 {
2010         struct drm_i915_private *dev_priv = dev->dev_private;
2011
2012         /* reserve 0 for non-seqno */
2013         if (dev_priv->next_seqno == 0) {
2014                 int ret = i915_gem_init_seqno(dev, 0);
2015                 if (ret)
2016                         return ret;
2017
2018                 dev_priv->next_seqno = 1;
2019         }
2020
2021         *seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
2022         return 0;
2023 }
2024
2025 int __i915_add_request(struct intel_ring_buffer *ring,
2026                        struct drm_file *file,
2027                        struct drm_i915_gem_object *obj,
2028                        u32 *out_seqno)
2029 {
2030         drm_i915_private_t *dev_priv = ring->dev->dev_private;
2031         struct drm_i915_gem_request *request;
2032         u32 request_ring_position, request_start;
2033         int was_empty;
2034         int ret;
2035
2036         request_start = intel_ring_get_tail(ring);
2037         /*
2038          * Emit any outstanding flushes - execbuf can fail to emit the flush
2039          * after having emitted the batchbuffer command. Hence we need to fix
2040          * things up similar to emitting the lazy request. The difference here
2041          * is that the flush _must_ happen before the next request, no matter
2042          * what.
2043          */
2044         ret = intel_ring_flush_all_caches(ring);
2045         if (ret)
2046                 return ret;
2047
2048         request = kmalloc(sizeof(*request), GFP_KERNEL);
2049         if (request == NULL)
2050                 return -ENOMEM;
2051
2052
2053         /* Record the position of the start of the request so that,
2054          * should we detect the updated seqno part-way through the
2055          * GPU processing the request, we never over-estimate the
2056          * position of the head.
2057          */
2058         request_ring_position = intel_ring_get_tail(ring);
2059
2060         ret = ring->add_request(ring);
2061         if (ret) {
2062                 kfree(request);
2063                 return ret;
2064         }
2065
2066         request->seqno = intel_ring_get_seqno(ring);
2067         request->ring = ring;
2068         request->head = request_start;
2069         request->tail = request_ring_position;
2070         request->ctx = ring->last_context;
2071         request->batch_obj = obj;
2072
2073         /* Whilst this request exists, batch_obj will be on the
2074          * active_list, and so will hold the active reference. Only when this
2075          * request is retired will the batch_obj be moved onto the
2076          * inactive_list and lose its active reference. Hence we do not need
2077          * to explicitly hold another reference here.
2078          */
2079
2080         if (request->ctx)
2081                 i915_gem_context_reference(request->ctx);
2082
2083         request->emitted_jiffies = jiffies;
2084         was_empty = list_empty(&ring->request_list);
2085         list_add_tail(&request->list, &ring->request_list);
2086         request->file_priv = NULL;
2087
2088         if (file) {
2089                 struct drm_i915_file_private *file_priv = file->driver_priv;
2090
2091                 spin_lock(&file_priv->mm.lock);
2092                 request->file_priv = file_priv;
2093                 list_add_tail(&request->client_list,
2094                               &file_priv->mm.request_list);
2095                 spin_unlock(&file_priv->mm.lock);
2096         }
2097
2098         trace_i915_gem_request_add(ring, request->seqno);
2099         ring->outstanding_lazy_request = 0;
2100
2101         if (!dev_priv->ums.mm_suspended) {
2102                 i915_queue_hangcheck(ring->dev);
2103
2104                 if (was_empty) {
2105                         queue_delayed_work(dev_priv->wq,
2106                                            &dev_priv->mm.retire_work,
2107                                            round_jiffies_up_relative(HZ));
2108                         intel_mark_busy(dev_priv->dev);
2109                 }
2110         }
2111
2112         if (out_seqno)
2113                 *out_seqno = request->seqno;
2114         return 0;
2115 }
2116
2117 static inline void
2118 i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
2119 {
2120         struct drm_i915_file_private *file_priv = request->file_priv;
2121
2122         if (!file_priv)
2123                 return;
2124
2125         spin_lock(&file_priv->mm.lock);
2126         if (request->file_priv) {
2127                 list_del(&request->client_list);
2128                 request->file_priv = NULL;
2129         }
2130         spin_unlock(&file_priv->mm.lock);
2131 }
2132
2133 static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj,
2134                                     struct i915_address_space *vm)
2135 {
2136         if (acthd >= i915_gem_obj_offset(obj, vm) &&
2137             acthd < i915_gem_obj_offset(obj, vm) + obj->base.size)
2138                 return true;
2139
2140         return false;
2141 }
2142
2143 static bool i915_head_inside_request(const u32 acthd_unmasked,
2144                                      const u32 request_start,
2145                                      const u32 request_end)
2146 {
2147         const u32 acthd = acthd_unmasked & HEAD_ADDR;
2148
2149         if (request_start < request_end) {
2150                 if (acthd >= request_start && acthd < request_end)
2151                         return true;
2152         } else if (request_start > request_end) {
2153                 if (acthd >= request_start || acthd < request_end)
2154                         return true;
2155         }
2156
2157         return false;
2158 }
2159
2160 static struct i915_address_space *
2161 request_to_vm(struct drm_i915_gem_request *request)
2162 {
2163         struct drm_i915_private *dev_priv = request->ring->dev->dev_private;
2164         struct i915_address_space *vm;
2165
2166         vm = &dev_priv->gtt.base;
2167
2168         return vm;
2169 }
2170
2171 static bool i915_request_guilty(struct drm_i915_gem_request *request,
2172                                 const u32 acthd, bool *inside)
2173 {
2174         /* There is a possibility that the unmasked head address,
2175          * pointing inside the ring, matches the batch_obj address range.
2176          * However, this is extremely unlikely.
2177          */
2178         if (request->batch_obj) {
2179                 if (i915_head_inside_object(acthd, request->batch_obj,
2180                                             request_to_vm(request))) {
2181                         *inside = true;
2182                         return true;
2183                 }
2184         }
2185
2186         if (i915_head_inside_request(acthd, request->head, request->tail)) {
2187                 *inside = false;
2188                 return true;
2189         }
2190
2191         return false;
2192 }
2193
2194 static void i915_set_reset_status(struct intel_ring_buffer *ring,
2195                                   struct drm_i915_gem_request *request,
2196                                   u32 acthd)
2197 {
2198         struct i915_ctx_hang_stats *hs = NULL;
2199         bool inside, guilty;
2200         unsigned long offset = 0;
2201
2202         /* Innocent until proven guilty */
2203         guilty = false;
2204
2205         if (request->batch_obj)
2206                 offset = i915_gem_obj_offset(request->batch_obj,
2207                                              request_to_vm(request));
2208
2209         if (ring->hangcheck.action != wait &&
2210             i915_request_guilty(request, acthd, &inside)) {
2211                 DRM_ERROR("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
2212                           ring->name,
2213                           inside ? "inside" : "flushing",
2214                           offset,
2215                           request->ctx ? request->ctx->id : 0,
2216                           acthd);
2217
2218                 guilty = true;
2219         }
2220
2221         /* If contexts are disabled or this is the default context, use
2222          * file_priv->hang_stats.
2223          */
2224         if (request->ctx && request->ctx->id != DEFAULT_CONTEXT_ID)
2225                 hs = &request->ctx->hang_stats;
2226         else if (request->file_priv)
2227                 hs = &request->file_priv->hang_stats;
2228
2229         if (hs) {
2230                 if (guilty)
2231                         hs->batch_active++;
2232                 else
2233                         hs->batch_pending++;
2234         }
2235 }
2236
2237 static void i915_gem_free_request(struct drm_i915_gem_request *request)
2238 {
2239         list_del(&request->list);
2240         i915_gem_request_remove_from_client(request);
2241
2242         if (request->ctx)
2243                 i915_gem_context_unreference(request->ctx);
2244
2245         kfree(request);
2246 }
2247
2248 static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
2249                                       struct intel_ring_buffer *ring)
2250 {
2251         u32 completed_seqno;
2252         u32 acthd;
2253
2254         acthd = intel_ring_get_active_head(ring);
2255         completed_seqno = ring->get_seqno(ring, false);
2256
2257         while (!list_empty(&ring->request_list)) {
2258                 struct drm_i915_gem_request *request;
2259
2260                 request = list_first_entry(&ring->request_list,
2261                                            struct drm_i915_gem_request,
2262                                            list);
2263
2264                 if (request->seqno > completed_seqno)
2265                         i915_set_reset_status(ring, request, acthd);
2266
2267                 i915_gem_free_request(request);
2268         }
2269
2270         while (!list_empty(&ring->active_list)) {
2271                 struct drm_i915_gem_object *obj;
2272
2273                 obj = list_first_entry(&ring->active_list,
2274                                        struct drm_i915_gem_object,
2275                                        ring_list);
2276
2277                 i915_gem_object_move_to_inactive(obj);
2278         }
2279 }
2280
2281 void i915_gem_restore_fences(struct drm_device *dev)
2282 {
2283         struct drm_i915_private *dev_priv = dev->dev_private;
2284         int i;
2285
2286         for (i = 0; i < dev_priv->num_fence_regs; i++) {
2287                 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
2288
2289                 /*
2290                  * Commit delayed tiling changes if we have an object still
2291                  * attached to the fence, otherwise just clear the fence.
2292                  */
2293                 if (reg->obj) {
2294                         i915_gem_object_update_fence(reg->obj, reg,
2295                                                      reg->obj->tiling_mode);
2296                 } else {
2297                         i915_gem_write_fence(dev, i, NULL);
2298                 }
2299         }
2300 }
2301
2302 void i915_gem_reset(struct drm_device *dev)
2303 {
2304         struct drm_i915_private *dev_priv = dev->dev_private;
2305         struct intel_ring_buffer *ring;
2306         int i;
2307
2308         for_each_ring(ring, dev_priv, i)
2309                 i915_gem_reset_ring_lists(dev_priv, ring);
2310
2311         i915_gem_restore_fences(dev);
2312 }
2313
2314 /**
2315  * This function clears the request list as sequence numbers are passed.
2316  */
2317 void
2318 i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
2319 {
2320         uint32_t seqno;
2321
2322         if (list_empty(&ring->request_list))
2323                 return;
2324
2325         WARN_ON(i915_verify_lists(ring->dev));
2326
2327         seqno = ring->get_seqno(ring, true);
2328
2329         while (!list_empty(&ring->request_list)) {
2330                 struct drm_i915_gem_request *request;
2331
2332                 request = list_first_entry(&ring->request_list,
2333                                            struct drm_i915_gem_request,
2334                                            list);
2335
2336                 if (!i915_seqno_passed(seqno, request->seqno))
2337                         break;
2338
2339                 trace_i915_gem_request_retire(ring, request->seqno);
2340                 /* We know the GPU must have read the request to have
2341                  * sent us the seqno + interrupt, so use the position
2342                  * of the tail of the request to update the last known position
2343                  * of the GPU head.
2344                  */
2345                 ring->last_retired_head = request->tail;
2346
2347                 i915_gem_free_request(request);
2348         }
2349
2350         /* Move any buffers on the active list that are no longer referenced
2351          * by the ringbuffer to the flushing/inactive lists as appropriate.
2352          */
2353         while (!list_empty(&ring->active_list)) {
2354                 struct drm_i915_gem_object *obj;
2355
2356                 obj = list_first_entry(&ring->active_list,
2357                                       struct drm_i915_gem_object,
2358                                       ring_list);
2359
2360                 if (!i915_seqno_passed(seqno, obj->last_read_seqno))
2361                         break;
2362
2363                 i915_gem_object_move_to_inactive(obj);
2364         }
2365
2366         if (unlikely(ring->trace_irq_seqno &&
2367                      i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
2368                 ring->irq_put(ring);
2369                 ring->trace_irq_seqno = 0;
2370         }
2371
2372         WARN_ON(i915_verify_lists(ring->dev));
2373 }
2374
2375 void
2376 i915_gem_retire_requests(struct drm_device *dev)
2377 {
2378         drm_i915_private_t *dev_priv = dev->dev_private;
2379         struct intel_ring_buffer *ring;
2380         int i;
2381
2382         for_each_ring(ring, dev_priv, i)
2383                 i915_gem_retire_requests_ring(ring);
2384 }
2385
2386 static void
2387 i915_gem_retire_work_handler(struct work_struct *work)
2388 {
2389         drm_i915_private_t *dev_priv;
2390         struct drm_device *dev;
2391         struct intel_ring_buffer *ring;
2392         bool idle;
2393         int i;
2394
2395         dev_priv = container_of(work, drm_i915_private_t,
2396                                 mm.retire_work.work);
2397         dev = dev_priv->dev;
2398
2399         /* Come back later if the device is busy... */
2400         if (!mutex_trylock(&dev->struct_mutex)) {
2401                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2402                                    round_jiffies_up_relative(HZ));
2403                 return;
2404         }
2405
2406         i915_gem_retire_requests(dev);
2407
2408         /* Send a periodic flush down the ring so we don't hold onto GEM
2409          * objects indefinitely.
2410          */
2411         idle = true;
2412         for_each_ring(ring, dev_priv, i) {
2413                 if (ring->gpu_caches_dirty)
2414                         i915_add_request(ring, NULL);
2415
2416                 idle &= list_empty(&ring->request_list);
2417         }
2418
2419         if (!dev_priv->ums.mm_suspended && !idle)
2420                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2421                                    round_jiffies_up_relative(HZ));
2422         if (idle)
2423                 intel_mark_idle(dev);
2424
2425         mutex_unlock(&dev->struct_mutex);
2426 }
2427
2428 /**
2429  * Ensures that an object will eventually get non-busy by flushing any required
2430  * write domains, emitting any outstanding lazy request and retiring any
2431  * completed requests.
2432  */
2433 static int
2434 i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
2435 {
2436         int ret;
2437
2438         if (obj->active) {
2439                 ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
2440                 if (ret)
2441                         return ret;
2442
2443                 i915_gem_retire_requests_ring(obj->ring);
2444         }
2445
2446         return 0;
2447 }
2448
2449 /**
2450  * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
2451  * @DRM_IOCTL_ARGS: standard ioctl arguments
2452  *
2453  * Returns 0 if successful, else an error is returned with the remaining time in
2454  * the timeout parameter.
2455  *  -ETIME: object is still busy after timeout
2456  *  -ERESTARTSYS: signal interrupted the wait
2457  *  -ENOENT: object doesn't exist
2458  * Also possible, but rare:
2459  *  -EAGAIN: GPU wedged
2460  *  -ENOMEM: damn
2461  *  -ENODEV: Internal IRQ fail
2462  *  -E?: The add request failed
2463  *
2464  * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
2465  * non-zero timeout parameter the wait ioctl will wait for the given number of
2466  * nanoseconds on an object becoming unbusy. Since the wait itself does so
2467  * without holding struct_mutex the object may become re-busied before this
2468  * function completes. A similar but shorter race condition exists in the busy
2469  * ioctl.
2470  */
2471 int
2472 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2473 {
2474         drm_i915_private_t *dev_priv = dev->dev_private;
2475         struct drm_i915_gem_wait *args = data;
2476         struct drm_i915_gem_object *obj;
2477         struct intel_ring_buffer *ring = NULL;
2478         struct timespec timeout_stack, *timeout = NULL;
2479         unsigned reset_counter;
2480         u32 seqno = 0;
2481         int ret = 0;
2482
2483         if (args->timeout_ns >= 0) {
2484                 timeout_stack = ns_to_timespec(args->timeout_ns);
2485                 timeout = &timeout_stack;
2486         }
2487
2488         ret = i915_mutex_lock_interruptible(dev);
2489         if (ret)
2490                 return ret;
2491
2492         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
2493         if (&obj->base == NULL) {
2494                 mutex_unlock(&dev->struct_mutex);
2495                 return -ENOENT;
2496         }
2497
2498         /* Need to make sure the object gets inactive eventually. */
2499         ret = i915_gem_object_flush_active(obj);
2500         if (ret)
2501                 goto out;
2502
2503         if (obj->active) {
2504                 seqno = obj->last_read_seqno;
2505                 ring = obj->ring;
2506         }
2507
2508         if (seqno == 0)
2509                 goto out;
2510
2511         /* Do this after the OLR check to make sure we make forward progress
2512          * polling on this IOCTL with a 0 timeout (like the busy ioctl).
2513          */
2514         if (!args->timeout_ns) {
2515                 ret = -ETIME;
2516                 goto out;
2517         }
2518
2519         drm_gem_object_unreference(&obj->base);
2520         reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
2521         mutex_unlock(&dev->struct_mutex);
2522
2523         ret = __wait_seqno(ring, seqno, reset_counter, true, timeout);
2524         if (timeout)
2525                 args->timeout_ns = timespec_to_ns(timeout);
2526         return ret;
2527
2528 out:
2529         drm_gem_object_unreference(&obj->base);
2530         mutex_unlock(&dev->struct_mutex);
2531         return ret;
2532 }
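
/* A rough sketch of how userspace drives this ioctl (handle and fd are
 * placeholders, error handling omitted):
 *
 *      struct drm_i915_gem_wait wait = {
 *              .bo_handle = handle,
 *              .timeout_ns = 1000000000,       // wait up to one second
 *      };
 *      ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
 *
 * A zero timeout_ns turns this into a non-blocking busy check, as the
 * kernel-doc above notes; otherwise timeout_ns is rewritten with the time
 * remaining on return.
 */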
2533
2534 /**
2535  * i915_gem_object_sync - sync an object to a ring.
2536  *
2537  * @obj: object which may be in use on another ring.
2538  * @to: ring we wish to use the object on. May be NULL.
2539  *
2540  * This code is meant to abstract object synchronization with the GPU.
2541  * Calling with NULL implies synchronizing the object with the CPU
2542  * rather than a particular GPU ring.
2543  *
2544  * Returns 0 if successful, else propagates up the lower layer error.
2545  */
2546 int
2547 i915_gem_object_sync(struct drm_i915_gem_object *obj,
2548                      struct intel_ring_buffer *to)
2549 {
2550         struct intel_ring_buffer *from = obj->ring;
2551         u32 seqno;
2552         int ret, idx;
2553
2554         if (from == NULL || to == from)
2555                 return 0;
2556
2557         if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
2558                 return i915_gem_object_wait_rendering(obj, false);
2559
2560         idx = intel_ring_sync_index(from, to);
2561
2562         seqno = obj->last_read_seqno;
2563         if (seqno <= from->sync_seqno[idx])
2564                 return 0;
2565
2566         ret = i915_gem_check_olr(obj->ring, seqno);
2567         if (ret)
2568                 return ret;
2569
2570         ret = to->sync_to(to, from, seqno);
2571         if (!ret)
2572                 /* We use last_read_seqno because sync_to()
2573                  * might have just caused seqno wrap under
2574                  * the radar.
2575                  */
2576                 from->sync_seqno[idx] = obj->last_read_seqno;
2577
2578         return ret;
2579 }
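
/* For example, when the blitter ring is about to consume a buffer last
 * written by the render ring and semaphores are enabled, this makes the
 * blitter wait on the render ring's seqno on the GPU via to->sync_to()
 * rather than stalling the CPU in wait_rendering.
 */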
2580
2581 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2582 {
2583         u32 old_write_domain, old_read_domains;
2584
2585         /* Force a pagefault for domain tracking on next user access */
2586         i915_gem_release_mmap(obj);
2587
2588         if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2589                 return;
2590
2591         /* Wait for any direct GTT access to complete */
2592         mb();
2593
2594         old_read_domains = obj->base.read_domains;
2595         old_write_domain = obj->base.write_domain;
2596
2597         obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2598         obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2599
2600         trace_i915_gem_object_change_domain(obj,
2601                                             old_read_domains,
2602                                             old_write_domain);
2603 }
2604
2605 int i915_vma_unbind(struct i915_vma *vma)
2606 {
2607         struct drm_i915_gem_object *obj = vma->obj;
2608         drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
2609         int ret;
2610
2611         if (list_empty(&vma->vma_link))
2612                 return 0;
2613
2614         if (obj->pin_count)
2615                 return -EBUSY;
2616
2617         BUG_ON(obj->pages == NULL);
2618
2619         ret = i915_gem_object_finish_gpu(obj);
2620         if (ret)
2621                 return ret;
2622         /* Continue on if we fail due to EIO, the GPU is hung so we
2623          * should be safe and we need to clean up or else we might
2624          * cause memory corruption through use-after-free.
2625          */
2626
2627         i915_gem_object_finish_gtt(obj);
2628
2629         /* release the fence reg _after_ flushing */
2630         ret = i915_gem_object_put_fence(obj);
2631         if (ret)
2632                 return ret;
2633
2634         trace_i915_vma_unbind(vma);
2635
2636         if (obj->has_global_gtt_mapping)
2637                 i915_gem_gtt_unbind_object(obj);
2638         if (obj->has_aliasing_ppgtt_mapping) {
2639                 i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
2640                 obj->has_aliasing_ppgtt_mapping = 0;
2641         }
2642         i915_gem_gtt_finish_object(obj);
2643         i915_gem_object_unpin_pages(obj);
2644
2645         list_del(&vma->mm_list);
2646         /* Avoid an unnecessary call to unbind on rebind. */
2647         if (i915_is_ggtt(vma->vm))
2648                 obj->map_and_fenceable = true;
2649
2650         drm_mm_remove_node(&vma->node);
2651         i915_gem_vma_destroy(vma);
2652
2653         /* Since the unbound list is global, only move to that list if
2654          * no more VMAs exist.
2655          * NB: Until we have real VMAs there will only ever be one. */
2656         WARN_ON(!list_empty(&obj->vma_list));
2657         if (list_empty(&obj->vma_list))
2658                 list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
2659
2660         return 0;
2661 }
2662
2663 /**
2664  * Unbinds an object from the global GTT aperture.
2665  */
2666 int
2667 i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
2668 {
2669         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2670         struct i915_address_space *ggtt = &dev_priv->gtt.base;
2671
2672         if (!i915_gem_obj_ggtt_bound(obj))
2673                 return 0;
2674
2675         if (obj->pin_count)
2676                 return -EBUSY;
2677
2678         BUG_ON(obj->pages == NULL);
2679
2680         return i915_vma_unbind(i915_gem_obj_to_vma(obj, ggtt));
2681 }
2682
2683 int i915_gpu_idle(struct drm_device *dev)
2684 {
2685         drm_i915_private_t *dev_priv = dev->dev_private;
2686         struct intel_ring_buffer *ring;
2687         int ret, i;
2688
2689         /* Flush everything onto the inactive list. */
2690         for_each_ring(ring, dev_priv, i) {
2691                 ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
2692                 if (ret)
2693                         return ret;
2694
2695                 ret = intel_ring_idle(ring);
2696                 if (ret)
2697                         return ret;
2698         }
2699
2700         return 0;
2701 }
2702
2703 static void i965_write_fence_reg(struct drm_device *dev, int reg,
2704                                  struct drm_i915_gem_object *obj)
2705 {
2706         drm_i915_private_t *dev_priv = dev->dev_private;
2707         int fence_reg;
2708         int fence_pitch_shift;
2709
2710         if (INTEL_INFO(dev)->gen >= 6) {
2711                 fence_reg = FENCE_REG_SANDYBRIDGE_0;
2712                 fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
2713         } else {
2714                 fence_reg = FENCE_REG_965_0;
2715                 fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
2716         }
2717
2718         fence_reg += reg * 8;
2719
2720         /* To w/a incoherency with non-atomic 64-bit register updates,
2721          * we split the 64-bit update into two 32-bit writes. In order
2722          * for a partial fence not to be evaluated between writes, we
2723          * precede the update with a write to turn off the fence register,
2724          * and only enable the fence as the last step.
2725          *
2726          * For extra levels of paranoia, we make sure each step lands
2727          * before applying the next step.
2728          */
2729         I915_WRITE(fence_reg, 0);
2730         POSTING_READ(fence_reg);
2731
2732         if (obj) {
2733                 u32 size = i915_gem_obj_ggtt_size(obj);
2734                 uint64_t val;
2735
2736                 val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
2737                                  0xfffff000) << 32;
2738                 val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
2739                 val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
2740                 if (obj->tiling_mode == I915_TILING_Y)
2741                         val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2742                 val |= I965_FENCE_REG_VALID;
2743
2744                 I915_WRITE(fence_reg + 4, val >> 32);
2745                 POSTING_READ(fence_reg + 4);
2746
2747                 I915_WRITE(fence_reg + 0, val);
2748                 POSTING_READ(fence_reg);
2749         } else {
2750                 I915_WRITE(fence_reg + 4, 0);
2751                 POSTING_READ(fence_reg + 4);
2752         }
2753 }
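
/* For illustration, a 1MiB Y-tiled object at GGTT offset 0x00400000 with a
 * 512-byte stride ends up with the last page address (0x004ff000) in the
 * upper dword and, in the lower dword, the start address, a pitch field of
 * (512 / 128) - 1 = 3, the Y-tiling bit and the valid bit.
 */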
2754
2755 static void i915_write_fence_reg(struct drm_device *dev, int reg,
2756                                  struct drm_i915_gem_object *obj)
2757 {
2758         drm_i915_private_t *dev_priv = dev->dev_private;
2759         u32 val;
2760
2761         if (obj) {
2762                 u32 size = i915_gem_obj_ggtt_size(obj);
2763                 int pitch_val;
2764                 int tile_width;
2765
2766                 WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
2767                      (size & -size) != size ||
2768                      (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
2769                      "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2770                      i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
2771
2772                 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
2773                         tile_width = 128;
2774                 else
2775                         tile_width = 512;
2776
2777                 /* Note: pitch better be a power of two tile widths */
2778                 pitch_val = obj->stride / tile_width;
2779                 pitch_val = ffs(pitch_val) - 1;
2780
2781                 val = i915_gem_obj_ggtt_offset(obj);
2782                 if (obj->tiling_mode == I915_TILING_Y)
2783                         val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2784                 val |= I915_FENCE_SIZE_BITS(size);
2785                 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2786                 val |= I830_FENCE_REG_VALID;
2787         } else
2788                 val = 0;
2789
2790         if (reg < 8)
2791                 reg = FENCE_REG_830_0 + reg * 4;
2792         else
2793                 reg = FENCE_REG_945_8 + (reg - 8) * 4;
2794
2795         I915_WRITE(reg, val);
2796         POSTING_READ(reg);
2797 }
2798
2799 static void i830_write_fence_reg(struct drm_device *dev, int reg,
2800                                 struct drm_i915_gem_object *obj)
2801 {
2802         drm_i915_private_t *dev_priv = dev->dev_private;
2803         uint32_t val;
2804
2805         if (obj) {
2806                 u32 size = i915_gem_obj_ggtt_size(obj);
2807                 uint32_t pitch_val;
2808
2809                 WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
2810                      (size & -size) != size ||
2811                      (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
2812                      "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
2813                      i915_gem_obj_ggtt_offset(obj), size);
2814
2815                 pitch_val = obj->stride / 128;
2816                 pitch_val = ffs(pitch_val) - 1;
2817
2818                 val = i915_gem_obj_ggtt_offset(obj);
2819                 if (obj->tiling_mode == I915_TILING_Y)
2820                         val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2821                 val |= I830_FENCE_SIZE_BITS(size);
2822                 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2823                 val |= I830_FENCE_REG_VALID;
2824         } else
2825                 val = 0;
2826
2827         I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
2828         POSTING_READ(FENCE_REG_830_0 + reg * 4);
2829 }
2830
2831 static inline bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
2832 {
2833         return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
2834 }
2835
2836 static void i915_gem_write_fence(struct drm_device *dev, int reg,
2837                                  struct drm_i915_gem_object *obj)
2838 {
2839         struct drm_i915_private *dev_priv = dev->dev_private;
2840
2841         /* Ensure that all CPU reads are completed before installing a fence
2842          * and all writes before removing the fence.
2843          */
2844         if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
2845                 mb();
2846
2847         WARN(obj && (!obj->stride || !obj->tiling_mode),
2848              "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
2849              obj->stride, obj->tiling_mode);
2850
2851         switch (INTEL_INFO(dev)->gen) {
2852         case 7:
2853         case 6:
2854         case 5:
2855         case 4: i965_write_fence_reg(dev, reg, obj); break;
2856         case 3: i915_write_fence_reg(dev, reg, obj); break;
2857         case 2: i830_write_fence_reg(dev, reg, obj); break;
2858         default: BUG();
2859         }
2860
2861         /* And similarly be paranoid that no direct access to this region
2862          * is reordered to before the fence is installed.
2863          */
2864         if (i915_gem_object_needs_mb(obj))
2865                 mb();
2866 }
2867
2868 static inline int fence_number(struct drm_i915_private *dev_priv,
2869                                struct drm_i915_fence_reg *fence)
2870 {
2871         return fence - dev_priv->fence_regs;
2872 }
2873
2874 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
2875                                          struct drm_i915_fence_reg *fence,
2876                                          bool enable)
2877 {
2878         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2879         int reg = fence_number(dev_priv, fence);
2880
2881         i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
2882
2883         if (enable) {
2884                 obj->fence_reg = reg;
2885                 fence->obj = obj;
2886                 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
2887         } else {
2888                 obj->fence_reg = I915_FENCE_REG_NONE;
2889                 fence->obj = NULL;
2890                 list_del_init(&fence->lru_list);
2891         }
2892         obj->fence_dirty = false;
2893 }
2894
2895 static int
2896 i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
2897 {
2898         if (obj->last_fenced_seqno) {
2899                 int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
2900                 if (ret)
2901                         return ret;
2902
2903                 obj->last_fenced_seqno = 0;
2904         }
2905
2906         obj->fenced_gpu_access = false;
2907         return 0;
2908 }
2909
2910 int
2911 i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
2912 {
2913         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2914         struct drm_i915_fence_reg *fence;
2915         int ret;
2916
2917         ret = i915_gem_object_wait_fence(obj);
2918         if (ret)
2919                 return ret;
2920
2921         if (obj->fence_reg == I915_FENCE_REG_NONE)
2922                 return 0;
2923
2924         fence = &dev_priv->fence_regs[obj->fence_reg];
2925
2926         i915_gem_object_fence_lost(obj);
2927         i915_gem_object_update_fence(obj, fence, false);
2928
2929         return 0;
2930 }
2931
2932 static struct drm_i915_fence_reg *
2933 i915_find_fence_reg(struct drm_device *dev)
2934 {
2935         struct drm_i915_private *dev_priv = dev->dev_private;
2936         struct drm_i915_fence_reg *reg, *avail;
2937         int i;
2938
2939         /* First try to find a free reg */
2940         avail = NULL;
2941         for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2942                 reg = &dev_priv->fence_regs[i];
2943                 if (!reg->obj)
2944                         return reg;
2945
2946                 if (!reg->pin_count)
2947                         avail = reg;
2948         }
2949
2950         if (avail == NULL)
2951                 return NULL;
2952
2953         /* None available, try to steal one or wait for a user to finish */
2954         list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
2955                 if (reg->pin_count)
2956                         continue;
2957
2958                 return reg;
2959         }
2960
2961         return NULL;
2962 }
2963
2964 /**
2965  * i915_gem_object_get_fence - set up fencing for an object
2966  * @obj: object to map through a fence reg
2967  *
2968  * When mapping objects through the GTT, userspace wants to be able to write
2969  * to them without having to worry about swizzling if the object is tiled.
2970  * This function walks the fence regs looking for a free one for @obj,
2971  * stealing one if it can't find any.
2972  *
2973  * It then sets up the reg based on the object's properties: address, pitch
2974  * and tiling format.
2975  *
2976  * For an untiled surface, this removes any existing fence.
2977  */
2978 int
2979 i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
2980 {
2981         struct drm_device *dev = obj->base.dev;
2982         struct drm_i915_private *dev_priv = dev->dev_private;
2983         bool enable = obj->tiling_mode != I915_TILING_NONE;
2984         struct drm_i915_fence_reg *reg;
2985         int ret;
2986
2987         /* Have we updated the tiling parameters upon the object and so
2988          * will need to serialise the write to the associated fence register?
2989          */
2990         if (obj->fence_dirty) {
2991                 ret = i915_gem_object_wait_fence(obj);
2992                 if (ret)
2993                         return ret;
2994         }
2995
2996         /* Just update our place in the LRU if our fence is getting reused. */
2997         if (obj->fence_reg != I915_FENCE_REG_NONE) {
2998                 reg = &dev_priv->fence_regs[obj->fence_reg];
2999                 if (!obj->fence_dirty) {
3000                         list_move_tail(&reg->lru_list,
3001                                        &dev_priv->mm.fence_list);
3002                         return 0;
3003                 }
3004         } else if (enable) {
3005                 reg = i915_find_fence_reg(dev);
3006                 if (reg == NULL)
3007                         return -EDEADLK;
3008
3009                 if (reg->obj) {
3010                         struct drm_i915_gem_object *old = reg->obj;
3011
3012                         ret = i915_gem_object_wait_fence(old);
3013                         if (ret)
3014                                 return ret;
3015
3016                         i915_gem_object_fence_lost(old);
3017                 }
3018         } else
3019                 return 0;
3020
3021         i915_gem_object_update_fence(obj, reg, enable);
3022
3023         return 0;
3024 }
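
/*
 * Illustrative sketch, not part of the original file: a typical caller pins
 * the object into the mappable aperture first and only then asks for a
 * fence, all under dev->struct_mutex.  example_pin_and_fence() is a
 * hypothetical helper; -EDEADLK from i915_gem_object_get_fence() means all
 * fence registers were pinned by other users.
 */
#if 0
static int example_pin_and_fence(struct drm_i915_gem_object *obj,
				 u32 alignment)
{
	int ret;

	/* Pin into the mappable GGTT so a fence can cover the pages. */
	ret = i915_gem_obj_ggtt_pin(obj, alignment, true, false);
	if (ret)
		return ret;

	/* Tiled objects gain a fence register; untiled ones drop theirs. */
	ret = i915_gem_object_get_fence(obj);
	if (ret)
		i915_gem_object_unpin(obj);

	return ret;
}
#endif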
3025
3026 static bool i915_gem_valid_gtt_space(struct drm_device *dev,
3027                                      struct drm_mm_node *gtt_space,
3028                                      unsigned long cache_level)
3029 {
3030         struct drm_mm_node *other;
3031
3032         /* On non-LLC machines we have to be careful when putting differing
3033          * types of snoopable memory together to avoid the prefetcher
3034          * crossing memory domains and dying.
3035          */
3036         if (HAS_LLC(dev))
3037                 return true;
3038
3039         if (!drm_mm_node_allocated(gtt_space))
3040                 return true;
3041
3042         if (list_empty(&gtt_space->node_list))
3043                 return true;
3044
3045         other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
3046         if (other->allocated && !other->hole_follows && other->color != cache_level)
3047                 return false;
3048
3049         other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
3050         if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
3051                 return false;
3052
3053         return true;
3054 }
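
/*
 * Illustrative sketch, not part of the original file: the cache level
 * doubles as the drm_mm node "colour", which is what the neighbour checks
 * above compare on non-LLC parts.  example_colour_node() is a hypothetical
 * helper showing where that colour comes from.
 */
#if 0
static void example_colour_node(struct drm_device *dev, struct i915_vma *vma,
				enum i915_cache_level cache_level)
{
	/* Record the colour so future neighbours can be validated... */
	vma->node.color = cache_level;

	/* ...and sanity-check the placement we were just handed. */
	WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node, cache_level));
}
#endif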
3055
3056 static void i915_gem_verify_gtt(struct drm_device *dev)
3057 {
3058 #if WATCH_GTT
3059         struct drm_i915_private *dev_priv = dev->dev_private;
3060         struct drm_i915_gem_object *obj;
3061         int err = 0;
3062
3063         list_for_each_entry(obj, &dev_priv->mm.gtt_list, global_list) {
3064                 if (obj->gtt_space == NULL) {
3065                         printk(KERN_ERR "object found on GTT list with no space reserved\n");
3066                         err++;
3067                         continue;
3068                 }
3069
3070                 if (obj->cache_level != obj->gtt_space->color) {
3071                         printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
3072                                i915_gem_obj_ggtt_offset(obj),
3073                                i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
3074                                obj->cache_level,
3075                                obj->gtt_space->color);
3076                         err++;
3077                         continue;
3078                 }
3079
3080                 if (!i915_gem_valid_gtt_space(dev,
3081                                               obj->gtt_space,
3082                                               obj->cache_level)) {
3083                         printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
3084                                i915_gem_obj_ggtt_offset(obj),
3085                                i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
3086                                obj->cache_level);
3087                         err++;
3088                         continue;
3089                 }
3090         }
3091
3092         WARN_ON(err);
3093 #endif
3094 }
3095
3096 /**
3097  * Finds free space in the GTT aperture and binds the object there.
3098  */
3099 static int
3100 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
3101                            struct i915_address_space *vm,
3102                            unsigned alignment,
3103                            bool map_and_fenceable,
3104                            bool nonblocking)
3105 {
3106         struct drm_device *dev = obj->base.dev;
3107         drm_i915_private_t *dev_priv = dev->dev_private;
3108         u32 size, fence_size, fence_alignment, unfenced_alignment;
3109         bool mappable, fenceable;
3110         size_t gtt_max =
3111                 map_and_fenceable ? dev_priv->gtt.mappable_end : vm->total;
3112         struct i915_vma *vma;
3113         int ret;
3114
3115         if (WARN_ON(!list_empty(&obj->vma_list)))
3116                 return -EBUSY;
3117
3118         fence_size = i915_gem_get_gtt_size(dev,
3119                                            obj->base.size,
3120                                            obj->tiling_mode);
3121         fence_alignment = i915_gem_get_gtt_alignment(dev,
3122                                                      obj->base.size,
3123                                                      obj->tiling_mode, true);
3124         unfenced_alignment =
3125                 i915_gem_get_gtt_alignment(dev,
3126                                                     obj->base.size,
3127                                                     obj->tiling_mode, false);
3128
3129         if (alignment == 0)
3130                 alignment = map_and_fenceable ? fence_alignment :
3131                                                 unfenced_alignment;
3132         if (map_and_fenceable && alignment & (fence_alignment - 1)) {
3133                 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
3134                 return -EINVAL;
3135         }
3136
3137         size = map_and_fenceable ? fence_size : obj->base.size;
3138
3139         /* If the object is bigger than the entire aperture, reject it early
3140          * before evicting everything in a vain attempt to find space.
3141          */
3142         if (obj->base.size > gtt_max) {
3143                 DRM_ERROR("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
3144                           obj->base.size,
3145                           map_and_fenceable ? "mappable" : "total",
3146                           gtt_max);
3147                 return -E2BIG;
3148         }
3149
3150         ret = i915_gem_object_get_pages(obj);
3151         if (ret)
3152                 return ret;
3153
3154         i915_gem_object_pin_pages(obj);
3155
3156         /* FIXME: For now we only ever use 1 VMA per object */
3157         BUG_ON(!i915_is_ggtt(vm));
3158         WARN_ON(!list_empty(&obj->vma_list));
3159
3160         vma = i915_gem_vma_create(obj, vm);
3161         if (IS_ERR(vma)) {
3162                 ret = PTR_ERR(vma);
3163                 goto err_unpin;
3164         }
3165
3166 search_free:
3167         ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
3168                                                   size, alignment,
3169                                                   obj->cache_level, 0, gtt_max);
3170         if (ret) {
3171                 ret = i915_gem_evict_something(dev, vm, size, alignment,
3172                                                obj->cache_level,
3173                                                map_and_fenceable,
3174                                                nonblocking);
3175                 if (ret == 0)
3176                         goto search_free;
3177
3178                 goto err_free_vma;
3179         }
3180         if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
3181                                               obj->cache_level))) {
3182                 ret = -EINVAL;
3183                 goto err_remove_node;
3184         }
3185
3186         ret = i915_gem_gtt_prepare_object(obj);
3187         if (ret)
3188                 goto err_remove_node;
3189
3190         list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
3191         list_add_tail(&vma->mm_list, &vm->inactive_list);
3192
3193         fenceable =
3194                 i915_is_ggtt(vm) &&
3195                 i915_gem_obj_ggtt_size(obj) == fence_size &&
3196                 (i915_gem_obj_ggtt_offset(obj) & (fence_alignment - 1)) == 0;
3197
3198         mappable =
3199                 i915_is_ggtt(vm) &&
3200                 vma->node.start + obj->base.size <= dev_priv->gtt.mappable_end;
3201
3202         /* Map and fenceable only changes if the VM is the global GGTT */
3203         if (i915_is_ggtt(vm))
3204                 obj->map_and_fenceable = mappable && fenceable;
3205
3206         trace_i915_vma_bind(vma, map_and_fenceable);
3207         i915_gem_verify_gtt(dev);
3208         return 0;
3209
3210 err_remove_node:
3211         drm_mm_remove_node(&vma->node);
3212 err_free_vma:
3213         i915_gem_vma_destroy(vma);
3214 err_unpin:
3215         i915_gem_object_unpin_pages(obj);
3216         return ret;
3217 }
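
/*
 * Illustrative sketch, not part of the original file, of the search_free
 * retry idiom used above: attempt the allocation, evict on failure and try
 * again until either the insertion succeeds or eviction itself gives up.
 * example_insert() is a hypothetical wrapper around the same two calls.
 */
#if 0
static int example_insert(struct drm_device *dev,
			  struct i915_address_space *vm,
			  struct drm_mm_node *node,
			  u32 size, unsigned alignment, unsigned long color)
{
	int ret;

retry:
	ret = drm_mm_insert_node_in_range_generic(&vm->mm, node, size,
						  alignment, color,
						  0, vm->total);
	if (ret) {
		ret = i915_gem_evict_something(dev, vm, size, alignment,
					       color, false, false);
		if (ret == 0)
			goto retry;
	}

	return ret;
}
#endif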
3218
3219 void
3220 i915_gem_clflush_object(struct drm_i915_gem_object *obj)
3221 {
3222         /* If we don't have a page list set up, then we're not pinned
3223          * to the GPU, and we can ignore the cache flush because it'll happen
3224          * again at bind time.
3225          */
3226         if (obj->pages == NULL)
3227                 return;
3228
3229         /*
3230          * Stolen memory is always coherent with the GPU as it is explicitly
3231          * marked as wc by the system, or the system is cache-coherent.
3232          */
3233         if (obj->stolen)
3234                 return;
3235
3236         /* If the GPU is snooping the contents of the CPU cache,
3237          * we do not need to manually clear the CPU cache lines.  However,
3238          * the caches are only snooped when the render cache is
3239          * flushed/invalidated.  As we always have to emit invalidations
3240          * and flushes when moving into and out of the RENDER domain, correct
3241          * snooping behaviour occurs naturally as the result of our domain
3242          * tracking.
3243          */
3244         if (obj->cache_level != I915_CACHE_NONE)
3245                 return;
3246
3247         trace_i915_gem_object_clflush(obj);
3248
3249         drm_clflush_sg(obj->pages);
3250 }
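
/*
 * Illustrative sketch, not part of the original file: a caller that dirtied
 * pages through a CPU mapping only needs this manual flush when the object
 * is neither LLC-coherent nor snooped.  example_flush_for_gpu() is a
 * hypothetical helper mirroring the CPU write-domain flush used elsewhere
 * in this file.
 */
#if 0
static void example_flush_for_gpu(struct drm_i915_gem_object *obj)
{
	if (cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
		return;

	i915_gem_clflush_object(obj);
	i915_gem_chipset_flush(obj->base.dev);
}
#endif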
3251
3252 /** Flushes the GTT write domain for the object if it's dirty. */
3253 static void
3254 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
3255 {
3256         uint32_t old_write_domain;
3257
3258         if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
3259                 return;
3260
3261         /* No actual flushing is required for the GTT write domain.  Writes
3262          * to it immediately go to main memory as far as we know, so there's
3263          * no chipset flush.  It also doesn't land in the render cache.
3264          *
3265          * However, we do have to enforce the order so that all writes through
3266          * the GTT land before any writes to the device, such as updates to
3267          * the GATT itself.
3268          */
3269         wmb();
3270
3271         old_write_domain = obj->base.write_domain;
3272         obj->base.write_domain = 0;
3273
3274         trace_i915_gem_object_change_domain(obj,
3275                                             obj->base.read_domains,
3276                                             old_write_domain);
3277 }
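
/*
 * Illustrative sketch, not part of the original file, of the ordering rule
 * described above: make every write through a WC/GTT mapping globally
 * visible before the write that tells the hardware to look at it.  The
 * "doorbell" register below is purely hypothetical.
 */
#if 0
static void example_post_update(void __iomem *regs, unsigned long doorbell,
				u32 __iomem *gtt_dst, const u32 *src,
				int dwords)
{
	int i;

	for (i = 0; i < dwords; i++)
		iowrite32(src[i], gtt_dst + i);

	wmb();	/* the payload must land before the doorbell write */

	iowrite32(1, regs + doorbell);
}
#endif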
3278
3279 /** Flushes the CPU write domain for the object if it's dirty. */
3280 static void
3281 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
3282 {
3283         uint32_t old_write_domain;
3284
3285         if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
3286                 return;
3287
3288         i915_gem_clflush_object(obj);
3289         i915_gem_chipset_flush(obj->base.dev);
3290         old_write_domain = obj->base.write_domain;
3291         obj->base.write_domain = 0;
3292
3293         trace_i915_gem_object_change_domain(obj,
3294                                             obj->base.read_domains,
3295                                             old_write_domain);
3296 }
3297
3298 /**
3299  * Moves a single object to the GTT read, and possibly write domain.
3300  *
3301  * This function returns when the move is complete, including waiting on
3302  * flushes to occur.
3303  */
3304 int
3305 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3306 {
3307         drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
3308         uint32_t old_write_domain, old_read_domains;
3309         int ret;
3310
3311         /* Not valid to be called on unbound objects. */
3312         if (!i915_gem_obj_bound_any(obj))
3313                 return -EINVAL;
3314
3315         if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3316                 return 0;
3317
3318         ret = i915_gem_object_wait_rendering(obj, !write);
3319         if (ret)
3320                 return ret;
3321
3322         i915_gem_object_flush_cpu_write_domain(obj);
3323
3324         /* Serialise direct access to this object with the barriers for
3325          * coherent writes from the GPU, by effectively invalidating the
3326          * GTT domain upon first access.
3327          */
3328         if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3329                 mb();
3330
3331         old_write_domain = obj->base.write_domain;
3332         old_read_domains = obj->base.read_domains;
3333
3334         /* It should now be out of any other write domains, and we can update
3335          * the domain values for our changes.
3336          */
3337         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3338         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3339         if (write) {
3340                 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3341                 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3342                 obj->dirty = 1;
3343         }
3344
3345         trace_i915_gem_object_change_domain(obj,
3346                                             old_read_domains,
3347                                             old_write_domain);
3348
3349         /* And bump the LRU for this access */
3350         if (i915_gem_object_is_inactive(obj)) {
3351                 struct i915_vma *vma = i915_gem_obj_to_vma(obj,
3352                                                            &dev_priv->gtt.base);
3353                 if (vma)
3354                         list_move_tail(&vma->mm_list,
3355                                        &dev_priv->gtt.base.inactive_list);
3356
3357         }
3358
3359         return 0;
3360 }
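
/*
 * Illustrative sketch, not part of the original file: kernel-internal users
 * that are about to write to an object through the aperture move it into
 * the GTT write domain first so any CPU dirt is flushed and the ordering
 * barrier above is emitted.  example_prepare_gtt_write() is hypothetical
 * and assumes the caller holds dev->struct_mutex with the object bound.
 */
#if 0
static int example_prepare_gtt_write(struct drm_i915_gem_object *obj)
{
	int ret;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		return ret;

	/* Subsequent writes through the GTT mapping are now tracked. */
	return 0;
}
#endif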
3361
3362 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3363                                     enum i915_cache_level cache_level)
3364 {
3365         struct drm_device *dev = obj->base.dev;
3366         drm_i915_private_t *dev_priv = dev->dev_private;
3367         struct i915_vma *vma;
3368         int ret;
3369
3370         if (obj->cache_level == cache_level)
3371                 return 0;
3372
3373         if (obj->pin_count) {
3374                 DRM_DEBUG("cannot change the cache level of pinned objects\n");
3375                 return -EBUSY;
3376         }
3377
3378         list_for_each_entry(vma, &obj->vma_list, vma_link) {
3379                 if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
3380                         ret = i915_vma_unbind(vma);
3381                         if (ret)
3382                                 return ret;
3383
3384                         break;
3385                 }
3386         }
3387
3388         if (i915_gem_obj_bound_any(obj)) {
3389                 ret = i915_gem_object_finish_gpu(obj);
3390                 if (ret)
3391                         return ret;
3392
3393                 i915_gem_object_finish_gtt(obj);
3394
3395                 /* Before SandyBridge, you could not use tiling or fence
3396                  * registers with snooped memory, so relinquish any fences
3397                  * currently pointing to our region in the aperture.
3398                  */
3399                 if (INTEL_INFO(dev)->gen < 6) {
3400                         ret = i915_gem_object_put_fence(obj);
3401                         if (ret)
3402                                 return ret;
3403                 }
3404
3405                 if (obj->has_global_gtt_mapping)
3406                         i915_gem_gtt_bind_object(obj, cache_level);
3407                 if (obj->has_aliasing_ppgtt_mapping)
3408                         i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
3409                                                obj, cache_level);
3410         }
3411
3412         if (cache_level == I915_CACHE_NONE) {
3413                 u32 old_read_domains, old_write_domain;
3414
3415                 /* If we're coming from LLC cached, then we haven't
3416                  * actually been tracking whether the data is in the
3417                  * CPU cache or not, since we only allow one bit set
3418                  * in obj->write_domain and have been skipping the clflushes.
3419                  * Just set it to the CPU cache for now.
3420                  */
3421                 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
3422                 WARN_ON(obj->base.read_domains & ~I915_GEM_DOMAIN_CPU);
3423
3424                 old_read_domains = obj->base.read_domains;
3425                 old_write_domain = obj->base.write_domain;
3426
3427                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3428                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3429
3430                 trace_i915_gem_object_change_domain(obj,
3431                                                     old_read_domains,
3432                                                     old_write_domain);
3433         }
3434
3435         list_for_each_entry(vma, &obj->vma_list, vma_link)
3436                 vma->node.color = cache_level;
3437         obj->cache_level = cache_level;
3438         i915_gem_verify_gtt(dev);
3439         return 0;
3440 }
3441
3442 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3443                                struct drm_file *file)
3444 {
3445         struct drm_i915_gem_caching *args = data;
3446         struct drm_i915_gem_object *obj;
3447         int ret;
3448
3449         ret = i915_mutex_lock_interruptible(dev);
3450         if (ret)
3451                 return ret;
3452
3453         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3454         if (&obj->base == NULL) {
3455                 ret = -ENOENT;
3456                 goto unlock;
3457         }
3458
3459         args->caching = obj->cache_level != I915_CACHE_NONE;
3460
3461         drm_gem_object_unreference(&obj->base);
3462 unlock:
3463         mutex_unlock(&dev->struct_mutex);
3464         return ret;
3465 }
3466
3467 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3468                                struct drm_file *file)
3469 {
3470         struct drm_i915_gem_caching *args = data;
3471         struct drm_i915_gem_object *obj;
3472         enum i915_cache_level level;
3473         int ret;
3474
3475         switch (args->caching) {
3476         case I915_CACHING_NONE:
3477                 level = I915_CACHE_NONE;
3478                 break;
3479         case I915_CACHING_CACHED:
3480                 level = I915_CACHE_LLC;
3481                 break;
3482         default:
3483                 return -EINVAL;
3484         }
3485
3486         ret = i915_mutex_lock_interruptible(dev);
3487         if (ret)
3488                 return ret;
3489
3490         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3491         if (&obj->base == NULL) {
3492                 ret = -ENOENT;
3493                 goto unlock;
3494         }
3495
3496         ret = i915_gem_object_set_cache_level(obj, level);
3497
3498         drm_gem_object_unreference(&obj->base);
3499 unlock:
3500         mutex_unlock(&dev->struct_mutex);
3501         return ret;
3502 }
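
/*
 * Illustrative userspace sketch, not part of the original file: the two
 * ioctls above are driven from libdrm roughly like this, assuming the
 * DRM_IOCTL_I915_GEM_SET_CACHING definition from i915_drm.h and a GEM
 * handle the caller already owns.
 */
#if 0
#include <stdint.h>
#include <xf86drm.h>
#include <i915_drm.h>

static int example_set_cached(int fd, uint32_t handle, int cached)
{
	struct drm_i915_gem_caching arg = {
		.handle = handle,
		.caching = cached ? I915_CACHING_CACHED : I915_CACHING_NONE,
	};

	return drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
}
#endif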
3503
3504 /*
3505  * Prepare buffer for display plane (scanout, cursors, etc).
3506  * Can be called from an uninterruptible phase (modesetting) and allows
3507  * any flushes to be pipelined (for pageflips).
3508  */
3509 int
3510 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3511                                      u32 alignment,
3512                                      struct intel_ring_buffer *pipelined)
3513 {
3514         u32 old_read_domains, old_write_domain;
3515         int ret;
3516
3517         if (pipelined != obj->ring) {
3518                 ret = i915_gem_object_sync(obj, pipelined);
3519                 if (ret)
3520                         return ret;
3521         }
3522
3523         /* The display engine is not coherent with the LLC cache on gen6.  As
3524          * a result, we make sure that the pinning that is about to occur is
3525          * done with uncached PTEs. This is the lowest common denominator for all
3526          * chipsets.
3527          *
3528          * However for gen6+, we could do better by using the GFDT bit instead
3529          * of uncaching, which would allow us to flush all the LLC-cached data
3530          * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3531          */
3532         ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
3533         if (ret)
3534                 return ret;
3535
3536         /* As the user may map the buffer once pinned in the display plane
3537          * (e.g. libkms for the bootup splash), we have to ensure that we
3538          * always use map_and_fenceable for all scanout buffers.
3539          */
3540         ret = i915_gem_obj_ggtt_pin(obj, alignment, true, false);
3541         if (ret)
3542                 return ret;
3543
3544         i915_gem_object_flush_cpu_write_domain(obj);
3545
3546         old_write_domain = obj->base.write_domain;
3547         old_read_domains = obj->base.read_domains;
3548
3549         /* It should now be out of any other write domains, and we can update
3550          * the domain values for our changes.
3551          */
3552         obj->base.write_domain = 0;
3553         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3554
3555         trace_i915_gem_object_change_domain(obj,
3556                                             old_read_domains,
3557                                             old_write_domain);
3558
3559         return 0;
3560 }
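
/*
 * Illustrative sketch, not part of the original file: the modeset code pins
 * a framebuffer with the helper above and then sets up a fence so tiled
 * scanout is handled correctly.  example_pin_fb() is a hypothetical helper.
 */
#if 0
static int example_pin_fb(struct drm_i915_gem_object *obj, u32 alignment)
{
	int ret;

	ret = i915_gem_object_pin_to_display_plane(obj, alignment, NULL);
	if (ret)
		return ret;

	ret = i915_gem_object_get_fence(obj);
	if (ret) {
		i915_gem_object_unpin(obj);
		return ret;
	}

	return 0;
}
#endif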
3561
3562 int
3563 i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
3564 {
3565         int ret;
3566
3567         if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
3568                 return 0;
3569
3570         ret = i915_gem_object_wait_rendering(obj, false);
3571         if (ret)
3572                 return ret;
3573
3574         /* Ensure that we invalidate the GPU's caches and TLBs. */
3575         obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
3576         return 0;
3577 }
3578
3579 /**
3580  * Moves a single object to the CPU read, and possibly write domain.
3581  *
3582  * This function returns when the move is complete, including waiting on
3583  * flushes to occur.
3584  */
3585 int
3586 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3587 {
3588         uint32_t old_write_domain, old_read_domains;
3589         int ret;
3590
3591         if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3592                 return 0;
3593
3594         ret = i915_gem_object_wait_rendering(obj, !write);
3595         if (ret)
3596                 return ret;
3597
3598         i915_gem_object_flush_gtt_write_domain(obj);
3599
3600         old_write_domain = obj->base.write_domain;
3601         old_read_domains = obj->base.read_domains;
3602
3603         /* Flush the CPU cache if it's still invalid. */
3604         if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
3605                 if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
3606                         i915_gem_clflush_object(obj);
3607
3608                 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3609         }
3610
3611         /* It should now be out of any other write domains, and we can update
3612          * the domain values for our changes.
3613          */
3614         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3615
3616         /* If we're writing through the CPU, then the GPU read domains will
3617          * need to be invalidated at next use.
3618          */
3619         if (write) {
3620                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3621                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3622         }
3623
3624         trace_i915_gem_object_change_domain(obj,
3625                                             old_read_domains,
3626                                             old_write_domain);
3627
3628         return 0;
3629 }
3630
3631 /* Throttle our rendering by waiting until the ring has completed our requests
3632  * emitted over 20 msec ago.
3633  *
3634  * Note that if we were to use the current jiffies each time around the loop,
3635  * we wouldn't escape the function with any frames outstanding if the time to
3636  * render a frame was over 20ms.
3637  *
3638  * This should get us reasonable parallelism between CPU and GPU but also
3639  * relatively low latency when blocking on a particular request to finish.
3640  */
3641 static int
3642 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3643 {
3644         struct drm_i915_private *dev_priv = dev->dev_private;
3645         struct drm_i915_file_private *file_priv = file->driver_priv;
3646         unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
3647         struct drm_i915_gem_request *request;
3648         struct intel_ring_buffer *ring = NULL;
3649         unsigned reset_counter;
3650         u32 seqno = 0;
3651         int ret;
3652
3653         ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
3654         if (ret)
3655                 return ret;
3656
3657         ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
3658         if (ret)
3659                 return ret;
3660
3661         spin_lock(&file_priv->mm.lock);
3662         list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
3663                 if (time_after_eq(request->emitted_jiffies, recent_enough))
3664                         break;
3665
3666                 ring = request->ring;
3667                 seqno = request->seqno;
3668         }
3669         reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
3670         spin_unlock(&file_priv->mm.lock);
3671
3672         if (seqno == 0)
3673                 return 0;
3674
3675         ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
3676         if (ret == 0)
3677                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
3678
3679         return ret;
3680 }
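
/*
 * Illustrative sketch, not part of the original file: the 20ms window above
 * is computed once before the list walk, so each request is compared
 * against a fixed cutoff rather than a moving "now".
 */
#if 0
static bool example_recent_enough(unsigned long emitted_jiffies,
				  unsigned long recent_enough)
{
	/* recent_enough == jiffies - msecs_to_jiffies(20), sampled once */
	return time_after_eq(emitted_jiffies, recent_enough);
}
#endif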
3681
3682 int
3683 i915_gem_object_pin(struct drm_i915_gem_object *obj,
3684                     struct i915_address_space *vm,
3685                     uint32_t alignment,
3686                     bool map_and_fenceable,
3687                     bool nonblocking)
3688 {
3689         struct i915_vma *vma;
3690         int ret;
3691
3692         if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
3693                 return -EBUSY;
3694
3695         WARN_ON(map_and_fenceable && !i915_is_ggtt(vm));
3696
3697         vma = i915_gem_obj_to_vma(obj, vm);
3698
3699         if (vma) {
3700                 if ((alignment &&
3701                      vma->node.start & (alignment - 1)) ||
3702                     (map_and_fenceable && !obj->map_and_fenceable)) {
3703                         WARN(obj->pin_count,
3704                              "bo is already pinned with incorrect alignment:"
3705                              " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
3706                              " obj->map_and_fenceable=%d\n",
3707                              i915_gem_obj_offset(obj, vm), alignment,
3708                              map_and_fenceable,
3709                              obj->map_and_fenceable);
3710                         ret = i915_vma_unbind(vma);
3711                         if (ret)
3712                                 return ret;
3713                 }
3714         }
3715
3716         if (!i915_gem_obj_bound(obj, vm)) {
3717                 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3718
3719                 ret = i915_gem_object_bind_to_vm(obj, vm, alignment,
3720                                                  map_and_fenceable,
3721                                                  nonblocking);
3722                 if (ret)
3723                         return ret;
3724
3725                 if (!dev_priv->mm.aliasing_ppgtt)
3726                         i915_gem_gtt_bind_object(obj, obj->cache_level);
3727         }
3728
3729         if (!obj->has_global_gtt_mapping && map_and_fenceable)
3730                 i915_gem_gtt_bind_object(obj, obj->cache_level);
3731
3732         obj->pin_count++;
3733         obj->pin_mappable |= map_and_fenceable;
3734
3735         return 0;
3736 }
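
/*
 * Illustrative sketch, not part of the original file: pin counts are plain
 * reference counts, so every successful pin must be paired with an unpin
 * while dev->struct_mutex is held.  example_with_pinned() is a hypothetical
 * helper wrapping that pattern.
 */
#if 0
static int example_with_pinned(struct drm_i915_gem_object *obj,
			       int (*work)(struct drm_i915_gem_object *))
{
	int ret;

	ret = i915_gem_obj_ggtt_pin(obj, 0, false, false);
	if (ret)
		return ret;

	ret = work(obj);

	i915_gem_object_unpin(obj);
	return ret;
}
#endif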
3737
3738 void
3739 i915_gem_object_unpin(struct drm_i915_gem_object *obj)
3740 {
3741         BUG_ON(obj->pin_count == 0);
3742         BUG_ON(!i915_gem_obj_bound_any(obj));
3743
3744         if (--obj->pin_count == 0)
3745                 obj->pin_mappable = false;
3746 }
3747
3748 int
3749 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
3750                    struct drm_file *file)
3751 {
3752         struct drm_i915_gem_pin *args = data;
3753         struct drm_i915_gem_object *obj;
3754         int ret;
3755
3756         ret = i915_mutex_lock_interruptible(dev);
3757         if (ret)
3758                 return ret;
3759
3760         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3761         if (&obj->base == NULL) {
3762                 ret = -ENOENT;
3763                 goto unlock;
3764         }
3765
3766         if (obj->madv != I915_MADV_WILLNEED) {
3767                 DRM_ERROR("Attempting to pin a purgeable buffer\n");
3768                 ret = -EINVAL;
3769                 goto out;
3770         }
3771
3772         if (obj->pin_filp != NULL && obj->pin_filp != file) {
3773                 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
3774                           args->handle);
3775                 ret = -EINVAL;
3776                 goto out;
3777         }
3778
3779         if (obj->user_pin_count == 0) {
3780                 ret = i915_gem_obj_ggtt_pin(obj, args->alignment, true, false);
3781                 if (ret)
3782                         goto out;
3783         }
3784
3785         obj->user_pin_count++;
3786         obj->pin_filp = file;
3787
3788         /* XXX - flush the CPU caches for pinned objects
3789          * as the X server doesn't manage domains yet
3790          */
3791         i915_gem_object_flush_cpu_write_domain(obj);
3792         args->offset = i915_gem_obj_ggtt_offset(obj);
3793 out:
3794         drm_gem_object_unreference(&obj->base);
3795 unlock:
3796         mutex_unlock(&dev->struct_mutex);
3797         return ret;
3798 }
3799
3800 int
3801 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
3802                      struct drm_file *file)
3803 {
3804         struct drm_i915_gem_pin *args = data;
3805         struct drm_i915_gem_object *obj;
3806         int ret;
3807
3808         ret = i915_mutex_lock_interruptible(dev);
3809         if (ret)
3810                 return ret;
3811
3812         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3813         if (&obj->base == NULL) {
3814                 ret = -ENOENT;
3815                 goto unlock;
3816         }
3817
3818         if (obj->pin_filp != file) {
3819                 DRM_ERROR("Not pinned by caller in i915_gem_unpin_ioctl(): %d\n",
3820                           args->handle);
3821                 ret = -EINVAL;
3822                 goto out;
3823         }
3824         obj->user_pin_count--;
3825         if (obj->user_pin_count == 0) {
3826                 obj->pin_filp = NULL;
3827                 i915_gem_object_unpin(obj);
3828         }
3829
3830 out:
3831         drm_gem_object_unreference(&obj->base);
3832 unlock:
3833         mutex_unlock(&dev->struct_mutex);
3834         return ret;
3835 }
3836
3837 int
3838 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3839                     struct drm_file *file)
3840 {
3841         struct drm_i915_gem_busy *args = data;
3842         struct drm_i915_gem_object *obj;
3843         int ret;
3844
3845         ret = i915_mutex_lock_interruptible(dev);
3846         if (ret)
3847                 return ret;
3848
3849         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3850         if (&obj->base == NULL) {
3851                 ret = -ENOENT;
3852                 goto unlock;
3853         }
3854
3855         /* Count all active objects as busy, even if they are currently not used
3856          * by the gpu. Users of this interface expect objects to eventually
3857          * become non-busy without any further actions, therefore emit any
3858          * necessary flushes here.
3859          */
3860         ret = i915_gem_object_flush_active(obj);
3861
3862         args->busy = obj->active;
3863         if (obj->ring) {
3864                 BUILD_BUG_ON(I915_NUM_RINGS > 16);
3865                 args->busy |= intel_ring_flag(obj->ring) << 16;
3866         }
3867
3868         drm_gem_object_unreference(&obj->base);
3869 unlock:
3870         mutex_unlock(&dev->struct_mutex);
3871         return ret;
3872 }
3873
3874 int
3875 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3876                         struct drm_file *file_priv)
3877 {
3878         return i915_gem_ring_throttle(dev, file_priv);
3879 }
3880
3881 int
3882 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
3883                        struct drm_file *file_priv)
3884 {
3885         struct drm_i915_gem_madvise *args = data;
3886         struct drm_i915_gem_object *obj;
3887         int ret;
3888
3889         switch (args->madv) {
3890         case I915_MADV_DONTNEED:
3891         case I915_MADV_WILLNEED:
3892             break;
3893         default:
3894             return -EINVAL;
3895         }
3896
3897         ret = i915_mutex_lock_interruptible(dev);
3898         if (ret)
3899                 return ret;
3900
3901         obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
3902         if (&obj->base == NULL) {
3903                 ret = -ENOENT;
3904                 goto unlock;
3905         }
3906
3907         if (obj->pin_count) {
3908                 ret = -EINVAL;
3909                 goto out;
3910         }
3911
3912         if (obj->madv != __I915_MADV_PURGED)
3913                 obj->madv = args->madv;
3914
3915         /* if the object is no longer attached, discard its backing storage */
3916         if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
3917                 i915_gem_object_truncate(obj);
3918
3919         args->retained = obj->madv != __I915_MADV_PURGED;
3920
3921 out:
3922         drm_gem_object_unreference(&obj->base);
3923 unlock:
3924         mutex_unlock(&dev->struct_mutex);
3925         return ret;
3926 }
3927
3928 void i915_gem_object_init(struct drm_i915_gem_object *obj,
3929                           const struct drm_i915_gem_object_ops *ops)
3930 {
3931         INIT_LIST_HEAD(&obj->global_list);
3932         INIT_LIST_HEAD(&obj->ring_list);
3933         INIT_LIST_HEAD(&obj->exec_list);
3934         INIT_LIST_HEAD(&obj->vma_list);
3935
3936         obj->ops = ops;
3937
3938         obj->fence_reg = I915_FENCE_REG_NONE;
3939         obj->madv = I915_MADV_WILLNEED;
3940         /* Avoid an unnecessary call to unbind on the first bind. */
3941         obj->map_and_fenceable = true;
3942
3943         i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
3944 }
3945
3946 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
3947         .get_pages = i915_gem_object_get_pages_gtt,
3948         .put_pages = i915_gem_object_put_pages_gtt,
3949 };
3950
3951 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
3952                                                   size_t size)
3953 {
3954         struct drm_i915_gem_object *obj;
3955         struct address_space *mapping;
3956         gfp_t mask;
3957
3958         obj = i915_gem_object_alloc(dev);
3959         if (obj == NULL)
3960                 return NULL;
3961
3962         if (drm_gem_object_init(dev, &obj->base, size) != 0) {
3963                 i915_gem_object_free(obj);
3964                 return NULL;
3965         }
3966
3967         mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
3968         if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
3969                 /* 965gm cannot relocate objects above 4GiB. */
3970                 mask &= ~__GFP_HIGHMEM;
3971                 mask |= __GFP_DMA32;
3972         }
3973
3974         mapping = file_inode(obj->base.filp)->i_mapping;
3975         mapping_set_gfp_mask(mapping, mask);
3976
3977         i915_gem_object_init(obj, &i915_gem_object_ops);
3978
3979         obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3980         obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3981
3982         if (HAS_LLC(dev)) {
3983                 /* On some devices, we can have the GPU use the LLC (the CPU
3984                  * cache) for about a 10% performance improvement
3985                  * compared to uncached.  Graphics requests other than
3986                  * display scanout are coherent with the CPU in
3987                  * accessing this cache.  This means in this mode we
3988                  * don't need to clflush on the CPU side, and on the
3989                  * GPU side we only need to flush internal caches to
3990                  * get data visible to the CPU.
3991                  *
3992                  * However, we maintain the display planes as UC, and so
3993                  * need to rebind when first used as such.
3994                  */
3995                 obj->cache_level = I915_CACHE_LLC;
3996         } else
3997                 obj->cache_level = I915_CACHE_NONE;
3998
3999         trace_i915_gem_object_create(obj);
4000
4001         return obj;
4002 }
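
/*
 * Illustrative sketch, not part of the original file: internal users
 * allocate a shmem-backed object with the helper above and, if the buffer
 * is destined for scanout, drop it back to uncached as described in the
 * comment.  example_alloc_scanout() is hypothetical and assumes the caller
 * holds dev->struct_mutex.
 */
#if 0
static struct drm_i915_gem_object *
example_alloc_scanout(struct drm_device *dev, size_t size)
{
	struct drm_i915_gem_object *obj;

	obj = i915_gem_alloc_object(dev, size);
	if (obj == NULL)
		return NULL;

	/* Display planes are kept uncached; see pin_to_display_plane. */
	if (i915_gem_object_set_cache_level(obj, I915_CACHE_NONE)) {
		drm_gem_object_unreference(&obj->base);
		return NULL;
	}

	return obj;
}
#endif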
4003
4004 int i915_gem_init_object(struct drm_gem_object *obj)
4005 {
4006         BUG();
4007
4008         return 0;
4009 }
4010
4011 void i915_gem_free_object(struct drm_gem_object *gem_obj)
4012 {
4013         struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
4014         struct drm_device *dev = obj->base.dev;
4015         drm_i915_private_t *dev_priv = dev->dev_private;
4016         struct i915_vma *vma, *next;
4017
4018         trace_i915_gem_object_destroy(obj);
4019
4020         if (obj->phys_obj)
4021                 i915_gem_detach_phys_object(dev, obj);
4022
4023         obj->pin_count = 0;
4024         /* NB: 0 or 1 elements */
4025         WARN_ON(!list_empty(&obj->vma_list) &&
4026                 !list_is_singular(&obj->vma_list));
4027         list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
4028                 int ret = i915_vma_unbind(vma);
4029                 if (WARN_ON(ret == -ERESTARTSYS)) {
4030                         bool was_interruptible;
4031
4032                         was_interruptible = dev_priv->mm.interruptible;
4033                         dev_priv->mm.interruptible = false;
4034
4035                         WARN_ON(i915_vma_unbind(vma));
4036
4037                         dev_priv->mm.interruptible = was_interruptible;
4038                 }
4039         }
4040
4041         /* Stolen objects don't hold a ref, but do hold a pin count. Fix that up
4042          * before progressing. */
4043         if (obj->stolen)
4044                 i915_gem_object_unpin_pages(obj);
4045
4046         if (WARN_ON(obj->pages_pin_count))
4047                 obj->pages_pin_count = 0;
4048         i915_gem_object_put_pages(obj);
4049         i915_gem_object_free_mmap_offset(obj);
4050         i915_gem_object_release_stolen(obj);
4051
4052         BUG_ON(obj->pages);
4053
4054         if (obj->base.import_attach)
4055                 drm_prime_gem_destroy(&obj->base, NULL);
4056
4057         drm_gem_object_release(&obj->base);
4058         i915_gem_info_remove_obj(dev_priv, obj->base.size);
4059
4060         kfree(obj->bit_17);
4061         i915_gem_object_free(obj);
4062 }
4063
4064 struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
4065                                      struct i915_address_space *vm)
4066 {
4067         struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
4068         if (vma == NULL)
4069                 return ERR_PTR(-ENOMEM);
4070
4071         INIT_LIST_HEAD(&vma->vma_link);
4072         INIT_LIST_HEAD(&vma->mm_list);
4073         vma->vm = vm;
4074         vma->obj = obj;
4075
4076         /* Keep GGTT vmas first to make debug easier */
4077         if (i915_is_ggtt(vm))
4078                 list_add(&vma->vma_link, &obj->vma_list);
4079         else
4080                 list_add_tail(&vma->vma_link, &obj->vma_list);
4081
4082         return vma;
4083 }
4084
4085 void i915_gem_vma_destroy(struct i915_vma *vma)
4086 {
4087         WARN_ON(vma->node.allocated);
4088         list_del(&vma->vma_link);
4089         kfree(vma);
4090 }
4091
4092 int
4093 i915_gem_idle(struct drm_device *dev)
4094 {
4095         drm_i915_private_t *dev_priv = dev->dev_private;
4096         int ret;
4097
4098         if (dev_priv->ums.mm_suspended) {
4099                 mutex_unlock(&dev->struct_mutex);
4100                 return 0;
4101         }
4102
4103         ret = i915_gpu_idle(dev);
4104         if (ret) {
4105                 mutex_unlock(&dev->struct_mutex);
4106                 return ret;
4107         }
4108         i915_gem_retire_requests(dev);
4109
4110         /* Under UMS, be paranoid and evict. */
4111         if (!drm_core_check_feature(dev, DRIVER_MODESET))
4112                 i915_gem_evict_everything(dev);
4113
4114         del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
4115
4116         i915_kernel_lost_context(dev);
4117         i915_gem_cleanup_ringbuffer(dev);
4118
4119         /* Cancel the retire work handler, which should be idle now. */
4120         cancel_delayed_work_sync(&dev_priv->mm.retire_work);
4121
4122         return 0;
4123 }
4124
4125 void i915_gem_l3_remap(struct drm_device *dev)
4126 {
4127         drm_i915_private_t *dev_priv = dev->dev_private;
4128         u32 misccpctl;
4129         int i;
4130
4131         if (!HAS_L3_GPU_CACHE(dev))
4132                 return;
4133
4134         if (!dev_priv->l3_parity.remap_info)
4135                 return;
4136
4137         misccpctl = I915_READ(GEN7_MISCCPCTL);
4138         I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
4139         POSTING_READ(GEN7_MISCCPCTL);
4140
4141         for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
4142                 u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
4143                 if (remap && remap != dev_priv->l3_parity.remap_info[i/4])
4144                         DRM_DEBUG("0x%x was already programmed to %x\n",
4145                                   GEN7_L3LOG_BASE + i, remap);
4146                 if (remap && !dev_priv->l3_parity.remap_info[i/4])
4147                         DRM_DEBUG_DRIVER("Clearing remapped register\n");
4148                 I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->l3_parity.remap_info[i/4]);
4149         }
4150
4151         /* Make sure all the writes land before disabling dop clock gating */
4152         POSTING_READ(GEN7_L3LOG_BASE);
4153
4154         I915_WRITE(GEN7_MISCCPCTL, misccpctl);
4155 }
4156
4157 void i915_gem_init_swizzling(struct drm_device *dev)
4158 {
4159         drm_i915_private_t *dev_priv = dev->dev_private;
4160
4161         if (INTEL_INFO(dev)->gen < 5 ||
4162             dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4163                 return;
4164
4165         I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4166                                  DISP_TILE_SURFACE_SWIZZLING);
4167
4168         if (IS_GEN5(dev))
4169                 return;
4170
4171         I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4172         if (IS_GEN6(dev))
4173                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
4174         else if (IS_GEN7(dev))
4175                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
4176         else
4177                 BUG();
4178 }
4179
4180 static bool
4181 intel_enable_blt(struct drm_device *dev)
4182 {
4183         if (!HAS_BLT(dev))
4184                 return false;
4185
4186         /* The blitter was dysfunctional on early prototypes */
4187         if (IS_GEN6(dev) && dev->pdev->revision < 8) {
4188                 DRM_INFO("BLT not supported on this pre-production hardware;"
4189                          " graphics performance will be degraded.\n");
4190                 return false;
4191         }
4192
4193         return true;
4194 }
4195
4196 static int i915_gem_init_rings(struct drm_device *dev)
4197 {
4198         struct drm_i915_private *dev_priv = dev->dev_private;
4199         int ret;
4200
4201         ret = intel_init_render_ring_buffer(dev);
4202         if (ret)
4203                 return ret;
4204
4205         if (HAS_BSD(dev)) {
4206                 ret = intel_init_bsd_ring_buffer(dev);
4207                 if (ret)
4208                         goto cleanup_render_ring;
4209         }
4210
4211         if (intel_enable_blt(dev)) {
4212                 ret = intel_init_blt_ring_buffer(dev);
4213                 if (ret)
4214                         goto cleanup_bsd_ring;
4215         }
4216
4217         if (HAS_VEBOX(dev)) {
4218                 ret = intel_init_vebox_ring_buffer(dev);
4219                 if (ret)
4220                         goto cleanup_blt_ring;
4221         }
4222
4223
4224         ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
4225         if (ret)
4226                 goto cleanup_vebox_ring;
4227
4228         return 0;
4229
4230 cleanup_vebox_ring:
4231         intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
4232 cleanup_blt_ring:
4233         intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
4234 cleanup_bsd_ring:
4235         intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
4236 cleanup_render_ring:
4237         intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
4238
4239         return ret;
4240 }
4241
4242 int
4243 i915_gem_init_hw(struct drm_device *dev)
4244 {
4245         drm_i915_private_t *dev_priv = dev->dev_private;
4246         int ret;
4247
4248         if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
4249                 return -EIO;
4250
4251         if (dev_priv->ellc_size)
4252                 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
4253
4254         if (HAS_PCH_NOP(dev)) {
4255                 u32 temp = I915_READ(GEN7_MSG_CTL);
4256                 temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
4257                 I915_WRITE(GEN7_MSG_CTL, temp);
4258         }
4259
4260         i915_gem_l3_remap(dev);
4261
4262         i915_gem_init_swizzling(dev);
4263
4264         ret = i915_gem_init_rings(dev);
4265         if (ret)
4266                 return ret;
4267
4268         /*
4269          * XXX: There was some w/a described somewhere suggesting loading
4270          * contexts before PPGTT.
4271          */
4272         i915_gem_context_init(dev);
4273         if (dev_priv->mm.aliasing_ppgtt) {
4274                 ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
4275                 if (ret) {
4276                         i915_gem_cleanup_aliasing_ppgtt(dev);
4277                         DRM_INFO("PPGTT enable failed. This is not fatal, but unexpected\n");
4278                 }
4279         }
4280
4281         return 0;
4282 }
4283
4284 int i915_gem_init(struct drm_device *dev)
4285 {
4286         struct drm_i915_private *dev_priv = dev->dev_private;
4287         int ret;
4288
4289         mutex_lock(&dev->struct_mutex);
4290
4291         if (IS_VALLEYVIEW(dev)) {
4292                 /* VLVA0 (potential hack), BIOS isn't actually waking us */
4293                 I915_WRITE(VLV_GTLC_WAKE_CTRL, 1);
4294                 if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) & 1) == 1, 10))
4295                         DRM_DEBUG_DRIVER("allow wake ack timed out\n");
4296         }
4297
4298         i915_gem_init_global_gtt(dev);
4299
4300         ret = i915_gem_init_hw(dev);
4301         mutex_unlock(&dev->struct_mutex);
4302         if (ret) {
4303                 i915_gem_cleanup_aliasing_ppgtt(dev);
4304                 return ret;
4305         }
4306
4307         /* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
4308         if (!drm_core_check_feature(dev, DRIVER_MODESET))
4309                 dev_priv->dri1.allow_batchbuffer = 1;
4310         return 0;
4311 }
4312
4313 void
4314 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4315 {
4316         drm_i915_private_t *dev_priv = dev->dev_private;
4317         struct intel_ring_buffer *ring;
4318         int i;
4319
4320         for_each_ring(ring, dev_priv, i)
4321                 intel_cleanup_ring_buffer(ring);
4322 }
4323
4324 int
4325 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4326                        struct drm_file *file_priv)
4327 {
4328         struct drm_i915_private *dev_priv = dev->dev_private;
4329         int ret;
4330
4331         if (drm_core_check_feature(dev, DRIVER_MODESET))
4332                 return 0;
4333
4334         if (i915_reset_in_progress(&dev_priv->gpu_error)) {
4335                 DRM_ERROR("Reenabling wedged hardware, good luck\n");
4336                 atomic_set(&dev_priv->gpu_error.reset_counter, 0);
4337         }
4338
4339         mutex_lock(&dev->struct_mutex);
4340         dev_priv->ums.mm_suspended = 0;
4341
4342         ret = i915_gem_init_hw(dev);
4343         if (ret != 0) {
4344                 mutex_unlock(&dev->struct_mutex);
4345                 return ret;
4346         }
4347
4348         BUG_ON(!list_empty(&dev_priv->gtt.base.active_list));
4349         mutex_unlock(&dev->struct_mutex);
4350
4351         ret = drm_irq_install(dev);
4352         if (ret)
4353                 goto cleanup_ringbuffer;
4354
4355         return 0;
4356
4357 cleanup_ringbuffer:
4358         mutex_lock(&dev->struct_mutex);
4359         i915_gem_cleanup_ringbuffer(dev);
4360         dev_priv->ums.mm_suspended = 1;
4361         mutex_unlock(&dev->struct_mutex);
4362
4363         return ret;
4364 }
4365
4366 int
4367 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4368                        struct drm_file *file_priv)
4369 {
4370         struct drm_i915_private *dev_priv = dev->dev_private;
4371         int ret;
4372
4373         if (drm_core_check_feature(dev, DRIVER_MODESET))
4374                 return 0;
4375
4376         drm_irq_uninstall(dev);
4377
4378         mutex_lock(&dev->struct_mutex);
4379         ret = i915_gem_idle(dev);
4380
4381         /* Hack!  Don't let anybody do execbuf while we don't control the chip.
4382          * We need to replace this with a semaphore, or something.
4383          * And not confound ums.mm_suspended!
4384          */
4385         if (ret != 0)
4386                 dev_priv->ums.mm_suspended = 1;
4387         mutex_unlock(&dev->struct_mutex);
4388
4389         return ret;
4390 }
4391
4392 void
4393 i915_gem_lastclose(struct drm_device *dev)
4394 {
4395         int ret;
4396
4397         if (drm_core_check_feature(dev, DRIVER_MODESET))
4398                 return;
4399
4400         mutex_lock(&dev->struct_mutex);
4401         ret = i915_gem_idle(dev);
4402         if (ret)
4403                 DRM_ERROR("failed to idle hardware: %d\n", ret);
4404         mutex_unlock(&dev->struct_mutex);
4405 }
4406
4407 static void
4408 init_ring_lists(struct intel_ring_buffer *ring)
4409 {
4410         INIT_LIST_HEAD(&ring->active_list);
4411         INIT_LIST_HEAD(&ring->request_list);
4412 }
4413
4414 static void i915_init_vm(struct drm_i915_private *dev_priv,
4415                          struct i915_address_space *vm)
4416 {
4417         vm->dev = dev_priv->dev;
4418         INIT_LIST_HEAD(&vm->active_list);
4419         INIT_LIST_HEAD(&vm->inactive_list);
4420         INIT_LIST_HEAD(&vm->global_link);
4421         list_add(&vm->global_link, &dev_priv->vm_list);
4422 }
4423
4424 void
4425 i915_gem_load(struct drm_device *dev)
4426 {
4427         drm_i915_private_t *dev_priv = dev->dev_private;
4428         int i;
4429
4430         dev_priv->slab =
4431                 kmem_cache_create("i915_gem_object",
4432                                   sizeof(struct drm_i915_gem_object), 0,
4433                                   SLAB_HWCACHE_ALIGN,
4434                                   NULL);
4435
4436         INIT_LIST_HEAD(&dev_priv->vm_list);
4437         i915_init_vm(dev_priv, &dev_priv->gtt.base);
4438
4439         INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
4440         INIT_LIST_HEAD(&dev_priv->mm.bound_list);
4441         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4442         for (i = 0; i < I915_NUM_RINGS; i++)
4443                 init_ring_lists(&dev_priv->ring[i]);
4444         for (i = 0; i < I915_MAX_NUM_FENCES; i++)
4445                 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
4446         INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4447                           i915_gem_retire_work_handler);
4448         init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
4449
4450         /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
4451         if (IS_GEN3(dev)) {
4452                 I915_WRITE(MI_ARB_STATE,
4453                            _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
4454         }
4455
4456         dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
4457
4458         /* Old X drivers will take 0-2 for front, back, depth buffers */
4459         if (!drm_core_check_feature(dev, DRIVER_MODESET))
4460                 dev_priv->fence_reg_start = 3;
4461
4462         if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
4463                 dev_priv->num_fence_regs = 32;
4464         else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4465                 dev_priv->num_fence_regs = 16;
4466         else
4467                 dev_priv->num_fence_regs = 8;
4468
4469         /* Initialize fence registers to zero */
4470         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4471         i915_gem_restore_fences(dev);
4472
4473         i915_gem_detect_bit_6_swizzle(dev);
4474         init_waitqueue_head(&dev_priv->pending_flip_queue);
4475
4476         dev_priv->mm.interruptible = true;
4477
4478         dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
4479         dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
4480         register_shrinker(&dev_priv->mm.inactive_shrinker);
4481 }
4482
4483 /*
4484  * Create a physically contiguous memory object for this object
4485  * e.g. for cursor + overlay regs
4486  */
4487 static int i915_gem_init_phys_object(struct drm_device *dev,
4488                                      int id, int size, int align)
4489 {
4490         drm_i915_private_t *dev_priv = dev->dev_private;
4491         struct drm_i915_gem_phys_object *phys_obj;
4492         int ret;
4493
4494         if (dev_priv->mm.phys_objs[id - 1] || !size)
4495                 return 0;
4496
4497         phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
4498         if (!phys_obj)
4499                 return -ENOMEM;
4500
4501         phys_obj->id = id;
4502
4503         phys_obj->handle = drm_pci_alloc(dev, size, align);
4504         if (!phys_obj->handle) {
4505                 ret = -ENOMEM;
4506                 goto kfree_obj;
4507         }
4508 #ifdef CONFIG_X86
4509         set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4510 #endif
4511
4512         dev_priv->mm.phys_objs[id - 1] = phys_obj;
4513
4514         return 0;
4515 kfree_obj:
4516         kfree(phys_obj);
4517         return ret;
4518 }
4519
4520 static void i915_gem_free_phys_object(struct drm_device *dev, int id)
4521 {
4522         drm_i915_private_t *dev_priv = dev->dev_private;
4523         struct drm_i915_gem_phys_object *phys_obj;
4524
4525         if (!dev_priv->mm.phys_objs[id - 1])
4526                 return;
4527
4528         phys_obj = dev_priv->mm.phys_objs[id - 1];
4529         if (phys_obj->cur_obj) {
4530                 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
4531         }
4532
4533 #ifdef CONFIG_X86
4534         set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4535 #endif
4536         drm_pci_free(dev, phys_obj->handle);
4537         kfree(phys_obj);
4538         dev_priv->mm.phys_objs[id - 1] = NULL;
4539 }
4540
4541 void i915_gem_free_all_phys_object(struct drm_device *dev)
4542 {
4543         int i;
4544
4545         for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
4546                 i915_gem_free_phys_object(dev, i);
4547 }
4548
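/*
 * Copy the contents of the phys object back into the object's shmem pages,
 * flushing the CPU caches, and then drop the object's link to the phys object.
 */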
4549 void i915_gem_detach_phys_object(struct drm_device *dev,
4550                                  struct drm_i915_gem_object *obj)
4551 {
4552         struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
4553         char *vaddr;
4554         int i;
4555         int page_count;
4556
4557         if (!obj->phys_obj)
4558                 return;
4559         vaddr = obj->phys_obj->handle->vaddr;
4560
4561         page_count = obj->base.size / PAGE_SIZE;
4562         for (i = 0; i < page_count; i++) {
4563                 struct page *page = shmem_read_mapping_page(mapping, i);
4564                 if (!IS_ERR(page)) {
4565                         char *dst = kmap_atomic(page);
4566                         memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
4567                         kunmap_atomic(dst);
4568
4569                         drm_clflush_pages(&page, 1);
4570
4571                         set_page_dirty(page);
4572                         mark_page_accessed(page);
4573                         page_cache_release(page);
4574                 }
4575         }
4576         i915_gem_chipset_flush(dev);
4577
4578         obj->phys_obj->cur_obj = NULL;
4579         obj->phys_obj = NULL;
4580 }
4581
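/*
 * Switch the object over to physically contiguous storage: allocate the phys
 * object for @id if necessary and copy the current shmem contents into it.
 */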
4582 int
4583 i915_gem_attach_phys_object(struct drm_device *dev,
4584                             struct drm_i915_gem_object *obj,
4585                             int id,
4586                             int align)
4587 {
4588         struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
4589         drm_i915_private_t *dev_priv = dev->dev_private;
4590         int ret = 0;
4591         int page_count;
4592         int i;
4593
4594         if (id > I915_MAX_PHYS_OBJECT)
4595                 return -EINVAL;
4596
4597         if (obj->phys_obj) {
4598                 if (obj->phys_obj->id == id)
4599                         return 0;
4600                 i915_gem_detach_phys_object(dev, obj);
4601         }
4602
4603         /* create a new phys object for this slot if none exists yet */
4604         if (!dev_priv->mm.phys_objs[id - 1]) {
4605                 ret = i915_gem_init_phys_object(dev, id,
4606                                                 obj->base.size, align);
4607                 if (ret) {
4608                         DRM_ERROR("failed to init phys object %d size: %zu\n",
4609                                   id, obj->base.size);
4610                         return ret;
4611                 }
4612         }
4613
4614         /* bind the phys object to the GEM object */
4615         obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
4616         obj->phys_obj->cur_obj = obj;
4617
4618         page_count = obj->base.size / PAGE_SIZE;
4619
4620         for (i = 0; i < page_count; i++) {
4621                 struct page *page;
4622                 char *dst, *src;
4623
4624                 page = shmem_read_mapping_page(mapping, i);
4625                 if (IS_ERR(page))
4626                         return PTR_ERR(page);
4627
4628                 src = kmap_atomic(page);
4629                 dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
4630                 memcpy(dst, src, PAGE_SIZE);
4631                 kunmap_atomic(src);
4632
4633                 mark_page_accessed(page);
4634                 page_cache_release(page);
4635         }
4636
4637         return 0;
4638 }
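/*
 * Illustrative call pattern (a sketch, not code from this file): cursor setup
 * on platforms that need physically contiguous cursors attaches roughly like
 *
 *	err = i915_gem_attach_phys_object(dev, obj,
 *					  I915_GEM_PHYS_CURSOR_0 + pipe, align);
 *	if (err)
 *		goto fail;
 *
 * where pipe, align and the error label are placeholders for the caller's own
 * context.
 */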
4639
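/*
 * pwrite fast path for phys objects: copy the user data straight into the
 * contiguous buffer, falling back to a blocking copy_from_user (with
 * struct_mutex dropped) if the non-faulting copy could not complete, then
 * flush the chipset caches.
 */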
4640 static int
4641 i915_gem_phys_pwrite(struct drm_device *dev,
4642                      struct drm_i915_gem_object *obj,
4643                      struct drm_i915_gem_pwrite *args,
4644                      struct drm_file *file_priv)
4645 {
4646         void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
4647         char __user *user_data = to_user_ptr(args->data_ptr);
4648
4649         if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
4650                 unsigned long unwritten;
4651
4652                 /* The physical object once assigned is fixed for the lifetime
4653                  * of the obj, so we can safely drop the lock and continue
4654                  * to access vaddr.
4655                  */
4656                 mutex_unlock(&dev->struct_mutex);
4657                 unwritten = copy_from_user(vaddr, user_data, args->size);
4658                 mutex_lock(&dev->struct_mutex);
4659                 if (unwritten)
4660                         return -EFAULT;
4661         }
4662
4663         i915_gem_chipset_flush(dev);
4664         return 0;
4665 }
4666
4667 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
4668 {
4669         struct drm_i915_file_private *file_priv = file->driver_priv;
4670
4671         /* Clean up our request list when the client is going away, so that
4672          * later retire_requests won't dereference our soon-to-be-gone
4673          * file_priv.
4674          */
4675         spin_lock(&file_priv->mm.lock);
4676         while (!list_empty(&file_priv->mm.request_list)) {
4677                 struct drm_i915_gem_request *request;
4678
4679                 request = list_first_entry(&file_priv->mm.request_list,
4680                                            struct drm_i915_gem_request,
4681                                            client_list);
4682                 list_del(&request->client_list);
4683                 request->file_priv = NULL;
4684         }
4685         spin_unlock(&file_priv->mm.lock);
4686 }
4687
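/*
 * Best-effort check for whether @task currently owns @mutex, used by the
 * shrinker to detect recursion onto struct_mutex. The owner field is only
 * tracked with SMP or mutex debugging enabled; otherwise assume we do not
 * own the lock.
 */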
4688 static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
4689 {
4690         if (!mutex_is_locked(mutex))
4691                 return false;
4692
4693 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
4694         return mutex->owner == task;
4695 #else
4696         /* Since UP may be pre-empted, we cannot assume that we own the lock */
4697         return false;
4698 #endif
4699 }
4700
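/*
 * Shrinker callback: when asked to scan, first purge purgeable (madv
 * DONTNEED) objects, then unbind and drop pages more aggressively if that was
 * not enough. The return value is the number of reclaimable pages left on the
 * unbound list and on the inactive, unpinned part of the bound list.
 */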
4701 static int
4702 i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
4703 {
4704         struct drm_i915_private *dev_priv =
4705                 container_of(shrinker,
4706                              struct drm_i915_private,
4707                              mm.inactive_shrinker);
4708         struct drm_device *dev = dev_priv->dev;
4709         struct drm_i915_gem_object *obj;
4710         int nr_to_scan = sc->nr_to_scan;
4711         bool unlock = true;
4712         int cnt;
4713
4714         if (!mutex_trylock(&dev->struct_mutex)) {
4715                 if (!mutex_is_locked_by(&dev->struct_mutex, current))
4716                         return 0;
4717
4718                 if (dev_priv->mm.shrinker_no_lock_stealing)
4719                         return 0;
4720
4721                 unlock = false;
4722         }
4723
4724         if (nr_to_scan) {
4725                 nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan);
4726                 if (nr_to_scan > 0)
4727                         nr_to_scan -= __i915_gem_shrink(dev_priv, nr_to_scan,
4728                                                         false);
4729                 if (nr_to_scan > 0)
4730                         i915_gem_shrink_all(dev_priv);
4731         }
4732
4733         cnt = 0;
4734         list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
4735                 if (obj->pages_pin_count == 0)
4736                         cnt += obj->base.size >> PAGE_SHIFT;
4737
4738         list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
4739                 if (obj->active)
4740                         continue;
4741
4742                 if (obj->pin_count == 0 && obj->pages_pin_count == 0)
4743                         cnt += obj->base.size >> PAGE_SHIFT;
4744         }
4745
4746         if (unlock)
4747                 mutex_unlock(&dev->struct_mutex);
4748         return cnt;
4749 }
4750
4751 /* Helpers for querying an object's per-address-space bindings (VMAs) */
4752 unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
4753                                   struct i915_address_space *vm)
4754 {
4755         struct drm_i915_private *dev_priv = o->base.dev->dev_private;
4756         struct i915_vma *vma;
4757
4758         if (vm == &dev_priv->mm.aliasing_ppgtt->base)
4759                 vm = &dev_priv->gtt.base;
4760
4761         BUG_ON(list_empty(&o->vma_list));
4762         list_for_each_entry(vma, &o->vma_list, vma_link) {
4763                 if (vma->vm == vm)
4764                         return vma->node.start;
4765         }
4766
4767         return -1;
4768 }
4769
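/* Return true if the object has an allocated VMA node in the given address space */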
4770 bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
4771                         struct i915_address_space *vm)
4772 {
4773         struct i915_vma *vma;
4774
4775         list_for_each_entry(vma, &o->vma_list, vma_link)
4776                 if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
4777                         return true;
4778
4779         return false;
4780 }
4781
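/* Return true if the object is bound into any address space */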
4782 bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
4783 {
4784         struct drm_i915_private *dev_priv = o->base.dev->dev_private;
4785         struct i915_address_space *vm;
4786
4787         list_for_each_entry(vm, &dev_priv->vm_list, global_link)
4788                 if (i915_gem_obj_bound(o, vm))
4789                         return true;
4790
4791         return false;
4792 }
4793
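/*
 * Return the size of the object's binding in the given address space, or 0 if
 * it is not bound there. Lookups in the aliasing ppgtt are redirected to the
 * global GTT, which holds the shared binding.
 */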
4794 unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
4795                                 struct i915_address_space *vm)
4796 {
4797         struct drm_i915_private *dev_priv = o->base.dev->dev_private;
4798         struct i915_vma *vma;
4799
4800         if (vm == &dev_priv->mm.aliasing_ppgtt->base)
4801                 vm = &dev_priv->gtt.base;
4802
4803         BUG_ON(list_empty(&o->vma_list));
4804
4805         list_for_each_entry(vma, &o->vma_list, vma_link)
4806                 if (vma->vm == vm)
4807                         return vma->node.size;
4808
4809         return 0;
4810 }
4811
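/* Look up the VMA linking the object to the given address space, or NULL */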
4812 struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
4813                                      struct i915_address_space *vm)
4814 {
4815         struct i915_vma *vma;
4816         list_for_each_entry(vma, &obj->vma_list, vma_link)
4817                 if (vma->vm == vm)
4818                         return vma;
4819
4820         return NULL;
4821 }
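/*
 * Usage sketch (illustrative only, not code from this file): callers usually
 * check for a binding before asking for its offset, e.g.
 *
 *	if (i915_gem_obj_bound(obj, vm))
 *		offset = i915_gem_obj_offset(obj, vm);
 *	else
 *		offset = -1;
 *
 * where obj, vm and offset are placeholders for the caller's locals.
 */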