1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *
26  */
27
28 #include "drmP.h"
29 #include "drm.h"
30 #include "i915_drm.h"
31 #include "i915_drv.h"
32 #include "i915_trace.h"
33 #include "intel_drv.h"
34 #include <linux/shmem_fs.h>
35 #include <linux/slab.h>
36 #include <linux/swap.h>
37 #include <linux/pci.h>
38
39 static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
40 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
41 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
42 static __must_check int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
43                                                           bool write);
44 static __must_check int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
45                                                                   uint64_t offset,
46                                                                   uint64_t size);
47 static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj);
48 static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
49                                                     unsigned alignment,
50                                                     bool map_and_fenceable);
51 static void i915_gem_clear_fence_reg(struct drm_device *dev,
52                                      struct drm_i915_fence_reg *reg);
53 static int i915_gem_phys_pwrite(struct drm_device *dev,
54                                 struct drm_i915_gem_object *obj,
55                                 struct drm_i915_gem_pwrite *args,
56                                 struct drm_file *file);
57 static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj);
58
59 static int i915_gem_inactive_shrink(struct shrinker *shrinker,
60                                     struct shrink_control *sc);
61 static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
62
63 /* some bookkeeping */
64 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
65                                   size_t size)
66 {
67         dev_priv->mm.object_count++;
68         dev_priv->mm.object_memory += size;
69 }
70
71 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
72                                      size_t size)
73 {
74         dev_priv->mm.object_count--;
75         dev_priv->mm.object_memory -= size;
76 }
77
78 static int
79 i915_gem_wait_for_error(struct drm_device *dev)
80 {
81         struct drm_i915_private *dev_priv = dev->dev_private;
82         struct completion *x = &dev_priv->error_completion;
83         unsigned long flags;
84         int ret;
85
86         if (!atomic_read(&dev_priv->mm.wedged))
87                 return 0;
88
89         ret = wait_for_completion_interruptible(x);
90         if (ret)
91                 return ret;
92
93         if (atomic_read(&dev_priv->mm.wedged)) {
94                 /* GPU is hung, bump the completion count to account for
95                  * the token we just consumed so that we never hit zero and
96                  * end up waiting upon a subsequent completion event that
97                  * will never happen.
98                  */
99                 spin_lock_irqsave(&x->wait.lock, flags);
100                 x->done++;
101                 spin_unlock_irqrestore(&x->wait.lock, flags);
102         }
103         return 0;
104 }
105
106 int i915_mutex_lock_interruptible(struct drm_device *dev)
107 {
108         int ret;
109
110         ret = i915_gem_wait_for_error(dev);
111         if (ret)
112                 return ret;
113
114         ret = mutex_lock_interruptible(&dev->struct_mutex);
115         if (ret)
116                 return ret;
117
118         WARN_ON(i915_verify_lists(dev));
119         return 0;
120 }
121
122 static inline bool
123 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
124 {
125         return obj->gtt_space && !obj->active && obj->pin_count == 0;
126 }
127
128 void i915_gem_do_init(struct drm_device *dev,
129                       unsigned long start,
130                       unsigned long mappable_end,
131                       unsigned long end)
132 {
133         drm_i915_private_t *dev_priv = dev->dev_private;
134
135         drm_mm_init(&dev_priv->mm.gtt_space, start, end - start);
136
137         dev_priv->mm.gtt_start = start;
138         dev_priv->mm.gtt_mappable_end = mappable_end;
139         dev_priv->mm.gtt_end = end;
140         dev_priv->mm.gtt_total = end - start;
141         dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
142
143         /* Take over this portion of the GTT */
144         intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
145 }
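/*
 * A worked example of the split computed above, using illustrative numbers
 * rather than any particular chipset: suppose the driver is initialised with
 * start = 0, mappable_end = 256 MiB and end = 2 GiB.  Then
 *
 *   gtt_total          = end - start                    = 2 GiB
 *   mappable_gtt_total = min(end, mappable_end) - start = 256 MiB
 *
 * Only the first 256 MiB can be reached through the CPU-visible aperture
 * (and hence by i915_gem_fault()); the remainder is GPU-only GTT space.
 */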
146
147 int
148 i915_gem_init_ioctl(struct drm_device *dev, void *data,
149                     struct drm_file *file)
150 {
151         struct drm_i915_gem_init *args = data;
152
153         if (args->gtt_start >= args->gtt_end ||
154             (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
155                 return -EINVAL;
156
157         mutex_lock(&dev->struct_mutex);
158         i915_gem_do_init(dev, args->gtt_start, args->gtt_end, args->gtt_end);
159         mutex_unlock(&dev->struct_mutex);
160
161         return 0;
162 }
163
164 int
165 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
166                             struct drm_file *file)
167 {
168         struct drm_i915_private *dev_priv = dev->dev_private;
169         struct drm_i915_gem_get_aperture *args = data;
170         struct drm_i915_gem_object *obj;
171         size_t pinned;
172
173         if (!(dev->driver->driver_features & DRIVER_GEM))
174                 return -ENODEV;
175
176         pinned = 0;
177         mutex_lock(&dev->struct_mutex);
178         list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
179                 pinned += obj->gtt_space->size;
180         mutex_unlock(&dev->struct_mutex);
181
182         args->aper_size = dev_priv->mm.gtt_total;
183         args->aper_available_size = args->aper_size - pinned;
184
185         return 0;
186 }
187
188 static int
189 i915_gem_create(struct drm_file *file,
190                 struct drm_device *dev,
191                 uint64_t size,
192                 uint32_t *handle_p)
193 {
194         struct drm_i915_gem_object *obj;
195         int ret;
196         u32 handle;
197
198         size = roundup(size, PAGE_SIZE);
199         if (size == 0)
200                 return -EINVAL;
201
202         /* Allocate the new object */
203         obj = i915_gem_alloc_object(dev, size);
204         if (obj == NULL)
205                 return -ENOMEM;
206
207         ret = drm_gem_handle_create(file, &obj->base, &handle);
208         if (ret) {
209                 drm_gem_object_release(&obj->base);
210                 i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
211                 kfree(obj);
212                 return ret;
213         }
214
215         /* drop reference from allocate - handle holds it now */
216         drm_gem_object_unreference(&obj->base);
217         trace_i915_gem_object_create(obj);
218
219         *handle_p = handle;
220         return 0;
221 }
222
223 int
224 i915_gem_dumb_create(struct drm_file *file,
225                      struct drm_device *dev,
226                      struct drm_mode_create_dumb *args)
227 {
228         /* have to work out size/pitch and return them */
229         args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
230         args->size = args->pitch * args->height;
231         return i915_gem_create(file, dev,
232                                args->size, &args->handle);
233 }
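/*
 * Worked example of the pitch/size computation above (illustrative values
 * only): for a 1920x1080 dumb buffer at bpp = 32,
 *
 *   bytes per pixel = (32 + 7) / 8 = 4
 *   pitch           = ALIGN(1920 * 4, 64) = 7680     (already 64-byte aligned)
 *   size            = 7680 * 1080         = 8294400 bytes (~7.9 MiB)
 *
 * For 1366x768 at bpp = 32, 1366 * 4 = 5464 is not 64-byte aligned, so the
 * pitch is rounded up to 5504 and size = 5504 * 768 = 4227072 bytes.
 */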
234
235 int i915_gem_dumb_destroy(struct drm_file *file,
236                           struct drm_device *dev,
237                           uint32_t handle)
238 {
239         return drm_gem_handle_delete(file, handle);
240 }
241
242 /**
243  * Creates a new mm object and returns a handle to it.
244  */
245 int
246 i915_gem_create_ioctl(struct drm_device *dev, void *data,
247                       struct drm_file *file)
248 {
249         struct drm_i915_gem_create *args = data;
250         return i915_gem_create(file, dev,
251                                args->size, &args->handle);
252 }
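/*
 * For reference, a minimal userspace sketch of driving this ioctl through
 * libdrm (not part of this file; assumes an already-open DRM fd and libdrm
 * headers, and use_handle() is a hypothetical helper):
 *
 *	#include <xf86drm.h>	\* drmIoctl() *\
 *	#include <i915_drm.h>	\* struct drm_i915_gem_create *\
 *
 *	struct drm_i915_gem_create create = { .size = 4096 };
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create) == 0)
 *		use_handle(create.handle);
 *
 * The size is rounded up to a whole page by i915_gem_create(), and the
 * returned handle holds the only reference once the allocation reference is
 * dropped.
 */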
253
254 static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
255 {
256         drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
257
258         return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
259                 obj->tiling_mode != I915_TILING_NONE;
260 }
261
262 /**
263  * This is the fast shmem pread path, which attempts to copy_to_user directly
264  * from the backing pages of the object to the user's address space.  On a
265  * fault, it fails so we can fall back to i915_gem_shmem_pread_slow().
266  */
267 static int
268 i915_gem_shmem_pread_fast(struct drm_device *dev,
269                           struct drm_i915_gem_object *obj,
270                           struct drm_i915_gem_pread *args,
271                           struct drm_file *file)
272 {
273         struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
274         ssize_t remain;
275         loff_t offset;
276         char __user *user_data;
277         int page_offset, page_length;
278
279         user_data = (char __user *) (uintptr_t) args->data_ptr;
280         remain = args->size;
281
282         offset = args->offset;
283
284         while (remain > 0) {
285                 struct page *page;
286                 char *vaddr;
287                 int ret;
288
289                 /* Operation in this page
290                  *
291                  * page_offset = offset within page
292                  * page_length = bytes to copy for this page
293                  */
294                 page_offset = offset_in_page(offset);
295                 page_length = remain;
296                 if ((page_offset + remain) > PAGE_SIZE)
297                         page_length = PAGE_SIZE - page_offset;
298
299                 page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
300                 if (IS_ERR(page))
301                         return PTR_ERR(page);
302
303                 vaddr = kmap_atomic(page);
304                 ret = __copy_to_user_inatomic(user_data,
305                                               vaddr + page_offset,
306                                               page_length);
307                 kunmap_atomic(vaddr);
308
309                 mark_page_accessed(page);
310                 page_cache_release(page);
311                 if (ret)
312                         return -EFAULT;
313
314                 remain -= page_length;
315                 user_data += page_length;
316                 offset += page_length;
317         }
318
319         return 0;
320 }
321
322 static inline int
323 __copy_to_user_swizzled(char __user *cpu_vaddr,
324                         const char *gpu_vaddr, int gpu_offset,
325                         int length)
326 {
327         int ret, cpu_offset = 0;
328
329         while (length > 0) {
330                 int cacheline_end = ALIGN(gpu_offset + 1, 64);
331                 int this_length = min(cacheline_end - gpu_offset, length);
332                 int swizzled_gpu_offset = gpu_offset ^ 64;
333
334                 ret = __copy_to_user(cpu_vaddr + cpu_offset,
335                                      gpu_vaddr + swizzled_gpu_offset,
336                                      this_length);
337                 if (ret)
338                         return ret + length;
339
340                 cpu_offset += this_length;
341                 gpu_offset += this_length;
342                 length -= this_length;
343         }
344
345         return 0;
346 }
347
348 static inline int
349 __copy_from_user_swizzled(char __user *gpu_vaddr, int gpu_offset,
350                           const char *cpu_vaddr,
351                           int length)
352 {
353         int ret, cpu_offset = 0;
354
355         while (length > 0) {
356                 int cacheline_end = ALIGN(gpu_offset + 1, 64);
357                 int this_length = min(cacheline_end - gpu_offset, length);
358                 int swizzled_gpu_offset = gpu_offset ^ 64;
359
360                 ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
361                                        cpu_vaddr + cpu_offset,
362                                        this_length);
363                 if (ret)
364                         return ret + length;
365
366                 cpu_offset += this_length;
367                 gpu_offset += this_length;
368                 length -= this_length;
369         }
370
371         return 0;
372 }
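/*
 * A short worked example of the swizzle handled by the two helpers above
 * (illustrative offsets): the copy proceeds in runs that never cross a
 * 64-byte cacheline, and gpu_offset ^ 64 swaps each even/odd cacheline pair
 * within the page.
 *
 *   gpu_offset =   0, run of 64 -> touches GPU bytes  64..127
 *   gpu_offset =  64, run of 64 -> touches GPU bytes   0..63
 *   gpu_offset = 100             -> cacheline_end = 128, run of at most 28,
 *                                   swizzled offset = 100 ^ 64 = 36
 *
 * This mirrors the bit-17 swizzle applied by the hardware on affected pages
 * (see i915_gem_object_needs_bit17_swizzle()).
 */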
373
374 /**
375  * This is the fallback shmem pread path, which kmaps each backing page and
376  * copies out of it with copy_to_user, handling bit-17 swizzling where
377  * required.  The struct_mutex is dropped around the copy so that faults on
378  * the user's address are never taken while it is held.
379  */
380 static int
381 i915_gem_shmem_pread_slow(struct drm_device *dev,
382                           struct drm_i915_gem_object *obj,
383                           struct drm_i915_gem_pread *args,
384                           struct drm_file *file)
385 {
386         struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
387         char __user *user_data;
388         ssize_t remain;
389         loff_t offset;
390         int shmem_page_offset, page_length, ret;
391         int obj_do_bit17_swizzling, page_do_bit17_swizzling;
392
393         user_data = (char __user *) (uintptr_t) args->data_ptr;
394         remain = args->size;
395
396         obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
397
398         offset = args->offset;
399
400         mutex_unlock(&dev->struct_mutex);
401
402         while (remain > 0) {
403                 struct page *page;
404                 char *vaddr;
405
406                 /* Operation in this page
407                  *
408                  * shmem_page_offset = offset within page in shmem file
409                  * page_length = bytes to copy for this page
410                  */
411                 shmem_page_offset = offset_in_page(offset);
412                 page_length = remain;
413                 if ((shmem_page_offset + page_length) > PAGE_SIZE)
414                         page_length = PAGE_SIZE - shmem_page_offset;
415
416                 page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
417                 if (IS_ERR(page)) {
418                         ret = PTR_ERR(page);
419                         goto out;
420                 }
421
422                 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
423                         (page_to_phys(page) & (1 << 17)) != 0;
424
425                 vaddr = kmap(page);
426                 if (page_do_bit17_swizzling)
427                         ret = __copy_to_user_swizzled(user_data,
428                                                       vaddr, shmem_page_offset,
429                                                       page_length);
430                 else
431                         ret = __copy_to_user(user_data,
432                                              vaddr + shmem_page_offset,
433                                              page_length);
434                 kunmap(page);
435
436                 mark_page_accessed(page);
437                 page_cache_release(page);
438
439                 if (ret) {
440                         ret = -EFAULT;
441                         goto out;
442                 }
443
444                 remain -= page_length;
445                 user_data += page_length;
446                 offset += page_length;
447         }
448
449 out:
450         mutex_lock(&dev->struct_mutex);
451         /* Fixup: Kill any reinstated backing storage pages */
452         if (obj->madv == __I915_MADV_PURGED)
453                 i915_gem_object_truncate(obj);
454
455         return ret;
456 }
457
458 /**
459  * Reads data from the object referenced by handle.
460  *
461  * On error, the contents of *data are undefined.
462  */
463 int
464 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
465                      struct drm_file *file)
466 {
467         struct drm_i915_gem_pread *args = data;
468         struct drm_i915_gem_object *obj;
469         int ret = 0;
470
471         if (args->size == 0)
472                 return 0;
473
474         if (!access_ok(VERIFY_WRITE,
475                        (char __user *)(uintptr_t)args->data_ptr,
476                        args->size))
477                 return -EFAULT;
478
479         ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr,
480                                        args->size);
481         if (ret)
482                 return -EFAULT;
483
484         ret = i915_mutex_lock_interruptible(dev);
485         if (ret)
486                 return ret;
487
488         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
489         if (&obj->base == NULL) {
490                 ret = -ENOENT;
491                 goto unlock;
492         }
493
494         /* Bounds check source.  */
495         if (args->offset > obj->base.size ||
496             args->size > obj->base.size - args->offset) {
497                 ret = -EINVAL;
498                 goto out;
499         }
500
501         trace_i915_gem_object_pread(obj, args->offset, args->size);
502
503         ret = i915_gem_object_set_cpu_read_domain_range(obj,
504                                                         args->offset,
505                                                         args->size);
506         if (ret)
507                 goto out;
508
509         ret = -EFAULT;
510         if (!i915_gem_object_needs_bit17_swizzle(obj))
511                 ret = i915_gem_shmem_pread_fast(dev, obj, args, file);
512         if (ret == -EFAULT)
513                 ret = i915_gem_shmem_pread_slow(dev, obj, args, file);
514
515 out:
516         drm_gem_object_unreference(&obj->base);
517 unlock:
518         mutex_unlock(&dev->struct_mutex);
519         return ret;
520 }
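/*
 * A minimal userspace sketch of the pread ioctl handled above (illustrative
 * only; assumes a valid handle, an open DRM fd and libdrm's drmIoctl()):
 *
 *	char buf[4096];
 *	struct drm_i915_gem_pread pread = {
 *		.handle   = handle,
 *		.offset   = 0,
 *		.size     = sizeof(buf),
 *		.data_ptr = (uintptr_t)buf,
 *	};
 *	int err = drmIoctl(fd, DRM_IOCTL_I915_GEM_PREAD, &pread);
 *
 * The kernel bounds-checks offset/size against the object, pulls the range
 * into the CPU read domain, then tries the fast path before falling back to
 * the swizzle-aware slow path.
 */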
521
522 /* This is the fast write path which cannot handle
523  * page faults in the source data
524  */
525
526 static inline int
527 fast_user_write(struct io_mapping *mapping,
528                 loff_t page_base, int page_offset,
529                 char __user *user_data,
530                 int length)
531 {
532         char *vaddr_atomic;
533         unsigned long unwritten;
534
535         vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
536         unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
537                                                       user_data, length);
538         io_mapping_unmap_atomic(vaddr_atomic);
539         return unwritten;
540 }
541
542 /* Here's the write path which can sleep for
543  * page faults
544  */
545
546 static inline void
547 slow_kernel_write(struct io_mapping *mapping,
548                   loff_t gtt_base, int gtt_offset,
549                   struct page *user_page, int user_offset,
550                   int length)
551 {
552         char __iomem *dst_vaddr;
553         char *src_vaddr;
554
555         dst_vaddr = io_mapping_map_wc(mapping, gtt_base);
556         src_vaddr = kmap(user_page);
557
558         memcpy_toio(dst_vaddr + gtt_offset,
559                     src_vaddr + user_offset,
560                     length);
561
562         kunmap(user_page);
563         io_mapping_unmap(dst_vaddr);
564 }
565
566 /**
567  * This is the fast pwrite path, where we copy the data directly from the
568  * user into the GTT, uncached.
569  */
570 static int
571 i915_gem_gtt_pwrite_fast(struct drm_device *dev,
572                          struct drm_i915_gem_object *obj,
573                          struct drm_i915_gem_pwrite *args,
574                          struct drm_file *file)
575 {
576         drm_i915_private_t *dev_priv = dev->dev_private;
577         ssize_t remain;
578         loff_t offset, page_base;
579         char __user *user_data;
580         int page_offset, page_length;
581
582         user_data = (char __user *) (uintptr_t) args->data_ptr;
583         remain = args->size;
584
585         offset = obj->gtt_offset + args->offset;
586
587         while (remain > 0) {
588                 /* Operation in this page
589                  *
590                  * page_base = page offset within aperture
591                  * page_offset = offset within page
592                  * page_length = bytes to copy for this page
593                  */
594                 page_base = offset & PAGE_MASK;
595                 page_offset = offset_in_page(offset);
596                 page_length = remain;
597                 if ((page_offset + remain) > PAGE_SIZE)
598                         page_length = PAGE_SIZE - page_offset;
599
600                 /* If we get a fault while copying data, then (presumably) our
601                  * source page isn't available.  Return the error and we'll
602                  * retry in the slow path.
603                  */
604                 if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
605                                     page_offset, user_data, page_length))
606                         return -EFAULT;
607
608                 remain -= page_length;
609                 user_data += page_length;
610                 offset += page_length;
611         }
612
613         return 0;
614 }
615
616 /**
617  * This is the fallback GTT pwrite path, which uses get_user_pages to pin
618  * the user memory and kmap/io_mapping_map_wc for the actual copy.
619  *
620  * This code resulted in x11perf -rgb10text consuming about 10% more CPU
621  * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
622  */
623 static int
624 i915_gem_gtt_pwrite_slow(struct drm_device *dev,
625                          struct drm_i915_gem_object *obj,
626                          struct drm_i915_gem_pwrite *args,
627                          struct drm_file *file)
628 {
629         drm_i915_private_t *dev_priv = dev->dev_private;
630         ssize_t remain;
631         loff_t gtt_page_base, offset;
632         loff_t first_data_page, last_data_page, num_pages;
633         loff_t pinned_pages, i;
634         struct page **user_pages;
635         struct mm_struct *mm = current->mm;
636         int gtt_page_offset, data_page_offset, data_page_index, page_length;
637         int ret;
638         uint64_t data_ptr = args->data_ptr;
639
640         remain = args->size;
641
642         /* Pin the user pages containing the data.  We can't fault while
643          * holding the struct mutex, and all of the pwrite implementations
644          * want to hold it while dereferencing the user data.
645          */
646         first_data_page = data_ptr / PAGE_SIZE;
647         last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
648         num_pages = last_data_page - first_data_page + 1;
649
650         user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
651         if (user_pages == NULL)
652                 return -ENOMEM;
653
654         mutex_unlock(&dev->struct_mutex);
655         down_read(&mm->mmap_sem);
656         pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
657                                       num_pages, 0, 0, user_pages, NULL);
658         up_read(&mm->mmap_sem);
659         mutex_lock(&dev->struct_mutex);
660         if (pinned_pages < num_pages) {
661                 ret = -EFAULT;
662                 goto out_unpin_pages;
663         }
664
665         ret = i915_gem_object_set_to_gtt_domain(obj, true);
666         if (ret)
667                 goto out_unpin_pages;
668
669         ret = i915_gem_object_put_fence(obj);
670         if (ret)
671                 goto out_unpin_pages;
672
673         offset = obj->gtt_offset + args->offset;
674
675         while (remain > 0) {
676                 /* Operation in this page
677                  *
678                  * gtt_page_base = page offset within aperture
679                  * gtt_page_offset = offset within page in aperture
680                  * data_page_index = page number in get_user_pages return
681                  * data_page_offset = offset within the data_page_index page.
682                  * page_length = bytes to copy for this page
683                  */
684                 gtt_page_base = offset & PAGE_MASK;
685                 gtt_page_offset = offset_in_page(offset);
686                 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
687                 data_page_offset = offset_in_page(data_ptr);
688
689                 page_length = remain;
690                 if ((gtt_page_offset + page_length) > PAGE_SIZE)
691                         page_length = PAGE_SIZE - gtt_page_offset;
692                 if ((data_page_offset + page_length) > PAGE_SIZE)
693                         page_length = PAGE_SIZE - data_page_offset;
694
695                 slow_kernel_write(dev_priv->mm.gtt_mapping,
696                                   gtt_page_base, gtt_page_offset,
697                                   user_pages[data_page_index],
698                                   data_page_offset,
699                                   page_length);
700
701                 remain -= page_length;
702                 offset += page_length;
703                 data_ptr += page_length;
704         }
705
706 out_unpin_pages:
707         for (i = 0; i < pinned_pages; i++)
708                 page_cache_release(user_pages[i]);
709         drm_free_large(user_pages);
710
711         return ret;
712 }
713
714 /**
715  * This is the fast shmem pwrite path, which attempts to directly
716  * copy_from_user into the kmapped pages backing the object.
717  */
718 static int
719 i915_gem_shmem_pwrite_fast(struct drm_device *dev,
720                            struct drm_i915_gem_object *obj,
721                            struct drm_i915_gem_pwrite *args,
722                            struct drm_file *file)
723 {
724         struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
725         ssize_t remain;
726         loff_t offset;
727         char __user *user_data;
728         int page_offset, page_length;
729
730         user_data = (char __user *) (uintptr_t) args->data_ptr;
731         remain = args->size;
732
733         offset = args->offset;
734         obj->dirty = 1;
735
736         while (remain > 0) {
737                 struct page *page;
738                 char *vaddr;
739                 int ret;
740
741                 /* Operation in this page
742                  *
743                  * page_offset = offset within page
744                  * page_length = bytes to copy for this page
745                  */
746                 page_offset = offset_in_page(offset);
747                 page_length = remain;
748                 if ((page_offset + remain) > PAGE_SIZE)
749                         page_length = PAGE_SIZE - page_offset;
750
751                 page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
752                 if (IS_ERR(page))
753                         return PTR_ERR(page);
754
755                 vaddr = kmap_atomic(page);
756                 ret = __copy_from_user_inatomic(vaddr + page_offset,
757                                                 user_data,
758                                                 page_length);
759                 kunmap_atomic(vaddr);
760
761                 set_page_dirty(page);
762                 mark_page_accessed(page);
763                 page_cache_release(page);
764
765                 /* If we get a fault while copying data, then (presumably) our
766                  * source page isn't available.  Return the error and we'll
767                  * retry in the slow path.
768                  */
769                 if (ret)
770                         return -EFAULT;
771
772                 remain -= page_length;
773                 user_data += page_length;
774                 offset += page_length;
775         }
776
777         return 0;
778 }
779
780 /**
781  * This is the fallback shmem pwrite path, which kmaps each backing page and
782  * fills it with copy_from_user, handling bit-17 swizzling where required.
783  *
784  * The struct_mutex is dropped around the copy so that faults on the user's
785  * address are never taken while it is held.
786  */
787 static int
788 i915_gem_shmem_pwrite_slow(struct drm_device *dev,
789                            struct drm_i915_gem_object *obj,
790                            struct drm_i915_gem_pwrite *args,
791                            struct drm_file *file)
792 {
793         struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
794         ssize_t remain;
795         loff_t offset;
796         char __user *user_data;
797         int shmem_page_offset, page_length, ret;
798         int obj_do_bit17_swizzling, page_do_bit17_swizzling;
799
800         user_data = (char __user *) (uintptr_t) args->data_ptr;
801         remain = args->size;
802
803         obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
804
805         offset = args->offset;
806         obj->dirty = 1;
807
808         mutex_unlock(&dev->struct_mutex);
809
810         while (remain > 0) {
811                 struct page *page;
812                 char *vaddr;
813
814                 /* Operation in this page
815                  *
816                  * shmem_page_offset = offset within page in shmem file
817                  * page_length = bytes to copy for this page
818                  */
819                 shmem_page_offset = offset_in_page(offset);
820
821                 page_length = remain;
822                 if ((shmem_page_offset + page_length) > PAGE_SIZE)
823                         page_length = PAGE_SIZE - shmem_page_offset;
824
825                 page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
826                 if (IS_ERR(page)) {
827                         ret = PTR_ERR(page);
828                         goto out;
829                 }
830
831                 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
832                         (page_to_phys(page) & (1 << 17)) != 0;
833
834                 vaddr = kmap(page);
835                 if (page_do_bit17_swizzling)
836                         ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
837                                                         user_data,
838                                                         page_length);
839                 else
840                         ret = __copy_from_user(vaddr + shmem_page_offset,
841                                                user_data,
842                                                page_length);
843                 kunmap(page);
844
845                 set_page_dirty(page);
846                 mark_page_accessed(page);
847                 page_cache_release(page);
848
849                 if (ret) {
850                         ret = -EFAULT;
851                         goto out;
852                 }
853
854                 remain -= page_length;
855                 user_data += page_length;
856                 offset += page_length;
857         }
858
859 out:
860         mutex_lock(&dev->struct_mutex);
861         /* Fixup: Kill any reinstated backing storage pages */
862         if (obj->madv == __I915_MADV_PURGED)
863                 i915_gem_object_truncate(obj);
864         /* and flush dirty cachelines in case the object isn't in the cpu write
865          * domain anymore. */
866         if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
867                 i915_gem_clflush_object(obj);
868                 intel_gtt_chipset_flush();
869         }
870
871         return ret;
872 }
873
874 /**
875  * Writes data to the object referenced by handle.
876  *
877  * On error, the contents of the buffer that were to be modified are undefined.
878  */
879 int
880 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
881                       struct drm_file *file)
882 {
883         struct drm_i915_gem_pwrite *args = data;
884         struct drm_i915_gem_object *obj;
885         int ret;
886
887         if (args->size == 0)
888                 return 0;
889
890         if (!access_ok(VERIFY_READ,
891                        (char __user *)(uintptr_t)args->data_ptr,
892                        args->size))
893                 return -EFAULT;
894
895         ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr,
896                                       args->size);
897         if (ret)
898                 return -EFAULT;
899
900         ret = i915_mutex_lock_interruptible(dev);
901         if (ret)
902                 return ret;
903
904         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
905         if (&obj->base == NULL) {
906                 ret = -ENOENT;
907                 goto unlock;
908         }
909
910         /* Bounds check destination. */
911         if (args->offset > obj->base.size ||
912             args->size > obj->base.size - args->offset) {
913                 ret = -EINVAL;
914                 goto out;
915         }
916
917         trace_i915_gem_object_pwrite(obj, args->offset, args->size);
918
919         /* We can only do the GTT pwrite on untiled buffers, as otherwise
920          * it would end up going through the fenced access, and we'll get
921          * different detiling behavior between reading and writing.
922          * pread/pwrite currently are reading and writing from the CPU
923          * perspective, requiring manual detiling by the client.
924          */
925         if (obj->phys_obj) {
926                 ret = i915_gem_phys_pwrite(dev, obj, args, file);
927                 goto out;
928         }
929
930         if (obj->gtt_space &&
931             obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
932                 ret = i915_gem_object_pin(obj, 0, true);
933                 if (ret)
934                         goto out;
935
936                 ret = i915_gem_object_set_to_gtt_domain(obj, true);
937                 if (ret)
938                         goto out_unpin;
939
940                 ret = i915_gem_object_put_fence(obj);
941                 if (ret)
942                         goto out_unpin;
943
944                 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
945                 if (ret == -EFAULT)
946                         ret = i915_gem_gtt_pwrite_slow(dev, obj, args, file);
947
948 out_unpin:
949                 i915_gem_object_unpin(obj);
950
951                 if (ret != -EFAULT)
952                         goto out;
953                 /* Fall through to the shmfs paths because the gtt paths might
954                  * fail with non-page-backed user pointers (e.g. gtt mappings
955                  * when moving data between textures). */
956         }
957
958         ret = i915_gem_object_set_to_cpu_domain(obj, 1);
959         if (ret)
960                 goto out;
961
962         ret = -EFAULT;
963         if (!i915_gem_object_needs_bit17_swizzle(obj))
964                 ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file);
965         if (ret == -EFAULT)
966                 ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file);
967
968 out:
969         drm_gem_object_unreference(&obj->base);
970 unlock:
971         mutex_unlock(&dev->struct_mutex);
972         return ret;
973 }
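/*
 * The mirror of the pread sketch above, for pwrite (illustrative only;
 * same assumptions about fd, handle and libdrm):
 *
 *	struct drm_i915_gem_pwrite pwrite = {
 *		.handle   = handle,
 *		.offset   = 0,
 *		.size     = sizeof(buf),
 *		.data_ptr = (uintptr_t)buf,
 *	};
 *	int err = drmIoctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
 *
 * Internally the ioctl picks, in order: the phys-object path, the GTT
 * fast/slow paths for bound objects outside the CPU write domain, and
 * finally the shmem fast/slow paths for everything else.
 */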
974
975 /**
976  * Called when user space prepares to use an object with the CPU, either
977  * through the mmap ioctl's mapping or a GTT mapping.
978  */
979 int
980 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
981                           struct drm_file *file)
982 {
983         struct drm_i915_gem_set_domain *args = data;
984         struct drm_i915_gem_object *obj;
985         uint32_t read_domains = args->read_domains;
986         uint32_t write_domain = args->write_domain;
987         int ret;
988
989         if (!(dev->driver->driver_features & DRIVER_GEM))
990                 return -ENODEV;
991
992         /* Only handle setting domains to types used by the CPU. */
993         if (write_domain & I915_GEM_GPU_DOMAINS)
994                 return -EINVAL;
995
996         if (read_domains & I915_GEM_GPU_DOMAINS)
997                 return -EINVAL;
998
999         /* Having something in the write domain implies it's in the read
1000          * domain, and only that read domain.  Enforce that in the request.
1001          */
1002         if (write_domain != 0 && read_domains != write_domain)
1003                 return -EINVAL;
1004
1005         ret = i915_mutex_lock_interruptible(dev);
1006         if (ret)
1007                 return ret;
1008
1009         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1010         if (&obj->base == NULL) {
1011                 ret = -ENOENT;
1012                 goto unlock;
1013         }
1014
1015         if (read_domains & I915_GEM_DOMAIN_GTT) {
1016                 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1017
1018                 /* Silently promote "you're not bound, there was nothing to do"
1019                  * to success, since the client was just asking us to
1020                  * make sure everything was done.
1021                  */
1022                 if (ret == -EINVAL)
1023                         ret = 0;
1024         } else {
1025                 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1026         }
1027
1028         drm_gem_object_unreference(&obj->base);
1029 unlock:
1030         mutex_unlock(&dev->struct_mutex);
1031         return ret;
1032 }
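/*
 * Typical userspace use of the set_domain ioctl above, e.g. before CPU
 * writes through an mmap of the object (illustrative only):
 *
 *	struct drm_i915_gem_set_domain sd = {
 *		.handle       = handle,
 *		.read_domains = I915_GEM_DOMAIN_CPU,
 *		.write_domain = I915_GEM_DOMAIN_CPU,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
 *
 * Only the CPU and GTT domains are accepted here; requesting a GPU domain
 * returns -EINVAL, and a non-zero write_domain must equal read_domains.
 */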
1033
1034 /**
1035  * Called when user space has done writes to this buffer
1036  */
1037 int
1038 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1039                          struct drm_file *file)
1040 {
1041         struct drm_i915_gem_sw_finish *args = data;
1042         struct drm_i915_gem_object *obj;
1043         int ret = 0;
1044
1045         if (!(dev->driver->driver_features & DRIVER_GEM))
1046                 return -ENODEV;
1047
1048         ret = i915_mutex_lock_interruptible(dev);
1049         if (ret)
1050                 return ret;
1051
1052         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1053         if (&obj->base == NULL) {
1054                 ret = -ENOENT;
1055                 goto unlock;
1056         }
1057
1058         /* Pinned buffers may be scanout, so flush the cache */
1059         if (obj->pin_count)
1060                 i915_gem_object_flush_cpu_write_domain(obj);
1061
1062         drm_gem_object_unreference(&obj->base);
1063 unlock:
1064         mutex_unlock(&dev->struct_mutex);
1065         return ret;
1066 }
1067
1068 /**
1069  * Maps the contents of an object, returning the address it is mapped
1070  * into.
1071  *
1072  * While the mapping holds a reference on the contents of the object, it doesn't
1073  * imply a ref on the object itself.
1074  */
1075 int
1076 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1077                     struct drm_file *file)
1078 {
1079         struct drm_i915_gem_mmap *args = data;
1080         struct drm_gem_object *obj;
1081         unsigned long addr;
1082
1083         if (!(dev->driver->driver_features & DRIVER_GEM))
1084                 return -ENODEV;
1085
1086         obj = drm_gem_object_lookup(dev, file, args->handle);
1087         if (obj == NULL)
1088                 return -ENOENT;
1089
1090         down_write(&current->mm->mmap_sem);
1091         addr = do_mmap(obj->filp, 0, args->size,
1092                        PROT_READ | PROT_WRITE, MAP_SHARED,
1093                        args->offset);
1094         up_write(&current->mm->mmap_sem);
1095         drm_gem_object_unreference_unlocked(obj);
1096         if (IS_ERR((void *)addr))
1097                 return addr;
1098
1099         args->addr_ptr = (uint64_t) addr;
1100
1101         return 0;
1102 }
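/*
 * Userspace normally pairs this ioctl with a set_domain call, roughly
 * (illustrative only, error handling omitted, obj_size assumed known):
 *
 *	struct drm_i915_gem_mmap mmap_arg = {
 *		.handle = handle,
 *		.offset = 0,
 *		.size   = obj_size,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
 *	void *ptr = (void *)(uintptr_t)mmap_arg.addr_ptr;
 *
 * The returned pointer aliases the shmem backing store, so callers are
 * expected to move the object into the CPU domain (see the set_domain ioctl
 * above) for coherent access.
 */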
1103
1104 /**
1105  * i915_gem_fault - fault a page into the GTT
1106  * @vma: VMA in question
1107  * @vmf: fault info
1108  *
1109  * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1110  * from userspace.  The fault handler takes care of binding the object to
1111  * the GTT (if needed), allocating and programming a fence register (again,
1112  * only if needed based on whether the old reg is still valid or the object
1113  * is tiled) and inserting a new PTE into the faulting process.
1114  *
1115  * Note that the faulting process may involve evicting existing objects
1116  * from the GTT and/or fence registers to make room.  So performance may
1117  * suffer if the GTT working set is large or there are few fence registers
1118  * left.
1119  */
1120 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1121 {
1122         struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
1123         struct drm_device *dev = obj->base.dev;
1124         drm_i915_private_t *dev_priv = dev->dev_private;
1125         pgoff_t page_offset;
1126         unsigned long pfn;
1127         int ret = 0;
1128         bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
1129
1130         /* We don't use vmf->pgoff since that has the fake offset */
1131         page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
1132                 PAGE_SHIFT;
1133
1134         ret = i915_mutex_lock_interruptible(dev);
1135         if (ret)
1136                 goto out;
1137
1138         trace_i915_gem_object_fault(obj, page_offset, true, write);
1139
1140         /* Now bind it into the GTT if needed */
1141         if (!obj->map_and_fenceable) {
1142                 ret = i915_gem_object_unbind(obj);
1143                 if (ret)
1144                         goto unlock;
1145         }
1146         if (!obj->gtt_space) {
1147                 ret = i915_gem_object_bind_to_gtt(obj, 0, true);
1148                 if (ret)
1149                         goto unlock;
1150
1151                 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1152                 if (ret)
1153                         goto unlock;
1154         }
1155
1156         if (obj->tiling_mode == I915_TILING_NONE)
1157                 ret = i915_gem_object_put_fence(obj);
1158         else
1159                 ret = i915_gem_object_get_fence(obj, NULL);
1160         if (ret)
1161                 goto unlock;
1162
1163         if (i915_gem_object_is_inactive(obj))
1164                 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
1165
1166         obj->fault_mappable = true;
1167
1168         pfn = ((dev->agp->base + obj->gtt_offset) >> PAGE_SHIFT) +
1169                 page_offset;
1170
1171         /* Finally, remap it using the new GTT offset */
1172         ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
1173 unlock:
1174         mutex_unlock(&dev->struct_mutex);
1175 out:
1176         switch (ret) {
1177         case -EIO:
1178         case -EAGAIN:
1179                 /* Give the error handler a chance to run and move the
1180                  * objects off the GPU active list. Next time we service the
1181                  * fault, we should be able to transition the page into the
1182                  * GTT without touching the GPU (and so avoid further
1183                  * EIO/EAGAIN). If the GPU is wedged, then there is no issue
1184                  * with coherency, just lost writes.
1185                  */
1186                 set_need_resched();
1187         case 0:
1188         case -ERESTARTSYS:
1189         case -EINTR:
1190                 return VM_FAULT_NOPAGE;
1191         case -ENOMEM:
1192                 return VM_FAULT_OOM;
1193         default:
1194                 return VM_FAULT_SIGBUS;
1195         }
1196 }
1197
1198 /**
1199  * i915_gem_release_mmap - remove physical page mappings
1200  * @obj: obj in question
1201  *
1202  * Preserve the reservation of the mmapping with the DRM core code, but
1203  * relinquish ownership of the pages back to the system.
1204  *
1205  * It is vital that we remove the page mapping if we have mapped a tiled
1206  * object through the GTT and then lose the fence register due to
1207  * resource pressure. Similarly if the object has been moved out of the
1208  * aperture, then pages mapped into userspace must be revoked. Removing the
1209  * mapping will then trigger a page fault on the next user access, allowing
1210  * fixup by i915_gem_fault().
1211  */
1212 void
1213 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1214 {
1215         if (!obj->fault_mappable)
1216                 return;
1217
1218         if (obj->base.dev->dev_mapping)
1219                 unmap_mapping_range(obj->base.dev->dev_mapping,
1220                                     (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
1221                                     obj->base.size, 1);
1222
1223         obj->fault_mappable = false;
1224 }
1225
1226 static uint32_t
1227 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
1228 {
1229         uint32_t gtt_size;
1230
1231         if (INTEL_INFO(dev)->gen >= 4 ||
1232             tiling_mode == I915_TILING_NONE)
1233                 return size;
1234
1235         /* Previous chips need a power-of-two fence region when tiling */
1236         if (INTEL_INFO(dev)->gen == 3)
1237                 gtt_size = 1024*1024;
1238         else
1239                 gtt_size = 512*1024;
1240
1241         while (gtt_size < size)
1242                 gtt_size <<= 1;
1243
1244         return gtt_size;
1245 }
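/*
 * Example fence-region sizes produced by the loop above (illustrative): a
 * 300 KiB tiled object needs a 512 KiB region on gen2 and a 1 MiB region on
 * gen3, while a 1.5 MiB tiled object needs 2 MiB on both.  Gen4+ and untiled
 * objects simply use their own size.
 */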
1246
1247 /**
1248  * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1249  * @obj: object to check
1250  *
1251  * Return the required GTT alignment for an object, taking into account
1252  * potential fence register mapping.
1253  */
1254 static uint32_t
1255 i915_gem_get_gtt_alignment(struct drm_device *dev,
1256                            uint32_t size,
1257                            int tiling_mode)
1258 {
1259         /*
1260          * Minimum alignment is 4k (GTT page size), but might be greater
1261          * if a fence register is needed for the object.
1262          */
1263         if (INTEL_INFO(dev)->gen >= 4 ||
1264             tiling_mode == I915_TILING_NONE)
1265                 return 4096;
1266
1267         /*
1268          * Previous chips need to be aligned to the size of the smallest
1269          * fence register that can contain the object.
1270          */
1271         return i915_gem_get_gtt_size(dev, size, tiling_mode);
1272 }
1273
1274 /**
1275  * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
1276  *                                       unfenced object
1277  * @dev: the device
1278  * @size: size of the object
1279  * @tiling_mode: tiling mode of the object
1280  *
1281  * Return the required GTT alignment for an object, only taking into account
1282  * unfenced tiled surface requirements.
1283  */
1284 uint32_t
1285 i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
1286                                     uint32_t size,
1287                                     int tiling_mode)
1288 {
1289         /*
1290          * Minimum alignment is 4k (GTT page size) for sane hw.
1291          */
1292         if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
1293             tiling_mode == I915_TILING_NONE)
1294                 return 4096;
1295
1296         /* Previous hardware however needs to be aligned to a power-of-two
1297          * tile height. The simplest method for determining this is to reuse
1298          * the power-of-two fence region size from i915_gem_get_gtt_size().
1299          */
1300         return i915_gem_get_gtt_size(dev, size, tiling_mode);
1301 }
1302
1303 int
1304 i915_gem_mmap_gtt(struct drm_file *file,
1305                   struct drm_device *dev,
1306                   uint32_t handle,
1307                   uint64_t *offset)
1308 {
1309         struct drm_i915_private *dev_priv = dev->dev_private;
1310         struct drm_i915_gem_object *obj;
1311         int ret;
1312
1313         if (!(dev->driver->driver_features & DRIVER_GEM))
1314                 return -ENODEV;
1315
1316         ret = i915_mutex_lock_interruptible(dev);
1317         if (ret)
1318                 return ret;
1319
1320         obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
1321         if (&obj->base == NULL) {
1322                 ret = -ENOENT;
1323                 goto unlock;
1324         }
1325
1326         if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
1327                 ret = -E2BIG;
1328                 goto out;
1329         }
1330
1331         if (obj->madv != I915_MADV_WILLNEED) {
1332                 DRM_ERROR("Attempting to mmap a purgeable buffer\n");
1333                 ret = -EINVAL;
1334                 goto out;
1335         }
1336
1337         if (!obj->base.map_list.map) {
1338                 ret = drm_gem_create_mmap_offset(&obj->base);
1339                 if (ret)
1340                         goto out;
1341         }
1342
1343         *offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
1344
1345 out:
1346         drm_gem_object_unreference(&obj->base);
1347 unlock:
1348         mutex_unlock(&dev->struct_mutex);
1349         return ret;
1350 }
1351
1352 /**
1353  * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1354  * @dev: DRM device
1355  * @data: GTT mapping ioctl data
1356  * @file: GEM object info
1357  *
1358  * Simply returns the fake offset to userspace so it can mmap it.
1359  * The mmap call will end up in drm_gem_mmap(), which will set things
1360  * up so we can get faults in the handler above.
1361  *
1362  * The fault handler will take care of binding the object into the GTT
1363  * (since it may have been evicted to make room for something), allocating
1364  * a fence register, and mapping the appropriate aperture address into
1365  * userspace.
1366  */
1367 int
1368 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1369                         struct drm_file *file)
1370 {
1371         struct drm_i915_gem_mmap_gtt *args = data;
1372
1373         if (!(dev->driver->driver_features & DRIVER_GEM))
1374                 return -ENODEV;
1375
1376         return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
1377 }
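/*
 * The fake offset returned above is consumed by a regular mmap() on the DRM
 * fd, e.g. (illustrative only, obj_size assumed known):
 *
 *	struct drm_i915_gem_mmap_gtt gtt = { .handle = handle };
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &gtt);
 *	void *ptr = mmap(NULL, obj_size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, gtt.offset);
 *
 * Faults on that mapping land in i915_gem_fault(), which binds the object
 * into the mappable aperture and inserts the PTEs on demand.
 */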
1378
1379
1380 static int
1381 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
1382                               gfp_t gfpmask)
1383 {
1384         int page_count, i;
1385         struct address_space *mapping;
1386         struct inode *inode;
1387         struct page *page;
1388
1389         /* Get the list of pages out of our struct file.  They'll be pinned
1390          * at this point until we release them.
1391          */
1392         page_count = obj->base.size / PAGE_SIZE;
1393         BUG_ON(obj->pages != NULL);
1394         obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
1395         if (obj->pages == NULL)
1396                 return -ENOMEM;
1397
1398         inode = obj->base.filp->f_path.dentry->d_inode;
1399         mapping = inode->i_mapping;
1400         gfpmask |= mapping_gfp_mask(mapping);
1401
1402         for (i = 0; i < page_count; i++) {
1403                 page = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
1404                 if (IS_ERR(page))
1405                         goto err_pages;
1406
1407                 obj->pages[i] = page;
1408         }
1409
1410         if (i915_gem_object_needs_bit17_swizzle(obj))
1411                 i915_gem_object_do_bit_17_swizzle(obj);
1412
1413         return 0;
1414
1415 err_pages:
1416         while (i--)
1417                 page_cache_release(obj->pages[i]);
1418
1419         drm_free_large(obj->pages);
1420         obj->pages = NULL;
1421         return PTR_ERR(page);
1422 }
1423
1424 static void
1425 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
1426 {
1427         int page_count = obj->base.size / PAGE_SIZE;
1428         int i;
1429
1430         BUG_ON(obj->madv == __I915_MADV_PURGED);
1431
1432         if (i915_gem_object_needs_bit17_swizzle(obj))
1433                 i915_gem_object_save_bit_17_swizzle(obj);
1434
1435         if (obj->madv == I915_MADV_DONTNEED)
1436                 obj->dirty = 0;
1437
1438         for (i = 0; i < page_count; i++) {
1439                 if (obj->dirty)
1440                         set_page_dirty(obj->pages[i]);
1441
1442                 if (obj->madv == I915_MADV_WILLNEED)
1443                         mark_page_accessed(obj->pages[i]);
1444
1445                 page_cache_release(obj->pages[i]);
1446         }
1447         obj->dirty = 0;
1448
1449         drm_free_large(obj->pages);
1450         obj->pages = NULL;
1451 }
1452
1453 void
1454 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
1455                                struct intel_ring_buffer *ring,
1456                                u32 seqno)
1457 {
1458         struct drm_device *dev = obj->base.dev;
1459         struct drm_i915_private *dev_priv = dev->dev_private;
1460
1461         BUG_ON(ring == NULL);
1462         obj->ring = ring;
1463
1464         /* Add a reference if we're newly entering the active list. */
1465         if (!obj->active) {
1466                 drm_gem_object_reference(&obj->base);
1467                 obj->active = 1;
1468         }
1469
1470         /* Move from whatever list we were on to the tail of execution. */
1471         list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
1472         list_move_tail(&obj->ring_list, &ring->active_list);
1473
1474         obj->last_rendering_seqno = seqno;
1475
1476         if (obj->fenced_gpu_access) {
1477                 obj->last_fenced_seqno = seqno;
1478                 obj->last_fenced_ring = ring;
1479
1480                 /* Bump MRU to take account of the delayed flush */
1481                 if (obj->fence_reg != I915_FENCE_REG_NONE) {
1482                         struct drm_i915_fence_reg *reg;
1483
1484                         reg = &dev_priv->fence_regs[obj->fence_reg];
1485                         list_move_tail(&reg->lru_list,
1486                                        &dev_priv->mm.fence_list);
1487                 }
1488         }
1489 }
1490
1491 static void
1492 i915_gem_object_move_off_active(struct drm_i915_gem_object *obj)
1493 {
1494         list_del_init(&obj->ring_list);
1495         obj->last_rendering_seqno = 0;
1496         obj->last_fenced_seqno = 0;
1497 }
1498
1499 static void
1500 i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj)
1501 {
1502         struct drm_device *dev = obj->base.dev;
1503         drm_i915_private_t *dev_priv = dev->dev_private;
1504
1505         BUG_ON(!obj->active);
1506         list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list);
1507
1508         i915_gem_object_move_off_active(obj);
1509 }
1510
1511 static void
1512 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
1513 {
1514         struct drm_device *dev = obj->base.dev;
1515         struct drm_i915_private *dev_priv = dev->dev_private;
1516
1517         if (obj->pin_count != 0)
1518                 list_move_tail(&obj->mm_list, &dev_priv->mm.pinned_list);
1519         else
1520                 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
1521
1522         BUG_ON(!list_empty(&obj->gpu_write_list));
1523         BUG_ON(!obj->active);
1524         obj->ring = NULL;
1525         obj->last_fenced_ring = NULL;
1526
1527         i915_gem_object_move_off_active(obj);
1528         obj->fenced_gpu_access = false;
1529
1530         obj->active = 0;
1531         obj->pending_gpu_write = false;
1532         drm_gem_object_unreference(&obj->base);
1533
1534         WARN_ON(i915_verify_lists(dev));
1535 }
1536
1537 /* Immediately discard the backing storage */
1538 static void
1539 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
1540 {
1541         struct inode *inode;
1542
1543         /* Our goal here is to return as much memory as possible back to
1544          * the system, since we may be called under memory pressure (OOM).
1545          * To do this we must instruct the shmfs to drop all of its
1546          * backing pages, *now*.
1547          */
1548         inode = obj->base.filp->f_path.dentry->d_inode;
1549         shmem_truncate_range(inode, 0, (loff_t)-1);
1550
1551         obj->madv = __I915_MADV_PURGED;
1552 }
1553
1554 static inline int
1555 i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
1556 {
1557         return obj->madv == I915_MADV_DONTNEED;
1558 }
1559
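     /*
      * i915_gem_process_flushing_list - retire write domains after a flush
      *
      * For every object on the ring's gpu_write_list whose write domain is
      * covered by @flush_domains, clear the write domain and move the
      * object onto the active list under the next request seqno.
      */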
1560 static void
1561 i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
1562                                uint32_t flush_domains)
1563 {
1564         struct drm_i915_gem_object *obj, *next;
1565
1566         list_for_each_entry_safe(obj, next,
1567                                  &ring->gpu_write_list,
1568                                  gpu_write_list) {
1569                 if (obj->base.write_domain & flush_domains) {
1570                         uint32_t old_write_domain = obj->base.write_domain;
1571
1572                         obj->base.write_domain = 0;
1573                         list_del_init(&obj->gpu_write_list);
1574                         i915_gem_object_move_to_active(obj, ring,
1575                                                        i915_gem_next_request_seqno(ring));
1576
1577                         trace_i915_gem_object_change_domain(obj,
1578                                                             obj->base.read_domains,
1579                                                             old_write_domain);
1580                 }
1581         }
1582 }
1583
1584 static u32
1585 i915_gem_get_seqno(struct drm_device *dev)
1586 {
1587         drm_i915_private_t *dev_priv = dev->dev_private;
1588         u32 seqno = dev_priv->next_seqno;
1589
1590         /* reserve 0 for non-seqno */
1591         if (++dev_priv->next_seqno == 0)
1592                 dev_priv->next_seqno = 1;
1593
1594         return seqno;
1595 }
1596
1597 u32
1598 i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
1599 {
1600         if (ring->outstanding_lazy_request == 0)
1601                 ring->outstanding_lazy_request = i915_gem_get_seqno(ring->dev);
1602
1603         return ring->outstanding_lazy_request;
1604 }
1605
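     /*
      * i915_add_request - emit a request onto a ring
      *
      * Emits the breadcrumb for the ring's outstanding lazy seqno, records
      * the ring position and timestamp of the request, links it onto the
      * ring's (and, when @file is given, the client's) request list, and
      * arms the hangcheck timer and retire worker.  The caller allocates
      * the request and must free it if this function fails; an illustrative
      * sketch of the usual pattern (see i915_wait_request() below for the
      * in-tree version):
      *
      *     request = kzalloc(sizeof(*request), GFP_KERNEL);
      *     if (request == NULL)
      *             return -ENOMEM;
      *     ret = i915_add_request(ring, NULL, request);
      *     if (ret)
      *             kfree(request);
      */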
1606 int
1607 i915_add_request(struct intel_ring_buffer *ring,
1608                  struct drm_file *file,
1609                  struct drm_i915_gem_request *request)
1610 {
1611         drm_i915_private_t *dev_priv = ring->dev->dev_private;
1612         uint32_t seqno;
1613         u32 request_ring_position;
1614         int was_empty;
1615         int ret;
1616
1617         BUG_ON(request == NULL);
1618         seqno = i915_gem_next_request_seqno(ring);
1619
1620         /* Record the position of the start of the request so that
1621          * should we detect the updated seqno part-way through the
1622          * GPU processing the request, we never over-estimate the
1623          * position of the head.
1624          */
1625         request_ring_position = intel_ring_get_tail(ring);
1626
1627         ret = ring->add_request(ring, &seqno);
1628         if (ret)
1629                 return ret;
1630
1631         trace_i915_gem_request_add(ring, seqno);
1632
1633         request->seqno = seqno;
1634         request->ring = ring;
1635         request->tail = request_ring_position;
1636         request->emitted_jiffies = jiffies;
1637         was_empty = list_empty(&ring->request_list);
1638         list_add_tail(&request->list, &ring->request_list);
1639
1640         if (file) {
1641                 struct drm_i915_file_private *file_priv = file->driver_priv;
1642
1643                 spin_lock(&file_priv->mm.lock);
1644                 request->file_priv = file_priv;
1645                 list_add_tail(&request->client_list,
1646                               &file_priv->mm.request_list);
1647                 spin_unlock(&file_priv->mm.lock);
1648         }
1649
1650         ring->outstanding_lazy_request = 0;
1651
1652         if (!dev_priv->mm.suspended) {
1653                 if (i915_enable_hangcheck) {
1654                         mod_timer(&dev_priv->hangcheck_timer,
1655                                   jiffies +
1656                                   msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
1657                 }
1658                 if (was_empty)
1659                         queue_delayed_work(dev_priv->wq,
1660                                            &dev_priv->mm.retire_work, HZ);
1661         }
1662         return 0;
1663 }
1664
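     /*
      * i915_gem_request_remove_from_client - unlink a request from its file
      *
      * Drops the request from the owning file's request list under the
      * per-file lock.  Harmless if the request was never associated with a
      * client or has already been removed.
      */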
1665 static inline void
1666 i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
1667 {
1668         struct drm_i915_file_private *file_priv = request->file_priv;
1669
1670         if (!file_priv)
1671                 return;
1672
1673         spin_lock(&file_priv->mm.lock);
1674         if (request->file_priv) {
1675                 list_del(&request->client_list);
1676                 request->file_priv = NULL;
1677         }
1678         spin_unlock(&file_priv->mm.lock);
1679 }
1680
1681 static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
1682                                       struct intel_ring_buffer *ring)
1683 {
1684         while (!list_empty(&ring->request_list)) {
1685                 struct drm_i915_gem_request *request;
1686
1687                 request = list_first_entry(&ring->request_list,
1688                                            struct drm_i915_gem_request,
1689                                            list);
1690
1691                 list_del(&request->list);
1692                 i915_gem_request_remove_from_client(request);
1693                 kfree(request);
1694         }
1695
1696         while (!list_empty(&ring->active_list)) {
1697                 struct drm_i915_gem_object *obj;
1698
1699                 obj = list_first_entry(&ring->active_list,
1700                                        struct drm_i915_gem_object,
1701                                        ring_list);
1702
1703                 obj->base.write_domain = 0;
1704                 list_del_init(&obj->gpu_write_list);
1705                 i915_gem_object_move_to_inactive(obj);
1706         }
1707 }
1708
1709 static void i915_gem_reset_fences(struct drm_device *dev)
1710 {
1711         struct drm_i915_private *dev_priv = dev->dev_private;
1712         int i;
1713
1714         for (i = 0; i < dev_priv->num_fence_regs; i++) {
1715                 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
1716                 struct drm_i915_gem_object *obj = reg->obj;
1717
1718                 if (!obj)
1719                         continue;
1720
1721                 if (obj->tiling_mode)
1722                         i915_gem_release_mmap(obj);
1723
1724                 reg->obj->fence_reg = I915_FENCE_REG_NONE;
1725                 reg->obj->fenced_gpu_access = false;
1726                 reg->obj->last_fenced_seqno = 0;
1727                 reg->obj->last_fenced_ring = NULL;
1728                 i915_gem_clear_fence_reg(dev, reg);
1729         }
1730 }
1731
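     /*
      * i915_gem_reset - clean up GEM state after a GPU reset
      *
      * Throws away all outstanding requests, moves active and flushing
      * objects to the inactive list (their GPU caches are lost anyway),
      * drops the GPU read domains of inactive objects so they are
      * re-invalidated on reuse, and clears every fence register.
      */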
1732 void i915_gem_reset(struct drm_device *dev)
1733 {
1734         struct drm_i915_private *dev_priv = dev->dev_private;
1735         struct drm_i915_gem_object *obj;
1736         int i;
1737
1738         for (i = 0; i < I915_NUM_RINGS; i++)
1739                 i915_gem_reset_ring_lists(dev_priv, &dev_priv->ring[i]);
1740
1741         /* Remove anything from the flushing list. The GPU cache is likely
1742          * to be lost on reset along with the data, so simply move the
1743          * lost bo to the inactive list.
1744          */
1745         while (!list_empty(&dev_priv->mm.flushing_list)) {
1746                 obj = list_first_entry(&dev_priv->mm.flushing_list,
1747                                       struct drm_i915_gem_object,
1748                                       mm_list);
1749
1750                 obj->base.write_domain = 0;
1751                 list_del_init(&obj->gpu_write_list);
1752                 i915_gem_object_move_to_inactive(obj);
1753         }
1754
1755         /* Move everything out of the GPU domains to ensure we do any
1756          * necessary invalidation upon reuse.
1757          */
1758         list_for_each_entry(obj,
1759                             &dev_priv->mm.inactive_list,
1760                             mm_list)
1761         {
1762                 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
1763         }
1764
1765         /* The fence registers are invalidated so clear them out */
1766         i915_gem_reset_fences(dev);
1767 }
1768
1769 /**
1770  * Retires completed requests on @ring as their sequence numbers are passed by the GPU.
1771  */
1772 void
1773 i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
1774 {
1775         uint32_t seqno;
1776         int i;
1777
1778         if (list_empty(&ring->request_list))
1779                 return;
1780
1781         WARN_ON(i915_verify_lists(ring->dev));
1782
1783         seqno = ring->get_seqno(ring);
1784
1785         for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
1786                 if (seqno >= ring->sync_seqno[i])
1787                         ring->sync_seqno[i] = 0;
1788
1789         while (!list_empty(&ring->request_list)) {
1790                 struct drm_i915_gem_request *request;
1791
1792                 request = list_first_entry(&ring->request_list,
1793                                            struct drm_i915_gem_request,
1794                                            list);
1795
1796                 if (!i915_seqno_passed(seqno, request->seqno))
1797                         break;
1798
1799                 trace_i915_gem_request_retire(ring, request->seqno);
1800                 /* We know the GPU must have read the request to have
1801                  * sent us the seqno + interrupt, so use the position of
1802                  * the request's tail to update the last known position
1803                  * of the GPU head.
1804                  */
1805                 ring->last_retired_head = request->tail;
1806
1807                 list_del(&request->list);
1808                 i915_gem_request_remove_from_client(request);
1809                 kfree(request);
1810         }
1811
1812         /* Move any buffers on the active list that are no longer referenced
1813          * by the ringbuffer to the flushing/inactive lists as appropriate.
1814          */
1815         while (!list_empty(&ring->active_list)) {
1816                 struct drm_i915_gem_object *obj;
1817
1818                 obj = list_first_entry(&ring->active_list,
1819                                       struct drm_i915_gem_object,
1820                                       ring_list);
1821
1822                 if (!i915_seqno_passed(seqno, obj->last_rendering_seqno))
1823                         break;
1824
1825                 if (obj->base.write_domain != 0)
1826                         i915_gem_object_move_to_flushing(obj);
1827                 else
1828                         i915_gem_object_move_to_inactive(obj);
1829         }
1830
1831         if (unlikely(ring->trace_irq_seqno &&
1832                      i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
1833                 ring->irq_put(ring);
1834                 ring->trace_irq_seqno = 0;
1835         }
1836
1837         WARN_ON(i915_verify_lists(ring->dev));
1838 }
1839
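     /*
      * i915_gem_retire_requests - retire completed requests on all rings
      *
      * Frees any objects on the deferred-free list first (see the recursion
      * note below) and then retires completed requests on each ring.
      */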
1840 void
1841 i915_gem_retire_requests(struct drm_device *dev)
1842 {
1843         drm_i915_private_t *dev_priv = dev->dev_private;
1844         int i;
1845
1846         if (!list_empty(&dev_priv->mm.deferred_free_list)) {
1847                 struct drm_i915_gem_object *obj, *next;
1848
1849                 /* We must be careful that during unbind() we do not
1850                  * accidentally infinitely recurse into retire requests.
1851                  * Currently:
1852                  *   retire -> free -> unbind -> wait -> retire_ring
1853                  */
1854                 list_for_each_entry_safe(obj, next,
1855                                          &dev_priv->mm.deferred_free_list,
1856                                          mm_list)
1857                         i915_gem_free_object_tail(obj);
1858         }
1859
1860         for (i = 0; i < I915_NUM_RINGS; i++)
1861                 i915_gem_retire_requests_ring(&dev_priv->ring[i]);
1862 }
1863
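     /*
      * i915_gem_retire_work_handler - periodic request retirement
      *
      * Runs from the delayed work queue: retires completed requests, sends
      * a flush down any ring that still has pending GPU writes so objects
      * are not held indefinitely, and re-arms itself while the GPU remains
      * busy.
      */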
1864 static void
1865 i915_gem_retire_work_handler(struct work_struct *work)
1866 {
1867         drm_i915_private_t *dev_priv;
1868         struct drm_device *dev;
1869         bool idle;
1870         int i;
1871
1872         dev_priv = container_of(work, drm_i915_private_t,
1873                                 mm.retire_work.work);
1874         dev = dev_priv->dev;
1875
1876         /* Come back later if the device is busy... */
1877         if (!mutex_trylock(&dev->struct_mutex)) {
1878                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
1879                 return;
1880         }
1881
1882         i915_gem_retire_requests(dev);
1883
1884         /* Send a periodic flush down the ring so we don't hold onto GEM
1885          * objects indefinitely.
1886          */
1887         idle = true;
1888         for (i = 0; i < I915_NUM_RINGS; i++) {
1889                 struct intel_ring_buffer *ring = &dev_priv->ring[i];
1890
1891                 if (!list_empty(&ring->gpu_write_list)) {
1892                         struct drm_i915_gem_request *request;
1893                         int ret;
1894
1895                         ret = i915_gem_flush_ring(ring,
1896                                                   0, I915_GEM_GPU_DOMAINS);
1897                         request = kzalloc(sizeof(*request), GFP_KERNEL);
1898                         if (ret || request == NULL ||
1899                             i915_add_request(ring, NULL, request))
1900                                 kfree(request);
1901                 }
1902
1903                 idle &= list_empty(&ring->request_list);
1904         }
1905
1906         if (!dev_priv->mm.suspended && !idle)
1907                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
1908
1909         mutex_unlock(&dev->struct_mutex);
1910 }
1911
1912 /**
1913  * Waits for a sequence number to be signaled, and cleans up the
1914  * request and object lists appropriately for that event.
1915  */
1916 int
1917 i915_wait_request(struct intel_ring_buffer *ring,
1918                   uint32_t seqno,
1919                   bool do_retire)
1920 {
1921         drm_i915_private_t *dev_priv = ring->dev->dev_private;
1922         u32 ier;
1923         int ret = 0;
1924
1925         BUG_ON(seqno == 0);
1926
1927         if (atomic_read(&dev_priv->mm.wedged)) {
1928                 struct completion *x = &dev_priv->error_completion;
1929                 bool recovery_complete;
1930                 unsigned long flags;
1931
1932                 /* Give the error handler a chance to run. */
1933                 spin_lock_irqsave(&x->wait.lock, flags);
1934                 recovery_complete = x->done > 0;
1935                 spin_unlock_irqrestore(&x->wait.lock, flags);
1936
1937                 return recovery_complete ? -EIO : -EAGAIN;
1938         }
1939
1940         if (seqno == ring->outstanding_lazy_request) {
1941                 struct drm_i915_gem_request *request;
1942
1943                 request = kzalloc(sizeof(*request), GFP_KERNEL);
1944                 if (request == NULL)
1945                         return -ENOMEM;
1946
1947                 ret = i915_add_request(ring, NULL, request);
1948                 if (ret) {
1949                         kfree(request);
1950                         return ret;
1951                 }
1952
1953                 seqno = request->seqno;
1954         }
1955
1956         if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
1957                 if (HAS_PCH_SPLIT(ring->dev))
1958                         ier = I915_READ(DEIER) | I915_READ(GTIER);
1959                 else
1960                         ier = I915_READ(IER);
1961                 if (!ier) {
1962                         DRM_ERROR("something (likely vbetool) disabled "
1963                                   "interrupts, re-enabling\n");
1964                         ring->dev->driver->irq_preinstall(ring->dev);
1965                         ring->dev->driver->irq_postinstall(ring->dev);
1966                 }
1967
1968                 trace_i915_gem_request_wait_begin(ring, seqno);
1969
1970                 ring->waiting_seqno = seqno;
1971                 if (ring->irq_get(ring)) {
1972                         if (dev_priv->mm.interruptible)
1973                                 ret = wait_event_interruptible(ring->irq_queue,
1974                                                                i915_seqno_passed(ring->get_seqno(ring), seqno)
1975                                                                || atomic_read(&dev_priv->mm.wedged));
1976                         else
1977                                 wait_event(ring->irq_queue,
1978                                            i915_seqno_passed(ring->get_seqno(ring), seqno)
1979                                            || atomic_read(&dev_priv->mm.wedged));
1980
1981                         ring->irq_put(ring);
1982                 } else if (wait_for_atomic(i915_seqno_passed(ring->get_seqno(ring),
1983                                                              seqno) ||
1984                                            atomic_read(&dev_priv->mm.wedged), 3000))
1985                         ret = -EBUSY;
1986                 ring->waiting_seqno = 0;
1987
1988                 trace_i915_gem_request_wait_end(ring, seqno);
1989         }
1990         if (atomic_read(&dev_priv->mm.wedged))
1991                 ret = -EAGAIN;
1992
1993         /* Directly dispatch request retiring.  While we have the work queue
1994          * to handle this, the waiter on a request often wants an associated
1995          * buffer to have made it to the inactive list, and we would need
1996          * a separate wait queue to handle that.
1997          */
1998         if (ret == 0 && do_retire)
1999                 i915_gem_retire_requests_ring(ring);
2000
2001         return ret;
2002 }
2003
2004 /**
2005  * Ensures that all rendering to the object has completed and the object is
2006  * safe to unbind from the GTT or access from the CPU.
2007  */
2008 int
2009 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
2010 {
2011         int ret;
2012
2013         /* This function only exists to support waiting for existing rendering,
2014          * not for emitting required flushes.
2015          */
2016         BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0);
2017
2018         /* If there is rendering queued on the buffer being evicted, wait for
2019          * it.
2020          */
2021         if (obj->active) {
2022                 ret = i915_wait_request(obj->ring, obj->last_rendering_seqno,
2023                                         true);
2024                 if (ret)
2025                         return ret;
2026         }
2027
2028         return 0;
2029 }
2030
2031 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2032 {
2033         u32 old_write_domain, old_read_domains;
2034
2035         /* Act as a barrier for all accesses through the GTT */
2036         mb();
2037
2038         /* Force a pagefault for domain tracking on next user access */
2039         i915_gem_release_mmap(obj);
2040
2041         if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2042                 return;
2043
2044         old_read_domains = obj->base.read_domains;
2045         old_write_domain = obj->base.write_domain;
2046
2047         obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2048         obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2049
2050         trace_i915_gem_object_change_domain(obj,
2051                                             old_read_domains,
2052                                             old_write_domain);
2053 }
2054
2055 /**
2056  * Unbinds an object from the GTT aperture.
2057  */
2058 int
2059 i915_gem_object_unbind(struct drm_i915_gem_object *obj)
2060 {
2061         drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
2062         int ret = 0;
2063
2064         if (obj->gtt_space == NULL)
2065                 return 0;
2066
2067         if (obj->pin_count != 0) {
2068                 DRM_ERROR("Attempting to unbind pinned buffer\n");
2069                 return -EINVAL;
2070         }
2071
2072         ret = i915_gem_object_finish_gpu(obj);
2073         if (ret == -ERESTARTSYS)
2074                 return ret;
2075         /* Continue on if we fail due to EIO, the GPU is hung so we
2076          * should be safe and we need to cleanup or else we might
2077          * cause memory corruption through use-after-free.
2078          */
2079
2080         i915_gem_object_finish_gtt(obj);
2081
2082         /* Move the object to the CPU domain to ensure that
2083          * any possible CPU writes while it's not in the GTT
2084          * are flushed when we go to remap it.
2085          */
2086         if (ret == 0)
2087                 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
2088         if (ret == -ERESTARTSYS)
2089                 return ret;
2090         if (ret) {
2091                 /* In the event of a disaster, abandon all caches and
2092                  * hope for the best.
2093                  */
2094                 i915_gem_clflush_object(obj);
2095                 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
2096         }
2097
2098         /* release the fence reg _after_ flushing */
2099         ret = i915_gem_object_put_fence(obj);
2100         if (ret == -ERESTARTSYS)
2101                 return ret;
2102
2103         trace_i915_gem_object_unbind(obj);
2104
2105         i915_gem_gtt_unbind_object(obj);
2106         if (obj->has_aliasing_ppgtt_mapping) {
2107                 i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
2108                 obj->has_aliasing_ppgtt_mapping = 0;
2109         }
2110
2111         i915_gem_object_put_pages_gtt(obj);
2112
2113         list_del_init(&obj->gtt_list);
2114         list_del_init(&obj->mm_list);
2115         /* Avoid an unnecessary call to unbind on rebind. */
2116         obj->map_and_fenceable = true;
2117
2118         drm_mm_put_block(obj->gtt_space);
2119         obj->gtt_space = NULL;
2120         obj->gtt_offset = 0;
2121
2122         if (i915_gem_object_is_purgeable(obj))
2123                 i915_gem_object_truncate(obj);
2124
2125         return ret;
2126 }
2127
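     /*
      * i915_gem_flush_ring - emit a GPU flush/invalidate on a ring
      *
      * A no-op unless a GPU domain is involved; otherwise emits the flush
      * and then processes the ring's gpu_write_list so the flushed objects
      * are moved to the active list under the new request seqno.
      */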
2128 int
2129 i915_gem_flush_ring(struct intel_ring_buffer *ring,
2130                     uint32_t invalidate_domains,
2131                     uint32_t flush_domains)
2132 {
2133         int ret;
2134
2135         if (((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) == 0)
2136                 return 0;
2137
2138         trace_i915_gem_ring_flush(ring, invalidate_domains, flush_domains);
2139
2140         ret = ring->flush(ring, invalidate_domains, flush_domains);
2141         if (ret)
2142                 return ret;
2143
2144         if (flush_domains & I915_GEM_GPU_DOMAINS)
2145                 i915_gem_process_flushing_list(ring, flush_domains);
2146
2147         return 0;
2148 }
2149
2150 static int i915_ring_idle(struct intel_ring_buffer *ring, bool do_retire)
2151 {
2152         int ret;
2153
2154         if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
2155                 return 0;
2156
2157         if (!list_empty(&ring->gpu_write_list)) {
2158                 ret = i915_gem_flush_ring(ring,
2159                                     I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
2160                 if (ret)
2161                         return ret;
2162         }
2163
2164         return i915_wait_request(ring, i915_gem_next_request_seqno(ring),
2165                                  do_retire);
2166 }
2167
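     /*
      * i915_gpu_idle - wait for the GPU to go idle
      *
      * Idles every ring in turn (see i915_ring_idle() above): pending GPU
      * writes are flushed and the ring's most recent request seqno is then
      * waited upon, optionally retiring requests (@do_retire).
      */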
2168 int i915_gpu_idle(struct drm_device *dev, bool do_retire)
2169 {
2170         drm_i915_private_t *dev_priv = dev->dev_private;
2171         int ret, i;
2172
2173         /* Flush everything onto the inactive list. */
2174         for (i = 0; i < I915_NUM_RINGS; i++) {
2175                 ret = i915_ring_idle(&dev_priv->ring[i], do_retire);
2176                 if (ret)
2177                         return ret;
2178         }
2179
2180         return 0;
2181 }
2182
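     /*
      * Per-generation fence register writers.  Each one packs the object's
      * GTT range, stride and tiling mode into that generation's fence
      * register layout and either writes the value directly via
      * I915_WRITE/I915_WRITE64 or, when @pipelined is given, emits
      * MI_LOAD_REGISTER_IMM commands so the ring performs the update.
      */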
2183 static int sandybridge_write_fence_reg(struct drm_i915_gem_object *obj,
2184                                        struct intel_ring_buffer *pipelined)
2185 {
2186         struct drm_device *dev = obj->base.dev;
2187         drm_i915_private_t *dev_priv = dev->dev_private;
2188         u32 size = obj->gtt_space->size;
2189         int regnum = obj->fence_reg;
2190         uint64_t val;
2191
2192         val = (uint64_t)((obj->gtt_offset + size - 4096) &
2193                          0xfffff000) << 32;
2194         val |= obj->gtt_offset & 0xfffff000;
2195         val |= (uint64_t)((obj->stride / 128) - 1) <<
2196                 SANDYBRIDGE_FENCE_PITCH_SHIFT;
2197
2198         if (obj->tiling_mode == I915_TILING_Y)
2199                 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2200         val |= I965_FENCE_REG_VALID;
2201
2202         if (pipelined) {
2203                 int ret = intel_ring_begin(pipelined, 6);
2204                 if (ret)
2205                         return ret;
2206
2207                 intel_ring_emit(pipelined, MI_NOOP);
2208                 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
2209                 intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8);
2210                 intel_ring_emit(pipelined, (u32)val);
2211                 intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8 + 4);
2212                 intel_ring_emit(pipelined, (u32)(val >> 32));
2213                 intel_ring_advance(pipelined);
2214         } else
2215                 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + regnum * 8, val);
2216
2217         return 0;
2218 }
2219
2220 static int i965_write_fence_reg(struct drm_i915_gem_object *obj,
2221                                 struct intel_ring_buffer *pipelined)
2222 {
2223         struct drm_device *dev = obj->base.dev;
2224         drm_i915_private_t *dev_priv = dev->dev_private;
2225         u32 size = obj->gtt_space->size;
2226         int regnum = obj->fence_reg;
2227         uint64_t val;
2228
2229         val = (uint64_t)((obj->gtt_offset + size - 4096) &
2230                     0xfffff000) << 32;
2231         val |= obj->gtt_offset & 0xfffff000;
2232         val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
2233         if (obj->tiling_mode == I915_TILING_Y)
2234                 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2235         val |= I965_FENCE_REG_VALID;
2236
2237         if (pipelined) {
2238                 int ret = intel_ring_begin(pipelined, 6);
2239                 if (ret)
2240                         return ret;
2241
2242                 intel_ring_emit(pipelined, MI_NOOP);
2243                 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
2244                 intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8);
2245                 intel_ring_emit(pipelined, (u32)val);
2246                 intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8 + 4);
2247                 intel_ring_emit(pipelined, (u32)(val >> 32));
2248                 intel_ring_advance(pipelined);
2249         } else
2250                 I915_WRITE64(FENCE_REG_965_0 + regnum * 8, val);
2251
2252         return 0;
2253 }
2254
2255 static int i915_write_fence_reg(struct drm_i915_gem_object *obj,
2256                                 struct intel_ring_buffer *pipelined)
2257 {
2258         struct drm_device *dev = obj->base.dev;
2259         drm_i915_private_t *dev_priv = dev->dev_private;
2260         u32 size = obj->gtt_space->size;
2261         u32 fence_reg, val, pitch_val;
2262         int tile_width;
2263
2264         if (WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
2265                  (size & -size) != size ||
2266                  (obj->gtt_offset & (size - 1)),
2267                  "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2268                  obj->gtt_offset, obj->map_and_fenceable, size))
2269                 return -EINVAL;
2270
2271         if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
2272                 tile_width = 128;
2273         else
2274                 tile_width = 512;
2275
2276         /* Note: the stride must be a power-of-two number of tile widths */
2277         pitch_val = obj->stride / tile_width;
2278         pitch_val = ffs(pitch_val) - 1;
2279
2280         val = obj->gtt_offset;
2281         if (obj->tiling_mode == I915_TILING_Y)
2282                 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2283         val |= I915_FENCE_SIZE_BITS(size);
2284         val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2285         val |= I830_FENCE_REG_VALID;
2286
2287         fence_reg = obj->fence_reg;
2288         if (fence_reg < 8)
2289                 fence_reg = FENCE_REG_830_0 + fence_reg * 4;
2290         else
2291                 fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
2292
2293         if (pipelined) {
2294                 int ret = intel_ring_begin(pipelined, 4);
2295                 if (ret)
2296                         return ret;
2297
2298                 intel_ring_emit(pipelined, MI_NOOP);
2299                 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
2300                 intel_ring_emit(pipelined, fence_reg);
2301                 intel_ring_emit(pipelined, val);
2302                 intel_ring_advance(pipelined);
2303         } else
2304                 I915_WRITE(fence_reg, val);
2305
2306         return 0;
2307 }
2308
2309 static int i830_write_fence_reg(struct drm_i915_gem_object *obj,
2310                                 struct intel_ring_buffer *pipelined)
2311 {
2312         struct drm_device *dev = obj->base.dev;
2313         drm_i915_private_t *dev_priv = dev->dev_private;
2314         u32 size = obj->gtt_space->size;
2315         int regnum = obj->fence_reg;
2316         uint32_t val;
2317         uint32_t pitch_val;
2318
2319         if (WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
2320                  (size & -size) != size ||
2321                  (obj->gtt_offset & (size - 1)),
2322                  "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
2323                  obj->gtt_offset, size))
2324                 return -EINVAL;
2325
2326         pitch_val = obj->stride / 128;
2327         pitch_val = ffs(pitch_val) - 1;
2328
2329         val = obj->gtt_offset;
2330         if (obj->tiling_mode == I915_TILING_Y)
2331                 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2332         val |= I830_FENCE_SIZE_BITS(size);
2333         val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2334         val |= I830_FENCE_REG_VALID;
2335
2336         if (pipelined) {
2337                 int ret = intel_ring_begin(pipelined, 4);
2338                 if (ret)
2339                         return ret;
2340
2341                 intel_ring_emit(pipelined, MI_NOOP);
2342                 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
2343                 intel_ring_emit(pipelined, FENCE_REG_830_0 + regnum*4);
2344                 intel_ring_emit(pipelined, val);
2345                 intel_ring_advance(pipelined);
2346         } else
2347                 I915_WRITE(FENCE_REG_830_0 + regnum * 4, val);
2348
2349         return 0;
2350 }
2351
2352 static bool ring_passed_seqno(struct intel_ring_buffer *ring, u32 seqno)
2353 {
2354         return i915_seqno_passed(ring->get_seqno(ring), seqno);
2355 }
2356
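     /*
      * i915_gem_object_flush_fence - quiesce fenced access to an object
      *
      * Flushes any outstanding GPU writes made through the fence, waits for
      * the last fenced rendering if it was issued on a ring other than
      * @pipelined, and inserts a memory barrier so CPU accesses through the
      * GTT are ordered against the upcoming fence change.
      */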
2357 static int
2358 i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
2359                             struct intel_ring_buffer *pipelined)
2360 {
2361         int ret;
2362
2363         if (obj->fenced_gpu_access) {
2364                 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
2365                         ret = i915_gem_flush_ring(obj->last_fenced_ring,
2366                                                   0, obj->base.write_domain);
2367                         if (ret)
2368                                 return ret;
2369                 }
2370
2371                 obj->fenced_gpu_access = false;
2372         }
2373
2374         if (obj->last_fenced_seqno && pipelined != obj->last_fenced_ring) {
2375                 if (!ring_passed_seqno(obj->last_fenced_ring,
2376                                        obj->last_fenced_seqno)) {
2377                         ret = i915_wait_request(obj->last_fenced_ring,
2378                                                 obj->last_fenced_seqno,
2379                                                 true);
2380                         if (ret)
2381                                 return ret;
2382                 }
2383
2384                 obj->last_fenced_seqno = 0;
2385                 obj->last_fenced_ring = NULL;
2386         }
2387
2388         /* Ensure that all CPU reads are completed before installing a fence
2389          * and all writes before removing the fence.
2390          */
2391         if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
2392                 mb();
2393
2394         return 0;
2395 }
2396
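     /*
      * i915_gem_object_put_fence - release an object's fence register
      *
      * Drops any GTT mmaps of a tiled object, waits for outstanding fenced
      * access, and then clears the fence register (if one is held) and
      * returns it to the pool.
      */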
2397 int
2398 i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
2399 {
2400         int ret;
2401
2402         if (obj->tiling_mode)
2403                 i915_gem_release_mmap(obj);
2404
2405         ret = i915_gem_object_flush_fence(obj, NULL);
2406         if (ret)
2407                 return ret;
2408
2409         if (obj->fence_reg != I915_FENCE_REG_NONE) {
2410                 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2411
2412                 WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count);
2413                 i915_gem_clear_fence_reg(obj->base.dev,
2414                                          &dev_priv->fence_regs[obj->fence_reg]);
2415
2416                 obj->fence_reg = I915_FENCE_REG_NONE;
2417         }
2418
2419         return 0;
2420 }
2421
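     /*
      * i915_find_fence_reg - pick a fence register to (re)use
      *
      * Prefers a completely free register; failing that, returns the least
      * recently used unpinned register, preferring one whose current owner
      * was last fenced on @pipelined (or not fenced on any ring).  Returns
      * NULL only when there is no free register and every occupied one is
      * pinned.
      */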
2422 static struct drm_i915_fence_reg *
2423 i915_find_fence_reg(struct drm_device *dev,
2424                     struct intel_ring_buffer *pipelined)
2425 {
2426         struct drm_i915_private *dev_priv = dev->dev_private;
2427         struct drm_i915_fence_reg *reg, *first, *avail;
2428         int i;
2429
2430         /* First try to find a free reg */
2431         avail = NULL;
2432         for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2433                 reg = &dev_priv->fence_regs[i];
2434                 if (!reg->obj)
2435                         return reg;
2436
2437                 if (!reg->pin_count)
2438                         avail = reg;
2439         }
2440
2441         if (avail == NULL)
2442                 return NULL;
2443
2444         /* None available, try to steal one or wait for a user to finish */
2445         avail = first = NULL;
2446         list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
2447                 if (reg->pin_count)
2448                         continue;
2449
2450                 if (first == NULL)
2451                         first = reg;
2452
2453                 if (!pipelined ||
2454                     !reg->obj->last_fenced_ring ||
2455                     reg->obj->last_fenced_ring == pipelined) {
2456                         avail = reg;
2457                         break;
2458                 }
2459         }
2460
2461         if (avail == NULL)
2462                 avail = first;
2463
2464         return avail;
2465 }
2466
2467 /**
2468  * i915_gem_object_get_fence - set up a fence reg for an object
2469  * @obj: object to map through a fence reg
2470  * @pipelined: ring on which to queue the change, or NULL for CPU access
2471  *             (any waits performed here honour dev_priv->mm.interruptible)
2472  *
2473  * When mapping objects through the GTT, userspace wants to be able to write
2474  * to them without having to worry about swizzling if the object is tiled.
2475  *
2476  * This function walks the fence regs looking for a free one for @obj,
2477  * stealing one if it can't find any.
2478  *
2479  * It then sets up the reg based on the object's properties: address, pitch
2480  * and tiling format.
2481  */
2482 int
2483 i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
2484                           struct intel_ring_buffer *pipelined)
2485 {
2486         struct drm_device *dev = obj->base.dev;
2487         struct drm_i915_private *dev_priv = dev->dev_private;
2488         struct drm_i915_fence_reg *reg;
2489         int ret;
2490
2491         /* XXX disable pipelining. There are bugs. Shocking. */
2492         pipelined = NULL;
2493
2494         /* Just update our place in the LRU if our fence is getting reused. */
2495         if (obj->fence_reg != I915_FENCE_REG_NONE) {
2496                 reg = &dev_priv->fence_regs[obj->fence_reg];
2497                 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
2498
2499                 if (obj->tiling_changed) {
2500                         ret = i915_gem_object_flush_fence(obj, pipelined);
2501                         if (ret)
2502                                 return ret;
2503
2504                         if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
2505                                 pipelined = NULL;
2506
2507                         if (pipelined) {
2508                                 reg->setup_seqno =
2509                                         i915_gem_next_request_seqno(pipelined);
2510                                 obj->last_fenced_seqno = reg->setup_seqno;
2511                                 obj->last_fenced_ring = pipelined;
2512                         }
2513
2514                         goto update;
2515                 }
2516
2517                 if (!pipelined) {
2518                         if (reg->setup_seqno) {
2519                                 if (!ring_passed_seqno(obj->last_fenced_ring,
2520                                                        reg->setup_seqno)) {
2521                                         ret = i915_wait_request(obj->last_fenced_ring,
2522                                                                 reg->setup_seqno,
2523                                                                 true);
2524                                         if (ret)
2525                                                 return ret;
2526                                 }
2527
2528                                 reg->setup_seqno = 0;
2529                         }
2530                 } else if (obj->last_fenced_ring &&
2531                            obj->last_fenced_ring != pipelined) {
2532                         ret = i915_gem_object_flush_fence(obj, pipelined);
2533                         if (ret)
2534                                 return ret;
2535                 }
2536
2537                 return 0;
2538         }
2539
2540         reg = i915_find_fence_reg(dev, pipelined);
2541         if (reg == NULL)
2542                 return -EDEADLK;
2543
2544         ret = i915_gem_object_flush_fence(obj, pipelined);
2545         if (ret)
2546                 return ret;
2547
2548         if (reg->obj) {
2549                 struct drm_i915_gem_object *old = reg->obj;
2550
2551                 drm_gem_object_reference(&old->base);
2552
2553                 if (old->tiling_mode)
2554                         i915_gem_release_mmap(old);
2555
2556                 ret = i915_gem_object_flush_fence(old, pipelined);
2557                 if (ret) {
2558                         drm_gem_object_unreference(&old->base);
2559                         return ret;
2560                 }
2561
2562                 if (old->last_fenced_seqno == 0 && obj->last_fenced_seqno == 0)
2563                         pipelined = NULL;
2564
2565                 old->fence_reg = I915_FENCE_REG_NONE;
2566                 old->last_fenced_ring = pipelined;
2567                 old->last_fenced_seqno =
2568                         pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
2569
2570                 drm_gem_object_unreference(&old->base);
2571         } else if (obj->last_fenced_seqno == 0)
2572                 pipelined = NULL;
2573
2574         reg->obj = obj;
2575         list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
2576         obj->fence_reg = reg - dev_priv->fence_regs;
2577         obj->last_fenced_ring = pipelined;
2578
2579         reg->setup_seqno =
2580                 pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
2581         obj->last_fenced_seqno = reg->setup_seqno;
2582
2583 update:
2584         obj->tiling_changed = false;
2585         switch (INTEL_INFO(dev)->gen) {
2586         case 7:
2587         case 6:
2588                 ret = sandybridge_write_fence_reg(obj, pipelined);
2589                 break;
2590         case 5:
2591         case 4:
2592                 ret = i965_write_fence_reg(obj, pipelined);
2593                 break;
2594         case 3:
2595                 ret = i915_write_fence_reg(obj, pipelined);
2596                 break;
2597         case 2:
2598                 ret = i830_write_fence_reg(obj, pipelined);
2599                 break;
2600         }
2601
2602         return ret;
2603 }
2604
2605 /**
2606  * i915_gem_clear_fence_reg - clear out fence register info
2607  * @obj: object to clear
2608  *
2609  * Zeroes out the fence register itself and clears out the associated
2610  * data structures in dev_priv and obj.
2611  */
2612 static void
2613 i915_gem_clear_fence_reg(struct drm_device *dev,
2614                          struct drm_i915_fence_reg *reg)
2615 {
2616         drm_i915_private_t *dev_priv = dev->dev_private;
2617         uint32_t fence_reg = reg - dev_priv->fence_regs;
2618
2619         switch (INTEL_INFO(dev)->gen) {
2620         case 7:
2621         case 6:
2622                 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + fence_reg*8, 0);
2623                 break;
2624         case 5:
2625         case 4:
2626                 I915_WRITE64(FENCE_REG_965_0 + fence_reg*8, 0);
2627                 break;
2628         case 3:
2629                 if (fence_reg >= 8)
2630                         fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
2631                 else
2632         case 2:
2633                         fence_reg = FENCE_REG_830_0 + fence_reg * 4;
2634
2635                 I915_WRITE(fence_reg, 0);
2636                 break;
2637         }
2638
2639         list_del_init(&reg->lru_list);
2640         reg->obj = NULL;
2641         reg->setup_seqno = 0;
2642         reg->pin_count = 0;
2643 }
2644
2645 /**
2646  * Finds free space in the GTT aperture and binds the object there.
2647  */
2648 static int
2649 i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
2650                             unsigned alignment,
2651                             bool map_and_fenceable)
2652 {
2653         struct drm_device *dev = obj->base.dev;
2654         drm_i915_private_t *dev_priv = dev->dev_private;
2655         struct drm_mm_node *free_space;
2656         gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
2657         u32 size, fence_size, fence_alignment, unfenced_alignment;
2658         bool mappable, fenceable;
2659         int ret;
2660
2661         if (obj->madv != I915_MADV_WILLNEED) {
2662                 DRM_ERROR("Attempting to bind a purgeable object\n");
2663                 return -EINVAL;
2664         }
2665
2666         fence_size = i915_gem_get_gtt_size(dev,
2667                                            obj->base.size,
2668                                            obj->tiling_mode);
2669         fence_alignment = i915_gem_get_gtt_alignment(dev,
2670                                                      obj->base.size,
2671                                                      obj->tiling_mode);
2672         unfenced_alignment =
2673                 i915_gem_get_unfenced_gtt_alignment(dev,
2674                                                     obj->base.size,
2675                                                     obj->tiling_mode);
2676
2677         if (alignment == 0)
2678                 alignment = map_and_fenceable ? fence_alignment :
2679                                                 unfenced_alignment;
2680         if (map_and_fenceable && alignment & (fence_alignment - 1)) {
2681                 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
2682                 return -EINVAL;
2683         }
2684
2685         size = map_and_fenceable ? fence_size : obj->base.size;
2686
2687         /* If the object is bigger than the entire aperture, reject it early
2688          * before evicting everything in a vain attempt to find space.
2689          */
2690         if (obj->base.size >
2691             (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
2692                 DRM_ERROR("Attempting to bind an object larger than the aperture\n");
2693                 return -E2BIG;
2694         }
2695
2696  search_free:
2697         if (map_and_fenceable)
2698                 free_space =
2699                         drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
2700                                                     size, alignment, 0,
2701                                                     dev_priv->mm.gtt_mappable_end,
2702                                                     0);
2703         else
2704                 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
2705                                                 size, alignment, 0);
2706
2707         if (free_space != NULL) {
2708                 if (map_and_fenceable)
2709                         obj->gtt_space =
2710                                 drm_mm_get_block_range_generic(free_space,
2711                                                                size, alignment, 0,
2712                                                                dev_priv->mm.gtt_mappable_end,
2713                                                                0);
2714                 else
2715                         obj->gtt_space =
2716                                 drm_mm_get_block(free_space, size, alignment);
2717         }
2718         if (obj->gtt_space == NULL) {
2719                 /* No free space: evict something and retry.  If the GTT is
2720                  * empty and the object still does not fit, we are out of memory.
2721                  */
2722                 ret = i915_gem_evict_something(dev, size, alignment,
2723                                                map_and_fenceable);
2724                 if (ret)
2725                         return ret;
2726
2727                 goto search_free;
2728         }
2729
2730         ret = i915_gem_object_get_pages_gtt(obj, gfpmask);
2731         if (ret) {
2732                 drm_mm_put_block(obj->gtt_space);
2733                 obj->gtt_space = NULL;
2734
2735                 if (ret == -ENOMEM) {
2736                         /* first try to reclaim some memory by clearing the GTT */
2737                         ret = i915_gem_evict_everything(dev, false);
2738                         if (ret) {
2739                                 /* now try to shrink everyone else */
2740                                 if (gfpmask) {
2741                                         gfpmask = 0;
2742                                         goto search_free;
2743                                 }
2744
2745                                 return -ENOMEM;
2746                         }
2747
2748                         goto search_free;
2749                 }
2750
2751                 return ret;
2752         }
2753
2754         ret = i915_gem_gtt_bind_object(obj);
2755         if (ret) {
2756                 i915_gem_object_put_pages_gtt(obj);
2757                 drm_mm_put_block(obj->gtt_space);
2758                 obj->gtt_space = NULL;
2759
2760                 if (i915_gem_evict_everything(dev, false))
2761                         return ret;
2762
2763                 goto search_free;
2764         }
2765
2766         list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
2767         list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
2768
2769         /* Assert that the object is not currently in any GPU domain. As it
2770          * wasn't in the GTT, there shouldn't be any way it could have been in
2771          * a GPU cache
2772          */
2773         BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2774         BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
2775
2776         obj->gtt_offset = obj->gtt_space->start;
2777
2778         fenceable =
2779                 obj->gtt_space->size == fence_size &&
2780                 (obj->gtt_space->start & (fence_alignment - 1)) == 0;
2781
2782         mappable =
2783                 obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
2784
2785         obj->map_and_fenceable = mappable && fenceable;
2786
2787         trace_i915_gem_object_bind(obj, map_and_fenceable);
2788         return 0;
2789 }
2790
2791 void
2792 i915_gem_clflush_object(struct drm_i915_gem_object *obj)
2793 {
2794         /* If we don't have a page list set up, then we're not pinned
2795          * to GPU, and we can ignore the cache flush because it'll happen
2796          * again at bind time.
2797          */
2798         if (obj->pages == NULL)
2799                 return;
2800
2801         /* If the GPU is snooping the contents of the CPU cache,
2802          * we do not need to manually clear the CPU cache lines.  However,
2803          * the caches are only snooped when the render cache is
2804          * flushed/invalidated.  As we always have to emit invalidations
2805          * and flushes when moving into and out of the RENDER domain, correct
2806          * snooping behaviour occurs naturally as the result of our domain
2807          * tracking.
2808          */
2809         if (obj->cache_level != I915_CACHE_NONE)
2810                 return;
2811
2812         trace_i915_gem_object_clflush(obj);
2813
2814         drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
2815 }
2816
2817 /** Flushes any GPU write domain for the object if it's dirty. */
2818 static int
2819 i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
2820 {
2821         if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
2822                 return 0;
2823
2824         /* Queue the GPU write cache flushing we need. */
2825         return i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
2826 }
2827
2828 /** Flushes the GTT write domain for the object if it's dirty. */
2829 static void
2830 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
2831 {
2832         uint32_t old_write_domain;
2833
2834         if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
2835                 return;
2836
2837         /* No actual flushing is required for the GTT write domain.  Writes
2838          * to it immediately go to main memory as far as we know, so there's
2839          * no chipset flush.  It also doesn't land in render cache.
2840          *
2841          * However, we do have to enforce the order so that all writes through
2842          * the GTT land before any writes to the device, such as updates to
2843          * the GATT itself.
2844          */
2845         wmb();
2846
2847         old_write_domain = obj->base.write_domain;
2848         obj->base.write_domain = 0;
2849
2850         trace_i915_gem_object_change_domain(obj,
2851                                             obj->base.read_domains,
2852                                             old_write_domain);
2853 }
2854
2855 /** Flushes the CPU write domain for the object if it's dirty. */
2856 static void
2857 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
2858 {
2859         uint32_t old_write_domain;
2860
2861         if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
2862                 return;
2863
2864         i915_gem_clflush_object(obj);
2865         intel_gtt_chipset_flush();
2866         old_write_domain = obj->base.write_domain;
2867         obj->base.write_domain = 0;
2868
2869         trace_i915_gem_object_change_domain(obj,
2870                                             obj->base.read_domains,
2871                                             old_write_domain);
2872 }
2873
2874 /**
2875  * Moves a single object to the GTT read, and possibly write domain.
2876  *
2877  * This function returns when the move is complete, including waiting on
2878  * flushes to occur.
2879  */
2880 int
2881 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
2882 {
2883         uint32_t old_write_domain, old_read_domains;
2884         int ret;
2885
2886         /* Not valid to be called on unbound objects. */
2887         if (obj->gtt_space == NULL)
2888                 return -EINVAL;
2889
2890         if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
2891                 return 0;
2892
2893         ret = i915_gem_object_flush_gpu_write_domain(obj);
2894         if (ret)
2895                 return ret;
2896
2897         if (obj->pending_gpu_write || write) {
2898                 ret = i915_gem_object_wait_rendering(obj);
2899                 if (ret)
2900                         return ret;
2901         }
2902
2903         i915_gem_object_flush_cpu_write_domain(obj);
2904
2905         old_write_domain = obj->base.write_domain;
2906         old_read_domains = obj->base.read_domains;
2907
2908         /* It should now be out of any other write domains, and we can update
2909          * the domain values for our changes.
2910          */
2911         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
2912         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
2913         if (write) {
2914                 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
2915                 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
2916                 obj->dirty = 1;
2917         }
2918
2919         trace_i915_gem_object_change_domain(obj,
2920                                             old_read_domains,
2921                                             old_write_domain);
2922
2923         return 0;
2924 }
2925
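     /*
      * i915_gem_object_set_cache_level - change an object's caching mode
      *
      * Rejects pinned objects.  If the object is bound, finishes GPU and
      * GTT activity, drops any fence on pre-SandyBridge hardware (fences
      * and snooped memory do not mix there) and rewrites the (PP)GTT
      * entries with the new cache level.  Switching to uncached also pulls
      * the object back into the CPU domain, since domain tracking was
      * skipped while it was LLC cached.
      */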
2926 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
2927                                     enum i915_cache_level cache_level)
2928 {
2929         struct drm_device *dev = obj->base.dev;
2930         drm_i915_private_t *dev_priv = dev->dev_private;
2931         int ret;
2932
2933         if (obj->cache_level == cache_level)
2934                 return 0;
2935
2936         if (obj->pin_count) {
2937                 DRM_DEBUG("can not change the cache level of pinned objects\n");
2938                 return -EBUSY;
2939         }
2940
2941         if (obj->gtt_space) {
2942                 ret = i915_gem_object_finish_gpu(obj);
2943                 if (ret)
2944                         return ret;
2945
2946                 i915_gem_object_finish_gtt(obj);
2947
2948                 /* Before SandyBridge, you could not use tiling or fence
2949                  * registers with snooped memory, so relinquish any fences
2950                  * currently pointing to our region in the aperture.
2951                  */
2952                 if (INTEL_INFO(obj->base.dev)->gen < 6) {
2953                         ret = i915_gem_object_put_fence(obj);
2954                         if (ret)
2955                                 return ret;
2956                 }
2957
2958                 i915_gem_gtt_rebind_object(obj, cache_level);
2959                 if (obj->has_aliasing_ppgtt_mapping)
2960                         i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
2961                                                obj, cache_level);
2962         }
2963
2964         if (cache_level == I915_CACHE_NONE) {
2965                 u32 old_read_domains, old_write_domain;
2966
2967                 /* If we're coming from LLC cached, then we haven't
2968                  * actually been tracking whether the data is in the
2969                  * CPU cache or not, since we only allow one bit set
2970                  * in obj->write_domain and have been skipping the clflushes.
2971                  * Just set it to the CPU cache for now.
2972                  */
2973                 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
2974                 WARN_ON(obj->base.read_domains & ~I915_GEM_DOMAIN_CPU);
2975
2976                 old_read_domains = obj->base.read_domains;
2977                 old_write_domain = obj->base.write_domain;
2978
2979                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
2980                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
2981
2982                 trace_i915_gem_object_change_domain(obj,
2983                                                     old_read_domains,
2984                                                     old_write_domain);
2985         }
2986
2987         obj->cache_level = cache_level;
2988         return 0;
2989 }
2990
2991 /*
2992  * Prepare buffer for display plane (scanout, cursors, etc).
2993  * Can be called from an uninterruptible phase (modesetting) and allows
2994  * any flushes to be pipelined (for pageflips).
2995  *
2996  * For the display plane, we want to be in the GTT but out of any write
2997  * domains. So in many ways this looks like set_to_gtt_domain() apart from the
2998  * ability to pipeline the waits, pinning and any additional subtleties
2999  * that may differentiate the display plane from ordinary buffers.
3000  */
3001 int
3002 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3003                                      u32 alignment,
3004                                      struct intel_ring_buffer *pipelined)
3005 {
3006         u32 old_read_domains, old_write_domain;
3007         int ret;
3008
3009         ret = i915_gem_object_flush_gpu_write_domain(obj);
3010         if (ret)
3011                 return ret;
3012
3013         if (pipelined != obj->ring) {
3014                 ret = i915_gem_object_wait_rendering(obj);
3015                 if (ret == -ERESTARTSYS)
3016                         return ret;
3017         }
3018
3019         /* The display engine is not coherent with the LLC cache on gen6.  As
3020          * a result, we make sure that the pinning that is about to occur is
3021          * done with uncached PTEs. This is the lowest common denominator for
3022          * all chipsets.
3023          *
3024          * However for gen6+, we could do better by using the GFDT bit instead
3025          * of uncaching, which would allow us to flush all the LLC-cached data
3026          * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3027          */
3028         ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
3029         if (ret)
3030                 return ret;
3031
3032         /* As the user may map the buffer once pinned in the display plane
3033          * (e.g. libkms for the bootup splash), we have to ensure that we
3034          * always use map_and_fenceable for all scanout buffers.
3035          */
3036         ret = i915_gem_object_pin(obj, alignment, true);
3037         if (ret)
3038                 return ret;
3039
3040         i915_gem_object_flush_cpu_write_domain(obj);
3041
3042         old_write_domain = obj->base.write_domain;
3043         old_read_domains = obj->base.read_domains;
3044
3045         /* It should now be out of any other write domains, and we can update
3046          * the domain values for our changes.
3047          */
3048         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3049         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3050
3051         trace_i915_gem_object_change_domain(obj,
3052                                             old_read_domains,
3053                                             old_write_domain);
3054
3055         return 0;
3056 }
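
/*
 * Editor's example (illustrative sketch, not from the original source):
 * how modesetting code might use the helper above when setting up a
 * scanout buffer.  Passing a NULL ring asks for a non-pipelined (blocking)
 * transition; "alignment" stands in for whatever the display hardware
 * requires and is an assumption of this sketch.
 *
 *	ret = i915_gem_object_pin_to_display_plane(obj, alignment, NULL);
 *	if (ret)
 *		return ret;
 *
 *	(program the plane registers with obj->gtt_offset)
 *
 *	(once the buffer is no longer being scanned out)
 *	i915_gem_object_unpin(obj);
 */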
3057
3058 int
3059 i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
3060 {
3061         int ret;
3062
3063         if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
3064                 return 0;
3065
3066         if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
3067                 ret = i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
3068                 if (ret)
3069                         return ret;
3070         }
3071
3072         ret = i915_gem_object_wait_rendering(obj);
3073         if (ret)
3074                 return ret;
3075
3076         /* Ensure that we invalidate the GPU's caches and TLBs. */
3077         obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
3078         return 0;
3079 }
3080
3081 /**
3082  * Moves a single object to the CPU read, and possibly write domain.
3083  *
3084  * This function returns when the move is complete, including waiting on
3085  * flushes to occur.
3086  */
3087 static int
3088 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3089 {
3090         uint32_t old_write_domain, old_read_domains;
3091         int ret;
3092
3093         if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3094                 return 0;
3095
3096         ret = i915_gem_object_flush_gpu_write_domain(obj);
3097         if (ret)
3098                 return ret;
3099
3100         ret = i915_gem_object_wait_rendering(obj);
3101         if (ret)
3102                 return ret;
3103
3104         i915_gem_object_flush_gtt_write_domain(obj);
3105
3106         /* If we have a partially-valid cache of the object in the CPU,
3107          * finish invalidating it and free the per-page flags.
3108          */
3109         i915_gem_object_set_to_full_cpu_read_domain(obj);
3110
3111         old_write_domain = obj->base.write_domain;
3112         old_read_domains = obj->base.read_domains;
3113
3114         /* Flush the CPU cache if it's still invalid. */
3115         if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
3116                 i915_gem_clflush_object(obj);
3117
3118                 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3119         }
3120
3121         /* It should now be out of any other write domains, and we can update
3122          * the domain values for our changes.
3123          */
3124         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3125
3126         /* If we're writing through the CPU, then the GPU read domains will
3127          * need to be invalidated at next use.
3128          */
3129         if (write) {
3130                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3131                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3132         }
3133
3134         trace_i915_gem_object_change_domain(obj,
3135                                             old_read_domains,
3136                                             old_write_domain);
3137
3138         return 0;
3139 }
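
/*
 * Editor's example (illustrative sketch, not from the original source):
 * the usual pattern before touching an object's backing pages with the
 * CPU.  It assumes obj->pages is already populated (the object is bound
 * or its pages are otherwise pinned) and that dev->struct_mutex is held;
 * "page" is simply an index chosen by the caller.
 *
 *	ret = i915_gem_object_set_to_cpu_domain(obj, false);
 *	if (ret)
 *		return ret;
 *
 *	vaddr = kmap(obj->pages[page]);
 *	(read the data out of vaddr)
 *	kunmap(obj->pages[page]);
 */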
3140
3141 /**
3142  * Moves the object from a partial CPU read domain to a full one.
3143  *
3144  * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
3145  * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
3146  */
3147 static void
3148 i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj)
3149 {
3150         if (!obj->page_cpu_valid)
3151                 return;
3152
3153         /* If we're partially in the CPU read domain, finish moving it in.
3154          */
3155         if (obj->base.read_domains & I915_GEM_DOMAIN_CPU) {
3156                 int i;
3157
3158                 for (i = 0; i <= (obj->base.size - 1) / PAGE_SIZE; i++) {
3159                         if (obj->page_cpu_valid[i])
3160                                 continue;
3161                         drm_clflush_pages(obj->pages + i, 1);
3162                 }
3163         }
3164
3165         /* Free the page_cpu_valid mappings which are now stale, whether
3166          * or not we've got I915_GEM_DOMAIN_CPU.
3167          */
3168         kfree(obj->page_cpu_valid);
3169         obj->page_cpu_valid = NULL;
3170 }
3171
3172 /**
3173  * Set the CPU read domain on a range of the object.
3174  *
3175  * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
3176  * not entirely valid.  The object's page_cpu_valid member tracks which
3177  * pages have been flushed and is respected by
3178  * i915_gem_object_set_to_cpu_domain() if it is later called to get a valid
3179  * mapping of the whole object.
3180  *
3181  * This function returns when the move is complete, including waiting on
3182  * flushes to occur.
3183  */
3184 static int
3185 i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
3186                                           uint64_t offset, uint64_t size)
3187 {
3188         uint32_t old_read_domains;
3189         int i, ret;
3190
3191         if (offset == 0 && size == obj->base.size)
3192                 return i915_gem_object_set_to_cpu_domain(obj, 0);
3193
3194         ret = i915_gem_object_flush_gpu_write_domain(obj);
3195         if (ret)
3196                 return ret;
3197
3198         ret = i915_gem_object_wait_rendering(obj);
3199         if (ret)
3200                 return ret;
3201
3202         i915_gem_object_flush_gtt_write_domain(obj);
3203
3204         /* If we're already fully in the CPU read domain, we're done. */
3205         if (obj->page_cpu_valid == NULL &&
3206             (obj->base.read_domains & I915_GEM_DOMAIN_CPU) != 0)
3207                 return 0;
3208
3209         /* Otherwise, create/clear the per-page CPU read domain flag if we're
3210          * newly adding I915_GEM_DOMAIN_CPU
3211          */
3212         if (obj->page_cpu_valid == NULL) {
3213                 obj->page_cpu_valid = kzalloc(obj->base.size / PAGE_SIZE,
3214                                               GFP_KERNEL);
3215                 if (obj->page_cpu_valid == NULL)
3216                         return -ENOMEM;
3217         } else if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
3218                 memset(obj->page_cpu_valid, 0, obj->base.size / PAGE_SIZE);
3219
3220         /* Flush the cache on any pages that are still invalid from the CPU's
3221          * perspective.
3222          */
3223         for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
3224              i++) {
3225                 if (obj->page_cpu_valid[i])
3226                         continue;
3227
3228                 drm_clflush_pages(obj->pages + i, 1);
3229
3230                 obj->page_cpu_valid[i] = 1;
3231         }
3232
3233         /* It should now be out of any other write domains, and we can update
3234          * the domain values for our changes.
3235          */
3236         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3237
3238         old_read_domains = obj->base.read_domains;
3239         obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3240
3241         trace_i915_gem_object_change_domain(obj,
3242                                             old_read_domains,
3243                                             obj->base.write_domain);
3244
3245         return 0;
3246 }
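
/*
 * Editor's example (illustrative sketch, not from the original source):
 * how a read path that only touches a sub-range of the object might use
 * the helper above, so that only the pages covering [offset, offset + size)
 * get clflushed while the rest of the object keeps its per-page state in
 * obj->page_cpu_valid.  "args" stands for a pread-style request and is an
 * assumption of this sketch.
 *
 *	ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
 *							args->size);
 *	if (ret)
 *		return ret;
 *
 *	(copy the requested range out via the CPU mapping of those pages)
 */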
3247
3248 /* Throttle our rendering by waiting until the ring has completed our requests
3249  * emitted over 20 msec ago.
3250  *
3251  * Note that if we were to use the current jiffies each time around the loop,
3252  * we wouldn't escape the function with any frames outstanding if the time to
3253  * render a frame was over 20ms.
3254  *
3255  * This should get us reasonable parallelism between CPU and GPU but also
3256  * relatively low latency when blocking on a particular request to finish.
3257  */
3258 static int
3259 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3260 {
3261         struct drm_i915_private *dev_priv = dev->dev_private;
3262         struct drm_i915_file_private *file_priv = file->driver_priv;
3263         unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
3264         struct drm_i915_gem_request *request;
3265         struct intel_ring_buffer *ring = NULL;
3266         u32 seqno = 0;
3267         int ret;
3268
3269         if (atomic_read(&dev_priv->mm.wedged))
3270                 return -EIO;
3271
3272         spin_lock(&file_priv->mm.lock);
3273         list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
3274                 if (time_after_eq(request->emitted_jiffies, recent_enough))
3275                         break;
3276
3277                 ring = request->ring;
3278                 seqno = request->seqno;
3279         }
3280         spin_unlock(&file_priv->mm.lock);
3281
3282         if (seqno == 0)
3283                 return 0;
3284
3285         ret = 0;
3286         if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
3287                 /* And wait for the seqno passing without holding any locks and
3288                  * causing extra latency for others. This is safe as the irq
3289                  * generation is designed to be run atomically and so is
3290                  * lockless.
3291                  */
3292                 if (ring->irq_get(ring)) {
3293                         ret = wait_event_interruptible(ring->irq_queue,
3294                                                        i915_seqno_passed(ring->get_seqno(ring), seqno)
3295                                                        || atomic_read(&dev_priv->mm.wedged));
3296                         ring->irq_put(ring);
3297
3298                         if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
3299                                 ret = -EIO;
3300                 } else if (wait_for_atomic(i915_seqno_passed(ring->get_seqno(ring),
3301                                                              seqno) ||
3302                                     atomic_read(&dev_priv->mm.wedged), 3000)) {
3303                         ret = -EBUSY;
3304                 }
3305         }
3306
3307         if (ret == 0)
3308                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
3309
3310         return ret;
3311 }
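
/*
 * Editor's note (worked example, not from the original source): suppose the
 * client has requests that were emitted 30 ms, 25 ms and 10 ms ago.  With
 * recent_enough = jiffies - msecs_to_jiffies(20), the loop above stops at
 * the 10 ms request and remembers the 25 ms one, so the throttle waits for
 * every request older than the 20 ms window and leaves the 10 ms request
 * outstanding.  From userspace the throttle is reached through the ioctl
 * below (libdrm assumed); it takes no argument:
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_THROTTLE, NULL);
 */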
3312
3313 int
3314 i915_gem_object_pin(struct drm_i915_gem_object *obj,
3315                     uint32_t alignment,
3316                     bool map_and_fenceable)
3317 {
3318         struct drm_device *dev = obj->base.dev;
3319         struct drm_i915_private *dev_priv = dev->dev_private;
3320         int ret;
3321
3322         BUG_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
3323         WARN_ON(i915_verify_lists(dev));
3324
3325         if (obj->gtt_space != NULL) {
3326                 if ((alignment && obj->gtt_offset & (alignment - 1)) ||
3327                     (map_and_fenceable && !obj->map_and_fenceable)) {
3328                         WARN(obj->pin_count,
3329                              "bo is already pinned with incorrect alignment:"
3330                              " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
3331                              " obj->map_and_fenceable=%d\n",
3332                              obj->gtt_offset, alignment,
3333                              map_and_fenceable,
3334                              obj->map_and_fenceable);
3335                         ret = i915_gem_object_unbind(obj);
3336                         if (ret)
3337                                 return ret;
3338                 }
3339         }
3340
3341         if (obj->gtt_space == NULL) {
3342                 ret = i915_gem_object_bind_to_gtt(obj, alignment,
3343                                                   map_and_fenceable);
3344                 if (ret)
3345                         return ret;
3346         }
3347
3348         if (obj->pin_count++ == 0) {
3349                 if (!obj->active)
3350                         list_move_tail(&obj->mm_list,
3351                                        &dev_priv->mm.pinned_list);
3352         }
3353         obj->pin_mappable |= map_and_fenceable;
3354
3355         WARN_ON(i915_verify_lists(dev));
3356         return 0;
3357 }
3358
3359 void
3360 i915_gem_object_unpin(struct drm_i915_gem_object *obj)
3361 {
3362         struct drm_device *dev = obj->base.dev;
3363         drm_i915_private_t *dev_priv = dev->dev_private;
3364
3365         WARN_ON(i915_verify_lists(dev));
3366         BUG_ON(obj->pin_count == 0);
3367         BUG_ON(obj->gtt_space == NULL);
3368
3369         if (--obj->pin_count == 0) {
3370                 if (!obj->active)
3371                         list_move_tail(&obj->mm_list,
3372                                        &dev_priv->mm.inactive_list);
3373                 obj->pin_mappable = false;
3374         }
3375         WARN_ON(i915_verify_lists(dev));
3376 }
3377
3378 int
3379 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
3380                    struct drm_file *file)
3381 {
3382         struct drm_i915_gem_pin *args = data;
3383         struct drm_i915_gem_object *obj;
3384         int ret;
3385
3386         ret = i915_mutex_lock_interruptible(dev);
3387         if (ret)
3388                 return ret;
3389
3390         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3391         if (&obj->base == NULL) {
3392                 ret = -ENOENT;
3393                 goto unlock;
3394         }
3395
3396         if (obj->madv != I915_MADV_WILLNEED) {
3397                 DRM_ERROR("Attempting to pin a purgeable buffer\n");
3398                 ret = -EINVAL;
3399                 goto out;
3400         }
3401
3402         if (obj->pin_filp != NULL && obj->pin_filp != file) {
3403                 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
3404                           args->handle);
3405                 ret = -EINVAL;
3406                 goto out;
3407         }
3408
3409         obj->user_pin_count++;
3410         obj->pin_filp = file;
3411         if (obj->user_pin_count == 1) {
3412                 ret = i915_gem_object_pin(obj, args->alignment, true);
3413                 if (ret)
3414                         goto out;
3415         }
3416
3417         /* XXX - flush the CPU caches for pinned objects
3418          * as the X server doesn't manage domains yet
3419          */
3420         i915_gem_object_flush_cpu_write_domain(obj);
3421         args->offset = obj->gtt_offset;
3422 out:
3423         drm_gem_object_unreference(&obj->base);
3424 unlock:
3425         mutex_unlock(&dev->struct_mutex);
3426         return ret;
3427 }
3428
3429 int
3430 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
3431                      struct drm_file *file)
3432 {
3433         struct drm_i915_gem_pin *args = data;
3434         struct drm_i915_gem_object *obj;
3435         int ret;
3436
3437         ret = i915_mutex_lock_interruptible(dev);
3438         if (ret)
3439                 return ret;
3440
3441         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3442         if (&obj->base == NULL) {
3443                 ret = -ENOENT;
3444                 goto unlock;
3445         }
3446
3447         if (obj->pin_filp != file) {
3448                 DRM_ERROR("Not pinned by caller in i915_gem_unpin_ioctl(): %d\n",
3449                           args->handle);
3450                 ret = -EINVAL;
3451                 goto out;
3452         }
3453         obj->user_pin_count--;
3454         if (obj->user_pin_count == 0) {
3455                 obj->pin_filp = NULL;
3456                 i915_gem_object_unpin(obj);
3457         }
3458
3459 out:
3460         drm_gem_object_unreference(&obj->base);
3461 unlock:
3462         mutex_unlock(&dev->struct_mutex);
3463         return ret;
3464 }
3465
3466 int
3467 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3468                     struct drm_file *file)
3469 {
3470         struct drm_i915_gem_busy *args = data;
3471         struct drm_i915_gem_object *obj;
3472         int ret;
3473
3474         ret = i915_mutex_lock_interruptible(dev);
3475         if (ret)
3476                 return ret;
3477
3478         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3479         if (&obj->base == NULL) {
3480                 ret = -ENOENT;
3481                 goto unlock;
3482         }
3483
3484         /* Count all active objects as busy, even if they are currently not used
3485          * by the gpu. Users of this interface expect objects to eventually
3486          * become non-busy without any further actions, therefore emit any
3487          * necessary flushes here.
3488          */
3489         args->busy = obj->active;
3490         if (args->busy) {
3491                 /* Unconditionally flush objects, even when the gpu still uses this
3492                  * object. Userspace calling this function indicates that it wants to
3493                  * use this buffer sooner rather than later, so issuing the required
3494                  * flush earlier is beneficial.
3495                  */
3496                 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
3497                         ret = i915_gem_flush_ring(obj->ring,
3498                                                   0, obj->base.write_domain);
3499                 } else if (obj->ring->outstanding_lazy_request ==
3500                            obj->last_rendering_seqno) {
3501                         struct drm_i915_gem_request *request;
3502
3503                         /* This ring is not being cleared by active usage,
3504                          * so emit a request to do so.
3505                          */
3506                         request = kzalloc(sizeof(*request), GFP_KERNEL);
3507                         if (request) {
3508                                 ret = i915_add_request(obj->ring, NULL, request);
3509                                 if (ret)
3510                                         kfree(request);
3511                         } else
3512                                 ret = -ENOMEM;
3513                 }
3514
3515                 /* Update the active list for the hardware's current position.
3516                  * Otherwise this only updates on a delayed timer or when irqs
3517                  * are actually unmasked, and our working set ends up being
3518                  * larger than required.
3519                  */
3520                 i915_gem_retire_requests_ring(obj->ring);
3521
3522                 args->busy = obj->active;
3523         }
3524
3525         drm_gem_object_unreference(&obj->base);
3526 unlock:
3527         mutex_unlock(&dev->struct_mutex);
3528         return ret;
3529 }
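
/*
 * Editor's example (illustrative sketch, not from the original source):
 * the userspace side of the busy ioctl, used as a buffer-reuse check.
 * libdrm's drmIoctl() and the i915_drm.h uapi structure are assumed; "fd"
 * and "handle" come from the caller.  A zero "busy" result means the
 * object is idle and its storage can be reused immediately.
 *
 *	struct drm_i915_gem_busy busy = { .handle = handle };
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy) == 0 && !busy.busy)
 *		(safe to overwrite the buffer without stalling on the GPU)
 */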
3530
3531 int
3532 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3533                         struct drm_file *file_priv)
3534 {
3535         return i915_gem_ring_throttle(dev, file_priv);
3536 }
3537
3538 int
3539 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
3540                        struct drm_file *file_priv)
3541 {
3542         struct drm_i915_gem_madvise *args = data;
3543         struct drm_i915_gem_object *obj;
3544         int ret;
3545
3546         switch (args->madv) {
3547         case I915_MADV_DONTNEED:
3548         case I915_MADV_WILLNEED:
3549             break;
3550         default:
3551             return -EINVAL;
3552         }
3553
3554         ret = i915_mutex_lock_interruptible(dev);
3555         if (ret)
3556                 return ret;
3557
3558         obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
3559         if (&obj->base == NULL) {
3560                 ret = -ENOENT;
3561                 goto unlock;
3562         }
3563
3564         if (obj->pin_count) {
3565                 ret = -EINVAL;
3566                 goto out;
3567         }
3568
3569         if (obj->madv != __I915_MADV_PURGED)
3570                 obj->madv = args->madv;
3571
3572         /* if the object is no longer bound, discard its backing storage */
3573         if (i915_gem_object_is_purgeable(obj) &&
3574             obj->gtt_space == NULL)
3575                 i915_gem_object_truncate(obj);
3576
3577         args->retained = obj->madv != __I915_MADV_PURGED;
3578
3579 out:
3580         drm_gem_object_unreference(&obj->base);
3581 unlock:
3582         mutex_unlock(&dev->struct_mutex);
3583         return ret;
3584 }
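
/*
 * Editor's example (illustrative sketch, not from the original source):
 * how a userspace buffer cache typically drives the madvise ioctl.
 * libdrm's drmIoctl() and the i915_drm.h uapi structure are assumed and
 * error handling is elided.  A buffer parked in the cache is marked
 * DONTNEED so the kernel may purge it under memory pressure; on reuse it
 * is marked WILLNEED and "retained" says whether the contents survived.
 *
 *	struct drm_i915_gem_madvise madv = {
 *		.handle = handle,
 *		.madv = I915_MADV_DONTNEED,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *
 *	(later, when taking the buffer back out of the cache)
 *	madv.madv = I915_MADV_WILLNEED;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *	if (!madv.retained)
 *		(the backing store was purged; treat the buffer as new)
 */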
3585
3586 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
3587                                                   size_t size)
3588 {
3589         struct drm_i915_private *dev_priv = dev->dev_private;
3590         struct drm_i915_gem_object *obj;
3591         struct address_space *mapping;
3592
3593         obj = kzalloc(sizeof(*obj), GFP_KERNEL);
3594         if (obj == NULL)
3595                 return NULL;
3596
3597         if (drm_gem_object_init(dev, &obj->base, size) != 0) {
3598                 kfree(obj);
3599                 return NULL;
3600         }
3601
3602         mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
3603         mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
3604
3605         i915_gem_info_add_obj(dev_priv, size);
3606
3607         obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3608         obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3609
3610         if (HAS_LLC(dev)) {
3611                 /* On some devices, we can have the GPU use the LLC (the CPU
3612                  * cache) for about a 10% performance improvement
3613                  * compared to uncached.  Graphics requests other than
3614                  * display scanout are coherent with the CPU in
3615                  * accessing this cache.  This means in this mode we
3616                  * don't need to clflush on the CPU side, and on the
3617                  * GPU side we only need to flush internal caches to
3618                  * get data visible to the CPU.
3619                  *
3620                  * However, we maintain the display planes as UC, and so
3621                  * need to rebind when first used as such.
3622                  */
3623                 obj->cache_level = I915_CACHE_LLC;
3624         } else
3625                 obj->cache_level = I915_CACHE_NONE;
3626
3627         obj->base.driver_private = NULL;
3628         obj->fence_reg = I915_FENCE_REG_NONE;
3629         INIT_LIST_HEAD(&obj->mm_list);
3630         INIT_LIST_HEAD(&obj->gtt_list);
3631         INIT_LIST_HEAD(&obj->ring_list);
3632         INIT_LIST_HEAD(&obj->exec_list);
3633         INIT_LIST_HEAD(&obj->gpu_write_list);
3634         obj->madv = I915_MADV_WILLNEED;
3635         /* Avoid an unnecessary call to unbind on the first bind. */
3636         obj->map_and_fenceable = true;
3637
3638         return obj;
3639 }
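
/*
 * Editor's example (illustrative sketch, not from the original source):
 * the usual pairing of the allocator above with a GEM handle, roughly what
 * an object-create path does.  drm_gem_handle_create() takes its own
 * reference for the handle, so the reference from allocation is dropped
 * afterwards; error handling for a failed handle creation is elided.
 *
 *	obj = i915_gem_alloc_object(dev, roundup(size, PAGE_SIZE));
 *	if (obj == NULL)
 *		return -ENOMEM;
 *
 *	ret = drm_gem_handle_create(file, &obj->base, &handle);
 *	if (ret == 0)
 *		drm_gem_object_unreference(&obj->base);
 *	(hand "handle" back to userspace)
 */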
3640
3641 int i915_gem_init_object(struct drm_gem_object *obj)
3642 {
3643         BUG();
3644
3645         return 0;
3646 }
3647
3648 static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
3649 {
3650         struct drm_device *dev = obj->base.dev;
3651         drm_i915_private_t *dev_priv = dev->dev_private;
3652         int ret;
3653
3654         ret = i915_gem_object_unbind(obj);
3655         if (ret == -ERESTARTSYS) {
3656                 list_move(&obj->mm_list,
3657                           &dev_priv->mm.deferred_free_list);
3658                 return;
3659         }
3660
3661         trace_i915_gem_object_destroy(obj);
3662
3663         if (obj->base.map_list.map)
3664                 drm_gem_free_mmap_offset(&obj->base);
3665
3666         drm_gem_object_release(&obj->base);
3667         i915_gem_info_remove_obj(dev_priv, obj->base.size);
3668
3669         kfree(obj->page_cpu_valid);
3670         kfree(obj->bit_17);
3671         kfree(obj);
3672 }
3673
3674 void i915_gem_free_object(struct drm_gem_object *gem_obj)
3675 {
3676         struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
3677         struct drm_device *dev = obj->base.dev;
3678
3679         while (obj->pin_count > 0)
3680                 i915_gem_object_unpin(obj);
3681
3682         if (obj->phys_obj)
3683                 i915_gem_detach_phys_object(dev, obj);
3684
3685         i915_gem_free_object_tail(obj);
3686 }
3687
3688 int
3689 i915_gem_idle(struct drm_device *dev)
3690 {
3691         drm_i915_private_t *dev_priv = dev->dev_private;
3692         int ret;
3693
3694         mutex_lock(&dev->struct_mutex);
3695
3696         if (dev_priv->mm.suspended) {
3697                 mutex_unlock(&dev->struct_mutex);
3698                 return 0;
3699         }
3700
3701         ret = i915_gpu_idle(dev, true);
3702         if (ret) {
3703                 mutex_unlock(&dev->struct_mutex);
3704                 return ret;
3705         }
3706
3707         /* Under UMS, be paranoid and evict. */
3708         if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
3709                 ret = i915_gem_evict_inactive(dev, false);
3710                 if (ret) {
3711                         mutex_unlock(&dev->struct_mutex);
3712                         return ret;
3713                 }
3714         }
3715
3716         i915_gem_reset_fences(dev);
3717
3718         /* Hack!  Don't let anybody do execbuf while we don't control the chip.
3719          * We need to replace this with a semaphore, or something.
3720          * And not confound mm.suspended!
3721          */
3722         dev_priv->mm.suspended = 1;
3723         del_timer_sync(&dev_priv->hangcheck_timer);
3724
3725         i915_kernel_lost_context(dev);
3726         i915_gem_cleanup_ringbuffer(dev);
3727
3728         mutex_unlock(&dev->struct_mutex);
3729
3730         /* Cancel the retire work handler, which should be idle now. */
3731         cancel_delayed_work_sync(&dev_priv->mm.retire_work);
3732
3733         return 0;
3734 }
3735
3736 void i915_gem_init_swizzling(struct drm_device *dev)
3737 {
3738         drm_i915_private_t *dev_priv = dev->dev_private;
3739
3740         if (INTEL_INFO(dev)->gen < 5 ||
3741             dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
3742                 return;
3743
3744         I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
3745                                  DISP_TILE_SURFACE_SWIZZLING);
3746
3747         if (IS_GEN5(dev))
3748                 return;
3749
3750         I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
3751         if (IS_GEN6(dev))
3752                 I915_WRITE(ARB_MODE, ARB_MODE_ENABLE(ARB_MODE_SWIZZLE_SNB));
3753         else
3754                 I915_WRITE(ARB_MODE, ARB_MODE_ENABLE(ARB_MODE_SWIZZLE_IVB));
3755 }
3756
3757 void i915_gem_init_ppgtt(struct drm_device *dev)
3758 {
3759         drm_i915_private_t *dev_priv = dev->dev_private;
3760         uint32_t pd_offset;
3761         struct intel_ring_buffer *ring;
3762         struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
3763         uint32_t __iomem *pd_addr;
3764         uint32_t pd_entry;
3765         int i;
3766
3767         if (!dev_priv->mm.aliasing_ppgtt)
3768                 return;
3769
3770
3771         pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t);
3772         for (i = 0; i < ppgtt->num_pd_entries; i++) {
3773                 dma_addr_t pt_addr;
3774
3775                 if (dev_priv->mm.gtt->needs_dmar)
3776                         pt_addr = ppgtt->pt_dma_addr[i];
3777                 else
3778                         pt_addr = page_to_phys(ppgtt->pt_pages[i]);
3779
3780                 pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
3781                 pd_entry |= GEN6_PDE_VALID;
3782
3783                 writel(pd_entry, pd_addr + i);
3784         }
3785         readl(pd_addr);
3786
3787         pd_offset = ppgtt->pd_offset;
3788         pd_offset /= 64; /* in cachelines */
3789         pd_offset <<= 16;
3790
3791         if (INTEL_INFO(dev)->gen == 6) {
3792                 uint32_t ecochk = I915_READ(GAM_ECOCHK);
3793                 I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
3794                                        ECOCHK_PPGTT_CACHE64B);
3795                 I915_WRITE(GFX_MODE, GFX_MODE_ENABLE(GFX_PPGTT_ENABLE));
3796         } else if (INTEL_INFO(dev)->gen >= 7) {
3797                 I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
3798                 /* GFX_MODE is per-ring on gen7+ */
3799         }
3800
3801         for (i = 0; i < I915_NUM_RINGS; i++) {
3802                 ring = &dev_priv->ring[i];
3803
3804                 if (INTEL_INFO(dev)->gen >= 7)
3805                         I915_WRITE(RING_MODE_GEN7(ring),
3806                                    GFX_MODE_ENABLE(GFX_PPGTT_ENABLE));
3807
3808                 I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
3809                 I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
3810         }
3811 }
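
/*
 * Editor's note (worked example, not from the original source): the value
 * programmed into RING_PP_DIR_BASE above is the page-directory offset
 * expressed in GTT cachelines and shifted into the upper half of the
 * register.  For instance, if ppgtt->pd_offset were 0x40000 bytes into the
 * GTT, then
 *
 *	0x40000 / 64  = 0x1000        (cachelines)
 *	0x1000 << 16  = 0x10000000    (value written to RING_PP_DIR_BASE)
 */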
3812
3813 int
3814 i915_gem_init_hw(struct drm_device *dev)
3815 {
3816         drm_i915_private_t *dev_priv = dev->dev_private;
3817         int ret;
3818
3819         i915_gem_init_swizzling(dev);
3820
3821         ret = intel_init_render_ring_buffer(dev);
3822         if (ret)
3823                 return ret;
3824
3825         if (HAS_BSD(dev)) {
3826                 ret = intel_init_bsd_ring_buffer(dev);
3827                 if (ret)
3828                         goto cleanup_render_ring;
3829         }
3830
3831         if (HAS_BLT(dev)) {
3832                 ret = intel_init_blt_ring_buffer(dev);
3833                 if (ret)
3834                         goto cleanup_bsd_ring;
3835         }
3836
3837         dev_priv->next_seqno = 1;
3838
3839         i915_gem_init_ppgtt(dev);
3840
3841         return 0;
3842
3843 cleanup_bsd_ring:
3844         intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
3845 cleanup_render_ring:
3846         intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
3847         return ret;
3848 }
3849
3850 void
3851 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
3852 {
3853         drm_i915_private_t *dev_priv = dev->dev_private;
3854         int i;
3855
3856         for (i = 0; i < I915_NUM_RINGS; i++)
3857                 intel_cleanup_ring_buffer(&dev_priv->ring[i]);
3858 }
3859
3860 int
3861 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
3862                        struct drm_file *file_priv)
3863 {
3864         drm_i915_private_t *dev_priv = dev->dev_private;
3865         int ret, i;
3866
3867         if (drm_core_check_feature(dev, DRIVER_MODESET))
3868                 return 0;
3869
3870         if (atomic_read(&dev_priv->mm.wedged)) {
3871                 DRM_ERROR("Reenabling wedged hardware, good luck\n");
3872                 atomic_set(&dev_priv->mm.wedged, 0);
3873         }
3874
3875         mutex_lock(&dev->struct_mutex);
3876         dev_priv->mm.suspended = 0;
3877
3878         ret = i915_gem_init_hw(dev);
3879         if (ret != 0) {
3880                 mutex_unlock(&dev->struct_mutex);
3881                 return ret;
3882         }
3883
3884         BUG_ON(!list_empty(&dev_priv->mm.active_list));
3885         BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
3886         BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
3887         for (i = 0; i < I915_NUM_RINGS; i++) {
3888                 BUG_ON(!list_empty(&dev_priv->ring[i].active_list));
3889                 BUG_ON(!list_empty(&dev_priv->ring[i].request_list));
3890         }
3891         mutex_unlock(&dev->struct_mutex);
3892
3893         ret = drm_irq_install(dev);
3894         if (ret)
3895                 goto cleanup_ringbuffer;
3896
3897         return 0;
3898
3899 cleanup_ringbuffer:
3900         mutex_lock(&dev->struct_mutex);
3901         i915_gem_cleanup_ringbuffer(dev);
3902         dev_priv->mm.suspended = 1;
3903         mutex_unlock(&dev->struct_mutex);
3904
3905         return ret;
3906 }
3907
3908 int
3909 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
3910                        struct drm_file *file_priv)
3911 {
3912         if (drm_core_check_feature(dev, DRIVER_MODESET))
3913                 return 0;
3914
3915         drm_irq_uninstall(dev);
3916         return i915_gem_idle(dev);
3917 }
3918
3919 void
3920 i915_gem_lastclose(struct drm_device *dev)
3921 {
3922         int ret;
3923
3924         if (drm_core_check_feature(dev, DRIVER_MODESET))
3925                 return;
3926
3927         ret = i915_gem_idle(dev);
3928         if (ret)
3929                 DRM_ERROR("failed to idle hardware: %d\n", ret);
3930 }
3931
3932 static void
3933 init_ring_lists(struct intel_ring_buffer *ring)
3934 {
3935         INIT_LIST_HEAD(&ring->active_list);
3936         INIT_LIST_HEAD(&ring->request_list);
3937         INIT_LIST_HEAD(&ring->gpu_write_list);
3938 }
3939
3940 void
3941 i915_gem_load(struct drm_device *dev)
3942 {
3943         int i;
3944         drm_i915_private_t *dev_priv = dev->dev_private;
3945
3946         INIT_LIST_HEAD(&dev_priv->mm.active_list);
3947         INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
3948         INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
3949         INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
3950         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
3951         INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
3952         INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
3953         for (i = 0; i < I915_NUM_RINGS; i++)
3954                 init_ring_lists(&dev_priv->ring[i]);
3955         for (i = 0; i < I915_MAX_NUM_FENCES; i++)
3956                 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
3957         INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
3958                           i915_gem_retire_work_handler);
3959         init_completion(&dev_priv->error_completion);
3960
3961         /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
3962         if (IS_GEN3(dev)) {
3963                 u32 tmp = I915_READ(MI_ARB_STATE);
3964                 if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) {
3965                         /* arb state is a masked write, so set bit + bit in mask */
3966                         tmp = MI_ARB_C3_LP_WRITE_ENABLE | (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT);
3967                         I915_WRITE(MI_ARB_STATE, tmp);
3968                 }
3969         }
3970
3971         dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
3972
3973         /* Old X drivers will take 0-2 for front, back, depth buffers */
3974         if (!drm_core_check_feature(dev, DRIVER_MODESET))
3975                 dev_priv->fence_reg_start = 3;
3976
3977         if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
3978                 dev_priv->num_fence_regs = 16;
3979         else
3980                 dev_priv->num_fence_regs = 8;
3981
3982         /* Initialize fence registers to zero */
3983         for (i = 0; i < dev_priv->num_fence_regs; i++) {
3984                 i915_gem_clear_fence_reg(dev, &dev_priv->fence_regs[i]);
3985         }
3986
3987         i915_gem_detect_bit_6_swizzle(dev);
3988         init_waitqueue_head(&dev_priv->pending_flip_queue);
3989
3990         dev_priv->mm.interruptible = true;
3991
3992         dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
3993         dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
3994         register_shrinker(&dev_priv->mm.inactive_shrinker);
3995 }
3996
3997 /*
3998  * Create a physically contiguous memory object for this object
3999  * e.g. for cursor + overlay regs
4000  */
4001 static int i915_gem_init_phys_object(struct drm_device *dev,
4002                                      int id, int size, int align)
4003 {
4004         drm_i915_private_t *dev_priv = dev->dev_private;
4005         struct drm_i915_gem_phys_object *phys_obj;
4006         int ret;
4007
4008         if (dev_priv->mm.phys_objs[id - 1] || !size)
4009                 return 0;
4010
4011         phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
4012         if (!phys_obj)
4013                 return -ENOMEM;
4014
4015         phys_obj->id = id;
4016
4017         phys_obj->handle = drm_pci_alloc(dev, size, align);
4018         if (!phys_obj->handle) {
4019                 ret = -ENOMEM;
4020                 goto kfree_obj;
4021         }
4022 #ifdef CONFIG_X86
4023         set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4024 #endif
4025
4026         dev_priv->mm.phys_objs[id - 1] = phys_obj;
4027
4028         return 0;
4029 kfree_obj:
4030         kfree(phys_obj);
4031         return ret;
4032 }
4033
4034 static void i915_gem_free_phys_object(struct drm_device *dev, int id)
4035 {
4036         drm_i915_private_t *dev_priv = dev->dev_private;
4037         struct drm_i915_gem_phys_object *phys_obj;
4038
4039         if (!dev_priv->mm.phys_objs[id - 1])
4040                 return;
4041
4042         phys_obj = dev_priv->mm.phys_objs[id - 1];
4043         if (phys_obj->cur_obj) {
4044                 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
4045         }
4046
4047 #ifdef CONFIG_X86
4048         set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4049 #endif
4050         drm_pci_free(dev, phys_obj->handle);
4051         kfree(phys_obj);
4052         dev_priv->mm.phys_objs[id - 1] = NULL;
4053 }
4054
4055 void i915_gem_free_all_phys_object(struct drm_device *dev)
4056 {
4057         int i;
4058
4059         for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
4060                 i915_gem_free_phys_object(dev, i);
4061 }
4062
4063 void i915_gem_detach_phys_object(struct drm_device *dev,
4064                                  struct drm_i915_gem_object *obj)
4065 {
4066         struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
4067         char *vaddr;
4068         int i;
4069         int page_count;
4070
4071         if (!obj->phys_obj)
4072                 return;
4073         vaddr = obj->phys_obj->handle->vaddr;
4074
4075         page_count = obj->base.size / PAGE_SIZE;
4076         for (i = 0; i < page_count; i++) {
4077                 struct page *page = shmem_read_mapping_page(mapping, i);
4078                 if (!IS_ERR(page)) {
4079                         char *dst = kmap_atomic(page);
4080                         memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
4081                         kunmap_atomic(dst);
4082
4083                         drm_clflush_pages(&page, 1);
4084
4085                         set_page_dirty(page);
4086                         mark_page_accessed(page);
4087                         page_cache_release(page);
4088                 }
4089         }
4090         intel_gtt_chipset_flush();
4091
4092         obj->phys_obj->cur_obj = NULL;
4093         obj->phys_obj = NULL;
4094 }
4095
4096 int
4097 i915_gem_attach_phys_object(struct drm_device *dev,
4098                             struct drm_i915_gem_object *obj,
4099                             int id,
4100                             int align)
4101 {
4102         struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
4103         drm_i915_private_t *dev_priv = dev->dev_private;
4104         int ret = 0;
4105         int page_count;
4106         int i;
4107
4108         if (id > I915_MAX_PHYS_OBJECT)
4109                 return -EINVAL;
4110
4111         if (obj->phys_obj) {
4112                 if (obj->phys_obj->id == id)
4113                         return 0;
4114                 i915_gem_detach_phys_object(dev, obj);
4115         }
4116
4117         /* create a new object */
4118         if (!dev_priv->mm.phys_objs[id - 1]) {
4119                 ret = i915_gem_init_phys_object(dev, id,
4120                                                 obj->base.size, align);
4121                 if (ret) {
4122                         DRM_ERROR("failed to init phys object %d size: %zu\n",
4123                                   id, obj->base.size);
4124                         return ret;
4125                 }
4126         }
4127
4128         /* bind to the object */
4129         obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
4130         obj->phys_obj->cur_obj = obj;
4131
4132         page_count = obj->base.size / PAGE_SIZE;
4133
4134         for (i = 0; i < page_count; i++) {
4135                 struct page *page;
4136                 char *dst, *src;
4137
4138                 page = shmem_read_mapping_page(mapping, i);
4139                 if (IS_ERR(page))
4140                         return PTR_ERR(page);
4141
4142                 src = kmap_atomic(page);
4143                 dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
4144                 memcpy(dst, src, PAGE_SIZE);
4145                 kunmap_atomic(src);
4146
4147                 mark_page_accessed(page);
4148                 page_cache_release(page);
4149         }
4150
4151         return 0;
4152 }
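
/*
 * Editor's example (illustrative sketch, not from the original source):
 * roughly how cursor setup uses the helper above on chipsets whose cursor
 * registers take a physical address rather than a GTT offset.  The
 * PAGE_SIZE alignment and the "pipe" variable are assumptions of this
 * sketch; busaddr comes from the drm_pci_alloc() handle created in
 * i915_gem_init_phys_object().
 *
 *	ret = i915_gem_attach_phys_object(dev, obj,
 *					  I915_GEM_PHYS_CURSOR_0 + pipe,
 *					  PAGE_SIZE);
 *	if (ret)
 *		return ret;
 *
 *	addr = obj->phys_obj->handle->busaddr;
 *	(program the cursor base register with addr)
 */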
4153
4154 static int
4155 i915_gem_phys_pwrite(struct drm_device *dev,
4156                      struct drm_i915_gem_object *obj,
4157                      struct drm_i915_gem_pwrite *args,
4158                      struct drm_file *file_priv)
4159 {
4160         void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
4161         char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;
4162
4163         if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
4164                 unsigned long unwritten;
4165
4166                 /* The physical object once assigned is fixed for the lifetime
4167                  * of the obj, so we can safely drop the lock and continue
4168                  * to access vaddr.
4169                  */
4170                 mutex_unlock(&dev->struct_mutex);
4171                 unwritten = copy_from_user(vaddr, user_data, args->size);
4172                 mutex_lock(&dev->struct_mutex);
4173                 if (unwritten)
4174                         return -EFAULT;
4175         }
4176
4177         intel_gtt_chipset_flush();
4178         return 0;
4179 }
4180
4181 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
4182 {
4183         struct drm_i915_file_private *file_priv = file->driver_priv;
4184
4185         /* Clean up our request list when the client is going away, so that
4186          * later retire_requests won't dereference our soon-to-be-gone
4187          * file_priv.
4188          */
4189         spin_lock(&file_priv->mm.lock);
4190         while (!list_empty(&file_priv->mm.request_list)) {
4191                 struct drm_i915_gem_request *request;
4192
4193                 request = list_first_entry(&file_priv->mm.request_list,
4194                                            struct drm_i915_gem_request,
4195                                            client_list);
4196                 list_del(&request->client_list);
4197                 request->file_priv = NULL;
4198         }
4199         spin_unlock(&file_priv->mm.lock);
4200 }
4201
4202 static int
4203 i915_gpu_is_active(struct drm_device *dev)
4204 {
4205         drm_i915_private_t *dev_priv = dev->dev_private;
4206         int lists_empty;
4207
4208         lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
4209                       list_empty(&dev_priv->mm.active_list);
4210
4211         return !lists_empty;
4212 }
4213
4214 static int
4215 i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
4216 {
4217         struct drm_i915_private *dev_priv =
4218                 container_of(shrinker,
4219                              struct drm_i915_private,
4220                              mm.inactive_shrinker);
4221         struct drm_device *dev = dev_priv->dev;
4222         struct drm_i915_gem_object *obj, *next;
4223         int nr_to_scan = sc->nr_to_scan;
4224         int cnt;
4225
4226         if (!mutex_trylock(&dev->struct_mutex))
4227                 return 0;
4228
4229         /* "fast-path" to count number of available objects */
4230         if (nr_to_scan == 0) {
4231                 cnt = 0;
4232                 list_for_each_entry(obj,
4233                                     &dev_priv->mm.inactive_list,
4234                                     mm_list)
4235                         cnt++;
4236                 mutex_unlock(&dev->struct_mutex);
4237                 return cnt / 100 * sysctl_vfs_cache_pressure;
4238         }
4239
4240 rescan:
4241         /* first scan for clean buffers */
4242         i915_gem_retire_requests(dev);
4243
4244         list_for_each_entry_safe(obj, next,
4245                                  &dev_priv->mm.inactive_list,
4246                                  mm_list) {
4247                 if (i915_gem_object_is_purgeable(obj)) {
4248                         if (i915_gem_object_unbind(obj) == 0 &&
4249                             --nr_to_scan == 0)
4250                                 break;
4251                 }
4252         }
4253
4254         /* second pass, evict/count anything still on the inactive list */
4255         cnt = 0;
4256         list_for_each_entry_safe(obj, next,
4257                                  &dev_priv->mm.inactive_list,
4258                                  mm_list) {
4259                 if (nr_to_scan &&
4260                     i915_gem_object_unbind(obj) == 0)
4261                         nr_to_scan--;
4262                 else
4263                         cnt++;
4264         }
4265
4266         if (nr_to_scan && i915_gpu_is_active(dev)) {
4267                 /*
4268                  * We are desperate for pages, so as a last resort, wait
4269                  * for the GPU to finish and discard whatever we can.
4270                  * This dramatically reduces the number of OOM-killer
4271                  * events whilst running the GPU aggressively.
4272                  */
4273                 if (i915_gpu_idle(dev, true) == 0)
4274                         goto rescan;
4275         }
4276         mutex_unlock(&dev->struct_mutex);
4277         return cnt / 100 * sysctl_vfs_cache_pressure;
4278 }