/*
 * Copyright (C) 2012 Red Hat
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License v2. See the file COPYING in the main directory of this archive for
 * more details.
 */

#include "drmP.h"
#include "udl_drv.h"
#include <linux/shmem_fs.h>

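/*
 * Allocate a udl GEM object and initialize the shmem-backed
 * drm_gem_object underneath it.  Returns NULL on failure.
 */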
struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
					    size_t size)
{
	struct udl_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (obj == NULL)
		return NULL;

	if (drm_gem_object_init(dev, &obj->base, size) != 0) {
		kfree(obj);
		return NULL;
	}

	return obj;
}

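/*
 * Create a GEM object of the requested size (rounded up to a whole
 * number of pages) and hand a handle for it back to userspace.  The
 * local reference is dropped once the handle holds its own.
 */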
static int
udl_gem_create(struct drm_file *file,
	       struct drm_device *dev,
	       uint64_t size,
	       uint32_t *handle_p)
{
	struct udl_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);

	obj = udl_gem_alloc_object(dev, size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	if (ret) {
		drm_gem_object_release(&obj->base);
		kfree(obj);
		return ret;
	}

	drm_gem_object_unreference(&obj->base);
	*handle_p = handle;
	return 0;
}

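/*
 * Dumb-buffer creation: derive pitch and size from the requested
 * geometry and back the buffer with a GEM object.
 */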
int udl_dumb_create(struct drm_file *file,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	args->pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
	args->size = args->pitch * args->height;
	return udl_gem_create(file, dev,
			      args->size, &args->handle);
}

int udl_dumb_destroy(struct drm_file *file, struct drm_device *dev,
		     uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}

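/*
 * Fault handler for mmap'ed objects: insert the already pinned backing
 * page for the faulting address into the VMA.
 */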
int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct udl_gem_object *obj = to_udl_bo(vma->vm_private_data);
	struct page *page;
	unsigned int page_offset;
	int ret = 0;

	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	if (!obj->pages)
		return VM_FAULT_SIGBUS;

	page = obj->pages[page_offset];
	ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
	switch (ret) {
	case -EAGAIN:
		set_need_resched();
		/* fall through */
	case 0:
	case -ERESTARTSYS:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

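/*
 * udl allocates its GEM objects through udl_gem_alloc_object(), so this
 * driver hook is never expected to be called.
 */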
int udl_gem_init_object(struct drm_gem_object *obj)
{
	BUG();

	return 0;
}

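/*
 * Pin the object's backing store by reading every page in from the
 * shmem mapping.  A no-op if the page array is already populated.
 */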
static int udl_gem_get_pages(struct udl_gem_object *obj, gfp_t gfpmask)
{
	int page_count, i;
	struct page *page;
	struct inode *inode;
	struct address_space *mapping;

	if (obj->pages)
		return 0;

	page_count = obj->base.size / PAGE_SIZE;
	BUG_ON(obj->pages != NULL);
	obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
	if (obj->pages == NULL)
		return -ENOMEM;

	inode = obj->base.filp->f_path.dentry->d_inode;
	mapping = inode->i_mapping;
	gfpmask |= mapping_gfp_mask(mapping);

	for (i = 0; i < page_count; i++) {
		page = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
		if (IS_ERR(page))
			goto err_pages;
		obj->pages[i] = page;
	}

	return 0;
err_pages:
	while (i--)
		page_cache_release(obj->pages[i]);
	drm_free_large(obj->pages);
	obj->pages = NULL;
	return PTR_ERR(page);
}

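/* Drop the references on the backing pages and free the page array. */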
static void udl_gem_put_pages(struct udl_gem_object *obj)
{
	int page_count = obj->base.size / PAGE_SIZE;
	int i;

	for (i = 0; i < page_count; i++)
		page_cache_release(obj->pages[i]);

	drm_free_large(obj->pages);
	obj->pages = NULL;
}

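/*
 * Pin the backing pages and map them into a contiguous kernel virtual
 * address range so the rest of the driver can access the buffer
 * linearly.
 */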
int udl_gem_vmap(struct udl_gem_object *obj)
{
	int page_count = obj->base.size / PAGE_SIZE;
	int ret;

	ret = udl_gem_get_pages(obj, GFP_KERNEL);
	if (ret)
		return ret;

	obj->vmapping = vmap(obj->pages, page_count, 0, PAGE_KERNEL);
	if (!obj->vmapping)
		return -ENOMEM;
	return 0;
}

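/* Tear down the kernel mapping and unpin the backing pages. */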
void udl_gem_vunmap(struct udl_gem_object *obj)
{
	if (obj->vmapping)
		vunmap(obj->vmapping);

	udl_gem_put_pages(obj);
}

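/*
 * GEM free callback: drop the kernel mapping, the backing pages and any
 * mmap offset still attached to the object.
 */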
void udl_gem_free_object(struct drm_gem_object *gem_obj)
{
	struct udl_gem_object *obj = to_udl_bo(gem_obj);

	if (obj->vmapping)
		udl_gem_vunmap(obj);

	if (obj->pages)
		udl_gem_put_pages(obj);

	if (gem_obj->map_list.map)
		drm_gem_free_mmap_offset(gem_obj);
}

/*
 * The dumb interface doesn't work with the straight GEM mmap path;
 * userspace is expected to mmap() the DRM fd at the offset returned
 * here, as usual.
 */
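/*
 * Illustrative (untested) sketch of the userspace side, using only the
 * generic DRM dumb-buffer UAPI; the geometry is arbitrary and error
 * handling is omitted:
 *
 *	struct drm_mode_create_dumb create = {
 *		.width = 1024, .height = 768, .bpp = 32,
 *	};
 *	struct drm_mode_map_dumb map = { 0 };
 *
 *	ioctl(drm_fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *	map.handle = create.handle;
 *	ioctl(drm_fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *	fb = mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		  drm_fd, map.offset);
 */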
int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
		 uint32_t handle, uint64_t *offset)
{
	struct udl_gem_object *gobj;
	struct drm_gem_object *obj;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);
	obj = drm_gem_object_lookup(dev, file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}
	gobj = to_udl_bo(obj);

	ret = udl_gem_get_pages(gobj, GFP_KERNEL);
	if (ret)
		goto out;
	if (!gobj->base.map_list.map) {
		ret = drm_gem_create_mmap_offset(obj);
		if (ret)
			goto out;
	}

	*offset = (u64)gobj->base.map_list.hash.key << PAGE_SHIFT;

out:
	drm_gem_object_unreference(&gobj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}