/*
 * Copyright (C) 2012 Red Hat
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License v2. See the file COPYING in the main directory of this archive for
 * more details.
 */
11 #include <linux/shmem_fs.h>
13 struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
16 struct udl_gem_object *obj;
18 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
22 if (drm_gem_object_init(dev, &obj->base, size) != 0) {
31 udl_gem_create(struct drm_file *file,
32 struct drm_device *dev,
36 struct udl_gem_object *obj;
40 size = roundup(size, PAGE_SIZE);
42 obj = udl_gem_alloc_object(dev, size);
46 ret = drm_gem_handle_create(file, &obj->base, &handle);
48 drm_gem_object_release(&obj->base);
53 drm_gem_object_unreference(&obj->base);
58 int udl_dumb_create(struct drm_file *file,
59 struct drm_device *dev,
60 struct drm_mode_create_dumb *args)
62 args->pitch = args->width * ((args->bpp + 1) / 8);
63 args->size = args->pitch * args->height;
64 return udl_gem_create(file, dev,
65 args->size, &args->handle);
68 int udl_dumb_destroy(struct drm_file *file, struct drm_device *dev,
71 return drm_gem_handle_delete(file, handle);
74 int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
78 ret = drm_gem_mmap(filp, vma);
82 vma->vm_flags &= ~VM_PFNMAP;
83 vma->vm_flags |= VM_MIXEDMAP;
88 int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
90 struct udl_gem_object *obj = to_udl_bo(vma->vm_private_data);
92 unsigned int page_offset;
95 page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
99 return VM_FAULT_SIGBUS;
101 page = obj->pages[page_offset];
102 ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
108 return VM_FAULT_NOPAGE;
112 return VM_FAULT_SIGBUS;
116 int udl_gem_init_object(struct drm_gem_object *obj)
123 static int udl_gem_get_pages(struct udl_gem_object *obj, gfp_t gfpmask)
128 struct address_space *mapping;
133 page_count = obj->base.size / PAGE_SIZE;
134 BUG_ON(obj->pages != NULL);
135 obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
136 if (obj->pages == NULL)
139 inode = obj->base.filp->f_path.dentry->d_inode;
140 mapping = inode->i_mapping;
141 gfpmask |= mapping_gfp_mask(mapping);
143 for (i = 0; i < page_count; i++) {
144 page = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
147 obj->pages[i] = page;
153 page_cache_release(obj->pages[i]);
154 drm_free_large(obj->pages);
156 return PTR_ERR(page);
159 static void udl_gem_put_pages(struct udl_gem_object *obj)
161 int page_count = obj->base.size / PAGE_SIZE;
164 for (i = 0; i < page_count; i++)
165 page_cache_release(obj->pages[i]);
167 drm_free_large(obj->pages);
171 int udl_gem_vmap(struct udl_gem_object *obj)
173 int page_count = obj->base.size / PAGE_SIZE;
176 ret = udl_gem_get_pages(obj, GFP_KERNEL);
180 obj->vmapping = vmap(obj->pages, page_count, 0, PAGE_KERNEL);
186 void udl_gem_vunmap(struct udl_gem_object *obj)
189 vunmap(obj->vmapping);
191 udl_gem_put_pages(obj);
194 void udl_gem_free_object(struct drm_gem_object *gem_obj)
196 struct udl_gem_object *obj = to_udl_bo(gem_obj);
202 udl_gem_put_pages(obj);
204 if (gem_obj->map_list.map)
205 drm_gem_free_mmap_offset(gem_obj);
208 /* the dumb interface doesn't work with the GEM straight MMAP
209 interface, it expects to do MMAP on the drm fd, like normal */
210 int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
211 uint32_t handle, uint64_t *offset)
213 struct udl_gem_object *gobj;
214 struct drm_gem_object *obj;
217 mutex_lock(&dev->struct_mutex);
218 obj = drm_gem_object_lookup(dev, file, handle);
223 gobj = to_udl_bo(obj);
225 ret = udl_gem_get_pages(gobj, GFP_KERNEL);
228 if (!gobj->base.map_list.map) {
229 ret = drm_gem_create_mmap_offset(obj);
234 *offset = (u64)gobj->base.map_list.hash.key << PAGE_SHIFT;
237 drm_gem_object_unreference(&gobj->base);
239 mutex_unlock(&dev->struct_mutex);