/*
 * drivers/staging/android/ion/ion_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/err.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/rtmutex.h>
#include <linux/sched.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"

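/*
 * ion_heap_map_kernel - map an ion buffer into the kernel address space
 *
 * Builds a flat array of the pages backing the buffer's sg_table and maps
 * them with vmap(), using a cached or write-combined mapping depending on
 * ION_FLAG_CACHED.  Returns the kernel virtual address on success, NULL if
 * the temporary page array cannot be allocated, or ERR_PTR(-ENOMEM) if
 * vmap() fails.
 */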
void *ion_heap_map_kernel(struct ion_heap *heap,
			  struct ion_buffer *buffer)
{
	struct scatterlist *sg;
	int i, j;
	void *vaddr;
	pgprot_t pgprot;
	struct sg_table *table = buffer->sg_table;
	int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	struct page **pages = vmalloc(sizeof(struct page *) * npages);
	struct page **tmp = pages;

	if (!pages)
		return NULL;

	if (buffer->flags & ION_FLAG_CACHED)
		pgprot = PAGE_KERNEL;
	else
		pgprot = pgprot_writecombine(PAGE_KERNEL);

	for_each_sg(table->sgl, sg, table->nents, i) {
		int npages_this_entry = PAGE_ALIGN(sg->length) / PAGE_SIZE;
		struct page *page = sg_page(sg);

		BUG_ON(i >= npages);
		for (j = 0; j < npages_this_entry; j++)
			*(tmp++) = page++;
	}
	vaddr = vmap(pages, npages, VM_MAP, pgprot);
	vfree(pages);

	if (vaddr == NULL)
		return ERR_PTR(-ENOMEM);

	return vaddr;
}

void ion_heap_unmap_kernel(struct ion_heap *heap,
			   struct ion_buffer *buffer)
{
	vunmap(buffer->vaddr);
}

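/*
 * ion_heap_map_user - map an ion buffer into a userspace VMA
 *
 * Walks the buffer's scatterlist and maps each contiguous chunk into the
 * VMA with remap_pfn_range(), skipping vma->vm_pgoff pages into the buffer
 * and clamping the final chunk to the end of the VMA.  Returns 0 on
 * success or the error from remap_pfn_range().
 */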
int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
		      struct vm_area_struct *vma)
{
	struct sg_table *table = buffer->sg_table;
	unsigned long addr = vma->vm_start;
	unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
	struct scatterlist *sg;
	int i;
	int ret;

	for_each_sg(table->sgl, sg, table->nents, i) {
		struct page *page = sg_page(sg);
		unsigned long remainder = vma->vm_end - addr;
		unsigned long len = sg->length;

		if (offset >= sg->length) {
			offset -= sg->length;
			continue;
		} else if (offset) {
			page += offset / PAGE_SIZE;
			len = sg->length - offset;
			offset = 0;
		}
		len = min(len, remainder);
		ret = remap_pfn_range(vma, addr, page_to_pfn(page), len,
				      vma->vm_page_prot);
		if (ret)
			return ret;
		addr += len;
		if (addr >= vma->vm_end)
			return 0;
	}
	return 0;
}

static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot)
{
	void *addr = vm_map_ram(pages, num, -1, pgprot);

	if (!addr)
		return -ENOMEM;
	memset(addr, 0, PAGE_SIZE * num);
	vm_unmap_ram(addr, num);

	return 0;
}

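/*
 * ion_heap_sglist_zero - zero all pages referenced by a scatterlist
 *
 * Gathers pages from the scatterlist in batches of up to 32, temporarily
 * maps each batch with vm_map_ram() via ion_heap_clear_pages() and clears
 * it with memset().  Called by ion_heap_buffer_zero() and
 * ion_heap_pages_zero().
 */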
static int ion_heap_sglist_zero(struct scatterlist *sgl, unsigned int nents,
				pgprot_t pgprot)
{
	int p = 0;
	int ret = 0;
	struct sg_page_iter piter;
	struct page *pages[32];

	for_each_sg_page(sgl, &piter, nents, 0) {
		pages[p++] = sg_page_iter_page(&piter);
		if (p == ARRAY_SIZE(pages)) {
			ret = ion_heap_clear_pages(pages, p, pgprot);
			if (ret)
				return ret;
			p = 0;
		}
	}
	if (p)
		ret = ion_heap_clear_pages(pages, p, pgprot);

	return ret;
}

int ion_heap_buffer_zero(struct ion_buffer *buffer)
{
	struct sg_table *table = buffer->sg_table;
	pgprot_t pgprot;

	if (buffer->flags & ION_FLAG_CACHED)
		pgprot = PAGE_KERNEL;
	else
		pgprot = pgprot_writecombine(PAGE_KERNEL);

	return ion_heap_sglist_zero(table->sgl, table->nents, pgprot);
}

int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, page, size, 0);
	return ion_heap_sglist_zero(&sg, 1, pgprot);
}

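/*
 * ion_heap_freelist_add - queue a buffer for deferred freeing
 *
 * Adds the buffer to the heap's free list under heap->lock, accounts its
 * size in free_list_size and wakes the deferred-free thread created by
 * ion_heap_init_deferred_free().
 */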
void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer)
{
	rt_mutex_lock(&heap->lock);
	list_add(&buffer->list, &heap->free_list);
	heap->free_list_size += buffer->size;
	rt_mutex_unlock(&heap->lock);
	wake_up(&heap->waitqueue);
}

size_t ion_heap_freelist_size(struct ion_heap *heap)
{
	size_t size;

	rt_mutex_lock(&heap->lock);
	size = heap->free_list_size;
	rt_mutex_unlock(&heap->lock);

	return size;
}

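/*
 * ion_heap_freelist_drain - free queued buffers synchronously
 *
 * Destroys buffers from the free list until at least size bytes have been
 * drained, or the whole list if size is 0, and returns the number of bytes
 * actually drained.  Note that ion_buffer_destroy() is called with
 * heap->lock held here, unlike in the deferred-free thread below.
 */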
size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
{
	struct ion_buffer *buffer, *tmp;
	size_t total_drained = 0;

	if (ion_heap_freelist_size(heap) == 0)
		return 0;

	rt_mutex_lock(&heap->lock);
	if (size == 0)
		size = heap->free_list_size;

	list_for_each_entry_safe(buffer, tmp, &heap->free_list, list) {
		if (total_drained >= size)
			break;
		list_del(&buffer->list);
		heap->free_list_size -= buffer->size;
		total_drained += buffer->size;
		ion_buffer_destroy(buffer);
	}
	rt_mutex_unlock(&heap->lock);

	return total_drained;
}

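/*
 * ion_heap_deferred_free - kthread body for asynchronous buffer freeing
 *
 * Sleeps (freezably) until the free list is non-empty, then pops one
 * buffer at a time and destroys it with heap->lock dropped, so new buffers
 * can be queued while destruction is in progress.
 */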
static int ion_heap_deferred_free(void *data)
{
	struct ion_heap *heap = data;

	while (true) {
		struct ion_buffer *buffer;

		wait_event_freezable(heap->waitqueue,
				     ion_heap_freelist_size(heap) > 0);

		rt_mutex_lock(&heap->lock);
		if (list_empty(&heap->free_list)) {
			rt_mutex_unlock(&heap->lock);
			continue;
		}
		buffer = list_first_entry(&heap->free_list, struct ion_buffer,
					  list);
		list_del(&buffer->list);
		heap->free_list_size -= buffer->size;
		rt_mutex_unlock(&heap->lock);
		ion_buffer_destroy(buffer);
	}

	return 0;
}

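/*
 * ion_heap_init_deferred_free - set up deferred freeing for a heap
 *
 * Initialises the free list, its lock and waitqueue, and starts the
 * deferred-free kthread at SCHED_IDLE priority.  Returns 0 on success or
 * the error from kthread_run().
 */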
int ion_heap_init_deferred_free(struct ion_heap *heap)
{
	struct sched_param param = { .sched_priority = 0 };

	INIT_LIST_HEAD(&heap->free_list);
	heap->free_list_size = 0;
	rt_mutex_init(&heap->lock);
	init_waitqueue_head(&heap->waitqueue);
	heap->task = kthread_run(ion_heap_deferred_free, heap,
				 "%s", heap->name);
	if (IS_ERR(heap->task)) {
		pr_err("%s: creating thread for deferred free failed\n",
		       __func__);
		return PTR_ERR(heap->task);
	}
	sched_setscheduler(heap->task, SCHED_IDLE, &param);
	return 0;
}

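/*
 * ion_heap_create - instantiate a heap from platform data
 *
 * Dispatches on heap_data->type to the matching heap constructor, logs and
 * returns ERR_PTR(-EINVAL) on an unknown type or a constructor failure,
 * and copies the name and id from the platform data into the new heap.
 */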
struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_heap *heap = NULL;

	switch (heap_data->type) {
	case ION_HEAP_TYPE_SYSTEM_CONTIG:
		heap = ion_system_contig_heap_create(heap_data);
		break;
	case ION_HEAP_TYPE_SYSTEM:
		heap = ion_system_heap_create(heap_data);
		break;
	case ION_HEAP_TYPE_CARVEOUT:
		heap = ion_carveout_heap_create(heap_data);
		break;
	case ION_HEAP_TYPE_CHUNK:
		heap = ion_chunk_heap_create(heap_data);
		break;
	case ION_HEAP_TYPE_DMA:
		heap = ion_cma_heap_create(heap_data);
		break;
	default:
		pr_err("%s: Invalid heap type %d\n", __func__,
		       heap_data->type);
		return ERR_PTR(-EINVAL);
	}

	if (IS_ERR_OR_NULL(heap)) {
		pr_err("%s: error creating heap %s type %d base %lu size %zu\n",
		       __func__, heap_data->name, heap_data->type,
		       heap_data->base, heap_data->size);
		return ERR_PTR(-EINVAL);
	}

	heap->name = heap_data->name;
	heap->id = heap_data->id;
	return heap;
}

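/*
 * ion_heap_destroy - tear down a heap created by ion_heap_create()
 *
 * Accepts NULL and dispatches on heap->type to the matching destructor,
 * logging an error for unknown types.
 */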
void ion_heap_destroy(struct ion_heap *heap)
{
	if (!heap)
		return;

	switch (heap->type) {
	case ION_HEAP_TYPE_SYSTEM_CONTIG:
		ion_system_contig_heap_destroy(heap);
		break;
	case ION_HEAP_TYPE_SYSTEM:
		ion_system_heap_destroy(heap);
		break;
	case ION_HEAP_TYPE_CARVEOUT:
		ion_carveout_heap_destroy(heap);
		break;
	case ION_HEAP_TYPE_CHUNK:
		ion_chunk_heap_destroy(heap);
		break;
	case ION_HEAP_TYPE_DMA:
		ion_cma_heap_destroy(heap);
		break;
	default:
		pr_err("%s: Invalid heap type %d\n", __func__,
		       heap->type);
	}
}