/*
 * drivers/staging/android/ion/ion_system_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <asm/page.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"
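/*
 * Higher-order allocations are opportunistic: they may fail quietly (no OOM
 * warning, no retries, no waiting for reclaim) because the allocator below
 * simply falls back to smaller orders.
 */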
static unsigned int high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO |
					    __GFP_NOWARN | __GFP_NORETRY) &
					   ~__GFP_WAIT;
static unsigned int low_order_gfp_flags  = (GFP_HIGHUSER | __GFP_ZERO |
					    __GFP_NOWARN);
static const unsigned int orders[] = {8, 4, 0};
static const int num_orders = ARRAY_SIZE(orders);
static int order_to_index(unsigned int order)
{
	int i;

	for (i = 0; i < num_orders; i++)
		if (order == orders[i])
			return i;
	BUG();
	return -1;
}

static unsigned int order_to_size(int order)
{
	return PAGE_SIZE << order;
}
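/*
 * The system heap keeps one ion_page_pool per entry in orders[], so freed
 * uncached allocations can be recycled instead of going back to the buddy
 * allocator.  A page_info tracks one chunk of a buffer while the buffer is
 * being assembled.
 */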
struct ion_system_heap {
	struct ion_heap heap;
	struct ion_page_pool **pools;
};

struct page_info {
	struct page *page;
	unsigned int order;
	struct list_head list;
};
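/*
 * Uncached buffers are served from the per-order page pools; cached buffers
 * are allocated directly and synced for device before use.
 */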
static struct page *alloc_buffer_page(struct ion_system_heap *heap,
				      struct ion_buffer *buffer,
				      unsigned long order)
{
	bool cached = ion_buffer_cached(buffer);
	struct ion_page_pool *pool = heap->pools[order_to_index(order)];
	struct page *page;

	if (!cached) {
		page = ion_page_pool_alloc(pool);
	} else {
		gfp_t gfp_flags = low_order_gfp_flags;

		if (order > 4)
			gfp_flags = high_order_gfp_flags;
		page = ion_heap_alloc_pages(buffer, gfp_flags, order);
		if (!page)
			return NULL;
		ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order,
					  DMA_BIDIRECTIONAL);
	}
	return page;
}
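/*
 * Return an uncached chunk to its pool; cached chunks go straight back to
 * the page allocator, page by page if the buffer was faulted into userspace.
 */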
static void free_buffer_page(struct ion_system_heap *heap,
			     struct ion_buffer *buffer, struct page *page,
			     unsigned int order)
{
	bool cached = ion_buffer_cached(buffer);
	bool split_pages = ion_buffer_fault_user_mappings(buffer);
	int i;

	if (!cached) {
		struct ion_page_pool *pool = heap->pools[order_to_index(order)];

		ion_page_pool_free(pool, page);
	} else if (split_pages) {
		for (i = 0; i < (1 << order); i++)
			__free_page(page + i);
	} else {
		__free_pages(page, order);
	}
}
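/*
 * Walk orders[] from largest to smallest and return the first chunk that
 * both fits in the remaining size and does not exceed max_order.
 */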
static struct page_info *alloc_largest_available(struct ion_system_heap *heap,
						 struct ion_buffer *buffer,
						 unsigned long size,
						 unsigned int max_order)
{
	struct page *page;
	struct page_info *info;
	int i;

	for (i = 0; i < num_orders; i++) {
		if (size < order_to_size(orders[i]))
			continue;
		if (max_order < orders[i])
			continue;

		page = alloc_buffer_page(heap, buffer, orders[i]);
		if (!page)
			continue;

		info = kmalloc(sizeof(struct page_info), GFP_KERNEL);
		if (!info) {
			free_buffer_page(heap, buffer, page, orders[i]);
			return NULL;
		}
		info->page = page;
		info->order = orders[i];
		return info;
	}
	return NULL;
}
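/*
 * Build a buffer out of the largest chunks available, then describe the
 * result with one scatterlist entry per chunk.  Passing the previous chunk's
 * order as max_order keeps the walk monotonic, so failed orders are never
 * retried.
 */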
static int ion_system_heap_allocate(struct ion_heap *heap,
				    struct ion_buffer *buffer,
				    unsigned long size, unsigned long align,
				    unsigned long flags)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	struct sg_table *table;
	struct scatterlist *sg;
	struct list_head pages;
	struct page_info *info, *tmp_info;
	int ret;
	int i = 0;
	long size_remaining = PAGE_ALIGN(size);
	unsigned int max_order = orders[0];

	if (align > PAGE_SIZE)
		return -EINVAL;

	if (ion_buffer_fault_user_mappings(buffer))
		return -ENODEV;

	INIT_LIST_HEAD(&pages);
	while (size_remaining > 0) {
		info = alloc_largest_available(sys_heap, buffer, size_remaining,
					       max_order);
		if (!info)
			goto err;
		list_add_tail(&info->list, &pages);
		size_remaining -= (1 << info->order) * PAGE_SIZE;
		max_order = info->order;
		i++;
	}

	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		goto err;

	ret = sg_alloc_table(table, i, GFP_KERNEL);
	if (ret)
		goto err1;

	sg = table->sgl;
	list_for_each_entry_safe(info, tmp_info, &pages, list) {
		struct page *page = info->page;

		sg_set_page(sg, page, (1 << info->order) * PAGE_SIZE, 0);
		sg = sg_next(sg);
		list_del(&info->list);
		kfree(info);
	}

	buffer->priv_virt = table;
	return 0;

err1:
	kfree(table);
err:
	list_for_each_entry_safe(info, tmp_info, &pages, list) {
		free_buffer_page(sys_heap, buffer, info->page, info->order);
		kfree(info);
	}
	return -ENOMEM;
}
void ion_system_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	struct sg_table *table = buffer->sg_table;
	bool cached = ion_buffer_cached(buffer);
	struct scatterlist *sg;
	int i;

	/*
	 * uncached pages come from the page pools, zero them before returning
	 * for security purposes (other allocations are zeroed at alloc time)
	 */
	if (!cached)
		ion_heap_buffer_zero(buffer);

	for_each_sg(table->sgl, sg, table->nents, i)
		free_buffer_page(sys_heap, buffer, sg_page(sg),
				 get_order(sg->length));
	sg_free_table(table);
	kfree(table);
}
struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
					 struct ion_buffer *buffer)
{
	return buffer->priv_virt;
}

void ion_system_heap_unmap_dma(struct ion_heap *heap,
			       struct ion_buffer *buffer)
{
}
static struct ion_heap_ops system_heap_ops = {
	.allocate = ion_system_heap_allocate,
	.free = ion_system_heap_free,
	.map_dma = ion_system_heap_map_dma,
	.unmap_dma = ion_system_heap_unmap_dma,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
	.map_user = ion_heap_map_user,
};
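/*
 * Shrinker callback (old single-callback API): when nr_to_scan is 0 this is
 * only a query, so just report how many pages the pools and the deferred
 * free list are holding.
 */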
static int ion_system_heap_shrink(struct shrinker *shrinker,
				  struct shrink_control *sc)
{
	struct ion_heap *heap = container_of(shrinker, struct ion_heap,
					     shrinker);
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	int nr_total = 0;
	int nr_freed = 0;
	int i;

	if (sc->nr_to_scan == 0)
		goto end;

	/*
	 * shrink the free list first, no point in zeroing the memory if
	 * we're just going to reclaim it
	 */
	nr_freed += ion_heap_freelist_drain(heap, sc->nr_to_scan * PAGE_SIZE) /
		PAGE_SIZE;

	if (nr_freed >= sc->nr_to_scan)
		goto end;

	for (i = 0; i < num_orders; i++) {
		struct ion_page_pool *pool = sys_heap->pools[i];

		nr_freed += ion_page_pool_shrink(pool, sc->gfp_mask,
						 sc->nr_to_scan);
		if (nr_freed >= sc->nr_to_scan)
			break;
	}

end:
	/*
	 * total number of items is whatever the page pools are holding
	 * plus whatever's in the freelist
	 */
	for (i = 0; i < num_orders; i++) {
		struct ion_page_pool *pool = sys_heap->pools[i];

		nr_total += ion_page_pool_shrink(pool, sc->gfp_mask, 0);
	}
	nr_total += ion_heap_freelist_size(heap) / PAGE_SIZE;
	return nr_total;
}
static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
				      void *unused)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	int i;

	for (i = 0; i < num_orders; i++) {
		struct ion_page_pool *pool = sys_heap->pools[i];

		seq_printf(s, "%d order %u highmem pages in pool = %lu total\n",
			   pool->high_count, pool->order,
			   (1 << pool->order) * PAGE_SIZE * pool->high_count);
		seq_printf(s, "%d order %u lowmem pages in pool = %lu total\n",
			   pool->low_count, pool->order,
			   (1 << pool->order) * PAGE_SIZE * pool->low_count);
	}
	return 0;
}
struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
{
	struct ion_system_heap *heap;
	int i;

	heap = kzalloc(sizeof(struct ion_system_heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->heap.ops = &system_heap_ops;
	heap->heap.type = ION_HEAP_TYPE_SYSTEM;
	heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
	heap->pools = kzalloc(sizeof(struct ion_page_pool *) * num_orders,
			      GFP_KERNEL);
	if (!heap->pools)
		goto err_alloc_pools;
	for (i = 0; i < num_orders; i++) {
		struct ion_page_pool *pool;
		gfp_t gfp_flags = low_order_gfp_flags;

		if (orders[i] > 4)
			gfp_flags = high_order_gfp_flags;
		pool = ion_page_pool_create(gfp_flags, orders[i]);
		if (!pool)
			goto err_create_pool;
		heap->pools[i] = pool;
	}

	heap->heap.shrinker.shrink = ion_system_heap_shrink;
	heap->heap.shrinker.seeks = DEFAULT_SEEKS;
	heap->heap.shrinker.batch = 0;
	register_shrinker(&heap->heap.shrinker);
	heap->heap.debug_show = ion_system_heap_debug_show;
	return &heap->heap;

err_create_pool:
	for (i = 0; i < num_orders; i++)
		if (heap->pools[i])
			ion_page_pool_destroy(heap->pools[i]);
	kfree(heap->pools);
err_alloc_pools:
	kfree(heap);
	return ERR_PTR(-ENOMEM);
}
void ion_system_heap_destroy(struct ion_heap *heap)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	int i;

	for (i = 0; i < num_orders; i++)
		ion_page_pool_destroy(sys_heap->pools[i]);
	kfree(sys_heap->pools);
	kfree(sys_heap);
}
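/*
 * The system contig heap services small physically contiguous allocations
 * straight out of kmalloc.
 */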
static int ion_system_contig_heap_allocate(struct ion_heap *heap,
					   struct ion_buffer *buffer,
					   unsigned long len,
					   unsigned long align,
					   unsigned long flags)
{
	int order = get_order(len);

	if (align > (PAGE_SIZE << order))
		return -EINVAL;

	if (ion_buffer_fault_user_mappings(buffer))
		return -ENODEV;

	buffer->priv_virt = kzalloc(len, GFP_KERNEL);
	if (!buffer->priv_virt)
		return -ENOMEM;
	return 0;
}
void ion_system_contig_heap_free(struct ion_buffer *buffer)
{
	kfree(buffer->priv_virt);
}
static int ion_system_contig_heap_phys(struct ion_heap *heap,
				       struct ion_buffer *buffer,
				       ion_phys_addr_t *addr, size_t *len)
{
	*addr = virt_to_phys(buffer->priv_virt);
	*len = buffer->size;
	return 0;
}
struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
						struct ion_buffer *buffer)
{
	struct sg_table *table;
	int ret;

	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		return ERR_PTR(-ENOMEM);
	ret = sg_alloc_table(table, 1, GFP_KERNEL);
	if (ret) {
		kfree(table);
		return ERR_PTR(ret);
	}
	sg_set_page(table->sgl, virt_to_page(buffer->priv_virt), buffer->size,
		    0);
	return table;
}
void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
				      struct ion_buffer *buffer)
{
	sg_free_table(buffer->sg_table);
	kfree(buffer->sg_table);
}
static struct ion_heap_ops kmalloc_ops = {
	.allocate = ion_system_contig_heap_allocate,
	.free = ion_system_contig_heap_free,
	.phys = ion_system_contig_heap_phys,
	.map_dma = ion_system_contig_heap_map_dma,
	.unmap_dma = ion_system_contig_heap_unmap_dma,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
	.map_user = ion_heap_map_user,
};
struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused)
{
	struct ion_heap *heap;

	heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->ops = &kmalloc_ops;
	heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
	return heap;
}
void ion_system_contig_heap_destroy(struct ion_heap *heap)
{
	kfree(heap);
}