/*
 * drivers/staging/android/ion/ion_chunk_heap.c
 *
 * Copyright (C) 2012 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
//#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"

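/*
 * A chunk heap hands out physically contiguous chunks of a fixed size,
 * carved out of one reserved memory region and tracked with a gen_pool.
 */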
struct ion_chunk_heap {
	struct ion_heap heap;
	struct gen_pool *pool;
	ion_phys_addr_t base;
	unsigned long chunk_size;
	unsigned long size;
	unsigned long allocated;
};

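/*
 * Allocate a buffer by pulling fixed-size chunks from the gen_pool and
 * recording one chunk per scatterlist entry. The request is rejected up
 * front if it would exceed the heap's capacity; on a partial failure all
 * chunks already taken are returned to the pool.
 */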
static int ion_chunk_heap_allocate(struct ion_heap *heap,
				   struct ion_buffer *buffer,
				   unsigned long size, unsigned long align,
				   unsigned long flags)
{
	struct ion_chunk_heap *chunk_heap =
		container_of(heap, struct ion_chunk_heap, heap);
	struct sg_table *table;
	struct scatterlist *sg;
	int ret, i;
	unsigned long num_chunks;
	unsigned long allocated_size;

	if (align > chunk_heap->chunk_size)
		return -EINVAL;

	allocated_size = ALIGN(size, chunk_heap->chunk_size);
	num_chunks = allocated_size / chunk_heap->chunk_size;

	if (allocated_size > chunk_heap->size - chunk_heap->allocated)
		return -ENOMEM;

	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;
	ret = sg_alloc_table(table, num_chunks, GFP_KERNEL);
	if (ret) {
		kfree(table);
		return ret;
	}

	sg = table->sgl;
	for (i = 0; i < num_chunks; i++) {
		unsigned long paddr = gen_pool_alloc(chunk_heap->pool,
						     chunk_heap->chunk_size);
		if (!paddr)
			goto err;
		sg_set_page(sg, pfn_to_page(PFN_DOWN(paddr)),
			    chunk_heap->chunk_size, 0);
		sg = sg_next(sg);
	}

	buffer->priv_virt = table;
	chunk_heap->allocated += allocated_size;
	return 0;
err:
	sg = table->sgl;
	for (i -= 1; i >= 0; i--) {
		gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
			      sg->length);
		sg = sg_next(sg);
	}
	sg_free_table(table);
	kfree(table);
	return -ENOMEM;
}

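/*
 * Return every chunk of a buffer to the gen_pool. The pages are zeroed
 * first, and cached buffers are synced for the device so no dirty cache
 * lines outlive the allocation.
 */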
static void ion_chunk_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;
	struct ion_chunk_heap *chunk_heap =
		container_of(heap, struct ion_chunk_heap, heap);
	struct sg_table *table = buffer->priv_virt;
	struct scatterlist *sg;
	int i;
	unsigned long allocated_size;

	allocated_size = ALIGN(buffer->size, chunk_heap->chunk_size);

	ion_heap_buffer_zero(buffer);

	if (ion_buffer_cached(buffer))
		dma_sync_sg_for_device(NULL, table->sgl, table->nents,
				       DMA_BIDIRECTIONAL);

	for_each_sg(table->sgl, sg, table->nents, i) {
		gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
			      sg->length);
	}
	chunk_heap->allocated -= allocated_size;
	sg_free_table(table);
	kfree(table);
}

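/*
 * The scatterlist built at allocation time doubles as the DMA description,
 * so mapping simply hands back the table and unmapping is a no-op.
 */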
static struct sg_table *ion_chunk_heap_map_dma(struct ion_heap *heap,
					       struct ion_buffer *buffer)
{
	return buffer->priv_virt;
}

static void ion_chunk_heap_unmap_dma(struct ion_heap *heap,
				     struct ion_buffer *buffer)
{
}

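/* Kernel and userspace mappings are handled by the generic ion heap helpers. */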
static struct ion_heap_ops chunk_heap_ops = {
	.allocate = ion_chunk_heap_allocate,
	.free = ion_chunk_heap_free,
	.map_dma = ion_chunk_heap_map_dma,
	.unmap_dma = ion_chunk_heap_unmap_dma,
	.map_user = ion_heap_map_user,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
};

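/*
 * Create a chunk heap over the reserved region described by heap_data.
 * The chunk size arrives in heap_data->priv; the region is flushed and
 * zeroed, then handed to a gen_pool with chunk-sized granularity.
 */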
struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_chunk_heap *chunk_heap;
	int ret;
	struct page *page;
	size_t size;

	page = pfn_to_page(PFN_DOWN(heap_data->base));
	size = heap_data->size;

	ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL);

	ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL));
	if (ret)
		return ERR_PTR(ret);

	chunk_heap = kzalloc(sizeof(struct ion_chunk_heap), GFP_KERNEL);
	if (!chunk_heap)
		return ERR_PTR(-ENOMEM);

	chunk_heap->chunk_size = (unsigned long)heap_data->priv;
	chunk_heap->pool = gen_pool_create(get_order(chunk_heap->chunk_size) +
					   PAGE_SHIFT, -1);
	if (!chunk_heap->pool) {
		ret = -ENOMEM;
		goto error_gen_pool_create;
	}
	chunk_heap->base = heap_data->base;
	chunk_heap->size = heap_data->size;
	chunk_heap->allocated = 0;

	gen_pool_add(chunk_heap->pool, chunk_heap->base, heap_data->size, -1);
	chunk_heap->heap.ops = &chunk_heap_ops;
	chunk_heap->heap.type = ION_HEAP_TYPE_CHUNK;
	chunk_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
	pr_info("%s: base %lu size %zu align %ld\n", __func__, chunk_heap->base,
		heap_data->size, heap_data->align);

	return &chunk_heap->heap;

error_gen_pool_create:
	kfree(chunk_heap);
	return ERR_PTR(ret);
}

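/* Tear down the gen_pool and free a heap created by ion_chunk_heap_create(). */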
void ion_chunk_heap_destroy(struct ion_heap *heap)
{
	struct ion_chunk_heap *chunk_heap =
		container_of(heap, struct ion_chunk_heap, heap);

	gen_pool_destroy(chunk_heap->pool);
	kfree(chunk_heap);
}