/*
 * drivers/staging/android/ion/ion_chunk_heap.c
 *
 * Copyright (C) 2012 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"

struct ion_chunk_heap {
	struct ion_heap heap;
	struct gen_pool *pool;		/* allocator over the carveout */
	ion_phys_addr_t base;		/* physical base of the carveout */
	unsigned long chunk_size;	/* allocation granularity, in bytes */
	unsigned long size;		/* total carveout size, in bytes */
	unsigned long allocated;	/* bytes currently handed out */
};

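/*
 * Example (illustrative sketch, not part of this file): a board file
 * would describe a chunk heap roughly like this, passing the chunk
 * size through the ->priv field.  The chunk size must be a power of
 * two, since ion_chunk_heap_create() feeds its log2 to
 * gen_pool_create() as the pool order.  All values here are
 * hypothetical.
 */
#if 0	/* illustrative only */
static struct ion_platform_heap example_chunk_heap_data = {
	.type	= ION_HEAP_TYPE_CHUNK,
	.id	= 1,
	.name	= "chunk",
	.base	= 0x90000000,
	.size	= 0x01000000,		/* 16 MiB carveout */
	.priv	= (void *)0x10000,	/* 64 KiB chunks */
};
#endif
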
/*
 * Allocate @size bytes by grabbing whole chunks from the gen_pool and
 * recording one scatterlist entry per chunk.  Requests are rounded up
 * to a multiple of the chunk size.
 */
static int ion_chunk_heap_allocate(struct ion_heap *heap,
				   struct ion_buffer *buffer,
				   unsigned long size, unsigned long align,
				   unsigned long flags)
{
	struct ion_chunk_heap *chunk_heap =
		container_of(heap, struct ion_chunk_heap, heap);
	struct sg_table *table;
	struct scatterlist *sg;
	int ret, i;
	unsigned long num_chunks;
	unsigned long allocated_size;

	if (align > chunk_heap->chunk_size)
		return -EINVAL;

	allocated_size = ALIGN(size, chunk_heap->chunk_size);
	num_chunks = allocated_size / chunk_heap->chunk_size;

	if (allocated_size > chunk_heap->size - chunk_heap->allocated)
		return -ENOMEM;

	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;
	ret = sg_alloc_table(table, num_chunks, GFP_KERNEL);
	if (ret) {
		kfree(table);
		return ret;
	}

	sg = table->sgl;
	for (i = 0; i < num_chunks; i++) {
		unsigned long paddr = gen_pool_alloc(chunk_heap->pool,
						     chunk_heap->chunk_size);
		if (!paddr)
			goto err;
		sg_set_page(sg, pfn_to_page(PFN_DOWN(paddr)),
			    chunk_heap->chunk_size, 0);
		sg = sg_next(sg);
	}

	buffer->priv_virt = table;
	chunk_heap->allocated += allocated_size;
	return 0;
err:
	/* Unwind: return the i chunks already taken back to the pool. */
	sg = table->sgl;
	for (i -= 1; i >= 0; i--) {
		gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
			      sg->length);
		sg = sg_next(sg);
	}
	sg_free_table(table);
	kfree(table);
	return -ENOMEM;
}

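/*
 * Worked example for the rounding above (illustrative numbers): with a
 * 64 KiB chunk_size, a 100 KiB request becomes allocated_size = 128 KiB
 * and num_chunks = 2, so the buffer ends up backed by two scatterlist
 * entries of one chunk each.
 */
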
/* Zero @buffer and return every chunk backing it to the pool. */
static void ion_chunk_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;
	struct ion_chunk_heap *chunk_heap =
		container_of(heap, struct ion_chunk_heap, heap);
	struct sg_table *table = buffer->priv_virt;
	struct scatterlist *sg;
	int i;
	unsigned long allocated_size;

	allocated_size = ALIGN(buffer->size, chunk_heap->chunk_size);

	ion_heap_buffer_zero(buffer);

	if (ion_buffer_cached(buffer))
		dma_sync_sg_for_device(NULL, table->sgl, table->nents,
				       DMA_BIDIRECTIONAL);

	for_each_sg(table->sgl, sg, table->nents, i) {
		gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
			      sg->length);
	}
	chunk_heap->allocated -= allocated_size;
	sg_free_table(table);
	kfree(table);
}

static struct sg_table *ion_chunk_heap_map_dma(struct ion_heap *heap,
					       struct ion_buffer *buffer)
{
	return buffer->priv_virt;
}

static void ion_chunk_heap_unmap_dma(struct ion_heap *heap,
				     struct ion_buffer *buffer)
{
}

static struct ion_heap_ops chunk_heap_ops = {
	.allocate = ion_chunk_heap_allocate,
	.free = ion_chunk_heap_free,
	.map_dma = ion_chunk_heap_map_dma,
	.unmap_dma = ion_chunk_heap_unmap_dma,
	.map_user = ion_heap_map_user,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
};

struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_chunk_heap *chunk_heap;
	int ret;
	struct page *page;
	size_t size;

	page = pfn_to_page(PFN_DOWN(heap_data->base));
	size = heap_data->size;

	ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL);

	ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL));
	if (ret)
		return ERR_PTR(ret);

	chunk_heap = kzalloc(sizeof(struct ion_chunk_heap), GFP_KERNEL);
	if (!chunk_heap)
		return ERR_PTR(-ENOMEM);

	/* Pool order is log2(chunk_size); chunk_size must be a power of two. */
	chunk_heap->chunk_size = (unsigned long)heap_data->priv;
	chunk_heap->pool = gen_pool_create(get_order(chunk_heap->chunk_size) +
					   PAGE_SHIFT, -1);
	if (!chunk_heap->pool) {
		ret = -ENOMEM;
		goto error_gen_pool_create;
	}
	chunk_heap->base = heap_data->base;
	chunk_heap->size = heap_data->size;
	chunk_heap->allocated = 0;

	gen_pool_add(chunk_heap->pool, chunk_heap->base, heap_data->size, -1);
	chunk_heap->heap.ops = &chunk_heap_ops;
	chunk_heap->heap.type = ION_HEAP_TYPE_CHUNK;
	chunk_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
	pr_info("%s: base %lu size %zu align %lu\n", __func__, chunk_heap->base,
		heap_data->size, heap_data->align);

	return &chunk_heap->heap;

error_gen_pool_create:
	kfree(chunk_heap);
	return ERR_PTR(ret);
}

void ion_chunk_heap_destroy(struct ion_heap *heap)
{
	struct ion_chunk_heap *chunk_heap =
	     container_of(heap, struct ion_chunk_heap, heap);

	gen_pool_destroy(chunk_heap->pool);
	kfree(chunk_heap);
}
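
/*
 * Usage sketch (illustrative, not part of this driver): a platform
 * driver would pair ion_chunk_heap_create() with ion_device_add_heap()
 * at probe time and call ion_chunk_heap_destroy() at teardown.  The
 * example_* names (and the heap data from the sketch near the top of
 * this file) are hypothetical.
 */
#if 0	/* illustrative only */
static struct ion_heap *example_heap;

static int example_probe(struct ion_device *idev)
{
	example_heap = ion_chunk_heap_create(&example_chunk_heap_data);
	if (IS_ERR(example_heap))
		return PTR_ERR(example_heap);
	ion_device_add_heap(idev, example_heap);
	return 0;
}

static void example_remove(void)
{
	ion_chunk_heap_destroy(example_heap);
}
#endif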