]> Pileus Git - ~andy/linux/blob - drivers/staging/tidspbridge/rmgr/rmm.c
staging: ti dspbridge: make variables in prototypes match within functions definitions
[~andy/linux] / drivers / staging / tidspbridge / rmgr / rmm.c
1 /*
2  * rmm.c
3  *
4  * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5  *
6  * Copyright (C) 2005-2006 Texas Instruments, Inc.
7  *
8  * This package is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 as
10  * published by the Free Software Foundation.
11  *
12  * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
13  * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
14  * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
15  */
16
17 /*
18  *  This memory manager provides general heap management and arbitrary
19  *  alignment for any number of memory segments.
20  *
21  *  Notes:
22  *
23  *  Memory blocks are allocated from the end of the first free memory
24  *  block large enough to satisfy the request.  Alignment requirements
25  *  are satisfied by "sliding" the block forward until its base satisfies
26  *  the alignment specification; if this is not possible then the next
27  *  free block large enough to hold the request is tried.
28  *
29  *  Since alignment can cause the creation of a new free block - the
30  *  unused memory formed between the start of the original free block
31  *  and the start of the allocated block - the memory manager must free
32  *  this memory to prevent a memory leak.
33  *
34  *  Overlay memory is managed by reserving through rmm_alloc, and freeing
35  *  it through rmm_free. The memory manager prevents DSP code/data that is
36  *  overlayed from being overwritten as long as the memory it runs at has
37  *  been allocated, and not yet freed.
38  */
39
40 /*  ----------------------------------- DSP/BIOS Bridge */
41 #include <dspbridge/std.h>
42 #include <dspbridge/dbdefs.h>
43
44 /*  ----------------------------------- Trace & Debug */
45 #include <dspbridge/dbc.h>
46
47 /*  ----------------------------------- OS Adaptation Layer */
48 #include <dspbridge/list.h>
49
50 /*  ----------------------------------- This */
51 #include <dspbridge/rmm.h>
52
53 /*
54  *  ======== rmm_header ========
55  *  This header is used to maintain a list of free memory blocks.
56  */
57 struct rmm_header {
58         struct rmm_header *next;        /* form a free memory link list */
59         u32 size;               /* size of the free memory */
60         u32 addr;               /* DSP address of memory block */
61 };
62
63 /*
64  *  ======== rmm_ovly_sect ========
65  *  Keeps track of memory occupied by overlay section.
66  */
67 struct rmm_ovly_sect {
68         struct list_head list_elem;
69         u32 addr;               /* Start of memory section */
70         u32 size;               /* Length (target MAUs) of section */
71         s32 page;               /* Memory page */
72 };
73
74 /*
75  *  ======== rmm_target_obj ========
76  */
77 struct rmm_target_obj {
78         struct rmm_segment *seg_tab;
79         struct rmm_header **free_list;
80         u32 num_segs;
81         struct lst_list *ovly_list;     /* List of overlay memory in use */
82 };
83
84 static u32 refs;                /* module reference count */
85
86 static bool alloc_block(struct rmm_target_obj *target, u32 segid, u32 size,
87                         u32 align, u32 *dsp_address);
88 static bool free_block(struct rmm_target_obj *target, u32 segid, u32 addr,
89                        u32 size);
90
91 /*
92  *  ======== rmm_alloc ========
93  */
94 int rmm_alloc(struct rmm_target_obj *target, u32 segid, u32 size,
95                      u32 align, u32 *dsp_address, bool reserve)
96 {
97         struct rmm_ovly_sect *sect;
98         struct rmm_ovly_sect *prev_sect = NULL;
99         struct rmm_ovly_sect *new_sect;
100         u32 addr;
101         int status = 0;
102
103         DBC_REQUIRE(target);
104         DBC_REQUIRE(dsp_address != NULL);
105         DBC_REQUIRE(size > 0);
106         DBC_REQUIRE(reserve || (target->num_segs > 0));
107         DBC_REQUIRE(refs > 0);
108
109         if (!reserve) {
110                 if (!alloc_block(target, segid, size, align, dsp_address)) {
111                         status = -ENOMEM;
112                 } else {
113                         /* Increment the number of allocated blocks in this
114                          * segment */
115                         target->seg_tab[segid].number++;
116                 }
117                 goto func_end;
118         }
119         /* An overlay section - See if block is already in use. If not,
120          * insert into the list in ascending address size. */
121         addr = *dsp_address;
122         sect = (struct rmm_ovly_sect *)lst_first(target->ovly_list);
123         /*  Find place to insert new list element. List is sorted from
124          *  smallest to largest address. */
125         while (sect != NULL) {
126                 if (addr <= sect->addr) {
127                         /* Check for overlap with sect */
128                         if ((addr + size > sect->addr) || (prev_sect &&
129                                                            (prev_sect->addr +
130                                                             prev_sect->size >
131                                                             addr))) {
132                                 status = -ENXIO;
133                         }
134                         break;
135                 }
136                 prev_sect = sect;
137                 sect = (struct rmm_ovly_sect *)lst_next(target->ovly_list,
138                                                         (struct list_head *)
139                                                         sect);
140         }
141         if (DSP_SUCCEEDED(status)) {
142                 /* No overlap - allocate list element for new section. */
143                 new_sect = kzalloc(sizeof(struct rmm_ovly_sect), GFP_KERNEL);
144                 if (new_sect == NULL) {
145                         status = -ENOMEM;
146                 } else {
147                         lst_init_elem((struct list_head *)new_sect);
148                         new_sect->addr = addr;
149                         new_sect->size = size;
150                         new_sect->page = segid;
151                         if (sect == NULL) {
152                                 /* Put new section at the end of the list */
153                                 lst_put_tail(target->ovly_list,
154                                              (struct list_head *)new_sect);
155                         } else {
156                                 /* Put new section just before sect */
157                                 lst_insert_before(target->ovly_list,
158                                                   (struct list_head *)new_sect,
159                                                   (struct list_head *)sect);
160                         }
161                 }
162         }
163 func_end:
164         return status;
165 }
166
167 /*
168  *  ======== rmm_create ========
169  */
170 int rmm_create(struct rmm_target_obj **target_obj,
171                       struct rmm_segment seg_tab[], u32 num_segs)
172 {
173         struct rmm_header *hptr;
174         struct rmm_segment *sptr, *tmp;
175         struct rmm_target_obj *target;
176         s32 i;
177         int status = 0;
178
179         DBC_REQUIRE(target_obj != NULL);
180         DBC_REQUIRE(num_segs == 0 || seg_tab != NULL);
181
182         /* Allocate DBL target object */
183         target = kzalloc(sizeof(struct rmm_target_obj), GFP_KERNEL);
184
185         if (target == NULL)
186                 status = -ENOMEM;
187
188         if (DSP_FAILED(status))
189                 goto func_cont;
190
191         target->num_segs = num_segs;
192         if (!(num_segs > 0))
193                 goto func_cont;
194
195         /* Allocate the memory for freelist from host's memory */
196         target->free_list = kzalloc(num_segs * sizeof(struct rmm_header *),
197                                                         GFP_KERNEL);
198         if (target->free_list == NULL) {
199                 status = -ENOMEM;
200         } else {
201                 /* Allocate headers for each element on the free list */
202                 for (i = 0; i < (s32) num_segs; i++) {
203                         target->free_list[i] =
204                                 kzalloc(sizeof(struct rmm_header), GFP_KERNEL);
205                         if (target->free_list[i] == NULL) {
206                                 status = -ENOMEM;
207                                 break;
208                         }
209                 }
210                 /* Allocate memory for initial segment table */
211                 target->seg_tab = kzalloc(num_segs * sizeof(struct rmm_segment),
212                                                                 GFP_KERNEL);
213                 if (target->seg_tab == NULL) {
214                         status = -ENOMEM;
215                 } else {
216                         /* Initialize segment table and free list */
217                         sptr = target->seg_tab;
218                         for (i = 0, tmp = seg_tab; num_segs > 0;
219                              num_segs--, i++) {
220                                 *sptr = *tmp;
221                                 hptr = target->free_list[i];
222                                 hptr->addr = tmp->base;
223                                 hptr->size = tmp->length;
224                                 hptr->next = NULL;
225                                 tmp++;
226                                 sptr++;
227                         }
228                 }
229         }
230 func_cont:
231         /* Initialize overlay memory list */
232         if (DSP_SUCCEEDED(status)) {
233                 target->ovly_list = kzalloc(sizeof(struct lst_list),
234                                                         GFP_KERNEL);
235                 if (target->ovly_list == NULL)
236                         status = -ENOMEM;
237                 else
238                         INIT_LIST_HEAD(&target->ovly_list->head);
239         }
240
241         if (DSP_SUCCEEDED(status)) {
242                 *target_obj = target;
243         } else {
244                 *target_obj = NULL;
245                 if (target)
246                         rmm_delete(target);
247
248         }
249
250         DBC_ENSURE((DSP_SUCCEEDED(status) && *target_obj)
251                    || (DSP_FAILED(status) && *target_obj == NULL));
252
253         return status;
254 }
255
256 /*
257  *  ======== rmm_delete ========
258  */
259 void rmm_delete(struct rmm_target_obj *target)
260 {
261         struct rmm_ovly_sect *ovly_section;
262         struct rmm_header *hptr;
263         struct rmm_header *next;
264         u32 i;
265
266         DBC_REQUIRE(target);
267
268         kfree(target->seg_tab);
269
270         if (target->ovly_list) {
271                 while ((ovly_section = (struct rmm_ovly_sect *)lst_get_head
272                         (target->ovly_list))) {
273                         kfree(ovly_section);
274                 }
275                 DBC_ASSERT(LST_IS_EMPTY(target->ovly_list));
276                 kfree(target->ovly_list);
277         }
278
279         if (target->free_list != NULL) {
280                 /* Free elements on freelist */
281                 for (i = 0; i < target->num_segs; i++) {
282                         hptr = next = target->free_list[i];
283                         while (next) {
284                                 hptr = next;
285                                 next = hptr->next;
286                                 kfree(hptr);
287                         }
288                 }
289                 kfree(target->free_list);
290         }
291
292         kfree(target);
293 }
294
295 /*
296  *  ======== rmm_exit ========
297  */
298 void rmm_exit(void)
299 {
300         DBC_REQUIRE(refs > 0);
301
302         refs--;
303
304         DBC_ENSURE(refs >= 0);
305 }
306
307 /*
308  *  ======== rmm_free ========
309  */
310 bool rmm_free(struct rmm_target_obj *target, u32 segid, u32 dsp_addr, u32 size,
311               bool reserved)
312 {
313         struct rmm_ovly_sect *sect;
314         bool ret = true;
315
316         DBC_REQUIRE(target);
317
318         DBC_REQUIRE(reserved || segid < target->num_segs);
319         DBC_REQUIRE(reserved || (dsp_addr >= target->seg_tab[segid].base &&
320                                  (dsp_addr + size) <= (target->seg_tab[segid].
321                                                    base +
322                                                    target->seg_tab[segid].
323                                                    length)));
324
325         /*
326          *  Free or unreserve memory.
327          */
328         if (!reserved) {
329                 ret = free_block(target, segid, dsp_addr, size);
330                 if (ret)
331                         target->seg_tab[segid].number--;
332
333         } else {
334                 /* Unreserve memory */
335                 sect = (struct rmm_ovly_sect *)lst_first(target->ovly_list);
336                 while (sect != NULL) {
337                         if (dsp_addr == sect->addr) {
338                                 DBC_ASSERT(size == sect->size);
339                                 /* Remove from list */
340                                 lst_remove_elem(target->ovly_list,
341                                                 (struct list_head *)sect);
342                                 kfree(sect);
343                                 break;
344                         }
345                         sect =
346                             (struct rmm_ovly_sect *)lst_next(target->ovly_list,
347                                                              (struct list_head
348                                                               *)sect);
349                 }
350                 if (sect == NULL)
351                         ret = false;
352
353         }
354         return ret;
355 }
356
357 /*
358  *  ======== rmm_init ========
359  */
360 bool rmm_init(void)
361 {
362         DBC_REQUIRE(refs >= 0);
363
364         refs++;
365
366         return true;
367 }
368
369 /*
370  *  ======== rmm_stat ========
371  */
372 bool rmm_stat(struct rmm_target_obj *target, enum dsp_memtype segid,
373               struct dsp_memstat *mem_stat_buf)
374 {
375         struct rmm_header *head;
376         bool ret = false;
377         u32 max_free_size = 0;
378         u32 total_free_size = 0;
379         u32 free_blocks = 0;
380
381         DBC_REQUIRE(mem_stat_buf != NULL);
382         DBC_ASSERT(target != NULL);
383
384         if ((u32) segid < target->num_segs) {
385                 head = target->free_list[segid];
386
387                 /* Collect data from free_list */
388                 while (head != NULL) {
389                         max_free_size = max(max_free_size, head->size);
390                         total_free_size += head->size;
391                         free_blocks++;
392                         head = head->next;
393                 }
394
395                 /* ul_size */
396                 mem_stat_buf->ul_size = target->seg_tab[segid].length;
397
398                 /* ul_num_free_blocks */
399                 mem_stat_buf->ul_num_free_blocks = free_blocks;
400
401                 /* ul_total_free_size */
402                 mem_stat_buf->ul_total_free_size = total_free_size;
403
404                 /* ul_len_max_free_block */
405                 mem_stat_buf->ul_len_max_free_block = max_free_size;
406
407                 /* ul_num_alloc_blocks */
408                 mem_stat_buf->ul_num_alloc_blocks =
409                     target->seg_tab[segid].number;
410
411                 ret = true;
412         }
413
414         return ret;
415 }
416
417 /*
418  *  ======== balloc ========
419  *  This allocation function allocates memory from the lowest addresses
420  *  first.
421  */
422 static bool alloc_block(struct rmm_target_obj *target, u32 segid, u32 size,
423                         u32 align, u32 *dsp_address)
424 {
425         struct rmm_header *head;
426         struct rmm_header *prevhead = NULL;
427         struct rmm_header *next;
428         u32 tmpalign;
429         u32 alignbytes;
430         u32 hsize;
431         u32 allocsize;
432         u32 addr;
433
434         alignbytes = (align == 0) ? 1 : align;
435         prevhead = NULL;
436         head = target->free_list[segid];
437
438         do {
439                 hsize = head->size;
440                 next = head->next;
441
442                 addr = head->addr;      /* alloc from the bottom */
443
444                 /* align allocation */
445                 (tmpalign = (u32) addr % alignbytes);
446                 if (tmpalign != 0)
447                         tmpalign = alignbytes - tmpalign;
448
449                 allocsize = size + tmpalign;
450
451                 if (hsize >= allocsize) {       /* big enough */
452                         if (hsize == allocsize && prevhead != NULL) {
453                                 prevhead->next = next;
454                                 kfree(head);
455                         } else {
456                                 head->size = hsize - allocsize;
457                                 head->addr += allocsize;
458                         }
459
460                         /* free up any hole created by alignment */
461                         if (tmpalign)
462                                 free_block(target, segid, addr, tmpalign);
463
464                         *dsp_address = addr + tmpalign;
465                         return true;
466                 }
467
468                 prevhead = head;
469                 head = next;
470
471         } while (head != NULL);
472
473         return false;
474 }
475
476 /*
477  *  ======== free_block ========
478  *  TO DO: free_block() allocates memory, which could result in failure.
479  *  Could allocate an rmm_header in rmm_alloc(), to be kept in a pool.
480  *  free_block() could use an rmm_header from the pool, freeing as blocks
481  *  are coalesced.
482  */
483 static bool free_block(struct rmm_target_obj *target, u32 segid, u32 addr,
484                        u32 size)
485 {
486         struct rmm_header *head;
487         struct rmm_header *thead;
488         struct rmm_header *rhead;
489         bool ret = true;
490
491         /* Create a memory header to hold the newly free'd block. */
492         rhead = kzalloc(sizeof(struct rmm_header), GFP_KERNEL);
493         if (rhead == NULL) {
494                 ret = false;
495         } else {
496                 /* search down the free list to find the right place for addr */
497                 head = target->free_list[segid];
498
499                 if (addr >= head->addr) {
500                         while (head->next != NULL && addr > head->next->addr)
501                                 head = head->next;
502
503                         thead = head->next;
504
505                         head->next = rhead;
506                         rhead->next = thead;
507                         rhead->addr = addr;
508                         rhead->size = size;
509                 } else {
510                         *rhead = *head;
511                         head->next = rhead;
512                         head->addr = addr;
513                         head->size = size;
514                         thead = rhead->next;
515                 }
516
517                 /* join with upper block, if possible */
518                 if (thead != NULL && (rhead->addr + rhead->size) ==
519                     thead->addr) {
520                         head->next = rhead->next;
521                         thead->size = size + thead->size;
522                         thead->addr = addr;
523                         kfree(rhead);
524                         rhead = thead;
525                 }
526
527                 /* join with the lower block, if possible */
528                 if ((head->addr + head->size) == rhead->addr) {
529                         head->next = rhead->next;
530                         head->size = head->size + rhead->size;
531                         kfree(rhead);
532                 }
533         }
534
535         return ret;
536 }