4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
6 * Copyright (C) 2005-2006 Texas Instruments, Inc.
8 * This package is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
12 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
13 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
14 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
18 * This memory manager provides general heap management and arbitrary
19 * alignment for any number of memory segments.
23 * Memory blocks are allocated from the end of the first free memory
24 * block large enough to satisfy the request. Alignment requirements
25 * are satisfied by "sliding" the block forward until its base satisfies
26 * the alignment specification; if this is not possible then the next
27 * free block large enough to hold the request is tried.
29 * Since alignment can cause the creation of a new free block - the
30 * unused memory formed between the start of the original free block
31 * and the start of the allocated block - the memory manager must free
32 * this memory to prevent a memory leak.
34 * Overlay memory is managed by reserving through rmm_alloc, and freeing
35 * it through rmm_free. The memory manager prevents DSP code/data that is
36 * overlaid from being overwritten as long as the memory it runs at has
37 * been allocated, and not yet freed.
40 /* ----------------------------------- DSP/BIOS Bridge */
41 #include <dspbridge/std.h>
42 #include <dspbridge/dbdefs.h>
44 /* ----------------------------------- Trace & Debug */
45 #include <dspbridge/dbc.h>
47 /* ----------------------------------- OS Adaptation Layer */
48 #include <dspbridge/list.h>
50 /* ----------------------------------- This */
51 #include <dspbridge/rmm.h>
54 * ======== rmm_header ========
55 * This header is used to maintain a list of free memory blocks.
58 struct rmm_header *next; /* form a free memory link list */
59 u32 size; /* size of the free memory */
60 u32 addr; /* DSP address of memory block */
64 * ======== rmm_ovly_sect ========
65 * Keeps track of memory occupied by overlay section.
67 struct rmm_ovly_sect {
68 struct list_head list_elem;
69 u32 addr; /* Start of memory section */
70 u32 size; /* Length (target MAUs) of section */
71 s32 page; /* Memory page */
75 * ======== rmm_target_obj ========
77 struct rmm_target_obj {
78 struct rmm_segment *seg_tab;
79 struct rmm_header **free_list;
81 struct lst_list *ovly_list; /* List of overlay memory in use */
84 static u32 refs; /* module reference count */
86 static bool alloc_block(struct rmm_target_obj *target, u32 segid, u32 size,
87 u32 align, u32 *dsp_address);
88 static bool free_block(struct rmm_target_obj *target, u32 segid, u32 addr,
92 * ======== rmm_alloc ========
94 int rmm_alloc(struct rmm_target_obj *target, u32 segid, u32 size,
95 u32 align, u32 *dsp_address, bool reserve)
97 struct rmm_ovly_sect *sect;
98 struct rmm_ovly_sect *prev_sect = NULL;
99 struct rmm_ovly_sect *new_sect;
104 DBC_REQUIRE(dsp_address != NULL);
105 DBC_REQUIRE(size > 0);
106 DBC_REQUIRE(reserve || (target->num_segs > 0));
107 DBC_REQUIRE(refs > 0);
110 if (!alloc_block(target, segid, size, align, dsp_address)) {
113 /* Increment the number of allocated blocks in this
115 target->seg_tab[segid].number++;
119 /* An overlay section - See if block is already in use. If not,
120 * insert into the list in ascending address size. */
122 sect = (struct rmm_ovly_sect *)lst_first(target->ovly_list);
123 /* Find place to insert new list element. List is sorted from
124 * smallest to largest address. */
125 while (sect != NULL) {
126 if (addr <= sect->addr) {
127 /* Check for overlap with sect */
128 if ((addr + size > sect->addr) || (prev_sect &&
137 sect = (struct rmm_ovly_sect *)lst_next(target->ovly_list,
141 if (DSP_SUCCEEDED(status)) {
142 /* No overlap - allocate list element for new section. */
143 new_sect = kzalloc(sizeof(struct rmm_ovly_sect), GFP_KERNEL);
144 if (new_sect == NULL) {
147 lst_init_elem((struct list_head *)new_sect);
148 new_sect->addr = addr;
149 new_sect->size = size;
150 new_sect->page = segid;
152 /* Put new section at the end of the list */
153 lst_put_tail(target->ovly_list,
154 (struct list_head *)new_sect);
156 /* Put new section just before sect */
157 lst_insert_before(target->ovly_list,
158 (struct list_head *)new_sect,
159 (struct list_head *)sect);
168 * ======== rmm_create ========
170 int rmm_create(struct rmm_target_obj **target_obj,
171 struct rmm_segment seg_tab[], u32 num_segs)
173 struct rmm_header *hptr;
174 struct rmm_segment *sptr, *tmp;
175 struct rmm_target_obj *target;
179 DBC_REQUIRE(target_obj != NULL);
180 DBC_REQUIRE(num_segs == 0 || seg_tab != NULL);
182 /* Allocate DBL target object */
183 target = kzalloc(sizeof(struct rmm_target_obj), GFP_KERNEL);
188 if (DSP_FAILED(status))
191 target->num_segs = num_segs;
195 /* Allocate the memory for freelist from host's memory */
196 target->free_list = kzalloc(num_segs * sizeof(struct rmm_header *),
198 if (target->free_list == NULL) {
201 /* Allocate headers for each element on the free list */
202 for (i = 0; i < (s32) num_segs; i++) {
203 target->free_list[i] =
204 kzalloc(sizeof(struct rmm_header), GFP_KERNEL);
205 if (target->free_list[i] == NULL) {
210 /* Allocate memory for initial segment table */
211 target->seg_tab = kzalloc(num_segs * sizeof(struct rmm_segment),
213 if (target->seg_tab == NULL) {
216 /* Initialize segment table and free list */
217 sptr = target->seg_tab;
218 for (i = 0, tmp = seg_tab; num_segs > 0;
221 hptr = target->free_list[i];
222 hptr->addr = tmp->base;
223 hptr->size = tmp->length;
231 /* Initialize overlay memory list */
232 if (DSP_SUCCEEDED(status)) {
233 target->ovly_list = kzalloc(sizeof(struct lst_list),
235 if (target->ovly_list == NULL)
238 INIT_LIST_HEAD(&target->ovly_list->head);
241 if (DSP_SUCCEEDED(status)) {
242 *target_obj = target;
250 DBC_ENSURE((DSP_SUCCEEDED(status) && *target_obj)
251 || (DSP_FAILED(status) && *target_obj == NULL));
257 * ======== rmm_delete ========
259 void rmm_delete(struct rmm_target_obj *target)
261 struct rmm_ovly_sect *ovly_section;
262 struct rmm_header *hptr;
263 struct rmm_header *next;
268 kfree(target->seg_tab);
270 if (target->ovly_list) {
271 while ((ovly_section = (struct rmm_ovly_sect *)lst_get_head
272 (target->ovly_list))) {
275 DBC_ASSERT(LST_IS_EMPTY(target->ovly_list));
276 kfree(target->ovly_list);
279 if (target->free_list != NULL) {
280 /* Free elements on freelist */
281 for (i = 0; i < target->num_segs; i++) {
282 hptr = next = target->free_list[i];
289 kfree(target->free_list);
296 * ======== rmm_exit ========
300 DBC_REQUIRE(refs > 0);
304 DBC_ENSURE(refs >= 0);
308 * ======== rmm_free ========
310 bool rmm_free(struct rmm_target_obj *target, u32 segid, u32 dsp_addr, u32 size,
313 struct rmm_ovly_sect *sect;
318 DBC_REQUIRE(reserved || segid < target->num_segs);
319 DBC_REQUIRE(reserved || (dsp_addr >= target->seg_tab[segid].base &&
320 (dsp_addr + size) <= (target->seg_tab[segid].
322 target->seg_tab[segid].
326 * Free or unreserve memory.
329 ret = free_block(target, segid, dsp_addr, size);
331 target->seg_tab[segid].number--;
334 /* Unreserve memory */
335 sect = (struct rmm_ovly_sect *)lst_first(target->ovly_list);
336 while (sect != NULL) {
337 if (dsp_addr == sect->addr) {
338 DBC_ASSERT(size == sect->size);
339 /* Remove from list */
340 lst_remove_elem(target->ovly_list,
341 (struct list_head *)sect);
346 (struct rmm_ovly_sect *)lst_next(target->ovly_list,
358 * ======== rmm_init ========
362 DBC_REQUIRE(refs >= 0);
370 * ======== rmm_stat ========
372 bool rmm_stat(struct rmm_target_obj *target, enum dsp_memtype segid,
373 struct dsp_memstat *mem_stat_buf)
375 struct rmm_header *head;
377 u32 max_free_size = 0;
378 u32 total_free_size = 0;
381 DBC_REQUIRE(mem_stat_buf != NULL);
382 DBC_ASSERT(target != NULL);
384 if ((u32) segid < target->num_segs) {
385 head = target->free_list[segid];
387 /* Collect data from free_list */
388 while (head != NULL) {
389 max_free_size = max(max_free_size, head->size);
390 total_free_size += head->size;
396 mem_stat_buf->ul_size = target->seg_tab[segid].length;
398 /* ul_num_free_blocks */
399 mem_stat_buf->ul_num_free_blocks = free_blocks;
401 /* ul_total_free_size */
402 mem_stat_buf->ul_total_free_size = total_free_size;
404 /* ul_len_max_free_block */
405 mem_stat_buf->ul_len_max_free_block = max_free_size;
407 /* ul_num_alloc_blocks */
408 mem_stat_buf->ul_num_alloc_blocks =
409 target->seg_tab[segid].number;
418 * ======== balloc ========
419 * This allocation function allocates memory from the lowest addresses
422 static bool alloc_block(struct rmm_target_obj *target, u32 segid, u32 size,
423 u32 align, u32 *dsp_address)
425 struct rmm_header *head;
426 struct rmm_header *prevhead = NULL;
427 struct rmm_header *next;
434 alignbytes = (align == 0) ? 1 : align;
436 head = target->free_list[segid];
442 addr = head->addr; /* alloc from the bottom */
444 /* align allocation */
445 (tmpalign = (u32) addr % alignbytes);
447 tmpalign = alignbytes - tmpalign;
449 allocsize = size + tmpalign;
451 if (hsize >= allocsize) { /* big enough */
452 if (hsize == allocsize && prevhead != NULL) {
453 prevhead->next = next;
456 head->size = hsize - allocsize;
457 head->addr += allocsize;
460 /* free up any hole created by alignment */
462 free_block(target, segid, addr, tmpalign);
464 *dsp_address = addr + tmpalign;
471 } while (head != NULL);
477 * ======== free_block ========
478 * TO DO: free_block() allocates memory, which could result in failure.
479 * Could allocate an rmm_header in rmm_alloc(), to be kept in a pool.
480 * free_block() could use an rmm_header from the pool, freeing as blocks
483 static bool free_block(struct rmm_target_obj *target, u32 segid, u32 addr,
486 struct rmm_header *head;
487 struct rmm_header *thead;
488 struct rmm_header *rhead;
491 /* Create a memory header to hold the newly free'd block. */
492 rhead = kzalloc(sizeof(struct rmm_header), GFP_KERNEL);
496 /* search down the free list to find the right place for addr */
497 head = target->free_list[segid];
499 if (addr >= head->addr) {
500 while (head->next != NULL && addr > head->next->addr)
517 /* join with upper block, if possible */
518 if (thead != NULL && (rhead->addr + rhead->size) ==
520 head->next = rhead->next;
521 thead->size = size + thead->size;
527 /* join with the lower block, if possible */
528 if ((head->addr + head->size) == rhead->addr) {
529 head->next = rhead->next;
530 head->size = head->size + rhead->size;