#ifndef _LINUX_MEMBLOCK_H
#define _LINUX_MEMBLOCK_H
#ifdef __KERNEL__

#ifdef CONFIG_HAVE_MEMBLOCK
/*
 * Logical memory blocks.
 *
 * Copyright (C) 2001 Peter Bergner, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/init.h>
#include <linux/mm.h>

#define INIT_MEMBLOCK_REGIONS	128

/* Definition of memblock flags. */
#define MEMBLOCK_HOTPLUG	0x1	/* hotpluggable region */

struct memblock_region {
	phys_addr_t base;
	phys_addr_t size;
	unsigned long flags;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
	int nid;
#endif
};

struct memblock_type {
	unsigned long cnt;	/* number of regions */
	unsigned long max;	/* size of the allocated array */
	phys_addr_t total_size;	/* size of all regions */
	struct memblock_region *regions;
};

struct memblock {
	bool bottom_up;		/* is bottom up direction? */
	phys_addr_t current_limit;
	struct memblock_type memory;
	struct memblock_type reserved;
};

extern struct memblock memblock;
extern int memblock_debug;
#ifdef CONFIG_MOVABLE_NODE
/* If movable_node boot option specified */
extern bool movable_node_enabled;
#endif /* CONFIG_MOVABLE_NODE */

#define memblock_dbg(fmt, ...) \
	if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
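
/*
 * Illustrative sketch, not part of the original header: memblock_dbg() is
 * how the allocator traces its own operations; output only appears when
 * memblock_debug has been enabled (e.g. via the memblock=debug command line
 * option).  The format string and arguments below are made up.
 *
 *	memblock_dbg("reserving %#llx bytes at %#llx\n",
 *		     (unsigned long long)size, (unsigned long long)base);
 */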

phys_addr_t memblock_find_in_range_node(phys_addr_t start, phys_addr_t end,
					phys_addr_t size, phys_addr_t align, int nid);
phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
				   phys_addr_t size, phys_addr_t align);
phys_addr_t get_allocated_memblock_reserved_regions_info(phys_addr_t *addr);
void memblock_allow_resize(void);
int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
int memblock_add(phys_addr_t base, phys_addr_t size);
int memblock_remove(phys_addr_t base, phys_addr_t size);
int memblock_free(phys_addr_t base, phys_addr_t size);
int memblock_reserve(phys_addr_t base, phys_addr_t size);
void memblock_trim_memory(phys_addr_t align);
int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
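
/*
 * Typical early-boot sequence (an illustrative sketch, not part of the
 * original header; the range and symbols are hypothetical): an architecture
 * first registers the RAM it has discovered with memblock_add(), then
 * withholds ranges that must never be handed out with memblock_reserve().
 *
 *	memblock_add(0, SZ_512M);			// all of RAM
 *	memblock_reserve(__pa(_text), _end - _text);	// kernel image
 */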

#ifdef CONFIG_MOVABLE_NODE
static inline bool memblock_is_hotpluggable(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_HOTPLUG;
}

static inline bool movable_node_is_enabled(void)
{
	return movable_node_enabled;
}
#else
static inline bool memblock_is_hotpluggable(struct memblock_region *m) { return false; }
static inline bool movable_node_is_enabled(void) { return false; }
#endif

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
			    unsigned long *end_pfn);
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
			  unsigned long *out_end_pfn, int *out_nid);

/**
 * for_each_mem_pfn_range - early memory pfn range iterator
 * @i: an integer used as loop variable
 * @nid: node selector, %MAX_NUMNODES for all nodes
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over configured memory ranges.
 */
#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid)		\
	for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
	     i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
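
/*
 * Usage sketch (hypothetical caller, illustrative names): count the pages
 * memblock knows about on every node.
 *
 *	unsigned long start_pfn, end_pfn, nr_pages = 0;
 *	int i, nid;
 *
 *	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
 *		nr_pages += end_pfn - start_pfn;
 */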
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

void __next_free_mem_range(u64 *idx, int nid, phys_addr_t *out_start,
			   phys_addr_t *out_end, int *out_nid);

/**
 * for_each_free_mem_range - iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %MAX_NUMNODES for all nodes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock.  Available as
 * soon as memblock is initialized.
 */
#define for_each_free_mem_range(i, nid, p_start, p_end, p_nid)		\
	for (i = 0,							\
	     __next_free_mem_range(&i, nid, p_start, p_end, p_nid);	\
	     i != (u64)ULLONG_MAX;					\
	     __next_free_mem_range(&i, nid, p_start, p_end, p_nid))
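
/*
 * Usage sketch (hypothetical caller, illustrative names): add up how much
 * memory is present but not yet reserved, across all nodes.
 *
 *	phys_addr_t start, end, free = 0;
 *	u64 i;
 *
 *	for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL)
 *		free += end - start;
 */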

void __next_free_mem_range_rev(u64 *idx, int nid, phys_addr_t *out_start,
			       phys_addr_t *out_end, int *out_nid);

/**
 * for_each_free_mem_range_reverse - rev-iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %MAX_NUMNODES for all nodes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in reverse
 * order.  Available as soon as memblock is initialized.
 */
#define for_each_free_mem_range_reverse(i, nid, p_start, p_end, p_nid)	\
	for (i = (u64)ULLONG_MAX,					\
	     __next_free_mem_range_rev(&i, nid, p_start, p_end, p_nid); \
	     i != (u64)ULLONG_MAX;					\
	     __next_free_mem_range_rev(&i, nid, p_start, p_end, p_nid))
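
/*
 * Usage sketch (hypothetical caller, illustrative names): find the highest
 * free range that can hold @size bytes by walking the free areas from the
 * top of memory down; alignment is ignored to keep the sketch short.
 *
 *	phys_addr_t start, end, found = 0;
 *	u64 i;
 *
 *	for_each_free_mem_range_reverse(i, MAX_NUMNODES, &start, &end, NULL)
 *		if (end - start >= size) {
 *			found = end - size;
 *			break;
 *		}
 */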

static inline void memblock_set_region_flags(struct memblock_region *r,
					     unsigned long flags)
{
	r->flags |= flags;
}

static inline void memblock_clear_region_flags(struct memblock_region *r,
					       unsigned long flags)
{
	r->flags &= ~flags;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int memblock_set_node(phys_addr_t base, phys_addr_t size,
		      struct memblock_type *type, int nid);

static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
	r->nid = nid;
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return r->nid;
}
#else
static inline void memblock_set_region_node(struct memblock_region *r, int nid) {}
static inline int memblock_get_region_node(const struct memblock_region *r) { return 0; }
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

phys_addr_t memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid);
phys_addr_t memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);

phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align);
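
/*
 * Illustrative sketch (hypothetical caller): allocate a page-aligned scratch
 * buffer before the page allocator is available.  Note that the return value
 * is a physical address, so it must be converted before being dereferenced.
 *
 *	phys_addr_t pa = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 *	void *buf = __va(pa);
 */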

#ifdef CONFIG_MOVABLE_NODE
/*
 * Set the allocation direction to bottom-up or top-down.
 */
static inline void memblock_set_bottom_up(bool enable)
{
	memblock.bottom_up = enable;
}

/*
 * Check if the allocation direction is bottom-up or not.
 * If this returns true, memblock will allocate memory in the
 * bottom-up direction.
 */
static inline bool memblock_bottom_up(void)
{
	return memblock.bottom_up;
}
#else
static inline void memblock_set_bottom_up(bool enable) {}
static inline bool memblock_bottom_up(void) { return false; }
#endif

/* Flags for memblock_alloc_base() and __memblock_alloc_base() */
#define MEMBLOCK_ALLOC_ANYWHERE	(~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE	0

phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align,
				phys_addr_t max_addr);
phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align,
				  phys_addr_t max_addr);
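
/*
 * Illustrative sketch (hypothetical caller, made-up size and limit): allocate
 * below 4 GiB, e.g. for a device that can only address 32 bits.  This sketch
 * assumes __memblock_alloc_base() returns 0 on failure while
 * memblock_alloc_base() treats allocation failure as fatal.
 *
 *	phys_addr_t pa = __memblock_alloc_base(SZ_64K, SZ_4K,
 *					       (phys_addr_t)0x100000000ULL);
 *	if (!pa)
 *		pr_warn("no memory below 4 GiB\n");
 */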

phys_addr_t memblock_phys_mem_size(void);
phys_addr_t memblock_mem_size(unsigned long limit_pfn);
phys_addr_t memblock_start_of_DRAM(void);
phys_addr_t memblock_end_of_DRAM(void);
void memblock_enforce_memory_limit(phys_addr_t memory_limit);
int memblock_is_memory(phys_addr_t addr);
int memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
int memblock_is_reserved(phys_addr_t addr);
int memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);

extern void __memblock_dump_all(void);

static inline void memblock_dump_all(void)
{
	if (memblock_debug)
		__memblock_dump_all();
}

/**
 * memblock_set_current_limit - Set the current allocation limit to allow
 *                              limiting allocations to what is currently
 *                              accessible during boot
 * @limit: New limit value (physical address)
 */
void memblock_set_current_limit(phys_addr_t limit);

/*
 * pfn conversion functions
 *
 * While the memory MEMBLOCKs should always be page aligned, the reserved
 * MEMBLOCKs may not be.  These accessors attempt to provide a clear idea
 * of what they return for such non-aligned MEMBLOCKs.
 */

/**
 * memblock_region_memory_base_pfn - Return the lowest pfn intersecting with the memory region
 * @reg: memblock_region structure
 */
static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base);
}

/**
 * memblock_region_memory_end_pfn - Return the end_pfn of this region
 * @reg: memblock_region structure
 */
static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base + reg->size);
}

/**
 * memblock_region_reserved_base_pfn - Return the lowest pfn intersecting with the reserved region
 * @reg: memblock_region structure
 */
static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base);
}

/**
 * memblock_region_reserved_end_pfn - Return the end_pfn of this region
 * @reg: memblock_region structure
 */
static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base + reg->size);
}
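
/*
 * Worked example (illustrative, not part of the original header): with 4K
 * pages, a reserved region covering [0x1100, 0x1300) is rounded outwards,
 * so it is accounted to the whole of page 1:
 *
 *	memblock_region_reserved_base_pfn()  ->  PFN_DOWN(0x1100) == 1
 *	memblock_region_reserved_end_pfn()   ->  PFN_UP(0x1300)   == 2
 *
 * A memory region is rounded the other way (PFN_UP of the base, PFN_DOWN of
 * the end), so only pages that lie entirely inside it are counted.
 */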

#define for_each_memblock(memblock_type, region)					\
	for (region = memblock.memblock_type.regions;					\
	     region < (memblock.memblock_type.regions + memblock.memblock_type.cnt);	\
	     region++)
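
/*
 * Usage sketch (hypothetical caller): walk every registered memory region
 * and print it.  Substitute "reserved" for "memory" to walk the reserved
 * array instead.
 *
 *	struct memblock_region *reg;
 *
 *	for_each_memblock(memory, reg)
 *		pr_info("memory: %#llx - %#llx\n",
 *			(unsigned long long)reg->base,
 *			(unsigned long long)(reg->base + reg->size));
 */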

#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
#define __init_memblock __meminit
#define __initdata_memblock __meminitdata
#else
#define __init_memblock
#define __initdata_memblock
#endif

#else
static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return 0;
}
#endif /* CONFIG_HAVE_MEMBLOCK */

#endif /* __KERNEL__ */

#endif /* _LINUX_MEMBLOCK_H */