/*
 * zsmalloc memory allocator
 *
 * Copyright (C) 2011  Nitin Gupta
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 */

#ifdef CONFIG_ZSMALLOC_DEBUG
#define DEBUG
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/vmalloc.h>

#include "zsmalloc.h"
#include "zsmalloc_int.h"

/*
 * A zspage's class index and fullness group
 * are encoded in its (first) page->mapping
 */
#define CLASS_IDX_BITS  28
#define FULLNESS_BITS   4
#define CLASS_IDX_MASK  ((1 << CLASS_IDX_BITS) - 1)
#define FULLNESS_MASK   ((1 << FULLNESS_BITS) - 1)
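
/*
 * For example, a zspage in size class 5 whose fullness group is 2
 * (arbitrary illustrative values) is encoded as:
 *
 *      m = (5 << FULLNESS_BITS) | 2 = 0x52
 *
 * so the low 4 bits hold the fullness group and the next 28 bits
 * hold the class index; get/set_zspage_mapping() below implement
 * exactly this packing.
 */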

/* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
static DEFINE_PER_CPU(struct mapping_area, zs_map_area);

static int is_first_page(struct page *page)
{
        return PagePrivate(page);
}

static int is_last_page(struct page *page)
{
        return PagePrivate2(page);
}

static void get_zspage_mapping(struct page *page, unsigned int *class_idx,
                                enum fullness_group *fullness)
{
        unsigned long m;
        BUG_ON(!is_first_page(page));

        m = (unsigned long)page->mapping;
        *fullness = m & FULLNESS_MASK;
        *class_idx = (m >> FULLNESS_BITS) & CLASS_IDX_MASK;
}

static void set_zspage_mapping(struct page *page, unsigned int class_idx,
                                enum fullness_group fullness)
{
        unsigned long m;
        BUG_ON(!is_first_page(page));

        m = ((class_idx & CLASS_IDX_MASK) << FULLNESS_BITS) |
                        (fullness & FULLNESS_MASK);
        page->mapping = (struct address_space *)m;
}

static int get_size_class_index(int size)
{
        int idx = 0;

        if (likely(size > ZS_MIN_ALLOC_SIZE))
                idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE,
                                ZS_SIZE_CLASS_DELTA);

        return idx;
}
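
/*
 * Worked example, assuming ZS_MIN_ALLOC_SIZE = 32 and
 * ZS_SIZE_CLASS_DELTA = 16 (see zsmalloc_int.h for the authoritative
 * values on a given config):
 *
 *      size = 100: idx = DIV_ROUND_UP(100 - 32, 16) = 5
 *      size <= 32: idx = 0
 */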

static enum fullness_group get_fullness_group(struct page *page)
{
        int inuse, max_objects;
        enum fullness_group fg;
        BUG_ON(!is_first_page(page));

        inuse = page->inuse;
        max_objects = page->objects;

        if (inuse == 0)
                fg = ZS_EMPTY;
        else if (inuse == max_objects)
                fg = ZS_FULL;
        else if (inuse <= max_objects / fullness_threshold_frac)
                fg = ZS_ALMOST_EMPTY;
        else
                fg = ZS_ALMOST_FULL;

        return fg;
}
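
/*
 * For example, with fullness_threshold_frac = 4 (see zsmalloc_int.h
 * for the actual value) and a zspage holding at most 8 objects:
 *
 *      inuse == 0:       ZS_EMPTY
 *      inuse == 1 or 2:  ZS_ALMOST_EMPTY  (inuse <= 8 / 4)
 *      inuse == 3 .. 7:  ZS_ALMOST_FULL
 *      inuse == 8:       ZS_FULL
 */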

static void insert_zspage(struct page *page, struct size_class *class,
                                enum fullness_group fullness)
{
        struct page **head;

        BUG_ON(!is_first_page(page));

        if (fullness >= _ZS_NR_FULLNESS_GROUPS)
                return;

        head = &class->fullness_list[fullness];
        if (*head)
                list_add_tail(&page->lru, &(*head)->lru);

        *head = page;
}

static void remove_zspage(struct page *page, struct size_class *class,
                                enum fullness_group fullness)
{
        struct page **head;

        BUG_ON(!is_first_page(page));

        if (fullness >= _ZS_NR_FULLNESS_GROUPS)
                return;

        head = &class->fullness_list[fullness];
        BUG_ON(!*head);
        if (list_empty(&(*head)->lru))
                *head = NULL;
        else if (*head == page)
                *head = (struct page *)list_entry((*head)->lru.next,
                                        struct page, lru);

        list_del_init(&page->lru);
}

static enum fullness_group fix_fullness_group(struct zs_pool *pool,
                                                struct page *page)
{
        unsigned int class_idx;
        struct size_class *class;
        enum fullness_group currfg, newfg;

        BUG_ON(!is_first_page(page));

        get_zspage_mapping(page, &class_idx, &currfg);
        newfg = get_fullness_group(page);
        if (newfg == currfg)
                goto out;

        class = &pool->size_class[class_idx];
        remove_zspage(page, class, currfg);
        insert_zspage(page, class, newfg);
        set_zspage_mapping(page, class_idx, newfg);

out:
        return newfg;
}

/*
 * We have to decide on how many pages to link together
 * to form a zspage for each size class. This is important
 * to reduce wastage due to unusable space left at the end of
 * each zspage, which is given as:
 *      wastage = Zp % class_size
 *      usage = Zp - wastage
 * where Zp = zspage size = k * PAGE_SIZE, where k = 1, 2, ...
 *
 * For example, for a size class of 3/8 * PAGE_SIZE, we should
 * link together 3 PAGE_SIZE sized pages to form a zspage, since
 * then we can perfectly fit 8 such objects.
 */
static int get_pages_per_zspage(int class_size)
{
        int i, max_usedpc = 0;
        /* zspage order which gives maximum used size per KB */
        int max_usedpc_order = 1;

        for (i = 1; i <= ZS_MAX_PAGES_PER_ZSPAGE; i++) {
                int zspage_size;
                int waste, usedpc;

                zspage_size = i * PAGE_SIZE;
                waste = zspage_size % class_size;
                usedpc = (zspage_size - waste) * 100 / zspage_size;

                if (usedpc > max_usedpc) {
                        max_usedpc = usedpc;
                        max_usedpc_order = i;
                }
        }

        return max_usedpc_order;
}
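
/*
 * Worked example for class_size = 1536 on 4K pages (assuming
 * ZS_MAX_PAGES_PER_ZSPAGE >= 3):
 *
 *      i = 1: waste = 4096 % 1536  = 1024, usedpc = 75
 *      i = 2: waste = 8192 % 1536  =  512, usedpc = 93
 *      i = 3: waste = 12288 % 1536 =    0, usedpc = 100
 *
 * so three pages are linked per zspage, fitting exactly 8 objects.
 */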

/*
 * A single 'zspage' is composed of many system pages which are
 * linked together using fields in struct page. This function finds
 * the first/head page, given any component page of a zspage.
 */
static struct page *get_first_page(struct page *page)
{
        if (is_first_page(page))
                return page;
        else
                return page->first_page;
}

static struct page *get_next_page(struct page *page)
{
        struct page *next;

        if (is_last_page(page))
                next = NULL;
        else if (is_first_page(page))
                next = (struct page *)page->private;
        else
                next = list_entry(page->lru.next, struct page, lru);

        return next;
}

/* Encode <page, obj_idx> as a single handle value */
static void *obj_location_to_handle(struct page *page, unsigned long obj_idx)
{
        unsigned long handle;

        if (!page) {
                BUG_ON(obj_idx);
                return NULL;
        }

        handle = page_to_pfn(page) << OBJ_INDEX_BITS;
        handle |= (obj_idx & OBJ_INDEX_MASK);

        return (void *)handle;
}
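
/*
 * Handle layout produced above (sketch; OBJ_INDEX_BITS and
 * OBJ_INDEX_MASK come from zsmalloc_int.h):
 *
 *      <------- BITS_PER_LONG ------->
 *      +----------------+------------+
 *      |    page PFN    |  obj_idx   |
 *      +----------------+------------+
 *                       ^ bit OBJ_INDEX_BITS
 *
 * For example, pfn 0x1234 and obj_idx 3 encode as
 * (0x1234 << OBJ_INDEX_BITS) | 3; obj_handle_to_location() below
 * simply reverses the shift and mask.
 */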

/* Decode <page, obj_idx> pair from the given object handle */
static void obj_handle_to_location(void *handle, struct page **page,
                                unsigned long *obj_idx)
{
        unsigned long hval = (unsigned long)handle;

        *page = pfn_to_page(hval >> OBJ_INDEX_BITS);
        *obj_idx = hval & OBJ_INDEX_MASK;
}

static unsigned long obj_idx_to_offset(struct page *page,
                                unsigned long obj_idx, int class_size)
{
        unsigned long off = 0;

        if (!is_first_page(page))
                off = page->index;

        return off + obj_idx * class_size;
}

static void reset_page(struct page *page)
{
        clear_bit(PG_private, &page->flags);
        clear_bit(PG_private_2, &page->flags);
        set_page_private(page, 0);
        page->mapping = NULL;
        page->freelist = NULL;
        reset_page_mapcount(page);
}

static void free_zspage(struct page *first_page)
{
        struct page *nextp, *tmp, *head_extra;

        BUG_ON(!is_first_page(first_page));
        BUG_ON(first_page->inuse);

        head_extra = (struct page *)page_private(first_page);

        reset_page(first_page);
        __free_page(first_page);

        /* zspage with only 1 system page */
        if (!head_extra)
                return;

        list_for_each_entry_safe(nextp, tmp, &head_extra->lru, lru) {
                list_del(&nextp->lru);
                reset_page(nextp);
                __free_page(nextp);
        }
        reset_page(head_extra);
        __free_page(head_extra);
}

/* Initialize a newly allocated zspage */
static void init_zspage(struct page *first_page, struct size_class *class)
{
        unsigned long off = 0;
        struct page *page = first_page;

        BUG_ON(!is_first_page(first_page));
        while (page) {
                struct page *next_page;
                struct link_free *link;
                unsigned int i, objs_on_page;

                /*
                 * page->index stores offset of first object starting
                 * in the page. For the first page, this is always 0,
                 * so we use first_page->index (aka ->freelist) to store
                 * head of corresponding zspage's freelist.
                 */
                if (page != first_page)
                        page->index = off;

                link = (struct link_free *)kmap_atomic(page) +
                                                off / sizeof(*link);
                objs_on_page = (PAGE_SIZE - off) / class->size;

                for (i = 1; i <= objs_on_page; i++) {
                        off += class->size;
                        if (off < PAGE_SIZE) {
                                link->next = obj_location_to_handle(page, i);
                                link += class->size / sizeof(*link);
                        }
                }

                /*
                 * We now come to the last (full or partial) object on this
                 * page, which must point to the first object on the next
                 * page (if present).
                 */
                next_page = get_next_page(page);
                link->next = obj_location_to_handle(next_page, 0);
                kunmap_atomic(link);
                page = next_page;
                off = (off + class->size) % PAGE_SIZE;
        }
}
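
/*
 * After init_zspage() the free objects form a singly linked list of
 * handles threaded through the pages themselves (illustrative sketch
 * for a two-page zspage):
 *
 *      (page0, idx 0) -> (page0, idx 1) -> ... -> (page1, idx 0) -> ... -> NULL
 *
 * alloc_zspage() below stores the head of this list, the handle
 * (first_page, idx 0), in first_page->freelist.
 */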

/*
 * Allocate a zspage for the given size class
 */
static struct page *alloc_zspage(struct size_class *class, gfp_t flags)
{
        int i, error;
        /* prev_page must survive across loop iterations */
        struct page *first_page = NULL, *prev_page = NULL;

        /*
         * Allocate individual pages and link them together as:
         * 1. first page->private = first sub-page
         * 2. all sub-pages are linked together using page->lru
         * 3. each sub-page is linked to the first page using page->first_page
         *
         * For each size class, First/Head pages are linked together using
         * page->lru. Also, we set PG_private to identify the first page
         * (i.e. no other sub-page has this flag set) and PG_private_2 to
         * identify the last page.
         */
        error = -ENOMEM;
        for (i = 0; i < class->pages_per_zspage; i++) {
                struct page *page;

                page = alloc_page(flags);
                if (!page)
                        goto cleanup;

                INIT_LIST_HEAD(&page->lru);
                if (i == 0) {   /* first page */
                        SetPagePrivate(page);
                        set_page_private(page, 0);
                        first_page = page;
                        first_page->inuse = 0;
                }
                if (i == 1)
                        first_page->private = (unsigned long)page;
                if (i >= 1)
                        page->first_page = first_page;
                if (i >= 2)
                        list_add(&page->lru, &prev_page->lru);
                if (i == class->pages_per_zspage - 1)   /* last page */
                        SetPagePrivate2(page);
                prev_page = page;
        }

        init_zspage(first_page, class);

        first_page->freelist = obj_location_to_handle(first_page, 0);
        /* Maximum number of objects we can store in this zspage */
        first_page->objects = class->pages_per_zspage * PAGE_SIZE / class->size;

        error = 0; /* Success */

cleanup:
        if (unlikely(error) && first_page) {
                free_zspage(first_page);
                first_page = NULL;
        }

        return first_page;
}
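
/*
 * Resulting linkage for a 3-page zspage (sketch):
 *
 *      first_page: PG_private set; ->private points to page1;
 *                  ->freelist holds the object freelist head
 *      page1:      ->first_page = first_page; its ->lru list links
 *                  the remaining sub-pages (page2, ...)
 *      page2:      ->first_page = first_page; PG_private_2 set (last page)
 */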

static struct page *find_get_zspage(struct size_class *class)
{
        int i;
        struct page *page;

        for (i = 0; i < _ZS_NR_FULLNESS_GROUPS; i++) {
                page = class->fullness_list[i];
                if (page)
                        break;
        }

        return page;
}

/*
 * If this becomes a separate module, register zs_init() with
 * module_init(), zs_exit() with module_exit(), and remove zs_initialized
 */
static int zs_initialized;

static int zs_cpu_notifier(struct notifier_block *nb, unsigned long action,
                                void *pcpu)
{
        int cpu = (long)pcpu;
        struct mapping_area *area;

        switch (action) {
        case CPU_UP_PREPARE:
                area = &per_cpu(zs_map_area, cpu);
                if (area->vm)
                        break;
                area->vm = alloc_vm_area(2 * PAGE_SIZE, area->vm_ptes);
                if (!area->vm)
                        return notifier_from_errno(-ENOMEM);
                break;
        case CPU_DEAD:
        case CPU_UP_CANCELED:
                area = &per_cpu(zs_map_area, cpu);
                if (area->vm)
                        free_vm_area(area->vm);
                area->vm = NULL;
                break;
        }

        return NOTIFY_OK;
}

static struct notifier_block zs_cpu_nb = {
        .notifier_call = zs_cpu_notifier
};

static void zs_exit(void)
{
        int cpu;

        for_each_online_cpu(cpu)
                zs_cpu_notifier(NULL, CPU_DEAD, (void *)(long)cpu);
        unregister_cpu_notifier(&zs_cpu_nb);
}

static int zs_init(void)
{
        int cpu, ret;

        register_cpu_notifier(&zs_cpu_nb);
        for_each_online_cpu(cpu) {
                ret = zs_cpu_notifier(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
                if (notifier_to_errno(ret))
                        goto fail;
        }
        return 0;
fail:
        zs_exit();
        return notifier_to_errno(ret);
}

struct zs_pool *zs_create_pool(const char *name, gfp_t flags)
{
        int i, error, ovhd_size;
        struct zs_pool *pool;

        if (!name)
                return NULL;

        ovhd_size = roundup(sizeof(*pool), PAGE_SIZE);
        pool = kzalloc(ovhd_size, GFP_KERNEL);
        if (!pool)
                return NULL;

        for (i = 0; i < ZS_SIZE_CLASSES; i++) {
                int size;
                struct size_class *class;

                size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA;
                if (size > ZS_MAX_ALLOC_SIZE)
                        size = ZS_MAX_ALLOC_SIZE;

                class = &pool->size_class[i];
                class->size = size;
                class->index = i;
                spin_lock_init(&class->lock);
                class->pages_per_zspage = get_pages_per_zspage(size);
        }

        /*
         * If this becomes a separate module, register zs_init() with
         * module_init(), and remove this block
         */
        if (!zs_initialized) {
                error = zs_init();
                if (error)
                        goto cleanup;
                zs_initialized = 1;
        }

        pool->flags = flags;
        pool->name = name;

        error = 0; /* Success */

cleanup:
        if (error) {
                zs_destroy_pool(pool);
                pool = NULL;
        }

        return pool;
}
EXPORT_SYMBOL_GPL(zs_create_pool);

void zs_destroy_pool(struct zs_pool *pool)
{
        int i;

        for (i = 0; i < ZS_SIZE_CLASSES; i++) {
                int fg;
                struct size_class *class = &pool->size_class[i];

                for (fg = 0; fg < _ZS_NR_FULLNESS_GROUPS; fg++) {
                        if (class->fullness_list[fg]) {
                                pr_info("Freeing non-empty class with size %db, fullness group %d\n",
                                        class->size, fg);
                        }
                }
        }
        kfree(pool);
}
EXPORT_SYMBOL_GPL(zs_destroy_pool);

/**
 * zs_malloc - Allocate block of given size from pool.
 * @pool: pool to allocate from
 * @size: size of block to allocate
 *
 * On success, handle to the allocated object is returned,
 * otherwise NULL.
 * Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail.
 */
void *zs_malloc(struct zs_pool *pool, size_t size)
{
        void *obj;
        struct link_free *link;
        int class_idx;
        struct size_class *class;

        struct page *first_page, *m_page;
        unsigned long m_objidx, m_offset;

        if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE))
                return NULL;

        class_idx = get_size_class_index(size);
        class = &pool->size_class[class_idx];
        BUG_ON(class_idx != class->index);

        spin_lock(&class->lock);
        first_page = find_get_zspage(class);

        if (!first_page) {
                spin_unlock(&class->lock);
                first_page = alloc_zspage(class, pool->flags);
                if (unlikely(!first_page))
                        return NULL;

                set_zspage_mapping(first_page, class->index, ZS_EMPTY);
                spin_lock(&class->lock);
                class->pages_allocated += class->pages_per_zspage;
        }

        obj = first_page->freelist;
        obj_handle_to_location(obj, &m_page, &m_objidx);
        m_offset = obj_idx_to_offset(m_page, m_objidx, class->size);

        link = (struct link_free *)kmap_atomic(m_page) +
                                        m_offset / sizeof(*link);
        first_page->freelist = link->next;
        memset(link, POISON_INUSE, sizeof(*link));
        kunmap_atomic(link);

        first_page->inuse++;
        /* Now move the zspage to another fullness group, if required */
        fix_fullness_group(pool, first_page);
        spin_unlock(&class->lock);

        return obj;
}
EXPORT_SYMBOL_GPL(zs_malloc);

void zs_free(struct zs_pool *pool, void *obj)
{
        struct link_free *link;
        struct page *first_page, *f_page;
        unsigned long f_objidx, f_offset;

        unsigned int class_idx;
        struct size_class *class;
        enum fullness_group fullness;

        if (unlikely(!obj))
                return;

        obj_handle_to_location(obj, &f_page, &f_objidx);
        first_page = get_first_page(f_page);

        get_zspage_mapping(first_page, &class_idx, &fullness);
        class = &pool->size_class[class_idx];
        f_offset = obj_idx_to_offset(f_page, f_objidx, class->size);

        spin_lock(&class->lock);

        /* Insert this object in containing zspage's freelist */
        link = (struct link_free *)((unsigned char *)kmap_atomic(f_page)
                                                        + f_offset);
        link->next = first_page->freelist;
        kunmap_atomic(link);
        first_page->freelist = obj;

        first_page->inuse--;
        fullness = fix_fullness_group(pool, first_page);

        if (fullness == ZS_EMPTY)
                class->pages_allocated -= class->pages_per_zspage;

        spin_unlock(&class->lock);

        if (fullness == ZS_EMPTY)
                free_zspage(first_page);
}
EXPORT_SYMBOL_GPL(zs_free);

/**
 * zs_map_object - get address of allocated object from handle.
 * @pool: pool from which the object was allocated
 * @handle: handle returned from zs_malloc
 *
 * Before using an object allocated from zs_malloc, it must be mapped using
 * this function. When done with the object, it must be unmapped using
 * zs_unmap_object().
 */
void *zs_map_object(struct zs_pool *pool, void *handle)
{
        struct page *page;
        unsigned long obj_idx, off;

        unsigned int class_idx;
        enum fullness_group fg;
        struct size_class *class;
        struct mapping_area *area;

        BUG_ON(!handle);

        obj_handle_to_location(handle, &page, &obj_idx);
        get_zspage_mapping(get_first_page(page), &class_idx, &fg);
        class = &pool->size_class[class_idx];
        off = obj_idx_to_offset(page, obj_idx, class->size);

        area = &get_cpu_var(zs_map_area);
        if (off + class->size <= PAGE_SIZE) {
                /* this object is contained entirely within a page */
                area->vm_addr = kmap_atomic(page);
        } else {
                /* this object spans two pages */
                struct page *nextp;

                nextp = get_next_page(page);
                BUG_ON(!nextp);

                set_pte(area->vm_ptes[0], mk_pte(page, PAGE_KERNEL));
                set_pte(area->vm_ptes[1], mk_pte(nextp, PAGE_KERNEL));

                /* We pre-allocated VM area so mapping can never fail */
                area->vm_addr = area->vm->addr;
        }

        return area->vm_addr + off;
}
EXPORT_SYMBOL_GPL(zs_map_object);

void zs_unmap_object(struct zs_pool *pool, void *handle)
{
        struct page *page;
        unsigned long obj_idx, off;

        unsigned int class_idx;
        enum fullness_group fg;
        struct size_class *class;
        struct mapping_area *area;

        BUG_ON(!handle);

        obj_handle_to_location(handle, &page, &obj_idx);
        get_zspage_mapping(get_first_page(page), &class_idx, &fg);
        class = &pool->size_class[class_idx];
        off = obj_idx_to_offset(page, obj_idx, class->size);

        area = &__get_cpu_var(zs_map_area);
        if (off + class->size <= PAGE_SIZE) {
                kunmap_atomic(area->vm_addr);
        } else {
                set_pte(area->vm_ptes[0], __pte(0));
                set_pte(area->vm_ptes[1], __pte(0));
                __flush_tlb_one((unsigned long)area->vm_addr);
                __flush_tlb_one((unsigned long)area->vm_addr + PAGE_SIZE);
        }
        put_cpu_var(zs_map_area);
}
EXPORT_SYMBOL_GPL(zs_unmap_object);
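
/*
 * Minimal usage sketch for the API exported above (error handling
 * omitted; the pool name and the 128-byte size are arbitrary example
 * values):
 *
 *      struct zs_pool *pool = zs_create_pool("example", GFP_NOIO);
 *      void *handle = zs_malloc(pool, 128);
 *      void *ptr = zs_map_object(pool, handle);
 *      memset(ptr, 0, 128);
 *      zs_unmap_object(pool, handle);
 *      zs_free(pool, handle);
 *      zs_destroy_pool(pool);
 *
 * zs_map_object() returns a per-cpu mapping with preemption disabled,
 * so the caller must not sleep before the matching zs_unmap_object().
 */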

u64 zs_get_total_size_bytes(struct zs_pool *pool)
{
        int i;
        u64 npages = 0;

        for (i = 0; i < ZS_SIZE_CLASSES; i++)
                npages += pool->size_class[i].pages_allocated;

        return npages << PAGE_SHIFT;
}
EXPORT_SYMBOL_GPL(zs_get_total_size_bytes);