mm/hugetlb.c
hugetlb/cgroup: add support for cgroup removal
1 /*
2  * Generic hugetlb support.
3  * (C) William Irwin, April 2004
4  */
5 #include <linux/list.h>
6 #include <linux/init.h>
7 #include <linux/module.h>
8 #include <linux/mm.h>
9 #include <linux/seq_file.h>
10 #include <linux/sysctl.h>
11 #include <linux/highmem.h>
12 #include <linux/mmu_notifier.h>
13 #include <linux/nodemask.h>
14 #include <linux/pagemap.h>
15 #include <linux/mempolicy.h>
16 #include <linux/cpuset.h>
17 #include <linux/mutex.h>
18 #include <linux/bootmem.h>
19 #include <linux/sysfs.h>
20 #include <linux/slab.h>
21 #include <linux/rmap.h>
22 #include <linux/swap.h>
23 #include <linux/swapops.h>
24
25 #include <asm/page.h>
26 #include <asm/pgtable.h>
27 #include <asm/tlb.h>
28
29 #include <linux/io.h>
30 #include <linux/hugetlb.h>
31 #include <linux/hugetlb_cgroup.h>
32 #include <linux/node.h>
33 #include "internal.h"
34
35 const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
36 static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
37 unsigned long hugepages_treat_as_movable;
38
39 int hugetlb_max_hstate __read_mostly;
40 unsigned int default_hstate_idx;
41 struct hstate hstates[HUGE_MAX_HSTATE];
42
43 __initdata LIST_HEAD(huge_boot_pages);
44
45 /* for command line parsing */
46 static struct hstate * __initdata parsed_hstate;
47 static unsigned long __initdata default_hstate_max_huge_pages;
48 static unsigned long __initdata default_hstate_size;
49
50 /*
51  * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
52  */
53 DEFINE_SPINLOCK(hugetlb_lock);
54
55 static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
56 {
57         bool free = (spool->count == 0) && (spool->used_hpages == 0);
58
59         spin_unlock(&spool->lock);
60
61         /* If no pages are used and no other handles to the subpool
62          * remain, free the subpool. */
63         if (free)
64                 kfree(spool);
65 }
66
67 struct hugepage_subpool *hugepage_new_subpool(long nr_blocks)
68 {
69         struct hugepage_subpool *spool;
70
71         spool = kmalloc(sizeof(*spool), GFP_KERNEL);
72         if (!spool)
73                 return NULL;
74
75         spin_lock_init(&spool->lock);
76         spool->count = 1;
77         spool->max_hpages = nr_blocks;
78         spool->used_hpages = 0;
79
80         return spool;
81 }
82
83 void hugepage_put_subpool(struct hugepage_subpool *spool)
84 {
85         spin_lock(&spool->lock);
86         BUG_ON(!spool->count);
87         spool->count--;
88         unlock_or_release_subpool(spool);
89 }
90
91 static int hugepage_subpool_get_pages(struct hugepage_subpool *spool,
92                                       long delta)
93 {
94         int ret = 0;
95
96         if (!spool)
97                 return 0;
98
99         spin_lock(&spool->lock);
100         if ((spool->used_hpages + delta) <= spool->max_hpages) {
101                 spool->used_hpages += delta;
102         } else {
103                 ret = -ENOMEM;
104         }
105         spin_unlock(&spool->lock);
106
107         return ret;
108 }
109
110 static void hugepage_subpool_put_pages(struct hugepage_subpool *spool,
111                                        long delta)
112 {
113         if (!spool)
114                 return;
115
116         spin_lock(&spool->lock);
117         spool->used_hpages -= delta;
118         /* If hugetlbfs_put_super couldn't free spool due to
119         * an outstanding quota reference, free it now. */
120         unlock_or_release_subpool(spool);
121 }
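
/*
 * A minimal usage sketch, not part of the original source: how the two
 * subpool accounting helpers above pair up around a huge page's lifetime.
 * The function name and nr_pages value are hypothetical; callers in this
 * file include alloc_huge_page() and free_huge_page() further down.
 */
static int example_subpool_accounting(struct hugepage_subpool *spool)
{
	long nr_pages = 1;
	int ret;

	/* Charge the subpool before handing out pages ... */
	ret = hugepage_subpool_get_pages(spool, nr_pages);
	if (ret)
		return ret;	/* -ENOMEM: spool->max_hpages would be exceeded */

	/* ... the pages are in use here ... */

	/* ... and uncharge once the last user is done with them. */
	hugepage_subpool_put_pages(spool, nr_pages);
	return 0;
}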
122
123 static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
124 {
125         return HUGETLBFS_SB(inode->i_sb)->spool;
126 }
127
128 static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
129 {
130         return subpool_inode(vma->vm_file->f_dentry->d_inode);
131 }
132
133 /*
134  * Region tracking -- allows tracking of reservations and instantiated pages
135  *                    across the pages in a mapping.
136  *
137  * The region data structures are protected by a combination of the mmap_sem
138  * and the hugetlb_instantiation_mutex.  To access or modify a region the caller
139  * must either hold the mmap_sem for write, or the mmap_sem for read and
140  * the hugetlb_instantiation mutex:
141  *
142  *      down_write(&mm->mmap_sem);
143  * or
144  *      down_read(&mm->mmap_sem);
145  *      mutex_lock(&hugetlb_instantiation_mutex);
146  */
147 struct file_region {
148         struct list_head link;
149         long from;
150         long to;
151 };
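
/*
 * A minimal sketch, not part of the original source, of the two locking
 * patterns described in the comment above.  The mutex parameter stands in
 * for the hugetlb instantiation mutex; both it and the function name are
 * placeholders that only illustrate the required ordering.
 */
static void example_region_locking(struct mm_struct *mm,
				   struct mutex *instantiation_mutex)
{
	/* Either hold mmap_sem for write on its own ... */
	down_write(&mm->mmap_sem);
	/* ... access or modify the region list ... */
	up_write(&mm->mmap_sem);

	/* ... or hold mmap_sem for read plus the instantiation mutex. */
	down_read(&mm->mmap_sem);
	mutex_lock(instantiation_mutex);
	/* ... access or modify the region list ... */
	mutex_unlock(instantiation_mutex);
	up_read(&mm->mmap_sem);
}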
152
153 static long region_add(struct list_head *head, long f, long t)
154 {
155         struct file_region *rg, *nrg, *trg;
156
157         /* Locate the region we are either in or before. */
158         list_for_each_entry(rg, head, link)
159                 if (f <= rg->to)
160                         break;
161
162         /* Round our left edge to the current segment if it encloses us. */
163         if (f > rg->from)
164                 f = rg->from;
165
166         /* Check for and consume any regions we now overlap with. */
167         nrg = rg;
168         list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
169                 if (&rg->link == head)
170                         break;
171                 if (rg->from > t)
172                         break;
173
174                 /* If this area reaches higher then extend our area to
175                  * include it completely.  If this is not the first area
176                  * which we intend to reuse, free it. */
177                 if (rg->to > t)
178                         t = rg->to;
179                 if (rg != nrg) {
180                         list_del(&rg->link);
181                         kfree(rg);
182                 }
183         }
184         nrg->from = f;
185         nrg->to = t;
186         return 0;
187 }
188
189 static long region_chg(struct list_head *head, long f, long t)
190 {
191         struct file_region *rg, *nrg;
192         long chg = 0;
193
194         /* Locate the region we are before or in. */
195         list_for_each_entry(rg, head, link)
196                 if (f <= rg->to)
197                         break;
198
199         /* If we are below the current region then a new region is required.
200          * Subtle: allocate a new region at the position, but make it zero
201          * size so that we can guarantee to record the reservation. */
202         if (&rg->link == head || t < rg->from) {
203                 nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
204                 if (!nrg)
205                         return -ENOMEM;
206                 nrg->from = f;
207                 nrg->to   = f;
208                 INIT_LIST_HEAD(&nrg->link);
209                 list_add(&nrg->link, rg->link.prev);
210
211                 return t - f;
212         }
213
214         /* Round our left edge to the current segment if it encloses us. */
215         if (f > rg->from)
216                 f = rg->from;
217         chg = t - f;
218
219         /* Check for and consume any regions we now overlap with. */
220         list_for_each_entry(rg, rg->link.prev, link) {
221                 if (&rg->link == head)
222                         break;
223                 if (rg->from > t)
224                         return chg;
225
226                 /* We overlap with this area; if it extends further than
227                  * us then we must extend ourselves.  Account for its
228                  * existing reservation. */
229                 if (rg->to > t) {
230                         chg += rg->to - t;
231                         t = rg->to;
232                 }
233                 chg -= rg->to - rg->from;
234         }
235         return chg;
236 }
237
238 static long region_truncate(struct list_head *head, long end)
239 {
240         struct file_region *rg, *trg;
241         long chg = 0;
242
243         /* Locate the region we are either in or before. */
244         list_for_each_entry(rg, head, link)
245                 if (end <= rg->to)
246                         break;
247         if (&rg->link == head)
248                 return 0;
249
250         /* If we are in the middle of a region then adjust it. */
251         if (end > rg->from) {
252                 chg = rg->to - end;
253                 rg->to = end;
254                 rg = list_entry(rg->link.next, typeof(*rg), link);
255         }
256
257         /* Drop any remaining regions. */
258         list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
259                 if (&rg->link == head)
260                         break;
261                 chg += rg->to - rg->from;
262                 list_del(&rg->link);
263                 kfree(rg);
264         }
265         return chg;
266 }
267
268 static long region_count(struct list_head *head, long f, long t)
269 {
270         struct file_region *rg;
271         long chg = 0;
272
273         /* Locate each segment we overlap with, and count that overlap. */
274         list_for_each_entry(rg, head, link) {
275                 long seg_from;
276                 long seg_to;
277
278                 if (rg->to <= f)
279                         continue;
280                 if (rg->from >= t)
281                         break;
282
283                 seg_from = max(rg->from, f);
284                 seg_to = min(rg->to, t);
285
286                 chg += seg_to - seg_from;
287         }
288
289         return chg;
290 }
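
/*
 * Worked example, illustrative only: reserving huge-page offsets [0, 2)
 * in an initially empty region list.  The locking documented in the
 * comment above struct file_region is assumed to be held by the caller.
 */
static long example_region_usage(void)
{
	LIST_HEAD(regions);
	long chg;

	/* Empty list: region_chg() inserts a zero-sized placeholder [0, 0) */
	chg = region_chg(&regions, 0, 2);	/* returns 2: both pages are new */
	if (chg < 0)
		return chg;

	/* region_add() then commits the reservation, growing it to [0, 2) */
	region_add(&regions, 0, 2);

	return region_count(&regions, 0, 2);	/* now 2 */
}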
291
292 /*
293  * Convert the address within this vma to the page offset within
294  * the mapping, in pagecache page units; huge pages here.
295  */
296 static pgoff_t vma_hugecache_offset(struct hstate *h,
297                         struct vm_area_struct *vma, unsigned long address)
298 {
299         return ((address - vma->vm_start) >> huge_page_shift(h)) +
300                         (vma->vm_pgoff >> huge_page_order(h));
301 }
302
303 pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
304                                      unsigned long address)
305 {
306         return vma_hugecache_offset(hstate_vma(vma), vma, address);
307 }
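
/*
 * A quick numeric illustration of the index calculation above, assuming a
 * hypothetical 2MB hstate on a 4KB base-page kernel (huge_page_shift() ==
 * 21, huge_page_order() == 9) and a VMA with vm_pgoff == 0:
 *
 *	address = vma->vm_start + 5 * 2MB
 *	index   = ((address - vm_start) >> 21) + (0 >> 9) = 5
 *
 * i.e. the sixth huge page of the mapping corresponds to page-cache
 * index 5, counted in huge-page-sized units.
 */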
308
309 /*
310  * Return the size of the pages allocated when backing a VMA. In the majority
311  * of cases this will be the same size as that used by the page table entries.
312  */
313 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
314 {
315         struct hstate *hstate;
316
317         if (!is_vm_hugetlb_page(vma))
318                 return PAGE_SIZE;
319
320         hstate = hstate_vma(vma);
321
322         return 1UL << (hstate->order + PAGE_SHIFT);
323 }
324 EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
325
326 /*
327  * Return the page size being used by the MMU to back a VMA. In the majority
328  * of cases, the page size used by the kernel matches the MMU size. On
329  * architectures where it differs, an architecture-specific version of this
330  * function is required.
331  */
332 #ifndef vma_mmu_pagesize
333 unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
334 {
335         return vma_kernel_pagesize(vma);
336 }
337 #endif
338
339 /*
340  * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
341  * bits of the reservation map pointer, which are always clear due to
342  * alignment.
343  */
344 #define HPAGE_RESV_OWNER    (1UL << 0)
345 #define HPAGE_RESV_UNMAPPED (1UL << 1)
346 #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
347
348 /*
349  * These helpers are used to track how many pages are reserved for
350  * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
351  * is guaranteed to have their future faults succeed.
352  *
353  * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
354  * the reserve counters are updated with the hugetlb_lock held. It is safe
355  * to reset the VMA at fork() time as it is not in use yet and there is no
356  * chance of the global counters getting corrupted as a result of the values.
357  *
358  * The private mapping reservation is represented in a subtly different
359  * manner to a shared mapping.  A shared mapping has a region map associated
360  * with the underlying file; this region map represents the backing file
361  * pages which have ever had a reservation assigned, and it persists even
362  * after the pages are instantiated.  A private mapping has a region map
363  * associated with the original mmap which is attached to all VMAs that
364  * reference it; this region map represents those offsets which have consumed
365  * a reservation, i.e. where pages have been instantiated.
366  */
367 static unsigned long get_vma_private_data(struct vm_area_struct *vma)
368 {
369         return (unsigned long)vma->vm_private_data;
370 }
371
372 static void set_vma_private_data(struct vm_area_struct *vma,
373                                                         unsigned long value)
374 {
375         vma->vm_private_data = (void *)value;
376 }
377
378 struct resv_map {
379         struct kref refs;
380         struct list_head regions;
381 };
382
383 static struct resv_map *resv_map_alloc(void)
384 {
385         struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
386         if (!resv_map)
387                 return NULL;
388
389         kref_init(&resv_map->refs);
390         INIT_LIST_HEAD(&resv_map->regions);
391
392         return resv_map;
393 }
394
395 static void resv_map_release(struct kref *ref)
396 {
397         struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
398
399         /* Clear out any active regions before we release the map. */
400         region_truncate(&resv_map->regions, 0);
401         kfree(resv_map);
402 }
403
404 static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
405 {
406         VM_BUG_ON(!is_vm_hugetlb_page(vma));
407         if (!(vma->vm_flags & VM_MAYSHARE))
408                 return (struct resv_map *)(get_vma_private_data(vma) &
409                                                         ~HPAGE_RESV_MASK);
410         return NULL;
411 }
412
413 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
414 {
415         VM_BUG_ON(!is_vm_hugetlb_page(vma));
416         VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
417
418         set_vma_private_data(vma, (get_vma_private_data(vma) &
419                                 HPAGE_RESV_MASK) | (unsigned long)map);
420 }
421
422 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
423 {
424         VM_BUG_ON(!is_vm_hugetlb_page(vma));
425         VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
426
427         set_vma_private_data(vma, get_vma_private_data(vma) | flags);
428 }
429
430 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
431 {
432         VM_BUG_ON(!is_vm_hugetlb_page(vma));
433
434         return (get_vma_private_data(vma) & flag) != 0;
435 }
436
437 /* Decrement the reserved pages in the hugepage pool by one */
438 static void decrement_hugepage_resv_vma(struct hstate *h,
439                         struct vm_area_struct *vma)
440 {
441         if (vma->vm_flags & VM_NORESERVE)
442                 return;
443
444         if (vma->vm_flags & VM_MAYSHARE) {
445                 /* Shared mappings always use reserves */
446                 h->resv_huge_pages--;
447         } else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
448                 /*
449                  * Only the process that called mmap() has reserves for
450                  * private mappings.
451                  */
452                 h->resv_huge_pages--;
453         }
454 }
455
456 /* Reset counters to 0 and clear all HPAGE_RESV_* flags */
457 void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
458 {
459         VM_BUG_ON(!is_vm_hugetlb_page(vma));
460         if (!(vma->vm_flags & VM_MAYSHARE))
461                 vma->vm_private_data = (void *)0;
462 }
463
464 /* Returns true if the VMA has associated reserve pages */
465 static int vma_has_reserves(struct vm_area_struct *vma)
466 {
467         if (vma->vm_flags & VM_MAYSHARE)
468                 return 1;
469         if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
470                 return 1;
471         return 0;
472 }
473
474 static void copy_gigantic_page(struct page *dst, struct page *src)
475 {
476         int i;
477         struct hstate *h = page_hstate(src);
478         struct page *dst_base = dst;
479         struct page *src_base = src;
480
481         for (i = 0; i < pages_per_huge_page(h); ) {
482                 cond_resched();
483                 copy_highpage(dst, src);
484
485                 i++;
486                 dst = mem_map_next(dst, dst_base, i);
487                 src = mem_map_next(src, src_base, i);
488         }
489 }
490
491 void copy_huge_page(struct page *dst, struct page *src)
492 {
493         int i;
494         struct hstate *h = page_hstate(src);
495
496         if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
497                 copy_gigantic_page(dst, src);
498                 return;
499         }
500
501         might_sleep();
502         for (i = 0; i < pages_per_huge_page(h); i++) {
503                 cond_resched();
504                 copy_highpage(dst + i, src + i);
505         }
506 }
507
508 static void enqueue_huge_page(struct hstate *h, struct page *page)
509 {
510         int nid = page_to_nid(page);
511         list_move(&page->lru, &h->hugepage_freelists[nid]);
512         h->free_huge_pages++;
513         h->free_huge_pages_node[nid]++;
514 }
515
516 static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
517 {
518         struct page *page;
519
520         if (list_empty(&h->hugepage_freelists[nid]))
521                 return NULL;
522         page = list_entry(h->hugepage_freelists[nid].next, struct page, lru);
523         list_move(&page->lru, &h->hugepage_activelist);
524         set_page_refcounted(page);
525         h->free_huge_pages--;
526         h->free_huge_pages_node[nid]--;
527         return page;
528 }
529
530 static struct page *dequeue_huge_page_vma(struct hstate *h,
531                                 struct vm_area_struct *vma,
532                                 unsigned long address, int avoid_reserve)
533 {
534         struct page *page = NULL;
535         struct mempolicy *mpol;
536         nodemask_t *nodemask;
537         struct zonelist *zonelist;
538         struct zone *zone;
539         struct zoneref *z;
540         unsigned int cpuset_mems_cookie;
541
542 retry_cpuset:
543         cpuset_mems_cookie = get_mems_allowed();
544         zonelist = huge_zonelist(vma, address,
545                                         htlb_alloc_mask, &mpol, &nodemask);
546         /*
547          * A child process with MAP_PRIVATE mappings created by its parent
548          * has no page reserves. This check ensures that reservations are
549          * not "stolen". The child may still get SIGKILLed.
550          */
551         if (!vma_has_reserves(vma) &&
552                         h->free_huge_pages - h->resv_huge_pages == 0)
553                 goto err;
554
555         /* If reserves cannot be used, ensure enough pages are in the pool */
556         if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
557                 goto err;
558
559         for_each_zone_zonelist_nodemask(zone, z, zonelist,
560                                                 MAX_NR_ZONES - 1, nodemask) {
561                 if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask)) {
562                         page = dequeue_huge_page_node(h, zone_to_nid(zone));
563                         if (page) {
564                                 if (!avoid_reserve)
565                                         decrement_hugepage_resv_vma(h, vma);
566                                 break;
567                         }
568                 }
569         }
570
571         mpol_cond_put(mpol);
572         if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
573                 goto retry_cpuset;
574         return page;
575
576 err:
577         mpol_cond_put(mpol);
578         return NULL;
579 }
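
/*
 * A sketch, not part of the original source, of the cpuset retry idiom
 * used by dequeue_huge_page_vma() above.  try_dequeue() is a hypothetical
 * stand-in for the zonelist walk; the cookie is the same
 * get_mems_allowed()/put_mems_allowed() pair used in the function above.
 */
static struct page *example_cpuset_retry(struct hstate *h)
{
	struct page *page;
	unsigned int cookie;

	do {
		cookie = get_mems_allowed();
		page = try_dequeue(h);		/* hypothetical helper */
		/*
		 * Retry only when the walk failed *and* the cpuset's allowed
		 * nodes changed underneath us while we were walking them.
		 */
	} while (!put_mems_allowed(cookie) && !page);

	return page;
}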
580
581 static void update_and_free_page(struct hstate *h, struct page *page)
582 {
583         int i;
584
585         VM_BUG_ON(h->order >= MAX_ORDER);
586
587         h->nr_huge_pages--;
588         h->nr_huge_pages_node[page_to_nid(page)]--;
589         for (i = 0; i < pages_per_huge_page(h); i++) {
590                 page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
591                                 1 << PG_referenced | 1 << PG_dirty |
592                                 1 << PG_active | 1 << PG_reserved |
593                                 1 << PG_private | 1 << PG_writeback);
594         }
595         VM_BUG_ON(hugetlb_cgroup_from_page(page));
596         set_compound_page_dtor(page, NULL);
597         set_page_refcounted(page);
598         arch_release_hugepage(page);
599         __free_pages(page, huge_page_order(h));
600 }
601
602 struct hstate *size_to_hstate(unsigned long size)
603 {
604         struct hstate *h;
605
606         for_each_hstate(h) {
607                 if (huge_page_size(h) == size)
608                         return h;
609         }
610         return NULL;
611 }
612
613 static void free_huge_page(struct page *page)
614 {
615         /*
616          * Can't pass hstate in here because it is called from the
617          * compound page destructor.
618          */
619         struct hstate *h = page_hstate(page);
620         int nid = page_to_nid(page);
621         struct hugepage_subpool *spool =
622                 (struct hugepage_subpool *)page_private(page);
623
624         set_page_private(page, 0);
625         page->mapping = NULL;
626         BUG_ON(page_count(page));
627         BUG_ON(page_mapcount(page));
628
629         spin_lock(&hugetlb_lock);
630         hugetlb_cgroup_uncharge_page(hstate_index(h),
631                                      pages_per_huge_page(h), page);
632         if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) {
633                 /* remove the page from active list */
634                 list_del(&page->lru);
635                 update_and_free_page(h, page);
636                 h->surplus_huge_pages--;
637                 h->surplus_huge_pages_node[nid]--;
638         } else {
639                 enqueue_huge_page(h, page);
640         }
641         spin_unlock(&hugetlb_lock);
642         hugepage_subpool_put_pages(spool, 1);
643 }
644
645 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
646 {
647         INIT_LIST_HEAD(&page->lru);
648         set_compound_page_dtor(page, free_huge_page);
649         spin_lock(&hugetlb_lock);
650         set_hugetlb_cgroup(page, NULL);
651         h->nr_huge_pages++;
652         h->nr_huge_pages_node[nid]++;
653         spin_unlock(&hugetlb_lock);
654         put_page(page); /* free it into the hugepage allocator */
655 }
656
657 static void prep_compound_gigantic_page(struct page *page, unsigned long order)
658 {
659         int i;
660         int nr_pages = 1 << order;
661         struct page *p = page + 1;
662
663         /* we rely on prep_new_huge_page to set the destructor */
664         set_compound_order(page, order);
665         __SetPageHead(page);
666         for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
667                 __SetPageTail(p);
668                 set_page_count(p, 0);
669                 p->first_page = page;
670         }
671 }
672
673 int PageHuge(struct page *page)
674 {
675         compound_page_dtor *dtor;
676
677         if (!PageCompound(page))
678                 return 0;
679
680         page = compound_head(page);
681         dtor = get_compound_page_dtor(page);
682
683         return dtor == free_huge_page;
684 }
685 EXPORT_SYMBOL_GPL(PageHuge);
686
687 static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
688 {
689         struct page *page;
690
691         if (h->order >= MAX_ORDER)
692                 return NULL;
693
694         page = alloc_pages_exact_node(nid,
695                 htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
696                                                 __GFP_REPEAT|__GFP_NOWARN,
697                 huge_page_order(h));
698         if (page) {
699                 if (arch_prepare_hugepage(page)) {
700                         __free_pages(page, huge_page_order(h));
701                         return NULL;
702                 }
703                 prep_new_huge_page(h, page, nid);
704         }
705
706         return page;
707 }
708
709 /*
710  * common helper functions for hstate_next_node_to_{alloc|free}.
711  * We may have allocated or freed a huge page based on a different
712  * nodes_allowed previously, so h->next_node_to_{alloc|free} might
713  * be outside of *nodes_allowed.  Ensure that we use an allowed
714  * node for alloc or free.
715  */
716 static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
717 {
718         nid = next_node(nid, *nodes_allowed);
719         if (nid == MAX_NUMNODES)
720                 nid = first_node(*nodes_allowed);
721         VM_BUG_ON(nid >= MAX_NUMNODES);
722
723         return nid;
724 }
725
726 static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
727 {
728         if (!node_isset(nid, *nodes_allowed))
729                 nid = next_node_allowed(nid, nodes_allowed);
730         return nid;
731 }
732
733 /*
734  * returns the previously saved node ["this node"] from which to
735  * allocate a persistent huge page for the pool and advances the
736  * next node from which to allocate, handling wrap at end of node
737  * mask.
738  */
739 static int hstate_next_node_to_alloc(struct hstate *h,
740                                         nodemask_t *nodes_allowed)
741 {
742         int nid;
743
744         VM_BUG_ON(!nodes_allowed);
745
746         nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
747         h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
748
749         return nid;
750 }
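
/*
 * Worked example, illustrative only: with *nodes_allowed = { 0, 2 } and
 * h->next_nid_to_alloc == 1, successive calls to hstate_next_node_to_alloc()
 * return 2, 0, 2, 0, ...  The first call rounds the saved nid 1 up to the
 * allowed node 2 via get_valid_node_allowed(), and next_node_allowed()
 * handles the wrap from the last allowed node back to the first.
 */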
751
752 static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
753 {
754         struct page *page;
755         int start_nid;
756         int next_nid;
757         int ret = 0;
758
759         start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
760         next_nid = start_nid;
761
762         do {
763                 page = alloc_fresh_huge_page_node(h, next_nid);
764                 if (page) {
765                         ret = 1;
766                         break;
767                 }
768                 next_nid = hstate_next_node_to_alloc(h, nodes_allowed);
769         } while (next_nid != start_nid);
770
771         if (ret)
772                 count_vm_event(HTLB_BUDDY_PGALLOC);
773         else
774                 count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
775
776         return ret;
777 }
778
779 /*
780  * helper for free_pool_huge_page() - return the previously saved
781  * node ["this node"] from which to free a huge page.  Advance the
782  * next node id whether or not we find a free huge page to free so
783  * that the next attempt to free addresses the next node.
784  */
785 static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
786 {
787         int nid;
788
789         VM_BUG_ON(!nodes_allowed);
790
791         nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
792         h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
793
794         return nid;
795 }
796
797 /*
798  * Free huge page from pool from next node to free.
799  * Attempt to keep persistent huge pages more or less
800  * balanced over allowed nodes.
801  * Called with hugetlb_lock locked.
802  */
803 static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
804                                                          bool acct_surplus)
805 {
806         int start_nid;
807         int next_nid;
808         int ret = 0;
809
810         start_nid = hstate_next_node_to_free(h, nodes_allowed);
811         next_nid = start_nid;
812
813         do {
814                 /*
815                  * If we're returning unused surplus pages, only examine
816                  * nodes with surplus pages.
817                  */
818                 if ((!acct_surplus || h->surplus_huge_pages_node[next_nid]) &&
819                     !list_empty(&h->hugepage_freelists[next_nid])) {
820                         struct page *page =
821                                 list_entry(h->hugepage_freelists[next_nid].next,
822                                           struct page, lru);
823                         list_del(&page->lru);
824                         h->free_huge_pages--;
825                         h->free_huge_pages_node[next_nid]--;
826                         if (acct_surplus) {
827                                 h->surplus_huge_pages--;
828                                 h->surplus_huge_pages_node[next_nid]--;
829                         }
830                         update_and_free_page(h, page);
831                         ret = 1;
832                         break;
833                 }
834                 next_nid = hstate_next_node_to_free(h, nodes_allowed);
835         } while (next_nid != start_nid);
836
837         return ret;
838 }
839
840 static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
841 {
842         struct page *page;
843         unsigned int r_nid;
844
845         if (h->order >= MAX_ORDER)
846                 return NULL;
847
848         /*
849          * Assume we will successfully allocate the surplus page to
850          * prevent racing processes from causing the surplus to exceed
851          * overcommit
852          *
853          * This however introduces a different race, where a process B
854          * tries to grow the static hugepage pool while alloc_pages() is
855          * called by process A. B will only examine the per-node
856          * counters in determining if surplus huge pages can be
857          * converted to normal huge pages in adjust_pool_surplus(). A
858          * won't be able to increment the per-node counter, until the
859          * lock is dropped by B, but B doesn't drop hugetlb_lock until
860          * no more huge pages can be converted from surplus to normal
861          * state (and doesn't try to convert again). Thus, we have a
862          * case where a surplus huge page exists, the pool is grown, and
863          * the surplus huge page still exists after, even though it
864          * should just have been converted to a normal huge page. This
865          * does not leak memory, though, as the hugepage will be freed
866          * once it is out of use. It also does not allow the counters to
867          * go out of whack in adjust_pool_surplus() as we don't modify
868          * the node values until we've gotten the hugepage and only the
869          * per-node value is checked there.
870          */
871         spin_lock(&hugetlb_lock);
872         if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
873                 spin_unlock(&hugetlb_lock);
874                 return NULL;
875         } else {
876                 h->nr_huge_pages++;
877                 h->surplus_huge_pages++;
878         }
879         spin_unlock(&hugetlb_lock);
880
881         if (nid == NUMA_NO_NODE)
882                 page = alloc_pages(htlb_alloc_mask|__GFP_COMP|
883                                    __GFP_REPEAT|__GFP_NOWARN,
884                                    huge_page_order(h));
885         else
886                 page = alloc_pages_exact_node(nid,
887                         htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
888                         __GFP_REPEAT|__GFP_NOWARN, huge_page_order(h));
889
890         if (page && arch_prepare_hugepage(page)) {
891                 __free_pages(page, huge_page_order(h));
892                 page = NULL;
893         }
894
895         spin_lock(&hugetlb_lock);
896         if (page) {
897                 INIT_LIST_HEAD(&page->lru);
898                 r_nid = page_to_nid(page);
899                 set_compound_page_dtor(page, free_huge_page);
900                 set_hugetlb_cgroup(page, NULL);
901                 /*
902                  * We incremented the global counters already
903                  */
904                 h->nr_huge_pages_node[r_nid]++;
905                 h->surplus_huge_pages_node[r_nid]++;
906                 __count_vm_event(HTLB_BUDDY_PGALLOC);
907         } else {
908                 h->nr_huge_pages--;
909                 h->surplus_huge_pages--;
910                 __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
911         }
912         spin_unlock(&hugetlb_lock);
913
914         return page;
915 }
916
917 /*
918  * This allocation function is useful in the context where vma is irrelevant.
918  * E.g. soft-offlining uses this function because it only cares about the
919  * physical address of the error page.
921  */
922 struct page *alloc_huge_page_node(struct hstate *h, int nid)
923 {
924         struct page *page;
925
926         spin_lock(&hugetlb_lock);
927         page = dequeue_huge_page_node(h, nid);
928         spin_unlock(&hugetlb_lock);
929
930         if (!page)
931                 page = alloc_buddy_huge_page(h, nid);
932
933         return page;
934 }
935
936 /*
937  * Increase the hugetlb pool such that it can accommodate a reservation
938  * of size 'delta'.
939  */
940 static int gather_surplus_pages(struct hstate *h, int delta)
941 {
942         struct list_head surplus_list;
943         struct page *page, *tmp;
944         int ret, i;
945         int needed, allocated;
946         bool alloc_ok = true;
947
948         needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
949         if (needed <= 0) {
950                 h->resv_huge_pages += delta;
951                 return 0;
952         }
953
954         allocated = 0;
955         INIT_LIST_HEAD(&surplus_list);
956
957         ret = -ENOMEM;
958 retry:
959         spin_unlock(&hugetlb_lock);
960         for (i = 0; i < needed; i++) {
961                 page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
962                 if (!page) {
963                         alloc_ok = false;
964                         break;
965                 }
966                 list_add(&page->lru, &surplus_list);
967         }
968         allocated += i;
969
970         /*
971          * After retaking hugetlb_lock, we need to recalculate 'needed'
972          * because either resv_huge_pages or free_huge_pages may have changed.
973          */
974         spin_lock(&hugetlb_lock);
975         needed = (h->resv_huge_pages + delta) -
976                         (h->free_huge_pages + allocated);
977         if (needed > 0) {
978                 if (alloc_ok)
979                         goto retry;
980                 /*
981                  * We were not able to allocate enough pages to
982                  * satisfy the entire reservation so we free what
983                  * we've allocated so far.
984                  */
985                 goto free;
986         }
987         /*
988          * The surplus_list now contains _at_least_ the number of extra pages
989          * needed to accommodate the reservation.  Add the appropriate number
990          * of pages to the hugetlb pool and free the extras back to the buddy
991          * allocator.  Commit the entire reservation here to prevent another
992          * process from stealing the pages as they are added to the pool but
993          * before they are reserved.
994          */
995         needed += allocated;
996         h->resv_huge_pages += delta;
997         ret = 0;
998
999         /* Free the needed pages to the hugetlb pool */
1000         list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
1001                 if ((--needed) < 0)
1002                         break;
1003                 /*
1004                  * This page is now managed by the hugetlb allocator and has
1005                  * no users -- drop the buddy allocator's reference.
1006                  */
1007                 put_page_testzero(page);
1008                 VM_BUG_ON(page_count(page));
1009                 enqueue_huge_page(h, page);
1010         }
1011 free:
1012         spin_unlock(&hugetlb_lock);
1013
1014         /* Free unnecessary surplus pages to the buddy allocator */
1015         if (!list_empty(&surplus_list)) {
1016                 list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
1017                         put_page(page);
1018                 }
1019         }
1020         spin_lock(&hugetlb_lock);
1021
1022         return ret;
1023 }
1024
1025 /*
1026  * When releasing a hugetlb pool reservation, any surplus pages that were
1027  * allocated to satisfy the reservation must be explicitly freed if they were
1028  * never used.
1029  * Called with hugetlb_lock held.
1030  */
1031 static void return_unused_surplus_pages(struct hstate *h,
1032                                         unsigned long unused_resv_pages)
1033 {
1034         unsigned long nr_pages;
1035
1036         /* Uncommit the reservation */
1037         h->resv_huge_pages -= unused_resv_pages;
1038
1039         /* Cannot return gigantic pages currently */
1040         if (h->order >= MAX_ORDER)
1041                 return;
1042
1043         nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
1044
1045         /*
1046          * We want to release as many surplus pages as possible, spread
1047          * evenly across all nodes with memory. Iterate across these nodes
1048          * until we can no longer free unreserved surplus pages. This occurs
1049          * when the nodes with surplus pages have no free pages.
1050  * free_pool_huge_page() will balance the freed pages across the
1051          * on-line nodes with memory and will handle the hstate accounting.
1052          */
1053         while (nr_pages--) {
1054                 if (!free_pool_huge_page(h, &node_states[N_HIGH_MEMORY], 1))
1055                         break;
1056         }
1057 }
1058
1059 /*
1060  * Determine if the huge page at addr within the vma has an associated
1061  * reservation.  Where it does not we will need to logically increase
1062  * reservation and actually increase subpool usage before an allocation
1063  * can occur.  Where any new reservation would be required the
1064  * reservation change is prepared, but not committed.  Once the page
1065  * has been allocated from the subpool and instantiated the change should
1066  * be committed via vma_commit_reservation.  No action is required on
1067  * failure.
1068  */
1069 static long vma_needs_reservation(struct hstate *h,
1070                         struct vm_area_struct *vma, unsigned long addr)
1071 {
1072         struct address_space *mapping = vma->vm_file->f_mapping;
1073         struct inode *inode = mapping->host;
1074
1075         if (vma->vm_flags & VM_MAYSHARE) {
1076                 pgoff_t idx = vma_hugecache_offset(h, vma, addr);
1077                 return region_chg(&inode->i_mapping->private_list,
1078                                                         idx, idx + 1);
1079
1080         } else if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
1081                 return 1;
1082
1083         } else  {
1084                 long err;
1085                 pgoff_t idx = vma_hugecache_offset(h, vma, addr);
1086                 struct resv_map *reservations = vma_resv_map(vma);
1087
1088                 err = region_chg(&reservations->regions, idx, idx + 1);
1089                 if (err < 0)
1090                         return err;
1091                 return 0;
1092         }
1093 }
1094 static void vma_commit_reservation(struct hstate *h,
1095                         struct vm_area_struct *vma, unsigned long addr)
1096 {
1097         struct address_space *mapping = vma->vm_file->f_mapping;
1098         struct inode *inode = mapping->host;
1099
1100         if (vma->vm_flags & VM_MAYSHARE) {
1101                 pgoff_t idx = vma_hugecache_offset(h, vma, addr);
1102                 region_add(&inode->i_mapping->private_list, idx, idx + 1);
1103
1104         } else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
1105                 pgoff_t idx = vma_hugecache_offset(h, vma, addr);
1106                 struct resv_map *reservations = vma_resv_map(vma);
1107
1108                 /* Mark this page used in the map. */
1109                 region_add(&reservations->regions, idx, idx + 1);
1110         }
1111 }
1112
1113 static struct page *alloc_huge_page(struct vm_area_struct *vma,
1114                                     unsigned long addr, int avoid_reserve)
1115 {
1116         struct hugepage_subpool *spool = subpool_vma(vma);
1117         struct hstate *h = hstate_vma(vma);
1118         struct page *page;
1119         long chg;
1120         int ret, idx;
1121         struct hugetlb_cgroup *h_cg;
1122
1123         idx = hstate_index(h);
1124         /*
1125          * Processes that did not create the mapping will have no
1126          * reserves and will not have accounted against the subpool
1127          * limit. Check that the subpool limit can be made before
1128          * satisfying the allocation.  MAP_NORESERVE mappings may also
1129          * need pages and a subpool limit allocation if no reserve
1130          * mapping overlaps.
1131          */
1132         chg = vma_needs_reservation(h, vma, addr);
1133         if (chg < 0)
1134                 return ERR_PTR(-ENOMEM);
1135         if (chg)
1136                 if (hugepage_subpool_get_pages(spool, chg))
1137                         return ERR_PTR(-ENOSPC);
1138
1139         ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
1140         if (ret) {
1141                 hugepage_subpool_put_pages(spool, chg);
1142                 return ERR_PTR(-ENOSPC);
1143         }
1144         spin_lock(&hugetlb_lock);
1145         page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
1146         spin_unlock(&hugetlb_lock);
1147
1148         if (!page) {
1149                 page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
1150                 if (!page) {
1151                         hugetlb_cgroup_uncharge_cgroup(idx,
1152                                                        pages_per_huge_page(h),
1153                                                        h_cg);
1154                         hugepage_subpool_put_pages(spool, chg);
1155                         return ERR_PTR(-ENOSPC);
1156                 }
1157         }
1158
1159         set_page_private(page, (unsigned long)spool);
1160
1161         vma_commit_reservation(h, vma, addr);
1162         /* update page cgroup details */
1163         hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
1164         return page;
1165 }
1166
1167 int __weak alloc_bootmem_huge_page(struct hstate *h)
1168 {
1169         struct huge_bootmem_page *m;
1170         int nr_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
1171
1172         while (nr_nodes) {
1173                 void *addr;
1174
1175                 addr = __alloc_bootmem_node_nopanic(
1176                                 NODE_DATA(hstate_next_node_to_alloc(h,
1177                                                 &node_states[N_HIGH_MEMORY])),
1178                                 huge_page_size(h), huge_page_size(h), 0);
1179
1180                 if (addr) {
1181                         /*
1182                          * Use the beginning of the huge page to store the
1183                          * huge_bootmem_page struct (until gather_bootmem
1184                          * puts them into the mem_map).
1185                          */
1186                         m = addr;
1187                         goto found;
1188                 }
1189                 nr_nodes--;
1190         }
1191         return 0;
1192
1193 found:
1194         BUG_ON((unsigned long)virt_to_phys(m) & (huge_page_size(h) - 1));
1195         /* Put them into a private list first because mem_map is not up yet */
1196         list_add(&m->list, &huge_boot_pages);
1197         m->hstate = h;
1198         return 1;
1199 }
1200
1201 static void prep_compound_huge_page(struct page *page, int order)
1202 {
1203         if (unlikely(order > (MAX_ORDER - 1)))
1204                 prep_compound_gigantic_page(page, order);
1205         else
1206                 prep_compound_page(page, order);
1207 }
1208
1209 /* Put bootmem huge pages into the standard lists after mem_map is up */
1210 static void __init gather_bootmem_prealloc(void)
1211 {
1212         struct huge_bootmem_page *m;
1213
1214         list_for_each_entry(m, &huge_boot_pages, list) {
1215                 struct hstate *h = m->hstate;
1216                 struct page *page;
1217
1218 #ifdef CONFIG_HIGHMEM
1219                 page = pfn_to_page(m->phys >> PAGE_SHIFT);
1220                 free_bootmem_late((unsigned long)m,
1221                                   sizeof(struct huge_bootmem_page));
1222 #else
1223                 page = virt_to_page(m);
1224 #endif
1225                 __ClearPageReserved(page);
1226                 WARN_ON(page_count(page) != 1);
1227                 prep_compound_huge_page(page, h->order);
1228                 prep_new_huge_page(h, page, page_to_nid(page));
1229                 /*
1230                  * If we had gigantic hugepages allocated at boot time, we need
1231                  * to restore the 'stolen' pages to totalram_pages in order to
1232          * fix confusing memory reports from free(1) and other
1233          * side-effects, like CommitLimit going negative.
1234                  */
1235                 if (h->order > (MAX_ORDER - 1))
1236                         totalram_pages += 1 << h->order;
1237         }
1238 }
1239
1240 static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
1241 {
1242         unsigned long i;
1243
1244         for (i = 0; i < h->max_huge_pages; ++i) {
1245                 if (h->order >= MAX_ORDER) {
1246                         if (!alloc_bootmem_huge_page(h))
1247                                 break;
1248                 } else if (!alloc_fresh_huge_page(h,
1249                                          &node_states[N_HIGH_MEMORY]))
1250                         break;
1251         }
1252         h->max_huge_pages = i;
1253 }
1254
1255 static void __init hugetlb_init_hstates(void)
1256 {
1257         struct hstate *h;
1258
1259         for_each_hstate(h) {
1260                 /* oversize hugepages were init'ed in early boot */
1261                 if (h->order < MAX_ORDER)
1262                         hugetlb_hstate_alloc_pages(h);
1263         }
1264 }
1265
1266 static char * __init memfmt(char *buf, unsigned long n)
1267 {
1268         if (n >= (1UL << 30))
1269                 sprintf(buf, "%lu GB", n >> 30);
1270         else if (n >= (1UL << 20))
1271                 sprintf(buf, "%lu MB", n >> 20);
1272         else
1273                 sprintf(buf, "%lu KB", n >> 10);
1274         return buf;
1275 }
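
/*
 * A minimal illustration, not part of the original source, of the helper
 * above; the only real caller is report_hugepages() below.
 */
static void __init example_memfmt(void)
{
	char buf[32];

	printk(KERN_INFO "%s\n", memfmt(buf, 2UL << 20));	/* "2 MB" */
	printk(KERN_INFO "%s\n", memfmt(buf, 1UL << 30));	/* "1 GB" */
}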
1276
1277 static void __init report_hugepages(void)
1278 {
1279         struct hstate *h;
1280
1281         for_each_hstate(h) {
1282                 char buf[32];
1283                 printk(KERN_INFO "HugeTLB registered %s page size, "
1284                                  "pre-allocated %ld pages\n",
1285                         memfmt(buf, huge_page_size(h)),
1286                         h->free_huge_pages);
1287         }
1288 }
1289
1290 #ifdef CONFIG_HIGHMEM
1291 static void try_to_free_low(struct hstate *h, unsigned long count,
1292                                                 nodemask_t *nodes_allowed)
1293 {
1294         int i;
1295
1296         if (h->order >= MAX_ORDER)
1297                 return;
1298
1299         for_each_node_mask(i, *nodes_allowed) {
1300                 struct page *page, *next;
1301                 struct list_head *freel = &h->hugepage_freelists[i];
1302                 list_for_each_entry_safe(page, next, freel, lru) {
1303                         if (count >= h->nr_huge_pages)
1304                                 return;
1305                         if (PageHighMem(page))
1306                                 continue;
1307                         list_del(&page->lru);
1308                         update_and_free_page(h, page);
1309                         h->free_huge_pages--;
1310                         h->free_huge_pages_node[page_to_nid(page)]--;
1311                 }
1312         }
1313 }
1314 #else
1315 static inline void try_to_free_low(struct hstate *h, unsigned long count,
1316                                                 nodemask_t *nodes_allowed)
1317 {
1318 }
1319 #endif
1320
1321 /*
1322  * Increment or decrement surplus_huge_pages.  Keep node-specific counters
1323  * balanced by operating on them in a round-robin fashion.
1324  * Returns 1 if an adjustment was made.
1325  */
1326 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
1327                                 int delta)
1328 {
1329         int start_nid, next_nid;
1330         int ret = 0;
1331
1332         VM_BUG_ON(delta != -1 && delta != 1);
1333
1334         if (delta < 0)
1335                 start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
1336         else
1337                 start_nid = hstate_next_node_to_free(h, nodes_allowed);
1338         next_nid = start_nid;
1339
1340         do {
1341                 int nid = next_nid;
1342                 if (delta < 0)  {
1343                         /*
1344                          * To shrink on this node, there must be a surplus page
1345                          */
1346                         if (!h->surplus_huge_pages_node[nid]) {
1347                                 next_nid = hstate_next_node_to_alloc(h,
1348                                                                 nodes_allowed);
1349                                 continue;
1350                         }
1351                 }
1352                 if (delta > 0) {
1353                         /*
1354                          * Surplus cannot exceed the total number of pages
1355                          */
1356                         if (h->surplus_huge_pages_node[nid] >=
1357                                                 h->nr_huge_pages_node[nid]) {
1358                                 next_nid = hstate_next_node_to_free(h,
1359                                                                 nodes_allowed);
1360                                 continue;
1361                         }
1362                 }
1363
1364                 h->surplus_huge_pages += delta;
1365                 h->surplus_huge_pages_node[nid] += delta;
1366                 ret = 1;
1367                 break;
1368         } while (next_nid != start_nid);
1369
1370         return ret;
1371 }
1372
1373 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
1374 static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
1375                                                 nodemask_t *nodes_allowed)
1376 {
1377         unsigned long min_count, ret;
1378
1379         if (h->order >= MAX_ORDER)
1380                 return h->max_huge_pages;
1381
1382         /*
1383          * Increase the pool size
1384          * First take pages out of surplus state.  Then make up the
1385          * remaining difference by allocating fresh huge pages.
1386          *
1387          * We might race with alloc_buddy_huge_page() here and be unable
1388          * to convert a surplus huge page to a normal huge page. That is
1389          * not critical, though, it just means the overall size of the
1390          * pool might be one hugepage larger than it needs to be, but
1391          * within all the constraints specified by the sysctls.
1392          */
1393         spin_lock(&hugetlb_lock);
1394         while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
1395                 if (!adjust_pool_surplus(h, nodes_allowed, -1))
1396                         break;
1397         }
1398
1399         while (count > persistent_huge_pages(h)) {
1400                 /*
1401                  * If this allocation races such that we no longer need the
1402                  * page, free_huge_page will handle it by freeing the page
1403                  * and reducing the surplus.
1404                  */
1405                 spin_unlock(&hugetlb_lock);
1406                 ret = alloc_fresh_huge_page(h, nodes_allowed);
1407                 spin_lock(&hugetlb_lock);
1408                 if (!ret)
1409                         goto out;
1410
1411                 /* Bail for signals. Probably ctrl-c from user */
1412                 if (signal_pending(current))
1413                         goto out;
1414         }
1415
1416         /*
1417          * Decrease the pool size
1418          * First return free pages to the buddy allocator (being careful
1419          * to keep enough around to satisfy reservations).  Then place
1420          * pages into surplus state as needed so the pool will shrink
1421          * to the desired size as pages become free.
1422          *
1423          * By placing pages into the surplus state independent of the
1424          * overcommit value, we are allowing the surplus pool size to
1425          * exceed overcommit. There are few sane options here. Since
1426          * alloc_buddy_huge_page() is checking the global counter,
1427          * though, we'll note that we're not allowed to exceed surplus
1428          * and won't grow the pool anywhere else. Not until one of the
1429          * sysctls is changed, or the surplus pages go out of use.
1430          */
1431         min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
1432         min_count = max(count, min_count);
1433         try_to_free_low(h, min_count, nodes_allowed);
1434         while (min_count < persistent_huge_pages(h)) {
1435                 if (!free_pool_huge_page(h, nodes_allowed, 0))
1436                         break;
1437         }
1438         while (count < persistent_huge_pages(h)) {
1439                 if (!adjust_pool_surplus(h, nodes_allowed, 1))
1440                         break;
1441         }
1442 out:
1443         ret = persistent_huge_pages(h);
1444         spin_unlock(&hugetlb_lock);
1445         return ret;
1446 }
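
/*
 * Worked example of the resize logic above (all numbers hypothetical):
 * with nr_huge_pages = 10, surplus_huge_pages = 2, free_huge_pages = 4,
 * resv_huge_pages = 3 and a requested count of 5:
 *
 *	persistent_huge_pages = 10 - 2 = 8, so this is a shrink request;
 *	min_count = 3 + 10 - 4 = 9, clamped to max(5, 9) = 9, so no free
 *	pages are handed back to the buddy allocator (9 is not below 8);
 *	the final loop marks pages surplus until persistent_huge_pages
 *	drops from 8 to the requested 5, and those pages are returned to
 *	the buddy allocator later, as their current users free them.
 */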
1447
1448 #define HSTATE_ATTR_RO(_name) \
1449         static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
1450
1451 #define HSTATE_ATTR(_name) \
1452         static struct kobj_attribute _name##_attr = \
1453                 __ATTR(_name, 0644, _name##_show, _name##_store)
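
/*
 * For reference, HSTATE_ATTR(nr_hugepages) below expands to roughly:
 *
 *	static struct kobj_attribute nr_hugepages_attr =
 *		__ATTR(nr_hugepages, 0644, nr_hugepages_show, nr_hugepages_store);
 *
 * i.e. a mode-0644 sysfs attribute wired to the _show/_store handlers of
 * the same name defined next to each use of the macro.
 */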
1454
1455 static struct kobject *hugepages_kobj;
1456 static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
1457
1458 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
1459
1460 static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
1461 {
1462         int i;
1463
1464         for (i = 0; i < HUGE_MAX_HSTATE; i++)
1465                 if (hstate_kobjs[i] == kobj) {
1466                         if (nidp)
1467                                 *nidp = NUMA_NO_NODE;
1468                         return &hstates[i];
1469                 }
1470
1471         return kobj_to_node_hstate(kobj, nidp);
1472 }
1473
1474 static ssize_t nr_hugepages_show_common(struct kobject *kobj,
1475                                         struct kobj_attribute *attr, char *buf)
1476 {
1477         struct hstate *h;
1478         unsigned long nr_huge_pages;
1479         int nid;
1480
1481         h = kobj_to_hstate(kobj, &nid);
1482         if (nid == NUMA_NO_NODE)
1483                 nr_huge_pages = h->nr_huge_pages;
1484         else
1485                 nr_huge_pages = h->nr_huge_pages_node[nid];
1486
1487         return sprintf(buf, "%lu\n", nr_huge_pages);
1488 }
1489
1490 static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
1491                         struct kobject *kobj, struct kobj_attribute *attr,
1492                         const char *buf, size_t len)
1493 {
1494         int err;
1495         int nid;
1496         unsigned long count;
1497         struct hstate *h;
1498         NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
1499
1500         err = strict_strtoul(buf, 10, &count);
1501         if (err)
1502                 goto out;
1503
1504         h = kobj_to_hstate(kobj, &nid);
1505         if (h->order >= MAX_ORDER) {
1506                 err = -EINVAL;
1507                 goto out;
1508         }
1509
1510         if (nid == NUMA_NO_NODE) {
1511                 /*
1512                  * global hstate attribute
1513                  */
1514                 if (!(obey_mempolicy &&
1515                                 init_nodemask_of_mempolicy(nodes_allowed))) {
1516                         NODEMASK_FREE(nodes_allowed);
1517                         nodes_allowed = &node_states[N_HIGH_MEMORY];
1518                 }
1519         } else if (nodes_allowed) {
1520                 /*
1521                  * per node hstate attribute: adjust count to global,
1522                  * but restrict alloc/free to the specified node.
1523                  */
1524                 count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
1525                 init_nodemask_of_node(nodes_allowed, nid);
1526         } else
1527                 nodes_allowed = &node_states[N_HIGH_MEMORY];
1528
1529         h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
1530
1531         if (nodes_allowed != &node_states[N_HIGH_MEMORY])
1532                 NODEMASK_FREE(nodes_allowed);
1533
1534         return len;
1535 out:
1536         NODEMASK_FREE(nodes_allowed);
1537         return err;
1538 }
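/*
 * Illustrative example of the per node adjustment above, with hypothetical
 * numbers: if the global pool holds 100 pages of which node 1 holds 30,
 * writing 50 to node 1's nr_hugepages yields count = 50 + (100 - 30) = 120
 * with nodes_allowed restricted to node 1, so the 20 additional pages are
 * allocated on node 1 only and other nodes are left untouched.
 */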
1539
1540 static ssize_t nr_hugepages_show(struct kobject *kobj,
1541                                        struct kobj_attribute *attr, char *buf)
1542 {
1543         return nr_hugepages_show_common(kobj, attr, buf);
1544 }
1545
1546 static ssize_t nr_hugepages_store(struct kobject *kobj,
1547                struct kobj_attribute *attr, const char *buf, size_t len)
1548 {
1549         return nr_hugepages_store_common(false, kobj, attr, buf, len);
1550 }
1551 HSTATE_ATTR(nr_hugepages);
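/*
 * For reference, HSTATE_ATTR(nr_hugepages) above expands to roughly:
 *
 *	static struct kobj_attribute nr_hugepages_attr =
 *		__ATTR(nr_hugepages, 0644, nr_hugepages_show, nr_hugepages_store);
 *
 * i.e. a mode 0644 "nr_hugepages" file under each hstate kobject.
 */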
1552
1553 #ifdef CONFIG_NUMA
1554
1555 /*
1556  * hstate attribute for optionally mempolicy-based constraint on persistent
1557  * huge page alloc/free.
1558  */
1559 static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
1560                                        struct kobj_attribute *attr, char *buf)
1561 {
1562         return nr_hugepages_show_common(kobj, attr, buf);
1563 }
1564
1565 static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
1566                struct kobj_attribute *attr, const char *buf, size_t len)
1567 {
1568         return nr_hugepages_store_common(true, kobj, attr, buf, len);
1569 }
1570 HSTATE_ATTR(nr_hugepages_mempolicy);
1571 #endif
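/*
 * Usage sketch (hypothetical command, assuming the numactl utility and a
 * 2MB hstate):
 *
 *	numactl --membind=0,1 sh -c \
 *	  'echo 64 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages_mempolicy'
 *
 * constrains the allocation/free of persistent huge pages to nodes 0 and 1,
 * whereas a write to plain nr_hugepages ignores the task's mempolicy.
 */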
1572
1573
1574 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
1575                                         struct kobj_attribute *attr, char *buf)
1576 {
1577         struct hstate *h = kobj_to_hstate(kobj, NULL);
1578         return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
1579 }
1580
1581 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
1582                 struct kobj_attribute *attr, const char *buf, size_t count)
1583 {
1584         int err;
1585         unsigned long input;
1586         struct hstate *h = kobj_to_hstate(kobj, NULL);
1587
1588         if (h->order >= MAX_ORDER)
1589                 return -EINVAL;
1590
1591         err = strict_strtoul(buf, 10, &input);
1592         if (err)
1593                 return err;
1594
1595         spin_lock(&hugetlb_lock);
1596         h->nr_overcommit_huge_pages = input;
1597         spin_unlock(&hugetlb_lock);
1598
1599         return count;
1600 }
1601 HSTATE_ATTR(nr_overcommit_hugepages);
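/*
 * Usage sketch (hypothetical value, assuming a 2MB hstate):
 *
 *	echo 32 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_overcommit_hugepages
 *
 * permits up to 32 surplus huge pages to be allocated from the buddy
 * allocator on demand, on top of the persistent pool.
 */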
1602
1603 static ssize_t free_hugepages_show(struct kobject *kobj,
1604                                         struct kobj_attribute *attr, char *buf)
1605 {
1606         struct hstate *h;
1607         unsigned long free_huge_pages;
1608         int nid;
1609
1610         h = kobj_to_hstate(kobj, &nid);
1611         if (nid == NUMA_NO_NODE)
1612                 free_huge_pages = h->free_huge_pages;
1613         else
1614                 free_huge_pages = h->free_huge_pages_node[nid];
1615
1616         return sprintf(buf, "%lu\n", free_huge_pages);
1617 }
1618 HSTATE_ATTR_RO(free_hugepages);
1619
1620 static ssize_t resv_hugepages_show(struct kobject *kobj,
1621                                         struct kobj_attribute *attr, char *buf)
1622 {
1623         struct hstate *h = kobj_to_hstate(kobj, NULL);
1624         return sprintf(buf, "%lu\n", h->resv_huge_pages);
1625 }
1626 HSTATE_ATTR_RO(resv_hugepages);
1627
1628 static ssize_t surplus_hugepages_show(struct kobject *kobj,
1629                                         struct kobj_attribute *attr, char *buf)
1630 {
1631         struct hstate *h;
1632         unsigned long surplus_huge_pages;
1633         int nid;
1634
1635         h = kobj_to_hstate(kobj, &nid);
1636         if (nid == NUMA_NO_NODE)
1637                 surplus_huge_pages = h->surplus_huge_pages;
1638         else
1639                 surplus_huge_pages = h->surplus_huge_pages_node[nid];
1640
1641         return sprintf(buf, "%lu\n", surplus_huge_pages);
1642 }
1643 HSTATE_ATTR_RO(surplus_hugepages);
1644
1645 static struct attribute *hstate_attrs[] = {
1646         &nr_hugepages_attr.attr,
1647         &nr_overcommit_hugepages_attr.attr,
1648         &free_hugepages_attr.attr,
1649         &resv_hugepages_attr.attr,
1650         &surplus_hugepages_attr.attr,
1651 #ifdef CONFIG_NUMA
1652         &nr_hugepages_mempolicy_attr.attr,
1653 #endif
1654         NULL,
1655 };
1656
1657 static struct attribute_group hstate_attr_group = {
1658         .attrs = hstate_attrs,
1659 };
1660
1661 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
1662                                     struct kobject **hstate_kobjs,
1663                                     struct attribute_group *hstate_attr_group)
1664 {
1665         int retval;
1666         int hi = hstate_index(h);
1667
1668         hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
1669         if (!hstate_kobjs[hi])
1670                 return -ENOMEM;
1671
1672         retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
1673         if (retval)
1674                 kobject_put(hstate_kobjs[hi]);
1675
1676         return retval;
1677 }
1678
1679 static void __init hugetlb_sysfs_init(void)
1680 {
1681         struct hstate *h;
1682         int err;
1683
1684         hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
1685         if (!hugepages_kobj)
1686                 return;
1687
1688         for_each_hstate(h) {
1689                 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
1690                                          hstate_kobjs, &hstate_attr_group);
1691                 if (err)
1692                         printk(KERN_ERR "Hugetlb: Unable to add hstate %s\n",
1693                                                                 h->name);
1694         }
1695 }
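/*
 * Resulting sysfs layout (illustrative, for a system with 2MB and 1GB
 * hstates; mm_kobj corresponds to /sys/kernel/mm):
 *
 *	/sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 *	/sys/kernel/mm/hugepages/hugepages-2048kB/free_hugepages
 *	/sys/kernel/mm/hugepages/hugepages-1048576kB/nr_hugepages
 *	...
 */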
1696
1697 #ifdef CONFIG_NUMA
1698
1699 /*
1700  * node_hstate/s - associate per node hstate attributes, via their kobjects,
1701  * with node devices in node_devices[] using a parallel array.  The array
1702  * index of a node device or node_hstate is the node id.
1703  * This is here to avoid any static dependency of the node device driver, in
1704  * the base kernel, on the hugetlb module.
1705  */
1706 struct node_hstate {
1707         struct kobject          *hugepages_kobj;
1708         struct kobject          *hstate_kobjs[HUGE_MAX_HSTATE];
1709 };
1710 struct node_hstate node_hstates[MAX_NUMNODES];
1711
1712 /*
1713  * A subset of global hstate attributes for node devices
1714  */
1715 static struct attribute *per_node_hstate_attrs[] = {
1716         &nr_hugepages_attr.attr,
1717         &free_hugepages_attr.attr,
1718         &surplus_hugepages_attr.attr,
1719         NULL,
1720 };
1721
1722 static struct attribute_group per_node_hstate_attr_group = {
1723         .attrs = per_node_hstate_attrs,
1724 };
1725
1726 /*
1727  * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
1728  * Returns node id via non-NULL nidp.
1729  */
1730 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
1731 {
1732         int nid;
1733
1734         for (nid = 0; nid < nr_node_ids; nid++) {
1735                 struct node_hstate *nhs = &node_hstates[nid];
1736                 int i;
1737                 for (i = 0; i < HUGE_MAX_HSTATE; i++)
1738                         if (nhs->hstate_kobjs[i] == kobj) {
1739                                 if (nidp)
1740                                         *nidp = nid;
1741                                 return &hstates[i];
1742                         }
1743         }
1744
1745         BUG();
1746         return NULL;
1747 }
1748
1749 /*
1750  * Unregister hstate attributes from a single node device.
1751  * No-op if no hstate attributes attached.
1752  */
1753 void hugetlb_unregister_node(struct node *node)
1754 {
1755         struct hstate *h;
1756         struct node_hstate *nhs = &node_hstates[node->dev.id];
1757
1758         if (!nhs->hugepages_kobj)
1759                 return;         /* no hstate attributes */
1760
1761         for_each_hstate(h) {
1762                 int idx = hstate_index(h);
1763                 if (nhs->hstate_kobjs[idx]) {
1764                         kobject_put(nhs->hstate_kobjs[idx]);
1765                         nhs->hstate_kobjs[idx] = NULL;
1766                 }
1767         }
1768
1769         kobject_put(nhs->hugepages_kobj);
1770         nhs->hugepages_kobj = NULL;
1771 }
1772
1773 /*
1774  * hugetlb module exit:  unregister hstate attributes from node devices
1775  * that have them.
1776  */
1777 static void hugetlb_unregister_all_nodes(void)
1778 {
1779         int nid;
1780
1781         /*
1782          * disable node device registrations.
1783          */
1784         register_hugetlbfs_with_node(NULL, NULL);
1785
1786         /*
1787          * remove hstate attributes from any nodes that have them.
1788          */
1789         for (nid = 0; nid < nr_node_ids; nid++)
1790                 hugetlb_unregister_node(&node_devices[nid]);
1791 }
1792
1793 /*
1794  * Register hstate attributes for a single node device.
1795  * No-op if attributes already registered.
1796  */
1797 void hugetlb_register_node(struct node *node)
1798 {
1799         struct hstate *h;
1800         struct node_hstate *nhs = &node_hstates[node->dev.id];
1801         int err;
1802
1803         if (nhs->hugepages_kobj)
1804                 return;         /* already allocated */
1805
1806         nhs->hugepages_kobj = kobject_create_and_add("hugepages",
1807                                                         &node->dev.kobj);
1808         if (!nhs->hugepages_kobj)
1809                 return;
1810
1811         for_each_hstate(h) {
1812                 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
1813                                                 nhs->hstate_kobjs,
1814                                                 &per_node_hstate_attr_group);
1815                 if (err) {
1816                         printk(KERN_ERR "Hugetlb: Unable to add hstate %s"
1817                                         " for node %d\n",
1818                                                 h->name, node->dev.id);
1819                         hugetlb_unregister_node(node);
1820                         break;
1821                 }
1822         }
1823 }
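/*
 * Illustrative per node path created above (assuming node 0 and a 2MB
 * hstate): /sys/devices/system/node/node0/hugepages/hugepages-2048kB/,
 * which carries only the reduced attribute set from per_node_hstate_attrs[]
 * (nr_hugepages, free_hugepages, surplus_hugepages).
 */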
1824
1825 /*
1826  * hugetlb init time:  register hstate attributes for all registered node
1827  * devices of nodes that have memory.  All on-line nodes should have
1828  * registered their associated device by this time.
1829  */
1830 static void hugetlb_register_all_nodes(void)
1831 {
1832         int nid;
1833
1834         for_each_node_state(nid, N_HIGH_MEMORY) {
1835                 struct node *node = &node_devices[nid];
1836                 if (node->dev.id == nid)
1837                         hugetlb_register_node(node);
1838         }
1839
1840         /*
1841          * Let the node device driver know we're here so it can
1842          * [un]register hstate attributes on node hotplug.
1843          */
1844         register_hugetlbfs_with_node(hugetlb_register_node,
1845                                      hugetlb_unregister_node);
1846 }
1847 #else   /* !CONFIG_NUMA */
1848
1849 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
1850 {
1851         BUG();
1852         if (nidp)
1853                 *nidp = -1;
1854         return NULL;
1855 }
1856
1857 static void hugetlb_unregister_all_nodes(void) { }
1858
1859 static void hugetlb_register_all_nodes(void) { }
1860
1861 #endif
1862
1863 static void __exit hugetlb_exit(void)
1864 {
1865         struct hstate *h;
1866
1867         hugetlb_unregister_all_nodes();
1868
1869         for_each_hstate(h) {
1870                 kobject_put(hstate_kobjs[hstate_index(h)]);
1871         }
1872
1873         kobject_put(hugepages_kobj);
1874 }
1875 module_exit(hugetlb_exit);
1876
1877 static int __init hugetlb_init(void)
1878 {
1879         /* Some platforms decide whether they support huge pages at boot
1880          * time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when
1881          * there is no such support.
1882          */
1883         if (HPAGE_SHIFT == 0)
1884                 return 0;
1885
1886         if (!size_to_hstate(default_hstate_size)) {
1887                 default_hstate_size = HPAGE_SIZE;
1888                 if (!size_to_hstate(default_hstate_size))
1889                         hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
1890         }
1891         default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
1892         if (default_hstate_max_huge_pages)
1893                 default_hstate.max_huge_pages = default_hstate_max_huge_pages;
1894
1895         hugetlb_init_hstates();
1896
1897         gather_bootmem_prealloc();
1898
1899         report_hugepages();
1900
1901         hugetlb_sysfs_init();
1902
1903         hugetlb_register_all_nodes();
1904
1905         return 0;
1906 }
1907 module_init(hugetlb_init);
1908
1909 /* Should be called on processing a hugepagesz=... option */
1910 void __init hugetlb_add_hstate(unsigned order)
1911 {
1912         struct hstate *h;
1913         unsigned long i;
1914
1915         if (size_to_hstate(PAGE_SIZE << order)) {
1916                 printk(KERN_WARNING "hugepagesz= specified twice, ignoring\n");
1917                 return;
1918         }
1919         BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
1920         BUG_ON(order == 0);
1921         h = &hstates[hugetlb_max_hstate++];
1922         h->order = order;
1923         h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
1924         h->nr_huge_pages = 0;
1925         h->free_huge_pages = 0;
1926         for (i = 0; i < MAX_NUMNODES; ++i)
1927                 INIT_LIST_HEAD(&h->hugepage_freelists[i]);
1928         INIT_LIST_HEAD(&h->hugepage_activelist);
1929         h->next_nid_to_alloc = first_node(node_states[N_HIGH_MEMORY]);
1930         h->next_nid_to_free = first_node(node_states[N_HIGH_MEMORY]);
1931         snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
1932                                         huge_page_size(h)/1024);
1933
1934         parsed_hstate = h;
1935 }
1936
1937 static int __init hugetlb_nrpages_setup(char *s)
1938 {
1939         unsigned long *mhp;
1940         static unsigned long *last_mhp;
1941
1942         /*
1943          * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter yet,
1944          * so this hugepages= parameter goes to the "default hstate".
1945          */
1946         if (!hugetlb_max_hstate)
1947                 mhp = &default_hstate_max_huge_pages;
1948         else
1949                 mhp = &parsed_hstate->max_huge_pages;
1950
1951         if (mhp == last_mhp) {
1952                 printk(KERN_WARNING "hugepages= specified twice without "
1953                         "interleaving hugepagesz=, ignoring\n");
1954                 return 1;
1955         }
1956
1957         if (sscanf(s, "%lu", mhp) <= 0)
1958                 *mhp = 0;
1959
1960         /*
1961          * Global state is always initialized later in hugetlb_init.
1962          * But we need to allocate >= MAX_ORDER hstates here early to still
1963          * use the bootmem allocator.
1964          */
1965         if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
1966                 hugetlb_hstate_alloc_pages(parsed_hstate);
1967
1968         last_mhp = mhp;
1969
1970         return 1;
1971 }
1972 __setup("hugepages=", hugetlb_nrpages_setup);
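/*
 * Boot command line sketch (hypothetical values):
 *
 *	hugepagesz=1G hugepages=4 hugepagesz=2M hugepages=512
 *
 * preallocates 4 1GB pages and 512 2MB pages.  Each hugepages= applies to
 * the hstate registered by the preceding hugepagesz=, which is why
 * specifying hugepages= twice without an intervening hugepagesz= is
 * rejected above.
 */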
1973
1974 static int __init hugetlb_default_setup(char *s)
1975 {
1976         default_hstate_size = memparse(s, &s);
1977         return 1;
1978 }
1979 __setup("default_hugepagesz=", hugetlb_default_setup);
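/*
 * Illustrative use (hypothetical values): "default_hugepagesz=1G
 * hugepagesz=1G hugepages=4" makes the 1GB hstate the default one, i.e.
 * the size reported in /proc/meminfo and used by hugetlbfs mounts that do
 * not specify an explicit pagesize.
 */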
1980
1981 static unsigned int cpuset_mems_nr(unsigned int *array)
1982 {
1983         int node;
1984         unsigned int nr = 0;
1985
1986         for_each_node_mask(node, cpuset_current_mems_allowed)
1987                 nr += array[node];
1988
1989         return nr;
1990 }
1991
1992 #ifdef CONFIG_SYSCTL
1993 static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
1994                          struct ctl_table *table, int write,
1995                          void __user *buffer, size_t *length, loff_t *ppos)
1996 {
1997         struct hstate *h = &default_hstate;
1998         unsigned long tmp;
1999         int ret;
2000
2001         tmp = h->max_huge_pages;
2002
2003         if (write && h->order >= MAX_ORDER)
2004                 return -EINVAL;
2005
2006         table->data = &tmp;
2007         table->maxlen = sizeof(unsigned long);
2008         ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2009         if (ret)
2010                 goto out;
2011
2012         if (write) {
2013                 NODEMASK_ALLOC(nodemask_t, nodes_allowed,
2014                                                 GFP_KERNEL | __GFP_NORETRY);
2015                 if (!(obey_mempolicy &&
2016                                init_nodemask_of_mempolicy(nodes_allowed))) {
2017                         NODEMASK_FREE(nodes_allowed);
2018                         nodes_allowed = &node_states[N_HIGH_MEMORY];
2019                 }
2020                 h->max_huge_pages = set_max_huge_pages(h, tmp, nodes_allowed);
2021
2022                 if (nodes_allowed != &node_states[N_HIGH_MEMORY])
2023                         NODEMASK_FREE(nodes_allowed);
2024         }
2025 out:
2026         return ret;
2027 }
2028
2029 int hugetlb_sysctl_handler(struct ctl_table *table, int write,
2030                           void __user *buffer, size_t *length, loff_t *ppos)
2031 {
2032
2033         return hugetlb_sysctl_handler_common(false, table, write,
2034                                                         buffer, length, ppos);
2035 }
2036
2037 #ifdef CONFIG_NUMA
2038 int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
2039                           void __user *buffer, size_t *length, loff_t *ppos)
2040 {
2041         return hugetlb_sysctl_handler_common(true, table, write,
2042                                                         buffer, length, ppos);
2043 }
2044 #endif /* CONFIG_NUMA */
2045
2046 int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
2047                         void __user *buffer,
2048                         size_t *length, loff_t *ppos)
2049 {
2050         proc_dointvec(table, write, buffer, length, ppos);
2051         if (hugepages_treat_as_movable)
2052                 htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
2053         else
2054                 htlb_alloc_mask = GFP_HIGHUSER;
2055         return 0;
2056 }
2057
2058 int hugetlb_overcommit_handler(struct ctl_table *table, int write,
2059                         void __user *buffer,
2060                         size_t *length, loff_t *ppos)
2061 {
2062         struct hstate *h = &default_hstate;
2063         unsigned long tmp;
2064         int ret;
2065
2066         tmp = h->nr_overcommit_huge_pages;
2067
2068         if (write && h->order >= MAX_ORDER)
2069                 return -EINVAL;
2070
2071         table->data = &tmp;
2072         table->maxlen = sizeof(unsigned long);
2073         ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2074         if (ret)
2075                 goto out;
2076
2077         if (write) {
2078                 spin_lock(&hugetlb_lock);
2079                 h->nr_overcommit_huge_pages = tmp;
2080                 spin_unlock(&hugetlb_lock);
2081         }
2082 out:
2083         return ret;
2084 }
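/*
 * Illustrative sysctl usage for the handlers above (hypothetical values;
 * the files live under /proc/sys/vm/):
 *
 *	sysctl vm.nr_hugepages=128
 *	sysctl vm.nr_overcommit_hugepages=32
 *	sysctl vm.hugepages_treat_as_movable=1
 *
 * vm.nr_hugepages_mempolicy behaves like vm.nr_hugepages but honours the
 * calling task's mempolicy (CONFIG_NUMA only).
 */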
2085
2086 #endif /* CONFIG_SYSCTL */
2087
2088 void hugetlb_report_meminfo(struct seq_file *m)
2089 {
2090         struct hstate *h = &default_hstate;
2091         seq_printf(m,
2092                         "HugePages_Total:   %5lu\n"
2093                         "HugePages_Free:    %5lu\n"
2094                         "HugePages_Rsvd:    %5lu\n"
2095                         "HugePages_Surp:    %5lu\n"
2096                         "Hugepagesize:   %8lu kB\n",
2097                         h->nr_huge_pages,
2098                         h->free_huge_pages,
2099                         h->resv_huge_pages,
2100                         h->surplus_huge_pages,
2101                         1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
2102 }
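/*
 * Sample /proc/meminfo output produced above (illustrative values):
 *
 *	HugePages_Total:      64
 *	HugePages_Free:       60
 *	HugePages_Rsvd:        4
 *	HugePages_Surp:        0
 *	Hugepagesize:       2048 kB
 */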
2103
2104 int hugetlb_report_node_meminfo(int nid, char *buf)
2105 {
2106         struct hstate *h = &default_hstate;
2107         return sprintf(buf,
2108                 "Node %d HugePages_Total: %5u\n"
2109                 "Node %d HugePages_Free:  %5u\n"
2110                 "Node %d HugePages_Surp:  %5u\n",
2111                 nid, h->nr_huge_pages_node[nid],
2112                 nid, h->free_huge_pages_node[nid],
2113                 nid, h->surplus_huge_pages_node[nid]);
2114 }
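/*
 * Sample output as it appears in /sys/devices/system/node/nodeN/meminfo
 * (illustrative values for nid == 0):
 *
 *	Node 0 HugePages_Total:    32
 *	Node 0 HugePages_Free:     30
 *	Node 0 HugePages_Surp:      0
 */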
2115
2116 /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
2117 unsigned long hugetlb_total_pages(void)
2118 {
2119         struct hstate *h = &default_hstate;
2120         return h->nr_huge_pages * pages_per_huge_page(h);
2121 }
2122
2123 static int hugetlb_acct_memory(struct hstate *h, long delta)
2124 {
2125         int ret = -ENOMEM;
2126
2127         spin_lock(&hugetlb_lock);
2128         /*
2129          * When cpuset is configured, it breaks the strict hugetlb page
2130          * reservation as the accounting is done on a global variable. Such
2131          * reservation is completely rubbish in the presence of cpuset because
2132          * the reservation is not checked against page availability for the
2133          * current cpuset. The application can still be OOM'ed by the kernel
2134          * due to a lack of free htlb pages in the cpuset that the task is in.
2135          * Attempting to enforce strict accounting with cpuset is almost
2136          * impossible (or too ugly) because cpusets are so fluid that
2137          * tasks or memory nodes can be dynamically moved between cpusets.
2138          *
2139          * The change of semantics for shared hugetlb mapping with cpuset is
2140          * undesirable. However, in order to preserve some of the semantics,
2141          * we fall back to check against current free page availability as
2142          * a best attempt and hopefully to minimize the impact of changing
2143          * semantics that cpuset has.
2144          */
2145         if (delta > 0) {
2146                 if (gather_surplus_pages(h, delta) < 0)
2147                         goto out;
2148
2149                 if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
2150                         return_unused_surplus_pages(h, delta);
2151                         goto out;
2152                 }
2153         }
2154
2155         ret = 0;
2156         if (delta < 0)
2157                 return_unused_surplus_pages(h, (unsigned long) -delta);
2158
2159 out:
2160         spin_unlock(&hugetlb_lock);
2161         return ret;
2162 }
2163
2164 static void hugetlb_vm_op_open(struct vm_area_struct *vma)
2165 {
2166         struct resv_map *reservations = vma_resv_map(vma);
2167
2168         /*
2169          * This new VMA should share its sibling's reservation map if present.
2170          * The VMA will only ever have a valid reservation map pointer when
2171          * it is being copied for another still existing VMA.  As that VMA
2172          * has a reference to the reservation map it cannot disappear until
2173          * after this open call completes.  It is therefore safe to take a
2174          * new reference here without additional locking.
2175          */
2176         if (reservations)
2177                 kref_get(&reservations->refs);
2178 }
2179
2180 static void resv_map_put(struct vm_area_struct *vma)
2181 {
2182         struct resv_map *reservations = vma_resv_map(vma);
2183
2184         if (!reservations)
2185                 return;
2186         kref_put(&reservations->refs, resv_map_release);
2187 }
2188
2189 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
2190 {
2191         struct hstate *h = hstate_vma(vma);
2192         struct resv_map *reservations = vma_resv_map(vma);
2193         struct hugepage_subpool *spool = subpool_vma(vma);
2194         unsigned long reserve;
2195         unsigned long start;
2196         unsigned long end;
2197
2198         if (reservations) {
2199                 start = vma_hugecache_offset(h, vma, vma->vm_start);
2200                 end = vma_hugecache_offset(h, vma, vma->vm_end);
2201
2202                 reserve = (end - start) -
2203                         region_count(&reservations->regions, start, end);
2204
2205                 resv_map_put(vma);
2206
2207                 if (reserve) {
2208                         hugetlb_acct_memory(h, -reserve);
2209                         hugepage_subpool_put_pages(spool, reserve);
2210                 }
2211         }
2212 }
2213
2214 /*
2215  * We cannot handle pagefaults against hugetlb pages at all.  They cause
2216  * handle_mm_fault() to try to instantiate regular-sized pages in the
2217  * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
2218  * this far.
2219  */
2220 static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2221 {
2222         BUG();
2223         return 0;
2224 }
2225
2226 const struct vm_operations_struct hugetlb_vm_ops = {
2227         .fault = hugetlb_vm_op_fault,
2228         .open = hugetlb_vm_op_open,
2229         .close = hugetlb_vm_op_close,
2230 };
2231
2232 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
2233                                 int writable)
2234 {
2235         pte_t entry;
2236
2237         if (writable) {
2238                 entry =
2239                     pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
2240         } else {
2241                 entry = huge_pte_wrprotect(mk_pte(page, vma->vm_page_prot));
2242         }
2243         entry = pte_mkyoung(entry);
2244         entry = pte_mkhuge(entry);
2245         entry = arch_make_huge_pte(entry, vma, page, writable);
2246
2247         return entry;
2248 }
2249
2250 static void set_huge_ptep_writable(struct vm_area_struct *vma,
2251                                    unsigned long address, pte_t *ptep)
2252 {
2253         pte_t entry;
2254
2255         entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep)));
2256         if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
2257                 update_mmu_cache(vma, address, ptep);
2258 }
2259
2260
2261 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
2262                             struct vm_area_struct *vma)
2263 {
2264         pte_t *src_pte, *dst_pte, entry;
2265         struct page *ptepage;
2266         unsigned long addr;
2267         int cow;
2268         struct hstate *h = hstate_vma(vma);
2269         unsigned long sz = huge_page_size(h);
2270
2271         cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
2272
2273         for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
2274                 src_pte = huge_pte_offset(src, addr);
2275                 if (!src_pte)
2276                         continue;
2277                 dst_pte = huge_pte_alloc(dst, addr, sz);
2278                 if (!dst_pte)
2279                         goto nomem;
2280
2281                 /* If the pagetables are shared don't copy or take references */
2282                 if (dst_pte == src_pte)
2283                         continue;
2284
2285                 spin_lock(&dst->page_table_lock);
2286                 spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING);
2287                 if (!huge_pte_none(huge_ptep_get(src_pte))) {
2288                         if (cow)
2289                                 huge_ptep_set_wrprotect(src, addr, src_pte);
2290                         entry = huge_ptep_get(src_pte);
2291                         ptepage = pte_page(entry);
2292                         get_page(ptepage);
2293                         page_dup_rmap(ptepage);
2294                         set_huge_pte_at(dst, addr, dst_pte, entry);
2295                 }
2296                 spin_unlock(&src->page_table_lock);
2297                 spin_unlock(&dst->page_table_lock);
2298         }
2299         return 0;
2300
2301 nomem:
2302         return -ENOMEM;
2303 }
2304
2305 static int is_hugetlb_entry_migration(pte_t pte)
2306 {
2307         swp_entry_t swp;
2308
2309         if (huge_pte_none(pte) || pte_present(pte))
2310                 return 0;
2311         swp = pte_to_swp_entry(pte);
2312         if (non_swap_entry(swp) && is_migration_entry(swp))
2313                 return 1;
2314         else
2315                 return 0;
2316 }
2317
2318 static int is_hugetlb_entry_hwpoisoned(pte_t pte)
2319 {
2320         swp_entry_t swp;
2321
2322         if (huge_pte_none(pte) || pte_present(pte))
2323                 return 0;
2324         swp = pte_to_swp_entry(pte);
2325         if (non_swap_entry(swp) && is_hwpoison_entry(swp))
2326                 return 1;
2327         else
2328                 return 0;
2329 }
2330
2331 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
2332                             unsigned long start, unsigned long end,
2333                             struct page *ref_page)
2334 {
2335         int force_flush = 0;
2336         struct mm_struct *mm = vma->vm_mm;
2337         unsigned long address;
2338         pte_t *ptep;
2339         pte_t pte;
2340         struct page *page;
2341         struct hstate *h = hstate_vma(vma);
2342         unsigned long sz = huge_page_size(h);
2343
2344         WARN_ON(!is_vm_hugetlb_page(vma));
2345         BUG_ON(start & ~huge_page_mask(h));
2346         BUG_ON(end & ~huge_page_mask(h));
2347
2348         tlb_start_vma(tlb, vma);
2349         mmu_notifier_invalidate_range_start(mm, start, end);
2350 again:
2351         spin_lock(&mm->page_table_lock);
2352         for (address = start; address < end; address += sz) {
2353                 ptep = huge_pte_offset(mm, address);
2354                 if (!ptep)
2355                         continue;
2356
2357                 if (huge_pmd_unshare(mm, &address, ptep))
2358                         continue;
2359
2360                 pte = huge_ptep_get(ptep);
2361                 if (huge_pte_none(pte))
2362                         continue;
2363
2364                 /*
2365                  * A HWPoisoned hugepage is already unmapped and its reference dropped
2366                  */
2367                 if (unlikely(is_hugetlb_entry_hwpoisoned(pte)))
2368                         continue;
2369
2370                 page = pte_page(pte);
2371                 /*
2372                  * If a reference page is supplied, it is because a specific
2373                  * page is being unmapped, not a range. Ensure the page we
2374                  * are about to unmap is the actual page of interest.
2375                  */
2376                 if (ref_page) {
2377                         if (page != ref_page)
2378                                 continue;
2379
2380                         /*
2381                          * Mark the VMA as having unmapped its page so that
2382                          * future faults in this VMA will fail rather than
2383                          * looking like data was lost
2384                          */
2385                         set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
2386                 }
2387
2388                 pte = huge_ptep_get_and_clear(mm, address, ptep);
2389                 tlb_remove_tlb_entry(tlb, ptep, address);
2390                 if (pte_dirty(pte))
2391                         set_page_dirty(page);
2392
2393                 page_remove_rmap(page);
2394                 force_flush = !__tlb_remove_page(tlb, page);
2395                 if (force_flush)
2396                         break;
2397                 /* Bail out after unmapping reference page if supplied */
2398                 if (ref_page)
2399                         break;
2400         }
2401         spin_unlock(&mm->page_table_lock);
2402         /*
2403          * mmu_gather ran out of room to batch pages, so we break out of
2404          * the PTE lock to avoid doing the potentially expensive TLB invalidate
2405          * and page-free while holding it.
2406          */
2407         if (force_flush) {
2408                 force_flush = 0;
2409                 tlb_flush_mmu(tlb);
2410                 if (address < end && !ref_page)
2411                         goto again;
2412         }
2413         mmu_notifier_invalidate_range_end(mm, start, end);
2414         tlb_end_vma(tlb, vma);
2415 }
2416
2417 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
2418                           unsigned long end, struct page *ref_page)
2419 {
2420         struct mm_struct *mm;
2421         struct mmu_gather tlb;
2422
2423         mm = vma->vm_mm;
2424
2425         tlb_gather_mmu(&tlb, mm, 0);
2426         __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
2427         tlb_finish_mmu(&tlb, start, end);
2428 }
2429
2430 /*
2431  * This is called when the original mapper is failing to COW a MAP_PRIVATE
2432  * mapping it owns the reserve page for. The intention is to unmap the page
2433  * from other VMAs and let the children be SIGKILLed if they are faulting the
2434  * same region.
2435  */
2436 static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
2437                                 struct page *page, unsigned long address)
2438 {
2439         struct hstate *h = hstate_vma(vma);
2440         struct vm_area_struct *iter_vma;
2441         struct address_space *mapping;
2442         struct prio_tree_iter iter;
2443         pgoff_t pgoff;
2444
2445         /*
2446          * vm_pgoff is in PAGE_SIZE units, hence the different calculation
2447          * from page cache lookup which is in HPAGE_SIZE units.
2448          */
2449         address = address & huge_page_mask(h);
2450         pgoff = vma_hugecache_offset(h, vma, address);
2451         mapping = vma->vm_file->f_dentry->d_inode->i_mapping;
2452
2453         /*
2454          * Take the mapping lock for the duration of the table walk. As
2455          * this mapping should be shared between all the VMAs,
2456          * __unmap_hugepage_range() is called, as the lock is already held.
2457          */
2458         mutex_lock(&mapping->i_mmap_mutex);
2459         vma_prio_tree_foreach(iter_vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
2460                 /* Do not unmap the current VMA */
2461                 if (iter_vma == vma)
2462                         continue;
2463
2464                 /*
2465                  * Unmap the page from other VMAs without their own reserves.
2466                  * They get marked to be SIGKILLed if they fault in these
2467                  * areas. This is because a future no-page fault on this VMA
2468                  * could insert a zeroed page instead of the data existing
2469                  * from the time of fork. This would look like data corruption
2470                  */
2471                 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
2472                         unmap_hugepage_range(iter_vma, address,
2473                                              address + huge_page_size(h), page);
2474         }
2475         mutex_unlock(&mapping->i_mmap_mutex);
2476
2477         return 1;
2478 }
2479
2480 /*
2481  * Hugetlb_cow() should be called with page lock of the original hugepage held.
2482  * Called with hugetlb_instantiation_mutex held and pte_page locked so we
2483  * cannot race with other handlers or page migration.
2484  * Keep the pte_same checks anyway to make transition from the mutex easier.
2485  */
2486 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
2487                         unsigned long address, pte_t *ptep, pte_t pte,
2488                         struct page *pagecache_page)
2489 {
2490         struct hstate *h = hstate_vma(vma);
2491         struct page *old_page, *new_page;
2492         int avoidcopy;
2493         int outside_reserve = 0;
2494
2495         old_page = pte_page(pte);
2496
2497 retry_avoidcopy:
2498         /* If no-one else is actually using this page, avoid the copy
2499          * and just make the page writable */
2500         avoidcopy = (page_mapcount(old_page) == 1);
2501         if (avoidcopy) {
2502                 if (PageAnon(old_page))
2503                         page_move_anon_rmap(old_page, vma, address);
2504                 set_huge_ptep_writable(vma, address, ptep);
2505                 return 0;
2506         }
2507
2508         /*
2509          * If the process that created a MAP_PRIVATE mapping is about to
2510          * perform a COW due to a shared page count, attempt to satisfy
2511          * the allocation without using the existing reserves. The pagecache
2512          * page is used to determine if the reserve at this address was
2513          * consumed or not. If reserves were used, a partial faulted mapping
2514          * at the time of fork() could consume its reserves on COW instead
2515          * of the full address range.
2516          */
2517         if (!(vma->vm_flags & VM_MAYSHARE) &&
2518                         is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
2519                         old_page != pagecache_page)
2520                 outside_reserve = 1;
2521
2522         page_cache_get(old_page);
2523
2524         /* Drop page_table_lock as buddy allocator may be called */
2525         spin_unlock(&mm->page_table_lock);
2526         new_page = alloc_huge_page(vma, address, outside_reserve);
2527
2528         if (IS_ERR(new_page)) {
2529                 long err = PTR_ERR(new_page);
2530                 page_cache_release(old_page);
2531
2532                 /*
2533                  * If a process owning a MAP_PRIVATE mapping fails to COW,
2534                  * it is due to references held by a child and an insufficient
2535          * huge page pool. To guarantee the original mapper's
2536                  * reliability, unmap the page from child processes. The child
2537                  * may get SIGKILLed if it later faults.
2538                  */
2539                 if (outside_reserve) {
2540                         BUG_ON(huge_pte_none(pte));
2541                         if (unmap_ref_private(mm, vma, old_page, address)) {
2542                                 BUG_ON(huge_pte_none(pte));
2543                                 spin_lock(&mm->page_table_lock);
2544                                 ptep = huge_pte_offset(mm, address & huge_page_mask(h));
2545                                 if (likely(pte_same(huge_ptep_get(ptep), pte)))
2546                                         goto retry_avoidcopy;
2547                                 /*
2548                                  * A race occurred while re-acquiring page_table_lock, and
2549                                  * our job is done.
2550                                  */
2551                                 return 0;
2552                         }
2553                         WARN_ON_ONCE(1);
2554                 }
2555
2556                 /* Caller expects lock to be held */
2557                 spin_lock(&mm->page_table_lock);
2558                 if (err == -ENOMEM)
2559                         return VM_FAULT_OOM;
2560                 else
2561                         return VM_FAULT_SIGBUS;
2562         }
2563
2564         /*
2565          * When the original hugepage is a shared one, it does not have
2566          * anon_vma prepared.
2567          */
2568         if (unlikely(anon_vma_prepare(vma))) {
2569                 page_cache_release(new_page);
2570                 page_cache_release(old_page);
2571                 /* Caller expects lock to be held */
2572                 spin_lock(&mm->page_table_lock);
2573                 return VM_FAULT_OOM;
2574         }
2575
2576         copy_user_huge_page(new_page, old_page, address, vma,
2577                             pages_per_huge_page(h));
2578         __SetPageUptodate(new_page);
2579
2580         /*
2581          * Retake the page_table_lock to check for racing updates
2582          * before the page tables are altered
2583          */
2584         spin_lock(&mm->page_table_lock);
2585         ptep = huge_pte_offset(mm, address & huge_page_mask(h));
2586         if (likely(pte_same(huge_ptep_get(ptep), pte))) {
2587                 /* Break COW */
2588                 mmu_notifier_invalidate_range_start(mm,
2589                         address & huge_page_mask(h),
2590                         (address & huge_page_mask(h)) + huge_page_size(h));
2591                 huge_ptep_clear_flush(vma, address, ptep);
2592                 set_huge_pte_at(mm, address, ptep,
2593                                 make_huge_pte(vma, new_page, 1));
2594                 page_remove_rmap(old_page);
2595                 hugepage_add_new_anon_rmap(new_page, vma, address);
2596                 /* Make the old page be freed below */
2597                 new_page = old_page;
2598                 mmu_notifier_invalidate_range_end(mm,
2599                         address & huge_page_mask(h),
2600                         (address & huge_page_mask(h)) + huge_page_size(h));
2601         }
2602         page_cache_release(new_page);
2603         page_cache_release(old_page);
2604         return 0;
2605 }
2606
2607 /* Return the pagecache page at a given address within a VMA */
2608 static struct page *hugetlbfs_pagecache_page(struct hstate *h,
2609                         struct vm_area_struct *vma, unsigned long address)
2610 {
2611         struct address_space *mapping;
2612         pgoff_t idx;
2613
2614         mapping = vma->vm_file->f_mapping;
2615         idx = vma_hugecache_offset(h, vma, address);
2616
2617         return find_lock_page(mapping, idx);
2618 }
2619
2620 /*
2621  * Return whether there is a pagecache page to back given address within VMA.
2622  * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
2623  */
2624 static bool hugetlbfs_pagecache_present(struct hstate *h,
2625                         struct vm_area_struct *vma, unsigned long address)
2626 {
2627         struct address_space *mapping;
2628         pgoff_t idx;
2629         struct page *page;
2630
2631         mapping = vma->vm_file->f_mapping;
2632         idx = vma_hugecache_offset(h, vma, address);
2633
2634         page = find_get_page(mapping, idx);
2635         if (page)
2636                 put_page(page);
2637         return page != NULL;
2638 }
2639
2640 static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
2641                         unsigned long address, pte_t *ptep, unsigned int flags)
2642 {
2643         struct hstate *h = hstate_vma(vma);
2644         int ret = VM_FAULT_SIGBUS;
2645         int anon_rmap = 0;
2646         pgoff_t idx;
2647         unsigned long size;
2648         struct page *page;
2649         struct address_space *mapping;
2650         pte_t new_pte;
2651
2652         /*
2653          * Currently, we are forced to kill the process in the event the
2654          * original mapper has unmapped pages from the child due to a failed
2655          * COW. Warn that such a situation has occurred as it may not be obvious
2656          */
2657         if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
2658                 printk(KERN_WARNING
2659                         "PID %d killed due to inadequate hugepage pool\n",
2660                         current->pid);
2661                 return ret;
2662         }
2663
2664         mapping = vma->vm_file->f_mapping;
2665         idx = vma_hugecache_offset(h, vma, address);
2666
2667         /*
2668          * Use page lock to guard against racing truncation
2669          * before we get page_table_lock.
2670          */
2671 retry:
2672         page = find_lock_page(mapping, idx);
2673         if (!page) {
2674                 size = i_size_read(mapping->host) >> huge_page_shift(h);
2675                 if (idx >= size)
2676                         goto out;
2677                 page = alloc_huge_page(vma, address, 0);
2678                 if (IS_ERR(page)) {
2679                         ret = PTR_ERR(page);
2680                         if (ret == -ENOMEM)
2681                                 ret = VM_FAULT_OOM;
2682                         else
2683                                 ret = VM_FAULT_SIGBUS;
2684                         goto out;
2685                 }
2686                 clear_huge_page(page, address, pages_per_huge_page(h));
2687                 __SetPageUptodate(page);
2688
2689                 if (vma->vm_flags & VM_MAYSHARE) {
2690                         int err;
2691                         struct inode *inode = mapping->host;
2692
2693                         err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
2694                         if (err) {
2695                                 put_page(page);
2696                                 if (err == -EEXIST)
2697                                         goto retry;
2698                                 goto out;
2699                         }
2700
2701                         spin_lock(&inode->i_lock);
2702                         inode->i_blocks += blocks_per_huge_page(h);
2703                         spin_unlock(&inode->i_lock);
2704                 } else {
2705                         lock_page(page);
2706                         if (unlikely(anon_vma_prepare(vma))) {
2707                                 ret = VM_FAULT_OOM;
2708                                 goto backout_unlocked;
2709                         }
2710                         anon_rmap = 1;
2711                 }
2712         } else {
2713                 /*
2714                  * If a memory error occurs between mmap() and fault, some processes
2715                  * won't have a hwpoisoned swap entry for the errored virtual address.
2716                  * So we need to block hugepage faults with a PG_hwpoison bit check.
2717                  */
2718                 if (unlikely(PageHWPoison(page))) {
2719                         ret = VM_FAULT_HWPOISON |
2720                                 VM_FAULT_SET_HINDEX(hstate_index(h));
2721                         goto backout_unlocked;
2722                 }
2723         }
2724
2725         /*
2726          * If we are going to COW a private mapping later, we examine the
2727          * pending reservations for this page now. This will ensure that
2728          * any allocations necessary to record that reservation occur outside
2729          * the spinlock.
2730          */
2731         if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
2732                 if (vma_needs_reservation(h, vma, address) < 0) {
2733                         ret = VM_FAULT_OOM;
2734                         goto backout_unlocked;
2735                 }
2736
2737         spin_lock(&mm->page_table_lock);
2738         size = i_size_read(mapping->host) >> huge_page_shift(h);
2739         if (idx >= size)
2740                 goto backout;
2741
2742         ret = 0;
2743         if (!huge_pte_none(huge_ptep_get(ptep)))
2744                 goto backout;
2745
2746         if (anon_rmap)
2747                 hugepage_add_new_anon_rmap(page, vma, address);
2748         else
2749                 page_dup_rmap(page);
2750         new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
2751                                 && (vma->vm_flags & VM_SHARED)));
2752         set_huge_pte_at(mm, address, ptep, new_pte);
2753
2754         if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
2755                 /* Optimization, do the COW without a second fault */
2756                 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
2757         }
2758
2759         spin_unlock(&mm->page_table_lock);
2760         unlock_page(page);
2761 out:
2762         return ret;
2763
2764 backout:
2765         spin_unlock(&mm->page_table_lock);
2766 backout_unlocked:
2767         unlock_page(page);
2768         put_page(page);
2769         goto out;
2770 }
2771
2772 int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2773                         unsigned long address, unsigned int flags)
2774 {
2775         pte_t *ptep;
2776         pte_t entry;
2777         int ret;
2778         struct page *page = NULL;
2779         struct page *pagecache_page = NULL;
2780         static DEFINE_MUTEX(hugetlb_instantiation_mutex);
2781         struct hstate *h = hstate_vma(vma);
2782
2783         address &= huge_page_mask(h);
2784
2785         ptep = huge_pte_offset(mm, address);
2786         if (ptep) {
2787                 entry = huge_ptep_get(ptep);
2788                 if (unlikely(is_hugetlb_entry_migration(entry))) {
2789                         migration_entry_wait(mm, (pmd_t *)ptep, address);
2790                         return 0;
2791                 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
2792                         return VM_FAULT_HWPOISON_LARGE |
2793                                 VM_FAULT_SET_HINDEX(hstate_index(h));
2794         }
2795
2796         ptep = huge_pte_alloc(mm, address, huge_page_size(h));
2797         if (!ptep)
2798                 return VM_FAULT_OOM;
2799
2800         /*
2801          * Serialize hugepage allocation and instantiation, so that we don't
2802          * get spurious allocation failures if two CPUs race to instantiate
2803          * the same page in the page cache.
2804          */
2805         mutex_lock(&hugetlb_instantiation_mutex);
2806         entry = huge_ptep_get(ptep);
2807         if (huge_pte_none(entry)) {
2808                 ret = hugetlb_no_page(mm, vma, address, ptep, flags);
2809                 goto out_mutex;
2810         }
2811
2812         ret = 0;
2813
2814         /*
2815          * If we are going to COW the mapping later, we examine the pending
2816          * reservations for this page now. This will ensure that any
2817          * allocations necessary to record that reservation occur outside the
2818          * spinlock. For private mappings, we also lookup the pagecache
2819          * page now as it is used to determine if a reservation has been
2820          * consumed.
2821          */
2822         if ((flags & FAULT_FLAG_WRITE) && !pte_write(entry)) {
2823                 if (vma_needs_reservation(h, vma, address) < 0) {
2824                         ret = VM_FAULT_OOM;
2825                         goto out_mutex;
2826                 }
2827
2828                 if (!(vma->vm_flags & VM_MAYSHARE))
2829                         pagecache_page = hugetlbfs_pagecache_page(h,
2830                                                                 vma, address);
2831         }
2832
2833         /*
2834          * hugetlb_cow() requires page locks of pte_page(entry) and
2835          * pagecache_page, so here we need to take the former one
2836          * when page != pagecache_page or !pagecache_page.
2837          * Note that locking order is always pagecache_page -> page,
2838          * so no worry about deadlock.
2839          */
2840         page = pte_page(entry);
2841         get_page(page);
2842         if (page != pagecache_page)
2843                 lock_page(page);
2844
2845         spin_lock(&mm->page_table_lock);
2846         /* Check for a racing update before calling hugetlb_cow */
2847         if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
2848                 goto out_page_table_lock;
2849
2850
2851         if (flags & FAULT_FLAG_WRITE) {
2852                 if (!pte_write(entry)) {
2853                         ret = hugetlb_cow(mm, vma, address, ptep, entry,
2854                                                         pagecache_page);
2855                         goto out_page_table_lock;
2856                 }
2857                 entry = pte_mkdirty(entry);
2858         }
2859         entry = pte_mkyoung(entry);
2860         if (huge_ptep_set_access_flags(vma, address, ptep, entry,
2861                                                 flags & FAULT_FLAG_WRITE))
2862                 update_mmu_cache(vma, address, ptep);
2863
2864 out_page_table_lock:
2865         spin_unlock(&mm->page_table_lock);
2866
2867         if (pagecache_page) {
2868                 unlock_page(pagecache_page);
2869                 put_page(pagecache_page);
2870         }
2871         if (page != pagecache_page)
2872                 unlock_page(page);
2873         put_page(page);
2874
2875 out_mutex:
2876         mutex_unlock(&hugetlb_instantiation_mutex);
2877
2878         return ret;
2879 }
2880
2881 /* Can be overridden by architectures */
2882 __attribute__((weak)) struct page *
2883 follow_huge_pud(struct mm_struct *mm, unsigned long address,
2884                pud_t *pud, int write)
2885 {
2886         BUG();
2887         return NULL;
2888 }
2889
2890 int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
2891                         struct page **pages, struct vm_area_struct **vmas,
2892                         unsigned long *position, int *length, int i,
2893                         unsigned int flags)
2894 {
2895         unsigned long pfn_offset;
2896         unsigned long vaddr = *position;
2897         int remainder = *length;
2898         struct hstate *h = hstate_vma(vma);
2899
2900         spin_lock(&mm->page_table_lock);
2901         while (vaddr < vma->vm_end && remainder) {
2902                 pte_t *pte;
2903                 int absent;
2904                 struct page *page;
2905
2906                 /*
2907                  * Some archs (sparc64, sh*) have multiple pte_ts for
2908                  * each hugepage.  We have to make sure we get the
2909                  * first, for the page indexing below to work.
2910                  */
2911                 pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
2912                 absent = !pte || huge_pte_none(huge_ptep_get(pte));
2913
2914                 /*
2915                  * When coredumping, it suits get_dump_page if we just return
2916                  * an error where there's an empty slot with no huge pagecache
2917                  * to back it.  This way, we avoid allocating a hugepage, and
2918                  * the sparse dumpfile avoids allocating disk blocks, but its
2919                  * huge holes still show up with zeroes where they need to be.
2920                  */
2921                 if (absent && (flags & FOLL_DUMP) &&
2922                     !hugetlbfs_pagecache_present(h, vma, vaddr)) {
2923                         remainder = 0;
2924                         break;
2925                 }
2926
2927                 if (absent ||
2928                     ((flags & FOLL_WRITE) && !pte_write(huge_ptep_get(pte)))) {
2929                         int ret;
2930
2931                         spin_unlock(&mm->page_table_lock);
2932                         ret = hugetlb_fault(mm, vma, vaddr,
2933                                 (flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
2934                         spin_lock(&mm->page_table_lock);
2935                         if (!(ret & VM_FAULT_ERROR))
2936                                 continue;
2937
2938                         remainder = 0;
2939                         break;
2940                 }
2941
2942                 pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
2943                 page = pte_page(huge_ptep_get(pte));
2944 same_page:
2945                 if (pages) {
2946                         pages[i] = mem_map_offset(page, pfn_offset);
2947                         get_page(pages[i]);
2948                 }
2949
2950                 if (vmas)
2951                         vmas[i] = vma;
2952
2953                 vaddr += PAGE_SIZE;
2954                 ++pfn_offset;
2955                 --remainder;
2956                 ++i;
2957                 if (vaddr < vma->vm_end && remainder &&
2958                                 pfn_offset < pages_per_huge_page(h)) {
2959                         /*
2960                          * We use pfn_offset to avoid touching the pageframes
2961                          * of this compound page.
2962                          */
2963                         goto same_page;
2964                 }
2965         }
2966         spin_unlock(&mm->page_table_lock);
2967         *length = remainder;
2968         *position = vaddr;
2969
2970         return i ? i : -EFAULT;
2971 }
2972
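/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * caller that pins the first nr_pages base pages of a hugetlb VMA through
 * follow_hugetlb_page() and then drops the references.  The real callers
 * live in the get_user_pages() path in mm/memory.c; this assumes
 * mm->mmap_sem is already held for read and that 'pages' has room for
 * nr_pages entries.
 */
static int __maybe_unused
pin_hugetlb_range_example(struct mm_struct *mm, struct vm_area_struct *vma,
                          struct page **pages, int nr_pages)
{
        unsigned long addr = vma->vm_start;
        int remaining = nr_pages;
        int pinned, i;

        /* fills pages[0..pinned-1]; each entry holds a page reference */
        pinned = follow_hugetlb_page(mm, vma, pages, NULL,
                                     &addr, &remaining, 0, 0);
        if (pinned < 0)
                return pinned;

        /* ... use the pinned pages here ... */

        for (i = 0; i < pinned; i++)
                put_page(pages[i]);
        return pinned;
}
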
2973 void hugetlb_change_protection(struct vm_area_struct *vma,
2974                 unsigned long address, unsigned long end, pgprot_t newprot)
2975 {
2976         struct mm_struct *mm = vma->vm_mm;
2977         unsigned long start = address;
2978         pte_t *ptep;
2979         pte_t pte;
2980         struct hstate *h = hstate_vma(vma);
2981
2982         BUG_ON(address >= end);
2983         flush_cache_range(vma, address, end);
2984
2985         mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
2986         spin_lock(&mm->page_table_lock);
2987         for (; address < end; address += huge_page_size(h)) {
2988                 ptep = huge_pte_offset(mm, address);
2989                 if (!ptep)
2990                         continue;
2991                 if (huge_pmd_unshare(mm, &address, ptep))
2992                         continue;
2993                 if (!huge_pte_none(huge_ptep_get(ptep))) {
2994                         pte = huge_ptep_get_and_clear(mm, address, ptep);
2995                         pte = pte_mkhuge(pte_modify(pte, newprot));
2996                         set_huge_pte_at(mm, address, ptep, pte);
2997                 }
2998         }
2999         spin_unlock(&mm->page_table_lock);
3000         mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
3001
3002         flush_tlb_range(vma, start, end);
3003 }
3004
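/*
 * Illustrative sketch, not part of the original file: roughly how the
 * mprotect() path dispatches a hugetlb VMA to hugetlb_change_protection().
 * The real dispatch happens in change_protection() in mm/mprotect.c; the
 * name below is hypothetical.
 */
static void __maybe_unused
change_protection_example(struct vm_area_struct *vma, unsigned long start,
                          unsigned long end, pgprot_t newprot)
{
        if (is_vm_hugetlb_page(vma))
                /* huge PTEs are rewritten one huge_page_size() step at a time */
                hugetlb_change_protection(vma, start, end, newprot);
}
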
3005 int hugetlb_reserve_pages(struct inode *inode,
3006                                         long from, long to,
3007                                         struct vm_area_struct *vma,
3008                                         vm_flags_t vm_flags)
3009 {
3010         long ret, chg;
3011         struct hstate *h = hstate_inode(inode);
3012         struct hugepage_subpool *spool = subpool_inode(inode);
3013
3014         /*
3015          * Only apply hugepage reservation if asked. At fault time, an
3016          * attempt will be made for VM_NORESERVE to allocate a page
3017          * without using reserves.
3018          */
3019         if (vm_flags & VM_NORESERVE)
3020                 return 0;
3021
3022         /*
3023          * Shared mappings base their reservation on the number of pages that
3024          * are already allocated on behalf of the file. Private mappings need
3025          * to reserve the full area even if read-only as mprotect() may be
3026          * called to make the mapping read-write. Assume !vma is a shm mapping
3027          */
3028         if (!vma || vma->vm_flags & VM_MAYSHARE)
3029                 chg = region_chg(&inode->i_mapping->private_list, from, to);
3030         else {
3031                 struct resv_map *resv_map = resv_map_alloc();
3032                 if (!resv_map)
3033                         return -ENOMEM;
3034
3035                 chg = to - from;
3036
3037                 set_vma_resv_map(vma, resv_map);
3038                 set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
3039         }
3040
3041         if (chg < 0) {
3042                 ret = chg;
3043                 goto out_err;
3044         }
3045
3046         /* There must be enough pages in the subpool for the mapping */
3047         if (hugepage_subpool_get_pages(spool, chg)) {
3048                 ret = -ENOSPC;
3049                 goto out_err;
3050         }
3051
3052         /*
3053          * Check that enough hugepages are available for the reservation.
3054          * Hand the pages back to the subpool if there are not.
3055          */
3056         ret = hugetlb_acct_memory(h, chg);
3057         if (ret < 0) {
3058                 hugepage_subpool_put_pages(spool, chg);
3059                 goto out_err;
3060         }
3061
3062         /*
3063          * Account for the reservations made. Shared mappings record regions
3064          * that have reservations as they are shared by multiple VMAs.
3065          * When the last VMA disappears, the region map says how much
3066          * the reservation was and the page cache tells how much of
3067          * the reservation was consumed. Private mappings are per-VMA and
3068          * only the consumed reservations are tracked. When the VMA
3069          * disappears, the original reservation is the VMA size and the
3070          * consumed reservations are stored in the map. Hence, nothing
3071          * else has to be done for private mappings here
3072          */
3073         if (!vma || vma->vm_flags & VM_MAYSHARE)
3074                 region_add(&inode->i_mapping->private_list, from, to);
3075         return 0;
3076 out_err:
3077         if (vma)
3078                 resv_map_put(vma);
3079         return ret;
3080 }
3081
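/*
 * Illustrative sketch, not part of the original file: the shape of the
 * mmap()-time call into hugetlb_reserve_pages().  The real caller is
 * hugetlbfs_file_mmap() in fs/hugetlbfs/inode.c, which also validates
 * alignment; the name and the omitted checks here are hypothetical
 * simplifications.  'from' and 'to' are indices in huge-page units.
 */
static int __maybe_unused
hugetlbfs_mmap_example(struct inode *inode, struct vm_area_struct *vma)
{
        struct hstate *h = hstate_inode(inode);
        long from = vma->vm_pgoff >> huge_page_order(h);
        long to = from +
                  ((vma->vm_end - vma->vm_start) >> huge_page_shift(h));

        /* reserve huge pages up front unless VM_NORESERVE was requested */
        return hugetlb_reserve_pages(inode, from, to, vma, vma->vm_flags);
}
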
3082 void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
3083 {
3084         struct hstate *h = hstate_inode(inode);
3085         long chg = region_truncate(&inode->i_mapping->private_list, offset);
3086         struct hugepage_subpool *spool = subpool_inode(inode);
3087
3088         spin_lock(&inode->i_lock);
3089         inode->i_blocks -= (blocks_per_huge_page(h) * freed);
3090         spin_unlock(&inode->i_lock);
3091
3092         hugepage_subpool_put_pages(spool, (chg - freed));
3093         hugetlb_acct_memory(h, -(chg - freed));
3094 }
3095
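/*
 * Illustrative sketch, not part of the original file: how a truncate-style
 * caller such as truncate_hugepages() in fs/hugetlbfs/inode.c gives back
 * reservations beyond a new end-of-file.  'freed' counts the huge pages
 * actually removed from the page cache; the name and the elided page
 * cache walk are hypothetical.
 */
static void __maybe_unused
truncate_reservations_example(struct inode *inode, loff_t newsize)
{
        struct hstate *h = hstate_inode(inode);
        long index = newsize >> huge_page_shift(h);
        long freed = 0;

        /* ... remove and count page cache pages from 'index' onward ... */

        hugetlb_unreserve_pages(inode, index, freed);
}
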
3096 #ifdef CONFIG_MEMORY_FAILURE
3097
3098 /* Should be called with hugetlb_lock held */
3099 static int is_hugepage_on_freelist(struct page *hpage)
3100 {
3101         struct page *page;
3102         struct page *tmp;
3103         struct hstate *h = page_hstate(hpage);
3104         int nid = page_to_nid(hpage);
3105
3106         list_for_each_entry_safe(page, tmp, &h->hugepage_freelists[nid], lru)
3107                 if (page == hpage)
3108                         return 1;
3109         return 0;
3110 }
3111
3112 /*
3113  * This function is called from memory failure code.
3114  * Assume the caller holds the page lock of the head page.
3115  */
3116 int dequeue_hwpoisoned_huge_page(struct page *hpage)
3117 {
3118         struct hstate *h = page_hstate(hpage);
3119         int nid = page_to_nid(hpage);
3120         int ret = -EBUSY;
3121
3122         spin_lock(&hugetlb_lock);
3123         if (is_hugepage_on_freelist(hpage)) {
3124                 list_del(&hpage->lru);
3125                 set_page_refcounted(hpage);
3126                 h->free_huge_pages--;
3127                 h->free_huge_pages_node[nid]--;
3128                 ret = 0;
3129         }
3130         spin_unlock(&hugetlb_lock);
3131         return ret;
3132 }
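
/*
 * Illustrative sketch, not part of the original file: the rough shape of
 * the memory-failure caller.  The real logic lives in memory_failure() in
 * mm/memory-failure.c and is considerably more involved; the name below
 * is hypothetical.
 */
static int __maybe_unused
handle_huge_poison_example(struct page *page)
{
        struct page *hpage = compound_head(page);
        int ret;

        lock_page(hpage);       /* the helper expects the head page locked */
        ret = dequeue_hwpoisoned_huge_page(hpage);
        unlock_page(hpage);

        return ret;             /* 0 if the page was taken off the free list */
}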
3133 #endif