mm/huge_memory.c (commit edfeb8cb23df27ad77716408962687ceed4ba342)
1 /*
2  *  Copyright (C) 2009  Red Hat, Inc.
3  *
4  *  This work is licensed under the terms of the GNU GPL, version 2. See
5  *  the COPYING file in the top-level directory.
6  */
7
8 #include <linux/mm.h>
9 #include <linux/sched.h>
10 #include <linux/highmem.h>
11 #include <linux/hugetlb.h>
12 #include <linux/mmu_notifier.h>
13 #include <linux/rmap.h>
14 #include <linux/swap.h>
15 #include <linux/mm_inline.h>
16 #include <linux/kthread.h>
17 #include <linux/khugepaged.h>
18 #include <linux/freezer.h>
19 #include <linux/mman.h>
20 #include <asm/tlb.h>
21 #include <asm/pgalloc.h>
22 #include "internal.h"
23
24 /*
25  * By default transparent hugepage support is enabled for all mappings
26  * and khugepaged scans all mappings. Defrag is only invoked by
27  * khugepaged hugepage allocations and by page faults inside
28  * MADV_HUGEPAGE regions to avoid the risk of slowing down short lived
29  * allocations.
30  */
31 unsigned long transparent_hugepage_flags __read_mostly =
32 #ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
33         (1<<TRANSPARENT_HUGEPAGE_FLAG)|
34 #endif
35 #ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
36         (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
37 #endif
38         (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)|
39         (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
40
41 /* default: scan 8*512 ptes (or vmas) every 10 seconds */
42 static unsigned int khugepaged_pages_to_scan __read_mostly = HPAGE_PMD_NR*8;
43 static unsigned int khugepaged_pages_collapsed;
44 static unsigned int khugepaged_full_scans;
45 static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
46 /* during fragmentation poll the hugepage allocator once every minute */
47 static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
48 static struct task_struct *khugepaged_thread __read_mostly;
49 static DEFINE_MUTEX(khugepaged_mutex);
50 static DEFINE_SPINLOCK(khugepaged_mm_lock);
51 static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
52 /*
53  * By default, collapse hugepages if at least one pte is mapped, as
54  * would have happened had the vma been large enough at page fault
55  * time.
56  */
57 static unsigned int khugepaged_max_ptes_none __read_mostly = HPAGE_PMD_NR-1;
58
59 static int khugepaged(void *none);
60 static int mm_slots_hash_init(void);
61 static int khugepaged_slab_init(void);
62 static void khugepaged_slab_free(void);
63
64 #define MM_SLOTS_HASH_HEADS 1024
65 static struct hlist_head *mm_slots_hash __read_mostly;
66 static struct kmem_cache *mm_slot_cache __read_mostly;
67
68 /**
69  * struct mm_slot - hash lookup from mm to mm_slot
70  * @hash: hash collision list
71  * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
72  * @mm: the mm that this information is valid for
73  */
74 struct mm_slot {
75         struct hlist_node hash;
76         struct list_head mm_node;
77         struct mm_struct *mm;
78 };
79
80 /**
81  * struct khugepaged_scan - cursor for scanning
82  * @mm_head: the head of the mm list to scan
83  * @mm_slot: the current mm_slot we are scanning
84  * @address: the next address inside that to be scanned
85  *
86  * There is only one khugepaged_scan instance of this cursor structure.
87  */
88 struct khugepaged_scan {
89         struct list_head mm_head;
90         struct mm_slot *mm_slot;
91         unsigned long address;
92 };
93 static struct khugepaged_scan khugepaged_scan = {
94         .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
95 };
96
97
98 static int set_recommended_min_free_kbytes(void)
99 {
100         struct zone *zone;
101         int nr_zones = 0;
102         unsigned long recommended_min;
103         extern int min_free_kbytes;
104
105         if (!test_bit(TRANSPARENT_HUGEPAGE_FLAG,
106                       &transparent_hugepage_flags) &&
107             !test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
108                       &transparent_hugepage_flags))
109                 return 0;
110
111         for_each_populated_zone(zone)
112                 nr_zones++;
113
114         /* Make sure at least 2 hugepages are free for MIGRATE_RESERVE */
115         recommended_min = pageblock_nr_pages * nr_zones * 2;
116
117         /*
118          * Make sure that on average at least two pageblocks are almost free
119          * of another type, one for a migratetype to fall back to and a
120          * second to avoid subsequent fallbacks of other types. There are 3
121          * MIGRATE_TYPES we care about.
122          */
123         recommended_min += pageblock_nr_pages * nr_zones *
124                            MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
125
126         /* don't ever allow reserving more than 5% of lowmem */
127         recommended_min = min(recommended_min,
128                               (unsigned long) nr_free_buffer_pages() / 20);
129         recommended_min <<= (PAGE_SHIFT-10);
130
131         if (recommended_min > min_free_kbytes)
132                 min_free_kbytes = recommended_min;
133         setup_per_zone_wmarks();
134         return 0;
135 }
136 late_initcall(set_recommended_min_free_kbytes);
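/*
 * Illustrative arithmetic for the calculation above (a sketch assuming
 * x86-64 with 2MB hugepages, pageblock_nr_pages == 512, 3 populated zones
 * and MIGRATE_PCPTYPES == 3):
 *
 *   recommended_min  = 512 * 3 * 2          =  3072 pages
 *   recommended_min += 512 * 3 * 3 * 3      = 16896 pages in total
 *   then capped at nr_free_buffer_pages() / 20 (5% of lowmem)
 *   recommended_min <<= (PAGE_SHIFT - 10)   -> 67584 kB with 4kB pages
 *
 * min_free_kbytes is only ever raised by this initcall, never lowered.
 */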
137
138 static int start_khugepaged(void)
139 {
140         int err = 0;
141         if (khugepaged_enabled()) {
142                 int wakeup;
143                 if (unlikely(!mm_slot_cache || !mm_slots_hash)) {
144                         err = -ENOMEM;
145                         goto out;
146                 }
147                 mutex_lock(&khugepaged_mutex);
148                 if (!khugepaged_thread)
149                         khugepaged_thread = kthread_run(khugepaged, NULL,
150                                                         "khugepaged");
151                 if (unlikely(IS_ERR(khugepaged_thread))) {
152                         printk(KERN_ERR
153                                "khugepaged: kthread_run(khugepaged) failed\n");
154                         err = PTR_ERR(khugepaged_thread);
155                         khugepaged_thread = NULL;
156                 }
157                 wakeup = !list_empty(&khugepaged_scan.mm_head);
158                 mutex_unlock(&khugepaged_mutex);
159                 if (wakeup)
160                         wake_up_interruptible(&khugepaged_wait);
161
162                 set_recommended_min_free_kbytes();
163         } else
164                 /* wakeup to exit */
165                 wake_up_interruptible(&khugepaged_wait);
166 out:
167         return err;
168 }
169
170 #ifdef CONFIG_SYSFS
171
172 static ssize_t double_flag_show(struct kobject *kobj,
173                                 struct kobj_attribute *attr, char *buf,
174                                 enum transparent_hugepage_flag enabled,
175                                 enum transparent_hugepage_flag req_madv)
176 {
177         if (test_bit(enabled, &transparent_hugepage_flags)) {
178                 VM_BUG_ON(test_bit(req_madv, &transparent_hugepage_flags));
179                 return sprintf(buf, "[always] madvise never\n");
180         } else if (test_bit(req_madv, &transparent_hugepage_flags))
181                 return sprintf(buf, "always [madvise] never\n");
182         else
183                 return sprintf(buf, "always madvise [never]\n");
184 }
185 static ssize_t double_flag_store(struct kobject *kobj,
186                                  struct kobj_attribute *attr,
187                                  const char *buf, size_t count,
188                                  enum transparent_hugepage_flag enabled,
189                                  enum transparent_hugepage_flag req_madv)
190 {
191         if (!memcmp("always", buf,
192                     min(sizeof("always")-1, count))) {
193                 set_bit(enabled, &transparent_hugepage_flags);
194                 clear_bit(req_madv, &transparent_hugepage_flags);
195         } else if (!memcmp("madvise", buf,
196                            min(sizeof("madvise")-1, count))) {
197                 clear_bit(enabled, &transparent_hugepage_flags);
198                 set_bit(req_madv, &transparent_hugepage_flags);
199         } else if (!memcmp("never", buf,
200                            min(sizeof("never")-1, count))) {
201                 clear_bit(enabled, &transparent_hugepage_flags);
202                 clear_bit(req_madv, &transparent_hugepage_flags);
203         } else
204                 return -EINVAL;
205
206         return count;
207 }
208
209 static ssize_t enabled_show(struct kobject *kobj,
210                             struct kobj_attribute *attr, char *buf)
211 {
212         return double_flag_show(kobj, attr, buf,
213                                 TRANSPARENT_HUGEPAGE_FLAG,
214                                 TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
215 }
216 static ssize_t enabled_store(struct kobject *kobj,
217                              struct kobj_attribute *attr,
218                              const char *buf, size_t count)
219 {
220         ssize_t ret;
221
222         ret = double_flag_store(kobj, attr, buf, count,
223                                 TRANSPARENT_HUGEPAGE_FLAG,
224                                 TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
225
226         if (ret > 0) {
227                 int err = start_khugepaged();
228                 if (err)
229                         ret = err;
230         }
231
232         if (ret > 0 &&
233             (test_bit(TRANSPARENT_HUGEPAGE_FLAG,
234                       &transparent_hugepage_flags) ||
235              test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
236                       &transparent_hugepage_flags)))
237                 set_recommended_min_free_kbytes();
238
239         return ret;
240 }
241 static struct kobj_attribute enabled_attr =
242         __ATTR(enabled, 0644, enabled_show, enabled_store);
243
244 static ssize_t single_flag_show(struct kobject *kobj,
245                                 struct kobj_attribute *attr, char *buf,
246                                 enum transparent_hugepage_flag flag)
247 {
248         return sprintf(buf, "%d\n",
249                        !!test_bit(flag, &transparent_hugepage_flags));
250 }
251
252 static ssize_t single_flag_store(struct kobject *kobj,
253                                  struct kobj_attribute *attr,
254                                  const char *buf, size_t count,
255                                  enum transparent_hugepage_flag flag)
256 {
257         unsigned long value;
258         int ret;
259
260         ret = kstrtoul(buf, 10, &value);
261         if (ret < 0)
262                 return ret;
263         if (value > 1)
264                 return -EINVAL;
265
266         if (value)
267                 set_bit(flag, &transparent_hugepage_flags);
268         else
269                 clear_bit(flag, &transparent_hugepage_flags);
270
271         return count;
272 }
273
274 /*
275  * Currently defrag only disables __GFP_NOWAIT for allocation. A blind
276  * __GFP_REPEAT is too aggressive, it's never worth swapping tons of
277  * memory just to allocate one more hugepage.
278  */
279 static ssize_t defrag_show(struct kobject *kobj,
280                            struct kobj_attribute *attr, char *buf)
281 {
282         return double_flag_show(kobj, attr, buf,
283                                 TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
284                                 TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
285 }
286 static ssize_t defrag_store(struct kobject *kobj,
287                             struct kobj_attribute *attr,
288                             const char *buf, size_t count)
289 {
290         return double_flag_store(kobj, attr, buf, count,
291                                  TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
292                                  TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
293 }
294 static struct kobj_attribute defrag_attr =
295         __ATTR(defrag, 0644, defrag_show, defrag_store);
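/*
 * The "enabled" and "defrag" attributes above end up under
 * /sys/kernel/mm/transparent_hugepage/ once hugepage_init_sysfs() below
 * registers them against mm_kobj. Illustrative usage from a shell:
 *
 *   echo always > /sys/kernel/mm/transparent_hugepage/enabled
 *   cat /sys/kernel/mm/transparent_hugepage/enabled  -> "[always] madvise never"
 *
 * Only the strings "always", "madvise" and "never" are accepted; anything
 * else makes double_flag_store() return -EINVAL.
 */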
296
297 #ifdef CONFIG_DEBUG_VM
298 static ssize_t debug_cow_show(struct kobject *kobj,
299                                 struct kobj_attribute *attr, char *buf)
300 {
301         return single_flag_show(kobj, attr, buf,
302                                 TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
303 }
304 static ssize_t debug_cow_store(struct kobject *kobj,
305                                struct kobj_attribute *attr,
306                                const char *buf, size_t count)
307 {
308         return single_flag_store(kobj, attr, buf, count,
309                                  TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
310 }
311 static struct kobj_attribute debug_cow_attr =
312         __ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store);
313 #endif /* CONFIG_DEBUG_VM */
314
315 static struct attribute *hugepage_attr[] = {
316         &enabled_attr.attr,
317         &defrag_attr.attr,
318 #ifdef CONFIG_DEBUG_VM
319         &debug_cow_attr.attr,
320 #endif
321         NULL,
322 };
323
324 static struct attribute_group hugepage_attr_group = {
325         .attrs = hugepage_attr,
326 };
327
328 static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
329                                          struct kobj_attribute *attr,
330                                          char *buf)
331 {
332         return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
333 }
334
335 static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
336                                           struct kobj_attribute *attr,
337                                           const char *buf, size_t count)
338 {
339         unsigned long msecs;
340         int err;
341
342         err = strict_strtoul(buf, 10, &msecs);
343         if (err || msecs > UINT_MAX)
344                 return -EINVAL;
345
346         khugepaged_scan_sleep_millisecs = msecs;
347         wake_up_interruptible(&khugepaged_wait);
348
349         return count;
350 }
351 static struct kobj_attribute scan_sleep_millisecs_attr =
352         __ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
353                scan_sleep_millisecs_store);
354
355 static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
356                                           struct kobj_attribute *attr,
357                                           char *buf)
358 {
359         return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
360 }
361
362 static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
363                                            struct kobj_attribute *attr,
364                                            const char *buf, size_t count)
365 {
366         unsigned long msecs;
367         int err;
368
369         err = strict_strtoul(buf, 10, &msecs);
370         if (err || msecs > UINT_MAX)
371                 return -EINVAL;
372
373         khugepaged_alloc_sleep_millisecs = msecs;
374         wake_up_interruptible(&khugepaged_wait);
375
376         return count;
377 }
378 static struct kobj_attribute alloc_sleep_millisecs_attr =
379         __ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
380                alloc_sleep_millisecs_store);
381
382 static ssize_t pages_to_scan_show(struct kobject *kobj,
383                                   struct kobj_attribute *attr,
384                                   char *buf)
385 {
386         return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
387 }
388 static ssize_t pages_to_scan_store(struct kobject *kobj,
389                                    struct kobj_attribute *attr,
390                                    const char *buf, size_t count)
391 {
392         int err;
393         unsigned long pages;
394
395         err = strict_strtoul(buf, 10, &pages);
396         if (err || !pages || pages > UINT_MAX)
397                 return -EINVAL;
398
399         khugepaged_pages_to_scan = pages;
400
401         return count;
402 }
403 static struct kobj_attribute pages_to_scan_attr =
404         __ATTR(pages_to_scan, 0644, pages_to_scan_show,
405                pages_to_scan_store);
406
407 static ssize_t pages_collapsed_show(struct kobject *kobj,
408                                     struct kobj_attribute *attr,
409                                     char *buf)
410 {
411         return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
412 }
413 static struct kobj_attribute pages_collapsed_attr =
414         __ATTR_RO(pages_collapsed);
415
416 static ssize_t full_scans_show(struct kobject *kobj,
417                                struct kobj_attribute *attr,
418                                char *buf)
419 {
420         return sprintf(buf, "%u\n", khugepaged_full_scans);
421 }
422 static struct kobj_attribute full_scans_attr =
423         __ATTR_RO(full_scans);
424
425 static ssize_t khugepaged_defrag_show(struct kobject *kobj,
426                                       struct kobj_attribute *attr, char *buf)
427 {
428         return single_flag_show(kobj, attr, buf,
429                                 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
430 }
431 static ssize_t khugepaged_defrag_store(struct kobject *kobj,
432                                        struct kobj_attribute *attr,
433                                        const char *buf, size_t count)
434 {
435         return single_flag_store(kobj, attr, buf, count,
436                                  TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
437 }
438 static struct kobj_attribute khugepaged_defrag_attr =
439         __ATTR(defrag, 0644, khugepaged_defrag_show,
440                khugepaged_defrag_store);
441
442 /*
443  * max_ptes_none controls whether khugepaged may collapse hugepages over
444  * unmapped ptes, potentially increasing the memory footprint of the
445  * vmas. When max_ptes_none is 0, khugepaged will not reduce the
446  * available free memory in the system as it runs. Increasing
447  * max_ptes_none will instead potentially reduce the free memory in
448  * the system during the khugepaged scan.
449  */
450 static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
451                                              struct kobj_attribute *attr,
452                                              char *buf)
453 {
454         return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
455 }
456 static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
457                                               struct kobj_attribute *attr,
458                                               const char *buf, size_t count)
459 {
460         int err;
461         unsigned long max_ptes_none;
462
463         err = strict_strtoul(buf, 10, &max_ptes_none);
464         if (err || max_ptes_none > HPAGE_PMD_NR-1)
465                 return -EINVAL;
466
467         khugepaged_max_ptes_none = max_ptes_none;
468
469         return count;
470 }
471 static struct kobj_attribute khugepaged_max_ptes_none_attr =
472         __ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
473                khugepaged_max_ptes_none_store);
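/*
 * For example, with HPAGE_PMD_NR == 512 the default max_ptes_none of 511
 * lets khugepaged collapse a 2MB range even when only a single pte in it
 * is mapped (the remaining holes become zero-filled memory in the new
 * hugepage), whereas max_ptes_none == 0 only collapses ranges whose 512
 * ptes are already fully populated.
 */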
474
475 static struct attribute *khugepaged_attr[] = {
476         &khugepaged_defrag_attr.attr,
477         &khugepaged_max_ptes_none_attr.attr,
478         &pages_to_scan_attr.attr,
479         &pages_collapsed_attr.attr,
480         &full_scans_attr.attr,
481         &scan_sleep_millisecs_attr.attr,
482         &alloc_sleep_millisecs_attr.attr,
483         NULL,
484 };
485
486 static struct attribute_group khugepaged_attr_group = {
487         .attrs = khugepaged_attr,
488         .name = "khugepaged",
489 };
490
491 static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
492 {
493         int err;
494
495         *hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
496         if (unlikely(!*hugepage_kobj)) {
497                 printk(KERN_ERR "hugepage: failed kobject create\n");
498                 return -ENOMEM;
499         }
500
501         err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
502         if (err) {
503                 printk(KERN_ERR "hugepage: failed to register hugepage group\n");
504                 goto delete_obj;
505         }
506
507         err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
508         if (err) {
509                 printk(KERN_ERR "hugepage: failed to register khugepaged group\n");
510                 goto remove_hp_group;
511         }
512
513         return 0;
514
515 remove_hp_group:
516         sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
517 delete_obj:
518         kobject_put(*hugepage_kobj);
519         return err;
520 }
521
522 static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
523 {
524         sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
525         sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
526         kobject_put(hugepage_kobj);
527 }
528 #else
529 static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
530 {
531         return 0;
532 }
533
534 static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
535 {
536 }
537 #endif /* CONFIG_SYSFS */
538
539 static int __init hugepage_init(void)
540 {
541         int err;
542         struct kobject *hugepage_kobj;
543
544         if (!has_transparent_hugepage()) {
545                 transparent_hugepage_flags = 0;
546                 return -EINVAL;
547         }
548
549         err = hugepage_init_sysfs(&hugepage_kobj);
550         if (err)
551                 return err;
552
553         err = khugepaged_slab_init();
554         if (err)
555                 goto out;
556
557         err = mm_slots_hash_init();
558         if (err) {
559                 khugepaged_slab_free();
560                 goto out;
561         }
562
563         /*
564          * By default disable transparent hugepages on smaller systems,
565          * where the extra memory used could hurt more than TLB overhead
566          * is likely to save.  The admin can still enable it through /sys.
567          */
568         if (totalram_pages < (512 << (20 - PAGE_SHIFT)))
569                 transparent_hugepage_flags = 0;
570
571         start_khugepaged();
572
573         set_recommended_min_free_kbytes();
574
575         return 0;
576 out:
577         hugepage_exit_sysfs(hugepage_kobj);
578         return err;
579 }
580 module_init(hugepage_init)
581
582 static int __init setup_transparent_hugepage(char *str)
583 {
584         int ret = 0;
585         if (!str)
586                 goto out;
587         if (!strcmp(str, "always")) {
588                 set_bit(TRANSPARENT_HUGEPAGE_FLAG,
589                         &transparent_hugepage_flags);
590                 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
591                           &transparent_hugepage_flags);
592                 ret = 1;
593         } else if (!strcmp(str, "madvise")) {
594                 clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
595                           &transparent_hugepage_flags);
596                 set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
597                         &transparent_hugepage_flags);
598                 ret = 1;
599         } else if (!strcmp(str, "never")) {
600                 clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
601                           &transparent_hugepage_flags);
602                 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
603                           &transparent_hugepage_flags);
604                 ret = 1;
605         }
606 out:
607         if (!ret)
608                 printk(KERN_WARNING
609                        "transparent_hugepage= cannot parse, ignored\n");
610         return ret;
611 }
612 __setup("transparent_hugepage=", setup_transparent_hugepage);
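/*
 * Illustrative boot-time usage: "transparent_hugepage=madvise" on the
 * kernel command line selects the madvise policy; "always" and "never"
 * are the other accepted values, mirroring the sysfs "enabled" attribute.
 */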
613
614 static void prepare_pmd_huge_pte(pgtable_t pgtable,
615                                  struct mm_struct *mm)
616 {
617         assert_spin_locked(&mm->page_table_lock);
618
619         /* FIFO */
620         if (!mm->pmd_huge_pte)
621                 INIT_LIST_HEAD(&pgtable->lru);
622         else
623                 list_add(&pgtable->lru, &mm->pmd_huge_pte->lru);
624         mm->pmd_huge_pte = pgtable;
625 }
626
627 static inline pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
628 {
629         if (likely(vma->vm_flags & VM_WRITE))
630                 pmd = pmd_mkwrite(pmd);
631         return pmd;
632 }
633
634 static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
635                                         struct vm_area_struct *vma,
636                                         unsigned long haddr, pmd_t *pmd,
637                                         struct page *page)
638 {
639         pgtable_t pgtable;
640
641         VM_BUG_ON(!PageCompound(page));
642         pgtable = pte_alloc_one(mm, haddr);
643         if (unlikely(!pgtable))
644                 return VM_FAULT_OOM;
645
646         clear_huge_page(page, haddr, HPAGE_PMD_NR);
647         __SetPageUptodate(page);
648
649         spin_lock(&mm->page_table_lock);
650         if (unlikely(!pmd_none(*pmd))) {
651                 spin_unlock(&mm->page_table_lock);
652                 mem_cgroup_uncharge_page(page);
653                 put_page(page);
654                 pte_free(mm, pgtable);
655         } else {
656                 pmd_t entry;
657                 entry = mk_pmd(page, vma->vm_page_prot);
658                 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
659                 entry = pmd_mkhuge(entry);
660                 /*
661                  * The spinlocking to take the lru_lock inside
662                  * page_add_new_anon_rmap() acts as a full memory
663                  * barrier to be sure clear_huge_page writes become
664                  * visible before the set_pmd_at() write.
665                  */
666                 page_add_new_anon_rmap(page, vma, haddr);
667                 set_pmd_at(mm, haddr, pmd, entry);
668                 prepare_pmd_huge_pte(pgtable, mm);
669                 add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
670                 mm->nr_ptes++;
671                 spin_unlock(&mm->page_table_lock);
672         }
673
674         return 0;
675 }
676
677 static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
678 {
679         return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT)) | extra_gfp;
680 }
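/*
 * Sketch of the resulting masks: with defrag enabled the allocation uses
 * GFP_TRANSHUGE | extra_gfp and may enter direct reclaim/compaction;
 * with defrag disabled __GFP_WAIT is cleared, so a failing hugepage
 * allocation returns immediately and the caller falls back to 4k pages
 * (see the THP_FAULT_FALLBACK accounting below).
 */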
681
682 static inline struct page *alloc_hugepage_vma(int defrag,
683                                               struct vm_area_struct *vma,
684                                               unsigned long haddr, int nd,
685                                               gfp_t extra_gfp)
686 {
687         return alloc_pages_vma(alloc_hugepage_gfpmask(defrag, extra_gfp),
688                                HPAGE_PMD_ORDER, vma, haddr, nd);
689 }
690
691 #ifndef CONFIG_NUMA
692 static inline struct page *alloc_hugepage(int defrag)
693 {
694         return alloc_pages(alloc_hugepage_gfpmask(defrag, 0),
695                            HPAGE_PMD_ORDER);
696 }
697 #endif
698
699 int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
700                                unsigned long address, pmd_t *pmd,
701                                unsigned int flags)
702 {
703         struct page *page;
704         unsigned long haddr = address & HPAGE_PMD_MASK;
705         pte_t *pte;
706
707         if (haddr >= vma->vm_start && haddr + HPAGE_PMD_SIZE <= vma->vm_end) {
708                 if (unlikely(anon_vma_prepare(vma)))
709                         return VM_FAULT_OOM;
710                 if (unlikely(khugepaged_enter(vma)))
711                         return VM_FAULT_OOM;
712                 page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
713                                           vma, haddr, numa_node_id(), 0);
714                 if (unlikely(!page)) {
715                         count_vm_event(THP_FAULT_FALLBACK);
716                         goto out;
717                 }
718                 count_vm_event(THP_FAULT_ALLOC);
719                 if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
720                         put_page(page);
721                         goto out;
722                 }
723                 if (unlikely(__do_huge_pmd_anonymous_page(mm, vma, haddr, pmd,
724                                                           page))) {
725                         mem_cgroup_uncharge_page(page);
726                         put_page(page);
727                         goto out;
728                 }
729
730                 return 0;
731         }
732 out:
733         /*
734          * Use __pte_alloc instead of pte_alloc_map, because we can't
735          * run pte_offset_map on the pmd, if a huge pmd could
736          * materialize from under us from a different thread.
737          */
738         if (unlikely(__pte_alloc(mm, vma, pmd, address)))
739                 return VM_FAULT_OOM;
740         /* if a huge pmd materialized from under us, just retry later */
741         if (unlikely(pmd_trans_huge(*pmd)))
742                 return 0;
743         /*
744          * A regular pmd is established and it can't morph into a huge pmd
745          * from under us anymore at this point because we hold the mmap_sem
746          * read mode and khugepaged takes it in write mode. So now it's
747          * safe to run pte_offset_map().
748          */
749         pte = pte_offset_map(pmd, address);
750         return handle_pte_fault(mm, vma, address, pte, pmd, flags);
751 }
752
753 int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
754                   pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
755                   struct vm_area_struct *vma)
756 {
757         struct page *src_page;
758         pmd_t pmd;
759         pgtable_t pgtable;
760         int ret;
761
762         ret = -ENOMEM;
763         pgtable = pte_alloc_one(dst_mm, addr);
764         if (unlikely(!pgtable))
765                 goto out;
766
767         spin_lock(&dst_mm->page_table_lock);
768         spin_lock_nested(&src_mm->page_table_lock, SINGLE_DEPTH_NESTING);
769
770         ret = -EAGAIN;
771         pmd = *src_pmd;
772         if (unlikely(!pmd_trans_huge(pmd))) {
773                 pte_free(dst_mm, pgtable);
774                 goto out_unlock;
775         }
776         if (unlikely(pmd_trans_splitting(pmd))) {
777                 /* a huge page split is running from under us */
778                 spin_unlock(&src_mm->page_table_lock);
779                 spin_unlock(&dst_mm->page_table_lock);
780                 pte_free(dst_mm, pgtable);
781
782                 wait_split_huge_page(vma->anon_vma, src_pmd); /* src_vma */
783                 goto out;
784         }
785         src_page = pmd_page(pmd);
786         VM_BUG_ON(!PageHead(src_page));
787         get_page(src_page);
788         page_dup_rmap(src_page);
789         add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
790
791         pmdp_set_wrprotect(src_mm, addr, src_pmd);
792         pmd = pmd_mkold(pmd_wrprotect(pmd));
793         set_pmd_at(dst_mm, addr, dst_pmd, pmd);
794         prepare_pmd_huge_pte(pgtable, dst_mm);
795         dst_mm->nr_ptes++;
796
797         ret = 0;
798 out_unlock:
799         spin_unlock(&src_mm->page_table_lock);
800         spin_unlock(&dst_mm->page_table_lock);
801 out:
802         return ret;
803 }
804
805 /* no "address" argument, so this destroys page coloring on some archs */
806 pgtable_t get_pmd_huge_pte(struct mm_struct *mm)
807 {
808         pgtable_t pgtable;
809
810         assert_spin_locked(&mm->page_table_lock);
811
812         /* FIFO */
813         pgtable = mm->pmd_huge_pte;
814         if (list_empty(&pgtable->lru))
815                 mm->pmd_huge_pte = NULL;
816         else {
817                 mm->pmd_huge_pte = list_entry(pgtable->lru.next,
818                                               struct page, lru);
819                 list_del(&pgtable->lru);
820         }
821         return pgtable;
822 }
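/*
 * prepare_pmd_huge_pte() and get_pmd_huge_pte() form a deposit/withdraw
 * pair: each huge pmd keeps one preallocated pte page table parked on
 * mm->pmd_huge_pte (a FIFO threaded through page->lru), so that a later
 * split or zap can repopulate or free the ptes without having to
 * allocate memory while holding mm->page_table_lock.
 */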
823
824 static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
825                                         struct vm_area_struct *vma,
826                                         unsigned long address,
827                                         pmd_t *pmd, pmd_t orig_pmd,
828                                         struct page *page,
829                                         unsigned long haddr)
830 {
831         pgtable_t pgtable;
832         pmd_t _pmd;
833         int ret = 0, i;
834         struct page **pages;
835
836         pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR,
837                         GFP_KERNEL);
838         if (unlikely(!pages)) {
839                 ret |= VM_FAULT_OOM;
840                 goto out;
841         }
842
843         for (i = 0; i < HPAGE_PMD_NR; i++) {
844                 pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE |
845                                                __GFP_OTHER_NODE,
846                                                vma, address, page_to_nid(page));
847                 if (unlikely(!pages[i] ||
848                              mem_cgroup_newpage_charge(pages[i], mm,
849                                                        GFP_KERNEL))) {
850                         if (pages[i])
851                                 put_page(pages[i]);
852                         mem_cgroup_uncharge_start();
853                         while (--i >= 0) {
854                                 mem_cgroup_uncharge_page(pages[i]);
855                                 put_page(pages[i]);
856                         }
857                         mem_cgroup_uncharge_end();
858                         kfree(pages);
859                         ret |= VM_FAULT_OOM;
860                         goto out;
861                 }
862         }
863
864         for (i = 0; i < HPAGE_PMD_NR; i++) {
865                 copy_user_highpage(pages[i], page + i,
866                                    haddr + PAGE_SIZE * i, vma);
867                 __SetPageUptodate(pages[i]);
868                 cond_resched();
869         }
870
871         spin_lock(&mm->page_table_lock);
872         if (unlikely(!pmd_same(*pmd, orig_pmd)))
873                 goto out_free_pages;
874         VM_BUG_ON(!PageHead(page));
875
876         pmdp_clear_flush_notify(vma, haddr, pmd);
877         /* leave pmd empty until pte is filled */
878
879         pgtable = get_pmd_huge_pte(mm);
880         pmd_populate(mm, &_pmd, pgtable);
881
882         for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
883                 pte_t *pte, entry;
884                 entry = mk_pte(pages[i], vma->vm_page_prot);
885                 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
886                 page_add_new_anon_rmap(pages[i], vma, haddr);
887                 pte = pte_offset_map(&_pmd, haddr);
888                 VM_BUG_ON(!pte_none(*pte));
889                 set_pte_at(mm, haddr, pte, entry);
890                 pte_unmap(pte);
891         }
892         kfree(pages);
893
894         smp_wmb(); /* make pte visible before pmd */
895         pmd_populate(mm, pmd, pgtable);
896         page_remove_rmap(page);
897         spin_unlock(&mm->page_table_lock);
898
899         ret |= VM_FAULT_WRITE;
900         put_page(page);
901
902 out:
903         return ret;
904
905 out_free_pages:
906         spin_unlock(&mm->page_table_lock);
907         mem_cgroup_uncharge_start();
908         for (i = 0; i < HPAGE_PMD_NR; i++) {
909                 mem_cgroup_uncharge_page(pages[i]);
910                 put_page(pages[i]);
911         }
912         mem_cgroup_uncharge_end();
913         kfree(pages);
914         goto out;
915 }
916
917 int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
918                         unsigned long address, pmd_t *pmd, pmd_t orig_pmd)
919 {
920         int ret = 0;
921         struct page *page, *new_page;
922         unsigned long haddr;
923
924         VM_BUG_ON(!vma->anon_vma);
925         spin_lock(&mm->page_table_lock);
926         if (unlikely(!pmd_same(*pmd, orig_pmd)))
927                 goto out_unlock;
928
929         page = pmd_page(orig_pmd);
930         VM_BUG_ON(!PageCompound(page) || !PageHead(page));
931         haddr = address & HPAGE_PMD_MASK;
932         if (page_mapcount(page) == 1) {
933                 pmd_t entry;
934                 entry = pmd_mkyoung(orig_pmd);
935                 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
936                 if (pmdp_set_access_flags(vma, haddr, pmd, entry,  1))
937                         update_mmu_cache(vma, address, entry);
938                 ret |= VM_FAULT_WRITE;
939                 goto out_unlock;
940         }
941         get_page(page);
942         spin_unlock(&mm->page_table_lock);
943
944         if (transparent_hugepage_enabled(vma) &&
945             !transparent_hugepage_debug_cow())
946                 new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
947                                               vma, haddr, numa_node_id(), 0);
948         else
949                 new_page = NULL;
950
951         if (unlikely(!new_page)) {
952                 count_vm_event(THP_FAULT_FALLBACK);
953                 ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
954                                                    pmd, orig_pmd, page, haddr);
955                 if (ret & VM_FAULT_OOM)
956                         split_huge_page(page);
957                 put_page(page);
958                 goto out;
959         }
960         count_vm_event(THP_FAULT_ALLOC);
961
962         if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
963                 put_page(new_page);
964                 split_huge_page(page);
965                 put_page(page);
966                 ret |= VM_FAULT_OOM;
967                 goto out;
968         }
969
970         copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
971         __SetPageUptodate(new_page);
972
973         spin_lock(&mm->page_table_lock);
974         put_page(page);
975         if (unlikely(!pmd_same(*pmd, orig_pmd))) {
976                 mem_cgroup_uncharge_page(new_page);
977                 put_page(new_page);
978         } else {
979                 pmd_t entry;
980                 VM_BUG_ON(!PageHead(page));
981                 entry = mk_pmd(new_page, vma->vm_page_prot);
982                 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
983                 entry = pmd_mkhuge(entry);
984                 pmdp_clear_flush_notify(vma, haddr, pmd);
985                 page_add_new_anon_rmap(new_page, vma, haddr);
986                 set_pmd_at(mm, haddr, pmd, entry);
987                 update_mmu_cache(vma, address, entry);
988                 page_remove_rmap(page);
989                 put_page(page);
990                 ret |= VM_FAULT_WRITE;
991         }
992 out_unlock:
993         spin_unlock(&mm->page_table_lock);
994 out:
995         return ret;
996 }
997
998 struct page *follow_trans_huge_pmd(struct mm_struct *mm,
999                                    unsigned long addr,
1000                                    pmd_t *pmd,
1001                                    unsigned int flags)
1002 {
1003         struct page *page = NULL;
1004
1005         assert_spin_locked(&mm->page_table_lock);
1006
1007         if (flags & FOLL_WRITE && !pmd_write(*pmd))
1008                 goto out;
1009
1010         page = pmd_page(*pmd);
1011         VM_BUG_ON(!PageHead(page));
1012         if (flags & FOLL_TOUCH) {
1013                 pmd_t _pmd;
1014                 /*
1015                  * We should set the dirty bit only for FOLL_WRITE but
1016                  * for now the dirty bit in the pmd is meaningless.
1017                  * If the dirty bit ever becomes meaningful and we
1018                  * only set it with FOLL_WRITE, an atomic
1019                  * set_bit will be required on the pmd to set the
1020                  * young bit, instead of the current set_pmd_at.
1021                  */
1022                 _pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
1023                 set_pmd_at(mm, addr & HPAGE_PMD_MASK, pmd, _pmd);
1024         }
1025         page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
1026         VM_BUG_ON(!PageCompound(page));
1027         if (flags & FOLL_GET)
1028                 get_page_foll(page);
1029
1030 out:
1031         return page;
1032 }
1033
1034 int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
1035                  pmd_t *pmd, unsigned long addr)
1036 {
1037         int ret = 0;
1038
1039         if (__pmd_trans_huge_lock(pmd, vma) == 1) {
1040                 struct page *page;
1041                 pgtable_t pgtable;
1042                 pgtable = get_pmd_huge_pte(tlb->mm);
1043                 page = pmd_page(*pmd);
1044                 pmd_clear(pmd);
1045                 tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
1046                 page_remove_rmap(page);
1047                 VM_BUG_ON(page_mapcount(page) < 0);
1048                 add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
1049                 VM_BUG_ON(!PageHead(page));
1050                 tlb->mm->nr_ptes--;
1051                 spin_unlock(&tlb->mm->page_table_lock);
1052                 tlb_remove_page(tlb, page);
1053                 pte_free(tlb->mm, pgtable);
1054                 ret = 1;
1055         }
1056         return ret;
1057 }
1058
1059 int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
1060                 unsigned long addr, unsigned long end,
1061                 unsigned char *vec)
1062 {
1063         int ret = 0;
1064
1065         if (__pmd_trans_huge_lock(pmd, vma) == 1) {
1066                 /*
1067                  * All logical pages in the range are present
1068                  * if backed by a huge page.
1069                  */
1070                 spin_unlock(&vma->vm_mm->page_table_lock);
1071                 memset(vec, 1, (end - addr) >> PAGE_SHIFT);
1072                 ret = 1;
1073         }
1074
1075         return ret;
1076 }
1077
1078 int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
1079                   unsigned long old_addr,
1080                   unsigned long new_addr, unsigned long old_end,
1081                   pmd_t *old_pmd, pmd_t *new_pmd)
1082 {
1083         int ret = 0;
1084         pmd_t pmd;
1085
1086         struct mm_struct *mm = vma->vm_mm;
1087
1088         if ((old_addr & ~HPAGE_PMD_MASK) ||
1089             (new_addr & ~HPAGE_PMD_MASK) ||
1090             old_end - old_addr < HPAGE_PMD_SIZE ||
1091             (new_vma->vm_flags & VM_NOHUGEPAGE))
1092                 goto out;
1093
1094         /*
1095          * The destination pmd shouldn't be established, free_pgtables()
1096          * should have released it.
1097          */
1098         if (WARN_ON(!pmd_none(*new_pmd))) {
1099                 VM_BUG_ON(pmd_trans_huge(*new_pmd));
1100                 goto out;
1101         }
1102
1103         ret = __pmd_trans_huge_lock(old_pmd, vma);
1104         if (ret == 1) {
1105                 pmd = pmdp_get_and_clear(mm, old_addr, old_pmd);
1106                 VM_BUG_ON(!pmd_none(*new_pmd));
1107                 set_pmd_at(mm, new_addr, new_pmd, pmd);
1108                 spin_unlock(&mm->page_table_lock);
1109         }
1110 out:
1111         return ret;
1112 }
1113
1114 int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
1115                 unsigned long addr, pgprot_t newprot)
1116 {
1117         struct mm_struct *mm = vma->vm_mm;
1118         int ret = 0;
1119
1120         if (__pmd_trans_huge_lock(pmd, vma) == 1) {
1121                 pmd_t entry;
1122                 entry = pmdp_get_and_clear(mm, addr, pmd);
1123                 entry = pmd_modify(entry, newprot);
1124                 set_pmd_at(mm, addr, pmd, entry);
1125                 spin_unlock(&vma->vm_mm->page_table_lock);
1126                 ret = 1;
1127         }
1128
1129         return ret;
1130 }
1131
1132 /*
1133  * Returns 1 if a given pmd maps a stable (not under splitting) thp.
1134  * Returns -1 if it maps a thp under splitting. Returns 0 otherwise.
1135  *
1136  * Note that if it returns 1, this routine returns without unlocking the page
1137  * table lock, so the caller must unlock it.
1138  */
1139 int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
1140 {
1141         spin_lock(&vma->vm_mm->page_table_lock);
1142         if (likely(pmd_trans_huge(*pmd))) {
1143                 if (unlikely(pmd_trans_splitting(*pmd))) {
1144                         spin_unlock(&vma->vm_mm->page_table_lock);
1145                         wait_split_huge_page(vma->anon_vma, pmd);
1146                         return -1;
1147                 } else {
1148                         /* Thp mapped by 'pmd' is stable, so we can
1149                          * handle it as it is. */
1150                         return 1;
1151                 }
1152         }
1153         spin_unlock(&vma->vm_mm->page_table_lock);
1154         return 0;
1155 }
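/*
 * Typical caller pattern (a sketch; see zap_huge_pmd() and
 * change_huge_pmd() above for real uses):
 *
 *      if (__pmd_trans_huge_lock(pmd, vma) == 1) {
 *              ... operate on the stable huge pmd ...
 *              spin_unlock(&vma->vm_mm->page_table_lock);
 *      }
 */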
1156
1157 pmd_t *page_check_address_pmd(struct page *page,
1158                               struct mm_struct *mm,
1159                               unsigned long address,
1160                               enum page_check_address_pmd_flag flag)
1161 {
1162         pgd_t *pgd;
1163         pud_t *pud;
1164         pmd_t *pmd, *ret = NULL;
1165
1166         if (address & ~HPAGE_PMD_MASK)
1167                 goto out;
1168
1169         pgd = pgd_offset(mm, address);
1170         if (!pgd_present(*pgd))
1171                 goto out;
1172
1173         pud = pud_offset(pgd, address);
1174         if (!pud_present(*pud))
1175                 goto out;
1176
1177         pmd = pmd_offset(pud, address);
1178         if (pmd_none(*pmd))
1179                 goto out;
1180         if (pmd_page(*pmd) != page)
1181                 goto out;
1182         /*
1183          * split_vma() may create temporary aliased mappings. There is
1184          * no risk as long as all huge pmd are found and have their
1185          * splitting bit set before __split_huge_page_refcount
1186          * runs. Finding the same huge pmd more than once during the
1187          * same rmap walk is not a problem.
1188          */
1189         if (flag == PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG &&
1190             pmd_trans_splitting(*pmd))
1191                 goto out;
1192         if (pmd_trans_huge(*pmd)) {
1193                 VM_BUG_ON(flag == PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG &&
1194                           !pmd_trans_splitting(*pmd));
1195                 ret = pmd;
1196         }
1197 out:
1198         return ret;
1199 }
1200
1201 static int __split_huge_page_splitting(struct page *page,
1202                                        struct vm_area_struct *vma,
1203                                        unsigned long address)
1204 {
1205         struct mm_struct *mm = vma->vm_mm;
1206         pmd_t *pmd;
1207         int ret = 0;
1208
1209         spin_lock(&mm->page_table_lock);
1210         pmd = page_check_address_pmd(page, mm, address,
1211                                      PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG);
1212         if (pmd) {
1213                 /*
1214                  * We can't temporarily set the pmd to null in order
1215                  * to split it, the pmd must remain marked huge at all
1216                  * times or the VM won't take the pmd_trans_huge paths
1217                  * and it won't wait on the anon_vma->root->mutex to
1218                  * serialize against split_huge_page*.
1219                  */
1220                 pmdp_splitting_flush_notify(vma, address, pmd);
1221                 ret = 1;
1222         }
1223         spin_unlock(&mm->page_table_lock);
1224
1225         return ret;
1226 }
1227
1228 static void __split_huge_page_refcount(struct page *page)
1229 {
1230         int i;
1231         struct zone *zone = page_zone(page);
1232         int tail_count = 0;
1233
1234         /* prevent PageLRU from going away from under us, and freeze lru stats */
1235         spin_lock_irq(&zone->lru_lock);
1236         compound_lock(page);
1237         /* complete memcg works before add pages to LRU */
1238         mem_cgroup_split_huge_fixup(page);
1239
1240         for (i = HPAGE_PMD_NR - 1; i >= 1; i--) {
1241                 struct page *page_tail = page + i;
1242
1243                 /* tail_page->_mapcount cannot change */
1244                 BUG_ON(page_mapcount(page_tail) < 0);
1245                 tail_count += page_mapcount(page_tail);
1246                 /* check for overflow */
1247                 BUG_ON(tail_count < 0);
1248                 BUG_ON(atomic_read(&page_tail->_count) != 0);
1249                 /*
1250                  * tail_page->_count is zero and not changing from
1251                  * under us. But get_page_unless_zero() may be running
1252                  * from under us on the tail_page. If we used
1253                  * atomic_set() below instead of atomic_add(), we
1254                  * would then run atomic_set() concurrently with
1255                  * get_page_unless_zero(), and atomic_set() is
1256                  * implemented in C not using locked ops. spin_unlock
1257                  * on x86 sometimes uses locked ops because of PPro
1258                  * errata 66, 92, so unless somebody can guarantee
1259                  * atomic_set() here would be safe on all archs (and
1260                  * not only on x86), it's safer to use atomic_add().
1261                  */
1262                 atomic_add(page_mapcount(page) + page_mapcount(page_tail) + 1,
1263                            &page_tail->_count);
1264
1265                 /* after clearing PageTail the gup refcount can be released */
1266                 smp_mb();
1267
1268                 /*
1269                  * retain hwpoison flag of the poisoned tail page:
1270                  *   fixes the wrong process being killed on the guest (KVM)
1271                  *   by memory-failure handling.
1272                  */
1273                 page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP | __PG_HWPOISON;
1274                 page_tail->flags |= (page->flags &
1275                                      ((1L << PG_referenced) |
1276                                       (1L << PG_swapbacked) |
1277                                       (1L << PG_mlocked) |
1278                                       (1L << PG_uptodate)));
1279                 page_tail->flags |= (1L << PG_dirty);
1280
1281                 /* clear PageTail before overwriting first_page */
1282                 smp_wmb();
1283
1284                 /*
1285                  * __split_huge_page_splitting() already set the
1286                  * splitting bit in all pmd that could map this
1287                  * hugepage, that will ensure no CPU can alter the
1288                  * mapcount on the head page. The mapcount is only
1289                  * accounted in the head page and it has to be
1290                  * transferred to all tail pages in the below code. So
1291                  * for this code to be safe, the mapcount can't change
1292                  * during the split. But that doesn't mean userland can't
1293                  * keep changing and reading the page contents while
1294                  * we transfer the mapcount, so the pmd splitting
1295                  * status is achieved setting a reserved bit in the
1296                  * pmd, not by clearing the present bit.
1297                 */
1298                 page_tail->_mapcount = page->_mapcount;
1299
1300                 BUG_ON(page_tail->mapping);
1301                 page_tail->mapping = page->mapping;
1302
1303                 page_tail->index = page->index + i;
1304
1305                 BUG_ON(!PageAnon(page_tail));
1306                 BUG_ON(!PageUptodate(page_tail));
1307                 BUG_ON(!PageDirty(page_tail));
1308                 BUG_ON(!PageSwapBacked(page_tail));
1309
1310
1311                 lru_add_page_tail(zone, page, page_tail);
1312         }
1313         atomic_sub(tail_count, &page->_count);
1314         BUG_ON(atomic_read(&page->_count) <= 0);
1315
1316         __dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
1317         __mod_zone_page_state(zone, NR_ANON_PAGES, HPAGE_PMD_NR);
1318
1319         ClearPageCompound(page);
1320         compound_unlock(page);
1321         spin_unlock_irq(&zone->lru_lock);
1322
1323         for (i = 1; i < HPAGE_PMD_NR; i++) {
1324                 struct page *page_tail = page + i;
1325                 BUG_ON(page_count(page_tail) <= 0);
1326                 /*
1327                  * Tail pages may be freed if there wasn't any mapping,
1328                  * e.g. if add_to_swap() is running on an lru page that
1329                  * had its mapping zapped. And freeing these pages
1330                  * requires taking the lru_lock so we do the put_page
1331                  * of the tail pages after the split is complete.
1332                  */
1333                 put_page(page_tail);
1334         }
1335
1336         /*
1337          * Only the head page (now a regular page again) is required
1338          * to be pinned by the caller.
1339          */
1340         BUG_ON(page_count(page) <= 0);
1341 }
1342
1343 static int __split_huge_page_map(struct page *page,
1344                                  struct vm_area_struct *vma,
1345                                  unsigned long address)
1346 {
1347         struct mm_struct *mm = vma->vm_mm;
1348         pmd_t *pmd, _pmd;
1349         int ret = 0, i;
1350         pgtable_t pgtable;
1351         unsigned long haddr;
1352
1353         spin_lock(&mm->page_table_lock);
1354         pmd = page_check_address_pmd(page, mm, address,
1355                                      PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG);
1356         if (pmd) {
1357                 pgtable = get_pmd_huge_pte(mm);
1358                 pmd_populate(mm, &_pmd, pgtable);
1359
1360                 for (i = 0, haddr = address; i < HPAGE_PMD_NR;
1361                      i++, haddr += PAGE_SIZE) {
1362                         pte_t *pte, entry;
1363                         BUG_ON(PageCompound(page+i));
1364                         entry = mk_pte(page + i, vma->vm_page_prot);
1365                         entry = maybe_mkwrite(pte_mkdirty(entry), vma);
1366                         if (!pmd_write(*pmd))
1367                                 entry = pte_wrprotect(entry);
1368                         else
1369                                 BUG_ON(page_mapcount(page) != 1);
1370                         if (!pmd_young(*pmd))
1371                                 entry = pte_mkold(entry);
1372                         pte = pte_offset_map(&_pmd, haddr);
1373                         BUG_ON(!pte_none(*pte));
1374                         set_pte_at(mm, haddr, pte, entry);
1375                         pte_unmap(pte);
1376                 }
1377
1378                 smp_wmb(); /* make pte visible before pmd */
1379                 /*
1380                  * Up to this point the pmd is present and huge and
1381                  * userland has the whole access to the hugepage
1382                  * during the split (which happens in place). If we
1383                  * overwrite the pmd with the not-huge version
1384                  * pointing to the pte here (which of course we could
1385                  * if all CPUs were bug free), userland could trigger
1386                  * a small page size TLB miss on the small sized TLB
1387                  * while the hugepage TLB entry is still established
1388                  * in the huge TLB. Some CPUs don't like that. See
1389                  * http://support.amd.com/us/Processor_TechDocs/41322.pdf,
1390                  * Erratum 383 on page 93. Intel should be safe but
1391                  * also warns that it's only safe if the permission
1392                  * and cache attributes of the two entries loaded in
1393                  * the two TLBs are identical (which should be the case
1394                  * here). But it is generally safer to never allow
1395                  * small and huge TLB entries for the same virtual
1396                  * address to be loaded simultaneously. So instead of
1397                  * doing "pmd_populate(); flush_tlb_range();" we first
1398                  * mark the current pmd notpresent (atomically because
1399                  * here the pmd_trans_huge and pmd_trans_splitting
1400                  * must remain set at all times on the pmd until the
1401                  * split is complete for this pmd), then we flush the
1402                  * SMP TLB and finally we write the non-huge version
1403                  * of the pmd entry with pmd_populate.
1404                  */
1405                 set_pmd_at(mm, address, pmd, pmd_mknotpresent(*pmd));
1406                 flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
1407                 pmd_populate(mm, pmd, pgtable);
1408                 ret = 1;
1409         }
1410         spin_unlock(&mm->page_table_lock);
1411
1412         return ret;
1413 }
1414
1415 /* must be called with anon_vma->root->mutex held */
1416 static void __split_huge_page(struct page *page,
1417                               struct anon_vma *anon_vma)
1418 {
1419         int mapcount, mapcount2;
1420         struct anon_vma_chain *avc;
1421
1422         BUG_ON(!PageHead(page));
1423         BUG_ON(PageTail(page));
1424
1425         mapcount = 0;
1426         list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
1427                 struct vm_area_struct *vma = avc->vma;
1428                 unsigned long addr = vma_address(page, vma);
1429                 BUG_ON(is_vma_temporary_stack(vma));
1430                 if (addr == -EFAULT)
1431                         continue;
1432                 mapcount += __split_huge_page_splitting(page, vma, addr);
1433         }
1434         /*
1435          * It is critical that new vmas are added to the tail of the
1436          * anon_vma list. This guarantees that if copy_huge_pmd() runs
1437          * and establishes a child pmd before
1438          * __split_huge_page_splitting() freezes the parent pmd (so if
1439          * we fail to prevent copy_huge_pmd() from running until the
1440          * whole __split_huge_page() is complete), we will still see
1441          * the newly established pmd of the child later during the
1442          * walk, to be able to set it as pmd_trans_splitting too.
1443          */
1444         if (mapcount != page_mapcount(page))
1445                 printk(KERN_ERR "mapcount %d page_mapcount %d\n",
1446                        mapcount, page_mapcount(page));
1447         BUG_ON(mapcount != page_mapcount(page));
1448
1449         __split_huge_page_refcount(page);
1450
1451         mapcount2 = 0;
1452         list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
1453                 struct vm_area_struct *vma = avc->vma;
1454                 unsigned long addr = vma_address(page, vma);
1455                 BUG_ON(is_vma_temporary_stack(vma));
1456                 if (addr == -EFAULT)
1457                         continue;
1458                 mapcount2 += __split_huge_page_map(page, vma, addr);
1459         }
1460         if (mapcount != mapcount2)
1461                 printk(KERN_ERR "mapcount %d mapcount2 %d page_mapcount %d\n",
1462                        mapcount, mapcount2, page_mapcount(page));
1463         BUG_ON(mapcount != mapcount2);
1464 }
1465
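     /*
      * Split the anonymous transparent hugepage @page into regular
      * pages.  Returns 0 on success (or if the page was already split),
      * non-zero if the anon_vma lock could not be taken.
      */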
1466 int split_huge_page(struct page *page)
1467 {
1468         struct anon_vma *anon_vma;
1469         int ret = 1;
1470
1471         BUG_ON(!PageAnon(page));
1472         anon_vma = page_lock_anon_vma(page);
1473         if (!anon_vma)
1474                 goto out;
1475         ret = 0;
1476         if (!PageCompound(page))
1477                 goto out_unlock;
1478
1479         BUG_ON(!PageSwapBacked(page));
1480         __split_huge_page(page, anon_vma);
1481         count_vm_event(THP_SPLIT);
1482
1483         BUG_ON(PageCompound(page));
1484 out_unlock:
1485         page_unlock_anon_vma(anon_vma);
1486 out:
1487         return ret;
1488 }
1489
1490 #define VM_NO_THP (VM_SPECIAL|VM_INSERTPAGE|VM_MIXEDMAP|VM_SAO| \
1491                    VM_HUGETLB|VM_SHARED|VM_MAYSHARE)
1492
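     /*
      * Handle MADV_HUGEPAGE/MADV_NOHUGEPAGE from madvise(2) by updating
      * the vma flags.  A userland caller would typically request
      * collapse-friendly treatment of an anonymous range roughly like
      * this (illustrative sketch only):
      *
      *      buf = mmap(NULL, len, PROT_READ|PROT_WRITE,
      *                 MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
      *      madvise(buf, len, MADV_HUGEPAGE);
      */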
1493 int hugepage_madvise(struct vm_area_struct *vma,
1494                      unsigned long *vm_flags, int advice)
1495 {
1496         switch (advice) {
1497         case MADV_HUGEPAGE:
1498                 /*
1499                  * Be somewhat over-protective like KSM for now!
1500                  */
1501                 if (*vm_flags & (VM_HUGEPAGE | VM_NO_THP))
1502                         return -EINVAL;
1503                 *vm_flags &= ~VM_NOHUGEPAGE;
1504                 *vm_flags |= VM_HUGEPAGE;
1505                 /*
1506                  * If the vma becomes good for khugepaged to scan,
1507                  * register it here without waiting for a page fault
1508                  * that may not happen any time soon.
1509                  */
1510                 if (unlikely(khugepaged_enter_vma_merge(vma)))
1511                         return -ENOMEM;
1512                 break;
1513         case MADV_NOHUGEPAGE:
1514                 /*
1515                  * Be somewhat over-protective like KSM for now!
1516                  */
1517                 if (*vm_flags & (VM_NOHUGEPAGE | VM_NO_THP))
1518                         return -EINVAL;
1519                 *vm_flags &= ~VM_HUGEPAGE;
1520                 *vm_flags |= VM_NOHUGEPAGE;
1521                 /*
1522                  * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
1523                  * this vma even if the mm stays registered in khugepaged
1524                  * (e.g. because it got registered before VM_NOHUGEPAGE was set).
1525                  */
1526                 break;
1527         }
1528
1529         return 0;
1530 }
1531
1532 static int __init khugepaged_slab_init(void)
1533 {
1534         mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
1535                                           sizeof(struct mm_slot),
1536                                           __alignof__(struct mm_slot), 0, NULL);
1537         if (!mm_slot_cache)
1538                 return -ENOMEM;
1539
1540         return 0;
1541 }
1542
1543 static void __init khugepaged_slab_free(void)
1544 {
1545         kmem_cache_destroy(mm_slot_cache);
1546         mm_slot_cache = NULL;
1547 }
1548
1549 static inline struct mm_slot *alloc_mm_slot(void)
1550 {
1551         if (!mm_slot_cache)     /* initialization failed */
1552                 return NULL;
1553         return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
1554 }
1555
1556 static inline void free_mm_slot(struct mm_slot *mm_slot)
1557 {
1558         kmem_cache_free(mm_slot_cache, mm_slot);
1559 }
1560
1561 static int __init mm_slots_hash_init(void)
1562 {
1563         mm_slots_hash = kzalloc(MM_SLOTS_HASH_HEADS * sizeof(struct hlist_head),
1564                                 GFP_KERNEL);
1565         if (!mm_slots_hash)
1566                 return -ENOMEM;
1567         return 0;
1568 }
1569
1570 #if 0
1571 static void __init mm_slots_hash_free(void)
1572 {
1573         kfree(mm_slots_hash);
1574         mm_slots_hash = NULL;
1575 }
1576 #endif
1577
1578 static struct mm_slot *get_mm_slot(struct mm_struct *mm)
1579 {
1580         struct mm_slot *mm_slot;
1581         struct hlist_head *bucket;
1582         struct hlist_node *node;
1583
1584         bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
1585                                 % MM_SLOTS_HASH_HEADS];
1586         hlist_for_each_entry(mm_slot, node, bucket, hash) {
1587                 if (mm == mm_slot->mm)
1588                         return mm_slot;
1589         }
1590         return NULL;
1591 }
1592
1593 static void insert_to_mm_slots_hash(struct mm_struct *mm,
1594                                     struct mm_slot *mm_slot)
1595 {
1596         struct hlist_head *bucket;
1597
1598         bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
1599                                 % MM_SLOTS_HASH_HEADS];
1600         mm_slot->mm = mm;
1601         hlist_add_head(&mm_slot->hash, bucket);
1602 }
1603
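     /* True once the mm has no remaining users, i.e. it is exiting. */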
1604 static inline int khugepaged_test_exit(struct mm_struct *mm)
1605 {
1606         return atomic_read(&mm->mm_users) == 0;
1607 }
1608
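     /*
      * Register @mm for khugepaged scanning: set MMF_VM_HUGEPAGE, hash a
      * freshly allocated mm_slot, queue it at the tail of the scan list,
      * pin the mm with mm_count and wake khugepaged if the scan list was
      * previously empty.
      */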
1609 int __khugepaged_enter(struct mm_struct *mm)
1610 {
1611         struct mm_slot *mm_slot;
1612         int wakeup;
1613
1614         mm_slot = alloc_mm_slot();
1615         if (!mm_slot)
1616                 return -ENOMEM;
1617
1618         /* __khugepaged_exit() must not run from under us */
1619         VM_BUG_ON(khugepaged_test_exit(mm));
1620         if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
1621                 free_mm_slot(mm_slot);
1622                 return 0;
1623         }
1624
1625         spin_lock(&khugepaged_mm_lock);
1626         insert_to_mm_slots_hash(mm, mm_slot);
1627         /*
1628          * Insert just behind the scanning cursor, to let the area settle
1629          * down a little.
1630          */
1631         wakeup = list_empty(&khugepaged_scan.mm_head);
1632         list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
1633         spin_unlock(&khugepaged_mm_lock);
1634
1635         atomic_inc(&mm->mm_count);
1636         if (wakeup)
1637                 wake_up_interruptible(&khugepaged_wait);
1638
1639         return 0;
1640 }
1641
1642 int khugepaged_enter_vma_merge(struct vm_area_struct *vma)
1643 {
1644         unsigned long hstart, hend;
1645         if (!vma->anon_vma)
1646                 /*
1647                  * Not yet faulted in so we will register later in the
1648                  * page fault if needed.
1649                  */
1650                 return 0;
1651         if (vma->vm_ops)
1652                 /* khugepaged not yet working on file or special mappings */
1653                 return 0;
1654         /*
1655          * If is_pfn_mapping() is true is_linear_pfn_mapping() must be
1656          * true too, verify it here.
1657          */
1658         VM_BUG_ON(is_linear_pfn_mapping(vma) || vma->vm_flags & VM_NO_THP);
1659         hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
1660         hend = vma->vm_end & HPAGE_PMD_MASK;
1661         if (hstart < hend)
1662                 return khugepaged_enter(vma);
1663         return 0;
1664 }
1665
1666 void __khugepaged_exit(struct mm_struct *mm)
1667 {
1668         struct mm_slot *mm_slot;
1669         int free = 0;
1670
1671         spin_lock(&khugepaged_mm_lock);
1672         mm_slot = get_mm_slot(mm);
1673         if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
1674                 hlist_del(&mm_slot->hash);
1675                 list_del(&mm_slot->mm_node);
1676                 free = 1;
1677         }
1678         spin_unlock(&khugepaged_mm_lock);
1679
1680         if (free) {
1681                 clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1682                 free_mm_slot(mm_slot);
1683                 mmdrop(mm);
1684         } else if (mm_slot) {
1685                 /*
1686                  * This is required to serialize against
1687                  * khugepaged_test_exit() (which is guaranteed to run
1688                  * under mmap sem read mode). Stop here (after we
1689                  * return all pagetables will be destroyed) until
1690                  * khugepaged has finished working on the pagetables
1691                  * under the mmap_sem.
1692                  */
1693                 down_write(&mm->mmap_sem);
1694                 up_write(&mm->mmap_sem);
1695         }
1696 }
1697
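     /*
      * Undo the LRU isolation done by __collapse_huge_page_isolate():
      * drop the NR_ISOLATED_ANON accounting, unlock the page and put it
      * back on the LRU.
      */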
1698 static void release_pte_page(struct page *page)
1699 {
1700         /* 0 stands for page_is_file_cache(page) == false */
1701         dec_zone_page_state(page, NR_ISOLATED_ANON + 0);
1702         unlock_page(page);
1703         putback_lru_page(page);
1704 }
1705
1706 static void release_pte_pages(pte_t *pte, pte_t *_pte)
1707 {
1708         while (--_pte >= pte) {
1709                 pte_t pteval = *_pte;
1710                 if (!pte_none(pteval))
1711                         release_pte_page(pte_page(pteval));
1712         }
1713 }
1714
1715 static void release_all_pte_pages(pte_t *pte)
1716 {
1717         release_pte_pages(pte, pte + HPAGE_PMD_NR);
1718 }
1719
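     /*
      * Walk the HPAGE_PMD_NR ptes under @pte and try to lock and isolate
      * every mapped page so it cannot be freed or migrated while it is
      * copied into the new hugepage.  Returns 1 if all pages were
      * isolated and at least one of them was young/referenced, 0
      * otherwise (in which case any pages already isolated are released).
      */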
1720 static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
1721                                         unsigned long address,
1722                                         pte_t *pte)
1723 {
1724         struct page *page;
1725         pte_t *_pte;
1726         int referenced = 0, isolated = 0, none = 0;
1727         for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
1728              _pte++, address += PAGE_SIZE) {
1729                 pte_t pteval = *_pte;
1730                 if (pte_none(pteval)) {
1731                         if (++none <= khugepaged_max_ptes_none)
1732                                 continue;
1733                         else {
1734                                 release_pte_pages(pte, _pte);
1735                                 goto out;
1736                         }
1737                 }
1738                 if (!pte_present(pteval) || !pte_write(pteval)) {
1739                         release_pte_pages(pte, _pte);
1740                         goto out;
1741                 }
1742                 page = vm_normal_page(vma, address, pteval);
1743                 if (unlikely(!page)) {
1744                         release_pte_pages(pte, _pte);
1745                         goto out;
1746                 }
1747                 VM_BUG_ON(PageCompound(page));
1748                 BUG_ON(!PageAnon(page));
1749                 VM_BUG_ON(!PageSwapBacked(page));
1750
1751                 /* cannot use mapcount: can't collapse if there's a gup pin */
1752                 if (page_count(page) != 1) {
1753                         release_pte_pages(pte, _pte);
1754                         goto out;
1755                 }
1756                 /*
1757                  * We can do it before isolate_lru_page because the
1758                  * page can't be freed from under us. NOTE: PG_lock
1759                  * is needed to serialize against split_huge_page
1760                  * when invoked from the VM.
1761                  */
1762                 if (!trylock_page(page)) {
1763                         release_pte_pages(pte, _pte);
1764                         goto out;
1765                 }
1766                 /*
1767                  * Isolate the page to avoid collapsing an hugepage
1768                  * currently in use by the VM.
1769                  */
1770                 if (isolate_lru_page(page)) {
1771                         unlock_page(page);
1772                         release_pte_pages(pte, _pte);
1773                         goto out;
1774                 }
1775                 /* 0 stands for page_is_file_cache(page) == false */
1776                 inc_zone_page_state(page, NR_ISOLATED_ANON + 0);
1777                 VM_BUG_ON(!PageLocked(page));
1778                 VM_BUG_ON(PageLRU(page));
1779
1780                 /* If no mapped pte is young or referenced, don't collapse the page */
1781                 if (pte_young(pteval) || PageReferenced(page) ||
1782                     mmu_notifier_test_young(vma->vm_mm, address))
1783                         referenced = 1;
1784         }
1785         if (unlikely(!referenced))
1786                 release_all_pte_pages(pte);
1787         else
1788                 isolated = 1;
1789 out:
1790         return isolated;
1791 }
1792
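     /*
      * Copy the old small pages into the new hugepage @page: pte_none
      * holes are filled with zeroed subpages, mapped pages are copied,
      * then unmapped (pte cleared, rmap removed) and released back to
      * the LRU / freed.
      */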
1793 static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
1794                                       struct vm_area_struct *vma,
1795                                       unsigned long address,
1796                                       spinlock_t *ptl)
1797 {
1798         pte_t *_pte;
1799         for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++) {
1800                 pte_t pteval = *_pte;
1801                 struct page *src_page;
1802
1803                 if (pte_none(pteval)) {
1804                         clear_user_highpage(page, address);
1805                         add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
1806                 } else {
1807                         src_page = pte_page(pteval);
1808                         copy_user_highpage(page, src_page, address, vma);
1809                         VM_BUG_ON(page_mapcount(src_page) != 1);
1810                         VM_BUG_ON(page_count(src_page) != 2);
1811                         release_pte_page(src_page);
1812                         /*
1813                          * ptl mostly unnecessary, but preempt has to
1814                          * be disabled to update the per-cpu stats
1815                          * inside page_remove_rmap().
1816                          */
1817                         spin_lock(ptl);
1818                         /*
1819                          * paravirt calls inside pte_clear here are
1820                          * superfluous.
1821                          */
1822                         pte_clear(vma->vm_mm, address, _pte);
1823                         page_remove_rmap(src_page);
1824                         spin_unlock(ptl);
1825                         free_page_and_swap_cache(src_page);
1826                 }
1827
1828                 address += PAGE_SIZE;
1829                 page++;
1830         }
1831 }
1832
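     /*
      * Collapse the pmd-sized range at @address into a single hugepage:
      * allocate the new page (on NUMA kernels; on !NUMA the preallocated
      * *hpage is used), retake the mmap_sem in write mode, revalidate
      * the vma, clear and flush the old pmd, isolate and copy the small
      * pages and finally map the new hugepage with a single huge pmd.
      * Called with the mmap_sem held for reading; always returns with it
      * released.
      */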
1833 static void collapse_huge_page(struct mm_struct *mm,
1834                                unsigned long address,
1835                                struct page **hpage,
1836                                struct vm_area_struct *vma,
1837                                int node)
1838 {
1839         pgd_t *pgd;
1840         pud_t *pud;
1841         pmd_t *pmd, _pmd;
1842         pte_t *pte;
1843         pgtable_t pgtable;
1844         struct page *new_page;
1845         spinlock_t *ptl;
1846         int isolated;
1847         unsigned long hstart, hend;
1848
1849         VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1850 #ifndef CONFIG_NUMA
1851         up_read(&mm->mmap_sem);
1852         VM_BUG_ON(!*hpage);
1853         new_page = *hpage;
1854 #else
1855         VM_BUG_ON(*hpage);
1856         /*
1857          * Allocate the page while the vma is still valid and under
1858          * the mmap_sem read mode so there is no memory allocation
1859          * later when we take the mmap_sem in write mode. This is
1860          * friendlier behavior (OTOH it may actually hide bugs) towards
1861          * userland filesystems with daemons allocating memory in
1862          * their I/O paths.  Allocating memory with the
1863          * mmap_sem in read mode is also a good idea to allow greater
1864          * scalability.
1865          */
1866         new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address,
1867                                       node, __GFP_OTHER_NODE);
1868
1869         /*
1870          * After allocating the hugepage, release the mmap_sem read lock in
1871          * preparation for taking it in write mode.
1872          */
1873         up_read(&mm->mmap_sem);
1874         if (unlikely(!new_page)) {
1875                 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
1876                 *hpage = ERR_PTR(-ENOMEM);
1877                 return;
1878         }
1879 #endif
1880
1881         count_vm_event(THP_COLLAPSE_ALLOC);
1882         if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
1883 #ifdef CONFIG_NUMA
1884                 put_page(new_page);
1885 #endif
1886                 return;
1887         }
1888
1889         /*
1890          * Prevent all access to the pagetables, with the exception of
1891          * gup_fast (handled later by the ptep_clear_flush) and the VM
1892          * (handled by the anon_vma lock + PG_lock).
1893          */
1894         down_write(&mm->mmap_sem);
1895         if (unlikely(khugepaged_test_exit(mm)))
1896                 goto out;
1897
1898         vma = find_vma(mm, address);
             if (!vma)
                     goto out;
1899         hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
1900         hend = vma->vm_end & HPAGE_PMD_MASK;
1901         if (address < hstart || address + HPAGE_PMD_SIZE > hend)
1902                 goto out;
1903
1904         if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
1905             (vma->vm_flags & VM_NOHUGEPAGE))
1906                 goto out;
1907
1908         if (!vma->anon_vma || vma->vm_ops)
1909                 goto out;
1910         if (is_vma_temporary_stack(vma))
1911                 goto out;
1912         /*
1913          * If is_pfn_mapping() is true is_linear_pfn_mapping() must be
1914          * true too, verify it here.
1915          */
1916         VM_BUG_ON(is_linear_pfn_mapping(vma) || vma->vm_flags & VM_NO_THP);
1917
1918         pgd = pgd_offset(mm, address);
1919         if (!pgd_present(*pgd))
1920                 goto out;
1921
1922         pud = pud_offset(pgd, address);
1923         if (!pud_present(*pud))
1924                 goto out;
1925
1926         pmd = pmd_offset(pud, address);
1927         /* pmd can't go away or become huge under us */
1928         if (!pmd_present(*pmd) || pmd_trans_huge(*pmd))
1929                 goto out;
1930
1931         anon_vma_lock(vma->anon_vma);
1932
1933         pte = pte_offset_map(pmd, address);
1934         ptl = pte_lockptr(mm, pmd);
1935
1936         spin_lock(&mm->page_table_lock); /* probably unnecessary */
1937         /*
1938          * After this gup_fast can't run anymore. This also removes
1939          * any huge TLB entry from the CPU, so huge and small TLB
1940          * entries for the same virtual address are never loaded
1941          * simultaneously, avoiding the risk of CPU bugs in that area.
1942          */
1943         _pmd = pmdp_clear_flush_notify(vma, address, pmd);
1944         spin_unlock(&mm->page_table_lock);
1945
1946         spin_lock(ptl);
1947         isolated = __collapse_huge_page_isolate(vma, address, pte);
1948         spin_unlock(ptl);
1949
1950         if (unlikely(!isolated)) {
1951                 pte_unmap(pte);
1952                 spin_lock(&mm->page_table_lock);
1953                 BUG_ON(!pmd_none(*pmd));
1954                 set_pmd_at(mm, address, pmd, _pmd);
1955                 spin_unlock(&mm->page_table_lock);
1956                 anon_vma_unlock(vma->anon_vma);
1957                 goto out;
1958         }
1959
1960         /*
1961          * All pages are isolated and locked so anon_vma rmap
1962          * can't run anymore.
1963          */
1964         anon_vma_unlock(vma->anon_vma);
1965
1966         __collapse_huge_page_copy(pte, new_page, vma, address, ptl);
1967         pte_unmap(pte);
1968         __SetPageUptodate(new_page);
1969         pgtable = pmd_pgtable(_pmd);
1970         VM_BUG_ON(page_count(pgtable) != 1);
1971         VM_BUG_ON(page_mapcount(pgtable) != 0);
1972
1973         _pmd = mk_pmd(new_page, vma->vm_page_prot);
1974         _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
1975         _pmd = pmd_mkhuge(_pmd);
1976
1977         /*
1978          * spin_lock() below is not the equivalent of smp_wmb(), so
1979          * this smp_wmb() is needed to prevent the __collapse_huge_page_copy()
1980          * writes from becoming visible after the set_pmd_at() write.
1981          */
1982         smp_wmb();
1983
1984         spin_lock(&mm->page_table_lock);
1985         BUG_ON(!pmd_none(*pmd));
1986         page_add_new_anon_rmap(new_page, vma, address);
1987         set_pmd_at(mm, address, pmd, _pmd);
1988         update_mmu_cache(vma, address, _pmd);
1989         prepare_pmd_huge_pte(pgtable, mm);
1990         spin_unlock(&mm->page_table_lock);
1991
1992 #ifndef CONFIG_NUMA
1993         *hpage = NULL;
1994 #endif
1995         khugepaged_pages_collapsed++;
1996 out_up_write:
1997         up_write(&mm->mmap_sem);
1998         return;
1999
2000 out:
2001         mem_cgroup_uncharge_page(new_page);
2002 #ifdef CONFIG_NUMA
2003         put_page(new_page);
2004 #endif
2005         goto out_up_write;
2006 }
2007
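     /*
      * Scan one pmd-sized range of @vma read-only and decide whether it
      * is worth collapsing (enough present, writable, unpinned anonymous
      * ptes and at least one young/referenced page).  If so, call
      * collapse_huge_page(), which releases the mmap_sem.  Returns 1 if
      * a collapse was attempted, 0 otherwise.
      */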
2008 static int khugepaged_scan_pmd(struct mm_struct *mm,
2009                                struct vm_area_struct *vma,
2010                                unsigned long address,
2011                                struct page **hpage)
2012 {
2013         pgd_t *pgd;
2014         pud_t *pud;
2015         pmd_t *pmd;
2016         pte_t *pte, *_pte;
2017         int ret = 0, referenced = 0, none = 0;
2018         struct page *page;
2019         unsigned long _address;
2020         spinlock_t *ptl;
2021         int node = -1;
2022
2023         VM_BUG_ON(address & ~HPAGE_PMD_MASK);
2024
2025         pgd = pgd_offset(mm, address);
2026         if (!pgd_present(*pgd))
2027                 goto out;
2028
2029         pud = pud_offset(pgd, address);
2030         if (!pud_present(*pud))
2031                 goto out;
2032
2033         pmd = pmd_offset(pud, address);
2034         if (!pmd_present(*pmd) || pmd_trans_huge(*pmd))
2035                 goto out;
2036
2037         pte = pte_offset_map_lock(mm, pmd, address, &ptl);
2038         for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
2039              _pte++, _address += PAGE_SIZE) {
2040                 pte_t pteval = *_pte;
2041                 if (pte_none(pteval)) {
2042                         if (++none <= khugepaged_max_ptes_none)
2043                                 continue;
2044                         else
2045                                 goto out_unmap;
2046                 }
2047                 if (!pte_present(pteval) || !pte_write(pteval))
2048                         goto out_unmap;
2049                 page = vm_normal_page(vma, _address, pteval);
2050                 if (unlikely(!page))
2051                         goto out_unmap;
2052                 /*
2053                  * Choose the node of the first page. This could
2054                  * be more sophisticated and look at more pages,
2055                  * but isn't for now.
2056                  */
2057                 if (node == -1)
2058                         node = page_to_nid(page);
2059                 VM_BUG_ON(PageCompound(page));
2060                 if (!PageLRU(page) || PageLocked(page) || !PageAnon(page))
2061                         goto out_unmap;
2062                 /* cannot use mapcount: can't collapse if there's a gup pin */
2063                 if (page_count(page) != 1)
2064                         goto out_unmap;
2065                 if (pte_young(pteval) || PageReferenced(page) ||
2066                     mmu_notifier_test_young(vma->vm_mm, _address))
2067                         referenced = 1;
2068         }
2069         if (referenced)
2070                 ret = 1;
2071 out_unmap:
2072         pte_unmap_unlock(pte, ptl);
2073         if (ret)
2074                 /* collapse_huge_page will return with the mmap_sem released */
2075                 collapse_huge_page(mm, address, hpage, vma, node);
2076 out:
2077         return ret;
2078 }
2079
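     /*
      * If the mm behind @mm_slot has exited, unhash the slot, unlink it
      * from the scan list, free it and drop the mm_count reference taken
      * in __khugepaged_enter().  Called with khugepaged_mm_lock held.
      */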
2080 static void collect_mm_slot(struct mm_slot *mm_slot)
2081 {
2082         struct mm_struct *mm = mm_slot->mm;
2083
2084         VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
2085
2086         if (khugepaged_test_exit(mm)) {
2087                 /* free mm_slot */
2088                 hlist_del(&mm_slot->hash);
2089                 list_del(&mm_slot->mm_node);
2090
2091                 /*
2092                  * Not strictly needed because the mm exited already.
2093                  *
2094                  * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
2095                  */
2096
2097                 /* khugepaged_mm_lock actually not necessary for the below */
2098                 free_mm_slot(mm_slot);
2099                 mmdrop(mm);
2100         }
2101 }
2102
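     /*
      * Scan up to @pages ptes starting at the saved cursor position,
      * trying to collapse suitable pmd-sized ranges.  Temporarily drops
      * khugepaged_mm_lock, advances (or releases) khugepaged_scan.mm_slot
      * and returns the amount of progress made.
      */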
2103 static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
2104                                             struct page **hpage)
2105         __releases(&khugepaged_mm_lock)
2106         __acquires(&khugepaged_mm_lock)
2107 {
2108         struct mm_slot *mm_slot;
2109         struct mm_struct *mm;
2110         struct vm_area_struct *vma;
2111         int progress = 0;
2112
2113         VM_BUG_ON(!pages);
2114         VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
2115
2116         if (khugepaged_scan.mm_slot)
2117                 mm_slot = khugepaged_scan.mm_slot;
2118         else {
2119                 mm_slot = list_entry(khugepaged_scan.mm_head.next,
2120                                      struct mm_slot, mm_node);
2121                 khugepaged_scan.address = 0;
2122                 khugepaged_scan.mm_slot = mm_slot;
2123         }
2124         spin_unlock(&khugepaged_mm_lock);
2125
2126         mm = mm_slot->mm;
2127         down_read(&mm->mmap_sem);
2128         if (unlikely(khugepaged_test_exit(mm)))
2129                 vma = NULL;
2130         else
2131                 vma = find_vma(mm, khugepaged_scan.address);
2132
2133         progress++;
2134         for (; vma; vma = vma->vm_next) {
2135                 unsigned long hstart, hend;
2136
2137                 cond_resched();
2138                 if (unlikely(khugepaged_test_exit(mm))) {
2139                         progress++;
2140                         break;
2141                 }
2142
2143                 if ((!(vma->vm_flags & VM_HUGEPAGE) &&
2144                      !khugepaged_always()) ||
2145                     (vma->vm_flags & VM_NOHUGEPAGE)) {
2146                 skip:
2147                         progress++;
2148                         continue;
2149                 }
2150                 if (!vma->anon_vma || vma->vm_ops)
2151                         goto skip;
2152                 if (is_vma_temporary_stack(vma))
2153                         goto skip;
2154                 /*
2155          * If is_pfn_mapping() is true is_linear_pfn_mapping()
2156                  * must be true too, verify it here.
2157                  */
2158                 VM_BUG_ON(is_linear_pfn_mapping(vma) ||
2159                           vma->vm_flags & VM_NO_THP);
2160
2161                 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2162                 hend = vma->vm_end & HPAGE_PMD_MASK;
2163                 if (hstart >= hend)
2164                         goto skip;
2165                 if (khugepaged_scan.address > hend)
2166                         goto skip;
2167                 if (khugepaged_scan.address < hstart)
2168                         khugepaged_scan.address = hstart;
2169                 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
2170
2171                 while (khugepaged_scan.address < hend) {
2172                         int ret;
2173                         cond_resched();
2174                         if (unlikely(khugepaged_test_exit(mm)))
2175                                 goto breakouterloop;
2176
2177                         VM_BUG_ON(khugepaged_scan.address < hstart ||
2178                                   khugepaged_scan.address + HPAGE_PMD_SIZE >
2179                                   hend);
2180                         ret = khugepaged_scan_pmd(mm, vma,
2181                                                   khugepaged_scan.address,
2182                                                   hpage);
2183                         /* move to next address */
2184                         khugepaged_scan.address += HPAGE_PMD_SIZE;
2185                         progress += HPAGE_PMD_NR;
2186                         if (ret)
2187                                 /* we released mmap_sem so break loop */
2188                                 goto breakouterloop_mmap_sem;
2189                         if (progress >= pages)
2190                                 goto breakouterloop;
2191                 }
2192         }
2193 breakouterloop:
2194         up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
2195 breakouterloop_mmap_sem:
2196
2197         spin_lock(&khugepaged_mm_lock);
2198         VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
2199         /*
2200          * Release the current mm_slot if this mm is about to die, or
2201          * if we scanned all vmas of this mm.
2202          */
2203         if (khugepaged_test_exit(mm) || !vma) {
2204                 /*
2205                  * Make sure that if mm_users is reaching zero while
2206                  * khugepaged runs here, khugepaged_exit will find
2207                  * mm_slot not pointing to the exiting mm.
2208                  */
2209                 if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
2210                         khugepaged_scan.mm_slot = list_entry(
2211                                 mm_slot->mm_node.next,
2212                                 struct mm_slot, mm_node);
2213                         khugepaged_scan.address = 0;
2214                 } else {
2215                         khugepaged_scan.mm_slot = NULL;
2216                         khugepaged_full_scans++;
2217                 }
2218
2219                 collect_mm_slot(mm_slot);
2220         }
2221
2222         return progress;
2223 }
2224
2225 static int khugepaged_has_work(void)
2226 {
2227         return !list_empty(&khugepaged_scan.mm_head) &&
2228                 khugepaged_enabled();
2229 }
2230
2231 static int khugepaged_wait_event(void)
2232 {
2233         return !list_empty(&khugepaged_scan.mm_head) ||
2234                 !khugepaged_enabled();
2235 }
2236
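     /*
      * One pass of the khugepaged main loop: scan up to
      * khugepaged_pages_to_scan ptes, preallocating *hpage on !NUMA
      * kernels and bailing out early on allocation failure, freezing or
      * kthread stop.
      */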
2237 static void khugepaged_do_scan(struct page **hpage)
2238 {
2239         unsigned int progress = 0, pass_through_head = 0;
2240         unsigned int pages = khugepaged_pages_to_scan;
2241
2242         barrier(); /* force khugepaged_pages_to_scan into the local 'pages' copy */
2243
2244         while (progress < pages) {
2245                 cond_resched();
2246
2247 #ifndef CONFIG_NUMA
2248                 if (!*hpage) {
2249                         *hpage = alloc_hugepage(khugepaged_defrag());
2250                         if (unlikely(!*hpage)) {
2251                                 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
2252                                 break;
2253                         }
2254                         count_vm_event(THP_COLLAPSE_ALLOC);
2255                 }
2256 #else
2257                 if (IS_ERR(*hpage))
2258                         break;
2259 #endif
2260
2261                 if (unlikely(kthread_should_stop() || freezing(current)))
2262                         break;
2263
2264                 spin_lock(&khugepaged_mm_lock);
2265                 if (!khugepaged_scan.mm_slot)
2266                         pass_through_head++;
2267                 if (khugepaged_has_work() &&
2268                     pass_through_head < 2)
2269                         progress += khugepaged_scan_mm_slot(pages - progress,
2270                                                             hpage);
2271                 else
2272                         progress = pages;
2273                 spin_unlock(&khugepaged_mm_lock);
2274         }
2275 }
2276
2277 static void khugepaged_alloc_sleep(void)
2278 {
2279         wait_event_freezable_timeout(khugepaged_wait, false,
2280                         msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
2281 }
2282
2283 #ifndef CONFIG_NUMA
2284 static struct page *khugepaged_alloc_hugepage(void)
2285 {
2286         struct page *hpage;
2287
2288         do {
2289                 hpage = alloc_hugepage(khugepaged_defrag());
2290                 if (!hpage) {
2291                         count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
2292                         khugepaged_alloc_sleep();
2293                 } else
2294                         count_vm_event(THP_COLLAPSE_ALLOC);
2295         } while (unlikely(!hpage) &&
2296                  likely(khugepaged_enabled()));
2297         return hpage;
2298 }
2299 #endif
2300
2301 static void khugepaged_loop(void)
2302 {
2303         struct page *hpage;
2304
2305 #ifdef CONFIG_NUMA
2306         hpage = NULL;
2307 #endif
2308         while (likely(khugepaged_enabled())) {
2309 #ifndef CONFIG_NUMA
2310                 hpage = khugepaged_alloc_hugepage();
2311                 if (unlikely(!hpage))
2312                         break;
2313 #else
2314                 if (IS_ERR(hpage)) {
2315                         khugepaged_alloc_sleep();
2316                         hpage = NULL;
2317                 }
2318 #endif
2319
2320                 khugepaged_do_scan(&hpage);
2321 #ifndef CONFIG_NUMA
2322                 if (hpage)
2323                         put_page(hpage);
2324 #endif
2325                 try_to_freeze();
2326                 if (unlikely(kthread_should_stop()))
2327                         break;
2328                 if (khugepaged_has_work()) {
2329                         if (!khugepaged_scan_sleep_millisecs)
2330                                 continue;
2331                         wait_event_freezable_timeout(khugepaged_wait, false,
2332                             msecs_to_jiffies(khugepaged_scan_sleep_millisecs));
2333                 } else if (khugepaged_enabled())
2334                         wait_event_freezable(khugepaged_wait,
2335                                              khugepaged_wait_event());
2336         }
2337 }
2338
2339 static int khugepaged(void *none)
2340 {
2341         struct mm_slot *mm_slot;
2342
2343         set_freezable();
2344         set_user_nice(current, 19);
2345
2346         /* serialize with start_khugepaged() */
2347         mutex_lock(&khugepaged_mutex);
2348
2349         for (;;) {
2350                 mutex_unlock(&khugepaged_mutex);
2351                 VM_BUG_ON(khugepaged_thread != current);
2352                 khugepaged_loop();
2353                 VM_BUG_ON(khugepaged_thread != current);
2354
2355                 mutex_lock(&khugepaged_mutex);
2356                 if (!khugepaged_enabled())
2357                         break;
2358                 if (unlikely(kthread_should_stop()))
2359                         break;
2360         }
2361
2362         spin_lock(&khugepaged_mm_lock);
2363         mm_slot = khugepaged_scan.mm_slot;
2364         khugepaged_scan.mm_slot = NULL;
2365         if (mm_slot)
2366                 collect_mm_slot(mm_slot);
2367         spin_unlock(&khugepaged_mm_lock);
2368
2369         khugepaged_thread = NULL;
2370         mutex_unlock(&khugepaged_mutex);
2371
2372         return 0;
2373 }
2374
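     /*
      * Take a reference on the page mapped by @pmd under the
      * page_table_lock (bailing out if it is no longer a huge pmd), then
      * split it with split_huge_page() and drop the reference.
      */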
2375 void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd)
2376 {
2377         struct page *page;
2378
2379         spin_lock(&mm->page_table_lock);
2380         if (unlikely(!pmd_trans_huge(*pmd))) {
2381                 spin_unlock(&mm->page_table_lock);
2382                 return;
2383         }
2384         page = pmd_page(*pmd);
2385         VM_BUG_ON(!page_count(page));
2386         get_page(page);
2387         spin_unlock(&mm->page_table_lock);
2388
2389         split_huge_page(page);
2390
2391         put_page(page);
2392         BUG_ON(pmd_trans_huge(*pmd));
2393 }
2394
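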
2395 static void split_huge_page_address(struct mm_struct *mm,
2396                                     unsigned long address)
2397 {
2398         pgd_t *pgd;
2399         pud_t *pud;
2400         pmd_t *pmd;
2401
2402         VM_BUG_ON(!(address & ~HPAGE_PMD_MASK));
2403
2404         pgd = pgd_offset(mm, address);
2405         if (!pgd_present(*pgd))
2406                 return;
2407
2408         pud = pud_offset(pgd, address);
2409         if (!pud_present(*pud))
2410                 return;
2411
2412         pmd = pmd_offset(pud, address);
2413         if (!pmd_present(*pmd))
2414                 return;
2415         /*
2416          * Caller holds the mmap_sem in write mode, so a huge pmd cannot
2417          * materialize from under us.
2418          */
2419         split_huge_page_pmd(mm, pmd);
2420 }
2421
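     /*
      * Called when a vma is about to be adjusted: if the new start, end
      * or vm_next->vm_start would fall in the middle of a huge pmd,
      * split that pmd first so no huge pmd ever straddles a vma boundary.
      */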
2422 void __vma_adjust_trans_huge(struct vm_area_struct *vma,
2423                              unsigned long start,
2424                              unsigned long end,
2425                              long adjust_next)
2426 {
2427         /*
2428          * If the new start address isn't hpage aligned and it could
2429          * previously contain an hugepage: check if we need to split
2430          * an huge pmd.
2431          */
2432         if (start & ~HPAGE_PMD_MASK &&
2433             (start & HPAGE_PMD_MASK) >= vma->vm_start &&
2434             (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
2435                 split_huge_page_address(vma->vm_mm, start);
2436
2437         /*
2438          * If the new end address isn't hpage aligned and it could
2439          * previously contain an hugepage: check if we need to split
2440          * an huge pmd.
2441          */
2442         if (end & ~HPAGE_PMD_MASK &&
2443             (end & HPAGE_PMD_MASK) >= vma->vm_start &&
2444             (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
2445                 split_huge_page_address(vma->vm_mm, end);
2446
2447         /*
2448          * If we're also updating vma->vm_next->vm_start, and the new
2449          * vm_next->vm_start isn't hpage aligned and it could previously
2450          * contain an hugepage: check if we need to split an huge pmd.
2451          */
2452         if (adjust_next > 0) {
2453                 struct vm_area_struct *next = vma->vm_next;
2454                 unsigned long nstart = next->vm_start;
2455                 nstart += adjust_next << PAGE_SHIFT;
2456                 if (nstart & ~HPAGE_PMD_MASK &&
2457                     (nstart & HPAGE_PMD_MASK) >= next->vm_start &&
2458                     (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end)
2459                         split_huge_page_address(next->vm_mm, nstart);
2460         }
2461 }