diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index b6facc35e8932d8b612e0d78ac48056f4c2aa815..004c9c2aac788ebca9c7de0edb95244396b880f1 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -15,6 +15,8 @@
 #include <linux/mm_inline.h>
 #include <linux/kthread.h>
 #include <linux/khugepaged.h>
+#include <linux/freezer.h>
+#include <linux/mman.h>
 #include <asm/tlb.h>
 #include <asm/pgalloc.h>
 #include "internal.h"
@@ -487,7 +489,15 @@ static int __init hugepage_init(void)
        int err;
 #ifdef CONFIG_SYSFS
        static struct kobject *hugepage_kobj;
+#endif
+
+       err = -EINVAL;
+       if (!has_transparent_hugepage()) {
+               transparent_hugepage_flags = 0;
+               goto out;
+       }
 
+#ifdef CONFIG_SYSFS
        err = -ENOMEM;
        hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
        if (unlikely(!hugepage_kobj)) {
@@ -518,6 +528,14 @@ static int __init hugepage_init(void)
                goto out;
        }
 
+       /*
+        * By default disable transparent hugepages on smaller systems,
+        * where the extra memory used could hurt more than the TLB
+        * overhead is likely to save.  The admin can still enable it
+        * through /sys.
+        */
+       if (totalram_pages < (512 << (20 - PAGE_SHIFT)))
+               transparent_hugepage_flags = 0;
+
        start_khugepaged();
 
        set_recommended_min_free_kbytes();
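
The "512 << (20 - PAGE_SHIFT)" cutoff is simply 512MB expressed in pages: 1MB is 1 << 20 bytes, and dividing by the page size is a right shift by PAGE_SHIFT. A minimal userspace sketch of the same check, with sysconf() standing in for the kernel's totalram_pages (illustrative only, not part of the patch):

    /* Userspace sketch of the 512MB cutoff; sysconf() stands in
     * for the kernel's totalram_pages. */
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            long pagesz = sysconf(_SC_PAGESIZE);   /* e.g. 4096 */
            long pages  = sysconf(_SC_PHYS_PAGES); /* RAM in pages */
            long cutoff = (512L << 20) / pagesz;   /* 512MB in pages */

            printf("THP would default to %s\n",
                   pages < cutoff ? "disabled" : "enabled");
            return 0;
    }
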
@@ -1126,6 +1144,7 @@ static void __split_huge_page_refcount(struct page *page)
        int i;
        unsigned long head_index = page->index;
        struct zone *zone = page_zone(page);
+       int zonestat;
 
        /* prevent PageLRU to go away from under us, and freeze lru stats */
        spin_lock_irq(&zone->lru_lock);
@@ -1190,6 +1209,15 @@ static void __split_huge_page_refcount(struct page *page)
        __dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
        __mod_zone_page_state(zone, NR_ANON_PAGES, HPAGE_PMD_NR);
 
+       /*
+        * A hugepage counts as HPAGE_PMD_NR pages in the LRU statistics,
+        * so adjust those appropriately if this page is on the LRU.
+        */
+       if (PageLRU(page)) {
+               zonestat = NR_LRU_BASE + page_lru(page);
+               __mod_zone_page_state(zone, zonestat, -(HPAGE_PMD_NR-1));
+       }
+
        ClearPageCompound(page);
        compound_unlock(page);
        spin_unlock_irq(&zone->lru_lock);
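
Concretely, on x86-64 with 4KB base pages HPAGE_PMD_NR is 512: while the page was compound it contributed 512 to the NR_LRU_BASE + page_lru(page) counter, but after the split the head is a single 4KB page and the 511 tails are accounted individually as they are linked back onto the LRU, hence the adjustment by -(HPAGE_PMD_NR - 1) = -511.
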
@@ -1361,18 +1389,49 @@ out:
        return ret;
 }
 
-int hugepage_madvise(unsigned long *vm_flags)
+int hugepage_madvise(struct vm_area_struct *vma,
+                    unsigned long *vm_flags, int advice)
 {
-       /*
-        * Be somewhat over-protective like KSM for now!
-        */
-       if (*vm_flags & (VM_HUGEPAGE | VM_SHARED  | VM_MAYSHARE   |
-                        VM_PFNMAP   | VM_IO      | VM_DONTEXPAND |
-                        VM_RESERVED | VM_HUGETLB | VM_INSERTPAGE |
-                        VM_MIXEDMAP | VM_SAO))
-               return -EINVAL;
-
-       *vm_flags |= VM_HUGEPAGE;
+       switch (advice) {
+       case MADV_HUGEPAGE:
+               /*
+                * Be somewhat over-protective like KSM for now!
+                */
+               if (*vm_flags & (VM_HUGEPAGE |
+                                VM_SHARED   | VM_MAYSHARE   |
+                                VM_PFNMAP   | VM_IO      | VM_DONTEXPAND |
+                                VM_RESERVED | VM_HUGETLB | VM_INSERTPAGE |
+                                VM_MIXEDMAP | VM_SAO))
+                       return -EINVAL;
+               *vm_flags &= ~VM_NOHUGEPAGE;
+               *vm_flags |= VM_HUGEPAGE;
+               /*
+                * If the vma becomes good for khugepaged to scan,
+                * register it here without waiting for a page fault
+                * that may not happen any time soon.
+                */
+               if (unlikely(khugepaged_enter_vma_merge(vma)))
+                       return -ENOMEM;
+               break;
+       case MADV_NOHUGEPAGE:
+               /*
+                * Be somewhat over-protective like KSM for now!
+                */
+               if (*vm_flags & (VM_NOHUGEPAGE |
+                                VM_SHARED   | VM_MAYSHARE   |
+                                VM_PFNMAP   | VM_IO      | VM_DONTEXPAND |
+                                VM_RESERVED | VM_HUGETLB | VM_INSERTPAGE |
+                                VM_MIXEDMAP | VM_SAO))
+                       return -EINVAL;
+               *vm_flags &= ~VM_HUGEPAGE;
+               *vm_flags |= VM_NOHUGEPAGE;
+               /*
+                * Setting VM_NOHUGEPAGE will prevent khugepaged from
+                * scanning this vma, even if the mm remains registered
+                * with khugepaged (it may have registered before
+                * VM_NOHUGEPAGE was set).
+                */
+               break;
+       }
 
        return 0;
 }
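
From userspace the two advice values are a per-mapping opt-in and opt-out; MADV_HUGEPAGE also registers the mm with khugepaged immediately rather than waiting for a page fault. A minimal sketch, assuming the libc headers already expose MADV_HUGEPAGE and MADV_NOHUGEPAGE:

    /* Sketch: opt an anonymous mapping in to, then out of, THP.
     * Assumes <sys/mman.h> defines MADV_HUGEPAGE/MADV_NOHUGEPAGE. */
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
            size_t len = 4UL << 20; /* 4MB, room for 2MB hugepages */
            void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            if (p == MAP_FAILED)
                    return 1;
            if (madvise(p, len, MADV_HUGEPAGE))   /* sets VM_HUGEPAGE */
                    perror("MADV_HUGEPAGE");
            memset(p, 0, len);                    /* fault the range in */
            if (madvise(p, len, MADV_NOHUGEPAGE)) /* sets VM_NOHUGEPAGE */
                    perror("MADV_NOHUGEPAGE");
            munmap(p, len);
            return 0;
    }
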
@@ -1624,7 +1683,8 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
                VM_BUG_ON(PageLRU(page));
 
                /* If there is no mapped pte young don't collapse the page */
-               if (pte_young(pteval))
+               if (pte_young(pteval) || PageReferenced(page) ||
+                   mmu_notifier_test_young(vma->vm_mm, address))
                        referenced = 1;
        }
        if (unlikely(!referenced))
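
The referenced test now ORs three access signals: the hardware accessed bit in the pte, the software PageReferenced() flag, and mmu_notifier_test_young(), which lets a secondary MMU (KVM, for example) report accesses that never touch the primary page tables. A hypothetical helper, only to name the combined predicate (not in the patch):

    /* Hypothetical helper: a mapped page counts as young if any of
     * the three access signals fires. */
    static inline int khugepaged_page_young(struct vm_area_struct *vma,
                                            unsigned long address,
                                            pte_t pteval,
                                            struct page *page)
    {
            return pte_young(pteval) || PageReferenced(page) ||
                   mmu_notifier_test_young(vma->vm_mm, address);
    }
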
@@ -1737,7 +1797,8 @@ static void collapse_huge_page(struct mm_struct *mm,
        if (address < hstart || address + HPAGE_PMD_SIZE > hend)
                goto out;
 
-       if (!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always())
+       if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
+           (vma->vm_flags & VM_NOHUGEPAGE))
                goto out;
 
        /* VM_PFNMAP vmas may have vm_ops null but vm_file set */
@@ -1884,7 +1945,8 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
                /* cannot use mapcount: can't collapse if there's a gup pin */
                if (page_count(page) != 1)
                        goto out_unmap;
-               if (pte_young(pteval))
+               if (pte_young(pteval) || PageReferenced(page) ||
+                   mmu_notifier_test_young(vma->vm_mm, address))
                        referenced = 1;
        }
        if (referenced)
@@ -1959,8 +2021,9 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
                        break;
                }
 
-               if (!(vma->vm_flags & VM_HUGEPAGE) &&
-                   !khugepaged_always()) {
+               if ((!(vma->vm_flags & VM_HUGEPAGE) &&
+                    !khugepaged_always()) ||
+                   (vma->vm_flags & VM_NOHUGEPAGE)) {
                        progress++;
                        continue;
                }
@@ -2075,6 +2138,9 @@ static void khugepaged_do_scan(struct page **hpage)
                        break;
 #endif
 
+               if (unlikely(kthread_should_stop() || freezing(current)))
+                       break;
+
                spin_lock(&khugepaged_mm_lock);
                if (!khugepaged_scan.mm_slot)
                        pass_through_head++;
@@ -2137,6 +2203,9 @@ static void khugepaged_loop(void)
                if (hpage)
                        put_page(hpage);
 #endif
+               try_to_freeze();
+               if (unlikely(kthread_should_stop()))
+                       break;
                if (khugepaged_has_work()) {
                        DEFINE_WAIT(wait);
                        if (!khugepaged_scan_sleep_millisecs)
@@ -2147,8 +2216,8 @@ static void khugepaged_loop(void)
                                        khugepaged_scan_sleep_millisecs));
                        remove_wait_queue(&khugepaged_wait, &wait);
                } else if (khugepaged_enabled())
-                       wait_event_interruptible(khugepaged_wait,
-                                                khugepaged_wait_event());
+                       wait_event_freezable(khugepaged_wait,
+                                            khugepaged_wait_event());
        }
 }
 
@@ -2156,6 +2225,7 @@ static int khugepaged(void *none)
 {
        struct mm_slot *mm_slot;
 
+       set_freezable();
        set_user_nice(current, 19);
 
        /* serialize with start_khugepaged() */
@@ -2170,6 +2240,8 @@ static int khugepaged(void *none)
                mutex_lock(&khugepaged_mutex);
                if (!khugepaged_enabled())
                        break;
+               if (unlikely(kthread_should_stop()))
+                       break;
        }
 
        spin_lock(&khugepaged_mm_lock);
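
Taken together, the khugepaged changes follow the standard freezable-kthread pattern: declare the thread freezable once with set_freezable(), check kthread_should_stop() and try_to_freeze() inside the loop, and sleep with wait_event_freezable() so the freezer can wake the thread around suspend. A minimal sketch of the pattern in isolation, with my_wq and my_has_work() as hypothetical stand-ins for khugepaged's wait queue and work test:

    #include <linux/kthread.h>
    #include <linux/freezer.h>
    #include <linux/wait.h>

    static DECLARE_WAIT_QUEUE_HEAD(my_wq);
    static bool my_has_work(void);  /* hypothetical work predicate */
    static void my_do_work(void);   /* hypothetical work body */

    static int my_thread_fn(void *unused)
    {
            set_freezable();          /* opt in to the freezer */
            while (!kthread_should_stop()) {
                    try_to_freeze();  /* park here during suspend */
                    if (my_has_work())
                            my_do_work();
                    else
                            wait_event_freezable(my_wq,
                                                 my_has_work() ||
                                                 kthread_should_stop());
            }
            return 0;
    }
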