Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/signal

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 010d32944d14d64f7b6174bc1bed9a38c159dc9d..a863af26c79c0190f378c36ac4e680add06f700a 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -787,6 +787,8 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
        pmd_t _pmd;
        int ret = 0, i;
        struct page **pages;
+       unsigned long mmun_start;       /* For mmu_notifiers */
+       unsigned long mmun_end;         /* For mmu_notifiers */
 
        pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR,
                        GFP_KERNEL);
@@ -823,12 +825,16 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
                cond_resched();
        }
 
+       mmun_start = haddr;
+       mmun_end   = haddr + HPAGE_PMD_SIZE;
+       mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
+
        spin_lock(&mm->page_table_lock);
        if (unlikely(!pmd_same(*pmd, orig_pmd)))
                goto out_free_pages;
        VM_BUG_ON(!PageHead(page));
 
-       pmdp_clear_flush_notify(vma, haddr, pmd);
+       pmdp_clear_flush(vma, haddr, pmd);
        /* leave pmd empty until pte is filled */
 
        pgtable = pgtable_trans_huge_withdraw(mm);
@@ -851,6 +857,8 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
        page_remove_rmap(page);
        spin_unlock(&mm->page_table_lock);
 
+       mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
+
        ret |= VM_FAULT_WRITE;
        put_page(page);
 
@@ -859,6 +867,7 @@ out:
 
 out_free_pages:
        spin_unlock(&mm->page_table_lock);
+       mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
        mem_cgroup_uncharge_start();
        for (i = 0; i < HPAGE_PMD_NR; i++) {
                mem_cgroup_uncharge_page(pages[i]);
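
Note on the hunks above (the same pattern recurs later in this diff): do_huge_pmd_wp_page_fallback() now opens an mmu_notifier invalidation range before taking mm->page_table_lock, uses the non-notifying pmdp_clear_flush() under the lock, and closes the range after the lock is dropped, on the out_free_pages error path as well as on success. A minimal sketch of that bracketing follows; the function name example_clear_huge_pmd() and its stripped-down body are illustrative only, assuming the three-argument invalidate_range_start/end signatures of this kernel vintage and mm/huge_memory.c's existing #includes.

/*
 * Illustrative sketch, not part of the patch: the conversion pattern the
 * hunks above apply.  The notifier range is opened before the page table
 * lock is taken, the non-notifying pmdp_clear_flush() runs under the
 * lock, and the range is closed after the lock is dropped.
 */
static void example_clear_huge_pmd(struct vm_area_struct *vma,
				   pmd_t *pmd, unsigned long haddr)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long mmun_start = haddr;			/* For mmu_notifiers */
	unsigned long mmun_end   = haddr + HPAGE_PMD_SIZE;	/* For mmu_notifiers */

	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
	spin_lock(&mm->page_table_lock);
	pmdp_clear_flush(vma, haddr, pmd);	/* no *_notify variant under the spinlock */
	spin_unlock(&mm->page_table_lock);
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
}
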
@@ -875,6 +884,8 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
        int ret = 0;
        struct page *page, *new_page;
        unsigned long haddr;
+       unsigned long mmun_start;       /* For mmu_notifiers */
+       unsigned long mmun_end;         /* For mmu_notifiers */
 
        VM_BUG_ON(!vma->anon_vma);
        spin_lock(&mm->page_table_lock);
@@ -889,7 +900,7 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                entry = pmd_mkyoung(orig_pmd);
                entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
                if (pmdp_set_access_flags(vma, haddr, pmd, entry,  1))
-                       update_mmu_cache(vma, address, entry);
+                       update_mmu_cache_pmd(vma, address, pmd);
                ret |= VM_FAULT_WRITE;
                goto out_unlock;
        }
@@ -925,38 +936,47 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
        copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
        __SetPageUptodate(new_page);
 
+       mmun_start = haddr;
+       mmun_end   = haddr + HPAGE_PMD_SIZE;
+       mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
+
        spin_lock(&mm->page_table_lock);
        put_page(page);
        if (unlikely(!pmd_same(*pmd, orig_pmd))) {
                spin_unlock(&mm->page_table_lock);
                mem_cgroup_uncharge_page(new_page);
                put_page(new_page);
-               goto out;
+               goto out_mn;
        } else {
                pmd_t entry;
                VM_BUG_ON(!PageHead(page));
                entry = mk_pmd(new_page, vma->vm_page_prot);
                entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
                entry = pmd_mkhuge(entry);
-               pmdp_clear_flush_notify(vma, haddr, pmd);
+               pmdp_clear_flush(vma, haddr, pmd);
                page_add_new_anon_rmap(new_page, vma, haddr);
                set_pmd_at(mm, haddr, pmd, entry);
-               update_mmu_cache(vma, address, entry);
+               update_mmu_cache_pmd(vma, address, pmd);
                page_remove_rmap(page);
                put_page(page);
                ret |= VM_FAULT_WRITE;
        }
-out_unlock:
        spin_unlock(&mm->page_table_lock);
+out_mn:
+       mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
 out:
        return ret;
+out_unlock:
+       spin_unlock(&mm->page_table_lock);
+       return ret;
 }
 
-struct page *follow_trans_huge_pmd(struct mm_struct *mm,
+struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
                                   unsigned long addr,
                                   pmd_t *pmd,
                                   unsigned int flags)
 {
+       struct mm_struct *mm = vma->vm_mm;
        struct page *page = NULL;
 
        assert_spin_locked(&mm->page_table_lock);
@@ -979,6 +999,14 @@ struct page *follow_trans_huge_pmd(struct mm_struct *mm,
                _pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
                set_pmd_at(mm, addr & HPAGE_PMD_MASK, pmd, _pmd);
        }
+       if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
+               if (page->mapping && trylock_page(page)) {
+                       lru_add_drain();
+                       if (page->mapping)
+                               mlock_vma_page(page);
+                       unlock_page(page);
+               }
+       }
        page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
        VM_BUG_ON(!PageCompound(page));
        if (flags & FOLL_GET)
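
The hunk above, together with the earlier prototype change, has follow_trans_huge_pmd() take the vma rather than the mm, so the FOLL_MLOCK path can consult vma->vm_flags and mlock the huge page's head page while it is still mapped. A hedged call-site sketch follows; the claim that the in-tree caller is follow_page() in mm/memory.c is an assumption about this point in history, not something shown in this diff.

/*
 * Illustrative call-site sketch, not taken from this diff: the caller
 * now passes the vma instead of the mm and must still hold
 * mm->page_table_lock, which follow_trans_huge_pmd() asserts.
 */
struct mm_struct *mm = vma->vm_mm;
struct page *page;

spin_lock(&mm->page_table_lock);
page = follow_trans_huge_pmd(vma, address, pmd, flags);	/* was (mm, address, ...) */
spin_unlock(&mm->page_table_lock);
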
@@ -996,9 +1024,10 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
        if (__pmd_trans_huge_lock(pmd, vma) == 1) {
                struct page *page;
                pgtable_t pgtable;
+               pmd_t orig_pmd;
                pgtable = pgtable_trans_huge_withdraw(tlb->mm);
-               page = pmd_page(*pmd);
-               pmd_clear(pmd);
+               orig_pmd = pmdp_get_and_clear(tlb->mm, addr, pmd);
+               page = pmd_page(orig_pmd);
                tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
                page_remove_rmap(page);
                VM_BUG_ON(page_mapcount(page) < 0);
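
In the hunk above, zap_huge_pmd() switches from reading pmd_page(*pmd) and clearing the entry in two separate steps to a single pmdp_get_and_clear(), so the page is derived from the pmd value that was actually torn down rather than from a second dereference of *pmd. A minimal sketch of the idiom; example_zap_one_pmd() is a made-up name and the body is reduced to the read-and-clear itself.

/*
 * Illustrative sketch: derive everything from the value returned by the
 * read-and-clear instead of dereferencing *pmd again afterwards.
 */
static struct page *example_zap_one_pmd(struct mm_struct *mm,
					unsigned long addr, pmd_t *pmd)
{
	pmd_t orig_pmd = pmdp_get_and_clear(mm, addr, pmd);

	return pmd_page(orig_pmd);	/* page the cleared entry mapped */
}
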
@@ -1162,7 +1191,11 @@ static int __split_huge_page_splitting(struct page *page,
        struct mm_struct *mm = vma->vm_mm;
        pmd_t *pmd;
        int ret = 0;
+       /* For mmu_notifiers */
+       const unsigned long mmun_start = address;
+       const unsigned long mmun_end   = address + HPAGE_PMD_SIZE;
 
+       mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
        spin_lock(&mm->page_table_lock);
        pmd = page_check_address_pmd(page, mm, address,
                                     PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG);
@@ -1174,10 +1207,11 @@ static int __split_huge_page_splitting(struct page *page,
                 * and it won't wait on the anon_vma->root->mutex to
                 * serialize against split_huge_page*.
                 */
-               pmdp_splitting_flush_notify(vma, address, pmd);
+               pmdp_splitting_flush(vma, address, pmd);
                ret = 1;
        }
        spin_unlock(&mm->page_table_lock);
+       mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
 
        return ret;
 }
@@ -1375,18 +1409,17 @@ static void __split_huge_page(struct page *page,
                              struct anon_vma *anon_vma)
 {
        int mapcount, mapcount2;
+       pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
        struct anon_vma_chain *avc;
 
        BUG_ON(!PageHead(page));
        BUG_ON(PageTail(page));
 
        mapcount = 0;
-       list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
+       anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
                struct vm_area_struct *vma = avc->vma;
                unsigned long addr = vma_address(page, vma);
                BUG_ON(is_vma_temporary_stack(vma));
-               if (addr == -EFAULT)
-                       continue;
                mapcount += __split_huge_page_splitting(page, vma, addr);
        }
        /*
@@ -1407,12 +1440,10 @@ static void __split_huge_page(struct page *page,
        __split_huge_page_refcount(page);
 
        mapcount2 = 0;
-       list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
+       anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
                struct vm_area_struct *vma = avc->vma;
                unsigned long addr = vma_address(page, vma);
                BUG_ON(is_vma_temporary_stack(vma));
-               if (addr == -EFAULT)
-                       continue;
                mapcount2 += __split_huge_page_map(page, vma, addr);
        }
        if (mapcount != mapcount2)
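
Both loops in __split_huge_page() now query the anon_vma interval tree for the page's pgoff instead of walking the flat same_anon_vma list, which is why the addr == -EFAULT checks disappear: every vma the lookup returns covers that offset, so vma_address() always resolves. As a rough sketch (written from memory, so treat the expansion as an assumption), the foreach wrapper is equivalent to driving the interval-tree query functions directly:

/*
 * Rough equivalent of anon_vma_interval_tree_foreach() used above:
 * visit only the vmas whose file-page range overlaps the single-offset
 * interval [pgoff, pgoff].
 */
for (avc = anon_vma_interval_tree_iter_first(&anon_vma->rb_root, pgoff, pgoff);
     avc != NULL;
     avc = anon_vma_interval_tree_iter_next(avc, pgoff, pgoff)) {
	struct vm_area_struct *vma = avc->vma;
	unsigned long addr = vma_address(page, vma);	/* always valid here */
	/* ... per-vma split work ... */
}
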
@@ -1800,6 +1831,7 @@ static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
                        return false;
 
                *wait = false;
+               *hpage = NULL;
                khugepaged_alloc_sleep();
        } else if (*hpage) {
                put_page(*hpage);
@@ -1900,6 +1932,8 @@ static void collapse_huge_page(struct mm_struct *mm,
        spinlock_t *ptl;
        int isolated;
        unsigned long hstart, hend;
+       unsigned long mmun_start;       /* For mmu_notifiers */
+       unsigned long mmun_end;         /* For mmu_notifiers */
 
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 
@@ -1954,6 +1988,9 @@ static void collapse_huge_page(struct mm_struct *mm,
        pte = pte_offset_map(pmd, address);
        ptl = pte_lockptr(mm, pmd);
 
+       mmun_start = address;
+       mmun_end   = address + HPAGE_PMD_SIZE;
+       mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
        spin_lock(&mm->page_table_lock); /* probably unnecessary */
        /*
         * After this gup_fast can't run anymore. This also removes
@@ -1961,8 +1998,9 @@ static void collapse_huge_page(struct mm_struct *mm,
         * huge and small TLB entries for the same virtual address
         * to avoid the risk of CPU bugs in that area.
         */
-       _pmd = pmdp_clear_flush_notify(vma, address, pmd);
+       _pmd = pmdp_clear_flush(vma, address, pmd);
        spin_unlock(&mm->page_table_lock);
+       mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
 
        spin_lock(ptl);
        isolated = __collapse_huge_page_isolate(vma, address, pte);
@@ -2004,7 +2042,7 @@ static void collapse_huge_page(struct mm_struct *mm,
        BUG_ON(!pmd_none(*pmd));
        page_add_new_anon_rmap(new_page, vma, address);
        set_pmd_at(mm, address, pmd, _pmd);
-       update_mmu_cache(vma, address, _pmd);
+       update_mmu_cache_pmd(vma, address, pmd);
        pgtable_trans_huge_deposit(mm, pgtable);
        spin_unlock(&mm->page_table_lock);