thp: make MADV_HUGEPAGE check for mm->def_flags
diff --git a/mm/memory.c b/mm/memory.c
index 57361708d1a57d7bc11c8f34d269a35c50317dbb..e09c048131869ac3c956aa8d6cfbec9677cdc642 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1047,7 +1047,8 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
         * readonly mappings. The tradeoff is that copy_page_range is more
         * efficient than faulting.
         */
-       if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_PFNMAP|VM_INSERTPAGE))) {
+       if (!(vma->vm_flags & (VM_HUGETLB | VM_NONLINEAR |
+                              VM_PFNMAP | VM_MIXEDMAP))) {
                if (!vma->anon_vma)
                        return 0;
        }
@@ -1055,12 +1056,12 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
        if (is_vm_hugetlb_page(vma))
                return copy_hugetlb_page_range(dst_mm, src_mm, vma);
 
-       if (unlikely(is_pfn_mapping(vma))) {
+       if (unlikely(vma->vm_flags & VM_PFNMAP)) {
                /*
                 * We do not free on error cases below as remove_vma
                 * gets called on error from higher level routine
                 */
-               ret = track_pfn_vma_copy(vma);
+               ret = track_pfn_copy(vma);
                if (ret)
                        return ret;
        }
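For context, the renamed PAT tracking helpers used throughout this diff (track_pfn_copy, track_pfn_insert, track_pfn_remap, untrack_pfn) are declared roughly as follows in include/asm-generic/pgtable.h as of this series; treat this as a sketch of the API shape, not the authoritative header:

	/* sketch of the renamed pfn-tracking API (see asm-generic/pgtable.h) */
	int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
			    unsigned long pfn, unsigned long addr,
			    unsigned long size);
	int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
			     unsigned long pfn);
	int track_pfn_copy(struct vm_area_struct *vma);
	void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
			 unsigned long size);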
@@ -1327,8 +1328,8 @@ static void unmap_single_vma(struct mmu_gather *tlb,
        if (vma->vm_file)
                uprobe_munmap(vma, start, end);
 
-       if (unlikely(is_pfn_mapping(vma)))
-               untrack_pfn_vma(vma, 0, 0);
+       if (unlikely(vma->vm_flags & VM_PFNMAP))
+               untrack_pfn(vma, 0, 0);
 
        if (start != end) {
                if (unlikely(is_vm_hugetlb_page(vma))) {
@@ -2085,6 +2086,11 @@ out:
  * ask for a shared writable mapping!
  *
  * The page does not need to be reserved.
+ *
+ * Usually this function is called from f_op->mmap() handler
+ * under mm->mmap_sem write-lock, so it can change vma->vm_flags.
+ * Caller must set VM_MIXEDMAP on vma if it wants to call this
+ * function from other places, for example from page-fault handler.
  */
 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
                        struct page *page)
@@ -2093,7 +2099,11 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
                return -EFAULT;
        if (!page_count(page))
                return -EINVAL;
-       vma->vm_flags |= VM_INSERTPAGE;
+       if (!(vma->vm_flags & VM_MIXEDMAP)) {
+               BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
+               BUG_ON(vma->vm_flags & VM_PFNMAP);
+               vma->vm_flags |= VM_MIXEDMAP;
+       }
        return insert_page(vma, addr, page, vma->vm_page_prot);
 }
 EXPORT_SYMBOL(vm_insert_page);
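To illustrate the rule spelled out in the new comment above: an ->mmap() handler runs with mm->mmap_sem held for write, so it may simply call vm_insert_page() and let it set VM_MIXEDMAP. A minimal sketch, assuming a hypothetical mydrv driver with a single backing page (none of these names come from this patch):

	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
	{
		struct mydrv *drv = file->private_data;	/* hypothetical */

		/*
		 * mmap_sem is write-locked here, so vm_insert_page() is
		 * allowed to set VM_MIXEDMAP on the vma for us.
		 */
		return vm_insert_page(vma, vma->vm_start, drv->page);
	}

A caller inserting pages from a fault handler instead would have to set VM_MIXEDMAP itself at mmap() time, since the fault path only holds mmap_sem for read.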
@@ -2162,14 +2172,11 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 
        if (addr < vma->vm_start || addr >= vma->vm_end)
                return -EFAULT;
-       if (track_pfn_vma_new(vma, &pgprot, pfn, PAGE_SIZE))
+       if (track_pfn_insert(vma, &pgprot, pfn))
                return -EINVAL;
 
        ret = insert_pfn(vma, addr, pfn, pgprot);
 
-       if (ret)
-               untrack_pfn_vma(vma, pfn, PAGE_SIZE);
-
        return ret;
 }
 EXPORT_SYMBOL(vm_insert_pfn);
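The untrack-on-error call can be dropped here because track_pfn_insert(), unlike the old track_pfn_vma_new(), only looks up the memtype for a single pfn and adjusts the pgprot; it does not reserve a range, so there is nothing to undo if insert_pfn() fails. A sketch of a typical caller, assuming a vma set up with VM_PFNMAP at mmap() time (mydrv_fault and mydrv_pfn_for are made-up names, not part of this patch):

	static int mydrv_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
	{
		unsigned long pfn = mydrv_pfn_for(vmf->pgoff);	/* assumed helper */

		if (vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn))
			return VM_FAULT_SIGBUS;
		return VM_FAULT_NOPAGE;
	}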
@@ -2290,37 +2297,30 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
         * rest of the world about it:
         *   VM_IO tells people not to look at these pages
         *      (accesses can have side effects).
-        *   VM_RESERVED is specified all over the place, because
-        *      in 2.4 it kept swapout's vma scan off this vma; but
-        *      in 2.6 the LRU scan won't even find its pages, so this
-        *      flag means no more than count its pages in reserved_vm,
-        *      and omit it from core dump, even when VM_IO turned off.
         *   VM_PFNMAP tells the core MM that the base pages are just
         *      raw PFN mappings, and do not have a "struct page" associated
         *      with them.
+        *   VM_DONTEXPAND
+        *      Disable vma merging and expanding with mremap().
+        *   VM_DONTDUMP
+        *      Omit vma from core dump, even when VM_IO turned off.
         *
         * There's a horrible special case to handle copy-on-write
         * behaviour that some programs depend on. We mark the "original"
         * un-COW'ed pages by matching them up with "vma->vm_pgoff".
+        * See vm_normal_page() for details.
         */
-       if (addr == vma->vm_start && end == vma->vm_end) {
+       if (is_cow_mapping(vma->vm_flags)) {
+               if (addr != vma->vm_start || end != vma->vm_end)
+                       return -EINVAL;
                vma->vm_pgoff = pfn;
-               vma->vm_flags |= VM_PFN_AT_MMAP;
-       } else if (is_cow_mapping(vma->vm_flags))
-               return -EINVAL;
-
-       vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
+       }
 
-       err = track_pfn_vma_new(vma, &prot, pfn, PAGE_ALIGN(size));
-       if (err) {
-               /*
-                * To indicate that track_pfn related cleanup is not
-                * needed from higher level routine calling unmap_vmas
-                */
-               vma->vm_flags &= ~(VM_IO | VM_RESERVED | VM_PFNMAP);
-               vma->vm_flags &= ~VM_PFN_AT_MMAP;
+       err = track_pfn_remap(vma, &prot, pfn, addr, PAGE_ALIGN(size));
+       if (err)
                return -EINVAL;
-       }
+
+       vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
 
        BUG_ON(addr >= end);
        pfn -= addr >> PAGE_SHIFT;
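Note that the vm_flags are now set only after track_pfn_remap() succeeds, which is why the old error path (which had to clear VM_IO, VM_RESERVED, VM_PFNMAP and VM_PFN_AT_MMAP again) disappears. Callers also no longer set these flags themselves. A rough sketch of a driver mmap handler under the new scheme, where MYDRV_PHYS_BASE stands in for some device register window and is not from this patch:

	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
	{
		/*
		 * remap_pfn_range() applies VM_IO | VM_PFNMAP |
		 * VM_DONTEXPAND | VM_DONTDUMP itself on success.
		 */
		return remap_pfn_range(vma, vma->vm_start,
				       MYDRV_PHYS_BASE >> PAGE_SHIFT,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot);
	}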
@@ -2335,7 +2335,7 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
        } while (pgd++, addr = next, addr != end);
 
        if (err)
-               untrack_pfn_vma(vma, pfn, PAGE_ALIGN(size));
+               untrack_pfn(vma, pfn, PAGE_ALIGN(size));
 
        return err;
 }