diff --git a/mm/memory.c b/mm/memory.c
index 6105f475fa8633edf5180792b2cf0c5288734f08..1b7dc662bf9f229063cb3e7b97e8e4c22147b92b 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1295,7 +1295,7 @@ static void unmap_page_range(struct mmu_gather *tlb,
 
 static void unmap_single_vma(struct mmu_gather *tlb,
                struct vm_area_struct *vma, unsigned long start_addr,
-               unsigned long end_addr, unsigned long *nr_accounted,
+               unsigned long end_addr,
                struct zap_details *details)
 {
        unsigned long start = max(vma->vm_start, start_addr);
@@ -1307,8 +1307,8 @@ static void unmap_single_vma(struct mmu_gather *tlb,
        if (end <= vma->vm_start)
                return;
 
-       if (vma->vm_flags & VM_ACCOUNT)
-               *nr_accounted += (end - start) >> PAGE_SHIFT;
+       if (vma->vm_file)
+               uprobe_munmap(vma, start, end);
 
        if (unlikely(is_pfn_mapping(vma)))
                untrack_pfn_vma(vma, 0, 0);
@@ -1339,8 +1339,6 @@ static void unmap_single_vma(struct mmu_gather *tlb,
  * @vma: the starting vma
  * @start_addr: virtual address at which to start unmapping
  * @end_addr: virtual address at which to end unmapping
- * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here
- * @details: details of nonlinear truncation or shared cache invalidation
  *
  * Unmap all pages in the vma list.
  *
@@ -1355,15 +1353,13 @@ static void unmap_single_vma(struct mmu_gather *tlb,
  */
 void unmap_vmas(struct mmu_gather *tlb,
                struct vm_area_struct *vma, unsigned long start_addr,
-               unsigned long end_addr, unsigned long *nr_accounted,
-               struct zap_details *details)
+               unsigned long end_addr)
 {
        struct mm_struct *mm = vma->vm_mm;
 
        mmu_notifier_invalidate_range_start(mm, start_addr, end_addr);
        for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next)
-               unmap_single_vma(tlb, vma, start_addr, end_addr, nr_accounted,
-                                details);
+               unmap_single_vma(tlb, vma, start_addr, end_addr, NULL);
        mmu_notifier_invalidate_range_end(mm, start_addr, end_addr);
 }
 
@@ -1376,19 +1372,21 @@ void unmap_vmas(struct mmu_gather *tlb,
  *
  * Caller must protect the VMA list
  */
-void zap_page_range(struct vm_area_struct *vma, unsigned long address,
+void zap_page_range(struct vm_area_struct *vma, unsigned long start,
                unsigned long size, struct zap_details *details)
 {
        struct mm_struct *mm = vma->vm_mm;
        struct mmu_gather tlb;
-       unsigned long end = address + size;
-       unsigned long nr_accounted = 0;
+       unsigned long end = start + size;
 
        lru_add_drain();
        tlb_gather_mmu(&tlb, mm, 0);
        update_hiwater_rss(mm);
-       unmap_vmas(&tlb, vma, address, end, &nr_accounted, details);
-       tlb_finish_mmu(&tlb, address, end);
+       mmu_notifier_invalidate_range_start(mm, start, end);
+       for ( ; vma && vma->vm_start < end; vma = vma->vm_next)
+               unmap_single_vma(&tlb, vma, start, end, details);
+       mmu_notifier_invalidate_range_end(mm, start, end);
+       tlb_finish_mmu(&tlb, start, end);
 }
 
 /**
@@ -1406,13 +1404,12 @@ static void zap_page_range_single(struct vm_area_struct *vma, unsigned long addr
        struct mm_struct *mm = vma->vm_mm;
        struct mmu_gather tlb;
        unsigned long end = address + size;
-       unsigned long nr_accounted = 0;
 
        lru_add_drain();
        tlb_gather_mmu(&tlb, mm, 0);
        update_hiwater_rss(mm);
        mmu_notifier_invalidate_range_start(mm, address, end);
-       unmap_single_vma(&tlb, vma, address, end, &nr_accounted, details);
+       unmap_single_vma(&tlb, vma, address, end, details);
        mmu_notifier_invalidate_range_end(mm, address, end);
        tlb_finish_mmu(&tlb, address, end);
 }
@@ -2911,7 +2908,6 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
        delayacct_set_flag(DELAYACCT_PF_SWAPIN);
        page = lookup_swap_cache(entry);
        if (!page) {
-               grab_swap_token(mm); /* Contend for token _before_ read-in */
                page = swapin_readahead(entry,
                                        GFP_HIGHUSER_MOVABLE, vma, address);
                if (!page) {
@@ -2941,6 +2937,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
        }
 
        locked = lock_page_or_retry(page, mm, flags);
+
        delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
        if (!locked) {
                ret |= VM_FAULT_RETRY;
@@ -3489,6 +3486,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        if (unlikely(is_vm_hugetlb_page(vma)))
                return hugetlb_fault(mm, vma, address, flags);
 
+retry:
        pgd = pgd_offset(mm, address);
        pud = pud_alloc(mm, pgd, address);
        if (!pud)
@@ -3502,13 +3500,24 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                                                          pmd, flags);
        } else {
                pmd_t orig_pmd = *pmd;
+               int ret;
+
                barrier();
                if (pmd_trans_huge(orig_pmd)) {
                        if (flags & FAULT_FLAG_WRITE &&
                            !pmd_write(orig_pmd) &&
-                           !pmd_trans_splitting(orig_pmd))
-                               return do_huge_pmd_wp_page(mm, vma, address,
-                                                          pmd, orig_pmd);
+                           !pmd_trans_splitting(orig_pmd)) {
+                               ret = do_huge_pmd_wp_page(mm, vma, address, pmd,
+                                                         orig_pmd);
+                               /*
+                                * If COW results in an oom, the huge pmd will
+                                * have been split, so retry the fault on the
+                                * pte for a smaller charge.
+                                */
+                               if (unlikely(ret & VM_FAULT_OOM))
+                                       goto retry;
+                               return ret;
+                       }
                        return 0;
                }
        }