mm: fix swapops.h:131 bug if remap_file_pages raced migration
index 080413036406e732410fefb5393d6cb4566a8d23..8fc049f9a5a6c5d511ac7a5ac0dd41c698ed99a3 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -812,12 +812,13 @@ int page_referenced(struct page *page,
 }
 
 static int page_mkclean_one(struct page *page, struct vm_area_struct *vma,
-                           unsigned long address)
+                           unsigned long address, void *arg)
 {
        struct mm_struct *mm = vma->vm_mm;
        pte_t *pte;
        spinlock_t *ptl;
        int ret = 0;
+       int *cleaned = arg;
 
        pte = page_check_address(page, mm, address, &ptl, 1);
        if (!pte)
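
This hunk moves page_mkclean_one() to the callback signature that
rmap_walk() expects: the walker hands every callback an opaque
void *arg, which here carries a pointer to the caller's "cleaned"
counter. As the new prototype implies, the rmap_one callback type is
roughly:

    int (*rmap_one)(struct page *page, struct vm_area_struct *vma,
                    unsigned long address, void *arg);

Returning SWAP_AGAIN (next hunk) tells the walker to keep iterating
over the remaining VMAs rather than stop at the first mapping.
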
@@ -836,44 +837,44 @@ static int page_mkclean_one(struct page *page, struct vm_area_struct *vma,
 
        pte_unmap_unlock(pte, ptl);
 
-       if (ret)
+       if (ret) {
                mmu_notifier_invalidate_page(mm, address);
+               (*cleaned)++;
+       }
 out:
-       return ret;
+       return SWAP_AGAIN;
 }
 
-static int page_mkclean_file(struct address_space *mapping, struct page *page)
+static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg)
 {
-       pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
-       struct vm_area_struct *vma;
-       int ret = 0;
-
-       BUG_ON(PageAnon(page));
+       if (vma->vm_flags & VM_SHARED)
+               return false;
 
-       mutex_lock(&mapping->i_mmap_mutex);
-       vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
-               if (vma->vm_flags & VM_SHARED) {
-                       unsigned long address = vma_address(page, vma);
-                       ret += page_mkclean_one(page, vma, address);
-               }
-       }
-       mutex_unlock(&mapping->i_mmap_mutex);
-       return ret;
+       return true;
 }
 
 int page_mkclean(struct page *page)
 {
-       int ret = 0;
+       int cleaned = 0;
+       struct address_space *mapping;
+       struct rmap_walk_control rwc = {
+               .arg = (void *)&cleaned,
+               .rmap_one = page_mkclean_one,
+               .invalid_vma = invalid_mkclean_vma,
+       };
 
        BUG_ON(!PageLocked(page));
 
-       if (page_mapped(page)) {
-               struct address_space *mapping = page_mapping(page);
-               if (mapping)
-                       ret = page_mkclean_file(mapping, page);
-       }
+       if (!page_mapped(page))
+               return 0;
 
-       return ret;
+       mapping = page_mapping(page);
+       if (!mapping)
+               return 0;
+
+       rmap_walk(page, &rwc);
+
+       return cleaned;
 }
 EXPORT_SYMBOL_GPL(page_mkclean);
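
With the conversion above, page_mkclean() no longer open-codes the
interval-tree walk over the mapping; it describes the walk
declaratively and lets rmap_walk() drive it. Note that
invalid_mkclean_vma() returns true for VMAs the walk should skip:
only VM_SHARED mappings can hold dirty file pages worth cleaning, so
everything else is filtered out before rmap_one ever runs. Below is a
minimal sketch of the same pattern, counting how many VMAs map a
page; the rmap_walk_control fields (.arg, .rmap_one, .invalid_vma)
are the ones this diff uses, while the helper names are hypothetical:

    #include <linux/mm.h>
    #include <linux/rmap.h>         /* rmap_walk_control, SWAP_AGAIN */

    static int count_one(struct page *page, struct vm_area_struct *vma,
                         unsigned long address, void *arg)
    {
            int *count = arg;       /* unpack the caller's state */

            (*count)++;
            return SWAP_AGAIN;      /* keep walking the other VMAs */
    }

    static int page_map_count(struct page *page)
    {
            int count = 0;
            struct rmap_walk_control rwc = {
                    .arg = (void *)&count,
                    .rmap_one = count_one,
                    /* no .invalid_vma filter: visit every VMA */
            };

            BUG_ON(!PageLocked(page)); /* same rule as page_mkclean() */
            rmap_walk(page, &rwc);
            return count;
    }
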
 
@@ -893,9 +894,9 @@ void page_move_anon_rmap(struct page *page,
 {
        struct anon_vma *anon_vma = vma->anon_vma;
 
-       VM_BUG_ON(!PageLocked(page));
+       VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON(!anon_vma);
-       VM_BUG_ON(page->index != linear_page_index(vma, address));
+       VM_BUG_ON_PAGE(page->index != linear_page_index(vma, address), page);
 
        anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
        page->mapping = (struct address_space *) anon_vma;
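
This hunk, and the matching ones further down, converts bare
VM_BUG_ON() assertions into VM_BUG_ON_PAGE(), which dumps the state
of the offending struct page before hitting BUG(), so the resulting
oops identifies the page that violated the invariant. Going by
include/linux/mmdebug.h of this era, the macro is roughly:

    #define VM_BUG_ON_PAGE(cond, page)              \
            do {                                    \
                    if (unlikely(cond)) {           \
                            dump_page(page);        \
                            BUG();                  \
                    }                               \
            } while (0)

With CONFIG_DEBUG_VM disabled it, like VM_BUG_ON(), costs nothing at
runtime.
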
@@ -994,7 +995,7 @@ void do_page_add_anon_rmap(struct page *page,
        if (unlikely(PageKsm(page)))
                return;
 
-       VM_BUG_ON(!PageLocked(page));
+       VM_BUG_ON_PAGE(!PageLocked(page), page);
        /* address might be in next vma when migration races vma_adjust */
        if (first)
                __page_set_anon_rmap(page, vma, address, exclusive);
@@ -1359,8 +1360,9 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
 }
 
 static int try_to_unmap_nonlinear(struct page *page,
-               struct address_space *mapping, struct vm_area_struct *vma)
+               struct address_space *mapping, void *arg)
 {
+       struct vm_area_struct *vma;
        int ret = SWAP_AGAIN;
        unsigned long cursor;
        unsigned long max_nl_cursor = 0;
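
try_to_unmap_nonlinear() adopts the same convention: it now takes the
walk's opaque arg instead of a single VMA, and vma becomes a local,
since the function already visits every nonlinear VMA itself by
walking the mapping's nonlinear list. In kernels of this vintage that
loop looks roughly like the following (an assumption; the loop body
sits below the portion of the hunk shown here):

    list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
                        shared.nonlinear) {
            /* cursor-based unmapping of one nonlinear VMA */
    }
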
@@ -1480,7 +1482,7 @@ int try_to_unmap(struct page *page, enum ttu_flags flags)
                .anon_lock = page_lock_anon_vma_read,
        };
 
-       VM_BUG_ON(!PageHuge(page) && PageTransHuge(page));
+       VM_BUG_ON_PAGE(!PageHuge(page) && PageTransHuge(page), page);
 
        /*
         * During exec, a temporary VMA is setup and later moved.
@@ -1532,7 +1534,7 @@ int try_to_munlock(struct page *page)
 
        };
 
-       VM_BUG_ON(!PageLocked(page) || PageLRU(page));
+       VM_BUG_ON_PAGE(!PageLocked(page) || PageLRU(page), page);
 
        ret = rmap_walk(page, &rwc);
        return ret;
@@ -1662,7 +1664,7 @@ static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
        if (list_empty(&mapping->i_mmap_nonlinear))
                goto done;
 
-       ret = rwc->file_nonlinear(page, mapping, vma);
+       ret = rwc->file_nonlinear(page, mapping, rwc->arg);
 
 done:
        mutex_unlock(&mapping->i_mmap_mutex);
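
Finally, rmap_walk_file() forwards rwc->arg to the file_nonlinear
callback instead of vma, which at this point is only whatever the
preceding interval-tree loop left behind and is meaningless for the
nonlinear path. Together with the try_to_unmap_nonlinear() change
above, the callback member of rmap_walk_control is now effectively:

    int (*file_nonlinear)(struct page *page,
                          struct address_space *mapping, void *arg);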