diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index d7ee1691fd21038a87cbfeffbff56897deb2fd4b..6001ee6347a9694f4a9b31ef9060913ff30440bf 100644 (file)
@@ -574,19 +574,19 @@ static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
 
        *hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
        if (unlikely(!*hugepage_kobj)) {
-               printk(KERN_ERR "hugepage: failed kobject create\n");
+               printk(KERN_ERR "hugepage: failed to create transparent hugepage kobject\n");
                return -ENOMEM;
        }
 
        err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
        if (err) {
-               printk(KERN_ERR "hugepage: failed register hugeage group\n");
+               printk(KERN_ERR "hugepage: failed to register transparent hugepage group\n");
                goto delete_obj;
        }
 
        err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
        if (err) {
-               printk(KERN_ERR "hugepage: failed register hugeage group\n");
+               printk(KERN_ERR "hugepage: failed to register transparent hugepage group\n");
                goto remove_hp_group;
        }
 
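This hunk only reworks the error strings; the surrounding function is the usual kobject plus sysfs-attribute-group setup, unwinding in reverse order when a step fails. A minimal sketch of that pattern, assuming only the standard kobject/sysfs API (the example_* names and the empty attribute group are illustrative, not taken from this diff):

        static struct attribute *example_attrs[] = { NULL };
        static struct attribute_group example_attr_group = { .attrs = example_attrs };
        static struct kobject *example_kobj;

        static int __init example_sysfs_init(void)
        {
                int err;

                /* Create a directory under /sys/kernel/mm (mm_kobj, as in the hunk above). */
                example_kobj = kobject_create_and_add("example", mm_kobj);
                if (!example_kobj)
                        return -ENOMEM;

                /* Attach an attribute group; on failure drop the kobject reference again. */
                err = sysfs_create_group(example_kobj, &example_attr_group);
                if (err) {
                        printk(KERN_ERR "example: failed to register attribute group\n");
                        kobject_put(example_kobj);
                }
                return err;
        }
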
@@ -1460,9 +1460,10 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
        if (__pmd_trans_huge_lock(pmd, vma) == 1) {
                pmd_t entry;
                entry = pmdp_get_and_clear(mm, addr, pmd);
-               if (!prot_numa)
+               if (!prot_numa) {
                        entry = pmd_modify(entry, newprot);
-               else {
+                       BUG_ON(pmd_write(entry));
+               } else {
                        struct page *page = pmd_page(*pmd);
 
                        /* only check non-shared pages */
@@ -1471,7 +1472,6 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                                entry = pmd_mknuma(entry);
                        }
                }
-               BUG_ON(pmd_write(entry));
                set_pmd_at(mm, addr, pmd, entry);
                spin_unlock(&vma->vm_mm->page_table_lock);
                ret = 1;
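These two hunks move the BUG_ON(pmd_write(entry)) assertion into the !prot_numa branch: only there has pmd_modify() applied newprot, so only there is it valid to insist that the resulting PMD is read-only. On the NUMA-hinting path the entry keeps its original permissions, possibly including write, and is at most marked NUMA, so the old unconditional assertion could fire spuriously. A condensed sketch of the resulting flow (paraphrased from the hunks; the non-shared-page check is reduced to a comment here):

        entry = pmdp_get_and_clear(mm, addr, pmd);
        if (!prot_numa) {
                entry = pmd_modify(entry, newprot);
                /* newprot has been applied, so the PMD must not be writable */
                BUG_ON(pmd_write(entry));
        } else {
                /* only mark non-shared, not-yet-NUMA pages (check elided in this sketch) */
                entry = pmd_mknuma(entry);
        }
        set_pmd_at(mm, addr, pmd, entry);
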
@@ -1819,9 +1819,19 @@ int split_huge_page(struct page *page)
 
        BUG_ON(is_huge_zero_pfn(page_to_pfn(page)));
        BUG_ON(!PageAnon(page));
-       anon_vma = page_lock_anon_vma_read(page);
+
+       /*
+        * The caller does not necessarily hold an mmap_sem that would prevent
+        * the anon_vma disappearing, so we first take a reference to it
+        * and then lock the anon_vma for write. This is similar to
+        * page_lock_anon_vma_read except the write lock is taken to serialise
+        * against parallel split or collapse operations.
+        */
+       anon_vma = page_get_anon_vma(page);
        if (!anon_vma)
                goto out;
+       anon_vma_lock_write(anon_vma);
+
        ret = 0;
        if (!PageCompound(page))
                goto out_unlock;
@@ -1832,7 +1842,8 @@ int split_huge_page(struct page *page)
 
        BUG_ON(PageCompound(page));
 out_unlock:
-       page_unlock_anon_vma_read(anon_vma);
+       anon_vma_unlock(anon_vma);
+       put_anon_vma(anon_vma);
 out:
        return ret;
 }
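
The split_huge_page() hunks replace page_lock_anon_vma_read() with an explicit reference plus write lock: the caller may not hold mmap_sem, so the anon_vma is first pinned with page_get_anon_vma() and then locked for write to serialise against parallel split or collapse operations, with the unlock and the reference drop done in reverse order on the way out. A minimal sketch of that pattern (anon_vma_unlock() was later renamed anon_vma_unlock_write() in newer kernels):

        anon_vma = page_get_anon_vma(page);     /* pin it so it cannot vanish under us */
        if (!anon_vma)
                goto out;                       /* page is not (or no longer) anon mapped */
        anon_vma_lock_write(anon_vma);          /* exclude parallel split/collapse */

        /* ... split the compound page under the write lock ... */

        anon_vma_unlock(anon_vma);              /* release in reverse order: the lock ... */
        put_anon_vma(anon_vma);                 /* ... then the pinning reference */
out:
        return ret;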