Pileus Git - ~andy/linux/blobdiff - mm/hugetlb.c
core: Replace __get_cpu_var with __this_cpu_read if not used for an address.
[~andy/linux] / mm / hugetlb.c
index 636be5d6aaddbbb64a2d74f962d8412844c1c1aa..85855240933d7cf195ce1548faa75d8120c21478 100644 (file)
@@ -2448,8 +2448,11 @@ retry_avoidcopy:
         * When the original hugepage is shared one, it does not have
         * anon_vma prepared.
         */
-       if (unlikely(anon_vma_prepare(vma)))
+       if (unlikely(anon_vma_prepare(vma))) {
+               /* Caller expects lock to be held */
+               spin_lock(&mm->page_table_lock);
                return VM_FAULT_OOM;
+       }
 
        copy_user_huge_page(new_page, old_page, address, vma);
        __SetPageUptodate(new_page);
@@ -2589,7 +2592,8 @@ retry:
                 * So we need to block hugepage fault by PG_hwpoison bit check.
                 */
                if (unlikely(PageHWPoison(page))) {
-                       ret = VM_FAULT_HWPOISON;
+                       ret = VM_FAULT_HWPOISON |
+                             VM_FAULT_SET_HINDEX(h - hstates);
                        goto backout_unlocked;
                }
                page_dup_rmap(page);
@@ -2656,7 +2660,8 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                        migration_entry_wait(mm, (pmd_t *)ptep, address);
                        return 0;
                } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
-                       return VM_FAULT_HWPOISON;
+                       return VM_FAULT_HWPOISON_LARGE |
+                              VM_FAULT_SET_HINDEX(h - hstates);
        }
 
        ptep = huge_pte_alloc(mm, address, huge_page_size(h));
@@ -2733,7 +2738,8 @@ out_page_table_lock:
                unlock_page(pagecache_page);
                put_page(pagecache_page);
        }
-       unlock_page(page);
+       if (page != pagecache_page)
+               unlock_page(page);
 
 out_mutex:
        mutex_unlock(&hugetlb_instantiation_mutex);
@@ -2946,6 +2952,8 @@ void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
        hugetlb_acct_memory(h, -(chg - freed));
 }
 
+#ifdef CONFIG_MEMORY_FAILURE
+
 /* Should be called in hugetlb_lock */
 static int is_hugepage_on_freelist(struct page *hpage)
 {
@@ -2960,7 +2968,6 @@ static int is_hugepage_on_freelist(struct page *hpage)
        return 0;
 }
 
-#ifdef CONFIG_MEMORY_FAILURE
 /*
  * This function is called from memory failure code.
  * Assume the caller holds page lock of the head page.
@@ -2974,6 +2981,7 @@ int dequeue_hwpoisoned_huge_page(struct page *hpage)
        spin_lock(&hugetlb_lock);
        if (is_hugepage_on_freelist(hpage)) {
                list_del(&hpage->lru);
+               set_page_refcounted(hpage);
                h->free_huge_pages--;
                h->free_huge_pages_node[nid]--;
                ret = 0;