* When the original hugepage is a shared one, it does not have
* anon_vma prepared.
*/
- if (unlikely(anon_vma_prepare(vma)))
+ if (unlikely(anon_vma_prepare(vma))) {
+ /* Caller expects lock to be held */
+ spin_lock(&mm->page_table_lock);
return VM_FAULT_OOM;
+ }
copy_user_huge_page(new_page, old_page, address, vma);
__SetPageUptodate(new_page);
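A minimal user-space sketch of the locking convention this hunk restores; the helper name, the pthread mutex, and the alloc callback are illustrative, not from the patch. A function entered with a lock held that drops it for blocking work must re-take the lock on every return path, including the failure one, because its caller unlocks unconditionally.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

/* Entered with table_lock held; must return with it held, even on error. */
static int refill_locked(int *slot, int (*alloc_fn)(void))
{
	int val;

	pthread_mutex_unlock(&table_lock);	/* drop the lock for blocking work */
	val = alloc_fn();
	pthread_mutex_lock(&table_lock);	/* caller expects the lock to be held */

	if (val < 0)
		return -1;		/* error path: lock already re-taken above */
	*slot = val;
	return 0;
}

static int failing_alloc(void)
{
	return -1;
}

int main(void)
{
	int slot = 0;

	pthread_mutex_lock(&table_lock);
	if (refill_locked(&slot, failing_alloc))
		fprintf(stderr, "allocation failed, lock still held\n");
	pthread_mutex_unlock(&table_lock);	/* balanced: lock/unlock stay paired */
	return 0;
}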
* So we need to block hugepage fault by PG_hwpoison bit check.
*/
if (unlikely(PageHWPoison(page))) {
- ret = VM_FAULT_HWPOISON;
+ ret = VM_FAULT_HWPOISON |
+ VM_FAULT_SET_HINDEX(h - hstates);
goto backout_unlocked;
}
page_dup_rmap(page);
migration_entry_wait(mm, (pmd_t *)ptep, address);
return 0;
} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
- return VM_FAULT_HWPOISON;
+ return VM_FAULT_HWPOISON_LARGE |
+ VM_FAULT_SET_HINDEX(h - hstates);
}
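Here h - hstates is the index of the faulting page's hstate in the global hstates[] array, packed into the high bits of the fault code so the caller can tell which huge page size was hit. A stand-alone sketch of that encoding follows; the macro names, values, and bit positions are illustrative stand-ins, while the real VM_FAULT_SET_HINDEX()/VM_FAULT_GET_HINDEX() definitions live in include/linux/mm.h and have varied across kernel versions.

#include <assert.h>

/* Illustrative stand-ins for the VM_FAULT_* hindex machinery. */
#define FAULT_HWPOISON_LARGE	0x0020u
#define FAULT_SET_HINDEX(x)	((unsigned int)(x) << 12)
#define FAULT_GET_HINDEX(x)	(((x) >> 12) & 0xfu)

int main(void)
{
	unsigned int hindex = 1;	/* e.g. h - hstates for the second hstate */
	unsigned int ret = FAULT_HWPOISON_LARGE | FAULT_SET_HINDEX(hindex);

	/* The fault handler's caller can recover both the flag and the index. */
	assert(ret & FAULT_HWPOISON_LARGE);
	assert(FAULT_GET_HINDEX(ret) == hindex);
	return 0;
}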
ptep = huge_pte_alloc(mm, address, huge_page_size(h));
unlock_page(pagecache_page);
put_page(pagecache_page);
}
- unlock_page(page);
+ if (page != pagecache_page)
+ unlock_page(page);
out_mutex:
mutex_unlock(&hugetlb_instantiation_mutex);
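A stand-alone sketch of the unlock balance behind this hunk, using a fake page type rather than the kernel's: earlier in hugetlb_fault() the extra page is locked only when it differs from the page-cache page, so the exit path has to mirror that condition or a page locked once would be unlocked twice.

#include <assert.h>
#include <stdbool.h>

struct fake_page {
	bool locked;
};

static void lock_fake(struct fake_page *p)
{
	assert(!p->locked);	/* locking twice would be a bug */
	p->locked = true;
}

static void unlock_fake(struct fake_page *p)
{
	assert(p->locked);	/* unlocking twice would be a bug */
	p->locked = false;
}

int main(void)
{
	struct fake_page a = { false };
	/* In the shared-mapping case both pointers name the same page. */
	struct fake_page *pagecache_page = &a;
	struct fake_page *page = &a;

	lock_fake(pagecache_page);
	if (page != pagecache_page)	/* mirrors the conditional lock_page() */
		lock_fake(page);

	/* ... fault handling would run here ... */

	unlock_fake(pagecache_page);
	if (page != pagecache_page)	/* the fix: skip the second unlock */
		unlock_fake(page);

	assert(!a.locked);
	return 0;
}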
hugetlb_acct_memory(h, -(chg - freed));
}
+#ifdef CONFIG_MEMORY_FAILURE
+
/* Should be called in hugetlb_lock */
static int is_hugepage_on_freelist(struct page *hpage)
{
return 0;
}
-#ifdef CONFIG_MEMORY_FAILURE
/*
* This function is called from memory failure code.
* Assume the caller holds page lock of the head page.
spin_lock(&hugetlb_lock);
if (is_hugepage_on_freelist(hpage)) {
list_del(&hpage->lru);
+ set_page_refcounted(hpage);
h->free_huge_pages--;
h->free_huge_pages_node[nid]--;
ret = 0;