X-Git-Url: http://pileus.org/git/?a=blobdiff_plain;f=mm%2Fhugetlb.c;h=65f38c218207231e0bf5b612ccacc5858be1b4e0;hb=ba198098a21a5dc8885fddfb308135bc2f138003;hp=450493d255727c95f92d769353673bfeeb633c92;hpb=bad44b5be84cf3bb1ff900bec02ee61e1993328c;p=~andy%2Flinux

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 450493d2557..65f38c21820 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1278,6 +1278,9 @@ static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
 		if (!ret)
 			goto out;
 
+		/* Bail for signals. Probably ctrl-c from user */
+		if (signal_pending(current))
+			goto out;
 	}
 
 	/*
@@ -2237,6 +2240,12 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
 		+ (vma->vm_pgoff >> PAGE_SHIFT);
 	mapping = (struct address_space *)page_private(page);
 
+	/*
+	 * Take the mapping lock for the duration of the table walk. As
+	 * this mapping should be shared between all the VMAs,
+	 * __unmap_hugepage_range() is called as the lock is already held
+	 */
+	spin_lock(&mapping->i_mmap_lock);
 	vma_prio_tree_foreach(iter_vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
 		/* Do not unmap the current VMA */
 		if (iter_vma == vma)
@@ -2250,10 +2259,11 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
 		 * from the time of fork. This would look like data corruption
 		 */
 		if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
-			unmap_hugepage_range(iter_vma,
+			__unmap_hugepage_range(iter_vma,
 				address, address + huge_page_size(h), page);
 	}
+	spin_unlock(&mapping->i_mmap_lock);
 
 	return 1;
 }
 
@@ -2293,6 +2303,9 @@ retry_avoidcopy:
 		outside_reserve = 1;
 
 	page_cache_get(old_page);
+
+	/* Drop page_table_lock as buddy allocator may be called */
+	spin_unlock(&mm->page_table_lock);
 	new_page = alloc_huge_page(vma, address, outside_reserve);
 
 	if (IS_ERR(new_page)) {
@@ -2310,19 +2323,25 @@ retry_avoidcopy:
 			if (unmap_ref_private(mm, vma, old_page, address)) {
 				BUG_ON(page_count(old_page) != 1);
 				BUG_ON(huge_pte_none(pte));
+				spin_lock(&mm->page_table_lock);
 				goto retry_avoidcopy;
 			}
 			WARN_ON_ONCE(1);
 		}
 
+		/* Caller expects lock to be held */
+		spin_lock(&mm->page_table_lock);
 		return -PTR_ERR(new_page);
 	}
 
-	spin_unlock(&mm->page_table_lock);
 	copy_huge_page(new_page, old_page, address, vma);
 	__SetPageUptodate(new_page);
-	spin_lock(&mm->page_table_lock);
 
+	/*
+	 * Retake the page_table_lock to check for racing updates
+	 * before the page tables are altered
+	 */
+	spin_lock(&mm->page_table_lock);
 	ptep = huge_pte_offset(mm, address & huge_page_mask(h));
 	if (likely(pte_same(huge_ptep_get(ptep), pte))) {
 		/* Break COW */
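
Note on the unmap_ref_private() hunks: the prio-tree walk now holds
mapping->i_mmap_lock for its whole duration, so the loop body must call
__unmap_hugepage_range(). By the usual kernel convention the
double-underscore variant assumes the caller already holds the lock,
while plain unmap_hugepage_range() takes the same lock itself and would
deadlock if called with it held. A minimal userspace sketch of that
convention (pthread mutex standing in for the spinlock; the names
__unmap_range, unmap_range, and walk_all_vmas are hypothetical, not the
kernel implementation):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t i_mmap_lock = PTHREAD_MUTEX_INITIALIZER;

	/* Double-underscore variant: caller must hold i_mmap_lock. */
	static void __unmap_range(int start, int end)
	{
		printf("unmapping [%d, %d)\n", start, end);
	}

	/* Plain variant: takes and releases the lock around the work. */
	static void unmap_range(int start, int end)
	{
		pthread_mutex_lock(&i_mmap_lock);
		__unmap_range(start, end);
		pthread_mutex_unlock(&i_mmap_lock);
	}

	/* Mirrors the patched walk: the lock is held across the whole
	 * iteration, so only the __ variant may be called inside it;
	 * unmap_range() here would self-deadlock on the non-recursive
	 * lock. */
	static void walk_all_vmas(void)
	{
		pthread_mutex_lock(&i_mmap_lock);
		for (int vma = 0; vma < 4; vma++)
			__unmap_range(vma, vma + 1);
		pthread_mutex_unlock(&i_mmap_lock);
	}

	int main(void)
	{
		unmap_range(0, 1);	/* standalone caller */
		walk_all_vmas();	/* locked walk */
		return 0;
	}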
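Note on the hugetlb_cow() hunks: page_table_lock is a spinlock and
alloc_huge_page() may reach the buddy allocator, which can sleep, so the
lock is dropped before the allocation and retaken afterwards. Because
another thread may have changed the PTE while the lock was dropped, the
existing pte_same() check revalidates it before the page tables are
altered, and the error paths re-acquire the lock since the caller
expects it held on return. A minimal userspace sketch of that
drop/revalidate pattern (pthread mutex standing in for the spinlock,
simplified types, hypothetical names; not the kernel code):

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
	static int pte = 42;	/* stand-in for the guarded page-table entry */

	/* Called, and must return, with table_lock held. */
	static int cow_fault(void)
	{
		int snapshot = pte;	/* value observed while locked */
		int *new_page;

		/* Drop the lock: the allocation may block, as the buddy
		 * allocator may in the kernel. */
		pthread_mutex_unlock(&table_lock);
		new_page = malloc(sizeof(*new_page));
		if (!new_page) {
			/* Error path: caller expects the lock to be held. */
			pthread_mutex_lock(&table_lock);
			return -1;
		}

		/* Retake the lock and check for racing updates before the
		 * guarded state is altered. */
		pthread_mutex_lock(&table_lock);
		if (pte == snapshot)
			pte = 0;	/* no race: commit, "break COW" */
		free(new_page);
		return 0;
	}

	int main(void)
	{
		pthread_mutex_lock(&table_lock);
		cow_fault();
		pthread_mutex_unlock(&table_lock);
		printf("pte = %d\n", pte);
		return 0;
	}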