mm, sched: Drop voluntary schedule from might_fault()
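Context for the change below (my paraphrase of the include/linux/kernel.h helpers of this era, not part of the patch): might_sleep() is the debug annotation plus a voluntary preemption point, so having might_fault() call __might_sleep() directly keeps the "sleeping function called from invalid context" check while dropping the cond_resched(). Roughly:

extern int _cond_resched(void);
#ifdef CONFIG_PREEMPT_VOLUNTARY
# define might_resched() _cond_resched()	/* voluntary preemption point */
#else
# define might_resched() do { } while (0)
#endif

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
  void __might_sleep(const char *file, int line, int preempt_offset);
  /* might_sleep() = "sleeping while atomic" check + voluntary reschedule */
# define might_sleep() \
	do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
#else
  static inline void __might_sleep(const char *file, int line,
				   int preempt_offset) { }
# define might_sleep() do { might_resched(); } while (0)
#endif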
diff --git a/mm/memory.c b/mm/memory.c
index ba94dec5b25900e47093544e11f8c765bf243f9d..c1f190f51f6f2d2ff62a4851e2fd37cbdcaacdc7 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -715,11 +715,11 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
         * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y
         */
        if (vma->vm_ops)
-               print_symbol(KERN_ALERT "vma->vm_ops->fault: %s\n",
-                               (unsigned long)vma->vm_ops->fault);
+               printk(KERN_ALERT "vma->vm_ops->fault: %pSR\n",
+                      vma->vm_ops->fault);
        if (vma->vm_file && vma->vm_file->f_op)
-               print_symbol(KERN_ALERT "vma->vm_file->f_op->mmap: %s\n",
-                               (unsigned long)vma->vm_file->f_op->mmap);
+               printk(KERN_ALERT "vma->vm_file->f_op->mmap: %pSR\n",
+                      vma->vm_file->f_op->mmap);
        dump_stack();
        add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
 }
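In the hunk above, the deprecated print_symbol() calls become plain printk() calls using the %pSR pointer extension: %pS prints the symbol (name+offset) for a code address, and the trailing R first passes the pointer through __builtin_extract_return_addr(), so the cast to unsigned long disappears from the call sites. A minimal, hypothetical call-site sketch (report_fault_handler() is mine, not from the patch):

#include <linux/mm.h>
#include <linux/printk.h>

/* Illustration only: print a vma's fault handler symbolically via %pSR. */
static void report_fault_handler(const struct vm_area_struct *vma)
{
	if (vma->vm_ops && vma->vm_ops->fault)
		printk(KERN_ALERT "vma->vm_ops->fault: %pSR\n",
		       vma->vm_ops->fault);
}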
@@ -3244,6 +3244,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
        page = alloc_zeroed_user_highpage_movable(vma, address);
        if (!page)
                goto oom;
+       /*
+        * The memory barrier inside __SetPageUptodate makes sure that
+        * preceding stores to the page contents become visible before
+        * the set_pte_at() write.
+        */
        __SetPageUptodate(page);
 
        if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))
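The new comment documents the ordering that do_anonymous_page() relies on. As a rough sketch (the _sketch name is mine; see include/linux/page-flags.h for the real helper), the non-atomic __SetPageUptodate() issues a write barrier before setting PG_uptodate, so the stores that zeroed the page are ordered before the flag, and before the set_pte_at() that later publishes the mapping:

#include <linux/page-flags.h>

/* Sketch, not the real definition: smp_wmb() orders the page-content
 * stores before the PG_uptodate store; the non-atomic __set_bit() is
 * fine because the page is not yet visible to any other user. */
static inline void __SetPageUptodate_sketch(struct page *page)
{
	smp_wmb();
	__set_bit(PG_uptodate, &page->flags);
}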
@@ -4217,7 +4222,8 @@ void might_fault(void)
        if (segment_eq(get_fs(), KERNEL_DS))
                return;
 
-       might_sleep();
+       __might_sleep(__FILE__, __LINE__, 0);
+
        /*
         * it would be nicer only to annotate paths which are not under
         * pagefault_disable, however that requires a larger audit and