diff --git a/mm/memory.c b/mm/memory.c
index 806a37ec71bd595289eed44d810e9e1765da8f75..e48945ab362b87904d3a541e27902dc0d492cecb 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1576,9 +1576,16 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                                if (ret & VM_FAULT_ERROR) {
                                        if (ret & VM_FAULT_OOM)
                                                return i ? i : -ENOMEM;
-                                       if (ret &
-                                           (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE|
-                                            VM_FAULT_SIGBUS))
+                                       if (ret & (VM_FAULT_HWPOISON |
+                                                  VM_FAULT_HWPOISON_LARGE)) {
+                                               if (i)
+                                                       return i;
+                                               else if (gup_flags & FOLL_HWPOISON)
+                                                       return -EHWPOISON;
+                                               else
+                                                       return -EFAULT;
+                                       }
+                                       if (ret & VM_FAULT_SIGBUS)
                                                return i ? i : -EFAULT;
                                        BUG();
                                }
@@ -2165,10 +2172,10 @@ EXPORT_SYMBOL_GPL(apply_to_page_range);
  * handle_pte_fault chooses page fault handler according to an entry
  * which was read non-atomically.  Before making any commitment, on
  * those architectures or configurations (e.g. i386 with PAE) which
- * might give a mix of unmatched parts, do_swap_page and do_file_page
+ * might give a mix of unmatched parts, do_swap_page and do_nonlinear_fault
  * must check under lock before unmapping the pte and proceeding
  * (but do_wp_page is only called after already making such a check;
- * and do_anonymous_page and do_no_page can safely check later on).
+ * and do_anonymous_page can safely check later on).
  */
 static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
                                pte_t *page_table, pte_t orig_pte)
@@ -2364,7 +2371,7 @@ reuse:
                 * bit after it clear all dirty ptes, but before a racing
                 * do_wp_page installs a dirty pte.
                 *
-                * do_no_page is protected similarly.
+                * __do_fault is protected similarly.
                 */
                if (!page_mkwrite) {
                        wait_on_page_locked(dirty_page);