sched/numa: Set preferred NUMA node based on number of private faults
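
A minimal userspace sketch of the kind of nid/pid packing the new
page_nidpid_last()/nidpid_to_nid()/nidpid_to_pid() helpers imply; the field
width (EXAMPLE_PID_BITS) and the exact layout here are assumptions chosen
purely for illustration, not the kernel's definitions:

	#include <stdio.h>

	#define EXAMPLE_PID_BITS	8	/* width of the pid field -- assumption */
	#define EXAMPLE_PID_MASK	((1 << EXAMPLE_PID_BITS) - 1)

	/* pack a node id and a (truncated) pid into one value */
	static int example_nid_pid_to_nidpid(int nid, int pid)
	{
		return (nid << EXAMPLE_PID_BITS) | (pid & EXAMPLE_PID_MASK);
	}

	static int example_nidpid_to_nid(int nidpid)
	{
		return nidpid >> EXAMPLE_PID_BITS;
	}

	static int example_nidpid_to_pid(int nidpid)
	{
		return nidpid & EXAMPLE_PID_MASK;
	}

	int main(void)
	{
		int nidpid = example_nid_pid_to_nidpid(1, 42);

		printf("nid=%d pid=%d\n",
		       example_nidpid_to_nid(nidpid),
		       example_nidpid_to_pid(nidpid));
		return 0;
	}

Comparing both fields of the packed value for consecutive pages lets
change_pte_range() report a PMD as uniform only when every examined page
was last touched by the same task on the same node, which is what gates the
batched change_pmd_protnuma() path below.
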
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 41e02923fcd906109054b3e9c79f94687e147955..f0b087d1069ce1cd5117bff5397523eab43f306f 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -37,14 +37,15 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
 
 static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
                unsigned long addr, unsigned long end, pgprot_t newprot,
-               int dirty_accountable, int prot_numa, bool *ret_all_same_node)
+               int dirty_accountable, int prot_numa, bool *ret_all_same_nidpid)
 {
        struct mm_struct *mm = vma->vm_mm;
        pte_t *pte, oldpte;
        spinlock_t *ptl;
        unsigned long pages = 0;
-       bool all_same_node = true;
+       bool all_same_nidpid = true;
        int last_nid = -1;
+       int last_pid = -1;
 
        pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
        arch_enter_lazy_mmu_mode();
@@ -63,11 +64,18 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 
                                page = vm_normal_page(vma, addr, oldpte);
                                if (page) {
-                                       int this_nid = page_to_nid(page);
+                                       int nidpid = page_nidpid_last(page);
+                                       int this_nid = nidpid_to_nid(nidpid);
+                                       int this_pid = nidpid_to_pid(nidpid);
+
                                        if (last_nid == -1)
                                                last_nid = this_nid;
-                                       if (last_nid != this_nid)
-                                               all_same_node = false;
+                                       if (last_pid == -1)
+                                               last_pid = this_pid;
+                                       if (last_nid != this_nid ||
+                                           last_pid != this_pid) {
+                                               all_same_nidpid = false;
+                                       }
 
                                        if (!pte_numa(oldpte)) {
                                                ptent = pte_mknuma(ptent);
@@ -107,7 +115,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
        arch_leave_lazy_mmu_mode();
        pte_unmap_unlock(pte - 1, ptl);
 
-       *ret_all_same_node = all_same_node;
+       *ret_all_same_nidpid = all_same_nidpid;
        return pages;
 }
 
@@ -134,7 +142,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
        pmd_t *pmd;
        unsigned long next;
        unsigned long pages = 0;
-       bool all_same_node;
+       bool all_same_nidpid;
 
        pmd = pmd_offset(pud, addr);
        do {
@@ -158,7 +166,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
                if (pmd_none_or_clear_bad(pmd))
                        continue;
                pages += change_pte_range(vma, pmd, addr, next, newprot,
-                                dirty_accountable, prot_numa, &all_same_node);
+                                dirty_accountable, prot_numa, &all_same_nidpid);
 
                /*
                 * If we are changing protections for NUMA hinting faults then
@@ -166,7 +174,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
                 * node. This allows a regular PMD to be handled as one fault
                 * and effectively batches the taking of the PTL
                 */
-               if (prot_numa && all_same_node)
+               if (prot_numa && all_same_nidpid)
                        change_pmd_protnuma(vma->vm_mm, addr, pmd);
        } while (pmd++, addr = next, addr != end);