diff --git a/mm/mmap.c b/mm/mmap.c
index a7bf6a31c9f62be11cb8e5819322565b7cf2c266..3edfcdfa42d9f27a5238780065220ec3b4fc702a 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -30,6 +30,7 @@
 #include <linux/perf_event.h>
 #include <linux/audit.h>
 #include <linux/khugepaged.h>
+#include <linux/uprobes.h>
 
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
@@ -240,6 +241,8 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
        return next;
 }
 
+static unsigned long do_brk(unsigned long addr, unsigned long len);
+
 SYSCALL_DEFINE1(brk, unsigned long, brk)
 {
        unsigned long rlim, retval;
@@ -544,8 +547,15 @@ again:                     remove_next = 1 + (end > next->vm_end);
 
        if (file) {
                mapping = file->f_mapping;
-               if (!(vma->vm_flags & VM_NONLINEAR))
+               if (!(vma->vm_flags & VM_NONLINEAR)) {
                        root = &mapping->i_mmap;
+                       uprobe_munmap(vma, vma->vm_start, vma->vm_end);
+
+                       if (adjust_next)
+                               uprobe_munmap(next, next->vm_start,
+                                                       next->vm_end);
+               }
+
                mutex_lock(&mapping->i_mmap_mutex);
                if (insert) {
                        /*
@@ -615,8 +625,16 @@ again:                     remove_next = 1 + (end > next->vm_end);
        if (mapping)
                mutex_unlock(&mapping->i_mmap_mutex);
 
+       if (root) {
+               uprobe_mmap(vma);
+
+               if (adjust_next)
+                       uprobe_mmap(next);
+       }
+
        if (remove_next) {
                if (file) {
+                       uprobe_munmap(next, next->vm_start, next->vm_end);
                        fput(file);
                        if (next->vm_flags & VM_EXECUTABLE)
                                removed_exe_file_vma(mm);
@@ -636,6 +654,8 @@ again:                      remove_next = 1 + (end > next->vm_end);
                        goto again;
                }
        }
+       if (insert && file)
+               uprobe_mmap(insert);
 
        validate_mm(mm);
 
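
These vma_adjust() hunks wire uprobes into VMA reshaping: uprobe_munmap() is
called on the ranges about to change before the i_mmap lock is taken,
uprobe_mmap() re-registers breakpoints once the adjusted VMAs (and any newly
inserted one) are in place, and a VMA being removed outright gets a final
uprobe_munmap() before its file reference is dropped. The pairing they follow,
as a sketch (the helper name below is illustrative, not from this patch):

	static void reshape_exec_file_vma(struct vm_area_struct *vma)
	{
		/* old range is about to stop being mapped: lift breakpoints */
		uprobe_munmap(vma, vma->vm_start, vma->vm_end);

		/* ... move/resize the VMA under the appropriate locks ... */

		/* new range is live: re-install any matching breakpoints */
		uprobe_mmap(vma);
	}
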
@@ -958,8 +978,6 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
        struct mm_struct * mm = current->mm;
        struct inode *inode;
        vm_flags_t vm_flags;
-       int error;
-       unsigned long reqprot = prot;
 
        /*
         * Does the application expect PROT_READ to imply PROT_EXEC?
@@ -1081,13 +1099,8 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
                }
        }
 
-       error = security_file_mmap(file, reqprot, prot, flags, addr, 0);
-       if (error)
-               return error;
-
        return mmap_region(file, addr, len, flags, vm_flags, pgoff);
 }
-EXPORT_SYMBOL(do_mmap_pgoff);
 
 SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
                unsigned long, prot, unsigned long, flags,
@@ -1120,10 +1133,7 @@ SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
 
        flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
 
-       down_write(&current->mm->mmap_sem);
-       retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
-       up_write(&current->mm->mmap_sem);
-
+       retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
        if (file)
                fput(file);
 out:
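
With this hunk sys_mmap_pgoff() stops taking mmap_sem itself; the lock (and, by
the end of this series, the file-based security check that the earlier hunk
removed from do_mmap_pgoff()) moves into the vm_mmap_pgoff() helper. That
helper lives in mm/util.c, so a sketch of its shape for orientation (not
verbatim):

	unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
				    unsigned long len, unsigned long prot,
				    unsigned long flag, unsigned long pgoff)
	{
		unsigned long ret;
		struct mm_struct *mm = current->mm;

		/* file/prot LSM check, done outside mmap_sem */
		ret = security_mmap_file(file, prot, flag);
		if (!ret) {
			down_write(&mm->mmap_sem);
			ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff);
			up_write(&mm->mmap_sem);
		}
		return ret;
	}

Together with the dropped EXPORT_SYMBOL(do_mmap_pgoff), this makes
do_mmap_pgoff() a locked-context internal: modules map memory through
vm_mmap()/vm_mmap_pgoff() instead of grabbing mmap_sem by hand.
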
@@ -1344,6 +1354,11 @@ out:
                        mm->locked_vm += (len >> PAGE_SHIFT);
        } else if ((flags & MAP_POPULATE) && !(flags & MAP_NONBLOCK))
                make_pages_present(addr, addr + len);
+
+       if (file && uprobe_mmap(vma))
+               /* matching probes but cannot insert */
+               goto unmap_and_free_vma;
+
        return addr;
 
 unmap_and_free_vma:
@@ -1579,7 +1594,9 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
        if (addr & ~PAGE_MASK)
                return -EINVAL;
 
-       return arch_rebalance_pgtables(addr, len);
+       addr = arch_rebalance_pgtables(addr, len);
+       error = security_mmap_addr(addr);
+       return error ? error : addr;
 }
 
 EXPORT_SYMBOL(get_unmapped_area);
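
security_file_mmap(file, reqprot, prot, flags, addr, 0) bundled two unrelated
checks; this series splits them so security_mmap_addr() guards only the address
(here and in expand_downwards() below), while the file/prot check happens once
at the vm_mmap_pgoff() level. Note the return convention the hunk relies on:
get_unmapped_area() packs either a page-aligned address or a negative errno
into the same unsigned long, which is why it can return the LSM error
directly. Caller side, illustratively:

	addr = get_unmapped_area(file, hint, len, pgoff, flags);
	if (IS_ERR_VALUE(addr))
		return addr;	/* really a -errno, e.g. -EPERM from the LSM */
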
@@ -1589,33 +1606,34 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
 {
        struct vm_area_struct *vma = NULL;
 
-       if (mm) {
-               /* Check the cache first. */
-               /* (Cache hit rate is typically around 35%.) */
-               vma = mm->mmap_cache;
-               if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
-                       struct rb_node * rb_node;
-
-                       rb_node = mm->mm_rb.rb_node;
-                       vma = NULL;
-
-                       while (rb_node) {
-                               struct vm_area_struct * vma_tmp;
-
-                               vma_tmp = rb_entry(rb_node,
-                                               struct vm_area_struct, vm_rb);
-
-                               if (vma_tmp->vm_end > addr) {
-                                       vma = vma_tmp;
-                                       if (vma_tmp->vm_start <= addr)
-                                               break;
-                                       rb_node = rb_node->rb_left;
-                               } else
-                                       rb_node = rb_node->rb_right;
-                       }
-                       if (vma)
-                               mm->mmap_cache = vma;
+       if (WARN_ON_ONCE(!mm))          /* Remove this in linux-3.6 */
+               return NULL;
+
+       /* Check the cache first. */
+       /* (Cache hit rate is typically around 35%.) */
+       vma = mm->mmap_cache;
+       if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
+               struct rb_node *rb_node;
+
+               rb_node = mm->mm_rb.rb_node;
+               vma = NULL;
+
+               while (rb_node) {
+                       struct vm_area_struct *vma_tmp;
+
+                       vma_tmp = rb_entry(rb_node,
+                                          struct vm_area_struct, vm_rb);
+
+                       if (vma_tmp->vm_end > addr) {
+                               vma = vma_tmp;
+                               if (vma_tmp->vm_start <= addr)
+                                       break;
+                               rb_node = rb_node->rb_left;
+                       } else
+                               rb_node = rb_node->rb_right;
                }
+               if (vma)
+                       mm->mmap_cache = vma;
        }
        return vma;
 }
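
The find_vma() rewrite is mostly a de-indentation: the historically tolerated
mm == NULL case becomes a WARN_ON_ONCE() early return, and the cache-then-
rbtree lookup is unchanged. One property worth restating because every caller
depends on it: find_vma() returns the first VMA with vm_end > addr, which may
lie entirely above addr. A typical caller therefore looks like this
(illustrative, not from this patch):

	struct vm_area_struct *vma;

	down_read(&mm->mmap_sem);	/* result is only stable under mmap_sem */
	vma = find_vma(mm, addr);
	if (vma && vma->vm_start <= addr) {
		/* addr really falls inside this mapping */
	}
	up_read(&mm->mmap_sem);
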
@@ -1768,7 +1786,7 @@ int expand_downwards(struct vm_area_struct *vma,
                return -ENOMEM;
 
        address &= PAGE_MASK;
-       error = security_file_mmap(NULL, 0, 0, 0, address, 1);
+       error = security_mmap_addr(address);
        if (error)
                return error;
 
@@ -1862,15 +1880,20 @@ find_extend_vma(struct mm_struct * mm, unsigned long addr)
  */
 static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
 {
+       unsigned long nr_accounted = 0;
+
        /* Update high watermark before we lower total_vm */
        update_hiwater_vm(mm);
        do {
                long nrpages = vma_pages(vma);
 
+               if (vma->vm_flags & VM_ACCOUNT)
+                       nr_accounted += nrpages;
                mm->total_vm -= nrpages;
                vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
                vma = remove_vma(vma);
        } while (vma);
+       vm_unacct_memory(nr_accounted);
        validate_mm(mm);
 }
 
@@ -1885,13 +1908,11 @@ static void unmap_region(struct mm_struct *mm,
 {
        struct vm_area_struct *next = prev? prev->vm_next: mm->mmap;
        struct mmu_gather tlb;
-       unsigned long nr_accounted = 0;
 
        lru_add_drain();
        tlb_gather_mmu(&tlb, mm, 0);
        update_hiwater_rss(mm);
-       unmap_vmas(&tlb, vma, start, end, &nr_accounted, NULL);
-       vm_unacct_memory(nr_accounted);
+       unmap_vmas(&tlb, vma, start, end);
        free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
                                 next ? next->vm_start : 0);
        tlb_finish_mmu(&tlb, start, end);
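
These two hunks (and the exit_mmap() ones further down) move VM_ACCOUNT
bookkeeping out of unmap_vmas(), which loses its nr_accounted and details
arguments: pages are now tallied where the VMAs are finally dropped and
uncharged with one batched vm_unacct_memory() call. The pattern, in sketch
form (illustrative):

	struct vm_area_struct *vma;
	long nr_accounted = 0;

	for (vma = first; vma; vma = vma->vm_next)
		if (vma->vm_flags & VM_ACCOUNT)	/* charged at map time */
			nr_accounted += vma_pages(vma);
	vm_unacct_memory(nr_accounted);		/* single batched uncharge */
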
@@ -2106,20 +2127,23 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
        return 0;
 }
 
-EXPORT_SYMBOL(do_munmap);
-
-SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
+int vm_munmap(unsigned long start, size_t len)
 {
        int ret;
        struct mm_struct *mm = current->mm;
 
-       profile_munmap(addr);
-
        down_write(&mm->mmap_sem);
-       ret = do_munmap(mm, addr, len);
+       ret = do_munmap(mm, start, len);
        up_write(&mm->mmap_sem);
        return ret;
 }
+EXPORT_SYMBOL(vm_munmap);
+
+SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
+{
+       profile_munmap(addr);
+       return vm_munmap(addr, len);
+}
 
 static inline void verify_mm_writelocked(struct mm_struct *mm)
 {
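
do_munmap() stops being exported but keeps its caller-holds-mmap_sem contract;
out-of-mm callers get vm_munmap(), which takes the lock itself, and the munmap
syscall becomes a thin wrapper around it. Call-site effect (illustrative):

	/* before: every driver open-coded the locking */
	down_write(&current->mm->mmap_sem);
	ret = do_munmap(current->mm, addr, len);
	up_write(&current->mm->mmap_sem);

	/* after: */
	ret = vm_munmap(addr, len);
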
@@ -2136,7 +2160,7 @@ static inline void verify_mm_writelocked(struct mm_struct *mm)
  *  anonymous maps.  eventually we may be able to do some
  *  brk-specific accounting here.
  */
-unsigned long do_brk(unsigned long addr, unsigned long len)
+static unsigned long do_brk(unsigned long addr, unsigned long len)
 {
        struct mm_struct * mm = current->mm;
        struct vm_area_struct * vma, * prev;
@@ -2149,10 +2173,6 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
        if (!len)
                return addr;
 
-       error = security_file_mmap(NULL, 0, 0, 0, addr, 1);
-       if (error)
-               return error;
-
        flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
 
        error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
@@ -2232,7 +2252,17 @@ out:
        return addr;
 }
 
-EXPORT_SYMBOL(do_brk);
+unsigned long vm_brk(unsigned long addr, unsigned long len)
+{
+       struct mm_struct *mm = current->mm;
+       unsigned long ret;
+
+       down_write(&mm->mmap_sem);
+       ret = do_brk(addr, len);
+       up_write(&mm->mmap_sem);
+       return ret;
+}
+EXPORT_SYMBOL(vm_brk);
 
 /* Release all mmaps. */
 void exit_mmap(struct mm_struct *mm)
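
Same treatment for brk: do_brk() becomes static (hence the forward declaration
added near the top for sys_brk()), its low-address security_file_mmap() check
is subsumed by the security_mmap_addr() call now inside get_unmapped_area(),
and external users go through the exported vm_brk(), which handles the
locking. Typical in-kernel use, sketched after a binfmt loader zero-mapping
its bss (variable names illustrative):

	unsigned long map;

	map = vm_brk(bss_start, bss_end - bss_start);
	if (IS_ERR_VALUE(map))
		return map;	/* -errno packed in an unsigned long */
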
@@ -2264,8 +2294,7 @@ void exit_mmap(struct mm_struct *mm)
        tlb_gather_mmu(&tlb, mm, 1);
        /* update_hiwater_rss(mm) here? but nobody should be looking */
        /* Use -1 here to ensure all VMAs in the mm are unmapped */
-       unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL);
-       vm_unacct_memory(nr_accounted);
+       unmap_vmas(&tlb, vma, 0, -1);
 
        free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
        tlb_finish_mmu(&tlb, 0, -1);
@@ -2274,8 +2303,12 @@ void exit_mmap(struct mm_struct *mm)
         * Walk the list again, actually closing and freeing it,
         * with preemption enabled, without holding any MM locks.
         */
-       while (vma)
+       while (vma) {
+               if (vma->vm_flags & VM_ACCOUNT)
+                       nr_accounted += vma_pages(vma);
                vma = remove_vma(vma);
+       }
+       vm_unacct_memory(nr_accounted);
 
        BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
 }
@@ -2311,6 +2344,10 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
        if ((vma->vm_flags & VM_ACCOUNT) &&
             security_vm_enough_memory_mm(mm, vma_pages(vma)))
                return -ENOMEM;
+
+       if (vma->vm_file && uprobe_mmap(vma))
+               return -EINVAL;
+
        vma_link(mm, vma, prev, rb_link, rb_parent);
        return 0;
 }
@@ -2380,6 +2417,10 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
                        new_vma->vm_pgoff = pgoff;
                        if (new_vma->vm_file) {
                                get_file(new_vma->vm_file);
+
+                               if (uprobe_mmap(new_vma))
+                                       goto out_free_mempol;
+
                                if (vma->vm_flags & VM_EXECUTABLE)
                                        added_exe_file_vma(mm);
                        }
@@ -2484,10 +2525,6 @@ int install_special_mapping(struct mm_struct *mm,
        vma->vm_ops = &special_mapping_vmops;
        vma->vm_private_data = pages;
 
-       ret = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
-       if (ret)
-               goto out;
-
        ret = insert_vm_struct(mm, vma);
        if (ret)
                goto out;