diff --git a/fs/exec.c b/fs/exec.c
index a96a4885bbbfa130750caa70b9b3f504ea10b244..643019585574956f44bdca47c52c3993046db069 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -613,7 +613,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
                 * when the old and new regions overlap, clear from new_end.
                 */
                free_pgd_range(&tlb, new_end, old_end, new_end,
-                       vma->vm_next ? vma->vm_next->vm_start : 0);
+                       vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
        } else {
                /*
                 * otherwise, clean from old_start; this is done to not touch
@@ -622,7 +622,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
                 * for the others it's just a little faster.
                 */
                free_pgd_range(&tlb, old_start, old_end, new_end,
-                       vma->vm_next ? vma->vm_next->vm_start : 0);
+                       vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
        }
        tlb_finish_mmu(&tlb, new_end, old_end);
 
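The two hunks above replace the hard-coded ceiling of 0 with USER_PGTABLES_CEILING, so an architecture can tell free_pgd_range() to stop short of the top of the user address space rather than freeing every page-table level above the last VMA. For reference, the generic fallback (a sketch from memory of the companion asm-generic/pgtable.h change, not part of this diff) keeps the old value, making these hunks a no-op on architectures that don't override it:

	/*
	 * Sketch of the asm-generic fallback: architectures that must keep
	 * top-level user page-table entries alive (for example to preserve
	 * kernel mappings copied into each pgd) define their own ceiling;
	 * the generic 0UL keeps the historical "free everything above the
	 * last VMA" behaviour.
	 */
	#ifndef USER_PGTABLES_CEILING
	#define USER_PGTABLES_CEILING	0UL
	#endif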
@@ -802,6 +802,15 @@ int kernel_read(struct file *file, loff_t offset,
 
 EXPORT_SYMBOL(kernel_read);
 
+ssize_t read_code(struct file *file, unsigned long addr, loff_t pos, size_t len)
+{
+       ssize_t res = vfs_read(file, (void __user *)addr, len, &pos);
+       if (res > 0)
+               flush_icache_range(addr, addr + len);
+       return res;
+}
+EXPORT_SYMBOL(read_code);
+
 static int exec_mmap(struct mm_struct *mm)
 {
        struct task_struct *tsk;
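read_code() gives binary-format loaders a single helper that reads part of an executable image into an already-mapped user region and then flushes the instruction cache for that range, a sequence the binfmt loaders previously open-coded as a read plus flush_icache_range(). A usage sketch follows; text_addr, text_offset and text_len are hypothetical names, not identifiers from this diff:

	/*
	 * Hypothetical loader snippet: copy the text segment into a region
	 * the loader has already mapped at text_addr.  On success read_code()
	 * has already flushed the icache for the copied range; it returns
	 * the number of bytes read or a negative errno.
	 */
	ssize_t copied = read_code(bprm->file, text_addr, text_offset, text_len);
	if (copied != text_len)
		return copied < 0 ? copied : -EIO;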
@@ -898,11 +907,13 @@ static int de_thread(struct task_struct *tsk)
 
                sig->notify_count = -1; /* for exit_notify() */
                for (;;) {
+                       threadgroup_change_begin(tsk);
                        write_lock_irq(&tasklist_lock);
                        if (likely(leader->exit_state))
                                break;
                        __set_current_state(TASK_KILLABLE);
                        write_unlock_irq(&tasklist_lock);
+                       threadgroup_change_end(tsk);
                        schedule();
                        if (unlikely(__fatal_signal_pending(tsk)))
                                goto killed;
@@ -960,6 +971,7 @@ static int de_thread(struct task_struct *tsk)
                if (unlikely(leader->ptrace))
                        __wake_up_parent(leader, leader->parent);
                write_unlock_irq(&tasklist_lock);
+               threadgroup_change_end(tsk);
 
                release_task(leader);
        }
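The two de_thread() hunks bracket the leader-reaping path with threadgroup_change_begin()/threadgroup_change_end(), so a cgroup migration holding the threadgroup lock can no longer race with exec while the old leader's identity is taken over; note that the lock is dropped again before schedule(), so the exec'ing thread never sleeps with it held. At this point in history the helpers were a read-side rwsem (a sketch from memory of the include/linux/sched.h definitions in the CONFIG_CGROUPS case):

	static inline void threadgroup_change_begin(struct task_struct *tsk)
	{
		/* Writers (threadgroup_lock()) take group_rwsem for writing
		 * around cgroup migration; fork and exec take the read side. */
		down_read(&tsk->signal->group_rwsem);
	}

	static inline void threadgroup_change_end(struct task_struct *tsk)
	{
		up_read(&tsk->signal->group_rwsem);
	}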
@@ -1027,17 +1039,7 @@ EXPORT_SYMBOL_GPL(get_task_comm);
 void set_task_comm(struct task_struct *tsk, char *buf)
 {
        task_lock(tsk);
-
        trace_task_rename(tsk, buf);
-
-       /*
-        * Threads may access current->comm without holding
-        * the task lock, so write the string carefully.
-        * Readers without a lock may see incomplete new
-        * names but are safe from non-terminating string reads.
-        */
-       memset(tsk->comm, 0, TASK_COMM_LEN);
-       wmb();
        strlcpy(tsk->comm, buf, sizeof(tsk->comm));
        task_unlock(tsk);
        perf_event_comm(tsk);
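The final hunk drops the memset() + wmb() from set_task_comm(): strlcpy() NUL-terminates within the TASK_COMM_LEN buffer on every write, so a lockless reader of ->comm may observe a mix of the old and new name but can never read an unterminated string, and the unpaired wmb() added no ordering guarantee beyond that. Locked readers go through get_task_comm(), which in this era of fs/exec.c reads (quoted from memory):

	char *get_task_comm(char *buf, struct task_struct *tsk)
	{
		/* buf must be at least sizeof(tsk->comm) in size */
		task_lock(tsk);
		strncpy(buf, tsk->comm, sizeof(tsk->comm));
		task_unlock(tsk);
		return buf;
	}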