Merge branches 'x86-cpu-for-linus', 'x86-boot-for-linus', 'x86-cpufeature-for-linus...
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 9b9fe4a85c87fcebcd370c1e4a33d235f67140ec..442e7bfe10ae5ef2879374498e57026011e5a98f 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -156,9 +156,7 @@ void cpu_idle(void)
                }
 
                tick_nohz_idle_exit();
-               preempt_enable_no_resched();
-               schedule();
-               preempt_disable();
+               schedule_preempt_disabled();
        }
 }
 
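The open-coded enable/schedule/disable dance in the idle loop is replaced by
schedule_preempt_disabled(), a scheduler-core helper introduced for exactly this
pattern. Its body at the time of this merge (see kernel/sched/core.c) is
essentially the three lines it replaces:

        asmlinkage void __sched schedule_preempt_disabled(void)
        {
                preempt_enable_no_resched();
                schedule();
                preempt_disable();
        }

Folding the sequence into one call keeps every such call site consistent and
gives the scheduler a single place to adjust the preempt accounting later.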
@@ -286,6 +284,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
 
        set_tsk_thread_flag(p, TIF_FORK);
 
+       p->fpu_counter = 0;
        p->thread.io_bitmap_ptr = NULL;
 
        savesegment(gs, p->thread.gsindex);
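fpu_counter tracks how many consecutive timeslices a task has used the FPU; the
switching code preloads the FPU state once the count passes 5 (see the
heuristic removed from __switch_to() below). Zeroing it here stops a forked
child from inheriting the parent's streak and eagerly preloading state it has
never touched. The test it feeds, per the comment and code removed further
down, is essentially:

        /* preload only for FPU-hot tasks */
        fpu.preload = tsk_used_math(next_p) && next_p->fpu_counter > 5;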
@@ -341,6 +340,7 @@ start_thread_common(struct pt_regs *regs, unsigned long new_ip,
        loadsegment(es, _ds);
        loadsegment(ds, _ds);
        load_gs_index(0);
+       current->thread.usersp  = new_sp;
        regs->ip                = new_ip;
        regs->sp                = new_sp;
        percpu_write(old_rsp, new_sp);
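start_thread_common() now records the new user stack pointer in thread.usersp
as well as in the per-cpu old_rsp slot. __switch_to() swaps between those two
copies at every context switch:

        /* excerpt from __switch_to() later in this file */
        prev->usersp = percpu_read(old_rsp);
        percpu_write(old_rsp, next->usersp);

Without the added line, thread.usersp stays stale until the task's next
switch-out, so anything reading it in that window (e.g. KSTK_ESP() at the
bottom of this file) would report the pre-exec stack.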
@@ -386,18 +386,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
        int cpu = smp_processor_id();
        struct tss_struct *tss = &per_cpu(init_tss, cpu);
        unsigned fsindex, gsindex;
-       bool preload_fpu;
+       fpu_switch_t fpu;
 
-       /*
-        * If the task has used fpu the last 5 timeslices, just do a full
-        * restore of the math state immediately to avoid the trap; the
-        * chances of needing FPU soon are obviously high now
-        */
-       preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5;
-
-       /* we're going to use this soon, after a few expensive things */
-       if (preload_fpu)
-               prefetch(next->fpu.state);
+       fpu = switch_fpu_prepare(prev_p, next_p, cpu);
 
        /*
         * Reload esp0, LDT and the page table pointer:
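The preload heuristic, the prefetch, and (further down) the save of the
outgoing task's state are all folded into switch_fpu_prepare(), part of the
i387 rework merged here. A condensed sketch of its shape, with the control
flow simplified (the real body lives in the FPU switching header, asm/i387.h,
soon after moved to asm/fpu-internal.h):

        static inline fpu_switch_t
        switch_fpu_prepare(struct task_struct *old, struct task_struct *new, int cpu)
        {
                fpu_switch_t fpu;

                /* same "> 5 timeslices" test that was open-coded here */
                fpu.preload = tsk_used_math(new) && new->fpu_counter > 5;

                /*
                 * Also: saves old's FPU state if it owns the FPU (the job
                 * __unlazy_fpu() used to do) and prefetches
                 * new->thread.fpu.state when preloading.
                 */
                return fpu;
        }

The returned fpu_switch_t carries the preload decision across the segment and
stack switch to switch_fpu_finish() below.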
@@ -427,13 +418,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 
        load_TLS(next, cpu);
 
-       /* Must be after DS reload */
-       __unlazy_fpu(prev_p);
-
-       /* Make sure cpu is ready for new context */
-       if (preload_fpu)
-               clts();
-
        /*
         * Leave lazy mode, flushing any hypercalls made here.
         * This must be done before restoring TLS segments so
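Both removed steps are subsumed by switch_fpu_prepare() above: the state save
that __unlazy_fpu() performed here, and keeping CR0.TS clear ahead of a
preload so the incoming task's first FPU instruction does not fault (what the
explicit clts() did). Note that the save now runs before any segment reloads,
so the old "must be after DS reload" ordering constraint evidently no longer
applies under the reworked scheme.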
@@ -474,6 +458,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
                wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
        prev->gsindex = gsindex;
 
+       switch_fpu_finish(next_p, fpu);
+
        /*
         * Switch the PDA and FPU contexts.
         */
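switch_fpu_finish() is the second half of the pair: once the new task's
segments and stack are in place, it performs the actual restore if
switch_fpu_prepare() decided to preload. At the time of this merge it is
essentially:

        static inline void switch_fpu_finish(struct task_struct *new, fpu_switch_t fpu)
        {
                if (fpu.preload)
                        __math_state_restore(new);
        }

This replaces the __math_state_restore() call removed from the tail of the
function below.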
@@ -492,13 +478,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
                     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
                __switch_to_xtra(prev_p, next_p, tss);
 
-       /*
-        * Preload the FPU context, now that we've determined that the
-        * task is likely to be using it. 
-        */
-       if (preload_fpu)
-               __math_state_restore();
-
        return prev_p;
 }
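With the restore folded into switch_fpu_finish(), nothing FPU-related remains
at the tail of __switch_to(). The net shape of the function is now a
prepare/finish bracket around the register and segment switch:

        fpu = switch_fpu_prepare(prev_p, next_p, cpu); /* save prev, decide preload */
        /* ... esp0, TLS, segments, gs/fs, stack switch ... */
        switch_fpu_finish(next_p, fpu);                /* restore next if preloading */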