i387: Uninline the generic FP helpers that we expose to kernel modules
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index 739d8598f7899bbebd4cd6666d9df08ca8f988f1..17b7549c4134f997289729bfeb17855c1cf62dad 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
 # define user32_fxsr_struct    user_fxsr_struct
 #endif
 
+/*
+ * Were we in an interrupt that interrupted kernel mode?
+ *
+ * We can do a kernel_fpu_begin/end() pair *ONLY* if that
+ * pair does nothing at all: the thread must not have fpu (so
+ * that we don't try to save the FPU state), and TS must
+ * be set (so that the clts/stts pair does nothing that is
+ * visible in the interrupted kernel thread).
+ */
+static inline bool interrupted_kernel_fpu_idle(void)
+{
+       return !__thread_has_fpu(current) &&
+               (read_cr0() & X86_CR0_TS);
+}
+
+/*
+ * Were we in user mode (or vm86 mode) when we were
+ * interrupted?
+ *
+ * Doing kernel_fpu_begin/end() is ok if we are running
+ * in an interrupt context from user mode - we'll just
+ * save the FPU state as required.
+ */
+static inline bool interrupted_user_mode(void)
+{
+       struct pt_regs *regs = get_irq_regs();
+       return regs && user_mode_vm(regs);
+}
+
+/*
+ * Can we use the FPU in kernel mode with the
+ * whole "kernel_fpu_begin/end()" sequence?
+ *
+ * It's always ok in process context (ie "not interrupt")
+ * but it is sometimes ok even from an irq.
+ */
+bool irq_fpu_usable(void)
+{
+       return !in_interrupt() ||
+               interrupted_user_mode() ||
+               interrupted_kernel_fpu_idle();
+}
+EXPORT_SYMBOL(irq_fpu_usable);
+
+void kernel_fpu_begin(void)
+{
+       struct task_struct *me = current;
+
+       WARN_ON_ONCE(!irq_fpu_usable());
+       preempt_disable();
+       if (__thread_has_fpu(me)) {
+               __save_init_fpu(me);
+               __thread_clear_has_fpu(me);
+               /* We do 'stts()' in kernel_fpu_end() */
+       } else {
+               percpu_write(fpu_owner_task, NULL);
+               clts();
+       }
+}
+EXPORT_SYMBOL(kernel_fpu_begin);
+
+void kernel_fpu_end(void)
+{
+       stts();
+       preempt_enable();
+}
+EXPORT_SYMBOL(kernel_fpu_end);
+
+void unlazy_fpu(struct task_struct *tsk)
+{
+       preempt_disable();
+       if (__thread_has_fpu(tsk)) {
+               __save_init_fpu(tsk);
+               __thread_fpu_end(tsk);
+       } else
+               tsk->fpu_counter = 0;
+       preempt_enable();
+}
+EXPORT_SYMBOL(unlazy_fpu);
+
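
Usage note (not part of the patch itself): a kernel module that wants to run SSE/FPU instructions would typically bracket them with the helpers exported above. The sketch below is only illustrative; my_simd_copy() and its memcpy() fallback are hypothetical, and only irq_fpu_usable(), kernel_fpu_begin() and kernel_fpu_end() come from this file.

/*
 * Illustrative sketch of a module-side caller, assuming the declarations
 * in <asm/i387.h> of this kernel era.  my_simd_copy() is a made-up name.
 */
#include <linux/kernel.h>
#include <linux/string.h>
#include <asm/i387.h>

static void my_simd_copy(void *dst, const void *src, size_t len)
{
	/* In interrupt context the FPU may not be usable; check first. */
	if (irq_fpu_usable()) {
		kernel_fpu_begin();	/* saves the owner's FPU state, clts() */
		/* ... SSE/AVX-accelerated copy would go here ... */
		kernel_fpu_end();	/* stts(), re-enable preemption */
	} else {
		memcpy(dst, src, len);	/* integer-only fallback */
	}
}
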
 #ifdef CONFIG_MATH_EMULATION
 # define HAVE_HWFP             (boot_cpu_data.hard_math)
 #else