From 73a6fdc48bf52e93c26874dc8c0f0f8d5585a809 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Mon, 13 May 2013 11:39:50 +0100
Subject: [PATCH] ARM: spinlock: use inner-shareable dsb variant prior to sev
 instruction

When unlocking a spinlock, we use the sev instruction to signal other
CPUs waiting on the lock. Since sev is not a memory access instruction,
we require a dsb in order to ensure that the sev is not issued ahead
of the store placing the lock in an unlocked state.

However, as sev is only concerned with other processors in a
multiprocessor system, we can restrict the scope of the preceding dsb
to the inner-shareable domain. Furthermore, we can restrict the scope
to consider only stores, since there are no independent loads on the
unlock path.

A side-effect of this change is that a spin_unlock operation no longer
forces completion of pending TLB invalidation, something which we rely
on when unlocking runqueues to ensure that CPU migration during TLB
maintenance routines doesn't cause us to continue before the operation
has completed.

This patch adds the -ishst suffix to the ARMv7 definition of dsb_sev()
and adds an inner-shareable dsb to the context-switch path when running
a preemptible, SMP, v7 kernel.

Reviewed-by: Catalin Marinas
Signed-off-by: Will Deacon
---
 arch/arm/include/asm/spinlock.h  |  2 +-
 arch/arm/include/asm/switch_to.h | 10 ++++++++++
 2 files changed, 11 insertions(+), 1 deletion(-)

diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index f8b8965666e..2c1e748f52d 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -46,7 +46,7 @@ static inline void dsb_sev(void)
 {
 #if __LINUX_ARM_ARCH__ >= 7
 	__asm__ __volatile__ (
-		"dsb\n"
+		"dsb ishst\n"
 		SEV
 	);
 #else
diff --git a/arch/arm/include/asm/switch_to.h b/arch/arm/include/asm/switch_to.h
index fa09e6b49bf..c99e259469f 100644
--- a/arch/arm/include/asm/switch_to.h
+++ b/arch/arm/include/asm/switch_to.h
@@ -3,6 +3,16 @@
 
 #include <linux/thread_info.h>
 
+/*
+ * For v7 SMP cores running a preemptible kernel we may be pre-empted
+ * during a TLB maintenance operation, so execute an inner-shareable dsb
+ * to ensure that the maintenance completes in case we migrate to another
+ * CPU.
+ */
+#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP) && defined(CONFIG_CPU_V7)
+#define finish_arch_switch(prev)	dsb(ish)
+#endif
+
 /*
  * switch_to(prev, next) should switch from task `prev' to `next'
  * `prev' will never be the same as `next'. schedule() itself
--
2.43.2
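
For context, a minimal sketch of how dsb_sev() sits on the unlock path that the
commit message describes: the store that places the lock in an unlocked state is
followed by a dsb scoped to inner-shareable stores and then sev, so CPUs spinning
in wfe are only woken once the unlocking store is visible to them. This is not the
patched kernel source verbatim; the simplified arch_spinlock_t layout and the bare
inline asm below are assumptions for illustration only.

/* Simplified ticket-lock layout, assumed for illustration. */
typedef struct {
	unsigned short owner;	/* ticket currently holding the lock */
	unsigned short next;	/* next ticket to be handed out */
} arch_spinlock_t;

static inline void dsb_sev(void)
{
	/*
	 * dsb ishst: drain only stores, and only to the inner-shareable
	 * domain, before sev wakes CPUs waiting in wfe.
	 */
	__asm__ __volatile__ ("dsb ishst\n\tsev" : : : "memory");
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();	/* order the critical section before the release */
	lock->owner++;	/* the store placing the lock in an unlocked state */
	dsb_sev();	/* make that store visible before signalling waiters */
}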