ARM: cacheflush: split user cache-flushing into interruptible chunks
author	Will Deacon <will.deacon@arm.com>
	Mon, 13 May 2013 14:21:49 +0000 (15:21 +0100)
committer	Will Deacon <will.deacon@arm.com>
	Tue, 20 Aug 2013 10:54:53 +0000 (11:54 +0100)
Flushing a large, non-faulting VMA from userspace can potentially result
in a long time spent flushing the cache line-by-line without preemption
occurring (in the case of CONFIG_PREEMPT=n).

Whilst this doesn't affect the stability of the system, it can certainly
affect the responsiveness and CPU availability for other tasks.

This patch splits up the user cacheflush code so that it flushes in
page-sized chunks. After each chunk has been flushed, we may reschedule
if appropriate and, before processing the next chunk, we allow any
pending signals to be handled, then resume from where we left off.

Signed-off-by: Will Deacon <will.deacon@arm.com>
arch/arm/include/asm/thread_info.h
arch/arm/kernel/traps.c
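
For reference: when the new __do_cache_op() below returns
-ERESTART_RESTARTBLOCK, the interrupted operation is later resumed
through the generic restart stub in kernel/signal.c, which simply calls
the function pointer saved in the caller's restart_block. A simplified
sketch of that stub, for illustration only:

/*
 * Simplified sketch of the generic restart stub (kernel/signal.c).
 * Once the pending signal has been dealt with, the syscall layer is
 * re-entered here, and the callback stashed in thread_info's
 * restart_block resumes the interrupted operation.
 */
SYSCALL_DEFINE0(restart_syscall)
{
	struct restart_block *restart = &current_thread_info()->restart_block;
	return restart->fn(restart);
}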

diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 214d4158089afce9c04102604fe07c5257c454e5..7d77645128a883d4a7e888529592a0c8de1a479a 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -43,6 +43,16 @@ struct cpu_context_save {
        __u32   extra[2];               /* Xscale 'acc' register, etc */
 };
 
+struct arm_restart_block {
+       union {
+               /* For user cache flushing */
+               struct {
+                       unsigned long start;
+                       unsigned long end;
+               } cache;
+       };
+};
+
 /*
  * low level task data that entry.S needs immediate access to.
  * __switch_to() assumes cpu_context follows immediately after cpu_domain.
@@ -68,6 +78,7 @@ struct thread_info {
        unsigned long           thumbee_state;  /* ThumbEE Handler Base register */
 #endif
        struct restart_block    restart_block;
+       struct arm_restart_block        arm_restart_block;
 };
 
 #define INIT_THREAD_INFO(tsk)                                          \
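
The saved state lives in thread_info rather than on the kernel stack
because the stack frame is gone by the time the syscall is restarted;
the (start, end) pair has to survive the trip back out to userspace.
The anonymous union also leaves room for other ARM-private restartable
operations to share the same storage later. A purely hypothetical
extension, to illustrate the shape (the "atomic_op" member is invented
here, not part of the patch):

/* Hypothetical sketch only: a second ARM-private restartable
 * operation could overlay its state in the same union. */
struct arm_restart_block {
	union {
		/* For user cache flushing */
		struct {
			unsigned long start;
			unsigned long end;
		} cache;
		/* For a hypothetical future operation */
		struct {
			unsigned long uaddr;
			unsigned long val;
		} atomic_op;
	};
};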
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index cab094c234ee85d646b9534d2044010c10c727ee..4d268d912b0eb3634b9ed8782fbe93163ca781ae 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -499,6 +499,54 @@ static int bad_syscall(int n, struct pt_regs *regs)
        return regs->ARM_r0;
 }
 
+static long do_cache_op_restart(struct restart_block *);
+
+static inline int
+__do_cache_op(unsigned long start, unsigned long end)
+{
+       int ret;
+       unsigned long chunk = PAGE_SIZE;
+
+       do {
+               if (signal_pending(current)) {
+                       struct thread_info *ti = current_thread_info();
+
+                       ti->restart_block = (struct restart_block) {
+                               .fn     = do_cache_op_restart,
+                       };
+
+                       ti->arm_restart_block = (struct arm_restart_block) {
+                               {
+                                       .cache = {
+                                               .start  = start,
+                                               .end    = end,
+                                       },
+                               },
+                       };
+
+                       return -ERESTART_RESTARTBLOCK;
+               }
+
+               ret = flush_cache_user_range(start, start + chunk);
+               if (ret)
+                       return ret;
+
+               cond_resched();
+               start += chunk;
+       } while (start < end);
+
+       return 0;
+}
+
+static long do_cache_op_restart(struct restart_block *unused)
+{
+       struct arm_restart_block *restart_block;
+
+       restart_block = &current_thread_info()->arm_restart_block;
+       return __do_cache_op(restart_block->cache.start,
+                            restart_block->cache.end);
+}
+
 static inline int
 do_cache_op(unsigned long start, unsigned long end, int flags)
 {
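
To make the control flow concrete, here is a small stand-alone model of
the chunked-restart pattern above (plain userspace C; every name in it
is hypothetical and only illustrates the shape of the loop): work
proceeds in page-sized chunks, a pending "signal" aborts the loop with
the progress saved, and a second call replays the remainder.

/* Stand-alone model of the chunked-restart pattern; hypothetical
 * names, for illustration only. */
#include <stdbool.h>
#include <stdio.h>

#define CHUNK 4096UL			/* stands in for PAGE_SIZE */

static unsigned long saved_start, saved_end;	/* models arm_restart_block.cache */
static bool signal_seen;

/* Pretend exactly one signal arrives when the third chunk is reached. */
static bool fake_signal_pending(unsigned long start)
{
	if (!signal_seen && start == 3 * CHUNK) {
		signal_seen = true;
		return true;
	}
	return false;
}

static int model_cache_op(unsigned long start, unsigned long end)
{
	do {
		if (fake_signal_pending(start)) {
			saved_start = start;	/* progress survives the return */
			saved_end = end;
			return -1;		/* models -ERESTART_RESTARTBLOCK */
		}
		printf("flush [%#lx, %#lx)\n", start, start + CHUNK);
		start += CHUNK;
	} while (start < end);
	return 0;
}

int main(void)
{
	/* Interrupted at the third chunk, then replayed from there. */
	if (model_cache_op(0, 5 * CHUNK) != 0)
		model_cache_op(saved_start, saved_end);
	return 0;
}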
@@ -510,17 +558,18 @@ do_cache_op(unsigned long start, unsigned long end, int flags)
 
        down_read(&mm->mmap_sem);
        vma = find_vma(mm, start);
-       if (vma && vma->vm_start < end) {
-               if (start < vma->vm_start)
-                       start = vma->vm_start;
-               if (end > vma->vm_end)
-                       end = vma->vm_end;
-
+       if (!vma || vma->vm_start >= end) {
                up_read(&mm->mmap_sem);
-               return flush_cache_user_range(start, end);
+               return -EINVAL;
        }
+
+       if (start < vma->vm_start)
+               start = vma->vm_start;
+       if (end > vma->vm_end)
+               end = vma->vm_end;
        up_read(&mm->mmap_sem);
-       return -EINVAL;
+
+       return __do_cache_op(start, end);
 }
 
 /*
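
From userspace, this path is reached via the ARM-private cacheflush
syscall (r0 = start, r1 = end, r2 = flags). A minimal usage sketch,
assuming a Linux/ARM toolchain whose headers provide
__ARM_NR_cacheflush, with the conventional flags value of 0:

/* Minimal userspace sketch (Linux/ARM only): flush a range after
 * writing instructions into it, e.g. from a JIT. */
#include <unistd.h>
#include <asm/unistd.h>		/* __ARM_NR_cacheflush */

static int cacheflush_range(void *start, void *end)
{
	return syscall(__ARM_NR_cacheflush, start, end, 0);
}

With this patch applied, a large range passed in here no longer ties up
the CPU for the whole flush: the kernel may reschedule between pages,
and a pending signal interrupts the operation, which is then
transparently restarted from where it stopped.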