block: improve rq_affinity placement
index 487addc85bb5b14decfbf431ae5339628d1123d4..58340d0cb23a82d40edf95b77874a5781d49bcdb 100644
--- a/block/blk-softirq.c
+++ b/block/blk-softirq.c
@@ -103,7 +103,7 @@ static struct notifier_block __cpuinitdata blk_cpu_notifier = {
 
 void __blk_complete_request(struct request *req)
 {
-       int ccpu, cpu;
+       int ccpu, cpu, group_cpu = NR_CPUS;
        struct request_queue *q = req->q;
        unsigned long flags;
 
@@ -117,12 +117,22 @@ void __blk_complete_request(struct request *req)
         */
        if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) && req->cpu != -1) {
                ccpu = req->cpu;
-               if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags))
+               if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags)) {
                        ccpu = blk_cpu_to_group(ccpu);
+                       group_cpu = blk_cpu_to_group(cpu);
+               }
        } else
                ccpu = cpu;
 
-       if (ccpu == cpu) {
+       /*
+        * If the current CPU and the requested CPU are in the same group,
+        * run the softirq on the current CPU. This may look just like
+        * QUEUE_FLAG_SAME_FORCE, but it is not: blk_complete_request()
+        * runs in the interrupt handler, and since the I/O controller
+        * does not support multiple interrupts, the current CPU is
+        * effectively unique here. Completing locally avoids sending an
+        * IPI from the current CPU to the first CPU of the group.
+        */
+       if (ccpu == cpu || ccpu == group_cpu) {
                struct list_head *list;
 do_local:
                list = &__get_cpu_var(blk_cpu_done);
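
To illustrate the placement decision outside the kernel, here is a minimal
user-space C sketch of the logic this hunk adds. cpu_to_group() and
GROUP_SIZE are hypothetical stand-ins for the kernel's blk_cpu_to_group()
and the machine's cache-sharing topology; they are assumptions for
illustration, not the kernel implementation.

#include <stdio.h>

#define NR_CPUS    8
#define GROUP_SIZE 4  /* assumption: CPUs 0-3 and 4-7 each share a group */

/* Hypothetical stand-in for blk_cpu_to_group(): map a CPU to the
 * first CPU of its group, i.e. one representative CPU per group. */
static int cpu_to_group(int cpu)
{
        return cpu - (cpu % GROUP_SIZE);
}

/* Decide where the completion softirq runs: ccpu is the CPU the
 * request was submitted from, cpu is the CPU taking the completion
 * interrupt, same_force models QUEUE_FLAG_SAME_FORCE. */
static int completion_cpu(int ccpu, int cpu, int same_force)
{
        int group_cpu = NR_CPUS;

        if (!same_force) {
                ccpu = cpu_to_group(ccpu);
                group_cpu = cpu_to_group(cpu);
        }

        /* Same CPU, or same group without SAME_FORCE: complete
         * locally and skip the IPI to the group's first CPU. */
        if (ccpu == cpu || ccpu == group_cpu)
                return cpu;

        return ccpu;    /* remote completion: an IPI would be raised */
}

int main(void)
{
        /* Submitted on CPU 2, interrupt taken on CPU 3: same group,
         * so with the patch the completion stays on CPU 3. */
        printf("completes on CPU %d\n", completion_cpu(2, 3, 0));
        /* With SAME_FORCE, CPU 2 is honored exactly. */
        printf("completes on CPU %d\n", completion_cpu(2, 3, 1));
        return 0;
}

With the submitting CPU in the same group as the interrupted CPU and
SAME_FORCE clear, the sketch completes locally; with SAME_FORCE set it
returns the submitting CPU, which in the kernel costs an IPI.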