kvm: optimize out smp_mb after srcu_read_unlock
author  Michael S. Tsirkin <mst@redhat.com>
Mon, 4 Nov 2013 20:36:25 +0000 (22:36 +0200)
committer  Gleb Natapov <gleb@redhat.com>
Wed, 6 Nov 2013 07:32:31 +0000 (09:32 +0200)
I noticed that srcu_read_lock/unlock both have a full memory barrier,
so just by moving srcu_read_unlock earlier we can get rid of
one call to smp_mb(), using smp_mb__after_srcu_read_unlock() instead.
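For reference, a minimal sketch of what the helper can look like,
assuming (as the companion srcu patch in this series does) that
__srcu_read_unlock() already begins with a full smp_mb(); under that
assumption the helper needs no barrier instruction of its own:

        /*
         * Sketch of smp_mb__after_srcu_read_unlock(): it promotes the
         * smp_mb() already executed inside the preceding
         * srcu_read_unlock() into a full two-way barrier, so the
         * function body can stay empty.
         */
        static inline void smp_mb__after_srcu_read_unlock(void)
        {
                /* __srcu_read_unlock() has smp_mb(), nothing to do. */
        }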

Unsurprisingly, the gain is small but measurable using the unit test
microbenchmark:
before
        vmcall in the ballpark of 1410 cycles
after
        vmcall in the ballpark of 1360 cycles

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
arch/x86/kvm/x86.c

index 07c127fc20644025b85a0c45776438b6794f2f94..21ef1ba184ae8500a70061f566ea55fde76cbfd2 100644
@@ -5966,10 +5966,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 
        vcpu->mode = IN_GUEST_MODE;
 
+       srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+
        /* We should set ->mode before check ->requests,
         * see the comment in make_all_cpus_request.
         */
-       smp_mb();
+       smp_mb__after_srcu_read_unlock();
 
        local_irq_disable();
 
@@ -5979,12 +5981,11 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                smp_wmb();
                local_irq_enable();
                preempt_enable();
+               vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
                r = 1;
                goto cancel_injection;
        }
 
-       srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
-
        if (req_immediate_exit)
                smp_send_reschedule(vcpu->cpu);
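
The barrier here pairs with the smp_mb() in make_all_cpus_request(),
which sets a request bit, runs a full barrier, and then reads
vcpu->mode to decide whether an IPI is needed. Below is a user-space
sketch of that pairing (illustrative only: `mode` and `requests` stand
in for vcpu->mode and vcpu->requests, and C11 seq_cst fences stand in
for smp_mb()). With full barriers on both sides, at least one side is
guaranteed to observe the other's store, so a request can never be
missed while the IPI is also skipped:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int mode;     /* stands in for vcpu->mode     */
static atomic_int requests; /* stands in for vcpu->requests */

static void *vcpu_side(void *arg)
{
        /* vcpu->mode = IN_GUEST_MODE; */
        atomic_store_explicit(&mode, 1, memory_order_relaxed);
        /* smp_mb__after_srcu_read_unlock() */
        atomic_thread_fence(memory_order_seq_cst);
        /* check ->requests before really entering the guest */
        if (atomic_load_explicit(&requests, memory_order_relaxed))
                printf("vcpu: request seen, cancel guest entry\n");
        return NULL;
}

static void *requester_side(void *arg)
{
        /* kvm_make_request(req, vcpu); */
        atomic_store_explicit(&requests, 1, memory_order_relaxed);
        /* smp_mb() in make_all_cpus_request() */
        atomic_thread_fence(memory_order_seq_cst);
        /* read ->mode to decide whether to kick the vcpu with an IPI */
        if (atomic_load_explicit(&mode, memory_order_relaxed))
                printf("requester: vcpu in guest mode, send IPI\n");
        return NULL;
}

int main(void)
{
        pthread_t a, b;
        pthread_create(&a, NULL, vcpu_side, NULL);
        pthread_create(&b, NULL, requester_side, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        return 0;
}

(Compile with -pthread; at least one of the two messages is always
printed, which is exactly the property vcpu_enter_guest() relies on.)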