Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wirel...
diff --git a/kernel/smp.c b/kernel/smp.c
index 4b83cd6815e281c9b8f3c4b94d1fdfc75392ad8a..9910744f0856c5a084b4f9721dca09f376415645 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -194,23 +194,52 @@ void generic_smp_call_function_interrupt(void)
         */
        list_for_each_entry_rcu(data, &call_function.queue, csd.list) {
                int refs;
+               void (*func) (void *info);
 
-               if (!cpumask_test_and_clear_cpu(cpu, data->cpumask))
+               /*
+                * Since we walk the list without any locks, we might
+                * see an entry that was completed, removed from the
+                * list, and is in the process of being reused.
+                *
+                * We must check that the cpu is in the cpumask before
+                * checking the refs, and both must be set before
+                * executing the callback on this cpu.
+                */
+
+               if (!cpumask_test_cpu(cpu, data->cpumask))
                        continue;
 
+               smp_rmb();
+
+               if (atomic_read(&data->refs) == 0)
+                       continue;
+
+               func = data->csd.func;                  /* for later warn */
                data->csd.func(data->csd.info);
 
+               /*
+                * If the cpu mask is no longer set then func enabled interrupts,
+                * we took another smp interrupt, and executed the function
+                * twice on this cpu.  In theory that copy decremented refs.
+                */
+               if (!cpumask_test_and_clear_cpu(cpu, data->cpumask)) {
+                       WARN(1, "%pS enabled interrupts and double executed\n",
+                            func);
+                       continue;
+               }
+
                refs = atomic_dec_return(&data->refs);
                WARN_ON(refs < 0);
-               if (!refs) {
-                       raw_spin_lock(&call_function.lock);
-                       list_del_rcu(&data->csd.list);
-                       raw_spin_unlock(&call_function.lock);
-               }
 
                if (refs)
                        continue;
 
+               WARN_ON(!cpumask_empty(data->cpumask));
+
+               raw_spin_lock(&call_function.lock);
+               list_del_rcu(&data->csd.list);
+               raw_spin_unlock(&call_function.lock);
+
                csd_unlock(&data->csd);
        }
 
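To make the ordering in this hunk concrete, here is a minimal userspace C11 sketch of the handler's lockless check sequence, with smp_rmb() approximated by an acquire fence. struct call_entry, cpu_bit and handle_entry are hypothetical stand-ins for the kernel's call_function_data fields, not kernel API:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for one call_function_data list entry. */
struct call_entry {
	void (*func)(void *info);
	void *info;
	atomic_int cpu_bit;	/* this cpu's bit in data->cpumask */
	atomic_int refs;	/* data->refs */
};

/* Mirrors the interrupt handler's checks on each list entry. */
static bool handle_entry(struct call_entry *e)
{
	/* 1. Skip entries whose mask does not include this cpu. */
	if (!atomic_load_explicit(&e->cpu_bit, memory_order_relaxed))
		return false;

	/* 2. smp_rmb() analogue: refs must not be read before the mask. */
	atomic_thread_fence(memory_order_acquire);

	/* 3. refs == 0: completed, or reused but not yet republished. */
	if (atomic_load_explicit(&e->refs, memory_order_relaxed) == 0)
		return false;

	e->func(e->info);
	return true;
}

static void say(void *info)
{
	puts(info);
}

int main(void)
{
	struct call_entry stale = { say, "stale entry", 1, 0 };
	struct call_entry live = { say, "live entry", 1, 1 };

	handle_entry(&stale);	/* skipped: bit set but refs == 0 */
	handle_entry(&live);	/* prints "live entry" */
	return 0;
}

The order is the point: a reused entry can have our mask bit set before its new refs are published, so an entry whose bit is set but whose refs is still zero is mid-reuse and must be skipped; the fence guarantees that a non-zero refs implies the matching func/info writes are visible.
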
@@ -454,11 +483,21 @@ void smp_call_function_many(const struct cpumask *mask,
 
        data = &__get_cpu_var(cfd_data);
        csd_lock(&data->csd);
+       BUG_ON(atomic_read(&data->refs) || !cpumask_empty(data->cpumask));
 
        data->csd.func = func;
        data->csd.info = info;
        cpumask_and(data->cpumask, mask, cpu_online_mask);
        cpumask_clear_cpu(this_cpu, data->cpumask);
+
+       /*
+        * To ensure the interrupt handler gets a complete view,
+        * we order the cpumask and refs writes here and order the
+        * reads of them in the interrupt handler.  In addition we
+        * may only clear our own cpu bit from the mask.
+        */
+       smp_wmb();
+
        atomic_set(&data->refs, cpumask_weight(data->cpumask));
 
        raw_spin_lock_irqsave(&call_function.lock, flags);
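
The sender side pairs with that acquire fence. Below is a matching sketch of the publish order in smp_call_function_many(), with smp_wmb() approximated by a release fence; again the names are hypothetical stand-ins rather than kernel API:

#include <stdatomic.h>
#include <stddef.h>

struct call_entry {
	void (*func)(void *info);
	void *info;
	atomic_int cpu_bit;	/* a target cpu's bit in data->cpumask */
	atomic_int refs;
};

/* Mirrors the publish order above: payload and mask, barrier, then refs. */
static void publish_entry(struct call_entry *e,
			  void (*func)(void *), void *info)
{
	e->func = func;
	e->info = info;
	atomic_store_explicit(&e->cpu_bit, 1, memory_order_relaxed);

	/*
	 * smp_wmb() analogue: func, info and the mask must be visible
	 * before the handler can observe a non-zero refs.
	 */
	atomic_thread_fence(memory_order_release);

	atomic_store_explicit(&e->refs, 1, memory_order_relaxed);
}

static void noop(void *info)
{
	(void)info;
}

int main(void)
{
	struct call_entry e = { 0 };

	publish_entry(&e, noop, NULL);
	return atomic_load_explicit(&e.refs, memory_order_relaxed) != 1;
}

Setting refs last is what makes the handler's refs != 0 test a safe signal that the entry is fully initialized; and because each cpu clears only its own mask bit, the entry can be reused only after every target has dropped its reference.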