Pileus Git - ~andy/linux/blobdiff - kernel/workqueue.c
wlcore: allow setting sleep_auth before interface init
[~andy/linux] / kernel / workqueue.c
index f2c5638bb5ab1aa44740dea600ffa8535a88018e..9a3128dc67df450d201969bb5fa9a99773d99c1b 100644 (file)
@@ -476,13 +476,8 @@ static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
                                            struct workqueue_struct *wq)
 {
        if (!(wq->flags & WQ_UNBOUND)) {
-               if (likely(cpu < nr_cpu_ids)) {
-#ifdef CONFIG_SMP
+               if (likely(cpu < nr_cpu_ids))
                        return per_cpu_ptr(wq->cpu_wq.pcpu, cpu);
-#else
-                       return wq->cpu_wq.single;
-#endif
-               }
        } else if (likely(cpu == WORK_CPU_UNBOUND))
                return wq->cpu_wq.single;
        return NULL;
@@ -1037,7 +1032,10 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
        cwq = get_cwq(gcwq->cpu, wq);
        trace_workqueue_queue_work(cpu, cwq, work);
 
-       BUG_ON(!list_empty(&work->entry));
+       if (WARN_ON(!list_empty(&work->entry))) {
+               spin_unlock_irqrestore(&gcwq->lock, flags);
+               return;
+       }
 
        cwq->nr_in_flight[cwq->work_color]++;
        work_flags = work_color_to_flags(cwq->work_color);
@@ -1215,8 +1213,13 @@ static void worker_enter_idle(struct worker *worker)
        } else
                wake_up_all(&gcwq->trustee_wait);
 
-       /* sanity check nr_running */
-       WARN_ON_ONCE(gcwq->nr_workers == gcwq->nr_idle &&
+       /*
+        * Sanity check nr_running.  Because trustee releases gcwq->lock
+        * between setting %WORKER_ROGUE and zapping nr_running, the
+        * warning may trigger spuriously.  Check iff trustee is idle.
+        */
+       WARN_ON_ONCE(gcwq->trustee_state == TRUSTEE_DONE &&
+                    gcwq->nr_workers == gcwq->nr_idle &&
                     atomic_read(get_gcwq_nr_running(gcwq->cpu)));
 }
 
@@ -1815,7 +1818,9 @@ __acquires(&gcwq->lock)
         * lock freed" warnings as well as problems when looking into
         * work->lockdep_map, make a copy and use that here.
         */
-       struct lockdep_map lockdep_map = work->lockdep_map;
+       struct lockdep_map lockdep_map;
+
+       lockdep_copy_map(&lockdep_map, &work->lockdep_map);
 #endif
        /*
         * A single work shouldn't be executed concurrently by
@@ -2511,6 +2516,9 @@ bool flush_work(struct work_struct *work)
 {
        struct wq_barrier barr;
 
+       lock_map_acquire(&work->lockdep_map);
+       lock_map_release(&work->lockdep_map);
+
        if (start_flush_work(work, &barr, true)) {
                wait_for_completion(&barr.done);
                destroy_work_on_stack(&barr.work);
@@ -2899,13 +2907,8 @@ static int alloc_cwqs(struct workqueue_struct *wq)
        const size_t size = sizeof(struct cpu_workqueue_struct);
        const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS,
                                   __alignof__(unsigned long long));
-#ifdef CONFIG_SMP
-       bool percpu = !(wq->flags & WQ_UNBOUND);
-#else
-       bool percpu = false;
-#endif
 
-       if (percpu)
+       if (!(wq->flags & WQ_UNBOUND))
                wq->cpu_wq.pcpu = __alloc_percpu(size, align);
        else {
                void *ptr;
@@ -2929,13 +2932,7 @@ static int alloc_cwqs(struct workqueue_struct *wq)
 
 static void free_cwqs(struct workqueue_struct *wq)
 {
-#ifdef CONFIG_SMP
-       bool percpu = !(wq->flags & WQ_UNBOUND);
-#else
-       bool percpu = false;
-#endif
-
-       if (percpu)
+       if (!(wq->flags & WQ_UNBOUND))
                free_percpu(wq->cpu_wq.pcpu);
        else if (wq->cpu_wq.single) {
                /* the pointer to free is stored right after the cwq */