diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index ee8e29a2320c7c76d67df8f4816af52cd9da5f68..7f5d4be220345b0899d38ced692f2458b9278ce7 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -272,6 +272,15 @@ static cpumask_var_t *wq_numa_possible_cpumask;
 static bool wq_disable_numa;
 module_param_named(disable_numa, wq_disable_numa, bool, 0444);
 
+/* see the comment above the definition of WQ_POWER_EFFICIENT */
+#ifdef CONFIG_WQ_POWER_EFFICIENT_DEFAULT
+static bool wq_power_efficient = true;
+#else
+static bool wq_power_efficient;
+#endif
+
+module_param_named(power_efficient, wq_power_efficient, bool, 0444);
+
 static bool wq_numa_enabled;           /* unbound NUMA affinity enabled */
 
 /* buf for wq_update_unbound_numa_attrs(), protected by CPU hotplug exclusion */
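Since workqueue.c is built into the kernel, power_efficient is set on the kernel command line rather than at module load time, and the 0444 permission exposes the current value read-only through sysfs. A minimal boot-configuration sketch (assuming CONFIG_WQ_POWER_EFFICIENT_DEFAULT is unset, so the parameter defaults to off):

        # opt in at boot via the kernel command line
        workqueue.power_efficient=1

        # inspect the effective value at runtime (read-only, mode 0444)
        cat /sys/module/workqueue/parameters/power_efficient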
@@ -305,6 +314,10 @@ struct workqueue_struct *system_unbound_wq __read_mostly;
 EXPORT_SYMBOL_GPL(system_unbound_wq);
 struct workqueue_struct *system_freezable_wq __read_mostly;
 EXPORT_SYMBOL_GPL(system_freezable_wq);
+struct workqueue_struct *system_power_efficient_wq __read_mostly;
+EXPORT_SYMBOL_GPL(system_power_efficient_wq);
+struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
+EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
 
 static int worker_thread(void *__worker);
 static void copy_workqueue_attrs(struct workqueue_attrs *to,
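These two exports give drivers power-aware counterparts to system_wq and system_freezable_wq. A minimal caller-side sketch (the driver, handler, and work-item names are hypothetical):

        #include <linux/workqueue.h>

        /* non-urgent housekeeping that need not run on the submitting CPU */
        static void mydrv_housekeep(struct work_struct *work)
        {
                /* ... light periodic maintenance ... */
        }
        static DECLARE_WORK(mydrv_work, mydrv_housekeep);

        static void mydrv_kick(void)
        {
                /* per-cpu by default; unbound when workqueue.power_efficient=1 */
                queue_work(system_power_efficient_wq, &mydrv_work);
        }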
@@ -2804,6 +2817,19 @@ already_gone:
        return false;
 }
 
+static bool __flush_work(struct work_struct *work)
+{
+       struct wq_barrier barr;
+
+       if (start_flush_work(work, &barr)) {
+               wait_for_completion(&barr.done);
+               destroy_work_on_stack(&barr.work);
+               return true;
+       } else {
+               return false;
+       }
+}
+
 /**
  * flush_work - wait for a work to finish executing the last queueing instance
  * @work: the work to flush
@@ -2817,18 +2843,10 @@ already_gone:
  */
 bool flush_work(struct work_struct *work)
 {
-       struct wq_barrier barr;
-
        lock_map_acquire(&work->lockdep_map);
        lock_map_release(&work->lockdep_map);
 
-       if (start_flush_work(work, &barr)) {
-               wait_for_completion(&barr.done);
-               destroy_work_on_stack(&barr.work);
-               return true;
-       } else {
-               return false;
-       }
+       return __flush_work(work);
 }
 EXPORT_SYMBOL_GPL(flush_work);
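The refactor keeps the lockdep acquire/release pair in flush_work() and moves the actual barrier wait into the new __flush_work() helper, so external callers see no behavioral change. A hedged usage sketch (the device structure and its reset_work member are hypothetical):

        struct mydev {
                struct work_struct reset_work;
                /* ... */
        };

        static void mydev_teardown(struct mydev *dev)
        {
                /* returns true if an instance was pending and has now finished */
                if (flush_work(&dev->reset_work))
                        pr_debug("reset_work completed before teardown\n");
        }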
 
@@ -3398,6 +3416,12 @@ static void copy_workqueue_attrs(struct workqueue_attrs *to,
 {
        to->nice = from->nice;
        cpumask_copy(to->cpumask, from->cpumask);
+       /*
+        * Unlike hash and equality test, this function doesn't ignore
+        * ->no_numa as it is used for both pool and wq attrs.  Instead,
+        * get_unbound_pool() explicitly clears ->no_numa after copying.
+        */
+       to->no_numa = from->no_numa;
 }
 
 /* hash value of the content of @attr */
@@ -3565,6 +3589,12 @@ static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
        lockdep_set_subclass(&pool->lock, 1);   /* see put_pwq() */
        copy_workqueue_attrs(pool->attrs, attrs);
 
+       /*
+        * no_numa isn't a worker_pool attribute, always clear it.  See
+        * 'struct workqueue_attrs' comments for detail.
+        */
+       pool->attrs->no_numa = false;
+
        /* if cpumask is contained inside a NUMA node, we belong to that node */
        if (wq_numa_enabled) {
                for_each_node(node) {
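Taken together, these two hunks make no_numa purely a per-workqueue property: copy_workqueue_attrs() now preserves it so apply_workqueue_attrs() sees the caller's choice, while get_unbound_pool() clears it so that pools differing only in no_numa hash equal and are shared. A sketch of the caller-visible side, assuming wq is an existing WQ_UNBOUND workqueue (error handling elided; alloc_workqueue_attrs() takes a gfp_t in this era):

        struct workqueue_attrs *attrs = alloc_workqueue_attrs(GFP_KERNEL);

        if (attrs) {
                attrs->nice = -5;       /* pool attribute: part of the hash */
                attrs->no_numa = true;  /* wq attribute: cleared in the pool */
                apply_workqueue_attrs(wq, attrs);
                free_workqueue_attrs(attrs);
        }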
@@ -4086,6 +4116,10 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
        struct workqueue_struct *wq;
        struct pool_workqueue *pwq;
 
+       /* see the comment above the definition of WQ_POWER_EFFICIENT */
+       if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient)
+               flags |= WQ_UNBOUND;
+
        /* allocate wq and format name */
        if (flags & WQ_UNBOUND)
                tbl_size = wq_numa_tbl_len * sizeof(wq->numa_pwq_tbl[0]);
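This hunk is where the boot parameter takes effect: a workqueue that merely advertises WQ_POWER_EFFICIENT is promoted to WQ_UNBOUND at allocation time, letting the scheduler place its work items on whichever CPU is cheapest to wake. A hypothetical allocation sketch, e.g. inside a driver probe path:

        struct workqueue_struct *stats_wq;

        /* per-cpu for locality by default; becomes unbound (and hence
         * power-efficient) when booted with workqueue.power_efficient=1 */
        stats_wq = alloc_workqueue("mydrv_stats", WQ_POWER_EFFICIENT, 0);
        if (!stats_wq)
                return -ENOMEM;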
@@ -4627,7 +4661,7 @@ static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
  * Workqueues should be brought up before normal priority CPU notifiers.
  * This will be registered high priority CPU notifier.
  */
-static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb,
+static int workqueue_cpu_up_callback(struct notifier_block *nfb,
                                               unsigned long action,
                                               void *hcpu)
 {
@@ -4680,7 +4714,7 @@ static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb,
  * Workqueues should be brought down after normal priority CPU notifiers.
  * This will be registered as low priority CPU notifier.
  */
-static int __cpuinit workqueue_cpu_down_callback(struct notifier_block *nfb,
+static int workqueue_cpu_down_callback(struct notifier_block *nfb,
                                                 unsigned long action,
                                                 void *hcpu)
 {
@@ -4739,7 +4773,14 @@ long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
 
        INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
        schedule_work_on(cpu, &wfc.work);
-       flush_work(&wfc.work);
+
+       /*
+        * The work item is on-stack and can't lead to deadlock through
+        * flushing.  Use __flush_work() to avoid spurious lockdep warnings
+        * when work_on_cpu()s are nested.
+        */
+       __flush_work(&wfc.work);
+
        return wfc.ret;
 }
 EXPORT_SYMBOL_GPL(work_on_cpu);
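work_on_cpu() queues an on-stack work item, and flushing it through flush_work() acquires a lockdep class that is static to this call site; when one work_on_cpu() call nests inside another, both flushes hit the same class and lockdep reports a false-positive deadlock. Because the on-stack item genuinely cannot deadlock through flushing, the unannotated __flush_work() is used instead. A sketch of the nesting that used to trip the warning (function names hypothetical):

        static long inner(void *arg)
        {
                return 0;               /* runs on CPU 0 */
        }

        static long outer(void *arg)
        {
                /* work_on_cpu() from within a work_on_cpu() callback */
                return work_on_cpu(0, inner, NULL);
        }

        static long run_nested(void)
        {
                /* from process context: returns inner's return value */
                return work_on_cpu(1, outer, NULL);
        }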
@@ -4985,8 +5026,15 @@ static int __init init_workqueues(void)
                                            WQ_UNBOUND_MAX_ACTIVE);
        system_freezable_wq = alloc_workqueue("events_freezable",
                                              WQ_FREEZABLE, 0);
+       system_power_efficient_wq = alloc_workqueue("events_power_efficient",
+                                             WQ_POWER_EFFICIENT, 0);
+       system_freezable_power_efficient_wq = alloc_workqueue("events_freezable_power_efficient",
+                                             WQ_FREEZABLE | WQ_POWER_EFFICIENT,
+                                             0);
        BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq ||
-              !system_unbound_wq || !system_freezable_wq);
+              !system_unbound_wq || !system_freezable_wq ||
+              !system_power_efficient_wq ||
+              !system_freezable_power_efficient_wq);
        return 0;
 }
 early_initcall(init_workqueues);
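init_workqueues() runs as an early initcall, so the two new system workqueues are available from early boot just like the existing ones. A closing usage sketch combining both properties (driver names hypothetical): the poll stops across suspend because the queue is freezable, and avoids waking idle CPUs when workqueue.power_efficient=1.

        static void mydrv_poll(struct work_struct *work);
        static DECLARE_DELAYED_WORK(mydrv_poll_work, mydrv_poll);

        static void mydrv_poll(struct work_struct *work)
        {
                /* ... sample hardware state, then rearm ... */
                queue_delayed_work(system_freezable_power_efficient_wq,
                                   &mydrv_poll_work, HZ);
        }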