/* fs/btrfs/async-thread.c */
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/version.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/spinlock.h>

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
# include <linux/freezer.h>
#else
# include <linux/sched.h>
#endif

#include "async-thread.h"

/*
 * container for the kthread task pointer and the list of pending work.
 * One of these is allocated per thread.
 */
struct btrfs_worker_thread {
        /* pool we belong to */
        struct btrfs_workers *workers;

        /* list of struct btrfs_work that are waiting for service */
        struct list_head pending;

        /* list of worker threads from struct btrfs_workers */
        struct list_head worker_list;

        /* kthread */
        struct task_struct *task;

        /* number of things on the pending list */
        atomic_t num_pending;

        /* protects the pending list */
        spinlock_t lock;

        /* set to non-zero when this thread is already awake and kicking */
        int working;

        /* are we currently idle */
        int idle;
};

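/*
 * Note (editorial addition): bit 0 of btrfs_work->flags records whether
 * a work item is currently on some worker's pending list.  It is set in
 * btrfs_queue_worker()/btrfs_requeue_work() via test_and_set_bit() and
 * cleared in worker_loop() once the item is removed for servicing.
 */
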
/*
 * helper function to move a thread onto the idle list after it
 * has finished some requests.
 */
static void check_idle_worker(struct btrfs_worker_thread *worker)
{
        if (!worker->idle && atomic_read(&worker->num_pending) <
            worker->workers->idle_thresh / 2) {
                unsigned long flags;
                spin_lock_irqsave(&worker->workers->lock, flags);
                worker->idle = 1;
                list_move(&worker->worker_list, &worker->workers->idle_list);
                spin_unlock_irqrestore(&worker->workers->lock, flags);
        }
}

/*
 * helper function to move a thread off the idle list after new
 * pending work is added.
 */
static void check_busy_worker(struct btrfs_worker_thread *worker)
{
        if (worker->idle && atomic_read(&worker->num_pending) >=
            worker->workers->idle_thresh) {
                unsigned long flags;
                spin_lock_irqsave(&worker->workers->lock, flags);
                worker->idle = 0;
                list_move_tail(&worker->worker_list,
                               &worker->workers->worker_list);
                spin_unlock_irqrestore(&worker->workers->lock, flags);
        }
}

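/*
 * Note (editorial addition): the two helpers above form a hysteresis
 * band around idle_thresh.  A worker is not marked idle until its
 * backlog drops below idle_thresh / 2, and is not marked busy again
 * until the backlog climbs back up to idle_thresh, which keeps threads
 * from ping-ponging between the two lists.
 */
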
/*
 * main loop for servicing work items
 */
static int worker_loop(void *arg)
{
        struct btrfs_worker_thread *worker = arg;
        struct list_head *cur;
        struct btrfs_work *work;
        do {
                spin_lock_irq(&worker->lock);
                while (!list_empty(&worker->pending)) {
                        cur = worker->pending.next;
                        work = list_entry(cur, struct btrfs_work, list);
                        list_del(&work->list);
                        /* the work is off the pending list; allow requeues */
                        clear_bit(0, &work->flags);

                        work->worker = worker;
                        spin_unlock_irq(&worker->lock);

                        work->func(work);

                        atomic_dec(&worker->num_pending);
                        spin_lock_irq(&worker->lock);
                        check_idle_worker(worker);
                }
                worker->working = 0;
                if (freezing(current)) {
                        /* don't sleep in the refrigerator with the lock held */
                        spin_unlock_irq(&worker->lock);
                        refrigerator();
                } else {
                        set_current_state(TASK_INTERRUPTIBLE);
                        spin_unlock_irq(&worker->lock);
                        schedule();
                        __set_current_state(TASK_RUNNING);
                }
        } while (!kthread_should_stop());
        return 0;
}

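/*
 * Note (editorial addition): worker->working is the wakeup handshake.
 * worker_loop() clears it under worker->lock just before deciding to
 * sleep, and btrfs_queue_worker() sets it under the same lock, so a
 * skipped wake_up_process() can only happen while the thread is still
 * guaranteed to rescan its pending list before sleeping.
 */
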
/*
 * this will wait for all the worker threads to shut down
 */
int btrfs_stop_workers(struct btrfs_workers *workers)
{
        struct list_head *cur;
        struct btrfs_worker_thread *worker;

        list_splice_init(&workers->idle_list, &workers->worker_list);
        while (!list_empty(&workers->worker_list)) {
                cur = workers->worker_list.next;
                worker = list_entry(cur, struct btrfs_worker_thread,
                                    worker_list);
                kthread_stop(worker->task);
                list_del(&worker->worker_list);
                kfree(worker);
        }
        return 0;
}

/*
 * simple init on struct btrfs_workers
 */
void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max)
{
        workers->num_workers = 0;
        INIT_LIST_HEAD(&workers->worker_list);
        INIT_LIST_HEAD(&workers->idle_list);
        spin_lock_init(&workers->lock);
        workers->max_workers = max;
        workers->idle_thresh = 32;
        workers->name = name;
}

/*
 * starts new worker threads.  This does not enforce the max worker
 * count in case you need to temporarily go past it.
 */
int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
{
        struct btrfs_worker_thread *worker;
        int ret = 0;
        int i;

        for (i = 0; i < num_workers; i++) {
                worker = kzalloc(sizeof(*worker), GFP_NOFS);
                if (!worker) {
                        ret = -ENOMEM;
                        goto fail;
                }

                INIT_LIST_HEAD(&worker->pending);
                INIT_LIST_HEAD(&worker->worker_list);
                spin_lock_init(&worker->lock);
                atomic_set(&worker->num_pending, 0);
                /* set the pool pointer before the new thread can use it */
                worker->workers = workers;
                worker->task = kthread_run(worker_loop, worker,
                                           "btrfs-%s-%d", workers->name,
                                           workers->num_workers + i);
                if (IS_ERR(worker->task)) {
                        /* grab the error code before freeing the struct */
                        ret = PTR_ERR(worker->task);
                        kfree(worker);
                        goto fail;
                }

                spin_lock_irq(&workers->lock);
                list_add_tail(&worker->worker_list, &workers->idle_list);
                worker->idle = 1;
                workers->num_workers++;
                spin_unlock_irq(&workers->lock);
        }
        return 0;
fail:
        btrfs_stop_workers(workers);
        return ret;
}

/*
 * run through the list and find a worker thread that doesn't have a lot
 * to do right now.  This can return NULL if we aren't yet at the thread
 * count limit and all of the threads are busy.
 */
static struct btrfs_worker_thread *next_worker(struct btrfs_workers *workers)
{
        struct btrfs_worker_thread *worker;
        struct list_head *next;
        int enforce_min = workers->num_workers < workers->max_workers;

        /*
         * if we find an idle thread, don't move it to the end of the
         * idle list.  This improves the chance that the next submission
         * will reuse the same thread, and maybe catch it while it is still
         * working
         */
        if (!list_empty(&workers->idle_list)) {
                next = workers->idle_list.next;
                worker = list_entry(next, struct btrfs_worker_thread,
                                    worker_list);
                return worker;
        }
        if (enforce_min || list_empty(&workers->worker_list))
                return NULL;

        /*
         * if we pick a busy task, move the task to the end of the list.
         * hopefully this will keep things somewhat evenly balanced
         */
        next = workers->worker_list.next;
        worker = list_entry(next, struct btrfs_worker_thread, worker_list);
        list_move_tail(next, &workers->worker_list);
        return worker;
}

static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
{
        struct btrfs_worker_thread *worker;
        unsigned long flags;

again:
        spin_lock_irqsave(&workers->lock, flags);
        worker = next_worker(workers);
        spin_unlock_irqrestore(&workers->lock, flags);

        if (!worker) {
                spin_lock_irqsave(&workers->lock, flags);
                if (workers->num_workers >= workers->max_workers) {
                        struct list_head *fallback = NULL;
                        /*
                         * we have failed to find any workers, just
                         * return the first one we can find
                         */
                        if (!list_empty(&workers->worker_list))
                                fallback = workers->worker_list.next;
                        if (!list_empty(&workers->idle_list))
                                fallback = workers->idle_list.next;
                        BUG_ON(!fallback);
                        worker = list_entry(fallback,
                                  struct btrfs_worker_thread, worker_list);
                        spin_unlock_irqrestore(&workers->lock, flags);
                } else {
                        spin_unlock_irqrestore(&workers->lock, flags);
                        /* we're below the limit, start another worker */
                        btrfs_start_workers(workers, 1);
                        goto again;
                }
        }
        return worker;
}

/*
 * btrfs_requeue_work just puts the work item back on the tail of the list
 * it was taken from.  It is intended for use with long running work functions
 * that make some progress and want to give the cpu up for others.
 */
int btrfs_requeue_work(struct btrfs_work *work)
{
        struct btrfs_worker_thread *worker = work->worker;
        unsigned long flags;

        if (test_and_set_bit(0, &work->flags))
                goto out;

        spin_lock_irqsave(&worker->lock, flags);
        atomic_inc(&worker->num_pending);
        list_add_tail(&work->list, &worker->pending);
        check_busy_worker(worker);
        spin_unlock_irqrestore(&worker->lock, flags);
out:
        return 0;
}

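/*
 * Usage sketch (editorial addition, not part of the original file): a
 * long running work function can make bounded progress and then requeue
 * itself to give the worker thread up.  struct my_long_work and
 * my_long_func below are hypothetical names used only for illustration.
 */
#if 0
struct my_long_work {
        struct btrfs_work work;         /* embedded for container_of */
        int chunks_left;
};

static void my_long_func(struct btrfs_work *work)
{
        struct my_long_work *lw;

        lw = container_of(work, struct my_long_work, work);
        /* ... process one chunk of the job here ... */
        if (--lw->chunks_left > 0)
                btrfs_requeue_work(work);       /* back onto the same worker */
        else
                kfree(lw);
}
#endif
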
/*
 * places a struct btrfs_work into the pending queue of one of the kthreads
 */
int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
{
        struct btrfs_worker_thread *worker;
        unsigned long flags;
        int wake = 0;

        /* don't requeue something already on a list */
        if (test_and_set_bit(0, &work->flags))
                goto out;

        worker = find_worker(workers);

        spin_lock_irqsave(&worker->lock, flags);
        atomic_inc(&worker->num_pending);
        check_busy_worker(worker);
        list_add_tail(&work->list, &worker->pending);

        /*
         * avoid calling into wake_up_process if this thread has already
         * been kicked
         */
        if (!worker->working)
                wake = 1;
        worker->working = 1;

        spin_unlock_irqrestore(&worker->lock, flags);

        if (wake)
                wake_up_process(worker->task);
out:
        return 0;
}
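
/*
 * Lifecycle sketch (editorial addition, not part of the original file):
 * a caller embeds a struct btrfs_workers, sizes the pool, and submits
 * btrfs_work items whose func member does the processing.  The names
 * my_workers, my_work_func, my_setup and my_teardown are hypothetical.
 */
#if 0
static struct btrfs_workers my_workers;

static void my_work_func(struct btrfs_work *work)
{
        /* ... service one work item ... */
}

static int my_setup(struct btrfs_work *work)
{
        int ret;

        btrfs_init_workers(&my_workers, "example", 8);
        ret = btrfs_start_workers(&my_workers, 1);
        if (ret)
                return ret;

        work->func = my_work_func;
        work->flags = 0;
        btrfs_queue_worker(&my_workers, work);
        return 0;
}

static void my_teardown(void)
{
        /* waits for every worker thread to exit and frees them */
        btrfs_stop_workers(&my_workers);
}
#endif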