1 /*
2  * Copyright (C) 2011-2012 Red Hat UK.
3  *
4  * This file is released under the GPL.
5  */
6
7 #include "dm-thin-metadata.h"
8 #include "dm-bio-prison.h"
9 #include "dm.h"
10
11 #include <linux/device-mapper.h>
12 #include <linux/dm-io.h>
13 #include <linux/dm-kcopyd.h>
14 #include <linux/list.h>
15 #include <linux/init.h>
16 #include <linux/module.h>
17 #include <linux/slab.h>
18
19 #define DM_MSG_PREFIX   "thin"
20
21 /*
22  * Tunable constants
23  */
24 #define ENDIO_HOOK_POOL_SIZE 1024
25 #define MAPPING_POOL_SIZE 1024
26 #define PRISON_CELLS 1024
27 #define COMMIT_PERIOD HZ
28
29 DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
30                 "A percentage of time allocated for copy on write");
31
32 /*
33  * The block size of the device holding pool data must be
34  * between 64KB and 1GB.
35  */
36 #define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (64 * 1024 >> SECTOR_SHIFT)
37 #define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)
38
39 /*
40  * Device id is restricted to 24 bits.
41  */
42 #define MAX_DEV_ID ((1 << 24) - 1)
43
44 /*
45  * How do we handle breaking sharing of data blocks?
46  * =================================================
47  *
48  * We use a standard copy-on-write btree to store the mappings for the
49  * devices (note I'm talking about copy-on-write of the metadata here, not
50  * the data).  When you take an internal snapshot you clone the root node
51  * of the origin btree.  After this there is no concept of an origin or a
52  * snapshot.  They are just two device trees that happen to point to the
53  * same data blocks.
54  *
55  * When we get a write in we decide if it's to a shared data block using
56  * some timestamp magic.  If it is, we have to break sharing.
57  *
58  * Let's say we write to a shared block in what was the origin.  The
59  * steps are:
60  *
61  * i) plug further io to this physical block. (see bio_prison code).
62  *
63  * ii) quiesce any read io to that shared data block.  Obviously
64  * including all devices that share this block.  (see dm_deferred_set code)
65  *
66  * iii) copy the data block to a newly allocated block.  This step can be
67  * skipped if the io covers the whole block. (schedule_copy).
68  *
69  * iv) insert the new mapping into the origin's btree
70  * (process_prepared_mapping).  This act of inserting breaks some
71  * sharing of btree nodes between the two devices.  Breaking sharing only
72  * affects the btree of that specific device.  Btrees for the other
73  * devices that share the block never change.  The btree for the origin
74  * device as it was after the last commit is untouched, ie. we're using
75  * persistent data structures in the functional programming sense.
76  *
77  * v) unplug io to this physical block, including the io that triggered
78  * the breaking of sharing.
79  *
80  * Steps (ii) and (iii) occur in parallel.
81  *
82  * The metadata _doesn't_ need to be committed before the io continues.  We
83  * get away with this because the io is always written to a _new_ block.
84  * If there's a crash, then:
85  *
86  * - The origin mapping will point to the old origin block (the shared
87  * one).  This will contain the data as it was before the io that triggered
88  * the breaking of sharing came in.
89  *
90  * - The snap mapping still points to the old block.  As it would after
91  * the commit.
92  *
93  * The downside of this scheme is the timestamp magic isn't perfect, and
94  * will continue to think that the data block in the snapshot device is shared
95  * even after the write to the origin has broken sharing.  I suspect data
96  * blocks will typically be shared by many different devices, so we're
97  * breaking sharing n + 1 times, rather than n, where n is the number of
98  * devices that reference this data block.  At the moment I think the
99  * benefits far, far outweigh the disadvantages.
100  */
101
102 /*----------------------------------------------------------------*/
103
104 /*
105  * Key building.
106  */
107 static void build_data_key(struct dm_thin_device *td,
108                            dm_block_t b, struct dm_cell_key *key)
109 {
110         key->virtual = 0;
111         key->dev = dm_thin_dev_id(td);
112         key->block = b;
113 }
114
115 static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
116                               struct dm_cell_key *key)
117 {
118         key->virtual = 1;
119         key->dev = dm_thin_dev_id(td);
120         key->block = b;
121 }
122
123 /*----------------------------------------------------------------*/
124
125 /*
126  * A pool device ties together a metadata device and a data device.  It
127  * also provides the interface for creating and destroying internal
128  * devices.
129  */
130 struct dm_thin_new_mapping;
131
132 /*
133  * The pool runs in 3 modes, ordered from least to most degraded so the modes can be compared.
134  */
135 enum pool_mode {
136         PM_WRITE,               /* metadata may be changed */
137         PM_READ_ONLY,           /* metadata may not be changed */
138         PM_FAIL,                /* all I/O fails */
139 };
140
141 struct pool_features {
142         enum pool_mode mode;
143
144         bool zero_new_blocks:1;
145         bool discard_enabled:1;
146         bool discard_passdown:1;
147         bool error_if_no_space:1;
148 };
149
150 struct thin_c;
151 typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);
152 typedef void (*process_mapping_fn)(struct dm_thin_new_mapping *m);
153
154 struct pool {
155         struct list_head list;
156         struct dm_target *ti;   /* Only set if a pool target is bound */
157
158         struct mapped_device *pool_md;
159         struct block_device *md_dev;
160         struct dm_pool_metadata *pmd;
161
162         dm_block_t low_water_blocks;
163         uint32_t sectors_per_block;
164         int sectors_per_block_shift;
165
166         struct pool_features pf;
167         bool low_water_triggered:1;     /* A dm event has been sent */
168
169         struct dm_bio_prison *prison;
170         struct dm_kcopyd_client *copier;
171
172         struct workqueue_struct *wq;
173         struct work_struct worker;
174         struct delayed_work waker;
175
176         unsigned long last_commit_jiffies;
177         unsigned ref_count;
178
179         spinlock_t lock;
180         struct bio_list deferred_bios;
181         struct bio_list deferred_flush_bios;
182         struct list_head prepared_mappings;
183         struct list_head prepared_discards;
184
185         struct bio_list retry_on_resume_list;
186
187         struct dm_deferred_set *shared_read_ds;
188         struct dm_deferred_set *all_io_ds;
189
190         struct dm_thin_new_mapping *next_mapping;
191         mempool_t *mapping_pool;
192
193         process_bio_fn process_bio;
194         process_bio_fn process_discard;
195
196         process_mapping_fn process_prepared_mapping;
197         process_mapping_fn process_prepared_discard;
198 };
199
200 static enum pool_mode get_pool_mode(struct pool *pool);
201 static void out_of_data_space(struct pool *pool);
202 static void metadata_operation_failed(struct pool *pool, const char *op, int r);
203
204 /*
205  * Target context for a pool.
206  */
207 struct pool_c {
208         struct dm_target *ti;
209         struct pool *pool;
210         struct dm_dev *data_dev;
211         struct dm_dev *metadata_dev;
212         struct dm_target_callbacks callbacks;
213
214         dm_block_t low_water_blocks;
215         struct pool_features requested_pf; /* Features requested during table load */
216         struct pool_features adjusted_pf;  /* Features used after adjusting for constituent devices */
217 };
218
219 /*
220  * Target context for a thin.
221  */
222 struct thin_c {
223         struct dm_dev *pool_dev;
224         struct dm_dev *origin_dev;
225         dm_thin_id dev_id;
226
227         struct pool *pool;
228         struct dm_thin_device *td;
229 };
230
231 /*----------------------------------------------------------------*/
232
233 /*
234  * wake_worker() is used when new work is queued and when pool_resume is
235  * ready to continue deferred IO processing.
236  */
237 static void wake_worker(struct pool *pool)
238 {
239         queue_work(pool->wq, &pool->worker);
240 }
241
242 /*----------------------------------------------------------------*/
243
244 static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bio,
245                       struct dm_bio_prison_cell **cell_result)
246 {
247         int r;
248         struct dm_bio_prison_cell *cell_prealloc;
249
250         /*
251          * Allocate a cell from the prison's mempool.
252          * This might block but it can't fail.
253          */
254         cell_prealloc = dm_bio_prison_alloc_cell(pool->prison, GFP_NOIO);
255
256         r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result);
257         if (r)
258                 /*
259                  * We reused an old cell; we can get rid of
260                  * the new one.
261                  */
262                 dm_bio_prison_free_cell(pool->prison, cell_prealloc);
263
264         return r;
265 }
266
267 static void cell_release(struct pool *pool,
268                          struct dm_bio_prison_cell *cell,
269                          struct bio_list *bios)
270 {
271         dm_cell_release(pool->prison, cell, bios);
272         dm_bio_prison_free_cell(pool->prison, cell);
273 }
274
275 static void cell_release_no_holder(struct pool *pool,
276                                    struct dm_bio_prison_cell *cell,
277                                    struct bio_list *bios)
278 {
279         dm_cell_release_no_holder(pool->prison, cell, bios);
280         dm_bio_prison_free_cell(pool->prison, cell);
281 }
282
283 static void cell_defer_no_holder_no_free(struct thin_c *tc,
284                                          struct dm_bio_prison_cell *cell)
285 {
286         struct pool *pool = tc->pool;
287         unsigned long flags;
288
289         spin_lock_irqsave(&pool->lock, flags);
290         dm_cell_release_no_holder(pool->prison, cell, &pool->deferred_bios);
291         spin_unlock_irqrestore(&pool->lock, flags);
292
293         wake_worker(pool);
294 }
295
296 static void cell_error(struct pool *pool,
297                        struct dm_bio_prison_cell *cell)
298 {
299         dm_cell_error(pool->prison, cell);
300         dm_bio_prison_free_cell(pool->prison, cell);
301 }
302
303 /*----------------------------------------------------------------*/
304
305 /*
306  * A global list of pools, keyed by struct mapped_device.
307  */
308 static struct dm_thin_pool_table {
309         struct mutex mutex;
310         struct list_head pools;
311 } dm_thin_pool_table;
312
313 static void pool_table_init(void)
314 {
315         mutex_init(&dm_thin_pool_table.mutex);
316         INIT_LIST_HEAD(&dm_thin_pool_table.pools);
317 }
318
319 static void __pool_table_insert(struct pool *pool)
320 {
321         BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
322         list_add(&pool->list, &dm_thin_pool_table.pools);
323 }
324
325 static void __pool_table_remove(struct pool *pool)
326 {
327         BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
328         list_del(&pool->list);
329 }
330
331 static struct pool *__pool_table_lookup(struct mapped_device *md)
332 {
333         struct pool *pool = NULL, *tmp;
334
335         BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
336
337         list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
338                 if (tmp->pool_md == md) {
339                         pool = tmp;
340                         break;
341                 }
342         }
343
344         return pool;
345 }
346
347 static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev)
348 {
349         struct pool *pool = NULL, *tmp;
350
351         BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
352
353         list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
354                 if (tmp->md_dev == md_dev) {
355                         pool = tmp;
356                         break;
357                 }
358         }
359
360         return pool;
361 }
362
363 /*----------------------------------------------------------------*/
364
365 struct dm_thin_endio_hook {
366         struct thin_c *tc;
367         struct dm_deferred_entry *shared_read_entry;
368         struct dm_deferred_entry *all_io_entry;
369         struct dm_thin_new_mapping *overwrite_mapping;
370 };
371
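/*
 * Hand back every bio on @master that belongs to thin device @tc,
 * completing it with DM_ENDIO_REQUEUE; bios for other devices stay on
 * the list.
 */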
372 static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master)
373 {
374         struct bio *bio;
375         struct bio_list bios;
376
377         bio_list_init(&bios);
378         bio_list_merge(&bios, master);
379         bio_list_init(master);
380
381         while ((bio = bio_list_pop(&bios))) {
382                 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
383
384                 if (h->tc == tc)
385                         bio_endio(bio, DM_ENDIO_REQUEUE);
386                 else
387                         bio_list_add(master, bio);
388         }
389 }
390
391 static void requeue_io(struct thin_c *tc)
392 {
393         struct pool *pool = tc->pool;
394         unsigned long flags;
395
396         spin_lock_irqsave(&pool->lock, flags);
397         __requeue_bio_list(tc, &pool->deferred_bios);
398         __requeue_bio_list(tc, &pool->retry_on_resume_list);
399         spin_unlock_irqrestore(&pool->lock, flags);
400 }
401
402 /*
403  * This section of code contains the logic for processing a thin device's IO.
404  * Much of the code depends on pool object resources (lists, workqueues, etc.)
405  * but most is exclusively called from the thin target rather than the thin-pool
406  * target.
407  */
408
409 static bool block_size_is_power_of_two(struct pool *pool)
410 {
411         return pool->sectors_per_block_shift >= 0;
412 }
413
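/*
 * Map a bio's starting sector to the virtual block it falls in, using a
 * shift when the pool's block size is a power of two and a division
 * otherwise.
 */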
414 static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
415 {
416         struct pool *pool = tc->pool;
417         sector_t block_nr = bio->bi_sector;
418
419         if (block_size_is_power_of_two(pool))
420                 block_nr >>= pool->sectors_per_block_shift;
421         else
422                 (void) sector_div(block_nr, pool->sectors_per_block);
423
424         return block_nr;
425 }
426
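/*
 * Redirect a bio at the pool device, targeting the given data block but
 * preserving the bio's offset within the block.
 */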
427 static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
428 {
429         struct pool *pool = tc->pool;
430         sector_t bi_sector = bio->bi_sector;
431
432         bio->bi_bdev = tc->pool_dev->bdev;
433         if (block_size_is_power_of_two(pool))
434                 bio->bi_sector = (block << pool->sectors_per_block_shift) |
435                                 (bi_sector & (pool->sectors_per_block - 1));
436         else
437                 bio->bi_sector = (block * pool->sectors_per_block) +
438                                  sector_div(bi_sector, pool->sectors_per_block);
439 }
440
441 static void remap_to_origin(struct thin_c *tc, struct bio *bio)
442 {
443         bio->bi_bdev = tc->origin_dev->bdev;
444 }
445
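/*
 * A FLUSH/FUA bio only needs to trigger a commit if the current metadata
 * transaction actually contains changes.
 */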
446 static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
447 {
448         return (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) &&
449                 dm_thin_changed_this_transaction(tc->td);
450 }
451
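/*
 * Record this bio in the pool's all_io deferred set so that discards can
 * be quiesced against it.  Discard bios themselves are not recorded.
 */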
452 static void inc_all_io_entry(struct pool *pool, struct bio *bio)
453 {
454         struct dm_thin_endio_hook *h;
455
456         if (bio->bi_rw & REQ_DISCARD)
457                 return;
458
459         h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
460         h->all_io_entry = dm_deferred_entry_inc(pool->all_io_ds);
461 }
462
463 static void issue(struct thin_c *tc, struct bio *bio)
464 {
465         struct pool *pool = tc->pool;
466         unsigned long flags;
467
468         if (!bio_triggers_commit(tc, bio)) {
469                 generic_make_request(bio);
470                 return;
471         }
472
473         /*
474          * Complete bio with an error if earlier I/O caused changes to
475          * the metadata that can't be committed, e.g. due to I/O errors
476          * on the metadata device.
477          */
478         if (dm_thin_aborted_changes(tc->td)) {
479                 bio_io_error(bio);
480                 return;
481         }
482
483         /*
484          * Batch together any bios that trigger commits and then issue a
485          * single commit for them in process_deferred_bios().
486          */
487         spin_lock_irqsave(&pool->lock, flags);
488         bio_list_add(&pool->deferred_flush_bios, bio);
489         spin_unlock_irqrestore(&pool->lock, flags);
490 }
491
492 static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio)
493 {
494         remap_to_origin(tc, bio);
495         issue(tc, bio);
496 }
497
498 static void remap_and_issue(struct thin_c *tc, struct bio *bio,
499                             dm_block_t block)
500 {
501         remap(tc, bio, block);
502         issue(tc, bio);
503 }
504
505 /*----------------------------------------------------------------*/
506
507 /*
508  * Bio endio functions.
509  */
510 struct dm_thin_new_mapping {
511         struct list_head list;
512
513         bool quiesced:1;
514         bool prepared:1;
515         bool pass_discard:1;
516         bool definitely_not_shared:1;
517
518         int err;
519         struct thin_c *tc;
520         dm_block_t virt_block;
521         dm_block_t data_block;
522         struct dm_bio_prison_cell *cell, *cell2;
523
524         /*
525          * If the bio covers the whole area of a block then we can avoid
526          * zeroing or copying.  Instead this bio is hooked.  The bio will
527          * still be in the cell, so care has to be taken to avoid issuing
528          * the bio twice.
529          */
530         struct bio *bio;
531         bio_end_io_t *saved_bi_end_io;
532 };
533
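/*
 * Queue a mapping for the worker once it has been both quiesced and
 * prepared.
 */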
534 static void __maybe_add_mapping(struct dm_thin_new_mapping *m)
535 {
536         struct pool *pool = m->tc->pool;
537
538         if (m->quiesced && m->prepared) {
539                 list_add_tail(&m->list, &pool->prepared_mappings);
540                 wake_worker(pool);
541         }
542 }
543
544 static void copy_complete(int read_err, unsigned long write_err, void *context)
545 {
546         unsigned long flags;
547         struct dm_thin_new_mapping *m = context;
548         struct pool *pool = m->tc->pool;
549
550         m->err = read_err || write_err ? -EIO : 0;
551
552         spin_lock_irqsave(&pool->lock, flags);
553         m->prepared = true;
554         __maybe_add_mapping(m);
555         spin_unlock_irqrestore(&pool->lock, flags);
556 }
557
558 static void overwrite_endio(struct bio *bio, int err)
559 {
560         unsigned long flags;
561         struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
562         struct dm_thin_new_mapping *m = h->overwrite_mapping;
563         struct pool *pool = m->tc->pool;
564
565         m->err = err;
566
567         spin_lock_irqsave(&pool->lock, flags);
568         m->prepared = true;
569         __maybe_add_mapping(m);
570         spin_unlock_irqrestore(&pool->lock, flags);
571 }
572
573 /*----------------------------------------------------------------*/
574
575 /*
576  * Workqueue.
577  */
578
579 /*
580  * Prepared mapping jobs.
581  */
582
583 /*
584  * This sends the bios in the cell back to the deferred_bios list.
585  */
586 static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell)
587 {
588         struct pool *pool = tc->pool;
589         unsigned long flags;
590
591         spin_lock_irqsave(&pool->lock, flags);
592         cell_release(pool, cell, &pool->deferred_bios);
593         spin_unlock_irqrestore(&pool->lock, flags);
594
595         wake_worker(pool);
596 }
597
598 /*
599  * Same as cell_defer above, except it omits the original holder of the cell.
600  */
601 static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell)
602 {
603         struct pool *pool = tc->pool;
604         unsigned long flags;
605
606         spin_lock_irqsave(&pool->lock, flags);
607         cell_release_no_holder(pool, cell, &pool->deferred_bios);
608         spin_unlock_irqrestore(&pool->lock, flags);
609
610         wake_worker(pool);
611 }
612
613 static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
614 {
615         if (m->bio)
616                 m->bio->bi_end_io = m->saved_bi_end_io;
617         cell_error(m->tc->pool, m->cell);
618         list_del(&m->list);
619         mempool_free(m, m->tc->pool->mapping_pool);
620 }
621
622 static void process_prepared_mapping(struct dm_thin_new_mapping *m)
623 {
624         struct thin_c *tc = m->tc;
625         struct pool *pool = tc->pool;
626         struct bio *bio;
627         int r;
628
629         bio = m->bio;
630         if (bio)
631                 bio->bi_end_io = m->saved_bi_end_io;
632
633         if (m->err) {
634                 cell_error(pool, m->cell);
635                 goto out;
636         }
637
638         /*
639          * Commit the prepared block into the mapping btree.
640          * Any I/O for this block arriving after this point will get
641          * remapped to it directly.
642          */
643         r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
644         if (r) {
645                 metadata_operation_failed(pool, "dm_thin_insert_block", r);
646                 cell_error(pool, m->cell);
647                 goto out;
648         }
649
650         /*
651          * Release any bios held while the block was being provisioned.
652          * If we are processing a write bio that completely covers the block,
653          * we have already processed it, so we can ignore it now when processing
654          * the bios in the cell.
655          */
656         if (bio) {
657                 cell_defer_no_holder(tc, m->cell);
658                 bio_endio(bio, 0);
659         } else
660                 cell_defer(tc, m->cell);
661
662 out:
663         list_del(&m->list);
664         mempool_free(m, pool->mapping_pool);
665 }
666
667 static void process_prepared_discard_fail(struct dm_thin_new_mapping *m)
668 {
669         struct thin_c *tc = m->tc;
670
671         bio_io_error(m->bio);
672         cell_defer_no_holder(tc, m->cell);
673         cell_defer_no_holder(tc, m->cell2);
674         mempool_free(m, tc->pool->mapping_pool);
675 }
676
677 static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
678 {
679         struct thin_c *tc = m->tc;
680
681         inc_all_io_entry(tc->pool, m->bio);
682         cell_defer_no_holder(tc, m->cell);
683         cell_defer_no_holder(tc, m->cell2);
684
685         if (m->pass_discard) {
686                 if (m->definitely_not_shared)
687                         remap_and_issue(tc, m->bio, m->data_block);
688                 else {
689                         bool used = false;
690                         if (dm_pool_block_is_used(tc->pool->pmd, m->data_block, &used) || used)
691                                 bio_endio(m->bio, 0);
692                         else
693                                 remap_and_issue(tc, m->bio, m->data_block);
694                 }
695         } else
696                 bio_endio(m->bio, 0);
697
698         mempool_free(m, tc->pool->mapping_pool);
699 }
700
701 static void process_prepared_discard(struct dm_thin_new_mapping *m)
702 {
703         int r;
704         struct thin_c *tc = m->tc;
705
706         r = dm_thin_remove_block(tc->td, m->virt_block);
707         if (r)
708                 DMERR_LIMIT("dm_thin_remove_block() failed");
709
710         process_prepared_discard_passdown(m);
711 }
712
713 static void process_prepared(struct pool *pool, struct list_head *head,
714                              process_mapping_fn *fn)
715 {
716         unsigned long flags;
717         struct list_head maps;
718         struct dm_thin_new_mapping *m, *tmp;
719
720         INIT_LIST_HEAD(&maps);
721         spin_lock_irqsave(&pool->lock, flags);
722         list_splice_init(head, &maps);
723         spin_unlock_irqrestore(&pool->lock, flags);
724
725         list_for_each_entry_safe(m, tmp, &maps, list)
726                 (*fn)(m);
727 }
728
729 /*
730  * Deferred bio jobs.
731  */
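/*
 * A bio 'overlaps' a block if it is exactly one block in size; it
 * 'overwrites' the block if, in addition, it is a write.
 */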
732 static int io_overlaps_block(struct pool *pool, struct bio *bio)
733 {
734         return bio->bi_size == (pool->sectors_per_block << SECTOR_SHIFT);
735 }
736
737 static int io_overwrites_block(struct pool *pool, struct bio *bio)
738 {
739         return (bio_data_dir(bio) == WRITE) &&
740                 io_overlaps_block(pool, bio);
741 }
742
743 static void save_and_set_endio(struct bio *bio, bio_end_io_t **save,
744                                bio_end_io_t *fn)
745 {
746         *save = bio->bi_end_io;
747         bio->bi_end_io = fn;
748 }
749
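/*
 * The pool keeps a single preallocated mapping struct so the worker can
 * check, before dequeuing a bio, that a mapping will be available for it.
 * ensure_next_mapping() may fail with -ENOMEM; get_next_mapping() consumes
 * the preallocated struct and must only follow a successful
 * ensure_next_mapping().
 */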
750 static int ensure_next_mapping(struct pool *pool)
751 {
752         if (pool->next_mapping)
753                 return 0;
754
755         pool->next_mapping = mempool_alloc(pool->mapping_pool, GFP_ATOMIC);
756
757         return pool->next_mapping ? 0 : -ENOMEM;
758 }
759
760 static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
761 {
762         struct dm_thin_new_mapping *m = pool->next_mapping;
763
764         BUG_ON(!pool->next_mapping);
765
766         memset(m, 0, sizeof(struct dm_thin_new_mapping));
767         INIT_LIST_HEAD(&m->list);
768         m->bio = NULL;
769
770         pool->next_mapping = NULL;
771
772         return m;
773 }
774
775 static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
776                           struct dm_dev *origin, dm_block_t data_origin,
777                           dm_block_t data_dest,
778                           struct dm_bio_prison_cell *cell, struct bio *bio)
779 {
780         int r;
781         struct pool *pool = tc->pool;
782         struct dm_thin_new_mapping *m = get_next_mapping(pool);
783
784         m->tc = tc;
785         m->virt_block = virt_block;
786         m->data_block = data_dest;
787         m->cell = cell;
788
789         if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list))
790                 m->quiesced = true;
791
792         /*
793          * IO to pool_dev remaps to the pool target's data_dev.
794          *
795          * If the whole block of data is being overwritten, we can issue the
796          * bio immediately. Otherwise we use kcopyd to clone the data first.
797          */
798         if (io_overwrites_block(pool, bio)) {
799                 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
800
801                 h->overwrite_mapping = m;
802                 m->bio = bio;
803                 save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
804                 inc_all_io_entry(pool, bio);
805                 remap_and_issue(tc, bio, data_dest);
806         } else {
807                 struct dm_io_region from, to;
808
809                 from.bdev = origin->bdev;
810                 from.sector = data_origin * pool->sectors_per_block;
811                 from.count = pool->sectors_per_block;
812
813                 to.bdev = tc->pool_dev->bdev;
814                 to.sector = data_dest * pool->sectors_per_block;
815                 to.count = pool->sectors_per_block;
816
817                 r = dm_kcopyd_copy(pool->copier, &from, 1, &to,
818                                    0, copy_complete, m);
819                 if (r < 0) {
820                         mempool_free(m, pool->mapping_pool);
821                         DMERR_LIMIT("dm_kcopyd_copy() failed");
822                         cell_error(pool, cell);
823                 }
824         }
825 }
826
827 static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
828                                    dm_block_t data_origin, dm_block_t data_dest,
829                                    struct dm_bio_prison_cell *cell, struct bio *bio)
830 {
831         schedule_copy(tc, virt_block, tc->pool_dev,
832                       data_origin, data_dest, cell, bio);
833 }
834
835 static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
836                                    dm_block_t data_dest,
837                                    struct dm_bio_prison_cell *cell, struct bio *bio)
838 {
839         schedule_copy(tc, virt_block, tc->origin_dev,
840                       virt_block, data_dest, cell, bio);
841 }
842
843 static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
844                           dm_block_t data_block, struct dm_bio_prison_cell *cell,
845                           struct bio *bio)
846 {
847         struct pool *pool = tc->pool;
848         struct dm_thin_new_mapping *m = get_next_mapping(pool);
849
850         m->quiesced = true;
851         m->prepared = false;
852         m->tc = tc;
853         m->virt_block = virt_block;
854         m->data_block = data_block;
855         m->cell = cell;
856
857         /*
858          * If the whole block of data is being overwritten or we are not
859          * zeroing pre-existing data, we can issue the bio immediately.
860          * Otherwise we use kcopyd to zero the data first.
861          */
862         if (!pool->pf.zero_new_blocks)
863                 process_prepared_mapping(m);
864
865         else if (io_overwrites_block(pool, bio)) {
866                 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
867
868                 h->overwrite_mapping = m;
869                 m->bio = bio;
870                 save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
871                 inc_all_io_entry(pool, bio);
872                 remap_and_issue(tc, bio, data_block);
873         } else {
874                 int r;
875                 struct dm_io_region to;
876
877                 to.bdev = tc->pool_dev->bdev;
878                 to.sector = data_block * pool->sectors_per_block;
879                 to.count = pool->sectors_per_block;
880
881                 r = dm_kcopyd_zero(pool->copier, 1, &to, 0, copy_complete, m);
882                 if (r < 0) {
883                         mempool_free(m, pool->mapping_pool);
884                         DMERR_LIMIT("dm_kcopyd_zero() failed");
885                         cell_error(pool, cell);
886                 }
887         }
888 }
889
890 /*
891  * A non-zero return indicates read_only or fail_io mode.
892  * Many callers don't care about the return value.
893  */
894 static int commit(struct pool *pool)
895 {
896         int r;
897
898         if (get_pool_mode(pool) != PM_WRITE)
899                 return -EINVAL;
900
901         r = dm_pool_commit_metadata(pool->pmd);
902         if (r)
903                 metadata_operation_failed(pool, "dm_pool_commit_metadata", r);
904
905         return r;
906 }
907
908 static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks)
909 {
910         unsigned long flags;
911
912         if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
913                 DMWARN("%s: reached low water mark for data device: sending event.",
914                        dm_device_name(pool->pool_md));
915                 spin_lock_irqsave(&pool->lock, flags);
916                 pool->low_water_triggered = true;
917                 spin_unlock_irqrestore(&pool->lock, flags);
918                 dm_table_event(pool->ti->table);
919         }
920 }
921
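/*
 * Allocate a free block on the data device, committing the metadata first
 * if that is the only hope of reclaiming space.  Returns -ENOSPC, and
 * degrades the pool, if no space can be found.
 */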
922 static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
923 {
924         int r;
925         dm_block_t free_blocks;
926         struct pool *pool = tc->pool;
927
928         if (get_pool_mode(pool) != PM_WRITE)
929                 return -EINVAL;
930
931         r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
932         if (r) {
933                 metadata_operation_failed(pool, "dm_pool_get_free_block_count", r);
934                 return r;
935         }
936
937         check_low_water_mark(pool, free_blocks);
938
939         if (!free_blocks) {
940                 /*
941                  * Try to commit to see if that will free up some
942                  * more space.
943                  */
944                 r = commit(pool);
945                 if (r)
946                         return r;
947
948                 r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
949                 if (r) {
950                         metadata_operation_failed(pool, "dm_pool_get_free_block_count", r);
951                         return r;
952                 }
953
954                 if (!free_blocks) {
955                         out_of_data_space(pool);
956                         return -ENOSPC;
957                 }
958         }
959
960         r = dm_pool_alloc_data_block(pool->pmd, result);
961         if (r) {
962                 metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
963                 return r;
964         }
965
966         return 0;
967 }
968
969 /*
970  * If we have run out of space, queue bios until the device is
971  * resumed, presumably after having been reloaded with more space.
972  */
973 static void retry_on_resume(struct bio *bio)
974 {
975         struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
976         struct thin_c *tc = h->tc;
977         struct pool *pool = tc->pool;
978         unsigned long flags;
979
980         spin_lock_irqsave(&pool->lock, flags);
981         bio_list_add(&pool->retry_on_resume_list, bio);
982         spin_unlock_irqrestore(&pool->lock, flags);
983 }
984
985 static void handle_unserviceable_bio(struct pool *pool, struct bio *bio)
986 {
987         /*
988          * When pool is read-only, no cell locking is needed because
989          * nothing is changing.
990          */
991         WARN_ON_ONCE(get_pool_mode(pool) != PM_READ_ONLY);
992
993         if (pool->pf.error_if_no_space)
994                 bio_io_error(bio);
995         else
996                 retry_on_resume(bio);
997 }
998
999 static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *cell)
1000 {
1001         struct bio *bio;
1002         struct bio_list bios;
1003
1004         bio_list_init(&bios);
1005         cell_release(pool, cell, &bios);
1006
1007         while ((bio = bio_list_pop(&bios)))
1008                 handle_unserviceable_bio(pool, bio);
1009 }
1010
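/*
 * Handle a discard bio.  If it covers a whole provisioned block we quiesce
 * outstanding io and then unmap the block, optionally passing the discard
 * down to the data device.  A partial-block discard never unmaps; it is
 * either passed down or completed immediately.
 */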
1011 static void process_discard(struct thin_c *tc, struct bio *bio)
1012 {
1013         int r;
1014         unsigned long flags;
1015         struct pool *pool = tc->pool;
1016         struct dm_bio_prison_cell *cell, *cell2;
1017         struct dm_cell_key key, key2;
1018         dm_block_t block = get_bio_block(tc, bio);
1019         struct dm_thin_lookup_result lookup_result;
1020         struct dm_thin_new_mapping *m;
1021
1022         build_virtual_key(tc->td, block, &key);
1023         if (bio_detain(tc->pool, &key, bio, &cell))
1024                 return;
1025
1026         r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
1027         switch (r) {
1028         case 0:
1029                 /*
1030                  * Check nobody is fiddling with this pool block.  This can
1031                  * happen if someone's in the process of breaking sharing
1032                  * on this block.
1033                  */
1034                 build_data_key(tc->td, lookup_result.block, &key2);
1035                 if (bio_detain(tc->pool, &key2, bio, &cell2)) {
1036                         cell_defer_no_holder(tc, cell);
1037                         break;
1038                 }
1039
1040                 if (io_overlaps_block(pool, bio)) {
1041                         /*
1042                          * IO may still be going to the destination block.  We must
1043                          * quiesce before we can do the removal.
1044                          */
1045                         m = get_next_mapping(pool);
1046                         m->tc = tc;
1047                         m->pass_discard = pool->pf.discard_passdown;
1048                         m->definitely_not_shared = !lookup_result.shared;
1049                         m->virt_block = block;
1050                         m->data_block = lookup_result.block;
1051                         m->cell = cell;
1052                         m->cell2 = cell2;
1053                         m->bio = bio;
1054
1055                         if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) {
1056                                 spin_lock_irqsave(&pool->lock, flags);
1057                                 list_add_tail(&m->list, &pool->prepared_discards);
1058                                 spin_unlock_irqrestore(&pool->lock, flags);
1059                                 wake_worker(pool);
1060                         }
1061                 } else {
1062                         inc_all_io_entry(pool, bio);
1063                         cell_defer_no_holder(tc, cell);
1064                         cell_defer_no_holder(tc, cell2);
1065
1066                         /*
1067                          * The DM core makes sure that the discard doesn't span
1068                          * a block boundary.  So we submit the discard of a
1069                          * partial block appropriately.
1070                          */
1071                         if ((!lookup_result.shared) && pool->pf.discard_passdown)
1072                                 remap_and_issue(tc, bio, lookup_result.block);
1073                         else
1074                                 bio_endio(bio, 0);
1075                 }
1076                 break;
1077
1078         case -ENODATA:
1079                 /*
1080                  * It isn't provisioned, just forget it.
1081                  */
1082                 cell_defer_no_holder(tc, cell);
1083                 bio_endio(bio, 0);
1084                 break;
1085
1086         default:
1087                 DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
1088                             __func__, r);
1089                 cell_defer_no_holder(tc, cell);
1090                 bio_io_error(bio);
1091                 break;
1092         }
1093 }
1094
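/*
 * Allocate a fresh data block and schedule a copy from the shared block,
 * breaking sharing for this device only.
 */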
1095 static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
1096                           struct dm_cell_key *key,
1097                           struct dm_thin_lookup_result *lookup_result,
1098                           struct dm_bio_prison_cell *cell)
1099 {
1100         int r;
1101         dm_block_t data_block;
1102         struct pool *pool = tc->pool;
1103
1104         r = alloc_data_block(tc, &data_block);
1105         switch (r) {
1106         case 0:
1107                 schedule_internal_copy(tc, block, lookup_result->block,
1108                                        data_block, cell, bio);
1109                 break;
1110
1111         case -ENOSPC:
1112                 retry_bios_on_resume(pool, cell);
1113                 break;
1114
1115         default:
1116                 DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
1117                             __func__, r);
1118                 cell_error(pool, cell);
1119                 break;
1120         }
1121 }
1122
1123 static void process_shared_bio(struct thin_c *tc, struct bio *bio,
1124                                dm_block_t block,
1125                                struct dm_thin_lookup_result *lookup_result)
1126 {
1127         struct dm_bio_prison_cell *cell;
1128         struct pool *pool = tc->pool;
1129         struct dm_cell_key key;
1130
1131         /*
1132          * If cell is already occupied, then sharing is already in the process
1133          * of being broken so we have nothing further to do here.
1134          */
1135         build_data_key(tc->td, lookup_result->block, &key);
1136         if (bio_detain(pool, &key, bio, &cell))
1137                 return;
1138
1139         if (bio_data_dir(bio) == WRITE && bio->bi_size)
1140                 break_sharing(tc, bio, block, &key, lookup_result, cell);
1141         else {
1142                 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
1143
1144                 h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds);
1145                 inc_all_io_entry(pool, bio);
1146                 cell_defer_no_holder(tc, cell);
1147
1148                 remap_and_issue(tc, bio, lookup_result->block);
1149         }
1150 }
1151
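/*
 * Provision a data block for an unmapped virtual block.  Empty (flush)
 * bios are remapped immediately and reads are zero-filled, neither needs
 * provisioning; writes either copy from an external origin or zero the
 * new block as required.
 */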
1152 static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block,
1153                             struct dm_bio_prison_cell *cell)
1154 {
1155         int r;
1156         dm_block_t data_block;
1157         struct pool *pool = tc->pool;
1158
1159         /*
1160          * Remap empty bios (flushes) immediately, without provisioning.
1161          */
1162         if (!bio->bi_size) {
1163                 inc_all_io_entry(pool, bio);
1164                 cell_defer_no_holder(tc, cell);
1165
1166                 remap_and_issue(tc, bio, 0);
1167                 return;
1168         }
1169
1170         /*
1171          * Fill read bios with zeroes and complete them immediately.
1172          */
1173         if (bio_data_dir(bio) == READ) {
1174                 zero_fill_bio(bio);
1175                 cell_defer_no_holder(tc, cell);
1176                 bio_endio(bio, 0);
1177                 return;
1178         }
1179
1180         r = alloc_data_block(tc, &data_block);
1181         switch (r) {
1182         case 0:
1183                 if (tc->origin_dev)
1184                         schedule_external_copy(tc, block, data_block, cell, bio);
1185                 else
1186                         schedule_zero(tc, block, data_block, cell, bio);
1187                 break;
1188
1189         case -ENOSPC:
1190                 retry_bios_on_resume(pool, cell);
1191                 break;
1192
1193         default:
1194                 DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
1195                             __func__, r);
1196                 cell_error(pool, cell);
1197                 break;
1198         }
1199 }
1200
1201 static void process_bio(struct thin_c *tc, struct bio *bio)
1202 {
1203         int r;
1204         struct pool *pool = tc->pool;
1205         dm_block_t block = get_bio_block(tc, bio);
1206         struct dm_bio_prison_cell *cell;
1207         struct dm_cell_key key;
1208         struct dm_thin_lookup_result lookup_result;
1209
1210         /*
1211          * If cell is already occupied, then the block is already
1212          * being provisioned so we have nothing further to do here.
1213          */
1214         build_virtual_key(tc->td, block, &key);
1215         if (bio_detain(pool, &key, bio, &cell))
1216                 return;
1217
1218         r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
1219         switch (r) {
1220         case 0:
1221                 if (lookup_result.shared) {
1222                         process_shared_bio(tc, bio, block, &lookup_result);
1223                         cell_defer_no_holder(tc, cell); /* FIXME: pass this cell into process_shared? */
1224                 } else {
1225                         inc_all_io_entry(pool, bio);
1226                         cell_defer_no_holder(tc, cell);
1227
1228                         remap_and_issue(tc, bio, lookup_result.block);
1229                 }
1230                 break;
1231
1232         case -ENODATA:
1233                 if (bio_data_dir(bio) == READ && tc->origin_dev) {
1234                         inc_all_io_entry(pool, bio);
1235                         cell_defer_no_holder(tc, cell);
1236
1237                         remap_to_origin_and_issue(tc, bio);
1238                 } else
1239                         provision_block(tc, bio, block, cell);
1240                 break;
1241
1242         default:
1243                 DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
1244                             __func__, r);
1245                 cell_defer_no_holder(tc, cell);
1246                 bio_io_error(bio);
1247                 break;
1248         }
1249 }
1250
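/*
 * In read-only mode we can still service reads of provisioned blocks, and
 * reads of unprovisioned blocks via the origin or zero-fill, but any bio
 * that would require a mapping change is unserviceable.
 */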
1251 static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
1252 {
1253         int r;
1254         int rw = bio_data_dir(bio);
1255         dm_block_t block = get_bio_block(tc, bio);
1256         struct dm_thin_lookup_result lookup_result;
1257
1258         r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
1259         switch (r) {
1260         case 0:
1261                 if (lookup_result.shared && (rw == WRITE) && bio->bi_size)
1262                         handle_unserviceable_bio(tc->pool, bio);
1263                 else {
1264                         inc_all_io_entry(tc->pool, bio);
1265                         remap_and_issue(tc, bio, lookup_result.block);
1266                 }
1267                 break;
1268
1269         case -ENODATA:
1270                 if (rw != READ) {
1271                         handle_unserviceable_bio(tc->pool, bio);
1272                         break;
1273                 }
1274
1275                 if (tc->origin_dev) {
1276                         inc_all_io_entry(tc->pool, bio);
1277                         remap_to_origin_and_issue(tc, bio);
1278                         break;
1279                 }
1280
1281                 zero_fill_bio(bio);
1282                 bio_endio(bio, 0);
1283                 break;
1284
1285         default:
1286                 DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
1287                             __func__, r);
1288                 bio_io_error(bio);
1289                 break;
1290         }
1291 }
1292
1293 static void process_bio_fail(struct thin_c *tc, struct bio *bio)
1294 {
1295         bio_io_error(bio);
1296 }
1297
1298 /*
1299  * FIXME: should we also commit due to size of transaction, measured in
1300  * metadata blocks?
1301  */
1302 static int need_commit_due_to_time(struct pool *pool)
1303 {
1304         return jiffies < pool->last_commit_jiffies ||
1305                jiffies > pool->last_commit_jiffies + COMMIT_PERIOD;
1306 }
1307
1308 static void process_deferred_bios(struct pool *pool)
1309 {
1310         unsigned long flags;
1311         struct bio *bio;
1312         struct bio_list bios;
1313
1314         bio_list_init(&bios);
1315
1316         spin_lock_irqsave(&pool->lock, flags);
1317         bio_list_merge(&bios, &pool->deferred_bios);
1318         bio_list_init(&pool->deferred_bios);
1319         spin_unlock_irqrestore(&pool->lock, flags);
1320
1321         while ((bio = bio_list_pop(&bios))) {
1322                 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
1323                 struct thin_c *tc = h->tc;
1324
1325                 /*
1326                  * If we've got no free new_mapping structs, and processing
1327                  * this bio might require one, we pause until there are some
1328                  * prepared mappings to process.
1329                  */
1330                 if (ensure_next_mapping(pool)) {
1331                         spin_lock_irqsave(&pool->lock, flags);
1332                         bio_list_merge(&pool->deferred_bios, &bios);
1333                         spin_unlock_irqrestore(&pool->lock, flags);
1334
1335                         break;
1336                 }
1337
1338                 if (bio->bi_rw & REQ_DISCARD)
1339                         pool->process_discard(tc, bio);
1340                 else
1341                         pool->process_bio(tc, bio);
1342         }
1343
1344         /*
1345          * If there are any deferred flush bios, we must commit
1346          * the metadata before issuing them.
1347          */
1348         bio_list_init(&bios);
1349         spin_lock_irqsave(&pool->lock, flags);
1350         bio_list_merge(&bios, &pool->deferred_flush_bios);
1351         bio_list_init(&pool->deferred_flush_bios);
1352         spin_unlock_irqrestore(&pool->lock, flags);
1353
1354         if (bio_list_empty(&bios) && !need_commit_due_to_time(pool))
1355                 return;
1356
1357         if (commit(pool)) {
1358                 while ((bio = bio_list_pop(&bios)))
1359                         bio_io_error(bio);
1360                 return;
1361         }
1362         pool->last_commit_jiffies = jiffies;
1363
1364         while ((bio = bio_list_pop(&bios)))
1365                 generic_make_request(bio);
1366 }
1367
1368 static void do_worker(struct work_struct *ws)
1369 {
1370         struct pool *pool = container_of(ws, struct pool, worker);
1371
1372         process_prepared(pool, &pool->prepared_mappings, &pool->process_prepared_mapping);
1373         process_prepared(pool, &pool->prepared_discards, &pool->process_prepared_discard);
1374         process_deferred_bios(pool);
1375 }
1376
1377 /*
1378  * We want to commit periodically so that not too much
1379  * unwritten data builds up.
1380  */
1381 static void do_waker(struct work_struct *ws)
1382 {
1383         struct pool *pool = container_of(to_delayed_work(ws), struct pool, waker);
1384         wake_worker(pool);
1385         queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
1386 }
1387
1388 /*----------------------------------------------------------------*/
1389
1390 static enum pool_mode get_pool_mode(struct pool *pool)
1391 {
1392         return pool->pf.mode;
1393 }
1394
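/*
 * Switch the pool's processing functions to match the new mode.  Entering
 * read-only mode aborts the current metadata transaction; if the abort
 * fails the pool drops through to failure mode.
 */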
1395 static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
1396 {
1397         int r;
1398         enum pool_mode old_mode = pool->pf.mode;
1399
1400         switch (new_mode) {
1401         case PM_FAIL:
1402                 if (old_mode != new_mode)
1403                         DMERR("%s: switching pool to failure mode",
1404                               dm_device_name(pool->pool_md));
1405                 dm_pool_metadata_read_only(pool->pmd);
1406                 pool->process_bio = process_bio_fail;
1407                 pool->process_discard = process_bio_fail;
1408                 pool->process_prepared_mapping = process_prepared_mapping_fail;
1409                 pool->process_prepared_discard = process_prepared_discard_fail;
1410                 break;
1411
1412         case PM_READ_ONLY:
1413                 if (old_mode != new_mode)
1414                         DMERR("%s: switching pool to read-only mode",
1415                               dm_device_name(pool->pool_md));
1416                 r = dm_pool_abort_metadata(pool->pmd);
1417                 if (r) {
1418                         DMERR("%s: aborting transaction failed",
1419                               dm_device_name(pool->pool_md));
1420                         new_mode = PM_FAIL;
1421                         set_pool_mode(pool, new_mode);
1422                 } else {
1423                         dm_pool_metadata_read_only(pool->pmd);
1424                         pool->process_bio = process_bio_read_only;
1425                         pool->process_discard = process_discard;
1426                         pool->process_prepared_mapping = process_prepared_mapping_fail;
1427                         pool->process_prepared_discard = process_prepared_discard_passdown;
1428                 }
1429                 break;
1430
1431         case PM_WRITE:
1432                 if (old_mode != new_mode)
1433                         DMINFO("%s: switching pool to write mode",
1434                                dm_device_name(pool->pool_md));
1435                 dm_pool_metadata_read_write(pool->pmd);
1436                 pool->process_bio = process_bio;
1437                 pool->process_discard = process_discard;
1438                 pool->process_prepared_mapping = process_prepared_mapping;
1439                 pool->process_prepared_discard = process_prepared_discard;
1440                 break;
1441         }
1442
1443         pool->pf.mode = new_mode;
1444 }
1445
1446 /*
1447  * Rather than calling set_pool_mode directly, use these helpers, which describe the
1448  * reason for mode degradation.
1449  */
1450 static void out_of_data_space(struct pool *pool)
1451 {
1452         DMERR_LIMIT("%s: no free data space available.",
1453                     dm_device_name(pool->pool_md));
1454         set_pool_mode(pool, PM_READ_ONLY);
1455 }
1456
1457 static void metadata_operation_failed(struct pool *pool, const char *op, int r)
1458 {
1459         dm_block_t free_blocks;
1460
1461         DMERR_LIMIT("%s: metadata operation '%s' failed: error = %d",
1462                     dm_device_name(pool->pool_md), op, r);
1463
1464         if (r == -ENOSPC &&
1465             !dm_pool_get_free_metadata_block_count(pool->pmd, &free_blocks) &&
1466             !free_blocks)
1467                 DMERR_LIMIT("%s: no free metadata space available.",
1468                             dm_device_name(pool->pool_md));
1469
1470         set_pool_mode(pool, PM_READ_ONLY);
1471 }
1472
1473 /*----------------------------------------------------------------*/
1474
1475 /*
1476  * Mapping functions.
1477  */
1478
1479 /*
1480  * Called only while mapping a thin bio to hand it over to the workqueue.
1481  */
1482 static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
1483 {
1484         unsigned long flags;
1485         struct pool *pool = tc->pool;
1486
1487         spin_lock_irqsave(&pool->lock, flags);
1488         bio_list_add(&pool->deferred_bios, bio);
1489         spin_unlock_irqrestore(&pool->lock, flags);
1490
1491         wake_worker(pool);
1492 }
1493
1494 static void thin_hook_bio(struct thin_c *tc, struct bio *bio)
1495 {
1496         struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
1497
1498         h->tc = tc;
1499         h->shared_read_entry = NULL;
1500         h->all_io_entry = NULL;
1501         h->overwrite_mapping = NULL;
1502 }
1503
1504 /*
1505  * Non-blocking function called from the thin target's map function.
1506  */
1507 static int thin_bio_map(struct dm_target *ti, struct bio *bio)
1508 {
1509         int r;
1510         struct thin_c *tc = ti->private;
1511         dm_block_t block = get_bio_block(tc, bio);
1512         struct dm_thin_device *td = tc->td;
1513         struct dm_thin_lookup_result result;
1514         struct dm_bio_prison_cell cell1, cell2;
1515         struct dm_bio_prison_cell *cell_result;
1516         struct dm_cell_key key;
1517
1518         thin_hook_bio(tc, bio);
1519
1520         if (get_pool_mode(tc->pool) == PM_FAIL) {
1521                 bio_io_error(bio);
1522                 return DM_MAPIO_SUBMITTED;
1523         }
1524
1525         if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)) {
1526                 thin_defer_bio(tc, bio);
1527                 return DM_MAPIO_SUBMITTED;
1528         }
1529
1530         r = dm_thin_find_block(td, block, 0, &result);
1531
1532         /*
1533          * Note that we defer readahead too.
1534          */
1535         switch (r) {
1536         case 0:
1537                 if (unlikely(result.shared)) {
1538                         /*
1539                          * We have a race condition here between the
1540                          * result.shared value returned by the lookup and
1541                          * snapshot creation, which may cause new
1542                          * sharing.
1543                          *
1544                          * To avoid this, always quiesce the origin before
1545                          * taking the snap.  You want to do this anyway to
1546                          * ensure a consistent application view
1547                          * (i.e. lockfs).
1548                          *
1549                          * More distant ancestors are irrelevant. The
1550                          * shared flag will be set in their case.
1551                          */
1552                         thin_defer_bio(tc, bio);
1553                         return DM_MAPIO_SUBMITTED;
1554                 }
1555
1556                 build_virtual_key(tc->td, block, &key);
1557                 if (dm_bio_detain(tc->pool->prison, &key, bio, &cell1, &cell_result))
1558                         return DM_MAPIO_SUBMITTED;
1559
1560                 build_data_key(tc->td, result.block, &key);
1561                 if (dm_bio_detain(tc->pool->prison, &key, bio, &cell2, &cell_result)) {
1562                         cell_defer_no_holder_no_free(tc, &cell1);
1563                         return DM_MAPIO_SUBMITTED;
1564                 }
1565
1566                 inc_all_io_entry(tc->pool, bio);
1567                 cell_defer_no_holder_no_free(tc, &cell2);
1568                 cell_defer_no_holder_no_free(tc, &cell1);
1569
1570                 remap(tc, bio, result.block);
1571                 return DM_MAPIO_REMAPPED;
1572
1573         case -ENODATA:
1574                 if (get_pool_mode(tc->pool) == PM_READ_ONLY) {
1575                         /*
1576                          * This block isn't provisioned, and we have
1577                          * no way of provisioning it.
1578                          */
1579                         handle_unserviceable_bio(tc->pool, bio);
1580                         return DM_MAPIO_SUBMITTED;
1581                 }
1582                 /* fall through */
1583
1584         case -EWOULDBLOCK:
1585                 /*
1586                  * In future, the failed dm_thin_find_block above could
1587                  * provide the hint to load the metadata into cache.
1588                  */
1589                 thin_defer_bio(tc, bio);
1590                 return DM_MAPIO_SUBMITTED;
1591
1592         default:
1593                 /*
1594                  * Must always call bio_io_error on failure.
1595                  * dm_thin_find_block can fail with -EINVAL if the
1596                  * pool is switched to fail-io mode.
1597                  */
1598                 bio_io_error(bio);
1599                 return DM_MAPIO_SUBMITTED;
1600         }
1601 }
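/*
 * To summarise the cases handled above (a quick reference, mirroring the
 * switch on dm_thin_find_block()'s return value):
 *
 *   0, !result.shared      -> detain the virtual and data cells, then
 *                             remap the bio to the data device.
 *   0, result.shared       -> defer to the worker, which breaks sharing.
 *   -ENODATA, PM_READ_ONLY -> the block can't be provisioned; retry on
 *                             resume or error the bio.
 *   -ENODATA/-EWOULDBLOCK  -> defer to the worker.
 *   anything else          -> bio_io_error().
 */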
1602
1603 static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
1604 {
1605         int r;
1606         unsigned long flags;
1607         struct pool_c *pt = container_of(cb, struct pool_c, callbacks);
1608
1609         spin_lock_irqsave(&pt->pool->lock, flags);
1610         r = !bio_list_empty(&pt->pool->retry_on_resume_list);
1611         spin_unlock_irqrestore(&pt->pool->lock, flags);
1612
1613         if (!r) {
1614                 struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
1615                 r = bdi_congested(&q->backing_dev_info, bdi_bits);
1616         }
1617
1618         return r;
1619 }
1620
1621 static void __requeue_bios(struct pool *pool)
1622 {
1623         bio_list_merge(&pool->deferred_bios, &pool->retry_on_resume_list);
1624         bio_list_init(&pool->retry_on_resume_list);
1625 }
1626
1627 /*----------------------------------------------------------------
1628  * Binding of control targets to a pool object
1629  *--------------------------------------------------------------*/
1630 static bool data_dev_supports_discard(struct pool_c *pt)
1631 {
1632         struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
1633
1634         return q && blk_queue_discard(q);
1635 }
1636
1637 static bool is_factor(sector_t block_size, uint32_t n)
1638 {
1639         return !sector_div(block_size, n);
1640 }
1641
1642 /*
1643  * If discard_passdown was enabled, verify that the data device
1644  * supports discards; disable discard_passdown if not.
1645  */
1646 static void disable_passdown_if_not_supported(struct pool_c *pt)
1647 {
1648         struct pool *pool = pt->pool;
1649         struct block_device *data_bdev = pt->data_dev->bdev;
1650         struct queue_limits *data_limits = &bdev_get_queue(data_bdev)->limits;
1651         sector_t block_size = pool->sectors_per_block << SECTOR_SHIFT;
1652         const char *reason = NULL;
1653         char buf[BDEVNAME_SIZE];
1654
1655         if (!pt->adjusted_pf.discard_passdown)
1656                 return;
1657
1658         if (!data_dev_supports_discard(pt))
1659                 reason = "discard unsupported";
1660
1661         else if (data_limits->max_discard_sectors < pool->sectors_per_block)
1662                 reason = "max discard sectors smaller than a block";
1663
1664         else if (data_limits->discard_granularity > block_size)
1665                 reason = "discard granularity larger than a block";
1666
1667         else if (!is_factor(block_size, data_limits->discard_granularity))
1668                 reason = "discard granularity not a factor of block size";
1669
1670         if (reason) {
1671                 DMWARN("Data device (%s) %s: Disabling discard passdown.", bdevname(data_bdev, buf), reason);
1672                 pt->adjusted_pf.discard_passdown = false;
1673         }
1674 }
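/*
 * Worked example with illustrative numbers: for a 64KiB pool block
 * (sectors_per_block = 128, so block_size = 65536 bytes), a data device
 * advertising discard_granularity = 4096 and max_discard_sectors = 8192
 * passes all four checks above and passdown stays enabled.  A device
 * reporting discard_granularity = 1MiB would trip the "larger than a
 * block" test and passdown would be disabled with a warning.
 */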
1675
1676 static int bind_control_target(struct pool *pool, struct dm_target *ti)
1677 {
1678         struct pool_c *pt = ti->private;
1679
1680         /*
1681          * We want to make sure that a pool in PM_FAIL mode is never upgraded.
1682          */
1683         enum pool_mode old_mode = pool->pf.mode;
1684         enum pool_mode new_mode = pt->adjusted_pf.mode;
1685
1686         /*
1687          * Don't change the pool's mode until set_pool_mode() below.
1688          * Otherwise the pool's process_* function pointers may
1689          * not match the desired pool mode.
1690          */
1691         pt->adjusted_pf.mode = old_mode;
1692
1693         pool->ti = ti;
1694         pool->pf = pt->adjusted_pf;
1695         pool->low_water_blocks = pt->low_water_blocks;
1696
1697         /*
1698          * If we were in PM_FAIL mode, rollback of metadata failed.  We're
1699          * not going to recover without a thin_repair.  So we never let the
1700          * pool move out of the old mode.  On the other hand, PM_READ_ONLY
1701          * mode may have been due to a lack of metadata or data space, and
1702          * may now work (e.g. if the underlying devices have been resized).
1703          */
1704         if (old_mode == PM_FAIL)
1705                 new_mode = old_mode;
1706
1707         set_pool_mode(pool, new_mode);
1708
1709         return 0;
1710 }
1711
1712 static void unbind_control_target(struct pool *pool, struct dm_target *ti)
1713 {
1714         if (pool->ti == ti)
1715                 pool->ti = NULL;
1716 }
1717
1718 /*----------------------------------------------------------------
1719  * Pool creation
1720  *--------------------------------------------------------------*/
1721 /* Initialize pool features. */
1722 static void pool_features_init(struct pool_features *pf)
1723 {
1724         pf->mode = PM_WRITE;
1725         pf->zero_new_blocks = true;
1726         pf->discard_enabled = true;
1727         pf->discard_passdown = true;
1728         pf->error_if_no_space = false;
1729 }
1730
1731 static void __pool_destroy(struct pool *pool)
1732 {
1733         __pool_table_remove(pool);
1734
1735         if (dm_pool_metadata_close(pool->pmd) < 0)
1736                 DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
1737
1738         dm_bio_prison_destroy(pool->prison);
1739         dm_kcopyd_client_destroy(pool->copier);
1740
1741         if (pool->wq)
1742                 destroy_workqueue(pool->wq);
1743
1744         if (pool->next_mapping)
1745                 mempool_free(pool->next_mapping, pool->mapping_pool);
1746         mempool_destroy(pool->mapping_pool);
1747         dm_deferred_set_destroy(pool->shared_read_ds);
1748         dm_deferred_set_destroy(pool->all_io_ds);
1749         kfree(pool);
1750 }
1751
1752 static struct kmem_cache *_new_mapping_cache;
1753
1754 static struct pool *pool_create(struct mapped_device *pool_md,
1755                                 struct block_device *metadata_dev,
1756                                 unsigned long block_size,
1757                                 int read_only, char **error)
1758 {
1759         int r;
1760         void *err_p;
1761         struct pool *pool;
1762         struct dm_pool_metadata *pmd;
1763         bool format_device = !read_only;
1764
1765         pmd = dm_pool_metadata_open(metadata_dev, block_size, format_device);
1766         if (IS_ERR(pmd)) {
1767                 *error = "Error creating metadata object";
1768                 return (struct pool *)pmd;
1769         }
1770
1771         pool = kmalloc(sizeof(*pool), GFP_KERNEL);
1772         if (!pool) {
1773                 *error = "Error allocating memory for pool";
1774                 err_p = ERR_PTR(-ENOMEM);
1775                 goto bad_pool;
1776         }
1777
1778         pool->pmd = pmd;
1779         pool->sectors_per_block = block_size;
1780         if (block_size & (block_size - 1))
1781                 pool->sectors_per_block_shift = -1;
1782         else
1783                 pool->sectors_per_block_shift = __ffs(block_size);
1784         pool->low_water_blocks = 0;
1785         pool_features_init(&pool->pf);
1786         pool->prison = dm_bio_prison_create(PRISON_CELLS);
1787         if (!pool->prison) {
1788                 *error = "Error creating pool's bio prison";
1789                 err_p = ERR_PTR(-ENOMEM);
1790                 goto bad_prison;
1791         }
1792
1793         pool->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
1794         if (IS_ERR(pool->copier)) {
1795                 r = PTR_ERR(pool->copier);
1796                 *error = "Error creating pool's kcopyd client";
1797                 err_p = ERR_PTR(r);
1798                 goto bad_kcopyd_client;
1799         }
1800
1801         /*
1802          * Create a single-threaded workqueue that will service all
1803          * devices that use this metadata.
1804          */
1805         pool->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
1806         if (!pool->wq) {
1807                 *error = "Error creating pool's workqueue";
1808                 err_p = ERR_PTR(-ENOMEM);
1809                 goto bad_wq;
1810         }
1811
1812         INIT_WORK(&pool->worker, do_worker);
1813         INIT_DELAYED_WORK(&pool->waker, do_waker);
1814         spin_lock_init(&pool->lock);
1815         bio_list_init(&pool->deferred_bios);
1816         bio_list_init(&pool->deferred_flush_bios);
1817         INIT_LIST_HEAD(&pool->prepared_mappings);
1818         INIT_LIST_HEAD(&pool->prepared_discards);
1819         pool->low_water_triggered = false;
1820         bio_list_init(&pool->retry_on_resume_list);
1821
1822         pool->shared_read_ds = dm_deferred_set_create();
1823         if (!pool->shared_read_ds) {
1824                 *error = "Error creating pool's shared read deferred set";
1825                 err_p = ERR_PTR(-ENOMEM);
1826                 goto bad_shared_read_ds;
1827         }
1828
1829         pool->all_io_ds = dm_deferred_set_create();
1830         if (!pool->all_io_ds) {
1831                 *error = "Error creating pool's all io deferred set";
1832                 err_p = ERR_PTR(-ENOMEM);
1833                 goto bad_all_io_ds;
1834         }
1835
1836         pool->next_mapping = NULL;
1837         pool->mapping_pool = mempool_create_slab_pool(MAPPING_POOL_SIZE,
1838                                                       _new_mapping_cache);
1839         if (!pool->mapping_pool) {
1840                 *error = "Error creating pool's mapping mempool";
1841                 err_p = ERR_PTR(-ENOMEM);
1842                 goto bad_mapping_pool;
1843         }
1844
1845         pool->ref_count = 1;
1846         pool->last_commit_jiffies = jiffies;
1847         pool->pool_md = pool_md;
1848         pool->md_dev = metadata_dev;
1849         __pool_table_insert(pool);
1850
1851         return pool;
1852
1853 bad_mapping_pool:
1854         dm_deferred_set_destroy(pool->all_io_ds);
1855 bad_all_io_ds:
1856         dm_deferred_set_destroy(pool->shared_read_ds);
1857 bad_shared_read_ds:
1858         destroy_workqueue(pool->wq);
1859 bad_wq:
1860         dm_kcopyd_client_destroy(pool->copier);
1861 bad_kcopyd_client:
1862         dm_bio_prison_destroy(pool->prison);
1863 bad_prison:
1864         kfree(pool);
1865 bad_pool:
1866         if (dm_pool_metadata_close(pmd))
1867                 DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
1868
1869         return err_p;
1870 }
1871
1872 static void __pool_inc(struct pool *pool)
1873 {
1874         BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
1875         pool->ref_count++;
1876 }
1877
1878 static void __pool_dec(struct pool *pool)
1879 {
1880         BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
1881         BUG_ON(!pool->ref_count);
1882         if (!--pool->ref_count)
1883                 __pool_destroy(pool);
1884 }
1885
1886 static struct pool *__pool_find(struct mapped_device *pool_md,
1887                                 struct block_device *metadata_dev,
1888                                 unsigned long block_size, int read_only,
1889                                 char **error, int *created)
1890 {
1891         struct pool *pool = __pool_table_lookup_metadata_dev(metadata_dev);
1892
1893         if (pool) {
1894                 if (pool->pool_md != pool_md) {
1895                         *error = "metadata device already in use by a pool";
1896                         return ERR_PTR(-EBUSY);
1897                 }
1898                 __pool_inc(pool);
1899
1900         } else {
1901                 pool = __pool_table_lookup(pool_md);
1902                 if (pool) {
1903                         if (pool->md_dev != metadata_dev) {
1904                                 *error = "different pool cannot replace a pool";
1905                                 return ERR_PTR(-EINVAL);
1906                         }
1907                         __pool_inc(pool);
1908
1909                 } else {
1910                         pool = pool_create(pool_md, metadata_dev, block_size, read_only, error);
1911                         *created = 1;
1912                 }
1913         }
1914
1915         return pool;
1916 }
1917
1918 /*----------------------------------------------------------------
1919  * Pool target methods
1920  *--------------------------------------------------------------*/
1921 static void pool_dtr(struct dm_target *ti)
1922 {
1923         struct pool_c *pt = ti->private;
1924
1925         mutex_lock(&dm_thin_pool_table.mutex);
1926
1927         unbind_control_target(pt->pool, ti);
1928         __pool_dec(pt->pool);
1929         dm_put_device(ti, pt->metadata_dev);
1930         dm_put_device(ti, pt->data_dev);
1931         kfree(pt);
1932
1933         mutex_unlock(&dm_thin_pool_table.mutex);
1934 }
1935
1936 static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
1937                                struct dm_target *ti)
1938 {
1939         int r;
1940         unsigned argc;
1941         const char *arg_name;
1942
1943         static struct dm_arg _args[] = {
1944                 {0, 4, "Invalid number of pool feature arguments"},
1945         };
1946
1947         /*
1948          * No feature arguments supplied.
1949          */
1950         if (!as->argc)
1951                 return 0;
1952
1953         r = dm_read_arg_group(_args, as, &argc, &ti->error);
1954         if (r)
1955                 return -EINVAL;
1956
1957         while (argc && !r) {
1958                 arg_name = dm_shift_arg(as);
1959                 argc--;
1960
1961                 if (!strcasecmp(arg_name, "skip_block_zeroing"))
1962                         pf->zero_new_blocks = false;
1963
1964                 else if (!strcasecmp(arg_name, "ignore_discard"))
1965                         pf->discard_enabled = false;
1966
1967                 else if (!strcasecmp(arg_name, "no_discard_passdown"))
1968                         pf->discard_passdown = false;
1969
1970                 else if (!strcasecmp(arg_name, "read_only"))
1971                         pf->mode = PM_READ_ONLY;
1972
1973                 else if (!strcasecmp(arg_name, "error_if_no_space"))
1974                         pf->error_if_no_space = true;
1975
1976                 else {
1977                         ti->error = "Unrecognised pool feature requested";
1978                         r = -EINVAL;
1979                         break;
1980                 }
1981         }
1982
1983         return r;
1984 }
1985
1986 static void metadata_low_callback(void *context)
1987 {
1988         struct pool *pool = context;
1989
1990         DMWARN("%s: reached low water mark for metadata device: sending event.",
1991                dm_device_name(pool->pool_md));
1992
1993         dm_table_event(pool->ti->table);
1994 }
1995
1996 static sector_t get_metadata_dev_size(struct block_device *bdev)
1997 {
1998         sector_t metadata_dev_size = i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
1999         char buffer[BDEVNAME_SIZE];
2000
2001         if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING) {
2002                 DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
2003                        bdevname(bdev, buffer), THIN_METADATA_MAX_SECTORS);
2004                 metadata_dev_size = THIN_METADATA_MAX_SECTORS_WARNING;
2005         }
2006
2007         return metadata_dev_size;
2008 }
2009
2010 static dm_block_t get_metadata_dev_size_in_blocks(struct block_device *bdev)
2011 {
2012         sector_t metadata_dev_size = get_metadata_dev_size(bdev);
2013
2014         sector_div(metadata_dev_size, THIN_METADATA_BLOCK_SIZE >> SECTOR_SHIFT);
2015
2016         return metadata_dev_size;
2017 }
2018
2019 /*
2020  * When a metadata threshold is crossed a dm event is triggered, and
2021  * userland should respond by growing the metadata device.  We could let
2022  * userland set the threshold, like we do with the data threshold, but I'm
2023  * not sure they know enough to do this well.
2024  */
2025 static dm_block_t calc_metadata_threshold(struct pool_c *pt)
2026 {
2027         /*
2028          * 4M is ample for all ops with the possible exception of thin
2029          * device deletion which is harmless if it fails (just retry the
2030          * delete after you've grown the device).
2031          */
2032         dm_block_t quarter = get_metadata_dev_size_in_blocks(pt->metadata_dev->bdev) / 4;
2033         return min((dm_block_t)1024ULL /* 4M */, quarter);
2034 }
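/*
 * Example with illustrative sizes: metadata blocks are 4KiB, so the 4M
 * cap above is the 1024ULL.  A 64MiB metadata device holds 16384 blocks,
 * giving quarter = 4096 blocks (16MiB), so the threshold is capped at
 * 1024 blocks.  Only metadata devices smaller than 16MiB produce a
 * threshold below the cap.
 */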
2035
2036 /*
2037  * thin-pool <metadata dev> <data dev>
2038  *           <data block size (sectors)>
2039  *           <low water mark (blocks)>
2040  *           [<#feature args> [<arg>]*]
2041  *
2042  * Optional feature arguments are:
2043  *           skip_block_zeroing: skip the zeroing of newly-provisioned blocks.
2044  *           ignore_discard: disable discard.
2045  *           no_discard_passdown: don't pass discards down to the data device.
2046  *           read_only: don't allow any changes to be made to the pool metadata.
2047  *           error_if_no_space: error IOs, instead of queueing them, if no space.
2048  */
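/*
 * Example table line, using illustrative device names and sizes:
 *
 *   dmsetup create pool --table \
 *     "0 20971520 thin-pool /dev/mapper/meta /dev/mapper/data 128 32768 1 skip_block_zeroing"
 *
 * i.e. a 10GiB pool with 64KiB (128 sector) data blocks, a low water
 * mark of 32768 blocks, and block zeroing disabled.
 */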
2049 static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
2050 {
2051         int r, pool_created = 0;
2052         struct pool_c *pt;
2053         struct pool *pool;
2054         struct pool_features pf;
2055         struct dm_arg_set as;
2056         struct dm_dev *data_dev;
2057         unsigned long block_size;
2058         dm_block_t low_water_blocks;
2059         struct dm_dev *metadata_dev;
2060         fmode_t metadata_mode;
2061
2062         /*
2063          * FIXME Remove validation from scope of lock.
2064          */
2065         mutex_lock(&dm_thin_pool_table.mutex);
2066
2067         if (argc < 4) {
2068                 ti->error = "Invalid argument count";
2069                 r = -EINVAL;
2070                 goto out_unlock;
2071         }
2072
2073         as.argc = argc;
2074         as.argv = argv;
2075
2076         /*
2077          * Set default pool features.
2078          */
2079         pool_features_init(&pf);
2080
2081         dm_consume_args(&as, 4);
2082         r = parse_pool_features(&as, &pf, ti);
2083         if (r)
2084                 goto out_unlock;
2085
2086         metadata_mode = FMODE_READ | ((pf.mode == PM_READ_ONLY) ? 0 : FMODE_WRITE);
2087         r = dm_get_device(ti, argv[0], metadata_mode, &metadata_dev);
2088         if (r) {
2089                 ti->error = "Error opening metadata block device";
2090                 goto out_unlock;
2091         }
2092
2093         /*
2094          * Run for the side-effect of possibly issuing a warning if the
2095          * device is too big.
2096          */
2097         (void) get_metadata_dev_size(metadata_dev->bdev);
2098
2099         r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &data_dev);
2100         if (r) {
2101                 ti->error = "Error getting data device";
2102                 goto out_metadata;
2103         }
2104
2105         if (kstrtoul(argv[2], 10, &block_size) || !block_size ||
2106             block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
2107             block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS ||
2108             block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
2109                 ti->error = "Invalid block size";
2110                 r = -EINVAL;
2111                 goto out;
2112         }
2113
2114         if (kstrtoull(argv[3], 10, (unsigned long long *)&low_water_blocks)) {
2115                 ti->error = "Invalid low water mark";
2116                 r = -EINVAL;
2117                 goto out;
2118         }
2119
2120         pt = kzalloc(sizeof(*pt), GFP_KERNEL);
2121         if (!pt) {
2122                 r = -ENOMEM;
2123                 goto out;
2124         }
2125
2126         pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev,
2127                            block_size, pf.mode == PM_READ_ONLY, &ti->error, &pool_created);
2128         if (IS_ERR(pool)) {
2129                 r = PTR_ERR(pool);
2130                 goto out_free_pt;
2131         }
2132
2133         /*
2134          * 'pool_created' reflects whether this is the first table load.
2135          * Top-level discard support is not allowed to be changed after the
2136          * initial load, since doing so would require a pool reload to
2137          * trigger thin device changes.
2138          */
2139         if (!pool_created && pf.discard_enabled != pool->pf.discard_enabled) {
2140                 ti->error = "Discard support cannot be disabled once enabled";
2141                 r = -EINVAL;
2142                 goto out_flags_changed;
2143         }
2144
2145         pt->pool = pool;
2146         pt->ti = ti;
2147         pt->metadata_dev = metadata_dev;
2148         pt->data_dev = data_dev;
2149         pt->low_water_blocks = low_water_blocks;
2150         pt->adjusted_pf = pt->requested_pf = pf;
2151         ti->num_flush_bios = 1;
2152
2153         /*
2154          * Only need to enable discards if the pool should pass
2155          * them down to the data device.  The thin device's discard
2156          * processing will cause mappings to be removed from the btree.
2157          */
2158         ti->discard_zeroes_data_unsupported = true;
2159         if (pf.discard_enabled && pf.discard_passdown) {
2160                 ti->num_discard_bios = 1;
2161
2162                 /*
2163                  * Setting 'discards_supported' circumvents the normal
2164                  * stacking of discard limits (this keeps the pool and
2165                  * thin devices' discard limits consistent).
2166                  */
2167                 ti->discards_supported = true;
2168         }
2169         ti->private = pt;
2170
2171         r = dm_pool_register_metadata_threshold(pt->pool->pmd,
2172                                                 calc_metadata_threshold(pt),
2173                                                 metadata_low_callback,
2174                                                 pool);
2175         if (r)
2176                 goto out_free_pt;
2177
2178         pt->callbacks.congested_fn = pool_is_congested;
2179         dm_table_add_target_callbacks(ti->table, &pt->callbacks);
2180
2181         mutex_unlock(&dm_thin_pool_table.mutex);
2182
2183         return 0;
2184
2185 out_flags_changed:
2186         __pool_dec(pool);
2187 out_free_pt:
2188         kfree(pt);
2189 out:
2190         dm_put_device(ti, data_dev);
2191 out_metadata:
2192         dm_put_device(ti, metadata_dev);
2193 out_unlock:
2194         mutex_unlock(&dm_thin_pool_table.mutex);
2195
2196         return r;
2197 }
2198
2199 static int pool_map(struct dm_target *ti, struct bio *bio)
2200 {
2201         int r;
2202         struct pool_c *pt = ti->private;
2203         struct pool *pool = pt->pool;
2204         unsigned long flags;
2205
2206         /*
2207          * As this is a singleton target, ti->begin is always zero.
2208          */
2209         spin_lock_irqsave(&pool->lock, flags);
2210         bio->bi_bdev = pt->data_dev->bdev;
2211         r = DM_MAPIO_REMAPPED;
2212         spin_unlock_irqrestore(&pool->lock, flags);
2213
2214         return r;
2215 }
2216
2217 static int maybe_resize_data_dev(struct dm_target *ti, bool *need_commit)
2218 {
2219         int r;
2220         struct pool_c *pt = ti->private;
2221         struct pool *pool = pt->pool;
2222         sector_t data_size = ti->len;
2223         dm_block_t sb_data_size;
2224
2225         *need_commit = false;
2226
2227         (void) sector_div(data_size, pool->sectors_per_block);
2228
2229         r = dm_pool_get_data_dev_size(pool->pmd, &sb_data_size);
2230         if (r) {
2231                 DMERR("%s: failed to retrieve data device size",
2232                       dm_device_name(pool->pool_md));
2233                 return r;
2234         }
2235
2236         if (data_size < sb_data_size) {
2237                 DMERR("%s: pool target (%llu blocks) too small: expected %llu",
2238                       dm_device_name(pool->pool_md),
2239                       (unsigned long long)data_size, sb_data_size);
2240                 return -EINVAL;
2241
2242         } else if (data_size > sb_data_size) {
2243                 if (sb_data_size)
2244                         DMINFO("%s: growing the data device from %llu to %llu blocks",
2245                                dm_device_name(pool->pool_md),
2246                                sb_data_size, (unsigned long long)data_size);
2247                 r = dm_pool_resize_data_dev(pool->pmd, data_size);
2248                 if (r) {
2249                         metadata_operation_failed(pool, "dm_pool_resize_data_dev", r);
2250                         return r;
2251                 }
2252
2253                 *need_commit = true;
2254         }
2255
2256         return 0;
2257 }
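/*
 * Sketch of the userspace flow that exercises this path, with
 * illustrative commands and names:
 *
 *   lvextend -L+10G vg/pooldata       # grow the underlying data device
 *   dmsetup suspend pool
 *   dmsetup reload pool --table ...   # same table, larger length
 *   dmsetup resume pool               # pool_preresume() resizes + commits
 *
 * Note the superblock's record is only ever grown here; a table that
 * shrinks the data device is rejected with -EINVAL above.
 */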
2258
2259 static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit)
2260 {
2261         int r;
2262         struct pool_c *pt = ti->private;
2263         struct pool *pool = pt->pool;
2264         dm_block_t metadata_dev_size, sb_metadata_dev_size;
2265
2266         *need_commit = false;
2267
2268         metadata_dev_size = get_metadata_dev_size_in_blocks(pool->md_dev);
2269
2270         r = dm_pool_get_metadata_dev_size(pool->pmd, &sb_metadata_dev_size);
2271         if (r) {
2272                 DMERR("%s: failed to retrieve metadata device size",
2273                       dm_device_name(pool->pool_md));
2274                 return r;
2275         }
2276
2277         if (metadata_dev_size < sb_metadata_dev_size) {
2278                 DMERR("%s: metadata device (%llu blocks) too small: expected %llu",
2279                       dm_device_name(pool->pool_md),
2280                       metadata_dev_size, sb_metadata_dev_size);
2281                 return -EINVAL;
2282
2283         } else if (metadata_dev_size > sb_metadata_dev_size) {
2284                 DMINFO("%s: growing the metadata device from %llu to %llu blocks",
2285                        dm_device_name(pool->pool_md),
2286                        sb_metadata_dev_size, metadata_dev_size);
2287                 r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size);
2288                 if (r) {
2289                         metadata_operation_failed(pool, "dm_pool_resize_metadata_dev", r);
2290                         return r;
2291                 }
2292
2293                 *need_commit = true;
2294         }
2295
2296         return 0;
2297 }
2298
2299 /*
2300  * Retrieves the number of blocks of the data device from
2301  * the superblock and compares it to the actual device size,
2302  * thus resizing the data device in case it has grown.
2303  *
2304  * This copes both with opening a pre-allocated data device in the
2305  * ctr followed by a resume, and with calling the resume method on
2306  * its own after userspace has grown the data device in reaction
2307  * to a table event.
2309  */
2310 static int pool_preresume(struct dm_target *ti)
2311 {
2312         int r;
2313         bool need_commit1, need_commit2;
2314         struct pool_c *pt = ti->private;
2315         struct pool *pool = pt->pool;
2316
2317         /*
2318          * Take control of the pool object.
2319          */
2320         r = bind_control_target(pool, ti);
2321         if (r)
2322                 return r;
2323
2324         r = maybe_resize_data_dev(ti, &need_commit1);
2325         if (r)
2326                 return r;
2327
2328         r = maybe_resize_metadata_dev(ti, &need_commit2);
2329         if (r)
2330                 return r;
2331
2332         if (need_commit1 || need_commit2)
2333                 (void) commit(pool);
2334
2335         return 0;
2336 }
2337
2338 static void pool_resume(struct dm_target *ti)
2339 {
2340         struct pool_c *pt = ti->private;
2341         struct pool *pool = pt->pool;
2342         unsigned long flags;
2343
2344         spin_lock_irqsave(&pool->lock, flags);
2345         pool->low_water_triggered = false;
2346         __requeue_bios(pool);
2347         spin_unlock_irqrestore(&pool->lock, flags);
2348
2349         do_waker(&pool->waker.work);
2350 }
2351
2352 static void pool_postsuspend(struct dm_target *ti)
2353 {
2354         struct pool_c *pt = ti->private;
2355         struct pool *pool = pt->pool;
2356
2357         cancel_delayed_work(&pool->waker);
2358         flush_workqueue(pool->wq);
2359         (void) commit(pool);
2360 }
2361
2362 static int check_arg_count(unsigned argc, unsigned args_required)
2363 {
2364         if (argc != args_required) {
2365                 DMWARN("Message received with %u arguments instead of %u.",
2366                        argc, args_required);
2367                 return -EINVAL;
2368         }
2369
2370         return 0;
2371 }
2372
2373 static int read_dev_id(char *arg, dm_thin_id *dev_id, int warning)
2374 {
2375         if (!kstrtoull(arg, 10, (unsigned long long *)dev_id) &&
2376             *dev_id <= MAX_DEV_ID)
2377                 return 0;
2378
2379         if (warning)
2380                 DMWARN("Message received with invalid device id: %s", arg);
2381
2382         return -EINVAL;
2383 }
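/*
 * E.g. (illustrative inputs): "0" and "16777215" (MAX_DEV_ID) parse
 * successfully, while "16777216" or "abc" are rejected with -EINVAL.
 */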
2384
2385 static int process_create_thin_mesg(unsigned argc, char **argv, struct pool *pool)
2386 {
2387         dm_thin_id dev_id;
2388         int r;
2389
2390         r = check_arg_count(argc, 2);
2391         if (r)
2392                 return r;
2393
2394         r = read_dev_id(argv[1], &dev_id, 1);
2395         if (r)
2396                 return r;
2397
2398         r = dm_pool_create_thin(pool->pmd, dev_id);
2399         if (r) {
2400                 DMWARN("Creation of new thinly-provisioned device with id %s failed.",
2401                        argv[1]);
2402                 return r;
2403         }
2404
2405         return 0;
2406 }
2407
2408 static int process_create_snap_mesg(unsigned argc, char **argv, struct pool *pool)
2409 {
2410         dm_thin_id dev_id;
2411         dm_thin_id origin_dev_id;
2412         int r;
2413
2414         r = check_arg_count(argc, 3);
2415         if (r)
2416                 return r;
2417
2418         r = read_dev_id(argv[1], &dev_id, 1);
2419         if (r)
2420                 return r;
2421
2422         r = read_dev_id(argv[2], &origin_dev_id, 1);
2423         if (r)
2424                 return r;
2425
2426         r = dm_pool_create_snap(pool->pmd, dev_id, origin_dev_id);
2427         if (r) {
2428                 DMWARN("Creation of new snapshot %s of device %s failed.",
2429                        argv[1], argv[2]);
2430                 return r;
2431         }
2432
2433         return 0;
2434 }
2435
2436 static int process_delete_mesg(unsigned argc, char **argv, struct pool *pool)
2437 {
2438         dm_thin_id dev_id;
2439         int r;
2440
2441         r = check_arg_count(argc, 2);
2442         if (r)
2443                 return r;
2444
2445         r = read_dev_id(argv[1], &dev_id, 1);
2446         if (r)
2447                 return r;
2448
2449         r = dm_pool_delete_thin_device(pool->pmd, dev_id);
2450         if (r)
2451                 DMWARN("Deletion of thin device %s failed.", argv[1]);
2452
2453         return r;
2454 }
2455
2456 static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct pool *pool)
2457 {
2458         dm_thin_id old_id, new_id;
2459         int r;
2460
2461         r = check_arg_count(argc, 3);
2462         if (r)
2463                 return r;
2464
2465         if (kstrtoull(argv[1], 10, (unsigned long long *)&old_id)) {
2466                 DMWARN("set_transaction_id message: Unrecognised id %s.", argv[1]);
2467                 return -EINVAL;
2468         }
2469
2470         if (kstrtoull(argv[2], 10, (unsigned long long *)&new_id)) {
2471                 DMWARN("set_transaction_id message: Unrecognised new id %s.", argv[2]);
2472                 return -EINVAL;
2473         }
2474
2475         r = dm_pool_set_metadata_transaction_id(pool->pmd, old_id, new_id);
2476         if (r) {
2477                 DMWARN("Failed to change transaction id from %s to %s.",
2478                        argv[1], argv[2]);
2479                 return r;
2480         }
2481
2482         return 0;
2483 }
2484
2485 static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
2486 {
2487         int r;
2488
2489         r = check_arg_count(argc, 1);
2490         if (r)
2491                 return r;
2492
2493         (void) commit(pool);
2494
2495         r = dm_pool_reserve_metadata_snap(pool->pmd);
2496         if (r)
2497                 DMWARN("reserve_metadata_snap message failed.");
2498
2499         return r;
2500 }
2501
2502 static int process_release_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
2503 {
2504         int r;
2505
2506         r = check_arg_count(argc, 1);
2507         if (r)
2508                 return r;
2509
2510         r = dm_pool_release_metadata_snap(pool->pmd);
2511         if (r)
2512                 DMWARN("release_metadata_snap message failed.");
2513
2514         return r;
2515 }
2516
2517 /*
2518  * Messages supported:
2519  *   create_thin        <dev_id>
2520  *   create_snap        <dev_id> <origin_id>
2521  *   delete             <dev_id>
2523  *   set_transaction_id <current_trans_id> <new_trans_id>
2524  *   reserve_metadata_snap
2525  *   release_metadata_snap
2526  */
2527 static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
2528 {
2529         int r = -EINVAL;
2530         struct pool_c *pt = ti->private;
2531         struct pool *pool = pt->pool;
2532
2533         if (!strcasecmp(argv[0], "create_thin"))
2534                 r = process_create_thin_mesg(argc, argv, pool);
2535
2536         else if (!strcasecmp(argv[0], "create_snap"))
2537                 r = process_create_snap_mesg(argc, argv, pool);
2538
2539         else if (!strcasecmp(argv[0], "delete"))
2540                 r = process_delete_mesg(argc, argv, pool);
2541
2542         else if (!strcasecmp(argv[0], "set_transaction_id"))
2543                 r = process_set_transaction_id_mesg(argc, argv, pool);
2544
2545         else if (!strcasecmp(argv[0], "reserve_metadata_snap"))
2546                 r = process_reserve_metadata_snap_mesg(argc, argv, pool);
2547
2548         else if (!strcasecmp(argv[0], "release_metadata_snap"))
2549                 r = process_release_metadata_snap_mesg(argc, argv, pool);
2550
2551         else
2552                 DMWARN("Unrecognised thin pool target message received: %s", argv[0]);
2553
2554         if (!r)
2555                 (void) commit(pool);
2556
2557         return r;
2558 }
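/*
 * Example message usage with illustrative device ids:
 *
 *   dmsetup message /dev/mapper/pool 0 "create_thin 0"
 *   dmsetup message /dev/mapper/pool 0 "create_snap 1 0"
 *   dmsetup message /dev/mapper/pool 0 "delete 1"
 *
 * The trailing commit above means a device created this way is durable
 * by the time dmsetup returns.
 */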
2559
2560 static void emit_flags(struct pool_features *pf, char *result,
2561                        unsigned sz, unsigned maxlen)
2562 {
2563         unsigned count = !pf->zero_new_blocks + !pf->discard_enabled +
2564                 !pf->discard_passdown + (pf->mode == PM_READ_ONLY) +
2565                 pf->error_if_no_space;
2566         DMEMIT("%u ", count);
2567
2568         if (!pf->zero_new_blocks)
2569                 DMEMIT("skip_block_zeroing ");
2570
2571         if (!pf->discard_enabled)
2572                 DMEMIT("ignore_discard ");
2573
2574         if (!pf->discard_passdown)
2575                 DMEMIT("no_discard_passdown ");
2576
2577         if (pf->mode == PM_READ_ONLY)
2578                 DMEMIT("read_only ");
2579
2580         if (pf->error_if_no_space)
2581                 DMEMIT("error_if_no_space ");
2582 }
2583
2584 /*
2585  * Status line is:
2586  *    <transaction id> <used metadata blocks>/<total metadata blocks>
2587  *    <used data blocks>/<total data blocks> <held metadata root>
     *    ro|rw <discard config> error_if_no_space|queue_if_no_space
2588  */
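/*
 * Example STATUSTYPE_INFO output with illustrative values:
 *
 *   0 141/32768 883/524288 - rw discard_passdown queue_if_no_space
 *
 * i.e. transaction id 0, 141 of 32768 metadata blocks used, 883 of
 * 524288 data blocks used, no held metadata root, read-write mode,
 * discards passed down, and IO queued when space runs out.
 */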
2589 static void pool_status(struct dm_target *ti, status_type_t type,
2590                         unsigned status_flags, char *result, unsigned maxlen)
2591 {
2592         int r;
2593         unsigned sz = 0;
2594         uint64_t transaction_id;
2595         dm_block_t nr_free_blocks_data;
2596         dm_block_t nr_free_blocks_metadata;
2597         dm_block_t nr_blocks_data;
2598         dm_block_t nr_blocks_metadata;
2599         dm_block_t held_root;
2600         char buf[BDEVNAME_SIZE];
2601         char buf2[BDEVNAME_SIZE];
2602         struct pool_c *pt = ti->private;
2603         struct pool *pool = pt->pool;
2604
2605         switch (type) {
2606         case STATUSTYPE_INFO:
2607                 if (get_pool_mode(pool) == PM_FAIL) {
2608                         DMEMIT("Fail");
2609                         break;
2610                 }
2611
2612                 /* Commit to ensure statistics aren't out-of-date */
2613                 if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti))
2614                         (void) commit(pool);
2615
2616                 r = dm_pool_get_metadata_transaction_id(pool->pmd, &transaction_id);
2617                 if (r) {
2618                         DMERR("%s: dm_pool_get_metadata_transaction_id returned %d",
2619                               dm_device_name(pool->pool_md), r);
2620                         goto err;
2621                 }
2622
2623                 r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free_blocks_metadata);
2624                 if (r) {
2625                         DMERR("%s: dm_pool_get_free_metadata_block_count returned %d",
2626                               dm_device_name(pool->pool_md), r);
2627                         goto err;
2628                 }
2629
2630                 r = dm_pool_get_metadata_dev_size(pool->pmd, &nr_blocks_metadata);
2631                 if (r) {
2632                         DMERR("%s: dm_pool_get_metadata_dev_size returned %d",
2633                               dm_device_name(pool->pool_md), r);
2634                         goto err;
2635                 }
2636
2637                 r = dm_pool_get_free_block_count(pool->pmd, &nr_free_blocks_data);
2638                 if (r) {
2639                         DMERR("%s: dm_pool_get_free_block_count returned %d",
2640                               dm_device_name(pool->pool_md), r);
2641                         goto err;
2642                 }
2643
2644                 r = dm_pool_get_data_dev_size(pool->pmd, &nr_blocks_data);
2645                 if (r) {
2646                         DMERR("%s: dm_pool_get_data_dev_size returned %d",
2647                               dm_device_name(pool->pool_md), r);
2648                         goto err;
2649                 }
2650
2651                 r = dm_pool_get_metadata_snap(pool->pmd, &held_root);
2652                 if (r) {
2653                         DMERR("%s: dm_pool_get_metadata_snap returned %d",
2654                               dm_device_name(pool->pool_md), r);
2655                         goto err;
2656                 }
2657
2658                 DMEMIT("%llu %llu/%llu %llu/%llu ",
2659                        (unsigned long long)transaction_id,
2660                        (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
2661                        (unsigned long long)nr_blocks_metadata,
2662                        (unsigned long long)(nr_blocks_data - nr_free_blocks_data),
2663                        (unsigned long long)nr_blocks_data);
2664
2665                 if (held_root)
2666                         DMEMIT("%llu ", held_root);
2667                 else
2668                         DMEMIT("- ");
2669
2670                 if (pool->pf.mode == PM_READ_ONLY)
2671                         DMEMIT("ro ");
2672                 else
2673                         DMEMIT("rw ");
2674
2675                 if (!pool->pf.discard_enabled)
2676                         DMEMIT("ignore_discard ");
2677                 else if (pool->pf.discard_passdown)
2678                         DMEMIT("discard_passdown ");
2679                 else
2680                         DMEMIT("no_discard_passdown ");
2681
2682                 if (pool->pf.error_if_no_space)
2683                         DMEMIT("error_if_no_space ");
2684                 else
2685                         DMEMIT("queue_if_no_space ");
2686
2687                 break;
2688
2689         case STATUSTYPE_TABLE:
2690                 DMEMIT("%s %s %lu %llu ",
2691                        format_dev_t(buf, pt->metadata_dev->bdev->bd_dev),
2692                        format_dev_t(buf2, pt->data_dev->bdev->bd_dev),
2693                        (unsigned long)pool->sectors_per_block,
2694                        (unsigned long long)pt->low_water_blocks);
2695                 emit_flags(&pt->requested_pf, result, sz, maxlen);
2696                 break;
2697         }
2698         return;
2699
2700 err:
2701         DMEMIT("Error");
2702 }
2703
2704 static int pool_iterate_devices(struct dm_target *ti,
2705                                 iterate_devices_callout_fn fn, void *data)
2706 {
2707         struct pool_c *pt = ti->private;
2708
2709         return fn(ti, pt->data_dev, 0, ti->len, data);
2710 }
2711
2712 static int pool_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
2713                       struct bio_vec *biovec, int max_size)
2714 {
2715         struct pool_c *pt = ti->private;
2716         struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
2717
2718         if (!q->merge_bvec_fn)
2719                 return max_size;
2720
2721         bvm->bi_bdev = pt->data_dev->bdev;
2722
2723         return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
2724 }
2725
2726 static void set_discard_limits(struct pool_c *pt, struct queue_limits *limits)
2727 {
2728         struct pool *pool = pt->pool;
2729         struct queue_limits *data_limits;
2730
2731         limits->max_discard_sectors = pool->sectors_per_block;
2732
2733         /*
2734          * discard_granularity is just a hint, and not enforced.
2735          */
2736         if (pt->adjusted_pf.discard_passdown) {
2737                 data_limits = &bdev_get_queue(pt->data_dev->bdev)->limits;
2738                 limits->discard_granularity = data_limits->discard_granularity;
2739         } else
2740                 limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
2741 }
2742
2743 static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
2744 {
2745         struct pool_c *pt = ti->private;
2746         struct pool *pool = pt->pool;
2747         uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;
2748
2749         /*
2750          * If the system-determined stacked limits are compatible with the
2751          * pool's blocksize (io_opt is a factor) do not override them.
2752          */
2753         if (io_opt_sectors < pool->sectors_per_block ||
2754             do_div(io_opt_sectors, pool->sectors_per_block)) {
2755                 blk_limits_io_min(limits, 0);
2756                 blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
2757         }
2758
2759         /*
2760          * pt->adjusted_pf is a staging area for the actual features to use.
2761          * They get transferred to the live pool in bind_control_target()
2762          * called from pool_preresume().
2763          */
2764         if (!pt->adjusted_pf.discard_enabled) {
2765                 /*
2766                  * Must explicitly disallow stacking discard limits, otherwise the
2767                  * block layer will stack them if the pool's data device has support.
2768                  * QUEUE_FLAG_DISCARD wouldn't be set, but there is no way for the
2769                  * user to see that, so make sure all discard limits are set to 0.
2770                  */
2771                 limits->discard_granularity = 0;
2772                 return;
2773         }
2774
2775         disable_passdown_if_not_supported(pt);
2776
2777         set_discard_limits(pt, limits);
2778 }
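/*
 * Worked example with illustrative limits: for sectors_per_block = 1024
 * (512KiB blocks), stacked limits reporting io_opt = 64KiB (128 sectors)
 * are overridden, since 128 < 1024; limits already reporting io_opt =
 * 1MiB (2048 sectors) are left alone, since 2048 is a whole multiple
 * of 1024.
 */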
2779
2780 static struct target_type pool_target = {
2781         .name = "thin-pool",
2782         .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
2783                     DM_TARGET_IMMUTABLE,
2784         .version = {1, 10, 0},
2785         .module = THIS_MODULE,
2786         .ctr = pool_ctr,
2787         .dtr = pool_dtr,
2788         .map = pool_map,
2789         .postsuspend = pool_postsuspend,
2790         .preresume = pool_preresume,
2791         .resume = pool_resume,
2792         .message = pool_message,
2793         .status = pool_status,
2794         .merge = pool_merge,
2795         .iterate_devices = pool_iterate_devices,
2796         .io_hints = pool_io_hints,
2797 };
2798
2799 /*----------------------------------------------------------------
2800  * Thin target methods
2801  *--------------------------------------------------------------*/
2802 static void thin_dtr(struct dm_target *ti)
2803 {
2804         struct thin_c *tc = ti->private;
2805
2806         mutex_lock(&dm_thin_pool_table.mutex);
2807
2808         __pool_dec(tc->pool);
2809         dm_pool_close_thin_device(tc->td);
2810         dm_put_device(ti, tc->pool_dev);
2811         if (tc->origin_dev)
2812                 dm_put_device(ti, tc->origin_dev);
2813         kfree(tc);
2814
2815         mutex_unlock(&dm_thin_pool_table.mutex);
2816 }
2817
2818 /*
2819  * Thin target parameters:
2820  *
2821  * <pool_dev> <dev_id> [origin_dev]
2822  *
2823  * pool_dev: the path to the pool (e.g. /dev/mapper/my_pool)
2824  * dev_id: the internal device identifier
2825  * origin_dev: a device external to the pool that should act as the origin
2826  *
2827  * If the pool device has discards disabled, they get disabled for the thin
2828  * device as well.
2829  */
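/*
 * Example, using an illustrative pool name and size: a 1GiB thin volume
 * backed by device id 0 (created earlier with a "create_thin 0" pool
 * message):
 *
 *   dmsetup create thin --table "0 2097152 thin /dev/mapper/pool 0"
 */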
2830 static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
2831 {
2832         int r;
2833         struct thin_c *tc;
2834         struct dm_dev *pool_dev, *origin_dev;
2835         struct mapped_device *pool_md;
2836
2837         mutex_lock(&dm_thin_pool_table.mutex);
2838
2839         if (argc != 2 && argc != 3) {
2840                 ti->error = "Invalid argument count";
2841                 r = -EINVAL;
2842                 goto out_unlock;
2843         }
2844
2845         tc = ti->private = kzalloc(sizeof(*tc), GFP_KERNEL);
2846         if (!tc) {
2847                 ti->error = "Out of memory";
2848                 r = -ENOMEM;
2849                 goto out_unlock;
2850         }
2851
2852         if (argc == 3) {
2853                 r = dm_get_device(ti, argv[2], FMODE_READ, &origin_dev);
2854                 if (r) {
2855                         ti->error = "Error opening origin device";
2856                         goto bad_origin_dev;
2857                 }
2858                 tc->origin_dev = origin_dev;
2859         }
2860
2861         r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &pool_dev);
2862         if (r) {
2863                 ti->error = "Error opening pool device";
2864                 goto bad_pool_dev;
2865         }
2866         tc->pool_dev = pool_dev;
2867
2868         if (read_dev_id(argv[1], (unsigned long long *)&tc->dev_id, 0)) {
2869                 ti->error = "Invalid device id";
2870                 r = -EINVAL;
2871                 goto bad_common;
2872         }
2873
2874         pool_md = dm_get_md(tc->pool_dev->bdev->bd_dev);
2875         if (!pool_md) {
2876                 ti->error = "Couldn't get pool mapped device";
2877                 r = -EINVAL;
2878                 goto bad_common;
2879         }
2880
2881         tc->pool = __pool_table_lookup(pool_md);
2882         if (!tc->pool) {
2883                 ti->error = "Couldn't find pool object";
2884                 r = -EINVAL;
2885                 goto bad_pool_lookup;
2886         }
2887         __pool_inc(tc->pool);
2888
2889         if (get_pool_mode(tc->pool) == PM_FAIL) {
2890                 ti->error = "Couldn't open thin device, Pool is in fail mode";
                     r = -EINVAL;
2891                 goto bad_thin_open;
2892         }
2893
2894         r = dm_pool_open_thin_device(tc->pool->pmd, tc->dev_id, &tc->td);
2895         if (r) {
2896                 ti->error = "Couldn't open thin internal device";
2897                 goto bad_thin_open;
2898         }
2899
2900         r = dm_set_target_max_io_len(ti, tc->pool->sectors_per_block);
2901         if (r)
2902                 goto bad_target_max_io_len;
2903
2904         ti->num_flush_bios = 1;
2905         ti->flush_supported = true;
2906         ti->per_bio_data_size = sizeof(struct dm_thin_endio_hook);
2907
2908         /* If the pool supports discards, pass them on. */
2909         ti->discard_zeroes_data_unsupported = true;
2910         if (tc->pool->pf.discard_enabled) {
2911                 ti->discards_supported = true;
2912                 ti->num_discard_bios = 1;
2913                 /* Discard bios must be split on a block boundary */
2914                 ti->split_discard_bios = true;
2915         }
2916
2917         dm_put(pool_md);
2918
2919         mutex_unlock(&dm_thin_pool_table.mutex);
2920
2921         return 0;
2922
bad_target_max_io_len:
         dm_pool_close_thin_device(tc->td);
2923 bad_thin_open:
2924         __pool_dec(tc->pool);
2925 bad_pool_lookup:
2926         dm_put(pool_md);
2927 bad_common:
2928         dm_put_device(ti, tc->pool_dev);
2929 bad_pool_dev:
2930         if (tc->origin_dev)
2931                 dm_put_device(ti, tc->origin_dev);
2932 bad_origin_dev:
2933         kfree(tc);
2934 out_unlock:
2935         mutex_unlock(&dm_thin_pool_table.mutex);
2936
2937         return r;
2938 }
2939
2940 static int thin_map(struct dm_target *ti, struct bio *bio)
2941 {
2942         bio->bi_sector = dm_target_offset(ti, bio->bi_sector);
2943
2944         return thin_bio_map(ti, bio);
2945 }
2946
2947 static int thin_endio(struct dm_target *ti, struct bio *bio, int err)
2948 {
2949         unsigned long flags;
2950         struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
2951         struct list_head work;
2952         struct dm_thin_new_mapping *m, *tmp;
2953         struct pool *pool = h->tc->pool;
2954
2955         if (h->shared_read_entry) {
2956                 INIT_LIST_HEAD(&work);
2957                 dm_deferred_entry_dec(h->shared_read_entry, &work);
2958
2959                 spin_lock_irqsave(&pool->lock, flags);
2960                 list_for_each_entry_safe(m, tmp, &work, list) {
2961                         list_del(&m->list);
2962                         m->quiesced = true;
2963                         __maybe_add_mapping(m);
2964                 }
2965                 spin_unlock_irqrestore(&pool->lock, flags);
2966         }
2967
2968         if (h->all_io_entry) {
2969                 INIT_LIST_HEAD(&work);
2970                 dm_deferred_entry_dec(h->all_io_entry, &work);
2971                 if (!list_empty(&work)) {
2972                         spin_lock_irqsave(&pool->lock, flags);
2973                         list_for_each_entry_safe(m, tmp, &work, list)
2974                                 list_add_tail(&m->list, &pool->prepared_discards);
2975                         spin_unlock_irqrestore(&pool->lock, flags);
2976                         wake_worker(pool);
2977                 }
2978         }
2979
2980         return 0;
2981 }
2982
2983 static void thin_postsuspend(struct dm_target *ti)
2984 {
2985         if (dm_noflush_suspending(ti))
2986                 requeue_io((struct thin_c *)ti->private);
2987 }
2988
2989 /*
2990  * <nr mapped sectors> <highest mapped sector>
2991  */
2992 static void thin_status(struct dm_target *ti, status_type_t type,
2993                         unsigned status_flags, char *result, unsigned maxlen)
2994 {
2995         int r;
2996         ssize_t sz = 0;
2997         dm_block_t mapped, highest;
2998         char buf[BDEVNAME_SIZE];
2999         struct thin_c *tc = ti->private;
3000
3001         if (get_pool_mode(tc->pool) == PM_FAIL) {
3002                 DMEMIT("Fail");
3003                 return;
3004         }
3005
3006         if (!tc->td)
3007                 DMEMIT("-");
3008         else {
3009                 switch (type) {
3010                 case STATUSTYPE_INFO:
3011                         r = dm_thin_get_mapped_count(tc->td, &mapped);
3012                         if (r) {
3013                                 DMERR("dm_thin_get_mapped_count returned %d", r);
3014                                 goto err;
3015                         }
3016
3017                         r = dm_thin_get_highest_mapped_block(tc->td, &highest);
3018                         if (r < 0) {
3019                                 DMERR("dm_thin_get_highest_mapped_block returned %d", r);
3020                                 goto err;
3021                         }
3022
3023                         DMEMIT("%llu ", mapped * tc->pool->sectors_per_block);
3024                         if (r)
3025                                 DMEMIT("%llu", ((highest + 1) *
3026                                                 tc->pool->sectors_per_block) - 1);
3027                         else
3028                                 DMEMIT("-");
3029                         break;
3030
3031                 case STATUSTYPE_TABLE:
3032                         DMEMIT("%s %lu",
3033                                format_dev_t(buf, tc->pool_dev->bdev->bd_dev),
3034                                (unsigned long) tc->dev_id);
3035                         if (tc->origin_dev)
3036                                 DMEMIT(" %s", format_dev_t(buf, tc->origin_dev->bdev->bd_dev));
3037                         break;
3038                 }
3039         }
3040
3041         return;
3042
3043 err:
3044         DMEMIT("Error");
3045 }
3046
3047 static int thin_iterate_devices(struct dm_target *ti,
3048                                 iterate_devices_callout_fn fn, void *data)
3049 {
3050         sector_t blocks;
3051         struct thin_c *tc = ti->private;
3052         struct pool *pool = tc->pool;
3053
3054         /*
3055          * We can't call dm_pool_get_data_dev_size() since that blocks.  So
3056          * we follow a more convoluted path through to the pool's target.
3057          */
3058         if (!pool->ti)
3059                 return 0;       /* nothing is bound */
3060
3061         blocks = pool->ti->len;
3062         (void) sector_div(blocks, pool->sectors_per_block);
3063         if (blocks)
3064                 return fn(ti, tc->pool_dev, 0, pool->sectors_per_block * blocks, data);
3065
3066         return 0;
3067 }
3068
3069 static struct target_type thin_target = {
3070         .name = "thin",
3071         .version = {1, 10, 0},
3072         .module = THIS_MODULE,
3073         .ctr = thin_ctr,
3074         .dtr = thin_dtr,
3075         .map = thin_map,
3076         .end_io = thin_endio,
3077         .postsuspend = thin_postsuspend,
3078         .status = thin_status,
3079         .iterate_devices = thin_iterate_devices,
3080 };
3081
3082 /*----------------------------------------------------------------*/
3083
3084 static int __init dm_thin_init(void)
3085 {
3086         int r;
3087
3088         pool_table_init();
3089
3090         r = dm_register_target(&thin_target);
3091         if (r)
3092                 return r;
3093
3094         r = dm_register_target(&pool_target);
3095         if (r)
3096                 goto bad_pool_target;
3097
3098         r = -ENOMEM;
3099
3100         _new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
3101         if (!_new_mapping_cache)
3102                 goto bad_new_mapping_cache;
3103
3104         return 0;
3105
3106 bad_new_mapping_cache:
3107         dm_unregister_target(&pool_target);
3108 bad_pool_target:
3109         dm_unregister_target(&thin_target);
3110
3111         return r;
3112 }
3113
3114 static void dm_thin_exit(void)
3115 {
3116         dm_unregister_target(&thin_target);
3117         dm_unregister_target(&pool_target);
3118
3119         kmem_cache_destroy(_new_mapping_cache);
3120 }
3121
3122 module_init(dm_thin_init);
3123 module_exit(dm_thin_exit);
3124
3125 MODULE_DESCRIPTION(DM_NAME " thin provisioning target");
3126 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
3127 MODULE_LICENSE("GPL");