/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/iocontext.h>
#include <linux/capability.h>
#include <asm/div64.h>
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "async-thread.h"
#include "check-integrity.h"
static int init_first_rw_device(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_device *device);
static int btrfs_relocate_sys_chunks(struct btrfs_root *root);

static DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
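
/*
 * uuid_mutex protects the global registry of scanned filesystems:
 * fs_uuids holds one btrfs_fs_devices entry per fsid that device
 * scanning has seen, and each entry tracks the devices that belong
 * to that fsid.
 */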
static void lock_chunks(struct btrfs_root *root)
{
	mutex_lock(&root->fs_info->chunk_mutex);
}

static void unlock_chunks(struct btrfs_root *root)
{
	mutex_unlock(&root->fs_info->chunk_mutex);
}
static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		kfree(device->name);
		kfree(device);
	}
	kfree(fs_devices);
}
int btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, list);
		list_del(&fs_devices->list);
		free_fs_devices(fs_devices);
	}
	return 0;
}
static noinline struct btrfs_device *__find_device(struct list_head *head,
						   u64 devid, u8 *uuid)
{
	struct btrfs_device *dev;

	list_for_each_entry(dev, head, dev_list) {
		if (dev->devid == devid &&
		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
			return dev;
		}
	}
	return NULL;
}
static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, list) {
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}
	return NULL;
}
static void requeue_list(struct btrfs_pending_bios *pending_bios,
			 struct bio *head, struct bio *tail)
{
	struct bio *old_head;

	old_head = pending_bios->head;
	pending_bios->head = head;
	if (pending_bios->tail)
		tail->bi_next = old_head;
	else
		pending_bios->tail = tail;
}
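
/*
 * Note that requeue_list() splices the bios back onto the *front* of the
 * device's pending list, so bios that were pulled off but not yet
 * submitted keep their original ordering relative to new arrivals.
 */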
/*
 * we try to collect pending bios for a device so we don't get a large
 * number of procs sending bios down to the same device.  This greatly
 * improves the scheduler's ability to collect and merge the bios.
 *
 * But, it also turns into a long list of bios to process and that is sure
 * to eventually make the worker thread block.  The solution here is to
 * make some progress and then put this work struct back at the end of
 * the list if the block device is congested.  This way, multiple devices
 * can make progress from a single worker thread.
 */
static noinline int run_scheduled_bios(struct btrfs_device *device)
{
	struct bio *pending;
	struct backing_dev_info *bdi;
	struct btrfs_fs_info *fs_info;
	struct btrfs_pending_bios *pending_bios;
	struct bio *tail;
	struct bio *cur;
	int again = 0;
	unsigned long num_run;
	unsigned long batch_run = 0;
	unsigned long limit;
	unsigned long last_waited = 0;
	int force_reg = 0;
	int sync_pending = 0;
	struct blk_plug plug;

	/*
	 * this function runs all the bios we've collected for
	 * a particular device.  We don't want to wander off to
	 * another device without first sending all of these down.
	 * So, setup a plug here and finish it off before we return
	 */
	blk_start_plug(&plug);

	bdi = blk_get_backing_dev_info(device->bdev);
	fs_info = device->dev_root->fs_info;
	limit = btrfs_async_submit_limit(fs_info);
	limit = limit * 2 / 3;

loop:
	spin_lock(&device->io_lock);

loop_lock:
	num_run = 0;

	/* take all the bios off the list at once and process them
	 * later on (without the lock held).  But, remember the
	 * tail and other pointers so the bios can be properly reinserted
	 * into the list if we hit congestion
	 */
	if (!force_reg && device->pending_sync_bios.head) {
		pending_bios = &device->pending_sync_bios;
		force_reg = 1;
	} else {
		pending_bios = &device->pending_bios;
		force_reg = 0;
	}

	pending = pending_bios->head;
	tail = pending_bios->tail;
	WARN_ON(pending && !tail);

	/*
	 * if pending was null this time around, no bios need processing
	 * at all and we can stop.  Otherwise it'll loop back up again
	 * and do an additional check so no bios are missed.
	 *
	 * device->running_pending is used to synchronize with the
	 * schedule_bio code.
	 */
	if (device->pending_sync_bios.head == NULL &&
	    device->pending_bios.head == NULL) {
		again = 0;
		device->running_pending = 0;
	} else {
		again = 1;
		device->running_pending = 1;
	}

	pending_bios->head = NULL;
	pending_bios->tail = NULL;

	spin_unlock(&device->io_lock);

	while (pending) {
		rmb();
		/* we want to work on both lists, but do more bios on the
		 * sync list than the regular list
		 */
		if ((num_run > 32 &&
		    pending_bios != &device->pending_sync_bios &&
		    device->pending_sync_bios.head) ||
		   (num_run > 64 && pending_bios == &device->pending_sync_bios &&
		    device->pending_bios.head)) {
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			goto loop_lock;
		}

		cur = pending;
		pending = pending->bi_next;
		cur->bi_next = NULL;
		atomic_dec(&fs_info->nr_async_bios);

		if (atomic_read(&fs_info->nr_async_bios) < limit &&
		    waitqueue_active(&fs_info->async_submit_wait))
			wake_up(&fs_info->async_submit_wait);

		BUG_ON(atomic_read(&cur->bi_cnt) == 0);

		/*
		 * if we're doing the sync list, record that our
		 * plug has some sync requests on it
		 *
		 * If we're doing the regular list and there are
		 * sync requests sitting around, unplug before
		 * we add more
		 */
		if (pending_bios == &device->pending_sync_bios) {
			sync_pending = 1;
		} else if (sync_pending) {
			blk_finish_plug(&plug);
			blk_start_plug(&plug);
			sync_pending = 0;
		}

		btrfsic_submit_bio(cur->bi_rw, cur);
		num_run++;
		batch_run++;
		if (need_resched())
			cond_resched();

		/*
		 * we made progress, there is more work to do and the bdi
		 * is now congested.  Back off and let other work structs
		 * run instead
		 */
		if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
		    fs_info->fs_devices->open_devices > 1) {
			struct io_context *ioc;

			ioc = current->io_context;

			/*
			 * the main goal here is that we don't want to
			 * block if we're going to be able to submit
			 * more requests without blocking.
			 *
			 * This code does two great things, it pokes into
			 * the elevator code from a filesystem _and_
			 * it makes assumptions about how batching works.
			 */
			if (ioc && ioc->nr_batch_requests > 0 &&
			    time_before(jiffies, ioc->last_waited + HZ/50UL) &&
			    (last_waited == 0 ||
			     ioc->last_waited == last_waited)) {
				/*
				 * we want to go through our batch of
				 * requests and stop.  So, we copy out
				 * the ioc->last_waited time and test
				 * against it before looping
				 */
				last_waited = ioc->last_waited;
				if (need_resched())
					cond_resched();
				continue;
			}
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			device->running_pending = 1;

			spin_unlock(&device->io_lock);
			btrfs_requeue_work(&device->work);
			goto done;
		}
		/* unplug every 64 requests just for good measure */
		if (batch_run % 64 == 0) {
			blk_finish_plug(&plug);
			blk_start_plug(&plug);
			sync_pending = 0;
		}
	}

	cond_resched();
	if (again)
		goto loop;

	spin_lock(&device->io_lock);
	if (device->pending_bios.head || device->pending_sync_bios.head)
		goto loop_lock;
	spin_unlock(&device->io_lock);

done:
	blk_finish_plug(&plug);
	return 0;
}
static void pending_bios_fn(struct btrfs_work *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, work);
	run_scheduled_bios(device);
}
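
/*
 * device_list_add() is called once per scanned device.  It either
 * registers a brand-new fsid, adds a previously unseen device to a known
 * fsid, or refreshes the recorded path of a device we already track.
 */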
static noinline int device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
	u64 found_transid = btrfs_super_generation(disk_super);
	char *name;

	fs_devices = find_fsid(disk_super->fsid);
	if (!fs_devices) {
		fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
		if (!fs_devices)
			return -ENOMEM;
		INIT_LIST_HEAD(&fs_devices->devices);
		INIT_LIST_HEAD(&fs_devices->alloc_list);
		list_add(&fs_devices->list, &fs_uuids);
		memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
		mutex_init(&fs_devices->device_list_mutex);
		device = NULL;
	} else {
		device = __find_device(&fs_devices->devices, devid,
				       disk_super->dev_item.uuid);
	}
	if (!device) {
		if (fs_devices->opened)
			return -EBUSY;

		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device) {
			/* we can safely leave the fs_devices entry around */
			return -ENOMEM;
		}

		device->devid = devid;
		device->work.func = pending_bios_fn;
		memcpy(device->uuid, disk_super->dev_item.uuid,
		       BTRFS_UUID_SIZE);
		spin_lock_init(&device->io_lock);
		device->name = kstrdup(path, GFP_NOFS);
		if (!device->name) {
			kfree(device);
			return -ENOMEM;
		}
		INIT_LIST_HEAD(&device->dev_alloc_list);

		/* init readahead state */
		spin_lock_init(&device->reada_lock);
		device->reada_curr_zone = NULL;
		atomic_set(&device->reada_in_flight, 0);
		device->reada_next = 0;
		INIT_RADIX_TREE(&device->reada_zones, GFP_NOFS & ~__GFP_WAIT);
		INIT_RADIX_TREE(&device->reada_extents, GFP_NOFS & ~__GFP_WAIT);

		mutex_lock(&fs_devices->device_list_mutex);
		list_add_rcu(&device->dev_list, &fs_devices->devices);
		mutex_unlock(&fs_devices->device_list_mutex);

		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	} else if (!device->name || strcmp(device->name, path)) {
		name = kstrdup(path, GFP_NOFS);
		if (!name)
			return -ENOMEM;
		kfree(device->name);
		device->name = name;
		if (device->missing) {
			fs_devices->missing_devices--;
			device->missing = 0;
		}
	}

	if (found_transid > fs_devices->latest_trans) {
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
	}
	*fs_devices_ret = fs_devices;
	return 0;
}
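
/*
 * clone_fs_devices() duplicates an fs_devices list and its device
 * entries (name, devid, uuid).  The clone shares no memory with the
 * original, so the original list can be handed off (e.g. to a seed
 * filesystem) while the copy stays behind in fs_uuids.
 */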
static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;

	fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
	if (!fs_devices)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&fs_devices->devices);
	INIT_LIST_HEAD(&fs_devices->alloc_list);
	INIT_LIST_HEAD(&fs_devices->list);
	mutex_init(&fs_devices->device_list_mutex);
	fs_devices->latest_devid = orig->latest_devid;
	fs_devices->latest_trans = orig->latest_trans;
	memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid));

	/* We have held the volume lock, it is safe to get the devices. */
	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device)
			goto error;

		device->name = kstrdup(orig_dev->name, GFP_NOFS);
		if (!device->name) {
			kfree(device);
			goto error;
		}

		device->devid = orig_dev->devid;
		device->work.func = pending_bios_fn;
		memcpy(device->uuid, orig_dev->uuid, sizeof(device->uuid));
		spin_lock_init(&device->io_lock);
		INIT_LIST_HEAD(&device->dev_list);
		INIT_LIST_HEAD(&device->dev_alloc_list);

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	return fs_devices;
error:
	free_fs_devices(fs_devices);
	return ERR_PTR(-ENOMEM);
}
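
/*
 * After a mount, drop every scanned device that did not end up as part
 * of the filesystem metadata: close its block device and remove it from
 * the list, walking the seed chain as well.
 */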
int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *next;

	mutex_lock(&uuid_mutex);
again:
	/* This is the initialized path, it is safe to release the devices. */
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (device->in_fs_metadata)
			continue;

		if (device->bdev) {
			blkdev_put(device->bdev, device->mode);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (device->writeable) {
			list_del_init(&device->dev_alloc_list);
			device->writeable = 0;
			fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		kfree(device->name);
		kfree(device);
	}

	if (fs_devices->seed) {
		fs_devices = fs_devices->seed;
		goto again;
	}

	mutex_unlock(&uuid_mutex);
	return 0;
}
static void __free_device(struct work_struct *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, rcu_work);

	if (device->bdev)
		blkdev_put(device->bdev, device->mode);

	kfree(device->name);
	kfree(device);
}

static void free_device(struct rcu_head *head)
{
	struct btrfs_device *device;

	device = container_of(head, struct btrfs_device, rcu);

	INIT_WORK(&device->rcu_work, __free_device);
	schedule_work(&device->rcu_work);
}
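
/*
 * Freeing a device is a two-stage affair: free_device() runs as an RCU
 * callback, where sleeping is not allowed, so it only queues
 * __free_device() on a workqueue; the work item can then safely call
 * blkdev_put(), which may sleep.
 */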
static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	if (--fs_devices->opened > 0)
		return 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		struct btrfs_device *new_device;

		if (device->bdev)
			fs_devices->open_devices--;

		if (device->writeable) {
			list_del_init(&device->dev_alloc_list);
			fs_devices->rw_devices--;
		}

		if (device->can_discard)
			fs_devices->num_can_discard--;

		new_device = kmalloc(sizeof(*new_device), GFP_NOFS);
		BUG_ON(!new_device);
		memcpy(new_device, device, sizeof(*new_device));
		new_device->name = kstrdup(device->name, GFP_NOFS);
		BUG_ON(device->name && !new_device->name);
		new_device->bdev = NULL;
		new_device->writeable = 0;
		new_device->in_fs_metadata = 0;
		new_device->can_discard = 0;
		list_replace_rcu(&device->dev_list, &new_device->dev_list);

		call_rcu(&device->rcu, free_device);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = 0;

	return 0;
}
int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_fs_devices *seed_devices = NULL;
	int ret;

	mutex_lock(&uuid_mutex);
	ret = __btrfs_close_devices(fs_devices);
	if (!fs_devices->opened) {
		seed_devices = fs_devices->seed;
		fs_devices->seed = NULL;
	}
	mutex_unlock(&uuid_mutex);

	while (seed_devices) {
		fs_devices = seed_devices;
		seed_devices = fs_devices->seed;
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
	return ret;
}
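
/*
 * Closing the last opener also tears down the chain of seed fs_devices:
 * seeds are opened as a side effect of opening the sprout, so they are
 * closed and freed here rather than by a caller of their own.
 */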
static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
				fmode_t flags, void *holder)
{
	struct request_queue *q;
	struct block_device *bdev;
	struct list_head *head = &fs_devices->devices;
	struct btrfs_device *device;
	struct block_device *latest_bdev = NULL;
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	u64 latest_devid = 0;
	u64 latest_transid = 0;
	u64 devid;
	int seeding = 1;
	int ret = 0;

	flags |= FMODE_EXCL;

	list_for_each_entry(device, head, dev_list) {
		if (device->bdev)
			continue;
		if (!device->name)
			continue;

		bdev = blkdev_get_by_path(device->name, flags, holder);
		if (IS_ERR(bdev)) {
			printk(KERN_INFO "open %s failed\n", device->name);
			goto error;
		}
		set_blocksize(bdev, 4096);

		bh = btrfs_read_dev_super(bdev);
		if (!bh)
			goto error_close;

		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = btrfs_stack_device_id(&disk_super->dev_item);
		if (devid != device->devid)
			goto error_brelse;

		if (memcmp(device->uuid, disk_super->dev_item.uuid,
			   BTRFS_UUID_SIZE))
			goto error_brelse;

		device->generation = btrfs_super_generation(disk_super);
		if (!latest_transid || device->generation > latest_transid) {
			latest_devid = devid;
			latest_transid = device->generation;
			latest_bdev = bdev;
		}

		if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
			device->writeable = 0;
		} else {
			device->writeable = !bdev_read_only(bdev);
			seeding = 0;
		}

		q = bdev_get_queue(bdev);
		if (blk_queue_discard(q)) {
			device->can_discard = 1;
			fs_devices->num_can_discard++;
		}

		device->bdev = bdev;
		device->in_fs_metadata = 0;
		device->mode = flags;

		if (!blk_queue_nonrot(bdev_get_queue(bdev)))
			fs_devices->rotating = 1;

		fs_devices->open_devices++;
		if (device->writeable) {
			fs_devices->rw_devices++;
			list_add(&device->dev_alloc_list,
				 &fs_devices->alloc_list);
		}
		brelse(bh);
		continue;

error_brelse:
		brelse(bh);
error_close:
		blkdev_put(bdev, flags);
error:
		continue;
	}
	if (fs_devices->open_devices == 0) {
		ret = -EINVAL;
		goto out;
	}
	fs_devices->seeding = seeding;
	fs_devices->opened = 1;
	fs_devices->latest_bdev = latest_bdev;
	fs_devices->latest_devid = latest_devid;
	fs_devices->latest_trans = latest_transid;
	fs_devices->total_rw_bytes = 0;
out:
	return ret;
}
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	mutex_lock(&uuid_mutex);
	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		ret = __btrfs_open_devices(fs_devices, flags, holder);
	}
	mutex_unlock(&uuid_mutex);
	return ret;
}
int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
			  struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_super_block *disk_super;
	struct block_device *bdev;
	struct buffer_head *bh;
	int ret;
	u64 devid;
	u64 transid;

	mutex_lock(&uuid_mutex);

	flags |= FMODE_EXCL;
	bdev = blkdev_get_by_path(path, flags, holder);
	if (IS_ERR(bdev)) {
		ret = PTR_ERR(bdev);
		goto error;
	}

	ret = set_blocksize(bdev, 4096);
	if (ret)
		goto error_close;
	bh = btrfs_read_dev_super(bdev);
	if (!bh) {
		ret = -EINVAL;
		goto error_close;
	}
	disk_super = (struct btrfs_super_block *)bh->b_data;
	devid = btrfs_stack_device_id(&disk_super->dev_item);
	transid = btrfs_super_generation(disk_super);
	if (disk_super->label[0])
		printk(KERN_INFO "device label %s ", disk_super->label);
	else
		printk(KERN_INFO "device fsid %pU ", disk_super->fsid);
	printk(KERN_CONT "devid %llu transid %llu %s\n",
	       (unsigned long long)devid, (unsigned long long)transid, path);
	ret = device_list_add(path, disk_super, devid, fs_devices_ret);

	brelse(bh);
error_close:
	blkdev_put(bdev, flags);
error:
	mutex_unlock(&uuid_mutex);
	return ret;
}
/* helper to account the used device space in the range */
int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
				   u64 end, u64 *length)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 extent_end;
	int ret;
	int slot;
	struct extent_buffer *l;

	*length = 0;

	if (start >= device->total_bytes)
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = 2;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
			goto next;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (key.offset <= start && extent_end > end) {
			*length = end - start + 1;
			break;
		} else if (key.offset <= start && extent_end > start)
			*length += extent_end - start;
		else if (key.offset > start && extent_end <= end)
			*length += extent_end - key.offset;
		else if (key.offset > start && key.offset <= end) {
			*length += end - key.offset + 1;
			break;
		} else if (key.offset > end)
			break;

next:
		path->slots[0]++;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
/*
 * find_free_dev_extent - find free space in the specified device
 * @trans:	transaction handle
 * @device:	the device which we search the free space in
 * @num_bytes:	the size of the free space that we need
 * @start:	store the start of the free space.
 * @len:	the size of the free space that we find, or the size of the max
 *		free space if we don't find suitable free space
 *
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 *
 * @start is used to store the start of the free space if we find. But if we
 * don't find suitable free space, it will be used to store the start position
 * of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 */
int find_free_dev_extent(struct btrfs_trans_handle *trans,
			 struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *len)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size;
	u64 extent_end;
	u64 search_start;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;

	/* FIXME use last free of some kind */

	/* we don't want to overwrite the superblock on the drive,
	 * so we make sure to start at an offset of at least 1MB
	 */
	search_start = max(root->fs_info->alloc_start, 1024ull * 1024);

	max_hole_start = search_start;
	max_hole_size = 0;
	hole_size = 0;

	if (search_start >= search_end) {
		ret = -ENOSPC;
		goto error;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto error;
	}
	path->reada = 2;

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is at least as large as what
			 * we need, it must be the largest hole we have
			 * found so far, so max_hole_start points to its
			 * start and its length is in max_hole_size.  Thus,
			 * we return max_hole_start and max_hole_size and
			 * go back to the caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}

	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
	if (search_end > search_start)
		hole_size = search_end - search_start;

	if (hole_size > max_hole_size) {
		max_hole_start = search_start;
		max_hole_size = hole_size;
	}

	if (hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

out:
	btrfs_free_path(path);
error:
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}
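
/*
 * btrfs_free_dev_extent() removes the dev extent item that covers @start
 * and gives the space back to the device: bytes_used shrinks and the
 * free_chunk accounting grows by the extent length.
 */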
static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
				 struct btrfs_device *device,
				 u64 start)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		if (ret)
			goto out;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		key = found_key;
		btrfs_release_path(path);
		goto again;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	}
	BUG_ON(ret);

	if (device->bytes_used > 0) {
		u64 len = btrfs_dev_extent_length(leaf, extent);
		device->bytes_used -= len;
		spin_lock(&root->fs_info->free_chunk_lock);
		root->fs_info->free_chunk_space += len;
		spin_unlock(&root->fs_info->free_chunk_lock);
	}
	ret = btrfs_del_item(trans, root, path);

out:
	btrfs_free_path(path);
	return ret;
}
int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
			   struct btrfs_device *device,
			   u64 chunk_tree, u64 chunk_objectid,
			   u64 chunk_offset, u64 start, u64 num_bytes)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	WARN_ON(!device->in_fs_metadata);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	BUG_ON(ret);

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
		    (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
		    BTRFS_UUID_SIZE);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);
	return ret;
}
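
/*
 * find_next_chunk() searches the chunk tree backwards from
 * (objectid, (u64)-1) to locate the highest existing chunk and returns
 * the first unused logical offset after it.
 */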
static noinline int find_next_chunk(struct btrfs_root *root,
				    u64 objectid, u64 *offset)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_key key;
	struct btrfs_chunk *chunk;
	struct btrfs_key found_key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
	if (ret) {
		*offset = 0;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != objectid)
			*offset = 0;
		else {
			chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
					       struct btrfs_chunk);
			*offset = found_key.offset +
				btrfs_chunk_length(path->nodes[0], chunk);
		}
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
static noinline int find_next_devid(struct btrfs_root *root, u64 *objectid)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*objectid = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*objectid = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
int btrfs_add_device(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = (unsigned long)btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
static int btrfs_rm_dev_item(struct btrfs_root *root,
			     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;
	lock_chunks(root);

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret)
		goto out;
out:
	btrfs_free_path(path);
	unlock_chunks(root);
	btrfs_commit_transaction(trans, root);
	return ret;
}
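
/*
 * btrfs_rm_device() is the backend for removing a device: it checks
 * that the RAID constraints still hold with one device fewer, migrates
 * all chunks off the victim with btrfs_shrink_device(), deletes its dev
 * item, and finally wipes the superblock magic so the device is no
 * longer detected as part of the filesystem.
 */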
int btrfs_rm_device(struct btrfs_root *root, char *device_path)
{
	struct btrfs_device *device;
	struct btrfs_device *next_device;
	struct block_device *bdev;
	struct buffer_head *bh = NULL;
	struct btrfs_super_block *disk_super;
	struct btrfs_fs_devices *cur_devices;
	u64 all_avail;
	u64 devid;
	u64 num_devices;
	u8 *dev_uuid;
	int ret = 0;
	bool clear_super = false;

	mutex_lock(&uuid_mutex);
	mutex_lock(&root->fs_info->volume_mutex);

	all_avail = root->fs_info->avail_data_alloc_bits |
		root->fs_info->avail_system_alloc_bits |
		root->fs_info->avail_metadata_alloc_bits;

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) &&
	    root->fs_info->fs_devices->num_devices <= 4) {
		printk(KERN_ERR "btrfs: unable to go below four devices "
		       "on raid10\n");
		ret = -EINVAL;
		goto out;
	}

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) &&
	    root->fs_info->fs_devices->num_devices <= 2) {
		printk(KERN_ERR "btrfs: unable to go below two "
		       "devices on raid1\n");
		ret = -EINVAL;
		goto out;
	}

	if (strcmp(device_path, "missing") == 0) {
		struct list_head *devices;
		struct btrfs_device *tmp;

		device = NULL;
		devices = &root->fs_info->fs_devices->devices;
		/*
		 * It is safe to read the devices since the volume_mutex
		 * is held.
		 */
		list_for_each_entry(tmp, devices, dev_list) {
			if (tmp->in_fs_metadata && !tmp->bdev) {
				device = tmp;
				break;
			}
		}
		bdev = NULL;
		bh = NULL;
		disk_super = NULL;
		if (!device) {
			printk(KERN_ERR "btrfs: no missing devices found to "
			       "remove\n");
			goto out;
		}
	} else {
		bdev = blkdev_get_by_path(device_path, FMODE_READ | FMODE_EXCL,
					  root->fs_info->bdev_holder);
		if (IS_ERR(bdev)) {
			ret = PTR_ERR(bdev);
			goto out;
		}

		set_blocksize(bdev, 4096);
		bh = btrfs_read_dev_super(bdev);
		if (!bh) {
			ret = -EINVAL;
			goto error_close;
		}
		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = btrfs_stack_device_id(&disk_super->dev_item);
		dev_uuid = disk_super->dev_item.uuid;
		device = btrfs_find_device(root, devid, dev_uuid,
					   disk_super->fsid);
		if (!device) {
			ret = -ENOENT;
			goto error_brelse;
		}
	}

	if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
		printk(KERN_ERR "btrfs: unable to remove the only writeable "
		       "device\n");
		ret = -EINVAL;
		goto error_brelse;
	}

	if (device->writeable) {
		lock_chunks(root);
		list_del_init(&device->dev_alloc_list);
		unlock_chunks(root);
		root->fs_info->fs_devices->rw_devices--;
		clear_super = true;
	}

	ret = btrfs_shrink_device(device, 0);
	if (ret)
		goto error_undo;

	ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
	if (ret)
		goto error_undo;

	spin_lock(&root->fs_info->free_chunk_lock);
	root->fs_info->free_chunk_space = device->total_bytes -
		device->bytes_used;
	spin_unlock(&root->fs_info->free_chunk_lock);

	device->in_fs_metadata = 0;
	btrfs_scrub_cancel_dev(root, device);

	/*
	 * the device list mutex makes sure that we don't change
	 * the device list while someone else is writing out all
	 * the device supers.
	 */

	cur_devices = device->fs_devices;
	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_del_rcu(&device->dev_list);

	device->fs_devices->num_devices--;

	if (device->missing)
		root->fs_info->fs_devices->missing_devices--;

	next_device = list_entry(root->fs_info->fs_devices->devices.next,
				 struct btrfs_device, dev_list);
	if (device->bdev == root->fs_info->sb->s_bdev)
		root->fs_info->sb->s_bdev = next_device->bdev;
	if (device->bdev == root->fs_info->fs_devices->latest_bdev)
		root->fs_info->fs_devices->latest_bdev = next_device->bdev;

	if (device->bdev)
		device->fs_devices->open_devices--;

	call_rcu(&device->rcu, free_device);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
	btrfs_set_super_num_devices(root->fs_info->super_copy, num_devices);

	if (cur_devices->open_devices == 0) {
		struct btrfs_fs_devices *fs_devices;
		fs_devices = root->fs_info->fs_devices;
		while (fs_devices) {
			if (fs_devices->seed == cur_devices)
				break;
			fs_devices = fs_devices->seed;
		}
		fs_devices->seed = cur_devices->seed;
		cur_devices->seed = NULL;
		lock_chunks(root);
		__btrfs_close_devices(cur_devices);
		unlock_chunks(root);
		free_fs_devices(cur_devices);
	}

	/*
	 * at this point, the device is zero sized.  We want to
	 * remove it from the devices list and zero out the old super
	 */
	if (clear_super) {
		/* make sure this device isn't detected as part of
		 * the FS anymore
		 */
		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
		set_buffer_dirty(bh);
		sync_dirty_buffer(bh);
	}

	ret = 0;

error_brelse:
	brelse(bh);
error_close:
	if (bdev)
		blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
out:
	mutex_unlock(&root->fs_info->volume_mutex);
	mutex_unlock(&uuid_mutex);
	return ret;
error_undo:
	if (device->writeable) {
		lock_chunks(root);
		list_add(&device->dev_alloc_list,
			 &root->fs_info->fs_devices->alloc_list);
		unlock_chunks(root);
		root->fs_info->fs_devices->rw_devices++;
	}
	goto error_close;
}
/*
 * does all the dirty work required for changing file system's UUID.
 */
static int btrfs_prepare_sprout(struct btrfs_trans_handle *trans,
				struct btrfs_root *root)
{
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	struct btrfs_fs_devices *old_devices;
	struct btrfs_fs_devices *seed_devices;
	struct btrfs_super_block *disk_super = root->fs_info->super_copy;
	struct btrfs_device *device;
	u64 super_flags;

	BUG_ON(!mutex_is_locked(&uuid_mutex));
	if (!fs_devices->seeding)
		return -EINVAL;

	seed_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
	if (!seed_devices)
		return -ENOMEM;

	old_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(old_devices)) {
		kfree(seed_devices);
		return PTR_ERR(old_devices);
	}

	list_add(&old_devices->list, &fs_uuids);

	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
	seed_devices->opened = 1;
	INIT_LIST_HEAD(&seed_devices->devices);
	INIT_LIST_HEAD(&seed_devices->alloc_list);
	mutex_init(&seed_devices->device_list_mutex);

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
			     synchronize_rcu);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
	list_for_each_entry(device, &seed_devices->devices, dev_list) {
		device->fs_devices = seed_devices;
	}

	fs_devices->seeding = 0;
	fs_devices->num_devices = 0;
	fs_devices->open_devices = 0;
	fs_devices->seed = seed_devices;

	generate_random_uuid(fs_devices->fsid);
	memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	super_flags = btrfs_super_flags(disk_super) &
		      ~BTRFS_SUPER_FLAG_SEEDING;
	btrfs_set_super_flags(disk_super, super_flags);

	return 0;
}
/*
 * store the expected generation for seed devices in device items.
 */
static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_dev_item *dev_item;
	struct btrfs_device *device;
	struct btrfs_key key;
	u8 fs_uuid[BTRFS_UUID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];
	u64 devid;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	root = root->fs_info->chunk_root;
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = BTRFS_DEV_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
		if (ret < 0)
			goto error;

		leaf = path->nodes[0];
next_slot:
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret > 0)
				break;
			if (ret < 0)
				goto error;
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
			btrfs_release_path(path);
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
		    key.type != BTRFS_DEV_ITEM_KEY)
			break;

		dev_item = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_dev_item);
		devid = btrfs_device_id(leaf, dev_item);
		read_extent_buffer(leaf, dev_uuid,
				   (unsigned long)btrfs_device_uuid(dev_item),
				   BTRFS_UUID_SIZE);
		read_extent_buffer(leaf, fs_uuid,
				   (unsigned long)btrfs_device_fsid(dev_item),
				   BTRFS_UUID_SIZE);
		device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
		BUG_ON(!device);

		if (device->fs_devices->seeding) {
			btrfs_set_device_generation(leaf, dev_item,
						    device->generation);
			btrfs_mark_buffer_dirty(leaf);
		}

		path->slots[0]++;
		goto next_slot;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
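
/*
 * btrfs_init_new_device() implements device add: open the new block
 * device, allocate a devid, stitch the device into the in-memory lists
 * and the on-disk chunk tree, and grow the filesystem totals.  When the
 * mounted fs is a seed, this is also where the sprout is created.
 */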
int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
{
	struct request_queue *q;
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device;
	struct block_device *bdev;
	struct list_head *devices;
	struct super_block *sb = root->fs_info->sb;
	u64 total_bytes;
	int seeding_dev = 0;
	int ret = 0;

	if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
		return -EINVAL;

	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
				  root->fs_info->bdev_holder);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	if (root->fs_info->fs_devices->seeding) {
		seeding_dev = 1;
		down_write(&sb->s_umount);
		mutex_lock(&uuid_mutex);
	}

	filemap_write_and_wait(bdev->bd_inode->i_mapping);
	mutex_lock(&root->fs_info->volume_mutex);

	devices = &root->fs_info->fs_devices->devices;
	/*
	 * we have the volume lock, so we don't need the extra
	 * device list mutex while reading the list here.
	 */
	list_for_each_entry(device, devices, dev_list) {
		if (device->bdev == bdev) {
			ret = -EEXIST;
			goto error;
		}
	}

	device = kzalloc(sizeof(*device), GFP_NOFS);
	if (!device) {
		/* we can safely leave the fs_devices entry around */
		ret = -ENOMEM;
		goto error;
	}

	device->name = kstrdup(device_path, GFP_NOFS);
	if (!device->name) {
		kfree(device);
		ret = -ENOMEM;
		goto error;
	}

	ret = find_next_devid(root, &device->devid);
	if (ret) {
		kfree(device->name);
		kfree(device);
		goto error;
	}

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		kfree(device->name);
		kfree(device);
		ret = PTR_ERR(trans);
		goto error;
	}

	lock_chunks(root);

	q = bdev_get_queue(bdev);
	if (blk_queue_discard(q))
		device->can_discard = 1;
	device->writeable = 1;
	device->work.func = pending_bios_fn;
	generate_random_uuid(device->uuid);
	spin_lock_init(&device->io_lock);
	device->generation = trans->transid;
	device->io_width = root->sectorsize;
	device->io_align = root->sectorsize;
	device->sector_size = root->sectorsize;
	device->total_bytes = i_size_read(bdev->bd_inode);
	device->disk_total_bytes = device->total_bytes;
	device->dev_root = root->fs_info->dev_root;
	device->bdev = bdev;
	device->in_fs_metadata = 1;
	device->mode = FMODE_EXCL;
	set_blocksize(device->bdev, 4096);

	if (seeding_dev) {
		sb->s_flags &= ~MS_RDONLY;
		ret = btrfs_prepare_sprout(trans, root);
		BUG_ON(ret);
	}

	device->fs_devices = root->fs_info->fs_devices;

	/*
	 * we don't want write_supers to jump in here with our device
	 * half setup
	 */
	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices);
	list_add(&device->dev_alloc_list,
		 &root->fs_info->fs_devices->alloc_list);
	root->fs_info->fs_devices->num_devices++;
	root->fs_info->fs_devices->open_devices++;
	root->fs_info->fs_devices->rw_devices++;
	if (device->can_discard)
		root->fs_info->fs_devices->num_can_discard++;
	root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;

	spin_lock(&root->fs_info->free_chunk_lock);
	root->fs_info->free_chunk_space += device->total_bytes;
	spin_unlock(&root->fs_info->free_chunk_lock);

	if (!blk_queue_nonrot(bdev_get_queue(bdev)))
		root->fs_info->fs_devices->rotating = 1;

	total_bytes = btrfs_super_total_bytes(root->fs_info->super_copy);
	btrfs_set_super_total_bytes(root->fs_info->super_copy,
				    total_bytes + device->total_bytes);

	total_bytes = btrfs_super_num_devices(root->fs_info->super_copy);
	btrfs_set_super_num_devices(root->fs_info->super_copy,
				    total_bytes + 1);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	if (seeding_dev) {
		ret = init_first_rw_device(trans, root, device);
		BUG_ON(ret);
		ret = btrfs_finish_sprout(trans, root);
		BUG_ON(ret);
	} else {
		ret = btrfs_add_device(trans, root, device);
	}

	/*
	 * we've got more storage, clear any full flags on the space
	 * infos
	 */
	btrfs_clear_space_info_full(root->fs_info);

	unlock_chunks(root);
	btrfs_commit_transaction(trans, root);

	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);

		ret = btrfs_relocate_sys_chunks(root);
		BUG_ON(ret);
	}
out:
	mutex_unlock(&root->fs_info->volume_mutex);
	return ret;
error:
	blkdev_put(bdev, FMODE_EXCL);
	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);
	}
	goto out;
}
static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
					struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	root = device->dev_root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->disk_total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_mark_buffer_dirty(leaf);

out:
	btrfs_free_path(path);
	return ret;
}
static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
			       struct btrfs_device *device, u64 new_size)
{
	struct btrfs_super_block *super_copy =
		device->dev_root->fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 diff = new_size - device->total_bytes;

	if (!device->writeable)
		return -EACCES;
	if (new_size <= device->total_bytes)
		return -EINVAL;

	btrfs_set_super_total_bytes(super_copy, old_total + diff);
	device->fs_devices->total_rw_bytes += diff;

	device->total_bytes = new_size;
	device->disk_total_bytes = new_size;
	btrfs_clear_space_info_full(device->dev_root->fs_info);

	return btrfs_update_device(trans, device);
}

int btrfs_grow_device(struct btrfs_trans_handle *trans,
		      struct btrfs_device *device, u64 new_size)
{
	int ret;
	lock_chunks(device->dev_root);
	ret = __btrfs_grow_device(trans, device, new_size);
	unlock_chunks(device->dev_root);
	return ret;
}
static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    u64 chunk_tree, u64 chunk_objectid,
			    u64 chunk_offset)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	root = root->fs_info->chunk_root;
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = chunk_objectid;
	key.offset = chunk_offset;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	BUG_ON(ret);

	ret = btrfs_del_item(trans, root, path);

	btrfs_free_path(path);
	return ret;
}
static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
			chunk_offset)
{
	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	struct btrfs_key key;

	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	cur = 0;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)(ptr + len);
			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
			len += btrfs_chunk_item_size(num_stripes);
		} else {
			ret = -EIO;
			break;
		}
		if (key.objectid == chunk_objectid &&
		    key.offset == chunk_offset) {
			memmove(ptr, ptr + len, array_size - (cur + len));
			array_size -= len;
			btrfs_set_super_sys_array_size(super_copy, array_size);
		} else {
			ptr += len;
			cur += len;
		}
	}
	return ret;
}
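
/*
 * The sys_chunk_array in the superblock is a packed sequence of
 * (btrfs_disk_key, btrfs_chunk) pairs; each chunk item is variable sized
 * depending on its stripe count, which is why deletion above walks the
 * array and memmove()s the tail over the removed entry.
 */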
static int btrfs_relocate_chunk(struct btrfs_root *root,
				u64 chunk_tree, u64 chunk_objectid,
				u64 chunk_offset)
{
	struct extent_map_tree *em_tree;
	struct btrfs_root *extent_root;
	struct btrfs_trans_handle *trans;
	struct extent_map *em;
	struct map_lookup *map;
	int ret;
	int i;

	root = root->fs_info->chunk_root;
	extent_root = root->fs_info->extent_root;
	em_tree = &root->fs_info->mapping_tree.map_tree;

	ret = btrfs_can_relocate(extent_root, chunk_offset);
	if (ret)
		return -ENOSPC;

	/* step one, relocate all the extents inside this chunk */
	ret = btrfs_relocate_block_group(extent_root, chunk_offset);
	if (ret)
		return ret;

	trans = btrfs_start_transaction(root, 0);
	BUG_ON(IS_ERR(trans));

	lock_chunks(root);

	/*
	 * step two, delete the device extents and the
	 * chunk tree entries
	 */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	read_unlock(&em_tree->lock);

	BUG_ON(em->start > chunk_offset ||
	       em->start + em->len < chunk_offset);
	map = (struct map_lookup *)em->bdev;

	for (i = 0; i < map->num_stripes; i++) {
		ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
					    map->stripes[i].physical);
		BUG_ON(ret);

		if (map->stripes[i].dev) {
			ret = btrfs_update_device(trans, map->stripes[i].dev);
			BUG_ON(ret);
		}
	}
	ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
			       chunk_offset);
	BUG_ON(ret);

	trace_btrfs_chunk_free(root, map, chunk_offset, em->len);

	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
		BUG_ON(ret);
	}

	ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
	BUG_ON(ret);

	write_lock(&em_tree->lock);
	remove_extent_mapping(em_tree, em);
	write_unlock(&em_tree->lock);

	kfree(map);
	em->bdev = NULL;

	/* once for the tree */
	free_extent_map(em);
	/* once for us */
	free_extent_map(em);

	unlock_chunks(root);
	btrfs_end_transaction(trans, root);
	return 0;
}
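
/*
 * btrfs_relocate_sys_chunks() walks the chunk tree backwards, relocating
 * every SYSTEM chunk.  Chunks that fail with -ENOSPC are counted and the
 * whole scan is retried once; a second failure is reported as -ENOSPC.
 */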
static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
{
	struct btrfs_root *chunk_root = root->fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_chunk *chunk;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u64 chunk_tree = chunk_root->root_key.objectid;
	u64 chunk_type;
	bool retried = false;
	int failed = 0;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

again:
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0)
			goto error;
		BUG_ON(ret == 0);

		ret = btrfs_previous_item(chunk_root, path, key.objectid,
					  key.type);
		if (ret < 0)
			goto error;
		if (ret > 0)
			break;

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		chunk = btrfs_item_ptr(leaf, path->slots[0],
				       struct btrfs_chunk);
		chunk_type = btrfs_chunk_type(leaf, chunk);
		btrfs_release_path(path);

		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
			ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
						   found_key.objectid,
						   found_key.offset);
			if (ret == -ENOSPC)
				failed++;
			else if (ret)
				BUG();
		}

		if (found_key.offset == 0)
			break;
		key.offset = found_key.offset - 1;
	}
	ret = 0;
	if (failed && !retried) {
		failed = 0;
		retried = true;
		goto again;
	} else if (failed && retried) {
		WARN_ON(1);
		ret = -ENOSPC;
	}
error:
	btrfs_free_path(path);
	return ret;
}
static u64 div_factor(u64 num, int factor)
{
	if (factor == 10)
		return num;
	num *= factor;
	do_div(num, 10);
	return num;
}
int btrfs_balance(struct btrfs_root *dev_root)
{
	int ret;
	struct list_head *devices = &dev_root->fs_info->fs_devices->devices;
	struct btrfs_device *device;
	u64 old_size;
	u64 size_to_free;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_root *chunk_root = dev_root->fs_info->chunk_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_key found_key;

	if (dev_root->fs_info->sb->s_flags & MS_RDONLY)
		return -EROFS;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	mutex_lock(&dev_root->fs_info->volume_mutex);
	dev_root = dev_root->fs_info->dev_root;

	/* step one make some room on all the devices */
	list_for_each_entry(device, devices, dev_list) {
		old_size = device->total_bytes;
		size_to_free = div_factor(old_size, 1);
		size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
		if (!device->writeable ||
		    device->total_bytes - device->bytes_used > size_to_free)
			continue;

		ret = btrfs_shrink_device(device, old_size - size_to_free);
		if (ret == -ENOSPC)
			break;
		BUG_ON(ret);

		trans = btrfs_start_transaction(dev_root, 0);
		BUG_ON(IS_ERR(trans));

		ret = btrfs_grow_device(trans, device, old_size);
		BUG_ON(ret);

		btrfs_end_transaction(trans, dev_root);
	}

	/* step two, relocate all the chunks */
	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto error;
	}
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0)
			goto error;

		/*
		 * this shouldn't happen, it means the last relocate
		 * failed
		 */
		if (ret == 0)
			break;

		ret = btrfs_previous_item(chunk_root, path, 0,
					  BTRFS_CHUNK_ITEM_KEY);
		if (ret)
			break;

		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != key.objectid)
			break;

		/* chunk zero is special */
		if (found_key.offset == 0)
			break;

		btrfs_release_path(path);
		ret = btrfs_relocate_chunk(chunk_root,
					   chunk_root->root_key.objectid,
					   found_key.objectid,
					   found_key.offset);
		if (ret && ret != -ENOSPC)
			goto error;
		key.offset = found_key.offset - 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	mutex_unlock(&dev_root->fs_info->volume_mutex);
	return ret;
}
/*
 * shrinking a device means finding all of the device extents past
 * the new size, and then following the back refs to the chunks.
 * The chunk relocation code actually frees the device extent
 */
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	u64 length;
	u64 chunk_tree;
	u64 chunk_objectid;
	u64 chunk_offset;
	int ret;
	int slot;
	int failed = 0;
	bool retried = false;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 old_size = device->total_bytes;
	u64 diff = device->total_bytes - new_size;

	if (new_size >= device->total_bytes)
		return -EINVAL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 2;

	lock_chunks(root);

	device->total_bytes = new_size;
	if (device->writeable) {
		device->fs_devices->total_rw_bytes -= diff;
		spin_lock(&root->fs_info->free_chunk_lock);
		root->fs_info->free_chunk_space -= diff;
		spin_unlock(&root->fs_info->free_chunk_lock);
	}
	unlock_chunks(root);

again:
	key.objectid = device->devid;
	key.offset = (u64)-1;
	key.type = BTRFS_DEV_EXTENT_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto done;

		ret = btrfs_previous_item(root, path, 0, key.type);
		if (ret < 0)
			goto done;
		if (ret) {
			ret = 0;
			btrfs_release_path(path);
			break;
		}

		l = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(l, &key, path->slots[0]);

		if (key.objectid != device->devid) {
			btrfs_release_path(path);
			break;
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

		if (key.offset + length <= new_size) {
			btrfs_release_path(path);
			break;
		}

		chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
		chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
		btrfs_release_path(path);

		ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
					   chunk_offset);
		if (ret && ret != -ENOSPC)
			goto done;
		if (ret == -ENOSPC)
			failed++;
		key.offset -= 1;
	}

	if (failed && !retried) {
		failed = 0;
		retried = true;
		goto again;
	} else if (failed && retried) {
		ret = -ENOSPC;
		lock_chunks(root);

		device->total_bytes = old_size;
		if (device->writeable)
			device->fs_devices->total_rw_bytes += diff;
		spin_lock(&root->fs_info->free_chunk_lock);
		root->fs_info->free_chunk_space += diff;
		spin_unlock(&root->fs_info->free_chunk_lock);
		unlock_chunks(root);
		goto done;
	}

	/* Shrinking succeeded, else we would be at "done". */
	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto done;
	}

	lock_chunks(root);

	device->disk_total_bytes = new_size;
	/* Now btrfs_update_device() will change the on-disk size. */
	ret = btrfs_update_device(trans, device);
	if (ret) {
		unlock_chunks(root);
		btrfs_end_transaction(trans, root);
		goto done;
	}
	WARN_ON(diff > old_total);
	btrfs_set_super_total_bytes(super_copy, old_total - diff);
	unlock_chunks(root);
	btrfs_end_transaction(trans, root);
done:
	btrfs_free_path(path);
	return ret;
}
static int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_key *key,
				  struct btrfs_chunk *chunk, int item_size)
{
	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
	struct btrfs_disk_key disk_key;
	u32 array_size;
	u8 *ptr;

	array_size = btrfs_super_sys_array_size(super_copy);
	if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
		return -EFBIG;

	ptr = super_copy->sys_chunk_array + array_size;
	btrfs_cpu_key_to_disk(&disk_key, key);
	memcpy(ptr, &disk_key, sizeof(disk_key));
	ptr += sizeof(disk_key);
	memcpy(ptr, chunk, item_size);
	item_size += sizeof(disk_key);
	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
	return 0;
}
/*
 * sort the devices in descending order by max_avail, total_avail
 */
static int btrfs_cmp_device_info(const void *a, const void *b)
{
	const struct btrfs_device_info *di_a = a;
	const struct btrfs_device_info *di_b = b;

	if (di_a->max_avail > di_b->max_avail)
		return -1;
	if (di_a->max_avail < di_b->max_avail)
		return 1;
	if (di_a->total_avail > di_b->total_avail)
		return -1;
	if (di_a->total_avail < di_b->total_avail)
		return 1;
	return 0;
}
static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
			       struct btrfs_root *extent_root,
			       struct map_lookup **map_ret,
			       u64 *num_bytes_out, u64 *stripe_size_out,
			       u64 start, u64 type)
{
	struct btrfs_fs_info *info = extent_root->fs_info;
	struct btrfs_fs_devices *fs_devices = info->fs_devices;
	struct list_head *cur;
	struct map_lookup *map = NULL;
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct btrfs_device_info *devices_info = NULL;
	u64 total_avail;
	int num_stripes;	/* total number of stripes to allocate */
	int sub_stripes;	/* sub_stripes info for map */
	int dev_stripes;	/* stripes per dev */
	int devs_max;		/* max devs to use */
	int devs_min;		/* min devs needed */
	int devs_increment;	/* ndevs has to be a multiple of this */
	int ncopies;		/* how many copies of the data */
	int ret;
	u64 max_stripe_size;
	u64 max_chunk_size;
	u64 stripe_size;
	u64 num_bytes;
	int ndevs;
	int i;
	int j;

	if ((type & BTRFS_BLOCK_GROUP_RAID1) &&
	    (type & BTRFS_BLOCK_GROUP_DUP)) {
		WARN_ON(1);
		type &= ~BTRFS_BLOCK_GROUP_DUP;
	}

	if (list_empty(&fs_devices->alloc_list))
		return -ENOSPC;

	sub_stripes = 1;
	dev_stripes = 1;
	devs_increment = 1;
	ncopies = 1;
	devs_max = 0;	/* 0 == as many as possible */
	devs_min = 1;

	/*
	 * define the properties of each RAID type.
	 * FIXME: move this to a global table and use it in all RAID
	 * calculation code
	 */
	if (type & (BTRFS_BLOCK_GROUP_DUP)) {
		dev_stripes = 2;
		ncopies = 2;
		devs_max = 1;
	} else if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
		devs_min = 2;
	} else if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
		devs_increment = 2;
		ncopies = 2;
		devs_max = 2;
		devs_min = 2;
	} else if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
		sub_stripes = 2;
		devs_increment = 2;
		ncopies = 2;
		devs_min = 4;
	} else {
		devs_max = 1;
	}

	if (type & BTRFS_BLOCK_GROUP_DATA) {
		max_stripe_size = 1024 * 1024 * 1024;
		max_chunk_size = 10 * max_stripe_size;
	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
		max_stripe_size = 256 * 1024 * 1024;
		max_chunk_size = max_stripe_size;
	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
		max_stripe_size = 8 * 1024 * 1024;
		max_chunk_size = 2 * max_stripe_size;
	} else {
		printk(KERN_ERR "btrfs: invalid chunk type 0x%llx requested\n",
		       (unsigned long long)type);
		BUG_ON(1);
	}

	/* we don't want a chunk larger than 10% of writeable space */
	max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
			     max_chunk_size);

	devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices,
			       GFP_NOFS);
	if (!devices_info)
		return -ENOMEM;

	cur = fs_devices->alloc_list.next;

	/*
	 * in the first pass through the devices list, we gather information
	 * about the available holes on each device.
	 */
	ndevs = 0;
	while (cur != &fs_devices->alloc_list) {
		struct btrfs_device *device;
		u64 max_avail;
		u64 dev_offset;

		device = list_entry(cur, struct btrfs_device, dev_alloc_list);

		cur = cur->next;

		if (!device->writeable) {
			printk(KERN_ERR
			       "btrfs: read-only device in alloc_list\n");
			WARN_ON(1);
			continue;
		}

		if (!device->in_fs_metadata)
			continue;

		if (device->total_bytes > device->bytes_used)
			total_avail = device->total_bytes - device->bytes_used;
		else
			total_avail = 0;

		/* If there is no space on this device, skip it. */
		if (total_avail == 0)
			continue;

		ret = find_free_dev_extent(trans, device,
					   max_stripe_size * dev_stripes,
					   &dev_offset, &max_avail);
		if (ret && ret != -ENOSPC)
			goto error;

		if (ret == 0)
			max_avail = max_stripe_size * dev_stripes;

		if (max_avail < BTRFS_STRIPE_LEN * dev_stripes)
			continue;

		devices_info[ndevs].dev_offset = dev_offset;
		devices_info[ndevs].max_avail = max_avail;
		devices_info[ndevs].total_avail = total_avail;
		devices_info[ndevs].dev = device;
		++ndevs;
	}

	/*
	 * now sort the devices by hole size / available space
	 */
	sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
	     btrfs_cmp_device_info, NULL);

	/* round down to number of usable stripes */
	ndevs -= ndevs % devs_increment;

	if (ndevs < devs_increment * sub_stripes || ndevs < devs_min) {
		ret = -ENOSPC;
		goto error;
	}

	if (devs_max && ndevs > devs_max)
		ndevs = devs_max;
	/*
	 * the primary goal is to maximize the number of stripes, so use as many
	 * devices as possible, even if the stripes are not maximum sized.
	 */
	stripe_size = devices_info[ndevs-1].max_avail;
	num_stripes = ndevs * dev_stripes;

	if (stripe_size * num_stripes > max_chunk_size * ncopies) {
		stripe_size = max_chunk_size * ncopies;
		do_div(stripe_size, num_stripes);
	}

	do_div(stripe_size, dev_stripes);
	do_div(stripe_size, BTRFS_STRIPE_LEN);
	stripe_size *= BTRFS_STRIPE_LEN;

	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		ret = -ENOMEM;
		goto error;
	}
	map->num_stripes = num_stripes;

	for (i = 0; i < ndevs; ++i) {
		for (j = 0; j < dev_stripes; ++j) {
			int s = i * dev_stripes + j;
			map->stripes[s].dev = devices_info[i].dev;
			map->stripes[s].physical = devices_info[i].dev_offset +
						   j * stripe_size;
		}
	}
	map->sector_size = extent_root->sectorsize;
	map->stripe_len = BTRFS_STRIPE_LEN;
	map->io_align = BTRFS_STRIPE_LEN;
	map->io_width = BTRFS_STRIPE_LEN;
	map->type = type;
	map->sub_stripes = sub_stripes;

	*map_ret = map;
	num_bytes = stripe_size * (num_stripes / ncopies);

	*stripe_size_out = stripe_size;
	*num_bytes_out = num_bytes;

	trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes);

	em = alloc_extent_map();
	if (!em) {
		ret = -ENOMEM;
		goto error;
	}
	em->bdev = (struct block_device *)map;
	em->start = start;
	em->len = num_bytes;
	em->block_start = 0;
	em->block_len = em->len;

	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em);
	write_unlock(&em_tree->lock);
	BUG_ON(ret);
	free_extent_map(em);

	ret = btrfs_make_block_group(trans, extent_root, 0, type,
				     BTRFS_FIRST_CHUNK_TREE_OBJECTID,
				     start, num_bytes);
	BUG_ON(ret);

	for (i = 0; i < map->num_stripes; ++i) {
		struct btrfs_device *device;
		u64 dev_offset;

		device = map->stripes[i].dev;
		dev_offset = map->stripes[i].physical;

		ret = btrfs_alloc_dev_extent(trans, device,
				info->chunk_root->root_key.objectid,
				BTRFS_FIRST_CHUNK_TREE_OBJECTID,
				start, dev_offset, stripe_size);
		BUG_ON(ret);
	}

	kfree(devices_info);
	return 0;

error:
	kfree(map);
	kfree(devices_info);
	return ret;
}
static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
				struct btrfs_root *extent_root,
				struct map_lookup *map, u64 chunk_offset,
				u64 chunk_size, u64 stripe_size)
{
	u64 dev_offset;
	struct btrfs_key key;
	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
	struct btrfs_device *device;
	struct btrfs_chunk *chunk;
	struct btrfs_stripe *stripe;
	size_t item_size = btrfs_chunk_item_size(map->num_stripes);
	int index = 0;
	int ret;

	chunk = kzalloc(item_size, GFP_NOFS);
	if (!chunk)
		return -ENOMEM;

	index = 0;
	while (index < map->num_stripes) {
		device = map->stripes[index].dev;
		device->bytes_used += stripe_size;
		ret = btrfs_update_device(trans, device);
		BUG_ON(ret);
		index++;
	}

	spin_lock(&extent_root->fs_info->free_chunk_lock);
	extent_root->fs_info->free_chunk_space -= (stripe_size *
						   map->num_stripes);
	spin_unlock(&extent_root->fs_info->free_chunk_lock);

	index = 0;
	stripe = &chunk->stripe;
	while (index < map->num_stripes) {
		device = map->stripes[index].dev;
		dev_offset = map->stripes[index].physical;

		btrfs_set_stack_stripe_devid(stripe, device->devid);
		btrfs_set_stack_stripe_offset(stripe, dev_offset);
		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
		stripe++;
		index++;
	}

	btrfs_set_stack_chunk_length(chunk, chunk_size);
	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
	btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
	btrfs_set_stack_chunk_type(chunk, map->type);
	btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
	btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
	btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
	btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	key.offset = chunk_offset;

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
	BUG_ON(ret);

	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_add_system_chunk(trans, chunk_root, &key, chunk,
					     item_size);
		BUG_ON(ret);
	}

	kfree(chunk);
	return 0;
}
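
/*
 * The chunk item inserted above is laid out on disk as a btrfs_chunk
 * header followed directly by one btrfs_stripe per device copy:
 *
 *	[ struct btrfs_chunk | btrfs_stripe 0 | ... | btrfs_stripe N-1 ]
 *
 * which is why item_size is computed with
 * btrfs_chunk_item_size(map->num_stripes) and the stripe loop walks a
 * pointer starting at &chunk->stripe.
 */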
/*
 * Chunk allocation falls into two parts. The first part does the work
 * that makes the newly allocated chunk usable, but does not perform any
 * operation that modifies the chunk tree. The second part does the work
 * that requires modifying the chunk tree. This division is important for
 * the bootstrap process of adding storage to a seed btrfs.
 */
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
		      struct btrfs_root *extent_root, u64 type)
{
	u64 chunk_offset;
	u64 chunk_size;
	u64 stripe_size;
	struct map_lookup *map;
	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
	int ret;

	ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
			      &chunk_offset);
	if (ret)
		return ret;

	ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
				  &stripe_size, chunk_offset, type);
	if (ret)
		return ret;

	ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
				   chunk_size, stripe_size);
	BUG_ON(ret);
	return 0;
}
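
/*
 * A minimal sketch of the two-phase flow described above, as used by
 * init_first_rw_device() below when bootstrapping a seed filesystem
 * (error handling elided, profiles illustrative):
 *
 *	__btrfs_alloc_chunk(... &map, ... meta_profile);     phase 1, metadata
 *	__btrfs_alloc_chunk(... &sys_map, ... sys_profile);  phase 1, system
 *	__finish_chunk_alloc(... map ...);	    phase 2, touches chunk tree
 *	__finish_chunk_alloc(... sys_map ...);
 *
 * Both phase-1 calls finish before any phase-2 call, so the chunk tree
 * modifications always have both block groups available to allocate from.
 */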
static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root,
					 struct btrfs_device *device)
{
	u64 chunk_offset;
	u64 sys_chunk_offset;
	u64 chunk_size;
	u64 sys_chunk_size;
	u64 stripe_size;
	u64 sys_stripe_size;
	u64 alloc_profile;
	struct map_lookup *map;
	struct map_lookup *sys_map;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *extent_root = fs_info->extent_root;
	int ret;

	ret = find_next_chunk(fs_info->chunk_root,
			      BTRFS_FIRST_CHUNK_TREE_OBJECTID, &chunk_offset);
	BUG_ON(ret);

	alloc_profile = BTRFS_BLOCK_GROUP_METADATA |
			(fs_info->metadata_alloc_profile &
			 fs_info->avail_metadata_alloc_bits);
	alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);

	ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
				  &stripe_size, chunk_offset, alloc_profile);
	BUG_ON(ret);

	sys_chunk_offset = chunk_offset + chunk_size;

	alloc_profile = BTRFS_BLOCK_GROUP_SYSTEM |
			(fs_info->system_alloc_profile &
			 fs_info->avail_system_alloc_bits);
	alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);

	ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,
				  &sys_chunk_size, &sys_stripe_size,
				  sys_chunk_offset, alloc_profile);
	BUG_ON(ret);

	ret = btrfs_add_device(trans, fs_info->chunk_root, device);
	BUG_ON(ret);

	/*
	 * Modifying the chunk tree needs to allocate new blocks from both
	 * the system block group and the metadata block group, so we can
	 * only perform operations that modify the chunk tree after both
	 * block groups have been created.
	 */
	ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
				   chunk_size, stripe_size);
	BUG_ON(ret);

	ret = __finish_chunk_alloc(trans, extent_root, sys_map,
				   sys_chunk_offset, sys_chunk_size,
				   sys_stripe_size);
	BUG_ON(ret);
	return 0;
}
int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	int readonly = 0;
	int i;

	read_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
	read_unlock(&map_tree->map_tree.lock);
	if (!em)
		return 1;

	if (btrfs_test_opt(root, DEGRADED)) {
		free_extent_map(em);
		return 0;
	}

	map = (struct map_lookup *)em->bdev;
	for (i = 0; i < map->num_stripes; i++) {
		if (!map->stripes[i].dev->writeable) {
			readonly = 1;
			break;
		}
	}
	free_extent_map(em);
	return readonly;
}
void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
{
	extent_map_tree_init(&tree->map_tree);
}

void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
{
	struct extent_map *em;

	while (1) {
		write_lock(&tree->map_tree.lock);
		em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
		if (em)
			remove_extent_mapping(&tree->map_tree, em);
		write_unlock(&tree->map_tree.lock);
		if (!em)
			break;
		kfree(em->bdev);
		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
}
int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	int ret;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, len);
	read_unlock(&em_tree->lock);
	BUG_ON(!em);

	BUG_ON(em->start > logical || em->start + em->len < logical);
	map = (struct map_lookup *)em->bdev;
	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
		ret = map->num_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		ret = map->sub_stripes;
	else
		ret = 1;
	free_extent_map(em);
	return ret;
}
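
/*
 * Example: a four-device RAID10 chunk has num_stripes == 4 and
 * sub_stripes == 2, so btrfs_num_copies() reports 2; a two-device RAID1
 * chunk reports num_stripes == 2; anything unmirrored reports 1.
 */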
static int find_live_mirror(struct map_lookup *map, int first, int num,
			    int optimal)
{
	int i;

	if (map->stripes[optimal].dev->bdev)
		return optimal;
	for (i = first; i < first + num; i++) {
		if (map->stripes[i].dev->bdev)
			return i;
	}
	/* we couldn't find one that doesn't fail.  Just return something
	 * and the io error handling code will clean up eventually
	 */
	return optimal;
}
static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
			     u64 logical, u64 *length,
			     struct btrfs_bio **bbio_ret,
			     int mirror_num)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	u64 offset;
	u64 stripe_offset;
	u64 stripe_end_offset;
	u64 stripe_nr;
	u64 stripe_nr_orig;
	u64 stripe_nr_end;
	int stripes_allocated = 8;
	int stripes_required = 1;
	int stripe_index;
	int i;
	int num_stripes;
	int max_errors = 0;
	struct btrfs_bio *bbio = NULL;

	if (bbio_ret && !(rw & (REQ_WRITE | REQ_DISCARD)))
		stripes_allocated = 1;
again:
	if (bbio_ret) {
		bbio = kzalloc(btrfs_bio_size(stripes_allocated),
				GFP_NOFS);
		if (!bbio)
			return -ENOMEM;

		atomic_set(&bbio->error, 0);
	}

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, *length);
	read_unlock(&em_tree->lock);

	if (!em) {
		printk(KERN_CRIT "unable to find logical %llu len %llu\n",
		       (unsigned long long)logical,
		       (unsigned long long)*length);
		BUG();
	}

	BUG_ON(em->start > logical || em->start + em->len < logical);
	map = (struct map_lookup *)em->bdev;
	offset = logical - em->start;

	if (mirror_num > map->num_stripes)
		mirror_num = 0;

	/* if our btrfs_bio struct is too small, back off and try again */
	if (rw & REQ_WRITE) {
		if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
				 BTRFS_BLOCK_GROUP_DUP)) {
			stripes_required = map->num_stripes;
			max_errors = 1;
		} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
			stripes_required = map->sub_stripes;
			max_errors = 1;
		}
	}
	if (rw & REQ_DISCARD) {
		if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
				 BTRFS_BLOCK_GROUP_RAID1 |
				 BTRFS_BLOCK_GROUP_DUP |
				 BTRFS_BLOCK_GROUP_RAID10)) {
			stripes_required = map->num_stripes;
		}
	}
	if (bbio_ret && (rw & (REQ_WRITE | REQ_DISCARD)) &&
	    stripes_allocated < stripes_required) {
		stripes_allocated = map->num_stripes;
		free_extent_map(em);
		kfree(bbio);
		goto again;
	}
	stripe_nr = offset;
	/*
	 * stripe_nr counts the total number of stripes we have to stride
	 * to get to this block
	 */
	do_div(stripe_nr, map->stripe_len);

	stripe_offset = stripe_nr * map->stripe_len;
	BUG_ON(offset < stripe_offset);

	/* stripe_offset is the offset of this block in its stripe */
	stripe_offset = offset - stripe_offset;
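	/*
	 * Worked example with the default 64KB stripe_len: for
	 * offset == 200KB, the do_div above leaves stripe_nr == 3
	 * (remainder discarded), so stripe_offset == 200KB - 3 * 64KB
	 * == 8KB into the fourth stripe.
	 */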
	if (rw & REQ_DISCARD)
		*length = min_t(u64, em->len - offset, *length);
	else if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
			      BTRFS_BLOCK_GROUP_RAID1 |
			      BTRFS_BLOCK_GROUP_RAID10 |
			      BTRFS_BLOCK_GROUP_DUP)) {
		/* we limit the length of each bio to what fits in a stripe */
		*length = min_t(u64, em->len - offset,
				map->stripe_len - stripe_offset);
	} else {
		*length = em->len - offset;
	}

	if (!bbio_ret)
		goto out;

	num_stripes = 1;
	stripe_index = 0;
	stripe_nr_orig = stripe_nr;
	stripe_nr_end = (offset + *length + map->stripe_len - 1) &
			(~(map->stripe_len - 1));
	do_div(stripe_nr_end, map->stripe_len);
	stripe_end_offset = stripe_nr_end * map->stripe_len -
			    (offset + *length);
	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
		if (rw & REQ_DISCARD)
			num_stripes = min_t(u64, map->num_stripes,
					    stripe_nr_end - stripe_nr_orig);
		stripe_index = do_div(stripe_nr, map->num_stripes);
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
		if (rw & (REQ_WRITE | REQ_DISCARD))
			num_stripes = map->num_stripes;
		else if (mirror_num)
			stripe_index = mirror_num - 1;
		else {
			stripe_index = find_live_mirror(map, 0,
					    map->num_stripes,
					    current->pid % map->num_stripes);
			mirror_num = stripe_index + 1;
		}

	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		if (rw & (REQ_WRITE | REQ_DISCARD)) {
			num_stripes = map->num_stripes;
		} else if (mirror_num) {
			stripe_index = mirror_num - 1;
		} else {
			mirror_num = 1;
		}

	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		int factor = map->num_stripes / map->sub_stripes;

		stripe_index = do_div(stripe_nr, factor);
		stripe_index *= map->sub_stripes;

		if (rw & REQ_WRITE)
			num_stripes = map->sub_stripes;
		else if (rw & REQ_DISCARD)
			num_stripes = min_t(u64, map->sub_stripes *
					    (stripe_nr_end - stripe_nr_orig),
					    map->num_stripes);
		else if (mirror_num)
			stripe_index += mirror_num - 1;
		else {
			stripe_index = find_live_mirror(map, stripe_index,
					      map->sub_stripes, stripe_index +
					      current->pid % map->sub_stripes);
			mirror_num = stripe_index + 1;
		}
	} else {
		/*
		 * after this do_div call, stripe_nr is the number of stripes
		 * on this device we have to walk to find the data, and
		 * stripe_index is the number of our device in the stripe array
		 */
		stripe_index = do_div(stripe_nr, map->num_stripes);
		mirror_num = stripe_index + 1;
	}
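	/*
	 * RAID10 example: with num_stripes == 4 and sub_stripes == 2,
	 * factor == 2.  For stripe_nr == 5, do_div leaves stripe_nr == 2
	 * and a remainder of 1, so stripe_index == 1 * 2 == 2: the block
	 * lives on the second mirror pair, two full stripes down each
	 * member device.
	 */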
	BUG_ON(stripe_index >= map->num_stripes);

	if (rw & REQ_DISCARD) {
		for (i = 0; i < num_stripes; i++) {
			bbio->stripes[i].physical =
				map->stripes[stripe_index].physical +
				stripe_offset + stripe_nr * map->stripe_len;
			bbio->stripes[i].dev = map->stripes[stripe_index].dev;

			if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
				u64 stripes;
				u32 last_stripe = 0;
				int j;

				div_u64_rem(stripe_nr_end - 1,
						 map->num_stripes,
						 &last_stripe);

				for (j = 0; j < map->num_stripes; j++) {
					u32 test;

					div_u64_rem(stripe_nr_end - 1 - j,
							map->num_stripes,
							&test);
					if (test == stripe_index)
						break;
				}
				stripes = stripe_nr_end - 1 - j;
				do_div(stripes, map->num_stripes);
				bbio->stripes[i].length = map->stripe_len *
					(stripes - stripe_nr + 1);

				if (i == 0) {
					bbio->stripes[i].length -=
						stripe_offset;
					stripe_offset = 0;
				}
				if (stripe_index == last_stripe)
					bbio->stripes[i].length -=
						stripe_end_offset;
			} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
				u64 stripes;
				int j;
				int factor = map->num_stripes /
					     map->sub_stripes;
				u32 last_stripe = 0;

				div_u64_rem(stripe_nr_end - 1,
						 factor, &last_stripe);
				last_stripe *= map->sub_stripes;

				for (j = 0; j < factor; j++) {
					u32 test;

					div_u64_rem(stripe_nr_end - 1 - j,
							factor, &test);

					if (test ==
					    stripe_index / map->sub_stripes)
						break;
				}
				stripes = stripe_nr_end - 1 - j;
				do_div(stripes, factor);
				bbio->stripes[i].length = map->stripe_len *
					(stripes - stripe_nr + 1);

				if (i < map->sub_stripes) {
					bbio->stripes[i].length -=
						stripe_offset;
					if (i == map->sub_stripes - 1)
						stripe_offset = 0;
				}
				if (stripe_index >= last_stripe &&
				    stripe_index <= (last_stripe +
						     map->sub_stripes - 1)) {
					bbio->stripes[i].length -=
						stripe_end_offset;
				}
			} else
				bbio->stripes[i].length = *length;

			stripe_index++;
			if (stripe_index == map->num_stripes) {
				/* This could only happen for RAID0/10 */
				stripe_index = 0;
				stripe_nr++;
			}
		}
	} else {
		for (i = 0; i < num_stripes; i++) {
			bbio->stripes[i].physical =
				map->stripes[stripe_index].physical +
				stripe_offset +
				stripe_nr * map->stripe_len;
			bbio->stripes[i].dev =
				map->stripes[stripe_index].dev;
			stripe_index++;
		}
	}

	if (bbio_ret) {
		*bbio_ret = bbio;
		bbio->num_stripes = num_stripes;
		bbio->max_errors = max_errors;
		bbio->mirror_num = mirror_num;
	}
out:
	free_extent_map(em);
	return 0;
}
int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
		    u64 logical, u64 *length,
		    struct btrfs_bio **bbio_ret, int mirror_num)
{
	return __btrfs_map_block(map_tree, rw, logical, length, bbio_ret,
				 mirror_num);
}
int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
		     u64 chunk_start, u64 physical, u64 devid,
		     u64 **logical, int *naddrs, int *stripe_len)
{
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	struct extent_map *em;
	struct map_lookup *map;
	u64 *buf;
	u64 bytenr;
	u64 length;
	u64 stripe_nr;
	int i, j, nr = 0;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_start, 1);
	read_unlock(&em_tree->lock);

	BUG_ON(!em || em->start != chunk_start);
	map = (struct map_lookup *)em->bdev;

	length = em->len;
	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		do_div(length, map->num_stripes / map->sub_stripes);
	else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
		do_div(length, map->num_stripes);

	buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
	BUG_ON(!buf);

	for (i = 0; i < map->num_stripes; i++) {
		if (devid && map->stripes[i].dev->devid != devid)
			continue;
		if (map->stripes[i].physical > physical ||
		    map->stripes[i].physical + length <= physical)
			continue;

		stripe_nr = physical - map->stripes[i].physical;
		do_div(stripe_nr, map->stripe_len);

		if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
			stripe_nr = stripe_nr * map->num_stripes + i;
			do_div(stripe_nr, map->sub_stripes);
		} else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
			stripe_nr = stripe_nr * map->num_stripes + i;
		}
		bytenr = chunk_start + stripe_nr * map->stripe_len;
		WARN_ON(nr >= map->num_stripes);
		for (j = 0; j < nr; j++) {
			if (buf[j] == bytenr)
				break;
		}
		if (j == nr) {
			WARN_ON(nr >= map->num_stripes);
			buf[nr++] = bytenr;
		}
	}

	*logical = buf;
	*naddrs = nr;
	*stripe_len = map->stripe_len;

	free_extent_map(em);
	return 0;
}
static void btrfs_end_bio(struct bio *bio, int err)
{
	struct btrfs_bio *bbio = bio->bi_private;
	int is_orig_bio = 0;

	if (err)
		atomic_inc(&bbio->error);

	if (bio == bbio->orig_bio)
		is_orig_bio = 1;

	if (atomic_dec_and_test(&bbio->stripes_pending)) {
		if (!is_orig_bio) {
			bio_put(bio);
			bio = bbio->orig_bio;
		}
		bio->bi_private = bbio->private;
		bio->bi_end_io = bbio->end_io;
		bio->bi_bdev = (struct block_device *)
					(unsigned long)bbio->mirror_num;
		/* only send an error to the higher layers if it is
		 * beyond the tolerance of the multi-bio
		 */
		if (atomic_read(&bbio->error) > bbio->max_errors) {
			err = -EIO;
		} else {
			/*
			 * this bio is actually up to date, we didn't
			 * go over the max number of errors
			 */
			set_bit(BIO_UPTODATE, &bio->bi_flags);
			err = 0;
		}
		kfree(bbio);

		bio_endio(bio, err);
	} else if (!is_orig_bio) {
		bio_put(bio);
	}
}
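
/*
 * Example of the tolerance check above: a RAID1 write fans out to two
 * stripes and __btrfs_map_block() sets max_errors to 1, so one failed
 * mirror still completes the original bio successfully; two failures
 * push atomic_read(&bbio->error) past max_errors and the bio ends
 * with -EIO.
 */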
struct async_sched {
	struct bio *bio;
	int rw;
	struct btrfs_fs_info *info;
	struct btrfs_work work;
};
/*
 * see run_scheduled_bios for a description of why bios are collected for
 * async submit.
 *
 * This will add one bio to the pending list for a device and make sure
 * the work struct is scheduled.
 */
static noinline int schedule_bio(struct btrfs_root *root,
				 struct btrfs_device *device,
				 int rw, struct bio *bio)
{
	int should_queue = 1;
	struct btrfs_pending_bios *pending_bios;

	/* don't bother with additional async steps for reads, right now */
	if (!(rw & REQ_WRITE)) {
		bio_get(bio);
		btrfsic_submit_bio(rw, bio);
		bio_put(bio);
		return 0;
	}

	/*
	 * nr_async_bios allows us to reliably return congestion to the
	 * higher layers.  Otherwise, the async bio makes it appear we have
	 * made progress against dirty pages when we've really just put it
	 * on a queue for later
	 */
	atomic_inc(&root->fs_info->nr_async_bios);
	WARN_ON(bio->bi_next);
	bio->bi_next = NULL;
	bio->bi_rw |= rw;

	spin_lock(&device->io_lock);
	if (bio->bi_rw & REQ_SYNC)
		pending_bios = &device->pending_sync_bios;
	else
		pending_bios = &device->pending_bios;

	if (pending_bios->tail)
		pending_bios->tail->bi_next = bio;

	pending_bios->tail = bio;
	if (!pending_bios->head)
		pending_bios->head = bio;
	if (device->running_pending)
		should_queue = 0;

	spin_unlock(&device->io_lock);

	if (should_queue)
		btrfs_queue_worker(&root->fs_info->submit_workers,
				   &device->work);
	return 0;
}
int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
		  int mirror_num, int async_submit)
{
	struct btrfs_mapping_tree *map_tree;
	struct btrfs_device *dev;
	struct bio *first_bio = bio;
	u64 logical = (u64)bio->bi_sector << 9;
	u64 length = 0;
	u64 map_length;
	int ret;
	int dev_nr = 0;
	int total_devs = 1;
	struct btrfs_bio *bbio = NULL;

	length = bio->bi_size;
	map_tree = &root->fs_info->mapping_tree;
	map_length = length;

	ret = btrfs_map_block(map_tree, rw, logical, &map_length, &bbio,
			      mirror_num);
	BUG_ON(ret);

	total_devs = bbio->num_stripes;
	if (map_length < length) {
		printk(KERN_CRIT "mapping failed logical %llu bio len %llu "
		       "len %llu\n", (unsigned long long)logical,
		       (unsigned long long)length,
		       (unsigned long long)map_length);
		BUG();
	}

	bbio->orig_bio = first_bio;
	bbio->private = first_bio->bi_private;
	bbio->end_io = first_bio->bi_end_io;
	atomic_set(&bbio->stripes_pending, bbio->num_stripes);

	while (dev_nr < total_devs) {
		if (dev_nr < total_devs - 1) {
			bio = bio_clone(first_bio, GFP_NOFS);
			BUG_ON(!bio);
		} else {
			bio = first_bio;
		}
		bio->bi_private = bbio;
		bio->bi_end_io = btrfs_end_bio;
		bio->bi_sector = bbio->stripes[dev_nr].physical >> 9;
		dev = bbio->stripes[dev_nr].dev;
		if (dev && dev->bdev && (rw != WRITE || dev->writeable)) {
			pr_debug("btrfs_map_bio: rw %d, sector=%llu, dev=%lu "
				 "(%s id %llu), size=%u\n", rw,
				 (u64)bio->bi_sector, (u_long)dev->bdev->bd_dev,
				 dev->name, dev->devid, bio->bi_size);
			bio->bi_bdev = dev->bdev;
			if (async_submit)
				schedule_bio(root, dev, rw, bio);
			else
				btrfsic_submit_bio(rw, bio);
		} else {
			bio->bi_bdev = root->fs_info->fs_devices->latest_bdev;
			bio->bi_sector = logical >> 9;
			bio_endio(bio, -EIO);
		}
		dev_nr++;
	}
	return 0;
}
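
/*
 * Note the cloning pattern above: for N stripes, stripes 0..N-2 get
 * bio_clone() copies and the final stripe reuses first_bio itself, so
 * exactly N bios go down and btrfs_end_bio() can hand first_bio back to
 * the caller once stripes_pending drains.
 */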
struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
				       u8 *uuid, u8 *fsid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *cur_devices;

	cur_devices = root->fs_info->fs_devices;
	while (cur_devices) {
		if (!fsid ||
		    !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
			device = __find_device(&cur_devices->devices,
					       devid, uuid);
			if (device)
				return device;
		}
		cur_devices = cur_devices->seed;
	}
	return NULL;
}
static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
					    u64 devid, u8 *dev_uuid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;

	device = kzalloc(sizeof(*device), GFP_NOFS);
	if (!device)
		return NULL;
	list_add(&device->dev_list,
		 &fs_devices->devices);
	device->dev_root = root->fs_info->dev_root;
	device->devid = devid;
	device->work.func = pending_bios_fn;
	device->fs_devices = fs_devices;
	device->missing = 1;
	fs_devices->num_devices++;
	fs_devices->missing_devices++;
	spin_lock_init(&device->io_lock);
	INIT_LIST_HEAD(&device->dev_alloc_list);
	memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE);
	return device;
}
static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
			  struct extent_buffer *leaf,
			  struct btrfs_chunk *chunk)
{
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	u64 logical;
	u64 length;
	u64 devid;
	u8 uuid[BTRFS_UUID_SIZE];
	int num_stripes;
	int ret;
	int i;

	logical = key->offset;
	length = btrfs_chunk_length(leaf, chunk);

	read_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
	read_unlock(&map_tree->map_tree.lock);

	/* already mapped? */
	if (em && em->start <= logical && em->start + em->len > logical) {
		free_extent_map(em);
		return 0;
	} else if (em) {
		free_extent_map(em);
	}

	em = alloc_extent_map();
	if (!em)
		return -ENOMEM;
	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		free_extent_map(em);
		return -ENOMEM;
	}

	em->bdev = (struct block_device *)map;
	em->start = logical;
	em->len = length;
	em->block_start = 0;
	em->block_len = em->len;

	map->num_stripes = num_stripes;
	map->io_width = btrfs_chunk_io_width(leaf, chunk);
	map->io_align = btrfs_chunk_io_align(leaf, chunk);
	map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	map->type = btrfs_chunk_type(leaf, chunk);
	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
	for (i = 0; i < num_stripes; i++) {
		map->stripes[i].physical =
			btrfs_stripe_offset_nr(leaf, chunk, i);
		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
		read_extent_buffer(leaf, uuid, (unsigned long)
				   btrfs_stripe_dev_uuid_nr(chunk, i),
				   BTRFS_UUID_SIZE);
		map->stripes[i].dev = btrfs_find_device(root, devid, uuid,
							NULL);
		if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
			kfree(map);
			free_extent_map(em);
			return -EIO;
		}
		if (!map->stripes[i].dev) {
			map->stripes[i].dev =
				add_missing_dev(root, devid, uuid);
			if (!map->stripes[i].dev) {
				kfree(map);
				free_extent_map(em);
				return -EIO;
			}
		}
		map->stripes[i].dev->in_fs_metadata = 1;
	}

	write_lock(&map_tree->map_tree.lock);
	ret = add_extent_mapping(&map_tree->map_tree, em);
	write_unlock(&map_tree->map_tree.lock);
	BUG_ON(ret);
	free_extent_map(em);

	return 0;
}
static int fill_device_from_item(struct extent_buffer *leaf,
				 struct btrfs_dev_item *dev_item,
				 struct btrfs_device *device)
{
	unsigned long ptr;

	device->devid = btrfs_device_id(leaf, dev_item);
	device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
	device->total_bytes = device->disk_total_bytes;
	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
	device->type = btrfs_device_type(leaf, dev_item);
	device->io_align = btrfs_device_io_align(leaf, dev_item);
	device->io_width = btrfs_device_io_width(leaf, dev_item);
	device->sector_size = btrfs_device_sector_size(leaf, dev_item);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);

	return 0;
}
static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;
	int ret;

	mutex_lock(&uuid_mutex);

	fs_devices = root->fs_info->fs_devices->seed;
	while (fs_devices) {
		if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
			ret = 0;
			goto out;
		}
		fs_devices = fs_devices->seed;
	}

	fs_devices = find_fsid(fsid);
	if (!fs_devices) {
		ret = -ENOENT;
		goto out;
	}

	fs_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(fs_devices)) {
		ret = PTR_ERR(fs_devices);
		goto out;
	}

	ret = __btrfs_open_devices(fs_devices, FMODE_READ,
				   root->fs_info->bdev_holder);
	if (ret)
		goto out;

	if (!fs_devices->seeding) {
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
		ret = -EINVAL;
		goto out;
	}

	fs_devices->seed = root->fs_info->fs_devices->seed;
	root->fs_info->fs_devices->seed = fs_devices;
out:
	mutex_unlock(&uuid_mutex);
	return ret;
}
static int read_one_dev(struct btrfs_root *root,
			struct extent_buffer *leaf,
			struct btrfs_dev_item *dev_item)
{
	struct btrfs_device *device;
	u64 devid;
	int ret;
	u8 fs_uuid[BTRFS_UUID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];

	devid = btrfs_device_id(leaf, dev_item);
	read_extent_buffer(leaf, dev_uuid,
			   (unsigned long)btrfs_device_uuid(dev_item),
			   BTRFS_UUID_SIZE);
	read_extent_buffer(leaf, fs_uuid,
			   (unsigned long)btrfs_device_fsid(dev_item),
			   BTRFS_UUID_SIZE);

	if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
		ret = open_seed_devices(root, fs_uuid);
		if (ret && !btrfs_test_opt(root, DEGRADED))
			return ret;
	}

	device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
	if (!device || !device->bdev) {
		if (!btrfs_test_opt(root, DEGRADED))
			return -EIO;

		if (!device) {
			printk(KERN_WARNING "btrfs: devid %llu missing\n",
			       (unsigned long long)devid);
			device = add_missing_dev(root, devid, dev_uuid);
			if (!device)
				return -ENOMEM;
		} else if (!device->missing) {
			/*
			 * this happens when a device that was properly setup
			 * in the device info lists suddenly goes bad.
			 * device->bdev is NULL, and so we have to set
			 * device->missing to one here
			 */
			root->fs_info->fs_devices->missing_devices++;
			device->missing = 1;
		}
	}

	if (device->fs_devices != root->fs_info->fs_devices) {
		BUG_ON(device->writeable);
		if (device->generation !=
		    btrfs_device_generation(leaf, dev_item))
			return -EINVAL;
	}

	fill_device_from_item(leaf, dev_item, device);
	device->dev_root = root->fs_info->dev_root;
	device->in_fs_metadata = 1;
	if (device->writeable) {
		device->fs_devices->total_rw_bytes += device->total_bytes;
		spin_lock(&root->fs_info->free_chunk_lock);
		root->fs_info->free_chunk_space += device->total_bytes -
			device->bytes_used;
		spin_unlock(&root->fs_info->free_chunk_lock);
	}
	return 0;
}
int btrfs_read_sys_array(struct btrfs_root *root)
{
	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
	struct extent_buffer *sb;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	unsigned long sb_ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	struct btrfs_key key;

	sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
					  BTRFS_SUPER_INFO_SIZE);
	if (!sb)
		return -ENOMEM;
	btrfs_set_buffer_uptodate(sb);
	btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);

	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
	cur = 0;
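	/*
	 * sys_chunk_array is a packed sequence of (key, chunk item)
	 * pairs, each chunk item trailed by its stripes:
	 *
	 *   [ btrfs_disk_key | btrfs_chunk + stripes ] [ btrfs_disk_key | ...
	 *
	 * so each loop iteration below advances by sizeof(*disk_key)
	 * plus btrfs_chunk_item_size(num_stripes).
	 */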
	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key); ptr += len;
		sb_ptr += len;
		cur += len;

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)sb_ptr;
			ret = read_one_chunk(root, &key, sb, chunk);
			if (ret)
				break;
			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
			len = btrfs_chunk_item_size(num_stripes);
		} else {
			ret = -EIO;
			break;
		}
		ptr += len;
		sb_ptr += len;
		cur += len;
	}
	free_extent_buffer(sb);
	return ret;
}
int btrfs_read_chunk_tree(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	int slot;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* first we search for all of the device items, and then we
	 * read in all of the chunk items.  This way we can create chunk
	 * mappings that reference all of the devices that are found.
	 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = 0;
again:
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
			if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
				break;
			if (found_key.type == BTRFS_DEV_ITEM_KEY) {
				struct btrfs_dev_item *dev_item;
				dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
				ret = read_one_dev(root, leaf, dev_item);
				if (ret)
					goto error;
			}
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;
			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			ret = read_one_chunk(root, &found_key, leaf, chunk);
			if (ret)
				goto error;
		}
		path->slots[0]++;
	}
	if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
		key.objectid = 0;
		btrfs_release_path(path);
		goto again;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}