#ifndef _BCACHE_WRITEBACK_H
#define _BCACHE_WRITEBACK_H

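/*
 * Cache utilisation cutoffs for writeback, in percent: above CUTOFF_WRITEBACK
 * only REQ_SYNC writes are still written back, and above CUTOFF_WRITEBACK_SYNC
 * writeback is bypassed entirely (see should_writeback() below).
 */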
#define CUTOFF_WRITEBACK	40
#define CUTOFF_WRITEBACK_SYNC	70

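/* Total dirty sectors on a backing device: sum of the per stripe counters. */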
static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
{
	uint64_t i, ret = 0;

	for (i = 0; i < d->nr_stripes; i++)
		ret += atomic_read(d->stripe_sectors_dirty + i);

	return ret;
}

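/*
 * Return true if any stripe overlapping [offset, offset + nr_sectors)
 * already contains dirty data.
 */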
static inline bool bcache_dev_stripe_dirty(struct bcache_device *d,
					   uint64_t offset,
					   unsigned nr_sectors)
{
	uint64_t stripe = offset;

	do_div(stripe, d->stripe_size);

	while (1) {
		if (atomic_read(d->stripe_sectors_dirty + stripe))
			return true;

		if (nr_sectors <= d->stripe_size)
			return false;

		nr_sectors -= d->stripe_size;
		stripe++;
	}
}

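/*
 * Per bio writeback policy: never write back when not in writeback mode, when
 * the device is detaching, or when the cache is past CUTOFF_WRITEBACK_SYNC;
 * always write back into stripes that are already dirty if partial stripe
 * writes are expensive; otherwise only write back bios that would not be
 * skipped and that are either REQ_SYNC or arrive while the cache is below
 * CUTOFF_WRITEBACK.
 */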
static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
				    unsigned cache_mode, bool would_skip)
{
	unsigned in_use = dc->disk.c->gc_stats.in_use;

	if (cache_mode != CACHE_MODE_WRITEBACK ||
	    atomic_read(&dc->disk.detaching) ||
	    in_use > CUTOFF_WRITEBACK_SYNC)
		return false;

	if (dc->partial_stripes_expensive &&
	    bcache_dev_stripe_dirty(&dc->disk, bio->bi_sector,
				    bio_sectors(bio)))
		return true;

	if (would_skip)
		return false;

	return bio->bi_rw & REQ_SYNC ||
		in_use <= CUTOFF_WRITEBACK;
}

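/* Wake the per device writeback thread. */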
static inline void bch_writeback_queue(struct cached_dev *dc)
{
	wake_up_process(dc->writeback_thread);
}

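/*
 * Called when dirty data is added to the cache: the first caller to flip
 * has_dirty takes a ref on the cached_dev, marks the backing device's
 * superblock dirty, and wakes the writeback thread.
 */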
static inline void bch_writeback_add(struct cached_dev *dc)
{
	if (!atomic_read(&dc->has_dirty) &&
	    !atomic_xchg(&dc->has_dirty, 1)) {
		atomic_inc(&dc->count);

		if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) {
			SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY);
			/* XXX: should do this synchronously */
			bch_write_bdev_super(dc, NULL);
		}

		bch_writeback_queue(dc);
	}
}

void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned, uint64_t, int);

void bch_sectors_dirty_init(struct cached_dev *dc);
int bch_cached_dev_writeback_init(struct cached_dev *);

#endif