/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef _LINUX_DEVICE_MAPPER_H
#define _LINUX_DEVICE_MAPPER_H

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/ratelimit.h>
struct dm_dev;
struct dm_target;
struct dm_table;
struct mapped_device;
struct bio_vec;

typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;

union map_info {
	void *ptr;
	unsigned long long ll;
};
/*
 * In the constructor the target parameter will already have the
 * table, type, begin and len fields filled in.
 */
typedef int (*dm_ctr_fn) (struct dm_target *target,
			  unsigned int argc, char **argv);
/*
 * The destructor doesn't need to free the dm_target, just
 * anything hidden in ti->private.
 */
typedef void (*dm_dtr_fn) (struct dm_target *ti);
/*
 * The map function must return:
 * < 0: error
 * = 0: The target will handle the io by resubmitting it later
 * = 1: simple remap complete
 * = 2: The target wants to push back the io
 */
typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio);
typedef int (*dm_map_request_fn) (struct dm_target *ti, struct request *clone,
				  union map_info *map_context);
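/*
 * Example: a minimal bio map function in the style of dm-linear.  This is
 * an illustrative sketch, not part of the interface; 'struct example_c' and
 * 'example_linear_map' are hypothetical names, and the context is assumed
 * to have been set up by the target's ctr.  Compiled out.
 */
#if 0
struct example_c {
	struct dm_dev *dev;
	sector_t start;
};

static int example_linear_map(struct dm_target *ti, struct bio *bio)
{
	struct example_c *ec = ti->private;

	/* Redirect the bio to the underlying device... */
	bio->bi_bdev = ec->dev->bdev;
	/* ...at an offset computed relative to the start of this target. */
	bio->bi_sector = ec->start + dm_target_offset(ti, bio->bi_sector);

	return DM_MAPIO_REMAPPED;	/* dm core submits the remapped bio */
}
#endif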
/*
 * Returns:
 * < 0 : error (currently ignored)
 * 0   : ended successfully
 * 1   : for some reason the io has still not completed (eg,
 *       multipath target might want to requeue a failed io).
 * 2   : The target wants to push back the io
 */
typedef int (*dm_endio_fn) (struct dm_target *ti,
			    struct bio *bio, int error);
typedef int (*dm_request_endio_fn) (struct dm_target *ti,
				    struct request *clone, int error,
				    union map_info *map_context);
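/*
 * Example: an end_io hook that pushes back failed io, roughly what a
 * multipath-style target might do.  Illustrative sketch with a
 * hypothetical name; compiled out.
 */
#if 0
static int example_end_io(struct dm_target *ti, struct bio *bio, int error)
{
	if (error == -EIO)
		return DM_ENDIO_REQUEUE;	/* ask dm core to requeue the io */

	return 0;				/* ended successfully */
}
#endif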
typedef void (*dm_presuspend_fn) (struct dm_target *ti);
typedef void (*dm_postsuspend_fn) (struct dm_target *ti);
typedef int (*dm_preresume_fn) (struct dm_target *ti);
typedef void (*dm_resume_fn) (struct dm_target *ti);

typedef int (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
			     unsigned status_flags, char *result, unsigned maxlen);

typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv);

typedef int (*dm_ioctl_fn) (struct dm_target *ti, unsigned int cmd,
			    unsigned long arg);

typedef int (*dm_merge_fn) (struct dm_target *ti, struct bvec_merge_data *bvm,
			    struct bio_vec *biovec, int max_size);

typedef int (*iterate_devices_callout_fn) (struct dm_target *ti,
					   struct dm_dev *dev,
					   sector_t start, sector_t len,
					   void *data);

typedef int (*dm_iterate_devices_fn) (struct dm_target *ti,
				      iterate_devices_callout_fn fn,
				      void *data);

typedef void (*dm_io_hints_fn) (struct dm_target *ti,
				struct queue_limits *limits);

/*
 * Returns:
 *    0: The target can handle the next I/O immediately.
 *    1: The target can't handle the next I/O immediately.
 */
typedef int (*dm_busy_fn) (struct dm_target *ti);
void dm_error(const char *message);

/*
 * Combine device limits.
 */
int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
			 sector_t start, sector_t len, void *data);
struct dm_dev {
	struct block_device *bdev;
	fmode_t mode;
	char name[16];
};
/*
 * Constructors should call these functions to ensure destination devices
 * are opened/closed correctly.
 */
int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
		  struct dm_dev **result);
void dm_put_device(struct dm_target *ti, struct dm_dev *d);
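/*
 * Example: a ctr/dtr pair that opens and releases one underlying device.
 * Illustrative sketch; 'struct example_c', 'example_ctr' and 'example_dtr'
 * are hypothetical, and kmalloc/kfree assume <linux/slab.h>.  Compiled out.
 */
#if 0
static int example_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct example_c *ec;

	if (argc != 1) {
		ti->error = "Invalid argument count";	/* reported to userspace */
		return -EINVAL;
	}

	ec = kmalloc(sizeof(*ec), GFP_KERNEL);
	if (!ec) {
		ti->error = "Cannot allocate context";
		return -ENOMEM;
	}

	/* Open the destination device with the table's access mode. */
	if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ec->dev)) {
		ti->error = "Device lookup failed";
		kfree(ec);
		return -EINVAL;
	}

	ti->private = ec;
	return 0;
}

static void example_dtr(struct dm_target *ti)
{
	struct example_c *ec = ti->private;

	dm_put_device(ti, ec->dev);	/* drop the reference taken in the ctr */
	kfree(ec);
}
#endif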
/*
 * Information about a target type
 */

struct target_type {
	uint64_t features;
	const char *name;
	struct module *module;
	unsigned version[3];
	dm_ctr_fn ctr;
	dm_dtr_fn dtr;
	dm_map_fn map;
	dm_map_request_fn map_rq;
	dm_endio_fn end_io;
	dm_request_endio_fn rq_end_io;
	dm_presuspend_fn presuspend;
	dm_postsuspend_fn postsuspend;
	dm_preresume_fn preresume;
	dm_resume_fn resume;
	dm_status_fn status;
	dm_message_fn message;
	dm_ioctl_fn ioctl;
	dm_merge_fn merge;
	dm_busy_fn busy;
	dm_iterate_devices_fn iterate_devices;
	dm_io_hints_fn io_hints;

	/* For internal device-mapper use. */
	struct list_head list;
};
/*
 * Target features
 */

/*
 * Any table that contains an instance of this target must have only one.
 */
#define DM_TARGET_SINGLETON		0x00000001
#define dm_target_needs_singleton(type)	((type)->features & DM_TARGET_SINGLETON)

/*
 * Indicates that a target does not support read-only devices.
 */
#define DM_TARGET_ALWAYS_WRITEABLE	0x00000002
#define dm_target_always_writeable(type) \
		((type)->features & DM_TARGET_ALWAYS_WRITEABLE)

/*
 * Any device that contains a table with an instance of this target may never
 * have tables containing any different target type.
 */
#define DM_TARGET_IMMUTABLE		0x00000004
#define dm_target_is_immutable(type)	((type)->features & DM_TARGET_IMMUTABLE)
struct dm_target {
	struct dm_table *table;
	struct target_type *type;

	/* target limits */
	sector_t begin;
	sector_t len;

	/* If non-zero, maximum size of I/O submitted to a target. */
	uint32_t max_io_len;

	/*
	 * The number of zero-length barrier requests that will be submitted
	 * to the target for the purpose of flushing cache.
	 *
	 * The request number can be accessed with dm_bio_get_target_request_nr.
	 * It is the responsibility of the target driver to remap these requests
	 * to the real underlying devices.
	 */
	unsigned num_flush_requests;

	/*
	 * The number of discard requests that will be submitted to the target.
	 * The request number can be accessed with dm_bio_get_target_request_nr.
	 */
	unsigned num_discard_requests;

	/*
	 * The number of WRITE SAME requests that will be submitted to the target.
	 * The request number can be accessed with dm_bio_get_target_request_nr.
	 */
	unsigned num_write_same_requests;

	/*
	 * The minimum number of extra bytes allocated in each bio for the
	 * target to use.  dm_per_bio_data returns the data location.
	 */
	unsigned per_bio_data_size;

	/* target specific data */
	void *private;

	/* Used to provide an error string from the ctr */
	char *error;

	/*
	 * Set if this target needs to receive flushes regardless of
	 * whether or not its underlying devices have support.
	 */
	bool flush_supported:1;

	/*
	 * Set if this target needs to receive discards regardless of
	 * whether or not its underlying devices have support.
	 */
	bool discards_supported:1;

	/*
	 * Set if the target requires discard requests to be split
	 * on a max_io_len boundary.
	 */
	bool split_discard_requests:1;

	/*
	 * Set if this target does not return zeroes on discarded blocks.
	 */
	bool discard_zeroes_data_unsupported:1;
};
/* Each target can link one of these into the table */
struct dm_target_callbacks {
	struct list_head list;
	int (*congested_fn) (struct dm_target_callbacks *, int);
};
/*
 * One of these is allocated for each bio.
 * This structure shouldn't be touched directly by target drivers.
 * It is here so that we can inline dm_per_bio_data and
 * dm_bio_from_per_bio_data.
 */
struct dm_target_io {
	struct dm_io *io;
	struct dm_target *ti;
	union map_info info;
	unsigned target_request_nr;
	struct bio clone;
};
static inline void *dm_per_bio_data(struct bio *bio, size_t data_size)
{
	return (char *)bio - offsetof(struct dm_target_io, clone) - data_size;
}

static inline struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
{
	return (struct bio *)((char *)data + data_size + offsetof(struct dm_target_io, clone));
}

static inline unsigned dm_bio_get_target_request_nr(const struct bio *bio)
{
	return container_of(bio, struct dm_target_io, clone)->target_request_nr;
}
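/*
 * Example: per-bio state.  A target sets ti->per_bio_data_size in its ctr
 * and then recovers the data in map/end_io via dm_per_bio_data().
 * Illustrative sketch; 'struct example_pb' and its field are hypothetical,
 * and the actual remapping of the bio is elided.  Compiled out.
 */
#if 0
struct example_pb {
	unsigned long start_jiffies;
};

static int example_timed_map(struct dm_target *ti, struct bio *bio)
{
	struct example_pb *pb = dm_per_bio_data(bio, sizeof(struct example_pb));

	pb->start_jiffies = jiffies;	/* read back later in end_io */

	return DM_MAPIO_REMAPPED;
}
#endif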
int dm_register_target(struct target_type *t);
void dm_unregister_target(struct target_type *t);
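/*
 * Example: declaring a target type and registering it from module
 * init/exit.  Illustrative sketch; the "example" name, version and hooks
 * are hypothetical, and DMERR assumes DM_MSG_PREFIX is defined by the
 * target's source file.  Compiled out.
 */
#if 0
static struct target_type example_target = {
	.name	 = "example",
	.version = {1, 0, 0},
	.module	 = THIS_MODULE,
	.ctr	 = example_ctr,
	.dtr	 = example_dtr,
	.map	 = example_linear_map,
};

static int __init dm_example_init(void)
{
	int r = dm_register_target(&example_target);

	if (r < 0)
		DMERR("register failed %d", r);

	return r;
}

static void __exit dm_example_exit(void)
{
	dm_unregister_target(&example_target);
}

module_init(dm_example_init);
module_exit(dm_example_exit);
#endif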
/*
 * Target argument parsing.
 */
struct dm_arg_set {
	unsigned argc;
	char **argv;
};

/*
 * The minimum and maximum value of a numeric argument, together with
 * the error message to use if the number is found to be outside that range.
 */
struct dm_arg {
	unsigned min;
	unsigned max;
	char *error;
};

/*
 * Validate the next argument, either returning it as *value or, if invalid,
 * returning -EINVAL and setting *error.
 */
int dm_read_arg(struct dm_arg *arg, struct dm_arg_set *arg_set,
		unsigned *value, char **error);

/*
 * Process the next argument as the start of a group containing between
 * arg->min and arg->max further arguments. Either return the size as
 * *num_args or, if invalid, return -EINVAL and set *error.
 */
int dm_read_arg_group(struct dm_arg *arg, struct dm_arg_set *arg_set,
		      unsigned *num_args, char **error);

/*
 * Return the current argument and shift to the next.
 */
const char *dm_shift_arg(struct dm_arg_set *as);

/*
 * Move through num_args arguments.
 */
void dm_consume_args(struct dm_arg_set *as, unsigned num_args);
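/*
 * Example: parsing "<dev> <count>" style ctr arguments with the helpers
 * above.  Illustrative sketch; the names and the 0..1024 range are
 * hypothetical.  Compiled out.
 */
#if 0
static int example_parse(struct dm_target *ti, unsigned argc, char **argv)
{
	static struct dm_arg _args[] = {
		{0, 1024, "Invalid count"},	/* min, max, error message */
	};
	struct dm_arg_set as;
	const char *devname;
	unsigned count;
	char *err;

	as.argc = argc;
	as.argv = argv;

	devname = dm_shift_arg(&as);	/* first argument: device path */
	if (!devname)
		return -EINVAL;

	/* Second argument: a number validated against _args[0]. */
	if (dm_read_arg(_args, &as, &count, &err)) {
		ti->error = err;
		return -EINVAL;
	}

	return 0;
}
#endif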
/*-----------------------------------------------------------------
 * Functions for creating and manipulating mapped devices.
 * Drop the reference with dm_put when you finish with the object.
 *---------------------------------------------------------------*/

/*
 * DM_ANY_MINOR chooses the next available minor number.
 */
#define DM_ANY_MINOR (-1)
int dm_create(int minor, struct mapped_device **md);

/*
 * Reference counting for md.
 */
struct mapped_device *dm_get_md(dev_t dev);
void dm_get(struct mapped_device *md);
void dm_put(struct mapped_device *md);
/*
 * An arbitrary pointer may be stored alongside a mapped device.
 */
void dm_set_mdptr(struct mapped_device *md, void *ptr);
void *dm_get_mdptr(struct mapped_device *md);

/*
 * A device can still be used while suspended, but I/O is deferred.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags);
int dm_resume(struct mapped_device *md);
/*
 * Event functions.
 */
uint32_t dm_get_event_nr(struct mapped_device *md);
int dm_wait_event(struct mapped_device *md, int event_nr);
uint32_t dm_next_uevent_seq(struct mapped_device *md);
void dm_uevent_add(struct mapped_device *md, struct list_head *elist);

/*
 * Info functions.
 */
const char *dm_device_name(struct mapped_device *md);
int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
struct gendisk *dm_disk(struct mapped_device *md);
int dm_suspended(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
union map_info *dm_get_mapinfo(struct bio *bio);
union map_info *dm_get_rq_mapinfo(struct request *rq);

/*
 * Geometry functions.
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);
/*-----------------------------------------------------------------
 * Functions for manipulating device-mapper tables.
 *---------------------------------------------------------------*/

/*
 * First create an empty table.
 */
int dm_table_create(struct dm_table **result, fmode_t mode,
		    unsigned num_targets, struct mapped_device *md);

/*
 * Then call this once for each target.
 */
int dm_table_add_target(struct dm_table *t, const char *type,
			sector_t start, sector_t len, char *params);

/*
 * Target_ctr should call this if it needs to add any callbacks.
 */
void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb);

/*
 * Finally call this to make the table ready for use.
 */
int dm_table_complete(struct dm_table *t);
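/*
 * Example: the create -> add targets -> complete sequence, as dm core's
 * ioctl layer drives it when loading a table.  Illustrative sketch with a
 * hypothetical function name; error cleanup is elided.  Compiled out.
 */
#if 0
static int example_build_table(struct mapped_device *md)
{
	struct dm_table *t;
	int r;

	/* First create an empty table with room for one target. */
	r = dm_table_create(&t, FMODE_READ | FMODE_WRITE, 1, md);
	if (r)
		return r;

	/* One "linear" target covering sectors 0..1023, mapped to /dev/sdb. */
	r = dm_table_add_target(t, "linear", 0, 1024, "/dev/sdb 0");
	if (r)
		return r;	/* table teardown elided in this sketch */

	/* Finally validate the table and make it ready for use. */
	return dm_table_complete(t);
}
#endif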
/*
 * Target may require that it is never sent I/O larger than len.
 */
int __must_check dm_set_target_max_io_len(struct dm_target *ti, sector_t len);

/*
 * Table reference counting.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md);
void dm_table_get(struct dm_table *t);
void dm_table_put(struct dm_table *t);

/*
 * Queries
 */
sector_t dm_table_get_size(struct dm_table *t);
unsigned int dm_table_get_num_targets(struct dm_table *t);
fmode_t dm_table_get_mode(struct dm_table *t);
struct mapped_device *dm_table_get_md(struct dm_table *t);
/*
 * Trigger an event.
 */
void dm_table_event(struct dm_table *t);

/*
 * The device must be suspended before calling this method.
 * Returns the previous table, which the caller must destroy.
 */
struct dm_table *dm_swap_table(struct mapped_device *md,
			       struct dm_table *t);

/*
 * A wrapper around vmalloc.
 */
void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);
/*-----------------------------------------------------------------
 * Macros.
 *---------------------------------------------------------------*/
#define DM_NAME "device-mapper"

#ifdef CONFIG_PRINTK
extern struct ratelimit_state dm_ratelimit_state;

#define dm_ratelimit()	__ratelimit(&dm_ratelimit_state)
#else
#define dm_ratelimit()	0
#endif
#define DMCRIT(f, arg...) \
	printk(KERN_CRIT DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)

#define DMERR(f, arg...) \
	printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
#define DMERR_LIMIT(f, arg...) \
	do { \
		if (dm_ratelimit())	\
			printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " \
			       f "\n", ## arg); \
	} while (0)

#define DMWARN(f, arg...) \
	printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
#define DMWARN_LIMIT(f, arg...) \
	do { \
		if (dm_ratelimit())	\
			printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " \
			       f "\n", ## arg); \
	} while (0)

#define DMINFO(f, arg...) \
	printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
#define DMINFO_LIMIT(f, arg...) \
	do { \
		if (dm_ratelimit())	\
			printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f \
			       "\n", ## arg); \
	} while (0)
#ifdef CONFIG_DM_DEBUG
#  define DMDEBUG(f, arg...) \
	printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX " DEBUG: " f "\n", ## arg)
#  define DMDEBUG_LIMIT(f, arg...) \
	do { \
		if (dm_ratelimit())	\
			printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX ": " f \
			       "\n", ## arg); \
	} while (0)
#else
#  define DMDEBUG(f, arg...) do {} while (0)
#  define DMDEBUG_LIMIT(f, arg...) do {} while (0)
#endif
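/*
 * Example: a target's source file defines DM_MSG_PREFIX so the macros
 * above tag its messages.  The prefix value here is hypothetical:
 *
 *	#define DM_MSG_PREFIX "example"
 *
 * DMERR("device %s not found", path) would then log:
 *
 *	device-mapper: example: device <path> not found
 */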
#define DMEMIT(x...) sz += ((sz >= maxlen) ? \
			  0 : scnprintf(result + sz, maxlen - sz, x))
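/*
 * Example: a status callback built on DMEMIT, which assumes the
 * conventional 'result'/'maxlen' parameter names and a local 'sz'.
 * Illustrative sketch with hypothetical names.  Compiled out.
 */
#if 0
static int example_status(struct dm_target *ti, status_type_t type,
			  unsigned status_flags, char *result, unsigned maxlen)
{
	struct example_c *ec = ti->private;
	unsigned sz = 0;	/* DMEMIT accumulates the emitted length here */

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;
	case STATUSTYPE_TABLE:
		/* Emit the ctr arguments that would recreate this target. */
		DMEMIT("%s %llu", ec->dev->name,
		       (unsigned long long)ec->start);
		break;
	}

	return 0;
}
#endif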
#define SECTOR_SHIFT 9
/*
 * Definitions of return values from target end_io function.
 */
#define DM_ENDIO_INCOMPLETE	1
#define DM_ENDIO_REQUEUE	2

/*
 * Definitions of return values from target map function.
 */
#define DM_MAPIO_SUBMITTED	0
#define DM_MAPIO_REMAPPED	1
#define DM_MAPIO_REQUEUE	DM_ENDIO_REQUEUE
/*
 * Ceiling(n / sz)
 */
#define dm_div_up(n, sz) (((n) + (sz) - 1) / (sz))

#define dm_sector_div_up(n, sz) ( \
{ \
	sector_t _r = ((n) + (sz) - 1); \
	sector_div(_r, (sz)); \
	_r; \
} \
)

/*
 * ceiling(n / size) * size
 */
#define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz))

#define dm_array_too_big(fixed, obj, num) \
	((num) > (UINT_MAX - (fixed)) / (obj))
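/*
 * Worked example (illustrative values): dm_div_up(1030, 512) = 3 and
 * dm_round_up(1030, 512) = 1536, i.e. 1030 bytes rounded up to a whole
 * number of 512-byte sectors.
 */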
/*
 * Sector offset taken relative to the start of the target instead of
 * relative to the start of the device.
 */
#define dm_target_offset(ti, sector) ((sector) - (ti)->begin)

static inline sector_t to_sector(unsigned long n)
{
	return (n >> SECTOR_SHIFT);
}

static inline unsigned long to_bytes(sector_t n)
{
	return (n << SECTOR_SHIFT);
}
/*-----------------------------------------------------------------
 * Helper for block layer and dm core operations
 *---------------------------------------------------------------*/
void dm_dispatch_request(struct request *rq);
void dm_requeue_unmapped_request(struct request *rq);
void dm_kill_unmapped_request(struct request *rq, int error);
int dm_underlying_device_busy(struct request_queue *q);

#endif	/* _LINUX_DEVICE_MAPPER_H */