#ifndef BLK_MQ_H
#define BLK_MQ_H

#include <linux/blkdev.h>

struct blk_mq_tags;

struct blk_mq_cpu_notifier {
        struct list_head list;
        void *data;
        void (*notify)(void *data, unsigned long action, unsigned int cpu);
};

struct blk_mq_hw_ctx {
        struct {
                spinlock_t              lock;
                struct list_head        dispatch;
        } ____cacheline_aligned_in_smp;

        unsigned long           state;          /* BLK_MQ_S_* flags */
        struct delayed_work     delayed_work;

        unsigned long           flags;          /* BLK_MQ_F_* flags */

        struct request_queue    *queue;
        unsigned int            queue_num;

        void                    *driver_data;

        unsigned int            nr_ctx;
        struct blk_mq_ctx       **ctxs;
        unsigned int            nr_ctx_map;
        unsigned long           *ctx_map;

        struct request          **rqs;
        struct list_head        page_list;
        struct blk_mq_tags      *tags;

        unsigned long           queued;
        unsigned long           run;
#define BLK_MQ_MAX_DISPATCH_ORDER       10
        unsigned long           dispatched[BLK_MQ_MAX_DISPATCH_ORDER];

        unsigned int            queue_depth;
        unsigned int            numa_node;
        unsigned int            cmd_size;       /* per-request extra data */

        struct blk_mq_cpu_notifier      cpu_notifier;
        struct kobject          kobj;
};

struct blk_mq_reg {
        struct blk_mq_ops       *ops;
        unsigned int            nr_hw_queues;
        unsigned int            queue_depth;
        unsigned int            reserved_tags;
        unsigned int            cmd_size;       /* per-request extra data */
        int                     numa_node;
        unsigned int            timeout;
        unsigned int            flags;          /* BLK_MQ_F_* */
};
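
/*
 * Illustrative sketch (not part of this header): a driver describes its
 * queue setup by filling in a struct blk_mq_reg and handing it to
 * blk_mq_init_queue(), declared below. The names my_mq_ops, my_cmd and
 * my_dev are hypothetical placeholders, and the depth is only a plausible
 * default for a single-queue device:
 *
 *      static struct blk_mq_reg my_reg = {
 *              .ops            = &my_mq_ops,
 *              .nr_hw_queues   = 1,
 *              .queue_depth    = 64,
 *              .cmd_size       = sizeof(struct my_cmd),
 *              .numa_node      = NUMA_NO_NODE,
 *              .flags          = BLK_MQ_F_SHOULD_MERGE,
 *      };
 *
 *      q = blk_mq_init_queue(&my_reg, my_dev);
 */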

typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, struct request *);
typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int);
typedef struct blk_mq_hw_ctx *(alloc_hctx_fn)(struct blk_mq_reg *, unsigned int);
typedef void (free_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);

struct blk_mq_ops {
        /*
         * Queue request
         */
        queue_rq_fn             *queue_rq;

        /*
         * Map to specific hardware queue
         */
        map_queue_fn            *map_queue;

        /*
         * Called on request timeout
         */
        rq_timed_out_fn         *timeout;

        /*
         * Override for hctx allocations (should probably go)
         */
        alloc_hctx_fn           *alloc_hctx;
        free_hctx_fn            *free_hctx;

        /*
         * Called when the block layer side of a hardware queue has been
         * set up, allowing the driver to allocate/init matching structures.
         * Ditto for exit/teardown.
         */
        init_hctx_fn            *init_hctx;
        exit_hctx_fn            *exit_hctx;
};
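
/*
 * Illustrative sketch (hypothetical driver code): a minimal blk_mq_ops
 * instance. my_queue_rq() starts the hardware command and reports back
 * with one of the BLK_MQ_RQ_QUEUE_* codes from the enum below, while
 * blk_mq_map_queue(), declared further down, provides the stock
 * CPU-to-hardware-queue mapping. my_hw_has_room() and my_hw_submit()
 * stand in for device-specific code:
 *
 *      static int my_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
 *      {
 *              struct my_dev *dev = hctx->driver_data;
 *
 *              if (!my_hw_has_room(dev))
 *                      return BLK_MQ_RQ_QUEUE_BUSY;
 *              my_hw_submit(dev, rq);
 *              return BLK_MQ_RQ_QUEUE_OK;
 *      }
 *
 *      static struct blk_mq_ops my_mq_ops = {
 *              .queue_rq       = my_queue_rq,
 *              .map_queue      = blk_mq_map_queue,
 *      };
 */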

enum {
        BLK_MQ_RQ_QUEUE_OK      = 0,    /* queued fine */
        BLK_MQ_RQ_QUEUE_BUSY    = 1,    /* requeue IO for later */
        BLK_MQ_RQ_QUEUE_ERROR   = 2,    /* end IO with error */

        BLK_MQ_F_SHOULD_MERGE   = 1 << 0,
        BLK_MQ_F_SHOULD_SORT    = 1 << 1,
        BLK_MQ_F_SHOULD_IPI     = 1 << 2,

        BLK_MQ_S_STOPPED        = 1 << 0,

        BLK_MQ_MAX_DEPTH        = 2048,
};

struct request_queue *blk_mq_init_queue(struct blk_mq_reg *, void *);
int blk_mq_register_disk(struct gendisk *);
void blk_mq_unregister_disk(struct gendisk *);
void blk_mq_init_commands(struct request_queue *,
                          void (*init)(void *data, struct blk_mq_hw_ctx *,
                                       struct request *, unsigned int),
                          void *data);
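
/*
 * Illustrative sketch: blk_mq_init_commands() runs the given callback once
 * for every preallocated request, which lets a driver set up the per-request
 * data reserved via cmd_size up front. The callback name and struct my_cmd
 * are hypothetical:
 *
 *      static void my_init_cmd(void *data, struct blk_mq_hw_ctx *hctx,
 *                              struct request *rq, unsigned int i)
 *      {
 *              struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);
 *
 *              cmd->dev = data;
 *      }
 *
 *      blk_mq_init_commands(q, my_init_cmd, my_dev);
 */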

void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_insert_request(struct request_queue *, struct request *, bool);
void blk_mq_run_queues(struct request_queue *q, bool async);
void blk_mq_free_request(struct request *rq);
bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp, bool reserved);
struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw, gfp_t gfp);
struct request *blk_mq_rq_from_tag(struct request_queue *q, unsigned int tag);
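
/*
 * Illustrative sketch: requests may also be allocated directly, outside the
 * regular submission path (e.g. for driver-internal commands). Hypothetical
 * caller in sleepable context; the NULL check assumes allocation can fail:
 *
 *      struct request *rq;
 *
 *      rq = blk_mq_alloc_request(q, READ, GFP_KERNEL, false);
 *      if (!rq)
 *              return -ENOMEM;
 *      ...
 *      blk_mq_free_request(rq);
 */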

struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_reg *, unsigned int);
void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *, unsigned int);

void blk_mq_end_io(struct request *rq, int error);

void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queues(struct request_queue *q);
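
/*
 * Illustrative sketch: a common flow-control pattern is to stop a hardware
 * queue when the device runs out of resources and restart it from the
 * completion path. Hypothetical queue_rq fragment:
 *
 *      if (my_hw_is_full(hctx->driver_data)) {
 *              blk_mq_stop_hw_queue(hctx);
 *              return BLK_MQ_RQ_QUEUE_BUSY;
 *      }
 *
 * and later, once completions have freed up resources:
 *
 *      blk_mq_start_stopped_hw_queues(q);
 */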

/*
 * Driver command data is immediately after the request. So subtract request
 * size to get back to the original request.
 */
static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{
        return pdu - sizeof(struct request);
}

static inline void *blk_mq_rq_to_pdu(struct request *rq)
{
        return (void *) rq + sizeof(*rq);
}
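
/*
 * Illustrative sketch: with cmd_size set in struct blk_mq_reg, each request
 * carries its driver data directly behind it, so no separate allocation is
 * needed. struct my_cmd is a hypothetical per-request structure:
 *
 *      struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);
 *
 * and, from a completion handler that only has the pdu:
 *
 *      struct request *rq = blk_mq_rq_from_pdu(cmd);
 *
 *      blk_mq_end_io(rq, error);
 */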

static inline struct request *blk_mq_tag_to_rq(struct blk_mq_hw_ctx *hctx,
                                               unsigned int tag)
{
        return hctx->rqs[tag];
}

#define queue_for_each_hw_ctx(q, hctx, i)                               \
        for ((i) = 0; (i) < (q)->nr_hw_queues &&                        \
             ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)

#define queue_for_each_ctx(q, ctx, i)                                   \
        for ((i) = 0; (i) < (q)->nr_queues &&                           \
             ({ ctx = per_cpu_ptr((q)->queue_ctx, (i)); 1; }); (i)++)

#define hctx_for_each_ctx(hctx, ctx, i)                                 \
        for ((i) = 0; (i) < (hctx)->nr_ctx &&                           \
             ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
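
/*
 * Illustrative sketch: the iterators above follow the usual kernel
 * for-each idiom. For example, hypothetical debug code walking every
 * hardware queue of a device:
 *
 *      struct blk_mq_hw_ctx *hctx;
 *      unsigned int i;
 *
 *      queue_for_each_hw_ctx(q, hctx, i)
 *              pr_info("hwq %u: %lu queued\n", hctx->queue_num, hctx->queued);
 */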

#define blk_ctx_sum(q, sum)                                             \
({                                                                      \
        struct blk_mq_ctx *__x;                                         \
        unsigned int __ret = 0, __i;                                    \
                                                                        \
        queue_for_each_ctx((q), __x, __i)                               \
                __ret += sum;                                           \
        __ret;                                                          \
})
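
/*
 * Illustrative sketch: blk_ctx_sum() evaluates the sum expression once per
 * software context, with __x bound to the current blk_mq_ctx, and adds the
 * results up. nr_pending below is a hypothetical per-context counter:
 *
 *      unsigned int pending = blk_ctx_sum(q, __x->nr_pending);
 */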

#endif