summary |
shortlog |
log |
commit | commitdiff |
tree
raw |
patch |
inline | side by side (from parent 1:
34b7d2c)
Block low-level drivers for some reason have been pretty good at
abusing the block layer API. In particular, struct request's fields tend
to get violated in all possible ways. Make it clear that low-level
drivers MUST NOT access or manipulate rq->sector and rq->data_len
directly by prefixing them with double underscores.
This change is also necessary to break the build of out-of-tree code
that assumes the previous block API, where internal fields could be
manipulated and rq->data_len carried the residual count on completion.
[ Impact: hide internal fields, block API change ]
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
INIT_LIST_HEAD(&rq->timeout_list);
rq->cpu = -1;
rq->q = q;
INIT_LIST_HEAD(&rq->timeout_list);
rq->cpu = -1;
rq->q = q;
- rq->sector = (sector_t) -1;
+ rq->__sector = (sector_t) -1;
INIT_HLIST_NODE(&rq->hash);
RB_CLEAR_NODE(&rq->rb_node);
rq->cmd = rq->__cmd;
INIT_HLIST_NODE(&rq->hash);
RB_CLEAR_NODE(&rq->rb_node);
rq->cmd = rq->__cmd;
req->cmd_flags |= REQ_NOIDLE;
req->errors = 0;
req->cmd_flags |= REQ_NOIDLE;
req->errors = 0;
- req->sector = bio->bi_sector;
+ req->__sector = bio->bi_sector;
req->ioprio = bio_prio(bio);
blk_rq_bio_prep(req->q, req, bio);
}
req->ioprio = bio_prio(bio);
blk_rq_bio_prep(req->q, req, bio);
}
req->biotail->bi_next = bio;
req->biotail = bio;
req->biotail->bi_next = bio;
req->biotail = bio;
- req->data_len += bytes;
+ req->__data_len += bytes;
req->ioprio = ioprio_best(req->ioprio, prio);
if (!blk_rq_cpu_valid(req))
req->cpu = bio->bi_comp_cpu;
req->ioprio = ioprio_best(req->ioprio, prio);
if (!blk_rq_cpu_valid(req))
req->cpu = bio->bi_comp_cpu;
* not touch req->buffer either...
*/
req->buffer = bio_data(bio);
* not touch req->buffer either...
*/
req->buffer = bio_data(bio);
- req->sector = bio->bi_sector;
- req->data_len += bytes;
+ req->__sector = bio->bi_sector;
+ req->__data_len += bytes;
req->ioprio = ioprio_best(req->ioprio, prio);
if (!blk_rq_cpu_valid(req))
req->cpu = bio->bi_comp_cpu;
req->ioprio = ioprio_best(req->ioprio, prio);
if (!blk_rq_cpu_valid(req))
req->cpu = bio->bi_comp_cpu;
* can find how many bytes remain in the request
* later.
*/
* can find how many bytes remain in the request
* later.
*/
bio_iovec(bio)->bv_len -= nr_bytes;
}
bio_iovec(bio)->bv_len -= nr_bytes;
}
- req->data_len -= total_bytes;
+ req->__data_len -= total_bytes;
req->buffer = bio_data(req->bio);
/* update sector only for requests with clear definition of sector */
if (blk_fs_request(req) || blk_discard_rq(req))
req->buffer = bio_data(req->bio);
/* update sector only for requests with clear definition of sector */
if (blk_fs_request(req) || blk_discard_rq(req))
- req->sector += total_bytes >> 9;
+ req->__sector += total_bytes >> 9;
/*
* If total number of sectors is less than the first segment
/*
* If total number of sectors is less than the first segment
*/
if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
printk(KERN_ERR "blk: request botched\n");
*/
if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
printk(KERN_ERR "blk: request botched\n");
- req->data_len = blk_rq_cur_bytes(req);
+ req->__data_len = blk_rq_cur_bytes(req);
}
/* recalculate the number of segments */
}
/* recalculate the number of segments */
rq->nr_phys_segments = bio_phys_segments(q, bio);
rq->buffer = bio_data(bio);
}
rq->nr_phys_segments = bio_phys_segments(q, bio);
rq->buffer = bio_data(bio);
}
- rq->data_len = bio->bi_size;
+ rq->__data_len = bio->bi_size;
rq->bio = rq->biotail = bio;
if (bio->bi_bdev)
rq->bio = rq->biotail = bio;
if (bio->bi_bdev)
rq->biotail->bi_next = bio;
rq->biotail = bio;
rq->biotail->bi_next = bio;
rq->biotail = bio;
- rq->data_len += bio->bi_size;
+ rq->__data_len += bio->bi_size;
req->biotail->bi_next = next->bio;
req->biotail = next->biotail;
req->biotail->bi_next = next->bio;
req->biotail = next->biotail;
- req->data_len += blk_rq_bytes(next);
+ req->__data_len += blk_rq_bytes(next);
elv_merge_requests(q, req, next);
elv_merge_requests(q, req, next);
enum rq_cmd_type_bits cmd_type;
unsigned long atomic_flags;
enum rq_cmd_type_bits cmd_type;
unsigned long atomic_flags;
- sector_t sector; /* sector cursor */
- unsigned int data_len; /* total data len, don't access directly */
+ /* the following two fields are internal, NEVER access directly */
+ sector_t __sector; /* sector cursor */
+ unsigned int __data_len; /* total data len */
struct bio *bio;
struct bio *biotail;
struct bio *bio;
struct bio *biotail;
*/
static inline sector_t blk_rq_pos(const struct request *rq)
{
*/
static inline sector_t blk_rq_pos(const struct request *rq)
{
}
static inline unsigned int blk_rq_bytes(const struct request *rq)
{
}
static inline unsigned int blk_rq_bytes(const struct request *rq)
{
}
static inline int blk_rq_cur_bytes(const struct request *rq)
}
static inline int blk_rq_cur_bytes(const struct request *rq)