| author | Tejun Heo <htejun@gmail.com> | 2006-01-06 09:49:58 +0100 |
|---|---|---|
| committer | Jens Axboe <axboe@suse.de> | 2006-01-06 09:49:58 +0100 |
| commit | 52d9e675361261a1eb1716b02222ec6177ec342b (patch) | |
| tree | a9ed62b6fe9b6622b7c42249e983136f38f75255 /block | |
| parent | 8ffdc6550c47f75ca4e6c9f30a2a89063e035cf2 (diff) | |
[BLOCK] ll_rw_blk: separate out bio init part from __make_request
Separate out the bio initialization part from __make_request() into
init_request_from_bio(). It will be used by the following blk_ordered
reimplementation.
Signed-off-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: Jens Axboe <axboe@suse.de>
Diffstat (limited to 'block')
| -rw-r--r-- | block/ll_rw_blk.c | 62 |
|---|---|---|

1 file changed, 33 insertions(+), 29 deletions(-)
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 8b1ae69bc5ac..65c4efc02adf 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -36,6 +36,8 @@
 static void blk_unplug_work(void *data);
 static void blk_unplug_timeout(unsigned long data);
 static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io);
+static void init_request_from_bio(struct request *req, struct bio *bio);
+static int __make_request(request_queue_t *q, struct bio *bio);
 
 /*
  * For the allocated request tables
@@ -1667,8 +1669,6 @@ static int blk_init_free_list(request_queue_t *q)
 	return 0;
 }
 
-static int __make_request(request_queue_t *, struct bio *);
-
 request_queue_t *blk_alloc_queue(gfp_t gfp_mask)
 {
 	return blk_alloc_queue_node(gfp_mask, -1);
@@ -2659,6 +2659,36 @@ void blk_attempt_remerge(request_queue_t *q, struct request *rq)
 
 EXPORT_SYMBOL(blk_attempt_remerge);
 
+static void init_request_from_bio(struct request *req, struct bio *bio)
+{
+	req->flags |= REQ_CMD;
+
+	/*
+	 * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
+	 */
+	if (bio_rw_ahead(bio) || bio_failfast(bio))
+		req->flags |= REQ_FAILFAST;
+
+	/*
+	 * REQ_BARRIER implies no merging, but lets make it explicit
+	 */
+	if (unlikely(bio_barrier(bio)))
+		req->flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
+
+	req->errors = 0;
+	req->hard_sector = req->sector = bio->bi_sector;
+	req->hard_nr_sectors = req->nr_sectors = bio_sectors(bio);
+	req->current_nr_sectors = req->hard_cur_sectors = bio_cur_sectors(bio);
+	req->nr_phys_segments = bio_phys_segments(req->q, bio);
+	req->nr_hw_segments = bio_hw_segments(req->q, bio);
+	req->buffer = bio_data(bio);	/* see ->buffer comment above */
+	req->waiting = NULL;
+	req->bio = req->biotail = bio;
+	req->ioprio = bio_prio(bio);
+	req->rq_disk = bio->bi_bdev->bd_disk;
+	req->start_time = jiffies;
+}
+
 static int __make_request(request_queue_t *q, struct bio *bio)
 {
 	struct request *req;
@@ -2754,33 +2784,7 @@ get_rq:
 	 * We don't worry about that case for efficiency. It won't happen
 	 * often, and the elevators are able to handle it.
 	 */
-
-	req->flags |= REQ_CMD;
-
-	/*
-	 * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
-	 */
-	if (bio_rw_ahead(bio) || bio_failfast(bio))
-		req->flags |= REQ_FAILFAST;
-
-	/*
-	 * REQ_BARRIER implies no merging, but lets make it explicit
-	 */
-	if (unlikely(barrier))
-		req->flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
-
-	req->errors = 0;
-	req->hard_sector = req->sector = sector;
-	req->hard_nr_sectors = req->nr_sectors = nr_sectors;
-	req->current_nr_sectors = req->hard_cur_sectors = cur_nr_sectors;
-	req->nr_phys_segments = bio_phys_segments(q, bio);
-	req->nr_hw_segments = bio_hw_segments(q, bio);
-	req->buffer = bio_data(bio);	/* see ->buffer comment above */
-	req->waiting = NULL;
-	req->bio = req->biotail = bio;
-	req->ioprio = prio;
-	req->rq_disk = bio->bi_bdev->bd_disk;
-	req->start_time = jiffies;
+	init_request_from_bio(req, bio);
 
 	spin_lock_irq(q->queue_lock);
 	if (elv_queue_empty(q))
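For illustration, here is a minimal, self-contained userspace C sketch of the pattern this patch applies. The struct fields and the make_ordered_request() caller are simplified, hypothetical stand-ins, not the kernel's real struct request/struct bio or the actual blk_ordered code; the point is only that once the bio-to-request field mapping lives in a single helper, a second request-construction path can reuse it instead of duplicating the assignments.

```c
/*
 * Simplified userspace model of the refactor above; NOT kernel code.
 * All types and the second caller are hypothetical stand-ins.
 */
#include <stdio.h>

struct bio {
	unsigned long bi_sector;	/* first sector of the I/O */
	unsigned int  bi_size;		/* bytes remaining in the bio */
};

struct request {
	unsigned long sector;
	unsigned long nr_sectors;
	struct bio   *bio;
	struct bio   *biotail;
	int           errors;
};

/* the shared helper: one place that knows how a bio maps onto a request */
static void init_request_from_bio(struct request *req, struct bio *bio)
{
	req->errors = 0;
	req->sector = bio->bi_sector;
	req->nr_sectors = bio->bi_size >> 9;	/* 512-byte sectors */
	req->bio = req->biotail = bio;
}

/* caller 1: the normal submission path, analogous to __make_request() */
static void make_request(struct request *req, struct bio *bio)
{
	init_request_from_bio(req, bio);
	/* ...merging and queueing logic would stay here... */
}

/* caller 2: a second construction path (e.g. an ordered/barrier
 * reimplementation) reuses the helper rather than duplicating it */
static void make_ordered_request(struct request *req, struct bio *bio)
{
	init_request_from_bio(req, bio);
	/* ...ordering-specific setup would go here... */
}

int main(void)
{
	struct bio b = { .bi_sector = 2048, .bi_size = 4096 };
	struct request r1, r2;

	make_request(&r1, &b);
	make_ordered_request(&r2, &b);
	printf("sector=%lu nr_sectors=%lu\n", r1.sector, r1.nr_sectors);
	return 0;
}
```

The design choice mirrors the patch itself: __make_request() keeps its queueing and merging logic, while everything that depends only on the bio moves behind init_request_from_bio(), which the blk_ordered reimplementation mentioned in the log can then call when building requests of its own.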