block: introduce blk_init_flush and its pair
These two temporary helpers hold flush initialization and de-initialization, so that the 'flush queue' can be introduced more easily in a following patch. Once the 'flush queue' and its allocation/free functions are in place, the helpers will be removed for the sake of code readability.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ming Lei <ming.lei@canonical.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
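For orientation, here is a condensed sketch (not part of the patch) of the calling convention the pair establishes. It mirrors how blk_init_allocated_queue() and blk_release_queue() consume the helpers in the hunks below; the wrapper names example_setup_queue/example_release_queue are illustrative only.

/*
 * Illustrative callers only; the real ones are blk_init_allocated_queue()
 * and blk_release_queue() (see the hunks below). Assumes the usual block
 * layer headers (linux/blkdev.h, block/blk.h).
 */
static struct request_queue *example_setup_queue(struct request_queue *q)
{
	/* Replaces the open-coded kzalloc() of q->flush_rq. */
	if (blk_init_flush(q))
		return NULL;

	if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
		goto fail;

	return q;

fail:
	/* Undoes blk_init_flush(); today this is a kfree(q->flush_rq). */
	blk_exit_flush(q);
	return NULL;
}

static void example_release_queue(struct request_queue *q)
{
	/* Replaces the bare kfree(q->flush_rq) in the release path. */
	blk_exit_flush(q);
}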
block/blk-core.c
@@ -705,8 +705,7 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
 	if (!q)
 		return NULL;
 
-	q->flush_rq = kzalloc(sizeof(struct request), GFP_KERNEL);
-	if (!q->flush_rq)
+	if (blk_init_flush(q))
 		return NULL;
 
 	if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
@@ -742,7 +741,7 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
 	return q;
 
 fail:
-	kfree(q->flush_rq);
+	blk_exit_flush(q);
 	return NULL;
 }
 EXPORT_SYMBOL(blk_init_allocated_queue);
block/blk-flush.c
@@ -472,7 +472,7 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
 }
 EXPORT_SYMBOL(blkdev_issue_flush);
 
-int blk_mq_init_flush(struct request_queue *q)
+static int blk_mq_init_flush(struct request_queue *q)
 {
 	struct blk_mq_tag_set *set = q->tag_set;
 
@@ -485,3 +485,20 @@ int blk_mq_init_flush(struct request_queue *q)
 		return -ENOMEM;
 	return 0;
 }
+
+int blk_init_flush(struct request_queue *q)
+{
+	if (q->mq_ops)
+		return blk_mq_init_flush(q);
+
+	q->flush_rq = kzalloc(sizeof(struct request), GFP_KERNEL);
+	if (!q->flush_rq)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void blk_exit_flush(struct request_queue *q)
+{
+	kfree(q->flush_rq);
+}
block/blk-mq.c
@@ -1859,7 +1859,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 
 	blk_mq_add_queue_tag_set(set, q);
 
-	if (blk_mq_init_flush(q))
+	if (blk_init_flush(q))
 		goto err_hw_queues;
 
 	blk_mq_map_swqueue(q);
block/blk-mq.h
@@ -27,7 +27,6 @@ struct blk_mq_ctx {
 
 void __blk_mq_complete_request(struct request *rq);
 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
-int blk_mq_init_flush(struct request_queue *q);
 void blk_mq_freeze_queue(struct request_queue *q);
 void blk_mq_free_queue(struct request_queue *q);
 void blk_mq_clone_flush_request(struct request *flush_rq,
block/blk-sysfs.c
@@ -517,11 +517,11 @@ static void blk_release_queue(struct kobject *kobj)
 	if (q->queue_tags)
 		__blk_queue_free_tags(q);
 
+	blk_exit_flush(q);
+
 	if (q->mq_ops)
 		blk_mq_free_queue(q);
 
-	kfree(q->flush_rq);
-
 	blk_trace_shutdown(q);
 
 	bdi_destroy(&q->backing_dev_info);
block/blk.h
@@ -22,6 +22,9 @@ static inline void __blk_get_queue(struct request_queue *q)
 	kobject_get(&q->kobj);
 }
 
+int blk_init_flush(struct request_queue *q);
+void blk_exit_flush(struct request_queue *q);
+
 int blk_init_rl(struct request_list *rl, struct request_queue *q,
 		gfp_t gfp_mask);
 void blk_exit_rl(struct request_list *rl);