block: make rq sector size accessible for block stats

Currently rq->data_len is decreased by each partial completion and
zeroed by the final completion, so by the time blk_stat_add() is
invoked, data_len is zero and poll_cb never collects any samples,
because blk_mq_poll_stats_bkt() returns -1 when data_len is zero.

We could move blk_stat_add() back to __blk_mq_complete_request(),
but that would defeat the point of calling ktime_get_ns() only
once. Instead, reuse the throtl_size field (renamed stats_sectors)
for both block stats and block throttling, and adjust the logic in
blk_mq_poll_stats_bkt() accordingly.
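
The bucket selection itself is unchanged by switching from bytes to
sectors: a sector is 512 bytes, so sectors == bytes >> 9 and
ilog2(sectors) == ilog2(bytes) - 9 for any request of at least one
sector. A minimal sketch of the equivalence (illustrative, not part
of the patch):

	int bytes = 4096, ddir = 0;
	/* old, byte-based bucketing */
	int old_bucket = ddir + 2 * (ilog2(bytes) - 9);
	/* new, sector-based bucketing; same bucket whenever bytes >= 512 */
	int new_bucket = ddir + 2 * ilog2(bytes >> 9);
	/* old_bucket == new_bucket == 6 */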

Fixes: 4bc6339a58 ("block: move blk_stat_add() to __blk_mq_end_request()")
Tested-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Hou Tao <houtao1@huawei.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 3d24430694 (parent 89f3b6d62f)
Author: Hou Tao, 2019-05-21 15:59:03 +08:00; committed by Jens Axboe
3 files changed, 19 insertions(+), 10 deletions(-)

--- a/block/blk-mq.c
+++ b/block/blk-mq.c

@@ -44,12 +44,12 @@ static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
 static int blk_mq_poll_stats_bkt(const struct request *rq)
 {
-	int ddir, bytes, bucket;
+	int ddir, sectors, bucket;
 
 	ddir = rq_data_dir(rq);
-	bytes = blk_rq_bytes(rq);
+	sectors = blk_rq_stats_sectors(rq);
 
-	bucket = ddir + 2*(ilog2(bytes) - 9);
+	bucket = ddir + 2 * ilog2(sectors);
 
 	if (bucket < 0)
 		return -1;
@@ -333,6 +333,7 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
 	else
 		rq->start_time_ns = 0;
 	rq->io_start_time_ns = 0;
+	rq->stats_sectors = 0;
 	rq->nr_phys_segments = 0;
 #if defined(CONFIG_BLK_DEV_INTEGRITY)
 	rq->nr_integrity_segments = 0;
@@ -681,9 +682,7 @@ void blk_mq_start_request(struct request *rq)
 	if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
 		rq->io_start_time_ns = ktime_get_ns();
-#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
-		rq->throtl_size = blk_rq_sectors(rq);
-#endif
+		rq->stats_sectors = blk_rq_sectors(rq);
 		rq->rq_flags |= RQF_STATS;
 		rq_qos_issue(q, rq);
 	}
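
For context, the ordering that makes this snapshot necessary,
sketched from the surrounding blk-mq code (illustrative; only the
identifiers appearing in this patch and its Fixes target are taken
from the source):

	blk_mq_start_request(rq);
		rq->stats_sectors = blk_rq_sectors(rq);	/* snapshot at issue time */

	/* ... partial completions shrink the request's data_len,
	 * and the final completion zeroes it ... */

	__blk_mq_end_request(rq, error);
		blk_stat_add(rq, now);	/* reads blk_rq_stats_sectors(), still intact */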

--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c

@@ -2248,7 +2248,8 @@ void blk_throtl_stat_add(struct request *rq, u64 time_ns)
 	struct request_queue *q = rq->q;
 	struct throtl_data *td = q->td;
 
-	throtl_track_latency(td, rq->throtl_size, req_op(rq), time_ns >> 10);
+	throtl_track_latency(td, blk_rq_stats_sectors(rq), req_op(rq),
+			     time_ns >> 10);
 }
 
 void blk_throtl_bio_endio(struct bio *bio)

--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h

@@ -206,9 +206,12 @@ struct request {
 #ifdef CONFIG_BLK_WBT
 	unsigned short wbt_flags;
 #endif
-#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
-	unsigned short throtl_size;
-#endif
+	/*
+	 * rq sectors used for blk stats. It has the same value as
+	 * blk_rq_sectors(rq), except that it is never zeroed by
+	 * completion.
+	 */
+	unsigned short stats_sectors;
 
 	/*
 	 * Number of scatter-gather DMA addr+len pairs after
@@ -917,6 +920,7 @@ static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
  * blk_rq_err_bytes()	: bytes left till the next error boundary
  * blk_rq_sectors()	: sectors left in the entire request
  * blk_rq_cur_sectors()	: sectors left in the current segment
+ * blk_rq_stats_sectors()	: sectors of the entire request used for stats
  */
 static inline sector_t blk_rq_pos(const struct request *rq)
 {
@@ -945,6 +949,11 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
 	return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT;
 }
 
+static inline unsigned int blk_rq_stats_sectors(const struct request *rq)
+{
+	return rq->stats_sectors;
+}
+
 #ifdef CONFIG_BLK_DEV_ZONED
 static inline unsigned int blk_rq_zone_no(struct request *rq)
 {
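
As a usage note, a hypothetical completion-time hook shows why the
new helper matters: blk_rq_sectors() may already be zero once the
request has completed, while blk_rq_stats_sectors() still reports the
size the request was issued with. All names below except
blk_rq_stats_sectors() and io_start_time_ns are made up for
illustration:

	/* hypothetical stats hook running at completion time */
	static void my_account_latency(struct request *rq, u64 now)
	{
		u64 lat_ns = now - rq->io_start_time_ns;

		/* blk_rq_sectors(rq) could be 0 here; use the snapshot instead */
		my_record_sample(blk_rq_stats_sectors(rq), lat_ns);
	}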