block: Use accessor functions for queue limits
Convert all external users of queue limits to using wrapper functions instead of poking the request queue variables directly.

Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
This commit is contained in:

Committed by: Jens Axboe
Parent: e1defc4ff0
Commit: ae03bf639a
@@ -510,7 +510,7 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
 	 * combine_restrictions_low()
 	 */
 	rs->max_sectors =
-		min_not_zero(rs->max_sectors, q->max_sectors);
+		min_not_zero(rs->max_sectors, queue_max_sectors(q));

 	/*
 	 * Check if merge fn is supported.
@@ -525,25 +525,25 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)

 	rs->max_phys_segments =
 		min_not_zero(rs->max_phys_segments,
-			     q->max_phys_segments);
+			     queue_max_phys_segments(q));

 	rs->max_hw_segments =
-		min_not_zero(rs->max_hw_segments, q->max_hw_segments);
+		min_not_zero(rs->max_hw_segments, queue_max_hw_segments(q));

 	rs->logical_block_size = max(rs->logical_block_size,
 				     queue_logical_block_size(q));

 	rs->max_segment_size =
-		min_not_zero(rs->max_segment_size, q->max_segment_size);
+		min_not_zero(rs->max_segment_size, queue_max_segment_size(q));

 	rs->max_hw_sectors =
-		min_not_zero(rs->max_hw_sectors, q->max_hw_sectors);
+		min_not_zero(rs->max_hw_sectors, queue_max_hw_sectors(q));

 	rs->seg_boundary_mask =
 		min_not_zero(rs->seg_boundary_mask,
-			     q->seg_boundary_mask);
+			     queue_segment_boundary(q));

-	rs->bounce_pfn = min_not_zero(rs->bounce_pfn, q->bounce_pfn);
+	rs->bounce_pfn = min_not_zero(rs->bounce_pfn, queue_bounce_pfn(q));

 	rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
 }
@@ -914,13 +914,13 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
 	 * restrictions.
 	 */
 	blk_queue_max_sectors(q, t->limits.max_sectors);
-	q->max_phys_segments = t->limits.max_phys_segments;
-	q->max_hw_segments = t->limits.max_hw_segments;
-	q->logical_block_size = t->limits.logical_block_size;
-	q->max_segment_size = t->limits.max_segment_size;
-	q->max_hw_sectors = t->limits.max_hw_sectors;
-	q->seg_boundary_mask = t->limits.seg_boundary_mask;
-	q->bounce_pfn = t->limits.bounce_pfn;
+	blk_queue_max_phys_segments(q, t->limits.max_phys_segments);
+	blk_queue_max_hw_segments(q, t->limits.max_hw_segments);
+	blk_queue_logical_block_size(q, t->limits.logical_block_size);
+	blk_queue_max_segment_size(q, t->limits.max_segment_size);
+	blk_queue_max_hw_sectors(q, t->limits.max_hw_sectors);
+	blk_queue_segment_boundary(q, t->limits.seg_boundary_mask);
+	blk_queue_bounce_limit(q, t->limits.bounce_pfn);

 	if (t->limits.no_cluster)
 		queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
@@ -146,7 +146,7 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
 		 * a one page request is never in violation.
 		 */
 		if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
-		    mddev->queue->max_sectors > (PAGE_SIZE>>9))
+		    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
 			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);

 		disk->num_sectors = rdev->sectors;
@@ -303,7 +303,7 @@ static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
 			 * merge_bvec_fn will be involved in multipath.)
 			 */
 			if (q->merge_bvec_fn &&
-			    mddev->queue->max_sectors > (PAGE_SIZE>>9))
+			    queue_max_sectors(q) > (PAGE_SIZE>>9))
 				blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);

 			conf->working_disks++;
@@ -467,7 +467,7 @@ static int multipath_run (mddev_t *mddev)
 		 * violating it, not that we ever expect a device with
 		 * a merge_bvec_fn to be involved in multipath */
 		if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
-		    mddev->queue->max_sectors > (PAGE_SIZE>>9))
+		    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
 			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);

 		if (!test_bit(Faulty, &rdev->flags))
@@ -144,7 +144,7 @@ static int create_strip_zones (mddev_t *mddev)
 		 */

 		if (rdev1->bdev->bd_disk->queue->merge_bvec_fn &&
-		    mddev->queue->max_sectors > (PAGE_SIZE>>9))
+		    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
 			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);

 		if (!smallest || (rdev1->sectors < smallest->sectors))
@@ -1130,7 +1130,7 @@ static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
 			 * a one page request is never in violation.
 			 */
 			if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
-			    mddev->queue->max_sectors > (PAGE_SIZE>>9))
+			    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
 				blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);

 			p->head_position = 0;
@@ -1996,7 +1996,7 @@ static int run(mddev_t *mddev)
 		 * a one page request is never in violation.
 		 */
 		if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
-		    mddev->queue->max_sectors > (PAGE_SIZE>>9))
+		    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
 			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);

 		disk->head_position = 0;
@@ -1158,8 +1158,8 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
 			 * a one page request is never in violation.
 			 */
 			if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
-			    mddev->queue->max_sectors > (PAGE_SIZE>>9))
-				mddev->queue->max_sectors = (PAGE_SIZE>>9);
+			    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
+				blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);

 			p->head_position = 0;
 			rdev->raid_disk = mirror;
@@ -2145,8 +2145,8 @@ static int run(mddev_t *mddev)
 		 * a one page request is never in violation.
 		 */
 		if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
-		    mddev->queue->max_sectors > (PAGE_SIZE>>9))
-			mddev->queue->max_sectors = (PAGE_SIZE>>9);
+		    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
+			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);

 		disk->head_position = 0;
 	}
@@ -3463,10 +3463,10 @@ static int bio_fits_rdev(struct bio *bi)
 {
 	struct request_queue *q = bdev_get_queue(bi->bi_bdev);

-	if ((bi->bi_size>>9) > q->max_sectors)
+	if ((bi->bi_size>>9) > queue_max_sectors(q))
 		return 0;
 	blk_recount_segments(q, bi);
-	if (bi->bi_phys_segments > q->max_phys_segments)
+	if (bi->bi_phys_segments > queue_max_phys_segments(q))
 		return 0;

 	if (q->merge_bvec_fn)
Reference in New Issue
Block a user