block: Export I/O topology for block devices and partitions
To support devices with physical block sizes bigger than 512 bytes, we need to ensure proper alignment. This patch adds support for exposing I/O topology characteristics as devices are stacked.

logical_block_size is the smallest unit the device can address.

physical_block_size indicates the smallest I/O the device can write without incurring a read-modify-write penalty.

The io_min parameter is the smallest preferred I/O size reported by the device. In many cases this is the same as the physical block size. However, the io_min parameter can be scaled up when stacking (RAID5 chunk size > physical block size).

The io_opt characteristic indicates the optimal I/O size reported by the device. This is usually the stripe width for arrays.

The alignment_offset parameter indicates the number of bytes the start of the device/partition is offset from the device's natural alignment. Partition tools and MD/DM utilities can use this to pad their offsets so filesystems start on proper boundaries.

Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Committed by: Jens Axboe
parent: cd43e26f07
commit: c72758f337
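For illustration, here is a minimal sketch of how a low-level driver could advertise these characteristics with the setters this patch introduces. The function name, the queue pointer, and the specific values (a hypothetical disk with 4096-byte physical sectors, 512-byte logical blocks, and its first logical block sitting 3584 bytes past a physical boundary) are assumptions, not part of the patch:

/* Hypothetical example -- not from this patch */
static void example_set_topology(struct request_queue *q)
{
	/* smallest unit the device can address */
	blk_queue_logical_block_size(q, 512);

	/* smallest write that avoids a read-modify-write penalty */
	blk_queue_physical_block_size(q, 4096);

	/* smallest preferred I/O size; same as the physical block here */
	blk_queue_io_min(q, 4096);

	/* no optimal I/O size reported by this device */
	blk_queue_io_opt(q, 0);

	/* first logical block is offset 3584 bytes from natural alignment */
	blk_queue_alignment_offset(q, 3584);
}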
@@ -314,11 +314,16 @@ struct queue_limits {
 	unsigned int		max_hw_sectors;
 	unsigned int		max_sectors;
 	unsigned int		max_segment_size;
+	unsigned int		physical_block_size;
+	unsigned int		alignment_offset;
+	unsigned int		io_min;
+	unsigned int		io_opt;
 
 	unsigned short		logical_block_size;
 	unsigned short		max_hw_segments;
 	unsigned short		max_phys_segments;
 
+	unsigned char		misaligned;
 	unsigned char		no_cluster;
 };
 
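To make the new fields concrete, the following values are one illustrative outcome (not taken from the patch) for a hypothetical RAID5 array built from 4 KiB-physical-sector drives with a 64 KiB chunk and three data disks, following the commit message's description of io_min and io_opt:

/* Illustrative stacked limits for the hypothetical RAID5 array above */
struct queue_limits raid5_limits = {
	.logical_block_size	= 512,		/* smallest addressable unit */
	.physical_block_size	= 4096,		/* drive physical sector size */
	.io_min			= 65536,	/* scaled up to the chunk size */
	.io_opt			= 196608,	/* stripe width: 3 * 64 KiB */
	.alignment_offset	= 0,		/* array starts naturally aligned */
};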
@@ -911,6 +916,15 @@ extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
 extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
+extern void blk_queue_physical_block_size(struct request_queue *, unsigned short);
+extern void blk_queue_alignment_offset(struct request_queue *q,
+				       unsigned int alignment);
+extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
+extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
+extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
+			    sector_t offset);
+extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
+			      sector_t offset);
 extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
 extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
 extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
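As a hedged sketch of how the stacking helpers above might be used (the component list, its field names, and the gendisk variable are hypothetical, not from this patch), an MD/DM-style driver could fold each component device's limits into the top-level disk, passing the component's data start so the alignment offset can be accounted for:

/* Hypothetical stacking loop -- names below are illustrative only */
list_for_each_entry(comp, &component_list, list)
	disk_stack_limits(array_disk, comp->bdev, comp->data_offset);

Combining limits this way is what lets a RAID chunk size end up as the array's io_min, as described in the commit message: the stacked io_min can only be scaled up relative to the components' physical block sizes.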
@@ -1047,6 +1061,39 @@ static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
 	return queue_logical_block_size(bdev_get_queue(bdev));
 }
 
+static inline unsigned int queue_physical_block_size(struct request_queue *q)
+{
+	return q->limits.physical_block_size;
+}
+
+static inline unsigned int queue_io_min(struct request_queue *q)
+{
+	return q->limits.io_min;
+}
+
+static inline unsigned int queue_io_opt(struct request_queue *q)
+{
+	return q->limits.io_opt;
+}
+
+static inline int queue_alignment_offset(struct request_queue *q)
+{
+	if (q && q->limits.misaligned)
+		return -1;
+
+	if (q && q->limits.alignment_offset)
+		return q->limits.alignment_offset;
+
+	return 0;
+}
+
+static inline int queue_sector_alignment_offset(struct request_queue *q,
+						sector_t sector)
+{
+	return ((sector << 9) - q->limits.alignment_offset)
+		& (q->limits.io_min - 1);
+}
+
 static inline int queue_dma_alignment(struct request_queue *q)
 {
 	return q ? q->dma_alignment : 511;
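For illustration (not part of the hunk above), these accessors could be used when scanning partitions to record how far a partition's start deviates from the preferred I/O boundary; q, start_sect, and alignment are assumed names:

/*
 * Hypothetical use: with io_min = 65536 and alignment_offset = 0, a
 * partition starting at sector 63 yields ((63 << 9) - 0) & 65535 = 32256,
 * i.e. it begins 32256 bytes past the last io_min-aligned boundary.
 */
int alignment;

if (queue_alignment_offset(q) < 0)
	alignment = -1;		/* stacked device flagged misaligned */
else
	alignment = queue_sector_alignment_offset(q, start_sect);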