[SCSI] separate max_sectors from max_hw_sectors
- Export __blk_put_request and blk_execute_rq_nowait, needed for async REQ_BLOCK_PC requests.
- Separate max_hw_sectors and max_sectors for the block/scsi_ioctl.c and SG_IO bio.c helpers, per Jens's last comments.

Since block/scsi_ioctl.c SG_IO was already testing against max_sectors, and SCSI-ml was setting max_sectors and max_hw_sectors to the same value, this does not change any SCSI SG_IO behavior. It only prepares ll_rw_blk.c, scsi_ioctl.c, and bio.c for when SCSI-ml begins to set a valid max_hw_sectors for all LLDs. Today, if an LLD does not set it, SCSI-ml sets it to a safe default, and some LLDs set it to an artificially low value to work around memory and feedback issues.

Note: since we now cap max_sectors at BLK_DEF_MAX_SECTORS, which is 1024, drivers that used to call blk_queue_max_sectors with a larger value will now see fs requests capped to BLK_DEF_MAX_SECTORS (a sketch of this capping follows the diffstat below).

Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
commit defd94b754
parent 8b05b773b6
Committed by: James Bottomley

 fs/bio.c | 20
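For context on the Note above: the split between the two limits is done when a driver registers its transfer limit via blk_queue_max_sectors(). The following is an illustrative sketch of that capping, assuming the 2.6.15-era request_queue fields; it is not the verbatim ll_rw_blk.c hunk from this commit.

/* Illustrative sketch (not the verbatim ll_rw_blk.c change): split the
 * driver-supplied limit into a capped fs limit and the true hw limit. */
void blk_queue_max_sectors(request_queue_t *q, unsigned int max_sectors)
{
	if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
		max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __FUNCTION__, max_sectors);
	}

	if (BLK_DEF_MAX_SECTORS > max_sectors)
		/* small limit: fs and hw limits stay identical */
		q->max_hw_sectors = q->max_sectors = max_sectors;
	else {
		/* large limit: cap fs requests, keep the hw capability */
		q->max_sectors = BLK_DEF_MAX_SECTORS;	/* 1024 sectors */
		q->max_hw_sectors = max_sectors;
	}
}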
@@ -313,7 +313,8 @@ int bio_get_nr_vecs(struct block_device *bdev)
 }
 
 static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
-			  *page, unsigned int len, unsigned int offset)
+			  *page, unsigned int len, unsigned int offset,
+			  unsigned short max_sectors)
 {
 	int retried_segments = 0;
 	struct bio_vec *bvec;
@@ -327,7 +328,7 @@ static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
 	if (bio->bi_vcnt >= bio->bi_max_vecs)
 		return 0;
 
-	if (((bio->bi_size + len) >> 9) > q->max_sectors)
+	if (((bio->bi_size + len) >> 9) > max_sectors)
 		return 0;
 
 	/*
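To make the new check concrete, here is a worked example with assumed numbers (the limits are illustrative, not taken from this commit):

/* Assumed values: q->max_sectors = 1024 (BLK_DEF_MAX_SECTORS),
 * q->max_hw_sectors = 8192, bio->bi_size = 524288 bytes (512 KiB),
 * len = 4096 bytes.  bi_size is in bytes; >> 9 converts to 512-byte
 * sectors:
 *
 *	(bio->bi_size + len) >> 9 == (524288 + 4096) >> 9 == 1032 sectors
 *
 * bio_add_page() passes q->max_sectors:	1032 > 1024  -> page rejected
 * bio_add_pc_page() passes q->max_hw_sectors:	1032 <= 8192 -> page accepted
 */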
@@ -401,7 +402,7 @@ static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
 int bio_add_pc_page(request_queue_t *q, struct bio *bio, struct page *page,
 		    unsigned int len, unsigned int offset)
 {
-	return __bio_add_page(q, bio, page, len, offset);
+	return __bio_add_page(q, bio, page, len, offset, q->max_hw_sectors);
 }
 
 /**
@@ -420,8 +421,8 @@ int bio_add_pc_page(request_queue_t *q, struct bio *bio, struct page *page,
 int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
 		 unsigned int offset)
 {
-	return __bio_add_page(bdev_get_queue(bio->bi_bdev), bio, page,
-			      len, offset);
+	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
+	return __bio_add_page(q, bio, page, len, offset, q->max_sectors);
 }
 
 struct bio_map_data {
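After the two hunks above, the split between the entry points looks like this from a caller's point of view (illustrative usage, not code from the patch):

	struct request_queue *q = bdev_get_queue(bio->bi_bdev);

	/* fs-originated I/O: additions are bounded by the (possibly
	 * capped) q->max_sectors */
	bio_add_page(bio, page, len, offset);

	/* pass-through (REQ_BLOCK_PC/SG_IO) I/O: additions may grow up
	 * to the device's real q->max_hw_sectors */
	bio_add_pc_page(q, bio, page, len, offset);

The remaining hunks switch bio.c's own pass-through mapping helpers over to bio_add_pc_page() so they use the hardware limit too.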
@@ -533,7 +534,7 @@ struct bio *bio_copy_user(request_queue_t *q, unsigned long uaddr,
 			break;
 		}
 
-		if (__bio_add_page(q, bio, page, bytes, 0) < bytes) {
+		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes) {
 			ret = -EINVAL;
 			break;
 		}
@@ -647,7 +648,8 @@ static struct bio *__bio_map_user_iov(request_queue_t *q,
 			/*
 			 * sorry...
 			 */
-			if (__bio_add_page(q, bio, pages[j], bytes, offset) < bytes)
+			if (bio_add_pc_page(q, bio, pages[j], bytes, offset) <
+			    bytes)
 				break;
 
 			len -= bytes;
@@ -820,8 +822,8 @@ static struct bio *__bio_map_kern(request_queue_t *q, void *data,
 		if (bytes > len)
 			bytes = len;
 
-		if (__bio_add_page(q, bio, virt_to_page(data), bytes,
-				   offset) < bytes)
+		if (bio_add_pc_page(q, bio, virt_to_page(data), bytes,
+				    offset) < bytes)
 			break;
 
 		data += bytes;
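The exports mentioned at the top, __blk_put_request and blk_execute_rq_nowait, exist so drivers can issue REQ_BLOCK_PC requests asynchronously. A minimal sketch of that use case follows, assuming the 2.6.15-era block API (rq_end_io_fn took only the request at that point, and request flags lived in rq->flags); the function names my_pc_end_io and submit_pc_command are hypothetical.

/* Hypothetical async pass-through submission built on the newly
 * exported symbols. */
static void my_pc_end_io(struct request *rq)
{
	/* __blk_put_request() is exported by this patch so async
	 * completion paths can free the request. */
	__blk_put_request(rq->q, rq);
}

static void submit_pc_command(request_queue_t *q, struct gendisk *disk,
			      unsigned char *cdb, unsigned int cdb_len)
{
	struct request *rq = blk_get_request(q, READ, __GFP_WAIT);

	rq->flags |= REQ_BLOCK_PC;	/* pass-through: bios sized by max_hw_sectors */
	memcpy(rq->cmd, cdb, cdb_len);
	rq->cmd_len = cdb_len;
	rq->timeout = 60 * HZ;

	/* blk_execute_rq_nowait() is the other export: queue the request
	 * and return without waiting for completion. */
	blk_execute_rq_nowait(q, disk, rq, 0, my_pc_end_io);
}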