dm table: establish queue limits by copying table limits
Copy the table's queue_limits to the DM device's request_queue. This
properly initializes the queue's topology limits and also avoids having
to track the evolution of 'struct queue_limits' in
dm_table_set_restrictions().
Also fixes a bug that was introduced in dm_table_set_restrictions() via
commit ae03bf639a. In addition to
establishing 'bounce_pfn' in the queue's limits, blk_queue_bounce_limit()
also performs an allocation to setup the ISA DMA pool. This allocation
resulted in "sleeping function called from invalid context" when called
from dm_table_set_restrictions().
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
This commit is contained in:
committed by
Alasdair G Kergon
parent
5ab97588fb
commit
1197764e40
@@ -956,17 +956,9 @@ no_integrity:
|
|||||||
void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
|
void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
|
||||||
{
|
{
|
||||||
/*
|
/*
|
||||||
* Make sure we obey the optimistic sub devices
|
* Copy table's limits to the DM device's request_queue
|
||||||
* restrictions.
|
|
||||||
*/
|
*/
|
||||||
blk_queue_max_sectors(q, t->limits.max_sectors);
|
q->limits = t->limits;
|
||||||
blk_queue_max_phys_segments(q, t->limits.max_phys_segments);
|
|
||||||
blk_queue_max_hw_segments(q, t->limits.max_hw_segments);
|
|
||||||
blk_queue_logical_block_size(q, t->limits.logical_block_size);
|
|
||||||
blk_queue_max_segment_size(q, t->limits.max_segment_size);
|
|
||||||
blk_queue_max_hw_sectors(q, t->limits.max_hw_sectors);
|
|
||||||
blk_queue_segment_boundary(q, t->limits.seg_boundary_mask);
|
|
||||||
blk_queue_bounce_limit(q, t->limits.bounce_pfn);
|
|
||||||
|
|
||||||
if (t->limits.no_cluster)
|
if (t->limits.no_cluster)
|
||||||
queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
|
queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
|
||||||
|
Reference in New Issue
Block a user