Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block
* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
  cfq-iosched: limit coop preemption
  cfq-iosched: fix bad return value cfq_should_preempt()
  backing-dev: bdi sb prune should be in the unregister path, not destroy
  Fix bio_alloc() and bio_kmalloc() documentation
  bio_put(): add bio_clone() to the list of functions in the comment
block/cfq-iosched.c

@@ -196,6 +196,7 @@ enum cfqq_state_flags {
 	CFQ_CFQQ_FLAG_slice_new,	/* no requests dispatched in slice */
 	CFQ_CFQQ_FLAG_sync,		/* synchronous queue */
 	CFQ_CFQQ_FLAG_coop,		/* has done a coop jump of the queue */
+	CFQ_CFQQ_FLAG_coop_preempt,	/* coop preempt */
 };
 
 #define CFQ_CFQQ_FNS(name)						\
@@ -222,6 +223,7 @@ CFQ_CFQQ_FNS(prio_changed);
 CFQ_CFQQ_FNS(slice_new);
 CFQ_CFQQ_FNS(sync);
 CFQ_CFQQ_FNS(coop);
+CFQ_CFQQ_FNS(coop_preempt);
 #undef CFQ_CFQQ_FNS
 
 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	\
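Note: the body of CFQ_CFQQ_FNS() lies outside this hunk's context. Judging from the helpers the later hunks call (cfq_mark_cfqq_coop_preempt(), cfq_clear_cfqq_coop_preempt(), cfq_cfqq_coop_preempt()), the macro presumably generates a mark/clear/test trio per flag, roughly like the sketch below; this is an assumption, not text from the patch.

    /* Assumed expansion of CFQ_CFQQ_FNS(coop_preempt); the real macro body
     * is not shown in this diff. */
    static inline void cfq_mark_cfqq_coop_preempt(struct cfq_queue *cfqq)
    {
    	cfqq->flags |= (1 << CFQ_CFQQ_FLAG_coop_preempt);
    }
    static inline void cfq_clear_cfqq_coop_preempt(struct cfq_queue *cfqq)
    {
    	cfqq->flags &= ~(1 << CFQ_CFQQ_FLAG_coop_preempt);
    }
    static inline int cfq_cfqq_coop_preempt(struct cfq_queue *cfqq)
    {
    	return (cfqq->flags & (1 << CFQ_CFQQ_FLAG_coop_preempt)) != 0;
    }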
@@ -945,10 +947,13 @@ static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
 {
 	if (!cfqq) {
 		cfqq = cfq_get_next_queue(cfqd);
-		if (cfqq)
+		if (cfqq && !cfq_cfqq_coop_preempt(cfqq))
 			cfq_clear_cfqq_coop(cfqq);
 	}
 
+	if (cfqq)
+		cfq_clear_cfqq_coop_preempt(cfqq);
+
 	__cfq_set_active_queue(cfqd, cfqq);
 	return cfqq;
 }
@@ -2051,7 +2056,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
 	 * it's a metadata request and the current queue is doing regular IO.
 	 */
 	if (rq_is_meta(rq) && !cfqq->meta_pending)
-		return false;
+		return true;
 
 	/*
 	 * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
@@ -2066,8 +2071,16 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
 	 * if this request is as-good as one we would expect from the
 	 * current cfqq, let it preempt
 	 */
-	if (cfq_rq_close(cfqd, rq))
+	if (cfq_rq_close(cfqd, rq) && (!cfq_cfqq_coop(new_cfqq) ||
+	    cfqd->busy_queues == 1)) {
+		/*
+		 * Mark new queue coop_preempt, so its coop flag will not be
+		 * cleared when new queue gets scheduled at the very first time
+		 */
+		cfq_mark_cfqq_coop_preempt(new_cfqq);
+		cfq_mark_cfqq_coop(new_cfqq);
 		return true;
+	}
 
 	return false;
 }
fs/bio.c | 28
@@ -325,8 +325,16 @@ static void bio_fs_destructor(struct bio *bio)
  * @gfp_mask:   allocation mask to use
  * @nr_iovecs:	number of iovecs
  *
- * Allocate a new bio with @nr_iovecs bvecs.  If @gfp_mask
- * contains __GFP_WAIT, the allocation is guaranteed to succeed.
+ * bio_alloc will allocate a bio and associated bio_vec array that can hold
+ * at least @nr_iovecs entries. Allocations will be done from the
+ * fs_bio_set. Also see @bio_alloc_bioset and @bio_kmalloc.
+ *
+ * If %__GFP_WAIT is set, then bio_alloc will always be able to allocate
+ * a bio. This is due to the mempool guarantees. To make this work, callers
+ * must never allocate more than 1 bio at a time from this pool. Callers
+ * that need to allocate more than 1 bio must always submit the previously
+ * allocated bio for IO before attempting to allocate a new one. Failure to
+ * do so can cause livelocks under memory pressure.
  *
  *   RETURNS:
  *   Pointer to new bio on success, NULL on failure.
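The livelock warning is the substantive part of the rewritten bio_alloc() comment. A minimal caller sketch that respects the one-bio-in-flight rule could look like the following; write_one_page_bio() and my_end_io() are made-up names, and the field names (bi_bdev, bi_sector) are the ones this era of the tree uses:

    #include <linux/bio.h>
    #include <linux/fs.h>

    /* Illustrative completion handler: drop the reference bio_alloc() returned. */
    static void my_end_io(struct bio *bio, int error)
    {
    	bio_put(bio);
    }

    /* Illustrative helper: allocate one bio and submit it before any further
     * bio_alloc() call, as the mempool rule above requires. */
    static void write_one_page_bio(struct block_device *bdev, struct page *page,
    			       sector_t sector)
    {
    	struct bio *bio = bio_alloc(GFP_NOIO, 1);  /* __GFP_WAIT set, cannot fail */

    	bio->bi_bdev = bdev;
    	bio->bi_sector = sector;
    	bio->bi_end_io = my_end_io;
    	bio_add_page(bio, page, PAGE_SIZE, 0);
    	submit_bio(WRITE, bio);
    }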
@@ -350,21 +358,13 @@ static void bio_kmalloc_destructor(struct bio *bio)
 }
 
 /**
- * bio_alloc - allocate a bio for I/O
+ * bio_kmalloc - allocate a bio for I/O using kmalloc()
  * @gfp_mask:   the GFP_ mask given to the slab allocator
  * @nr_iovecs:	number of iovecs to pre-allocate
  *
  * Description:
- *   bio_alloc will allocate a bio and associated bio_vec array that can hold
- *   at least @nr_iovecs entries. Allocations will be done from the
- *   fs_bio_set. Also see @bio_alloc_bioset.
- *
- *   If %__GFP_WAIT is set, then bio_alloc will always be able to allocate
- *   a bio. This is due to the mempool guarantees. To make this work, callers
- *   must never allocate more than 1 bio at a time from this pool. Callers
- *   that need to allocate more than 1 bio must always submit the previously
- *   allocated bio for IO before attempting to allocate a new one. Failure to
- *   do so can cause livelocks under memory pressure.
+ *   Allocate a new bio with @nr_iovecs bvecs.  If @gfp_mask contains
+ *   %__GFP_WAIT, the allocation is guaranteed to succeed.
  *
  **/
 struct bio *bio_kmalloc(gfp_t gfp_mask, int nr_iovecs)
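For contrast with bio_alloc(): bio_kmalloc() takes its memory from the slab allocator rather than the fs_bio_set mempool, so a caller that cannot pass %__GFP_WAIT has to cope with NULL. A hedged sketch (the helper name is made up):

    #include <linux/bio.h>

    /* Illustrative: in a context where sleeping is not allowed, __GFP_WAIT is
     * unavailable, so the kmalloc-backed allocation may fail. */
    static struct bio *try_alloc_bio_atomic(int nr_vecs)
    {
    	return bio_kmalloc(GFP_ATOMIC, nr_vecs);  /* may return NULL; caller must check */
    }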
@@ -407,7 +407,7 @@ EXPORT_SYMBOL(zero_fill_bio);
  *
  * Description:
  *   Put a reference to a &struct bio, either one you have gotten with
- *   bio_alloc or bio_get. The last put of a bio will free it.
+ *   bio_alloc, bio_get or bio_clone. The last put of a bio will free it.
  **/
 void bio_put(struct bio *bio)
 {
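Since bio_clone() is now listed, a usage sketch showing the clone/put pairing; the stacking-driver helper names are illustrative, only the bio_* calls are real:

    #include <linux/bio.h>
    #include <linux/fs.h>

    /* Illustrative endio: finish the original bio, then drop the clone's reference. */
    static void clone_end_io(struct bio *clone, int error)
    {
    	struct bio *orig = clone->bi_private;

    	bio_endio(orig, error);
    	bio_put(clone);  /* bio_clone() references are released with bio_put(), per the comment */
    }

    /* Illustrative stacking path: clone, redirect completion, submit. */
    static void submit_cloned(struct bio *orig)
    {
    	struct bio *clone = bio_clone(orig, GFP_NOIO);

    	clone->bi_private = orig;
    	clone->bi_end_io = clone_end_io;
    	submit_bio(clone->bi_rw, clone);
    }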
mm/backing-dev.c

@@ -628,6 +628,8 @@ static void bdi_prune_sb(struct backing_dev_info *bdi)
 void bdi_unregister(struct backing_dev_info *bdi)
 {
 	if (bdi->dev) {
+		bdi_prune_sb(bdi);
+
 		if (!bdi_cap_flush_forker(bdi))
 			bdi_wb_shutdown(bdi);
 		bdi_debug_unregister(bdi);
@@ -697,7 +699,6 @@ void bdi_destroy(struct backing_dev_info *bdi)
 		spin_unlock(&inode_lock);
 	}
 
-	bdi_prune_sb(bdi);
 	bdi_unregister(bdi);
 
 	for (i = 0; i < NR_BDI_STAT_ITEMS; i++)
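Net effect for drivers: the sb->s_bdi back-pointers are now pruned inside bdi_unregister(), so they get cleared on both the plain-unregister and the destroy paths. A hedged sketch of a teardown path (struct my_device and the function are illustrative; only the bdi_* calls are real):

    #include <linux/backing-dev.h>

    struct my_device {
    	struct backing_dev_info bdi;
    	/* ... */
    };

    /* Illustrative teardown: bdi_destroy() calls bdi_unregister(), which after
     * this change prunes the super_block back-pointers before the bdi device
     * node is removed. */
    static void my_device_teardown(struct my_device *dev)
    {
    	bdi_destroy(&dev->bdi);
    }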