[PATCH] cfq-iosched: migrate to using the elevator rb functions
This removes the rbtree handling from CFQ.

Signed-off-by: Jens Axboe <axboe@suse.de>
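For context: the elv_rb_* helpers keep a scheduler's pending requests in an rb-tree sorted by start sector, so each I/O scheduler no longer open-codes its own insert, erase, and lookup. The sketch below is a simplified, kernel-style illustration of what an elv_rb_add()-style insert does, modelled on the __cfq_add_crq_rb() code this patch removes. It is not the authoritative implementation: sketch_rb_add() is a made-up name, the rq->rb_node and rq->sector fields reflect the struct request of this era, and the "return the alias on key collision" behaviour is inferred from the while ((__alias = elv_rb_add(...)) != NULL) loop in the diff below.

#include <linux/rbtree.h>
#include <linux/blkdev.h>

/*
 * Illustrative sketch only: insert @rq into a sector-sorted rb-tree.
 * If a request with the same start sector already exists, return that
 * alias so the caller can put it on the dispatch list; otherwise link
 * the new node, rebalance, and return NULL.
 */
static struct request *sketch_rb_add(struct rb_root *root, struct request *rq)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct request *__rq;

        while (*p) {
                parent = *p;
                __rq = rb_entry(parent, struct request, rb_node);

                if (rq->sector < __rq->sector)
                        p = &(*p)->rb_left;
                else if (rq->sector > __rq->sector)
                        p = &(*p)->rb_right;
                else
                        return __rq;    /* alias: same rb key */
        }

        rb_link_node(&rq->rb_node, parent, p);
        rb_insert_color(&rq->rb_node, root);
        return NULL;
}

With a shared helper of this shape in the elevator core, cfq_del_crq_rb() and cfq_find_rq_fmerge() reduce to calls to elv_rb_del() and elv_rb_find(), as the hunks below show.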
@@ -46,12 +46,6 @@ static DEFINE_SPINLOCK(cfq_exit_lock);
 
 #define RQ_DATA(rq) (rq)->elevator_private
 
-/*
- * rb-tree defines
- */
-#define rb_entry_crq(node) rb_entry((node), struct cfq_rq, rb_node)
-#define rq_rb_key(rq) (rq)->sector
-
 static kmem_cache_t *crq_pool;
 static kmem_cache_t *cfq_pool;
 static kmem_cache_t *cfq_ioc_pool;
@@ -185,8 +179,6 @@ struct cfq_queue {
 };
 
 struct cfq_rq {
-        struct rb_node rb_node;
-        sector_t rb_key;
         struct request *request;
 
         struct cfq_queue *cfq_queue;
@@ -376,33 +368,27 @@ cfq_choose_req(struct cfq_data *cfqd, struct cfq_rq *crq1, struct cfq_rq *crq2)
  */
 static struct cfq_rq *
 cfq_find_next_crq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
-                  struct cfq_rq *last)
+                  struct cfq_rq *last_crq)
 {
-        struct cfq_rq *crq_next = NULL, *crq_prev = NULL;
-        struct rb_node *rbnext, *rbprev;
+        struct request *last = last_crq->request;
+        struct rb_node *rbnext = rb_next(&last->rb_node);
+        struct rb_node *rbprev = rb_prev(&last->rb_node);
+        struct cfq_rq *next = NULL, *prev = NULL;
 
-        if (!(rbnext = rb_next(&last->rb_node))) {
-                rbnext = rb_first(&cfqq->sort_list);
-                if (rbnext == &last->rb_node)
-                        rbnext = NULL;
-        }
+        BUG_ON(RB_EMPTY_NODE(&last->rb_node));
 
-        rbprev = rb_prev(&last->rb_node);
-
         if (rbprev)
-                crq_prev = rb_entry_crq(rbprev);
-        if (rbnext)
-                crq_next = rb_entry_crq(rbnext);
+                prev = RQ_DATA(rb_entry_rq(rbprev));
 
-        return cfq_choose_req(cfqd, crq_next, crq_prev);
-}
+        if (rbnext)
+                next = RQ_DATA(rb_entry_rq(rbnext));
+        else {
+                rbnext = rb_first(&cfqq->sort_list);
+                if (rbnext && rbnext != &last->rb_node)
+                        next = RQ_DATA(rb_entry_rq(rbnext));
+        }
 
-static void cfq_update_next_crq(struct cfq_rq *crq)
-{
-        struct cfq_queue *cfqq = crq->cfq_queue;
-
-        if (cfqq->next_crq == crq)
-                cfqq->next_crq = cfq_find_next_crq(cfqq->cfqd, cfqq, crq);
+        return cfq_choose_req(cfqd, next, prev);
 }
 
 static void cfq_resort_rr_list(struct cfq_queue *cfqq, int preempted)
@@ -497,71 +483,34 @@ static inline void cfq_del_crq_rb(struct cfq_rq *crq)
         BUG_ON(!cfqq->queued[sync]);
         cfqq->queued[sync]--;
 
-        cfq_update_next_crq(crq);
-
-        rb_erase(&crq->rb_node, &cfqq->sort_list);
+        elv_rb_del(&cfqq->sort_list, crq->request);
 
         if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
                 cfq_del_cfqq_rr(cfqd, cfqq);
 }
 
-static struct cfq_rq *
-__cfq_add_crq_rb(struct cfq_rq *crq)
-{
-        struct rb_node **p = &crq->cfq_queue->sort_list.rb_node;
-        struct rb_node *parent = NULL;
-        struct cfq_rq *__crq;
-
-        while (*p) {
-                parent = *p;
-                __crq = rb_entry_crq(parent);
-
-                if (crq->rb_key < __crq->rb_key)
-                        p = &(*p)->rb_left;
-                else if (crq->rb_key > __crq->rb_key)
-                        p = &(*p)->rb_right;
-                else
-                        return __crq;
-        }
-
-        rb_link_node(&crq->rb_node, parent, p);
-        return NULL;
-}
-
 static void cfq_add_crq_rb(struct cfq_rq *crq)
 {
         struct cfq_queue *cfqq = crq->cfq_queue;
         struct cfq_data *cfqd = cfqq->cfqd;
         struct request *rq = crq->request;
-        struct cfq_rq *__alias;
+        struct request *__alias;
 
-        crq->rb_key = rq_rb_key(rq);
         cfqq->queued[cfq_crq_is_sync(crq)]++;
 
         /*
          * looks a little odd, but the first insert might return an alias.
          * if that happens, put the alias on the dispatch list
         */
-        while ((__alias = __cfq_add_crq_rb(crq)) != NULL)
-                cfq_dispatch_insert(cfqd->queue, __alias);
-
-        rb_insert_color(&crq->rb_node, &cfqq->sort_list);
-
-        if (!cfq_cfqq_on_rr(cfqq))
-                cfq_add_cfqq_rr(cfqd, cfqq);
-
-        /*
-         * check if this request is a better next-serve candidate
-         */
-        cfqq->next_crq = cfq_choose_req(cfqd, cfqq->next_crq, crq);
+        while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL)
+                cfq_dispatch_insert(cfqd->queue, RQ_DATA(__alias));
 }
 
 static inline void
 cfq_reposition_crq_rb(struct cfq_queue *cfqq, struct cfq_rq *crq)
 {
-        rb_erase(&crq->rb_node, &cfqq->sort_list);
+        elv_rb_del(&cfqq->sort_list, crq->request);
         cfqq->queued[cfq_crq_is_sync(crq)]--;
-
         cfq_add_crq_rb(crq);
 }
 
@@ -570,28 +519,13 @@ cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
 {
         struct task_struct *tsk = current;
         pid_t key = cfq_queue_pid(tsk, bio_data_dir(bio));
+        sector_t sector = bio->bi_sector + bio_sectors(bio);
         struct cfq_queue *cfqq;
-        struct rb_node *n;
-        sector_t sector;
 
         cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
-        if (!cfqq)
-                goto out;
+        if (cfqq)
+                return elv_rb_find(&cfqq->sort_list, sector);
 
-        sector = bio->bi_sector + bio_sectors(bio);
-        n = cfqq->sort_list.rb_node;
-        while (n) {
-                struct cfq_rq *crq = rb_entry_crq(n);
-
-                if (sector < crq->rb_key)
-                        n = n->rb_left;
-                else if (sector > crq->rb_key)
-                        n = n->rb_right;
-                else
-                        return crq->request;
-        }
-
-out:
         return NULL;
 }
 
@@ -622,6 +556,10 @@ static void cfq_deactivate_request(request_queue_t *q, struct request *rq)
 static void cfq_remove_request(struct request *rq)
 {
         struct cfq_rq *crq = RQ_DATA(rq);
+        struct cfq_queue *cfqq = crq->cfq_queue;
+
+        if (cfqq->next_crq == crq)
+                cfqq->next_crq = cfq_find_next_crq(cfqq->cfqd, cfqq, crq);
 
         list_del_init(&rq->queuelist);
         cfq_del_crq_rb(crq);
@@ -642,14 +580,14 @@ cfq_merge(request_queue_t *q, struct request **req, struct bio *bio)
         return ELEVATOR_NO_MERGE;
 }
 
-static void cfq_merged_request(request_queue_t *q, struct request *req)
+static void cfq_merged_request(request_queue_t *q, struct request *req,
+                               int type)
 {
         struct cfq_rq *crq = RQ_DATA(req);
 
-        if (rq_rb_key(req) != crq->rb_key) {
+        if (type == ELEVATOR_FRONT_MERGE) {
                 struct cfq_queue *cfqq = crq->cfq_queue;
 
-                cfq_update_next_crq(crq);
                 cfq_reposition_crq_rb(cfqq, crq);
         }
 }
@@ -658,8 +596,6 @@ static void
 cfq_merged_requests(request_queue_t *q, struct request *rq,
                     struct request *next)
 {
-        cfq_merged_request(q, rq);
-
         /*
          * reposition in fifo if next is older than rq
          */
@@ -881,7 +817,6 @@ static void cfq_dispatch_insert(request_queue_t *q, struct cfq_rq *crq)
         struct cfq_queue *cfqq = crq->cfq_queue;
         struct request *rq;
 
-        cfqq->next_crq = cfq_find_next_crq(cfqd, cfqq, crq);
         cfq_remove_request(crq->request);
         cfqq->on_dispatch[cfq_crq_is_sync(crq)]++;
         elv_dispatch_sort(q, crq->request);
@@ -1699,6 +1634,12 @@ cfq_crq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 {
         struct cfq_io_context *cic = crq->io_context;
 
+        /*
+         * check if this request is a better next-serve candidate
+         */
+        cfqq->next_crq = cfq_choose_req(cfqd, cfqq->next_crq, crq);
+        BUG_ON(!cfqq->next_crq);
+
         /*
          * we never wait for an async request and we don't allow preemption
          * of an async request. so just return early
@@ -1756,6 +1697,9 @@ static void cfq_insert_request(request_queue_t *q, struct request *rq)
 
         cfq_add_crq_rb(crq);
 
+        if (!cfq_cfqq_on_rr(cfqq))
+                cfq_add_cfqq_rr(cfqd, cfqq);
+
         list_add_tail(&rq->queuelist, &cfqq->fifo);
 
         cfq_crq_enqueued(cfqd, cfqq, crq);
@@ -1803,30 +1747,6 @@ static void cfq_completed_request(request_queue_t *q, struct request *rq)
         }
 }
 
-static struct request *
-cfq_former_request(request_queue_t *q, struct request *rq)
-{
-        struct cfq_rq *crq = RQ_DATA(rq);
-        struct rb_node *rbprev = rb_prev(&crq->rb_node);
-
-        if (rbprev)
-                return rb_entry_crq(rbprev)->request;
-
-        return NULL;
-}
-
-static struct request *
-cfq_latter_request(request_queue_t *q, struct request *rq)
-{
-        struct cfq_rq *crq = RQ_DATA(rq);
-        struct rb_node *rbnext = rb_next(&crq->rb_node);
-
-        if (rbnext)
-                return rb_entry_crq(rbnext)->request;
-
-        return NULL;
-}
-
 /*
  * we temporarily boost lower priority queues if they are holding fs exclusive
  * resources. they are boosted to normal prio (CLASS_BE/4)
@@ -1982,8 +1902,6 @@ cfq_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
 
         crq = mempool_alloc(cfqd->crq_pool, gfp_mask);
         if (crq) {
-                RB_CLEAR_NODE(&crq->rb_node);
-                crq->rb_key = 0;
                 crq->request = rq;
                 crq->cfq_queue = cfqq;
                 crq->io_context = cic;
@@ -2345,8 +2263,8 @@ static struct elevator_type iosched_cfq = {
                 .elevator_deactivate_req_fn = cfq_deactivate_request,
                 .elevator_queue_empty_fn = cfq_queue_empty,
                 .elevator_completed_req_fn = cfq_completed_request,
-                .elevator_former_req_fn = cfq_former_request,
-                .elevator_latter_req_fn = cfq_latter_request,
+                .elevator_former_req_fn = elv_rb_former_request,
+                .elevator_latter_req_fn = elv_rb_latter_request,
                 .elevator_set_req_fn = cfq_set_request,
                 .elevator_put_req_fn = cfq_put_request,
                 .elevator_may_queue_fn = cfq_may_queue,