workqueue: separate out process_one_work()
Separate process_one_work() out of run_workqueue(). This patch doesn't cause any behavior change. Signed-off-by: Tejun Heo <tj@kernel.org>
This commit is contained in:
@@ -402,28 +402,40 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
|
|||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
|
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
|
||||||
|
|
||||||
static void run_workqueue(struct cpu_workqueue_struct *cwq)
|
/**
|
||||||
|
* process_one_work - process single work
|
||||||
|
* @cwq: cwq to process work for
|
||||||
|
* @work: work to process
|
||||||
|
*
|
||||||
|
* Process @work. This function contains all the logics necessary to
|
||||||
|
* process a single work including synchronization against and
|
||||||
|
* interaction with other workers on the same cpu, queueing and
|
||||||
|
* flushing. As long as context requirement is met, any worker can
|
||||||
|
* call this function to process a work.
|
||||||
|
*
|
||||||
|
* CONTEXT:
|
||||||
|
* spin_lock_irq(cwq->lock) which is released and regrabbed.
|
||||||
|
*/
|
||||||
|
static void process_one_work(struct cpu_workqueue_struct *cwq,
|
||||||
|
struct work_struct *work)
|
||||||
{
|
{
|
||||||
spin_lock_irq(&cwq->lock);
|
|
||||||
while (!list_empty(&cwq->worklist)) {
|
|
||||||
struct work_struct *work = list_entry(cwq->worklist.next,
|
|
||||||
struct work_struct, entry);
|
|
||||||
work_func_t f = work->func;
|
work_func_t f = work->func;
|
||||||
#ifdef CONFIG_LOCKDEP
|
#ifdef CONFIG_LOCKDEP
|
||||||
/*
|
/*
|
||||||
* It is permissible to free the struct work_struct
|
* It is permissible to free the struct work_struct from
|
||||||
* from inside the function that is called from it,
|
* inside the function that is called from it, this we need to
|
||||||
* this we need to take into account for lockdep too.
|
* take into account for lockdep too. To avoid bogus "held
|
||||||
* To avoid bogus "held lock freed" warnings as well
|
* lock freed" warnings as well as problems when looking into
|
||||||
* as problems when looking into work->lockdep_map,
|
* work->lockdep_map, make a copy and use that here.
|
||||||
* make a copy and use that here.
|
|
||||||
*/
|
*/
|
||||||
struct lockdep_map lockdep_map = work->lockdep_map;
|
struct lockdep_map lockdep_map = work->lockdep_map;
|
||||||
#endif
|
#endif
|
||||||
|
/* claim and process */
|
||||||
trace_workqueue_execution(cwq->thread, work);
|
trace_workqueue_execution(cwq->thread, work);
|
||||||
debug_work_deactivate(work);
|
debug_work_deactivate(work);
|
||||||
cwq->current_work = work;
|
cwq->current_work = work;
|
||||||
list_del_init(cwq->worklist.next);
|
list_del_init(&work->entry);
|
||||||
|
|
||||||
spin_unlock_irq(&cwq->lock);
|
spin_unlock_irq(&cwq->lock);
|
||||||
|
|
||||||
BUG_ON(get_wq_data(work) != cwq);
|
BUG_ON(get_wq_data(work) != cwq);
|
||||||
@@ -437,8 +449,7 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
|
|||||||
if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
|
if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
|
||||||
printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
|
printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
|
||||||
"%s/0x%08x/%d\n",
|
"%s/0x%08x/%d\n",
|
||||||
current->comm, preempt_count(),
|
current->comm, preempt_count(), task_pid_nr(current));
|
||||||
task_pid_nr(current));
|
|
||||||
printk(KERN_ERR " last function: ");
|
printk(KERN_ERR " last function: ");
|
||||||
print_symbol("%s\n", (unsigned long)f);
|
print_symbol("%s\n", (unsigned long)f);
|
||||||
debug_show_held_locks(current);
|
debug_show_held_locks(current);
|
||||||
@@ -446,7 +457,18 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
|
|||||||
}
|
}
|
||||||
|
|
||||||
spin_lock_irq(&cwq->lock);
|
spin_lock_irq(&cwq->lock);
|
||||||
|
|
||||||
|
/* we're done with it, release */
|
||||||
cwq->current_work = NULL;
|
cwq->current_work = NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void run_workqueue(struct cpu_workqueue_struct *cwq)
|
||||||
|
{
|
||||||
|
spin_lock_irq(&cwq->lock);
|
||||||
|
while (!list_empty(&cwq->worklist)) {
|
||||||
|
struct work_struct *work = list_entry(cwq->worklist.next,
|
||||||
|
struct work_struct, entry);
|
||||||
|
process_one_work(cwq, work);
|
||||||
}
|
}
|
||||||
spin_unlock_irq(&cwq->lock);
|
spin_unlock_irq(&cwq->lock);
|
||||||
}
|
}
|
||||||
|
Reference in New Issue
Block a user