workqueue: cleanup flush/cancel functions
Make the following cleanup changes.

* Relocate flush/cancel function prototypes and definitions.
* Relocate wait_on_cpu_work() and wait_on_work() before try_to_grab_pending(). These will be used to implement flush_work_sync().
* Make all flush/cancel functions return bool instead of int.
* Update wait_on_cpu_work() and wait_on_work() to return %true if they actually waited.
* Add / update comments.

This patch doesn't cause any functional changes.

Signed-off-by: Tejun Heo <tj@kernel.org>
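For illustration only (not part of this commit): a minimal, hypothetical caller sketch showing how the bool return values are typically consumed after the conversion. The surrounding names (stop_polling, poll_work) are assumptions made up for the example; cancel_delayed_work_sync() is the interface whose return type this patch changes.

/* Hypothetical caller sketch -- not from this patch. */
#include <linux/workqueue.h>
#include <linux/printk.h>

static struct delayed_work poll_work;	/* assumed to be initialized and queued elsewhere */

static void stop_polling(void)
{
	/*
	 * After this patch cancel_delayed_work_sync() returns bool:
	 * %true if the work was pending (timer armed or already queued),
	 * %false if it was idle.
	 */
	bool pending = cancel_delayed_work_sync(&poll_work);

	if (pending)
		pr_debug("poll_work was still pending when cancelled\n");
}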
@@ -343,7 +343,6 @@ extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 
 extern void flush_workqueue(struct workqueue_struct *wq);
 extern void flush_scheduled_work(void);
-extern void flush_delayed_work(struct delayed_work *work);
 
 extern int schedule_work(struct work_struct *work);
 extern int schedule_work_on(int cpu, struct work_struct *work);
@@ -355,8 +354,11 @@ extern int keventd_up(void);
 
 int execute_in_process_context(work_func_t fn, struct execute_work *);
 
-extern int flush_work(struct work_struct *work);
-extern int cancel_work_sync(struct work_struct *work);
+extern bool flush_work(struct work_struct *work);
+extern bool cancel_work_sync(struct work_struct *work);
+
+extern bool flush_delayed_work(struct delayed_work *dwork);
+extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
 
 extern void workqueue_set_max_active(struct workqueue_struct *wq,
 				     int max_active);
@@ -370,9 +372,9 @@ extern unsigned int work_busy(struct work_struct *work);
  * it returns 1 and the work doesn't re-arm itself. Run flush_workqueue() or
  * cancel_work_sync() to wait on it.
  */
-static inline int cancel_delayed_work(struct delayed_work *work)
+static inline bool cancel_delayed_work(struct delayed_work *work)
 {
-	int ret;
+	bool ret;
 
 	ret = del_timer_sync(&work->timer);
 	if (ret)
@@ -385,9 +387,9 @@ static inline int cancel_delayed_work(struct delayed_work *work)
  * if it returns 0 the timer function may be running and the queueing is in
  * progress.
  */
-static inline int __cancel_delayed_work(struct delayed_work *work)
+static inline bool __cancel_delayed_work(struct delayed_work *work)
 {
-	int ret;
+	bool ret;
 
 	ret = del_timer(&work->timer);
 	if (ret)
@@ -395,8 +397,6 @@ static inline int __cancel_delayed_work(struct delayed_work *work)
 	return ret;
 }
 
-extern int cancel_delayed_work_sync(struct delayed_work *work);
-
 /* Obsolete. use cancel_delayed_work_sync() */
 static inline
 void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,