workqueue: increase max_active of keventd and kill current_is_keventd()
Define WQ_MAX_ACTIVE and create keventd with max_active set to half of
it, which means that keventd can now process up to WQ_MAX_ACTIVE / 2 - 1
works concurrently.  Unless some combination of works can result in a
dependency loop longer than max_active, deadlock won't happen, and thus
it's unnecessary to check current_is_keventd() before trying to schedule
a work.  Kill current_is_keventd().

(Lockdep annotations are broken.  We need
lock_map_acquire_read_norecurse().)

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Oleg Nesterov <oleg@redhat.com>
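As a minimal caller-side sketch (not part of this patch; my_work,
my_work_fn, and queue_it are hypothetical names), the change lets code
that previously had to guard against running on keventd schedule a work
unconditionally:

#include <linux/workqueue.h>

static void my_work_fn(struct work_struct *work)
{
	/* hypothetical payload */
}
static DECLARE_WORK(my_work, my_work_fn);

static void queue_it(void)
{
	/*
	 * Before: with keventd's max_active of 1, a caller already
	 * running on keventd had to run the function directly to avoid
	 * tying up the only worker:
	 *
	 *	if (current_is_keventd())
	 *		my_work_fn(&my_work);
	 *	else
	 *		schedule_work(&my_work);
	 */
	schedule_work(&my_work);	/* now safe from any context */
}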
@@ -227,6 +227,9 @@ enum {
 	WQ_SINGLE_CPU		= 1 << 1, /* only single cpu at a time */
 	WQ_NON_REENTRANT	= 1 << 2, /* guarantee non-reentrance */
 	WQ_RESCUER		= 1 << 3, /* has an rescue worker */
+
+	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
+	WQ_DFL_ACTIVE		= WQ_MAX_ACTIVE / 2,
 };

 extern struct workqueue_struct *
@@ -280,7 +283,6 @@ extern int schedule_delayed_work(struct delayed_work *work, unsigned long delay)
 extern int schedule_delayed_work_on(int cpu, struct delayed_work *work,
 				    unsigned long delay);
 extern int schedule_on_each_cpu(work_func_t func);
-extern int current_is_keventd(void);
 extern int keventd_up(void);

 extern void init_workqueues(void);
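As a hypothetical sketch of the dependency-loop argument from the
changelog (work_a and work_b are not from the patch): a work that
flush-waits on another work queued on the same workqueue holds an
active slot while waiting.  With max_active of 1, work_a alone exhausts
the pool and work_b can never start, so flush_work() deadlocks; with
max_active raised to WQ_DFL_ACTIVE, such a wait chain stalls only if it
grows longer than max_active.

static void work_b_fn(struct work_struct *work)
{
	/* hypothetical payload */
}
static DECLARE_WORK(work_b, work_b_fn);

static void work_a_fn(struct work_struct *work)
{
	schedule_work(&work_b);
	flush_work(&work_b);	/* blocks while holding an active slot */
}
static DECLARE_WORK(work_a, work_a_fn);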