Merge commit 'v3.1-rc7' into perf/core
Merge reason: Pick up the latest upstream fixes.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
@@ -400,14 +400,54 @@ void perf_cgroup_switch(struct task_struct *task, int mode)
 	local_irq_restore(flags);
 }
 
-static inline void perf_cgroup_sched_out(struct task_struct *task)
+static inline void perf_cgroup_sched_out(struct task_struct *task,
+					 struct task_struct *next)
 {
-	perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
+	struct perf_cgroup *cgrp1;
+	struct perf_cgroup *cgrp2 = NULL;
+
+	/*
+	 * we come here when we know perf_cgroup_events > 0
+	 */
+	cgrp1 = perf_cgroup_from_task(task);
+
+	/*
+	 * next is NULL when called from perf_event_enable_on_exec()
+	 * that will systematically cause a cgroup_switch()
+	 */
+	if (next)
+		cgrp2 = perf_cgroup_from_task(next);
+
+	/*
+	 * only schedule out current cgroup events if we know
+	 * that we are switching to a different cgroup. Otherwise,
+	 * do no touch the cgroup events.
+	 */
+	if (cgrp1 != cgrp2)
+		perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
 }
 
-static inline void perf_cgroup_sched_in(struct task_struct *task)
+static inline void perf_cgroup_sched_in(struct task_struct *prev,
+					struct task_struct *task)
 {
-	perf_cgroup_switch(task, PERF_CGROUP_SWIN);
+	struct perf_cgroup *cgrp1;
+	struct perf_cgroup *cgrp2 = NULL;
+
+	/*
+	 * we come here when we know perf_cgroup_events > 0
+	 */
+	cgrp1 = perf_cgroup_from_task(task);
+
+	/* prev can never be NULL */
+	cgrp2 = perf_cgroup_from_task(prev);
+
+	/*
+	 * only need to schedule in cgroup events if we are changing
+	 * cgroup during ctxsw. Cgroup events were not scheduled
+	 * out of ctxsw out if that was not the case.
+	 */
+	if (cgrp1 != cgrp2)
+		perf_cgroup_switch(task, PERF_CGROUP_SWIN);
 }
 
 static inline int perf_cgroup_connect(int fd, struct perf_event *event,
@@ -519,11 +559,13 @@ static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
 {
 }
 
-static inline void perf_cgroup_sched_out(struct task_struct *task)
+static inline void perf_cgroup_sched_out(struct task_struct *task,
+					 struct task_struct *next)
 {
 }
 
-static inline void perf_cgroup_sched_in(struct task_struct *task)
+static inline void perf_cgroup_sched_in(struct task_struct *prev,
+					struct task_struct *task)
 {
 }
 
@@ -1989,7 +2031,7 @@ void __perf_event_task_sched_out(struct task_struct *task,
 	 * cgroup event are system-wide mode only
 	 */
 	if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
-		perf_cgroup_sched_out(task);
+		perf_cgroup_sched_out(task, next);
 }
 
 static void task_ctx_sched_out(struct perf_event_context *ctx)
@@ -2154,7 +2196,8 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
  * accessing the event control register. If a NMI hits, then it will
  * keep the event running.
  */
-void __perf_event_task_sched_in(struct task_struct *task)
+void __perf_event_task_sched_in(struct task_struct *prev,
+				struct task_struct *task)
 {
 	struct perf_event_context *ctx;
 	int ctxn;
@@ -2172,7 +2215,7 @@ void __perf_event_task_sched_in(struct task_struct *task)
 	 * cgroup event are system-wide mode only
 	 */
 	if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
-		perf_cgroup_sched_in(task);
+		perf_cgroup_sched_in(prev, task);
 }
 
 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
@@ -2428,7 +2471,7 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
 	 * ctxswin cgroup events which are already scheduled
 	 * in.
 	 */
-	perf_cgroup_sched_out(current);
+	perf_cgroup_sched_out(current, NULL);
 
 	raw_spin_lock(&ctx->lock);
 	task_ctx_sched_out(ctx);
@@ -3354,8 +3397,8 @@ static int perf_event_index(struct perf_event *event)
 }
 
 static void calc_timer_values(struct perf_event *event,
-				u64 *running,
-				u64 *enabled)
+				u64 *enabled,
+				u64 *running)
 {
 	u64 now, ctx_time;
 
@@ -178,7 +178,7 @@ void irq_shutdown(struct irq_desc *desc)
 	desc->depth = 1;
 	if (desc->irq_data.chip->irq_shutdown)
 		desc->irq_data.chip->irq_shutdown(&desc->irq_data);
-	if (desc->irq_data.chip->irq_disable)
+	else if (desc->irq_data.chip->irq_disable)
 		desc->irq_data.chip->irq_disable(&desc->irq_data);
 	else
 		desc->irq_data.chip->irq_mask(&desc->irq_data);
@@ -344,6 +344,7 @@ EXPORT_SYMBOL_GPL(irq_free_descs);
  * @from:	Start the search from this irq number
  * @cnt:	Number of consecutive irqs to allocate.
  * @node:	Preferred node on which the irq descriptor should be allocated
+ * @owner:	Owning module (can be NULL)
  *
  * Returns the first irq number or error code
  */
@@ -1331,7 +1331,6 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
 		if (!thread_fn)
 			return -EINVAL;
 		handler = irq_default_primary_handler;
-		irqflags |= IRQF_ONESHOT;
 	}
 
 	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
@@ -231,3 +231,7 @@ config PM_CLK
 config PM_GENERIC_DOMAINS
 	bool
 	depends on PM
+
+config PM_GENERIC_DOMAINS_RUNTIME
+	def_bool y
+	depends on PM_RUNTIME && PM_GENERIC_DOMAINS
@@ -1604,7 +1604,7 @@ static int __init printk_late_init(void)
 	struct console *con;
 
 	for_each_console(con) {
-		if (con->flags & CON_BOOT) {
+		if (!keep_bootcon && con->flags & CON_BOOT) {
 			printk(KERN_INFO "turn off boot console %s%d\n",
 				con->name, con->index);
 			unregister_console(con);
@@ -3065,7 +3065,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
 	local_irq_disable();
 #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
-	perf_event_task_sched_in(current);
+	perf_event_task_sched_in(prev, current);
 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
 	local_irq_enable();
 #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
@@ -4279,9 +4279,9 @@ pick_next_task(struct rq *rq)
 }
 
 /*
- * schedule() is the main scheduler function.
+ * __schedule() is the main scheduler function.
  */
-asmlinkage void __sched schedule(void)
+static void __sched __schedule(void)
 {
 	struct task_struct *prev, *next;
 	unsigned long *switch_count;
@@ -4322,16 +4322,6 @@ need_resched:
 			if (to_wakeup)
 				try_to_wake_up_local(to_wakeup);
 		}
-
-		/*
-		 * If we are going to sleep and we have plugged IO
-		 * queued, make sure to submit it to avoid deadlocks.
-		 */
-		if (blk_needs_flush_plug(prev)) {
-			raw_spin_unlock(&rq->lock);
-			blk_schedule_flush_plug(prev);
-			raw_spin_lock(&rq->lock);
-		}
 		switch_count = &prev->nvcsw;
 	}
@@ -4369,6 +4359,26 @@ need_resched:
 	if (need_resched())
 		goto need_resched;
 }
+
+static inline void sched_submit_work(struct task_struct *tsk)
+{
+	if (!tsk->state)
+		return;
+	/*
+	 * If we are going to sleep and we have plugged IO queued,
+	 * make sure to submit it to avoid deadlocks.
+	 */
+	if (blk_needs_flush_plug(tsk))
+		blk_schedule_flush_plug(tsk);
+}
+
+asmlinkage void schedule(void)
+{
+	struct task_struct *tsk = current;
+
+	sched_submit_work(tsk);
+	__schedule();
+}
 EXPORT_SYMBOL(schedule);
 
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
@@ -4435,7 +4445,7 @@ asmlinkage void __sched notrace preempt_schedule(void)
 
 	do {
 		add_preempt_count_notrace(PREEMPT_ACTIVE);
-		schedule();
+		__schedule();
 		sub_preempt_count_notrace(PREEMPT_ACTIVE);
 
 		/*
@@ -4463,7 +4473,7 @@ asmlinkage void __sched preempt_schedule_irq(void)
 	do {
 		add_preempt_count(PREEMPT_ACTIVE);
 		local_irq_enable();
-		schedule();
+		__schedule();
 		local_irq_disable();
 		sub_preempt_count(PREEMPT_ACTIVE);
 
@@ -5588,7 +5598,7 @@ static inline int should_resched(void)
 static void __cond_resched(void)
 {
 	add_preempt_count(PREEMPT_ACTIVE);
-	schedule();
+	__schedule();
 	sub_preempt_count(PREEMPT_ACTIVE);
 }
 
@@ -7443,6 +7453,7 @@ static void __sdt_free(const struct cpumask *cpu_map)
 			struct sched_domain *sd = *per_cpu_ptr(sdd->sd, j);
 			if (sd && (sd->flags & SD_OVERLAP))
 				free_sched_groups(sd->groups, 0);
+			kfree(*per_cpu_ptr(sdd->sd, j));
 			kfree(*per_cpu_ptr(sdd->sg, j));
 			kfree(*per_cpu_ptr(sdd->sgp, j));
 		}
kernel/sys.c (38 changed lines)
@@ -37,6 +37,8 @@
 #include <linux/fs_struct.h>
 #include <linux/gfp.h>
 #include <linux/syscore_ops.h>
+#include <linux/version.h>
+#include <linux/ctype.h>
 
 #include <linux/compat.h>
 #include <linux/syscalls.h>
@@ -44,6 +46,8 @@
 #include <linux/user_namespace.h>
 
 #include <linux/kmsg_dump.h>
+/* Move somewhere else to avoid recompiling? */
+#include <generated/utsrelease.h>
 
 #include <asm/uaccess.h>
 #include <asm/io.h>
@@ -1161,6 +1165,34 @@ DECLARE_RWSEM(uts_sem);
 #define override_architecture(name)	0
 #endif
 
+/*
+ * Work around broken programs that cannot handle "Linux 3.0".
+ * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40
+ */
+static int override_release(char __user *release, int len)
+{
+	int ret = 0;
+	char buf[len];
+
+	if (current->personality & UNAME26) {
+		char *rest = UTS_RELEASE;
+		int ndots = 0;
+		unsigned v;
+
+		while (*rest) {
+			if (*rest == '.' && ++ndots >= 3)
+				break;
+			if (!isdigit(*rest) && *rest != '.')
+				break;
+			rest++;
+		}
+		v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
+		snprintf(buf, len, "2.6.%u%s", v, rest);
+		ret = copy_to_user(release, buf, len);
+	}
+	return ret;
+}
+
 SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
 {
 	int errno = 0;
@@ -1170,6 +1202,8 @@ SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
 		errno = -EFAULT;
 	up_read(&uts_sem);
 
+	if (!errno && override_release(name->release, sizeof(name->release)))
+		errno = -EFAULT;
 	if (!errno && override_architecture(name))
 		errno = -EFAULT;
 	return errno;
@@ -1191,6 +1225,8 @@ SYSCALL_DEFINE1(uname, struct old_utsname __user *, name)
 		error = -EFAULT;
 	up_read(&uts_sem);
 
+	if (!error && override_release(name->release, sizeof(name->release)))
+		error = -EFAULT;
 	if (!error && override_architecture(name))
 		error = -EFAULT;
 	return error;
@@ -1225,6 +1261,8 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
 
 	if (!error && override_architecture(name))
 		error = -EFAULT;
+	if (!error && override_release(name->release, sizeof(name->release)))
+		error = -EFAULT;
 	return error ? -EFAULT : 0;
 }
 #endif
@@ -16,7 +16,6 @@ asmlinkage long sys_ni_syscall(void)
 	return -ENOSYS;
 }
 
-cond_syscall(sys_nfsservctl);
 cond_syscall(sys_quotactl);
 cond_syscall(sys32_quotactl);
 cond_syscall(sys_acct);
@@ -1,6 +1,6 @@
 #include <linux/stat.h>
 #include <linux/sysctl.h>
-#include "../fs/xfs/linux-2.6/xfs_sysctl.h"
+#include "../fs/xfs/xfs_sysctl.h"
 #include <linux/sunrpc/debug.h>
 #include <linux/string.h>
 #include <net/ip_vs.h>
@@ -1,6 +1,6 @@
 #include <linux/stat.h>
 #include <linux/sysctl.h>
-#include "../fs/xfs/linux-2.6/xfs_sysctl.h"
+#include "../fs/xfs/xfs_sysctl.h"
 #include <linux/sunrpc/debug.h>
 #include <linux/string.h>
 #include <net/ip_vs.h>
@@ -655,6 +655,7 @@ static struct genl_ops taskstats_ops = {
 	.cmd = TASKSTATS_CMD_GET,
 	.doit = taskstats_user_cmd,
 	.policy = taskstats_cmd_get_policy,
+	.flags = GENL_ADMIN_PERM,
 };
 
 static struct genl_ops cgroupstats_ops = {
@@ -441,6 +441,8 @@ static int alarm_timer_create(struct k_itimer *new_timer)
 static void alarm_timer_get(struct k_itimer *timr,
 				struct itimerspec *cur_setting)
 {
+	memset(cur_setting, 0, sizeof(struct itimerspec));
+
 	cur_setting->it_interval =
 		ktime_to_timespec(timr->it.alarmtimer.period);
 	cur_setting->it_value =
@@ -479,11 +481,17 @@ static int alarm_timer_set(struct k_itimer *timr, int flags,
 	if (!rtcdev)
 		return -ENOTSUPP;
 
-	/* Save old values */
-	old_setting->it_interval =
-			ktime_to_timespec(timr->it.alarmtimer.period);
-	old_setting->it_value =
-			ktime_to_timespec(timr->it.alarmtimer.node.expires);
+	/*
+	 * XXX HACK! Currently we can DOS a system if the interval
+	 * period on alarmtimers is too small. Cap the interval here
+	 * to 100us and solve this properly in a future patch! -jstultz
+	 */
+	if ((new_setting->it_interval.tv_sec == 0) &&
+			(new_setting->it_interval.tv_nsec < 100000))
+		new_setting->it_interval.tv_nsec = 100000;
+
+	if (old_setting)
+		alarm_timer_get(timr, old_setting);
 
 	/* If the timer was already set, cancel it */
 	alarm_cancel(&timr->it.alarmtimer);
@@ -206,6 +206,8 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
 	what |= MASK_TC_BIT(rw, RAHEAD);
 	what |= MASK_TC_BIT(rw, META);
 	what |= MASK_TC_BIT(rw, DISCARD);
+	what |= MASK_TC_BIT(rw, FLUSH);
+	what |= MASK_TC_BIT(rw, FUA);
 
 	pid = tsk->pid;
 	if (act_log_check(bt, what, sector, pid))
@@ -1054,6 +1056,9 @@ static void fill_rwbs(char *rwbs, const struct blk_io_trace *t)
 		goto out;
 	}
 
+	if (tc & BLK_TC_FLUSH)
+		rwbs[i++] = 'F';
+
 	if (tc & BLK_TC_DISCARD)
 		rwbs[i++] = 'D';
 	else if (tc & BLK_TC_WRITE)
@@ -1063,10 +1068,10 @@ static void fill_rwbs(char *rwbs, const struct blk_io_trace *t)
 	else
 		rwbs[i++] = 'N';
 
+	if (tc & BLK_TC_FUA)
+		rwbs[i++] = 'F';
 	if (tc & BLK_TC_AHEAD)
 		rwbs[i++] = 'A';
-	if (tc & BLK_TC_BARRIER)
-		rwbs[i++] = 'B';
 	if (tc & BLK_TC_SYNC)
 		rwbs[i++] = 'S';
 	if (tc & BLK_TC_META)
@@ -1132,7 +1137,7 @@ typedef int (blk_log_action_t) (struct trace_iterator *iter, const char *act);
 
 static int blk_log_action_classic(struct trace_iterator *iter, const char *act)
 {
-	char rwbs[6];
+	char rwbs[RWBS_LEN];
 	unsigned long long ts = iter->ts;
 	unsigned long nsec_rem = do_div(ts, NSEC_PER_SEC);
 	unsigned secs = (unsigned long)ts;
@@ -1148,7 +1153,7 @@ static int blk_log_action_classic(struct trace_iterator *iter, const char *act)
 
 static int blk_log_action(struct trace_iterator *iter, const char *act)
 {
-	char rwbs[6];
+	char rwbs[RWBS_LEN];
 	const struct blk_io_trace *t = te_blk_io_trace(iter->ent);
 
 	fill_rwbs(rwbs, t);
@@ -1561,7 +1566,7 @@ static const struct {
 } mask_maps[] = {
 	{ BLK_TC_READ, "read" },
 	{ BLK_TC_WRITE, "write" },
-	{ BLK_TC_BARRIER, "barrier" },
+	{ BLK_TC_FLUSH, "flush" },
 	{ BLK_TC_SYNC, "sync" },
 	{ BLK_TC_QUEUE, "queue" },
 	{ BLK_TC_REQUEUE, "requeue" },
@@ -1573,6 +1578,7 @@ static const struct {
 	{ BLK_TC_META, "meta" },
 	{ BLK_TC_DISCARD, "discard" },
 	{ BLK_TC_DRV_DATA, "drv_data" },
+	{ BLK_TC_FUA, "fua" },
 };
 
 static int blk_trace_str2mask(const char *str)
@@ -1788,6 +1794,9 @@ void blk_fill_rwbs(char *rwbs, u32 rw, int bytes)
 {
 	int i = 0;
 
+	if (rw & REQ_FLUSH)
+		rwbs[i++] = 'F';
+
 	if (rw & WRITE)
 		rwbs[i++] = 'W';
 	else if (rw & REQ_DISCARD)
@@ -1797,6 +1806,8 @@ void blk_fill_rwbs(char *rwbs, u32 rw, int bytes)
 	else
 		rwbs[i++] = 'N';
 
+	if (rw & REQ_FUA)
+		rwbs[i++] = 'F';
 	if (rw & REQ_RAHEAD)
 		rwbs[i++] = 'A';
 	if (rw & REQ_SYNC)
@@ -78,6 +78,7 @@ void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk)
 
 #define KB 1024
 #define MB (1024*KB)
+#define KB_MASK (~(KB-1))
 /*
  * fill in extended accounting fields
  */
@@ -95,14 +96,14 @@ void xacct_add_tsk(struct taskstats *stats, struct task_struct *p)
 		stats->hiwater_vm = get_mm_hiwater_vm(mm) * PAGE_SIZE / KB;
 		mmput(mm);
 	}
-	stats->read_char = p->ioac.rchar;
-	stats->write_char = p->ioac.wchar;
-	stats->read_syscalls = p->ioac.syscr;
-	stats->write_syscalls = p->ioac.syscw;
+	stats->read_char = p->ioac.rchar & KB_MASK;
+	stats->write_char = p->ioac.wchar & KB_MASK;
+	stats->read_syscalls = p->ioac.syscr & KB_MASK;
+	stats->write_syscalls = p->ioac.syscw & KB_MASK;
 #ifdef CONFIG_TASK_IO_ACCOUNTING
-	stats->read_bytes = p->ioac.read_bytes;
-	stats->write_bytes = p->ioac.write_bytes;
-	stats->cancelled_write_bytes = p->ioac.cancelled_write_bytes;
+	stats->read_bytes = p->ioac.read_bytes & KB_MASK;
+	stats->write_bytes = p->ioac.write_bytes & KB_MASK;
+	stats->cancelled_write_bytes = p->ioac.cancelled_write_bytes & KB_MASK;
 #else
 	stats->read_bytes = 0;
 	stats->write_bytes = 0;
@@ -2412,8 +2412,13 @@ reflush:
 
 	for_each_cwq_cpu(cpu, wq) {
 		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+		bool drained;
 
-		if (!cwq->nr_active && list_empty(&cwq->delayed_works))
+		spin_lock_irq(&cwq->gcwq->lock);
+		drained = !cwq->nr_active && list_empty(&cwq->delayed_works);
+		spin_unlock_irq(&cwq->gcwq->lock);
+
+		if (drained)
 			continue;
 
 		if (++flush_cnt == 10 ||