Merge branch 'master' into export-slabh
@@ -48,17 +48,20 @@ static inline struct freezer *task_freezer(struct task_struct *task)
                            struct freezer, css);
 }
 
-int cgroup_frozen(struct task_struct *task)
+int cgroup_freezing_or_frozen(struct task_struct *task)
 {
        struct freezer *freezer;
        enum freezer_state state;
 
        task_lock(task);
        freezer = task_freezer(task);
-       state = freezer->state;
+       if (!freezer->css.cgroup->parent)
+               state = CGROUP_THAWED; /* root cgroup can't be frozen */
+       else
+               state = freezer->state;
        task_unlock(task);
 
-       return state == CGROUP_FROZEN;
+       return (state == CGROUP_FREEZING) || (state == CGROUP_FROZEN);
 }
 
 /*
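For readers outside kernel context, the renamed helper answers a broader question than before: a task now counts as affected by the cgroup freezer while its cgroup is either FREEZING or FROZEN, and the root cgroup is always reported as THAWED because it cannot be frozen. A minimal userspace sketch of that predicate, using illustrative stand-in types rather than the kernel's definitions:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's freezer states. */
enum freezer_state { CGROUP_THAWED, CGROUP_FREEZING, CGROUP_FROZEN };

struct freezer_model {
        enum freezer_state state;
        bool is_root;           /* root cgroup can't be frozen */
};

/* Mirrors the shape of the new check: true while FREEZING or FROZEN. */
static bool freezing_or_frozen(const struct freezer_model *f)
{
        enum freezer_state state = f->is_root ? CGROUP_THAWED : f->state;

        return state == CGROUP_FREEZING || state == CGROUP_FROZEN;
}

int main(void)
{
        struct freezer_model mid = { CGROUP_FREEZING, false };
        struct freezer_model root = { CGROUP_FREEZING, true };

        printf("mid-freeze cgroup: %d, root cgroup: %d\n",
               freezing_or_frozen(&mid), freezing_or_frozen(&root));
        return 0;
}

The thaw path later in this merge relies on exactly this broader test to skip tasks whose cgroup is still mid-freeze.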
@@ -365,7 +365,7 @@ struct cred *prepare_usermodehelper_creds(void)
 
        new = kmem_cache_alloc(cred_jar, GFP_ATOMIC);
        if (!new)
-               return NULL;
+               goto free_tgcred;
 
        kdebug("prepare_usermodehelper_creds() alloc %p", new);
 
@@ -398,6 +398,10 @@ struct cred *prepare_usermodehelper_creds(void)
 
 error:
        put_cred(new);
+free_tgcred:
+#ifdef CONFIG_KEYS
+       kfree(tgcred);
+#endif
        return NULL;
 }
 
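The two cred hunks plug a leak: tgcred is allocated before the cred itself, so a failed kmem_cache_alloc() now branches to the free_tgcred label instead of returning directly. A hedged userspace sketch of the same single-exit cleanup shape, with hypothetical names and plain malloc/free standing in for the kernel allocators:

#include <stdlib.h>

struct group_cred { int refs; };
struct cred_model { struct group_cred *tgcred; };

/* Allocate a two-part object; on failure free whatever was already allocated. */
static struct cred_model *prepare_cred_model(void)
{
        struct group_cred *tgcred;
        struct cred_model *new;

        tgcred = calloc(1, sizeof(*tgcred));
        if (!tgcred)
                return NULL;

        new = calloc(1, sizeof(*new));
        if (!new)
                goto free_tgcred;       /* same shape as the hunk above */

        new->tgcred = tgcred;
        return new;

free_tgcred:
        free(tgcred);
        return NULL;
}

int main(void)
{
        struct cred_model *c = prepare_cred_model();

        free(c ? c->tgcred : NULL);
        free(c);
        return 0;
}

Routing every failure through one label keeps the unwind order in a single place as more partial allocations are added.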
@@ -333,6 +333,12 @@ void __init free_early_partial(u64 start, u64 end)
        struct early_res *r;
        int i;
 
+       if (start == end)
+               return;
+
+       if (WARN_ONCE(start > end, " wrong range [%#llx, %#llx]\n", start, end))
+               return;
+
 try_next:
        i = find_overlapped_early(start, end);
        if (i >= max_early_res)
kernel/kgdb.c
@@ -69,9 +69,16 @@ struct kgdb_state {
        struct pt_regs *linux_regs;
 };
 
+/* Exception state values */
+#define DCPU_WANT_MASTER 0x1 /* Waiting to become a master kgdb cpu */
+#define DCPU_NEXT_MASTER 0x2 /* Transition from one master cpu to another */
+#define DCPU_IS_SLAVE 0x4 /* Slave cpu enter exception */
+#define DCPU_SSTEP 0x8 /* CPU is single stepping */
+
 static struct debuggerinfo_struct {
        void *debuggerinfo;
        struct task_struct *task;
+       int exception_state;
 } kgdb_info[NR_CPUS];
 
 /**
@@ -391,27 +398,22 @@ int kgdb_mem2hex(char *mem, char *buf, int count)
 
 /*
  * Copy the binary array pointed to by buf into mem. Fix $, #, and
- * 0x7d escaped with 0x7d. Return a pointer to the character after
- * the last byte written.
+ * 0x7d escaped with 0x7d. Return -EFAULT on failure or 0 on success.
+ * The input buf is overwitten with the result to write to mem.
  */
 static int kgdb_ebin2mem(char *buf, char *mem, int count)
 {
-       int err = 0;
-       char c;
+       int size = 0;
+       char *c = buf;
 
        while (count-- > 0) {
-               c = *buf++;
-               if (c == 0x7d)
-                       c = *buf++ ^ 0x20;
-
-               err = probe_kernel_write(mem, &c, 1);
-               if (err)
-                       break;
-
-               mem++;
+               c[size] = *buf++;
+               if (c[size] == 0x7d)
+                       c[size] = *buf++ ^ 0x20;
+               size++;
        }
 
-       return err;
+       return probe_kernel_write(mem, c, size);
 }
 
 /*
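The rewritten kgdb_ebin2mem() decodes the gdb remote protocol's escaped binary data in place (0x7d marks an escape; the following byte is XORed with 0x20) and then issues one bulk probe_kernel_write() instead of one write per byte. A small, self-contained sketch of just the in-place decode step (userspace model, not the kernel function):

#include <stdio.h>

/*
 * In-place decode of gdb-remote escaped binary data: 0x7d escapes the next
 * byte, which is stored XOR 0x20.  'count' is the number of decoded bytes
 * wanted, as in the hunk above; an escaped pair consumes two input bytes
 * but produces one output byte.
 */
static int ebin_decode_inplace(char *buf, int count)
{
        char *c = buf;          /* write cursor stays at or behind the read cursor */
        int size = 0;

        while (count-- > 0) {
                c[size] = *buf++;
                if (c[size] == 0x7d)
                        c[size] = *buf++ ^ 0x20;
                size++;
        }
        return size;            /* a real caller would now copy c[0..size) in one write */
}

int main(void)
{
        /* "ab", then an escaped 0x7d (sent as 0x7d 0x5d), then "c" */
        char raw[] = { 'a', 'b', 0x7d, 0x5d, 'c' };
        int n = ebin_decode_inplace(raw, 4);    /* 4 decoded bytes expected */
        int i;

        for (i = 0; i < n; i++)
                printf("%02x ", (unsigned char)raw[i]);
        printf("\n");   /* prints: 61 62 7d 63 */
        return 0;
}

Because every escape pair shrinks to one byte, the write cursor can never overtake the read cursor, which is what makes the in-place rewrite safe.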
@@ -562,49 +564,6 @@ static struct task_struct *getthread(struct pt_regs *regs, int tid)
        return find_task_by_pid_ns(tid, &init_pid_ns);
 }
 
-/*
- * CPU debug state control:
- */
-
-#ifdef CONFIG_SMP
-static void kgdb_wait(struct pt_regs *regs)
-{
-       unsigned long flags;
-       int cpu;
-
-       local_irq_save(flags);
-       cpu = raw_smp_processor_id();
-       kgdb_info[cpu].debuggerinfo = regs;
-       kgdb_info[cpu].task = current;
-       /*
-        * Make sure the above info reaches the primary CPU before
-        * our cpu_in_kgdb[] flag setting does:
-        */
-       smp_wmb();
-       atomic_set(&cpu_in_kgdb[cpu], 1);
-
-       /* Disable any cpu specific hw breakpoints */
-       kgdb_disable_hw_debug(regs);
-
-       /* Wait till primary CPU is done with debugging */
-       while (atomic_read(&passive_cpu_wait[cpu]))
-               cpu_relax();
-
-       kgdb_info[cpu].debuggerinfo = NULL;
-       kgdb_info[cpu].task = NULL;
-
-       /* fix up hardware debug registers on local cpu */
-       if (arch_kgdb_ops.correct_hw_break)
-               arch_kgdb_ops.correct_hw_break();
-
-       /* Signal the primary CPU that we are done: */
-       atomic_set(&cpu_in_kgdb[cpu], 0);
-       touch_softlockup_watchdog_sync();
-       clocksource_touch_watchdog();
-       local_irq_restore(flags);
-}
-#endif
-
 /*
  * Some architectures need cache flushes when we set/clear a
  * breakpoint:
@@ -1400,34 +1359,13 @@ static int kgdb_reenter_check(struct kgdb_state *ks)
        return 1;
 }
 
-/*
- * kgdb_handle_exception() - main entry point from a kernel exception
- *
- * Locking hierarchy:
- *     interface locks, if any (begin_session)
- *     kgdb lock (kgdb_active)
- */
-int
-kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
+static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs)
 {
-       struct kgdb_state kgdb_var;
-       struct kgdb_state *ks = &kgdb_var;
        unsigned long flags;
        int sstep_tries = 100;
        int error = 0;
        int i, cpu;
-
-       ks->cpu = raw_smp_processor_id();
-       ks->ex_vector = evector;
-       ks->signo = signo;
-       ks->ex_vector = evector;
-       ks->err_code = ecode;
-       ks->kgdb_usethreadid = 0;
-       ks->linux_regs = regs;
-
-       if (kgdb_reenter_check(ks))
-               return 0; /* Ouch, double exception ! */
-
+       int trace_on = 0;
 acquirelock:
        /*
         * Interrupts will be restored by the 'trap return' code, except when
@@ -1435,13 +1373,43 @@ acquirelock:
         */
        local_irq_save(flags);
 
-       cpu = raw_smp_processor_id();
+       cpu = ks->cpu;
+       kgdb_info[cpu].debuggerinfo = regs;
+       kgdb_info[cpu].task = current;
+       /*
+        * Make sure the above info reaches the primary CPU before
+        * our cpu_in_kgdb[] flag setting does:
+        */
+       atomic_inc(&cpu_in_kgdb[cpu]);
 
        /*
-        * Acquire the kgdb_active lock:
+        * CPU will loop if it is a slave or request to become a kgdb
+        * master cpu and acquire the kgdb_active lock:
         */
-       while (atomic_cmpxchg(&kgdb_active, -1, cpu) != -1)
+       while (1) {
+               if (kgdb_info[cpu].exception_state & DCPU_WANT_MASTER) {
+                       if (atomic_cmpxchg(&kgdb_active, -1, cpu) == cpu)
+                               break;
+               } else if (kgdb_info[cpu].exception_state & DCPU_IS_SLAVE) {
+                       if (!atomic_read(&passive_cpu_wait[cpu]))
+                               goto return_normal;
+               } else {
+return_normal:
+                       /* Return to normal operation by executing any
+                        * hw breakpoint fixup.
+                        */
+                       if (arch_kgdb_ops.correct_hw_break)
+                               arch_kgdb_ops.correct_hw_break();
+                       if (trace_on)
+                               tracing_on();
+                       atomic_dec(&cpu_in_kgdb[cpu]);
+                       touch_softlockup_watchdog_sync();
+                       clocksource_touch_watchdog();
+                       local_irq_restore(flags);
+                       return 0;
+               }
                cpu_relax();
+       }
 
        /*
         * For single stepping, try to only enter on the processor
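Taken together, these kgdb hunks replace the old kgdb_wait()/kgdb_handle_exception() split with one kgdb_cpu_enter(): each CPU publishes why it entered (DCPU_WANT_MASTER or DCPU_IS_SLAVE), a single CPU wins the kgdb_active election, and the rest spin until released. A rough userspace model of that election using C11 atomics and threads; the names, the boolean compare-exchange, and the omission of the hw-breakpoint and watchdog fixups are simplifications, not the kernel's code:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NCPUS 4
#define WANT_MASTER 0x1         /* illustrative stand-ins for DCPU_* */
#define IS_SLAVE    0x4

static atomic_int active = -1;                  /* models kgdb_active */
static atomic_int passive_wait[NCPUS];          /* models passive_cpu_wait[] */
static atomic_int exception_state[NCPUS];

static void cpu_enter(int cpu)
{
        for (;;) {
                if (atomic_load(&exception_state[cpu]) & WANT_MASTER) {
                        int expect = -1;
                        /* only one CPU wins the election and becomes master */
                        if (atomic_compare_exchange_strong(&active, &expect, cpu))
                                break;
                } else if (atomic_load(&exception_state[cpu]) & IS_SLAVE) {
                        if (!atomic_load(&passive_wait[cpu]))
                                return;         /* not held: back to normal operation */
                }
        }
        /* master section: hold the slaves, do the debug work, then release */
        for (int i = 0; i < NCPUS; i++)
                if (i != cpu)
                        atomic_store(&passive_wait[i], 1);
        printf("cpu%d is master\n", cpu);
        for (int i = 0; i < NCPUS; i++)
                atomic_store(&passive_wait[i], 0);
        atomic_store(&active, -1);
}

static void *cpu_thread(void *arg)
{
        int cpu = (int)(long)arg;

        atomic_fetch_or(&exception_state[cpu], cpu == 0 ? WANT_MASTER : IS_SLAVE);
        cpu_enter(cpu);
        atomic_store(&exception_state[cpu], 0);
        return NULL;
}

int main(void)
{
        pthread_t t[NCPUS];

        for (long i = 0; i < NCPUS; i++)
                pthread_create(&t[i], NULL, cpu_thread, (void *)i);
        for (int i = 0; i < NCPUS; i++)
                pthread_join(t[i], NULL);
        return 0;
}

Build with -pthread; as in the kernel logic above, a slave that is not being held simply returns to normal operation.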
@@ -1475,9 +1443,6 @@ acquirelock:
        if (kgdb_io_ops->pre_exception)
                kgdb_io_ops->pre_exception();
 
-       kgdb_info[ks->cpu].debuggerinfo = ks->linux_regs;
-       kgdb_info[ks->cpu].task = current;
-
        kgdb_disable_hw_debug(ks->linux_regs);
 
        /*
@@ -1486,15 +1451,9 @@ acquirelock:
         */
        if (!kgdb_single_step) {
                for (i = 0; i < NR_CPUS; i++)
-                       atomic_set(&passive_cpu_wait[i], 1);
+                       atomic_inc(&passive_cpu_wait[i]);
        }
 
-       /*
-        * spin_lock code is good enough as a barrier so we don't
-        * need one here:
-        */
-       atomic_set(&cpu_in_kgdb[ks->cpu], 1);
-
 #ifdef CONFIG_SMP
        /* Signal the other CPUs to enter kgdb_wait() */
        if ((!kgdb_single_step) && kgdb_do_roundup)
@@ -1518,6 +1477,9 @@ acquirelock:
        kgdb_single_step = 0;
        kgdb_contthread = current;
        exception_level = 0;
+       trace_on = tracing_is_on();
+       if (trace_on)
+               tracing_off();
 
        /* Talk to debugger with gdbserial protocol */
        error = gdb_serial_stub(ks);
@@ -1526,13 +1488,11 @@ acquirelock:
        if (kgdb_io_ops->post_exception)
                kgdb_io_ops->post_exception();
 
-       kgdb_info[ks->cpu].debuggerinfo = NULL;
-       kgdb_info[ks->cpu].task = NULL;
-       atomic_set(&cpu_in_kgdb[ks->cpu], 0);
+       atomic_dec(&cpu_in_kgdb[ks->cpu]);
 
        if (!kgdb_single_step) {
                for (i = NR_CPUS-1; i >= 0; i--)
-                       atomic_set(&passive_cpu_wait[i], 0);
+                       atomic_dec(&passive_cpu_wait[i]);
                /*
                 * Wait till all the CPUs have quit
                 * from the debugger.
@@ -1551,6 +1511,8 @@ kgdb_restore:
                else
                        kgdb_sstep_pid = 0;
        }
+       if (trace_on)
+               tracing_on();
        /* Free kgdb_active */
        atomic_set(&kgdb_active, -1);
        touch_softlockup_watchdog_sync();
@@ -1560,13 +1522,52 @@ kgdb_restore:
        return error;
 }
 
+/*
+ * kgdb_handle_exception() - main entry point from a kernel exception
+ *
+ * Locking hierarchy:
+ *     interface locks, if any (begin_session)
+ *     kgdb lock (kgdb_active)
+ */
+int
+kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
+{
+       struct kgdb_state kgdb_var;
+       struct kgdb_state *ks = &kgdb_var;
+       int ret;
+
+       ks->cpu = raw_smp_processor_id();
+       ks->ex_vector = evector;
+       ks->signo = signo;
+       ks->ex_vector = evector;
+       ks->err_code = ecode;
+       ks->kgdb_usethreadid = 0;
+       ks->linux_regs = regs;
+
+       if (kgdb_reenter_check(ks))
+               return 0; /* Ouch, double exception ! */
+       kgdb_info[ks->cpu].exception_state |= DCPU_WANT_MASTER;
+       ret = kgdb_cpu_enter(ks, regs);
+       kgdb_info[ks->cpu].exception_state &= ~DCPU_WANT_MASTER;
+       return ret;
+}
+
 int kgdb_nmicallback(int cpu, void *regs)
 {
 #ifdef CONFIG_SMP
+       struct kgdb_state kgdb_var;
+       struct kgdb_state *ks = &kgdb_var;
+
+       memset(ks, 0, sizeof(struct kgdb_state));
+       ks->cpu = cpu;
+       ks->linux_regs = regs;
+
        if (!atomic_read(&cpu_in_kgdb[cpu]) &&
-           atomic_read(&kgdb_active) != cpu &&
-           atomic_read(&cpu_in_kgdb[atomic_read(&kgdb_active)])) {
-               kgdb_wait((struct pt_regs *)regs);
+           atomic_read(&kgdb_active) != -1 &&
+           atomic_read(&kgdb_active) != cpu) {
+               kgdb_info[cpu].exception_state |= DCPU_IS_SLAVE;
+               kgdb_cpu_enter(ks, regs);
+               kgdb_info[cpu].exception_state &= ~DCPU_IS_SLAVE;
                return 0;
        }
 #endif
@@ -1742,11 +1743,11 @@ EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);
  */
 void kgdb_breakpoint(void)
 {
-       atomic_set(&kgdb_setting_breakpoint, 1);
+       atomic_inc(&kgdb_setting_breakpoint);
        wmb(); /* Sync point before breakpoint */
        arch_kgdb_breakpoint();
        wmb(); /* Sync point after breakpoint */
-       atomic_set(&kgdb_setting_breakpoint, 0);
+       atomic_dec(&kgdb_setting_breakpoint);
 }
 EXPORT_SYMBOL_GPL(kgdb_breakpoint);
 
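kgdb_setting_breakpoint, like the other kgdb flags touched above, becomes a counter rather than a 0/1 flag: with atomic_set(), a second overlapping user writing 0 on its way out would erase the first user's 1. A tiny illustration of why the inc/dec form nests (C11 atomics, illustrative names):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int setting_breakpoint;

/* A counter survives overlapping users; a plain 0/1 flag would read as
 * "not in use" as soon as the second user finished and stored 0. */
static void enter(void) { atomic_fetch_add(&setting_breakpoint, 1); }
static void leave(void) { atomic_fetch_sub(&setting_breakpoint, 1); }

int main(void)
{
        enter();                /* user A */
        enter();                /* user B overlaps */
        leave();                /* user B done; count is still 1, not 0 */
        printf("in use: %d\n", atomic_load(&setting_breakpoint));
        leave();
        return 0;
}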
@@ -1165,11 +1165,9 @@ void perf_event_task_sched_out(struct task_struct *task,
        struct perf_event_context *ctx = task->perf_event_ctxp;
        struct perf_event_context *next_ctx;
        struct perf_event_context *parent;
-       struct pt_regs *regs;
        int do_switch = 1;
 
-       regs = task_pt_regs(task);
-       perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, regs, 0);
+       perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
 
        if (likely(!ctx || !cpuctx->task_ctx))
                return;
@@ -2787,12 +2785,11 @@ __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
        return NULL;
 }
 
-#ifdef CONFIG_EVENT_TRACING
 __weak
 void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
 {
 }
-#endif
+
 
 /*
  * Output
@@ -3379,15 +3376,23 @@ static void perf_event_task_output(struct perf_event *event,
                                   struct perf_task_event *task_event)
 {
        struct perf_output_handle handle;
-       int size;
        struct task_struct *task = task_event->task;
-       int ret;
+       unsigned long flags;
+       int size, ret;
 
+       /*
+        * If this CPU attempts to acquire an rq lock held by a CPU spinning
+        * in perf_output_lock() from interrupt context, it's game over.
+        */
+       local_irq_save(flags);
+
        size = task_event->event_id.header.size;
        ret = perf_output_begin(&handle, event, size, 0, 0);
 
-       if (ret)
+       if (ret) {
+               local_irq_restore(flags);
                return;
+       }
 
        task_event->event_id.pid = perf_event_pid(event, task);
        task_event->event_id.ppid = perf_event_pid(event, current);
@@ -3398,6 +3403,7 @@ static void perf_event_task_output(struct perf_event *event,
        perf_output_put(&handle, task_event->event_id);
 
        perf_output_end(&handle);
+       local_irq_restore(flags);
 }
 
 static int perf_event_task_match(struct perf_event *event)
@@ -88,12 +88,11 @@ static int try_to_freeze_tasks(bool sig_only)
                printk(KERN_ERR "Freezing of tasks failed after %d.%02d seconds "
                       "(%d tasks refusing to freeze):\n",
                       elapsed_csecs / 100, elapsed_csecs % 100, todo);
-               show_state();
                read_lock(&tasklist_lock);
                do_each_thread(g, p) {
                        task_lock(p);
                        if (freezing(p) && !freezer_should_skip(p))
-                               printk(KERN_ERR " %s\n", p->comm);
+                               sched_show_task(p);
                        cancel_freezing(p);
                        task_unlock(p);
                } while_each_thread(g, p);
@@ -145,7 +144,7 @@ static void thaw_tasks(bool nosig_only)
                if (nosig_only && should_send_signal(p))
                        continue;
 
-               if (cgroup_frozen(p))
+               if (cgroup_freezing_or_frozen(p))
                        continue;
 
                thaw_process(p);
@@ -5388,7 +5388,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 
                get_task_struct(mt);
                task_rq_unlock(rq, &flags);
-               wake_up_process(rq->migration_thread);
+               wake_up_process(mt);
                put_task_struct(mt);
                wait_for_completion(&req.done);
                tlb_migrate_finish(p->mm);
@@ -518,8 +518,4 @@ void proc_sched_set_task(struct task_struct *p)
        p->se.nr_wakeups_idle = 0;
        p->sched_info.bkl_count = 0;
 #endif
-       p->se.sum_exec_runtime = 0;
-       p->se.prev_sum_exec_runtime = 0;
-       p->nvcsw = 0;
-       p->nivcsw = 0;
 }
@@ -1210,18 +1210,19 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
 
        for (i = 0; i < nr_pages; i++) {
                if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
-                       return;
+                       goto out;
                p = cpu_buffer->pages->next;
                bpage = list_entry(p, struct buffer_page, list);
                list_del_init(&bpage->list);
                free_buffer_page(bpage);
        }
        if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
-               return;
+               goto out;
 
        rb_reset_cpu(cpu_buffer);
        rb_check_pages(cpu_buffer);
 
+out:
        spin_unlock_irq(&cpu_buffer->reader_lock);
 }
 
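Both ring_buffer hunks fix early returns that previously left cpu_buffer->reader_lock held: the RB_WARN_ON() exits now jump to an out: label that releases the lock. The same shape in a small userspace example (a pthread mutex standing in for the spinlock, hypothetical function):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t reader_lock = PTHREAD_MUTEX_INITIALIZER;

/* Early exits jump to 'out' so the lock taken at the top is always dropped,
 * which is the shape the hunks give rb_remove_pages()/rb_insert_pages(). */
static int remove_items(int nr, int available)
{
        int removed = 0;

        pthread_mutex_lock(&reader_lock);
        for (int i = 0; i < nr; i++) {
                if (available == 0)
                        goto out;       /* was a bare 'return' that leaked the lock */
                available--;
                removed++;
        }
out:
        pthread_mutex_unlock(&reader_lock);
        return removed;
}

int main(void)
{
        printf("removed %d\n", remove_items(5, 3));
        return 0;
}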
@@ -1238,7 +1239,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
 
        for (i = 0; i < nr_pages; i++) {
                if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
-                       return;
+                       goto out;
                p = pages->next;
                bpage = list_entry(p, struct buffer_page, list);
                list_del_init(&bpage->list);
@@ -1247,6 +1248,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
        rb_reset_cpu(cpu_buffer);
        rb_check_pages(cpu_buffer);
 
+out:
        spin_unlock_irq(&cpu_buffer->reader_lock);
 }
 
@@ -84,7 +84,7 @@ u64 notrace trace_clock_global(void)
        int this_cpu;
        u64 now;
 
-       raw_local_irq_save(flags);
+       local_irq_save(flags);
 
        this_cpu = raw_smp_processor_id();
        now = cpu_clock(this_cpu);
@@ -110,7 +110,7 @@ u64 notrace trace_clock_global(void)
        arch_spin_unlock(&trace_clock_struct.lock);
 
 out:
-       raw_local_irq_restore(flags);
+       local_irq_restore(flags);
 
        return now;
 }
@@ -17,7 +17,12 @@ EXPORT_SYMBOL_GPL(perf_arch_fetch_caller_regs);
 static char *perf_trace_buf;
 static char *perf_trace_buf_nmi;
 
-typedef typeof(char [PERF_MAX_TRACE_SIZE]) perf_trace_t ;
+/*
+ * Force it to be aligned to unsigned long to avoid misaligned accesses
+ * suprises
+ */
+typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
+       perf_trace_t;
 
 /* Count the events in use (per event id, not per instance) */
 static int total_ref_count;
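Declaring perf_trace_t as an array of unsigned long rather than char keeps the per-CPU scratch buffer aligned for word-sized accesses; the division only makes sense when PERF_MAX_TRACE_SIZE is a multiple of sizeof(unsigned long), which the next hunk enforces with BUILD_BUG_ON(). A compilable sketch of the same idea using a standard C11 static assertion (the size value here is illustrative):

#include <stdio.h>

#define PERF_MAX_TRACE_SIZE 2048        /* illustrative value */

/* An array of unsigned long gets unsigned long alignment for free,
 * unlike a plain char array of the same byte size. */
typedef unsigned long perf_trace_model_t[PERF_MAX_TRACE_SIZE / sizeof(unsigned long)];

/* Compile-time check in the spirit of the BUILD_BUG_ON() in the next hunk. */
_Static_assert(PERF_MAX_TRACE_SIZE % sizeof(unsigned long) == 0,
               "PERF_MAX_TRACE_SIZE must be a multiple of sizeof(unsigned long)");

int main(void)
{
        printf("buffer bytes: %zu, alignment: %zu\n",
               sizeof(perf_trace_model_t), _Alignof(unsigned long));
        return 0;
}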
@@ -130,6 +135,8 @@ __kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
        char *trace_buf, *raw_data;
        int pc, cpu;
 
+       BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));
+
        pc = preempt_count();
 
        /* Protect the per cpu buffer, begin the rcu read side */
@@ -152,7 +159,7 @@ __kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
        raw_data = per_cpu_ptr(trace_buf, cpu);
 
        /* zero the dead bytes from align to not leak stack to user */
-       *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
+       memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));
 
        entry = (struct trace_entry *)raw_data;
        tracing_generic_entry_update(entry, *irq_flags, pc);
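The last hunk swaps a direct u64 store for memset() when zeroing the padding at the end of the record: the address raw_data + size - 8 is not guaranteed to be 8-byte aligned, and a misaligned wide store can fault or trap on strict-alignment architectures, while memset() is safe at any alignment. A small userspace illustration of the same zeroing (hypothetical helper):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Zero the trailing 8 bytes of a buffer without assuming the tail address
 * is suitably aligned for a uint64_t store. */
static void zero_tail(char *buf, size_t size)
{
        memset(&buf[size - sizeof(uint64_t)], 0, sizeof(uint64_t));
}

int main(void)
{
        char buf[24] = "abcdefghijklmnopqrstuvw";

        zero_tail(buf, sizeof(buf));
        printf("%s\n", buf);    /* the last 8 bytes are now zero */
        return 0;
}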