Merge branch 'x86/urgent' into core/signal

Conflicts:
	arch/x86/kernel/signal_64.c
Ingo Molnar
2008-10-12 11:32:17 +02:00
3459 changed files with 188572 additions and 90228 deletions

arch/x86/kernel/ptrace.c

@@ -555,45 +555,115 @@ static int ptrace_set_debugreg(struct task_struct *child,
return 0;
}
#ifdef X86_BTS
#ifdef CONFIG_X86_PTRACE_BTS
/*
* The configuration for a particular BTS hardware implementation.
*/
struct bts_configuration {
/* the size of a BTS record in bytes; at most BTS_MAX_RECORD_SIZE */
unsigned char sizeof_bts;
/* the size of a field in the BTS record in bytes */
unsigned char sizeof_field;
/* a bitmask to enable/disable BTS in DEBUGCTL MSR */
unsigned long debugctl_mask;
};
static struct bts_configuration bts_cfg;
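/*
 * Example (a sketch, using the Core2 values configured further down):
 * sizeof_field == 8 and sizeof_bts == 3 * 8 == 24, i.e. three 8-byte
 * fields per record.  BTS_MAX_RECORD_SIZE below bounds the on-stack
 * scratch record used by ptrace_bts_write_record().
 */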
static int ptrace_bts_get_size(struct task_struct *child)
#define BTS_MAX_RECORD_SIZE (8 * 3)
/*
* Branch Trace Store (BTS) uses the following format. Different
* architectures vary in the size of those fields.
* - source linear address
* - destination linear address
* - flags
*
* Later architectures use 64bit pointers throughout, whereas earlier
* architectures use 32bit pointers in 32bit mode.
*
* We compute the base address for the first 8 fields based on:
* - the field size stored in the DS configuration
* - the relative field position
*
* In order to store additional information in the BTS buffer, we use
* a special source address to indicate that the record requires
* special interpretation.
*
* Netburst indicated via a bit in the flags field whether the branch
* was predicted; this is ignored.
*/
enum bts_field {
bts_from = 0,
bts_to,
bts_flags,
bts_escape = (unsigned long)-1,
bts_qual = bts_to,
bts_jiffies = bts_flags
};
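/*
 * With sizeof_field == 8, for example, bts_from lives at byte offset 0
 * of a raw record, bts_to at offset 8 and bts_flags at offset 16; these
 * are exactly the offsets bts_get()/bts_set() below compute.
 */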
static inline unsigned long bts_get(const char *base, enum bts_field field)
{
if (!child->thread.ds_area_msr)
return -ENXIO;
return ds_get_bts_index((void *)child->thread.ds_area_msr);
base += (bts_cfg.sizeof_field * field);
return *(unsigned long *)base;
}
static int ptrace_bts_read_record(struct task_struct *child,
long index,
static inline void bts_set(char *base, enum bts_field field, unsigned long val)
{
base += (bts_cfg.sizeof_field * field);
(*(unsigned long *)base) = val;
}
/*
* Translate a BTS record from the raw format into the bts_struct format
*
* out (out): bts_struct interpretation
* raw: raw BTS record
*/
static void ptrace_bts_translate_record(struct bts_struct *out, const void *raw)
{
memset(out, 0, sizeof(*out));
if (bts_get(raw, bts_from) == bts_escape) {
out->qualifier = bts_get(raw, bts_qual);
out->variant.jiffies = bts_get(raw, bts_jiffies);
} else {
out->qualifier = BTS_BRANCH;
out->variant.lbr.from_ip = bts_get(raw, bts_from);
out->variant.lbr.to_ip = bts_get(raw, bts_to);
}
}
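/*
 * For example, a timestamp record emitted by ptrace_bts_write_record()
 * below is stored as { bts_escape, BTS_TASK_ARRIVES or BTS_TASK_DEPARTS,
 * jiffies } and decodes here into out->qualifier and
 * out->variant.jiffies.
 */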
static int ptrace_bts_read_record(struct task_struct *child, size_t index,
struct bts_struct __user *out)
{
struct bts_struct ret;
int retval;
int bts_end;
int bts_index;
const void *bts_record;
size_t bts_index, bts_end;
int error;
if (!child->thread.ds_area_msr)
return -ENXIO;
error = ds_get_bts_end(child, &bts_end);
if (error < 0)
return error;
if (index < 0)
return -EINVAL;
bts_end = ds_get_bts_end((void *)child->thread.ds_area_msr);
if (bts_end <= index)
return -EINVAL;
/* translate the ptrace bts index into the ds bts index */
bts_index = ds_get_bts_index((void *)child->thread.ds_area_msr);
bts_index -= (index + 1);
if (bts_index < 0)
bts_index += bts_end;
error = ds_get_bts_index(child, &bts_index);
if (error < 0)
return error;
retval = ds_read_bts((void *)child->thread.ds_area_msr,
bts_index, &ret);
if (retval < 0)
return retval;
/* translate the ptrace bts index into the ds bts index */
bts_index += bts_end - (index + 1);
if (bts_end <= bts_index)
bts_index -= bts_end;
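/*
 * Example: with bts_end == 100 and a DS write index of 5, ptrace
 * index 0 (the most recent record) wraps to ds index 4, while ptrace
 * index 10 maps to ds index 94.
 */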
error = ds_access_bts(child, bts_index, &bts_record);
if (error < 0)
return error;
ptrace_bts_translate_record(&ret, bts_record);
if (copy_to_user(out, &ret, sizeof(ret)))
return -EFAULT;
@@ -601,101 +671,106 @@ static int ptrace_bts_read_record(struct task_struct *child,
return sizeof(ret);
}
static int ptrace_bts_clear(struct task_struct *child)
{
if (!child->thread.ds_area_msr)
return -ENXIO;
return ds_clear((void *)child->thread.ds_area_msr);
}
static int ptrace_bts_drain(struct task_struct *child,
long size,
struct bts_struct __user *out)
{
int end, i;
void *ds = (void *)child->thread.ds_area_msr;
struct bts_struct ret;
const unsigned char *raw;
size_t end, i;
int error;
if (!ds)
return -ENXIO;
end = ds_get_bts_index(ds);
if (end <= 0)
return end;
error = ds_get_bts_index(child, &end);
if (error < 0)
return error;
if (size < (end * sizeof(struct bts_struct)))
return -EIO;
for (i = 0; i < end; i++, out++) {
struct bts_struct ret;
int retval;
error = ds_access_bts(child, 0, (const void **)&raw);
if (error < 0)
return error;
retval = ds_read_bts(ds, i, &ret);
if (retval < 0)
return retval;
for (i = 0; i < end; i++, out++, raw += bts_cfg.sizeof_bts) {
ptrace_bts_translate_record(&ret, raw);
if (copy_to_user(out, &ret, sizeof(ret)))
return -EFAULT;
}
ds_clear(ds);
error = ds_clear_bts(child);
if (error < 0)
return error;
return end;
}
static void ptrace_bts_ovfl(struct task_struct *child)
{
send_sig(child->thread.bts_ovfl_signal, child, 0);
}
static int ptrace_bts_config(struct task_struct *child,
long cfg_size,
const struct ptrace_bts_config __user *ucfg)
{
struct ptrace_bts_config cfg;
int bts_size, ret = 0;
void *ds;
int error = 0;
error = -EOPNOTSUPP;
if (!bts_cfg.sizeof_bts)
goto errout;
error = -EIO;
if (cfg_size < sizeof(cfg))
return -EIO;
goto errout;
error = -EFAULT;
if (copy_from_user(&cfg, ucfg, sizeof(cfg)))
return -EFAULT;
goto errout;
if ((int)cfg.size < 0)
return -EINVAL;
error = -EINVAL;
if ((cfg.flags & PTRACE_BTS_O_SIGNAL) &&
!(cfg.flags & PTRACE_BTS_O_ALLOC))
goto errout;
bts_size = 0;
ds = (void *)child->thread.ds_area_msr;
if (ds) {
bts_size = ds_get_bts_size(ds);
if (bts_size < 0)
return bts_size;
}
cfg.size = PAGE_ALIGN(cfg.size);
if (cfg.flags & PTRACE_BTS_O_ALLOC) {
ds_ovfl_callback_t ovfl = NULL;
unsigned int sig = 0;
if (bts_size != cfg.size) {
ret = ptrace_bts_realloc(child, cfg.size,
cfg.flags & PTRACE_BTS_O_CUT_SIZE);
if (ret < 0)
/* we ignore the error in case we were not tracing child */
(void)ds_release_bts(child);
if (cfg.flags & PTRACE_BTS_O_SIGNAL) {
if (!cfg.signal)
goto errout;
sig = cfg.signal;
ovfl = ptrace_bts_ovfl;
}
error = ds_request_bts(child, /* base = */ NULL, cfg.size, ovfl);
if (error < 0)
goto errout;
ds = (void *)child->thread.ds_area_msr;
child->thread.bts_ovfl_signal = sig;
}
if (cfg.flags & PTRACE_BTS_O_SIGNAL)
ret = ds_set_overflow(ds, DS_O_SIGNAL);
else
ret = ds_set_overflow(ds, DS_O_WRAP);
if (ret < 0)
error = -EINVAL;
if (!child->thread.ds_ctx && cfg.flags)
goto errout;
if (cfg.flags & PTRACE_BTS_O_TRACE)
child->thread.debugctlmsr |= ds_debugctl_mask();
child->thread.debugctlmsr |= bts_cfg.debugctl_mask;
else
child->thread.debugctlmsr &= ~ds_debugctl_mask();
child->thread.debugctlmsr &= ~bts_cfg.debugctl_mask;
if (cfg.flags & PTRACE_BTS_O_SCHED)
set_tsk_thread_flag(child, TIF_BTS_TRACE_TS);
else
clear_tsk_thread_flag(child, TIF_BTS_TRACE_TS);
ret = sizeof(cfg);
error = sizeof(cfg);
out:
if (child->thread.debugctlmsr)
@@ -703,10 +778,10 @@ out:
else
clear_tsk_thread_flag(child, TIF_DEBUGCTLMSR);
return ret;
return error;
errout:
child->thread.debugctlmsr &= ~ds_debugctl_mask();
child->thread.debugctlmsr &= ~bts_cfg.debugctl_mask;
clear_tsk_thread_flag(child, TIF_BTS_TRACE_TS);
goto out;
}
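/*
 * For reference, a tracer would drive this from user space roughly as
 * follows (a sketch only; the request and flag constants come from the
 * ptrace ABI header, the buffer size is made up for illustration):
 *
 *	struct ptrace_bts_config cfg = {
 *		.size  = 4096,
 *		.flags = PTRACE_BTS_O_ALLOC | PTRACE_BTS_O_TRACE,
 *	};
 *	ptrace(PTRACE_BTS_CONFIG, pid, &cfg, sizeof(cfg));
 */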
@@ -715,119 +790,79 @@ static int ptrace_bts_status(struct task_struct *child,
long cfg_size,
struct ptrace_bts_config __user *ucfg)
{
void *ds = (void *)child->thread.ds_area_msr;
struct ptrace_bts_config cfg;
size_t end;
const void *base, *max;
int error;
if (cfg_size < sizeof(cfg))
return -EIO;
error = ds_get_bts_end(child, &end);
if (error < 0)
return error;
error = ds_access_bts(child, /* index = */ 0, &base);
if (error < 0)
return error;
error = ds_access_bts(child, /* index = */ end, &max);
if (error < 0)
return error;
memset(&cfg, 0, sizeof(cfg));
if (ds) {
cfg.size = ds_get_bts_size(ds);
if (ds_get_overflow(ds) == DS_O_SIGNAL)
cfg.flags |= PTRACE_BTS_O_SIGNAL;
if (test_tsk_thread_flag(child, TIF_DEBUGCTLMSR) &&
child->thread.debugctlmsr & ds_debugctl_mask())
cfg.flags |= PTRACE_BTS_O_TRACE;
if (test_tsk_thread_flag(child, TIF_BTS_TRACE_TS))
cfg.flags |= PTRACE_BTS_O_SCHED;
}
cfg.size = (max - base);
cfg.signal = child->thread.bts_ovfl_signal;
cfg.bts_size = sizeof(struct bts_struct);
if (cfg.signal)
cfg.flags |= PTRACE_BTS_O_SIGNAL;
if (test_tsk_thread_flag(child, TIF_DEBUGCTLMSR) &&
child->thread.debugctlmsr & bts_cfg.debugctl_mask)
cfg.flags |= PTRACE_BTS_O_TRACE;
if (test_tsk_thread_flag(child, TIF_BTS_TRACE_TS))
cfg.flags |= PTRACE_BTS_O_SCHED;
if (copy_to_user(ucfg, &cfg, sizeof(cfg)))
return -EFAULT;
return sizeof(cfg);
}
static int ptrace_bts_write_record(struct task_struct *child,
const struct bts_struct *in)
{
int retval;
unsigned char bts_record[BTS_MAX_RECORD_SIZE];
if (!child->thread.ds_area_msr)
return -ENXIO;
BUG_ON(BTS_MAX_RECORD_SIZE < bts_cfg.sizeof_bts);
retval = ds_write_bts((void *)child->thread.ds_area_msr, in);
if (retval)
return retval;
memset(bts_record, 0, bts_cfg.sizeof_bts);
switch (in->qualifier) {
case BTS_INVALID:
break;
return sizeof(*in);
}
case BTS_BRANCH:
bts_set(bts_record, bts_from, in->variant.lbr.from_ip);
bts_set(bts_record, bts_to, in->variant.lbr.to_ip);
break;
static int ptrace_bts_realloc(struct task_struct *child,
int size, int reduce_size)
{
unsigned long rlim, vm;
int ret, old_size;
case BTS_TASK_ARRIVES:
case BTS_TASK_DEPARTS:
bts_set(bts_record, bts_from, bts_escape);
bts_set(bts_record, bts_qual, in->qualifier);
bts_set(bts_record, bts_jiffies, in->variant.jiffies);
break;
if (size < 0)
default:
return -EINVAL;
old_size = ds_get_bts_size((void *)child->thread.ds_area_msr);
if (old_size < 0)
return old_size;
ret = ds_free((void **)&child->thread.ds_area_msr);
if (ret < 0)
goto out;
size >>= PAGE_SHIFT;
old_size >>= PAGE_SHIFT;
current->mm->total_vm -= old_size;
current->mm->locked_vm -= old_size;
if (size == 0)
goto out;
rlim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
vm = current->mm->total_vm + size;
if (rlim < vm) {
ret = -ENOMEM;
if (!reduce_size)
goto out;
size = rlim - current->mm->total_vm;
if (size <= 0)
goto out;
}
rlim = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
vm = current->mm->locked_vm + size;
if (rlim < vm) {
ret = -ENOMEM;
if (!reduce_size)
goto out;
size = rlim - current->mm->locked_vm;
if (size <= 0)
goto out;
}
ret = ds_allocate((void **)&child->thread.ds_area_msr,
size << PAGE_SHIFT);
if (ret < 0)
goto out;
current->mm->total_vm += size;
current->mm->locked_vm += size;
out:
if (child->thread.ds_area_msr)
set_tsk_thread_flag(child, TIF_DS_AREA_MSR);
else
clear_tsk_thread_flag(child, TIF_DS_AREA_MSR);
return ret;
/* The writing task will be the switched-to task on a context
* switch. It needs to write into the switched-from task's BTS
* buffer. */
return ds_unchecked_write_bts(child, bts_record, bts_cfg.sizeof_bts);
}
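/*
 * ptrace_bts_take_timestamp() below uses this on a context switch
 * (enabled via PTRACE_BTS_O_SCHED / TIF_BTS_TRACE_TS) to log a
 * BTS_TASK_ARRIVES or BTS_TASK_DEPARTS record stamped with jiffies.
 */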
void ptrace_bts_take_timestamp(struct task_struct *tsk,
@@ -840,7 +875,66 @@ void ptrace_bts_take_timestamp(struct task_struct *tsk,
ptrace_bts_write_record(tsk, &rec);
}
#endif /* X86_BTS */
static const struct bts_configuration bts_cfg_netburst = {
.sizeof_bts = sizeof(long) * 3,
.sizeof_field = sizeof(long),
.debugctl_mask = (1<<2)|(1<<3)|(1<<5)
};
static const struct bts_configuration bts_cfg_pentium_m = {
.sizeof_bts = sizeof(long) * 3,
.sizeof_field = sizeof(long),
.debugctl_mask = (1<<6)|(1<<7)
};
static const struct bts_configuration bts_cfg_core2 = {
.sizeof_bts = 8 * 3,
.sizeof_field = 8,
.debugctl_mask = (1<<6)|(1<<7)|(1<<9)
};
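/*
 * Whichever configuration is selected, its debugctl_mask is what
 * ptrace_bts_config() above ORs into (or clears from)
 * child->thread.debugctlmsr to switch branch tracing on and off.
 */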
static inline void bts_configure(const struct bts_configuration *cfg)
{
bts_cfg = *cfg;
}
void __cpuinit ptrace_bts_init_intel(struct cpuinfo_x86 *c)
{
switch (c->x86) {
case 0x6:
switch (c->x86_model) {
case 0xD:
case 0xE: /* Pentium M */
bts_configure(&bts_cfg_pentium_m);
break;
case 0xF: /* Core2 */
case 0x1C: /* Atom */
bts_configure(&bts_cfg_core2);
break;
default:
/* sorry, don't know about them */
break;
}
break;
case 0xF:
switch (c->x86_model) {
case 0x0:
case 0x1:
case 0x2: /* Netburst */
bts_configure(&bts_cfg_netburst);
break;
default:
/* sorry, don't know about them */
break;
}
break;
default:
/* sorry, don't know about them */
break;
}
}
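/*
 * Presumably called from the Intel CPU identification code; on models
 * not listed above bts_cfg stays zeroed, so ptrace_bts_config() fails
 * with -EOPNOTSUPP rather than programming unknown hardware.
 */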
#endif /* CONFIG_X86_PTRACE_BTS */
/*
* Called by kernel/ptrace.c when detaching..
@@ -853,15 +947,15 @@ void ptrace_disable(struct task_struct *child)
#ifdef TIF_SYSCALL_EMU
clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif
if (child->thread.ds_area_msr) {
#ifdef X86_BTS
ptrace_bts_realloc(child, 0, 0);
#endif
child->thread.debugctlmsr &= ~ds_debugctl_mask();
if (!child->thread.debugctlmsr)
clear_tsk_thread_flag(child, TIF_DEBUGCTLMSR);
clear_tsk_thread_flag(child, TIF_BTS_TRACE_TS);
}
#ifdef CONFIG_X86_PTRACE_BTS
(void)ds_release_bts(child);
child->thread.debugctlmsr &= ~bts_cfg.debugctl_mask;
if (!child->thread.debugctlmsr)
clear_tsk_thread_flag(child, TIF_DEBUGCTLMSR);
clear_tsk_thread_flag(child, TIF_BTS_TRACE_TS);
#endif /* CONFIG_X86_PTRACE_BTS */
}
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
@@ -981,7 +1075,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
/*
* These bits need more cooking - not enabled yet:
*/
#ifdef X86_BTS
#ifdef CONFIG_X86_PTRACE_BTS
case PTRACE_BTS_CONFIG:
ret = ptrace_bts_config
(child, data, (struct ptrace_bts_config __user *)addr);
@@ -993,7 +1087,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
break;
case PTRACE_BTS_SIZE:
ret = ptrace_bts_get_size(child);
ret = ds_get_bts_index(child, /* pos = */ NULL);
break;
case PTRACE_BTS_GET:
@@ -1002,14 +1096,14 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
break;
case PTRACE_BTS_CLEAR:
ret = ptrace_bts_clear(child);
ret = ds_clear_bts(child);
break;
case PTRACE_BTS_DRAIN:
ret = ptrace_bts_drain
(child, data, (struct bts_struct __user *) addr);
break;
#endif
#endif /* CONFIG_X86_PTRACE_BTS */
default:
ret = ptrace_request(child, request, addr, data);