Merge branch 'x86/pebs' into x86-v28-for-linus-phase1

Conflicts:
	include/asm-x86/ds.h

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Committed by Ingo Molnar on 2008-10-06 16:17:23 +02:00

10 changed files with 1358 additions and 585 deletions


@@ -418,3 +418,21 @@ config X86_MINIMUM_CPU_FAMILY
config X86_DEBUGCTLMSR
def_bool y
depends on !(MK6 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MCYRIXIII || M586MMX || M586TSC || M586 || M486 || M386)
config X86_DS
bool "Debug Store support"
default y
help
Add support for Debug Store.
This allows the kernel to provide a memory buffer to the hardware
to store various profiling and tracing events.
config X86_PTRACE_BTS
bool "ptrace interface to Branch Trace Store"
default y
depends on (X86_DS && X86_DEBUGCTLMSR)
help
Add a ptrace interface to allow collecting an execution trace
of the traced task.
This collects control flow changes in a (cyclic) buffer and allows
debuggers to fill in the gaps and show an execution trace of the debuggee.
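
For context, the interface gated by these options is driven entirely through ptrace(2), using the PTRACE_BTS_* requests and the ptrace_bts_config / bts_struct structures that appear in the ptrace.c hunks further down. Below is a minimal, hypothetical debugger-side sketch; it assumes the definitions exported by this kernel's <asm/ptrace-abi.h> and <asm/ds.h>, and it elides attach/stop sequencing and error handling.

#include <stdio.h>
#include <sys/types.h>
#include <sys/ptrace.h>
#include <asm/ptrace-abi.h>	/* PTRACE_BTS_*, struct ptrace_bts_config (assumed location) */
#include <asm/ds.h>		/* struct bts_struct, BTS_BRANCH (assumed location) */

static void dump_branch_trace(pid_t child)	/* child is already ptrace-stopped */
{
	struct ptrace_bts_config cfg = {
		.size	= 4096,		/* bytes; the kernel page-aligns this */
		.flags	= PTRACE_BTS_O_ALLOC | PTRACE_BTS_O_TRACE,
	};
	struct bts_struct recs[512];	/* sized generously for a 4 KB BTS buffer */
	long i, n;

	/* Allocate a BTS buffer for the child and enable branch tracing. */
	ptrace(PTRACE_BTS_CONFIG, child, &cfg, (void *)sizeof(cfg));

	/* ... resume the child, let it run for a while, stop it again ... */

	/* Copy out and clear whatever has been buffered. */
	n = ptrace(PTRACE_BTS_DRAIN, child, recs, (void *)sizeof(recs));

	for (i = 0; i < n; i++)
		if (recs[i].qualifier == BTS_BRANCH)
			printf("branch %llx -> %llx\n",
			       (unsigned long long)recs[i].variant.lbr.from_ip,
			       (unsigned long long)recs[i].variant.lbr.to_ip);
}

PTRACE_BTS_O_SIGNAL and PTRACE_BTS_O_SCHED could be added to cfg.flags to request an overflow signal or context-switch timestamps, as the ptrace.c changes below show.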


@@ -222,10 +222,11 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
set_cpu_cap(c, X86_FEATURE_BTS);
if (!(l1 & (1<<12)))
set_cpu_cap(c, X86_FEATURE_PEBS);
ds_init_intel(c);
}
if (cpu_has_bts)
ds_init_intel(c);
ptrace_bts_init_intel(c);
/*
* See if we have a good local APIC by checking for buggy Pentia,

File diff suppressed because it is too large


@@ -277,6 +277,14 @@ void exit_thread(void)
tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
put_cpu();
}
#ifdef CONFIG_X86_DS
/* Free any DS contexts that have not been properly released. */
if (unlikely(current->thread.ds_ctx)) {
/* we clear debugctl to make sure DS is not used. */
update_debugctlmsr(0);
ds_free(current->thread.ds_ctx);
}
#endif /* CONFIG_X86_DS */
}
void flush_thread(void)
@@ -438,6 +446,35 @@ int set_tsc_mode(unsigned int val)
return 0;
}
#ifdef CONFIG_X86_DS
static int update_debugctl(struct thread_struct *prev,
struct thread_struct *next, unsigned long debugctl)
{
unsigned long ds_prev = 0;
unsigned long ds_next = 0;
if (prev->ds_ctx)
ds_prev = (unsigned long)prev->ds_ctx->ds;
if (next->ds_ctx)
ds_next = (unsigned long)next->ds_ctx->ds;
if (ds_next != ds_prev) {
/* we clear debugctl to make sure DS
* is not in use when we change it */
debugctl = 0;
update_debugctlmsr(0);
wrmsr(MSR_IA32_DS_AREA, ds_next, 0);
}
return debugctl;
}
#else
static int update_debugctl(struct thread_struct *prev,
struct thread_struct *next, unsigned long debugctl)
{
return debugctl;
}
#endif /* CONFIG_X86_DS */
static noinline void
__switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
struct tss_struct *tss)
@@ -448,14 +485,7 @@ __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
prev = &prev_p->thread;
next = &next_p->thread;
debugctl = prev->debugctlmsr;
if (next->ds_area_msr != prev->ds_area_msr) {
/* we clear debugctl to make sure DS
* is not in use when we change it */
debugctl = 0;
update_debugctlmsr(0);
wrmsr(MSR_IA32_DS_AREA, next->ds_area_msr, 0);
}
debugctl = update_debugctl(prev, next, prev->debugctlmsr);
if (next->debugctlmsr != debugctl)
update_debugctlmsr(next->debugctlmsr);
@@ -479,13 +509,13 @@ __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
hard_enable_TSC();
}
#ifdef X86_BTS
#ifdef CONFIG_X86_PTRACE_BTS
if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS))
ptrace_bts_take_timestamp(prev_p, BTS_TASK_DEPARTS);
if (test_tsk_thread_flag(next_p, TIF_BTS_TRACE_TS))
ptrace_bts_take_timestamp(next_p, BTS_TASK_ARRIVES);
#endif
#endif /* CONFIG_X86_PTRACE_BTS */
if (!test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {


@@ -240,6 +240,14 @@ void exit_thread(void)
t->io_bitmap_max = 0;
put_cpu();
}
#ifdef CONFIG_X86_DS
/* Free any DS contexts that have not been properly released. */
if (unlikely(t->ds_ctx)) {
/* we clear debugctl to make sure DS is not used. */
update_debugctlmsr(0);
ds_free(t->ds_ctx);
}
#endif /* CONFIG_X86_DS */
}
void flush_thread(void)
@@ -473,13 +481,27 @@ static inline void __switch_to_xtra(struct task_struct *prev_p,
next = &next_p->thread;
debugctl = prev->debugctlmsr;
if (next->ds_area_msr != prev->ds_area_msr) {
/* we clear debugctl to make sure DS
* is not in use when we change it */
debugctl = 0;
update_debugctlmsr(0);
wrmsrl(MSR_IA32_DS_AREA, next->ds_area_msr);
#ifdef CONFIG_X86_DS
{
unsigned long ds_prev = 0, ds_next = 0;
if (prev->ds_ctx)
ds_prev = (unsigned long)prev->ds_ctx->ds;
if (next->ds_ctx)
ds_next = (unsigned long)next->ds_ctx->ds;
if (ds_next != ds_prev) {
/*
* We clear debugctl to make sure DS
* is not in use when we change it:
*/
debugctl = 0;
update_debugctlmsr(0);
wrmsrl(MSR_IA32_DS_AREA, ds_next);
}
}
#endif /* CONFIG_X86_DS */
if (next->debugctlmsr != debugctl)
update_debugctlmsr(next->debugctlmsr);
@@ -517,13 +539,13 @@ static inline void __switch_to_xtra(struct task_struct *prev_p,
memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
}
#ifdef X86_BTS
#ifdef CONFIG_X86_PTRACE_BTS
if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS))
ptrace_bts_take_timestamp(prev_p, BTS_TASK_DEPARTS);
if (test_tsk_thread_flag(next_p, TIF_BTS_TRACE_TS))
ptrace_bts_take_timestamp(next_p, BTS_TASK_ARRIVES);
#endif
#endif /* CONFIG_X86_PTRACE_BTS */
}
/*


@@ -554,45 +554,115 @@ static int ptrace_set_debugreg(struct task_struct *child,
return 0;
}
#ifdef X86_BTS
#ifdef CONFIG_X86_PTRACE_BTS
/*
* The configuration for a particular BTS hardware implementation.
*/
struct bts_configuration {
/* the size of a BTS record in bytes; at most BTS_MAX_RECORD_SIZE */
unsigned char sizeof_bts;
/* the size of a field in the BTS record in bytes */
unsigned char sizeof_field;
/* a bitmask to enable/disable BTS in DEBUGCTL MSR */
unsigned long debugctl_mask;
};
static struct bts_configuration bts_cfg;
static int ptrace_bts_get_size(struct task_struct *child)
#define BTS_MAX_RECORD_SIZE (8 * 3)
/*
* Branch Trace Store (BTS) uses the following format. Different
* architectures vary in the size of those fields.
* - source linear address
* - destination linear address
* - flags
*
* Later architectures use 64bit pointers throughout, whereas earlier
* architectures use 32bit pointers in 32bit mode.
*
* We compute the base address for the first 8 fields based on:
* - the field size stored in the DS configuration
* - the relative field position
*
* In order to store additional information in the BTS buffer, we use
* a special source address to indicate that the record requires
* special interpretation.
*
* Netburst indicated via a bit in the flags field whether the branch
* was predicted; this is ignored.
*/
enum bts_field {
bts_from = 0,
bts_to,
bts_flags,
bts_escape = (unsigned long)-1,
bts_qual = bts_to,
bts_jiffies = bts_flags
};
static inline unsigned long bts_get(const char *base, enum bts_field field)
{
if (!child->thread.ds_area_msr)
return -ENXIO;
return ds_get_bts_index((void *)child->thread.ds_area_msr);
base += (bts_cfg.sizeof_field * field);
return *(unsigned long *)base;
}
static int ptrace_bts_read_record(struct task_struct *child,
long index,
static inline void bts_set(char *base, enum bts_field field, unsigned long val)
{
base += (bts_cfg.sizeof_field * field);
(*(unsigned long *)base) = val;
}
/*
* Translate a BTS record from the raw format into the bts_struct format
*
* out (out): bts_struct interpretation
* raw: raw BTS record
*/
static void ptrace_bts_translate_record(struct bts_struct *out, const void *raw)
{
memset(out, 0, sizeof(*out));
if (bts_get(raw, bts_from) == bts_escape) {
out->qualifier = bts_get(raw, bts_qual);
out->variant.jiffies = bts_get(raw, bts_jiffies);
} else {
out->qualifier = BTS_BRANCH;
out->variant.lbr.from_ip = bts_get(raw, bts_from);
out->variant.lbr.to_ip = bts_get(raw, bts_to);
}
}
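
A worked decode of one raw record may make the field arithmetic above more concrete. The sketch below is illustrative only: it assumes the Core2 configuration defined later in this file (sizeof_field = 8, sizeof_bts = 24), a 64-bit build, and made-up buffer contents, and it merely restates by hand what bts_get() and ptrace_bts_translate_record() do.

#include <stdio.h>

/*
 * Hand-decode one raw Core2 BTS record. With bts_cfg.sizeof_field == 8,
 * bts_get(raw, field) reads the unsigned long at raw + field * 8:
 *
 *	bytes  0.. 7	bts_from	source linear address
 *	bytes  8..15	bts_to		destination address (or bts_qual)
 *	bytes 16..23	bts_flags	flags (or bts_jiffies)
 */
static void decode_core2_record_example(const unsigned char *raw)
{
	unsigned long from  = *(const unsigned long *)(raw + 0 * 8);
	unsigned long to    = *(const unsigned long *)(raw + 1 * 8);
	unsigned long flags = *(const unsigned long *)(raw + 2 * 8);

	if (from == (unsigned long)-1)	/* bts_escape marker */
		/* "to" carries the qualifier, "flags" carries jiffies */
		printf("event %lu at jiffies %lu\n", to, flags);
	else
		printf("branch %lx -> %lx\n", from, to);
}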
static int ptrace_bts_read_record(struct task_struct *child, size_t index,
struct bts_struct __user *out)
{
struct bts_struct ret;
int retval;
int bts_end;
int bts_index;
const void *bts_record;
size_t bts_index, bts_end;
int error;
if (!child->thread.ds_area_msr)
return -ENXIO;
error = ds_get_bts_end(child, &bts_end);
if (error < 0)
return error;
if (index < 0)
return -EINVAL;
bts_end = ds_get_bts_end((void *)child->thread.ds_area_msr);
if (bts_end <= index)
return -EINVAL;
/* translate the ptrace bts index into the ds bts index */
bts_index = ds_get_bts_index((void *)child->thread.ds_area_msr);
bts_index -= (index + 1);
if (bts_index < 0)
bts_index += bts_end;
error = ds_get_bts_index(child, &bts_index);
if (error < 0)
return error;
retval = ds_read_bts((void *)child->thread.ds_area_msr,
bts_index, &ret);
if (retval < 0)
return retval;
/* translate the ptrace bts index into the ds bts index */
bts_index += bts_end - (index + 1);
if (bts_end <= bts_index)
bts_index -= bts_end;
error = ds_access_bts(child, bts_index, &bts_record);
if (error < 0)
return error;
ptrace_bts_translate_record(&ret, bts_record);
if (copy_to_user(out, &ret, sizeof(ret)))
return -EFAULT;
@@ -600,101 +670,106 @@ static int ptrace_bts_read_record(struct task_struct *child,
return sizeof(ret);
}
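
The "translate the ptrace bts index into the ds bts index" step above is easier to follow with numbers. A sketch with made-up values: the DS write pointer sits at slot 10 of a cyclic buffer holding bts_end = 100 records, so slot 9 holds the newest record and slot 10 the oldest.

#include <stddef.h>

/* Same arithmetic as above, with fixed example values. */
static size_t example_ptrace_to_ds_slot(size_t index)
{
	const size_t bts_end  = 100;	/* records in the cyclic buffer     */
	const size_t ds_index = 10;	/* next slot the hardware writes to */
	size_t slot = ds_index + bts_end - (index + 1);

	if (bts_end <= slot)
		slot -= bts_end;

	return slot;	/* index 0 -> slot 9 (newest), index 99 -> slot 10 (oldest) */
}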
static int ptrace_bts_clear(struct task_struct *child)
{
if (!child->thread.ds_area_msr)
return -ENXIO;
return ds_clear((void *)child->thread.ds_area_msr);
}
static int ptrace_bts_drain(struct task_struct *child,
long size,
struct bts_struct __user *out)
{
int end, i;
void *ds = (void *)child->thread.ds_area_msr;
struct bts_struct ret;
const unsigned char *raw;
size_t end, i;
int error;
if (!ds)
return -ENXIO;
end = ds_get_bts_index(ds);
if (end <= 0)
return end;
error = ds_get_bts_index(child, &end);
if (error < 0)
return error;
if (size < (end * sizeof(struct bts_struct)))
return -EIO;
for (i = 0; i < end; i++, out++) {
struct bts_struct ret;
int retval;
error = ds_access_bts(child, 0, (const void **)&raw);
if (error < 0)
return error;
retval = ds_read_bts(ds, i, &ret);
if (retval < 0)
return retval;
for (i = 0; i < end; i++, out++, raw += bts_cfg.sizeof_bts) {
ptrace_bts_translate_record(&ret, raw);
if (copy_to_user(out, &ret, sizeof(ret)))
return -EFAULT;
}
ds_clear(ds);
error = ds_clear_bts(child);
if (error < 0)
return error;
return end;
}
static void ptrace_bts_ovfl(struct task_struct *child)
{
send_sig(child->thread.bts_ovfl_signal, child, 0);
}
static int ptrace_bts_config(struct task_struct *child,
long cfg_size,
const struct ptrace_bts_config __user *ucfg)
{
struct ptrace_bts_config cfg;
int bts_size, ret = 0;
void *ds;
int error = 0;
error = -EOPNOTSUPP;
if (!bts_cfg.sizeof_bts)
goto errout;
error = -EIO;
if (cfg_size < sizeof(cfg))
return -EIO;
goto errout;
error = -EFAULT;
if (copy_from_user(&cfg, ucfg, sizeof(cfg)))
return -EFAULT;
goto errout;
if ((int)cfg.size < 0)
return -EINVAL;
error = -EINVAL;
if ((cfg.flags & PTRACE_BTS_O_SIGNAL) &&
!(cfg.flags & PTRACE_BTS_O_ALLOC))
goto errout;
bts_size = 0;
ds = (void *)child->thread.ds_area_msr;
if (ds) {
bts_size = ds_get_bts_size(ds);
if (bts_size < 0)
return bts_size;
}
cfg.size = PAGE_ALIGN(cfg.size);
if (cfg.flags & PTRACE_BTS_O_ALLOC) {
ds_ovfl_callback_t ovfl = NULL;
unsigned int sig = 0;
if (bts_size != cfg.size) {
ret = ptrace_bts_realloc(child, cfg.size,
cfg.flags & PTRACE_BTS_O_CUT_SIZE);
if (ret < 0)
/* we ignore the error in case we were not tracing child */
(void)ds_release_bts(child);
if (cfg.flags & PTRACE_BTS_O_SIGNAL) {
if (!cfg.signal)
goto errout;
sig = cfg.signal;
ovfl = ptrace_bts_ovfl;
}
error = ds_request_bts(child, /* base = */ NULL, cfg.size, ovfl);
if (error < 0)
goto errout;
ds = (void *)child->thread.ds_area_msr;
child->thread.bts_ovfl_signal = sig;
}
if (cfg.flags & PTRACE_BTS_O_SIGNAL)
ret = ds_set_overflow(ds, DS_O_SIGNAL);
else
ret = ds_set_overflow(ds, DS_O_WRAP);
if (ret < 0)
error = -EINVAL;
if (!child->thread.ds_ctx && cfg.flags)
goto errout;
if (cfg.flags & PTRACE_BTS_O_TRACE)
child->thread.debugctlmsr |= ds_debugctl_mask();
child->thread.debugctlmsr |= bts_cfg.debugctl_mask;
else
child->thread.debugctlmsr &= ~ds_debugctl_mask();
child->thread.debugctlmsr &= ~bts_cfg.debugctl_mask;
if (cfg.flags & PTRACE_BTS_O_SCHED)
set_tsk_thread_flag(child, TIF_BTS_TRACE_TS);
else
clear_tsk_thread_flag(child, TIF_BTS_TRACE_TS);
ret = sizeof(cfg);
error = sizeof(cfg);
out:
if (child->thread.debugctlmsr)
@@ -702,10 +777,10 @@ out:
else
clear_tsk_thread_flag(child, TIF_DEBUGCTLMSR);
return ret;
return error;
errout:
child->thread.debugctlmsr &= ~ds_debugctl_mask();
child->thread.debugctlmsr &= ~bts_cfg.debugctl_mask;
clear_tsk_thread_flag(child, TIF_BTS_TRACE_TS);
goto out;
}
@@ -714,119 +789,79 @@ static int ptrace_bts_status(struct task_struct *child,
long cfg_size,
struct ptrace_bts_config __user *ucfg)
{
void *ds = (void *)child->thread.ds_area_msr;
struct ptrace_bts_config cfg;
size_t end;
const void *base, *max;
int error;
if (cfg_size < sizeof(cfg))
return -EIO;
error = ds_get_bts_end(child, &end);
if (error < 0)
return error;
error = ds_access_bts(child, /* index = */ 0, &base);
if (error < 0)
return error;
error = ds_access_bts(child, /* index = */ end, &max);
if (error < 0)
return error;
memset(&cfg, 0, sizeof(cfg));
if (ds) {
cfg.size = ds_get_bts_size(ds);
if (ds_get_overflow(ds) == DS_O_SIGNAL)
cfg.flags |= PTRACE_BTS_O_SIGNAL;
if (test_tsk_thread_flag(child, TIF_DEBUGCTLMSR) &&
child->thread.debugctlmsr & ds_debugctl_mask())
cfg.flags |= PTRACE_BTS_O_TRACE;
if (test_tsk_thread_flag(child, TIF_BTS_TRACE_TS))
cfg.flags |= PTRACE_BTS_O_SCHED;
}
cfg.size = (max - base);
cfg.signal = child->thread.bts_ovfl_signal;
cfg.bts_size = sizeof(struct bts_struct);
if (cfg.signal)
cfg.flags |= PTRACE_BTS_O_SIGNAL;
if (test_tsk_thread_flag(child, TIF_DEBUGCTLMSR) &&
child->thread.debugctlmsr & bts_cfg.debugctl_mask)
cfg.flags |= PTRACE_BTS_O_TRACE;
if (test_tsk_thread_flag(child, TIF_BTS_TRACE_TS))
cfg.flags |= PTRACE_BTS_O_SCHED;
if (copy_to_user(ucfg, &cfg, sizeof(cfg)))
return -EFAULT;
return sizeof(cfg);
}
static int ptrace_bts_write_record(struct task_struct *child,
const struct bts_struct *in)
{
int retval;
unsigned char bts_record[BTS_MAX_RECORD_SIZE];
if (!child->thread.ds_area_msr)
return -ENXIO;
BUG_ON(BTS_MAX_RECORD_SIZE < bts_cfg.sizeof_bts);
retval = ds_write_bts((void *)child->thread.ds_area_msr, in);
if (retval)
return retval;
memset(bts_record, 0, bts_cfg.sizeof_bts);
switch (in->qualifier) {
case BTS_INVALID:
break;
return sizeof(*in);
}
case BTS_BRANCH:
bts_set(bts_record, bts_from, in->variant.lbr.from_ip);
bts_set(bts_record, bts_to, in->variant.lbr.to_ip);
break;
static int ptrace_bts_realloc(struct task_struct *child,
int size, int reduce_size)
{
unsigned long rlim, vm;
int ret, old_size;
case BTS_TASK_ARRIVES:
case BTS_TASK_DEPARTS:
bts_set(bts_record, bts_from, bts_escape);
bts_set(bts_record, bts_qual, in->qualifier);
bts_set(bts_record, bts_jiffies, in->variant.jiffies);
break;
if (size < 0)
default:
return -EINVAL;
old_size = ds_get_bts_size((void *)child->thread.ds_area_msr);
if (old_size < 0)
return old_size;
ret = ds_free((void **)&child->thread.ds_area_msr);
if (ret < 0)
goto out;
size >>= PAGE_SHIFT;
old_size >>= PAGE_SHIFT;
current->mm->total_vm -= old_size;
current->mm->locked_vm -= old_size;
if (size == 0)
goto out;
rlim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
vm = current->mm->total_vm + size;
if (rlim < vm) {
ret = -ENOMEM;
if (!reduce_size)
goto out;
size = rlim - current->mm->total_vm;
if (size <= 0)
goto out;
}
rlim = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
vm = current->mm->locked_vm + size;
if (rlim < vm) {
ret = -ENOMEM;
if (!reduce_size)
goto out;
size = rlim - current->mm->locked_vm;
if (size <= 0)
goto out;
}
ret = ds_allocate((void **)&child->thread.ds_area_msr,
size << PAGE_SHIFT);
if (ret < 0)
goto out;
current->mm->total_vm += size;
current->mm->locked_vm += size;
out:
if (child->thread.ds_area_msr)
set_tsk_thread_flag(child, TIF_DS_AREA_MSR);
else
clear_tsk_thread_flag(child, TIF_DS_AREA_MSR);
return ret;
/* The writing task will be the switched-to task on a context
* switch. It needs to write into the switched-from task's BTS
* buffer. */
return ds_unchecked_write_bts(child, bts_record, bts_cfg.sizeof_bts);
}
void ptrace_bts_take_timestamp(struct task_struct *tsk,
@@ -839,7 +874,66 @@ void ptrace_bts_take_timestamp(struct task_struct *tsk,
ptrace_bts_write_record(tsk, &rec);
}
#endif /* X86_BTS */
static const struct bts_configuration bts_cfg_netburst = {
.sizeof_bts = sizeof(long) * 3,
.sizeof_field = sizeof(long),
.debugctl_mask = (1<<2)|(1<<3)|(1<<5)
};
static const struct bts_configuration bts_cfg_pentium_m = {
.sizeof_bts = sizeof(long) * 3,
.sizeof_field = sizeof(long),
.debugctl_mask = (1<<6)|(1<<7)
};
static const struct bts_configuration bts_cfg_core2 = {
.sizeof_bts = 8 * 3,
.sizeof_field = 8,
.debugctl_mask = (1<<6)|(1<<7)|(1<<9)
};
static inline void bts_configure(const struct bts_configuration *cfg)
{
bts_cfg = *cfg;
}
void __cpuinit ptrace_bts_init_intel(struct cpuinfo_x86 *c)
{
switch (c->x86) {
case 0x6:
switch (c->x86_model) {
case 0xD:
case 0xE: /* Pentium M */
bts_configure(&bts_cfg_pentium_m);
break;
case 0xF: /* Core2 */
case 0x1C: /* Atom */
bts_configure(&bts_cfg_core2);
break;
default:
/* sorry, don't know about them */
break;
}
break;
case 0xF:
switch (c->x86_model) {
case 0x0:
case 0x1:
case 0x2: /* Netburst */
bts_configure(&bts_cfg_netburst);
break;
default:
/* sorry, don't know about them */
break;
}
break;
default:
/* sorry, don't know about them */
break;
}
}
#endif /* CONFIG_X86_PTRACE_BTS */
/*
* Called by kernel/ptrace.c when detaching..
@@ -852,15 +946,15 @@ void ptrace_disable(struct task_struct *child)
#ifdef TIF_SYSCALL_EMU
clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif
if (child->thread.ds_area_msr) {
#ifdef X86_BTS
ptrace_bts_realloc(child, 0, 0);
#endif
child->thread.debugctlmsr &= ~ds_debugctl_mask();
if (!child->thread.debugctlmsr)
clear_tsk_thread_flag(child, TIF_DEBUGCTLMSR);
clear_tsk_thread_flag(child, TIF_BTS_TRACE_TS);
}
#ifdef CONFIG_X86_PTRACE_BTS
(void)ds_release_bts(child);
child->thread.debugctlmsr &= ~bts_cfg.debugctl_mask;
if (!child->thread.debugctlmsr)
clear_tsk_thread_flag(child, TIF_DEBUGCTLMSR);
clear_tsk_thread_flag(child, TIF_BTS_TRACE_TS);
#endif /* CONFIG_X86_PTRACE_BTS */
}
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
@@ -980,7 +1074,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
/*
* These bits need more cooking - not enabled yet:
*/
#ifdef X86_BTS
#ifdef CONFIG_X86_PTRACE_BTS
case PTRACE_BTS_CONFIG:
ret = ptrace_bts_config
(child, data, (struct ptrace_bts_config __user *)addr);
@@ -992,7 +1086,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
break;
case PTRACE_BTS_SIZE:
ret = ptrace_bts_get_size(child);
ret = ds_get_bts_index(child, /* pos = */ NULL);
break;
case PTRACE_BTS_GET:
@@ -1001,14 +1095,14 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
break;
case PTRACE_BTS_CLEAR:
ret = ptrace_bts_clear(child);
ret = ds_clear_bts(child);
break;
case PTRACE_BTS_DRAIN:
ret = ptrace_bts_drain
(child, data, (struct bts_struct __user *) addr);
break;
#endif
#endif /* CONFIG_X86_PTRACE_BTS */
default:
ret = ptrace_request(child, request, addr, data);