linux-kernel-test/arch/sh/kernel/process.c
Paul Mundt 0ea820cf9b sh: Move over to dynamically allocated FPU context.
This follows the x86 xstate changes and implements a task_xstate slab
cache that is dynamically sized to match one of hard FP/soft FP/FPU-less.
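
For reference, the xstate area is a union of the hard-FP and soft-FP
register files, so the slab object only needs to be as large as whichever
variant the CPU in question actually uses. A sketch of the shape, assuming
the layout in the sh processor headers (member names here are illustrative):

	union thread_xstate {
		struct sh_fpu_hard_struct hardfpu;
		struct sh_fpu_soft_struct softfpu;
	};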

This also tidies up and consolidates some of the SH-2A/SH-4 FPU
fragmentation. FPU state restorers are now defined in common code, and the
init_fpu()/fpu_init() mess has been reworked to follow the x86 convention.
The fpu_init() register initialization has been replaced by xstate setup
followed by writing out to hardware via the standard restore path.
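
Roughly, the reworked init_fpu() now has the following shape (a simplified
sketch, not the verbatim code; the hardfpu member and FPSCR_INIT are assumed
from the sh FPU headers):

	int init_fpu(struct task_struct *tsk)
	{
		/* First FPU use: allocate the xstate area from the slab cache. */
		if (!tsk->thread.xstate) {
			tsk->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
							      GFP_KERNEL);
			if (!tsk->thread.xstate)
				return -ENOMEM;
		}

		/*
		 * Build a sane initial register image in memory; the standard
		 * restore path then writes it out to the hardware.
		 */
		memset(tsk->thread.xstate, 0, xstate_size);
		tsk->thread.xstate->hardfpu.fpscr = FPSCR_INIT;

		return 0;
	}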

As init_fpu() now performs a slab allocation, a secondary lighter-weight
restorer is also introduced for the context switch path.
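
The split is roughly: the trap/fault path may have to allocate (and hence
sleep), while the context-switch path can assume the xstate area already
exists and only needs to reload registers. A sketch with illustrative
helper names (not necessarily the exact ones used in the tree):

	/* Trap path: first use may allocate, so this can sleep. */
	void fpu_state_restore(struct pt_regs *regs)
	{
		struct task_struct *tsk = current;

		if (!tsk_used_math(tsk) && init_fpu(tsk))
			return;				/* allocation failed */

		restore_fpu(tsk);			/* write xstate to the FPU */
		task_thread_info(tsk)->status |= TS_USEDFPU;
	}

	/* Context-switch path: xstate is known to exist, just reload it. */
	void __fpu_state_restore(void)
	{
		struct task_struct *tsk = current;

		restore_fpu(tsk);
		task_thread_info(tsk)->status |= TS_USEDFPU;
	}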

In the future the DSP state will be rolled in here, too.

More work remains for math emulation and the SH-5 FPU, which presently
uses its own special (UP-only) interfaces.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
2010-01-13 12:51:40 +09:00

#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched.h>

/* Slab cache backing the dynamically sized per-task FPU/xstate area. */
struct kmem_cache *task_xstate_cachep = NULL;
unsigned int xstate_size;

int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	*dst = *src;

	if (src->thread.xstate) {
		dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
						      GFP_KERNEL);
		if (!dst->thread.xstate)
			return -ENOMEM;
		memcpy(dst->thread.xstate, src->thread.xstate, xstate_size);
	}

	return 0;
}

void free_thread_xstate(struct task_struct *tsk)
{
	if (tsk->thread.xstate) {
		kmem_cache_free(task_xstate_cachep, tsk->thread.xstate);
		tsk->thread.xstate = NULL;
	}
}
#if THREAD_SHIFT < PAGE_SHIFT
static struct kmem_cache *thread_info_cache;

struct thread_info *alloc_thread_info(struct task_struct *tsk)
{
	struct thread_info *ti;

	ti = kmem_cache_alloc(thread_info_cache, GFP_KERNEL);
	if (unlikely(ti == NULL))
		return NULL;
#ifdef CONFIG_DEBUG_STACK_USAGE
	memset(ti, 0, THREAD_SIZE);
#endif
	return ti;
}

void free_thread_info(struct thread_info *ti)
{
	free_thread_xstate(ti->task);
	kmem_cache_free(thread_info_cache, ti);
}

void thread_info_cache_init(void)
{
	thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
					      THREAD_SIZE, SLAB_PANIC, NULL);
}

#else
struct thread_info *alloc_thread_info(struct task_struct *tsk)
{
#ifdef CONFIG_DEBUG_STACK_USAGE
	gfp_t mask = GFP_KERNEL | __GFP_ZERO;
#else
	gfp_t mask = GFP_KERNEL;
#endif
	return (struct thread_info *)__get_free_pages(mask, THREAD_SIZE_ORDER);
}

void free_thread_info(struct thread_info *ti)
{
	free_thread_xstate(ti->task);
	free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
}
#endif /* THREAD_SHIFT < PAGE_SHIFT */

void arch_task_cache_init(void)
{
	if (!xstate_size)
		return;

	task_xstate_cachep = kmem_cache_create("task_xstate", xstate_size,
					       __alignof__(union thread_xstate),
					       SLAB_PANIC | SLAB_NOTRACK, NULL);
}

#ifdef CONFIG_SH_FPU_EMU
# define HAVE_SOFTFP	1
#else
# define HAVE_SOFTFP	0
#endif

/*
 * Size the xstate area for hard FP, soft-FP emulation, or no FPU at all.
 */
void init_thread_xstate(void)
{
	if (boot_cpu_data.flags & CPU_HAS_FPU)
		xstate_size = sizeof(struct sh_fpu_hard_struct);
	else if (HAVE_SOFTFP)
		xstate_size = sizeof(struct sh_fpu_soft_struct);
	else
		xstate_size = 0;
}