Merge branches 'sched/rt' and 'sched/urgent' into sched/core
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -293,6 +293,9 @@ extern void sched_show_task(struct task_struct *p);
 extern void softlockup_tick(void);
 extern void touch_softlockup_watchdog(void);
 extern void touch_all_softlockup_watchdogs(void);
+extern int proc_dosoftlockup_thresh(struct ctl_table *table, int write,
+				    struct file *filp, void __user *buffer,
+				    size_t *lenp, loff_t *ppos);
 extern unsigned int softlockup_panic;
 extern unsigned long sysctl_hung_task_check_count;
 extern unsigned long sysctl_hung_task_timeout_secs;
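Note: the three added lines declare a /proc handler for the softlockup threshold. A minimal sketch of how a handler with this signature is typically wired into a sysctl table in kernels of this era; the table name and the use of softlockup_thresh as the backing variable are assumptions, not part of this diff:

/* Sketch only: illustrative ctl_table entry for the new handler. */
static struct ctl_table softlockup_sketch_table[] = {
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "softlockup_thresh",
		.data		= &softlockup_thresh,	/* assumed backing int */
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dosoftlockup_thresh,
	},
	{ .ctl_name = 0 }	/* table terminator */
};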
@@ -440,6 +443,7 @@ struct pacct_struct {
  * @utime:		time spent in user mode, in &cputime_t units
  * @stime:		time spent in kernel mode, in &cputime_t units
  * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
+ * @lock:		lock for fields in this struct
  *
  * This structure groups together three kinds of CPU time that are
  * tracked for threads and thread groups.  Most things considering
@@ -450,6 +454,7 @@ struct task_cputime {
 	cputime_t utime;
 	cputime_t stime;
 	unsigned long long sum_exec_runtime;
+	spinlock_t lock;
 };
 /* Alternate field names when used to cache expirations. */
 #define prof_exp	stime
@@ -465,7 +470,7 @@ struct task_cputime {
  * used for thread group CPU clock calculations.
  */
 struct thread_group_cputime {
-	struct task_cputime *totals;
+	struct task_cputime totals;
 };
 
 /*
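Note: with totals now embedded in signal_struct instead of allocated per-CPU, writers serialize on the spinlock that the earlier hunks add to task_cputime. A minimal sketch of the resulting update pattern; the helper name is an assumption for illustration:

/* Sketch: update one field of the shared group totals under its lock. */
static inline void example_account_group_utime(struct signal_struct *sig,
					       cputime_t delta)
{
	struct task_cputime *totals = &sig->cputime.totals;
	unsigned long flags;

	spin_lock_irqsave(&totals->lock, flags);
	totals->utime = cputime_add(totals->utime, delta);
	spin_unlock_irqrestore(&totals->lock, flags);
}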
@@ -626,7 +631,6 @@ struct user_struct {
 	atomic_t inotify_devs;	/* How many inotify devs does this user have opened? */
 #endif
 #ifdef CONFIG_EPOLL
-	atomic_t epoll_devs;	/* The number of epoll descriptors currently open */
 	atomic_t epoll_watches;	/* The number of file descriptors currently watched */
 #endif
 #ifdef CONFIG_POSIX_MQUEUE
@@ -977,6 +981,7 @@ struct sched_class {
 			      struct rq *busiest, struct sched_domain *sd,
 			      enum cpu_idle_type idle);
 	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
+	int (*needs_post_schedule) (struct rq *this_rq);
 	void (*post_schedule) (struct rq *this_rq);
 	void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);
 
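Note: the new needs_post_schedule hook lets a scheduling class tell the core whether a post_schedule pass is worth taking at all. A sketch of the caller-side pattern, not the exact sched.c code; the function name is assumed:

/* Sketch: probe the class before switching, run post_schedule only
 * if the class asked for it. */
static inline void example_finish_switch(struct rq *rq)
{
	int post = 0;

	if (current->sched_class->needs_post_schedule)
		post = current->sched_class->needs_post_schedule(rq);

	/* ... context-switch bookkeeping elided ... */

	if (post)
		current->sched_class->post_schedule(rq);
}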
@@ -1146,6 +1151,7 @@ struct task_struct {
 #endif
 
 	struct list_head tasks;
+	struct plist_node pushable_tasks;
 
 	struct mm_struct *mm, *active_mm;
 
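Note: pushable_tasks is a priority-list node, which lets the RT class keep a prio-ordered list of candidate tasks to push to other CPUs. A minimal sketch of the plist calls involved; the plist_head argument stands in for a per-runqueue list head and is an assumption here:

#include <linux/plist.h>

/* Sketch: (re)queue a task on a prio-ordered pushable list. */
static void example_enqueue_pushable(struct plist_head *head,
				     struct task_struct *p)
{
	plist_del(&p->pushable_tasks, head);	/* harmless if not queued */
	plist_node_init(&p->pushable_tasks, p->prio);
	plist_add(&p->pushable_tasks, head);
}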
@@ -2183,24 +2189,30 @@ static inline int spin_needbreak(spinlock_t *lock)
  * Thread group CPU time accounting.
  */
 
-extern int thread_group_cputime_alloc(struct task_struct *);
-extern void thread_group_cputime(struct task_struct *, struct task_cputime *);
+static inline
+void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
+{
+	struct task_cputime *totals = &tsk->signal->cputime.totals;
+	unsigned long flags;
+
+	spin_lock_irqsave(&totals->lock, flags);
+	*times = *totals;
+	spin_unlock_irqrestore(&totals->lock, flags);
+}
 
 static inline void thread_group_cputime_init(struct signal_struct *sig)
 {
-	sig->cputime.totals = NULL;
-}
+	sig->cputime.totals = (struct task_cputime){
+		.utime = cputime_zero,
+		.stime = cputime_zero,
+		.sum_exec_runtime = 0,
+	};
 
-static inline int thread_group_cputime_clone_thread(struct task_struct *curr)
-{
-	if (curr->signal->cputime.totals)
-		return 0;
-	return thread_group_cputime_alloc(curr);
+	spin_lock_init(&sig->cputime.totals.lock);
 }
 
 static inline void thread_group_cputime_free(struct signal_struct *sig)
 {
-	free_percpu(sig->cputime.totals);
 }
 
 /*
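Note: callers of thread_group_cputime() see no API change; the inline still fills a task_cputime snapshot, it just copies the shared struct under its lock instead of summing per-CPU data. A usage sketch, with an assumed function name:

/* Sketch: snapshot and print a group's accumulated CPU time. */
static void example_dump_group_time(struct task_struct *tsk)
{
	struct task_cputime times;

	thread_group_cputime(tsk, &times);
	printk(KERN_DEBUG "group runtime: %llu ns\n",
	       (unsigned long long)times.sum_exec_runtime);
}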