Diffstat (limited to 'include/linux/sched.h')
-rw-r--r-- | include/linux/sched.h | 60
1 file changed, 57 insertions, 3 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 03c238088ae..c5d3f847ca8 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -158,6 +158,8 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 }
 #endif
 
+extern unsigned long long time_sync_thresh;
+
 /*
  * Task state bitmask. NOTE! These bits are also
  * encoded in fs/proc/array.c: get_task_state().
@@ -764,7 +766,6 @@ struct sched_domain {
 	struct sched_domain *child;	/* bottom domain must be null terminated */
 	struct sched_group *groups;	/* the balancing groups of the domain */
 	cpumask_t span;			/* span of all CPUs in this domain */
-	int first_cpu;			/* cache of the first cpu in this domain */
 	unsigned long min_interval;	/* Minimum balance interval ms */
 	unsigned long max_interval;	/* Maximum balance interval ms */
 	unsigned int busy_factor;	/* less balancing by factor if busy */
@@ -1551,6 +1552,35 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 
 extern unsigned long long sched_clock(void);
 
+#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+static inline void sched_clock_init(void)
+{
+}
+
+static inline u64 sched_clock_cpu(int cpu)
+{
+	return sched_clock();
+}
+
+static inline void sched_clock_tick(void)
+{
+}
+
+static inline void sched_clock_idle_sleep_event(void)
+{
+}
+
+static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
+{
+}
+#else
+extern void sched_clock_init(void);
+extern u64 sched_clock_cpu(int cpu);
+extern void sched_clock_tick(void);
+extern void sched_clock_idle_sleep_event(void);
+extern void sched_clock_idle_wakeup_event(u64 delta_ns);
+#endif
+
 /*
  * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
  * clock constructed from sched_clock():
@@ -1817,7 +1847,9 @@ extern void exit_thread(void);
 extern void exit_files(struct task_struct *);
 extern void __cleanup_signal(struct signal_struct *);
 extern void __cleanup_sighand(struct sighand_struct *);
+
 extern void exit_itimers(struct signal_struct *);
+extern void flush_itimer_signals(void);
 
 extern NORET_TYPE void do_group_exit(int);
 
@@ -1977,6 +2009,11 @@ static inline void clear_tsk_need_resched(struct task_struct *tsk)
 	clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
 }
 
+static inline int test_tsk_need_resched(struct task_struct *tsk)
+{
+	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
+}
+
 static inline int signal_pending(struct task_struct *p)
 {
 	return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
@@ -1989,6 +2026,19 @@ static inline int fatal_signal_pending(struct task_struct *p)
 	return signal_pending(p) && __fatal_signal_pending(p);
 }
 
+static inline int signal_pending_state(long state, struct task_struct *p)
+{
+	if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
+		return 0;
+	if (!signal_pending(p))
+		return 0;
+
+	if (state & (__TASK_STOPPED | __TASK_TRACED))
+		return 0;
+
+	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
+}
+
 static inline int need_resched(void)
 {
 	return unlikely(test_thread_flag(TIF_NEED_RESCHED));
@@ -2001,13 +2051,13 @@ static inline int need_resched(void)
  * cond_resched_lock() will drop the spinlock before scheduling,
  * cond_resched_softirq() will enable bhs before scheduling.
 */
-#ifdef CONFIG_PREEMPT
+extern int _cond_resched(void);
+#ifdef CONFIG_PREEMPT_BKL
 static inline int cond_resched(void)
 {
 	return 0;
 }
 #else
-extern int _cond_resched(void);
 static inline int cond_resched(void)
 {
 	return _cond_resched();
 }
@@ -2015,6 +2065,10 @@ static inline int cond_resched(void)
 #endif
 extern int cond_resched_lock(spinlock_t * lock);
 extern int cond_resched_softirq(void);
+static inline int cond_resched_bkl(void)
+{
+	return _cond_resched();
+}
 
 /*
  * Does a critical section need to be broken due to another
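
The sched_clock block above gives every configuration the same five entry points: kernels without CONFIG_HAVE_UNSTABLE_SCHED_CLOCK get inline stubs that fall back to plain sched_clock(), while kernels that select it get the real per-CPU implementation. A minimal sketch of a caller, assuming a hypothetical do_work() and a non-preemptible context (required for smp_processor_id()):

	/* Sketch only: do_work() is a placeholder, not a kernel API.
	 * On !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK kernels both reads are
	 * exactly sched_clock(); otherwise they use the per-CPU clock.
	 */
	static u64 time_do_work(void)
	{
		int cpu = smp_processor_id();	/* caller must be non-preemptible */
		u64 t0, t1;

		t0 = sched_clock_cpu(cpu);
		do_work();
		t1 = sched_clock_cpu(cpu);

		return t1 - t0;			/* elapsed nanoseconds */
	}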
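signal_pending_state() folds the "may this sleep be interrupted?" test into one helper: only TASK_INTERRUPTIBLE or TASK_WAKEKILL sleeps can be aborted, stopped/traced states never are, and a TASK_WAKEKILL-only sleep reacts solely to fatal signals. A sketch of the kind of call site this is built for; the function name here is illustrative, the real user is the core scheduler when it is about to dequeue a task that is going to sleep:

	/* Sketch: 'rq' and 'deactivate_task' are scheduler-internal;
	 * this wrapper function is hypothetical.
	 */
	static void try_to_deactivate(struct rq *rq, struct task_struct *prev)
	{
		/* A signal relevant to prev->state aborts the sleep: the task
		 * stays runnable instead of being taken off the runqueue.
		 */
		if (unlikely(signal_pending_state(prev->state, prev)))
			prev->state = TASK_RUNNING;
		else
			deactivate_task(rq, prev, 1);
	}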
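The last two hunks hoist the _cond_resched() declaration out of the #ifdef so that the new cond_resched_bkl() helper can reach it in every configuration. Usage of cond_resched() itself is unchanged; a sketch with a hypothetical item type and process_item():

	/* Sketch: struct item and process_item() are illustrative only. */
	struct item {
		struct list_head node;
		/* ... payload ... */
	};

	static void process_all(struct list_head *items)
	{
		struct item *it;

		list_for_each_entry(it, items, node) {
			process_item(it);
			cond_resched();	/* offer to reschedule each iteration */
		}
	}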