Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Kconfig.preempt | 3
-rw-r--r--  kernel/Makefile | 2
-rw-r--r--  kernel/acct.c | 66
-rw-r--r--  kernel/audit.c | 14
-rw-r--r--  kernel/auditfilter.c | 4
-rw-r--r--  kernel/auditsc.c | 12
-rw-r--r--  kernel/capability.c | 199
-rw-r--r--  kernel/compat.c | 117
-rw-r--r--  kernel/cpu.c | 2
-rw-r--r--  kernel/cpuset.c | 197
-rw-r--r--  kernel/delayacct.c | 6
-rw-r--r--  kernel/dma.c | 8
-rw-r--r--  kernel/exec_domain.c | 2
-rw-r--r--  kernel/exit.c | 116
-rw-r--r--  kernel/fork.c | 42
-rw-r--r--  kernel/futex.c | 7
-rw-r--r--  kernel/hrtimer.c | 29
-rw-r--r--  kernel/irq/chip.c | 3
-rw-r--r--  kernel/irq/manage.c | 31
-rw-r--r--  kernel/itimer.c | 4
-rw-r--r--  kernel/kexec.c | 117
-rw-r--r--  kernel/kprobes.c | 62
-rw-r--r--  kernel/ksysfs.c | 10
-rw-r--r--  kernel/lockdep.c | 26
-rw-r--r--  kernel/lockdep_proc.c | 61
-rw-r--r--  kernel/module.c | 168
-rw-r--r--  kernel/mutex.c | 35
-rw-r--r--  kernel/nsproxy.c | 3
-rw-r--r--  kernel/panic.c | 10
-rw-r--r--  kernel/params.c | 25
-rw-r--r--  kernel/posix-timers.c | 20
-rw-r--r--  kernel/power/Kconfig | 11
-rw-r--r--  kernel/power/disk.c | 156
-rw-r--r--  kernel/power/main.c | 48
-rw-r--r--  kernel/power/power.h | 21
-rw-r--r--  kernel/power/process.c | 141
-rw-r--r--  kernel/power/snapshot.c | 53
-rw-r--r--  kernel/power/swsusp.c | 33
-rw-r--r--  kernel/power/user.c | 4
-rw-r--r--  kernel/printk.c | 127
-rw-r--r--  kernel/profile.c | 6
-rw-r--r--  kernel/ptrace.c | 13
-rw-r--r--  kernel/rcupdate.c | 9
-rw-r--r--  kernel/rcutorture.c | 10
-rw-r--r--  kernel/relay.c | 6
-rw-r--r--  kernel/resource.c | 26
-rw-r--r--  kernel/rtmutex-debug.c | 7
-rw-r--r--  kernel/sched.c | 195
-rw-r--r--  kernel/sched_debug.c | 2
-rw-r--r--  kernel/sched_fair.c | 6
-rw-r--r--  kernel/sched_stats.h | 8
-rw-r--r--  kernel/signal.c | 25
-rw-r--r--  kernel/softlockup.c | 54
-rw-r--r--  kernel/sys.c | 1
-rw-r--r--  kernel/sys_ni.c | 4
-rw-r--r--  kernel/sysctl.c | 328
-rw-r--r--  kernel/sysctl_check.c | 1588
-rw-r--r--  kernel/taskstats.c | 1
-rw-r--r--  kernel/time.c | 21
-rw-r--r--  kernel/time/tick-broadcast.c | 35
-rw-r--r--  kernel/time/tick-sched.c | 2
-rw-r--r--  kernel/time/timekeeping.c | 12
-rw-r--r--  kernel/timer.c | 9
-rw-r--r--  kernel/tsacct.c | 4
-rw-r--r--  kernel/user.c | 35
65 files changed, 3258 insertions, 1144 deletions
diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
index 6b066632e40..c64ce9c1420 100644
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -63,6 +63,3 @@ config PREEMPT_BKL
Say Y here if you are building a kernel for a desktop system.
Say N if you are unsure.
-config PREEMPT_NOTIFIERS
- bool
-
diff --git a/kernel/Makefile b/kernel/Makefile
index 2a999836ca1..d63fbb18798 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -9,7 +9,7 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o profile.o \
rcupdate.o extable.o params.o posix-timers.o \
kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
hrtimer.o rwsem.o latency.o nsproxy.o srcu.o die_notifier.o \
- utsname.o
+ utsname.o sysctl_check.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-y += time/
diff --git a/kernel/acct.c b/kernel/acct.c
index 24f0f8b2ba7..fce53d8df8a 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -329,16 +329,16 @@ static comp_t encode_comp_t(unsigned long value)
}
/*
- * If we need to round up, do it (and handle overflow correctly).
- */
+ * If we need to round up, do it (and handle overflow correctly).
+ */
if (rnd && (++value > MAXFRACT)) {
value >>= EXPSIZE;
exp++;
}
/*
- * Clean it up and polish it off.
- */
+ * Clean it up and polish it off.
+ */
exp <<= MANTSIZE; /* Shift the exponent into place */
exp += value; /* and add on the mantissa. */
return exp;
@@ -361,30 +361,30 @@ static comp_t encode_comp_t(unsigned long value)
static comp2_t encode_comp2_t(u64 value)
{
- int exp, rnd;
-
- exp = (value > (MAXFRACT2>>1));
- rnd = 0;
- while (value > MAXFRACT2) {
- rnd = value & 1;
- value >>= 1;
- exp++;
- }
-
- /*
- * If we need to round up, do it (and handle overflow correctly).
- */
- if (rnd && (++value > MAXFRACT2)) {
- value >>= 1;
- exp++;
- }
-
- if (exp > MAXEXP2) {
- /* Overflow. Return largest representable number instead. */
- return (1ul << (MANTSIZE2+EXPSIZE2-1)) - 1;
- } else {
- return (value & (MAXFRACT2>>1)) | (exp << (MANTSIZE2-1));
- }
+ int exp, rnd;
+
+ exp = (value > (MAXFRACT2>>1));
+ rnd = 0;
+ while (value > MAXFRACT2) {
+ rnd = value & 1;
+ value >>= 1;
+ exp++;
+ }
+
+ /*
+ * If we need to round up, do it (and handle overflow correctly).
+ */
+ if (rnd && (++value > MAXFRACT2)) {
+ value >>= 1;
+ exp++;
+ }
+
+ if (exp > MAXEXP2) {
+ /* Overflow. Return largest representable number instead. */
+ return (1ul << (MANTSIZE2+EXPSIZE2-1)) - 1;
+ } else {
+ return (value & (MAXFRACT2>>1)) | (exp << (MANTSIZE2-1));
+ }
}
#endif
@@ -501,14 +501,14 @@ static void do_acct_process(struct file *file)
ac.ac_swaps = encode_comp_t(0);
/*
- * Kernel segment override to datasegment and write it
- * to the accounting file.
- */
+ * Kernel segment override to datasegment and write it
+ * to the accounting file.
+ */
fs = get_fs();
set_fs(KERNEL_DS);
/*
- * Accounting records are not subject to resource limits.
- */
+ * Accounting records are not subject to resource limits.
+ */
flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
file->f_op->write(file, (char *)&ac,
diff --git a/kernel/audit.c b/kernel/audit.c
index 2924251a654..6977ea57a7e 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -664,11 +664,11 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
if (sid) {
if (selinux_sid_to_string(
sid, &ctx, &len)) {
- audit_log_format(ab,
+ audit_log_format(ab,
" ssid=%u", sid);
/* Maybe call audit_panic? */
} else
- audit_log_format(ab,
+ audit_log_format(ab,
" subj=%s", ctx);
kfree(ctx);
}
@@ -769,7 +769,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
sig_data->pid = audit_sig_pid;
memcpy(sig_data->ctx, ctx, len);
kfree(ctx);
- audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_SIGNAL_INFO,
+ audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_SIGNAL_INFO,
0, 0, sig_data, sizeof(*sig_data) + len);
kfree(sig_data);
break;
@@ -1005,7 +1005,7 @@ unsigned int audit_serial(void)
return ret;
}
-static inline void audit_get_stamp(struct audit_context *ctx,
+static inline void audit_get_stamp(struct audit_context *ctx,
struct timespec *t, unsigned int *serial)
{
if (ctx)
@@ -1056,7 +1056,7 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
if (gfp_mask & __GFP_WAIT)
reserve = 0;
else
- reserve = 5; /* Allow atomic callers to go up to five
+ reserve = 5; /* Allow atomic callers to go up to five
entries over the normal backlog limit */
while (audit_backlog_limit
@@ -1319,7 +1319,7 @@ void audit_log_d_path(struct audit_buffer *ab, const char *prefix,
if (IS_ERR(p)) { /* Should never happen since we send PATH_MAX */
/* FIXME: can we save some information here? */
audit_log_format(ab, "<too long>");
- } else
+ } else
audit_log_untrustedstring(ab, p);
kfree(path);
}
@@ -1365,7 +1365,7 @@ void audit_log_end(struct audit_buffer *ab)
* audit_log_vformat, and audit_log_end. It may be called
* in any context.
*/
-void audit_log(struct audit_context *ctx, gfp_t gfp_mask, int type,
+void audit_log(struct audit_context *ctx, gfp_t gfp_mask, int type,
const char *fmt, ...)
{
struct audit_buffer *ab;
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index 359645cff5b..df66a21fb36 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -1498,7 +1498,7 @@ int audit_receive_filter(int type, int pid, int uid, int seq, void *data,
* auditctl to read from it... which isn't ever going to
* happen if we're actually running in the context of auditctl
* trying to _send_ the stuff */
-
+
dest = kmalloc(sizeof(struct audit_netlink_list), GFP_KERNEL);
if (!dest)
return -ENOMEM;
@@ -1678,7 +1678,7 @@ int audit_filter_type(int type)
{
struct audit_entry *e;
int result = 0;
-
+
rcu_read_lock();
if (list_empty(&audit_filter_list[AUDIT_FILTER_TYPE]))
goto unlock_and_return;
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 04f3ffb8d9d..e19b5a33aed 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -45,7 +45,6 @@
#include <linux/init.h>
#include <asm/types.h>
#include <asm/atomic.h>
-#include <asm/types.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/mm.h>
@@ -321,7 +320,7 @@ static int audit_filter_rules(struct task_struct *tsk,
result = audit_comparator(tsk->personality, f->op, f->val);
break;
case AUDIT_ARCH:
- if (ctx)
+ if (ctx)
result = audit_comparator(ctx->arch, f->op, f->val);
break;
@@ -899,7 +898,7 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts
if (context->personality != PER_LINUX)
audit_log_format(ab, " per=%lx", context->personality);
if (context->return_valid)
- audit_log_format(ab, " success=%s exit=%ld",
+ audit_log_format(ab, " success=%s exit=%ld",
(context->return_valid==AUDITSC_SUCCESS)?"yes":"no",
context->return_code);
@@ -1136,8 +1135,8 @@ void audit_free(struct task_struct *tsk)
return;
/* Check for system calls that do not go through the exit
- * function (e.g., exit_group), then free context block.
- * We use GFP_ATOMIC here because we might be doing this
+ * function (e.g., exit_group), then free context block.
+ * We use GFP_ATOMIC here because we might be doing this
* in the context of the idle thread */
/* that can happen only if we are called from do_exit() */
if (context->in_syscall && context->auditable)
@@ -1317,7 +1316,7 @@ void __audit_getname(const char *name)
context->pwdmnt = mntget(current->fs->pwdmnt);
read_unlock(&current->fs->lock);
}
-
+
}
/* audit_putname - intercept a putname request
@@ -1525,6 +1524,7 @@ add_names:
context->names[idx].ino = (unsigned long)-1;
}
}
+EXPORT_SYMBOL_GPL(__audit_inode_child);
/**
* auditsc_get_stamp - get local copies of audit_context values
diff --git a/kernel/capability.c b/kernel/capability.c
index c8d3c776203..cbc5fd60c0f 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -3,9 +3,9 @@
*
* Copyright (C) 1997 Andrew Main <zefram@fysh.org>
*
- * Integrated into 2.1.97+, Andrew G. Morgan <morgan@transmeta.com>
+ * Integrated into 2.1.97+, Andrew G. Morgan <morgan@kernel.org>
* 30 May 2002: Cleanup, Robert M. Love <rml@tech9.net>
- */
+ */
#include <linux/capability.h>
#include <linux/mm.h>
@@ -14,12 +14,6 @@
#include <linux/syscalls.h>
#include <asm/uaccess.h>
-unsigned securebits = SECUREBITS_DEFAULT; /* systemwide security settings */
-kernel_cap_t cap_bset = CAP_INIT_EFF_SET;
-
-EXPORT_SYMBOL(securebits);
-EXPORT_SYMBOL(cap_bset);
-
/*
* This lock protects task->cap_* for all tasks including current.
* Locking rule: acquire this prior to tasklist_lock.
@@ -43,49 +37,49 @@ static DEFINE_SPINLOCK(task_capability_lock);
*/
asmlinkage long sys_capget(cap_user_header_t header, cap_user_data_t dataptr)
{
- int ret = 0;
- pid_t pid;
- __u32 version;
- struct task_struct *target;
- struct __user_cap_data_struct data;
-
- if (get_user(version, &header->version))
- return -EFAULT;
-
- if (version != _LINUX_CAPABILITY_VERSION) {
- if (put_user(_LINUX_CAPABILITY_VERSION, &header->version))
- return -EFAULT;
- return -EINVAL;
- }
+ int ret = 0;
+ pid_t pid;
+ __u32 version;
+ struct task_struct *target;
+ struct __user_cap_data_struct data;
+
+ if (get_user(version, &header->version))
+ return -EFAULT;
+
+ if (version != _LINUX_CAPABILITY_VERSION) {
+ if (put_user(_LINUX_CAPABILITY_VERSION, &header->version))
+ return -EFAULT;
+ return -EINVAL;
+ }
- if (get_user(pid, &header->pid))
- return -EFAULT;
+ if (get_user(pid, &header->pid))
+ return -EFAULT;
- if (pid < 0)
- return -EINVAL;
+ if (pid < 0)
+ return -EINVAL;
- spin_lock(&task_capability_lock);
- read_lock(&tasklist_lock);
+ spin_lock(&task_capability_lock);
+ read_lock(&tasklist_lock);
- if (pid && pid != current->pid) {
- target = find_task_by_pid(pid);
- if (!target) {
- ret = -ESRCH;
- goto out;
- }
- } else
- target = current;
+ if (pid && pid != current->pid) {
+ target = find_task_by_pid(pid);
+ if (!target) {
+ ret = -ESRCH;
+ goto out;
+ }
+ } else
+ target = current;
- ret = security_capget(target, &data.effective, &data.inheritable, &data.permitted);
+ ret = security_capget(target, &data.effective, &data.inheritable, &data.permitted);
out:
- read_unlock(&tasklist_lock);
- spin_unlock(&task_capability_lock);
+ read_unlock(&tasklist_lock);
+ spin_unlock(&task_capability_lock);
- if (!ret && copy_to_user(dataptr, &data, sizeof data))
- return -EFAULT;
+ if (!ret && copy_to_user(dataptr, &data, sizeof data))
+ return -EFAULT;
- return ret;
+ return ret;
}
/*
@@ -118,7 +112,7 @@ static inline int cap_set_pg(int pgrp_nr, kernel_cap_t *effective,
} while_each_pid_task(pgrp, PIDTYPE_PGID, g);
if (!found)
- ret = 0;
+ ret = 0;
return ret;
}
@@ -172,68 +166,68 @@ static inline int cap_set_all(kernel_cap_t *effective,
*/
asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data)
{
- kernel_cap_t inheritable, permitted, effective;
- __u32 version;
- struct task_struct *target;
- int ret;
- pid_t pid;
-
- if (get_user(version, &header->version))
- return -EFAULT;
-
- if (version != _LINUX_CAPABILITY_VERSION) {
- if (put_user(_LINUX_CAPABILITY_VERSION, &header->version))
- return -EFAULT;
- return -EINVAL;
- }
-
- if (get_user(pid, &header->pid))
- return -EFAULT;
-
- if (pid && pid != current->pid && !capable(CAP_SETPCAP))
- return -EPERM;
-
- if (copy_from_user(&effective, &data->effective, sizeof(effective)) ||
- copy_from_user(&inheritable, &data->inheritable, sizeof(inheritable)) ||
- copy_from_user(&permitted, &data->permitted, sizeof(permitted)))
- return -EFAULT;
-
- spin_lock(&task_capability_lock);
- read_lock(&tasklist_lock);
-
- if (pid > 0 && pid != current->pid) {
- target = find_task_by_pid(pid);
- if (!target) {
- ret = -ESRCH;
- goto out;
- }
- } else
- target = current;
-
- ret = 0;
-
- /* having verified that the proposed changes are legal,
- we now put them into effect. */
- if (pid < 0) {
- if (pid == -1) /* all procs other than current and init */
- ret = cap_set_all(&effective, &inheritable, &permitted);
-
- else /* all procs in process group */
- ret = cap_set_pg(-pid, &effective, &inheritable,
- &permitted);
- } else {
- ret = security_capset_check(target, &effective, &inheritable,
- &permitted);
- if (!ret)
- security_capset_set(target, &effective, &inheritable,
- &permitted);
- }
+ kernel_cap_t inheritable, permitted, effective;
+ __u32 version;
+ struct task_struct *target;
+ int ret;
+ pid_t pid;
+
+ if (get_user(version, &header->version))
+ return -EFAULT;
+
+ if (version != _LINUX_CAPABILITY_VERSION) {
+ if (put_user(_LINUX_CAPABILITY_VERSION, &header->version))
+ return -EFAULT;
+ return -EINVAL;
+ }
+
+ if (get_user(pid, &header->pid))
+ return -EFAULT;
+
+ if (pid && pid != current->pid && !capable(CAP_SETPCAP))
+ return -EPERM;
+
+ if (copy_from_user(&effective, &data->effective, sizeof(effective)) ||
+ copy_from_user(&inheritable, &data->inheritable, sizeof(inheritable)) ||
+ copy_from_user(&permitted, &data->permitted, sizeof(permitted)))
+ return -EFAULT;
+
+ spin_lock(&task_capability_lock);
+ read_lock(&tasklist_lock);
+
+ if (pid > 0 && pid != current->pid) {
+ target = find_task_by_pid(pid);
+ if (!target) {
+ ret = -ESRCH;
+ goto out;
+ }
+ } else
+ target = current;
+
+ ret = 0;
+
+ /* having verified that the proposed changes are legal,
+ we now put them into effect. */
+ if (pid < 0) {
+ if (pid == -1) /* all procs other than current and init */
+ ret = cap_set_all(&effective, &inheritable, &permitted);
+
+ else /* all procs in process group */
+ ret = cap_set_pg(-pid, &effective, &inheritable,
+ &permitted);
+ } else {
+ ret = security_capset_check(target, &effective, &inheritable,
+ &permitted);
+ if (!ret)
+ security_capset_set(target, &effective, &inheritable,
+ &permitted);
+ }
out:
- read_unlock(&tasklist_lock);
- spin_unlock(&task_capability_lock);
+ read_unlock(&tasklist_lock);
+ spin_unlock(&task_capability_lock);
- return ret;
+ return ret;
}
int __capable(struct task_struct *t, int cap)
@@ -244,7 +238,6 @@ int __capable(struct task_struct *t, int cap)
}
return 0;
}
-EXPORT_SYMBOL(__capable);
int capable(int cap)
{
diff --git a/kernel/compat.c b/kernel/compat.c
index 3bae3742c2a..42a1ed4b61b 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -40,62 +40,27 @@ int put_compat_timespec(const struct timespec *ts, struct compat_timespec __user
__put_user(ts->tv_nsec, &cts->tv_nsec)) ? -EFAULT : 0;
}
-static long compat_nanosleep_restart(struct restart_block *restart)
-{
- unsigned long expire = restart->arg0, now = jiffies;
- struct compat_timespec __user *rmtp;
-
- /* Did it expire while we handled signals? */
- if (!time_after(expire, now))
- return 0;
-
- expire = schedule_timeout_interruptible(expire - now);
- if (expire == 0)
- return 0;
-
- rmtp = (struct compat_timespec __user *)restart->arg1;
- if (rmtp) {
- struct compat_timespec ct;
- struct timespec t;
-
- jiffies_to_timespec(expire, &t);
- ct.tv_sec = t.tv_sec;
- ct.tv_nsec = t.tv_nsec;
- if (copy_to_user(rmtp, &ct, sizeof(ct)))
- return -EFAULT;
- }
- /* The 'restart' block is already filled in */
- return -ERESTART_RESTARTBLOCK;
-}
-
asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
- struct compat_timespec __user *rmtp)
+ struct compat_timespec __user *rmtp)
{
- struct timespec t;
- struct restart_block *restart;
- unsigned long expire;
+ struct timespec tu, rmt;
+ long ret;
- if (get_compat_timespec(&t, rqtp))
+ if (get_compat_timespec(&tu, rqtp))
return -EFAULT;
- if ((t.tv_nsec >= 1000000000L) || (t.tv_nsec < 0) || (t.tv_sec < 0))
+ if (!timespec_valid(&tu))
return -EINVAL;
- expire = timespec_to_jiffies(&t) + (t.tv_sec || t.tv_nsec);
- expire = schedule_timeout_interruptible(expire);
- if (expire == 0)
- return 0;
+ ret = hrtimer_nanosleep(&tu, rmtp ? &rmt : NULL, HRTIMER_MODE_REL,
+ CLOCK_MONOTONIC);
- if (rmtp) {
- jiffies_to_timespec(expire, &t);
- if (put_compat_timespec(&t, rmtp))
+ if (ret && rmtp) {
+ if (put_compat_timespec(&rmt, rmtp))
return -EFAULT;
}
- restart = &current_thread_info()->restart_block;
- restart->fn = compat_nanosleep_restart;
- restart->arg0 = jiffies + expire;
- restart->arg1 = (unsigned long) rmtp;
- return -ERESTART_RESTARTBLOCK;
+
+ return ret;
}
static inline long get_compat_itimerval(struct itimerval *o,
@@ -247,8 +212,8 @@ asmlinkage long compat_sys_setrlimit(unsigned int resource,
int ret;
mm_segment_t old_fs = get_fs ();
- if (resource >= RLIM_NLIMITS)
- return -EINVAL;
+ if (resource >= RLIM_NLIMITS)
+ return -EINVAL;
if (!access_ok(VERIFY_READ, rlim, sizeof(*rlim)) ||
__get_user(r.rlim_cur, &rlim->rlim_cur) ||
@@ -477,21 +442,21 @@ asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, unsigned int len,
int get_compat_itimerspec(struct itimerspec *dst,
const struct compat_itimerspec __user *src)
-{
+{
if (get_compat_timespec(&dst->it_interval, &src->it_interval) ||
get_compat_timespec(&dst->it_value, &src->it_value))
return -EFAULT;
return 0;
-}
+}
int put_compat_itimerspec(struct compat_itimerspec __user *dst,
const struct itimerspec *src)
-{
+{
if (put_compat_timespec(&src->it_interval, &dst->it_interval) ||
put_compat_timespec(&src->it_value, &dst->it_value))
return -EFAULT;
return 0;
-}
+}
long compat_sys_timer_create(clockid_t which_clock,
struct compat_sigevent __user *timer_event_spec,
@@ -512,9 +477,9 @@ long compat_sys_timer_create(clockid_t which_clock,
}
long compat_sys_timer_settime(timer_t timer_id, int flags,
- struct compat_itimerspec __user *new,
+ struct compat_itimerspec __user *new,
struct compat_itimerspec __user *old)
-{
+{
long err;
mm_segment_t oldfs;
struct itimerspec newts, oldts;
@@ -522,58 +487,58 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
if (!new)
return -EINVAL;
if (get_compat_itimerspec(&newts, new))
- return -EFAULT;
+ return -EFAULT;
oldfs = get_fs();
set_fs(KERNEL_DS);
err = sys_timer_settime(timer_id, flags,
(struct itimerspec __user *) &newts,
(struct itimerspec __user *) &oldts);
- set_fs(oldfs);
+ set_fs(oldfs);
if (!err && old && put_compat_itimerspec(old, &oldts))
return -EFAULT;
return err;
-}
+}
long compat_sys_timer_gettime(timer_t timer_id,
struct compat_itimerspec __user *setting)
-{
+{
long err;
mm_segment_t oldfs;
- struct itimerspec ts;
+ struct itimerspec ts;
oldfs = get_fs();
set_fs(KERNEL_DS);
err = sys_timer_gettime(timer_id,
- (struct itimerspec __user *) &ts);
- set_fs(oldfs);
+ (struct itimerspec __user *) &ts);
+ set_fs(oldfs);
if (!err && put_compat_itimerspec(setting, &ts))
return -EFAULT;
return err;
-}
+}
long compat_sys_clock_settime(clockid_t which_clock,
struct compat_timespec __user *tp)
{
long err;
mm_segment_t oldfs;
- struct timespec ts;
+ struct timespec ts;
if (get_compat_timespec(&ts, tp))
- return -EFAULT;
+ return -EFAULT;
oldfs = get_fs();
- set_fs(KERNEL_DS);
+ set_fs(KERNEL_DS);
err = sys_clock_settime(which_clock,
(struct timespec __user *) &ts);
set_fs(oldfs);
return err;
-}
+}
long compat_sys_clock_gettime(clockid_t which_clock,
struct compat_timespec __user *tp)
{
long err;
mm_segment_t oldfs;
- struct timespec ts;
+ struct timespec ts;
oldfs = get_fs();
set_fs(KERNEL_DS);
@@ -581,16 +546,16 @@ long compat_sys_clock_gettime(clockid_t which_clock,
(struct timespec __user *) &ts);
set_fs(oldfs);
if (!err && put_compat_timespec(&ts, tp))
- return -EFAULT;
+ return -EFAULT;
return err;
-}
+}
long compat_sys_clock_getres(clockid_t which_clock,
struct compat_timespec __user *tp)
{
long err;
mm_segment_t oldfs;
- struct timespec ts;
+ struct timespec ts;
oldfs = get_fs();
set_fs(KERNEL_DS);
@@ -598,9 +563,9 @@ long compat_sys_clock_getres(clockid_t which_clock,
(struct timespec __user *) &ts);
set_fs(oldfs);
if (!err && tp && put_compat_timespec(&ts, tp))
- return -EFAULT;
+ return -EFAULT;
return err;
-}
+}
static long compat_clock_nanosleep_restart(struct restart_block *restart)
{
@@ -632,10 +597,10 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
{
long err;
mm_segment_t oldfs;
- struct timespec in, out;
+ struct timespec in, out;
struct restart_block *restart;
- if (get_compat_timespec(&in, rqtp))
+ if (get_compat_timespec(&in, rqtp))
return -EFAULT;
oldfs = get_fs();
@@ -654,8 +619,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
restart->fn = compat_clock_nanosleep_restart;
restart->arg1 = (unsigned long) rmtp;
}
- return err;
-}
+ return err;
+}
/*
* We currently only need the following fields from the sigevent
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 38033db8d8e..a21f71af9d8 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -150,6 +150,7 @@ static int _cpu_down(unsigned int cpu, int tasks_frozen)
err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod,
hcpu, -1, &nr_calls);
if (err == NOTIFY_BAD) {
+ nr_calls--;
__raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
hcpu, nr_calls, NULL);
printk("%s: attempt to take down CPU %u failed\n",
@@ -233,6 +234,7 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
ret = __raw_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE | mod, hcpu,
-1, &nr_calls);
if (ret == NOTIFY_BAD) {
+ nr_calls--;
printk("%s: attempt to bring up CPU %u failed\n",
__FUNCTION__, cpu);
ret = -EINVAL;
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 57e6448b171..64950fa5d32 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -581,26 +581,28 @@ static void guarantee_online_cpus(const struct cpuset *cs, cpumask_t *pmask)
/*
* Return in *pmask the portion of a cpusets's mems_allowed that
- * are online. If none are online, walk up the cpuset hierarchy
- * until we find one that does have some online mems. If we get
- * all the way to the top and still haven't found any online mems,
- * return node_online_map.
+ * are online, with memory. If none are online with memory, walk
+ * up the cpuset hierarchy until we find one that does have some
+ * online mems. If we get all the way to the top and still haven't
+ * found any online mems, return node_states[N_HIGH_MEMORY].
*
* One way or another, we guarantee to return some non-empty subset
- * of node_online_map.
+ * of node_states[N_HIGH_MEMORY].
*
* Call with callback_mutex held.
*/
static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
{
- while (cs && !nodes_intersects(cs->mems_allowed, node_online_map))
+ while (cs && !nodes_intersects(cs->mems_allowed,
+ node_states[N_HIGH_MEMORY]))
cs = cs->parent;
if (cs)
- nodes_and(*pmask, cs->mems_allowed, node_online_map);
+ nodes_and(*pmask, cs->mems_allowed,
+ node_states[N_HIGH_MEMORY]);
else
- *pmask = node_online_map;
- BUG_ON(!nodes_intersects(*pmask, node_online_map));
+ *pmask = node_states[N_HIGH_MEMORY];
+ BUG_ON(!nodes_intersects(*pmask, node_states[N_HIGH_MEMORY]));
}
/**
@@ -753,68 +755,13 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
}
/*
- * For a given cpuset cur, partition the system as follows
- * a. All cpus in the parent cpuset's cpus_allowed that are not part of any
- * exclusive child cpusets
- * b. All cpus in the current cpuset's cpus_allowed that are not part of any
- * exclusive child cpusets
- * Build these two partitions by calling partition_sched_domains
- *
- * Call with manage_mutex held. May nest a call to the
- * lock_cpu_hotplug()/unlock_cpu_hotplug() pair.
- * Must not be called holding callback_mutex, because we must
- * not call lock_cpu_hotplug() while holding callback_mutex.
- */
-
-static void update_cpu_domains(struct cpuset *cur)
-{
- struct cpuset *c, *par = cur->parent;
- cpumask_t pspan, cspan;
-
- if (par == NULL || cpus_empty(cur->cpus_allowed))
- return;
-
- /*
- * Get all cpus from parent's cpus_allowed not part of exclusive
- * children
- */
- pspan = par->cpus_allowed;
- list_for_each_entry(c, &par->children, sibling) {
- if (is_cpu_exclusive(c))
- cpus_andnot(pspan, pspan, c->cpus_allowed);
- }
- if (!is_cpu_exclusive(cur)) {
- cpus_or(pspan, pspan, cur->cpus_allowed);
- if (cpus_equal(pspan, cur->cpus_allowed))
- return;
- cspan = CPU_MASK_NONE;
- } else {
- if (cpus_empty(pspan))
- return;
- cspan = cur->cpus_allowed;
- /*
- * Get all cpus from current cpuset's cpus_allowed not part
- * of exclusive children
- */
- list_for_each_entry(c, &cur->children, sibling) {
- if (is_cpu_exclusive(c))
- cpus_andnot(cspan, cspan, c->cpus_allowed);
- }
- }
-
- lock_cpu_hotplug();
- partition_sched_domains(&pspan, &cspan);
- unlock_cpu_hotplug();
-}
-
-/*
* Call with manage_mutex held. May take callback_mutex during call.
*/
static int update_cpumask(struct cpuset *cs, char *buf)
{
struct cpuset trialcs;
- int retval, cpus_unchanged;
+ int retval;
/* top_cpuset.cpus_allowed tracks cpu_online_map; it's read-only */
if (cs == &top_cpuset)
@@ -841,12 +788,9 @@ static int update_cpumask(struct cpuset *cs, char *buf)
retval = validate_change(cs, &trialcs);
if (retval < 0)
return retval;
- cpus_unchanged = cpus_equal(cs->cpus_allowed, trialcs.cpus_allowed);
mutex_lock(&callback_mutex);
cs->cpus_allowed = trialcs.cpus_allowed;
mutex_unlock(&callback_mutex);
- if (is_cpu_exclusive(cs) && !cpus_unchanged)
- update_cpu_domains(cs);
return 0;
}
@@ -924,7 +868,10 @@ static int update_nodemask(struct cpuset *cs, char *buf)
int fudge;
int retval;
- /* top_cpuset.mems_allowed tracks node_online_map; it's read-only */
+ /*
+ * top_cpuset.mems_allowed tracks node_stats[N_HIGH_MEMORY];
+ * it's read-only
+ */
if (cs == &top_cpuset)
return -EACCES;
@@ -941,8 +888,21 @@ static int update_nodemask(struct cpuset *cs, char *buf)
retval = nodelist_parse(buf, trialcs.mems_allowed);
if (retval < 0)
goto done;
+ if (!nodes_intersects(trialcs.mems_allowed,
+ node_states[N_HIGH_MEMORY])) {
+ /*
+ * error if only memoryless nodes specified.
+ */
+ retval = -ENOSPC;
+ goto done;
+ }
}
- nodes_and(trialcs.mems_allowed, trialcs.mems_allowed, node_online_map);
+ /*
+ * Exclude memoryless nodes. We know that trialcs.mems_allowed
+ * contains at least one node with memory.
+ */
+ nodes_and(trialcs.mems_allowed, trialcs.mems_allowed,
+ node_states[N_HIGH_MEMORY]);
oldmem = cs->mems_allowed;
if (nodes_equal(oldmem, trialcs.mems_allowed)) {
retval = 0; /* Too easy - nothing to do */
@@ -1067,7 +1027,7 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, char *buf)
{
int turning_on;
struct cpuset trialcs;
- int err, cpu_exclusive_changed;
+ int err;
turning_on = (simple_strtoul(buf, NULL, 10) != 0);
@@ -1080,14 +1040,10 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, char *buf)
err = validate_change(cs, &trialcs);
if (err < 0)
return err;
- cpu_exclusive_changed =
- (is_cpu_exclusive(cs) != is_cpu_exclusive(&trialcs));
mutex_lock(&callback_mutex);
cs->flags = trialcs.flags;
mutex_unlock(&callback_mutex);
- if (cpu_exclusive_changed)
- update_cpu_domains(cs);
return 0;
}
@@ -1445,7 +1401,7 @@ static ssize_t cpuset_common_file_read(struct file *file, char __user *buf,
ssize_t retval = 0;
char *s;
- if (!(page = (char *)__get_free_page(GFP_KERNEL)))
+ if (!(page = (char *)__get_free_page(GFP_TEMPORARY)))
return -ENOMEM;
s = page;
@@ -1947,17 +1903,6 @@ static int cpuset_mkdir(struct inode *dir, struct dentry *dentry, int mode)
return cpuset_create(c_parent, dentry->d_name.name, mode | S_IFDIR);
}
-/*
- * Locking note on the strange update_flag() call below:
- *
- * If the cpuset being removed is marked cpu_exclusive, then simulate
- * turning cpu_exclusive off, which will call update_cpu_domains().
- * The lock_cpu_hotplug() call in update_cpu_domains() must not be
- * made while holding callback_mutex. Elsewhere the kernel nests
- * callback_mutex inside lock_cpu_hotplug() calls. So the reverse
- * nesting would risk an ABBA deadlock.
- */
-
static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry)
{
struct cpuset *cs = dentry->d_fsdata;
@@ -1977,13 +1922,6 @@ static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry)
mutex_unlock(&manage_mutex);
return -EBUSY;
}
- if (is_cpu_exclusive(cs)) {
- int retval = update_flag(CS_CPU_EXCLUSIVE, cs, "0");
- if (retval < 0) {
- mutex_unlock(&manage_mutex);
- return retval;
- }
- }
parent = cs->parent;
mutex_lock(&callback_mutex);
set_bit(CS_REMOVED, &cs->flags);
@@ -2098,8 +2036,9 @@ static void guarantee_online_cpus_mems_in_subtree(const struct cpuset *cur)
/*
* The cpus_allowed and mems_allowed nodemasks in the top_cpuset track
- * cpu_online_map and node_online_map. Force the top cpuset to track
- * whats online after any CPU or memory node hotplug or unplug event.
+ * cpu_online_map and node_states[N_HIGH_MEMORY]. Force the top cpuset to
+ * track what's online after any CPU or memory node hotplug or unplug
+ * event.
*
* To ensure that we don't remove a CPU or node from the top cpuset
* that is currently in use by a child cpuset (which would violate
@@ -2119,7 +2058,7 @@ static void common_cpu_mem_hotplug_unplug(void)
guarantee_online_cpus_mems_in_subtree(&top_cpuset);
top_cpuset.cpus_allowed = cpu_online_map;
- top_cpuset.mems_allowed = node_online_map;
+ top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
mutex_unlock(&callback_mutex);
mutex_unlock(&manage_mutex);
@@ -2147,8 +2086,9 @@ static int cpuset_handle_cpuhp(struct notifier_block *nb,
#ifdef CONFIG_MEMORY_HOTPLUG
/*
- * Keep top_cpuset.mems_allowed tracking node_online_map.
- * Call this routine anytime after you change node_online_map.
+ * Keep top_cpuset.mems_allowed tracking node_states[N_HIGH_MEMORY].
+ * Call this routine anytime after you change
+ * node_states[N_HIGH_MEMORY].
* See also the previous routine cpuset_handle_cpuhp().
*/
@@ -2167,7 +2107,7 @@ void cpuset_track_online_nodes(void)
void __init cpuset_init_smp(void)
{
top_cpuset.cpus_allowed = cpu_online_map;
- top_cpuset.mems_allowed = node_online_map;
+ top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
hotcpu_notifier(cpuset_handle_cpuhp, 0);
}
@@ -2309,7 +2249,7 @@ void cpuset_init_current_mems_allowed(void)
*
* Description: Returns the nodemask_t mems_allowed of the cpuset
* attached to the specified @tsk. Guaranteed to return some non-empty
- * subset of node_online_map, even if this means going outside the
+ * subset of node_states[N_HIGH_MEMORY], even if this means going outside the
* tasks cpuset.
**/
@@ -2491,12 +2431,12 @@ int __cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
node = zone_to_nid(z);
if (node_isset(node, current->mems_allowed))
return 1;
- /*
- * Allow tasks that have access to memory reserves because they have
- * been OOM killed to get memory anywhere.
- */
- if (unlikely(test_thread_flag(TIF_MEMDIE)))
- return 1;
+ /*
+ * Allow tasks that have access to memory reserves because they have
+ * been OOM killed to get memory anywhere.
+ */
+ if (unlikely(test_thread_flag(TIF_MEMDIE)))
+ return 1;
return 0;
}
@@ -2566,41 +2506,20 @@ int cpuset_mem_spread_node(void)
EXPORT_SYMBOL_GPL(cpuset_mem_spread_node);
/**
- * cpuset_excl_nodes_overlap - Do we overlap @p's mem_exclusive ancestors?
- * @p: pointer to task_struct of some other task.
- *
- * Description: Return true if the nearest mem_exclusive ancestor
- * cpusets of tasks @p and current overlap. Used by oom killer to
- * determine if task @p's memory usage might impact the memory
- * available to the current task.
- *
- * Call while holding callback_mutex.
+ * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's?
+ * @tsk1: pointer to task_struct of some task.
+ * @tsk2: pointer to task_struct of some other task.
+ *
+ * Description: Return true if @tsk1's mems_allowed intersects the
+ * mems_allowed of @tsk2. Used by the OOM killer to determine if
+ * one of the task's memory usage might impact the memory available
+ * to the other.
**/
-int cpuset_excl_nodes_overlap(const struct task_struct *p)
+int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
+ const struct task_struct *tsk2)
{
- const struct cpuset *cs1, *cs2; /* my and p's cpuset ancestors */
- int overlap = 1; /* do cpusets overlap? */
-
- task_lock(current);
- if (current->flags & PF_EXITING) {
- task_unlock(current);
- goto done;
- }
- cs1 = nearest_exclusive_ancestor(current->cpuset);
- task_unlock(current);
-
- task_lock((struct task_struct *)p);
- if (p->flags & PF_EXITING) {
- task_unlock((struct task_struct *)p);
- goto done;
- }
- cs2 = nearest_exclusive_ancestor(p->cpuset);
- task_unlock((struct task_struct *)p);
-
- overlap = nodes_intersects(cs1->mems_allowed, cs2->mems_allowed);
-done:
- return overlap;
+ return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed);
}
/*
diff --git a/kernel/delayacct.c b/kernel/delayacct.c
index 09e9574eeb2..10e43fd8b72 100644
--- a/kernel/delayacct.c
+++ b/kernel/delayacct.c
@@ -115,6 +115,12 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
tmp += timespec_to_ns(&ts);
d->cpu_run_real_total = (tmp < (s64)d->cpu_run_real_total) ? 0 : tmp;
+ tmp = (s64)d->cpu_scaled_run_real_total;
+ cputime_to_timespec(tsk->utimescaled + tsk->stimescaled, &ts);
+ tmp += timespec_to_ns(&ts);
+ d->cpu_scaled_run_real_total =
+ (tmp < (s64)d->cpu_scaled_run_real_total) ? 0 : tmp;
+
/*
* No locking available for sched_info (and too expensive to add one)
* Mitigate by taking snapshot of values
diff --git a/kernel/dma.c b/kernel/dma.c
index 937b13ca33b..6a82bb716da 100644
--- a/kernel/dma.c
+++ b/kernel/dma.c
@@ -20,7 +20,7 @@
#include <asm/dma.h>
#include <asm/system.h>
-
+
/* A note on resource allocation:
*
@@ -95,7 +95,7 @@ void free_dma(unsigned int dmanr)
if (xchg(&dma_chan_busy[dmanr].lock, 0) == 0) {
printk(KERN_WARNING "Trying to free free DMA%d\n", dmanr);
return;
- }
+ }
} /* free_dma */
@@ -121,8 +121,8 @@ static int proc_dma_show(struct seq_file *m, void *v)
for (i = 0 ; i < MAX_DMA_CHANNELS ; i++) {
if (dma_chan_busy[i].lock) {
- seq_printf(m, "%2d: %s\n", i,
- dma_chan_busy[i].device_id);
+ seq_printf(m, "%2d: %s\n", i,
+ dma_chan_busy[i].device_id);
}
}
return 0;
diff --git a/kernel/exec_domain.c b/kernel/exec_domain.c
index 3c2eaea66b1..a9e6bad9f70 100644
--- a/kernel/exec_domain.c
+++ b/kernel/exec_domain.c
@@ -57,7 +57,7 @@ lookup_exec_domain(u_long personality)
{
struct exec_domain * ep;
u_long pers = personality(personality);
-
+
read_lock(&exec_domains_lock);
for (ep = exec_domains; ep; ep = ep->next) {
if (pers >= ep->pers_low && pers <= ep->pers_high)
diff --git a/kernel/exit.c b/kernel/exit.c
index 7f7959de4a8..2c704c86edb 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -44,7 +44,6 @@
#include <linux/resource.h>
#include <linux/blkdev.h>
#include <linux/task_io_accounting_ops.h>
-#include <linux/freezer.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
@@ -93,10 +92,9 @@ static void __exit_signal(struct task_struct *tsk)
* If there is any task waiting for the group exit
* then notify it:
*/
- if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
+ if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count)
wake_up_process(sig->group_exit_task);
- sig->group_exit_task = NULL;
- }
+
if (tsk == sig->curr_target)
sig->curr_target = next_thread(tsk);
/*
@@ -593,17 +591,6 @@ static void exit_mm(struct task_struct * tsk)
mmput(mm);
}
-static inline void
-choose_new_parent(struct task_struct *p, struct task_struct *reaper)
-{
- /*
- * Make sure we're not reparenting to ourselves and that
- * the parent is not a zombie.
- */
- BUG_ON(p == reaper || reaper->exit_state);
- p->real_parent = reaper;
-}
-
static void
reparent_thread(struct task_struct *p, struct task_struct *father, int traced)
{
@@ -711,7 +698,7 @@ forget_original_parent(struct task_struct *father, struct list_head *to_release)
if (father == p->real_parent) {
/* reparent with a reaper, real father it's us */
- choose_new_parent(p, reaper);
+ p->real_parent = reaper;
reparent_thread(p, father, 0);
} else {
/* reparent ptraced task to its real parent */
@@ -732,7 +719,7 @@ forget_original_parent(struct task_struct *father, struct list_head *to_release)
}
list_for_each_safe(_p, _n, &father->ptrace_children) {
p = list_entry(_p, struct task_struct, ptrace_list);
- choose_new_parent(p, reaper);
+ p->real_parent = reaper;
reparent_thread(p, father, 1);
}
}
@@ -759,13 +746,11 @@ static void exit_notify(struct task_struct *tsk)
* Now we'll wake all the threads in the group just to make
* sure someone gets all the pending signals.
*/
- read_lock(&tasklist_lock);
spin_lock_irq(&tsk->sighand->siglock);
for (t = next_thread(tsk); t != tsk; t = next_thread(t))
if (!signal_pending(t) && !(t->flags & PF_EXITING))
recalc_sigpending_and_wake(t);
spin_unlock_irq(&tsk->sighand->siglock);
- read_unlock(&tasklist_lock);
}
write_lock_irq(&tasklist_lock);
@@ -793,9 +778,8 @@ static void exit_notify(struct task_struct *tsk)
* and we were the only connection outside, so our pgrp
* is about to become orphaned.
*/
-
t = tsk->real_parent;
-
+
pgrp = task_pgrp(tsk);
if ((task_pgrp(t) != pgrp) &&
(task_session(t) == task_session(tsk)) &&
@@ -842,6 +826,11 @@ static void exit_notify(struct task_struct *tsk)
state = EXIT_DEAD;
tsk->exit_state = state;
+ if (thread_group_leader(tsk) &&
+ tsk->signal->notify_count < 0 &&
+ tsk->signal->group_exit_task)
+ wake_up_process(tsk->signal->group_exit_task);
+
write_unlock_irq(&tasklist_lock);
list_for_each_safe(_p, _n, &ptrace_dead) {
@@ -883,6 +872,14 @@ static void check_stack_usage(void)
static inline void check_stack_usage(void) {}
#endif
+static inline void exit_child_reaper(struct task_struct *tsk)
+{
+ if (likely(tsk->group_leader != child_reaper(tsk)))
+ return;
+
+ panic("Attempted to kill init!");
+}
+
fastcall NORET_TYPE void do_exit(long code)
{
struct task_struct *tsk = current;
@@ -896,13 +893,6 @@ fastcall NORET_TYPE void do_exit(long code)
panic("Aiee, killing interrupt handler!");
if (unlikely(!tsk->pid))
panic("Attempted to kill the idle task!");
- if (unlikely(tsk == child_reaper(tsk))) {
- if (tsk->nsproxy->pid_ns != &init_pid_ns)
- tsk->nsproxy->pid_ns->child_reaper = init_pid_ns.child_reaper;
- else
- panic("Attempted to kill init!");
- }
-
if (unlikely(current->ptrace & PT_TRACE_EXIT)) {
current->ptrace_message = code;
@@ -932,13 +922,13 @@ fastcall NORET_TYPE void do_exit(long code)
schedule();
}
+ tsk->flags |= PF_EXITING;
/*
* tsk->flags are checked in the futex code to protect against
* an exiting task cleaning up the robust pi futexes.
*/
- spin_lock_irq(&tsk->pi_lock);
- tsk->flags |= PF_EXITING;
- spin_unlock_irq(&tsk->pi_lock);
+ smp_mb();
+ spin_unlock_wait(&tsk->pi_lock);
if (unlikely(in_atomic()))
printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
@@ -952,16 +942,19 @@ fastcall NORET_TYPE void do_exit(long code)
}
group_dead = atomic_dec_and_test(&tsk->signal->live);
if (group_dead) {
+ exit_child_reaper(tsk);
hrtimer_cancel(&tsk->signal->real_timer);
exit_itimers(tsk->signal);
}
acct_collect(code, group_dead);
+#ifdef CONFIG_FUTEX
if (unlikely(tsk->robust_list))
exit_robust_list(tsk);
-#if defined(CONFIG_FUTEX) && defined(CONFIG_COMPAT)
+#ifdef CONFIG_COMPAT
if (unlikely(tsk->compat_robust_list))
compat_exit_robust_list(tsk);
#endif
+#endif
if (group_dead)
tty_audit_exit();
if (unlikely(tsk->audit_context))
@@ -996,6 +989,7 @@ fastcall NORET_TYPE void do_exit(long code)
mpol_free(tsk->mempolicy);
tsk->mempolicy = NULL;
#endif
+#ifdef CONFIG_FUTEX
/*
* This must happen late, after the PID is not
* hashed anymore:
@@ -1004,6 +998,7 @@ fastcall NORET_TYPE void do_exit(long code)
exit_pi_state_list(tsk);
if (unlikely(current->pi_state_cache))
kfree(current->pi_state_cache);
+#endif
/*
* Make sure we are holding no locks:
*/
@@ -1168,8 +1163,7 @@ static int wait_task_zombie(struct task_struct *p, int noreap,
int __user *stat_addr, struct rusage __user *ru)
{
unsigned long state;
- int retval;
- int status;
+ int retval, status, traced;
if (unlikely(noreap)) {
pid_t pid = p->pid;
@@ -1203,15 +1197,11 @@ static int wait_task_zombie(struct task_struct *p, int noreap,
BUG_ON(state != EXIT_DEAD);
return 0;
}
- if (unlikely(p->exit_signal == -1 && p->ptrace == 0)) {
- /*
- * This can only happen in a race with a ptraced thread
- * dying on another processor.
- */
- return 0;
- }
- if (likely(p->real_parent == p->parent) && likely(p->signal)) {
+ /* traced means p->ptrace, but not vice versa */
+ traced = (p->real_parent != p->parent);
+
+ if (likely(!traced)) {
struct signal_struct *psig;
struct signal_struct *sig;
@@ -1298,35 +1288,30 @@ static int wait_task_zombie(struct task_struct *p, int noreap,
retval = put_user(p->pid, &infop->si_pid);
if (!retval && infop)
retval = put_user(p->uid, &infop->si_uid);
- if (retval) {
- // TODO: is this safe?
- p->exit_state = EXIT_ZOMBIE;
- return retval;
- }
- retval = p->pid;
- if (p->real_parent != p->parent) {
+ if (!retval)
+ retval = p->pid;
+
+ if (traced) {
write_lock_irq(&tasklist_lock);
- /* Double-check with lock held. */
- if (p->real_parent != p->parent) {
- __ptrace_unlink(p);
- // TODO: is this safe?
- p->exit_state = EXIT_ZOMBIE;
- /*
- * If this is not a detached task, notify the parent.
- * If it's still not detached after that, don't release
- * it now.
- */
+ /* We dropped tasklist, ptracer could die and untrace */
+ ptrace_unlink(p);
+ /*
+ * If this is not a detached task, notify the parent.
+ * If it's still not detached after that, don't release
+ * it now.
+ */
+ if (p->exit_signal != -1) {
+ do_notify_parent(p, p->exit_signal);
if (p->exit_signal != -1) {
- do_notify_parent(p, p->exit_signal);
- if (p->exit_signal != -1)
- p = NULL;
+ p->exit_state = EXIT_ZOMBIE;
+ p = NULL;
}
}
write_unlock_irq(&tasklist_lock);
}
if (p != NULL)
release_task(p);
- BUG_ON(!retval);
+
return retval;
}
@@ -1345,7 +1330,7 @@ static int wait_task_stopped(struct task_struct *p, int delayed_group_leader,
if (!p->exit_code)
return 0;
if (delayed_group_leader && !(p->ptrace & PT_PTRACED) &&
- p->signal && p->signal->group_stop_count > 0)
+ p->signal->group_stop_count > 0)
/*
* A group stop is in progress and this is the group leader.
* We won't report until all threads have stopped.
@@ -1459,9 +1444,6 @@ static int wait_task_continued(struct task_struct *p, int noreap,
pid_t pid;
uid_t uid;
- if (unlikely(!p->signal))
- return 0;
-
if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
return 0;
diff --git a/kernel/fork.c b/kernel/fork.c
index 3fc3c138391..2ce28f165e3 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -107,6 +107,7 @@ static struct kmem_cache *mm_cachep;
void free_task(struct task_struct *tsk)
{
+ prop_local_destroy_single(&tsk->dirties);
free_thread_info(tsk->stack);
rt_mutex_debug_task_free(tsk);
free_task_struct(tsk);
@@ -163,6 +164,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
{
struct task_struct *tsk;
struct thread_info *ti;
+ int err;
prepare_to_copy(orig);
@@ -178,6 +180,14 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
*tsk = *orig;
tsk->stack = ti;
+
+ err = prop_local_init_single(&tsk->dirties);
+ if (err) {
+ free_thread_info(ti);
+ free_task_struct(tsk);
+ return NULL;
+ }
+
setup_thread_stack(tsk, orig);
#ifdef CONFIG_CC_STACKPROTECTOR
@@ -258,7 +268,7 @@ static inline int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
get_file(file);
if (tmp->vm_flags & VM_DENYWRITE)
atomic_dec(&inode->i_writecount);
-
+
/* insert tmp into the share list, just after mpnt */
spin_lock(&file->f_mapping->i_mmap_lock);
tmp->vm_truncate_count = mpnt->vm_truncate_count;
@@ -321,7 +331,7 @@ static inline void mm_free_pgd(struct mm_struct * mm)
#define mm_free_pgd(mm)
#endif /* CONFIG_MMU */
- __cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);
+__cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);
#define allocate_mm() (kmem_cache_alloc(mm_cachep, GFP_KERNEL))
#define free_mm(mm) (kmem_cache_free(mm_cachep, (mm)))
@@ -728,8 +738,8 @@ static struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
/* compute the remainder to be cleared */
size = (new_fdt->max_fds - open_files) * sizeof(struct file *);
- /* This is long word aligned thus could use a optimized version */
- memset(new_fds, 0, size);
+ /* This is long word aligned thus could use a optimized version */
+ memset(new_fds, 0, size);
if (new_fdt->max_fds > open_files) {
int left = (new_fdt->max_fds-open_files)/8;
@@ -932,6 +942,7 @@ static inline void copy_flags(unsigned long clone_flags, struct task_struct *p)
if (!(clone_flags & CLONE_PTRACE))
p->ptrace = 0;
p->flags = new_flags;
+ clear_freeze_flag(p);
}
asmlinkage long sys_set_tid_address(int __user *tidptr)
@@ -1048,6 +1059,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
p->utime = cputime_zero;
p->stime = cputime_zero;
p->gtime = cputime_zero;
+ p->utimescaled = cputime_zero;
+ p->stimescaled = cputime_zero;
#ifdef CONFIG_TASK_XACCT
p->rchar = 0; /* I/O counter: bytes read */
@@ -1058,20 +1071,21 @@ static struct task_struct *copy_process(unsigned long clone_flags,
task_io_accounting_init(p);
acct_clear_integrals(p);
- p->it_virt_expires = cputime_zero;
+ p->it_virt_expires = cputime_zero;
p->it_prof_expires = cputime_zero;
- p->it_sched_expires = 0;
- INIT_LIST_HEAD(&p->cpu_timers[0]);
- INIT_LIST_HEAD(&p->cpu_timers[1]);
- INIT_LIST_HEAD(&p->cpu_timers[2]);
+ p->it_sched_expires = 0;
+ INIT_LIST_HEAD(&p->cpu_timers[0]);
+ INIT_LIST_HEAD(&p->cpu_timers[1]);
+ INIT_LIST_HEAD(&p->cpu_timers[2]);
p->lock_depth = -1; /* -1 = no lock */
do_posix_clock_monotonic_gettime(&p->start_time);
p->real_start_time = p->start_time;
monotonic_to_bootbased(&p->real_start_time);
+#ifdef CONFIG_SECURITY
p->security = NULL;
+#endif
p->io_context = NULL;
- p->io_wait = NULL;
p->audit_context = NULL;
cpuset_fork(p);
#ifdef CONFIG_NUMA
@@ -1146,13 +1160,14 @@ static struct task_struct *copy_process(unsigned long clone_flags,
* Clear TID on mm_release()?
*/
p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr: NULL;
+#ifdef CONFIG_FUTEX
p->robust_list = NULL;
#ifdef CONFIG_COMPAT
p->compat_robust_list = NULL;
#endif
INIT_LIST_HEAD(&p->pi_state_list);
p->pi_state_cache = NULL;
-
+#endif
/*
* sigaltstack should be cleared when sharing the same VM
*/
@@ -1226,7 +1241,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
* A fatal signal pending means that current will exit, so the new
* thread can't slip out of an OOM kill (or normal SIGKILL).
*/
- recalc_sigpending();
+ recalc_sigpending();
if (signal_pending(current)) {
spin_unlock(&current->sighand->siglock);
write_unlock_irq(&tasklist_lock);
@@ -1435,8 +1450,7 @@ long do_fork(unsigned long clone_flags,
#define ARCH_MIN_MMSTRUCT_ALIGN 0
#endif
-static void sighand_ctor(void *data, struct kmem_cache *cachep,
- unsigned long flags)
+static void sighand_ctor(struct kmem_cache *cachep, void *data)
{
struct sighand_struct *sighand = data;
diff --git a/kernel/futex.c b/kernel/futex.c
index fcc94e7b408..e45a65e4168 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -52,6 +52,7 @@
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/module.h>
+#include <linux/magic.h>
#include <asm/futex.h>
#include "rtmutex_common.h"
@@ -292,7 +293,7 @@ EXPORT_SYMBOL_GPL(get_futex_key_refs);
*/
void drop_futex_key_refs(union futex_key *key)
{
- if (key->both.ptr == 0)
+ if (!key->both.ptr)
return;
switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
case FUT_OFF_INODE:
@@ -1045,7 +1046,7 @@ static int unqueue_me(struct futex_q *q)
retry:
lock_ptr = q->lock_ptr;
barrier();
- if (lock_ptr != 0) {
+ if (lock_ptr != NULL) {
spin_lock(lock_ptr);
/*
* q->lock_ptr can change between reading it and
@@ -2080,7 +2081,7 @@ static int futexfs_get_sb(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data,
struct vfsmount *mnt)
{
- return get_sb_pseudo(fs_type, "futex", NULL, 0xBAD1DEA, mnt);
+ return get_sb_pseudo(fs_type, "futex", NULL, FUTEXFS_SUPER_MAGIC, mnt);
}
static struct file_system_type futex_fs_type = {
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index dc8a4451d79..b2b2c2b0a49 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -1286,8 +1286,7 @@ static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mod
long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
{
struct hrtimer_sleeper t;
- struct timespec __user *rmtp;
- struct timespec tu;
+ struct timespec *rmtp;
ktime_t time;
restart->fn = do_no_restart_syscall;
@@ -1298,14 +1297,12 @@ long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
if (do_nanosleep(&t, HRTIMER_MODE_ABS))
return 0;
- rmtp = (struct timespec __user *) restart->arg1;
+ rmtp = (struct timespec *)restart->arg1;
if (rmtp) {
time = ktime_sub(t.timer.expires, t.timer.base->get_time());
if (time.tv64 <= 0)
return 0;
- tu = ktime_to_timespec(time);
- if (copy_to_user(rmtp, &tu, sizeof(tu)))
- return -EFAULT;
+ *rmtp = ktime_to_timespec(time);
}
restart->fn = hrtimer_nanosleep_restart;
@@ -1314,12 +1311,11 @@ long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
return -ERESTART_RESTARTBLOCK;
}
-long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
+long hrtimer_nanosleep(struct timespec *rqtp, struct timespec *rmtp,
const enum hrtimer_mode mode, const clockid_t clockid)
{
struct restart_block *restart;
struct hrtimer_sleeper t;
- struct timespec tu;
ktime_t rem;
hrtimer_init(&t.timer, clockid, mode);
@@ -1335,9 +1331,7 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
rem = ktime_sub(t.timer.expires, t.timer.base->get_time());
if (rem.tv64 <= 0)
return 0;
- tu = ktime_to_timespec(rem);
- if (copy_to_user(rmtp, &tu, sizeof(tu)))
- return -EFAULT;
+ *rmtp = ktime_to_timespec(rem);
}
restart = &current_thread_info()->restart_block;
@@ -1353,7 +1347,8 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
asmlinkage long
sys_nanosleep(struct timespec __user *rqtp, struct timespec __user *rmtp)
{
- struct timespec tu;
+ struct timespec tu, rmt;
+ int ret;
if (copy_from_user(&tu, rqtp, sizeof(tu)))
return -EFAULT;
@@ -1361,7 +1356,15 @@ sys_nanosleep(struct timespec __user *rqtp, struct timespec __user *rmtp)
if (!timespec_valid(&tu))
return -EINVAL;
- return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
+ ret = hrtimer_nanosleep(&tu, rmtp ? &rmt : NULL, HRTIMER_MODE_REL,
+ CLOCK_MONOTONIC);
+
+ if (ret && rmtp) {
+ if (copy_to_user(rmtp, &rmt, sizeof(*rmtp)))
+ return -EFAULT;
+ }
+
+ return ret;
}
/*
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index f1a73f0b54e..9b5dff6b3f6 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -503,7 +503,6 @@ out_unlock:
spin_unlock(&desc->lock);
}
-#ifdef CONFIG_SMP
/**
* handle_percpu_IRQ - Per CPU local irq handler
* @irq: the interrupt number
@@ -529,8 +528,6 @@ handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
desc->chip->eoi(irq);
}
-#endif /* CONFIG_SMP */
-
void
__set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
const char *name)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 7230d914eaa..80eab7a0420 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -405,7 +405,6 @@ void free_irq(unsigned int irq, void *dev_id)
struct irq_desc *desc;
struct irqaction **p;
unsigned long flags;
- irqreturn_t (*handler)(int, void *) = NULL;
WARN_ON(in_interrupt());
if (irq >= NR_IRQS)
@@ -445,8 +444,21 @@ void free_irq(unsigned int irq, void *dev_id)
/* Make sure it's not being used on another CPU */
synchronize_irq(irq);
- if (action->flags & IRQF_SHARED)
- handler = action->handler;
+#ifdef CONFIG_DEBUG_SHIRQ
+ /*
+ * It's a shared IRQ -- the driver ought to be
+ * prepared for it to happen even now it's
+ * being freed, so let's make sure.... We do
+ * this after actually deregistering it, to
+ * make sure that a 'real' IRQ doesn't run in
+ * parallel with our fake
+ */
+ if (action->flags & IRQF_SHARED) {
+ local_irq_save(flags);
+ action->handler(irq, dev_id);
+ local_irq_restore(flags);
+ }
+#endif
kfree(action);
return;
}
@@ -454,19 +466,6 @@ void free_irq(unsigned int irq, void *dev_id)
spin_unlock_irqrestore(&desc->lock, flags);
return;
}
-#ifdef CONFIG_DEBUG_SHIRQ
- if (handler) {
- /*
- * It's a shared IRQ -- the driver ought to be prepared for it
- * to happen even now it's being freed, so let's make sure....
- * We do this after actually deregistering it, to make sure that
- * a 'real' IRQ doesn't run in parallel with our fake
- */
- local_irq_save(flags);
- handler(irq, dev_id);
- local_irq_restore(flags);
- }
-#endif
}
EXPORT_SYMBOL(free_irq);
diff --git a/kernel/itimer.c b/kernel/itimer.c
index 3205e8e114f..2fab344dbf5 100644
--- a/kernel/itimer.c
+++ b/kernel/itimer.c
@@ -130,7 +130,7 @@ asmlinkage long sys_getitimer(int which, struct itimerval __user *value)
enum hrtimer_restart it_real_fn(struct hrtimer *timer)
{
struct signal_struct *sig =
- container_of(timer, struct signal_struct, real_timer);
+ container_of(timer, struct signal_struct, real_timer);
send_group_sig_info(SIGALRM, SEND_SIG_PRIV, sig->tsk);
@@ -291,6 +291,6 @@ asmlinkage long sys_setitimer(int which,
return error;
if (copy_to_user(ovalue, &get_buffer, sizeof(get_buffer)))
- return -EFAULT;
+ return -EFAULT;
return 0;
}
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 25db14b89e8..e9f1b4ea504 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -17,21 +17,30 @@
#include <linux/highmem.h>
#include <linux/syscalls.h>
#include <linux/reboot.h>
-#include <linux/syscalls.h>
#include <linux/ioport.h>
#include <linux/hardirq.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
+#include <linux/utsrelease.h>
+#include <linux/utsname.h>
+#include <linux/numa.h>
#include <asm/page.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/system.h>
#include <asm/semaphore.h>
+#include <asm/sections.h>
/* Per cpu memory for storing cpu states in case of system crash. */
note_buf_t* crash_notes;
+/* vmcoreinfo stuff */
+unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
+u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
+size_t vmcoreinfo_size;
+size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);
+
/* Location of the reserved area for the crash kernel */
struct resource crashk_res = {
.name = "Crash kernel",
@@ -776,7 +785,7 @@ static int kimage_load_normal_segment(struct kimage *image,
size_t uchunk, mchunk;
page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
- if (page == 0) {
+ if (!page) {
result = -ENOMEM;
goto out;
}
@@ -835,7 +844,7 @@ static int kimage_load_crash_segment(struct kimage *image,
size_t uchunk, mchunk;
page = pfn_to_page(maddr >> PAGE_SHIFT);
- if (page == 0) {
+ if (!page) {
result = -ENOMEM;
goto out;
}
@@ -1061,6 +1070,7 @@ void crash_kexec(struct pt_regs *regs)
if (kexec_crash_image) {
struct pt_regs fixed_regs;
crash_setup_regs(&fixed_regs, regs);
+ crash_save_vmcoreinfo();
machine_crash_shutdown(&fixed_regs);
machine_kexec(kexec_crash_image);
}
@@ -1135,3 +1145,104 @@ static int __init crash_notes_memory_init(void)
return 0;
}
module_init(crash_notes_memory_init)
+
+void crash_save_vmcoreinfo(void)
+{
+ u32 *buf;
+
+ if (!vmcoreinfo_size)
+ return;
+
+ vmcoreinfo_append_str("CRASHTIME=%ld", get_seconds());
+
+ buf = (u32 *)vmcoreinfo_note;
+
+ buf = append_elf_note(buf, VMCOREINFO_NOTE_NAME, 0, vmcoreinfo_data,
+ vmcoreinfo_size);
+
+ final_note(buf);
+}
+
+void vmcoreinfo_append_str(const char *fmt, ...)
+{
+ va_list args;
+ char buf[0x50];
+ int r;
+
+ va_start(args, fmt);
+ r = vsnprintf(buf, sizeof(buf), fmt, args);
+ va_end(args);
+
+ if (r + vmcoreinfo_size > vmcoreinfo_max_size)
+ r = vmcoreinfo_max_size - vmcoreinfo_size;
+
+ memcpy(&vmcoreinfo_data[vmcoreinfo_size], buf, r);
+
+ vmcoreinfo_size += r;
+}
+
+/*
+ * provide an empty default implementation here -- architecture
+ * code may override this
+ */
+void __attribute__ ((weak)) arch_crash_save_vmcoreinfo(void)
+{}
+
+unsigned long __attribute__ ((weak)) paddr_vmcoreinfo_note(void)
+{
+ return __pa((unsigned long)(char *)&vmcoreinfo_note);
+}
+
+static int __init crash_save_vmcoreinfo_init(void)
+{
+ vmcoreinfo_append_str("OSRELEASE=%s\n", init_uts_ns.name.release);
+ vmcoreinfo_append_str("PAGESIZE=%ld\n", PAGE_SIZE);
+
+ VMCOREINFO_SYMBOL(init_uts_ns);
+ VMCOREINFO_SYMBOL(node_online_map);
+ VMCOREINFO_SYMBOL(swapper_pg_dir);
+ VMCOREINFO_SYMBOL(_stext);
+
+#ifndef CONFIG_NEED_MULTIPLE_NODES
+ VMCOREINFO_SYMBOL(mem_map);
+ VMCOREINFO_SYMBOL(contig_page_data);
+#endif
+#ifdef CONFIG_SPARSEMEM
+ VMCOREINFO_SYMBOL(mem_section);
+ VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
+ VMCOREINFO_SIZE(mem_section);
+ VMCOREINFO_OFFSET(mem_section, section_mem_map);
+#endif
+ VMCOREINFO_SIZE(page);
+ VMCOREINFO_SIZE(pglist_data);
+ VMCOREINFO_SIZE(zone);
+ VMCOREINFO_SIZE(free_area);
+ VMCOREINFO_SIZE(list_head);
+ VMCOREINFO_TYPEDEF_SIZE(nodemask_t);
+ VMCOREINFO_OFFSET(page, flags);
+ VMCOREINFO_OFFSET(page, _count);
+ VMCOREINFO_OFFSET(page, mapping);
+ VMCOREINFO_OFFSET(page, lru);
+ VMCOREINFO_OFFSET(pglist_data, node_zones);
+ VMCOREINFO_OFFSET(pglist_data, nr_zones);
+#ifdef CONFIG_FLAT_NODE_MEM_MAP
+ VMCOREINFO_OFFSET(pglist_data, node_mem_map);
+#endif
+ VMCOREINFO_OFFSET(pglist_data, node_start_pfn);
+ VMCOREINFO_OFFSET(pglist_data, node_spanned_pages);
+ VMCOREINFO_OFFSET(pglist_data, node_id);
+ VMCOREINFO_OFFSET(zone, free_area);
+ VMCOREINFO_OFFSET(zone, vm_stat);
+ VMCOREINFO_OFFSET(zone, spanned_pages);
+ VMCOREINFO_OFFSET(free_area, free_list);
+ VMCOREINFO_OFFSET(list_head, next);
+ VMCOREINFO_OFFSET(list_head, prev);
+ VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
+ VMCOREINFO_NUMBER(NR_FREE_PAGES);
+
+ arch_crash_save_vmcoreinfo();
+
+ return 0;
+}
+
+module_init(crash_save_vmcoreinfo_init)
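The weak arch_crash_save_vmcoreinfo() above is the hook for architecture code to append its own entries to the note filled in by crash_save_vmcoreinfo_init(). A minimal sketch of such an override, assuming the VMCOREINFO_* helpers used in that init function; the exported symbol here is illustrative only:

/* sketch: arch override of the weak hook (symbol is illustrative) */
#include <linux/kexec.h>

void arch_crash_save_vmcoreinfo(void)
{
#ifdef CONFIG_NUMA
	VMCOREINFO_SYMBOL(node_data);		/* hypothetical per-arch symbol */
	vmcoreinfo_append_str("CONFIG_NUMA=y\n");
#endif
}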
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 4b8a4493c54..e3a5d817ac9 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -64,7 +64,6 @@
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
-static atomic_t kprobe_count;
/* NOTE: change this value only with kprobe_mutex held */
static bool kprobe_enabled;
@@ -73,11 +72,6 @@ DEFINE_MUTEX(kprobe_mutex); /* Protects kprobe_table */
DEFINE_SPINLOCK(kretprobe_lock); /* Protects kretprobe_inst_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
-static struct notifier_block kprobe_page_fault_nb = {
- .notifier_call = kprobe_exceptions_notify,
- .priority = 0x7fffffff /* we need to notified first */
-};
-
#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
* kprobe->ainsn.insn points to the copy of the instruction to be
@@ -556,8 +550,6 @@ static int __kprobes __register_kprobe(struct kprobe *p,
old_p = get_kprobe(p->addr);
if (old_p) {
ret = register_aggr_kprobe(old_p, p);
- if (!ret)
- atomic_inc(&kprobe_count);
goto out;
}
@@ -569,13 +561,9 @@ static int __kprobes __register_kprobe(struct kprobe *p,
hlist_add_head_rcu(&p->hlist,
&kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
- if (kprobe_enabled) {
- if (atomic_add_return(1, &kprobe_count) == \
- (ARCH_INACTIVE_KPROBE_COUNT + 1))
- register_page_fault_notifier(&kprobe_page_fault_nb);
-
+ if (kprobe_enabled)
arch_arm_kprobe(p);
- }
+
out:
mutex_unlock(&kprobe_mutex);
@@ -658,16 +646,6 @@ valid_p:
}
mutex_unlock(&kprobe_mutex);
}
-
- /* Call unregister_page_fault_notifier()
- * if no probes are active
- */
- mutex_lock(&kprobe_mutex);
- if (atomic_add_return(-1, &kprobe_count) == \
- ARCH_INACTIVE_KPROBE_COUNT)
- unregister_page_fault_notifier(&kprobe_page_fault_nb);
- mutex_unlock(&kprobe_mutex);
- return;
}
static struct notifier_block kprobe_exceptions_nb = {
@@ -738,6 +716,18 @@ int __kprobes register_kretprobe(struct kretprobe *rp)
int ret = 0;
struct kretprobe_instance *inst;
int i;
+ void *addr = rp->kp.addr;
+
+ if (kretprobe_blacklist_size) {
+ if (addr == NULL)
+ kprobe_lookup_name(rp->kp.symbol_name, addr);
+ addr += rp->kp.offset;
+
+ for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
+ if (kretprobe_blacklist[i].addr == addr)
+ return -EINVAL;
+ }
+ }
rp->kp.pre_handler = pre_handler_kretprobe;
rp->kp.post_handler = NULL;
@@ -815,7 +805,17 @@ static int __init init_kprobes(void)
INIT_HLIST_HEAD(&kprobe_table[i]);
INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
}
- atomic_set(&kprobe_count, 0);
+
+ if (kretprobe_blacklist_size) {
+ /* lookup the function address from its name */
+ for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
+ kprobe_lookup_name(kretprobe_blacklist[i].name,
+ kretprobe_blacklist[i].addr);
+ if (!kretprobe_blacklist[i].addr)
+ printk("kretprobe: lookup failed: %s\n",
+ kretprobe_blacklist[i].name);
+ }
+ }
/* By default, kprobes are enabled */
kprobe_enabled = true;
@@ -921,13 +921,6 @@ static void __kprobes enable_all_kprobes(void)
if (kprobe_enabled)
goto already_enabled;
- /*
- * Re-register the page fault notifier only if there are any
- * active probes at the time of enabling kprobes globally
- */
- if (atomic_read(&kprobe_count) > ARCH_INACTIVE_KPROBE_COUNT)
- register_page_fault_notifier(&kprobe_page_fault_nb);
-
for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
head = &kprobe_table[i];
hlist_for_each_entry_rcu(p, node, head, hlist)
@@ -968,10 +961,7 @@ static void __kprobes disable_all_kprobes(void)
mutex_unlock(&kprobe_mutex);
/* Allow all currently running kprobes to complete */
synchronize_sched();
-
- mutex_lock(&kprobe_mutex);
- /* Unconditionally unregister the page_fault notifier */
- unregister_page_fault_notifier(&kprobe_page_fault_nb);
+ return;
already_disabled:
mutex_unlock(&kprobe_mutex);
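With this change a kretprobe registration is refused when the resolved target address appears in a per-architecture kretprobe_blacklist[], whose names are looked up once in init_kprobes(). A sketch of what the architecture side might provide; the entry type and the function name are assumptions, only the .name/.addr fields follow from the accesses above:

/* sketch: arch-provided blacklist (entry type and names are assumptions) */
struct kretprobe_blackpoint kretprobe_blacklist[] = {
	{"__switch_to", NULL},	/* resolved to an address at init time */
	{NULL, NULL}		/* NULL name terminates the scans above */
};
const size_t kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);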
diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
index 6046939d080..65daa5373ca 100644
--- a/kernel/ksysfs.c
+++ b/kernel/ksysfs.c
@@ -61,6 +61,15 @@ static ssize_t kexec_crash_loaded_show(struct kset *kset, char *page)
return sprintf(page, "%d\n", !!kexec_crash_image);
}
KERNEL_ATTR_RO(kexec_crash_loaded);
+
+static ssize_t vmcoreinfo_show(struct kset *kset, char *page)
+{
+ return sprintf(page, "%lx %x\n",
+ paddr_vmcoreinfo_note(),
+ (unsigned int)vmcoreinfo_max_size);
+}
+KERNEL_ATTR_RO(vmcoreinfo);
+
#endif /* CONFIG_KEXEC */
/*
@@ -96,6 +105,7 @@ static struct attribute * kernel_attrs[] = {
#ifdef CONFIG_KEXEC
&kexec_loaded_attr.attr,
&kexec_crash_loaded_attr.attr,
+ &vmcoreinfo_attr.attr,
#endif
NULL
};
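The new read-only attribute prints the physical address of the vmcoreinfo note followed by its maximum size, both in hex, for kexec tooling to pass to the capture kernel. A small userspace sketch of consuming it (the path follows from the kernel attribute set above; the parsing is illustrative):

/* sketch: read /sys/kernel/vmcoreinfo from userspace (illustrative) */
#include <stdio.h>

int main(void)
{
	unsigned long long paddr;
	unsigned int size;
	FILE *f = fopen("/sys/kernel/vmcoreinfo", "r");

	if (!f) {
		perror("vmcoreinfo");
		return 1;
	}
	if (fscanf(f, "%llx %x", &paddr, &size) != 2) {
		fclose(f);
		return 1;
	}
	printf("note at 0x%llx, up to %u bytes\n", paddr, size);
	fclose(f);
	return 0;
}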
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 734da579ad1..a6f1ee9c92d 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -1521,7 +1521,7 @@ cache_hit:
}
static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
- struct held_lock *hlock, int chain_head)
+ struct held_lock *hlock, int chain_head, u64 chain_key)
{
/*
* Trylock needs to maintain the stack of held locks, but it
@@ -1534,7 +1534,7 @@ static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
* graph_lock for us)
*/
if (!hlock->trylock && (hlock->check == 2) &&
- lookup_chain_cache(curr->curr_chain_key, hlock->class)) {
+ lookup_chain_cache(chain_key, hlock->class)) {
/*
* Check whether last held lock:
*
@@ -1576,7 +1576,7 @@ static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
#else
static inline int validate_chain(struct task_struct *curr,
struct lockdep_map *lock, struct held_lock *hlock,
- int chain_head)
+ int chain_head, u64 chain_key)
{
return 1;
}
@@ -2450,11 +2450,11 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
chain_head = 1;
}
chain_key = iterate_chain_key(chain_key, id);
- curr->curr_chain_key = chain_key;
- if (!validate_chain(curr, lock, hlock, chain_head))
+ if (!validate_chain(curr, lock, hlock, chain_head, chain_key))
return 0;
+ curr->curr_chain_key = chain_key;
curr->lockdep_depth++;
check_chain_key(curr);
#ifdef CONFIG_DEBUG_LOCKDEP
@@ -3199,3 +3199,19 @@ void debug_show_held_locks(struct task_struct *task)
}
EXPORT_SYMBOL_GPL(debug_show_held_locks);
+
+void lockdep_sys_exit(void)
+{
+ struct task_struct *curr = current;
+
+ if (unlikely(curr->lockdep_depth)) {
+ if (!debug_locks_off())
+ return;
+ printk("\n================================================\n");
+ printk( "[ BUG: lock held when returning to user space! ]\n");
+ printk( "------------------------------------------------\n");
+ printk("%s/%d is leaving the kernel with locks still held!\n",
+ curr->comm, curr->pid);
+ lockdep_print_held_locks(curr);
+ }
+}
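lockdep_sys_exit() is intended to be called from the architecture's return-to-user path; if the task still holds a lock there, lockdep turns itself off and prints the report once. A rough sketch of a call site, assuming a C-level syscall-exit helper (real architectures typically hook this from their entry code):

/* sketch: possible call site on syscall exit (helper is assumed, not from this patch) */
static inline void syscall_exit_checks(void)
{
#ifdef CONFIG_LOCKDEP
	lockdep_sys_exit();	/* warn if returning to user space with locks held */
#endif
}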
diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
index c851b2dcc68..8a135bd163c 100644
--- a/kernel/lockdep_proc.c
+++ b/kernel/lockdep_proc.c
@@ -25,28 +25,38 @@
static void *l_next(struct seq_file *m, void *v, loff_t *pos)
{
- struct lock_class *class = v;
+ struct lock_class *class;
(*pos)++;
- if (class->lock_entry.next != &all_lock_classes)
- class = list_entry(class->lock_entry.next, struct lock_class,
- lock_entry);
- else
- class = NULL;
- m->private = class;
+ if (v == SEQ_START_TOKEN)
+ class = m->private;
+ else {
+ class = v;
+
+ if (class->lock_entry.next != &all_lock_classes)
+ class = list_entry(class->lock_entry.next,
+ struct lock_class, lock_entry);
+ else
+ class = NULL;
+ }
return class;
}
static void *l_start(struct seq_file *m, loff_t *pos)
{
- struct lock_class *class = m->private;
+ struct lock_class *class;
+ loff_t i = 0;
- if (&class->lock_entry == all_lock_classes.next)
- seq_printf(m, "all lock classes:\n");
+ if (*pos == 0)
+ return SEQ_START_TOKEN;
- return class;
+ list_for_each_entry(class, &all_lock_classes, lock_entry) {
+ if (++i == *pos)
+ return class;
+ }
+ return NULL;
}
static void l_stop(struct seq_file *m, void *v)
@@ -101,10 +111,15 @@ static void print_name(struct seq_file *m, struct lock_class *class)
static int l_show(struct seq_file *m, void *v)
{
unsigned long nr_forward_deps, nr_backward_deps;
- struct lock_class *class = m->private;
+ struct lock_class *class = v;
struct lock_list *entry;
char c1, c2, c3, c4;
+ if (v == SEQ_START_TOKEN) {
+ seq_printf(m, "all lock classes:\n");
+ return 0;
+ }
+
seq_printf(m, "%p", class->key);
#ifdef CONFIG_DEBUG_LOCKDEP
seq_printf(m, " OPS:%8ld", class->ops);
@@ -523,10 +538,11 @@ static void *ls_start(struct seq_file *m, loff_t *pos)
{
struct lock_stat_seq *data = m->private;
- if (data->iter == data->stats)
- seq_header(m);
+ if (*pos == 0)
+ return SEQ_START_TOKEN;
- if (data->iter == data->iter_end)
+ data->iter = data->stats + *pos;
+ if (data->iter >= data->iter_end)
data->iter = NULL;
return data->iter;
@@ -538,8 +554,13 @@ static void *ls_next(struct seq_file *m, void *v, loff_t *pos)
(*pos)++;
- data->iter = v;
- data->iter++;
+ if (v == SEQ_START_TOKEN)
+ data->iter = data->stats;
+ else {
+ data->iter = v;
+ data->iter++;
+ }
+
if (data->iter == data->iter_end)
data->iter = NULL;
@@ -552,9 +573,11 @@ static void ls_stop(struct seq_file *m, void *v)
static int ls_show(struct seq_file *m, void *v)
{
- struct lock_stat_seq *data = m->private;
+ if (v == SEQ_START_TOKEN)
+ seq_header(m);
+ else
+ seq_stats(m, v);
- seq_stats(m, data->iter);
return 0;
}
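Both /proc/lockdep and /proc/lock_stat iterators now follow the standard SEQ_START_TOKEN idiom: ->start() hands back the token for *pos == 0, ->show() prints the header when it sees the token, and ->next() steps from the token to the first real element. A self-contained sketch of the idiom for reference (the data and names are illustrative):

/* sketch: the SEQ_START_TOKEN idiom in isolation (illustrative) */
#include <linux/kernel.h>
#include <linux/seq_file.h>

static int ex_values[] = { 1, 2, 3 };

static void *ex_start(struct seq_file *m, loff_t *pos)
{
	if (*pos == 0)
		return SEQ_START_TOKEN;			/* first call: header token */
	if (*pos >= 1 && *pos <= ARRAY_SIZE(ex_values))
		return &ex_values[*pos - 1];
	return NULL;
}

static void *ex_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return ex_start(m, pos);			/* token maps to element 0 */
}

static void ex_stop(struct seq_file *m, void *v)
{
}

static int ex_show(struct seq_file *m, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(m, "values:\n");		/* header, printed exactly once */
	else
		seq_printf(m, "%d\n", *(int *)v);
	return 0;
}

static const struct seq_operations ex_seq_ops = {
	.start	= ex_start,
	.next	= ex_next,
	.stop	= ex_stop,
	.show	= ex_show,
};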
diff --git a/kernel/module.c b/kernel/module.c
index db0ead0363e..7734595bd32 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -20,6 +20,7 @@
#include <linux/moduleloader.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
+#include <linux/sysfs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
@@ -104,7 +105,7 @@ void __module_put_and_exit(struct module *mod, long code)
do_exit(code);
}
EXPORT_SYMBOL(__module_put_and_exit);
-
+
/* Find a module section: 0 means not found. */
static unsigned int find_sec(Elf_Ehdr *hdr,
Elf_Shdr *sechdrs,
@@ -178,7 +179,7 @@ static unsigned long __find_symbol(const char *name,
struct module *mod;
const struct kernel_symbol *ks;
- /* Core kernel first. */
+ /* Core kernel first. */
*owner = NULL;
ks = lookup_symbol(name, __start___ksymtab, __stop___ksymtab);
if (ks) {
@@ -230,7 +231,7 @@ static unsigned long __find_symbol(const char *name,
return ks->value;
}
- /* Now try modules. */
+ /* Now try modules. */
list_for_each_entry(mod, &modules, list) {
*owner = mod;
ks = lookup_symbol(name, mod->syms, mod->syms + mod->num_syms);
@@ -284,7 +285,7 @@ static unsigned long __find_symbol(const char *name,
}
}
DEBUGP("Failed to find symbol %s\n", name);
- return 0;
+ return 0;
}
/* Search for module by name: must hold module_mutex. */
@@ -440,7 +441,7 @@ static int percpu_modinit(void)
}
return 0;
-}
+}
__initcall(percpu_modinit);
#else /* ... !CONFIG_SMP */
static inline void *percpu_modalloc(unsigned long size, unsigned long align,
@@ -482,8 +483,8 @@ static int modinfo_##field##_exists(struct module *mod) \
} \
static void free_modinfo_##field(struct module *mod) \
{ \
- kfree(mod->field); \
- mod->field = NULL; \
+ kfree(mod->field); \
+ mod->field = NULL; \
} \
static struct module_attribute modinfo_##field = { \
.attr = { .name = __stringify(field), .mode = 0444 }, \
@@ -692,8 +693,7 @@ sys_delete_module(const char __user *name_user, unsigned int flags)
}
/* If it has an init func, it must have an exit func to unload */
- if ((mod->init != NULL && mod->exit == NULL)
- || mod->unsafe) {
+ if (mod->init && !mod->exit) {
forced = try_force_unload(flags);
if (!forced) {
/* This module can't be removed */
@@ -741,11 +741,6 @@ static void print_unload_info(struct seq_file *m, struct module *mod)
seq_printf(m, "%s,", use->module_which_uses->name);
}
- if (mod->unsafe) {
- printed_something = 1;
- seq_printf(m, "[unsafe],");
- }
-
if (mod->init != NULL && mod->exit == NULL) {
printed_something = 1;
seq_printf(m, "[permanent],");
@@ -995,7 +990,7 @@ static void add_sect_attrs(struct module *mod, unsigned int nsect,
struct module_sect_attrs *sect_attrs;
struct module_sect_attr *sattr;
struct attribute **gattr;
-
+
/* Count loaded sections and allocate structures */
for (i = 0; i < nsect; i++)
if (sechdrs[i].sh_flags & SHF_ALLOC)
@@ -1053,6 +1048,100 @@ static void remove_sect_attrs(struct module *mod)
}
}
+/*
+ * /sys/module/foo/notes/.section.name gives contents of SHT_NOTE sections.
+ */
+
+struct module_notes_attrs {
+ struct kobject *dir;
+ unsigned int notes;
+ struct bin_attribute attrs[0];
+};
+
+static ssize_t module_notes_read(struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t count)
+{
+ /*
+ * The caller checked the pos and count against our size.
+ */
+ memcpy(buf, bin_attr->private + pos, count);
+ return count;
+}
+
+static void free_notes_attrs(struct module_notes_attrs *notes_attrs,
+ unsigned int i)
+{
+ if (notes_attrs->dir) {
+ while (i-- > 0)
+ sysfs_remove_bin_file(notes_attrs->dir,
+ &notes_attrs->attrs[i]);
+ kobject_del(notes_attrs->dir);
+ }
+ kfree(notes_attrs);
+}
+
+static void add_notes_attrs(struct module *mod, unsigned int nsect,
+ char *secstrings, Elf_Shdr *sechdrs)
+{
+ unsigned int notes, loaded, i;
+ struct module_notes_attrs *notes_attrs;
+ struct bin_attribute *nattr;
+
+ /* Count notes sections and allocate structures. */
+ notes = 0;
+ for (i = 0; i < nsect; i++)
+ if ((sechdrs[i].sh_flags & SHF_ALLOC) &&
+ (sechdrs[i].sh_type == SHT_NOTE))
+ ++notes;
+
+ if (notes == 0)
+ return;
+
+ notes_attrs = kzalloc(sizeof(*notes_attrs)
+ + notes * sizeof(notes_attrs->attrs[0]),
+ GFP_KERNEL);
+ if (notes_attrs == NULL)
+ return;
+
+ notes_attrs->notes = notes;
+ nattr = &notes_attrs->attrs[0];
+ for (loaded = i = 0; i < nsect; ++i) {
+ if (!(sechdrs[i].sh_flags & SHF_ALLOC))
+ continue;
+ if (sechdrs[i].sh_type == SHT_NOTE) {
+ nattr->attr.name = mod->sect_attrs->attrs[loaded].name;
+ nattr->attr.mode = S_IRUGO;
+ nattr->size = sechdrs[i].sh_size;
+ nattr->private = (void *) sechdrs[i].sh_addr;
+ nattr->read = module_notes_read;
+ ++nattr;
+ }
+ ++loaded;
+ }
+
+ notes_attrs->dir = kobject_add_dir(&mod->mkobj.kobj, "notes");
+ if (!notes_attrs->dir)
+ goto out;
+
+ for (i = 0; i < notes; ++i)
+ if (sysfs_create_bin_file(notes_attrs->dir,
+ &notes_attrs->attrs[i]))
+ goto out;
+
+ mod->notes_attrs = notes_attrs;
+ return;
+
+ out:
+ free_notes_attrs(notes_attrs, i);
+}
+
+static void remove_notes_attrs(struct module *mod)
+{
+ if (mod->notes_attrs)
+ free_notes_attrs(mod->notes_attrs, mod->notes_attrs->notes);
+}
+
#else
static inline void add_sect_attrs(struct module *mod, unsigned int nsect,
@@ -1063,6 +1152,15 @@ static inline void add_sect_attrs(struct module *mod, unsigned int nsect,
static inline void remove_sect_attrs(struct module *mod)
{
}
+
+static inline void add_notes_attrs(struct module *mod, unsigned int nsect,
+ char *sectstrings, Elf_Shdr *sechdrs)
+{
+}
+
+static inline void remove_notes_attrs(struct module *mod)
+{
+}
#endif /* CONFIG_KALLSYMS */
#ifdef CONFIG_SYSFS
@@ -1197,6 +1295,7 @@ static void free_module(struct module *mod)
{
/* Delete from various lists */
stop_machine_run(__unlink_module, mod, NR_CPUS);
+ remove_notes_attrs(mod);
remove_sect_attrs(mod);
mod_kobject_remove(mod);
@@ -1249,14 +1348,14 @@ static int verify_export_symbols(struct module *mod)
const unsigned long *crc;
for (i = 0; i < mod->num_syms; i++)
- if (__find_symbol(mod->syms[i].name, &owner, &crc, 1)) {
+ if (__find_symbol(mod->syms[i].name, &owner, &crc, 1)) {
name = mod->syms[i].name;
ret = -ENOEXEC;
goto dup;
}
for (i = 0; i < mod->num_gpl_syms; i++)
- if (__find_symbol(mod->gpl_syms[i].name, &owner, &crc, 1)) {
+ if (__find_symbol(mod->gpl_syms[i].name, &owner, &crc, 1)) {
name = mod->gpl_syms[i].name;
ret = -ENOEXEC;
goto dup;
@@ -1782,7 +1881,8 @@ static struct module *load_module(void __user *umod,
module_unload_init(mod);
/* Initialize kobject, so we can reference it. */
- if (mod_sysfs_init(mod) != 0)
+ err = mod_sysfs_init(mod);
+ if (err)
goto cleanup;
/* Set up license info based on the info section */
@@ -1829,7 +1929,7 @@ static struct module *load_module(void __user *umod,
mod->unused_crcs = (void *)sechdrs[unusedgplcrcindex].sh_addr;
#ifdef CONFIG_MODVERSIONS
- if ((mod->num_syms && !crcindex) ||
+ if ((mod->num_syms && !crcindex) ||
(mod->num_gpl_syms && !gplcrcindex) ||
(mod->num_gpl_future_syms && !gplfuturecrcindex) ||
(mod->num_unused_syms && !unusedcrcindex) ||
@@ -1916,7 +2016,7 @@ static struct module *load_module(void __user *umod,
if (err < 0)
goto arch_cleanup;
- err = mod_sysfs_setup(mod,
+ err = mod_sysfs_setup(mod,
(struct kernel_param *)
sechdrs[setupindex].sh_addr,
sechdrs[setupindex].sh_size
@@ -1924,11 +2024,12 @@ static struct module *load_module(void __user *umod,
if (err < 0)
goto arch_cleanup;
add_sect_attrs(mod, hdr->e_shnum, secstrings, sechdrs);
+ add_notes_attrs(mod, hdr->e_shnum, secstrings, sechdrs);
/* Size of section 0 is 0, so this works well if no unwind info. */
mod->unwind_info = unwind_add_table(mod,
- (void *)sechdrs[unwindex].sh_addr,
- sechdrs[unwindex].sh_size);
+ (void *)sechdrs[unwindex].sh_addr,
+ sechdrs[unwindex].sh_size);
/* Get rid of temporary copy */
vfree(hdr);
@@ -2011,15 +2112,10 @@ sys_init_module(void __user *umod,
buggy refcounters. */
mod->state = MODULE_STATE_GOING;
synchronize_sched();
- if (mod->unsafe)
- printk(KERN_ERR "%s: module is now stuck!\n",
- mod->name);
- else {
- module_put(mod);
- mutex_lock(&module_mutex);
- free_module(mod);
- mutex_unlock(&module_mutex);
- }
+ module_put(mod);
+ mutex_lock(&module_mutex);
+ free_module(mod);
+ mutex_unlock(&module_mutex);
return ret;
}
@@ -2050,7 +2146,7 @@ static inline int within(unsigned long addr, void *start, unsigned long size)
*/
static inline int is_arm_mapping_symbol(const char *str)
{
- return str[0] == '$' && strchr("atd", str[1])
+ return str[0] == '$' && strchr("atd", str[1])
&& (str[2] == '\0' || str[2] == '.');
}
@@ -2065,11 +2161,11 @@ static const char *get_ksymbol(struct module *mod,
/* At worse, next value is at end of module */
if (within(addr, mod->module_init, mod->init_size))
nextval = (unsigned long)mod->module_init+mod->init_text_size;
- else
+ else
nextval = (unsigned long)mod->module_core+mod->core_text_size;
	/* Scan for closest preceding symbol, and next symbol. (ELF
- starts real symbols at 1). */
+ starts real symbols at 1). */
for (i = 1; i < mod->num_symtab; i++) {
if (mod->symtab[i].st_shndx == SHN_UNDEF)
continue;
@@ -2311,7 +2407,7 @@ const struct exception_table_entry *search_module_extables(unsigned long addr)
list_for_each_entry(mod, &modules, list) {
if (mod->num_exentries == 0)
continue;
-
+
e = search_extable(mod->extable,
mod->extable + mod->num_exentries - 1,
addr);
@@ -2321,7 +2417,7 @@ const struct exception_table_entry *search_module_extables(unsigned long addr)
preempt_enable();
/* Now, if we found one, we are running inside it now, hence
- we cannot unload the module, hence no refcnt needed. */
+ we cannot unload the module, hence no refcnt needed. */
return e;
}
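add_notes_attrs() exposes every allocated SHT_NOTE section of a loaded module as a read-only binary file under /sys/module/<name>/notes/, named after the section. A userspace sketch of reading one; the module and section names here are only examples (a build-id note is the usual candidate):

/* sketch: dump a module note from sysfs (module/section names illustrative) */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/module/usbcore/notes/.note.gnu.build-id";
	unsigned char buf[256];
	size_t n;
	FILE *f = fopen(path, "rb");

	if (!f) {
		perror(path);
		return 1;
	}
	n = fread(buf, 1, sizeof(buf), f);
	printf("read %zu bytes of ELF note data\n", n);
	fclose(f);
	return 0;
}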
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 691b86564dd..d7fe50cc556 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -51,6 +51,7 @@ __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
EXPORT_SYMBOL(__mutex_init);
+#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
* We split the mutex lock/unlock logic into separate fastpath and
* slowpath functions, to reduce the register pressure on the fastpath.
@@ -92,6 +93,7 @@ void inline fastcall __sched mutex_lock(struct mutex *lock)
}
EXPORT_SYMBOL(mutex_lock);
+#endif
static void fastcall noinline __sched
__mutex_unlock_slowpath(atomic_t *lock_count);
@@ -122,7 +124,8 @@ EXPORT_SYMBOL(mutex_unlock);
* Lock a mutex (possibly interruptible), slowpath:
*/
static inline int __sched
-__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
+__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
+ unsigned long ip)
{
struct task_struct *task = current;
struct mutex_waiter waiter;
@@ -132,7 +135,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
spin_lock_mutex(&lock->wait_lock, flags);
debug_mutex_lock_common(lock, &waiter);
- mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+ mutex_acquire(&lock->dep_map, subclass, 0, ip);
debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
/* add waiting tasks to the end of the waitqueue (FIFO): */
@@ -143,7 +146,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
if (old_val == 1)
goto done;
- lock_contended(&lock->dep_map, _RET_IP_);
+ lock_contended(&lock->dep_map, ip);
for (;;) {
/*
@@ -166,7 +169,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
if (unlikely(state == TASK_INTERRUPTIBLE &&
signal_pending(task))) {
mutex_remove_waiter(lock, &waiter, task_thread_info(task));
- mutex_release(&lock->dep_map, 1, _RET_IP_);
+ mutex_release(&lock->dep_map, 1, ip);
spin_unlock_mutex(&lock->wait_lock, flags);
debug_mutex_free_waiter(&waiter);
@@ -197,20 +200,12 @@ done:
return 0;
}
-static void fastcall noinline __sched
-__mutex_lock_slowpath(atomic_t *lock_count)
-{
- struct mutex *lock = container_of(lock_count, struct mutex, count);
-
- __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0);
-}
-
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
might_sleep();
- __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass);
+ __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_nested);
@@ -219,7 +214,7 @@ int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
might_sleep();
- return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass);
+ return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
@@ -271,6 +266,7 @@ __mutex_unlock_slowpath(atomic_t *lock_count)
__mutex_unlock_common_slowpath(lock_count, 1);
}
+#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
* Here come the less common (and hence less performance-critical) APIs:
* mutex_lock_interruptible() and mutex_trylock().
@@ -298,13 +294,22 @@ int fastcall __sched mutex_lock_interruptible(struct mutex *lock)
EXPORT_SYMBOL(mutex_lock_interruptible);
+static void fastcall noinline __sched
+__mutex_lock_slowpath(atomic_t *lock_count)
+{
+ struct mutex *lock = container_of(lock_count, struct mutex, count);
+
+ __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, _RET_IP_);
+}
+
static int fastcall noinline __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
{
struct mutex *lock = container_of(lock_count, struct mutex, count);
- return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0);
+ return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, _RET_IP_);
}
+#endif
/*
* Spinlock based trylock, we take the spinlock and check whether we
diff --git a/kernel/nsproxy.c b/kernel/nsproxy.c
index f1decd21a53..049e7c0ac56 100644
--- a/kernel/nsproxy.c
+++ b/kernel/nsproxy.c
@@ -203,8 +203,7 @@ int unshare_nsproxy_namespaces(unsigned long unshare_flags,
static int __init nsproxy_cache_init(void)
{
- nsproxy_cachep = kmem_cache_create("nsproxy", sizeof(struct nsproxy),
- 0, SLAB_PANIC, NULL);
+ nsproxy_cachep = KMEM_CACHE(nsproxy, SLAB_PANIC);
return 0;
}
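KMEM_CACHE() derives the cache name, object size and alignment from the struct itself, so the open-coded call goes away. Roughly, the new line stands for the following (expansion shown for illustration; the alignment comes from __alignof__):

/* sketch: approximate expansion of KMEM_CACHE(nsproxy, SLAB_PANIC) */
nsproxy_cachep = kmem_cache_create("nsproxy",
				   sizeof(struct nsproxy),
				   __alignof__(struct nsproxy),
				   SLAB_PANIC, NULL);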
diff --git a/kernel/panic.c b/kernel/panic.c
index f64f4c1ac11..3886bd8230f 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -56,14 +56,14 @@ EXPORT_SYMBOL(panic_blink);
*
* This function never returns.
*/
-
+
NORET_TYPE void panic(const char * fmt, ...)
{
long i;
static char buf[1024];
va_list args;
#if defined(CONFIG_S390)
- unsigned long caller = (unsigned long) __builtin_return_address(0);
+ unsigned long caller = (unsigned long) __builtin_return_address(0);
#endif
/*
@@ -128,7 +128,7 @@ NORET_TYPE void panic(const char * fmt, ...)
}
#endif
#if defined(CONFIG_S390)
- disabled_wait(caller);
+ disabled_wait(caller);
#endif
local_irq_enable();
for (i = 0;;) {
@@ -154,7 +154,7 @@ EXPORT_SYMBOL(panic);
*
* The string is overwritten by the next call to print_taint().
*/
-
+
const char *print_tainted(void)
{
static char buf[20];
@@ -164,7 +164,7 @@ const char *print_tainted(void)
tainted & TAINT_FORCED_MODULE ? 'F' : ' ',
tainted & TAINT_UNSAFE_SMP ? 'S' : ' ',
tainted & TAINT_FORCED_RMMOD ? 'R' : ' ',
- tainted & TAINT_MACHINE_CHECK ? 'M' : ' ',
+ tainted & TAINT_MACHINE_CHECK ? 'M' : ' ',
tainted & TAINT_BAD_PAGE ? 'B' : ' ',
tainted & TAINT_USER ? 'U' : ' ',
tainted & TAINT_DIE ? 'D' : ' ');
diff --git a/kernel/params.c b/kernel/params.c
index 4e57732fcfb..16f269e9ddc 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -252,8 +252,9 @@ int param_get_bool(char *buffer, struct kernel_param *kp)
int param_set_invbool(const char *val, struct kernel_param *kp)
{
int boolval, ret;
- struct kernel_param dummy = { .arg = &boolval };
+ struct kernel_param dummy;
+ dummy.arg = &boolval;
ret = param_set_bool(val, &dummy);
if (ret == 0)
*(int *)kp->arg = !boolval;
@@ -262,11 +263,7 @@ int param_set_invbool(const char *val, struct kernel_param *kp)
int param_get_invbool(char *buffer, struct kernel_param *kp)
{
- int val;
- struct kernel_param dummy = { .arg = &val };
-
- val = !*(int *)kp->arg;
- return param_get_bool(buffer, &dummy);
+ return sprintf(buffer, "%c", (*(int *)kp->arg) ? 'N' : 'Y');
}
/* We break the rule and mangle the string. */
@@ -325,7 +322,7 @@ static int param_array(const char *name,
int param_array_set(const char *val, struct kernel_param *kp)
{
- struct kparam_array *arr = kp->arg;
+ const struct kparam_array *arr = kp->arr;
unsigned int temp_num;
return param_array(kp->name, val, 1, arr->max, arr->elem,
@@ -335,7 +332,7 @@ int param_array_set(const char *val, struct kernel_param *kp)
int param_array_get(char *buffer, struct kernel_param *kp)
{
int i, off, ret;
- struct kparam_array *arr = kp->arg;
+ const struct kparam_array *arr = kp->arr;
struct kernel_param p;
p = *kp;
@@ -354,7 +351,7 @@ int param_array_get(char *buffer, struct kernel_param *kp)
int param_set_copystring(const char *val, struct kernel_param *kp)
{
- struct kparam_string *kps = kp->arg;
+ const struct kparam_string *kps = kp->str;
if (!val) {
printk(KERN_ERR "%s: missing param set value\n", kp->name);
@@ -371,7 +368,7 @@ int param_set_copystring(const char *val, struct kernel_param *kp)
int param_get_string(char *buffer, struct kernel_param *kp)
{
- struct kparam_string *kps = kp->arg;
+ const struct kparam_string *kps = kp->str;
return strlcpy(buffer, kps->string, kps->maxlen);
}
@@ -595,11 +592,17 @@ static void __init param_sysfs_builtin(void)
for (i=0; i < __stop___param - __start___param; i++) {
char *dot;
+ size_t kplen;
kp = &__start___param[i];
+ kplen = strlen(kp->name);
/* We do not handle args without periods. */
- dot = memchr(kp->name, '.', MAX_KBUILD_MODNAME);
+ if (kplen > MAX_KBUILD_MODNAME) {
+ DEBUGP("kernel parameter name is too long: %s\n", kp->name);
+ continue;
+ }
+ dot = memchr(kp->name, '.', kplen);
if (!dot) {
DEBUGP("couldn't find period in %s\n", kp->name);
continue;
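param_get_invbool() now prints the inverted value directly instead of routing through a dummy kernel_param, so an invbool parameter reads 'Y' exactly when the underlying variable is zero. A usage sketch; the driver variable and parameter name are illustrative:

/* sketch: an inverted-bool module parameter (names illustrative) */
static int use_dma = 1;				/* stored in the positive sense */
module_param_named(nodma, use_dma, invbool, 0644);
/* writing nodma=1 clears use_dma; reading nodma shows 'Y' while use_dma == 0 */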
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 57efe0400bc..d11f579d189 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -241,7 +241,8 @@ static __init int init_posix_timers(void)
register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
posix_timers_cache = kmem_cache_create("posix_timers_cache",
- sizeof (struct k_itimer), 0, 0, NULL);
+ sizeof (struct k_itimer), 0, SLAB_PANIC,
+ NULL);
idr_init(&posix_timers_id);
return 0;
}
@@ -980,9 +981,20 @@ sys_clock_getres(const clockid_t which_clock, struct timespec __user *tp)
static int common_nsleep(const clockid_t which_clock, int flags,
struct timespec *tsave, struct timespec __user *rmtp)
{
- return hrtimer_nanosleep(tsave, rmtp, flags & TIMER_ABSTIME ?
- HRTIMER_MODE_ABS : HRTIMER_MODE_REL,
- which_clock);
+ struct timespec rmt;
+ int ret;
+
+ ret = hrtimer_nanosleep(tsave, rmtp ? &rmt : NULL,
+ flags & TIMER_ABSTIME ?
+ HRTIMER_MODE_ABS : HRTIMER_MODE_REL,
+ which_clock);
+
+ if (ret && rmtp) {
+ if (copy_to_user(rmtp, &rmt, sizeof(*rmtp)))
+ return -EFAULT;
+ }
+
+ return ret;
}
asmlinkage long
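common_nsleep() now does the copy-out of the remaining time itself, so when a relative clock_nanosleep() is interrupted the caller's rmtp is actually filled in. A userspace sketch of relying on that; standard POSIX usage, values illustrative:

/* sketch: reading back the unslept time after an interrupted sleep */
#include <stdio.h>
#include <time.h>
#include <errno.h>

int main(void)
{
	struct timespec req = { .tv_sec = 5, .tv_nsec = 0 }, rem = { 0, 0 };
	int err = clock_nanosleep(CLOCK_MONOTONIC, 0, &req, &rem);

	if (err == EINTR)	/* interrupted by a signal: rem holds what is left */
		printf("%ld.%09ld seconds left\n", (long)rem.tv_sec, rem.tv_nsec);
	return 0;
}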
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 14b0e10dc95..8e186c67814 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -44,17 +44,6 @@ config PM_VERBOSE
---help---
This option enables verbose messages from the Power Management code.
-config DISABLE_CONSOLE_SUSPEND
- bool "Keep console(s) enabled during suspend/resume (DANGEROUS)"
- depends on PM_DEBUG && PM_SLEEP
- default n
- ---help---
- This option turns off the console suspend mechanism that prevents
- debug messages from reaching the console during the suspend/resume
- operations. This may be helpful when debugging device drivers'
- suspend/resume routines, but may itself lead to problems, for example
- if netconsole is used.
-
config PM_TRACE
bool "Suspend/resume event tracing"
depends on PM_DEBUG && X86 && PM_SLEEP && EXPERIMENTAL
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index eb72255b5c8..8b15f777010 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -45,17 +45,18 @@ enum {
static int hibernation_mode = HIBERNATION_SHUTDOWN;
-static struct hibernation_ops *hibernation_ops;
+static struct platform_hibernation_ops *hibernation_ops;
/**
* hibernation_set_ops - set the global hibernate operations
* @ops: the hibernation operations to use in subsequent hibernation transitions
*/
-void hibernation_set_ops(struct hibernation_ops *ops)
+void hibernation_set_ops(struct platform_hibernation_ops *ops)
{
- if (ops && !(ops->prepare && ops->enter && ops->finish
- && ops->pre_restore && ops->restore_cleanup)) {
+ if (ops && !(ops->start && ops->pre_snapshot && ops->finish
+ && ops->prepare && ops->enter && ops->pre_restore
+ && ops->restore_cleanup)) {
WARN_ON(1);
return;
}
@@ -69,16 +70,37 @@ void hibernation_set_ops(struct hibernation_ops *ops)
mutex_unlock(&pm_mutex);
}
+/**
+ * platform_start - tell the platform driver that we're starting
+ * hibernation
+ */
+
+static int platform_start(int platform_mode)
+{
+ return (platform_mode && hibernation_ops) ?
+ hibernation_ops->start() : 0;
+}
/**
- * platform_prepare - prepare the machine for hibernation using the
+ * platform_pre_snapshot - prepare the machine for hibernation using the
* platform driver if so configured and return an error code if it fails
*/
-static int platform_prepare(int platform_mode)
+static int platform_pre_snapshot(int platform_mode)
{
return (platform_mode && hibernation_ops) ?
- hibernation_ops->prepare() : 0;
+ hibernation_ops->pre_snapshot() : 0;
+}
+
+/**
+ * platform_leave - prepare the machine for switching to the normal mode
+ * of operation using the platform driver (called with interrupts disabled)
+ */
+
+static void platform_leave(int platform_mode)
+{
+ if (platform_mode && hibernation_ops)
+ hibernation_ops->leave();
}
/**
@@ -118,6 +140,51 @@ static void platform_restore_cleanup(int platform_mode)
}
/**
+ * create_image - freeze devices that need to be frozen with interrupts
+ * off, create the hibernation image and thaw those devices. Control
+ * reappears in this routine after a restore.
+ */
+
+int create_image(int platform_mode)
+{
+ int error;
+
+ error = arch_prepare_suspend();
+ if (error)
+ return error;
+
+ local_irq_disable();
+ /* At this point, device_suspend() has been called, but *not*
+ * device_power_down(). We *must* call device_power_down() now.
+ * Otherwise, drivers for some devices (e.g. interrupt controllers)
+ * become desynchronized with the actual state of the hardware
+ * at resume time, and evil weirdness ensues.
+ */
+ error = device_power_down(PMSG_FREEZE);
+ if (error) {
+		printk(KERN_ERR "Some devices failed to power down, "
+			"aborting suspend\n");

+ goto Enable_irqs;
+ }
+
+ save_processor_state();
+ error = swsusp_arch_suspend();
+ if (error)
+ printk(KERN_ERR "Error %d while creating the image\n", error);
+ /* Restore control flow magically appears here */
+ restore_processor_state();
+ if (!in_suspend)
+ platform_leave(platform_mode);
+ /* NOTE: device_power_up() is just a resume() for devices
+ * that suspended with irqs off ... no overall powerup.
+ */
+ device_power_up();
+ Enable_irqs:
+ local_irq_enable();
+ return error;
+}
+
+/**
* hibernation_snapshot - quiesce devices and create the hibernation
* snapshot image.
* @platform_mode - if set, use the platform driver, if available, to
@@ -135,12 +202,16 @@ int hibernation_snapshot(int platform_mode)
if (error)
return error;
+ error = platform_start(platform_mode);
+ if (error)
+ return error;
+
suspend_console();
error = device_suspend(PMSG_FREEZE);
if (error)
goto Resume_console;
- error = platform_prepare(platform_mode);
+ error = platform_pre_snapshot(platform_mode);
if (error)
goto Resume_devices;
@@ -148,7 +219,7 @@ int hibernation_snapshot(int platform_mode)
if (!error) {
if (hibernation_mode != HIBERNATION_TEST) {
in_suspend = 1;
- error = swsusp_suspend();
+ error = create_image(platform_mode);
/* Control returns here after successful restore */
} else {
printk("swsusp debug: Waiting for 5 seconds.\n");
@@ -207,21 +278,50 @@ int hibernation_platform_enter(void)
{
int error;
- if (hibernation_ops) {
- kernel_shutdown_prepare(SYSTEM_SUSPEND_DISK);
- /*
- * We have cancelled the power transition by running
- * hibernation_ops->finish() before saving the image, so we
- * should let the firmware know that we're going to enter the
- * sleep state after all
- */
- error = hibernation_ops->prepare();
- sysdev_shutdown();
- if (!error)
- error = hibernation_ops->enter();
- } else {
- error = -ENOSYS;
+ if (!hibernation_ops)
+ return -ENOSYS;
+
+ /*
+ * We have cancelled the power transition by running
+ * hibernation_ops->finish() before saving the image, so we should let
+ * the firmware know that we're going to enter the sleep state after all
+ */
+ error = hibernation_ops->start();
+ if (error)
+ return error;
+
+ suspend_console();
+ error = device_suspend(PMSG_SUSPEND);
+ if (error)
+ goto Resume_console;
+
+ error = hibernation_ops->prepare();
+ if (error)
+ goto Resume_devices;
+
+ error = disable_nonboot_cpus();
+ if (error)
+ goto Finish;
+
+ local_irq_disable();
+ error = device_power_down(PMSG_SUSPEND);
+ if (!error) {
+ hibernation_ops->enter();
+ /* We should never get here */
+ while (1);
}
+ local_irq_enable();
+
+ /*
+ * We don't need to reenable the nonboot CPUs or resume consoles, since
+ * the system is going to be halted anyway.
+ */
+ Finish:
+ hibernation_ops->finish();
+ Resume_devices:
+ device_resume();
+ Resume_console:
+ resume_console();
return error;
}
@@ -238,14 +338,14 @@ static void power_down(void)
case HIBERNATION_TEST:
case HIBERNATION_TESTPROC:
break;
- case HIBERNATION_SHUTDOWN:
- kernel_power_off();
- break;
case HIBERNATION_REBOOT:
kernel_restart(NULL);
break;
case HIBERNATION_PLATFORM:
hibernation_platform_enter();
+ case HIBERNATION_SHUTDOWN:
+ kernel_power_off();
+ break;
}
kernel_halt();
/*
@@ -298,6 +398,10 @@ int hibernate(void)
if (error)
goto Exit;
+ printk("Syncing filesystems ... ");
+ sys_sync();
+ printk("done.\n");
+
error = prepare_processes();
if (error)
goto Finish;
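The hibernation callbacks gain ->start(), ->pre_snapshot() and ->leave() in addition to the existing ones, matching the checks in hibernation_set_ops() above. A skeleton of a platform driver registering them; the stub bodies and exact signatures are assumptions, only the field names come from this patch:

/* sketch: registering platform hibernation callbacks (stubs are illustrative) */
static int my_start(void)		{ return 0; }
static int my_pre_snapshot(void)	{ return 0; }
static void my_finish(void)		{ }
static int my_prepare(void)		{ return 0; }
static int my_enter(void)		{ return 0; }	/* not expected to return on success */
static void my_leave(void)		{ }
static int my_pre_restore(void)		{ return 0; }
static void my_restore_cleanup(void)	{ }

static struct platform_hibernation_ops my_hibernation_ops = {
	.start		= my_start,
	.pre_snapshot	= my_pre_snapshot,
	.finish		= my_finish,
	.prepare	= my_prepare,
	.enter		= my_enter,
	.leave		= my_leave,
	.pre_restore	= my_pre_restore,
	.restore_cleanup = my_restore_cleanup,
};

/* in the platform driver's init path: */
hibernation_set_ops(&my_hibernation_ops);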
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 350b485b3b6..3cdf95b1dc9 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -20,6 +20,7 @@
#include <linux/resume-trace.h>
#include <linux/freezer.h>
#include <linux/vmstat.h>
+#include <linux/syscalls.h>
#include "power.h"
@@ -32,39 +33,32 @@ DEFINE_MUTEX(pm_mutex);
/* This is just an arbitrary number */
#define FREE_PAGE_NUMBER (100)
-struct pm_ops *pm_ops;
+static struct platform_suspend_ops *suspend_ops;
/**
- * pm_set_ops - Set the global power method table.
+ * suspend_set_ops - Set the global suspend method table.
* @ops: Pointer to ops structure.
*/
-void pm_set_ops(struct pm_ops * ops)
+void suspend_set_ops(struct platform_suspend_ops *ops)
{
mutex_lock(&pm_mutex);
- pm_ops = ops;
+ suspend_ops = ops;
mutex_unlock(&pm_mutex);
}
/**
- * pm_valid_only_mem - generic memory-only valid callback
+ * suspend_valid_only_mem - generic memory-only valid callback
*
- * pm_ops drivers that implement mem suspend only and only need
+ * Platform drivers that implement mem suspend only and only need
* to check for that in their .valid callback can use this instead
* of rolling their own .valid callback.
*/
-int pm_valid_only_mem(suspend_state_t state)
+int suspend_valid_only_mem(suspend_state_t state)
{
return state == PM_SUSPEND_MEM;
}
-
-static inline void pm_finish(suspend_state_t state)
-{
- if (pm_ops->finish)
- pm_ops->finish(state);
-}
-
/**
* suspend_prepare - Do prep work before entering low-power state.
*
@@ -76,7 +70,7 @@ static int suspend_prepare(void)
int error;
unsigned int free_pages;
- if (!pm_ops || !pm_ops->enter)
+ if (!suspend_ops || !suspend_ops->enter)
return -EPERM;
error = pm_notifier_call_chain(PM_SUSPEND_PREPARE);
@@ -128,7 +122,7 @@ void __attribute__ ((weak)) arch_suspend_enable_irqs(void)
*
* This function should be called after devices have been suspended.
*/
-int suspend_enter(suspend_state_t state)
+static int suspend_enter(suspend_state_t state)
{
int error = 0;
@@ -139,7 +133,7 @@ int suspend_enter(suspend_state_t state)
printk(KERN_ERR "Some devices failed to power down\n");
goto Done;
}
- error = pm_ops->enter(state);
+ error = suspend_ops->enter(state);
device_power_up();
Done:
arch_suspend_enable_irqs();
@@ -156,11 +150,11 @@ int suspend_devices_and_enter(suspend_state_t state)
{
int error;
- if (!pm_ops)
+ if (!suspend_ops)
return -ENOSYS;
- if (pm_ops->set_target) {
- error = pm_ops->set_target(state);
+ if (suspend_ops->set_target) {
+ error = suspend_ops->set_target(state);
if (error)
return error;
}
@@ -170,8 +164,8 @@ int suspend_devices_and_enter(suspend_state_t state)
printk(KERN_ERR "Some devices failed to suspend\n");
goto Resume_console;
}
- if (pm_ops->prepare) {
- error = pm_ops->prepare(state);
+ if (suspend_ops->prepare) {
+ error = suspend_ops->prepare();
if (error)
goto Resume_devices;
}
@@ -180,7 +174,8 @@ int suspend_devices_and_enter(suspend_state_t state)
suspend_enter(state);
enable_nonboot_cpus();
- pm_finish(state);
+ if (suspend_ops->finish)
+ suspend_ops->finish();
Resume_devices:
device_resume();
Resume_console:
@@ -214,7 +209,7 @@ static inline int valid_state(suspend_state_t state)
/* All states need lowlevel support and need to be valid
* to the lowlevel implementation, no valid callback
* implies that none are valid. */
- if (!pm_ops || !pm_ops->valid || !pm_ops->valid(state))
+ if (!suspend_ops || !suspend_ops->valid || !suspend_ops->valid(state))
return 0;
return 1;
}
@@ -236,9 +231,14 @@ static int enter_state(suspend_state_t state)
if (!valid_state(state))
return -ENODEV;
+
if (!mutex_trylock(&pm_mutex))
return -EBUSY;
+ printk("Syncing filesystems ... ");
+ sys_sync();
+ printk("done.\n");
+
pr_debug("PM: Preparing system for %s sleep\n", pm_states[state]);
if ((error = suspend_prepare()))
goto Unlock;
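The suspend side mirrors this: platform code registers a platform_suspend_ops via suspend_set_ops(), with ->valid and ->enter being the callbacks the core insists on; suspend_valid_only_mem() serves platforms that only support suspend-to-RAM. A minimal sketch (the stub body is illustrative, the field names are taken from the calls above):

/* sketch: minimal suspend-to-RAM registration (stub is illustrative) */
static int my_suspend_enter(suspend_state_t state)
{
	/* only reached for PM_SUSPEND_MEM because of the .valid callback */
	return 0;
}

static struct platform_suspend_ops my_suspend_ops = {
	.valid	= suspend_valid_only_mem,
	.enter	= my_suspend_enter,
};

static int __init my_pm_init(void)
{
	suspend_set_ops(&my_suspend_ops);
	return 0;
}
__initcall(my_pm_init);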
diff --git a/kernel/power/power.h b/kernel/power/power.h
index 95fbf2dd3fe..195dc461176 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -11,14 +11,32 @@ struct swsusp_info {
unsigned long size;
} __attribute__((aligned(PAGE_SIZE)));
+#ifdef CONFIG_HIBERNATION
+#ifdef CONFIG_ARCH_HIBERNATION_HEADER
+/* Maximum size of architecture specific data in a hibernation header */
+#define MAX_ARCH_HEADER_SIZE (sizeof(struct new_utsname) + 4)
+extern int arch_hibernation_header_save(void *addr, unsigned int max_size);
+extern int arch_hibernation_header_restore(void *addr);
+
+static inline int init_header_complete(struct swsusp_info *info)
+{
+ return arch_hibernation_header_save(info, MAX_ARCH_HEADER_SIZE);
+}
+
+static inline char *check_image_kernel(struct swsusp_info *info)
+{
+ return arch_hibernation_header_restore(info) ?
+ "architecture specific data" : NULL;
+}
+#endif /* CONFIG_ARCH_HIBERNATION_HEADER */
-#ifdef CONFIG_HIBERNATION
/*
* Keep some memory free so that I/O operations can succeed without paging
* [Might this be more than 4 MB?]
*/
#define PAGES_FOR_IO ((4096 * 1024) >> PAGE_SHIFT)
+
/*
* Keep 1 MB of memory free so that device drivers can allocate some pages in
* their .suspend() routines without breaking the suspend to disk.
@@ -165,7 +183,6 @@ extern int swsusp_swap_in_use(void);
extern int swsusp_check(void);
extern int swsusp_shrink_memory(void);
extern void swsusp_free(void);
-extern int swsusp_suspend(void);
extern int swsusp_resume(void);
extern int swsusp_read(unsigned int *flags_p);
extern int swsusp_write(unsigned int flags);
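With CONFIG_ARCH_HIBERNATION_HEADER an architecture stores its own data in the image header and validates it on resume; the nonzero-on-failure convention follows from the init_header_complete()/check_image_kernel() wrappers above. A hedged sketch of the pair, with the saved layout being entirely hypothetical:

/* sketch: arch hooks behind CONFIG_ARCH_HIBERNATION_HEADER (layout is hypothetical) */
struct my_arch_hdr {
	unsigned long magic;
};

int arch_hibernation_header_save(void *addr, unsigned int max_size)
{
	struct my_arch_hdr *hdr = addr;

	if (max_size < sizeof(*hdr))
		return -EOVERFLOW;
	hdr->magic = 0x53555350;		/* arbitrary marker checked on restore */
	return 0;
}

int arch_hibernation_header_restore(void *addr)
{
	struct my_arch_hdr *hdr = addr;

	return hdr->magic == 0x53555350 ? 0 : -EINVAL;
}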
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 3434940a3df..6533923e711 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -75,21 +75,79 @@ void refrigerator(void)
__set_current_state(save);
}
-static void freeze_task(struct task_struct *p)
+static void fake_signal_wake_up(struct task_struct *p, int resume)
{
unsigned long flags;
- if (!freezing(p)) {
+ spin_lock_irqsave(&p->sighand->siglock, flags);
+ signal_wake_up(p, resume);
+ spin_unlock_irqrestore(&p->sighand->siglock, flags);
+}
+
+static void send_fake_signal(struct task_struct *p)
+{
+ if (p->state == TASK_STOPPED)
+ force_sig_specific(SIGSTOP, p);
+ fake_signal_wake_up(p, p->state == TASK_STOPPED);
+}
+
+static int has_mm(struct task_struct *p)
+{
+ return (p->mm && !(p->flags & PF_BORROWED_MM));
+}
+
+/**
+ * freeze_task - send a freeze request to given task
+ * @p: task to send the request to
+ * @with_mm_only: if set, the request will only be sent if the task has its
+ * own mm
+ * Return value: 0 if @with_mm_only is set and the task has no mm of its
+ * own or the task is frozen, 1 otherwise
+ *
+ * The freeze request is sent by setting the task's TIF_FREEZE flag and
+ * either sending a fake signal to it or waking it up, depending on whether
+ * or not it has its own mm (i.e. it is a user space task). If @with_mm_only
+ * is set and the task has no mm of its own (i.e. it is a kernel thread),
+ * its TIF_FREEZE flag should not be set.
+ *
+ * The task_lock() is necessary to prevent races with exit_mm() or
+ * use_mm()/unuse_mm() from occurring.
+ */
+static int freeze_task(struct task_struct *p, int with_mm_only)
+{
+ int ret = 1;
+
+ task_lock(p);
+ if (freezing(p)) {
+ if (has_mm(p)) {
+ if (!signal_pending(p))
+ fake_signal_wake_up(p, 0);
+ } else {
+ if (with_mm_only)
+ ret = 0;
+ else
+ wake_up_state(p, TASK_INTERRUPTIBLE);
+ }
+ } else {
rmb();
- if (!frozen(p)) {
- set_freeze_flag(p);
- if (p->state == TASK_STOPPED)
- force_sig_specific(SIGSTOP, p);
- spin_lock_irqsave(&p->sighand->siglock, flags);
- signal_wake_up(p, p->state == TASK_STOPPED);
- spin_unlock_irqrestore(&p->sighand->siglock, flags);
+ if (frozen(p)) {
+ ret = 0;
+ } else {
+ if (has_mm(p)) {
+ set_freeze_flag(p);
+ send_fake_signal(p);
+ } else {
+ if (with_mm_only) {
+ ret = 0;
+ } else {
+ set_freeze_flag(p);
+ wake_up_state(p, TASK_INTERRUPTIBLE);
+ }
+ }
}
}
+ task_unlock(p);
+ return ret;
}
static void cancel_freezing(struct task_struct *p)
@@ -110,6 +168,11 @@ static int try_to_freeze_tasks(int freeze_user_space)
struct task_struct *g, *p;
unsigned long end_time;
unsigned int todo;
+ struct timeval start, end;
+ s64 elapsed_csecs64;
+ unsigned int elapsed_csecs;
+
+ do_gettimeofday(&start);
end_time = jiffies + TIMEOUT;
do {
@@ -119,31 +182,14 @@ static int try_to_freeze_tasks(int freeze_user_space)
if (frozen(p) || !freezeable(p))
continue;
- if (freeze_user_space) {
- if (p->state == TASK_TRACED &&
- frozen(p->parent)) {
- cancel_freezing(p);
- continue;
- }
- /*
- * Kernel threads should not have TIF_FREEZE set
- * at this point, so we must ensure that either
- * p->mm is not NULL *and* PF_BORROWED_MM is
- * unset, or TIF_FRREZE is left unset.
- * The task_lock() is necessary to prevent races
- * with exit_mm() or use_mm()/unuse_mm() from
- * occuring.
- */
- task_lock(p);
- if (!p->mm || (p->flags & PF_BORROWED_MM)) {
- task_unlock(p);
- continue;
- }
- freeze_task(p);
- task_unlock(p);
- } else {
- freeze_task(p);
+ if (p->state == TASK_TRACED && frozen(p->parent)) {
+ cancel_freezing(p);
+ continue;
}
+
+ if (!freeze_task(p, freeze_user_space))
+ continue;
+
if (!freezer_should_skip(p))
todo++;
} while_each_thread(g, p);
@@ -153,6 +199,11 @@ static int try_to_freeze_tasks(int freeze_user_space)
break;
} while (todo);
+ do_gettimeofday(&end);
+ elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
+ do_div(elapsed_csecs64, NSEC_PER_SEC / 100);
+ elapsed_csecs = elapsed_csecs64;
+
if (todo) {
/* This does not unfreeze processes that are already frozen
* (we have slightly ugly calling convention in that respect,
@@ -160,10 +211,9 @@ static int try_to_freeze_tasks(int freeze_user_space)
* but it cleans up leftover PF_FREEZE requests.
*/
printk("\n");
- printk(KERN_ERR "Freezing of %s timed out after %d seconds "
+ printk(KERN_ERR "Freezing of tasks failed after %d.%02d seconds "
"(%d tasks refusing to freeze):\n",
- freeze_user_space ? "user space " : "tasks ",
- TIMEOUT / HZ, todo);
+ elapsed_csecs / 100, elapsed_csecs % 100, todo);
show_state();
read_lock(&tasklist_lock);
do_each_thread(g, p) {
@@ -174,6 +224,9 @@ static int try_to_freeze_tasks(int freeze_user_space)
task_unlock(p);
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
+ } else {
+ printk("(elapsed %d.%02d seconds) ", elapsed_csecs / 100,
+ elapsed_csecs % 100);
}
return todo ? -EBUSY : 0;
@@ -186,19 +239,21 @@ int freeze_processes(void)
{
int error;
- printk("Stopping tasks ... ");
+ printk("Freezing user space processes ... ");
error = try_to_freeze_tasks(FREEZER_USER_SPACE);
if (error)
- return error;
+ goto Exit;
+ printk("done.\n");
- sys_sync();
+ printk("Freezing remaining freezable tasks ... ");
error = try_to_freeze_tasks(FREEZER_KERNEL_THREADS);
if (error)
- return error;
-
- printk("done.\n");
+ goto Exit;
+ printk("done.");
+ Exit:
BUG_ON(in_atomic());
- return 0;
+ printk("\n");
+ return error;
}
static void thaw_tasks(int thaw_user_space)
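freeze_task() now handles the user-task/kernel-thread distinction internally, so a kernel thread that wants to be frozen just opts in and polls the flag as before. A reminder sketch of that pattern; the helper names are the freezer API of this kernel, the work loop is illustrative:

/* sketch: a freezable kernel thread (work loop illustrative) */
static int my_thread_fn(void *unused)
{
	set_freezable();			/* opt in; kthreads are unfreezable by default */
	while (!kthread_should_stop()) {
		try_to_freeze();		/* park in the refrigerator when freezing */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}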
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index a686590d88c..ccc95ac07be 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1239,17 +1239,39 @@ asmlinkage int swsusp_save(void)
return 0;
}
-static void init_header(struct swsusp_info *info)
+#ifndef CONFIG_ARCH_HIBERNATION_HEADER
+static int init_header_complete(struct swsusp_info *info)
{
- memset(info, 0, sizeof(struct swsusp_info));
+ memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
info->version_code = LINUX_VERSION_CODE;
+ return 0;
+}
+
+static char *check_image_kernel(struct swsusp_info *info)
+{
+ if (info->version_code != LINUX_VERSION_CODE)
+ return "kernel version";
+ if (strcmp(info->uts.sysname,init_utsname()->sysname))
+ return "system type";
+ if (strcmp(info->uts.release,init_utsname()->release))
+ return "kernel release";
+ if (strcmp(info->uts.version,init_utsname()->version))
+ return "version";
+ if (strcmp(info->uts.machine,init_utsname()->machine))
+ return "machine";
+ return NULL;
+}
+#endif /* CONFIG_ARCH_HIBERNATION_HEADER */
+
+static int init_header(struct swsusp_info *info)
+{
+ memset(info, 0, sizeof(struct swsusp_info));
info->num_physpages = num_physpages;
- memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
- info->cpus = num_online_cpus();
info->image_pages = nr_copy_pages;
info->pages = nr_copy_pages + nr_meta_pages + 1;
info->size = info->pages;
info->size <<= PAGE_SHIFT;
+ return init_header_complete(info);
}
/**
@@ -1303,7 +1325,11 @@ int snapshot_read_next(struct snapshot_handle *handle, size_t count)
return -ENOMEM;
}
if (!handle->offset) {
- init_header((struct swsusp_info *)buffer);
+ int error;
+
+ error = init_header((struct swsusp_info *)buffer);
+ if (error)
+ return error;
handle->buffer = buffer;
memory_bm_position_reset(&orig_bm);
memory_bm_position_reset(&copy_bm);
@@ -1394,22 +1420,13 @@ duplicate_memory_bitmap(struct memory_bitmap *dst, struct memory_bitmap *src)
}
}
-static inline int check_header(struct swsusp_info *info)
+static int check_header(struct swsusp_info *info)
{
- char *reason = NULL;
+ char *reason;
- if (info->version_code != LINUX_VERSION_CODE)
- reason = "kernel version";
- if (info->num_physpages != num_physpages)
+ reason = check_image_kernel(info);
+ if (!reason && info->num_physpages != num_physpages)
reason = "memory size";
- if (strcmp(info->uts.sysname,init_utsname()->sysname))
- reason = "system type";
- if (strcmp(info->uts.release,init_utsname()->release))
- reason = "kernel release";
- if (strcmp(info->uts.version,init_utsname()->version))
- reason = "version";
- if (strcmp(info->uts.machine,init_utsname()->machine))
- reason = "machine";
if (reason) {
printk(KERN_ERR "swsusp: Resume mismatch: %s\n", reason);
return -EPERM;
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index 5da304c8f1f..e1722d3155f 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -270,39 +270,6 @@ int swsusp_shrink_memory(void)
return 0;
}
-int swsusp_suspend(void)
-{
- int error;
-
- if ((error = arch_prepare_suspend()))
- return error;
-
- local_irq_disable();
- /* At this point, device_suspend() has been called, but *not*
- * device_power_down(). We *must* device_power_down() now.
- * Otherwise, drivers for some devices (e.g. interrupt controllers)
- * become desynchronized with the actual state of the hardware
- * at resume time, and evil weirdness ensues.
- */
- if ((error = device_power_down(PMSG_FREEZE))) {
- printk(KERN_ERR "Some devices failed to power down, aborting suspend\n");
- goto Enable_irqs;
- }
-
- save_processor_state();
- if ((error = swsusp_arch_suspend()))
- printk(KERN_ERR "Error %d suspending\n", error);
- /* Restore control flow magically appears here */
- restore_processor_state();
- /* NOTE: device_power_up() is just a resume() for devices
- * that suspended with irqs off ... no overall powerup.
- */
- device_power_up();
- Enable_irqs:
- local_irq_enable();
- return error;
-}
-
int swsusp_resume(void)
{
int error;
diff --git a/kernel/power/user.c b/kernel/power/user.c
index bd0723a7df3..5bd321bcbb7 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -153,6 +153,10 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
mutex_lock(&pm_mutex);
error = pm_notifier_call_chain(PM_HIBERNATION_PREPARE);
if (!error) {
+ printk("Syncing filesystems ... ");
+ sys_sync();
+ printk("done.\n");
+
error = freeze_processes();
if (error)
thaw_processes();
diff --git a/kernel/printk.c b/kernel/printk.c
index 8451dfc31d2..a30fe33de39 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -22,6 +22,8 @@
#include <linux/tty_driver.h>
#include <linux/console.h>
#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/nmi.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/interrupt.h> /* For in_interrupt() */
@@ -162,6 +164,113 @@ out:
__setup("log_buf_len=", log_buf_len_setup);
+#ifdef CONFIG_BOOT_PRINTK_DELAY
+
+static unsigned int boot_delay; /* msecs delay after each printk during bootup */
+static unsigned long long printk_delay_msec; /* per msec, based on boot_delay */
+
+static int __init boot_delay_setup(char *str)
+{
+ unsigned long lpj;
+ unsigned long long loops_per_msec;
+
+ lpj = preset_lpj ? preset_lpj : 1000000; /* some guess */
+ loops_per_msec = (unsigned long long)lpj / 1000 * HZ;
+
+ get_option(&str, &boot_delay);
+ if (boot_delay > 10 * 1000)
+ boot_delay = 0;
+
+ printk_delay_msec = loops_per_msec;
+ printk(KERN_DEBUG "boot_delay: %u, preset_lpj: %ld, lpj: %lu, "
+ "HZ: %d, printk_delay_msec: %llu\n",
+ boot_delay, preset_lpj, lpj, HZ, printk_delay_msec);
+ return 1;
+}
+__setup("boot_delay=", boot_delay_setup);
+
+static void boot_delay_msec(void)
+{
+ unsigned long long k;
+ unsigned long timeout;
+
+ if (boot_delay == 0 || system_state != SYSTEM_BOOTING)
+ return;
+
+ k = (unsigned long long)printk_delay_msec * boot_delay;
+
+ timeout = jiffies + msecs_to_jiffies(boot_delay);
+ while (k) {
+ k--;
+ cpu_relax();
+ /*
+ * use (volatile) jiffies to prevent
+ * compiler reduction; loop termination via jiffies
+ * is secondary and may or may not happen.
+ */
+ if (time_after(jiffies, timeout))
+ break;
+ touch_nmi_watchdog();
+ }
+}
+#else
+static inline void boot_delay_msec(void)
+{
+}
+#endif
+
+/*
+ * Return the number of unread characters in the log buffer.
+ */
+int log_buf_get_len(void)
+{
+ return logged_chars;
+}
+
+/*
+ * Copy a range of characters from the log buffer.
+ */
+int log_buf_copy(char *dest, int idx, int len)
+{
+ int ret, max;
+ bool took_lock = false;
+
+ if (!oops_in_progress) {
+ spin_lock_irq(&logbuf_lock);
+ took_lock = true;
+ }
+
+ max = log_buf_get_len();
+ if (idx < 0 || idx >= max) {
+ ret = -1;
+ } else {
+ if (len > max)
+ len = max;
+ ret = len;
+ idx += (log_end - max);
+ while (len-- > 0)
+ dest[len] = LOG_BUF(idx + len);
+ }
+
+ if (took_lock)
+ spin_unlock_irq(&logbuf_lock);
+
+ return ret;
+}
+
+/*
+ * Extract a single character from the log buffer.
+ */
+int log_buf_read(int idx)
+{
+ char ret;
+
+ if (log_buf_copy(&ret, idx, 1) == 1)
+ return ret;
+ else
+ return -1;
+}
+
/*
* Commands to do_syslog:
*
@@ -527,6 +636,8 @@ asmlinkage int vprintk(const char *fmt, va_list args)
static char printk_buf[1024];
static int log_level_unknown = 1;
+ boot_delay_msec();
+
preempt_disable();
if (unlikely(oops_in_progress) && printk_cpu == smp_processor_id())
/* If a crash is occurring during printk() on this CPU,
@@ -751,7 +862,16 @@ int update_console_cmdline(char *name, int idx, char *name_new, int idx_new, cha
return -1;
}
-#ifndef CONFIG_DISABLE_CONSOLE_SUSPEND
+int console_suspend_enabled = 1;
+EXPORT_SYMBOL(console_suspend_enabled);
+
+static int __init console_suspend_disable(char *str)
+{
+ console_suspend_enabled = 0;
+ return 1;
+}
+__setup("no_console_suspend", console_suspend_disable);
+
/**
* suspend_console - suspend the console subsystem
*
@@ -759,6 +879,8 @@ int update_console_cmdline(char *name, int idx, char *name_new, int idx_new, cha
*/
void suspend_console(void)
{
+ if (!console_suspend_enabled)
+ return;
printk("Suspending console(s)\n");
acquire_console_sem();
console_suspended = 1;
@@ -766,10 +888,11 @@ void suspend_console(void)
void resume_console(void)
{
+ if (!console_suspend_enabled)
+ return;
console_suspended = 0;
release_console_sem();
}
-#endif /* CONFIG_DISABLE_CONSOLE_SUSPEND */
/**
* acquire_console_sem - lock the console system for exclusive use.
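Besides the helpers, this adds two run-time switches: boot_delay=<ms> waits after each printk during boot (values above 10000 ms are ignored), and no_console_suspend replaces the old compile-time CONFIG_DISABLE_CONSOLE_SUSPEND. log_buf_get_len()/log_buf_copy() give in-kernel code read access to the ring buffer; a hedged sketch of a consumer follows, with the sink being hypothetical:

/* sketch: in-kernel consumer of the new log buffer helpers (sink is hypothetical) */
static void dump_recent_printk(void)
{
	static char chunk[256];
	int len = log_buf_get_len();
	int idx = len > (int)sizeof(chunk) ? len - (int)sizeof(chunk) : 0;
	int got = log_buf_copy(chunk, idx, sizeof(chunk));

	if (got > 0)
		my_sink(chunk, got);		/* hypothetical consumer of the bytes */
}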
diff --git a/kernel/profile.c b/kernel/profile.c
index cb1e37d2dac..631b75c25d7 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -37,7 +37,7 @@ struct profile_hit {
#define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)
/* Oprofile timer tick hook */
-int (*timer_hook)(struct pt_regs *) __read_mostly;
+static int (*timer_hook)(struct pt_regs *) __read_mostly;
static atomic_t *prof_buffer;
static unsigned long prof_len, prof_shift;
@@ -346,7 +346,7 @@ static int __devinit profile_cpu_callback(struct notifier_block *info,
per_cpu(cpu_profile_flip, cpu) = 0;
if (!per_cpu(cpu_profile_hits, cpu)[1]) {
page = alloc_pages_node(node,
- GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
+ GFP_KERNEL | __GFP_ZERO,
0);
if (!page)
return NOTIFY_BAD;
@@ -354,7 +354,7 @@ static int __devinit profile_cpu_callback(struct notifier_block *info,
}
if (!per_cpu(cpu_profile_hits, cpu)[0]) {
page = alloc_pages_node(node,
- GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
+ GFP_KERNEL | __GFP_ZERO,
0);
if (!page)
goto out_free;
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 3eca7a55f2e..a73ebd3b9d4 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -386,6 +386,9 @@ int ptrace_request(struct task_struct *child, long request,
case PTRACE_SETSIGINFO:
ret = ptrace_setsiginfo(child, (siginfo_t __user *) data);
break;
+ case PTRACE_DETACH: /* detach a process that was attached. */
+ ret = ptrace_detach(child, data);
+ break;
default:
break;
}
@@ -450,6 +453,10 @@ struct task_struct *ptrace_get_task_struct(pid_t pid)
return child;
}
+#ifndef arch_ptrace_attach
+#define arch_ptrace_attach(child) do { } while (0)
+#endif
+
#ifndef __ARCH_SYS_PTRACE
asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
{
@@ -473,6 +480,12 @@ asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
if (request == PTRACE_ATTACH) {
ret = ptrace_attach(child);
+ /*
+ * Some architectures need to do book-keeping after
+ * a ptrace attach.
+ */
+ if (!ret)
+ arch_ptrace_attach(child);
goto out_put_task_struct;
}
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 2c2dd8410dc..a66d4d1615f 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -45,10 +45,17 @@
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
-#include <linux/rcupdate.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+static struct lock_class_key rcu_lock_key;
+struct lockdep_map rcu_lock_map =
+ STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
+
+EXPORT_SYMBOL_GPL(rcu_lock_map);
+#endif
+
/* Definition for rcupdate control block. */
static struct rcu_ctrlblk rcu_ctrlblk = {
.cur = -300,
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index ddff3324778..c3e165c2318 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -35,14 +35,12 @@
#include <linux/sched.h>
#include <asm/atomic.h>
#include <linux/bitops.h>
-#include <linux/module.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
-#include <linux/random.h>
#include <linux/delay.h>
#include <linux/byteorder/swabb.h>
#include <linux/stat.h>
@@ -166,16 +164,14 @@ struct rcu_random_state {
/*
* Crude but fast random-number generator. Uses a linear congruential
- * generator, with occasional help from get_random_bytes().
+ * generator, with occasional help from cpu_clock().
*/
static unsigned long
rcu_random(struct rcu_random_state *rrsp)
{
- long refresh;
-
if (--rrsp->rrs_count < 0) {
- get_random_bytes(&refresh, sizeof(refresh));
- rrsp->rrs_state += refresh;
+ rrsp->rrs_state +=
+ (unsigned long)cpu_clock(raw_smp_processor_id());
rrsp->rrs_count = RCU_RANDOM_REFRESH;
}
rrsp->rrs_state = rrsp->rrs_state * RCU_RANDOM_MULT + RCU_RANDOM_ADD;
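For reference, a user-space model of the generator after this change: a plain linear congruential step with an occasional additive reseed, where a monotonic clock stands in for cpu_clock(). The multiplier, increment and refresh interval are illustrative stand-ins rather than the kernel's RCU_RANDOM_* constants.

#include <stdio.h>
#include <time.h>

#define MODEL_MULT    39916801UL        /* illustrative */
#define MODEL_ADD     479001701UL       /* illustrative */
#define MODEL_REFRESH 10000             /* reseed every N draws */

struct model_random_state {
    unsigned long state;
    long count;
};

static unsigned long long model_clock_ns(void)
{
    struct timespec ts;

    clock_gettime(CLOCK_MONOTONIC, &ts);    /* stand-in for cpu_clock() */
    return (unsigned long long)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

static unsigned long model_random(struct model_random_state *s)
{
    if (--s->count < 0) {
        s->state += (unsigned long)model_clock_ns();
        s->count = MODEL_REFRESH;
    }
    s->state = s->state * MODEL_MULT + MODEL_ADD;
    return s->state;
}

int main(void)
{
    struct model_random_state s = { 0, 0 };
    int i;

    for (i = 0; i < 5; i++)
        printf("%lu\n", model_random(&s) % 100);
    return 0;
}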
diff --git a/kernel/relay.c b/kernel/relay.c
index ad855017bc5..61134eb7a0c 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -370,7 +370,7 @@ void relay_reset(struct rchan *chan)
if (!chan)
return;
- if (chan->is_global && chan->buf[0]) {
+ if (chan->is_global && chan->buf[0]) {
__relay_reset(chan->buf[0], 0);
return;
}
@@ -850,13 +850,13 @@ static int relay_file_read_avail(struct rchan_buf *buf, size_t read_pos)
buf->subbufs_consumed = consumed;
buf->bytes_consumed = 0;
}
-
+
produced = (produced % n_subbufs) * subbuf_size + buf->offset;
consumed = (consumed % n_subbufs) * subbuf_size + buf->bytes_consumed;
if (consumed > produced)
produced += n_subbufs * subbuf_size;
-
+
if (consumed == produced)
return 0;
diff --git a/kernel/resource.c b/kernel/resource.c
index 9bd14fd3e6d..a358142ff48 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -234,7 +234,7 @@ EXPORT_SYMBOL(release_resource);
* the caller must specify res->start, res->end, res->flags.
* If found, returns 0, res is overwritten, if not found, returns -1.
*/
-int find_next_system_ram(struct resource *res)
+static int find_next_system_ram(struct resource *res)
{
resource_size_t start, end;
struct resource *p;
@@ -267,6 +267,30 @@ int find_next_system_ram(struct resource *res)
res->end = p->end;
return 0;
}
+int
+walk_memory_resource(unsigned long start_pfn, unsigned long nr_pages, void *arg,
+ int (*func)(unsigned long, unsigned long, void *))
+{
+ struct resource res;
+ unsigned long pfn, len;
+ u64 orig_end;
+ int ret = -1;
+ res.start = (u64) start_pfn << PAGE_SHIFT;
+ res.end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
+ res.flags = IORESOURCE_MEM;
+ orig_end = res.end;
+ while ((res.start < res.end) && (find_next_system_ram(&res) >= 0)) {
+ pfn = (unsigned long)(res.start >> PAGE_SHIFT);
+ len = (unsigned long)((res.end + 1 - res.start) >> PAGE_SHIFT);
+ ret = (*func)(pfn, len, arg);
+ if (ret)
+ break;
+ res.start = res.end + 1;
+ res.end = orig_end;
+ }
+ return ret;
+}
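The loop above clips the requested pfn range against each "System RAM" resource in turn and hands the surviving pieces to the callback. A user-space model of that walk, using a made-up two-entry RAM map in place of the resource tree, looks roughly like this:

#include <stdio.h>

#define PAGE_SHIFT 12

struct ram_range {
    unsigned long long start, end;      /* byte addresses, inclusive end */
};

/* Made-up RAM map standing in for the "System RAM" resources. */
static const struct ram_range ram[] = {
    { 0x00000000ULL, 0x0009ffffULL },
    { 0x00100000ULL, 0x3fffffffULL },
};

/* Model of find_next_system_ram(): clip [*start, *end] to the first
 * overlapping RAM range; return 0 on success, -1 if none overlaps. */
static int next_ram(unsigned long long *start, unsigned long long *end)
{
    size_t i;

    for (i = 0; i < sizeof(ram) / sizeof(ram[0]); i++) {
        if (ram[i].end < *start || ram[i].start > *end)
            continue;
        if (ram[i].start > *start)
            *start = ram[i].start;
        if (ram[i].end < *end)
            *end = ram[i].end;
        return 0;
    }
    return -1;
}

static int walk(unsigned long start_pfn, unsigned long nr_pages, void *arg,
                int (*func)(unsigned long, unsigned long, void *))
{
    unsigned long long start = (unsigned long long)start_pfn << PAGE_SHIFT;
    unsigned long long end = ((unsigned long long)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
    unsigned long long orig_end = end;
    int ret = -1;

    while (start < end && next_ram(&start, &end) >= 0) {
        unsigned long pfn = (unsigned long)(start >> PAGE_SHIFT);
        unsigned long len = (unsigned long)((end + 1 - start) >> PAGE_SHIFT);

        ret = func(pfn, len, arg);
        if (ret)
            break;
        start = end + 1;
        end = orig_end;
    }
    return ret;
}

static int show(unsigned long pfn, unsigned long len, void *arg)
{
    printf("RAM chunk: pfn %lu, %lu pages\n", pfn, len);
    return 0;
}

int main(void)
{
    walk(0, 0x40000, NULL, show);       /* walk the first 1 GiB worth of pfns */
    return 0;
}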
+
#endif
/*
diff --git a/kernel/rtmutex-debug.c b/kernel/rtmutex-debug.c
index 5aedbee014d..6b0703db152 100644
--- a/kernel/rtmutex-debug.c
+++ b/kernel/rtmutex-debug.c
@@ -82,12 +82,7 @@ do { \
* into the tracing code when doing error printk or
* executing a BUG():
*/
-int rt_trace_on = 1;
-
-void deadlock_trace_off(void)
-{
- rt_trace_on = 0;
-}
+static int rt_trace_on = 1;
static void printk_task(struct task_struct *p)
{
diff --git a/kernel/sched.c b/kernel/sched.c
index bba57adb950..ed90be46fb3 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -266,7 +266,8 @@ struct rt_rq {
* acquire operations must be ordered by ascending &runqueue.
*/
struct rq {
- spinlock_t lock; /* runqueue lock */
+ /* runqueue lock: */
+ spinlock_t lock;
/*
* nr_running and cpu_load should be in the same cacheline because
@@ -279,13 +280,15 @@ struct rq {
#ifdef CONFIG_NO_HZ
unsigned char in_nohz_recently;
#endif
- struct load_weight load; /* capture load from *all* tasks on this cpu */
+ /* capture load from *all* tasks on this cpu: */
+ struct load_weight load;
unsigned long nr_load_updates;
u64 nr_switches;
struct cfs_rq cfs;
#ifdef CONFIG_FAIR_GROUP_SCHED
- struct list_head leaf_cfs_rq_list; /* list of leaf cfs_rq on this cpu */
+ /* list of leaf cfs_rq on this cpu: */
+ struct list_head leaf_cfs_rq_list;
#endif
struct rt_rq rt;
@@ -317,7 +320,8 @@ struct rq {
/* For active balancing */
int active_balance;
int push_cpu;
- int cpu; /* cpu of this runqueue */
+ /* cpu of this runqueue: */
+ int cpu;
struct task_struct *migration_thread;
struct list_head migration_queue;
@@ -328,22 +332,22 @@ struct rq {
struct sched_info rq_sched_info;
/* sys_sched_yield() stats */
- unsigned long yld_exp_empty;
- unsigned long yld_act_empty;
- unsigned long yld_both_empty;
- unsigned long yld_count;
+ unsigned int yld_exp_empty;
+ unsigned int yld_act_empty;
+ unsigned int yld_both_empty;
+ unsigned int yld_count;
/* schedule() stats */
- unsigned long sched_switch;
- unsigned long sched_count;
- unsigned long sched_goidle;
+ unsigned int sched_switch;
+ unsigned int sched_count;
+ unsigned int sched_goidle;
/* try_to_wake_up() stats */
- unsigned long ttwu_count;
- unsigned long ttwu_local;
+ unsigned int ttwu_count;
+ unsigned int ttwu_local;
/* BKL stats */
- unsigned long bkl_count;
+ unsigned int bkl_count;
#endif
struct lock_class_key rq_lock_key;
};
@@ -449,12 +453,12 @@ enum {
};
const_debug unsigned int sysctl_sched_features =
- SCHED_FEAT_NEW_FAIR_SLEEPERS *1 |
- SCHED_FEAT_START_DEBIT *1 |
- SCHED_FEAT_TREE_AVG *0 |
- SCHED_FEAT_APPROX_AVG *0 |
- SCHED_FEAT_WAKEUP_PREEMPT *1 |
- SCHED_FEAT_PREEMPT_RESTRICT *1;
+ SCHED_FEAT_NEW_FAIR_SLEEPERS * 1 |
+ SCHED_FEAT_START_DEBIT * 1 |
+ SCHED_FEAT_TREE_AVG * 0 |
+ SCHED_FEAT_APPROX_AVG * 0 |
+ SCHED_FEAT_WAKEUP_PREEMPT * 1 |
+ SCHED_FEAT_PREEMPT_RESTRICT * 1;
#define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)
@@ -1712,7 +1716,7 @@ void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
p->prio = effective_prio(p);
- if (!p->sched_class->task_new || !current->se.on_rq || !rq->cfs.curr) {
+ if (!p->sched_class->task_new || !current->se.on_rq) {
activate_task(rq, p, 0);
} else {
/*
@@ -2336,7 +2340,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
unsigned long max_pull;
unsigned long busiest_load_per_task, busiest_nr_running;
unsigned long this_load_per_task, this_nr_running;
- int load_idx;
+ int load_idx, group_imb = 0;
#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
int power_savings_balance = 1;
unsigned long leader_nr_running = 0, min_load_per_task = 0;
@@ -2355,9 +2359,10 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
load_idx = sd->idle_idx;
do {
- unsigned long load, group_capacity;
+ unsigned long load, group_capacity, max_cpu_load, min_cpu_load;
int local_group;
int i;
+ int __group_imb = 0;
unsigned int balance_cpu = -1, first_idle_cpu = 0;
unsigned long sum_nr_running, sum_weighted_load;
@@ -2368,6 +2373,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
/* Tally up the load of all CPUs in the group */
sum_weighted_load = sum_nr_running = avg_load = 0;
+ max_cpu_load = 0;
+ min_cpu_load = ~0UL;
for_each_cpu_mask(i, group->cpumask) {
struct rq *rq;
@@ -2388,8 +2395,13 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
}
load = target_load(i, load_idx);
- } else
+ } else {
load = source_load(i, load_idx);
+ if (load > max_cpu_load)
+ max_cpu_load = load;
+ if (min_cpu_load > load)
+ min_cpu_load = load;
+ }
avg_load += load;
sum_nr_running += rq->nr_running;
@@ -2415,6 +2427,9 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
avg_load = sg_div_cpu_power(group,
avg_load * SCHED_LOAD_SCALE);
+ if ((max_cpu_load - min_cpu_load) > SCHED_LOAD_SCALE)
+ __group_imb = 1;
+
group_capacity = group->__cpu_power / SCHED_LOAD_SCALE;
if (local_group) {
@@ -2423,11 +2438,12 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
this_nr_running = sum_nr_running;
this_load_per_task = sum_weighted_load;
} else if (avg_load > max_load &&
- sum_nr_running > group_capacity) {
+ (sum_nr_running > group_capacity || __group_imb)) {
max_load = avg_load;
busiest = group;
busiest_nr_running = sum_nr_running;
busiest_load_per_task = sum_weighted_load;
+ group_imb = __group_imb;
}
#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
@@ -2499,6 +2515,9 @@ group_next:
goto out_balanced;
busiest_load_per_task /= busiest_nr_running;
+ if (group_imb)
+ busiest_load_per_task = min(busiest_load_per_task, avg_load);
+
/*
* We're trying to get all the cpus to the average_load, so we don't
* want to push ourselves above the average load, nor do we wish to
@@ -3319,6 +3338,16 @@ void account_guest_time(struct task_struct *p, cputime_t cputime)
}
/*
+ * Account scaled user cpu time to a process.
+ * @p: the process that the cpu time gets accounted to
+ * @cputime: the cpu time spent in user space since the last update
+ */
+void account_user_time_scaled(struct task_struct *p, cputime_t cputime)
+{
+ p->utimescaled = cputime_add(p->utimescaled, cputime);
+}
+
+/*
* Account system cpu time to a process.
* @p: the process that the cpu time gets accounted to
* @hardirq_offset: the offset to subtract from hardirq_count()
@@ -3356,6 +3385,17 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
}
/*
+ * Account scaled system cpu time to a process.
+ * @p: the process that the cpu time gets accounted to
+ * @hardirq_offset: the offset to subtract from hardirq_count()
+ * @cputime: the cpu time spent in kernel space since the last update
+ */
+void account_system_time_scaled(struct task_struct *p, cputime_t cputime)
+{
+ p->stimescaled = cputime_add(p->stimescaled, cputime);
+}
+
+/*
* Account for involuntary wait time.
* @p: the process from which the cpu time has been stolen
* @steal: the cpu time spent in involuntary wait
@@ -3844,7 +3884,10 @@ EXPORT_SYMBOL(wait_for_completion_timeout);
int __sched wait_for_completion_interruptible(struct completion *x)
{
- return wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
+ long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
+ if (t == -ERESTARTSYS)
+ return t;
+ return 0;
}
EXPORT_SYMBOL(wait_for_completion_interruptible);
@@ -4779,18 +4822,18 @@ static void show_task(struct task_struct *p)
unsigned state;
state = p->state ? __ffs(p->state) + 1 : 0;
- printk("%-13.13s %c", p->comm,
+ printk(KERN_INFO "%-13.13s %c", p->comm,
state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
#if BITS_PER_LONG == 32
if (state == TASK_RUNNING)
- printk(" running ");
+ printk(KERN_CONT " running ");
else
- printk(" %08lx ", thread_saved_pc(p));
+ printk(KERN_CONT " %08lx ", thread_saved_pc(p));
#else
if (state == TASK_RUNNING)
- printk(" running task ");
+ printk(KERN_CONT " running task ");
else
- printk(" %016lx ", thread_saved_pc(p));
+ printk(KERN_CONT " %016lx ", thread_saved_pc(p));
#endif
#ifdef CONFIG_DEBUG_STACK_USAGE
{
@@ -4800,7 +4843,7 @@ static void show_task(struct task_struct *p)
free = (unsigned long)n - (unsigned long)end_of_stack(p);
}
#endif
- printk("%5lu %5d %6d\n", free, p->pid, p->parent->pid);
+ printk(KERN_CONT "%5lu %5d %6d\n", free, p->pid, p->parent->pid);
if (state != TASK_RUNNING)
show_stack(p, NULL);
@@ -5060,6 +5103,17 @@ wait_to_die:
}
#ifdef CONFIG_HOTPLUG_CPU
+
+static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu)
+{
+ int ret;
+
+ local_irq_disable();
+ ret = __migrate_task(p, src_cpu, dest_cpu);
+ local_irq_enable();
+ return ret;
+}
+
/*
 * Figure out where task on dead CPU should go, use force if necessary.
* NOTE: interrupts should be disabled by the caller
@@ -5098,7 +5152,7 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
"longer affine to cpu%d\n",
p->pid, p->comm, dead_cpu);
}
- } while (!__migrate_task(p, dead_cpu, dest_cpu));
+ } while (!__migrate_task_irq(p, dead_cpu, dest_cpu));
}
/*
@@ -5126,7 +5180,7 @@ static void migrate_live_tasks(int src_cpu)
{
struct task_struct *p, *t;
- write_lock_irq(&tasklist_lock);
+ read_lock(&tasklist_lock);
do_each_thread(t, p) {
if (p == current)
@@ -5136,7 +5190,7 @@ static void migrate_live_tasks(int src_cpu)
move_task_off_dead_cpu(src_cpu, p);
} while_each_thread(t, p);
- write_unlock_irq(&tasklist_lock);
+ read_unlock(&tasklist_lock);
}
/*
@@ -5214,11 +5268,10 @@ static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
* Drop lock around migration; if someone else moves it,
* that's OK. No task can be added to this CPU, so iteration is
* fine.
- * NOTE: interrupts should be left disabled --dev@
*/
- spin_unlock(&rq->lock);
+ spin_unlock_irq(&rq->lock);
move_task_off_dead_cpu(dead_cpu, p);
- spin_lock(&rq->lock);
+ spin_lock_irq(&rq->lock);
put_task_struct(p);
}
@@ -5272,11 +5325,20 @@ static struct ctl_table *sd_alloc_ctl_entry(int n)
static void sd_free_ctl_entry(struct ctl_table **tablep)
{
- struct ctl_table *entry = *tablep;
+ struct ctl_table *entry;
- for (entry = *tablep; entry->procname; entry++)
+ /*
+ * In the intermediate directories, both the child directory and
+ * procname are dynamically allocated and could fail but the mode
+ * will always be set. In the lowest directory the names are
+ * static strings and all have proc handlers.
+ */
+ for (entry = *tablep; entry->mode; entry++) {
if (entry->child)
sd_free_ctl_entry(&entry->child);
+ if (entry->proc_handler == NULL)
+ kfree(entry->procname);
+ }
kfree(*tablep);
*tablep = NULL;
@@ -5330,7 +5392,7 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
return table;
}
-static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
+static ctl_table * sd_alloc_ctl_cpu_table(int cpu)
{
struct ctl_table *entry, *table;
struct sched_domain *sd;
@@ -5447,14 +5509,14 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
kthread_stop(rq->migration_thread);
rq->migration_thread = NULL;
/* Idle task back to normal (off runqueue, low prio) */
- rq = task_rq_lock(rq->idle, &flags);
+ spin_lock_irq(&rq->lock);
update_rq_clock(rq);
deactivate_task(rq, rq->idle, 0);
rq->idle->static_prio = MAX_PRIO;
__setscheduler(rq, rq->idle, SCHED_NORMAL, 0);
rq->idle->sched_class = &idle_sched_class;
migrate_dead_tasks(cpu);
- task_rq_unlock(rq, &flags);
+ spin_unlock_irq(&rq->lock);
migrate_nr_uninterruptible(rq);
BUG_ON(rq->nr_running != 0);
@@ -5564,20 +5626,20 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu)
}
if (!group->__cpu_power) {
- printk("\n");
+ printk(KERN_CONT "\n");
printk(KERN_ERR "ERROR: domain->cpu_power not "
"set\n");
break;
}
if (!cpus_weight(group->cpumask)) {
- printk("\n");
+ printk(KERN_CONT "\n");
printk(KERN_ERR "ERROR: empty group\n");
break;
}
if (cpus_intersects(groupmask, group->cpumask)) {
- printk("\n");
+ printk(KERN_CONT "\n");
printk(KERN_ERR "ERROR: repeated CPUs\n");
break;
}
@@ -5585,11 +5647,11 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu)
cpus_or(groupmask, groupmask, group->cpumask);
cpumask_scnprintf(str, NR_CPUS, group->cpumask);
- printk(" %s", str);
+ printk(KERN_CONT " %s", str);
group = group->next;
} while (group != sd->groups);
- printk("\n");
+ printk(KERN_CONT "\n");
if (!cpus_equal(sd->span, groupmask))
printk(KERN_ERR "ERROR: groups don't span "
@@ -5869,7 +5931,7 @@ static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map,
struct sched_group **sg)
{
int group;
- cpumask_t mask = cpu_sibling_map[cpu];
+ cpumask_t mask = per_cpu(cpu_sibling_map, cpu);
cpus_and(mask, mask, *cpu_map);
group = first_cpu(mask);
if (sg)
@@ -5898,7 +5960,7 @@ static int cpu_to_phys_group(int cpu, const cpumask_t *cpu_map,
cpus_and(mask, mask, *cpu_map);
group = first_cpu(mask);
#elif defined(CONFIG_SCHED_SMT)
- cpumask_t mask = cpu_sibling_map[cpu];
+ cpumask_t mask = per_cpu(cpu_sibling_map, cpu);
cpus_and(mask, mask, *cpu_map);
group = first_cpu(mask);
#else
@@ -6132,7 +6194,7 @@ static int build_sched_domains(const cpumask_t *cpu_map)
p = sd;
sd = &per_cpu(cpu_domains, i);
*sd = SD_SIBLING_INIT;
- sd->span = cpu_sibling_map[i];
+ sd->span = per_cpu(cpu_sibling_map, i);
cpus_and(sd->span, sd->span, *cpu_map);
sd->parent = p;
p->child = sd;
@@ -6143,7 +6205,7 @@ static int build_sched_domains(const cpumask_t *cpu_map)
#ifdef CONFIG_SCHED_SMT
/* Set up CPU (sibling) groups */
for_each_cpu_mask(i, *cpu_map) {
- cpumask_t this_sibling_map = cpu_sibling_map[i];
+ cpumask_t this_sibling_map = per_cpu(cpu_sibling_map, i);
cpus_and(this_sibling_map, this_sibling_map, *cpu_map);
if (i != first_cpu(this_sibling_map))
continue;
@@ -6348,35 +6410,6 @@ static void detach_destroy_domains(const cpumask_t *cpu_map)
arch_destroy_sched_domains(cpu_map);
}
-/*
- * Partition sched domains as specified by the cpumasks below.
- * This attaches all cpus from the cpumasks to the NULL domain,
- * waits for a RCU quiescent period, recalculates sched
- * domain information and then attaches them back to the
- * correct sched domains
- * Call with hotplug lock held
- */
-int partition_sched_domains(cpumask_t *partition1, cpumask_t *partition2)
-{
- cpumask_t change_map;
- int err = 0;
-
- cpus_and(*partition1, *partition1, cpu_online_map);
- cpus_and(*partition2, *partition2, cpu_online_map);
- cpus_or(change_map, *partition1, *partition2);
-
- /* Detach sched domains from all of the affected cpus */
- detach_destroy_domains(&change_map);
- if (!cpus_empty(*partition1))
- err = build_sched_domains(partition1);
- if (!err && !cpus_empty(*partition2))
- err = build_sched_domains(partition2);
-
- register_sched_domain_sysctl();
-
- return err;
-}
-
#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
static int arch_reinit_sched_domains(void)
{
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index a5e517ec07c..e6fb392e516 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -137,7 +137,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
SEQ_printf(m, " .%-30s: %ld\n", "nr_running", cfs_rq->nr_running);
SEQ_printf(m, " .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_SCHEDSTATS
- SEQ_printf(m, " .%-30s: %ld\n", "bkl_count",
+ SEQ_printf(m, " .%-30s: %d\n", "bkl_count",
rq->bkl_count);
#endif
SEQ_printf(m, " .%-30s: %ld\n", "nr_spread_over",
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index a17b785d700..166ed6db600 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1031,12 +1031,8 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
swap(curr->vruntime, se->vruntime);
}
- update_stats_enqueue(cfs_rq, se);
- check_spread(cfs_rq, se);
- check_spread(cfs_rq, curr);
- __enqueue_entity(cfs_rq, se);
- account_entity_enqueue(cfs_rq, se);
se->peer_preempt = 0;
+ enqueue_task_fair(rq, p, 0);
resched_task(rq->curr);
}
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index 1c084842c3e..ef1a7df80ea 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -21,7 +21,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
/* runqueue-specific stats */
seq_printf(seq,
- "cpu%d %lu %lu %lu %lu %lu %lu %lu %lu %lu %llu %llu %lu",
+ "cpu%d %u %u %u %u %u %u %u %u %u %llu %llu %lu",
cpu, rq->yld_both_empty,
rq->yld_act_empty, rq->yld_exp_empty, rq->yld_count,
rq->sched_switch, rq->sched_count, rq->sched_goidle,
@@ -42,8 +42,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
seq_printf(seq, "domain%d %s", dcount++, mask_str);
for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES;
itype++) {
- seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu "
- "%lu",
+ seq_printf(seq, " %u %u %u %u %u %u %u %u",
sd->lb_count[itype],
sd->lb_balanced[itype],
sd->lb_failed[itype],
@@ -53,8 +52,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
sd->lb_nobusyq[itype],
sd->lb_nobusyg[itype]);
}
- seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu %lu %lu"
- " %lu %lu %lu\n",
+ seq_printf(seq, " %u %u %u %u %u %u %u %u %u %u %u %u\n",
sd->alb_count, sd->alb_failed, sd->alb_pushed,
sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed,
sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed,
diff --git a/kernel/signal.c b/kernel/signal.c
index 79295238109..e4f059cd986 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -99,7 +99,6 @@ static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
static int recalc_sigpending_tsk(struct task_struct *t)
{
if (t->signal->group_stop_count > 0 ||
- (freezing(t)) ||
PENDING(&t->pending, &t->blocked) ||
PENDING(&t->signal->shared_pending, &t->blocked)) {
set_tsk_thread_flag(t, TIF_SIGPENDING);
@@ -909,8 +908,7 @@ __group_complete_signal(int sig, struct task_struct *p)
do {
sigaddset(&t->pending.signal, SIGKILL);
signal_wake_up(t, 1);
- t = next_thread(t);
- } while (t != p);
+ } while_each_thread(p, t);
return;
}
@@ -928,13 +926,11 @@ __group_complete_signal(int sig, struct task_struct *p)
rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
p->signal->group_stop_count = 0;
p->signal->group_exit_task = t;
- t = p;
+ p = t;
do {
p->signal->group_stop_count++;
- signal_wake_up(t, 0);
- t = next_thread(t);
- } while (t != p);
- wake_up_process(p->signal->group_exit_task);
+ signal_wake_up(t, t == p);
+ } while_each_thread(p, t);
return;
}
@@ -985,9 +981,6 @@ void zap_other_threads(struct task_struct *p)
p->signal->flags = SIGNAL_GROUP_EXIT;
p->signal->group_stop_count = 0;
- if (thread_group_empty(p))
- return;
-
for (t = next_thread(p); t != p; t = next_thread(t)) {
/*
* Don't bother with already dead threads
@@ -2300,15 +2293,6 @@ int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
k = &current->sighand->action[sig-1];
spin_lock_irq(&current->sighand->siglock);
- if (signal_pending(current)) {
- /*
- * If there might be a fatal signal pending on multiple
- * threads, make sure we take it before changing the action.
- */
- spin_unlock_irq(&current->sighand->siglock);
- return -ERESTARTNOINTR;
- }
-
if (oact)
*oact = *k;
@@ -2335,7 +2319,6 @@ int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
rm_from_queue_full(&mask, &t->signal->shared_pending);
do {
rm_from_queue_full(&mask, &t->pending);
- recalc_sigpending_and_wake(t);
t = next_thread(t);
} while (t != current);
}
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index 708d4882c0c..edeeef3a6a3 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -15,13 +15,16 @@
#include <linux/notifier.h>
#include <linux/module.h>
+#include <asm/irq_regs.h>
+
static DEFINE_SPINLOCK(print_lock);
static DEFINE_PER_CPU(unsigned long, touch_timestamp);
static DEFINE_PER_CPU(unsigned long, print_timestamp);
static DEFINE_PER_CPU(struct task_struct *, watchdog_task);
-static int did_panic = 0;
+static int did_panic;
+int softlockup_thresh = 10;
static int
softlock_panic(struct notifier_block *this, unsigned long event, void *ptr)
@@ -40,14 +43,16 @@ static struct notifier_block panic_block = {
* resolution, and we don't need to waste time with a big divide when
* 2^30ns == 1.074s.
*/
-static unsigned long get_timestamp(void)
+static unsigned long get_timestamp(int this_cpu)
{
- return sched_clock() >> 30; /* 2^30 ~= 10^9 */
+ return cpu_clock(this_cpu) >> 30; /* 2^30 ~= 10^9 */
}
void touch_softlockup_watchdog(void)
{
- __raw_get_cpu_var(touch_timestamp) = get_timestamp();
+ int this_cpu = raw_smp_processor_id();
+
+ __raw_get_cpu_var(touch_timestamp) = get_timestamp(this_cpu);
}
EXPORT_SYMBOL(touch_softlockup_watchdog);
@@ -70,6 +75,7 @@ void softlockup_tick(void)
int this_cpu = smp_processor_id();
unsigned long touch_timestamp = per_cpu(touch_timestamp, this_cpu);
unsigned long print_timestamp;
+ struct pt_regs *regs = get_irq_regs();
unsigned long now;
if (touch_timestamp == 0) {
@@ -80,10 +86,11 @@ void softlockup_tick(void)
print_timestamp = per_cpu(print_timestamp, this_cpu);
/* report at most once a second */
- if (print_timestamp < (touch_timestamp + 1) ||
- did_panic ||
- !per_cpu(watchdog_task, this_cpu))
+ if ((print_timestamp >= touch_timestamp &&
+ print_timestamp < (touch_timestamp + 1)) ||
+ did_panic || !per_cpu(watchdog_task, this_cpu)) {
return;
+ }
/* do not print during early bootup: */
if (unlikely(system_state != SYSTEM_RUNNING)) {
@@ -91,28 +98,33 @@ void softlockup_tick(void)
return;
}
- now = get_timestamp();
+ now = get_timestamp(this_cpu);
/* Wake up the high-prio watchdog task every second: */
if (now > (touch_timestamp + 1))
wake_up_process(per_cpu(watchdog_task, this_cpu));
/* Warn about unreasonable 10+ seconds delays: */
- if (now > (touch_timestamp + 10)) {
- per_cpu(print_timestamp, this_cpu) = touch_timestamp;
+ if (now <= (touch_timestamp + softlockup_thresh))
+ return;
- spin_lock(&print_lock);
- printk(KERN_ERR "BUG: soft lockup detected on CPU#%d!\n",
- this_cpu);
+ per_cpu(print_timestamp, this_cpu) = touch_timestamp;
+
+ spin_lock(&print_lock);
+ printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %lus! [%s:%d]\n",
+ this_cpu, now - touch_timestamp,
+ current->comm, current->pid);
+ if (regs)
+ show_regs(regs);
+ else
dump_stack();
- spin_unlock(&print_lock);
- }
+ spin_unlock(&print_lock);
}
/*
* The watchdog thread - runs every second and touches the timestamp.
*/
-static int watchdog(void * __bind_cpu)
+static int watchdog(void *__bind_cpu)
{
struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
@@ -150,13 +162,13 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
BUG_ON(per_cpu(watchdog_task, hotcpu));
p = kthread_create(watchdog, hcpu, "watchdog/%d", hotcpu);
if (IS_ERR(p)) {
- printk("watchdog for %i failed\n", hotcpu);
+ printk(KERN_ERR "watchdog for %i failed\n", hotcpu);
return NOTIFY_BAD;
}
- per_cpu(touch_timestamp, hotcpu) = 0;
- per_cpu(watchdog_task, hotcpu) = p;
+ per_cpu(touch_timestamp, hotcpu) = 0;
+ per_cpu(watchdog_task, hotcpu) = p;
kthread_bind(p, hotcpu);
- break;
+ break;
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
wake_up_process(per_cpu(watchdog_task, hotcpu));
@@ -176,7 +188,7 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
kthread_stop(p);
break;
#endif /* CONFIG_HOTPLUG_CPU */
- }
+ }
return NOTIFY_OK;
}
diff --git a/kernel/sys.c b/kernel/sys.c
index 8ae2e636eb1..bc8879c822a 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -105,7 +105,6 @@ EXPORT_SYMBOL(cad_pid);
*/
void (*pm_power_off_prepare)(void);
-EXPORT_SYMBOL(pm_power_off_prepare);
/*
* Notifier list for kernel code which wants to be called
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index b0ec498a18d..52c7a151e29 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -4,6 +4,10 @@
#include <asm/unistd.h>
+/* we can't #include <linux/syscalls.h> here,
+ but tell gcc to not warn with -Wmissing-prototypes */
+asmlinkage long sys_ni_syscall(void);
+
/*
* Non-implemented system calls get redirected here.
*/
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index ec14aa8ac51..067554bda8b 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -24,7 +24,7 @@
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
-#include <linux/capability.h>
+#include <linux/security.h>
#include <linux/ctype.h>
#include <linux/utsname.h>
#include <linux/smp_lock.h>
@@ -55,6 +55,8 @@
#include <asm/stacktrace.h>
#endif
+static int deprecated_sysctl_warning(struct __sysctl_args *args);
+
#if defined(CONFIG_SYSCTL)
/* External variables not in a header file. */
@@ -63,6 +65,7 @@ extern int print_fatal_signals;
extern int sysctl_overcommit_memory;
extern int sysctl_overcommit_ratio;
extern int sysctl_panic_on_oom;
+extern int sysctl_oom_kill_allocating_task;
extern int max_threads;
extern int core_uses_pid;
extern int suid_dumpable;
@@ -79,6 +82,19 @@ extern int maps_protect;
extern int sysctl_stat_interval;
extern int audit_argv_kb;
+/* Constants used for minimum and maximum */
+#ifdef CONFIG_DETECT_SOFTLOCKUP
+static int one = 1;
+static int sixty = 60;
+#endif
+
+#ifdef CONFIG_MMU
+static int two = 2;
+#endif
+
+static int zero;
+static int one_hundred = 100;
+
/* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */
static int maxolduid = 65535;
static int minolduid;
@@ -128,32 +144,29 @@ extern int max_lock_depth;
#ifdef CONFIG_SYSCTL_SYSCALL
static int parse_table(int __user *, int, void __user *, size_t __user *,
- void __user *, size_t, ctl_table *);
+ void __user *, size_t, struct ctl_table *);
#endif
#ifdef CONFIG_PROC_SYSCTL
-static int proc_do_cad_pid(ctl_table *table, int write, struct file *filp,
+static int proc_do_cad_pid(struct ctl_table *table, int write, struct file *filp,
void __user *buffer, size_t *lenp, loff_t *ppos);
-static int proc_dointvec_taint(ctl_table *table, int write, struct file *filp,
+static int proc_dointvec_taint(struct ctl_table *table, int write, struct file *filp,
void __user *buffer, size_t *lenp, loff_t *ppos);
#endif
-static ctl_table root_table[];
+static struct ctl_table root_table[];
static struct ctl_table_header root_table_header =
{ root_table, LIST_HEAD_INIT(root_table_header.ctl_entry) };
-static ctl_table kern_table[];
-static ctl_table vm_table[];
-static ctl_table fs_table[];
-static ctl_table debug_table[];
-static ctl_table dev_table[];
-extern ctl_table random_table[];
-#ifdef CONFIG_UNIX98_PTYS
-extern ctl_table pty_table[];
-#endif
+static struct ctl_table kern_table[];
+static struct ctl_table vm_table[];
+static struct ctl_table fs_table[];
+static struct ctl_table debug_table[];
+static struct ctl_table dev_table[];
+extern struct ctl_table random_table[];
#ifdef CONFIG_INOTIFY_USER
-extern ctl_table inotify_table[];
+extern struct ctl_table inotify_table[];
#endif
#ifdef HAVE_ARCH_PICK_MMAP_LAYOUT
@@ -165,7 +178,7 @@ extern int lock_stat;
/* The default sysctl tables: */
-static ctl_table root_table[] = {
+static struct ctl_table root_table[] = {
{
.ctl_name = CTL_KERN,
.procname = "kernel",
@@ -218,7 +231,7 @@ static unsigned long min_wakeup_granularity_ns; /* 0 usecs */
static unsigned long max_wakeup_granularity_ns = 1000000000; /* 1 second */
#endif
-static ctl_table kern_table[] = {
+static struct ctl_table kern_table[] = {
#ifdef CONFIG_SCHED_DEBUG
{
.ctl_name = CTL_UNNUMBERED,
@@ -351,7 +364,6 @@ static ctl_table kern_table[] = {
},
#ifdef CONFIG_PROC_SYSCTL
{
- .ctl_name = KERN_TAINTED,
.procname = "tainted",
.data = &tainted,
.maxlen = sizeof(int),
@@ -359,14 +371,15 @@ static ctl_table kern_table[] = {
.proc_handler = &proc_dointvec_taint,
},
#endif
+#ifdef CONFIG_SECURITY_CAPABILITIES
{
- .ctl_name = KERN_CAP_BSET,
.procname = "cap-bound",
.data = &cap_bset,
.maxlen = sizeof(kernel_cap_t),
.mode = 0600,
.proc_handler = &proc_dointvec_bset,
},
+#endif /* def CONFIG_SECURITY_CAPABILITIES */
#ifdef CONFIG_BLK_DEV_INITRD
{
.ctl_name = KERN_REALROOTDEV,
@@ -500,7 +513,6 @@ static ctl_table kern_table[] = {
#endif
#ifdef CONFIG_PROC_SYSCTL
{
- .ctl_name = KERN_CADPID,
.procname = "cad_pid",
.data = NULL,
.maxlen = sizeof (int),
@@ -522,14 +534,6 @@ static ctl_table kern_table[] = {
.mode = 0555,
.child = random_table,
},
-#ifdef CONFIG_UNIX98_PTYS
- {
- .ctl_name = KERN_PTY,
- .procname = "pty",
- .mode = 0555,
- .child = pty_table,
- },
-#endif
{
.ctl_name = KERN_OVERFLOWUID,
.procname = "overflowuid",
@@ -636,7 +640,6 @@ static ctl_table kern_table[] = {
.proc_handler = &proc_dointvec,
},
{
- .ctl_name = KERN_NMI_WATCHDOG,
.procname = "nmi_watchdog",
.data = &nmi_watchdog_enabled,
.maxlen = sizeof (int),
@@ -692,7 +695,6 @@ static ctl_table kern_table[] = {
#endif
#if defined(CONFIG_ACPI_SLEEP) && defined(CONFIG_X86)
{
- .ctl_name = KERN_ACPI_VIDEO_FLAGS,
.procname = "acpi_video_flags",
.data = &acpi_realmode_flags,
.maxlen = sizeof (unsigned long),
@@ -710,6 +712,19 @@ static ctl_table kern_table[] = {
.proc_handler = &proc_dointvec,
},
#endif
+#ifdef CONFIG_DETECT_SOFTLOCKUP
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "softlockup_thresh",
+ .data = &softlockup_thresh,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_minmax,
+ .strategy = &sysctl_intvec,
+ .extra1 = &one,
+ .extra2 = &sixty,
+ },
+#endif
#ifdef CONFIG_COMPAT
{
.ctl_name = KERN_COMPAT_LOG,
@@ -756,14 +771,7 @@ static ctl_table kern_table[] = {
{ .ctl_name = 0 }
};
-/* Constants for minimum and maximum testing in vm_table.
- We use these as one-element integer vectors. */
-static int zero;
-static int two = 2;
-static int one_hundred = 100;
-
-
-static ctl_table vm_table[] = {
+static struct ctl_table vm_table[] = {
{
.ctl_name = VM_OVERCOMMIT_MEMORY,
.procname = "overcommit_memory",
@@ -781,6 +789,14 @@ static ctl_table vm_table[] = {
.proc_handler = &proc_dointvec,
},
{
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "oom_kill_allocating_task",
+ .data = &sysctl_oom_kill_allocating_task,
+ .maxlen = sizeof(sysctl_oom_kill_allocating_task),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
+ {
.ctl_name = VM_OVERCOMMIT_RATIO,
.procname = "overcommit_ratio",
.data = &sysctl_overcommit_ratio,
@@ -813,13 +829,12 @@ static ctl_table vm_table[] = {
.data = &vm_dirty_ratio,
.maxlen = sizeof(vm_dirty_ratio),
.mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
+ .proc_handler = &dirty_ratio_handler,
.strategy = &sysctl_intvec,
.extra1 = &zero,
.extra2 = &one_hundred,
},
{
- .ctl_name = VM_DIRTY_WB_CS,
.procname = "dirty_writeback_centisecs",
.data = &dirty_writeback_interval,
.maxlen = sizeof(dirty_writeback_interval),
@@ -827,7 +842,6 @@ static ctl_table vm_table[] = {
.proc_handler = &dirty_writeback_centisecs_handler,
},
{
- .ctl_name = VM_DIRTY_EXPIRE_CS,
.procname = "dirty_expire_centisecs",
.data = &dirty_expire_interval,
.maxlen = sizeof(dirty_expire_interval),
@@ -855,7 +869,6 @@ static ctl_table vm_table[] = {
},
#ifdef CONFIG_HUGETLB_PAGE
{
- .ctl_name = VM_HUGETLB_PAGES,
.procname = "nr_hugepages",
.data = &max_huge_pages,
.maxlen = sizeof(unsigned long),
@@ -880,6 +893,14 @@ static ctl_table vm_table[] = {
.mode = 0644,
.proc_handler = &hugetlb_treat_movable_handler,
},
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "hugetlb_dynamic_pool",
+ .data = &hugetlb_dynamic_pool,
+ .maxlen = sizeof(hugetlb_dynamic_pool),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
#endif
{
.ctl_name = VM_LOWMEM_RESERVE_RATIO,
@@ -1057,12 +1078,12 @@ static ctl_table vm_table[] = {
};
#if defined(CONFIG_BINFMT_MISC) || defined(CONFIG_BINFMT_MISC_MODULE)
-static ctl_table binfmt_misc_table[] = {
+static struct ctl_table binfmt_misc_table[] = {
{ .ctl_name = 0 }
};
#endif
-static ctl_table fs_table[] = {
+static struct ctl_table fs_table[] = {
{
.ctl_name = FS_NRINODE,
.procname = "inode-nr",
@@ -1080,7 +1101,6 @@ static ctl_table fs_table[] = {
.proc_handler = &proc_dointvec,
},
{
- .ctl_name = FS_NRFILE,
.procname = "file-nr",
.data = &files_stat,
.maxlen = 3*sizeof(int),
@@ -1156,7 +1176,6 @@ static ctl_table fs_table[] = {
.extra2 = &two,
},
{
- .ctl_name = FS_AIO_NR,
.procname = "aio-nr",
.data = &aio_nr,
.maxlen = sizeof(aio_nr),
@@ -1164,7 +1183,6 @@ static ctl_table fs_table[] = {
.proc_handler = &proc_doulongvec_minmax,
},
{
- .ctl_name = FS_AIO_MAX_NR,
.procname = "aio-max-nr",
.data = &aio_max_nr,
.maxlen = sizeof(aio_max_nr),
@@ -1203,7 +1221,7 @@ static ctl_table fs_table[] = {
{ .ctl_name = 0 }
};
-static ctl_table debug_table[] = {
+static struct ctl_table debug_table[] = {
#if defined(CONFIG_X86) || defined(CONFIG_PPC)
{
.ctl_name = CTL_UNNUMBERED,
@@ -1217,7 +1235,7 @@ static ctl_table debug_table[] = {
{ .ctl_name = 0 }
};
-static ctl_table dev_table[] = {
+static struct ctl_table dev_table[] = {
{ .ctl_name = 0 }
};
@@ -1333,10 +1351,15 @@ asmlinkage long sys_sysctl(struct __sysctl_args __user *args)
if (copy_from_user(&tmp, args, sizeof(tmp)))
return -EFAULT;
+ error = deprecated_sysctl_warning(&tmp);
+ if (error)
+ goto out;
+
lock_kernel();
error = do_sysctl(tmp.name, tmp.nlen, tmp.oldval, tmp.oldlenp,
tmp.newval, tmp.newlen);
unlock_kernel();
+out:
return error;
}
#endif /* CONFIG_SYSCTL_SYSCALL */
@@ -1357,7 +1380,7 @@ static int test_perm(int mode, int op)
return -EACCES;
}
-int sysctl_perm(ctl_table *table, int op)
+int sysctl_perm(struct ctl_table *table, int op)
{
int error;
error = security_sysctl(table, op);
@@ -1370,7 +1393,7 @@ int sysctl_perm(ctl_table *table, int op)
static int parse_table(int __user *name, int nlen,
void __user *oldval, size_t __user *oldlenp,
void __user *newval, size_t newlen,
- ctl_table *table)
+ struct ctl_table *table)
{
int n;
repeat:
@@ -1401,13 +1424,12 @@ repeat:
}
/* Perform the actual read/write of a sysctl table entry. */
-int do_sysctl_strategy (ctl_table *table,
+int do_sysctl_strategy (struct ctl_table *table,
int __user *name, int nlen,
void __user *oldval, size_t __user *oldlenp,
void __user *newval, size_t newlen)
{
int op = 0, rc;
- size_t len;
if (oldval)
op |= 004;
@@ -1428,25 +1450,10 @@ int do_sysctl_strategy (ctl_table *table,
/* If there is no strategy routine, or if the strategy returns
* zero, proceed with automatic r/w */
if (table->data && table->maxlen) {
- if (oldval && oldlenp) {
- if (get_user(len, oldlenp))
- return -EFAULT;
- if (len) {
- if (len > table->maxlen)
- len = table->maxlen;
- if(copy_to_user(oldval, table->data, len))
- return -EFAULT;
- if(put_user(len, oldlenp))
- return -EFAULT;
- }
- }
- if (newval && newlen) {
- len = newlen;
- if (len > table->maxlen)
- len = table->maxlen;
- if(copy_from_user(table->data, newval, len))
- return -EFAULT;
- }
+ rc = sysctl_data(table, name, nlen, oldval, oldlenp,
+ newval, newlen);
+ if (rc < 0)
+ return rc;
}
return 0;
}
@@ -1463,7 +1470,9 @@ static void sysctl_set_parent(struct ctl_table *parent, struct ctl_table *table)
static __init int sysctl_init(void)
{
+ int err;
sysctl_set_parent(NULL, root_table);
+ err = sysctl_check_table(root_table);
return 0;
}
@@ -1476,7 +1485,7 @@ core_initcall(sysctl_init);
* Register a sysctl table hierarchy. @table should be a filled in ctl_table
* array. An entry with a ctl_name of 0 terminates the table.
*
- * The members of the &ctl_table structure are used as follows:
+ * The members of the &struct ctl_table structure are used as follows:
*
* ctl_name - This is the numeric sysctl value used by sysctl(2). The number
* must be unique within that level of sysctl
@@ -1537,7 +1546,7 @@ core_initcall(sysctl_init);
* This routine returns %NULL on a failure to register, and a pointer
* to the table header on success.
*/
-struct ctl_table_header *register_sysctl_table(ctl_table * table)
+struct ctl_table_header *register_sysctl_table(struct ctl_table * table)
{
struct ctl_table_header *tmp;
tmp = kmalloc(sizeof(struct ctl_table_header), GFP_KERNEL);
@@ -1548,6 +1557,10 @@ struct ctl_table_header *register_sysctl_table(ctl_table * table)
tmp->used = 0;
tmp->unregistering = NULL;
sysctl_set_parent(NULL, table);
+ if (sysctl_check_table(tmp->ctl_table)) {
+ kfree(tmp);
+ return NULL;
+ }
spin_lock(&sysctl_lock);
list_add_tail(&tmp->ctl_entry, &root_table_header.ctl_entry);
spin_unlock(&sysctl_lock);
@@ -1571,7 +1584,7 @@ void unregister_sysctl_table(struct ctl_table_header * header)
}
#else /* !CONFIG_SYSCTL */
-struct ctl_table_header *register_sysctl_table(ctl_table * table)
+struct ctl_table_header *register_sysctl_table(struct ctl_table * table)
{
return NULL;
}
@@ -1664,7 +1677,7 @@ static int _proc_do_string(void* data, int maxlen, int write,
*
* Returns 0 on success.
*/
-int proc_dostring(ctl_table *table, int write, struct file *filp,
+int proc_dostring(struct ctl_table *table, int write, struct file *filp,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
return _proc_do_string(table->data, table->maxlen, write, filp,
@@ -1691,7 +1704,7 @@ static int do_proc_dointvec_conv(int *negp, unsigned long *lvalp,
return 0;
}
-static int __do_proc_dointvec(void *tbl_data, ctl_table *table,
+static int __do_proc_dointvec(void *tbl_data, struct ctl_table *table,
int write, struct file *filp, void __user *buffer,
size_t *lenp, loff_t *ppos,
int (*conv)(int *negp, unsigned long *lvalp, int *valp,
@@ -1801,7 +1814,7 @@ static int __do_proc_dointvec(void *tbl_data, ctl_table *table,
#undef TMPBUFLEN
}
-static int do_proc_dointvec(ctl_table *table, int write, struct file *filp,
+static int do_proc_dointvec(struct ctl_table *table, int write, struct file *filp,
void __user *buffer, size_t *lenp, loff_t *ppos,
int (*conv)(int *negp, unsigned long *lvalp, int *valp,
int write, void *data),
@@ -1825,7 +1838,7 @@ static int do_proc_dointvec(ctl_table *table, int write, struct file *filp,
*
* Returns 0 on success.
*/
-int proc_dointvec(ctl_table *table, int write, struct file *filp,
+int proc_dointvec(struct ctl_table *table, int write, struct file *filp,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
return do_proc_dointvec(table,write,filp,buffer,lenp,ppos,
@@ -1861,11 +1874,12 @@ static int do_proc_dointvec_bset_conv(int *negp, unsigned long *lvalp,
return 0;
}
+#ifdef CONFIG_SECURITY_CAPABILITIES
/*
* init may raise the set.
*/
-
-int proc_dointvec_bset(ctl_table *table, int write, struct file *filp,
+
+int proc_dointvec_bset(struct ctl_table *table, int write, struct file *filp,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
int op;
@@ -1878,11 +1892,12 @@ int proc_dointvec_bset(ctl_table *table, int write, struct file *filp,
return do_proc_dointvec(table,write,filp,buffer,lenp,ppos,
do_proc_dointvec_bset_conv,&op);
}
+#endif /* def CONFIG_SECURITY_CAPABILITIES */
/*
* Taint values can only be increased
*/
-static int proc_dointvec_taint(ctl_table *table, int write, struct file *filp,
+static int proc_dointvec_taint(struct ctl_table *table, int write, struct file *filp,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
int op;
@@ -1941,7 +1956,7 @@ static int do_proc_dointvec_minmax_conv(int *negp, unsigned long *lvalp,
*
* Returns 0 on success.
*/
-int proc_dointvec_minmax(ctl_table *table, int write, struct file *filp,
+int proc_dointvec_minmax(struct ctl_table *table, int write, struct file *filp,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
struct do_proc_dointvec_minmax_conv_param param = {
@@ -1952,7 +1967,7 @@ int proc_dointvec_minmax(ctl_table *table, int write, struct file *filp,
do_proc_dointvec_minmax_conv, &param);
}
-static int __do_proc_doulongvec_minmax(void *data, ctl_table *table, int write,
+static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int write,
struct file *filp,
void __user *buffer,
size_t *lenp, loff_t *ppos,
@@ -2057,7 +2072,7 @@ static int __do_proc_doulongvec_minmax(void *data, ctl_table *table, int write,
#undef TMPBUFLEN
}
-static int do_proc_doulongvec_minmax(ctl_table *table, int write,
+static int do_proc_doulongvec_minmax(struct ctl_table *table, int write,
struct file *filp,
void __user *buffer,
size_t *lenp, loff_t *ppos,
@@ -2085,7 +2100,7 @@ static int do_proc_doulongvec_minmax(ctl_table *table, int write,
*
* Returns 0 on success.
*/
-int proc_doulongvec_minmax(ctl_table *table, int write, struct file *filp,
+int proc_doulongvec_minmax(struct ctl_table *table, int write, struct file *filp,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
return do_proc_doulongvec_minmax(table, write, filp, buffer, lenp, ppos, 1l, 1l);
@@ -2109,7 +2124,7 @@ int proc_doulongvec_minmax(ctl_table *table, int write, struct file *filp,
*
* Returns 0 on success.
*/
-int proc_doulongvec_ms_jiffies_minmax(ctl_table *table, int write,
+int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int write,
struct file *filp,
void __user *buffer,
size_t *lenp, loff_t *ppos)
@@ -2202,7 +2217,7 @@ static int do_proc_dointvec_ms_jiffies_conv(int *negp, unsigned long *lvalp,
*
* Returns 0 on success.
*/
-int proc_dointvec_jiffies(ctl_table *table, int write, struct file *filp,
+int proc_dointvec_jiffies(struct ctl_table *table, int write, struct file *filp,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
return do_proc_dointvec(table,write,filp,buffer,lenp,ppos,
@@ -2225,7 +2240,7 @@ int proc_dointvec_jiffies(ctl_table *table, int write, struct file *filp,
*
* Returns 0 on success.
*/
-int proc_dointvec_userhz_jiffies(ctl_table *table, int write, struct file *filp,
+int proc_dointvec_userhz_jiffies(struct ctl_table *table, int write, struct file *filp,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
return do_proc_dointvec(table,write,filp,buffer,lenp,ppos,
@@ -2249,14 +2264,14 @@ int proc_dointvec_userhz_jiffies(ctl_table *table, int write, struct file *filp,
*
* Returns 0 on success.
*/
-int proc_dointvec_ms_jiffies(ctl_table *table, int write, struct file *filp,
+int proc_dointvec_ms_jiffies(struct ctl_table *table, int write, struct file *filp,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
return do_proc_dointvec(table, write, filp, buffer, lenp, ppos,
do_proc_dointvec_ms_jiffies_conv, NULL);
}
-static int proc_do_cad_pid(ctl_table *table, int write, struct file *filp,
+static int proc_do_cad_pid(struct ctl_table *table, int write, struct file *filp,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
struct pid *new_pid;
@@ -2280,55 +2295,55 @@ static int proc_do_cad_pid(ctl_table *table, int write, struct file *filp,
#else /* CONFIG_PROC_FS */
-int proc_dostring(ctl_table *table, int write, struct file *filp,
+int proc_dostring(struct ctl_table *table, int write, struct file *filp,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
return -ENOSYS;
}
-int proc_dointvec(ctl_table *table, int write, struct file *filp,
+int proc_dointvec(struct ctl_table *table, int write, struct file *filp,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
return -ENOSYS;
}
-int proc_dointvec_bset(ctl_table *table, int write, struct file *filp,
+int proc_dointvec_bset(struct ctl_table *table, int write, struct file *filp,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
return -ENOSYS;
}
-int proc_dointvec_minmax(ctl_table *table, int write, struct file *filp,
+int proc_dointvec_minmax(struct ctl_table *table, int write, struct file *filp,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
return -ENOSYS;
}
-int proc_dointvec_jiffies(ctl_table *table, int write, struct file *filp,
+int proc_dointvec_jiffies(struct ctl_table *table, int write, struct file *filp,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
return -ENOSYS;
}
-int proc_dointvec_userhz_jiffies(ctl_table *table, int write, struct file *filp,
+int proc_dointvec_userhz_jiffies(struct ctl_table *table, int write, struct file *filp,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
return -ENOSYS;
}
-int proc_dointvec_ms_jiffies(ctl_table *table, int write, struct file *filp,
+int proc_dointvec_ms_jiffies(struct ctl_table *table, int write, struct file *filp,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
return -ENOSYS;
}
-int proc_doulongvec_minmax(ctl_table *table, int write, struct file *filp,
+int proc_doulongvec_minmax(struct ctl_table *table, int write, struct file *filp,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
return -ENOSYS;
}
-int proc_doulongvec_ms_jiffies_minmax(ctl_table *table, int write,
+int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int write,
struct file *filp,
void __user *buffer,
size_t *lenp, loff_t *ppos)
@@ -2345,8 +2360,42 @@ int proc_doulongvec_ms_jiffies_minmax(ctl_table *table, int write,
* General sysctl support routines
*/
+/* The generic sysctl data routine (used if no strategy routine supplied) */
+int sysctl_data(struct ctl_table *table, int __user *name, int nlen,
+ void __user *oldval, size_t __user *oldlenp,
+ void __user *newval, size_t newlen)
+{
+ size_t len;
+
+	/* Bail out if there is no variable to read or write */
+ if (!table->data || !table->maxlen)
+ return -ENOTDIR;
+
+ if (oldval && oldlenp) {
+ if (get_user(len, oldlenp))
+ return -EFAULT;
+ if (len) {
+ if (len > table->maxlen)
+ len = table->maxlen;
+ if (copy_to_user(oldval, table->data, len))
+ return -EFAULT;
+ if (put_user(len, oldlenp))
+ return -EFAULT;
+ }
+ }
+
+ if (newval && newlen) {
+ if (newlen > table->maxlen)
+ newlen = table->maxlen;
+
+ if (copy_from_user(table->data, newval, newlen))
+ return -EFAULT;
+ }
+ return 1;
+}
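A user-space model of the copy semantics implemented here may help: a read clamps to the smaller of the caller's length and table->maxlen and reports the actual length back, while a write clamps silently. memcpy() stands in for the copy_to_user()/copy_from_user()/get_user()/put_user() helpers, and the struct is a stripped-down stand-in for struct ctl_table.

#include <stdio.h>
#include <string.h>

struct model_table {
    void  *data;
    size_t maxlen;
};

static int model_sysctl_data(struct model_table *t,
                             void *oldval, size_t *oldlenp,
                             const void *newval, size_t newlen)
{
    size_t len;

    if (!t->data || !t->maxlen)
        return -1;                          /* -ENOTDIR in the kernel */

    if (oldval && oldlenp) {                /* read side */
        len = *oldlenp;
        if (len) {
            if (len > t->maxlen)
                len = t->maxlen;
            memcpy(oldval, t->data, len);   /* copy_to_user() */
            *oldlenp = len;                 /* put_user() */
        }
    }

    if (newval && newlen) {                 /* write side */
        if (newlen > t->maxlen)
            newlen = t->maxlen;
        memcpy(t->data, newval, newlen);    /* copy_from_user() */
    }
    return 1;                               /* "handled", as above */
}

int main(void)
{
    int value = 42, out = 0, in = 7;
    size_t outlen = sizeof(out);
    struct model_table t = { &value, sizeof(value) };

    model_sysctl_data(&t, &out, &outlen, NULL, 0);      /* read: out becomes 42 */
    model_sysctl_data(&t, NULL, NULL, &in, sizeof(in)); /* write: value becomes 7 */
    printf("read %d (len %zu), value now %d\n", out, outlen, value);
    return 0;
}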
+
/* The generic string strategy routine: */
-int sysctl_string(ctl_table *table, int __user *name, int nlen,
+int sysctl_string(struct ctl_table *table, int __user *name, int nlen,
void __user *oldval, size_t __user *oldlenp,
void __user *newval, size_t newlen)
{
@@ -2392,7 +2441,7 @@ int sysctl_string(ctl_table *table, int __user *name, int nlen,
* are between the minimum and maximum values given in the arrays
* table->extra1 and table->extra2, respectively.
*/
-int sysctl_intvec(ctl_table *table, int __user *name, int nlen,
+int sysctl_intvec(struct ctl_table *table, int __user *name, int nlen,
void __user *oldval, size_t __user *oldlenp,
void __user *newval, size_t newlen)
{
@@ -2428,7 +2477,7 @@ int sysctl_intvec(ctl_table *table, int __user *name, int nlen,
}
/* Strategy function to convert jiffies to seconds */
-int sysctl_jiffies(ctl_table *table, int __user *name, int nlen,
+int sysctl_jiffies(struct ctl_table *table, int __user *name, int nlen,
void __user *oldval, size_t __user *oldlenp,
void __user *newval, size_t newlen)
{
@@ -2462,7 +2511,7 @@ int sysctl_jiffies(ctl_table *table, int __user *name, int nlen,
}
/* Strategy function to convert jiffies to seconds */
-int sysctl_ms_jiffies(ctl_table *table, int __user *name, int nlen,
+int sysctl_ms_jiffies(struct ctl_table *table, int __user *name, int nlen,
void __user *oldval, size_t __user *oldlenp,
void __user *newval, size_t newlen)
{
@@ -2502,59 +2551,50 @@ int sysctl_ms_jiffies(ctl_table *table, int __user *name, int nlen,
asmlinkage long sys_sysctl(struct __sysctl_args __user *args)
{
- static int msg_count;
struct __sysctl_args tmp;
- int name[CTL_MAXNAME];
- int i;
+ int error;
- /* Read in the sysctl name for better debug message logging */
if (copy_from_user(&tmp, args, sizeof(tmp)))
return -EFAULT;
- if (tmp.nlen <= 0 || tmp.nlen >= CTL_MAXNAME)
- return -ENOTDIR;
- for (i = 0; i < tmp.nlen; i++)
- if (get_user(name[i], tmp.name + i))
- return -EFAULT;
- /* Ignore accesses to kernel.version */
- if ((tmp.nlen == 2) && (name[0] == CTL_KERN) && (name[1] == KERN_VERSION))
- goto out;
+ error = deprecated_sysctl_warning(&tmp);
- if (msg_count < 5) {
- msg_count++;
- printk(KERN_INFO
- "warning: process `%s' used the removed sysctl "
- "system call with ", current->comm);
- for (i = 0; i < tmp.nlen; i++)
- printk("%d.", name[i]);
- printk("\n");
- }
-out:
+ /* If no error reading the parameters then just -ENOSYS ... */
+ if (!error)
+ error = -ENOSYS;
+
+ return error;
+}
+
+int sysctl_data(struct ctl_table *table, int __user *name, int nlen,
+ void __user *oldval, size_t __user *oldlenp,
+ void __user *newval, size_t newlen)
+{
return -ENOSYS;
}
-int sysctl_string(ctl_table *table, int __user *name, int nlen,
+int sysctl_string(struct ctl_table *table, int __user *name, int nlen,
void __user *oldval, size_t __user *oldlenp,
void __user *newval, size_t newlen)
{
return -ENOSYS;
}
-int sysctl_intvec(ctl_table *table, int __user *name, int nlen,
+int sysctl_intvec(struct ctl_table *table, int __user *name, int nlen,
void __user *oldval, size_t __user *oldlenp,
void __user *newval, size_t newlen)
{
return -ENOSYS;
}
-int sysctl_jiffies(ctl_table *table, int __user *name, int nlen,
+int sysctl_jiffies(struct ctl_table *table, int __user *name, int nlen,
void __user *oldval, size_t __user *oldlenp,
void __user *newval, size_t newlen)
{
return -ENOSYS;
}
-int sysctl_ms_jiffies(ctl_table *table, int __user *name, int nlen,
+int sysctl_ms_jiffies(struct ctl_table *table, int __user *name, int nlen,
void __user *oldval, size_t __user *oldlenp,
void __user *newval, size_t newlen)
{
@@ -2563,6 +2603,33 @@ int sysctl_ms_jiffies(ctl_table *table, int __user *name, int nlen,
#endif /* CONFIG_SYSCTL_SYSCALL */
+static int deprecated_sysctl_warning(struct __sysctl_args *args)
+{
+ static int msg_count;
+ int name[CTL_MAXNAME];
+ int i;
+
+ /* Read in the sysctl name for better debug message logging */
+ for (i = 0; i < args->nlen; i++)
+ if (get_user(name[i], args->name + i))
+ return -EFAULT;
+
+ /* Ignore accesses to kernel.version */
+ if ((args->nlen == 2) && (name[0] == CTL_KERN) && (name[1] == KERN_VERSION))
+ return 0;
+
+ if (msg_count < 5) {
+ msg_count++;
+ printk(KERN_INFO
+ "warning: process `%s' used the deprecated sysctl "
+ "system call with ", current->comm);
+ for (i = 0; i < args->nlen; i++)
+ printk("%d.", name[i]);
+ printk("\n");
+ }
+ return 0;
+}
+
/*
* No sense putting this after each symbol definition, twice,
* exception granted :-)
@@ -2580,4 +2647,5 @@ EXPORT_SYMBOL(sysctl_intvec);
EXPORT_SYMBOL(sysctl_jiffies);
EXPORT_SYMBOL(sysctl_ms_jiffies);
EXPORT_SYMBOL(sysctl_string);
+EXPORT_SYMBOL(sysctl_data);
EXPORT_SYMBOL(unregister_sysctl_table);
diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
new file mode 100644
index 00000000000..3c9ef5a7d57
--- /dev/null
+++ b/kernel/sysctl_check.c
@@ -0,0 +1,1588 @@
+#include <linux/stat.h>
+#include <linux/sysctl.h>
+#include "../arch/s390/appldata/appldata.h"
+#include "../fs/xfs/linux-2.6/xfs_sysctl.h"
+#include <linux/sunrpc/debug.h>
+#include <linux/string.h>
+#include <net/ip_vs.h>
+
+struct trans_ctl_table {
+ int ctl_name;
+ const char *procname;
+ struct trans_ctl_table *child;
+};
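The tables that follow map binary ctl_name values, and nested directories via ->child, onto the corresponding /proc/sys names; sysctl_check.c walks them when validating registered tables. A small user-space sketch of resolving a binary path against such a table, using a tiny made-up subset of entries rather than the real ones:

#include <stdio.h>

/* Redeclared locally for the sketch; the real definition is above. */
struct trans_ctl_table {
    int ctl_name;
    const char *procname;
    const struct trans_ctl_table *child;
};

static const struct trans_ctl_table demo_random_table[] = {
    { 1, "poolsize" },
    { 2, "entropy_avail" },
    {}
};

static const struct trans_ctl_table demo_kern_table[] = {
    { 1,  "ostype" },
    { 40, "random", demo_random_table },
    {}
};

/* Find the entry for one binary name at one level; the {} sentinel
 * (ctl_name == 0 and procname == NULL) terminates the table. */
static const struct trans_ctl_table *lookup(const struct trans_ctl_table *t,
                                            int name)
{
    for (; t->ctl_name || t->procname; t++)
        if (t->ctl_name == name)
            return t;
    return NULL;
}

int main(void)
{
    /* Resolve the illustrative binary path 40.2 level by level. */
    const struct trans_ctl_table *dir = lookup(demo_kern_table, 40);
    const struct trans_ctl_table *leaf = dir ? lookup(dir->child, 2) : NULL;

    printf("binary path 40.2 -> %s/%s\n",
           dir ? dir->procname : "?", leaf ? leaf->procname : "?");
    return 0;
}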
+
+static struct trans_ctl_table trans_random_table[] = {
+ { RANDOM_POOLSIZE, "poolsize" },
+ { RANDOM_ENTROPY_COUNT, "entropy_avail" },
+ { RANDOM_READ_THRESH, "read_wakeup_threshold" },
+ { RANDOM_WRITE_THRESH, "write_wakeup_threshold" },
+ { RANDOM_BOOT_ID, "boot_id" },
+ { RANDOM_UUID, "uuid" },
+ {}
+};
+
+static struct trans_ctl_table trans_pty_table[] = {
+ { PTY_MAX, "max" },
+ { PTY_NR, "nr" },
+ {}
+};
+
+static struct trans_ctl_table trans_kern_table[] = {
+ { KERN_OSTYPE, "ostype" },
+ { KERN_OSRELEASE, "osrelease" },
+ /* KERN_OSREV not used */
+ { KERN_VERSION, "version" },
+ /* KERN_SECUREMASK not used */
+ /* KERN_PROF not used */
+ { KERN_NODENAME, "hostname" },
+ { KERN_DOMAINNAME, "domainname" },
+
+#ifdef CONFIG_SECURITY_CAPABILITIES
+ { KERN_CAP_BSET, "cap-bound" },
+#endif /* def CONFIG_SECURITY_CAPABILITIES */
+
+ { KERN_PANIC, "panic" },
+ { KERN_REALROOTDEV, "real-root-dev" },
+
+ { KERN_SPARC_REBOOT, "reboot-cmd" },
+ { KERN_CTLALTDEL, "ctrl-alt-del" },
+ { KERN_PRINTK, "printk" },
+
+ /* KERN_NAMETRANS not used */
+ /* KERN_PPC_HTABRECLAIM not used */
+ /* KERN_PPC_ZEROPAGED not used */
+ { KERN_PPC_POWERSAVE_NAP, "powersave-nap" },
+
+ { KERN_MODPROBE, "modprobe" },
+ { KERN_SG_BIG_BUFF, "sg-big-buff" },
+ { KERN_ACCT, "acct" },
+ { KERN_PPC_L2CR, "l2cr" },
+
+ /* KERN_RTSIGNR not used */
+ /* KERN_RTSIGMAX not used */
+
+ { KERN_SHMMAX, "shmmax" },
+ { KERN_MSGMAX, "msgmax" },
+ { KERN_MSGMNB, "msgmnb" },
+ /* KERN_MSGPOOL not used*/
+ { KERN_SYSRQ, "sysrq" },
+ { KERN_MAX_THREADS, "threads-max" },
+ { KERN_RANDOM, "random", trans_random_table },
+ { KERN_SHMALL, "shmall" },
+ { KERN_MSGMNI, "msgmni" },
+ { KERN_SEM, "sem" },
+ { KERN_SPARC_STOP_A, "stop-a" },
+ { KERN_SHMMNI, "shmmni" },
+
+ { KERN_OVERFLOWUID, "overflowuid" },
+ { KERN_OVERFLOWGID, "overflowgid" },
+
+ { KERN_HOTPLUG, "hotplug", },
+ { KERN_IEEE_EMULATION_WARNINGS, "ieee_emulation_warnings" },
+
+ { KERN_S390_USER_DEBUG_LOGGING, "userprocess_debug" },
+ { KERN_CORE_USES_PID, "core_uses_pid" },
+ { KERN_TAINTED, "tainted" },
+ { KERN_CADPID, "cad_pid" },
+ { KERN_PIDMAX, "pid_max" },
+ { KERN_CORE_PATTERN, "core_pattern" },
+ { KERN_PANIC_ON_OOPS, "panic_on_oops" },
+ { KERN_HPPA_PWRSW, "soft-power" },
+ { KERN_HPPA_UNALIGNED, "unaligned-trap" },
+
+ { KERN_PRINTK_RATELIMIT, "printk_ratelimit" },
+ { KERN_PRINTK_RATELIMIT_BURST, "printk_ratelimit_burst" },
+
+ { KERN_PTY, "pty", trans_pty_table },
+ { KERN_NGROUPS_MAX, "ngroups_max" },
+ { KERN_SPARC_SCONS_PWROFF, "scons_poweroff" },
+ { KERN_HZ_TIMER, "hz_timer" },
+ { KERN_UNKNOWN_NMI_PANIC, "unknown_nmi_panic" },
+ { KERN_BOOTLOADER_TYPE, "bootloader_type" },
+ { KERN_RANDOMIZE, "randomize_va_space" },
+
+ { KERN_SPIN_RETRY, "spin_retry" },
+ { KERN_ACPI_VIDEO_FLAGS, "acpi_video_flags" },
+ { KERN_IA64_UNALIGNED, "ignore-unaligned-usertrap" },
+ { KERN_COMPAT_LOG, "compat-log" },
+ { KERN_MAX_LOCK_DEPTH, "max_lock_depth" },
+ { KERN_NMI_WATCHDOG, "nmi_watchdog" },
+ { KERN_PANIC_ON_NMI, "panic_on_unrecovered_nmi" },
+ {}
+};
+
+static struct trans_ctl_table trans_vm_table[] = {
+ { VM_OVERCOMMIT_MEMORY, "overcommit_memory" },
+ { VM_PAGE_CLUSTER, "page-cluster" },
+ { VM_DIRTY_BACKGROUND, "dirty_background_ratio" },
+ { VM_DIRTY_RATIO, "dirty_ratio" },
+ { VM_DIRTY_WB_CS, "dirty_writeback_centisecs" },
+ { VM_DIRTY_EXPIRE_CS, "dirty_expire_centisecs" },
+ { VM_NR_PDFLUSH_THREADS, "nr_pdflush_threads" },
+ { VM_OVERCOMMIT_RATIO, "overcommit_ratio" },
+ /* VM_PAGEBUF unused */
+ { VM_HUGETLB_PAGES, "nr_hugepages" },
+ { VM_SWAPPINESS, "swappiness" },
+ { VM_LOWMEM_RESERVE_RATIO, "lowmem_reserve_ratio" },
+ { VM_MIN_FREE_KBYTES, "min_free_kbytes" },
+ { VM_MAX_MAP_COUNT, "max_map_count" },
+ { VM_LAPTOP_MODE, "laptop_mode" },
+ { VM_BLOCK_DUMP, "block_dump" },
+ { VM_HUGETLB_GROUP, "hugetlb_shm_group" },
+ { VM_VFS_CACHE_PRESSURE, "vfs_cache_pressure" },
+ { VM_LEGACY_VA_LAYOUT, "legacy_va_layout" },
+ /* VM_SWAP_TOKEN_TIMEOUT unused */
+ { VM_DROP_PAGECACHE, "drop_caches" },
+ { VM_PERCPU_PAGELIST_FRACTION, "percpu_pagelist_fraction" },
+ { VM_ZONE_RECLAIM_MODE, "zone_reclaim_mode" },
+ { VM_MIN_UNMAPPED, "min_unmapped_ratio" },
+ { VM_PANIC_ON_OOM, "panic_on_oom" },
+ { VM_VDSO_ENABLED, "vdso_enabled" },
+ { VM_MIN_SLAB, "min_slab_ratio" },
+ { VM_CMM_PAGES, "cmm_pages" },
+ { VM_CMM_TIMED_PAGES, "cmm_timed_pages" },
+ { VM_CMM_TIMEOUT, "cmm_timeout" },
+
+ {}
+};
+
+static struct trans_ctl_table trans_net_core_table[] = {
+ { NET_CORE_WMEM_MAX, "wmem_max" },
+ { NET_CORE_RMEM_MAX, "rmem_max" },
+ { NET_CORE_WMEM_DEFAULT, "wmem_default" },
+ { NET_CORE_RMEM_DEFAULT, "rmem_default" },
+ /* NET_CORE_DESTROY_DELAY unused */
+ { NET_CORE_MAX_BACKLOG, "netdev_max_backlog" },
+ /* NET_CORE_FASTROUTE unused */
+ { NET_CORE_MSG_COST, "message_cost" },
+ { NET_CORE_MSG_BURST, "message_burst" },
+ { NET_CORE_OPTMEM_MAX, "optmem_max" },
+ /* NET_CORE_HOT_LIST_LENGTH unused */
+ /* NET_CORE_DIVERT_VERSION unused */
+ /* NET_CORE_NO_CONG_THRESH unused */
+ /* NET_CORE_NO_CONG unused */
+ /* NET_CORE_LO_CONG unused */
+ /* NET_CORE_MOD_CONG unused */
+ { NET_CORE_DEV_WEIGHT, "dev_weight" },
+ { NET_CORE_SOMAXCONN, "somaxconn" },
+ { NET_CORE_BUDGET, "netdev_budget" },
+ { NET_CORE_AEVENT_ETIME, "xfrm_aevent_etime" },
+ { NET_CORE_AEVENT_RSEQTH, "xfrm_aevent_rseqth" },
+ { NET_CORE_WARNINGS, "warnings" },
+ {},
+};
+
+static struct trans_ctl_table trans_net_unix_table[] = {
+ /* NET_UNIX_DESTROY_DELAY unused */
+ /* NET_UNIX_DELETE_DELAY unused */
+ { NET_UNIX_MAX_DGRAM_QLEN, "max_dgram_qlen" },
+ {}
+};
+
+static struct trans_ctl_table trans_net_ipv4_route_table[] = {
+ { NET_IPV4_ROUTE_FLUSH, "flush" },
+ { NET_IPV4_ROUTE_MIN_DELAY, "min_delay" },
+ { NET_IPV4_ROUTE_MAX_DELAY, "max_delay" },
+ { NET_IPV4_ROUTE_GC_THRESH, "gc_thresh" },
+ { NET_IPV4_ROUTE_MAX_SIZE, "max_size" },
+ { NET_IPV4_ROUTE_GC_MIN_INTERVAL, "gc_min_interval" },
+ { NET_IPV4_ROUTE_GC_TIMEOUT, "gc_timeout" },
+ { NET_IPV4_ROUTE_GC_INTERVAL, "gc_interval" },
+ { NET_IPV4_ROUTE_REDIRECT_LOAD, "redirect_load" },
+ { NET_IPV4_ROUTE_REDIRECT_NUMBER, "redirect_number" },
+ { NET_IPV4_ROUTE_REDIRECT_SILENCE, "redirect_silence" },
+ { NET_IPV4_ROUTE_ERROR_COST, "error_cost" },
+ { NET_IPV4_ROUTE_ERROR_BURST, "error_burst" },
+ { NET_IPV4_ROUTE_GC_ELASTICITY, "gc_elasticity" },
+ { NET_IPV4_ROUTE_MTU_EXPIRES, "mtu_expires" },
+ { NET_IPV4_ROUTE_MIN_PMTU, "min_pmtu" },
+ { NET_IPV4_ROUTE_MIN_ADVMSS, "min_adv_mss" },
+ { NET_IPV4_ROUTE_SECRET_INTERVAL, "secret_interval" },
+ { NET_IPV4_ROUTE_GC_MIN_INTERVAL_MS, "gc_min_interval_ms" },
+ {}
+};
+
+static struct trans_ctl_table trans_net_ipv4_conf_vars_table[] = {
+ { NET_IPV4_CONF_FORWARDING, "forwarding" },
+ { NET_IPV4_CONF_MC_FORWARDING, "mc_forwarding" },
+
+ { NET_IPV4_CONF_PROXY_ARP, "proxy_arp" },
+ { NET_IPV4_CONF_ACCEPT_REDIRECTS, "accept_redirects" },
+ { NET_IPV4_CONF_SECURE_REDIRECTS, "secure_redirects" },
+ { NET_IPV4_CONF_SEND_REDIRECTS, "send_redirects" },
+ { NET_IPV4_CONF_SHARED_MEDIA, "shared_media" },
+ { NET_IPV4_CONF_RP_FILTER, "rp_filter" },
+ { NET_IPV4_CONF_ACCEPT_SOURCE_ROUTE, "accept_source_route" },
+ { NET_IPV4_CONF_BOOTP_RELAY, "bootp_relay" },
+ { NET_IPV4_CONF_LOG_MARTIANS, "log_martians" },
+ { NET_IPV4_CONF_TAG, "tag" },
+ { NET_IPV4_CONF_ARPFILTER, "arp_filter" },
+ { NET_IPV4_CONF_MEDIUM_ID, "medium_id" },
+ { NET_IPV4_CONF_NOXFRM, "disable_xfrm" },
+ { NET_IPV4_CONF_NOPOLICY, "disable_policy" },
+ { NET_IPV4_CONF_FORCE_IGMP_VERSION, "force_igmp_version" },
+
+ { NET_IPV4_CONF_ARP_ANNOUNCE, "arp_announce" },
+ { NET_IPV4_CONF_ARP_IGNORE, "arp_ignore" },
+ { NET_IPV4_CONF_PROMOTE_SECONDARIES, "promote_secondaries" },
+ { NET_IPV4_CONF_ARP_ACCEPT, "arp_accept" },
+ {}
+};
+
+static struct trans_ctl_table trans_net_ipv4_conf_table[] = {
+ { NET_PROTO_CONF_ALL, "all", trans_net_ipv4_conf_vars_table },
+ { NET_PROTO_CONF_DEFAULT, "default", trans_net_ipv4_conf_vars_table },
+ { 0, NULL, trans_net_ipv4_conf_vars_table },
+ {}
+};
+
+
+static struct trans_ctl_table trans_net_ipv4_vs_table[] = {
+ { NET_IPV4_VS_AMEMTHRESH, "amemthresh" },
+ { NET_IPV4_VS_DEBUG_LEVEL, "debug_level" },
+ { NET_IPV4_VS_AMDROPRATE, "am_droprate" },
+ { NET_IPV4_VS_DROP_ENTRY, "drop_entry" },
+ { NET_IPV4_VS_DROP_PACKET, "drop_packet" },
+ { NET_IPV4_VS_SECURE_TCP, "secure_tcp" },
+ { NET_IPV4_VS_TO_ES, "timeout_established" },
+ { NET_IPV4_VS_TO_SS, "timeout_synsent" },
+ { NET_IPV4_VS_TO_SR, "timeout_synrecv" },
+ { NET_IPV4_VS_TO_FW, "timeout_finwait" },
+ { NET_IPV4_VS_TO_TW, "timeout_timewait" },
+ { NET_IPV4_VS_TO_CL, "timeout_close" },
+ { NET_IPV4_VS_TO_CW, "timeout_closewait" },
+ { NET_IPV4_VS_TO_LA, "timeout_lastack" },
+ { NET_IPV4_VS_TO_LI, "timeout_listen" },
+ { NET_IPV4_VS_TO_SA, "timeout_synack" },
+ { NET_IPV4_VS_TO_UDP, "timeout_udp" },
+ { NET_IPV4_VS_TO_ICMP, "timeout_icmp" },
+ { NET_IPV4_VS_CACHE_BYPASS, "cache_bypass" },
+ { NET_IPV4_VS_EXPIRE_NODEST_CONN, "expire_nodest_conn" },
+ { NET_IPV4_VS_EXPIRE_QUIESCENT_TEMPLATE, "expire_quiescent_template" },
+ { NET_IPV4_VS_SYNC_THRESHOLD, "sync_threshold" },
+ { NET_IPV4_VS_NAT_ICMP_SEND, "nat_icmp_send" },
+ { NET_IPV4_VS_LBLC_EXPIRE, "lblc_expiration" },
+ { NET_IPV4_VS_LBLCR_EXPIRE, "lblcr_expiration" },
+ {}
+};
+
+static struct trans_ctl_table trans_net_neigh_vars_table[] = {
+ { NET_NEIGH_MCAST_SOLICIT, "mcast_solicit" },
+ { NET_NEIGH_UCAST_SOLICIT, "ucast_solicit" },
+ { NET_NEIGH_APP_SOLICIT, "app_solicit" },
+ { NET_NEIGH_RETRANS_TIME, "retrans_time" },
+ { NET_NEIGH_REACHABLE_TIME, "base_reachable_time" },
+ { NET_NEIGH_DELAY_PROBE_TIME, "delay_first_probe_time" },
+ { NET_NEIGH_GC_STALE_TIME, "gc_stale_time" },
+ { NET_NEIGH_UNRES_QLEN, "unres_qlen" },
+ { NET_NEIGH_PROXY_QLEN, "proxy_qlen" },
+ { NET_NEIGH_ANYCAST_DELAY, "anycast_delay" },
+ { NET_NEIGH_PROXY_DELAY, "proxy_delay" },
+ { NET_NEIGH_LOCKTIME, "locktime" },
+ { NET_NEIGH_GC_INTERVAL, "gc_interval" },
+ { NET_NEIGH_GC_THRESH1, "gc_thresh1" },
+ { NET_NEIGH_GC_THRESH2, "gc_thresh2" },
+ { NET_NEIGH_GC_THRESH3, "gc_thresh3" },
+ { NET_NEIGH_RETRANS_TIME_MS, "retrans_time_ms" },
+ { NET_NEIGH_REACHABLE_TIME_MS, "base_reachable_time_ms" },
+ {}
+};
+
+static struct trans_ctl_table trans_net_neigh_table[] = {
+ { NET_PROTO_CONF_DEFAULT, "default", trans_net_neigh_vars_table },
+ { 0, NULL, trans_net_neigh_vars_table },
+ {}
+};
+
+static struct trans_ctl_table trans_net_ipv4_netfilter_table[] = {
+ { NET_IPV4_NF_CONNTRACK_MAX, "ip_conntrack_max" },
+
+ { NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_SYN_SENT, "ip_conntrack_tcp_timeout_syn_sent" },
+ { NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_SYN_RECV, "ip_conntrack_tcp_timeout_syn_recv" },
+ { NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_ESTABLISHED, "ip_conntrack_tcp_timeout_established" },
+ { NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_FIN_WAIT, "ip_conntrack_tcp_timeout_fin_wait" },
+ { NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_CLOSE_WAIT, "ip_conntrack_tcp_timeout_close_wait" },
+ { NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_LAST_ACK, "ip_conntrack_tcp_timeout_last_ack" },
+ { NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_TIME_WAIT, "ip_conntrack_tcp_timeout_time_wait" },
+ { NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_CLOSE, "ip_conntrack_tcp_timeout_close" },
+
+ { NET_IPV4_NF_CONNTRACK_UDP_TIMEOUT, "ip_conntrack_udp_timeout" },
+ { NET_IPV4_NF_CONNTRACK_UDP_TIMEOUT_STREAM, "ip_conntrack_udp_timeout_stream" },
+ { NET_IPV4_NF_CONNTRACK_ICMP_TIMEOUT, "ip_conntrack_icmp_timeout" },
+ { NET_IPV4_NF_CONNTRACK_GENERIC_TIMEOUT, "ip_conntrack_generic_timeout" },
+
+ { NET_IPV4_NF_CONNTRACK_BUCKETS, "ip_conntrack_buckets" },
+ { NET_IPV4_NF_CONNTRACK_LOG_INVALID, "ip_conntrack_log_invalid" },
+ { NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_MAX_RETRANS, "ip_conntrack_tcp_timeout_max_retrans" },
+ { NET_IPV4_NF_CONNTRACK_TCP_LOOSE, "ip_conntrack_tcp_loose" },
+ { NET_IPV4_NF_CONNTRACK_TCP_BE_LIBERAL, "ip_conntrack_tcp_be_liberal" },
+ { NET_IPV4_NF_CONNTRACK_TCP_MAX_RETRANS, "ip_conntrack_tcp_max_retrans" },
+
+ { NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_CLOSED, "ip_conntrack_sctp_timeout_closed" },
+ { NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_WAIT, "ip_conntrack_sctp_timeout_cookie_wait" },
+ { NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_ECHOED, "ip_conntrack_sctp_timeout_cookie_echoed" },
+ { NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_ESTABLISHED, "ip_conntrack_sctp_timeout_established" },
+ { NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_SENT, "ip_conntrack_sctp_timeout_shutdown_sent" },
+ { NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_RECD, "ip_conntrack_sctp_timeout_shutdown_recd" },
+ { NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_ACK_SENT, "ip_conntrack_sctp_timeout_shutdown_ack_sent" },
+
+ { NET_IPV4_NF_CONNTRACK_COUNT, "ip_conntrack_count" },
+ { NET_IPV4_NF_CONNTRACK_CHECKSUM, "ip_conntrack_checksum" },
+ {}
+};
+
+static struct trans_ctl_table trans_net_ipv4_table[] = {
+ { NET_IPV4_FORWARD, "ip_forward" },
+ { NET_IPV4_DYNADDR, "ip_dynaddr" },
+
+ { NET_IPV4_CONF, "conf", trans_net_ipv4_conf_table },
+ { NET_IPV4_NEIGH, "neigh", trans_net_neigh_table },
+ { NET_IPV4_ROUTE, "route", trans_net_ipv4_route_table },
+ /* NET_IPV4_FIB_HASH unused */
+ { NET_IPV4_NETFILTER, "netfilter", trans_net_ipv4_netfilter_table },
+ { NET_IPV4_VS, "vs", trans_net_ipv4_vs_table },
+
+ { NET_IPV4_TCP_TIMESTAMPS, "tcp_timestamps" },
+ { NET_IPV4_TCP_WINDOW_SCALING, "tcp_window_scaling" },
+ { NET_IPV4_TCP_SACK, "tcp_sack" },
+ { NET_IPV4_TCP_RETRANS_COLLAPSE, "tcp_retrans_collapse" },
+ { NET_IPV4_DEFAULT_TTL, "ip_default_ttl" },
+ /* NET_IPV4_AUTOCONFIG unused */
+ { NET_IPV4_NO_PMTU_DISC, "ip_no_pmtu_disc" },
+ { NET_IPV4_TCP_SYN_RETRIES, "tcp_syn_retries" },
+ { NET_IPV4_IPFRAG_HIGH_THRESH, "ipfrag_high_thresh" },
+ { NET_IPV4_IPFRAG_LOW_THRESH, "ipfrag_low_thresh" },
+ { NET_IPV4_IPFRAG_TIME, "ipfrag_time" },
+ /* NET_IPV4_TCP_MAX_KA_PROBES unused */
+ { NET_IPV4_TCP_KEEPALIVE_TIME, "tcp_keepalive_time" },
+ { NET_IPV4_TCP_KEEPALIVE_PROBES, "tcp_keepalive_probes" },
+ { NET_IPV4_TCP_RETRIES1, "tcp_retries1" },
+ { NET_IPV4_TCP_RETRIES2, "tcp_retries2" },
+ { NET_IPV4_TCP_FIN_TIMEOUT, "tcp_fin_timeout" },
+ /* NET_IPV4_IP_MASQ_DEBUG unused */
+ { NET_TCP_SYNCOOKIES, "tcp_syncookies" },
+ { NET_TCP_STDURG, "tcp_stdurg" },
+ { NET_TCP_RFC1337, "tcp_rfc1337" },
+ /* NET_TCP_SYN_TAILDROP unused */
+ { NET_TCP_MAX_SYN_BACKLOG, "tcp_max_syn_backlog" },
+ { NET_IPV4_LOCAL_PORT_RANGE, "ip_local_port_range" },
+ { NET_IPV4_ICMP_ECHO_IGNORE_ALL, "icmp_echo_ignore_all" },
+ { NET_IPV4_ICMP_ECHO_IGNORE_BROADCASTS, "icmp_echo_ignore_broadcasts" },
+ /* NET_IPV4_ICMP_SOURCEQUENCH_RATE unused */
+ /* NET_IPV4_ICMP_DESTUNREACH_RATE unused */
+ /* NET_IPV4_ICMP_TIMEEXCEED_RATE unused */
+ /* NET_IPV4_ICMP_PARAMPROB_RATE unused */
+ /* NET_IPV4_ICMP_ECHOREPLY_RATE unused */
+ { NET_IPV4_ICMP_IGNORE_BOGUS_ERROR_RESPONSES, "icmp_ignore_bogus_error_responses" },
+ { NET_IPV4_IGMP_MAX_MEMBERSHIPS, "igmp_max_memberships" },
+ { NET_TCP_TW_RECYCLE, "tcp_tw_recycle" },
+ /* NET_IPV4_ALWAYS_DEFRAG unused */
+ { NET_IPV4_TCP_KEEPALIVE_INTVL, "tcp_keepalive_intvl" },
+ { NET_IPV4_INET_PEER_THRESHOLD, "inet_peer_threshold" },
+ { NET_IPV4_INET_PEER_MINTTL, "inet_peer_minttl" },
+ { NET_IPV4_INET_PEER_MAXTTL, "inet_peer_maxttl" },
+ { NET_IPV4_INET_PEER_GC_MINTIME, "inet_peer_gc_mintime" },
+ { NET_IPV4_INET_PEER_GC_MAXTIME, "inet_peer_gc_maxtime" },
+ { NET_TCP_ORPHAN_RETRIES, "tcp_orphan_retries" },
+ { NET_TCP_ABORT_ON_OVERFLOW, "tcp_abort_on_overflow" },
+ { NET_TCP_SYNACK_RETRIES, "tcp_synack_retries" },
+ { NET_TCP_MAX_ORPHANS, "tcp_max_orphans" },
+ { NET_TCP_MAX_TW_BUCKETS, "tcp_max_tw_buckets" },
+ { NET_TCP_FACK, "tcp_fack" },
+ { NET_TCP_REORDERING, "tcp_reordering" },
+ { NET_TCP_ECN, "tcp_ecn" },
+ { NET_TCP_DSACK, "tcp_dsack" },
+ { NET_TCP_MEM, "tcp_mem" },
+ { NET_TCP_WMEM, "tcp_wmem" },
+ { NET_TCP_RMEM, "tcp_rmem" },
+ { NET_TCP_APP_WIN, "tcp_app_win" },
+ { NET_TCP_ADV_WIN_SCALE, "tcp_adv_win_scale" },
+ { NET_IPV4_NONLOCAL_BIND, "ip_nonlocal_bind" },
+ { NET_IPV4_ICMP_RATELIMIT, "icmp_ratelimit" },
+ { NET_IPV4_ICMP_RATEMASK, "icmp_ratemask" },
+ { NET_TCP_TW_REUSE, "tcp_tw_reuse" },
+ { NET_TCP_FRTO, "tcp_frto" },
+ { NET_TCP_LOW_LATENCY, "tcp_low_latency" },
+ { NET_IPV4_IPFRAG_SECRET_INTERVAL, "ipfrag_secret_interval" },
+ { NET_IPV4_IGMP_MAX_MSF, "igmp_max_msf" },
+ { NET_TCP_NO_METRICS_SAVE, "tcp_no_metrics_save" },
+ /* NET_TCP_DEFAULT_WIN_SCALE unused */
+ { NET_TCP_MODERATE_RCVBUF, "tcp_moderate_rcvbuf" },
+ { NET_TCP_TSO_WIN_DIVISOR, "tcp_tso_win_divisor" },
+ /* NET_TCP_BIC_BETA unused */
+ { NET_IPV4_ICMP_ERRORS_USE_INBOUND_IFADDR, "icmp_errors_use_inbound_ifaddr" },
+ { NET_TCP_CONG_CONTROL, "tcp_congestion_control" },
+ { NET_TCP_ABC, "tcp_abc" },
+ { NET_IPV4_IPFRAG_MAX_DIST, "ipfrag_max_dist" },
+ { NET_TCP_MTU_PROBING, "tcp_mtu_probing" },
+ { NET_TCP_BASE_MSS, "tcp_base_mss" },
+ { NET_IPV4_TCP_WORKAROUND_SIGNED_WINDOWS, "tcp_workaround_signed_windows" },
+ { NET_TCP_DMA_COPYBREAK, "tcp_dma_copybreak" },
+ { NET_TCP_SLOW_START_AFTER_IDLE, "tcp_slow_start_after_idle" },
+ { NET_CIPSOV4_CACHE_ENABLE, "cipso_cache_enable" },
+ { NET_CIPSOV4_CACHE_BUCKET_SIZE, "cipso_cache_bucket_size" },
+ { NET_CIPSOV4_RBM_OPTFMT, "cipso_rbm_optfmt" },
+ { NET_CIPSOV4_RBM_STRICTVALID, "cipso_rbm_strictvalid" },
+ { NET_TCP_AVAIL_CONG_CONTROL, "tcp_available_congestion_control" },
+ { NET_TCP_ALLOWED_CONG_CONTROL, "tcp_allowed_congestion_control" },
+ { NET_TCP_MAX_SSTHRESH, "tcp_max_ssthresh" },
+ { NET_TCP_FRTO_RESPONSE, "tcp_frto_response" },
+ { 2088 /* NET_IPQ_QMAX */, "ip_queue_maxlen" },
+ {}
+};
+
+static struct trans_ctl_table trans_net_ipx_table[] = {
+ { NET_IPX_PPROP_BROADCASTING, "ipx_pprop_broadcasting" },
+ /* NET_IPX_FORWARDING unused */
+ {}
+};
+
+static struct trans_ctl_table trans_net_atalk_table[] = {
+ { NET_ATALK_AARP_EXPIRY_TIME, "aarp-expiry-time" },
+ { NET_ATALK_AARP_TICK_TIME, "aarp-tick-time" },
+ { NET_ATALK_AARP_RETRANSMIT_LIMIT, "aarp-retransmit-limit" },
+ { NET_ATALK_AARP_RESOLVE_TIME, "aarp-resolve-time" },
+ {},
+};
+
+static struct trans_ctl_table trans_net_netrom_table[] = {
+ { NET_NETROM_DEFAULT_PATH_QUALITY, "default_path_quality" },
+ { NET_NETROM_OBSOLESCENCE_COUNT_INITIALISER, "obsolescence_count_initialiser" },
+ { NET_NETROM_NETWORK_TTL_INITIALISER, "network_ttl_initialiser" },
+ { NET_NETROM_TRANSPORT_TIMEOUT, "transport_timeout" },
+ { NET_NETROM_TRANSPORT_MAXIMUM_TRIES, "transport_maximum_tries" },
+ { NET_NETROM_TRANSPORT_ACKNOWLEDGE_DELAY, "transport_acknowledge_delay" },
+ { NET_NETROM_TRANSPORT_BUSY_DELAY, "transport_busy_delay" },
+ { NET_NETROM_TRANSPORT_REQUESTED_WINDOW_SIZE, "transport_requested_window_size" },
+ { NET_NETROM_TRANSPORT_NO_ACTIVITY_TIMEOUT, "transport_no_activity_timeout" },
+ { NET_NETROM_ROUTING_CONTROL, "routing_control" },
+ { NET_NETROM_LINK_FAILS_COUNT, "link_fails_count" },
+ { NET_NETROM_RESET, "reset" },
+ {}
+};
+
+static struct trans_ctl_table trans_net_ax25_table[] = {
+ { NET_AX25_IP_DEFAULT_MODE, "ip_default_mode" },
+ { NET_AX25_DEFAULT_MODE, "ax25_default_mode" },
+ { NET_AX25_BACKOFF_TYPE, "backoff_type" },
+ { NET_AX25_CONNECT_MODE, "connect_mode" },
+ { NET_AX25_STANDARD_WINDOW, "standard_window_size" },
+ { NET_AX25_EXTENDED_WINDOW, "extended_window_size" },
+ { NET_AX25_T1_TIMEOUT, "t1_timeout" },
+ { NET_AX25_T2_TIMEOUT, "t2_timeout" },
+ { NET_AX25_T3_TIMEOUT, "t3_timeout" },
+ { NET_AX25_IDLE_TIMEOUT, "idle_timeout" },
+ { NET_AX25_N2, "maximum_retry_count" },
+ { NET_AX25_PACLEN, "maximum_packet_length" },
+ { NET_AX25_PROTOCOL, "protocol" },
+ { NET_AX25_DAMA_SLAVE_TIMEOUT, "dama_slave_timeout" },
+ {}
+};
+
+static struct trans_ctl_table trans_net_bridge_table[] = {
+ { NET_BRIDGE_NF_CALL_ARPTABLES, "bridge-nf-call-arptables" },
+ { NET_BRIDGE_NF_CALL_IPTABLES, "bridge-nf-call-iptables" },
+ { NET_BRIDGE_NF_CALL_IP6TABLES, "bridge-nf-call-ip6tables" },
+ { NET_BRIDGE_NF_FILTER_VLAN_TAGGED, "bridge-nf-filter-vlan-tagged" },
+ { NET_BRIDGE_NF_FILTER_PPPOE_TAGGED, "bridge-nf-filter-pppoe-tagged" },
+ {}
+};
+
+static struct trans_ctl_table trans_net_rose_table[] = {
+ { NET_ROSE_RESTART_REQUEST_TIMEOUT, "restart_request_timeout" },
+ { NET_ROSE_CALL_REQUEST_TIMEOUT, "call_request_timeout" },
+ { NET_ROSE_RESET_REQUEST_TIMEOUT, "reset_request_timeout" },
+ { NET_ROSE_CLEAR_REQUEST_TIMEOUT, "clear_request_timeout" },
+ { NET_ROSE_ACK_HOLD_BACK_TIMEOUT, "acknowledge_hold_back_timeout" },
+ { NET_ROSE_ROUTING_CONTROL, "routing_control" },
+ { NET_ROSE_LINK_FAIL_TIMEOUT, "link_fail_timeout" },
+ { NET_ROSE_MAX_VCS, "maximum_virtual_circuits" },
+ { NET_ROSE_WINDOW_SIZE, "window_size" },
+ { NET_ROSE_NO_ACTIVITY_TIMEOUT, "no_activity_timeout" },
+ {}
+};
+
+static struct trans_ctl_table trans_net_ipv6_conf_var_table[] = {
+ { NET_IPV6_FORWARDING, "forwarding" },
+ { NET_IPV6_HOP_LIMIT, "hop_limit" },
+ { NET_IPV6_MTU, "mtu" },
+ { NET_IPV6_ACCEPT_RA, "accept_ra" },
+ { NET_IPV6_ACCEPT_REDIRECTS, "accept_redirects" },
+ { NET_IPV6_AUTOCONF, "autoconf" },
+ { NET_IPV6_DAD_TRANSMITS, "dad_transmits" },
+ { NET_IPV6_RTR_SOLICITS, "router_solicitations" },
+ { NET_IPV6_RTR_SOLICIT_INTERVAL, "router_solicitation_interval" },
+ { NET_IPV6_RTR_SOLICIT_DELAY, "router_solicitation_delay" },
+ { NET_IPV6_USE_TEMPADDR, "use_tempaddr" },
+ { NET_IPV6_TEMP_VALID_LFT, "temp_valid_lft" },
+ { NET_IPV6_TEMP_PREFERED_LFT, "temp_prefered_lft" },
+ { NET_IPV6_REGEN_MAX_RETRY, "regen_max_retry" },
+ { NET_IPV6_MAX_DESYNC_FACTOR, "max_desync_factor" },
+ { NET_IPV6_MAX_ADDRESSES, "max_addresses" },
+ { NET_IPV6_FORCE_MLD_VERSION, "force_mld_version" },
+ { NET_IPV6_ACCEPT_RA_DEFRTR, "accept_ra_defrtr" },
+ { NET_IPV6_ACCEPT_RA_PINFO, "accept_ra_pinfo" },
+ { NET_IPV6_ACCEPT_RA_RTR_PREF, "accept_ra_rtr_pref" },
+ { NET_IPV6_RTR_PROBE_INTERVAL, "router_probe_interval" },
+ { NET_IPV6_ACCEPT_RA_RT_INFO_MAX_PLEN, "accept_ra_rt_info_max_plen" },
+ { NET_IPV6_PROXY_NDP, "proxy_ndp" },
+ { NET_IPV6_ACCEPT_SOURCE_ROUTE, "accept_source_route" },
+ {}
+};
+
+static struct trans_ctl_table trans_net_ipv6_conf_table[] = {
+ { NET_PROTO_CONF_ALL, "all", trans_net_ipv6_conf_var_table },
+ { NET_PROTO_CONF_DEFAULT, "default", trans_net_ipv6_conf_var_table },
+ { 0, NULL, trans_net_ipv6_conf_var_table },
+ {}
+};
+
+static struct trans_ctl_table trans_net_ipv6_route_table[] = {
+ { NET_IPV6_ROUTE_FLUSH, "flush" },
+ { NET_IPV6_ROUTE_GC_THRESH, "gc_thresh" },
+ { NET_IPV6_ROUTE_MAX_SIZE, "max_size" },
+ { NET_IPV6_ROUTE_GC_MIN_INTERVAL, "gc_min_interval" },
+ { NET_IPV6_ROUTE_GC_TIMEOUT, "gc_timeout" },
+ { NET_IPV6_ROUTE_GC_INTERVAL, "gc_interval" },
+ { NET_IPV6_ROUTE_GC_ELASTICITY, "gc_elasticity" },
+ { NET_IPV6_ROUTE_MTU_EXPIRES, "mtu_expires" },
+ { NET_IPV6_ROUTE_MIN_ADVMSS, "min_adv_mss" },
+ { NET_IPV6_ROUTE_GC_MIN_INTERVAL_MS, "gc_min_interval_ms" },
+ {}
+};
+
+static struct trans_ctl_table trans_net_ipv6_icmp_table[] = {
+ { NET_IPV6_ICMP_RATELIMIT, "ratelimit" },
+ {}
+};
+
+static struct trans_ctl_table trans_net_ipv6_table[] = {
+ { NET_IPV6_CONF, "conf", trans_net_ipv6_conf_table },
+ { NET_IPV6_NEIGH, "neigh", trans_net_neigh_table },
+ { NET_IPV6_ROUTE, "route", trans_net_ipv6_route_table },
+ { NET_IPV6_ICMP, "icmp", trans_net_ipv6_icmp_table },
+ { NET_IPV6_BINDV6ONLY, "bindv6only" },
+ { NET_IPV6_IP6FRAG_HIGH_THRESH, "ip6frag_high_thresh" },
+ { NET_IPV6_IP6FRAG_LOW_THRESH, "ip6frag_low_thresh" },
+ { NET_IPV6_IP6FRAG_TIME, "ip6frag_time" },
+ { NET_IPV6_IP6FRAG_SECRET_INTERVAL, "ip6frag_secret_interval" },
+ { NET_IPV6_MLD_MAX_MSF, "mld_max_msf" },
+ { 2088 /* IPQ_QMAX */, "ip6_queue_maxlen" },
+ {}
+};
+
+static struct trans_ctl_table trans_net_x25_table[] = {
+ { NET_X25_RESTART_REQUEST_TIMEOUT, "restart_request_timeout" },
+ { NET_X25_CALL_REQUEST_TIMEOUT, "call_request_timeout" },
+ { NET_X25_RESET_REQUEST_TIMEOUT, "reset_request_timeout" },
+ { NET_X25_CLEAR_REQUEST_TIMEOUT, "clear_request_timeout" },
+ { NET_X25_ACK_HOLD_BACK_TIMEOUT, "acknowledgement_hold_back_timeout" },
+ { NET_X25_FORWARD, "x25_forward" },
+ {}
+};
+
+static struct trans_ctl_table trans_net_tr_table[] = {
+ { NET_TR_RIF_TIMEOUT, "rif_timeout" },
+ {}
+};
+
+
+static struct trans_ctl_table trans_net_decnet_conf_vars[] = {
+ { NET_DECNET_CONF_DEV_FORWARDING, "forwarding" },
+ { NET_DECNET_CONF_DEV_PRIORITY, "priority" },
+ { NET_DECNET_CONF_DEV_T2, "t2" },
+ { NET_DECNET_CONF_DEV_T3, "t3" },
+ {}
+};
+
+static struct trans_ctl_table trans_net_decnet_conf[] = {
+ { 0, NULL, trans_net_decnet_conf_vars },
+ {}
+};
+
+static struct trans_ctl_table trans_net_decnet_table[] = {
+ { NET_DECNET_CONF, "conf", trans_net_decnet_conf },
+ { NET_DECNET_NODE_ADDRESS, "node_address" },
+ { NET_DECNET_NODE_NAME, "node_name" },
+ { NET_DECNET_DEFAULT_DEVICE, "default_device" },
+ { NET_DECNET_TIME_WAIT, "time_wait" },
+ { NET_DECNET_DN_COUNT, "dn_count" },
+ { NET_DECNET_DI_COUNT, "di_count" },
+ { NET_DECNET_DR_COUNT, "dr_count" },
+ { NET_DECNET_DST_GC_INTERVAL, "dst_gc_interval" },
+ { NET_DECNET_NO_FC_MAX_CWND, "no_fc_max_cwnd" },
+ { NET_DECNET_MEM, "decnet_mem" },
+ { NET_DECNET_RMEM, "decnet_rmem" },
+ { NET_DECNET_WMEM, "decnet_wmem" },
+ { NET_DECNET_DEBUG_LEVEL, "debug" },
+ {}
+};
+
+static struct trans_ctl_table trans_net_sctp_table[] = {
+ { NET_SCTP_RTO_INITIAL, "rto_initial" },
+ { NET_SCTP_RTO_MIN, "rto_min" },
+ { NET_SCTP_RTO_MAX, "rto_max" },
+ { NET_SCTP_RTO_ALPHA, "rto_alpha_exp_divisor" },
+ { NET_SCTP_RTO_BETA, "rto_beta_exp_divisor" },
+ { NET_SCTP_VALID_COOKIE_LIFE, "valid_cookie_life" },
+ { NET_SCTP_ASSOCIATION_MAX_RETRANS, "association_max_retrans" },
+ { NET_SCTP_PATH_MAX_RETRANS, "path_max_retrans" },
+ { NET_SCTP_MAX_INIT_RETRANSMITS, "max_init_retransmits" },
+ { NET_SCTP_HB_INTERVAL, "hb_interval" },
+ { NET_SCTP_PRESERVE_ENABLE, "cookie_preserve_enable" },
+ { NET_SCTP_MAX_BURST, "max_burst" },
+ { NET_SCTP_ADDIP_ENABLE, "addip_enable" },
+ { NET_SCTP_PRSCTP_ENABLE, "prsctp_enable" },
+ { NET_SCTP_SNDBUF_POLICY, "sndbuf_policy" },
+ { NET_SCTP_SACK_TIMEOUT, "sack_timeout" },
+ { NET_SCTP_RCVBUF_POLICY, "rcvbuf_policy" },
+ {}
+};
+
+static struct trans_ctl_table trans_net_llc_llc2_timeout_table[] = {
+ { NET_LLC2_ACK_TIMEOUT, "ack" },
+ { NET_LLC2_P_TIMEOUT, "p" },
+ { NET_LLC2_REJ_TIMEOUT, "rej" },
+ { NET_LLC2_BUSY_TIMEOUT, "busy" },
+ {}
+};
+
+static struct trans_ctl_table trans_net_llc_station_table[] = {
+ { NET_LLC_STATION_ACK_TIMEOUT, "ack_timeout" },
+ {}
+};
+
+static struct trans_ctl_table trans_net_llc_llc2_table[] = {
+ { NET_LLC2, "timeout", trans_net_llc_llc2_timeout_table },
+ {}
+};
+
+static struct trans_ctl_table trans_net_llc_table[] = {
+ { NET_LLC2, "llc2", trans_net_llc_llc2_table },
+ { NET_LLC_STATION, "station", trans_net_llc_station_table },
+ {}
+};
+
+static struct trans_ctl_table trans_net_netfilter_table[] = {
+ { NET_NF_CONNTRACK_MAX, "nf_conntrack_max" },
+ { NET_NF_CONNTRACK_TCP_TIMEOUT_SYN_SENT, "nf_conntrack_tcp_timeout_syn_sent" },
+ { NET_NF_CONNTRACK_TCP_TIMEOUT_SYN_RECV, "nf_conntrack_tcp_timeout_syn_recv" },
+ { NET_NF_CONNTRACK_TCP_TIMEOUT_ESTABLISHED, "nf_conntrack_tcp_timeout_established" },
+ { NET_NF_CONNTRACK_TCP_TIMEOUT_FIN_WAIT, "nf_conntrack_tcp_timeout_fin_wait" },
+ { NET_NF_CONNTRACK_TCP_TIMEOUT_CLOSE_WAIT, "nf_conntrack_tcp_timeout_close_wait" },
+ { NET_NF_CONNTRACK_TCP_TIMEOUT_LAST_ACK, "nf_conntrack_tcp_timeout_last_ack" },
+ { NET_NF_CONNTRACK_TCP_TIMEOUT_TIME_WAIT, "nf_conntrack_tcp_timeout_time_wait" },
+ { NET_NF_CONNTRACK_TCP_TIMEOUT_CLOSE, "nf_conntrack_tcp_timeout_close" },
+ { NET_NF_CONNTRACK_UDP_TIMEOUT, "nf_conntrack_udp_timeout" },
+ { NET_NF_CONNTRACK_UDP_TIMEOUT_STREAM, "nf_conntrack_udp_timeout_stream" },
+ { NET_NF_CONNTRACK_ICMP_TIMEOUT, "nf_conntrack_icmp_timeout" },
+ { NET_NF_CONNTRACK_GENERIC_TIMEOUT, "nf_conntrack_generic_timeout" },
+ { NET_NF_CONNTRACK_BUCKETS, "nf_conntrack_buckets" },
+ { NET_NF_CONNTRACK_LOG_INVALID, "nf_conntrack_log_invalid" },
+ { NET_NF_CONNTRACK_TCP_TIMEOUT_MAX_RETRANS, "nf_conntrack_tcp_timeout_max_retrans" },
+ { NET_NF_CONNTRACK_TCP_LOOSE, "nf_conntrack_tcp_loose" },
+ { NET_NF_CONNTRACK_TCP_BE_LIBERAL, "nf_conntrack_tcp_be_liberal" },
+ { NET_NF_CONNTRACK_TCP_MAX_RETRANS, "nf_conntrack_tcp_max_retrans" },
+ { NET_NF_CONNTRACK_SCTP_TIMEOUT_CLOSED, "nf_conntrack_sctp_timeout_closed" },
+ { NET_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_WAIT, "nf_conntrack_sctp_timeout_cookie_wait" },
+ { NET_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_ECHOED, "nf_conntrack_sctp_timeout_cookie_echoed" },
+ { NET_NF_CONNTRACK_SCTP_TIMEOUT_ESTABLISHED, "nf_conntrack_sctp_timeout_established" },
+ { NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_SENT, "nf_conntrack_sctp_timeout_shutdown_sent" },
+ { NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_RECD, "nf_conntrack_sctp_timeout_shutdown_recd" },
+ { NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_ACK_SENT, "nf_conntrack_sctp_timeout_shutdown_ack_sent" },
+ { NET_NF_CONNTRACK_COUNT, "nf_conntrack_count" },
+ { NET_NF_CONNTRACK_ICMPV6_TIMEOUT, "nf_conntrack_icmpv6_timeout" },
+ { NET_NF_CONNTRACK_FRAG6_TIMEOUT, "nf_conntrack_frag6_timeout" },
+ { NET_NF_CONNTRACK_FRAG6_LOW_THRESH, "nf_conntrack_frag6_low_thresh" },
+ { NET_NF_CONNTRACK_FRAG6_HIGH_THRESH, "nf_conntrack_frag6_high_thresh" },
+ { NET_NF_CONNTRACK_CHECKSUM, "nf_conntrack_checksum" },
+
+ {}
+};
+
+static struct trans_ctl_table trans_net_dccp_table[] = {
+ { NET_DCCP_DEFAULT, "default" },
+ {}
+};
+
+static struct trans_ctl_table trans_net_irda_table[] = {
+ { NET_IRDA_DISCOVERY, "discovery" },
+ { NET_IRDA_DEVNAME, "devname" },
+ { NET_IRDA_DEBUG, "debug" },
+ { NET_IRDA_FAST_POLL, "fast_poll_increase" },
+ { NET_IRDA_DISCOVERY_SLOTS, "discovery_slots" },
+ { NET_IRDA_DISCOVERY_TIMEOUT, "discovery_timeout" },
+ { NET_IRDA_SLOT_TIMEOUT, "slot_timeout" },
+ { NET_IRDA_MAX_BAUD_RATE, "max_baud_rate" },
+ { NET_IRDA_MIN_TX_TURN_TIME, "min_tx_turn_time" },
+ { NET_IRDA_MAX_TX_DATA_SIZE, "max_tx_data_size" },
+ { NET_IRDA_MAX_TX_WINDOW, "max_tx_window" },
+ { NET_IRDA_MAX_NOREPLY_TIME, "max_noreply_time" },
+ { NET_IRDA_WARN_NOREPLY_TIME, "warn_noreply_time" },
+ { NET_IRDA_LAP_KEEPALIVE_TIME, "lap_keepalive_time" },
+ {}
+};
+
+static struct trans_ctl_table trans_net_table[] = {
+ { NET_CORE, "core", trans_net_core_table },
+ /* NET_ETHER not used */
+ /* NET_802 not used */
+ { NET_UNIX, "unix", trans_net_unix_table },
+ { NET_IPV4, "ipv4", trans_net_ipv4_table },
+ { NET_IPX, "ipx", trans_net_ipx_table },
+ { NET_ATALK, "atalk", trans_net_atalk_table },
+ { NET_NETROM, "netrom", trans_net_netrom_table },
+ { NET_AX25, "ax25", trans_net_ax25_table },
+ { NET_BRIDGE, "bridge", trans_net_bridge_table },
+ { NET_ROSE, "rose", trans_net_rose_table },
+ { NET_IPV6, "ipv6", trans_net_ipv6_table },
+ { NET_X25, "x25", trans_net_x25_table },
+ { NET_TR, "tr", trans_net_tr_table },
+ { NET_DECNET, "decnet", trans_net_decnet_table },
+ /* NET_ECONET not used */
+ { NET_SCTP, "sctp", trans_net_sctp_table },
+ { NET_LLC, "llc", trans_net_llc_table },
+ { NET_NETFILTER, "netfilter", trans_net_netfilter_table },
+ { NET_DCCP, "dccp", trans_net_dccp_table },
+ { NET_IRDA, "irda", trans_net_irda_table },
+ { 2089, "nf_conntrack_max" },
+ {}
+};
+
+static struct trans_ctl_table trans_fs_quota_table[] = {
+ { FS_DQ_LOOKUPS, "lookups" },
+ { FS_DQ_DROPS, "drops" },
+ { FS_DQ_READS, "reads" },
+ { FS_DQ_WRITES, "writes" },
+ { FS_DQ_CACHE_HITS, "cache_hits" },
+ { FS_DQ_ALLOCATED, "allocated_dquots" },
+ { FS_DQ_FREE, "free_dquots" },
+ { FS_DQ_SYNCS, "syncs" },
+ { FS_DQ_WARNINGS, "warnings" },
+ {}
+};
+
+static struct trans_ctl_table trans_fs_xfs_table[] = {
+ { XFS_RESTRICT_CHOWN, "restrict_chown" },
+ { XFS_SGID_INHERIT, "irix_sgid_inherit" },
+ { XFS_SYMLINK_MODE, "irix_symlink_mode" },
+ { XFS_PANIC_MASK, "panic_mask" },
+
+ { XFS_ERRLEVEL, "error_level" },
+ { XFS_SYNCD_TIMER, "xfssyncd_centisecs" },
+ { XFS_INHERIT_SYNC, "inherit_sync" },
+ { XFS_INHERIT_NODUMP, "inherit_nodump" },
+ { XFS_INHERIT_NOATIME, "inherit_noatime" },
+ { XFS_BUF_TIMER, "xfsbufd_centisecs" },
+ { XFS_BUF_AGE, "age_buffer_centisecs" },
+ { XFS_INHERIT_NOSYM, "inherit_nosymlinks" },
+ { XFS_ROTORSTEP, "rotorstep" },
+ { XFS_INHERIT_NODFRG, "inherit_nodefrag" },
+ { XFS_FILESTREAM_TIMER, "filestream_centisecs" },
+ { XFS_STATS_CLEAR, "stats_clear" },
+ {}
+};
+
+static struct trans_ctl_table trans_fs_ocfs2_nm_table[] = {
+ { 1, "hb_ctl_path" },
+ {}
+};
+
+static struct trans_ctl_table trans_fs_ocfs2_table[] = {
+ { 1, "nm", trans_fs_ocfs2_nm_table },
+ {}
+};
+
+static struct trans_ctl_table trans_inotify_table[] = {
+ { INOTIFY_MAX_USER_INSTANCES, "max_user_instances" },
+ { INOTIFY_MAX_USER_WATCHES, "max_user_watches" },
+ { INOTIFY_MAX_QUEUED_EVENTS, "max_queued_events" },
+ {}
+};
+
+static struct trans_ctl_table trans_fs_table[] = {
+ { FS_NRINODE, "inode-nr" },
+ { FS_STATINODE, "inode-state" },
+ /* FS_MAXINODE unused */
+ /* FS_NRDQUOT unused */
+ /* FS_MAXDQUOT unused */
+ { FS_NRFILE, "file-nr" },
+ { FS_MAXFILE, "file-max" },
+ { FS_DENTRY, "dentry-state" },
+ /* FS_NRSUPER unused */
+ /* FS_MAXUPSER unused */
+ { FS_OVERFLOWUID, "overflowuid" },
+ { FS_OVERFLOWGID, "overflowgid" },
+ { FS_LEASES, "leases-enable" },
+ { FS_DIR_NOTIFY, "dir-notify-enable" },
+ { FS_LEASE_TIME, "lease-break-time" },
+ { FS_DQSTATS, "quota", trans_fs_quota_table },
+ { FS_XFS, "xfs", trans_fs_xfs_table },
+ { FS_AIO_NR, "aio-nr" },
+ { FS_AIO_MAX_NR, "aio-max-nr" },
+ { FS_INOTIFY, "inotify", trans_inotify_table },
+ { FS_OCFS2, "ocfs2", trans_fs_ocfs2_table },
+ { KERN_SETUID_DUMPABLE, "suid_dumpable" },
+ {}
+};
+
+static struct trans_ctl_table trans_debug_table[] = {
+ {}
+};
+
+static struct trans_ctl_table trans_cdrom_table[] = {
+ { DEV_CDROM_INFO, "info" },
+ { DEV_CDROM_AUTOCLOSE, "autoclose" },
+ { DEV_CDROM_AUTOEJECT, "autoeject" },
+ { DEV_CDROM_DEBUG, "debug" },
+ { DEV_CDROM_LOCK, "lock" },
+ { DEV_CDROM_CHECK_MEDIA, "check_media" },
+ {}
+};
+
+static struct trans_ctl_table trans_ipmi_table[] = {
+ { DEV_IPMI_POWEROFF_POWERCYCLE, "poweroff_powercycle" },
+ {}
+};
+
+static struct trans_ctl_table trans_mac_hid_files[] = {
+ /* DEV_MAC_HID_KEYBOARD_SENDS_LINUX_KEYCODES unused */
+ /* DEV_MAC_HID_KEYBOARD_LOCK_KEYCODES unused */
+ { DEV_MAC_HID_MOUSE_BUTTON_EMULATION, "mouse_button_emulation" },
+ { DEV_MAC_HID_MOUSE_BUTTON2_KEYCODE, "mouse_button2_keycode" },
+ { DEV_MAC_HID_MOUSE_BUTTON3_KEYCODE, "mouse_button3_keycode" },
+ /* DEV_MAC_HID_ADB_MOUSE_SENDS_KEYCODES unused */
+ {}
+};
+
+static struct trans_ctl_table trans_raid_table[] = {
+ { DEV_RAID_SPEED_LIMIT_MIN, "speed_limit_min" },
+ { DEV_RAID_SPEED_LIMIT_MAX, "speed_limit_max" },
+ {}
+};
+
+static struct trans_ctl_table trans_scsi_table[] = {
+ { DEV_SCSI_LOGGING_LEVEL, "logging_level" },
+ {}
+};
+
+static struct trans_ctl_table trans_parport_default_table[] = {
+ { DEV_PARPORT_DEFAULT_TIMESLICE, "timeslice" },
+ { DEV_PARPORT_DEFAULT_SPINTIME, "spintime" },
+ {}
+};
+
+static struct trans_ctl_table trans_parport_device_table[] = {
+ { DEV_PARPORT_DEVICE_TIMESLICE, "timeslice" },
+ {}
+};
+
+static struct trans_ctl_table trans_parport_devices_table[] = {
+ { DEV_PARPORT_DEVICES_ACTIVE, "active" },
+ { 0, NULL, trans_parport_device_table },
+ {}
+};
+
+static struct trans_ctl_table trans_parport_parport_table[] = {
+ { DEV_PARPORT_SPINTIME, "spintime" },
+ { DEV_PARPORT_BASE_ADDR, "base-addr" },
+ { DEV_PARPORT_IRQ, "irq" },
+ { DEV_PARPORT_DMA, "dma" },
+ { DEV_PARPORT_MODES, "modes" },
+ { DEV_PARPORT_DEVICES, "devices", trans_parport_devices_table },
+ { DEV_PARPORT_AUTOPROBE, "autoprobe" },
+ { DEV_PARPORT_AUTOPROBE + 1, "autoprobe0" },
+ { DEV_PARPORT_AUTOPROBE + 2, "autoprobe1" },
+ { DEV_PARPORT_AUTOPROBE + 3, "autoprobe2" },
+ { DEV_PARPORT_AUTOPROBE + 4, "autoprobe3" },
+ {}
+};
+static struct trans_ctl_table trans_parport_table[] = {
+ { DEV_PARPORT_DEFAULT, "default", trans_parport_default_table },
+ { 0, NULL, trans_parport_parport_table },
+ {}
+};
+
+static struct trans_ctl_table trans_dev_table[] = {
+ { DEV_CDROM, "cdrom", trans_cdrom_table },
+ /* DEV_HWMON unused */
+ { DEV_PARPORT, "parport", trans_parport_table },
+ { DEV_RAID, "raid", trans_raid_table },
+ { DEV_MAC_HID, "mac_hid", trans_mac_hid_files },
+ { DEV_SCSI, "scsi", trans_scsi_table },
+ { DEV_IPMI, "ipmi", trans_ipmi_table },
+ {}
+};
+
+static struct trans_ctl_table trans_bus_isa_table[] = {
+ { BUS_ISA_MEM_BASE, "membase" },
+ { BUS_ISA_PORT_BASE, "portbase" },
+ { BUS_ISA_PORT_SHIFT, "portshift" },
+ {}
+};
+
+static struct trans_ctl_table trans_bus_table[] = {
+ { CTL_BUS_ISA, "isa", trans_bus_isa_table },
+ {}
+};
+
+static struct trans_ctl_table trans_arlan_conf_table0[] = {
+ { 1, "spreadingCode" },
+ { 2, "channelNumber" },
+ { 3, "scramblingDisable" },
+ { 4, "txAttenuation" },
+ { 5, "systemId" },
+ { 6, "maxDatagramSize" },
+ { 7, "maxFrameSize" },
+ { 8, "maxRetries" },
+ { 9, "receiveMode" },
+ { 10, "priority" },
+ { 11, "rootOrRepeater" },
+ { 12, "SID" },
+ { 13, "registrationMode" },
+ { 14, "registrationFill" },
+ { 15, "localTalkAddress" },
+ { 16, "codeFormat" },
+ { 17, "numChannels" },
+ { 18, "channel1" },
+ { 19, "channel2" },
+ { 20, "channel3" },
+ { 21, "channel4" },
+ { 22, "txClear" },
+ { 23, "txRetries" },
+ { 24, "txRouting" },
+ { 25, "txScrambled" },
+ { 26, "rxParameter" },
+ { 27, "txTimeoutMs" },
+ { 28, "waitCardTimeout" },
+ { 29, "channelSet" },
+ { 30, "name" },
+ { 31, "waitTime" },
+ { 32, "lParameter" },
+ { 33, "_15" },
+ { 34, "headerSize" },
+ { 36, "tx_delay_ms" },
+ { 37, "retries" },
+ { 38, "ReTransmitPacketMaxSize" },
+ { 39, "waitReTransmitPacketMaxSize" },
+ { 40, "fastReTransCount" },
+ { 41, "driverRetransmissions" },
+ { 42, "txAckTimeoutMs" },
+ { 43, "registrationInterrupts" },
+ { 44, "hardwareType" },
+ { 45, "radioType" },
+ { 46, "writeEEPROM" },
+ { 47, "writeRadioType" },
+ { 48, "entry_exit_debug" },
+ { 49, "debug" },
+ { 50, "in_speed" },
+ { 51, "out_speed" },
+ { 52, "in_speed10" },
+ { 53, "out_speed10" },
+ { 54, "in_speed_max" },
+ { 55, "out_speed_max" },
+ { 56, "measure_rate" },
+ { 57, "pre_Command_Wait" },
+ { 58, "rx_tweak1" },
+ { 59, "rx_tweak2" },
+ { 60, "tx_queue_len" },
+
+ { 150, "arlan0-txRing" },
+ { 151, "arlan0-rxRing" },
+ { 152, "arlan0-18" },
+ { 153, "arlan0-ring" },
+ { 154, "arlan0-shm-cpy" },
+ { 155, "config0" },
+ { 156, "reset0" },
+ {}
+};
+
+static struct trans_ctl_table trans_arlan_conf_table1[] = {
+ { 1, "spreadingCode" },
+ { 2, "channelNumber" },
+ { 3, "scramblingDisable" },
+ { 4, "txAttenuation" },
+ { 5, "systemId" },
+ { 6, "maxDatagramSize" },
+ { 7, "maxFrameSize" },
+ { 8, "maxRetries" },
+ { 9, "receiveMode" },
+ { 10, "priority" },
+ { 11, "rootOrRepeater" },
+ { 12, "SID" },
+ { 13, "registrationMode" },
+ { 14, "registrationFill" },
+ { 15, "localTalkAddress" },
+ { 16, "codeFormat" },
+ { 17, "numChannels" },
+ { 18, "channel1" },
+ { 19, "channel2" },
+ { 20, "channel3" },
+ { 21, "channel4" },
+ { 22, "txClear" },
+ { 23, "txRetries" },
+ { 24, "txRouting" },
+ { 25, "txScrambled" },
+ { 26, "rxParameter" },
+ { 27, "txTimeoutMs" },
+ { 28, "waitCardTimeout" },
+ { 29, "channelSet" },
+ { 30, "name" },
+ { 31, "waitTime" },
+ { 32, "lParameter" },
+ { 33, "_15" },
+ { 34, "headerSize" },
+ { 36, "tx_delay_ms" },
+ { 37, "retries" },
+ { 38, "ReTransmitPacketMaxSize" },
+ { 39, "waitReTransmitPacketMaxSize" },
+ { 40, "fastReTransCount" },
+ { 41, "driverRetransmissions" },
+ { 42, "txAckTimeoutMs" },
+ { 43, "registrationInterrupts" },
+ { 44, "hardwareType" },
+ { 45, "radioType" },
+ { 46, "writeEEPROM" },
+ { 47, "writeRadioType" },
+ { 48, "entry_exit_debug" },
+ { 49, "debug" },
+ { 50, "in_speed" },
+ { 51, "out_speed" },
+ { 52, "in_speed10" },
+ { 53, "out_speed10" },
+ { 54, "in_speed_max" },
+ { 55, "out_speed_max" },
+ { 56, "measure_rate" },
+ { 57, "pre_Command_Wait" },
+ { 58, "rx_tweak1" },
+ { 59, "rx_tweak2" },
+ { 60, "tx_queue_len" },
+
+ { 150, "arlan1-txRing" },
+ { 151, "arlan1-rxRing" },
+ { 152, "arlan1-18" },
+ { 153, "arlan1-ring" },
+ { 154, "arlan1-shm-cpy" },
+ { 155, "config1" },
+ { 156, "reset1" },
+ {}
+};
+
+static struct trans_ctl_table trans_arlan_conf_table2[] = {
+ { 1, "spreadingCode" },
+ { 2, "channelNumber" },
+ { 3, "scramblingDisable" },
+ { 4, "txAttenuation" },
+ { 5, "systemId" },
+ { 6, "maxDatagramSize" },
+ { 7, "maxFrameSize" },
+ { 8, "maxRetries" },
+ { 9, "receiveMode" },
+ { 10, "priority" },
+ { 11, "rootOrRepeater" },
+ { 12, "SID" },
+ { 13, "registrationMode" },
+ { 14, "registrationFill" },
+ { 15, "localTalkAddress" },
+ { 16, "codeFormat" },
+ { 17, "numChannels" },
+ { 18, "channel1" },
+ { 19, "channel2" },
+ { 20, "channel3" },
+ { 21, "channel4" },
+ { 22, "txClear" },
+ { 23, "txRetries" },
+ { 24, "txRouting" },
+ { 25, "txScrambled" },
+ { 26, "rxParameter" },
+ { 27, "txTimeoutMs" },
+ { 28, "waitCardTimeout" },
+ { 29, "channelSet" },
+ { 30, "name" },
+ { 31, "waitTime" },
+ { 32, "lParameter" },
+ { 33, "_15" },
+ { 34, "headerSize" },
+ { 36, "tx_delay_ms" },
+ { 37, "retries" },
+ { 38, "ReTransmitPacketMaxSize" },
+ { 39, "waitReTransmitPacketMaxSize" },
+ { 40, "fastReTransCount" },
+ { 41, "driverRetransmissions" },
+ { 42, "txAckTimeoutMs" },
+ { 43, "registrationInterrupts" },
+ { 44, "hardwareType" },
+ { 45, "radioType" },
+ { 46, "writeEEPROM" },
+ { 47, "writeRadioType" },
+ { 48, "entry_exit_debug" },
+ { 49, "debug" },
+ { 50, "in_speed" },
+ { 51, "out_speed" },
+ { 52, "in_speed10" },
+ { 53, "out_speed10" },
+ { 54, "in_speed_max" },
+ { 55, "out_speed_max" },
+ { 56, "measure_rate" },
+ { 57, "pre_Command_Wait" },
+ { 58, "rx_tweak1" },
+ { 59, "rx_tweak2" },
+ { 60, "tx_queue_len" },
+
+ { 150, "arlan2-txRing" },
+ { 151, "arlan2-rxRing" },
+ { 152, "arlan2-18" },
+ { 153, "arlan2-ring" },
+ { 154, "arlan2-shm-cpy" },
+ { 155, "config2" },
+ { 156, "reset2" },
+ {}
+};
+
+static struct trans_ctl_table trans_arlan_conf_table3[] = {
+ { 1, "spreadingCode" },
+ { 2, "channelNumber" },
+ { 3, "scramblingDisable" },
+ { 4, "txAttenuation" },
+ { 5, "systemId" },
+ { 6, "maxDatagramSize" },
+ { 7, "maxFrameSize" },
+ { 8, "maxRetries" },
+ { 9, "receiveMode" },
+ { 10, "priority" },
+ { 11, "rootOrRepeater" },
+ { 12, "SID" },
+ { 13, "registrationMode" },
+ { 14, "registrationFill" },
+ { 15, "localTalkAddress" },
+ { 16, "codeFormat" },
+ { 17, "numChannels" },
+ { 18, "channel1" },
+ { 19, "channel2" },
+ { 20, "channel3" },
+ { 21, "channel4" },
+ { 22, "txClear" },
+ { 23, "txRetries" },
+ { 24, "txRouting" },
+ { 25, "txScrambled" },
+ { 26, "rxParameter" },
+ { 27, "txTimeoutMs" },
+ { 28, "waitCardTimeout" },
+ { 29, "channelSet" },
+ { 30, "name" },
+ { 31, "waitTime" },
+ { 32, "lParameter" },
+ { 33, "_15" },
+ { 34, "headerSize" },
+ { 36, "tx_delay_ms" },
+ { 37, "retries" },
+ { 38, "ReTransmitPacketMaxSize" },
+ { 39, "waitReTransmitPacketMaxSize" },
+ { 40, "fastReTransCount" },
+ { 41, "driverRetransmissions" },
+ { 42, "txAckTimeoutMs" },
+ { 43, "registrationInterrupts" },
+ { 44, "hardwareType" },
+ { 45, "radioType" },
+ { 46, "writeEEPROM" },
+ { 47, "writeRadioType" },
+ { 48, "entry_exit_debug" },
+ { 49, "debug" },
+ { 50, "in_speed" },
+ { 51, "out_speed" },
+ { 52, "in_speed10" },
+ { 53, "out_speed10" },
+ { 54, "in_speed_max" },
+ { 55, "out_speed_max" },
+ { 56, "measure_rate" },
+ { 57, "pre_Command_Wait" },
+ { 58, "rx_tweak1" },
+ { 59, "rx_tweak2" },
+ { 60, "tx_queue_len" },
+
+ { 150, "arlan3-txRing" },
+ { 151, "arlan3-rxRing" },
+ { 152, "arlan3-18" },
+ { 153, "arlan3-ring" },
+ { 154, "arlan3-shm-cpy" },
+ { 155, "config3" },
+ { 156, "reset3" },
+ {}
+};
+
+static struct trans_ctl_table trans_arlan_table[] = {
+ { 1, "arlan0", trans_arlan_conf_table0 },
+ { 2, "arlan1", trans_arlan_conf_table1 },
+ { 3, "arlan2", trans_arlan_conf_table2 },
+ { 4, "arlan3", trans_arlan_conf_table3 },
+ {}
+};
+
+static struct trans_ctl_table trans_appldata_table[] = {
+ { CTL_APPLDATA_TIMER, "timer" },
+ { CTL_APPLDATA_INTERVAL, "interval" },
+ { CTL_APPLDATA_OS, "os" },
+ { CTL_APPLDATA_NET_SUM, "net_sum" },
+ { CTL_APPLDATA_MEM, "mem" },
+ {}
+
+};
+
+static struct trans_ctl_table trans_s390dbf_table[] = {
+ { 5678 /* CTL_S390DBF_STOPPABLE */, "debug_stoppable" },
+ { 5679 /* CTL_S390DBF_ACTIVE */, "debug_active" },
+ {}
+};
+
+static struct trans_ctl_table trans_sunrpc_table[] = {
+ { CTL_RPCDEBUG, "rpc_debug" },
+ { CTL_NFSDEBUG, "nfs_debug" },
+ { CTL_NFSDDEBUG, "nfsd_debug" },
+ { CTL_NLMDEBUG, "nlm_debug" },
+ { CTL_SLOTTABLE_UDP, "udp_slot_table_entries" },
+ { CTL_SLOTTABLE_TCP, "tcp_slot_table_entries" },
+ { CTL_MIN_RESVPORT, "min_resvport" },
+ { CTL_MAX_RESVPORT, "max_resvport" },
+ {}
+};
+
+static struct trans_ctl_table trans_pm_table[] = {
+ { 1 /* CTL_PM_SUSPEND */, "suspend" },
+ { 2 /* CTL_PM_CMODE */, "cmode" },
+ { 3 /* CTL_PM_P0 */, "p0" },
+ { 4 /* CTL_PM_CM */, "cm" },
+ {}
+};
+
+static struct trans_ctl_table trans_frv_table[] = {
+ { 1, "cache-mode" },
+ { 2, "pin-cxnr" },
+ {}
+};
+
+static struct trans_ctl_table trans_root_table[] = {
+ { CTL_KERN, "kernel", trans_kern_table },
+ { CTL_VM, "vm", trans_vm_table },
+ { CTL_NET, "net", trans_net_table },
+ /* CTL_PROC not used */
+ { CTL_FS, "fs", trans_fs_table },
+ { CTL_DEBUG, "debug", trans_debug_table },
+ { CTL_DEV, "dev", trans_dev_table },
+ { CTL_BUS, "bus", trans_bus_table },
+ { CTL_ABI, "abi" },
+ /* CTL_CPU not used */
+ { CTL_ARLAN, "arlan", trans_arlan_table },
+ { CTL_APPLDATA, "appldata", trans_appldata_table },
+ { CTL_S390DBF, "s390dbf", trans_s390dbf_table },
+ { CTL_SUNRPC, "sunrpc", trans_sunrpc_table },
+ { CTL_PM, "pm", trans_pm_table },
+ { CTL_FRV, "frv", trans_frv_table },
+ {}
+};
+
+
+
+
+static int sysctl_depth(struct ctl_table *table)
+{
+ struct ctl_table *tmp;
+ int depth;
+
+ depth = 0;
+ for (tmp = table; tmp->parent; tmp = tmp->parent)
+ depth++;
+
+ return depth;
+}
+
+static struct ctl_table *sysctl_parent(struct ctl_table *table, int n)
+{
+ int i;
+
+ for (i = 0; table && i < n; i++)
+ table = table->parent;
+
+ return table;
+}
+
+static struct trans_ctl_table *sysctl_binary_lookup(struct ctl_table *table)
+{
+ struct ctl_table *test;
+ struct trans_ctl_table *ref;
+ int depth, cur_depth;
+
+ depth = sysctl_depth(table);
+
+ cur_depth = depth;
+ ref = trans_root_table;
+repeat:
+ test = sysctl_parent(table, cur_depth);
+ for (; ref->ctl_name || ref->procname || ref->child; ref++) {
+ int match = 0;
+
+ if (cur_depth && !ref->child)
+ continue;
+
+ if (test->procname && ref->procname &&
+ (strcmp(test->procname, ref->procname) == 0))
+ match++;
+
+ if (test->ctl_name && ref->ctl_name &&
+ (test->ctl_name == ref->ctl_name))
+ match++;
+
+ if (!ref->ctl_name && !ref->procname)
+ match++;
+
+ if (match) {
+ if (cur_depth != 0) {
+ cur_depth--;
+ ref = ref->child;
+ goto repeat;
+ }
+ goto out;
+ }
+ }
+ ref = NULL;
+out:
+ return ref;
+}
+
+static void sysctl_print_path(struct ctl_table *table)
+{
+ struct ctl_table *tmp;
+ int depth, i;
+ depth = sysctl_depth(table);
+ if (table->procname) {
+ for (i = depth; i >= 0; i--) {
+ tmp = sysctl_parent(table, i);
+ printk("/%s", tmp->procname?tmp->procname:"");
+ }
+ }
+ printk(" ");
+ if (table->ctl_name) {
+ for (i = depth; i >= 0; i--) {
+ tmp = sysctl_parent(table, i);
+ printk(".%d", tmp->ctl_name);
+ }
+ }
+}
+
+static void sysctl_repair_table(struct ctl_table *table)
+{
+ /* Don't complain about the classic default
+ * sysctl strategy routine. Maybe later we
+ * can get the tables fixed and complain about
+ * this.
+ */
+ if (table->ctl_name && table->procname &&
+ (table->proc_handler == proc_dointvec) &&
+ (!table->strategy)) {
+ table->strategy = sysctl_data;
+ }
+}
+
+static struct ctl_table *sysctl_check_lookup(struct ctl_table *table)
+{
+ struct ctl_table_header *head;
+ struct ctl_table *ref, *test;
+ int depth, cur_depth;
+
+ depth = sysctl_depth(table);
+
+ for (head = sysctl_head_next(NULL); head;
+ head = sysctl_head_next(head)) {
+ cur_depth = depth;
+ ref = head->ctl_table;
+repeat:
+ test = sysctl_parent(table, cur_depth);
+ for (; ref->ctl_name || ref->procname; ref++) {
+ int match = 0;
+ if (cur_depth && !ref->child)
+ continue;
+
+ if (test->procname && ref->procname &&
+ (strcmp(test->procname, ref->procname) == 0))
+ match++;
+
+ if (test->ctl_name && ref->ctl_name &&
+ (test->ctl_name == ref->ctl_name))
+ match++;
+
+ if (match) {
+ if (cur_depth != 0) {
+ cur_depth--;
+ ref = ref->child;
+ goto repeat;
+ }
+ goto out;
+ }
+ }
+ }
+ ref = NULL;
+out:
+ sysctl_head_finish(head);
+ return ref;
+}
+
+static void set_fail(const char **fail, struct ctl_table *table, const char *str)
+{
+ if (*fail) {
+ printk(KERN_ERR "sysctl table check failed: ");
+ sysctl_print_path(table);
+ printk(" %s\n", *fail);
+ }
+ *fail = str;
+}
+
+static int sysctl_check_dir(struct ctl_table *table)
+{
+ struct ctl_table *ref;
+ int error;
+
+ error = 0;
+ ref = sysctl_check_lookup(table);
+ if (ref) {
+ int match = 0;
+ if ((!table->procname && !ref->procname) ||
+ (table->procname && ref->procname &&
+ (strcmp(table->procname, ref->procname) == 0)))
+ match++;
+
+ if ((!table->ctl_name && !ref->ctl_name) ||
+ (table->ctl_name && ref->ctl_name &&
+ (table->ctl_name == ref->ctl_name)))
+ match++;
+
+ if (match != 2) {
+ printk(KERN_ERR "%s: failed: ", __func__);
+ sysctl_print_path(table);
+ printk(" ref: ");
+ sysctl_print_path(ref);
+ printk("\n");
+ error = -EINVAL;
+ }
+ }
+ return error;
+}
+
+static void sysctl_check_leaf(struct ctl_table *table, const char **fail)
+{
+ struct ctl_table *ref;
+
+ ref = sysctl_check_lookup(table);
+ if (ref && (ref != table))
+ set_fail(fail, table, "Sysctl already exists");
+}
+
+static void sysctl_check_bin_path(struct ctl_table *table, const char **fail)
+{
+ struct trans_ctl_table *ref;
+
+ ref = sysctl_binary_lookup(table);
+ if (table->ctl_name && !ref)
+ set_fail(fail, table, "Unknown sysctl binary path");
+ if (ref) {
+ if (ref->procname &&
+ (!table->procname ||
+ (strcmp(table->procname, ref->procname) != 0)))
+ set_fail(fail, table, "procname does not match binary path procname");
+
+ if (ref->ctl_name && table->ctl_name &&
+ (table->ctl_name != ref->ctl_name))
+ set_fail(fail, table, "ctl_name does not match binary path ctl_name");
+ }
+}
+
+int sysctl_check_table(struct ctl_table *table)
+{
+ int error = 0;
+ for (; table->ctl_name || table->procname; table++) {
+ const char *fail = NULL;
+
+ sysctl_repair_table(table);
+ if (table->parent) {
+ if (table->procname && !table->parent->procname)
+ set_fail(&fail, table, "Parent without procname");
+ if (table->ctl_name && !table->parent->ctl_name)
+ set_fail(&fail, table, "Parent without ctl_name");
+ }
+ if (!table->procname)
+ set_fail(&fail, table, "No procname");
+ if (table->child) {
+ if (table->data)
+ set_fail(&fail, table, "Directory with data?");
+ if (table->maxlen)
+ set_fail(&fail, table, "Directory with maxlen?");
+ if ((table->mode & (S_IRUGO|S_IXUGO)) != table->mode)
+ set_fail(&fail, table, "Writable sysctl directory");
+ if (table->proc_handler)
+ set_fail(&fail, table, "Directory with proc_handler");
+ if (table->strategy)
+ set_fail(&fail, table, "Directory with strategy");
+ if (table->extra1)
+ set_fail(&fail, table, "Directory with extra1");
+ if (table->extra2)
+ set_fail(&fail, table, "Directory with extra2");
+ if (sysctl_check_dir(table))
+ set_fail(&fail, table, "Inconsistent directory names");
+ } else {
+ if ((table->strategy == sysctl_data) ||
+ (table->strategy == sysctl_string) ||
+ (table->strategy == sysctl_intvec) ||
+ (table->strategy == sysctl_jiffies) ||
+ (table->strategy == sysctl_ms_jiffies) ||
+ (table->proc_handler == proc_dostring) ||
+ (table->proc_handler == proc_dointvec) ||
+#ifdef CONFIG_SECURITY_CAPABILITIES
+ (table->proc_handler == proc_dointvec_bset) ||
+#endif /* def CONFIG_SECURITY_CAPABILITIES */
+ (table->proc_handler == proc_dointvec_minmax) ||
+ (table->proc_handler == proc_dointvec_jiffies) ||
+ (table->proc_handler == proc_dointvec_userhz_jiffies) ||
+ (table->proc_handler == proc_dointvec_ms_jiffies) ||
+ (table->proc_handler == proc_doulongvec_minmax) ||
+ (table->proc_handler == proc_doulongvec_ms_jiffies_minmax)) {
+ if (!table->data)
+ set_fail(&fail, table, "No data");
+ if (!table->maxlen)
+ set_fail(&fail, table, "No maxlen");
+ }
+ if ((table->proc_handler == proc_doulongvec_minmax) ||
+ (table->proc_handler == proc_doulongvec_ms_jiffies_minmax)) {
+ if (table->maxlen > sizeof (unsigned long)) {
+ if (!table->extra1)
+ set_fail(&fail, table, "No min");
+ if (!table->extra2)
+ set_fail(&fail, table, "No max");
+ }
+ }
+#ifdef CONFIG_SYSCTL_SYSCALL
+ if (table->ctl_name && !table->strategy)
+ set_fail(&fail, table, "Missing strategy");
+#endif
+#if 0
+ if (!table->ctl_name && table->strategy)
+ set_fail(&fail, table, "Strategy without ctl_name");
+#endif
+#ifdef CONFIG_PROC_FS
+ if (table->procname && !table->proc_handler)
+ set_fail(&fail, table, "No proc_handler");
+#endif
+#if 0
+ if (!table->procname && table->proc_handler)
+ set_fail(&fail, table, "proc_handler without procname");
+#endif
+ sysctl_check_leaf(table, &fail);
+ }
+ sysctl_check_bin_path(table, &fail);
+ if (fail) {
+ set_fail(&fail, table, NULL);
+ error = -EINVAL;
+ }
+ if (table->child)
+ error |= sysctl_check_table(table->child);
+ }
+ return error;
+}
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index 059431ed67d..7d4d7f9c1bb 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -20,7 +20,6 @@
#include <linux/taskstats_kern.h>
#include <linux/tsacct_kern.h>
#include <linux/delayacct.h>
-#include <linux/tsacct_kern.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <net/genetlink.h>
diff --git a/kernel/time.c b/kernel/time.c
index 2289a8d6831..09d3c45c4da 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -9,9 +9,9 @@
*/
/*
* Modification history kernel/time.c
- *
+ *
* 1993-09-02 Philip Gladstone
- * Created file with time related functions from sched.c and adjtimex()
+ * Created file with time related functions from sched.c and adjtimex()
* 1993-10-08 Torsten Duwe
* adjtime interface update and CMOS clock write code
* 1995-08-13 Torsten Duwe
@@ -30,16 +30,16 @@
#include <linux/module.h>
#include <linux/timex.h>
#include <linux/capability.h>
+#include <linux/clocksource.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <linux/security.h>
#include <linux/fs.h>
-#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
-/*
+/*
* The timezone where the local system is located. Used as a default by some
* programs who obtain this value by using gettimeofday.
*/
@@ -57,11 +57,7 @@ EXPORT_SYMBOL(sys_tz);
*/
asmlinkage long sys_time(time_t __user * tloc)
{
- time_t i;
- struct timespec tv;
-
- getnstimeofday(&tv);
- i = tv.tv_sec;
+ time_t i = get_seconds();
if (tloc) {
if (put_user(i,tloc))
@@ -76,7 +72,7 @@ asmlinkage long sys_time(time_t __user * tloc)
* why not move it into the appropriate arch directory (for those
* architectures that need it).
*/
-
+
asmlinkage long sys_stime(time_t __user *tptr)
{
struct timespec tv;
@@ -115,10 +111,10 @@ asmlinkage long sys_gettimeofday(struct timeval __user *tv, struct timezone __us
/*
* Adjust the time obtained from the CMOS to be UTC time instead of
* local time.
- *
+ *
* This is ugly, but preferable to the alternatives. Otherwise we
* would either need to write a program to do it in /etc/rc (and risk
- * confusion if the program gets run more than once; it would also be
+ * confusion if the program gets run more than once; it would also be
* hard to make the program warp the clock precisely n hours) or
* compile in the timezone information into the kernel. Bad, bad....
*
@@ -163,6 +159,7 @@ int do_sys_settimeofday(struct timespec *tv, struct timezone *tz)
if (tz) {
/* SMP safe, global irq locking makes it work. */
sys_tz = *tz;
+ update_vsyscall_tz();
if (firsttime) {
firsttime = 0;
if (!tv)
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index fc3fc79b3d5..8cfb8b2ce77 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -222,20 +222,8 @@ static void tick_do_broadcast_on_off(void *why)
if (!dev || !(dev->features & CLOCK_EVT_FEAT_C3STOP))
goto out;
- /*
- * Defect device ?
- */
- if (!tick_device_is_functional(dev)) {
- /*
- * AMD C1E wreckage fixup:
- *
- * Device was registered functional in the first
- * place. Now the secondary CPU detected the C1E
- * misfeature and notifies us to fix it up
- */
- if (*reason != CLOCK_EVT_NOTIFY_BROADCAST_FORCE)
- goto out;
- }
+ if (!tick_device_is_functional(dev))
+ goto out;
switch (*reason) {
case CLOCK_EVT_NOTIFY_BROADCAST_ON:
@@ -246,6 +234,8 @@ static void tick_do_broadcast_on_off(void *why)
clockevents_set_mode(dev,
CLOCK_EVT_MODE_SHUTDOWN);
}
+ if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_FORCE)
+ dev->features |= CLOCK_EVT_FEAT_DUMMY;
break;
case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
if (cpu_isset(cpu, tick_broadcast_mask)) {
@@ -274,21 +264,12 @@ out:
*/
void tick_broadcast_on_off(unsigned long reason, int *oncpu)
{
- int cpu = get_cpu();
-
- if (!cpu_isset(*oncpu, cpu_online_map)) {
+ if (!cpu_isset(*oncpu, cpu_online_map))
		printk(KERN_ERR "tick-broadcast: ignoring broadcast for "
"offline CPU #%d\n", *oncpu);
- } else {
-
- if (cpu == *oncpu)
- tick_do_broadcast_on_off(&reason);
- else
- smp_call_function_single(*oncpu,
- tick_do_broadcast_on_off,
- &reason, 1, 1);
- }
- put_cpu();
+ else
+ smp_call_function_single(*oncpu, tick_do_broadcast_on_off,
+ &reason, 1, 1);
}
/*
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 8c3fef1db09..ce89ffb474d 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -570,7 +570,7 @@ void tick_setup_sched_timer(void)
/* Get the next period (per cpu) */
ts->sched_timer.expires = tick_init_jiffy_update();
offset = ktime_to_ns(tick_period) >> 1;
- do_div(offset, NR_CPUS);
+ do_div(offset, num_possible_cpus());
offset *= smp_processor_id();
ts->sched_timer.expires = ktime_add_ns(ts->sched_timer.expires, offset);
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 4ad79f6bdec..e5e466b2759 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -24,9 +24,7 @@
* This read-write spinlock protects us from races in SMP while
* playing with xtime and avenrun.
*/
-__attribute__((weak)) __cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
-
-EXPORT_SYMBOL(xtime_lock);
+__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
/*
@@ -47,21 +45,13 @@ EXPORT_SYMBOL(xtime_lock);
struct timespec xtime __attribute__ ((aligned (16)));
struct timespec wall_to_monotonic __attribute__ ((aligned (16)));
static unsigned long total_sleep_time; /* seconds */
-EXPORT_SYMBOL(xtime);
-
-#ifdef CONFIG_NO_HZ
static struct timespec xtime_cache __attribute__ ((aligned (16)));
static inline void update_xtime_cache(u64 nsec)
{
xtime_cache = xtime;
timespec_add_ns(&xtime_cache, nsec);
}
-#else
-#define xtime_cache xtime
-/* We do *not* want to evaluate the argument for this case */
-#define update_xtime_cache(n) do { } while (0)
-#endif
static struct clocksource *clock; /* pointer to current clocksource */
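With the #ifdef CONFIG_NO_HZ guard gone, the xtime_cache snapshot is maintained on every configuration. The hedged userspace sketch below re-implements the copy-then-add pattern of update_xtime_cache() with a local timespec_add_ns() helper; the values are illustrative, not taken from the kernel:

/* Keep a second timespec equal to xtime plus the nanoseconds not yet
 * folded in, so readers get one coherent snapshot. */
#include <stdio.h>
#include <stdint.h>
#include <time.h>

#define NSEC_PER_SEC 1000000000ULL

static void timespec_add_ns(struct timespec *ts, uint64_t ns)
{
	ns += ts->tv_nsec;
	while (ns >= NSEC_PER_SEC) {		/* carry whole seconds */
		ns -= NSEC_PER_SEC;
		ts->tv_sec++;
	}
	ts->tv_nsec = ns;
}

int main(void)
{
	struct timespec xtime = { .tv_sec = 1000, .tv_nsec = 999999000 };
	struct timespec xtime_cache;
	uint64_t pending_ns = 5000;		/* not yet folded into xtime */

	xtime_cache = xtime;			/* copy the base time */
	timespec_add_ns(&xtime_cache, pending_ns);

	printf("cache: %ld.%09ld\n", (long)xtime_cache.tv_sec,
	       xtime_cache.tv_nsec);
	return 0;
}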
diff --git a/kernel/timer.c b/kernel/timer.c
index 6ce1952eea7..8521d10fbb2 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -817,7 +817,7 @@ unsigned long next_timer_interrupt(void)
#endif
/*
- * Called from the timer interrupt handler to charge one tick to the current
+ * Called from the timer interrupt handler to charge one tick to the current
* process. user_tick is 1 if the tick is user time, 0 for system.
*/
void update_process_times(int user_tick)
@@ -826,10 +826,13 @@ void update_process_times(int user_tick)
int cpu = smp_processor_id();
/* Note: this timer irq context must be accounted for as well. */
- if (user_tick)
+ if (user_tick) {
account_user_time(p, jiffies_to_cputime(1));
- else
+ account_user_time_scaled(p, jiffies_to_cputime(1));
+ } else {
account_system_time(p, HARDIRQ_OFFSET, jiffies_to_cputime(1));
+ account_system_time_scaled(p, jiffies_to_cputime(1));
+ }
run_local_timers();
if (rcu_pending(cpu))
rcu_check_callbacks(cpu, user_tick);
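Each tick now charges both the plain and the "scaled" variant of user or system time. The userspace sketch below mirrors that branch structure with stand-in counters; the account_* names appear only in comments as references to the kernel helpers, while charge_tick() and fake_task are invented for the demo:

#include <stdio.h>

struct fake_task {
	unsigned long utime, stime;
	unsigned long utimescaled, stimescaled;
};

static void charge_tick(struct fake_task *p, int user_tick)
{
	if (user_tick) {
		p->utime++;		/* account_user_time() */
		p->utimescaled++;	/* account_user_time_scaled() */
	} else {
		p->stime++;		/* account_system_time() */
		p->stimescaled++;	/* account_system_time_scaled() */
	}
}

int main(void)
{
	struct fake_task t = { 0 };

	charge_tick(&t, 1);	/* a tick spent in user mode */
	charge_tick(&t, 0);	/* a tick spent in the kernel */
	printf("utime=%lu stime=%lu (scaled: %lu/%lu)\n",
	       t.utime, t.stime, t.utimescaled, t.stimescaled);
	return 0;
}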
diff --git a/kernel/tsacct.c b/kernel/tsacct.c
index c122131a122..4ab1b584961 100644
--- a/kernel/tsacct.c
+++ b/kernel/tsacct.c
@@ -62,6 +62,10 @@ void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk)
rcu_read_unlock();
stats->ac_utime = cputime_to_msecs(tsk->utime) * USEC_PER_MSEC;
stats->ac_stime = cputime_to_msecs(tsk->stime) * USEC_PER_MSEC;
+ stats->ac_utimescaled =
+ cputime_to_msecs(tsk->utimescaled) * USEC_PER_MSEC;
+ stats->ac_stimescaled =
+ cputime_to_msecs(tsk->stimescaled) * USEC_PER_MSEC;
stats->ac_minflt = tsk->min_flt;
stats->ac_majflt = tsk->maj_flt;
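The new ac_utimescaled/ac_stimescaled fields reuse the existing conversion path: cputime is first reduced to milliseconds and then multiplied by USEC_PER_MSEC, so the reported microsecond values carry millisecond granularity. A one-step illustration with a made-up input value:

#include <stdio.h>
#include <stdint.h>

#define USEC_PER_MSEC 1000ULL

int main(void)
{
	/* stands in for cputime_to_msecs(tsk->utimescaled) */
	uint64_t utimescaled_msecs = 1234;
	uint64_t ac_utimescaled = utimescaled_msecs * USEC_PER_MSEC;

	printf("ac_utimescaled = %llu usec\n",
	       (unsigned long long)ac_utimescaled);
	return 0;
}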
diff --git a/kernel/user.c b/kernel/user.c
index f0e561e6d08..e91331c457e 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -44,7 +44,6 @@ struct user_struct root_user = {
.processes = ATOMIC_INIT(1),
.files = ATOMIC_INIT(0),
.sigpending = ATOMIC_INIT(0),
- .mq_bytes = 0,
.locked_shm = 0,
#ifdef CONFIG_KEYS
.uid_keyring = &root_user_keyring,
@@ -58,19 +57,17 @@ struct user_struct root_user = {
/*
* These routines must be called with the uidhash spinlock held!
*/
-static inline void uid_hash_insert(struct user_struct *up,
- struct hlist_head *hashent)
+static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
hlist_add_head(&up->uidhash_node, hashent);
}
-static inline void uid_hash_remove(struct user_struct *up)
+static void uid_hash_remove(struct user_struct *up)
{
hlist_del_init(&up->uidhash_node);
}
-static inline struct user_struct *uid_hash_find(uid_t uid,
- struct hlist_head *hashent)
+static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
{
struct user_struct *user;
struct hlist_node *h;
@@ -87,9 +84,6 @@ static inline struct user_struct *uid_hash_find(uid_t uid,
#ifdef CONFIG_FAIR_USER_SCHED
-static struct kobject uids_kobject; /* represents /sys/kernel/uids directory */
-static DEFINE_MUTEX(uids_mutex);
-
static void sched_destroy_user(struct user_struct *up)
{
sched_destroy_group(up->tg);
@@ -111,6 +105,19 @@ static void sched_switch_user(struct task_struct *p)
sched_move_task(p);
}
+#else /* CONFIG_FAIR_USER_SCHED */
+
+static void sched_destroy_user(struct user_struct *up) { }
+static int sched_create_user(struct user_struct *up) { return 0; }
+static void sched_switch_user(struct task_struct *p) { }
+
+#endif /* CONFIG_FAIR_USER_SCHED */
+
+#if defined(CONFIG_FAIR_USER_SCHED) && defined(CONFIG_SYSFS)
+
+static struct kobject uids_kobject; /* represents /sys/kernel/uids directory */
+static DEFINE_MUTEX(uids_mutex);
+
static inline void uids_mutex_lock(void)
{
mutex_lock(&uids_mutex);
@@ -257,11 +264,8 @@ static inline void free_user(struct user_struct *up, unsigned long flags)
schedule_work(&up->work);
}
-#else /* CONFIG_FAIR_USER_SCHED */
+#else /* CONFIG_FAIR_USER_SCHED && CONFIG_SYSFS */
-static void sched_destroy_user(struct user_struct *up) { }
-static int sched_create_user(struct user_struct *up) { return 0; }
-static void sched_switch_user(struct task_struct *p) { }
static inline int user_kobject_create(struct user_struct *up) { return 0; }
static inline void uids_mutex_lock(void) { }
static inline void uids_mutex_unlock(void) { }
@@ -280,7 +284,7 @@ static inline void free_user(struct user_struct *up, unsigned long flags)
kmem_cache_free(uid_cachep, up);
}
-#endif /* CONFIG_FAIR_USER_SCHED */
+#endif
/*
* Locate the user_struct for the passed UID. If found, take a ref on it. The
@@ -343,8 +347,9 @@ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
atomic_set(&new->inotify_watches, 0);
atomic_set(&new->inotify_devs, 0);
#endif
-
+#ifdef CONFIG_POSIX_MQUEUE
new->mq_bytes = 0;
+#endif
new->locked_shm = 0;
if (alloc_uid_keyring(new, current) < 0) {
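Rounding off the kernel/user.c changes above: uid_hash_insert/remove/find lose their inline keyword but keep their roles as per-bucket list helpers, the FAIR_USER_SCHED stubs move out of the sysfs block, and mq_bytes is now only touched under CONFIG_POSIX_MQUEUE. The sketch below mimics only the shape of the insert/lookup helpers in userspace with a plain singly linked list; none of the types or names are the kernel's:

#include <stdio.h>

struct fake_user {
	unsigned int uid;
	struct fake_user *next;
};

/* Add an entry at the head of its hash bucket. */
static void uid_hash_insert(struct fake_user *up, struct fake_user **head)
{
	up->next = *head;
	*head = up;
}

/* Walk the bucket looking for a matching uid. */
static struct fake_user *uid_hash_find(unsigned int uid, struct fake_user *head)
{
	for (struct fake_user *u = head; u; u = u->next)
		if (u->uid == uid)
			return u;
	return NULL;
}

int main(void)
{
	struct fake_user *bucket = NULL;
	struct fake_user alice = { .uid = 1000 };

	uid_hash_insert(&alice, &bucket);
	printf("found uid 1000: %s\n",
	       uid_hash_find(1000, bucket) ? "yes" : "no");
	return 0;
}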