Diffstat (limited to 'kernel')
-rw-r--r--  kernel/audit.c                16
-rw-r--r--  kernel/fork.c                  3
-rw-r--r--  kernel/futex.c                26
-rw-r--r--  kernel/futex_compat.c         28
-rw-r--r--  kernel/hrtimer.c              24
-rw-r--r--  kernel/nsproxy.c              15
-rw-r--r--  kernel/sched.c                 1
-rw-r--r--  kernel/sched_fair.c           10
-rw-r--r--  kernel/signal.c               22
-rw-r--r--  kernel/softirq.c               4
-rw-r--r--  kernel/sys.c                   2
-rw-r--r--  kernel/sysctl.c                2
-rw-r--r--  kernel/time/Kconfig            5
-rw-r--r--  kernel/time/Makefile           2
-rw-r--r--  kernel/time/clockevents.c      3
-rw-r--r--  kernel/time/tick-broadcast.c  15
-rw-r--r--  kernel/time/tick-common.c      4
-rw-r--r--  kernel/time/timer_stats.c      5
18 files changed, 121 insertions, 66 deletions
diff --git a/kernel/audit.c b/kernel/audit.c
index eb0f9165b40..2924251a654 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -847,18 +847,10 @@ static void audit_receive_skb(struct sk_buff *skb)
}
/* Receive messages from netlink socket. */
-static void audit_receive(struct sock *sk, int length)
+static void audit_receive(struct sk_buff *skb)
{
- struct sk_buff *skb;
- unsigned int qlen;
-
mutex_lock(&audit_cmd_mutex);
-
- for (qlen = skb_queue_len(&sk->sk_receive_queue); qlen; qlen--) {
- skb = skb_dequeue(&sk->sk_receive_queue);
- audit_receive_skb(skb);
- kfree_skb(skb);
- }
+ audit_receive_skb(skb);
mutex_unlock(&audit_cmd_mutex);
}
@@ -876,8 +868,8 @@ static int __init audit_init(void)
printk(KERN_INFO "audit: initializing netlink socket (%s)\n",
audit_default ? "enabled" : "disabled");
- audit_sock = netlink_kernel_create(NETLINK_AUDIT, 0, audit_receive,
- NULL, THIS_MODULE);
+ audit_sock = netlink_kernel_create(&init_net, NETLINK_AUDIT, 0,
+ audit_receive, NULL, THIS_MODULE);
if (!audit_sock)
audit_panic("cannot initialize netlink socket");
else
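
Note: the two hunks above track a netlink API change. The kernel socket's input
callback is now invoked once per skb (the dequeue loop the old audit_receive()
open-coded has moved into the netlink core), and netlink_kernel_create() gained
a network-namespace argument, here the initial namespace &init_net. A rough
sketch of the drain loop the callback no longer needs, assuming the core
consumes each skb after the callback returns (the function name is
illustrative, not the real core routine):

#include <linux/skbuff.h>
#include <net/sock.h>

/* Illustrative only: roughly what the netlink core now does on the
 * kernel socket's behalf, handing the callback one skb at a time. */
static void netlink_drain_sketch(struct sock *sk,
                                 void (*input)(struct sk_buff *skb))
{
        struct sk_buff *skb;

        while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
                input(skb);             /* e.g. audit_receive() */
                kfree_skb(skb);
        }
}
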
diff --git a/kernel/fork.c b/kernel/fork.c
index 33f12f48684..5e67f90a169 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1608,7 +1608,8 @@ asmlinkage long sys_unshare(unsigned long unshare_flags)
err = -EINVAL;
if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
- CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWUSER))
+ CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWUSER|
+ CLONE_NEWNET))
goto bad_unshare_out;
if ((err = unshare_thread(unshare_flags)))
diff --git a/kernel/futex.c b/kernel/futex.c
index e8935b195e8..fcc94e7b408 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1943,9 +1943,10 @@ static inline int fetch_robust_entry(struct robust_list __user **entry,
void exit_robust_list(struct task_struct *curr)
{
struct robust_list_head __user *head = curr->robust_list;
- struct robust_list __user *entry, *pending;
- unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
+ struct robust_list __user *entry, *next_entry, *pending;
+ unsigned int limit = ROBUST_LIST_LIMIT, pi, next_pi, pip;
unsigned long futex_offset;
+ int rc;
/*
* Fetch the list head (which was registered earlier, via
@@ -1965,12 +1966,14 @@ void exit_robust_list(struct task_struct *curr)
if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
return;
- if (pending)
- handle_futex_death((void __user *)pending + futex_offset,
- curr, pip);
-
+ next_entry = NULL; /* avoid warning with gcc */
while (entry != &head->list) {
/*
+ * Fetch the next entry in the list before calling
+ * handle_futex_death:
+ */
+ rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
+ /*
* A pending lock might already be on the list, so
* don't process it twice:
*/
@@ -1978,11 +1981,10 @@ void exit_robust_list(struct task_struct *curr)
if (handle_futex_death((void __user *)entry + futex_offset,
curr, pi))
return;
- /*
- * Fetch the next entry in the list:
- */
- if (fetch_robust_entry(&entry, &entry->next, &pi))
+ if (rc)
return;
+ entry = next_entry;
+ pi = next_pi;
/*
* Avoid excessively long or circular lists:
*/
@@ -1991,6 +1993,10 @@ void exit_robust_list(struct task_struct *curr)
cond_resched();
}
+
+ if (pending)
+ handle_futex_death((void __user *)pending + futex_offset,
+ curr, pip);
}
long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
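
Note: the walk above now prefetches entry->next from user space before
handle_futex_death() touches the current entry, and processes the
list_op_pending entry only after the loop, so the traversal no longer depends
on an entry staying intact once its lock word has been handled (a woken owner
may reuse and relink it). The same pattern in a minimal user-space sketch, with
hypothetical node/handle names:

#include <stddef.h>

struct node {
        struct node *next;
};

/* handle() may clobber or free the node it is given, so the walk
 * captures ->next first and never re-reads the current node. */
static void walk(struct node *head, void (*handle)(struct node *))
{
        struct node *cur = head;

        while (cur != NULL) {
                struct node *next = cur->next;  /* fetch before handling */

                handle(cur);
                cur = next;
        }
}
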
diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
index 7e52eb051f2..2c2e2954b71 100644
--- a/kernel/futex_compat.c
+++ b/kernel/futex_compat.c
@@ -38,10 +38,11 @@ fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
void compat_exit_robust_list(struct task_struct *curr)
{
struct compat_robust_list_head __user *head = curr->compat_robust_list;
- struct robust_list __user *entry, *pending;
- unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
- compat_uptr_t uentry, upending;
+ struct robust_list __user *entry, *next_entry, *pending;
+ unsigned int limit = ROBUST_LIST_LIMIT, pi, next_pi, pip;
+ compat_uptr_t uentry, next_uentry, upending;
compat_long_t futex_offset;
+ int rc;
/*
* Fetch the list head (which was registered earlier, via
@@ -61,11 +62,16 @@ void compat_exit_robust_list(struct task_struct *curr)
if (fetch_robust_entry(&upending, &pending,
&head->list_op_pending, &pip))
return;
- if (pending)
- handle_futex_death((void __user *)pending + futex_offset, curr, pip);
+ next_entry = NULL; /* avoid warning with gcc */
while (entry != (struct robust_list __user *) &head->list) {
/*
+ * Fetch the next entry in the list before calling
+ * handle_futex_death:
+ */
+ rc = fetch_robust_entry(&next_uentry, &next_entry,
+ (compat_uptr_t __user *)&entry->next, &next_pi);
+ /*
* A pending lock might already be on the list, so
* dont process it twice:
*/
@@ -74,12 +80,11 @@ void compat_exit_robust_list(struct task_struct *curr)
curr, pi))
return;
- /*
- * Fetch the next entry in the list:
- */
- if (fetch_robust_entry(&uentry, &entry,
- (compat_uptr_t __user *)&entry->next, &pi))
+ if (rc)
return;
+ uentry = next_uentry;
+ entry = next_entry;
+ pi = next_pi;
/*
* Avoid excessively long or circular lists:
*/
@@ -88,6 +93,9 @@ void compat_exit_robust_list(struct task_struct *curr)
cond_resched();
}
+ if (pending)
+ handle_futex_death((void __user *)pending + futex_offset,
+ curr, pip);
}
asmlinkage long
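
Note: the compat variant mirrors the fetch-ahead change; the extra next_uentry
bookkeeping exists because a 32-bit task stores its list links as 32-bit
compat_uptr_t values, which must be widened before the kernel can follow them.
The widening idiom, sketched (get_user() and compat_ptr() are the real
helpers; the wrapper is illustrative):

/* Illustrative: read a 32-bit user pointer and widen it for use
 * by a 64-bit kernel. */
static inline int fetch_uptr_sketch(struct robust_list __user **dst,
                                    compat_uptr_t __user *src)
{
        compat_uptr_t uptr;

        if (get_user(uptr, src))
                return -EFAULT;
        *dst = compat_ptr(uptr);        /* u32 -> full user pointer */
        return 0;
}
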
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index c21ca6bfaa6..dc8a4451d79 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -277,6 +277,30 @@ ktime_t ktime_add_ns(const ktime_t kt, u64 nsec)
}
EXPORT_SYMBOL_GPL(ktime_add_ns);
+
+/**
+ * ktime_sub_ns - Subtract a scalar nanoseconds value from a ktime_t variable
+ * @kt: minuend
+ * @nsec: the scalar nsec value to subtract
+ *
+ * Returns the subtraction of @nsec from @kt in ktime_t format
+ */
+ktime_t ktime_sub_ns(const ktime_t kt, u64 nsec)
+{
+ ktime_t tmp;
+
+ if (likely(nsec < NSEC_PER_SEC)) {
+ tmp.tv64 = nsec;
+ } else {
+ unsigned long rem = do_div(nsec, NSEC_PER_SEC);
+
+ tmp = ktime_set((long)nsec, rem);
+ }
+
+ return ktime_sub(kt, tmp);
+}
+
+EXPORT_SYMBOL_GPL(ktime_sub_ns);
# endif /* !CONFIG_KTIME_SCALAR */
/*
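
Note: ktime_sub_ns() fast-paths sub-second values and otherwise splits nsec
into whole seconds plus remainder via do_div(), whose in-place contract is
easy to misread. A small worked example of that contract (values are made up):

u64 nsec = 3500000000ULL;       /* 3.5 seconds, in nanoseconds */
unsigned long rem;

/* do_div(n, base) divides n in place and returns the remainder: */
rem = do_div(nsec, NSEC_PER_SEC);
/* now nsec == 3 (whole seconds) and rem == 500000000 (leftover ns),
 * exactly the (sec, nsec) pair ktime_set() expects. */
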
diff --git a/kernel/nsproxy.c b/kernel/nsproxy.c
index a4fb7d46971..f1decd21a53 100644
--- a/kernel/nsproxy.c
+++ b/kernel/nsproxy.c
@@ -20,6 +20,7 @@
#include <linux/mnt_namespace.h>
#include <linux/utsname.h>
#include <linux/pid_namespace.h>
+#include <net/net_namespace.h>
static struct kmem_cache *nsproxy_cachep;
@@ -98,8 +99,17 @@ static struct nsproxy *create_new_namespaces(unsigned long flags,
goto out_user;
}
+ new_nsp->net_ns = copy_net_ns(flags, tsk->nsproxy->net_ns);
+ if (IS_ERR(new_nsp->net_ns)) {
+ err = PTR_ERR(new_nsp->net_ns);
+ goto out_net;
+ }
+
return new_nsp;
+out_net:
+ if (new_nsp->user_ns)
+ put_user_ns(new_nsp->user_ns);
out_user:
if (new_nsp->pid_ns)
put_pid_ns(new_nsp->pid_ns);
@@ -132,7 +142,7 @@ int copy_namespaces(unsigned long flags, struct task_struct *tsk)
get_nsproxy(old_ns);
- if (!(flags & (CLONE_NEWNS | CLONE_NEWUTS | CLONE_NEWIPC | CLONE_NEWUSER)))
+ if (!(flags & (CLONE_NEWNS | CLONE_NEWUTS | CLONE_NEWIPC | CLONE_NEWUSER | CLONE_NEWNET)))
return 0;
if (!capable(CAP_SYS_ADMIN)) {
@@ -164,6 +174,7 @@ void free_nsproxy(struct nsproxy *ns)
put_pid_ns(ns->pid_ns);
if (ns->user_ns)
put_user_ns(ns->user_ns);
+ put_net(ns->net_ns);
kmem_cache_free(nsproxy_cachep, ns);
}
@@ -177,7 +188,7 @@ int unshare_nsproxy_namespaces(unsigned long unshare_flags,
int err = 0;
if (!(unshare_flags & (CLONE_NEWNS | CLONE_NEWUTS | CLONE_NEWIPC |
- CLONE_NEWUSER)))
+ CLONE_NEWUSER | CLONE_NEWNET)))
return 0;
if (!capable(CAP_SYS_ADMIN))
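
Note: the new out_net label slots copy_net_ns() into the existing unwind
chain, where every later failure releases what was acquired before it, in
reverse order. The idiom reduced to a skeleton (acquire_a/acquire_b/release_a
are hypothetical names standing in for the namespace getters and puts):

int acquire_a(void);            /* hypothetical */
int acquire_b(void);            /* hypothetical, e.g. copy_net_ns() */
void release_a(void);           /* hypothetical */

static int create_sketch(void)
{
        int err;

        err = acquire_a();
        if (err)
                goto out;
        err = acquire_b();
        if (err)
                goto out_a;     /* undo a, then report the error */
        return 0;

out_a:
        release_a();
out:
        return err;
}
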
diff --git a/kernel/sched.c b/kernel/sched.c
index 6107a0cd632..6c10fa796ca 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -61,6 +61,7 @@
#include <linux/delayacct.h>
#include <linux/reciprocal_div.h>
#include <linux/unistd.h>
+#include <linux/pagemap.h>
#include <asm/tlb.h>
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index c9fbe8e73a4..67c67a87146 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -639,6 +639,16 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
se->block_start = 0;
se->sum_sleep_runtime += delta;
+
+ /*
+ * Blocking time is in units of nanosecs, so shift by 20 to
+ * get a milliseconds-range estimation of the amount of
+ * time that the task spent sleeping:
+ */
+ if (unlikely(prof_on == SLEEP_PROFILING)) {
+ profile_hits(SLEEP_PROFILING, (void *)get_wchan(tsk),
+ delta >> 20);
+ }
}
#endif
}
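
Note: the shift works because 2^20 = 1048576 is within 5% of 10^6, so shifting
a nanosecond count right by 20 bits gives a milliseconds-scale figure without
a 64-bit division. For instance:

u64 delta_ns = 250 * NSEC_PER_MSEC;     /* 250 ms spent sleeping */
u64 approx_ms = delta_ns >> 20;         /* 238: a few percent low, but
                                         * close enough for profile
                                         * bucketing */
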
diff --git a/kernel/signal.c b/kernel/signal.c
index 9fb91a32edd..79295238109 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -531,18 +531,18 @@ static int check_kill_permission(int sig, struct siginfo *info,
if (!valid_signal(sig))
return error;
- error = audit_signal_info(sig, t); /* Let audit system see the signal */
- if (error)
- return error;
-
- error = -EPERM;
- if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
- && ((sig != SIGCONT) ||
- (process_session(current) != process_session(t)))
- && (current->euid ^ t->suid) && (current->euid ^ t->uid)
- && (current->uid ^ t->suid) && (current->uid ^ t->uid)
- && !capable(CAP_KILL))
+ if (info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info))) {
+ error = audit_signal_info(sig, t); /* Let audit system see the signal */
+ if (error)
+ return error;
+ error = -EPERM;
+ if (((sig != SIGCONT) ||
+ (process_session(current) != process_session(t)))
+ && (current->euid ^ t->suid) && (current->euid ^ t->uid)
+ && (current->uid ^ t->suid) && (current->uid ^ t->uid)
+ && !capable(CAP_KILL))
return error;
+ }
return security_task_kill(t, info, sig, 0);
}
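
Note: the reordering confines both the audit call and the uid/session checks
to signals that actually originate from user space; kernel-generated signals
now go straight to the LSM hook. The guard leans on the long-standing si_code
convention, per the kernel's generic siginfo definitions:

/* si_code <= 0: sent from user space (kill, sigqueue, ...);
 * si_code  > 0: generated by the kernel.                     */
#define SI_FROMUSER(siptr)      ((siptr)->si_code <= 0)
#define SI_FROMKERNEL(siptr)    ((siptr)->si_code > 0)
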
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 0f546ddea43..bd89bc4eb0b 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -271,8 +271,6 @@ asmlinkage void do_softirq(void)
local_irq_restore(flags);
}
-EXPORT_SYMBOL(do_softirq);
-
#endif
/*
@@ -332,8 +330,6 @@ inline fastcall void raise_softirq_irqoff(unsigned int nr)
wakeup_softirqd();
}
-EXPORT_SYMBOL(raise_softirq_irqoff);
-
void fastcall raise_softirq(unsigned int nr)
{
unsigned long flags;
diff --git a/kernel/sys.c b/kernel/sys.c
index 1b33b05d346..8ae2e636eb1 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -32,6 +32,7 @@
#include <linux/getcpu.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/seccomp.h>
+#include <linux/cpu.h>
#include <linux/compat.h>
#include <linux/syscalls.h>
@@ -878,6 +879,7 @@ void kernel_power_off(void)
kernel_shutdown_prepare(SYSTEM_POWER_OFF);
if (pm_power_off_prepare)
pm_power_off_prepare();
+ disable_nonboot_cpus();
sysdev_shutdown();
printk(KERN_EMERG "Power down.\n");
machine_power_off();
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 53a456ebf6d..c7314f95264 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1221,7 +1221,7 @@ static ctl_table fs_table[] = {
};
static ctl_table debug_table[] = {
-#ifdef CONFIG_X86
+#if defined(CONFIG_X86) || defined(CONFIG_PPC)
{
.ctl_name = CTL_UNNUMBERED,
.procname = "exception-trace",
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
index f6635112654..8d53106a0a9 100644
--- a/kernel/time/Kconfig
+++ b/kernel/time/Kconfig
@@ -23,3 +23,8 @@ config HIGH_RES_TIMERS
hardware is not capable then this option only increases
the size of the kernel image.
+config GENERIC_CLOCKEVENTS_BUILD
+ bool
+ default y
+ depends on GENERIC_CLOCKEVENTS || GENERIC_CLOCKEVENTS_MIGR
+
diff --git a/kernel/time/Makefile b/kernel/time/Makefile
index 99b6034fc86..905b0b50792 100644
--- a/kernel/time/Makefile
+++ b/kernel/time/Makefile
@@ -1,6 +1,6 @@
obj-y += timekeeping.o ntp.o clocksource.o jiffies.o timer_list.o
-obj-$(CONFIG_GENERIC_CLOCKEVENTS) += clockevents.o
+obj-$(CONFIG_GENERIC_CLOCKEVENTS_BUILD) += clockevents.o
obj-$(CONFIG_GENERIC_CLOCKEVENTS) += tick-common.o
obj-$(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) += tick-broadcast.o
obj-$(CONFIG_TICK_ONESHOT) += tick-oneshot.o
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 41dd3105ce7..822beebe664 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -194,6 +194,7 @@ void clockevents_exchange_device(struct clock_event_device *old,
local_irq_restore(flags);
}
+#ifdef CONFIG_GENERIC_CLOCKEVENTS
/**
* clockevents_notify - notification about relevant events
*/
@@ -222,4 +223,4 @@ void clockevents_notify(unsigned long reason, void *arg)
spin_unlock(&clockevents_lock);
}
EXPORT_SYMBOL_GPL(clockevents_notify);
-
+#endif
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 0962e057766..298bc7c6f09 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -64,8 +64,9 @@ static void tick_broadcast_start_periodic(struct clock_event_device *bc)
*/
int tick_check_broadcast_device(struct clock_event_device *dev)
{
- if (tick_broadcast_device.evtdev ||
- (dev->features & CLOCK_EVT_FEAT_C3STOP))
+ if ((tick_broadcast_device.evtdev &&
+ tick_broadcast_device.evtdev->rating >= dev->rating) ||
+ (dev->features & CLOCK_EVT_FEAT_C3STOP))
return 0;
clockevents_exchange_device(NULL, dev);
@@ -176,8 +177,6 @@ static void tick_do_periodic_broadcast(void)
*/
static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
{
- dev->next_event.tv64 = KTIME_MAX;
-
tick_do_periodic_broadcast();
/*
@@ -515,11 +514,9 @@ static void tick_broadcast_clear_oneshot(int cpu)
*/
void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
{
- if (bc->mode != CLOCK_EVT_MODE_ONESHOT) {
- bc->event_handler = tick_handle_oneshot_broadcast;
- clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
- bc->next_event.tv64 = KTIME_MAX;
- }
+ bc->event_handler = tick_handle_oneshot_broadcast;
+ clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
+ bc->next_event.tv64 = KTIME_MAX;
}
/*
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 77a21abc871..3f3ae390783 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -200,7 +200,7 @@ static int tick_check_new_device(struct clock_event_device *newdev)
cpu = smp_processor_id();
if (!cpu_isset(cpu, newdev->cpumask))
- goto out;
+ goto out_bc;
td = &per_cpu(tick_cpu_device, cpu);
curdev = td->evtdev;
@@ -265,7 +265,7 @@ out_bc:
*/
if (tick_check_broadcast_device(newdev))
ret = NOTIFY_STOP;
-out:
+
spin_unlock_irqrestore(&tick_device_lock, flags);
return ret;
diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
index 3c38fb5eae1..c36bb7ed030 100644
--- a/kernel/time/timer_stats.c
+++ b/kernel/time/timer_stats.c
@@ -327,8 +327,9 @@ static int tstats_show(struct seq_file *m, void *v)
ms = 1;
if (events && period.tv_sec)
- seq_printf(m, "%ld total events, %ld.%ld events/sec\n", events,
- events / period.tv_sec, events * 1000 / ms);
+ seq_printf(m, "%ld total events, %ld.%03ld events/sec\n",
+ events, events * 1000 / ms,
+ (events * 1000000 / ms) % 1000);
else
seq_printf(m, "%ld total events\n", events);