author     Dave Jones <davej@redhat.com>    2012-06-04 12:32:49 -0400
committer  Dave Jones <davej@redhat.com>    2012-06-04 12:32:49 -0400
commit     48d35dba7a308ba6c6d97d99f7a3bd62b2d8cead (patch)
tree       bff2b4fd9ca40f2ec2dbe0530431ff02d288bf6e /linux-2.6-i386-nx-emulation.patch
parent     035f913c7c92d6916cf6cb9eaee39910cbbdfbb2 (diff)
download   kernel-48d35dba7a308ba6c6d97d99f7a3bd62b2d8cead.tar.gz
           kernel-48d35dba7a308ba6c6d97d99f7a3bd62b2d8cead.tar.xz
           kernel-48d35dba7a308ba6c6d97d99f7a3bd62b2d8cead.zip
Remove 32-bit NX emulation. Its time has passed (and it seems to be broken anyway).
Diffstat (limited to 'linux-2.6-i386-nx-emulation.patch')
 -rw-r--r--  linux-2.6-i386-nx-emulation.patch | 626 ----
 1 file changed, 0 insertions(+), 626 deletions(-)
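
For background, the patch removed below approximated NX on 32-bit CPUs without hardware NX by clamping the user code segment's limit to the end of the highest PROT_EXEC mapping, so instruction fetches above that address raised #GP. A minimal stand-alone sketch of the descriptor encoding used by the patch's set_user_cs() follows; the main() driver, the fixed 4 KB PAGE_SIZE, and the 128 MB example limit are illustrative assumptions, not part of the patch:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SIZE 4096UL   /* assumed i386 page size */

    /*
     * Mirror of the patch's set_user_cs(): turn an exclusive byte limit
     * into a page-granular GDT code-segment descriptor. With the
     * granularity bit set, a limit field of N-1 covers bytes 0..N*4096-1.
     */
    static void encode_user_cs(unsigned long limit, uint32_t *a, uint32_t *b)
    {
            limit = (limit - 1) / PAGE_SIZE;      /* inclusive limit, in pages */
            *a = limit & 0xffff;                  /* limit bits 0-15 */
            *b = (limit & 0xf0000) | 0x00c0fb00;  /* limit bits 16-19, plus
                                                     G=1, D=1, P=1, DPL=3,
                                                     execute/read code type */
    }

    int main(void)
    {
            uint32_t a, b;

            /* e.g. highest executable mapping ends at 128 MB */
            encode_user_cs(128UL << 20, &a, &b);
            printf("user_cs descriptor: %08x/%08x\n", b, a);
            return 0;
    }

When hardware NX was present (or exec-shield was disabled), the patch simply widened the limit to span the whole address space, which is why the removal costs nothing on modern CPUs.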
diff --git a/linux-2.6-i386-nx-emulation.patch b/linux-2.6-i386-nx-emulation.patch
deleted file mode 100644
index eadc1e985..000000000
--- a/linux-2.6-i386-nx-emulation.patch
+++ /dev/null
@@ -1,626 +0,0 @@
-diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
-index 8bf1c06..49f8ab2 100644
---- a/arch/x86/include/asm/desc.h
-+++ b/arch/x86/include/asm/desc.h
-@@ -5,6 +5,7 @@
- #include <asm/ldt.h>
- #include <asm/mmu.h>
-
-+#include <linux/mm_types.h>
- #include <linux/smp.h>
- #include <linux/percpu.h>
-
-@@ -100,6 +101,9 @@ static inline int desc_empty(const void *ptr)
-
- #define load_TLS(t, cpu) native_load_tls(t, cpu)
- #define set_ldt native_set_ldt
-+#ifdef CONFIG_X86_32
-+#define load_user_cs_desc native_load_user_cs_desc
-+#endif /*CONFIG_X86_32*/
-
- #define write_ldt_entry(dt, entry, desc) native_write_ldt_entry(dt, entry, desc)
- #define write_gdt_entry(dt, entry, desc, type) native_write_gdt_entry(dt, entry, desc, type)
-@@ -405,4 +409,25 @@ static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
- _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
- }
-
-+#ifdef CONFIG_X86_32
-+static inline void set_user_cs(struct desc_struct *desc, unsigned long limit)
-+{
-+ limit = (limit - 1) / PAGE_SIZE;
-+ desc->a = limit & 0xffff;
-+ desc->b = (limit & 0xf0000) | 0x00c0fb00;
-+}
-+
-+static inline void native_load_user_cs_desc(int cpu, struct mm_struct *mm)
-+{
-+ get_cpu_gdt_table(cpu)[GDT_ENTRY_DEFAULT_USER_CS] = (mm)->context.user_cs;
-+}
-+
-+#define arch_add_exec_range arch_add_exec_range
-+#define arch_remove_exec_range arch_remove_exec_range
-+#define arch_flush_exec_range arch_flush_exec_range
-+extern void arch_add_exec_range(struct mm_struct *mm, unsigned long limit);
-+extern void arch_remove_exec_range(struct mm_struct *mm, unsigned long limit);
-+extern void arch_flush_exec_range(struct mm_struct *mm);
-+#endif /* CONFIG_X86_32 */
-+
- #endif /* _ASM_X86_DESC_H */
-diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
-index 5f55e69..aba94f0 100644
---- a/arch/x86/include/asm/mmu.h
-+++ b/arch/x86/include/asm/mmu.h
-@@ -7,6 +7,9 @@
- /*
- * The x86 doesn't have a mmu context, but
- * we put the segment information here.
-+ *
-+ * exec_limit is used to track the range PROT_EXEC
-+ * mappings span.
- */
- typedef struct {
- void *ldt;
-@@ -19,6 +22,11 @@ typedef struct {
-
- struct mutex lock;
- void *vdso;
-+
-+#ifdef CONFIG_X86_32
-+ struct desc_struct user_cs;
-+ unsigned long exec_limit;
-+#endif
- } mm_context_t;
-
- #ifdef CONFIG_SMP
-diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
-index aa0f913..016fcf6 100644
---- a/arch/x86/include/asm/paravirt.h
-+++ b/arch/x86/include/asm/paravirt.h
-@@ -299,6 +299,12 @@ static inline void set_ldt(const void *addr, unsigned entries)
- {
- PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
- }
-+#ifdef CONFIG_X86_32
-+static inline void load_user_cs_desc(unsigned int cpu, struct mm_struct *mm)
-+{
-+ PVOP_VCALL2(pv_cpu_ops.load_user_cs_desc, cpu, mm);
-+}
-+#endif /*CONFIG_X86_32*/
- static inline void store_gdt(struct desc_ptr *dtr)
- {
- PVOP_VCALL1(pv_cpu_ops.store_gdt, dtr);
-diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
-index 8e8b9a4..cca421e 100644
---- a/arch/x86/include/asm/paravirt_types.h
-+++ b/arch/x86/include/asm/paravirt_types.h
-@@ -125,6 +125,9 @@ struct pv_cpu_ops {
- void (*store_gdt)(struct desc_ptr *);
- void (*store_idt)(struct desc_ptr *);
- void (*set_ldt)(const void *desc, unsigned entries);
-+#ifdef CONFIG_X86_32
-+ void (*load_user_cs_desc)(int cpu, struct mm_struct *mm);
-+#endif
- unsigned long (*store_tr)(void);
- void (*load_tls)(struct thread_struct *t, unsigned int cpu);
- #ifdef CONFIG_X86_64
-diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
-index 82f29e7..d8597f2 100644
---- a/arch/x86/kernel/cpu/common.c
-+++ b/arch/x86/kernel/cpu/common.c
-@@ -841,6 +841,22 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
- /* Filter out anything that depends on CPUID levels we don't have */
- filter_cpuid_features(c, true);
-
-+#ifdef CONFIG_X86_32
-+ /*
-+ * emulation of NX with segment limits unfortunately means
-+ * we have to disable the fast system calls, due to the way that
-+ * sysexit clears the segment limits on return.
-+ * If we have either disabled exec-shield on the boot command line,
-+ * or we have NX, then we don't need to do this.
-+ */
-+ if (!disable_nx) {
-+#ifdef CONFIG_X86_PAE
-+ if (!test_cpu_cap(c, X86_FEATURE_NX))
-+#endif
-+ clear_cpu_cap(c, X86_FEATURE_SEP);
-+ }
-+#endif
-+
- /* If the model name is still unset, do table lookup. */
- if (!c->x86_model_id[0]) {
- const char *p;
-diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
-index 9ce8859..d19990c 100644
---- a/arch/x86/kernel/paravirt.c
-+++ b/arch/x86/kernel/paravirt.c
-@@ -360,6 +360,9 @@ struct pv_cpu_ops pv_cpu_ops = {
- .read_tscp = native_read_tscp,
- .load_tr_desc = native_load_tr_desc,
- .set_ldt = native_set_ldt,
-+#ifdef CONFIG_X86_32
-+ .load_user_cs_desc = native_load_user_cs_desc,
-+#endif /*CONFIG_X86_32*/
- .load_gdt = native_load_gdt,
- .load_idt = native_load_idt,
- .store_gdt = native_store_gdt,
-diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
-index 01d8d40..4c2ece1 100644
---- a/arch/x86/kernel/process_32.c
-+++ b/arch/x86/kernel/process_32.c
-@@ -191,7 +191,10 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
- void
- start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
- {
-+ int cpu;
-+
- set_user_gs(regs, 0);
-+
- regs->fs = 0;
- regs->ds = __USER_DS;
- regs->es = __USER_DS;
-@@ -199,6 +202,11 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
- regs->cs = __USER_CS;
- regs->ip = new_ip;
- regs->sp = new_sp;
-+
-+ cpu = get_cpu();
-+ load_user_cs_desc(cpu, current->mm);
-+ put_cpu();
-+
- /*
- * Free the old FP and other extended state
- */
-@@ -264,6 +272,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
- */
- lazy_save_gs(prev->gs);
-
-+ if (next_p->mm)
-+ load_user_cs_desc(cpu, next_p->mm);
-+
- /*
- * Load the per-thread Thread-Local Storage descriptor.
- */
-@@ -334,3 +345,40 @@ unsigned long get_wchan(struct task_struct *p)
- return 0;
- }
-
-+static void modify_cs(struct mm_struct *mm, unsigned long limit)
-+{
-+ mm->context.exec_limit = limit;
-+ set_user_cs(&mm->context.user_cs, limit);
-+ if (mm == current->mm) {
-+ int cpu;
-+
-+ cpu = get_cpu();
-+ load_user_cs_desc(cpu, mm);
-+ put_cpu();
-+ }
-+}
-+
-+void arch_add_exec_range(struct mm_struct *mm, unsigned long limit)
-+{
-+ if (limit > mm->context.exec_limit)
-+ modify_cs(mm, limit);
-+}
-+
-+void arch_remove_exec_range(struct mm_struct *mm, unsigned long old_end)
-+{
-+ struct vm_area_struct *vma;
-+ unsigned long limit = PAGE_SIZE;
-+
-+ if (old_end == mm->context.exec_limit) {
-+ for (vma = mm->mmap; vma; vma = vma->vm_next)
-+ if ((vma->vm_flags & VM_EXEC) && (vma->vm_end > limit))
-+ limit = vma->vm_end;
-+ modify_cs(mm, limit);
-+ }
-+}
-+
-+void arch_flush_exec_range(struct mm_struct *mm)
-+{
-+ mm->context.exec_limit = 0;
-+ set_user_cs(&mm->context.user_cs, 0);
-+}
-diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
-index 92d5756..662c2f9 100644
---- a/arch/x86/kernel/traps.c
-+++ b/arch/x86/kernel/traps.c
-@@ -108,6 +108,78 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
- dec_preempt_count();
- }
-
-+#ifdef CONFIG_X86_32
-+static inline int
-+__compare_user_cs_desc(const struct desc_struct *desc1,
-+ const struct desc_struct *desc2)
-+{
-+ return ((desc1->limit0 != desc2->limit0) ||
-+ (desc1->limit != desc2->limit) ||
-+ (desc1->base0 != desc2->base0) ||
-+ (desc1->base1 != desc2->base1) ||
-+ (desc1->base2 != desc2->base2));
-+}
-+
-+/*
-+ * lazy-check for CS validity on exec-shield binaries:
-+ *
-+ * the original non-exec stack patch was written by
-+ * Solar Designer <solar at openwall.com>. Thanks!
-+ */
-+static int
-+check_lazy_exec_limit(int cpu, struct pt_regs *regs, long error_code)
-+{
-+ struct desc_struct *desc1, *desc2;
-+ struct vm_area_struct *vma;
-+ unsigned long limit;
-+
-+ if (current->mm == NULL)
-+ return 0;
-+
-+ limit = -1UL;
-+ if (current->mm->context.exec_limit != -1UL) {
-+ limit = PAGE_SIZE;
-+ spin_lock(&current->mm->page_table_lock);
-+ for (vma = current->mm->mmap; vma; vma = vma->vm_next)
-+ if ((vma->vm_flags & VM_EXEC) && (vma->vm_end > limit))
-+ limit = vma->vm_end;
-+ vma = get_gate_vma(current->mm);
-+ if (vma && (vma->vm_flags & VM_EXEC) && (vma->vm_end > limit))
-+ limit = vma->vm_end;
-+ spin_unlock(&current->mm->page_table_lock);
-+ if (limit >= TASK_SIZE)
-+ limit = -1UL;
-+ current->mm->context.exec_limit = limit;
-+ }
-+ set_user_cs(&current->mm->context.user_cs, limit);
-+
-+ desc1 = &current->mm->context.user_cs;
-+ desc2 = get_cpu_gdt_table(cpu) + GDT_ENTRY_DEFAULT_USER_CS;
-+
-+ if (__compare_user_cs_desc(desc1, desc2)) {
-+ /*
-+ * The CS was not in sync - reload it and retry the
-+ * instruction. If the instruction still faults then
-+ * we won't hit this branch next time around.
-+ */
-+ if (print_fatal_signals >= 2) {
-+ printk(KERN_ERR "#GPF fixup (%ld[seg:%lx]) at %08lx, CPU#%d.\n",
-+ error_code, error_code/8, regs->ip,
-+ smp_processor_id());
-+ printk(KERN_ERR "exec_limit: %08lx, user_cs: %08x/%08x, CPU_cs: %08x/%08x.\n",
-+ current->mm->context.exec_limit,
-+ desc1->a, desc1->b, desc2->a, desc2->b);
-+ }
-+
-+ load_user_cs_desc(cpu, current->mm);
-+
-+ return 1;
-+ }
-+
-+ return 0;
-+}
-+#endif
-+
- static void __kprobes
- do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
- long error_code, siginfo_t *info)
-@@ -268,6 +340,29 @@ do_general_protection(struct pt_regs *regs, long error_code)
- if (!user_mode(regs))
- goto gp_in_kernel;
-
-+#ifdef CONFIG_X86_32
-+{
-+ int cpu;
-+ int ok;
-+
-+ cpu = get_cpu();
-+ ok = check_lazy_exec_limit(cpu, regs, error_code);
-+ put_cpu();
-+
-+ if (ok)
-+ return;
-+
-+ if (print_fatal_signals) {
-+ printk(KERN_ERR "#GPF(%ld[seg:%lx]) at %08lx, CPU#%d.\n",
-+ error_code, error_code/8, regs->ip, smp_processor_id());
-+ printk(KERN_ERR "exec_limit: %08lx, user_cs: %08x/%08x.\n",
-+ current->mm->context.exec_limit,
-+ current->mm->context.user_cs.a,
-+ current->mm->context.user_cs.b);
-+ }
-+}
-+#endif /*CONFIG_X86_32*/
-+
- tsk->thread.error_code = error_code;
- tsk->thread.trap_nr = X86_TRAP_GP;
-
-@@ -646,20 +741,37 @@ do_device_not_available(struct pt_regs *regs, long error_code)
- }
-
- #ifdef CONFIG_X86_32
-+/*
-+ * The fixup code for errors in iret jumps to here (iret_exc). It loses
- * the original trap number and error code. The bogus trap 32 and error
-+ * code 0 are what the vanilla kernel delivers via:
-+ * DO_ERROR_INFO(32, SIGSEGV, "iret exception", iret_error, ILL_BADSTK, 0, 1)
-+ *
-+ * NOTE: Because of the final "1" in the macro we need to enable interrupts.
-+ *
-+ * In case of a general protection fault in the iret instruction, we
-+ * need to check for a lazy CS update for exec-shield.
-+ */
- dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
- {
-- siginfo_t info;
-+ int ok;
-+ int cpu;
-+
- local_irq_enable();
-
-- info.si_signo = SIGILL;
-- info.si_errno = 0;
-- info.si_code = ILL_BADSTK;
-- info.si_addr = NULL;
-- if (notify_die(DIE_TRAP, "iret exception", regs, error_code,
-- X86_TRAP_IRET, SIGILL) == NOTIFY_STOP)
-- return;
-- do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, error_code,
-- &info);
-+ cpu = get_cpu();
-+ ok = check_lazy_exec_limit(cpu, regs, error_code);
-+ put_cpu();
-+
-+ if (!ok && notify_die(DIE_TRAP, "iret exception", regs,
-+ error_code, 32, SIGSEGV) != NOTIFY_STOP) {
-+ siginfo_t info;
-+ info.si_signo = SIGSEGV;
-+ info.si_errno = 0;
-+ info.si_code = ILL_BADSTK;
-+ info.si_addr = 0;
-+ do_trap(32, SIGSEGV, "iret exception", regs, error_code, &info);
-+ }
- }
- #endif
-
-diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
-index 410531d..eb040ad 100644
---- a/arch/x86/mm/setup_nx.c
-+++ b/arch/x86/mm/setup_nx.c
-@@ -1,3 +1,4 @@
-+#include <linux/sched.h>
- #include <linux/spinlock.h>
- #include <linux/errno.h>
- #include <linux/init.h>
-@@ -5,7 +6,7 @@
- #include <asm/pgtable.h>
- #include <asm/proto.h>
-
--static int disable_nx __cpuinitdata;
-+int disable_nx __cpuinitdata;
-
- /*
- * noexec = on|off
-@@ -40,6 +41,10 @@ void __cpuinit x86_configure_nx(void)
- void __init x86_report_nx(void)
- {
- if (!cpu_has_nx) {
-+ if (!disable_nx)
-+ printk(KERN_INFO "Using x86 segment limits to approximate NX protection\n");
-+ else
-+
- printk(KERN_NOTICE "Notice: NX (Execute Disable) protection "
- "missing in CPU!\n");
- } else {
-diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
-index 3804471..3c7805c 100644
---- a/arch/x86/mm/tlb.c
-+++ b/arch/x86/mm/tlb.c
-@@ -7,6 +7,7 @@
- #include <linux/module.h>
- #include <linux/cpu.h>
-
-+#include <asm/desc.h>
- #include <asm/tlbflush.h>
- #include <asm/mmu_context.h>
- #include <asm/cache.h>
-@@ -134,6 +135,12 @@ void smp_invalidate_interrupt(struct pt_regs *regs)
- union smp_flush_state *f;
-
- cpu = smp_processor_id();
-+
-+#ifdef CONFIG_X86_32
-+ if (current->active_mm)
-+ load_user_cs_desc(cpu, current->active_mm);
-+#endif
-+
- /*
- * orig_rax contains the negated interrupt vector.
- * Use that to determine where the sender put the data.
-diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
-index c0f5fac..2040256 100644
---- a/arch/x86/xen/enlighten.c
-+++ b/arch/x86/xen/enlighten.c
-@@ -446,6 +446,24 @@ static void xen_set_ldt(const void *addr, unsigned entries)
- xen_mc_issue(PARAVIRT_LAZY_CPU);
- }
-
-+#ifdef CONFIG_X86_32
-+static void xen_load_user_cs_desc(int cpu, struct mm_struct *mm)
-+{
-+ void *gdt;
-+ xmaddr_t mgdt;
-+ u64 descriptor;
-+ struct desc_struct user_cs;
-+
-+ gdt = &get_cpu_gdt_table(cpu)[GDT_ENTRY_DEFAULT_USER_CS];
-+ mgdt = virt_to_machine(gdt);
-+
-+ user_cs = mm->context.user_cs;
-+ descriptor = (u64) user_cs.a | ((u64) user_cs.b) << 32;
-+
-+ HYPERVISOR_update_descriptor(mgdt.maddr, descriptor);
-+}
-+#endif /*CONFIG_X86_32*/
-+
- static void xen_load_gdt(const struct desc_ptr *dtr)
- {
- unsigned long va = dtr->address;
-@@ -1119,6 +1137,9 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
-
- .load_tr_desc = paravirt_nop,
- .set_ldt = xen_set_ldt,
-+#ifdef CONFIG_X86_32
-+ .load_user_cs_desc = xen_load_user_cs_desc,
-+#endif /*CONFIG_X86_32*/
- .load_gdt = xen_load_gdt,
- .load_idt = xen_load_idt,
- .load_tls = xen_load_tls,
-diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
-index 16f7354..611f1c6 100644
---- a/fs/binfmt_elf.c
-+++ b/fs/binfmt_elf.c
-@@ -708,6 +708,16 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
- if (retval)
- goto out_free_dentry;
-
-+#ifdef CONFIG_X86_32
-+ /*
-+ * Turn off the CS limit completely if exec-shield disabled or
-+ * NX active:
-+ */
-+ if (disable_nx || executable_stack != EXSTACK_DISABLE_X || (__supported_pte_mask & _PAGE_NX))
-+ arch_add_exec_range(current->mm, -1);
-+#endif
-+
-+
- /* OK, This is the point of no return */
- current->mm->def_flags = def_flags;
-
-diff --git a/include/linux/sched.h b/include/linux/sched.h
-index 28fa9d0..c961aa8 100644
---- a/include/linux/sched.h
-+++ b/include/linux/sched.h
-@@ -101,6 +101,9 @@ struct fs_struct;
- struct perf_event_context;
- struct blk_plug;
-
-+extern int disable_nx;
-+extern int print_fatal_signals;
-+
- /*
- * List of flags we want to share for kernel threads,
- * if only because they are not used by them anyway.
-diff --git a/mm/mmap.c b/mm/mmap.c
-index 69a1889..5172f68 100644
---- a/mm/mmap.c
-+++ b/mm/mmap.c
-@@ -46,6 +46,18 @@
- #define arch_rebalance_pgtables(addr, len) (addr)
- #endif
-
-+/* No sane architecture will #define these to anything else */
-+#ifndef arch_add_exec_range
-+#define arch_add_exec_range(mm, limit) do { ; } while (0)
-+#endif
-+#ifndef arch_flush_exec_range
-+#define arch_flush_exec_range(mm) do { ; } while (0)
-+#endif
-+#ifndef arch_remove_exec_range
-+#define arch_remove_exec_range(mm, limit) do { ; } while (0)
-+#endif
-+
-+
- static void unmap_region(struct mm_struct *mm,
- struct vm_area_struct *vma, struct vm_area_struct *prev,
- unsigned long start, unsigned long end);
-@@ -426,6 +438,8 @@ __vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
- struct vm_area_struct *prev, struct rb_node **rb_link,
- struct rb_node *rb_parent)
- {
-+ if (vma->vm_flags & VM_EXEC)
-+ arch_add_exec_range(mm, vma->vm_end);
- __vma_link_list(mm, vma, prev, rb_parent);
- __vma_link_rb(mm, vma, rb_link, rb_parent);
- }
-@@ -479,6 +493,8 @@ __vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
- rb_erase(&vma->vm_rb, &mm->mm_rb);
- if (mm->mmap_cache == vma)
- mm->mmap_cache = prev;
-+ if (vma->vm_flags & VM_EXEC)
-+ arch_remove_exec_range(mm, vma->vm_end);
- }
-
- /*
-@@ -795,6 +811,8 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
- } else /* cases 2, 5, 7 */
- err = vma_adjust(prev, prev->vm_start,
- end, prev->vm_pgoff, NULL);
-+ if (prev->vm_flags & VM_EXEC)
-+ arch_add_exec_range(mm, prev->vm_end);
- if (err)
- return NULL;
- khugepaged_enter_vma_merge(prev);
-@@ -2009,10 +2027,14 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
- if (new->vm_ops && new->vm_ops->open)
- new->vm_ops->open(new);
-
-- if (new_below)
-+ if (new_below) {
-+ unsigned long old_end = vma->vm_end;
-+
- err = vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff +
- ((addr - new->vm_start) >> PAGE_SHIFT), new);
-- else
-+ if (vma->vm_flags & VM_EXEC)
-+ arch_remove_exec_range(mm, old_end);
-+ } else
- err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
-
- /* Success. */
-@@ -2312,6 +2334,7 @@ void exit_mmap(struct mm_struct *mm)
-
- free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
- tlb_finish_mmu(&tlb, 0, -1);
-+ arch_flush_exec_range(mm);
-
- /*
- * Walk the list again, actually closing and freeing it,
-diff --git a/mm/mprotect.c b/mm/mprotect.c
-index a409926..5e05c67 100644
---- a/mm/mprotect.c
-+++ b/mm/mprotect.c
-@@ -25,9 +25,14 @@
- #include <linux/perf_event.h>
- #include <asm/uaccess.h>
- #include <asm/pgtable.h>
-+#include <asm/pgalloc.h>
- #include <asm/cacheflush.h>
- #include <asm/tlbflush.h>
-
-+#ifndef arch_remove_exec_range
-+#define arch_remove_exec_range(mm, limit) do { ; } while (0)
-+#endif
-+
- #ifndef pgprot_modify
- static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
- {
-@@ -148,7 +153,7 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
- struct mm_struct *mm = vma->vm_mm;
- unsigned long oldflags = vma->vm_flags;
- long nrpages = (end - start) >> PAGE_SHIFT;
-- unsigned long charged = 0;
-+ unsigned long charged = 0, old_end = vma->vm_end;
- pgoff_t pgoff;
- int error;
- int dirty_accountable = 0;
-@@ -213,6 +218,9 @@ success:
- dirty_accountable = 1;
- }
-
-+ if (oldflags & VM_EXEC)
-+ arch_remove_exec_range(current->mm, old_end);
-+
- mmu_notifier_invalidate_range_start(mm, start, end);
- if (is_vm_hugetlb_page(vma))
- hugetlb_change_protection(vma, start, end, vma->vm_page_prot);