author     Thorsten Leemhuis <fedora@leemhuis.info>  2017-06-20 21:47:41 +0200
committer  Thorsten Leemhuis <fedora@leemhuis.info>  2017-06-20 21:47:41 +0200
commit     cdc199efa4929924ef0f98670a60f0987d39ff43 (patch)
tree       45a46693666cfd2b53a0dfcfc5b62253b876319a
parent     7487bca97bff5bdb0320c16d04c1d8d44ebd4159 (diff)
parent     26a80c98876b443d3b5a03450188293e62096c8d (diff)
download   kernel-4.11.6-101.vanilla.knurd.1.fc24.tar.gz
           kernel-4.11.6-101.vanilla.knurd.1.fc24.tar.xz
           kernel-4.11.6-101.vanilla.knurd.1.fc24.zip

Merge remote-tracking branch 'origin/f24' into f24-user-thl-vanilla-fedora
(kernel-4.11.6-101.vanilla.knurd.1.fc24)
-rw-r--r--  0001-efi-Fix-boot-panic-because-of-invalid-BGRT-image-add.patch       114
-rw-r--r--  RFC-audit-fix-a-race-condition-with-the-auditd-tracking-code.patch    156
-rw-r--r--  baseconfig/CONFIG_B43LEGACY_DEBUG                                       2
-rw-r--r--  baseconfig/CONFIG_B43_DEBUG                                             2
-rw-r--r--  drm-i915-Do-not-drop-pagetables-when-empty.patch                       95
-rw-r--r--  kernel-aarch64.config                                                   4
-rw-r--r--  kernel-armv7hl-lpae.config                                              4
-rw-r--r--  kernel-armv7hl.config                                                   4
-rw-r--r--  kernel-i686-PAE.config                                                  4
-rw-r--r--  kernel-i686.config                                                      4
-rw-r--r--  kernel-ppc64.config                                                     4
-rw-r--r--  kernel-ppc64le.config                                                   4
-rw-r--r--  kernel-ppc64p7.config                                                   4
-rw-r--r--  kernel-s390x.config                                                     4
-rw-r--r--  kernel-x86_64.config                                                    4
-rw-r--r--  kernel.spec                                                            25
-rw-r--r--  mm-fix-new-crash-in-unmapped_area_topdown.patch                        53
-rw-r--r--  mm-larger-stack-guard-gap-between-vmas.patch                          889
-rw-r--r--  sources                                                                 2
19 files changed, 1139 insertions(+), 239 deletions(-)
diff --git a/0001-efi-Fix-boot-panic-because-of-invalid-BGRT-image-add.patch b/0001-efi-Fix-boot-panic-because-of-invalid-BGRT-image-add.patch
deleted file mode 100644
index 4a714e36d..000000000
--- a/0001-efi-Fix-boot-panic-because-of-invalid-BGRT-image-add.patch
+++ /dev/null
@@ -1,114 +0,0 @@
-From 87c19e8de4f56d803d133c3e38bbd7b069e06df3 Mon Sep 17 00:00:00 2001
-From: Dave Young <dyoung@redhat.com>
-Date: Fri, 9 Jun 2017 08:45:58 +0000
-Subject: [PATCH] efi: Fix boot panic because of invalid BGRT image address
-
-Maniaxx reported a kernel boot crash in the EFI code, which I emulated
-by using the same invalid phys addr in the code:
-
- BUG: unable to handle kernel paging request at ffffffffff280001
- IP: efi_bgrt_init+0xfb/0x153
- ...
- Call Trace:
- ? bgrt_init+0xbc/0xbc
- acpi_parse_bgrt+0xe/0x12
- acpi_table_parse+0x89/0xb8
- acpi_boot_init+0x445/0x4e2
- ? acpi_parse_x2apic+0x79/0x79
- ? dmi_ignore_irq0_timer_override+0x33/0x33
- setup_arch+0xb63/0xc82
- ? early_idt_handler_array+0x120/0x120
- start_kernel+0xb7/0x443
- ? early_idt_handler_array+0x120/0x120
- x86_64_start_reservations+0x29/0x2b
- x86_64_start_kernel+0x154/0x177
- secondary_startup_64+0x9f/0x9f
-
-There is also a similar bug filed in bugzilla.kernel.org:
-
- https://bugzilla.kernel.org/show_bug.cgi?id=195633
-
-The crash is caused by this commit:
-
- 7b0a911478c7 efi/x86: Move the EFI BGRT init code to early init code
-
-The root cause is the firmware on those machines provides invalid BGRT
-image addresses.
-
-In a kernel before the above commit, BGRT initializes late and uses
-ioremap() to map the image address. ioremap() validates the address; if it
-is not a valid physical address, ioremap() just fails and returns. However,
-the current kernel initializes EFI BGRT early and uses early_memremap(),
-which does not validate the image address, so the kernel panics.
-
-According to the ACPI spec the BGRT image address should fall into
-EFI_BOOT_SERVICES_DATA; see section 5.2.22.4 of the document below:
-
- http://www.uefi.org/sites/default/files/resources/ACPI_6_1.pdf
-
-Fix this issue by validating the image address in efi_bgrt_init(). If the
-image address does not fall into any EFI_BOOT_SERVICES_DATA areas we just
-bail out with a warning message.
-
-Reported-by: Maniaxx <tripleshiftone@gmail.com>
-Signed-off-by: Dave Young <dyoung@redhat.com>
-Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
-Cc: Linus Torvalds <torvalds@linux-foundation.org>
-Cc: Matt Fleming <matt@codeblueprint.co.uk>
-Cc: Peter Zijlstra <peterz@infradead.org>
-Cc: Thomas Gleixner <tglx@linutronix.de>
-Cc: linux-efi@vger.kernel.org
-Fixes: 7b0a911478c7 ("efi/x86: Move the EFI BGRT init code to early init code")
-Link: http://lkml.kernel.org/r/20170609084558.26766-2-ard.biesheuvel@linaro.org
-Signed-off-by: Ingo Molnar <mingo@kernel.org>
-[labbott@redhat.com: Backport to 4.11]
-Signed-off-by: Laura Abbott <labbott@redhat.com>
----
- arch/x86/platform/efi/efi-bgrt.c | 24 ++++++++++++++++++++++++
- 1 file changed, 24 insertions(+)
-
-diff --git a/arch/x86/platform/efi/efi-bgrt.c b/arch/x86/platform/efi/efi-bgrt.c
-index 04ca876..08ee795 100644
---- a/arch/x86/platform/efi/efi-bgrt.c
-+++ b/arch/x86/platform/efi/efi-bgrt.c
-@@ -27,6 +27,26 @@ struct bmp_header {
- u32 size;
- } __packed;
-
-+static bool efi_bgrt_addr_valid(u64 addr)
-+{
-+ efi_memory_desc_t *md;
-+
-+ for_each_efi_memory_desc(md) {
-+ u64 size;
-+ u64 end;
-+
-+ if (md->type != EFI_BOOT_SERVICES_DATA)
-+ continue;
-+
-+ size = md->num_pages << EFI_PAGE_SHIFT;
-+ end = md->phys_addr + size;
-+ if (addr >= md->phys_addr && addr < end)
-+ return true;
-+ }
-+
-+ return false;
-+}
-+
- void __init efi_bgrt_init(struct acpi_table_header *table)
- {
- void *image;
-@@ -62,6 +82,10 @@ void __init efi_bgrt_init(struct acpi_table_header *table)
- goto out;
- }
-
-+ if (!efi_bgrt_addr_valid(bgrt->image_address)) {
-+ pr_notice("Ignoring BGRT: invalid image address\n");
-+ goto out;
-+ }
- image = early_memremap(bgrt->image_address, sizeof(bmp_header));
- if (!image) {
- pr_notice("Ignoring BGRT: failed to map image header memory\n");
---
-2.7.5
-
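
The dropped patch above validated a firmware-supplied physical address by
walking the EFI memory map. As a rough illustration of that range-check
pattern, a standalone C sketch follows; the struct and enum names are
hypothetical stand-ins for the kernel's efi_memory_desc_t and
for_each_efi_memory_desc(), not the real API:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the EFI memory descriptors; PAGE_SHIFT of 12
 * assumes 4k pages. */
#define PAGE_SHIFT 12
enum md_type { BOOT_SERVICES_DATA, OTHER_REGION };

struct mem_desc {
	enum md_type type;
	uint64_t phys_addr;
	uint64_t num_pages;
};

/* Same logic as efi_bgrt_addr_valid() in the patch: accept an address only
 * if it lies inside some boot-services-data region of the memory map. */
static bool addr_valid(const struct mem_desc *map, size_t n, uint64_t addr)
{
	for (size_t i = 0; i < n; i++) {
		uint64_t size, end;

		if (map[i].type != BOOT_SERVICES_DATA)
			continue;
		size = map[i].num_pages << PAGE_SHIFT;
		end = map[i].phys_addr + size;
		if (addr >= map[i].phys_addr && addr < end)
			return true;
	}
	return false;	/* invalid firmware address: caller bails out */
}

int main(void)
{
	struct mem_desc map[] = {
		{ OTHER_REGION,       0x00000000, 256 },
		{ BOOT_SERVICES_DATA, 0x80000000, 16  },
	};

	printf("%d\n", addr_valid(map, 2, 0x80001000));	/* 1: inside region */
	printf("%d\n", addr_valid(map, 2, 0xffffffffff280001ULL));	/* 0 */
	return 0;
}
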
diff --git a/RFC-audit-fix-a-race-condition-with-the-auditd-tracking-code.patch b/RFC-audit-fix-a-race-condition-with-the-auditd-tracking-code.patch
new file mode 100644
index 000000000..d79fd256f
--- /dev/null
+++ b/RFC-audit-fix-a-race-condition-with-the-auditd-tracking-code.patch
@@ -0,0 +1,156 @@
+From patchwork Thu Jun 15 15:28:58 2017
+Content-Type: text/plain; charset="utf-8"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+Subject: [RFC] audit: fix a race condition with the auditd tracking code
+From: Paul Moore <pmoore@redhat.com>
+X-Patchwork-Id: 9789009
+Message-Id: <149754053819.11365.5047864735077505545.stgit@sifl>
+To: linux-audit@redhat.com
+Cc: Dusty Mabe <dustymabe@redhat.com>
+Date: Thu, 15 Jun 2017 11:28:58 -0400
+
+From: Paul Moore <paul@paul-moore.com>
+
+Originally reported by Adam and Dusty, it appears we have a small
+race window in kauditd_thread(), as documented in the Fedora BZ:
+
+ * https://bugzilla.redhat.com/show_bug.cgi?id=1459326#c35
+
+ "This issue is partly due to the read-copy nature of RCU, and
+ partly due to how we sync the auditd_connection state across
+ kauditd_thread and the audit control channel. The kauditd_thread
+ thread is always running so it can service the record queues and
+ emit the multicast messages, if it happens to be just past the
+ "main_queue" label, but before the "if (sk == NULL || ...)"
+ if-statement which calls auditd_reset() when the new auditd
+ connection is registered it could end up resetting the auditd
+ connection, regardless of if it is valid or not. This is a rather
+ small window and the variable nature of multi-core scheduling
+ explains why this is proving rather difficult to reproduce."
+
+The fix is to have functions only call auditd_reset() when they
+believe that the kernel/auditd connection is still valid, e.g.
+non-NULL, and to have these callers pass their local copy of the
+auditd_connection pointer to auditd_reset() where it can be compared
+with the current connection state before resetting. If the caller
+has a stale state tracking pointer then the reset is ignored.
+
+We also make a small change to kauditd_thread() so that if the
+kernel/auditd connection is dead we skip the retry queue and send the
+records straight to the hold queue. This is necessary as we used to
+rely on auditd_reset() to occasionally purge the retry queue but we
+are going to be calling the reset function much less now and we want
+to make sure the retry queue doesn't grow unbounded.
+
+Reported-by: Adam Williamson <awilliam@redhat.com>
+Reported-by: Dusty Mabe <dustymabe@redhat.com>
+Signed-off-by: Paul Moore <paul@paul-moore.com>
+Reviewed-by: Richard Guy Briggs <rgb@redhat.com>
+---
+ kernel/audit.c | 36 +++++++++++++++++++++++-------------
+ 1 file changed, 23 insertions(+), 13 deletions(-)
+
+
+--
+Linux-audit mailing list
+Linux-audit@redhat.com
+https://www.redhat.com/mailman/listinfo/linux-audit
+
+diff --git a/kernel/audit.c b/kernel/audit.c
+index b2e877100242..e1e2b3abfb93 100644
+--- a/kernel/audit.c
++++ b/kernel/audit.c
+@@ -575,12 +575,16 @@ static void kauditd_retry_skb(struct sk_buff *skb)
+
+ /**
+ * auditd_reset - Disconnect the auditd connection
++ * @ac: auditd connection state
+ *
+ * Description:
+ * Break the auditd/kauditd connection and move all the queued records into the
+- * hold queue in case auditd reconnects.
++ * hold queue in case auditd reconnects. It is important to note that the @ac
++ * pointer should never be dereferenced inside this function as it may be NULL
++ * or invalid, you can only compare the memory address! If @ac is NULL then
++ * the connection will always be reset.
+ */
+-static void auditd_reset(void)
++static void auditd_reset(const struct auditd_connection *ac)
+ {
+ unsigned long flags;
+ struct sk_buff *skb;
+@@ -590,6 +594,11 @@ static void auditd_reset(void)
+ spin_lock_irqsave(&auditd_conn_lock, flags);
+ ac_old = rcu_dereference_protected(auditd_conn,
+ lockdep_is_held(&auditd_conn_lock));
++ if (ac && ac != ac_old) {
++ /* someone already registered a new auditd connection */
++ spin_unlock_irqrestore(&auditd_conn_lock, flags);
++ return;
++ }
+ rcu_assign_pointer(auditd_conn, NULL);
+ spin_unlock_irqrestore(&auditd_conn_lock, flags);
+
+@@ -649,8 +658,8 @@ static int auditd_send_unicast_skb(struct sk_buff *skb)
+ return rc;
+
+ err:
+- if (rc == -ECONNREFUSED)
+- auditd_reset();
++ if (ac && rc == -ECONNREFUSED)
++ auditd_reset(ac);
+ return rc;
+ }
+
+@@ -795,9 +804,9 @@ static int kauditd_thread(void *dummy)
+ rc = kauditd_send_queue(sk, portid,
+ &audit_hold_queue, UNICAST_RETRIES,
+ NULL, kauditd_rehold_skb);
+- if (rc < 0) {
++ if (ac && rc < 0) {
+ sk = NULL;
+- auditd_reset();
++ auditd_reset(ac);
+ goto main_queue;
+ }
+
+@@ -805,9 +814,9 @@ static int kauditd_thread(void *dummy)
+ rc = kauditd_send_queue(sk, portid,
+ &audit_retry_queue, UNICAST_RETRIES,
+ NULL, kauditd_hold_skb);
+- if (rc < 0) {
++ if (ac && rc < 0) {
+ sk = NULL;
+- auditd_reset();
++ auditd_reset(ac);
+ goto main_queue;
+ }
+
+@@ -815,12 +824,13 @@ static int kauditd_thread(void *dummy)
+ /* process the main queue - do the multicast send and attempt
+ * unicast, dump failed record sends to the retry queue; if
+ * sk == NULL due to previous failures we will just do the
+- * multicast send and move the record to the retry queue */
++ * multicast send and move the record to the hold queue */
+ rc = kauditd_send_queue(sk, portid, &audit_queue, 1,
+ kauditd_send_multicast_skb,
+- kauditd_retry_skb);
+- if (sk == NULL || rc < 0)
+- auditd_reset();
++ (sk ?
++ kauditd_retry_skb : kauditd_hold_skb));
++ if (ac && rc < 0)
++ auditd_reset(ac);
+ sk = NULL;
+
+ /* drop our netns reference, no auditd sends past this line */
+@@ -1230,7 +1240,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ auditd_pid, 1);
+
+ /* unregister the auditd connection */
+- auditd_reset();
++ auditd_reset(NULL);
+ }
+ }
+ if (s.mask & AUDIT_STATUS_RATE_LIMIT) {
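
The core of the audit fix is that a caller may only reset the connection it
actually saw fail: the stale snapshot is compared, never dereferenced. A
simplified userspace sketch of that guard, substituting a plain pthread
mutex for the kernel's auditd_conn_lock spinlock and RCU (all names here
are illustrative):

#include <pthread.h>
#include <stddef.h>

struct conn { int fd; };

static struct conn *cur_conn;	/* current connection, may be NULL */
static pthread_mutex_t conn_lock = PTHREAD_MUTEX_INITIALIZER;

/* Mirror of the patched auditd_reset(): @snapshot is the caller's possibly
 * stale view of the connection; it is only compared, never dereferenced.
 * A NULL snapshot forces an unconditional reset (the control-channel case). */
static void conn_reset(const struct conn *snapshot)
{
	pthread_mutex_lock(&conn_lock);
	if (snapshot && snapshot != cur_conn) {
		/* someone already registered a new connection; resetting now
		 * would kill a healthy connection, so do nothing */
		pthread_mutex_unlock(&conn_lock);
		return;
	}
	cur_conn = NULL;
	pthread_mutex_unlock(&conn_lock);
	/* ...requeue pending records to the hold queue here... */
}

int main(void)
{
	struct conn a = { 3 }, b = { 4 };

	cur_conn = &b;	/* a new connection was registered... */
	conn_reset(&a);	/* ...so this stale reset must be ignored */
	return cur_conn == &b ? 0 : 1;
}
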
diff --git a/baseconfig/CONFIG_B43LEGACY_DEBUG b/baseconfig/CONFIG_B43LEGACY_DEBUG
index 02f67a471..494982463 100644
--- a/baseconfig/CONFIG_B43LEGACY_DEBUG
+++ b/baseconfig/CONFIG_B43LEGACY_DEBUG
@@ -1 +1 @@
-CONFIG_B43LEGACY_DEBUG=y
+# CONFIG_B43LEGACY_DEBUG is not set
diff --git a/baseconfig/CONFIG_B43_DEBUG b/baseconfig/CONFIG_B43_DEBUG
index 9346a4511..a2bf9bb1f 100644
--- a/baseconfig/CONFIG_B43_DEBUG
+++ b/baseconfig/CONFIG_B43_DEBUG
@@ -1 +1 @@
-CONFIG_B43_DEBUG=y
+# CONFIG_B43_DEBUG is not set
diff --git a/drm-i915-Do-not-drop-pagetables-when-empty.patch b/drm-i915-Do-not-drop-pagetables-when-empty.patch
deleted file mode 100644
index 8dcbc81bb..000000000
--- a/drm-i915-Do-not-drop-pagetables-when-empty.patch
+++ /dev/null
@@ -1,95 +0,0 @@
-From patchwork Fri May 26 08:29:06 2017
-Content-Type: text/plain; charset="utf-8"
-MIME-Version: 1.0
-Content-Transfer-Encoding: 8bit
-Subject: drm/i915: Do not drop pagetables when empty
-From: Daniel Vetter <daniel.vetter@ffwll.ch>
-X-Patchwork-Id: 158340
-Message-Id: <20170526082906.8982-1-daniel.vetter@ffwll.ch>
-To: Intel Graphics Development <intel-gfx@lists.freedesktop.org>
-Cc: "# v4 . 10+" <stable@vger.kernel.org>,
- Daniel Vetter <daniel.vetter@intel.com>,
- Mika Kuoppala <mika.kuoppala@intel.com>
-Date: Fri, 26 May 2017 10:29:06 +0200
-
-From: Chris Wilson <chris@chris-wilson.co.uk>
-
-This is the minimal backport for stable of the upstream commit:
-
-commit dd19674bacba227ae5d3ce680cbc5668198894dc
-Author: Chris Wilson <chris@chris-wilson.co.uk>
-Date: Wed Feb 15 08:43:46 2017 +0000
-
- drm/i915: Remove bitmap tracking for used-ptes
-
-Due to a race with the shrinker, when we try to allocate a pagetable, we
-may end up shrinking it instead. This comes as a nasty surprise as we
-try to dereference it to fill in the pagetable entries for the object.
-
-In linus/master this is fixed by pinning the pagetables prior to
-allocation, but that backport is roughly
- drivers/gpu/drm/i915/i915_debugfs.c | 2 +-
- drivers/gpu/drm/i915/i915_gem_evict.c | 12 +-
- drivers/gpu/drm/i915/i915_gem_gtt.c | 2017 ++++++++++++++-------------------
- drivers/gpu/drm/i915/i915_gem_gtt.h | 123 +-
- drivers/gpu/drm/i915/i915_trace.h | 104 --
- drivers/gpu/drm/i915/i915_vgpu.c | 9 +-
- drivers/gpu/drm/i915/i915_vma.c | 9 -
- drivers/gpu/drm/i915/intel_lrc.c | 4 +-
- 8 files changed, 946 insertions(+), 1334 deletions(-)
-i.e. unsuitable for stable. Instead we neuter the code that tried to
-free the pagetables.
-
-Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=99295
-Fixes: 2ce5179fe826 ("drm/i915/gtt: Free unused lower-level page tables")
-Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
-Cc: Michel Thierry <michel.thierry@intel.com>
-Cc: Mika Kuoppala <mika.kuoppala@intel.com>
-Cc: Chris Wilson <chris@chris-wilson.co.uk>
-Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
-Cc: Michał Winiarski <michal.winiarski@intel.com>
-Cc: Daniel Vetter <daniel.vetter@intel.com>
-Cc: Jani Nikula <jani.nikula@linux.intel.com>
-Cc: intel-gfx@lists.freedesktop.org
-Cc: <stable@vger.kernel.org> # v4.10+
-Tested-by: Maël Lavault <mael.lavault@protonmail.com>
-Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
----
- drivers/gpu/drm/i915/i915_gem_gtt.c | 10 ----------
- 1 file changed, 10 deletions(-)
-
-diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
-index 96e45a4d5441..4f581adf2fcf 100644
---- a/drivers/gpu/drm/i915/i915_gem_gtt.c
-+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
-@@ -755,10 +755,6 @@ static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
- GEM_BUG_ON(pte_end > GEN8_PTES);
-
- bitmap_clear(pt->used_ptes, pte, num_entries);
-- if (USES_FULL_PPGTT(vm->i915)) {
-- if (bitmap_empty(pt->used_ptes, GEN8_PTES))
-- return true;
-- }
-
- pt_vaddr = kmap_px(pt);
-
-@@ -798,9 +794,6 @@ static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm,
- }
- }
-
-- if (bitmap_empty(pd->used_pdes, I915_PDES))
-- return true;
--
- return false;
- }
-
-@@ -829,9 +822,6 @@ static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
-
- mark_tlbs_dirty(ppgtt);
-
-- if (bitmap_empty(pdp->used_pdpes, I915_PDPES_PER_PDP(dev_priv)))
-- return true;
--
- return false;
- }
-
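
The removed i915 backport is purely subtractive: once the clear paths stop
reporting a page table as empty, nothing ever frees it behind a concurrent
allocator's back. A toy sketch of that before/after shape, with heavily
simplified, hypothetical structures in place of the real i915 GTT code:

#include <stdbool.h>
#include <string.h>

#define NUM_PTES 512

struct page_table {
	bool used[NUM_PTES];	/* stand-in for the used_ptes bitmap */
};

/* Returns true if the caller should free the table. After the backport the
 * "empty, so free me" answer is simply never given, trading a little memory
 * for immunity to the shrinker race described above. */
static bool clear_range(struct page_table *pt, int first, int count)
{
	memset(&pt->used[first], 0, count * sizeof(pt->used[0]));

#ifdef BEFORE_BACKPORT
	for (int i = 0; i < NUM_PTES; i++)
		if (pt->used[i])
			return false;
	return true;	/* empty: racy; another thread may be filling it */
#else
	return false;	/* never drop the table; keep it allocated */
#endif
}

int main(void)
{
	struct page_table pt = { { true } };

	return clear_range(&pt, 0, NUM_PTES);	/* 0 after the backport */
}
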
diff --git a/kernel-aarch64.config b/kernel-aarch64.config
index 3cb49dc32..beac8fb62 100644
--- a/kernel-aarch64.config
+++ b/kernel-aarch64.config
@@ -401,8 +401,8 @@ CONFIG_AXP288_CHARGER=m
CONFIG_AXP288_FUEL_GAUGE=m
CONFIG_B43_BCMA_PIO=y
CONFIG_B43_BCMA=y
-CONFIG_B43_DEBUG=y
-CONFIG_B43LEGACY_DEBUG=y
+# CONFIG_B43_DEBUG is not set
+# CONFIG_B43LEGACY_DEBUG is not set
CONFIG_B43LEGACY_DMA_AND_PIO_MODE=y
# CONFIG_B43LEGACY_DMA_MODE is not set
CONFIG_B43LEGACY_DMA=y
diff --git a/kernel-armv7hl-lpae.config b/kernel-armv7hl-lpae.config
index c73231c33..fa8259ede 100644
--- a/kernel-armv7hl-lpae.config
+++ b/kernel-armv7hl-lpae.config
@@ -424,8 +424,8 @@ CONFIG_AXP288_CHARGER=m
CONFIG_AXP288_FUEL_GAUGE=m
CONFIG_B43_BCMA_PIO=y
CONFIG_B43_BCMA=y
-CONFIG_B43_DEBUG=y
-CONFIG_B43LEGACY_DEBUG=y
+# CONFIG_B43_DEBUG is not set
+# CONFIG_B43LEGACY_DEBUG is not set
CONFIG_B43LEGACY_DMA_AND_PIO_MODE=y
# CONFIG_B43LEGACY_DMA_MODE is not set
CONFIG_B43LEGACY_DMA=y
diff --git a/kernel-armv7hl.config b/kernel-armv7hl.config
index 6453d392c..c6edbcf37 100644
--- a/kernel-armv7hl.config
+++ b/kernel-armv7hl.config
@@ -439,8 +439,8 @@ CONFIG_AXP288_CHARGER=m
CONFIG_AXP288_FUEL_GAUGE=m
CONFIG_B43_BCMA_PIO=y
CONFIG_B43_BCMA=y
-CONFIG_B43_DEBUG=y
-CONFIG_B43LEGACY_DEBUG=y
+# CONFIG_B43_DEBUG is not set
+# CONFIG_B43LEGACY_DEBUG is not set
CONFIG_B43LEGACY_DMA_AND_PIO_MODE=y
# CONFIG_B43LEGACY_DMA_MODE is not set
CONFIG_B43LEGACY_DMA=y
diff --git a/kernel-i686-PAE.config b/kernel-i686-PAE.config
index b78f88d58..687191c2d 100644
--- a/kernel-i686-PAE.config
+++ b/kernel-i686-PAE.config
@@ -325,8 +325,8 @@ CONFIG_AX25_DAMA_SLAVE=y
CONFIG_AX25=m
CONFIG_B43_BCMA_PIO=y
CONFIG_B43_BCMA=y
-CONFIG_B43_DEBUG=y
-CONFIG_B43LEGACY_DEBUG=y
+# CONFIG_B43_DEBUG is not set
+# CONFIG_B43LEGACY_DEBUG is not set
CONFIG_B43LEGACY_DMA_AND_PIO_MODE=y
# CONFIG_B43LEGACY_DMA_MODE is not set
CONFIG_B43LEGACY_DMA=y
diff --git a/kernel-i686.config b/kernel-i686.config
index 66104bf5f..623df454f 100644
--- a/kernel-i686.config
+++ b/kernel-i686.config
@@ -325,8 +325,8 @@ CONFIG_AX25_DAMA_SLAVE=y
CONFIG_AX25=m
CONFIG_B43_BCMA_PIO=y
CONFIG_B43_BCMA=y
-CONFIG_B43_DEBUG=y
-CONFIG_B43LEGACY_DEBUG=y
+# CONFIG_B43_DEBUG is not set
+# CONFIG_B43LEGACY_DEBUG is not set
CONFIG_B43LEGACY_DMA_AND_PIO_MODE=y
# CONFIG_B43LEGACY_DMA_MODE is not set
CONFIG_B43LEGACY_DMA=y
diff --git a/kernel-ppc64.config b/kernel-ppc64.config
index 11ae013e5..41871c207 100644
--- a/kernel-ppc64.config
+++ b/kernel-ppc64.config
@@ -275,8 +275,8 @@ CONFIG_AX25=m
# CONFIG_AXON_RAM is not set
CONFIG_B43_BCMA_PIO=y
CONFIG_B43_BCMA=y
-CONFIG_B43_DEBUG=y
-CONFIG_B43LEGACY_DEBUG=y
+# CONFIG_B43_DEBUG is not set
+# CONFIG_B43LEGACY_DEBUG is not set
CONFIG_B43LEGACY_DMA_AND_PIO_MODE=y
# CONFIG_B43LEGACY_DMA_MODE is not set
CONFIG_B43LEGACY_DMA=y
diff --git a/kernel-ppc64le.config b/kernel-ppc64le.config
index b1bdc13e1..076dadfb4 100644
--- a/kernel-ppc64le.config
+++ b/kernel-ppc64le.config
@@ -269,8 +269,8 @@ CONFIG_AX25=m
# CONFIG_AXON_RAM is not set
CONFIG_B43_BCMA_PIO=y
CONFIG_B43_BCMA=y
-CONFIG_B43_DEBUG=y
-CONFIG_B43LEGACY_DEBUG=y
+# CONFIG_B43_DEBUG is not set
+# CONFIG_B43LEGACY_DEBUG is not set
CONFIG_B43LEGACY_DMA_AND_PIO_MODE=y
# CONFIG_B43LEGACY_DMA_MODE is not set
CONFIG_B43LEGACY_DMA=y
diff --git a/kernel-ppc64p7.config b/kernel-ppc64p7.config
index d405b8ba9..a199a1b7a 100644
--- a/kernel-ppc64p7.config
+++ b/kernel-ppc64p7.config
@@ -269,8 +269,8 @@ CONFIG_AX25=m
# CONFIG_AXON_RAM is not set
CONFIG_B43_BCMA_PIO=y
CONFIG_B43_BCMA=y
-CONFIG_B43_DEBUG=y
-CONFIG_B43LEGACY_DEBUG=y
+# CONFIG_B43_DEBUG is not set
+# CONFIG_B43LEGACY_DEBUG is not set
CONFIG_B43LEGACY_DMA_AND_PIO_MODE=y
# CONFIG_B43LEGACY_DMA_MODE is not set
CONFIG_B43LEGACY_DMA=y
diff --git a/kernel-s390x.config b/kernel-s390x.config
index ff799a3bd..2d6280c50 100644
--- a/kernel-s390x.config
+++ b/kernel-s390x.config
@@ -269,8 +269,8 @@ CONFIG_AX25_DAMA_SLAVE=y
CONFIG_AX25=m
CONFIG_B43_BCMA_PIO=y
CONFIG_B43_BCMA=y
-CONFIG_B43_DEBUG=y
-CONFIG_B43LEGACY_DEBUG=y
+# CONFIG_B43_DEBUG is not set
+# CONFIG_B43LEGACY_DEBUG is not set
CONFIG_B43LEGACY_DMA_AND_PIO_MODE=y
# CONFIG_B43LEGACY_DMA_MODE is not set
CONFIG_B43LEGACY_DMA=y
diff --git a/kernel-x86_64.config b/kernel-x86_64.config
index 8f09d8b87..7297e5eae 100644
--- a/kernel-x86_64.config
+++ b/kernel-x86_64.config
@@ -324,8 +324,8 @@ CONFIG_AX25_DAMA_SLAVE=y
CONFIG_AX25=m
CONFIG_B43_BCMA_PIO=y
CONFIG_B43_BCMA=y
-CONFIG_B43_DEBUG=y
-CONFIG_B43LEGACY_DEBUG=y
+# CONFIG_B43_DEBUG is not set
+# CONFIG_B43LEGACY_DEBUG is not set
CONFIG_B43LEGACY_DMA_AND_PIO_MODE=y
# CONFIG_B43LEGACY_DMA_MODE is not set
CONFIG_B43LEGACY_DMA=y
diff --git a/kernel.spec b/kernel.spec
index a5164f9a2..11f389548 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -44,7 +44,7 @@ Summary: The Linux kernel
# For non-released -rc kernels, this will be appended after the rcX and
# gitX tags, so a 3 here would become part of release "0.rcX.gitX.3"
#
-%global baserelease 100
+%global baserelease 101
%global fedora_build %{baserelease}
# base_sublevel is the kernel version we're starting with and patching
@@ -59,7 +59,7 @@ Summary: The Linux kernel
# Do we have a -stable update to apply?
-%define stable_update 5
+%define stable_update 6
# Set rpm version accordingly
%if 0%{?stable_update}
%define stablerev %{stable_update}
@@ -639,9 +639,6 @@ Patch864: dell-laptop-Adds-support-for-keyboard-backlight-timeout-AC-settings.pa
Patch866: 0001-SUNRPC-Refactor-svc_set_num_threads.patch
Patch867: 0002-NFSv4-Fix-callback-server-shutdown.patch
-#Fix broadwell issues
-Patch675: drm-i915-Do-not-drop-pagetables-when-empty.patch
-
# rhbz 1455780
Patch676: 2-2-nvme-Quirk-APST-on-Intel-600P-P3100-devices.patch
@@ -653,8 +650,12 @@ Patch679: actual_udpencap_fix.patch
Patch680: 0001-platform-x86-thinkpad_acpi-guard-generic-hotkey-case.patch
Patch681: 0002-platform-x86-thinkpad_acpi-add-mapping-for-new-hotke.patch
-# rhbz 1461337
-Patch682: 0001-efi-Fix-boot-panic-because-of-invalid-BGRT-image-add.patch
+# rhbz 1459326
+Patch683: RFC-audit-fix-a-race-condition-with-the-auditd-tracking-code.patch
+
+# CVE-2017-1000364 rhbz 1462819 1461333
+Patch684: mm-larger-stack-guard-gap-between-vmas.patch
+Patch685: mm-fix-new-crash-in-unmapped_area_topdown.patch
# END OF PATCH DEFINITIONS
@@ -2223,6 +2224,16 @@ fi
#
#
%changelog
+* Tue Jun 20 2017 Laura Abbott <labbott@fedoraproject.org> - 4.11.6-101
+- bump and build
+
+* Mon Jun 19 2017 Laura Abbott <labbott@fedoraproject.org> - 4.11.6-100
+- Linux v4.11.6
+- Fix CVE-2017-1000364 (rhbz 1462819 1461333)
+
+* Fri Jun 16 2017 Laura Abbott <labbott@fedoraproject.org>
+- Fix an auditd race condition (rhbz 1459326)
+
* Wed Jun 14 2017 Laura Abbott <labbott@fedoraproject.org> - 4.11.5-100
- Linux v4.11.5
diff --git a/mm-fix-new-crash-in-unmapped_area_topdown.patch b/mm-fix-new-crash-in-unmapped_area_topdown.patch
new file mode 100644
index 000000000..20da9556f
--- /dev/null
+++ b/mm-fix-new-crash-in-unmapped_area_topdown.patch
@@ -0,0 +1,53 @@
+From patchwork Tue Jun 20 09:10:44 2017
+Content-Type: text/plain; charset="utf-8"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+Subject: mm: fix new crash in unmapped_area_topdown()
+From: Hugh Dickins <hughd@google.com>
+X-Patchwork-Id: 9798991
+Message-Id: <alpine.LSU.2.11.1706200206210.10925@eggly.anvils>
+To: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Dave Jones <davej@codemonkey.org.uk>, Oleg Nesterov <oleg@redhat.com>,
+ Michal Hocko <mhocko@suse.com>, linux-kernel@vger.kernel.org,
+ linux-mm@kvack.org
+Date: Tue, 20 Jun 2017 02:10:44 -0700 (PDT)
+
+Trinity gets kernel BUG at mm/mmap.c:1963! in about 3 minutes of
+mmap testing. That's the VM_BUG_ON(gap_end < gap_start) at the
+end of unmapped_area_topdown(). Linus points out how MAP_FIXED
+(which does not have to respect our stack guard gap intentions)
+could result in gap_end below gap_start there. Fix that, and
+the similar case in its alternative, unmapped_area().
+
+Cc: stable@vger.kernel.org
+Fixes: 1be7107fbe18 ("mm: larger stack guard gap, between vmas")
+Reported-by: Dave Jones <davej@codemonkey.org.uk>
+Debugged-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Hugh Dickins <hughd@google.com>
+---
+
+ mm/mmap.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- 4.12-rc6/mm/mmap.c 2017-06-19 09:06:10.035407505 -0700
++++ linux/mm/mmap.c 2017-06-19 21:09:28.616707311 -0700
+@@ -1817,7 +1817,8 @@ unsigned long unmapped_area(struct vm_un
+ /* Check if current node has a suitable gap */
+ if (gap_start > high_limit)
+ return -ENOMEM;
+- if (gap_end >= low_limit && gap_end - gap_start >= length)
++ if (gap_end >= low_limit &&
++ gap_end > gap_start && gap_end - gap_start >= length)
+ goto found;
+
+ /* Visit right subtree if it looks promising */
+@@ -1920,7 +1921,8 @@ unsigned long unmapped_area_topdown(stru
+ gap_end = vm_start_gap(vma);
+ if (gap_end < low_limit)
+ return -ENOMEM;
+- if (gap_start <= high_limit && gap_end - gap_start >= length)
++ if (gap_start <= high_limit &&
++ gap_end > gap_start && gap_end - gap_start >= length)
+ goto found;
+
+ /* Visit left subtree if it looks promising */
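
The two hunks above share one idea: gap_start and gap_end are unsigned, so
when a MAP_FIXED mapping ends up inside the guard gap and gap_end drops
below gap_start, the old "gap_end - gap_start >= length" test wraps around
and spuriously succeeds. A tiny self-contained demonstration of the
wraparound and the fixed ordering test:

#include <stdio.h>

int main(void)
{
	unsigned long gap_start = 0x200000, gap_end = 0x100000, length = 0x1000;

	/* old check: the unsigned subtraction wraps to a huge value */
	if (gap_end - gap_start >= length)
		printf("old check wrongly passes (diff = %#lx)\n",
		       gap_end - gap_start);

	/* fixed check: reject inverted gaps before subtracting */
	if (gap_end > gap_start && gap_end - gap_start >= length)
		printf("fixed check passes\n");
	else
		printf("fixed check correctly rejects the inverted gap\n");
	return 0;
}
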
diff --git a/mm-larger-stack-guard-gap-between-vmas.patch b/mm-larger-stack-guard-gap-between-vmas.patch
new file mode 100644
index 000000000..45d7987cc
--- /dev/null
+++ b/mm-larger-stack-guard-gap-between-vmas.patch
@@ -0,0 +1,889 @@
+From 1be7107fbe18eed3e319a6c3e83c78254b693acb Mon Sep 17 00:00:00 2001
+From: Hugh Dickins <hughd@google.com>
+Date: Mon, 19 Jun 2017 04:03:24 -0700
+Subject: mm: larger stack guard gap, between vmas
+
+From: Hugh Dickins <hughd@google.com>
+
+commit 1be7107fbe18eed3e319a6c3e83c78254b693acb upstream.
+
+Stack guard page is a useful feature to reduce a risk of stack smashing
+into a different mapping. We have been using a single page gap which
+is sufficient to prevent having stack adjacent to a different mapping.
+But this seems to be insufficient in the light of the stack usage in
+userspace. E.g. glibc uses as large as 64kB alloca() in many commonly
+used functions. Others use constructs like gid_t buffer[NGROUPS_MAX]
+which is 256kB or stack strings with MAX_ARG_STRLEN.
+
+This will become especially dangerous for suid binaries and the default
+no limit for the stack size limit because those applications can be
+tricked to consume a large portion of the stack and a single glibc call
+could jump over the guard page. These attacks are not theoretical,
+unfortunately.
+
+Make those attacks less probable by increasing the stack guard gap
+to 1MB (on systems with 4k pages; but make it depend on the page size
+because systems with larger base pages might cap stack allocations in
+the PAGE_SIZE units) which should cover larger alloca() and VLA stack
+allocations. It is obviously not a full fix because the problem is
+somehow inherent, but it should reduce attack space a lot.
+
+One could argue that the gap size should be configurable from userspace,
+but that can be done later when somebody finds that the new 1MB is wrong
+for some special case applications. For now, add a kernel command line
+option (stack_guard_gap) to specify the stack gap size (in page units).
+
+Implementation wise, first delete all the old code for stack guard page:
+because although we could get away with accounting one extra page in a
+stack vma, accounting a larger gap can break userspace - case in point,
+a program run with "ulimit -S -v 20000" failed when the 1MB gap was
+counted for RLIMIT_AS; similar problems could come with RLIMIT_MLOCK
+and strict non-overcommit mode.
+
+Instead of keeping gap inside the stack vma, maintain the stack guard
+gap as a gap between vmas: using vm_start_gap() in place of vm_start
+(or vm_end_gap() in place of vm_end if VM_GROWSUP) in just those few
+places which need to respect the gap - mainly arch_get_unmapped_area(),
+and the vma tree's subtree_gap support for that.
+
+Original-patch-by: Oleg Nesterov <oleg@redhat.com>
+Original-patch-by: Michal Hocko <mhocko@suse.com>
+Signed-off-by: Hugh Dickins <hughd@google.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Tested-by: Helge Deller <deller@gmx.de> # parisc
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+[wt: backport to 4.11: adjust context]
+Signed-off-by: Willy Tarreau <w@1wt.eu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/admin-guide/kernel-parameters.txt | 7 +
+ arch/arc/mm/mmap.c | 2
+ arch/arm/mm/mmap.c | 4
+ arch/frv/mm/elf-fdpic.c | 2
+ arch/mips/mm/mmap.c | 2
+ arch/parisc/kernel/sys_parisc.c | 15 +-
+ arch/powerpc/mm/hugetlbpage-radix.c | 2
+ arch/powerpc/mm/mmap.c | 4
+ arch/powerpc/mm/slice.c | 2
+ arch/s390/mm/mmap.c | 4
+ arch/sh/mm/mmap.c | 4
+ arch/sparc/kernel/sys_sparc_64.c | 4
+ arch/sparc/mm/hugetlbpage.c | 2
+ arch/tile/mm/hugetlbpage.c | 2
+ arch/x86/kernel/sys_x86_64.c | 4
+ arch/x86/mm/hugetlbpage.c | 2
+ arch/xtensa/kernel/syscall.c | 2
+ fs/hugetlbfs/inode.c | 2
+ fs/proc/task_mmu.c | 4
+ include/linux/mm.h | 53 ++++----
+ mm/gup.c | 5
+ mm/memory.c | 38 ------
+ mm/mmap.c | 149 ++++++++++++++----------
+ 23 files changed, 152 insertions(+), 163 deletions(-)
+
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -3779,6 +3779,13 @@
+ spia_pedr=
+ spia_peddr=
+
++ stack_guard_gap= [MM]
++ override the default stack gap protection. The value
++ is in page units and it defines how many pages prior
++ to (for stacks growing down) resp. after (for stacks
++ growing up) the main stack are reserved for no other
++ mapping. Default value is 256 pages.
++
+ stacktrace [FTRACE]
+ Enabled the stack tracer on boot up.
+
+--- a/arch/arc/mm/mmap.c
++++ b/arch/arc/mm/mmap.c
+@@ -65,7 +65,7 @@ arch_get_unmapped_area(struct file *filp
+
+ vma = find_vma(mm, addr);
+ if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ (!vma || addr + len <= vm_start_gap(vma)))
+ return addr;
+ }
+
+--- a/arch/arm/mm/mmap.c
++++ b/arch/arm/mm/mmap.c
+@@ -90,7 +90,7 @@ arch_get_unmapped_area(struct file *filp
+
+ vma = find_vma(mm, addr);
+ if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ (!vma || addr + len <= vm_start_gap(vma)))
+ return addr;
+ }
+
+@@ -141,7 +141,7 @@ arch_get_unmapped_area_topdown(struct fi
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(mm, addr);
+ if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ (!vma || addr + len <= vm_start_gap(vma)))
+ return addr;
+ }
+
+--- a/arch/frv/mm/elf-fdpic.c
++++ b/arch/frv/mm/elf-fdpic.c
+@@ -75,7 +75,7 @@ unsigned long arch_get_unmapped_area(str
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(current->mm, addr);
+ if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ (!vma || addr + len <= vm_start_gap(vma)))
+ goto success;
+ }
+
+--- a/arch/mips/mm/mmap.c
++++ b/arch/mips/mm/mmap.c
+@@ -93,7 +93,7 @@ static unsigned long arch_get_unmapped_a
+
+ vma = find_vma(mm, addr);
+ if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ (!vma || addr + len <= vm_start_gap(vma)))
+ return addr;
+ }
+
+--- a/arch/parisc/kernel/sys_parisc.c
++++ b/arch/parisc/kernel/sys_parisc.c
+@@ -90,7 +90,7 @@ unsigned long arch_get_unmapped_area(str
+ unsigned long len, unsigned long pgoff, unsigned long flags)
+ {
+ struct mm_struct *mm = current->mm;
+- struct vm_area_struct *vma;
++ struct vm_area_struct *vma, *prev;
+ unsigned long task_size = TASK_SIZE;
+ int do_color_align, last_mmap;
+ struct vm_unmapped_area_info info;
+@@ -117,9 +117,10 @@ unsigned long arch_get_unmapped_area(str
+ else
+ addr = PAGE_ALIGN(addr);
+
+- vma = find_vma(mm, addr);
++ vma = find_vma_prev(mm, addr, &prev);
+ if (task_size - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ (!vma || addr + len <= vm_start_gap(vma)) &&
++ (!prev || addr >= vm_end_gap(prev)))
+ goto found_addr;
+ }
+
+@@ -143,7 +144,7 @@ arch_get_unmapped_area_topdown(struct fi
+ const unsigned long len, const unsigned long pgoff,
+ const unsigned long flags)
+ {
+- struct vm_area_struct *vma;
++ struct vm_area_struct *vma, *prev;
+ struct mm_struct *mm = current->mm;
+ unsigned long addr = addr0;
+ int do_color_align, last_mmap;
+@@ -177,9 +178,11 @@ arch_get_unmapped_area_topdown(struct fi
+ addr = COLOR_ALIGN(addr, last_mmap, pgoff);
+ else
+ addr = PAGE_ALIGN(addr);
+- vma = find_vma(mm, addr);
++
++ vma = find_vma_prev(mm, addr, &prev);
+ if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ (!vma || addr + len <= vm_start_gap(vma)) &&
++ (!prev || addr >= vm_end_gap(prev)))
+ goto found_addr;
+ }
+
+--- a/arch/powerpc/mm/hugetlbpage-radix.c
++++ b/arch/powerpc/mm/hugetlbpage-radix.c
+@@ -65,7 +65,7 @@ radix__hugetlb_get_unmapped_area(struct
+ addr = ALIGN(addr, huge_page_size(h));
+ vma = find_vma(mm, addr);
+ if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ (!vma || addr + len <= vm_start_gap(vma)))
+ return addr;
+ }
+ /*
+--- a/arch/powerpc/mm/mmap.c
++++ b/arch/powerpc/mm/mmap.c
+@@ -107,7 +107,7 @@ radix__arch_get_unmapped_area(struct fil
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(mm, addr);
+ if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
+- (!vma || addr + len <= vma->vm_start))
++ (!vma || addr + len <= vm_start_gap(vma)))
+ return addr;
+ }
+
+@@ -143,7 +143,7 @@ radix__arch_get_unmapped_area_topdown(st
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(mm, addr);
+ if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
+- (!vma || addr + len <= vma->vm_start))
++ (!vma || addr + len <= vm_start_gap(vma)))
+ return addr;
+ }
+
+--- a/arch/powerpc/mm/slice.c
++++ b/arch/powerpc/mm/slice.c
+@@ -105,7 +105,7 @@ static int slice_area_is_free(struct mm_
+ if ((mm->task_size - len) < addr)
+ return 0;
+ vma = find_vma(mm, addr);
+- return (!vma || (addr + len) <= vma->vm_start);
++ return (!vma || (addr + len) <= vm_start_gap(vma));
+ }
+
+ static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
+--- a/arch/s390/mm/mmap.c
++++ b/arch/s390/mm/mmap.c
+@@ -100,7 +100,7 @@ arch_get_unmapped_area(struct file *filp
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(mm, addr);
+ if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
+- (!vma || addr + len <= vma->vm_start))
++ (!vma || addr + len <= vm_start_gap(vma)))
+ return addr;
+ }
+
+@@ -138,7 +138,7 @@ arch_get_unmapped_area_topdown(struct fi
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(mm, addr);
+ if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
+- (!vma || addr + len <= vma->vm_start))
++ (!vma || addr + len <= vm_start_gap(vma)))
+ return addr;
+ }
+
+--- a/arch/sh/mm/mmap.c
++++ b/arch/sh/mm/mmap.c
+@@ -64,7 +64,7 @@ unsigned long arch_get_unmapped_area(str
+
+ vma = find_vma(mm, addr);
+ if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ (!vma || addr + len <= vm_start_gap(vma)))
+ return addr;
+ }
+
+@@ -114,7 +114,7 @@ arch_get_unmapped_area_topdown(struct fi
+
+ vma = find_vma(mm, addr);
+ if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ (!vma || addr + len <= vm_start_gap(vma)))
+ return addr;
+ }
+
+--- a/arch/sparc/kernel/sys_sparc_64.c
++++ b/arch/sparc/kernel/sys_sparc_64.c
+@@ -120,7 +120,7 @@ unsigned long arch_get_unmapped_area(str
+
+ vma = find_vma(mm, addr);
+ if (task_size - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ (!vma || addr + len <= vm_start_gap(vma)))
+ return addr;
+ }
+
+@@ -183,7 +183,7 @@ arch_get_unmapped_area_topdown(struct fi
+
+ vma = find_vma(mm, addr);
+ if (task_size - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ (!vma || addr + len <= vm_start_gap(vma)))
+ return addr;
+ }
+
+--- a/arch/sparc/mm/hugetlbpage.c
++++ b/arch/sparc/mm/hugetlbpage.c
+@@ -120,7 +120,7 @@ hugetlb_get_unmapped_area(struct file *f
+ addr = ALIGN(addr, huge_page_size(h));
+ vma = find_vma(mm, addr);
+ if (task_size - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ (!vma || addr + len <= vm_start_gap(vma)))
+ return addr;
+ }
+ if (mm->get_unmapped_area == arch_get_unmapped_area)
+--- a/arch/tile/mm/hugetlbpage.c
++++ b/arch/tile/mm/hugetlbpage.c
+@@ -233,7 +233,7 @@ unsigned long hugetlb_get_unmapped_area(
+ addr = ALIGN(addr, huge_page_size(h));
+ vma = find_vma(mm, addr);
+ if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ (!vma || addr + len <= vm_start_gap(vma)))
+ return addr;
+ }
+ if (current->mm->get_unmapped_area == arch_get_unmapped_area)
+--- a/arch/x86/kernel/sys_x86_64.c
++++ b/arch/x86/kernel/sys_x86_64.c
+@@ -141,7 +141,7 @@ arch_get_unmapped_area(struct file *filp
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(mm, addr);
+ if (end - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ (!vma || addr + len <= vm_start_gap(vma)))
+ return addr;
+ }
+
+@@ -184,7 +184,7 @@ arch_get_unmapped_area_topdown(struct fi
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(mm, addr);
+ if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ (!vma || addr + len <= vm_start_gap(vma)))
+ return addr;
+ }
+
+--- a/arch/x86/mm/hugetlbpage.c
++++ b/arch/x86/mm/hugetlbpage.c
+@@ -145,7 +145,7 @@ hugetlb_get_unmapped_area(struct file *f
+ addr = ALIGN(addr, huge_page_size(h));
+ vma = find_vma(mm, addr);
+ if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ (!vma || addr + len <= vm_start_gap(vma)))
+ return addr;
+ }
+ if (mm->get_unmapped_area == arch_get_unmapped_area)
+--- a/arch/xtensa/kernel/syscall.c
++++ b/arch/xtensa/kernel/syscall.c
+@@ -88,7 +88,7 @@ unsigned long arch_get_unmapped_area(str
+ /* At this point: (!vmm || addr < vmm->vm_end). */
+ if (TASK_SIZE - len < addr)
+ return -ENOMEM;
+- if (!vmm || addr + len <= vmm->vm_start)
++ if (!vmm || addr + len <= vm_start_gap(vmm))
+ return addr;
+ addr = vmm->vm_end;
+ if (flags & MAP_SHARED)
+--- a/fs/hugetlbfs/inode.c
++++ b/fs/hugetlbfs/inode.c
+@@ -200,7 +200,7 @@ hugetlb_get_unmapped_area(struct file *f
+ addr = ALIGN(addr, huge_page_size(h));
+ vma = find_vma(mm, addr);
+ if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ (!vma || addr + len <= vm_start_gap(vma)))
+ return addr;
+ }
+
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -300,11 +300,7 @@ show_map_vma(struct seq_file *m, struct
+
+ /* We don't show the stack guard page in /proc/maps */
+ start = vma->vm_start;
+- if (stack_guard_page_start(vma, start))
+- start += PAGE_SIZE;
+ end = vma->vm_end;
+- if (stack_guard_page_end(vma, end))
+- end -= PAGE_SIZE;
+
+ seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
+ seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -1381,12 +1381,6 @@ int clear_page_dirty_for_io(struct page
+
+ int get_cmdline(struct task_struct *task, char *buffer, int buflen);
+
+-/* Is the vma a continuation of the stack vma above it? */
+-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
+-{
+- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
+-}
+-
+ static inline bool vma_is_anonymous(struct vm_area_struct *vma)
+ {
+ return !vma->vm_ops;
+@@ -1402,28 +1396,6 @@ bool vma_is_shmem(struct vm_area_struct
+ static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; }
+ #endif
+
+-static inline int stack_guard_page_start(struct vm_area_struct *vma,
+- unsigned long addr)
+-{
+- return (vma->vm_flags & VM_GROWSDOWN) &&
+- (vma->vm_start == addr) &&
+- !vma_growsdown(vma->vm_prev, addr);
+-}
+-
+-/* Is the vma a continuation of the stack vma below it? */
+-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
+-{
+- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
+-}
+-
+-static inline int stack_guard_page_end(struct vm_area_struct *vma,
+- unsigned long addr)
+-{
+- return (vma->vm_flags & VM_GROWSUP) &&
+- (vma->vm_end == addr) &&
+- !vma_growsup(vma->vm_next, addr);
+-}
+-
+ int vma_is_stack_for_current(struct vm_area_struct *vma);
+
+ extern unsigned long move_page_tables(struct vm_area_struct *vma,
+@@ -2210,6 +2182,7 @@ void page_cache_async_readahead(struct a
+ pgoff_t offset,
+ unsigned long size);
+
++extern unsigned long stack_guard_gap;
+ /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
+ extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
+
+@@ -2238,6 +2211,30 @@ static inline struct vm_area_struct * fi
+ return vma;
+ }
+
++static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
++{
++ unsigned long vm_start = vma->vm_start;
++
++ if (vma->vm_flags & VM_GROWSDOWN) {
++ vm_start -= stack_guard_gap;
++ if (vm_start > vma->vm_start)
++ vm_start = 0;
++ }
++ return vm_start;
++}
++
++static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
++{
++ unsigned long vm_end = vma->vm_end;
++
++ if (vma->vm_flags & VM_GROWSUP) {
++ vm_end += stack_guard_gap;
++ if (vm_end < vma->vm_end)
++ vm_end = -PAGE_SIZE;
++ }
++ return vm_end;
++}
++
+ static inline unsigned long vma_pages(struct vm_area_struct *vma)
+ {
+ return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+--- a/mm/gup.c
++++ b/mm/gup.c
+@@ -387,11 +387,6 @@ static int faultin_page(struct task_stru
+ /* mlock all present pages, but do not fault in new pages */
+ if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK)
+ return -ENOENT;
+- /* For mm_populate(), just skip the stack guard page. */
+- if ((*flags & FOLL_POPULATE) &&
+- (stack_guard_page_start(vma, address) ||
+- stack_guard_page_end(vma, address + PAGE_SIZE)))
+- return -ENOENT;
+ if (*flags & FOLL_WRITE)
+ fault_flags |= FAULT_FLAG_WRITE;
+ if (*flags & FOLL_REMOTE)
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -2855,40 +2855,6 @@ out_release:
+ }
+
+ /*
+- * This is like a special single-page "expand_{down|up}wards()",
+- * except we must first make sure that 'address{-|+}PAGE_SIZE'
+- * doesn't hit another vma.
+- */
+-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
+-{
+- address &= PAGE_MASK;
+- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
+- struct vm_area_struct *prev = vma->vm_prev;
+-
+- /*
+- * Is there a mapping abutting this one below?
+- *
+- * That's only ok if it's the same stack mapping
+- * that has gotten split..
+- */
+- if (prev && prev->vm_end == address)
+- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
+-
+- return expand_downwards(vma, address - PAGE_SIZE);
+- }
+- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
+- struct vm_area_struct *next = vma->vm_next;
+-
+- /* As VM_GROWSDOWN but s/below/above/ */
+- if (next && next->vm_start == address + PAGE_SIZE)
+- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
+-
+- return expand_upwards(vma, address + PAGE_SIZE);
+- }
+- return 0;
+-}
+-
+-/*
+ * We enter with non-exclusive mmap_sem (to exclude vma changes,
+ * but allow concurrent faults), and pte mapped but not yet locked.
+ * We return with mmap_sem still held, but pte unmapped and unlocked.
+@@ -2904,10 +2870,6 @@ static int do_anonymous_page(struct vm_f
+ if (vma->vm_flags & VM_SHARED)
+ return VM_FAULT_SIGBUS;
+
+- /* Check if we need to add a guard page to the stack */
+- if (check_stack_guard_page(vma, vmf->address) < 0)
+- return VM_FAULT_SIGSEGV;
+-
+ /*
+ * Use pte_alloc() instead of pte_alloc_map(). We can't run
+ * pte_offset_map() on pmds where a huge pmd might be created
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -183,6 +183,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
+ unsigned long retval;
+ unsigned long newbrk, oldbrk;
+ struct mm_struct *mm = current->mm;
++ struct vm_area_struct *next;
+ unsigned long min_brk;
+ bool populate;
+ LIST_HEAD(uf);
+@@ -229,7 +230,8 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
+ }
+
+ /* Check against existing mmap mappings. */
+- if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
++ next = find_vma(mm, oldbrk);
++ if (next && newbrk + PAGE_SIZE > vm_start_gap(next))
+ goto out;
+
+ /* Ok, looks good - let it rip. */
+@@ -253,10 +255,22 @@ out:
+
+ static long vma_compute_subtree_gap(struct vm_area_struct *vma)
+ {
+- unsigned long max, subtree_gap;
+- max = vma->vm_start;
+- if (vma->vm_prev)
+- max -= vma->vm_prev->vm_end;
++ unsigned long max, prev_end, subtree_gap;
++
++ /*
++ * Note: in the rare case of a VM_GROWSDOWN above a VM_GROWSUP, we
++ * allow two stack_guard_gaps between them here, and when choosing
++ * an unmapped area; whereas when expanding we only require one.
++ * That's a little inconsistent, but keeps the code here simpler.
++ */
++ max = vm_start_gap(vma);
++ if (vma->vm_prev) {
++ prev_end = vm_end_gap(vma->vm_prev);
++ if (max > prev_end)
++ max -= prev_end;
++ else
++ max = 0;
++ }
+ if (vma->vm_rb.rb_left) {
+ subtree_gap = rb_entry(vma->vm_rb.rb_left,
+ struct vm_area_struct, vm_rb)->rb_subtree_gap;
+@@ -352,7 +366,7 @@ static void validate_mm(struct mm_struct
+ anon_vma_unlock_read(anon_vma);
+ }
+
+- highest_address = vma->vm_end;
++ highest_address = vm_end_gap(vma);
+ vma = vma->vm_next;
+ i++;
+ }
+@@ -541,7 +555,7 @@ void __vma_link_rb(struct mm_struct *mm,
+ if (vma->vm_next)
+ vma_gap_update(vma->vm_next);
+ else
+- mm->highest_vm_end = vma->vm_end;
++ mm->highest_vm_end = vm_end_gap(vma);
+
+ /*
+ * vma->vm_prev wasn't known when we followed the rbtree to find the
+@@ -856,7 +870,7 @@ again:
+ vma_gap_update(vma);
+ if (end_changed) {
+ if (!next)
+- mm->highest_vm_end = end;
++ mm->highest_vm_end = vm_end_gap(vma);
+ else if (!adjust_next)
+ vma_gap_update(next);
+ }
+@@ -941,7 +955,7 @@ again:
+ * mm->highest_vm_end doesn't need any update
+ * in remove_next == 1 case.
+ */
+- VM_WARN_ON(mm->highest_vm_end != end);
++ VM_WARN_ON(mm->highest_vm_end != vm_end_gap(vma));
+ }
+ }
+ if (insert && file)
+@@ -1787,7 +1801,7 @@ unsigned long unmapped_area(struct vm_un
+
+ while (true) {
+ /* Visit left subtree if it looks promising */
+- gap_end = vma->vm_start;
++ gap_end = vm_start_gap(vma);
+ if (gap_end >= low_limit && vma->vm_rb.rb_left) {
+ struct vm_area_struct *left =
+ rb_entry(vma->vm_rb.rb_left,
+@@ -1798,7 +1812,7 @@ unsigned long unmapped_area(struct vm_un
+ }
+ }
+
+- gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
++ gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
+ check_current:
+ /* Check if current node has a suitable gap */
+ if (gap_start > high_limit)
+@@ -1825,8 +1839,8 @@ check_current:
+ vma = rb_entry(rb_parent(prev),
+ struct vm_area_struct, vm_rb);
+ if (prev == vma->vm_rb.rb_left) {
+- gap_start = vma->vm_prev->vm_end;
+- gap_end = vma->vm_start;
++ gap_start = vm_end_gap(vma->vm_prev);
++ gap_end = vm_start_gap(vma);
+ goto check_current;
+ }
+ }
+@@ -1890,7 +1904,7 @@ unsigned long unmapped_area_topdown(stru
+
+ while (true) {
+ /* Visit right subtree if it looks promising */
+- gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
++ gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
+ if (gap_start <= high_limit && vma->vm_rb.rb_right) {
+ struct vm_area_struct *right =
+ rb_entry(vma->vm_rb.rb_right,
+@@ -1903,7 +1917,7 @@ unsigned long unmapped_area_topdown(stru
+
+ check_current:
+ /* Check if current node has a suitable gap */
+- gap_end = vma->vm_start;
++ gap_end = vm_start_gap(vma);
+ if (gap_end < low_limit)
+ return -ENOMEM;
+ if (gap_start <= high_limit && gap_end - gap_start >= length)
+@@ -1929,7 +1943,7 @@ check_current:
+ struct vm_area_struct, vm_rb);
+ if (prev == vma->vm_rb.rb_right) {
+ gap_start = vma->vm_prev ?
+- vma->vm_prev->vm_end : 0;
++ vm_end_gap(vma->vm_prev) : 0;
+ goto check_current;
+ }
+ }
+@@ -1967,7 +1981,7 @@ arch_get_unmapped_area(struct file *filp
+ unsigned long len, unsigned long pgoff, unsigned long flags)
+ {
+ struct mm_struct *mm = current->mm;
+- struct vm_area_struct *vma;
++ struct vm_area_struct *vma, *prev;
+ struct vm_unmapped_area_info info;
+
+ if (len > TASK_SIZE - mmap_min_addr)
+@@ -1978,9 +1992,10 @@ arch_get_unmapped_area(struct file *filp
+
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+- vma = find_vma(mm, addr);
++ vma = find_vma_prev(mm, addr, &prev);
+ if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
+- (!vma || addr + len <= vma->vm_start))
++ (!vma || addr + len <= vm_start_gap(vma)) &&
++ (!prev || addr >= vm_end_gap(prev)))
+ return addr;
+ }
+
+@@ -2003,7 +2018,7 @@ arch_get_unmapped_area_topdown(struct fi
+ const unsigned long len, const unsigned long pgoff,
+ const unsigned long flags)
+ {
+- struct vm_area_struct *vma;
++ struct vm_area_struct *vma, *prev;
+ struct mm_struct *mm = current->mm;
+ unsigned long addr = addr0;
+ struct vm_unmapped_area_info info;
+@@ -2018,9 +2033,10 @@ arch_get_unmapped_area_topdown(struct fi
+ /* requesting a specific address */
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+- vma = find_vma(mm, addr);
++ vma = find_vma_prev(mm, addr, &prev);
+ if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
+- (!vma || addr + len <= vma->vm_start))
++ (!vma || addr + len <= vm_start_gap(vma)) &&
++ (!prev || addr >= vm_end_gap(prev)))
+ return addr;
+ }
+
+@@ -2155,21 +2171,19 @@ find_vma_prev(struct mm_struct *mm, unsi
+ * update accounting. This is shared with both the
+ * grow-up and grow-down cases.
+ */
+-static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow)
++static int acct_stack_growth(struct vm_area_struct *vma,
++ unsigned long size, unsigned long grow)
+ {
+ struct mm_struct *mm = vma->vm_mm;
+ struct rlimit *rlim = current->signal->rlim;
+- unsigned long new_start, actual_size;
++ unsigned long new_start;
+
+ /* address space limit tests */
+ if (!may_expand_vm(mm, vma->vm_flags, grow))
+ return -ENOMEM;
+
+ /* Stack limit test */
+- actual_size = size;
+- if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
+- actual_size -= PAGE_SIZE;
+- if (actual_size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur))
++ if (size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur))
+ return -ENOMEM;
+
+ /* mlock limit tests */
+@@ -2207,17 +2221,30 @@ static int acct_stack_growth(struct vm_a
+ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
+ {
+ struct mm_struct *mm = vma->vm_mm;
++ struct vm_area_struct *next;
++ unsigned long gap_addr;
+ int error = 0;
+
+ if (!(vma->vm_flags & VM_GROWSUP))
+ return -EFAULT;
+
+ /* Guard against wrapping around to address 0. */
+- if (address < PAGE_ALIGN(address+4))
+- address = PAGE_ALIGN(address+4);
+- else
++ address &= PAGE_MASK;
++ address += PAGE_SIZE;
++ if (!address)
+ return -ENOMEM;
+
++ /* Enforce stack_guard_gap */
++ gap_addr = address + stack_guard_gap;
++ if (gap_addr < address)
++ return -ENOMEM;
++ next = vma->vm_next;
++ if (next && next->vm_start < gap_addr) {
++ if (!(next->vm_flags & VM_GROWSUP))
++ return -ENOMEM;
++ /* Check that both stack segments have the same anon_vma? */
++ }
++
+ /* We must make sure the anon_vma is allocated. */
+ if (unlikely(anon_vma_prepare(vma)))
+ return -ENOMEM;
+@@ -2261,7 +2288,7 @@ int expand_upwards(struct vm_area_struct
+ if (vma->vm_next)
+ vma_gap_update(vma->vm_next);
+ else
+- mm->highest_vm_end = address;
++ mm->highest_vm_end = vm_end_gap(vma);
+ spin_unlock(&mm->page_table_lock);
+
+ perf_event_mmap(vma);
+@@ -2282,6 +2309,8 @@ int expand_downwards(struct vm_area_stru
+ unsigned long address)
+ {
+ struct mm_struct *mm = vma->vm_mm;
++ struct vm_area_struct *prev;
++ unsigned long gap_addr;
+ int error;
+
+ address &= PAGE_MASK;
+@@ -2289,6 +2318,17 @@ int expand_downwards(struct vm_area_stru
+ if (error)
+ return error;
+
++ /* Enforce stack_guard_gap */
++ gap_addr = address - stack_guard_gap;
++ if (gap_addr > address)
++ return -ENOMEM;
++ prev = vma->vm_prev;
++ if (prev && prev->vm_end > gap_addr) {
++ if (!(prev->vm_flags & VM_GROWSDOWN))
++ return -ENOMEM;
++ /* Check that both stack segments have the same anon_vma? */
++ }
++
+ /* We must make sure the anon_vma is allocated. */
+ if (unlikely(anon_vma_prepare(vma)))
+ return -ENOMEM;
+@@ -2343,28 +2383,25 @@ int expand_downwards(struct vm_area_stru
+ return error;
+ }
+
+-/*
+- * Note how expand_stack() refuses to expand the stack all the way to
+- * abut the next virtual mapping, *unless* that mapping itself is also
+- * a stack mapping. We want to leave room for a guard page, after all
+- * (the guard page itself is not added here, that is done by the
+- * actual page faulting logic)
+- *
+- * This matches the behavior of the guard page logic (see mm/memory.c:
+- * check_stack_guard_page()), which only allows the guard page to be
+- * removed under these circumstances.
+- */
++/* enforced gap between the expanding stack and other mappings. */
++unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT;
++
++static int __init cmdline_parse_stack_guard_gap(char *p)
++{
++ unsigned long val;
++ char *endptr;
++
++ val = simple_strtoul(p, &endptr, 10);
++ if (!*endptr)
++ stack_guard_gap = val << PAGE_SHIFT;
++
++ return 0;
++}
++__setup("stack_guard_gap=", cmdline_parse_stack_guard_gap);
++
+ #ifdef CONFIG_STACK_GROWSUP
+ int expand_stack(struct vm_area_struct *vma, unsigned long address)
+ {
+- struct vm_area_struct *next;
+-
+- address &= PAGE_MASK;
+- next = vma->vm_next;
+- if (next && next->vm_start == address + PAGE_SIZE) {
+- if (!(next->vm_flags & VM_GROWSUP))
+- return -ENOMEM;
+- }
+ return expand_upwards(vma, address);
+ }
+
+@@ -2386,14 +2423,6 @@ find_extend_vma(struct mm_struct *mm, un
+ #else
+ int expand_stack(struct vm_area_struct *vma, unsigned long address)
+ {
+- struct vm_area_struct *prev;
+-
+- address &= PAGE_MASK;
+- prev = vma->vm_prev;
+- if (prev && prev->vm_end == address) {
+- if (!(prev->vm_flags & VM_GROWSDOWN))
+- return -ENOMEM;
+- }
+ return expand_downwards(vma, address);
+ }
+
+@@ -2491,7 +2520,7 @@ detach_vmas_to_be_unmapped(struct mm_str
+ vma->vm_prev = prev;
+ vma_gap_update(vma);
+ } else
+- mm->highest_vm_end = prev ? prev->vm_end : 0;
++ mm->highest_vm_end = prev ? vm_end_gap(prev) : 0;
+ tail_vma->vm_next = NULL;
+
+ /* Kill the cache */
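
Most of this large patch is mechanical rewiring of callers from
vm_start/vm_end to the two new helpers that widen a stack VMA's apparent
footprint by stack_guard_gap, clamping when the arithmetic would wrap
around the address space. A standalone, compilable rendition of those two
helpers from the include/linux/mm.h hunk above:

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define VM_GROWSDOWN	0x1UL
#define VM_GROWSUP	0x2UL

/* default gap: 256 pages, i.e. 1MB with 4k pages, as in the patch */
static unsigned long stack_guard_gap = 256UL * PAGE_SIZE;

struct vma { unsigned long vm_start, vm_end, vm_flags; };

/* vm_start_gap(): a grows-down stack "starts" stack_guard_gap bytes early,
 * clamped to 0 if the subtraction underflows */
static unsigned long vm_start_gap(const struct vma *vma)
{
	unsigned long vm_start = vma->vm_start;

	if (vma->vm_flags & VM_GROWSDOWN) {
		vm_start -= stack_guard_gap;
		if (vm_start > vma->vm_start)
			vm_start = 0;
	}
	return vm_start;
}

/* vm_end_gap(): a grows-up stack "ends" stack_guard_gap bytes late,
 * clamped to the top page if the addition overflows */
static unsigned long vm_end_gap(const struct vma *vma)
{
	unsigned long vm_end = vma->vm_end;

	if (vma->vm_flags & VM_GROWSUP) {
		vm_end += stack_guard_gap;
		if (vm_end < vma->vm_end)
			vm_end = -PAGE_SIZE;
	}
	return vm_end;
}

int main(void)
{
	struct vma stack = { 0x7ffd00000000UL, 0x7ffd00021000UL, VM_GROWSDOWN };

	printf("vm_start %#lx -> gap-adjusted %#lx\n",
	       stack.vm_start, vm_start_gap(&stack));
	return 0;
}
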
diff --git a/sources b/sources
index 161469091..910639ba0 100644
--- a/sources
+++ b/sources
@@ -1,3 +1,3 @@
SHA512 (perf-man-4.11.tar.gz) = 0b070d2f10a743329de2f532e2d7e19ef385a3e6ef3c700b591ae2697604dbe542b36e31121b3e37517ee8071ab800386fa8663c24a5b36520a18e096c6eefc8
SHA512 (linux-4.11.tar.xz) = 6610eed97ffb7207c71771198c36179b8244ace7222bebb109507720e26c5f17d918079a56d5febdd8605844d67fb2df0ebe910fa2f2f53690daf6e2a8ad09c3
-SHA512 (patch-4.11.5.xz) = c337470c79961c88b806a449ee3bbb3b5428c1f1d6751133de00b67901a6ad8db2ed8899e0b5ca89ff902f29f58a6721053d25e286a2120e7cf2e578907c8645
+SHA512 (patch-4.11.6.xz) = e0e2de7d721575cd2770fa4fa61a1ecdfd54bb4239725363a90ab3b670aab44531a7c0f198ff769080643e86ce7e4806d26bb436a43437747e123715061b278b