author    Justin M. Forbes <jforbes@fedoraproject.org>  2017-11-30 07:44:30 -0600
committer Justin M. Forbes <jforbes@fedoraproject.org>  2017-11-30 07:44:30 -0600
commit    b6e85294ed80e130c57dc3916634518108400f77 (patch)
tree      e19e811e9e9538174f5537f45422c63c3c22a40b
parent    bff21f88c7129b709157e7cad97bcdef3d2040e1 (diff)
Linux v4.14.3
 0001-mm-thp-Do-not-make-page-table-dirty-unconditionally-.patch   | 108 ++++++++++++++
 1-2-kvm-vmx-Reinstate-support-for-CPUs-without-virtual-NMI.patch  | 296 --------------------------
 kernel.spec                                                       |  12 ++++----
 sources                                                           |   2 +-
 4 files changed, 117 insertions(+), 301 deletions(-)
diff --git a/0001-mm-thp-Do-not-make-page-table-dirty-unconditionally-.patch b/0001-mm-thp-Do-not-make-page-table-dirty-unconditionally-.patch
new file mode 100644
index 000000000..2a1d7b719
--- /dev/null
+++ b/0001-mm-thp-Do-not-make-page-table-dirty-unconditionally-.patch
@@ -0,0 +1,108 @@
+From a8f97366452ed491d13cf1e44241bc0b5740b1f0 Mon Sep 17 00:00:00 2001
+From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Date: Mon, 27 Nov 2017 06:21:25 +0300
+Subject: [PATCH] mm, thp: Do not make page table dirty unconditionally in
+ touch_p[mu]d()
+
+Currently, we unconditionally make the page table entry dirty in
+touch_pmd(). This may result in a false-positive can_follow_write_pmd().
+
+We can avoid the situation by only making the page table entry dirty
+when the caller asks for write access -- FOLL_WRITE.
+
+The patch also changes touch_pud() in the same way.
+
+Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Cc: Michal Hocko <mhocko@suse.com>
+Cc: Hugh Dickins <hughd@google.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+---
+ mm/huge_memory.c | 36 +++++++++++++-----------------------
+ 1 file changed, 13 insertions(+), 23 deletions(-)
+
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 86fe697e8bfb..0e7ded98d114 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -842,20 +842,15 @@ EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud);
+ #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
+
+ static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
+- pmd_t *pmd)
++ pmd_t *pmd, int flags)
+ {
+ pmd_t _pmd;
+
+- /*
+- * We should set the dirty bit only for FOLL_WRITE but for now
+- * the dirty bit in the pmd is meaningless. And if the dirty
+- * bit will become meaningful and we'll only set it with
+- * FOLL_WRITE, an atomic set_bit will be required on the pmd to
+- * set the young bit, instead of the current set_pmd_at.
+- */
+- _pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
++ _pmd = pmd_mkyoung(*pmd);
++ if (flags & FOLL_WRITE)
++ _pmd = pmd_mkdirty(_pmd);
+ if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
+- pmd, _pmd, 1))
++ pmd, _pmd, flags & FOLL_WRITE))
+ update_mmu_cache_pmd(vma, addr, pmd);
+ }
+
+@@ -884,7 +879,7 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
+ return NULL;
+
+ if (flags & FOLL_TOUCH)
+- touch_pmd(vma, addr, pmd);
++ touch_pmd(vma, addr, pmd, flags);
+
+ /*
+ * device mapped pages can only be returned if the
+@@ -995,20 +990,15 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
+
+ #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+ static void touch_pud(struct vm_area_struct *vma, unsigned long addr,
+- pud_t *pud)
++ pud_t *pud, int flags)
+ {
+ pud_t _pud;
+
+- /*
+- * We should set the dirty bit only for FOLL_WRITE but for now
+- * the dirty bit in the pud is meaningless. And if the dirty
+- * bit will become meaningful and we'll only set it with
+- * FOLL_WRITE, an atomic set_bit will be required on the pud to
+- * set the young bit, instead of the current set_pud_at.
+- */
+- _pud = pud_mkyoung(pud_mkdirty(*pud));
++ _pud = pud_mkyoung(*pud);
++ if (flags & FOLL_WRITE)
++ _pud = pud_mkdirty(_pud);
+ if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK,
+- pud, _pud, 1))
++ pud, _pud, flags & FOLL_WRITE))
+ update_mmu_cache_pud(vma, addr, pud);
+ }
+
+@@ -1031,7 +1021,7 @@ struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
+ return NULL;
+
+ if (flags & FOLL_TOUCH)
+- touch_pud(vma, addr, pud);
++ touch_pud(vma, addr, pud, flags);
+
+ /*
+ * device mapped pages can only be returned if the
+@@ -1424,7 +1414,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
+ page = pmd_page(*pmd);
+ VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);
+ if (flags & FOLL_TOUCH)
+- touch_pmd(vma, addr, pmd);
++ touch_pmd(vma, addr, pmd, flags);
+ if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
+ /*
+ * We don't mlock() pte-mapped THPs. This way we can avoid
+--
+2.14.3
+
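For context on what the new patch hardens: CVE-2017-1000405 is a transparent-huge-page variant of Dirty COW. A forced read through get_user_pages() could leave the pmd dirty, and a dirty pmd is taken as proof that copy-on-write already happened. The check being fooled has roughly the following shape -- a sketch of the v4.14-era helper in mm/huge_memory.c, reproduced from memory rather than from this diff, so treat its exact form as an assumption:

	/*
	 * Sketch: with FOLL_FORCE and FOLL_COW set, a dirty pmd is trusted
	 * as evidence of a completed COW -- but touch_pmd() used to dirty
	 * the entry on every FOLL_TOUCH, even for read-only access.
	 */
	static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
	{
		return pmd_write(pmd) ||
		       ((flags & FOLL_FORCE) && (flags & FOLL_COW) &&
			pmd_dirty(pmd));
	}

With the patch applied, touch_pmd() and touch_pud() dirty the entry only for FOLL_WRITE, so a read-only FOLL_FORCE walk can no longer satisfy the pmd_dirty() arm of this check.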
diff --git a/1-2-kvm-vmx-Reinstate-support-for-CPUs-without-virtual-NMI.patch b/1-2-kvm-vmx-Reinstate-support-for-CPUs-without-virtual-NMI.patch
deleted file mode 100644
index ca079af42..000000000
--- a/1-2-kvm-vmx-Reinstate-support-for-CPUs-without-virtual-NMI.patch
+++ /dev/null
@@ -1,296 +0,0 @@
-From patchwork Mon Nov 6 12:31:12 2017
-Content-Type: text/plain; charset="utf-8"
-MIME-Version: 1.0
-Content-Transfer-Encoding: 7bit
-Subject: [1/2] kvm: vmx: Reinstate support for CPUs without virtual NMI
-From: Paolo Bonzini <pbonzini@redhat.com>
-X-Patchwork-Id: 10043403
-Message-Id: <1509971473-74491-2-git-send-email-pbonzini@redhat.com>
-To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org
-Cc: rkrcmar@redhat.com, stable@vger.kernel.org
-Date: Mon, 6 Nov 2017 13:31:12 +0100
-
-This is more or less a revert of commit 2c82878b0cb3 ("KVM: VMX: require
-virtual NMI support", 2017-03-27); it turns out that Core 2 Duo machines
-only had virtual NMIs in some SKUs.
-
-The revert is not trivial because in the meanwhile there have been several
-fixes to nested NMI injection. Therefore, the entire vNMI state is moved
-to struct loaded_vmcs.
-
-Another change compared to before the patch is a simplification here:
-
- if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked &&
- !(is_guest_mode(vcpu) && nested_cpu_has_virtual_nmis(
- get_vmcs12(vcpu))))) {
-
-The final condition here is always true (because nested_cpu_has_virtual_nmis
-is always false) and is removed.
-
-Fixes: 2c82878b0cb38fd516fd612c67852a6bbf282003
-Fixes: https://bugzilla.redhat.com/show_bug.cgi?id=1490803
-Cc: stable@vger.kernel.org
-Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
----
- arch/x86/kvm/vmx.c | 150 +++++++++++++++++++++++++++++++++++++----------------
- 1 file changed, 106 insertions(+), 44 deletions(-)
-
-diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
-index e6c8ffa84968..d6b3b12ae1e2 100644
---- a/arch/x86/kvm/vmx.c
-+++ b/arch/x86/kvm/vmx.c
-@@ -202,6 +202,10 @@ struct loaded_vmcs {
- bool nmi_known_unmasked;
- unsigned long vmcs_host_cr3; /* May not match real cr3 */
- unsigned long vmcs_host_cr4; /* May not match real cr4 */
-+ /* Support for vnmi-less CPUs */
-+ int soft_vnmi_blocked;
-+ ktime_t entry_time;
-+ s64 vnmi_blocked_time;
- struct list_head loaded_vmcss_on_cpu_link;
- };
-
-@@ -1291,6 +1295,11 @@ static inline bool cpu_has_vmx_invpcid(void)
- SECONDARY_EXEC_ENABLE_INVPCID;
- }
-
-+static inline bool cpu_has_virtual_nmis(void)
-+{
-+ return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
-+}
-+
- static inline bool cpu_has_vmx_wbinvd_exit(void)
- {
- return vmcs_config.cpu_based_2nd_exec_ctrl &
-@@ -1348,11 +1357,6 @@ static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit)
- (vmcs12->secondary_vm_exec_control & bit);
- }
-
--static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12)
--{
-- return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS;
--}
--
- static inline bool nested_cpu_has_preemption_timer(struct vmcs12 *vmcs12)
- {
- return vmcs12->pin_based_vm_exec_control &
-@@ -3712,9 +3716,9 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
- &_vmexit_control) < 0)
- return -EIO;
-
-- min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING |
-- PIN_BASED_VIRTUAL_NMIS;
-- opt = PIN_BASED_POSTED_INTR | PIN_BASED_VMX_PREEMPTION_TIMER;
-+ min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING;
-+ opt = PIN_BASED_VIRTUAL_NMIS | PIN_BASED_POSTED_INTR |
-+ PIN_BASED_VMX_PREEMPTION_TIMER;
- if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS,
- &_pin_based_exec_control) < 0)
- return -EIO;
-@@ -5669,7 +5673,8 @@ static void enable_irq_window(struct kvm_vcpu *vcpu)
-
- static void enable_nmi_window(struct kvm_vcpu *vcpu)
- {
-- if (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) {
-+ if (!cpu_has_virtual_nmis() ||
-+ vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) {
- enable_irq_window(vcpu);
- return;
- }
-@@ -5709,6 +5714,19 @@ static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
- {
- struct vcpu_vmx *vmx = to_vmx(vcpu);
-
-+ if (!cpu_has_virtual_nmis()) {
-+ /*
-+ * Tracking the NMI-blocked state in software is built upon
-+ * finding the next open IRQ window. This, in turn, depends on
-+ * well-behaving guests: They have to keep IRQs disabled at
-+ * least as long as the NMI handler runs. Otherwise we may
-+ * cause NMI nesting, maybe breaking the guest. But as this is
-+ * highly unlikely, we can live with the residual risk.
-+ */
-+ vmx->loaded_vmcs->soft_vnmi_blocked = 1;
-+ vmx->loaded_vmcs->vnmi_blocked_time = 0;
-+ }
-+
- ++vcpu->stat.nmi_injections;
- vmx->loaded_vmcs->nmi_known_unmasked = false;
-
-@@ -5727,6 +5745,8 @@ static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- bool masked;
-
-+ if (!cpu_has_virtual_nmis())
-+ return vmx->loaded_vmcs->soft_vnmi_blocked;
- if (vmx->loaded_vmcs->nmi_known_unmasked)
- return false;
- masked = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI;
-@@ -5738,13 +5758,20 @@ static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
- {
- struct vcpu_vmx *vmx = to_vmx(vcpu);
-
-- vmx->loaded_vmcs->nmi_known_unmasked = !masked;
-- if (masked)
-- vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
-- GUEST_INTR_STATE_NMI);
-- else
-- vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
-- GUEST_INTR_STATE_NMI);
-+ if (!cpu_has_virtual_nmis()) {
-+ if (vmx->loaded_vmcs->soft_vnmi_blocked != masked) {
-+ vmx->loaded_vmcs->soft_vnmi_blocked = masked;
-+ vmx->loaded_vmcs->vnmi_blocked_time = 0;
-+ }
-+ } else {
-+ vmx->loaded_vmcs->nmi_known_unmasked = !masked;
-+ if (masked)
-+ vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
-+ GUEST_INTR_STATE_NMI);
-+ else
-+ vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
-+ GUEST_INTR_STATE_NMI);
-+ }
- }
-
- static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
-@@ -5752,6 +5779,10 @@ static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
- if (to_vmx(vcpu)->nested.nested_run_pending)
- return 0;
-
-+ if (!cpu_has_virtual_nmis() &&
-+ to_vmx(vcpu)->loaded_vmcs->soft_vnmi_blocked)
-+ return 0;
-+
- return !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
- (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI
- | GUEST_INTR_STATE_NMI));
-@@ -6479,6 +6510,7 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
- * AAK134, BY25.
- */
- if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
-+ cpu_has_virtual_nmis() &&
- (exit_qualification & INTR_INFO_UNBLOCK_NMI))
- vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI);
-
-@@ -6965,7 +6997,7 @@ static struct loaded_vmcs *nested_get_current_vmcs02(struct vcpu_vmx *vmx)
- }
-
- /* Create a new VMCS */
-- item = kmalloc(sizeof(struct vmcs02_list), GFP_KERNEL);
-+ item = kzalloc(sizeof(struct vmcs02_list), GFP_KERNEL);
- if (!item)
- return NULL;
- item->vmcs02.vmcs = alloc_vmcs();
-@@ -7982,6 +8014,7 @@ static int handle_pml_full(struct kvm_vcpu *vcpu)
- * "blocked by NMI" bit has to be set before next VM entry.
- */
- if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
-+ cpu_has_virtual_nmis() &&
- (exit_qualification & INTR_INFO_UNBLOCK_NMI))
- vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
- GUEST_INTR_STATE_NMI);
-@@ -8826,6 +8859,25 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
- return 0;
- }
-
-+ if (unlikely(!cpu_has_virtual_nmis() &&
-+ vmx->loaded_vmcs->soft_vnmi_blocked)) {
-+ if (vmx_interrupt_allowed(vcpu)) {
-+ vmx->loaded_vmcs->soft_vnmi_blocked = 0;
-+ } else if (vmx->loaded_vmcs->vnmi_blocked_time > 1000000000LL &&
-+ vcpu->arch.nmi_pending) {
-+ /*
-+ * This CPU don't support us in finding the end of an
-+ * NMI-blocked window if the guest runs with IRQs
-+ * disabled. So we pull the trigger after 1 s of
-+ * futile waiting, but inform the user about this.
-+ */
-+ printk(KERN_WARNING "%s: Breaking out of NMI-blocked "
-+ "state on VCPU %d after 1 s timeout\n",
-+ __func__, vcpu->vcpu_id);
-+ vmx->loaded_vmcs->soft_vnmi_blocked = 0;
-+ }
-+ }
-+
- if (exit_reason < kvm_vmx_max_exit_handlers
- && kvm_vmx_exit_handlers[exit_reason])
- return kvm_vmx_exit_handlers[exit_reason](vcpu);
-@@ -9108,33 +9160,38 @@ static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
-
- idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK;
-
-- if (vmx->loaded_vmcs->nmi_known_unmasked)
-- return;
-- /*
-- * Can't use vmx->exit_intr_info since we're not sure what
-- * the exit reason is.
-- */
-- exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
-- unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0;
-- vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
-- /*
-- * SDM 3: 27.7.1.2 (September 2008)
-- * Re-set bit "block by NMI" before VM entry if vmexit caused by
-- * a guest IRET fault.
-- * SDM 3: 23.2.2 (September 2008)
-- * Bit 12 is undefined in any of the following cases:
-- * If the VM exit sets the valid bit in the IDT-vectoring
-- * information field.
-- * If the VM exit is due to a double fault.
-- */
-- if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi &&
-- vector != DF_VECTOR && !idtv_info_valid)
-- vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
-- GUEST_INTR_STATE_NMI);
-- else
-- vmx->loaded_vmcs->nmi_known_unmasked =
-- !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO)
-- & GUEST_INTR_STATE_NMI);
-+ if (cpu_has_virtual_nmis()) {
-+ if (vmx->loaded_vmcs->nmi_known_unmasked)
-+ return;
-+ /*
-+ * Can't use vmx->exit_intr_info since we're not sure what
-+ * the exit reason is.
-+ */
-+ exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
-+ unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0;
-+ vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
-+ /*
-+ * SDM 3: 27.7.1.2 (September 2008)
-+ * Re-set bit "block by NMI" before VM entry if vmexit caused by
-+ * a guest IRET fault.
-+ * SDM 3: 23.2.2 (September 2008)
-+ * Bit 12 is undefined in any of the following cases:
-+ * If the VM exit sets the valid bit in the IDT-vectoring
-+ * information field.
-+ * If the VM exit is due to a double fault.
-+ */
-+ if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi &&
-+ vector != DF_VECTOR && !idtv_info_valid)
-+ vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
-+ GUEST_INTR_STATE_NMI);
-+ else
-+ vmx->loaded_vmcs->nmi_known_unmasked =
-+ !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO)
-+ & GUEST_INTR_STATE_NMI);
-+ } else if (unlikely(vmx->loaded_vmcs->soft_vnmi_blocked))
-+ vmx->loaded_vmcs->vnmi_blocked_time +=
-+ ktime_to_ns(ktime_sub(ktime_get(),
-+ vmx->loaded_vmcs->entry_time));
- }
-
- static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu,
-@@ -9251,6 +9308,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- unsigned long debugctlmsr, cr3, cr4;
-
-+ /* Record the guest's net vcpu time for enforced NMI injections. */
-+ if (unlikely(!cpu_has_virtual_nmis() &&
-+ vmx->loaded_vmcs->soft_vnmi_blocked))
-+ vmx->loaded_vmcs->entry_time = ktime_get();
-+
- /* Don't enter VMX if guest state is invalid, let the exit handler
- start emulation until we arrive back to a valid state */
- if (vmx->emulation_required)
diff --git a/kernel.spec b/kernel.spec
index 1c31fc1a2..faac07516 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -54,7 +54,7 @@ Summary: The Linux kernel
%if 0%{?released_kernel}
# Do we have a -stable update to apply?
-%define stable_update 2
+%define stable_update 3
# Set rpm version accordingly
%if 0%{?stable_update}
%define stablerev %{stable_update}
@@ -650,12 +650,12 @@ Patch623: 0001-PATCH-staging-rtl8822be-fix-wrong-dma-unmap-len.patch
# rhbz 1509461
Patch625: v3-2-2-Input-synaptics---Lenovo-X1-Carbon-5-should-use-SMBUS-RMI.patch
-# rhbz 1490803
-Patch626: 1-2-kvm-vmx-Reinstate-support-for-CPUs-without-virtual-NMI.patch
-
# Fixes for QXL issues
Patch627: qxl-fixes.patch
+# CVE-2017-1000405 rhbz 1516514 1519115
+Patch628: 0001-mm-thp-Do-not-make-page-table-dirty-unconditionally-.patch
+
# END OF PATCH DEFINITIONS
%endif
@@ -2210,6 +2210,10 @@ fi
#
#
%changelog
+* Thu Nov 30 2017 Justin M. Forbes <jforbes@fedoraproject.org> - 4.14.3-300
+- Linux v4.14.3
+- Fix CVE-2017-1000405 (rhbz 1516514 1519115)
+
* Fri Nov 24 2017 Peter Robinson <pbrobinson@fedoraproject.org> 4.14.2-300
- Linux v4.14.2
diff --git a/sources b/sources
index 9c3e8f19b..5cfd93eb5 100644
--- a/sources
+++ b/sources
@@ -1,3 +1,3 @@
SHA512 (linux-4.14.tar.xz) = 77e43a02d766c3d73b7e25c4aafb2e931d6b16e870510c22cef0cdb05c3acb7952b8908ebad12b10ef982c6efbe286364b1544586e715cf38390e483927904d8
SHA512 (perf-man-4.14.tar.gz) = 76a9d8adc284cdffd4b3fbb060e7f9a14109267707ce1d03f4c3239cd70d8d164f697da3a0f90a363fbcac42a61d3c378afbcc2a86f112c501b9cb5ce74ef9f8
-SHA512 (patch-4.14.2.xz) = 04415954c3c4d3044a6a3da979e59fb18f0eda3fd872a8036ac8947fbbadcd6041384a900973b917353de6e5c1a589eff1db63c029edcb78f38b07868a929f9d
+SHA512 (patch-4.14.3.xz) = 36a08a4c1c93c4fefb95273f3bfe4cac724d8e7c4f90d6e42a11c3afbbdd35b537f3380985a730c9aca491359f9bbdc4747ac444dd6b2625443c28df285cf74a