Makefile | 4 + arch/arm/Kconfig | 4 +- arch/arm64/Kconfig | 3 +- arch/s390/include/asm/ipl.h | 1 + arch/s390/kernel/ipl.c | 5 + arch/s390/kernel/setup.c | 4 + arch/x86/kernel/setup.c | 22 +- drivers/acpi/apei/hest.c | 8 + drivers/acpi/irq.c | 17 +- drivers/acpi/scan.c | 9 + drivers/ata/libahci.c | 18 ++ drivers/char/ipmi/ipmi_dmi.c | 15 ++ drivers/char/ipmi/ipmi_msghandler.c | 16 +- drivers/firmware/efi/Makefile | 1 + drivers/firmware/efi/efi.c | 124 ++++++--- drivers/firmware/efi/secureboot.c | 38 +++ drivers/firmware/sysfb.c | 18 +- drivers/gpu/drm/tiny/simpledrm.c | 65 ++++- drivers/hid/hid-rmi.c | 64 ----- drivers/hwtracing/coresight/coresight-etm4x-core.c | 19 ++ drivers/input/rmi4/rmi_driver.c | 124 +++++---- drivers/iommu/iommu.c | 22 ++ drivers/nvme/host/core.c | 22 +- drivers/nvme/host/multipath.c | 19 +- drivers/nvme/host/nvme.h | 4 + drivers/pci/pci-driver.c | 11 + drivers/pci/pci.c | 28 +- drivers/pci/pci.h | 14 +- drivers/pci/pcie/ptm.c | 300 ++++++++++++--------- drivers/pci/quirks.c | 24 ++ drivers/usb/core/hub.c | 7 + include/linux/efi.h | 22 +- include/linux/lsm_hook_defs.h | 2 + include/linux/lsm_hooks.h | 6 + include/linux/pci.h | 3 + include/linux/rmi.h | 1 + include/linux/security.h | 5 + include/net/neighbour.h | 2 +- init/Kconfig | 2 +- kernel/module/signing.c | 9 +- net/core/neighbour.c | 58 ++-- net/ipv4/fib_semantics.c | 8 +- scripts/pahole-flags.sh | 3 + scripts/tags.sh | 2 + security/integrity/platform_certs/load_uefi.c | 6 +- security/lockdown/Kconfig | 13 + security/lockdown/lockdown.c | 1 + security/security.c | 6 + tools/testing/selftests/net/fib_nexthops.sh | 5 + 49 files changed, 793 insertions(+), 391 deletions(-) diff --git a/Makefile b/Makefile index 4f7da26fef78..ce8f093a79f1 100644 --- a/Makefile +++ b/Makefile @@ -18,6 +18,10 @@ $(if $(filter __%, $(MAKECMDGOALS)), \ PHONY := __all __all: +# Set RHEL variables +# Use this spot to avoid future merge conflicts +include Makefile.rhelver + # We are using a recursive build, so we need to do a little thinking # to get the ordering right. # diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 11ecf09aadc8..5d91c8c3cd49 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1387,9 +1387,9 @@ config HIGHMEM If unsure, say n. config HIGHPTE - bool "Allocate 2nd-level pagetables from highmem" if EXPERT + bool "Allocate 2nd-level pagetables from highmem" depends on HIGHMEM - default y + default n help The VM uses one page of physical memory for each page table. 
For systems with a lot of processes, this can use a lot of diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 3795eb5ba1cd..55d299a03150 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -1199,7 +1199,7 @@ endchoice config ARM64_FORCE_52BIT bool "Force 52-bit virtual addresses for userspace" - depends on ARM64_VA_BITS_52 && EXPERT + depends on ARM64_VA_BITS_52 help For systems with 52-bit userspace VAs enabled, the kernel will attempt to maintain compatibility with older software by providing 48-bit VAs @@ -1438,6 +1438,7 @@ config XEN config FORCE_MAX_ZONEORDER int default "14" if ARM64_64K_PAGES + default "13" if (ARCH_THUNDER && !ARM64_64K_PAGES) default "12" if ARM64_16K_PAGES default "11" help diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h index a405b6bb89fb..50827b341fd7 100644 --- a/arch/s390/include/asm/ipl.h +++ b/arch/s390/include/asm/ipl.h @@ -128,6 +128,7 @@ int ipl_report_add_component(struct ipl_report *report, struct kexec_buf *kbuf, unsigned char flags, unsigned short cert); int ipl_report_add_certificate(struct ipl_report *report, void *key, unsigned long addr, unsigned long len); +bool ipl_get_secureboot(void); /* * DIAG 308 support diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index 1cc85b8ff42e..b7ee128c67ce 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c @@ -2216,3 +2216,8 @@ int ipl_report_free(struct ipl_report *report) } #endif + +bool ipl_get_secureboot(void) +{ + return !!ipl_secure_flag; +} diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index bbd4bde4f65d..dd60114ae9c1 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -49,6 +49,7 @@ #include #include #include +#include #include #include @@ -968,6 +969,9 @@ void __init setup_arch(char **cmdline_p) log_component_list(); + if (ipl_get_secureboot()) + security_lock_kernel_down("Secure IPL mode", LOCKDOWN_INTEGRITY_MAX); + /* Have one command line that is parsed and saved in /proc/cmdline */ /* boot_command_line has been already set up in early.c */ *cmdline_p = boot_command_line; diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 216fee7144ee..55e817aa000d 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -1036,6 +1037,13 @@ void __init setup_arch(char **cmdline_p) if (efi_enabled(EFI_BOOT)) efi_init(); + efi_set_secure_boot(boot_params.secure_boot); + +#ifdef CONFIG_LOCK_DOWN_IN_EFI_SECURE_BOOT + if (efi_enabled(EFI_SECURE_BOOT)) + security_lock_kernel_down("EFI Secure Boot mode", LOCKDOWN_INTEGRITY_MAX); +#endif + dmi_setup(); /* @@ -1205,19 +1213,7 @@ void __init setup_arch(char **cmdline_p) /* Allocate bigger log buffer */ setup_log_buf(1); - if (efi_enabled(EFI_BOOT)) { - switch (boot_params.secure_boot) { - case efi_secureboot_mode_disabled: - pr_info("Secure boot disabled\n"); - break; - case efi_secureboot_mode_enabled: - pr_info("Secure boot enabled\n"); - break; - default: - pr_info("Secure boot could not be determined\n"); - break; - } - } + efi_set_secure_boot(boot_params.secure_boot); reserve_initrd(); diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c index 6aef1ee5e1bd..8f146b1b4972 100644 --- a/drivers/acpi/apei/hest.c +++ b/drivers/acpi/apei/hest.c @@ -96,6 +96,14 @@ static int apei_hest_parse(apei_hest_func_t func, void *data) if (hest_disable || !hest_tab) return -EINVAL; +#ifdef CONFIG_ARM64 + /* Ignore broken firmware */ + if 
(!strncmp(hest_tab->header.oem_id, "HPE ", 6) && + !strncmp(hest_tab->header.oem_table_id, "ProLiant", 8) && + MIDR_IMPLEMENTOR(read_cpuid_id()) == ARM_CPU_IMP_APM) + return -EINVAL; +#endif + hest_hdr = (struct acpi_hest_header *)(hest_tab + 1); for (i = 0; i < hest_tab->error_source_count; i++) { len = hest_esrc_len(hest_hdr); diff --git a/drivers/acpi/irq.c b/drivers/acpi/irq.c index dabe45eba055..82cf1ddf25d6 100644 --- a/drivers/acpi/irq.c +++ b/drivers/acpi/irq.c @@ -137,6 +137,7 @@ struct acpi_irq_parse_one_ctx { unsigned int index; unsigned long *res_flags; struct irq_fwspec *fwspec; + bool skip_producer_check; }; /** @@ -208,7 +209,8 @@ static acpi_status acpi_irq_parse_one_cb(struct acpi_resource *ares, return AE_CTRL_TERMINATE; case ACPI_RESOURCE_TYPE_EXTENDED_IRQ: eirq = &ares->data.extended_irq; - if (eirq->producer_consumer == ACPI_PRODUCER) + if (!ctx->skip_producer_check && + eirq->producer_consumer == ACPI_PRODUCER) return AE_OK; if (ctx->index >= eirq->interrupt_count) { ctx->index -= eirq->interrupt_count; @@ -244,8 +246,19 @@ static acpi_status acpi_irq_parse_one_cb(struct acpi_resource *ares, static int acpi_irq_parse_one(acpi_handle handle, unsigned int index, struct irq_fwspec *fwspec, unsigned long *flags) { - struct acpi_irq_parse_one_ctx ctx = { -EINVAL, index, flags, fwspec }; + struct acpi_irq_parse_one_ctx ctx = { -EINVAL, index, flags, fwspec, false }; + /* + * Firmware on arm64-based HPE m400 platform incorrectly marks + * its UART interrupt as ACPI_PRODUCER rather than ACPI_CONSUMER. + * Don't do the producer/consumer check for that device. + */ + if (IS_ENABLED(CONFIG_ARM64)) { + struct acpi_device *adev = acpi_bus_get_acpi_device(handle); + + if (adev && !strcmp(acpi_device_hid(adev), "APMC0D08")) + ctx.skip_producer_check = true; + } acpi_walk_resources(handle, METHOD_NAME__CRS, acpi_irq_parse_one_cb, &ctx); return ctx.rc; } diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index adfeb5770efd..9ed777d8512f 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -1753,6 +1753,15 @@ static bool acpi_device_enumeration_by_parent(struct acpi_device *device) if (!acpi_match_device_ids(device, ignore_serial_bus_ids)) return false; + /* + * Firmware on some arm64 X-Gene platforms will make the UART + * device appear as both a UART and a slave of that UART. Just + * bail out here for X-Gene UARTs. + */ + if (IS_ENABLED(CONFIG_ARM64) && + !strcmp(acpi_device_hid(device), "APMC0D08")) + return false; + INIT_LIST_HEAD(&resource_list); acpi_dev_get_resources(device, &resource_list, acpi_check_serial_bus_slave, diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index cf8c7fd59ada..28a8189be64f 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c @@ -690,6 +690,24 @@ int ahci_stop_engine(struct ata_port *ap) tmp &= ~PORT_CMD_START; writel(tmp, port_mmio + PORT_CMD); +#ifdef CONFIG_ARM64 + /* Rev Ax of Cavium CN99XX needs a hack for port stop */ + if (dev_is_pci(ap->host->dev) && + to_pci_dev(ap->host->dev)->vendor == 0x14e4 && + to_pci_dev(ap->host->dev)->device == 0x9027 && + midr_is_cpu_model_range(read_cpuid_id(), + MIDR_CPU_MODEL(ARM_CPU_IMP_BRCM, BRCM_CPU_PART_VULCAN), + MIDR_CPU_VAR_REV(0, 0), + MIDR_CPU_VAR_REV(0, MIDR_REVISION_MASK))) { + tmp = readl(hpriv->mmio + 0x8000); + udelay(100); + writel(tmp | (1 << 26), hpriv->mmio + 0x8000); + udelay(100); + writel(tmp & ~(1 << 26), hpriv->mmio + 0x8000); + dev_warn(ap->host->dev, "CN99XX SATA reset workaround applied\n"); + } +#endif + /* wait for engine to stop. 
This could be as long as 500 msec */ tmp = ata_wait_register(ap, port_mmio + PORT_CMD, PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500); diff --git a/drivers/char/ipmi/ipmi_dmi.c b/drivers/char/ipmi/ipmi_dmi.c index bbf7029e224b..cf7faa970dd6 100644 --- a/drivers/char/ipmi/ipmi_dmi.c +++ b/drivers/char/ipmi/ipmi_dmi.c @@ -215,6 +215,21 @@ static int __init scan_for_dmi_ipmi(void) { const struct dmi_device *dev = NULL; +#ifdef CONFIG_ARM64 + /* RHEL-only + * If this is ARM-based HPE m400, return now, because that platform + * reports the host-side ipmi address as intel port-io space, which + * does not exist in the ARM architecture. + */ + const char *dmistr = dmi_get_system_info(DMI_PRODUCT_NAME); + + if (dmistr && (strcmp("ProLiant m400 Server", dmistr) == 0)) { + pr_debug("%s does not support host ipmi\n", dmistr); + return 0; + } + /* END RHEL-only */ +#endif + while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev))) dmi_decode_ipmi((const struct dmi_header *) dev->device_data); diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index 703433493c85..6b1b102b9b7d 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -35,6 +35,7 @@ #include #include #include +#include #include #define IPMI_DRIVER_VERSION "39.2" @@ -5516,8 +5517,21 @@ static int __init ipmi_init_msghandler_mod(void) { int rv; - pr_info("version " IPMI_DRIVER_VERSION "\n"); +#ifdef CONFIG_ARM64 + /* RHEL-only + * If this is ARM-based HPE m400, return now, because that platform + * reports the host-side ipmi address as intel port-io space, which + * does not exist in the ARM architecture. + */ + const char *dmistr = dmi_get_system_info(DMI_PRODUCT_NAME); + if (dmistr && (strcmp("ProLiant m400 Server", dmistr) == 0)) { + pr_debug("%s does not support host ipmi\n", dmistr); + return -ENOSYS; + } + /* END RHEL-only */ +#endif + pr_info("version " IPMI_DRIVER_VERSION "\n"); mutex_lock(&ipmi_interfaces_mutex); rv = ipmi_register_driver(); mutex_unlock(&ipmi_interfaces_mutex); diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile index 8d151e332584..bd29fe4ddbf3 100644 --- a/drivers/firmware/efi/Makefile +++ b/drivers/firmware/efi/Makefile @@ -27,6 +27,7 @@ obj-$(CONFIG_EFI_FAKE_MEMMAP) += fake_map.o obj-$(CONFIG_EFI_BOOTLOADER_CONTROL) += efibc.o obj-$(CONFIG_EFI_TEST) += test/ obj-$(CONFIG_EFI_DEV_PATH_PARSER) += dev-path-parser.o +obj-$(CONFIG_EFI) += secureboot.o obj-$(CONFIG_APPLE_PROPERTIES) += apple-properties.o obj-$(CONFIG_EFI_RCI2_TABLE) += rci2-table.o obj-$(CONFIG_EFI_EMBEDDED_FIRMWARE) += embedded-firmware.o diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index a06decee51e0..039897a56c5c 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -31,6 +31,7 @@ #include #include #include +#include #include @@ -866,40 +867,101 @@ int efi_mem_type(unsigned long phys_addr) } #endif +struct efi_error_code { + efi_status_t status; + int errno; + const char *description; +}; + +static const struct efi_error_code efi_error_codes[] = { + { EFI_SUCCESS, 0, "Success"}, +#if 0 + { EFI_LOAD_ERROR, -EPICK_AN_ERRNO, "Load Error"}, +#endif + { EFI_INVALID_PARAMETER, -EINVAL, "Invalid Parameter"}, + { EFI_UNSUPPORTED, -ENOSYS, "Unsupported"}, + { EFI_BAD_BUFFER_SIZE, -ENOSPC, "Bad Buffer Size"}, + { EFI_BUFFER_TOO_SMALL, -ENOSPC, "Buffer Too Small"}, + { EFI_NOT_READY, -EAGAIN, "Not Ready"}, + { EFI_DEVICE_ERROR, -EIO, "Device Error"}, + { EFI_WRITE_PROTECTED, -EROFS, "Write Protected"}, + { 
EFI_OUT_OF_RESOURCES, -ENOMEM, "Out of Resources"}, +#if 0 + { EFI_VOLUME_CORRUPTED, -EPICK_AN_ERRNO, "Volume Corrupt"}, + { EFI_VOLUME_FULL, -EPICK_AN_ERRNO, "Volume Full"}, + { EFI_NO_MEDIA, -EPICK_AN_ERRNO, "No Media"}, + { EFI_MEDIA_CHANGED, -EPICK_AN_ERRNO, "Media changed"}, +#endif + { EFI_NOT_FOUND, -ENOENT, "Not Found"}, +#if 0 + { EFI_ACCESS_DENIED, -EPICK_AN_ERRNO, "Access Denied"}, + { EFI_NO_RESPONSE, -EPICK_AN_ERRNO, "No Response"}, + { EFI_NO_MAPPING, -EPICK_AN_ERRNO, "No mapping"}, + { EFI_TIMEOUT, -EPICK_AN_ERRNO, "Time out"}, + { EFI_NOT_STARTED, -EPICK_AN_ERRNO, "Not started"}, + { EFI_ALREADY_STARTED, -EPICK_AN_ERRNO, "Already started"}, +#endif + { EFI_ABORTED, -EINTR, "Aborted"}, +#if 0 + { EFI_ICMP_ERROR, -EPICK_AN_ERRNO, "ICMP Error"}, + { EFI_TFTP_ERROR, -EPICK_AN_ERRNO, "TFTP Error"}, + { EFI_PROTOCOL_ERROR, -EPICK_AN_ERRNO, "Protocol Error"}, + { EFI_INCOMPATIBLE_VERSION, -EPICK_AN_ERRNO, "Incompatible Version"}, +#endif + { EFI_SECURITY_VIOLATION, -EACCES, "Security Policy Violation"}, +#if 0 + { EFI_CRC_ERROR, -EPICK_AN_ERRNO, "CRC Error"}, + { EFI_END_OF_MEDIA, -EPICK_AN_ERRNO, "End of Media"}, + { EFI_END_OF_FILE, -EPICK_AN_ERRNO, "End of File"}, + { EFI_INVALID_LANGUAGE, -EPICK_AN_ERRNO, "Invalid Languages"}, + { EFI_COMPROMISED_DATA, -EPICK_AN_ERRNO, "Compromised Data"}, + + // warnings + { EFI_WARN_UNKOWN_GLYPH, -EPICK_AN_ERRNO, "Warning Unknown Glyph"}, + { EFI_WARN_DELETE_FAILURE, -EPICK_AN_ERRNO, "Warning Delete Failure"}, + { EFI_WARN_WRITE_FAILURE, -EPICK_AN_ERRNO, "Warning Write Failure"}, + { EFI_WARN_BUFFER_TOO_SMALL, -EPICK_AN_ERRNO, "Warning Buffer Too Small"}, +#endif +}; + +static int +efi_status_cmp_bsearch(const void *key, const void *item) +{ + u64 status = (u64)(uintptr_t)key; + struct efi_error_code *code = (struct efi_error_code *)item; + + if (status < code->status) + return -1; + if (status > code->status) + return 1; + return 0; +} + int efi_status_to_err(efi_status_t status) { - int err; - - switch (status) { - case EFI_SUCCESS: - err = 0; - break; - case EFI_INVALID_PARAMETER: - err = -EINVAL; - break; - case EFI_OUT_OF_RESOURCES: - err = -ENOSPC; - break; - case EFI_DEVICE_ERROR: - err = -EIO; - break; - case EFI_WRITE_PROTECTED: - err = -EROFS; - break; - case EFI_SECURITY_VIOLATION: - err = -EACCES; - break; - case EFI_NOT_FOUND: - err = -ENOENT; - break; - case EFI_ABORTED: - err = -EINTR; - break; - default: - err = -EINVAL; - } + struct efi_error_code *found; + size_t num = sizeof(efi_error_codes) / sizeof(struct efi_error_code); - return err; + found = bsearch((void *)(uintptr_t)status, efi_error_codes, + sizeof(struct efi_error_code), num, + efi_status_cmp_bsearch); + if (!found) + return -EINVAL; + return found->errno; +} + +const char * +efi_status_to_str(efi_status_t status) +{ + struct efi_error_code *found; + size_t num = sizeof(efi_error_codes) / sizeof(struct efi_error_code); + + found = bsearch((void *)(uintptr_t)status, efi_error_codes, + sizeof(struct efi_error_code), num, + efi_status_cmp_bsearch); + if (!found) + return "Unknown error code"; + return found->description; } EXPORT_SYMBOL_GPL(efi_status_to_err); diff --git a/drivers/firmware/efi/secureboot.c b/drivers/firmware/efi/secureboot.c new file mode 100644 index 000000000000..de0a3714a5d4 --- /dev/null +++ b/drivers/firmware/efi/secureboot.c @@ -0,0 +1,38 @@ +/* Core kernel secure boot support. + * + * Copyright (C) 2017 Red Hat, Inc. All Rights Reserved. 
+ * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include + +/* + * Decide what to do when UEFI secure boot mode is enabled. + */ +void __init efi_set_secure_boot(enum efi_secureboot_mode mode) +{ + if (efi_enabled(EFI_BOOT)) { + switch (mode) { + case efi_secureboot_mode_disabled: + pr_info("Secure boot disabled\n"); + break; + case efi_secureboot_mode_enabled: + set_bit(EFI_SECURE_BOOT, &efi.flags); + pr_info("Secure boot enabled\n"); + break; + default: + pr_warn("Secure boot could not be determined (mode %u)\n", + mode); + break; + } + } +} diff --git a/drivers/firmware/sysfb.c b/drivers/firmware/sysfb.c index 1f276f108cc9..7039ad9bdf7f 100644 --- a/drivers/firmware/sysfb.c +++ b/drivers/firmware/sysfb.c @@ -34,6 +34,22 @@ #include #include +static int skip_simpledrm; + +static int __init simpledrm_disable(char *opt) +{ + if (!opt) + return -EINVAL; + + get_option(&opt, &skip_simpledrm); + + if (skip_simpledrm) + pr_info("The simpledrm driver will not be probed\n"); + + return 0; +} +early_param("nvidia-drm.modeset", simpledrm_disable); + static struct platform_device *pd; static DEFINE_MUTEX(disable_lock); static bool disabled; @@ -83,7 +99,7 @@ static __init int sysfb_init(void) /* try to create a simple-framebuffer device */ compatible = sysfb_parse_mode(si, &mode); - if (compatible) { + if (compatible && !skip_simpledrm) { pd = sysfb_create_simplefb(si, &mode); if (!IS_ERR(pd)) goto unlock_mutex; diff --git a/drivers/gpu/drm/tiny/simpledrm.c b/drivers/gpu/drm/tiny/simpledrm.c index 5422363690e7..a5b500811892 100644 --- a/drivers/gpu/drm/tiny/simpledrm.c +++ b/drivers/gpu/drm/tiny/simpledrm.c @@ -569,20 +569,44 @@ static int simpledrm_device_init_mm(struct simpledrm_device *sdev) */ /* - * Support all formats of simplefb and maybe more; in order - * of preference. The display's update function will do any + * Support the subset of formats that we have conversion helpers for, + * in order of preference. The display's update function will do any * conversion necessary. * * TODO: Add blit helpers for remaining formats and uncomment * constants. 
*/ -static const uint32_t simpledrm_default_formats[] = { + +/* + * Supported conversions to RGB565 and RGB888: + * from [AX]RGB8888 + */ +static const uint32_t simpledrm_primary_plane_formats_base[] = { + DRM_FORMAT_XRGB8888, + DRM_FORMAT_ARGB8888, +}; + +/* + * Supported conversions to [AX]RGB8888: + * A/X variants (no-op) + * from RGB565 + * from RGB888 + */ +static const uint32_t simpledrm_primary_plane_formats_xrgb8888[] = { DRM_FORMAT_XRGB8888, DRM_FORMAT_ARGB8888, + DRM_FORMAT_RGB888, DRM_FORMAT_RGB565, //DRM_FORMAT_XRGB1555, //DRM_FORMAT_ARGB1555, - DRM_FORMAT_RGB888, +}; + +/* + * Supported conversions to [AX]RGB2101010: + * A/X variants (no-op) + * from [AX]RGB8888 + */ +static const uint32_t simpledrm_primary_plane_formats_xrgb2101010[] = { DRM_FORMAT_XRGB2101010, DRM_FORMAT_ARGB2101010, }; @@ -744,7 +768,8 @@ static const uint32_t *simpledrm_device_formats(struct simpledrm_device *sdev, size_t *nformats_out) { struct drm_device *dev = &sdev->dev; - size_t i; + const uint32_t *conv_formats; + size_t i, conv_nformats; if (sdev->nformats) goto out; /* don't rebuild list on recurring calls */ @@ -753,11 +778,35 @@ static const uint32_t *simpledrm_device_formats(struct simpledrm_device *sdev, sdev->formats[0] = sdev->format->format; sdev->nformats = 1; + switch (sdev->format->format) { + case DRM_FORMAT_RGB565: + case DRM_FORMAT_RGB888: + conv_formats = simpledrm_primary_plane_formats_base; + conv_nformats = ARRAY_SIZE(simpledrm_primary_plane_formats_base); + break; + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_ARGB8888: + conv_formats = simpledrm_primary_plane_formats_xrgb8888; + conv_nformats = ARRAY_SIZE(simpledrm_primary_plane_formats_xrgb8888); + break; + case DRM_FORMAT_XRGB2101010: + case DRM_FORMAT_ARGB2101010: + conv_formats = simpledrm_primary_plane_formats_xrgb2101010; + conv_nformats = ARRAY_SIZE(simpledrm_primary_plane_formats_xrgb2101010); + break; + default: + conv_formats = NULL; + conv_nformats = 0; + drm_warn(dev, "Format conversion helpers required to add extra formats.\n"); + break; + } + + /* default formats go second */ - for (i = 0; i < ARRAY_SIZE(simpledrm_default_formats); ++i) { - if (simpledrm_default_formats[i] == sdev->format->format) + for (i = 0; i < conv_nformats; ++i) { + if (conv_formats[i] == sdev->format->format) continue; /* native format already went first */ - sdev->formats[sdev->nformats] = simpledrm_default_formats[i]; + sdev->formats[sdev->nformats] = conv_formats[i]; sdev->nformats++; } diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c index 311eee599ce9..2460c6bd46f8 100644 --- a/drivers/hid/hid-rmi.c +++ b/drivers/hid/hid-rmi.c @@ -322,19 +322,12 @@ static int rmi_input_event(struct hid_device *hdev, u8 *data, int size) { struct rmi_data *hdata = hid_get_drvdata(hdev); struct rmi_device *rmi_dev = hdata->xport.rmi_dev; - unsigned long flags; if (!(test_bit(RMI_STARTED, &hdata->flags))) return 0; - local_irq_save(flags); - rmi_set_attn_data(rmi_dev, data[1], &data[2], size - 2); - generic_handle_irq(hdata->rmi_irq); - - local_irq_restore(flags); - return 1; } @@ -591,56 +584,6 @@ static const struct rmi_transport_ops hid_rmi_ops = { .reset = rmi_hid_reset, }; -static void rmi_irq_teardown(void *data) -{ - struct rmi_data *hdata = data; - struct irq_domain *domain = hdata->domain; - - if (!domain) - return; - - irq_dispose_mapping(irq_find_mapping(domain, 0)); - - irq_domain_remove(domain); - hdata->domain = NULL; - hdata->rmi_irq = 0; -} - -static int rmi_irq_map(struct irq_domain *h, unsigned int virq, - irq_hw_number_t 
hw_irq_num) -{ - irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_simple_irq); - - return 0; -} - -static const struct irq_domain_ops rmi_irq_ops = { - .map = rmi_irq_map, -}; - -static int rmi_setup_irq_domain(struct hid_device *hdev) -{ - struct rmi_data *hdata = hid_get_drvdata(hdev); - int ret; - - hdata->domain = irq_domain_create_linear(hdev->dev.fwnode, 1, - &rmi_irq_ops, hdata); - if (!hdata->domain) - return -ENOMEM; - - ret = devm_add_action_or_reset(&hdev->dev, &rmi_irq_teardown, hdata); - if (ret) - return ret; - - hdata->rmi_irq = irq_create_mapping(hdata->domain, 0); - if (hdata->rmi_irq <= 0) { - hid_err(hdev, "Can't allocate an IRQ\n"); - return hdata->rmi_irq < 0 ? hdata->rmi_irq : -ENXIO; - } - - return 0; -} - static int rmi_probe(struct hid_device *hdev, const struct hid_device_id *id) { struct rmi_data *data = NULL; @@ -713,18 +656,11 @@ static int rmi_probe(struct hid_device *hdev, const struct hid_device_id *id) mutex_init(&data->page_mutex); - ret = rmi_setup_irq_domain(hdev); - if (ret) { - hid_err(hdev, "failed to allocate IRQ domain\n"); - return ret; - } - if (data->device_flags & RMI_DEVICE_HAS_PHYS_BUTTONS) rmi_hid_pdata.gpio_data.disable = true; data->xport.dev = hdev->dev.parent; data->xport.pdata = rmi_hid_pdata; - data->xport.pdata.irq = data->rmi_irq; data->xport.proto_name = "hid"; data->xport.ops = &hid_rmi_ops; diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c index d39660a3e50c..8e5ffb4325a3 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x-core.c +++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -2137,6 +2138,16 @@ static const struct amba_id etm4_ids[] = { {}, }; +static const struct dmi_system_id broken_coresight[] = { + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HPE"), + DMI_MATCH(DMI_PRODUCT_NAME, "Apollo 70"), + }, + }, + { } /* terminating entry */ +}; + MODULE_DEVICE_TABLE(amba, etm4_ids); static struct amba_driver etm4x_amba_driver = { @@ -2170,6 +2181,11 @@ static int __init etm4x_init(void) { int ret; + if (dmi_check_system(broken_coresight)) { + pr_info("ETM4 disabled due to firmware bug\n"); + return 0; + } + ret = etm4_pm_setup(); /* etm4_pm_setup() does its own cleanup - exit on error */ @@ -2196,6 +2212,9 @@ static int __init etm4x_init(void) static void __exit etm4x_exit(void) { + if (dmi_check_system(broken_coresight)) + return; + amba_driver_unregister(&etm4x_amba_driver); platform_driver_unregister(&etm4_platform_driver); etm4_pm_clear(); diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c index 258d5fe3d395..f7298e3dc8f3 100644 --- a/drivers/input/rmi4/rmi_driver.c +++ b/drivers/input/rmi4/rmi_driver.c @@ -182,34 +182,47 @@ void rmi_set_attn_data(struct rmi_device *rmi_dev, unsigned long irq_status, attn_data.data = fifo_data; kfifo_put(&drvdata->attn_fifo, attn_data); + + schedule_work(&drvdata->attn_work); } EXPORT_SYMBOL_GPL(rmi_set_attn_data); -static irqreturn_t rmi_irq_fn(int irq, void *dev_id) +static void attn_callback(struct work_struct *work) { - struct rmi_device *rmi_dev = dev_id; - struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev); + struct rmi_driver_data *drvdata = container_of(work, + struct rmi_driver_data, + attn_work); struct rmi4_attn_data attn_data = {0}; int ret, count; count = kfifo_get(&drvdata->attn_fifo, &attn_data); - if (count) { - *(drvdata->irq_status) = attn_data.irq_status; - 
drvdata->attn_data = attn_data; - } + if (!count) + return; - ret = rmi_process_interrupt_requests(rmi_dev); + *(drvdata->irq_status) = attn_data.irq_status; + drvdata->attn_data = attn_data; + + ret = rmi_process_interrupt_requests(drvdata->rmi_dev); if (ret) - rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev, + rmi_dbg(RMI_DEBUG_CORE, &drvdata->rmi_dev->dev, "Failed to process interrupt request: %d\n", ret); - if (count) { - kfree(attn_data.data); - drvdata->attn_data.data = NULL; - } + kfree(attn_data.data); + drvdata->attn_data.data = NULL; if (!kfifo_is_empty(&drvdata->attn_fifo)) - return rmi_irq_fn(irq, dev_id); + schedule_work(&drvdata->attn_work); +} + +static irqreturn_t rmi_irq_fn(int irq, void *dev_id) +{ + struct rmi_device *rmi_dev = dev_id; + int ret; + + ret = rmi_process_interrupt_requests(rmi_dev); + if (ret) + rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev, + "Failed to process interrupt request: %d\n", ret); return IRQ_HANDLED; } @@ -217,7 +230,6 @@ static irqreturn_t rmi_irq_fn(int irq, void *dev_id) static int rmi_irq_init(struct rmi_device *rmi_dev) { struct rmi_device_platform_data *pdata = rmi_get_platform_data(rmi_dev); - struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev); int irq_flags = irq_get_trigger_type(pdata->irq); int ret; @@ -235,8 +247,6 @@ static int rmi_irq_init(struct rmi_device *rmi_dev) return ret; } - data->enabled = true; - return 0; } @@ -886,23 +896,27 @@ void rmi_enable_irq(struct rmi_device *rmi_dev, bool clear_wake) if (data->enabled) goto out; - enable_irq(irq); - data->enabled = true; - if (clear_wake && device_may_wakeup(rmi_dev->xport->dev)) { - retval = disable_irq_wake(irq); - if (retval) - dev_warn(&rmi_dev->dev, - "Failed to disable irq for wake: %d\n", - retval); - } + if (irq) { + enable_irq(irq); + data->enabled = true; + if (clear_wake && device_may_wakeup(rmi_dev->xport->dev)) { + retval = disable_irq_wake(irq); + if (retval) + dev_warn(&rmi_dev->dev, + "Failed to disable irq for wake: %d\n", + retval); + } - /* - * Call rmi_process_interrupt_requests() after enabling irq, - * otherwise we may lose interrupt on edge-triggered systems. - */ - irq_flags = irq_get_trigger_type(pdata->irq); - if (irq_flags & IRQ_TYPE_EDGE_BOTH) - rmi_process_interrupt_requests(rmi_dev); + /* + * Call rmi_process_interrupt_requests() after enabling irq, + * otherwise we may lose interrupt on edge-triggered systems. 
+ */ + irq_flags = irq_get_trigger_type(pdata->irq); + if (irq_flags & IRQ_TYPE_EDGE_BOTH) + rmi_process_interrupt_requests(rmi_dev); + } else { + data->enabled = true; + } out: mutex_unlock(&data->enabled_mutex); @@ -922,20 +936,22 @@ void rmi_disable_irq(struct rmi_device *rmi_dev, bool enable_wake) goto out; data->enabled = false; - disable_irq(irq); - if (enable_wake && device_may_wakeup(rmi_dev->xport->dev)) { - retval = enable_irq_wake(irq); - if (retval) - dev_warn(&rmi_dev->dev, - "Failed to enable irq for wake: %d\n", - retval); - } - - /* make sure the fifo is clean */ - while (!kfifo_is_empty(&data->attn_fifo)) { - count = kfifo_get(&data->attn_fifo, &attn_data); - if (count) - kfree(attn_data.data); + if (irq) { + disable_irq(irq); + if (enable_wake && device_may_wakeup(rmi_dev->xport->dev)) { + retval = enable_irq_wake(irq); + if (retval) + dev_warn(&rmi_dev->dev, + "Failed to enable irq for wake: %d\n", + retval); + } + } else { + /* make sure the fifo is clean */ + while (!kfifo_is_empty(&data->attn_fifo)) { + count = kfifo_get(&data->attn_fifo, &attn_data); + if (count) + kfree(attn_data.data); + } } out: @@ -981,6 +997,8 @@ static int rmi_driver_remove(struct device *dev) irq_domain_remove(data->irqdomain); data->irqdomain = NULL; + cancel_work_sync(&data->attn_work); + rmi_f34_remove_sysfs(rmi_dev); rmi_free_function_list(rmi_dev); @@ -1219,9 +1237,15 @@ static int rmi_driver_probe(struct device *dev) } } - retval = rmi_irq_init(rmi_dev); - if (retval < 0) - goto err_destroy_functions; + if (pdata->irq) { + retval = rmi_irq_init(rmi_dev); + if (retval < 0) + goto err_destroy_functions; + } + + data->enabled = true; + + INIT_WORK(&data->attn_work, attn_callback); if (data->f01_container->dev.driver) { /* Driver already bound, so enable ATTN now. 
*/ diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 3a808146b50f..c1a3f3057921 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -7,6 +7,7 @@ #define pr_fmt(fmt) "iommu: " fmt #include +#include #include #include #include @@ -2843,6 +2844,27 @@ u32 iommu_sva_get_pasid(struct iommu_sva *handle) } EXPORT_SYMBOL_GPL(iommu_sva_get_pasid); +#ifdef CONFIG_ARM64 +static int __init iommu_quirks(void) +{ + const char *vendor, *name; + + vendor = dmi_get_system_info(DMI_SYS_VENDOR); + name = dmi_get_system_info(DMI_PRODUCT_NAME); + + if (vendor && + (strncmp(vendor, "GIGABYTE", 8) == 0 && name && + (strncmp(name, "R120", 4) == 0 || + strncmp(name, "R270", 4) == 0))) { + pr_warn("Gigabyte %s detected, force iommu passthrough mode", name); + iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY; + } + + return 0; +} +arch_initcall(iommu_quirks); +#endif + /* * Changes the default domain of an iommu group that has *only* one device * diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index ed47c256dbd2..2af613f7353f 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -250,6 +250,9 @@ static void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl) static blk_status_t nvme_error_status(u16 status) { + if (unlikely(status & NVME_SC_DNR)) + return BLK_STS_TARGET; + switch (status & 0x7ff) { case NVME_SC_SUCCESS: return BLK_STS_OK; @@ -340,6 +343,7 @@ enum nvme_disposition { COMPLETE, RETRY, FAILOVER, + FAILUP, AUTHENTICATE, }; @@ -351,15 +355,16 @@ static inline enum nvme_disposition nvme_decide_disposition(struct request *req) if ((nvme_req(req)->status & 0x7ff) == NVME_SC_AUTH_REQUIRED) return AUTHENTICATE; - if (blk_noretry_request(req) || + if ((req->cmd_flags & (REQ_FAILFAST_DEV | REQ_FAILFAST_DRIVER)) || (nvme_req(req)->status & NVME_SC_DNR) || nvme_req(req)->retries >= nvme_max_retries) return COMPLETE; - if (req->cmd_flags & REQ_NVME_MPATH) { + if (req->cmd_flags & (REQ_NVME_MPATH | REQ_FAILFAST_TRANSPORT)) { if (nvme_is_path_error(nvme_req(req)->status) || blk_queue_dying(req->q)) - return FAILOVER; + return (req->cmd_flags & REQ_NVME_MPATH) ? 
+ FAILOVER : FAILUP; } else { if (blk_queue_dying(req->q)) return COMPLETE; @@ -387,6 +392,14 @@ static inline void nvme_end_req(struct request *req) blk_mq_end_request(req, status); } +static inline void nvme_failup_req(struct request *req) +{ + nvme_update_ana(req); + + nvme_req(req)->status = NVME_SC_HOST_PATH_ERROR; + nvme_end_req(req); +} + void nvme_complete_rq(struct request *req) { struct nvme_ctrl *ctrl = nvme_req(req)->ctrl; @@ -407,6 +420,9 @@ void nvme_complete_rq(struct request *req) case FAILOVER: nvme_failover_req(req); return; + case FAILUP: + nvme_failup_req(req); + return; case AUTHENTICATE: #ifdef CONFIG_NVME_AUTH queue_work(nvme_wq, &ctrl->dhchap_auth_work); diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index b9cf17cbbbd5..9aa33fc59f67 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -80,14 +80,10 @@ void nvme_mpath_start_freeze(struct nvme_subsystem *subsys) blk_freeze_queue_start(h->disk->queue); } -void nvme_failover_req(struct request *req) +void nvme_update_ana(struct request *req) { struct nvme_ns *ns = req->q->queuedata; u16 status = nvme_req(req)->status & 0x7ff; - unsigned long flags; - struct bio *bio; - - nvme_mpath_clear_current_path(ns); /* * If we got back an ANA error, we know the controller is alive but not @@ -98,6 +94,16 @@ void nvme_failover_req(struct request *req) set_bit(NVME_NS_ANA_PENDING, &ns->flags); queue_work(nvme_wq, &ns->ctrl->ana_work); } +} + +void nvme_failover_req(struct request *req) +{ + struct nvme_ns *ns = req->q->queuedata; + unsigned long flags; + struct bio *bio; + + nvme_mpath_clear_current_path(ns); + nvme_update_ana(req); spin_lock_irqsave(&ns->head->requeue_lock, flags); for (bio = req->bio; bio; bio = bio->bi_next) { @@ -872,8 +878,7 @@ int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) int error = 0; /* check if multipath is enabled and we have the capability */ - if (!multipath || !ctrl->subsys || - !(ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA)) + if (!ctrl->subsys || !(ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA)) return 0; if (!ctrl->max_namespaces || diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 70555022cb44..66c4f089d175 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -854,6 +854,7 @@ void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys); void nvme_mpath_start_freeze(struct nvme_subsystem *subsys); void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys); void nvme_failover_req(struct request *req); +void nvme_update_ana(struct request *req); void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl); int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head); void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid); @@ -890,6 +891,9 @@ static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl) static inline void nvme_failover_req(struct request *req) { } +static inline void nvme_update_ana(struct request *req) +{ +} static inline void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl) { } diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 49238ddd39ee..5d8c37c3e15a 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -774,6 +774,12 @@ static int pci_pm_suspend(struct device *dev) pci_dev->skip_bus_pm = false; + /* + * Disabling PTM allows some systems, e.g., Intel mobile chips + * since Coffee Lake, to enter a lower-power PM state. 
+ */ + pci_suspend_ptm(pci_dev); + if (pci_has_legacy_pm_support(pci_dev)) return pci_legacy_suspend(dev, PMSG_SUSPEND); @@ -987,6 +993,8 @@ static int pci_pm_resume(struct device *dev) if (pci_dev->state_saved) pci_restore_standard_config(pci_dev); + pci_resume_ptm(pci_dev); + if (pci_has_legacy_pm_support(pci_dev)) return pci_legacy_resume(dev); @@ -1274,6 +1282,8 @@ static int pci_pm_runtime_suspend(struct device *dev) pci_power_t prev = pci_dev->current_state; int error; + pci_suspend_ptm(pci_dev); + /* * If pci_dev->driver is not set (unbound), we leave the device in D0, * but it may go to D3cold when the bridge above it runtime suspends. @@ -1335,6 +1345,7 @@ static int pci_pm_runtime_resume(struct device *dev) * D3cold when the bridge above it runtime suspended. */ pci_pm_default_resume_early(pci_dev); + pci_resume_ptm(pci_dev); if (!pci_dev->driver) return 0; diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 95bc329e74c0..107afa0a5b03 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -2706,24 +2706,12 @@ int pci_prepare_to_sleep(struct pci_dev *dev) if (target_state == PCI_POWER_ERROR) return -EIO; - /* - * There are systems (for example, Intel mobile chips since Coffee - * Lake) where the power drawn while suspended can be significantly - * reduced by disabling PTM on PCIe root ports as this allows the - * port to enter a lower-power PM state and the SoC to reach a - * lower-power idle state as a whole. - */ - if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) - pci_disable_ptm(dev); - pci_enable_wake(dev, target_state, wakeup); error = pci_set_power_state(dev, target_state); - if (error) { + if (error) pci_enable_wake(dev, target_state, false); - pci_restore_ptm_state(dev); - } return error; } @@ -2764,24 +2752,12 @@ int pci_finish_runtime_suspend(struct pci_dev *dev) if (target_state == PCI_POWER_ERROR) return -EIO; - /* - * There are systems (for example, Intel mobile chips since Coffee - * Lake) where the power drawn while suspended can be significantly - * reduced by disabling PTM on PCIe root ports as this allows the - * port to enter a lower-power PM state and the SoC to reach a - * lower-power idle state as a whole. 
- */ - if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) - pci_disable_ptm(dev); - __pci_enable_wake(dev, target_state, pci_dev_run_wake(dev)); error = pci_set_power_state(dev, target_state); - if (error) { + if (error) pci_enable_wake(dev, target_state, false); - pci_restore_ptm_state(dev); - } return error; } diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 785f31086313..5cca2e58cce8 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -505,13 +505,17 @@ static inline int pci_iov_bus_range(struct pci_bus *bus) #endif /* CONFIG_PCI_IOV */ #ifdef CONFIG_PCIE_PTM +void pci_ptm_init(struct pci_dev *dev); void pci_save_ptm_state(struct pci_dev *dev); void pci_restore_ptm_state(struct pci_dev *dev); -void pci_disable_ptm(struct pci_dev *dev); +void pci_suspend_ptm(struct pci_dev *dev); +void pci_resume_ptm(struct pci_dev *dev); #else +static inline void pci_ptm_init(struct pci_dev *dev) { } static inline void pci_save_ptm_state(struct pci_dev *dev) { } static inline void pci_restore_ptm_state(struct pci_dev *dev) { } -static inline void pci_disable_ptm(struct pci_dev *dev) { } +static inline void pci_suspend_ptm(struct pci_dev *dev) { } +static inline void pci_resume_ptm(struct pci_dev *dev) { } #endif unsigned long pci_cardbus_resource_alignment(struct resource *); @@ -575,12 +579,6 @@ static inline void pcie_set_ecrc_checking(struct pci_dev *dev) { } static inline void pcie_ecrc_get_policy(char *str) { } #endif -#ifdef CONFIG_PCIE_PTM -void pci_ptm_init(struct pci_dev *dev); -#else -static inline void pci_ptm_init(struct pci_dev *dev) { } -#endif - struct pci_dev_reset_methods { u16 vendor; u16 device; diff --git a/drivers/pci/pcie/ptm.c b/drivers/pci/pcie/ptm.c index 368a254e3124..b4e5f553467c 100644 --- a/drivers/pci/pcie/ptm.c +++ b/drivers/pci/pcie/ptm.c @@ -9,30 +9,38 @@ #include #include "../pci.h" -static void pci_ptm_info(struct pci_dev *dev) +/* + * If the next upstream device supports PTM, return it; otherwise return + * NULL. PTM Messages are local, so both link partners must support it. + */ +static struct pci_dev *pci_upstream_ptm(struct pci_dev *dev) { - char clock_desc[8]; + struct pci_dev *ups = pci_upstream_bridge(dev); - switch (dev->ptm_granularity) { - case 0: - snprintf(clock_desc, sizeof(clock_desc), "unknown"); - break; - case 255: - snprintf(clock_desc, sizeof(clock_desc), ">254ns"); - break; - default: - snprintf(clock_desc, sizeof(clock_desc), "%uns", - dev->ptm_granularity); - break; - } - pci_info(dev, "PTM enabled%s, %s granularity\n", - dev->ptm_root ? " (root)" : "", clock_desc); + /* + * Switch Downstream Ports are not permitted to have a PTM + * capability; their PTM behavior is controlled by the Upstream + * Port (PCIe r5.0, sec 7.9.16), so if the upstream bridge is a + * Switch Downstream Port, look up one more level. + */ + if (ups && pci_pcie_type(ups) == PCI_EXP_TYPE_DOWNSTREAM) + ups = pci_upstream_bridge(ups); + + if (ups && ups->ptm_cap) + return ups; + + return NULL; } -void pci_disable_ptm(struct pci_dev *dev) +/* + * Find the PTM Capability (if present) and extract the information we need + * to use it. 
+ */ +void pci_ptm_init(struct pci_dev *dev) { - int ptm; - u16 ctrl; + u16 ptm; + u32 cap; + struct pci_dev *ups; if (!pci_is_pcie(dev)) return; @@ -41,21 +49,47 @@ void pci_disable_ptm(struct pci_dev *dev) if (!ptm) return; - pci_read_config_word(dev, ptm + PCI_PTM_CTRL, &ctrl); - ctrl &= ~(PCI_PTM_CTRL_ENABLE | PCI_PTM_CTRL_ROOT); - pci_write_config_word(dev, ptm + PCI_PTM_CTRL, ctrl); + dev->ptm_cap = ptm; + pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_PTM, sizeof(u32)); + + pci_read_config_dword(dev, ptm + PCI_PTM_CAP, &cap); + dev->ptm_granularity = (cap & PCI_PTM_GRANULARITY_MASK) >> 8; + + /* + * Per the spec recommendation (PCIe r6.0, sec 7.9.15.3), select the + * furthest upstream Time Source as the PTM Root. For Endpoints, + * "the Effective Granularity is the maximum Local Clock Granularity + * reported by the PTM Root and all intervening PTM Time Sources." + */ + ups = pci_upstream_ptm(dev); + if (ups) { + if (ups->ptm_granularity == 0) + dev->ptm_granularity = 0; + else if (ups->ptm_granularity > dev->ptm_granularity) + dev->ptm_granularity = ups->ptm_granularity; + } else if (cap & PCI_PTM_CAP_ROOT) { + dev->ptm_root = 1; + } else if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END) { + + /* + * Per sec 7.9.15.3, this should be the Local Clock + * Granularity of the associated Time Source. But it + * doesn't say how to find that Time Source. + */ + dev->ptm_granularity = 0; + } + + if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT || + pci_pcie_type(dev) == PCI_EXP_TYPE_UPSTREAM) + pci_enable_ptm(dev, NULL); } void pci_save_ptm_state(struct pci_dev *dev) { - int ptm; + u16 ptm = dev->ptm_cap; struct pci_cap_saved_state *save_state; - u16 *cap; + u32 *cap; - if (!pci_is_pcie(dev)) - return; - - ptm = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PTM); if (!ptm) return; @@ -63,146 +97,152 @@ void pci_save_ptm_state(struct pci_dev *dev) if (!save_state) return; - cap = (u16 *)&save_state->cap.data[0]; - pci_read_config_word(dev, ptm + PCI_PTM_CTRL, cap); + cap = (u32 *)&save_state->cap.data[0]; + pci_read_config_dword(dev, ptm + PCI_PTM_CTRL, cap); } void pci_restore_ptm_state(struct pci_dev *dev) { + u16 ptm = dev->ptm_cap; struct pci_cap_saved_state *save_state; - int ptm; - u16 *cap; + u32 *cap; - if (!pci_is_pcie(dev)) + if (!ptm) return; save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_PTM); - ptm = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PTM); - if (!save_state || !ptm) + if (!save_state) return; - cap = (u16 *)&save_state->cap.data[0]; - pci_write_config_word(dev, ptm + PCI_PTM_CTRL, *cap); + cap = (u32 *)&save_state->cap.data[0]; + pci_write_config_dword(dev, ptm + PCI_PTM_CTRL, *cap); } -void pci_ptm_init(struct pci_dev *dev) +/* Enable PTM in the Control register if possible */ +static int __pci_enable_ptm(struct pci_dev *dev) { - int pos; - u32 cap, ctrl; - u8 local_clock; + u16 ptm = dev->ptm_cap; struct pci_dev *ups; + u32 ctrl; - if (!pci_is_pcie(dev)) - return; - - /* - * Enable PTM only on interior devices (root ports, switch ports, - * etc.) on the assumption that it causes no link traffic until an - * endpoint enables it. - */ - if ((pci_pcie_type(dev) == PCI_EXP_TYPE_ENDPOINT || - pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END)) - return; + if (!ptm) + return -EINVAL; /* - * Switch Downstream Ports are not permitted to have a PTM - * capability; their PTM behavior is controlled by the Upstream - * Port (PCIe r5.0, sec 7.9.16). + * A device uses local PTM Messages to request time information + * from a PTM Root that's farther upstream. 
Every device along the + * path must support PTM and have it enabled so it can handle the + * messages. Therefore, if this device is not a PTM Root, the + * upstream link partner must have PTM enabled before we can enable + * PTM. */ - ups = pci_upstream_bridge(dev); - if (pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM && - ups && ups->ptm_enabled) { - dev->ptm_granularity = ups->ptm_granularity; - dev->ptm_enabled = 1; - return; + if (!dev->ptm_root) { + ups = pci_upstream_ptm(dev); + if (!ups || !ups->ptm_enabled) + return -EINVAL; } - pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PTM); - if (!pos) - return; - - pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_PTM, sizeof(u16)); - - pci_read_config_dword(dev, pos + PCI_PTM_CAP, &cap); - local_clock = (cap & PCI_PTM_GRANULARITY_MASK) >> 8; - - /* - * There's no point in enabling PTM unless it's enabled in the - * upstream device or this device can be a PTM Root itself. Per - * the spec recommendation (PCIe r3.1, sec 7.32.3), select the - * furthest upstream Time Source as the PTM Root. - */ - if (ups && ups->ptm_enabled) { - ctrl = PCI_PTM_CTRL_ENABLE; - if (ups->ptm_granularity == 0) - dev->ptm_granularity = 0; - else if (ups->ptm_granularity > local_clock) - dev->ptm_granularity = ups->ptm_granularity; - } else { - if (cap & PCI_PTM_CAP_ROOT) { - ctrl = PCI_PTM_CTRL_ENABLE | PCI_PTM_CTRL_ROOT; - dev->ptm_root = 1; - dev->ptm_granularity = local_clock; - } else - return; - } + pci_read_config_dword(dev, ptm + PCI_PTM_CTRL, &ctrl); + ctrl |= PCI_PTM_CTRL_ENABLE; + ctrl &= ~PCI_PTM_GRANULARITY_MASK; ctrl |= dev->ptm_granularity << 8; - pci_write_config_dword(dev, pos + PCI_PTM_CTRL, ctrl); - dev->ptm_enabled = 1; + if (dev->ptm_root) + ctrl |= PCI_PTM_CTRL_ROOT; - pci_ptm_info(dev); + pci_write_config_dword(dev, ptm + PCI_PTM_CTRL, ctrl); + return 0; } +/** + * pci_enable_ptm() - Enable Precision Time Measurement + * @dev: PCI device + * @granularity: pointer to return granularity + * + * Enable Precision Time Measurement for @dev. If successful and + * @granularity is non-NULL, return the Effective Granularity. + * + * Return: zero if successful, or -EINVAL if @dev lacks a PTM Capability or + * is not a PTM Root and lacks an upstream path of PTM-enabled devices. + */ int pci_enable_ptm(struct pci_dev *dev, u8 *granularity) { - int pos; - u32 cap, ctrl; - struct pci_dev *ups; - - if (!pci_is_pcie(dev)) - return -EINVAL; - - pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PTM); - if (!pos) - return -EINVAL; - - pci_read_config_dword(dev, pos + PCI_PTM_CAP, &cap); - if (!(cap & PCI_PTM_CAP_REQ)) - return -EINVAL; - - /* - * For a PCIe Endpoint, PTM is only useful if the endpoint can - * issue PTM requests to upstream devices that have PTM enabled. - * - * For Root Complex Integrated Endpoints, there is no upstream - * device, so there must be some implementation-specific way to - * associate the endpoint with a time source. 
- */ - if (pci_pcie_type(dev) == PCI_EXP_TYPE_ENDPOINT) { - ups = pci_upstream_bridge(dev); - if (!ups || !ups->ptm_enabled) - return -EINVAL; + int rc; + char clock_desc[8]; - dev->ptm_granularity = ups->ptm_granularity; - } else if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END) { - dev->ptm_granularity = 0; - } else - return -EINVAL; + rc = __pci_enable_ptm(dev); + if (rc) + return rc; - ctrl = PCI_PTM_CTRL_ENABLE; - ctrl |= dev->ptm_granularity << 8; - pci_write_config_dword(dev, pos + PCI_PTM_CTRL, ctrl); dev->ptm_enabled = 1; - pci_ptm_info(dev); - if (granularity) *granularity = dev->ptm_granularity; + + switch (dev->ptm_granularity) { + case 0: + snprintf(clock_desc, sizeof(clock_desc), "unknown"); + break; + case 255: + snprintf(clock_desc, sizeof(clock_desc), ">254ns"); + break; + default: + snprintf(clock_desc, sizeof(clock_desc), "%uns", + dev->ptm_granularity); + break; + } + pci_info(dev, "PTM enabled%s, %s granularity\n", + dev->ptm_root ? " (root)" : "", clock_desc); + return 0; } EXPORT_SYMBOL(pci_enable_ptm); +static void __pci_disable_ptm(struct pci_dev *dev) +{ + u16 ptm = dev->ptm_cap; + u32 ctrl; + + if (!ptm) + return; + + pci_read_config_dword(dev, ptm + PCI_PTM_CTRL, &ctrl); + ctrl &= ~(PCI_PTM_CTRL_ENABLE | PCI_PTM_CTRL_ROOT); + pci_write_config_dword(dev, ptm + PCI_PTM_CTRL, ctrl); +} + +/** + * pci_disable_ptm() - Disable Precision Time Measurement + * @dev: PCI device + * + * Disable Precision Time Measurement for @dev. + */ +void pci_disable_ptm(struct pci_dev *dev) +{ + if (dev->ptm_enabled) { + __pci_disable_ptm(dev); + dev->ptm_enabled = 0; + } +} +EXPORT_SYMBOL(pci_disable_ptm); + +/* + * Disable PTM, but preserve dev->ptm_enabled so we silently re-enable it on + * resume if necessary. + */ +void pci_suspend_ptm(struct pci_dev *dev) +{ + if (dev->ptm_enabled) + __pci_disable_ptm(dev); +} + +/* If PTM was enabled before suspend, re-enable it when resuming */ +void pci_resume_ptm(struct pci_dev *dev) +{ + if (dev->ptm_enabled) + __pci_enable_ptm(dev); +} + bool pcie_ptm_enabled(struct pci_dev *dev) { if (!dev) diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 4944798e75b5..079a29ef1bf2 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -4296,6 +4296,30 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_BROADCOM, 0x9000, DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_BROADCOM, 0x9084, quirk_bridge_cavm_thrx2_pcie_root); +/* + * PCI BAR 5 is not setup correctly for the on-board AHCI controller + * on Broadcom's Vulcan processor. Added a quirk to fix BAR 5 by + * using BAR 4's resources which are populated correctly and NOT + * actually used by the AHCI controller. + */ +static void quirk_fix_vulcan_ahci_bars(struct pci_dev *dev) +{ + struct resource *r = &dev->resource[4]; + + if (!(r->flags & IORESOURCE_MEM) || (r->start == 0)) + return; + + /* Set BAR5 resource to BAR4 */ + dev->resource[5] = *r; + + /* Update BAR5 in pci config space */ + pci_write_config_dword(dev, PCI_BASE_ADDRESS_5, r->start); + + /* Clear BAR4's resource */ + memset(r, 0, sizeof(*r)); +} +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_BROADCOM, 0x9027, quirk_fix_vulcan_ahci_bars); + /* * Intersil/Techwell TW686[4589]-based video capture cards have an empty (zero) * class code. Fix it. 
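A note on the PTM rework above: the new code separates "turn the feature off in config space" from "forget that the driver wanted it". pci_suspend_ptm()/pci_resume_ptm() clear and re-program the PTM Control register but leave dev->ptm_enabled untouched, while pci_disable_ptm() drops both. The following stand-alone sketch models that pattern with made-up names (struct dev, ptm_suspend(), etc.); it is an illustration of the state handling only, not the kernel's PCI API.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical device model: ctrl_enable stands in for the PTM Control
 * register enable bit, ptm_enabled for the software bookkeeping flag. */
struct dev {
	bool ctrl_enable;	/* hardware state (config space) */
	bool ptm_enabled;	/* software intent */
};

static void ptm_enable(struct dev *d)  { d->ctrl_enable = true;  d->ptm_enabled = true; }
static void ptm_disable(struct dev *d) { d->ctrl_enable = false; d->ptm_enabled = false; }

/* Suspend turns the hardware off but preserves the intent ... */
static void ptm_suspend(struct dev *d) { if (d->ptm_enabled) d->ctrl_enable = false; }
/* ... so resume can silently restore whatever was enabled before. */
static void ptm_resume(struct dev *d)  { if (d->ptm_enabled) d->ctrl_enable = true; }

int main(void)
{
	struct dev d = { 0 };

	ptm_enable(&d);
	ptm_suspend(&d);
	printf("suspended: ctrl=%d intent=%d\n", d.ctrl_enable, d.ptm_enabled);
	ptm_resume(&d);
	printf("resumed:   ctrl=%d intent=%d\n", d.ctrl_enable, d.ptm_enabled);

	ptm_disable(&d);
	ptm_resume(&d);		/* no effect: the driver no longer wants PTM */
	printf("disabled:  ctrl=%d intent=%d\n", d.ctrl_enable, d.ptm_enabled);
	return 0;
}

This is why the PM callbacks in pci-driver.c only call the suspend/resume pair: only an explicit pci_disable_ptm() from a driver clears the intent.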
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index bbab424b0d55..ed86042fb57b 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -5676,6 +5676,13 @@ static void hub_event(struct work_struct *work) (u16) hub->change_bits[0], (u16) hub->event_bits[0]); + /* Don't disconnect USB-SATA on TrimSlice */ + if (strcmp(dev_name(hdev->bus->controller), "tegra-ehci.0") == 0) { + if ((hdev->state == 7) && (hub->change_bits[0] == 0) && + (hub->event_bits[0] == 0x2)) + hub->event_bits[0] = 0; + } + /* Lock the device, then check to see if we were * disconnected while waiting for the lock to succeed. */ usb_lock_device(hdev); diff --git a/include/linux/efi.h b/include/linux/efi.h index f87b2f5db9f8..0b156c2a4ab3 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -43,6 +43,8 @@ #define EFI_ABORTED (21 | (1UL << (BITS_PER_LONG-1))) #define EFI_SECURITY_VIOLATION (26 | (1UL << (BITS_PER_LONG-1))) +#define EFI_IS_ERROR(x) ((x) & (1UL << (BITS_PER_LONG-1))) + typedef unsigned long efi_status_t; typedef u8 efi_bool_t; typedef u16 efi_char16_t; /* UNICODE character */ @@ -849,6 +851,14 @@ extern int __init efi_setup_pcdp_console(char *); #define EFI_MEM_ATTR 10 /* Did firmware publish an EFI_MEMORY_ATTRIBUTES table? */ #define EFI_MEM_NO_SOFT_RESERVE 11 /* Is the kernel configured to ignore soft reservations? */ #define EFI_PRESERVE_BS_REGIONS 12 /* Are EFI boot-services memory segments available? */ +#define EFI_SECURE_BOOT 13 /* Are we in Secure Boot mode? */ + +enum efi_secureboot_mode { + efi_secureboot_mode_unset, + efi_secureboot_mode_unknown, + efi_secureboot_mode_disabled, + efi_secureboot_mode_enabled, +}; #ifdef CONFIG_EFI /* @@ -860,6 +870,8 @@ static inline bool efi_enabled(int feature) } extern void efi_reboot(enum reboot_mode reboot_mode, const char *__unused); +extern void __init efi_set_secure_boot(enum efi_secureboot_mode mode); + bool __pure __efi_soft_reserve_enabled(void); static inline bool __pure efi_soft_reserve_enabled(void) @@ -881,6 +893,8 @@ static inline bool efi_enabled(int feature) static inline void efi_reboot(enum reboot_mode reboot_mode, const char *__unused) {} +static inline void efi_set_secure_boot(enum efi_secureboot_mode mode) {} + static inline bool efi_soft_reserve_enabled(void) { return false; @@ -895,6 +909,7 @@ static inline void efi_find_mirror(void) {} #endif extern int efi_status_to_err(efi_status_t status); +extern const char *efi_status_to_str(efi_status_t status); /* * Variable Attributes @@ -1104,13 +1119,6 @@ static inline bool efi_runtime_disabled(void) { return true; } extern void efi_call_virt_check_flags(unsigned long flags, const char *call); extern unsigned long efi_call_virt_save_flags(void); -enum efi_secureboot_mode { - efi_secureboot_mode_unset, - efi_secureboot_mode_unknown, - efi_secureboot_mode_disabled, - efi_secureboot_mode_enabled, -}; - static inline enum efi_secureboot_mode efi_get_secureboot_mode(efi_get_variable_t *get_var) { diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h index 60fff133c0b1..fa0aa514c70f 100644 --- a/include/linux/lsm_hook_defs.h +++ b/include/linux/lsm_hook_defs.h @@ -395,6 +395,8 @@ LSM_HOOK(void, LSM_RET_VOID, bpf_prog_free_security, struct bpf_prog_aux *aux) #endif /* CONFIG_BPF_SYSCALL */ LSM_HOOK(int, 0, locked_down, enum lockdown_reason what) +LSM_HOOK(int, 0, lock_kernel_down, const char *where, enum lockdown_reason level) + #ifdef CONFIG_PERF_EVENTS LSM_HOOK(int, 0, perf_event_open, struct perf_event_attr *attr, int type) diff --git 
a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index 3aa6030302f5..23b63de268a3 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h @@ -1557,6 +1557,12 @@ * * @what: kernel feature being accessed * + * @lock_kernel_down + * Put the kernel into lock-down mode. + * + * @where: Where the lock-down is originating from (e.g. command line option) + * @level: The lock-down level (can only increase) + * * Security hooks for perf events * * @perf_event_open: diff --git a/include/linux/pci.h b/include/linux/pci.h index 060af91bafcd..cb5f796e3319 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -475,6 +475,7 @@ struct pci_dev { unsigned int broken_cmd_compl:1; /* No compl for some cmds */ #endif #ifdef CONFIG_PCIE_PTM + u16 ptm_cap; /* PTM Capability */ unsigned int ptm_root:1; unsigned int ptm_enabled:1; u8 ptm_granularity; @@ -1677,10 +1678,12 @@ bool pci_ats_disabled(void); #ifdef CONFIG_PCIE_PTM int pci_enable_ptm(struct pci_dev *dev, u8 *granularity); +void pci_disable_ptm(struct pci_dev *dev); bool pcie_ptm_enabled(struct pci_dev *dev); #else static inline int pci_enable_ptm(struct pci_dev *dev, u8 *granularity) { return -EINVAL; } +static inline void pci_disable_ptm(struct pci_dev *dev) { } static inline bool pcie_ptm_enabled(struct pci_dev *dev) { return false; } #endif diff --git a/include/linux/rmi.h b/include/linux/rmi.h index ab7eea01ab42..fff7c5f737fc 100644 --- a/include/linux/rmi.h +++ b/include/linux/rmi.h @@ -364,6 +364,7 @@ struct rmi_driver_data { struct rmi4_attn_data attn_data; DECLARE_KFIFO(attn_fifo, struct rmi4_attn_data, 16); + struct work_struct attn_work; }; int rmi_register_transport_device(struct rmi_transport_dev *xport); diff --git a/include/linux/security.h b/include/linux/security.h index 7bd0c490703d..7779eaf1ffa1 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -475,6 +475,7 @@ int security_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen); int security_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen); int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen); int security_locked_down(enum lockdown_reason what); +int security_lock_kernel_down(const char *where, enum lockdown_reason level); #else /* CONFIG_SECURITY */ static inline int call_blocking_lsm_notifier(enum lsm_event event, void *data) @@ -1358,6 +1359,10 @@ static inline int security_locked_down(enum lockdown_reason what) { return 0; } +static inline int security_lock_kernel_down(const char *where, enum lockdown_reason level) +{ + return 0; +} #endif /* CONFIG_SECURITY */ #if defined(CONFIG_SECURITY) && defined(CONFIG_WATCH_QUEUE) diff --git a/include/net/neighbour.h b/include/net/neighbour.h index 3827a6b395fd..bce6b228cf56 100644 --- a/include/net/neighbour.h +++ b/include/net/neighbour.h @@ -83,7 +83,7 @@ struct neigh_parms { struct rcu_head rcu_head; int reachable_time; - int qlen; + u32 qlen; int data[NEIGH_VAR_DATA_MAX]; DECLARE_BITMAP(data_state, NEIGH_VAR_DATA_MAX); }; diff --git a/init/Kconfig b/init/Kconfig index 532362fcfe31..30d547537ea0 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1678,7 +1678,7 @@ config AIO this option saves about 7k. 
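The lock_kernel_down hook documented above is specified as monotonic: the lock-down level "can only increase". A rough stand-alone illustration of that contract follows; the constants, return value and message are simplified stand-ins for the lockdown LSM, not the real implementation.

#include <stdio.h>

/* Simplified stand-ins for enum lockdown_reason levels. */
enum { LOCKDOWN_NONE = 0, LOCKDOWN_INTEGRITY_MAX = 1, LOCKDOWN_CONFIDENTIALITY_MAX = 2 };

static int kernel_locked_down = LOCKDOWN_NONE;

/* Record where the request came from and never let the level go back down.
 * (The real hook returns an errno when the request is not an increase.) */
static int lock_kernel_down(const char *where, int level)
{
	if (kernel_locked_down >= level)
		return -1;	/* already at least this locked down */
	kernel_locked_down = level;
	printf("Kernel is locked down from %s\n", where);
	return 0;
}

int main(void)
{
	lock_kernel_down("EFI Secure Boot mode", LOCKDOWN_INTEGRITY_MAX);
	lock_kernel_down("command line", LOCKDOWN_INTEGRITY_MAX);	/* rejected: not higher */
	lock_kernel_down("command line", LOCKDOWN_CONFIDENTIALITY_MAX);
	return 0;
}

The setup_arch() changes for x86 and s390 earlier in this patch call the security_lock_kernel_down() wrapper with LOCKDOWN_INTEGRITY_MAX in exactly this one-way fashion.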
diff --git a/include/linux/rmi.h b/include/linux/rmi.h
index ab7eea01ab42..fff7c5f737fc 100644
--- a/include/linux/rmi.h
+++ b/include/linux/rmi.h
@@ -364,6 +364,7 @@ struct rmi_driver_data {
 
 	struct rmi4_attn_data attn_data;
 	DECLARE_KFIFO(attn_fifo, struct rmi4_attn_data, 16);
+	struct work_struct attn_work;
 };
 
 int rmi_register_transport_device(struct rmi_transport_dev *xport);
diff --git a/include/linux/security.h b/include/linux/security.h
index 7bd0c490703d..7779eaf1ffa1 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -475,6 +475,7 @@ int security_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen);
 int security_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen);
 int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen);
 int security_locked_down(enum lockdown_reason what);
+int security_lock_kernel_down(const char *where, enum lockdown_reason level);
 #else /* CONFIG_SECURITY */
 
 static inline int call_blocking_lsm_notifier(enum lsm_event event, void *data)
@@ -1358,6 +1359,10 @@ static inline int security_locked_down(enum lockdown_reason what)
 {
 	return 0;
 }
+static inline int security_lock_kernel_down(const char *where, enum lockdown_reason level)
+{
+	return 0;
+}
 #endif /* CONFIG_SECURITY */
 
 #if defined(CONFIG_SECURITY) && defined(CONFIG_WATCH_QUEUE)
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 3827a6b395fd..bce6b228cf56 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -83,7 +83,7 @@ struct neigh_parms {
 	struct rcu_head rcu_head;
 
 	int	reachable_time;
-	int	qlen;
+	u32	qlen;
 	int	data[NEIGH_VAR_DATA_MAX];
 	DECLARE_BITMAP(data_state, NEIGH_VAR_DATA_MAX);
 };
diff --git a/init/Kconfig b/init/Kconfig
index 532362fcfe31..30d547537ea0 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1678,7 +1678,7 @@ config AIO
 	  this option saves about 7k.
 
 config IO_URING
-	bool "Enable IO uring support" if EXPERT
+	bool "Enable IO uring support"
 	select IO_WQ
 	default y
 	help
diff --git a/kernel/module/signing.c b/kernel/module/signing.c
index a2ff4242e623..f0d2be1ee4f1 100644
--- a/kernel/module/signing.c
+++ b/kernel/module/signing.c
@@ -61,10 +61,17 @@ int mod_verify_sig(const void *mod, struct load_info *info)
 	modlen -= sig_len + sizeof(ms);
 	info->len = modlen;
 
-	return verify_pkcs7_signature(mod, modlen, mod + modlen, sig_len,
+	ret = verify_pkcs7_signature(mod, modlen, mod + modlen, sig_len,
 				      VERIFY_USE_SECONDARY_KEYRING,
 				      VERIFYING_MODULE_SIGNATURE,
 				      NULL, NULL);
+	if (ret == -ENOKEY && IS_ENABLED(CONFIG_INTEGRITY_PLATFORM_KEYRING)) {
+		ret = verify_pkcs7_signature(mod, modlen, mod + modlen, sig_len,
+					     VERIFY_USE_PLATFORM_KEYRING,
+					     VERIFYING_MODULE_SIGNATURE,
+					     NULL, NULL);
+	}
+	return ret;
 }
 
 int module_sig_check(struct load_info *info, int flags)
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 84755db81e9d..35f5a3125808 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -307,7 +307,31 @@ static int neigh_del_timer(struct neighbour *n)
 	return 0;
 }
 
-static void pneigh_queue_purge(struct sk_buff_head *list, struct net *net)
+static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev,
+						   int family)
+{
+	switch (family) {
+	case AF_INET:
+		return __in_dev_arp_parms_get_rcu(dev);
+	case AF_INET6:
+		return __in6_dev_nd_parms_get_rcu(dev);
+	}
+	return NULL;
+}
+
+static void neigh_parms_qlen_dec(struct net_device *dev, int family)
+{
+	struct neigh_parms *p;
+
+	rcu_read_lock();
+	p = neigh_get_dev_parms_rcu(dev, family);
+	if (p)
+		p->qlen--;
+	rcu_read_unlock();
+}
+
+static void pneigh_queue_purge(struct sk_buff_head *list, struct net *net,
+			       int family)
 {
 	struct sk_buff_head tmp;
 	unsigned long flags;
@@ -321,13 +345,7 @@ static void pneigh_queue_purge(struct sk_buff_head *list, struct net *net)
 		struct net_device *dev = skb->dev;
 
 		if (net == NULL || net_eq(dev_net(dev), net)) {
-			struct in_device *in_dev;
-
-			rcu_read_lock();
-			in_dev = __in_dev_get_rcu(dev);
-			if (in_dev)
-				in_dev->arp_parms->qlen--;
-			rcu_read_unlock();
+			neigh_parms_qlen_dec(dev, family);
 			__skb_unlink(skb, list);
 			__skb_queue_tail(&tmp, skb);
 		}
@@ -409,7 +427,8 @@ static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev,
 	write_lock_bh(&tbl->lock);
 	neigh_flush_dev(tbl, dev, skip_perm);
 	pneigh_ifdown_and_unlock(tbl, dev);
-	pneigh_queue_purge(&tbl->proxy_queue, dev ? dev_net(dev) : NULL);
+	pneigh_queue_purge(&tbl->proxy_queue, dev ? dev_net(dev) : NULL,
+			   tbl->family);
 	if (skb_queue_empty_lockless(&tbl->proxy_queue))
 		del_timer_sync(&tbl->proxy_timer);
 	return 0;
@@ -1621,13 +1640,8 @@ static void neigh_proxy_process(struct timer_list *t)
 
 		if (tdif <= 0) {
 			struct net_device *dev = skb->dev;
-			struct in_device *in_dev;
 
-			rcu_read_lock();
-			in_dev = __in_dev_get_rcu(dev);
-			if (in_dev)
-				in_dev->arp_parms->qlen--;
-			rcu_read_unlock();
+			neigh_parms_qlen_dec(dev, tbl->family);
 			__skb_unlink(skb, &tbl->proxy_queue);
 
 			if (tbl->proxy_redo && netif_running(dev)) {
@@ -1821,7 +1835,7 @@ int neigh_table_clear(int index, struct neigh_table *tbl)
 	cancel_delayed_work_sync(&tbl->managed_work);
 	cancel_delayed_work_sync(&tbl->gc_work);
 	del_timer_sync(&tbl->proxy_timer);
-	pneigh_queue_purge(&tbl->proxy_queue, NULL);
+	pneigh_queue_purge(&tbl->proxy_queue, NULL, tbl->family);
 	neigh_ifdown(tbl, NULL);
 	if (atomic_read(&tbl->entries))
 		pr_crit("neighbour leakage\n");
@@ -3542,18 +3556,6 @@ static int proc_unres_qlen(struct ctl_table *ctl, int write,
 	return ret;
 }
 
-static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev,
-						   int family)
-{
-	switch (family) {
-	case AF_INET:
-		return __in_dev_arp_parms_get_rcu(dev);
-	case AF_INET6:
-		return __in6_dev_nd_parms_get_rcu(dev);
-	}
-	return NULL;
-}
-
 static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p,
 				  int index)
 {
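The neighbour changes above make the proxy-queue counter unsigned and route every decrement through a family-aware helper instead of assuming IPv4 parms. For orientation only, here is a hypothetical sketch of the enqueue side that this accounting has to balance; the real increment happens in pneigh_enqueue(), which is not part of this diff and may differ in detail.

	#include <net/neighbour.h>

	/*
	 * Hypothetical illustration, not from this series: every skb parked on
	 * tbl->proxy_queue bumps the owning neigh_parms::qlen, so each path that
	 * unlinks an skb (timer expiry, purge, ifdown) must call
	 * neigh_parms_qlen_dec() for the matching address family.
	 */
	static void demo_proxy_park(struct neigh_table *tbl, struct neigh_parms *p,
				    struct sk_buff *skb)
	{
		if (p->qlen > NEIGH_VAR(p, PROXY_QLEN)) {
			kfree_skb(skb);		/* queue full: drop, no accounting */
			return;
		}

		p->qlen++;			/* balanced by neigh_parms_qlen_dec() */
		skb_queue_tail(&tbl->proxy_queue, skb);
	}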
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 2dc97583d279..e9a7f70a54df 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -888,13 +888,13 @@ int fib_nh_match(struct net *net, struct fib_config *cfg, struct fib_info *fi,
 		return 1;
 	}
 
+	/* cannot match on nexthop object attributes */
+	if (fi->nh)
+		return 1;
+
 	if (cfg->fc_oif || cfg->fc_gw_family) {
 		struct fib_nh *nh;
 
-		/* cannot match on nexthop object attributes */
-		if (fi->nh)
-			return 1;
-
 		nh = fib_info_nh(fi, 0);
 		if (cfg->fc_encap) {
 			if (fib_encap_match(net, cfg->fc_encap_type,
diff --git a/scripts/pahole-flags.sh b/scripts/pahole-flags.sh
index 0d99ef17e4a5..81c8e082ec57 100755
--- a/scripts/pahole-flags.sh
+++ b/scripts/pahole-flags.sh
@@ -20,4 +20,7 @@ if [ "${pahole_ver}" -ge "122" ]; then
 	extra_paholeopt="${extra_paholeopt} -j"
 fi
 
+# temporary workaround to disable enum64
+extra_paholeopt="${extra_paholeopt} --skip_encoding_btf_enum64"
+
 echo ${extra_paholeopt}
diff --git a/scripts/tags.sh b/scripts/tags.sh
index e137cf15aae9..2ed2341f7967 100755
--- a/scripts/tags.sh
+++ b/scripts/tags.sh
@@ -16,6 +16,8 @@ fi
 ignore="$(echo "$RCS_FIND_IGNORE" | sed 's|\\||g' )"
 # tags and cscope files should also ignore MODVERSION *.mod.c files
 ignore="$ignore ( -name *.mod.c ) -prune -o"
+# RHEL tags and cscope should also ignore redhat/rpm
+ignore="$ignore ( -path redhat/rpm ) -prune -o"
 
 # Use make KBUILD_ABS_SRCTREE=1 {tags|cscope}
 # to force full paths for a non-O= build
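The load_uefi.c hunks below switch the certificate-loading error messages over to the efi_status_to_str() helper declared earlier in this series. The helper's body lives in drivers/firmware/efi/efi.c and is not shown here; the following is only a rough, hedged sketch of such a status-to-string mapping, and the exact set of codes covered by the real helper may differ.

	#include <linux/efi.h>

	/*
	 * Hypothetical sketch; the real efi_status_to_str() is in
	 * drivers/firmware/efi/efi.c and may handle more codes.
	 */
	const char *demo_efi_status_to_str(efi_status_t status)
	{
		switch (status) {
		case EFI_SUCCESS:		return "EFI_SUCCESS";
		case EFI_INVALID_PARAMETER:	return "EFI_INVALID_PARAMETER";
		case EFI_OUT_OF_RESOURCES:	return "EFI_OUT_OF_RESOURCES";
		case EFI_DEVICE_ERROR:		return "EFI_DEVICE_ERROR";
		case EFI_BUFFER_TOO_SMALL:	return "EFI_BUFFER_TOO_SMALL";
		case EFI_NOT_FOUND:		return "EFI_NOT_FOUND";
		case EFI_SECURITY_VIOLATION:	return "EFI_SECURITY_VIOLATION";
		default:			return "unknown EFI status";
		}
	}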
diff --git a/security/integrity/platform_certs/load_uefi.c b/security/integrity/platform_certs/load_uefi.c
index b78753d27d8e..f0b37800431a 100644
--- a/security/integrity/platform_certs/load_uefi.c
+++ b/security/integrity/platform_certs/load_uefi.c
@@ -73,7 +73,8 @@ static __init void *get_cert_list(efi_char16_t *name, efi_guid_t *guid,
 		return NULL;
 
 	if (*status != EFI_BUFFER_TOO_SMALL) {
-		pr_err("Couldn't get size: 0x%lx\n", *status);
+		pr_err("Couldn't get size: %s (0x%lx)\n",
+		       efi_status_to_str(*status), *status);
 		return NULL;
 	}
 
@@ -84,7 +85,8 @@ static __init void *get_cert_list(efi_char16_t *name, efi_guid_t *guid,
 	*status = efi.get_variable(name, guid, NULL, &lsize, db);
 	if (*status != EFI_SUCCESS) {
 		kfree(db);
-		pr_err("Error reading db var: 0x%lx\n", *status);
+		pr_err("Error reading db var: %s (0x%lx)\n",
+		       efi_status_to_str(*status), *status);
 		return NULL;
 	}
diff --git a/security/lockdown/Kconfig b/security/lockdown/Kconfig
index e84ddf484010..d0501353a4b9 100644
--- a/security/lockdown/Kconfig
+++ b/security/lockdown/Kconfig
@@ -16,6 +16,19 @@ config SECURITY_LOCKDOWN_LSM_EARLY
 	  subsystem is fully initialised. If enabled, lockdown will
 	  unconditionally be called before any other LSMs.
 
+config LOCK_DOWN_IN_EFI_SECURE_BOOT
+	bool "Lock down the kernel in EFI Secure Boot mode"
+	default n
+	depends on EFI && SECURITY_LOCKDOWN_LSM_EARLY
+	help
+	  UEFI Secure Boot provides a mechanism for ensuring that the firmware
+	  will only load signed bootloaders and kernels. Secure boot mode may
+	  be determined from EFI variables provided by the system firmware if
+	  not indicated by the boot parameters.
+
+	  Enabling this option results in kernel lockdown being triggered if
+	  EFI Secure Boot is set.
+
 choice
 	prompt "Kernel default lockdown mode"
 	default LOCK_DOWN_KERNEL_FORCE_NONE
diff --git a/security/lockdown/lockdown.c b/security/lockdown/lockdown.c
index 87cbdc64d272..18555cf18da7 100644
--- a/security/lockdown/lockdown.c
+++ b/security/lockdown/lockdown.c
@@ -73,6 +73,7 @@ static int lockdown_is_locked_down(enum lockdown_reason what)
 
 static struct security_hook_list lockdown_hooks[] __lsm_ro_after_init = {
 	LSM_HOOK_INIT(locked_down, lockdown_is_locked_down),
+	LSM_HOOK_INIT(lock_kernel_down, lock_kernel_down),
 };
 
 static int __init lockdown_lsm_init(void)
diff --git a/security/security.c b/security/security.c
index 4b95de24bc8d..10047790e96e 100644
--- a/security/security.c
+++ b/security/security.c
@@ -2623,6 +2623,12 @@ int security_locked_down(enum lockdown_reason what)
 }
 EXPORT_SYMBOL(security_locked_down);
 
+int security_lock_kernel_down(const char *where, enum lockdown_reason level)
+{
+	return call_int_hook(lock_kernel_down, 0, where, level);
+}
+EXPORT_SYMBOL(security_lock_kernel_down);
+
 #ifdef CONFIG_PERF_EVENTS
 int security_perf_event_open(struct perf_event_attr *attr, int type)
 {
diff --git a/tools/testing/selftests/net/fib_nexthops.sh b/tools/testing/selftests/net/fib_nexthops.sh
index d5a0dd548989..ee5e98204d3d 100755
--- a/tools/testing/selftests/net/fib_nexthops.sh
+++ b/tools/testing/selftests/net/fib_nexthops.sh
@@ -1223,6 +1223,11 @@ ipv4_fcnal()
 	log_test $rc 0 "Delete nexthop route warning"
 	run_cmd "$IP route delete 172.16.101.1/32 nhid 12"
 	run_cmd "$IP nexthop del id 12"
+
+	run_cmd "$IP nexthop add id 21 via 172.16.1.6 dev veth1"
+	run_cmd "$IP ro add 172.16.101.0/24 nhid 21"
+	run_cmd "$IP ro del 172.16.101.0/24 nexthop via 172.16.1.7 dev veth1 nexthop via 172.16.1.8 dev veth1"
+	log_test $? 2 "Delete multipath route with only nh id based entry"
 }
 
 ipv4_grp_fcnal()
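The LSM_HOOK_INIT(lock_kernel_down, lock_kernel_down) entry in the lockdown.c hunk above points at a function that already exists in security/lockdown/lockdown.c (it backs the lockdown= command-line handling), which is why its body does not appear in this diff. For orientation, here is a hedged sketch of what that pre-existing helper roughly looks like upstream; details such as the log message may differ in the tree this series applies to.

	/*
	 * Rough sketch of the pre-existing helper in security/lockdown/lockdown.c
	 * that the new hook entry exposes; not added by this patch and possibly
	 * different in detail.  kernel_locked_down is the LSM's internal state.
	 */
	static int lock_kernel_down(const char *where, enum lockdown_reason level)
	{
		if (kernel_locked_down >= level)
			return -EPERM;

		kernel_locked_down = level;
		pr_notice("Kernel is locked down from %s; see man kernel_lockdown.7\n",
			  where);
		return 0;
	}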