From 614b7d7d7b10d8e3f146bed311215f818be5accd Mon Sep 17 00:00:00 2001
From: "Justin M. Forbes"
Date: Wed, 10 Jun 2020 17:15:58 -0500
Subject: Initial 5.7.2 rebase

Signed-off-by: Justin M. Forbes
---
 ...et_user_check-in-case-uaccess_-calls-are-.patch | 86 ++++++++++++++++++++++
 1 file changed, 86 insertions(+)
 create mode 100644 0001-ARM-fix-__get_user_check-in-case-uaccess_-calls-are-.patch

diff --git a/0001-ARM-fix-__get_user_check-in-case-uaccess_-calls-are-.patch b/0001-ARM-fix-__get_user_check-in-case-uaccess_-calls-are-.patch
new file mode 100644
index 000000000..c5a43608f
--- /dev/null
+++ b/0001-ARM-fix-__get_user_check-in-case-uaccess_-calls-are-.patch
@@ -0,0 +1,86 @@
+From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
+From: Masahiro Yamada
+Date: Mon, 30 Sep 2019 14:59:25 +0900
+Subject: [PATCH] ARM: fix __get_user_check() in case uaccess_* calls are not
+ inlined
+
+KernelCI reports that bcm2835_defconfig is no longer booting since
+commit ac7c3e4ff401 ("compiler: enable CONFIG_OPTIMIZE_INLINING
+forcibly"):
+
+  https://lkml.org/lkml/2019/9/26/825
+
+I also received a regression report from Nicolas Saenz Julienne:
+
+  https://lkml.org/lkml/2019/9/27/263
+
+This problem has cropped up on arch/arm/configs/bcm2835_defconfig
+because it enables CONFIG_CC_OPTIMIZE_FOR_SIZE. With -Os, the
+compiler tends not to inline functions. I was able to reproduce it
+with other boards and defconfig files by manually enabling
+CONFIG_CC_OPTIMIZE_FOR_SIZE.
+
+__get_user_check() specifically uses the r0, r1, and r2 registers.
+So, uaccess_save_and_enable() and uaccess_restore() must be inlined
+in order to avoid those registers being overwritten in the callees.
+
+Prior to commit 9012d011660e ("compiler: allow all arches to enable
+CONFIG_OPTIMIZE_INLINING"), the 'inline' marker was always enough to
+inline functions, except on x86.
+
+Since that commit, all architectures can enable
+CONFIG_OPTIMIZE_INLINING, so __always_inline is now the only
+guaranteed way to force inlining.
+
+I want to leave the compiler as much freedom as possible about the
+inlining decision, so I changed the function call order instead of
+adding __always_inline.
+
+Call uaccess_save_and_enable() before assigning __p ("r0"), and call
+uaccess_restore() only after the result has been moved out of __e
+("r0").
+
+Fixes: 9012d011660e ("compiler: allow all arches to enable CONFIG_OPTIMIZE_INLINING")
+Reported-by: "kernelci.org bot"
+Reported-by: Nicolas Saenz Julienne
+Signed-off-by: Masahiro Yamada
+Acked-by: Arnd Bergmann
+Tested-by: Nicolas Saenz Julienne
+Tested-by: Fabrizio Castro
+Tested-by: Geert Uytterhoeven
+---
+ arch/arm/include/asm/uaccess.h | 8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
+index 98c6b91be4a8..60055827dddc 100644
+--- a/arch/arm/include/asm/uaccess.h
++++ b/arch/arm/include/asm/uaccess.h
+@@ -191,11 +191,12 @@ extern int __get_user_64t_4(void *);
+ #define __get_user_check(x, p)						\
+ 	({								\
+ 		unsigned long __limit = current_thread_info()->addr_limit - 1; \
++		unsigned int __ua_flags = uaccess_save_and_enable();	\
+ 		register typeof(*(p)) __user *__p asm("r0") = (p);	\
+ 		register __inttype(x) __r2 asm("r2");			\
+ 		register unsigned long __l asm("r1") = __limit;		\
+ 		register int __e asm("r0");				\
+-		unsigned int __ua_flags = uaccess_save_and_enable();	\
++		unsigned int __err;					\
+ 		switch (sizeof(*(__p))) {				\
+ 		case 1:							\
+ 			if (sizeof((x)) >= 8)				\
+@@ -223,9 +224,10 @@ extern int __get_user_64t_4(void *);
+ 			break;						\
+ 		default: __e = __get_user_bad(); break;			\
+ 		}							\
+-		uaccess_restore(__ua_flags);				\
++		__err = __e;						\
+ 		x = (typeof(*(p))) __r2;				\
+-		__e;							\
++		uaccess_restore(__ua_flags);				\
++		__err;							\
+ 	})
+ 
+ #define get_user(x, p)						\
+-- 
+2.26.2
+
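
For readers who want to see the failure mode outside the kernel, here is a
minimal stand-alone sketch of the same hazard. It is not kernel code:
helper(), demo() and demo_fixed() are invented names, the asm statement is a
placeholder for the real __get_user_* call, and it assumes a 32-bit ARM cross
compiler (for instance arm-linux-gnueabi-gcc -Os -c sketch.c). Under the
AAPCS, r0-r3 are caller-saved, so any call the compiler leaves out of line is
free to overwrite a local register variable pinned to r0:

/*
 * sketch.c - illustrative only, NOT kernel code.
 */
static int helper_state;

/*
 * Plain 'inline' is only a hint; with -Os the compiler may emit an
 * out-of-line copy and call it, and an ARM function call may clobber
 * r0-r3.
 */
static inline int helper(void)
{
	return helper_state++;
}

/*
 * Broken ordering (what __get_user_check() used to do): r0 is pinned
 * first, then a potentially out-of-line call is made before the asm
 * that consumes r0.
 */
int demo(int value)
{
	register int in0 asm("r0") = value;	/* operand pinned to r0 */
	register int out0 asm("r0");

	helper();	/* if not inlined, this call may clobber r0 */

	asm volatile("mov %0, %1" : "=r" (out0) : "r" (in0));
	return out0;
}

/*
 * Fixed ordering (what the patch does): call before pinning r0, copy
 * the result into an ordinary variable, and only then call again.
 */
int demo_fixed(int value)
{
	int err;

	helper();	/* mirrors uaccess_save_and_enable() */
	{
		register int in0 asm("r0") = value;
		register int out0 asm("r0");

		asm volatile("mov %0, %1" : "=r" (out0) : "r" (in0));
		err = out0;	/* mirrors __err = __e */
	}
	helper();	/* mirrors uaccess_restore(); r0 no longer live */

	return err;
}

demo() has the pre-patch ordering and can return garbage once helper() is
emitted out of line; demo_fixed() mirrors the patch by making both calls
while r0 holds nothing live, analogous to the
uaccess_save_and_enable()/__err/uaccess_restore() reordering above.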