author     Justin M. Forbes <jforbes@fedoraproject.org>    2021-06-17 10:08:38 -0500
committer  Justin M. Forbes <jforbes@fedoraproject.org>    2021-06-17 10:08:38 -0500
commit     124eb98dad29a958da9c4a41e456b160c988ac8e (patch)
tree       80c440456a9d721be36db0d8925d9e11424ca46d /patch-5.13.0-redhat.patch
parent     ef5dd18db57999683d8d646d14b651cde817f5fe (diff)
kernel-5.13.0-0.rc6.20210617git70585216fe77.48
* Thu Jun 17 2021 Fedora Kernel Team <kernel-team@fedoraproject.org> [5.13.0-0.rc6.20210617git70585216fe77.48]
- Revert "powerpc: Switch to relative jump labels" (Don Zickus)
- spec: Enable selftests rpm build (Jiri Olsa)
- spec: Allow bpf selftest/samples to fail (Jiri Olsa)
- bpf, selftests: Disable tests that need clang13 (Toke Høiland-Jørgensen)
- kvm: Add kvm_stat.service file and kvm_stat logrotate config to the tools (Jiri Benc)
- kernel.spec: Add missing source files to kernel-selftests-internal (Jiri Benc)
- kernel.spec: selftests: add net/forwarding to TARGETS list (Jiri Benc)
- kernel.spec: selftests: add build requirement on libmnl-devel (Jiri Benc)
- kernel.spec: add action.o to kernel-selftests-internal (Jiri Benc)
- kernel.spec: avoid building bpftool repeatedly (Jiri Benc)
- kernel.spec: selftests require python3 (Jiri Benc)
- kernel.spec: skip selftests that failed to build (Jiri Benc)
- kernel.spec: fix installation of bpf selftests (Jiri Benc)
- redhat: fix samples and selftests make options (Jiri Benc)
- kernel.spec: enable mptcp selftests for kernel-selftests-internal (Jiri Benc)
- kernel.spec: Do not export shared objects from libexecdir to RPM Provides (Jiri Benc)
- kernel.spec: add missing dependency for the which package (Jiri Benc)
- kernel.spec: add netfilter selftests to kernel-selftests-internal (Jiri Benc)
- kernel.spec: move slabinfo and page_owner_sort debuginfo to tools-debuginfo (Jiri Benc)
- kernel.spec: package and ship VM tools (Jiri Benc)
- configs: enable CONFIG_PAGE_OWNER (Jiri Benc)
- kernel.spec: add coreutils (Jiri Benc)
- kernel.spec: add netdevsim driver selftests to kernel-selftests-internal (Jiri Benc)
- redhat/Makefile: Clean out the --without flags from the baseonly rule (Jiri Benc)
- kernel.spec: Stop building unnecessary rpms for baseonly builds (Jiri Benc)
- kernel.spec: disable more kabi switches for gcov build (Jiri Benc)
- kernel.spec: Rename kabi-dw base (Jiri Benc)
- kernel.spec: Fix error messages during build of zfcpdump kernel (Jiri Benc)
- kernel.spec: perf: remove bpf examples (Jiri Benc)
- kernel.spec: selftests should not depend on modules-internal (Jiri Benc)
- kernel.spec: build samples (Jiri Benc)
- kernel.spec: tools: sync missing options with RHEL 8 (Jiri Benc)
Resolves: rhbz#
Signed-off-by: Justin M. Forbes <jforbes@fedoraproject.org>
Diffstat (limited to 'patch-5.13.0-redhat.patch')
-rw-r--r--  patch-5.13.0-redhat.patch | 2346
1 files changed, 2326 insertions, 20 deletions
diff --git a/patch-5.13.0-redhat.patch b/patch-5.13.0-redhat.patch index 1ad256aae..e43812adc 100644 --- a/patch-5.13.0-redhat.patch +++ b/patch-5.13.0-redhat.patch @@ -1,10 +1,11 @@ Documentation/admin-guide/kdump/kdump.rst | 11 + Documentation/admin-guide/kernel-parameters.txt | 9 + Kconfig | 2 + - Kconfig.redhat | 17 ++ + Kconfig.redhat | 17 + Makefile | 13 +- arch/arm/Kconfig | 4 +- arch/arm64/Kconfig | 3 +- + arch/arm64/boot/dts/rockchip/rk3399.dtsi | 2 +- arch/arm64/kernel/acpi.c | 4 + arch/powerpc/Kconfig | 1 - arch/powerpc/include/asm/jump_label.h | 21 +- @@ -13,21 +14,21 @@ arch/s390/kernel/ipl.c | 5 + arch/s390/kernel/setup.c | 4 + arch/x86/kernel/cpu/common.c | 1 + - arch/x86/kernel/setup.c | 69 ++++- + arch/x86/kernel/setup.c | 69 ++- drivers/acpi/apei/hest.c | 8 + drivers/acpi/irq.c | 17 +- drivers/acpi/scan.c | 9 + - drivers/ata/libahci.c | 18 ++ - drivers/char/ipmi/ipmi_dmi.c | 15 ++ + drivers/ata/libahci.c | 18 + + drivers/char/ipmi/ipmi_dmi.c | 15 + drivers/char/ipmi/ipmi_msghandler.c | 16 +- drivers/firmware/efi/Makefile | 1 + - drivers/firmware/efi/efi.c | 124 ++++++--- - drivers/firmware/efi/secureboot.c | 38 +++ - drivers/hid/hid-rmi.c | 64 ----- - drivers/hwtracing/coresight/coresight-etm4x-core.c | 19 ++ + drivers/firmware/efi/efi.c | 124 +++-- + drivers/firmware/efi/secureboot.c | 38 ++ + drivers/hid/hid-rmi.c | 64 --- + drivers/hwtracing/coresight/coresight-etm4x-core.c | 19 + drivers/infiniband/sw/rxe/rxe.c | 2 + - drivers/input/rmi4/rmi_driver.c | 124 +++++---- - drivers/iommu/iommu.c | 22 ++ + drivers/input/rmi4/rmi_driver.c | 124 +++-- + drivers/iommu/iommu.c | 22 + drivers/message/fusion/mptsas.c | 10 + drivers/message/fusion/mptspi.c | 11 + drivers/net/ethernet/intel/ice/ice_main.c | 1 + @@ -36,8 +37,10 @@ drivers/nvme/host/core.c | 22 +- drivers/nvme/host/multipath.c | 19 +- drivers/nvme/host/nvme.h | 4 + + drivers/pci/controller/pcie-rockchip-host.c | 12 +- + drivers/pci/of.c | 17 +- drivers/pci/pci-driver.c | 29 ++ - drivers/pci/quirks.c | 24 ++ + drivers/pci/quirks.c | 24 + drivers/scsi/aacraid/linit.c | 2 + drivers/scsi/be2iscsi/be_main.c | 2 + drivers/scsi/hpsa.c | 4 + @@ -46,25 +49,25 @@ drivers/scsi/mpt3sas/mpt3sas_scsih.c | 4 + drivers/scsi/qla2xxx/qla_os.c | 6 + drivers/scsi/qla4xxx/ql4_os.c | 2 + - drivers/scsi/smartpqi/smartpqi_init.c | 16 ++ + drivers/scsi/smartpqi/smartpqi_init.c | 16 + drivers/usb/core/hub.c | 7 + include/linux/efi.h | 22 +- - include/linux/kernel.h | 34 ++- + include/linux/kernel.h | 34 +- include/linux/lsm_hook_defs.h | 2 + include/linux/lsm_hooks.h | 6 + include/linux/module.h | 1 + include/linux/pci.h | 4 + - include/linux/rh_kabi.h | 297 +++++++++++++++++++++ + include/linux/rh_kabi.h | 297 +++++++++++ include/linux/rmi.h | 1 + include/linux/security.h | 5 + init/Kconfig | 2 +- kernel/Makefile | 1 + - kernel/bpf/syscall.c | 24 ++ + kernel/bpf/syscall.c | 24 + kernel/crash_core.c | 28 +- kernel/module.c | 2 + kernel/module_signing.c | 9 +- kernel/panic.c | 14 + - kernel/rh_taint.c | 93 +++++++ + kernel/rh_taint.c | 93 ++++ kernel/sysctl.c | 5 + mm/cma.c | 10 + scripts/mod/modpost.c | 8 + @@ -73,7 +76,27 @@ security/lockdown/Kconfig | 13 + security/lockdown/lockdown.c | 1 + security/security.c | 6 + - 75 files changed, 1232 insertions(+), 198 deletions(-) + tools/testing/selftests/bpf/Makefile | 1 - + tools/testing/selftests/bpf/prog_tests/atomics.c | 246 ---------- + .../testing/selftests/bpf/prog_tests/bpf_tcp_ca.c | 280 ----------- + .../testing/selftests/bpf/prog_tests/kfunc_call.c | 59 --- + 
.../selftests/bpf/prog_tests/linked_funcs.c | 42 -- + .../testing/selftests/bpf/prog_tests/linked_maps.c | 30 -- + .../testing/selftests/bpf/prog_tests/linked_vars.c | 43 -- + .../selftests/bpf/prog_tests/static_linked.c | 40 -- + tools/testing/selftests/bpf/progs/bpf_cubic.c | 545 --------------------- + tools/testing/selftests/bpf/progs/bpf_dctcp.c | 224 --------- + .../testing/selftests/bpf/progs/kfunc_call_test.c | 47 -- + .../selftests/bpf/progs/kfunc_call_test_subprog.c | 42 -- + tools/testing/selftests/bpf/progs/linked_funcs1.c | 73 --- + tools/testing/selftests/bpf/progs/linked_funcs2.c | 73 --- + tools/testing/selftests/bpf/progs/linked_maps1.c | 82 ---- + tools/testing/selftests/bpf/progs/linked_maps2.c | 76 --- + tools/testing/selftests/bpf/progs/linked_vars1.c | 54 -- + tools/testing/selftests/bpf/progs/linked_vars2.c | 55 --- + .../selftests/bpf/progs/test_static_linked1.c | 30 -- + .../selftests/bpf/progs/test_static_linked2.c | 31 -- + 98 files changed, 1251 insertions(+), 2283 deletions(-) diff --git a/Documentation/admin-guide/kdump/kdump.rst b/Documentation/admin-guide/kdump/kdump.rst index 75a9dd98e76e..3ff3291551f9 100644 @@ -151,7 +174,7 @@ index 000000000000..effb81d04bfd + +endmenu diff --git a/Makefile b/Makefile -index ed669b2d705d..4b8f0c79ce41 100644 +index 2d7a8df84e2b..8faff67e93d3 100644 --- a/Makefile +++ b/Makefile @@ -18,6 +18,10 @@ $(if $(filter __%, $(MAKECMDGOALS)), \ @@ -173,7 +196,7 @@ index ed669b2d705d..4b8f0c79ce41 100644 -Werror=return-type -Wno-format-security \ -std=gnu89 KBUILD_CPPFLAGS := -D__KERNEL__ -@@ -1296,7 +1301,13 @@ define filechk_version.h +@@ -1299,7 +1304,13 @@ define filechk_version.h ((c) > 255 ? 255 : (c)))'; \ echo \#define LINUX_VERSION_MAJOR $(VERSION); \ echo \#define LINUX_VERSION_PATCHLEVEL $(PATCHLEVEL); \ @@ -225,6 +248,19 @@ index 9f1d8566bbf9..ebb24a713210 100644 default "12" if ARM64_16K_PAGES default "11" help +diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi +index 634a91af8e83..4b854eb21f72 100644 +--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi ++++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi +@@ -227,7 +227,7 @@ pcie0: pcie@f8000000 { + <&pcie_phy 2>, <&pcie_phy 3>; + phy-names = "pcie-phy-0", "pcie-phy-1", + "pcie-phy-2", "pcie-phy-3"; +- ranges = <0x83000000 0x0 0xfa000000 0x0 0xfa000000 0x0 0x1e00000>, ++ ranges = <0x82000000 0x0 0xfa000000 0x0 0xfa000000 0x0 0x1e00000>, + <0x81000000 0x0 0xfbe00000 0x0 0xfbe00000 0x0 0x100000>; + resets = <&cru SRST_PCIE_CORE>, <&cru SRST_PCIE_MGMT>, + <&cru SRST_PCIE_MGMT_STICKY>, <&cru SRST_PCIE_PIPE>, diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c index cada0b816c8a..77b30bf451aa 100644 --- a/arch/arm64/kernel/acpi.c @@ -1526,6 +1562,78 @@ index 0015860ec12b..25acc9943007 100644 static inline void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl) { } +diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c +index f1d08a1b1591..78d04ac29cd5 100644 +--- a/drivers/pci/controller/pcie-rockchip-host.c ++++ b/drivers/pci/controller/pcie-rockchip-host.c +@@ -592,10 +592,6 @@ static int rockchip_pcie_parse_host_dt(struct rockchip_pcie *rockchip) + if (err) + return err; + +- err = rockchip_pcie_setup_irq(rockchip); +- if (err) +- return err; +- + rockchip->vpcie12v = devm_regulator_get_optional(dev, "vpcie12v"); + if (IS_ERR(rockchip->vpcie12v)) { + if (PTR_ERR(rockchip->vpcie12v) != -ENODEV) +@@ -973,8 +969,6 @@ static int rockchip_pcie_probe(struct platform_device *pdev) + 
if (err) + goto err_vpcie; + +- rockchip_pcie_enable_interrupts(rockchip); +- + err = rockchip_pcie_init_irq_domain(rockchip); + if (err < 0) + goto err_deinit_port; +@@ -992,6 +986,12 @@ static int rockchip_pcie_probe(struct platform_device *pdev) + bridge->sysdata = rockchip; + bridge->ops = &rockchip_pcie_ops; + ++ err = rockchip_pcie_setup_irq(rockchip); ++ if (err) ++ goto err_remove_irq_domain; ++ ++ rockchip_pcie_enable_interrupts(rockchip); ++ + err = pci_host_probe(bridge); + if (err < 0) + goto err_remove_irq_domain; +diff --git a/drivers/pci/of.c b/drivers/pci/of.c +index 85dcb7097da4..0580c654127e 100644 +--- a/drivers/pci/of.c ++++ b/drivers/pci/of.c +@@ -353,6 +353,18 @@ static int devm_of_pci_get_host_bridge_resources(struct device *dev, + dev_warn(dev, "More than one I/O resource converted for %pOF. CPU base address for old range lost!\n", + dev_node); + *io_base = range.cpu_addr; ++ } else if (resource_type(res) == IORESOURCE_MEM) { ++ if (!(res->flags & IORESOURCE_PREFETCH)) { ++ if (res->flags & IORESOURCE_MEM_64) { ++ if (!upper_32_bits(range.pci_addr + range.size - 1)) { ++ dev_warn(dev, "Clearing 64-bit flag for non-prefetchable memory below 4GB\n"); ++ res->flags &= ~IORESOURCE_MEM_64; ++ } ++ } else { ++ if (upper_32_bits(resource_size(res))) ++ dev_warn(dev, "Memory resource size exceeds max for 32 bits\n"); ++ } ++ } + } + + pci_add_resource_offset(resources, res, res->start - range.pci_addr); +@@ -571,11 +583,6 @@ static int pci_parse_request_of_pci_ranges(struct device *dev, + break; + case IORESOURCE_MEM: + res_valid |= !(res->flags & IORESOURCE_PREFETCH); +- +- if (!(res->flags & IORESOURCE_PREFETCH)) +- if (upper_32_bits(resource_size(res))) +- dev_warn(dev, "Memory resource size exceeds max for 32 bits\n"); +- + break; + } + } diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index ec44a79e951a..5025827ef396 100644 --- a/drivers/pci/pci-driver.c @@ -2501,7 +2609,7 @@ index ea04b0deb5ce..cb37c3f119cf 100644 if (err) return err; diff --git a/kernel/crash_core.c b/kernel/crash_core.c -index 825284baaf46..0b2b3f510b16 100644 +index 684a6061a13a..220579c0e963 100644 --- a/kernel/crash_core.c +++ b/kernel/crash_core.c @@ -7,6 +7,7 @@ @@ -2897,3 +3005,2201 @@ index b38155b2de83..b0a6711b4825 100644 #ifdef CONFIG_PERF_EVENTS int security_perf_event_open(struct perf_event_attr *attr, int type) { +diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile +index 511259c2c6c5..bd2ca0032883 100644 +--- a/tools/testing/selftests/bpf/Makefile ++++ b/tools/testing/selftests/bpf/Makefile +@@ -409,7 +409,6 @@ $(TRUNNER_TEST_OBJS): $(TRUNNER_OUTPUT)/%.test.o: \ + $(TRUNNER_EXTRA_HDRS) \ + $(TRUNNER_BPF_OBJS) \ + $(TRUNNER_BPF_SKELS) \ +- $(TRUNNER_BPF_SKELS_LINKED) \ + $$(BPFOBJ) | $(TRUNNER_OUTPUT) + $$(call msg,TEST-OBJ,$(TRUNNER_BINARY),$$@) + $(Q)cd $$(@D) && $$(CC) -I. 
$$(CFLAGS) -c $(CURDIR)/$$< $$(LDLIBS) -o $$(@F) +diff --git a/tools/testing/selftests/bpf/prog_tests/atomics.c b/tools/testing/selftests/bpf/prog_tests/atomics.c +deleted file mode 100644 +index 21efe7bbf10d..000000000000 +--- a/tools/testing/selftests/bpf/prog_tests/atomics.c ++++ /dev/null +@@ -1,246 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +- +-#include <test_progs.h> +- +-#include "atomics.skel.h" +- +-static void test_add(struct atomics *skel) +-{ +- int err, prog_fd; +- __u32 duration = 0, retval; +- struct bpf_link *link; +- +- link = bpf_program__attach(skel->progs.add); +- if (CHECK(IS_ERR(link), "attach(add)", "err: %ld\n", PTR_ERR(link))) +- return; +- +- prog_fd = bpf_program__fd(skel->progs.add); +- err = bpf_prog_test_run(prog_fd, 1, NULL, 0, +- NULL, NULL, &retval, &duration); +- if (CHECK(err || retval, "test_run add", +- "err %d errno %d retval %d duration %d\n", err, errno, retval, duration)) +- goto cleanup; +- +- ASSERT_EQ(skel->data->add64_value, 3, "add64_value"); +- ASSERT_EQ(skel->bss->add64_result, 1, "add64_result"); +- +- ASSERT_EQ(skel->data->add32_value, 3, "add32_value"); +- ASSERT_EQ(skel->bss->add32_result, 1, "add32_result"); +- +- ASSERT_EQ(skel->bss->add_stack_value_copy, 3, "add_stack_value"); +- ASSERT_EQ(skel->bss->add_stack_result, 1, "add_stack_result"); +- +- ASSERT_EQ(skel->data->add_noreturn_value, 3, "add_noreturn_value"); +- +-cleanup: +- bpf_link__destroy(link); +-} +- +-static void test_sub(struct atomics *skel) +-{ +- int err, prog_fd; +- __u32 duration = 0, retval; +- struct bpf_link *link; +- +- link = bpf_program__attach(skel->progs.sub); +- if (CHECK(IS_ERR(link), "attach(sub)", "err: %ld\n", PTR_ERR(link))) +- return; +- +- prog_fd = bpf_program__fd(skel->progs.sub); +- err = bpf_prog_test_run(prog_fd, 1, NULL, 0, +- NULL, NULL, &retval, &duration); +- if (CHECK(err || retval, "test_run sub", +- "err %d errno %d retval %d duration %d\n", +- err, errno, retval, duration)) +- goto cleanup; +- +- ASSERT_EQ(skel->data->sub64_value, -1, "sub64_value"); +- ASSERT_EQ(skel->bss->sub64_result, 1, "sub64_result"); +- +- ASSERT_EQ(skel->data->sub32_value, -1, "sub32_value"); +- ASSERT_EQ(skel->bss->sub32_result, 1, "sub32_result"); +- +- ASSERT_EQ(skel->bss->sub_stack_value_copy, -1, "sub_stack_value"); +- ASSERT_EQ(skel->bss->sub_stack_result, 1, "sub_stack_result"); +- +- ASSERT_EQ(skel->data->sub_noreturn_value, -1, "sub_noreturn_value"); +- +-cleanup: +- bpf_link__destroy(link); +-} +- +-static void test_and(struct atomics *skel) +-{ +- int err, prog_fd; +- __u32 duration = 0, retval; +- struct bpf_link *link; +- +- link = bpf_program__attach(skel->progs.and); +- if (CHECK(IS_ERR(link), "attach(and)", "err: %ld\n", PTR_ERR(link))) +- return; +- +- prog_fd = bpf_program__fd(skel->progs.and); +- err = bpf_prog_test_run(prog_fd, 1, NULL, 0, +- NULL, NULL, &retval, &duration); +- if (CHECK(err || retval, "test_run and", +- "err %d errno %d retval %d duration %d\n", err, errno, retval, duration)) +- goto cleanup; +- +- ASSERT_EQ(skel->data->and64_value, 0x010ull << 32, "and64_value"); +- ASSERT_EQ(skel->bss->and64_result, 0x110ull << 32, "and64_result"); +- +- ASSERT_EQ(skel->data->and32_value, 0x010, "and32_value"); +- ASSERT_EQ(skel->bss->and32_result, 0x110, "and32_result"); +- +- ASSERT_EQ(skel->data->and_noreturn_value, 0x010ull << 32, "and_noreturn_value"); +-cleanup: +- bpf_link__destroy(link); +-} +- +-static void test_or(struct atomics *skel) +-{ +- int err, prog_fd; +- __u32 duration = 0, retval; +- struct bpf_link *link; +- +- link 
= bpf_program__attach(skel->progs.or); +- if (CHECK(IS_ERR(link), "attach(or)", "err: %ld\n", PTR_ERR(link))) +- return; +- +- prog_fd = bpf_program__fd(skel->progs.or); +- err = bpf_prog_test_run(prog_fd, 1, NULL, 0, +- NULL, NULL, &retval, &duration); +- if (CHECK(err || retval, "test_run or", +- "err %d errno %d retval %d duration %d\n", +- err, errno, retval, duration)) +- goto cleanup; +- +- ASSERT_EQ(skel->data->or64_value, 0x111ull << 32, "or64_value"); +- ASSERT_EQ(skel->bss->or64_result, 0x110ull << 32, "or64_result"); +- +- ASSERT_EQ(skel->data->or32_value, 0x111, "or32_value"); +- ASSERT_EQ(skel->bss->or32_result, 0x110, "or32_result"); +- +- ASSERT_EQ(skel->data->or_noreturn_value, 0x111ull << 32, "or_noreturn_value"); +-cleanup: +- bpf_link__destroy(link); +-} +- +-static void test_xor(struct atomics *skel) +-{ +- int err, prog_fd; +- __u32 duration = 0, retval; +- struct bpf_link *link; +- +- link = bpf_program__attach(skel->progs.xor); +- if (CHECK(IS_ERR(link), "attach(xor)", "err: %ld\n", PTR_ERR(link))) +- return; +- +- prog_fd = bpf_program__fd(skel->progs.xor); +- err = bpf_prog_test_run(prog_fd, 1, NULL, 0, +- NULL, NULL, &retval, &duration); +- if (CHECK(err || retval, "test_run xor", +- "err %d errno %d retval %d duration %d\n", err, errno, retval, duration)) +- goto cleanup; +- +- ASSERT_EQ(skel->data->xor64_value, 0x101ull << 32, "xor64_value"); +- ASSERT_EQ(skel->bss->xor64_result, 0x110ull << 32, "xor64_result"); +- +- ASSERT_EQ(skel->data->xor32_value, 0x101, "xor32_value"); +- ASSERT_EQ(skel->bss->xor32_result, 0x110, "xor32_result"); +- +- ASSERT_EQ(skel->data->xor_noreturn_value, 0x101ull << 32, "xor_nxoreturn_value"); +-cleanup: +- bpf_link__destroy(link); +-} +- +-static void test_cmpxchg(struct atomics *skel) +-{ +- int err, prog_fd; +- __u32 duration = 0, retval; +- struct bpf_link *link; +- +- link = bpf_program__attach(skel->progs.cmpxchg); +- if (CHECK(IS_ERR(link), "attach(cmpxchg)", "err: %ld\n", PTR_ERR(link))) +- return; +- +- prog_fd = bpf_program__fd(skel->progs.cmpxchg); +- err = bpf_prog_test_run(prog_fd, 1, NULL, 0, +- NULL, NULL, &retval, &duration); +- if (CHECK(err || retval, "test_run add", +- "err %d errno %d retval %d duration %d\n", err, errno, retval, duration)) +- goto cleanup; +- +- ASSERT_EQ(skel->data->cmpxchg64_value, 2, "cmpxchg64_value"); +- ASSERT_EQ(skel->bss->cmpxchg64_result_fail, 1, "cmpxchg_result_fail"); +- ASSERT_EQ(skel->bss->cmpxchg64_result_succeed, 1, "cmpxchg_result_succeed"); +- +- ASSERT_EQ(skel->data->cmpxchg32_value, 2, "lcmpxchg32_value"); +- ASSERT_EQ(skel->bss->cmpxchg32_result_fail, 1, "cmpxchg_result_fail"); +- ASSERT_EQ(skel->bss->cmpxchg32_result_succeed, 1, "cmpxchg_result_succeed"); +- +-cleanup: +- bpf_link__destroy(link); +-} +- +-static void test_xchg(struct atomics *skel) +-{ +- int err, prog_fd; +- __u32 duration = 0, retval; +- struct bpf_link *link; +- +- link = bpf_program__attach(skel->progs.xchg); +- if (CHECK(IS_ERR(link), "attach(xchg)", "err: %ld\n", PTR_ERR(link))) +- return; +- +- prog_fd = bpf_program__fd(skel->progs.xchg); +- err = bpf_prog_test_run(prog_fd, 1, NULL, 0, +- NULL, NULL, &retval, &duration); +- if (CHECK(err || retval, "test_run add", +- "err %d errno %d retval %d duration %d\n", err, errno, retval, duration)) +- goto cleanup; +- +- ASSERT_EQ(skel->data->xchg64_value, 2, "xchg64_value"); +- ASSERT_EQ(skel->bss->xchg64_result, 1, "xchg64_result"); +- +- ASSERT_EQ(skel->data->xchg32_value, 2, "xchg32_value"); +- ASSERT_EQ(skel->bss->xchg32_result, 1, "xchg32_result"); +- 
+-cleanup: +- bpf_link__destroy(link); +-} +- +-void test_atomics(void) +-{ +- struct atomics *skel; +- __u32 duration = 0; +- +- skel = atomics__open_and_load(); +- if (CHECK(!skel, "skel_load", "atomics skeleton failed\n")) +- return; +- +- if (skel->data->skip_tests) { +- printf("%s:SKIP:no ENABLE_ATOMICS_TESTS (missing Clang BPF atomics support)", +- __func__); +- test__skip(); +- goto cleanup; +- } +- +- if (test__start_subtest("add")) +- test_add(skel); +- if (test__start_subtest("sub")) +- test_sub(skel); +- if (test__start_subtest("and")) +- test_and(skel); +- if (test__start_subtest("or")) +- test_or(skel); +- if (test__start_subtest("xor")) +- test_xor(skel); +- if (test__start_subtest("cmpxchg")) +- test_cmpxchg(skel); +- if (test__start_subtest("xchg")) +- test_xchg(skel); +- +-cleanup: +- atomics__destroy(skel); +-} +diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c +deleted file mode 100644 +index e25917f04602..000000000000 +--- a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c ++++ /dev/null +@@ -1,280 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* Copyright (c) 2019 Facebook */ +- +-#include <linux/err.h> +-#include <netinet/tcp.h> +-#include <test_progs.h> +-#include "bpf_dctcp.skel.h" +-#include "bpf_cubic.skel.h" +-#include "bpf_tcp_nogpl.skel.h" +- +-#define min(a, b) ((a) < (b) ? (a) : (b)) +- +-static const unsigned int total_bytes = 10 * 1024 * 1024; +-static const struct timeval timeo_sec = { .tv_sec = 10 }; +-static const size_t timeo_optlen = sizeof(timeo_sec); +-static int expected_stg = 0xeB9F; +-static int stop, duration; +- +-static int settimeo(int fd) +-{ +- int err; +- +- err = setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &timeo_sec, +- timeo_optlen); +- if (CHECK(err == -1, "setsockopt(fd, SO_RCVTIMEO)", "errno:%d\n", +- errno)) +- return -1; +- +- err = setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, &timeo_sec, +- timeo_optlen); +- if (CHECK(err == -1, "setsockopt(fd, SO_SNDTIMEO)", "errno:%d\n", +- errno)) +- return -1; +- +- return 0; +-} +- +-static int settcpca(int fd, const char *tcp_ca) +-{ +- int err; +- +- err = setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, tcp_ca, strlen(tcp_ca)); +- if (CHECK(err == -1, "setsockopt(fd, TCP_CONGESTION)", "errno:%d\n", +- errno)) +- return -1; +- +- return 0; +-} +- +-static void *server(void *arg) +-{ +- int lfd = (int)(long)arg, err = 0, fd; +- ssize_t nr_sent = 0, bytes = 0; +- char batch[1500]; +- +- fd = accept(lfd, NULL, NULL); +- while (fd == -1) { +- if (errno == EINTR) +- continue; +- err = -errno; +- goto done; +- } +- +- if (settimeo(fd)) { +- err = -errno; +- goto done; +- } +- +- while (bytes < total_bytes && !READ_ONCE(stop)) { +- nr_sent = send(fd, &batch, +- min(total_bytes - bytes, sizeof(batch)), 0); +- if (nr_sent == -1 && errno == EINTR) +- continue; +- if (nr_sent == -1) { +- err = -errno; +- break; +- } +- bytes += nr_sent; +- } +- +- CHECK(bytes != total_bytes, "send", "%zd != %u nr_sent:%zd errno:%d\n", +- bytes, total_bytes, nr_sent, errno); +- +-done: +- if (fd != -1) +- close(fd); +- if (err) { +- WRITE_ONCE(stop, 1); +- return ERR_PTR(err); +- } +- return NULL; +-} +- +-static void do_test(const char *tcp_ca, const struct bpf_map *sk_stg_map) +-{ +- struct sockaddr_in6 sa6 = {}; +- ssize_t nr_recv = 0, bytes = 0; +- int lfd = -1, fd = -1; +- pthread_t srv_thread; +- socklen_t addrlen = sizeof(sa6); +- void *thread_ret; +- char batch[1500]; +- int err; +- +- WRITE_ONCE(stop, 0); +- +- lfd = socket(AF_INET6, SOCK_STREAM, 0); 
+- if (CHECK(lfd == -1, "socket", "errno:%d\n", errno)) +- return; +- fd = socket(AF_INET6, SOCK_STREAM, 0); +- if (CHECK(fd == -1, "socket", "errno:%d\n", errno)) { +- close(lfd); +- return; +- } +- +- if (settcpca(lfd, tcp_ca) || settcpca(fd, tcp_ca) || +- settimeo(lfd) || settimeo(fd)) +- goto done; +- +- /* bind, listen and start server thread to accept */ +- sa6.sin6_family = AF_INET6; +- sa6.sin6_addr = in6addr_loopback; +- err = bind(lfd, (struct sockaddr *)&sa6, addrlen); +- if (CHECK(err == -1, "bind", "errno:%d\n", errno)) +- goto done; +- err = getsockname(lfd, (struct sockaddr *)&sa6, &addrlen); +- if (CHECK(err == -1, "getsockname", "errno:%d\n", errno)) +- goto done; +- err = listen(lfd, 1); +- if (CHECK(err == -1, "listen", "errno:%d\n", errno)) +- goto done; +- +- if (sk_stg_map) { +- err = bpf_map_update_elem(bpf_map__fd(sk_stg_map), &fd, +- &expected_stg, BPF_NOEXIST); +- if (CHECK(err, "bpf_map_update_elem(sk_stg_map)", +- "err:%d errno:%d\n", err, errno)) +- goto done; +- } +- +- /* connect to server */ +- err = connect(fd, (struct sockaddr *)&sa6, addrlen); +- if (CHECK(err == -1, "connect", "errno:%d\n", errno)) +- goto done; +- +- if (sk_stg_map) { +- int tmp_stg; +- +- err = bpf_map_lookup_elem(bpf_map__fd(sk_stg_map), &fd, +- &tmp_stg); +- if (CHECK(!err || errno != ENOENT, +- "bpf_map_lookup_elem(sk_stg_map)", +- "err:%d errno:%d\n", err, errno)) +- goto done; +- } +- +- err = pthread_create(&srv_thread, NULL, server, (void *)(long)lfd); +- if (CHECK(err != 0, "pthread_create", "err:%d errno:%d\n", err, errno)) +- goto done; +- +- /* recv total_bytes */ +- while (bytes < total_bytes && !READ_ONCE(stop)) { +- nr_recv = recv(fd, &batch, +- min(total_bytes - bytes, sizeof(batch)), 0); +- if (nr_recv == -1 && errno == EINTR) +- continue; +- if (nr_recv == -1) +- break; +- bytes += nr_recv; +- } +- +- CHECK(bytes != total_bytes, "recv", "%zd != %u nr_recv:%zd errno:%d\n", +- bytes, total_bytes, nr_recv, errno); +- +- WRITE_ONCE(stop, 1); +- pthread_join(srv_thread, &thread_ret); +- CHECK(IS_ERR(thread_ret), "pthread_join", "thread_ret:%ld", +- PTR_ERR(thread_ret)); +-done: +- close(lfd); +- close(fd); +-} +- +-static void test_cubic(void) +-{ +- struct bpf_cubic *cubic_skel; +- struct bpf_link *link; +- +- cubic_skel = bpf_cubic__open_and_load(); +- if (CHECK(!cubic_skel, "bpf_cubic__open_and_load", "failed\n")) +- return; +- +- link = bpf_map__attach_struct_ops(cubic_skel->maps.cubic); +- if (CHECK(IS_ERR(link), "bpf_map__attach_struct_ops", "err:%ld\n", +- PTR_ERR(link))) { +- bpf_cubic__destroy(cubic_skel); +- return; +- } +- +- do_test("bpf_cubic", NULL); +- +- bpf_link__destroy(link); +- bpf_cubic__destroy(cubic_skel); +-} +- +-static void test_dctcp(void) +-{ +- struct bpf_dctcp *dctcp_skel; +- struct bpf_link *link; +- +- dctcp_skel = bpf_dctcp__open_and_load(); +- if (CHECK(!dctcp_skel, "bpf_dctcp__open_and_load", "failed\n")) +- return; +- +- link = bpf_map__attach_struct_ops(dctcp_skel->maps.dctcp); +- if (CHECK(IS_ERR(link), "bpf_map__attach_struct_ops", "err:%ld\n", +- PTR_ERR(link))) { +- bpf_dctcp__destroy(dctcp_skel); +- return; +- } +- +- do_test("bpf_dctcp", dctcp_skel->maps.sk_stg_map); +- CHECK(dctcp_skel->bss->stg_result != expected_stg, +- "Unexpected stg_result", "stg_result (%x) != expected_stg (%x)\n", +- dctcp_skel->bss->stg_result, expected_stg); +- +- bpf_link__destroy(link); +- bpf_dctcp__destroy(dctcp_skel); +-} +- +-static char *err_str; +-static bool found; +- +-static int libbpf_debug_print(enum libbpf_print_level level, +- const char 
*format, va_list args) +-{ +- char *log_buf; +- +- if (level != LIBBPF_WARN || +- strcmp(format, "libbpf: \n%s\n")) { +- vprintf(format, args); +- return 0; +- } +- +- log_buf = va_arg(args, char *); +- if (!log_buf) +- goto out; +- if (err_str && strstr(log_buf, err_str) != NULL) +- found = true; +-out: +- printf(format, log_buf); +- return 0; +-} +- +-static void test_invalid_license(void) +-{ +- libbpf_print_fn_t old_print_fn; +- struct bpf_tcp_nogpl *skel; +- +- err_str = "struct ops programs must have a GPL compatible license"; +- found = false; +- old_print_fn = libbpf_set_print(libbpf_debug_print); +- +- skel = bpf_tcp_nogpl__open_and_load(); +- ASSERT_NULL(skel, "bpf_tcp_nogpl"); +- ASSERT_EQ(found, true, "expected_err_msg"); +- +- bpf_tcp_nogpl__destroy(skel); +- libbpf_set_print(old_print_fn); +-} +- +-void test_bpf_tcp_ca(void) +-{ +- if (test__start_subtest("dctcp")) +- test_dctcp(); +- if (test__start_subtest("cubic")) +- test_cubic(); +- if (test__start_subtest("invalid_license")) +- test_invalid_license(); +-} +diff --git a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c +deleted file mode 100644 +index 7fc0951ee75f..000000000000 +--- a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c ++++ /dev/null +@@ -1,59 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* Copyright (c) 2021 Facebook */ +-#include <test_progs.h> +-#include <network_helpers.h> +-#include "kfunc_call_test.skel.h" +-#include "kfunc_call_test_subprog.skel.h" +- +-static void test_main(void) +-{ +- struct kfunc_call_test *skel; +- int prog_fd, retval, err; +- +- skel = kfunc_call_test__open_and_load(); +- if (!ASSERT_OK_PTR(skel, "skel")) +- return; +- +- prog_fd = bpf_program__fd(skel->progs.kfunc_call_test1); +- err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4), +- NULL, NULL, (__u32 *)&retval, NULL); +- ASSERT_OK(err, "bpf_prog_test_run(test1)"); +- ASSERT_EQ(retval, 12, "test1-retval"); +- +- prog_fd = bpf_program__fd(skel->progs.kfunc_call_test2); +- err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4), +- NULL, NULL, (__u32 *)&retval, NULL); +- ASSERT_OK(err, "bpf_prog_test_run(test2)"); +- ASSERT_EQ(retval, 3, "test2-retval"); +- +- kfunc_call_test__destroy(skel); +-} +- +-static void test_subprog(void) +-{ +- struct kfunc_call_test_subprog *skel; +- int prog_fd, retval, err; +- +- skel = kfunc_call_test_subprog__open_and_load(); +- if (!ASSERT_OK_PTR(skel, "skel")) +- return; +- +- prog_fd = bpf_program__fd(skel->progs.kfunc_call_test1); +- err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4), +- NULL, NULL, (__u32 *)&retval, NULL); +- ASSERT_OK(err, "bpf_prog_test_run(test1)"); +- ASSERT_EQ(retval, 10, "test1-retval"); +- ASSERT_NEQ(skel->data->active_res, -1, "active_res"); +- ASSERT_EQ(skel->data->sk_state, BPF_TCP_CLOSE, "sk_state"); +- +- kfunc_call_test_subprog__destroy(skel); +-} +- +-void test_kfunc_call(void) +-{ +- if (test__start_subtest("main")) +- test_main(); +- +- if (test__start_subtest("subprog")) +- test_subprog(); +-} +diff --git a/tools/testing/selftests/bpf/prog_tests/linked_funcs.c b/tools/testing/selftests/bpf/prog_tests/linked_funcs.c +deleted file mode 100644 +index e9916f2817ec..000000000000 +--- a/tools/testing/selftests/bpf/prog_tests/linked_funcs.c ++++ /dev/null +@@ -1,42 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* Copyright (c) 2021 Facebook */ +- +-#include <test_progs.h> +-#include <sys/syscall.h> +-#include "linked_funcs.skel.h" +- +-void test_linked_funcs(void) +-{ +- int err; 
+- struct linked_funcs *skel; +- +- skel = linked_funcs__open(); +- if (!ASSERT_OK_PTR(skel, "skel_open")) +- return; +- +- skel->rodata->my_tid = syscall(SYS_gettid); +- skel->bss->syscall_id = SYS_getpgid; +- +- err = linked_funcs__load(skel); +- if (!ASSERT_OK(err, "skel_load")) +- goto cleanup; +- +- err = linked_funcs__attach(skel); +- if (!ASSERT_OK(err, "skel_attach")) +- goto cleanup; +- +- /* trigger */ +- syscall(SYS_getpgid); +- +- ASSERT_EQ(skel->bss->output_val1, 2000 + 2000, "output_val1"); +- ASSERT_EQ(skel->bss->output_ctx1, SYS_getpgid, "output_ctx1"); +- ASSERT_EQ(skel->bss->output_weak1, 42, "output_weak1"); +- +- ASSERT_EQ(skel->bss->output_val2, 2 * 1000 + 2 * (2 * 1000), "output_val2"); +- ASSERT_EQ(skel->bss->output_ctx2, SYS_getpgid, "output_ctx2"); +- /* output_weak2 should never be updated */ +- ASSERT_EQ(skel->bss->output_weak2, 0, "output_weak2"); +- +-cleanup: +- linked_funcs__destroy(skel); +-} +diff --git a/tools/testing/selftests/bpf/prog_tests/linked_maps.c b/tools/testing/selftests/bpf/prog_tests/linked_maps.c +deleted file mode 100644 +index 85dcaaaf2775..000000000000 +--- a/tools/testing/selftests/bpf/prog_tests/linked_maps.c ++++ /dev/null +@@ -1,30 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* Copyright (c) 2021 Facebook */ +- +-#include <test_progs.h> +-#include <sys/syscall.h> +-#include "linked_maps.skel.h" +- +-void test_linked_maps(void) +-{ +- int err; +- struct linked_maps *skel; +- +- skel = linked_maps__open_and_load(); +- if (!ASSERT_OK_PTR(skel, "skel_open")) +- return; +- +- err = linked_maps__attach(skel); +- if (!ASSERT_OK(err, "skel_attach")) +- goto cleanup; +- +- /* trigger */ +- syscall(SYS_getpgid); +- +- ASSERT_EQ(skel->bss->output_first1, 2000, "output_first1"); +- ASSERT_EQ(skel->bss->output_second1, 2, "output_second1"); +- ASSERT_EQ(skel->bss->output_weak1, 2, "output_weak1"); +- +-cleanup: +- linked_maps__destroy(skel); +-} +diff --git a/tools/testing/selftests/bpf/prog_tests/linked_vars.c b/tools/testing/selftests/bpf/prog_tests/linked_vars.c +deleted file mode 100644 +index 267166abe4c1..000000000000 +--- a/tools/testing/selftests/bpf/prog_tests/linked_vars.c ++++ /dev/null +@@ -1,43 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* Copyright (c) 2021 Facebook */ +- +-#include <test_progs.h> +-#include <sys/syscall.h> +-#include "linked_vars.skel.h" +- +-void test_linked_vars(void) +-{ +- int err; +- struct linked_vars *skel; +- +- skel = linked_vars__open(); +- if (!ASSERT_OK_PTR(skel, "skel_open")) +- return; +- +- skel->bss->input_bss1 = 1000; +- skel->bss->input_bss2 = 2000; +- skel->bss->input_bss_weak = 3000; +- +- err = linked_vars__load(skel); +- if (!ASSERT_OK(err, "skel_load")) +- goto cleanup; +- +- err = linked_vars__attach(skel); +- if (!ASSERT_OK(err, "skel_attach")) +- goto cleanup; +- +- /* trigger */ +- syscall(SYS_getpgid); +- +- ASSERT_EQ(skel->bss->output_bss1, 1000 + 2000 + 3000, "output_bss1"); +- ASSERT_EQ(skel->bss->output_bss2, 1000 + 2000 + 3000, "output_bss2"); +- /* 10 comes from "winner" input_data_weak in first obj file */ +- ASSERT_EQ(skel->bss->output_data1, 1 + 2 + 10, "output_bss1"); +- ASSERT_EQ(skel->bss->output_data2, 1 + 2 + 10, "output_bss2"); +- /* 100 comes from "winner" input_rodata_weak in first obj file */ +- ASSERT_EQ(skel->bss->output_rodata1, 11 + 22 + 100, "output_weak1"); +- ASSERT_EQ(skel->bss->output_rodata2, 11 + 22 + 100, "output_weak2"); +- +-cleanup: +- linked_vars__destroy(skel); +-} +diff --git a/tools/testing/selftests/bpf/prog_tests/static_linked.c 
b/tools/testing/selftests/bpf/prog_tests/static_linked.c +deleted file mode 100644 +index 46556976dccc..000000000000 +--- a/tools/testing/selftests/bpf/prog_tests/static_linked.c ++++ /dev/null +@@ -1,40 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* Copyright (c) 2019 Facebook */ +- +-#include <test_progs.h> +-#include "test_static_linked.skel.h" +- +-void test_static_linked(void) +-{ +- int err; +- struct test_static_linked* skel; +- +- skel = test_static_linked__open(); +- if (!ASSERT_OK_PTR(skel, "skel_open")) +- return; +- +- skel->rodata->rovar1 = 1; +- skel->bss->static_var1 = 2; +- skel->bss->static_var11 = 3; +- +- skel->rodata->rovar2 = 4; +- skel->bss->static_var2 = 5; +- skel->bss->static_var22 = 6; +- +- err = test_static_linked__load(skel); +- if (!ASSERT_OK(err, "skel_load")) +- goto cleanup; +- +- err = test_static_linked__attach(skel); +- if (!ASSERT_OK(err, "skel_attach")) +- goto cleanup; +- +- /* trigger */ +- usleep(1); +- +- ASSERT_EQ(skel->bss->var1, 1 * 2 + 2 + 3, "var1"); +- ASSERT_EQ(skel->bss->var2, 4 * 3 + 5 + 6, "var2"); +- +-cleanup: +- test_static_linked__destroy(skel); +-} +diff --git a/tools/testing/selftests/bpf/progs/bpf_cubic.c b/tools/testing/selftests/bpf/progs/bpf_cubic.c +deleted file mode 100644 +index f62df4d023f9..000000000000 +--- a/tools/testing/selftests/bpf/progs/bpf_cubic.c ++++ /dev/null +@@ -1,545 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0-only +- +-/* WARNING: This implemenation is not necessarily the same +- * as the tcp_cubic.c. The purpose is mainly for testing +- * the kernel BPF logic. +- * +- * Highlights: +- * 1. CONFIG_HZ .kconfig map is used. +- * 2. In bictcp_update(), calculation is changed to use usec +- * resolution (i.e. USEC_PER_JIFFY) instead of using jiffies. +- * Thus, usecs_to_jiffies() is not used in the bpf_cubic.c. +- * 3. In bitctcp_update() [under tcp_friendliness], the original +- * "while (ca->ack_cnt > delta)" loop is changed to the equivalent +- * "ca->ack_cnt / delta" operation. 
+- */ +- +-#include <linux/bpf.h> +-#include <linux/stddef.h> +-#include <linux/tcp.h> +-#include "bpf_tcp_helpers.h" +- +-char _license[] SEC("license") = "GPL"; +- +-#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi) +- +-#define BICTCP_BETA_SCALE 1024 /* Scale factor beta calculation +- * max_cwnd = snd_cwnd * beta +- */ +-#define BICTCP_HZ 10 /* BIC HZ 2^10 = 1024 */ +- +-/* Two methods of hybrid slow start */ +-#define HYSTART_ACK_TRAIN 0x1 +-#define HYSTART_DELAY 0x2 +- +-/* Number of delay samples for detecting the increase of delay */ +-#define HYSTART_MIN_SAMPLES 8 +-#define HYSTART_DELAY_MIN (4000U) /* 4ms */ +-#define HYSTART_DELAY_MAX (16000U) /* 16 ms */ +-#define HYSTART_DELAY_THRESH(x) clamp(x, HYSTART_DELAY_MIN, HYSTART_DELAY_MAX) +- +-static int fast_convergence = 1; +-static const int beta = 717; /* = 717/1024 (BICTCP_BETA_SCALE) */ +-static int initial_ssthresh; +-static const int bic_scale = 41; +-static int tcp_friendliness = 1; +- +-static int hystart = 1; +-static int hystart_detect = HYSTART_ACK_TRAIN | HYSTART_DELAY; +-static int hystart_low_window = 16; +-static int hystart_ack_delta_us = 2000; +- +-static const __u32 cube_rtt_scale = (bic_scale * 10); /* 1024*c/rtt */ +-static const __u32 beta_scale = 8*(BICTCP_BETA_SCALE+beta) / 3 +- / (BICTCP_BETA_SCALE - beta); +-/* calculate the "K" for (wmax-cwnd) = c/rtt * K^3 +- * so K = cubic_root( (wmax-cwnd)*rtt/c ) +- * the unit of K is bictcp_HZ=2^10, not HZ +- * +- * c = bic_scale >> 10 +- * rtt = 100ms +- * +- * the following code has been designed and tested for +- * cwnd < 1 million packets +- * RTT < 100 seconds +- * HZ < 1,000,00 (corresponding to 10 nano-second) +- */ +- +-/* 1/c * 2^2*bictcp_HZ * srtt, 2^40 */ +-static const __u64 cube_factor = (__u64)(1ull << (10+3*BICTCP_HZ)) +- / (bic_scale * 10); +- +-/* BIC TCP Parameters */ +-struct bictcp { +- __u32 cnt; /* increase cwnd by 1 after ACKs */ +- __u32 last_max_cwnd; /* last maximum snd_cwnd */ +- __u32 last_cwnd; /* the last snd_cwnd */ +- __u32 last_time; /* time when updated last_cwnd */ +- __u32 bic_origin_point;/* origin point of bic function */ +- __u32 bic_K; /* time to origin point +- from the beginning of the current epoch */ +- __u32 delay_min; /* min delay (usec) */ +- __u32 epoch_start; /* beginning of an epoch */ +- __u32 ack_cnt; /* number of acks */ +- __u32 tcp_cwnd; /* estimated tcp cwnd */ +- __u16 unused; +- __u8 sample_cnt; /* number of samples to decide curr_rtt */ +- __u8 found; /* the exit point is found? 
*/ +- __u32 round_start; /* beginning of each round */ +- __u32 end_seq; /* end_seq of the round */ +- __u32 last_ack; /* last time when the ACK spacing is close */ +- __u32 curr_rtt; /* the minimum rtt of current round */ +-}; +- +-static inline void bictcp_reset(struct bictcp *ca) +-{ +- ca->cnt = 0; +- ca->last_max_cwnd = 0; +- ca->last_cwnd = 0; +- ca->last_time = 0; +- ca->bic_origin_point = 0; +- ca->bic_K = 0; +- ca->delay_min = 0; +- ca->epoch_start = 0; +- ca->ack_cnt = 0; +- ca->tcp_cwnd = 0; +- ca->found = 0; +-} +- +-extern unsigned long CONFIG_HZ __kconfig; +-#define HZ CONFIG_HZ +-#define USEC_PER_MSEC 1000UL +-#define USEC_PER_SEC 1000000UL +-#define USEC_PER_JIFFY (USEC_PER_SEC / HZ) +- +-static __always_inline __u64 div64_u64(__u64 dividend, __u64 divisor) +-{ +- return dividend / divisor; +-} +- +-#define div64_ul div64_u64 +- +-#define BITS_PER_U64 (sizeof(__u64) * 8) +-static __always_inline int fls64(__u64 x) +-{ +- int num = BITS_PER_U64 - 1; +- +- if (x == 0) +- return 0; +- +- if (!(x & (~0ull << (BITS_PER_U64-32)))) { +- num -= 32; +- x <<= 32; +- } +- if (!(x & (~0ull << (BITS_PER_U64-16)))) { +- num -= 16; +- x <<= 16; +- } +- if (!(x & (~0ull << (BITS_PER_U64-8)))) { +- num -= 8; +- x <<= 8; +- } +- if (!(x & (~0ull << (BITS_PER_U64-4)))) { +- num -= 4; +- x <<= 4; +- } +- if (!(x & (~0ull << (BITS_PER_U64-2)))) { +- num -= 2; +- x <<= 2; +- } +- if (!(x & (~0ull << (BITS_PER_U64-1)))) +- num -= 1; +- +- return num + 1; +-} +- +-static __always_inline __u32 bictcp_clock_us(const struct sock *sk) +-{ +- return tcp_sk(sk)->tcp_mstamp; +-} +- +-static __always_inline void bictcp_hystart_reset(struct sock *sk) +-{ +- struct tcp_sock *tp = tcp_sk(sk); +- struct bictcp *ca = inet_csk_ca(sk); +- +- ca->round_start = ca->last_ack = bictcp_clock_us(sk); +- ca->end_seq = tp->snd_nxt; +- ca->curr_rtt = ~0U; +- ca->sample_cnt = 0; +-} +- +-/* "struct_ops/" prefix is not a requirement +- * It will be recognized as BPF_PROG_TYPE_STRUCT_OPS +- * as long as it is used in one of the func ptr +- * under SEC(".struct_ops"). +- */ +-SEC("struct_ops/bpf_cubic_init") +-void BPF_PROG(bpf_cubic_init, struct sock *sk) +-{ +- struct bictcp *ca = inet_csk_ca(sk); +- +- bictcp_reset(ca); +- +- if (hystart) +- bictcp_hystart_reset(sk); +- +- if (!hystart && initial_ssthresh) +- tcp_sk(sk)->snd_ssthresh = initial_ssthresh; +-} +- +-/* No prefix in SEC will also work. +- * The remaining tcp-cubic functions have an easier way. +- */ +-SEC("no-sec-prefix-bictcp_cwnd_event") +-void BPF_PROG(bpf_cubic_cwnd_event, struct sock *sk, enum tcp_ca_event event) +-{ +- if (event == CA_EVENT_TX_START) { +- struct bictcp *ca = inet_csk_ca(sk); +- __u32 now = tcp_jiffies32; +- __s32 delta; +- +- delta = now - tcp_sk(sk)->lsndtime; +- +- /* We were application limited (idle) for a while. +- * Shift epoch_start to keep cwnd growth to cubic curve. +- */ +- if (ca->epoch_start && delta > 0) { +- ca->epoch_start += delta; +- if (after(ca->epoch_start, now)) +- ca->epoch_start = now; +- } +- return; +- } +-} +- +-/* +- * cbrt(x) MSB values for x MSB values in [0..63]. 
+- * Precomputed then refined by hand - Willy Tarreau +- * +- * For x in [0..63], +- * v = cbrt(x << 18) - 1 +- * cbrt(x) = (v[x] + 10) >> 6 +- */ +-static const __u8 v[] = { +- /* 0x00 */ 0, 54, 54, 54, 118, 118, 118, 118, +- /* 0x08 */ 123, 129, 134, 138, 143, 147, 151, 156, +- /* 0x10 */ 157, 161, 164, 168, 170, 173, 176, 179, +- /* 0x18 */ 181, 185, 187, 190, 192, 194, 197, 199, +- /* 0x20 */ 200, 202, 204, 206, 209, 211, 213, 215, +- /* 0x28 */ 217, 219, 221, 222, 224, 225, 227, 229, +- /* 0x30 */ 231, 232, 234, 236, 237, 239, 240, 242, +- /* 0x38 */ 244, 245, 246, 248, 250, 251, 252, 254, +-}; +- +-/* calculate the cubic root of x using a table lookup followed by one +- * Newton-Raphson iteration. +- * Avg err ~= 0.195% +- */ +-static __always_inline __u32 cubic_root(__u64 a) +-{ +- __u32 x, b, shift; +- +- if (a < 64) { +- /* a in [0..63] */ +- return ((__u32)v[(__u32)a] + 35) >> 6; +- } +- +- b = fls64(a); +- b = ((b * 84) >> 8) - 1; +- shift = (a >> (b * 3)); +- +- /* it is needed for verifier's bound check on v */ +- if (shift >= 64) +- return 0; +- +- x = ((__u32)(((__u32)v[shift] + 10) << b)) >> 6; +- +- /* +- * Newton-Raphson iteration +- * 2 +- * x = ( 2 * x + a / x ) / 3 +- * k+1 k k +- */ +- x = (2 * x + (__u32)div64_u64(a, (__u64)x * (__u64)(x - 1))); +- x = ((x * 341) >> 10); +- return x; +-} +- +-/* +- * Compute congestion window to use. +- */ +-static __always_inline void bictcp_update(struct bictcp *ca, __u32 cwnd, +- __u32 acked) +-{ +- __u32 delta, bic_target, max_cnt; +- __u64 offs, t; +- +- ca->ack_cnt += acked; /* count the number of ACKed packets */ +- +- if (ca->last_cwnd == cwnd && +- (__s32)(tcp_jiffies32 - ca->last_time) <= HZ / 32) +- return; +- +- /* The CUBIC function can update ca->cnt at most once per jiffy. +- * On all cwnd reduction events, ca->epoch_start is set to 0, +- * which will force a recalculation of ca->cnt. +- */ +- if (ca->epoch_start && tcp_jiffies32 == ca->last_time) +- goto tcp_friendliness; +- +- ca->last_cwnd = cwnd; +- ca->last_time = tcp_jiffies32; +- +- if (ca->epoch_start == 0) { +- ca->epoch_start = tcp_jiffies32; /* record beginning */ +- ca->ack_cnt = acked; /* start counting */ +- ca->tcp_cwnd = cwnd; /* syn with cubic */ +- +- if (ca->last_max_cwnd <= cwnd) { +- ca->bic_K = 0; +- ca->bic_origin_point = cwnd; +- } else { +- /* Compute new K based on +- * (wmax-cwnd) * (srtt>>3 / HZ) / c * 2^(3*bictcp_HZ) +- */ +- ca->bic_K = cubic_root(cube_factor +- * (ca->last_max_cwnd - cwnd)); +- ca->bic_origin_point = ca->last_max_cwnd; +- } +- } +- +- /* cubic function - calc*/ +- /* calculate c * time^3 / rtt, +- * while considering overflow in calculation of time^3 +- * (so time^3 is done by using 64 bit) +- * and without the support of division of 64bit numbers +- * (so all divisions are done by using 32 bit) +- * also NOTE the unit of those veriables +- * time = (t - K) / 2^bictcp_HZ +- * c = bic_scale >> 10 +- * rtt = (srtt >> 3) / HZ +- * !!! The following code does not have overflow problems, +- * if the cwnd < 1 million packets !!! 
+- */ +- +- t = (__s32)(tcp_jiffies32 - ca->epoch_start) * USEC_PER_JIFFY; +- t += ca->delay_min; +- /* change the unit from usec to bictcp_HZ */ +- t <<= BICTCP_HZ; +- t /= USEC_PER_SEC; +- +- if (t < ca->bic_K) /* t - K */ +- offs = ca->bic_K - t; +- else +- offs = t - ca->bic_K; +- +- /* c/rtt * (t-K)^3 */ +- delta = (cube_rtt_scale * offs * offs * offs) >> (10+3*BICTCP_HZ); +- if (t < ca->bic_K) /* below origin*/ +- bic_target = ca->bic_origin_point - delta; +- else /* above origin*/ +- bic_target = ca->bic_origin_point + delta; +- +- /* cubic function - calc bictcp_cnt*/ +- if (bic_target > cwnd) { +- ca->cnt = cwnd / (bic_target - cwnd); +- } else { +- ca->cnt = 100 * cwnd; /* very small increment*/ +- } +- +- /* +- * The initial growth of cubic function may be too conservative +- * when the available bandwidth is still unknown. +- */ +- if (ca->last_max_cwnd == 0 && ca->cnt > 20) +- ca->cnt = 20; /* increase cwnd 5% per RTT */ +- +-tcp_friendliness: +- /* TCP Friendly */ +- if (tcp_friendliness) { +- __u32 scale = beta_scale; +- __u32 n; +- +- /* update tcp cwnd */ +- delta = (cwnd * scale) >> 3; +- if (ca->ack_cnt > delta && delta) { +- n = ca->ack_cnt / delta; +- ca->ack_cnt -= n * delta; +- ca->tcp_cwnd += n; +- } +- +- if (ca->tcp_cwnd > cwnd) { /* if bic is slower than tcp */ +- delta = ca->tcp_cwnd - cwnd; +- max_cnt = cwnd / delta; +- if (ca->cnt > max_cnt) +- ca->cnt = max_cnt; +- } +- } +- +- /* The maximum rate of cwnd increase CUBIC allows is 1 packet per +- * 2 packets ACKed, meaning cwnd grows at 1.5x per RTT. +- */ +- ca->cnt = max(ca->cnt, 2U); +-} +- +-/* Or simply use the BPF_STRUCT_OPS to avoid the SEC boiler plate. */ +-void BPF_STRUCT_OPS(bpf_cubic_cong_avoid, struct sock *sk, __u32 ack, __u32 acked) +-{ +- struct tcp_sock *tp = tcp_sk(sk); +- struct bictcp *ca = inet_csk_ca(sk); +- +- if (!tcp_is_cwnd_limited(sk)) +- return; +- +- if (tcp_in_slow_start(tp)) { +- if (hystart && after(ack, ca->end_seq)) +- bictcp_hystart_reset(sk); +- acked = tcp_slow_start(tp, acked); +- if (!acked) +- return; +- } +- bictcp_update(ca, tp->snd_cwnd, acked); +- tcp_cong_avoid_ai(tp, ca->cnt, acked); +-} +- +-__u32 BPF_STRUCT_OPS(bpf_cubic_recalc_ssthresh, struct sock *sk) +-{ +- const struct tcp_sock *tp = tcp_sk(sk); +- struct bictcp *ca = inet_csk_ca(sk); +- +- ca->epoch_start = 0; /* end of epoch */ +- +- /* Wmax and fast convergence */ +- if (tp->snd_cwnd < ca->last_max_cwnd && fast_convergence) +- ca->last_max_cwnd = (tp->snd_cwnd * (BICTCP_BETA_SCALE + beta)) +- / (2 * BICTCP_BETA_SCALE); +- else +- ca->last_max_cwnd = tp->snd_cwnd; +- +- return max((tp->snd_cwnd * beta) / BICTCP_BETA_SCALE, 2U); +-} +- +-void BPF_STRUCT_OPS(bpf_cubic_state, struct sock *sk, __u8 new_state) +-{ +- if (new_state == TCP_CA_Loss) { +- bictcp_reset(inet_csk_ca(sk)); +- bictcp_hystart_reset(sk); +- } +-} +- +-#define GSO_MAX_SIZE 65536 +- +-/* Account for TSO/GRO delays. +- * Otherwise short RTT flows could get too small ssthresh, since during +- * slow start we begin with small TSO packets and ca->delay_min would +- * not account for long aggregation delay when TSO packets get bigger. +- * Ideally even with a very small RTT we would like to have at least one +- * TSO packet being sent and received by GRO, and another one in qdisc layer. +- * We apply another 100% factor because @rate is doubled at this point. +- * We cap the cushion to 1ms. 
+- */ +-static __always_inline __u32 hystart_ack_delay(struct sock *sk) +-{ +- unsigned long rate; +- +- rate = sk->sk_pacing_rate; +- if (!rate) +- return 0; +- return min((__u64)USEC_PER_MSEC, +- div64_ul((__u64)GSO_MAX_SIZE * 4 * USEC_PER_SEC, rate)); +-} +- +-static __always_inline void hystart_update(struct sock *sk, __u32 delay) +-{ +- struct tcp_sock *tp = tcp_sk(sk); +- struct bictcp *ca = inet_csk_ca(sk); +- __u32 threshold; +- +- if (hystart_detect & HYSTART_ACK_TRAIN) { +- __u32 now = bictcp_clock_us(sk); +- +- /* first detection parameter - ack-train detection */ +- if ((__s32)(now - ca->last_ack) <= hystart_ack_delta_us) { +- ca->last_ack = now; +- +- threshold = ca->delay_min + hystart_ack_delay(sk); +- +- /* Hystart ack train triggers if we get ack past +- * ca->delay_min/2. +- * Pacing might have delayed packets up to RTT/2 +- * during slow start. +- */ +- if (sk->sk_pacing_status == SK_PACING_NONE) +- threshold >>= 1; +- +- if ((__s32)(now - ca->round_start) > threshold) { +- ca->found = 1; +- tp->snd_ssthresh = tp->snd_cwnd; +- } +- } +- } +- +- if (hystart_detect & HYSTART_DELAY) { +- /* obtain the minimum delay of more than sampling packets */ +- if (ca->curr_rtt > delay) +- ca->curr_rtt = delay; +- if (ca->sample_cnt < HYSTART_MIN_SAMPLES) { +- ca->sample_cnt++; +- } else { +- if (ca->curr_rtt > ca->delay_min + +- HYSTART_DELAY_THRESH(ca->delay_min >> 3)) { +- ca->found = 1; +- tp->snd_ssthresh = tp->snd_cwnd; +- } +- } +- } +-} +- +-void BPF_STRUCT_OPS(bpf_cubic_acked, struct sock *sk, +- const struct ack_sample *sample) +-{ +- const struct tcp_sock *tp = tcp_sk(sk); +- struct bictcp *ca = inet_csk_ca(sk); +- __u32 delay; +- +- /* Some calls are for duplicates without timetamps */ +- if (sample->rtt_us < 0) +- return; +- +- /* Discard delay samples right after fast recovery */ +- if (ca->epoch_start && (__s32)(tcp_jiffies32 - ca->epoch_start) < HZ) +- return; +- +- delay = sample->rtt_us; +- if (delay == 0) +- delay = 1; +- +- /* first time call or link delay decreases */ +- if (ca->delay_min == 0 || ca->delay_min > delay) +- ca->delay_min = delay; +- +- /* hystart triggers when cwnd is larger than some threshold */ +- if (!ca->found && tcp_in_slow_start(tp) && hystart && +- tp->snd_cwnd >= hystart_low_window) +- hystart_update(sk, delay); +-} +- +-extern __u32 tcp_reno_undo_cwnd(struct sock *sk) __ksym; +- +-__u32 BPF_STRUCT_OPS(bpf_cubic_undo_cwnd, struct sock *sk) +-{ +- return tcp_reno_undo_cwnd(sk); +-} +- +-SEC(".struct_ops") +-struct tcp_congestion_ops cubic = { +- .init = (void *)bpf_cubic_init, +- .ssthresh = (void *)bpf_cubic_recalc_ssthresh, +- .cong_avoid = (void *)bpf_cubic_cong_avoid, +- .set_state = (void *)bpf_cubic_state, +- .undo_cwnd = (void *)bpf_cubic_undo_cwnd, +- .cwnd_event = (void *)bpf_cubic_cwnd_event, +- .pkts_acked = (void *)bpf_cubic_acked, +- .name = "bpf_cubic", +-}; +diff --git a/tools/testing/selftests/bpf/progs/bpf_dctcp.c b/tools/testing/selftests/bpf/progs/bpf_dctcp.c +deleted file mode 100644 +index fd42247da8b4..000000000000 +--- a/tools/testing/selftests/bpf/progs/bpf_dctcp.c ++++ /dev/null +@@ -1,224 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* Copyright (c) 2019 Facebook */ +- +-/* WARNING: This implemenation is not necessarily the same +- * as the tcp_dctcp.c. The purpose is mainly for testing +- * the kernel BPF logic. 
+- */ +- +-#include <stddef.h> +-#include <linux/bpf.h> +-#include <linux/types.h> +-#include <linux/stddef.h> +-#include <linux/tcp.h> +-#include <bpf/bpf_helpers.h> +-#include <bpf/bpf_tracing.h> +-#include "bpf_tcp_helpers.h" +- +-char _license[] SEC("license") = "GPL"; +- +-int stg_result = 0; +- +-struct { +- __uint(type, BPF_MAP_TYPE_SK_STORAGE); +- __uint(map_flags, BPF_F_NO_PREALLOC); +- __type(key, int); +- __type(value, int); +-} sk_stg_map SEC(".maps"); +- +-#define DCTCP_MAX_ALPHA 1024U +- +-struct dctcp { +- __u32 old_delivered; +- __u32 old_delivered_ce; +- __u32 prior_rcv_nxt; +- __u32 dctcp_alpha; +- __u32 next_seq; +- __u32 ce_state; +- __u32 loss_cwnd; +-}; +- +-static unsigned int dctcp_shift_g = 4; /* g = 1/2^4 */ +-static unsigned int dctcp_alpha_on_init = DCTCP_MAX_ALPHA; +- +-static __always_inline void dctcp_reset(const struct tcp_sock *tp, +- struct dctcp *ca) +-{ +- ca->next_seq = tp->snd_nxt; +- +- ca->old_delivered = tp->delivered; +- ca->old_delivered_ce = tp->delivered_ce; +-} +- +-SEC("struct_ops/dctcp_init") +-void BPF_PROG(dctcp_init, struct sock *sk) +-{ +- const struct tcp_sock *tp = tcp_sk(sk); +- struct dctcp *ca = inet_csk_ca(sk); +- int *stg; +- +- ca->prior_rcv_nxt = tp->rcv_nxt; +- ca->dctcp_alpha = min(dctcp_alpha_on_init, DCTCP_MAX_ALPHA); +- ca->loss_cwnd = 0; +- ca->ce_state = 0; +- +- stg = bpf_sk_storage_get(&sk_stg_map, (void *)tp, NULL, 0); +- if (stg) { +- stg_result = *stg; +- bpf_sk_storage_delete(&sk_stg_map, (void *)tp); +- } +- dctcp_reset(tp, ca); +-} +- +-SEC("struct_ops/dctcp_ssthresh") +-__u32 BPF_PROG(dctcp_ssthresh, struct sock *sk) +-{ +- struct dctcp *ca = inet_csk_ca(sk); +- struct tcp_sock *tp = tcp_sk(sk); +- +- ca->loss_cwnd = tp->snd_cwnd; +- return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->dctcp_alpha) >> 11U), 2U); +-} +- +-SEC("struct_ops/dctcp_update_alpha") +-void BPF_PROG(dctcp_update_alpha, struct sock *sk, __u32 flags) +-{ +- const struct tcp_sock *tp = tcp_sk(sk); +- struct dctcp *ca = inet_csk_ca(sk); +- +- /* Expired RTT */ +- if (!before(tp->snd_una, ca->next_seq)) { +- __u32 delivered_ce = tp->delivered_ce - ca->old_delivered_ce; +- __u32 alpha = ca->dctcp_alpha; +- +- /* alpha = (1 - g) * alpha + g * F */ +- +- alpha -= min_not_zero(alpha, alpha >> dctcp_shift_g); +- if (delivered_ce) { +- __u32 delivered = tp->delivered - ca->old_delivered; +- +- /* If dctcp_shift_g == 1, a 32bit value would overflow +- * after 8 M packets. +- */ +- delivered_ce <<= (10 - dctcp_shift_g); +- delivered_ce /= max(1U, delivered); +- +- alpha = min(alpha + delivered_ce, DCTCP_MAX_ALPHA); +- } +- ca->dctcp_alpha = alpha; +- dctcp_reset(tp, ca); +- } +-} +- +-static __always_inline void dctcp_react_to_loss(struct sock *sk) +-{ +- struct dctcp *ca = inet_csk_ca(sk); +- struct tcp_sock *tp = tcp_sk(sk); +- +- ca->loss_cwnd = tp->snd_cwnd; +- tp->snd_ssthresh = max(tp->snd_cwnd >> 1U, 2U); +-} +- +-SEC("struct_ops/dctcp_state") +-void BPF_PROG(dctcp_state, struct sock *sk, __u8 new_state) +-{ +- if (new_state == TCP_CA_Recovery && +- new_state != BPF_CORE_READ_BITFIELD(inet_csk(sk), icsk_ca_state)) +- dctcp_react_to_loss(sk); +- /* We handle RTO in dctcp_cwnd_event to ensure that we perform only +- * one loss-adjustment per RTT. 
+- */ +-} +- +-static __always_inline void dctcp_ece_ack_cwr(struct sock *sk, __u32 ce_state) +-{ +- struct tcp_sock *tp = tcp_sk(sk); +- +- if (ce_state == 1) +- tp->ecn_flags |= TCP_ECN_DEMAND_CWR; +- else +- tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR; +-} +- +-/* Minimal DCTP CE state machine: +- * +- * S: 0 <- last pkt was non-CE +- * 1 <- last pkt was CE +- */ +-static __always_inline +-void dctcp_ece_ack_update(struct sock *sk, enum tcp_ca_event evt, +- __u32 *prior_rcv_nxt, __u32 *ce_state) +-{ +- __u32 new_ce_state = (evt == CA_EVENT_ECN_IS_CE) ? 1 : 0; +- +- if (*ce_state != new_ce_state) { +- /* CE state has changed, force an immediate ACK to +- * reflect the new CE state. If an ACK was delayed, +- * send that first to reflect the prior CE state. +- */ +- if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) { +- dctcp_ece_ack_cwr(sk, *ce_state); +- bpf_tcp_send_ack(sk, *prior_rcv_nxt); +- } +- inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW; +- } +- *prior_rcv_nxt = tcp_sk(sk)->rcv_nxt; +- *ce_state = new_ce_state; +- dctcp_ece_ack_cwr(sk, new_ce_state); +-} +- +-SEC("struct_ops/dctcp_cwnd_event") +-void BPF_PROG(dctcp_cwnd_event, struct sock *sk, enum tcp_ca_event ev) +-{ +- struct dctcp *ca = inet_csk_ca(sk); +- +- switch (ev) { +- case CA_EVENT_ECN_IS_CE: +- case CA_EVENT_ECN_NO_CE: +- dctcp_ece_ack_update(sk, ev, &ca->prior_rcv_nxt, &ca->ce_state); +- break; +- case CA_EVENT_LOSS: +- dctcp_react_to_loss(sk); +- break; +- default: +- /* Don't care for the rest. */ +- break; +- } +-} +- +-SEC("struct_ops/dctcp_cwnd_undo") +-__u32 BPF_PROG(dctcp_cwnd_undo, struct sock *sk) +-{ +- const struct dctcp *ca = inet_csk_ca(sk); +- +- return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd); +-} +- +-extern void tcp_reno_cong_avoid(struct sock *sk, __u32 ack, __u32 acked) __ksym; +- +-SEC("struct_ops/dctcp_reno_cong_avoid") +-void BPF_PROG(dctcp_cong_avoid, struct sock *sk, __u32 ack, __u32 acked) +-{ +- tcp_reno_cong_avoid(sk, ack, acked); +-} +- +-SEC(".struct_ops") +-struct tcp_congestion_ops dctcp_nouse = { +- .init = (void *)dctcp_init, +- .set_state = (void *)dctcp_state, +- .flags = TCP_CONG_NEEDS_ECN, +- .name = "bpf_dctcp_nouse", +-}; +- +-SEC(".struct_ops") +-struct tcp_congestion_ops dctcp = { +- .init = (void *)dctcp_init, +- .in_ack_event = (void *)dctcp_update_alpha, +- .cwnd_event = (void *)dctcp_cwnd_event, +- .ssthresh = (void *)dctcp_ssthresh, +- .cong_avoid = (void *)dctcp_cong_avoid, +- .undo_cwnd = (void *)dctcp_cwnd_undo, +- .set_state = (void *)dctcp_state, +- .flags = TCP_CONG_NEEDS_ECN, +- .name = "bpf_dctcp", +-}; +diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_test.c b/tools/testing/selftests/bpf/progs/kfunc_call_test.c +deleted file mode 100644 +index 470f8723e463..000000000000 +--- a/tools/testing/selftests/bpf/progs/kfunc_call_test.c ++++ /dev/null +@@ -1,47 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* Copyright (c) 2021 Facebook */ +-#include <linux/bpf.h> +-#include <bpf/bpf_helpers.h> +-#include "bpf_tcp_helpers.h" +- +-extern int bpf_kfunc_call_test2(struct sock *sk, __u32 a, __u32 b) __ksym; +-extern __u64 bpf_kfunc_call_test1(struct sock *sk, __u32 a, __u64 b, +- __u32 c, __u64 d) __ksym; +- +-SEC("classifier") +-int kfunc_call_test2(struct __sk_buff *skb) +-{ +- struct bpf_sock *sk = skb->sk; +- +- if (!sk) +- return -1; +- +- sk = bpf_sk_fullsock(sk); +- if (!sk) +- return -1; +- +- return bpf_kfunc_call_test2((struct sock *)sk, 1, 2); +-} +- +-SEC("classifier") +-int kfunc_call_test1(struct __sk_buff *skb) +-{ +- struct bpf_sock *sk = skb->sk; +- 
__u64 a = 1ULL << 32; +- __u32 ret; +- +- if (!sk) +- return -1; +- +- sk = bpf_sk_fullsock(sk); +- if (!sk) +- return -1; +- +- a = bpf_kfunc_call_test1((struct sock *)sk, 1, a | 2, 3, a | 4); +- ret = a >> 32; /* ret should be 2 */ +- ret += (__u32)a; /* ret should be 12 */ +- +- return ret; +-} +- +-char _license[] SEC("license") = "GPL"; +diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_test_subprog.c b/tools/testing/selftests/bpf/progs/kfunc_call_test_subprog.c +deleted file mode 100644 +index b2dcb7d9cb03..000000000000 +--- a/tools/testing/selftests/bpf/progs/kfunc_call_test_subprog.c ++++ /dev/null +@@ -1,42 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* Copyright (c) 2021 Facebook */ +-#include <linux/bpf.h> +-#include <bpf/bpf_helpers.h> +-#include "bpf_tcp_helpers.h" +- +-extern const int bpf_prog_active __ksym; +-extern __u64 bpf_kfunc_call_test1(struct sock *sk, __u32 a, __u64 b, +- __u32 c, __u64 d) __ksym; +-extern struct sock *bpf_kfunc_call_test3(struct sock *sk) __ksym; +-int active_res = -1; +-int sk_state = -1; +- +-int __noinline f1(struct __sk_buff *skb) +-{ +- struct bpf_sock *sk = skb->sk; +- int *active; +- +- if (!sk) +- return -1; +- +- sk = bpf_sk_fullsock(sk); +- if (!sk) +- return -1; +- +- active = (int *)bpf_per_cpu_ptr(&bpf_prog_active, +- bpf_get_smp_processor_id()); +- if (active) +- active_res = *active; +- +- sk_state = bpf_kfunc_call_test3((struct sock *)sk)->__sk_common.skc_state; +- +- return (__u32)bpf_kfunc_call_test1((struct sock *)sk, 1, 2, 3, 4); +-} +- +-SEC("classifier") +-int kfunc_call_test1(struct __sk_buff *skb) +-{ +- return f1(skb); +-} +- +-char _license[] SEC("license") = "GPL"; +diff --git a/tools/testing/selftests/bpf/progs/linked_funcs1.c b/tools/testing/selftests/bpf/progs/linked_funcs1.c +deleted file mode 100644 +index b964ec1390c2..000000000000 +--- a/tools/testing/selftests/bpf/progs/linked_funcs1.c ++++ /dev/null +@@ -1,73 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* Copyright (c) 2021 Facebook */ +- +-#include "vmlinux.h" +-#include <bpf/bpf_helpers.h> +-#include <bpf/bpf_tracing.h> +- +-/* weak and shared between two files */ +-const volatile int my_tid __weak; +-long syscall_id __weak; +- +-int output_val1; +-int output_ctx1; +-int output_weak1; +- +-/* same "subprog" name in all files, but it's ok because they all are static */ +-static __noinline int subprog(int x) +-{ +- /* but different formula */ +- return x * 1; +-} +- +-/* Global functions can't be void */ +-int set_output_val1(int x) +-{ +- output_val1 = x + subprog(x); +- return x; +-} +- +-/* This function can't be verified as global, as it assumes raw_tp/sys_enter +- * context and accesses syscall id (second argument). So we mark it as +- * __hidden, so that libbpf will mark it as static in the final object file, +- * right before verifying it in the kernel. +- * +- * But we don't mark it as __hidden here, rather at extern site. __hidden is +- * "contaminating" visibility, so it will get propagated from either extern or +- * actual definition (including from the losing __weak definition). 
+- */ +-void set_output_ctx1(__u64 *ctx) +-{ +- output_ctx1 = ctx[1]; /* long id, same as in BPF_PROG below */ +-} +- +-/* this weak instance should win because it's the first one */ +-__weak int set_output_weak(int x) +-{ +- output_weak1 = x; +- return x; +-} +- +-extern int set_output_val2(int x); +- +-/* here we'll force set_output_ctx2() to be __hidden in the final obj file */ +-__hidden extern void set_output_ctx2(__u64 *ctx); +- +-SEC("raw_tp/sys_enter") +-int BPF_PROG(handler1, struct pt_regs *regs, long id) +-{ +- if (my_tid != (u32)bpf_get_current_pid_tgid() || id != syscall_id) +- return 0; +- +- set_output_val2(1000); +- set_output_ctx2(ctx); /* ctx definition is hidden in BPF_PROG macro */ +- +- /* keep input value the same across both files to avoid dependency on +- * handler call order; differentiate by output_weak1 vs output_weak2. +- */ +- set_output_weak(42); +- +- return 0; +-} +- +-char LICENSE[] SEC("license") = "GPL"; +diff --git a/tools/testing/selftests/bpf/progs/linked_funcs2.c b/tools/testing/selftests/bpf/progs/linked_funcs2.c +deleted file mode 100644 +index 575e958e60b7..000000000000 +--- a/tools/testing/selftests/bpf/progs/linked_funcs2.c ++++ /dev/null +@@ -1,73 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* Copyright (c) 2021 Facebook */ +- +-#include "vmlinux.h" +-#include <bpf/bpf_helpers.h> +-#include <bpf/bpf_tracing.h> +- +-/* weak and shared between both files */ +-const volatile int my_tid __weak; +-long syscall_id __weak; +- +-int output_val2; +-int output_ctx2; +-int output_weak2; /* should stay zero */ +- +-/* same "subprog" name in all files, but it's ok because they all are static */ +-static __noinline int subprog(int x) +-{ +- /* but different formula */ +- return x * 2; +-} +- +-/* Global functions can't be void */ +-int set_output_val2(int x) +-{ +- output_val2 = 2 * x + 2 * subprog(x); +- return 2 * x; +-} +- +-/* This function can't be verified as global, as it assumes raw_tp/sys_enter +- * context and accesses syscall id (second argument). So we mark it as +- * __hidden, so that libbpf will mark it as static in the final object file, +- * right before verifying it in the kernel. +- * +- * But we don't mark it as __hidden here, rather at extern site. __hidden is +- * "contaminating" visibility, so it will get propagated from either extern or +- * actual definition (including from the losing __weak definition). +- */ +-void set_output_ctx2(__u64 *ctx) +-{ +- output_ctx2 = ctx[1]; /* long id, same as in BPF_PROG below */ +-} +- +-/* this weak instance should lose, because it will be processed second */ +-__weak int set_output_weak(int x) +-{ +- output_weak2 = x; +- return 2 * x; +-} +- +-extern int set_output_val1(int x); +- +-/* here we'll force set_output_ctx1() to be __hidden in the final obj file */ +-__hidden extern void set_output_ctx1(__u64 *ctx); +- +-SEC("raw_tp/sys_enter") +-int BPF_PROG(handler2, struct pt_regs *regs, long id) +-{ +- if (my_tid != (u32)bpf_get_current_pid_tgid() || id != syscall_id) +- return 0; +- +- set_output_val1(2000); +- set_output_ctx1(ctx); /* ctx definition is hidden in BPF_PROG macro */ +- +- /* keep input value the same across both files to avoid dependency on +- * handler call order; differentiate by output_weak1 vs output_weak2. 
+- */ +- set_output_weak(42); +- +- return 0; +-} +- +-char LICENSE[] SEC("license") = "GPL"; +diff --git a/tools/testing/selftests/bpf/progs/linked_maps1.c b/tools/testing/selftests/bpf/progs/linked_maps1.c +deleted file mode 100644 +index 52291515cc72..000000000000 +--- a/tools/testing/selftests/bpf/progs/linked_maps1.c ++++ /dev/null +@@ -1,82 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* Copyright (c) 2021 Facebook */ +- +-#include "vmlinux.h" +-#include <bpf/bpf_helpers.h> +-#include <bpf/bpf_tracing.h> +- +-struct my_key { long x; }; +-struct my_value { long x; }; +- +-struct { +- __uint(type, BPF_MAP_TYPE_HASH); +- __type(key, struct my_key); +- __type(value, struct my_value); +- __uint(max_entries, 16); +-} map1 SEC(".maps"); +- +- /* Matches map2 definition in linked_maps2.c. Order of the attributes doesn't +- * matter. +- */ +-typedef struct { +- __uint(max_entries, 8); +- __type(key, int); +- __type(value, int); +- __uint(type, BPF_MAP_TYPE_ARRAY); +-} map2_t; +- +-extern map2_t map2 SEC(".maps"); +- +-/* This should be the winning map definition, but we have no way of verifying, +- * so we just make sure that it links and works without errors +- */ +-struct { +- __uint(type, BPF_MAP_TYPE_ARRAY); +- __type(key, int); +- __type(value, int); +- __uint(max_entries, 16); +-} map_weak __weak SEC(".maps"); +- +-int output_first1; +-int output_second1; +-int output_weak1; +- +-SEC("raw_tp/sys_enter") +-int BPF_PROG(handler_enter1) +-{ +- /* update values with key = 1 */ +- int key = 1, val = 1; +- struct my_key key_struct = { .x = 1 }; +- struct my_value val_struct = { .x = 1000 }; +- +- bpf_map_update_elem(&map1, &key_struct, &val_struct, 0); +- bpf_map_update_elem(&map2, &key, &val, 0); +- bpf_map_update_elem(&map_weak, &key, &val, 0); +- +- return 0; +-} +- +-SEC("raw_tp/sys_exit") +-int BPF_PROG(handler_exit1) +-{ +- /* lookup values with key = 2, set in another file */ +- int key = 2, *val; +- struct my_key key_struct = { .x = 2 }; +- struct my_value *value_struct; +- +- value_struct = bpf_map_lookup_elem(&map1, &key_struct); +- if (value_struct) +- output_first1 = value_struct->x; +- +- val = bpf_map_lookup_elem(&map2, &key); +- if (val) +- output_second1 = *val; +- +- val = bpf_map_lookup_elem(&map_weak, &key); +- if (val) +- output_weak1 = *val; +- +- return 0; +-} +- +-char LICENSE[] SEC("license") = "GPL"; +diff --git a/tools/testing/selftests/bpf/progs/linked_maps2.c b/tools/testing/selftests/bpf/progs/linked_maps2.c +deleted file mode 100644 +index 0693687474ed..000000000000 +--- a/tools/testing/selftests/bpf/progs/linked_maps2.c ++++ /dev/null +@@ -1,76 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* Copyright (c) 2021 Facebook */ +- +-#include "vmlinux.h" +-#include <bpf/bpf_helpers.h> +-#include <bpf/bpf_tracing.h> +- +-/* modifiers and typedefs are ignored when comparing key/value types */ +-typedef struct my_key { long x; } key_type; +-typedef struct my_value { long x; } value_type; +- +-extern struct { +- __uint(max_entries, 16); +- __type(key, key_type); +- __type(value, value_type); +- __uint(type, BPF_MAP_TYPE_HASH); +-} map1 SEC(".maps"); +- +-struct { +- __uint(type, BPF_MAP_TYPE_ARRAY); +- __type(key, int); +- __type(value, int); +- __uint(max_entries, 8); +-} map2 SEC(".maps"); +- +-/* this definition will lose, but it has to exactly match the winner */ +-struct { +- __uint(type, BPF_MAP_TYPE_ARRAY); +- __type(key, int); +- __type(value, int); +- __uint(max_entries, 16); +-} map_weak __weak SEC(".maps"); +- +-int output_first2; +-int output_second2; 
+-int output_weak2; +- +-SEC("raw_tp/sys_enter") +-int BPF_PROG(handler_enter2) +-{ +- /* update values with key = 2 */ +- int key = 2, val = 2; +- key_type key_struct = { .x = 2 }; +- value_type val_struct = { .x = 2000 }; +- +- bpf_map_update_elem(&map1, &key_struct, &val_struct, 0); +- bpf_map_update_elem(&map2, &key, &val, 0); +- bpf_map_update_elem(&map_weak, &key, &val, 0); +- +- return 0; +-} +- +-SEC("raw_tp/sys_exit") +-int BPF_PROG(handler_exit2) +-{ +- /* lookup values with key = 1, set in another file */ +- int key = 1, *val; +- key_type key_struct = { .x = 1 }; +- value_type *value_struct; +- +- value_struct = bpf_map_lookup_elem(&map1, &key_struct); +- if (value_struct) +- output_first2 = value_struct->x; +- +- val = bpf_map_lookup_elem(&map2, &key); +- if (val) +- output_second2 = *val; +- +- val = bpf_map_lookup_elem(&map_weak, &key); +- if (val) +- output_weak2 = *val; +- +- return 0; +-} +- +-char LICENSE[] SEC("license") = "GPL"; +diff --git a/tools/testing/selftests/bpf/progs/linked_vars1.c b/tools/testing/selftests/bpf/progs/linked_vars1.c +deleted file mode 100644 +index ef9e9d0bb0ca..000000000000 +--- a/tools/testing/selftests/bpf/progs/linked_vars1.c ++++ /dev/null +@@ -1,54 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* Copyright (c) 2021 Facebook */ +- +-#include "vmlinux.h" +-#include <bpf/bpf_helpers.h> +-#include <bpf/bpf_tracing.h> +- +-extern int LINUX_KERNEL_VERSION __kconfig; +-/* this weak extern will be strict due to the other file's strong extern */ +-extern bool CONFIG_BPF_SYSCALL __kconfig __weak; +-extern const void bpf_link_fops __ksym __weak; +- +-int input_bss1; +-int input_data1 = 1; +-const volatile int input_rodata1 = 11; +- +-int input_bss_weak __weak; +-/* these two definitions should win */ +-int input_data_weak __weak = 10; +-const volatile int input_rodata_weak __weak = 100; +- +-extern int input_bss2; +-extern int input_data2; +-extern const int input_rodata2; +- +-int output_bss1; +-int output_data1; +-int output_rodata1; +- +-long output_sink1; +- +-static __noinline int get_bss_res(void) +-{ +- /* just make sure all the relocations work against .text as well */ +- return input_bss1 + input_bss2 + input_bss_weak; +-} +- +-SEC("raw_tp/sys_enter") +-int BPF_PROG(handler1) +-{ +- output_bss1 = get_bss_res(); +- output_data1 = input_data1 + input_data2 + input_data_weak; +- output_rodata1 = input_rodata1 + input_rodata2 + input_rodata_weak; +- +- /* make sure we actually use above special externs, otherwise compiler +- * will optimize them out +- */ +- output_sink1 = LINUX_KERNEL_VERSION +- + CONFIG_BPF_SYSCALL +- + (long)&bpf_link_fops; +- return 0; +-} +- +-char LICENSE[] SEC("license") = "GPL"; +diff --git a/tools/testing/selftests/bpf/progs/linked_vars2.c b/tools/testing/selftests/bpf/progs/linked_vars2.c +deleted file mode 100644 +index e4f5bd388a3c..000000000000 +--- a/tools/testing/selftests/bpf/progs/linked_vars2.c ++++ /dev/null +@@ -1,55 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* Copyright (c) 2021 Facebook */ +- +-#include "vmlinux.h" +-#include <bpf/bpf_helpers.h> +-#include <bpf/bpf_tracing.h> +- +-extern int LINUX_KERNEL_VERSION __kconfig; +-/* when an extern is defined as both strong and weak, resulting symbol will be strong */ +-extern bool CONFIG_BPF_SYSCALL __kconfig; +-extern const void __start_BTF __ksym; +- +-int input_bss2; +-int input_data2 = 2; +-const volatile int input_rodata2 = 22; +- +-int input_bss_weak __weak; +-/* these two weak variables should lose */ +-int input_data_weak __weak = 20; +-const 
volatile int input_rodata_weak __weak = 200; +- +-extern int input_bss1; +-extern int input_data1; +-extern const int input_rodata1; +- +-int output_bss2; +-int output_data2; +-int output_rodata2; +- +-int output_sink2; +- +-static __noinline int get_data_res(void) +-{ +- /* just make sure all the relocations work against .text as well */ +- return input_data1 + input_data2 + input_data_weak; +-} +- +-SEC("raw_tp/sys_enter") +-int BPF_PROG(handler2) +-{ +- output_bss2 = input_bss1 + input_bss2 + input_bss_weak; +- output_data2 = get_data_res(); +- output_rodata2 = input_rodata1 + input_rodata2 + input_rodata_weak; +- +- /* make sure we actually use above special externs, otherwise compiler +- * will optimize them out +- */ +- output_sink2 = LINUX_KERNEL_VERSION +- + CONFIG_BPF_SYSCALL +- + (long)&__start_BTF; +- +- return 0; +-} +- +-char LICENSE[] SEC("license") = "GPL"; +diff --git a/tools/testing/selftests/bpf/progs/test_static_linked1.c b/tools/testing/selftests/bpf/progs/test_static_linked1.c +deleted file mode 100644 +index ea1a6c4c7172..000000000000 +--- a/tools/testing/selftests/bpf/progs/test_static_linked1.c ++++ /dev/null +@@ -1,30 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* Copyright (c) 2021 Facebook */ +- +-#include <linux/bpf.h> +-#include <bpf/bpf_helpers.h> +- +-/* 8-byte aligned .bss */ +-static volatile long static_var1; +-static volatile int static_var11; +-int var1 = 0; +-/* 4-byte aligned .rodata */ +-const volatile int rovar1; +- +-/* same "subprog" name in both files */ +-static __noinline int subprog(int x) +-{ +- /* but different formula */ +- return x * 2; +-} +- +-SEC("raw_tp/sys_enter") +-int handler1(const void *ctx) +-{ +- var1 = subprog(rovar1) + static_var1 + static_var11; +- +- return 0; +-} +- +-char LICENSE[] SEC("license") = "GPL"; +-int VERSION SEC("version") = 1; +diff --git a/tools/testing/selftests/bpf/progs/test_static_linked2.c b/tools/testing/selftests/bpf/progs/test_static_linked2.c +deleted file mode 100644 +index 54d8d1ab577c..000000000000 +--- a/tools/testing/selftests/bpf/progs/test_static_linked2.c ++++ /dev/null +@@ -1,31 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 +-/* Copyright (c) 2021 Facebook */ +- +-#include <linux/bpf.h> +-#include <bpf/bpf_helpers.h> +- +-/* 4-byte aligned .bss */ +-static volatile int static_var2; +-static volatile int static_var22; +-int var2 = 0; +-/* 8-byte aligned .rodata */ +-const volatile long rovar2; +- +-/* same "subprog" name in both files */ +-static __noinline int subprog(int x) +-{ +- /* but different formula */ +- return x * 3; +-} +- +-SEC("raw_tp/sys_enter") +-int handler2(const void *ctx) +-{ +- var2 = subprog(rovar2) + static_var2 + static_var22; +- +- return 0; +-} +- +-/* different name and/or type of the variable doesn't matter */ +-char _license[] SEC("license") = "GPL"; +-int _version SEC("version") = 1; |