Diffstat (limited to 'patch-5.15-redhat.patch'):
-rw-r--r--  patch-5.15-redhat.patch | 1462
1 file changed, 1444 insertions(+), 18 deletions(-)
diff --git a/patch-5.15-redhat.patch b/patch-5.15-redhat.patch
index 49e6f2ba9..63f964df0 100644
--- a/patch-5.15-redhat.patch
+++ b/patch-5.15-redhat.patch
@@ -1,6 +1,6 @@
Documentation/admin-guide/kernel-parameters.txt | 9 +
Kconfig | 2 +
- Kconfig.redhat | 17 ++
+ Kconfig.redhat | 17 +
Makefile | 12 +-
arch/arm/Kconfig | 4 +-
arch/arm64/Kconfig | 3 +-
@@ -10,22 +10,22 @@
arch/s390/kernel/ipl.c | 5 +
arch/s390/kernel/setup.c | 4 +
arch/x86/kernel/cpu/common.c | 1 +
- arch/x86/kernel/setup.c | 69 ++++-
- crypto/rng.c | 73 ++++-
+ arch/x86/kernel/setup.c | 69 ++-
+ crypto/rng.c | 73 ++-
drivers/acpi/apei/hest.c | 8 +
drivers/acpi/irq.c | 17 +-
drivers/acpi/scan.c | 9 +
- drivers/ata/libahci.c | 18 ++
- drivers/char/ipmi/ipmi_dmi.c | 15 ++
+ drivers/ata/libahci.c | 18 +
+ drivers/char/ipmi/ipmi_dmi.c | 15 +
drivers/char/ipmi/ipmi_msghandler.c | 16 +-
- drivers/char/random.c | 115 ++++++++
+ drivers/char/random.c | 115 +++++
drivers/firmware/efi/Makefile | 1 +
- drivers/firmware/efi/efi.c | 124 ++++++---
- drivers/firmware/efi/secureboot.c | 38 +++
- drivers/hid/hid-rmi.c | 64 -----
- drivers/hwtracing/coresight/coresight-etm4x-core.c | 19 ++
- drivers/input/rmi4/rmi_driver.c | 124 +++++----
- drivers/iommu/iommu.c | 22 ++
+ drivers/firmware/efi/efi.c | 124 +++--
+ drivers/firmware/efi/secureboot.c | 38 ++
+ drivers/hid/hid-rmi.c | 64 ---
+ drivers/hwtracing/coresight/coresight-etm4x-core.c | 19 +
+ drivers/input/rmi4/rmi_driver.c | 124 +++--
+ drivers/iommu/iommu.c | 22 +
drivers/message/fusion/mptsas.c | 10 +
drivers/message/fusion/mptspi.c | 11 +
drivers/net/team/team.c | 2 +
@@ -34,7 +34,7 @@
drivers/nvme/host/multipath.c | 19 +-
drivers/nvme/host/nvme.h | 4 +
drivers/pci/pci-driver.c | 29 ++
- drivers/pci/quirks.c | 24 ++
+ drivers/pci/quirks.c | 24 +
drivers/scsi/aacraid/linit.c | 2 +
drivers/scsi/be2iscsi/be_main.c | 2 +
drivers/scsi/hpsa.c | 4 +
@@ -47,23 +47,23 @@
fs/ext4/super.c | 5 +
fs/xfs/xfs_super.c | 5 +
include/linux/efi.h | 22 +-
- include/linux/kernel.h | 17 ++
+ include/linux/kernel.h | 17 +
include/linux/lsm_hook_defs.h | 2 +
include/linux/lsm_hooks.h | 6 +
include/linux/module.h | 1 +
include/linux/panic.h | 19 +-
include/linux/pci.h | 4 +
include/linux/random.h | 7 +
- include/linux/rh_kabi.h | 297 +++++++++++++++++++++
+ include/linux/rh_kabi.h | 297 +++++++++++
include/linux/rmi.h | 1 +
include/linux/security.h | 5 +
init/Kconfig | 2 +-
kernel/Makefile | 1 +
- kernel/bpf/syscall.c | 18 ++
+ kernel/bpf/syscall.c | 18 +
kernel/module.c | 2 +
kernel/module_signing.c | 9 +-
kernel/panic.c | 14 +
- kernel/rh_taint.c | 109 ++++++++
+ kernel/rh_taint.c | 109 +++++
kernel/sysctl.c | 5 +
mm/cma.c | 10 +
scripts/mod/modpost.c | 8 +
@@ -72,7 +72,20 @@
security/lockdown/Kconfig | 13 +
security/lockdown/lockdown.c | 1 +
security/security.c | 6 +
- 74 files changed, 1377 insertions(+), 193 deletions(-)
+ tools/testing/selftests/bpf/Makefile | 1 -
+ .../selftests/bpf/prog_tests/linked_funcs.c | 42 --
+ .../testing/selftests/bpf/prog_tests/linked_maps.c | 30 --
+ .../testing/selftests/bpf/prog_tests/linked_vars.c | 43 --
+ tools/testing/selftests/bpf/progs/bpf_cubic.c | 545 ---------------------
+ tools/testing/selftests/bpf/progs/bpf_dctcp.c | 249 ----------
+ .../testing/selftests/bpf/progs/kfunc_call_test.c | 47 --
+ .../selftests/bpf/progs/kfunc_call_test_subprog.c | 42 --
+ tools/testing/selftests/bpf/progs/linked_funcs1.c | 73 ---
+ tools/testing/selftests/bpf/progs/linked_funcs2.c | 73 ---
+ tools/testing/selftests/bpf/progs/linked_maps2.c | 76 ---
+ tools/testing/selftests/bpf/progs/linked_vars1.c | 54 --
+ tools/testing/selftests/bpf/progs/linked_vars2.c | 55 ---
+ 87 files changed, 1377 insertions(+), 1523 deletions(-)
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 91ba391f9b32..88296cfdc5e5 100644
@@ -3054,3 +3067,1416 @@ index 9ffa9e9c5c55..36484cc9842d 100644
#ifdef CONFIG_PERF_EVENTS
int security_perf_event_open(struct perf_event_attr *attr, int type)
{
+diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
+index 799b88152e9e..73c687d900c3 100644
+--- a/tools/testing/selftests/bpf/Makefile
++++ b/tools/testing/selftests/bpf/Makefile
+@@ -426,7 +426,6 @@ $(TRUNNER_TEST_OBJS): $(TRUNNER_OUTPUT)/%.test.o: \
+ $(TRUNNER_BPF_OBJS) \
+ $(TRUNNER_BPF_SKELS) \
+ $(TRUNNER_BPF_LSKELS) \
+- $(TRUNNER_BPF_SKELS_LINKED) \
+ $$(BPFOBJ) | $(TRUNNER_OUTPUT)
+ $$(call msg,TEST-OBJ,$(TRUNNER_BINARY),$$@)
+ $(Q)cd $$(@D) && $$(CC) -I. $$(CFLAGS) -c $(CURDIR)/$$< $$(LDLIBS) -o $$(@F)
+diff --git a/tools/testing/selftests/bpf/prog_tests/linked_funcs.c b/tools/testing/selftests/bpf/prog_tests/linked_funcs.c
+deleted file mode 100644
+index e9916f2817ec..000000000000
+--- a/tools/testing/selftests/bpf/prog_tests/linked_funcs.c
++++ /dev/null
+@@ -1,42 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-/* Copyright (c) 2021 Facebook */
+-
+-#include <test_progs.h>
+-#include <sys/syscall.h>
+-#include "linked_funcs.skel.h"
+-
+-void test_linked_funcs(void)
+-{
+- int err;
+- struct linked_funcs *skel;
+-
+- skel = linked_funcs__open();
+- if (!ASSERT_OK_PTR(skel, "skel_open"))
+- return;
+-
+- skel->rodata->my_tid = syscall(SYS_gettid);
+- skel->bss->syscall_id = SYS_getpgid;
+-
+- err = linked_funcs__load(skel);
+- if (!ASSERT_OK(err, "skel_load"))
+- goto cleanup;
+-
+- err = linked_funcs__attach(skel);
+- if (!ASSERT_OK(err, "skel_attach"))
+- goto cleanup;
+-
+- /* trigger */
+- syscall(SYS_getpgid);
+-
+- ASSERT_EQ(skel->bss->output_val1, 2000 + 2000, "output_val1");
+- ASSERT_EQ(skel->bss->output_ctx1, SYS_getpgid, "output_ctx1");
+- ASSERT_EQ(skel->bss->output_weak1, 42, "output_weak1");
+-
+- ASSERT_EQ(skel->bss->output_val2, 2 * 1000 + 2 * (2 * 1000), "output_val2");
+- ASSERT_EQ(skel->bss->output_ctx2, SYS_getpgid, "output_ctx2");
+- /* output_weak2 should never be updated */
+- ASSERT_EQ(skel->bss->output_weak2, 0, "output_weak2");
+-
+-cleanup:
+- linked_funcs__destroy(skel);
+-}
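
The three linked_* tests removed above exercised skeletons built from several
BPF object files merged by libbpf's static linker, which resolves weak
symbols, externs, and map definitions across the inputs. A minimal sketch of
that linking step, assuming a libbpf recent enough to ship the bpf_linker
API (the file names here are hypothetical):

    #include <stdio.h>
    #include <bpf/libbpf.h>

    int main(void)
    {
            struct bpf_linker *linker;
            int err;

            /* output ELF that a skeleton would then be generated from */
            linker = bpf_linker__new("linked_funcs.linked.o", NULL);
            if (!linker)
                    return 1;

            /* weak/extern/map resolution happens across these inputs */
            err = bpf_linker__add_file(linker, "linked_funcs1.bpf.o", NULL);
            if (!err)
                    err = bpf_linker__add_file(linker, "linked_funcs2.bpf.o", NULL);
            if (!err)
                    err = bpf_linker__finalize(linker);

            bpf_linker__free(linker);
            if (err)
                    fprintf(stderr, "link failed: %d\n", err);
            return err ? 1 : 0;
    }

The bpftool gen object subcommand wraps the same API, which is normally how
the selftests Makefile performs this step while building the *.skel.h headers.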
+diff --git a/tools/testing/selftests/bpf/prog_tests/linked_maps.c b/tools/testing/selftests/bpf/prog_tests/linked_maps.c
+deleted file mode 100644
+index 85dcaaaf2775..000000000000
+--- a/tools/testing/selftests/bpf/prog_tests/linked_maps.c
++++ /dev/null
+@@ -1,30 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-/* Copyright (c) 2021 Facebook */
+-
+-#include <test_progs.h>
+-#include <sys/syscall.h>
+-#include "linked_maps.skel.h"
+-
+-void test_linked_maps(void)
+-{
+- int err;
+- struct linked_maps *skel;
+-
+- skel = linked_maps__open_and_load();
+- if (!ASSERT_OK_PTR(skel, "skel_open"))
+- return;
+-
+- err = linked_maps__attach(skel);
+- if (!ASSERT_OK(err, "skel_attach"))
+- goto cleanup;
+-
+- /* trigger */
+- syscall(SYS_getpgid);
+-
+- ASSERT_EQ(skel->bss->output_first1, 2000, "output_first1");
+- ASSERT_EQ(skel->bss->output_second1, 2, "output_second1");
+- ASSERT_EQ(skel->bss->output_weak1, 2, "output_weak1");
+-
+-cleanup:
+- linked_maps__destroy(skel);
+-}
+diff --git a/tools/testing/selftests/bpf/prog_tests/linked_vars.c b/tools/testing/selftests/bpf/prog_tests/linked_vars.c
+deleted file mode 100644
+index 267166abe4c1..000000000000
+--- a/tools/testing/selftests/bpf/prog_tests/linked_vars.c
++++ /dev/null
+@@ -1,43 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-/* Copyright (c) 2021 Facebook */
+-
+-#include <test_progs.h>
+-#include <sys/syscall.h>
+-#include "linked_vars.skel.h"
+-
+-void test_linked_vars(void)
+-{
+- int err;
+- struct linked_vars *skel;
+-
+- skel = linked_vars__open();
+- if (!ASSERT_OK_PTR(skel, "skel_open"))
+- return;
+-
+- skel->bss->input_bss1 = 1000;
+- skel->bss->input_bss2 = 2000;
+- skel->bss->input_bss_weak = 3000;
+-
+- err = linked_vars__load(skel);
+- if (!ASSERT_OK(err, "skel_load"))
+- goto cleanup;
+-
+- err = linked_vars__attach(skel);
+- if (!ASSERT_OK(err, "skel_attach"))
+- goto cleanup;
+-
+- /* trigger */
+- syscall(SYS_getpgid);
+-
+- ASSERT_EQ(skel->bss->output_bss1, 1000 + 2000 + 3000, "output_bss1");
+- ASSERT_EQ(skel->bss->output_bss2, 1000 + 2000 + 3000, "output_bss2");
+- /* 10 comes from "winner" input_data_weak in first obj file */
+- ASSERT_EQ(skel->bss->output_data1, 1 + 2 + 10, "output_data1");
+- ASSERT_EQ(skel->bss->output_data2, 1 + 2 + 10, "output_data2");
+- /* 100 comes from "winner" input_rodata_weak in first obj file */
+- ASSERT_EQ(skel->bss->output_rodata1, 11 + 22 + 100, "output_rodata1");
+- ASSERT_EQ(skel->bss->output_rodata2, 11 + 22 + 100, "output_rodata2");
+-
+-cleanup:
+- linked_vars__destroy(skel);
+-}
+diff --git a/tools/testing/selftests/bpf/progs/bpf_cubic.c b/tools/testing/selftests/bpf/progs/bpf_cubic.c
+deleted file mode 100644
+index f62df4d023f9..000000000000
+--- a/tools/testing/selftests/bpf/progs/bpf_cubic.c
++++ /dev/null
+@@ -1,545 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-only
+-
+-/* WARNING: This implementation is not necessarily the same
+- * as tcp_cubic.c. The purpose is mainly to test
+- * the kernel's BPF logic.
+- *
+- * Highlights:
+- * 1. The CONFIG_HZ .kconfig map is used.
+- * 2. In bictcp_update(), the calculation uses usec
+- *    resolution (i.e. USEC_PER_JIFFY) instead of jiffies,
+- *    so usecs_to_jiffies() is not used in bpf_cubic.c.
+- * 3. In bictcp_update() [under tcp_friendliness], the original
+- *    "while (ca->ack_cnt > delta)" loop is replaced by the equivalent
+- *    "ca->ack_cnt / delta" operation.
+- */
+-
+-#include <linux/bpf.h>
+-#include <linux/stddef.h>
+-#include <linux/tcp.h>
+-#include "bpf_tcp_helpers.h"
+-
+-char _license[] SEC("license") = "GPL";
+-
+-#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi)
+-
+-#define BICTCP_BETA_SCALE 1024 /* Scale factor for the beta calculation:
+- * max_cwnd = snd_cwnd * beta
+- */
+-#define BICTCP_HZ 10 /* BIC HZ 2^10 = 1024 */
+-
+-/* Two methods of hybrid slow start */
+-#define HYSTART_ACK_TRAIN 0x1
+-#define HYSTART_DELAY 0x2
+-
+-/* Number of delay samples for detecting the increase of delay */
+-#define HYSTART_MIN_SAMPLES 8
+-#define HYSTART_DELAY_MIN (4000U) /* 4ms */
+-#define HYSTART_DELAY_MAX (16000U) /* 16 ms */
+-#define HYSTART_DELAY_THRESH(x) clamp(x, HYSTART_DELAY_MIN, HYSTART_DELAY_MAX)
+-
+-static int fast_convergence = 1;
+-static const int beta = 717; /* = 717/1024 (BICTCP_BETA_SCALE) */
+-static int initial_ssthresh;
+-static const int bic_scale = 41;
+-static int tcp_friendliness = 1;
+-
+-static int hystart = 1;
+-static int hystart_detect = HYSTART_ACK_TRAIN | HYSTART_DELAY;
+-static int hystart_low_window = 16;
+-static int hystart_ack_delta_us = 2000;
+-
+-static const __u32 cube_rtt_scale = (bic_scale * 10); /* 1024*c/rtt */
+-static const __u32 beta_scale = 8*(BICTCP_BETA_SCALE+beta) / 3
+- / (BICTCP_BETA_SCALE - beta);
+-/* calculate the "K" for (wmax-cwnd) = c/rtt * K^3
+- * so K = cubic_root( (wmax-cwnd)*rtt/c )
+- * the unit of K is bictcp_HZ=2^10, not HZ
+- *
+- * c = bic_scale >> 10
+- * rtt = 100ms
+- *
+- * the following code has been designed and tested for
+- * cwnd < 1 million packets
+- * RTT < 100 seconds
+- * HZ < 100,000,000 (corresponding to a 10 nanosecond tick)
+- */
+-
+-/* 1/c * 2^2*bictcp_HZ * srtt, 2^40 */
+-static const __u64 cube_factor = (__u64)(1ull << (10+3*BICTCP_HZ))
+- / (bic_scale * 10);
+-
+-/* BIC TCP Parameters */
+-struct bictcp {
+- __u32 cnt; /* increase cwnd by 1 after ACKs */
+- __u32 last_max_cwnd; /* last maximum snd_cwnd */
+- __u32 last_cwnd; /* the last snd_cwnd */
+- __u32 last_time; /* time when updated last_cwnd */
+- __u32 bic_origin_point;/* origin point of bic function */
+- __u32 bic_K; /* time to origin point
+- from the beginning of the current epoch */
+- __u32 delay_min; /* min delay (usec) */
+- __u32 epoch_start; /* beginning of an epoch */
+- __u32 ack_cnt; /* number of acks */
+- __u32 tcp_cwnd; /* estimated tcp cwnd */
+- __u16 unused;
+- __u8 sample_cnt; /* number of samples to decide curr_rtt */
+- __u8 found; /* the exit point is found? */
+- __u32 round_start; /* beginning of each round */
+- __u32 end_seq; /* end_seq of the round */
+- __u32 last_ack; /* last time when the ACK spacing is close */
+- __u32 curr_rtt; /* the minimum rtt of current round */
+-};
+-
+-static inline void bictcp_reset(struct bictcp *ca)
+-{
+- ca->cnt = 0;
+- ca->last_max_cwnd = 0;
+- ca->last_cwnd = 0;
+- ca->last_time = 0;
+- ca->bic_origin_point = 0;
+- ca->bic_K = 0;
+- ca->delay_min = 0;
+- ca->epoch_start = 0;
+- ca->ack_cnt = 0;
+- ca->tcp_cwnd = 0;
+- ca->found = 0;
+-}
+-
+-extern unsigned long CONFIG_HZ __kconfig;
+-#define HZ CONFIG_HZ
+-#define USEC_PER_MSEC 1000UL
+-#define USEC_PER_SEC 1000000UL
+-#define USEC_PER_JIFFY (USEC_PER_SEC / HZ)
+-
+-static __always_inline __u64 div64_u64(__u64 dividend, __u64 divisor)
+-{
+- return dividend / divisor;
+-}
+-
+-#define div64_ul div64_u64
+-
+-#define BITS_PER_U64 (sizeof(__u64) * 8)
+-static __always_inline int fls64(__u64 x)
+-{
+- int num = BITS_PER_U64 - 1;
+-
+- if (x == 0)
+- return 0;
+-
+- if (!(x & (~0ull << (BITS_PER_U64-32)))) {
+- num -= 32;
+- x <<= 32;
+- }
+- if (!(x & (~0ull << (BITS_PER_U64-16)))) {
+- num -= 16;
+- x <<= 16;
+- }
+- if (!(x & (~0ull << (BITS_PER_U64-8)))) {
+- num -= 8;
+- x <<= 8;
+- }
+- if (!(x & (~0ull << (BITS_PER_U64-4)))) {
+- num -= 4;
+- x <<= 4;
+- }
+- if (!(x & (~0ull << (BITS_PER_U64-2)))) {
+- num -= 2;
+- x <<= 2;
+- }
+- if (!(x & (~0ull << (BITS_PER_U64-1))))
+- num -= 1;
+-
+- return num + 1;
+-}
+-
+-static __always_inline __u32 bictcp_clock_us(const struct sock *sk)
+-{
+- return tcp_sk(sk)->tcp_mstamp;
+-}
+-
+-static __always_inline void bictcp_hystart_reset(struct sock *sk)
+-{
+- struct tcp_sock *tp = tcp_sk(sk);
+- struct bictcp *ca = inet_csk_ca(sk);
+-
+- ca->round_start = ca->last_ack = bictcp_clock_us(sk);
+- ca->end_seq = tp->snd_nxt;
+- ca->curr_rtt = ~0U;
+- ca->sample_cnt = 0;
+-}
+-
+-/* "struct_ops/" prefix is not a requirement
+- * It will be recognized as BPF_PROG_TYPE_STRUCT_OPS
+- * as long as it is used in one of the func ptr
+- * under SEC(".struct_ops").
+- */
+-SEC("struct_ops/bpf_cubic_init")
+-void BPF_PROG(bpf_cubic_init, struct sock *sk)
+-{
+- struct bictcp *ca = inet_csk_ca(sk);
+-
+- bictcp_reset(ca);
+-
+- if (hystart)
+- bictcp_hystart_reset(sk);
+-
+- if (!hystart && initial_ssthresh)
+- tcp_sk(sk)->snd_ssthresh = initial_ssthresh;
+-}
+-
+-/* A SEC name without the prefix also works.
+- * The remaining tcp-cubic functions use the simpler
+- * BPF_STRUCT_OPS form instead.
+- */
+-SEC("no-sec-prefix-bictcp_cwnd_event")
+-void BPF_PROG(bpf_cubic_cwnd_event, struct sock *sk, enum tcp_ca_event event)
+-{
+- if (event == CA_EVENT_TX_START) {
+- struct bictcp *ca = inet_csk_ca(sk);
+- __u32 now = tcp_jiffies32;
+- __s32 delta;
+-
+- delta = now - tcp_sk(sk)->lsndtime;
+-
+- /* We were application limited (idle) for a while.
+- * Shift epoch_start to keep cwnd growth to cubic curve.
+- */
+- if (ca->epoch_start && delta > 0) {
+- ca->epoch_start += delta;
+- if (after(ca->epoch_start, now))
+- ca->epoch_start = now;
+- }
+- return;
+- }
+-}
+-
+-/*
+- * cbrt(x) MSB values for x MSB values in [0..63].
+- * Precomputed then refined by hand - Willy Tarreau
+- *
+- * For x in [0..63],
+- * v = cbrt(x << 18) - 1
+- * cbrt(x) = (v[x] + 10) >> 6
+- */
+-static const __u8 v[] = {
+- /* 0x00 */ 0, 54, 54, 54, 118, 118, 118, 118,
+- /* 0x08 */ 123, 129, 134, 138, 143, 147, 151, 156,
+- /* 0x10 */ 157, 161, 164, 168, 170, 173, 176, 179,
+- /* 0x18 */ 181, 185, 187, 190, 192, 194, 197, 199,
+- /* 0x20 */ 200, 202, 204, 206, 209, 211, 213, 215,
+- /* 0x28 */ 217, 219, 221, 222, 224, 225, 227, 229,
+- /* 0x30 */ 231, 232, 234, 236, 237, 239, 240, 242,
+- /* 0x38 */ 244, 245, 246, 248, 250, 251, 252, 254,
+-};
+-
+-/* calculate the cubic root of x using a table lookup followed by one
+- * Newton-Raphson iteration.
+- * Avg err ~= 0.195%
+- */
+-static __always_inline __u32 cubic_root(__u64 a)
+-{
+- __u32 x, b, shift;
+-
+- if (a < 64) {
+- /* a in [0..63] */
+- return ((__u32)v[(__u32)a] + 35) >> 6;
+- }
+-
+- b = fls64(a);
+- b = ((b * 84) >> 8) - 1;
+- shift = (a >> (b * 3));
+-
+- /* needed for the verifier's bounds check on v */
+- if (shift >= 64)
+- return 0;
+-
+- x = ((__u32)(((__u32)v[shift] + 10) << b)) >> 6;
+-
+- /*
+- * Newton-Raphson iteration
+- * 2
+- * x = ( 2 * x + a / x ) / 3
+- * k+1 k k
+- */
+- x = (2 * x + (__u32)div64_u64(a, (__u64)x * (__u64)(x - 1)));
+- x = ((x * 341) >> 10);
+- return x;
+-}
+-
+-/*
+- * Compute congestion window to use.
+- */
+-static __always_inline void bictcp_update(struct bictcp *ca, __u32 cwnd,
+- __u32 acked)
+-{
+- __u32 delta, bic_target, max_cnt;
+- __u64 offs, t;
+-
+- ca->ack_cnt += acked; /* count the number of ACKed packets */
+-
+- if (ca->last_cwnd == cwnd &&
+- (__s32)(tcp_jiffies32 - ca->last_time) <= HZ / 32)
+- return;
+-
+- /* The CUBIC function can update ca->cnt at most once per jiffy.
+- * On all cwnd reduction events, ca->epoch_start is set to 0,
+- * which will force a recalculation of ca->cnt.
+- */
+- if (ca->epoch_start && tcp_jiffies32 == ca->last_time)
+- goto tcp_friendliness;
+-
+- ca->last_cwnd = cwnd;
+- ca->last_time = tcp_jiffies32;
+-
+- if (ca->epoch_start == 0) {
+- ca->epoch_start = tcp_jiffies32; /* record beginning */
+- ca->ack_cnt = acked; /* start counting */
+- ca->tcp_cwnd = cwnd; /* sync with cubic */
+-
+- if (ca->last_max_cwnd <= cwnd) {
+- ca->bic_K = 0;
+- ca->bic_origin_point = cwnd;
+- } else {
+- /* Compute new K based on
+- * (wmax-cwnd) * (srtt>>3 / HZ) / c * 2^(3*bictcp_HZ)
+- */
+- ca->bic_K = cubic_root(cube_factor
+- * (ca->last_max_cwnd - cwnd));
+- ca->bic_origin_point = ca->last_max_cwnd;
+- }
+- }
+-
+- /* cubic function - calc*/
+- /* calculate c * time^3 / rtt,
+- * while considering overflow in calculation of time^3
+- * (so time^3 is done by using 64 bit)
+- * and without the support of division of 64bit numbers
+- * (so all divisions are done by using 32 bit)
+- * also NOTE the unit of those variables
+- * time = (t - K) / 2^bictcp_HZ
+- * c = bic_scale >> 10
+- * rtt = (srtt >> 3) / HZ
+- * !!! The following code does not have overflow problems,
+- * if the cwnd < 1 million packets !!!
+- */
+-
+- t = (__s32)(tcp_jiffies32 - ca->epoch_start) * USEC_PER_JIFFY;
+- t += ca->delay_min;
+- /* change the unit from usec to bictcp_HZ */
+- t <<= BICTCP_HZ;
+- t /= USEC_PER_SEC;
+-
+- if (t < ca->bic_K) /* t - K */
+- offs = ca->bic_K - t;
+- else
+- offs = t - ca->bic_K;
+-
+- /* c/rtt * (t-K)^3 */
+- delta = (cube_rtt_scale * offs * offs * offs) >> (10+3*BICTCP_HZ);
+- if (t < ca->bic_K) /* below origin*/
+- bic_target = ca->bic_origin_point - delta;
+- else /* above origin*/
+- bic_target = ca->bic_origin_point + delta;
+-
+- /* cubic function - calc bictcp_cnt*/
+- if (bic_target > cwnd) {
+- ca->cnt = cwnd / (bic_target - cwnd);
+- } else {
+- ca->cnt = 100 * cwnd; /* very small increment*/
+- }
+-
+- /*
+- * The initial growth of cubic function may be too conservative
+- * when the available bandwidth is still unknown.
+- */
+- if (ca->last_max_cwnd == 0 && ca->cnt > 20)
+- ca->cnt = 20; /* increase cwnd 5% per RTT */
+-
+-tcp_friendliness:
+- /* TCP Friendly */
+- if (tcp_friendliness) {
+- __u32 scale = beta_scale;
+- __u32 n;
+-
+- /* update tcp cwnd */
+- delta = (cwnd * scale) >> 3;
+- if (ca->ack_cnt > delta && delta) {
+- n = ca->ack_cnt / delta;
+- ca->ack_cnt -= n * delta;
+- ca->tcp_cwnd += n;
+- }
+-
+- if (ca->tcp_cwnd > cwnd) { /* if bic is slower than tcp */
+- delta = ca->tcp_cwnd - cwnd;
+- max_cnt = cwnd / delta;
+- if (ca->cnt > max_cnt)
+- ca->cnt = max_cnt;
+- }
+- }
+-
+- /* The maximum rate of cwnd increase CUBIC allows is 1 packet per
+- * 2 packets ACKed, meaning cwnd grows at 1.5x per RTT.
+- */
+- ca->cnt = max(ca->cnt, 2U);
+-}
+-
+-/* Or simply use BPF_STRUCT_OPS to avoid the SEC boilerplate. */
+-void BPF_STRUCT_OPS(bpf_cubic_cong_avoid, struct sock *sk, __u32 ack, __u32 acked)
+-{
+- struct tcp_sock *tp = tcp_sk(sk);
+- struct bictcp *ca = inet_csk_ca(sk);
+-
+- if (!tcp_is_cwnd_limited(sk))
+- return;
+-
+- if (tcp_in_slow_start(tp)) {
+- if (hystart && after(ack, ca->end_seq))
+- bictcp_hystart_reset(sk);
+- acked = tcp_slow_start(tp, acked);
+- if (!acked)
+- return;
+- }
+- bictcp_update(ca, tp->snd_cwnd, acked);
+- tcp_cong_avoid_ai(tp, ca->cnt, acked);
+-}
+-
+-__u32 BPF_STRUCT_OPS(bpf_cubic_recalc_ssthresh, struct sock *sk)
+-{
+- const struct tcp_sock *tp = tcp_sk(sk);
+- struct bictcp *ca = inet_csk_ca(sk);
+-
+- ca->epoch_start = 0; /* end of epoch */
+-
+- /* Wmax and fast convergence */
+- if (tp->snd_cwnd < ca->last_max_cwnd && fast_convergence)
+- ca->last_max_cwnd = (tp->snd_cwnd * (BICTCP_BETA_SCALE + beta))
+- / (2 * BICTCP_BETA_SCALE);
+- else
+- ca->last_max_cwnd = tp->snd_cwnd;
+-
+- return max((tp->snd_cwnd * beta) / BICTCP_BETA_SCALE, 2U);
+-}
+-
+-void BPF_STRUCT_OPS(bpf_cubic_state, struct sock *sk, __u8 new_state)
+-{
+- if (new_state == TCP_CA_Loss) {
+- bictcp_reset(inet_csk_ca(sk));
+- bictcp_hystart_reset(sk);
+- }
+-}
+-
+-#define GSO_MAX_SIZE 65536
+-
+-/* Account for TSO/GRO delays.
+- * Otherwise short RTT flows could get too small ssthresh, since during
+- * slow start we begin with small TSO packets and ca->delay_min would
+- * not account for long aggregation delay when TSO packets get bigger.
+- * Ideally even with a very small RTT we would like to have at least one
+- * TSO packet being sent and received by GRO, and another one in qdisc layer.
+- * We apply another 100% factor because @rate is doubled at this point.
+- * We cap the cushion to 1ms.
+- */
+-static __always_inline __u32 hystart_ack_delay(struct sock *sk)
+-{
+- unsigned long rate;
+-
+- rate = sk->sk_pacing_rate;
+- if (!rate)
+- return 0;
+- return min((__u64)USEC_PER_MSEC,
+- div64_ul((__u64)GSO_MAX_SIZE * 4 * USEC_PER_SEC, rate));
+-}
+-
+-static __always_inline void hystart_update(struct sock *sk, __u32 delay)
+-{
+- struct tcp_sock *tp = tcp_sk(sk);
+- struct bictcp *ca = inet_csk_ca(sk);
+- __u32 threshold;
+-
+- if (hystart_detect & HYSTART_ACK_TRAIN) {
+- __u32 now = bictcp_clock_us(sk);
+-
+- /* first detection parameter - ack-train detection */
+- if ((__s32)(now - ca->last_ack) <= hystart_ack_delta_us) {
+- ca->last_ack = now;
+-
+- threshold = ca->delay_min + hystart_ack_delay(sk);
+-
+- /* Hystart ack train triggers if we get ack past
+- * ca->delay_min/2.
+- * Pacing might have delayed packets up to RTT/2
+- * during slow start.
+- */
+- if (sk->sk_pacing_status == SK_PACING_NONE)
+- threshold >>= 1;
+-
+- if ((__s32)(now - ca->round_start) > threshold) {
+- ca->found = 1;
+- tp->snd_ssthresh = tp->snd_cwnd;
+- }
+- }
+- }
+-
+- if (hystart_detect & HYSTART_DELAY) {
+- /* track the minimum delay over at least HYSTART_MIN_SAMPLES packets */
+- if (ca->curr_rtt > delay)
+- ca->curr_rtt = delay;
+- if (ca->sample_cnt < HYSTART_MIN_SAMPLES) {
+- ca->sample_cnt++;
+- } else {
+- if (ca->curr_rtt > ca->delay_min +
+- HYSTART_DELAY_THRESH(ca->delay_min >> 3)) {
+- ca->found = 1;
+- tp->snd_ssthresh = tp->snd_cwnd;
+- }
+- }
+- }
+-}
+-
+-void BPF_STRUCT_OPS(bpf_cubic_acked, struct sock *sk,
+- const struct ack_sample *sample)
+-{
+- const struct tcp_sock *tp = tcp_sk(sk);
+- struct bictcp *ca = inet_csk_ca(sk);
+- __u32 delay;
+-
+- /* Some calls are for duplicates without timestamps */
+- if (sample->rtt_us < 0)
+- return;
+-
+- /* Discard delay samples right after fast recovery */
+- if (ca->epoch_start && (__s32)(tcp_jiffies32 - ca->epoch_start) < HZ)
+- return;
+-
+- delay = sample->rtt_us;
+- if (delay == 0)
+- delay = 1;
+-
+- /* first time call or link delay decreases */
+- if (ca->delay_min == 0 || ca->delay_min > delay)
+- ca->delay_min = delay;
+-
+- /* hystart triggers when cwnd is larger than some threshold */
+- if (!ca->found && tcp_in_slow_start(tp) && hystart &&
+- tp->snd_cwnd >= hystart_low_window)
+- hystart_update(sk, delay);
+-}
+-
+-extern __u32 tcp_reno_undo_cwnd(struct sock *sk) __ksym;
+-
+-__u32 BPF_STRUCT_OPS(bpf_cubic_undo_cwnd, struct sock *sk)
+-{
+- return tcp_reno_undo_cwnd(sk);
+-}
+-
+-SEC(".struct_ops")
+-struct tcp_congestion_ops cubic = {
+- .init = (void *)bpf_cubic_init,
+- .ssthresh = (void *)bpf_cubic_recalc_ssthresh,
+- .cong_avoid = (void *)bpf_cubic_cong_avoid,
+- .set_state = (void *)bpf_cubic_state,
+- .undo_cwnd = (void *)bpf_cubic_undo_cwnd,
+- .cwnd_event = (void *)bpf_cubic_cwnd_event,
+- .pkts_acked = (void *)bpf_cubic_acked,
+- .name = "bpf_cubic",
+-};
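
The deleted bpf_cubic.c computes integer cube roots with a 64-entry lookup
table plus a single Newton-Raphson step, and open-codes fls64() because the
BPF program cannot call the kernel helper. Both are easy to sanity-check from
ordinary user space; a throwaway harness (the harness itself is a sketch, the
table and arithmetic are copied from the code above; build with cc -O2 -lm):

    #include <stdio.h>
    #include <stdint.h>
    #include <math.h>

    static const uint8_t v[] = {
            0, 54, 54, 54, 118, 118, 118, 118,
            123, 129, 134, 138, 143, 147, 151, 156,
            157, 161, 164, 168, 170, 173, 176, 179,
            181, 185, 187, 190, 192, 194, 197, 199,
            200, 202, 204, 206, 209, 211, 213, 215,
            217, 219, 221, 222, 224, 225, 227, 229,
            231, 232, 234, 236, 237, 239, 240, 242,
            244, 245, 246, 248, 250, 251, 252, 254,
    };

    static int fls64(uint64_t x)
    {
            /* the branchy BPF version above must agree with the builtin */
            return x ? 64 - __builtin_clzll(x) : 0;
    }

    static uint32_t cubic_root(uint64_t a)
    {
            uint32_t x, b, shift;

            if (a < 64)
                    return ((uint32_t)v[(uint32_t)a] + 35) >> 6;

            b = fls64(a);
            b = ((b * 84) >> 8) - 1;
            shift = (a >> (b * 3));
            if (shift >= 64)        /* mirrors the verifier-only guard */
                    return 0;

            x = ((uint32_t)(((uint32_t)v[shift] + 10) << b)) >> 6;

            /* one Newton-Raphson step; x * (x - 1) matches the BPF code */
            x = (2 * x + (uint32_t)(a / ((uint64_t)x * (x - 1))));
            x = ((x * 341) >> 10);  /* 341/1024 ~ 1/3 */
            return x;
    }

    int main(void)
    {
            double max_err = 0;

            for (uint64_t a = 1; a < (1ull << 40); a = a * 3 + 1) {
                    double exact = cbrt((double)a);
                    double err = fabs(cubic_root(a) - exact) / exact;

                    if (err > max_err)
                            max_err = err;
            }
            /* the comment above quotes an average error of ~0.195% */
            printf("max relative error: %.3f%%\n", max_err * 100);
            return 0;
    }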
+diff --git a/tools/testing/selftests/bpf/progs/bpf_dctcp.c b/tools/testing/selftests/bpf/progs/bpf_dctcp.c
+deleted file mode 100644
+index 9573be6122be..000000000000
+--- a/tools/testing/selftests/bpf/progs/bpf_dctcp.c
++++ /dev/null
+@@ -1,249 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-/* Copyright (c) 2019 Facebook */
+-
+-/* WARNING: This implementation is not necessarily the same
+- * as tcp_dctcp.c. The purpose is mainly to test
+- * the kernel's BPF logic.
+- */
+-
+-#include <stddef.h>
+-#include <linux/bpf.h>
+-#include <linux/types.h>
+-#include <linux/stddef.h>
+-#include <linux/tcp.h>
+-#include <bpf/bpf_helpers.h>
+-#include <bpf/bpf_tracing.h>
+-#include "bpf_tcp_helpers.h"
+-
+-char _license[] SEC("license") = "GPL";
+-
+-volatile const char fallback[TCP_CA_NAME_MAX];
+-const char bpf_dctcp[] = "bpf_dctcp";
+-const char tcp_cdg[] = "cdg";
+-char cc_res[TCP_CA_NAME_MAX];
+-int tcp_cdg_res = 0;
+-int stg_result = 0;
+-
+-struct {
+- __uint(type, BPF_MAP_TYPE_SK_STORAGE);
+- __uint(map_flags, BPF_F_NO_PREALLOC);
+- __type(key, int);
+- __type(value, int);
+-} sk_stg_map SEC(".maps");
+-
+-#define DCTCP_MAX_ALPHA 1024U
+-
+-struct dctcp {
+- __u32 old_delivered;
+- __u32 old_delivered_ce;
+- __u32 prior_rcv_nxt;
+- __u32 dctcp_alpha;
+- __u32 next_seq;
+- __u32 ce_state;
+- __u32 loss_cwnd;
+-};
+-
+-static unsigned int dctcp_shift_g = 4; /* g = 1/2^4 */
+-static unsigned int dctcp_alpha_on_init = DCTCP_MAX_ALPHA;
+-
+-static __always_inline void dctcp_reset(const struct tcp_sock *tp,
+- struct dctcp *ca)
+-{
+- ca->next_seq = tp->snd_nxt;
+-
+- ca->old_delivered = tp->delivered;
+- ca->old_delivered_ce = tp->delivered_ce;
+-}
+-
+-SEC("struct_ops/dctcp_init")
+-void BPF_PROG(dctcp_init, struct sock *sk)
+-{
+- const struct tcp_sock *tp = tcp_sk(sk);
+- struct dctcp *ca = inet_csk_ca(sk);
+- int *stg;
+-
+- if (!(tp->ecn_flags & TCP_ECN_OK) && fallback[0]) {
+- /* Switch to fallback */
+- bpf_setsockopt(sk, SOL_TCP, TCP_CONGESTION,
+- (void *)fallback, sizeof(fallback));
+- /* Switch back to myself; the bpf trampoline
+- * stops dctcp_init from being called recursively.
+- */
+- bpf_setsockopt(sk, SOL_TCP, TCP_CONGESTION,
+- (void *)bpf_dctcp, sizeof(bpf_dctcp));
+- /* Switch back to fallback */
+- bpf_setsockopt(sk, SOL_TCP, TCP_CONGESTION,
+- (void *)fallback, sizeof(fallback));
+- /* Expecting -ENOTSUPP for tcp_cdg_res */
+- tcp_cdg_res = bpf_setsockopt(sk, SOL_TCP, TCP_CONGESTION,
+- (void *)tcp_cdg, sizeof(tcp_cdg));
+- bpf_getsockopt(sk, SOL_TCP, TCP_CONGESTION,
+- (void *)cc_res, sizeof(cc_res));
+- return;
+- }
+-
+- ca->prior_rcv_nxt = tp->rcv_nxt;
+- ca->dctcp_alpha = min(dctcp_alpha_on_init, DCTCP_MAX_ALPHA);
+- ca->loss_cwnd = 0;
+- ca->ce_state = 0;
+-
+- stg = bpf_sk_storage_get(&sk_stg_map, (void *)tp, NULL, 0);
+- if (stg) {
+- stg_result = *stg;
+- bpf_sk_storage_delete(&sk_stg_map, (void *)tp);
+- }
+- dctcp_reset(tp, ca);
+-}
+-
+-SEC("struct_ops/dctcp_ssthresh")
+-__u32 BPF_PROG(dctcp_ssthresh, struct sock *sk)
+-{
+- struct dctcp *ca = inet_csk_ca(sk);
+- struct tcp_sock *tp = tcp_sk(sk);
+-
+- ca->loss_cwnd = tp->snd_cwnd;
+- return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->dctcp_alpha) >> 11U), 2U);
+-}
+-
+-SEC("struct_ops/dctcp_update_alpha")
+-void BPF_PROG(dctcp_update_alpha, struct sock *sk, __u32 flags)
+-{
+- const struct tcp_sock *tp = tcp_sk(sk);
+- struct dctcp *ca = inet_csk_ca(sk);
+-
+- /* Expired RTT */
+- if (!before(tp->snd_una, ca->next_seq)) {
+- __u32 delivered_ce = tp->delivered_ce - ca->old_delivered_ce;
+- __u32 alpha = ca->dctcp_alpha;
+-
+- /* alpha = (1 - g) * alpha + g * F */
+-
+- alpha -= min_not_zero(alpha, alpha >> dctcp_shift_g);
+- if (delivered_ce) {
+- __u32 delivered = tp->delivered - ca->old_delivered;
+-
+- /* If dctcp_shift_g == 1, a 32bit value would overflow
+- * after 8 M packets.
+- */
+- delivered_ce <<= (10 - dctcp_shift_g);
+- delivered_ce /= max(1U, delivered);
+-
+- alpha = min(alpha + delivered_ce, DCTCP_MAX_ALPHA);
+- }
+- ca->dctcp_alpha = alpha;
+- dctcp_reset(tp, ca);
+- }
+-}
+-
+-static __always_inline void dctcp_react_to_loss(struct sock *sk)
+-{
+- struct dctcp *ca = inet_csk_ca(sk);
+- struct tcp_sock *tp = tcp_sk(sk);
+-
+- ca->loss_cwnd = tp->snd_cwnd;
+- tp->snd_ssthresh = max(tp->snd_cwnd >> 1U, 2U);
+-}
+-
+-SEC("struct_ops/dctcp_state")
+-void BPF_PROG(dctcp_state, struct sock *sk, __u8 new_state)
+-{
+- if (new_state == TCP_CA_Recovery &&
+- new_state != BPF_CORE_READ_BITFIELD(inet_csk(sk), icsk_ca_state))
+- dctcp_react_to_loss(sk);
+- /* We handle RTO in dctcp_cwnd_event to ensure that we perform only
+- * one loss-adjustment per RTT.
+- */
+-}
+-
+-static __always_inline void dctcp_ece_ack_cwr(struct sock *sk, __u32 ce_state)
+-{
+- struct tcp_sock *tp = tcp_sk(sk);
+-
+- if (ce_state == 1)
+- tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
+- else
+- tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
+-}
+-
+-/* Minimal DCTCP CE state machine:
+- *
+- * S: 0 <- last pkt was non-CE
+- * 1 <- last pkt was CE
+- */
+-static __always_inline
+-void dctcp_ece_ack_update(struct sock *sk, enum tcp_ca_event evt,
+- __u32 *prior_rcv_nxt, __u32 *ce_state)
+-{
+- __u32 new_ce_state = (evt == CA_EVENT_ECN_IS_CE) ? 1 : 0;
+-
+- if (*ce_state != new_ce_state) {
+- /* CE state has changed, force an immediate ACK to
+- * reflect the new CE state. If an ACK was delayed,
+- * send that first to reflect the prior CE state.
+- */
+- if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) {
+- dctcp_ece_ack_cwr(sk, *ce_state);
+- bpf_tcp_send_ack(sk, *prior_rcv_nxt);
+- }
+- inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
+- }
+- *prior_rcv_nxt = tcp_sk(sk)->rcv_nxt;
+- *ce_state = new_ce_state;
+- dctcp_ece_ack_cwr(sk, new_ce_state);
+-}
+-
+-SEC("struct_ops/dctcp_cwnd_event")
+-void BPF_PROG(dctcp_cwnd_event, struct sock *sk, enum tcp_ca_event ev)
+-{
+- struct dctcp *ca = inet_csk_ca(sk);
+-
+- switch (ev) {
+- case CA_EVENT_ECN_IS_CE:
+- case CA_EVENT_ECN_NO_CE:
+- dctcp_ece_ack_update(sk, ev, &ca->prior_rcv_nxt, &ca->ce_state);
+- break;
+- case CA_EVENT_LOSS:
+- dctcp_react_to_loss(sk);
+- break;
+- default:
+- /* Don't care for the rest. */
+- break;
+- }
+-}
+-
+-SEC("struct_ops/dctcp_cwnd_undo")
+-__u32 BPF_PROG(dctcp_cwnd_undo, struct sock *sk)
+-{
+- const struct dctcp *ca = inet_csk_ca(sk);
+-
+- return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
+-}
+-
+-extern void tcp_reno_cong_avoid(struct sock *sk, __u32 ack, __u32 acked) __ksym;
+-
+-SEC("struct_ops/dctcp_reno_cong_avoid")
+-void BPF_PROG(dctcp_cong_avoid, struct sock *sk, __u32 ack, __u32 acked)
+-{
+- tcp_reno_cong_avoid(sk, ack, acked);
+-}
+-
+-SEC(".struct_ops")
+-struct tcp_congestion_ops dctcp_nouse = {
+- .init = (void *)dctcp_init,
+- .set_state = (void *)dctcp_state,
+- .flags = TCP_CONG_NEEDS_ECN,
+- .name = "bpf_dctcp_nouse",
+-};
+-
+-SEC(".struct_ops")
+-struct tcp_congestion_ops dctcp = {
+- .init = (void *)dctcp_init,
+- .in_ack_event = (void *)dctcp_update_alpha,
+- .cwnd_event = (void *)dctcp_cwnd_event,
+- .ssthresh = (void *)dctcp_ssthresh,
+- .cong_avoid = (void *)dctcp_cong_avoid,
+- .undo_cwnd = (void *)dctcp_cwnd_undo,
+- .set_state = (void *)dctcp_state,
+- .flags = TCP_CONG_NEEDS_ECN,
+- .name = "bpf_dctcp",
+-};
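
dctcp_update_alpha() above maintains the DCTCP estimate alpha = (1 - g) *
alpha + g * F once per window, in fixed point with g = 1/2^dctcp_shift_g and
1.0 scaled to DCTCP_MAX_ALPHA (1024). The arithmetic is compact enough to
replay in isolation; a small sketch (same arithmetic, hypothetical inputs)
showing alpha converging toward the CE mark fraction:

    #include <stdio.h>
    #include <stdint.h>

    #define DCTCP_MAX_ALPHA 1024U

    /* one per-window update, same arithmetic as dctcp_update_alpha() */
    static uint32_t update_alpha(uint32_t alpha, uint32_t delivered,
                                 uint32_t delivered_ce, unsigned int shift_g)
    {
            uint32_t decay = alpha >> shift_g;

            /* the min_not_zero() trick: tiny alphas decay straight to zero */
            alpha -= decay ? decay : alpha;

            if (delivered_ce) {
                    /* g * F scaled by 1024: (ce << (10 - g)) / delivered */
                    uint32_t f = delivered_ce << (10 - shift_g);

                    f /= delivered ? delivered : 1;
                    alpha += f;
                    if (alpha > DCTCP_MAX_ALPHA)
                            alpha = DCTCP_MAX_ALPHA;
            }
            return alpha;
    }

    int main(void)
    {
            uint32_t alpha = DCTCP_MAX_ALPHA;   /* dctcp_alpha_on_init */

            /* steady 25% CE marks: alpha should settle near 256/1024 */
            for (int rtt = 0; rtt < 64; rtt++) {
                    alpha = update_alpha(alpha, 100, 25, 4);
                    if (rtt % 8 == 7)
                            printf("rtt %2d: alpha = %u\n", rtt, alpha);
            }
            return 0;
    }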
+diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_test.c b/tools/testing/selftests/bpf/progs/kfunc_call_test.c
+deleted file mode 100644
+index 470f8723e463..000000000000
+--- a/tools/testing/selftests/bpf/progs/kfunc_call_test.c
++++ /dev/null
+@@ -1,47 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-/* Copyright (c) 2021 Facebook */
+-#include <linux/bpf.h>
+-#include <bpf/bpf_helpers.h>
+-#include "bpf_tcp_helpers.h"
+-
+-extern int bpf_kfunc_call_test2(struct sock *sk, __u32 a, __u32 b) __ksym;
+-extern __u64 bpf_kfunc_call_test1(struct sock *sk, __u32 a, __u64 b,
+- __u32 c, __u64 d) __ksym;
+-
+-SEC("classifier")
+-int kfunc_call_test2(struct __sk_buff *skb)
+-{
+- struct bpf_sock *sk = skb->sk;
+-
+- if (!sk)
+- return -1;
+-
+- sk = bpf_sk_fullsock(sk);
+- if (!sk)
+- return -1;
+-
+- return bpf_kfunc_call_test2((struct sock *)sk, 1, 2);
+-}
+-
+-SEC("classifier")
+-int kfunc_call_test1(struct __sk_buff *skb)
+-{
+- struct bpf_sock *sk = skb->sk;
+- __u64 a = 1ULL << 32;
+- __u32 ret;
+-
+- if (!sk)
+- return -1;
+-
+- sk = bpf_sk_fullsock(sk);
+- if (!sk)
+- return -1;
+-
+- a = bpf_kfunc_call_test1((struct sock *)sk, 1, a | 2, 3, a | 4);
+- ret = a >> 32; /* ret should be 2 */
+- ret += (__u32)a; /* ret should be 12 */
+-
+- return ret;
+-}
+-
+-char _license[] SEC("license") = "GPL";
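
kfunc_call_test1 above relies on the return convention of the
bpf_kfunc_call_test1() kfunc, which (in net/bpf/test_run.c at the time)
simply sums its four arguments, so the two carries from the 1ULL << 32 terms
land in the upper half of the u64 while the small constants land in the lower
half. The unpacking arithmetic in plain C (dropping the sock argument, which
is irrelevant to the sum):

    #include <stdio.h>
    #include <stdint.h>

    /* mirrors bpf_kfunc_call_test1() from net/bpf/test_run.c: a plain sum */
    static uint64_t kfunc_call_test1(uint32_t a, uint64_t b, uint32_t c, uint64_t d)
    {
            return a + b + c + d;
    }

    int main(void)
    {
            uint64_t x = 1ULL << 32;
            uint64_t sum = kfunc_call_test1(1, x | 2, 3, x | 4);
            uint32_t ret;

            ret = sum >> 32;        /* two x terms -> high half is 2 */
            ret += (uint32_t)sum;   /* 1 + 2 + 3 + 4 = 10 -> ret is 12 */
            printf("ret = %u\n", ret);
            return 0;
    }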
+diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_test_subprog.c b/tools/testing/selftests/bpf/progs/kfunc_call_test_subprog.c
+deleted file mode 100644
+index 5fbd9e232d44..000000000000
+--- a/tools/testing/selftests/bpf/progs/kfunc_call_test_subprog.c
++++ /dev/null
+@@ -1,42 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-/* Copyright (c) 2021 Facebook */
+-#include <linux/bpf.h>
+-#include <bpf/bpf_helpers.h>
+-#include "bpf_tcp_helpers.h"
+-
+-extern const int bpf_prog_active __ksym;
+-extern __u64 bpf_kfunc_call_test1(struct sock *sk, __u32 a, __u64 b,
+- __u32 c, __u64 d) __ksym;
+-extern struct sock *bpf_kfunc_call_test3(struct sock *sk) __ksym;
+-int active_res = -1;
+-int sk_state_res = -1;
+-
+-int __noinline f1(struct __sk_buff *skb)
+-{
+- struct bpf_sock *sk = skb->sk;
+- int *active;
+-
+- if (!sk)
+- return -1;
+-
+- sk = bpf_sk_fullsock(sk);
+- if (!sk)
+- return -1;
+-
+- active = (int *)bpf_per_cpu_ptr(&bpf_prog_active,
+- bpf_get_smp_processor_id());
+- if (active)
+- active_res = *active;
+-
+- sk_state_res = bpf_kfunc_call_test3((struct sock *)sk)->sk_state;
+-
+- return (__u32)bpf_kfunc_call_test1((struct sock *)sk, 1, 2, 3, 4);
+-}
+-
+-SEC("classifier")
+-int kfunc_call_test1(struct __sk_buff *skb)
+-{
+- return f1(skb);
+-}
+-
+-char _license[] SEC("license") = "GPL";
+diff --git a/tools/testing/selftests/bpf/progs/linked_funcs1.c b/tools/testing/selftests/bpf/progs/linked_funcs1.c
+deleted file mode 100644
+index b964ec1390c2..000000000000
+--- a/tools/testing/selftests/bpf/progs/linked_funcs1.c
++++ /dev/null
+@@ -1,73 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-/* Copyright (c) 2021 Facebook */
+-
+-#include "vmlinux.h"
+-#include <bpf/bpf_helpers.h>
+-#include <bpf/bpf_tracing.h>
+-
+-/* weak and shared between two files */
+-const volatile int my_tid __weak;
+-long syscall_id __weak;
+-
+-int output_val1;
+-int output_ctx1;
+-int output_weak1;
+-
+-/* same "subprog" name in all files, but it's ok because they all are static */
+-static __noinline int subprog(int x)
+-{
+- /* but different formula */
+- return x * 1;
+-}
+-
+-/* Global functions can't be void */
+-int set_output_val1(int x)
+-{
+- output_val1 = x + subprog(x);
+- return x;
+-}
+-
+-/* This function can't be verified as global, as it assumes raw_tp/sys_enter
+- * context and accesses syscall id (second argument). So we mark it as
+- * __hidden, so that libbpf will mark it as static in the final object file,
+- * right before verifying it in the kernel.
+- *
+- * But we don't mark it as __hidden here, rather at the extern site. __hidden
+- * is "contaminating" visibility, so it will get propagated from either the
+- * extern or the actual definition (including from the losing __weak
+- * definition).
+- */
+-void set_output_ctx1(__u64 *ctx)
+-{
+- output_ctx1 = ctx[1]; /* long id, same as in BPF_PROG below */
+-}
+-
+-/* this weak instance should win because it's the first one */
+-__weak int set_output_weak(int x)
+-{
+- output_weak1 = x;
+- return x;
+-}
+-
+-extern int set_output_val2(int x);
+-
+-/* here we'll force set_output_ctx2() to be __hidden in the final obj file */
+-__hidden extern void set_output_ctx2(__u64 *ctx);
+-
+-SEC("raw_tp/sys_enter")
+-int BPF_PROG(handler1, struct pt_regs *regs, long id)
+-{
+- if (my_tid != (u32)bpf_get_current_pid_tgid() || id != syscall_id)
+- return 0;
+-
+- set_output_val2(1000);
+- set_output_ctx2(ctx); /* ctx definition is hidden in BPF_PROG macro */
+-
+- /* keep input value the same across both files to avoid dependency on
+- * handler call order; differentiate by output_weak1 vs output_weak2.
+- */
+- set_output_weak(42);
+-
+- return 0;
+-}
+-
+-char LICENSE[] SEC("license") = "GPL";
+diff --git a/tools/testing/selftests/bpf/progs/linked_funcs2.c b/tools/testing/selftests/bpf/progs/linked_funcs2.c
+deleted file mode 100644
+index 575e958e60b7..000000000000
+--- a/tools/testing/selftests/bpf/progs/linked_funcs2.c
++++ /dev/null
+@@ -1,73 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-/* Copyright (c) 2021 Facebook */
+-
+-#include "vmlinux.h"
+-#include <bpf/bpf_helpers.h>
+-#include <bpf/bpf_tracing.h>
+-
+-/* weak and shared between both files */
+-const volatile int my_tid __weak;
+-long syscall_id __weak;
+-
+-int output_val2;
+-int output_ctx2;
+-int output_weak2; /* should stay zero */
+-
+-/* same "subprog" name in all files, but it's ok because they all are static */
+-static __noinline int subprog(int x)
+-{
+- /* but different formula */
+- return x * 2;
+-}
+-
+-/* Global functions can't be void */
+-int set_output_val2(int x)
+-{
+- output_val2 = 2 * x + 2 * subprog(x);
+- return 2 * x;
+-}
+-
+-/* This function can't be verified as global, as it assumes raw_tp/sys_enter
+- * context and accesses syscall id (second argument). So we mark it as
+- * __hidden, so that libbpf will mark it as static in the final object file,
+- * right before verifying it in the kernel.
+- *
+- * But we don't mark it as __hidden here, rather at the extern site. __hidden
+- * is "contaminating" visibility, so it will get propagated from either the
+- * extern or the actual definition (including from the losing __weak
+- * definition).
+- */
+-void set_output_ctx2(__u64 *ctx)
+-{
+- output_ctx2 = ctx[1]; /* long id, same as in BPF_PROG below */
+-}
+-
+-/* this weak instance should lose, because it will be processed second */
+-__weak int set_output_weak(int x)
+-{
+- output_weak2 = x;
+- return 2 * x;
+-}
+-
+-extern int set_output_val1(int x);
+-
+-/* here we'll force set_output_ctx1() to be __hidden in the final obj file */
+-__hidden extern void set_output_ctx1(__u64 *ctx);
+-
+-SEC("raw_tp/sys_enter")
+-int BPF_PROG(handler2, struct pt_regs *regs, long id)
+-{
+- if (my_tid != (u32)bpf_get_current_pid_tgid() || id != syscall_id)
+- return 0;
+-
+- set_output_val1(2000);
+- set_output_ctx1(ctx); /* ctx definition is hidden in BPF_PROG macro */
+-
+- /* keep input value the same across both files to avoid dependency on
+- * handler call order; differentiate by output_weak1 vs output_weak2.
+- */
+- set_output_weak(42);
+-
+- return 0;
+-}
+-
+-char LICENSE[] SEC("license") = "GPL";
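
Both linked_funcs files define set_output_weak() as __weak, and the comments
above assert that the first file's instance wins. The BPF static linker
follows the usual ELF rule for this case: when only weak definitions exist,
the first one on the link line is kept. A two-file user-space sketch of the
same rule (hypothetical file names; GNU ld behaves this way):

    /* weak1.c -- listed first, so this weak definition should win */
    #include <stdio.h>

    __attribute__((weak)) int get_val(void) { return 1; }

    int main(void)
    {
            printf("get_val() = %d\n", get_val());  /* prints 1 */
            return 0;
    }

    /* weak2.c -- listed second, so this weak definition is dropped */
    __attribute__((weak)) int get_val(void) { return 2; }

    /* build and run: cc -o weak weak1.c weak2.c && ./weak */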
+diff --git a/tools/testing/selftests/bpf/progs/linked_maps2.c b/tools/testing/selftests/bpf/progs/linked_maps2.c
+deleted file mode 100644
+index 0693687474ed..000000000000
+--- a/tools/testing/selftests/bpf/progs/linked_maps2.c
++++ /dev/null
+@@ -1,76 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-/* Copyright (c) 2021 Facebook */
+-
+-#include "vmlinux.h"
+-#include <bpf/bpf_helpers.h>
+-#include <bpf/bpf_tracing.h>
+-
+-/* modifiers and typedefs are ignored when comparing key/value types */
+-typedef struct my_key { long x; } key_type;
+-typedef struct my_value { long x; } value_type;
+-
+-extern struct {
+- __uint(max_entries, 16);
+- __type(key, key_type);
+- __type(value, value_type);
+- __uint(type, BPF_MAP_TYPE_HASH);
+-} map1 SEC(".maps");
+-
+-struct {
+- __uint(type, BPF_MAP_TYPE_ARRAY);
+- __type(key, int);
+- __type(value, int);
+- __uint(max_entries, 8);
+-} map2 SEC(".maps");
+-
+-/* this definition will lose, but it has to exactly match the winner */
+-struct {
+- __uint(type, BPF_MAP_TYPE_ARRAY);
+- __type(key, int);
+- __type(value, int);
+- __uint(max_entries, 16);
+-} map_weak __weak SEC(".maps");
+-
+-int output_first2;
+-int output_second2;
+-int output_weak2;
+-
+-SEC("raw_tp/sys_enter")
+-int BPF_PROG(handler_enter2)
+-{
+- /* update values with key = 2 */
+- int key = 2, val = 2;
+- key_type key_struct = { .x = 2 };
+- value_type val_struct = { .x = 2000 };
+-
+- bpf_map_update_elem(&map1, &key_struct, &val_struct, 0);
+- bpf_map_update_elem(&map2, &key, &val, 0);
+- bpf_map_update_elem(&map_weak, &key, &val, 0);
+-
+- return 0;
+-}
+-
+-SEC("raw_tp/sys_exit")
+-int BPF_PROG(handler_exit2)
+-{
+- /* lookup values with key = 1, set in another file */
+- int key = 1, *val;
+- key_type key_struct = { .x = 1 };
+- value_type *value_struct;
+-
+- value_struct = bpf_map_lookup_elem(&map1, &key_struct);
+- if (value_struct)
+- output_first2 = value_struct->x;
+-
+- val = bpf_map_lookup_elem(&map2, &key);
+- if (val)
+- output_second2 = *val;
+-
+- val = bpf_map_lookup_elem(&map_weak, &key);
+- if (val)
+- output_weak2 = *val;
+-
+- return 0;
+-}
+-
+-char LICENSE[] SEC("license") = "GPL";
+diff --git a/tools/testing/selftests/bpf/progs/linked_vars1.c b/tools/testing/selftests/bpf/progs/linked_vars1.c
+deleted file mode 100644
+index ef9e9d0bb0ca..000000000000
+--- a/tools/testing/selftests/bpf/progs/linked_vars1.c
++++ /dev/null
+@@ -1,54 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-/* Copyright (c) 2021 Facebook */
+-
+-#include "vmlinux.h"
+-#include <bpf/bpf_helpers.h>
+-#include <bpf/bpf_tracing.h>
+-
+-extern int LINUX_KERNEL_VERSION __kconfig;
+-/* this weak extern becomes strong due to the other file's strong extern */
+-extern bool CONFIG_BPF_SYSCALL __kconfig __weak;
+-extern const void bpf_link_fops __ksym __weak;
+-
+-int input_bss1;
+-int input_data1 = 1;
+-const volatile int input_rodata1 = 11;
+-
+-int input_bss_weak __weak;
+-/* these two definitions should win */
+-int input_data_weak __weak = 10;
+-const volatile int input_rodata_weak __weak = 100;
+-
+-extern int input_bss2;
+-extern int input_data2;
+-extern const int input_rodata2;
+-
+-int output_bss1;
+-int output_data1;
+-int output_rodata1;
+-
+-long output_sink1;
+-
+-static __noinline int get_bss_res(void)
+-{
+- /* just make sure all the relocations work against .text as well */
+- return input_bss1 + input_bss2 + input_bss_weak;
+-}
+-
+-SEC("raw_tp/sys_enter")
+-int BPF_PROG(handler1)
+-{
+- output_bss1 = get_bss_res();
+- output_data1 = input_data1 + input_data2 + input_data_weak;
+- output_rodata1 = input_rodata1 + input_rodata2 + input_rodata_weak;
+-
+- /* make sure we actually use above special externs, otherwise compiler
+- * will optimize them out
+- */
+- output_sink1 = LINUX_KERNEL_VERSION
+- + CONFIG_BPF_SYSCALL
+- + (long)&bpf_link_fops;
+- return 0;
+-}
+-
+-char LICENSE[] SEC("license") = "GPL";
+diff --git a/tools/testing/selftests/bpf/progs/linked_vars2.c b/tools/testing/selftests/bpf/progs/linked_vars2.c
+deleted file mode 100644
+index e4f5bd388a3c..000000000000
+--- a/tools/testing/selftests/bpf/progs/linked_vars2.c
++++ /dev/null
+@@ -1,55 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-/* Copyright (c) 2021 Facebook */
+-
+-#include "vmlinux.h"
+-#include <bpf/bpf_helpers.h>
+-#include <bpf/bpf_tracing.h>
+-
+-extern int LINUX_KERNEL_VERSION __kconfig;
+-/* when an extern is declared both strong and weak, the resulting symbol will be strong */
+-extern bool CONFIG_BPF_SYSCALL __kconfig;
+-extern const void __start_BTF __ksym;
+-
+-int input_bss2;
+-int input_data2 = 2;
+-const volatile int input_rodata2 = 22;
+-
+-int input_bss_weak __weak;
+-/* these two weak variables should lose */
+-int input_data_weak __weak = 20;
+-const volatile int input_rodata_weak __weak = 200;
+-
+-extern int input_bss1;
+-extern int input_data1;
+-extern const int input_rodata1;
+-
+-int output_bss2;
+-int output_data2;
+-int output_rodata2;
+-
+-int output_sink2;
+-
+-static __noinline int get_data_res(void)
+-{
+- /* just make sure all the relocations work against .text as well */
+- return input_data1 + input_data2 + input_data_weak;
+-}
+-
+-SEC("raw_tp/sys_enter")
+-int BPF_PROG(handler2)
+-{
+- output_bss2 = input_bss1 + input_bss2 + input_bss_weak;
+- output_data2 = get_data_res();
+- output_rodata2 = input_rodata1 + input_rodata2 + input_rodata_weak;
+-
+- /* make sure we actually use above special externs, otherwise compiler
+- * will optimize them out
+- */
+- output_sink2 = LINUX_KERNEL_VERSION
+- + CONFIG_BPF_SYSCALL
+- + (long)&__start_BTF;
+-
+- return 0;
+-}
+-
+-char LICENSE[] SEC("license") = "GPL";
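
linked_vars1.c above declares bpf_link_fops as a __weak __ksym extern and
only ever takes its address, which is the standard probe for an optional
symbol: if the symbol is absent, the address resolves to zero instead of
failing the load. Ordinary ELF linking gives the same behavior for an
undefined weak extern, as in this sketch (hypothetical symbol name):

    #include <stdio.h>

    /* deliberately never defined; weak, so the link still succeeds */
    extern int optional_feature __attribute__((weak));

    int main(void)
    {
            if (&optional_feature)
                    printf("optional_feature = %d\n", optional_feature);
            else
                    printf("optional_feature absent\n");  /* this runs */
            return 0;
    }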