Diffstat (limited to 'patch-5.11.0-redhat.patch')
 -rw-r--r--  patch-5.11.0-redhat.patch | 1337
 1 file changed, 1336 insertions(+), 1 deletion(-)
diff --git a/patch-5.11.0-redhat.patch b/patch-5.11.0-redhat.patch
index 220038e21..5eb55aa44 100644
--- a/patch-5.11.0-redhat.patch
+++ b/patch-5.11.0-redhat.patch
@@ -11,7 +11,11 @@
arch/s390/include/asm/ipl.h | 1 +
arch/s390/kernel/ipl.c | 5 +
arch/s390/kernel/setup.c | 4 +
+ arch/x86/hyperv/hv_init.c | 4 +
+ arch/x86/hyperv/mmu.c | 12 +-
+ arch/x86/include/asm/mshyperv.h | 2 +
arch/x86/kernel/cpu/common.c | 1 +
+ arch/x86/kernel/cpu/mshyperv.c | 18 +
arch/x86/kernel/setup.c | 71 +++-
drivers/acpi/apei/hest.c | 8 +
drivers/acpi/irq.c | 17 +-
@@ -27,6 +31,7 @@
drivers/gpu/drm/panel/panel-xingbangda-xbd599.c | 366 +++++++++++++++++++++
drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c | 10 +-
drivers/hid/hid-rmi.c | 64 ----
+ drivers/hv/vmbus_drv.c | 2 -
drivers/infiniband/sw/rxe/rxe.c | 2 +
drivers/input/rmi4/rmi_driver.c | 124 ++++---
drivers/iommu/iommu.c | 22 ++
@@ -44,7 +49,28 @@
drivers/scsi/qla2xxx/qla_os.c | 6 +
drivers/scsi/qla4xxx/ql4_os.c | 2 +
drivers/scsi/smartpqi/smartpqi_init.c | 16 +
+ drivers/target/target_core_xcopy.c | 119 ++++---
+ drivers/target/target_core_xcopy.h | 1 +
drivers/usb/core/hub.c | 7 +
+ fs/btrfs/disk-io.c | 2 +-
+ fs/btrfs/extent_io.c | 4 +-
+ fs/btrfs/inode.c | 60 +++-
+ fs/btrfs/print-tree.c | 10 +-
+ fs/btrfs/print-tree.h | 2 +-
+ fs/btrfs/relocation.c | 7 +-
+ fs/btrfs/space-info.c | 4 +-
+ fs/btrfs/tree-checker.c | 7 +
+ fs/nfs/delegation.c | 12 +-
+ fs/nfs/internal.h | 38 ++-
+ fs/nfs/nfs4proc.c | 28 +-
+ fs/nfs/nfs4super.c | 4 +-
+ fs/nfs/pnfs.c | 67 ++--
+ fs/nfs/pnfs.h | 8 +-
+ fs/nfs/pnfs_nfs.c | 22 +-
+ fs/nfsd/nfs4proc.c | 5 +
+ fs/nfsd/nfs4xdr.c | 56 ++--
+ fs/nfsd/nfssvc.c | 6 -
+ fs/nfsd/xdr4.h | 1 -
include/linux/efi.h | 22 +-
include/linux/kernel.h | 34 +-
include/linux/lsm_hook_defs.h | 2 +
@@ -62,14 +88,18 @@
kernel/module_signing.c | 9 +-
kernel/panic.c | 14 +
kernel/rh_taint.c | 93 ++++++
+ kernel/trace/Kconfig | 2 +-
+ kernel/trace/trace_kprobe.c | 2 +-
mm/kmemleak.c | 5 +
+ net/sunrpc/addr.c | 2 +-
+ net/sunrpc/svcsock.c | 86 ++++-
scripts/mod/modpost.c | 8 +
scripts/tags.sh | 2 +
security/integrity/platform_certs/load_uefi.c | 6 +-
security/lockdown/Kconfig | 13 +
security/lockdown/lockdown.c | 1 +
security/security.c | 6 +
- 71 files changed, 1581 insertions(+), 188 deletions(-)
+ 101 files changed, 1979 insertions(+), 383 deletions(-)
diff --git a/Documentation/admin-guide/kdump/kdump.rst b/Documentation/admin-guide/kdump/kdump.rst
index 75a9dd98e76e..3ff3291551f9 100644
@@ -370,6 +400,73 @@ index 1fbed91c73bc..73f5724342b1 100644
/* Have one command line that is parsed and saved in /proc/cmdline */
/* boot_command_line has been already set up in early.c */
*cmdline_p = boot_command_line;
+diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
+index e04d90af4c27..4638a52d8eae 100644
+--- a/arch/x86/hyperv/hv_init.c
++++ b/arch/x86/hyperv/hv_init.c
+@@ -16,6 +16,7 @@
+ #include <asm/hyperv-tlfs.h>
+ #include <asm/mshyperv.h>
+ #include <asm/idtentry.h>
++#include <linux/kexec.h>
+ #include <linux/version.h>
+ #include <linux/vmalloc.h>
+ #include <linux/mm.h>
+@@ -26,6 +27,8 @@
+ #include <linux/syscore_ops.h>
+ #include <clocksource/hyperv_timer.h>
+
++int hyperv_init_cpuhp;
++
+ void *hv_hypercall_pg;
+ EXPORT_SYMBOL_GPL(hv_hypercall_pg);
+
+@@ -401,6 +404,7 @@ void __init hyperv_init(void)
+
+ register_syscore_ops(&hv_syscore_ops);
+
++ hyperv_init_cpuhp = cpuhp;
+ return;
+
+ remove_cpuhp_state:
+diff --git a/arch/x86/hyperv/mmu.c b/arch/x86/hyperv/mmu.c
+index 5208ba49c89a..2c87350c1fb0 100644
+--- a/arch/x86/hyperv/mmu.c
++++ b/arch/x86/hyperv/mmu.c
+@@ -66,11 +66,17 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus,
+ if (!hv_hypercall_pg)
+ goto do_native;
+
+- if (cpumask_empty(cpus))
+- return;
+-
+ local_irq_save(flags);
+
++ /*
++ * Only check the mask _after_ interrupt has been disabled to avoid the
++ * mask changing under our feet.
++ */
++ if (cpumask_empty(cpus)) {
++ local_irq_restore(flags);
++ return;
++ }
++
+ flush_pcpu = (struct hv_tlb_flush **)
+ this_cpu_ptr(hyperv_pcpu_input_arg);
+
+diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
+index ffc289992d1b..30f76b966857 100644
+--- a/arch/x86/include/asm/mshyperv.h
++++ b/arch/x86/include/asm/mshyperv.h
+@@ -74,6 +74,8 @@ static inline void hv_disable_stimer0_percpu_irq(int irq) {}
+
+
+ #if IS_ENABLED(CONFIG_HYPERV)
++extern int hyperv_init_cpuhp;
++
+ extern void *hv_hypercall_pg;
+ extern void __percpu **hyperv_pcpu_input_arg;
+
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 35ad8480c464..fade48ea4c2d 100644
--- a/arch/x86/kernel/cpu/common.c
@@ -382,6 +479,43 @@ index 35ad8480c464..fade48ea4c2d 100644
get_cpu_address_sizes(c);
setup_force_cpu_cap(X86_FEATURE_CPUID);
cpu_parse_early_param();
+diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
+index f628e3dc150f..43b54bef5448 100644
+--- a/arch/x86/kernel/cpu/mshyperv.c
++++ b/arch/x86/kernel/cpu/mshyperv.c
+@@ -135,14 +135,32 @@ static void hv_machine_shutdown(void)
+ {
+ if (kexec_in_progress && hv_kexec_handler)
+ hv_kexec_handler();
++
++ /*
++ * Call hv_cpu_die() on all the CPUs, otherwise later the hypervisor
++ * corrupts the old VP Assist Pages and can crash the kexec kernel.
++ */
++ if (kexec_in_progress && hyperv_init_cpuhp > 0)
++ cpuhp_remove_state(hyperv_init_cpuhp);
++
++ /* The function calls stop_other_cpus(). */
+ native_machine_shutdown();
++
++ /* Disable the hypercall page when there is only 1 active CPU. */
++ if (kexec_in_progress)
++ hyperv_cleanup();
+ }
+
+ static void hv_machine_crash_shutdown(struct pt_regs *regs)
+ {
+ if (hv_crash_handler)
+ hv_crash_handler(regs);
++
++ /* The function calls crash_smp_send_stop(). */
+ native_machine_crash_shutdown(regs);
++
++ /* Disable the hypercall page when there is only 1 active CPU. */
++ hyperv_cleanup();
+ }
+ #endif /* CONFIG_KEXEC_CORE */
+ #endif /* CONFIG_HYPERV */
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 740f3bdb3f61..26c35aa38ea7 100644
--- a/arch/x86/kernel/setup.c
@@ -1416,6 +1550,26 @@ index 311eee599ce9..2460c6bd46f8 100644
data->xport.proto_name = "hid";
data->xport.ops = &hid_rmi_ops;
+diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
+index 502f8cd95f6d..d491fdcee61f 100644
+--- a/drivers/hv/vmbus_drv.c
++++ b/drivers/hv/vmbus_drv.c
+@@ -2550,7 +2550,6 @@ static void hv_kexec_handler(void)
+ /* Make sure conn_state is set as hv_synic_cleanup checks for it */
+ mb();
+ cpuhp_remove_state(hyperv_cpuhp_online);
+- hyperv_cleanup();
+ };
+
+ static void hv_crash_handler(struct pt_regs *regs)
+@@ -2566,7 +2565,6 @@ static void hv_crash_handler(struct pt_regs *regs)
+ cpu = smp_processor_id();
+ hv_stimer_cleanup(cpu);
+ hv_synic_disable_regs(cpu);
+- hyperv_cleanup();
+ };
+
+ static int hv_synic_suspend(void)
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index 95f0de0c8b49..faa8a6cadef1 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
@@ -2107,6 +2261,185 @@ index c53f456fbd09..ea190660c86e 100644
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_DELL, 0x1fe0)
+diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
+index 44e15d7fb2f0..66d6f1d06f21 100644
+--- a/drivers/target/target_core_xcopy.c
++++ b/drivers/target/target_core_xcopy.c
+@@ -46,60 +46,83 @@ static int target_xcopy_gen_naa_ieee(struct se_device *dev, unsigned char *buf)
+ return 0;
+ }
+
+-struct xcopy_dev_search_info {
+- const unsigned char *dev_wwn;
+- struct se_device *found_dev;
+-};
+-
++/**
++ * target_xcopy_locate_se_dev_e4_iter - compare XCOPY NAA device identifiers
++ *
++ * @se_dev: device being considered for match
++ * @dev_wwn: XCOPY requested NAA dev_wwn
++ * @return: 1 on match, 0 on no-match
++ */
+ static int target_xcopy_locate_se_dev_e4_iter(struct se_device *se_dev,
+- void *data)
++ const unsigned char *dev_wwn)
+ {
+- struct xcopy_dev_search_info *info = data;
+ unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
+ int rc;
+
+- if (!se_dev->dev_attrib.emulate_3pc)
++ if (!se_dev->dev_attrib.emulate_3pc) {
++ pr_debug("XCOPY: emulate_3pc disabled on se_dev %p\n", se_dev);
+ return 0;
++ }
+
+ memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
+ target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]);
+
+- rc = memcmp(&tmp_dev_wwn[0], info->dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN);
+- if (rc != 0)
+- return 0;
+-
+- info->found_dev = se_dev;
+- pr_debug("XCOPY 0xe4: located se_dev: %p\n", se_dev);
+-
+- rc = target_depend_item(&se_dev->dev_group.cg_item);
++ rc = memcmp(&tmp_dev_wwn[0], dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN);
+ if (rc != 0) {
+- pr_err("configfs_depend_item attempt failed: %d for se_dev: %p\n",
+- rc, se_dev);
+- return rc;
++ pr_debug("XCOPY: skip non-matching: %*ph\n",
++ XCOPY_NAA_IEEE_REGEX_LEN, tmp_dev_wwn);
++ return 0;
+ }
++ pr_debug("XCOPY 0xe4: located se_dev: %p\n", se_dev);
+
+- pr_debug("Called configfs_depend_item for se_dev: %p se_dev->se_dev_group: %p\n",
+- se_dev, &se_dev->dev_group);
+ return 1;
+ }
+
+-static int target_xcopy_locate_se_dev_e4(const unsigned char *dev_wwn,
+- struct se_device **found_dev)
++static int target_xcopy_locate_se_dev_e4(struct se_session *sess,
++ const unsigned char *dev_wwn,
++ struct se_device **_found_dev,
++ struct percpu_ref **_found_lun_ref)
+ {
+- struct xcopy_dev_search_info info;
+- int ret;
+-
+- memset(&info, 0, sizeof(info));
+- info.dev_wwn = dev_wwn;
+-
+- ret = target_for_each_device(target_xcopy_locate_se_dev_e4_iter, &info);
+- if (ret == 1) {
+- *found_dev = info.found_dev;
+- return 0;
+- } else {
+- pr_debug_ratelimited("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
+- return -EINVAL;
++ struct se_dev_entry *deve;
++ struct se_node_acl *nacl;
++ struct se_lun *this_lun = NULL;
++ struct se_device *found_dev = NULL;
++
++ /* cmd with NULL sess indicates no associated $FABRIC_MOD */
++ if (!sess)
++ goto err_out;
++
++ pr_debug("XCOPY 0xe4: searching for: %*ph\n",
++ XCOPY_NAA_IEEE_REGEX_LEN, dev_wwn);
++
++ nacl = sess->se_node_acl;
++ rcu_read_lock();
++ hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
++ struct se_device *this_dev;
++ int rc;
++
++ this_lun = rcu_dereference(deve->se_lun);
++ this_dev = rcu_dereference_raw(this_lun->lun_se_dev);
++
++ rc = target_xcopy_locate_se_dev_e4_iter(this_dev, dev_wwn);
++ if (rc) {
++ if (percpu_ref_tryget_live(&this_lun->lun_ref))
++ found_dev = this_dev;
++ break;
++ }
+ }
++ rcu_read_unlock();
++ if (found_dev == NULL)
++ goto err_out;
++
++ pr_debug("lun_ref held for se_dev: %p se_dev->se_dev_group: %p\n",
++ found_dev, &found_dev->dev_group);
++ *_found_dev = found_dev;
++ *_found_lun_ref = &this_lun->lun_ref;
++ return 0;
++err_out:
++ pr_debug_ratelimited("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
++ return -EINVAL;
+ }
+
+ static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op *xop,
+@@ -246,12 +269,16 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
+
+ switch (xop->op_origin) {
+ case XCOL_SOURCE_RECV_OP:
+- rc = target_xcopy_locate_se_dev_e4(xop->dst_tid_wwn,
+- &xop->dst_dev);
++ rc = target_xcopy_locate_se_dev_e4(se_cmd->se_sess,
++ xop->dst_tid_wwn,
++ &xop->dst_dev,
++ &xop->remote_lun_ref);
+ break;
+ case XCOL_DEST_RECV_OP:
+- rc = target_xcopy_locate_se_dev_e4(xop->src_tid_wwn,
+- &xop->src_dev);
++ rc = target_xcopy_locate_se_dev_e4(se_cmd->se_sess,
++ xop->src_tid_wwn,
++ &xop->src_dev,
++ &xop->remote_lun_ref);
+ break;
+ default:
+ pr_err("XCOPY CSCD descriptor IDs not found in CSCD list - "
+@@ -391,18 +418,12 @@ static int xcopy_pt_get_cmd_state(struct se_cmd *se_cmd)
+
+ static void xcopy_pt_undepend_remotedev(struct xcopy_op *xop)
+ {
+- struct se_device *remote_dev;
+-
+ if (xop->op_origin == XCOL_SOURCE_RECV_OP)
+- remote_dev = xop->dst_dev;
++ pr_debug("putting dst lun_ref for %p\n", xop->dst_dev);
+ else
+- remote_dev = xop->src_dev;
+-
+- pr_debug("Calling configfs_undepend_item for"
+- " remote_dev: %p remote_dev->dev_group: %p\n",
+- remote_dev, &remote_dev->dev_group.cg_item);
++ pr_debug("putting src lun_ref for %p\n", xop->src_dev);
+
+- target_undepend_item(&remote_dev->dev_group.cg_item);
++ percpu_ref_put(xop->remote_lun_ref);
+ }
+
+ static void xcopy_pt_release_cmd(struct se_cmd *se_cmd)
+diff --git a/drivers/target/target_core_xcopy.h b/drivers/target/target_core_xcopy.h
+index c56a1bde9417..e5f20005179a 100644
+--- a/drivers/target/target_core_xcopy.h
++++ b/drivers/target/target_core_xcopy.h
+@@ -27,6 +27,7 @@ struct xcopy_op {
+ struct se_device *dst_dev;
+ unsigned char dst_tid_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
+ unsigned char local_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
++ struct percpu_ref *remote_lun_ref;
+
+ sector_t src_lba;
+ sector_t dst_lba;
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 7f71218cc1e5..283fc0f41cd2 100644
--- a/drivers/usb/core/hub.c
@@ -2125,6 +2458,865 @@ index 7f71218cc1e5..283fc0f41cd2 100644
/* Lock the device, then check to see if we were
* disconnected while waiting for the lock to succeed. */
usb_lock_device(hdev);
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 1dfd4b2d0e1e..6b35b7e88136 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -1457,7 +1457,7 @@ void btrfs_check_leaked_roots(struct btrfs_fs_info *fs_info)
+ root = list_first_entry(&fs_info->allocated_roots,
+ struct btrfs_root, leak_list);
+ btrfs_err(fs_info, "leaked root %s refcount %d",
+- btrfs_root_name(root->root_key.objectid, buf),
++ btrfs_root_name(&root->root_key, buf),
+ refcount_read(&root->refs));
+ while (refcount_read(&root->refs) > 1)
+ btrfs_put_root(root);
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index 6e3b72e63e42..c9cee458e001 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -676,9 +676,7 @@ alloc_extent_state_atomic(struct extent_state *prealloc)
+
+ static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
+ {
+- struct inode *inode = tree->private_data;
+-
+- btrfs_panic(btrfs_sb(inode->i_sb), err,
++ btrfs_panic(tree->fs_info, err,
+ "locking error: extent tree was modified by another thread while locked");
+ }
+
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 070716650df8..a8e0a6b038d3 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -9390,7 +9390,8 @@ static struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode
+ * some fairly slow code that needs optimization. This walks the list
+ * of all the inodes with pending delalloc and forces them to disk.
+ */
+-static int start_delalloc_inodes(struct btrfs_root *root, u64 *nr, bool snapshot,
++static int start_delalloc_inodes(struct btrfs_root *root,
++ struct writeback_control *wbc, bool snapshot,
+ bool in_reclaim_context)
+ {
+ struct btrfs_inode *binode;
+@@ -9399,6 +9400,7 @@ static int start_delalloc_inodes(struct btrfs_root *root, u64 *nr, bool snapshot
+ struct list_head works;
+ struct list_head splice;
+ int ret = 0;
++ bool full_flush = wbc->nr_to_write == LONG_MAX;
+
+ INIT_LIST_HEAD(&works);
+ INIT_LIST_HEAD(&splice);
+@@ -9427,18 +9429,24 @@ static int start_delalloc_inodes(struct btrfs_root *root, u64 *nr, bool snapshot
+ if (snapshot)
+ set_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
+ &binode->runtime_flags);
+- work = btrfs_alloc_delalloc_work(inode);
+- if (!work) {
+- iput(inode);
+- ret = -ENOMEM;
+- goto out;
+- }
+- list_add_tail(&work->list, &works);
+- btrfs_queue_work(root->fs_info->flush_workers,
+- &work->work);
+- if (*nr != U64_MAX) {
+- (*nr)--;
+- if (*nr == 0)
++ if (full_flush) {
++ work = btrfs_alloc_delalloc_work(inode);
++ if (!work) {
++ iput(inode);
++ ret = -ENOMEM;
++ goto out;
++ }
++ list_add_tail(&work->list, &works);
++ btrfs_queue_work(root->fs_info->flush_workers,
++ &work->work);
++ } else {
++ ret = sync_inode(inode, wbc);
++ if (!ret &&
++ test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
++ &BTRFS_I(inode)->runtime_flags))
++ ret = sync_inode(inode, wbc);
++ btrfs_add_delayed_iput(inode);
++ if (ret || wbc->nr_to_write <= 0)
+ goto out;
+ }
+ cond_resched();
+@@ -9464,18 +9472,29 @@ static int start_delalloc_inodes(struct btrfs_root *root, u64 *nr, bool snapshot
+
+ int btrfs_start_delalloc_snapshot(struct btrfs_root *root)
+ {
++ struct writeback_control wbc = {
++ .nr_to_write = LONG_MAX,
++ .sync_mode = WB_SYNC_NONE,
++ .range_start = 0,
++ .range_end = LLONG_MAX,
++ };
+ struct btrfs_fs_info *fs_info = root->fs_info;
+- u64 nr = U64_MAX;
+
+ if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
+ return -EROFS;
+
+- return start_delalloc_inodes(root, &nr, true, false);
++ return start_delalloc_inodes(root, &wbc, true, false);
+ }
+
+ int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, u64 nr,
+ bool in_reclaim_context)
+ {
++ struct writeback_control wbc = {
++ .nr_to_write = (nr == U64_MAX) ? LONG_MAX : (unsigned long)nr,
++ .sync_mode = WB_SYNC_NONE,
++ .range_start = 0,
++ .range_end = LLONG_MAX,
++ };
+ struct btrfs_root *root;
+ struct list_head splice;
+ int ret;
+@@ -9489,6 +9508,13 @@ int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, u64 nr,
+ spin_lock(&fs_info->delalloc_root_lock);
+ list_splice_init(&fs_info->delalloc_roots, &splice);
+ while (!list_empty(&splice) && nr) {
++ /*
++ * Reset nr_to_write here so we know that we're doing a full
++ * flush.
++ */
++ if (nr == U64_MAX)
++ wbc.nr_to_write = LONG_MAX;
++
+ root = list_first_entry(&splice, struct btrfs_root,
+ delalloc_root);
+ root = btrfs_grab_root(root);
+@@ -9497,9 +9523,9 @@ int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, u64 nr,
+ &fs_info->delalloc_roots);
+ spin_unlock(&fs_info->delalloc_root_lock);
+
+- ret = start_delalloc_inodes(root, &nr, false, in_reclaim_context);
++ ret = start_delalloc_inodes(root, &wbc, false, in_reclaim_context);
+ btrfs_put_root(root);
+- if (ret < 0)
++ if (ret < 0 || wbc.nr_to_write <= 0)
+ goto out;
+ spin_lock(&fs_info->delalloc_root_lock);
+ }
+diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c
+index fe5e0026129d..aae1027bd76a 100644
+--- a/fs/btrfs/print-tree.c
++++ b/fs/btrfs/print-tree.c
+@@ -26,22 +26,22 @@ static const struct root_name_map root_map[] = {
+ { BTRFS_DATA_RELOC_TREE_OBJECTID, "DATA_RELOC_TREE" },
+ };
+
+-const char *btrfs_root_name(u64 objectid, char *buf)
++const char *btrfs_root_name(const struct btrfs_key *key, char *buf)
+ {
+ int i;
+
+- if (objectid == BTRFS_TREE_RELOC_OBJECTID) {
++ if (key->objectid == BTRFS_TREE_RELOC_OBJECTID) {
+ snprintf(buf, BTRFS_ROOT_NAME_BUF_LEN,
+- "TREE_RELOC offset=%llu", objectid);
++ "TREE_RELOC offset=%llu", key->offset);
+ return buf;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(root_map); i++) {
+- if (root_map[i].id == objectid)
++ if (root_map[i].id == key->objectid)
+ return root_map[i].name;
+ }
+
+- snprintf(buf, BTRFS_ROOT_NAME_BUF_LEN, "%llu", objectid);
++ snprintf(buf, BTRFS_ROOT_NAME_BUF_LEN, "%llu", key->objectid);
+ return buf;
+ }
+
+diff --git a/fs/btrfs/print-tree.h b/fs/btrfs/print-tree.h
+index 78b99385a503..8c3e9319ec4e 100644
+--- a/fs/btrfs/print-tree.h
++++ b/fs/btrfs/print-tree.h
+@@ -11,6 +11,6 @@
+
+ void btrfs_print_leaf(struct extent_buffer *l);
+ void btrfs_print_tree(struct extent_buffer *c, bool follow);
+-const char *btrfs_root_name(u64 objectid, char *buf);
++const char *btrfs_root_name(const struct btrfs_key *key, char *buf);
+
+ #endif
+diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
+index 19b7db8b2117..df63ef64c5c0 100644
+--- a/fs/btrfs/relocation.c
++++ b/fs/btrfs/relocation.c
+@@ -2975,11 +2975,16 @@ static int delete_v1_space_cache(struct extent_buffer *leaf,
+ return 0;
+
+ for (i = 0; i < btrfs_header_nritems(leaf); i++) {
++ u8 type;
++
+ btrfs_item_key_to_cpu(leaf, &key, i);
+ if (key.type != BTRFS_EXTENT_DATA_KEY)
+ continue;
+ ei = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
+- if (btrfs_file_extent_type(leaf, ei) == BTRFS_FILE_EXTENT_REG &&
++ type = btrfs_file_extent_type(leaf, ei);
++
++ if ((type == BTRFS_FILE_EXTENT_REG ||
++ type == BTRFS_FILE_EXTENT_PREALLOC) &&
+ btrfs_file_extent_disk_bytenr(leaf, ei) == data_bytenr) {
+ found = true;
+ space_cache_ino = key.objectid;
+diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
+index 67e55c5479b8..e8347461c8dd 100644
+--- a/fs/btrfs/space-info.c
++++ b/fs/btrfs/space-info.c
+@@ -532,7 +532,9 @@ static void shrink_delalloc(struct btrfs_fs_info *fs_info,
+
+ loops = 0;
+ while ((delalloc_bytes || dio_bytes) && loops < 3) {
+- btrfs_start_delalloc_roots(fs_info, items, true);
++ u64 nr_pages = min(delalloc_bytes, to_reclaim) >> PAGE_SHIFT;
++
++ btrfs_start_delalloc_roots(fs_info, nr_pages, true);
+
+ loops++;
+ if (wait_ordered && !trans) {
+diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
+index 028e733e42f3..582061c7b547 100644
+--- a/fs/btrfs/tree-checker.c
++++ b/fs/btrfs/tree-checker.c
+@@ -760,6 +760,7 @@ int btrfs_check_chunk_valid(struct extent_buffer *leaf,
+ {
+ struct btrfs_fs_info *fs_info = leaf->fs_info;
+ u64 length;
++ u64 chunk_end;
+ u64 stripe_len;
+ u16 num_stripes;
+ u16 sub_stripes;
+@@ -814,6 +815,12 @@ int btrfs_check_chunk_valid(struct extent_buffer *leaf,
+ "invalid chunk length, have %llu", length);
+ return -EUCLEAN;
+ }
++ if (unlikely(check_add_overflow(logical, length, &chunk_end))) {
++ chunk_err(leaf, chunk, logical,
++"invalid chunk logical start and length, have logical start %llu length %llu",
++ logical, length);
++ return -EUCLEAN;
++ }
+ if (unlikely(!is_power_of_2(stripe_len) || stripe_len != BTRFS_STRIPE_LEN)) {
+ chunk_err(leaf, chunk, logical,
+ "invalid chunk stripe length: %llu",
+diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
+index 816e1427f17e..04bf8066980c 100644
+--- a/fs/nfs/delegation.c
++++ b/fs/nfs/delegation.c
+@@ -1011,22 +1011,24 @@ nfs_delegation_find_inode_server(struct nfs_server *server,
+ const struct nfs_fh *fhandle)
+ {
+ struct nfs_delegation *delegation;
+- struct inode *freeme, *res = NULL;
++ struct super_block *freeme = NULL;
++ struct inode *res = NULL;
+
+ list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
+ spin_lock(&delegation->lock);
+ if (delegation->inode != NULL &&
+ !test_bit(NFS_DELEGATION_REVOKED, &delegation->flags) &&
+ nfs_compare_fh(fhandle, &NFS_I(delegation->inode)->fh) == 0) {
+- freeme = igrab(delegation->inode);
+- if (freeme && nfs_sb_active(freeme->i_sb))
+- res = freeme;
++ if (nfs_sb_active(server->super)) {
++ freeme = server->super;
++ res = igrab(delegation->inode);
++ }
+ spin_unlock(&delegation->lock);
+ if (res != NULL)
+ return res;
+ if (freeme) {
+ rcu_read_unlock();
+- iput(freeme);
++ nfs_sb_deactive(freeme);
+ rcu_read_lock();
+ }
+ return ERR_PTR(-EAGAIN);
+diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
+index b840d0a91c9d..62d3189745cd 100644
+--- a/fs/nfs/internal.h
++++ b/fs/nfs/internal.h
+@@ -136,9 +136,29 @@ struct nfs_fs_context {
+ } clone_data;
+ };
+
+-#define nfs_errorf(fc, fmt, ...) errorf(fc, fmt, ## __VA_ARGS__)
+-#define nfs_invalf(fc, fmt, ...) invalf(fc, fmt, ## __VA_ARGS__)
+-#define nfs_warnf(fc, fmt, ...) warnf(fc, fmt, ## __VA_ARGS__)
++#define nfs_errorf(fc, fmt, ...) ((fc)->log.log ? \
++ errorf(fc, fmt, ## __VA_ARGS__) : \
++ ({ dprintk(fmt "\n", ## __VA_ARGS__); }))
++
++#define nfs_ferrorf(fc, fac, fmt, ...) ((fc)->log.log ? \
++ errorf(fc, fmt, ## __VA_ARGS__) : \
++ ({ dfprintk(fac, fmt "\n", ## __VA_ARGS__); }))
++
++#define nfs_invalf(fc, fmt, ...) ((fc)->log.log ? \
++ invalf(fc, fmt, ## __VA_ARGS__) : \
++ ({ dprintk(fmt "\n", ## __VA_ARGS__); -EINVAL; }))
++
++#define nfs_finvalf(fc, fac, fmt, ...) ((fc)->log.log ? \
++ invalf(fc, fmt, ## __VA_ARGS__) : \
++ ({ dfprintk(fac, fmt "\n", ## __VA_ARGS__); -EINVAL; }))
++
++#define nfs_warnf(fc, fmt, ...) ((fc)->log.log ? \
++ warnf(fc, fmt, ## __VA_ARGS__) : \
++ ({ dprintk(fmt "\n", ## __VA_ARGS__); }))
++
++#define nfs_fwarnf(fc, fac, fmt, ...) ((fc)->log.log ? \
++ warnf(fc, fmt, ## __VA_ARGS__) : \
++ ({ dfprintk(fac, fmt "\n", ## __VA_ARGS__); }))
+
+ static inline struct nfs_fs_context *nfs_fc2context(const struct fs_context *fc)
+ {
+@@ -579,12 +599,14 @@ extern void nfs4_test_session_trunk(struct rpc_clnt *clnt,
+
+ static inline struct inode *nfs_igrab_and_active(struct inode *inode)
+ {
+- inode = igrab(inode);
+- if (inode != NULL && !nfs_sb_active(inode->i_sb)) {
+- iput(inode);
+- inode = NULL;
++ struct super_block *sb = inode->i_sb;
++
++ if (sb && nfs_sb_active(sb)) {
++ if (igrab(inode))
++ return inode;
++ nfs_sb_deactive(sb);
+ }
+- return inode;
++ return NULL;
+ }
+
+ static inline void nfs_iput_and_deactive(struct inode *inode)
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 0ce04e0e5d82..2f4679a62712 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -3536,10 +3536,8 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
+ trace_nfs4_close(state, &calldata->arg, &calldata->res, task->tk_status);
+
+ /* Handle Layoutreturn errors */
+- if (pnfs_roc_done(task, calldata->inode,
+- &calldata->arg.lr_args,
+- &calldata->res.lr_res,
+- &calldata->res.lr_ret) == -EAGAIN)
++ if (pnfs_roc_done(task, &calldata->arg.lr_args, &calldata->res.lr_res,
++ &calldata->res.lr_ret) == -EAGAIN)
+ goto out_restart;
+
+ /* hmm. we are done with the inode, and in the process of freeing
+@@ -6384,10 +6382,8 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
+ trace_nfs4_delegreturn_exit(&data->args, &data->res, task->tk_status);
+
+ /* Handle Layoutreturn errors */
+- if (pnfs_roc_done(task, data->inode,
+- &data->args.lr_args,
+- &data->res.lr_res,
+- &data->res.lr_ret) == -EAGAIN)
++ if (pnfs_roc_done(task, &data->args.lr_args, &data->res.lr_res,
++ &data->res.lr_ret) == -EAGAIN)
+ goto out_restart;
+
+ switch (task->tk_status) {
+@@ -6441,10 +6437,10 @@ static void nfs4_delegreturn_release(void *calldata)
+ struct nfs4_delegreturndata *data = calldata;
+ struct inode *inode = data->inode;
+
++ if (data->lr.roc)
++ pnfs_roc_release(&data->lr.arg, &data->lr.res,
++ data->res.lr_ret);
+ if (inode) {
+- if (data->lr.roc)
+- pnfs_roc_release(&data->lr.arg, &data->lr.res,
+- data->res.lr_ret);
+ nfs_post_op_update_inode_force_wcc(inode, &data->fattr);
+ nfs_iput_and_deactive(inode);
+ }
+@@ -6520,16 +6516,14 @@ static int _nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred,
+ nfs_fattr_init(data->res.fattr);
+ data->timestamp = jiffies;
+ data->rpc_status = 0;
+- data->lr.roc = pnfs_roc(inode, &data->lr.arg, &data->lr.res, cred);
+ data->inode = nfs_igrab_and_active(inode);
+- if (data->inode) {
++ if (data->inode || issync) {
++ data->lr.roc = pnfs_roc(inode, &data->lr.arg, &data->lr.res,
++ cred);
+ if (data->lr.roc) {
+ data->args.lr_args = &data->lr.arg;
+ data->res.lr_res = &data->lr.res;
+ }
+- } else if (data->lr.roc) {
+- pnfs_roc_release(&data->lr.arg, &data->lr.res, 0);
+- data->lr.roc = false;
+ }
+
+ task_setup_data.callback_data = data;
+@@ -7111,9 +7105,9 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *f
+ data->arg.new_lock_owner, ret);
+ } else
+ data->cancelled = true;
++ trace_nfs4_set_lock(fl, state, &data->res.stateid, cmd, ret);
+ rpc_put_task(task);
+ dprintk("%s: done, ret = %d!\n", __func__, ret);
+- trace_nfs4_set_lock(fl, state, &data->res.stateid, cmd, ret);
+ return ret;
+ }
+
+diff --git a/fs/nfs/nfs4super.c b/fs/nfs/nfs4super.c
+index 984cc42ee54d..d09bcfd7db89 100644
+--- a/fs/nfs/nfs4super.c
++++ b/fs/nfs/nfs4super.c
+@@ -227,7 +227,7 @@ int nfs4_try_get_tree(struct fs_context *fc)
+ fc, ctx->nfs_server.hostname,
+ ctx->nfs_server.export_path);
+ if (err) {
+- nfs_errorf(fc, "NFS4: Couldn't follow remote path");
++ nfs_ferrorf(fc, MOUNT, "NFS4: Couldn't follow remote path");
+ dfprintk(MOUNT, "<-- nfs4_try_get_tree() = %d [error]\n", err);
+ } else {
+ dfprintk(MOUNT, "<-- nfs4_try_get_tree() = 0\n");
+@@ -250,7 +250,7 @@ int nfs4_get_referral_tree(struct fs_context *fc)
+ fc, ctx->nfs_server.hostname,
+ ctx->nfs_server.export_path);
+ if (err) {
+- nfs_errorf(fc, "NFS4: Couldn't follow remote path");
++ nfs_ferrorf(fc, MOUNT, "NFS4: Couldn't follow remote path");
+ dfprintk(MOUNT, "<-- nfs4_get_referral_tree() = %d [error]\n", err);
+ } else {
+ dfprintk(MOUNT, "<-- nfs4_get_referral_tree() = 0\n");
+diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
+index 07f59dc8cb2e..4f274f21c4ab 100644
+--- a/fs/nfs/pnfs.c
++++ b/fs/nfs/pnfs.c
+@@ -1152,7 +1152,7 @@ void pnfs_layoutreturn_free_lsegs(struct pnfs_layout_hdr *lo,
+ LIST_HEAD(freeme);
+
+ spin_lock(&inode->i_lock);
+- if (!pnfs_layout_is_valid(lo) || !arg_stateid ||
++ if (!pnfs_layout_is_valid(lo) ||
+ !nfs4_stateid_match_other(&lo->plh_stateid, arg_stateid))
+ goto out_unlock;
+ if (stateid) {
+@@ -1509,10 +1509,8 @@ bool pnfs_roc(struct inode *ino,
+ return false;
+ }
+
+-int pnfs_roc_done(struct rpc_task *task, struct inode *inode,
+- struct nfs4_layoutreturn_args **argpp,
+- struct nfs4_layoutreturn_res **respp,
+- int *ret)
++int pnfs_roc_done(struct rpc_task *task, struct nfs4_layoutreturn_args **argpp,
++ struct nfs4_layoutreturn_res **respp, int *ret)
+ {
+ struct nfs4_layoutreturn_args *arg = *argpp;
+ int retval = -EAGAIN;
+@@ -1545,7 +1543,7 @@ int pnfs_roc_done(struct rpc_task *task, struct inode *inode,
+ return 0;
+ case -NFS4ERR_OLD_STATEID:
+ if (!nfs4_layout_refresh_old_stateid(&arg->stateid,
+- &arg->range, inode))
++ &arg->range, arg->inode))
+ break;
+ *ret = -NFS4ERR_NOMATCHING_LAYOUT;
+ return -EAGAIN;
+@@ -1560,23 +1558,28 @@ void pnfs_roc_release(struct nfs4_layoutreturn_args *args,
+ int ret)
+ {
+ struct pnfs_layout_hdr *lo = args->layout;
+- const nfs4_stateid *arg_stateid = NULL;
++ struct inode *inode = args->inode;
+ const nfs4_stateid *res_stateid = NULL;
+ struct nfs4_xdr_opaque_data *ld_private = args->ld_private;
+
+ switch (ret) {
+ case -NFS4ERR_NOMATCHING_LAYOUT:
++ spin_lock(&inode->i_lock);
++ if (pnfs_layout_is_valid(lo) &&
++ nfs4_stateid_match_other(&args->stateid, &lo->plh_stateid))
++ pnfs_set_plh_return_info(lo, args->range.iomode, 0);
++ pnfs_clear_layoutreturn_waitbit(lo);
++ spin_unlock(&inode->i_lock);
+ break;
+ case 0:
+ if (res->lrs_present)
+ res_stateid = &res->stateid;
+ fallthrough;
+ default:
+- arg_stateid = &args->stateid;
++ pnfs_layoutreturn_free_lsegs(lo, &args->stateid, &args->range,
++ res_stateid);
+ }
+ trace_nfs4_layoutreturn_on_close(args->inode, &args->stateid, ret);
+- pnfs_layoutreturn_free_lsegs(lo, arg_stateid, &args->range,
+- res_stateid);
+ if (ld_private && ld_private->ops && ld_private->ops->free)
+ ld_private->ops->free(ld_private);
+ pnfs_put_layout_hdr(lo);
+@@ -2015,6 +2018,27 @@ pnfs_update_layout(struct inode *ino,
+ goto lookup_again;
+ }
+
++ /*
++ * Because we free lsegs when sending LAYOUTRETURN, we need to wait
++ * for LAYOUTRETURN.
++ */
++ if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
++ spin_unlock(&ino->i_lock);
++ dprintk("%s wait for layoutreturn\n", __func__);
++ lseg = ERR_PTR(pnfs_prepare_to_retry_layoutget(lo));
++ if (!IS_ERR(lseg)) {
++ pnfs_put_layout_hdr(lo);
++ dprintk("%s retrying\n", __func__);
++ trace_pnfs_update_layout(ino, pos, count, iomode, lo,
++ lseg,
++ PNFS_UPDATE_LAYOUT_RETRY);
++ goto lookup_again;
++ }
++ trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
++ PNFS_UPDATE_LAYOUT_RETURN);
++ goto out_put_layout_hdr;
++ }
++
+ lseg = pnfs_find_lseg(lo, &arg, strict_iomode);
+ if (lseg) {
+ trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
+@@ -2067,28 +2091,6 @@ pnfs_update_layout(struct inode *ino,
+ nfs4_stateid_copy(&stateid, &lo->plh_stateid);
+ }
+
+- /*
+- * Because we free lsegs before sending LAYOUTRETURN, we need to wait
+- * for LAYOUTRETURN even if first is true.
+- */
+- if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
+- spin_unlock(&ino->i_lock);
+- dprintk("%s wait for layoutreturn\n", __func__);
+- lseg = ERR_PTR(pnfs_prepare_to_retry_layoutget(lo));
+- if (!IS_ERR(lseg)) {
+- if (first)
+- pnfs_clear_first_layoutget(lo);
+- pnfs_put_layout_hdr(lo);
+- dprintk("%s retrying\n", __func__);
+- trace_pnfs_update_layout(ino, pos, count, iomode, lo,
+- lseg, PNFS_UPDATE_LAYOUT_RETRY);
+- goto lookup_again;
+- }
+- trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
+- PNFS_UPDATE_LAYOUT_RETURN);
+- goto out_put_layout_hdr;
+- }
+-
+ if (pnfs_layoutgets_blocked(lo)) {
+ trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
+ PNFS_UPDATE_LAYOUT_BLOCKED);
+@@ -2242,6 +2244,7 @@ static void _lgopen_prepare_attached(struct nfs4_opendata *data,
+ &rng, GFP_KERNEL);
+ if (!lgp) {
+ pnfs_clear_first_layoutget(lo);
++ nfs_layoutget_end(lo);
+ pnfs_put_layout_hdr(lo);
+ return;
+ }
+diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
+index bbd3de1025f2..d810ae674f4e 100644
+--- a/fs/nfs/pnfs.h
++++ b/fs/nfs/pnfs.h
+@@ -297,10 +297,8 @@ bool pnfs_roc(struct inode *ino,
+ struct nfs4_layoutreturn_args *args,
+ struct nfs4_layoutreturn_res *res,
+ const struct cred *cred);
+-int pnfs_roc_done(struct rpc_task *task, struct inode *inode,
+- struct nfs4_layoutreturn_args **argpp,
+- struct nfs4_layoutreturn_res **respp,
+- int *ret);
++int pnfs_roc_done(struct rpc_task *task, struct nfs4_layoutreturn_args **argpp,
++ struct nfs4_layoutreturn_res **respp, int *ret);
+ void pnfs_roc_release(struct nfs4_layoutreturn_args *args,
+ struct nfs4_layoutreturn_res *res,
+ int ret);
+@@ -772,7 +770,7 @@ pnfs_roc(struct inode *ino,
+ }
+
+ static inline int
+-pnfs_roc_done(struct rpc_task *task, struct inode *inode,
++pnfs_roc_done(struct rpc_task *task,
+ struct nfs4_layoutreturn_args **argpp,
+ struct nfs4_layoutreturn_res **respp,
+ int *ret)
+diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c
+index 2efcfdd348a1..49d3389bd813 100644
+--- a/fs/nfs/pnfs_nfs.c
++++ b/fs/nfs/pnfs_nfs.c
+@@ -78,22 +78,18 @@ void
+ pnfs_generic_clear_request_commit(struct nfs_page *req,
+ struct nfs_commit_info *cinfo)
+ {
+- struct pnfs_layout_segment *freeme = NULL;
++ struct pnfs_commit_bucket *bucket = NULL;
+
+ if (!test_and_clear_bit(PG_COMMIT_TO_DS, &req->wb_flags))
+ goto out;
+ cinfo->ds->nwritten--;
+- if (list_is_singular(&req->wb_list)) {
+- struct pnfs_commit_bucket *bucket;
+-
++ if (list_is_singular(&req->wb_list))
+ bucket = list_first_entry(&req->wb_list,
+- struct pnfs_commit_bucket,
+- written);
+- freeme = pnfs_free_bucket_lseg(bucket);
+- }
++ struct pnfs_commit_bucket, written);
+ out:
+ nfs_request_remove_commit_list(req, cinfo);
+- pnfs_put_lseg(freeme);
++ if (bucket)
++ pnfs_put_lseg(pnfs_free_bucket_lseg(bucket));
+ }
+ EXPORT_SYMBOL_GPL(pnfs_generic_clear_request_commit);
+
+@@ -407,12 +403,16 @@ pnfs_bucket_get_committing(struct list_head *head,
+ struct pnfs_commit_bucket *bucket,
+ struct nfs_commit_info *cinfo)
+ {
++ struct pnfs_layout_segment *lseg;
+ struct list_head *pos;
+
+ list_for_each(pos, &bucket->committing)
+ cinfo->ds->ncommitting--;
+ list_splice_init(&bucket->committing, head);
+- return pnfs_free_bucket_lseg(bucket);
++ lseg = pnfs_free_bucket_lseg(bucket);
++ if (!lseg)
++ lseg = pnfs_get_lseg(bucket->lseg);
++ return lseg;
+ }
+
+ static struct nfs_commit_data *
+@@ -424,8 +424,6 @@ pnfs_bucket_fetch_commitdata(struct pnfs_commit_bucket *bucket,
+ if (!data)
+ return NULL;
+ data->lseg = pnfs_bucket_get_committing(&data->pages, bucket, cinfo);
+- if (!data->lseg)
+- data->lseg = pnfs_get_lseg(bucket->lseg);
+ return data;
+ }
+
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index 4727b7f03c5b..8d6d2678abad 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -50,6 +50,11 @@
+ #include "pnfs.h"
+ #include "trace.h"
+
++static bool inter_copy_offload_enable;
++module_param(inter_copy_offload_enable, bool, 0644);
++MODULE_PARM_DESC(inter_copy_offload_enable,
++ "Enable inter server to server copy offload. Default: false");
++
+ #ifdef CONFIG_NFSD_V4_SECURITY_LABEL
+ #include <linux/security.h>
+
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index 45ee6b12ce5b..eaaa1605b5b5 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -147,6 +147,25 @@ svcxdr_dupstr(struct nfsd4_compoundargs *argp, void *buf, u32 len)
+ return p;
+ }
+
++static void *
++svcxdr_savemem(struct nfsd4_compoundargs *argp, __be32 *p, u32 len)
++{
++ __be32 *tmp;
++
++ /*
++ * The location of the decoded data item is stable,
++ * so @p is OK to use. This is the common case.
++ */
++ if (p != argp->xdr->scratch.iov_base)
++ return p;
++
++ tmp = svcxdr_tmpalloc(argp, len);
++ if (!tmp)
++ return NULL;
++ memcpy(tmp, p, len);
++ return tmp;
++}
++
+ /*
+ * NFSv4 basic data type decoders
+ */
+@@ -183,11 +202,10 @@ nfsd4_decode_opaque(struct nfsd4_compoundargs *argp, struct xdr_netobj *o)
+ p = xdr_inline_decode(argp->xdr, len);
+ if (!p)
+ return nfserr_bad_xdr;
+- o->data = svcxdr_tmpalloc(argp, len);
++ o->data = svcxdr_savemem(argp, p, len);
+ if (!o->data)
+ return nfserr_jukebox;
+ o->len = len;
+- memcpy(o->data, p, len);
+
+ return nfs_ok;
+ }
+@@ -205,10 +223,9 @@ nfsd4_decode_component4(struct nfsd4_compoundargs *argp, char **namp, u32 *lenp)
+ status = check_filename((char *)p, *lenp);
+ if (status)
+ return status;
+- *namp = svcxdr_tmpalloc(argp, *lenp);
++ *namp = svcxdr_savemem(argp, p, *lenp);
+ if (!*namp)
+ return nfserr_jukebox;
+- memcpy(*namp, p, *lenp);
+
+ return nfs_ok;
+ }
+@@ -1200,10 +1217,9 @@ nfsd4_decode_putfh(struct nfsd4_compoundargs *argp, struct nfsd4_putfh *putfh)
+ p = xdr_inline_decode(argp->xdr, putfh->pf_fhlen);
+ if (!p)
+ return nfserr_bad_xdr;
+- putfh->pf_fhval = svcxdr_tmpalloc(argp, putfh->pf_fhlen);
++ putfh->pf_fhval = svcxdr_savemem(argp, p, putfh->pf_fhlen);
+ if (!putfh->pf_fhval)
+ return nfserr_jukebox;
+- memcpy(putfh->pf_fhval, p, putfh->pf_fhlen);
+
+ return nfs_ok;
+ }
+@@ -1318,24 +1334,20 @@ nfsd4_decode_setclientid(struct nfsd4_compoundargs *argp, struct nfsd4_setclient
+ p = xdr_inline_decode(argp->xdr, setclientid->se_callback_netid_len);
+ if (!p)
+ return nfserr_bad_xdr;
+- setclientid->se_callback_netid_val = svcxdr_tmpalloc(argp,
++ setclientid->se_callback_netid_val = svcxdr_savemem(argp, p,
+ setclientid->se_callback_netid_len);
+ if (!setclientid->se_callback_netid_val)
+ return nfserr_jukebox;
+- memcpy(setclientid->se_callback_netid_val, p,
+- setclientid->se_callback_netid_len);
+
+ if (xdr_stream_decode_u32(argp->xdr, &setclientid->se_callback_addr_len) < 0)
+ return nfserr_bad_xdr;
+ p = xdr_inline_decode(argp->xdr, setclientid->se_callback_addr_len);
+ if (!p)
+ return nfserr_bad_xdr;
+- setclientid->se_callback_addr_val = svcxdr_tmpalloc(argp,
++ setclientid->se_callback_addr_val = svcxdr_savemem(argp, p,
+ setclientid->se_callback_addr_len);
+ if (!setclientid->se_callback_addr_val)
+ return nfserr_jukebox;
+- memcpy(setclientid->se_callback_addr_val, p,
+- setclientid->se_callback_addr_len);
+ if (xdr_stream_decode_u32(argp->xdr, &setclientid->se_callback_ident) < 0)
+ return nfserr_bad_xdr;
+
+@@ -1375,10 +1387,9 @@ nfsd4_decode_verify(struct nfsd4_compoundargs *argp, struct nfsd4_verify *verify
+ p = xdr_inline_decode(argp->xdr, verify->ve_attrlen);
+ if (!p)
+ return nfserr_bad_xdr;
+- verify->ve_attrval = svcxdr_tmpalloc(argp, verify->ve_attrlen);
++ verify->ve_attrval = svcxdr_savemem(argp, p, verify->ve_attrlen);
+ if (!verify->ve_attrval)
+ return nfserr_jukebox;
+- memcpy(verify->ve_attrval, p, verify->ve_attrlen);
+
+ return nfs_ok;
+ }
+@@ -2333,10 +2344,9 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
+ p = xdr_inline_decode(argp->xdr, argp->taglen);
+ if (!p)
+ return 0;
+- argp->tag = svcxdr_tmpalloc(argp, argp->taglen);
++ argp->tag = svcxdr_savemem(argp, p, argp->taglen);
+ if (!argp->tag)
+ return 0;
+- memcpy(argp->tag, p, argp->taglen);
+ max_reply += xdr_align_size(argp->taglen);
+ }
+
+@@ -4756,6 +4766,7 @@ nfsd4_encode_read_plus_data(struct nfsd4_compoundres *resp,
+ resp->rqstp->rq_vec, read->rd_vlen, maxcount, eof);
+ if (nfserr)
+ return nfserr;
++ xdr_truncate_encode(xdr, starting_len + 16 + xdr_align_size(*maxcount));
+
+ tmp = htonl(NFS4_CONTENT_DATA);
+ write_bytes_to_xdr_buf(xdr->buf, starting_len, &tmp, 4);
+@@ -4763,6 +4774,10 @@ nfsd4_encode_read_plus_data(struct nfsd4_compoundres *resp,
+ write_bytes_to_xdr_buf(xdr->buf, starting_len + 4, &tmp64, 8);
+ tmp = htonl(*maxcount);
+ write_bytes_to_xdr_buf(xdr->buf, starting_len + 12, &tmp, 4);
++
++ tmp = xdr_zero;
++ write_bytes_to_xdr_buf(xdr->buf, starting_len + 16 + *maxcount, &tmp,
++ xdr_pad_size(*maxcount));
+ return nfs_ok;
+ }
+
+@@ -4855,14 +4870,15 @@ nfsd4_encode_read_plus(struct nfsd4_compoundres *resp, __be32 nfserr,
+ if (nfserr && segments == 0)
+ xdr_truncate_encode(xdr, starting_len);
+ else {
+- tmp = htonl(eof);
+- write_bytes_to_xdr_buf(xdr->buf, starting_len, &tmp, 4);
+- tmp = htonl(segments);
+- write_bytes_to_xdr_buf(xdr->buf, starting_len + 4, &tmp, 4);
+ if (nfserr) {
+ xdr_truncate_encode(xdr, last_segment);
+ nfserr = nfs_ok;
++ eof = 0;
+ }
++ tmp = htonl(eof);
++ write_bytes_to_xdr_buf(xdr->buf, starting_len, &tmp, 4);
++ tmp = htonl(segments);
++ write_bytes_to_xdr_buf(xdr->buf, starting_len + 4, &tmp, 4);
+ }
+
+ return nfserr;
+diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
+index 00384c332f9b..f9c9f4c63cc7 100644
+--- a/fs/nfsd/nfssvc.c
++++ b/fs/nfsd/nfssvc.c
+@@ -33,12 +33,6 @@
+
+ #define NFSDDBG_FACILITY NFSDDBG_SVC
+
+-bool inter_copy_offload_enable;
+-EXPORT_SYMBOL_GPL(inter_copy_offload_enable);
+-module_param(inter_copy_offload_enable, bool, 0644);
+-MODULE_PARM_DESC(inter_copy_offload_enable,
+- "Enable inter server to server copy offload. Default: false");
+-
+ extern struct svc_program nfsd_program;
+ static int nfsd(void *vrqstp);
+ #if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
+diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h
+index a60ff5ce1a37..c300885ae75d 100644
+--- a/fs/nfsd/xdr4.h
++++ b/fs/nfsd/xdr4.h
+@@ -568,7 +568,6 @@ struct nfsd4_copy {
+ struct nfs_fh c_fh;
+ nfs4_stateid stateid;
+ };
+-extern bool inter_copy_offload_enable;
+
+ struct nfsd4_seek {
+ /* request */
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 763b816ba19c..4c55e3aa7e95 100644
--- a/include/linux/efi.h
@@ -2947,6 +4139,32 @@ index 000000000000..4050b6dead75
+ name ? name : "kernel");
+}
+EXPORT_SYMBOL(mark_driver_unsupported);
+diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
+index d5a19413d4f8..c1a62ae7e812 100644
+--- a/kernel/trace/Kconfig
++++ b/kernel/trace/Kconfig
+@@ -538,7 +538,7 @@ config KPROBE_EVENTS
+ config KPROBE_EVENTS_ON_NOTRACE
+ bool "Do NOT protect notrace function from kprobe events"
+ depends on KPROBE_EVENTS
+- depends on KPROBES_ON_FTRACE
++ depends on DYNAMIC_FTRACE
+ default n
+ help
+ This is only for the developers who want to debug ftrace itself
+diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
+index 9c31f42245e9..e6fba1798771 100644
+--- a/kernel/trace/trace_kprobe.c
++++ b/kernel/trace/trace_kprobe.c
+@@ -434,7 +434,7 @@ static int disable_trace_kprobe(struct trace_event_call *call,
+ return 0;
+ }
+
+-#if defined(CONFIG_KPROBES_ON_FTRACE) && \
++#if defined(CONFIG_DYNAMIC_FTRACE) && \
+ !defined(CONFIG_KPROBE_EVENTS_ON_NOTRACE)
+ static bool __within_notrace_func(unsigned long addr)
+ {
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index c0014d3b91c1..c00e9820412a 100644
--- a/mm/kmemleak.c
@@ -2963,6 +4181,123 @@ index c0014d3b91c1..c00e9820412a 100644
kmemleak_initialized = 1;
debugfs_create_file("kmemleak", 0644, NULL, NULL, &kmemleak_fops);
+diff --git a/net/sunrpc/addr.c b/net/sunrpc/addr.c
+index 010dcb876f9d..6e4dbd577a39 100644
+--- a/net/sunrpc/addr.c
++++ b/net/sunrpc/addr.c
+@@ -185,7 +185,7 @@ static int rpc_parse_scope_id(struct net *net, const char *buf,
+ scope_id = dev->ifindex;
+ dev_put(dev);
+ } else {
+- if (kstrtou32(p, 10, &scope_id) == 0) {
++ if (kstrtou32(p, 10, &scope_id) != 0) {
+ kfree(p);
+ return 0;
+ }
+diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
+index b248f2349437..c9766d07eb81 100644
+--- a/net/sunrpc/svcsock.c
++++ b/net/sunrpc/svcsock.c
+@@ -1062,6 +1062,90 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
+ return 0; /* record not complete */
+ }
+
++static int svc_tcp_send_kvec(struct socket *sock, const struct kvec *vec,
++ int flags)
++{
++ return kernel_sendpage(sock, virt_to_page(vec->iov_base),
++ offset_in_page(vec->iov_base),
++ vec->iov_len, flags);
++}
++
++/*
++ * kernel_sendpage() is used exclusively to reduce the number of
++ * copy operations in this path. Therefore the caller must ensure
++ * that the pages backing @xdr are unchanging.
++ *
++ * In addition, the logic assumes that * .bv_len is never larger
++ * than PAGE_SIZE.
++ */
++static int svc_tcp_sendmsg(struct socket *sock, struct msghdr *msg,
++ struct xdr_buf *xdr, rpc_fraghdr marker,
++ unsigned int *sentp)
++{
++ const struct kvec *head = xdr->head;
++ const struct kvec *tail = xdr->tail;
++ struct kvec rm = {
++ .iov_base = &marker,
++ .iov_len = sizeof(marker),
++ };
++ int flags, ret;
++
++ *sentp = 0;
++ xdr_alloc_bvec(xdr, GFP_KERNEL);
++
++ msg->msg_flags = MSG_MORE;
++ ret = kernel_sendmsg(sock, msg, &rm, 1, rm.iov_len);
++ if (ret < 0)
++ return ret;
++ *sentp += ret;
++ if (ret != rm.iov_len)
++ return -EAGAIN;
++
++ flags = head->iov_len < xdr->len ? MSG_MORE | MSG_SENDPAGE_NOTLAST : 0;
++ ret = svc_tcp_send_kvec(sock, head, flags);
++ if (ret < 0)
++ return ret;
++ *sentp += ret;
++ if (ret != head->iov_len)
++ goto out;
++
++ if (xdr->page_len) {
++ unsigned int offset, len, remaining;
++ struct bio_vec *bvec;
++
++ bvec = xdr->bvec;
++ offset = xdr->page_base;
++ remaining = xdr->page_len;
++ flags = MSG_MORE | MSG_SENDPAGE_NOTLAST;
++ while (remaining > 0) {
++ if (remaining <= PAGE_SIZE && tail->iov_len == 0)
++ flags = 0;
++ len = min(remaining, bvec->bv_len);
++ ret = kernel_sendpage(sock, bvec->bv_page,
++ bvec->bv_offset + offset,
++ len, flags);
++ if (ret < 0)
++ return ret;
++ *sentp += ret;
++ if (ret != len)
++ goto out;
++ remaining -= len;
++ offset = 0;
++ bvec++;
++ }
++ }
++
++ if (tail->iov_len) {
++ ret = svc_tcp_send_kvec(sock, tail, 0);
++ if (ret < 0)
++ return ret;
++ *sentp += ret;
++ }
++
++out:
++ return 0;
++}
++
+ /**
+ * svc_tcp_sendto - Send out a reply on a TCP socket
+ * @rqstp: completed svc_rqst
+@@ -1089,7 +1173,7 @@ static int svc_tcp_sendto(struct svc_rqst *rqstp)
+ mutex_lock(&xprt->xpt_mutex);
+ if (svc_xprt_is_dead(xprt))
+ goto out_notconn;
+- err = xprt_sock_sendmsg(svsk->sk_sock, &msg, xdr, 0, marker, &sent);
++ err = svc_tcp_sendmsg(svsk->sk_sock, &msg, xdr, marker, &sent);
+ xdr_free_bvec(xdr);
+ trace_svcsock_tcp_send(xprt, err < 0 ? err : sent);
+ if (err < 0 || sent != (xdr->len + sizeof(marker)))
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index d6c81657d695..c70e5904b7bc 100644
--- a/scripts/mod/modpost.c