commit:     d1e7752942b93dc59f20bbe08a33a697ad6567e0
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Dec 8 12:55:17 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Dec 8 12:55:17 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=d1e77529
Linux patch 4.19.220 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org> 0000_README | 4 + 1219_linux-4.19.220.patch | 1748 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 1752 insertions(+) diff --git a/0000_README b/0000_README index c2e992ef..ec86fab5 100644 --- a/0000_README +++ b/0000_README @@ -915,6 +915,10 @@ Patch: 1218_linux-4.19.219.patch From: https://www.kernel.org Desc: Linux 4.19.219 +Patch: 1219_linux-4.19.220.patch +From: https://www.kernel.org +Desc: Linux 4.19.220 + Patch: 1500_XATTR_USER_PREFIX.patch From: https://bugs.gentoo.org/show_bug.cgi?id=470644 Desc: Support for namespace user.pax.* on tmpfs. diff --git a/1219_linux-4.19.220.patch b/1219_linux-4.19.220.patch new file mode 100644 index 00000000..eb91993c --- /dev/null +++ b/1219_linux-4.19.220.patch @@ -0,0 +1,1748 @@ +diff --git a/Makefile b/Makefile +index 310cc8508b9e8..f243688468d53 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 4 + PATCHLEVEL = 19 +-SUBLEVEL = 219 ++SUBLEVEL = 220 + EXTRAVERSION = + NAME = "People's Front" + +diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile +index 253d7ca714724..55e6e60f984b2 100644 +--- a/arch/parisc/Makefile ++++ b/arch/parisc/Makefile +@@ -17,7 +17,12 @@ + # Mike Shaver, Helge Deller and Martin K. Petersen + # + ++ifdef CONFIG_PARISC_SELF_EXTRACT ++boot := arch/parisc/boot ++KBUILD_IMAGE := $(boot)/bzImage ++else + KBUILD_IMAGE := vmlinuz ++endif + + KBUILD_DEFCONFIG := default_defconfig + +diff --git a/arch/parisc/install.sh b/arch/parisc/install.sh +index 6f68784fea25f..a8c49815f58c8 100644 +--- a/arch/parisc/install.sh ++++ b/arch/parisc/install.sh +@@ -39,6 +39,7 @@ verify "$3" + if [ -n "${INSTALLKERNEL}" ]; then + if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi + if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi ++ if [ -x /usr/sbin/${INSTALLKERNEL} ]; then exec /usr/sbin/${INSTALLKERNEL} "$@"; fi + fi + + # Default install +diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c +index a1e772f909cbf..a923414d3745a 100644 +--- a/arch/parisc/kernel/time.c ++++ b/arch/parisc/kernel/time.c +@@ -245,27 +245,13 @@ void __init time_init(void) + static int __init init_cr16_clocksource(void) + { + /* +- * The cr16 interval timers are not syncronized across CPUs on +- * different sockets, so mark them unstable and lower rating on +- * multi-socket SMP systems. ++ * The cr16 interval timers are not syncronized across CPUs, even if ++ * they share the same socket. 
+ */ + if (num_online_cpus() > 1 && !running_on_qemu) { +- int cpu; +- unsigned long cpu0_loc; +- cpu0_loc = per_cpu(cpu_data, 0).cpu_loc; +- +- for_each_online_cpu(cpu) { +- if (cpu == 0) +- continue; +- if ((cpu0_loc != 0) && +- (cpu0_loc == per_cpu(cpu_data, cpu).cpu_loc)) +- continue; +- +- clocksource_cr16.name = "cr16_unstable"; +- clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE; +- clocksource_cr16.rating = 0; +- break; +- } ++ clocksource_cr16.name = "cr16_unstable"; ++ clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE; ++ clocksource_cr16.rating = 0; + } + + /* XXX: We may want to mark sched_clock stable here if cr16 clocks are +diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c +index e8bfd29bb1f9f..098794fc5dc81 100644 +--- a/arch/s390/kernel/setup.c ++++ b/arch/s390/kernel/setup.c +@@ -703,9 +703,6 @@ static void __init setup_memory(void) + storage_key_init_range(reg->base, reg->base + reg->size); + } + psw_set_key(PAGE_DEFAULT_KEY); +- +- /* Only cosmetics */ +- memblock_enforce_memory_limit(memblock_end_of_DRAM()); + } + + /* +diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c +index 47d0979468727..7bfb9af57e4c2 100644 +--- a/arch/x86/realmode/init.c ++++ b/arch/x86/realmode/init.c +@@ -55,6 +55,7 @@ static void __init setup_real_mode(void) + #ifdef CONFIG_X86_64 + u64 *trampoline_pgd; + u64 efer; ++ int i; + #endif + + base = (unsigned char *)real_mode_header; +@@ -113,8 +114,17 @@ static void __init setup_real_mode(void) + trampoline_header->flags |= TH_FLAGS_SME_ACTIVE; + + trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd); ++ ++ /* Map the real mode stub as virtual == physical */ + trampoline_pgd[0] = trampoline_pgd_entry.pgd; +- trampoline_pgd[511] = init_top_pgt[511].pgd; ++ ++ /* ++ * Include the entirety of the kernel mapping into the trampoline ++ * PGD. This way, all mappings present in the normal kernel page ++ * tables are usable while running on trampoline_pgd. 
++ */ ++ for (i = pgd_index(__PAGE_OFFSET); i < PTRS_PER_PGD; i++) ++ trampoline_pgd[i] = init_top_pgt[i].pgd; + #endif + } + +diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c +index 8df0ec85cc7b9..505920d4530f8 100644 +--- a/drivers/ata/ahci.c ++++ b/drivers/ata/ahci.c +@@ -436,6 +436,7 @@ static const struct pci_device_id ahci_pci_tbl[] = { + /* AMD */ + { PCI_VDEVICE(AMD, 0x7800), board_ahci }, /* AMD Hudson-2 */ + { PCI_VDEVICE(AMD, 0x7900), board_ahci }, /* AMD CZ */ ++ { PCI_VDEVICE(AMD, 0x7901), board_ahci_mobile }, /* AMD Green Sardine */ + /* AMD is using RAID class only for ahci controllers */ + { PCI_VENDOR_ID_AMD, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, + PCI_CLASS_STORAGE_RAID << 8, 0xffffff, board_ahci }, +diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c +index 8b3be0ff91cb4..0dce18838296d 100644 +--- a/drivers/ata/sata_fsl.c ++++ b/drivers/ata/sata_fsl.c +@@ -1399,6 +1399,14 @@ static int sata_fsl_init_controller(struct ata_host *host) + return 0; + } + ++static void sata_fsl_host_stop(struct ata_host *host) ++{ ++ struct sata_fsl_host_priv *host_priv = host->private_data; ++ ++ iounmap(host_priv->hcr_base); ++ kfree(host_priv); ++} ++ + /* + * scsi mid-layer and libata interface structures + */ +@@ -1431,6 +1439,8 @@ static struct ata_port_operations sata_fsl_ops = { + .port_start = sata_fsl_port_start, + .port_stop = sata_fsl_port_stop, + ++ .host_stop = sata_fsl_host_stop, ++ + .pmp_attach = sata_fsl_pmp_attach, + .pmp_detach = sata_fsl_pmp_detach, + }; +@@ -1485,9 +1495,9 @@ static int sata_fsl_probe(struct platform_device *ofdev) + host_priv->ssr_base = ssr_base; + host_priv->csr_base = csr_base; + +- irq = irq_of_parse_and_map(ofdev->dev.of_node, 0); +- if (!irq) { +- dev_err(&ofdev->dev, "invalid irq from platform\n"); ++ irq = platform_get_irq(ofdev, 0); ++ if (irq < 0) { ++ retval = irq; + goto error_exit_with_cleanup; + } + host_priv->irq = irq; +@@ -1562,10 +1572,6 @@ static int sata_fsl_remove(struct platform_device *ofdev) + + ata_host_detach(host); + +- irq_dispose_mapping(host_priv->irq); +- iounmap(host_priv->hcr_base); +- kfree(host_priv); +- + return 0; + } + +diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c +index cb85516e1eb3c..48929df7673b1 100644 +--- a/drivers/char/ipmi/ipmi_msghandler.c ++++ b/drivers/char/ipmi/ipmi_msghandler.c +@@ -219,6 +219,8 @@ struct ipmi_user { + struct work_struct remove_work; + }; + ++static struct workqueue_struct *remove_work_wq; ++ + static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index) + __acquires(user->release_barrier) + { +@@ -1207,7 +1209,7 @@ static void free_user(struct kref *ref) + struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount); + + /* SRCU cleanup must happen in task context. 
*/ +- schedule_work(&user->remove_work); ++ queue_work(remove_work_wq, &user->remove_work); + } + + static void _ipmi_destroy_user(struct ipmi_user *user) +@@ -5090,6 +5092,13 @@ static int ipmi_init_msghandler(void) + + atomic_notifier_chain_register(&panic_notifier_list, &panic_block); + ++ remove_work_wq = create_singlethread_workqueue("ipmi-msghandler-remove-wq"); ++ if (!remove_work_wq) { ++ pr_err("unable to create ipmi-msghandler-remove-wq workqueue"); ++ rv = -ENOMEM; ++ goto out; ++ } ++ + initialized = true; + + out: +@@ -5115,6 +5124,8 @@ static void __exit cleanup_ipmi(void) + int count; + + if (initialized) { ++ destroy_workqueue(remove_work_wq); ++ + atomic_notifier_chain_unregister(&panic_notifier_list, + &panic_block); + +diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c +index 989465e5f9d91..a3666267d5f6d 100644 +--- a/drivers/gpu/drm/msm/msm_debugfs.c ++++ b/drivers/gpu/drm/msm/msm_debugfs.c +@@ -83,6 +83,7 @@ static int msm_gpu_open(struct inode *inode, struct file *file) + goto free_priv; + + pm_runtime_get_sync(&gpu->pdev->dev); ++ msm_gpu_hw_init(gpu); + show_priv->state = gpu->funcs->gpu_state_get(gpu); + pm_runtime_put_sync(&gpu->pdev->dev); + +diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c +index 6feafebf85feb..e352c16087bb3 100644 +--- a/drivers/i2c/busses/i2c-stm32f7.c ++++ b/drivers/i2c/busses/i2c-stm32f7.c +@@ -1379,6 +1379,7 @@ static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data) + { + struct stm32f7_i2c_dev *i2c_dev = data; + struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg; ++ struct stm32_i2c_dma *dma = i2c_dev->dma; + void __iomem *base = i2c_dev->base; + u32 status, mask; + int ret = IRQ_HANDLED; +@@ -1403,6 +1404,10 @@ static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data) + if (status & STM32F7_I2C_ISR_NACKF) { + dev_dbg(i2c_dev->dev, "<%s>: Receive NACK\n", __func__); + writel_relaxed(STM32F7_I2C_ICR_NACKCF, base + STM32F7_I2C_ICR); ++ if (i2c_dev->use_dma) { ++ stm32f7_i2c_disable_dma_req(i2c_dev); ++ dmaengine_terminate_all(dma->chan_using); ++ } + f7_msg->result = -ENXIO; + } + +@@ -1418,7 +1423,7 @@ static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data) + /* Clear STOP flag */ + writel_relaxed(STM32F7_I2C_ICR_STOPCF, base + STM32F7_I2C_ICR); + +- if (i2c_dev->use_dma) { ++ if (i2c_dev->use_dma && !f7_msg->result) { + ret = IRQ_WAKE_THREAD; + } else { + i2c_dev->master_mode = false; +@@ -1431,7 +1436,7 @@ static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data) + if (f7_msg->stop) { + mask = STM32F7_I2C_CR2_STOP; + stm32f7_i2c_set_bits(base + STM32F7_I2C_CR2, mask); +- } else if (i2c_dev->use_dma) { ++ } else if (i2c_dev->use_dma && !f7_msg->result) { + ret = IRQ_WAKE_THREAD; + } else if (f7_msg->smbus) { + stm32f7_i2c_smbus_rep_start(i2c_dev); +@@ -1579,6 +1584,7 @@ static int stm32f7_i2c_xfer(struct i2c_adapter *i2c_adap, + i2c_dev->msg->addr); + if (i2c_dev->use_dma) + dmaengine_terminate_all(dma->chan_using); ++ stm32f7_i2c_wait_free_bus(i2c_dev); + ret = -ETIMEDOUT; + } + +@@ -1629,6 +1635,7 @@ static int stm32f7_i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr, + dev_dbg(dev, "Access to slave 0x%x timed out\n", f7_msg->addr); + if (i2c_dev->use_dma) + dmaengine_terminate_all(dma->chan_using); ++ stm32f7_i2c_wait_free_bus(i2c_dev); + ret = -ETIMEDOUT; + goto clk_free; + } +diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c +index 096ec18e8f15a..49c80bac9ce28 
100644 +--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c ++++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c +@@ -459,6 +459,11 @@ int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self, + goto err_exit; + + if (fw.len == 0xFFFFU) { ++ if (sw.len > sizeof(self->rpc)) { ++ printk(KERN_INFO "Invalid sw len: %x\n", sw.len); ++ err = -EINVAL; ++ goto err_exit; ++ } + err = hw_atl_utils_fw_rpc_call(self, sw.len); + if (err < 0) + goto err_exit; +@@ -469,6 +474,11 @@ int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self, + + if (rpc) { + if (fw.len) { ++ if (fw.len > sizeof(self->rpc)) { ++ printk(KERN_INFO "Invalid fw len: %x\n", fw.len); ++ err = -EINVAL; ++ goto err_exit; ++ } + err = + hw_atl_utils_fw_downld_dwords(self, + self->rpc_addr, +diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c +index c813e6f2b371e..c97fc0e384ca6 100644 +--- a/drivers/net/ethernet/dec/tulip/de4x5.c ++++ b/drivers/net/ethernet/dec/tulip/de4x5.c +@@ -4708,6 +4708,10 @@ type3_infoblock(struct net_device *dev, u_char count, u_char *p) + lp->ibn = 3; + lp->active = *p++; + if (MOTO_SROM_BUG) lp->active = 0; ++ /* if (MOTO_SROM_BUG) statement indicates lp->active could ++ * be 8 (i.e. the size of array lp->phy) */ ++ if (WARN_ON(lp->active >= ARRAY_SIZE(lp->phy))) ++ return -EINVAL; + lp->phy[lp->active].gep = (*p ? p : NULL); p += (2 * (*p) + 1); + lp->phy[lp->active].rst = (*p ? p : NULL); p += (2 * (*p) + 1); + lp->phy[lp->active].mc = get_unaligned_le16(p); p += 2; +@@ -4999,19 +5003,23 @@ mii_get_phy(struct net_device *dev) + } + if ((j == limit) && (i < DE4X5_MAX_MII)) { + for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++); +- lp->phy[k].addr = i; +- lp->phy[k].id = id; +- lp->phy[k].spd.reg = GENERIC_REG; /* ANLPA register */ +- lp->phy[k].spd.mask = GENERIC_MASK; /* 100Mb/s technologies */ +- lp->phy[k].spd.value = GENERIC_VALUE; /* TX & T4, H/F Duplex */ +- lp->mii_cnt++; +- lp->active++; +- printk("%s: Using generic MII device control. If the board doesn't operate,\nplease mail the following dump to the author:\n", dev->name); +- j = de4x5_debug; +- de4x5_debug |= DEBUG_MII; +- de4x5_dbg_mii(dev, k); +- de4x5_debug = j; +- printk("\n"); ++ if (k < DE4X5_MAX_PHY) { ++ lp->phy[k].addr = i; ++ lp->phy[k].id = id; ++ lp->phy[k].spd.reg = GENERIC_REG; /* ANLPA register */ ++ lp->phy[k].spd.mask = GENERIC_MASK; /* 100Mb/s technologies */ ++ lp->phy[k].spd.value = GENERIC_VALUE; /* TX & T4, H/F Duplex */ ++ lp->mii_cnt++; ++ lp->active++; ++ printk("%s: Using generic MII device control. If the board doesn't operate,\nplease mail the following dump to the author:\n", dev->name); ++ j = de4x5_debug; ++ de4x5_debug |= DEBUG_MII; ++ de4x5_dbg_mii(dev, k); ++ de4x5_debug = j; ++ printk("\n"); ++ } else { ++ goto purgatory; ++ } + } + } + purgatory: +diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c +index 16294cd3c9545..4ceacb1c154dc 100644 +--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c ++++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c +@@ -402,6 +402,10 @@ static void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, + return; + + if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) { ++ /* DSAF_MAX_PORT_NUM is 6, but DSAF_GE_NUM is 8. 
++ We need check to prevent array overflow */ ++ if (port >= DSAF_MAX_PORT_NUM) ++ return; + reg_val_1 = 0x1 << port; + port_rst_off = dsaf_dev->mac_cb[port]->port_rst_off; + /* there is difference between V1 and V2 in register.*/ +diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +index 943bd189e895e..14d46afd21cb5 100644 +--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c ++++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +@@ -2282,9 +2282,14 @@ int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv, + bool carry_xdp_prog) + { + struct bpf_prog *xdp_prog; +- int i, t; ++ int i, t, ret; + +- mlx4_en_copy_priv(tmp, priv, prof); ++ ret = mlx4_en_copy_priv(tmp, priv, prof); ++ if (ret) { ++ en_warn(priv, "%s: mlx4_en_copy_priv() failed, return\n", ++ __func__); ++ return ret; ++ } + + if (mlx4_en_alloc_resources(tmp)) { + en_warn(priv, +diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c +index 44171d7bb434c..5117864738a7e 100644 +--- a/drivers/net/ethernet/natsemi/xtsonic.c ++++ b/drivers/net/ethernet/natsemi/xtsonic.c +@@ -120,7 +120,7 @@ static const struct net_device_ops xtsonic_netdev_ops = { + .ndo_set_mac_address = eth_mac_addr, + }; + +-static int __init sonic_probe1(struct net_device *dev) ++static int sonic_probe1(struct net_device *dev) + { + unsigned int silicon_revision; + struct sonic_local *lp = netdev_priv(dev); +diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +index a15845e511b2c..d857d44547a53 100644 +--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c ++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +@@ -1079,8 +1079,14 @@ static int qlcnic_83xx_add_rings(struct qlcnic_adapter *adapter) + sds_mbx_size = sizeof(struct qlcnic_sds_mbx); + context_id = recv_ctx->context_id; + num_sds = adapter->drv_sds_rings - QLCNIC_MAX_SDS_RINGS; +- ahw->hw_ops->alloc_mbx_args(&cmd, adapter, +- QLCNIC_CMD_ADD_RCV_RINGS); ++ err = ahw->hw_ops->alloc_mbx_args(&cmd, adapter, ++ QLCNIC_CMD_ADD_RCV_RINGS); ++ if (err) { ++ dev_err(&adapter->pdev->dev, ++ "Failed to alloc mbx args %d\n", err); ++ return err; ++ } ++ + cmd.req.arg[1] = 0 | (num_sds << 8) | (context_id << 16); + + /* set up status rings, mbx 2-81 */ +diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c +index 75984c799afc7..b328207c04555 100644 +--- a/drivers/net/usb/lan78xx.c ++++ b/drivers/net/usb/lan78xx.c +@@ -2152,7 +2152,7 @@ static int lan78xx_phy_init(struct lan78xx_net *dev) + if (dev->domain_data.phyirq > 0) + phydev->irq = dev->domain_data.phyirq; + else +- phydev->irq = 0; ++ phydev->irq = PHY_POLL; + netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq); + + /* set to AUTOMDIX */ +diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c +index 93899a7be9c57..857b6f80a58d8 100644 +--- a/drivers/net/vrf.c ++++ b/drivers/net/vrf.c +@@ -210,6 +210,7 @@ static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb, + /* strip the ethernet header added for pass through VRF device */ + __skb_pull(skb, skb_network_offset(skb)); + ++ memset(IP6CB(skb), 0, sizeof(*IP6CB(skb))); + ret = vrf_ip6_local_out(net, skb->sk, skb); + if (unlikely(net_xmit_eval(ret))) + dev->stats.tx_errors++; +@@ -291,6 +292,7 @@ static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb, + RT_SCOPE_LINK); + } + ++ memset(IPCB(skb), 0, sizeof(*IPCB(skb))); + ret = vrf_ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb); + if 
(unlikely(net_xmit_eval(ret))) + vrf_dev->stats.tx_errors++; +diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c +index fa8bcbe3d2762..912ce5cb2f084 100644 +--- a/drivers/platform/x86/thinkpad_acpi.c ++++ b/drivers/platform/x86/thinkpad_acpi.c +@@ -1198,15 +1198,6 @@ static int tpacpi_rfk_update_swstate(const struct tpacpi_rfk *tp_rfk) + return status; + } + +-/* Query FW and update rfkill sw state for all rfkill switches */ +-static void tpacpi_rfk_update_swstate_all(void) +-{ +- unsigned int i; +- +- for (i = 0; i < TPACPI_RFK_SW_MAX; i++) +- tpacpi_rfk_update_swstate(tpacpi_rfkill_switches[i]); +-} +- + /* + * Sync the HW-blocking state of all rfkill switches, + * do notice it causes the rfkill core to schedule uevents +@@ -3145,9 +3136,6 @@ static void tpacpi_send_radiosw_update(void) + if (wlsw == TPACPI_RFK_RADIO_OFF) + tpacpi_rfk_update_hwblock_state(true); + +- /* Sync sw blocking state */ +- tpacpi_rfk_update_swstate_all(); +- + /* Sync hw blocking state last if it is hw-unblocked */ + if (wlsw == TPACPI_RFK_RADIO_ON) + tpacpi_rfk_update_hwblock_state(false); +diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c +index c06e648a415b5..79581771e6f61 100644 +--- a/drivers/scsi/scsi_transport_iscsi.c ++++ b/drivers/scsi/scsi_transport_iscsi.c +@@ -1892,12 +1892,12 @@ static void session_recovery_timedout(struct work_struct *work) + } + spin_unlock_irqrestore(&session->lock, flags); + +- if (session->transport->session_recovery_timedout) +- session->transport->session_recovery_timedout(session); +- + ISCSI_DBG_TRANS_SESSION(session, "Unblocking SCSI target\n"); + scsi_target_unblock(&session->dev, SDEV_TRANSPORT_OFFLINE); + ISCSI_DBG_TRANS_SESSION(session, "Completed unblocking SCSI target\n"); ++ ++ if (session->transport->session_recovery_timedout) ++ session->transport->session_recovery_timedout(session); + } + + static void __iscsi_unblock_session(struct work_struct *work) +diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c +index ae60599c462b9..6c7825c581b5f 100644 +--- a/drivers/thermal/thermal_core.c ++++ b/drivers/thermal/thermal_core.c +@@ -454,6 +454,8 @@ static void thermal_zone_device_init(struct thermal_zone_device *tz) + { + struct thermal_instance *pos; + tz->temperature = THERMAL_TEMP_INVALID; ++ tz->prev_low_trip = -INT_MAX; ++ tz->prev_high_trip = INT_MAX; + list_for_each_entry(pos, &tz->thermal_instances, tz_node) + pos->initialized = false; + } +diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c +index 1306ce5c5d9bf..3d63e9a71c376 100644 +--- a/drivers/tty/serial/amba-pl011.c ++++ b/drivers/tty/serial/amba-pl011.c +@@ -2773,6 +2773,7 @@ MODULE_DEVICE_TABLE(of, sbsa_uart_of_match); + + static const struct acpi_device_id sbsa_uart_acpi_match[] = { + { "ARMH0011", 0 }, ++ { "ARMHB000", 0 }, + {}, + }; + MODULE_DEVICE_TABLE(acpi, sbsa_uart_acpi_match); +diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c +index 4caeca67fd143..a40827956bd15 100644 +--- a/drivers/tty/serial/msm_serial.c ++++ b/drivers/tty/serial/msm_serial.c +@@ -603,6 +603,9 @@ static void msm_start_rx_dma(struct msm_port *msm_port) + u32 val; + int ret; + ++ if (IS_ENABLED(CONFIG_CONSOLE_POLL)) ++ return; ++ + if (!dma->chan) + return; + +diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c +index 5ed1327d284f9..63aefe7e91be1 100644 +--- a/drivers/tty/serial/serial_core.c ++++ b/drivers/tty/serial/serial_core.c +@@ -1527,6 
+1527,7 @@ static void uart_tty_port_shutdown(struct tty_port *port) + { + struct uart_state *state = container_of(port, struct uart_state, port); + struct uart_port *uport = uart_port_check(state); ++ char *buf; + + /* + * At this point, we stop accepting input. To do this, we +@@ -1548,8 +1549,18 @@ static void uart_tty_port_shutdown(struct tty_port *port) + */ + tty_port_set_suspended(port, 0); + +- uart_change_pm(state, UART_PM_STATE_OFF); ++ /* ++ * Free the transmit buffer. ++ */ ++ spin_lock_irq(&uport->lock); ++ buf = state->xmit.buf; ++ state->xmit.buf = NULL; ++ spin_unlock_irq(&uport->lock); + ++ if (buf) ++ free_page((unsigned long)buf); ++ ++ uart_change_pm(state, UART_PM_STATE_OFF); + } + + static void uart_wait_until_sent(struct tty_struct *tty, int timeout) +diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c +index d97544fd339b1..e170c5b4d6f0c 100644 +--- a/drivers/usb/core/quirks.c ++++ b/drivers/usb/core/quirks.c +@@ -435,6 +435,9 @@ static const struct usb_device_id usb_quirk_list[] = { + { USB_DEVICE(0x1532, 0x0116), .driver_info = + USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL }, + ++ /* Lenovo Powered USB-C Travel Hub (4X90S92381, RTL8153 GigE) */ ++ { USB_DEVICE(0x17ef, 0x721e), .driver_info = USB_QUIRK_NO_LPM }, ++ + /* Lenovo ThinkCenter A630Z TI024Gen3 usb-audio */ + { USB_DEVICE(0x17ef, 0xa012), .driver_info = + USB_QUIRK_DISCONNECT_SUSPEND }, +diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c +index 04c04328d075f..15e3bf8c9e830 100644 +--- a/drivers/usb/host/xhci-ring.c ++++ b/drivers/usb/host/xhci-ring.c +@@ -339,7 +339,9 @@ static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci, + /* Must be called with xhci->lock held, releases and aquires lock back */ + static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags) + { +- u32 temp_32; ++ struct xhci_segment *new_seg = xhci->cmd_ring->deq_seg; ++ union xhci_trb *new_deq = xhci->cmd_ring->dequeue; ++ u64 crcr; + int ret; + + xhci_dbg(xhci, "Abort command ring\n"); +@@ -348,13 +350,18 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags) + + /* + * The control bits like command stop, abort are located in lower +- * dword of the command ring control register. Limit the write +- * to the lower dword to avoid corrupting the command ring pointer +- * in case if the command ring is stopped by the time upper dword +- * is written. ++ * dword of the command ring control register. ++ * Some controllers require all 64 bits to be written to abort the ring. ++ * Make sure the upper dword is valid, pointing to the next command, ++ * avoiding corrupting the command ring pointer in case the command ring ++ * is stopped by the time the upper dword is written. + */ +- temp_32 = readl(&xhci->op_regs->cmd_ring); +- writel(temp_32 | CMD_RING_ABORT, &xhci->op_regs->cmd_ring); ++ next_trb(xhci, NULL, &new_seg, &new_deq); ++ if (trb_is_link(new_deq)) ++ next_trb(xhci, NULL, &new_seg, &new_deq); ++ ++ crcr = xhci_trb_virt_to_dma(new_seg, new_deq); ++ xhci_write_64(xhci, crcr | CMD_RING_ABORT, &xhci->op_regs->cmd_ring); + + /* Section 4.6.1.2 of xHCI 1.0 spec says software should also time the + * completion of the Command Abort operation. If CRR is not negated in 5 +diff --git a/drivers/usb/typec/tcpm.c b/drivers/usb/typec/tcpm.c +index af41d4dce3adb..228d88c7bdb26 100644 +--- a/drivers/usb/typec/tcpm.c ++++ b/drivers/usb/typec/tcpm.c +@@ -3098,11 +3098,7 @@ static void run_state_machine(struct tcpm_port *port) + tcpm_try_src(port) ? 
SRC_TRY + : SNK_ATTACHED, + 0); +- else +- /* Wait for VBUS, but not forever */ +- tcpm_set_state(port, PORT_RESET, PD_T_PS_SOURCE_ON); + break; +- + case SRC_TRY: + port->try_src_count++; + tcpm_set_cc(port, tcpm_rp_cc(port)); +diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c +index a992d922b3a71..23f15f42e5cb6 100644 +--- a/drivers/video/console/vgacon.c ++++ b/drivers/video/console/vgacon.c +@@ -370,11 +370,17 @@ static void vgacon_init(struct vc_data *c, int init) + struct uni_pagedir *p; + + /* +- * We cannot be loaded as a module, therefore init is always 1, +- * but vgacon_init can be called more than once, and init will +- * not be 1. ++ * We cannot be loaded as a module, therefore init will be 1 ++ * if we are the default console, however if we are a fallback ++ * console, for example if fbcon has failed registration, then ++ * init will be 0, so we need to make sure our boot parameters ++ * have been copied to the console structure for vgacon_resize ++ * ultimately called by vc_resize. Any subsequent calls to ++ * vgacon_init init will have init set to 0 too. + */ + c->vc_can_do_color = vga_can_do_color; ++ c->vc_scan_lines = vga_scan_lines; ++ c->vc_font.height = c->vc_cell_height = vga_video_font_height; + + /* set dimensions manually if init != 0 since vc_resize() will fail */ + if (init) { +@@ -383,8 +389,6 @@ static void vgacon_init(struct vc_data *c, int init) + } else + vc_resize(c, vga_video_num_columns, vga_video_num_lines); + +- c->vc_scan_lines = vga_scan_lines; +- c->vc_font.height = c->vc_cell_height = vga_video_font_height; + c->vc_complement_mask = 0x7700; + if (vga_512_chars) + c->vc_hi_font_mask = 0x0800; +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c +index c326535d5a80a..2ac920bdf4df5 100644 +--- a/fs/btrfs/disk-io.c ++++ b/fs/btrfs/disk-io.c +@@ -3579,11 +3579,23 @@ static void btrfs_end_empty_barrier(struct bio *bio) + */ + static void write_dev_flush(struct btrfs_device *device) + { +- struct request_queue *q = bdev_get_queue(device->bdev); + struct bio *bio = device->flush_bio; + ++#ifndef CONFIG_BTRFS_FS_CHECK_INTEGRITY ++ /* ++ * When a disk has write caching disabled, we skip submission of a bio ++ * with flush and sync requests before writing the superblock, since ++ * it's not needed. However when the integrity checker is enabled, this ++ * results in reports that there are metadata blocks referred by a ++ * superblock that were not properly flushed. So don't skip the bio ++ * submission only when the integrity checker is enabled for the sake ++ * of simplicity, since this is a debug tool and not meant for use in ++ * non-debug builds. 
++ */ ++ struct request_queue *q = bdev_get_queue(device->bdev); + if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags)) + return; ++#endif + + bio_reset(bio); + bio->bi_end_io = btrfs_end_empty_barrier; +diff --git a/fs/file.c b/fs/file.c +index 3762a3f136fda..9a6a3bba53af1 100644 +--- a/fs/file.c ++++ b/fs/file.c +@@ -677,7 +677,7 @@ void do_close_on_exec(struct files_struct *files) + spin_unlock(&files->file_lock); + } + +-static struct file *__fget(unsigned int fd, fmode_t mask) ++static struct file *__fget(unsigned int fd, fmode_t mask, unsigned int refs) + { + struct files_struct *files = current->files; + struct file *file; +@@ -692,23 +692,32 @@ loop: + */ + if (file->f_mode & mask) + file = NULL; +- else if (!get_file_rcu(file)) ++ else if (!get_file_rcu_many(file, refs)) + goto loop; ++ else if (__fcheck_files(files, fd) != file) { ++ fput_many(file, refs); ++ goto loop; ++ } + } + rcu_read_unlock(); + + return file; + } + ++struct file *fget_many(unsigned int fd, unsigned int refs) ++{ ++ return __fget(fd, FMODE_PATH, refs); ++} ++ + struct file *fget(unsigned int fd) + { +- return __fget(fd, FMODE_PATH); ++ return __fget(fd, FMODE_PATH, 1); + } + EXPORT_SYMBOL(fget); + + struct file *fget_raw(unsigned int fd) + { +- return __fget(fd, 0); ++ return __fget(fd, 0, 1); + } + EXPORT_SYMBOL(fget_raw); + +@@ -739,7 +748,7 @@ static unsigned long __fget_light(unsigned int fd, fmode_t mask) + return 0; + return (unsigned long)file; + } else { +- file = __fget(fd, mask); ++ file = __fget(fd, mask, 1); + if (!file) + return 0; + return FDPUT_FPUT | (unsigned long)file; +diff --git a/fs/file_table.c b/fs/file_table.c +index e49af4caf15d9..6a715639728da 100644 +--- a/fs/file_table.c ++++ b/fs/file_table.c +@@ -326,9 +326,9 @@ void flush_delayed_fput(void) + + static DECLARE_DELAYED_WORK(delayed_fput_work, delayed_fput); + +-void fput(struct file *file) ++void fput_many(struct file *file, unsigned int refs) + { +- if (atomic_long_dec_and_test(&file->f_count)) { ++ if (atomic_long_sub_and_test(refs, &file->f_count)) { + struct task_struct *task = current; + + if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) { +@@ -347,6 +347,11 @@ void fput(struct file *file) + } + } + ++void fput(struct file *file) ++{ ++ fput_many(file, 1); ++} ++ + /* + * synchronous analog of fput(); for kernel threads that might be needed + * in some umount() (and thus can't use flush_delayed_fput() without +diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c +index 43f53020553b5..53ba5019ad063 100644 +--- a/fs/gfs2/bmap.c ++++ b/fs/gfs2/bmap.c +@@ -943,7 +943,7 @@ do_alloc: + else if (height == ip->i_height) + ret = gfs2_hole_size(inode, lblock, len, mp, iomap); + else +- iomap->length = size - pos; ++ iomap->length = size - iomap->offset; + } else if (flags & IOMAP_WRITE) { + u64 alloc_size; + +diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c +index be252795a6f70..5b1d452e640bc 100644 +--- a/fs/nfs/nfs42proc.c ++++ b/fs/nfs/nfs42proc.c +@@ -295,8 +295,9 @@ static ssize_t _nfs42_proc_copy(struct file *src, + goto out; + } + +- truncate_pagecache_range(dst_inode, pos_dst, +- pos_dst + res->write_res.count); ++ WARN_ON_ONCE(invalidate_inode_pages2_range(dst_inode->i_mapping, ++ pos_dst >> PAGE_SHIFT, ++ (pos_dst + res->write_res.count - 1) >> PAGE_SHIFT)); + + status = res->write_res.count; + out: +diff --git a/include/linux/file.h b/include/linux/file.h +index 6b2fb032416ca..3fcddff56bc4b 100644 +--- a/include/linux/file.h ++++ b/include/linux/file.h +@@ -13,6 +13,7 @@ + struct file; + + extern void fput(struct file *); 
++extern void fput_many(struct file *, unsigned int); + + struct file_operations; + struct vfsmount; +@@ -44,6 +45,7 @@ static inline void fdput(struct fd fd) + } + + extern struct file *fget(unsigned int fd); ++extern struct file *fget_many(unsigned int fd, unsigned int refs); + extern struct file *fget_raw(unsigned int fd); + extern unsigned long __fdget(unsigned int fd); + extern unsigned long __fdget_raw(unsigned int fd); +diff --git a/include/linux/fs.h b/include/linux/fs.h +index b6a955ba6173a..86f884e78b6b9 100644 +--- a/include/linux/fs.h ++++ b/include/linux/fs.h +@@ -942,7 +942,9 @@ static inline struct file *get_file(struct file *f) + atomic_long_inc(&f->f_count); + return f; + } +-#define get_file_rcu(x) atomic_long_inc_not_zero(&(x)->f_count) ++#define get_file_rcu_many(x, cnt) \ ++ atomic_long_add_unless(&(x)->f_count, (cnt), 0) ++#define get_file_rcu(x) get_file_rcu_many((x), 1) + #define fput_atomic(x) atomic_long_add_unless(&(x)->f_count, -1, 1) + #define file_count(x) atomic_long_read(&(x)->f_count) + +diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h +index 6ab8c1bada3fc..36ffbe330abea 100644 +--- a/include/linux/ipc_namespace.h ++++ b/include/linux/ipc_namespace.h +@@ -129,6 +129,16 @@ static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns) + return ns; + } + ++static inline struct ipc_namespace *get_ipc_ns_not_zero(struct ipc_namespace *ns) ++{ ++ if (ns) { ++ if (refcount_inc_not_zero(&ns->count)) ++ return ns; ++ } ++ ++ return NULL; ++} ++ + extern void put_ipc_ns(struct ipc_namespace *ns); + #else + static inline struct ipc_namespace *copy_ipcs(unsigned long flags, +@@ -145,6 +155,11 @@ static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns) + return ns; + } + ++static inline struct ipc_namespace *get_ipc_ns_not_zero(struct ipc_namespace *ns) ++{ ++ return ns; ++} ++ + static inline void put_ipc_ns(struct ipc_namespace *ns) + { + } +diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h +index c28204e22b544..304e7a0f6b165 100644 +--- a/include/linux/kprobes.h ++++ b/include/linux/kprobes.h +@@ -168,6 +168,8 @@ struct kretprobe { + raw_spinlock_t lock; + }; + ++#define KRETPROBE_MAX_DATA_SIZE 4096 ++ + struct kretprobe_instance { + struct hlist_node hlist; + struct kretprobe *rp; +diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h +index fbd689c15974e..50ab7c8fd3090 100644 +--- a/include/linux/netdevice.h ++++ b/include/linux/netdevice.h +@@ -3840,7 +3840,8 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits) + static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu) + { + spin_lock(&txq->_xmit_lock); +- txq->xmit_lock_owner = cpu; ++ /* Pairs with READ_ONCE() in __dev_queue_xmit() */ ++ WRITE_ONCE(txq->xmit_lock_owner, cpu); + } + + static inline bool __netif_tx_acquire(struct netdev_queue *txq) +@@ -3857,26 +3858,32 @@ static inline void __netif_tx_release(struct netdev_queue *txq) + static inline void __netif_tx_lock_bh(struct netdev_queue *txq) + { + spin_lock_bh(&txq->_xmit_lock); +- txq->xmit_lock_owner = smp_processor_id(); ++ /* Pairs with READ_ONCE() in __dev_queue_xmit() */ ++ WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id()); + } + + static inline bool __netif_tx_trylock(struct netdev_queue *txq) + { + bool ok = spin_trylock(&txq->_xmit_lock); +- if (likely(ok)) +- txq->xmit_lock_owner = smp_processor_id(); ++ ++ if (likely(ok)) { ++ /* Pairs with READ_ONCE() in __dev_queue_xmit() */ ++ WRITE_ONCE(txq->xmit_lock_owner, 
smp_processor_id()); ++ } + return ok; + } + + static inline void __netif_tx_unlock(struct netdev_queue *txq) + { +- txq->xmit_lock_owner = -1; ++ /* Pairs with READ_ONCE() in __dev_queue_xmit() */ ++ WRITE_ONCE(txq->xmit_lock_owner, -1); + spin_unlock(&txq->_xmit_lock); + } + + static inline void __netif_tx_unlock_bh(struct netdev_queue *txq) + { +- txq->xmit_lock_owner = -1; ++ /* Pairs with READ_ONCE() in __dev_queue_xmit() */ ++ WRITE_ONCE(txq->xmit_lock_owner, -1); + spin_unlock_bh(&txq->_xmit_lock); + } + +diff --git a/include/linux/of_clk.h b/include/linux/of_clk.h +index b27da9f164cbd..c86fcad23fc21 100644 +--- a/include/linux/of_clk.h ++++ b/include/linux/of_clk.h +@@ -6,6 +6,9 @@ + #ifndef __LINUX_OF_CLK_H + #define __LINUX_OF_CLK_H + ++struct device_node; ++struct of_device_id; ++ + #if defined(CONFIG_COMMON_CLK) && defined(CONFIG_OF) + + unsigned int of_clk_get_parent_count(struct device_node *np); +diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h +index 44c6f15800ff5..91401309b1aa2 100644 +--- a/include/linux/sched/task.h ++++ b/include/linux/sched/task.h +@@ -136,7 +136,7 @@ static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t) + * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring + * subscriptions and synchronises with wait4(). Also used in procfs. Also + * pins the final release of task.io_context. Also protects ->cpuset and +- * ->cgroup.subsys[]. And ->vfork_done. ++ * ->cgroup.subsys[]. And ->vfork_done. And ->sysvshm.shm_clist. + * + * Nests both inside and outside of read_lock(&tasklist_lock). + * It must not be nested with write_lock_irq(&tasklist_lock), +diff --git a/include/linux/siphash.h b/include/linux/siphash.h +index bf21591a9e5e6..0cda61855d907 100644 +--- a/include/linux/siphash.h ++++ b/include/linux/siphash.h +@@ -27,9 +27,7 @@ static inline bool siphash_key_is_zero(const siphash_key_t *key) + } + + u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key); +-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key); +-#endif + + u64 siphash_1u64(const u64 a, const siphash_key_t *key); + u64 siphash_2u64(const u64 a, const u64 b, const siphash_key_t *key); +@@ -82,10 +80,9 @@ static inline u64 ___siphash_aligned(const __le64 *data, size_t len, + static inline u64 siphash(const void *data, size_t len, + const siphash_key_t *key) + { +-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +- if (!IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT)) ++ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || ++ !IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT)) + return __siphash_unaligned(data, len, key); +-#endif + return ___siphash_aligned(data, len, key); + } + +@@ -96,10 +93,8 @@ typedef struct { + + u32 __hsiphash_aligned(const void *data, size_t len, + const hsiphash_key_t *key); +-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + u32 __hsiphash_unaligned(const void *data, size_t len, + const hsiphash_key_t *key); +-#endif + + u32 hsiphash_1u32(const u32 a, const hsiphash_key_t *key); + u32 hsiphash_2u32(const u32 a, const u32 b, const hsiphash_key_t *key); +@@ -135,10 +130,9 @@ static inline u32 ___hsiphash_aligned(const __le32 *data, size_t len, + static inline u32 hsiphash(const void *data, size_t len, + const hsiphash_key_t *key) + { +-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +- if (!IS_ALIGNED((unsigned long)data, HSIPHASH_ALIGNMENT)) ++ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || ++ 
!IS_ALIGNED((unsigned long)data, HSIPHASH_ALIGNMENT)) + return __hsiphash_unaligned(data, len, key); +-#endif + return ___hsiphash_aligned(data, len, key); + } + +diff --git a/ipc/shm.c b/ipc/shm.c +index 1c65fb357395e..ba99f48c6e2bc 100644 +--- a/ipc/shm.c ++++ b/ipc/shm.c +@@ -62,9 +62,18 @@ struct shmid_kernel /* private to the kernel */ + struct pid *shm_lprid; + struct user_struct *mlock_user; + +- /* The task created the shm object. NULL if the task is dead. */ ++ /* ++ * The task created the shm object, for ++ * task_lock(shp->shm_creator) ++ */ + struct task_struct *shm_creator; +- struct list_head shm_clist; /* list by creator */ ++ ++ /* ++ * List by creator. task_lock(->shm_creator) required for read/write. ++ * If list_empty(), then the creator is dead already. ++ */ ++ struct list_head shm_clist; ++ struct ipc_namespace *ns; + } __randomize_layout; + + /* shm_mode upper byte flags */ +@@ -115,6 +124,7 @@ static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp) + struct shmid_kernel *shp; + + shp = container_of(ipcp, struct shmid_kernel, shm_perm); ++ WARN_ON(ns != shp->ns); + + if (shp->shm_nattch) { + shp->shm_perm.mode |= SHM_DEST; +@@ -225,10 +235,43 @@ static void shm_rcu_free(struct rcu_head *head) + kvfree(shp); + } + +-static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s) ++/* ++ * It has to be called with shp locked. ++ * It must be called before ipc_rmid() ++ */ ++static inline void shm_clist_rm(struct shmid_kernel *shp) + { +- list_del(&s->shm_clist); +- ipc_rmid(&shm_ids(ns), &s->shm_perm); ++ struct task_struct *creator; ++ ++ /* ensure that shm_creator does not disappear */ ++ rcu_read_lock(); ++ ++ /* ++ * A concurrent exit_shm may do a list_del_init() as well. ++ * Just do nothing if exit_shm already did the work ++ */ ++ if (!list_empty(&shp->shm_clist)) { ++ /* ++ * shp->shm_creator is guaranteed to be valid *only* ++ * if shp->shm_clist is not empty. ++ */ ++ creator = shp->shm_creator; ++ ++ task_lock(creator); ++ /* ++ * list_del_init() is a nop if the entry was already removed ++ * from the list. ++ */ ++ list_del_init(&shp->shm_clist); ++ task_unlock(creator); ++ } ++ rcu_read_unlock(); ++} ++ ++static inline void shm_rmid(struct shmid_kernel *s) ++{ ++ shm_clist_rm(s); ++ ipc_rmid(&shm_ids(s->ns), &s->shm_perm); + } + + +@@ -283,7 +326,7 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp) + shm_file = shp->shm_file; + shp->shm_file = NULL; + ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT; +- shm_rmid(ns, shp); ++ shm_rmid(shp); + shm_unlock(shp); + if (!is_file_hugepages(shm_file)) + shmem_lock(shm_file, 0, shp->mlock_user); +@@ -306,10 +349,10 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp) + * + * 2) sysctl kernel.shm_rmid_forced is set to 1. 
+ */ +-static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp) ++static bool shm_may_destroy(struct shmid_kernel *shp) + { + return (shp->shm_nattch == 0) && +- (ns->shm_rmid_forced || ++ (shp->ns->shm_rmid_forced || + (shp->shm_perm.mode & SHM_DEST)); + } + +@@ -340,7 +383,7 @@ static void shm_close(struct vm_area_struct *vma) + ipc_update_pid(&shp->shm_lprid, task_tgid(current)); + shp->shm_dtim = ktime_get_real_seconds(); + shp->shm_nattch--; +- if (shm_may_destroy(ns, shp)) ++ if (shm_may_destroy(shp)) + shm_destroy(ns, shp); + else + shm_unlock(shp); +@@ -361,10 +404,10 @@ static int shm_try_destroy_orphaned(int id, void *p, void *data) + * + * As shp->* are changed under rwsem, it's safe to skip shp locking. + */ +- if (shp->shm_creator != NULL) ++ if (!list_empty(&shp->shm_clist)) + return 0; + +- if (shm_may_destroy(ns, shp)) { ++ if (shm_may_destroy(shp)) { + shm_lock_by_ptr(shp); + shm_destroy(ns, shp); + } +@@ -382,48 +425,97 @@ void shm_destroy_orphaned(struct ipc_namespace *ns) + /* Locking assumes this will only be called with task == current */ + void exit_shm(struct task_struct *task) + { +- struct ipc_namespace *ns = task->nsproxy->ipc_ns; +- struct shmid_kernel *shp, *n; ++ for (;;) { ++ struct shmid_kernel *shp; ++ struct ipc_namespace *ns; + +- if (list_empty(&task->sysvshm.shm_clist)) +- return; ++ task_lock(task); ++ ++ if (list_empty(&task->sysvshm.shm_clist)) { ++ task_unlock(task); ++ break; ++ } ++ ++ shp = list_first_entry(&task->sysvshm.shm_clist, struct shmid_kernel, ++ shm_clist); + +- /* +- * If kernel.shm_rmid_forced is not set then only keep track of +- * which shmids are orphaned, so that a later set of the sysctl +- * can clean them up. +- */ +- if (!ns->shm_rmid_forced) { +- down_read(&shm_ids(ns).rwsem); +- list_for_each_entry(shp, &task->sysvshm.shm_clist, shm_clist) +- shp->shm_creator = NULL; + /* +- * Only under read lock but we are only called on current +- * so no entry on the list will be shared. ++ * 1) Get pointer to the ipc namespace. It is worth to say ++ * that this pointer is guaranteed to be valid because ++ * shp lifetime is always shorter than namespace lifetime ++ * in which shp lives. ++ * We taken task_lock it means that shp won't be freed. + */ +- list_del(&task->sysvshm.shm_clist); +- up_read(&shm_ids(ns).rwsem); +- return; +- } ++ ns = shp->ns; + +- /* +- * Destroy all already created segments, that were not yet mapped, +- * and mark any mapped as orphan to cover the sysctl toggling. +- * Destroy is skipped if shm_may_destroy() returns false. +- */ +- down_write(&shm_ids(ns).rwsem); +- list_for_each_entry_safe(shp, n, &task->sysvshm.shm_clist, shm_clist) { +- shp->shm_creator = NULL; ++ /* ++ * 2) If kernel.shm_rmid_forced is not set then only keep track of ++ * which shmids are orphaned, so that a later set of the sysctl ++ * can clean them up. ++ */ ++ if (!ns->shm_rmid_forced) ++ goto unlink_continue; + +- if (shm_may_destroy(ns, shp)) { +- shm_lock_by_ptr(shp); +- shm_destroy(ns, shp); ++ /* ++ * 3) get a reference to the namespace. ++ * The refcount could be already 0. If it is 0, then ++ * the shm objects will be free by free_ipc_work(). ++ */ ++ ns = get_ipc_ns_not_zero(ns); ++ if (!ns) { ++unlink_continue: ++ list_del_init(&shp->shm_clist); ++ task_unlock(task); ++ continue; + } +- } + +- /* Remove the list head from any segments still attached. */ +- list_del(&task->sysvshm.shm_clist); +- up_write(&shm_ids(ns).rwsem); ++ /* ++ * 4) get a reference to shp. 
++ * This cannot fail: shm_clist_rm() is called before ++ * ipc_rmid(), thus the refcount cannot be 0. ++ */ ++ WARN_ON(!ipc_rcu_getref(&shp->shm_perm)); ++ ++ /* ++ * 5) unlink the shm segment from the list of segments ++ * created by current. ++ * This must be done last. After unlinking, ++ * only the refcounts obtained above prevent IPC_RMID ++ * from destroying the segment or the namespace. ++ */ ++ list_del_init(&shp->shm_clist); ++ ++ task_unlock(task); ++ ++ /* ++ * 6) we have all references ++ * Thus lock & if needed destroy shp. ++ */ ++ down_write(&shm_ids(ns).rwsem); ++ shm_lock_by_ptr(shp); ++ /* ++ * rcu_read_lock was implicitly taken in shm_lock_by_ptr, it's ++ * safe to call ipc_rcu_putref here ++ */ ++ ipc_rcu_putref(&shp->shm_perm, shm_rcu_free); ++ ++ if (ipc_valid_object(&shp->shm_perm)) { ++ if (shm_may_destroy(shp)) ++ shm_destroy(ns, shp); ++ else ++ shm_unlock(shp); ++ } else { ++ /* ++ * Someone else deleted the shp from namespace ++ * idr/kht while we have waited. ++ * Just unlock and continue. ++ */ ++ shm_unlock(shp); ++ } ++ ++ up_write(&shm_ids(ns).rwsem); ++ put_ipc_ns(ns); /* paired with get_ipc_ns_not_zero */ ++ } + } + + static vm_fault_t shm_fault(struct vm_fault *vmf) +@@ -680,7 +772,11 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) + if (error < 0) + goto no_id; + ++ shp->ns = ns; ++ ++ task_lock(current); + list_add(&shp->shm_clist, ¤t->sysvshm.shm_clist); ++ task_unlock(current); + + /* + * shmid gets reported as "inode#" in /proc/pid/maps. +@@ -1549,7 +1645,8 @@ out_nattch: + down_write(&shm_ids(ns).rwsem); + shp = shm_lock(ns, shmid); + shp->shm_nattch--; +- if (shm_may_destroy(ns, shp)) ++ ++ if (shm_may_destroy(shp)) + shm_destroy(ns, shp); + else + shm_unlock(shp); +diff --git a/kernel/kprobes.c b/kernel/kprobes.c +index d4435fd6fc8bc..993b84cc1db5a 100644 +--- a/kernel/kprobes.c ++++ b/kernel/kprobes.c +@@ -1976,6 +1976,9 @@ int register_kretprobe(struct kretprobe *rp) + } + } + ++ if (rp->data_size > KRETPROBE_MAX_DATA_SIZE) ++ return -E2BIG; ++ + rp->kp.pre_handler = pre_handler_kretprobe; + rp->kp.post_handler = NULL; + rp->kp.fault_handler = NULL; +diff --git a/lib/siphash.c b/lib/siphash.c +index 3ae58b4edad61..e632ee40aac1a 100644 +--- a/lib/siphash.c ++++ b/lib/siphash.c +@@ -49,6 +49,7 @@ + SIPROUND; \ + return (v0 ^ v1) ^ (v2 ^ v3); + ++#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key) + { + const u8 *end = data + len - (len % sizeof(u64)); +@@ -80,8 +81,8 @@ u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key) + POSTAMBLE + } + EXPORT_SYMBOL(__siphash_aligned); ++#endif + +-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key) + { + const u8 *end = data + len - (len % sizeof(u64)); +@@ -113,7 +114,6 @@ u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key) + POSTAMBLE + } + EXPORT_SYMBOL(__siphash_unaligned); +-#endif + + /** + * siphash_1u64 - compute 64-bit siphash PRF value of a u64 +@@ -250,6 +250,7 @@ EXPORT_SYMBOL(siphash_3u32); + HSIPROUND; \ + return (v0 ^ v1) ^ (v2 ^ v3); + ++#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key) + { + const u8 *end = data + len - (len % sizeof(u64)); +@@ -280,8 +281,8 @@ u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key) + HPOSTAMBLE + } + 
EXPORT_SYMBOL(__hsiphash_aligned); ++#endif + +-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + u32 __hsiphash_unaligned(const void *data, size_t len, + const hsiphash_key_t *key) + { +@@ -313,7 +314,6 @@ u32 __hsiphash_unaligned(const void *data, size_t len, + HPOSTAMBLE + } + EXPORT_SYMBOL(__hsiphash_unaligned); +-#endif + + /** + * hsiphash_1u32 - compute 64-bit hsiphash PRF value of a u32 +@@ -418,6 +418,7 @@ EXPORT_SYMBOL(hsiphash_4u32); + HSIPROUND; \ + return v1 ^ v3; + ++#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key) + { + const u8 *end = data + len - (len % sizeof(u32)); +@@ -438,8 +439,8 @@ u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key) + HPOSTAMBLE + } + EXPORT_SYMBOL(__hsiphash_aligned); ++#endif + +-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + u32 __hsiphash_unaligned(const void *data, size_t len, + const hsiphash_key_t *key) + { +@@ -461,7 +462,6 @@ u32 __hsiphash_unaligned(const void *data, size_t len, + HPOSTAMBLE + } + EXPORT_SYMBOL(__hsiphash_unaligned); +-#endif + + /** + * hsiphash_1u32 - compute 32-bit hsiphash PRF value of a u32 +diff --git a/net/core/dev.c b/net/core/dev.c +index 2519a90a14827..42f6ff8b9703c 100644 +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -3831,7 +3831,10 @@ static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev) + if (dev->flags & IFF_UP) { + int cpu = smp_processor_id(); /* ok because BHs are off */ + +- if (txq->xmit_lock_owner != cpu) { ++ /* Other cpus might concurrently change txq->xmit_lock_owner ++ * to -1 or to their cpu id, but not to our id. ++ */ ++ if (READ_ONCE(txq->xmit_lock_owner) != cpu) { + if (dev_xmit_recursion()) + goto recursion_alert; + +diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c +index 12a2cea9d606a..e2ab8cdb71347 100644 +--- a/net/ipv4/devinet.c ++++ b/net/ipv4/devinet.c +@@ -2354,7 +2354,7 @@ static int __devinet_sysctl_register(struct net *net, char *dev_name, + free: + kfree(t); + out: +- return -ENOBUFS; ++ return -ENOMEM; + } + + static void __devinet_sysctl_unregister(struct net *net, +diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c +index 7623d9aec6364..ea1745cb93edb 100644 +--- a/net/mpls/af_mpls.c ++++ b/net/mpls/af_mpls.c +@@ -1438,22 +1438,52 @@ static void mpls_dev_destroy_rcu(struct rcu_head *head) + kfree(mdev); + } + +-static void mpls_ifdown(struct net_device *dev, int event) ++static int mpls_ifdown(struct net_device *dev, int event) + { + struct mpls_route __rcu **platform_label; + struct net *net = dev_net(dev); +- u8 alive, deleted; + unsigned index; + + platform_label = rtnl_dereference(net->mpls.platform_label); + for (index = 0; index < net->mpls.platform_labels; index++) { + struct mpls_route *rt = rtnl_dereference(platform_label[index]); ++ bool nh_del = false; ++ u8 alive = 0; + + if (!rt) + continue; + +- alive = 0; +- deleted = 0; ++ if (event == NETDEV_UNREGISTER) { ++ u8 deleted = 0; ++ ++ for_nexthops(rt) { ++ struct net_device *nh_dev = ++ rtnl_dereference(nh->nh_dev); ++ ++ if (!nh_dev || nh_dev == dev) ++ deleted++; ++ if (nh_dev == dev) ++ nh_del = true; ++ } endfor_nexthops(rt); ++ ++ /* if there are no more nexthops, delete the route */ ++ if (deleted == rt->rt_nhn) { ++ mpls_route_update(net, index, NULL, NULL); ++ continue; ++ } ++ ++ if (nh_del) { ++ size_t size = sizeof(*rt) + rt->rt_nhn * ++ rt->rt_nh_size; ++ struct mpls_route *orig = rt; ++ ++ rt = kmalloc(size, GFP_KERNEL); ++ if (!rt) ++ return -ENOMEM; ++ memcpy(rt, 
orig, size); ++ } ++ } ++ + change_nexthops(rt) { + unsigned int nh_flags = nh->nh_flags; + +@@ -1477,16 +1507,15 @@ static void mpls_ifdown(struct net_device *dev, int event) + next: + if (!(nh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN))) + alive++; +- if (!rtnl_dereference(nh->nh_dev)) +- deleted++; + } endfor_nexthops(rt); + + WRITE_ONCE(rt->rt_nhn_alive, alive); + +- /* if there are no more nexthops, delete the route */ +- if (event == NETDEV_UNREGISTER && deleted == rt->rt_nhn) +- mpls_route_update(net, index, NULL, NULL); ++ if (nh_del) ++ mpls_route_update(net, index, rt, NULL); + } ++ ++ return 0; + } + + static void mpls_ifup(struct net_device *dev, unsigned int flags) +@@ -1554,8 +1583,12 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event, + return NOTIFY_OK; + + switch (event) { ++ int err; ++ + case NETDEV_DOWN: +- mpls_ifdown(dev, event); ++ err = mpls_ifdown(dev, event); ++ if (err) ++ return notifier_from_errno(err); + break; + case NETDEV_UP: + flags = dev_get_flags(dev); +@@ -1566,13 +1599,18 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event, + break; + case NETDEV_CHANGE: + flags = dev_get_flags(dev); +- if (flags & (IFF_RUNNING | IFF_LOWER_UP)) ++ if (flags & (IFF_RUNNING | IFF_LOWER_UP)) { + mpls_ifup(dev, RTNH_F_DEAD | RTNH_F_LINKDOWN); +- else +- mpls_ifdown(dev, event); ++ } else { ++ err = mpls_ifdown(dev, event); ++ if (err) ++ return notifier_from_errno(err); ++ } + break; + case NETDEV_UNREGISTER: +- mpls_ifdown(dev, event); ++ err = mpls_ifdown(dev, event); ++ if (err) ++ return notifier_from_errno(err); + mdev = mpls_dev_get(dev); + if (mdev) { + mpls_dev_sysctl_unregister(dev, mdev); +@@ -1583,8 +1621,6 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event, + case NETDEV_CHANGENAME: + mdev = mpls_dev_get(dev); + if (mdev) { +- int err; +- + mpls_dev_sysctl_unregister(dev, mdev); + err = mpls_dev_sysctl_register(dev, mdev); + if (err) +diff --git a/net/rds/tcp.c b/net/rds/tcp.c +index d0bce439198fc..d4e6466d3989a 100644 +--- a/net/rds/tcp.c ++++ b/net/rds/tcp.c +@@ -502,7 +502,7 @@ void rds_tcp_tune(struct socket *sock) + sk->sk_userlocks |= SOCK_SNDBUF_LOCK; + } + if (rtn->rcvbuf_size > 0) { +- sk->sk_sndbuf = rtn->rcvbuf_size; ++ sk->sk_rcvbuf = rtn->rcvbuf_size; + sk->sk_userlocks |= SOCK_RCVBUF_LOCK; + } + release_sock(sk); +diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c +index b91b090217cdb..d0a1ebecc6fbb 100644 +--- a/net/rxrpc/peer_object.c ++++ b/net/rxrpc/peer_object.c +@@ -299,6 +299,12 @@ static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_sock *rx, + return peer; + } + ++static void rxrpc_free_peer(struct rxrpc_peer *peer) ++{ ++ rxrpc_put_local(peer->local); ++ kfree_rcu(peer, rcu); ++} ++ + /* + * Set up a new incoming peer. 
There shouldn't be any other matching peers + * since we've already done a search in the list from the non-reentrant context +@@ -365,7 +371,7 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_sock *rx, + spin_unlock_bh(&rxnet->peer_hash_lock); + + if (peer) +- kfree(candidate); ++ rxrpc_free_peer(candidate); + else + peer = candidate; + } +@@ -420,8 +426,7 @@ static void __rxrpc_put_peer(struct rxrpc_peer *peer) + list_del_init(&peer->keepalive_link); + spin_unlock_bh(&rxnet->peer_hash_lock); + +- rxrpc_put_local(peer->local); +- kfree_rcu(peer, rcu); ++ rxrpc_free_peer(peer); + } + + /* +@@ -457,8 +462,7 @@ void rxrpc_put_peer_locked(struct rxrpc_peer *peer) + if (n == 0) { + hash_del_rcu(&peer->hash_link); + list_del_init(&peer->keepalive_link); +- rxrpc_put_local(peer->local); +- kfree_rcu(peer, rcu); ++ rxrpc_free_peer(peer); + } + } + +diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c +index e25c023582f9e..092696d738c00 100644 +--- a/net/smc/smc_close.c ++++ b/net/smc/smc_close.c +@@ -167,6 +167,7 @@ int smc_close_active(struct smc_sock *smc) + int old_state; + long timeout; + int rc = 0; ++ int rc1 = 0; + + timeout = current->flags & PF_EXITING ? + 0 : sock_flag(sk, SOCK_LINGER) ? +@@ -206,8 +207,11 @@ again: + /* actively shutdown clcsock before peer close it, + * prevent peer from entering TIME_WAIT state. + */ +- if (smc->clcsock && smc->clcsock->sk) +- rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR); ++ if (smc->clcsock && smc->clcsock->sk) { ++ rc1 = kernel_sock_shutdown(smc->clcsock, ++ SHUT_RDWR); ++ rc = rc ? rc : rc1; ++ } + } else { + /* peer event has changed the state */ + goto again; +diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c +index fe3dfaa64a916..4afb63cea41b9 100644 +--- a/tools/perf/ui/hist.c ++++ b/tools/perf/ui/hist.c +@@ -468,6 +468,18 @@ struct perf_hpp_list perf_hpp_list = { + #undef __HPP_SORT_ACC_FN + #undef __HPP_SORT_RAW_FN + ++static void fmt_free(struct perf_hpp_fmt *fmt) ++{ ++ /* ++ * At this point fmt should be completely ++ * unhooked, if not it's a bug. ++ */ ++ BUG_ON(!list_empty(&fmt->list)); ++ BUG_ON(!list_empty(&fmt->sort_list)); ++ ++ if (fmt->free) ++ fmt->free(fmt); ++} + + void perf_hpp__init(void) + { +@@ -531,9 +543,10 @@ void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list, + list_add(&format->sort_list, &list->sorts); + } + +-void perf_hpp__column_unregister(struct perf_hpp_fmt *format) ++static void perf_hpp__column_unregister(struct perf_hpp_fmt *format) + { + list_del_init(&format->list); ++ fmt_free(format); + } + + void perf_hpp__cancel_cumulate(void) +@@ -605,19 +618,6 @@ next: + } + + +-static void fmt_free(struct perf_hpp_fmt *fmt) +-{ +- /* +- * At this point fmt should be completely +- * unhooked, if not it's a bug. +- */ +- BUG_ON(!list_empty(&fmt->list)); +- BUG_ON(!list_empty(&fmt->sort_list)); +- +- if (fmt->free) +- fmt->free(fmt); +-} +- + void perf_hpp__reset_output_field(struct perf_hpp_list *list) + { + struct perf_hpp_fmt *fmt, *tmp; +diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h +index 7173e1f410930..899c1ca5e7dce 100644 +--- a/tools/perf/util/hist.h ++++ b/tools/perf/util/hist.h +@@ -346,7 +346,6 @@ enum { + }; + + void perf_hpp__init(void); +-void perf_hpp__column_unregister(struct perf_hpp_fmt *format); + void perf_hpp__cancel_cumulate(void); + void perf_hpp__setup_output_field(struct perf_hpp_list *list); + void perf_hpp__reset_output_field(struct perf_hpp_list *list);
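
Note on the fs/file.c hunk above: it introduces fget_many()/fput_many(), which take or drop several references to a struct file in one atomic operation while still refusing to resurrect a file whose count has already reached zero. Below is a minimal userspace sketch of that pattern using C11 atomics. It is an illustration only: the obj type and helper bodies are assumptions written for this note, not the kernel implementations, but the add-unless-zero logic matches what get_file_rcu_many() relies on.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
	atomic_long count;   /* stands in for file->f_count */
};

/* Add n references unless the count equals `unless` (illustrative name). */
static bool add_unless(atomic_long *v, long n, long unless)
{
	long old = atomic_load(v);

	while (old != unless) {
		/* CAS failure reloads `old`, so the unless-check is retried */
		if (atomic_compare_exchange_weak(v, &old, old + n))
			return true;
	}
	return false;   /* count already at `unless`: do not touch it */
}

/* Drop `refs` references; true when the last one is gone. */
static bool put_many(struct obj *o, long refs)
{
	/* fetch_sub returns the old value; old == refs means we hit zero */
	return atomic_fetch_sub(&o->count, refs) == refs;
}

int main(void)
{
	struct obj o = { .count = 1 };

	if (add_unless(&o.count, 4, 0))          /* like fget_many(fd, 4) */
		printf("took 4 refs, count is now %ld\n",
		       (long)atomic_load(&o.count));

	if (put_many(&o, 5))                     /* drops all 5 references */
		printf("last reference dropped, object can be freed\n");

	return 0;
}

The unless == 0 check is the load-bearing part: an RCU lookup can race with the final fput(), and bumping a zero count would hand out a reference to a file that is already being freed, which is why the hunk also re-checks __fcheck_files() after taking the references.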
