commit:     5494776e44ba90f3a6a32578a72458db1aff414d
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Mar 26 11:52:58 2017 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Mar 26 11:52:58 2017 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=5494776e

Linux patch 4.4.57

 0000_README             |    4 +
 1056_linux-4.4.57.patch | 1172 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1176 insertions(+)

diff --git a/0000_README b/0000_README
index 448cdac..e08ec98 100644
--- a/0000_README
+++ b/0000_README
@@ -267,6 +267,10 @@ Patch:  1055_linux-4.4.56.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.4.56
 
+Patch:  1056_linux-4.4.57.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.4.57
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1056_linux-4.4.57.patch b/1056_linux-4.4.57.patch
new file mode 100644
index 0000000..d28c5d6
--- /dev/null
+++ b/1056_linux-4.4.57.patch
@@ -0,0 +1,1172 @@
+diff --git a/Makefile b/Makefile
+index cf9303a5d621..841675e63a38 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 4
+-SUBLEVEL = 56
++SUBLEVEL = 57
+ EXTRAVERSION =
+ NAME = Blurry Fish Butt
+ 
+diff --git a/arch/powerpc/boot/zImage.lds.S b/arch/powerpc/boot/zImage.lds.S
+index 861e72109df2..f080abfc2f83 100644
+--- a/arch/powerpc/boot/zImage.lds.S
++++ b/arch/powerpc/boot/zImage.lds.S
+@@ -68,6 +68,7 @@ SECTIONS
+   }
+ 
+ #ifdef CONFIG_PPC64_BOOT_WRAPPER
++  . = ALIGN(256);
+   .got :
+   {
+     __toc_start = .;
+diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
+index 5cc2e7af3a7b..b379146de55b 100644
+--- a/arch/powerpc/kvm/emulate.c
++++ b/arch/powerpc/kvm/emulate.c
+@@ -302,7 +302,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
+                       advance = 0;
+                       printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
+                              "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst));
+-                      kvmppc_core_queue_program(vcpu, 0);
+               }
+       }
+ 
+diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
+index 3a40f718baef..4004e03267cd 100644
+--- a/arch/s390/pci/pci_dma.c
++++ b/arch/s390/pci/pci_dma.c
+@@ -455,7 +455,7 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
+       zdev->dma_table = dma_alloc_cpu_table();
+       if (!zdev->dma_table) {
+               rc = -ENOMEM;
+-              goto out_clean;
++              goto out;
+       }
+ 
+       /*
+@@ -475,18 +475,22 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
+       zdev->iommu_bitmap = vzalloc(zdev->iommu_pages / 8);
+       if (!zdev->iommu_bitmap) {
+               rc = -ENOMEM;
+-              goto out_reg;
++              goto free_dma_table;
+       }
+ 
+       rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
+                               (u64) zdev->dma_table);
+       if (rc)
+-              goto out_reg;
+-      return 0;
++              goto free_bitmap;
+ 
+-out_reg:
++      return 0;
++free_bitmap:
++      vfree(zdev->iommu_bitmap);
++      zdev->iommu_bitmap = NULL;
++free_dma_table:
+       dma_free_cpu_table(zdev->dma_table);
+-out_clean:
++      zdev->dma_table = NULL;
++out:
+       return rc;
+ }
+ 
+diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c
+index 440df0c7a2ee..a69321a77783 100644
+--- a/arch/x86/crypto/ghash-clmulni-intel_glue.c
++++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c
+@@ -219,6 +219,29 @@ static int ghash_async_final(struct ahash_request *req)
+       }
+ }
+ 
++static int ghash_async_import(struct ahash_request *req, const void *in)
++{
++      struct ahash_request *cryptd_req = ahash_request_ctx(req);
++      struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
++      struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
++
++      ghash_async_init(req);
++      memcpy(dctx, in, sizeof(*dctx));
++      return 0;
++
++}
++
++static int ghash_async_export(struct ahash_request *req, void *out)
++{
++      struct ahash_request *cryptd_req = ahash_request_ctx(req);
++      struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
++      struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
++
++      memcpy(out, dctx, sizeof(*dctx));
++      return 0;
++
++}
++
+ static int ghash_async_digest(struct ahash_request *req)
+ {
+       struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+@@ -288,8 +311,11 @@ static struct ahash_alg ghash_async_alg = {
+       .final          = ghash_async_final,
+       .setkey         = ghash_async_setkey,
+       .digest         = ghash_async_digest,
++      .export         = ghash_async_export,
++      .import         = ghash_async_import,
+       .halg = {
+               .digestsize     = GHASH_DIGEST_SIZE,
++              .statesize = sizeof(struct ghash_desc_ctx),
+               .base = {
+                       .cra_name               = "ghash",
+                       .cra_driver_name        = "ghash-clmulni",
+diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
+index 9e2ba5c6e1dd..f42e78de1e10 100644
+--- a/arch/x86/xen/spinlock.c
++++ b/arch/x86/xen/spinlock.c
+@@ -27,6 +27,12 @@ static bool xen_pvspin = true;
+ 
+ static void xen_qlock_kick(int cpu)
+ {
++      int irq = per_cpu(lock_kicker_irq, cpu);
++
++      /* Don't kick if the target's kicker interrupt is not initialized. */
++      if (irq == -1)
++              return;
++
+       xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
+ }
+ 
+diff --git a/crypto/cryptd.c b/crypto/cryptd.c
+index e7aa904cb20b..26a504db3f53 100644
+--- a/crypto/cryptd.c
++++ b/crypto/cryptd.c
+@@ -642,6 +642,7 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
+       inst->alg.halg.base.cra_flags = type;
+ 
+       inst->alg.halg.digestsize = salg->digestsize;
++      inst->alg.halg.statesize = salg->statesize;
+       inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);
+ 
+       inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
+diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c
+index a0ceb41d5ccc..b4f3930266b1 100644
+--- a/crypto/mcryptd.c
++++ b/crypto/mcryptd.c
+@@ -531,6 +531,7 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
+       inst->alg.halg.base.cra_flags = type;
+ 
+       inst->alg.halg.digestsize = salg->digestsize;
++      inst->alg.halg.statesize = salg->statesize;
+       inst->alg.halg.base.cra_ctxsize = sizeof(struct mcryptd_hash_ctx);
+ 
+       inst->alg.halg.base.cra_init = mcryptd_hash_init_tfm;
+diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
+index 5fdac394207a..549cdbed7b0e 100644
+--- a/drivers/acpi/acpi_video.c
++++ b/drivers/acpi/acpi_video.c
+@@ -1211,6 +1211,9 @@ static int acpi_video_device_enumerate(struct acpi_video_bus *video)
+       union acpi_object *dod = NULL;
+       union acpi_object *obj;
+ 
++      if (!video->cap._DOD)
++              return AE_NOT_EXIST;
++
+       status = acpi_evaluate_object(video->device->handle, "_DOD", NULL, &buffer);
+       if (!ACPI_SUCCESS(status)) {
+               ACPI_EXCEPTION((AE_INFO, status, "Evaluating _DOD"));
+diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
+index 65f7eecc45b0..f10a107614b4 100644
+--- a/drivers/char/tpm/tpm_tis.c
++++ b/drivers/char/tpm/tpm_tis.c
+@@ -401,7 +401,7 @@ static void disable_interrupts(struct tpm_chip *chip)
+       iowrite32(intmask,
+                 chip->vendor.iobase +
+                 TPM_INT_ENABLE(chip->vendor.locality));
+-      free_irq(chip->vendor.irq, chip);
++      devm_free_irq(chip->pdev, chip->vendor.irq, chip);
+       chip->vendor.irq = 0;
+ }
+ 
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index 8412ce5f93a7..86fa9fdc8323 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -626,9 +626,11 @@ static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
+                                       char *buf)
+ {
+       unsigned int cur_freq = __cpufreq_get(policy);
+-      if (!cur_freq)
+-              return sprintf(buf, "<unknown>");
+-      return sprintf(buf, "%u\n", cur_freq);
++
++      if (cur_freq)
++              return sprintf(buf, "%u\n", cur_freq);
++
++      return sprintf(buf, "<unknown>\n");
+ }
+ 
+ /**
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+index 7c42ff670080..a0924330d125 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+@@ -25,6 +25,7 @@
+  *          Alex Deucher
+  *          Jerome Glisse
+  */
++#include <linux/irq.h>
+ #include <drm/drmP.h>
+ #include <drm/drm_crtc_helper.h>
+ #include <drm/amdgpu_drm.h>
+diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
+index 57c191798699..ddbf7e7e0d98 100644
+--- a/drivers/hv/hv.c
++++ b/drivers/hv/hv.c
+@@ -274,7 +274,7 @@ cleanup:
+  *
+  * This routine is called normally during driver unloading or exiting.
+  */
+-void hv_cleanup(void)
++void hv_cleanup(bool crash)
+ {
+       union hv_x64_msr_hypercall_contents hypercall_msr;
+ 
+@@ -284,7 +284,8 @@ void hv_cleanup(void)
+       if (hv_context.hypercall_page) {
+               hypercall_msr.as_uint64 = 0;
+               wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
+-              vfree(hv_context.hypercall_page);
++              if (!crash)
++                      vfree(hv_context.hypercall_page);
+               hv_context.hypercall_page = NULL;
+       }
+ 
+@@ -304,7 +305,8 @@ void hv_cleanup(void)
+ 
+               hypercall_msr.as_uint64 = 0;
+               wrmsrl(HV_X64_MSR_REFERENCE_TSC, hypercall_msr.as_uint64);
+-              vfree(hv_context.tsc_page);
++              if (!crash)
++                      vfree(hv_context.tsc_page);
+               hv_context.tsc_page = NULL;
+       }
+ #endif
+diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
+index b853b4b083bd..43af91362be5 100644
+--- a/drivers/hv/hv_balloon.c
++++ b/drivers/hv/hv_balloon.c
+@@ -714,7 +714,7 @@ static bool pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
+                * If the pfn range we are dealing with is not in the current
+                * "hot add block", move on.
+                */
+-              if ((start_pfn >= has->end_pfn))
++              if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
+                       continue;
+               /*
+                * If the current hot add-request extends beyond
+@@ -768,7 +768,7 @@ static unsigned long handle_pg_range(unsigned long pg_start,
+                * If the pfn range we are dealing with is not in the current
+                * "hot add block", move on.
+                */
+-              if ((start_pfn >= has->end_pfn))
++              if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
+                       continue;
+ 
+               old_covered_state = has->covered_end_pfn;
+diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
+index 12156db2e88e..75e383e6d03d 100644
+--- a/drivers/hv/hyperv_vmbus.h
++++ b/drivers/hv/hyperv_vmbus.h
+@@ -581,7 +581,7 @@ struct hv_ring_buffer_debug_info {
+ 
+ extern int hv_init(void);
+ 
+-extern void hv_cleanup(void);
++extern void hv_cleanup(bool crash);
+ 
+ extern int hv_post_message(union hv_connection_id connection_id,
+                        enum hv_message_type message_type,
+diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
+index 509ed9731630..802dcb409030 100644
+--- a/drivers/hv/vmbus_drv.c
++++ b/drivers/hv/vmbus_drv.c
+@@ -889,7 +889,7 @@ err_alloc:
+       bus_unregister(&hv_bus);
+ 
+ err_cleanup:
+-      hv_cleanup();
++      hv_cleanup(false);
+ 
+       return ret;
+ }
+@@ -1254,7 +1254,7 @@ static void hv_kexec_handler(void)
+       vmbus_initiate_unload();
+       for_each_online_cpu(cpu)
+               smp_call_function_single(cpu, hv_synic_cleanup, NULL, 1);
+-      hv_cleanup();
++      hv_cleanup(false);
+ };
+ 
+ static void hv_crash_handler(struct pt_regs *regs)
+@@ -1266,7 +1266,7 @@ static void hv_crash_handler(struct pt_regs *regs)
+        * for kdump.
+        */
+       hv_synic_cleanup(NULL);
+-      hv_cleanup();
++      hv_cleanup(true);
+ };
+ 
+ static int __init hv_acpi_init(void)
+@@ -1330,7 +1330,7 @@ static void __exit vmbus_exit(void)
+                                                &hyperv_panic_block);
+       }
+       bus_unregister(&hv_bus);
+-      hv_cleanup();
++      hv_cleanup(false);
+       for_each_online_cpu(cpu) {
+               tasklet_kill(hv_context.event_dpc[cpu]);
+               smp_call_function_single(cpu, hv_synic_cleanup, NULL, 1);
+diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
+index aecec6d32463..7f1c625b08ec 100644
+--- a/drivers/isdn/gigaset/bas-gigaset.c
++++ b/drivers/isdn/gigaset/bas-gigaset.c
+@@ -2317,6 +2317,9 @@ static int gigaset_probe(struct usb_interface *interface,
+               return -ENODEV;
+       }
+ 
++      if (hostif->desc.bNumEndpoints < 1)
++              return -ENODEV;
++
+       dev_info(&udev->dev,
+                "%s: Device matched (Vendor: 0x%x, Product: 0x%x)\n",
+                __func__, le16_to_cpu(udev->descriptor.idVendor),
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index ebb0dd612ebd..122af340a531 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -1477,7 +1477,25 @@ static void make_request(struct mddev *mddev, struct bio *bio)
+                       split = bio;
+               }
+ 
++              /*
++               * If a bio is splitted, the first part of bio will pass
++               * barrier but the bio is queued in current->bio_list (see
++               * generic_make_request). If there is a raise_barrier() called
++               * here, the second part of bio can't pass barrier. But since
++               * the first part bio isn't dispatched to underlaying disks
++               * yet, the barrier is never released, hence raise_barrier will
++               * alays wait. We have a deadlock.
++               * Note, this only happens in read path. For write path, the
++               * first part of bio is dispatched in a schedule() call
++               * (because of blk plug) or offloaded to raid10d.
++               * Quitting from the function immediately can change the bio
++               * order queued in bio_list and avoid the deadlock.
++               */
+               __make_request(mddev, split);
++              if (split != bio && bio_data_dir(bio) == READ) {
++                      generic_make_request(bio);
++                      break;
++              }
+       } while (split != bio);
+ 
+       /* In case raid10d snuck in to freeze_array */
+diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
+index e8a09ff9e724..c8a7802d2953 100644
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -197,65 +197,6 @@ static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size,
+       return ppi;
+ }
+ 
+-union sub_key {
+-      u64 k;
+-      struct {
+-              u8 pad[3];
+-              u8 kb;
+-              u32 ka;
+-      };
+-};
+-
+-/* Toeplitz hash function
+- * data: network byte order
+- * return: host byte order
+- */
+-static u32 comp_hash(u8 *key, int klen, void *data, int dlen)
+-{
+-      union sub_key subk;
+-      int k_next = 4;
+-      u8 dt;
+-      int i, j;
+-      u32 ret = 0;
+-
+-      subk.k = 0;
+-      subk.ka = ntohl(*(u32 *)key);
+-
+-      for (i = 0; i < dlen; i++) {
+-              subk.kb = key[k_next];
+-              k_next = (k_next + 1) % klen;
+-              dt = ((u8 *)data)[i];
+-              for (j = 0; j < 8; j++) {
+-                      if (dt & 0x80)
+-                              ret ^= subk.ka;
+-                      dt <<= 1;
+-                      subk.k <<= 1;
+-              }
+-      }
+-
+-      return ret;
+-}
+-
+-static bool netvsc_set_hash(u32 *hash, struct sk_buff *skb)
+-{
+-      struct flow_keys flow;
+-      int data_len;
+-
+-      if (!skb_flow_dissect_flow_keys(skb, &flow, 0) ||
+-          !(flow.basic.n_proto == htons(ETH_P_IP) ||
+-            flow.basic.n_proto == htons(ETH_P_IPV6)))
+-              return false;
+-
+-      if (flow.basic.ip_proto == IPPROTO_TCP)
+-              data_len = 12;
+-      else
+-              data_len = 8;
+-
+-      *hash = comp_hash(netvsc_hash_key, HASH_KEYLEN, &flow, data_len);
+-
+-      return true;
+-}
+-
+ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
+                       void *accel_priv, select_queue_fallback_t fallback)
+ {
+@@ -268,11 +209,9 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
+       if (nvsc_dev == NULL || ndev->real_num_tx_queues <= 1)
+               return 0;
+ 
+-      if (netvsc_set_hash(&hash, skb)) {
+-              q_idx = nvsc_dev->send_table[hash % VRSS_SEND_TAB_SIZE] %
+-                      ndev->real_num_tx_queues;
+-              skb_set_hash(skb, hash, PKT_HASH_TYPE_L3);
+-      }
++      hash = skb_get_hash(skb);
++      q_idx = nvsc_dev->send_table[hash % VRSS_SEND_TAB_SIZE] %
++              ndev->real_num_tx_queues;
+ 
+       return q_idx;
+ }
+diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
+index a009ae34c5ef..930f0f25c1ce 100644
+--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
++++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
+@@ -1466,12 +1466,11 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
+               offset += range->npins;
+       }
+ 
+-      /* Mask and clear all interrupts */
+-      chv_writel(0, pctrl->regs + CHV_INTMASK);
++      /* Clear all interrupts */
+       chv_writel(0xffff, pctrl->regs + CHV_INTSTAT);
+ 
+       ret = gpiochip_irqchip_add(chip, &chv_gpio_irqchip, 0,
+-                                 handle_simple_irq, IRQ_TYPE_NONE);
++                                 handle_bad_irq, IRQ_TYPE_NONE);
+       if (ret) {
+               dev_err(pctrl->dev, "failed to add IRQ chip\n");
+               goto fail;
+diff --git a/drivers/scsi/cxlflash/common.h b/drivers/scsi/cxlflash/common.h
+index 5ada9268a450..a8ac4c0a1493 100644
+--- a/drivers/scsi/cxlflash/common.h
++++ b/drivers/scsi/cxlflash/common.h
+@@ -34,7 +34,6 @@ extern const struct file_operations cxlflash_cxl_fops;
+                                                                  sectors
+                                                               */
+ 
+-#define NUM_RRQ_ENTRY    16     /* for master issued cmds */
+ #define MAX_RHT_PER_CONTEXT (PAGE_SIZE / sizeof(struct sisl_rht_entry))
+ 
+ /* AFU command retry limit */
+@@ -48,9 +47,12 @@ extern const struct file_operations cxlflash_cxl_fops;
+                                                          index derivation
+                                                        */
+ 
+-#define CXLFLASH_MAX_CMDS               16
++#define CXLFLASH_MAX_CMDS               256
+ #define CXLFLASH_MAX_CMDS_PER_LUN       CXLFLASH_MAX_CMDS
+ 
++/* RRQ for master issued cmds */
++#define NUM_RRQ_ENTRY                   CXLFLASH_MAX_CMDS
++
+ 
+ static inline void check_sizes(void)
+ {
+@@ -149,7 +151,7 @@ struct afu_cmd {
+ struct afu {
+       /* Stuff requiring alignment go first. */
+ 
+-      u64 rrq_entry[NUM_RRQ_ENTRY];   /* 128B RRQ */
++      u64 rrq_entry[NUM_RRQ_ENTRY];   /* 2K RRQ */
+       /*
+        * Command & data for AFU commands.
+        */
+diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
+index c86847c68448..2882bcac918a 100644
+--- a/drivers/scsi/cxlflash/main.c
++++ b/drivers/scsi/cxlflash/main.c
+@@ -2305,7 +2305,7 @@ static struct scsi_host_template driver_template = {
+       .eh_device_reset_handler = cxlflash_eh_device_reset_handler,
+       .eh_host_reset_handler = cxlflash_eh_host_reset_handler,
+       .change_queue_depth = cxlflash_change_queue_depth,
+-      .cmd_per_lun = 16,
++      .cmd_per_lun = CXLFLASH_MAX_CMDS_PER_LUN,
+       .can_queue = CXLFLASH_MAX_CMDS,
+       .this_id = -1,
+       .sg_tablesize = SG_NONE,        /* No scatter gather support */
+diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
+index 6bffd91b973a..c1ccf1ee99ea 100644
+--- a/drivers/scsi/libiscsi.c
++++ b/drivers/scsi/libiscsi.c
+@@ -560,8 +560,12 @@ static void iscsi_complete_task(struct iscsi_task *task, int state)
+       WARN_ON_ONCE(task->state == ISCSI_TASK_FREE);
+       task->state = state;
+ 
+-      if (!list_empty(&task->running))
++      spin_lock_bh(&conn->taskqueuelock);
++      if (!list_empty(&task->running)) {
++              pr_debug_once("%s while task on list", __func__);
+               list_del_init(&task->running);
++      }
++      spin_unlock_bh(&conn->taskqueuelock);
+ 
+       if (conn->task == task)
+               conn->task = NULL;
+@@ -783,7 +787,9 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+               if (session->tt->xmit_task(task))
+                       goto free_task;
+       } else {
++              spin_lock_bh(&conn->taskqueuelock);
+               list_add_tail(&task->running, &conn->mgmtqueue);
++              spin_unlock_bh(&conn->taskqueuelock);
+               iscsi_conn_queue_work(conn);
+       }
+ 
+@@ -1474,8 +1480,10 @@ void iscsi_requeue_task(struct iscsi_task *task)
+        * this may be on the requeue list already if the xmit_task callout
+        * is handling the r2ts while we are adding new ones
+        */
++      spin_lock_bh(&conn->taskqueuelock);
+       if (list_empty(&task->running))
+               list_add_tail(&task->running, &conn->requeue);
++      spin_unlock_bh(&conn->taskqueuelock);
+       iscsi_conn_queue_work(conn);
+ }
+ EXPORT_SYMBOL_GPL(iscsi_requeue_task);
+@@ -1512,22 +1520,26 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
+        * only have one nop-out as a ping from us and targets should not
+        * overflow us with nop-ins
+        */
++      spin_lock_bh(&conn->taskqueuelock);
+ check_mgmt:
+       while (!list_empty(&conn->mgmtqueue)) {
+               conn->task = list_entry(conn->mgmtqueue.next,
+                                        struct iscsi_task, running);
+               list_del_init(&conn->task->running);
++              spin_unlock_bh(&conn->taskqueuelock);
+               if (iscsi_prep_mgmt_task(conn, conn->task)) {
+                       /* regular RX path uses back_lock */
+                       spin_lock_bh(&conn->session->back_lock);
+                       __iscsi_put_task(conn->task);
+                       spin_unlock_bh(&conn->session->back_lock);
+                       conn->task = NULL;
++                      spin_lock_bh(&conn->taskqueuelock);
+                       continue;
+               }
+               rc = iscsi_xmit_task(conn);
+               if (rc)
+                       goto done;
++              spin_lock_bh(&conn->taskqueuelock);
+       }
+ 
+       /* process pending command queue */
+@@ -1535,19 +1547,24 @@ check_mgmt:
+               conn->task = list_entry(conn->cmdqueue.next, struct iscsi_task,
+                                       running);
+               list_del_init(&conn->task->running);
++              spin_unlock_bh(&conn->taskqueuelock);
+               if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
+                       fail_scsi_task(conn->task, DID_IMM_RETRY);
++                      spin_lock_bh(&conn->taskqueuelock);
+                       continue;
+               }
+               rc = iscsi_prep_scsi_cmd_pdu(conn->task);
+               if (rc) {
+                       if (rc == -ENOMEM || rc == -EACCES) {
++                              spin_lock_bh(&conn->taskqueuelock);
+                               list_add_tail(&conn->task->running,
+                                             &conn->cmdqueue);
+                               conn->task = NULL;
++                              spin_unlock_bh(&conn->taskqueuelock);
+                               goto done;
+                       } else
+                               fail_scsi_task(conn->task, DID_ABORT);
++                      spin_lock_bh(&conn->taskqueuelock);
+                       continue;
+               }
+               rc = iscsi_xmit_task(conn);
+@@ -1558,6 +1575,7 @@ check_mgmt:
+                * we need to check the mgmt queue for nops that need to
+                * be sent to aviod starvation
+                */
++              spin_lock_bh(&conn->taskqueuelock);
+               if (!list_empty(&conn->mgmtqueue))
+                       goto check_mgmt;
+       }
+@@ -1577,12 +1595,15 @@ check_mgmt:
+               conn->task = task;
+               list_del_init(&conn->task->running);
+               conn->task->state = ISCSI_TASK_RUNNING;
++              spin_unlock_bh(&conn->taskqueuelock);
+               rc = iscsi_xmit_task(conn);
+               if (rc)
+                       goto done;
++              spin_lock_bh(&conn->taskqueuelock);
+               if (!list_empty(&conn->mgmtqueue))
+                       goto check_mgmt;
+       }
++      spin_unlock_bh(&conn->taskqueuelock);
+       spin_unlock_bh(&conn->session->frwd_lock);
+       return -ENODATA;
+ 
+@@ -1738,7 +1759,9 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
+                       goto prepd_reject;
+               }
+       } else {
++              spin_lock_bh(&conn->taskqueuelock);
+               list_add_tail(&task->running, &conn->cmdqueue);
++              spin_unlock_bh(&conn->taskqueuelock);
+               iscsi_conn_queue_work(conn);
+       }
+ 
+@@ -2900,6 +2923,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
+       INIT_LIST_HEAD(&conn->mgmtqueue);
+       INIT_LIST_HEAD(&conn->cmdqueue);
+       INIT_LIST_HEAD(&conn->requeue);
++      spin_lock_init(&conn->taskqueuelock);
+       INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
+ 
+       /* allocate login_task used for the login/text sequences */
+diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
+index c14ab6c3ae40..60c21093f865 100644
+--- a/drivers/scsi/lpfc/lpfc_init.c
++++ b/drivers/scsi/lpfc/lpfc_init.c
+@@ -11387,6 +11387,7 @@ static struct pci_driver lpfc_driver = {
+       .id_table       = lpfc_id_table,
+       .probe          = lpfc_pci_probe_one,
+       .remove         = lpfc_pci_remove_one,
++      .shutdown       = lpfc_pci_remove_one,
+       .suspend        = lpfc_pci_suspend_one,
+       .resume         = lpfc_pci_resume_one,
+       .err_handler    = &lpfc_err_handler,
+diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
+index de18790eb21c..d72a4058fd08 100644
+--- a/drivers/target/target_core_pscsi.c
++++ b/drivers/target/target_core_pscsi.c
+@@ -154,7 +154,7 @@ static void pscsi_tape_read_blocksize(struct se_device *dev,
+ 
+       buf = kzalloc(12, GFP_KERNEL);
+       if (!buf)
+-              return;
++              goto out_free;
+ 
+       memset(cdb, 0, MAX_COMMAND_SIZE);
+       cdb[0] = MODE_SENSE;
+@@ -169,9 +169,10 @@ static void pscsi_tape_read_blocksize(struct se_device *dev,
+        * If MODE_SENSE still returns zero, set the default value to 1024.
+        */
+       sdev->sector_size = (buf[9] << 16) | (buf[10] << 8) | (buf[11]);
++out_free:
+       if (!sdev->sector_size)
+               sdev->sector_size = 1024;
+-out_free:
++
+       kfree(buf);
+ }
+ 
+@@ -314,9 +315,10 @@ static int pscsi_add_device_to_list(struct se_device *dev,
+                               sd->lun, sd->queue_depth);
+       }
+ 
+-      dev->dev_attrib.hw_block_size = sd->sector_size;
++      dev->dev_attrib.hw_block_size =
++              min_not_zero((int)sd->sector_size, 512);
+       dev->dev_attrib.hw_max_sectors =
+-              min_t(int, sd->host->max_sectors, queue_max_hw_sectors(q));
++              min_not_zero(sd->host->max_sectors, queue_max_hw_sectors(q));
+       dev->dev_attrib.hw_queue_depth = sd->queue_depth;
+ 
+       /*
+@@ -339,8 +341,10 @@ static int pscsi_add_device_to_list(struct se_device *dev,
+       /*
+        * For TYPE_TAPE, attempt to determine blocksize with MODE_SENSE.
+        */
+-      if (sd->type == TYPE_TAPE)
++      if (sd->type == TYPE_TAPE) {
+               pscsi_tape_read_blocksize(dev, sd);
++              dev->dev_attrib.hw_block_size = sd->sector_size;
++      }
+       return 0;
+ }
+ 
+@@ -406,7 +410,7 @@ static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd)
+ /*
+  * Called with struct Scsi_Host->host_lock called.
+  */
+-static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd)
++static int pscsi_create_type_nondisk(struct se_device *dev, struct scsi_device *sd)
+       __releases(sh->host_lock)
+ {
+       struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
+@@ -433,28 +437,6 @@ static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd)
+       return 0;
+ }
+ 
+-/*
+- * Called with struct Scsi_Host->host_lock called.
+- */
+-static int pscsi_create_type_other(struct se_device *dev,
+-              struct scsi_device *sd)
+-      __releases(sh->host_lock)
+-{
+-      struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
+-      struct Scsi_Host *sh = sd->host;
+-      int ret;
+-
+-      spin_unlock_irq(sh->host_lock);
+-      ret = pscsi_add_device_to_list(dev, sd);
+-      if (ret)
+-              return ret;
+-
+-      pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%llu\n",
+-              phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
+-              sd->channel, sd->id, sd->lun);
+-      return 0;
+-}
+-
+ static int pscsi_configure_device(struct se_device *dev)
+ {
+       struct se_hba *hba = dev->se_hba;
+@@ -542,11 +524,8 @@ static int pscsi_configure_device(struct se_device *dev)
+               case TYPE_DISK:
+                       ret = pscsi_create_type_disk(dev, sd);
+                       break;
+-              case TYPE_ROM:
+-                      ret = pscsi_create_type_rom(dev, sd);
+-                      break;
+               default:
+-                      ret = pscsi_create_type_other(dev, sd);
++                      ret = pscsi_create_type_nondisk(dev, sd);
+                       break;
+               }
+ 
+@@ -611,8 +590,7 @@ static void pscsi_free_device(struct se_device *dev)
+               else if (pdv->pdv_lld_host)
+                       scsi_host_put(pdv->pdv_lld_host);
+ 
+-              if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM))
+-                      scsi_device_put(sd);
++              scsi_device_put(sd);
+ 
+               pdv->pdv_sd = NULL;
+       }
+@@ -1088,7 +1066,6 @@ static sector_t pscsi_get_blocks(struct se_device *dev)
+       if (pdv->pdv_bd && pdv->pdv_bd->bd_part)
+               return pdv->pdv_bd->bd_part->nr_sects;
+ 
+-      dump_stack();
+       return 0;
+ }
+ 
+diff --git a/drivers/target/target_core_sbc.c 
b/drivers/target/target_core_sbc.c
+index 2e27b1034ede..90c5dffc9fa4 100644
+--- a/drivers/target/target_core_sbc.c
++++ b/drivers/target/target_core_sbc.c
+@@ -1096,9 +1096,15 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
+                       return ret;
+               break;
+       case VERIFY:
++      case VERIFY_16:
+               size = 0;
+-              sectors = transport_get_sectors_10(cdb);
+-              cmd->t_task_lba = transport_lba_32(cdb);
++              if (cdb[0] == VERIFY) {
++                      sectors = transport_get_sectors_10(cdb);
++                      cmd->t_task_lba = transport_lba_32(cdb);
++              } else {
++                      sectors = transport_get_sectors_16(cdb);
++                      cmd->t_task_lba = transport_lba_64(cdb);
++              }
+               cmd->execute_cmd = sbc_emulate_noop;
+               goto check_lba;
+       case REZERO_UNIT:
+diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
+index f44ce09367bc..5724d7c41e29 100644
+--- a/drivers/usb/core/hcd.c
++++ b/drivers/usb/core/hcd.c
+@@ -966,7 +966,7 @@ static void usb_bus_init (struct usb_bus *bus)
+       bus->bandwidth_allocated = 0;
+       bus->bandwidth_int_reqs  = 0;
+       bus->bandwidth_isoc_reqs = 0;
+-      mutex_init(&bus->usb_address0_mutex);
++      mutex_init(&bus->devnum_next_mutex);
+ 
+       INIT_LIST_HEAD (&bus->bus_list);
+ }
+@@ -2497,6 +2497,14 @@ struct usb_hcd *usb_create_shared_hcd(const struct 
hc_driver *driver,
+               return NULL;
+       }
+       if (primary_hcd == NULL) {
++              hcd->address0_mutex = kmalloc(sizeof(*hcd->address0_mutex),
++                              GFP_KERNEL);
++              if (!hcd->address0_mutex) {
++                      kfree(hcd);
++                      dev_dbg(dev, "hcd address0 mutex alloc failed\n");
++                      return NULL;
++              }
++              mutex_init(hcd->address0_mutex);
+               hcd->bandwidth_mutex = kmalloc(sizeof(*hcd->bandwidth_mutex),
+                               GFP_KERNEL);
+               if (!hcd->bandwidth_mutex) {
+@@ -2508,6 +2516,7 @@ struct usb_hcd *usb_create_shared_hcd(const struct 
hc_driver *driver,
+               dev_set_drvdata(dev, hcd);
+       } else {
+               mutex_lock(&usb_port_peer_mutex);
++              hcd->address0_mutex = primary_hcd->address0_mutex;
+               hcd->bandwidth_mutex = primary_hcd->bandwidth_mutex;
+               hcd->primary_hcd = primary_hcd;
+               primary_hcd->primary_hcd = primary_hcd;
+@@ -2564,24 +2573,23 @@ EXPORT_SYMBOL_GPL(usb_create_hcd);
+  * Don't deallocate the bandwidth_mutex until the last shared usb_hcd is
+  * deallocated.
+  *
+- * Make sure to only deallocate the bandwidth_mutex when the primary HCD is
+- * freed.  When hcd_release() is called for either hcd in a peer set
+- * invalidate the peer's ->shared_hcd and ->primary_hcd pointers to
+- * block new peering attempts
++ * Make sure to deallocate the bandwidth_mutex only when the last HCD is
++ * freed.  When hcd_release() is called for either hcd in a peer set,
++ * invalidate the peer's ->shared_hcd and ->primary_hcd pointers.
+  */
+ static void hcd_release(struct kref *kref)
+ {
+       struct usb_hcd *hcd = container_of (kref, struct usb_hcd, kref);
+ 
+       mutex_lock(&usb_port_peer_mutex);
+-      if (usb_hcd_is_primary_hcd(hcd))
+-              kfree(hcd->bandwidth_mutex);
+       if (hcd->shared_hcd) {
+               struct usb_hcd *peer = hcd->shared_hcd;
+ 
+               peer->shared_hcd = NULL;
+-              if (peer->primary_hcd == hcd)
+-                      peer->primary_hcd = NULL;
++              peer->primary_hcd = NULL;
++      } else {
++              kfree(hcd->address0_mutex);
++              kfree(hcd->bandwidth_mutex);
+       }
+       mutex_unlock(&usb_port_peer_mutex);
+       kfree(hcd);
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 780db8bb2262..f52d8abf6979 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -1980,7 +1980,7 @@ static void choose_devnum(struct usb_device *udev)
+       struct usb_bus  *bus = udev->bus;
+ 
+       /* be safe when more hub events are proceed in parallel */
+-      mutex_lock(&bus->usb_address0_mutex);
++      mutex_lock(&bus->devnum_next_mutex);
+       if (udev->wusb) {
+               devnum = udev->portnum + 1;
+               BUG_ON(test_bit(devnum, bus->devmap.devicemap));
+@@ -1998,7 +1998,7 @@ static void choose_devnum(struct usb_device *udev)
+               set_bit(devnum, bus->devmap.devicemap);
+               udev->devnum = devnum;
+       }
+-      mutex_unlock(&bus->usb_address0_mutex);
++      mutex_unlock(&bus->devnum_next_mutex);
+ }
+ 
+ static void release_devnum(struct usb_device *udev)
+@@ -4262,7 +4262,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device 
*udev, int port1,
+       if (oldspeed == USB_SPEED_LOW)
+               delay = HUB_LONG_RESET_TIME;
+ 
+-      mutex_lock(&hdev->bus->usb_address0_mutex);
++      mutex_lock(hcd->address0_mutex);
+ 
+       /* Reset the device; full speed may morph to high speed */
+       /* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
+@@ -4548,7 +4548,7 @@ fail:
+               hub_port_disable(hub, port1, 0);
+               update_devnum(udev, devnum);    /* for disconnect processing */
+       }
+-      mutex_unlock(&hdev->bus->usb_address0_mutex);
++      mutex_unlock(hcd->address0_mutex);
+       return retval;
+ }
+ 
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 6fe8e30eeb99..68345a9e59b8 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -3666,7 +3666,7 @@ static int ext4_fill_super(struct super_block *sb, void 
*data, int silent)
+       db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
+                  EXT4_DESC_PER_BLOCK(sb);
+       if (ext4_has_feature_meta_bg(sb)) {
+-              if (le32_to_cpu(es->s_first_meta_bg) >= db_count) {
++              if (le32_to_cpu(es->s_first_meta_bg) > db_count) {
+                       ext4_msg(sb, KERN_WARNING,
+                                "first meta block group too large: %u "
+                                "(group descriptor block count %u)",
+diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
+index de7b4f97ac75..be519416c112 100644
+--- a/fs/gfs2/incore.h
++++ b/fs/gfs2/incore.h
+@@ -207,7 +207,7 @@ struct lm_lockname {
+       struct gfs2_sbd *ln_sbd;
+       u64 ln_number;
+       unsigned int ln_type;
+-};
++} __packed __aligned(sizeof(int));
+ 
+ #define lm_name_equal(name1, name2) \
+         (((name1)->ln_number == (name2)->ln_number) &&        \
+diff --git a/include/linux/log2.h b/include/linux/log2.h
+index fd7ff3d91e6a..f38fae23bdac 100644
+--- a/include/linux/log2.h
++++ b/include/linux/log2.h
+@@ -16,12 +16,6 @@
+ #include <linux/bitops.h>
+ 
+ /*
+- * deal with unrepresentable constant logarithms
+- */
+-extern __attribute__((const, noreturn))
+-int ____ilog2_NaN(void);
+-
+-/*
+  * non-constant log of base 2 calculators
+  * - the arch may override these in asm/bitops.h if they can be implemented
+  *   more efficiently than using fls() and fls64()
+@@ -85,7 +79,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
+ #define ilog2(n)                              \
+ (                                             \
+       __builtin_constant_p(n) ? (             \
+-              (n) < 1 ? ____ilog2_NaN() :     \
++              (n) < 2 ? 0 :                   \
+               (n) & (1ULL << 63) ? 63 :       \
+               (n) & (1ULL << 62) ? 62 :       \
+               (n) & (1ULL << 61) ? 61 :       \
+@@ -148,10 +142,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
+               (n) & (1ULL <<  4) ?  4 :       \
+               (n) & (1ULL <<  3) ?  3 :       \
+               (n) & (1ULL <<  2) ?  2 :       \
+-              (n) & (1ULL <<  1) ?  1 :       \
+-              (n) & (1ULL <<  0) ?  0 :       \
+-              ____ilog2_NaN()                 \
+-                                 ) :          \
++              1 ) :                           \
+       (sizeof(n) <= 4) ?                      \
+       __ilog2_u32(n) :                        \
+       __ilog2_u64(n)                          \
+diff --git a/include/linux/usb.h b/include/linux/usb.h
+index 12891ffd4bf0..8c75af6b7d5b 100644
+--- a/include/linux/usb.h
++++ b/include/linux/usb.h
+@@ -371,14 +371,13 @@ struct usb_bus {
+ 
+       int devnum_next;                /* Next open device number in
+                                        * round-robin allocation */
++      struct mutex devnum_next_mutex; /* devnum_next mutex */
+ 
+       struct usb_devmap devmap;       /* device address allocation map */
+       struct usb_device *root_hub;    /* Root hub */
+       struct usb_bus *hs_companion;   /* Companion EHCI bus, if any */
+       struct list_head bus_list;      /* list of busses */
+ 
+-      struct mutex usb_address0_mutex; /* unaddressed device mutex */
+-
+       int bandwidth_allocated;        /* on this bus: how much of the time
+                                        * reserved for periodic (intr/iso)
+                                        * requests is used, on average?
+diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
+index f89c24bd53a4..3993b21f3d11 100644
+--- a/include/linux/usb/hcd.h
++++ b/include/linux/usb/hcd.h
+@@ -180,6 +180,7 @@ struct usb_hcd {
+        * bandwidth_mutex should be dropped after a successful control message
+        * to the device, or resetting the bandwidth after a failed attempt.
+        */
++      struct mutex            *address0_mutex;
+       struct mutex            *bandwidth_mutex;
+       struct usb_hcd          *shared_hcd;
+       struct usb_hcd          *primary_hcd;
+diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
+index 4d1c46aac331..c7b1dc713cdd 100644
+--- a/include/scsi/libiscsi.h
++++ b/include/scsi/libiscsi.h
+@@ -196,6 +196,7 @@ struct iscsi_conn {
+       struct iscsi_task       *task;          /* xmit task in progress */
+ 
+       /* xmit */
++      spinlock_t              taskqueuelock;  /* protects the next three 
lists */
+       struct list_head        mgmtqueue;      /* mgmt (control) xmit queue */
+       struct list_head        cmdqueue;       /* data-path cmd queue */
+       struct list_head        requeue;        /* tasks needing another run */
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 9bbe9ac23cf2..e4b5494f05f8 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -9230,7 +9230,7 @@ static int perf_event_init_context(struct task_struct 
*child, int ctxn)
+               ret = inherit_task_group(event, parent, parent_ctx,
+                                        child, ctxn, &inherited_all);
+               if (ret)
+-                      break;
++                      goto out_unlock;
+       }
+ 
+       /*
+@@ -9246,7 +9246,7 @@ static int perf_event_init_context(struct task_struct 
*child, int ctxn)
+               ret = inherit_task_group(event, parent, parent_ctx,
+                                        child, ctxn, &inherited_all);
+               if (ret)
+-                      break;
++                      goto out_unlock;
+       }
+ 
+       raw_spin_lock_irqsave(&parent_ctx->lock, flags);
+@@ -9274,6 +9274,7 @@ static int perf_event_init_context(struct task_struct 
*child, int ctxn)
+       }
+ 
+       raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
++out_unlock:
+       mutex_unlock(&parent_ctx->mutex);
+ 
+       perf_unpin_context(parent_ctx);
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 2e55b53399de..278a2ddad351 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -331,13 +331,14 @@ void set_task_stack_end_magic(struct task_struct *tsk)
+       *stackend = STACK_END_MAGIC;    /* for overflow detection */
+ }
+ 
+-static struct task_struct *dup_task_struct(struct task_struct *orig)
++static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
+ {
+       struct task_struct *tsk;
+       struct thread_info *ti;
+-      int node = tsk_fork_get_node(orig);
+       int err;
+ 
++      if (node == NUMA_NO_NODE)
++              node = tsk_fork_get_node(orig);
+       tsk = alloc_task_struct_node(node);
+       if (!tsk)
+               return NULL;
+@@ -1270,7 +1271,8 @@ static struct task_struct *copy_process(unsigned long 
clone_flags,
+                                       int __user *child_tidptr,
+                                       struct pid *pid,
+                                       int trace,
+-                                      unsigned long tls)
++                                      unsigned long tls,
++                                      int node)
+ {
+       int retval;
+       struct task_struct *p;
+@@ -1323,7 +1325,7 @@ static struct task_struct *copy_process(unsigned long 
clone_flags,
+               goto fork_out;
+ 
+       retval = -ENOMEM;
+-      p = dup_task_struct(current);
++      p = dup_task_struct(current, node);
+       if (!p)
+               goto fork_out;
+ 
+@@ -1699,7 +1701,8 @@ static inline void init_idle_pids(struct pid_link *links)
+ struct task_struct *fork_idle(int cpu)
+ {
+       struct task_struct *task;
+-      task = copy_process(CLONE_VM, 0, 0, NULL, &init_struct_pid, 0, 0);
++      task = copy_process(CLONE_VM, 0, 0, NULL, &init_struct_pid, 0, 0,
++                          cpu_to_node(cpu));
+       if (!IS_ERR(task)) {
+               init_idle_pids(task->pids);
+               init_idle(task, cpu);
+@@ -1744,7 +1747,7 @@ long _do_fork(unsigned long clone_flags,
+       }
+ 
+       p = copy_process(clone_flags, stack_start, stack_size,
+-                       child_tidptr, NULL, trace, tls);
++                       child_tidptr, NULL, trace, tls, NUMA_NO_NODE);
+       /*
+        * Do this prior waking up the new thread - the thread pointer
+        * might get invalid after that point, if the thread exits quickly.
+diff --git a/mm/percpu.c b/mm/percpu.c
+index 1f376bce413c..ef6353f0adbd 100644
+--- a/mm/percpu.c
++++ b/mm/percpu.c
+@@ -1012,8 +1012,11 @@ area_found:
+               mutex_unlock(&pcpu_alloc_mutex);
+       }
+ 
+-      if (chunk != pcpu_reserved_chunk)
++      if (chunk != pcpu_reserved_chunk) {
++              spin_lock_irqsave(&pcpu_lock, flags);
+               pcpu_nr_empty_pop_pages -= occ_pages;
++              spin_unlock_irqrestore(&pcpu_lock, flags);
++      }
+ 
+       if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW)
+               pcpu_schedule_balance_work();
+diff --git a/tools/include/linux/log2.h b/tools/include/linux/log2.h
+index 41446668ccce..d5677d39c1e4 100644
+--- a/tools/include/linux/log2.h
++++ b/tools/include/linux/log2.h
+@@ -13,12 +13,6 @@
+ #define _TOOLS_LINUX_LOG2_H
+ 
+ /*
+- * deal with unrepresentable constant logarithms
+- */
+-extern __attribute__((const, noreturn))
+-int ____ilog2_NaN(void);
+-
+-/*
+  * non-constant log of base 2 calculators
+  * - the arch may override these in asm/bitops.h if they can be implemented
+  *   more efficiently than using fls() and fls64()
+@@ -78,7 +72,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
+ #define ilog2(n)                              \
+ (                                             \
+       __builtin_constant_p(n) ? (             \
+-              (n) < 1 ? ____ilog2_NaN() :     \
++              (n) < 2 ? 0 :                   \
+               (n) & (1ULL << 63) ? 63 :       \
+               (n) & (1ULL << 62) ? 62 :       \
+               (n) & (1ULL << 61) ? 61 :       \
+@@ -141,10 +135,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
+               (n) & (1ULL <<  4) ?  4 :       \
+               (n) & (1ULL <<  3) ?  3 :       \
+               (n) & (1ULL <<  2) ?  2 :       \
+-              (n) & (1ULL <<  1) ?  1 :       \
+-              (n) & (1ULL <<  0) ?  0 :       \
+-              ____ilog2_NaN()                 \
+-                                 ) :          \
++              1 ) :                           \
+       (sizeof(n) <= 4) ?                      \
+       __ilog2_u32(n) :                        \
+       __ilog2_u64(n)                          \

Reply via email to