commit:     332e735eb1f56098e3ccdcca5f6502d114d98c60
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Oct 22 23:00:30 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Oct 22 23:00:30 2015 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=332e735e

Linux patch 3.12.49

 1048_linux-3.12.49.patch | 3673 ++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 3673 insertions(+)

diff --git a/1048_linux-3.12.49.patch b/1048_linux-3.12.49.patch
new file mode 100644
index 0000000..30795f1
--- /dev/null
+++ b/1048_linux-3.12.49.patch
@@ -0,0 +1,3673 @@
+diff --git a/Makefile b/Makefile
+index a01f2573731d..b2985713121c 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 48
++SUBLEVEL = 49
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+ 
+diff --git a/arch/arm/mach-omap2/clockdomains7xx_data.c b/arch/arm/mach-omap2/clockdomains7xx_data.c
+index 57d5df0c1fbd..7581e036bda6 100644
+--- a/arch/arm/mach-omap2/clockdomains7xx_data.c
++++ b/arch/arm/mach-omap2/clockdomains7xx_data.c
+@@ -331,7 +331,7 @@ static struct clockdomain l4per2_7xx_clkdm = {
+       .dep_bit          = DRA7XX_L4PER2_STATDEP_SHIFT,
+       .wkdep_srcs       = l4per2_wkup_sleep_deps,
+       .sleepdep_srcs    = l4per2_wkup_sleep_deps,
+-      .flags            = CLKDM_CAN_HWSUP_SWSUP,
++      .flags            = CLKDM_CAN_SWSUP,
+ };
+ 
+ static struct clockdomain mpu0_7xx_clkdm = {
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index fe70eaea0e28..c6ab435557b2 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -56,6 +56,10 @@ config NO_IOPORT
+ config STACKTRACE_SUPPORT
+       def_bool y
+ 
++config ILLEGAL_POINTER_VALUE
++      hex
++      default 0xdead000000000000
++
+ config LOCKDEP_SUPPORT
+       def_bool y
+ 
+@@ -265,6 +269,22 @@ config SYSVIPC_COMPAT
+       def_bool y
+       depends on COMPAT && SYSVIPC
+ 
++config ARM64_ERRATUM_843419
++      bool "Cortex-A53: 843419: A load or store might access an incorrect address"
++      depends on MODULES
++      default y
++      help
++        This option builds kernel modules using the large memory model in
++        order to avoid the use of the ADRP instruction, which can cause
++        a subsequent memory access to use an incorrect address on Cortex-A53
++        parts up to r0p4.
++
++        Note that the kernel itself must be linked with a version of ld
++        which fixes potentially affected ADRP instructions through the
++        use of veneers.
++
++        If unsure, say Y.
++
+ endmenu
+ 
+ source "net/Kconfig"
+diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
+index d90cf79f233a..4148c05df99a 100644
+--- a/arch/arm64/Makefile
++++ b/arch/arm64/Makefile
+@@ -28,6 +28,10 @@ comma = ,
+ 
+ CHECKFLAGS    += -D__aarch64__
+ 
++ifeq ($(CONFIG_ARM64_ERRATUM_843419), y)
++CFLAGS_MODULE += -mcmodel=large
++endif
++
+ # Default value
+ head-y                := arch/arm64/kernel/head.o
+ 
+diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
+index 7090c126797c..aca41b06dc7a 100644
+--- a/arch/arm64/kernel/head.S
++++ b/arch/arm64/kernel/head.S
+@@ -192,6 +192,11 @@ ENTRY(el2_setup)
+       msr     hstr_el2, xzr                   // Disable CP15 traps to EL2
+ #endif
+ 
++      /* EL2 debug */
++      mrs     x0, pmcr_el0                    // Disable debug access traps
++      ubfx    x0, x0, #11, #5                 // to EL2 and allow access to
++      msr     mdcr_el2, x0                    // all PMU counters from EL1
++
+       /* Stage-2 translation */
+       msr     vttbr_el2, xzr
+ 
+diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
+index ca0e3d55da99..9589a92f6332 100644
+--- a/arch/arm64/kernel/module.c
++++ b/arch/arm64/kernel/module.c
+@@ -390,12 +390,14 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
+                       ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
+                                            INSN_IMM_ADR);
+                       break;
++#ifndef CONFIG_ARM64_ERRATUM_843419
+               case R_AARCH64_ADR_PREL_PG_HI21_NC:
+                       overflow_check = false;
+               case R_AARCH64_ADR_PREL_PG_HI21:
+                       ovf = reloc_insn_imm(RELOC_OP_PAGE, loc, val, 12, 21,
+                                            INSN_IMM_ADR);
+                       break;
++#endif
+               case R_AARCH64_ADD_ABS_LO12_NC:
+               case R_AARCH64_LDST8_ABS_LO12_NC:
+                       overflow_check = false;
+diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
+index b9564b8d6bab..1e60acc6a4d7 100644
+--- a/arch/arm64/kernel/signal32.c
++++ b/arch/arm64/kernel/signal32.c
+@@ -231,14 +231,32 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
+ 
+ /*
+  * VFP save/restore code.
++ *
++ * We have to be careful with endianness, since the fpsimd context-switch
++ * code operates on 128-bit (Q) register values whereas the compat ABI
++ * uses an array of 64-bit (D) registers. Consequently, we need to swap
++ * the two halves of each Q register when running on a big-endian CPU.
+  */
++union __fpsimd_vreg {
++      __uint128_t     raw;
++      struct {
++#ifdef __AARCH64EB__
++              u64     hi;
++              u64     lo;
++#else
++              u64     lo;
++              u64     hi;
++#endif
++      };
++};
++
+ static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame)
+ {
+       struct fpsimd_state *fpsimd = &current->thread.fpsimd_state;
+       compat_ulong_t magic = VFP_MAGIC;
+       compat_ulong_t size = VFP_STORAGE_SIZE;
+       compat_ulong_t fpscr, fpexc;
+-      int err = 0;
++      int i, err = 0;
+ 
+       /*
+        * Save the hardware registers to the fpsimd_state structure.
+@@ -254,10 +272,15 @@ static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame)
+       /*
+        * Now copy the FP registers. Since the registers are packed,
+        * we can copy the prefix we want (V0-V15) as it is.
+-       * FIXME: Won't work if big endian.
+        */
+-      err |= __copy_to_user(&frame->ufp.fpregs, fpsimd->vregs,
+-                            sizeof(frame->ufp.fpregs));
++      for (i = 0; i < ARRAY_SIZE(frame->ufp.fpregs); i += 2) {
++              union __fpsimd_vreg vreg = {
++                      .raw = fpsimd->vregs[i >> 1],
++              };
++
++              __put_user_error(vreg.lo, &frame->ufp.fpregs[i], err);
++              __put_user_error(vreg.hi, &frame->ufp.fpregs[i + 1], err);
++      }
+ 
+       /* Create an AArch32 fpscr from the fpsr and the fpcr. */
+       fpscr = (fpsimd->fpsr & VFP_FPSCR_STAT_MASK) |
+@@ -282,7 +305,7 @@ static int compat_restore_vfp_context(struct compat_vfp_sigframe __user *frame)
+       compat_ulong_t magic = VFP_MAGIC;
+       compat_ulong_t size = VFP_STORAGE_SIZE;
+       compat_ulong_t fpscr;
+-      int err = 0;
++      int i, err = 0;
+ 
+       __get_user_error(magic, &frame->magic, err);
+       __get_user_error(size, &frame->size, err);
+@@ -292,12 +315,14 @@ static int compat_restore_vfp_context(struct compat_vfp_sigframe __user *frame)
+       if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
+               return -EINVAL;
+ 
+-      /*
+-       * Copy the FP registers into the start of the fpsimd_state.
+-       * FIXME: Won't work if big endian.
+-       */
+-      err |= __copy_from_user(fpsimd.vregs, frame->ufp.fpregs,
+-                              sizeof(frame->ufp.fpregs));
++      /* Copy the FP registers into the start of the fpsimd_state. */
++      for (i = 0; i < ARRAY_SIZE(frame->ufp.fpregs); i += 2) {
++              union __fpsimd_vreg vreg;
++
++              __get_user_error(vreg.lo, &frame->ufp.fpregs[i], err);
++              __get_user_error(vreg.hi, &frame->ufp.fpregs[i + 1], err);
++              fpsimd.vregs[i >> 1] = vreg.raw;
++      }
+ 
+       /* Extract the fpsr and the fpcr from the fpscr */
+       __get_user_error(fpscr, &frame->ufp.fpscr, err);
+diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
+index a255167baf6a..44b00eb7b340 100644
+--- a/arch/arm64/kvm/hyp.S
++++ b/arch/arm64/kvm/hyp.S
+@@ -472,8 +472,6 @@ __kvm_hyp_code_start:
+       mrs     x3, cntv_ctl_el0
+       and     x3, x3, #3
+       str     w3, [x0, #VCPU_TIMER_CNTV_CTL]
+-      bic     x3, x3, #1              // Clear Enable
+-      msr     cntv_ctl_el0, x3
+ 
+       isb
+ 
+@@ -481,6 +479,9 @@ __kvm_hyp_code_start:
+       str     x3, [x0, #VCPU_TIMER_CNTV_CVAL]
+ 
+ 1:
++      // Disable the virtual timer
++      msr     cntv_ctl_el0, xzr
++
+       // Allow physical timer/counter access for the host
+       mrs     x2, cnthctl_el2
+       orr     x2, x2, #3
+diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
+index 81a02a8762b0..86825f8883de 100644
+--- a/arch/arm64/kvm/inject_fault.c
++++ b/arch/arm64/kvm/inject_fault.c
+@@ -168,8 +168,8 @@ void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
+ {
+       if (!(vcpu->arch.hcr_el2 & HCR_RW))
+               inject_abt32(vcpu, false, addr);
+-
+-      inject_abt64(vcpu, false, addr);
++      else
++              inject_abt64(vcpu, false, addr);
+ }
+ 
+ /**
+@@ -184,8 +184,8 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
+ {
+       if (!(vcpu->arch.hcr_el2 & HCR_RW))
+               inject_abt32(vcpu, true, addr);
+-
+-      inject_abt64(vcpu, true, addr);
++      else
++              inject_abt64(vcpu, true, addr);
+ }
+ 
+ /**
+@@ -198,6 +198,6 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu)
+ {
+       if (!(vcpu->arch.hcr_el2 & HCR_RW))
+               inject_undef32(vcpu);
+-
+-      inject_undef64(vcpu);
++      else
++              inject_undef64(vcpu);
+ }
+diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
+index 2e6443b1e922..c32a37e0e0d2 100644
+--- a/arch/parisc/kernel/irq.c
++++ b/arch/parisc/kernel/irq.c
+@@ -524,8 +524,8 @@ void do_cpu_irq_mask(struct pt_regs *regs)
+       struct pt_regs *old_regs;
+       unsigned long eirr_val;
+       int irq, cpu = smp_processor_id();
+-#ifdef CONFIG_SMP
+       struct irq_desc *desc;
++#ifdef CONFIG_SMP
+       cpumask_t dest;
+ #endif
+ 
+@@ -538,8 +538,12 @@ void do_cpu_irq_mask(struct pt_regs *regs)
+               goto set_out;
+       irq = eirr_to_irq(eirr_val);
+ 
+-#ifdef CONFIG_SMP
++      /* Filter out spurious interrupts, mostly from serial port at bootup */
+       desc = irq_to_desc(irq);
++      if (unlikely(!desc->action))
++              goto set_out;
++
++#ifdef CONFIG_SMP
+       cpumask_copy(&dest, desc->irq_data.affinity);
+       if (irqd_is_per_cpu(&desc->irq_data) &&
+           !cpu_isset(smp_processor_id(), dest)) {
+diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
+index 832a39d042d4..3848deed9472 100644
+--- a/arch/powerpc/include/asm/pgtable-ppc64.h
++++ b/arch/powerpc/include/asm/pgtable-ppc64.h
+@@ -135,7 +135,19 @@
+ #define pte_iterate_hashed_end() } while(0)
+ 
+ #ifdef CONFIG_PPC_HAS_HASH_64K
+-#define pte_pagesize_index(mm, addr, pte)     get_slice_psize(mm, addr)
++/*
++ * We expect this to be called only for user addresses or kernel virtual
++ * addresses other than the linear mapping.
++ */
++#define pte_pagesize_index(mm, addr, pte)                     \
++      ({                                                      \
++              unsigned int psize;                             \
++              if (is_kernel_addr(addr))                       \
++                      psize = MMU_PAGE_4K;                    \
++              else                                            \
++                      psize = get_slice_psize(mm, addr);      \
++              psize;                                          \
++      })
+ #else
+ #define pte_pagesize_index(mm, addr, pte)     MMU_PAGE_4K
+ #endif
+diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
+index 9bd52c65e66f..14de1385dedb 100644
+--- a/arch/powerpc/include/asm/rtas.h
++++ b/arch/powerpc/include/asm/rtas.h
+@@ -255,6 +255,7 @@ extern void rtas_power_off(void);
+ extern void rtas_halt(void);
+ extern void rtas_os_term(char *str);
+ extern int rtas_get_sensor(int sensor, int index, int *state);
++extern int rtas_get_sensor_fast(int sensor, int index, int *state);
+ extern int rtas_get_power_level(int powerdomain, int *level);
+ extern int rtas_set_power_level(int powerdomain, int level, int *setlevel);
+ extern bool rtas_indicator_present(int token, int *maxindex);
+diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
+index 4cf674d7d5ae..c4bc8d6cfd79 100644
+--- a/arch/powerpc/kernel/rtas.c
++++ b/arch/powerpc/kernel/rtas.c
+@@ -584,6 +584,23 @@ int rtas_get_sensor(int sensor, int index, int *state)
+ }
+ EXPORT_SYMBOL(rtas_get_sensor);
+ 
++int rtas_get_sensor_fast(int sensor, int index, int *state)
++{
++      int token = rtas_token("get-sensor-state");
++      int rc;
++
++      if (token == RTAS_UNKNOWN_SERVICE)
++              return -ENOENT;
++
++      rc = rtas_call(token, 2, 2, state, sensor, index);
++      WARN_ON(rc == RTAS_BUSY || (rc >= RTAS_EXTENDED_DELAY_MIN &&
++                                  rc <= RTAS_EXTENDED_DELAY_MAX));
++
++      if (rc < 0)
++              return rtas_error_rc(rc);
++      return rc;
++}
++
+ bool rtas_indicator_present(int token, int *maxindex)
+ {
+       int proplen, count, i;
+diff --git a/arch/powerpc/mm/hugepage-hash64.c b/arch/powerpc/mm/hugepage-hash64.c
+index 7d86c868040d..9836e8032d0b 100644
+--- a/arch/powerpc/mm/hugepage-hash64.c
++++ b/arch/powerpc/mm/hugepage-hash64.c
+@@ -136,7 +136,6 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
+       BUG_ON(index >= 4096);
+ 
+       vpn = hpt_vpn(ea, vsid, ssize);
+-      hash = hpt_hash(vpn, shift, ssize);
+       hpte_slot_array = get_hpte_slot_array(pmdp);
+       if (psize == MMU_PAGE_4K) {
+               /*
+@@ -151,6 +150,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
+       valid = hpte_valid(hpte_slot_array, index);
+       if (valid) {
+               /* update the hpte bits */
++              hash = hpt_hash(vpn, shift, ssize);
+               hidx =  hpte_hash_index(hpte_slot_array, index);
+               if (hidx & _PTEIDX_SECONDARY)
+                       hash = ~hash;
+@@ -176,6 +176,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
+       if (!valid) {
+               unsigned long hpte_group;
+ 
++              hash = hpt_hash(vpn, shift, ssize);
+               /* insert new entry */
+               pa = pmd_pfn(__pmd(old_pmd)) << PAGE_SHIFT;
+               new_pmd |= _PAGE_HASHPTE;
+diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
+index 7dcf8628f626..52746b3caf08 100644
+--- a/arch/powerpc/platforms/powernv/pci.c
++++ b/arch/powerpc/platforms/powernv/pci.c
+@@ -109,6 +109,7 @@ static void pnv_teardown_msi_irqs(struct pci_dev *pdev)
+       struct pci_controller *hose = pci_bus_to_host(pdev->bus);
+       struct pnv_phb *phb = hose->private_data;
+       struct msi_desc *entry;
++      irq_hw_number_t hwirq;
+ 
+       if (WARN_ON(!phb))
+               return;
+@@ -116,10 +117,10 @@ static void pnv_teardown_msi_irqs(struct pci_dev *pdev)
+       list_for_each_entry(entry, &pdev->msi_list, list) {
+               if (entry->irq == NO_IRQ)
+                       continue;
++              hwirq = virq_to_hw(entry->irq);
+               irq_set_msi_desc(entry->irq, NULL);
+-              msi_bitmap_free_hwirqs(&phb->msi_bmp,
+-                      virq_to_hw(entry->irq) - phb->msi_base, 1);
+               irq_dispose_mapping(entry->irq);
++              msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq - phb->msi_base, 1);
+       }
+ }
+ #endif /* CONFIG_PCI_MSI */
+diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
+index 721c0586b284..50fd3ac7b7bf 100644
+--- a/arch/powerpc/platforms/pseries/ras.c
++++ b/arch/powerpc/platforms/pseries/ras.c
+@@ -187,7 +187,8 @@ static irqreturn_t ras_epow_interrupt(int irq, void *dev_id)
+       int state;
+       int critical;
+ 
+-      status = rtas_get_sensor(EPOW_SENSOR_TOKEN, EPOW_SENSOR_INDEX, &state);
++      status = rtas_get_sensor_fast(EPOW_SENSOR_TOKEN, EPOW_SENSOR_INDEX,
++                                    &state);
+ 
+       if (state > 3)
+               critical = 1;           /* Time Critical */
+diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
+index 77efbaec7b9c..4a9b36777775 100644
+--- a/arch/powerpc/sysdev/fsl_msi.c
++++ b/arch/powerpc/sysdev/fsl_msi.c
+@@ -121,15 +121,16 @@ static void fsl_teardown_msi_irqs(struct pci_dev *pdev)
+ {
+       struct msi_desc *entry;
+       struct fsl_msi *msi_data;
++      irq_hw_number_t hwirq;
+ 
+       list_for_each_entry(entry, &pdev->msi_list, list) {
+               if (entry->irq == NO_IRQ)
+                       continue;
++              hwirq = virq_to_hw(entry->irq);
+               msi_data = irq_get_chip_data(entry->irq);
+               irq_set_msi_desc(entry->irq, NULL);
+-              msi_bitmap_free_hwirqs(&msi_data->bitmap,
+-                                     virq_to_hw(entry->irq), 1);
+               irq_dispose_mapping(entry->irq);
++              msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1);
+       }
+ 
+       return;
+diff --git a/arch/powerpc/sysdev/mpic_pasemi_msi.c b/arch/powerpc/sysdev/mpic_pasemi_msi.c
+index 38e62382070c..9e14d82287a1 100644
+--- a/arch/powerpc/sysdev/mpic_pasemi_msi.c
++++ b/arch/powerpc/sysdev/mpic_pasemi_msi.c
+@@ -74,6 +74,7 @@ static int pasemi_msi_check_device(struct pci_dev *pdev, int nvec, int type)
+ static void pasemi_msi_teardown_msi_irqs(struct pci_dev *pdev)
+ {
+       struct msi_desc *entry;
++      irq_hw_number_t hwirq;
+ 
+       pr_debug("pasemi_msi_teardown_msi_irqs, pdev %p\n", pdev);
+ 
+@@ -81,10 +82,11 @@ static void pasemi_msi_teardown_msi_irqs(struct pci_dev *pdev)
+               if (entry->irq == NO_IRQ)
+                       continue;
+ 
++              hwirq = virq_to_hw(entry->irq);
+               irq_set_msi_desc(entry->irq, NULL);
+-              msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap,
+-                                     virq_to_hw(entry->irq), ALLOC_CHUNK);
+               irq_dispose_mapping(entry->irq);
++              msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap,
++                                     hwirq, ALLOC_CHUNK);
+       }
+ 
+       return;
+diff --git a/arch/powerpc/sysdev/mpic_u3msi.c b/arch/powerpc/sysdev/mpic_u3msi.c
+index 9a7aa0ed9c1c..dfc3486bf802 100644
+--- a/arch/powerpc/sysdev/mpic_u3msi.c
++++ b/arch/powerpc/sysdev/mpic_u3msi.c
+@@ -124,15 +124,16 @@ static int u3msi_msi_check_device(struct pci_dev *pdev, int nvec, int type)
+ static void u3msi_teardown_msi_irqs(struct pci_dev *pdev)
+ {
+       struct msi_desc *entry;
++      irq_hw_number_t hwirq;
+ 
+         list_for_each_entry(entry, &pdev->msi_list, list) {
+               if (entry->irq == NO_IRQ)
+                       continue;
+ 
++              hwirq = virq_to_hw(entry->irq);
+               irq_set_msi_desc(entry->irq, NULL);
+-              msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap,
+-                                     virq_to_hw(entry->irq), 1);
+               irq_dispose_mapping(entry->irq);
++              msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, hwirq, 1);
+       }
+ 
+       return;
+diff --git a/arch/powerpc/sysdev/ppc4xx_msi.c b/arch/powerpc/sysdev/ppc4xx_msi.c
+index 43948da837a7..c3e65129940b 100644
+--- a/arch/powerpc/sysdev/ppc4xx_msi.c
++++ b/arch/powerpc/sysdev/ppc4xx_msi.c
+@@ -121,16 +121,17 @@ void ppc4xx_teardown_msi_irqs(struct pci_dev *dev)
+ {
+       struct msi_desc *entry;
+       struct ppc4xx_msi *msi_data = &ppc4xx_msi;
++      irq_hw_number_t hwirq;
+ 
+       dev_dbg(&dev->dev, "PCIE-MSI: tearing down msi irqs\n");
+ 
+       list_for_each_entry(entry, &dev->msi_list, list) {
+               if (entry->irq == NO_IRQ)
+                       continue;
++              hwirq = virq_to_hw(entry->irq);
+               irq_set_msi_desc(entry->irq, NULL);
+-              msi_bitmap_free_hwirqs(&msi_data->bitmap,
+-                              virq_to_hw(entry->irq), 1);
+               irq_dispose_mapping(entry->irq);
++              msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1);
+       }
+ }
+ 
+diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c
+index a8d6f69f92a3..4bcf841e4701 100644
+--- a/arch/x86/crypto/ghash-clmulni-intel_glue.c
++++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c
+@@ -291,6 +291,7 @@ static struct ahash_alg ghash_async_alg = {
+                       .cra_name               = "ghash",
+                       .cra_driver_name        = "ghash-clmulni",
+                       .cra_priority           = 400,
++                      .cra_ctxsize            = sizeof(struct ghash_async_ctx),
+                      .cra_flags              = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+                       .cra_blocksize          = GHASH_BLOCK_SIZE,
+                       .cra_type               = &crypto_ahash_type,
+diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
+index 691337073e1f..7ed99df028ca 100644
+--- a/arch/x86/kernel/entry_64.S
++++ b/arch/x86/kernel/entry_64.S
+@@ -1693,11 +1693,12 @@ ENTRY(nmi)
+        *  If the variable is not set and the stack is not the NMI
+        *  stack then:
+        *    o Set the special variable on the stack
+-       *    o Copy the interrupt frame into a "saved" location on the stack
+-       *    o Copy the interrupt frame into a "copy" location on the stack
++       *    o Copy the interrupt frame into an "outermost" location on the
++       *      stack
++       *    o Copy the interrupt frame into an "iret" location on the stack
+        *    o Continue processing the NMI
+        *  If the variable is set or the previous stack is the NMI stack:
+-       *    o Modify the "copy" location to jump to the repeate_nmi
++       *    o Modify the "iret" location to jump to the repeat_nmi
+        *    o return back to the first NMI
+        *
+        * Now on exit of the first NMI, we first clear the stack variable
+@@ -1777,38 +1778,101 @@ ENTRY(nmi)
+ 
+ .Lnmi_from_kernel:
+       /*
+-       * Check the special variable on the stack to see if NMIs are
+-       * executing.
++       * Here's what our stack frame will look like:
++       * +---------------------------------------------------------+
++       * | original SS                                             |
++       * | original Return RSP                                     |
++       * | original RFLAGS                                         |
++       * | original CS                                             |
++       * | original RIP                                            |
++       * +---------------------------------------------------------+
++       * | temp storage for rdx                                    |
++       * +---------------------------------------------------------+
++       * | "NMI executing" variable                                |
++       * +---------------------------------------------------------+
++       * | iret SS          } Copied from "outermost" frame        |
++       * | iret Return RSP  } on each loop iteration; overwritten  |
++       * | iret RFLAGS      } by a nested NMI to force another     |
++       * | iret CS          } iteration if needed.                 |
++       * | iret RIP         }                                      |
++       * +---------------------------------------------------------+
++       * | outermost SS          } initialized in first_nmi;       |
++       * | outermost Return RSP  } will not be changed before      |
++       * | outermost RFLAGS      } NMI processing is done.         |
++       * | outermost CS          } Copied to "iret" frame on each  |
++       * | outermost RIP         } iteration.                      |
++       * +---------------------------------------------------------+
++       * | pt_regs                                                 |
++       * +---------------------------------------------------------+
++       *
++       * The "original" frame is used by hardware.  Before re-enabling
++       * NMIs, we need to be done with it, and we need to leave enough
++       * space for the asm code here.
++       *
++       * We return by executing IRET while RSP points to the "iret" frame.
++       * That will either return for real or it will loop back into NMI
++       * processing.
++       *
++       * The "outermost" frame is copied to the "iret" frame on each
++       * iteration of the loop, so each iteration starts with the "iret"
++       * frame pointing to the final return target.
++       */
++
++      /*
++       * Determine whether we're a nested NMI.
++       *
++       * If we interrupted kernel code between repeat_nmi and
++       * end_repeat_nmi, then we are a nested NMI.  We must not
++       * modify the "iret" frame because it's being written by
++       * the outer NMI.  That's okay; the outer NMI handler is
++       * about to call do_nmi anyway, so we can just
++       * resume the outer NMI.
++       */
++      movq    $repeat_nmi, %rdx
++      cmpq    8(%rsp), %rdx
++      ja      1f
++      movq    $end_repeat_nmi, %rdx
++      cmpq    8(%rsp), %rdx
++      ja      nested_nmi_out
++1:
++
++      /*
++       * Now check "NMI executing".  If it's set, then we're nested.
++       * This will not detect if we interrupted an outer NMI just
++       * before IRET.
+        */
+       cmpl $1, -8(%rsp)
+       je nested_nmi
+ 
+       /*
+-       * Now test if the previous stack was an NMI stack.
+-       * We need the double check. We check the NMI stack to satisfy the
+-       * race when the first NMI clears the variable before returning.
+-       * We check the variable because the first NMI could be in a
+-       * breakpoint routine using a breakpoint stack.
++       * Now test if the previous stack was an NMI stack.  This covers
++       * the case where we interrupt an outer NMI after it clears
++       * "NMI executing" but before IRET.  We need to be careful, though:
++       * there is one case in which RSP could point to the NMI stack
++       * despite there being no NMI active: naughty userspace controls
++       * RSP at the very beginning of the SYSCALL targets.  We can
++       * pull a fast one on naughty userspace, though: we program
++       * SYSCALL to mask DF, so userspace cannot cause DF to be set
++       * if it controls the kernel's RSP.  We set DF before we clear
++       * "NMI executing".
+        */
+       lea 6*8(%rsp), %rdx
+       test_in_nmi rdx, 4*8(%rsp), nested_nmi, first_nmi
++
++      /* Ah, it is within the NMI stack. */
++
++      testb   $(X86_EFLAGS_DF >> 8), (3*8 + 1)(%rsp)
++      jz      first_nmi       /* RSP was user controlled. */
++
++      /* This is a nested NMI. */
++
+       CFI_REMEMBER_STATE
+ 
+ nested_nmi:
+       /*
+-       * Do nothing if we interrupted the fixup in repeat_nmi.
+-       * It's about to repeat the NMI handler, so we are fine
+-       * with ignoring this one.
++       * Modify the "iret" frame to point to repeat_nmi, forcing another
++       * iteration of NMI handling.
+        */
+-      movq $repeat_nmi, %rdx
+-      cmpq 8(%rsp), %rdx
+-      ja 1f
+-      movq $end_repeat_nmi, %rdx
+-      cmpq 8(%rsp), %rdx
+-      ja nested_nmi_out
+-
+-1:
+-      /* Set up the interrupted NMIs stack to jump to repeat_nmi */
+       leaq -1*8(%rsp), %rdx
+       movq %rdx, %rsp
+       CFI_ADJUST_CFA_OFFSET 1*8
+@@ -1827,60 +1891,23 @@ nested_nmi_out:
+       popq_cfi %rdx
+       CFI_RESTORE rdx
+ 
+-      /* No need to check faults here */
++      /* We are returning to kernel mode, so this cannot result in a fault. */
+       INTERRUPT_RETURN
+ 
+       CFI_RESTORE_STATE
+ first_nmi:
+-      /*
+-       * Because nested NMIs will use the pushed location that we
+-       * stored in rdx, we must keep that space available.
+-       * Here's what our stack frame will look like:
+-       * +-------------------------+
+-       * | original SS             |
+-       * | original Return RSP     |
+-       * | original RFLAGS         |
+-       * | original CS             |
+-       * | original RIP            |
+-       * +-------------------------+
+-       * | temp storage for rdx    |
+-       * +-------------------------+
+-       * | NMI executing variable  |
+-       * +-------------------------+
+-       * | copied SS               |
+-       * | copied Return RSP       |
+-       * | copied RFLAGS           |
+-       * | copied CS               |
+-       * | copied RIP              |
+-       * +-------------------------+
+-       * | Saved SS                |
+-       * | Saved Return RSP        |
+-       * | Saved RFLAGS            |
+-       * | Saved CS                |
+-       * | Saved RIP               |
+-       * +-------------------------+
+-       * | pt_regs                 |
+-       * +-------------------------+
+-       *
+-       * The saved stack frame is used to fix up the copied stack frame
+-       * that a nested NMI may change to make the interrupted NMI iret jump
+-       * to the repeat_nmi. The original stack frame and the temp storage
+-       * is also used by nested NMIs and can not be trusted on exit.
+-       */
+-      /* Do not pop rdx, nested NMIs will corrupt that part of the stack */
++      /* Restore rdx. */
+       movq (%rsp), %rdx
+       CFI_RESTORE rdx
+ 
+-      /* Set the NMI executing variable on the stack. */
++      /* Set "NMI executing" on the stack. */
+       pushq_cfi $1
+ 
+-      /*
+-       * Leave room for the "copied" frame
+-       */
++      /* Leave room for the "iret" frame */
+       subq $(5*8), %rsp
+       CFI_ADJUST_CFA_OFFSET 5*8
+ 
+-      /* Copy the stack frame to the Saved frame */
++      /* Copy the "original" frame to the "outermost" frame */
+       .rept 5
+       pushq_cfi 11*8(%rsp)
+       .endr
+@@ -1888,6 +1915,7 @@ first_nmi:
+ 
+       /* Everything up to here is safe from nested NMIs */
+ 
++repeat_nmi:
+       /*
+        * If there was a nested NMI, the first NMI's iret will return
+        * here. But NMIs are still enabled and we can take another
+@@ -1896,16 +1924,21 @@ first_nmi:
+        * it will just return, as we are about to repeat an NMI anyway.
+        * This makes it safe to copy to the stack frame that a nested
+        * NMI will update.
+-       */
+-repeat_nmi:
+-      /*
+-       * Update the stack variable to say we are still in NMI (the update
+-       * is benign for the non-repeat case, where 1 was pushed just above
+-       * to this very stack slot).
++       *
++       * RSP is pointing to "outermost RIP".  gsbase is unknown, but, if
++       * we're repeating an NMI, gsbase has the same value that it had on
++       * the first iteration.  paranoid_entry will load the kernel
++       * gsbase if needed before we call do_nmi.
++       *
++       * Set "NMI executing" in case we came back here via IRET.
+        */
+       movq $1, 10*8(%rsp)
+ 
+-      /* Make another copy, this one may be modified by nested NMIs */
++      /*
++       * Copy the "outermost" frame to the "iret" frame.  NMIs that nest
++       * here must not modify the "iret" frame while we're writing to
++       * it or it will end up containing garbage.
++       */
+       addq $(10*8), %rsp
+       CFI_ADJUST_CFA_OFFSET -10*8
+       .rept 5
+@@ -1916,9 +1949,9 @@ repeat_nmi:
+ end_repeat_nmi:
+ 
+       /*
+-       * Everything below this point can be preempted by a nested
+-       * NMI if the first NMI took an exception and reset our iret stack
+-       * so that we repeat another NMI.
++       * Everything below this point can be preempted by a nested NMI.
++       * If this happens, then the inner NMI will change the "iret"
++       * frame to point back to repeat_nmi.
+        */
+       pushq_cfi $-1           /* ORIG_RAX: no syscall to restart */
+       subq $ORIG_RAX-R15, %rsp
+@@ -1946,9 +1979,23 @@ nmi_restore:
+       /* Pop the extra iret frame at once */
+       RESTORE_ALL 6*8
+ 
+-      /* Clear the NMI executing stack variable */
+-      movq $0, 5*8(%rsp)
+-      jmp irq_return
++      /*
++       * Clear "NMI executing".  Set DF first so that we can easily
++       * distinguish the remaining code between here and IRET from
++       * the SYSCALL entry and exit paths.  On a native kernel, we
++       * could just inspect RIP, but, on paravirt kernels,
++       * INTERRUPT_RETURN can translate into a jump into a
++       * hypercall page.
++       */
++      std
++      movq    $0, 5*8(%rsp)           /* clear "NMI executing" */
++
++      /*
++       * INTERRUPT_RETURN reads the "iret" frame and exits the NMI
++       * stack in a single instruction.  We are returning to kernel
++       * mode, so this cannot result in a fault.
++       */
++      INTERRUPT_RETURN
+       CFI_ENDPROC
+ END(nmi)
+ 
+diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
+index b82e0fdc7edb..85ede73e5ed7 100644
+--- a/arch/x86/kernel/nmi.c
++++ b/arch/x86/kernel/nmi.c
+@@ -392,8 +392,8 @@ static __kprobes void default_do_nmi(struct pt_regs *regs)
+ }
+ 
+ /*
+- * NMIs can hit breakpoints which will cause it to lose its NMI context
+- * with the CPU when the breakpoint or page fault does an IRET.
++ * NMIs can page fault or hit breakpoints which will cause it to lose
++ * its NMI context with the CPU when the breakpoint or page fault does an 
IRET.
+  *
+  * As a result, NMIs can nest if NMIs get unmasked due an IRET during
+  * NMI processing.  On x86_64, the asm glue protects us from nested NMIs
+diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
+index b759853d78fe..caff13e0c993 100644
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -381,12 +381,6 @@ static u64 __get_spte_lockless(u64 *sptep)
+ {
+       return ACCESS_ONCE(*sptep);
+ }
+-
+-static bool __check_direct_spte_mmio_pf(u64 spte)
+-{
+-      /* It is valid if the spte is zapped. */
+-      return spte == 0ull;
+-}
+ #else
+ union split_spte {
+       struct {
+@@ -502,23 +496,6 @@ retry:
+ 
+       return spte.spte;
+ }
+-
+-static bool __check_direct_spte_mmio_pf(u64 spte)
+-{
+-      union split_spte sspte = (union split_spte)spte;
+-      u32 high_mmio_mask = shadow_mmio_mask >> 32;
+-
+-      /* It is valid if the spte is zapped. */
+-      if (spte == 0ull)
+-              return true;
+-
+-      /* It is valid if the spte is being zapped. */
+-      if (sspte.spte_low == 0ull &&
+-          (sspte.spte_high & high_mmio_mask) == high_mmio_mask)
+-              return true;
+-
+-      return false;
+-}
+ #endif
+ 
+ static bool spte_is_locklessly_modifiable(u64 spte)
+@@ -3219,21 +3196,6 @@ static bool quickly_check_mmio_pf(struct kvm_vcpu 
*vcpu, u64 addr, bool direct)
+       return vcpu_match_mmio_gva(vcpu, addr);
+ }
+ 
+-
+-/*
+- * On direct hosts, the last spte is only allows two states
+- * for mmio page fault:
+- *   - It is the mmio spte
+- *   - It is zapped or it is being zapped.
+- *
+- * This function completely checks the spte when the last spte
+- * is not the mmio spte.
+- */
+-static bool check_direct_spte_mmio_pf(u64 spte)
+-{
+-      return __check_direct_spte_mmio_pf(spte);
+-}
+-
+ static u64 walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr)
+ {
+       struct kvm_shadow_walk_iterator iterator;
+@@ -3276,13 +3238,6 @@ int handle_mmio_page_fault_common(struct kvm_vcpu 
*vcpu, u64 addr, bool direct)
+       }
+ 
+       /*
+-       * It's ok if the gva is remapped by other cpus on shadow guest,
+-       * it's a BUG if the gfn is not a mmio page.
+-       */
+-      if (direct && !check_direct_spte_mmio_pf(spte))
+-              return RET_MMIO_PF_BUG;
+-
+-      /*
+        * If the page table is zapped by other cpus, let CPU fault again on
+        * the address.
+        */
+diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
+index 4287f1ffba7e..948e91b731a2 100644
+--- a/arch/x86/mm/init_32.c
++++ b/arch/x86/mm/init_32.c
+@@ -137,6 +137,7 @@ page_table_range_init_count(unsigned long start, unsigned 
long end)
+ 
+       vaddr = start;
+       pgd_idx = pgd_index(vaddr);
++      pmd_idx = pmd_index(vaddr);
+ 
+       for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd_idx++) {
+               for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
+diff --git a/arch/xtensa/include/asm/traps.h b/arch/xtensa/include/asm/traps.h
+index f2faa58f9a43..03d02df47b9a 100644
+--- a/arch/xtensa/include/asm/traps.h
++++ b/arch/xtensa/include/asm/traps.h
+@@ -24,30 +24,39 @@ static inline void spill_registers(void)
+ {
+ #if XCHAL_NUM_AREGS > 16
+       __asm__ __volatile__ (
+-              "       call12  1f\n"
++              "       call8   1f\n"
+               "       _j      2f\n"
+               "       retw\n"
+               "       .align  4\n"
+               "1:\n"
++#if XCHAL_NUM_AREGS == 32
++              "       _entry  a1, 32\n"
++              "       addi    a8, a0, 3\n"
++              "       _entry  a1, 16\n"
++              "       mov     a12, a12\n"
++              "       retw\n"
++#else
+               "       _entry  a1, 48\n"
+-              "       addi    a12, a0, 3\n"
+-#if XCHAL_NUM_AREGS > 32
+-              "       .rept   (" __stringify(XCHAL_NUM_AREGS) " - 32) / 12\n"
++              "       call12  1f\n"
++              "       retw\n"
++              "       .align  4\n"
++              "1:\n"
++              "       .rept   (" __stringify(XCHAL_NUM_AREGS) " - 16) / 12\n"
+               "       _entry  a1, 48\n"
+               "       mov     a12, a0\n"
+               "       .endr\n"
+-#endif
+-              "       _entry  a1, 48\n"
++              "       _entry  a1, 16\n"
+ #if XCHAL_NUM_AREGS % 12 == 0
+-              "       mov     a8, a8\n"
+-#elif XCHAL_NUM_AREGS % 12 == 4
+               "       mov     a12, a12\n"
+-#elif XCHAL_NUM_AREGS % 12 == 8
++#elif XCHAL_NUM_AREGS % 12 == 4
+               "       mov     a4, a4\n"
++#elif XCHAL_NUM_AREGS % 12 == 8
++              "       mov     a8, a8\n"
+ #endif
+               "       retw\n"
++#endif
+               "2:\n"
+-              : : : "a12", "a13", "memory");
++              : : : "a8", "a9", "memory");
+ #else
+       __asm__ __volatile__ (
+               "       mov     a12, a12\n"
+diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
+index 4b8e636e60da..250c52b14edf 100644
+--- a/arch/xtensa/kernel/entry.S
++++ b/arch/xtensa/kernel/entry.S
+@@ -568,12 +568,13 @@ user_exception_exit:
+        *       (if we have restored WSBITS-1 frames).
+        */
+ 
++2:
+ #if XCHAL_HAVE_THREADPTR
+       l32i    a3, a1, PT_THREADPTR
+       wur     a3, threadptr
+ #endif
+ 
+-2:    j       common_exception_exit
++      j       common_exception_exit
+ 
+       /* This is the kernel exception exit.
+        * We avoided to do a MOVSP when we entered the exception, but we
+@@ -1827,7 +1828,7 @@ ENDPROC(system_call)
+       mov     a12, a0
+       .endr
+ #endif
+-      _entry  a1, 48
++      _entry  a1, 16
+ #if XCHAL_NUM_AREGS % 12 == 0
+       mov     a8, a8
+ #elif XCHAL_NUM_AREGS % 12 == 4
+@@ -1851,7 +1852,7 @@ ENDPROC(system_call)
+ 
+ ENTRY(_switch_to)
+ 
+-      entry   a1, 16
++      entry   a1, 48
+ 
+       mov     a11, a3                 # and 'next' (a3)
+ 
+diff --git a/drivers/auxdisplay/ks0108.c b/drivers/auxdisplay/ks0108.c
+index 5b93852392b8..0d752851a1ee 100644
+--- a/drivers/auxdisplay/ks0108.c
++++ b/drivers/auxdisplay/ks0108.c
+@@ -139,6 +139,7 @@ static int __init ks0108_init(void)
+ 
+       ks0108_pardevice = parport_register_device(ks0108_parport, KS0108_NAME,
+               NULL, NULL, NULL, PARPORT_DEV_EXCL, NULL);
++      parport_put_port(ks0108_parport);
+       if (ks0108_pardevice == NULL) {
+               printk(KERN_ERR KS0108_NAME ": ERROR: "
+                       "parport didn't register new device\n");
+diff --git a/drivers/base/devres.c b/drivers/base/devres.c
+index 507379e7b763..4e2fb405da87 100644
+--- a/drivers/base/devres.c
++++ b/drivers/base/devres.c
+@@ -296,10 +296,10 @@ void * devres_get(struct device *dev, void *new_res,
+       if (!dr) {
+               add_dr(dev, &new_dr->node);
+               dr = new_dr;
+-              new_dr = NULL;
++              new_res = NULL;
+       }
+       spin_unlock_irqrestore(&dev->devres_lock, flags);
+-      devres_free(new_dr);
++      devres_free(new_res);
+ 
+       return dr->data;
+ }
+diff --git a/drivers/base/platform.c b/drivers/base/platform.c
+index 4f8bef3eb5a8..413441a2ad4a 100644
+--- a/drivers/base/platform.c
++++ b/drivers/base/platform.c
+@@ -344,9 +344,7 @@ int platform_device_add(struct platform_device *pdev)
+ 
+       while (--i >= 0) {
+               struct resource *r = &pdev->resource[i];
+-              unsigned long type = resource_type(r);
+-
+-              if (type == IORESOURCE_MEM || type == IORESOURCE_IO)
++              if (r->parent)
+                       release_resource(r);
+       }
+ 
+@@ -377,9 +375,7 @@ void platform_device_del(struct platform_device *pdev)
+ 
+               for (i = 0; i < pdev->num_resources; i++) {
+                       struct resource *r = &pdev->resource[i];
+-                      unsigned long type = resource_type(r);
+-
+-                      if (type == IORESOURCE_MEM || type == IORESOURCE_IO)
++                      if (r->parent)
+                               release_resource(r);
+               }
+       }
+diff --git a/drivers/base/regmap/regcache-rbtree.c 
b/drivers/base/regmap/regcache-rbtree.c
+index 2b946bc4212d..f3f71369adc7 100644
+--- a/drivers/base/regmap/regcache-rbtree.c
++++ b/drivers/base/regmap/regcache-rbtree.c
+@@ -302,11 +302,20 @@ static int regcache_rbtree_insert_to_block(struct regmap 
*map,
+       if (!blk)
+               return -ENOMEM;
+ 
+-      present = krealloc(rbnode->cache_present,
+-                  BITS_TO_LONGS(blklen) * sizeof(*present), GFP_KERNEL);
+-      if (!present) {
+-              kfree(blk);
+-              return -ENOMEM;
++      if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) {
++              present = krealloc(rbnode->cache_present,
++                                 BITS_TO_LONGS(blklen) * sizeof(*present),
++                                 GFP_KERNEL);
++              if (!present) {
++                      kfree(blk);
++                      return -ENOMEM;
++              }
++
++              memset(present + BITS_TO_LONGS(rbnode->blklen), 0,
++                     (BITS_TO_LONGS(blklen) - BITS_TO_LONGS(rbnode->blklen))
++                     * sizeof(*present));
++      } else {
++              present = rbnode->cache_present;
+       }
+ 
+       /* insert the register value in the correct place in the rbnode block */
+diff --git a/drivers/clk/versatile/clk-sp810.c 
b/drivers/clk/versatile/clk-sp810.c
+index bf9b15a585e1..b9e05bde0c06 100644
+--- a/drivers/clk/versatile/clk-sp810.c
++++ b/drivers/clk/versatile/clk-sp810.c
+@@ -128,8 +128,8 @@ struct clk *clk_sp810_timerclken_of_get(struct 
of_phandle_args *clkspec,
+ {
+       struct clk_sp810 *sp810 = data;
+ 
+-      if (WARN_ON(clkspec->args_count != 1 || clkspec->args[0] >
+-                      ARRAY_SIZE(sp810->timerclken)))
++      if (WARN_ON(clkspec->args_count != 1 ||
++                  clkspec->args[0] >= ARRAY_SIZE(sp810->timerclken)))
+               return NULL;
+ 
+       return sp810->timerclken[clkspec->args[0]].clk;
+diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
+index af351f478b14..92d2116bf1ad 100644
+--- a/drivers/crypto/caam/caamhash.c
++++ b/drivers/crypto/caam/caamhash.c
+@@ -897,13 +897,14 @@ static int ahash_final_ctx(struct ahash_request *req)
+                         state->buflen_1;
+       u32 *sh_desc = ctx->sh_desc_fin, *desc;
+       dma_addr_t ptr = ctx->sh_desc_fin_dma;
+-      int sec4_sg_bytes;
++      int sec4_sg_bytes, sec4_sg_src_index;
+       int digestsize = crypto_ahash_digestsize(ahash);
+       struct ahash_edesc *edesc;
+       int ret = 0;
+       int sh_len;
+ 
+-      sec4_sg_bytes = (1 + (buflen ? 1 : 0)) * sizeof(struct sec4_sg_entry);
++      sec4_sg_src_index = 1 + (buflen ? 1 : 0);
++      sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);
+ 
+       /* allocate space for base edesc and hw desc commands, link tables */
+       edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
+@@ -930,7 +931,7 @@ static int ahash_final_ctx(struct ahash_request *req)
+       state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
+                                               buf, state->buf_dma, buflen,
+                                               last_buflen);
+-      (edesc->sec4_sg + sec4_sg_bytes - 1)->len |= SEC4_SG_LEN_FIN;
++      (edesc->sec4_sg + sec4_sg_src_index - 1)->len |= SEC4_SG_LEN_FIN;
+ 
+       append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
+                         LDST_SGF);
+diff --git a/drivers/gpu/drm/qxl/qxl_display.c 
b/drivers/gpu/drm/qxl/qxl_display.c
+index 5f79e511c2a6..ea0904875c74 100644
+--- a/drivers/gpu/drm/qxl/qxl_display.c
++++ b/drivers/gpu/drm/qxl/qxl_display.c
+@@ -127,37 +127,40 @@ static int qxl_add_monitors_config_modes(struct 
drm_connector *connector)
+                           false);
+       mode->type |= DRM_MODE_TYPE_PREFERRED;
+       drm_mode_probed_add(connector, mode);
++      /* remember the last custom size for mode validation */
++      qdev->monitors_config_width = mode->hdisplay;
++      qdev->monitors_config_height = mode->vdisplay;
+       return 1;
+ }
+ 
++static struct mode_size {
++      int w;
++      int h;
++} common_modes[] = {
++      { 640,  480},
++      { 720,  480},
++      { 800,  600},
++      { 848,  480},
++      {1024,  768},
++      {1152,  768},
++      {1280,  720},
++      {1280,  800},
++      {1280,  854},
++      {1280,  960},
++      {1280, 1024},
++      {1440,  900},
++      {1400, 1050},
++      {1680, 1050},
++      {1600, 1200},
++      {1920, 1080},
++      {1920, 1200}
++};
++
+ static int qxl_add_common_modes(struct drm_connector *connector)
+ {
+       struct drm_device *dev = connector->dev;
+       struct drm_display_mode *mode = NULL;
+       int i;
+-      struct mode_size {
+-              int w;
+-              int h;
+-      } common_modes[] = {
+-              { 640,  480},
+-              { 720,  480},
+-              { 800,  600},
+-              { 848,  480},
+-              {1024,  768},
+-              {1152,  768},
+-              {1280,  720},
+-              {1280,  800},
+-              {1280,  854},
+-              {1280,  960},
+-              {1280, 1024},
+-              {1440,  900},
+-              {1400, 1050},
+-              {1680, 1050},
+-              {1600, 1200},
+-              {1920, 1080},
+-              {1920, 1200}
+-      };
+-
+       for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
+               if (common_modes[i].w < 320 || common_modes[i].h < 200)
+                       continue;
+@@ -736,11 +739,22 @@ static int qxl_conn_get_modes(struct drm_connector 
*connector)
+ static int qxl_conn_mode_valid(struct drm_connector *connector,
+                              struct drm_display_mode *mode)
+ {
++      struct drm_device *ddev = connector->dev;
++      struct qxl_device *qdev = ddev->dev_private;
++      int i;
++
+       /* TODO: is this called for user defined modes? (xrandr --add-mode)
+        * TODO: check that the mode fits in the framebuffer */
+-      DRM_DEBUG("%s: %dx%d status=%d\n", mode->name, mode->hdisplay,
+-                mode->vdisplay, mode->status);
+-      return MODE_OK;
++
++      if(qdev->monitors_config_width == mode->hdisplay &&
++         qdev->monitors_config_height == mode->vdisplay)
++              return MODE_OK;
++
++      for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
++              if (common_modes[i].w == mode->hdisplay && common_modes[i].h == 
mode->vdisplay)
++                      return MODE_OK;
++      }
++      return MODE_BAD;
+ }
+ 
+ static struct drm_encoder *qxl_best_encoder(struct drm_connector *connector)
+diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
+index f7c9adde46a0..9cfafd7a1af6 100644
+--- a/drivers/gpu/drm/qxl/qxl_drv.h
++++ b/drivers/gpu/drm/qxl/qxl_drv.h
+@@ -323,6 +323,8 @@ struct qxl_device {
+       struct work_struct gc_work;
+ 
+       struct work_struct fb_work;
++      int monitors_config_width;
++      int monitors_config_height;
+ };
+ 
+ /* forward declaration for QXL_INFO_IO */
+diff --git a/drivers/gpu/drm/radeon/radeon_combios.c 
b/drivers/gpu/drm/radeon/radeon_combios.c
+index 8cac69819054..9c64a973190e 100644
+--- a/drivers/gpu/drm/radeon/radeon_combios.c
++++ b/drivers/gpu/drm/radeon/radeon_combios.c
+@@ -3403,6 +3403,14 @@ void radeon_combios_asic_init(struct drm_device *dev)
+           rdev->pdev->subsystem_device == 0x30ae)
+               return;
+ 
++      /* quirk for rs4xx HP Compaq dc5750 Small Form Factor to make it resume
++       * - it hangs on resume inside the dynclk 1 table.
++       */
++      if (rdev->family == CHIP_RS480 &&
++          rdev->pdev->subsystem_vendor == 0x103c &&
++          rdev->pdev->subsystem_device == 0x280a)
++              return;
++
+       /* DYN CLK 1 */
+       table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
+       if (table)
+diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c 
b/drivers/gpu/drm/radeon/radeon_connectors.c
+index fe90b3e28d88..02cd9585ff83 100644
+--- a/drivers/gpu/drm/radeon/radeon_connectors.c
++++ b/drivers/gpu/drm/radeon/radeon_connectors.c
+@@ -78,6 +78,11 @@ void radeon_connector_hotplug(struct drm_connector 
*connector)
+                       if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) 
{
+                               drm_helper_connector_dpms(connector, 
DRM_MODE_DPMS_OFF);
+                       } else if 
(radeon_dp_needs_link_train(radeon_connector)) {
++                              /* Don't try to start link training before we
++                               * have the dpcd */
++                              if (!radeon_dp_getdpcd(radeon_connector))
++                                      return;
++
+                               /* set it to OFF so that 
drm_helper_connector_dpms()
+                                * won't return immediately since the current 
state
+                                * is ON at this point.
+diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
+index bcc3193d297c..f44be51e261d 100644
+--- a/drivers/hid/usbhid/hid-core.c
++++ b/drivers/hid/usbhid/hid-core.c
+@@ -180,7 +180,7 @@ static void hid_io_error(struct hid_device *hid)
+       if (time_after(jiffies, usbhid->stop_retry)) {
+ 
+               /* Retries failed, so do a port reset unless we lack bandwidth*/
+-              if (test_bit(HID_NO_BANDWIDTH, &usbhid->iofl)
++              if (!test_bit(HID_NO_BANDWIDTH, &usbhid->iofl)
+                    && !test_and_set_bit(HID_RESET_PENDING, &usbhid->iofl)) {
+ 
+                       schedule_work(&usbhid->reset_work);
+diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
+index dd4206cac62d..5e1b117d4e3b 100644
+--- a/drivers/iio/imu/adis16480.c
++++ b/drivers/iio/imu/adis16480.c
+@@ -110,6 +110,10 @@
+ struct adis16480_chip_info {
+       unsigned int num_channels;
+       const struct iio_chan_spec *channels;
++      unsigned int gyro_max_val;
++      unsigned int gyro_max_scale;
++      unsigned int accel_max_val;
++      unsigned int accel_max_scale;
+ };
+ 
+ struct adis16480 {
+@@ -533,19 +537,21 @@ static int adis16480_set_filter_freq(struct iio_dev 
*indio_dev,
+ static int adis16480_read_raw(struct iio_dev *indio_dev,
+       const struct iio_chan_spec *chan, int *val, int *val2, long info)
+ {
++      struct adis16480 *st = iio_priv(indio_dev);
++
+       switch (info) {
+       case IIO_CHAN_INFO_RAW:
+               return adis_single_conversion(indio_dev, chan, 0, val);
+       case IIO_CHAN_INFO_SCALE:
+               switch (chan->type) {
+               case IIO_ANGL_VEL:
+-                      *val = 0;
+-                      *val2 = IIO_DEGREE_TO_RAD(20000); /* 0.02 degree/sec */
+-                      return IIO_VAL_INT_PLUS_MICRO;
++                      *val = st->chip_info->gyro_max_scale;
++                      *val2 = st->chip_info->gyro_max_val;
++                      return IIO_VAL_FRACTIONAL;
+               case IIO_ACCEL:
+-                      *val = 0;
+-                      *val2 = IIO_G_TO_M_S_2(800); /* 0.8 mg */
+-                      return IIO_VAL_INT_PLUS_MICRO;
++                      *val = st->chip_info->accel_max_scale;
++                      *val2 = st->chip_info->accel_max_val;
++                      return IIO_VAL_FRACTIONAL;
+               case IIO_MAGN:
+                       *val = 0;
+                       *val2 = 100; /* 0.0001 gauss */
+@@ -702,18 +708,39 @@ static const struct adis16480_chip_info 
adis16480_chip_info[] = {
+       [ADIS16375] = {
+               .channels = adis16485_channels,
+               .num_channels = ARRAY_SIZE(adis16485_channels),
++              /*
++               * storing the value in rad/degree and the scale in degree
++               * gives us the result in rad and better precession than
++               * storing the scale directly in rad.
++               */
++              .gyro_max_val = IIO_RAD_TO_DEGREE(22887),
++              .gyro_max_scale = 300,
++              .accel_max_val = IIO_M_S_2_TO_G(21973),
++              .accel_max_scale = 18,
+       },
+       [ADIS16480] = {
+               .channels = adis16480_channels,
+               .num_channels = ARRAY_SIZE(adis16480_channels),
++              .gyro_max_val = IIO_RAD_TO_DEGREE(22500),
++              .gyro_max_scale = 450,
++              .accel_max_val = IIO_M_S_2_TO_G(12500),
++              .accel_max_scale = 5,
+       },
+       [ADIS16485] = {
+               .channels = adis16485_channels,
+               .num_channels = ARRAY_SIZE(adis16485_channels),
++              .gyro_max_val = IIO_RAD_TO_DEGREE(22500),
++              .gyro_max_scale = 450,
++              .accel_max_val = IIO_M_S_2_TO_G(20000),
++              .accel_max_scale = 5,
+       },
+       [ADIS16488] = {
+               .channels = adis16480_channels,
+               .num_channels = ARRAY_SIZE(adis16480_channels),
++              .gyro_max_val = IIO_RAD_TO_DEGREE(22500),
++              .gyro_max_scale = 450,
++              .accel_max_val = IIO_M_S_2_TO_G(22500),
++              .accel_max_scale = 18,
+       },
+ };
+ 
+diff --git a/drivers/infiniband/core/uverbs.h 
b/drivers/infiniband/core/uverbs.h
+index d8f9c6c272d7..5252cd0be039 100644
+--- a/drivers/infiniband/core/uverbs.h
++++ b/drivers/infiniband/core/uverbs.h
+@@ -69,7 +69,7 @@
+  */
+ 
+ struct ib_uverbs_device {
+-      struct kref                             ref;
++      atomic_t                                refcount;
+       int                                     num_comp_vectors;
+       struct completion                       comp;
+       struct device                          *dev;
+@@ -78,6 +78,7 @@ struct ib_uverbs_device {
+       struct cdev                             cdev;
+       struct rb_root                          xrcd_tree;
+       struct mutex                            xrcd_tree_mutex;
++      struct kobject                          kobj;
+ };
+ 
+ struct ib_uverbs_event_file {
+diff --git a/drivers/infiniband/core/uverbs_cmd.c 
b/drivers/infiniband/core/uverbs_cmd.c
+index 2f0f01b70e3b..f08f79c58f6a 100644
+--- a/drivers/infiniband/core/uverbs_cmd.c
++++ b/drivers/infiniband/core/uverbs_cmd.c
+@@ -2120,6 +2120,12 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
+               next->send_flags = user_wr->send_flags;
+ 
+               if (is_ud) {
++                      if (next->opcode != IB_WR_SEND &&
++                          next->opcode != IB_WR_SEND_WITH_IMM) {
++                              ret = -EINVAL;
++                              goto out_put;
++                      }
++
+                       next->wr.ud.ah = idr_read_ah(user_wr->wr.ud.ah,
+                                                    file->ucontext);
+                       if (!next->wr.ud.ah) {
+@@ -2156,9 +2162,11 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
+                                       user_wr->wr.atomic.compare_add;
+                               next->wr.atomic.swap = user_wr->wr.atomic.swap;
+                               next->wr.atomic.rkey = user_wr->wr.atomic.rkey;
++                      case IB_WR_SEND:
+                               break;
+                       default:
+-                              break;
++                              ret = -EINVAL;
++                              goto out_put;
+                       }
+               }
+ 
+diff --git a/drivers/infiniband/core/uverbs_main.c 
b/drivers/infiniband/core/uverbs_main.c
+index 849c9dc7d1f6..68e5496c5d58 100644
+--- a/drivers/infiniband/core/uverbs_main.c
++++ b/drivers/infiniband/core/uverbs_main.c
+@@ -124,14 +124,18 @@ static ssize_t (*uverbs_cmd_table[])(struct 
ib_uverbs_file *file,
+ static void ib_uverbs_add_one(struct ib_device *device);
+ static void ib_uverbs_remove_one(struct ib_device *device);
+ 
+-static void ib_uverbs_release_dev(struct kref *ref)
++static void ib_uverbs_release_dev(struct kobject *kobj)
+ {
+       struct ib_uverbs_device *dev =
+-              container_of(ref, struct ib_uverbs_device, ref);
++              container_of(kobj, struct ib_uverbs_device, kobj);
+ 
+-      complete(&dev->comp);
++      kfree(dev);
+ }
+ 
++static struct kobj_type ib_uverbs_dev_ktype = {
++      .release = ib_uverbs_release_dev,
++};
++
+ static void ib_uverbs_release_event_file(struct kref *ref)
+ {
+       struct ib_uverbs_event_file *file =
+@@ -295,13 +299,19 @@ static int ib_uverbs_cleanup_ucontext(struct 
ib_uverbs_file *file,
+       return context->device->dealloc_ucontext(context);
+ }
+ 
++static void ib_uverbs_comp_dev(struct ib_uverbs_device *dev)
++{
++      complete(&dev->comp);
++}
++
+ static void ib_uverbs_release_file(struct kref *ref)
+ {
+       struct ib_uverbs_file *file =
+               container_of(ref, struct ib_uverbs_file, ref);
+ 
+       module_put(file->device->ib_dev->owner);
+-      kref_put(&file->device->ref, ib_uverbs_release_dev);
++      if (atomic_dec_and_test(&file->device->refcount))
++              ib_uverbs_comp_dev(file->device);
+ 
+       kfree(file);
+ }
+@@ -665,9 +675,7 @@ static int ib_uverbs_open(struct inode *inode, struct file 
*filp)
+       int ret;
+ 
+       dev = container_of(inode->i_cdev, struct ib_uverbs_device, cdev);
+-      if (dev)
+-              kref_get(&dev->ref);
+-      else
++      if (!atomic_inc_not_zero(&dev->refcount))
+               return -ENXIO;
+ 
+       if (!try_module_get(dev->ib_dev->owner)) {
+@@ -688,6 +696,7 @@ static int ib_uverbs_open(struct inode *inode, struct file 
*filp)
+       mutex_init(&file->mutex);
+ 
+       filp->private_data = file;
++      kobject_get(&dev->kobj);
+ 
+       return nonseekable_open(inode, filp);
+ 
+@@ -695,13 +704,16 @@ err_module:
+       module_put(dev->ib_dev->owner);
+ 
+ err:
+-      kref_put(&dev->ref, ib_uverbs_release_dev);
++      if (atomic_dec_and_test(&dev->refcount))
++              ib_uverbs_comp_dev(dev);
++
+       return ret;
+ }
+ 
+ static int ib_uverbs_close(struct inode *inode, struct file *filp)
+ {
+       struct ib_uverbs_file *file = filp->private_data;
++      struct ib_uverbs_device *dev = file->device;
+ 
+       ib_uverbs_cleanup_ucontext(file, file->ucontext);
+ 
+@@ -709,6 +721,7 @@ static int ib_uverbs_close(struct inode *inode, struct 
file *filp)
+               kref_put(&file->async_file->ref, ib_uverbs_release_event_file);
+ 
+       kref_put(&file->ref, ib_uverbs_release_file);
++      kobject_put(&dev->kobj);
+ 
+       return 0;
+ }
+@@ -804,10 +817,11 @@ static void ib_uverbs_add_one(struct ib_device *device)
+       if (!uverbs_dev)
+               return;
+ 
+-      kref_init(&uverbs_dev->ref);
++      atomic_set(&uverbs_dev->refcount, 1);
+       init_completion(&uverbs_dev->comp);
+       uverbs_dev->xrcd_tree = RB_ROOT;
+       mutex_init(&uverbs_dev->xrcd_tree_mutex);
++      kobject_init(&uverbs_dev->kobj, &ib_uverbs_dev_ktype);
+ 
+       spin_lock(&map_lock);
+       devnum = find_first_zero_bit(dev_map, IB_UVERBS_MAX_DEVICES);
+@@ -834,6 +848,7 @@ static void ib_uverbs_add_one(struct ib_device *device)
+       cdev_init(&uverbs_dev->cdev, NULL);
+       uverbs_dev->cdev.owner = THIS_MODULE;
+       uverbs_dev->cdev.ops = device->mmap ? &uverbs_mmap_fops : &uverbs_fops;
++      uverbs_dev->cdev.kobj.parent = &uverbs_dev->kobj;
+       kobject_set_name(&uverbs_dev->cdev.kobj, "uverbs%d", 
uverbs_dev->devnum);
+       if (cdev_add(&uverbs_dev->cdev, base, 1))
+               goto err_cdev;
+@@ -864,9 +879,10 @@ err_cdev:
+               clear_bit(devnum, overflow_map);
+ 
+ err:
+-      kref_put(&uverbs_dev->ref, ib_uverbs_release_dev);
++      if (atomic_dec_and_test(&uverbs_dev->refcount))
++              ib_uverbs_comp_dev(uverbs_dev);
+       wait_for_completion(&uverbs_dev->comp);
+-      kfree(uverbs_dev);
++      kobject_put(&uverbs_dev->kobj);
+       return;
+ }
+ 
+@@ -886,9 +902,10 @@ static void ib_uverbs_remove_one(struct ib_device *device)
+       else
+               clear_bit(uverbs_dev->devnum - IB_UVERBS_MAX_DEVICES, 
overflow_map);
+ 
+-      kref_put(&uverbs_dev->ref, ib_uverbs_release_dev);
++      if (atomic_dec_and_test(&uverbs_dev->refcount))
++              ib_uverbs_comp_dev(uverbs_dev);
+       wait_for_completion(&uverbs_dev->comp);
+-      kfree(uverbs_dev);
++      kobject_put(&uverbs_dev->kobj);
+ }
+ 
+ static char *uverbs_devnode(struct device *dev, umode_t *mode)
+diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
+index a251becdaa98..890c23b3d714 100644
+--- a/drivers/infiniband/hw/mlx4/ah.c
++++ b/drivers/infiniband/hw/mlx4/ah.c
+@@ -169,9 +169,13 @@ int mlx4_ib_query_ah(struct ib_ah *ibah, struct 
ib_ah_attr *ah_attr)
+       enum rdma_link_layer ll;
+ 
+       memset(ah_attr, 0, sizeof *ah_attr);
+-      ah_attr->sl = be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;
+       ah_attr->port_num = be32_to_cpu(ah->av.ib.port_pd) >> 24;
+       ll = rdma_port_get_link_layer(ibah->device, ah_attr->port_num);
++      if (ll == IB_LINK_LAYER_ETHERNET)
++              ah_attr->sl = be32_to_cpu(ah->av.eth.sl_tclass_flowlabel) >> 29;
++      else
++              ah_attr->sl = be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;
++
+       ah_attr->dlid = ll == IB_LINK_LAYER_INFINIBAND ? 
be16_to_cpu(ah->av.ib.dlid) : 0;
+       if (ah->av.ib.stat_rate)
+               ah_attr->static_rate = ah->av.ib.stat_rate - 
MLX4_STAT_RATE_OFFSET;
+diff --git a/drivers/infiniband/hw/mlx4/sysfs.c 
b/drivers/infiniband/hw/mlx4/sysfs.c
+index 97516eb363b7..c5ce4082fdc7 100644
+--- a/drivers/infiniband/hw/mlx4/sysfs.c
++++ b/drivers/infiniband/hw/mlx4/sysfs.c
+@@ -563,6 +563,8 @@ static int add_port(struct mlx4_ib_dev *dev, int port_num, 
int slave)
+       struct mlx4_port *p;
+       int i;
+       int ret;
++      int is_eth = rdma_port_get_link_layer(&dev->ib_dev, port_num) ==
++                      IB_LINK_LAYER_ETHERNET;
+ 
+       p = kzalloc(sizeof *p, GFP_KERNEL);
+       if (!p)
+@@ -580,7 +582,8 @@ static int add_port(struct mlx4_ib_dev *dev, int port_num, 
int slave)
+ 
+       p->pkey_group.name  = "pkey_idx";
+       p->pkey_group.attrs =
+-              alloc_group_attrs(show_port_pkey, store_port_pkey,
++              alloc_group_attrs(show_port_pkey,
++                                is_eth ? NULL : store_port_pkey,
+                                 dev->dev->caps.pkey_table_len[port_num]);
+       if (!p->pkey_group.attrs)
+               goto err_alloc;
+diff --git a/drivers/infiniband/hw/qib/qib_keys.c 
b/drivers/infiniband/hw/qib/qib_keys.c
+index 3b9afccaaade..eabe54738be6 100644
+--- a/drivers/infiniband/hw/qib/qib_keys.c
++++ b/drivers/infiniband/hw/qib/qib_keys.c
+@@ -86,6 +86,10 @@ int qib_alloc_lkey(struct qib_mregion *mr, int dma_region)
+        * unrestricted LKEY.
+        */
+       rkt->gen++;
++      /*
++       * bits are capped in qib_verbs.c to insure enough bits
++       * for generation number
++       */
+       mr->lkey = (r << (32 - ib_qib_lkey_table_size)) |
+               ((((1 << (24 - ib_qib_lkey_table_size)) - 1) & rkt->gen)
+                << 8);
+diff --git a/drivers/infiniband/hw/qib/qib_verbs.c 
b/drivers/infiniband/hw/qib/qib_verbs.c
+index 092b0bb1bb78..c141b9b2493d 100644
+--- a/drivers/infiniband/hw/qib/qib_verbs.c
++++ b/drivers/infiniband/hw/qib/qib_verbs.c
+@@ -40,6 +40,7 @@
+ #include <linux/rculist.h>
+ #include <linux/mm.h>
+ #include <linux/random.h>
++#include <linux/vmalloc.h>
+ 
+ #include "qib.h"
+ #include "qib_common.h"
+@@ -2086,10 +2087,16 @@ int qib_register_ib_device(struct qib_devdata *dd)
+        * the LKEY).  The remaining bits act as a generation number or tag.
+        */
+       spin_lock_init(&dev->lk_table.lock);
++      /* insure generation is at least 4 bits see keys.c */
++      if (ib_qib_lkey_table_size > MAX_LKEY_TABLE_BITS) {
++              qib_dev_warn(dd, "lkey bits %u too large, reduced to %u\n",
++                      ib_qib_lkey_table_size, MAX_LKEY_TABLE_BITS);
++              ib_qib_lkey_table_size = MAX_LKEY_TABLE_BITS;
++      }
+       dev->lk_table.max = 1 << ib_qib_lkey_table_size;
+       lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
+       dev->lk_table.table = (struct qib_mregion __rcu **)
+-              __get_free_pages(GFP_KERNEL, get_order(lk_tab_size));
++              vmalloc(lk_tab_size);
+       if (dev->lk_table.table == NULL) {
+               ret = -ENOMEM;
+               goto err_lk;
+@@ -2262,7 +2269,7 @@ err_tx:
+                                       sizeof(struct qib_pio_header),
+                                 dev->pio_hdrs, dev->pio_hdrs_phys);
+ err_hdrs:
+-      free_pages((unsigned long) dev->lk_table.table, get_order(lk_tab_size));
++      vfree(dev->lk_table.table);
+ err_lk:
+       kfree(dev->qp_table);
+ err_qpt:
+@@ -2316,8 +2323,7 @@ void qib_unregister_ib_device(struct qib_devdata *dd)
+                                       sizeof(struct qib_pio_header),
+                                 dev->pio_hdrs, dev->pio_hdrs_phys);
+       lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
+-      free_pages((unsigned long) dev->lk_table.table,
+-                 get_order(lk_tab_size));
++      vfree(dev->lk_table.table);
+       kfree(dev->qp_table);
+ }
+ 
+diff --git a/drivers/infiniband/hw/qib/qib_verbs.h 
b/drivers/infiniband/hw/qib/qib_verbs.h
+index 012e2c7575ad..61b162b64dc6 100644
+--- a/drivers/infiniband/hw/qib/qib_verbs.h
++++ b/drivers/infiniband/hw/qib/qib_verbs.h
+@@ -647,6 +647,8 @@ struct qib_qpn_table {
+       struct qpn_map map[QPNMAP_ENTRIES];
+ };
+ 
++#define MAX_LKEY_TABLE_BITS 23
++
+ struct qib_lkey_table {
+       spinlock_t lock; /* protect changes in this struct */
+       u32 next;               /* next unused index (speeds search) */
+diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
+index 694af4958a98..5311dbbee47c 100644
+--- a/drivers/input/evdev.c
++++ b/drivers/input/evdev.c
+@@ -240,19 +240,14 @@ static int evdev_flush(struct file *file, fl_owner_t id)
+ {
+       struct evdev_client *client = file->private_data;
+       struct evdev *evdev = client->evdev;
+-      int retval;
+ 
+-      retval = mutex_lock_interruptible(&evdev->mutex);
+-      if (retval)
+-              return retval;
++      mutex_lock(&evdev->mutex);
+ 
+-      if (!evdev->exist || client->revoked)
+-              retval = -ENODEV;
+-      else
+-              retval = input_flush_device(&evdev->handle, file);
++      if (evdev->exist && !client->revoked)
++              input_flush_device(&evdev->handle, file);
+ 
+       mutex_unlock(&evdev->mutex);
+-      return retval;
++      return 0;
+ }
+ 
+ static void evdev_free(struct device *dev)
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 1c512dc1f17f..81bf511b3182 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -5315,6 +5315,8 @@ EXPORT_SYMBOL_GPL(md_stop_writes);
+ static void __md_stop(struct mddev *mddev)
+ {
+       mddev->ready = 0;
++      /* Ensure ->event_work is done */
++      flush_workqueue(md_misc_wq);
+       mddev->pers->stop(mddev);
+       if (mddev->pers->sync_request && mddev->to_remove == NULL)
+               mddev->to_remove = &md_redundancy_group;
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index 9ccb107c982e..d525d663bb22 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -3599,6 +3599,7 @@ static struct r10conf *setup_conf(struct mddev *mddev)
+                       /* far_copies must be 1 */
+                       conf->prev.stride = conf->dev_sectors;
+       }
++      conf->reshape_safe = conf->reshape_progress;
+       spin_lock_init(&conf->device_lock);
+       INIT_LIST_HEAD(&conf->retry_list);
+ 
+@@ -3806,7 +3807,6 @@ static int run(struct mddev *mddev)
+               }
+               conf->offset_diff = min_offset_diff;
+ 
+-              conf->reshape_safe = conf->reshape_progress;
+               clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
+               clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
+               set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
+@@ -4151,6 +4151,7 @@ static int raid10_start_reshape(struct mddev *mddev)
+               conf->reshape_progress = size;
+       } else
+               conf->reshape_progress = 0;
++      conf->reshape_safe = conf->reshape_progress;
+       spin_unlock_irq(&conf->device_lock);
+ 
+       if (mddev->delta_disks && mddev->bitmap) {
+@@ -4217,6 +4218,7 @@ abort:
+               rdev->new_data_offset = rdev->data_offset;
+       smp_wmb();
+       conf->reshape_progress = MaxSector;
++      conf->reshape_safe = MaxSector;
+       mddev->reshape_position = MaxSector;
+       spin_unlock_irq(&conf->device_lock);
+       return ret;
+@@ -4564,6 +4566,7 @@ static void end_reshape(struct r10conf *conf)
+       md_finish_reshape(conf->mddev);
+       smp_wmb();
+       conf->reshape_progress = MaxSector;
++      conf->reshape_safe = MaxSector;
+       spin_unlock_irq(&conf->device_lock);
+ 
+       /* read-ahead size must cover two whole stripes, which is
+diff --git a/drivers/media/platform/omap3isp/isp.c 
b/drivers/media/platform/omap3isp/isp.c
+index df3a0ec7fd2c..9dd0e0cc65cf 100644
+--- a/drivers/media/platform/omap3isp/isp.c
++++ b/drivers/media/platform/omap3isp/isp.c
+@@ -814,14 +814,14 @@ static int isp_pipeline_link_notify(struct media_link 
*link, u32 flags,
+       int ret;
+ 
+       if (notification == MEDIA_DEV_NOTIFY_POST_LINK_CH &&
+-          !(link->flags & MEDIA_LNK_FL_ENABLED)) {
++          !(flags & MEDIA_LNK_FL_ENABLED)) {
+               /* Powering off entities is assumed to never fail. */
+               isp_pipeline_pm_power(source, -sink_use);
+               isp_pipeline_pm_power(sink, -source_use);
+               return 0;
+       }
+ 
+-      if (notification == MEDIA_DEV_NOTIFY_POST_LINK_CH &&
++      if (notification == MEDIA_DEV_NOTIFY_PRE_LINK_CH &&
+               (flags & MEDIA_LNK_FL_ENABLED)) {
+ 
+               ret = isp_pipeline_pm_power(source, sink_use);
+diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
+index 46da365c9c84..f972de9f02e6 100644
+--- a/drivers/media/rc/rc-main.c
++++ b/drivers/media/rc/rc-main.c
+@@ -978,9 +978,6 @@ static int rc_dev_uevent(struct device *device, struct 
kobj_uevent_env *env)
+ {
+       struct rc_dev *dev = to_rc_dev(device);
+ 
+-      if (!dev || !dev->input_dev)
+-              return -ENODEV;
+-
+       if (dev->rc_map.name)
+               ADD_HOTPLUG_VAR("NAME=%s", dev->rc_map.name);
+       if (dev->driver_name)
+diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
+index e743d3984d29..4b12543b0826 100644
+--- a/drivers/mmc/core/core.c
++++ b/drivers/mmc/core/core.c
+@@ -328,8 +328,10 @@ EXPORT_SYMBOL(mmc_start_bkops);
+  */
+ static void mmc_wait_data_done(struct mmc_request *mrq)
+ {
+-      mrq->host->context_info.is_done_rcv = true;
+-      wake_up_interruptible(&mrq->host->context_info.wait);
++      struct mmc_context_info *context_info = &mrq->host->context_info;
++
++      context_info->is_done_rcv = true;
++      wake_up_interruptible(&context_info->wait);
+ }
+ 
+ static void mmc_wait_done(struct mmc_request *mrq)
+diff --git a/drivers/net/ethernet/broadcom/tg3.c 
b/drivers/net/ethernet/broadcom/tg3.c
+index 8ad9ff65913c..fe601e264f94 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -10650,7 +10650,7 @@ static ssize_t tg3_show_temp(struct device *dev,
+       tg3_ape_scratchpad_read(tp, &temperature, attr->index,
+                               sizeof(temperature));
+       spin_unlock_bh(&tp->lock);
+-      return sprintf(buf, "%u\n", temperature);
++      return sprintf(buf, "%u\n", temperature * 1000);
+ }
+ 
+ 
+diff --git a/drivers/net/ethernet/stmicro/stmmac/descs.h 
b/drivers/net/ethernet/stmicro/stmmac/descs.h
+index ad3996038018..799c2929c536 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/descs.h
++++ b/drivers/net/ethernet/stmicro/stmmac/descs.h
+@@ -158,6 +158,8 @@ struct dma_desc {
+                       u32 buffer2_size:13;
+                       u32 reserved4:3;
+               } etx;          /* -- enhanced -- */
++
++              u64 all_flags;
+       } des01;
+       unsigned int des2;
+       unsigned int des3;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c 
b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+index 7e6628a91514..59fb7f69841b 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
++++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+@@ -240,6 +240,7 @@ static int enh_desc_get_rx_status(void *data, struct 
stmmac_extra_stats *x,
+ static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
+                                 int mode, int end)
+ {
++      p->des01.all_flags = 0;
+       p->des01.erx.own = 1;
+       p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1;
+ 
+@@ -254,7 +255,7 @@ static void enh_desc_init_rx_desc(struct dma_desc *p, int 
disable_rx_ic,
+ 
+ static void enh_desc_init_tx_desc(struct dma_desc *p, int mode, int end)
+ {
+-      p->des01.etx.own = 0;
++      p->des01.all_flags = 0;
+       if (mode == STMMAC_CHAIN_MODE)
+               ehn_desc_tx_set_on_chain(p, end);
+       else
+diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c 
b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+index 35ad4f427ae2..48c3456445b2 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
++++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+@@ -123,6 +123,7 @@ static int ndesc_get_rx_status(void *data, struct 
stmmac_extra_stats *x,
+ static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int 
mode,
+                              int end)
+ {
++      p->des01.all_flags = 0;
+       p->des01.rx.own = 1;
+       p->des01.rx.buffer1_size = BUF_SIZE_2KiB - 1;
+ 
+@@ -137,7 +138,7 @@ static void ndesc_init_rx_desc(struct dma_desc *p, int 
disable_rx_ic, int mode,
+ 
+ static void ndesc_init_tx_desc(struct dma_desc *p, int mode, int end)
+ {
+-      p->des01.tx.own = 0;
++      p->des01.all_flags = 0;
+       if (mode == STMMAC_CHAIN_MODE)
+               ndesc_tx_set_on_chain(p, end);
+       else
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 
b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 8d4ccd35a016..3b5459696310 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -1003,41 +1003,41 @@ static int init_dma_desc_rings(struct net_device *dev)
+                        txsize, rxsize, bfsize);
+ 
+       if (priv->extend_desc) {
+-              priv->dma_erx = dma_alloc_coherent(priv->device, rxsize *
+-                                                 sizeof(struct
+-                                                        dma_extended_desc),
+-                                                 &priv->dma_rx_phy,
+-                                                 GFP_KERNEL);
++              priv->dma_erx = dma_zalloc_coherent(priv->device, rxsize *
++                                                  sizeof(struct
++                                                         dma_extended_desc),
++                                                  &priv->dma_rx_phy,
++                                                  GFP_KERNEL);
+               if (!priv->dma_erx)
+                       goto err_dma;
+ 
+-              priv->dma_etx = dma_alloc_coherent(priv->device, txsize *
+-                                                 sizeof(struct
+-                                                        dma_extended_desc),
+-                                                 &priv->dma_tx_phy,
+-                                                 GFP_KERNEL);
++              priv->dma_etx = dma_zalloc_coherent(priv->device, txsize *
++                                                  sizeof(struct
++                                                         dma_extended_desc),
++                                                  &priv->dma_tx_phy,
++                                                  GFP_KERNEL);
+               if (!priv->dma_etx) {
+                       dma_free_coherent(priv->device, priv->dma_rx_size *
+-                                      sizeof(struct dma_extended_desc),
+-                                      priv->dma_erx, priv->dma_rx_phy);
++                                        sizeof(struct dma_extended_desc),
++                                        priv->dma_erx, priv->dma_rx_phy);
+                       goto err_dma;
+               }
+       } else {
+-              priv->dma_rx = dma_alloc_coherent(priv->device, rxsize *
+-                                                sizeof(struct dma_desc),
+-                                                &priv->dma_rx_phy,
+-                                                GFP_KERNEL);
++              priv->dma_rx = dma_zalloc_coherent(priv->device, rxsize *
++                                                 sizeof(struct dma_desc),
++                                                 &priv->dma_rx_phy,
++                                                 GFP_KERNEL);
+               if (!priv->dma_rx)
+                       goto err_dma;
+ 
+-              priv->dma_tx = dma_alloc_coherent(priv->device, txsize *
+-                                                sizeof(struct dma_desc),
+-                                                &priv->dma_tx_phy,
+-                                                GFP_KERNEL);
++              priv->dma_tx = dma_zalloc_coherent(priv->device, txsize *
++                                                 sizeof(struct dma_desc),
++                                                 &priv->dma_tx_phy,
++                                                 GFP_KERNEL);
+               if (!priv->dma_tx) {
+                       dma_free_coherent(priv->device, priv->dma_rx_size *
+-                                      sizeof(struct dma_desc),
+-                                      priv->dma_rx, priv->dma_rx_phy);
++                                        sizeof(struct dma_desc),
++                                        priv->dma_rx, priv->dma_rx_phy);
+                       goto err_dma;
+               }
+       }
+diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
+index 1d4da74595f9..3b7b7b2eba1b 100644
+--- a/drivers/net/usb/usbnet.c
++++ b/drivers/net/usb/usbnet.c
+@@ -779,7 +779,7 @@ int usbnet_stop (struct net_device *net)
+ {
+       struct usbnet           *dev = netdev_priv(net);
+       struct driver_info      *info = dev->driver_info;
+-      int                     retval, pm;
++      int                     retval, pm, mpn;
+ 
+       clear_bit(EVENT_DEV_OPEN, &dev->flags);
+       netif_stop_queue (net);
+@@ -810,6 +810,8 @@ int usbnet_stop (struct net_device *net)
+ 
+       usbnet_purge_paused_rxq(dev);
+ 
++      mpn = !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags);
++
+       /* deferred work (task, timer, softirq) must also stop.
+        * can't flush_scheduled_work() until we drop rtnl (later),
+        * else workers could deadlock; so make workers a NOP.
+@@ -820,8 +822,7 @@ int usbnet_stop (struct net_device *net)
+       if (!pm)
+               usb_autopm_put_interface(dev->intf);
+ 
+-      if (info->manage_power &&
+-          !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags))
++      if (info->manage_power && mpn)
+               info->manage_power(dev, 0);
+       else
+               usb_autopm_put_interface(dev->intf);
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c 
b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+index 7555095e0b74..fa669b52fc91 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+@@ -313,6 +313,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
+       {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
+       {RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
+       {RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/
++      {RTL_USB_DEVICE(0x0846, 0x9043, rtl92cu_hal_cfg)}, /*NG WNA1000Mv2*/
+       {RTL_USB_DEVICE(0x0b05, 0x17ba, rtl92cu_hal_cfg)}, /*ASUS-Edimax*/
+       {RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/
+       {RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
+diff --git a/drivers/of/address.c b/drivers/of/address.c
+index f5582f3a06a4..3a308877ac90 100644
+--- a/drivers/of/address.c
++++ b/drivers/of/address.c
+@@ -696,10 +696,10 @@ struct device_node 
*of_find_matching_node_by_address(struct device_node *from,
+       struct resource res;
+ 
+       while (dn) {
+-              if (of_address_to_resource(dn, 0, &res))
+-                      continue;
+-              if (res.start == base_address)
++              if (!of_address_to_resource(dn, 0, &res) &&
++                  res.start == base_address)
+                       return dn;
++
+               dn = of_find_matching_node(dn, matches);
+       }
+ 
+diff --git a/drivers/pci/access.c b/drivers/pci/access.c
+index 0857ca981fae..6bc9b12ba42a 100644
+--- a/drivers/pci/access.c
++++ b/drivers/pci/access.c
+@@ -359,6 +359,56 @@ static const struct pci_vpd_ops pci_vpd_pci22_ops = {
+       .release = pci_vpd_pci22_release,
+ };
+ 
++static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
++                             void *arg)
++{
++      struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn));
++      ssize_t ret;
++
++      if (!tdev)
++              return -ENODEV;
++
++      ret = pci_read_vpd(tdev, pos, count, arg);
++      pci_dev_put(tdev);
++      return ret;
++}
++
++static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count,
++                              const void *arg)
++{
++      struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn));
++      ssize_t ret;
++
++      if (!tdev)
++              return -ENODEV;
++
++      ret = pci_write_vpd(tdev, pos, count, arg);
++      pci_dev_put(tdev);
++      return ret;
++}
++
++static const struct pci_vpd_ops pci_vpd_f0_ops = {
++      .read = pci_vpd_f0_read,
++      .write = pci_vpd_f0_write,
++      .release = pci_vpd_pci22_release,
++};
++
++static int pci_vpd_f0_dev_check(struct pci_dev *dev)
++{
++      struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn));
++      int ret = 0;
++
++      if (!tdev)
++              return -ENODEV;
++      if (!tdev->vpd || !tdev->multifunction ||
++          dev->class != tdev->class || dev->vendor != tdev->vendor ||
++          dev->device != tdev->device)
++              ret = -ENODEV;
++
++      pci_dev_put(tdev);
++      return ret;
++}
++
+ int pci_vpd_pci22_init(struct pci_dev *dev)
+ {
+       struct pci_vpd_pci22 *vpd;
+@@ -367,12 +417,21 @@ int pci_vpd_pci22_init(struct pci_dev *dev)
+       cap = pci_find_capability(dev, PCI_CAP_ID_VPD);
+       if (!cap)
+               return -ENODEV;
++      if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0) {
++              int ret = pci_vpd_f0_dev_check(dev);
++
++              if (ret)
++                      return ret;
++      }
+       vpd = kzalloc(sizeof(*vpd), GFP_ATOMIC);
+       if (!vpd)
+               return -ENOMEM;
+ 
+       vpd->base.len = PCI_VPD_PCI22_SIZE;
+-      vpd->base.ops = &pci_vpd_pci22_ops;
++      if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0)
++              vpd->base.ops = &pci_vpd_f0_ops;
++      else
++              vpd->base.ops = &pci_vpd_pci22_ops;
+       mutex_init(&vpd->lock);
+       vpd->cap = cap;
+       vpd->busy = false;
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index a7b7eeaf35e8..eee40430b0b0 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -1849,6 +1849,15 @@ static void quirk_netmos(struct pci_dev *dev)
+ DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID,
+                        PCI_CLASS_COMMUNICATION_SERIAL, 8, quirk_netmos);
+ 
++static void quirk_f0_vpd_link(struct pci_dev *dev)
++{
++      if (!dev->multifunction || !PCI_FUNC(dev->devfn))
++              return;
++      dev->dev_flags |= PCI_DEV_FLAGS_VPD_REF_F0;
++}
++DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
++                            PCI_CLASS_NETWORK_ETHERNET, 8, quirk_f0_vpd_link);
++
+ static void quirk_e100_interrupt(struct pci_dev *dev)
+ {
+       u16 command, pmcsr;
+@@ -2794,12 +2803,15 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x3c28, 
vtd_mask_spec_errors);
+ 
+ static void fixup_ti816x_class(struct pci_dev *dev)
+ {
++      u32 class = dev->class;
++
+       /* TI 816x devices do not have class code set when in PCIe boot mode */
+-      dev_info(&dev->dev, "Setting PCI class for 816x PCIe device\n");
+-      dev->class = PCI_CLASS_MULTIMEDIA_VIDEO;
++      dev->class = PCI_CLASS_MULTIMEDIA_VIDEO << 8;
++      dev_info(&dev->dev, "PCI class overridden (%#08x -> %#08x)\n",
++               class, dev->class);
+ }
+ DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_TI, 0xb800,
+-                               PCI_CLASS_NOT_DEFINED, 0, fixup_ti816x_class);
++                            PCI_CLASS_NOT_DEFINED, 0, fixup_ti816x_class);
+ 
+ /* Some PCIe devices do not work reliably with the claimed maximum
+  * payload size supported.
+diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
+index 5fd0f1fbe586..5fec5db1e329 100644
+--- a/drivers/scsi/libfc/fc_fcp.c
++++ b/drivers/scsi/libfc/fc_fcp.c
+@@ -1039,11 +1039,26 @@ restart:
+               fc_fcp_pkt_hold(fsp);
+               spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
+ 
+-              if (!fc_fcp_lock_pkt(fsp)) {
++              spin_lock_bh(&fsp->scsi_pkt_lock);
++              if (!(fsp->state & FC_SRB_COMPL)) {
++                      fsp->state |= FC_SRB_COMPL;
++                      /*
++                       * TODO: dropping scsi_pkt_lock and then reacquiring
++                       * again around fc_fcp_cleanup_cmd() is required,
++                       * since fc_fcp_cleanup_cmd() calls into
++                       * fc_seq_set_resp() and that func preempts cpu using
++                       * schedule. May be schedule and related code should be
++                       * removed instead of unlocking here to avoid scheduling
++                       * while atomic bug.
++                       */
++                      spin_unlock_bh(&fsp->scsi_pkt_lock);
++
+                       fc_fcp_cleanup_cmd(fsp, error);
++
++                      spin_lock_bh(&fsp->scsi_pkt_lock);
+                       fc_io_compl(fsp);
+-                      fc_fcp_unlock_pkt(fsp);
+               }
++              spin_unlock_bh(&fsp->scsi_pkt_lock);
+ 
+               fc_fcp_pkt_release(fsp);
+               spin_lock_irqsave(&si->scsi_queue_lock, flags);
+diff --git a/drivers/staging/comedi/drivers/adl_pci7x3x.c 
b/drivers/staging/comedi/drivers/adl_pci7x3x.c
+index 81b7203f824f..c570ede07e94 100644
+--- a/drivers/staging/comedi/drivers/adl_pci7x3x.c
++++ b/drivers/staging/comedi/drivers/adl_pci7x3x.c
+@@ -116,10 +116,21 @@ static int adl_pci7x3x_do_insn_bits(struct comedi_device 
*dev,
+       unsigned int bits = data[1];
+ 
+       if (mask) {
++              unsigned int val;
++
+               s->state &= ~mask;
+               s->state |= (bits & mask);
+-
+-              outl(s->state, dev->iobase + reg);
++              val = s->state;
++              if (s->n_chan == 16) {
++                      /*
++                       * It seems the PCI-7230 needs the 16-bit DO state
++                       * to be shifted left by 16 bits before being written
++                       * to the 32-bit register.  Set the value in both
++                       * halves of the register to be sure.
++                       */
++                      val |= val << 16;
++              }
++              outl(val, dev->iobase + reg);
+       }
+ 
+       /*
+diff --git a/drivers/staging/comedi/drivers/usbduxsigma.c 
b/drivers/staging/comedi/drivers/usbduxsigma.c
+index c47f4087568f..580c1358eb84 100644
+--- a/drivers/staging/comedi/drivers/usbduxsigma.c
++++ b/drivers/staging/comedi/drivers/usbduxsigma.c
+@@ -575,37 +575,6 @@ static int usbduxsigma_ai_cmdtest(struct comedi_device 
*dev,
+       if (err)
+               return 3;
+ 
+-      /* Step 4: fix up any arguments */
+-
+-      if (high_speed) {
+-              /*
+-               * every 2 channels get a time window of 125us. Thus, if we
+-               * sample all 16 channels we need 1ms. If we sample only one
+-               * channel we need only 125us
+-               */
+-              devpriv->ai_interval = interval;
+-              devpriv->ai_timer = cmd->scan_begin_arg / (125000 * interval);
+-      } else {
+-              /* interval always 1ms */
+-              devpriv->ai_interval = 1;
+-              devpriv->ai_timer = cmd->scan_begin_arg / 1000000;
+-      }
+-      if (devpriv->ai_timer < 1)
+-              err |= -EINVAL;
+-
+-      if (cmd->stop_src == TRIG_COUNT) {
+-              /* data arrives as one packet */
+-              devpriv->ai_sample_count = cmd->stop_arg;
+-              devpriv->ai_continuous = 0;
+-      } else {
+-              /* continuous acquisition */
+-              devpriv->ai_continuous = 1;
+-              devpriv->ai_sample_count = 0;
+-      }
+-
+-      if (err)
+-              return 4;
+-
+       return 0;
+ }
+ 
+@@ -704,6 +673,33 @@ static int usbduxsigma_ai_cmd(struct comedi_device *dev,
+ 
+       /* set current channel of the running acquisition to zero */
+       s->async->cur_chan = 0;
++
++      if (devpriv->high_speed) {
++              /*
++               * every 2 channels get a time window of 125us. Thus, if we
++               * sample all 16 channels we need 1ms. If we sample only one
++               * channel we need only 125us
++               */
++              unsigned int interval = usbduxsigma_chans_to_interval(len);
++
++              devpriv->ai_interval = interval;
++              devpriv->ai_timer = cmd->scan_begin_arg / (125000 * interval);
++      } else {
++              /* interval always 1ms */
++              devpriv->ai_interval = 1;
++              devpriv->ai_timer = cmd->scan_begin_arg / 1000000;
++      }
++
++      if (cmd->stop_src == TRIG_COUNT) {
++              /* data arrives as one packet */
++              devpriv->ai_sample_count = cmd->stop_arg;
++              devpriv->ai_continuous = 0;
++      } else {
++              /* continuous acquisition */
++              devpriv->ai_continuous = 1;
++              devpriv->ai_sample_count = 0;
++      }
++
+       for (i = 0; i < len; i++) {
+               unsigned int chan  = CR_CHAN(cmd->chanlist[i]);
+ 
+@@ -954,10 +950,24 @@ static int usbduxsigma_ao_cmdtest(struct comedi_device 
*dev,
+       if (err)
+               return 3;
+ 
+-      /* Step 4: fix up any arguments */
++      return 0;
++}
++
++static int usbduxsigma_ao_cmd(struct comedi_device *dev,
++                            struct comedi_subdevice *s)
++{
++      struct usbduxsigma_private *devpriv = dev->private;
++      struct comedi_cmd *cmd = &s->async->cmd;
++      int ret;
++      int i;
++
++      down(&devpriv->sem);
++
++      /* set current channel of the running acquisition to zero */
++      s->async->cur_chan = 0;
+ 
+       /* we count in timer steps */
+-      if (high_speed) {
++      if (cmd->convert_src == TRIG_TIMER) {
+               /* timing of the conversion itself: every 125 us */
+               devpriv->ao_timer = cmd->convert_arg / 125000;
+       } else {
+@@ -967,12 +977,9 @@ static int usbduxsigma_ao_cmdtest(struct comedi_device 
*dev,
+                */
+               devpriv->ao_timer = cmd->scan_begin_arg / 1000000;
+       }
+-      if (devpriv->ao_timer < 1)
+-              err |= -EINVAL;
+-
+       if (cmd->stop_src == TRIG_COUNT) {
+               /* not continuous, use counter */
+-              if (high_speed) {
++              if (cmd->convert_src == TRIG_TIMER) {
+                       /* high speed also scans everything at once */
+                       devpriv->ao_sample_count = cmd->stop_arg *
+                                                  cmd->scan_end_arg;
+@@ -991,24 +998,6 @@ static int usbduxsigma_ao_cmdtest(struct comedi_device 
*dev,
+               devpriv->ao_sample_count = 0;
+       }
+ 
+-      if (err)
+-              return 4;
+-
+-      return 0;
+-}
+-
+-static int usbduxsigma_ao_cmd(struct comedi_device *dev,
+-                            struct comedi_subdevice *s)
+-{
+-      struct usbduxsigma_private *devpriv = dev->private;
+-      struct comedi_cmd *cmd = &s->async->cmd;
+-      int ret;
+-      int i;
+-
+-      down(&devpriv->sem);
+-
+-      /* set current channel of the running acquisition to zero */
+-      s->async->cur_chan = 0;
+       for (i = 0; i < cmd->chanlist_len; ++i)
+               devpriv->ao_chanlist[i] = CR_CHAN(cmd->chanlist[i]);
+ 
+diff --git a/drivers/tty/serial/8250/8250_pnp.c 
b/drivers/tty/serial/8250/8250_pnp.c
+index 35d9ab95c5cb..91b14202b90b 100644
+--- a/drivers/tty/serial/8250/8250_pnp.c
++++ b/drivers/tty/serial/8250/8250_pnp.c
+@@ -365,6 +365,11 @@ static const struct pnp_device_id pnp_dev_table[] = {
+       /* Winbond CIR port, should not be probed. We should keep track
+          of it to prevent the legacy serial driver from probing it */
+       {       "WEC1022",              CIR_PORT        },
++      /*
++       * SMSC IrCC SIR/FIR port, should not be probed by serial driver
++       * as well so its own driver can bind to it.
++       */
++      {       "SMCF010",              CIR_PORT        },
+       {       "",                     0       }
+ };
+ 
+diff --git a/drivers/tty/vt/consolemap.c b/drivers/tty/vt/consolemap.c
+index 2978ca596a7f..0e75d2a76511 100644
+--- a/drivers/tty/vt/consolemap.c
++++ b/drivers/tty/vt/consolemap.c
+@@ -540,6 +540,12 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct 
unipair __user *list)
+ 
+       /* Save original vc_unipagdir_loc in case we allocate a new one */
+       p = (struct uni_pagedir *)*vc->vc_uni_pagedir_loc;
++
++      if (!p) {
++              err = -EINVAL;
++
++              goto out_unlock;
++      }
+       if (p->readonly) {
+               console_unlock();
+               return -EIO;
+@@ -633,6 +639,7 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct 
unipair __user *list)
+               set_inverse_transl(vc, p, i); /* Update inverse translations */
+       set_inverse_trans_unicode(vc, p);
+ 
++out_unlock:
+       console_unlock();
+       return err;
+ }
+diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
+index 657c51cf2109..fb78796b0c26 100644
+--- a/drivers/usb/dwc3/ep0.c
++++ b/drivers/usb/dwc3/ep0.c
+@@ -793,6 +793,11 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc,
+               unsigned maxp = ep0->endpoint.maxpacket;
+ 
+               transfer_size += (maxp - (transfer_size % maxp));
++
++              /* Maximum of DWC3_EP0_BOUNCE_SIZE can only be received */
++              if (transfer_size > DWC3_EP0_BOUNCE_SIZE)
++                      transfer_size = DWC3_EP0_BOUNCE_SIZE;
++
+               transferred = min_t(u32, ur->length,
+                               transfer_size - length);
+               memcpy(ur->buf, dwc->ep0_bounce, transferred);
+@@ -905,11 +910,14 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
+                       return;
+               }
+ 
+-              WARN_ON(req->request.length > DWC3_EP0_BOUNCE_SIZE);
+-
+               maxpacket = dep->endpoint.maxpacket;
+               transfer_size = roundup(req->request.length, maxpacket);
+ 
++              if (transfer_size > DWC3_EP0_BOUNCE_SIZE) {
++                      dev_WARN(dwc->dev, "bounce buf can't handle req len\n");
++                      transfer_size = DWC3_EP0_BOUNCE_SIZE;
++              }
++
+               dwc->ep0_bounced = true;
+ 
+               /*
+diff --git a/drivers/usb/host/ehci-sysfs.c b/drivers/usb/host/ehci-sysfs.c
+index 14ced00ba220..0659024290af 100644
+--- a/drivers/usb/host/ehci-sysfs.c
++++ b/drivers/usb/host/ehci-sysfs.c
+@@ -29,7 +29,7 @@ static ssize_t show_companion(struct device *dev,
+       int                     count = PAGE_SIZE;
+       char                    *ptr = buf;
+ 
+-      ehci = hcd_to_ehci(bus_to_hcd(dev_get_drvdata(dev)));
++      ehci = hcd_to_ehci(dev_get_drvdata(dev));
+       nports = HCS_N_PORTS(ehci->hcs_params);
+ 
+       for (index = 0; index < nports; ++index) {
+@@ -54,7 +54,7 @@ static ssize_t store_companion(struct device *dev,
+       struct ehci_hcd         *ehci;
+       int                     portnum, new_owner;
+ 
+-      ehci = hcd_to_ehci(bus_to_hcd(dev_get_drvdata(dev)));
++      ehci = hcd_to_ehci(dev_get_drvdata(dev));
+       new_owner = PORT_OWNER;         /* Owned by companion */
+       if (sscanf(buf, "%d", &portnum) != 1)
+               return -EINVAL;
+@@ -85,7 +85,7 @@ static ssize_t show_uframe_periodic_max(struct device *dev,
+       struct ehci_hcd         *ehci;
+       int                     n;
+ 
+-      ehci = hcd_to_ehci(bus_to_hcd(dev_get_drvdata(dev)));
++      ehci = hcd_to_ehci(dev_get_drvdata(dev));
+       n = scnprintf(buf, PAGE_SIZE, "%d\n", ehci->uframe_periodic_max);
+       return n;
+ }
+@@ -102,7 +102,7 @@ static ssize_t store_uframe_periodic_max(struct device 
*dev,
+       unsigned long           flags;
+       ssize_t                 ret;
+ 
+-      ehci = hcd_to_ehci(bus_to_hcd(dev_get_drvdata(dev)));
++      ehci = hcd_to_ehci(dev_get_drvdata(dev));
+       if (kstrtouint(buf, 0, &uframe_periodic_max) < 0)
+               return -EINVAL;
+ 
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 75260b2ee420..beb96e997951 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -619,6 +619,10 @@ static struct usb_device_id id_table_combined [] = {
+       { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLXM_PID),
+               .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+       { USB_DEVICE(FTDI_VID, FTDI_SYNAPSE_SS200_PID) },
++      { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX_PID) },
++      { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX2_PID) },
++      { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX2WI_PID) },
++      { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX3_PID) },
+       /*
+        * ELV devices:
+        */
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h 
b/drivers/usb/serial/ftdi_sio_ids.h
+index 792e054126de..2943b97b2a83 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -568,6 +568,14 @@
+  */
+ #define FTDI_SYNAPSE_SS200_PID 0x9090 /* SS200 - SNAP Stick 200 */
+ 
++/*
++ * CustomWare / ShipModul NMEA multiplexers product ids (FTDI_VID)
++ */
++#define FTDI_CUSTOMWARE_MINIPLEX_PID  0xfd48  /* MiniPlex first generation 
NMEA Multiplexer */
++#define FTDI_CUSTOMWARE_MINIPLEX2_PID 0xfd49  /* MiniPlex-USB and MiniPlex-2 
series */
++#define FTDI_CUSTOMWARE_MINIPLEX2WI_PID       0xfd4a  /* MiniPlex-2Wi */
++#define FTDI_CUSTOMWARE_MINIPLEX3_PID 0xfd4b  /* MiniPlex-3 series */
++
+ 
+ /********************************/
+ /** third-party VID/PID combos **/
+diff --git a/drivers/usb/serial/symbolserial.c 
b/drivers/usb/serial/symbolserial.c
+index 9b1648945e7a..1e2d86d4f539 100644
+--- a/drivers/usb/serial/symbolserial.c
++++ b/drivers/usb/serial/symbolserial.c
+@@ -97,7 +97,7 @@ exit:
+ 
+ static int symbol_open(struct tty_struct *tty, struct usb_serial_port *port)
+ {
+-      struct symbol_private *priv = usb_get_serial_data(port->serial);
++      struct symbol_private *priv = usb_get_serial_port_data(port);
+       unsigned long flags;
+       int result = 0;
+ 
+@@ -123,7 +123,7 @@ static void symbol_close(struct usb_serial_port *port)
+ static void symbol_throttle(struct tty_struct *tty)
+ {
+       struct usb_serial_port *port = tty->driver_data;
+-      struct symbol_private *priv = usb_get_serial_data(port->serial);
++      struct symbol_private *priv = usb_get_serial_port_data(port);
+ 
+       spin_lock_irq(&priv->lock);
+       priv->throttled = true;
+@@ -133,7 +133,7 @@ static void symbol_throttle(struct tty_struct *tty)
+ static void symbol_unthrottle(struct tty_struct *tty)
+ {
+       struct usb_serial_port *port = tty->driver_data;
+-      struct symbol_private *priv = usb_get_serial_data(port->serial);
++      struct symbol_private *priv = usb_get_serial_port_data(port);
+       int result;
+       bool was_throttled;
+ 
+diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
+index e41c79c986ea..0b5806995718 100644
+--- a/drivers/xen/gntdev.c
++++ b/drivers/xen/gntdev.c
+@@ -67,7 +67,7 @@ struct gntdev_priv {
+        * Only populated if populate_freeable_maps == 1 */
+       struct list_head freeable_maps;
+       /* lock protects maps and freeable_maps */
+-      spinlock_t lock;
++      struct mutex lock;
+       struct mm_struct *mm;
+       struct mmu_notifier mn;
+ };
+@@ -216,9 +216,9 @@ static void gntdev_put_map(struct gntdev_priv *priv, 
struct grant_map *map)
+       }
+ 
+       if (populate_freeable_maps && priv) {
+-              spin_lock(&priv->lock);
++              mutex_lock(&priv->lock);
+               list_del(&map->next);
+-              spin_unlock(&priv->lock);
++              mutex_unlock(&priv->lock);
+       }
+ 
+       if (map->pages && !use_ptemod)
+@@ -387,9 +387,9 @@ static void gntdev_vma_close(struct vm_area_struct *vma)
+                * not do any unmapping, since that has been done prior to
+                * closing the vma, but it may still iterate the unmap_ops list.
+                */
+-              spin_lock(&priv->lock);
++              mutex_lock(&priv->lock);
+               map->vma = NULL;
+-              spin_unlock(&priv->lock);
++              mutex_unlock(&priv->lock);
+       }
+       vma->vm_private_data = NULL;
+       gntdev_put_map(priv, map);
+@@ -433,14 +433,14 @@ static void mn_invl_range_start(struct mmu_notifier *mn,
+       struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
+       struct grant_map *map;
+ 
+-      spin_lock(&priv->lock);
++      mutex_lock(&priv->lock);
+       list_for_each_entry(map, &priv->maps, next) {
+               unmap_if_in_range(map, start, end);
+       }
+       list_for_each_entry(map, &priv->freeable_maps, next) {
+               unmap_if_in_range(map, start, end);
+       }
+-      spin_unlock(&priv->lock);
++      mutex_unlock(&priv->lock);
+ }
+ 
+ static void mn_invl_page(struct mmu_notifier *mn,
+@@ -457,7 +457,7 @@ static void mn_release(struct mmu_notifier *mn,
+       struct grant_map *map;
+       int err;
+ 
+-      spin_lock(&priv->lock);
++      mutex_lock(&priv->lock);
+       list_for_each_entry(map, &priv->maps, next) {
+               if (!map->vma)
+                       continue;
+@@ -476,7 +476,7 @@ static void mn_release(struct mmu_notifier *mn,
+               err = unmap_grant_pages(map, /* offset */ 0, map->count);
+               WARN_ON(err);
+       }
+-      spin_unlock(&priv->lock);
++      mutex_unlock(&priv->lock);
+ }
+ 
+ static struct mmu_notifier_ops gntdev_mmu_ops = {
+@@ -498,7 +498,7 @@ static int gntdev_open(struct inode *inode, struct file 
*flip)
+ 
+       INIT_LIST_HEAD(&priv->maps);
+       INIT_LIST_HEAD(&priv->freeable_maps);
+-      spin_lock_init(&priv->lock);
++      mutex_init(&priv->lock);
+ 
+       if (use_ptemod) {
+               priv->mm = get_task_mm(current);
+@@ -572,10 +572,10 @@ static long gntdev_ioctl_map_grant_ref(struct 
gntdev_priv *priv,
+               return -EFAULT;
+       }
+ 
+-      spin_lock(&priv->lock);
++      mutex_lock(&priv->lock);
+       gntdev_add_map(priv, map);
+       op.index = map->index << PAGE_SHIFT;
+-      spin_unlock(&priv->lock);
++      mutex_unlock(&priv->lock);
+ 
+       if (copy_to_user(u, &op, sizeof(op)) != 0)
+               return -EFAULT;
+@@ -594,7 +594,7 @@ static long gntdev_ioctl_unmap_grant_ref(struct 
gntdev_priv *priv,
+               return -EFAULT;
+       pr_debug("priv %p, del %d+%d\n", priv, (int)op.index, (int)op.count);
+ 
+-      spin_lock(&priv->lock);
++      mutex_lock(&priv->lock);
+       map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count);
+       if (map) {
+               list_del(&map->next);
+@@ -602,7 +602,7 @@ static long gntdev_ioctl_unmap_grant_ref(struct 
gntdev_priv *priv,
+                       list_add_tail(&map->next, &priv->freeable_maps);
+               err = 0;
+       }
+-      spin_unlock(&priv->lock);
++      mutex_unlock(&priv->lock);
+       if (map)
+               gntdev_put_map(priv, map);
+       return err;
+@@ -670,7 +670,7 @@ static long gntdev_ioctl_notify(struct gntdev_priv *priv, 
void __user *u)
+       out_flags = op.action;
+       out_event = op.event_channel_port;
+ 
+-      spin_lock(&priv->lock);
++      mutex_lock(&priv->lock);
+ 
+       list_for_each_entry(map, &priv->maps, next) {
+               uint64_t begin = map->index << PAGE_SHIFT;
+@@ -698,7 +698,7 @@ static long gntdev_ioctl_notify(struct gntdev_priv *priv, 
void __user *u)
+       rc = 0;
+ 
+  unlock_out:
+-      spin_unlock(&priv->lock);
++      mutex_unlock(&priv->lock);
+ 
+       /* Drop the reference to the event channel we did not save in the map */
+       if (out_flags & UNMAP_NOTIFY_SEND_EVENT)
+@@ -748,7 +748,7 @@ static int gntdev_mmap(struct file *flip, struct 
vm_area_struct *vma)
+       pr_debug("map %d+%d at %lx (pgoff %lx)\n",
+                       index, count, vma->vm_start, vma->vm_pgoff);
+ 
+-      spin_lock(&priv->lock);
++      mutex_lock(&priv->lock);
+       map = gntdev_find_map_index(priv, index, count);
+       if (!map)
+               goto unlock_out;
+@@ -783,7 +783,7 @@ static int gntdev_mmap(struct file *flip, struct 
vm_area_struct *vma)
+                       map->flags |= GNTMAP_readonly;
+       }
+ 
+-      spin_unlock(&priv->lock);
++      mutex_unlock(&priv->lock);
+ 
+       if (use_ptemod) {
+               err = apply_to_page_range(vma->vm_mm, vma->vm_start,
+@@ -811,11 +811,11 @@ static int gntdev_mmap(struct file *flip, struct 
vm_area_struct *vma)
+       return 0;
+ 
+ unlock_out:
+-      spin_unlock(&priv->lock);
++      mutex_unlock(&priv->lock);
+       return err;
+ 
+ out_unlock_put:
+-      spin_unlock(&priv->lock);
++      mutex_unlock(&priv->lock);
+ out_put_map:
+       if (use_ptemod)
+               map->vma = NULL;
+diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
+index 069c2fd37ce7..9218ea8dbfe5 100644
+--- a/fs/btrfs/transaction.c
++++ b/fs/btrfs/transaction.c
+@@ -1711,8 +1711,11 @@ int btrfs_commit_transaction(struct btrfs_trans_handle 
*trans,
+                       spin_unlock(&root->fs_info->trans_lock);
+ 
+                       wait_for_commit(root, prev_trans);
++                      ret = prev_trans->aborted;
+ 
+                       btrfs_put_transaction(prev_trans);
++                      if (ret)
++                              goto cleanup_transaction;
+               } else {
+                       spin_unlock(&root->fs_info->trans_lock);
+               }
+diff --git a/fs/coredump.c b/fs/coredump.c
+index 88adbdd15193..ff78d9075316 100644
+--- a/fs/coredump.c
++++ b/fs/coredump.c
+@@ -499,10 +499,10 @@ void do_coredump(siginfo_t *siginfo)
+       const struct cred *old_cred;
+       struct cred *cred;
+       int retval = 0;
+-      int flag = 0;
+       int ispipe;
+       struct files_struct *displaced;
+-      bool need_nonrelative = false;
++      /* require nonrelative corefile path and be extra careful */
++      bool need_suid_safe = false;
+       bool core_dumped = false;
+       static atomic_t core_dump_count = ATOMIC_INIT(0);
+       struct coredump_params cprm = {
+@@ -536,9 +536,8 @@ void do_coredump(siginfo_t *siginfo)
+        */
+       if (__get_dumpable(cprm.mm_flags) == SUID_DUMP_ROOT) {
+               /* Setuid core dump mode */
+-              flag = O_EXCL;          /* Stop rewrite attacks */
+               cred->fsuid = GLOBAL_ROOT_UID;  /* Dump root private */
+-              need_nonrelative = true;
++              need_suid_safe = true;
+       }
+ 
+       retval = coredump_wait(siginfo->si_signo, &core_state);
+@@ -619,7 +618,7 @@ void do_coredump(siginfo_t *siginfo)
+               if (cprm.limit < binfmt->min_coredump)
+                       goto fail_unlock;
+ 
+-              if (need_nonrelative && cn.corename[0] != '/') {
++              if (need_suid_safe && cn.corename[0] != '/') {
+                       printk(KERN_WARNING "Pid %d(%s) can only dump core "\
+                               "to fully qualified path!\n",
+                               task_tgid_vnr(current), current->comm);
+@@ -627,8 +626,35 @@ void do_coredump(siginfo_t *siginfo)
+                       goto fail_unlock;
+               }
+ 
++              /*
++               * Unlink the file if it exists unless this is a SUID
++               * binary - in that case, we're running around with root
++               * privs and don't want to unlink another user's coredump.
++               */
++              if (!need_suid_safe) {
++                      mm_segment_t old_fs;
++
++                      old_fs = get_fs();
++                      set_fs(KERNEL_DS);
++                      /*
++                       * If it doesn't exist, that's fine. If there's some
++                       * other problem, we'll catch it at the filp_open().
++                       */
++                      (void) sys_unlink((const char __user *)cn.corename);
++                      set_fs(old_fs);
++              }
++
++              /*
++               * There is a race between unlinking and creating the
++               * file, but if that causes an EEXIST here, that's
++               * fine - another process raced with us while creating
++               * the corefile, and the other process won. To userspace,
++               * what matters is that at least one of the two processes
++               * writes its coredump successfully, not which one.
++               */
+               cprm.file = filp_open(cn.corename,
+-                               O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag,
++                               O_CREAT | 2 | O_NOFOLLOW |
++                               O_LARGEFILE | O_EXCL,
+                                0600);
+               if (IS_ERR(cprm.file))
+                       goto fail_unlock;
+diff --git a/fs/dcache.c b/fs/dcache.c
+index 4c227f81051b..0fa3b3dba96f 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -2808,6 +2808,13 @@ restart:
+               struct dentry * parent;
+ 
+               if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
++                      /* Escaped? */
++                      if (dentry != vfsmnt->mnt_root) {
++                              bptr = *buffer;
++                              blen = *buflen;
++                              error = 3;
++                              break;
++                      }
+                       /* Global root? */
+                       if (mnt_has_parent(mnt)) {
+                               dentry = mnt->mnt_mountpoint;
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index c9830686cbd5..a9d23daa0d6f 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -4634,12 +4634,6 @@ long ext4_fallocate(struct file *file, int mode, loff_t 
offset, loff_t len)
+       if (ret)
+               return ret;
+ 
+-      /*
+-       * currently supporting (pre)allocate mode for extent-based
+-       * files _only_
+-       */
+-      if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
+-              return -EOPNOTSUPP;
+ 
+       trace_ext4_fallocate_enter(inode, offset, len, mode);
+       map.m_lblk = offset >> blkbits;
+@@ -4654,6 +4648,16 @@ long ext4_fallocate(struct file *file, int mode, loff_t 
offset, loff_t len)
+        */
+       credits = ext4_chunk_trans_blocks(inode, max_blocks);
+       mutex_lock(&inode->i_mutex);
++
++      /*
++       * currently supporting (pre)allocate mode for extent-based
++       * files _only_
++       */
++      if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
++              ret = -EOPNOTSUPP;
++              goto out;
++      }
++
+       ret = inode_newsize_ok(inode, (len + offset));
+       if (ret) {
+               mutex_unlock(&inode->i_mutex);
+@@ -4714,6 +4718,7 @@ retry:
+               ret = 0;
+               goto retry;
+       }
++out:
+       mutex_unlock(&inode->i_mutex);
+       trace_ext4_fallocate_exit(inode, offset, max_blocks,
+                               ret > 0 ? ret2 : ret);
+diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c
+index d3fa6bd9503e..221719eac5de 100644
+--- a/fs/hfs/bnode.c
++++ b/fs/hfs/bnode.c
+@@ -288,7 +288,6 @@ static struct hfs_bnode *__hfs_bnode_create(struct 
hfs_btree *tree, u32 cnid)
+                       page_cache_release(page);
+                       goto fail;
+               }
+-              page_cache_release(page);
+               node->page[i] = page;
+       }
+ 
+@@ -398,11 +397,11 @@ node_error:
+ 
+ void hfs_bnode_free(struct hfs_bnode *node)
+ {
+-      //int i;
++      int i;
+ 
+-      //for (i = 0; i < node->tree->pages_per_bnode; i++)
+-      //      if (node->page[i])
+-      //              page_cache_release(node->page[i]);
++      for (i = 0; i < node->tree->pages_per_bnode; i++)
++              if (node->page[i])
++                      page_cache_release(node->page[i]);
+       kfree(node);
+ }
+ 
+diff --git a/fs/hfs/brec.c b/fs/hfs/brec.c
+index 9f4ee7f52026..6fc766df0461 100644
+--- a/fs/hfs/brec.c
++++ b/fs/hfs/brec.c
+@@ -131,13 +131,16 @@ skip:
+       hfs_bnode_write(node, entry, data_off + key_len, entry_len);
+       hfs_bnode_dump(node);
+ 
+-      if (new_node) {
+-              /* update parent key if we inserted a key
+-               * at the start of the first node
+-               */
+-              if (!rec && new_node != node)
+-                      hfs_brec_update_parent(fd);
++      /*
++       * update parent key if we inserted a key
++       * at the start of the node and it is not the new node
++       */
++      if (!rec && new_node != node) {
++              hfs_bnode_read_key(node, fd->search_key, data_off + size);
++              hfs_brec_update_parent(fd);
++      }
+ 
++      if (new_node) {
+               hfs_bnode_put(fd->bnode);
+               if (!new_node->parent) {
+                       hfs_btree_inc_height(tree);
+@@ -166,9 +169,6 @@ skip:
+               goto again;
+       }
+ 
+-      if (!rec)
+-              hfs_brec_update_parent(fd);
+-
+       return 0;
+ }
+ 
+@@ -366,6 +366,8 @@ again:
+       if (IS_ERR(parent))
+               return PTR_ERR(parent);
+       __hfs_brec_find(parent, fd);
++      if (fd->record < 0)
++              return -ENOENT;
+       hfs_bnode_dump(parent);
+       rec = fd->record;
+ 
+diff --git a/fs/hfsplus/bnode.c b/fs/hfsplus/bnode.c
+index 11c860204520..bedfe5f7d332 100644
+--- a/fs/hfsplus/bnode.c
++++ b/fs/hfsplus/bnode.c
+@@ -456,7 +456,6 @@ static struct hfs_bnode *__hfs_bnode_create(struct 
hfs_btree *tree, u32 cnid)
+                       page_cache_release(page);
+                       goto fail;
+               }
+-              page_cache_release(page);
+               node->page[i] = page;
+       }
+ 
+@@ -568,13 +567,11 @@ node_error:
+ 
+ void hfs_bnode_free(struct hfs_bnode *node)
+ {
+-#if 0
+       int i;
+ 
+       for (i = 0; i < node->tree->pages_per_bnode; i++)
+               if (node->page[i])
+                       page_cache_release(node->page[i]);
+-#endif
+       kfree(node);
+ }
+ 
+diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c
+index 345713d2f8f3..6b42789ae799 100644
+--- a/fs/hpfs/namei.c
++++ b/fs/hpfs/namei.c
+@@ -8,6 +8,17 @@
+ #include <linux/sched.h>
+ #include "hpfs_fn.h"
+ 
++static void hpfs_update_directory_times(struct inode *dir)
++{
++      time_t t = get_seconds();
++      if (t == dir->i_mtime.tv_sec &&
++          t == dir->i_ctime.tv_sec)
++              return;
++      dir->i_mtime.tv_sec = dir->i_ctime.tv_sec = t;
++      dir->i_mtime.tv_nsec = dir->i_ctime.tv_nsec = 0;
++      hpfs_write_inode_nolock(dir);
++}
++
+ static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+ {
+       const unsigned char *name = dentry->d_name.name;
+@@ -99,6 +110,7 @@ static int hpfs_mkdir(struct inode *dir, struct dentry 
*dentry, umode_t mode)
+               result->i_mode = mode | S_IFDIR;
+               hpfs_write_inode_nolock(result);
+       }
++      hpfs_update_directory_times(dir);
+       d_instantiate(dentry, result);
+       hpfs_unlock(dir->i_sb);
+       return 0;
+@@ -187,6 +199,7 @@ static int hpfs_create(struct inode *dir, struct dentry 
*dentry, umode_t mode, b
+               result->i_mode = mode | S_IFREG;
+               hpfs_write_inode_nolock(result);
+       }
++      hpfs_update_directory_times(dir);
+       d_instantiate(dentry, result);
+       hpfs_unlock(dir->i_sb);
+       return 0;
+@@ -262,6 +275,7 @@ static int hpfs_mknod(struct inode *dir, struct dentry 
*dentry, umode_t mode, de
+       insert_inode_hash(result);
+ 
+       hpfs_write_inode_nolock(result);
++      hpfs_update_directory_times(dir);
+       d_instantiate(dentry, result);
+       brelse(bh);
+       hpfs_unlock(dir->i_sb);
+@@ -340,6 +354,7 @@ static int hpfs_symlink(struct inode *dir, struct dentry 
*dentry, const char *sy
+       insert_inode_hash(result);
+ 
+       hpfs_write_inode_nolock(result);
++      hpfs_update_directory_times(dir);
+       d_instantiate(dentry, result);
+       hpfs_unlock(dir->i_sb);
+       return 0;
+@@ -423,6 +438,8 @@ again:
+ out1:
+       hpfs_brelse4(&qbh);
+ out:
++      if (!err)
++              hpfs_update_directory_times(dir);
+       hpfs_unlock(dir->i_sb);
+       return err;
+ }
+@@ -477,6 +494,8 @@ static int hpfs_rmdir(struct inode *dir, struct dentry 
*dentry)
+ out1:
+       hpfs_brelse4(&qbh);
+ out:
++      if (!err)
++              hpfs_update_directory_times(dir);
+       hpfs_unlock(dir->i_sb);
+       return err;
+ }
+@@ -595,7 +614,7 @@ static int hpfs_rename(struct inode *old_dir, struct 
dentry *old_dentry,
+               goto end1;
+       }
+ 
+-      end:
++end:
+       hpfs_i(i)->i_parent_dir = new_dir->i_ino;
+       if (S_ISDIR(i->i_mode)) {
+               inc_nlink(new_dir);
+@@ -610,6 +629,10 @@ static int hpfs_rename(struct inode *old_dir, struct 
dentry *old_dentry,
+               brelse(bh);
+       }
+ end1:
++      if (!err) {
++              hpfs_update_directory_times(old_dir);
++              hpfs_update_directory_times(new_dir);
++      }
+       hpfs_unlock(i->i_sb);
+       return err;
+ }
+diff --git a/fs/namei.c b/fs/namei.c
+index 097bbeac8c66..d1c0b91b4534 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -473,6 +473,24 @@ void path_put(const struct path *path)
+ }
+ EXPORT_SYMBOL(path_put);
+ 
++/**
++ * path_connected - Verify that a path->dentry is below path->mnt.mnt_root
++ * @path: nameidate to verify
++ *
++ * Rename can sometimes move a file or directory outside of a bind
++ * mount, path_connected allows those cases to be detected.
++ */
++static bool path_connected(const struct path *path)
++{
++      struct vfsmount *mnt = path->mnt;
++
++      /* Only bind mounts can have disconnected paths */
++      if (mnt->mnt_root == mnt->mnt_sb->s_root)
++              return true;
++
++      return is_subdir(path->dentry, mnt->mnt_root);
++}
++
+ /*
+  * Path walking has 2 modes, rcu-walk and ref-walk (see
+  * Documentation/filesystems/path-lookup.txt).  In situations when we can't
+@@ -1162,6 +1180,8 @@ static int follow_dotdot_rcu(struct nameidata *nd)
+                               goto failed;
+                       nd->path.dentry = parent;
+                       nd->seq = seq;
++                      if (unlikely(!path_connected(&nd->path)))
++                              goto failed;
+                       break;
+               }
+               if (!follow_up_rcu(&nd->path))
+@@ -1245,7 +1265,7 @@ static void follow_mount(struct path *path)
+       }
+ }
+ 
+-static void follow_dotdot(struct nameidata *nd)
++static int follow_dotdot(struct nameidata *nd)
+ {
+       if (!nd->root.mnt)
+               set_root(nd);
+@@ -1261,6 +1281,10 @@ static void follow_dotdot(struct nameidata *nd)
+                       /* rare case of legitimate dget_parent()... */
+                       nd->path.dentry = dget_parent(nd->path.dentry);
+                       dput(old);
++                      if (unlikely(!path_connected(&nd->path))) {
++                              path_put(&nd->path);
++                              return -ENOENT;
++                      }
+                       break;
+               }
+               if (!follow_up(&nd->path))
+@@ -1268,6 +1292,7 @@ static void follow_dotdot(struct nameidata *nd)
+       }
+       follow_mount(&nd->path);
+       nd->inode = nd->path.dentry->d_inode;
++      return 0;
+ }
+ 
+ /*
+@@ -1491,7 +1516,7 @@ static inline int handle_dots(struct nameidata *nd, int 
type)
+                       if (follow_dotdot_rcu(nd))
+                               return -ECHILD;
+               } else
+-                      follow_dotdot(nd);
++                      return follow_dotdot(nd);
+       }
+       return 0;
+ }
+@@ -2248,7 +2273,7 @@ mountpoint_last(struct nameidata *nd, struct path *path)
+       if (unlikely(nd->last_type != LAST_NORM)) {
+               error = handle_dots(nd, nd->last_type);
+               if (error)
+-                      goto out;
++                      return error;
+               dentry = dget(nd->path.dentry);
+               goto done;
+       }
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 36a72b59d7c8..794af58b388f 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -2267,7 +2267,7 @@ static int _nfs4_do_open(struct inode *dir,
+               goto err_free_label;
+       state = ctx->state;
+ 
+-      if ((opendata->o_arg.open_flags & O_EXCL) &&
++      if ((opendata->o_arg.open_flags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL) 
&&
+           (opendata->o_arg.createmode != NFS4_CREATE_GUARDED)) {
+               nfs4_exclusive_attrset(opendata, sattr);
+ 
+diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
+index 27d7f2742592..11763ce73709 100644
+--- a/fs/nfs/pagelist.c
++++ b/fs/nfs/pagelist.c
+@@ -60,8 +60,8 @@ EXPORT_SYMBOL_GPL(nfs_pgheader_init);
+ void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos)
+ {
+       spin_lock(&hdr->lock);
+-      if (pos < hdr->io_start + hdr->good_bytes) {
+-              set_bit(NFS_IOHDR_ERROR, &hdr->flags);
++      if (!test_and_set_bit(NFS_IOHDR_ERROR, &hdr->flags)
++          || pos < hdr->io_start + hdr->good_bytes) {
+               clear_bit(NFS_IOHDR_EOF, &hdr->flags);
+               hdr->good_bytes = pos - hdr->io_start;
+               hdr->error = error;
+diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
+index 2103cc32a5fb..e94d165a1053 100644
+--- a/include/linux/iio/iio.h
++++ b/include/linux/iio/iio.h
+@@ -623,6 +623,15 @@ int iio_str_to_fixpoint(const char *str, int fract_mult, 
int *integer,
+ #define IIO_DEGREE_TO_RAD(deg) (((deg) * 314159ULL + 9000000ULL) / 
18000000ULL)
+ 
+ /**
++ * IIO_RAD_TO_DEGREE() - Convert rad to degree
++ * @rad: A value in rad
++ *
++ * Returns the given value converted from rad to degree
++ */
++#define IIO_RAD_TO_DEGREE(rad) \
++      (((rad) * 18000000ULL + 314159ULL / 2) / 314159ULL)
++
++/**
+  * IIO_G_TO_M_S_2() - Convert g to meter / second**2
+  * @g: A value in g
+  *
+@@ -630,4 +639,12 @@ int iio_str_to_fixpoint(const char *str, int fract_mult, int *integer,
+  */
+ #define IIO_G_TO_M_S_2(g) ((g) * 980665ULL / 100000ULL)
+ 
++/**
++ * IIO_M_S_2_TO_G() - Convert meter / second**2 to g
++ * @ms2: A value in meter / second**2
++ *
++ * Returns the given value converted from meter / second**2 to g
++ */
++#define IIO_M_S_2_TO_G(ms2) (((ms2) * 100000ULL + 980665ULL / 2) / 980665ULL)
++
+ #endif /* _INDUSTRIAL_IO_H_ */
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index 573c04929bd1..b11e6e280f15 100644
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -170,6 +170,8 @@ enum pci_dev_flags {
+       PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) 2,
+       /* Provide indication device is assigned by a Virtual Machine Manager */
+       PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) 4,
++      /* Get VPD from function 0 VPD */
++      PCI_DEV_FLAGS_VPD_REF_F0 = (__force pci_dev_flags_t) (1 << 8),
+ };
+ 
+ enum pci_irq_reroute_variant {
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 982a36db1593..60403f7efdad 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -1762,13 +1762,21 @@ static int check_unshare_flags(unsigned long unshare_flags)
+                               CLONE_NEWUSER|CLONE_NEWPID))
+               return -EINVAL;
+       /*
+-       * Not implemented, but pretend it works if there is nothing to
+-       * unshare. Note that unsharing CLONE_THREAD or CLONE_SIGHAND
+-       * needs to unshare vm.
++       * Not implemented, but pretend it works if there is nothing
++       * to unshare.  Note that unsharing the address space or the
++       * signal handlers also need to unshare the signal queues (aka
++       * CLONE_THREAD).
+        */
+       if (unshare_flags & (CLONE_THREAD | CLONE_SIGHAND | CLONE_VM)) {
+-              /* FIXME: get_task_mm() increments ->mm_users */
+-              if (atomic_read(&current->mm->mm_users) > 1)
++              if (!thread_group_empty(current))
++                      return -EINVAL;
++      }
++      if (unshare_flags & (CLONE_SIGHAND | CLONE_VM)) {
++              if (atomic_read(&current->sighand->count) > 1)
++                      return -EINVAL;
++      }
++      if (unshare_flags & CLONE_VM) {
++              if (!current_is_single_threaded())
+                       return -EINVAL;
+       }
+ 
+@@ -1837,16 +1845,16 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
+       if (unshare_flags & CLONE_NEWUSER)
+               unshare_flags |= CLONE_THREAD | CLONE_FS;
+       /*
+-       * If unsharing a thread from a thread group, must also unshare vm.
+-       */
+-      if (unshare_flags & CLONE_THREAD)
+-              unshare_flags |= CLONE_VM;
+-      /*
+        * If unsharing vm, must also unshare signal handlers.
+        */
+       if (unshare_flags & CLONE_VM)
+               unshare_flags |= CLONE_SIGHAND;
+       /*
++       * If unsharing a signal handlers, must also unshare the signal queues.
++       */
++      if (unshare_flags & CLONE_SIGHAND)
++              unshare_flags |= CLONE_THREAD;
++      /*
+        * If unsharing namespace, must also unshare filesystem information.
+        */
+       if (unshare_flags & CLONE_NEWNS)
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 04c33d5fb079..6dc33d9dc2cf 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -1087,7 +1087,7 @@ cull_mlocked:
+               if (PageSwapCache(page))
+                       try_to_free_swap(page);
+               unlock_page(page);
+-              putback_lru_page(page);
++              list_add(&page->lru, &ret_pages);
+               continue;
+ 
+ activate_locked:
+diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
+index 185c341fafbd..99ae718b79be 100644
+--- a/net/core/fib_rules.c
++++ b/net/core/fib_rules.c
+@@ -621,15 +621,17 @@ static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
+ {
+       int idx = 0;
+       struct fib_rule *rule;
++      int err = 0;
+ 
+       rcu_read_lock();
+       list_for_each_entry_rcu(rule, &ops->rules_list, list) {
+               if (idx < cb->args[1])
+                       goto skip;
+ 
+-              if (fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid,
+-                                   cb->nlh->nlmsg_seq, RTM_NEWRULE,
+-                                   NLM_F_MULTI, ops) < 0)
++              err = fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid,
++                                     cb->nlh->nlmsg_seq, RTM_NEWRULE,
++                                     NLM_F_MULTI, ops);
++              if (err < 0)
+                       break;
+ skip:
+               idx++;
+@@ -638,7 +640,7 @@ skip:
+       cb->args[1] = idx;
+       rules_ops_put(ops);
+ 
+-      return skb->len;
++      return err;
+ }
+ 
+ static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
+@@ -654,7 +656,9 @@ static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
+               if (ops == NULL)
+                       return -EAFNOSUPPORT;
+ 
+-              return dump_rules(skb, cb, ops);
++              dump_rules(skb, cb, ops);
++
++              return skb->len;
+       }
+ 
+       rcu_read_lock();
+diff --git a/net/ipv6/exthdrs_offload.c b/net/ipv6/exthdrs_offload.c
+index 447a7fbd1bb6..f5e2ba1c18bf 100644
+--- a/net/ipv6/exthdrs_offload.c
++++ b/net/ipv6/exthdrs_offload.c
+@@ -36,6 +36,6 @@ out:
+       return ret;
+ 
+ out_rt:
+-      inet_del_offload(&rthdr_offload, IPPROTO_ROUTING);
++      inet6_del_offload(&rthdr_offload, IPPROTO_ROUTING);
+       goto out;
+ }
+diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
+index 7d640f276e87..b2e4c77d9a8c 100644
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -360,6 +360,7 @@ static void ip6gre_tunnel_uninit(struct net_device *dev)
+       struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
+ 
+       ip6gre_tunnel_unlink(ign, netdev_priv(dev));
++      ip6_tnl_dst_reset(netdev_priv(dev));
+       dev_put(dev);
+ }
+ 
+diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
+index 8737400af0a0..821d8dfb2ddd 100644
+--- a/net/ipv6/ip6mr.c
++++ b/net/ipv6/ip6mr.c
+@@ -552,7 +552,7 @@ static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
+ 
+       if (it->cache == &mrt->mfc6_unres_queue)
+               spin_unlock_bh(&mfc_unres_lock);
+-      else if (it->cache == mrt->mfc6_cache_array)
++      else if (it->cache == &mrt->mfc6_cache_array[it->ct])
+               read_unlock(&mrt_lock);
+ }
+ 
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
+index d36e0977f44a..eac14e99c941 100644
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -296,9 +296,6 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
+       if (tx->sdata->vif.type == NL80211_IFTYPE_WDS)
+               return TX_CONTINUE;
+ 
+-      if (tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
+-              return TX_CONTINUE;
+-
+       if (tx->flags & IEEE80211_TX_PS_BUFFERED)
+               return TX_CONTINUE;
+ 
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 22e0f478a2a3..10805856dfba 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -115,6 +115,24 @@ static inline struct hlist_head *nl_portid_hashfn(struct nl_portid_hash *hash, u32 portid)
+       return &hash->table[jhash_1word(portid, hash->rnd) & hash->mask];
+ }
+ 
++static struct sk_buff *netlink_to_full_skb(const struct sk_buff *skb,
++                                         gfp_t gfp_mask)
++{
++      unsigned int len = skb_end_offset(skb);
++      struct sk_buff *new;
++
++      new = alloc_skb(len, gfp_mask);
++      if (new == NULL)
++              return NULL;
++
++      NETLINK_CB(new).portid = NETLINK_CB(skb).portid;
++      NETLINK_CB(new).dst_group = NETLINK_CB(skb).dst_group;
++      NETLINK_CB(new).creds = NETLINK_CB(skb).creds;
++
++      memcpy(skb_put(new, len), skb->data, len);
++      return new;
++}
++
+ int netlink_add_tap(struct netlink_tap *nt)
+ {
+       if (unlikely(nt->dev->type != ARPHRD_NETLINK))
+@@ -200,7 +218,11 @@ static int __netlink_deliver_tap_skb(struct sk_buff *skb,
+       int ret = -ENOMEM;
+ 
+       dev_hold(dev);
+-      nskb = skb_clone(skb, GFP_ATOMIC);
++
++      if (netlink_skb_is_mmaped(skb) || is_vmalloc_addr(skb->head))
++              nskb = netlink_to_full_skb(skb, GFP_ATOMIC);
++      else
++              nskb = skb_clone(skb, GFP_ATOMIC);
+       if (nskb) {
+               nskb->dev = dev;
+               nskb->protocol = htons((u16) sk->sk_protocol);
+@@ -263,11 +285,6 @@ static void netlink_rcv_wake(struct sock *sk)
+ }
+ 
+ #ifdef CONFIG_NETLINK_MMAP
+-static bool netlink_skb_is_mmaped(const struct sk_buff *skb)
+-{
+-      return NETLINK_CB(skb).flags & NETLINK_SKB_MMAPED;
+-}
+-
+ static bool netlink_rx_is_mmaped(struct sock *sk)
+ {
+       return nlk_sk(sk)->rx_ring.pg_vec != NULL;
+@@ -819,7 +836,6 @@ static void netlink_ring_set_copied(struct sock *sk, struct sk_buff *skb)
+ }
+ 
+ #else /* CONFIG_NETLINK_MMAP */
+-#define netlink_skb_is_mmaped(skb)    false
+ #define netlink_rx_is_mmaped(sk)      false
+ #define netlink_tx_is_mmaped(sk)      false
+ #define netlink_mmap                  sock_no_mmap
+diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h
+index acbd774eeb7c..dcc89c74b514 100644
+--- a/net/netlink/af_netlink.h
++++ b/net/netlink/af_netlink.h
+@@ -65,6 +65,15 @@ struct nl_portid_hash {
+       u32                     rnd;
+ };
+ 
++static inline bool netlink_skb_is_mmaped(const struct sk_buff *skb)
++{
++#ifdef CONFIG_NETLINK_MMAP
++      return NETLINK_CB(skb).flags & NETLINK_SKB_MMAPED;
++#else
++      return false;
++#endif /* CONFIG_NETLINK_MMAP */
++}
++
+ struct netlink_table {
+       struct nl_portid_hash   hash;
+       struct hlist_head       mc_list;
+diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
+index 2aa13bd7f2b2..aa0a5f2794f1 100644
+--- a/net/openvswitch/datapath.c
++++ b/net/openvswitch/datapath.c
+@@ -1266,7 +1266,7 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
+               if (IS_ERR(acts))
+                       goto error;
+ 
+-              ovs_flow_key_mask(&masked_key, &key, &mask);
++              ovs_flow_key_mask(&masked_key, &key, true, &mask);
+               error = validate_and_copy_actions(a[OVS_FLOW_ATTR_ACTIONS],
+                                                 &masked_key, 0, &acts);
+               if (error) {
+diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
+index 410db90db73d..b5d58cfa4fdc 100644
+--- a/net/openvswitch/flow.c
++++ b/net/openvswitch/flow.c
+@@ -373,18 +373,21 @@ static bool icmp6hdr_ok(struct sk_buff *skb)
+ }
+ 
+ void ovs_flow_key_mask(struct sw_flow_key *dst, const struct sw_flow_key *src,
+-                     const struct sw_flow_mask *mask)
++                     bool full, const struct sw_flow_mask *mask)
+ {
+-      const long *m = (long *)((u8 *)&mask->key + mask->range.start);
+-      const long *s = (long *)((u8 *)src + mask->range.start);
+-      long *d = (long *)((u8 *)dst + mask->range.start);
++      int start = full ? 0 : mask->range.start;
++      int len = full ? sizeof *dst : range_n_bytes(&mask->range);
++      const long *m = (const long *)((const u8 *)&mask->key + start);
++      const long *s = (const long *)((const u8 *)src + start);
++      long *d = (long *)((u8 *)dst + start);
+       int i;
+ 
+-      /* The memory outside of the 'mask->range' are not set since
+-       * further operations on 'dst' only uses contents within
+-       * 'mask->range'.
++      /* If 'full' is true then all of 'dst' is fully initialized. Otherwise,
++       * if 'full' is false the memory outside of the 'mask->range' is left
++       * uninitialized. This can be used as an optimization when further
++       * operations on 'dst' only use contents within 'mask->range'.
+        */
+-      for (i = 0; i < range_n_bytes(&mask->range); i += sizeof(long))
++      for (i = 0; i < len; i += sizeof(long))
+               *d++ = *s++ & *m++;
+ }
+ 
+@@ -1085,7 +1088,7 @@ static struct sw_flow *ovs_masked_flow_lookup(struct flow_table *table,
+       u32 hash;
+       struct sw_flow_key masked_key;
+ 
+-      ovs_flow_key_mask(&masked_key, unmasked, mask);
++      ovs_flow_key_mask(&masked_key, unmasked, false, mask);
+       hash = ovs_flow_hash(&masked_key, key_start, key_end);
+       head = find_bucket(table, hash);
+       hlist_for_each_entry_rcu(flow, head, hash_node[table->node_ver]) {
+diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h
+index 212fbf7510c4..a5da8e1ab854 100644
+--- a/net/openvswitch/flow.h
++++ b/net/openvswitch/flow.h
+@@ -255,5 +255,5 @@ void ovs_sw_flow_mask_insert(struct flow_table *, struct sw_flow_mask *);
+ struct sw_flow_mask *ovs_sw_flow_mask_find(const struct flow_table *,
+               const struct sw_flow_mask *);
+ void ovs_flow_key_mask(struct sw_flow_key *dst, const struct sw_flow_key *src,
+-                     const struct sw_flow_mask *mask);
++                     bool full, const struct sw_flow_mask *mask);
+ #endif /* flow.h */
+diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
+index 2b216f1f6b23..599757e0c23a 100644
+--- a/net/sctp/protocol.c
++++ b/net/sctp/protocol.c
+@@ -1167,7 +1167,7 @@ static void sctp_v4_del_protocol(void)
+       unregister_inetaddr_notifier(&sctp_inetaddr_notifier);
+ }
+ 
+-static int __net_init sctp_net_init(struct net *net)
++static int __net_init sctp_defaults_init(struct net *net)
+ {
+       int status;
+ 
+@@ -1260,12 +1260,6 @@ static int __net_init sctp_net_init(struct net *net)
+ 
+       sctp_dbg_objcnt_init(net);
+ 
+-      /* Initialize the control inode/socket for handling OOTB packets.  */
+-      if ((status = sctp_ctl_sock_init(net))) {
+-              pr_err("Failed to initialize the SCTP control sock\n");
+-              goto err_ctl_sock_init;
+-      }
+-
+       /* Initialize the local address list. */
+       INIT_LIST_HEAD(&net->sctp.local_addr_list);
+       spin_lock_init(&net->sctp.local_addr_lock);
+@@ -1281,9 +1275,6 @@ static int __net_init sctp_net_init(struct net *net)
+ 
+       return 0;
+ 
+-err_ctl_sock_init:
+-      sctp_dbg_objcnt_exit(net);
+-      sctp_proc_exit(net);
+ err_init_proc:
+       cleanup_sctp_mibs(net);
+ err_init_mibs:
+@@ -1292,15 +1283,12 @@ err_sysctl_register:
+       return status;
+ }
+ 
+-static void __net_exit sctp_net_exit(struct net *net)
++static void __net_exit sctp_defaults_exit(struct net *net)
+ {
+       /* Free the local address list */
+       sctp_free_addr_wq(net);
+       sctp_free_local_addr_list(net);
+ 
+-      /* Free the control endpoint.  */
+-      inet_ctl_sock_destroy(net->sctp.ctl_sock);
+-
+       sctp_dbg_objcnt_exit(net);
+ 
+       sctp_proc_exit(net);
+@@ -1308,9 +1296,32 @@ static void __net_exit sctp_net_exit(struct net *net)
+       sctp_sysctl_net_unregister(net);
+ }
+ 
+-static struct pernet_operations sctp_net_ops = {
+-      .init = sctp_net_init,
+-      .exit = sctp_net_exit,
++static struct pernet_operations sctp_defaults_ops = {
++      .init = sctp_defaults_init,
++      .exit = sctp_defaults_exit,
++};
++
++static int __net_init sctp_ctrlsock_init(struct net *net)
++{
++      int status;
++
++      /* Initialize the control inode/socket for handling OOTB packets.  */
++      status = sctp_ctl_sock_init(net);
++      if (status)
++              pr_err("Failed to initialize the SCTP control sock\n");
++
++      return status;
++}
++
++static void __net_init sctp_ctrlsock_exit(struct net *net)
++{
++      /* Free the control endpoint.  */
++      inet_ctl_sock_destroy(net->sctp.ctl_sock);
++}
++
++static struct pernet_operations sctp_ctrlsock_ops = {
++      .init = sctp_ctrlsock_init,
++      .exit = sctp_ctrlsock_exit,
+ };
+ 
+ /* Initialize the universe into something sensible.  */
+@@ -1444,8 +1455,11 @@ static __init int sctp_init(void)
+       sctp_v4_pf_init();
+       sctp_v6_pf_init();
+ 
+-      status = sctp_v4_protosw_init();
++      status = register_pernet_subsys(&sctp_defaults_ops);
++      if (status)
++              goto err_register_defaults;
+ 
++      status = sctp_v4_protosw_init();
+       if (status)
+               goto err_protosw_init;
+ 
+@@ -1453,9 +1467,9 @@ static __init int sctp_init(void)
+       if (status)
+               goto err_v6_protosw_init;
+ 
+-      status = register_pernet_subsys(&sctp_net_ops);
++      status = register_pernet_subsys(&sctp_ctrlsock_ops);
+       if (status)
+-              goto err_register_pernet_subsys;
++              goto err_register_ctrlsock;
+ 
+       status = sctp_v4_add_protocol();
+       if (status)
+@@ -1472,12 +1486,14 @@ out:
+ err_v6_add_protocol:
+       sctp_v4_del_protocol();
+ err_add_protocol:
+-      unregister_pernet_subsys(&sctp_net_ops);
+-err_register_pernet_subsys:
++      unregister_pernet_subsys(&sctp_ctrlsock_ops);
++err_register_ctrlsock:
+       sctp_v6_protosw_exit();
+ err_v6_protosw_init:
+       sctp_v4_protosw_exit();
+ err_protosw_init:
++      unregister_pernet_subsys(&sctp_defaults_ops);
++err_register_defaults:
+       sctp_v4_pf_exit();
+       sctp_v6_pf_exit();
+       sctp_sysctl_unregister();
+@@ -1510,12 +1526,14 @@ static __exit void sctp_exit(void)
+       sctp_v6_del_protocol();
+       sctp_v4_del_protocol();
+ 
+-      unregister_pernet_subsys(&sctp_net_ops);
++      unregister_pernet_subsys(&sctp_ctrlsock_ops);
+ 
+       /* Free protosw registrations */
+       sctp_v6_protosw_exit();
+       sctp_v4_protosw_exit();
+ 
++      unregister_pernet_subsys(&sctp_defaults_ops);
++
+       /* Unregister with socket layer. */
+       sctp_v6_pf_exit();
+       sctp_v4_pf_exit();
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index f92057919273..73d342c8403c 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -1144,7 +1144,7 @@ static const struct hda_fixup alc880_fixups[] = {
+               /* override all pins as BIOS on old Amilo is broken */
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+-                      { 0x14, 0x0121411f }, /* HP */
++                      { 0x14, 0x0121401f }, /* HP */
+                       { 0x15, 0x99030120 }, /* speaker */
+                       { 0x16, 0x99030130 }, /* bass speaker */
+                       { 0x17, 0x411111f0 }, /* N/A */
+@@ -1164,7 +1164,7 @@ static const struct hda_fixup alc880_fixups[] = {
+               /* almost compatible with FUJITSU, but no bass and SPDIF */
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+-                      { 0x14, 0x0121411f }, /* HP */
++                      { 0x14, 0x0121401f }, /* HP */
+                       { 0x15, 0x99030120 }, /* speaker */
+                       { 0x16, 0x411111f0 }, /* N/A */
+                       { 0x17, 0x411111f0 }, /* N/A */
+@@ -1372,7 +1372,7 @@ static const struct snd_pci_quirk alc880_fixup_tbl[] = {
+       SND_PCI_QUIRK(0x161f, 0x203d, "W810", ALC880_FIXUP_W810),
+       SND_PCI_QUIRK(0x161f, 0x205d, "Medion Rim 2150", ALC880_FIXUP_MEDION_RIM),
+       SND_PCI_QUIRK(0x1631, 0xe011, "PB 13201056", ALC880_FIXUP_6ST_AUTOMUTE),
+-      SND_PCI_QUIRK(0x1734, 0x107c, "FSC F1734", ALC880_FIXUP_F1734),
++      SND_PCI_QUIRK(0x1734, 0x107c, "FSC Amilo M1437", ALC880_FIXUP_FUJITSU),
+       SND_PCI_QUIRK(0x1734, 0x1094, "FSC Amilo M1451G", ALC880_FIXUP_FUJITSU),
+       SND_PCI_QUIRK(0x1734, 0x10ac, "FSC AMILO Xi 1526", ALC880_FIXUP_F1734),
+       SND_PCI_QUIRK(0x1734, 0x10b0, "FSC Amilo Pi1556", ALC880_FIXUP_FUJITSU),

Reply via email to