commit:     ad556f40fb7e235038ee94c87a2b20fee2e064df
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Mar 26 17:04:56 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Mar 26 17:04:56 2015 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=ad556f40

Linux patch 3.10.73

 0000_README              |    4 +
 1072_linux-3.10.73.patch | 1772 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1776 insertions(+)

diff --git a/0000_README b/0000_README
index b0f0bc4..9319079 100644
--- a/0000_README
+++ b/0000_README
@@ -330,6 +330,10 @@ Patch:  1071_linux-3.10.72.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.10.72
 
+Patch:  1072_linux-3.10.73.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.10.73
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1072_linux-3.10.73.patch b/1072_linux-3.10.73.patch
new file mode 100644
index 0000000..ec1cc88
--- /dev/null
+++ b/1072_linux-3.10.73.patch
@@ -0,0 +1,1772 @@
+diff --git a/Makefile b/Makefile
+index 211bb34102bf..b1129094ebfd 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 10
+-SUBLEVEL = 72
++SUBLEVEL = 73
+ EXTRAVERSION =
+ NAME = TOSSUG Baby Fish
+ 
+diff --git a/arch/arm/mach-at91/pm.h b/arch/arm/mach-at91/pm.h
+index 2f5908f0b8c5..d8af0755bddc 100644
+--- a/arch/arm/mach-at91/pm.h
++++ b/arch/arm/mach-at91/pm.h
+@@ -37,7 +37,7 @@ static inline void at91rm9200_standby(void)
+               "    mcr    p15, 0, %0, c7, c0, 4\n\t"
+               "    str    %5, [%1, %2]"
+               :
+-              : "r" (0), "r" (AT91_BASE_SYS), "r" (AT91RM9200_SDRAMC_LPR),
++              : "r" (0), "r" (at91_ramc_base[0]), "r" (AT91RM9200_SDRAMC_LPR),
+                 "r" (1), "r" (AT91RM9200_SDRAMC_SRR),
+                 "r" (lpr));
+ }
+diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
+index ee7ac5e6e28a..c5c640779549 100644
+--- a/arch/powerpc/kernel/smp.c
++++ b/arch/powerpc/kernel/smp.c
+@@ -544,8 +544,8 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
+       if (smp_ops->give_timebase)
+               smp_ops->give_timebase();
+ 
+-      /* Wait until cpu puts itself in the online map */
+-      while (!cpu_online(cpu))
++      /* Wait until cpu puts itself in the online & active maps */
++      while (!cpu_online(cpu) || !cpu_active(cpu))
+               cpu_relax();
+ 
+       return 0;
+diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
+index b5c38faa4ead..d461b7ddf30e 100644
+--- a/arch/sparc/kernel/perf_event.c
++++ b/arch/sparc/kernel/perf_event.c
+@@ -960,6 +960,8 @@ out:
+       cpuc->pcr[0] |= cpuc->event[0]->hw.config_base;
+ }
+ 
++static void sparc_pmu_start(struct perf_event *event, int flags);
++
+ /* On this PMU each PIC has it's own PCR control register.  */
+ static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc)
+ {
+@@ -972,20 +974,13 @@ static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc)
+               struct perf_event *cp = cpuc->event[i];
+               struct hw_perf_event *hwc = &cp->hw;
+               int idx = hwc->idx;
+-              u64 enc;
+ 
+               if (cpuc->current_idx[i] != PIC_NO_INDEX)
+                       continue;
+ 
+-              sparc_perf_event_set_period(cp, hwc, idx);
+               cpuc->current_idx[i] = idx;
+ 
+-              enc = perf_event_get_enc(cpuc->events[i]);
+-              cpuc->pcr[idx] &= ~mask_for_index(idx);
+-              if (hwc->state & PERF_HES_STOPPED)
+-                      cpuc->pcr[idx] |= nop_for_index(idx);
+-              else
+-                      cpuc->pcr[idx] |= event_encoding(enc, idx);
++              sparc_pmu_start(cp, PERF_EF_RELOAD);
+       }
+ out:
+       for (i = 0; i < cpuc->n_events; i++) {
+@@ -1101,7 +1096,6 @@ static void sparc_pmu_del(struct perf_event *event, int _flags)
+       int i;
+ 
+       local_irq_save(flags);
+-      perf_pmu_disable(event->pmu);
+ 
+       for (i = 0; i < cpuc->n_events; i++) {
+               if (event == cpuc->event[i]) {
+@@ -1127,7 +1121,6 @@ static void sparc_pmu_del(struct perf_event *event, int _flags)
+               }
+       }
+ 
+-      perf_pmu_enable(event->pmu);
+       local_irq_restore(flags);
+ }
+ 
+@@ -1361,7 +1354,6 @@ static int sparc_pmu_add(struct perf_event *event, int ef_flags)
+       unsigned long flags;
+ 
+       local_irq_save(flags);
+-      perf_pmu_disable(event->pmu);
+ 
+       n0 = cpuc->n_events;
+       if (n0 >= sparc_pmu->max_hw_events)
+@@ -1394,7 +1386,6 @@ nocheck:
+ 
+       ret = 0;
+ out:
+-      perf_pmu_enable(event->pmu);
+       local_irq_restore(flags);
+       return ret;
+ }
+diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
+index b9cc9763faf4..036e43cef6fb 100644
+--- a/arch/sparc/kernel/process_64.c
++++ b/arch/sparc/kernel/process_64.c
+@@ -280,6 +280,8 @@ void arch_trigger_all_cpu_backtrace(void)
+                       printk("             TPC[%lx] O7[%lx] I7[%lx] RPC[%lx]\n",
+                              gp->tpc, gp->o7, gp->i7, gp->rpc);
+               }
++
++              touch_nmi_watchdog();
+       }
+ 
+       memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
+@@ -352,6 +354,8 @@ static void pmu_snapshot_all_cpus(void)
+                      (cpu == this_cpu ? '*' : ' '), cpu,
+                      pp->pcr[0], pp->pcr[1], pp->pcr[2], pp->pcr[3],
+                      pp->pic[0], pp->pic[1], pp->pic[2], pp->pic[3]);
++
++              touch_nmi_watchdog();
+       }
+ 
+       memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
+diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
+index 2daaaa6eda23..be8db9bb7878 100644
+--- a/arch/sparc/kernel/sys_sparc_64.c
++++ b/arch/sparc/kernel/sys_sparc_64.c
+@@ -336,7 +336,7 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second
+       long err;
+ 
+       /* No need for backward compatibility. We can start fresh... */
+-      if (call <= SEMCTL) {
++      if (call <= SEMTIMEDOP) {
+               switch (call) {
+               case SEMOP:
+                       err = sys_semtimedop(first, ptr,
+diff --git a/arch/sparc/lib/memmove.S b/arch/sparc/lib/memmove.S
+index b7f6334e159f..857ad4f8905f 100644
+--- a/arch/sparc/lib/memmove.S
++++ b/arch/sparc/lib/memmove.S
+@@ -8,9 +8,11 @@
+ 
+       .text
+ ENTRY(memmove) /* o0=dst o1=src o2=len */
+-      mov             %o0, %g1
++      brz,pn          %o2, 99f
++       mov            %o0, %g1
++
+       cmp             %o0, %o1
+-      bleu,pt         %xcc, memcpy
++      bleu,pt         %xcc, 2f
+        add            %o1, %o2, %g7
+       cmp             %g7, %o0
+       bleu,pt         %xcc, memcpy
+@@ -24,7 +26,34 @@ ENTRY(memmove) /* o0=dst o1=src o2=len */
+       stb             %g7, [%o0]
+       bne,pt          %icc, 1b
+        sub            %o0, 1, %o0
+-
++99:
+       retl
+        mov            %g1, %o0
++
++      /* We can't just call memcpy for these memmove cases.  On some
++       * chips the memcpy uses cache initializing stores and when dst
++       * and src are close enough, those can clobber the source data
++       * before we've loaded it in.
++       */
++2:    or              %o0, %o1, %g7
++      or              %o2, %g7, %g7
++      andcc           %g7, 0x7, %g0
++      bne,pn          %xcc, 4f
++       nop
++
++3:    ldx             [%o1], %g7
++      add             %o1, 8, %o1
++      subcc           %o2, 8, %o2
++      add             %o0, 8, %o0
++      bne,pt          %icc, 3b
++       stx            %g7, [%o0 - 0x8]
++      ba,a,pt         %xcc, 99b
++
++4:    ldub            [%o1], %g7
++      add             %o1, 1, %o1
++      subcc           %o2, 1, %o2
++      add             %o0, 1, %o0
++      bne,pt          %icc, 4b
++       stb            %g7, [%o0 - 0x1]
++      ba,a,pt         %xcc, 99b
+ ENDPROC(memmove)
+diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
+index 036c2797dece..f58cb540ff94 100644
+--- a/arch/sparc/mm/srmmu.c
++++ b/arch/sparc/mm/srmmu.c
+@@ -455,10 +455,12 @@ static void __init sparc_context_init(int numctx)
+ void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
+              struct task_struct *tsk)
+ {
++      unsigned long flags;
++
+       if (mm->context == NO_CONTEXT) {
+-              spin_lock(&srmmu_context_spinlock);
++              spin_lock_irqsave(&srmmu_context_spinlock, flags);
+               alloc_context(old_mm, mm);
+-              spin_unlock(&srmmu_context_spinlock);
++              spin_unlock_irqrestore(&srmmu_context_spinlock, flags);
+               srmmu_ctxd_set(&srmmu_context_table[mm->context], mm->pgd);
+       }
+ 
+@@ -983,14 +985,15 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+ 
+ void destroy_context(struct mm_struct *mm)
+ {
++      unsigned long flags;
+ 
+       if (mm->context != NO_CONTEXT) {
+               flush_cache_mm(mm);
+               srmmu_ctxd_set(&srmmu_context_table[mm->context], srmmu_swapper_pg_dir);
+               flush_tlb_mm(mm);
+-              spin_lock(&srmmu_context_spinlock);
++              spin_lock_irqsave(&srmmu_context_spinlock, flags);
+               free_context(mm->context);
+-              spin_unlock(&srmmu_context_spinlock);
++              spin_unlock_irqrestore(&srmmu_context_spinlock, flags);
+               mm->context = NO_CONTEXT;
+       }
+ }
+diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
+index f89e7490d303..990c9699b662 100644
+--- a/arch/x86/crypto/aesni-intel_glue.c
++++ b/arch/x86/crypto/aesni-intel_glue.c
+@@ -989,7 +989,7 @@ static int __driver_rfc4106_decrypt(struct aead_request *req)
+               src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
+               if (!src)
+                       return -ENOMEM;
+-              assoc = (src + req->cryptlen + auth_tag_len);
++              assoc = (src + req->cryptlen);
+               scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
+               scatterwalk_map_and_copy(assoc, req->assoc, 0,
+                       req->assoclen, 0);
+@@ -1014,7 +1014,7 @@ static int __driver_rfc4106_decrypt(struct aead_request *req)
+               scatterwalk_done(&src_sg_walk, 0, 0);
+               scatterwalk_done(&assoc_sg_walk, 0, 0);
+       } else {
+-              scatterwalk_map_and_copy(dst, req->dst, 0, req->cryptlen, 1);
++              scatterwalk_map_and_copy(dst, req->dst, 0, tempCipherLen, 1);
+               kfree(src);
+       }
+       return retval;
+diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
+index e72b2e41499e..1b2fc5cf1963 100644
+--- a/arch/x86/include/asm/fpu-internal.h
++++ b/arch/x86/include/asm/fpu-internal.h
+@@ -370,7 +370,7 @@ static inline void drop_fpu(struct task_struct *tsk)
+       preempt_disable();
+       tsk->fpu_counter = 0;
+       __drop_fpu(tsk);
+-      clear_used_math();
++      clear_stopped_child_used_math(tsk);
+       preempt_enable();
+ }
+ 
+diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
+index 1ee723298e90..92f37e7683c5 100644
+--- a/arch/x86/kernel/xsave.c
++++ b/arch/x86/kernel/xsave.c
+@@ -376,7 +376,7 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
+                * thread's fpu state, reconstruct fxstate from the fsave
+                * header. Sanitize the copied state etc.
+                */
+-              struct xsave_struct *xsave = &tsk->thread.fpu.state->xsave;
++              struct fpu *fpu = &tsk->thread.fpu;
+               struct user_i387_ia32_struct env;
+               int err = 0;
+ 
+@@ -390,14 +390,15 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
+                */
+               drop_fpu(tsk);
+ 
+-              if (__copy_from_user(xsave, buf_fx, state_size) ||
++              if (__copy_from_user(&fpu->state->xsave, buf_fx, state_size) ||
+                   __copy_from_user(&env, buf, sizeof(env))) {
++                      fpu_finit(fpu);
+                       err = -1;
+               } else {
+                       sanitize_restored_xstate(tsk, &env, xstate_bv, fx_only);
+-                      set_used_math();
+               }
+ 
++              set_used_math();
+               if (use_eager_fpu()) {
+                       preempt_disable();
+                       math_state_restore();
+diff --git a/arch/x86/vdso/vdso32/sigreturn.S b/arch/x86/vdso/vdso32/sigreturn.S
+index 31776d0efc8c..d7ec4e251c0a 100644
+--- a/arch/x86/vdso/vdso32/sigreturn.S
++++ b/arch/x86/vdso/vdso32/sigreturn.S
+@@ -17,6 +17,7 @@
+       .text
+       .globl __kernel_sigreturn
+       .type __kernel_sigreturn,@function
++      nop /* this guy is needed for .LSTARTFDEDLSI1 below (watch for HACK) */
+       ALIGN
+ __kernel_sigreturn:
+ .LSTART_sigreturn:
+diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
+index 538856f3e68a..09df26f9621d 100644
+--- a/drivers/char/tpm/tpm_ibmvtpm.c
++++ b/drivers/char/tpm/tpm_ibmvtpm.c
+@@ -124,7 +124,7 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
+ {
+       struct ibmvtpm_dev *ibmvtpm;
+       struct ibmvtpm_crq crq;
+-      u64 *word = (u64 *) &crq;
++      __be64 *word = (__be64 *)&crq;
+       int rc;
+ 
+       ibmvtpm = (struct ibmvtpm_dev *)TPM_VPRIV(chip);
+@@ -145,11 +145,11 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
+       memcpy((void *)ibmvtpm->rtce_buf, (void *)buf, count);
+       crq.valid = (u8)IBMVTPM_VALID_CMD;
+       crq.msg = (u8)VTPM_TPM_COMMAND;
+-      crq.len = (u16)count;
+-      crq.data = ibmvtpm->rtce_dma_handle;
++      crq.len = cpu_to_be16(count);
++      crq.data = cpu_to_be32(ibmvtpm->rtce_dma_handle);
+ 
+-      rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(word[0]),
+-                            cpu_to_be64(word[1]));
++      rc = ibmvtpm_send_crq(ibmvtpm->vdev, be64_to_cpu(word[0]),
++                            be64_to_cpu(word[1]));
+       if (rc != H_SUCCESS) {
+               dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc);
+               rc = 0;
+diff --git a/drivers/char/tpm/tpm_ibmvtpm.h b/drivers/char/tpm/tpm_ibmvtpm.h
+index bd82a791f995..b2c231b1beec 100644
+--- a/drivers/char/tpm/tpm_ibmvtpm.h
++++ b/drivers/char/tpm/tpm_ibmvtpm.h
+@@ -22,9 +22,9 @@
+ struct ibmvtpm_crq {
+       u8 valid;
+       u8 msg;
+-      u16 len;
+-      u32 data;
+-      u64 reserved;
++      __be16 len;
++      __be32 data;
++      __be64 reserved;
+ } __attribute__((packed, aligned(8)));
+ 
+ struct ibmvtpm_crq_queue {
+diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
+index fc45567ad3ac..ec3bd62eeaf6 100644
+--- a/drivers/char/virtio_console.c
++++ b/drivers/char/virtio_console.c
+@@ -2023,12 +2023,13 @@ static int virtcons_probe(struct virtio_device *vdev)
+       spin_lock_init(&portdev->ports_lock);
+       INIT_LIST_HEAD(&portdev->ports);
+ 
++      INIT_WORK(&portdev->control_work, &control_work_handler);
++
+       if (multiport) {
+               unsigned int nr_added_bufs;
+ 
+               spin_lock_init(&portdev->c_ivq_lock);
+               spin_lock_init(&portdev->c_ovq_lock);
+-              INIT_WORK(&portdev->control_work, &control_work_handler);
+ 
+               nr_added_bufs = fill_queue(portdev->c_ivq,
+                                          &portdev->c_ivq_lock);
+diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
+index ead08a49bec0..59ea6547306b 100644
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -4016,6 +4016,9 @@ int evergreen_irq_set(struct radeon_device *rdev)
+       WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, afmt5);
+       WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, afmt6);
+ 
++      /* posting read */
++      RREG32(SRBM_STATUS);
++
+       return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
+index 46470dd7c710..f9f0e3680d76 100644
+--- a/drivers/gpu/drm/radeon/r100.c
++++ b/drivers/gpu/drm/radeon/r100.c
+@@ -743,6 +743,10 @@ int r100_irq_set(struct radeon_device *rdev)
+               tmp |= RADEON_FP2_DETECT_MASK;
+       }
+       WREG32(RADEON_GEN_INT_CNTL, tmp);
++
++      /* read back to post the write */
++      RREG32(RADEON_GEN_INT_CNTL);
++
+       return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
+index 4cf21ec1abe3..90b007594e32 100644
+--- a/drivers/gpu/drm/radeon/r600.c
++++ b/drivers/gpu/drm/radeon/r600.c
+@@ -3459,6 +3459,9 @@ int r600_init(struct radeon_device *rdev)
+               rdev->accel_working = false;
+       }
+ 
++      /* posting read */
++      RREG32(R_000E50_SRBM_STATUS);
++
+       return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
+index 60af3cda587b..6627585da1e5 100644
+--- a/drivers/gpu/drm/radeon/radeon_cs.c
++++ b/drivers/gpu/drm/radeon/radeon_cs.c
+@@ -177,11 +177,13 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
+       u32 ring = RADEON_CS_RING_GFX;
+       s32 priority = 0;
+ 
++      INIT_LIST_HEAD(&p->validated);
++
+       if (!cs->num_chunks) {
+               return 0;
+       }
++
+       /* get chunks */
+-      INIT_LIST_HEAD(&p->validated);
+       p->idx = 0;
+       p->ib.sa_bo = NULL;
+       p->ib.semaphore = NULL;
+diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
+index ae813fef0818..971d55f73e0c 100644
+--- a/drivers/gpu/drm/radeon/rs600.c
++++ b/drivers/gpu/drm/radeon/rs600.c
+@@ -636,6 +636,10 @@ int rs600_irq_set(struct radeon_device *rdev)
+       WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
+       if (ASIC_IS_DCE2(rdev))
+               WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
++
++      /* posting read */
++      RREG32(R_000040_GEN_INT_CNTL);
++
+       return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
+index 03add5d5542e..2410c38ff037 100644
+--- a/drivers/gpu/drm/radeon/si.c
++++ b/drivers/gpu/drm/radeon/si.c
+@@ -5704,8 +5704,7 @@ int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
+       WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK);
+ 
+       if (!vclk || !dclk) {
+-              /* keep the Bypass mode, put PLL to sleep */
+-              WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
++              /* keep the Bypass mode */
+               return 0;
+       }
+ 
+@@ -5721,8 +5720,7 @@ int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
+       /* set VCO_MODE to 1 */
+       WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK);
+ 
+-      /* toggle UPLL_SLEEP to 1 then back to 0 */
+-      WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
++      /* disable sleep mode */
+       WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK);
+ 
+       /* deassert UPLL_RESET */
+@@ -5778,5 +5776,8 @@ int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
+ 
+       mdelay(100);
+ 
++      /* posting read */
++      RREG32(SRBM_STATUS);
++
+       return 0;
+ }
+diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
+index a4694aa20a3e..f66aeb79abdf 100644
+--- a/drivers/net/can/dev.c
++++ b/drivers/net/can/dev.c
+@@ -503,6 +503,14 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
+       skb->pkt_type = PACKET_BROADCAST;
+       skb->ip_summed = CHECKSUM_UNNECESSARY;
+ 
++      skb_reset_mac_header(skb);
++      skb_reset_network_header(skb);
++      skb_reset_transport_header(skb);
++
++      skb_reset_mac_header(skb);
++      skb_reset_network_header(skb);
++      skb_reset_transport_header(skb);
++
+       can_skb_reserve(skb);
+       can_skb_prv(skb)->ifindex = dev->ifindex;
+ 
+diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c
+index 1e207f086b75..49ab45e17fe8 100644
+--- a/drivers/net/usb/cx82310_eth.c
++++ b/drivers/net/usb/cx82310_eth.c
+@@ -302,9 +302,18 @@ static const struct driver_info   cx82310_info = {
+       .tx_fixup       = cx82310_tx_fixup,
+ };
+ 
++#define USB_DEVICE_CLASS(vend, prod, cl, sc, pr) \
++      .match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
++                     USB_DEVICE_ID_MATCH_DEV_INFO, \
++      .idVendor = (vend), \
++      .idProduct = (prod), \
++      .bDeviceClass = (cl), \
++      .bDeviceSubClass = (sc), \
++      .bDeviceProtocol = (pr)
++
+ static const struct usb_device_id products[] = {
+       {
+-              USB_DEVICE_AND_INTERFACE_INFO(0x0572, 0xcb01, 0xff, 0, 0),
++              USB_DEVICE_CLASS(0x0572, 0xcb01, 0xff, 0, 0),
+               .driver_info = (unsigned long) &cx82310_info
+       },
+       { },
+diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
+index e873e8f0070d..283212aa103c 100644
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -1596,10 +1596,12 @@ static int _regulator_do_enable(struct regulator_dev *rdev)
+       trace_regulator_enable(rdev_get_name(rdev));
+ 
+       if (rdev->ena_pin) {
+-              ret = regulator_ena_gpio_ctrl(rdev, true);
+-              if (ret < 0)
+-                      return ret;
+-              rdev->ena_gpio_state = 1;
++              if (!rdev->ena_gpio_state) {
++                      ret = regulator_ena_gpio_ctrl(rdev, true);
++                      if (ret < 0)
++                              return ret;
++                      rdev->ena_gpio_state = 1;
++              }
+       } else if (rdev->desc->ops->enable) {
+               ret = rdev->desc->ops->enable(rdev);
+               if (ret < 0)
+@@ -1701,10 +1703,12 @@ static int _regulator_do_disable(struct regulator_dev *rdev)
+       trace_regulator_disable(rdev_get_name(rdev));
+ 
+       if (rdev->ena_pin) {
+-              ret = regulator_ena_gpio_ctrl(rdev, false);
+-              if (ret < 0)
+-                      return ret;
+-              rdev->ena_gpio_state = 0;
++              if (rdev->ena_gpio_state) {
++                      ret = regulator_ena_gpio_ctrl(rdev, false);
++                      if (ret < 0)
++                              return ret;
++                      rdev->ena_gpio_state = 0;
++              }
+ 
+       } else if (rdev->desc->ops->disable) {
+               ret = rdev->desc->ops->disable(rdev);
+@@ -3614,12 +3618,6 @@ regulator_register(const struct regulator_desc *regulator_desc,
+                                config->ena_gpio, ret);
+                       goto wash;
+               }
+-
+-              if (config->ena_gpio_flags & GPIOF_OUT_INIT_HIGH)
+-                      rdev->ena_gpio_state = 1;
+-
+-              if (config->ena_gpio_invert)
+-                      rdev->ena_gpio_state = !rdev->ena_gpio_state;
+       }
+ 
+       /* set regulator constraints */
+@@ -3788,9 +3786,11 @@ int regulator_suspend_finish(void)
+       list_for_each_entry(rdev, &regulator_list, list) {
+               mutex_lock(&rdev->mutex);
+               if (rdev->use_count > 0  || rdev->constraints->always_on) {
+-                      error = _regulator_do_enable(rdev);
+-                      if (error)
+-                              ret = error;
++                      if (!_regulator_is_enabled(rdev)) {
++                              error = _regulator_do_enable(rdev);
++                              if (error)
++                                      ret = error;
++                      }
+               } else {
+                       if (!has_full_constraints)
+                               goto unlock;
+diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
+index 62b58d38ce2e..60de66252fa2 100644
+--- a/drivers/scsi/libsas/sas_discover.c
++++ b/drivers/scsi/libsas/sas_discover.c
+@@ -500,6 +500,7 @@ static void sas_revalidate_domain(struct work_struct *work)
+       struct sas_discovery_event *ev = to_sas_discovery_event(work);
+       struct asd_sas_port *port = ev->port;
+       struct sas_ha_struct *ha = port->ha;
++      struct domain_device *ddev = port->port_dev;
+ 
+       /* prevent revalidation from finding sata links in recovery */
+       mutex_lock(&ha->disco_mutex);
+@@ -514,8 +515,9 @@ static void sas_revalidate_domain(struct work_struct *work)
+       SAS_DPRINTK("REVALIDATING DOMAIN on port %d, pid:%d\n", port->id,
+                   task_pid_nr(current));
+ 
+-      if (port->port_dev)
+-              res = sas_ex_revalidate_domain(port->port_dev);
++      if (ddev && (ddev->dev_type == SAS_FANOUT_EXPANDER_DEVICE ||
++                   ddev->dev_type == SAS_EDGE_EXPANDER_DEVICE))
++              res = sas_ex_revalidate_domain(ddev);
+ 
+       SAS_DPRINTK("done REVALIDATING DOMAIN on port %d, pid:%d, res 0x%x\n",
+                   port->id, task_pid_nr(current), res);
+diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
+index 5266c89fc989..a6f0878d9bf1 100644
+--- a/drivers/spi/spi-pl022.c
++++ b/drivers/spi/spi-pl022.c
+@@ -508,12 +508,12 @@ static void giveback(struct pl022 *pl022)
+       pl022->cur_msg = NULL;
+       pl022->cur_transfer = NULL;
+       pl022->cur_chip = NULL;
+-      spi_finalize_current_message(pl022->master);
+ 
+       /* disable the SPI/SSP operation */
+       writew((readw(SSP_CR1(pl022->virtbase)) &
+               (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
+ 
++      spi_finalize_current_message(pl022->master);
+ }
+ 
+ /**
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index 651b5768862f..9559ea749d83 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -4136,11 +4136,17 @@ int iscsit_close_connection(
+       pr_debug("Closing iSCSI connection CID %hu on SID:"
+               " %u\n", conn->cid, sess->sid);
+       /*
+-       * Always up conn_logout_comp just in case the RX Thread is sleeping
+-       * and the logout response never got sent because the connection
+-       * failed.
++       * Always up conn_logout_comp for the traditional TCP case just in case
++       * the RX Thread in iscsi_target_rx_opcode() is sleeping and the logout
++       * response never got sent because the connection failed.
++       *
+       * However for iser-target, isert_wait4logout() is using conn_logout_comp
++       * to signal logout response TX interrupt completion.  Go ahead and skip
+       * this for iser since isert_rx_opcode() does not wait on logout failure,
++       * and to avoid iscsi_conn pointer dereference in iser-target code.
+        */
+-      complete(&conn->conn_logout_comp);
++      if (conn->conn_transport->transport_type == ISCSI_TCP)
++              complete(&conn->conn_logout_comp);
+ 
+       iscsi_release_thread_set(conn);
+ 
+diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
+index 27ec6e4d1c7c..7f85f4a6d73a 100644
+--- a/drivers/target/target_core_pr.c
++++ b/drivers/target/target_core_pr.c
+@@ -518,6 +518,18 @@ static int core_scsi3_pr_seq_non_holder(
+ 
+                       return 0;
+               }
++       } else if (we && registered_nexus) {
++               /*
++                * Reads are allowed for Write Exclusive locks
++                * from all registrants.
++                */
++               if (cmd->data_direction == DMA_FROM_DEVICE) {
++                       pr_debug("Allowing READ CDB: 0x%02x for %s"
++                               " reservation\n", cdb[0],
++                               core_scsi3_pr_dump_type(pr_reg_type));
++
++                       return 0;
++               }
+       }
+       pr_debug("%s Conflict for %sregistered nexus %s CDB: 0x%2x"
+               " for %s reservation\n", transport_dump_cmd_direction(cmd),
+@@ -2397,6 +2409,7 @@ core_scsi3_pro_reserve(struct se_cmd *cmd, int type, int scope, u64 res_key)
+       spin_lock(&dev->dev_reservation_lock);
+       pr_res_holder = dev->dev_pr_res_holder;
+       if (pr_res_holder) {
++              int pr_res_type = pr_res_holder->pr_res_type;
+               /*
+                * From spc4r17 Section 5.7.9: Reserving:
+                *
+@@ -2407,7 +2420,9 @@ core_scsi3_pro_reserve(struct se_cmd *cmd, int type, int scope, u64 res_key)
+                * the logical unit, then the command shall be completed with
+                * RESERVATION CONFLICT status.
+                */
+-              if (pr_res_holder != pr_reg) {
++              if ((pr_res_holder != pr_reg) &&
++                  (pr_res_type != PR_TYPE_WRITE_EXCLUSIVE_ALLREG) &&
++                  (pr_res_type != PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) {
+                       struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
+                       pr_err("SPC-3 PR: Attempted RESERVE from"
+                               " [%s]: %s while reservation already held by"
+@@ -4012,7 +4027,8 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
+       unsigned char *buf;
+       u32 add_desc_len = 0, add_len = 0, desc_len, exp_desc_len;
+       u32 off = 8; /* off into first Full Status descriptor */
+-      int format_code = 0;
++      int format_code = 0, pr_res_type = 0, pr_res_scope = 0;
++      bool all_reg = false;
+ 
+       if (cmd->data_length < 8) {
+               pr_err("PRIN SA READ_FULL_STATUS SCSI Data Length: %u"
+@@ -4029,6 +4045,19 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
+       buf[2] = ((dev->t10_pr.pr_generation >> 8) & 0xff);
+       buf[3] = (dev->t10_pr.pr_generation & 0xff);
+ 
++      spin_lock(&dev->dev_reservation_lock);
++      if (dev->dev_pr_res_holder) {
++              struct t10_pr_registration *pr_holder = dev->dev_pr_res_holder;
++
++              if (pr_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG ||
++                  pr_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG) {
++                      all_reg = true;
++                      pr_res_type = pr_holder->pr_res_type;
++                      pr_res_scope = pr_holder->pr_res_scope;
++              }
++      }
++      spin_unlock(&dev->dev_reservation_lock);
++
+       spin_lock(&pr_tmpl->registration_lock);
+       list_for_each_entry_safe(pr_reg, pr_reg_tmp,
+                       &pr_tmpl->registration_list, pr_reg_list) {
+@@ -4078,14 +4107,20 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
+                * reservation holder for PR_HOLDER bit.
+                *
+                * Also, if this registration is the reservation
+-               * holder, fill in SCOPE and TYPE in the next byte.
++               * holder or there is an All Registrants reservation
++               * active, fill in SCOPE and TYPE in the next byte.
+                */
+               if (pr_reg->pr_res_holder) {
+                       buf[off++] |= 0x01;
+                       buf[off++] = (pr_reg->pr_res_scope & 0xf0) |
+                                    (pr_reg->pr_res_type & 0x0f);
+-              } else
++              } else if (all_reg) {
++                      buf[off++] |= 0x01;
++                      buf[off++] = (pr_res_scope & 0xf0) |
++                                   (pr_res_type & 0x0f);
++              } else {
+                       off += 2;
++              }
+ 
+               off += 4; /* Skip over reserved area */
+               /*
+diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
+index 3250ba2594e0..b1e77ff9a636 100644
+--- a/drivers/target/target_core_pscsi.c
++++ b/drivers/target/target_core_pscsi.c
+@@ -1112,7 +1112,7 @@ static u32 pscsi_get_device_type(struct se_device *dev)
+       struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
+       struct scsi_device *sd = pdv->pdv_sd;
+ 
+-      return sd->type;
++      return (sd) ? sd->type : TYPE_NO_LUN;
+ }
+ 
+ static sector_t pscsi_get_blocks(struct se_device *dev)
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index dcc5daa0ff1c..daf0f6357bb3 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -2222,6 +2222,10 @@ int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
+ 
+ out:
+       spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
++
++      if (ret && ack_kref)
++              target_put_sess_cmd(se_sess, se_cmd);
++
+       return ret;
+ }
+ EXPORT_SYMBOL(target_get_sess_cmd);
+diff --git a/drivers/tty/serial/8250/8250_pci.c 
b/drivers/tty/serial/8250/8250_pci.c
+index 8d3c0b5e2878..98b8423793fd 100644
+--- a/drivers/tty/serial/8250/8250_pci.c
++++ b/drivers/tty/serial/8250/8250_pci.c
+@@ -68,7 +68,7 @@ static void moan_device(const char *str, struct pci_dev *dev)
+              "Please send the output of lspci -vv, this\n"
+              "message (0x%04x,0x%04x,0x%04x,0x%04x), the\n"
+              "manufacturer and name of serial board or\n"
+-             "modem board to [email protected].\n",
++             "modem board to <[email protected]>.\n",
+              pci_name(dev), str, dev->vendor, dev->device,
+              dev->subsystem_vendor, dev->subsystem_device);
+ }
+diff --git a/drivers/xen/xen-pciback/conf_space.c 
b/drivers/xen/xen-pciback/conf_space.c
+index 46ae0f9f02ad..75fe3d466515 100644
+--- a/drivers/xen/xen-pciback/conf_space.c
++++ b/drivers/xen/xen-pciback/conf_space.c
+@@ -16,7 +16,7 @@
+ #include "conf_space.h"
+ #include "conf_space_quirks.h"
+ 
+-static bool permissive;
++bool permissive;
+ module_param(permissive, bool, 0644);
+ 
+ /* This is where xen_pcibk_read_config_byte, xen_pcibk_read_config_word,
+diff --git a/drivers/xen/xen-pciback/conf_space.h 
b/drivers/xen/xen-pciback/conf_space.h
+index e56c934ad137..2e1d73d1d5d0 100644
+--- a/drivers/xen/xen-pciback/conf_space.h
++++ b/drivers/xen/xen-pciback/conf_space.h
+@@ -64,6 +64,8 @@ struct config_field_entry {
+       void *data;
+ };
+ 
++extern bool permissive;
++
+ #define OFFSET(cfg_entry) 
((cfg_entry)->base_offset+(cfg_entry)->field->offset)
+ 
+ /* Add fields to a device - the add_fields macro expects to get a pointer to
+diff --git a/drivers/xen/xen-pciback/conf_space_header.c 
b/drivers/xen/xen-pciback/conf_space_header.c
+index 3daf862d739d..a5bb81a600f7 100644
+--- a/drivers/xen/xen-pciback/conf_space_header.c
++++ b/drivers/xen/xen-pciback/conf_space_header.c
+@@ -9,6 +9,10 @@
+ #include "pciback.h"
+ #include "conf_space.h"
+ 
++struct pci_cmd_info {
++      u16 val;
++};
++
+ struct pci_bar_info {
+       u32 val;
+       u32 len_val;
+@@ -18,22 +22,36 @@ struct pci_bar_info {
+ #define is_enable_cmd(value) ((value)&(PCI_COMMAND_MEMORY|PCI_COMMAND_IO))
+ #define is_master_cmd(value) ((value)&PCI_COMMAND_MASTER)
+ 
+-static int command_read(struct pci_dev *dev, int offset, u16 *value, void 
*data)
++/* Bits guests are allowed to control in permissive mode. */
++#define PCI_COMMAND_GUEST (PCI_COMMAND_MASTER|PCI_COMMAND_SPECIAL| \
++                         PCI_COMMAND_INVALIDATE|PCI_COMMAND_VGA_PALETTE| \
++                         PCI_COMMAND_WAIT|PCI_COMMAND_FAST_BACK)
++
++static void *command_init(struct pci_dev *dev, int offset)
+ {
+-      int i;
+-      int ret;
+-
+-      ret = xen_pcibk_read_config_word(dev, offset, value, data);
+-      if (!pci_is_enabled(dev))
+-              return ret;
+-
+-      for (i = 0; i < PCI_ROM_RESOURCE; i++) {
+-              if (dev->resource[i].flags & IORESOURCE_IO)
+-                      *value |= PCI_COMMAND_IO;
+-              if (dev->resource[i].flags & IORESOURCE_MEM)
+-                      *value |= PCI_COMMAND_MEMORY;
++      struct pci_cmd_info *cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
++      int err;
++
++      if (!cmd)
++              return ERR_PTR(-ENOMEM);
++
++      err = pci_read_config_word(dev, PCI_COMMAND, &cmd->val);
++      if (err) {
++              kfree(cmd);
++              return ERR_PTR(err);
+       }
+ 
++      return cmd;
++}
++
++static int command_read(struct pci_dev *dev, int offset, u16 *value, void 
*data)
++{
++      int ret = pci_read_config_word(dev, offset, value);
++      const struct pci_cmd_info *cmd = data;
++
++      *value &= PCI_COMMAND_GUEST;
++      *value |= cmd->val & ~PCI_COMMAND_GUEST;
++
+       return ret;
+ }
+ 
+@@ -41,6 +59,8 @@ static int command_write(struct pci_dev *dev, int offset, 
u16 value, void *data)
+ {
+       struct xen_pcibk_dev_data *dev_data;
+       int err;
++      u16 val;
++      struct pci_cmd_info *cmd = data;
+ 
+       dev_data = pci_get_drvdata(dev);
+       if (!pci_is_enabled(dev) && is_enable_cmd(value)) {
+@@ -83,6 +103,19 @@ static int command_write(struct pci_dev *dev, int offset, 
u16 value, void *data)
+               }
+       }
+ 
++      cmd->val = value;
++
++      if (!permissive && (!dev_data || !dev_data->permissive))
++              return 0;
++
++      /* Only allow the guest to control certain bits. */
++      err = pci_read_config_word(dev, offset, &val);
++      if (err || val == value)
++              return err;
++
++      value &= PCI_COMMAND_GUEST;
++      value |= val & ~PCI_COMMAND_GUEST;
++
+       return pci_write_config_word(dev, offset, value);
+ }
+ 
+@@ -282,6 +315,8 @@ static const struct config_field header_common[] = {
+       {
+        .offset    = PCI_COMMAND,
+        .size      = 2,
++       .init      = command_init,
++       .release   = bar_release,
+        .u.w.read  = command_read,
+        .u.w.write = command_write,
+       },
+diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
+index 23bf1a52a5da..b535008b6c4c 100644
+--- a/fs/fuse/dev.c
++++ b/fs/fuse/dev.c
+@@ -819,8 +819,8 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, 
struct page **pagep)
+ 
+       newpage = buf->page;
+ 
+-      if (WARN_ON(!PageUptodate(newpage)))
+-              return -EIO;
++      if (!PageUptodate(newpage))
++              SetPageUptodate(newpage);
+ 
+       ClearPageMappedToDisk(newpage);
+ 
+@@ -1725,6 +1725,9 @@ copy_finish:
+ static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
+                      unsigned int size, struct fuse_copy_state *cs)
+ {
++      /* Don't try to move pages (yet) */
++      cs->move_pages = 0;
++
+       switch (code) {
+       case FUSE_NOTIFY_POLL:
+               return fuse_notify_poll(fc, size, cs);
+diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
+index b3c95c1a4700..99294a286e66 100644
+--- a/fs/nilfs2/segment.c
++++ b/fs/nilfs2/segment.c
+@@ -1906,6 +1906,7 @@ static void nilfs_segctor_drop_written_files(struct 
nilfs_sc_info *sci,
+                                            struct the_nilfs *nilfs)
+ {
+       struct nilfs_inode_info *ii, *n;
++      int during_mount = !(sci->sc_super->s_flags & MS_ACTIVE);
+       int defer_iput = false;
+ 
+       spin_lock(&nilfs->ns_inode_lock);
+@@ -1918,10 +1919,10 @@ static void nilfs_segctor_drop_written_files(struct 
nilfs_sc_info *sci,
+               brelse(ii->i_bh);
+               ii->i_bh = NULL;
+               list_del_init(&ii->i_dirty);
+-              if (!ii->vfs_inode.i_nlink) {
++              if (!ii->vfs_inode.i_nlink || during_mount) {
+                       /*
+-                       * Defer calling iput() to avoid a deadlock
+-                       * over I_SYNC flag for inodes with i_nlink == 0
++                       * Defer calling iput() to avoid deadlocks if
++                       * i_nlink == 0 or mount is not yet finished.
+                        */
+                       list_add_tail(&ii->i_dirty, &sci->sc_iput_queue);
+                       defer_iput = true;
+diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
+index ff28cf578d01..120dd354849d 100644
+--- a/include/linux/workqueue.h
++++ b/include/linux/workqueue.h
+@@ -71,7 +71,8 @@ enum {
+       /* data contains off-queue information when !WORK_STRUCT_PWQ */
+       WORK_OFFQ_FLAG_BASE     = WORK_STRUCT_COLOR_SHIFT,
+ 
+-      WORK_OFFQ_CANCELING     = (1 << WORK_OFFQ_FLAG_BASE),
++      __WORK_OFFQ_CANCELING   = WORK_OFFQ_FLAG_BASE,
++      WORK_OFFQ_CANCELING     = (1 << __WORK_OFFQ_CANCELING),
+ 
+       /*
+        * When a work item is off queue, its high bits point to the last
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index 16730a9c8cac..fe7c4b91d2e7 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -2861,19 +2861,57 @@ bool flush_work(struct work_struct *work)
+ }
+ EXPORT_SYMBOL_GPL(flush_work);
+ 
++struct cwt_wait {
++      wait_queue_t            wait;
++      struct work_struct      *work;
++};
++
++static int cwt_wakefn(wait_queue_t *wait, unsigned mode, int sync, void *key)
++{
++      struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait);
++
++      if (cwait->work != key)
++              return 0;
++      return autoremove_wake_function(wait, mode, sync, key);
++}
++
+ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
+ {
++      static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq);
+       unsigned long flags;
+       int ret;
+ 
+       do {
+               ret = try_to_grab_pending(work, is_dwork, &flags);
+               /*
+-               * If someone else is canceling, wait for the same event it
+-               * would be waiting for before retrying.
++               * If someone else is already canceling, wait for it to
++               * finish.  flush_work() doesn't work for PREEMPT_NONE
++               * because we may get scheduled between @work's completion
++               * and the other canceling task resuming and clearing
++               * CANCELING - flush_work() will return false immediately
++               * as @work is no longer busy, try_to_grab_pending() will
++               * return -ENOENT as @work is still being canceled and the
++               * other canceling task won't be able to clear CANCELING as
++               * we're hogging the CPU.
++               *
++               * Let's wait for completion using a waitqueue.  As this
++               * may lead to the thundering herd problem, use a custom
++               * wake function which matches @work along with exclusive
++               * wait and wakeup.
+                */
+-              if (unlikely(ret == -ENOENT))
+-                      flush_work(work);
++              if (unlikely(ret == -ENOENT)) {
++                      struct cwt_wait cwait;
++
++                      init_wait(&cwait.wait);
++                      cwait.wait.func = cwt_wakefn;
++                      cwait.work = work;
++
++                      prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait,
++                                                TASK_UNINTERRUPTIBLE);
++                      if (work_is_canceling(work))
++                              schedule();
++                      finish_wait(&cancel_waitq, &cwait.wait);
++              }
+       } while (unlikely(ret < 0));
+ 
+       /* tell other tasks trying to grab @work to back off */
+@@ -2882,6 +2920,16 @@ static bool __cancel_work_timer(struct work_struct 
*work, bool is_dwork)
+ 
+       flush_work(work);
+       clear_work_data(work);
++
++      /*
++       * Paired with prepare_to_wait() above so that either
++       * waitqueue_active() is visible here or !work_is_canceling() is
++       * visible there.
++       */
++      smp_mb();
++      if (waitqueue_active(&cancel_waitq))
++              __wake_up(&cancel_waitq, TASK_NORMAL, 1, work);
++
+       return ret;
+ }
+ 
+diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
+index d6be3edb7a43..526bf56f4d31 100644
+--- a/net/caif/caif_socket.c
++++ b/net/caif/caif_socket.c
+@@ -283,7 +283,7 @@ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct 
socket *sock,
+       int copylen;
+ 
+       ret = -EOPNOTSUPP;
+-      if (m->msg_flags&MSG_OOB)
++      if (flags & MSG_OOB)
+               goto read_error;
+ 
+       skb = skb_recv_datagram(sk, flags, 0 , &ret);
+diff --git a/net/can/af_can.c b/net/can/af_can.c
+index f59859a3f562..d3668c55b088 100644
+--- a/net/can/af_can.c
++++ b/net/can/af_can.c
+@@ -262,6 +262,9 @@ int can_send(struct sk_buff *skb, int loop)
+               goto inval_skb;
+       }
+ 
++      skb->ip_summed = CHECKSUM_UNNECESSARY;
++
++      skb_reset_mac_header(skb);
+       skb_reset_network_header(skb);
+       skb_reset_transport_header(skb);
+ 
+diff --git a/net/compat.c b/net/compat.c
+index 275af79c131b..d12529050b29 100644
+--- a/net/compat.c
++++ b/net/compat.c
+@@ -71,6 +71,13 @@ int get_compat_msghdr(struct msghdr *kmsg, struct 
compat_msghdr __user *umsg)
+           __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
+           __get_user(kmsg->msg_flags, &umsg->msg_flags))
+               return -EFAULT;
++
++      if (!tmp1)
++              kmsg->msg_namelen = 0;
++
++      if (kmsg->msg_namelen < 0)
++              return -EINVAL;
++
+       if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
+               kmsg->msg_namelen = sizeof(struct sockaddr_storage);
+       kmsg->msg_name = compat_ptr(tmp1);
+diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
+index 2ff093b7c45e..0a327b66a344 100644
+--- a/net/core/sysctl_net_core.c
++++ b/net/core/sysctl_net_core.c
+@@ -23,6 +23,8 @@
+ static int zero = 0;
+ static int one = 1;
+ static int ushort_max = USHRT_MAX;
++static int min_sndbuf = SOCK_MIN_SNDBUF;
++static int min_rcvbuf = SOCK_MIN_RCVBUF;
+ 
+ #ifdef CONFIG_RPS
+ static int rps_sock_flow_sysctl(ctl_table *table, int write,
+@@ -97,7 +99,7 @@ static struct ctl_table net_core_table[] = {
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_minmax,
+-              .extra1         = &one,
++              .extra1         = &min_sndbuf,
+       },
+       {
+               .procname       = "rmem_max",
+@@ -105,7 +107,7 @@ static struct ctl_table net_core_table[] = {
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_minmax,
+-              .extra1         = &one,
++              .extra1         = &min_rcvbuf,
+       },
+       {
+               .procname       = "wmem_default",
+@@ -113,7 +115,7 @@ static struct ctl_table net_core_table[] = {
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_minmax,
+-              .extra1         = &one,
++              .extra1         = &min_sndbuf,
+       },
+       {
+               .procname       = "rmem_default",
+@@ -121,7 +123,7 @@ static struct ctl_table net_core_table[] = {
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_minmax,
+-              .extra1         = &one,
++              .extra1         = &min_rcvbuf,
+       },
+       {
+               .procname       = "dev_weight",
+diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
+index 45dbdab915e2..14a1ed611b05 100644
+--- a/net/ipv4/inet_diag.c
++++ b/net/ipv4/inet_diag.c
+@@ -71,6 +71,20 @@ static inline void inet_diag_unlock_handler(
+       mutex_unlock(&inet_diag_table_mutex);
+ }
+ 
++static size_t inet_sk_attr_size(void)
++{
++      return    nla_total_size(sizeof(struct tcp_info))
++              + nla_total_size(1) /* INET_DIAG_SHUTDOWN */
++              + nla_total_size(1) /* INET_DIAG_TOS */
++              + nla_total_size(1) /* INET_DIAG_TCLASS */
++              + nla_total_size(sizeof(struct inet_diag_meminfo))
++              + nla_total_size(sizeof(struct inet_diag_msg))
++              + nla_total_size(SK_MEMINFO_VARS * sizeof(u32))
++              + nla_total_size(TCP_CA_NAME_MAX)
++              + nla_total_size(sizeof(struct tcpvegas_info))
++              + 64;
++}
++
+ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
+                             struct sk_buff *skb, struct inet_diag_req_v2 *req,
+                             struct user_namespace *user_ns,                   
+@@ -326,9 +340,7 @@ int inet_diag_dump_one_icsk(struct inet_hashinfo 
*hashinfo, struct sk_buff *in_s
+       if (err)
+               goto out;
+ 
+-      rep = nlmsg_new(sizeof(struct inet_diag_msg) +
+-                      sizeof(struct inet_diag_meminfo) +
+-                      sizeof(struct tcp_info) + 64, GFP_KERNEL);
++      rep = nlmsg_new(inet_sk_attr_size(), GFP_KERNEL);
+       if (!rep) {
+               err = -ENOMEM;
+               goto out;
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 923146c4f007..913dc4f49b10 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -2593,15 +2593,11 @@ void tcp_send_fin(struct sock *sk)
+       } else {
+               /* Socket is locked, keep trying until memory is available. */
+               for (;;) {
+-                      skb = alloc_skb_fclone(MAX_TCP_HEADER,
+-                                             sk->sk_allocation);
++                      skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
+                       if (skb)
+                               break;
+                       yield();
+               }
+-
+-              /* Reserve space for headers and prepare control bits. */
+-              skb_reserve(skb, MAX_TCP_HEADER);
+               /* FIN eats a sequence byte, write_seq advanced by 
tcp_queue_skb(). */
+               tcp_init_nondata_skb(skb, tp->write_seq,
+                                    TCPHDR_ACK | TCPHDR_FIN);
+@@ -2875,9 +2871,9 @@ static int tcp_send_syn_data(struct sock *sk, struct 
sk_buff *syn)
+ {
+       struct tcp_sock *tp = tcp_sk(sk);
+       struct tcp_fastopen_request *fo = tp->fastopen_req;
+-      int syn_loss = 0, space, i, err = 0, iovlen = fo->data->msg_iovlen;
+-      struct sk_buff *syn_data = NULL, *data;
++      int syn_loss = 0, space, err = 0;
+       unsigned long last_syn_loss = 0;
++      struct sk_buff *syn_data;
+ 
+       tp->rx_opt.mss_clamp = tp->advmss;  /* If MSS is not cached */
+       tcp_fastopen_cache_get(sk, &tp->rx_opt.mss_clamp, &fo->cookie,
+@@ -2908,42 +2904,38 @@ static int tcp_send_syn_data(struct sock *sk, struct 
sk_buff *syn)
+       /* limit to order-0 allocations */
+       space = min_t(size_t, space, SKB_MAX_HEAD(MAX_TCP_HEADER));
+ 
+-      syn_data = skb_copy_expand(syn, MAX_TCP_HEADER, space,
+-                                 sk->sk_allocation);
+-      if (syn_data == NULL)
++      syn_data = sk_stream_alloc_skb(sk, space, sk->sk_allocation);
++      if (!syn_data)
+               goto fallback;
++      syn_data->ip_summed = CHECKSUM_PARTIAL;
++      memcpy(syn_data->cb, syn->cb, sizeof(syn->cb));
++      if (unlikely(memcpy_fromiovecend(skb_put(syn_data, space),
++                                       fo->data->msg_iov, 0, space))) {
++              kfree_skb(syn_data);
++              goto fallback;
++      }
+ 
+-      for (i = 0; i < iovlen && syn_data->len < space; ++i) {
+-              struct iovec *iov = &fo->data->msg_iov[i];
+-              unsigned char __user *from = iov->iov_base;
+-              int len = iov->iov_len;
+-
+-              if (syn_data->len + len > space)
+-                      len = space - syn_data->len;
+-              else if (i + 1 == iovlen)
+-                      /* No more data pending in inet_wait_for_connect() */
+-                      fo->data = NULL;
++      /* No more data pending in inet_wait_for_connect() */
++      if (space == fo->size)
++              fo->data = NULL;
++      fo->copied = space;
+ 
+-              if (skb_add_data(syn_data, from, len))
+-                      goto fallback;
+-      }
++      tcp_connect_queue_skb(sk, syn_data);
+ 
+-      /* Queue a data-only packet after the regular SYN for retransmission */
+-      data = pskb_copy(syn_data, sk->sk_allocation);
+-      if (data == NULL)
+-              goto fallback;
+-      TCP_SKB_CB(data)->seq++;
+-      TCP_SKB_CB(data)->tcp_flags &= ~TCPHDR_SYN;
+-      TCP_SKB_CB(data)->tcp_flags = (TCPHDR_ACK|TCPHDR_PSH);
+-      tcp_connect_queue_skb(sk, data);
+-      fo->copied = data->len;
++      err = tcp_transmit_skb(sk, syn_data, 1, sk->sk_allocation);
+ 
+-      if (tcp_transmit_skb(sk, syn_data, 0, sk->sk_allocation) == 0) {
++      /* Now full SYN+DATA was cloned and sent (or not),
++       * remove the SYN from the original skb (syn_data)
++       * we keep in write queue in case of a retransmit, as we
++       * also have the SYN packet (with no data) in the same queue.
++       */
++      TCP_SKB_CB(syn_data)->seq++;
++      TCP_SKB_CB(syn_data)->tcp_flags = TCPHDR_ACK | TCPHDR_PSH;
++      if (!err) {
+               tp->syn_data = (fo->copied > 0);
+               NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVE);
+               goto done;
+       }
+-      syn_data = NULL;
+ 
+ fallback:
+       /* Send a regular SYN with Fast Open cookie request option */
+@@ -2952,7 +2944,6 @@ fallback:
+       err = tcp_transmit_skb(sk, syn, 1, sk->sk_allocation);
+       if (err)
+               tp->syn_fastopen = 0;
+-      kfree_skb(syn_data);
+ done:
+       fo->cookie.len = -1;  /* Exclude Fast Open option for SYN retries */
+       return err;
+@@ -2972,13 +2963,10 @@ int tcp_connect(struct sock *sk)
+               return 0;
+       }
+ 
+-      buff = alloc_skb_fclone(MAX_TCP_HEADER + 15, sk->sk_allocation);
+-      if (unlikely(buff == NULL))
++      buff = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
++      if (unlikely(!buff))
+               return -ENOBUFS;
+ 
+-      /* Reserve space for headers. */
+-      skb_reserve(buff, MAX_TCP_HEADER);
+-
+       tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
+       tp->retrans_stamp = TCP_SKB_CB(buff)->when = tcp_time_stamp;
+       tcp_connect_queue_skb(sk, buff);
+diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
+index 26b9a986a87f..1c6a71c41e62 100644
+--- a/net/netfilter/ipvs/ip_vs_core.c
++++ b/net/netfilter/ipvs/ip_vs_core.c
+@@ -650,16 +650,24 @@ static inline int ip_vs_gather_frags(struct sk_buff 
*skb, u_int32_t user)
+       return err;
+ }
+ 
+-static int ip_vs_route_me_harder(int af, struct sk_buff *skb)
++static int ip_vs_route_me_harder(int af, struct sk_buff *skb,
++                               unsigned int hooknum)
+ {
++      if (!sysctl_snat_reroute(skb))
++              return 0;
++      /* Reroute replies only to remote clients (FORWARD and LOCAL_OUT) */
++      if (NF_INET_LOCAL_IN == hooknum)
++              return 0;
+ #ifdef CONFIG_IP_VS_IPV6
+       if (af == AF_INET6) {
+-              if (sysctl_snat_reroute(skb) && ip6_route_me_harder(skb) != 0)
++              struct dst_entry *dst = skb_dst(skb);
++
++              if (dst->dev && !(dst->dev->flags & IFF_LOOPBACK) &&
++                  ip6_route_me_harder(skb) != 0)
+                       return 1;
+       } else
+ #endif
+-              if ((sysctl_snat_reroute(skb) ||
+-                   skb_rtable(skb)->rt_flags & RTCF_LOCAL) &&
++              if (!(skb_rtable(skb)->rt_flags & RTCF_LOCAL) &&
+                   ip_route_me_harder(skb, RTN_LOCAL) != 0)
+                       return 1;
+ 
+@@ -782,7 +790,8 @@ static int handle_response_icmp(int af, struct sk_buff 
*skb,
+                               union nf_inet_addr *snet,
+                               __u8 protocol, struct ip_vs_conn *cp,
+                               struct ip_vs_protocol *pp,
+-                              unsigned int offset, unsigned int ihl)
++                              unsigned int offset, unsigned int ihl,
++                              unsigned int hooknum)
+ {
+       unsigned int verdict = NF_DROP;
+ 
+@@ -812,7 +821,7 @@ static int handle_response_icmp(int af, struct sk_buff 
*skb,
+ #endif
+               ip_vs_nat_icmp(skb, pp, cp, 1);
+ 
+-      if (ip_vs_route_me_harder(af, skb))
++      if (ip_vs_route_me_harder(af, skb, hooknum))
+               goto out;
+ 
+       /* do the statistics and put it back */
+@@ -907,7 +916,7 @@ static int ip_vs_out_icmp(struct sk_buff *skb, int 
*related,
+ 
+       snet.ip = iph->saddr;
+       return handle_response_icmp(AF_INET, skb, &snet, cih->protocol, cp,
+-                                  pp, ciph.len, ihl);
++                                  pp, ciph.len, ihl, hooknum);
+ }
+ 
+ #ifdef CONFIG_IP_VS_IPV6
+@@ -972,7 +981,8 @@ static int ip_vs_out_icmp_v6(struct sk_buff *skb, int 
*related,
+       snet.in6 = ciph.saddr.in6;
+       writable = ciph.len;
+       return handle_response_icmp(AF_INET6, skb, &snet, ciph.protocol, cp,
+-                                  pp, writable, sizeof(struct ipv6hdr));
++                                  pp, writable, sizeof(struct ipv6hdr),
++                                  hooknum);
+ }
+ #endif
+ 
+@@ -1031,7 +1041,8 @@ static inline bool is_new_conn(const struct sk_buff *skb,
+  */
+ static unsigned int
+ handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
+-              struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)
++              struct ip_vs_conn *cp, struct ip_vs_iphdr *iph,
++              unsigned int hooknum)
+ {
+       struct ip_vs_protocol *pp = pd->pp;
+ 
+@@ -1069,7 +1080,7 @@ handle_response(int af, struct sk_buff *skb, struct 
ip_vs_proto_data *pd,
+        * if it came from this machine itself.  So re-compute
+        * the routing information.
+        */
+-      if (ip_vs_route_me_harder(af, skb))
++      if (ip_vs_route_me_harder(af, skb, hooknum))
+               goto drop;
+ 
+       IP_VS_DBG_PKT(10, af, pp, skb, 0, "After SNAT");
+@@ -1172,7 +1183,7 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int 
af)
+       cp = pp->conn_out_get(af, skb, &iph, 0);
+ 
+       if (likely(cp))
+-              return handle_response(af, skb, pd, cp, &iph);
++              return handle_response(af, skb, pd, cp, &iph, hooknum);
+       if (sysctl_nat_icmp_send(net) &&
+           (pp->protocol == IPPROTO_TCP ||
+            pp->protocol == IPPROTO_UDP ||
+diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
+index f6046d9af8d3..e476cc7dc801 100644
+--- a/net/netfilter/ipvs/ip_vs_sync.c
++++ b/net/netfilter/ipvs/ip_vs_sync.c
+@@ -878,6 +878,8 @@ static void ip_vs_proc_conn(struct net *net, struct 
ip_vs_conn_param *param,
+                       IP_VS_DBG(2, "BACKUP, add new conn. failed\n");
+                       return;
+               }
++              if (!(flags & IP_VS_CONN_F_TEMPLATE))
++                      kfree(param->pe_data);
+       }
+ 
+       if (opt)
+@@ -1151,6 +1153,7 @@ static inline int ip_vs_proc_sync_conn(struct net *net, 
__u8 *p, __u8 *msg_end)
+                               (opt_flags & IPVS_OPT_F_SEQ_DATA ? &opt : NULL)
+                               );
+ #endif
++      ip_vs_pe_put(param.pe);
+       return 0;
+       /* Error exit */
+ out:
+diff --git a/net/rds/iw_rdma.c b/net/rds/iw_rdma.c
+index a817705ce2d0..dba8d0864f18 100644
+--- a/net/rds/iw_rdma.c
++++ b/net/rds/iw_rdma.c
+@@ -88,7 +88,9 @@ static unsigned int rds_iw_unmap_fastreg_list(struct 
rds_iw_mr_pool *pool,
+                       int *unpinned);
+ static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool, struct 
rds_iw_mr *ibmr);
+ 
+-static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device 
**rds_iwdev, struct rdma_cm_id **cm_id)
++static int rds_iw_get_device(struct sockaddr_in *src, struct sockaddr_in *dst,
++                           struct rds_iw_device **rds_iwdev,
++                           struct rdma_cm_id **cm_id)
+ {
+       struct rds_iw_device *iwdev;
+       struct rds_iw_cm_id *i_cm_id;
+@@ -112,15 +114,15 @@ static int rds_iw_get_device(struct rds_sock *rs, struct 
rds_iw_device **rds_iwd
+                               src_addr->sin_port,
+                               dst_addr->sin_addr.s_addr,
+                               dst_addr->sin_port,
+-                              rs->rs_bound_addr,
+-                              rs->rs_bound_port,
+-                              rs->rs_conn_addr,
+-                              rs->rs_conn_port);
++                              src->sin_addr.s_addr,
++                              src->sin_port,
++                              dst->sin_addr.s_addr,
++                              dst->sin_port);
+ #ifdef WORKING_TUPLE_DETECTION
+-                      if (src_addr->sin_addr.s_addr == rs->rs_bound_addr &&
+-                          src_addr->sin_port == rs->rs_bound_port &&
+-                          dst_addr->sin_addr.s_addr == rs->rs_conn_addr &&
+-                          dst_addr->sin_port == rs->rs_conn_port) {
++                      if (src_addr->sin_addr.s_addr == src->sin_addr.s_addr &&
++                          src_addr->sin_port == src->sin_port &&
++                          dst_addr->sin_addr.s_addr == dst->sin_addr.s_addr &&
++                          dst_addr->sin_port == dst->sin_port) {
+ #else
+                       /* FIXME - needs to compare the local and remote
+                        * ipaddr/port tuple, but the ipaddr is the only
+@@ -128,7 +130,7 @@ static int rds_iw_get_device(struct rds_sock *rs, struct 
rds_iw_device **rds_iwd
+                        * zero'ed.  It doesn't appear to be properly populated
+                        * during connection setup...
+                        */
+-                      if (src_addr->sin_addr.s_addr == rs->rs_bound_addr) {
++                      if (src_addr->sin_addr.s_addr == src->sin_addr.s_addr) {
+ #endif
+                               spin_unlock_irq(&iwdev->spinlock);
+                               *rds_iwdev = iwdev;
+@@ -180,19 +182,13 @@ int rds_iw_update_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_i
+ {
+       struct sockaddr_in *src_addr, *dst_addr;
+       struct rds_iw_device *rds_iwdev_old;
+-      struct rds_sock rs;
+       struct rdma_cm_id *pcm_id;
+       int rc;
+ 
+       src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr;
+       dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr;
+ 
+-      rs.rs_bound_addr = src_addr->sin_addr.s_addr;
+-      rs.rs_bound_port = src_addr->sin_port;
+-      rs.rs_conn_addr = dst_addr->sin_addr.s_addr;
+-      rs.rs_conn_port = dst_addr->sin_port;
+-
+-      rc = rds_iw_get_device(&rs, &rds_iwdev_old, &pcm_id);
++      rc = rds_iw_get_device(src_addr, dst_addr, &rds_iwdev_old, &pcm_id);
+       if (rc)
+               rds_iw_remove_cm_id(rds_iwdev, cm_id);
+ 
+@@ -598,9 +594,17 @@ void *rds_iw_get_mr(struct scatterlist *sg, unsigned long nents,
+       struct rds_iw_device *rds_iwdev;
+       struct rds_iw_mr *ibmr = NULL;
+       struct rdma_cm_id *cm_id;
++      struct sockaddr_in src = {
++              .sin_addr.s_addr = rs->rs_bound_addr,
++              .sin_port = rs->rs_bound_port,
++      };
++      struct sockaddr_in dst = {
++              .sin_addr.s_addr = rs->rs_conn_addr,
++              .sin_port = rs->rs_conn_port,
++      };
+       int ret;
+ 
+-      ret = rds_iw_get_device(rs, &rds_iwdev, &cm_id);
++      ret = rds_iw_get_device(&src, &dst, &rds_iwdev, &cm_id);
+       if (ret || !cm_id) {
+               ret = -ENODEV;
+               goto out;
+diff --git a/net/rxrpc/ar-recvmsg.c b/net/rxrpc/ar-recvmsg.c
+index 898492a8d61b..5cc2da5d295d 100644
+--- a/net/rxrpc/ar-recvmsg.c
++++ b/net/rxrpc/ar-recvmsg.c
+@@ -87,7 +87,7 @@ int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock,
+               if (!skb) {
+                       /* nothing remains on the queue */
+                       if (copied &&
+-                          (msg->msg_flags & MSG_PEEK || timeo == 0))
++                          (flags & MSG_PEEK || timeo == 0))
+                               goto out;
+ 
+                       /* wait for a message to turn up */
+diff --git a/sound/core/control.c b/sound/core/control.c
+index 98a29b26c5f4..f2082a35b890 100644
+--- a/sound/core/control.c
++++ b/sound/core/control.c
+@@ -1168,6 +1168,10 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file,
+ 
+       if (info->count < 1)
+               return -EINVAL;
++      if (!*info->id.name)
++              return -EINVAL;
++      if (strnlen(info->id.name, sizeof(info->id.name)) >= sizeof(info->id.name))
++              return -EINVAL;
+       access = info->access == 0 ? SNDRV_CTL_ELEM_ACCESS_READWRITE :
+               (info->access & (SNDRV_CTL_ELEM_ACCESS_READWRITE|
+                                SNDRV_CTL_ELEM_ACCESS_INACTIVE|
+diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
+index cb4d3700f330..db67e5b596d3 100644
+--- a/sound/pci/hda/hda_generic.c
++++ b/sound/pci/hda/hda_generic.c
+@@ -642,12 +642,45 @@ static int get_amp_val_to_activate(struct hda_codec *codec, hda_nid_t nid,
+       return val;
+ }
+ 
++/* is this a stereo widget or a stereo-to-mono mix? */
++static bool is_stereo_amps(struct hda_codec *codec, hda_nid_t nid, int dir)
++{
++      unsigned int wcaps = get_wcaps(codec, nid);
++      hda_nid_t conn;
++
++      if (wcaps & AC_WCAP_STEREO)
++              return true;
++      if (dir != HDA_INPUT || get_wcaps_type(wcaps) != AC_WID_AUD_MIX)
++              return false;
++      if (snd_hda_get_num_conns(codec, nid) != 1)
++              return false;
++      if (snd_hda_get_connections(codec, nid, &conn, 1) < 0)
++              return false;
++      return !!(get_wcaps(codec, conn) & AC_WCAP_STEREO);
++}
++
+ /* initialize the amp value (only at the first time) */
+ static void init_amp(struct hda_codec *codec, hda_nid_t nid, int dir, int idx)
+ {
+       unsigned int caps = query_amp_caps(codec, nid, dir);
+       int val = get_amp_val_to_activate(codec, nid, dir, caps, false);
+-      snd_hda_codec_amp_init_stereo(codec, nid, dir, idx, 0xff, val);
++
++      if (is_stereo_amps(codec, nid, dir))
++              snd_hda_codec_amp_init_stereo(codec, nid, dir, idx, 0xff, val);
++      else
++              snd_hda_codec_amp_init(codec, nid, 0, dir, idx, 0xff, val);
++}
++
++/* update the amp, doing in stereo or mono depending on NID */
++static int update_amp(struct hda_codec *codec, hda_nid_t nid, int dir, int idx,
++                    unsigned int mask, unsigned int val)
++{
++      if (is_stereo_amps(codec, nid, dir))
++              return snd_hda_codec_amp_stereo(codec, nid, dir, idx,
++                                              mask, val);
++      else
++              return snd_hda_codec_amp_update(codec, nid, 0, dir, idx,
++                                              mask, val);
+ }
+ 
+ /* calculate amp value mask we can modify;
+@@ -687,7 +720,7 @@ static void activate_amp(struct hda_codec *codec, hda_nid_t nid, int dir,
+               return;
+ 
+       val &= mask;
+-      snd_hda_codec_amp_stereo(codec, nid, dir, idx, mask, val);
++      update_amp(codec, nid, dir, idx, mask, val);
+ }
+ 
+ static void activate_amp_out(struct hda_codec *codec, struct nid_path *path,
+@@ -4235,13 +4268,11 @@ static void mute_all_mixer_nid(struct hda_codec *codec, hda_nid_t mix)
+       has_amp = nid_has_mute(codec, mix, HDA_INPUT);
+       for (i = 0; i < nums; i++) {
+               if (has_amp)
+-                      snd_hda_codec_amp_stereo(codec, mix,
+-                                               HDA_INPUT, i,
+-                                               0xff, HDA_AMP_MUTE);
++                      update_amp(codec, mix, HDA_INPUT, i,
++                                 0xff, HDA_AMP_MUTE);
+               else if (nid_has_volume(codec, conn[i], HDA_OUTPUT))
+-                      snd_hda_codec_amp_stereo(codec, conn[i],
+-                                               HDA_OUTPUT, 0,
+-                                               0xff, HDA_AMP_MUTE);
++                      update_amp(codec, conn[i], HDA_OUTPUT, 0,
++                                 0xff, HDA_AMP_MUTE);
+       }
+ }
+ 
+diff --git a/sound/pci/hda/hda_proc.c b/sound/pci/hda/hda_proc.c
+index 0fee8fae590a..eb94e495c754 100644
+--- a/sound/pci/hda/hda_proc.c
++++ b/sound/pci/hda/hda_proc.c
+@@ -129,13 +129,38 @@ static void print_amp_caps(struct snd_info_buffer *buffer,
+                   (caps & AC_AMPCAP_MUTE) >> AC_AMPCAP_MUTE_SHIFT);
+ }
+ 
++/* is this a stereo widget or a stereo-to-mono mix? */
++static bool is_stereo_amps(struct hda_codec *codec, hda_nid_t nid,
++                         int dir, unsigned int wcaps, int indices)
++{
++      hda_nid_t conn;
++
++      if (wcaps & AC_WCAP_STEREO)
++              return true;
++      /* check for a stereo-to-mono mix; it must be:
++       * only a single connection, only for input, and only a mixer widget
++       */
++      if (indices != 1 || dir != HDA_INPUT ||
++          get_wcaps_type(wcaps) != AC_WID_AUD_MIX)
++              return false;
++
++      if (snd_hda_get_raw_connections(codec, nid, &conn, 1) < 0)
++              return false;
++      /* the connection source is a stereo? */
++      wcaps = snd_hda_param_read(codec, conn, AC_PAR_AUDIO_WIDGET_CAP);
++      return !!(wcaps & AC_WCAP_STEREO);
++}
++
+ static void print_amp_vals(struct snd_info_buffer *buffer,
+                          struct hda_codec *codec, hda_nid_t nid,
+-                         int dir, int stereo, int indices)
++                         int dir, unsigned int wcaps, int indices)
+ {
+       unsigned int val;
++      bool stereo;
+       int i;
+ 
++      stereo = is_stereo_amps(codec, nid, dir, wcaps, indices);
++
+       dir = dir == HDA_OUTPUT ? AC_AMP_GET_OUTPUT : AC_AMP_GET_INPUT;
+       for (i = 0; i < indices; i++) {
+               snd_iprintf(buffer, " [");
+@@ -682,12 +707,10 @@ static void print_codec_info(struct snd_info_entry *entry,
+                           (codec->single_adc_amp &&
+                            wid_type == AC_WID_AUD_IN))
+                               print_amp_vals(buffer, codec, nid, HDA_INPUT,
+-                                             wid_caps & AC_WCAP_STEREO,
+-                                             1);
++                                             wid_caps, 1);
+                       else
+                               print_amp_vals(buffer, codec, nid, HDA_INPUT,
+-                                             wid_caps & AC_WCAP_STEREO,
+-                                             conn_len);
++                                             wid_caps, conn_len);
+               }
+               if (wid_caps & AC_WCAP_OUT_AMP) {
+                       snd_iprintf(buffer, "  Amp-Out caps: ");
+@@ -696,11 +719,10 @@ static void print_codec_info(struct snd_info_entry *entry,
+                       if (wid_type == AC_WID_PIN &&
+                           codec->pin_amp_workaround)
+                               print_amp_vals(buffer, codec, nid, HDA_OUTPUT,
+-                                             wid_caps & AC_WCAP_STEREO,
+-                                             conn_len);
++                                             wid_caps, conn_len);
+                       else
+                               print_amp_vals(buffer, codec, nid, HDA_OUTPUT,
+-                                             wid_caps & AC_WCAP_STEREO, 1);
++                                             wid_caps, 1);
+               }
+ 
+               switch (wid_type) {
+diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
+index cccaf9c7a7bb..e2642ba88b2d 100644
+--- a/sound/pci/hda/patch_cirrus.c
++++ b/sound/pci/hda/patch_cirrus.c
+@@ -363,6 +363,7 @@ static const struct snd_pci_quirk cs420x_fixup_tbl[] = {
+       SND_PCI_QUIRK(0x106b, 0x1c00, "MacBookPro 8,1", CS420X_MBP81),
+       SND_PCI_QUIRK(0x106b, 0x2000, "iMac 12,2", CS420X_IMAC27_122),
+       SND_PCI_QUIRK(0x106b, 0x2800, "MacBookPro 10,1", CS420X_MBP101),
++      SND_PCI_QUIRK(0x106b, 0x5600, "MacBookAir 5,2", CS420X_MBP81),
+       SND_PCI_QUIRK(0x106b, 0x5b00, "MacBookAir 4,2", CS420X_MBA42),
+       SND_PCI_QUIRK_VENDOR(0x106b, "Apple", CS420X_APPLE),
+       {} /* terminator */
+@@ -531,6 +532,7 @@ static int patch_cs420x(struct hda_codec *codec)
+               return -ENOMEM;
+ 
+       spec->gen.automute_hook = cs_automute;
++      codec->single_adc_amp = 1;
+ 
+       snd_hda_pick_fixup(codec, cs420x_models, cs420x_fixup_tbl,
+                          cs420x_fixups);
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index 1868d3a6e310..fab909908a42 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -3223,6 +3223,7 @@ enum {
+       CXT_PINCFG_LENOVO_TP410,
+       CXT_PINCFG_LEMOTE_A1004,
+       CXT_PINCFG_LEMOTE_A1205,
++      CXT_PINCFG_COMPAQ_CQ60,
+       CXT_FIXUP_STEREO_DMIC,
+       CXT_FIXUP_INC_MIC_BOOST,
+       CXT_FIXUP_GPIO1,
+@@ -3296,6 +3297,15 @@ static const struct hda_fixup cxt_fixups[] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = cxt_pincfg_lemote,
+       },
++      [CXT_PINCFG_COMPAQ_CQ60] = {
++              .type = HDA_FIXUP_PINS,
++              .v.pins = (const struct hda_pintbl[]) {
++                      /* 0x17 was falsely set up as a mic, it should 0x1d */
++                      { 0x17, 0x400001f0 },
++                      { 0x1d, 0x97a70120 },
++                      { }
++              }
++      },
+       [CXT_FIXUP_STEREO_DMIC] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = cxt_fixup_stereo_dmic,
+@@ -3316,6 +3326,7 @@ static const struct hda_fixup cxt_fixups[] = {
+ };
+ 
+ static const struct snd_pci_quirk cxt5051_fixups[] = {
++      SND_PCI_QUIRK(0x103c, 0x360b, "Compaq CQ60", CXT_PINCFG_COMPAQ_CQ60),
+       SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo X200", CXT_PINCFG_LENOVO_X200),
+       {}
+ };

Reply via email to