commit:     38dbdf600ccd3f9bbf4038202e0d001db9abedeb
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Nov  8 13:48:09 2017 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Nov  8 13:48:09 2017 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=38dbdf60

Linux patch 4.13.12

 0000_README              |    4 +
 1011_linux-4.13.12.patch | 1438 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1442 insertions(+)

diff --git a/0000_README b/0000_README
index bca516e..878d286 100644
--- a/0000_README
+++ b/0000_README
@@ -87,6 +87,10 @@ Patch:  1010_linux-4.13.11.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.13.11
 
+Patch:  1011_linux-4.13.12.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.13.12
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1011_linux-4.13.12.patch b/1011_linux-4.13.12.patch
new file mode 100644
index 0000000..ed5088c
--- /dev/null
+++ b/1011_linux-4.13.12.patch
@@ -0,0 +1,1438 @@
+diff --git a/Makefile b/Makefile
+index 8280953c8a45..a7c847f495b0 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 13
+-SUBLEVEL = 11
++SUBLEVEL = 12
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+ 
+diff --git a/arch/arm/boot/dts/armada-375.dtsi b/arch/arm/boot/dts/armada-375.dtsi
+index 50c5e8417802..10b99530280a 100644
+--- a/arch/arm/boot/dts/armada-375.dtsi
++++ b/arch/arm/boot/dts/armada-375.dtsi
+@@ -178,9 +178,9 @@
+                               reg = <0x8000 0x1000>;
+                               cache-unified;
+                               cache-level = <2>;
+-                              arm,double-linefill-incr = <1>;
++                              arm,double-linefill-incr = <0>;
+                               arm,double-linefill-wrap = <0>;
+-                              arm,double-linefill = <1>;
++                              arm,double-linefill = <0>;
+                               prefetch-data = <1>;
+                       };
+ 
+diff --git a/arch/arm/boot/dts/armada-38x.dtsi b/arch/arm/boot/dts/armada-38x.dtsi
+index af31f5d6c0e5..c3448622e79e 100644
+--- a/arch/arm/boot/dts/armada-38x.dtsi
++++ b/arch/arm/boot/dts/armada-38x.dtsi
+@@ -143,9 +143,9 @@
+                               reg = <0x8000 0x1000>;
+                               cache-unified;
+                               cache-level = <2>;
+-                              arm,double-linefill-incr = <1>;
++                              arm,double-linefill-incr = <0>;
+                               arm,double-linefill-wrap = <0>;
+-                              arm,double-linefill = <1>;
++                              arm,double-linefill = <0>;
+                               prefetch-data = <1>;
+                       };
+ 
+diff --git a/arch/arm/boot/dts/armada-39x.dtsi b/arch/arm/boot/dts/armada-39x.dtsi
+index 60fbfd5907c7..55d02641d930 100644
+--- a/arch/arm/boot/dts/armada-39x.dtsi
++++ b/arch/arm/boot/dts/armada-39x.dtsi
+@@ -111,9 +111,9 @@
+                               reg = <0x8000 0x1000>;
+                               cache-unified;
+                               cache-level = <2>;
+-                              arm,double-linefill-incr = <1>;
++                              arm,double-linefill-incr = <0>;
+                               arm,double-linefill-wrap = <0>;
+-                              arm,double-linefill = <1>;
++                              arm,double-linefill = <0>;
+                               prefetch-data = <1>;
+                       };
+ 
+diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
+index 721ab5ecfb9b..0f2c8a2a8131 100644
+--- a/arch/arm/include/asm/Kbuild
++++ b/arch/arm/include/asm/Kbuild
+@@ -20,7 +20,6 @@ generic-y += simd.h
+ generic-y += sizes.h
+ generic-y += timex.h
+ generic-y += trace_clock.h
+-generic-y += unaligned.h
+ 
+ generated-y += mach-types.h
+ generated-y += unistd-nr.h
+diff --git a/arch/arm/include/asm/unaligned.h b/arch/arm/include/asm/unaligned.h
+new file mode 100644
+index 000000000000..ab905ffcf193
+--- /dev/null
++++ b/arch/arm/include/asm/unaligned.h
+@@ -0,0 +1,27 @@
++#ifndef __ASM_ARM_UNALIGNED_H
++#define __ASM_ARM_UNALIGNED_H
++
++/*
++ * We generally want to set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS on ARMv6+,
++ * but we don't want to use linux/unaligned/access_ok.h since that can lead
++ * to traps on unaligned stm/ldm or strd/ldrd.
++ */
++#include <asm/byteorder.h>
++
++#if defined(__LITTLE_ENDIAN)
++# include <linux/unaligned/le_struct.h>
++# include <linux/unaligned/be_byteshift.h>
++# include <linux/unaligned/generic.h>
++# define get_unaligned        __get_unaligned_le
++# define put_unaligned        __put_unaligned_le
++#elif defined(__BIG_ENDIAN)
++# include <linux/unaligned/be_struct.h>
++# include <linux/unaligned/le_byteshift.h>
++# include <linux/unaligned/generic.h>
++# define get_unaligned        __get_unaligned_be
++# define put_unaligned        __put_unaligned_be
++#else
++# error need to define endianess
++#endif
++
++#endif /* __ASM_ARM_UNALIGNED_H */
+diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c
+index 0064b86a2c87..30a13647c54c 100644
+--- a/arch/arm/kvm/emulate.c
++++ b/arch/arm/kvm/emulate.c
+@@ -227,7 +227,7 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu)
+       u32 return_offset = (is_thumb) ? 2 : 4;
+ 
+       kvm_update_psr(vcpu, UND_MODE);
+-      *vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) - return_offset;
++      *vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
+ 
+       /* Branch to exception vector */
+       *vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset;
+@@ -239,10 +239,8 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu)
+  */
+ static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr)
+ {
+-      unsigned long cpsr = *vcpu_cpsr(vcpu);
+-      bool is_thumb = (cpsr & PSR_T_BIT);
+       u32 vect_offset;
+-      u32 return_offset = (is_thumb) ? 4 : 0;
++      u32 return_offset = (is_pabt) ? 4 : 8;
+       bool is_lpae;
+ 
+       kvm_update_psr(vcpu, ABT_MODE);
+diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile
+index 8679405b0b2b..92eab1d51785 100644
+--- a/arch/arm/kvm/hyp/Makefile
++++ b/arch/arm/kvm/hyp/Makefile
+@@ -2,7 +2,7 @@
+ # Makefile for Kernel-based Virtual Machine module, HYP part
+ #
+ 
+-ccflags-y += -fno-stack-protector
++ccflags-y += -fno-stack-protector -DDISABLE_BRANCH_PROFILING
+ 
+ KVM=../../../../virt/kvm
+ 
+diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
+index 8a62648848e5..c99ffd8dce27 100644
+--- a/arch/arm64/kernel/traps.c
++++ b/arch/arm64/kernel/traps.c
+@@ -116,7 +116,7 @@ static void __dump_instr(const char *lvl, struct pt_regs *regs)
+       for (i = -4; i < 1; i++) {
+               unsigned int val, bad;
+ 
+-              bad = __get_user(val, &((u32 *)addr)[i]);
++              bad = get_user(val, &((u32 *)addr)[i]);
+ 
+               if (!bad)
+                       p += sprintf(p, i == 0 ? "(%08x) " : "%08x ", val);
+diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile
+index 14c4e3b14bcb..48b03547a969 100644
+--- a/arch/arm64/kvm/hyp/Makefile
++++ b/arch/arm64/kvm/hyp/Makefile
+@@ -2,7 +2,7 @@
+ # Makefile for Kernel-based Virtual Machine module, HYP part
+ #
+ 
+-ccflags-y += -fno-stack-protector
++ccflags-y += -fno-stack-protector -DDISABLE_BRANCH_PROFILING
+ 
+ KVM=../../../../virt/kvm
+ 
+diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
+index da6a8cfa54a0..3556715a774e 100644
+--- a/arch/arm64/kvm/inject_fault.c
++++ b/arch/arm64/kvm/inject_fault.c
+@@ -33,12 +33,26 @@
+ #define LOWER_EL_AArch64_VECTOR               0x400
+ #define LOWER_EL_AArch32_VECTOR               0x600
+ 
++/*
++ * Table taken from ARMv8 ARM DDI0487B-B, table G1-10.
++ */
++static const u8 return_offsets[8][2] = {
++      [0] = { 0, 0 },         /* Reset, unused */
++      [1] = { 4, 2 },         /* Undefined */
++      [2] = { 0, 0 },         /* SVC, unused */
++      [3] = { 4, 4 },         /* Prefetch abort */
++      [4] = { 8, 8 },         /* Data abort */
++      [5] = { 0, 0 },         /* HVC, unused */
++      [6] = { 4, 4 },         /* IRQ, unused */
++      [7] = { 4, 4 },         /* FIQ, unused */
++};
++
+ static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
+ {
+       unsigned long cpsr;
+       unsigned long new_spsr_value = *vcpu_cpsr(vcpu);
+       bool is_thumb = (new_spsr_value & COMPAT_PSR_T_BIT);
+-      u32 return_offset = (is_thumb) ? 4 : 0;
++      u32 return_offset = return_offsets[vect_offset >> 2][is_thumb];
+       u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
+ 
+       cpsr = mode | COMPAT_PSR_I_BIT;
+diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c
+index 76923349b4fe..797da807916f 100644
+--- a/arch/mips/kernel/smp-cmp.c
++++ b/arch/mips/kernel/smp-cmp.c
+@@ -19,7 +19,7 @@
+ #undef DEBUG
+ 
+ #include <linux/kernel.h>
+-#include <linux/sched.h>
++#include <linux/sched/task_stack.h>
+ #include <linux/smp.h>
+ #include <linux/cpumask.h>
+ #include <linux/interrupt.h>
+diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
+index 6bace7695788..20d7bc5f0eb5 100644
+--- a/arch/mips/kernel/smp.c
++++ b/arch/mips/kernel/smp.c
+@@ -66,6 +66,7 @@ EXPORT_SYMBOL(cpu_sibling_map);
+ cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
+ EXPORT_SYMBOL(cpu_core_map);
+ 
++static DECLARE_COMPLETION(cpu_starting);
+ static DECLARE_COMPLETION(cpu_running);
+ 
+ /*
+@@ -376,6 +377,12 @@ asmlinkage void start_secondary(void)
+       cpumask_set_cpu(cpu, &cpu_coherent_mask);
+       notify_cpu_starting(cpu);
+ 
++      /* Notify boot CPU that we're starting & ready to sync counters */
++      complete(&cpu_starting);
++
++      synchronise_count_slave(cpu);
++
++      /* The CPU is running and counters synchronised, now mark it online */
+       set_cpu_online(cpu, true);
+ 
+       set_cpu_sibling_map(cpu);
+@@ -383,8 +390,11 @@ asmlinkage void start_secondary(void)
+ 
+       calculate_cpu_foreign_map();
+ 
++      /*
++       * Notify boot CPU that we're up & online and it can safely return
++       * from __cpu_up
++       */
+       complete(&cpu_running);
+-      synchronise_count_slave(cpu);
+ 
+       /*
+        * irq will be enabled in ->smp_finish(), enabling it too early
+@@ -443,17 +453,17 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
+ {
+       mp_ops->boot_secondary(cpu, tidle);
+ 
+-      /*
+-       * We must check for timeout here, as the CPU will not be marked
+-       * online until the counters are synchronised.
+-       */
+-      if (!wait_for_completion_timeout(&cpu_running,
++      /* Wait for CPU to start and be ready to sync counters */
++      if (!wait_for_completion_timeout(&cpu_starting,
+                                        msecs_to_jiffies(1000))) {
+               pr_crit("CPU%u: failed to start\n", cpu);
+               return -EIO;
+       }
+ 
+       synchronise_count_master(cpu);
++
++      /* Wait for CPU to finish startup & mark itself online before return */
++      wait_for_completion(&cpu_running);
+       return 0;
+ }
+ 
+diff --git a/arch/mips/mm/uasm-micromips.c b/arch/mips/mm/uasm-micromips.c
+index c28ff53c8da0..cdb5a191b9d5 100644
+--- a/arch/mips/mm/uasm-micromips.c
++++ b/arch/mips/mm/uasm-micromips.c
+@@ -80,7 +80,7 @@ static const struct insn const insn_table_MM[insn_invalid] = {
+       [insn_jr]       = {M(mm_pool32a_op, 0, 0, 0, mm_jalr_op, mm_pool32axf_op), RS},
+       [insn_lb]       = {M(mm_lb32_op, 0, 0, 0, 0, 0), RT | RS | SIMM},
+       [insn_ld]       = {0, 0},
+-      [insn_lh]       = {M(mm_lh32_op, 0, 0, 0, 0, 0), RS | RS | SIMM},
++      [insn_lh]       = {M(mm_lh32_op, 0, 0, 0, 0, 0), RT | RS | SIMM},
+       [insn_ll]       = {M(mm_pool32c_op, 0, 0, (mm_ll_func << 1), 0, 0), RS | RT | SIMM},
+       [insn_lld]      = {0, 0},
+       [insn_lui]      = {M(mm_pool32i_op, mm_lui_op, 0, 0, 0, 0), RS | SIMM},
+diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c
+index 401776f92288..e45f05cc510d 100644
+--- a/arch/mips/net/ebpf_jit.c
++++ b/arch/mips/net/ebpf_jit.c
+@@ -1485,7 +1485,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
+               }
+               src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
+               if (src < 0)
+-                      return dst;
++                      return src;
+               if (BPF_MODE(insn->code) == BPF_XADD) {
+                       switch (BPF_SIZE(insn->code)) {
+                       case BPF_W:
+diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h
+index 5482928eea1b..abef812de7f8 100644
+--- a/arch/powerpc/include/asm/code-patching.h
++++ b/arch/powerpc/include/asm/code-patching.h
+@@ -83,16 +83,8 @@ static inline unsigned long ppc_function_entry(void *func)
+        * On PPC64 ABIv1 the function pointer actually points to the
+        * function's descriptor. The first entry in the descriptor is the
+        * address of the function text.
+-       *
+-       * However, we may also receive pointer to an assembly symbol. To
+-       * detect that, we first check if the function pointer we receive
+-       * already points to kernel/module text and we only dereference it
+-       * if it doesn't.
+        */
+-      if (kernel_text_address((unsigned long)func))
+-              return (unsigned long)func;
+-      else
+-              return ((func_descr_t *)func)->entry;
++      return ((func_descr_t *)func)->entry;
+ #else
+       return (unsigned long)func;
+ #endif
+diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
+index 367494dc67d9..bebc3007a793 100644
+--- a/arch/powerpc/kernel/kprobes.c
++++ b/arch/powerpc/kernel/kprobes.c
+@@ -600,7 +600,12 @@ NOKPROBE_SYMBOL(kprobe_fault_handler);
+ 
+ unsigned long arch_deref_entry_point(void *entry)
+ {
+-      return ppc_global_function_entry(entry);
++#ifdef PPC64_ELF_ABI_v1
++      if (!kernel_text_address((unsigned long)entry))
++              return ppc_global_function_entry(entry);
++      else
++#endif
++              return (unsigned long)entry;
+ }
+ NOKPROBE_SYMBOL(arch_deref_entry_point);
+ 
+diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
+index cdf82492b770..836877e2da22 100644
+--- a/arch/x86/kernel/cpu/Makefile
++++ b/arch/x86/kernel/cpu/Makefile
+@@ -21,7 +21,7 @@ obj-y                        += common.o
+ obj-y                 += rdrand.o
+ obj-y                 += match.o
+ obj-y                 += bugs.o
+-obj-$(CONFIG_CPU_FREQ)        += aperfmperf.o
++obj-y                 += aperfmperf.o
+ 
+ obj-$(CONFIG_PROC_FS) += proc.o
+ obj-$(CONFIG_X86_FEATURE_NAMES) += capflags.o powerflags.o
+diff --git a/arch/x86/kernel/cpu/aperfmperf.c b/arch/x86/kernel/cpu/aperfmperf.c
+index 0ee83321a313..957813e0180d 100644
+--- a/arch/x86/kernel/cpu/aperfmperf.c
++++ b/arch/x86/kernel/cpu/aperfmperf.c
+@@ -42,10 +42,6 @@ static void aperfmperf_snapshot_khz(void *dummy)
+       s64 time_delta = ktime_ms_delta(now, s->time);
+       unsigned long flags;
+ 
+-      /* Don't bother re-computing within the cache threshold time. */
+-      if (time_delta < APERFMPERF_CACHE_THRESHOLD_MS)
+-              return;
+-
+       local_irq_save(flags);
+       rdmsrl(MSR_IA32_APERF, aperf);
+       rdmsrl(MSR_IA32_MPERF, mperf);
+@@ -74,6 +70,7 @@ static void aperfmperf_snapshot_khz(void *dummy)
+ 
+ unsigned int arch_freq_get_on_cpu(int cpu)
+ {
++      s64 time_delta;
+       unsigned int khz;
+ 
+       if (!cpu_khz)
+@@ -82,6 +79,12 @@ unsigned int arch_freq_get_on_cpu(int cpu)
+       if (!static_cpu_has(X86_FEATURE_APERFMPERF))
+               return 0;
+ 
++      /* Don't bother re-computing within the cache threshold time. */
++      time_delta = ktime_ms_delta(ktime_get(), per_cpu(samples.time, cpu));
++      khz = per_cpu(samples.khz, cpu);
++      if (khz && time_delta < APERFMPERF_CACHE_THRESHOLD_MS)
++              return khz;
++
+       smp_call_function_single(cpu, aperfmperf_snapshot_khz, NULL, 1);
+       khz = per_cpu(samples.khz, cpu);
+       if (khz)
+diff --git a/arch/x86/kernel/cpu/mcheck/dev-mcelog.c b/arch/x86/kernel/cpu/mcheck/dev-mcelog.c
+index 10cec43aac38..7f85b76f43bc 100644
+--- a/arch/x86/kernel/cpu/mcheck/dev-mcelog.c
++++ b/arch/x86/kernel/cpu/mcheck/dev-mcelog.c
+@@ -24,14 +24,6 @@ static DEFINE_MUTEX(mce_chrdev_read_mutex);
+ static char mce_helper[128];
+ static char *mce_helper_argv[2] = { mce_helper, NULL };
+ 
+-#define mce_log_get_idx_check(p) \
+-({ \
+-      RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
+-                       !lockdep_is_held(&mce_chrdev_read_mutex), \
+-                       "suspicious mce_log_get_idx_check() usage"); \
+-      smp_load_acquire(&(p)); \
+-})
+-
+ /*
+  * Lockless MCE logging infrastructure.
+  * This avoids deadlocks on printk locks without having to break locks. Also
+@@ -53,43 +45,32 @@ static int dev_mce_log(struct notifier_block *nb, unsigned long val,
+                               void *data)
+ {
+       struct mce *mce = (struct mce *)data;
+-      unsigned int next, entry;
+-
+-      wmb();
+-      for (;;) {
+-              entry = mce_log_get_idx_check(mcelog.next);
+-              for (;;) {
+-
+-                      /*
+-                       * When the buffer fills up discard new entries.
+-                       * Assume that the earlier errors are the more
+-                       * interesting ones:
+-                       */
+-                      if (entry >= MCE_LOG_LEN) {
+-                              set_bit(MCE_OVERFLOW,
+-                                      (unsigned long *)&mcelog.flags);
+-                              return NOTIFY_OK;
+-                      }
+-                      /* Old left over entry. Skip: */
+-                      if (mcelog.entry[entry].finished) {
+-                              entry++;
+-                              continue;
+-                      }
+-                      break;
+-              }
+-              smp_rmb();
+-              next = entry + 1;
+-              if (cmpxchg(&mcelog.next, entry, next) == entry)
+-                      break;
++      unsigned int entry;
++
++      mutex_lock(&mce_chrdev_read_mutex);
++
++      entry = mcelog.next;
++
++      /*
++       * When the buffer fills up discard new entries. Assume that the
++       * earlier errors are the more interesting ones:
++       */
++      if (entry >= MCE_LOG_LEN) {
++              set_bit(MCE_OVERFLOW, (unsigned long *)&mcelog.flags);
++              goto unlock;
+       }
++
++      mcelog.next = entry + 1;
++
+       memcpy(mcelog.entry + entry, mce, sizeof(struct mce));
+-      wmb();
+       mcelog.entry[entry].finished = 1;
+-      wmb();
+ 
+       /* wake processes polling /dev/mcelog */
+       wake_up_interruptible(&mce_chrdev_wait);
+ 
++unlock:
++      mutex_unlock(&mce_chrdev_read_mutex);
++
+       return NOTIFY_OK;
+ }
+ 
+@@ -177,13 +158,6 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
+       return 0;
+ }
+ 
+-static void collect_tscs(void *data)
+-{
+-      unsigned long *cpu_tsc = (unsigned long *)data;
+-
+-      cpu_tsc[smp_processor_id()] = rdtsc();
+-}
+-
+ static int mce_apei_read_done;
+ 
+ /* Collect MCE record of previous boot in persistent storage via APEI ERST. */
+@@ -231,14 +205,9 @@ static ssize_t mce_chrdev_read(struct file *filp, char __user *ubuf,
+                               size_t usize, loff_t *off)
+ {
+       char __user *buf = ubuf;
+-      unsigned long *cpu_tsc;
+-      unsigned prev, next;
++      unsigned next;
+       int i, err;
+ 
+-      cpu_tsc = kmalloc(nr_cpu_ids * sizeof(long), GFP_KERNEL);
+-      if (!cpu_tsc)
+-              return -ENOMEM;
+-
+       mutex_lock(&mce_chrdev_read_mutex);
+ 
+       if (!mce_apei_read_done) {
+@@ -247,65 +216,29 @@ static ssize_t mce_chrdev_read(struct file *filp, char __user *ubuf,
+                       goto out;
+       }
+ 
+-      next = mce_log_get_idx_check(mcelog.next);
+-
+       /* Only supports full reads right now */
+       err = -EINVAL;
+       if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce))
+               goto out;
+ 
++      next = mcelog.next;
+       err = 0;
+-      prev = 0;
+-      do {
+-              for (i = prev; i < next; i++) {
+-                      unsigned long start = jiffies;
+-                      struct mce *m = &mcelog.entry[i];
+-
+-                      while (!m->finished) {
+-                              if (time_after_eq(jiffies, start + 2)) {
+-                                      memset(m, 0, sizeof(*m));
+-                                      goto timeout;
+-                              }
+-                              cpu_relax();
+-                      }
+-                      smp_rmb();
+-                      err |= copy_to_user(buf, m, sizeof(*m));
+-                      buf += sizeof(*m);
+-timeout:
+-                      ;
+-              }
+-
+-              memset(mcelog.entry + prev, 0,
+-                     (next - prev) * sizeof(struct mce));
+-              prev = next;
+-              next = cmpxchg(&mcelog.next, prev, 0);
+-      } while (next != prev);
+-
+-      synchronize_sched();
+ 
+-      /*
+-       * Collect entries that were still getting written before the
+-       * synchronize.
+-       */
+-      on_each_cpu(collect_tscs, cpu_tsc, 1);
+-
+-      for (i = next; i < MCE_LOG_LEN; i++) {
++      for (i = 0; i < next; i++) {
+               struct mce *m = &mcelog.entry[i];
+ 
+-              if (m->finished && m->tsc < cpu_tsc[m->cpu]) {
+-                      err |= copy_to_user(buf, m, sizeof(*m));
+-                      smp_rmb();
+-                      buf += sizeof(*m);
+-                      memset(m, 0, sizeof(*m));
+-              }
++              err |= copy_to_user(buf, m, sizeof(*m));
++              buf += sizeof(*m);
+       }
+ 
++      memset(mcelog.entry, 0, next * sizeof(struct mce));
++      mcelog.next = 0;
++
+       if (err)
+               err = -EFAULT;
+ 
+ out:
+       mutex_unlock(&mce_chrdev_read_mutex);
+-      kfree(cpu_tsc);
+ 
+       return err ? err : buf - ubuf;
+ }
+diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
+index 218f79825b3c..510e69596278 100644
+--- a/arch/x86/kernel/cpu/proc.c
++++ b/arch/x86/kernel/cpu/proc.c
+@@ -2,6 +2,7 @@
+ #include <linux/timex.h>
+ #include <linux/string.h>
+ #include <linux/seq_file.h>
++#include <linux/cpufreq.h>
+ 
+ /*
+  *    Get CPU information for use by the procfs.
+@@ -75,9 +76,16 @@ static int show_cpuinfo(struct seq_file *m, void *v)
+       if (c->microcode)
+               seq_printf(m, "microcode\t: 0x%x\n", c->microcode);
+ 
+-      if (cpu_has(c, X86_FEATURE_TSC))
++      if (cpu_has(c, X86_FEATURE_TSC)) {
++              unsigned int freq = arch_freq_get_on_cpu(cpu);
++
++              if (!freq)
++                      freq = cpufreq_quick_get(cpu);
++              if (!freq)
++                      freq = cpu_khz;
+               seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
+-                         cpu_khz / 1000, (cpu_khz % 1000));
++                         freq / 1000, (freq % 1000));
++      }
+ 
+       /* Cache size */
+       if (c->x86_cache_size >= 0)
+diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
+index d3d5523862c2..b49952b5a189 100644
+--- a/drivers/block/virtio_blk.c
++++ b/drivers/block/virtio_blk.c
+@@ -593,10 +593,22 @@ static int virtblk_map_queues(struct blk_mq_tag_set *set)
+       return blk_mq_virtio_map_queues(set, vblk->vdev, 0);
+ }
+ 
++#ifdef CONFIG_VIRTIO_BLK_SCSI
++static void virtblk_initialize_rq(struct request *req)
++{
++      struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
++
++      scsi_req_init(&vbr->sreq);
++}
++#endif
++
+ static const struct blk_mq_ops virtio_mq_ops = {
+       .queue_rq       = virtio_queue_rq,
+       .complete       = virtblk_request_done,
+       .init_request   = virtblk_init_request,
++#ifdef CONFIG_VIRTIO_BLK_SCSI
++      .initialize_rq_fn = virtblk_initialize_rq,
++#endif
+       .map_queues     = virtblk_map_queues,
+ };
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+index 31db356476f8..1086cf86354f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+@@ -93,6 +93,10 @@ static int uvd_v6_0_early_init(void *handle)
+ {
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ 
++      if (!(adev->flags & AMD_IS_APU) &&
++          (RREG32_SMC(ixCC_HARVEST_FUSES) & CC_HARVEST_FUSES__UVD_DISABLE_MASK))
++              return -ENOENT;
++
+       uvd_v6_0_set_ring_funcs(adev);
+       uvd_v6_0_set_irq_funcs(adev);
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+index 90332f55cfba..cf81065e3c5a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+@@ -365,15 +365,10 @@ static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
+ {
+       u32 tmp;
+ 
+-      /* Fiji, Stoney, Polaris10, Polaris11, Polaris12 are single pipe */
+       if ((adev->asic_type == CHIP_FIJI) ||
+-          (adev->asic_type == CHIP_STONEY) ||
+-          (adev->asic_type == CHIP_POLARIS10) ||
+-          (adev->asic_type == CHIP_POLARIS11) ||
+-          (adev->asic_type == CHIP_POLARIS12))
++          (adev->asic_type == CHIP_STONEY))
+               return AMDGPU_VCE_HARVEST_VCE1;
+ 
+-      /* Tonga and CZ are dual or single pipe */
+       if (adev->flags & AMD_IS_APU)
+               tmp = (RREG32_SMC(ixVCE_HARVEST_FUSE_MACRO__ADDRESS) &
+                      VCE_HARVEST_FUSE_MACRO__MASK) >>
+@@ -391,6 +386,11 @@ static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
+       case 3:
+               return AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1;
+       default:
++              if ((adev->asic_type == CHIP_POLARIS10) ||
++                  (adev->asic_type == CHIP_POLARIS11) ||
++                  (adev->asic_type == CHIP_POLARIS12))
++                      return AMDGPU_VCE_HARVEST_VCE1;
++
+               return 0;
+       }
+ }
+diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
+index 61c313e21a91..169843de91cb 100644
+--- a/drivers/gpu/drm/i915/intel_dp.c
++++ b/drivers/gpu/drm/i915/intel_dp.c
+@@ -3687,9 +3687,16 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
+ 
+       }
+ 
+-      /* Read the eDP Display control capabilities registers */
+-      if ((intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
+-          drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
++      /*
++       * Read the eDP display control registers.
++       *
++       * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in
++       * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it
++       * set, but require eDP 1.4+ detection (e.g. for supported link rates
++       * method). The display control registers should read zero if they're
++       * not supported anyway.
++       */
++      if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
+                            intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
+                            sizeof(intel_dp->edp_dpcd))
+              DRM_DEBUG_KMS("EDP DPCD : %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
+diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
+index d93efb49a2e2..954e9454625e 100644
+--- a/drivers/gpu/drm/i915/intel_drv.h
++++ b/drivers/gpu/drm/i915/intel_drv.h
+@@ -495,7 +495,6 @@ struct intel_crtc_scaler_state {
+ 
+ struct intel_pipe_wm {
+       struct intel_wm_level wm[5];
+-      struct intel_wm_level raw_wm[5];
+       uint32_t linetime;
+       bool fbc_wm_enabled;
+       bool pipe_enabled;
+diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
+index 40b224b44d1b..1427cec843b9 100644
+--- a/drivers/gpu/drm/i915/intel_pm.c
++++ b/drivers/gpu/drm/i915/intel_pm.c
+@@ -2696,9 +2696,9 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
+                                const struct intel_crtc *intel_crtc,
+                                int level,
+                                struct intel_crtc_state *cstate,
+-                               struct intel_plane_state *pristate,
+-                               struct intel_plane_state *sprstate,
+-                               struct intel_plane_state *curstate,
++                               const struct intel_plane_state *pristate,
++                               const struct intel_plane_state *sprstate,
++                               const struct intel_plane_state *curstate,
+                                struct intel_wm_level *result)
+ {
+       uint16_t pri_latency = dev_priv->wm.pri_latency[level];
+@@ -3016,28 +3016,24 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
+       struct intel_pipe_wm *pipe_wm;
+       struct drm_device *dev = state->dev;
+       const struct drm_i915_private *dev_priv = to_i915(dev);
+-      struct intel_plane *intel_plane;
+-      struct intel_plane_state *pristate = NULL;
+-      struct intel_plane_state *sprstate = NULL;
+-      struct intel_plane_state *curstate = NULL;
++      struct drm_plane *plane;
++      const struct drm_plane_state *plane_state;
++      const struct intel_plane_state *pristate = NULL;
++      const struct intel_plane_state *sprstate = NULL;
++      const struct intel_plane_state *curstate = NULL;
+       int level, max_level = ilk_wm_max_level(dev_priv), usable_level;
+       struct ilk_wm_maximums max;
+ 
+       pipe_wm = &cstate->wm.ilk.optimal;
+ 
+-      for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
+-              struct intel_plane_state *ps;
+-
+-              ps = intel_atomic_get_existing_plane_state(state,
+-                                                         intel_plane);
+-              if (!ps)
+-                      continue;
++      drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, &cstate->base) {
++              const struct intel_plane_state *ps = to_intel_plane_state(plane_state);
+ 
+-              if (intel_plane->base.type == DRM_PLANE_TYPE_PRIMARY)
++              if (plane->type == DRM_PLANE_TYPE_PRIMARY)
+                       pristate = ps;
+-              else if (intel_plane->base.type == DRM_PLANE_TYPE_OVERLAY)
++              else if (plane->type == DRM_PLANE_TYPE_OVERLAY)
+                       sprstate = ps;
+-              else if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR)
++              else if (plane->type == DRM_PLANE_TYPE_CURSOR)
+                       curstate = ps;
+       }
+ 
+@@ -3059,11 +3055,9 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
+       if (pipe_wm->sprites_scaled)
+               usable_level = 0;
+ 
+-      ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate,
+-                           pristate, sprstate, curstate, &pipe_wm->raw_wm[0]);
+-
+       memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
+-      pipe_wm->wm[0] = pipe_wm->raw_wm[0];
++      ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate,
++                           pristate, sprstate, curstate, &pipe_wm->wm[0]);
+ 
+       if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+               pipe_wm->linetime = hsw_compute_linetime_wm(cstate);
+@@ -3073,8 +3067,8 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
+ 
+       ilk_compute_wm_reg_maximums(dev_priv, 1, &max);
+ 
+-      for (level = 1; level <= max_level; level++) {
+-              struct intel_wm_level *wm = &pipe_wm->raw_wm[level];
++      for (level = 1; level <= usable_level; level++) {
++              struct intel_wm_level *wm = &pipe_wm->wm[level];
+ 
+               ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate,
+                                    pristate, sprstate, curstate, wm);
+@@ -3084,13 +3078,10 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state 
*cstate)
+                * register maximums since such watermarks are
+                * always invalid.
+                */
+-              if (level > usable_level)
+-                      continue;
+-
+-              if (ilk_validate_wm_level(level, &max, wm))
+-                      pipe_wm->wm[level] = *wm;
+-              else
+-                      usable_level = level;
++              if (!ilk_validate_wm_level(level, &max, wm)) {
++                      memset(wm, 0, sizeof(*wm));
++                      break;
++              }
+       }
+ 
+       return 0;
+diff --git a/drivers/irqchip/irq-mvebu-gicp.c 
b/drivers/irqchip/irq-mvebu-gicp.c
+index b283fc90be1e..17a4a7b6cdbb 100644
+--- a/drivers/irqchip/irq-mvebu-gicp.c
++++ b/drivers/irqchip/irq-mvebu-gicp.c
+@@ -194,6 +194,7 @@ static int mvebu_gicp_probe(struct platform_device *pdev)
+               return -ENOMEM;
+ 
+       gicp->dev = &pdev->dev;
++      spin_lock_init(&gicp->spi_lock);
+ 
+       gicp->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!gicp->res)
+diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
+index e702d48bd023..81ba6e0d88d8 100644
+--- a/fs/cifs/dir.c
++++ b/fs/cifs/dir.c
+@@ -204,7 +204,8 @@ check_name(struct dentry *direntry, struct cifs_tcon *tcon)
+       struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
+       int i;
+ 
+-      if (unlikely(direntry->d_name.len >
++      if (unlikely(tcon->fsAttrInfo.MaxPathNameComponentLength &&
++                   direntry->d_name.len >
+                    le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength)))
+               return -ENAMETOOLONG;
+ 
+@@ -520,7 +521,7 @@ cifs_atomic_open(struct inode *inode, struct dentry 
*direntry,
+ 
+       rc = check_name(direntry, tcon);
+       if (rc)
+-              goto out_free_xid;
++              goto out;
+ 
+       server = tcon->ses->server;
+ 
+diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
+index 28d2753be094..a9e3b26e1b72 100644
+--- a/fs/hugetlbfs/inode.c
++++ b/fs/hugetlbfs/inode.c
+@@ -855,9 +855,12 @@ static int hugetlbfs_error_remove_page(struct 
address_space *mapping,
+                               struct page *page)
+ {
+       struct inode *inode = mapping->host;
++      pgoff_t index = page->index;
+ 
+       remove_huge_page(page);
+-      hugetlb_fix_reserve_counts(inode);
++      if (unlikely(hugetlb_unreserve_pages(inode, index, index + 1, 1)))
++              hugetlb_fix_reserve_counts(inode);
++
+       return 0;
+ }
+ 
+diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
+index fb15a96df0b6..386aecce881d 100644
+--- a/fs/ocfs2/alloc.c
++++ b/fs/ocfs2/alloc.c
+@@ -7310,13 +7310,24 @@ int ocfs2_truncate_inline(struct inode *inode, struct 
buffer_head *di_bh,
+ 
+ static int ocfs2_trim_extent(struct super_block *sb,
+                            struct ocfs2_group_desc *gd,
+-                           u32 start, u32 count)
++                           u64 group, u32 start, u32 count)
+ {
+       u64 discard, bcount;
++      struct ocfs2_super *osb = OCFS2_SB(sb);
+ 
+       bcount = ocfs2_clusters_to_blocks(sb, count);
+-      discard = le64_to_cpu(gd->bg_blkno) +
+-                      ocfs2_clusters_to_blocks(sb, start);
++      discard = ocfs2_clusters_to_blocks(sb, start);
++
++      /*
++       * For the first cluster group, the gd->bg_blkno is not at the start
++       * of the group, but at an offset from the start. If we add it while
++       * calculating discard for first group, we will wrongly start fstrim a
++       * few blocks after the desried start block and the range can cross
++       * over into the next cluster group. So, add it only if this is not
++       * the first cluster group.
++       */
++      if (group != osb->first_cluster_group_blkno)
++              discard += le64_to_cpu(gd->bg_blkno);
+ 
+       trace_ocfs2_trim_extent(sb, (unsigned long long)discard, bcount);
+ 
+@@ -7324,7 +7335,7 @@ static int ocfs2_trim_extent(struct super_block *sb,
+ }
+ 
+ static int ocfs2_trim_group(struct super_block *sb,
+-                          struct ocfs2_group_desc *gd,
++                          struct ocfs2_group_desc *gd, u64 group,
+                           u32 start, u32 max, u32 minbits)
+ {
+       int ret = 0, count = 0, next;
+@@ -7343,7 +7354,7 @@ static int ocfs2_trim_group(struct super_block *sb,
+               next = ocfs2_find_next_bit(bitmap, max, start);
+ 
+               if ((next - start) >= minbits) {
+-                      ret = ocfs2_trim_extent(sb, gd,
++                      ret = ocfs2_trim_extent(sb, gd, group,
+                                               start, next - start);
+                       if (ret < 0) {
+                               mlog_errno(ret);
+@@ -7441,7 +7452,8 @@ int ocfs2_trim_fs(struct super_block *sb, struct 
fstrim_range *range)
+               }
+ 
+               gd = (struct ocfs2_group_desc *)gd_bh->b_data;
+-              cnt = ocfs2_trim_group(sb, gd, first_bit, last_bit, minlen);
++              cnt = ocfs2_trim_group(sb, gd, group,
++                                     first_bit, last_bit, minlen);
+               brelse(gd_bh);
+               gd_bh = NULL;
+               if (cnt < 0) {
+diff --git a/include/linux/swap.h b/include/linux/swap.h
+index d83d28e53e62..a615eda102ae 100644
+--- a/include/linux/swap.h
++++ b/include/linux/swap.h
+@@ -246,6 +246,10 @@ struct swap_info_struct {
+                                        * both locks need hold, hold swap_lock
+                                        * first.
+                                        */
++      spinlock_t cont_lock;           /*
++                                       * protect swap count continuation page
++                                       * list.
++                                       */
+       struct work_struct discard_work; /* discard worker */
+       struct swap_cluster_list discard_clusters; /* discard clusters list */
+ };
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 7242a6e1ec76..95bbe99e4e6c 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -901,9 +901,11 @@ list_update_cgroup_event(struct perf_event *event,
+       cpuctx_entry = &cpuctx->cgrp_cpuctx_entry;
+       /* cpuctx->cgrp is NULL unless a cgroup event is active in this CPU .*/
+       if (add) {
++              struct perf_cgroup *cgrp = perf_cgroup_from_task(current, ctx);
++
+               list_add(cpuctx_entry, this_cpu_ptr(&cgrp_cpuctx_list));
+-              if (perf_cgroup_from_task(current, ctx) == event->cgrp)
+-                      cpuctx->cgrp = event->cgrp;
++              if (cgroup_is_descendant(cgrp->css.cgroup, 
event->cgrp->css.cgroup))
++                      cpuctx->cgrp = cgrp;
+       } else {
+               list_del(cpuctx_entry);
+               cpuctx->cgrp = NULL;
+diff --git a/kernel/futex.c b/kernel/futex.c
+index bf57ab12ffe8..a6639b346373 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -901,11 +901,27 @@ void exit_pi_state_list(struct task_struct *curr)
+        */
+       raw_spin_lock_irq(&curr->pi_lock);
+       while (!list_empty(head)) {
+-
+               next = head->next;
+               pi_state = list_entry(next, struct futex_pi_state, list);
+               key = pi_state->key;
+               hb = hash_futex(&key);
++
++              /*
++               * We can race against put_pi_state() removing itself from the
++               * list (a waiter going away). put_pi_state() will first
++               * decrement the reference count and then modify the list, so
++               * its possible to see the list entry but fail this reference
++               * acquire.
++               *
++               * In that case; drop the locks to let put_pi_state() make
++               * progress and retry the loop.
++               */
++              if (!atomic_inc_not_zero(&pi_state->refcount)) {
++                      raw_spin_unlock_irq(&curr->pi_lock);
++                      cpu_relax();
++                      raw_spin_lock_irq(&curr->pi_lock);
++                      continue;
++              }
+               raw_spin_unlock_irq(&curr->pi_lock);
+ 
+               spin_lock(&hb->lock);
+@@ -916,8 +932,10 @@ void exit_pi_state_list(struct task_struct *curr)
+                * task still owns the PI-state:
+                */
+               if (head->next != next) {
++                      /* retain curr->pi_lock for the loop invariant */
+                       raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
+                       spin_unlock(&hb->lock);
++                      put_pi_state(pi_state);
+                       continue;
+               }
+ 
+@@ -925,9 +943,8 @@ void exit_pi_state_list(struct task_struct *curr)
+               WARN_ON(list_empty(&pi_state->list));
+               list_del_init(&pi_state->list);
+               pi_state->owner = NULL;
+-              raw_spin_unlock(&curr->pi_lock);
+ 
+-              get_pi_state(pi_state);
++              raw_spin_unlock(&curr->pi_lock);
+               raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
+               spin_unlock(&hb->lock);
+ 
+diff --git a/lib/asn1_decoder.c b/lib/asn1_decoder.c
+index 0bd8a611eb83..fef5d2e114be 100644
+--- a/lib/asn1_decoder.c
++++ b/lib/asn1_decoder.c
+@@ -284,6 +284,9 @@ int asn1_ber_decoder(const struct asn1_decoder *decoder,
+                               if (unlikely(len > datalen - dp))
+                                       goto data_overrun_error;
+                       }
++              } else {
++                      if (unlikely(len > datalen - dp))
++                              goto data_overrun_error;
+               }
+ 
+               if (flags & FLAG_CONS) {
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 31e207cb399b..011725849f52 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -3977,6 +3977,9 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
+                           unsigned long src_addr,
+                           struct page **pagep)
+ {
++      struct address_space *mapping;
++      pgoff_t idx;
++      unsigned long size;
+       int vm_shared = dst_vma->vm_flags & VM_SHARED;
+       struct hstate *h = hstate_vma(dst_vma);
+       pte_t _dst_pte;
+@@ -4014,13 +4017,24 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
+       __SetPageUptodate(page);
+       set_page_huge_active(page);
+ 
++      mapping = dst_vma->vm_file->f_mapping;
++      idx = vma_hugecache_offset(h, dst_vma, dst_addr);
++
+       /*
+        * If shared, add to page cache
+        */
+       if (vm_shared) {
+-              struct address_space *mapping = dst_vma->vm_file->f_mapping;
+-              pgoff_t idx = vma_hugecache_offset(h, dst_vma, dst_addr);
++              size = i_size_read(mapping->host) >> huge_page_shift(h);
++              ret = -EFAULT;
++              if (idx >= size)
++                      goto out_release_nounlock;
+ 
++              /*
++               * Serialization between remove_inode_hugepages() and
++               * huge_add_to_page_cache() below happens through the
++               * hugetlb_fault_mutex_table that here must be hold by
++               * the caller.
++               */
+               ret = huge_add_to_page_cache(page, mapping, idx);
+               if (ret)
+                       goto out_release_nounlock;
+@@ -4029,6 +4043,20 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
+       ptl = huge_pte_lockptr(h, dst_mm, dst_pte);
+       spin_lock(ptl);
+ 
++      /*
++       * Recheck the i_size after holding PT lock to make sure not
++       * to leave any page mapped (as page_mapped()) beyond the end
++       * of the i_size (remove_inode_hugepages() is strict about
++       * enforcing that). If we bail out here, we'll also leave a
++       * page in the radix tree in the vm_shared case beyond the end
++       * of the i_size, but remove_inode_hugepages() will take care
++       * of it as soon as we drop the hugetlb_fault_mutex_table.
++       */
++      size = i_size_read(mapping->host) >> huge_page_shift(h);
++      ret = -EFAULT;
++      if (idx >= size)
++              goto out_release_unlock;
++
+       ret = -EEXIST;
+       if (!huge_pte_none(huge_ptep_get(dst_pte)))
+               goto out_release_unlock;
+diff --git a/mm/swapfile.c b/mm/swapfile.c
+index a8952b6563c6..3191465b0ccf 100644
+--- a/mm/swapfile.c
++++ b/mm/swapfile.c
+@@ -2635,6 +2635,7 @@ static struct swap_info_struct *alloc_swap_info(void)
+       p->flags = SWP_USED;
+       spin_unlock(&swap_lock);
+       spin_lock_init(&p->lock);
++      spin_lock_init(&p->cont_lock);
+ 
+       return p;
+ }
+@@ -3307,6 +3308,7 @@ int add_swap_count_continuation(swp_entry_t entry, gfp_t 
gfp_mask)
+       head = vmalloc_to_page(si->swap_map + offset);
+       offset &= ~PAGE_MASK;
+ 
++      spin_lock(&si->cont_lock);
+       /*
+        * Page allocation does not initialize the page's lru field,
+        * but it does always reset its private field.
+@@ -3326,7 +3328,7 @@ int add_swap_count_continuation(swp_entry_t entry, gfp_t 
gfp_mask)
+                * a continuation page, free our allocation and use this one.
+                */
+               if (!(count & COUNT_CONTINUED))
+-                      goto out;
++                      goto out_unlock_cont;
+ 
+               map = kmap_atomic(list_page) + offset;
+               count = *map;
+@@ -3337,11 +3339,13 @@ int add_swap_count_continuation(swp_entry_t entry, 
gfp_t gfp_mask)
+                * free our allocation and use this one.
+                */
+               if ((count & ~COUNT_CONTINUED) != SWAP_CONT_MAX)
+-                      goto out;
++                      goto out_unlock_cont;
+       }
+ 
+       list_add_tail(&page->lru, &head->lru);
+       page = NULL;                    /* now it's attached, don't free it */
++out_unlock_cont:
++      spin_unlock(&si->cont_lock);
+ out:
+       unlock_cluster(ci);
+       spin_unlock(&si->lock);
+@@ -3366,6 +3370,7 @@ static bool swap_count_continued(struct swap_info_struct 
*si,
+       struct page *head;
+       struct page *page;
+       unsigned char *map;
++      bool ret;
+ 
+       head = vmalloc_to_page(si->swap_map + offset);
+       if (page_private(head) != SWP_CONTINUED) {
+@@ -3373,6 +3378,7 @@ static bool swap_count_continued(struct swap_info_struct 
*si,
+               return false;           /* need to add count continuation */
+       }
+ 
++      spin_lock(&si->cont_lock);
+       offset &= ~PAGE_MASK;
+       page = list_entry(head->lru.next, struct page, lru);
+       map = kmap_atomic(page) + offset;
+@@ -3393,8 +3399,10 @@ static bool swap_count_continued(struct 
swap_info_struct *si,
+               if (*map == SWAP_CONT_MAX) {
+                       kunmap_atomic(map);
+                       page = list_entry(page->lru.next, struct page, lru);
+-                      if (page == head)
+-                              return false;   /* add count continuation */
++                      if (page == head) {
++                              ret = false;    /* add count continuation */
++                              goto out;
++                      }
+                       map = kmap_atomic(page) + offset;
+ init_map:             *map = 0;               /* we didn't zero the page */
+               }
+@@ -3407,7 +3415,7 @@ init_map:                *map = 0;               /* we 
didn't zero the page */
+                       kunmap_atomic(map);
+                       page = list_entry(page->lru.prev, struct page, lru);
+               }
+-              return true;                    /* incremented */
++              ret = true;                     /* incremented */
+ 
+       } else {                                /* decrementing */
+               /*
+@@ -3433,8 +3441,11 @@ init_map:               *map = 0;               /* we 
didn't zero the page */
+                       kunmap_atomic(map);
+                       page = list_entry(page->lru.prev, struct page, lru);
+               }
+-              return count == COUNT_CONTINUED;
++              ret = count == COUNT_CONTINUED;
+       }
++out:
++      spin_unlock(&si->cont_lock);
++      return ret;
+ }
+ 
+ /*
+diff --git a/security/keys/keyring.c b/security/keys/keyring.c
+index 06173b091a74..c04032302a25 100644
+--- a/security/keys/keyring.c
++++ b/security/keys/keyring.c
+@@ -459,34 +459,33 @@ static long keyring_read(const struct key *keyring,
+                        char __user *buffer, size_t buflen)
+ {
+       struct keyring_read_iterator_context ctx;
+-      unsigned long nr_keys;
+-      int ret;
++      long ret;
+ 
+       kenter("{%d},,%zu", key_serial(keyring), buflen);
+ 
+       if (buflen & (sizeof(key_serial_t) - 1))
+               return -EINVAL;
+ 
+-      nr_keys = keyring->keys.nr_leaves_on_tree;
+-      if (nr_keys == 0)
+-              return 0;
+-
+-      /* Calculate how much data we could return */
+-      if (!buffer || !buflen)
+-              return nr_keys * sizeof(key_serial_t);
+-
+-      /* Copy the IDs of the subscribed keys into the buffer */
+-      ctx.buffer = (key_serial_t __user *)buffer;
+-      ctx.buflen = buflen;
+-      ctx.count = 0;
+-      ret = assoc_array_iterate(&keyring->keys, keyring_read_iterator, &ctx);
+-      if (ret < 0) {
+-              kleave(" = %d [iterate]", ret);
+-              return ret;
++      /* Copy as many key IDs as fit into the buffer */
++      if (buffer && buflen) {
++              ctx.buffer = (key_serial_t __user *)buffer;
++              ctx.buflen = buflen;
++              ctx.count = 0;
++              ret = assoc_array_iterate(&keyring->keys,
++                                        keyring_read_iterator, &ctx);
++              if (ret < 0) {
++                      kleave(" = %ld [iterate]", ret);
++                      return ret;
++              }
+       }
+ 
+-      kleave(" = %zu [ok]", ctx.count);
+-      return ctx.count;
++      /* Return the size of the buffer needed */
++      ret = keyring->keys.nr_leaves_on_tree * sizeof(key_serial_t);
++      if (ret <= buflen)
++              kleave("= %ld [ok]", ret);
++      else
++              kleave("= %ld [buffer too small]", ret);
++      return ret;
+ }
+ 
+ /*
+diff --git a/security/keys/trusted.c b/security/keys/trusted.c
+index bd85315cbfeb..98aa89ff7bfd 100644
+--- a/security/keys/trusted.c
++++ b/security/keys/trusted.c
+@@ -1147,20 +1147,21 @@ static long trusted_read(const struct key *key, char 
__user *buffer,
+       p = dereference_key_locked(key);
+       if (!p)
+               return -EINVAL;
+-      if (!buffer || buflen <= 0)
+-              return 2 * p->blob_len;
+-      ascii_buf = kmalloc(2 * p->blob_len, GFP_KERNEL);
+-      if (!ascii_buf)
+-              return -ENOMEM;
+ 
+-      bufp = ascii_buf;
+-      for (i = 0; i < p->blob_len; i++)
+-              bufp = hex_byte_pack(bufp, p->blob[i]);
+-      if ((copy_to_user(buffer, ascii_buf, 2 * p->blob_len)) != 0) {
++      if (buffer && buflen >= 2 * p->blob_len) {
++              ascii_buf = kmalloc(2 * p->blob_len, GFP_KERNEL);
++              if (!ascii_buf)
++                      return -ENOMEM;
++
++              bufp = ascii_buf;
++              for (i = 0; i < p->blob_len; i++)
++                      bufp = hex_byte_pack(bufp, p->blob[i]);
++              if (copy_to_user(buffer, ascii_buf, 2 * p->blob_len) != 0) {
++                      kzfree(ascii_buf);
++                      return -EFAULT;
++              }
+               kzfree(ascii_buf);
+-              return -EFAULT;
+       }
+-      kzfree(ascii_buf);
+       return 2 * p->blob_len;
+ }
+ 
+diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
+index 6c9cba2166d9..d10c780dfd54 100644
+--- a/sound/core/seq/seq_clientmgr.c
++++ b/sound/core/seq/seq_clientmgr.c
+@@ -663,7 +663,7 @@ static int deliver_to_subscribers(struct snd_seq_client 
*client,
+       if (atomic)
+               read_lock(&grp->list_lock);
+       else
+-              down_read(&grp->list_mutex);
++              down_read_nested(&grp->list_mutex, hop);
+       list_for_each_entry(subs, &grp->list_head, src_list) {
+               /* both ports ready? */
+               if (atomic_read(&subs->ref_count) != 2)
+diff --git a/sound/core/timer_compat.c b/sound/core/timer_compat.c
+index 6a437eb66115..59127b6ef39e 100644
+--- a/sound/core/timer_compat.c
++++ b/sound/core/timer_compat.c
+@@ -133,7 +133,8 @@ enum {
+ #endif /* CONFIG_X86_X32 */
+ };
+ 
+-static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, 
unsigned long arg)
++static long __snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd,
++                                        unsigned long arg)
+ {
+       void __user *argp = compat_ptr(arg);
+ 
+@@ -153,7 +154,7 @@ static long snd_timer_user_ioctl_compat(struct file *file, 
unsigned int cmd, uns
+       case SNDRV_TIMER_IOCTL_PAUSE:
+       case SNDRV_TIMER_IOCTL_PAUSE_OLD:
+       case SNDRV_TIMER_IOCTL_NEXT_DEVICE:
+-              return snd_timer_user_ioctl(file, cmd, (unsigned long)argp);
++              return __snd_timer_user_ioctl(file, cmd, (unsigned long)argp);
+       case SNDRV_TIMER_IOCTL_GPARAMS32:
+               return snd_timer_user_gparams_compat(file, argp);
+       case SNDRV_TIMER_IOCTL_INFO32:
+@@ -167,3 +168,15 @@ static long snd_timer_user_ioctl_compat(struct file 
*file, unsigned int cmd, uns
+       }
+       return -ENOIOCTLCMD;
+ }
++
++static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd,
++                                      unsigned long arg)
++{
++      struct snd_timer_user *tu = file->private_data;
++      long ret;
++
++      mutex_lock(&tu->ioctl_lock);
++      ret = __snd_timer_user_ioctl_compat(file, cmd, arg);
++      mutex_unlock(&tu->ioctl_lock);
++      return ret;
++}
+diff --git a/sound/soc/codecs/adau17x1.c b/sound/soc/codecs/adau17x1.c
+index 2c1bd2763864..6758f789b712 100644
+--- a/sound/soc/codecs/adau17x1.c
++++ b/sound/soc/codecs/adau17x1.c
+@@ -90,6 +90,27 @@ static int adau17x1_pll_event(struct snd_soc_dapm_widget *w,
+       return 0;
+ }
+ 
++static int adau17x1_adc_fixup(struct snd_soc_dapm_widget *w,
++      struct snd_kcontrol *kcontrol, int event)
++{
++      struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
++      struct adau *adau = snd_soc_codec_get_drvdata(codec);
++
++      /*
++       * If we are capturing, toggle the ADOSR bit in Converter Control 0 to
++       * avoid losing SNR (workaround from ADI). This must be done after
++       * the ADC(s) have been enabled. According to the data sheet, it is
++       * normally illegal to set this bit when the sampling rate is 96 kHz,
++       * but according to ADI it is acceptable for this workaround.
++       */
++      regmap_update_bits(adau->regmap, ADAU17X1_CONVERTER0,
++              ADAU17X1_CONVERTER0_ADOSR, ADAU17X1_CONVERTER0_ADOSR);
++      regmap_update_bits(adau->regmap, ADAU17X1_CONVERTER0,
++              ADAU17X1_CONVERTER0_ADOSR, 0);
++
++      return 0;
++}
++
+ static const char * const adau17x1_mono_stereo_text[] = {
+       "Stereo",
+       "Mono Left Channel (L+R)",
+@@ -121,7 +142,8 @@ static const struct snd_soc_dapm_widget 
adau17x1_dapm_widgets[] = {
+       SND_SOC_DAPM_MUX("Right DAC Mode Mux", SND_SOC_NOPM, 0, 0,
+               &adau17x1_dac_mode_mux),
+ 
+-      SND_SOC_DAPM_ADC("Left Decimator", NULL, ADAU17X1_ADC_CONTROL, 0, 0),
++      SND_SOC_DAPM_ADC_E("Left Decimator", NULL, ADAU17X1_ADC_CONTROL, 0, 0,
++                         adau17x1_adc_fixup, SND_SOC_DAPM_POST_PMU),
+       SND_SOC_DAPM_ADC("Right Decimator", NULL, ADAU17X1_ADC_CONTROL, 1, 0),
+       SND_SOC_DAPM_DAC("Left DAC", NULL, ADAU17X1_DAC_CONTROL0, 0, 0),
+       SND_SOC_DAPM_DAC("Right DAC", NULL, ADAU17X1_DAC_CONTROL0, 1, 0),
+diff --git a/sound/soc/codecs/adau17x1.h b/sound/soc/codecs/adau17x1.h
+index bf04b7efee40..db350035fad7 100644
+--- a/sound/soc/codecs/adau17x1.h
++++ b/sound/soc/codecs/adau17x1.h
+@@ -129,5 +129,7 @@ bool adau17x1_has_dsp(struct adau *adau);
+ 
+ #define ADAU17X1_CONVERTER0_CONVSR_MASK               0x7
+ 
++#define ADAU17X1_CONVERTER0_ADOSR             BIT(3)
++
+ 
+ #endif
+diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
+index aa6b68db80b4..b606f1643fe5 100644
+--- a/virt/kvm/arm/vgic/vgic-its.c
++++ b/virt/kvm/arm/vgic/vgic-its.c
+@@ -1803,37 +1803,33 @@ typedef int (*entry_fn_t)(struct vgic_its *its, u32 
id, void *entry,
+ static int scan_its_table(struct vgic_its *its, gpa_t base, int size, int esz,
+                         int start_id, entry_fn_t fn, void *opaque)
+ {
+-      void *entry = kzalloc(esz, GFP_KERNEL);
+       struct kvm *kvm = its->dev->kvm;
+       unsigned long len = size;
+       int id = start_id;
+       gpa_t gpa = base;
++      char entry[esz];
+       int ret;
+ 
++      memset(entry, 0, esz);
++
+       while (len > 0) {
+               int next_offset;
+               size_t byte_offset;
+ 
+               ret = kvm_read_guest(kvm, gpa, entry, esz);
+               if (ret)
+-                      goto out;
++                      return ret;
+ 
+               next_offset = fn(its, id, entry, opaque);
+-              if (next_offset <= 0) {
+-                      ret = next_offset;
+-                      goto out;
+-              }
++              if (next_offset <= 0)
++                      return next_offset;
+ 
+               byte_offset = next_offset * esz;
+               id += next_offset;
+               gpa += byte_offset;
+               len -= byte_offset;
+       }
+-      ret =  1;
+-
+-out:
+-      kfree(entry);
+-      return ret;
++      return 1;
+ }
+ 
+ /**

Reply via email to