Author: mpagano
Date: 2014-06-13 17:54:39 +0000 (Fri, 13 Jun 2014)
New Revision: 2836

Added:
   genpatches-2.6/trunk/3.10/1042_linux-3.10.43.patch
Modified:
   genpatches-2.6/trunk/3.10/0000_README
Log:
Linux patch 3.10.43
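
This 1042 patch is the incremental diff taking 3.10.42 sources to 3.10.43
(the Makefile hunk below bumps SUBLEVEL from 42 to 43). As a rough sketch,
outside the normal gentoo-sources ebuild machinery, it could be applied by
hand with patch(1) to a tree that already carries the earlier genpatches
(-p1 because the patch uses a/ and b/ path prefixes); the directory and
path names here are illustrative only:

    cd linux-3.10.42
    patch -p1 < /path/to/genpatches-2.6/trunk/3.10/1042_linux-3.10.43.patch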

Modified: genpatches-2.6/trunk/3.10/0000_README
===================================================================
--- genpatches-2.6/trunk/3.10/0000_README       2014-06-12 13:15:55 UTC (rev 2835)
+++ genpatches-2.6/trunk/3.10/0000_README       2014-06-13 17:54:39 UTC (rev 2836)
@@ -210,6 +210,10 @@
 From:   http://www.kernel.org
 Desc:   Linux 3.10.42
 
+Patch:  1042_linux-3.10.43.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.10.43
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

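Much of the added patch below is the backported perf sampling throttle; on a
kernel built from these sources the new knob shows up as
kernel.perf_cpu_time_max_percent (see the Documentation/sysctl/kernel.txt and
kernel/sysctl.c hunks). An illustrative sketch of inspecting and tuning it,
with meanings as given in the documentation hunk:

    sysctl kernel.perf_cpu_time_max_percent         # default 25 (DEFAULT_CPU_TIME_MAX_PERCENT)
    sysctl -w kernel.perf_cpu_time_max_percent=50   # throttle sampling to about half the expected sample length
    sysctl -w kernel.perf_cpu_time_max_percent=0    # disable the monitoring/correction entirely
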
Added: genpatches-2.6/trunk/3.10/1042_linux-3.10.43.patch
===================================================================
--- genpatches-2.6/trunk/3.10/1042_linux-3.10.43.patch                          (rev 0)
+++ genpatches-2.6/trunk/3.10/1042_linux-3.10.43.patch  2014-06-13 17:54:39 UTC (rev 2836)
@@ -0,0 +1,1387 @@
+diff --git a/Documentation/DocBook/media/Makefile b/Documentation/DocBook/media/Makefile
+index f9fd615427fb..1d27f0a1abd1 100644
+--- a/Documentation/DocBook/media/Makefile
++++ b/Documentation/DocBook/media/Makefile
+@@ -195,7 +195,7 @@ DVB_DOCUMENTED = \
+ #
+ 
+ install_media_images = \
+-      $(Q)cp $(OBJIMGFILES) $(MEDIA_SRC_DIR)/v4l/*.svg $(MEDIA_OBJ_DIR)/media_api
++      $(Q)-cp $(OBJIMGFILES) $(MEDIA_SRC_DIR)/v4l/*.svg $(MEDIA_OBJ_DIR)/media_api
+ 
+ $(MEDIA_OBJ_DIR)/%: $(MEDIA_SRC_DIR)/%.b64
+       $(Q)base64 -d $< >$@
+diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
+index 9b34b1685078..8d90c42e5db6 100644
+--- a/Documentation/sysctl/kernel.txt
++++ b/Documentation/sysctl/kernel.txt
+@@ -438,6 +438,32 @@ This file shows up if CONFIG_DEBUG_STACKOVERFLOW is enabled.
+ 
+ ==============================================================
+ 
++perf_cpu_time_max_percent:
++
++Hints to the kernel how much CPU time it should be allowed to
++use to handle perf sampling events.  If the perf subsystem
++is informed that its samples are exceeding this limit, it
++will drop its sampling frequency to attempt to reduce its CPU
++usage.
++
++Some perf sampling happens in NMIs.  If these samples
++unexpectedly take too long to execute, the NMIs can become
++stacked up next to each other so much that nothing else is
++allowed to execute.
++
++0: disable the mechanism.  Do not monitor or correct perf's
++   sampling rate no matter how CPU time it takes.
++
++1-100: attempt to throttle perf's sample rate to this
++   percentage of CPU.  Note: the kernel calculates an
++   "expected" length of each sample event.  100 here means
++   100% of that expected length.  Even if this is set to
++   100, you may still see sample throttling if this
++   length is exceeded.  Set to 0 if you truly do not care
++   how much CPU is consumed.
++
++==============================================================
++
+ 
+ pid_max:
+ 
+diff --git a/Makefile b/Makefile
+index 4634015fed68..9cf513828341 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 10
+-SUBLEVEL = 42
++SUBLEVEL = 43
+ EXTRAVERSION =
+ NAME = TOSSUG Baby Fish
+ 
+diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
+index 7e1f76027f66..20e1c994669e 100644
+--- a/arch/arm/include/asm/uaccess.h
++++ b/arch/arm/include/asm/uaccess.h
+@@ -164,8 +164,9 @@ extern int __put_user_8(void *, unsigned long long);
+ #define __put_user_check(x,p)                                                 \
+       ({                                                              \
+               unsigned long __limit = current_thread_info()->addr_limit - 1; \
++              const typeof(*(p)) __user *__tmp_p = (p);               \
+               register const typeof(*(p)) __r2 asm("r2") = (x);       \
+-              register const typeof(*(p)) __user *__p asm("r0") = (p);\
++              register const typeof(*(p)) __user *__p asm("r0") = __tmp_p; \
+               register unsigned long __l asm("r1") = __limit;         \
+               register int __e asm("r0");                             \
+               switch (sizeof(*(__p))) {                               \
+diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
+index e19edc6f2d15..ace0ce8f6641 100644
+--- a/arch/arm/kernel/perf_event.c
++++ b/arch/arm/kernel/perf_event.c
+@@ -303,11 +303,18 @@ static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
+       struct arm_pmu *armpmu = (struct arm_pmu *) dev;
+       struct platform_device *plat_device = armpmu->plat_device;
+       struct arm_pmu_platdata *plat = dev_get_platdata(&plat_device->dev);
++      int ret;
++      u64 start_clock, finish_clock;
+ 
++      start_clock = sched_clock();
+       if (plat && plat->handle_irq)
+-              return plat->handle_irq(irq, dev, armpmu->handle_irq);
++              ret = plat->handle_irq(irq, dev, armpmu->handle_irq);
+       else
+-              return armpmu->handle_irq(irq, dev);
++              ret = armpmu->handle_irq(irq, dev);
++      finish_clock = sched_clock();
++
++      perf_sample_event_took(finish_clock - start_clock);
++      return ret;
+ }
+ 
+ static void
+diff --git a/arch/arm/mach-imx/devices/platform-ipu-core.c b/arch/arm/mach-imx/devices/platform-ipu-core.c
+index fc4dd7cedc11..6bd7c3f37ac0 100644
+--- a/arch/arm/mach-imx/devices/platform-ipu-core.c
++++ b/arch/arm/mach-imx/devices/platform-ipu-core.c
+@@ -77,7 +77,7 @@ struct platform_device *__init imx_alloc_mx3_camera(
+ 
+       pdev = platform_device_alloc("mx3-camera", 0);
+       if (!pdev)
+-              goto err;
++              return ERR_PTR(-ENOMEM);
+ 
+       pdev->dev.dma_mask = kmalloc(sizeof(*pdev->dev.dma_mask), GFP_KERNEL);
+       if (!pdev->dev.dma_mask)
+diff --git a/arch/arm/mach-omap2/cclock3xxx_data.c b/arch/arm/mach-omap2/cclock3xxx_data.c
+index 45cd26430d1f..da6d407c21cd 100644
+--- a/arch/arm/mach-omap2/cclock3xxx_data.c
++++ b/arch/arm/mach-omap2/cclock3xxx_data.c
+@@ -418,7 +418,8 @@ static struct clk_hw_omap dpll4_m5x2_ck_hw = {
+       .clkdm_name     = "dpll4_clkdm",
+ };
+ 
+-DEFINE_STRUCT_CLK(dpll4_m5x2_ck, dpll4_m5x2_ck_parent_names, dpll4_m5x2_ck_ops);
++DEFINE_STRUCT_CLK_FLAGS(dpll4_m5x2_ck, dpll4_m5x2_ck_parent_names,
++                      dpll4_m5x2_ck_ops, CLK_SET_RATE_PARENT);
+ 
+ static struct clk dpll4_m5x2_ck_3630 = {
+       .name           = "dpll4_m5x2_ck",
+diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
+index c443f2e97e10..f98410a257e3 100644
+--- a/arch/arm/mach-omap2/cpuidle44xx.c
++++ b/arch/arm/mach-omap2/cpuidle44xx.c
+@@ -14,6 +14,7 @@
+ #include <linux/cpuidle.h>
+ #include <linux/cpu_pm.h>
+ #include <linux/export.h>
++#include <linux/clockchips.h>
+ 
+ #include <asm/cpuidle.h>
+ #include <asm/proc-fns.h>
+@@ -80,6 +81,7 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
+                       int index)
+ {
+       struct idle_statedata *cx = state_ptr + index;
++      int cpu_id = smp_processor_id();
+ 
+       /*
+        * CPU0 has to wait and stay ON until CPU1 is OFF state.
+@@ -104,6 +106,8 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
+               }
+       }
+ 
++      clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu_id);
++
+       /*
+        * Call idle CPU PM enter notifier chain so that
+        * VFP and per CPU interrupt context is saved.
+@@ -147,6 +151,8 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
+               (cx->mpu_logic_state == PWRDM_POWER_OFF))
+               cpu_cluster_pm_exit();
+ 
++      clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu_id);
++
+ fail:
+       cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
+       cpu_done[dev->cpu] = false;
+@@ -154,6 +160,16 @@ fail:
+       return index;
+ }
+ 
++/*
++ * For each cpu, setup the broadcast timer because local timers
++ * stops for the states above C1.
++ */
++static void omap_setup_broadcast_timer(void *arg)
++{
++      int cpu = smp_processor_id();
++      clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &cpu);
++}
++
+ static struct cpuidle_driver omap4_idle_driver = {
+       .name                           = "omap4_idle",
+       .owner                          = THIS_MODULE,
+@@ -171,8 +187,7 @@ static struct cpuidle_driver omap4_idle_driver = {
+                       /* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */
+                       .exit_latency = 328 + 440,
+                       .target_residency = 960,
+-                      .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED |
+-                               CPUIDLE_FLAG_TIMER_STOP,
++                      .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED,
+                       .enter = omap_enter_idle_coupled,
+                       .name = "C2",
+                       .desc = "CPUx OFF, MPUSS CSWR",
+@@ -181,8 +196,7 @@ static struct cpuidle_driver omap4_idle_driver = {
+                       /* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */
+                       .exit_latency = 460 + 518,
+                       .target_residency = 1100,
+-                      .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED |
+-                               CPUIDLE_FLAG_TIMER_STOP,
++                      .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED,
+                       .enter = omap_enter_idle_coupled,
+                       .name = "C3",
+                       .desc = "CPUx OFF, MPUSS OSWR",
+@@ -213,5 +227,8 @@ int __init omap4_idle_init(void)
+       if (!cpu_clkdm[0] || !cpu_clkdm[1])
+               return -ENODEV;
+ 
++      /* Configure the broadcast timer on each cpu */
++      on_each_cpu(omap_setup_broadcast_timer, NULL, 1);
++
+       return cpuidle_register(&omap4_idle_driver, cpu_online_mask);
+ }
+diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
+index a69b67d968d4..123d9e2271dc 100644
+--- a/arch/x86/kernel/cpu/perf_event.c
++++ b/arch/x86/kernel/cpu/perf_event.c
+@@ -1252,10 +1252,20 @@ void perf_events_lapic_init(void)
+ static int __kprobes
+ perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs)
+ {
++      int ret;
++      u64 start_clock;
++      u64 finish_clock;
++
+       if (!atomic_read(&active_events))
+               return NMI_DONE;
+ 
+-      return x86_pmu.handle_irq(regs);
++      start_clock = local_clock();
++      ret = x86_pmu.handle_irq(regs);
++      finish_clock = local_clock();
++
++      perf_sample_event_took(finish_clock - start_clock);
++
++      return ret;
+ }
+ 
+ struct event_constraint emptyconstraint;
+diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+index 117ce3813681..6416d0d07394 100644
+--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
++++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+@@ -635,9 +635,9 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
+                * relocations were valid.
+                */
+               for (j = 0; j < exec[i].relocation_count; j++) {
+-                      if (copy_to_user(&user_relocs[j].presumed_offset,
+-                                       &invalid_offset,
+-                                       sizeof(invalid_offset))) {
++                      if (__copy_to_user(&user_relocs[j].presumed_offset,
++                                         &invalid_offset,
++                                         sizeof(invalid_offset))) {
+                               ret = -EFAULT;
+                               mutex_lock(&dev->struct_mutex);
+                               goto err;
+@@ -1151,18 +1151,21 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
+ 
+       ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
+       if (!ret) {
++              struct drm_i915_gem_exec_object __user *user_exec_list =
++                      to_user_ptr(args->buffers_ptr);
++
+               /* Copy the new buffer offsets back to the user's exec list. */
+-              for (i = 0; i < args->buffer_count; i++)
+-                      exec_list[i].offset = exec2_list[i].offset;
+-              /* ... and back out to userspace */
+-              ret = copy_to_user(to_user_ptr(args->buffers_ptr),
+-                                 exec_list,
+-                                 sizeof(*exec_list) * args->buffer_count);
+-              if (ret) {
+-                      ret = -EFAULT;
+-                      DRM_DEBUG("failed to copy %d exec entries "
+-                                "back to user (%d)\n",
+-                                args->buffer_count, ret);
++              for (i = 0; i < args->buffer_count; i++) {
++                      ret = __copy_to_user(&user_exec_list[i].offset,
++                                           &exec2_list[i].offset,
++                                           sizeof(user_exec_list[i].offset));
++                      if (ret) {
++                              ret = -EFAULT;
++                              DRM_DEBUG("failed to copy %d exec entries "
++                                        "back to user (%d)\n",
++                                        args->buffer_count, ret);
++                              break;
++                      }
+               }
+       }
+ 
+@@ -1208,14 +1211,21 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
+       ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
+       if (!ret) {
+               /* Copy the new buffer offsets back to the user's exec list. */
+-              ret = copy_to_user(to_user_ptr(args->buffers_ptr),
+-                                 exec2_list,
+-                                 sizeof(*exec2_list) * args->buffer_count);
+-              if (ret) {
+-                      ret = -EFAULT;
+-                      DRM_DEBUG("failed to copy %d exec entries "
+-                                "back to user (%d)\n",
+-                                args->buffer_count, ret);
++              struct drm_i915_gem_exec_object2 *user_exec_list =
++                                 to_user_ptr(args->buffers_ptr);
++              int i;
++
++              for (i = 0; i < args->buffer_count; i++) {
++                      ret = __copy_to_user(&user_exec_list[i].offset,
++                                           &exec2_list[i].offset,
++                                           sizeof(user_exec_list[i].offset));
++                      if (ret) {
++                              ret = -EFAULT;
++                              DRM_DEBUG("failed to copy %d exec entries "
++                                        "back to user\n",
++                                        args->buffer_count);
++                              break;
++                      }
+               }
+       }
+ 
+diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
+index 019eacd8a68f..9ee40042fa3a 100644
+--- a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
++++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
+@@ -679,7 +679,7 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id,
+       }
+ 
+       if (outp == 8)
+-              return false;
++              return conf;
+ 
+       data = exec_lookup(priv, head, outp, ctrl, dcb, &ver, &hdr, &cnt, &len, &info1);
+       if (data == 0x0000)
+diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
+index 061b227dae0c..b131520521e4 100644
+--- a/drivers/gpu/drm/radeon/radeon_bios.c
++++ b/drivers/gpu/drm/radeon/radeon_bios.c
+@@ -196,6 +196,20 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
+               }
+       }
+ 
++      if (!found) {
++              while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
++                      dhandle = ACPI_HANDLE(&pdev->dev);
++                      if (!dhandle)
++                              continue;
++
++                      status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
++                      if (!ACPI_FAILURE(status)) {
++                              found = true;
++                              break;
++                      }
++              }
++      }
++
+       if (!found)
+               return false;
+ 
+diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
+index 1424ccde2377..f83727915787 100644
+--- a/drivers/gpu/drm/radeon/radeon_object.c
++++ b/drivers/gpu/drm/radeon/radeon_object.c
+@@ -582,22 +582,30 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
+       rbo = container_of(bo, struct radeon_bo, tbo);
+       radeon_bo_check_tiling(rbo, 0, 0);
+       rdev = rbo->rdev;
+-      if (bo->mem.mem_type == TTM_PL_VRAM) {
+-              size = bo->mem.num_pages << PAGE_SHIFT;
+-              offset = bo->mem.start << PAGE_SHIFT;
+-              if ((offset + size) > rdev->mc.visible_vram_size) {
+-                      /* hurrah the memory is not visible ! */
+-                      radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
+-                      rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
+-                      r = ttm_bo_validate(bo, &rbo->placement, false, false);
+-                      if (unlikely(r != 0))
+-                              return r;
+-                      offset = bo->mem.start << PAGE_SHIFT;
+-                      /* this should not happen */
+-                      if ((offset + size) > rdev->mc.visible_vram_size)
+-                              return -EINVAL;
+-              }
++      if (bo->mem.mem_type != TTM_PL_VRAM)
++              return 0;
++
++      size = bo->mem.num_pages << PAGE_SHIFT;
++      offset = bo->mem.start << PAGE_SHIFT;
++      if ((offset + size) <= rdev->mc.visible_vram_size)
++              return 0;
++
++      /* hurrah the memory is not visible ! */
++      radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
++      rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
++      r = ttm_bo_validate(bo, &rbo->placement, false, false);
++      if (unlikely(r == -ENOMEM)) {
++              radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
++              return ttm_bo_validate(bo, &rbo->placement, false, false);
++      } else if (unlikely(r != 0)) {
++              return r;
+       }
++
++      offset = bo->mem.start << PAGE_SHIFT;
++      /* this should never happen */
++      if ((offset + size) > rdev->mc.visible_vram_size)
++              return -EINVAL;
++
+       return 0;
+ }
+ 
+diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
+index df064e8cd9dc..f25f29835b3e 100644
+--- a/drivers/hwmon/Kconfig
++++ b/drivers/hwmon/Kconfig
+@@ -944,7 +944,7 @@ config SENSORS_NCT6775
+ 
+ config SENSORS_NTC_THERMISTOR
+       tristate "NTC thermistor support"
+-      depends on (!OF && !IIO) || (OF && IIO)
++      depends on !OF || IIO=n || IIO
+       help
+         This driver supports NTC thermistors sensor reading and its
+         interpretation. The driver can also monitor the temperature and
+diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
+index 9297164a23a5..c64d3d497c50 100644
+--- a/drivers/hwmon/ntc_thermistor.c
++++ b/drivers/hwmon/ntc_thermistor.c
+@@ -44,6 +44,7 @@ struct ntc_compensation {
+       unsigned int    ohm;
+ };
+ 
++/* Order matters, ntc_match references the entries by index */
+ static const struct platform_device_id ntc_thermistor_id[] = {
+       { "ncp15wb473", TYPE_NCPXXWB473 },
+       { "ncp18wb473", TYPE_NCPXXWB473 },
+@@ -141,7 +142,7 @@ struct ntc_data {
+       char name[PLATFORM_NAME_SIZE];
+ };
+ 
+-#ifdef CONFIG_OF
++#if defined(CONFIG_OF) && IS_ENABLED(CONFIG_IIO)
+ static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata)
+ {
+       struct iio_channel *channel = pdata->chan;
+@@ -163,15 +164,15 @@ static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata)
+ 
+ static const struct of_device_id ntc_match[] = {
+       { .compatible = "ntc,ncp15wb473",
+-              .data = &ntc_thermistor_id[TYPE_NCPXXWB473] },
++              .data = &ntc_thermistor_id[0] },
+       { .compatible = "ntc,ncp18wb473",
+-              .data = &ntc_thermistor_id[TYPE_NCPXXWB473] },
++              .data = &ntc_thermistor_id[1] },
+       { .compatible = "ntc,ncp21wb473",
+-              .data = &ntc_thermistor_id[TYPE_NCPXXWB473] },
++              .data = &ntc_thermistor_id[2] },
+       { .compatible = "ntc,ncp03wb473",
+-              .data = &ntc_thermistor_id[TYPE_NCPXXWB473] },
++              .data = &ntc_thermistor_id[3] },
+       { .compatible = "ntc,ncp15wl333",
+-              .data = &ntc_thermistor_id[TYPE_NCPXXWL333] },
++              .data = &ntc_thermistor_id[4] },
+       { },
+ };
+ MODULE_DEVICE_TABLE(of, ntc_match);
+@@ -223,6 +224,8 @@ ntc_thermistor_parse_dt(struct platform_device *pdev)
+       return NULL;
+ }
+ 
++#define ntc_match     NULL
++
+ static void ntc_iio_channel_release(struct ntc_thermistor_platform_data *pdata)
+ { }
+ #endif
+diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
+index 988e29d18bb4..bae20f8bb034 100644
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -965,6 +965,8 @@ sequence_cmd:
+ 
+       if (!rc && dump_payload == false && unsol_data)
+               iscsit_set_unsoliticed_dataout(cmd);
++      else if (dump_payload && imm_data)
++              target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
+ 
+       return 0;
+ }
+diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
+index 1a75869d3a82..677973641d2b 100644
+--- a/drivers/md/dm-cache-target.c
++++ b/drivers/md/dm-cache-target.c
+@@ -1954,6 +1954,8 @@ static int cache_create(struct cache_args *ca, struct cache **result)
+       ti->num_discard_bios = 1;
+       ti->discards_supported = true;
+       ti->discard_zeroes_data_unsupported = true;
++      /* Discard bios must be split on a block boundary */
++      ti->split_discard_bios = true;
+ 
+       cache->features = ca->features;
+       ti->per_bio_data_size = get_per_bio_data_size(cache);
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 00a99fe797d4..963fa59be9b3 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -7338,8 +7338,10 @@ void md_do_sync(struct md_thread *thread)
+       /* just incase thread restarts... */
+       if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
+               return;
+-      if (mddev->ro) /* never try to sync a read-only array */
++      if (mddev->ro) {/* never try to sync a read-only array */
++              set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+               return;
++      }
+ 
+       if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
+               if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
+@@ -7788,6 +7790,7 @@ void md_check_recovery(struct mddev *mddev)
+                       /* There is no thread, but we need to call
+                        * ->spare_active and clear saved_raid_disk
+                        */
++                      set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+                       md_reap_sync_thread(mddev);
+                       clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+                       goto unlock;
+diff --git a/drivers/staging/comedi/drivers/ni_daq_700.c b/drivers/staging/comedi/drivers/ni_daq_700.c
+index d067ef70e194..5e80d428e544 100644
+--- a/drivers/staging/comedi/drivers/ni_daq_700.c
++++ b/drivers/staging/comedi/drivers/ni_daq_700.c
+@@ -127,6 +127,8 @@ static int daq700_ai_rinsn(struct comedi_device *dev,
+       /* write channel to multiplexer */
+       /* set mask scan bit high to disable scanning */
+       outb(chan | 0x80, dev->iobase + CMD_R1);
++      /* mux needs 2us to really settle [Fred Brooks]. */
++      udelay(2);
+ 
+       /* convert n samples */
+       for (n = 0; n < insn->n; n++) {
+diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c
+index 6c7b55c2947d..e70a48e3b376 100644
+--- a/drivers/staging/speakup/main.c
++++ b/drivers/staging/speakup/main.c
+@@ -2219,6 +2219,7 @@ static void __exit speakup_exit(void)
+       unregister_keyboard_notifier(&keyboard_notifier_block);
+       unregister_vt_notifier(&vt_notifier_block);
+       speakup_unregister_devsynth();
++      speakup_cancel_paste();
+       del_timer(&cursor_timer);
+       kthread_stop(speakup_task);
+       speakup_task = NULL;
+diff --git a/drivers/staging/speakup/selection.c b/drivers/staging/speakup/selection.c
+index f0fb00392d6b..f67941e78e4a 100644
+--- a/drivers/staging/speakup/selection.c
++++ b/drivers/staging/speakup/selection.c
+@@ -4,6 +4,8 @@
+ #include <linux/sched.h>
+ #include <linux/device.h> /* for dev_warn */
+ #include <linux/selection.h>
++#include <linux/workqueue.h>
++#include <asm/cmpxchg.h>
+ 
+ #include "speakup.h"
+ 
+@@ -121,20 +123,24 @@ int speakup_set_selection(struct tty_struct *tty)
+       return 0;
+ }
+ 
+-/* TODO: move to some helper thread, probably.  That'd fix having to check for
+- * in_atomic().  */
+-int speakup_paste_selection(struct tty_struct *tty)
++struct speakup_paste_work {
++      struct work_struct work;
++      struct tty_struct *tty;
++};
++
++static void __speakup_paste_selection(struct work_struct *work)
+ {
++      struct speakup_paste_work *spw =
++              container_of(work, struct speakup_paste_work, work);
++      struct tty_struct *tty = xchg(&spw->tty, NULL);
+       struct vc_data *vc = (struct vc_data *) tty->driver_data;
+       int pasted = 0, count;
+       DECLARE_WAITQUEUE(wait, current);
++
+       add_wait_queue(&vc->paste_wait, &wait);
+       while (sel_buffer && sel_buffer_lth > pasted) {
+               set_current_state(TASK_INTERRUPTIBLE);
+               if (test_bit(TTY_THROTTLED, &tty->flags)) {
+-                      if (in_atomic())
+-                              /* if we are in an interrupt handler, abort */
+-                              break;
+                       schedule();
+                       continue;
+               }
+@@ -146,6 +152,26 @@ int speakup_paste_selection(struct tty_struct *tty)
+       }
+       remove_wait_queue(&vc->paste_wait, &wait);
+       current->state = TASK_RUNNING;
++      tty_kref_put(tty);
++}
++
++static struct speakup_paste_work speakup_paste_work = {
++      .work = __WORK_INITIALIZER(speakup_paste_work.work,
++                                 __speakup_paste_selection)
++};
++
++int speakup_paste_selection(struct tty_struct *tty)
++{
++      if (cmpxchg(&speakup_paste_work.tty, NULL, tty) != NULL)
++              return -EBUSY;
++
++      tty_kref_get(tty);
++      schedule_work_on(WORK_CPU_UNBOUND, &speakup_paste_work.work);
+       return 0;
+ }
+ 
++void speakup_cancel_paste(void)
++{
++      cancel_work_sync(&speakup_paste_work.work);
++      tty_kref_put(speakup_paste_work.tty);
++}
+diff --git a/drivers/staging/speakup/speakup.h b/drivers/staging/speakup/speakup.h
+index 0126f714821a..74fe72429b2d 100644
+--- a/drivers/staging/speakup/speakup.h
++++ b/drivers/staging/speakup/speakup.h
+@@ -77,6 +77,7 @@ extern void synth_buffer_clear(void);
+ extern void speakup_clear_selection(void);
+ extern int speakup_set_selection(struct tty_struct *tty);
+ extern int speakup_paste_selection(struct tty_struct *tty);
++extern void speakup_cancel_paste(void);
+ extern void speakup_register_devsynth(void);
+ extern void speakup_unregister_devsynth(void);
+ extern void synth_write(const char *buf, size_t count);
+diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
+index 4ed35231e552..2cdd5079ae78 100644
+--- a/drivers/usb/core/driver.c
++++ b/drivers/usb/core/driver.c
+@@ -1754,10 +1754,13 @@ int usb_runtime_suspend(struct device *dev)
+       if (status == -EAGAIN || status == -EBUSY)
+               usb_mark_last_busy(udev);
+ 
+-      /* The PM core reacts badly unless the return code is 0,
+-       * -EAGAIN, or -EBUSY, so always return -EBUSY on an error.
++      /*
++       * The PM core reacts badly unless the return code is 0,
++       * -EAGAIN, or -EBUSY, so always return -EBUSY on an error
++       * (except for root hubs, because they don't suspend through
++       * an upstream port like other USB devices).
+        */
+-      if (status != 0)
++      if (status != 0 && udev->parent)
+               return -EBUSY;
+       return status;
+ }
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index a036e03ae1b3..46efdca96952 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -1683,8 +1683,19 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
+        */
+       pm_runtime_set_autosuspend_delay(&hdev->dev, 0);
+ 
+-      /* Hubs have proper suspend/resume support. */
+-      usb_enable_autosuspend(hdev);
++      /*
++       * Hubs have proper suspend/resume support, except for root hubs
++       * where the controller driver doesn't have bus_suspend and
++       * bus_resume methods.
++       */
++      if (hdev->parent) {             /* normal device */
++              usb_enable_autosuspend(hdev);
++      } else {                        /* root hub */
++              const struct hc_driver *drv = bus_to_hcd(hdev->bus)->driver;
++
++              if (drv->bus_suspend && drv->bus_resume)
++                      usb_enable_autosuspend(hdev);
++      }
+ 
+       if (hdev->level == MAX_TOPO_LEVEL) {
+               dev_err(&intf->dev,
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index f2e57a1112c9..d007f0920126 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -1794,6 +1794,16 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
+               kfree(cur_cd);
+       }
+ 
++      num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
++      for (i = 0; i < num_ports; i++) {
++              struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
++              for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
++                      struct list_head *ep = &bwt->interval_bw[j].endpoints;
++                      while (!list_empty(ep))
++                              list_del_init(ep->next);
++              }
++      }
++
+       for (i = 1; i < MAX_HC_SLOTS; ++i)
+               xhci_free_virt_device(xhci, i);
+ 
+@@ -1834,16 +1844,6 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
+       if (!xhci->rh_bw)
+               goto no_bw;
+ 
+-      num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
+-      for (i = 0; i < num_ports; i++) {
+-              struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
+-              for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
+-                      struct list_head *ep = &bwt->interval_bw[j].endpoints;
+-                      while (!list_empty(ep))
+-                              list_del_init(ep->next);
+-              }
+-      }
+-
+       for (i = 0; i < num_ports; i++) {
+               struct xhci_tt_bw_info *tt, *n;
+               list_for_each_entry_safe(tt, n, &xhci->rh_bw[i].tts, tt_list) {
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 2c635bd9c185..b9e663ac9a35 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -583,6 +583,8 @@ static struct usb_device_id id_table_combined [] = {
+       { USB_DEVICE(FTDI_VID, FTDI_TAVIR_STK500_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_TIAO_UMPA_PID),
+               .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
++      { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLXM_PID),
++              .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+       /*
+        * ELV devices:
+        */
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 993c93df6874..500474c48f4b 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -538,6 +538,11 @@
+  */
+ #define FTDI_TIAO_UMPA_PID    0x8a98  /* TIAO/DIYGADGET USB Multi-Protocol Adapter */
+ 
++/*
++ * NovaTech product ids (FTDI_VID)
++ */
++#define FTDI_NT_ORIONLXM_PID  0x7c90  /* OrionLXm Substation Automation Platform */
++
+ 
+ /********************************/
+ /** third-party VID/PID combos **/
+diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
+index c5c9cbf107d1..8cd6479a8b43 100644
+--- a/drivers/usb/serial/io_ti.c
++++ b/drivers/usb/serial/io_ti.c
+@@ -835,7 +835,7 @@ static int build_i2c_fw_hdr(__u8 *header, struct device *dev)
+       firmware_rec =  (struct ti_i2c_firmware_rec*)i2c_header->Data;
+ 
+       i2c_header->Type        = I2C_DESC_TYPE_FIRMWARE_BLANK;
+-      i2c_header->Size        = (__u16)buffer_size;
++      i2c_header->Size        = cpu_to_le16(buffer_size);
+       i2c_header->CheckSum    = cs;
+       firmware_rec->Ver_Major = OperationalMajorVersion;
+       firmware_rec->Ver_Minor = OperationalMinorVersion;
+diff --git a/drivers/usb/serial/io_usbvend.h b/drivers/usb/serial/io_usbvend.h
+index 51f83fbb73bb..6f6a856bc37c 100644
+--- a/drivers/usb/serial/io_usbvend.h
++++ b/drivers/usb/serial/io_usbvend.h
+@@ -594,7 +594,7 @@ struct edge_boot_descriptor {
+ 
+ struct ti_i2c_desc {
+       __u8    Type;                   // Type of descriptor
+-      __u16   Size;                   // Size of data only not including header
++      __le16  Size;                   // Size of data only not including header
+       __u8    CheckSum;               // Checksum (8 bit sum of data only)
+       __u8    Data[0];                // Data starts here
+ } __attribute__((packed));
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index f213ee978516..948a19f0cdf7 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -161,6 +161,7 @@ static void option_instat_callback(struct urb *urb);
+ #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED       0x9000
+ #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED       0x9001
+ #define NOVATELWIRELESS_PRODUCT_E362          0x9010
++#define NOVATELWIRELESS_PRODUCT_E371          0x9011
+ #define NOVATELWIRELESS_PRODUCT_G2            0xA010
+ #define NOVATELWIRELESS_PRODUCT_MC551         0xB001
+ 
+@@ -1012,6 +1013,7 @@ static const struct usb_device_id option_ids[] = {
+       /* Novatel Ovation MC551 a.k.a. Verizon USB551L */
+       { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC551, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E362, 0xff, 0xff, 0xff) },
++      { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E371, 0xff, 0xff, 0xff) },
+ 
+       { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) },
+       { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) },
+diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
+index c5b6dbf9c2fc..229a757e1c13 100644
+--- a/include/linux/perf_event.h
++++ b/include/linux/perf_event.h
+@@ -695,10 +695,17 @@ static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64
+ extern int sysctl_perf_event_paranoid;
+ extern int sysctl_perf_event_mlock;
+ extern int sysctl_perf_event_sample_rate;
++extern int sysctl_perf_cpu_time_max_percent;
++
++extern void perf_sample_event_took(u64 sample_len_ns);
+ 
+ extern int perf_proc_update_handler(struct ctl_table *table, int write,
+               void __user *buffer, size_t *lenp,
+               loff_t *ppos);
++extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
++              void __user *buffer, size_t *lenp,
++              loff_t *ppos);
++
+ 
+ static inline bool perf_paranoid_tracepoint_raw(void)
+ {
+diff --git a/include/uapi/linux/usb/Kbuild b/include/uapi/linux/usb/Kbuild
+index 6cb4ea826834..4cc4d6e7e523 100644
+--- a/include/uapi/linux/usb/Kbuild
++++ b/include/uapi/linux/usb/Kbuild
+@@ -1,6 +1,7 @@
+ # UAPI Header export list
+ header-y += audio.h
+ header-y += cdc.h
++header-y += cdc-wdm.h
+ header-y += ch11.h
+ header-y += ch9.h
+ header-y += functionfs.h
+diff --git a/include/uapi/linux/usb/cdc-wdm.h b/include/uapi/linux/usb/cdc-wdm.h
+index f03134feebd6..0dc132e75030 100644
+--- a/include/uapi/linux/usb/cdc-wdm.h
++++ b/include/uapi/linux/usb/cdc-wdm.h
+@@ -9,6 +9,8 @@
+ #ifndef _UAPI__LINUX_USB_CDC_WDM_H
+ #define _UAPI__LINUX_USB_CDC_WDM_H
+ 
++#include <linux/types.h>
++
+ /*
+  * This IOCTL is used to retrieve the wMaxCommand for the device,
+  * defining the message limit for both reading and writing.
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index 198a38883e64..bc255e25d5dd 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -698,10 +698,12 @@ void set_cpu_present(unsigned int cpu, bool present)
+ 
+ void set_cpu_online(unsigned int cpu, bool online)
+ {
+-      if (online)
++      if (online) {
+               cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
+-      else
++              cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
++      } else {
+               cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
++      }
+ }
+ 
+ void set_cpu_active(unsigned int cpu, bool active)
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index ac9b8cce3df2..459b94c94721 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -165,25 +165,109 @@ int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free'
+ /*
+  * max perf event sample rate
+  */
+-#define DEFAULT_MAX_SAMPLE_RATE 100000
+-int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE;
+-static int max_samples_per_tick __read_mostly =
+-      DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);
++#define DEFAULT_MAX_SAMPLE_RATE               100000
++#define DEFAULT_SAMPLE_PERIOD_NS      (NSEC_PER_SEC / DEFAULT_MAX_SAMPLE_RATE)
++#define DEFAULT_CPU_TIME_MAX_PERCENT  25
++
++int sysctl_perf_event_sample_rate __read_mostly       = DEFAULT_MAX_SAMPLE_RATE;
++
++static int max_samples_per_tick __read_mostly = DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);
++static int perf_sample_period_ns __read_mostly        = DEFAULT_SAMPLE_PERIOD_NS;
++
++static atomic_t perf_sample_allowed_ns __read_mostly =
++      ATOMIC_INIT( DEFAULT_SAMPLE_PERIOD_NS * DEFAULT_CPU_TIME_MAX_PERCENT / 100);
++
++void update_perf_cpu_limits(void)
++{
++      u64 tmp = perf_sample_period_ns;
++
++      tmp *= sysctl_perf_cpu_time_max_percent;
++      do_div(tmp, 100);
++      atomic_set(&perf_sample_allowed_ns, tmp);
++}
+ 
+ int perf_proc_update_handler(struct ctl_table *table, int write,
+               void __user *buffer, size_t *lenp,
+               loff_t *ppos)
+ {
+-      int ret = proc_dointvec(table, write, buffer, lenp, ppos);
++      int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+ 
+       if (ret || !write)
+               return ret;
+ 
+       max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
++      perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
++      update_perf_cpu_limits();
++
++      return 0;
++}
++
++int sysctl_perf_cpu_time_max_percent __read_mostly = DEFAULT_CPU_TIME_MAX_PERCENT;
++
++int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
++                              void __user *buffer, size_t *lenp,
++                              loff_t *ppos)
++{
++      int ret = proc_dointvec(table, write, buffer, lenp, ppos);
++
++      if (ret || !write)
++              return ret;
++
++      update_perf_cpu_limits();
+ 
+       return 0;
+ }
+ 
++/*
++ * perf samples are done in some very critical code paths (NMIs).
++ * If they take too much CPU time, the system can lock up and not
++ * get any real work done.  This will drop the sample rate when
++ * we detect that events are taking too long.
++ */
++#define NR_ACCUMULATED_SAMPLES 128
++DEFINE_PER_CPU(u64, running_sample_length);
++
++void perf_sample_event_took(u64 sample_len_ns)
++{
++      u64 avg_local_sample_len;
++      u64 local_samples_len;
++
++      if (atomic_read(&perf_sample_allowed_ns) == 0)
++              return;
++
++      /* decay the counter by 1 average sample */
++      local_samples_len = __get_cpu_var(running_sample_length);
++      local_samples_len -= local_samples_len/NR_ACCUMULATED_SAMPLES;
++      local_samples_len += sample_len_ns;
++      __get_cpu_var(running_sample_length) = local_samples_len;
++
++      /*
++       * note: this will be biased artifically low until we have
++       * seen NR_ACCUMULATED_SAMPLES.  Doing it this way keeps us
++       * from having to maintain a count.
++       */
++      avg_local_sample_len = local_samples_len/NR_ACCUMULATED_SAMPLES;
++
++      if (avg_local_sample_len <= atomic_read(&perf_sample_allowed_ns))
++              return;
++
++      if (max_samples_per_tick <= 1)
++              return;
++
++      max_samples_per_tick = DIV_ROUND_UP(max_samples_per_tick, 2);
++      sysctl_perf_event_sample_rate = max_samples_per_tick * HZ;
++      perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
++
++      printk_ratelimited(KERN_WARNING
++                      "perf samples too long (%lld > %d), lowering "
++                      "kernel.perf_event_max_sample_rate to %d\n",
++                      avg_local_sample_len,
++                      atomic_read(&perf_sample_allowed_ns),
++                      sysctl_perf_event_sample_rate);
++
++      update_perf_cpu_limits();
++}
++
+ static atomic64_t perf_event_id;
+ 
+ static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
+@@ -1237,6 +1321,11 @@ group_sched_out(struct perf_event *group_event,
+               cpuctx->exclusive = 0;
+ }
+ 
++struct remove_event {
++      struct perf_event *event;
++      bool detach_group;
++};
++
+ /*
+  * Cross CPU call to remove a performance event
+  *
+@@ -1245,12 +1334,15 @@ group_sched_out(struct perf_event *group_event,
+  */
+ static int __perf_remove_from_context(void *info)
+ {
+-      struct perf_event *event = info;
++      struct remove_event *re = info;
++      struct perf_event *event = re->event;
+       struct perf_event_context *ctx = event->ctx;
+       struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
+ 
+       raw_spin_lock(&ctx->lock);
+       event_sched_out(event, cpuctx, ctx);
++      if (re->detach_group)
++              perf_group_detach(event);
+       list_del_event(event, ctx);
+       if (!ctx->nr_events && cpuctx->task_ctx == ctx) {
+               ctx->is_active = 0;
+@@ -1275,10 +1367,14 @@ static int __perf_remove_from_context(void *info)
+  * When called from perf_event_exit_task, it's OK because the
+  * context has been detached from its task.
+  */
+-static void perf_remove_from_context(struct perf_event *event)
++static void perf_remove_from_context(struct perf_event *event, bool detach_group)
+ {
+       struct perf_event_context *ctx = event->ctx;
+       struct task_struct *task = ctx->task;
++      struct remove_event re = {
++              .event = event,
++              .detach_group = detach_group,
++      };
+ 
+       lockdep_assert_held(&ctx->mutex);
+ 
+@@ -1287,12 +1383,12 @@ static void perf_remove_from_context(struct perf_event *event)
+                * Per cpu events are removed via an smp call and
+                * the removal is always successful.
+                */
+-              cpu_function_call(event->cpu, __perf_remove_from_context, event);
++              cpu_function_call(event->cpu, __perf_remove_from_context, &re);
+               return;
+       }
+ 
+ retry:
+-      if (!task_function_call(task, __perf_remove_from_context, event))
++      if (!task_function_call(task, __perf_remove_from_context, &re))
+               return;
+ 
+       raw_spin_lock_irq(&ctx->lock);
+@@ -1309,6 +1405,8 @@ retry:
+        * Since the task isn't running, its safe to remove the event, us
+        * holding the ctx->lock ensures the task won't get scheduled in.
+        */
++      if (detach_group)
++              perf_group_detach(event);
+       list_del_event(event, ctx);
+       raw_spin_unlock_irq(&ctx->lock);
+ }
+@@ -3015,10 +3113,7 @@ int perf_event_release_kernel(struct perf_event *event)
+        *     to trigger the AB-BA case.
+        */
+       mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
+-      raw_spin_lock_irq(&ctx->lock);
+-      perf_group_detach(event);
+-      raw_spin_unlock_irq(&ctx->lock);
+-      perf_remove_from_context(event);
++      perf_remove_from_context(event, true);
+       mutex_unlock(&ctx->mutex);
+ 
+       free_event(event);
+@@ -5044,6 +5139,9 @@ struct swevent_htable {
+ 
+       /* Recursion avoidance in each contexts */
+       int                             recursion[PERF_NR_CONTEXTS];
++
++      /* Keeps track of cpu being initialized/exited */
++      bool                            online;
+ };
+ 
+ static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
+@@ -5290,8 +5388,14 @@ static int perf_swevent_add(struct perf_event *event, int flags)
+       hwc->state = !(flags & PERF_EF_START);
+ 
+       head = find_swevent_head(swhash, event);
+-      if (WARN_ON_ONCE(!head))
++      if (!head) {
++              /*
++               * We can race with cpu hotplug code. Do not
++               * WARN if the cpu just got unplugged.
++               */
++              WARN_ON_ONCE(swhash->online);
+               return -EINVAL;
++      }
+ 
+       hlist_add_head_rcu(&event->hlist_entry, head);
+ 
+@@ -6581,6 +6685,9 @@ SYSCALL_DEFINE5(perf_event_open,
+       if (attr.freq) {
+               if (attr.sample_freq > sysctl_perf_event_sample_rate)
+                       return -EINVAL;
++      } else {
++              if (attr.sample_period & (1ULL << 63))
++                      return -EINVAL;
+       }
+ 
+       /*
+@@ -6727,7 +6834,7 @@ SYSCALL_DEFINE5(perf_event_open,
+               struct perf_event_context *gctx = group_leader->ctx;
+ 
+               mutex_lock(&gctx->mutex);
+-              perf_remove_from_context(group_leader);
++              perf_remove_from_context(group_leader, false);
+ 
+               /*
+                * Removing from the context ends up with disabled
+@@ -6737,7 +6844,7 @@ SYSCALL_DEFINE5(perf_event_open,
+               perf_event__state_init(group_leader);
+               list_for_each_entry(sibling, &group_leader->sibling_list,
+                                   group_entry) {
+-                      perf_remove_from_context(sibling);
++                      perf_remove_from_context(sibling, false);
+                       perf_event__state_init(sibling);
+                       put_ctx(gctx);
+               }
+@@ -6867,7 +6974,7 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
+       mutex_lock(&src_ctx->mutex);
+       list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
+                                event_entry) {
+-              perf_remove_from_context(event);
++              perf_remove_from_context(event, false);
+               put_ctx(src_ctx);
+               list_add(&event->event_entry, &events);
+       }
+@@ -6927,13 +7034,7 @@ __perf_event_exit_task(struct perf_event *child_event,
+                        struct perf_event_context *child_ctx,
+                        struct task_struct *child)
+ {
+-      if (child_event->parent) {
+-              raw_spin_lock_irq(&child_ctx->lock);
+-              perf_group_detach(child_event);
+-              raw_spin_unlock_irq(&child_ctx->lock);
+-      }
+-
+-      perf_remove_from_context(child_event);
++      perf_remove_from_context(child_event, !!child_event->parent);
+ 
+       /*
+        * It can happen that the parent exits first, and has events
+@@ -7395,6 +7496,7 @@ static void __cpuinit perf_event_init_cpu(int cpu)
+       struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
+ 
+       mutex_lock(&swhash->hlist_mutex);
++      swhash->online = true;
+       if (swhash->hlist_refcount > 0) {
+               struct swevent_hlist *hlist;
+ 
+@@ -7417,14 +7519,14 @@ static void perf_pmu_rotate_stop(struct pmu *pmu)
+ 
+ static void __perf_event_exit_context(void *__info)
+ {
++      struct remove_event re = { .detach_group = false };
+       struct perf_event_context *ctx = __info;
+-      struct perf_event *event;
+ 
+       perf_pmu_rotate_stop(ctx->pmu);
+ 
+       rcu_read_lock();
+-      list_for_each_entry_rcu(event, &ctx->event_list, event_entry)
+-              __perf_remove_from_context(event);
++      list_for_each_entry_rcu(re.event, &ctx->event_list, event_entry)
++              __perf_remove_from_context(&re);
+       rcu_read_unlock();
+ }
+ 
+@@ -7452,6 +7554,7 @@ static void perf_event_exit_cpu(int cpu)
+       perf_event_exit_cpu_context(cpu);
+ 
+       mutex_lock(&swhash->hlist_mutex);
++      swhash->online = false;
+       swevent_hlist_release(swhash);
+       mutex_unlock(&swhash->hlist_mutex);
+ }
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index b4308d7da339..2672eca82a2b 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -5270,7 +5270,6 @@ static int __cpuinit sched_cpu_active(struct notifier_block *nfb,
+                                     unsigned long action, void *hcpu)
+ {
+       switch (action & ~CPU_TASKS_FROZEN) {
+-      case CPU_STARTING:
+       case CPU_DOWN_FAILED:
+               set_cpu_active((long)hcpu, true);
+               return NOTIFY_OK;
+diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
+index 1095e878a46f..b3f0a2783369 100644
+--- a/kernel/sched/cpupri.c
++++ b/kernel/sched/cpupri.c
+@@ -70,8 +70,7 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
+       int idx = 0;
+       int task_pri = convert_prio(p->prio);
+ 
+-      if (task_pri >= MAX_RT_PRIO)
+-              return 0;
++      BUG_ON(task_pri >= CPUPRI_NR_PRIORITIES);
+ 
+       for (idx = 0; idx < task_pri; idx++) {
+               struct cpupri_vec *vec  = &cp->pri_to_cpu[idx];
+diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
+index 1101d92635c3..c23a8fd36149 100644
+--- a/kernel/sched/cputime.c
++++ b/kernel/sched/cputime.c
+@@ -326,50 +326,50 @@ out:
+  * softirq as those do not count in task exec_runtime any more.
+  */
+ static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
+-                                              struct rq *rq)
++                                       struct rq *rq, int ticks)
+ {
+-      cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
++      cputime_t scaled = cputime_to_scaled(cputime_one_jiffy);
++      u64 cputime = (__force u64) cputime_one_jiffy;
+       u64 *cpustat = kcpustat_this_cpu->cpustat;
+ 
+       if (steal_account_process_tick())
+               return;
+ 
++      cputime *= ticks;
++      scaled *= ticks;
++
+       if (irqtime_account_hi_update()) {
+-              cpustat[CPUTIME_IRQ] += (__force u64) cputime_one_jiffy;
++              cpustat[CPUTIME_IRQ] += cputime;
+       } else if (irqtime_account_si_update()) {
+-              cpustat[CPUTIME_SOFTIRQ] += (__force u64) cputime_one_jiffy;
++              cpustat[CPUTIME_SOFTIRQ] += cputime;
+       } else if (this_cpu_ksoftirqd() == p) {
+               /*
+                * ksoftirqd time do not get accounted in cpu_softirq_time.
+                * So, we have to handle it separately here.
+                * Also, p->stime needs to be updated for ksoftirqd.
+                */
+-              __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
+-                                      CPUTIME_SOFTIRQ);
++              __account_system_time(p, cputime, scaled, CPUTIME_SOFTIRQ);
+       } else if (user_tick) {
+-              account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
++              account_user_time(p, cputime, scaled);
+       } else if (p == rq->idle) {
+-              account_idle_time(cputime_one_jiffy);
++              account_idle_time(cputime);
+       } else if (p->flags & PF_VCPU) { /* System time or guest time */
+-              account_guest_time(p, cputime_one_jiffy, one_jiffy_scaled);
++              account_guest_time(p, cputime, scaled);
+       } else {
+-              __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
+-                                      CPUTIME_SYSTEM);
++              __account_system_time(p, cputime, scaled,       CPUTIME_SYSTEM);
+       }
+ }
+ 
+ static void irqtime_account_idle_ticks(int ticks)
+ {
+-      int i;
+       struct rq *rq = this_rq();
+ 
+-      for (i = 0; i < ticks; i++)
+-              irqtime_account_process_tick(current, 0, rq);
++      irqtime_account_process_tick(current, 0, rq, ticks);
+ }
+ #else /* CONFIG_IRQ_TIME_ACCOUNTING */
+ static inline void irqtime_account_idle_ticks(int ticks) {}
+ static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick,
+-                                              struct rq *rq) {}
++                                              struct rq *rq, int nr_ticks) {}
+ #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
+ 
+ /*
+@@ -464,7 +464,7 @@ void account_process_tick(struct task_struct *p, int user_tick)
+               return;
+ 
+       if (sched_clock_irqtime) {
+-              irqtime_account_process_tick(p, user_tick, rq);
++              irqtime_account_process_tick(p, user_tick, rq, 1);
+               return;
+       }
+ 
+diff --git a/kernel/sysctl.c b/kernel/sysctl.c
+index ed6c01626acd..9469f4c61a30 100644
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -1049,6 +1049,16 @@ static struct ctl_table kern_table[] = {
+               .maxlen         = sizeof(sysctl_perf_event_sample_rate),
+               .mode           = 0644,
+               .proc_handler   = perf_proc_update_handler,
++              .extra1         = &one,
++      },
++      {
++              .procname       = "perf_cpu_time_max_percent",
++              .data           = &sysctl_perf_cpu_time_max_percent,
++              .maxlen         = sizeof(sysctl_perf_cpu_time_max_percent),
++              .mode           = 0644,
++              .proc_handler   = perf_cpu_time_max_percent_handler,
++              .extra1         = &zero,
++              .extra2         = &one_hundred,
+       },
+ #endif
+ #ifdef CONFIG_KMEMCHECK
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index 4254eb021583..4f8548abd6ee 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -1153,6 +1153,8 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
+        */
+       if (!PageHWPoison(p)) {
+               printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
++              atomic_long_sub(nr_pages, &num_poisoned_pages);
++              put_page(hpage);
+               res = 0;
+               goto out;
+       }
+diff --git a/mm/rmap.c b/mm/rmap.c
+index fbf0040a7342..b730a4409be6 100644
+--- a/mm/rmap.c
++++ b/mm/rmap.c
+@@ -1675,10 +1675,9 @@ void __put_anon_vma(struct anon_vma *anon_vma)
+ {
+       struct anon_vma *root = anon_vma->root;
+ 
++      anon_vma_free(anon_vma);
+       if (root != anon_vma && atomic_dec_and_test(&root->refcount))
+               anon_vma_free(root);
+-
+-      anon_vma_free(anon_vma);
+ }
+ 
+ #ifdef CONFIG_MIGRATION
+diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
+index 95f3f1da0d7f..d38e6a8d8b9f 100644
+--- a/net/ipv6/netfilter.c
++++ b/net/ipv6/netfilter.c
+@@ -30,13 +30,15 @@ int ip6_route_me_harder(struct sk_buff *skb)
+               .daddr = iph->daddr,
+               .saddr = iph->saddr,
+       };
++      int err;
+ 
+       dst = ip6_route_output(net, skb->sk, &fl6);
+-      if (dst->error) {
++      err = dst->error;
++      if (err) {
+               IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
+              LIMIT_NETDEBUG(KERN_DEBUG "ip6_route_me_harder: No more route.\n");
+               dst_release(dst);
+-              return dst->error;
++              return err;
+       }
+ 
+       /* Drop old route. */
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 7de7cf1ec852..0923f09df503 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -1590,12 +1590,10 @@ static const struct hda_fixup alc260_fixups[] = {
+       [ALC260_FIXUP_COEF] = {
+               .type = HDA_FIXUP_VERBS,
+               .v.verbs = (const struct hda_verb[]) {
+-                      { 0x20, AC_VERB_SET_COEF_INDEX, 0x07 },
+-                      { 0x20, AC_VERB_SET_PROC_COEF,  0x3040 },
++                      { 0x1a, AC_VERB_SET_COEF_INDEX, 0x07 },
++                      { 0x1a, AC_VERB_SET_PROC_COEF,  0x3040 },
+                       { }
+               },
+-              .chained = true,
+-              .chain_id = ALC260_FIXUP_HP_PIN_0F,
+       },
+       [ALC260_FIXUP_GPIO1] = {
+               .type = HDA_FIXUP_VERBS,
+@@ -1610,8 +1608,8 @@ static const struct hda_fixup alc260_fixups[] = {
+       [ALC260_FIXUP_REPLACER] = {
+               .type = HDA_FIXUP_VERBS,
+               .v.verbs = (const struct hda_verb[]) {
+-                      { 0x20, AC_VERB_SET_COEF_INDEX, 0x07 },
+-                      { 0x20, AC_VERB_SET_PROC_COEF,  0x3050 },
++                      { 0x1a, AC_VERB_SET_COEF_INDEX, 0x07 },
++                      { 0x1a, AC_VERB_SET_PROC_COEF,  0x3050 },
+                       { }
+               },
+               .chained = true,
+diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
+index 07b1a3ad3e24..63b6f8c8edf2 100644
+--- a/tools/perf/util/evsel.c
++++ b/tools/perf/util/evsel.c
+@@ -1514,7 +1514,7 @@ int perf_evsel__open_strerror(struct perf_evsel *evsel,
+       switch (err) {
+       case EPERM:
+       case EACCES:
+-              return scnprintf(msg, size, "%s",
++              return scnprintf(msg, size,
+                "You may not have permission to collect %sstats.\n"
+                "Consider tweaking /proc/sys/kernel/perf_event_paranoid:\n"
+                " -1 - Not paranoid at all\n"

