commit:     dc6725ef86a129d4df053dc29b953e560f5db220
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Dec 29 13:10:58 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Dec 29 13:10:58 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=dc6725ef

Linux patch 4.19.223

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README               |    4 +
 1222_linux-4.19.223.patch | 1396 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1400 insertions(+)

diff --git a/0000_README b/0000_README
index 7dda2480..ed044b8e 100644
--- a/0000_README
+++ b/0000_README
@@ -927,6 +927,10 @@ Patch:  1221_linux-4.19.222.patch
 From:   https://www.kernel.org
 Desc:   Linux 4.19.222
 
+Patch:  1222_linux-4.19.223.patch
+From:   https://www.kernel.org
+Desc:   Linux 4.19.223
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1222_linux-4.19.223.patch b/1222_linux-4.19.223.patch
new file mode 100644
index 00000000..f6334a18
--- /dev/null
+++ b/1222_linux-4.19.223.patch
@@ -0,0 +1,1396 @@
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index f179e20eb8a0b..607db9519cfbd 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -2019,8 +2019,12 @@
+                       Default is 1 (enabled)
+ 
+       kvm-intel.emulate_invalid_guest_state=
+-                      [KVM,Intel] Enable emulation of invalid guest states
+-                      Default is 0 (disabled)
++                      [KVM,Intel] Disable emulation of invalid guest state.
++                      Ignored if kvm-intel.enable_unrestricted_guest=1, as
++                      guest state is never invalid for unrestricted guests.
++                      This param doesn't apply to nested guests (L2), as KVM
++                      never emulates invalid L2 guest state.
++                      Default is 1 (enabled)
+ 
+       kvm-intel.flexpriority=
+                       [KVM,Intel] Disable FlexPriority feature (TPR shadow).
+diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt
+index d3e5dd26db12d..4035a495c0606 100644
+--- a/Documentation/networking/bonding.txt
++++ b/Documentation/networking/bonding.txt
+@@ -191,11 +191,12 @@ ad_actor_sys_prio
+ ad_actor_system
+ 
+       In an AD system, this specifies the mac-address for the actor in
+-      protocol packet exchanges (LACPDUs). The value cannot be NULL or
+-      multicast. It is preferred to have the local-admin bit set for this
+-      mac but driver does not enforce it. If the value is not given then
+-      system defaults to using the masters' mac address as actors' system
+-      address.
++      protocol packet exchanges (LACPDUs). The value cannot be a multicast
++      address. If the all-zeroes MAC is specified, bonding will internally
++      use the MAC of the bond itself. It is preferred to have the
++      local-admin bit set for this mac but driver does not enforce it. If
++      the value is not given then system defaults to using the masters'
++      mac address as actors' system address.
+ 
+       This parameter has effect only in 802.3ad mode and is available through
+       SysFs interface.
+diff --git a/Makefile b/Makefile
+index aa6cdaebe18b2..6637882cb5e54 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 19
+-SUBLEVEL = 222
++SUBLEVEL = 223
+ EXTRAVERSION =
+ NAME = "People's Front"
+ 
+diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
+index 89e551eebff1e..cde22c04ad2b8 100644
+--- a/arch/arm/kernel/entry-armv.S
++++ b/arch/arm/kernel/entry-armv.S
+@@ -620,11 +620,9 @@ call_fpe:
+       tstne   r0, #0x04000000                 @ bit 26 set on both ARM and Thumb-2
+       reteq   lr
+       and     r8, r0, #0x00000f00             @ mask out CP number
+- THUMB(       lsr     r8, r8, #8              )
+       mov     r7, #1
+-      add     r6, r10, #TI_USED_CP
+- ARM( strb    r7, [r6, r8, lsr #8]    )       @ set appropriate used_cp[]
+- THUMB(       strb    r7, [r6, r8]            )       @ set appropriate used_cp[]
++      add     r6, r10, r8, lsr #8             @ add used_cp[] array offset first
++      strb    r7, [r6, #TI_USED_CP]           @ set appropriate used_cp[]
+ #ifdef CONFIG_IWMMXT
+       @ Test if we need to give access to iWMMXt coprocessors
+       ldr     r5, [r10, #TI_FLAGS]
+@@ -633,7 +631,7 @@ call_fpe:
+       bcs     iwmmxt_task_enable
+ #endif
+  ARM( add     pc, pc, r8, lsr #6      )
+- THUMB(       lsl     r8, r8, #2              )
++ THUMB(       lsr     r8, r8, #6              )
+  THUMB(       add     pc, r8                  )
+       nop
+ 
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus.dts b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus.dts
+index 1238de25a9691..9b1789504f7a0 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus.dts
++++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus.dts
+@@ -72,7 +72,7 @@
+       pinctrl-0 = <&emac_rgmii_pins>;
+       phy-supply = <&reg_gmac_3v3>;
+       phy-handle = <&ext_rgmii_phy>;
+-      phy-mode = "rgmii";
++      phy-mode = "rgmii-id";
+       status = "okay";
+ };
+ 
+diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
+index 61a647a55c695..1ae007ec65c51 100644
+--- a/arch/parisc/kernel/syscall.S
++++ b/arch/parisc/kernel/syscall.S
+@@ -478,7 +478,7 @@ lws_start:
+       extrd,u %r1,PSW_W_BIT,1,%r1
+       /* sp must be aligned on 4, so deposit the W bit setting into
+        * the bottom of sp temporarily */
+-      or,ev   %r1,%r30,%r30
++      or,od   %r1,%r30,%r30
+ 
+       /* Clip LWS number to a 32-bit value for 32-bit processes */
+       depdi   0, 31, 32, %r20
+diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
+index 2a9c12ffb5cbc..7de459cf36b54 100644
+--- a/arch/x86/include/asm/pgtable.h
++++ b/arch/x86/include/asm/pgtable.h
+@@ -1356,8 +1356,8 @@ static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
+ #endif
+ #endif
+ 
+-#define PKRU_AD_BIT 0x1
+-#define PKRU_WD_BIT 0x2
++#define PKRU_AD_BIT 0x1u
++#define PKRU_WD_BIT 0x2u
+ #define PKRU_BITS_PER_PKEY 2
+ 
+ static inline bool __pkru_allows_read(u32 pkru, u16 pkey)
+diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
+index b2bad345c523f..c2529dfda3e53 100644
+--- a/block/bfq-iosched.c
++++ b/block/bfq-iosched.c
+@@ -625,12 +625,13 @@ void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+ }
+ 
+ /*
+- * Tell whether there are active queues or groups with differentiated weights.
++ * Tell whether there are active queues with different weights or
++ * active groups.
+  */
+-static bool bfq_differentiated_weights(struct bfq_data *bfqd)
++static bool bfq_varied_queue_weights_or_active_groups(struct bfq_data *bfqd)
+ {
+       /*
+-       * For weights to differ, at least one of the trees must contain
++       * For queue weights to differ, queue_weights_tree must contain
+        * at least two nodes.
+        */
+       return (!RB_EMPTY_ROOT(&bfqd->queue_weights_tree) &&
+@@ -638,9 +639,7 @@ static bool bfq_differentiated_weights(struct bfq_data *bfqd)
+                bfqd->queue_weights_tree.rb_node->rb_right)
+ #ifdef CONFIG_BFQ_GROUP_IOSCHED
+              ) ||
+-             (!RB_EMPTY_ROOT(&bfqd->group_weights_tree) &&
+-              (bfqd->group_weights_tree.rb_node->rb_left ||
+-               bfqd->group_weights_tree.rb_node->rb_right)
++              (bfqd->num_groups_with_pending_reqs > 0
+ #endif
+              );
+ }
+@@ -658,26 +657,25 @@ static bool bfq_differentiated_weights(struct bfq_data *bfqd)
+  * 3) all active groups at the same level in the groups tree have the same
+  *    number of children.
+  *
+- * Unfortunately, keeping the necessary state for evaluating exactly the
+- * above symmetry conditions would be quite complex and time-consuming.
+- * Therefore this function evaluates, instead, the following stronger
+- * sub-conditions, for which it is much easier to maintain the needed
+- * state:
++ * Unfortunately, keeping the necessary state for evaluating exactly
++ * the last two symmetry sub-conditions above would be quite complex
++ * and time consuming.  Therefore this function evaluates, instead,
++ * only the following stronger two sub-conditions, for which it is
++ * much easier to maintain the needed state:
+  * 1) all active queues have the same weight,
+- * 2) all active groups have the same weight,
+- * 3) all active groups have at most one active child each.
+- * In particular, the last two conditions are always true if hierarchical
+- * support and the cgroups interface are not enabled, thus no state needs
+- * to be maintained in this case.
++ * 2) there are no active groups.
++ * In particular, the last condition is always true if hierarchical
++ * support or the cgroups interface are not enabled, thus no state
++ * needs to be maintained in this case.
+  */
+ static bool bfq_symmetric_scenario(struct bfq_data *bfqd)
+ {
+-      return !bfq_differentiated_weights(bfqd);
++      return !bfq_varied_queue_weights_or_active_groups(bfqd);
+ }
+ 
+ /*
+  * If the weight-counter tree passed as input contains no counter for
+- * the weight of the input entity, then add that counter; otherwise just
++ * the weight of the input queue, then add that counter; otherwise just
+  * increment the existing counter.
+  *
+  * Note that weight-counter trees contain few nodes in mostly symmetric
+@@ -688,25 +686,25 @@ static bool bfq_symmetric_scenario(struct bfq_data *bfqd)
+  * In most scenarios, the rate at which nodes are created/destroyed
+  * should be low too.
+  */
+-void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_entity *entity,
++void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+                         struct rb_root *root)
+ {
++      struct bfq_entity *entity = &bfqq->entity;
+       struct rb_node **new = &(root->rb_node), *parent = NULL;
+ 
+       /*
+-       * Do not insert if the entity is already associated with a
++       * Do not insert if the queue is already associated with a
+        * counter, which happens if:
+-       *   1) the entity is associated with a queue,
+-       *   2) a request arrival has caused the queue to become both
++       *   1) a request arrival has caused the queue to become both
+        *      non-weight-raised, and hence change its weight, and
+        *      backlogged; in this respect, each of the two events
+        *      causes an invocation of this function,
+-       *   3) this is the invocation of this function caused by the
++       *   2) this is the invocation of this function caused by the
+        *      second event. This second invocation is actually useless,
+        *      and we handle this fact by exiting immediately. More
+        *      efficient or clearer solutions might possibly be adopted.
+        */
+-      if (entity->weight_counter)
++      if (bfqq->weight_counter)
+               return;
+ 
+       while (*new) {
+@@ -716,7 +714,7 @@ void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_entity *entity,
+               parent = *new;
+ 
+               if (entity->weight == __counter->weight) {
+-                      entity->weight_counter = __counter;
++                      bfqq->weight_counter = __counter;
+                       goto inc_counter;
+               }
+               if (entity->weight < __counter->weight)
+@@ -725,68 +723,68 @@ void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_entity *entity,
+                       new = &((*new)->rb_right);
+       }
+ 
+-      entity->weight_counter = kzalloc(sizeof(struct bfq_weight_counter),
+-                                       GFP_ATOMIC);
++      bfqq->weight_counter = kzalloc(sizeof(struct bfq_weight_counter),
++                                     GFP_ATOMIC);
+ 
+       /*
+        * In the unlucky event of an allocation failure, we just
+-       * exit. This will cause the weight of entity to not be
+-       * considered in bfq_differentiated_weights, which, in its
+-       * turn, causes the scenario to be deemed wrongly symmetric in
+-       * case entity's weight would have been the only weight making
+-       * the scenario asymmetric. On the bright side, no unbalance
+-       * will however occur when entity becomes inactive again (the
+-       * invocation of this function is triggered by an activation
+-       * of entity). In fact, bfq_weights_tree_remove does nothing
+-       * if !entity->weight_counter.
++       * exit. This will cause the weight of queue to not be
++       * considered in bfq_varied_queue_weights_or_active_groups,
++       * which, in its turn, causes the scenario to be deemed
++       * wrongly symmetric in case bfqq's weight would have been
++       * the only weight making the scenario asymmetric.  On the
++       * bright side, no unbalance will however occur when bfqq
++       * becomes inactive again (the invocation of this function
++       * is triggered by an activation of queue).  In fact,
++       * bfq_weights_tree_remove does nothing if
++       * !bfqq->weight_counter.
+        */
+-      if (unlikely(!entity->weight_counter))
++      if (unlikely(!bfqq->weight_counter))
+               return;
+ 
+-      entity->weight_counter->weight = entity->weight;
+-      rb_link_node(&entity->weight_counter->weights_node, parent, new);
+-      rb_insert_color(&entity->weight_counter->weights_node, root);
++      bfqq->weight_counter->weight = entity->weight;
++      rb_link_node(&bfqq->weight_counter->weights_node, parent, new);
++      rb_insert_color(&bfqq->weight_counter->weights_node, root);
+ 
+ inc_counter:
+-      entity->weight_counter->num_active++;
++      bfqq->weight_counter->num_active++;
++      bfqq->ref++;
+ }
+ 
+ /*
+- * Decrement the weight counter associated with the entity, and, if the
++ * Decrement the weight counter associated with the queue, and, if the
+  * counter reaches 0, remove the counter from the tree.
+  * See the comments to the function bfq_weights_tree_add() for considerations
+  * about overhead.
+  */
+ void __bfq_weights_tree_remove(struct bfq_data *bfqd,
+-                             struct bfq_entity *entity,
++                             struct bfq_queue *bfqq,
+                              struct rb_root *root)
+ {
+-      if (!entity->weight_counter)
++      if (!bfqq->weight_counter)
+               return;
+ 
+-      entity->weight_counter->num_active--;
+-      if (entity->weight_counter->num_active > 0)
++      bfqq->weight_counter->num_active--;
++      if (bfqq->weight_counter->num_active > 0)
+               goto reset_entity_pointer;
+ 
+-      rb_erase(&entity->weight_counter->weights_node, root);
+-      kfree(entity->weight_counter);
++      rb_erase(&bfqq->weight_counter->weights_node, root);
++      kfree(bfqq->weight_counter);
+ 
+ reset_entity_pointer:
+-      entity->weight_counter = NULL;
++      bfqq->weight_counter = NULL;
++      bfq_put_queue(bfqq);
+ }
+ 
+ /*
+- * Invoke __bfq_weights_tree_remove on bfqq and all its inactive
+- * parent entities.
++ * Invoke __bfq_weights_tree_remove on bfqq and decrement the number
++ * of active groups for each queue's inactive parent entity.
+  */
+ void bfq_weights_tree_remove(struct bfq_data *bfqd,
+                            struct bfq_queue *bfqq)
+ {
+       struct bfq_entity *entity = bfqq->entity.parent;
+ 
+-      __bfq_weights_tree_remove(bfqd, &bfqq->entity,
+-                                &bfqd->queue_weights_tree);
+-
+       for_each_entity(entity) {
+               struct bfq_sched_data *sd = entity->my_sched_data;
+ 
+@@ -798,18 +796,37 @@ void bfq_weights_tree_remove(struct bfq_data *bfqd,
+                        * next_in_service for details on why
+                        * in_service_entity must be checked too).
+                        *
+-                       * As a consequence, the weight of entity is
+-                       * not to be removed. In addition, if entity
+-                       * is active, then its parent entities are
+-                       * active as well, and thus their weights are
+-                       * not to be removed either. In the end, this
+-                       * loop must stop here.
++                       * As a consequence, its parent entities are
++                       * active as well, and thus this loop must
++                       * stop here.
+                        */
+                       break;
+               }
+-              __bfq_weights_tree_remove(bfqd, entity,
+-                                        &bfqd->group_weights_tree);
++
++              /*
++               * The decrement of num_groups_with_pending_reqs is
++               * not performed immediately upon the deactivation of
++               * entity, but it is delayed to when it also happens
++               * that the first leaf descendant bfqq of entity gets
++               * all its pending requests completed. The following
++               * instructions perform this delayed decrement, if
++               * needed. See the comments on
++               * num_groups_with_pending_reqs for details.
++               */
++              if (entity->in_groups_with_pending_reqs) {
++                      entity->in_groups_with_pending_reqs = false;
++                      bfqd->num_groups_with_pending_reqs--;
++              }
+       }
++
++      /*
++       * Next function is invoked last, because it causes bfqq to be
++       * freed if the following holds: bfqq is not in service and
++       * has no dispatched request. DO NOT use bfqq after the next
++       * function invocation.
++       */
++      __bfq_weights_tree_remove(bfqd, bfqq,
++                                &bfqd->queue_weights_tree);
+ }
+ 
+ /*
+@@ -1003,7 +1020,8 @@ bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
+ 
+ static int bfqq_process_refs(struct bfq_queue *bfqq)
+ {
+-      return bfqq->ref - bfqq->allocated - bfqq->entity.on_st;
++      return bfqq->ref - bfqq->allocated - bfqq->entity.on_st -
++              (bfqq->weight_counter != NULL);
+ }
+ 
+ /* Empty burst list and add just bfqq (see comments on bfq_handle_burst) */
+@@ -2798,7 +2816,7 @@ static void bfq_dispatch_remove(struct request_queue *q, struct request *rq)
+       bfq_remove_request(q, rq);
+ }
+ 
+-static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++static bool __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+ {
+       /*
+        * If this bfqq is shared between multiple processes, check
+@@ -2831,9 +2849,11 @@ static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+       /*
+        * All in-service entities must have been properly deactivated
+        * or requeued before executing the next function, which
+-       * resets all in-service entites as no more in service.
++       * resets all in-service entities as no more in service. This
++       * may cause bfqq to be freed. If this happens, the next
++       * function returns true.
+        */
+-      __bfq_bfqd_reset_in_service(bfqd);
++      return __bfq_bfqd_reset_in_service(bfqd);
+ }
+ 
+ /**
+@@ -3238,7 +3258,6 @@ void bfq_bfqq_expire(struct bfq_data *bfqd,
+       bool slow;
+       unsigned long delta = 0;
+       struct bfq_entity *entity = &bfqq->entity;
+-      int ref;
+ 
+       /*
+        * Check whether the process is slow (see bfq_bfqq_is_slow).
+@@ -3307,10 +3326,8 @@ void bfq_bfqq_expire(struct bfq_data *bfqd,
+        * reason.
+        */
+       __bfq_bfqq_recalc_budget(bfqd, bfqq, reason);
+-      ref = bfqq->ref;
+-      __bfq_bfqq_expire(bfqd, bfqq);
+-
+-      if (ref == 1) /* bfqq is gone, no more actions on it */
++      if (__bfq_bfqq_expire(bfqd, bfqq))
++              /* bfqq is gone, no more actions on it */
+               return;
+ 
+       bfqq->injected_service = 0;
+@@ -3521,9 +3538,11 @@ static bool bfq_better_to_idle(struct bfq_queue *bfqq)
+        * symmetric scenario where:
+        * (i)  each of these processes must get the same throughput as
+        *      the others;
+-       * (ii) all these processes have the same I/O pattern
+-              (either sequential or random).
+-       * In fact, in such a scenario, the drive will tend to treat
++       * (ii) the I/O of each process has the same properties, in
++       *      terms of locality (sequential or random), direction
++       *      (reads or writes), request sizes, greediness
++       *      (from I/O-bound to sporadic), and so on.
++       * In fact, in such a scenario, the drive tends to treat
+        * the requests of each of these processes in about the same
+        * way as the requests of the others, and thus to provide
+        * each of these processes with about the same throughput
+@@ -3532,18 +3551,67 @@ static bool bfq_better_to_idle(struct bfq_queue *bfqq)
+        * certainly needed to guarantee that bfqq receives its
+        * assigned fraction of the device throughput (see [1] for
+        * details).
++       * The problem is that idling may significantly reduce
++       * throughput with certain combinations of types of I/O and
++       * devices. An important example is sync random I/O, on flash
++       * storage with command queueing. So, unless bfqq falls in the
++       * above cases where idling also boosts throughput, it would
++       * be important to check conditions (i) and (ii) accurately,
++       * so as to avoid idling when not strictly needed for service
++       * guarantees.
++       *
++       * Unfortunately, it is extremely difficult to thoroughly
++       * check condition (ii). And, in case there are active groups,
++       * it becomes very difficult to check condition (i) too. In
++       * fact, if there are active groups, then, for condition (i)
++       * to become false, it is enough that an active group contains
++       * more active processes or sub-groups than some other active
++       * group. More precisely, for condition (i) to hold because of
++       * such a group, it is not even necessary that the group is
++       * (still) active: it is sufficient that, even if the group
++       * has become inactive, some of its descendant processes still
++       * have some request already dispatched but still waiting for
++       * completion. In fact, requests have still to be guaranteed
++       * their share of the throughput even after being
++       * dispatched. In this respect, it is easy to show that, if a
++       * group frequently becomes inactive while still having
++       * in-flight requests, and if, when this happens, the group is
++       * not considered in the calculation of whether the scenario
++       * is asymmetric, then the group may fail to be guaranteed its
++       * fair share of the throughput (basically because idling may
++       * not be performed for the descendant processes of the group,
++       * but it had to be).  We address this issue with the
++       * following bi-modal behavior, implemented in the function
++       * bfq_symmetric_scenario().
++       *
++       * If there are groups with requests waiting for completion
++       * (as commented above, some of these groups may even be
++       * already inactive), then the scenario is tagged as
++       * asymmetric, conservatively, without checking any of the
++       * conditions (i) and (ii). So the device is idled for bfqq.
++       * This behavior matches also the fact that groups are created
++       * exactly if controlling I/O is a primary concern (to
++       * preserve bandwidth and latency guarantees).
++       *
++       * On the opposite end, if there are no groups with requests
++       * waiting for completion, then only condition (i) is actually
++       * controlled, i.e., provided that condition (i) holds, idling
++       * is not performed, regardless of whether condition (ii)
++       * holds. In other words, only if condition (i) does not hold,
++       * then idling is allowed, and the device tends to be
++       * prevented from queueing many requests, possibly of several
++       * processes. Since there are no groups with requests waiting
++       * for completion, then, to control condition (i) it is enough
++       * to check just whether all the queues with requests waiting
++       * for completion also have the same weight.
+        *
+-       * We address this issue by controlling, actually, only the
+-       * symmetry sub-condition (i), i.e., provided that
+-       * sub-condition (i) holds, idling is not performed,
+-       * regardless of whether sub-condition (ii) holds. In other
+-       * words, only if sub-condition (i) holds, then idling is
+-       * allowed, and the device tends to be prevented from queueing
+-       * many requests, possibly of several processes. The reason
+-       * for not controlling also sub-condition (ii) is that we
+-       * exploit preemption to preserve guarantees in case of
+-       * symmetric scenarios, even if (ii) does not hold, as
+-       * explained in the next two paragraphs.
++       * Not checking condition (ii) evidently exposes bfqq to the
++       * risk of getting less throughput than its fair share.
++       * However, for queues with the same weight, a further
++       * mechanism, preemption, mitigates or even eliminates this
++       * problem. And it does so without consequences on overall
++       * throughput. This mechanism and its benefits are explained
++       * in the next three paragraphs.
+        *
+        * Even if a queue, say Q, is expired when it remains idle, Q
+        * can still preempt the new in-service queue if the next
+@@ -3557,11 +3625,7 @@ static bool bfq_better_to_idle(struct bfq_queue *bfqq)
+        * idling allows the internal queues of the device to contain
+        * many requests, and thus to reorder requests, we can rather
+        * safely assume that the internal scheduler still preserves a
+-       * minimum of mid-term fairness. The motivation for using
+-       * preemption instead of idling is that, by not idling,
+-       * service guarantees are preserved without minimally
+-       * sacrificing throughput. In other words, both a high
+-       * throughput and its desired distribution are obtained.
++       * minimum of mid-term fairness.
+        *
+        * More precisely, this preemption-based, idleless approach
+        * provides fairness in terms of IOPS, and not sectors per
+@@ -3580,27 +3644,28 @@ static bool bfq_better_to_idle(struct bfq_queue *bfqq)
+        * 1024/8 times as high as the service received by the other
+        * queue.
+        *
+-       * On the other hand, device idling is performed, and thus
+-       * pure sector-domain guarantees are provided, for the
+-       * following queues, which are likely to need stronger
+-       * throughput guarantees: weight-raised queues, and queues
+-       * with a higher weight than other queues. When such queues
+-       * are active, sub-condition (i) is false, which triggers
+-       * device idling.
++       * The motivation for using preemption instead of idling (for
++       * queues with the same weight) is that, by not idling,
++       * service guarantees are preserved (completely or at least in
++       * part) without minimally sacrificing throughput. And, if
++       * there is no active group, then the primary expectation for
++       * this device is probably a high throughput.
+        *
+-       * According to the above considerations, the next variable is
+-       * true (only) if sub-condition (i) holds. To compute the
+-       * value of this variable, we not only use the return value of
+-       * the function bfq_symmetric_scenario(), but also check
+-       * whether bfqq is being weight-raised, because
+-       * bfq_symmetric_scenario() does not take into account also
+-       * weight-raised queues (see comments on
+-       * bfq_weights_tree_add()). In particular, if bfqq is being
+-       * weight-raised, it is important to idle only if there are
+-       * other, non-weight-raised queues that may steal throughput
+-       * to bfqq. Actually, we should be even more precise, and
+-       * differentiate between interactive weight raising and
+-       * soft real-time weight raising.
++       * We are now left only with explaining the additional
++       * compound condition that is checked below for deciding
++       * whether the scenario is asymmetric. To explain this
++       * compound condition, we need to add that the function
++       * bfq_symmetric_scenario checks the weights of only
++       * non-weight-raised queues, for efficiency reasons (see
++       * comments on bfq_weights_tree_add()). Then the fact that
++       * bfqq is weight-raised is checked explicitly here. More
++       * precisely, the compound condition below takes into account
++       * also the fact that, even if bfqq is being weight-raised,
++       * the scenario is still symmetric if all queues with requests
++       * waiting for completion happen to be
++       * weight-raised. Actually, we should be even more precise
++       * here, and differentiate between interactive weight raising
++       * and soft real-time weight raising.
+        *
+        * As a side note, it is worth considering that the above
+        * device-idling countermeasures may however fail in the
+@@ -5422,7 +5487,7 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
+       bfqd->idle_slice_timer.function = bfq_idle_slice_timer;
+ 
+       bfqd->queue_weights_tree = RB_ROOT;
+-      bfqd->group_weights_tree = RB_ROOT;
++      bfqd->num_groups_with_pending_reqs = 0;
+ 
+       INIT_LIST_HEAD(&bfqd->active_list);
+       INIT_LIST_HEAD(&bfqd->idle_list);
+diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h
+index a41e9884f2dd2..ca98c98a8179b 100644
+--- a/block/bfq-iosched.h
++++ b/block/bfq-iosched.h
+@@ -108,15 +108,14 @@ struct bfq_sched_data {
+ };
+ 
+ /**
+- * struct bfq_weight_counter - counter of the number of all active entities
++ * struct bfq_weight_counter - counter of the number of all active queues
+  *                             with a given weight.
+  */
+ struct bfq_weight_counter {
+-      unsigned int weight; /* weight of the entities this counter refers to */
+-      unsigned int num_active; /* nr of active entities with this weight */
++      unsigned int weight; /* weight of the queues this counter refers to */
++      unsigned int num_active; /* nr of active queues with this weight */
+       /*
+-       * Weights tree member (see bfq_data's @queue_weights_tree and
+-       * @group_weights_tree)
++       * Weights tree member (see bfq_data's @queue_weights_tree)
+        */
+       struct rb_node weights_node;
+ };
+@@ -151,8 +150,6 @@ struct bfq_weight_counter {
+ struct bfq_entity {
+       /* service_tree member */
+       struct rb_node rb_node;
+-      /* pointer to the weight counter associated with this entity */
+-      struct bfq_weight_counter *weight_counter;
+ 
+       /*
+        * Flag, true if the entity is on a tree (either the active or
+@@ -199,6 +196,9 @@ struct bfq_entity {
+ 
+       /* flag, set to request a weight, ioprio or ioprio_class change  */
+       int prio_changed;
++
++      /* flag, set if the entity is counted in groups_with_pending_reqs */
++      bool in_groups_with_pending_reqs;
+ };
+ 
+ struct bfq_group;
+@@ -266,6 +266,9 @@ struct bfq_queue {
+       /* entity representing this queue in the scheduler */
+       struct bfq_entity entity;
+ 
++      /* pointer to the weight counter associated with this entity */
++      struct bfq_weight_counter *weight_counter;
++
+       /* maximum budget allowed from the feedback mechanism */
+       int max_budget;
+       /* budget expiration (in jiffies) */
+@@ -448,15 +451,54 @@ struct bfq_data {
+        * bfq_weights_tree_[add|remove] for further details).
+        */
+       struct rb_root queue_weights_tree;
++
+       /*
+-       * rbtree of non-queue @bfq_entity weight counters, sorted by
+-       * weight. Used to keep track of whether all @bfq_groups have
+-       * the same weight. The tree contains one counter for each
+-       * distinct weight associated to some active @bfq_group (see
+-       * the comments to the functions bfq_weights_tree_[add|remove]
+-       * for further details).
++       * Number of groups with at least one descendant process that
++       * has at least one request waiting for completion. Note that
++       * this accounts for also requests already dispatched, but not
++       * yet completed. Therefore this number of groups may differ
++       * (be larger) than the number of active groups, as a group is
++       * considered active only if its corresponding entity has
++       * descendant queues with at least one request queued. This
++       * number is used to decide whether a scenario is symmetric.
++       * For a detailed explanation see comments on the computation
++       * of the variable asymmetric_scenario in the function
++       * bfq_better_to_idle().
++       *
++       * However, it is hard to compute this number exactly, for
++       * groups with multiple descendant processes. Consider a group
++       * that is inactive, i.e., that has no descendant process with
++       * pending I/O inside BFQ queues. Then suppose that
++       * num_groups_with_pending_reqs is still accounting for this
++       * group, because the group has descendant processes with some
++       * I/O request still in flight. num_groups_with_pending_reqs
++       * should be decremented when the in-flight request of the
++       * last descendant process is finally completed (assuming that
++       * nothing else has changed for the group in the meantime, in
++       * terms of composition of the group and active/inactive state of child
++       * groups and processes). To accomplish this, an additional
++       * pending-request counter must be added to entities, and must
++       * be updated correctly. To avoid this additional field and operations,
++       * we resort to the following tradeoff between simplicity and
++       * accuracy: for an inactive group that is still counted in
++       * num_groups_with_pending_reqs, we decrement
++       * num_groups_with_pending_reqs when the first descendant
++       * process of the group remains with no request waiting for
++       * completion.
++       *
++       * Even this simpler decrement strategy requires a little
++       * carefulness: to avoid multiple decrements, we flag a group,
++       * more precisely an entity representing a group, as still
++       * counted in num_groups_with_pending_reqs when it becomes
++       * inactive. Then, when the first descendant queue of the
++       * entity remains with no request waiting for completion,
++       * num_groups_with_pending_reqs is decremented, and this flag
++       * is reset. After this flag is reset for the entity,
++       * num_groups_with_pending_reqs won't be decremented any
++       * longer in case a new descendant queue of the entity remains
++       * with no request waiting for completion.
+        */
+-      struct rb_root group_weights_tree;
++      unsigned int num_groups_with_pending_reqs;
+ 
+       /*
+        * Number of bfq_queues containing requests (including the
+@@ -854,10 +896,10 @@ struct bfq_queue *bic_to_bfqq(struct bfq_io_cq *bic, bool is_sync);
+ void bic_set_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq, bool is_sync);
+ struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic);
+ void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq);
+-void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_entity *entity,
++void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+                         struct rb_root *root);
+ void __bfq_weights_tree_remove(struct bfq_data *bfqd,
+-                             struct bfq_entity *entity,
++                             struct bfq_queue *bfqq,
+                              struct rb_root *root);
+ void bfq_weights_tree_remove(struct bfq_data *bfqd,
+                            struct bfq_queue *bfqq);
+@@ -951,7 +993,7 @@ bool __bfq_deactivate_entity(struct bfq_entity *entity,
+                            bool ins_into_idle_tree);
+ bool next_queue_may_preempt(struct bfq_data *bfqd);
+ struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd);
+-void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd);
++bool __bfq_bfqd_reset_in_service(struct bfq_data *bfqd);
+ void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+                        bool ins_into_idle_tree, bool expiration);
+ void bfq_activate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq);
+diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c
+index ff7c2d470bb82..11ff5ceae02b4 100644
+--- a/block/bfq-wf2q.c
++++ b/block/bfq-wf2q.c
+@@ -788,25 +788,23 @@ __bfq_entity_update_weight_prio(struct bfq_service_tree *old_st,
+               new_weight = entity->orig_weight *
+                            (bfqq ? bfqq->wr_coeff : 1);
+               /*
+-               * If the weight of the entity changes, remove the entity
+-               * from its old weight counter (if there is a counter
+-               * associated with the entity), and add it to the counter
+-               * associated with its new weight.
++               * If the weight of the entity changes, and the entity is a
++               * queue, remove the entity from its old weight counter (if
++               * there is a counter associated with the entity).
+                */
+-              if (prev_weight != new_weight) {
+-                      root = bfqq ? &bfqd->queue_weights_tree :
+-                                    &bfqd->group_weights_tree;
+-                      __bfq_weights_tree_remove(bfqd, entity, root);
++              if (prev_weight != new_weight && bfqq) {
++                      root = &bfqd->queue_weights_tree;
++                      __bfq_weights_tree_remove(bfqd, bfqq, root);
+               }
+               entity->weight = new_weight;
+               /*
+-               * Add the entity to its weights tree only if it is
+-               * not associated with a weight-raised queue.
++               * Add the entity, if it is not a weight-raised queue,
++               * to the counter associated with its new weight.
+                */
+-              if (prev_weight != new_weight &&
+-                  (bfqq ? bfqq->wr_coeff == 1 : 1))
++              if (prev_weight != new_weight && bfqq && bfqq->wr_coeff == 1) {
+                       /* If we get here, root has been initialized. */
+-                      bfq_weights_tree_add(bfqd, entity, root);
++                      bfq_weights_tree_add(bfqd, bfqq, root);
++              }
+ 
+               new_st->wsum += entity->weight;
+ 
+@@ -1012,9 +1010,12 @@ static void __bfq_activate_entity(struct bfq_entity *entity,
+       if (!bfq_entity_to_bfqq(entity)) { /* bfq_group */
+               struct bfq_group *bfqg =
+                       container_of(entity, struct bfq_group, entity);
++              struct bfq_data *bfqd = bfqg->bfqd;
+ 
+-              bfq_weights_tree_add(bfqg->bfqd, entity,
+-                                   &bfqd->group_weights_tree);
++              if (!entity->in_groups_with_pending_reqs) {
++                      entity->in_groups_with_pending_reqs = true;
++                      bfqd->num_groups_with_pending_reqs++;
++              }
+       }
+ #endif
+ 
+@@ -1599,7 +1600,8 @@ struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
+       return bfqq;
+ }
+ 
+-void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
++/* returns true if the in-service queue gets freed */
++bool __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
+ {
+       struct bfq_queue *in_serv_bfqq = bfqd->in_service_queue;
+       struct bfq_entity *in_serv_entity = &in_serv_bfqq->entity;
+@@ -1623,8 +1625,20 @@ void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
+        * service tree either, then release the service reference to
+        * the queue it represents (taken with bfq_get_entity).
+        */
+-      if (!in_serv_entity->on_st)
++      if (!in_serv_entity->on_st) {
++              /*
++               * If no process is referencing in_serv_bfqq any
++               * longer, then the service reference may be the only
++               * reference to the queue. If this is the case, then
++               * bfqq gets freed here.
++               */
++              int ref = in_serv_bfqq->ref;
+               bfq_put_queue(in_serv_bfqq);
++              if (ref == 1)
++                      return true;
++      }
++
++      return false;
+ }
+ 
+ void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+@@ -1667,15 +1681,15 @@ void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ 
+       bfqd->busy_queues--;
+ 
+-      if (!bfqq->dispatched)
+-              bfq_weights_tree_remove(bfqd, bfqq);
+-
+       if (bfqq->wr_coeff > 1)
+               bfqd->wr_busy_queues--;
+ 
+       bfqg_stats_update_dequeue(bfqq_group(bfqq));
+ 
+       bfq_deactivate_bfqq(bfqd, bfqq, true, expiration);
++
++      if (!bfqq->dispatched)
++              bfq_weights_tree_remove(bfqd, bfqq);
+ }
+ 
+ /*
+@@ -1692,7 +1706,7 @@ void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+ 
+       if (!bfqq->dispatched)
+               if (bfqq->wr_coeff == 1)
+-                      bfq_weights_tree_add(bfqd, &bfqq->entity,
++                      bfq_weights_tree_add(bfqd, bfqq,
+                                            &bfqd->queue_weights_tree);
+ 
+       if (bfqq->wr_coeff > 1)
+diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
+index 48929df7673b1..4cf3ef4ddec35 100644
+--- a/drivers/char/ipmi/ipmi_msghandler.c
++++ b/drivers/char/ipmi/ipmi_msghandler.c
+@@ -2863,7 +2863,7 @@ cleanup_bmc_device(struct kref *ref)
+        * with removing the device attributes while reading a device
+        * attribute.
+        */
+-      schedule_work(&bmc->remove_work);
++      queue_work(remove_work_wq, &bmc->remove_work);
+ }
+ 
+ /*
+@@ -5085,22 +5085,27 @@ static int ipmi_init_msghandler(void)
+       if (initialized)
+               goto out;
+ 
+-      init_srcu_struct(&ipmi_interfaces_srcu);
+-
+-      timer_setup(&ipmi_timer, ipmi_timeout, 0);
+-      mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
+-
+-      atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
++      rv = init_srcu_struct(&ipmi_interfaces_srcu);
++      if (rv)
++              goto out;
+ 
+       remove_work_wq = create_singlethread_workqueue("ipmi-msghandler-remove-wq");
+       if (!remove_work_wq) {
+               pr_err("unable to create ipmi-msghandler-remove-wq workqueue");
+               rv = -ENOMEM;
+-              goto out;
++              goto out_wq;
+       }
+ 
++      timer_setup(&ipmi_timer, ipmi_timeout, 0);
++      mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
++
++      atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
++
+       initialized = true;
+ 
++out_wq:
++      if (rv)
++              cleanup_srcu_struct(&ipmi_interfaces_srcu);
+ out:
+       mutex_unlock(&ipmi_interfaces_mutex);
+       return rv;
+diff --git a/drivers/hid/hid-holtek-mouse.c b/drivers/hid/hid-holtek-mouse.c
+index 27c08ddab0e1a..96db7e96fcea9 100644
+--- a/drivers/hid/hid-holtek-mouse.c
++++ b/drivers/hid/hid-holtek-mouse.c
+@@ -68,8 +68,23 @@ static __u8 *holtek_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ static int holtek_mouse_probe(struct hid_device *hdev,
+                             const struct hid_device_id *id)
+ {
++      int ret;
++
+       if (!hid_is_usb(hdev))
+               return -EINVAL;
++
++      ret = hid_parse(hdev);
++      if (ret) {
++              hid_err(hdev, "hid parse failed: %d\n", ret);
++              return ret;
++      }
++
++      ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
++      if (ret) {
++              hid_err(hdev, "hw start failed: %d\n", ret);
++              return ret;
++      }
++
+       return 0;
+ }
+ 
+diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
+index c187e557678ef..30a7f7fde6511 100644
+--- a/drivers/hwmon/lm90.c
++++ b/drivers/hwmon/lm90.c
+@@ -197,6 +197,7 @@ enum chips { lm90, adm1032, lm99, lm86, max6657, max6659, adt7461, max6680,
+ #define LM90_STATUS_RHIGH     (1 << 4) /* remote high temp limit tripped */
+ #define LM90_STATUS_LLOW      (1 << 5) /* local low temp limit tripped */
+ #define LM90_STATUS_LHIGH     (1 << 6) /* local high temp limit tripped */
++#define LM90_STATUS_BUSY      (1 << 7) /* conversion is ongoing */
+ 
+ #define MAX6696_STATUS2_R2THRM        (1 << 1) /* remote2 THERM limit tripped */
+ #define MAX6696_STATUS2_R2OPEN        (1 << 2) /* remote2 is an open circuit */
+@@ -786,7 +787,7 @@ static int lm90_update_device(struct device *dev)
+               val = lm90_read_reg(client, LM90_REG_R_STATUS);
+               if (val < 0)
+                       return val;
+-              data->alarms = val;     /* lower 8 bit of alarms */
++              data->alarms = val & ~LM90_STATUS_BUSY;
+ 
+               if (data->kind == max6696) {
+                       val = lm90_select_remote_channel(client, data, 1);
+@@ -1439,12 +1440,11 @@ static int lm90_detect(struct i2c_client *client,
+       if (man_id < 0 || chip_id < 0 || config1 < 0 || convrate < 0)
+               return -ENODEV;
+ 
+-      if (man_id == 0x01 || man_id == 0x5C || man_id == 0x41) {
++      if (man_id == 0x01 || man_id == 0x5C || man_id == 0xA1) {
+               config2 = i2c_smbus_read_byte_data(client, LM90_REG_R_CONFIG2);
+               if (config2 < 0)
+                       return -ENODEV;
+-      } else
+-              config2 = 0;            /* Make compiler happy */
++      }
+ 
+       if ((address == 0x4C || address == 0x4D)
+        && man_id == 0x01) { /* National Semiconductor */
+diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.c b/drivers/infiniband/hw/qib/qib_user_sdma.c
+index 47ed3ab25dc95..6e6730f036b03 100644
+--- a/drivers/infiniband/hw/qib/qib_user_sdma.c
++++ b/drivers/infiniband/hw/qib/qib_user_sdma.c
+@@ -945,7 +945,7 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
+                                              &addrlimit) ||
+                           addrlimit > type_max(typeof(pkt->addrlimit))) {
+                               ret = -EINVAL;
+-                              goto free_pbc;
++                              goto free_pkt;
+                       }
+                       pkt->addrlimit = addrlimit;
+ 
+diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
+index e8f98de60df3a..a2e10cae654f0 100644
+--- a/drivers/input/touchscreen/atmel_mxt_ts.c
++++ b/drivers/input/touchscreen/atmel_mxt_ts.c
+@@ -1809,7 +1809,7 @@ static int mxt_read_info_block(struct mxt_data *data)
+       if (error) {
+               dev_err(&client->dev, "Error %d parsing object table\n", error);
+               mxt_free_object_table(data);
+-              goto err_free_mem;
++              return error;
+       }
+ 
+       data->object_table = (struct mxt_object *)(id_buf + MXT_OBJECT_START);
+diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
+index 80867bd8f44c3..c9aa28eee191d 100644
+--- a/drivers/net/bonding/bond_options.c
++++ b/drivers/net/bonding/bond_options.c
+@@ -1439,7 +1439,7 @@ static int bond_option_ad_actor_system_set(struct bonding *bond,
+               mac = (u8 *)&newval->value;
+       }
+ 
+-      if (!is_valid_ether_addr(mac))
++      if (is_multicast_ether_addr(mac))
+               goto err;
+ 
+       netdev_dbg(bond->dev, "Setting ad_actor_system to %pM\n", mac);
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
+index 5f327659efa7a..85b688f60b876 100644
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
+@@ -202,7 +202,7 @@ int qlcnic_sriov_get_vf_vport_info(struct qlcnic_adapter *,
+                                  struct qlcnic_info *, u16);
+ int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *, u16, u8);
+ void qlcnic_sriov_free_vlans(struct qlcnic_adapter *);
+-void qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *);
++int qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *);
+ bool qlcnic_sriov_check_any_vlan(struct qlcnic_vf_info *);
+ void qlcnic_sriov_del_vlan_id(struct qlcnic_sriov *,
+                             struct qlcnic_vf_info *, u16);
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+index 77e386ebff09c..98275f18a87b0 100644
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+@@ -433,7 +433,7 @@ static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter,
+                                           struct qlcnic_cmd_args *cmd)
+ {
+       struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+-      int i, num_vlans;
++      int i, num_vlans, ret;
+       u16 *vlans;
+ 
+       if (sriov->allowed_vlans)
+@@ -444,7 +444,9 @@ static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter,
+       dev_info(&adapter->pdev->dev, "Number of allowed Guest VLANs = %d\n",
+                sriov->num_allowed_vlans);
+ 
+-      qlcnic_sriov_alloc_vlans(adapter);
++      ret = qlcnic_sriov_alloc_vlans(adapter);
++      if (ret)
++              return ret;
+ 
+       if (!sriov->any_vlan)
+               return 0;
+@@ -2164,7 +2166,7 @@ static int qlcnic_sriov_vf_resume(struct qlcnic_adapter *adapter)
+       return err;
+ }
+ 
+-void qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter)
++int qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter)
+ {
+       struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+       struct qlcnic_vf_info *vf;
+@@ -2174,7 +2176,11 @@ void qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter)
+               vf = &sriov->vf_info[i];
+               vf->sriov_vlans = kcalloc(sriov->num_allowed_vlans,
+                                         sizeof(*vf->sriov_vlans), GFP_KERNEL);
++              if (!vf->sriov_vlans)
++                      return -ENOMEM;
+       }
++
++      return 0;
+ }
+ 
+ void qlcnic_sriov_free_vlans(struct qlcnic_adapter *adapter)
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+index 50eaafa3eaba3..c9f2cd2462230 100644
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+@@ -598,7 +598,9 @@ static int __qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter,
+       if (err)
+               goto del_flr_queue;
+ 
+-      qlcnic_sriov_alloc_vlans(adapter);
++      err = qlcnic_sriov_alloc_vlans(adapter);
++      if (err)
++              goto del_flr_queue;
+ 
+       return err;
+ 
+diff --git a/drivers/net/ethernet/sfc/falcon/rx.c b/drivers/net/ethernet/sfc/falcon/rx.c
+index 02456ed13a7d4..5b93a3af4575d 100644
+--- a/drivers/net/ethernet/sfc/falcon/rx.c
++++ b/drivers/net/ethernet/sfc/falcon/rx.c
+@@ -732,7 +732,10 @@ static void ef4_init_rx_recycle_ring(struct ef4_nic *efx,
+                                           efx->rx_bufs_per_page);
+       rx_queue->page_ring = kcalloc(page_ring_size,
+                                     sizeof(*rx_queue->page_ring), GFP_KERNEL);
+-      rx_queue->page_ptr_mask = page_ring_size - 1;
++      if (!rx_queue->page_ring)
++              rx_queue->page_ptr_mask = 0;
++      else
++              rx_queue->page_ptr_mask = page_ring_size - 1;
+ }
+ 
+ void ef4_init_rx_queue(struct ef4_rx_queue *rx_queue)
+diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
+index f97b35430c840..ac1ad00e2fc55 100644
+--- a/drivers/net/ethernet/smsc/smc911x.c
++++ b/drivers/net/ethernet/smsc/smc911x.c
+@@ -2080,6 +2080,11 @@ static int smc911x_drv_probe(struct platform_device *pdev)
+ 
+       ndev->dma = (unsigned char)-1;
+       ndev->irq = platform_get_irq(pdev, 0);
++      if (ndev->irq < 0) {
++              ret = ndev->irq;
++              goto release_both;
++      }
++
+       lp = netdev_priv(ndev);
+       lp->netdev = ndev;
+ #ifdef SMC_DYNAMIC_BUS_CONFIG
+diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c
+index 778d3729f460a..89b3bc389f469 100644
+--- a/drivers/net/fjes/fjes_main.c
++++ b/drivers/net/fjes/fjes_main.c
+@@ -1284,6 +1284,11 @@ static int fjes_probe(struct platform_device *plat_dev)
+       hw->hw_res.start = res->start;
+       hw->hw_res.size = resource_size(res);
+       hw->hw_res.irq = platform_get_irq(plat_dev, 0);
++      if (hw->hw_res.irq < 0) {
++              err = hw->hw_res.irq;
++              goto err_free_control_wq;
++      }
++
+       err = fjes_hw_init(&adapter->hw);
+       if (err)
+               goto err_free_control_wq;
+diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
+index 940aa7a19f50b..ba0ca85e3d766 100644
+--- a/drivers/net/hamradio/mkiss.c
++++ b/drivers/net/hamradio/mkiss.c
+@@ -803,13 +803,14 @@ static void mkiss_close(struct tty_struct *tty)
+        */
+       netif_stop_queue(ax->dev);
+ 
+-      /* Free all AX25 frame buffers. */
++      unregister_netdev(ax->dev);
++
++      /* Free all AX25 frame buffers after unreg. */
+       kfree(ax->rbuff);
+       kfree(ax->xbuff);
+ 
+       ax->tty = NULL;
+ 
+-      unregister_netdev(ax->dev);
+       free_netdev(ax->dev);
+ }
+ 
+diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
+index f438be83d2594..a44968d5cac48 100644
+--- a/drivers/net/usb/lan78xx.c
++++ b/drivers/net/usb/lan78xx.c
+@@ -75,6 +75,8 @@
+ #define LAN7801_USB_PRODUCT_ID                (0x7801)
+ #define LAN78XX_EEPROM_MAGIC          (0x78A5)
+ #define LAN78XX_OTP_MAGIC             (0x78F3)
++#define AT29M2AF_USB_VENDOR_ID                (0x07C9)
++#define AT29M2AF_USB_PRODUCT_ID       (0x0012)
+ 
+ #define       MII_READ                        1
+ #define       MII_WRITE                       0
+@@ -4170,6 +4172,10 @@ static const struct usb_device_id products[] = {
+       /* LAN7801 USB Gigabit Ethernet Device */
+       USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
+       },
++      {
++      /* ATM2-AF USB Gigabit Ethernet Device */
++      USB_DEVICE(AT29M2AF_USB_VENDOR_ID, AT29M2AF_USB_PRODUCT_ID),
++      },
+       {},
+ };
+ MODULE_DEVICE_TABLE(usb, products);
+diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
+index 4e17728f29375..08f1688dfeb28 100644
+--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
++++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
+@@ -1011,10 +1011,10 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl,
+               bank_nr = args.args[1] / STM32_GPIO_PINS_PER_BANK;
+               bank->gpio_chip.base = args.args[1];
+ 
+-              npins = args.args[2];
+-              while (!of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3,
+-                                                       ++i, &args))
+-                      npins += args.args[2];
++              /* get the last defined gpio line (offset + nb of pins) */
++              npins = args.args[0] + args.args[2];
++              while (!of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, ++i, &args))
++                      npins = max(npins, (int)(args.args[0] + args.args[2]));
+       } else {
+               bank_nr = pctl->nbanks;
+               bank->gpio_chip.base = bank_nr * STM32_GPIO_PINS_PER_BANK;
+diff --git a/drivers/spi/spi-armada-3700.c b/drivers/spi/spi-armada-3700.c
+--- a/drivers/spi/spi-armada-3700.c
++++ b/drivers/spi/spi-armada-3700.c
+@@ -912,7 +912,7 @@ static int a3700_spi_probe(struct platform_device *pdev)
+       return 0;
+ 
+ error_clk:
+-      clk_disable_unprepare(spi->clk);
++      clk_unprepare(spi->clk);
+ error:
+       spi_master_put(master);
+ out:
+diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
+index d7a12161e5531..1b3e674e6330d 100644
+--- a/drivers/usb/gadget/function/u_ether.c
++++ b/drivers/usb/gadget/function/u_ether.c
+@@ -860,19 +860,23 @@ int gether_register_netdev(struct net_device *net)
+ {
+       struct eth_dev *dev;
+       struct usb_gadget *g;
+-      struct sockaddr sa;
+       int status;
+ 
+       if (!net->dev.parent)
+               return -EINVAL;
+       dev = netdev_priv(net);
+       g = dev->gadget;
++
++      memcpy(net->dev_addr, dev->dev_mac, ETH_ALEN);
++      net->addr_assign_type = NET_ADDR_RANDOM;
++
+       status = register_netdev(net);
+       if (status < 0) {
+               dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
+               return status;
+       } else {
+               INFO(dev, "HOST MAC %pM\n", dev->host_mac);
++              INFO(dev, "MAC %pM\n", dev->dev_mac);
+ 
+               /* two kinds of host-initiated state changes:
+                *  - iff DATA transfer is active, carrier is "on"
+@@ -880,15 +884,6 @@ int gether_register_netdev(struct net_device *net)
+                */
+               netif_carrier_off(net);
+       }
+-      sa.sa_family = net->type;
+-      memcpy(sa.sa_data, dev->dev_mac, ETH_ALEN);
+-      rtnl_lock();
+-      status = dev_set_mac_address(net, &sa);
+-      rtnl_unlock();
+-      if (status)
+-              pr_warn("cannot set self ethernet address: %d\n", status);
+-      else
+-              INFO(dev, "MAC %pM\n", dev->dev_mac);
+ 
+       return status;
+ }
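
The u_ether change copies the gadget's MAC into dev_addr before register_netdev() and drops the later dev_set_mac_address() call, so the address is already in place when the interface first appears. A minimal sketch of that "fill in the address before registration" order; struct fake_netdev and mock_register_netdev() are invented.

    #include <stdio.h>
    #include <string.h>

    #define ETH_ALEN 6

    struct fake_netdev {
        unsigned char dev_addr[ETH_ALEN];
        int registered;
    };

    static int mock_register_netdev(struct fake_netdev *nd)
    {
        nd->registered = 1;
        return 0;
    }

    int main(void)
    {
        static const unsigned char mac[ETH_ALEN] = { 0x02, 0, 0, 0x12, 0x34, 0x56 };
        struct fake_netdev nd = { { 0 }, 0 };

        /* Fixed order: the address is in place before the device is visible. */
        memcpy(nd.dev_addr, mac, ETH_ALEN);
        mock_register_netdev(&nd);

        printf("registered=%d first octets=%02x:%02x\n",
               nd.registered, nd.dev_addr[0], nd.dev_addr[1]);
        return 0;
    }
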
+diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
+index 201e9da1692a4..64352d2833e2e 100644
+--- a/fs/f2fs/xattr.c
++++ b/fs/f2fs/xattr.c
+@@ -658,8 +658,15 @@ static int __f2fs_setxattr(struct inode *inode, int index,
+       }
+ 
+       last = here;
+-      while (!IS_XATTR_LAST_ENTRY(last))
++      while (!IS_XATTR_LAST_ENTRY(last)) {
++              if ((void *)(last) + sizeof(__u32) > last_base_addr ||
++                      (void *)XATTR_NEXT_ENTRY(last) > last_base_addr) {
++                      set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
++                      error = -EFSCORRUPTED;
++                      goto exit;
++              }
+               last = XATTR_NEXT_ENTRY(last);
++      }
+ 
+       newsize = XATTR_ALIGN(sizeof(struct f2fs_xattr_entry) + len + size);
+ 
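
The f2fs hunk bounds-checks each xattr entry before following it, flagging the filesystem for fsck and returning -EFSCORRUPTED when an entry would run past the end of the xattr area. A standalone sketch of such a bounds-checked walk; struct entry and its layout are invented and much simpler than f2fs_xattr_entry.

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative entry header: a length byte plus padding, then the name. */
    struct entry { uint8_t name_len; uint8_t pad[3]; };

    /* Walk entries in buf[0..size); return -1 if an entry runs past the end. */
    static int walk(const uint8_t *buf, size_t size)
    {
        const uint8_t *p = buf, *end = buf + size;

        while (p < end && ((const struct entry *)p)->name_len != 0) {
            size_t step = sizeof(struct entry) + ((const struct entry *)p)->name_len;

            /* Bounds check before advancing, as the fix does for each entry. */
            if (p + sizeof(struct entry) > end || p + step > end)
                return -1;
            p += step;
        }
        return 0;
    }

    int main(void)
    {
        uint8_t good[32] = { 4, 0, 0, 0, 'd', 'a', 't', 'a', 0 };
        uint8_t bad[8] = { 200 }; /* claims a name longer than the buffer */

        printf("good: %d, bad: %d\n", walk(good, sizeof(good)), walk(bad, sizeof(bad)));
        return 0;
    }
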
+diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
+index e7330a9a7d7dc..faee73c084d49 100644
+--- a/include/linux/virtio_net.h
++++ b/include/linux/virtio_net.h
+@@ -7,9 +7,27 @@
+ #include <uapi/linux/udp.h>
+ #include <uapi/linux/virtio_net.h>
+ 
++static inline bool virtio_net_hdr_match_proto(__be16 protocol, __u8 gso_type)
++{
++      switch (gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
++      case VIRTIO_NET_HDR_GSO_TCPV4:
++              return protocol == cpu_to_be16(ETH_P_IP);
++      case VIRTIO_NET_HDR_GSO_TCPV6:
++              return protocol == cpu_to_be16(ETH_P_IPV6);
++      case VIRTIO_NET_HDR_GSO_UDP:
++              return protocol == cpu_to_be16(ETH_P_IP) ||
++                     protocol == cpu_to_be16(ETH_P_IPV6);
++      default:
++              return false;
++      }
++}
++
+ static inline int virtio_net_hdr_set_proto(struct sk_buff *skb,
+                                          const struct virtio_net_hdr *hdr)
+ {
++      if (skb->protocol)
++              return 0;
++
+       switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
+       case VIRTIO_NET_HDR_GSO_TCPV4:
+       case VIRTIO_NET_HDR_GSO_UDP:
+@@ -88,9 +88,12 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
+                       if (!skb->protocol) {
+                               __be16 protocol = dev_parse_header_protocol(skb);
+ 
+-                              virtio_net_hdr_set_proto(skb, hdr);
+-                              if (protocol && protocol != skb->protocol)
++                              if (!protocol)
++                                      virtio_net_hdr_set_proto(skb, hdr);
++                              else if (!virtio_net_hdr_match_proto(protocol, hdr->gso_type))
+                                       return -EINVAL;
++                              else
++                                      skb->protocol = protocol;
+                       }
+ retry:
+                       if (!skb_flow_dissect_flow_keys_basic(skb, &keys,
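
With the change above, virtio_net_hdr_to_skb() only guesses the protocol from the GSO type when the link-layer header yields none, and rejects packets whose parsed protocol contradicts the GSO type (for example TCPv4 GSO on an IPv6 frame). A userspace sketch of the same consistency check; proto_matches_gso() is an invented name and the constants simply mirror ETH_P_IP/ETH_P_IPV6.

    #include <stdio.h>
    #include <stdint.h>

    #define ETH_P_IP   0x0800
    #define ETH_P_IPV6 0x86DD

    enum gso { GSO_NONE, GSO_TCPV4, GSO_TCPV6, GSO_UDP };

    /* Same shape as the new helper: does the GSO type fit the L3 protocol? */
    static int proto_matches_gso(uint16_t proto, enum gso type)
    {
        switch (type) {
        case GSO_TCPV4:
            return proto == ETH_P_IP;
        case GSO_TCPV6:
            return proto == ETH_P_IPV6;
        case GSO_UDP:
            return proto == ETH_P_IP || proto == ETH_P_IPV6;
        default:
            return 0;
        }
    }

    int main(void)
    {
        /* Consistent: IPv4 frame with TCPv4 GSO.  Bogus: IPv6 with TCPv4 GSO. */
        printf("ok=%d bad=%d\n",
               proto_matches_gso(ETH_P_IP, GSO_TCPV4),
               proto_matches_gso(ETH_P_IPV6, GSO_TCPV4));
        return 0;
    }
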
+diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
+index a45db78eaf00a..567fdfd9678d5 100644
+--- a/net/ax25/af_ax25.c
++++ b/net/ax25/af_ax25.c
+@@ -88,8 +88,10 @@ static void ax25_kill_by_device(struct net_device *dev)
+ again:
+       ax25_for_each(s, &ax25_list) {
+               if (s->ax25_dev == ax25_dev) {
+-                      s->ax25_dev = NULL;
+                       spin_unlock_bh(&ax25_list_lock);
++                      lock_sock(s->sk);
++                      s->ax25_dev = NULL;
++                      release_sock(s->sk);
+                       ax25_disconnect(s, ENETUNREACH);
+                       spin_lock_bh(&ax25_list_lock);
+ 
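
The af_ax25 change drops the ax25_list spinlock before taking the socket lock and only clears s->ax25_dev while the socket is locked, so a concurrent socket operation cannot see the device pointer vanish mid-use. A small sketch of that "release the list lock, then take the per-object lock" ordering using plain pthread mutexes; the variable names are invented.

    #include <stdio.h>
    #include <pthread.h>

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t sock_lock = PTHREAD_MUTEX_INITIALIZER;
    static void *dev_ptr = (void *)0x1; /* stands in for s->ax25_dev */

    int main(void)
    {
        /* Mirrors the fixed ordering: leave the list lock before taking the
         * possibly-sleeping per-socket lock, and clear the pointer only
         * under the latter. */
        pthread_mutex_lock(&list_lock);
        pthread_mutex_unlock(&list_lock);

        pthread_mutex_lock(&sock_lock);
        dev_ptr = NULL;
        pthread_mutex_unlock(&sock_lock);

        printf("dev_ptr cleared under the socket lock: %s\n",
               dev_ptr == NULL ? "yes" : "no");
        return 0;
    }
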
+diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
+index 25298b3eb8546..17ca9a681d47b 100644
+--- a/net/netfilter/nfnetlink_log.c
++++ b/net/netfilter/nfnetlink_log.c
+@@ -509,7 +509,8 @@ __build_packet_message(struct nfnl_log_net *log,
+               goto nla_put_failure;
+ 
+       if (indev && skb->dev &&
+-          skb->mac_header != skb->network_header) {
++          skb_mac_header_was_set(skb) &&
++          skb_mac_header_len(skb) != 0) {
+               struct nfulnl_msg_packet_hw phw;
+               int len;
+ 
+diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
+index eb5a052d3b252..8955431f2ab26 100644
+--- a/net/netfilter/nfnetlink_queue.c
++++ b/net/netfilter/nfnetlink_queue.c
+@@ -566,7 +566,8 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
+               goto nla_put_failure;
+ 
+       if (indev && entskb->dev &&
+-          skb_mac_header_was_set(entskb)) {
++          skb_mac_header_was_set(entskb) &&
++          skb_mac_header_len(entskb) != 0) {
+               struct nfqnl_msg_packet_hw phw;
+               int len;
+ 
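
Both nfnetlink_log and nfnetlink_queue now emit the hardware-address attribute only when a MAC header was actually set and has non-zero length, instead of comparing raw header offsets, so packets from devices without a link-layer header no longer produce a bogus attribute. A standalone sketch of that guard; struct fake_skb just mimics the two skb helpers named in the diff.

    #include <stdio.h>

    struct fake_skb {
        int mac_header_set; /* stand-in for skb_mac_header_was_set() */
        int mac_header_len; /* stand-in for skb_mac_header_len() */
    };

    /* Mirrors the fixed condition: header present and of non-zero length. */
    static int should_emit_hwaddr(const struct fake_skb *skb)
    {
        return skb->mac_header_set && skb->mac_header_len != 0;
    }

    int main(void)
    {
        struct fake_skb eth = { 1, 14 }; /* Ethernet frame */
        struct fake_skb tun = { 0, 0 };  /* L3 device without a MAC header */

        printf("eth=%d tun=%d\n", should_emit_hwaddr(&eth), should_emit_hwaddr(&tun));
        return 0;
    }
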
+diff --git a/net/phonet/pep.c b/net/phonet/pep.c
+index db34735403035..c0b4cc1e108b3 100644
+--- a/net/phonet/pep.c
++++ b/net/phonet/pep.c
+@@ -959,6 +959,8 @@ static int pep_ioctl(struct sock *sk, int cmd, unsigned long arg)
+                       ret =  -EBUSY;
+               else if (sk->sk_state == TCP_ESTABLISHED)
+                       ret = -EISCONN;
++              else if (!pn->pn_sk.sobject)
++                      ret = -EADDRNOTAVAIL;
+               else
+                       ret = pep_sock_enable(sk, NULL, 0);
+               release_sock(sk);
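
The phonet change makes the enable ioctl fail with -EADDRNOTAVAIL when the socket has no bound source object, rather than proceeding with an unusable pipe. A minimal sketch of that early check; mock_enable_pipe() is an invented stand-in.

    #include <stdio.h>
    #include <errno.h>

    /* Stand-in for the enable path: refuse when no source object is bound. */
    static int mock_enable_pipe(int bound_object)
    {
        if (!bound_object)
            return -EADDRNOTAVAIL;
        return 0;
    }

    int main(void)
    {
        printf("bound: %d, unbound: %d\n",
               mock_enable_pipe(1), mock_enable_pipe(0));
        return 0;
    }
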
+diff --git a/sound/core/jack.c b/sound/core/jack.c
+index 84c2a17c56ee3..847a8f3fd06ea 100644
+--- a/sound/core/jack.c
++++ b/sound/core/jack.c
+@@ -234,6 +234,10 @@ int snd_jack_new(struct snd_card *card, const char *id, int type,
+               return -ENOMEM;
+ 
+       jack->id = kstrdup(id, GFP_KERNEL);
++      if (jack->id == NULL) {
++              kfree(jack);
++              return -ENOMEM;
++      }
+ 
+       /* don't creat input device for phantom jack */
+       if (!phantom_jack) {
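
snd_jack_new() now handles kstrdup() failure by freeing the partially set up jack and returning -ENOMEM instead of continuing with a NULL id. A userspace sketch of the same cleanup-on-failure pattern, with strdup() standing in for kstrdup() and struct fake_jack invented.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <errno.h>

    struct fake_jack { char *id; };

    static int mock_jack_new(const char *id, struct fake_jack **out)
    {
        struct fake_jack *jack = calloc(1, sizeof(*jack));

        if (!jack)
            return -ENOMEM;

        jack->id = strdup(id);
        if (!jack->id) {
            /* As in the fix: undo the earlier allocation before bailing out. */
            free(jack);
            return -ENOMEM;
        }

        *out = jack;
        return 0;
    }

    int main(void)
    {
        struct fake_jack *j = NULL;
        int err = mock_jack_new("Headphone Jack", &j);

        printf("err=%d id=%s\n", err, err ? "(none)" : j->id);
        return 0;
    }
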
+diff --git a/sound/drivers/opl3/opl3_midi.c b/sound/drivers/opl3/opl3_midi.c
+index a33cb744e96c8..4e77b1dcacc8b 100644
+--- a/sound/drivers/opl3/opl3_midi.c
++++ b/sound/drivers/opl3/opl3_midi.c
+@@ -412,7 +412,7 @@ void snd_opl3_note_on(void *p, int note, int vel, struct snd_midi_channel *chan)
+       }
+       if (instr_4op) {
+               vp2 = &opl3->voices[voice + 3];
+-              if (vp->state > 0) {
++              if (vp2->state > 0) {
+                       opl3_reg = reg_side | (OPL3_REG_KEYON_BLOCK +
+                                              voice_offset + 3);
+                       reg_val = vp->keyon_reg & ~OPL3_KEYON_BIT;
