If the device is running when the MTU is changed, ibmveth
is closed and the bounce buffer is freed. If a transmission
is attempted before ibmveth can be reopened, ibmveth_start_xmit
tries to copy into the now-NULL bounce buffer, causing a kernel
oops. The proposed solution disables the tx queue until
ibmveth is restarted.
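
For illustration, a minimal sketch of the race being fixed; the
bounce-buffer copy mirrors the driver's small-packet transmit path,
but the snippet is a simplified paraphrase, not the literal code:

    /* CPU 0: ibmveth_change_mtu() on a running device */
    ibmveth_close(adapter->netdev);   /* frees adapter->bounce_buffer,
                                       * leaving it NULL */
    /* ... buffer pools are reconfigured for the new MTU ... */

    /* CPU 1: ibmveth_start_xmit(), tx queue still active */
    skb_copy_from_linear_data(skb, adapter->bounce_buffer,
                              skb->len);  /* NULL dereference -> oops */

netif_tx_disable() stops the queue while holding the tx lock, so a
transmission cannot race with the close, and netif_wake_queue()
re-enables transmission only after ibmveth_open() has reallocated
the bounce buffer.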

Reported-by: Jan Stancek <jstan...@redhat.com>
Tested-by: Jan Stancek <jstan...@redhat.com>
Signed-off-by: Thomas Falcon <tlfal...@linux.vnet.ibm.com>
---
 drivers/net/ethernet/ibm/ibmveth.c | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index ebe6071..9a74e4c 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1362,6 +1362,7 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
        /* Deactivate all the buffer pools so that the next loop can activate
           only the buffer pools necessary to hold the new MTU */
        if (netif_running(adapter->netdev)) {
+               netif_tx_disable(dev);
                need_restart = 1;
                adapter->pool_config = 1;
                ibmveth_close(adapter->netdev);
@@ -1378,14 +1379,18 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
                                                ibmveth_get_desired_dma
                                                (viodev));
                        if (need_restart) {
-                               return ibmveth_open(adapter->netdev);
+                               rc = ibmveth_open(adapter->netdev);
+                               netif_wake_queue(dev);
+                               return rc;
                        }
                        return 0;
                }
        }
 
-       if (need_restart && (rc = ibmveth_open(adapter->netdev)))
+       if (need_restart && (rc = ibmveth_open(adapter->netdev))) {
+               netif_wake_queue(dev);
                return rc;
+       }
 
        return -EINVAL;
 }
-- 
2.4.11
