We only need to hold rtnl_lock() around ndo calls. The device offload initialization doesn't require it. Neither will the soon-to-come querying of the offload info. Use struct rw_semaphore, because map offload will require sleeping with the semaphore held for read.
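For illustration only, a rough sketch of the read-side pattern the upcoming query/map offload work is expected to follow (the function name and body are hypothetical, not part of this patch):

	/* Hypothetical read-side user, for illustration only */
	static int bpf_prog_offload_query_example(struct bpf_prog *prog)
	{
		int err;

		down_read(&bpf_devs_lock);
		/* the netdev cannot be unregistered while the rwsem is
		 * held, and sleeping (e.g. in a driver callback) is
		 * allowed here, unlike under RTNL-only schemes that
		 * assume atomic sections
		 */
		err = prog->aux->offload ? 0 : -ENODEV;
		up_read(&bpf_devs_lock);

		return err;
	}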
Suggested-by: Kirill Tkhai <ktk...@virtuozzo.com>
Signed-off-by: Jakub Kicinski <jakub.kicin...@netronome.com>
Reviewed-by: Quentin Monnet <quentin.mon...@netronome.com>
---
 kernel/bpf/offload.c | 21 +++++++++++++++++----
 1 file changed, 17 insertions(+), 4 deletions(-)

diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c
index 8455b89d1bbf..b88e5ebdc61d 100644
--- a/kernel/bpf/offload.c
+++ b/kernel/bpf/offload.c
@@ -20,8 +20,12 @@
 #include <linux/netdevice.h>
 #include <linux/printk.h>
 #include <linux/rtnetlink.h>
+#include <linux/rwsem.h>
 
-/* protected by RTNL */
+/* Protects bpf_prog_offload_devs and offload members of all progs.
+ * RTNL lock cannot be taken when holding this lock.
+ */
+static struct rw_semaphore bpf_devs_lock;
 static LIST_HEAD(bpf_prog_offload_devs);
 
 int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
@@ -43,17 +47,21 @@ int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
 	offload->prog = prog;
 	init_waitqueue_head(&offload->verifier_done);
 
-	rtnl_lock();
+	/* Our UNREGISTER notifier will grab bpf_devs_lock, so we are safe
+	 * to assume the netdev doesn't get unregistered as long as we hold
+	 * bpf_devs_lock.
+	 */
+	down_write(&bpf_devs_lock);
 	offload->netdev = __dev_get_by_index(net, attr->prog_ifindex);
 	if (!offload->netdev) {
-		rtnl_unlock();
+		up_write(&bpf_devs_lock);
 		kfree(offload);
 		return -EINVAL;
 	}
 
 	prog->aux->offload = offload;
 	list_add_tail(&offload->offloads, &bpf_prog_offload_devs);
-	rtnl_unlock();
+	up_write(&bpf_devs_lock);
 
 	return 0;
 }
@@ -126,7 +134,9 @@ void bpf_prog_offload_destroy(struct bpf_prog *prog)
 	wake_up(&offload->verifier_done);
 
 	rtnl_lock();
+	down_write(&bpf_devs_lock);
 	__bpf_prog_offload_destroy(prog);
+	up_write(&bpf_devs_lock);
 	rtnl_unlock();
 
 	kfree(offload);
@@ -181,11 +191,13 @@ static int bpf_offload_notification(struct notifier_block *notifier,
 		if (netdev->reg_state != NETREG_UNREGISTERING)
 			break;
 
+		down_write(&bpf_devs_lock);
 		list_for_each_entry_safe(offload, tmp, &bpf_prog_offload_devs,
 					 offloads) {
 			if (offload->netdev == netdev)
 				__bpf_prog_offload_destroy(offload->prog);
 		}
+		up_write(&bpf_devs_lock);
 		break;
 	default:
 		break;
@@ -199,6 +211,7 @@ static struct notifier_block bpf_offload_notifier = {
 
 static int __init bpf_offload_init(void)
 {
+	init_rwsem(&bpf_devs_lock);
 	register_netdevice_notifier(&bpf_offload_notifier);
 	return 0;
 }
-- 
2.15.1