From: "Matthew Wilcox (Oracle)" <wi...@infradead.org> Use the xarray spinlock instead of the client spinlock.
Signed-off-by: Matthew Wilcox (Oracle) <wi...@infradead.org>
---
 include/net/9p/client.h |  2 +-
 net/9p/client.c         | 41 ++++++++++++++++++++---------------------
 2 files changed, 21 insertions(+), 22 deletions(-)

diff --git a/include/net/9p/client.h b/include/net/9p/client.h
index acc60d8a3b3b..6fe36ca0c32e 100644
--- a/include/net/9p/client.h
+++ b/include/net/9p/client.h
@@ -124,7 +124,7 @@ struct p9_client {
 	} trans_opts;
 
 	struct idr fids;
-	struct idr reqs;
+	struct xarray reqs;
 
 	char name[__NEW_UTS_LEN + 1];
 };
diff --git a/net/9p/client.c b/net/9p/client.c
index 9622f3e469f6..5c566e48f63e 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -269,7 +269,8 @@ p9_tag_alloc(struct p9_client *c, int8_t type, unsigned int max_size)
 {
 	struct p9_req_t *req = kmem_cache_alloc(p9_req_cache, GFP_NOFS);
 	int alloc_msize = min(c->msize, max_size);
-	int tag;
+	int err;
+	u32 tag;
 
 	if (!req)
 		return ERR_PTR(-ENOMEM);
@@ -285,17 +286,17 @@ p9_tag_alloc(struct p9_client *c, int8_t type, unsigned int max_size)
 	init_waitqueue_head(&req->wq);
 	INIT_LIST_HEAD(&req->req_list);
 
-	idr_preload(GFP_NOFS);
-	spin_lock_irq(&c->lock);
-	if (type == P9_TVERSION)
-		tag = idr_alloc(&c->reqs, req, P9_NOTAG, P9_NOTAG + 1,
-				GFP_NOWAIT);
-	else
-		tag = idr_alloc(&c->reqs, req, 0, P9_NOTAG, GFP_NOWAIT);
+	xa_lock_irq(&c->reqs);
+	if (type == P9_TVERSION) {
+		tag = P9_NOTAG;
+		err = __xa_insert(&c->reqs, P9_NOTAG, req, GFP_NOFS);
+	} else {
+		err = __xa_alloc(&c->reqs, &tag, req, XA_LIMIT(0, P9_NOTAG - 1),
+				GFP_NOFS);
+	}
 	req->tc.tag = tag;
-	spin_unlock_irq(&c->lock);
-	idr_preload_end();
-	if (tag < 0)
+	xa_unlock_irq(&c->reqs);
+	if (err < 0)
 		goto free;
 
 	/* Init ref to two because in the general case there is one ref
@@ -334,7 +335,7 @@ struct p9_req_t *p9_tag_lookup(struct p9_client *c, u16 tag)
 
 	rcu_read_lock();
 again:
-	req = idr_find(&c->reqs, tag);
+	req = xa_load(&c->reqs, tag);
 	if (req) {
 		/* We have to be careful with the req found under rcu_read_lock
 		 * Thanks to SLAB_TYPESAFE_BY_RCU we can safely try to get the
@@ -367,9 +368,9 @@ static int p9_tag_remove(struct p9_client *c, struct p9_req_t *r)
 	u16 tag = r->tc.tag;
 
 	p9_debug(P9_DEBUG_MUX, "clnt %p req %p tag: %d\n", c, r, tag);
-	spin_lock_irqsave(&c->lock, flags);
-	idr_remove(&c->reqs, tag);
-	spin_unlock_irqrestore(&c->lock, flags);
+	xa_lock_irqsave(&c->reqs, flags);
+	__xa_erase(&c->reqs, tag);
+	xa_unlock_irqrestore(&c->reqs, flags);
 	return p9_req_put(r);
 }
 
@@ -397,16 +398,14 @@ EXPORT_SYMBOL(p9_req_put);
 static void p9_tag_cleanup(struct p9_client *c)
 {
 	struct p9_req_t *req;
-	int id;
+	unsigned long id;
 
-	rcu_read_lock();
-	idr_for_each_entry(&c->reqs, req, id) {
-		pr_info("Tag %d still in use\n", id);
+	xa_for_each(&c->reqs, id, req) {
+		pr_info("Tag %ld still in use\n", id);
 		if (p9_tag_remove(c, req) == 0)
 			pr_warn("Packet with tag %d has still references",
 				req->tc.tag);
 	}
-	rcu_read_unlock();
 }
 
 /**
@@ -1016,7 +1015,7 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
 
 	spin_lock_init(&clnt->lock);
 	idr_init(&clnt->fids);
-	idr_init(&clnt->reqs);
+	xa_init_flags(&clnt->reqs, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
 
 	err = parse_opts(options, clnt);
 	if (err < 0)
-- 
2.23.0.rc1