Thanks to the acquire semantics of qemu_event_reset and qemu_event_wait, some memory barriers can be removed.
Signed-off-by: Paolo Bonzini <pbonz...@redhat.com>
---
 util/rcu.c | 11 +++++------
 1 file changed, 5 insertions(+), 6 deletions(-)

diff --git a/util/rcu.c b/util/rcu.c
index bceb3e4..9adc5e4 100644
--- a/util/rcu.c
+++ b/util/rcu.c
@@ -82,14 +82,16 @@ static void wait_for_readers(void)
         /* Instead of using atomic_mb_set for index->waiting, and
          * atomic_mb_read for index->ctr, memory barriers are placed
          * manually since writes to different threads are independent.
-         * atomic_mb_set has a smp_wmb before...
+         * qemu_event_reset has acquire semantics, so no memory barrier
+         * is needed here.
          */
-        smp_wmb();
         QLIST_FOREACH(index, &registry, node) {
             atomic_set(&index->waiting, true);
         }
 
-        /* ... and a smp_mb after.  */
+        /* Here, order the stores to index->waiting before the
+         * loads of index->ctr.
+         */
         smp_mb();
 
         QLIST_FOREACH_SAFE(index, &registry, node, tmp) {
@@ -104,9 +106,6 @@ static void wait_for_readers(void)
             }
         }
 
-        /* atomic_mb_read has smp_rmb after.  */
-        smp_rmb();
-
         if (QLIST_EMPTY(&registry)) {
             break;
         }
-- 
2.7.4
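
As an aside for reviewers, here is a minimal standalone sketch of the ordering
argument, using C11 <stdatomic.h> in place of qemu/atomic.h. Everything in it
(event_reset, readers_quiescent, struct reader_data, gp_event) is a hypothetical
stand-in for illustration only, not QEMU code or the QemuEvent implementation:
event_reset() merely models the acquire semantics that the patch relies on.

/* Illustrative sketch only; models the writer side of wait_for_readers(). */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct reader_data {
    atomic_int ctr;       /* stands in for rcu_reader_data::ctr     */
    atomic_bool waiting;  /* stands in for rcu_reader_data::waiting */
};

static struct reader_data reader;   /* one "registered" reader */
static atomic_bool gp_event;        /* stands in for rcu_gp_event */

/* Models an event reset that is (at least) an acquire operation. */
static void event_reset(atomic_bool *ev)
{
    atomic_exchange_explicit(ev, false, memory_order_acquire);
}

/* One iteration of the writer-side loop. */
static bool readers_quiescent(void)
{
    event_reset(&gp_event);

    /* The acquire above takes the place of the removed smp_wmb(): a
     * relaxed store of waiting is enough here. */
    atomic_store_explicit(&reader.waiting, true, memory_order_relaxed);

    /* The full barrier that the patch keeps: order the waiting store
     * before the ctr load, which acquire alone does not guarantee. */
    atomic_thread_fence(memory_order_seq_cst);

    /* No trailing smp_rmb() needed: if ctr is still nonzero the caller
     * goes back to an event wait, whose acquire semantics order the
     * later loads. */
    return atomic_load_explicit(&reader.ctr, memory_order_relaxed) == 0;
}

int main(void)
{
    atomic_store(&reader.ctr, 0);
    printf("reader quiescent: %d\n", (int)readers_quiescent());
    return 0;
}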