At the moment spapr_tce_tables is not protected against races. This converts
it to use the RCU variants of the list helpers. As some of this code is
executed in real mode, the just-introduced list_for_each_entry_rcu_notrace()
is used there.

This converts release_spapr_tce_table() to an RCU-scheduled handler.
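
For reference, a real-mode reader of the list is expected to traverse it under
RCU roughly as below. This is only a sketch, not part of the patch;
kvmppc_find_tce_table() is a made-up name for illustration:

	static struct kvmppc_spapr_tce_table *kvmppc_find_tce_table(
			struct kvm *kvm, unsigned long liobn)
	{
		struct kvmppc_spapr_tce_table *stt;

		/*
		 * The caller must be in an RCU read-side critical section.
		 * The _notrace variant avoids tracing hooks, which are not
		 * safe to call in real mode.
		 */
		list_for_each_entry_rcu_notrace(stt,
				&kvm->arch.spapr_tce_tables, list)
			if (stt->liobn == liobn)
				return stt;

		return NULL;
	}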

Signed-off-by: Alexey Kardashevskiy <[email protected]>
---
 arch/powerpc/include/asm/kvm_host.h |  1 +
 arch/powerpc/kvm/book3s.c           |  2 +-
 arch/powerpc/kvm/book3s_64_vio.c    | 20 +++++++++++---------
 3 files changed, 13 insertions(+), 10 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 98eebbf6..e19d412 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -178,6 +178,7 @@ struct kvmppc_spapr_tce_table {
        struct kvm *kvm;
        u64 liobn;
        u32 window_size;
+       struct rcu_head rcu;
        struct page *pages[0];
 };
 
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 53285d5..3418f7c 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -806,7 +806,7 @@ int kvmppc_core_init_vm(struct kvm *kvm)
 {
 
 #ifdef CONFIG_PPC64
-       INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
+       INIT_LIST_HEAD_RCU(&kvm->arch.spapr_tce_tables);
        INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
 #endif
 
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index 54cf9bc..9526c34 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -45,19 +45,16 @@ static long kvmppc_stt_npages(unsigned long window_size)
                     * sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
 }
 
-static void release_spapr_tce_table(struct kvmppc_spapr_tce_table *stt)
+static void release_spapr_tce_table(struct rcu_head *head)
 {
-       struct kvm *kvm = stt->kvm;
+       struct kvmppc_spapr_tce_table *stt = container_of(head,
+                       struct kvmppc_spapr_tce_table, rcu);
        int i;
 
-       mutex_lock(&kvm->lock);
-       list_del(&stt->list);
        for (i = 0; i < kvmppc_stt_npages(stt->window_size); i++)
                __free_page(stt->pages[i]);
+
        kfree(stt);
-       mutex_unlock(&kvm->lock);
-
-       kvm_put_kvm(kvm);
 }
 
 static int kvm_spapr_tce_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
@@ -88,7 +85,12 @@ static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
 {
        struct kvmppc_spapr_tce_table *stt = filp->private_data;
 
-       release_spapr_tce_table(stt);
+       list_del_rcu(&stt->list);
+
+       kvm_put_kvm(stt->kvm);
+
+       call_rcu(&stt->rcu, release_spapr_tce_table);
+
        return 0;
 }
 
@@ -131,7 +133,7 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
        kvm_get_kvm(kvm);
 
        mutex_lock(&kvm->lock);
-       list_add(&stt->list, &kvm->arch.spapr_tce_tables);
+       list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables);
 
        mutex_unlock(&kvm->lock);
 
-- 
2.4.0.rc3.8.gfb3e7d5
