From: Nikita Kalyazin <[email protected]>

The write syscall populates guest_memfd with user-supplied data in a
generic way, i.e. without any vendor-specific preparation.  If the
request is not page-aligned, the remaining bytes of the page are
initialised to 0.

write is only supported for non-CoCo setups where guest memory is not
hardware-encrypted.
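
For illustration, a minimal user-space sketch of the intended flow (not
part of the patch).  GUEST_MEMFD_FLAG_MMAP is assumed to be the flag
that makes the gmem file host-accessible in your tree; adjust to what
your headers actually export.  Error handling is elided:

  #include <fcntl.h>
  #include <stdio.h>
  #include <sys/ioctl.h>
  #include <unistd.h>
  #include <linux/kvm.h>

  int main(void)
  {
          int kvm = open("/dev/kvm", O_RDWR);
          int vm = ioctl(kvm, KVM_CREATE_VM, 0);
          struct kvm_create_guest_memfd args = {
                  .size = 0x200000,               /* 2 MiB */
                  .flags = GUEST_MEMFD_FLAG_MMAP, /* assumed flag name */
          };
          int gmem = ioctl(vm, KVM_CREATE_GUEST_MEMFD, &args);
          char blob[] = "guest payload";

          /* Unaligned write: the rest of the page reads back as 0. */
          if (pwrite(gmem, blob, sizeof(blob), 0) < 0)
                  perror("pwrite");

          close(gmem);
          close(vm);
          close(kvm);
          return 0;
  }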

Signed-off-by: Nikita Kalyazin <[email protected]>
---
 virt/kvm/guest_memfd.c | 48 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 48 insertions(+)
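
Note for reviewers: the new hooks are driven by the generic write path.
A simplified paraphrase (not verbatim kernel code) of the loop in
generic_perform_write(), mm/filemap.c:

  /* for each chunk of the iov_iter i: */
  status = a_ops->write_begin(iocb, mapping, pos, bytes, &folio, &fsdata);
  copied = copy_folio_from_iter_atomic(folio, offset_in_folio(folio, pos),
                                       bytes, i);
  status = a_ops->write_end(iocb, mapping, pos, bytes, copied, folio, fsdata);

write_begin must hand back a locked folio; write_end unlocks and drops
it and reports how much was actually copied.  On a short copy, the
zeroed tail guarantees no uninitialised bytes are left in the folio.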

diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
index 94bafd6c558c..f4e218049afa 100644
--- a/virt/kvm/guest_memfd.c
+++ b/virt/kvm/guest_memfd.c
@@ -380,6 +380,8 @@ static int kvm_gmem_mmap(struct file *file, struct vm_area_struct *vma)
 
 static struct file_operations kvm_gmem_fops = {
        .mmap           = kvm_gmem_mmap,
+       .llseek         = default_llseek,
+       .write_iter     = generic_perform_write,
        .open           = generic_file_open,
        .release        = kvm_gmem_release,
        .fallocate      = kvm_gmem_fallocate,
@@ -390,6 +392,49 @@ void kvm_gmem_init(struct module *module)
        kvm_gmem_fops.owner = module;
 }
 
+static int kvm_gmem_write_begin(const struct kiocb *kiocb,
+                                struct address_space *mapping,
+                                loff_t pos, unsigned int len,
+                                struct folio **foliop,
+                                void **fsdata)
+{
+       struct file *file = kiocb->ki_filp;
+       struct inode *inode = file_inode(file);
+       pgoff_t index = pos >> PAGE_SHIFT;
+       struct folio *folio;
+
+       if (!kvm_gmem_supports_mmap(inode))
+               return -ENODEV;
+
+       if (pos + len > i_size_read(inode))
+               return -EINVAL;
+
+       folio = kvm_gmem_get_folio(inode, index);
+       if (IS_ERR(folio))
+               return PTR_ERR(folio);
+
+       *foliop = folio;
+       return 0;
+}
+
+static int kvm_gmem_write_end(const struct kiocb *kiocb,
+                              struct address_space *mapping,
+                              loff_t pos, unsigned int len,
+                              unsigned int copied,
+                              struct folio *folio, void *fsdata)
+{
+       if (copied && copied < len) {
+               size_t from = offset_in_folio(folio, pos);
+
+               folio_zero_range(folio, from + copied, len - copied);
+       }
+
+       folio_unlock(folio);
+       folio_put(folio);
+
+       return copied;
+}
+
 static int kvm_gmem_migrate_folio(struct address_space *mapping,
                                  struct folio *dst, struct folio *src,
                                  enum migrate_mode mode)
@@ -442,6 +487,8 @@ static void kvm_gmem_free_folio(struct folio *folio)
 
 static const struct address_space_operations kvm_gmem_aops = {
        .dirty_folio = noop_dirty_folio,
+       .write_begin = kvm_gmem_write_begin,
+       .write_end = kvm_gmem_write_end,
        .migrate_folio  = kvm_gmem_migrate_folio,
        .error_remove_folio = kvm_gmem_error_folio,
 #ifdef CONFIG_HAVE_KVM_ARCH_GMEM_INVALIDATE
@@ -489,6 +536,7 @@ static int __kvm_gmem_create(struct kvm *kvm, loff_t size, u64 flags)
        }
 
        file->f_flags |= O_LARGEFILE;
+       file->f_mode |= FMODE_LSEEK | FMODE_PWRITE;
 
        inode = file->f_inode;
        WARN_ON(file->f_mapping != inode->i_mapping);
-- 
2.50.1

