On Thursday, March 5, 2020 11:29:41 AM -03 David Hildenbrand wrote:
> Implement resizeable mmaps. For now, the actual resizing is not wired up.
> Introduce qemu_ram_mmap_resizeable() and qemu_ram_mmap_resize(). Make
> qemu_ram_mmap() a wrapper of qemu_ram_mmap_resizeable().
>
> Reviewed-by: Peter Xu <[email protected]>
> Cc: Richard Henderson <[email protected]>
> Cc: Igor Kotrasinski <[email protected]>
> Cc: Murilo Opsfelder Araujo <[email protected]>
> Cc: "Michael S. Tsirkin" <[email protected]>
> Cc: Greg Kurz <[email protected]>
> Cc: Eduardo Habkost <[email protected]>
> Cc: "Dr. David Alan Gilbert" <[email protected]>
> Cc: Igor Mammedov <[email protected]>
> Signed-off-by: David Hildenbrand <[email protected]>
> ---

Acked-by: Murilo Opsfelder Araujo <[email protected]>
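For anyone reading along who hasn't followed the earlier patches: the
reserve-then-activate idea can be illustrated with plain mmap(2). This is
only a rough sketch of the pattern, not the series' actual mmap_reserve()
and mmap_activate() helpers (which additionally deal with hugetlbfs,
alignment and pmem); reserve_and_activate() is a made-up name:

    #include <stddef.h>
    #include <sys/mman.h>

    /*
     * Reserve max_size of address space, then back only the first
     * 'size' bytes with fd. The PROT_NONE reservation is never
     * committed, so the inaccessible tail costs (almost) nothing
     * but virtual address space.
     */
    static void *reserve_and_activate(int fd, size_t size, size_t max_size)
    {
        void *ptr = mmap(NULL, max_size, PROT_NONE,
                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);

        if (ptr == MAP_FAILED) {
            return MAP_FAILED;
        }
        /* MAP_FIXED deliberately replaces part of the reservation. */
        if (mmap(ptr, size, PROT_READ | PROT_WRITE,
                 MAP_SHARED | MAP_FIXED, fd, 0) == MAP_FAILED) {
            munmap(ptr, max_size);
            return MAP_FAILED;
        }
        return ptr;
    }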
> include/qemu/mmap-alloc.h | 21 +++++++++++--------
> util/mmap-alloc.c         | 43 ++++++++++++++++++++++++++++-----------
> 2 files changed, 44 insertions(+), 20 deletions(-)
>
> diff --git a/include/qemu/mmap-alloc.h b/include/qemu/mmap-alloc.h
> index e786266b92..ca8f7edf70 100644
> --- a/include/qemu/mmap-alloc.h
> +++ b/include/qemu/mmap-alloc.h
> @@ -7,11 +7,13 @@ size_t qemu_fd_getpagesize(int fd);
>  size_t qemu_mempath_getpagesize(const char *mem_path);
>  
>  /**
> - * qemu_ram_mmap: mmap the specified file or device.
> + * qemu_ram_mmap_resizeable: reserve a memory region of @max_size to mmap the
> + *                           specified file or device and mmap @size of it.
>   *
>   * Parameters:
>   *  @fd: the file or the device to mmap
>   *  @size: the number of bytes to be mmaped
> + *  @max_size: the number of bytes to be reserved
>   *  @align: if not zero, specify the alignment of the starting mapping address;
>   *          otherwise, the alignment in use will be determined by QEMU.
>   *  @shared: map has RAM_SHARED flag.
> @@ -21,12 +23,15 @@ size_t qemu_mempath_getpagesize(const char *mem_path);
>   * On success, return a pointer to the mapped area.
>   * On failure, return MAP_FAILED.
>   */
> -void *qemu_ram_mmap(int fd,
> -                    size_t size,
> -                    size_t align,
> -                    bool shared,
> -                    bool is_pmem);
> -
> -void qemu_ram_munmap(int fd, void *ptr, size_t size);
> +void *qemu_ram_mmap_resizeable(int fd, size_t size, size_t max_size,
> +                               size_t align, bool shared, bool is_pmem);
> +bool qemu_ram_mmap_resize(void *ptr, int fd, size_t old_size, size_t new_size,
> +                          bool shared, bool is_pmem);
> +static inline void *qemu_ram_mmap(int fd, size_t size, size_t align,
> +                                  bool shared, bool is_pmem)
> +{
> +    return qemu_ram_mmap_resizeable(fd, size, size, align, shared, is_pmem);
> +}
> +void qemu_ram_munmap(int fd, void *ptr, size_t max_size);
>  
>  #endif
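The calling convention that falls out of the header above, as I read it
(the fd, sizes and flags below are made up for illustration):

    /* Hypothetical caller; error handling trimmed. */
    size_t page = qemu_fd_getpagesize(fd);

    /* Make 16 pages accessible now, reserve room for 64. */
    void *ptr = qemu_ram_mmap_resizeable(fd, 16 * page /* size */,
                                         64 * page /* max_size */,
                                         0 /* align */, true /* shared */,
                                         false /* is_pmem */);
    if (ptr == MAP_FAILED) {
        return -1;
    }

    /* Grow the accessible part in place; ptr itself never changes. */
    if (!qemu_ram_mmap_resize(ptr, fd, 16 * page, 32 * page, true, false)) {
        /* handle resize failure */
    }

    /* Teardown takes max_size: the whole reservation is unmapped. */
    qemu_ram_munmap(fd, ptr, 64 * page);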
> diff --git a/util/mmap-alloc.c b/util/mmap-alloc.c
> index 2767caa33b..7ed85f16d3 100644
> --- a/util/mmap-alloc.c
> +++ b/util/mmap-alloc.c
> @@ -174,11 +174,8 @@ static inline size_t mmap_guard_pagesize(int fd)
>  #endif
>  }
>  
> -void *qemu_ram_mmap(int fd,
> -                    size_t size,
> -                    size_t align,
> -                    bool shared,
> -                    bool is_pmem)
> +void *qemu_ram_mmap_resizeable(int fd, size_t size, size_t max_size,
> +                               size_t align, bool shared, bool is_pmem)
>  {
>      const size_t guard_pagesize = mmap_guard_pagesize(fd);
>      const size_t pagesize = qemu_fd_getpagesize(fd);
> @@ -186,12 +183,14 @@ void *qemu_ram_mmap(int fd,
>      void *ptr, *guardptr;
>  
>      g_assert(QEMU_IS_ALIGNED(size, pagesize));
> +    g_assert(QEMU_IS_ALIGNED(max_size, pagesize));
>  
>      /*
>       * Note: this always allocates at least one extra page of virtual address
> -     * space, even if size is already aligned.
> +     * space, even if the size is already aligned. We will reserve an area of
> +     * at least max_size, but only activate the requested part of it.
>       */
> -    total = size + align;
> +    total = max_size + align;
>  
>      guardptr = mmap_reserve(NULL, total, fd);
>      if (guardptr == MAP_FAILED) {
> @@ -219,21 +218,41 @@ void *qemu_ram_mmap(int fd,
>       * a guard page guarding against potential buffer overflows.
>       */
>      total -= offset;
> -    if (total > size + guard_pagesize) {
> -        munmap(ptr + size + guard_pagesize, total - size - guard_pagesize);
> +    if (total > max_size + guard_pagesize) {
> +        munmap(ptr + max_size + guard_pagesize,
> +               total - max_size - guard_pagesize);
>      }
>  
>      return ptr;
>  }
>  
> -void qemu_ram_munmap(int fd, void *ptr, size_t size)
> +bool qemu_ram_mmap_resize(void *ptr, int fd, size_t old_size, size_t new_size,
> +                          bool shared, bool is_pmem)
>  {
>      const size_t pagesize = qemu_fd_getpagesize(fd);
>  
> -    g_assert(QEMU_IS_ALIGNED(size, pagesize));
> +    g_assert(QEMU_IS_ALIGNED(old_size, pagesize));
> +    g_assert(QEMU_IS_ALIGNED(new_size, pagesize));
> +
> +    if (old_size < new_size) {
> +        /* activate the missing piece in the reserved area */
> +        ptr = mmap_activate(ptr + old_size, new_size - old_size, fd, old_size,
> +                            shared, is_pmem);
> +    } else if (old_size > new_size) {
> +        /* discard this piece, marking it reserved */
> +        ptr = mmap_reserve(ptr + new_size, old_size - new_size, fd);
> +    }
> +    return ptr != MAP_FAILED;
> +}
> +
> +void qemu_ram_munmap(int fd, void *ptr, size_t max_size)
> +{
> +    const size_t pagesize = qemu_fd_getpagesize(fd);
> +
> +    g_assert(QEMU_IS_ALIGNED(max_size, pagesize));
>  
>      if (ptr) {
>          /* Unmap both the RAM block and the guard page */
> -        munmap(ptr, size + mmap_guard_pagesize(fd));
> +        munmap(ptr, max_size + mmap_guard_pagesize(fd));
>      }
>  }
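One detail that is easy to miss above: shrinking does not simply munmap()
the tail, it hands it back to mmap_reserve(), presumably so the range stays
owned by QEMU and a later grow cannot collide with an unrelated mapping the
kernel placed there in the meantime. That is also why qemu_ram_mmap_resize()
can get away with returning a plain success flag: the mapping never moves.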
--
Murilo