Prepare for multithreading of rumpdisk by protecting critical paths from concurrent access with a global mutex.
TESTED to still boot via rump with this change even though multithread is not yet enabled. --- rumpdisk/block-rump.c | 63 ++++++++++++++++++++++++++++++++++++------- 1 file changed, 53 insertions(+), 10 deletions(-) diff --git a/rumpdisk/block-rump.c b/rumpdisk/block-rump.c index 10fa66ad..03731b36 100644 --- a/rumpdisk/block-rump.c +++ b/rumpdisk/block-rump.c @@ -52,6 +52,9 @@ static bool disabled; static mach_port_t master_host; +/* Lock to protect concurrent multithread access */ +pthread_mutex_t rumpdisk_rwlock = PTHREAD_MUTEX_INITIALIZER; + /* One of these is associated with each open instance of a device. */ struct block_data { @@ -183,16 +186,26 @@ rumpdisk_device_close (void *d) struct block_data *bd = d; io_return_t err; + pthread_mutex_lock (&rumpdisk_rwlock); + bd->taken--; if (bd->taken) - return D_SUCCESS; + { + pthread_mutex_unlock (&rumpdisk_rwlock); + return D_SUCCESS; + } err = rump_errno2host (rump_sys_close (bd->rump_fd)); if (err != D_SUCCESS) - return err; + { + pthread_mutex_unlock (&rumpdisk_rwlock); + return err; + } ports_port_deref (bd); ports_destroy_right (bd); + + pthread_mutex_unlock (&rumpdisk_rwlock); return D_SUCCESS; } @@ -233,22 +246,32 @@ rumpdisk_device_open (mach_port_t reply_port, mach_msg_type_name_t reply_port_ty bd = search_bd (name); if (bd) { + pthread_mutex_lock (&rumpdisk_rwlock); + bd->taken++; *devp = ports_get_right (bd); *devicePoly = MACH_MSG_TYPE_MAKE_SEND; + + pthread_mutex_unlock (&rumpdisk_rwlock); return D_SUCCESS; } translate_name (dev_name, DISK_NAME_LEN, name); + pthread_mutex_lock(&rumpdisk_rwlock); + fd = rump_sys_open (dev_name, dev_mode_to_rump_mode (mode)); if (fd < 0) - return rump_errno2host (errno); + { + pthread_mutex_unlock (&rumpdisk_rwlock); + return rump_errno2host (errno); + } ret = rump_sys_ioctl (fd, DIOCGMEDIASIZE, &media_size); if (ret < 0) { mach_print ("DIOCGMEDIASIZE ioctl fails\n"); + pthread_mutex_unlock (&rumpdisk_rwlock); return rump_errno2host (errno); } @@ -256,6 +279,7 @@ 
rumpdisk_device_open (mach_port_t reply_port, mach_msg_type_name_t reply_port_ty if (ret < 0) { mach_print ("DIOCGSECTORSIZE ioctl fails\n"); + pthread_mutex_unlock (&rumpdisk_rwlock); return rump_errno2host (errno); } @@ -263,6 +287,7 @@ rumpdisk_device_open (mach_port_t reply_port, mach_msg_type_name_t reply_port_ty if (err != 0) { rump_sys_close (fd); + pthread_mutex_unlock (&rumpdisk_rwlock); return err; } @@ -280,6 +305,8 @@ rumpdisk_device_open (mach_port_t reply_port, mach_msg_type_name_t reply_port_ty *devp = ports_get_right (bd); *devicePoly = MACH_MSG_TYPE_MAKE_SEND; + + pthread_mutex_unlock (&rumpdisk_rwlock); return D_SUCCESS; } @@ -296,6 +323,8 @@ rumpdisk_device_write (void *d, mach_port_t reply_port, if ((bd->mode & D_WRITE) == 0) return D_INVALID_OPERATION; + pthread_mutex_lock (&rumpdisk_rwlock); + if ((vm_offset_t) data % pagesize) { /* Not aligned, have to copy to aligned buffer. */ @@ -307,7 +336,10 @@ rumpdisk_device_write (void *d, mach_port_t reply_port, /* While at it, make it contiguous */ ret = vm_allocate_contiguous (master_host, mach_task_self (), &buf, &pap, count, 0, 0x100000000ULL, 0); if (ret != KERN_SUCCESS) - return ENOMEM; + { + pthread_mutex_unlock (&rumpdisk_rwlock); + return ENOMEM; + } memcpy ((void*) buf, data, count); @@ -317,7 +349,10 @@ rumpdisk_device_write (void *d, mach_port_t reply_port, vm_deallocate (mach_task_self (), (vm_address_t) buf, count); if (written < 0) - return rump_errno2host (err); + { + pthread_mutex_unlock (&rumpdisk_rwlock); + return rump_errno2host (err); + } } else { @@ -343,7 +378,10 @@ rumpdisk_device_write (void *d, mach_port_t reply_port, done = rump_sys_pwrite (bd->rump_fd, (const void *)data + written, todo, (off_t)bn * bd->block_size + written); if (done < 0) - return rump_errno2host (errno); + { + pthread_mutex_unlock (&rumpdisk_rwlock); + return rump_errno2host (errno); + } written += done; } @@ -352,6 +390,7 @@ rumpdisk_device_write (void *d, mach_port_t reply_port, vm_deallocate 
(mach_task_self (), (vm_address_t) data, count); *bytes_written = (int)written; + pthread_mutex_unlock (&rumpdisk_rwlock); return D_SUCCESS; } @@ -375,11 +414,16 @@ rumpdisk_device_read (void *d, mach_port_t reply_port, if (count == 0) return D_SUCCESS; + pthread_mutex_lock (&rumpdisk_rwlock); + /* TODO: directly write at *data when it is aligned */ *data = 0; ret = vm_allocate (mach_task_self (), &buf, npages * pagesize, TRUE); if (ret != KERN_SUCCESS) - return ENOMEM; + { + pthread_mutex_unlock (&rumpdisk_rwlock); + return ENOMEM; + } /* Ensure physical allocation by writing a single byte of each */ for (i = 0; i < npages; i++) @@ -400,6 +444,7 @@ rumpdisk_device_read (void *d, mach_port_t reply_port, { err = errno; vm_deallocate (mach_task_self (), buf, npages * pagesize); + pthread_mutex_unlock (&rumpdisk_rwlock); return rump_errno2host (err); } @@ -408,6 +453,7 @@ rumpdisk_device_read (void *d, mach_port_t reply_port, *bytes_read = done; *data = (void*) buf; + pthread_mutex_unlock (&rumpdisk_rwlock); return D_SUCCESS; } @@ -455,9 +501,6 @@ rumpdisk_device_get_status (void *d, dev_flavor_t flavor, dev_status_t status, * Short term strategy: * * Make device_read/write multithreaded. - * Note: this would require an rwlock between device_open/close/read/write, to - * protect against e.g. concurrent open, unexpected close while read/write is - * called, etc. * * Long term strategy: * -- 2.35.1