Replace the mostly useless exit(1) calls on fatal error paths with abort(), so that a core dump can be generated and used to analyse the cause of the problem.
Signed-off-by: Igor Mammedov <[email protected]> --- hw/virtio/virtio.c | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c index 7ed06ea..9d3ac72 100644 --- a/hw/virtio/virtio.c +++ b/hw/virtio/virtio.c @@ -315,7 +315,7 @@ static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx) if (num_heads > vq->vring.num) { error_report("Guest moved used index from %u to %u", idx, vq->shadow_avail_idx); - exit(1); + abort(); } /* On success, callers read a descriptor at vq->last_avail_idx. * Make sure descriptor read does not bypass avail index read. */ @@ -337,7 +337,7 @@ static unsigned int virtqueue_get_head(VirtQueue *vq, unsigned int idx) /* If their number is silly, that's a fatal mistake. */ if (head >= vq->vring.num) { error_report("Guest says index %u is available", head); - exit(1); + abort(); } return head; @@ -360,7 +360,7 @@ static unsigned virtqueue_read_next_desc(VirtIODevice *vdev, VRingDesc *desc, if (next >= max) { error_report("Desc next is %u", next); - exit(1); + abort(); } vring_desc_read(vdev, desc, desc_pa, next); @@ -393,13 +393,13 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes, if (desc.flags & VRING_DESC_F_INDIRECT) { if (desc.len % sizeof(VRingDesc)) { error_report("Invalid size for indirect buffer table"); - exit(1); + abort(); } /* If we've got too many, that implies a descriptor loop. */ if (num_bufs >= max) { error_report("Looped descriptor"); - exit(1); + abort(); } /* loop over the indirect descriptor table */ @@ -414,7 +414,7 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes, /* If we've got too many, that implies a descriptor loop. 
*/ if (++num_bufs > max) { error_report("Looped descriptor"); - exit(1); + abort(); } if (desc.flags & VRING_DESC_F_WRITE) { @@ -462,7 +462,7 @@ static void virtqueue_map_desc(unsigned int *p_num_sg, hwaddr *addr, struct iove if (num_sg == max_num_sg) { error_report("virtio: too many write descriptors in indirect table"); - exit(1); + abort(); } iov[num_sg].iov_base = cpu_physical_memory_map(pa, &len, is_write); @@ -500,11 +500,11 @@ static void virtqueue_map_iovec(struct iovec *sg, hwaddr *addr, sg[i].iov_base = cpu_physical_memory_map(addr[i], &len, is_write); if (!sg[i].iov_base) { error_report("virtio: error trying to map MMIO memory"); - exit(1); + abort(); } if (len != sg[i].iov_len) { error_report("virtio: unexpected memory split"); - exit(1); + abort(); } } } @@ -570,7 +570,7 @@ void *virtqueue_pop(VirtQueue *vq, size_t sz) if (desc.flags & VRING_DESC_F_INDIRECT) { if (desc.len % sizeof(VRingDesc)) { error_report("Invalid size for indirect buffer table"); - exit(1); + abort(); } /* loop over the indirect descriptor table */ @@ -588,7 +588,7 @@ void *virtqueue_pop(VirtQueue *vq, size_t sz) } else { if (in_num) { error_report("Incorrect order for descriptors"); - exit(1); + abort(); } virtqueue_map_desc(&out_num, addr, iov, VIRTQUEUE_MAX_SIZE, false, desc.addr, desc.len); @@ -597,7 +597,7 @@ void *virtqueue_pop(VirtQueue *vq, size_t sz) /* If we've got too many, that implies a descriptor loop. */ if ((in_num + out_num) > max) { error_report("Looped descriptor"); - exit(1); + abort(); } } while ((i = virtqueue_read_next_desc(vdev, &desc, desc_pa, max)) != max); -- 1.8.3.1
