On Thu, 26 Jun 2025 at 08:47, Cédric Le Goater <[email protected]> wrote:
>
> From: John Levon <[email protected]>
>
> Add the basic implementation for receiving vfio-user messages from the
> control socket.
>
Hi; Coverity suggests there are some issues with this code
(CID 1611807, 1611808, 1611809):
> +/*
> + * Receive and process one incoming message.
> + *
> + * For replies, find matching outgoing request and wake any waiters.
> + * For requests, queue in incoming list and run request BH.
> + */
> +static int vfio_user_recv_one(VFIOUserProxy *proxy, Error **errp)
> +{
> + /*
> + * For replies, find the matching pending request.
> + * For requests, reap incoming FDs.
> + */
> + if (isreply) {
> + QTAILQ_FOREACH(msg, &proxy->pending, next) {
> + if (hdr.id == msg->id) {
> + break;
> + }
> + }
> + if (msg == NULL) {
> + error_setg(errp, "unexpected reply");
> + goto err;
> + }
> + QTAILQ_REMOVE(&proxy->pending, msg, next);
> +
> + /*
> + * Process any received FDs
> + */
> + if (numfds != 0) {
> + if (msg->fds == NULL || msg->fds->recv_fds < numfds) {
> + error_setg(errp, "unexpected FDs");
> + goto err;
> + }
> + msg->fds->recv_fds = numfds;
> + memcpy(msg->fds->fds, fdp, numfds * sizeof(int));
> + }
> + } else {
> + if (numfds != 0) {
> + reqfds = vfio_user_getfds(numfds);
> + memcpy(reqfds->fds, fdp, numfds * sizeof(int));
> + } else {
> + reqfds = NULL;
> + }
Here we allocate memory into reqfds...
> + }
> +
> + /*
> + * Put the whole message into a single buffer.
> + */
> + if (isreply) {
> + if (hdr.size > msg->rsize) {
> + error_setg(errp, "reply larger than recv buffer");
> + goto err;
> + }
> + *msg->hdr = hdr;
> + data = (char *)msg->hdr + sizeof(hdr);
> + } else {
> + buf = g_malloc0(hdr.size);
> + memcpy(buf, &hdr, sizeof(hdr));
> + data = buf + sizeof(hdr);
> + msg = vfio_user_getmsg(proxy, (VFIOUserHdr *)buf, reqfds);
> + msg->type = VFIO_MSG_REQ;
...and here we allocate memory into msg...
> + }
> +
> + /*
> + * Read rest of message.
> + */
> + msgleft = hdr.size - sizeof(hdr);
> + while (msgleft > 0) {
> + ret = qio_channel_read(proxy->ioc, data, msgleft, errp);
> +
> +        /* prepare to complete read on next iteration */
> + if (ret == QIO_CHANNEL_ERR_BLOCK) {
> + proxy->part_recv = msg;
> + proxy->recv_left = msgleft;
> + return ret;
> + }
> +
> + if (ret <= 0) {
> + goto fatal;
> + }
...but here we may take an error-exit codepath to the 'fatal'
label...
> + trace_vfio_user_recv_read(hdr.id, ret);
> +
> + msgleft -= ret;
> + data += ret;
> + }
> +
> + vfio_user_process(proxy, msg, isreply);
> + return 0;
> +
> + /*
> + * fatal means the other side closed or we don't trust the stream
> + * err means this message is corrupt
> + */
> +fatal:
> + vfio_user_shutdown(proxy);
> + proxy->state = VFIO_PROXY_ERROR;
> +
> + /* set error if server side closed */
> + if (ret == 0) {
> + error_setg(errp, "server closed socket");
> + }
> +
> +err:
> + for (i = 0; i < numfds; i++) {
> + close(fdp[i]);
> + }
> + if (isreply && msg != NULL) {
> + /* force an error to keep sending thread from hanging */
> + vfio_user_set_error(msg->hdr, EINVAL);
> + msg->complete = true;
> + qemu_cond_signal(&msg->cv);
> + }
> + return -1;
...and in this error handling codepath we don't do anything
to free either msg or reqfds.
Coverity also wonders whether there is missing locking here,
because this call to qemu_cond_signal() is the only place
that touches msg->cv without holding a lock. But that check
is a heuristic and may be a false positive.
thanks
-- PMM