> -----Original Message-----
> From: Xen-devel <[email protected]> On Behalf Of 
> Oleksandr Tyshchenko
> Sent: 12 January 2021 21:52
> To: [email protected]
> Cc: Oleksandr Tyshchenko <[email protected]>; Paul Durrant 
> <[email protected]>; Jan Beulich
> <[email protected]>; Andrew Cooper <[email protected]>; Roger Pau 
> Monné
> <[email protected]>; Wei Liu <[email protected]>; Julien Grall 
> <[email protected]>; Stefano Stabellini
> <[email protected]>; Julien Grall <[email protected]>
> Subject: [PATCH V4 02/24] x86/ioreq: Add IOREQ_STATUS_* #define-s and update 
> code for moving
> 
> From: Oleksandr Tyshchenko <[email protected]>
> 
> This patch continues the preparation of x86/hvm/ioreq.c before
> moving it to the common code.
> 
> Add IOREQ_STATUS_* #define-s and update the code slated for moving,
> since X86EMUL_* shouldn't be exposed to the common code in that
> form.
> 
> This support is going to be used on Arm to be able to run device
> emulators outside of the Xen hypervisor.
> 
> Signed-off-by: Oleksandr Tyshchenko <[email protected]>

Reviewed-by: Paul Durrant <[email protected]>

> Acked-by: Jan Beulich <[email protected]>
> Reviewed-by: Alex Bennée <[email protected]>
> CC: Julien Grall <[email protected]>
> [On Arm only]
> Tested-by: Wei Chen <[email protected]>
> 
> ---
> Please note, this is a split/cleanup/hardening of Julien's PoC:
> "Add support for Guest IO forwarding to a device emulator"
> 
> Changes V2 -> V3:
>  - new patch, was split from
>    [PATCH V2 01/23] x86/ioreq: Prepare IOREQ feature for making it common
> 
> Changes V3 -> V4:
>  - add Alex's R-b and Jan's A-b
>  - add a comment above IOREQ_STATUS_* #define-s
> ---
>  xen/arch/x86/hvm/ioreq.c        | 16 ++++++++--------
>  xen/include/asm-x86/hvm/ioreq.h |  5 +++++
>  2 files changed, 13 insertions(+), 8 deletions(-)
> 
> diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
> index 468fe84..ff9a546 100644
> --- a/xen/arch/x86/hvm/ioreq.c
> +++ b/xen/arch/x86/hvm/ioreq.c
> @@ -1405,7 +1405,7 @@ static int hvm_send_buffered_ioreq(struct hvm_ioreq_server *s, ioreq_t *p)
>      pg = iorp->va;
> 
>      if ( !pg )
> -        return X86EMUL_UNHANDLEABLE;
> +        return IOREQ_STATUS_UNHANDLED;
> 
>      /*
>       * Return 0 for the cases we can't deal with:
> @@ -1435,7 +1435,7 @@ static int hvm_send_buffered_ioreq(struct hvm_ioreq_server *s, ioreq_t *p)
>          break;
>      default:
>          gdprintk(XENLOG_WARNING, "unexpected ioreq size: %u\n", p->size);
> -        return X86EMUL_UNHANDLEABLE;
> +        return IOREQ_STATUS_UNHANDLED;
>      }
> 
>      spin_lock(&s->bufioreq_lock);
> @@ -1445,7 +1445,7 @@ static int hvm_send_buffered_ioreq(struct hvm_ioreq_server *s, ioreq_t *p)
>      {
>          /* The queue is full: send the iopacket through the normal path. */
>          spin_unlock(&s->bufioreq_lock);
> -        return X86EMUL_UNHANDLEABLE;
> +        return IOREQ_STATUS_UNHANDLED;
>      }
> 
>      pg->buf_ioreq[pg->ptrs.write_pointer % IOREQ_BUFFER_SLOT_NUM] = bp;
> @@ -1476,7 +1476,7 @@ static int hvm_send_buffered_ioreq(struct hvm_ioreq_server *s, ioreq_t *p)
>      notify_via_xen_event_channel(d, s->bufioreq_evtchn);
>      spin_unlock(&s->bufioreq_lock);
> 
> -    return X86EMUL_OKAY;
> +    return IOREQ_STATUS_HANDLED;
>  }
> 
>  int hvm_send_ioreq(struct hvm_ioreq_server *s, ioreq_t *proto_p,
> @@ -1492,7 +1492,7 @@ int hvm_send_ioreq(struct hvm_ioreq_server *s, ioreq_t *proto_p,
>          return hvm_send_buffered_ioreq(s, proto_p);
> 
>      if ( unlikely(!vcpu_start_shutdown_deferral(curr)) )
> -        return X86EMUL_RETRY;
> +        return IOREQ_STATUS_RETRY;
> 
>      list_for_each_entry ( sv,
>                            &s->ioreq_vcpu_list,
> @@ -1532,11 +1532,11 @@ int hvm_send_ioreq(struct hvm_ioreq_server *s, ioreq_t *proto_p,
>              notify_via_xen_event_channel(d, port);
> 
>              sv->pending = true;
> -            return X86EMUL_RETRY;
> +            return IOREQ_STATUS_RETRY;
>          }
>      }
> 
> -    return X86EMUL_UNHANDLEABLE;
> +    return IOREQ_STATUS_UNHANDLED;
>  }
> 
>  unsigned int hvm_broadcast_ioreq(ioreq_t *p, bool buffered)
> @@ -1550,7 +1550,7 @@ unsigned int hvm_broadcast_ioreq(ioreq_t *p, bool buffered)
>          if ( !s->enabled )
>              continue;
> 
> -        if ( hvm_send_ioreq(s, p, buffered) == X86EMUL_UNHANDLEABLE )
> +        if ( hvm_send_ioreq(s, p, buffered) == IOREQ_STATUS_UNHANDLED )
>              failed++;
>      }
> 
> diff --git a/xen/include/asm-x86/hvm/ioreq.h b/xen/include/asm-x86/hvm/ioreq.h
> index 13d35e1..f140ef4 100644
> --- a/xen/include/asm-x86/hvm/ioreq.h
> +++ b/xen/include/asm-x86/hvm/ioreq.h
> @@ -77,6 +77,11 @@ bool arch_ioreq_server_get_type_addr(const struct domain *d,
>                                       uint64_t *addr);
>  void arch_ioreq_domain_init(struct domain *d);
> 
> +/* This correlation must not be altered */
> +#define IOREQ_STATUS_HANDLED     X86EMUL_OKAY
> +#define IOREQ_STATUS_UNHANDLED   X86EMUL_UNHANDLEABLE
> +#define IOREQ_STATUS_RETRY       X86EMUL_RETRY
> +
>  #endif /* __ASM_X86_HVM_IOREQ_H__ */
> 
>  /*
> --
> 2.7.4
> 
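
For anyone following the series: the value of the aliases is that the
soon-to-be-common ioreq code can return a single arch-neutral status
while x86-side callers keep interpreting the same numbers as X86EMUL_*.
A minimal sketch of the idea (my own illustration, not part of the
patch; send_to_server() and the Arm values are hypothetical):

  /* asm-x86/hvm/ioreq.h -- as in this patch: the aliases must stay
   * numerically identical to the X86EMUL_* codes, since x86 callers
   * of hvm_send_ioreq() still compare against X86EMUL_* directly. */
  #define IOREQ_STATUS_HANDLED     X86EMUL_OKAY
  #define IOREQ_STATUS_UNHANDLED   X86EMUL_UNHANDLEABLE
  #define IOREQ_STATUS_RETRY       X86EMUL_RETRY

  /* A future Arm header could then pick its own values, e.g.
   * (illustrative only):
   *
   * #define IOREQ_STATUS_HANDLED     0
   * #define IOREQ_STATUS_UNHANDLED   1
   * #define IOREQ_STATUS_RETRY       2
   */

  /* Common code only ever tests the neutral names: */
  static int send_to_server(struct hvm_ioreq_server *s, ioreq_t *p)
  {
      int rc = hvm_send_ioreq(s, p, false);

      if ( rc == IOREQ_STATUS_RETRY )
          return rc; /* vCPU is waiting on the emulator; retry later. */

      return rc == IOREQ_STATUS_HANDLED ? 0 : -EIO;
  }

That is also why the "This correlation must not be altered" comment
matters: break the numeric identity and the x86 paths that still treat
the return value as an X86EMUL_* code would silently misbehave.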
