From: Nicholas Piggin <npig...@gmail.com>

xive2 must take into account redistribution of group interrupts if the
VP-directed priority exceeds the group interrupt priority after the "Set
OS Pending" operation. The xive1 code is not group aware, so implement
this separately for xive2.
Signed-off-by: Nicholas Piggin <npig...@gmail.com>
Reviewed-by: Glenn Miles <mil...@linux.ibm.com>
Reviewed-by: Michael Kowal <ko...@linux.ibm.com>
Tested-by: Gautam Menghani <gau...@linux.ibm.com>
Link: https://lore.kernel.org/qemu-devel/20250512031100.439842-47-npig...@gmail.com
Signed-off-by: Cédric Le Goater <c...@redhat.com>
---
 include/hw/ppc/xive2.h |  2 ++
 hw/intc/xive.c         |  2 ++
 hw/intc/xive2.c        | 28 ++++++++++++++++++++++++++++
 3 files changed, 32 insertions(+)

diff --git a/include/hw/ppc/xive2.h b/include/hw/ppc/xive2.h
index c1ab06a55adf..45266c2a8b9e 100644
--- a/include/hw/ppc/xive2.h
+++ b/include/hw/ppc/xive2.h
@@ -130,6 +130,8 @@ void xive2_tm_set_hv_cppr(XivePresenter *xptr, XiveTCTX *tctx,
                           hwaddr offset, uint64_t value, unsigned size);
 void xive2_tm_set_os_cppr(XivePresenter *xptr, XiveTCTX *tctx,
                           hwaddr offset, uint64_t value, unsigned size);
+void xive2_tm_set_os_pending(XivePresenter *xptr, XiveTCTX *tctx,
+                             hwaddr offset, uint64_t value, unsigned size);
 void xive2_tm_push_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
                           hwaddr offset, uint64_t value, unsigned size);
 uint64_t xive2_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
diff --git a/hw/intc/xive.c b/hw/intc/xive.c
index e7f77be2f711..25cb3877cb15 100644
--- a/hw/intc/xive.c
+++ b/hw/intc/xive.c
@@ -747,6 +747,8 @@ static const XiveTmOp xive2_tm_operations[] = {
     /* MMIOs above 2K : special operations with side effects */
     { XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG,      2, true, false,
       NULL, xive_tm_ack_os_reg },
+    { XIVE_TM_OS_PAGE, TM_SPC_SET_OS_PENDING,  1, true, false,
+      xive2_tm_set_os_pending, NULL },
     { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX_G2,  4, true, false,
       NULL, xive2_tm_pull_os_ctx },
     { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX,     4, true, false,
diff --git a/hw/intc/xive2.c b/hw/intc/xive2.c
index 23eb85bb8669..f9eaea119289 100644
--- a/hw/intc/xive2.c
+++ b/hw/intc/xive2.c
@@ -1323,6 +1323,34 @@ void xive2_tm_set_os_cppr(XivePresenter *xptr, XiveTCTX *tctx,
     xive2_tctx_set_cppr(tctx, TM_QW1_OS, value & 0xff);
 }
 
+/*
+ * Adjust the IPB to allow a CPU to process event queues of other
+ * priorities during one physical interrupt cycle.
+ */
+void xive2_tm_set_os_pending(XivePresenter *xptr, XiveTCTX *tctx,
+                             hwaddr offset, uint64_t value, unsigned size)
+{
+    Xive2Router *xrtr = XIVE2_ROUTER(xptr);
+    uint8_t ring = TM_QW1_OS;
+    uint8_t *regs = &tctx->regs[ring];
+    uint8_t priority = value & 0xff;
+
+    /*
+     * XXX: should this simply set a bit in IPB and wait for it to be picked
+     * up next cycle, or is it supposed to present it now? We implement the
+     * latter here.
+     */
+    regs[TM_IPB] |= xive_priority_to_ipb(priority);
+    if (xive_ipb_to_pipr(regs[TM_IPB]) >= regs[TM_PIPR]) {
+        return;
+    }
+    if (xive_nsr_indicates_group_exception(ring, regs[TM_NSR])) {
+        xive2_redistribute(xrtr, tctx, ring);
+    }
+
+    xive_tctx_pipr_present(tctx, ring, priority, 0);
+}
+
 static void xive2_tctx_set_target(XiveTCTX *tctx, uint8_t ring, uint8_t target)
 {
     uint8_t *regs = &tctx->regs[ring];
-- 
2.50.1
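
For readers less familiar with the XIVE priority model, below is a minimal,
self-contained C sketch of the decision the new xive2_tm_set_os_pending()
handler makes. It is not QEMU code: the struct, helper names and the printf
reporting are made up for illustration, and it only models the core idea
that lower priority values are more favored, so a newly latched pending
priority is presented only if it beats the current PIPR, pushing a presented
group interrupt back to its event queue (redistribution) first.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PRIO_NONE 0xff

/* One bit per priority 0..7; bit 7 corresponds to priority 0 (most favored). */
static uint8_t priority_to_ipb(uint8_t priority)
{
    return priority < 8 ? 1 << (7 - priority) : 0;
}

/* Most favored priority recorded in the IPB, or PRIO_NONE if none pending. */
static uint8_t ipb_to_pipr(uint8_t ipb)
{
    for (uint8_t prio = 0; prio < 8; prio++) {
        if (ipb & priority_to_ipb(prio)) {
            return prio;
        }
    }
    return PRIO_NONE;
}

struct os_ctx {
    uint8_t ipb;            /* bitmask of pending priorities */
    uint8_t pipr;           /* priority currently presented to the CPU */
    bool group_presented;   /* a group interrupt is currently presented */
};

/* Returns true if a new, more favored interrupt was presented. */
static bool set_os_pending(struct os_ctx *ctx, uint8_t priority)
{
    ctx->ipb |= priority_to_ipb(priority);

    if (ipb_to_pipr(ctx->ipb) >= ctx->pipr) {
        /* Nothing pending is more favored than what is already presented. */
        return false;
    }

    if (ctx->group_presented) {
        /*
         * The presented group interrupt just lost the priority race; it has
         * to go back to its event queue so another VP can pick it up.
         */
        printf("redistribute group interrupt at priority %u\n", ctx->pipr);
        ctx->group_presented = false;
    }

    ctx->pipr = ipb_to_pipr(ctx->ipb);
    printf("present new interrupt at priority %u\n", ctx->pipr);
    return true;
}

int main(void)
{
    struct os_ctx ctx = { .ipb = 0, .pipr = 5, .group_presented = true };

    set_os_pending(&ctx, 2);   /* more favored than 5: redistribute + present */
    set_os_pending(&ctx, 6);   /* less favored: only latched in the IPB */
    return 0;
}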