On Wed, Apr 14, 2021 at 5:35 PM Bin Meng <bmeng...@gmail.com> wrote:
>
> Hi Alistair,
>
> On Tue, Apr 13, 2021 at 10:42 AM Alistair Francis
> <alistair.fran...@wdc.com> wrote:
> >
> > From: Hou Weiying <weiying_...@outlook.com>
> >
> > This commit adds support for ePMP v0.9.1.
> >
> > The ePMP spec can be found in:
> > https://docs.google.com/document/d/1Mh_aiHYxemL0umN3GTTw8vsbmzHZ_nxZXgjgOUzbvc8
> >
> > Signed-off-by: Hongzheng-Li <ethan.lee....@gmail.com>
> > Signed-off-by: Hou Weiying <weiying_...@outlook.com>
> > Signed-off-by: Myriad-Dreamin <camiy...@gmail.com>
> > Message-Id:
> > <sg2pr02mb263462ccdbcbbad36983c2cd93...@sg2pr02mb2634.apcprd02.prod.outlook.com>
> > [ Changes by AF:
> >  - Rebase on master
> >  - Update to latest spec
> >  - Use a switch case to handle ePMP MML permissions
> >  - Fix a few bugs
> > ]
> > Signed-off-by: Alistair Francis <alistair.fran...@wdc.com>
> > ---
> >  target/riscv/pmp.c | 164 +++++++++++++++++++++++++++++++++++++++++----
> >  1 file changed, 152 insertions(+), 12 deletions(-)
> >
> > diff --git a/target/riscv/pmp.c b/target/riscv/pmp.c
> > index e35988eec2..00f91d074f 100644
> > --- a/target/riscv/pmp.c
> > +++ b/target/riscv/pmp.c
> > @@ -90,11 +90,42 @@ static inline uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t pmp_index)
> >  static void pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index, uint8_t val)
> >  {
> >      if (pmp_index < MAX_RISCV_PMPS) {
> > -        if (!pmp_is_locked(env, pmp_index)) {
> > -            env->pmp_state.pmp[pmp_index].cfg_reg = val;
> > -            pmp_update_rule(env, pmp_index);
> > +        bool locked = true;
> > +
> > +        if (riscv_feature(env, RISCV_FEATURE_EPMP)) {
> > +            /* mseccfg.RLB is set */
> > +            if (MSECCFG_RLB_ISSET(env)) {
> > +                locked = false;
> > +            }
> > +
> > +            /* mseccfg.MML is not set */
> > +            if (!MSECCFG_MML_ISSET(env) && !pmp_is_locked(env, pmp_index)) {
> > +                locked = false;
> > +            }
> > +
> > +            /* mseccfg.MML is set */
> > +            if (MSECCFG_MML_ISSET(env)) {
> > +                /* not adding execute bit */
> > +                if ((val & PMP_LOCK) != 0 && (val & PMP_EXEC) != PMP_EXEC) {
> > +                    locked = false;
> > +                }
> > +                /* shared region and not adding X bit */
> > +                if ((val & PMP_LOCK) != PMP_LOCK &&
> > +                    (val & 0x7) != (PMP_WRITE | PMP_EXEC)) {
> > +                    locked = false;
> > +                }
> > +            }
> >          } else {
> > +            if (!pmp_is_locked(env, pmp_index)) {
> > +                locked = false;
> > +            }
> > +        }
> > +
> > +        if (locked) {
> >              qemu_log_mask(LOG_GUEST_ERROR, "ignoring pmpcfg write - locked\n");
> > +        } else {
> > +            env->pmp_state.pmp[pmp_index].cfg_reg = val;
> > +            pmp_update_rule(env, pmp_index);
> >          }
> >      } else {
> >          qemu_log_mask(LOG_GUEST_ERROR,
> > @@ -217,6 +248,32 @@ static bool pmp_hart_has_privs_default(CPURISCVState *env, target_ulong addr,
> >  {
> >      bool ret;
> >
> > +    if (riscv_feature(env, RISCV_FEATURE_EPMP)) {
> > +        if (MSECCFG_MMWP_ISSET(env)) {
> > +            /*
> > +             * The Machine Mode Whitelist Policy (mseccfg.MMWP) is set
> > +             * so we default to deny all, even for M-mode.
> > +             */
> > +            *allowed_privs = 0;
> > +            return false;
> > +        } else if (MSECCFG_MML_ISSET(env)) {
> > +            /*
> > +             * The Machine Mode Lockdown (mseccfg.MML) bit is set
> > +             * so we can only execute code in M-mode with an applicable
> > +             * rule. Other modes are disabled.
> > +             */
> > +            if (mode == PRV_M && !(privs & PMP_EXEC)) {
> > +                ret = true;
> > +                *allowed_privs = PMP_READ | PMP_WRITE;
> > +            } else {
> > +                ret = false;
> > +                *allowed_privs = 0;
> > +            }
> > +
> > +            return ret;
> > +        }
> > +    }
> > +
> >      if ((!riscv_feature(env, RISCV_FEATURE_PMP)) || (mode == PRV_M)) {
> >          /*
> >           * Privileged spec v1.10 states if HW doesn't implement any PMP entry
> > @@ -294,13 +351,94 @@ bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr,
> >              pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg);
> >
> >          /*
> > -         * If the PMP entry is not off and the address is in range, do the priv
> > -         * check
> > +         * Convert the PMP permissions to match the truth table in the
> > +         * ePMP spec.
> >           */
> > +        const uint8_t epmp_operation =
> > +            ((env->pmp_state.pmp[i].cfg_reg & PMP_LOCK) >> 4) |
> > +            ((env->pmp_state.pmp[i].cfg_reg & PMP_READ) << 2) |
> > +            (env->pmp_state.pmp[i].cfg_reg & PMP_WRITE) |
> > +            ((env->pmp_state.pmp[i].cfg_reg & PMP_EXEC) >> 2);
> > +
> >          if (((s + e) == 2) && (PMP_AMATCH_OFF != a_field)) {
> > -            *allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
> > -            if ((mode != PRV_M) || pmp_is_locked(env, i)) {
> > -                *allowed_privs &= env->pmp_state.pmp[i].cfg_reg;
> > +            /*
> > +             * If the PMP entry is not off and the address is in range,
> > +             * do the priv check
> > +             */
> > +            if (!MSECCFG_MML_ISSET(env)) {
> > +                /*
> > +                 * If mseccfg.MML Bit is not set, do pmp priv check
> > +                 * This will always apply to regular PMP.
> > +                 */
> > +                *allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
> > +                if ((mode != PRV_M) || pmp_is_locked(env, i)) {
> > +                    *allowed_privs &= env->pmp_state.pmp[i].cfg_reg;
> > +                }
> > +            } else {
> > +                /*
> > +                 * If mseccfg.MML Bit set, do the enhanced pmp priv check
> > +                 */
> > +                if (mode == PRV_M) {
> > +                    switch (epmp_operation) {
> > +                    case 0:
> > +                    case 1:
> > +                    case 4:
> > +                    case 5:
> > +                    case 6:
> > +                    case 7:
> > +                    case 8:
> > +                        *allowed_privs = 0;
> > +                        break;
> > +                    case 2:
> > +                    case 3:
> > +                    case 14:
> > +                        *allowed_privs = PMP_READ | PMP_WRITE;
> > +                        break;
> > +                    case 9:
> > +                    case 10:
> > +                        *allowed_privs = PMP_EXEC;
> > +                        break;
> > +                    case 11:
> > +                    case 13:
> > +                        *allowed_privs = PMP_READ | PMP_EXEC;
> > +                        break;
> > +                    case 12:
> > +                    case 15:
> > +                        *allowed_privs = PMP_READ;
> > +                        break;
> > +                    }
> > +                } else {
> > +                    switch (epmp_operation) {
> > +                    case 0:
> > +                    case 8:
> > +                    case 9:
> > +                    case 12:
> > +                    case 13:
> > +                    case 14:
> > +                        *allowed_privs = 0;
> > +                        break;
> > +                    case 1:
> > +                    case 10:
> > +                    case 11:
> > +                        *allowed_privs = PMP_EXEC;
> > +                        break;
> > +                    case 2:
> > +                    case 4:
> > +                    case 15:
> > +                        *allowed_privs = PMP_READ;
> > +                        break;
> > +                    case 3:
> > +                    case 6:
> > +                        *allowed_privs = PMP_READ | PMP_WRITE;
> > +                        break;
> > +                    case 5:
> > +                        *allowed_privs = PMP_READ | PMP_EXEC;
> > +                        break;
> > +                    case 7:
> > +                        *allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
> > +                        break;
> > +                    }
> > +                }
> >              }
> >
> >              ret = ((privs & *allowed_privs) == privs);
> > @@ -328,10 +466,12 @@ void pmpcfg_csr_write(CPURISCVState *env, uint32_t reg_index,
> >
> >      trace_pmpcfg_csr_write(env->mhartid, reg_index, val);
> >
> > -    if ((reg_index & 1) && (sizeof(target_ulong) == 8)) {
> > -        qemu_log_mask(LOG_GUEST_ERROR,
> > -                      "ignoring pmpcfg write - incorrect address\n");
> > -        return;
> > +    if (!riscv_feature(env, RISCV_FEATURE_EPMP) || !MSECCFG_RLB_ISSET(env)) {
> > +        if ((reg_index & 1) && (sizeof(target_ulong) == 8)) {
> > +            qemu_log_mask(LOG_GUEST_ERROR,
> > +                          "ignoring pmpcfg write - PMP config is locked\n");
>
> I think the original log message was for checking the register address for
> RV64, and should still be retained. We should add another branch with a
> different log message for the new ePMP check.

This whole change is actually wrong. I have removed this entire chunk.

Alistair

>
> > +            return;
> > +        }
>
> Regards,
> Bin
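
For readers following along, below is a rough sketch of the kind of split Bin
suggests above: keep the original RV64 odd-index diagnostic as it was, and
report any ePMP-related rejection in pmpcfg_csr_write() as a separate, clearly
labelled case. The epmp_blocks_this_write() helper and the "ePMP locked"
wording are hypothetical placeholders, not code from this patch or from
upstream QEMU, and as noted the hunk was dropped from the final version anyway.

void pmpcfg_csr_write(CPURISCVState *env, uint32_t reg_index,
                      target_ulong val)
{
    trace_pmpcfg_csr_write(env->mhartid, reg_index, val);

    /* Original check: odd pmpcfg indices do not exist on RV64 */
    if ((reg_index & 1) && (sizeof(target_ulong) == 8)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ignoring pmpcfg write - incorrect address\n");
        return;
    }

    /*
     * Separate ePMP branch with its own message.  epmp_blocks_this_write()
     * is a hypothetical helper standing in for whatever ePMP condition is
     * meant to reject the write.
     */
    if (riscv_feature(env, RISCV_FEATURE_EPMP) &&
        epmp_blocks_this_write(env, reg_index, val)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ignoring pmpcfg write - ePMP locked\n");
        return;
    }

    /* ... per-entry writes via pmp_write_cfg() continue as before ... */
}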