On 18.09.19 20:02, Richard Henderson wrote:
> Handle bswap on RAM directly in load/store_helper. This fixes a
> bug in the previous implementation: one cannot use the
> I/O path for RAM.
>
> Fixes: a26fc6f5152b47f1
> Signed-off-by: Richard Henderson <[email protected]>
> ---
> include/exec/cpu-all.h |   2 +
> accel/tcg/cputlb.c     | 105 +++++++++++++++++++++--------------------

> 2 files changed, 57 insertions(+), 50 deletions(-)
>
> diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
> index d2d443c4f9..3928edab9a 100644
> --- a/include/exec/cpu-all.h
> +++ b/include/exec/cpu-all.h
> @@ -331,6 +331,8 @@ CPUArchState *cpu_copy(CPUArchState *env);
> #define TLB_MMIO (1 << (TARGET_PAGE_BITS - 3))
> /* Set if TLB entry contains a watchpoint. */
> #define TLB_WATCHPOINT (1 << (TARGET_PAGE_BITS - 4))
> +/* Set if TLB entry requires byte swap. */
> +#define TLB_BSWAP (1 << (TARGET_PAGE_BITS - 5))
Shouldn't you also take care of TLB_BSWAP in probe_access?
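Untested, and just to illustrate what I mean: next to the existing
slow-path checks in probe_access(), I'd assume something along these
lines (variable names hypothetical, matching the TLB flag checks above):

    /* Sketch only: refuse to hand out a host pointer for pages that
     * need a byte swap, so callers fall back to the slow path. */
    if (tlb_addr & (TLB_MMIO | TLB_BSWAP)) {
        return NULL;
    }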
>
> /* Use this mask to check interception with an alignment mask
> * in a TCG backend.
> diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
> index b4a63d3928..7301f9e699 100644
> --- a/accel/tcg/cputlb.c
> +++ b/accel/tcg/cputlb.c
> @@ -737,8 +737,7 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
> address |= TLB_INVALID_MASK;
> }
> if (attrs.byte_swap) {
> - /* Force the access through the I/O slow path. */
> - address |= TLB_MMIO;
> + address |= TLB_BSWAP;
> }
> if (!memory_region_is_ram(section->mr) &&
> !memory_region_is_romd(section->mr)) {
> @@ -901,10 +900,6 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
> bool locked = false;
> MemTxResult r;
>
> - if (iotlbentry->attrs.byte_swap) {
> - op ^= MO_BSWAP;
> - }
> -
> section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
> mr = section->mr;
> mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
> @@ -947,10 +942,6 @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
> bool locked = false;
> MemTxResult r;
>
> - if (iotlbentry->attrs.byte_swap) {
> - op ^= MO_BSWAP;
> - }
> -
> section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
> mr = section->mr;
> mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
> @@ -1311,7 +1302,8 @@ static inline uint64_t wrap_ldul_le(const void *haddr)
> static inline uint64_t QEMU_ALWAYS_INLINE
> load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
> uintptr_t retaddr, MemOp op, bool code_read,
> - FullLoadHelper *full_load, LoadHelper *direct)
> + FullLoadHelper *full_load, LoadHelper *direct,
> + LoadHelper *direct_swap)
> {
> uintptr_t mmu_idx = get_mmuidx(oi);
> uintptr_t index = tlb_index(env, mmu_idx, addr);
> @@ -1361,17 +1353,22 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
> /* On watchpoint hit, this will longjmp out. */
> cpu_check_watchpoint(env_cpu(env), addr, size,
> iotlbentry->attrs, BP_MEM_READ, retaddr);
> -
> - /* The backing page may or may not require I/O. */
> - tlb_addr &= ~TLB_WATCHPOINT;
> - if ((tlb_addr & ~TARGET_PAGE_MASK) == 0) {
> - goto do_aligned_access;
> - }
> }
>
> /* Handle I/O access. */
> - return io_readx(env, iotlbentry, mmu_idx, addr,
> - retaddr, access_type, op);
> + if (likely(tlb_addr & TLB_MMIO)) {
> + return io_readx(env, iotlbentry, mmu_idx, addr,
> + retaddr, access_type,
> + op ^ (tlb_addr & TLB_BSWAP ? MO_BSWAP : 0));
> + }
> +
> + haddr = (void *)((uintptr_t)addr + entry->addend);
> +
> + if (unlikely(tlb_addr & TLB_BSWAP)) {
> + return direct_swap(haddr);
> + } else {
> + return direct(haddr);
> + }
You can drop the else statement and just return.
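I.e. just (untested):

    if (unlikely(tlb_addr & TLB_BSWAP)) {
        return direct_swap(haddr);
    }
    return direct(haddr);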
[...]
I assume this patch does not yet result in the issues you are
experiencing, right?
In general this looks sane to me ...
--
Thanks,
David / dhildenb