On 3/4/19 7:13 AM, Mateja Marjanovic wrote: > + } else if (rt == rd) { > + TCGv_i64 t0 = tcg_temp_new(); > + TCGv_i64 t1 = tcg_temp_new(); > + uint64_t mask0 = (1ULL << 32) - 1; > + uint64_t mask1 = mask0 << 32; > + > + tcg_gen_andi_i64(t0, cpu_gpr[rt], mask1); > + tcg_gen_shri_i64(t0, t0, 32); > + tcg_gen_andi_i64(t1, cpu_mmr[rt], mask0); > + tcg_gen_shli_i64(t1, t1, 32); > + > + tcg_gen_andi_i64(cpu_mmr[rd], cpu_mmr[rd], mask1); > + tcg_gen_or_i64(cpu_mmr[rd], cpu_mmr[rd], t0); > + > + tcg_gen_andi_i64(cpu_gpr[rd], cpu_gpr[rd], mask0); > + tcg_gen_or_i64(cpu_gpr[rd], cpu_gpr[rd], t1); > + > + tcg_temp_free(t0); > + tcg_temp_free(t1); > + } else { > + TCGv_i64 t0 = tcg_temp_new(); > + TCGv_i64 t1 = tcg_temp_new(); > + uint64_t mask0 = (1ULL << 32) - 1; > + uint64_t mask1 = mask0 << 32; > + > + tcg_gen_andi_i64(t0, cpu_mmr[rt], mask1); > + tcg_gen_andi_i64(t1, cpu_gpr[rt], mask1); > + tcg_gen_shri_i64(t1, t1, 32); > + tcg_gen_or_i64(cpu_mmr[rd], t0, t1); > + > + tcg_gen_andi_i64(t0, cpu_mmr[rt], mask0); > + tcg_gen_shli_i64(t0, t0, 32); > + tcg_gen_andi_i64(t1, cpu_gpr[rt], mask0); > + tcg_gen_or_i64(cpu_gpr[rd], t0, t1); > + > + tcg_temp_free(t0); > + tcg_temp_free(t1); > + }
Again, both of these cases can be handled much more simply:

    tcg_gen_shri_i64(t0, cpu_gpr[rt], 32);
    tcg_gen_deposit_i64(cpu_gpr[rd], cpu_gpr[rt], cpu_mmr[rt], 32, 32);
    tcg_gen_deposit_i64(cpu_mmr[rd], cpu_mmr[rt], t0, 0, 32);

r~