For a 16 kB data cache with 4 ways and 32-byte cache lines, the tag LSB
sits at bit 12 [(128 sets per way: 7 index bits) + (32-byte line: 5 offset
bits)], which lies below the page offset boundary (13 bits for 8 kB pages).
So the PTAG can change even within a loop over a single page. Hence update
the ARC_REG_*C_PTAG register on each cache-line iteration.
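
For reference, a minimal userspace sketch of the arithmetic above. The
constants are the figures quoted in this message, not values read from
hardware, and the program is illustrative only:

	#include <stdio.h>

	#define CACHE_SIZE	(16 * 1024)	/* 16 kB D$ */
	#define CACHE_WAYS	4
	#define LINE_BYTES	32		/* cache line size */
	#define PAGE_BITS	13		/* 8 kB pages */

	int main(void)
	{
		/* sets per way: 16 kB / 4 ways / 32 B = 128 */
		unsigned int sets = CACHE_SIZE / CACHE_WAYS / LINE_BYTES;
		/* tag LSB = log2(sets * line size) = log2(4096) = 12 */
		unsigned int tag_lsb = __builtin_ctz(sets * LINE_BYTES);

		printf("tag LSB = bit %u, page offset = bits [%u:0]\n",
		       tag_lsb, PAGE_BITS - 1);
		if (tag_lsb < PAGE_BITS)
			printf("tag overlaps page offset: PTAG must be "
			       "rewritten per cache line\n");
		return 0;
	}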

Signed-off-by: Barbaros Tokaoglu <btokao...@airspan.com>
---
  arch/arc/mm/cache.c | 13 ++-----------
  1 file changed, 2 insertions(+), 11 deletions(-)

diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index 8aa1231..d0875a8 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -251,13 +251,6 @@ void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,
        num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

        /*
-        * MMUv3, cache ops require paddr in PTAG reg
-        * if V-P const for loop, PTAG can be written once outside loop
-        */
-       if (full_page)
-               write_aux_reg(aux_tag, paddr);
-
-       /*
         * This is technically for MMU v4, using the MMU v3 programming model
         * Special work for HS38 aliasing I-cache configuration with PAE40
         *   - upper 8 bits of paddr need to be written into PTAG_HI
@@ -268,10 +261,8 @@ void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,
                write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);

        while (num_lines-- > 0) {
-               if (!full_page) {
-                       write_aux_reg(aux_tag, paddr);
-                       paddr += L1_CACHE_BYTES;
-               }
+               write_aux_reg(aux_tag, paddr);
+               paddr += L1_CACHE_BYTES;

                write_aux_reg(aux_cmd, vaddr);
                vaddr += L1_CACHE_BYTES;
-- 
2.7.4