+CC Arnd

On 4/26/21 3:08 AM, Vladimir Isaev wrote:
> 32-bit PAGE_MASK can not be used as a mask for physical addresses
> when PAE is enabled. PHYSICAL_PAGE_MASK must be used for physical
> addresses instead of PAGE_MASK.

Can you provide a bit more context: e.g. that without this, exit/munmap on 5.x 
kernels was crashing — and include the actual stack trace.


> Signed-off-by: Vladimir Isaev <is...@synopsys.com>

This also needs to be CC'ed to <stable>.

> ---
>   arch/arc/include/asm/pgtable.h   | 12 +++---------
>   arch/arc/include/uapi/asm/page.h |  7 +++++++
>   arch/arc/mm/ioremap.c            |  4 ++--
>   arch/arc/mm/tlb.c                |  2 +-
>   4 files changed, 13 insertions(+), 12 deletions(-)
>
> diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
> index 163641726a2b..25c95fbc7021 100644
> --- a/arch/arc/include/asm/pgtable.h
> +++ b/arch/arc/include/asm/pgtable.h
> @@ -107,8 +107,8 @@
>   #define ___DEF (_PAGE_PRESENT | _PAGE_CACHEABLE)
>   
>   /* Set of bits not changed in pte_modify */
> -#define _PAGE_CHG_MASK       (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | 
> _PAGE_SPECIAL)
> -
> +#define _PAGE_CHG_MASK       (PHYSICAL_PAGE_MASK | _PAGE_ACCESSED | 
> _PAGE_DIRTY | \
> +                                                            _PAGE_SPECIAL)

Bike shed: Can we call this PAGE_MASK_PHYS?

>   /* More Abbrevaited helpers */
>   #define PAGE_U_NONE     __pgprot(___DEF)
>   #define PAGE_U_R        __pgprot(___DEF | _PAGE_READ)
> @@ -132,13 +132,7 @@
>   #define PTE_BITS_IN_PD0             (_PAGE_GLOBAL | _PAGE_PRESENT | 
> _PAGE_HW_SZ)
>   #define PTE_BITS_RWX                (_PAGE_EXECUTE | _PAGE_WRITE | 
> _PAGE_READ)
>   
> -#ifdef CONFIG_ARC_HAS_PAE40
> -#define PTE_BITS_NON_RWX_IN_PD1      (0xff00000000 | PAGE_MASK | 
> _PAGE_CACHEABLE)
> -#define MAX_POSSIBLE_PHYSMEM_BITS 40
> -#else
> -#define PTE_BITS_NON_RWX_IN_PD1      (PAGE_MASK | _PAGE_CACHEABLE)
> -#define MAX_POSSIBLE_PHYSMEM_BITS 32
> -#endif
> +#define PTE_BITS_NON_RWX_IN_PD1      (PHYSICAL_PAGE_MASK | _PAGE_CACHEABLE)
>   
>   /**************************************************************************
>    * Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
> diff --git a/arch/arc/include/uapi/asm/page.h 
> b/arch/arc/include/uapi/asm/page.h
> index 2a97e2718a21..8fecf2a2b592 100644
> --- a/arch/arc/include/uapi/asm/page.h
> +++ b/arch/arc/include/uapi/asm/page.h
> @@ -33,5 +33,12 @@
>   
>   #define PAGE_MASK   (~(PAGE_SIZE-1))
>   
> +#ifdef CONFIG_ARC_HAS_PAE40
> +#define MAX_POSSIBLE_PHYSMEM_BITS 40
> +#define PHYSICAL_PAGE_MASK   (0xff00000000ull | PAGE_MASK)
> +#else
> +#define MAX_POSSIBLE_PHYSMEM_BITS 32
> +#define PHYSICAL_PAGE_MASK   PAGE_MASK
> +#endif

Not a good idea, as you already saw the kernel build bot complaining. 
Granted we have the old PAGE_SIZE cruft there, but that's not a 
precedent for adding more.

>   
>   #endif /* _UAPI__ASM_ARC_PAGE_H */
> diff --git a/arch/arc/mm/ioremap.c b/arch/arc/mm/ioremap.c
> index fac4adc90204..eb109d57d544 100644
> --- a/arch/arc/mm/ioremap.c
> +++ b/arch/arc/mm/ioremap.c
> @@ -71,8 +71,8 @@ void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long 
> size,
>       prot = pgprot_noncached(prot);
>   
>       /* Mappings have to be page-aligned */
> -     off = paddr & ~PAGE_MASK;

This is the offset *within* a page, so the upper bits must not matter. In fact, 
with this change a bogus offset like 0xFF_FFFFFFFF can turn into something 
weird such as 0xFF_00000000.

> -     paddr &= PAGE_MASK;
> +     off = paddr & ~PHYSICAL_PAGE_MASK;
> +     paddr &= PHYSICAL_PAGE_MASK;

This change is OK but feels weird nonetheless. ioremap is intended for 
actual IO regions and not just for making normal pages uncached. I 
know you tried the devmem trick to do this, but I don't think that is a 
"production" way to render uncached pages in the PAE region.

>       size = PAGE_ALIGN(end + 1) - paddr;
>   
>       /*
> diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
> index 9bb3c24f3677..15a3b92e9e72 100644
> --- a/arch/arc/mm/tlb.c
> +++ b/arch/arc/mm/tlb.c
> @@ -576,7 +576,7 @@ void update_mmu_cache(struct vm_area_struct *vma, 
> unsigned long vaddr_unaligned,
>                     pte_t *ptep)
>   {
>       unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
> -     phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK;
> +     phys_addr_t paddr = pte_val(*ptep) & PHYSICAL_PAGE_MASK;
>       struct page *page = pfn_to_page(pte_pfn(*ptep));
>   
>       create_tlb(vma, vaddr, ptep);

_______________________________________________
linux-snps-arc mailing list
linux-snps-arc@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-snps-arc

Reply via email to