RE: [PATCH V5 12/26] hexagon/mm: Enable ARCH_HAS_VM_GET_PAGE_PROT
> -----Original Message-----
> From: Anshuman Khandual ...
>
> This enables ARCH_HAS_VM_GET_PAGE_PROT on the platform and exports
> standard vm_get_page_prot() implementation via DECLARE_VM_GET_PAGE_PROT,
> which looks up a private and static protection_map[] array. Subsequently
> all __SXXX and __PXXX macros can be dropped which are no longer needed.
>
> Cc: Brian Cain
> Cc: linux-hexa...@vger.kernel.org
> Cc: linux-ker...@vger.kernel.org
> Signed-off-by: Anshuman Khandual
> ---
>  arch/hexagon/Kconfig               |  1 +
>  arch/hexagon/include/asm/pgtable.h | 27 ---
>  arch/hexagon/mm/init.c             | 42 ++
>  3 files changed, 43 insertions(+), 27 deletions(-)
>
> diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig
> index 54eadf265178..bc4ceecd0588 100644
> --- a/arch/hexagon/Kconfig
> +++ b/arch/hexagon/Kconfig
> @@ -6,6 +6,7 @@ config HEXAGON
>  	def_bool y
>  	select ARCH_32BIT_OFF_T
>  	select ARCH_HAS_SYNC_DMA_FOR_DEVICE
> +	select ARCH_HAS_VM_GET_PAGE_PROT
>  	select ARCH_NO_PREEMPT
>  	select DMA_GLOBAL_POOL
>  	# Other pending projects/to-do items.
> diff --git a/arch/hexagon/include/asm/pgtable.h b/arch/hexagon/include/asm/pgtable.h
> index 0610724d6a28..f7048c18b6f9 100644
> --- a/arch/hexagon/include/asm/pgtable.h
> +++ b/arch/hexagon/include/asm/pgtable.h
> @@ -126,33 +126,6 @@ extern unsigned long _dflt_cache_att;
>   */
>  #define CACHEDEF	(CACHE_DEFAULT << 6)
>
> -/* Private (copy-on-write) page protections. */
> -#define __P000 __pgprot(_PAGE_PRESENT | _PAGE_USER | CACHEDEF)
> -#define __P001 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | CACHEDEF)
> -#define __P010 __P000	/* Write-only copy-on-write */
> -#define __P011 __P001	/* Read/Write copy-on-write */
> -#define __P100 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
> -			_PAGE_EXECUTE | CACHEDEF)
> -#define __P101 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_EXECUTE | \
> -			_PAGE_READ | CACHEDEF)
> -#define __P110 __P100	/* Write/execute copy-on-write */
> -#define __P111 __P101	/* Read/Write/Execute, copy-on-write */
> -
> -/* Shared page protections. */
> -#define __S000 __P000
> -#define __S001 __P001
> -#define __S010 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
> -			_PAGE_WRITE | CACHEDEF)
> -#define __S011 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | \
> -			_PAGE_WRITE | CACHEDEF)
> -#define __S100 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
> -			_PAGE_EXECUTE | CACHEDEF)
> -#define __S101 __P101
> -#define __S110 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
> -			_PAGE_EXECUTE | _PAGE_WRITE | CACHEDEF)
> -#define __S111 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | \
> -			_PAGE_EXECUTE | _PAGE_WRITE | CACHEDEF)
> -
>  extern pgd_t swapper_pg_dir[PTRS_PER_PGD];  /* located in head.S */
>
>  /*  HUGETLB not working currently */
> diff --git a/arch/hexagon/mm/init.c b/arch/hexagon/mm/init.c
> index 3167a3b5c97b..146115c9de61 100644
> --- a/arch/hexagon/mm/init.c
> +++ b/arch/hexagon/mm/init.c
> @@ -234,3 +234,45 @@ void __init setup_arch_memory(void)
>  	 *  which is called by start_kernel() later on in the process
>  	 */
>  }
> +
> +static const pgprot_t protection_map[16] = {
> +	[VM_NONE]		= __pgprot(_PAGE_PRESENT | _PAGE_USER |
> +					   CACHEDEF),
> +	[VM_READ]		= __pgprot(_PAGE_PRESENT | _PAGE_USER |
> +					   _PAGE_READ | CACHEDEF),
> +	[VM_WRITE]		= __pgprot(_PAGE_PRESENT | _PAGE_USER |
> +					   CACHEDEF),
> +	[VM_WRITE | VM_READ]	= __pgprot(_PAGE_PRESENT | _PAGE_USER |
> +					   _PAGE_READ | CACHEDEF),
> +	[VM_EXEC]		= __pgprot(_PAGE_PRESENT | _PAGE_USER |
> +					   _PAGE_EXECUTE | CACHEDEF),
> +	[VM_EXEC | VM_READ]	= __pgprot(_PAGE_PRESENT | _PAGE_USER |
> +
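
A note for readers following the series: DECLARE_VM_GET_PAGE_PROT comes from the
generic mm headers, and as far as I recall it expands to roughly the lookup below
(a sketch of the common code, not part of the hexagon diff quoted above):

	pgprot_t vm_get_page_prot(unsigned long vm_flags)
	{
		/* only the low VM_READ/VM_WRITE/VM_EXEC/VM_SHARED bits index the table */
		return protection_map[vm_flags &
				(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
	}
	EXPORT_SYMBOL(vm_get_page_prot);

That is why the 16-entry protection_map[] added in mm/init.c can replace the old
__PXXX/__SXXX tables outright: the generic helper only ever indexes it with those
four flag bits.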
RE: [PATCH] kernel: exit: cleanup release_thread()
> -----Original Message-----
> From: Kefeng Wang ...
>
> Only x86 has its own release_thread(); introduce a new weak
> release_thread() function to clean up the empty definitions in
> the other architectures.
>
> Signed-off-by: Kefeng Wang
> ---

Acked-by: Brian Cain
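
As I read the patch, the whole cleanup boils down to one weak no-op default in
kernel/exit.c that architectures with nothing to release simply inherit, roughly:

	/* weak default; an arch (x86) that needs real work keeps its own definition */
	void __weak release_thread(struct task_struct *dead_task)
	{
	}

With that in place, the per-arch empty release_thread() stubs can be deleted
instead of being copied into every new port.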
RE: [PATCH v3 4/5] arch,locking/atomic: hexagon: add arch_cmpxchg[64]_local
> -----Original Message-----
> From: wuqiang.matt
> Sent: Tuesday, November 21, 2023 8:24 AM
> To: ubiz...@gmail.com; mark.rutl...@arm.com; vgu...@kernel.org;
> Brian Cain ; jo...@southpole.se; stefan.kristians...@saunalahti.fi;
> sho...@gmail.com; ch...@zankel.net; jcmvb...@gmail.com;
> ge...@linux-m68k.org; andi.sh...@linux.intel.com; mi...@kernel.org;
> pal...@rivosinc.com; andrzej.ha...@intel.com; a...@arndb.de;
> pet...@infradead.org; mhira...@kernel.org
> Cc: linux-a...@vger.kernel.org; linux-snps-arc@lists.infradead.org;
> linux-ker...@vger.kernel.org; linux-hexa...@vger.kernel.org;
> linux-openr...@vger.kernel.org; linux-trace-ker...@vger.kernel.org;
> mat...@163.com; li...@roeck-us.net; wuqiang.matt; kernel test robot
> Subject: [PATCH v3 4/5] arch,locking/atomic: hexagon: add
> arch_cmpxchg[64]_local
>
> hexagon does not implement arch_cmpxchg_local, which causes build
> failures for any reference to try_cmpxchg_local, as reported by the
> kernel test robot.
>
> This patch implements arch_cmpxchg[64]_local with the native cmpxchg
> variant if the corresponding data size is supported, otherwise
> generic_cmpxchg[64]_local is used.
>
> Reported-by: kernel test robot
> Closes: https://lore.kernel.org/oe-kbuild-all/202310272207.tLPflya4-l...@intel.com/
>
> Signed-off-by: wuqiang.matt
> Reviewed-by: Masami Hiramatsu (Google)
> ---
>  arch/hexagon/include/asm/cmpxchg.h | 51 +-
>  1 file changed, 50 insertions(+), 1 deletion(-)
>
> diff --git a/arch/hexagon/include/asm/cmpxchg.h b/arch/hexagon/include/asm/cmpxchg.h
> index bf6cf5579cf4..302fa30f25aa 100644
> --- a/arch/hexagon/include/asm/cmpxchg.h
> +++ b/arch/hexagon/include/asm/cmpxchg.h
> @@ -8,6 +8,8 @@
>  #ifndef _ASM_CMPXCHG_H
>  #define _ASM_CMPXCHG_H
>
> +#include
> +
>  /*
>   * __arch_xchg - atomically exchange a register and a memory location
>   * @x: value to swap
> @@ -51,13 +53,15 @@ __arch_xchg(unsigned long x, volatile void *ptr, int size)
>   * variable casting.
>   */
>
> -#define arch_cmpxchg(ptr, old, new)			\
> +#define __cmpxchg_32(ptr, old, new)			\
>  ({							\
>  	__typeof__(ptr) __ptr = (ptr);			\
>  	__typeof__(*(ptr)) __old = (old);		\
>  	__typeof__(*(ptr)) __new = (new);		\
>  	__typeof__(*(ptr)) __oldval = 0;		\
>  							\
> +	BUILD_BUG_ON(sizeof(*(ptr)) != 4);		\
> +							\
>  	asm volatile(					\
>  		"1:	%0 = memw_locked(%1);\n"	\
>  		"	{ P0 = cmp.eq(%0,%2);\n"	\
> @@ -72,4 +76,49 @@ __arch_xchg(unsigned long x, volatile void *ptr, int size)
>  	__oldval;					\
>  })
>
> +#define __cmpxchg(ptr, old, val, size)			\
> +({							\
> +	__typeof__(*(ptr)) oldval;			\
> +							\
> +	switch (size) {					\
> +	case 4:						\
> +		oldval = __cmpxchg_32(ptr, old, val);	\
> +		break;					\
> +	default:					\
> +		BUILD_BUG();				\
> +		oldval = val;				\
> +		break;					\
> +	}						\
> +							\
> +	oldval;						\
> +})
> +
> +#define arch_cmpxchg(ptr, o, n)	__cmpxchg((ptr), (o), (n), sizeof(*(ptr)))
> +
> +/*
> + * always make arch_cmpxchg[64]_local available, native cmpxchg
> + * will be used if available, then generic_cmpxchg[64]_local
> + */
> +#include
> +
> +#define arch_cmpxchg_local(ptr, old, val)		\
> +({
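
The quoted hunk is cut off right at the arch_cmpxchg_local() body. Not the literal
remainder of this patch, but the usual shape of such a wrapper (assuming the
stripped #include lines pull in the asm-generic cmpxchg-local helpers) is roughly:

	#define arch_cmpxchg_local(ptr, old, val)				\
	({									\
		__typeof__(*(ptr)) __ret;					\
										\
		switch (sizeof(*(ptr))) {					\
		case 4:								\
			/* native 32-bit ll/sc path defined above */		\
			__ret = __cmpxchg_32(ptr, old, val);			\
			break;							\
		default:							\
			/* interrupt-disabling generic fallback */		\
			__ret = (__typeof__(*(ptr)))__generic_cmpxchg_local(	\
					(ptr), (unsigned long)(old),		\
					(unsigned long)(val), sizeof(*(ptr)));	\
			break;							\
		}								\
		__ret;								\
	})

	#define arch_cmpxchg64_local(ptr, old, val)				\
		__generic_cmpxchg64_local((ptr), (old), (val))

Whether the real patch routes unsupported sizes through __generic_cmpxchg_local()
or keeps a BUILD_BUG() there is exactly the part that got truncated, so treat the
default branch above as an assumption.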