Hi Balbir,

Just one nit below :)
On Wed, Oct 1, 2025 at 3:43 PM Balbir Singh <[email protected]> wrote:
>
> Add routines to support allocation of large order zone device folios,
> plus helper functions for zone device folios: one to check whether a
> folio is device private, and helpers for setting zone device data.
>
> When large folios are used, the existing page_free() callback in
> pgmap is called when the folio is freed; this is true for both
> PAGE_SIZE and higher-order pages.
>
> Zone device private large folios do not support deferred split and
> scan like normal THP folios.
>
> Signed-off-by: Balbir Singh <[email protected]>
> Cc: David Hildenbrand <[email protected]>
> Cc: Zi Yan <[email protected]>
> Cc: Joshua Hahn <[email protected]>
> Cc: Rakie Kim <[email protected]>
> Cc: Byungchul Park <[email protected]>
> Cc: Gregory Price <[email protected]>
> Cc: Ying Huang <[email protected]>
> Cc: Alistair Popple <[email protected]>
> Cc: Oscar Salvador <[email protected]>
> Cc: Lorenzo Stoakes <[email protected]>
> Cc: Baolin Wang <[email protected]>
> Cc: "Liam R. Howlett" <[email protected]>
> Cc: Nico Pache <[email protected]>
> Cc: Ryan Roberts <[email protected]>
> Cc: Dev Jain <[email protected]>
> Cc: Barry Song <[email protected]>
> Cc: Lyude Paul <[email protected]>
> Cc: Danilo Krummrich <[email protected]>
> Cc: David Airlie <[email protected]>
> Cc: Simona Vetter <[email protected]>
> Cc: Ralph Campbell <[email protected]>
> Cc: Mika Penttilä <[email protected]>
> Cc: Matthew Brost <[email protected]>
> Cc: Francois Dugast <[email protected]>
> Cc: Madhavan Srinivasan <[email protected]>
> Cc: Christophe Leroy <[email protected]>
> Cc: Felix Kuehling <[email protected]>
> Cc: Alex Deucher <[email protected]>
> Cc: "Christian König" <[email protected]>
> Cc: Andrew Morton <[email protected]>
> ---
>  arch/powerpc/kvm/book3s_hv_uvmem.c       |  2 +-
>  drivers/gpu/drm/amd/amdkfd/kfd_migrate.c |  2 +-
>  drivers/gpu/drm/drm_pagemap.c            |  2 +-
>  drivers/gpu/drm/nouveau/nouveau_dmem.c   |  2 +-
>  include/linux/memremap.h                 | 10 ++++++++-
>  lib/test_hmm.c                           |  2 +-
>  mm/memremap.c                            | 26 ++++++++++++++----------
>  mm/rmap.c                                |  6 +++++-
>  8 files changed, 34 insertions(+), 18 deletions(-)
>
> diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c
> index 03f8c34fa0a2..91f763410673 100644
> --- a/arch/powerpc/kvm/book3s_hv_uvmem.c
> +++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
> @@ -723,7 +723,7 @@ static struct page *kvmppc_uvmem_get_page(unsigned long gpa, struct kvm *kvm)
>
>  	dpage = pfn_to_page(uvmem_pfn);
>  	dpage->zone_device_data = pvt;
> -	zone_device_page_init(dpage);
> +	zone_device_page_init(dpage, 0);
>  	return dpage;
>  out_clear:
>  	spin_lock(&kvmppc_uvmem_bitmap_lock);
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
> index 79251f22b702..d0e2cae33035 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
> @@ -217,7 +217,7 @@ svm_migrate_get_vram_page(struct svm_range *prange, unsigned long pfn)
>  	page = pfn_to_page(pfn);
>  	svm_range_bo_ref(prange->svm_bo);
>  	page->zone_device_data = prange->svm_bo;
> -	zone_device_page_init(page);
> +	zone_device_page_init(page, 0);
>  }
>
>  static void
> diff --git a/drivers/gpu/drm/drm_pagemap.c b/drivers/gpu/drm/drm_pagemap.c
> index 1da55322af12..31c53f724e25 100644
> --- a/drivers/gpu/drm/drm_pagemap.c
> +++ b/drivers/gpu/drm/drm_pagemap.c
> @@ -196,7 +196,7 @@ static void drm_pagemap_get_devmem_page(struct page *page,
>  					struct drm_pagemap_zdd *zdd)
>  {
>  	page->zone_device_data = drm_pagemap_zdd_get(zdd);
> -	zone_device_page_init(page);
> +	zone_device_page_init(page, 0);
>  }
>
>  /**
> diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
> index ca4932a150e3..53cc1926b9da 100644
> --- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
> +++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
> @@ -318,7 +318,7 @@ nouveau_dmem_page_alloc_locked(struct nouveau_drm *drm)
>  		return NULL;
>  	}
>
> -	zone_device_page_init(page);
> +	zone_device_page_init(page, 0);
>  	return page;
>  }
>
> diff --git a/include/linux/memremap.h b/include/linux/memremap.h
> index e5951ba12a28..d2487a19cba2 100644
> --- a/include/linux/memremap.h
> +++ b/include/linux/memremap.h
> @@ -206,7 +206,7 @@ static inline bool is_fsdax_page(const struct page *page)
>  }
>
>  #ifdef CONFIG_ZONE_DEVICE
> -void zone_device_page_init(struct page *page);
> +void zone_device_page_init(struct page *page, unsigned int order);
>  void *memremap_pages(struct dev_pagemap *pgmap, int nid);
>  void memunmap_pages(struct dev_pagemap *pgmap);
>  void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap);
> @@ -215,6 +215,14 @@ struct dev_pagemap *get_dev_pagemap(unsigned long pfn);
>  bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn);
>
>  unsigned long memremap_compat_align(void);
> +
> +static inline void zone_device_folio_init(struct folio *folio, unsigned int order)
> +{
> +	zone_device_page_init(&folio->page, order);
> +	if (order)
> +		folio_set_large_rmappable(folio);
> +}
> +
>  #else
>  static inline void *devm_memremap_pages(struct device *dev,
>  			struct dev_pagemap *pgmap)
> diff --git a/lib/test_hmm.c b/lib/test_hmm.c
> index 83e3d8208a54..24d82121cde8 100644
> --- a/lib/test_hmm.c
> +++ b/lib/test_hmm.c
> @@ -627,7 +627,7 @@ static struct page *dmirror_devmem_alloc_page(struct dmirror_device *mdevice)
>  		goto error;
>  	}
>
> -	zone_device_page_init(dpage);
> +	zone_device_page_init(dpage, 0);
>  	dpage->zone_device_data = rpage;
>  	return dpage;
>
> diff --git a/mm/memremap.c b/mm/memremap.c
> index 46cb1b0b6f72..e45dfb568710 100644
> --- a/mm/memremap.c
> +++ b/mm/memremap.c
> @@ -416,20 +416,19 @@ EXPORT_SYMBOL_GPL(get_dev_pagemap);
>  void free_zone_device_folio(struct folio *folio)
>  {
>  	struct dev_pagemap *pgmap = folio->pgmap;
> +	unsigned long nr = folio_nr_pages(folio);
> +	int i;
>
>  	if (WARN_ON_ONCE(!pgmap))
>  		return;
>
>  	mem_cgroup_uncharge(folio);
>
> -	/*
> -	 * Note: we don't expect anonymous compound pages yet. Once supported
> -	 * and we could PTE-map them similar to THP, we'd have to clear
> -	 * PG_anon_exclusive on all tail pages.
> -	 */
>  	if (folio_test_anon(folio)) {
> -		VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
> -		__ClearPageAnonExclusive(folio_page(folio, 0));
> +		for (i = 0; i < nr; i++)
> +			__ClearPageAnonExclusive(folio_page(folio, i));
> +	} else {
> +		VM_WARN_ON_ONCE(folio_test_large(folio));
>  	}
>
>  	/*
> @@ -456,8 +455,8 @@ void free_zone_device_folio(struct folio *folio)
>  	case MEMORY_DEVICE_COHERENT:
>  		if (WARN_ON_ONCE(!pgmap->ops || !pgmap->ops->page_free))
>  			break;
> -		pgmap->ops->page_free(folio_page(folio, 0));
> -		put_dev_pagemap(pgmap);
> +		pgmap->ops->page_free(&folio->page);
> +		percpu_ref_put_many(&folio->pgmap->ref, nr);

Nit: &pgmap->ref here, for consistency? The local 'pgmap' already
holds folio->pgmap.

Cheers,
Lance
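
P.S. Unrelated to the nit, just to double-check I'm reading the new
interface right, here is a minimal sketch of how I'd expect a driver
to use it for a higher-order allocation. All mydev_* names below are
made up for illustration; only zone_device_folio_init() and the new
order argument come from this patch:

static struct folio *mydev_alloc_device_folio(struct mydev *mdev,
					      unsigned int order)
{
	struct folio *folio;
	unsigned long pfn;

	/* Assumed driver-side allocator for a 2^order run of device PFNs. */
	pfn = mydev_get_free_pfn(mdev, order);
	if (!pfn)
		return NULL;

	folio = page_folio(pfn_to_page(pfn));
	folio->page.zone_device_data = mdev;

	/*
	 * order == 0 matches the existing zone_device_page_init(page)
	 * callers; order > 0 additionally marks the folio
	 * large-rmappable, and pgmap->ops->page_free() is then invoked
	 * once for the whole folio when it is freed.
	 */
	zone_device_folio_init(folio, order);
	return folio;
}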
