On Wed, Oct 1, 2025 at 3:25 PM Balbir Singh <[email protected]> wrote:
>
> Add device-private THP support to reverse mapping infrastructure, enabling
> proper handling during migration and walk operations.
>
> The key changes are:
> - set_pmd_migration_entry()/remove_migration_pmd(): Handle device-private
> entries during folio migration and splitting
> - page_vma_mapped_walk(): Recognize device-private THP entries during
> VMA traversal operations
>
> This change supports folio splitting and migration operations on
> device-private entries.
>
> Cc: Andrew Morton <[email protected]>
> Cc: David Hildenbrand <[email protected]>
> Cc: Zi Yan <[email protected]>
> Cc: Joshua Hahn <[email protected]>
> Cc: Rakie Kim <[email protected]>
> Cc: Byungchul Park <[email protected]>
> Cc: Gregory Price <[email protected]>
> Cc: Ying Huang <[email protected]>
> Cc: Alistair Popple <[email protected]>
> Cc: Oscar Salvador <[email protected]>
> Cc: Lorenzo Stoakes <[email protected]>
> Cc: Baolin Wang <[email protected]>
> Cc: "Liam R. Howlett" <[email protected]>
> Cc: Nico Pache <[email protected]>
> Cc: Ryan Roberts <[email protected]>
> Cc: Dev Jain <[email protected]>
> Cc: Barry Song <[email protected]>
> Cc: Lyude Paul <[email protected]>
> Cc: Danilo Krummrich <[email protected]>
> Cc: David Airlie <[email protected]>
> Cc: Simona Vetter <[email protected]>
> Cc: Ralph Campbell <[email protected]>
> Cc: Mika Penttilä <[email protected]>
> Cc: Matthew Brost <[email protected]>
> Cc: Francois Dugast <[email protected]>
> Acked-by: Zi Yan <[email protected]>
> Signed-off-by: Balbir Singh <[email protected]>
> Reviewed-by: SeongJae Park <[email protected]>
> ---
> mm/damon/ops-common.c | 20 +++++++++++++++++---
> mm/huge_memory.c | 16 +++++++++++++++-
> mm/page_idle.c | 7 +++++--
> mm/page_vma_mapped.c | 7 +++++++
> mm/rmap.c | 24 ++++++++++++++++++++----
> 5 files changed, 64 insertions(+), 10 deletions(-)
>
> diff --git a/mm/damon/ops-common.c b/mm/damon/ops-common.c
> index 998c5180a603..ac54bf5b2623 100644
> --- a/mm/damon/ops-common.c
> +++ b/mm/damon/ops-common.c
> @@ -75,12 +75,24 @@ void damon_ptep_mkold(pte_t *pte, struct vm_area_struct *vma, unsigned long addr
> void damon_pmdp_mkold(pmd_t *pmd, struct vm_area_struct *vma, unsigned long addr)
> {
> #ifdef CONFIG_TRANSPARENT_HUGEPAGE
> - struct folio *folio = damon_get_folio(pmd_pfn(pmdp_get(pmd)));
> + pmd_t pmdval = pmdp_get(pmd);
> + struct folio *folio;
> + bool young = false;
> + unsigned long pfn;
> +
> + if (likely(pmd_present(pmdval)))
> + pfn = pmd_pfn(pmdval);
> + else
> + pfn = swp_offset_pfn(pmd_to_swp_entry(pmdval));
>
> + folio = damon_get_folio(pfn);
> if (!folio)
> return;
>
> - if (pmdp_clear_young_notify(vma, addr, pmd))
> + if (likely(pmd_present(pmdval)))
> + young |= pmdp_clear_young_notify(vma, addr, pmd);
> + young |= mmu_notifier_clear_young(vma->vm_mm, addr, addr + HPAGE_PMD_SIZE);
> + if (young)
> folio_set_young(folio);
>
> folio_set_idle(folio);
> @@ -203,7 +215,9 @@ static bool damon_folio_young_one(struct folio *folio,
> mmu_notifier_test_young(vma->vm_mm, addr);
> } else {
> #ifdef CONFIG_TRANSPARENT_HUGEPAGE
> - *accessed = pmd_young(pmdp_get(pvmw.pmd)) ||
> + pmd_t pmd = pmdp_get(pvmw.pmd);
> +
> + *accessed = (pmd_present(pmd) && pmd_young(pmd)) ||
> !folio_test_idle(folio) ||
> mmu_notifier_test_young(vma->vm_mm, addr);
> #else
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index 8e0a1747762d..483b8341ce22 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -4628,7 +4628,10 @@ int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
> return 0;
>
> flush_cache_range(vma, address, address + HPAGE_PMD_SIZE);
> - pmdval = pmdp_invalidate(vma, address, pvmw->pmd);
> + if (unlikely(!pmd_present(*pvmw->pmd)))
> + pmdval = pmdp_huge_get_and_clear(vma->vm_mm, address, pvmw->pmd);
> + else
> + pmdval = pmdp_invalidate(vma, address, pvmw->pmd);
>
> /* See folio_try_share_anon_rmap_pmd(): invalidate PMD first. */
> anon_exclusive = folio_test_anon(folio) && PageAnonExclusive(page);
> @@ -4678,6 +4681,17 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
> entry = pmd_to_swp_entry(*pvmw->pmd);
> folio_get(folio);
> pmde = folio_mk_pmd(folio, READ_ONCE(vma->vm_page_prot));
> +
> + if (folio_is_device_private(folio)) {
> + if (pmd_write(pmde))
> + entry = make_writable_device_private_entry(
> + page_to_pfn(new));
> + else
> + entry = make_readable_device_private_entry(
> + page_to_pfn(new));
> + pmde = swp_entry_to_pmd(entry);
> + }
> +
> if (pmd_swp_soft_dirty(*pvmw->pmd))
> pmde = pmd_mksoft_dirty(pmde);
> if (is_writable_migration_entry(entry))
> diff --git a/mm/page_idle.c b/mm/page_idle.c
> index a82b340dc204..d4299de81031 100644
> --- a/mm/page_idle.c
> +++ b/mm/page_idle.c
> @@ -71,8 +71,11 @@ static bool page_idle_clear_pte_refs_one(struct folio *folio,
> referenced |= ptep_test_and_clear_young(vma, addr, pvmw.pte);
> referenced |= mmu_notifier_clear_young(vma->vm_mm, addr, addr + PAGE_SIZE);
> } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
> - if (pmdp_clear_young_notify(vma, addr, pvmw.pmd))
> - referenced = true;
> + pmd_t pmdval = pmdp_get(pvmw.pmd);
> +
> + if (likely(pmd_present(pmdval)))
> + referenced |= pmdp_clear_young_notify(vma, addr, pvmw.pmd);
> + referenced |= mmu_notifier_clear_young(vma->vm_mm, addr, addr + PMD_SIZE);
> } else {
> /* unexpected pmd-mapped page? */
> WARN_ON_ONCE(1);
> diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
> index c498a91b6706..137ce27ff68c 100644
> --- a/mm/page_vma_mapped.c
> +++ b/mm/page_vma_mapped.c
> @@ -277,6 +277,13 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
> * cannot return prematurely, while zap_huge_pmd() has
> * cleared *pmd but not decremented compound_mapcount().
> */
> + swp_entry_t entry = pmd_to_swp_entry(pmde);
> +
> + if (is_device_private_entry(entry)) {
> + pvmw->ptl = pmd_lock(mm, pvmw->pmd);
> + return true;
> + }
> +
We could make this simpler:
if (is_device_private_entry(pmd_to_swp_entry(pmde))) {
pvmw->ptl = pmd_lock(mm, pvmw->pmd);
return true;
}
Thanks,
Lance
> if ((pvmw->flags & PVMW_SYNC) &&
> thp_vma_suitable_order(vma, pvmw->address, PMD_ORDER) &&
> diff --git a/mm/rmap.c b/mm/rmap.c
> index 9bab13429975..c3fc30cf3636 100644
> --- a/mm/rmap.c
> +++ b/mm/rmap.c
> @@ -1046,9 +1046,16 @@ static int page_vma_mkclean_one(struct page_vma_mapped_walk *pvmw)
> } else {
> #ifdef CONFIG_TRANSPARENT_HUGEPAGE
> pmd_t *pmd = pvmw->pmd;
> - pmd_t entry;
> + pmd_t entry = pmdp_get(pmd);
>
> - if (!pmd_dirty(*pmd) && !pmd_write(*pmd))
> + /*
> + * Please see the comment above (!pte_present).
> + * A non present PMD is not writable from a CPU
> + * perspective.
> + */
> + if (!pmd_present(entry))
> + continue;
> + if (!pmd_dirty(entry) && !pmd_write(entry))
> continue;
>
> flush_cache_range(vma, address,
> @@ -2343,6 +2350,9 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
> while (page_vma_mapped_walk(&pvmw)) {
> /* PMD-mapped THP migration entry */
> if (!pvmw.pte) {
> + __maybe_unused unsigned long pfn;
> + __maybe_unused pmd_t pmdval;
> +
> if (flags & TTU_SPLIT_HUGE_PMD) {
> split_huge_pmd_locked(vma, pvmw.address, pvmw.pmd, true);
> @@ -2351,8 +2361,14 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
> break;
> }
> #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
> - subpage = folio_page(folio,
> - pmd_pfn(*pvmw.pmd) - folio_pfn(folio));
> + pmdval = pmdp_get(pvmw.pmd);
> + if (likely(pmd_present(pmdval)))
> + pfn = pmd_pfn(pmdval);
> + else
> + pfn = swp_offset_pfn(pmd_to_swp_entry(pmdval));
> +
> + subpage = folio_page(folio, pfn - folio_pfn(folio));
> +
> VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
> !folio_test_pmd_mappable(folio), folio);
>
> --
> 2.51.0
>
>