On Tue, Oct 21, 2025 at 08:57:16PM -0400, Pasha Tatashin wrote:
> Allow users of KHO to cancel a previous preservation by adding the
> necessary interfaces to unpreserve folios and pages.
> 
> Signed-off-by: Pasha Tatashin <[email protected]>

Reviewed-by: Mike Rapoport (Microsoft) <[email protected]>
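
For readers of the archive, a minimal usage sketch of the folio pair,
assuming a caller that gives up on a preservation before KHO is
finalized. The allocation, error paths, and the function name here are
illustrative only, not part of the patch:

	/* Illustrative only; not from the patch. */
	static int folio_preserve_cycle(void)
	{
		struct folio *folio;
		int err;

		folio = folio_alloc(GFP_KERNEL, 0);
		if (!folio)
			return -ENOMEM;

		err = kho_preserve_folio(folio);
		if (err)
			goto out;

		/*
		 * Cancel the preservation; once KHO is finalized this
		 * returns -EBUSY instead.
		 */
		err = kho_unpreserve_folio(folio);
	out:
		folio_put(folio);
		return err;
	}
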

> ---
>  include/linux/kexec_handover.h | 12 +++++
>  kernel/kexec_handover.c        | 85 ++++++++++++++++++++++++++++------
>  2 files changed, 84 insertions(+), 13 deletions(-)
> 
> diff --git a/include/linux/kexec_handover.h b/include/linux/kexec_handover.h
> index 2faf290803ce..4ba145713838 100644
> --- a/include/linux/kexec_handover.h
> +++ b/include/linux/kexec_handover.h
> @@ -43,7 +43,9 @@ bool kho_is_enabled(void);
>  bool is_kho_boot(void);
>  
>  int kho_preserve_folio(struct folio *folio);
> +int kho_unpreserve_folio(struct folio *folio);
>  int kho_preserve_pages(struct page *page, unsigned int nr_pages);
> +int kho_unpreserve_pages(struct page *page, unsigned int nr_pages);
>  int kho_preserve_vmalloc(void *ptr, struct kho_vmalloc *preservation);
>  struct folio *kho_restore_folio(phys_addr_t phys);
>  struct page *kho_restore_pages(phys_addr_t phys, unsigned int nr_pages);
> @@ -76,11 +78,21 @@ static inline int kho_preserve_folio(struct folio *folio)
>       return -EOPNOTSUPP;
>  }
>  
> +static inline int kho_unpreserve_folio(struct folio *folio)
> +{
> +     return -EOPNOTSUPP;
> +}
> +
>  static inline int kho_preserve_pages(struct page *page, unsigned int nr_pages)
>  {
>       return -EOPNOTSUPP;
>  }
>  
> +static inline int kho_unpreserve_pages(struct page *page, unsigned int nr_pages)
> +{
> +     return -EOPNOTSUPP;
> +}
> +
>  static inline int kho_preserve_vmalloc(void *ptr,
>                                      struct kho_vmalloc *preservation)
>  {
> diff --git a/kernel/kexec_handover.c b/kernel/kexec_handover.c
> index 0a4234269fe5..8412897385ad 100644
> --- a/kernel/kexec_handover.c
> +++ b/kernel/kexec_handover.c
> @@ -157,26 +157,33 @@ static void *xa_load_or_alloc(struct xarray *xa, unsigned long index)
>       return no_free_ptr(elm);
>  }
>  
> -static void __kho_unpreserve(struct kho_mem_track *track, unsigned long pfn,
> -                          unsigned long end_pfn)
> +static void __kho_unpreserve_order(struct kho_mem_track *track, unsigned long pfn,
> +                                unsigned int order)
>  {
>       struct kho_mem_phys_bits *bits;
>       struct kho_mem_phys *physxa;
> +     const unsigned long pfn_high = pfn >> order;
>  
> -     while (pfn < end_pfn) {
> -             const unsigned int order =
> -                     min(count_trailing_zeros(pfn), ilog2(end_pfn - pfn));
> -             const unsigned long pfn_high = pfn >> order;
> +     physxa = xa_load(&track->orders, order);
> +     if (!physxa)
> +             return;
> +
> +     bits = xa_load(&physxa->phys_bits, pfn_high / PRESERVE_BITS);
> +     if (!bits)
> +             return;
>  
> -             physxa = xa_load(&track->orders, order);
> -             if (!physxa)
> -                     continue;
> +     clear_bit(pfn_high % PRESERVE_BITS, bits->preserve);
> +}
> +
> +static void __kho_unpreserve(struct kho_mem_track *track, unsigned long pfn,
> +                          unsigned long end_pfn)
> +{
> +     unsigned int order;
>  
> -             bits = xa_load(&physxa->phys_bits, pfn_high / PRESERVE_BITS);
> -             if (!bits)
> -                     continue;
> +     while (pfn < end_pfn) {
> +             order = min(count_trailing_zeros(pfn), ilog2(end_pfn - pfn));
>  
> -             clear_bit(pfn_high % PRESERVE_BITS, bits->preserve);
> +             __kho_unpreserve_order(track, pfn, order);
>  
>               pfn += 1 << order;
>       }
> @@ -749,6 +756,30 @@ int kho_preserve_folio(struct folio *folio)
>  }
>  EXPORT_SYMBOL_GPL(kho_preserve_folio);
>  
> +/**
> + * kho_unpreserve_folio - unpreserve a folio.
> + * @folio: folio to unpreserve.
> + *
> + * Instructs KHO to unpreserve a folio that was previously preserved by
> + * kho_preserve_folio(). The provided @folio (pfn and order) must exactly
> + * match a preserved folio.
> + *
> + * Return: 0 on success, error code on failure
> + */
> +int kho_unpreserve_folio(struct folio *folio)
> +{
> +     const unsigned long pfn = folio_pfn(folio);
> +     const unsigned int order = folio_order(folio);
> +     struct kho_mem_track *track = &kho_out.track;
> +
> +     if (kho_out.finalized)
> +             return -EBUSY;
> +
> +     __kho_unpreserve_order(track, pfn, order);
> +     return 0;
> +}
> +EXPORT_SYMBOL_GPL(kho_unpreserve_folio);
> +
>  /**
>   * kho_preserve_pages - preserve contiguous pages across kexec
>   * @page: first page in the list.
> @@ -793,6 +824,34 @@ int kho_preserve_pages(struct page *page, unsigned int nr_pages)
>  }
>  EXPORT_SYMBOL_GPL(kho_preserve_pages);
>  
> +/**
> + * kho_unpreserve_pages - unpreserve contiguous pages.
> + * @page: first page in the list.
> + * @nr_pages: number of pages.
> + *
> + * Instructs KHO to unpreserve @nr_pages contiguous pages starting from @page.
> + * This call must exactly match the granularity at which memory was originally
> + * preserved by kho_preserve_pages() (with the same @page and @nr_pages).
> + * Unpreserving arbitrary sub-ranges of larger preserved blocks is not
> + * supported.
> + *
> + * Return: 0 on success, error code on failure
> + */
> +int kho_unpreserve_pages(struct page *page, unsigned int nr_pages)
> +{
> +     struct kho_mem_track *track = &kho_out.track;
> +     const unsigned long start_pfn = page_to_pfn(page);
> +     const unsigned long end_pfn = start_pfn + nr_pages;
> +
> +     if (kho_out.finalized)
> +             return -EBUSY;
> +
> +     __kho_unpreserve(track, start_pfn, end_pfn);
> +
> +     return 0;
> +}
> +EXPORT_SYMBOL_GPL(kho_unpreserve_pages);
> +
>  struct kho_vmalloc_hdr {
>       DECLARE_KHOSER_PTR(next, struct kho_vmalloc_chunk *);
>  };
> -- 
> 2.51.0.915.g61a8936c21-goog
> 
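
One more note for the archive: per the kerneldoc above, the pages pair
has to mirror one-to-one. A minimal sketch, assuming a caller that
preserved a 4-page run; the allocation, sizes, and function name are
illustrative only:

	/* Illustrative only; sizes picked for the example. */
	static int pages_preserve_cycle(void)
	{
		struct page *pages;
		int err;

		pages = alloc_pages(GFP_KERNEL, 2);	/* order-2: 4 pages */
		if (!pages)
			return -ENOMEM;

		err = kho_preserve_pages(pages, 4);
		if (err)
			goto out;

		/*
		 * Must mirror the preserve call exactly: same first page,
		 * same nr_pages. A sub-range such as (pages + 1, 2) is
		 * not supported.
		 */
		err = kho_unpreserve_pages(pages, 4);
	out:
		__free_pages(pages, 2);
		return err;
	}
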

-- 
Sincerely yours,
Mike.
