On 2025-11-24 09:55, Chen, Xiaogang wrote:
[AMD Official Use Only - AMD Internal Distribution Only]

ping

-----Original Message-----
From: Xiaogang.Chen <[email protected]>
Sent: Thursday, November 20, 2025 9:51 AM
To: [email protected]
Cc: Chen, Xiaogang <[email protected]>
Subject: [PATCH v4] drm/amdkfd: Use huge page size to check split svm range alignment

From: Xiaogang Chen <[email protected]>

Fixes: 7ef6b2d4b7e5 ("drm/amdkfd: remap unaligned svm ranges that have split")

When splitting svm ranges that have been mapped with huge pages, use the huge
page size (2MB) to check the split range alignment, not prange->granularity,
which is the migration granularity.
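
A minimal illustration of the check being introduced (not part of the patch;
HPAGE_PFNS is an illustrative name, and 4 KiB base pages are assumed so one
2 MiB huge page covers 512 page frames):

/*
 * Sketch only: prange->start and prange->last are page numbers, so the
 * range can only have been mapped with a 2 MiB huge page if at least
 * one fully 512-page-aligned block lies inside it.
 */
#define HPAGE_PFNS 512UL	/* 2 MiB / 4 KiB base pages */

bool huge_page_mapping =
	ALIGN_DOWN(prange->last, HPAGE_PFNS) > ALIGN(prange->start, HPAGE_PFNS);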

Signed-off-by: Xiaogang Chen <[email protected]>
---
  drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 46 +++++++++++++++++++---------
  1 file changed, 32 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 521c14c7a789..7fe9d569d416 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -1144,30 +1144,48 @@ static int
  svm_range_split_tail(struct svm_range *prange, uint64_t new_last,
                      struct list_head *insert_list, struct list_head *remap_list)
  {
+       unsigned long last_align_down = ALIGN_DOWN(prange->last, 512);
+       unsigned long start_align = ALIGN(prange->start, 512);
+       bool huge_page_mapping = last_align_down > start_align;
         struct svm_range *tail = NULL;
-       int r = svm_range_split(prange, prange->start, new_last, &tail);
+       int r;

-       if (!r) {
-               list_add(&tail->list, insert_list);
-               if (!IS_ALIGNED(new_last + 1, 1UL << prange->granularity))
-                       list_add(&tail->update_list, remap_list);
-       }
-       return r;
+       r = svm_range_split(prange, prange->start, new_last, &tail);
+
+       if (r)
+               return r;
+
+       list_add(&tail->list, insert_list);
+
+       if (huge_page_mapping && tail->start > start_align &&
+           tail->start < last_align_down && (!IS_ALIGNED(tail->start, 512)))
+               list_add(&tail->update_list, remap_list);
+
+       return 0;
  }

  static int
  svm_range_split_head(struct svm_range *prange, uint64_t new_start,
                      struct list_head *insert_list, struct list_head *remap_list)
  {
+       unsigned long last_align_down = ALIGN_DOWN(prange->last, 512);
+       unsigned long start_align = ALIGN(prange->start, 512);
+       bool huge_page_mapping = last_align_down > start_align;
         struct svm_range *head = NULL;
-       int r = svm_range_split(prange, new_start, prange->last, &head);
+       int r;

-       if (!r) {
-               list_add(&head->list, insert_list);
-               if (!IS_ALIGNED(new_start, 1UL << prange->granularity))
-                       list_add(&head->update_list, remap_list);
-       }
-       return r;
+       r = svm_range_split(prange, new_start, prange->last, &head);
+
+       if (r)
+               return r;
+
+       list_add(&head->list, insert_list);
+
+       if (huge_page_mapping && head->last > start_align &&
+           head->last < last_align_down && (!IS_ALIGNED(head->last, 512)))
Replace all uses of head->last with head->last + 1 in the if condition, or use new_start instead; a sketch of the corrected condition follows the patch below.

With this fixed, this patch is
Reviewed-by: Philip Yang <[email protected]>
+               list_add(&head->update_list, remap_list);
+
+       return 0;
  }

  static void
--
2.34.1
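
For reference, a sketch of the head-side condition with the suggested
substitution applied (an illustration only, not the final revision; it relies
on new_start == head->last + 1 after the split, which the review comment
above confirms):

	if (huge_page_mapping && new_start > start_align &&
	    new_start < last_align_down && !IS_ALIGNED(new_start, 512))
		list_add(&head->update_list, remap_list);

Using new_start names the split boundary directly instead of deriving it from
the head range's last page, which avoids the off-by-one.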

