In situations where the system is very short on RAM, the shmem
readback from swap-space may invoke the OOM killer.

However, this might be a recoverable situation. If the caller
indicates that by setting
struct ttm_operation_ctx::gfp_retry_mayfail to true, adjust the gfp
value used by the allocation accordingly.

Signed-off-by: Thomas Hellström <[email protected]>
---
 drivers/gpu/drm/ttm/ttm_backup.c | 6 ++++--
 drivers/gpu/drm/ttm/ttm_pool.c   | 5 ++++-
 include/drm/ttm/ttm_backup.h     | 2 +-
 3 files changed, 9 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_backup.c b/drivers/gpu/drm/ttm/ttm_backup.c
index 6bd4c123d94c..81df4cb5606b 100644
--- a/drivers/gpu/drm/ttm/ttm_backup.c
+++ b/drivers/gpu/drm/ttm/ttm_backup.c
@@ -44,18 +44,20 @@ void ttm_backup_drop(struct file *backup, pgoff_t handle)
  * @dst: The struct page to copy into.
  * @handle: The handle returned when the page was backed up.
  * @intr: Try to perform waits interruptible or at least killable.
+ * @additional_gfp: GFP mask to add to the default GFP mask if any.
  *
  * Return: 0 on success, Negative error code on failure, notably
  * -EINTR if @intr was set to true and a signal is pending.
  */
 int ttm_backup_copy_page(struct file *backup, struct page *dst,
-                        pgoff_t handle, bool intr)
+                        pgoff_t handle, bool intr, gfp_t additional_gfp)
 {
        struct address_space *mapping = backup->f_mapping;
        struct folio *from_folio;
        pgoff_t idx = ttm_backup_handle_to_shmem_idx(handle);
 
-       from_folio = shmem_read_folio(mapping, idx);
+       from_folio = shmem_read_folio_gfp(mapping, idx, mapping_gfp_mask(mapping)
+                                         | additional_gfp);
        if (IS_ERR(from_folio))
                return PTR_ERR(from_folio);
 
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index 8fa9e09f6ee5..aa41099c5ecf 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -530,6 +530,8 @@ static int ttm_pool_restore_commit(struct ttm_pool_tt_restore *restore,
                p = first_page[i];
                if (ttm_backup_page_ptr_is_handle(p)) {
                        unsigned long handle = ttm_backup_page_ptr_to_handle(p);
+                       gfp_t additional_gfp = ctx->gfp_retry_mayfail ?
+                               __GFP_RETRY_MAYFAIL | __GFP_NOWARN : 0;
 
                        if (IS_ENABLED(CONFIG_FAULT_INJECTION) && ctx->interruptible &&
                            should_fail(&backup_fault_inject, 1)) {
@@ -543,7 +545,8 @@ static int ttm_pool_restore_commit(struct ttm_pool_tt_restore *restore,
                        }
 
                        ret = ttm_backup_copy_page(backup, restore->alloced_page + i,
-                                                  handle, ctx->interruptible);
+                                                  handle, ctx->interruptible,
+                                                  additional_gfp);
                        if (ret)
                                break;
 
diff --git a/include/drm/ttm/ttm_backup.h b/include/drm/ttm/ttm_backup.h
index c33cba111171..29b9c855af77 100644
--- a/include/drm/ttm/ttm_backup.h
+++ b/include/drm/ttm/ttm_backup.h
@@ -56,7 +56,7 @@ ttm_backup_page_ptr_to_handle(const struct page *page)
 void ttm_backup_drop(struct file *backup, pgoff_t handle);
 
 int ttm_backup_copy_page(struct file *backup, struct page *dst,
-                        pgoff_t handle, bool intr);
+                        pgoff_t handle, bool intr, gfp_t additional_gfp);
 
 s64
 ttm_backup_backup_page(struct file *backup, struct page *page,
-- 
2.53.0

Reply via email to