amdgpu_ttm_set_buffer_funcs_status(adev, true);
if (r)
@@ -4493,7 +4493,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->num_rings = 0;
RCU_INIT_POINTER(adev->gang_submit, dma_fence_get_stub());
adev->mman.buffer_funcs = NULL;
- adev->mman.buffer_funcs_ring = NULL;
+ adev->mman.num_buffer_funcs_scheds = 0;
adev->vm_manager.vm_pte_funcs = NULL;
adev->vm_manager.vm_pte_num_scheds = 0;
adev->gmc.gmc_funcs = NULL;
@@ -5965,7 +5965,7 @@ int amdgpu_device_reinit_after_reset(struct amdgpu_reset_context *reset_context)
if (r)
goto out;
- if (tmp_adev->mman.buffer_funcs_ring->sched.ready)
+ if (tmp_adev->mman.buffer_funcs_scheds[0]->ready)
amdgpu_ttm_set_buffer_funcs_status(tmp_adev, true);
r = amdgpu_device_ip_resume_phase3(tmp_adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 2713dd51ab9a..4433d8620129 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -651,12 +651,14 @@ int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev)
void amdgpu_gmc_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
uint32_t vmhub, uint32_t flush_type)
{
- struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+ struct amdgpu_ring *ring;
struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
struct dma_fence *fence;
struct amdgpu_job *job;
int r, i;
+ ring = container_of(adev->mman.buffer_funcs_scheds[0], struct amdgpu_ring, sched);
+
if (!hub->sdma_invalidation_workaround || vmid ||
!adev->mman.buffer_funcs_enabled || !adev->ib_pool_ready ||
!ring->sched.ready) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 6c333dba7a35..11fec0fa4c11 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -308,7 +308,7 @@ static int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
struct dma_resv *resv,
struct dma_fence **f)
{
- struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+ struct amdgpu_ring *ring;
struct amdgpu_res_cursor src_mm, dst_mm;
struct dma_fence *fence = NULL;
int r = 0;
@@ -321,6 +321,8 @@ static int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
return -EINVAL;
}
+ ring = container_of(adev->mman.buffer_funcs_scheds[0], struct amdgpu_ring, sched);
+
amdgpu_res_first(src->mem, src->offset, size, &src_mm);
amdgpu_res_first(dst->mem, dst->offset, size, &dst_mm);
@@ -1493,6 +1495,7 @@ static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo,
struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
struct amdgpu_res_cursor src_mm;
+ struct amdgpu_ring *ring;
struct amdgpu_job *job;
struct dma_fence *fence;
uint64_t src_addr, dst_addr;
@@ -1530,7 +1533,8 @@ static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo,
amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, dst_addr,
PAGE_SIZE, 0);
- amdgpu_ring_pad_ib(adev->mman.buffer_funcs_ring, &job->ibs[0]);
+ ring = container_of(adev->mman.buffer_funcs_scheds[0], struct amdgpu_ring, sched);
+ amdgpu_ring_pad_ib(ring, &job->ibs[0]);
WARN_ON(job->ibs[0].length_dw > num_dw);
fence = amdgpu_job_submit(job);
@@ -2196,11 +2200,9 @@ u32 amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
return windows;
if (enable) {
- struct amdgpu_ring *ring;
struct drm_gpu_scheduler *sched;
- ring = adev->mman.buffer_funcs_ring;
- sched = &ring->sched;
+ sched = adev->mman.buffer_funcs_scheds[0];
r = drm_sched_entity_init(&adev->mman.default_entity.base,
DRM_SCHED_PRIORITY_KERNEL, &sched,
1, NULL);
@@ -2432,7 +2434,7 @@ int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo,
struct dma_fence **fence)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+ struct amdgpu_ring *ring;
struct amdgpu_ttm_buffer_entity *entity;
struct amdgpu_res_cursor cursor;
u64 addr;
@@ -2443,6 +2445,8 @@ int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo,
if (!fence)
return -EINVAL;
+
+ ring = container_of(adev->mman.buffer_funcs_scheds[0], struct amdgpu_ring, sched);
entity = &adev->mman.clear_entities[0];
*fence = dma_fence_get_stub();
@@ -2494,9 +2498,9 @@ int amdgpu_fill_buffer(struct amdgpu_ttm_buffer_entity *entity,
u64 k_job_id)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
struct dma_fence *fence = NULL;
struct amdgpu_res_cursor dst;
+ struct amdgpu_ring *ring;
int r, e;
if (!adev->mman.buffer_funcs_enabled) {
@@ -2505,6 +2509,8 @@ int amdgpu_fill_buffer(struct amdgpu_ttm_buffer_entity *entity,
return -EINVAL;
}
+ ring = container_of(adev->mman.buffer_funcs_scheds[0], struct amdgpu_ring, sched);
+
if (entity == NULL) {
e = atomic_inc_return(&adev->mman.next_clear_entity) %
adev->mman.num_clear_entities;
@@ -2579,6 +2585,27 @@ int amdgpu_ttm_evict_resources(struct amdgpu_device *adev, int mem_type)
return ttm_resource_manager_evict_all(&adev->mman.bdev, man);
}
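+
+/**
+ * amdgpu_sdma_set_buffer_funcs_scheds - install SDMA buffer funcs and schedulers
+ * @adev: amdgpu device pointer
+ * @buffer_funcs: SDMA buffer function table to use for TTM moves
+ *
+ * Collects one GPU scheduler per SDMA instance, preferring the page queue
+ * when available, and exposes only the first scheduler while the SDMA TLB
+ * invalidation workaround is active.
+ */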
+void amdgpu_sdma_set_buffer_funcs_scheds(struct amdgpu_device *adev,
+ const struct amdgpu_buffer_funcs *buffer_funcs)
+{
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
+ struct drm_gpu_scheduler *sched;
+ int i;
+
+ adev->mman.buffer_funcs = buffer_funcs;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ if (adev->sdma.has_page_queue)
+ sched = &adev->sdma.instance[i].page.sched;
+ else
+ sched = &adev->sdma.instance[i].ring.sched;
+ adev->mman.buffer_funcs_scheds[i] = sched;
+ }
+
+ adev->mman.num_buffer_funcs_scheds = hub->sdma_invalidation_workaround ?
+ 1 : adev->sdma.num_instances;
+}
+