int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
			       enum amd_powergating_state state);
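+/*
+ * True when GPU recovery is enabled and every job timeout is finite, i.e.
+ * a hung job will be aborted and recovered instead of being left in place
+ * for post-mortem debugging.
+ */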
+static inline bool amdgpu_device_has_timeouts_enabled(struct amdgpu_device *adev)
+{
+	return amdgpu_gpu_recovery != 0 &&
+		adev->gfx_timeout != MAX_SCHEDULE_TIMEOUT &&
+		adev->compute_timeout != MAX_SCHEDULE_TIMEOUT &&
+		adev->sdma_timeout != MAX_SCHEDULE_TIMEOUT &&
+		adev->video_timeout != MAX_SCHEDULE_TIMEOUT;
+}
+
#include "amdgpu_object.h"
static inline bool amdgpu_is_tmz(struct amdgpu_device *adev)
	if (r)
		goto out;
-	src_addr = write ? amdgpu_bo_gpu_offset(adev->mman.sdma_access_bo) :
-			   amdgpu_bo_gpu_offset(abo);
-	dst_addr = write ? amdgpu_bo_gpu_offset(abo) :
-			   amdgpu_bo_gpu_offset(adev->mman.sdma_access_bo);
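+	/* a read copies the BO into the bounce buffer; a write goes the other way */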
+	src_addr = amdgpu_bo_gpu_offset(abo);
+	dst_addr = amdgpu_bo_gpu_offset(adev->mman.sdma_access_bo);
+	if (write)
+		swap(src_addr, dst_addr);
+
	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, dst_addr, PAGE_SIZE, false);
	amdgpu_ring_pad_ib(adev->mman.buffer_funcs_ring, &job->ibs[0]);
	return r;
}
-static inline bool amdgpu_ttm_allow_post_mortem_debug(struct amdgpu_device *adev)
-{
-	return amdgpu_gpu_recovery == 0 ||
-		adev->gfx_timeout == MAX_SCHEDULE_TIMEOUT ||
-		adev->compute_timeout == MAX_SCHEDULE_TIMEOUT ||
-		adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT ||
-		adev->video_timeout == MAX_SCHEDULE_TIMEOUT;
-}
-
/**
 * amdgpu_ttm_access_memory - Read or Write memory that backs a buffer object.
 *
	if (bo->resource->mem_type != TTM_PL_VRAM)
		return -EIO;
-	if (!amdgpu_ttm_allow_post_mortem_debug(adev) &&
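+	/* use the SDMA bounce buffer only when a hung job is guaranteed to time out */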
+	if (amdgpu_device_has_timeouts_enabled(adev) &&
	    !amdgpu_ttm_access_memory_sdma(bo, offset, buf, len, write))
		return len;
	ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA);
	ttm_device_fini(&adev->mman.bdev);
	adev->mman.initialized = false;
-	if (adev->mman.sdma_access_ptr)
-		amdgpu_bo_free_kernel(&adev->mman.sdma_access_bo, NULL,
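+	/* amdgpu_bo_free_kernel() already bails out on a NULL BO, no need to check */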
+	amdgpu_bo_free_kernel(&adev->mman.sdma_access_bo, NULL,
			      &adev->mman.sdma_access_ptr);
DRM_INFO("amdgpu: ttm finalized\n");
}