return -EINVAL;
spin_lock(&kiq->ring_lock);
- if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
- adev->gfx.num_compute_rings)) {
- spin_unlock(&adev->gfx.kiq[0].ring_lock);
- return -ENOMEM;
- }
+ if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
+ if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
+ adev->gfx.num_compute_rings)) {
+ spin_unlock(&kiq->ring_lock);
+ return -ENOMEM;
+ }
- for (i = 0; i < adev->gfx.num_compute_rings; i++) {
- j = i + xcc_id * adev->gfx.num_compute_rings;
- kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.compute_ring[i],
- RESET_QUEUES, 0, 0);
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ j = i + xcc_id * adev->gfx.num_compute_rings;
+ kiq->pmf->kiq_unmap_queues(kiq_ring,
+					   &adev->gfx.compute_ring[j],
+ RESET_QUEUES, 0, 0);
+ }
}
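
With the per-XCC offset applied, each master walks its own slice of the flat
compute_ring array: assuming, purely for illustration, 8 compute rings per
XCC, xcc_id 1 unmaps compute_ring[8] through compute_ring[15] as i runs from
0 to 7.
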
 	if (adev->gfx.kiq[0].ring.sched.ready && !adev->job_hang)
 		r = amdgpu_ring_test_helper(kiq_ring);
 	spin_unlock(&kiq->ring_lock);

 	DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe,
 		 kiq_ring->queue);
spin_lock(&kiq->ring_lock);
- r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
- adev->gfx.num_compute_rings +
- kiq->pmf->set_resources_size);
- if (r) {
- DRM_ERROR("Failed to lock KIQ (%d).\n", r);
- spin_unlock(&adev->gfx.kiq[0].ring_lock);
- return r;
- }
+	/* No need to map the kcq on a slave XCC */
+ if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
+ r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
+ adev->gfx.num_compute_rings +
+ kiq->pmf->set_resources_size);
+ if (r) {
+ DRM_ERROR("Failed to lock KIQ (%d).\n", r);
+			spin_unlock(&kiq->ring_lock);
+ return r;
+ }
- if (adev->enable_mes)
- queue_mask = ~0ULL;
+ if (adev->enable_mes)
+ queue_mask = ~0ULL;
- kiq->pmf->kiq_set_resources(kiq_ring, queue_mask);
- for (i = 0; i < adev->gfx.num_compute_rings; i++) {
- j = i + xcc_id * adev->gfx.num_compute_rings;
- kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.compute_ring[i]);
+ kiq->pmf->kiq_set_resources(kiq_ring, queue_mask);
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ j = i + xcc_id * adev->gfx.num_compute_rings;
+ kiq->pmf->kiq_map_queues(kiq_ring,
+					 &adev->gfx.compute_ring[j]);
+ }
}
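
For a sense of scale (packet sizes taken from gfx_v9_0's kiq_pm4_funcs and
only assumed to be representative here): with map_queues_size = 7,
set_resources_size = 8 and 8 compute rings, the allocation above reserves
7 * 8 + 8 = 64 dwords on the KIQ ring, i.e. one MAP_QUEUES packet per kcq
plus a single SET_RESOURCES packet.
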
r = amdgpu_ring_test_helper(kiq_ring);
}
}
+
+/*
+ * The first XCC of each partition is the master: xcc_id qualifies when it
+ * is divisible by num_xcc_per_xcp (treated as 1 when unset, avoiding a
+ * division by zero before partitioning is configured).
+ */
+bool amdgpu_gfx_is_master_xcc(struct amdgpu_device *adev, int xcc_id)
+{
+	return !(xcc_id % (adev->gfx.num_xcc_per_xcp ?
+			adev->gfx.num_xcc_per_xcp : 1));
+}
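
As an aside, the predicate is easy to sanity-check outside the driver; a
minimal user-space sketch, with num_xcc_per_xcp = 2 assumed purely as an
example value (is_master_xcc is a hypothetical stand-in, not a driver
symbol):

    #include <stdbool.h>
    #include <stdio.h>

    /* Mirrors amdgpu_gfx_is_master_xcc(): the first XCC of each
     * partition (xcc_id divisible by the partition width) is master. */
    static bool is_master_xcc(int xcc_id, int num_xcc_per_xcp)
    {
    	return !(xcc_id % (num_xcc_per_xcp ? num_xcc_per_xcp : 1));
    }

    int main(void)
    {
    	int xcc_id;

    	/* prints: master, slave, master, slave, master, slave */
    	for (xcc_id = 0; xcc_id < 6; xcc_id++)
    		printf("xcc %d: %s\n", xcc_id,
    		       is_master_xcc(xcc_id, 2) ? "master" : "slave");
    	return 0;
    }
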
if (r)
return r;
- for (j = 0; j < adev->gfx.num_compute_rings; j++) {
- ring = &adev->gfx.compute_ring[j + i * adev->gfx.num_compute_rings];
- amdgpu_ring_test_helper(ring);
+	/* Skip the kcq ring test on slave XCCs */
+ if (amdgpu_gfx_is_master_xcc(adev, i)) {
+ for (j = 0; j < adev->gfx.num_compute_rings; j++) {
+ ring = &adev->gfx.compute_ring[j +
+ i * adev->gfx.num_compute_rings];
+ amdgpu_ring_test_helper(ring);
+ }
}
gfx_v9_4_3_enable_gui_idle_interrupt(adev, true, i);
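
Note the index roles are flipped in this gfx_v9_4_3 hunk relative to the
amdgpu_gfx.c loops above: i is the XCC index (it also feeds
gfx_v9_4_3_enable_gui_idle_interrupt()) and j is the per-XCC ring, so
i = 1, j = 3 selects compute_ring[11] under the same assumed 8 rings per XCC.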