/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "amdgpu_psp.h"
#include "amdgpu_smu.h"
#include "atom.h"
#include "amd_pcie.h"
#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "hdp/hdp_5_0_0_offset.h"
#include "hdp/hdp_5_0_0_sh_mask.h"
#include "smuio/smuio_11_0_0_offset.h"
#include "mp/mp_11_0_offset.h"
#include "soc15.h"
#include "soc15_common.h"
#include "gmc_v10_0.h"
#include "gfxhub_v2_0.h"
#include "mmhub_v2_0.h"
#include "nbio_v2_3.h"
#include "nv.h"
#include "navi10_ih.h"
#include "gfx_v10_0.h"
#include "sdma_v5_0.h"
#include "sdma_v5_2.h"
#include "vcn_v2_0.h"
#include "jpeg_v2_0.h"
#include "vcn_v3_0.h"
#include "jpeg_v3_0.h"
#include "dce_virtual.h"
#include "mes_v10_1.h"
#include "mxgpu_nv.h"
static const struct amd_ip_funcs nv_common_ip_funcs;
/*
 * Indirect registers accessor
 */
static u32 nv_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);
	(void)RREG32(address);
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}
static void nv_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);
	(void)RREG32(address);
	WREG32(data, v);
	(void)RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
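/*
 * Note on the two accessors above: PCIE config space is reached through an
 * index/data register pair exposed by the NBIO block. The write to the
 * index register is followed by a dummy read, (void)RREG32(address), so the
 * posted write has landed before the data port is touched; the spinlock
 * keeps the index/data sequence atomic against concurrent users.
 */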
static u32 nv_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}
static void nv_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}
static u32 nv_get_config_memsize(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_memsize(adev);
}
static u32 nv_get_xclk(struct amdgpu_device *adev)
{
	return adev->clock.spll.reference_freq;
}
void nv_grbm_select(struct amdgpu_device *adev,
		    u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 grbm_gfx_cntl = 0;
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL), grbm_gfx_cntl);
}
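/*
 * REG_SET_FIELD(val, REG, FIELD, v) above is amdgpu's read-modify-write
 * helper: it masks out REG__FIELD in val and ORs in v shifted into place.
 * Illustrative caller sketch (not taken from this file): GFX code typically
 * brackets per-queue register programming with a select and a reselect of
 * the default bank, e.g.
 *
 *	mutex_lock(&adev->srbm_mutex);
 *	nv_grbm_select(adev, me, pipe, queue, 0);
 *	...program per-queue CP registers...
 *	nv_grbm_select(adev, 0, 0, 0, 0);
 *	mutex_unlock(&adev->srbm_mutex);
 */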
static void nv_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}
static bool nv_read_disabled_bios(struct amdgpu_device *adev)
{
	/* todo */
	return false;
}
static bool nv_read_bios_from_rom(struct amdgpu_device *adev,
				  u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	u32 i, length_dw;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;

	/* set rom index to 0 */
	WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0);
	/* read out the rom data */
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA));

	return true;
}
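/*
 * The loop above issues a single ROM_INDEX write and then reads ROM_DATA
 * repeatedly; this relies on the SMUIO ROM port auto-incrementing its index
 * after each data read (an assumption about the hardware, but it is the
 * only way one index write can stream out the whole image).
 */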
static struct soc15_allowed_register_entry nv_allowed_read_registers[] = {
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
	{ SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
	{ SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
};
static uint32_t nv_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
					 u32 sh_num, u32 reg_offset)
{
	uint32_t val;

	mutex_lock(&adev->grbm_idx_mutex);
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

	val = RREG32(reg_offset);

	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
	return val;
}
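/*
 * se_num/sh_num == 0xffffffff means "broadcast": GRBM is already pointing
 * at all shader engines/arrays, which is the default, so no select is
 * needed. For a specific SE/SH the index is programmed before the read and
 * restored to broadcast afterwards, all under grbm_idx_mutex.
 */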
static uint32_t nv_get_register_value(struct amdgpu_device *adev,
				      bool indexed, u32 se_num,
				      u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		return nv_read_indexed_register(adev, se_num, sh_num, reg_offset);
	} else {
		if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
			return adev->gfx.config.gb_addr_config;
		return RREG32(reg_offset);
	}
}
static int nv_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;
	struct soc15_allowed_register_entry *en;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(nv_allowed_read_registers); i++) {
		en = &nv_allowed_read_registers[i];
		if (reg_offset !=
		    (adev->reg_offset[en->hwip][en->inst][en->seg] + en->reg_offset))
			continue;

		*value = nv_get_register_value(adev,
					       nv_allowed_read_registers[i].grbm_indexed,
					       se_num, sh_num, reg_offset);
		return 0;
	}
	return -EINVAL;
}
static int nv_asic_mode1_reset(struct amdgpu_device *adev)
{
	u32 i;
	int ret = 0;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	/* disable BM */
	pci_clear_master(adev->pdev);

	pci_save_state(adev->pdev);

	if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
		dev_info(adev->dev, "GPU smu mode1 reset\n");
		ret = amdgpu_dpm_mode1_reset(adev);
	} else {
		dev_info(adev->dev, "GPU psp mode1 reset\n");
		ret = psp_gpu_reset(adev);
	}

	if (ret)
		dev_err(adev->dev, "GPU mode1 reset failed\n");
	pci_restore_state(adev->pdev);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = adev->nbio.funcs->get_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return ret;
}
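/*
 * The memsize register reads back 0xffffffff while the ASIC is still in
 * reset (all-ones is what a dead MMIO read returns), so polling it is a
 * cheap way to detect that register access has come back.
 */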
static bool nv_asic_supports_baco(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;

	if (smu_baco_is_support(smu))
		return true;
	else
		return false;
}
static enum amd_reset_method
nv_asic_reset_method(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;

	if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 ||
	    amdgpu_reset_method == AMD_RESET_METHOD_BACO)
		return amdgpu_reset_method;

	if (amdgpu_reset_method != -1)
		dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
			 amdgpu_reset_method);

	if (smu_baco_is_support(smu))
		return AMD_RESET_METHOD_BACO;
	else
		return AMD_RESET_METHOD_MODE1;
}
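/*
 * amdgpu_reset_method is the module parameter (amdgpu.reset_method); -1
 * means "auto", in which case BACO is preferred whenever the SMU reports
 * support for it and mode1 is the fallback.
 */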
static int nv_asic_reset(struct amdgpu_device *adev)
{
	int ret = 0;
	struct smu_context *smu = &adev->smu;

	if (nv_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
		dev_info(adev->dev, "GPU BACO reset\n");

		ret = smu_baco_enter(smu);
		if (ret)
			return ret;
		ret = smu_baco_exit(smu);
		if (ret)
			return ret;
	} else {
		ret = nv_asic_mode1_reset(adev);
	}

	return ret;
}
static int nv_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	/* todo */
	return 0;
}
static int nv_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */
	return 0;
}
static void nv_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}
static void nv_program_aspm(struct amdgpu_device *adev)
{
	if (amdgpu_aspm == 0)
		return;

	/* todo */
}
static void nv_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
}
static const struct amdgpu_ip_block_version nv_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &nv_common_ip_funcs,
};
static int nv_reg_base_init(struct amdgpu_device *adev)
{
	int r;

	if (amdgpu_discovery) {
		r = amdgpu_discovery_reg_base_init(adev);
		if (r) {
			DRM_WARN("failed to init reg base from ip discovery table, "
				 "fallback to legacy init method\n");
			goto legacy_init;
		}

		return 0;
	}

legacy_init:
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		navi10_reg_base_init(adev);
		break;
	case CHIP_NAVI14:
		navi14_reg_base_init(adev);
		break;
	case CHIP_NAVI12:
		navi12_reg_base_init(adev);
		break;
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
		sienna_cichlid_reg_base_init(adev);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
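/*
 * With amdgpu_discovery enabled, the per-IP register bases are parsed from
 * the IP discovery table carried by the firmware; the switch above is the
 * legacy fallback with hardcoded per-ASIC offset tables.
 */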
void nv_set_virt_ops(struct amdgpu_device *adev)
{
	adev->virt.ops = &xgpu_nv_virt_ops;
}
int nv_set_ip_blocks(struct amdgpu_device *adev)
{
	int r;

	adev->nbio.funcs = &nbio_v2_3_funcs;
	adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;

	/* Set IP register base before any HW register access */
	r = nv_reg_base_init(adev);
	if (r)
		return r;
	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
		    !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
		    !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
		amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
		if (adev->enable_mes)
			amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
		break;
	case CHIP_NAVI12:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
		    !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
		if (!amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
		break;
	case CHIP_SIENNA_CICHLID:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
			amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
		    is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
		if (!amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);

		if (adev->enable_mes)
			amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
		break;
	case CHIP_NAVY_FLOUNDER:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
			amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
		    is_support_sw_smu(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
		    is_support_sw_smu(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
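/*
 * The add order above matters: IP blocks are brought up in list order, so
 * common/GMC/IH/PSP must be registered before the engine blocks (GFX, SDMA,
 * VCN/JPEG) that depend on them.
 */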
static uint32_t nv_get_rev_id(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_rev_id(adev);
}
static void nv_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	adev->nbio.funcs->hdp_flush(adev, ring);
}
static void nv_invalidate_hdp(struct amdgpu_device *adev,
			      struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg) {
		WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
	} else {
		amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
					HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
	}
}
static bool nv_need_full_reset(struct amdgpu_device *adev)
{
	return true;
}
static bool nv_need_reset_on_init(struct amdgpu_device *adev)
{
	u32 sol_reg;

	if (adev->flags & AMD_IS_APU)
		return false;

	/* Check the sOS sign-of-life register to confirm that the system
	 * driver and sOS have already been loaded.
	 */
	sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
	if (sol_reg)
		return true;

	return false;
}
static uint64_t nv_get_pcie_replay_count(struct amdgpu_device *adev)
{
	/* TODO: dummy implementation for the pcie_replay_count sysfs
	 * interface
	 */
	return 0;
}
static void nv_init_doorbell_index(struct amdgpu_device *adev)
{
	adev->doorbell_index.kiq = AMDGPU_NAVI10_DOORBELL_KIQ;
	adev->doorbell_index.mec_ring0 = AMDGPU_NAVI10_DOORBELL_MEC_RING0;
	adev->doorbell_index.mec_ring1 = AMDGPU_NAVI10_DOORBELL_MEC_RING1;
	adev->doorbell_index.mec_ring2 = AMDGPU_NAVI10_DOORBELL_MEC_RING2;
	adev->doorbell_index.mec_ring3 = AMDGPU_NAVI10_DOORBELL_MEC_RING3;
	adev->doorbell_index.mec_ring4 = AMDGPU_NAVI10_DOORBELL_MEC_RING4;
	adev->doorbell_index.mec_ring5 = AMDGPU_NAVI10_DOORBELL_MEC_RING5;
	adev->doorbell_index.mec_ring6 = AMDGPU_NAVI10_DOORBELL_MEC_RING6;
	adev->doorbell_index.mec_ring7 = AMDGPU_NAVI10_DOORBELL_MEC_RING7;
	adev->doorbell_index.userqueue_start = AMDGPU_NAVI10_DOORBELL_USERQUEUE_START;
	adev->doorbell_index.userqueue_end = AMDGPU_NAVI10_DOORBELL_USERQUEUE_END;
	adev->doorbell_index.gfx_ring0 = AMDGPU_NAVI10_DOORBELL_GFX_RING0;
	adev->doorbell_index.gfx_ring1 = AMDGPU_NAVI10_DOORBELL_GFX_RING1;
	adev->doorbell_index.mes_ring = AMDGPU_NAVI10_DOORBELL_MES_RING;
	adev->doorbell_index.sdma_engine[0] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE0;
	adev->doorbell_index.sdma_engine[1] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE1;
	adev->doorbell_index.sdma_engine[2] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE2;
	adev->doorbell_index.sdma_engine[3] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE3;
	adev->doorbell_index.ih = AMDGPU_NAVI10_DOORBELL_IH;
	adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_NAVI10_DOORBELL64_VCN0_1;
	adev->doorbell_index.vcn.vcn_ring2_3 = AMDGPU_NAVI10_DOORBELL64_VCN2_3;
	adev->doorbell_index.vcn.vcn_ring4_5 = AMDGPU_NAVI10_DOORBELL64_VCN4_5;
	adev->doorbell_index.vcn.vcn_ring6_7 = AMDGPU_NAVI10_DOORBELL64_VCN6_7;
	adev->doorbell_index.first_non_cp = AMDGPU_NAVI10_DOORBELL64_FIRST_NON_CP;
	adev->doorbell_index.last_non_cp = AMDGPU_NAVI10_DOORBELL64_LAST_NON_CP;

	adev->doorbell_index.max_assignment = AMDGPU_NAVI10_DOORBELL_MAX_ASSIGNMENT << 1;
	adev->doorbell_index.sdma_doorbell_range = 20;
}
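/*
 * The AMDGPU_NAVI10_DOORBELL64_* constants index 64-bit doorbell slots;
 * max_assignment is shifted left by one to convert it to the 32-bit dword
 * units used elsewhere in the doorbell code (our reading of the convention,
 * matching the "<< 1" above).
 */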
static const struct amdgpu_asic_funcs nv_asic_funcs =
{
	.read_disabled_bios = &nv_read_disabled_bios,
	.read_bios_from_rom = &nv_read_bios_from_rom,
	.read_register = &nv_read_register,
	.reset = &nv_asic_reset,
	.reset_method = &nv_asic_reset_method,
	.set_vga_state = &nv_vga_set_state,
	.get_xclk = &nv_get_xclk,
	.set_uvd_clocks = &nv_set_uvd_clocks,
	.set_vce_clocks = &nv_set_vce_clocks,
	.get_config_memsize = &nv_get_config_memsize,
	.flush_hdp = &nv_flush_hdp,
	.invalidate_hdp = &nv_invalidate_hdp,
	.init_doorbell_index = &nv_init_doorbell_index,
	.need_full_reset = &nv_need_full_reset,
	.need_reset_on_init = &nv_need_reset_on_init,
	.get_pcie_replay_count = &nv_get_pcie_replay_count,
	.supports_baco = &nv_asic_supports_baco,
};
static int nv_common_early_init(void *handle)
{
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
	adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
	adev->smc_rreg = NULL;
	adev->smc_wreg = NULL;
	adev->pcie_rreg = &nv_pcie_rreg;
	adev->pcie_wreg = &nv_pcie_wreg;

	/* TODO: will add them during VCN v2 implementation */
	adev->uvd_ctx_rreg = NULL;
	adev->uvd_ctx_wreg = NULL;

	adev->didt_rreg = &nv_didt_rreg;
	adev->didt_wreg = &nv_didt_wreg;

	adev->asic_funcs = &nv_asic_funcs;

	adev->rev_id = nv_get_rev_id(adev);
	adev->external_rev_id = 0xff;
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB;
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	case CHIP_NAVI14:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_VCN_DPG;
		adev->external_rev_id = adev->rev_id + 20;
		break;
	case CHIP_NAVI12:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB;
		/* The guest VM gets 0xffffffff when reading
		 * RCC_DEV0_EPF0_STRAP0; as a consequence, rev_id and
		 * external_rev_id are wrong. Work around this by hardcoding
		 * rev_id to 0 (default value).
		 */
		if (amdgpu_sriov_vf(adev))
			adev->rev_id = 0;
		adev->external_rev_id = adev->rev_id + 0xa;
		break;
	case CHIP_SIENNA_CICHLID:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_MC_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB |
			AMD_PG_SUPPORT_MMHUB;
		if (amdgpu_sriov_vf(adev)) {
			/* the hypervisor controls CG and PG enablement */
			adev->cg_flags = 0;
			adev->pg_flags = 0;
		}
		adev->external_rev_id = adev->rev_id + 0x28;
		break;
	case CHIP_NAVY_FLOUNDER:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB |
			AMD_PG_SUPPORT_MMHUB;
		adev->external_rev_id = adev->rev_id + 0x32;
		break;

	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_nv_mailbox_set_irq_funcs(adev);
	}

	return 0;
}
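/*
 * The cg_flags/pg_flags set above are consumed later by each IP block's
 * set_clockgating_state/set_powergating_state callback; leaving a flag
 * unset simply disables the corresponding gating feature on that ASIC.
 */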
static int nv_common_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_nv_mailbox_get_irq(adev);

	return 0;
}
static int nv_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_nv_mailbox_add_irq_id(adev);

	return 0;
}
static int nv_common_sw_fini(void *handle)
{
	return 0;
}
static int nv_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* enable pcie gen2/3 link */
	nv_pcie_gen3_enable(adev);
	/* enable aspm */
	nv_program_aspm(adev);
	/* setup nbio registers */
	adev->nbio.funcs->init_registers(adev);
	/* remap HDP registers to a hole in MMIO space so they can be
	 * exposed to process space
	 */
	if (adev->nbio.funcs->remap_hdp_registers)
		adev->nbio.funcs->remap_hdp_registers(adev);
	/* enable the doorbell aperture */
	nv_enable_doorbell_aperture(adev, true);

	return 0;
}
static int nv_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	nv_enable_doorbell_aperture(adev, false);

	return 0;
}
static int nv_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return nv_common_hw_fini(adev);
}
static int nv_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return nv_common_hw_init(adev);
}
static bool nv_common_is_idle(void *handle)
{
	return true;
}

static int nv_common_wait_for_idle(void *handle)
{
	return 0;
}

static int nv_common_soft_reset(void *handle)
{
	return 0;
}
static void nv_update_hdp_mem_power_gating(struct amdgpu_device *adev,
					   bool enable)
{
	uint32_t hdp_clk_cntl, hdp_clk_cntl1;
	uint32_t hdp_mem_pwr_cntl;

	if (!(adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_HDP_DS |
				AMD_CG_SUPPORT_HDP_SD)))
		return;

	hdp_clk_cntl = hdp_clk_cntl1 = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
	hdp_mem_pwr_cntl = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);

	/* Before switching the clock/power mode, force the IPH and RC
	 * clocks on.
	 */
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     IPH_MEM_CLK_SOFT_OVERRIDE, 1);
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     RC_MEM_CLK_SOFT_OVERRIDE, 1);
	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);
	/* HDP 5.0 doesn't support dynamic power mode switching; disable
	 * clock and power gating before making any change.
	 */
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_CTRL_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_LS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_DS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_SD_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_CTRL_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_LS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_DS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_SD_EN, 0);
	WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);
	/* only one power mode (LS/DS/SD) can be enabled at a time */
	if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_LS_EN, enable);
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_LS_EN, enable);
	} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_DS_EN, enable);
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_DS_EN, enable);
	} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_SD) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_SD_EN, enable);
		/* RC should not use shut-down mode, fall back to DS */
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_DS_EN, enable);
	}
	/* It was confirmed that IPH_MEM_POWER_CTRL_EN and RC_MEM_POWER_CTRL_EN
	 * have to be set for SRAM LS/DS/SD.
	 */
	if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_DS |
			      AMD_CG_SUPPORT_HDP_SD)) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_CTRL_EN, 1);
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_CTRL_EN, 1);
	}

	WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);

	/* restore IPH & RC clock override after clock/power mode changing */
	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl1);
}
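/*
 * hdp_clk_cntl1 holds the value of HDP_CLK_CNTL as it was before the
 * forced-on override, so the final write restores the caller-visible clock
 * gating configuration once the power mode switch is complete.
 */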
static void nv_update_hdp_clock_gating(struct amdgpu_device *adev,
				       bool enable)
{
	uint32_t hdp_clk_cntl;

	if (!(adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		return;

	hdp_clk_cntl = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);

	if (enable) {
		hdp_clk_cntl &=
			~(uint32_t)
			(HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK);
	} else {
		hdp_clk_cntl |= HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK;
	}

	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);
}
static int nv_common_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		nv_update_hdp_mem_power_gating(adev,
				state == AMD_CG_STATE_GATE);
		nv_update_hdp_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}
static int nv_common_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	/* TODO */
	return 0;
}
static void nv_common_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	uint32_t tmp;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	adev->nbio.funcs->get_clockgating_state(adev, flags);

	/* AMD_CG_SUPPORT_HDP_MGCG */
	tmp = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
	if (!(tmp & (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK)))
		*flags |= AMD_CG_SUPPORT_HDP_MGCG;

	/* AMD_CG_SUPPORT_HDP_LS/DS/SD */
	tmp = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);
	if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_LS;
	else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_DS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_DS;
	else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_SD_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_SD;
}
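/*
 * Unlike the set path, this reports gating state by reading the registers
 * back rather than trusting adev->cg_flags, so it reflects what the
 * hardware (or, under SR-IOV, the hypervisor) actually programmed.
 */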
static const struct amd_ip_funcs nv_common_ip_funcs = {
	.name = "nv_common",
	.early_init = nv_common_early_init,
	.late_init = nv_common_late_init,
	.sw_init = nv_common_sw_init,
	.sw_fini = nv_common_sw_fini,
	.hw_init = nv_common_hw_init,
	.hw_fini = nv_common_hw_fini,
	.suspend = nv_common_suspend,
	.resume = nv_common_resume,
	.is_idle = nv_common_is_idle,
	.wait_for_idle = nv_common_wait_for_idle,
	.soft_reset = nv_common_soft_reset,
	.set_clockgating_state = nv_common_set_clockgating_state,
	.set_powergating_state = nv_common_set_powergating_state,
	.get_clockgating_state = nv_common_get_clockgating_state,
};