--- zzzz-none-000/linux-3.10.107/drivers/gpu/drm/radeon/si.c 2017-06-27 09:49:32.000000000 +0000
+++ scorpion-7490-727/linux-3.10.107/drivers/gpu/drm/radeon/si.c 2021-02-04 17:41:59.000000000 +0000
@@ -22,50 +22,100 @@
  * Authors: Alex Deucher
  */
 #include
-#include
 #include
 #include
 #include "radeon.h"
 #include "radeon_asic.h"
+#include "radeon_audio.h"
 #include
 #include "sid.h"
 #include "atom.h"
 #include "si_blit_shaders.h"
+#include "clearstate_si.h"
+#include "radeon_ucode.h"
-#define SI_PFP_UCODE_SIZE 2144
-#define SI_PM4_UCODE_SIZE 2144
-#define SI_CE_UCODE_SIZE 2144
-#define SI_RLC_UCODE_SIZE 2048
-#define SI_MC_UCODE_SIZE 7769
-#define OLAND_MC_UCODE_SIZE 7863
 MODULE_FIRMWARE("radeon/TAHITI_pfp.bin");
 MODULE_FIRMWARE("radeon/TAHITI_me.bin");
 MODULE_FIRMWARE("radeon/TAHITI_ce.bin");
 MODULE_FIRMWARE("radeon/TAHITI_mc.bin");
+MODULE_FIRMWARE("radeon/TAHITI_mc2.bin");
 MODULE_FIRMWARE("radeon/TAHITI_rlc.bin");
+MODULE_FIRMWARE("radeon/TAHITI_smc.bin");
+
+MODULE_FIRMWARE("radeon/tahiti_pfp.bin");
+MODULE_FIRMWARE("radeon/tahiti_me.bin");
+MODULE_FIRMWARE("radeon/tahiti_ce.bin");
+MODULE_FIRMWARE("radeon/tahiti_mc.bin");
+MODULE_FIRMWARE("radeon/tahiti_rlc.bin");
+MODULE_FIRMWARE("radeon/tahiti_smc.bin");
+
 MODULE_FIRMWARE("radeon/PITCAIRN_pfp.bin");
 MODULE_FIRMWARE("radeon/PITCAIRN_me.bin");
 MODULE_FIRMWARE("radeon/PITCAIRN_ce.bin");
 MODULE_FIRMWARE("radeon/PITCAIRN_mc.bin");
+MODULE_FIRMWARE("radeon/PITCAIRN_mc2.bin");
 MODULE_FIRMWARE("radeon/PITCAIRN_rlc.bin");
+MODULE_FIRMWARE("radeon/PITCAIRN_smc.bin");
+
+MODULE_FIRMWARE("radeon/pitcairn_pfp.bin");
+MODULE_FIRMWARE("radeon/pitcairn_me.bin");
+MODULE_FIRMWARE("radeon/pitcairn_ce.bin");
+MODULE_FIRMWARE("radeon/pitcairn_mc.bin");
+MODULE_FIRMWARE("radeon/pitcairn_rlc.bin");
+MODULE_FIRMWARE("radeon/pitcairn_smc.bin");
+
 MODULE_FIRMWARE("radeon/VERDE_pfp.bin");
 MODULE_FIRMWARE("radeon/VERDE_me.bin");
 MODULE_FIRMWARE("radeon/VERDE_ce.bin");
 MODULE_FIRMWARE("radeon/VERDE_mc.bin");
+MODULE_FIRMWARE("radeon/VERDE_mc2.bin");
 MODULE_FIRMWARE("radeon/VERDE_rlc.bin");
+MODULE_FIRMWARE("radeon/VERDE_smc.bin");
+
+MODULE_FIRMWARE("radeon/verde_pfp.bin");
+MODULE_FIRMWARE("radeon/verde_me.bin");
+MODULE_FIRMWARE("radeon/verde_ce.bin");
+MODULE_FIRMWARE("radeon/verde_mc.bin");
+MODULE_FIRMWARE("radeon/verde_rlc.bin");
+MODULE_FIRMWARE("radeon/verde_smc.bin");
+
 MODULE_FIRMWARE("radeon/OLAND_pfp.bin");
 MODULE_FIRMWARE("radeon/OLAND_me.bin");
 MODULE_FIRMWARE("radeon/OLAND_ce.bin");
 MODULE_FIRMWARE("radeon/OLAND_mc.bin");
+MODULE_FIRMWARE("radeon/OLAND_mc2.bin");
 MODULE_FIRMWARE("radeon/OLAND_rlc.bin");
+MODULE_FIRMWARE("radeon/OLAND_smc.bin");
+
+MODULE_FIRMWARE("radeon/oland_pfp.bin");
+MODULE_FIRMWARE("radeon/oland_me.bin");
+MODULE_FIRMWARE("radeon/oland_ce.bin");
+MODULE_FIRMWARE("radeon/oland_mc.bin");
+MODULE_FIRMWARE("radeon/oland_rlc.bin");
+MODULE_FIRMWARE("radeon/oland_smc.bin");
+
 MODULE_FIRMWARE("radeon/HAINAN_pfp.bin");
 MODULE_FIRMWARE("radeon/HAINAN_me.bin");
 MODULE_FIRMWARE("radeon/HAINAN_ce.bin");
 MODULE_FIRMWARE("radeon/HAINAN_mc.bin");
+MODULE_FIRMWARE("radeon/HAINAN_mc2.bin");
 MODULE_FIRMWARE("radeon/HAINAN_rlc.bin");
+MODULE_FIRMWARE("radeon/HAINAN_smc.bin");
+MODULE_FIRMWARE("radeon/hainan_pfp.bin");
+MODULE_FIRMWARE("radeon/hainan_me.bin");
+MODULE_FIRMWARE("radeon/hainan_ce.bin");
+MODULE_FIRMWARE("radeon/hainan_mc.bin");
+MODULE_FIRMWARE("radeon/hainan_rlc.bin");
+MODULE_FIRMWARE("radeon/hainan_smc.bin");
+
+static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh);
+static void
si_pcie_gen3_enable(struct radeon_device *rdev); +static void si_program_aspm(struct radeon_device *rdev); +extern void sumo_rlc_fini(struct radeon_device *rdev); +extern int sumo_rlc_init(struct radeon_device *rdev); extern int r600_ih_ring_alloc(struct radeon_device *rdev); extern void r600_ih_ring_fini(struct radeon_device *rdev); extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev); @@ -74,6 +124,235 @@ extern u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev); extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev); extern bool evergreen_is_display_hung(struct radeon_device *rdev); +static void si_enable_gui_idle_interrupt(struct radeon_device *rdev, + bool enable); +static void si_init_pg(struct radeon_device *rdev); +static void si_init_cg(struct radeon_device *rdev); +static void si_fini_pg(struct radeon_device *rdev); +static void si_fini_cg(struct radeon_device *rdev); +static void si_rlc_stop(struct radeon_device *rdev); + +static const u32 verde_rlc_save_restore_register_list[] = +{ + (0x8000 << 16) | (0x98f4 >> 2), + 0x00000000, + (0x8040 << 16) | (0x98f4 >> 2), + 0x00000000, + (0x8000 << 16) | (0xe80 >> 2), + 0x00000000, + (0x8040 << 16) | (0xe80 >> 2), + 0x00000000, + (0x8000 << 16) | (0x89bc >> 2), + 0x00000000, + (0x8040 << 16) | (0x89bc >> 2), + 0x00000000, + (0x8000 << 16) | (0x8c1c >> 2), + 0x00000000, + (0x8040 << 16) | (0x8c1c >> 2), + 0x00000000, + (0x9c00 << 16) | (0x98f0 >> 2), + 0x00000000, + (0x9c00 << 16) | (0xe7c >> 2), + 0x00000000, + (0x8000 << 16) | (0x9148 >> 2), + 0x00000000, + (0x8040 << 16) | (0x9148 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9150 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x897c >> 2), + 0x00000000, + (0x9c00 << 16) | (0x8d8c >> 2), + 0x00000000, + (0x9c00 << 16) | (0xac54 >> 2), + 0X00000000, + 0x3, + (0x9c00 << 16) | (0x98f8 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9910 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9914 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9918 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x991c >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9920 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9924 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9928 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x992c >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9930 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9934 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9938 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x993c >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9940 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9944 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9948 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x994c >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9950 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9954 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9958 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x995c >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9960 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9964 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9968 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x996c >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9970 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9974 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9978 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x997c >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9980 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9984 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9988 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x998c >> 2), + 0x00000000, + (0x9c00 << 16) | (0x8c00 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x8c14 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x8c04 >> 2), 
+ 0x00000000, + (0x9c00 << 16) | (0x8c08 >> 2), + 0x00000000, + (0x8000 << 16) | (0x9b7c >> 2), + 0x00000000, + (0x8040 << 16) | (0x9b7c >> 2), + 0x00000000, + (0x8000 << 16) | (0xe84 >> 2), + 0x00000000, + (0x8040 << 16) | (0xe84 >> 2), + 0x00000000, + (0x8000 << 16) | (0x89c0 >> 2), + 0x00000000, + (0x8040 << 16) | (0x89c0 >> 2), + 0x00000000, + (0x8000 << 16) | (0x914c >> 2), + 0x00000000, + (0x8040 << 16) | (0x914c >> 2), + 0x00000000, + (0x8000 << 16) | (0x8c20 >> 2), + 0x00000000, + (0x8040 << 16) | (0x8c20 >> 2), + 0x00000000, + (0x8000 << 16) | (0x9354 >> 2), + 0x00000000, + (0x8040 << 16) | (0x9354 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9060 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9364 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9100 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x913c >> 2), + 0x00000000, + (0x8000 << 16) | (0x90e0 >> 2), + 0x00000000, + (0x8000 << 16) | (0x90e4 >> 2), + 0x00000000, + (0x8000 << 16) | (0x90e8 >> 2), + 0x00000000, + (0x8040 << 16) | (0x90e0 >> 2), + 0x00000000, + (0x8040 << 16) | (0x90e4 >> 2), + 0x00000000, + (0x8040 << 16) | (0x90e8 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x8bcc >> 2), + 0x00000000, + (0x9c00 << 16) | (0x8b24 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x88c4 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x8e50 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x8c0c >> 2), + 0x00000000, + (0x9c00 << 16) | (0x8e58 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x8e5c >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9508 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x950c >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9494 >> 2), + 0x00000000, + (0x9c00 << 16) | (0xac0c >> 2), + 0x00000000, + (0x9c00 << 16) | (0xac10 >> 2), + 0x00000000, + (0x9c00 << 16) | (0xac14 >> 2), + 0x00000000, + (0x9c00 << 16) | (0xae00 >> 2), + 0x00000000, + (0x9c00 << 16) | (0xac08 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x88d4 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x88c8 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x88cc >> 2), + 0x00000000, + (0x9c00 << 16) | (0x89b0 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x8b10 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x8a14 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9830 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9834 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9838 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9a10 >> 2), + 0x00000000, + (0x8000 << 16) | (0x9870 >> 2), + 0x00000000, + (0x8000 << 16) | (0x9874 >> 2), + 0x00000000, + (0x8001 << 16) | (0x9870 >> 2), + 0x00000000, + (0x8001 << 16) | (0x9874 >> 2), + 0x00000000, + (0x8040 << 16) | (0x9870 >> 2), + 0x00000000, + (0x8040 << 16) | (0x9874 >> 2), + 0x00000000, + (0x8041 << 16) | (0x9870 >> 2), + 0x00000000, + (0x8041 << 16) | (0x9874 >> 2), + 0x00000000, + 0x00000000 +}; static const u32 tahiti_golden_rlc_registers[] = { @@ -985,6 +1264,36 @@ } } +/** + * si_get_allowed_info_register - fetch the register for the info ioctl + * + * @rdev: radeon_device pointer + * @reg: register offset in bytes + * @val: register value + * + * Returns 0 for success or -EINVAL for an invalid register + * + */ +int si_get_allowed_info_register(struct radeon_device *rdev, + u32 reg, u32 *val) +{ + switch (reg) { + case GRBM_STATUS: + case GRBM_STATUS2: + case GRBM_STATUS_SE0: + case GRBM_STATUS_SE1: + case SRBM_STATUS: + case SRBM_STATUS2: + case (DMA_STATUS_REG + DMA0_REGISTER_OFFSET): + case (DMA_STATUS_REG + DMA1_REGISTER_OFFSET): + case UVD_STATUS: + *val = RREG32(reg); + return 0; + default: + return -EINVAL; + } +} + #define PCIE_BUS_CLK 10000 #define TCLK (PCIE_BUS_CLK / 10) @@ -1229,43 +1538,56 @@ }; /* 
ucode loading */ -static int si_mc_load_microcode(struct radeon_device *rdev) +int si_mc_load_microcode(struct radeon_device *rdev) { - const __be32 *fw_data; + const __be32 *fw_data = NULL; + const __le32 *new_fw_data = NULL; u32 running, blackout = 0; - u32 *io_mc_regs; - int i, ucode_size, regs_size; + u32 *io_mc_regs = NULL; + const __le32 *new_io_mc_regs = NULL; + int i, regs_size, ucode_size; if (!rdev->mc_fw) return -EINVAL; - switch (rdev->family) { - case CHIP_TAHITI: - io_mc_regs = (u32 *)&tahiti_io_mc_regs; - ucode_size = SI_MC_UCODE_SIZE; - regs_size = TAHITI_IO_MC_REGS_SIZE; - break; - case CHIP_PITCAIRN: - io_mc_regs = (u32 *)&pitcairn_io_mc_regs; - ucode_size = SI_MC_UCODE_SIZE; - regs_size = TAHITI_IO_MC_REGS_SIZE; - break; - case CHIP_VERDE: - default: - io_mc_regs = (u32 *)&verde_io_mc_regs; - ucode_size = SI_MC_UCODE_SIZE; - regs_size = TAHITI_IO_MC_REGS_SIZE; - break; - case CHIP_OLAND: - io_mc_regs = (u32 *)&oland_io_mc_regs; - ucode_size = OLAND_MC_UCODE_SIZE; - regs_size = TAHITI_IO_MC_REGS_SIZE; - break; - case CHIP_HAINAN: - io_mc_regs = (u32 *)&hainan_io_mc_regs; - ucode_size = OLAND_MC_UCODE_SIZE; - regs_size = TAHITI_IO_MC_REGS_SIZE; - break; + if (rdev->new_fw) { + const struct mc_firmware_header_v1_0 *hdr = + (const struct mc_firmware_header_v1_0 *)rdev->mc_fw->data; + + radeon_ucode_print_mc_hdr(&hdr->header); + regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2); + new_io_mc_regs = (const __le32 *) + (rdev->mc_fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes)); + ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; + new_fw_data = (const __le32 *) + (rdev->mc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + } else { + ucode_size = rdev->mc_fw->size / 4; + + switch (rdev->family) { + case CHIP_TAHITI: + io_mc_regs = (u32 *)&tahiti_io_mc_regs; + regs_size = TAHITI_IO_MC_REGS_SIZE; + break; + case CHIP_PITCAIRN: + io_mc_regs = (u32 *)&pitcairn_io_mc_regs; + regs_size = TAHITI_IO_MC_REGS_SIZE; + break; + case CHIP_VERDE: + default: + io_mc_regs = (u32 *)&verde_io_mc_regs; + regs_size = TAHITI_IO_MC_REGS_SIZE; + break; + case CHIP_OLAND: + io_mc_regs = (u32 *)&oland_io_mc_regs; + regs_size = TAHITI_IO_MC_REGS_SIZE; + break; + case CHIP_HAINAN: + io_mc_regs = (u32 *)&hainan_io_mc_regs; + regs_size = TAHITI_IO_MC_REGS_SIZE; + break; + } + fw_data = (const __be32 *)rdev->mc_fw->data; } running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK; @@ -1282,13 +1604,21 @@ /* load mc io regs */ for (i = 0; i < regs_size; i++) { - WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]); - WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]); + if (rdev->new_fw) { + WREG32(MC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(new_io_mc_regs++)); + WREG32(MC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++)); + } else { + WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]); + WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]); + } } /* load the MC ucode */ - fw_data = (const __be32 *)rdev->mc_fw->data; - for (i = 0; i < ucode_size; i++) - WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++)); + for (i = 0; i < ucode_size; i++) { + if (rdev->new_fw) + WREG32(MC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++)); + else + WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++)); + } /* put the engine back into the active state */ WREG32(MC_SEQ_SUP_CNTL, 0x00000008); @@ -1316,132 +1646,246 @@ static int si_init_microcode(struct radeon_device *rdev) { - struct platform_device *pdev; const char *chip_name; - const char *rlc_chip_name; + const char *new_chip_name; size_t pfp_req_size, 
me_req_size, ce_req_size, rlc_req_size, mc_req_size; + size_t smc_req_size, mc2_req_size; char fw_name[30]; int err; + int new_fw = 0; DRM_DEBUG("\n"); - pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0); - err = IS_ERR(pdev); - if (err) { - printk(KERN_ERR "radeon_cp: Failed to register firmware\n"); - return -EINVAL; - } - switch (rdev->family) { case CHIP_TAHITI: chip_name = "TAHITI"; - rlc_chip_name = "TAHITI"; + new_chip_name = "tahiti"; pfp_req_size = SI_PFP_UCODE_SIZE * 4; me_req_size = SI_PM4_UCODE_SIZE * 4; ce_req_size = SI_CE_UCODE_SIZE * 4; rlc_req_size = SI_RLC_UCODE_SIZE * 4; mc_req_size = SI_MC_UCODE_SIZE * 4; + mc2_req_size = TAHITI_MC_UCODE_SIZE * 4; + smc_req_size = ALIGN(TAHITI_SMC_UCODE_SIZE, 4); break; case CHIP_PITCAIRN: chip_name = "PITCAIRN"; - rlc_chip_name = "PITCAIRN"; + new_chip_name = "pitcairn"; pfp_req_size = SI_PFP_UCODE_SIZE * 4; me_req_size = SI_PM4_UCODE_SIZE * 4; ce_req_size = SI_CE_UCODE_SIZE * 4; rlc_req_size = SI_RLC_UCODE_SIZE * 4; mc_req_size = SI_MC_UCODE_SIZE * 4; + mc2_req_size = PITCAIRN_MC_UCODE_SIZE * 4; + smc_req_size = ALIGN(PITCAIRN_SMC_UCODE_SIZE, 4); break; case CHIP_VERDE: chip_name = "VERDE"; - rlc_chip_name = "VERDE"; + new_chip_name = "verde"; pfp_req_size = SI_PFP_UCODE_SIZE * 4; me_req_size = SI_PM4_UCODE_SIZE * 4; ce_req_size = SI_CE_UCODE_SIZE * 4; rlc_req_size = SI_RLC_UCODE_SIZE * 4; mc_req_size = SI_MC_UCODE_SIZE * 4; + mc2_req_size = VERDE_MC_UCODE_SIZE * 4; + smc_req_size = ALIGN(VERDE_SMC_UCODE_SIZE, 4); break; case CHIP_OLAND: chip_name = "OLAND"; - rlc_chip_name = "OLAND"; + new_chip_name = "oland"; pfp_req_size = SI_PFP_UCODE_SIZE * 4; me_req_size = SI_PM4_UCODE_SIZE * 4; ce_req_size = SI_CE_UCODE_SIZE * 4; rlc_req_size = SI_RLC_UCODE_SIZE * 4; - mc_req_size = OLAND_MC_UCODE_SIZE * 4; + mc_req_size = mc2_req_size = OLAND_MC_UCODE_SIZE * 4; + smc_req_size = ALIGN(OLAND_SMC_UCODE_SIZE, 4); break; case CHIP_HAINAN: chip_name = "HAINAN"; - rlc_chip_name = "HAINAN"; + new_chip_name = "hainan"; pfp_req_size = SI_PFP_UCODE_SIZE * 4; me_req_size = SI_PM4_UCODE_SIZE * 4; ce_req_size = SI_CE_UCODE_SIZE * 4; rlc_req_size = SI_RLC_UCODE_SIZE * 4; - mc_req_size = OLAND_MC_UCODE_SIZE * 4; + mc_req_size = mc2_req_size = OLAND_MC_UCODE_SIZE * 4; + smc_req_size = ALIGN(HAINAN_SMC_UCODE_SIZE, 4); break; default: BUG(); } - DRM_INFO("Loading %s Microcode\n", chip_name); + DRM_INFO("Loading %s Microcode\n", new_chip_name); - snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name); - err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev); - if (err) - goto out; - if (rdev->pfp_fw->size != pfp_req_size) { - printk(KERN_ERR - "si_cp: Bogus length %zu in firmware \"%s\"\n", - rdev->pfp_fw->size, fw_name); - err = -EINVAL; - goto out; + snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", new_chip_name); + err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev); + if (err) { + snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name); + err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev); + if (err) + goto out; + if (rdev->pfp_fw->size != pfp_req_size) { + printk(KERN_ERR + "si_cp: Bogus length %zu in firmware \"%s\"\n", + rdev->pfp_fw->size, fw_name); + err = -EINVAL; + goto out; + } + } else { + err = radeon_ucode_validate(rdev->pfp_fw); + if (err) { + printk(KERN_ERR + "si_cp: validation failed for firmware \"%s\"\n", + fw_name); + goto out; + } else { + new_fw++; + } } - snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name); - err = request_firmware(&rdev->me_fw, fw_name, 
&pdev->dev); - if (err) - goto out; - if (rdev->me_fw->size != me_req_size) { - printk(KERN_ERR - "si_cp: Bogus length %zu in firmware \"%s\"\n", - rdev->me_fw->size, fw_name); - err = -EINVAL; + snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", new_chip_name); + err = request_firmware(&rdev->me_fw, fw_name, rdev->dev); + if (err) { + snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name); + err = request_firmware(&rdev->me_fw, fw_name, rdev->dev); + if (err) + goto out; + if (rdev->me_fw->size != me_req_size) { + printk(KERN_ERR + "si_cp: Bogus length %zu in firmware \"%s\"\n", + rdev->me_fw->size, fw_name); + err = -EINVAL; + } + } else { + err = radeon_ucode_validate(rdev->me_fw); + if (err) { + printk(KERN_ERR + "si_cp: validation failed for firmware \"%s\"\n", + fw_name); + goto out; + } else { + new_fw++; + } } - snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name); - err = request_firmware(&rdev->ce_fw, fw_name, &pdev->dev); - if (err) - goto out; - if (rdev->ce_fw->size != ce_req_size) { - printk(KERN_ERR - "si_cp: Bogus length %zu in firmware \"%s\"\n", - rdev->ce_fw->size, fw_name); - err = -EINVAL; + snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", new_chip_name); + err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev); + if (err) { + snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name); + err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev); + if (err) + goto out; + if (rdev->ce_fw->size != ce_req_size) { + printk(KERN_ERR + "si_cp: Bogus length %zu in firmware \"%s\"\n", + rdev->ce_fw->size, fw_name); + err = -EINVAL; + } + } else { + err = radeon_ucode_validate(rdev->ce_fw); + if (err) { + printk(KERN_ERR + "si_cp: validation failed for firmware \"%s\"\n", + fw_name); + goto out; + } else { + new_fw++; + } } - snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name); - err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev); - if (err) - goto out; - if (rdev->rlc_fw->size != rlc_req_size) { - printk(KERN_ERR - "si_rlc: Bogus length %zu in firmware \"%s\"\n", - rdev->rlc_fw->size, fw_name); - err = -EINVAL; + snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", new_chip_name); + err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev); + if (err) { + snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name); + err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev); + if (err) + goto out; + if (rdev->rlc_fw->size != rlc_req_size) { + printk(KERN_ERR + "si_rlc: Bogus length %zu in firmware \"%s\"\n", + rdev->rlc_fw->size, fw_name); + err = -EINVAL; + } + } else { + err = radeon_ucode_validate(rdev->rlc_fw); + if (err) { + printk(KERN_ERR + "si_cp: validation failed for firmware \"%s\"\n", + fw_name); + goto out; + } else { + new_fw++; + } } - snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); - err = request_firmware(&rdev->mc_fw, fw_name, &pdev->dev); - if (err) - goto out; - if (rdev->mc_fw->size != mc_req_size) { - printk(KERN_ERR - "si_mc: Bogus length %zu in firmware \"%s\"\n", - rdev->mc_fw->size, fw_name); - err = -EINVAL; + snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name); + err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev); + if (err) { + snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name); + err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev); + if (err) { + snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); + err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev); + if (err) + goto out; + } + if 
((rdev->mc_fw->size != mc_req_size) && + (rdev->mc_fw->size != mc2_req_size)) { + printk(KERN_ERR + "si_mc: Bogus length %zu in firmware \"%s\"\n", + rdev->mc_fw->size, fw_name); + err = -EINVAL; + } + DRM_INFO("%s: %zu bytes\n", fw_name, rdev->mc_fw->size); + } else { + err = radeon_ucode_validate(rdev->mc_fw); + if (err) { + printk(KERN_ERR + "si_cp: validation failed for firmware \"%s\"\n", + fw_name); + goto out; + } else { + new_fw++; + } } -out: - platform_device_unregister(pdev); + snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name); + err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); + if (err) { + snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name); + err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); + if (err) { + printk(KERN_ERR + "smc: error loading firmware \"%s\"\n", + fw_name); + release_firmware(rdev->smc_fw); + rdev->smc_fw = NULL; + err = 0; + } else if (rdev->smc_fw->size != smc_req_size) { + printk(KERN_ERR + "si_smc: Bogus length %zu in firmware \"%s\"\n", + rdev->smc_fw->size, fw_name); + err = -EINVAL; + } + } else { + err = radeon_ucode_validate(rdev->smc_fw); + if (err) { + printk(KERN_ERR + "si_cp: validation failed for firmware \"%s\"\n", + fw_name); + goto out; + } else { + new_fw++; + } + } + if (new_fw == 0) { + rdev->new_fw = false; + } else if (new_fw < 6) { + printk(KERN_ERR "si_fw: mixing new and old firmware!\n"); + err = -EINVAL; + } else { + rdev->new_fw = true; + } +out: if (err) { if (err != -EINVAL) printk(KERN_ERR @@ -1457,6 +1901,8 @@ rdev->rlc_fw = NULL; release_firmware(rdev->mc_fw); rdev->mc_fw = NULL; + release_firmware(rdev->smc_fw); + rdev->smc_fw = NULL; } return err; } @@ -1807,7 +2253,8 @@ u32 lb_size, u32 num_heads) { struct drm_display_mode *mode = &radeon_crtc->base.mode; - struct dce6_wm_params wm; + struct dce6_wm_params wm_low, wm_high; + u32 dram_channels; u32 pixel_period; u32 line_time = 0; u32 latency_watermark_a = 0, latency_watermark_b = 0; @@ -1823,38 +2270,83 @@ priority_a_cnt = 0; priority_b_cnt = 0; - wm.yclk = rdev->pm.current_mclk * 10; - wm.sclk = rdev->pm.current_sclk * 10; - wm.disp_clk = mode->clock; - wm.src_width = mode->crtc_hdisplay; - wm.active_time = mode->crtc_hdisplay * pixel_period; - wm.blank_time = line_time - wm.active_time; - wm.interlaced = false; - if (mode->flags & DRM_MODE_FLAG_INTERLACE) - wm.interlaced = true; - wm.vsc = radeon_crtc->vsc; - wm.vtaps = 1; - if (radeon_crtc->rmx_type != RMX_OFF) - wm.vtaps = 2; - wm.bytes_per_pixel = 4; /* XXX: get this from fb config */ - wm.lb_size = lb_size; if (rdev->family == CHIP_ARUBA) - wm.dram_channels = evergreen_get_number_of_dram_channels(rdev); + dram_channels = evergreen_get_number_of_dram_channels(rdev); else - wm.dram_channels = si_get_number_of_dram_channels(rdev); - wm.num_heads = num_heads; + dram_channels = si_get_number_of_dram_channels(rdev); + + /* watermark for high clocks */ + if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) { + wm_high.yclk = + radeon_dpm_get_mclk(rdev, false) * 10; + wm_high.sclk = + radeon_dpm_get_sclk(rdev, false) * 10; + } else { + wm_high.yclk = rdev->pm.current_mclk * 10; + wm_high.sclk = rdev->pm.current_sclk * 10; + } + + wm_high.disp_clk = mode->clock; + wm_high.src_width = mode->crtc_hdisplay; + wm_high.active_time = mode->crtc_hdisplay * pixel_period; + wm_high.blank_time = line_time - wm_high.active_time; + wm_high.interlaced = false; + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + wm_high.interlaced = true; + wm_high.vsc = radeon_crtc->vsc; + 
wm_high.vtaps = 1; + if (radeon_crtc->rmx_type != RMX_OFF) + wm_high.vtaps = 2; + wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */ + wm_high.lb_size = lb_size; + wm_high.dram_channels = dram_channels; + wm_high.num_heads = num_heads; + + /* watermark for low clocks */ + if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) { + wm_low.yclk = + radeon_dpm_get_mclk(rdev, true) * 10; + wm_low.sclk = + radeon_dpm_get_sclk(rdev, true) * 10; + } else { + wm_low.yclk = rdev->pm.current_mclk * 10; + wm_low.sclk = rdev->pm.current_sclk * 10; + } + + wm_low.disp_clk = mode->clock; + wm_low.src_width = mode->crtc_hdisplay; + wm_low.active_time = mode->crtc_hdisplay * pixel_period; + wm_low.blank_time = line_time - wm_low.active_time; + wm_low.interlaced = false; + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + wm_low.interlaced = true; + wm_low.vsc = radeon_crtc->vsc; + wm_low.vtaps = 1; + if (radeon_crtc->rmx_type != RMX_OFF) + wm_low.vtaps = 2; + wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */ + wm_low.lb_size = lb_size; + wm_low.dram_channels = dram_channels; + wm_low.num_heads = num_heads; /* set for high clocks */ - latency_watermark_a = min(dce6_latency_watermark(&wm), (u32)65535); + latency_watermark_a = min(dce6_latency_watermark(&wm_high), (u32)65535); /* set for low clocks */ - /* wm.yclk = low clk; wm.sclk = low clk */ - latency_watermark_b = min(dce6_latency_watermark(&wm), (u32)65535); + latency_watermark_b = min(dce6_latency_watermark(&wm_low), (u32)65535); /* possibly force display priority to high */ /* should really do this at mode validation time... */ - if (!dce6_average_bandwidth_vs_dram_bandwidth_for_display(&wm) || - !dce6_average_bandwidth_vs_available_bandwidth(&wm) || - !dce6_check_latency_hiding(&wm) || + if (!dce6_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) || + !dce6_average_bandwidth_vs_available_bandwidth(&wm_high) || + !dce6_check_latency_hiding(&wm_high) || + (rdev->disp_priority == 2)) { + DRM_DEBUG_KMS("force priority to high\n"); + priority_a_cnt |= PRIORITY_ALWAYS_ON; + priority_b_cnt |= PRIORITY_ALWAYS_ON; + } + if (!dce6_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) || + !dce6_average_bandwidth_vs_available_bandwidth(&wm_low) || + !dce6_check_latency_hiding(&wm_low) || (rdev->disp_priority == 2)) { DRM_DEBUG_KMS("force priority to high\n"); priority_a_cnt |= PRIORITY_ALWAYS_ON; @@ -1884,6 +2376,9 @@ c.full = dfixed_div(c, a); priority_b_mark = dfixed_trunc(c); priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK; + + /* Save number of lines the linebuffer leads before the scanout */ + radeon_crtc->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay); } /* select wm A */ @@ -1910,6 +2405,10 @@ WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt); WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt); + /* save values for DPM */ + radeon_crtc->line_time = line_time; + radeon_crtc->wm_high = latency_watermark_a; + radeon_crtc->wm_low = latency_watermark_b; } void dce6_bandwidth_update(struct radeon_device *rdev) @@ -1919,6 +2418,9 @@ u32 num_heads = 0, lb_size; int i; + if (!rdev->mode_info.mode_config_initialized) + return; + radeon_update_display_priority(rdev); for (i = 0; i < rdev->num_crtc; i++) { @@ -2516,7 +3018,7 @@ } static u32 si_get_rb_disabled(struct radeon_device *rdev, - u32 max_rb_num, u32 se_num, + u32 max_rb_num_per_se, u32 sh_per_se) { u32 data, mask; @@ -2530,14 +3032,14 @@ data >>= BACKEND_DISABLE_SHIFT; - mask = si_create_bitmask(max_rb_num / 
se_num / sh_per_se); + mask = si_create_bitmask(max_rb_num_per_se / sh_per_se); return data & mask; } static void si_setup_rb(struct radeon_device *rdev, u32 se_num, u32 sh_per_se, - u32 max_rb_num) + u32 max_rb_num_per_se) { int i, j; u32 data, mask; @@ -2547,19 +3049,21 @@ for (i = 0; i < se_num; i++) { for (j = 0; j < sh_per_se; j++) { si_select_se_sh(rdev, i, j); - data = si_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se); + data = si_get_rb_disabled(rdev, max_rb_num_per_se, sh_per_se); disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH); } } si_select_se_sh(rdev, 0xffffffff, 0xffffffff); mask = 1; - for (i = 0; i < max_rb_num; i++) { + for (i = 0; i < max_rb_num_per_se * se_num; i++) { if (!(disabled_rbs & mask)) enabled_rbs |= mask; mask <<= 1; } + rdev->config.si.backend_enable_mask = enabled_rbs; + for (i = 0; i < se_num; i++) { si_select_se_sh(rdev, i, 0xffffffff); data = 0; @@ -2691,6 +3195,8 @@ } WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); + WREG32(SRBM_INT_CNTL, 1); + WREG32(SRBM_INT_ACK, 1); evergreen_fix_pci_max_read_req_size(rdev); @@ -2788,6 +3294,13 @@ rdev->config.si.max_sh_per_se, rdev->config.si.max_cu_per_sh); + rdev->config.si.active_cus = 0; + for (i = 0; i < rdev->config.si.max_shader_engines; i++) { + for (j = 0; j < rdev->config.si.max_sh_per_se; j++) { + rdev->config.si.active_cus += + hweight32(si_get_cu_active_bitmap(rdev, i, j)); + } + } /* set HW defaults for 3D engine */ WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | @@ -2876,7 +3389,7 @@ /* EVENT_WRITE_EOP - flush caches, send int */ radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5)); - radeon_ring_write(ring, addr & 0xffffffff); + radeon_ring_write(ring, lower_32_bits(addr)); radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2)); radeon_ring_write(ring, fence->seq); radeon_ring_write(ring, 0); @@ -2888,6 +3401,7 @@ void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) { struct radeon_ring *ring = &rdev->ring[ib->ring]; + unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0; u32 header; if (ib->is_const_ib) { @@ -2909,7 +3423,7 @@ radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); radeon_ring_write(ring, (1 << 8)); radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); - radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff); + radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr)); radeon_ring_write(ring, next_rptr); } @@ -2923,14 +3437,13 @@ #endif (ib->gpu_addr & 0xFFFFFFFC)); radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF); - radeon_ring_write(ring, ib->length_dw | - (ib->vm ? (ib->vm->id << 24) : 0)); + radeon_ring_write(ring, ib->length_dw | (vm_id << 24)); if (!ib->is_const_ib) { /* flush read cache over gart for this vmid */ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2); - radeon_ring_write(ring, ib->vm ? 
ib->vm->id : 0); + radeon_ring_write(ring, vm_id); radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3)); radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA | PACKET3_TC_ACTION_ENA | @@ -2950,7 +3463,8 @@ if (enable) WREG32(CP_ME_CNTL, 0); else { - radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); + if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX) + radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT)); WREG32(SCRATCH_UMSK, 0); rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; @@ -2962,34 +3476,77 @@ static int si_cp_load_microcode(struct radeon_device *rdev) { - const __be32 *fw_data; int i; - if (!rdev->me_fw || !rdev->pfp_fw) + if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw) return -EINVAL; si_cp_enable(rdev, false); - /* PFP */ - fw_data = (const __be32 *)rdev->pfp_fw->data; - WREG32(CP_PFP_UCODE_ADDR, 0); - for (i = 0; i < SI_PFP_UCODE_SIZE; i++) - WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++)); - WREG32(CP_PFP_UCODE_ADDR, 0); - - /* CE */ - fw_data = (const __be32 *)rdev->ce_fw->data; - WREG32(CP_CE_UCODE_ADDR, 0); - for (i = 0; i < SI_CE_UCODE_SIZE; i++) - WREG32(CP_CE_UCODE_DATA, be32_to_cpup(fw_data++)); - WREG32(CP_CE_UCODE_ADDR, 0); + if (rdev->new_fw) { + const struct gfx_firmware_header_v1_0 *pfp_hdr = + (const struct gfx_firmware_header_v1_0 *)rdev->pfp_fw->data; + const struct gfx_firmware_header_v1_0 *ce_hdr = + (const struct gfx_firmware_header_v1_0 *)rdev->ce_fw->data; + const struct gfx_firmware_header_v1_0 *me_hdr = + (const struct gfx_firmware_header_v1_0 *)rdev->me_fw->data; + const __le32 *fw_data; + u32 fw_size; + + radeon_ucode_print_gfx_hdr(&pfp_hdr->header); + radeon_ucode_print_gfx_hdr(&ce_hdr->header); + radeon_ucode_print_gfx_hdr(&me_hdr->header); + + /* PFP */ + fw_data = (const __le32 *) + (rdev->pfp_fw->data + le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes)); + fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4; + WREG32(CP_PFP_UCODE_ADDR, 0); + for (i = 0; i < fw_size; i++) + WREG32(CP_PFP_UCODE_DATA, le32_to_cpup(fw_data++)); + WREG32(CP_PFP_UCODE_ADDR, 0); + + /* CE */ + fw_data = (const __le32 *) + (rdev->ce_fw->data + le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes)); + fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4; + WREG32(CP_CE_UCODE_ADDR, 0); + for (i = 0; i < fw_size; i++) + WREG32(CP_CE_UCODE_DATA, le32_to_cpup(fw_data++)); + WREG32(CP_CE_UCODE_ADDR, 0); + + /* ME */ + fw_data = (const __be32 *) + (rdev->me_fw->data + le32_to_cpu(me_hdr->header.ucode_array_offset_bytes)); + fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4; + WREG32(CP_ME_RAM_WADDR, 0); + for (i = 0; i < fw_size; i++) + WREG32(CP_ME_RAM_DATA, le32_to_cpup(fw_data++)); + WREG32(CP_ME_RAM_WADDR, 0); + } else { + const __be32 *fw_data; - /* ME */ - fw_data = (const __be32 *)rdev->me_fw->data; - WREG32(CP_ME_RAM_WADDR, 0); - for (i = 0; i < SI_PM4_UCODE_SIZE; i++) - WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++)); - WREG32(CP_ME_RAM_WADDR, 0); + /* PFP */ + fw_data = (const __be32 *)rdev->pfp_fw->data; + WREG32(CP_PFP_UCODE_ADDR, 0); + for (i = 0; i < SI_PFP_UCODE_SIZE; i++) + WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++)); + WREG32(CP_PFP_UCODE_ADDR, 0); + + /* CE */ + fw_data = (const __be32 *)rdev->ce_fw->data; + WREG32(CP_CE_UCODE_ADDR, 0); + for (i = 0; i < SI_CE_UCODE_SIZE; i++) + WREG32(CP_CE_UCODE_DATA, be32_to_cpup(fw_data++)); + WREG32(CP_CE_UCODE_ADDR, 0); + + /* ME */ + fw_data = (const __be32 
*)rdev->me_fw->data; + WREG32(CP_ME_RAM_WADDR, 0); + for (i = 0; i < SI_PM4_UCODE_SIZE; i++) + WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++)); + WREG32(CP_ME_RAM_WADDR, 0); + } WREG32(CP_PFP_UCODE_ADDR, 0); WREG32(CP_CE_UCODE_ADDR, 0); @@ -3022,7 +3579,7 @@ radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE)); radeon_ring_write(ring, 0xc000); radeon_ring_write(ring, 0xe000); - radeon_ring_unlock_commit(rdev, ring); + radeon_ring_unlock_commit(rdev, ring, false); si_cp_enable(rdev, true); @@ -3051,7 +3608,7 @@ radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */ radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */ - radeon_ring_unlock_commit(rdev, ring); + radeon_ring_unlock_commit(rdev, ring, false); for (i = RADEON_RING_TYPE_GFX_INDEX; i <= CAYMAN_RING_TYPE_CP2_INDEX; ++i) { ring = &rdev->ring[i]; @@ -3061,7 +3618,7 @@ radeon_ring_write(ring, PACKET3_COMPUTE(PACKET3_CLEAR_STATE, 0)); radeon_ring_write(ring, 0); - radeon_ring_unlock_commit(rdev, ring); + radeon_ring_unlock_commit(rdev, ring, false); } return 0; @@ -3092,16 +3649,7 @@ u32 rb_bufsz; int r; - /* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */ - WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP | - SOFT_RESET_PA | - SOFT_RESET_VGT | - SOFT_RESET_SPI | - SOFT_RESET_SX)); - RREG32(GRBM_SOFT_RESET); - mdelay(15); - WREG32(GRBM_SOFT_RESET, 0); - RREG32(GRBM_SOFT_RESET); + si_enable_gui_idle_interrupt(rdev, false); WREG32(CP_SEM_WAIT_TIMER, 0x0); WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0); @@ -3115,8 +3663,8 @@ /* ring 0 - compute and gfx */ /* Set ring buffer size */ ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; - rb_bufsz = drm_order(ring->ring_size / 8); - tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; + rb_bufsz = order_base_2(ring->ring_size / 8); + tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; #ifdef __BIG_ENDIAN tmp |= BUF_SWAP_32BIT; #endif @@ -3143,13 +3691,11 @@ WREG32(CP_RB0_BASE, ring->gpu_addr >> 8); - ring->rptr = RREG32(CP_RB0_RPTR); - /* ring1 - compute only */ /* Set ring buffer size */ ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]; - rb_bufsz = drm_order(ring->ring_size / 8); - tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; + rb_bufsz = order_base_2(ring->ring_size / 8); + tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; #ifdef __BIG_ENDIAN tmp |= BUF_SWAP_32BIT; #endif @@ -3169,13 +3715,11 @@ WREG32(CP_RB1_BASE, ring->gpu_addr >> 8); - ring->rptr = RREG32(CP_RB1_RPTR); - /* ring2 - compute only */ /* Set ring buffer size */ ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]; - rb_bufsz = drm_order(ring->ring_size / 8); - tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; + rb_bufsz = order_base_2(ring->ring_size / 8); + tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; #ifdef __BIG_ENDIAN tmp |= BUF_SWAP_32BIT; #endif @@ -3195,8 +3739,6 @@ WREG32(CP_RB2_BASE, ring->gpu_addr >> 8); - ring->rptr = RREG32(CP_RB2_RPTR); - /* start the rings */ si_cp_start(rdev); rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true; @@ -3218,10 +3760,15 @@ rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false; } + si_enable_gui_idle_interrupt(rdev, true); + + if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX) + radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); + return 0; } -static u32 si_gpu_check_soft_reset(struct radeon_device *rdev) +u32 si_gpu_check_soft_reset(struct radeon_device *rdev) { u32 reset_mask = 0; u32 tmp; @@ -3319,6 +3866,13 @@ dev_info(rdev->dev, " 
VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS)); + /* disable PG/CG */ + si_fini_pg(rdev); + si_fini_cg(rdev); + + /* stop the rlc */ + si_rlc_stop(rdev); + /* Disable CP parsing/prefetching */ WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT); @@ -3427,6 +3981,106 @@ evergreen_print_gpu_status_regs(rdev); } +static void si_set_clk_bypass_mode(struct radeon_device *rdev) +{ + u32 tmp, i; + + tmp = RREG32(CG_SPLL_FUNC_CNTL); + tmp |= SPLL_BYPASS_EN; + WREG32(CG_SPLL_FUNC_CNTL, tmp); + + tmp = RREG32(CG_SPLL_FUNC_CNTL_2); + tmp |= SPLL_CTLREQ_CHG; + WREG32(CG_SPLL_FUNC_CNTL_2, tmp); + + for (i = 0; i < rdev->usec_timeout; i++) { + if (RREG32(SPLL_STATUS) & SPLL_CHG_STATUS) + break; + udelay(1); + } + + tmp = RREG32(CG_SPLL_FUNC_CNTL_2); + tmp &= ~(SPLL_CTLREQ_CHG | SCLK_MUX_UPDATE); + WREG32(CG_SPLL_FUNC_CNTL_2, tmp); + + tmp = RREG32(MPLL_CNTL_MODE); + tmp &= ~MPLL_MCLK_SEL; + WREG32(MPLL_CNTL_MODE, tmp); +} + +static void si_spll_powerdown(struct radeon_device *rdev) +{ + u32 tmp; + + tmp = RREG32(SPLL_CNTL_MODE); + tmp |= SPLL_SW_DIR_CONTROL; + WREG32(SPLL_CNTL_MODE, tmp); + + tmp = RREG32(CG_SPLL_FUNC_CNTL); + tmp |= SPLL_RESET; + WREG32(CG_SPLL_FUNC_CNTL, tmp); + + tmp = RREG32(CG_SPLL_FUNC_CNTL); + tmp |= SPLL_SLEEP; + WREG32(CG_SPLL_FUNC_CNTL, tmp); + + tmp = RREG32(SPLL_CNTL_MODE); + tmp &= ~SPLL_SW_DIR_CONTROL; + WREG32(SPLL_CNTL_MODE, tmp); +} + +static void si_gpu_pci_config_reset(struct radeon_device *rdev) +{ + struct evergreen_mc_save save; + u32 tmp, i; + + dev_info(rdev->dev, "GPU pci config reset\n"); + + /* disable dpm? */ + + /* disable cg/pg */ + si_fini_pg(rdev); + si_fini_cg(rdev); + + /* Disable CP parsing/prefetching */ + WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT); + /* dma0 */ + tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET); + tmp &= ~DMA_RB_ENABLE; + WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp); + /* dma1 */ + tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET); + tmp &= ~DMA_RB_ENABLE; + WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp); + /* XXX other engines? 
*/ + + /* halt the rlc, disable cp internal ints */ + si_rlc_stop(rdev); + + udelay(50); + + /* disable mem access */ + evergreen_mc_stop(rdev, &save); + if (evergreen_mc_wait_for_idle(rdev)) { + dev_warn(rdev->dev, "Wait for MC idle timed out !\n"); + } + + /* set mclk/sclk to bypass */ + si_set_clk_bypass_mode(rdev); + /* powerdown spll */ + si_spll_powerdown(rdev); + /* disable BM */ + pci_clear_master(rdev->pdev); + /* reset */ + radeon_pci_config_reset(rdev); + /* wait for asic to come out of reset */ + for (i = 0; i < rdev->usec_timeout; i++) { + if (RREG32(CONFIG_MEMSIZE) != 0xffffffff) + break; + udelay(1); + } +} + int si_asic_reset(struct radeon_device *rdev) { u32 reset_mask; @@ -3436,10 +4090,17 @@ if (reset_mask) r600_set_bios_scratch_engine_hung(rdev, true); + /* try soft reset */ si_gpu_soft_reset(rdev, reset_mask); reset_mask = si_gpu_check_soft_reset(rdev); + /* try pci config reset */ + if (reset_mask && radeon_hard_reset) + si_gpu_pci_config_reset(rdev); + + reset_mask = si_gpu_check_soft_reset(rdev); + if (!reset_mask) r600_set_bios_scratch_engine_hung(rdev, false); @@ -3462,39 +4123,9 @@ if (!(reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE | RADEON_RESET_CP))) { - radeon_ring_lockup_update(ring); + radeon_ring_lockup_update(rdev, ring); return false; } - /* force CP activities */ - radeon_ring_force_activity(rdev, ring); - return radeon_ring_test_lockup(rdev, ring); -} - -/** - * si_dma_is_lockup - Check if the DMA engine is locked up - * - * @rdev: radeon_device pointer - * @ring: radeon_ring structure holding ring information - * - * Check if the async DMA engine is locked up. - * Returns true if the engine appears to be locked up, false if not. - */ -bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) -{ - u32 reset_mask = si_gpu_check_soft_reset(rdev); - u32 mask; - - if (ring->idx == R600_RING_TYPE_DMA_INDEX) - mask = RADEON_RESET_DMA; - else - mask = RADEON_RESET_DMA1; - - if (!(reset_mask & mask)) { - radeon_ring_lockup_update(ring); - return false; - } - /* force ring activities */ - radeon_ring_force_activity(rdev, ring); return radeon_ring_test_lockup(rdev, ring); } @@ -3550,8 +4181,8 @@ } } -static void si_vram_gtt_location(struct radeon_device *rdev, - struct radeon_mc *mc) +void si_vram_gtt_location(struct radeon_device *rdev, + struct radeon_mc *mc) { if (mc->mc_vram_size > 0xFFC0000000ULL) { /* leave room for at least 1024M GTT */ @@ -3654,23 +4285,25 @@ r = radeon_gart_table_vram_pin(rdev); if (r) return r; - radeon_gart_restore(rdev); /* Setup TLB control */ WREG32(MC_VM_MX_L1_TLB_CNTL, (0xA << 7) | ENABLE_L1_TLB | + ENABLE_L1_FRAGMENT_PROCESSING | SYSTEM_ACCESS_MODE_NOT_IN_SYS | ENABLE_ADVANCED_DRIVER_MODEL | SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU); /* Setup L2 cache */ WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | + ENABLE_L2_FRAGMENT_PROCESSING | ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE | ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE | EFFECTIVE_L2_QUEUE_SIZE(7) | CONTEXT1_IDENTITY_ACCESS_MODE(1)); WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE); WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY | - L2_CACHE_BIGK_FRAGMENT_SIZE(0)); + BANK_SELECT(4) | + L2_CACHE_BIGK_FRAGMENT_SIZE(4)); /* setup context0 */ WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12); @@ -3688,7 +4321,7 @@ /* empty context1-15 */ /* set vm size, must be a multiple of 4 */ WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0); - WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, 
rdev->vm_manager.max_pfn); + WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn - 1); /* Assign the pt base to something valid for now; the pts used for * the VMs are determined by the application and setup and assigned * on the fly in the vm part of radeon_gart.c @@ -3696,10 +4329,10 @@ for (i = 1; i < 16; i++) { if (i < 8) WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2), - rdev->gart.table_addr >> 12); + rdev->vm_manager.saved_table_addr[i]); else WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2), - rdev->gart.table_addr >> 12); + rdev->vm_manager.saved_table_addr[i]); } /* enable context1-15 */ @@ -3707,6 +4340,7 @@ (u32)(rdev->dummy_page.addr >> 12)); WREG32(VM_CONTEXT1_CNTL2, 4); WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) | + PAGE_TABLE_BLOCK_SIZE(radeon_vm_block_size - 9) | RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT | RANGE_PROTECTION_FAULT_ENABLE_DEFAULT | DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT | @@ -3730,6 +4364,17 @@ static void si_pcie_gart_disable(struct radeon_device *rdev) { + unsigned i; + + for (i = 1; i < 16; ++i) { + uint32_t reg; + if (i < 8) + reg = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2); + else + reg = VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2); + rdev->vm_manager.saved_table_addr[i] = RREG32(reg); + } + /* Disable all tables */ WREG32(VM_CONTEXT0_CNTL, 0); WREG32(VM_CONTEXT1_CNTL, 0); @@ -4078,7 +4723,7 @@ int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib) { int ret = 0; - u32 idx = 0; + u32 idx = 0, i; struct radeon_cs_packet pkt; do { @@ -4120,8 +4765,15 @@ ret = -EINVAL; break; } - if (ret) + if (ret) { + for (i = 0; i < ib->length_dw; i++) { + if (i == idx) + printk("\t0x%08x <---\n", ib->ptr[i]); + else + printk("\t0x%08x\n", ib->ptr[i]); + } break; + } } while (idx < ib->length_dw); return ret; @@ -4145,136 +4797,290 @@ } /** - * si_vm_set_page - update the page tables using the CP + * si_vm_decode_fault - print human readable fault info * * @rdev: radeon_device pointer - * @ib: indirect buffer to fill with commands - * @pe: addr of the page entry - * @addr: dst addr to write into pe - * @count: number of page entries to update - * @incr: increase next addr by incr bytes - * @flags: access flags + * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value + * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value * - * Update the page tables using the CP (SI). + * Print human readable fault information (SI). 
*/ -void si_vm_set_page(struct radeon_device *rdev, - struct radeon_ib *ib, - uint64_t pe, - uint64_t addr, unsigned count, - uint32_t incr, uint32_t flags) -{ - uint32_t r600_flags = cayman_vm_page_flags(rdev, flags); - uint64_t value; - unsigned ndw; - - if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) { - while (count) { - ndw = 2 + count * 2; - if (ndw > 0x3FFE) - ndw = 0x3FFE; - - ib->ptr[ib->length_dw++] = PACKET3(PACKET3_WRITE_DATA, ndw); - ib->ptr[ib->length_dw++] = (WRITE_DATA_ENGINE_SEL(0) | - WRITE_DATA_DST_SEL(1)); - ib->ptr[ib->length_dw++] = pe; - ib->ptr[ib->length_dw++] = upper_32_bits(pe); - for (; ndw > 2; ndw -= 2, --count, pe += 8) { - if (flags & RADEON_VM_PAGE_SYSTEM) { - value = radeon_vm_map_gart(rdev, addr); - value &= 0xFFFFFFFFFFFFF000ULL; - } else if (flags & RADEON_VM_PAGE_VALID) { - value = addr; - } else { - value = 0; - } - addr += incr; - value |= r600_flags; - ib->ptr[ib->length_dw++] = value; - ib->ptr[ib->length_dw++] = upper_32_bits(value); - } +static void si_vm_decode_fault(struct radeon_device *rdev, + u32 status, u32 addr) +{ + u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT; + u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT; + u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT; + char *block; + + if (rdev->family == CHIP_TAHITI) { + switch (mc_id) { + case 160: + case 144: + case 96: + case 80: + case 224: + case 208: + case 32: + case 16: + block = "CB"; + break; + case 161: + case 145: + case 97: + case 81: + case 225: + case 209: + case 33: + case 17: + block = "CB_FMASK"; + break; + case 162: + case 146: + case 98: + case 82: + case 226: + case 210: + case 34: + case 18: + block = "CB_CMASK"; + break; + case 163: + case 147: + case 99: + case 83: + case 227: + case 211: + case 35: + case 19: + block = "CB_IMMED"; + break; + case 164: + case 148: + case 100: + case 84: + case 228: + case 212: + case 36: + case 20: + block = "DB"; + break; + case 165: + case 149: + case 101: + case 85: + case 229: + case 213: + case 37: + case 21: + block = "DB_HTILE"; + break; + case 167: + case 151: + case 103: + case 87: + case 231: + case 215: + case 39: + case 23: + block = "DB_STEN"; + break; + case 72: + case 68: + case 64: + case 8: + case 4: + case 0: + case 136: + case 132: + case 128: + case 200: + case 196: + case 192: + block = "TC"; + break; + case 112: + case 48: + block = "CP"; + break; + case 49: + case 177: + case 50: + case 178: + block = "SH"; + break; + case 53: + case 190: + block = "VGT"; + break; + case 117: + block = "IH"; + break; + case 51: + case 115: + block = "RLC"; + break; + case 119: + case 183: + block = "DMA0"; + break; + case 61: + block = "DMA1"; + break; + case 248: + case 120: + block = "HDP"; + break; + default: + block = "unknown"; + break; } } else { - /* DMA */ - if (flags & RADEON_VM_PAGE_SYSTEM) { - while (count) { - ndw = count * 2; - if (ndw > 0xFFFFE) - ndw = 0xFFFFE; - - /* for non-physically contiguous pages (system) */ - ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw); - ib->ptr[ib->length_dw++] = pe; - ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff; - for (; ndw > 0; ndw -= 2, --count, pe += 8) { - if (flags & RADEON_VM_PAGE_SYSTEM) { - value = radeon_vm_map_gart(rdev, addr); - value &= 0xFFFFFFFFFFFFF000ULL; - } else if (flags & RADEON_VM_PAGE_VALID) { - value = addr; - } else { - value = 0; - } - addr += incr; - value |= r600_flags; - ib->ptr[ib->length_dw++] = value; - ib->ptr[ib->length_dw++] = upper_32_bits(value); - 
} - } - } else { - while (count) { - ndw = count * 2; - if (ndw > 0xFFFFE) - ndw = 0xFFFFE; - - if (flags & RADEON_VM_PAGE_VALID) - value = addr; - else - value = 0; - /* for physically contiguous pages (vram) */ - ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw); - ib->ptr[ib->length_dw++] = pe; /* dst addr */ - ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff; - ib->ptr[ib->length_dw++] = r600_flags; /* mask */ - ib->ptr[ib->length_dw++] = 0; - ib->ptr[ib->length_dw++] = value; /* value */ - ib->ptr[ib->length_dw++] = upper_32_bits(value); - ib->ptr[ib->length_dw++] = incr; /* increment size */ - ib->ptr[ib->length_dw++] = 0; - pe += ndw * 4; - addr += (ndw / 2) * incr; - count -= ndw / 2; - } + switch (mc_id) { + case 32: + case 16: + case 96: + case 80: + case 160: + case 144: + case 224: + case 208: + block = "CB"; + break; + case 33: + case 17: + case 97: + case 81: + case 161: + case 145: + case 225: + case 209: + block = "CB_FMASK"; + break; + case 34: + case 18: + case 98: + case 82: + case 162: + case 146: + case 226: + case 210: + block = "CB_CMASK"; + break; + case 35: + case 19: + case 99: + case 83: + case 163: + case 147: + case 227: + case 211: + block = "CB_IMMED"; + break; + case 36: + case 20: + case 100: + case 84: + case 164: + case 148: + case 228: + case 212: + block = "DB"; + break; + case 37: + case 21: + case 101: + case 85: + case 165: + case 149: + case 229: + case 213: + block = "DB_HTILE"; + break; + case 39: + case 23: + case 103: + case 87: + case 167: + case 151: + case 231: + case 215: + block = "DB_STEN"; + break; + case 72: + case 68: + case 8: + case 4: + case 136: + case 132: + case 200: + case 196: + block = "TC"; + break; + case 112: + case 48: + block = "CP"; + break; + case 49: + case 177: + case 50: + case 178: + block = "SH"; + break; + case 53: + block = "VGT"; + break; + case 117: + block = "IH"; + break; + case 51: + case 115: + block = "RLC"; + break; + case 119: + case 183: + block = "DMA0"; + break; + case 61: + block = "DMA1"; + break; + case 248: + case 120: + block = "HDP"; + break; + default: + block = "unknown"; + break; } - while (ib->length_dw & 0x7) - ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0); } + + printk("VM fault (0x%02x, vmid %d) at page %u, %s from %s (%d)\n", + protections, vmid, addr, + (status & MEMORY_CLIENT_RW_MASK) ? 
"write" : "read", + block, mc_id); } -void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) +void si_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring, + unsigned vm_id, uint64_t pd_addr) { - struct radeon_ring *ring = &rdev->ring[ridx]; - - if (vm == NULL) - return; - /* write new base address */ radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); - radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | + radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) | WRITE_DATA_DST_SEL(0))); - if (vm->id < 8) { + if (vm_id < 8) { radeon_ring_write(ring, - (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2); + (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2); } else { radeon_ring_write(ring, - (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2); + (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2)) >> 2); } radeon_ring_write(ring, 0); - radeon_ring_write(ring, vm->pd_gpu_addr >> 12); + radeon_ring_write(ring, pd_addr >> 12); /* flush hdp cache */ radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); - radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | + radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) | WRITE_DATA_DST_SEL(0))); radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2); radeon_ring_write(ring, 0); @@ -4282,171 +5088,817 @@ /* bits 0-15 are the VM contexts0-15 */ radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); - radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | + radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) | WRITE_DATA_DST_SEL(0))); radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2); radeon_ring_write(ring, 0); - radeon_ring_write(ring, 1 << vm->id); + radeon_ring_write(ring, 1 << vm_id); + + /* wait for the invalidate to complete */ + radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); + radeon_ring_write(ring, (WAIT_REG_MEM_FUNCTION(0) | /* always */ + WAIT_REG_MEM_ENGINE(0))); /* me */ + radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2); + radeon_ring_write(ring, 0); + radeon_ring_write(ring, 0); /* ref */ + radeon_ring_write(ring, 0); /* mask */ + radeon_ring_write(ring, 0x20); /* poll interval */ /* sync PFP to ME, otherwise we might get invalid PFP reads */ radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); radeon_ring_write(ring, 0x0); } -void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) +/* + * Power and clock gating + */ +static void si_wait_for_rlc_serdes(struct radeon_device *rdev) { - struct radeon_ring *ring = &rdev->ring[ridx]; + int i; - if (vm == NULL) - return; + for (i = 0; i < rdev->usec_timeout; i++) { + if (RREG32(RLC_SERDES_MASTER_BUSY_0) == 0) + break; + udelay(1); + } + + for (i = 0; i < rdev->usec_timeout; i++) { + if (RREG32(RLC_SERDES_MASTER_BUSY_1) == 0) + break; + udelay(1); + } +} + +static void si_enable_gui_idle_interrupt(struct radeon_device *rdev, + bool enable) +{ + u32 tmp = RREG32(CP_INT_CNTL_RING0); + u32 mask; + int i; - radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0)); - if (vm->id < 8) { - radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2)); + if (enable) + tmp |= (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); + else + tmp &= ~(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); + WREG32(CP_INT_CNTL_RING0, tmp); + + if (!enable) { + /* read a gfx register */ + tmp = RREG32(DB_DEPTH_INFO); + + mask = RLC_BUSY_STATUS | GFX_POWER_STATUS | GFX_CLOCK_STATUS | GFX_LS_STATUS; + for (i = 0; i < rdev->usec_timeout; i++) { + if ((RREG32(RLC_STAT) & mask) == 
(GFX_CLOCK_STATUS | GFX_POWER_STATUS)) + break; + udelay(1); + } + } +} + +static void si_set_uvd_dcm(struct radeon_device *rdev, + bool sw_mode) +{ + u32 tmp, tmp2; + + tmp = RREG32(UVD_CGC_CTRL); + tmp &= ~(CLK_OD_MASK | CG_DT_MASK); + tmp |= DCM | CG_DT(1) | CLK_OD(4); + + if (sw_mode) { + tmp &= ~0x7ffff800; + tmp2 = DYN_OR_EN | DYN_RR_EN | G_DIV_ID(7); } else { - radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2)); + tmp |= 0x7ffff800; + tmp2 = 0; } - radeon_ring_write(ring, vm->pd_gpu_addr >> 12); - /* flush hdp cache */ - radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0)); - radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2)); - radeon_ring_write(ring, 1); - - /* bits 0-7 are the VM contexts0-7 */ - radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0)); - radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2)); - radeon_ring_write(ring, 1 << vm->id); + WREG32(UVD_CGC_CTRL, tmp); + WREG32_UVD_CTX(UVD_CGC_CTRL2, tmp2); } -/* - * RLC - */ -void si_rlc_fini(struct radeon_device *rdev) +void si_init_uvd_internal_cg(struct radeon_device *rdev) { - int r; + bool hw_mode = true; + + if (hw_mode) { + si_set_uvd_dcm(rdev, false); + } else { + u32 tmp = RREG32(UVD_CGC_CTRL); + tmp &= ~DCM; + WREG32(UVD_CGC_CTRL, tmp); + } +} + +static u32 si_halt_rlc(struct radeon_device *rdev) +{ + u32 data, orig; + + orig = data = RREG32(RLC_CNTL); + + if (data & RLC_ENABLE) { + data &= ~RLC_ENABLE; + WREG32(RLC_CNTL, data); + + si_wait_for_rlc_serdes(rdev); + } + + return orig; +} + +static void si_update_rlc(struct radeon_device *rdev, u32 rlc) +{ + u32 tmp; + + tmp = RREG32(RLC_CNTL); + if (tmp != rlc) + WREG32(RLC_CNTL, rlc); +} + +static void si_enable_dma_pg(struct radeon_device *rdev, bool enable) +{ + u32 data, orig; + + orig = data = RREG32(DMA_PG); + if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_SDMA)) + data |= PG_CNTL_ENABLE; + else + data &= ~PG_CNTL_ENABLE; + if (orig != data) + WREG32(DMA_PG, data); +} + +static void si_init_dma_pg(struct radeon_device *rdev) +{ + u32 tmp; + + WREG32(DMA_PGFSM_WRITE, 0x00002000); + WREG32(DMA_PGFSM_CONFIG, 0x100010ff); + + for (tmp = 0; tmp < 5; tmp++) + WREG32(DMA_PGFSM_WRITE, 0); +} + +static void si_enable_gfx_cgpg(struct radeon_device *rdev, + bool enable) +{ + u32 tmp; - /* save restore block */ - if (rdev->rlc.save_restore_obj) { - r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false); - if (unlikely(r != 0)) - dev_warn(rdev->dev, "(%d) reserve RLC sr bo failed\n", r); - radeon_bo_unpin(rdev->rlc.save_restore_obj); - radeon_bo_unreserve(rdev->rlc.save_restore_obj); + if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG)) { + tmp = RLC_PUD(0x10) | RLC_PDD(0x10) | RLC_TTPD(0x10) | RLC_MSD(0x10); + WREG32(RLC_TTOP_D, tmp); + + tmp = RREG32(RLC_PG_CNTL); + tmp |= GFX_PG_ENABLE; + WREG32(RLC_PG_CNTL, tmp); + + tmp = RREG32(RLC_AUTO_PG_CTRL); + tmp |= AUTO_PG_EN; + WREG32(RLC_AUTO_PG_CTRL, tmp); + } else { + tmp = RREG32(RLC_AUTO_PG_CTRL); + tmp &= ~AUTO_PG_EN; + WREG32(RLC_AUTO_PG_CTRL, tmp); - radeon_bo_unref(&rdev->rlc.save_restore_obj); - rdev->rlc.save_restore_obj = NULL; + tmp = RREG32(DB_RENDER_CONTROL); } +} + +static void si_init_gfx_cgpg(struct radeon_device *rdev) +{ + u32 tmp; + + WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8); + + tmp = RREG32(RLC_PG_CNTL); + tmp |= GFX_PG_SRC; + WREG32(RLC_PG_CNTL, tmp); + + WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8); + + tmp 
= RREG32(RLC_AUTO_PG_CTRL); + + tmp &= ~GRBM_REG_SGIT_MASK; + tmp |= GRBM_REG_SGIT(0x700); + tmp &= ~PG_AFTER_GRBM_REG_ST_MASK; + WREG32(RLC_AUTO_PG_CTRL, tmp); +} - /* clear state block */ - if (rdev->rlc.clear_state_obj) { - r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false); - if (unlikely(r != 0)) - dev_warn(rdev->dev, "(%d) reserve RLC c bo failed\n", r); - radeon_bo_unpin(rdev->rlc.clear_state_obj); - radeon_bo_unreserve(rdev->rlc.clear_state_obj); +static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh) +{ + u32 mask = 0, tmp, tmp1; + int i; + + si_select_se_sh(rdev, se, sh); + tmp = RREG32(CC_GC_SHADER_ARRAY_CONFIG); + tmp1 = RREG32(GC_USER_SHADER_ARRAY_CONFIG); + si_select_se_sh(rdev, 0xffffffff, 0xffffffff); - radeon_bo_unref(&rdev->rlc.clear_state_obj); - rdev->rlc.clear_state_obj = NULL; + tmp &= 0xffff0000; + + tmp |= tmp1; + tmp >>= 16; + + for (i = 0; i < rdev->config.si.max_cu_per_sh; i ++) { + mask <<= 1; + mask |= 1; } + + return (~tmp) & mask; } -int si_rlc_init(struct radeon_device *rdev) +static void si_init_ao_cu_mask(struct radeon_device *rdev) { - int r; + u32 i, j, k, active_cu_number = 0; + u32 mask, counter, cu_bitmap; + u32 tmp = 0; - /* save restore block */ - if (rdev->rlc.save_restore_obj == NULL) { - r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true, - RADEON_GEM_DOMAIN_VRAM, NULL, - &rdev->rlc.save_restore_obj); - if (r) { - dev_warn(rdev->dev, "(%d) create RLC sr bo failed\n", r); - return r; + for (i = 0; i < rdev->config.si.max_shader_engines; i++) { + for (j = 0; j < rdev->config.si.max_sh_per_se; j++) { + mask = 1; + cu_bitmap = 0; + counter = 0; + for (k = 0; k < rdev->config.si.max_cu_per_sh; k++) { + if (si_get_cu_active_bitmap(rdev, i, j) & mask) { + if (counter < 2) + cu_bitmap |= mask; + counter++; + } + mask <<= 1; + } + + active_cu_number += counter; + tmp |= (cu_bitmap << (i * 16 + j * 8)); } } - r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false); - if (unlikely(r != 0)) { - si_rlc_fini(rdev); - return r; + WREG32(RLC_PG_AO_CU_MASK, tmp); + + tmp = RREG32(RLC_MAX_PG_CU); + tmp &= ~MAX_PU_CU_MASK; + tmp |= MAX_PU_CU(active_cu_number); + WREG32(RLC_MAX_PG_CU, tmp); +} + +static void si_enable_cgcg(struct radeon_device *rdev, + bool enable) +{ + u32 data, orig, tmp; + + orig = data = RREG32(RLC_CGCG_CGLS_CTRL); + + if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGCG)) { + si_enable_gui_idle_interrupt(rdev, true); + + WREG32(RLC_GCPM_GENERAL_3, 0x00000080); + + tmp = si_halt_rlc(rdev); + + WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff); + WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff); + WREG32(RLC_SERDES_WR_CTRL, 0x00b000ff); + + si_wait_for_rlc_serdes(rdev); + + si_update_rlc(rdev, tmp); + + WREG32(RLC_SERDES_WR_CTRL, 0x007000ff); + + data |= CGCG_EN | CGLS_EN; + } else { + si_enable_gui_idle_interrupt(rdev, false); + + RREG32(CB_CGTT_SCLK_CTRL); + RREG32(CB_CGTT_SCLK_CTRL); + RREG32(CB_CGTT_SCLK_CTRL); + RREG32(CB_CGTT_SCLK_CTRL); + + data &= ~(CGCG_EN | CGLS_EN); } - r = radeon_bo_pin(rdev->rlc.save_restore_obj, RADEON_GEM_DOMAIN_VRAM, - &rdev->rlc.save_restore_gpu_addr); - radeon_bo_unreserve(rdev->rlc.save_restore_obj); - if (r) { - dev_warn(rdev->dev, "(%d) pin RLC sr bo failed\n", r); - si_rlc_fini(rdev); - return r; + + if (orig != data) + WREG32(RLC_CGCG_CGLS_CTRL, data); +} + +static void si_enable_mgcg(struct radeon_device *rdev, + bool enable) +{ + u32 data, orig, tmp = 0; + + if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGCG)) { + orig = data = 
RREG32(CGTS_SM_CTRL_REG); + data = 0x96940200; + if (orig != data) + WREG32(CGTS_SM_CTRL_REG, data); + + if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CP_LS) { + orig = data = RREG32(CP_MEM_SLP_CNTL); + data |= CP_MEM_LS_EN; + if (orig != data) + WREG32(CP_MEM_SLP_CNTL, data); + } + + orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE); + data &= 0xffffffc0; + if (orig != data) + WREG32(RLC_CGTT_MGCG_OVERRIDE, data); + + tmp = si_halt_rlc(rdev); + + WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff); + WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff); + WREG32(RLC_SERDES_WR_CTRL, 0x00d000ff); + + si_update_rlc(rdev, tmp); + } else { + orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE); + data |= 0x00000003; + if (orig != data) + WREG32(RLC_CGTT_MGCG_OVERRIDE, data); + + data = RREG32(CP_MEM_SLP_CNTL); + if (data & CP_MEM_LS_EN) { + data &= ~CP_MEM_LS_EN; + WREG32(CP_MEM_SLP_CNTL, data); + } + orig = data = RREG32(CGTS_SM_CTRL_REG); + data |= LS_OVERRIDE | OVERRIDE; + if (orig != data) + WREG32(CGTS_SM_CTRL_REG, data); + + tmp = si_halt_rlc(rdev); + + WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff); + WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff); + WREG32(RLC_SERDES_WR_CTRL, 0x00e000ff); + + si_update_rlc(rdev, tmp); } +} - /* clear state block */ - if (rdev->rlc.clear_state_obj == NULL) { - r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true, - RADEON_GEM_DOMAIN_VRAM, NULL, - &rdev->rlc.clear_state_obj); - if (r) { - dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r); - si_rlc_fini(rdev); - return r; +static void si_enable_uvd_mgcg(struct radeon_device *rdev, + bool enable) +{ + u32 orig, data, tmp; + + if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_UVD_MGCG)) { + tmp = RREG32_UVD_CTX(UVD_CGC_MEM_CTRL); + tmp |= 0x3fff; + WREG32_UVD_CTX(UVD_CGC_MEM_CTRL, tmp); + + orig = data = RREG32(UVD_CGC_CTRL); + data |= DCM; + if (orig != data) + WREG32(UVD_CGC_CTRL, data); + + WREG32_SMC(SMC_CG_IND_START + CG_CGTT_LOCAL_0, 0); + WREG32_SMC(SMC_CG_IND_START + CG_CGTT_LOCAL_1, 0); + } else { + tmp = RREG32_UVD_CTX(UVD_CGC_MEM_CTRL); + tmp &= ~0x3fff; + WREG32_UVD_CTX(UVD_CGC_MEM_CTRL, tmp); + + orig = data = RREG32(UVD_CGC_CTRL); + data &= ~DCM; + if (orig != data) + WREG32(UVD_CGC_CTRL, data); + + WREG32_SMC(SMC_CG_IND_START + CG_CGTT_LOCAL_0, 0xffffffff); + WREG32_SMC(SMC_CG_IND_START + CG_CGTT_LOCAL_1, 0xffffffff); + } +} + +static const u32 mc_cg_registers[] = +{ + MC_HUB_MISC_HUB_CG, + MC_HUB_MISC_SIP_CG, + MC_HUB_MISC_VM_CG, + MC_XPB_CLK_GAT, + ATC_MISC_CG, + MC_CITF_MISC_WR_CG, + MC_CITF_MISC_RD_CG, + MC_CITF_MISC_VM_CG, + VM_L2_CG, +}; + +static void si_enable_mc_ls(struct radeon_device *rdev, + bool enable) +{ + int i; + u32 orig, data; + + for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) { + orig = data = RREG32(mc_cg_registers[i]); + if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_LS)) + data |= MC_LS_ENABLE; + else + data &= ~MC_LS_ENABLE; + if (data != orig) + WREG32(mc_cg_registers[i], data); + } +} + +static void si_enable_mc_mgcg(struct radeon_device *rdev, + bool enable) +{ + int i; + u32 orig, data; + + for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) { + orig = data = RREG32(mc_cg_registers[i]); + if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_MGCG)) + data |= MC_CG_ENABLE; + else + data &= ~MC_CG_ENABLE; + if (data != orig) + WREG32(mc_cg_registers[i], data); + } +} + +static void si_enable_dma_mgcg(struct radeon_device *rdev, + bool enable) +{ + u32 orig, data, offset; + int i; + + if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_SDMA_MGCG)) { + for (i = 0; i < 
2; i++) { + if (i == 0) + offset = DMA0_REGISTER_OFFSET; + else + offset = DMA1_REGISTER_OFFSET; + orig = data = RREG32(DMA_POWER_CNTL + offset); + data &= ~MEM_POWER_OVERRIDE; + if (data != orig) + WREG32(DMA_POWER_CNTL + offset, data); + WREG32(DMA_CLK_CTRL + offset, 0x00000100); + } + } else { + for (i = 0; i < 2; i++) { + if (i == 0) + offset = DMA0_REGISTER_OFFSET; + else + offset = DMA1_REGISTER_OFFSET; + orig = data = RREG32(DMA_POWER_CNTL + offset); + data |= MEM_POWER_OVERRIDE; + if (data != orig) + WREG32(DMA_POWER_CNTL + offset, data); + + orig = data = RREG32(DMA_CLK_CTRL + offset); + data = 0xff000000; + if (data != orig) + WREG32(DMA_CLK_CTRL + offset, data); } } - r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false); - if (unlikely(r != 0)) { - si_rlc_fini(rdev); - return r; +} + +static void si_enable_bif_mgls(struct radeon_device *rdev, + bool enable) +{ + u32 orig, data; + + orig = data = RREG32_PCIE(PCIE_CNTL2); + + if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_BIF_LS)) + data |= SLV_MEM_LS_EN | MST_MEM_LS_EN | + REPLAY_MEM_LS_EN | SLV_MEM_AGGRESSIVE_LS_EN; + else + data &= ~(SLV_MEM_LS_EN | MST_MEM_LS_EN | + REPLAY_MEM_LS_EN | SLV_MEM_AGGRESSIVE_LS_EN); + + if (orig != data) + WREG32_PCIE(PCIE_CNTL2, data); +} + +static void si_enable_hdp_mgcg(struct radeon_device *rdev, + bool enable) +{ + u32 orig, data; + + orig = data = RREG32(HDP_HOST_PATH_CNTL); + + if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_MGCG)) + data &= ~CLOCK_GATING_DIS; + else + data |= CLOCK_GATING_DIS; + + if (orig != data) + WREG32(HDP_HOST_PATH_CNTL, data); +} + +static void si_enable_hdp_ls(struct radeon_device *rdev, + bool enable) +{ + u32 orig, data; + + orig = data = RREG32(HDP_MEM_POWER_LS); + + if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_LS)) + data |= HDP_LS_ENABLE; + else + data &= ~HDP_LS_ENABLE; + + if (orig != data) + WREG32(HDP_MEM_POWER_LS, data); +} + +static void si_update_cg(struct radeon_device *rdev, + u32 block, bool enable) +{ + if (block & RADEON_CG_BLOCK_GFX) { + si_enable_gui_idle_interrupt(rdev, false); + /* order matters! 
*/ + if (enable) { + si_enable_mgcg(rdev, true); + si_enable_cgcg(rdev, true); + } else { + si_enable_cgcg(rdev, false); + si_enable_mgcg(rdev, false); + } + si_enable_gui_idle_interrupt(rdev, true); } - r = radeon_bo_pin(rdev->rlc.clear_state_obj, RADEON_GEM_DOMAIN_VRAM, - &rdev->rlc.clear_state_gpu_addr); - radeon_bo_unreserve(rdev->rlc.clear_state_obj); - if (r) { - dev_warn(rdev->dev, "(%d) pin RLC c bo failed\n", r); - si_rlc_fini(rdev); - return r; + + if (block & RADEON_CG_BLOCK_MC) { + si_enable_mc_mgcg(rdev, enable); + si_enable_mc_ls(rdev, enable); } - return 0; + if (block & RADEON_CG_BLOCK_SDMA) { + si_enable_dma_mgcg(rdev, enable); + } + + if (block & RADEON_CG_BLOCK_BIF) { + si_enable_bif_mgls(rdev, enable); + } + + if (block & RADEON_CG_BLOCK_UVD) { + if (rdev->has_uvd) { + si_enable_uvd_mgcg(rdev, enable); + } + } + + if (block & RADEON_CG_BLOCK_HDP) { + si_enable_hdp_mgcg(rdev, enable); + si_enable_hdp_ls(rdev, enable); + } +} + +static void si_init_cg(struct radeon_device *rdev) +{ + si_update_cg(rdev, (RADEON_CG_BLOCK_GFX | + RADEON_CG_BLOCK_MC | + RADEON_CG_BLOCK_SDMA | + RADEON_CG_BLOCK_BIF | + RADEON_CG_BLOCK_HDP), true); + if (rdev->has_uvd) { + si_update_cg(rdev, RADEON_CG_BLOCK_UVD, true); + si_init_uvd_internal_cg(rdev); + } +} + +static void si_fini_cg(struct radeon_device *rdev) +{ + if (rdev->has_uvd) { + si_update_cg(rdev, RADEON_CG_BLOCK_UVD, false); + } + si_update_cg(rdev, (RADEON_CG_BLOCK_GFX | + RADEON_CG_BLOCK_MC | + RADEON_CG_BLOCK_SDMA | + RADEON_CG_BLOCK_BIF | + RADEON_CG_BLOCK_HDP), false); +} + +u32 si_get_csb_size(struct radeon_device *rdev) +{ + u32 count = 0; + const struct cs_section_def *sect = NULL; + const struct cs_extent_def *ext = NULL; + + if (rdev->rlc.cs_data == NULL) + return 0; + + /* begin clear state */ + count += 2; + /* context control state */ + count += 3; + + for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) { + for (ext = sect->section; ext->extent != NULL; ++ext) { + if (sect->id == SECT_CONTEXT) + count += 2 + ext->reg_count; + else + return 0; + } + } + /* pa_sc_raster_config */ + count += 3; + /* end clear state */ + count += 2; + /* clear state */ + count += 2; + + return count; +} + +void si_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer) +{ + u32 count = 0, i; + const struct cs_section_def *sect = NULL; + const struct cs_extent_def *ext = NULL; + + if (rdev->rlc.cs_data == NULL) + return; + if (buffer == NULL) + return; + + buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0)); + buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE); + + buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1)); + buffer[count++] = cpu_to_le32(0x80000000); + buffer[count++] = cpu_to_le32(0x80000000); + + for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) { + for (ext = sect->section; ext->extent != NULL; ++ext) { + if (sect->id == SECT_CONTEXT) { + buffer[count++] = + cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count)); + buffer[count++] = cpu_to_le32(ext->reg_index - 0xa000); + for (i = 0; i < ext->reg_count; i++) + buffer[count++] = cpu_to_le32(ext->extent[i]); + } else { + return; + } + } + } + + buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1)); + buffer[count++] = cpu_to_le32(PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START); + switch (rdev->family) { + case CHIP_TAHITI: + case CHIP_PITCAIRN: + buffer[count++] = cpu_to_le32(0x2a00126a); + break; + case CHIP_VERDE: + buffer[count++] = cpu_to_le32(0x0000124a); + break; + case 
CHIP_OLAND: + buffer[count++] = cpu_to_le32(0x00000082); + break; + case CHIP_HAINAN: + buffer[count++] = cpu_to_le32(0x00000000); + break; + default: + buffer[count++] = cpu_to_le32(0x00000000); + break; + } + + buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0)); + buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE); + + buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0)); + buffer[count++] = cpu_to_le32(0); +} + +static void si_init_pg(struct radeon_device *rdev) +{ + if (rdev->pg_flags) { + if (rdev->pg_flags & RADEON_PG_SUPPORT_SDMA) { + si_init_dma_pg(rdev); + } + si_init_ao_cu_mask(rdev); + if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) { + si_init_gfx_cgpg(rdev); + } else { + WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8); + WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8); + } + si_enable_dma_pg(rdev, true); + si_enable_gfx_cgpg(rdev, true); + } else { + WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8); + WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8); + } +} + +static void si_fini_pg(struct radeon_device *rdev) +{ + if (rdev->pg_flags) { + si_enable_dma_pg(rdev, false); + si_enable_gfx_cgpg(rdev, false); + } +} + +/* + * RLC + */ +void si_rlc_reset(struct radeon_device *rdev) +{ + u32 tmp = RREG32(GRBM_SOFT_RESET); + + tmp |= SOFT_RESET_RLC; + WREG32(GRBM_SOFT_RESET, tmp); + udelay(50); + tmp &= ~SOFT_RESET_RLC; + WREG32(GRBM_SOFT_RESET, tmp); + udelay(50); } static void si_rlc_stop(struct radeon_device *rdev) { WREG32(RLC_CNTL, 0); + + si_enable_gui_idle_interrupt(rdev, false); + + si_wait_for_rlc_serdes(rdev); } static void si_rlc_start(struct radeon_device *rdev) { WREG32(RLC_CNTL, RLC_ENABLE); + + si_enable_gui_idle_interrupt(rdev, true); + + udelay(50); +} + +static bool si_lbpw_supported(struct radeon_device *rdev) +{ + u32 tmp; + + /* Enable LBPW only for DDR3 */ + tmp = RREG32(MC_SEQ_MISC0); + if ((tmp & 0xF0000000) == 0xB0000000) + return true; + return false; +} + +static void si_enable_lbpw(struct radeon_device *rdev, bool enable) +{ + u32 tmp; + + tmp = RREG32(RLC_LB_CNTL); + if (enable) + tmp |= LOAD_BALANCE_ENABLE; + else + tmp &= ~LOAD_BALANCE_ENABLE; + WREG32(RLC_LB_CNTL, tmp); + + if (!enable) { + si_select_se_sh(rdev, 0xffffffff, 0xffffffff); + WREG32(SPI_LB_CU_MASK, 0x00ff); + } } static int si_rlc_resume(struct radeon_device *rdev) { u32 i; - const __be32 *fw_data; if (!rdev->rlc_fw) return -EINVAL; si_rlc_stop(rdev); + si_rlc_reset(rdev); + + si_init_pg(rdev); + + si_init_cg(rdev); + WREG32(RLC_RL_BASE, 0); WREG32(RLC_RL_SIZE, 0); WREG32(RLC_LB_CNTL, 0); WREG32(RLC_LB_CNTR_MAX, 0xffffffff); WREG32(RLC_LB_CNTR_INIT, 0); - - WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8); - WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8); + WREG32(RLC_LB_INIT_CU_MASK, 0xffffffff); WREG32(RLC_MC_CNTL, 0); WREG32(RLC_UCODE_CNTL, 0); - fw_data = (const __be32 *)rdev->rlc_fw->data; - for (i = 0; i < SI_RLC_UCODE_SIZE; i++) { - WREG32(RLC_UCODE_ADDR, i); - WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++)); + if (rdev->new_fw) { + const struct rlc_firmware_header_v1_0 *hdr = + (const struct rlc_firmware_header_v1_0 *)rdev->rlc_fw->data; + u32 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; + const __le32 *fw_data = (const __le32 *) + (rdev->rlc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + + radeon_ucode_print_rlc_hdr(&hdr->header); + + for (i = 0; i < fw_size; i++) { + 
WREG32(RLC_UCODE_ADDR, i); + WREG32(RLC_UCODE_DATA, le32_to_cpup(fw_data++)); + } + } else { + const __be32 *fw_data = + (const __be32 *)rdev->rlc_fw->data; + for (i = 0; i < SI_RLC_UCODE_SIZE; i++) { + WREG32(RLC_UCODE_ADDR, i); + WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++)); + } } WREG32(RLC_UCODE_ADDR, 0); + si_enable_lbpw(rdev, si_lbpw_supported(rdev)); + si_rlc_start(rdev); return 0; @@ -4484,7 +5936,9 @@ { u32 tmp; - WREG32(CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); + tmp = RREG32(CP_INT_CNTL_RING0) & + (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); + WREG32(CP_INT_CNTL_RING0, tmp); WREG32(CP_INT_CNTL_RING1, 0); WREG32(CP_INT_CNTL_RING2, 0); tmp = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE; @@ -4492,6 +5946,7 @@ tmp = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE; WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, tmp); WREG32(GRBM_INT_CNTL, 0); + WREG32(SRBM_INT_CNTL, 0); if (rdev->num_crtc >= 2) { WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); @@ -4570,7 +6025,7 @@ WREG32(INTERRUPT_CNTL, interrupt_cntl); WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8); - rb_bufsz = drm_order(rdev->ih.ring_size / 4); + rb_bufsz = order_base_2(rdev->ih.ring_size / 4); ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE | IH_WPTR_OVERFLOW_CLEAR | @@ -4609,13 +6064,13 @@ int si_irq_set(struct radeon_device *rdev) { - u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE; + u32 cp_int_cntl; u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0; u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0; u32 hpd1 = 0, hpd2 = 0, hpd3 = 0, hpd4 = 0, hpd5 = 0, hpd6 = 0; u32 grbm_int_cntl = 0; - u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0; u32 dma_cntl, dma_cntl1; + u32 thermal_int = 0; if (!rdev->irq.installed) { WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); @@ -4629,18 +6084,24 @@ return 0; } + cp_int_cntl = RREG32(CP_INT_CNTL_RING0) & + (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); + if (!ASIC_IS_NODCE(rdev)) { - hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN; - hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN; - hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN; - hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN; - hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN; - hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN; + hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN); + hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN); + hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN); + hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN); + hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN); + hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN); } dma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE; dma_cntl1 = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE; + thermal_int = RREG32(CG_THERMAL_INT) & + ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW); + /* enable CP interrupts on all rings */ if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) { DRM_DEBUG("si_irq_set: sw int gfx\n"); @@ -4695,27 +6156,27 @@ } if (rdev->irq.hpd[0]) { DRM_DEBUG("si_irq_set: hpd 1\n"); - hpd1 |= DC_HPDx_INT_EN; + hpd1 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN; } if (rdev->irq.hpd[1]) { DRM_DEBUG("si_irq_set: hpd 2\n"); - hpd2 |= DC_HPDx_INT_EN; + hpd2 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN; } if (rdev->irq.hpd[2]) { 
DRM_DEBUG("si_irq_set: hpd 3\n"); - hpd3 |= DC_HPDx_INT_EN; + hpd3 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN; } if (rdev->irq.hpd[3]) { DRM_DEBUG("si_irq_set: hpd 4\n"); - hpd4 |= DC_HPDx_INT_EN; + hpd4 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN; } if (rdev->irq.hpd[4]) { DRM_DEBUG("si_irq_set: hpd 5\n"); - hpd5 |= DC_HPDx_INT_EN; + hpd5 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN; } if (rdev->irq.hpd[5]) { DRM_DEBUG("si_irq_set: hpd 6\n"); - hpd6 |= DC_HPDx_INT_EN; + hpd6 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN; } WREG32(CP_INT_CNTL_RING0, cp_int_cntl); @@ -4727,6 +6188,11 @@ WREG32(GRBM_INT_CNTL, grbm_int_cntl); + if (rdev->irq.dpm_thermal) { + DRM_DEBUG("dpm thermal\n"); + thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW; + } + if (rdev->num_crtc >= 2) { WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1); WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2); @@ -4741,16 +6207,22 @@ } if (rdev->num_crtc >= 2) { - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1); - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); } if (rdev->num_crtc >= 4) { - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3); - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); } if (rdev->num_crtc >= 6) { - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5); - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); } if (!ASIC_IS_NODCE(rdev)) { @@ -4762,6 +6234,11 @@ WREG32(DC_HPD6_INT_CONTROL, hpd6); } + WREG32(CG_THERMAL_INT, thermal_int); + + /* posting read */ + RREG32(SRBM_STATUS); + return 0; } @@ -4862,6 +6339,37 @@ tmp |= DC_HPDx_INT_ACK; WREG32(DC_HPD6_INT_CONTROL, tmp); } + + if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT) { + tmp = RREG32(DC_HPD1_INT_CONTROL); + tmp |= DC_HPDx_RX_INT_ACK; + WREG32(DC_HPD1_INT_CONTROL, tmp); + } + if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT) { + tmp = RREG32(DC_HPD2_INT_CONTROL); + tmp |= DC_HPDx_RX_INT_ACK; + WREG32(DC_HPD2_INT_CONTROL, tmp); + } + if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) { + tmp = RREG32(DC_HPD3_INT_CONTROL); + tmp |= DC_HPDx_RX_INT_ACK; + WREG32(DC_HPD3_INT_CONTROL, tmp); + } + if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) { + tmp = RREG32(DC_HPD4_INT_CONTROL); + tmp |= DC_HPDx_RX_INT_ACK; + WREG32(DC_HPD4_INT_CONTROL, tmp); + } + if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) { + tmp = RREG32(DC_HPD5_INT_CONTROL); + tmp |= DC_HPDx_RX_INT_ACK; + WREG32(DC_HPD5_INT_CONTROL, tmp); + } + if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) { + tmp = RREG32(DC_HPD5_INT_CONTROL); + tmp |= DC_HPDx_RX_INT_ACK; + WREG32(DC_HPD6_INT_CONTROL, tmp); + } } static void si_irq_disable(struct radeon_device *rdev) @@ -4895,12 +6403,13 @@ wptr = RREG32(IH_RB_WPTR); if (wptr & RB_OVERFLOW) { + wptr &= ~RB_OVERFLOW; /* When a ring buffer overflow happen start parsing interrupt * from the last not overwritten vector (wptr + 16). 
Hopefully * this should allow us to catchup. */ - dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n", - wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask); + dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n", + wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask); rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask; tmp = RREG32(IH_RB_CNTL); tmp |= IH_WPTR_OVERFLOW_CLEAR; @@ -4926,6 +6435,9 @@ u32 src_id, src_data, ring_id; u32 ring_index; bool queue_hotplug = false; + bool queue_dp = false; + bool queue_thermal = false; + u32 status, addr; if (!rdev->ih.enabled || rdev->shutdown) return IRQ_NONE; @@ -4957,23 +6469,27 @@ case 1: /* D1 vblank/vline */ switch (src_data) { case 0: /* D1 vblank */ - if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) { - if (rdev->irq.crtc_vblank_int[0]) { - drm_handle_vblank(rdev->ddev, 0); - rdev->pm.vblank_sync = true; - wake_up(&rdev->irq.vblank_queue); - } - if (atomic_read(&rdev->irq.pflip[0])) - radeon_crtc_handle_flip(rdev, 0); - rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT; - DRM_DEBUG("IH: D1 vblank\n"); + if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + if (rdev->irq.crtc_vblank_int[0]) { + drm_handle_vblank(rdev->ddev, 0); + rdev->pm.vblank_sync = true; + wake_up(&rdev->irq.vblank_queue); } + if (atomic_read(&rdev->irq.pflip[0])) + radeon_crtc_handle_vblank(rdev, 0); + rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT; + DRM_DEBUG("IH: D1 vblank\n"); + break; case 1: /* D1 vline */ - if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) { - rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT; - DRM_DEBUG("IH: D1 vline\n"); - } + if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT; + DRM_DEBUG("IH: D1 vline\n"); + break; default: DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); @@ -4983,23 +6499,27 @@ case 2: /* D2 vblank/vline */ switch (src_data) { case 0: /* D2 vblank */ - if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) { - if (rdev->irq.crtc_vblank_int[1]) { - drm_handle_vblank(rdev->ddev, 1); - rdev->pm.vblank_sync = true; - wake_up(&rdev->irq.vblank_queue); - } - if (atomic_read(&rdev->irq.pflip[1])) - radeon_crtc_handle_flip(rdev, 1); - rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT; - DRM_DEBUG("IH: D2 vblank\n"); + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + if (rdev->irq.crtc_vblank_int[1]) { + drm_handle_vblank(rdev->ddev, 1); + rdev->pm.vblank_sync = true; + wake_up(&rdev->irq.vblank_queue); } + if (atomic_read(&rdev->irq.pflip[1])) + radeon_crtc_handle_vblank(rdev, 1); + rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT; + DRM_DEBUG("IH: D2 vblank\n"); + break; case 1: /* D2 vline */ - if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) { - rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT; - DRM_DEBUG("IH: D2 vline\n"); - } + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT; + DRM_DEBUG("IH: D2 vline\n"); + break; default: 
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); @@ -5009,23 +6529,27 @@ case 3: /* D3 vblank/vline */ switch (src_data) { case 0: /* D3 vblank */ - if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) { - if (rdev->irq.crtc_vblank_int[2]) { - drm_handle_vblank(rdev->ddev, 2); - rdev->pm.vblank_sync = true; - wake_up(&rdev->irq.vblank_queue); - } - if (atomic_read(&rdev->irq.pflip[2])) - radeon_crtc_handle_flip(rdev, 2); - rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT; - DRM_DEBUG("IH: D3 vblank\n"); + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + if (rdev->irq.crtc_vblank_int[2]) { + drm_handle_vblank(rdev->ddev, 2); + rdev->pm.vblank_sync = true; + wake_up(&rdev->irq.vblank_queue); } + if (atomic_read(&rdev->irq.pflip[2])) + radeon_crtc_handle_vblank(rdev, 2); + rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT; + DRM_DEBUG("IH: D3 vblank\n"); + break; case 1: /* D3 vline */ - if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) { - rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT; - DRM_DEBUG("IH: D3 vline\n"); - } + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT; + DRM_DEBUG("IH: D3 vline\n"); + break; default: DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); @@ -5035,23 +6559,27 @@ case 4: /* D4 vblank/vline */ switch (src_data) { case 0: /* D4 vblank */ - if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) { - if (rdev->irq.crtc_vblank_int[3]) { - drm_handle_vblank(rdev->ddev, 3); - rdev->pm.vblank_sync = true; - wake_up(&rdev->irq.vblank_queue); - } - if (atomic_read(&rdev->irq.pflip[3])) - radeon_crtc_handle_flip(rdev, 3); - rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT; - DRM_DEBUG("IH: D4 vblank\n"); + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + if (rdev->irq.crtc_vblank_int[3]) { + drm_handle_vblank(rdev->ddev, 3); + rdev->pm.vblank_sync = true; + wake_up(&rdev->irq.vblank_queue); } + if (atomic_read(&rdev->irq.pflip[3])) + radeon_crtc_handle_vblank(rdev, 3); + rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT; + DRM_DEBUG("IH: D4 vblank\n"); + break; case 1: /* D4 vline */ - if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) { - rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT; - DRM_DEBUG("IH: D4 vline\n"); - } + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT; + DRM_DEBUG("IH: D4 vline\n"); + break; default: DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); @@ -5061,23 +6589,27 @@ case 5: /* D5 vblank/vline */ switch (src_data) { case 0: /* D5 vblank */ - if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) { - if (rdev->irq.crtc_vblank_int[4]) { - drm_handle_vblank(rdev->ddev, 4); - rdev->pm.vblank_sync = true; - wake_up(&rdev->irq.vblank_queue); - } - if (atomic_read(&rdev->irq.pflip[4])) - radeon_crtc_handle_flip(rdev, 4); - rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT; - 
DRM_DEBUG("IH: D5 vblank\n"); + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + if (rdev->irq.crtc_vblank_int[4]) { + drm_handle_vblank(rdev->ddev, 4); + rdev->pm.vblank_sync = true; + wake_up(&rdev->irq.vblank_queue); } + if (atomic_read(&rdev->irq.pflip[4])) + radeon_crtc_handle_vblank(rdev, 4); + rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT; + DRM_DEBUG("IH: D5 vblank\n"); + break; case 1: /* D5 vline */ - if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) { - rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT; - DRM_DEBUG("IH: D5 vline\n"); - } + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT; + DRM_DEBUG("IH: D5 vline\n"); + break; default: DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); @@ -5087,91 +6619,180 @@ case 6: /* D6 vblank/vline */ switch (src_data) { case 0: /* D6 vblank */ - if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) { - if (rdev->irq.crtc_vblank_int[5]) { - drm_handle_vblank(rdev->ddev, 5); - rdev->pm.vblank_sync = true; - wake_up(&rdev->irq.vblank_queue); - } - if (atomic_read(&rdev->irq.pflip[5])) - radeon_crtc_handle_flip(rdev, 5); - rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT; - DRM_DEBUG("IH: D6 vblank\n"); + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + if (rdev->irq.crtc_vblank_int[5]) { + drm_handle_vblank(rdev->ddev, 5); + rdev->pm.vblank_sync = true; + wake_up(&rdev->irq.vblank_queue); } + if (atomic_read(&rdev->irq.pflip[5])) + radeon_crtc_handle_vblank(rdev, 5); + rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT; + DRM_DEBUG("IH: D6 vblank\n"); + break; case 1: /* D6 vline */ - if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) { - rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT; - DRM_DEBUG("IH: D6 vline\n"); - } + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT; + DRM_DEBUG("IH: D6 vline\n"); + break; default: DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); break; } break; + case 8: /* D1 page flip */ + case 10: /* D2 page flip */ + case 12: /* D3 page flip */ + case 14: /* D4 page flip */ + case 16: /* D5 page flip */ + case 18: /* D6 page flip */ + DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1); + if (radeon_use_pflipirq > 0) + radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1); + break; case 42: /* HPD hotplug */ switch (src_data) { case 0: - if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) { - rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT; - queue_hotplug = true; - DRM_DEBUG("IH: HPD1\n"); - } + if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT; + queue_hotplug = true; + DRM_DEBUG("IH: HPD1\n"); + break; case 1: - if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) { - rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT; - queue_hotplug = true; - 
DRM_DEBUG("IH: HPD2\n"); - } + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT; + queue_hotplug = true; + DRM_DEBUG("IH: HPD2\n"); + break; case 2: - if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) { - rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT; - queue_hotplug = true; - DRM_DEBUG("IH: HPD3\n"); - } + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT; + queue_hotplug = true; + DRM_DEBUG("IH: HPD3\n"); + break; case 3: - if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) { - rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT; - queue_hotplug = true; - DRM_DEBUG("IH: HPD4\n"); - } + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT; + queue_hotplug = true; + DRM_DEBUG("IH: HPD4\n"); + break; case 4: - if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) { - rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT; - queue_hotplug = true; - DRM_DEBUG("IH: HPD5\n"); - } + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT; + queue_hotplug = true; + DRM_DEBUG("IH: HPD5\n"); + break; case 5: - if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) { - rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT; - queue_hotplug = true; - DRM_DEBUG("IH: HPD6\n"); - } + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT; + queue_hotplug = true; + DRM_DEBUG("IH: HPD6\n"); + + break; + case 6: + if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT; + queue_dp = true; + DRM_DEBUG("IH: HPD_RX 1\n"); + + break; + case 7: + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT; + queue_dp = true; + DRM_DEBUG("IH: HPD_RX 2\n"); + + break; + case 8: + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT; + queue_dp = true; + DRM_DEBUG("IH: HPD_RX 3\n"); + + break; + case 9: + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT; + queue_dp = true; + DRM_DEBUG("IH: HPD_RX 4\n"); + + break; + case 10: + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT; + queue_dp = true; + DRM_DEBUG("IH: HPD_RX 5\n"); + + break; + case 11: + if 
(!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT)) + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT; + queue_dp = true; + DRM_DEBUG("IH: HPD_RX 6\n"); + break; default: DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); break; } break; + case 96: + DRM_ERROR("SRBM_READ_ERROR: 0x%x\n", RREG32(SRBM_READ_ERROR)); + WREG32(SRBM_INT_ACK, 0x1); + break; case 124: /* UVD */ DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data); radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX); break; case 146: case 147: + addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR); + status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS); + /* reset addr and status */ + WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1); + if (addr == 0x0 && status == 0x0) + break; dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data); dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", - RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR)); + addr); dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", - RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS)); - /* reset addr and status */ - WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1); + status); + si_vm_decode_fault(rdev, status, addr); break; case 176: /* RINGID0 CP_INT */ radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX); @@ -5200,6 +6821,16 @@ DRM_DEBUG("IH: DMA trap\n"); radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX); break; + case 230: /* thermal low to high */ + DRM_DEBUG("IH: thermal low to high\n"); + rdev->pm.dpm.thermal.high_to_low = false; + queue_thermal = true; + break; + case 231: /* thermal high to low */ + DRM_DEBUG("IH: thermal high to low\n"); + rdev->pm.dpm.thermal.high_to_low = true; + queue_thermal = true; + break; case 233: /* GUI IDLE */ DRM_DEBUG("IH: GUI idle\n"); break; @@ -5215,11 +6846,15 @@ /* wptr/rptr are in bytes! */ rptr += 16; rptr &= rdev->ih.ptr_mask; + WREG32(IH_RB_RPTR, rptr); } + if (queue_dp) + schedule_work(&rdev->dp_work); if (queue_hotplug) - schedule_work(&rdev->hotplug_work); + schedule_delayed_work(&rdev->hotplug_work, 0); + if (queue_thermal && rdev->pm.dpm_enabled) + schedule_work(&rdev->pm.dpm.thermal.work); rdev->ih.rptr = rptr; - WREG32(IH_RB_RPTR, rdev->ih.rptr); atomic_set(&rdev->ih.lock, 0); /* make sure wptr hasn't changed while processing */ @@ -5230,80 +6865,6 @@ return IRQ_HANDLED; } -/** - * si_copy_dma - copy pages using the DMA engine - * - * @rdev: radeon_device pointer - * @src_offset: src GPU address - * @dst_offset: dst GPU address - * @num_gpu_pages: number of GPU pages to xfer - * @fence: radeon fence object - * - * Copy GPU paging using the DMA engine (SI). - * Used by the radeon ttm implementation to move pages if - * registered as the asic copy callback. 
- */ -int si_copy_dma(struct radeon_device *rdev, - uint64_t src_offset, uint64_t dst_offset, - unsigned num_gpu_pages, - struct radeon_fence **fence) -{ - struct radeon_semaphore *sem = NULL; - int ring_index = rdev->asic->copy.dma_ring_index; - struct radeon_ring *ring = &rdev->ring[ring_index]; - u32 size_in_bytes, cur_size_in_bytes; - int i, num_loops; - int r = 0; - - r = radeon_semaphore_create(rdev, &sem); - if (r) { - DRM_ERROR("radeon: moving bo (%d).\n", r); - return r; - } - - size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT); - num_loops = DIV_ROUND_UP(size_in_bytes, 0xfffff); - r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11); - if (r) { - DRM_ERROR("radeon: moving bo (%d).\n", r); - radeon_semaphore_free(rdev, &sem, NULL); - return r; - } - - if (radeon_fence_need_sync(*fence, ring->idx)) { - radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring, - ring->idx); - radeon_fence_note_sync(*fence, ring->idx); - } else { - radeon_semaphore_free(rdev, &sem, NULL); - } - - for (i = 0; i < num_loops; i++) { - cur_size_in_bytes = size_in_bytes; - if (cur_size_in_bytes > 0xFFFFF) - cur_size_in_bytes = 0xFFFFF; - size_in_bytes -= cur_size_in_bytes; - radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 1, 0, 0, cur_size_in_bytes)); - radeon_ring_write(ring, dst_offset & 0xffffffff); - radeon_ring_write(ring, src_offset & 0xffffffff); - radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff); - radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff); - src_offset += cur_size_in_bytes; - dst_offset += cur_size_in_bytes; - } - - r = radeon_fence_emit(rdev, fence, ring->idx); - if (r) { - radeon_ring_unlock_undo(rdev, ring); - return r; - } - - radeon_ring_unlock_commit(rdev, ring); - radeon_semaphore_free(rdev, &sem, *fence); - - return r; -} - /* * startup/shutdown callbacks */ @@ -5312,34 +6873,39 @@ struct radeon_ring *ring; int r; + /* enable pcie gen2/3 link */ + si_pcie_gen3_enable(rdev); + /* enable aspm */ + si_program_aspm(rdev); + + /* scratch needs to be initialized before MC */ + r = r600_vram_scratch_init(rdev); + if (r) + return r; + si_mc_program(rdev); - if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw || - !rdev->rlc_fw || !rdev->mc_fw) { - r = si_init_microcode(rdev); + if (!rdev->pm.dpm_enabled) { + r = si_mc_load_microcode(rdev); if (r) { - DRM_ERROR("Failed to load firmware!\n"); + DRM_ERROR("Failed to load MC firmware!\n"); return r; } } - r = si_mc_load_microcode(rdev); - if (r) { - DRM_ERROR("Failed to load MC firmware!\n"); - return r; - } - - r = r600_vram_scratch_init(rdev); - if (r) - return r; - r = si_pcie_gart_enable(rdev); if (r) return r; si_gpu_init(rdev); /* allocate rlc buffers */ - r = si_rlc_init(rdev); + if (rdev->family == CHIP_VERDE) { + rdev->rlc.reg_list = verde_rlc_save_restore_register_list; + rdev->rlc.reg_list_size = + (u32)ARRAY_SIZE(verde_rlc_save_restore_register_list); + } + rdev->rlc.cs_data = si_cs_data; + r = sumo_rlc_init(rdev); if (r) { DRM_ERROR("Failed to init rlc BOs!\n"); return r; @@ -5381,7 +6947,7 @@ } if (rdev->has_uvd) { - r = rv770_uvd_resume(rdev); + r = uvd_v2_2_resume(rdev); if (!r) { r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX); @@ -5392,6 +6958,22 @@ rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0; } + r = radeon_vce_resume(rdev); + if (!r) { + r = vce_v1_0_resume(rdev); + if (!r) + r = radeon_fence_driver_start_ring(rdev, + TN_RING_TYPE_VCE1_INDEX); + if (!r) + r = radeon_fence_driver_start_ring(rdev, + TN_RING_TYPE_VCE2_INDEX); + } + if (r) { + dev_err(rdev->dev, "VCE init error 
(%d).\n", r); + rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size = 0; + rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_size = 0; + } + /* Enable IRQ */ if (!rdev->irq.installed) { r = radeon_irq_kms_init(rdev); @@ -5409,38 +6991,31 @@ ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, - CP_RB0_RPTR, CP_RB0_WPTR, - 0, 0xfffff, RADEON_CP_PACKET2); + RADEON_CP_PACKET2); if (r) return r; ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]; r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET, - CP_RB1_RPTR, CP_RB1_WPTR, - 0, 0xfffff, RADEON_CP_PACKET2); + RADEON_CP_PACKET2); if (r) return r; ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]; r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET, - CP_RB2_RPTR, CP_RB2_WPTR, - 0, 0xfffff, RADEON_CP_PACKET2); + RADEON_CP_PACKET2); if (r) return r; ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET, - DMA_RB_RPTR + DMA0_REGISTER_OFFSET, - DMA_RB_WPTR + DMA0_REGISTER_OFFSET, - 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0)); + DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0)); if (r) return r; ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]; r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET, - DMA_RB_RPTR + DMA1_REGISTER_OFFSET, - DMA_RB_WPTR + DMA1_REGISTER_OFFSET, - 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0)); + DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0)); if (r) return r; @@ -5458,17 +7033,32 @@ if (rdev->has_uvd) { ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; if (ring->ring_size) { - r = radeon_ring_init(rdev, ring, ring->ring_size, - R600_WB_UVD_RPTR_OFFSET, - UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR, - 0, 0xfffff, RADEON_CP_PACKET2); + r = radeon_ring_init(rdev, ring, ring->ring_size, 0, + RADEON_CP_PACKET2); if (!r) - r = r600_uvd_init(rdev); + r = uvd_v1_0_init(rdev); if (r) DRM_ERROR("radeon: failed initializing UVD (%d).\n", r); } } + r = -ENOENT; + + ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX]; + if (ring->ring_size) + r = radeon_ring_init(rdev, ring, ring->ring_size, 0, + VCE_CMD_NO_OP); + + ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX]; + if (ring->ring_size) + r = radeon_ring_init(rdev, ring, ring->ring_size, 0, + VCE_CMD_NO_OP); + + if (!r) + r = vce_v1_0_init(rdev); + else if (r != -ENOENT) + DRM_ERROR("radeon: failed initializing VCE (%d).\n", r); + r = radeon_ib_pool_init(rdev); if (r) { dev_err(rdev->dev, "IB initialization failed (%d).\n", r); @@ -5481,6 +7071,10 @@ return r; } + r = radeon_audio_init(rdev); + if (r) + return r; + return 0; } @@ -5498,6 +7092,9 @@ /* init golden registers */ si_init_golden_registers(rdev); + if (rdev->pm.pm_method == PM_METHOD_DPM) + radeon_pm_resume(rdev); + rdev->accel_working = true; r = si_startup(rdev); if (r) { @@ -5512,13 +7109,18 @@ int si_suspend(struct radeon_device *rdev) { + radeon_pm_suspend(rdev); + radeon_audio_fini(rdev); radeon_vm_manager_fini(rdev); si_cp_enable(rdev, false); cayman_dma_stop(rdev); if (rdev->has_uvd) { - r600_uvd_stop(rdev); + uvd_v1_0_fini(rdev); radeon_uvd_suspend(rdev); + radeon_vce_suspend(rdev); } + si_fini_pg(rdev); + si_fini_cg(rdev); si_irq_suspend(rdev); radeon_wb_disable(rdev); si_pcie_gart_disable(rdev); @@ -5582,6 +7184,18 @@ if (r) return r; + if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw || + !rdev->rlc_fw || !rdev->mc_fw) { + r = si_init_microcode(rdev); + if (r) { + DRM_ERROR("Failed to load firmware!\n"); + return r; + } + } + + /* Initialize power 
management */ + radeon_pm_init(rdev); + ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; ring->ring_obj = NULL; r600_ring_init(rdev, ring, 1024 * 1024); @@ -5611,6 +7225,17 @@ } } + r = radeon_vce_init(rdev); + if (!r) { + ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX]; + ring->ring_obj = NULL; + r600_ring_init(rdev, ring, 4096); + + ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX]; + ring->ring_obj = NULL; + r600_ring_init(rdev, ring, 4096); + } + rdev->ih.ring_obj = NULL; r600_ih_ring_init(rdev, 64 * 1024); @@ -5625,7 +7250,7 @@ si_cp_fini(rdev); cayman_dma_fini(rdev); si_irq_fini(rdev); - si_rlc_fini(rdev); + sumo_rlc_fini(rdev); radeon_wb_fini(rdev); radeon_ib_pool_fini(rdev); radeon_vm_manager_fini(rdev); @@ -5648,17 +7273,21 @@ void si_fini(struct radeon_device *rdev) { + radeon_pm_fini(rdev); si_cp_fini(rdev); cayman_dma_fini(rdev); + si_fini_pg(rdev); + si_fini_cg(rdev); si_irq_fini(rdev); - si_rlc_fini(rdev); + sumo_rlc_fini(rdev); radeon_wb_fini(rdev); radeon_vm_manager_fini(rdev); radeon_ib_pool_fini(rdev); radeon_irq_kms_fini(rdev); if (rdev->has_uvd) { - r600_uvd_stop(rdev); + uvd_v1_0_fini(rdev); radeon_uvd_fini(rdev); + radeon_vce_fini(rdev); } si_pcie_gart_fini(rdev); r600_vram_scratch_fini(rdev); @@ -5776,8 +7405,491 @@ mdelay(100); - /* posting read */ - RREG32(SRBM_STATUS); + return 0; +} + +static void si_pcie_gen3_enable(struct radeon_device *rdev) +{ + struct pci_dev *root = rdev->pdev->bus->self; + int bridge_pos, gpu_pos; + u32 speed_cntl, mask, current_data_rate; + int ret, i; + u16 tmp16; + + if (pci_is_root_bus(rdev->pdev->bus)) + return; + + if (radeon_pcie_gen2 == 0) + return; + + if (rdev->flags & RADEON_IS_IGP) + return; + + if (!(rdev->flags & RADEON_IS_PCIE)) + return; + + ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask); + if (ret != 0) + return; + + if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80))) + return; + + speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL); + current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >> + LC_CURRENT_DATA_RATE_SHIFT; + if (mask & DRM_PCIE_SPEED_80) { + if (current_data_rate == 2) { + DRM_INFO("PCIE gen 3 link speeds already enabled\n"); + return; + } + DRM_INFO("enabling PCIE gen 3 link speeds, disable with radeon.pcie_gen2=0\n"); + } else if (mask & DRM_PCIE_SPEED_50) { + if (current_data_rate == 1) { + DRM_INFO("PCIE gen 2 link speeds already enabled\n"); + return; + } + DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n"); + } + + bridge_pos = pci_pcie_cap(root); + if (!bridge_pos) + return; + + gpu_pos = pci_pcie_cap(rdev->pdev); + if (!gpu_pos) + return; + + if (mask & DRM_PCIE_SPEED_80) { + /* re-try equalization if gen3 is not already enabled */ + if (current_data_rate != 2) { + u16 bridge_cfg, gpu_cfg; + u16 bridge_cfg2, gpu_cfg2; + u32 max_lw, current_lw, tmp; + + pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg); + pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg); + + tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD; + pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16); + + tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD; + pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16); + + tmp = RREG32_PCIE(PCIE_LC_STATUS1); + max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT; + current_lw = (tmp & LC_OPERATING_LINK_WIDTH_MASK) >> LC_OPERATING_LINK_WIDTH_SHIFT; + + if (current_lw < max_lw) { + tmp = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL); + if (tmp & LC_RENEGOTIATION_SUPPORT) { + tmp &= ~(LC_LINK_WIDTH_MASK | 
LC_UPCONFIGURE_DIS);
+					tmp |= (max_lw << LC_LINK_WIDTH_SHIFT);
+					tmp |= LC_UPCONFIGURE_SUPPORT | LC_RENEGOTIATE_EN | LC_RECONFIG_NOW;
+					WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, tmp);
+				}
+			}
+
+			for (i = 0; i < 10; i++) {
+				/* check status */
+				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16);
+				if (tmp16 & PCI_EXP_DEVSTA_TRPND)
+					break;
+
+				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
+				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+
+				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2);
+				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2);
+
+				tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
+				tmp |= LC_SET_QUIESCE;
+				WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
+
+				tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
+				tmp |= LC_REDO_EQ;
+				WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
+
+				mdelay(100);
+
+				/* linkctl */
+				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
+				tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
+				tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
+				pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+
+				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16);
+				tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
+				tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
+				pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+
+				/* linkctl2 */
+				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16);
+				tmp16 &= ~((1 << 4) | (7 << 9));
+				tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9)));
+				pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16);
+
+				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
+				tmp16 &= ~((1 << 4) | (7 << 9));
+				tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9)));
+				pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+
+				tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
+				tmp &= ~LC_SET_QUIESCE;
+				WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
+			}
+		}
+	}
+
+	/* set the link speed */
+	speed_cntl |= LC_FORCE_EN_SW_SPEED_CHANGE | LC_FORCE_DIS_HW_SPEED_CHANGE;
+	speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE;
+	WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+	pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
+	tmp16 &= ~0xf;
+	if (mask & DRM_PCIE_SPEED_80)
+		tmp16 |= 3; /* gen3 */
+	else if (mask & DRM_PCIE_SPEED_50)
+		tmp16 |= 2; /* gen2 */
+	else
+		tmp16 |= 1; /* gen1 */
+	pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+
+	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+	speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE;
+	WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+		if ((speed_cntl & LC_INITIATE_LINK_SPEED_CHANGE) == 0)
+			break;
+		udelay(1);
+	}
+}
+
+static void si_program_aspm(struct radeon_device *rdev)
+{
+	u32 data, orig;
+	bool disable_l0s = false, disable_l1 = false, disable_plloff_in_l1 = false;
+	bool disable_clkreq = false;
+
+	if (radeon_aspm == 0)
+		return;
+
+	if (!(rdev->flags & RADEON_IS_PCIE))
+		return;
+
+	orig = data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
+	data &= ~LC_XMIT_N_FTS_MASK;
+	data |= LC_XMIT_N_FTS(0x24) | LC_XMIT_N_FTS_OVERRIDE_EN;
+	if (orig != data)
+		WREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL, data);
+
+	orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL3);
+	data |= LC_GO_TO_RECOVERY;
+	if (orig != data)
+		WREG32_PCIE_PORT(PCIE_LC_CNTL3, data);
+
+	orig = data = RREG32_PCIE(PCIE_P_CNTL);
+	data |= P_IGNORE_EDB_ERR;
+	if (orig != data)
+		WREG32_PCIE(PCIE_P_CNTL, data);
+
+	orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
+	data &= ~(LC_L0S_INACTIVITY_MASK | LC_L1_INACTIVITY_MASK);
+	data |= LC_PMI_TO_L1_DIS;
+	if (!disable_l0s)
+		data |= LC_L0S_INACTIVITY(7);
+
+	if (!disable_l1) {
+		data |= LC_L1_INACTIVITY(7);
+		data &= ~LC_PMI_TO_L1_DIS;
+		if (orig != data)
+			WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
+
+		if (!disable_plloff_in_l1) {
+			bool clk_req_support;
+
+			orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0);
+			data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
+			data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
+			if (orig != data)
+				WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0, data);
+
+			orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1);
+			data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
+			data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
+			if (orig != data)
+				WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1, data);
+
+			orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0);
+			data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
+			data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
+			if (orig != data)
+				WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0, data);
+
+			orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1);
+			data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
+			data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
+			if (orig != data)
+				WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1, data);
+
+			if ((rdev->family != CHIP_OLAND) && (rdev->family != CHIP_HAINAN)) {
+				orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0);
+				data &= ~PLL_RAMP_UP_TIME_0_MASK;
+				if (orig != data)
+					WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0, data);
+
+				orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1);
+				data &= ~PLL_RAMP_UP_TIME_1_MASK;
+				if (orig != data)
+					WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1, data);
+
+				orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_2);
+				data &= ~PLL_RAMP_UP_TIME_2_MASK;
+				if (orig != data)
+					WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_2, data);
+
+				orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_3);
+				data &= ~PLL_RAMP_UP_TIME_3_MASK;
+				if (orig != data)
+					WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_3, data);
+
+				orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0);
+				data &= ~PLL_RAMP_UP_TIME_0_MASK;
+				if (orig != data)
+					WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0, data);
+
+				orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1);
+				data &= ~PLL_RAMP_UP_TIME_1_MASK;
+				if (orig != data)
+					WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1, data);
+
+				orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_2);
+				data &= ~PLL_RAMP_UP_TIME_2_MASK;
+				if (orig != data)
+					WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_2, data);
+
+				orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_3);
+				data &= ~PLL_RAMP_UP_TIME_3_MASK;
+				if (orig != data)
+					WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_3, data);
+			}
+			orig = data = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+			data &= ~LC_DYN_LANES_PWR_STATE_MASK;
+			data |= LC_DYN_LANES_PWR_STATE(3);
+			if (orig != data)
+				WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, data);
+
+			orig = data = RREG32_PIF_PHY0(PB0_PIF_CNTL);
+			data &= ~LS2_EXIT_TIME_MASK;
+			if ((rdev->family == CHIP_OLAND) || (rdev->family == CHIP_HAINAN))
+				data |= LS2_EXIT_TIME(5);
+			if (orig != data)
+				WREG32_PIF_PHY0(PB0_PIF_CNTL, data);
+
+			orig = data = RREG32_PIF_PHY1(PB1_PIF_CNTL);
+			data &= ~LS2_EXIT_TIME_MASK;
+			if ((rdev->family == CHIP_OLAND) || (rdev->family == CHIP_HAINAN))
+				data |= LS2_EXIT_TIME(5);
+			if (orig != data)
+				WREG32_PIF_PHY1(PB1_PIF_CNTL, data);
+
+			if (!disable_clkreq &&
+			    !pci_is_root_bus(rdev->pdev->bus)) {
+				struct pci_dev *root = rdev->pdev->bus->self;
+				u32 lnkcap;
+
+				clk_req_support = false;
+				pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
+				if (lnkcap & PCI_EXP_LNKCAP_CLKPM)
+					clk_req_support = true;
+			} else {
+				clk_req_support = false;
+			}
+
+			if (clk_req_support) {
+				orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL2);
+				data |= LC_ALLOW_PDWN_IN_L1 | LC_ALLOW_PDWN_IN_L23;
+				if (orig != data)
+					WREG32_PCIE_PORT(PCIE_LC_CNTL2, data);
+
+				orig = data = RREG32(THM_CLK_CNTL);
+				data &= ~(CMON_CLK_SEL_MASK | TMON_CLK_SEL_MASK);
+				data |= CMON_CLK_SEL(1) | TMON_CLK_SEL(1);
+				if (orig != data)
+					WREG32(THM_CLK_CNTL, data);
+
+				orig = data = RREG32(MISC_CLK_CNTL);
+				data &= ~(DEEP_SLEEP_CLK_SEL_MASK | ZCLK_SEL_MASK);
+				data |= DEEP_SLEEP_CLK_SEL(1) | ZCLK_SEL(1);
+				if (orig != data)
+					WREG32(MISC_CLK_CNTL, data);
+
+				orig = data = RREG32(CG_CLKPIN_CNTL);
+				data &= ~BCLK_AS_XCLK;
+				if (orig != data)
+					WREG32(CG_CLKPIN_CNTL, data);
+
+				orig = data = RREG32(CG_CLKPIN_CNTL_2);
+				data &= ~FORCE_BIF_REFCLK_EN;
+				if (orig != data)
+					WREG32(CG_CLKPIN_CNTL_2, data);
+
+				orig = data = RREG32(MPLL_BYPASSCLK_SEL);
+				data &= ~MPLL_CLKOUT_SEL_MASK;
+				data |= MPLL_CLKOUT_SEL(4);
+				if (orig != data)
+					WREG32(MPLL_BYPASSCLK_SEL, data);
+
+				orig = data = RREG32(SPLL_CNTL_MODE);
+				data &= ~SPLL_REFCLK_SEL_MASK;
+				if (orig != data)
+					WREG32(SPLL_CNTL_MODE, data);
+			}
+		}
+	} else {
+		if (orig != data)
+			WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
+	}
+
+	orig = data = RREG32_PCIE(PCIE_CNTL2);
+	data |= SLV_MEM_LS_EN | MST_MEM_LS_EN | REPLAY_MEM_LS_EN;
+	if (orig != data)
+		WREG32_PCIE(PCIE_CNTL2, data);
+
+	if (!disable_l0s) {
+		data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
+		if((data & LC_N_FTS_MASK) == LC_N_FTS_MASK) {
+			data = RREG32_PCIE(PCIE_LC_STATUS1);
+			if ((data & LC_REVERSE_XMIT) && (data & LC_REVERSE_RCVR)) {
+				orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
+				data &= ~LC_L0S_INACTIVITY_MASK;
+				if (orig != data)
+					WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
+			}
+		}
+	}
+}
+
+int si_vce_send_vcepll_ctlreq(struct radeon_device *rdev)
+{
+	unsigned i;
+
+	/* make sure VCEPLL_CTLREQ is deasserted */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~UPLL_CTLREQ_MASK);
+
+	mdelay(10);
+
+	/* assert UPLL_CTLREQ */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK);
+
+	/* wait for CTLACK and CTLACK2 to get asserted */
+	for (i = 0; i < 100; ++i) {
+		uint32_t mask = UPLL_CTLACK_MASK | UPLL_CTLACK2_MASK;
+		if ((RREG32_SMC(CG_VCEPLL_FUNC_CNTL) & mask) == mask)
+			break;
+		mdelay(10);
+	}
+
+	/* deassert UPLL_CTLREQ */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~UPLL_CTLREQ_MASK);
+
+	if (i == 100) {
+		DRM_ERROR("Timeout setting UVD clocks!\n");
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+int si_set_vce_clocks(struct radeon_device *rdev, u32 evclk, u32 ecclk)
+{
+	unsigned fb_div = 0, evclk_div = 0, ecclk_div = 0;
+	int r;
+
+	/* bypass evclk and ecclk with bclk */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_2,
+		     EVCLK_SRC_SEL(1) | ECCLK_SRC_SEL(1),
+		     ~(EVCLK_SRC_SEL_MASK | ECCLK_SRC_SEL_MASK));
+
+	/* put PLL in bypass mode */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_BYPASS_EN_MASK,
+		     ~VCEPLL_BYPASS_EN_MASK);
+
+	if (!evclk || !ecclk) {
+		/* keep the Bypass mode, put PLL to sleep */
+		WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_SLEEP_MASK,
+			     ~VCEPLL_SLEEP_MASK);
+		return 0;
+	}
+
+	r = radeon_uvd_calc_upll_dividers(rdev, evclk, ecclk, 125000, 250000,
+					  16384, 0x03FFFFFF, 0, 128, 5,
+					  &fb_div, &evclk_div, &ecclk_div);
+	if (r)
+		return r;
+
+	/* set RESET_ANTI_MUX to 0 */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_5, 0, ~RESET_ANTI_MUX_MASK);
+
+	/* set VCO_MODE to 1 */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_VCO_MODE_MASK,
+		     ~VCEPLL_VCO_MODE_MASK);
+
+	/* toggle VCEPLL_SLEEP to 1 then back to 0 */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_SLEEP_MASK,
+		     ~VCEPLL_SLEEP_MASK);
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_SLEEP_MASK);
+
+	/* deassert VCEPLL_RESET */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_RESET_MASK);
+
+	mdelay(1);
+
+	r = si_vce_send_vcepll_ctlreq(rdev);
+	if (r)
+		return r;
+
+	/* assert VCEPLL_RESET again */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_RESET_MASK, ~VCEPLL_RESET_MASK);
+
+	/* disable spread spectrum. */
+	WREG32_SMC_P(CG_VCEPLL_SPREAD_SPECTRUM, 0, ~SSEN_MASK);
+
+	/* set feedback divider */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_3, VCEPLL_FB_DIV(fb_div), ~VCEPLL_FB_DIV_MASK);
+
+	/* set ref divider to 0 */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_REF_DIV_MASK);
+
+	/* set PDIV_A and PDIV_B */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_2,
+		     VCEPLL_PDIV_A(evclk_div) | VCEPLL_PDIV_B(ecclk_div),
+		     ~(VCEPLL_PDIV_A_MASK | VCEPLL_PDIV_B_MASK));
+
+	/* give the PLL some time to settle */
+	mdelay(15);
+
+	/* deassert PLL_RESET */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_RESET_MASK);
+
+	mdelay(15);
+
+	/* switch from bypass mode to normal mode */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_BYPASS_EN_MASK);
+
+	r = si_vce_send_vcepll_ctlreq(rdev);
+	if (r)
+		return r;
+
+	/* switch VCLK and DCLK selection */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_2,
+		     EVCLK_SRC_SEL(16) | ECCLK_SRC_SEL(16),
+		     ~(EVCLK_SRC_SEL_MASK | ECCLK_SRC_SEL_MASK));
+
+	mdelay(100);
 
 	return 0;
 }
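Editor's note: nearly every register update in si_program_aspm() above follows the same read-modify-write idiom: read the register, clear and set the relevant fields, and write it back only when the value actually changed, so untouched registers never see a spurious MMIO write. The helper below is a minimal, self-contained sketch of that idiom; the helper name and the generic readl()/writel() accessors are illustrative assumptions, since the patch itself open-codes the pattern with the driver's RREG32_xxx()/WREG32_xxx() wrappers.

/* Illustrative sketch only: rmw_reg32() is a hypothetical helper showing the
 * "orig = data = read; modify; write back only if changed" idiom used
 * throughout si_program_aspm(). The driver open-codes this pattern with its
 * own register accessors instead of a helper like this.
 */
#include <linux/io.h>
#include <linux/types.h>

static void rmw_reg32(void __iomem *reg, u32 clear, u32 set)
{
	u32 orig, data;

	orig = data = readl(reg);	/* read current value */
	data &= ~clear;			/* clear the fields being reprogrammed */
	data |= set;			/* apply the new field values */
	if (orig != data)		/* skip the MMIO write if nothing changed */
		writel(data, reg);
}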