--- zzzz-none-000/linux-3.10.107/drivers/gpu/drm/radeon/ni.c	2017-06-27 09:49:32.000000000 +0000
+++ scorpion-7490-727/linux-3.10.107/drivers/gpu/drm/radeon/ni.c	2021-02-04 17:41:59.000000000 +0000
@@ -22,17 +22,170 @@
  * Authors: Alex Deucher
  */
 #include <linux/firmware.h>
-#include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <drm/drmP.h>
 #include "radeon.h"
 #include "radeon_asic.h"
+#include "radeon_audio.h"
 #include <drm/radeon_drm.h>
 #include "nid.h"
 #include "atom.h"
 #include "ni_reg.h"
 #include "cayman_blit_shaders.h"
+#include "radeon_ucode.h"
+#include "clearstate_cayman.h"
+
+/*
+ * Indirect registers accessor
+ */
+u32 tn_smc_rreg(struct radeon_device *rdev, u32 reg)
+{
+	unsigned long flags;
+	u32 r;
+
+	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
+	WREG32(TN_SMC_IND_INDEX_0, (reg));
+	r = RREG32(TN_SMC_IND_DATA_0);
+	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
+	return r;
+}
+
+void tn_smc_wreg(struct radeon_device *rdev, u32 reg, u32 v)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
+	WREG32(TN_SMC_IND_INDEX_0, (reg));
+	WREG32(TN_SMC_IND_DATA_0, (v));
+	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
+}
+
+static const u32 tn_rlc_save_restore_register_list[] =
+{
+	0x98fc,
+	0x98f0,
+	0x9834,
+	0x9838,
+	0x9870,
+	0x9874,
+	0x8a14,
+	0x8b24,
+	0x8bcc,
+	0x8b10,
+	0x8c30,
+	0x8d00,
+	0x8d04,
+	0x8c00,
+	0x8c04,
+	0x8c10,
+	0x8c14,
+	0x8d8c,
+	0x8cf0,
+	0x8e38,
+	0x9508,
+	0x9688,
+	0x9608,
+	0x960c,
+	0x9610,
+	0x9614,
+	0x88c4,
+	0x8978,
+	0x88d4,
+	0x900c,
+	0x9100,
+	0x913c,
+	0x90e8,
+	0x9354,
+	0xa008,
+	0x98f8,
+	0x9148,
+	0x914c,
+	0x3f94,
+	0x98f4,
+	0x9b7c,
+	0x3f8c,
+	0x8950,
+	0x8954,
+	0x8a18,
+	0x8b28,
+	0x9144,
+	0x3f90,
+	0x915c,
+	0x9160,
+	0x9178,
+	0x917c,
+	0x9180,
+	0x918c,
+	0x9190,
+	0x9194,
+	0x9198,
+	0x919c,
+	0x91a8,
+	0x91ac,
+	0x91b0,
+	0x91b4,
+	0x91b8,
+	0x91c4,
+	0x91c8,
+	0x91cc,
+	0x91d0,
+	0x91d4,
+	0x91e0,
+	0x91e4,
+	0x91ec,
+	0x91f0,
+	0x91f4,
+	0x9200,
+	0x9204,
+	0x929c,
+	0x8030,
+	0x9150,
+	0x9a60,
+	0x920c,
+	0x9210,
+	0x9228,
+	0x922c,
+	0x9244,
+	0x9248,
+	0x91e8,
+	0x9294,
+	0x9208,
+	0x9224,
+	0x9240,
+	0x9220,
+	0x923c,
+	0x9258,
+	0x9744,
+	0xa200,
+	0xa204,
+	0xa208,
+	0xa20c,
+	0x8d58,
+	0x9030,
+	0x9034,
+	0x9038,
+	0x903c,
+	0x9040,
+	0x9654,
+	0x897c,
+	0xa210,
+	0xa214,
+	0x9868,
+	0xa02c,
+	0x9664,
+	0x9698,
+	0x949c,
+	0x8e10,
+	0x8e18,
+	0x8c50,
+	0x8c58,
+	0x8c60,
+	0x8c68,
+	0x89b4,
+	0x9830,
+	0x802c,
+};
 
 extern bool evergreen_is_display_hung(struct radeon_device *rdev);
 extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
@@ -44,36 +197,30 @@
 extern int evergreen_mc_init(struct radeon_device *rdev);
 extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
 extern void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
-extern void si_rlc_fini(struct radeon_device *rdev);
-extern int si_rlc_init(struct radeon_device *rdev);
-
-#define EVERGREEN_PFP_UCODE_SIZE 1120
-#define EVERGREEN_PM4_UCODE_SIZE 1376
-#define EVERGREEN_RLC_UCODE_SIZE 768
-#define BTC_MC_UCODE_SIZE 6024
-
-#define CAYMAN_PFP_UCODE_SIZE 2176
-#define CAYMAN_PM4_UCODE_SIZE 2176
-#define CAYMAN_RLC_UCODE_SIZE 1024
-#define CAYMAN_MC_UCODE_SIZE 6037
-
-#define ARUBA_RLC_UCODE_SIZE 1536
+extern void evergreen_program_aspm(struct radeon_device *rdev);
+extern void sumo_rlc_fini(struct radeon_device *rdev);
+extern int sumo_rlc_init(struct radeon_device *rdev);
+extern void evergreen_gpu_pci_config_reset(struct radeon_device *rdev);
 
 /* Firmware Names */
 MODULE_FIRMWARE("radeon/BARTS_pfp.bin");
 MODULE_FIRMWARE("radeon/BARTS_me.bin");
MODULE_FIRMWARE("radeon/BARTS_mc.bin"); +MODULE_FIRMWARE("radeon/BARTS_smc.bin"); MODULE_FIRMWARE("radeon/BTC_rlc.bin"); MODULE_FIRMWARE("radeon/TURKS_pfp.bin"); MODULE_FIRMWARE("radeon/TURKS_me.bin"); MODULE_FIRMWARE("radeon/TURKS_mc.bin"); +MODULE_FIRMWARE("radeon/TURKS_smc.bin"); MODULE_FIRMWARE("radeon/CAICOS_pfp.bin"); MODULE_FIRMWARE("radeon/CAICOS_me.bin"); MODULE_FIRMWARE("radeon/CAICOS_mc.bin"); +MODULE_FIRMWARE("radeon/CAICOS_smc.bin"); MODULE_FIRMWARE("radeon/CAYMAN_pfp.bin"); MODULE_FIRMWARE("radeon/CAYMAN_me.bin"); MODULE_FIRMWARE("radeon/CAYMAN_mc.bin"); MODULE_FIRMWARE("radeon/CAYMAN_rlc.bin"); +MODULE_FIRMWARE("radeon/CAYMAN_smc.bin"); MODULE_FIRMWARE("radeon/ARUBA_pfp.bin"); MODULE_FIRMWARE("radeon/ARUBA_me.bin"); MODULE_FIRMWARE("radeon/ARUBA_rlc.bin"); @@ -562,22 +709,15 @@ int ni_init_microcode(struct radeon_device *rdev) { - struct platform_device *pdev; const char *chip_name; const char *rlc_chip_name; size_t pfp_req_size, me_req_size, rlc_req_size, mc_req_size; + size_t smc_req_size = 0; char fw_name[30]; int err; DRM_DEBUG("\n"); - pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0); - err = IS_ERR(pdev); - if (err) { - printk(KERN_ERR "radeon_cp: Failed to register firmware\n"); - return -EINVAL; - } - switch (rdev->family) { case CHIP_BARTS: chip_name = "BARTS"; @@ -586,6 +726,7 @@ me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4; rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4; mc_req_size = BTC_MC_UCODE_SIZE * 4; + smc_req_size = ALIGN(BARTS_SMC_UCODE_SIZE, 4); break; case CHIP_TURKS: chip_name = "TURKS"; @@ -594,6 +735,7 @@ me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4; rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4; mc_req_size = BTC_MC_UCODE_SIZE * 4; + smc_req_size = ALIGN(TURKS_SMC_UCODE_SIZE, 4); break; case CHIP_CAICOS: chip_name = "CAICOS"; @@ -602,6 +744,7 @@ me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4; rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4; mc_req_size = BTC_MC_UCODE_SIZE * 4; + smc_req_size = ALIGN(CAICOS_SMC_UCODE_SIZE, 4); break; case CHIP_CAYMAN: chip_name = "CAYMAN"; @@ -610,6 +753,7 @@ me_req_size = CAYMAN_PM4_UCODE_SIZE * 4; rlc_req_size = CAYMAN_RLC_UCODE_SIZE * 4; mc_req_size = CAYMAN_MC_UCODE_SIZE * 4; + smc_req_size = ALIGN(CAYMAN_SMC_UCODE_SIZE, 4); break; case CHIP_ARUBA: chip_name = "ARUBA"; @@ -626,7 +770,7 @@ DRM_INFO("Loading %s Microcode\n", chip_name); snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name); - err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev); + err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev); if (err) goto out; if (rdev->pfp_fw->size != pfp_req_size) { @@ -638,7 +782,7 @@ } snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name); - err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev); + err = request_firmware(&rdev->me_fw, fw_name, rdev->dev); if (err) goto out; if (rdev->me_fw->size != me_req_size) { @@ -649,7 +793,7 @@ } snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name); - err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev); + err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev); if (err) goto out; if (rdev->rlc_fw->size != rlc_req_size) { @@ -662,7 +806,7 @@ /* no MC ucode on TN */ if (!(rdev->flags & RADEON_IS_IGP)) { snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); - err = request_firmware(&rdev->mc_fw, fw_name, &pdev->dev); + err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev); if (err) goto out; if (rdev->mc_fw->size != mc_req_size) { @@ -672,9 +816,26 @@ err = -EINVAL; } } -out: - 
platform_device_unregister(pdev); + if ((rdev->family >= CHIP_BARTS) && (rdev->family <= CHIP_CAYMAN)) { + snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name); + err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); + if (err) { + printk(KERN_ERR + "smc: error loading firmware \"%s\"\n", + fw_name); + release_firmware(rdev->smc_fw); + rdev->smc_fw = NULL; + err = 0; + } else if (rdev->smc_fw->size != smc_req_size) { + printk(KERN_ERR + "ni_mc: Bogus length %zu in firmware \"%s\"\n", + rdev->mc_fw->size, fw_name); + err = -EINVAL; + } + } + +out: if (err) { if (err != -EINVAL) printk(KERN_ERR @@ -692,6 +853,43 @@ return err; } +/** + * cayman_get_allowed_info_register - fetch the register for the info ioctl + * + * @rdev: radeon_device pointer + * @reg: register offset in bytes + * @val: register value + * + * Returns 0 for success or -EINVAL for an invalid register + * + */ +int cayman_get_allowed_info_register(struct radeon_device *rdev, + u32 reg, u32 *val) +{ + switch (reg) { + case GRBM_STATUS: + case GRBM_STATUS_SE0: + case GRBM_STATUS_SE1: + case SRBM_STATUS: + case SRBM_STATUS2: + case (DMA_STATUS_REG + DMA0_REGISTER_OFFSET): + case (DMA_STATUS_REG + DMA1_REGISTER_OFFSET): + case UVD_STATUS: + *val = RREG32(reg); + return 0; + default: + return -EINVAL; + } +} + +int tn_get_temp(struct radeon_device *rdev) +{ + u32 temp = RREG32_SMC(TN_CURRENT_GNB_TEMP) & 0x7ff; + int actual_temp = (temp / 8) - 49; + + return actual_temp * 1000; +} + /* * Core functions */ @@ -818,6 +1016,8 @@ } WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); + WREG32(SRBM_INT_CNTL, 0x1); + WREG32(SRBM_INT_ACK, 0x1); evergreen_fix_pci_max_read_req_size(rdev); @@ -914,6 +1114,18 @@ disabled_rb_mask &= ~(1 << i); } + for (i = 0; i < rdev->config.cayman.max_shader_engines; i++) { + u32 simd_disable_bitmap; + + WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i)); + WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i)); + simd_disable_bitmap = (RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffff0000) >> 16; + simd_disable_bitmap |= 0xffffffff << rdev->config.cayman.max_simds_per_se; + tmp <<= 16; + tmp |= simd_disable_bitmap; + } + rdev->config.cayman.active_simds = hweight32(~tmp); + WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES); WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES); @@ -1039,6 +1251,16 @@ WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3)); udelay(50); + + /* set clockgating golden values on TN */ + if (rdev->family == CHIP_ARUBA) { + tmp = RREG32_CG(CG_CGTT_LOCAL_0); + tmp &= ~0x00380000; + WREG32_CG(CG_CGTT_LOCAL_0, tmp); + tmp = RREG32_CG(CG_CGTT_LOCAL_1); + tmp &= ~0x0e000000; + WREG32_CG(CG_CGTT_LOCAL_1, tmp); + } } /* @@ -1064,7 +1286,6 @@ r = radeon_gart_table_vram_pin(rdev); if (r) return r; - radeon_gart_restore(rdev); /* Setup TLB control */ WREG32(MC_VM_MX_L1_TLB_CNTL, (0xA << 7) | @@ -1075,12 +1296,14 @@ SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU); /* Setup L2 cache */ WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | + ENABLE_L2_FRAGMENT_PROCESSING | ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE | ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE | EFFECTIVE_L2_QUEUE_SIZE(7) | CONTEXT1_IDENTITY_ACCESS_MODE(1)); WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE); WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY | + BANK_SELECT(6) | L2_CACHE_BIGK_FRAGMENT_SIZE(6)); /* setup context0 */ WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); @@ -1103,9 +1326,10 @@ */ for (i = 1; i < 8; i++) { 
WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0); - WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), rdev->vm_manager.max_pfn); + WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), + rdev->vm_manager.max_pfn - 1); WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2), - rdev->gart.table_addr >> 12); + rdev->vm_manager.saved_table_addr[i]); } /* enable context1-7 */ @@ -1113,6 +1337,7 @@ (u32)(rdev->dummy_page.addr >> 12)); WREG32(VM_CONTEXT1_CNTL2, 4); WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) | + PAGE_TABLE_BLOCK_SIZE(radeon_vm_block_size - 9) | RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT | RANGE_PROTECTION_FAULT_ENABLE_DEFAULT | DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT | @@ -1136,6 +1361,13 @@ static void cayman_pcie_gart_disable(struct radeon_device *rdev) { + unsigned i; + + for (i = 1; i < 8; ++i) { + rdev->vm_manager.saved_table_addr[i] = RREG32( + VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2)); + } + /* Disable all tables */ WREG32(VM_CONTEXT0_CNTL, 0); WREG32(VM_CONTEXT1_CNTL, 0); @@ -1164,9 +1396,7 @@ void cayman_cp_int_cntl_setup(struct radeon_device *rdev, int ring, u32 cp_int_cntl) { - u32 srbm_gfx_cntl = RREG32(SRBM_GFX_CNTL) & ~3; - - WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl | (ring & 3)); + WREG32(SRBM_GFX_CNTL, RINGID(ring)); WREG32(CP_INT_CNTL, cp_int_cntl); } @@ -1190,7 +1420,7 @@ /* EVENT_WRITE_EOP - flush caches, send int */ radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5)); - radeon_ring_write(ring, addr & 0xffffffff); + radeon_ring_write(ring, lower_32_bits(addr)); radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2)); radeon_ring_write(ring, fence->seq); radeon_ring_write(ring, 0); @@ -1199,6 +1429,7 @@ void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) { struct radeon_ring *ring = &rdev->ring[ib->ring]; + unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0; u32 cp_coher_cntl = PACKET3_FULL_CACHE_ENA | PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA; @@ -1221,32 +1452,14 @@ #endif (ib->gpu_addr & 0xFFFFFFFC)); radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF); - radeon_ring_write(ring, ib->length_dw | - (ib->vm ? (ib->vm->id << 24) : 0)); + radeon_ring_write(ring, ib->length_dw | (vm_id << 24)); /* flush read cache over gart for this vmid */ radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3)); radeon_ring_write(ring, PACKET3_ENGINE_ME | cp_coher_cntl); radeon_ring_write(ring, 0xFFFFFFFF); radeon_ring_write(ring, 0); - radeon_ring_write(ring, ((ib->vm ? ib->vm->id : 0) << 24) | 10); /* poll interval */ -} - -void cayman_uvd_semaphore_emit(struct radeon_device *rdev, - struct radeon_ring *ring, - struct radeon_semaphore *semaphore, - bool emit_wait) -{ - uint64_t addr = semaphore->gpu_addr; - - radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0)); - radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF); - - radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0)); - radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF); - - radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0)); - radeon_ring_write(ring, 0x80 | (emit_wait ? 
1 : 0)); + radeon_ring_write(ring, (vm_id << 24) | 10); /* poll interval */ } static void cayman_cp_enable(struct radeon_device *rdev, bool enable) @@ -1254,13 +1467,63 @@ if (enable) WREG32(CP_ME_CNTL, 0); else { - radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); + if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX) + radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT)); WREG32(SCRATCH_UMSK, 0); rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; } } +u32 cayman_gfx_get_rptr(struct radeon_device *rdev, + struct radeon_ring *ring) +{ + u32 rptr; + + if (rdev->wb.enabled) + rptr = rdev->wb.wb[ring->rptr_offs/4]; + else { + if (ring->idx == RADEON_RING_TYPE_GFX_INDEX) + rptr = RREG32(CP_RB0_RPTR); + else if (ring->idx == CAYMAN_RING_TYPE_CP1_INDEX) + rptr = RREG32(CP_RB1_RPTR); + else + rptr = RREG32(CP_RB2_RPTR); + } + + return rptr; +} + +u32 cayman_gfx_get_wptr(struct radeon_device *rdev, + struct radeon_ring *ring) +{ + u32 wptr; + + if (ring->idx == RADEON_RING_TYPE_GFX_INDEX) + wptr = RREG32(CP_RB0_WPTR); + else if (ring->idx == CAYMAN_RING_TYPE_CP1_INDEX) + wptr = RREG32(CP_RB1_WPTR); + else + wptr = RREG32(CP_RB2_WPTR); + + return wptr; +} + +void cayman_gfx_set_wptr(struct radeon_device *rdev, + struct radeon_ring *ring) +{ + if (ring->idx == RADEON_RING_TYPE_GFX_INDEX) { + WREG32(CP_RB0_WPTR, ring->wptr); + (void)RREG32(CP_RB0_WPTR); + } else if (ring->idx == CAYMAN_RING_TYPE_CP1_INDEX) { + WREG32(CP_RB1_WPTR, ring->wptr); + (void)RREG32(CP_RB1_WPTR); + } else { + WREG32(CP_RB2_WPTR, ring->wptr); + (void)RREG32(CP_RB2_WPTR); + } +} + static int cayman_cp_load_microcode(struct radeon_device *rdev) { const __be32 *fw_data; @@ -1305,7 +1568,7 @@ radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1)); radeon_ring_write(ring, 0); radeon_ring_write(ring, 0); - radeon_ring_unlock_commit(rdev, ring); + radeon_ring_unlock_commit(rdev, ring, false); cayman_cp_enable(rdev, true); @@ -1347,7 +1610,7 @@ radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */ radeon_ring_write(ring, 0x00000010); /* */ - radeon_ring_unlock_commit(rdev, ring); + radeon_ring_unlock_commit(rdev, ring, false); /* XXX init other rings */ @@ -1389,6 +1652,16 @@ CP_RB1_BASE, CP_RB2_BASE }; + static const unsigned cp_rb_rptr[] = { + CP_RB0_RPTR, + CP_RB1_RPTR, + CP_RB2_RPTR + }; + static const unsigned cp_rb_wptr[] = { + CP_RB0_WPTR, + CP_RB1_WPTR, + CP_RB2_WPTR + }; struct radeon_ring *ring; int i, r; @@ -1422,8 +1695,8 @@ /* Set ring buffer size */ ring = &rdev->ring[ridx[i]]; - rb_cntl = drm_order(ring->ring_size / 8); - rb_cntl |= drm_order(RADEON_GPU_PAGE_SIZE/8) << 8; + rb_cntl = order_base_2(ring->ring_size / 8); + rb_cntl |= order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8; #ifdef __BIG_ENDIAN rb_cntl |= BUF_SWAP_32BIT; #endif @@ -1446,9 +1719,9 @@ ring = &rdev->ring[ridx[i]]; WREG32_P(cp_rb_cntl[i], RB_RPTR_WR_ENA, ~RB_RPTR_WR_ENA); - ring->rptr = ring->wptr = 0; - WREG32(ring->rptr_reg, ring->rptr); - WREG32(ring->wptr_reg, ring->wptr); + ring->wptr = 0; + WREG32(cp_rb_rptr[i], 0); + WREG32(cp_rb_wptr[i], ring->wptr); mdelay(1); WREG32_P(cp_rb_cntl[i], 0, ~RB_RPTR_WR_ENA); @@ -1468,189 +1741,13 @@ return r; } - return 0; -} - -/* - * DMA - * Starting with R600, the GPU has an asynchronous - * DMA engine. 
The programming model is very similar - * to the 3D engine (ring buffer, IBs, etc.), but the - * DMA controller has it's own packet format that is - * different form the PM4 format used by the 3D engine. - * It supports copying data, writing embedded data, - * solid fills, and a number of other things. It also - * has support for tiling/detiling of buffers. - * Cayman and newer support two asynchronous DMA engines. - */ -/** - * cayman_dma_ring_ib_execute - Schedule an IB on the DMA engine - * - * @rdev: radeon_device pointer - * @ib: IB object to schedule - * - * Schedule an IB in the DMA ring (cayman-SI). - */ -void cayman_dma_ring_ib_execute(struct radeon_device *rdev, - struct radeon_ib *ib) -{ - struct radeon_ring *ring = &rdev->ring[ib->ring]; - - if (rdev->wb.enabled) { - u32 next_rptr = ring->wptr + 4; - while ((next_rptr & 7) != 5) - next_rptr++; - next_rptr += 3; - radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1)); - radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); - radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff); - radeon_ring_write(ring, next_rptr); - } - - /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring. - * Pad as necessary with NOPs. - */ - while ((ring->wptr & 7) != 5) - radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); - radeon_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, ib->vm ? ib->vm->id : 0, 0)); - radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0)); - radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF)); - -} - -/** - * cayman_dma_stop - stop the async dma engines - * - * @rdev: radeon_device pointer - * - * Stop the async dma engines (cayman-SI). - */ -void cayman_dma_stop(struct radeon_device *rdev) -{ - u32 rb_cntl; - - radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); - - /* dma0 */ - rb_cntl = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET); - rb_cntl &= ~DMA_RB_ENABLE; - WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, rb_cntl); - - /* dma1 */ - rb_cntl = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET); - rb_cntl &= ~DMA_RB_ENABLE; - WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, rb_cntl); - - rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false; - rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false; -} - -/** - * cayman_dma_resume - setup and start the async dma engines - * - * @rdev: radeon_device pointer - * - * Set up the DMA ring buffers and enable them. (cayman-SI). - * Returns 0 for success, error for failure. 
- */ -int cayman_dma_resume(struct radeon_device *rdev) -{ - struct radeon_ring *ring; - u32 rb_cntl, dma_cntl, ib_cntl; - u32 rb_bufsz; - u32 reg_offset, wb_offset; - int i, r; - - /* Reset dma */ - WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1); - RREG32(SRBM_SOFT_RESET); - udelay(50); - WREG32(SRBM_SOFT_RESET, 0); - - for (i = 0; i < 2; i++) { - if (i == 0) { - ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; - reg_offset = DMA0_REGISTER_OFFSET; - wb_offset = R600_WB_DMA_RPTR_OFFSET; - } else { - ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]; - reg_offset = DMA1_REGISTER_OFFSET; - wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET; - } - - WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0); - WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0); - - /* Set ring buffer size in dwords */ - rb_bufsz = drm_order(ring->ring_size / 4); - rb_cntl = rb_bufsz << 1; -#ifdef __BIG_ENDIAN - rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE; -#endif - WREG32(DMA_RB_CNTL + reg_offset, rb_cntl); - - /* Initialize the ring buffer's read and write pointers */ - WREG32(DMA_RB_RPTR + reg_offset, 0); - WREG32(DMA_RB_WPTR + reg_offset, 0); - - /* set the wb address whether it's enabled or not */ - WREG32(DMA_RB_RPTR_ADDR_HI + reg_offset, - upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFF); - WREG32(DMA_RB_RPTR_ADDR_LO + reg_offset, - ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC)); - - if (rdev->wb.enabled) - rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE; - - WREG32(DMA_RB_BASE + reg_offset, ring->gpu_addr >> 8); - - /* enable DMA IBs */ - ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE; -#ifdef __BIG_ENDIAN - ib_cntl |= DMA_IB_SWAP_ENABLE; -#endif - WREG32(DMA_IB_CNTL + reg_offset, ib_cntl); - - dma_cntl = RREG32(DMA_CNTL + reg_offset); - dma_cntl &= ~CTXEMPTY_INT_ENABLE; - WREG32(DMA_CNTL + reg_offset, dma_cntl); - - ring->wptr = 0; - WREG32(DMA_RB_WPTR + reg_offset, ring->wptr << 2); - - ring->rptr = RREG32(DMA_RB_RPTR + reg_offset) >> 2; - - WREG32(DMA_RB_CNTL + reg_offset, rb_cntl | DMA_RB_ENABLE); - - ring->ready = true; - - r = radeon_ring_test(rdev, ring->idx, ring); - if (r) { - ring->ready = false; - return r; - } - } - - radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); + if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX) + radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); return 0; } -/** - * cayman_dma_fini - tear down the async dma engines - * - * @rdev: radeon_device pointer - * - * Stop the async dma engines and free the rings (cayman-SI). 
- */ -void cayman_dma_fini(struct radeon_device *rdev) -{ - cayman_dma_stop(rdev); - radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]); - radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]); -} - -static u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev) +u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev) { u32 reset_mask = 0; u32 tmp; @@ -1873,8 +1970,10 @@ reset_mask = cayman_gpu_check_soft_reset(rdev); - if (!reset_mask) - r600_set_bios_scratch_engine_hung(rdev, false); + if (reset_mask) + evergreen_gpu_pci_config_reset(rdev); + + r600_set_bios_scratch_engine_hung(rdev, false); return 0; } @@ -1895,39 +1994,9 @@ if (!(reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE | RADEON_RESET_CP))) { - radeon_ring_lockup_update(ring); + radeon_ring_lockup_update(rdev, ring); return false; } - /* force CP activities */ - radeon_ring_force_activity(rdev, ring); - return radeon_ring_test_lockup(rdev, ring); -} - -/** - * cayman_dma_is_lockup - Check if the DMA engine is locked up - * - * @rdev: radeon_device pointer - * @ring: radeon_ring structure holding ring information - * - * Check if the async DMA engine is locked up. - * Returns true if the engine appears to be locked up, false if not. - */ -bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) -{ - u32 reset_mask = cayman_gpu_check_soft_reset(rdev); - u32 mask; - - if (ring->idx == R600_RING_TYPE_DMA_INDEX) - mask = RADEON_RESET_DMA; - else - mask = RADEON_RESET_DMA1; - - if (!(reset_mask & mask)) { - radeon_ring_lockup_update(ring); - return false; - } - /* force ring activities */ - radeon_ring_force_activity(rdev, ring); return radeon_ring_test_lockup(rdev, ring); } @@ -1938,26 +2007,17 @@ /* enable pcie gen2 link */ evergreen_pcie_gen2_enable(rdev); + /* enable aspm */ + evergreen_program_aspm(rdev); - evergreen_mc_program(rdev); + /* scratch needs to be initialized before MC */ + r = r600_vram_scratch_init(rdev); + if (r) + return r; - if (rdev->flags & RADEON_IS_IGP) { - if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { - r = ni_init_microcode(rdev); - if (r) { - DRM_ERROR("Failed to load firmware!\n"); - return r; - } - } - } else { - if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) { - r = ni_init_microcode(rdev); - if (r) { - DRM_ERROR("Failed to load firmware!\n"); - return r; - } - } + evergreen_mc_program(rdev); + if (!(rdev->flags & RADEON_IS_IGP) && !rdev->pm.dpm_enabled) { r = ni_mc_load_microcode(rdev); if (r) { DRM_ERROR("Failed to load MC firmware!\n"); @@ -1965,25 +2025,18 @@ } } - r = r600_vram_scratch_init(rdev); - if (r) - return r; - r = cayman_pcie_gart_enable(rdev); if (r) return r; cayman_gpu_init(rdev); - r = evergreen_blit_init(rdev); - if (r) { - r600_blit_fini(rdev); - rdev->asic->copy.copy = NULL; - dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); - } - /* allocate rlc buffers */ if (rdev->flags & RADEON_IS_IGP) { - r = si_rlc_init(rdev); + rdev->rlc.reg_list = tn_rlc_save_restore_register_list; + rdev->rlc.reg_list_size = + (u32)ARRAY_SIZE(tn_rlc_save_restore_register_list); + rdev->rlc.cs_data = cayman_cs_data; + r = sumo_rlc_init(rdev); if (r) { DRM_ERROR("Failed to init rlc BOs!\n"); return r; @@ -2001,7 +2054,7 @@ return r; } - r = rv770_uvd_resume(rdev); + r = uvd_v2_2_resume(rdev); if (!r) { r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX); @@ -2011,6 +2064,25 @@ if (r) rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0; + if (rdev->family == CHIP_ARUBA) { + r = 
radeon_vce_resume(rdev); + if (!r) + r = vce_v1_0_resume(rdev); + + if (!r) + r = radeon_fence_driver_start_ring(rdev, + TN_RING_TYPE_VCE1_INDEX); + if (!r) + r = radeon_fence_driver_start_ring(rdev, + TN_RING_TYPE_VCE2_INDEX); + + if (r) { + dev_err(rdev->dev, "VCE init error (%d).\n", r); + rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size = 0; + rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_size = 0; + } + } + r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX); if (r) { dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r); @@ -2051,24 +2123,19 @@ evergreen_irq_set(rdev); r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, - CP_RB0_RPTR, CP_RB0_WPTR, - 0, 0xfffff, RADEON_CP_PACKET2); + RADEON_CP_PACKET2); if (r) return r; ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET, - DMA_RB_RPTR + DMA0_REGISTER_OFFSET, - DMA_RB_WPTR + DMA0_REGISTER_OFFSET, - 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); + DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); if (r) return r; ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]; r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET, - DMA_RB_RPTR + DMA1_REGISTER_OFFSET, - DMA_RB_WPTR + DMA1_REGISTER_OFFSET, - 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); + DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); if (r) return r; @@ -2085,16 +2152,29 @@ ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; if (ring->ring_size) { - r = radeon_ring_init(rdev, ring, ring->ring_size, - R600_WB_UVD_RPTR_OFFSET, - UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR, - 0, 0xfffff, RADEON_CP_PACKET2); + r = radeon_ring_init(rdev, ring, ring->ring_size, 0, + RADEON_CP_PACKET2); if (!r) - r = r600_uvd_init(rdev); + r = uvd_v1_0_init(rdev); if (r) DRM_ERROR("radeon: failed initializing UVD (%d).\n", r); } + if (rdev->family == CHIP_ARUBA) { + ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX]; + if (ring->ring_size) + r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0); + + ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX]; + if (ring->ring_size) + r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0); + + if (!r) + r = vce_v1_0_init(rdev); + if (r) + DRM_ERROR("radeon: failed initializing VCE (%d).\n", r); + } + r = radeon_ib_pool_init(rdev); if (r) { dev_err(rdev->dev, "IB initialization failed (%d).\n", r); @@ -2107,7 +2187,7 @@ return r; } - r = r600_audio_init(rdev); + r = radeon_audio_init(rdev); if (r) return r; @@ -2128,6 +2208,9 @@ /* init golden registers */ ni_init_golden_registers(rdev); + if (rdev->pm.pm_method == PM_METHOD_DPM) + radeon_pm_resume(rdev); + rdev->accel_working = true; r = cayman_startup(rdev); if (r) { @@ -2140,11 +2223,12 @@ int cayman_suspend(struct radeon_device *rdev) { - r600_audio_fini(rdev); + radeon_pm_suspend(rdev); + radeon_audio_fini(rdev); radeon_vm_manager_fini(rdev); cayman_cp_enable(rdev, false); cayman_dma_stop(rdev); - r600_uvd_stop(rdev); + uvd_v1_0_fini(rdev); radeon_uvd_suspend(rdev); evergreen_irq_suspend(rdev); radeon_wb_disable(rdev); @@ -2207,6 +2291,27 @@ if (r) return r; + if (rdev->flags & RADEON_IS_IGP) { + if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { + r = ni_init_microcode(rdev); + if (r) { + DRM_ERROR("Failed to load firmware!\n"); + return r; + } + } + } else { + if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) { + r = ni_init_microcode(rdev); + if (r) { + DRM_ERROR("Failed to load firmware!\n"); + return r; + } + } + } + + /* Initialize power management */ + radeon_pm_init(rdev); + ring->ring_obj = 
NULL; r600_ring_init(rdev, ring, 1024 * 1024); @@ -2225,6 +2330,19 @@ r600_ring_init(rdev, ring, 4096); } + if (rdev->family == CHIP_ARUBA) { + r = radeon_vce_init(rdev); + if (!r) { + ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX]; + ring->ring_obj = NULL; + r600_ring_init(rdev, ring, 4096); + + ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX]; + ring->ring_obj = NULL; + r600_ring_init(rdev, ring, 4096); + } + } + rdev->ih.ring_obj = NULL; r600_ih_ring_init(rdev, 64 * 1024); @@ -2240,7 +2358,7 @@ cayman_dma_fini(rdev); r600_irq_fini(rdev); if (rdev->flags & RADEON_IS_IGP) - si_rlc_fini(rdev); + sumo_rlc_fini(rdev); radeon_wb_fini(rdev); radeon_ib_pool_fini(rdev); radeon_vm_manager_fini(rdev); @@ -2266,18 +2384,20 @@ void cayman_fini(struct radeon_device *rdev) { - r600_blit_fini(rdev); + radeon_pm_fini(rdev); cayman_cp_fini(rdev); cayman_dma_fini(rdev); r600_irq_fini(rdev); if (rdev->flags & RADEON_IS_IGP) - si_rlc_fini(rdev); + sumo_rlc_fini(rdev); radeon_wb_fini(rdev); radeon_vm_manager_fini(rdev); radeon_ib_pool_fini(rdev); radeon_irq_kms_fini(rdev); - r600_uvd_stop(rdev); + uvd_v1_0_fini(rdev); radeon_uvd_fini(rdev); + if (rdev->family == CHIP_ARUBA) + radeon_vce_fini(rdev); cayman_pcie_gart_fini(rdev); r600_vram_scratch_fini(rdev); radeon_gem_fini(rdev); @@ -2309,129 +2429,165 @@ { } -#define R600_ENTRY_VALID (1 << 0) -#define R600_PTE_SYSTEM (1 << 1) -#define R600_PTE_SNOOPED (1 << 2) -#define R600_PTE_READABLE (1 << 5) -#define R600_PTE_WRITEABLE (1 << 6) - -uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags) -{ - uint32_t r600_flags = 0; - r600_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_ENTRY_VALID : 0; - r600_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0; - r600_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0; - if (flags & RADEON_VM_PAGE_SYSTEM) { - r600_flags |= R600_PTE_SYSTEM; - r600_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0; - } - return r600_flags; -} - /** - * cayman_vm_set_page - update the page tables using the CP + * cayman_vm_decode_fault - print human readable fault info * * @rdev: radeon_device pointer - * @ib: indirect buffer to fill with commands - * @pe: addr of the page entry - * @addr: dst addr to write into pe - * @count: number of page entries to update - * @incr: increase next addr by incr bytes - * @flags: access flags + * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value + * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value * - * Update the page tables using the CP (cayman/TN). + * Print human readable fault information (cayman/TN). 
*/ -void cayman_vm_set_page(struct radeon_device *rdev, - struct radeon_ib *ib, - uint64_t pe, - uint64_t addr, unsigned count, - uint32_t incr, uint32_t flags) -{ - uint32_t r600_flags = cayman_vm_page_flags(rdev, flags); - uint64_t value; - unsigned ndw; - - if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) { - while (count) { - ndw = 1 + count * 2; - if (ndw > 0x3FFF) - ndw = 0x3FFF; - - ib->ptr[ib->length_dw++] = PACKET3(PACKET3_ME_WRITE, ndw); - ib->ptr[ib->length_dw++] = pe; - ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff; - for (; ndw > 1; ndw -= 2, --count, pe += 8) { - if (flags & RADEON_VM_PAGE_SYSTEM) { - value = radeon_vm_map_gart(rdev, addr); - value &= 0xFFFFFFFFFFFFF000ULL; - } else if (flags & RADEON_VM_PAGE_VALID) { - value = addr; - } else { - value = 0; - } - addr += incr; - value |= r600_flags; - ib->ptr[ib->length_dw++] = value; - ib->ptr[ib->length_dw++] = upper_32_bits(value); - } - } - } else { - if ((flags & RADEON_VM_PAGE_SYSTEM) || - (count == 1)) { - while (count) { - ndw = count * 2; - if (ndw > 0xFFFFE) - ndw = 0xFFFFE; - - /* for non-physically contiguous pages (system) */ - ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw); - ib->ptr[ib->length_dw++] = pe; - ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff; - for (; ndw > 0; ndw -= 2, --count, pe += 8) { - if (flags & RADEON_VM_PAGE_SYSTEM) { - value = radeon_vm_map_gart(rdev, addr); - value &= 0xFFFFFFFFFFFFF000ULL; - } else if (flags & RADEON_VM_PAGE_VALID) { - value = addr; - } else { - value = 0; - } - addr += incr; - value |= r600_flags; - ib->ptr[ib->length_dw++] = value; - ib->ptr[ib->length_dw++] = upper_32_bits(value); - } - } - while (ib->length_dw & 0x7) - ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0); - } else { - while (count) { - ndw = count * 2; - if (ndw > 0xFFFFE) - ndw = 0xFFFFE; - - if (flags & RADEON_VM_PAGE_VALID) - value = addr; - else - value = 0; - /* for physically contiguous pages (vram) */ - ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw); - ib->ptr[ib->length_dw++] = pe; /* dst addr */ - ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff; - ib->ptr[ib->length_dw++] = r600_flags; /* mask */ - ib->ptr[ib->length_dw++] = 0; - ib->ptr[ib->length_dw++] = value; /* value */ - ib->ptr[ib->length_dw++] = upper_32_bits(value); - ib->ptr[ib->length_dw++] = incr; /* increment size */ - ib->ptr[ib->length_dw++] = 0; - pe += ndw * 4; - addr += (ndw / 2) * incr; - count -= ndw / 2; - } - } - while (ib->length_dw & 0x7) - ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0); +void cayman_vm_decode_fault(struct radeon_device *rdev, + u32 status, u32 addr) +{ + u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT; + u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT; + u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT; + char *block; + + switch (mc_id) { + case 32: + case 16: + case 96: + case 80: + case 160: + case 144: + case 224: + case 208: + block = "CB"; + break; + case 33: + case 17: + case 97: + case 81: + case 161: + case 145: + case 225: + case 209: + block = "CB_FMASK"; + break; + case 34: + case 18: + case 98: + case 82: + case 162: + case 146: + case 226: + case 210: + block = "CB_CMASK"; + break; + case 35: + case 19: + case 99: + case 83: + case 163: + case 147: + case 227: + case 211: + block = "CB_IMMED"; + break; + case 36: + case 20: + case 100: + case 84: + case 164: + case 148: + case 228: + case 212: + block = "DB"; + break; + case 37: + case 21: + case 101: + case 
85:
+	case 165:
+	case 149:
+	case 229:
+	case 213:
+		block = "DB_HTILE";
+		break;
+	case 38:
+	case 22:
+	case 102:
+	case 86:
+	case 166:
+	case 150:
+	case 230:
+	case 214:
+		block = "SX";
+		break;
+	case 39:
+	case 23:
+	case 103:
+	case 87:
+	case 167:
+	case 151:
+	case 231:
+	case 215:
+		block = "DB_STEN";
+		break;
+	case 40:
+	case 24:
+	case 104:
+	case 88:
+	case 232:
+	case 216:
+	case 168:
+	case 152:
+		block = "TC_TFETCH";
+		break;
+	case 41:
+	case 25:
+	case 105:
+	case 89:
+	case 233:
+	case 217:
+	case 169:
+	case 153:
+		block = "TC_VFETCH";
+		break;
+	case 42:
+	case 26:
+	case 106:
+	case 90:
+	case 234:
+	case 218:
+	case 170:
+	case 154:
+		block = "VC";
+		break;
+	case 112:
+		block = "CP";
+		break;
+	case 113:
+	case 114:
+		block = "SH";
+		break;
+	case 115:
+		block = "VGT";
+		break;
+	case 178:
+		block = "IH";
+		break;
+	case 51:
+		block = "RLC";
+		break;
+	case 55:
+		block = "DMA";
+		break;
+	case 56:
+		block = "HDP";
+		break;
+	default:
+		block = "unknown";
+		break;
 	}
+
+	printk("VM fault (0x%02x, vmid %d) at page %u, %s from %s (%d)\n",
+	       protections, vmid, addr,
+	       (status & MEMORY_CLIENT_RW_MASK) ? "write" : "read",
+	       block, mc_id);
 }
 
 /**
@@ -2442,15 +2598,11 @@
  * Update the page table base and flush the VM TLB
  * using the CP (cayman-si).
  */
-void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
+void cayman_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
+		     unsigned vm_id, uint64_t pd_addr)
 {
-	struct radeon_ring *ring = &rdev->ring[ridx];
-
-	if (vm == NULL)
-		return;
-
-	radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2), 0));
-	radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
+	radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2), 0));
+	radeon_ring_write(ring, pd_addr >> 12);
 
 	/* flush hdp cache */
 	radeon_ring_write(ring, PACKET0(HDP_MEM_COHERENCY_FLUSH_CNTL, 0));
@@ -2458,32 +2610,50 @@
 
 	/* bits 0-7 are the VM contexts0-7 */
 	radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0));
-	radeon_ring_write(ring, 1 << vm->id);
+	radeon_ring_write(ring, 1 << vm_id);
+
+	/* wait for the invalidate to complete */
+	radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+	radeon_ring_write(ring, (WAIT_REG_MEM_FUNCTION(0) |  /* always */
+				 WAIT_REG_MEM_ENGINE(0))); /* me */
+	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0); /* ref */
+	radeon_ring_write(ring, 0); /* mask */
+	radeon_ring_write(ring, 0x20); /* poll interval */
 
 	/* sync PFP to ME, otherwise we might get invalid PFP reads */
 	radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
 	radeon_ring_write(ring, 0x0);
 }
 
-void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
+int tn_set_vce_clocks(struct radeon_device *rdev, u32 evclk, u32 ecclk)
 {
-	struct radeon_ring *ring = &rdev->ring[ridx];
+	struct atom_clock_dividers dividers;
+	int r, i;
 
-	if (vm == NULL)
-		return;
+	r = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+					   ecclk, false, &dividers);
+	if (r)
+		return r;
+
+	for (i = 0; i < 100; i++) {
+		if (RREG32(CG_ECLK_STATUS) & ECLK_STATUS)
+			break;
+		mdelay(10);
+	}
+	if (i == 100)
+		return -ETIMEDOUT;
 
-	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
-	radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
-	radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
+	WREG32_P(CG_ECLK_CNTL, dividers.post_div, ~(ECLK_DIR_CNTL_EN|ECLK_DIVIDER_MASK));
 
-	/* flush hdp cache */
-	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
-	radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
-	radeon_ring_write(ring, 1);
+	for (i = 0; i < 100; i++) {
+		if (RREG32(CG_ECLK_STATUS) & ECLK_STATUS)
+			break;
+		mdelay(10);
+	}
+	if (i == 100)
+		return -ETIMEDOUT;
 
-	/* bits 0-7 are the VM contexts0-7 */
-	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
-	radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
-	radeon_ring_write(ring, 1 << vm->id);
+	return 0;
 }
-
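The tn_smc_rreg()/tn_smc_wreg() accessors added at the top of this patch use the
classic index/data indirect-register pattern: one MMIO write to
TN_SMC_IND_INDEX_0 selects an internal SMC register, the following access to
TN_SMC_IND_DATA_0 reads or writes it, and rdev->smc_idx_lock keeps the
index/data pair atomic against concurrent callers. A minimal stand-alone model
of the pattern, with the MMIO pair simulated by an array and a pthread mutex
standing in for the spinlock (all names below are illustrative, not kernel
APIs):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t smc_regs[256];	/* simulated internal register file */
static uint32_t smc_index;	/* stands in for TN_SMC_IND_INDEX_0 */
static pthread_mutex_t smc_lock = PTHREAD_MUTEX_INITIALIZER;

static uint32_t smc_rreg(uint32_t reg)
{
	uint32_t v;

	/* hold the lock so nobody moves the index between the two accesses */
	pthread_mutex_lock(&smc_lock);
	smc_index = reg & 0xff;		/* WREG32(INDEX, reg) */
	v = smc_regs[smc_index];	/* RREG32(DATA) */
	pthread_mutex_unlock(&smc_lock);
	return v;
}

static void smc_wreg(uint32_t reg, uint32_t v)
{
	pthread_mutex_lock(&smc_lock);
	smc_index = reg & 0xff;		/* WREG32(INDEX, reg) */
	smc_regs[smc_index] = v;	/* WREG32(DATA, v) */
	pthread_mutex_unlock(&smc_lock);
}

int main(void)
{
	smc_wreg(0x42, 0xdeadbeef);
	printf("reg 0x42 = 0x%08x\n", smc_rreg(0x42));
	return 0;
}

Without the lock, a second caller could rewrite the index register between the
INDEX write and the DATA access, and the read would silently return the wrong
register.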
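tn_get_temp(), also new in this patch, converts the low 11 bits of
TN_CURRENT_GNB_TEMP into millidegrees Celsius: divide the raw field by 8,
subtract a 49-degree offset, then scale by 1000 for the hwmon interface. A
sketch of the same arithmetic, with the RREG32_SMC() read replaced by a
parameter so the conversion can be checked anywhere:

#include <stdint.h>
#include <stdio.h>

static int tn_decode_temp(uint32_t gnb_temp)
{
	uint32_t temp = gnb_temp & 0x7ff;	/* 11-bit raw field */
	int actual_temp = (temp / 8) - 49;	/* degrees Celsius */

	return actual_temp * 1000;		/* millidegrees for hwmon */
}

int main(void)
{
	/* raw 0x1f0 = 496: 496 / 8 = 62, 62 - 49 = 13 C -> 13000 */
	printf("%d\n", tn_decode_temp(0x1f0));
	return 0;
}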
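The removed cayman_dma_ring_ib_execute() documents the DMA ring alignment rule:
the 3-dword INDIRECT_BUFFER packet must end on an 8-dword boundary, so the code
pads with NOPs until (wptr & 7) == 5 and the packet then fills slots 5, 6 and 7.
A quick stand-alone check of that invariant (pad_nops() is a hypothetical
helper, not a radeon function):

#include <stdio.h>

/* count NOP dwords needed before a 3-dword packet so its last dword
 * lands on offset 7 of an 8-dword group, mirroring the removed loop */
static unsigned int pad_nops(unsigned int wptr)
{
	unsigned int n = 0;

	while (((wptr + n) & 7) != 5)
		n++;
	return n;
}

int main(void)
{
	unsigned int wptr;

	for (wptr = 0; wptr < 8; wptr++) {
		unsigned int start = wptr + pad_nops(wptr);

		printf("wptr&7=%u: %u NOPs, packet occupies slots %u..%u\n",
		       wptr & 7, pad_nops(wptr), start & 7, (start + 2) & 7);
	}
	return 0;
}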
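cayman_vm_decode_fault() turns the packed VM_CONTEXT1_PROTECTION_FAULT_STATUS
word into readable text: mask/shift pairs pull out the memory client id, the
vmid and the protection bits, and the big switch maps the client id onto a
block name. The extraction step has this shape; the field layout below is
invented for illustration, the real PROTECTIONS_MASK, FAULT_VMID_* and
MEMORY_CLIENT_ID_* definitions live in nid.h:

#include <stdint.h>
#include <stdio.h>

#define PROT_MASK	0x000000ffu	/* hypothetical layout */
#define PROT_SHIFT	0
#define VMID_MASK	0x00000f00u
#define VMID_SHIFT	8
#define MCID_MASK	0x00ff0000u
#define MCID_SHIFT	16

int main(void)
{
	/* client 112 ("CP" in the table above), vmid 3, low protection bits */
	uint32_t status = (112u << MCID_SHIFT) | (3u << VMID_SHIFT) | 0x02u;

	uint32_t mc_id = (status & MCID_MASK) >> MCID_SHIFT;
	uint32_t vmid = (status & VMID_MASK) >> VMID_SHIFT;
	uint32_t prot = (status & PROT_MASK) >> PROT_SHIFT;

	printf("mc_id=%u vmid=%u protections=0x%02x\n", mc_id, vmid, prot);
	return 0;
}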