--- zzzz-none-000/linux-3.10.107/drivers/gpu/drm/radeon/evergreen.c 2017-06-27 09:49:32.000000000 +0000 +++ scorpion-7490-727/linux-3.10.107/drivers/gpu/drm/radeon/evergreen.c 2021-02-04 17:41:59.000000000 +0000 @@ -22,20 +22,87 @@ * Authors: Alex Deucher */ #include -#include #include #include #include "radeon.h" #include "radeon_asic.h" +#include "radeon_audio.h" #include #include "evergreend.h" #include "atom.h" #include "avivod.h" #include "evergreen_reg.h" #include "evergreen_blit_shaders.h" +#include "radeon_ucode.h" -#define EVERGREEN_PFP_UCODE_SIZE 1120 -#define EVERGREEN_PM4_UCODE_SIZE 1376 +/* + * Indirect registers accessor + */ +u32 eg_cg_rreg(struct radeon_device *rdev, u32 reg) +{ + unsigned long flags; + u32 r; + + spin_lock_irqsave(&rdev->cg_idx_lock, flags); + WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff)); + r = RREG32(EVERGREEN_CG_IND_DATA); + spin_unlock_irqrestore(&rdev->cg_idx_lock, flags); + return r; +} + +void eg_cg_wreg(struct radeon_device *rdev, u32 reg, u32 v) +{ + unsigned long flags; + + spin_lock_irqsave(&rdev->cg_idx_lock, flags); + WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff)); + WREG32(EVERGREEN_CG_IND_DATA, (v)); + spin_unlock_irqrestore(&rdev->cg_idx_lock, flags); +} + +u32 eg_pif_phy0_rreg(struct radeon_device *rdev, u32 reg) +{ + unsigned long flags; + u32 r; + + spin_lock_irqsave(&rdev->pif_idx_lock, flags); + WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff)); + r = RREG32(EVERGREEN_PIF_PHY0_DATA); + spin_unlock_irqrestore(&rdev->pif_idx_lock, flags); + return r; +} + +void eg_pif_phy0_wreg(struct radeon_device *rdev, u32 reg, u32 v) +{ + unsigned long flags; + + spin_lock_irqsave(&rdev->pif_idx_lock, flags); + WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff)); + WREG32(EVERGREEN_PIF_PHY0_DATA, (v)); + spin_unlock_irqrestore(&rdev->pif_idx_lock, flags); +} + +u32 eg_pif_phy1_rreg(struct radeon_device *rdev, u32 reg) +{ + unsigned long flags; + u32 r; + + spin_lock_irqsave(&rdev->pif_idx_lock, flags); + WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff)); + r = RREG32(EVERGREEN_PIF_PHY1_DATA); + spin_unlock_irqrestore(&rdev->pif_idx_lock, flags); + return r; +} + +void eg_pif_phy1_wreg(struct radeon_device *rdev, u32 reg, u32 v) +{ + unsigned long flags; + + spin_lock_irqsave(&rdev->pif_idx_lock, flags); + WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff)); + WREG32(EVERGREEN_PIF_PHY1_DATA, (v)); + spin_unlock_irqrestore(&rdev->pif_idx_lock, flags); +} static const u32 crtc_offsets[6] = { @@ -47,11 +114,108 @@ EVERGREEN_CRTC5_REGISTER_OFFSET }; +#include "clearstate_evergreen.h" + +static const u32 sumo_rlc_save_restore_register_list[] = +{ + 0x98fc, + 0x9830, + 0x9834, + 0x9838, + 0x9870, + 0x9874, + 0x8a14, + 0x8b24, + 0x8bcc, + 0x8b10, + 0x8d00, + 0x8d04, + 0x8c00, + 0x8c04, + 0x8c08, + 0x8c0c, + 0x8d8c, + 0x8c20, + 0x8c24, + 0x8c28, + 0x8c18, + 0x8c1c, + 0x8cf0, + 0x8e2c, + 0x8e38, + 0x8c30, + 0x9508, + 0x9688, + 0x9608, + 0x960c, + 0x9610, + 0x9614, + 0x88c4, + 0x88d4, + 0xa008, + 0x900c, + 0x9100, + 0x913c, + 0x98f8, + 0x98f4, + 0x9b7c, + 0x3f8c, + 0x8950, + 0x8954, + 0x8a18, + 0x8b28, + 0x9144, + 0x9148, + 0x914c, + 0x3f90, + 0x3f94, + 0x915c, + 0x9160, + 0x9178, + 0x917c, + 0x9180, + 0x918c, + 0x9190, + 0x9194, + 0x9198, + 0x919c, + 0x91a8, + 0x91ac, + 0x91b0, + 0x91b4, + 0x91b8, + 0x91c4, + 0x91c8, + 0x91cc, + 0x91d0, + 0x91d4, + 0x91e0, + 0x91e4, + 0x91ec, + 0x91f0, + 0x91f4, + 0x9200, + 0x9204, + 0x929c, + 0x9150, + 0x802c, +}; + static void evergreen_gpu_init(struct radeon_device *rdev); void evergreen_fini(struct radeon_device *rdev); 
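/*
 * Usage sketch (editorial example, not part of the patch): the indirect
 * accessors added above all follow the same INDEX/DATA pattern under a
 * spinlock - write the register offset to the INDEX register, then access
 * the paired DATA register - so a read-modify-write of an indirect PIF
 * PHY0 register could look like the following.  FOO_CNTL and FOO_EN are
 * hypothetical placeholder names, not registers defined by this patch.
 */
static void example_pif_phy0_rmw(struct radeon_device *rdev)
{
	u32 tmp;

	/* selects FOO_CNTL via EVERGREEN_PIF_PHY0_INDEX and reads its value */
	tmp = eg_pif_phy0_rreg(rdev, FOO_CNTL);
	tmp |= FOO_EN;
	/* writes the modified value back through EVERGREEN_PIF_PHY0_DATA */
	eg_pif_phy0_wreg(rdev, FOO_CNTL, tmp);
}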
void evergreen_pcie_gen2_enable(struct radeon_device *rdev); +void evergreen_program_aspm(struct radeon_device *rdev); extern void cayman_cp_int_cntl_setup(struct radeon_device *rdev, int ring, u32 cp_int_cntl); +extern void cayman_vm_decode_fault(struct radeon_device *rdev, + u32 status, u32 addr); +void cik_init_cp_pg_table(struct radeon_device *rdev); + +extern u32 si_get_csb_size(struct radeon_device *rdev); +extern void si_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer); +extern u32 cik_get_csb_size(struct radeon_device *rdev); +extern void cik_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer); +extern void rv770_set_clk_bypass_mode(struct radeon_device *rdev); static const u32 evergreen_golden_registers[] = { @@ -911,6 +1075,34 @@ } } +/** + * evergreen_get_allowed_info_register - fetch the register for the info ioctl + * + * @rdev: radeon_device pointer + * @reg: register offset in bytes + * @val: register value + * + * Returns 0 for success or -EINVAL for an invalid register + * + */ +int evergreen_get_allowed_info_register(struct radeon_device *rdev, + u32 reg, u32 *val) +{ + switch (reg) { + case GRBM_STATUS: + case GRBM_STATUS_SE0: + case GRBM_STATUS_SE1: + case SRBM_STATUS: + case SRBM_STATUS2: + case DMA_STATUS_REG: + case UVD_STATUS: + *val = RREG32(reg); + return 0; + default: + return -EINVAL; + } +} + void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw, unsigned *bankh, unsigned *mtaspect, unsigned *tile_split) @@ -1080,23 +1272,72 @@ void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev) { - u16 ctl, v; - int err; - - err = pcie_capability_read_word(rdev->pdev, PCI_EXP_DEVCTL, &ctl); - if (err) - return; - - v = (ctl & PCI_EXP_DEVCTL_READRQ) >> 12; + int readrq; + u16 v; + readrq = pcie_get_readrq(rdev->pdev); + v = ffs(readrq) - 8; /* if bios or OS sets MAX_READ_REQUEST_SIZE to an invalid value, fix it * to avoid hangs or perfomance issues */ - if ((v == 0) || (v == 6) || (v == 7)) { - ctl &= ~PCI_EXP_DEVCTL_READRQ; - ctl |= (2 << 12); - pcie_capability_write_word(rdev->pdev, PCI_EXP_DEVCTL, ctl); + if ((v == 0) || (v == 6) || (v == 7)) + pcie_set_readrq(rdev->pdev, 512); +} + +void dce4_program_fmt(struct drm_encoder *encoder) +{ + struct drm_device *dev = encoder->dev; + struct radeon_device *rdev = dev->dev_private; + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); + struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); + int bpc = 0; + u32 tmp = 0; + enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE; + + if (connector) { + struct radeon_connector *radeon_connector = to_radeon_connector(connector); + bpc = radeon_get_monitor_bpc(connector); + dither = radeon_connector->dither; } + + /* LVDS/eDP FMT is set up by atom */ + if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT) + return; + + /* not needed for analog */ + if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) || + (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2)) + return; + + if (bpc == 0) + return; + + switch (bpc) { + case 6: + if (dither == RADEON_FMT_DITHER_ENABLE) + /* XXX sort out optimal dither settings */ + tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE | + FMT_SPATIAL_DITHER_EN); + else + tmp |= FMT_TRUNCATE_EN; + break; + case 8: + if (dither == RADEON_FMT_DITHER_ENABLE) + /* XXX sort out optimal dither settings */ + tmp |= (FMT_FRAME_RANDOM_ENABLE | 
FMT_HIGHPASS_RANDOM_ENABLE | + FMT_RGB_RANDOM_ENABLE | + FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH); + else + tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH); + break; + case 10: + default: + /* not needed */ + break; + } + + WREG32(FMT_BIT_DEPTH_CONTROL + radeon_crtc->crtc_offset, tmp); } static bool dce4_is_in_vblank(struct radeon_device *rdev, int crtc) @@ -1157,83 +1398,43 @@ } /** - * radeon_irq_kms_pflip_irq_get - pre-pageflip callback. - * - * @rdev: radeon_device pointer - * @crtc: crtc to prepare for pageflip on - * - * Pre-pageflip callback (evergreen+). - * Enables the pageflip irq (vblank irq). - */ -void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc) -{ - /* enable the pflip int */ - radeon_irq_kms_pflip_irq_get(rdev, crtc); -} - -/** - * evergreen_post_page_flip - pos-pageflip callback. - * - * @rdev: radeon_device pointer - * @crtc: crtc to cleanup pageflip on - * - * Post-pageflip callback (evergreen+). - * Disables the pageflip irq (vblank irq). - */ -void evergreen_post_page_flip(struct radeon_device *rdev, int crtc) -{ - /* disable the pflip int */ - radeon_irq_kms_pflip_irq_put(rdev, crtc); -} - -/** * evergreen_page_flip - pageflip callback. * * @rdev: radeon_device pointer * @crtc_id: crtc to cleanup pageflip on * @crtc_base: new address of the crtc (GPU MC address) * - * Does the actual pageflip (evergreen+). - * During vblank we take the crtc lock and wait for the update_pending - * bit to go high, when it does, we release the lock, and allow the - * double buffered update to take place. - * Returns the current update pending status. + * Triggers the actual pageflip by updating the primary + * surface base address (evergreen+). */ -u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) +void evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) { struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; - u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset); - int i; - - /* Lock the graphics update lock */ - tmp |= EVERGREEN_GRPH_UPDATE_LOCK; - WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp); /* update the scanout addresses */ - WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset, - upper_32_bits(crtc_base)); - WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, - (u32)crtc_base); - WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset, upper_32_bits(crtc_base)); WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, (u32)crtc_base); + /* post the write */ + RREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset); +} - /* Wait for update_pending to go high. */ - for (i = 0; i < rdev->usec_timeout; i++) { - if (RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING) - break; - udelay(1); - } - DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n"); - - /* Unlock the lock, so double-buffering can take place inside vblank */ - tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK; - WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp); +/** + * evergreen_page_flip_pending - check if page flip is still pending + * + * @rdev: radeon_device pointer + * @crtc_id: crtc to check + * + * Returns the current update pending status. 
+ */ +bool evergreen_page_flip_pending(struct radeon_device *rdev, int crtc_id) +{ + struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; /* Return current update_pending status: */ - return RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING; + return !!(RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & + EVERGREEN_GRPH_SURFACE_UPDATE_PENDING); } /* get temperature in millidegrees */ @@ -1417,8 +1618,8 @@ struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage; if (voltage->type == VOLTAGE_SW) { - /* 0xff01 is a flag rather then an actual voltage */ - if (voltage->voltage == 0xff01) + /* 0xff0x are flags rather then an actual voltage */ + if ((voltage->voltage & 0xff00) == 0xff00) return; if (voltage->voltage && (voltage->voltage != rdev->pm.current_vddc)) { radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC); @@ -1438,8 +1639,8 @@ voltage = &rdev->pm.power_state[req_ps_idx]. clock_info[rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx].voltage; - /* 0xff01 is a flag rather then an actual voltage */ - if (voltage->vddci == 0xff01) + /* 0xff0x are flags rather then an actual voltage */ + if ((voltage->vddci & 0xff00) == 0xff00) return; if (voltage->vddci && (voltage->vddci != rdev->pm.current_vddci)) { radeon_atom_set_voltage(rdev, voltage->vddci, SET_VOLTAGE_TYPE_ASIC_VDDCI); @@ -1536,7 +1737,7 @@ case RADEON_HPD_6: if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE) connected = true; - break; + break; default: break; } @@ -2053,7 +2254,8 @@ u32 lb_size, u32 num_heads) { struct drm_display_mode *mode = &radeon_crtc->base.mode; - struct evergreen_wm_params wm; + struct evergreen_wm_params wm_low, wm_high; + u32 dram_channels; u32 pixel_period; u32 line_time = 0; u32 latency_watermark_a = 0, latency_watermark_b = 0; @@ -2069,39 +2271,81 @@ line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535); priority_a_cnt = 0; priority_b_cnt = 0; + dram_channels = evergreen_get_number_of_dram_channels(rdev); + + /* watermark for high clocks */ + if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) { + wm_high.yclk = + radeon_dpm_get_mclk(rdev, false) * 10; + wm_high.sclk = + radeon_dpm_get_sclk(rdev, false) * 10; + } else { + wm_high.yclk = rdev->pm.current_mclk * 10; + wm_high.sclk = rdev->pm.current_sclk * 10; + } - wm.yclk = rdev->pm.current_mclk * 10; - wm.sclk = rdev->pm.current_sclk * 10; - wm.disp_clk = mode->clock; - wm.src_width = mode->crtc_hdisplay; - wm.active_time = mode->crtc_hdisplay * pixel_period; - wm.blank_time = line_time - wm.active_time; - wm.interlaced = false; + wm_high.disp_clk = mode->clock; + wm_high.src_width = mode->crtc_hdisplay; + wm_high.active_time = mode->crtc_hdisplay * pixel_period; + wm_high.blank_time = line_time - wm_high.active_time; + wm_high.interlaced = false; if (mode->flags & DRM_MODE_FLAG_INTERLACE) - wm.interlaced = true; - wm.vsc = radeon_crtc->vsc; - wm.vtaps = 1; + wm_high.interlaced = true; + wm_high.vsc = radeon_crtc->vsc; + wm_high.vtaps = 1; if (radeon_crtc->rmx_type != RMX_OFF) - wm.vtaps = 2; - wm.bytes_per_pixel = 4; /* XXX: get this from fb config */ - wm.lb_size = lb_size; - wm.dram_channels = evergreen_get_number_of_dram_channels(rdev); - wm.num_heads = num_heads; + wm_high.vtaps = 2; + wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */ + wm_high.lb_size = lb_size; + wm_high.dram_channels = dram_channels; + wm_high.num_heads = num_heads; + + /* watermark for low clocks */ + if ((rdev->pm.pm_method == 
PM_METHOD_DPM) && rdev->pm.dpm_enabled) { + wm_low.yclk = + radeon_dpm_get_mclk(rdev, true) * 10; + wm_low.sclk = + radeon_dpm_get_sclk(rdev, true) * 10; + } else { + wm_low.yclk = rdev->pm.current_mclk * 10; + wm_low.sclk = rdev->pm.current_sclk * 10; + } + + wm_low.disp_clk = mode->clock; + wm_low.src_width = mode->crtc_hdisplay; + wm_low.active_time = mode->crtc_hdisplay * pixel_period; + wm_low.blank_time = line_time - wm_low.active_time; + wm_low.interlaced = false; + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + wm_low.interlaced = true; + wm_low.vsc = radeon_crtc->vsc; + wm_low.vtaps = 1; + if (radeon_crtc->rmx_type != RMX_OFF) + wm_low.vtaps = 2; + wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */ + wm_low.lb_size = lb_size; + wm_low.dram_channels = dram_channels; + wm_low.num_heads = num_heads; /* set for high clocks */ - latency_watermark_a = min(evergreen_latency_watermark(&wm), (u32)65535); + latency_watermark_a = min(evergreen_latency_watermark(&wm_high), (u32)65535); /* set for low clocks */ - /* wm.yclk = low clk; wm.sclk = low clk */ - latency_watermark_b = min(evergreen_latency_watermark(&wm), (u32)65535); + latency_watermark_b = min(evergreen_latency_watermark(&wm_low), (u32)65535); /* possibly force display priority to high */ /* should really do this at mode validation time... */ - if (!evergreen_average_bandwidth_vs_dram_bandwidth_for_display(&wm) || - !evergreen_average_bandwidth_vs_available_bandwidth(&wm) || - !evergreen_check_latency_hiding(&wm) || + if (!evergreen_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) || + !evergreen_average_bandwidth_vs_available_bandwidth(&wm_high) || + !evergreen_check_latency_hiding(&wm_high) || (rdev->disp_priority == 2)) { - DRM_DEBUG_KMS("force priority to high\n"); + DRM_DEBUG_KMS("force priority a to high\n"); priority_a_cnt |= PRIORITY_ALWAYS_ON; + } + if (!evergreen_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) || + !evergreen_average_bandwidth_vs_available_bandwidth(&wm_low) || + !evergreen_check_latency_hiding(&wm_low) || + (rdev->disp_priority == 2)) { + DRM_DEBUG_KMS("force priority b to high\n"); priority_b_cnt |= PRIORITY_ALWAYS_ON; } @@ -2128,6 +2372,9 @@ c.full = dfixed_div(c, a); priority_b_mark = dfixed_trunc(c); priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK; + + /* Save number of lines the linebuffer leads before the scanout */ + radeon_crtc->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay); } /* select wm A */ @@ -2154,6 +2401,10 @@ WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt); WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt); + /* save values for DPM */ + radeon_crtc->line_time = line_time; + radeon_crtc->wm_high = latency_watermark_a; + radeon_crtc->wm_low = latency_watermark_b; } /** @@ -2171,6 +2422,9 @@ u32 num_heads = 0, lb_size; int i; + if (!rdev->mode_info.mode_config_initialized) + return; + radeon_update_display_priority(rdev); for (i = 0; i < rdev->num_crtc; i++) { @@ -2249,7 +2503,6 @@ r = radeon_gart_table_vram_pin(rdev); if (r) return r; - radeon_gart_restore(rdev); /* Setup L2 cache */ WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING | ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE | @@ -2355,10 +2608,152 @@ WREG32(VM_CONTEXT1_CNTL, 0); } +static const unsigned ni_dig_offsets[] = +{ + NI_DIG0_REGISTER_OFFSET, + NI_DIG1_REGISTER_OFFSET, + NI_DIG2_REGISTER_OFFSET, + NI_DIG3_REGISTER_OFFSET, + NI_DIG4_REGISTER_OFFSET, + NI_DIG5_REGISTER_OFFSET +}; + +static const unsigned ni_tx_offsets[] = +{ + 
NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1, + NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1, + NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1, + NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1, + NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1, + NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1 +}; + +static const unsigned evergreen_dp_offsets[] = +{ + EVERGREEN_DP0_REGISTER_OFFSET, + EVERGREEN_DP1_REGISTER_OFFSET, + EVERGREEN_DP2_REGISTER_OFFSET, + EVERGREEN_DP3_REGISTER_OFFSET, + EVERGREEN_DP4_REGISTER_OFFSET, + EVERGREEN_DP5_REGISTER_OFFSET +}; + + +/* + * Assumption is that EVERGREEN_CRTC_MASTER_EN enable for requested crtc + * We go from crtc to connector and it is not relible since it + * should be an opposite direction .If crtc is enable then + * find the dig_fe which selects this crtc and insure that it enable. + * if such dig_fe is found then find dig_be which selects found dig_be and + * insure that it enable and in DP_SST mode. + * if UNIPHY_PLL_CONTROL1.enable then we should disconnect timing + * from dp symbols clocks . + */ +static bool evergreen_is_dp_sst_stream_enabled(struct radeon_device *rdev, + unsigned crtc_id, unsigned *ret_dig_fe) +{ + unsigned i; + unsigned dig_fe; + unsigned dig_be; + unsigned dig_en_be; + unsigned uniphy_pll; + unsigned digs_fe_selected; + unsigned dig_be_mode; + unsigned dig_fe_mask; + bool is_enabled = false; + bool found_crtc = false; + + /* loop through all running dig_fe to find selected crtc */ + for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) { + dig_fe = RREG32(NI_DIG_FE_CNTL + ni_dig_offsets[i]); + if (dig_fe & NI_DIG_FE_CNTL_SYMCLK_FE_ON && + crtc_id == NI_DIG_FE_CNTL_SOURCE_SELECT(dig_fe)) { + /* found running pipe */ + found_crtc = true; + dig_fe_mask = 1 << i; + dig_fe = i; + break; + } + } + + if (found_crtc) { + /* loop through all running dig_be to find selected dig_fe */ + for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) { + dig_be = RREG32(NI_DIG_BE_CNTL + ni_dig_offsets[i]); + /* if dig_fe_selected by dig_be? */ + digs_fe_selected = NI_DIG_BE_CNTL_FE_SOURCE_SELECT(dig_be); + dig_be_mode = NI_DIG_FE_CNTL_MODE(dig_be); + if (dig_fe_mask & digs_fe_selected && + /* if dig_be in sst mode? 
*/ + dig_be_mode == NI_DIG_BE_DPSST) { + dig_en_be = RREG32(NI_DIG_BE_EN_CNTL + + ni_dig_offsets[i]); + uniphy_pll = RREG32(NI_DCIO_UNIPHY0_PLL_CONTROL1 + + ni_tx_offsets[i]); + /* dig_be enable and tx is running */ + if (dig_en_be & NI_DIG_BE_EN_CNTL_ENABLE && + dig_en_be & NI_DIG_BE_EN_CNTL_SYMBCLK_ON && + uniphy_pll & NI_DCIO_UNIPHY0_PLL_CONTROL1_ENABLE) { + is_enabled = true; + *ret_dig_fe = dig_fe; + break; + } + } + } + } + + return is_enabled; +} + +/* + * Blank dig when in dp sst mode + * Dig ignores crtc timing + */ +static void evergreen_blank_dp_output(struct radeon_device *rdev, + unsigned dig_fe) +{ + unsigned stream_ctrl; + unsigned fifo_ctrl; + unsigned counter = 0; + + if (dig_fe >= ARRAY_SIZE(evergreen_dp_offsets)) { + DRM_ERROR("invalid dig_fe %d\n", dig_fe); + return; + } + + stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL + + evergreen_dp_offsets[dig_fe]); + if (!(stream_ctrl & EVERGREEN_DP_VID_STREAM_CNTL_ENABLE)) { + DRM_ERROR("dig %d , should be enable\n", dig_fe); + return; + } + + stream_ctrl &=~EVERGREEN_DP_VID_STREAM_CNTL_ENABLE; + WREG32(EVERGREEN_DP_VID_STREAM_CNTL + + evergreen_dp_offsets[dig_fe], stream_ctrl); + + stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL + + evergreen_dp_offsets[dig_fe]); + while (counter < 32 && stream_ctrl & EVERGREEN_DP_VID_STREAM_STATUS) { + msleep(1); + counter++; + stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL + + evergreen_dp_offsets[dig_fe]); + } + if (counter >= 32 ) + DRM_ERROR("counter exceeds %d\n", counter); + + fifo_ctrl = RREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe]); + fifo_ctrl |= EVERGREEN_DP_STEER_FIFO_RESET; + WREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe], fifo_ctrl); + +} + void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save) { u32 crtc_enabled, tmp, frame_count, blackout; int i, j; + unsigned dig_fe; if (!ASIC_IS_NODCE(rdev)) { save->vga_render_control = RREG32(VGA_RENDER_CONTROL); @@ -2398,7 +2793,17 @@ break; udelay(1); } - + /*we should disable dig if it drives dp sst*/ + /*but we are in radeon_device_init and the topology is unknown*/ + /*and it is available after radeon_modeset_init*/ + /*the following method radeon_atom_encoder_dpms_dig*/ + /*does the job if we initialize it properly*/ + /*for now we do it this manually*/ + /**/ + if (ASIC_IS_DCE5(rdev) && + evergreen_is_dp_sst_stream_enabled(rdev, i ,&dig_fe)) + evergreen_blank_dp_output(rdev, dig_fe); + /*we could remove 6 lines below*/ /* XXX this is a hack to avoid strange behavior with EFI on certain systems */ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1); tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]); @@ -2468,8 +2873,9 @@ for (i = 0; i < rdev->num_crtc; i++) { if (save->crtc_enabled[i]) { tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]); - if ((tmp & 0x3) != 0) { - tmp &= ~0x3; + if ((tmp & 0x7) != 3) { + tmp &= ~0x7; + tmp |= 0x3; WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp); } tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]); @@ -2502,7 +2908,7 @@ if (save->crtc_enabled[i]) { if (ASIC_IS_DCE6(rdev)) { tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]); - tmp |= EVERGREEN_CRTC_BLANK_DATA_EN; + tmp &= ~EVERGREEN_CRTC_BLANK_DATA_EN; WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1); WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp); WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0); @@ -2695,7 +3101,7 @@ radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1)); radeon_ring_write(ring, 0); 
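/*
 * Editorial summary of the DP SST handling added above (restating the new
 * helpers, not describing new behaviour): before evergreen_mc_stop()
 * disables a CRTC, evergreen_is_dp_sst_stream_enabled() walks
 * CRTC -> DIG front end -> DIG back end; if the back end that selects that
 * front end is in DP SST mode and its UNIPHY PLL is running, then
 * evergreen_blank_dp_output() disables the DP video stream, polls
 * DP_VID_STREAM_STATUS for up to ~32 ms until the stream has actually
 * stopped, and finally resets the DP steering FIFO, so the DIG no longer
 * depends on the CRTC timing that is about to be shut off.
 */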
radeon_ring_write(ring, 0); - radeon_ring_unlock_commit(rdev, ring); + radeon_ring_unlock_commit(rdev, ring, false); cp_me = 0xff; WREG32(CP_ME_CNTL, cp_me); @@ -2738,7 +3144,7 @@ radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */ radeon_ring_write(ring, 0x00000010); /* */ - radeon_ring_unlock_commit(rdev, ring); + radeon_ring_unlock_commit(rdev, ring, false); return 0; } @@ -2763,8 +3169,8 @@ RREG32(GRBM_SOFT_RESET); /* Set ring buffer size */ - rb_bufsz = drm_order(ring->ring_size / 8); - tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; + rb_bufsz = order_base_2(ring->ring_size / 8); + tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; #ifdef __BIG_ENDIAN tmp |= BUF_SWAP_32BIT; #endif @@ -2800,8 +3206,6 @@ WREG32(CP_RB_BASE, ring->gpu_addr >> 8); WREG32(CP_DEBUG, (1 << 27) | (1 << 28)); - ring->rptr = RREG32(CP_RB_RPTR); - evergreen_cp_start(rdev); ring->ready = true; r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring); @@ -2834,7 +3238,7 @@ u32 vgt_cache_invalidation; u32 hdp_host_path_cntl, tmp; u32 disabled_rb_mask; - int i, j, num_shader_engines, ps_thread_count; + int i, j, ps_thread_count; switch (rdev->family) { case CHIP_CYPRESS: @@ -3077,6 +3481,8 @@ } WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); + WREG32(SRBM_INT_CNTL, 0x1); + WREG32(SRBM_INT_ACK, 0x1); evergreen_fix_pci_max_read_req_size(rdev); @@ -3132,16 +3538,12 @@ rdev->config.evergreen.tile_config |= ((gb_addr_config & 0x30000000) >> 28) << 12; - num_shader_engines = (gb_addr_config & NUM_SHADER_ENGINES(3) >> 12) + 1; - if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK)) { u32 efuse_straps_4; u32 efuse_straps_3; - WREG32(RCU_IND_INDEX, 0x204); - efuse_straps_4 = RREG32(RCU_IND_DATA); - WREG32(RCU_IND_INDEX, 0x203); - efuse_straps_3 = RREG32(RCU_IND_DATA); + efuse_straps_4 = RREG32_RCU(0x204); + efuse_straps_3 = RREG32_RCU(0x203); tmp = (((efuse_straps_4 & 0xf) << 4) | ((efuse_straps_3 & 0xf0000000) >> 28)); } else { @@ -3167,6 +3569,18 @@ disabled_rb_mask &= ~(1 << i); } + for (i = 0; i < rdev->config.evergreen.num_ses; i++) { + u32 simd_disable_bitmap; + + WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i)); + WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i)); + simd_disable_bitmap = (RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffff0000) >> 16; + simd_disable_bitmap |= 0xffffffff << rdev->config.evergreen.max_simds; + tmp <<= 16; + tmp |= simd_disable_bitmap; + } + rdev->config.evergreen.active_simds = hweight32(~tmp); + WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES); WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES); @@ -3497,7 +3911,7 @@ return true; } -static u32 evergreen_gpu_check_soft_reset(struct radeon_device *rdev) +u32 evergreen_gpu_check_soft_reset(struct radeon_device *rdev) { u32 reset_mask = 0; u32 tmp; @@ -3680,6 +4094,48 @@ evergreen_print_gpu_status_regs(rdev); } +void evergreen_gpu_pci_config_reset(struct radeon_device *rdev) +{ + struct evergreen_mc_save save; + u32 tmp, i; + + dev_info(rdev->dev, "GPU pci config reset\n"); + + /* disable dpm? */ + + /* Disable CP parsing/prefetching */ + WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT); + udelay(50); + /* Disable DMA */ + tmp = RREG32(DMA_RB_CNTL); + tmp &= ~DMA_RB_ENABLE; + WREG32(DMA_RB_CNTL, tmp); + /* XXX other engines? 
*/ + + /* halt the rlc */ + r600_rlc_stop(rdev); + + udelay(50); + + /* set mclk/sclk to bypass */ + rv770_set_clk_bypass_mode(rdev); + /* disable BM */ + pci_clear_master(rdev->pdev); + /* disable mem access */ + evergreen_mc_stop(rdev, &save); + if (evergreen_mc_wait_for_idle(rdev)) { + dev_warn(rdev->dev, "Wait for MC idle timed out !\n"); + } + /* reset */ + radeon_pci_config_reset(rdev); + /* wait for asic to come out of reset */ + for (i = 0; i < rdev->usec_timeout; i++) { + if (RREG32(CONFIG_MEMSIZE) != 0xffffffff) + break; + udelay(1); + } +} + int evergreen_asic_reset(struct radeon_device *rdev) { u32 reset_mask; @@ -3689,10 +4145,17 @@ if (reset_mask) r600_set_bios_scratch_engine_hung(rdev, true); + /* try soft reset */ evergreen_gpu_soft_reset(rdev, reset_mask); reset_mask = evergreen_gpu_check_soft_reset(rdev); + /* try pci config reset */ + if (reset_mask && radeon_hard_reset) + evergreen_gpu_pci_config_reset(rdev); + + reset_mask = evergreen_gpu_check_soft_reset(rdev); + if (!reset_mask) r600_set_bios_scratch_engine_hung(rdev, false); @@ -3715,34 +4178,354 @@ if (!(reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE | RADEON_RESET_CP))) { - radeon_ring_lockup_update(ring); + radeon_ring_lockup_update(rdev, ring); return false; } - /* force CP activities */ - radeon_ring_force_activity(rdev, ring); return radeon_ring_test_lockup(rdev, ring); } -/** - * evergreen_dma_is_lockup - Check if the DMA engine is locked up - * - * @rdev: radeon_device pointer - * @ring: radeon_ring structure holding ring information - * - * Check if the async DMA engine is locked up. - * Returns true if the engine appears to be locked up, false if not. +/* + * RLC */ -bool evergreen_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) +#define RLC_SAVE_RESTORE_LIST_END_MARKER 0x00000000 +#define RLC_CLEAR_STATE_END_MARKER 0x00000001 + +void sumo_rlc_fini(struct radeon_device *rdev) { - u32 reset_mask = evergreen_gpu_check_soft_reset(rdev); + int r; - if (!(reset_mask & RADEON_RESET_DMA)) { - radeon_ring_lockup_update(ring); - return false; + /* save restore block */ + if (rdev->rlc.save_restore_obj) { + r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false); + if (unlikely(r != 0)) + dev_warn(rdev->dev, "(%d) reserve RLC sr bo failed\n", r); + radeon_bo_unpin(rdev->rlc.save_restore_obj); + radeon_bo_unreserve(rdev->rlc.save_restore_obj); + + radeon_bo_unref(&rdev->rlc.save_restore_obj); + rdev->rlc.save_restore_obj = NULL; + } + + /* clear state block */ + if (rdev->rlc.clear_state_obj) { + r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false); + if (unlikely(r != 0)) + dev_warn(rdev->dev, "(%d) reserve RLC c bo failed\n", r); + radeon_bo_unpin(rdev->rlc.clear_state_obj); + radeon_bo_unreserve(rdev->rlc.clear_state_obj); + + radeon_bo_unref(&rdev->rlc.clear_state_obj); + rdev->rlc.clear_state_obj = NULL; + } + + /* clear state block */ + if (rdev->rlc.cp_table_obj) { + r = radeon_bo_reserve(rdev->rlc.cp_table_obj, false); + if (unlikely(r != 0)) + dev_warn(rdev->dev, "(%d) reserve RLC cp table bo failed\n", r); + radeon_bo_unpin(rdev->rlc.cp_table_obj); + radeon_bo_unreserve(rdev->rlc.cp_table_obj); + + radeon_bo_unref(&rdev->rlc.cp_table_obj); + rdev->rlc.cp_table_obj = NULL; } - /* force ring activities */ - radeon_ring_force_activity(rdev, ring); - return radeon_ring_test_lockup(rdev, ring); +} + +#define CP_ME_TABLE_SIZE 96 + +int sumo_rlc_init(struct radeon_device *rdev) +{ + const u32 *src_ptr; + volatile u32 *dst_ptr; + u32 dws, data, i, j, k, reg_num; + u32 reg_list_num, 
reg_list_hdr_blk_index, reg_list_blk_index = 0; + u64 reg_list_mc_addr; + const struct cs_section_def *cs_data; + int r; + + src_ptr = rdev->rlc.reg_list; + dws = rdev->rlc.reg_list_size; + if (rdev->family >= CHIP_BONAIRE) { + dws += (5 * 16) + 48 + 48 + 64; + } + cs_data = rdev->rlc.cs_data; + + if (src_ptr) { + /* save restore block */ + if (rdev->rlc.save_restore_obj == NULL) { + r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true, + RADEON_GEM_DOMAIN_VRAM, 0, NULL, + NULL, &rdev->rlc.save_restore_obj); + if (r) { + dev_warn(rdev->dev, "(%d) create RLC sr bo failed\n", r); + return r; + } + } + + r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false); + if (unlikely(r != 0)) { + sumo_rlc_fini(rdev); + return r; + } + r = radeon_bo_pin(rdev->rlc.save_restore_obj, RADEON_GEM_DOMAIN_VRAM, + &rdev->rlc.save_restore_gpu_addr); + if (r) { + radeon_bo_unreserve(rdev->rlc.save_restore_obj); + dev_warn(rdev->dev, "(%d) pin RLC sr bo failed\n", r); + sumo_rlc_fini(rdev); + return r; + } + + r = radeon_bo_kmap(rdev->rlc.save_restore_obj, (void **)&rdev->rlc.sr_ptr); + if (r) { + dev_warn(rdev->dev, "(%d) map RLC sr bo failed\n", r); + sumo_rlc_fini(rdev); + return r; + } + /* write the sr buffer */ + dst_ptr = rdev->rlc.sr_ptr; + if (rdev->family >= CHIP_TAHITI) { + /* SI */ + for (i = 0; i < rdev->rlc.reg_list_size; i++) + dst_ptr[i] = cpu_to_le32(src_ptr[i]); + } else { + /* ON/LN/TN */ + /* format: + * dw0: (reg2 << 16) | reg1 + * dw1: reg1 save space + * dw2: reg2 save space + */ + for (i = 0; i < dws; i++) { + data = src_ptr[i] >> 2; + i++; + if (i < dws) + data |= (src_ptr[i] >> 2) << 16; + j = (((i - 1) * 3) / 2); + dst_ptr[j] = cpu_to_le32(data); + } + j = ((i * 3) / 2); + dst_ptr[j] = cpu_to_le32(RLC_SAVE_RESTORE_LIST_END_MARKER); + } + radeon_bo_kunmap(rdev->rlc.save_restore_obj); + radeon_bo_unreserve(rdev->rlc.save_restore_obj); + } + + if (cs_data) { + /* clear state block */ + if (rdev->family >= CHIP_BONAIRE) { + rdev->rlc.clear_state_size = dws = cik_get_csb_size(rdev); + } else if (rdev->family >= CHIP_TAHITI) { + rdev->rlc.clear_state_size = si_get_csb_size(rdev); + dws = rdev->rlc.clear_state_size + (256 / 4); + } else { + reg_list_num = 0; + dws = 0; + for (i = 0; cs_data[i].section != NULL; i++) { + for (j = 0; cs_data[i].section[j].extent != NULL; j++) { + reg_list_num++; + dws += cs_data[i].section[j].reg_count; + } + } + reg_list_blk_index = (3 * reg_list_num + 2); + dws += reg_list_blk_index; + rdev->rlc.clear_state_size = dws; + } + + if (rdev->rlc.clear_state_obj == NULL) { + r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true, + RADEON_GEM_DOMAIN_VRAM, 0, NULL, + NULL, &rdev->rlc.clear_state_obj); + if (r) { + dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r); + sumo_rlc_fini(rdev); + return r; + } + } + r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false); + if (unlikely(r != 0)) { + sumo_rlc_fini(rdev); + return r; + } + r = radeon_bo_pin(rdev->rlc.clear_state_obj, RADEON_GEM_DOMAIN_VRAM, + &rdev->rlc.clear_state_gpu_addr); + if (r) { + radeon_bo_unreserve(rdev->rlc.clear_state_obj); + dev_warn(rdev->dev, "(%d) pin RLC c bo failed\n", r); + sumo_rlc_fini(rdev); + return r; + } + + r = radeon_bo_kmap(rdev->rlc.clear_state_obj, (void **)&rdev->rlc.cs_ptr); + if (r) { + dev_warn(rdev->dev, "(%d) map RLC c bo failed\n", r); + sumo_rlc_fini(rdev); + return r; + } + /* set up the cs buffer */ + dst_ptr = rdev->rlc.cs_ptr; + if (rdev->family >= CHIP_BONAIRE) { + cik_get_csb_buffer(rdev, dst_ptr); + } else if (rdev->family >= CHIP_TAHITI) { + reg_list_mc_addr = 
rdev->rlc.clear_state_gpu_addr + 256; + dst_ptr[0] = cpu_to_le32(upper_32_bits(reg_list_mc_addr)); + dst_ptr[1] = cpu_to_le32(lower_32_bits(reg_list_mc_addr)); + dst_ptr[2] = cpu_to_le32(rdev->rlc.clear_state_size); + si_get_csb_buffer(rdev, &dst_ptr[(256/4)]); + } else { + reg_list_hdr_blk_index = 0; + reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + (reg_list_blk_index * 4); + data = upper_32_bits(reg_list_mc_addr); + dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(data); + reg_list_hdr_blk_index++; + for (i = 0; cs_data[i].section != NULL; i++) { + for (j = 0; cs_data[i].section[j].extent != NULL; j++) { + reg_num = cs_data[i].section[j].reg_count; + data = reg_list_mc_addr & 0xffffffff; + dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(data); + reg_list_hdr_blk_index++; + + data = (cs_data[i].section[j].reg_index * 4) & 0xffffffff; + dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(data); + reg_list_hdr_blk_index++; + + data = 0x08000000 | (reg_num * 4); + dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(data); + reg_list_hdr_blk_index++; + + for (k = 0; k < reg_num; k++) { + data = cs_data[i].section[j].extent[k]; + dst_ptr[reg_list_blk_index + k] = cpu_to_le32(data); + } + reg_list_mc_addr += reg_num * 4; + reg_list_blk_index += reg_num; + } + } + dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(RLC_CLEAR_STATE_END_MARKER); + } + radeon_bo_kunmap(rdev->rlc.clear_state_obj); + radeon_bo_unreserve(rdev->rlc.clear_state_obj); + } + + if (rdev->rlc.cp_table_size) { + if (rdev->rlc.cp_table_obj == NULL) { + r = radeon_bo_create(rdev, rdev->rlc.cp_table_size, + PAGE_SIZE, true, + RADEON_GEM_DOMAIN_VRAM, 0, NULL, + NULL, &rdev->rlc.cp_table_obj); + if (r) { + dev_warn(rdev->dev, "(%d) create RLC cp table bo failed\n", r); + sumo_rlc_fini(rdev); + return r; + } + } + + r = radeon_bo_reserve(rdev->rlc.cp_table_obj, false); + if (unlikely(r != 0)) { + dev_warn(rdev->dev, "(%d) reserve RLC cp table bo failed\n", r); + sumo_rlc_fini(rdev); + return r; + } + r = radeon_bo_pin(rdev->rlc.cp_table_obj, RADEON_GEM_DOMAIN_VRAM, + &rdev->rlc.cp_table_gpu_addr); + if (r) { + radeon_bo_unreserve(rdev->rlc.cp_table_obj); + dev_warn(rdev->dev, "(%d) pin RLC cp_table bo failed\n", r); + sumo_rlc_fini(rdev); + return r; + } + r = radeon_bo_kmap(rdev->rlc.cp_table_obj, (void **)&rdev->rlc.cp_table_ptr); + if (r) { + dev_warn(rdev->dev, "(%d) map RLC cp table bo failed\n", r); + sumo_rlc_fini(rdev); + return r; + } + + cik_init_cp_pg_table(rdev); + + radeon_bo_kunmap(rdev->rlc.cp_table_obj); + radeon_bo_unreserve(rdev->rlc.cp_table_obj); + + } + + return 0; +} + +static void evergreen_rlc_start(struct radeon_device *rdev) +{ + u32 mask = RLC_ENABLE; + + if (rdev->flags & RADEON_IS_IGP) { + mask |= GFX_POWER_GATING_ENABLE | GFX_POWER_GATING_SRC; + } + + WREG32(RLC_CNTL, mask); +} + +int evergreen_rlc_resume(struct radeon_device *rdev) +{ + u32 i; + const __be32 *fw_data; + + if (!rdev->rlc_fw) + return -EINVAL; + + r600_rlc_stop(rdev); + + WREG32(RLC_HB_CNTL, 0); + + if (rdev->flags & RADEON_IS_IGP) { + if (rdev->family == CHIP_ARUBA) { + u32 always_on_bitmap = + 3 | (3 << (16 * rdev->config.cayman.max_shader_engines)); + /* find out the number of active simds */ + u32 tmp = (RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffff0000) >> 16; + tmp |= 0xffffffff << rdev->config.cayman.max_simds_per_se; + tmp = hweight32(~tmp); + if (tmp == rdev->config.cayman.max_simds_per_se) { + WREG32(TN_RLC_LB_ALWAYS_ACTIVE_SIMD_MASK, always_on_bitmap); + WREG32(TN_RLC_LB_PARAMS, 0x00601004); + WREG32(TN_RLC_LB_INIT_SIMD_MASK, 0xffffffff); + 
WREG32(TN_RLC_LB_CNTR_INIT, 0x00000000); + WREG32(TN_RLC_LB_CNTR_MAX, 0x00002000); + } + } else { + WREG32(RLC_HB_WPTR_LSB_ADDR, 0); + WREG32(RLC_HB_WPTR_MSB_ADDR, 0); + } + WREG32(TN_RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8); + WREG32(TN_RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8); + } else { + WREG32(RLC_HB_BASE, 0); + WREG32(RLC_HB_RPTR, 0); + WREG32(RLC_HB_WPTR, 0); + WREG32(RLC_HB_WPTR_LSB_ADDR, 0); + WREG32(RLC_HB_WPTR_MSB_ADDR, 0); + } + WREG32(RLC_MC_CNTL, 0); + WREG32(RLC_UCODE_CNTL, 0); + + fw_data = (const __be32 *)rdev->rlc_fw->data; + if (rdev->family >= CHIP_ARUBA) { + for (i = 0; i < ARUBA_RLC_UCODE_SIZE; i++) { + WREG32(RLC_UCODE_ADDR, i); + WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++)); + } + } else if (rdev->family >= CHIP_CAYMAN) { + for (i = 0; i < CAYMAN_RLC_UCODE_SIZE; i++) { + WREG32(RLC_UCODE_ADDR, i); + WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++)); + } + } else { + for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) { + WREG32(RLC_UCODE_ADDR, i); + WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++)); + } + } + WREG32(RLC_UCODE_ADDR, 0); + + evergreen_rlc_start(rdev); + + return 0; } /* Interrupts */ @@ -3771,6 +4554,7 @@ tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE; WREG32(DMA_CNTL, tmp); WREG32(GRBM_INT_CNTL, 0); + WREG32(SRBM_INT_CNTL, 0); WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); if (rdev->num_crtc >= 4) { @@ -3820,9 +4604,9 @@ u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0; u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6; u32 grbm_int_cntl = 0; - u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0; u32 afmt1 = 0, afmt2 = 0, afmt3 = 0, afmt4 = 0, afmt5 = 0, afmt6 = 0; u32 dma_cntl, dma_cntl1 = 0; + u32 thermal_int = 0; if (!rdev->irq.installed) { WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); @@ -3836,12 +4620,18 @@ return 0; } - hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN; - hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN; - hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN; - hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN; - hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN; - hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN; + hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN); + hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN); + hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN); + hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN); + hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN); + hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN); + if (rdev->family == CHIP_ARUBA) + thermal_int = RREG32(TN_CG_THERMAL_INT_CTRL) & + ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW); + else + thermal_int = RREG32(CG_THERMAL_INT) & + ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW); afmt1 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK; afmt2 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK; @@ -3887,6 +4677,11 @@ } } + if (rdev->irq.dpm_thermal) { + DRM_DEBUG("dpm thermal\n"); + thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW; + } + if (rdev->irq.crtc_vblank_int[0] || atomic_read(&rdev->irq.pflip[0])) { DRM_DEBUG("evergreen_irq_set: vblank 0\n"); @@ -3919,27 +4714,27 @@ } if (rdev->irq.hpd[0]) { DRM_DEBUG("evergreen_irq_set: hpd 1\n"); - hpd1 |= 
DC_HPDx_INT_EN; + hpd1 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN; } if (rdev->irq.hpd[1]) { DRM_DEBUG("evergreen_irq_set: hpd 2\n"); - hpd2 |= DC_HPDx_INT_EN; + hpd2 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN; } if (rdev->irq.hpd[2]) { DRM_DEBUG("evergreen_irq_set: hpd 3\n"); - hpd3 |= DC_HPDx_INT_EN; + hpd3 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN; } if (rdev->irq.hpd[3]) { DRM_DEBUG("evergreen_irq_set: hpd 4\n"); - hpd4 |= DC_HPDx_INT_EN; + hpd4 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN; } if (rdev->irq.hpd[4]) { DRM_DEBUG("evergreen_irq_set: hpd 5\n"); - hpd5 |= DC_HPDx_INT_EN; + hpd5 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN; } if (rdev->irq.hpd[5]) { DRM_DEBUG("evergreen_irq_set: hpd 6\n"); - hpd6 |= DC_HPDx_INT_EN; + hpd6 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN; } if (rdev->irq.afmt[0]) { DRM_DEBUG("evergreen_irq_set: hdmi 0\n"); @@ -3991,15 +4786,21 @@ WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6); } - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1); - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); if (rdev->num_crtc >= 4) { - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3); - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); } if (rdev->num_crtc >= 6) { - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5); - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); } WREG32(DC_HPD1_INT_CONTROL, hpd1); @@ -4008,6 +4809,10 @@ WREG32(DC_HPD4_INT_CONTROL, hpd4); WREG32(DC_HPD5_INT_CONTROL, hpd5); WREG32(DC_HPD6_INT_CONTROL, hpd6); + if (rdev->family == CHIP_ARUBA) + WREG32(TN_CG_THERMAL_INT_CTRL, thermal_int); + else + WREG32(CG_THERMAL_INT, thermal_int); WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, afmt1); WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, afmt2); @@ -4123,6 +4928,38 @@ tmp |= DC_HPDx_INT_ACK; WREG32(DC_HPD6_INT_CONTROL, tmp); } + + if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT) { + tmp = RREG32(DC_HPD1_INT_CONTROL); + tmp |= DC_HPDx_RX_INT_ACK; + WREG32(DC_HPD1_INT_CONTROL, tmp); + } + if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT) { + tmp = RREG32(DC_HPD2_INT_CONTROL); + tmp |= DC_HPDx_RX_INT_ACK; + WREG32(DC_HPD2_INT_CONTROL, tmp); + } + if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) { + tmp = RREG32(DC_HPD3_INT_CONTROL); + tmp |= DC_HPDx_RX_INT_ACK; + WREG32(DC_HPD3_INT_CONTROL, tmp); + } + if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) { + tmp = RREG32(DC_HPD4_INT_CONTROL); + tmp |= DC_HPDx_RX_INT_ACK; + WREG32(DC_HPD4_INT_CONTROL, tmp); + } + if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) { + tmp = RREG32(DC_HPD5_INT_CONTROL); + tmp |= DC_HPDx_RX_INT_ACK; + WREG32(DC_HPD5_INT_CONTROL, tmp); + } + if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) { + tmp = RREG32(DC_HPD5_INT_CONTROL); + tmp |= DC_HPDx_RX_INT_ACK; + WREG32(DC_HPD6_INT_CONTROL, tmp); + } + if 
(rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG) { tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET); tmp |= AFMT_AZ_FORMAT_WTRIG_ACK; @@ -4180,12 +5017,13 @@ wptr = RREG32(IH_RB_WPTR); if (wptr & RB_OVERFLOW) { + wptr &= ~RB_OVERFLOW; /* When a ring buffer overflow happen start parsing interrupt * from the last not overwritten vector (wptr + 16). Hopefully * this should allow us to catchup. */ - dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n", - wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask); + dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n", + wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask); rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask; tmp = RREG32(IH_RB_CNTL); tmp |= IH_WPTR_OVERFLOW_CLEAR; @@ -4202,6 +5040,9 @@ u32 ring_index; bool queue_hotplug = false; bool queue_hdmi = false; + bool queue_dp = false; + bool queue_thermal = false; + u32 status, addr; if (!rdev->ih.enabled || rdev->shutdown) return IRQ_NONE; @@ -4214,7 +5055,7 @@ return IRQ_NONE; rptr = rdev->ih.rptr; - DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr); + DRM_DEBUG("evergreen_irq_process start: rptr %d, wptr %d\n", rptr, wptr); /* Order reading of wptr vs. reading of IH ring data */ rmb(); @@ -4232,23 +5073,27 @@ case 1: /* D1 vblank/vline */ switch (src_data) { case 0: /* D1 vblank */ - if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) { - if (rdev->irq.crtc_vblank_int[0]) { - drm_handle_vblank(rdev->ddev, 0); - rdev->pm.vblank_sync = true; - wake_up(&rdev->irq.vblank_queue); - } - if (atomic_read(&rdev->irq.pflip[0])) - radeon_crtc_handle_flip(rdev, 0); - rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT; - DRM_DEBUG("IH: D1 vblank\n"); + if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT)) + DRM_DEBUG("IH: D1 vblank - IH event w/o asserted irq bit?\n"); + + if (rdev->irq.crtc_vblank_int[0]) { + drm_handle_vblank(rdev->ddev, 0); + rdev->pm.vblank_sync = true; + wake_up(&rdev->irq.vblank_queue); } + if (atomic_read(&rdev->irq.pflip[0])) + radeon_crtc_handle_vblank(rdev, 0); + rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT; + DRM_DEBUG("IH: D1 vblank\n"); + break; case 1: /* D1 vline */ - if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) { - rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT; - DRM_DEBUG("IH: D1 vline\n"); - } + if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT)) + DRM_DEBUG("IH: D1 vline - IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT; + DRM_DEBUG("IH: D1 vline\n"); + break; default: DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); @@ -4258,23 +5103,27 @@ case 2: /* D2 vblank/vline */ switch (src_data) { case 0: /* D2 vblank */ - if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) { - if (rdev->irq.crtc_vblank_int[1]) { - drm_handle_vblank(rdev->ddev, 1); - rdev->pm.vblank_sync = true; - wake_up(&rdev->irq.vblank_queue); - } - if (atomic_read(&rdev->irq.pflip[1])) - radeon_crtc_handle_flip(rdev, 1); - rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT; - DRM_DEBUG("IH: D2 vblank\n"); + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT)) + DRM_DEBUG("IH: D2 vblank - IH event w/o asserted irq bit?\n"); + + if (rdev->irq.crtc_vblank_int[1]) { + drm_handle_vblank(rdev->ddev, 1); + rdev->pm.vblank_sync = true; + 
wake_up(&rdev->irq.vblank_queue); } + if (atomic_read(&rdev->irq.pflip[1])) + radeon_crtc_handle_vblank(rdev, 1); + rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT; + DRM_DEBUG("IH: D2 vblank\n"); + break; case 1: /* D2 vline */ - if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) { - rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT; - DRM_DEBUG("IH: D2 vline\n"); - } + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT)) + DRM_DEBUG("IH: D2 vline - IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT; + DRM_DEBUG("IH: D2 vline\n"); + break; default: DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); @@ -4284,23 +5133,27 @@ case 3: /* D3 vblank/vline */ switch (src_data) { case 0: /* D3 vblank */ - if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) { - if (rdev->irq.crtc_vblank_int[2]) { - drm_handle_vblank(rdev->ddev, 2); - rdev->pm.vblank_sync = true; - wake_up(&rdev->irq.vblank_queue); - } - if (atomic_read(&rdev->irq.pflip[2])) - radeon_crtc_handle_flip(rdev, 2); - rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT; - DRM_DEBUG("IH: D3 vblank\n"); + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)) + DRM_DEBUG("IH: D3 vblank - IH event w/o asserted irq bit?\n"); + + if (rdev->irq.crtc_vblank_int[2]) { + drm_handle_vblank(rdev->ddev, 2); + rdev->pm.vblank_sync = true; + wake_up(&rdev->irq.vblank_queue); } + if (atomic_read(&rdev->irq.pflip[2])) + radeon_crtc_handle_vblank(rdev, 2); + rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT; + DRM_DEBUG("IH: D3 vblank\n"); + break; case 1: /* D3 vline */ - if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) { - rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT; - DRM_DEBUG("IH: D3 vline\n"); - } + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)) + DRM_DEBUG("IH: D3 vline - IH event w/o asserted irq bit?\n"); + + rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT; + DRM_DEBUG("IH: D3 vline\n"); + break; default: DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); @@ -4310,23 +5163,27 @@ case 4: /* D4 vblank/vline */ switch (src_data) { case 0: /* D4 vblank */ - if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) { - if (rdev->irq.crtc_vblank_int[3]) { - drm_handle_vblank(rdev->ddev, 3); - rdev->pm.vblank_sync = true; - wake_up(&rdev->irq.vblank_queue); - } - if (atomic_read(&rdev->irq.pflip[3])) - radeon_crtc_handle_flip(rdev, 3); - rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT; - DRM_DEBUG("IH: D4 vblank\n"); + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)) + DRM_DEBUG("IH: D4 vblank - IH event w/o asserted irq bit?\n"); + + if (rdev->irq.crtc_vblank_int[3]) { + drm_handle_vblank(rdev->ddev, 3); + rdev->pm.vblank_sync = true; + wake_up(&rdev->irq.vblank_queue); } + if (atomic_read(&rdev->irq.pflip[3])) + radeon_crtc_handle_vblank(rdev, 3); + rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT; + DRM_DEBUG("IH: D4 vblank\n"); + break; case 1: /* D4 vline */ - if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) { - rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT; - DRM_DEBUG("IH: D4 vline\n"); - } + if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & 
LB_D4_VLINE_INTERRUPT))
+				DRM_DEBUG("IH: D4 vline - IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
+			DRM_DEBUG("IH: D4 vline\n");
+
 			break;
 		default:
 			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -4336,23 +5193,27 @@
 	case 5: /* D5 vblank/vline */
 		switch (src_data) {
 		case 0: /* D5 vblank */
-			if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
-				if (rdev->irq.crtc_vblank_int[4]) {
-					drm_handle_vblank(rdev->ddev, 4);
-					rdev->pm.vblank_sync = true;
-					wake_up(&rdev->irq.vblank_queue);
-				}
-				if (atomic_read(&rdev->irq.pflip[4]))
-					radeon_crtc_handle_flip(rdev, 4);
-				rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
-				DRM_DEBUG("IH: D5 vblank\n");
+			if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT))
+				DRM_DEBUG("IH: D5 vblank - IH event w/o asserted irq bit?\n");
+
+			if (rdev->irq.crtc_vblank_int[4]) {
+				drm_handle_vblank(rdev->ddev, 4);
+				rdev->pm.vblank_sync = true;
+				wake_up(&rdev->irq.vblank_queue);
 			}
+			if (atomic_read(&rdev->irq.pflip[4]))
+				radeon_crtc_handle_vblank(rdev, 4);
+			rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
+			DRM_DEBUG("IH: D5 vblank\n");
+
 			break;
 		case 1: /* D5 vline */
-			if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
-				rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
-				DRM_DEBUG("IH: D5 vline\n");
-			}
+			if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT))
+				DRM_DEBUG("IH: D5 vline - IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
+			DRM_DEBUG("IH: D5 vline\n");
+
 			break;
 		default:
 			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -4362,72 +5223,140 @@
 	case 6: /* D6 vblank/vline */
 		switch (src_data) {
 		case 0: /* D6 vblank */
-			if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
-				if (rdev->irq.crtc_vblank_int[5]) {
-					drm_handle_vblank(rdev->ddev, 5);
-					rdev->pm.vblank_sync = true;
-					wake_up(&rdev->irq.vblank_queue);
-				}
-				if (atomic_read(&rdev->irq.pflip[5]))
-					radeon_crtc_handle_flip(rdev, 5);
-				rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
-				DRM_DEBUG("IH: D6 vblank\n");
+			if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT))
+				DRM_DEBUG("IH: D6 vblank - IH event w/o asserted irq bit?\n");
+
+			if (rdev->irq.crtc_vblank_int[5]) {
+				drm_handle_vblank(rdev->ddev, 5);
+				rdev->pm.vblank_sync = true;
+				wake_up(&rdev->irq.vblank_queue);
 			}
+			if (atomic_read(&rdev->irq.pflip[5]))
+				radeon_crtc_handle_vblank(rdev, 5);
+			rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
+			DRM_DEBUG("IH: D6 vblank\n");
+
 			break;
 		case 1: /* D6 vline */
-			if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
-				rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
-				DRM_DEBUG("IH: D6 vline\n");
-			}
+			if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT))
+				DRM_DEBUG("IH: D6 vline - IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
+			DRM_DEBUG("IH: D6 vline\n");
+
 			break;
 		default:
 			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
 			break;
 		}
 		break;
+	case 8: /* D1 page flip */
+	case 10: /* D2 page flip */
+	case 12: /* D3 page flip */
+	case 14: /* D4 page flip */
+	case 16: /* D5 page flip */
+	case 18: /* D6 page flip */
+		DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1);
+		if (radeon_use_pflipirq > 0)
+			radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
+		break;
 	case 42: /* HPD hotplug */
 		switch (src_data) {
 		case 0:
-			if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
-				rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
-				queue_hotplug = true;
-				DRM_DEBUG("IH: HPD1\n");
-			}
+			if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT))
+				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
+			queue_hotplug = true;
+			DRM_DEBUG("IH: HPD1\n");
 			break;
 		case 1:
-			if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
-				rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
-				queue_hotplug = true;
-				DRM_DEBUG("IH: HPD2\n");
-			}
+			if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT))
+				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
+			queue_hotplug = true;
+			DRM_DEBUG("IH: HPD2\n");
 			break;
 		case 2:
-			if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
-				rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
-				queue_hotplug = true;
-				DRM_DEBUG("IH: HPD3\n");
-			}
+			if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT))
+				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
+			queue_hotplug = true;
+			DRM_DEBUG("IH: HPD3\n");
 			break;
 		case 3:
-			if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
-				rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
-				queue_hotplug = true;
-				DRM_DEBUG("IH: HPD4\n");
-			}
+			if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT))
+				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
+			queue_hotplug = true;
+			DRM_DEBUG("IH: HPD4\n");
 			break;
 		case 4:
-			if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
-				rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
-				queue_hotplug = true;
-				DRM_DEBUG("IH: HPD5\n");
-			}
+			if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT))
+				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
+			queue_hotplug = true;
+			DRM_DEBUG("IH: HPD5\n");
 			break;
 		case 5:
-			if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
-				rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
-				queue_hotplug = true;
-				DRM_DEBUG("IH: HPD6\n");
-			}
+			if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT))
+				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
+			queue_hotplug = true;
+			DRM_DEBUG("IH: HPD6\n");
+			break;
+		case 6:
+			if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT))
+				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT;
+			queue_dp = true;
+			DRM_DEBUG("IH: HPD_RX 1\n");
+			break;
+		case 7:
+			if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT))
+				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
+			queue_dp = true;
+			DRM_DEBUG("IH: HPD_RX 2\n");
+			break;
+		case 8:
+			if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT))
+				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
+			queue_dp = true;
+			DRM_DEBUG("IH: HPD_RX 3\n");
+			break;
+		case 9:
+			if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT))
+				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
+			queue_dp = true;
+			DRM_DEBUG("IH: HPD_RX 4\n");
+			break;
+		case 10:
+			if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT))
+				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
+			queue_dp = true;
+			DRM_DEBUG("IH: HPD_RX 5\n");
+			break;
+		case 11:
+			if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT))
+				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
+			queue_dp = true;
+			DRM_DEBUG("IH: HPD_RX 6\n");
 			break;
 		default:
 			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -4437,64 +5366,79 @@
 	case 44: /* hdmi */
 		switch (src_data) {
 		case 0:
-			if (rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG) {
-				rdev->irq.stat_regs.evergreen.afmt_status1 &= ~AFMT_AZ_FORMAT_WTRIG;
-				queue_hdmi = true;
-				DRM_DEBUG("IH: HDMI0\n");
-			}
+			if (!(rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG))
+				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.evergreen.afmt_status1 &= ~AFMT_AZ_FORMAT_WTRIG;
+			queue_hdmi = true;
+			DRM_DEBUG("IH: HDMI0\n");
 			break;
 		case 1:
-			if (rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG) {
-				rdev->irq.stat_regs.evergreen.afmt_status2 &= ~AFMT_AZ_FORMAT_WTRIG;
-				queue_hdmi = true;
-				DRM_DEBUG("IH: HDMI1\n");
-			}
+			if (!(rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG))
+				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.evergreen.afmt_status2 &= ~AFMT_AZ_FORMAT_WTRIG;
+			queue_hdmi = true;
+			DRM_DEBUG("IH: HDMI1\n");
 			break;
 		case 2:
-			if (rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG) {
-				rdev->irq.stat_regs.evergreen.afmt_status3 &= ~AFMT_AZ_FORMAT_WTRIG;
-				queue_hdmi = true;
-				DRM_DEBUG("IH: HDMI2\n");
-			}
+			if (!(rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG))
+				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.evergreen.afmt_status3 &= ~AFMT_AZ_FORMAT_WTRIG;
+			queue_hdmi = true;
+			DRM_DEBUG("IH: HDMI2\n");
 			break;
 		case 3:
-			if (rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG) {
-				rdev->irq.stat_regs.evergreen.afmt_status4 &= ~AFMT_AZ_FORMAT_WTRIG;
-				queue_hdmi = true;
-				DRM_DEBUG("IH: HDMI3\n");
-			}
+			if (!(rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG))
+				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.evergreen.afmt_status4 &= ~AFMT_AZ_FORMAT_WTRIG;
+			queue_hdmi = true;
+			DRM_DEBUG("IH: HDMI3\n");
 			break;
 		case 4:
-			if (rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG) {
-				rdev->irq.stat_regs.evergreen.afmt_status5 &= ~AFMT_AZ_FORMAT_WTRIG;
-				queue_hdmi = true;
-				DRM_DEBUG("IH: HDMI4\n");
-			}
+			if (!(rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG))
+				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.evergreen.afmt_status5 &= ~AFMT_AZ_FORMAT_WTRIG;
+			queue_hdmi = true;
+			DRM_DEBUG("IH: HDMI4\n");
 			break;
 		case 5:
-			if (rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG) {
-				rdev->irq.stat_regs.evergreen.afmt_status6 &= ~AFMT_AZ_FORMAT_WTRIG;
-				queue_hdmi = true;
-				DRM_DEBUG("IH: HDMI5\n");
-			}
+			if (!(rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG))
+				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.evergreen.afmt_status6 &= ~AFMT_AZ_FORMAT_WTRIG;
+			queue_hdmi = true;
+			DRM_DEBUG("IH: HDMI5\n");
 			break;
 		default:
 			DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
 			break;
 		}
+	case 96:
+		DRM_ERROR("SRBM_READ_ERROR: 0x%x\n", RREG32(SRBM_READ_ERROR));
+		WREG32(SRBM_INT_ACK, 0x1);
+		break;
 	case 124: /* UVD */
 		DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
 		radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
 		break;
 	case 146:
 	case 147:
+		addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
+		status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
+		/* reset addr and status */
+		WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
+		if (addr == 0x0 && status == 0x0)
+			break;
 		dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
 		dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
-			RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
+			addr);
 		dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
-			RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
-		/* reset addr and status */
-		WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
+			status);
+		cayman_vm_decode_fault(rdev, status, addr);
 		break;
 	case 176: /* CP_INT in ring buffer */
 	case 177: /* CP_INT in IB1 */
@@ -4523,6 +5467,16 @@
 		DRM_DEBUG("IH: DMA trap\n");
 		radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
 		break;
+	case 230: /* thermal low to high */
+		DRM_DEBUG("IH: thermal low to high\n");
+		rdev->pm.dpm.thermal.high_to_low = false;
+		queue_thermal = true;
+		break;
+	case 231: /* thermal high to low */
+		DRM_DEBUG("IH: thermal high to low\n");
+		rdev->pm.dpm.thermal.high_to_low = true;
+		queue_thermal = true;
+		break;
 	case 233: /* GUI IDLE */
 		DRM_DEBUG("IH: GUI idle\n");
 		break;
@@ -4540,13 +5494,17 @@
 		/* wptr/rptr are in bytes! */
 		rptr += 16;
 		rptr &= rdev->ih.ptr_mask;
+		WREG32(IH_RB_RPTR, rptr);
 	}
+	if (queue_dp)
+		schedule_work(&rdev->dp_work);
 	if (queue_hotplug)
-		schedule_work(&rdev->hotplug_work);
+		schedule_delayed_work(&rdev->hotplug_work, 0);
 	if (queue_hdmi)
 		schedule_work(&rdev->audio_work);
+	if (queue_thermal && rdev->pm.dpm_enabled)
+		schedule_work(&rdev->pm.dpm.thermal.work);
 	rdev->ih.rptr = rptr;
-	WREG32(IH_RB_RPTR, rdev->ih.rptr);
 	atomic_set(&rdev->ih.lock, 0);

 	/* make sure wptr hasn't changed while processing */
@@ -4557,143 +5515,6 @@
 	return IRQ_HANDLED;
 }

-/**
- * evergreen_dma_fence_ring_emit - emit a fence on the DMA ring
- *
- * @rdev: radeon_device pointer
- * @fence: radeon fence object
- *
- * Add a DMA fence packet to the ring to write
- * the fence seq number and DMA trap packet to generate
- * an interrupt if needed (evergreen-SI).
- */
-void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
-				   struct radeon_fence *fence)
-{
-	struct radeon_ring *ring = &rdev->ring[fence->ring];
-	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
-	/* write the fence */
-	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0));
-	radeon_ring_write(ring, addr & 0xfffffffc);
-	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
-	radeon_ring_write(ring, fence->seq);
-	/* generate an interrupt */
-	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0));
-	/* flush HDP */
-	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0));
-	radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
-	radeon_ring_write(ring, 1);
-}
-
-/**
- * evergreen_dma_ring_ib_execute - schedule an IB on the DMA engine
- *
- * @rdev: radeon_device pointer
- * @ib: IB object to schedule
- *
- * Schedule an IB in the DMA ring (evergreen).
- */
-void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
-				   struct radeon_ib *ib)
-{
-	struct radeon_ring *ring = &rdev->ring[ib->ring];
-
-	if (rdev->wb.enabled) {
-		u32 next_rptr = ring->wptr + 4;
-		while ((next_rptr & 7) != 5)
-			next_rptr++;
-		next_rptr += 3;
-		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 1));
-		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
-		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
-		radeon_ring_write(ring, next_rptr);
-	}
-
-	/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
-	 * Pad as necessary with NOPs.
-	 */
-	while ((ring->wptr & 7) != 5)
-		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0));
-	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0));
-	radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
-	radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
-
-}
-
-/**
- * evergreen_copy_dma - copy pages using the DMA engine
- *
- * @rdev: radeon_device pointer
- * @src_offset: src GPU address
- * @dst_offset: dst GPU address
- * @num_gpu_pages: number of GPU pages to xfer
- * @fence: radeon fence object
- *
- * Copy GPU paging using the DMA engine (evergreen-cayman).
- * Used by the radeon ttm implementation to move pages if
- * registered as the asic copy callback.
- */
-int evergreen_copy_dma(struct radeon_device *rdev,
-		       uint64_t src_offset, uint64_t dst_offset,
-		       unsigned num_gpu_pages,
-		       struct radeon_fence **fence)
-{
-	struct radeon_semaphore *sem = NULL;
-	int ring_index = rdev->asic->copy.dma_ring_index;
-	struct radeon_ring *ring = &rdev->ring[ring_index];
-	u32 size_in_dw, cur_size_in_dw;
-	int i, num_loops;
-	int r = 0;
-
-	r = radeon_semaphore_create(rdev, &sem);
-	if (r) {
-		DRM_ERROR("radeon: moving bo (%d).\n", r);
-		return r;
-	}
-
-	size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
-	num_loops = DIV_ROUND_UP(size_in_dw, 0xfffff);
-	r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
-	if (r) {
-		DRM_ERROR("radeon: moving bo (%d).\n", r);
-		radeon_semaphore_free(rdev, &sem, NULL);
-		return r;
-	}
-
-	if (radeon_fence_need_sync(*fence, ring->idx)) {
-		radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
-					    ring->idx);
-		radeon_fence_note_sync(*fence, ring->idx);
-	} else {
-		radeon_semaphore_free(rdev, &sem, NULL);
-	}
-
-	for (i = 0; i < num_loops; i++) {
-		cur_size_in_dw = size_in_dw;
-		if (cur_size_in_dw > 0xFFFFF)
-			cur_size_in_dw = 0xFFFFF;
-		size_in_dw -= cur_size_in_dw;
-		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, cur_size_in_dw));
-		radeon_ring_write(ring, dst_offset & 0xfffffffc);
-		radeon_ring_write(ring, src_offset & 0xfffffffc);
-		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
-		radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
-		src_offset += cur_size_in_dw * 4;
-		dst_offset += cur_size_in_dw * 4;
-	}
-
-	r = radeon_fence_emit(rdev, fence, ring->idx);
-	if (r) {
-		radeon_ring_unlock_undo(rdev, ring);
-		return r;
-	}
-
-	radeon_ring_unlock_commit(rdev, ring);
-	radeon_semaphore_free(rdev, &sem, *fence);
-
-	return r;
-}
-
 static int evergreen_startup(struct radeon_device *rdev)
 {
 	struct radeon_ring *ring;
@@ -4701,36 +5522,24 @@
 	/* enable pcie gen2 link */
 	evergreen_pcie_gen2_enable(rdev);
+	/* enable aspm */
+	evergreen_program_aspm(rdev);
+
+	/* scratch needs to be initialized before MC */
+	r = r600_vram_scratch_init(rdev);
+	if (r)
+		return r;

 	evergreen_mc_program(rdev);

-	if (ASIC_IS_DCE5(rdev)) {
-		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
-			r = ni_init_microcode(rdev);
-			if (r) {
-				DRM_ERROR("Failed to load firmware!\n");
-				return r;
-			}
-		}
+	if (ASIC_IS_DCE5(rdev) && !rdev->pm.dpm_enabled) {
 		r = ni_mc_load_microcode(rdev);
 		if (r) {
 			DRM_ERROR("Failed to load MC firmware!\n");
 			return r;
 		}
-	} else {
-		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
-			r = r600_init_microcode(rdev);
-			if (r) {
-				DRM_ERROR("Failed to load firmware!\n");
-				return r;
-			}
-		}
 	}

-	r = r600_vram_scratch_init(rdev);
-	if (r)
-		return r;
-
 	if (rdev->flags & RADEON_IS_AGP) {
 		evergreen_agp_enable(rdev);
 	} else {
@@ -4740,11 +5549,17 @@
 	}
 	evergreen_gpu_init(rdev);

-	r = evergreen_blit_init(rdev);
-	if (r) {
-		r600_blit_fini(rdev);
-		rdev->asic->copy.copy = NULL;
-		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
+	/* allocate rlc buffers */
+	if (rdev->flags & RADEON_IS_IGP) {
+		rdev->rlc.reg_list = sumo_rlc_save_restore_register_list;
+		rdev->rlc.reg_list_size =
+			(u32)ARRAY_SIZE(sumo_rlc_save_restore_register_list);
+		rdev->rlc.cs_data = evergreen_cs_data;
+		r = sumo_rlc_init(rdev);
+		if (r) {
+			DRM_ERROR("Failed to init rlc BOs!\n");
+			return r;
+		}
 	}

 	/* allocate wb buffer */
@@ -4764,7 +5579,7 @@
 		return r;
 	}

-	r = rv770_uvd_resume(rdev);
+	r = uvd_v2_2_resume(rdev);
 	if (!r) {
 		r = radeon_fence_driver_start_ring(rdev,
 						   R600_RING_TYPE_UVD_INDEX);
@@ -4792,15 +5607,13 @@
 	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
-			     R600_CP_RB_RPTR, R600_CP_RB_WPTR,
-			     0, 0xfffff, RADEON_CP_PACKET2);
+			     RADEON_CP_PACKET2);
 	if (r)
 		return r;

 	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
 	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
-			     DMA_RB_RPTR, DMA_RB_WPTR,
-			     2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0));
+			     DMA_PACKET(DMA_PACKET_NOP, 0, 0));
 	if (r)
 		return r;

@@ -4816,12 +5629,10 @@
 	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
 	if (ring->ring_size) {
-		r = radeon_ring_init(rdev, ring, ring->ring_size,
-				     R600_WB_UVD_RPTR_OFFSET,
-				     UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
-				     0, 0xfffff, RADEON_CP_PACKET2);
+		r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
+				     RADEON_CP_PACKET2);
 		if (!r)
-			r = r600_uvd_init(rdev);
+			r = uvd_v1_0_init(rdev);
 		if (r)
 			DRM_ERROR("radeon: error initializing UVD (%d).\n", r);
@@ -4833,7 +5644,7 @@
 		return r;
 	}

-	r = r600_audio_init(rdev);
+	r = radeon_audio_init(rdev);
 	if (r) {
 		DRM_ERROR("radeon: audio init failed\n");
 		return r;
@@ -4861,6 +5672,9 @@
 	/* init golden registers */
 	evergreen_init_golden_registers(rdev);

+	if (rdev->pm.pm_method == PM_METHOD_DPM)
+		radeon_pm_resume(rdev);
+
 	rdev->accel_working = true;
 	r = evergreen_startup(rdev);
 	if (r) {
@@ -4875,8 +5689,9 @@

 int evergreen_suspend(struct radeon_device *rdev)
 {
-	r600_audio_fini(rdev);
-	r600_uvd_stop(rdev);
+	radeon_pm_suspend(rdev);
+	radeon_audio_fini(rdev);
+	uvd_v1_0_fini(rdev);
 	radeon_uvd_suspend(rdev);
 	r700_cp_stop(rdev);
 	r600_dma_stop(rdev);
@@ -4951,6 +5766,27 @@
 	if (r)
 		return r;

+	if (ASIC_IS_DCE5(rdev)) {
+		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
+			r = ni_init_microcode(rdev);
+			if (r) {
+				DRM_ERROR("Failed to load firmware!\n");
+				return r;
+			}
+		}
+	} else {
+		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+			r = r600_init_microcode(rdev);
+			if (r) {
+				DRM_ERROR("Failed to load firmware!\n");
+				return r;
+			}
+		}
+	}
+
+	/* Initialize power management */
+	radeon_pm_init(rdev);
+
 	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
 	r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);

@@ -4978,6 +5814,8 @@
 		r700_cp_fini(rdev);
 		r600_dma_fini(rdev);
 		r600_irq_fini(rdev);
+		if (rdev->flags & RADEON_IS_IGP)
+			sumo_rlc_fini(rdev);
 		radeon_wb_fini(rdev);
 		radeon_ib_pool_fini(rdev);
 		radeon_irq_kms_fini(rdev);
@@ -5001,17 +5839,19 @@

 void evergreen_fini(struct radeon_device *rdev)
 {
-	r600_audio_fini(rdev);
-	r600_blit_fini(rdev);
+	radeon_pm_fini(rdev);
+	radeon_audio_fini(rdev);
 	r700_cp_fini(rdev);
 	r600_dma_fini(rdev);
 	r600_irq_fini(rdev);
+	if (rdev->flags & RADEON_IS_IGP)
+		sumo_rlc_fini(rdev);
 	radeon_wb_fini(rdev);
 	radeon_ib_pool_fini(rdev);
 	radeon_irq_kms_fini(rdev);
-	evergreen_pcie_gart_fini(rdev);
-	r600_uvd_stop(rdev);
+	uvd_v1_0_fini(rdev);
 	radeon_uvd_fini(rdev);
+	evergreen_pcie_gart_fini(rdev);
 	r600_vram_scratch_fini(rdev);
 	radeon_gem_fini(rdev);
 	radeon_fence_driver_fini(rdev);
@@ -5084,3 +5924,153 @@
 		WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
 	}
 }
+
+void evergreen_program_aspm(struct radeon_device *rdev)
+{
+	u32 data, orig;
+	u32 pcie_lc_cntl, pcie_lc_cntl_old;
+	bool disable_l0s, disable_l1 = false, disable_plloff_in_l1 = false;
+	/* fusion_platform = true
+	 * if the system is a fusion system
+	 * (APU or DGPU in a fusion system).
+	 * todo: check if the system is a fusion platform.
+	 */
+	bool fusion_platform = false;
+
+	if (radeon_aspm == 0)
+		return;
+
+	if (!(rdev->flags & RADEON_IS_PCIE))
+		return;
+
+	switch (rdev->family) {
+	case CHIP_CYPRESS:
+	case CHIP_HEMLOCK:
+	case CHIP_JUNIPER:
+	case CHIP_REDWOOD:
+	case CHIP_CEDAR:
+	case CHIP_SUMO:
+	case CHIP_SUMO2:
+	case CHIP_PALM:
+	case CHIP_ARUBA:
+		disable_l0s = true;
+		break;
+	default:
+		disable_l0s = false;
+		break;
+	}
+
+	if (rdev->flags & RADEON_IS_IGP)
+		fusion_platform = true; /* XXX also dGPUs in a fusion system */
+
+	data = orig = RREG32_PIF_PHY0(PB0_PIF_PAIRING);
+	if (fusion_platform)
+		data &= ~MULTI_PIF;
+	else
+		data |= MULTI_PIF;
+	if (data != orig)
+		WREG32_PIF_PHY0(PB0_PIF_PAIRING, data);
+
+	data = orig = RREG32_PIF_PHY1(PB1_PIF_PAIRING);
+	if (fusion_platform)
+		data &= ~MULTI_PIF;
+	else
+		data |= MULTI_PIF;
+	if (data != orig)
+		WREG32_PIF_PHY1(PB1_PIF_PAIRING, data);
+
+	pcie_lc_cntl = pcie_lc_cntl_old = RREG32_PCIE_PORT(PCIE_LC_CNTL);
+	pcie_lc_cntl &= ~(LC_L0S_INACTIVITY_MASK | LC_L1_INACTIVITY_MASK);
+	if (!disable_l0s) {
+		if (rdev->family >= CHIP_BARTS)
+			pcie_lc_cntl |= LC_L0S_INACTIVITY(7);
+		else
+			pcie_lc_cntl |= LC_L0S_INACTIVITY(3);
+	}
+
+	if (!disable_l1) {
+		if (rdev->family >= CHIP_BARTS)
+			pcie_lc_cntl |= LC_L1_INACTIVITY(7);
+		else
+			pcie_lc_cntl |= LC_L1_INACTIVITY(8);
+
+		if (!disable_plloff_in_l1) {
+			data = orig = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0);
+			data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
+			data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
+			if (data != orig)
+				WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0, data);
+
+			data = orig = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1);
+			data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
+			data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
+			if (data != orig)
+				WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1, data);
+
+			data = orig = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0);
+			data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
+			data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
+			if (data != orig)
+				WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0, data);
+
+			data = orig = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1);
+			data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
+			data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
+			if (data != orig)
+				WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1, data);
+
+			if (rdev->family >= CHIP_BARTS) {
+				data = orig = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0);
+				data &= ~PLL_RAMP_UP_TIME_0_MASK;
+				data |= PLL_RAMP_UP_TIME_0(4);
+				if (data != orig)
+					WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0, data);
+
+				data = orig = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1);
+				data &= ~PLL_RAMP_UP_TIME_1_MASK;
+				data |= PLL_RAMP_UP_TIME_1(4);
+				if (data != orig)
+					WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1, data);
+
+				data = orig = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0);
+				data &= ~PLL_RAMP_UP_TIME_0_MASK;
+				data |= PLL_RAMP_UP_TIME_0(4);
+				if (data != orig)
+					WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0, data);
+
+				data = orig = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1);
+				data &= ~PLL_RAMP_UP_TIME_1_MASK;
+				data |= PLL_RAMP_UP_TIME_1(4);
+				if (data != orig)
+					WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1, data);
+			}
+
+			data = orig = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+			data &= ~LC_DYN_LANES_PWR_STATE_MASK;
+			data |= LC_DYN_LANES_PWR_STATE(3);
+			if (data != orig)
+				WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, data);
+
+			if (rdev->family >= CHIP_BARTS) {
+				data = orig = RREG32_PIF_PHY0(PB0_PIF_CNTL);
+				data &= ~LS2_EXIT_TIME_MASK;
+				data |= LS2_EXIT_TIME(1);
+				if (data != orig)
+					WREG32_PIF_PHY0(PB0_PIF_CNTL, data);
+
+				data = orig = RREG32_PIF_PHY1(PB1_PIF_CNTL);
+				data &= ~LS2_EXIT_TIME_MASK;
+				data |= LS2_EXIT_TIME(1);
+				if (data != orig)
+					WREG32_PIF_PHY1(PB1_PIF_CNTL, data);
+			}
+		}
+	}
+
+	/* evergreen parts only */
+	if (rdev->family < CHIP_BARTS)
+		pcie_lc_cntl |= LC_PMI_TO_L1_DIS;
+
+	if (pcie_lc_cntl != pcie_lc_cntl_old)
+		WREG32_PCIE_PORT(PCIE_LC_CNTL, pcie_lc_cntl);
+}
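
Note on the reworked IH handling earlier in this patch: every display interrupt source now follows the same shape - warn when the IH ring delivers an event whose latched status bit is not set, then clear the bit unconditionally and remember to queue the follow-up work. A minimal sketch of that shape, using a hypothetical helper name (eg_ack_disp_irq is not part of the driver) and assuming only the DRM_DEBUG macro and the status words visible in the hunks above:

/*
 * Illustration only, not in the patch: the per-source pattern used by
 * the reworked evergreen IH processing.
 */
static void eg_ack_disp_irq(u32 *stat_reg, u32 mask, bool *queue_flag, const char *name)
{
	if (!(*stat_reg & mask))
		DRM_DEBUG("IH: %s - IH event w/o asserted irq bit?\n", name);

	*stat_reg &= ~mask;
	*queue_flag = true;
	DRM_DEBUG("IH: %s\n", name);
}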
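
The evergreen_program_aspm() hunk above repeats one read-modify-write idiom for the indirect PIF PHY registers: read the register, clear and set the relevant fields, and write back only when the value actually changed. A minimal sketch of that idiom as a hypothetical helper (eg_pif_phy0_rmw is not in the driver; it assumes the RREG32_PIF_PHY0/WREG32_PIF_PHY0 accessors used in the patch, which reference the local rdev):

/* Illustration only: factor out the "write back only if changed" idiom. */
static void eg_pif_phy0_rmw(struct radeon_device *rdev, u32 reg, u32 clear, u32 set)
{
	u32 orig, data;

	orig = data = RREG32_PIF_PHY0(reg);
	data &= ~clear;
	data |= set;
	if (data != orig)
		WREG32_PIF_PHY0(reg, data);
}

/* e.g. eg_pif_phy0_rmw(rdev, PB0_PIF_CNTL, LS2_EXIT_TIME_MASK, LS2_EXIT_TIME(1)); */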