--- zzzz-none-000/linux-3.10.107/drivers/gpu/drm/i915/i915_irq.c	2017-06-27 09:49:32.000000000 +0000
+++ scorpion-7490-727/linux-3.10.107/drivers/gpu/drm/i915/i915_irq.c	2021-02-04 17:41:59.000000000 +0000
@@ -30,13 +30,34 @@
 #include <linux/sysrq.h>
 #include <linux/slab.h>
+#include <linux/circ_buf.h>
 #include <drm/drmP.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 #include "i915_trace.h"
 #include "intel_drv.h"
 
-static const u32 hpd_ibx[] = {
+/**
+ * DOC: interrupt handling
+ *
+ * These functions provide the basic support for enabling and disabling the
+ * interrupt handling support. There's a lot more functionality in i915_irq.c
+ * and related files, but that will be described in separate chapters.
+ */
+
+static const u32 hpd_ilk[HPD_NUM_PINS] = {
+	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
+};
+
+static const u32 hpd_ivb[HPD_NUM_PINS] = {
+	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
+};
+
+static const u32 hpd_bdw[HPD_NUM_PINS] = {
+	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
+};
+
+static const u32 hpd_ibx[HPD_NUM_PINS] = {
 	[HPD_CRT] = SDE_CRT_HOTPLUG,
 	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
@@ -44,7 +65,7 @@
 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
 };
 
-static const u32 hpd_cpt[] = {
+static const u32 hpd_cpt[HPD_NUM_PINS] = {
 	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
 	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
@@ -52,7 +73,15 @@
 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
 };
 
-static const u32 hpd_mask_i915[] = {
+static const u32 hpd_spt[HPD_NUM_PINS] = {
+	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
+	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
+	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
+	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
+	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
+};
+
+static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
 	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
@@ -61,7 +90,7 @@
 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
 };
 
-static const u32 hpd_status_gen4[] = {
+static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
 	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
@@ -70,7 +99,7 @@
 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
 };
 
-static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
+static const u32 hpd_status_i915[HPD_NUM_PINS] = {
 	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
@@ -79,120 +108,581 @@
 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
 };
 
-static void ibx_hpd_irq_setup(struct drm_device *dev);
-static void i915_hpd_irq_setup(struct drm_device *dev);
+/* BXT hpd list */
+static const u32 hpd_bxt[HPD_NUM_PINS] = {
+	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
+	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
+	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
+};
+
+/* IIR can theoretically queue up two events. Be paranoid.
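+ * Hence the reset macros below clear IIR twice, with a posting read
+ * after each write: the first write retires the event currently
+ * latched, the second catches the one that may be queued behind it.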
*/ +#define GEN8_IRQ_RESET_NDX(type, which) do { \ + I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \ + POSTING_READ(GEN8_##type##_IMR(which)); \ + I915_WRITE(GEN8_##type##_IER(which), 0); \ + I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ + POSTING_READ(GEN8_##type##_IIR(which)); \ + I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ + POSTING_READ(GEN8_##type##_IIR(which)); \ +} while (0) + +#define GEN5_IRQ_RESET(type) do { \ + I915_WRITE(type##IMR, 0xffffffff); \ + POSTING_READ(type##IMR); \ + I915_WRITE(type##IER, 0); \ + I915_WRITE(type##IIR, 0xffffffff); \ + POSTING_READ(type##IIR); \ + I915_WRITE(type##IIR, 0xffffffff); \ + POSTING_READ(type##IIR); \ +} while (0) + +/* + * We should clear IMR at preinstall/uninstall, and just check at postinstall. + */ +static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv, u32 reg) +{ + u32 val = I915_READ(reg); + + if (val == 0) + return; + + WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", + reg, val); + I915_WRITE(reg, 0xffffffff); + POSTING_READ(reg); + I915_WRITE(reg, 0xffffffff); + POSTING_READ(reg); +} + +#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \ + gen5_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \ + I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \ + I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \ + POSTING_READ(GEN8_##type##_IMR(which)); \ +} while (0) + +#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \ + gen5_assert_iir_is_zero(dev_priv, type##IIR); \ + I915_WRITE(type##IER, (ier_val)); \ + I915_WRITE(type##IMR, (imr_val)); \ + POSTING_READ(type##IMR); \ +} while (0) + +static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir); /* For display hotplug interrupt */ -static void -ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask) +static inline void +i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv, + uint32_t mask, + uint32_t bits) +{ + uint32_t val; + + assert_spin_locked(&dev_priv->irq_lock); + WARN_ON(bits & ~mask); + + val = I915_READ(PORT_HOTPLUG_EN); + val &= ~mask; + val |= bits; + I915_WRITE(PORT_HOTPLUG_EN, val); +} + +/** + * i915_hotplug_interrupt_update - update hotplug interrupt enable + * @dev_priv: driver private + * @mask: bits to update + * @bits: bits to enable + * NOTE: the HPD enable bits are modified both inside and outside + * of an interrupt context. To avoid that read-modify-write cycles + * interfer, these bits are protected by a spinlock. Since this + * function is usually not called from a context where the lock is + * held already, this function acquires the lock itself. A non-locking + * version is also available. 
+ */ +void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv, + uint32_t mask, + uint32_t bits) +{ + spin_lock_irq(&dev_priv->irq_lock); + i915_hotplug_interrupt_update_locked(dev_priv, mask, bits); + spin_unlock_irq(&dev_priv->irq_lock); +} + +/** + * ilk_update_display_irq - update DEIMR + * @dev_priv: driver private + * @interrupt_mask: mask of interrupt bits to update + * @enabled_irq_mask: mask of interrupt bits to enable + */ +static void ilk_update_display_irq(struct drm_i915_private *dev_priv, + uint32_t interrupt_mask, + uint32_t enabled_irq_mask) { - if ((dev_priv->irq_mask & mask) != 0) { - dev_priv->irq_mask &= ~mask; + uint32_t new_val; + + assert_spin_locked(&dev_priv->irq_lock); + + WARN_ON(enabled_irq_mask & ~interrupt_mask); + + if (WARN_ON(!intel_irqs_enabled(dev_priv))) + return; + + new_val = dev_priv->irq_mask; + new_val &= ~interrupt_mask; + new_val |= (~enabled_irq_mask & interrupt_mask); + + if (new_val != dev_priv->irq_mask) { + dev_priv->irq_mask = new_val; I915_WRITE(DEIMR, dev_priv->irq_mask); POSTING_READ(DEIMR); } } -static void -ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask) +void +ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask) { - if ((dev_priv->irq_mask & mask) != mask) { - dev_priv->irq_mask |= mask; - I915_WRITE(DEIMR, dev_priv->irq_mask); - POSTING_READ(DEIMR); - } + ilk_update_display_irq(dev_priv, mask, mask); } void -i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) +ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask) +{ + ilk_update_display_irq(dev_priv, mask, 0); +} + +/** + * ilk_update_gt_irq - update GTIMR + * @dev_priv: driver private + * @interrupt_mask: mask of interrupt bits to update + * @enabled_irq_mask: mask of interrupt bits to enable + */ +static void ilk_update_gt_irq(struct drm_i915_private *dev_priv, + uint32_t interrupt_mask, + uint32_t enabled_irq_mask) +{ + assert_spin_locked(&dev_priv->irq_lock); + + WARN_ON(enabled_irq_mask & ~interrupt_mask); + + if (WARN_ON(!intel_irqs_enabled(dev_priv))) + return; + + dev_priv->gt_irq_mask &= ~interrupt_mask; + dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask); + I915_WRITE(GTIMR, dev_priv->gt_irq_mask); + POSTING_READ(GTIMR); +} + +void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) +{ + ilk_update_gt_irq(dev_priv, mask, mask); +} + +void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) +{ + ilk_update_gt_irq(dev_priv, mask, 0); +} + +static u32 gen6_pm_iir(struct drm_i915_private *dev_priv) +{ + return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR; +} + +static u32 gen6_pm_imr(struct drm_i915_private *dev_priv) +{ + return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR; +} + +static u32 gen6_pm_ier(struct drm_i915_private *dev_priv) +{ + return INTEL_INFO(dev_priv)->gen >= 8 ? 
GEN8_GT_IER(2) : GEN6_PMIER; +} + +/** + * snb_update_pm_irq - update GEN6_PMIMR + * @dev_priv: driver private + * @interrupt_mask: mask of interrupt bits to update + * @enabled_irq_mask: mask of interrupt bits to enable + */ +static void snb_update_pm_irq(struct drm_i915_private *dev_priv, + uint32_t interrupt_mask, + uint32_t enabled_irq_mask) +{ + uint32_t new_val; + + WARN_ON(enabled_irq_mask & ~interrupt_mask); + + assert_spin_locked(&dev_priv->irq_lock); + + new_val = dev_priv->pm_irq_mask; + new_val &= ~interrupt_mask; + new_val |= (~enabled_irq_mask & interrupt_mask); + + if (new_val != dev_priv->pm_irq_mask) { + dev_priv->pm_irq_mask = new_val; + I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask); + POSTING_READ(gen6_pm_imr(dev_priv)); + } +} + +void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) +{ + if (WARN_ON(!intel_irqs_enabled(dev_priv))) + return; + + snb_update_pm_irq(dev_priv, mask, mask); +} + +static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv, + uint32_t mask) +{ + snb_update_pm_irq(dev_priv, mask, 0); +} + +void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) +{ + if (WARN_ON(!intel_irqs_enabled(dev_priv))) + return; + + __gen6_disable_pm_irq(dev_priv, mask); +} + +void gen6_reset_rps_interrupts(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t reg = gen6_pm_iir(dev_priv); + + spin_lock_irq(&dev_priv->irq_lock); + I915_WRITE(reg, dev_priv->pm_rps_events); + I915_WRITE(reg, dev_priv->pm_rps_events); + POSTING_READ(reg); + dev_priv->rps.pm_iir = 0; + spin_unlock_irq(&dev_priv->irq_lock); +} + +void gen6_enable_rps_interrupts(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + spin_lock_irq(&dev_priv->irq_lock); + + WARN_ON(dev_priv->rps.pm_iir); + WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events); + dev_priv->rps.interrupts_enabled = true; + I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) | + dev_priv->pm_rps_events); + gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); + + spin_unlock_irq(&dev_priv->irq_lock); +} + +u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask) +{ + /* + * SNB,IVB can while VLV,CHV may hard hang on looping batchbuffer + * if GEN6_PM_UP_EI_EXPIRED is masked. + * + * TODO: verify if this can be reproduced on VLV,CHV. 
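	 * (That is: SNB/IVB are known to hang this way, while for VLV/CHV
	 * it is so far only suspected, hence the TODO above.)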
+ */ + if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv)) + mask &= ~GEN6_PM_RP_UP_EI_EXPIRED; + + if (INTEL_INFO(dev_priv)->gen >= 8) + mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP; + + return mask; +} + +void gen6_disable_rps_interrupts(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + spin_lock_irq(&dev_priv->irq_lock); + dev_priv->rps.interrupts_enabled = false; + spin_unlock_irq(&dev_priv->irq_lock); + + cancel_work_sync(&dev_priv->rps.work); + + spin_lock_irq(&dev_priv->irq_lock); + + I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0)); + + __gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events); + I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) & + ~dev_priv->pm_rps_events); + + spin_unlock_irq(&dev_priv->irq_lock); + + synchronize_irq(dev->irq); +} + +/** + * bdw_update_port_irq - update DE port interrupt + * @dev_priv: driver private + * @interrupt_mask: mask of interrupt bits to update + * @enabled_irq_mask: mask of interrupt bits to enable + */ +static void bdw_update_port_irq(struct drm_i915_private *dev_priv, + uint32_t interrupt_mask, + uint32_t enabled_irq_mask) +{ + uint32_t new_val; + uint32_t old_val; + + assert_spin_locked(&dev_priv->irq_lock); + + WARN_ON(enabled_irq_mask & ~interrupt_mask); + + if (WARN_ON(!intel_irqs_enabled(dev_priv))) + return; + + old_val = I915_READ(GEN8_DE_PORT_IMR); + + new_val = old_val; + new_val &= ~interrupt_mask; + new_val |= (~enabled_irq_mask & interrupt_mask); + + if (new_val != old_val) { + I915_WRITE(GEN8_DE_PORT_IMR, new_val); + POSTING_READ(GEN8_DE_PORT_IMR); + } +} + +/** + * ibx_display_interrupt_update - update SDEIMR + * @dev_priv: driver private + * @interrupt_mask: mask of interrupt bits to update + * @enabled_irq_mask: mask of interrupt bits to enable + */ +void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, + uint32_t interrupt_mask, + uint32_t enabled_irq_mask) +{ + uint32_t sdeimr = I915_READ(SDEIMR); + sdeimr &= ~interrupt_mask; + sdeimr |= (~enabled_irq_mask & interrupt_mask); + + WARN_ON(enabled_irq_mask & ~interrupt_mask); + + assert_spin_locked(&dev_priv->irq_lock); + + if (WARN_ON(!intel_irqs_enabled(dev_priv))) + return; + + I915_WRITE(SDEIMR, sdeimr); + POSTING_READ(SDEIMR); +} + +static void +__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, + u32 enable_mask, u32 status_mask) { u32 reg = PIPESTAT(pipe); - u32 pipestat = I915_READ(reg) & 0x7fff0000; + u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK; + + assert_spin_locked(&dev_priv->irq_lock); + WARN_ON(!intel_irqs_enabled(dev_priv)); + + if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK || + status_mask & ~PIPESTAT_INT_STATUS_MASK, + "pipe %c: enable_mask=0x%x, status_mask=0x%x\n", + pipe_name(pipe), enable_mask, status_mask)) + return; - if ((pipestat & mask) == mask) + if ((pipestat & enable_mask) == enable_mask) return; + dev_priv->pipestat_irq_mask[pipe] |= status_mask; + /* Enable the interrupt, clear any pending status */ - pipestat |= mask | (mask >> 16); + pipestat |= enable_mask | status_mask; I915_WRITE(reg, pipestat); POSTING_READ(reg); } -void -i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) +static void +__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, + u32 enable_mask, u32 status_mask) { u32 reg = PIPESTAT(pipe); - u32 pipestat = I915_READ(reg) & 0x7fff0000; + u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK; - if ((pipestat & mask) == 0) + 
assert_spin_locked(&dev_priv->irq_lock); + WARN_ON(!intel_irqs_enabled(dev_priv)); + + if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK || + status_mask & ~PIPESTAT_INT_STATUS_MASK, + "pipe %c: enable_mask=0x%x, status_mask=0x%x\n", + pipe_name(pipe), enable_mask, status_mask)) return; - pipestat &= ~mask; + if ((pipestat & enable_mask) == 0) + return; + + dev_priv->pipestat_irq_mask[pipe] &= ~status_mask; + + pipestat &= ~enable_mask; I915_WRITE(reg, pipestat); POSTING_READ(reg); } +static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask) +{ + u32 enable_mask = status_mask << 16; + + /* + * On pipe A we don't support the PSR interrupt yet, + * on pipe B and C the same bit MBZ. + */ + if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV)) + return 0; + /* + * On pipe B and C we don't support the PSR interrupt yet, on pipe + * A the same bit is for perf counters which we don't use either. + */ + if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV)) + return 0; + + enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS | + SPRITE0_FLIP_DONE_INT_EN_VLV | + SPRITE1_FLIP_DONE_INT_EN_VLV); + if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV) + enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV; + if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV) + enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV; + + return enable_mask; +} + +void +i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, + u32 status_mask) +{ + u32 enable_mask; + + if (IS_VALLEYVIEW(dev_priv->dev)) + enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev, + status_mask); + else + enable_mask = status_mask << 16; + __i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask); +} + +void +i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, + u32 status_mask) +{ + u32 enable_mask; + + if (IS_VALLEYVIEW(dev_priv->dev)) + enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev, + status_mask); + else + enable_mask = status_mask << 16; + __i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask); +} + /** - * intel_enable_asle - enable ASLE interrupt for OpRegion + * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion + * @dev: drm device */ -void intel_enable_asle(struct drm_device *dev) +static void i915_enable_asle_pipestat(struct drm_device *dev) { - drm_i915_private_t *dev_priv = dev->dev_private; - unsigned long irqflags; + struct drm_i915_private *dev_priv = dev->dev_private; - /* FIXME: opregion/asle for VLV */ - if (IS_VALLEYVIEW(dev)) + if (!dev_priv->opregion.asle || !IS_MOBILE(dev)) return; - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + spin_lock_irq(&dev_priv->irq_lock); - if (HAS_PCH_SPLIT(dev)) - ironlake_enable_display_irq(dev_priv, DE_GSE); - else { - i915_enable_pipestat(dev_priv, 1, - PIPE_LEGACY_BLC_EVENT_ENABLE); - if (INTEL_INFO(dev)->gen >= 4) - i915_enable_pipestat(dev_priv, 0, - PIPE_LEGACY_BLC_EVENT_ENABLE); - } + i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS); + if (INTEL_INFO(dev)->gen >= 4) + i915_enable_pipestat(dev_priv, PIPE_A, + PIPE_LEGACY_BLC_EVENT_STATUS); - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + spin_unlock_irq(&dev_priv->irq_lock); } -/** - * i915_pipe_enabled - check if a pipe is enabled - * @dev: DRM device - * @pipe: pipe to check +/* + * This timing diagram depicts the video signal in and + * around the vertical blanking period. 
+ * + * Assumptions about the fictitious mode used in this example: + * vblank_start >= 3 + * vsync_start = vblank_start + 1 + * vsync_end = vblank_start + 2 + * vtotal = vblank_start + 3 + * + * start of vblank: + * latch double buffered registers + * increment frame counter (ctg+) + * generate start of vblank interrupt (gen4+) + * | + * | frame start: + * | generate frame start interrupt (aka. vblank interrupt) (gmch) + * | may be shifted forward 1-3 extra lines via PIPECONF + * | | + * | | start of vsync: + * | | generate vsync interrupt + * | | | + * ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx + * . \hs/ . \hs/ \hs/ \hs/ . \hs/ + * ----va---> <-----------------vb--------------------> <--------va------------- + * | | <----vs-----> | + * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2) + * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+) + * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi) + * | | | + * last visible pixel first visible pixel + * | increment frame counter (gen3/4) + * pixel counter = vblank_start * htotal pixel counter = 0 (gen3/4) * - * Reading certain registers when the pipe is disabled can hang the chip. - * Use this routine to make sure the PLL is running and the pipe is active - * before reading such registers if unsure. - */ -static int -i915_pipe_enabled(struct drm_device *dev, int pipe) -{ - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, - pipe); + * x = horizontal active + * _ = horizontal blanking + * hs = horizontal sync + * va = vertical active + * vb = vertical blanking + * vs = vertical sync + * vbs = vblank_start (number) + * + * Summary: + * - most events happen at the start of horizontal sync + * - frame start happens at the start of horizontal blank, 1-4 lines + * (depending on PIPECONF settings) after the start of vblank + * - gen3/4 pixel and frame counter are synchronized with the start + * of horizontal active on the first line of vertical active + */ - return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE; +static u32 i8xx_get_vblank_counter(struct drm_device *dev, unsigned int pipe) +{ + /* Gen2 doesn't have a hardware frame counter */ + return 0; } /* Called from drm generic code, passed a 'crtc', which * we use as a pipe index */ -static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe) +static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; unsigned long high_frame; unsigned long low_frame; - u32 high1, high2, low; + u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal; + struct intel_crtc *intel_crtc = + to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); + const struct drm_display_mode *mode = &intel_crtc->base.hwmode; + + htotal = mode->crtc_htotal; + hsync_start = mode->crtc_hsync_start; + vbl_start = mode->crtc_vblank_start; + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + vbl_start = DIV_ROUND_UP(vbl_start, 2); - if (!i915_pipe_enabled(dev, pipe)) { - DRM_DEBUG_DRIVER("trying to get vblank count for disabled " - "pipe %c\n", pipe_name(pipe)); - return 0; - } + /* Convert to pixel count */ + vbl_start *= htotal; + + /* Start of vblank event occurs at start of hsync */ + vbl_start -= 
htotal - hsync_start; high_frame = PIPEFRAME(pipe); low_frame = PIPEFRAMEPIXEL(pipe); @@ -204,196 +694,256 @@ */ do { high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; - low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK; + low = I915_READ(low_frame); high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; } while (high1 != high2); high1 >>= PIPE_FRAME_HIGH_SHIFT; + pixel = low & PIPE_PIXEL_MASK; low >>= PIPE_FRAME_LOW_SHIFT; - return (high1 << 8) | low; + + /* + * The frame counter increments at beginning of active. + * Cook up a vblank counter by also checking the pixel + * counter against vblank start. + */ + return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff; } -static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe) +static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - int reg = PIPE_FRMCOUNT_GM45(pipe); + struct drm_i915_private *dev_priv = dev->dev_private; - if (!i915_pipe_enabled(dev, pipe)) { - DRM_DEBUG_DRIVER("trying to get vblank count for disabled " - "pipe %c\n", pipe_name(pipe)); - return 0; + return I915_READ(PIPE_FRMCOUNT_G4X(pipe)); +} + +/* raw reads, only for fast reads of display block, no need for forcewake etc. */ +#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__)) + +static int __intel_get_crtc_scanline(struct intel_crtc *crtc) +{ + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + const struct drm_display_mode *mode = &crtc->base.hwmode; + enum pipe pipe = crtc->pipe; + int position, vtotal; + + vtotal = mode->crtc_vtotal; + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + vtotal /= 2; + + if (IS_GEN2(dev)) + position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2; + else + position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; + + /* + * On HSW, the DSL reg (0x70000) appears to return 0 if we + * read it just before the start of vblank. So try it again + * so we don't accidentally end up spanning a vblank frame + * increment, causing the pipe_update_end() code to squak at us. + * + * The nature of this problem means we can't simply check the ISR + * bit and return the vblank start value; nor can we use the scanline + * debug register in the transcoder as it appears to have the same + * problem. We may need to extend this to include other platforms, + * but so far testing only shows the problem on HSW. + */ + if (HAS_DDI(dev) && !position) { + int i, temp; + + for (i = 0; i < 100; i++) { + udelay(1); + temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & + DSL_LINEMASK_GEN3; + if (temp != position) { + position = temp; + break; + } + } } - return I915_READ(reg); + /* + * See update_scanline_offset() for the details on the + * scanline_offset adjustment. 
+ */ + return (position + crtc->scanline_offset) % vtotal; } -static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, - int *vpos, int *hpos) +static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, + unsigned int flags, int *vpos, int *hpos, + ktime_t *stime, ktime_t *etime, + const struct drm_display_mode *mode) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - u32 vbl = 0, position = 0; - int vbl_start, vbl_end, htotal, vtotal; + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int position; + int vbl_start, vbl_end, hsync_start, htotal, vtotal; bool in_vbl = true; int ret = 0; - enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, - pipe); + unsigned long irqflags; - if (!i915_pipe_enabled(dev, pipe)) { + if (WARN_ON(!mode->crtc_clock)) { DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled " "pipe %c\n", pipe_name(pipe)); return 0; } - /* Get vtotal. */ - vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff); + htotal = mode->crtc_htotal; + hsync_start = mode->crtc_hsync_start; + vtotal = mode->crtc_vtotal; + vbl_start = mode->crtc_vblank_start; + vbl_end = mode->crtc_vblank_end; + + if (mode->flags & DRM_MODE_FLAG_INTERLACE) { + vbl_start = DIV_ROUND_UP(vbl_start, 2); + vbl_end /= 2; + vtotal /= 2; + } + + ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE; + + /* + * Lock uncore.lock, as we will do multiple timing critical raw + * register reads, potentially with preemption disabled, so the + * following code must not block on uncore.lock. + */ + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); + + /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */ - if (INTEL_INFO(dev)->gen >= 4) { + /* Get optional system timestamp before query. */ + if (stime) + *stime = ktime_get(); + + if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { /* No obvious pixelcount register. Only query vertical * scanout position from Display scan line register. */ - position = I915_READ(PIPEDSL(pipe)); - - /* Decode into vertical scanout position. Don't have - * horizontal scanout position. - */ - *vpos = position & 0x1fff; - *hpos = 0; + position = __intel_get_crtc_scanline(intel_crtc); } else { /* Have access to pixelcount since start of frame. * We can split this into vertical and horizontal * scanout position. */ - position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; + position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; - htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff); - *vpos = position / htotal; - *hpos = position - (*vpos * htotal); + /* convert to pixel counts */ + vbl_start *= htotal; + vbl_end *= htotal; + vtotal *= htotal; + + /* + * In interlaced modes, the pixel counter counts all pixels, + * so one field will have htotal more pixels. In order to avoid + * the reported position from jumping backwards when the pixel + * counter is beyond the length of the shorter field, just + * clamp the position the length of the shorter field. This + * matches how the scanline counter based position works since + * the scanline counter doesn't count the two half lines. + */ + if (position >= vtotal) + position = vtotal - 1; + + /* + * Start of vblank interrupt is triggered at start of hsync, + * just prior to the first active line of vblank. 
However we + * consider lines to start at the leading edge of horizontal + * active. So, should we get here before we've crossed into + * the horizontal active of the first line in vblank, we would + * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that, + * always add htotal-hsync_start to the current pixel position. + */ + position = (position + htotal - hsync_start) % vtotal; } - /* Query vblank area. */ - vbl = I915_READ(VBLANK(cpu_transcoder)); + /* Get optional system timestamp after query. */ + if (etime) + *etime = ktime_get(); + + /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */ + + spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); - /* Test position against vblank region. */ - vbl_start = vbl & 0x1fff; - vbl_end = (vbl >> 16) & 0x1fff; - - if ((*vpos < vbl_start) || (*vpos > vbl_end)) - in_vbl = false; - - /* Inside "upper part" of vblank area? Apply corrective offset: */ - if (in_vbl && (*vpos >= vbl_start)) - *vpos = *vpos - vtotal; - - /* Readouts valid? */ - if (vbl > 0) - ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE; + in_vbl = position >= vbl_start && position < vbl_end; + + /* + * While in vblank, position will be negative + * counting up towards 0 at vbl_end. And outside + * vblank, position will be positive counting + * up since vbl_end. + */ + if (position >= vbl_start) + position -= vbl_end; + else + position += vtotal - vbl_end; + + if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { + *vpos = position; + *hpos = 0; + } else { + *vpos = position / htotal; + *hpos = position - (*vpos * htotal); + } /* In vblank? */ if (in_vbl) - ret |= DRM_SCANOUTPOS_INVBL; + ret |= DRM_SCANOUTPOS_IN_VBLANK; return ret; } -static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe, +int intel_get_crtc_scanline(struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + unsigned long irqflags; + int position; + + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); + position = __intel_get_crtc_scanline(crtc); + spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); + + return position; +} + +static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe, int *max_error, struct timeval *vblank_time, unsigned flags) { struct drm_crtc *crtc; - if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) { - DRM_ERROR("Invalid crtc %d\n", pipe); + if (pipe >= INTEL_INFO(dev)->num_pipes) { + DRM_ERROR("Invalid crtc %u\n", pipe); return -EINVAL; } /* Get drm_crtc to timestamp: */ crtc = intel_get_crtc_for_pipe(dev, pipe); if (crtc == NULL) { - DRM_ERROR("Invalid crtc %d\n", pipe); + DRM_ERROR("Invalid crtc %u\n", pipe); return -EINVAL; } - if (!crtc->enabled) { - DRM_DEBUG_KMS("crtc %d is disabled\n", pipe); + if (!crtc->hwmode.crtc_clock) { + DRM_DEBUG_KMS("crtc %u is disabled\n", pipe); return -EBUSY; } /* Helper routine in DRM core does all the work: */ return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error, vblank_time, flags, - crtc); + &crtc->hwmode); } -/* - * Handle hotplug events outside the interrupt handler proper. 
- */ -#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000) - -static void i915_hotplug_work_func(struct work_struct *work) +static void ironlake_rps_change_irq_handler(struct drm_device *dev) { - drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, - hotplug_work); - struct drm_device *dev = dev_priv->dev; - struct drm_mode_config *mode_config = &dev->mode_config; - struct intel_connector *intel_connector; - struct intel_encoder *intel_encoder; - struct drm_connector *connector; - unsigned long irqflags; - bool hpd_disabled = false; - - /* HPD irq before everything is fully set up. */ - if (!dev_priv->enable_hotplug_processing) - return; - - mutex_lock(&mode_config->mutex); - DRM_DEBUG_KMS("running encoder hotplug functions\n"); - - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - list_for_each_entry(connector, &mode_config->connector_list, head) { - intel_connector = to_intel_connector(connector); - intel_encoder = intel_connector->encoder; - if (intel_encoder->hpd_pin > HPD_NONE && - dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED && - connector->polled == DRM_CONNECTOR_POLL_HPD) { - DRM_INFO("HPD interrupt storm detected on connector %s: " - "switching from hotplug detection to polling\n", - drm_get_connector_name(connector)); - dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED; - connector->polled = DRM_CONNECTOR_POLL_CONNECT - | DRM_CONNECTOR_POLL_DISCONNECT; - hpd_disabled = true; - } - } - /* if there were no outputs to poll, poll was disabled, - * therefore make sure it's enabled when disabling HPD on - * some connectors */ - if (hpd_disabled) { - drm_kms_helper_poll_enable(dev); - mod_timer(&dev_priv->hotplug_reenable_timer, - jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY)); - } - - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); - - list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) - if (intel_encoder->hot_plug) - intel_encoder->hot_plug(intel_encoder); - - mutex_unlock(&mode_config->mutex); - - /* Just fire off a uevent and let userspace tell us what to do */ - drm_helper_hpd_irq_event(dev); -} - -static void ironlake_handle_rps_change(struct drm_device *dev) -{ - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; u32 busy_up, busy_down, max_avg, min_avg; u8 new_delay; - unsigned long flags; - spin_lock_irqsave(&mchdev_lock, flags); + spin_lock(&mchdev_lock); I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); @@ -421,60 +971,185 @@ if (ironlake_set_drps(dev, new_delay)) dev_priv->ips.cur_delay = new_delay; - spin_unlock_irqrestore(&mchdev_lock, flags); + spin_unlock(&mchdev_lock); return; } -static void notify_ring(struct drm_device *dev, - struct intel_ring_buffer *ring) +static void notify_ring(struct intel_engine_cs *ring) { - struct drm_i915_private *dev_priv = dev->dev_private; - - if (ring->obj == NULL) + if (!intel_ring_initialized(ring)) return; - trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false)); + trace_i915_gem_request_notify(ring); wake_up_all(&ring->irq_queue); - if (i915_enable_hangcheck) { - dev_priv->gpu_error.hangcheck_count = 0; - mod_timer(&dev_priv->gpu_error.hangcheck_timer, - round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); +} + +static void vlv_c0_read(struct drm_i915_private *dev_priv, + struct intel_rps_ei *ei) +{ + ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP); + ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT); + ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT); +} + 
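/*
 * Illustration (editor's sketch, not part of the patch): vlv_c0_read()
 * above samples the CZ timestamp together with the render/media C0
 * residency counters at each RPS evaluation-interval interrupt, and
 * vlv_c0_above() below turns two such samples into a busy/idle verdict.
 * The standalone sketch here mirrors only that comparison; the kernel's
 * fixed-point clock scaling (VLV_CZ_CLOCK_TO_MILLI_SEC and the
 * VLV_COUNT_RANGE_HIGH multiplier) is deliberately left out, and every
 * name below is an assumption, not i915 API.
 */
#include <stdbool.h>
#include <stdint.h>

struct ei_sample {
	uint64_t cz_clock;	/* free-running timestamp at the EI boundary */
	uint64_t render_c0;	/* render engine busy-residency counter */
	uint64_t media_c0;	/* media engine busy-residency counter */
};

static bool c0_busy_above(const struct ei_sample *old,
			  const struct ei_sample *now,
			  unsigned int threshold_pct)
{
	uint64_t time = now->cz_clock - old->cz_clock;
	/* A workload may be split between render and media (e.g. rendered
	 * in mesa, then blitted in X), so both engines are summed. */
	uint64_t c0 = (now->render_c0 - old->render_c0) +
		      (now->media_c0 - old->media_c0);

	/* No valid baseline yet: gen6_rps_reset_ei() has to seed one. */
	if (old->cz_clock == 0 || time == 0)
		return false;

	/* Busy for more than threshold_pct of the elapsed interval? */
	return c0 * 100 >= time * threshold_pct;
}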
+static bool vlv_c0_above(struct drm_i915_private *dev_priv, + const struct intel_rps_ei *old, + const struct intel_rps_ei *now, + int threshold) +{ + u64 time, c0; + unsigned int mul = 100; + + if (old->cz_clock == 0) + return false; + + if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH) + mul <<= 8; + + time = now->cz_clock - old->cz_clock; + time *= threshold * dev_priv->czclk_freq; + + /* Workload can be split between render + media, e.g. SwapBuffers + * being blitted in X after being rendered in mesa. To account for + * this we need to combine both engines into our activity counter. + */ + c0 = now->render_c0 - old->render_c0; + c0 += now->media_c0 - old->media_c0; + c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC; + + return c0 >= time; +} + +void gen6_rps_reset_ei(struct drm_i915_private *dev_priv) +{ + vlv_c0_read(dev_priv, &dev_priv->rps.down_ei); + dev_priv->rps.up_ei = dev_priv->rps.down_ei; +} + +static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir) +{ + struct intel_rps_ei now; + u32 events = 0; + + if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0) + return 0; + + vlv_c0_read(dev_priv, &now); + if (now.cz_clock == 0) + return 0; + + if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) { + if (!vlv_c0_above(dev_priv, + &dev_priv->rps.down_ei, &now, + dev_priv->rps.down_threshold)) + events |= GEN6_PM_RP_DOWN_THRESHOLD; + dev_priv->rps.down_ei = now; + } + + if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) { + if (vlv_c0_above(dev_priv, + &dev_priv->rps.up_ei, &now, + dev_priv->rps.up_threshold)) + events |= GEN6_PM_RP_UP_THRESHOLD; + dev_priv->rps.up_ei = now; } + + return events; } -static void gen6_pm_rps_work(struct work_struct *work) +static bool any_waiters(struct drm_i915_private *dev_priv) { - drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, - rps.work); - u32 pm_iir, pm_imr; - u8 new_delay; + struct intel_engine_cs *ring; + int i; + + for_each_ring(ring, dev_priv, i) + if (ring->irq_refcount) + return true; + + return false; +} - spin_lock_irq(&dev_priv->rps.lock); +static void gen6_pm_rps_work(struct work_struct *work) +{ + struct drm_i915_private *dev_priv = + container_of(work, struct drm_i915_private, rps.work); + bool client_boost; + int new_delay, adj, min, max; + u32 pm_iir; + + spin_lock_irq(&dev_priv->irq_lock); + /* Speed up work cancelation during disabling rps interrupts. */ + if (!dev_priv->rps.interrupts_enabled) { + spin_unlock_irq(&dev_priv->irq_lock); + return; + } pm_iir = dev_priv->rps.pm_iir; dev_priv->rps.pm_iir = 0; - pm_imr = I915_READ(GEN6_PMIMR); - I915_WRITE(GEN6_PMIMR, 0); - spin_unlock_irq(&dev_priv->rps.lock); + /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */ + gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); + client_boost = dev_priv->rps.client_boost; + dev_priv->rps.client_boost = false; + spin_unlock_irq(&dev_priv->irq_lock); + + /* Make sure we didn't queue anything we're not going to process. 
*/ + WARN_ON(pm_iir & ~dev_priv->pm_rps_events); - if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0) + if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost) return; mutex_lock(&dev_priv->rps.hw_lock); - if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) - new_delay = dev_priv->rps.cur_delay + 1; - else - new_delay = dev_priv->rps.cur_delay - 1; + pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir); + + adj = dev_priv->rps.last_adj; + new_delay = dev_priv->rps.cur_freq; + min = dev_priv->rps.min_freq_softlimit; + max = dev_priv->rps.max_freq_softlimit; + + if (client_boost) { + new_delay = dev_priv->rps.max_freq_softlimit; + adj = 0; + } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { + if (adj > 0) + adj *= 2; + else /* CHV needs even encode values */ + adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1; + /* + * For better performance, jump directly + * to RPe if we're below it. + */ + if (new_delay < dev_priv->rps.efficient_freq - adj) { + new_delay = dev_priv->rps.efficient_freq; + adj = 0; + } + } else if (any_waiters(dev_priv)) { + adj = 0; + } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) { + if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq) + new_delay = dev_priv->rps.efficient_freq; + else + new_delay = dev_priv->rps.min_freq_softlimit; + adj = 0; + } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) { + if (adj < 0) + adj *= 2; + else /* CHV needs even encode values */ + adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1; + } else { /* unknown event */ + adj = 0; + } + + dev_priv->rps.last_adj = adj; /* sysfs frequency interfaces may have snuck in while servicing the * interrupt */ - if (!(new_delay > dev_priv->rps.max_delay || - new_delay < dev_priv->rps.min_delay)) { - gen6_set_rps(dev_priv->dev, new_delay); - } + new_delay += adj; + new_delay = clamp_t(int, new_delay, min, max); + + intel_set_rps(dev_priv->dev, new_delay); mutex_unlock(&dev_priv->rps.hw_lock); } @@ -491,12 +1166,12 @@ */ static void ivybridge_parity_work(struct work_struct *work) { - drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, - l3_parity.error_work); + struct drm_i915_private *dev_priv = + container_of(work, struct drm_i915_private, l3_parity.error_work); u32 error_status, row, bank, subbank; - char *parity_event[5]; + char *parity_event[6]; uint32_t misccpctl; - unsigned long flags; + uint8_t slice = 0; /* We must turn off DOP level clock gating to access the L3 registers. 
 * In order to prevent a get/put style interface, acquire struct mutex
@@ -504,260 +1179,679 @@
  */
 	mutex_lock(&dev_priv->dev->struct_mutex);
 
+	/* If we've screwed up tracking, just let the interrupt fire again */
+	if (WARN_ON(!dev_priv->l3_parity.which_slice))
+		goto out;
+
 	misccpctl = I915_READ(GEN7_MISCCPCTL);
 	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
 	POSTING_READ(GEN7_MISCCPCTL);
 
-	error_status = I915_READ(GEN7_L3CDERRST1);
-	row = GEN7_PARITY_ERROR_ROW(error_status);
-	bank = GEN7_PARITY_ERROR_BANK(error_status);
-	subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
-
-	I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
-		   GEN7_L3CDERRST1_ENABLE);
-	POSTING_READ(GEN7_L3CDERRST1);
+	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
+		u32 reg;
 
-	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
+		slice--;
+		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
+			break;
 
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	dev_priv->gt_irq_mask &= ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
-	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+		dev_priv->l3_parity.which_slice &= ~(1<<slice);
 
-	mutex_unlock(&dev_priv->dev->struct_mutex);
+		reg = GEN7_L3CDERRST1 + (slice * 0x200);
 
-	parity_event[0] = "L3_PARITY_ERROR=1";
-	parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
-	parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
-	parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
-	parity_event[4] = NULL;
+		error_status = I915_READ(reg);
+		row = GEN7_PARITY_ERROR_ROW(error_status);
+		bank = GEN7_PARITY_ERROR_BANK(error_status);
+		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
+
+		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
+		POSTING_READ(reg);
+
+		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
+		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
+		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
+		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
+		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
+		parity_event[5] = NULL;
+
+		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
+				   KOBJ_CHANGE, parity_event);
+
+		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
+			  slice, row, bank, subbank);
+
+		kfree(parity_event[4]);
+		kfree(parity_event[3]);
+		kfree(parity_event[2]);
+		kfree(parity_event[1]);
+	}
 
-	kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
-			   KOBJ_CHANGE, parity_event);
+	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
 
-	DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
-		  row, bank, subbank);
+out:
+	WARN_ON(dev_priv->l3_parity.which_slice);
+	spin_lock_irq(&dev_priv->irq_lock);
+	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
+	spin_unlock_irq(&dev_priv->irq_lock);
 
-	kfree(parity_event[3]);
-	kfree(parity_event[2]);
-	kfree(parity_event[1]);
+	mutex_unlock(&dev_priv->dev->struct_mutex);
 }
 
-static void ivybridge_handle_parity_error(struct drm_device *dev)
+static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
 {
-	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	unsigned long flags;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (!HAS_L3_GPU_CACHE(dev))
+	if (!HAS_L3_DPF(dev))
 		return;
 
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	dev_priv->gt_irq_mask |= GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
-	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+	spin_lock(&dev_priv->irq_lock);
+
gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev)); + spin_unlock(&dev_priv->irq_lock); + + iir &= GT_PARITY_ERROR(dev); + if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1) + dev_priv->l3_parity.which_slice |= 1 << 1; + + if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT) + dev_priv->l3_parity.which_slice |= 1 << 0; queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); } +static void ilk_gt_irq_handler(struct drm_device *dev, + struct drm_i915_private *dev_priv, + u32 gt_iir) +{ + if (gt_iir & + (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT)) + notify_ring(&dev_priv->ring[RCS]); + if (gt_iir & ILK_BSD_USER_INTERRUPT) + notify_ring(&dev_priv->ring[VCS]); +} + static void snb_gt_irq_handler(struct drm_device *dev, struct drm_i915_private *dev_priv, u32 gt_iir) { - if (gt_iir & (GEN6_RENDER_USER_INTERRUPT | - GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT)) - notify_ring(dev, &dev_priv->ring[RCS]); - if (gt_iir & GEN6_BSD_USER_INTERRUPT) - notify_ring(dev, &dev_priv->ring[VCS]); - if (gt_iir & GEN6_BLITTER_USER_INTERRUPT) - notify_ring(dev, &dev_priv->ring[BCS]); + if (gt_iir & + (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT)) + notify_ring(&dev_priv->ring[RCS]); + if (gt_iir & GT_BSD_USER_INTERRUPT) + notify_ring(&dev_priv->ring[VCS]); + if (gt_iir & GT_BLT_USER_INTERRUPT) + notify_ring(&dev_priv->ring[BCS]); + + if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT | + GT_BSD_CS_ERROR_INTERRUPT | + GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) + DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir); - if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT | - GT_GEN6_BSD_CS_ERROR_INTERRUPT | - GT_RENDER_CS_ERROR_INTERRUPT)) { - DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir); - i915_handle_error(dev, false); + if (gt_iir & GT_PARITY_ERROR(dev)) + ivybridge_parity_error_irq_handler(dev, gt_iir); +} + +static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv, + u32 master_ctl) +{ + irqreturn_t ret = IRQ_NONE; + + if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { + u32 tmp = I915_READ_FW(GEN8_GT_IIR(0)); + if (tmp) { + I915_WRITE_FW(GEN8_GT_IIR(0), tmp); + ret = IRQ_HANDLED; + + if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT)) + intel_lrc_irq_handler(&dev_priv->ring[RCS]); + if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT)) + notify_ring(&dev_priv->ring[RCS]); + + if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT)) + intel_lrc_irq_handler(&dev_priv->ring[BCS]); + if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT)) + notify_ring(&dev_priv->ring[BCS]); + } else + DRM_ERROR("The master control interrupt lied (GT0)!\n"); + } + + if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) { + u32 tmp = I915_READ_FW(GEN8_GT_IIR(1)); + if (tmp) { + I915_WRITE_FW(GEN8_GT_IIR(1), tmp); + ret = IRQ_HANDLED; + + if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT)) + intel_lrc_irq_handler(&dev_priv->ring[VCS]); + if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT)) + notify_ring(&dev_priv->ring[VCS]); + + if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT)) + intel_lrc_irq_handler(&dev_priv->ring[VCS2]); + if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT)) + notify_ring(&dev_priv->ring[VCS2]); + } else + DRM_ERROR("The master control interrupt lied (GT1)!\n"); + } + + if (master_ctl & GEN8_GT_VECS_IRQ) { + u32 tmp = I915_READ_FW(GEN8_GT_IIR(3)); + if (tmp) { + I915_WRITE_FW(GEN8_GT_IIR(3), tmp); + ret = IRQ_HANDLED; + + if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT)) + 
intel_lrc_irq_handler(&dev_priv->ring[VECS]); + if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT)) + notify_ring(&dev_priv->ring[VECS]); + } else + DRM_ERROR("The master control interrupt lied (GT3)!\n"); + } + + if (master_ctl & GEN8_GT_PM_IRQ) { + u32 tmp = I915_READ_FW(GEN8_GT_IIR(2)); + if (tmp & dev_priv->pm_rps_events) { + I915_WRITE_FW(GEN8_GT_IIR(2), + tmp & dev_priv->pm_rps_events); + ret = IRQ_HANDLED; + gen6_rps_irq_handler(dev_priv, tmp); + } else + DRM_ERROR("The master control interrupt lied (PM)!\n"); } - if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT) - ivybridge_handle_parity_error(dev); + return ret; } -static void gen6_queue_rps_work(struct drm_i915_private *dev_priv, - u32 pm_iir) +static bool bxt_port_hotplug_long_detect(enum port port, u32 val) { - unsigned long flags; + switch (port) { + case PORT_A: + return val & PORTA_HOTPLUG_LONG_DETECT; + case PORT_B: + return val & PORTB_HOTPLUG_LONG_DETECT; + case PORT_C: + return val & PORTC_HOTPLUG_LONG_DETECT; + default: + return false; + } +} - /* - * IIR bits should never already be set because IMR should - * prevent an interrupt from being shown in IIR. The warning - * displays a case where we've unsafely cleared - * dev_priv->rps.pm_iir. Although missing an interrupt of the same - * type is not a problem, it displays a problem in the logic. - * - * The mask bit in IMR is cleared by dev_priv->rps.work. - */ +static bool spt_port_hotplug2_long_detect(enum port port, u32 val) +{ + switch (port) { + case PORT_E: + return val & PORTE_HOTPLUG_LONG_DETECT; + default: + return false; + } +} - spin_lock_irqsave(&dev_priv->rps.lock, flags); - dev_priv->rps.pm_iir |= pm_iir; - I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir); - POSTING_READ(GEN6_PMIMR); - spin_unlock_irqrestore(&dev_priv->rps.lock, flags); +static bool spt_port_hotplug_long_detect(enum port port, u32 val) +{ + switch (port) { + case PORT_A: + return val & PORTA_HOTPLUG_LONG_DETECT; + case PORT_B: + return val & PORTB_HOTPLUG_LONG_DETECT; + case PORT_C: + return val & PORTC_HOTPLUG_LONG_DETECT; + case PORT_D: + return val & PORTD_HOTPLUG_LONG_DETECT; + default: + return false; + } +} + +static bool ilk_port_hotplug_long_detect(enum port port, u32 val) +{ + switch (port) { + case PORT_A: + return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT; + default: + return false; + } +} - queue_work(dev_priv->wq, &dev_priv->rps.work); +static bool pch_port_hotplug_long_detect(enum port port, u32 val) +{ + switch (port) { + case PORT_B: + return val & PORTB_HOTPLUG_LONG_DETECT; + case PORT_C: + return val & PORTC_HOTPLUG_LONG_DETECT; + case PORT_D: + return val & PORTD_HOTPLUG_LONG_DETECT; + default: + return false; + } } -#define HPD_STORM_DETECT_PERIOD 1000 -#define HPD_STORM_THRESHOLD 5 +static bool i9xx_port_hotplug_long_detect(enum port port, u32 val) +{ + switch (port) { + case PORT_B: + return val & PORTB_HOTPLUG_INT_LONG_PULSE; + case PORT_C: + return val & PORTC_HOTPLUG_INT_LONG_PULSE; + case PORT_D: + return val & PORTD_HOTPLUG_INT_LONG_PULSE; + default: + return false; + } +} -static inline bool hotplug_irq_storm_detect(struct drm_device *dev, - u32 hotplug_trigger, - const u32 *hpd) +/* + * Get a bit mask of pins that have triggered, and which ones may be long. + * This can be called multiple times with the same masks to accumulate + * hotplug detection results from several registers. + * + * Note that the caller is expected to zero out the masks initially. 
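+ * (i9xx_hpd_irq_handler() and ibx_hpd_irq_handler() below do exactly
+ * that, starting from pin_mask = 0 and long_mask = 0.)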
+ */ +static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask, + u32 hotplug_trigger, u32 dig_hotplug_reg, + const u32 hpd[HPD_NUM_PINS], + bool long_pulse_detect(enum port port, u32 val)) { - drm_i915_private_t *dev_priv = dev->dev_private; - unsigned long irqflags; + enum port port; int i; - bool ret = false; - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + for_each_hpd_pin(i) { + if ((hpd[i] & hotplug_trigger) == 0) + continue; - for (i = 1; i < HPD_NUM_PINS; i++) { + *pin_mask |= BIT(i); - if (!(hpd[i] & hotplug_trigger) || - dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED) + if (!intel_hpd_pin_to_port(i, &port)) continue; - if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies, - dev_priv->hpd_stats[i].hpd_last_jiffies - + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) { - dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies; - dev_priv->hpd_stats[i].hpd_cnt = 0; - } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) { - dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED; - DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i); - ret = true; - } else { - dev_priv->hpd_stats[i].hpd_cnt++; - } + if (long_pulse_detect(port, dig_hotplug_reg)) + *long_mask |= BIT(i); } - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n", + hotplug_trigger, dig_hotplug_reg, *pin_mask); - return ret; } static void gmbus_irq_handler(struct drm_device *dev) { - struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; wake_up_all(&dev_priv->gmbus_wait_queue); } static void dp_aux_irq_handler(struct drm_device *dev) { - struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; wake_up_all(&dev_priv->gmbus_wait_queue); } +#if defined(CONFIG_DEBUG_FS) +static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe, + uint32_t crc0, uint32_t crc1, + uint32_t crc2, uint32_t crc3, + uint32_t crc4) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; + struct intel_pipe_crc_entry *entry; + int head, tail; + + spin_lock(&pipe_crc->lock); + + if (!pipe_crc->entries) { + spin_unlock(&pipe_crc->lock); + DRM_DEBUG_KMS("spurious interrupt\n"); + return; + } + + head = pipe_crc->head; + tail = pipe_crc->tail; + + if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) { + spin_unlock(&pipe_crc->lock); + DRM_ERROR("CRC buffer overflowing\n"); + return; + } + + entry = &pipe_crc->entries[head]; + + entry->frame = dev->driver->get_vblank_counter(dev, pipe); + entry->crc[0] = crc0; + entry->crc[1] = crc1; + entry->crc[2] = crc2; + entry->crc[3] = crc3; + entry->crc[4] = crc4; + + head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1); + pipe_crc->head = head; + + spin_unlock(&pipe_crc->lock); + + wake_up_interruptible(&pipe_crc->wq); +} +#else +static inline void +display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe, + uint32_t crc0, uint32_t crc1, + uint32_t crc2, uint32_t crc3, + uint32_t crc4) {} +#endif + + +static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + display_pipe_crc_irq_handler(dev, pipe, + I915_READ(PIPE_CRC_RES_1_IVB(pipe)), + 0, 0, 0, 0); +} + +static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) +{ + struct drm_i915_private 
*dev_priv = dev->dev_private; + + display_pipe_crc_irq_handler(dev, pipe, + I915_READ(PIPE_CRC_RES_1_IVB(pipe)), + I915_READ(PIPE_CRC_RES_2_IVB(pipe)), + I915_READ(PIPE_CRC_RES_3_IVB(pipe)), + I915_READ(PIPE_CRC_RES_4_IVB(pipe)), + I915_READ(PIPE_CRC_RES_5_IVB(pipe))); +} + +static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t res1, res2; + + if (INTEL_INFO(dev)->gen >= 3) + res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe)); + else + res1 = 0; + + if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) + res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe)); + else + res2 = 0; + + display_pipe_crc_irq_handler(dev, pipe, + I915_READ(PIPE_CRC_RES_RED(pipe)), + I915_READ(PIPE_CRC_RES_GREEN(pipe)), + I915_READ(PIPE_CRC_RES_BLUE(pipe)), + res1, res2); +} + +/* The RPS events need forcewake, so we add them to a work queue and mask their + * IMR bits until the work is done. Other interrupts can be processed without + * the work queue. */ +static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) +{ + if (pm_iir & dev_priv->pm_rps_events) { + spin_lock(&dev_priv->irq_lock); + gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events); + if (dev_priv->rps.interrupts_enabled) { + dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events; + queue_work(dev_priv->wq, &dev_priv->rps.work); + } + spin_unlock(&dev_priv->irq_lock); + } + + if (INTEL_INFO(dev_priv)->gen >= 8) + return; + + if (HAS_VEBOX(dev_priv->dev)) { + if (pm_iir & PM_VEBOX_USER_INTERRUPT) + notify_ring(&dev_priv->ring[VECS]); + + if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) + DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir); + } +} + +static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe) +{ + if (!drm_handle_vblank(dev, pipe)) + return false; + + return true; +} + +static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 pipe_stats[I915_MAX_PIPES] = { }; + int pipe; + + spin_lock(&dev_priv->irq_lock); + for_each_pipe(dev_priv, pipe) { + int reg; + u32 mask, iir_bit = 0; + + /* + * PIPESTAT bits get signalled even when the interrupt is + * disabled with the mask bits, and some of the status bits do + * not generate interrupts at all (like the underrun bit). Hence + * we need to be careful that we only handle what we want to + * handle. + */ + + /* fifo underruns are filterered in the underrun handler. 
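+		 * The underrun status bit is therefore always included in
+		 * the query mask here, and the decision whether to report
+		 * it is left to intel_cpu_fifo_underrun_irq_handler().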
*/ + mask = PIPE_FIFO_UNDERRUN_STATUS; + + switch (pipe) { + case PIPE_A: + iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT; + break; + case PIPE_B: + iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; + break; + case PIPE_C: + iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; + break; + } + if (iir & iir_bit) + mask |= dev_priv->pipestat_irq_mask[pipe]; + + if (!mask) + continue; + + reg = PIPESTAT(pipe); + mask |= PIPESTAT_INT_ENABLE_MASK; + pipe_stats[pipe] = I915_READ(reg) & mask; + + /* + * Clear the PIPE*STAT regs before the IIR + */ + if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS | + PIPESTAT_INT_STATUS_MASK)) + I915_WRITE(reg, pipe_stats[pipe]); + } + spin_unlock(&dev_priv->irq_lock); + + for_each_pipe(dev_priv, pipe) { + if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && + intel_pipe_handle_vblank(dev, pipe)) + intel_check_page_flip(dev, pipe); + + if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) { + intel_prepare_page_flip(dev, pipe); + intel_finish_page_flip(dev, pipe); + } + + if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) + i9xx_pipe_crc_irq_handler(dev, pipe); + + if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) + intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); + } + + if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) + gmbus_irq_handler(dev); +} + +static void i9xx_hpd_irq_handler(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); + u32 pin_mask = 0, long_mask = 0; + + if (!hotplug_status) + return; + + I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); + /* + * Make sure hotplug status is cleared before we clear IIR, or else we + * may miss hotplug events. + */ + POSTING_READ(PORT_HOTPLUG_STAT); + + if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) { + u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; + + if (hotplug_trigger) { + intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, + hotplug_trigger, hpd_status_g4x, + i9xx_port_hotplug_long_detect); + + intel_hpd_irq_handler(dev, pin_mask, long_mask); + } + + if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) + dp_aux_irq_handler(dev); + } else { + u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; + + if (hotplug_trigger) { + intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, + hotplug_trigger, hpd_status_i915, + i9xx_port_hotplug_long_detect); + intel_hpd_irq_handler(dev, pin_mask, long_mask); + } + } +} + static irqreturn_t valleyview_irq_handler(int irq, void *arg) { - struct drm_device *dev = (struct drm_device *) arg; - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_device *dev = arg; + struct drm_i915_private *dev_priv = dev->dev_private; u32 iir, gt_iir, pm_iir; irqreturn_t ret = IRQ_NONE; - unsigned long irqflags; - int pipe; - u32 pipe_stats[I915_MAX_PIPES]; - atomic_inc(&dev_priv->irq_received); + if (!intel_irqs_enabled(dev_priv)) + return IRQ_NONE; while (true) { - iir = I915_READ(VLV_IIR); + /* Find, clear, then process each source of interrupt */ + gt_iir = I915_READ(GTIIR); + if (gt_iir) + I915_WRITE(GTIIR, gt_iir); + pm_iir = I915_READ(GEN6_PMIIR); + if (pm_iir) + I915_WRITE(GEN6_PMIIR, pm_iir); + + iir = I915_READ(VLV_IIR); + if (iir) { + /* Consume port before clearing IIR or we'll miss events */ + if (iir & I915_DISPLAY_PORT_INTERRUPT) + i9xx_hpd_irq_handler(dev); + I915_WRITE(VLV_IIR, iir); + } if (gt_iir == 0 && pm_iir == 0 && iir == 0) goto out; ret = IRQ_HANDLED; - snb_gt_irq_handler(dev, dev_priv, gt_iir); + if (gt_iir) + 
snb_gt_irq_handler(dev, dev_priv, gt_iir); + if (pm_iir) + gen6_rps_irq_handler(dev_priv, pm_iir); + /* Call regardless, as some status bits might not be + * signalled in iir */ + valleyview_pipestat_irq_handler(dev, iir); + } - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - for_each_pipe(pipe) { - int reg = PIPESTAT(pipe); - pipe_stats[pipe] = I915_READ(reg); +out: + return ret; +} - /* - * Clear the PIPE*STAT regs before the IIR - */ - if (pipe_stats[pipe] & 0x8000ffff) { - if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) - DRM_DEBUG_DRIVER("pipe %c underrun\n", - pipe_name(pipe)); - I915_WRITE(reg, pipe_stats[pipe]); - } - } - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); +static irqreturn_t cherryview_irq_handler(int irq, void *arg) +{ + struct drm_device *dev = arg; + struct drm_i915_private *dev_priv = dev->dev_private; + u32 master_ctl, iir; + irqreturn_t ret = IRQ_NONE; + + if (!intel_irqs_enabled(dev_priv)) + return IRQ_NONE; - for_each_pipe(pipe) { - if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) - drm_handle_vblank(dev, pipe); + for (;;) { + master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL; + iir = I915_READ(VLV_IIR); - if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) { - intel_prepare_page_flip(dev, pipe); - intel_finish_page_flip(dev, pipe); - } - } + if (master_ctl == 0 && iir == 0) + break; - /* Consume port. Then clear IIR or we'll miss events */ - if (iir & I915_DISPLAY_PORT_INTERRUPT) { - u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); - u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; + ret = IRQ_HANDLED; - DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", - hotplug_status); - if (hotplug_trigger) { - if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_status_i915)) - i915_hpd_irq_setup(dev); - queue_work(dev_priv->wq, - &dev_priv->hotplug_work); - } - I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); - I915_READ(PORT_HOTPLUG_STAT); + I915_WRITE(GEN8_MASTER_IRQ, 0); + + /* Find, clear, then process each source of interrupt */ + + if (iir) { + /* Consume port before clearing IIR or we'll miss events */ + if (iir & I915_DISPLAY_PORT_INTERRUPT) + i9xx_hpd_irq_handler(dev); + I915_WRITE(VLV_IIR, iir); } - if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) - gmbus_irq_handler(dev); + gen8_gt_irq_handler(dev_priv, master_ctl); - if (pm_iir & GEN6_PM_DEFERRED_EVENTS) - gen6_queue_rps_work(dev_priv, pm_iir); + /* Call regardless, as some status bits might not be + * signalled in iir */ + valleyview_pipestat_irq_handler(dev, iir); - I915_WRITE(GTIIR, gt_iir); - I915_WRITE(GEN6_PMIIR, pm_iir); - I915_WRITE(VLV_IIR, iir); + I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL); + POSTING_READ(GEN8_MASTER_IRQ); } -out: return ret; } +static void ibx_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger, + const u32 hpd[HPD_NUM_PINS]) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; + + dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); + I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); + + intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, + dig_hotplug_reg, hpd, + pch_port_hotplug_long_detect); + + intel_hpd_irq_handler(dev, pin_mask, long_mask); +} + static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; int pipe; u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; - if (hotplug_trigger) { - if 
(hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_ibx)) - ibx_hpd_irq_setup(dev); - queue_work(dev_priv->wq, &dev_priv->hotplug_work); - } - if (pch_iir & SDE_AUDIO_POWER_MASK) + if (hotplug_trigger) + ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx); + + if (pch_iir & SDE_AUDIO_POWER_MASK) { + int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> + SDE_AUDIO_POWER_SHIFT); DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", - (pch_iir & SDE_AUDIO_POWER_MASK) >> - SDE_AUDIO_POWER_SHIFT); + port_name(port)); + } if (pch_iir & SDE_AUX_MASK) dp_aux_irq_handler(dev); @@ -775,7 +1869,7 @@ DRM_ERROR("PCH poison interrupt\n"); if (pch_iir & SDE_FDI_MASK) - for_each_pipe(pipe) + for_each_pipe(dev_priv, pipe) DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", pipe_name(pipe), I915_READ(FDI_RX_IIR(pipe))); @@ -786,27 +1880,72 @@ if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); - if (pch_iir & SDE_TRANSB_FIFO_UNDER) - DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n"); if (pch_iir & SDE_TRANSA_FIFO_UNDER) - DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n"); + intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A); + + if (pch_iir & SDE_TRANSB_FIFO_UNDER) + intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B); +} + +static void ivb_err_int_handler(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 err_int = I915_READ(GEN7_ERR_INT); + enum pipe pipe; + + if (err_int & ERR_INT_POISON) + DRM_ERROR("Poison interrupt\n"); + + for_each_pipe(dev_priv, pipe) { + if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) + intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); + + if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { + if (IS_IVYBRIDGE(dev)) + ivb_pipe_crc_irq_handler(dev, pipe); + else + hsw_pipe_crc_irq_handler(dev, pipe); + } + } + + I915_WRITE(GEN7_ERR_INT, err_int); +} + +static void cpt_serr_int_handler(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 serr_int = I915_READ(SERR_INT); + + if (serr_int & SERR_INT_POISON) + DRM_ERROR("PCH poison interrupt\n"); + + if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN) + intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A); + + if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN) + intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B); + + if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN) + intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C); + + I915_WRITE(SERR_INT, serr_int); } static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; int pipe; u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; - if (hotplug_trigger) { - if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_cpt)) - ibx_hpd_irq_setup(dev); - queue_work(dev_priv->wq, &dev_priv->hotplug_work); + if (hotplug_trigger) + ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt); + + if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { + int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> + SDE_AUDIO_POWER_SHIFT_CPT); + DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", + port_name(port)); } - if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) - DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", - (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> - SDE_AUDIO_POWER_SHIFT_CPT); if (pch_iir & SDE_AUX_MASK_CPT) dp_aux_irq_handler(dev); @@ -821,25 +1960,188 @@ DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); if (pch_iir & 
SDE_FDI_MASK_CPT) - for_each_pipe(pipe) + for_each_pipe(dev_priv, pipe) DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", pipe_name(pipe), I915_READ(FDI_RX_IIR(pipe))); + + if (pch_iir & SDE_ERROR_CPT) + cpt_serr_int_handler(dev); +} + +static void spt_irq_handler(struct drm_device *dev, u32 pch_iir) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT & + ~SDE_PORTE_HOTPLUG_SPT; + u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT; + u32 pin_mask = 0, long_mask = 0; + + if (hotplug_trigger) { + u32 dig_hotplug_reg; + + dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); + I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); + + intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, + dig_hotplug_reg, hpd_spt, + spt_port_hotplug_long_detect); + } + + if (hotplug2_trigger) { + u32 dig_hotplug_reg; + + dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2); + I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg); + + intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger, + dig_hotplug_reg, hpd_spt, + spt_port_hotplug2_long_detect); + } + + if (pin_mask) + intel_hpd_irq_handler(dev, pin_mask, long_mask); + + if (pch_iir & SDE_GMBUS_CPT) + gmbus_irq_handler(dev); +} + +static void ilk_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger, + const u32 hpd[HPD_NUM_PINS]) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; + + dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); + I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg); + + intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, + dig_hotplug_reg, hpd, + ilk_port_hotplug_long_detect); + + intel_hpd_irq_handler(dev, pin_mask, long_mask); +} + +static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + enum pipe pipe; + u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG; + + if (hotplug_trigger) + ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ilk); + + if (de_iir & DE_AUX_CHANNEL_A) + dp_aux_irq_handler(dev); + + if (de_iir & DE_GSE) + intel_opregion_asle_intr(dev); + + if (de_iir & DE_POISON) + DRM_ERROR("Poison interrupt\n"); + + for_each_pipe(dev_priv, pipe) { + if (de_iir & DE_PIPE_VBLANK(pipe) && + intel_pipe_handle_vblank(dev, pipe)) + intel_check_page_flip(dev, pipe); + + if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) + intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); + + if (de_iir & DE_PIPE_CRC_DONE(pipe)) + i9xx_pipe_crc_irq_handler(dev, pipe); + + /* plane/pipes map 1:1 on ilk+ */ + if (de_iir & DE_PLANE_FLIP_DONE(pipe)) { + intel_prepare_page_flip(dev, pipe); + intel_finish_page_flip_plane(dev, pipe); + } + } + + /* check event from PCH */ + if (de_iir & DE_PCH_EVENT) { + u32 pch_iir = I915_READ(SDEIIR); + + if (HAS_PCH_CPT(dev)) + cpt_irq_handler(dev, pch_iir); + else + ibx_irq_handler(dev, pch_iir); + + /* should clear PCH hotplug event before clear CPU irq */ + I915_WRITE(SDEIIR, pch_iir); + } + + if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) + ironlake_rps_change_irq_handler(dev); +} + +static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + enum pipe pipe; + u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB; + + if (hotplug_trigger) + ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ivb); + + if (de_iir & DE_ERR_INT_IVB) + ivb_err_int_handler(dev); + + if (de_iir & DE_AUX_CHANNEL_A_IVB) + dp_aux_irq_handler(dev); + + if (de_iir & DE_GSE_IVB) + 
intel_opregion_asle_intr(dev); + + for_each_pipe(dev_priv, pipe) { + if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) && + intel_pipe_handle_vblank(dev, pipe)) + intel_check_page_flip(dev, pipe); + + /* plane/pipes map 1:1 on ilk+ */ + if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) { + intel_prepare_page_flip(dev, pipe); + intel_finish_page_flip_plane(dev, pipe); + } + } + + /* check event from PCH */ + if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) { + u32 pch_iir = I915_READ(SDEIIR); + + cpt_irq_handler(dev, pch_iir); + + /* clear PCH hotplug event before clear CPU irq */ + I915_WRITE(SDEIIR, pch_iir); + } } -static irqreturn_t ivybridge_irq_handler(int irq, void *arg) +/* + * To handle irqs with the minimum potential races with fresh interrupts, we: + * 1 - Disable Master Interrupt Control. + * 2 - Find the source(s) of the interrupt. + * 3 - Clear the Interrupt Identity bits (IIR). + * 4 - Process the interrupt(s) that had bits set in the IIRs. + * 5 - Re-enable Master Interrupt Control. + */ +static irqreturn_t ironlake_irq_handler(int irq, void *arg) { - struct drm_device *dev = (struct drm_device *) arg; - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier = 0; + struct drm_device *dev = arg; + struct drm_i915_private *dev_priv = dev->dev_private; + u32 de_iir, gt_iir, de_ier, sde_ier = 0; irqreturn_t ret = IRQ_NONE; - int i; - atomic_inc(&dev_priv->irq_received); + if (!intel_irqs_enabled(dev_priv)) + return IRQ_NONE; + + /* We get interrupts on unclaimed registers, so check for this before we + * do any I915_{READ,WRITE}. */ + intel_uncore_check_errors(dev); /* disable master interrupt before clearing iir */ de_ier = I915_READ(DEIER); I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); + POSTING_READ(DEIER); /* Disable south interrupts. 
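
ironlake_irq_handler directly below follows the five-step recipe spelled out in its comment. Stripped to a register-level skeleton (a sketch only: MASTER_CTL_REG, MASTER_ENABLE, IIR_REG and handle_bits() are stand-ins, not the driver's symbols):

	#include <linux/interrupt.h>
	#include <linux/io.h>

	#define MASTER_CTL_REG	0x00		/* stand-in offsets, not real hardware */
	#define MASTER_ENABLE	(1u << 31)
	#define IIR_REG		0x08

	static void handle_bits(u32 iir) { /* dispatch per-bit handlers */ }

	static irqreturn_t sketch_irq_handler(void __iomem *regs)
	{
		u32 master, iir;

		master = readl(regs + MASTER_CTL_REG);
		writel(master & ~MASTER_ENABLE, regs + MASTER_CTL_REG);	/* 1: gate off */
		readl(regs + MASTER_CTL_REG);	/* posting read flushes the write */

		iir = readl(regs + IIR_REG);	/* 2: find the source(s) */
		if (iir) {
			writel(iir, regs + IIR_REG);	/* 3: clear IIR first... */
			handle_bits(iir);	/* 4: ...then process, so a fresh
						 * event can re-latch meanwhile */
		}

		writel(master, regs + MASTER_CTL_REG);	/* 5: re-enable master */
		readl(regs + MASTER_CTL_REG);

		return iir ? IRQ_HANDLED : IRQ_NONE;
	}
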
We'll only write to SDEIIR once, so further * interrupts will be stored on its back queue, and then we'll be @@ -852,50 +2154,35 @@ POSTING_READ(SDEIER); } + /* Find, clear, then process each source of interrupt */ + gt_iir = I915_READ(GTIIR); if (gt_iir) { - snb_gt_irq_handler(dev, dev_priv, gt_iir); I915_WRITE(GTIIR, gt_iir); ret = IRQ_HANDLED; + if (INTEL_INFO(dev)->gen >= 6) + snb_gt_irq_handler(dev, dev_priv, gt_iir); + else + ilk_gt_irq_handler(dev, dev_priv, gt_iir); } de_iir = I915_READ(DEIIR); if (de_iir) { - if (de_iir & DE_AUX_CHANNEL_A_IVB) - dp_aux_irq_handler(dev); - - if (de_iir & DE_GSE_IVB) - intel_opregion_gse_intr(dev); - - for (i = 0; i < 3; i++) { - if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i))) - drm_handle_vblank(dev, i); - if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) { - intel_prepare_page_flip(dev, i); - intel_finish_page_flip_plane(dev, i); - } - } - - /* check event from PCH */ - if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) { - u32 pch_iir = I915_READ(SDEIIR); - - cpt_irq_handler(dev, pch_iir); - - /* clear PCH hotplug event before clear CPU irq */ - I915_WRITE(SDEIIR, pch_iir); - } - I915_WRITE(DEIIR, de_iir); ret = IRQ_HANDLED; + if (INTEL_INFO(dev)->gen >= 7) + ivb_display_irq_handler(dev, de_iir); + else + ilk_display_irq_handler(dev, de_iir); } - pm_iir = I915_READ(GEN6_PMIIR); - if (pm_iir) { - if (pm_iir & GEN6_PM_DEFERRED_EVENTS) - gen6_queue_rps_work(dev_priv, pm_iir); - I915_WRITE(GEN6_PMIIR, pm_iir); - ret = IRQ_HANDLED; + if (INTEL_INFO(dev)->gen >= 6) { + u32 pm_iir = I915_READ(GEN6_PMIIR); + if (pm_iir) { + I915_WRITE(GEN6_PMIIR, pm_iir); + ret = IRQ_HANDLED; + gen6_rps_irq_handler(dev_priv, pm_iir); + } } I915_WRITE(DEIER, de_ier); @@ -908,103 +2195,176 @@ return ret; } -static void ilk_gt_irq_handler(struct drm_device *dev, - struct drm_i915_private *dev_priv, - u32 gt_iir) +static void bxt_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger, + const u32 hpd[HPD_NUM_PINS]) { - if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY)) - notify_ring(dev, &dev_priv->ring[RCS]); - if (gt_iir & GT_BSD_USER_INTERRUPT) - notify_ring(dev, &dev_priv->ring[VCS]); + struct drm_i915_private *dev_priv = to_i915(dev); + u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; + + dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); + I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); + + intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, + dig_hotplug_reg, hpd, + bxt_port_hotplug_long_detect); + + intel_hpd_irq_handler(dev, pin_mask, long_mask); } -static irqreturn_t ironlake_irq_handler(int irq, void *arg) +static irqreturn_t gen8_irq_handler(int irq, void *arg) { - struct drm_device *dev = (struct drm_device *) arg; - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - int ret = IRQ_NONE; - u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier; + struct drm_device *dev = arg; + struct drm_i915_private *dev_priv = dev->dev_private; + u32 master_ctl; + irqreturn_t ret = IRQ_NONE; + uint32_t tmp = 0; + enum pipe pipe; + u32 aux_mask = GEN8_AUX_CHANNEL_A; - atomic_inc(&dev_priv->irq_received); + if (!intel_irqs_enabled(dev_priv)) + return IRQ_NONE; - /* disable master interrupt before clearing iir */ - de_ier = I915_READ(DEIER); - I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); - POSTING_READ(DEIER); + if (INTEL_INFO(dev_priv)->gen >= 9) + aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | + GEN9_AUX_CHANNEL_D; + + master_ctl = I915_READ_FW(GEN8_MASTER_IRQ); + master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; + if (!master_ctl) + return
IRQ_NONE; - /* Disable south interrupts. We'll only write to SDEIIR once, so further - * interrupts will will be stored on its back queue, and then we'll be - * able to process them after we restore SDEIER (as soon as we restore - * it, we'll get an interrupt if SDEIIR still has something to process - * due to its back queue). */ - sde_ier = I915_READ(SDEIER); - I915_WRITE(SDEIER, 0); - POSTING_READ(SDEIER); + I915_WRITE_FW(GEN8_MASTER_IRQ, 0); - de_iir = I915_READ(DEIIR); - gt_iir = I915_READ(GTIIR); - pm_iir = I915_READ(GEN6_PMIIR); + /* Find, clear, then process each source of interrupt */ - if (de_iir == 0 && gt_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0)) - goto done; + ret = gen8_gt_irq_handler(dev_priv, master_ctl); - ret = IRQ_HANDLED; + if (master_ctl & GEN8_DE_MISC_IRQ) { + tmp = I915_READ(GEN8_DE_MISC_IIR); + if (tmp) { + I915_WRITE(GEN8_DE_MISC_IIR, tmp); + ret = IRQ_HANDLED; + if (tmp & GEN8_DE_MISC_GSE) + intel_opregion_asle_intr(dev); + else + DRM_ERROR("Unexpected DE Misc interrupt\n"); + } + else + DRM_ERROR("The master control interrupt lied (DE MISC)!\n"); + } - if (IS_GEN5(dev)) - ilk_gt_irq_handler(dev, dev_priv, gt_iir); - else - snb_gt_irq_handler(dev, dev_priv, gt_iir); + if (master_ctl & GEN8_DE_PORT_IRQ) { + tmp = I915_READ(GEN8_DE_PORT_IIR); + if (tmp) { + bool found = false; + u32 hotplug_trigger = 0; + + if (IS_BROXTON(dev_priv)) + hotplug_trigger = tmp & BXT_DE_PORT_HOTPLUG_MASK; + else if (IS_BROADWELL(dev_priv)) + hotplug_trigger = tmp & GEN8_PORT_DP_A_HOTPLUG; + + I915_WRITE(GEN8_DE_PORT_IIR, tmp); + ret = IRQ_HANDLED; + + if (tmp & aux_mask) { + dp_aux_irq_handler(dev); + found = true; + } - if (de_iir & DE_AUX_CHANNEL_A) - dp_aux_irq_handler(dev); + if (hotplug_trigger) { + if (IS_BROXTON(dev)) + bxt_hpd_irq_handler(dev, hotplug_trigger, hpd_bxt); + else + ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_bdw); + found = true; + } - if (de_iir & DE_GSE) - intel_opregion_gse_intr(dev); + if (IS_BROXTON(dev) && (tmp & BXT_DE_PORT_GMBUS)) { + gmbus_irq_handler(dev); + found = true; + } - if (de_iir & DE_PIPEA_VBLANK) - drm_handle_vblank(dev, 0); + if (!found) + DRM_ERROR("Unexpected DE Port interrupt\n"); + } + else + DRM_ERROR("The master control interrupt lied (DE PORT)!\n"); + } - if (de_iir & DE_PIPEB_VBLANK) - drm_handle_vblank(dev, 1); + for_each_pipe(dev_priv, pipe) { + uint32_t pipe_iir, flip_done = 0, fault_errors = 0; - if (de_iir & DE_PLANEA_FLIP_DONE) { - intel_prepare_page_flip(dev, 0); - intel_finish_page_flip_plane(dev, 0); - } + if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) + continue; - if (de_iir & DE_PLANEB_FLIP_DONE) { - intel_prepare_page_flip(dev, 1); - intel_finish_page_flip_plane(dev, 1); - } + pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); + if (pipe_iir) { + ret = IRQ_HANDLED; + I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir); + + if (pipe_iir & GEN8_PIPE_VBLANK && + intel_pipe_handle_vblank(dev, pipe)) + intel_check_page_flip(dev, pipe); + + if (INTEL_INFO(dev_priv)->gen >= 9) + flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE; + else + flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE; - /* check event from PCH */ - if (de_iir & DE_PCH_EVENT) { - u32 pch_iir = I915_READ(SDEIIR); + if (flip_done) { + intel_prepare_page_flip(dev, pipe); + intel_finish_page_flip_plane(dev, pipe); + } - if (HAS_PCH_CPT(dev)) - cpt_irq_handler(dev, pch_iir); - else - ibx_irq_handler(dev, pch_iir); + if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE) + hsw_pipe_crc_irq_handler(dev, pipe); - /* should clear PCH hotplug event before clear CPU irq */ - I915_WRITE(SDEIIR, 
pch_iir); - } + if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) + intel_cpu_fifo_underrun_irq_handler(dev_priv, + pipe); - if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) - ironlake_handle_rps_change(dev); - if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS) - gen6_queue_rps_work(dev_priv, pm_iir); + if (INTEL_INFO(dev_priv)->gen >= 9) + fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS; + else + fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS; - I915_WRITE(GTIIR, gt_iir); - I915_WRITE(DEIIR, de_iir); - I915_WRITE(GEN6_PMIIR, pm_iir); + if (fault_errors) + DRM_ERROR("Fault errors on pipe %c\n: 0x%08x", + pipe_name(pipe), + pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS); + } else + DRM_ERROR("The master control interrupt lied (DE PIPE)!\n"); + } -done: - I915_WRITE(DEIER, de_ier); - POSTING_READ(DEIER); - I915_WRITE(SDEIER, sde_ier); - POSTING_READ(SDEIER); + if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) && + master_ctl & GEN8_DE_PCH_IRQ) { + /* + * FIXME(BDW): Assume for now that the new interrupt handling + * scheme also closed the SDE interrupt handling race we've seen + * on older pch-split platforms. But this needs testing. + */ + u32 pch_iir = I915_READ(SDEIIR); + if (pch_iir) { + I915_WRITE(SDEIIR, pch_iir); + ret = IRQ_HANDLED; + + if (HAS_PCH_SPT(dev_priv)) + spt_irq_handler(dev, pch_iir); + else + cpt_irq_handler(dev, pch_iir); + } else { + /* + * Like on previous PCH there seems to be something + * fishy going on with forwarding PCH interrupts. + */ + DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n"); + } + } + + I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); + POSTING_READ_FW(GEN8_MASTER_IRQ); return ret; } @@ -1012,7 +2372,7 @@ static void i915_error_wake_up(struct drm_i915_private *dev_priv, bool reset_completed) { - struct intel_ring_buffer *ring; + struct intel_engine_cs *ring; int i; /* @@ -1038,25 +2398,22 @@ } /** - * i915_error_work_func - do process context error handling work - * @work: work struct + * i915_reset_and_wakeup - do process context error handling work + * @dev: drm device * * Fire an error uevent so userspace can see that a hang or error * was detected. 
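
The uevent plumbing used by i915_reset_and_wakeup below is plain driver-core API; in isolation it is roughly this (a hedged sketch, and the GPU_HANG key is invented for illustration):

	#include <linux/kobject.h>

	static void notify_userspace(struct kobject *kobj)
	{
		/* envp is a NULL-terminated list of KEY=VALUE strings. */
		char *envp[] = { "GPU_HANG=1", NULL };

		/* Emits a KOBJ_CHANGE uevent; udev rules can match the key. */
		kobject_uevent_env(kobj, KOBJ_CHANGE, envp);
	}
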
*/ -static void i915_error_work_func(struct work_struct *work) +static void i915_reset_and_wakeup(struct drm_device *dev) { - struct i915_gpu_error *error = container_of(work, struct i915_gpu_error, - work); - drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t, - gpu_error); - struct drm_device *dev = dev_priv->dev; - char *error_event[] = { "ERROR=1", NULL }; - char *reset_event[] = { "RESET=1", NULL }; - char *reset_done_event[] = { "ERROR=0", NULL }; + struct drm_i915_private *dev_priv = to_i915(dev); + struct i915_gpu_error *error = &dev_priv->gpu_error; + char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; + char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; + char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; int ret; - kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); + kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event); /* * Note that there's only one work item which does gpu resets, so we @@ -1070,10 +2427,21 @@ */ if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) { DRM_DEBUG_DRIVER("resetting chip\n"); - kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, + kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, reset_event); /* + * In most cases it's guaranteed that we get here with an RPM + * reference held, for example because there is a pending GPU + * request that won't finish until the reset is done. This + * isn't the case at least when we get here by doing a + * simulated reset via debugfs, so get an RPM reference. + */ + intel_runtime_pm_get(dev_priv); + + intel_prepare_reset(dev); + + /* * All state reset _must_ be completed before we update the * reset counter, for otherwise waiters might miss the reset * pending state and not properly drop locks, resulting in @@ -1081,7 +2449,9 @@ */ ret = i915_reset(dev); - intel_display_handle_reset(dev); + intel_finish_reset(dev); + + intel_runtime_pm_put(dev_priv); if (ret == 0) { /* @@ -1094,13 +2464,13 @@ * updates before * the counter increment.
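
The constraint in this comment is the usual publish-after-write ordering: every reset side effect must be globally visible before the counter increment that signals completion. In isolation it is just a barrier in front of the increment (a sketch; reset_counter here is a free-standing example, not the driver's field):

	#include <linux/atomic.h>

	static atomic_t reset_counter;	/* example only */

	static void publish_reset_complete(void)
	{
		/*
		 * Order all prior stores (the reset side effects) before
		 * the increment; a waiter that observes the new value must
		 * also observe the fully reset state.
		 */
		smp_mb__before_atomic();
		atomic_inc(&reset_counter);
	}
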
*/ - smp_mb__before_atomic_inc(); + smp_mb__before_atomic(); atomic_inc(&dev_priv->gpu_error.reset_counter); - kobject_uevent_env(&dev->primary->kdev.kobj, + kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, reset_done_event); } else { - atomic_set(&error->reset_counter, I915_WEDGED); + atomic_or(I915_WEDGED, &error->reset_counter); } /* @@ -1111,533 +2481,6 @@ } } -/* NB: please notice the memset */ -static void i915_get_extra_instdone(struct drm_device *dev, - uint32_t *instdone) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG); - - switch(INTEL_INFO(dev)->gen) { - case 2: - case 3: - instdone[0] = I915_READ(INSTDONE); - break; - case 4: - case 5: - case 6: - instdone[0] = I915_READ(INSTDONE_I965); - instdone[1] = I915_READ(INSTDONE1); - break; - default: - WARN_ONCE(1, "Unsupported platform\n"); - case 7: - instdone[0] = I915_READ(GEN7_INSTDONE_1); - instdone[1] = I915_READ(GEN7_SC_INSTDONE); - instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE); - instdone[3] = I915_READ(GEN7_ROW_INSTDONE); - break; - } -} - -#ifdef CONFIG_DEBUG_FS -static struct drm_i915_error_object * -i915_error_object_create_sized(struct drm_i915_private *dev_priv, - struct drm_i915_gem_object *src, - const int num_pages) -{ - struct drm_i915_error_object *dst; - int i; - u32 reloc_offset; - - if (src == NULL || src->pages == NULL) - return NULL; - - dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC); - if (dst == NULL) - return NULL; - - reloc_offset = src->gtt_offset; - for (i = 0; i < num_pages; i++) { - unsigned long flags; - void *d; - - d = kmalloc(PAGE_SIZE, GFP_ATOMIC); - if (d == NULL) - goto unwind; - - local_irq_save(flags); - if (reloc_offset < dev_priv->gtt.mappable_end && - src->has_global_gtt_mapping) { - void __iomem *s; - - /* Simply ignore tiling or any overlapping fence. - * It's part of the error state, and this hopefully - * captures what the GPU read. 
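
The page-copy loop in the code being removed here relies on the atomic write-combining io_mapping helpers to read back what the GPU saw. The core idiom, minus the driver's bookkeeping (a sketch, assuming a page-aligned offset into the mappable aperture):

	#include <linux/io.h>
	#include <linux/io-mapping.h>
	#include <linux/mm.h>

	/* Copy one page out of a GTT io_mapping into ordinary memory. */
	static void snapshot_gtt_page(struct io_mapping *gtt,
				      unsigned long offset, void *dst)
	{
		void __iomem *src;

		src = io_mapping_map_atomic_wc(gtt, offset);	/* no sleeping from here */
		memcpy_fromio(dst, src, PAGE_SIZE);		/* MMIO-safe byte copy */
		io_mapping_unmap_atomic(src);
	}
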
- */ - - s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable, - reloc_offset); - memcpy_fromio(d, s, PAGE_SIZE); - io_mapping_unmap_atomic(s); - } else if (src->stolen) { - unsigned long offset; - - offset = dev_priv->mm.stolen_base; - offset += src->stolen->start; - offset += i << PAGE_SHIFT; - - memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE); - } else { - struct page *page; - void *s; - - page = i915_gem_object_get_page(src, i); - - drm_clflush_pages(&page, 1); - - s = kmap_atomic(page); - memcpy(d, s, PAGE_SIZE); - kunmap_atomic(s); - - drm_clflush_pages(&page, 1); - } - local_irq_restore(flags); - - dst->pages[i] = d; - - reloc_offset += PAGE_SIZE; - } - dst->page_count = num_pages; - dst->gtt_offset = src->gtt_offset; - - return dst; - -unwind: - while (i--) - kfree(dst->pages[i]); - kfree(dst); - return NULL; -} -#define i915_error_object_create(dev_priv, src) \ - i915_error_object_create_sized((dev_priv), (src), \ - (src)->base.size>>PAGE_SHIFT) - -static void -i915_error_object_free(struct drm_i915_error_object *obj) -{ - int page; - - if (obj == NULL) - return; - - for (page = 0; page < obj->page_count; page++) - kfree(obj->pages[page]); - - kfree(obj); -} - -void -i915_error_state_free(struct kref *error_ref) -{ - struct drm_i915_error_state *error = container_of(error_ref, - typeof(*error), ref); - int i; - - for (i = 0; i < ARRAY_SIZE(error->ring); i++) { - i915_error_object_free(error->ring[i].batchbuffer); - i915_error_object_free(error->ring[i].ringbuffer); - kfree(error->ring[i].requests); - } - - kfree(error->active_bo); - kfree(error->overlay); - kfree(error); -} -static void capture_bo(struct drm_i915_error_buffer *err, - struct drm_i915_gem_object *obj) -{ - err->size = obj->base.size; - err->name = obj->base.name; - err->rseqno = obj->last_read_seqno; - err->wseqno = obj->last_write_seqno; - err->gtt_offset = obj->gtt_offset; - err->read_domains = obj->base.read_domains; - err->write_domain = obj->base.write_domain; - err->fence_reg = obj->fence_reg; - err->pinned = 0; - if (obj->pin_count > 0) - err->pinned = 1; - if (obj->user_pin_count > 0) - err->pinned = -1; - err->tiling = obj->tiling_mode; - err->dirty = obj->dirty; - err->purgeable = obj->madv != I915_MADV_WILLNEED; - err->ring = obj->ring ? 
obj->ring->id : -1; - err->cache_level = obj->cache_level; -} - -static u32 capture_active_bo(struct drm_i915_error_buffer *err, - int count, struct list_head *head) -{ - struct drm_i915_gem_object *obj; - int i = 0; - - list_for_each_entry(obj, head, mm_list) { - capture_bo(err++, obj); - if (++i == count) - break; - } - - return i; -} - -static u32 capture_pinned_bo(struct drm_i915_error_buffer *err, - int count, struct list_head *head) -{ - struct drm_i915_gem_object *obj; - int i = 0; - - list_for_each_entry(obj, head, gtt_list) { - if (obj->pin_count == 0) - continue; - - capture_bo(err++, obj); - if (++i == count) - break; - } - - return i; -} - -static void i915_gem_record_fences(struct drm_device *dev, - struct drm_i915_error_state *error) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - int i; - - /* Fences */ - switch (INTEL_INFO(dev)->gen) { - case 7: - case 6: - for (i = 0; i < dev_priv->num_fence_regs; i++) - error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); - break; - case 5: - case 4: - for (i = 0; i < 16; i++) - error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); - break; - case 3: - if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) - for (i = 0; i < 8; i++) - error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); - case 2: - for (i = 0; i < 8; i++) - error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); - break; - - default: - BUG(); - } -} - -static struct drm_i915_error_object * -i915_error_first_batchbuffer(struct drm_i915_private *dev_priv, - struct intel_ring_buffer *ring) -{ - struct drm_i915_gem_object *obj; - u32 seqno; - - if (!ring->get_seqno) - return NULL; - - if (HAS_BROKEN_CS_TLB(dev_priv->dev)) { - u32 acthd = I915_READ(ACTHD); - - if (WARN_ON(ring->id != RCS)) - return NULL; - - obj = ring->private; - if (acthd >= obj->gtt_offset && - acthd < obj->gtt_offset + obj->base.size) - return i915_error_object_create(dev_priv, obj); - } - - seqno = ring->get_seqno(ring, false); - list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { - if (obj->ring != ring) - continue; - - if (i915_seqno_passed(seqno, obj->last_read_seqno)) - continue; - - if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0) - continue; - - /* We need to copy these to an anonymous buffer as the simplest - * method to avoid being overwritten by userspace. 
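
The batchbuffer guess in the deleted code above is just an interval test: does ACTHD fall inside a buffer's GTT range. Standalone (the struct is illustrative, not the driver's object type):

	struct gpu_buffer {		/* illustrative */
		u64 gtt_offset;
		u64 size;
	};

	static bool contains_gpu_addr(const struct gpu_buffer *buf, u64 acthd)
	{
		/* Half-open interval: [offset, offset + size) */
		return acthd >= buf->gtt_offset &&
		       acthd < buf->gtt_offset + buf->size;
	}
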
- */ - return i915_error_object_create(dev_priv, obj); - } - - return NULL; -} - -static void i915_record_ring_state(struct drm_device *dev, - struct drm_i915_error_state *error, - struct intel_ring_buffer *ring) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - if (INTEL_INFO(dev)->gen >= 6) { - error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50); - error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring)); - error->semaphore_mboxes[ring->id][0] - = I915_READ(RING_SYNC_0(ring->mmio_base)); - error->semaphore_mboxes[ring->id][1] - = I915_READ(RING_SYNC_1(ring->mmio_base)); - error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0]; - error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1]; - } - - if (INTEL_INFO(dev)->gen >= 4) { - error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base)); - error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base)); - error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base)); - error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base)); - error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base)); - if (ring->id == RCS) - error->bbaddr = I915_READ64(BB_ADDR); - } else { - error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX); - error->ipeir[ring->id] = I915_READ(IPEIR); - error->ipehr[ring->id] = I915_READ(IPEHR); - error->instdone[ring->id] = I915_READ(INSTDONE); - } - - error->waiting[ring->id] = waitqueue_active(&ring->irq_queue); - error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base)); - error->seqno[ring->id] = ring->get_seqno(ring, false); - error->acthd[ring->id] = intel_ring_get_active_head(ring); - error->head[ring->id] = I915_READ_HEAD(ring); - error->tail[ring->id] = I915_READ_TAIL(ring); - error->ctl[ring->id] = I915_READ_CTL(ring); - - error->cpu_ring_head[ring->id] = ring->head; - error->cpu_ring_tail[ring->id] = ring->tail; -} - - -static void i915_gem_record_active_context(struct intel_ring_buffer *ring, - struct drm_i915_error_state *error, - struct drm_i915_error_ring *ering) -{ - struct drm_i915_private *dev_priv = ring->dev->dev_private; - struct drm_i915_gem_object *obj; - - /* Currently render ring is the only HW context user */ - if (ring->id != RCS || !error->ccid) - return; - - list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) { - if ((error->ccid & PAGE_MASK) == obj->gtt_offset) { - ering->ctx = i915_error_object_create_sized(dev_priv, - obj, 1); - } - } -} - -static void i915_gem_record_rings(struct drm_device *dev, - struct drm_i915_error_state *error) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_ring_buffer *ring; - struct drm_i915_gem_request *request; - int i, count; - - for_each_ring(ring, dev_priv, i) { - i915_record_ring_state(dev, error, ring); - - error->ring[i].batchbuffer = - i915_error_first_batchbuffer(dev_priv, ring); - - error->ring[i].ringbuffer = - i915_error_object_create(dev_priv, ring->obj); - - - i915_gem_record_active_context(ring, error, &error->ring[i]); - - count = 0; - list_for_each_entry(request, &ring->request_list, list) - count++; - - error->ring[i].num_requests = count; - error->ring[i].requests = - kmalloc(count*sizeof(struct drm_i915_error_request), - GFP_ATOMIC); - if (error->ring[i].requests == NULL) { - error->ring[i].num_requests = 0; - continue; - } - - count = 0; - list_for_each_entry(request, &ring->request_list, list) { - struct drm_i915_error_request *erq; - - erq = &error->ring[i].requests[count++]; - erq->seqno = request->seqno; - erq->jiffies = request->emitted_jiffies; - 
erq->tail = request->tail; - } - } -} - -/** - * i915_capture_error_state - capture an error record for later analysis - * @dev: drm device - * - * Should be called when an error is detected (either a hang or an error - * interrupt) to capture error state from the time of the error. Fills - * out a structure which becomes available in debugfs for user level tools - * to pick up. - */ -static void i915_capture_error_state(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj; - struct drm_i915_error_state *error; - unsigned long flags; - int i, pipe; - - spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); - error = dev_priv->gpu_error.first_error; - spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); - if (error) - return; - - /* Account for pipe specific data like PIPE*STAT */ - error = kzalloc(sizeof(*error), GFP_ATOMIC); - if (!error) { - DRM_DEBUG_DRIVER("out of memory, not capturing error state\n"); - return; - } - - DRM_INFO("capturing error event; look for more information in " - "/sys/kernel/debug/dri/%d/i915_error_state\n", - dev->primary->index); - - kref_init(&error->ref); - error->eir = I915_READ(EIR); - error->pgtbl_er = I915_READ(PGTBL_ER); - if (HAS_HW_CONTEXTS(dev)) - error->ccid = I915_READ(CCID); - - if (HAS_PCH_SPLIT(dev)) - error->ier = I915_READ(DEIER) | I915_READ(GTIER); - else if (IS_VALLEYVIEW(dev)) - error->ier = I915_READ(GTIER) | I915_READ(VLV_IER); - else if (IS_GEN2(dev)) - error->ier = I915_READ16(IER); - else - error->ier = I915_READ(IER); - - if (INTEL_INFO(dev)->gen >= 6) - error->derrmr = I915_READ(DERRMR); - - if (IS_VALLEYVIEW(dev)) - error->forcewake = I915_READ(FORCEWAKE_VLV); - else if (INTEL_INFO(dev)->gen >= 7) - error->forcewake = I915_READ(FORCEWAKE_MT); - else if (INTEL_INFO(dev)->gen == 6) - error->forcewake = I915_READ(FORCEWAKE); - - if (!HAS_PCH_SPLIT(dev)) - for_each_pipe(pipe) - error->pipestat[pipe] = I915_READ(PIPESTAT(pipe)); - - if (INTEL_INFO(dev)->gen >= 6) { - error->error = I915_READ(ERROR_GEN6); - error->done_reg = I915_READ(DONE_REG); - } - - if (INTEL_INFO(dev)->gen == 7) - error->err_int = I915_READ(GEN7_ERR_INT); - - i915_get_extra_instdone(dev, error->extra_instdone); - - i915_gem_record_fences(dev, error); - i915_gem_record_rings(dev, error); - - /* Record buffers on the active and pinned lists. 
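
Because capture runs from error context, the code here sizes its arrays with a counting pass over the lists and allocates with GFP_ATOMIC. The generic two-pass shape (a sketch; struct item and snapshot_list are illustrative names):

	#include <linux/list.h>
	#include <linux/slab.h>

	struct item { struct list_head link; };	/* illustrative */

	static struct item **snapshot_list(struct list_head *head, int *out_count)
	{
		struct item *it, **arr;
		int n = 0, i = 0;

		list_for_each_entry(it, head, link)	/* pass 1: count */
			n++;

		arr = kmalloc_array(n, sizeof(*arr), GFP_ATOMIC);
		if (!arr)
			return NULL;			/* capture is best-effort */

		list_for_each_entry(it, head, link) {	/* pass 2: fill */
			if (i == n)			/* list may have grown */
				break;
			arr[i++] = it;
		}
		*out_count = i;
		return arr;
	}
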
*/ - error->active_bo = NULL; - error->pinned_bo = NULL; - - i = 0; - list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) - i++; - error->active_bo_count = i; - list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) - if (obj->pin_count) - i++; - error->pinned_bo_count = i - error->active_bo_count; - - error->active_bo = NULL; - error->pinned_bo = NULL; - if (i) { - error->active_bo = kmalloc(sizeof(*error->active_bo)*i, - GFP_ATOMIC); - if (error->active_bo) - error->pinned_bo = - error->active_bo + error->active_bo_count; - } - - if (error->active_bo) - error->active_bo_count = - capture_active_bo(error->active_bo, - error->active_bo_count, - &dev_priv->mm.active_list); - - if (error->pinned_bo) - error->pinned_bo_count = - capture_pinned_bo(error->pinned_bo, - error->pinned_bo_count, - &dev_priv->mm.bound_list); - - do_gettimeofday(&error->time); - - error->overlay = intel_overlay_capture_error_state(dev); - error->display = intel_display_capture_error_state(dev); - - spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); - if (dev_priv->gpu_error.first_error == NULL) { - dev_priv->gpu_error.first_error = error; - error = NULL; - } - spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); - - if (error) - i915_error_state_free(&error->ref); -} - -void i915_destroy_error_state(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_error_state *error; - unsigned long flags; - - spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); - error = dev_priv->gpu_error.first_error; - dev_priv->gpu_error.first_error = NULL; - spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); - - if (error) - kref_put(&error->ref, i915_error_state_free); -} -#else -#define i915_capture_error_state(x) -#endif - static void i915_report_and_clear_eir(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -1686,7 +2529,7 @@ if (eir & I915_ERROR_MEMORY_REFRESH) { pr_err("memory refresh error:\n"); - for_each_pipe(pipe) + for_each_pipe(dev_priv, pipe) pr_err("pipe %c stat: 0x%08x\n", pipe_name(pipe), I915_READ(PIPESTAT(pipe))); /* pipestat has already been acked */ @@ -1731,30 +2574,37 @@ } /** - * i915_handle_error - handle an error interrupt + * i915_handle_error - handle a gpu error * @dev: drm device * - * Do some basic checking of regsiter state at error interrupt time and + * Do some basic checking of register state at error time and * dump it to the syslog. Also call i915_capture_error_state() to make * sure we get a record and make it available in debugfs. Fire a uevent * so userspace knows something bad happened (should trigger collection * of a ring dump etc.). */ -void i915_handle_error(struct drm_device *dev, bool wedged) +void i915_handle_error(struct drm_device *dev, bool wedged, + const char *fmt, ...) { struct drm_i915_private *dev_priv = dev->dev_private; + va_list args; + char error_msg[80]; + + va_start(args, fmt); + vscnprintf(error_msg, sizeof(error_msg), fmt, args); + va_end(args); - i915_capture_error_state(dev); + i915_capture_error_state(dev, wedged, error_msg); i915_report_and_clear_eir(dev); if (wedged) { - atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG, + atomic_or(I915_RESET_IN_PROGRESS_FLAG, &dev_priv->gpu_error.reset_counter); /* - * Wakeup waiting processes so that the reset work function - * i915_error_work_func doesn't deadlock trying to grab various - * locks. 
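
The ordering this comment insists on, publish the reset-pending flag before waking anyone, guarantees every woken waiter re-checks the flag and backs off. In miniature (a sketch; the struct, flag bit and function names are stand-ins):

	#include <linux/atomic.h>
	#include <linux/wait.h>

	#define RESET_PENDING	(1 << 0)	/* illustrative flag bit */

	struct error_state {			/* stand-in, not the driver's */
		atomic_t flags;
		wait_queue_head_t queue;	/* assume initialized elsewhere */
	};

	static void begin_reset(struct error_state *e)
	{
		atomic_or(RESET_PENDING, &e->flags);	/* 1: publish pending state */
		wake_up_all(&e->queue);			/* 2: then wake sleepers; they
							 * re-check flags, drop locks */
	}
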
By bumping the reset counter first, the woken + * Wakeup waiting processes so that the reset function + * i915_reset_and_wakeup doesn't deadlock trying to grab + * various locks. By bumping the reset counter first, the woken * processes will see a reset in progress and back off, * releasing their locks and then wait for the reset completion. * We must do this for _all_ gpu waiters that might hold locks @@ -1767,410 +2617,545 @@ i915_error_wake_up(dev_priv, false); } - /* - * Our reset work can grab modeset locks (since it needs to reset the - * state of outstanding pagelips). Hence it must not be run on our own - * dev-priv->wq work queue for otherwise the flush_work in the pageflip - * code will deadlock. - */ - schedule_work(&dev_priv->gpu_error.work); -} - -static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct drm_i915_gem_object *obj; - struct intel_unpin_work *work; - unsigned long flags; - bool stall_detected; - - /* Ignore early vblank irqs */ - if (intel_crtc == NULL) - return; - - spin_lock_irqsave(&dev->event_lock, flags); - work = intel_crtc->unpin_work; - - if (work == NULL || - atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE || - !work->enable_stall_check) { - /* Either the pending flip IRQ arrived, or we're too early. Don't check */ - spin_unlock_irqrestore(&dev->event_lock, flags); - return; - } - - /* Potential stall - if we see that the flip has happened, assume a missed interrupt */ - obj = work->pending_flip_obj; - if (INTEL_INFO(dev)->gen >= 4) { - int dspsurf = DSPSURF(intel_crtc->plane); - stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) == - obj->gtt_offset; - } else { - int dspaddr = DSPADDR(intel_crtc->plane); - stall_detected = I915_READ(dspaddr) == (obj->gtt_offset + - crtc->y * crtc->fb->pitches[0] + - crtc->x * crtc->fb->bits_per_pixel/8); - } - - spin_unlock_irqrestore(&dev->event_lock, flags); - - if (stall_detected) { - DRM_DEBUG_DRIVER("Pageflip stall detected\n"); - intel_prepare_page_flip(dev, intel_crtc->plane); - } + i915_reset_and_wakeup(dev); } /* Called from drm generic code, passed 'crtc' which * we use as a pipe index */ -static int i915_enable_vblank(struct drm_device *dev, int pipe) +static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; unsigned long irqflags; - if (!i915_pipe_enabled(dev, pipe)) - return -EINVAL; - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); if (INTEL_INFO(dev)->gen >= 4) i915_enable_pipestat(dev_priv, pipe, - PIPE_START_VBLANK_INTERRUPT_ENABLE); + PIPE_START_VBLANK_INTERRUPT_STATUS); else i915_enable_pipestat(dev_priv, pipe, - PIPE_VBLANK_INTERRUPT_ENABLE); - - /* maintain vblank delivery even in deep C-states */ - if (dev_priv->info->gen == 3) - I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS)); + PIPE_VBLANK_INTERRUPT_STATUS); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); return 0; } -static int ironlake_enable_vblank(struct drm_device *dev, int pipe) +static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; unsigned long irqflags; - - if (!i915_pipe_enabled(dev, pipe)) - return 
-EINVAL; + uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) : + DE_PIPE_VBLANK(pipe); spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - ironlake_enable_display_irq(dev_priv, (pipe == 0) ? - DE_PIPEA_VBLANK : DE_PIPEB_VBLANK); + ironlake_enable_display_irq(dev_priv, bit); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); return 0; } -static int ivybridge_enable_vblank(struct drm_device *dev, int pipe) +static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; unsigned long irqflags; - if (!i915_pipe_enabled(dev, pipe)) - return -EINVAL; - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - ironlake_enable_display_irq(dev_priv, - DE_PIPEA_VBLANK_IVB << (5 * pipe)); + i915_enable_pipestat(dev_priv, pipe, + PIPE_START_VBLANK_INTERRUPT_STATUS); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); return 0; } -static int valleyview_enable_vblank(struct drm_device *dev, int pipe) +static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; unsigned long irqflags; - u32 imr; - - if (!i915_pipe_enabled(dev, pipe)) - return -EINVAL; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - imr = I915_READ(VLV_IMR); - if (pipe == 0) - imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; - else - imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; - I915_WRITE(VLV_IMR, imr); - i915_enable_pipestat(dev_priv, pipe, - PIPE_START_VBLANK_INTERRUPT_ENABLE); + dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK; + I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); + POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); - return 0; } /* Called from drm generic code, passed 'crtc' which * we use as a pipe index */ -static void i915_disable_vblank(struct drm_device *dev, int pipe) +static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; unsigned long irqflags; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - if (dev_priv->info->gen == 3) - I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS)); - i915_disable_pipestat(dev_priv, pipe, - PIPE_VBLANK_INTERRUPT_ENABLE | - PIPE_START_VBLANK_INTERRUPT_ENABLE); + PIPE_VBLANK_INTERRUPT_STATUS | + PIPE_START_VBLANK_INTERRUPT_STATUS); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } -static void ironlake_disable_vblank(struct drm_device *dev, int pipe) +static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; unsigned long irqflags; + uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) : + DE_PIPE_VBLANK(pipe); spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - ironlake_disable_display_irq(dev_priv, (pipe == 0) ? 
- DE_PIPEA_VBLANK : DE_PIPEB_VBLANK); + ironlake_disable_display_irq(dev_priv, bit); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } -static void ivybridge_disable_vblank(struct drm_device *dev, int pipe) +static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; unsigned long irqflags; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - ironlake_disable_display_irq(dev_priv, - DE_PIPEA_VBLANK_IVB << (pipe * 5)); + i915_disable_pipestat(dev_priv, pipe, + PIPE_START_VBLANK_INTERRUPT_STATUS); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } -static void valleyview_disable_vblank(struct drm_device *dev, int pipe) +static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; unsigned long irqflags; - u32 imr; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - i915_disable_pipestat(dev_priv, pipe, - PIPE_START_VBLANK_INTERRUPT_ENABLE); - imr = I915_READ(VLV_IMR); - if (pipe == 0) - imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; - else - imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; - I915_WRITE(VLV_IMR, imr); + dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK; + I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); + POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } -static u32 -ring_last_seqno(struct intel_ring_buffer *ring) +static bool +ring_idle(struct intel_engine_cs *ring, u32 seqno) { - return list_entry(ring->request_list.prev, - struct drm_i915_gem_request, list)->seqno; + return (list_empty(&ring->request_list) || + i915_seqno_passed(seqno, ring->last_submitted_seqno)); } -static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err) +static bool +ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr) { - if (list_empty(&ring->request_list) || - i915_seqno_passed(ring->get_seqno(ring, false), - ring_last_seqno(ring))) { - /* Issue a wake-up to catch stuck h/w. */ - if (waitqueue_active(&ring->irq_queue)) { - DRM_ERROR("Hangcheck timer elapsed... 
%s idle\n", - ring->name); - wake_up_all(&ring->irq_queue); - *err = true; + if (INTEL_INFO(dev)->gen >= 8) { + return (ipehr >> 23) == 0x1c; + } else { + ipehr &= ~MI_SEMAPHORE_SYNC_MASK; + return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | + MI_SEMAPHORE_REGISTER); + } +} + +static struct intel_engine_cs * +semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset) +{ + struct drm_i915_private *dev_priv = ring->dev->dev_private; + struct intel_engine_cs *signaller; + int i; + + if (INTEL_INFO(dev_priv->dev)->gen >= 8) { + for_each_ring(signaller, dev_priv, i) { + if (ring == signaller) + continue; + + if (offset == signaller->semaphore.signal_ggtt[ring->id]) + return signaller; + } + } else { + u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK; + + for_each_ring(signaller, dev_priv, i) { + if (ring == signaller) + continue; + + if (sync_bits == signaller->semaphore.mbox.wait[ring->id]) + return signaller; } - return true; } - return false; + + DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n", + ring->id, ipehr, offset); + + return NULL; } -static bool semaphore_passed(struct intel_ring_buffer *ring) +static struct intel_engine_cs * +semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno) { struct drm_i915_private *dev_priv = ring->dev->dev_private; - u32 acthd = intel_ring_get_active_head(ring) & HEAD_ADDR; - struct intel_ring_buffer *signaller; - u32 cmd, ipehr, acthd_min; + u32 cmd, ipehr, head; + u64 offset = 0; + int i, backwards; + + /* + * This function does not support execlist mode - any attempt to + * proceed further into this function will result in a kernel panic + * when dereferencing ring->buffer, which is not set up in execlist + * mode. + * + * The correct way of doing it would be to derive the currently + * executing ring buffer from the current context, which is derived + * from the currently running request. Unfortunately, to get the + * current request we would have to grab the struct_mutex before doing + * anything else, which would be ill-advised since some other thread + * might have grabbed it already and managed to hang itself, causing + * the hang checker to deadlock. + * + * Therefore, this function does not support execlist mode in its + * current form. Just return NULL and move on. + */ + if (ring->buffer == NULL) + return NULL; ipehr = I915_READ(RING_IPEHR(ring->mmio_base)); - if ((ipehr & ~(0x3 << 16)) != - (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER)) - return false; + if (!ipehr_is_semaphore_wait(ring->dev, ipehr)) + return NULL; - /* ACTHD is likely pointing to the dword after the actual command, - * so scan backwards until we find the MBOX. + /* + * HEAD is likely pointing to the dword after the actual command, + * so scan backwards until we find the MBOX. But limit it to just 3 + * or 4 dwords depending on the semaphore wait command size. + * Note that we don't care about ACTHD here since that might + * point at a batch, and semaphores are always emitted into the + * ringbuffer itself. */ - acthd_min = max((int)acthd - 3 * 4, 0); - do { - cmd = ioread32(ring->virtual_start + acthd); + head = I915_READ_HEAD(ring) & HEAD_ADDR; + backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4; + + for (i = backwards; i; --i) { + /* + * Be paranoid and presume the hw has gone off into the wild - + * our ring is smaller than what the hardware (and hence + * HEAD_ADDR) allows. Also handles wrap-around.
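
The loop below depends on the ring size being a power of two, so a single mask both clamps a wild HEAD value and implements wrap-around while stepping backwards. The same walk in isolation (a sketch; find_cmd_backwards is an illustrative name):

	#include <linux/types.h>

	/* Scan at most max_back dwords backwards for a matching command word.
	 * size must be a power of two; head is a byte offset into ring. */
	static int find_cmd_backwards(const u32 *ring, u32 size, u32 head,
				      u32 match, int max_back)
	{
		int i;

		for (i = 0; i < max_back; i++) {
			head &= size - 1;	/* clamps a bogus HEAD and wraps */
			if (ring[head / 4] == match)
				return head;
			head -= 4;		/* u32 underflow is fixed by the mask */
		}
		return -1;			/* not found within the window */
	}
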
+ */ + head &= ring->buffer->size - 1; + + /* This here seems to blow up */ + cmd = ioread32(ring->buffer->virtual_start + head); if (cmd == ipehr) break; - acthd -= 4; - if (acthd < acthd_min) - return false; - } while (1); + head -= 4; + } + + if (!i) + return NULL; - signaller = &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3]; - return i915_seqno_passed(signaller->get_seqno(signaller, false), - ioread32(ring->virtual_start+acthd+4)+1); + *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1; + if (INTEL_INFO(ring->dev)->gen >= 8) { + offset = ioread32(ring->buffer->virtual_start + head + 12); + offset <<= 32; + offset |= ioread32(ring->buffer->virtual_start + head + 8); + } + return semaphore_wait_to_signaller_ring(ring, ipehr, offset); } -static bool kick_ring(struct intel_ring_buffer *ring) +static int semaphore_passed(struct intel_engine_cs *ring) +{ + struct drm_i915_private *dev_priv = ring->dev->dev_private; + struct intel_engine_cs *signaller; + u32 seqno; + + ring->hangcheck.deadlock++; + + signaller = semaphore_waits_for(ring, &seqno); + if (signaller == NULL) + return -1; + + /* Prevent pathological recursion due to driver bugs */ + if (signaller->hangcheck.deadlock >= I915_NUM_RINGS) + return -1; + + if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno)) + return 1; + + /* cursory check for an unkickable deadlock */ + if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE && + semaphore_passed(signaller) < 0) + return -1; + + return 0; +} + +static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv) +{ + struct intel_engine_cs *ring; + int i; + + for_each_ring(ring, dev_priv, i) + ring->hangcheck.deadlock = 0; +} + +static enum intel_ring_hangcheck_action +ring_stuck(struct intel_engine_cs *ring, u64 acthd) { struct drm_device *dev = ring->dev; struct drm_i915_private *dev_priv = dev->dev_private; - u32 tmp = I915_READ_CTL(ring); + u32 tmp; + + if (acthd != ring->hangcheck.acthd) { + if (acthd > ring->hangcheck.max_acthd) { + ring->hangcheck.max_acthd = acthd; + return HANGCHECK_ACTIVE; + } + + return HANGCHECK_ACTIVE_LOOP; + } + + if (IS_GEN2(dev)) + return HANGCHECK_HUNG; + + /* Is the chip hanging on a WAIT_FOR_EVENT? + * If so we can simply poke the RB_WAIT bit + * and break the hang. This should work on + * all but the second generation chipsets. + */ + tmp = I915_READ_CTL(ring); if (tmp & RING_WAIT) { - DRM_ERROR("Kicking stuck wait on %s\n", - ring->name); + i915_handle_error(dev, false, + "Kicking stuck wait on %s", + ring->name); I915_WRITE_CTL(ring, tmp); - return true; + return HANGCHECK_KICK; } - if (INTEL_INFO(dev)->gen >= 6 && - tmp & RING_WAIT_SEMAPHORE && - semaphore_passed(ring)) { - DRM_ERROR("Kicking stuck semaphore on %s\n", - ring->name); - I915_WRITE_CTL(ring, tmp); - return true; + if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) { + switch (semaphore_passed(ring)) { + default: + return HANGCHECK_HUNG; + case 1: + i915_handle_error(dev, false, + "Kicking stuck semaphore on %s", + ring->name); + I915_WRITE_CTL(ring, tmp); + return HANGCHECK_KICK; + case 0: + return HANGCHECK_WAIT; + } } - return false; + + return HANGCHECK_HUNG; } -static bool i915_hangcheck_hung(struct drm_device *dev) +/* + * This is called when the chip hasn't reported back with completed + * batchbuffers in a long time. We keep track of per-ring seqno progress and + * if there is no progress, the hangcheck score for that ring is increased. + * Further, acthd is inspected to see if the ring is stuck. In the stuck case + * we kick the ring.
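
The scoring in i915_hangcheck_elapsed below is a small leaky bucket: lack of progress charges BUSY/KICK/HUNG-weighted increments, forward progress drains the score, and crossing a threshold declares the ring hung. In miniature (a sketch; the weights mirror the defines below, the threshold value is illustrative):

	#define SCORE_BUSY	1	/* still chewing on the same request */
	#define SCORE_KICK	5	/* we had to kick a stuck wait */
	#define SCORE_HUNG	20	/* no progress and nothing to kick */
	#define SCORE_LIMIT	31	/* illustrative hang threshold */

	static bool hangcheck_tick(int *score, bool progressed, int increment)
	{
		if (progressed && *score > 0)
			(*score)--;		/* drain: forward progress */
		else if (!progressed)
			*score += increment;	/* charge: BUSY, KICK or HUNG */

		return *score >= SCORE_LIMIT;	/* true = treat ring as hung */
	}
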
If we see no progress on three subsequent calls + * we assume chip is wedged and try to fix it by resetting the chip. + */ +static void i915_hangcheck_elapsed(struct work_struct *work) { - drm_i915_private_t *dev_priv = dev->dev_private; - - if (dev_priv->gpu_error.hangcheck_count++ > 1) { - bool hung = true; + struct drm_i915_private *dev_priv = + container_of(work, typeof(*dev_priv), + gpu_error.hangcheck_work.work); + struct drm_device *dev = dev_priv->dev; + struct intel_engine_cs *ring; + int i; + int busy_count = 0, rings_hung = 0; + bool stuck[I915_NUM_RINGS] = { 0 }; +#define BUSY 1 +#define KICK 5 +#define HUNG 20 - DRM_ERROR("Hangcheck timer elapsed... GPU hung\n"); - i915_handle_error(dev, true); + if (!i915.enable_hangcheck) + return; - if (!IS_GEN2(dev)) { - struct intel_ring_buffer *ring; - int i; + for_each_ring(ring, dev_priv, i) { + u64 acthd; + u32 seqno; + bool busy = true; + + semaphore_clear_deadlocks(dev_priv); + + seqno = ring->get_seqno(ring, false); + acthd = intel_ring_get_active_head(ring); + + if (ring->hangcheck.seqno == seqno) { + if (ring_idle(ring, seqno)) { + ring->hangcheck.action = HANGCHECK_IDLE; + + if (waitqueue_active(&ring->irq_queue)) { + /* Issue a wake-up to catch stuck h/w. */ + if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) { + if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring))) + DRM_ERROR("Hangcheck timer elapsed... %s idle\n", + ring->name); + else + DRM_INFO("Fake missed irq on %s\n", + ring->name); + wake_up_all(&ring->irq_queue); + } + /* Safeguard against driver failure */ + ring->hangcheck.score += BUSY; + } else + busy = false; + } else { + /* We always increment the hangcheck score + * if the ring is busy and still processing + * the same request, so that no single request + * can run indefinitely (such as a chain of + * batches). The only time we do not increment + * the hangcheck score on this ring, if this + * ring is in a legitimate wait for another + * ring. In that case the waiting ring is a + * victim and we want to be sure we catch the + * right culprit. Then every time we do kick + * the ring, add a small increment to the + * score so that we can catch a batch that is + * being repeatedly kicked and so responsible + * for stalling the machine. + */ + ring->hangcheck.action = ring_stuck(ring, + acthd); + + switch (ring->hangcheck.action) { + case HANGCHECK_IDLE: + case HANGCHECK_WAIT: + case HANGCHECK_ACTIVE: + break; + case HANGCHECK_ACTIVE_LOOP: + ring->hangcheck.score += BUSY; + break; + case HANGCHECK_KICK: + ring->hangcheck.score += KICK; + break; + case HANGCHECK_HUNG: + ring->hangcheck.score += HUNG; + stuck[i] = true; + break; + } + } + } else { + ring->hangcheck.action = HANGCHECK_ACTIVE; - /* Is the chip hanging on a WAIT_FOR_EVENT? - * If so we can simply poke the RB_WAIT bit - * and break the hang. This should work on - * all but the second generation chipsets. + /* Gradually reduce the count so that we catch DoS + * attempts across multiple batches. */ - for_each_ring(ring, dev_priv, i) - hung &= !kick_ring(ring); + if (ring->hangcheck.score > 0) + ring->hangcheck.score--; + + ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0; } - return hung; + ring->hangcheck.seqno = seqno; + ring->hangcheck.acthd = acthd; + busy_count += busy; } - return false; + for_each_ring(ring, dev_priv, i) { + if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) { + DRM_INFO("%s on %s\n", + stuck[i] ? 
"stuck" : "no progress", + ring->name); + rings_hung++; + } + } + + if (rings_hung) + return i915_handle_error(dev, true, "Ring hung"); + + if (busy_count) + /* Reset timer case chip hangs without another request + * being added */ + i915_queue_hangcheck(dev); } -/** - * This is called when the chip hasn't reported back with completed - * batchbuffers in a long time. The first time this is called we simply record - * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses - * again, we assume the chip is wedged and try to fix it. - */ -void i915_hangcheck_elapsed(unsigned long data) -{ - struct drm_device *dev = (struct drm_device *)data; - drm_i915_private_t *dev_priv = dev->dev_private; - uint32_t acthd[I915_NUM_RINGS], instdone[I915_NUM_INSTDONE_REG]; - struct intel_ring_buffer *ring; - bool err = false, idle; - int i; +void i915_queue_hangcheck(struct drm_device *dev) +{ + struct i915_gpu_error *e = &to_i915(dev)->gpu_error; - if (!i915_enable_hangcheck) + if (!i915.enable_hangcheck) return; - memset(acthd, 0, sizeof(acthd)); - idle = true; - for_each_ring(ring, dev_priv, i) { - idle &= i915_hangcheck_ring_idle(ring, &err); - acthd[i] = intel_ring_get_active_head(ring); - } + /* Don't continually defer the hangcheck so that it is always run at + * least once after work has been scheduled on any ring. Otherwise, + * we will ignore a hung ring if a second ring is kept busy. + */ - /* If all work is done then ACTHD clearly hasn't advanced. */ - if (idle) { - if (err) { - if (i915_hangcheck_hung(dev)) - return; + queue_delayed_work(e->hangcheck_wq, &e->hangcheck_work, + round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES)); +} - goto repeat; - } +static void ibx_irq_reset(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; - dev_priv->gpu_error.hangcheck_count = 0; + if (HAS_PCH_NOP(dev)) return; - } - i915_get_extra_instdone(dev, instdone); - if (memcmp(dev_priv->gpu_error.last_acthd, acthd, - sizeof(acthd)) == 0 && - memcmp(dev_priv->gpu_error.prev_instdone, instdone, - sizeof(instdone)) == 0) { - if (i915_hangcheck_hung(dev)) - return; - } else { - dev_priv->gpu_error.hangcheck_count = 0; + GEN5_IRQ_RESET(SDE); - memcpy(dev_priv->gpu_error.last_acthd, acthd, - sizeof(acthd)); - memcpy(dev_priv->gpu_error.prev_instdone, instdone, - sizeof(instdone)); - } + if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev)) + I915_WRITE(SERR_INT, 0xffffffff); +} -repeat: - /* Reset timer case chip hangs without another request being added */ - mod_timer(&dev_priv->gpu_error.hangcheck_timer, - round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); +/* + * SDEIER is also touched by the interrupt handler to work around missed PCH + * interrupts. Hence we can't update it after the interrupt handler is enabled - + * instead we unconditionally enable all PCH interrupt sources here, but then + * only unmask them as needed with SDEIMR. + * + * This function needs to be called before interrupts are enabled. 
+ */ +static void ibx_irq_pre_postinstall(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (HAS_PCH_NOP(dev)) + return; + + WARN_ON(I915_READ(SDEIER) != 0); + I915_WRITE(SDEIER, 0xffffffff); + POSTING_READ(SDEIER); +} + +static void gen5_gt_irq_reset(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + GEN5_IRQ_RESET(GT); + if (INTEL_INFO(dev)->gen >= 6) + GEN5_IRQ_RESET(GEN6_PM); } /* drm_dma.h hooks */ -static void ironlake_irq_preinstall(struct drm_device *dev) +static void ironlake_irq_reset(struct drm_device *dev) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; - atomic_set(&dev_priv->irq_received, 0); + I915_WRITE(HWSTAM, 0xffffffff); - I915_WRITE(HWSTAM, 0xeffe); + GEN5_IRQ_RESET(DE); + if (IS_GEN7(dev)) + I915_WRITE(GEN7_ERR_INT, 0xffffffff); - /* XXX hotplug from PCH */ + gen5_gt_irq_reset(dev); - I915_WRITE(DEIMR, 0xffffffff); - I915_WRITE(DEIER, 0x0); - POSTING_READ(DEIER); + ibx_irq_reset(dev); +} + +static void vlv_display_irq_reset(struct drm_i915_private *dev_priv) +{ + enum pipe pipe; - /* and GT */ - I915_WRITE(GTIMR, 0xffffffff); - I915_WRITE(GTIER, 0x0); - POSTING_READ(GTIER); + i915_hotplug_interrupt_update(dev_priv, 0xFFFFFFFF, 0); + I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); - if (HAS_PCH_NOP(dev)) - return; + for_each_pipe(dev_priv, pipe) + I915_WRITE(PIPESTAT(pipe), 0xffff); - /* south display irq */ - I915_WRITE(SDEIMR, 0xffffffff); - /* - * SDEIER is also touched by the interrupt handler to work around missed - * PCH interrupts. Hence we can't update it after the interrupt handler - * is enabled - instead we unconditionally enable all PCH interrupt - * sources here, but then only unmask them as needed with SDEIMR. 
- */ - I915_WRITE(SDEIER, 0xffffffff); - POSTING_READ(SDEIER); + GEN5_IRQ_RESET(VLV_); } static void valleyview_irq_preinstall(struct drm_device *dev) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - int pipe; - - atomic_set(&dev_priv->irq_received, 0); + struct drm_i915_private *dev_priv = dev->dev_private; /* VLV magic */ I915_WRITE(VLV_IMR, 0); @@ -2178,217 +3163,427 @@ I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0); I915_WRITE(RING_IMR(BLT_RING_BASE), 0); - /* and GT */ - I915_WRITE(GTIIR, I915_READ(GTIIR)); - I915_WRITE(GTIIR, I915_READ(GTIIR)); - I915_WRITE(GTIMR, 0xffffffff); - I915_WRITE(GTIER, 0x0); - POSTING_READ(GTIER); + gen5_gt_irq_reset(dev); - I915_WRITE(DPINVGTT, 0xff); + I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); - I915_WRITE(PORT_HOTPLUG_EN, 0); - I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); - for_each_pipe(pipe) - I915_WRITE(PIPESTAT(pipe), 0xffff); - I915_WRITE(VLV_IIR, 0xffffffff); - I915_WRITE(VLV_IMR, 0xffffffff); - I915_WRITE(VLV_IER, 0x0); - POSTING_READ(VLV_IER); + vlv_display_irq_reset(dev_priv); +} + +static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv) +{ + GEN8_IRQ_RESET_NDX(GT, 0); + GEN8_IRQ_RESET_NDX(GT, 1); + GEN8_IRQ_RESET_NDX(GT, 2); + GEN8_IRQ_RESET_NDX(GT, 3); +} + +static void gen8_irq_reset(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int pipe; + + I915_WRITE(GEN8_MASTER_IRQ, 0); + POSTING_READ(GEN8_MASTER_IRQ); + + gen8_gt_irq_reset(dev_priv); + + for_each_pipe(dev_priv, pipe) + if (intel_display_power_is_enabled(dev_priv, + POWER_DOMAIN_PIPE(pipe))) + GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); + + GEN5_IRQ_RESET(GEN8_DE_PORT_); + GEN5_IRQ_RESET(GEN8_DE_MISC_); + GEN5_IRQ_RESET(GEN8_PCU_); + + if (HAS_PCH_SPLIT(dev)) + ibx_irq_reset(dev); +} + +void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, + unsigned int pipe_mask) +{ + uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN; + + spin_lock_irq(&dev_priv->irq_lock); + if (pipe_mask & 1 << PIPE_A) + GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_A, + dev_priv->de_irq_mask[PIPE_A], + ~dev_priv->de_irq_mask[PIPE_A] | extra_ier); + if (pipe_mask & 1 << PIPE_B) + GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, + dev_priv->de_irq_mask[PIPE_B], + ~dev_priv->de_irq_mask[PIPE_B] | extra_ier); + if (pipe_mask & 1 << PIPE_C) + GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, + dev_priv->de_irq_mask[PIPE_C], + ~dev_priv->de_irq_mask[PIPE_C] | extra_ier); + spin_unlock_irq(&dev_priv->irq_lock); +} + +static void cherryview_irq_preinstall(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + I915_WRITE(GEN8_MASTER_IRQ, 0); + POSTING_READ(GEN8_MASTER_IRQ); + + gen8_gt_irq_reset(dev_priv); + + GEN5_IRQ_RESET(GEN8_PCU_); + + I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV); + + vlv_display_irq_reset(dev_priv); +} + +static u32 intel_hpd_enabled_irqs(struct drm_device *dev, + const u32 hpd[HPD_NUM_PINS]) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_encoder *encoder; + u32 enabled_irqs = 0; + + for_each_intel_encoder(dev, encoder) + if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED) + enabled_irqs |= hpd[encoder->hpd_pin]; + + return enabled_irqs; } static void ibx_hpd_irq_setup(struct drm_device *dev) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - struct drm_mode_config *mode_config = &dev->mode_config; - struct intel_encoder *intel_encoder; - u32 mask = ~I915_READ(SDEIMR); - u32 hotplug; + struct drm_i915_private *dev_priv = 
dev->dev_private; + u32 hotplug_irqs, hotplug, enabled_irqs; if (HAS_PCH_IBX(dev)) { - mask &= ~SDE_HOTPLUG_MASK; - list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) - if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) - mask |= hpd_ibx[intel_encoder->hpd_pin]; + hotplug_irqs = SDE_HOTPLUG_MASK; + enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ibx); } else { - mask &= ~SDE_HOTPLUG_MASK_CPT; - list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) - if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) - mask |= hpd_cpt[intel_encoder->hpd_pin]; + hotplug_irqs = SDE_HOTPLUG_MASK_CPT; + enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_cpt); } - I915_WRITE(SDEIMR, ~mask); + ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); /* * Enable digital hotplug on the PCH, and configure the DP short pulse - * duration to 2ms (which is the minimum in the Display Port spec) - * - * This register is the same on all known PCH chips. + * duration to 2ms (which is the minimum in the Display Port spec). + * The pulse duration bits are reserved on LPT+. */ hotplug = I915_READ(PCH_PORT_HOTPLUG); hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK); hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; + /* + * When CPU and PCH are on the same package, port A + * HPD must be enabled in both north and south. + */ + if (HAS_PCH_LPT_LP(dev)) + hotplug |= PORTA_HOTPLUG_ENABLE; + I915_WRITE(PCH_PORT_HOTPLUG, hotplug); +} + +static void spt_hpd_irq_setup(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 hotplug_irqs, hotplug, enabled_irqs; + + hotplug_irqs = SDE_HOTPLUG_MASK_SPT; + enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_spt); + + ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); + + /* Enable digital hotplug on the PCH */ + hotplug = I915_READ(PCH_PORT_HOTPLUG); + hotplug |= PORTD_HOTPLUG_ENABLE | PORTC_HOTPLUG_ENABLE | + PORTB_HOTPLUG_ENABLE | PORTA_HOTPLUG_ENABLE; + I915_WRITE(PCH_PORT_HOTPLUG, hotplug); + + hotplug = I915_READ(PCH_PORT_HOTPLUG2); + hotplug |= PORTE_HOTPLUG_ENABLE; + I915_WRITE(PCH_PORT_HOTPLUG2, hotplug); +} + +static void ilk_hpd_irq_setup(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 hotplug_irqs, hotplug, enabled_irqs; + + if (INTEL_INFO(dev)->gen >= 8) { + hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG; + enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bdw); + + bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); + } else if (INTEL_INFO(dev)->gen >= 7) { + hotplug_irqs = DE_DP_A_HOTPLUG_IVB; + enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ivb); + + ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); + } else { + hotplug_irqs = DE_DP_A_HOTPLUG; + enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ilk); + + ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); + } + + /* + * Enable digital hotplug on the CPU, and configure the DP short pulse + * duration to 2ms (which is the minimum in the Display Port spec) + * The pulse duration bits are reserved on HSW+. 
+ */ + hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); + hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK; + hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms; + I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug); + + ibx_hpd_irq_setup(dev); +} + +static void bxt_hpd_irq_setup(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 hotplug_irqs, hotplug, enabled_irqs; + + enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bxt); + hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK; + + bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); + + hotplug = I915_READ(PCH_PORT_HOTPLUG); + hotplug |= PORTC_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE | + PORTA_HOTPLUG_ENABLE; I915_WRITE(PCH_PORT_HOTPLUG, hotplug); } static void ibx_irq_postinstall(struct drm_device *dev) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; u32 mask; + if (HAS_PCH_NOP(dev)) + return; + if (HAS_PCH_IBX(dev)) - mask = SDE_GMBUS | SDE_AUX_MASK; + mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON; else mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; - if (HAS_PCH_NOP(dev)) - return; - - I915_WRITE(SDEIIR, I915_READ(SDEIIR)); + gen5_assert_iir_is_zero(dev_priv, SDEIIR); I915_WRITE(SDEIMR, ~mask); } +static void gen5_gt_irq_postinstall(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 pm_irqs, gt_irqs; + + pm_irqs = gt_irqs = 0; + + dev_priv->gt_irq_mask = ~0; + if (HAS_L3_DPF(dev)) { + /* L3 parity interrupt is always unmasked. */ + dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev); + gt_irqs |= GT_PARITY_ERROR(dev); + } + + gt_irqs |= GT_RENDER_USER_INTERRUPT; + if (IS_GEN5(dev)) { + gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT | + ILK_BSD_USER_INTERRUPT; + } else { + gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; + } + + GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs); + + if (INTEL_INFO(dev)->gen >= 6) { + /* + * RPS interrupts will get enabled/disabled on demand when RPS + * itself is enabled/disabled. 
+ */ + if (HAS_VEBOX(dev)) + pm_irqs |= PM_VEBOX_USER_INTERRUPT; + + dev_priv->pm_irq_mask = 0xffffffff; + GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs); + } +} + static int ironlake_irq_postinstall(struct drm_device *dev) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - /* enable kind of interrupts always enabled */ - u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | - DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | - DE_AUX_CHANNEL_A; - u32 render_irqs; + struct drm_i915_private *dev_priv = dev->dev_private; + u32 display_mask, extra_mask; + + if (INTEL_INFO(dev)->gen >= 7) { + display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | + DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB | + DE_PLANEB_FLIP_DONE_IVB | + DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB); + extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | + DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB | + DE_DP_A_HOTPLUG_IVB); + } else { + display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | + DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | + DE_AUX_CHANNEL_A | + DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE | + DE_POISON); + extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT | + DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN | + DE_DP_A_HOTPLUG); + } dev_priv->irq_mask = ~display_mask; - /* should always can generate irq */ - I915_WRITE(DEIIR, I915_READ(DEIIR)); - I915_WRITE(DEIMR, dev_priv->irq_mask); - I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK); - POSTING_READ(DEIER); + I915_WRITE(HWSTAM, 0xeffe); - dev_priv->gt_irq_mask = ~0; + ibx_irq_pre_postinstall(dev); - I915_WRITE(GTIIR, I915_READ(GTIIR)); - I915_WRITE(GTIMR, dev_priv->gt_irq_mask); + GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask); - if (IS_GEN6(dev)) - render_irqs = - GT_USER_INTERRUPT | - GEN6_BSD_USER_INTERRUPT | - GEN6_BLITTER_USER_INTERRUPT; - else - render_irqs = - GT_USER_INTERRUPT | - GT_PIPE_NOTIFY | - GT_BSD_USER_INTERRUPT; - I915_WRITE(GTIER, render_irqs); - POSTING_READ(GTIER); + gen5_gt_irq_postinstall(dev); ibx_irq_postinstall(dev); if (IS_IRONLAKE_M(dev)) { - /* Clear & enable PCU event interrupts */ - I915_WRITE(DEIIR, DE_PCU_EVENT); - I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT); + /* Enable PCU event interrupts + * + * spinlocking not required here for correctness since interrupt + * setup is guaranteed to run in single-threaded context. But we + * need it to make the assert_spin_locked happy. 
*/ + spin_lock_irq(&dev_priv->irq_lock); ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); + spin_unlock_irq(&dev_priv->irq_lock); } return 0; } -static int ivybridge_irq_postinstall(struct drm_device *dev) +static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - /* enable kind of interrupts always enabled */ - u32 display_mask = - DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB | - DE_PLANEC_FLIP_DONE_IVB | - DE_PLANEB_FLIP_DONE_IVB | - DE_PLANEA_FLIP_DONE_IVB | - DE_AUX_CHANNEL_A_IVB; - u32 render_irqs; + u32 pipestat_mask; + u32 iir_mask; + enum pipe pipe; + + pipestat_mask = PIPESTAT_INT_STATUS_MASK | + PIPE_FIFO_UNDERRUN_STATUS; + + for_each_pipe(dev_priv, pipe) + I915_WRITE(PIPESTAT(pipe), pipestat_mask); + POSTING_READ(PIPESTAT(PIPE_A)); + + pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | + PIPE_CRC_DONE_INTERRUPT_STATUS; + + i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); + for_each_pipe(dev_priv, pipe) + i915_enable_pipestat(dev_priv, pipe, pipestat_mask); + + iir_mask = I915_DISPLAY_PORT_INTERRUPT | + I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | + I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; + if (IS_CHERRYVIEW(dev_priv)) + iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; + dev_priv->irq_mask &= ~iir_mask; + + I915_WRITE(VLV_IIR, iir_mask); + I915_WRITE(VLV_IIR, iir_mask); + I915_WRITE(VLV_IER, ~dev_priv->irq_mask); + I915_WRITE(VLV_IMR, dev_priv->irq_mask); + POSTING_READ(VLV_IMR); +} - dev_priv->irq_mask = ~display_mask; +static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv) +{ + u32 pipestat_mask; + u32 iir_mask; + enum pipe pipe; - /* should always can generate irq */ - I915_WRITE(DEIIR, I915_READ(DEIIR)); - I915_WRITE(DEIMR, dev_priv->irq_mask); - I915_WRITE(DEIER, - display_mask | - DE_PIPEC_VBLANK_IVB | - DE_PIPEB_VBLANK_IVB | - DE_PIPEA_VBLANK_IVB); - POSTING_READ(DEIER); + iir_mask = I915_DISPLAY_PORT_INTERRUPT | + I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | + I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; + if (IS_CHERRYVIEW(dev_priv)) + iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; - dev_priv->gt_irq_mask = ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT; + dev_priv->irq_mask |= iir_mask; + I915_WRITE(VLV_IMR, dev_priv->irq_mask); + I915_WRITE(VLV_IER, ~dev_priv->irq_mask); + I915_WRITE(VLV_IIR, iir_mask); + I915_WRITE(VLV_IIR, iir_mask); + POSTING_READ(VLV_IIR); - I915_WRITE(GTIIR, I915_READ(GTIIR)); - I915_WRITE(GTIMR, dev_priv->gt_irq_mask); + pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | + PIPE_CRC_DONE_INTERRUPT_STATUS; - render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT | - GEN6_BLITTER_USER_INTERRUPT | GT_GEN7_L3_PARITY_ERROR_INTERRUPT; - I915_WRITE(GTIER, render_irqs); - POSTING_READ(GTIER); + i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); + for_each_pipe(dev_priv, pipe) + i915_disable_pipestat(dev_priv, pipe, pipestat_mask); - ibx_irq_postinstall(dev); + pipestat_mask = PIPESTAT_INT_STATUS_MASK | + PIPE_FIFO_UNDERRUN_STATUS; - return 0; + for_each_pipe(dev_priv, pipe) + I915_WRITE(PIPESTAT(pipe), pipestat_mask); + POSTING_READ(PIPESTAT(PIPE_A)); } -static int valleyview_irq_postinstall(struct drm_device *dev) +void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - u32 enable_mask; - u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV; - u32 render_irqs; - u16 msid; - - enable_mask = I915_DISPLAY_PORT_INTERRUPT; - 
enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | - I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | - I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | - I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; + assert_spin_locked(&dev_priv->irq_lock); - /* - *Leave vblank interrupts masked initially. enable/disable will - * toggle them based on usage. - */ - dev_priv->irq_mask = (~enable_mask) | - I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | - I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; + if (dev_priv->display_irqs_enabled) + return; - /* Hack for broken MSIs on VLV */ - pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000); - pci_read_config_word(dev->pdev, 0x98, &msid); - msid &= 0xff; /* mask out delivery bits */ - msid |= (1<<14); - pci_write_config_word(dev_priv->dev->pdev, 0x98, msid); + dev_priv->display_irqs_enabled = true; - I915_WRITE(PORT_HOTPLUG_EN, 0); - POSTING_READ(PORT_HOTPLUG_EN); + if (intel_irqs_enabled(dev_priv)) + valleyview_display_irqs_install(dev_priv); +} - I915_WRITE(VLV_IMR, dev_priv->irq_mask); - I915_WRITE(VLV_IER, enable_mask); - I915_WRITE(VLV_IIR, 0xffffffff); - I915_WRITE(PIPESTAT(0), 0xffff); - I915_WRITE(PIPESTAT(1), 0xffff); - POSTING_READ(VLV_IER); - - i915_enable_pipestat(dev_priv, 0, pipestat_enable); - i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE); - i915_enable_pipestat(dev_priv, 1, pipestat_enable); +void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) +{ + assert_spin_locked(&dev_priv->irq_lock); + + if (!dev_priv->display_irqs_enabled) + return; + + dev_priv->display_irqs_enabled = false; + + if (intel_irqs_enabled(dev_priv)) + valleyview_display_irqs_uninstall(dev_priv); +} + +static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) +{ + dev_priv->irq_mask = ~0; + + i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); + POSTING_READ(PORT_HOTPLUG_EN); I915_WRITE(VLV_IIR, 0xffffffff); I915_WRITE(VLV_IIR, 0xffffffff); + I915_WRITE(VLV_IER, ~dev_priv->irq_mask); + I915_WRITE(VLV_IMR, dev_priv->irq_mask); + POSTING_READ(VLV_IMR); - I915_WRITE(GTIIR, I915_READ(GTIIR)); - I915_WRITE(GTIMR, dev_priv->gt_irq_mask); + /* Interrupt setup is already guaranteed to be single-threaded, this is + * just to make the assert_spin_locked check happy. 
*/ + spin_lock_irq(&dev_priv->irq_lock); + if (dev_priv->display_irqs_enabled) + valleyview_display_irqs_install(dev_priv); + spin_unlock_irq(&dev_priv->irq_lock); +} - render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT | - GEN6_BLITTER_USER_INTERRUPT; - I915_WRITE(GTIER, render_irqs); - POSTING_READ(GTIER); +static int valleyview_irq_postinstall(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + vlv_display_irq_postinstall(dev_priv); + + gen5_gt_irq_postinstall(dev); /* ack & enable invalid PTE error interrupts */ #if 0 /* FIXME: add support to irq handler for checking these bits */ @@ -2401,65 +3596,184 @@ return 0; } +static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) +{ + /* These are interrupts we'll toggle with the ring mask register */ + uint32_t gt_interrupts[] = { + GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | + GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT | + GT_RENDER_L3_PARITY_ERROR_INTERRUPT | + GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT | + GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT, + GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | + GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | + GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT | + GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT, + 0, + GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT | + GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT + }; + + dev_priv->pm_irq_mask = 0xffffffff; + GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]); + GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]); + /* + * RPS interrupts will get enabled/disabled on demand when RPS itself + * is enabled/disabled. + */ + GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0); + GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]); +} + +static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) +{ + uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE; + uint32_t de_pipe_enables; + u32 de_port_masked = GEN8_AUX_CHANNEL_A; + u32 de_port_enables; + enum pipe pipe; + + if (INTEL_INFO(dev_priv)->gen >= 9) { + de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE | + GEN9_DE_PIPE_IRQ_FAULT_ERRORS; + de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | + GEN9_AUX_CHANNEL_D; + if (IS_BROXTON(dev_priv)) + de_port_masked |= BXT_DE_PORT_GMBUS; + } else { + de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE | + GEN8_DE_PIPE_IRQ_FAULT_ERRORS; + } + + de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | + GEN8_PIPE_FIFO_UNDERRUN; + + de_port_enables = de_port_masked; + if (IS_BROXTON(dev_priv)) + de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK; + else if (IS_BROADWELL(dev_priv)) + de_port_enables |= GEN8_PORT_DP_A_HOTPLUG; + + dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked; + dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked; + dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked; + + for_each_pipe(dev_priv, pipe) + if (intel_display_power_is_enabled(dev_priv, + POWER_DOMAIN_PIPE(pipe))) + GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, + dev_priv->de_irq_mask[pipe], + de_pipe_enables); + + GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables); +} + +static int gen8_irq_postinstall(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (HAS_PCH_SPLIT(dev)) + ibx_irq_pre_postinstall(dev); + + gen8_gt_irq_postinstall(dev_priv); + gen8_de_irq_postinstall(dev_priv); + + if (HAS_PCH_SPLIT(dev)) + ibx_irq_postinstall(dev); + + I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL); + POSTING_READ(GEN8_MASTER_IRQ); + + return 0; +} + 
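The postinstall helpers above all follow one convention: a *_masked value lists the events the driver wants reported, IMR is programmed with its complement, and IER additionally carries bits (vblank, FIFO underrun) that are wired up but stay masked until something arms them. The stand-alone C sketch below is a simplified model of that ISR-to-IMR-to-IIR-to-IER flow; struct irq_bank and its helper functions are invented for illustration and are not part of the driver API.

#include <stdbool.h>
#include <stdint.h>

/* Toy model of one i915 interrupt bank (IIR/IMR/IER), illustration only. */
struct irq_bank {
	uint32_t iir;	/* latched event status */
	uint32_t imr;	/* 1 = event is masked from latching */
	uint32_t ier;	/* 1 = latched event may raise the interrupt line */
};

/* Mirrors the GEN5_IRQ_INIT()/GEN8_IRQ_INIT_NDX() order: IER, then IMR. */
static void irq_bank_init(struct irq_bank *b, uint32_t imr_val, uint32_t ier_val)
{
	/* stand-in for gen5_assert_iir_is_zero(): stale events are a bug */
	if (b->iir != 0)
		b->iir = 0;
	b->ier = ier_val;
	b->imr = imr_val;
}

/* A new hardware event is latched into IIR only when it is not masked. */
static void irq_bank_post_event(struct irq_bank *b, uint32_t event)
{
	b->iir |= event & ~b->imr;
}

/* The CPU interrupt line asserts while any latched event is enabled. */
static bool irq_bank_asserted(const struct irq_bank *b)
{
	return (b->iir & b->ier) != 0;
}

Under this model, unmasking a bit in IMR at runtime (as the hotplug and RPS paths above do) lets future events through without reprogramming IER, which is why the reset macros clear IIR twice and why postinstall asserts IIR is zero first.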
+static int cherryview_irq_postinstall(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + vlv_display_irq_postinstall(dev_priv); + + gen8_gt_irq_postinstall(dev_priv); + + I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE); + POSTING_READ(GEN8_MASTER_IRQ); + + return 0; +} + +static void gen8_irq_uninstall(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (!dev_priv) + return; + + gen8_irq_reset(dev); +} + +static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv) +{ + /* Interrupt setup is already guaranteed to be single-threaded, this is + * just to make the assert_spin_locked check happy. */ + spin_lock_irq(&dev_priv->irq_lock); + if (dev_priv->display_irqs_enabled) + valleyview_display_irqs_uninstall(dev_priv); + spin_unlock_irq(&dev_priv->irq_lock); + + vlv_display_irq_reset(dev_priv); + + dev_priv->irq_mask = ~0; +} + static void valleyview_irq_uninstall(struct drm_device *dev) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - int pipe; + struct drm_i915_private *dev_priv = dev->dev_private; if (!dev_priv) return; - del_timer_sync(&dev_priv->hotplug_reenable_timer); + I915_WRITE(VLV_MASTER_IER, 0); - for_each_pipe(pipe) - I915_WRITE(PIPESTAT(pipe), 0xffff); + gen5_gt_irq_reset(dev); I915_WRITE(HWSTAM, 0xffffffff); - I915_WRITE(PORT_HOTPLUG_EN, 0); - I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); - for_each_pipe(pipe) - I915_WRITE(PIPESTAT(pipe), 0xffff); - I915_WRITE(VLV_IIR, 0xffffffff); - I915_WRITE(VLV_IMR, 0xffffffff); - I915_WRITE(VLV_IER, 0x0); - POSTING_READ(VLV_IER); + + vlv_display_irq_uninstall(dev_priv); } -static void ironlake_irq_uninstall(struct drm_device *dev) +static void cherryview_irq_uninstall(struct drm_device *dev) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; if (!dev_priv) return; - del_timer_sync(&dev_priv->hotplug_reenable_timer); + I915_WRITE(GEN8_MASTER_IRQ, 0); + POSTING_READ(GEN8_MASTER_IRQ); - I915_WRITE(HWSTAM, 0xffffffff); + gen8_gt_irq_reset(dev_priv); - I915_WRITE(DEIMR, 0xffffffff); - I915_WRITE(DEIER, 0x0); - I915_WRITE(DEIIR, I915_READ(DEIIR)); - - I915_WRITE(GTIMR, 0xffffffff); - I915_WRITE(GTIER, 0x0); - I915_WRITE(GTIIR, I915_READ(GTIIR)); + GEN5_IRQ_RESET(GEN8_PCU_); - if (HAS_PCH_NOP(dev)) + vlv_display_irq_uninstall(dev_priv); +} + +static void ironlake_irq_uninstall(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (!dev_priv) return; - I915_WRITE(SDEIMR, 0xffffffff); - I915_WRITE(SDEIER, 0x0); - I915_WRITE(SDEIIR, I915_READ(SDEIIR)); + ironlake_irq_reset(dev); } static void i8xx_irq_preinstall(struct drm_device * dev) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; int pipe; - atomic_set(&dev_priv->irq_received, 0); - - for_each_pipe(pipe) + for_each_pipe(dev_priv, pipe) I915_WRITE(PIPESTAT(pipe), 0); I915_WRITE16(IMR, 0xffff); I915_WRITE16(IER, 0x0); @@ -2468,7 +3782,7 @@ static int i8xx_irq_postinstall(struct drm_device *dev) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; I915_WRITE16(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); @@ -2478,17 +3792,22 @@ ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | - 
I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | - I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); + I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); I915_WRITE16(IMR, dev_priv->irq_mask); I915_WRITE16(IER, I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | - I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | I915_USER_INTERRUPT); POSTING_READ16(IER); + /* Interrupt setup is already guaranteed to be single-threaded, this is + * just to make the assert_spin_locked check happy. */ + spin_lock_irq(&dev_priv->irq_lock); + i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); + i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); + spin_unlock_irq(&dev_priv->irq_lock); + return 0; } @@ -2496,18 +3815,16 @@ * Returns true when a page flip has completed. */ static bool i8xx_handle_vblank(struct drm_device *dev, - int pipe, u16 iir) + int plane, int pipe, u32 iir) { - drm_i915_private_t *dev_priv = dev->dev_private; - u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe); + struct drm_i915_private *dev_priv = dev->dev_private; + u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); - if (!drm_handle_vblank(dev, pipe)) + if (!intel_pipe_handle_vblank(dev, pipe)) return false; if ((iir & flip_pending) == 0) - return false; - - intel_prepare_page_flip(dev, pipe); + goto check_page_flip; /* We detect FlipDone by looking for the change in PendingFlip from '1' * to '0' on the following vblank, i.e. IIR has the Pendingflip @@ -2516,27 +3833,30 @@ * an interrupt per se, we watch for the change at vblank. */ if (I915_READ16(ISR) & flip_pending) - return false; + goto check_page_flip; + intel_prepare_page_flip(dev, plane); intel_finish_page_flip(dev, pipe); - return true; + +check_page_flip: + intel_check_page_flip(dev, pipe); + return false; } static irqreturn_t i8xx_irq_handler(int irq, void *arg) { - struct drm_device *dev = (struct drm_device *) arg; - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_device *dev = arg; + struct drm_i915_private *dev_priv = dev->dev_private; u16 iir, new_iir; u32 pipe_stats[2]; - unsigned long irqflags; - int irq_received; int pipe; u16 flip_mask = I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; - atomic_inc(&dev_priv->irq_received); + if (!intel_irqs_enabled(dev_priv)) + return IRQ_NONE; iir = I915_READ16(IIR); if (iir == 0) @@ -2548,42 +3868,44 @@ * It doesn't set the bit in iir again, but it still produces * interrupts (for non-MSI). 
*/ - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + spin_lock(&dev_priv->irq_lock); if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) - i915_handle_error(dev, false); + DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); - for_each_pipe(pipe) { + for_each_pipe(dev_priv, pipe) { int reg = PIPESTAT(pipe); pipe_stats[pipe] = I915_READ(reg); /* * Clear the PIPE*STAT regs before the IIR */ - if (pipe_stats[pipe] & 0x8000ffff) { - if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) - DRM_DEBUG_DRIVER("pipe %c underrun\n", - pipe_name(pipe)); + if (pipe_stats[pipe] & 0x8000ffff) I915_WRITE(reg, pipe_stats[pipe]); - irq_received = 1; - } } - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + spin_unlock(&dev_priv->irq_lock); I915_WRITE16(IIR, iir & ~flip_mask); new_iir = I915_READ16(IIR); /* Flush posted writes */ - i915_update_dri1_breadcrumb(dev); - if (iir & I915_USER_INTERRUPT) - notify_ring(dev, &dev_priv->ring[RCS]); + notify_ring(&dev_priv->ring[RCS]); - if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS && - i8xx_handle_vblank(dev, 0, iir)) - flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0); - - if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS && - i8xx_handle_vblank(dev, 1, iir)) - flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1); + for_each_pipe(dev_priv, pipe) { + int plane = pipe; + if (HAS_FBC(dev)) + plane = !plane; + + if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && + i8xx_handle_vblank(dev, plane, pipe, iir)) + flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); + + if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) + i9xx_pipe_crc_irq_handler(dev, pipe); + + if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) + intel_cpu_fifo_underrun_irq_handler(dev_priv, + pipe); + } iir = new_iir; } @@ -2593,10 +3915,10 @@ static void i8xx_irq_uninstall(struct drm_device * dev) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; int pipe; - for_each_pipe(pipe) { + for_each_pipe(dev_priv, pipe) { /* Clear enable bits; then clear status bits */ I915_WRITE(PIPESTAT(pipe), 0); I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); @@ -2608,18 +3930,16 @@ static void i915_irq_preinstall(struct drm_device * dev) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; int pipe; - atomic_set(&dev_priv->irq_received, 0); - if (I915_HAS_HOTPLUG(dev)) { - I915_WRITE(PORT_HOTPLUG_EN, 0); + i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); } I915_WRITE16(HWSTAM, 0xeffe); - for_each_pipe(pipe) + for_each_pipe(dev_priv, pipe) I915_WRITE(PIPESTAT(pipe), 0); I915_WRITE(IMR, 0xffffffff); I915_WRITE(IER, 0x0); @@ -2628,7 +3948,7 @@ static int i915_irq_postinstall(struct drm_device *dev) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; u32 enable_mask; I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); @@ -2639,18 +3959,16 @@ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | - I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | - I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); + I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); enable_mask = I915_ASLE_INTERRUPT | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | - I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | I915_USER_INTERRUPT; if 
(I915_HAS_HOTPLUG(dev)) { - I915_WRITE(PORT_HOTPLUG_EN, 0); + i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); POSTING_READ(PORT_HOTPLUG_EN); /* Enable in IER... */ @@ -2663,7 +3981,14 @@ I915_WRITE(IER, enable_mask); POSTING_READ(IER); - intel_opregion_enable_asle(dev); + i915_enable_asle_pipestat(dev); + + /* Interrupt setup is already guaranteed to be single-threaded, this is + * just to make the assert_spin_locked check happy. */ + spin_lock_irq(&dev_priv->irq_lock); + i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); + i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); + spin_unlock_irq(&dev_priv->irq_lock); return 0; } @@ -2674,16 +3999,14 @@ static bool i915_handle_vblank(struct drm_device *dev, int plane, int pipe, u32 iir) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); - if (!drm_handle_vblank(dev, pipe)) + if (!intel_pipe_handle_vblank(dev, pipe)) return false; if ((iir & flip_pending) == 0) - return false; - - intel_prepare_page_flip(dev, plane); + goto check_page_flip; /* We detect FlipDone by looking for the change in PendingFlip from '1' * to '0' on the following vblank, i.e. IIR has the Pendingflip @@ -2692,25 +4015,29 @@ * an interrupt per se, we watch for the change at vblank. */ if (I915_READ(ISR) & flip_pending) - return false; + goto check_page_flip; + intel_prepare_page_flip(dev, plane); intel_finish_page_flip(dev, pipe); - return true; + +check_page_flip: + intel_check_page_flip(dev, pipe); + return false; } static irqreturn_t i915_irq_handler(int irq, void *arg) { - struct drm_device *dev = (struct drm_device *) arg; - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_device *dev = arg; + struct drm_i915_private *dev_priv = dev->dev_private; u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; - unsigned long irqflags; u32 flip_mask = I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; int pipe, ret = IRQ_NONE; - atomic_inc(&dev_priv->irq_received); + if (!intel_irqs_enabled(dev_priv)) + return IRQ_NONE; iir = I915_READ(IIR); do { @@ -2722,55 +4049,39 @@ * It doesn't set the bit in iir again, but it still produces * interrupts (for non-MSI). */ - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + spin_lock(&dev_priv->irq_lock); if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) - i915_handle_error(dev, false); + DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); - for_each_pipe(pipe) { + for_each_pipe(dev_priv, pipe) { int reg = PIPESTAT(pipe); pipe_stats[pipe] = I915_READ(reg); /* Clear the PIPE*STAT regs before the IIR */ if (pipe_stats[pipe] & 0x8000ffff) { - if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) - DRM_DEBUG_DRIVER("pipe %c underrun\n", - pipe_name(pipe)); I915_WRITE(reg, pipe_stats[pipe]); irq_received = true; } } - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + spin_unlock(&dev_priv->irq_lock); if (!irq_received) break; /* Consume port. 
Then clear IIR or we'll miss events */ - if ((I915_HAS_HOTPLUG(dev)) && - (iir & I915_DISPLAY_PORT_INTERRUPT)) { - u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); - u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; - - DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", - hotplug_status); - if (hotplug_trigger) { - if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_status_i915)) - i915_hpd_irq_setup(dev); - queue_work(dev_priv->wq, - &dev_priv->hotplug_work); - } - I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); - POSTING_READ(PORT_HOTPLUG_STAT); - } + if (I915_HAS_HOTPLUG(dev) && + iir & I915_DISPLAY_PORT_INTERRUPT) + i9xx_hpd_irq_handler(dev); I915_WRITE(IIR, iir & ~flip_mask); new_iir = I915_READ(IIR); /* Flush posted writes */ if (iir & I915_USER_INTERRUPT) - notify_ring(dev, &dev_priv->ring[RCS]); + notify_ring(&dev_priv->ring[RCS]); - for_each_pipe(pipe) { + for_each_pipe(dev_priv, pipe) { int plane = pipe; - if (IS_MOBILE(dev)) + if (HAS_FBC(dev)) plane = !plane; if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && @@ -2779,6 +4090,13 @@ if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) blc_event = true; + + if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) + i9xx_pipe_crc_irq_handler(dev, pipe); + + if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) + intel_cpu_fifo_underrun_irq_handler(dev_priv, + pipe); } if (blc_event || (iir & I915_ASLE_INTERRUPT)) @@ -2803,25 +4121,21 @@ iir = new_iir; } while (iir & ~flip_mask); - i915_update_dri1_breadcrumb(dev); - return ret; } static void i915_irq_uninstall(struct drm_device * dev) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; int pipe; - del_timer_sync(&dev_priv->hotplug_reenable_timer); - if (I915_HAS_HOTPLUG(dev)) { - I915_WRITE(PORT_HOTPLUG_EN, 0); + i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); } I915_WRITE16(HWSTAM, 0xffff); - for_each_pipe(pipe) { + for_each_pipe(dev_priv, pipe) { /* Clear enable bits; then clear status bits */ I915_WRITE(PIPESTAT(pipe), 0); I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); @@ -2834,16 +4148,14 @@ static void i965_irq_preinstall(struct drm_device * dev) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; int pipe; - atomic_set(&dev_priv->irq_received, 0); - - I915_WRITE(PORT_HOTPLUG_EN, 0); + i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); I915_WRITE(HWSTAM, 0xeffe); - for_each_pipe(pipe) + for_each_pipe(dev_priv, pipe) I915_WRITE(PIPESTAT(pipe), 0); I915_WRITE(IMR, 0xffffffff); I915_WRITE(IER, 0x0); @@ -2852,7 +4164,7 @@ static int i965_irq_postinstall(struct drm_device *dev) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; u32 enable_mask; u32 error_mask; @@ -2873,7 +4185,13 @@ if (IS_G4X(dev)) enable_mask |= I915_BSD_USER_INTERRUPT; - i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE); + /* Interrupt setup is already guaranteed to be single-threaded, this is + * just to make the assert_spin_locked check happy. 
*/ + spin_lock_irq(&dev_priv->irq_lock); + i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); + i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); + i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); + spin_unlock_irq(&dev_priv->irq_lock); /* * Enable some error detection, note the instruction error mask @@ -2894,75 +4212,70 @@ I915_WRITE(IER, enable_mask); POSTING_READ(IER); - I915_WRITE(PORT_HOTPLUG_EN, 0); + i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); POSTING_READ(PORT_HOTPLUG_EN); - intel_opregion_enable_asle(dev); + i915_enable_asle_pipestat(dev); return 0; } static void i915_hpd_irq_setup(struct drm_device *dev) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - struct drm_mode_config *mode_config = &dev->mode_config; - struct intel_encoder *intel_encoder; + struct drm_i915_private *dev_priv = dev->dev_private; u32 hotplug_en; - if (I915_HAS_HOTPLUG(dev)) { - hotplug_en = I915_READ(PORT_HOTPLUG_EN); - hotplug_en &= ~HOTPLUG_INT_EN_MASK; - /* Note HDMI and DP share hotplug bits */ - /* enable bits are the same for all generations */ - list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) - if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) - hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin]; - /* Programming the CRT detection parameters tends - to generate a spurious hotplug event about three - seconds later. So just do it once. - */ - if (IS_G4X(dev)) - hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; - hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK; - hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; + assert_spin_locked(&dev_priv->irq_lock); - /* Ignore TV since it's buggy */ - I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); - } + /* Note HDMI and DP share hotplug bits */ + /* enable bits are the same for all generations */ + hotplug_en = intel_hpd_enabled_irqs(dev, hpd_mask_i915); + /* Programming the CRT detection parameters tends + to generate a spurious hotplug event about three + seconds later. So just do it once. + */ + if (IS_G4X(dev)) + hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; + hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; + + /* Ignore TV since it's buggy */ + i915_hotplug_interrupt_update_locked(dev_priv, + HOTPLUG_INT_EN_MASK | + CRT_HOTPLUG_VOLTAGE_COMPARE_MASK | + CRT_HOTPLUG_ACTIVATION_PERIOD_64, + hotplug_en); } static irqreturn_t i965_irq_handler(int irq, void *arg) { - struct drm_device *dev = (struct drm_device *) arg; - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_device *dev = arg; + struct drm_i915_private *dev_priv = dev->dev_private; u32 iir, new_iir; u32 pipe_stats[I915_MAX_PIPES]; - unsigned long irqflags; - int irq_received; int ret = IRQ_NONE, pipe; u32 flip_mask = I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; - atomic_inc(&dev_priv->irq_received); + if (!intel_irqs_enabled(dev_priv)) + return IRQ_NONE; iir = I915_READ(IIR); for (;;) { + bool irq_received = (iir & ~flip_mask) != 0; bool blc_event = false; - irq_received = (iir & ~flip_mask) != 0; - /* Can't rely on pipestat interrupt bit in iir as it might * have been cleared after the pipestat interrupt was received. * It doesn't set the bit in iir again, but it still produces * interrupts (for non-MSI). 
*/ - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + spin_lock(&dev_priv->irq_lock); if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) - i915_handle_error(dev, false); + DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); - for_each_pipe(pipe) { + for_each_pipe(dev_priv, pipe) { int reg = PIPESTAT(pipe); pipe_stats[pipe] = I915_READ(reg); @@ -2970,14 +4283,11 @@ * Clear the PIPE*STAT regs before the IIR */ if (pipe_stats[pipe] & 0x8000ffff) { - if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) - DRM_DEBUG_DRIVER("pipe %c underrun\n", - pipe_name(pipe)); I915_WRITE(reg, pipe_stats[pipe]); - irq_received = 1; + irq_received = true; } } - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + spin_unlock(&dev_priv->irq_lock); if (!irq_received) break; @@ -2985,42 +4295,31 @@ ret = IRQ_HANDLED; /* Consume port. Then clear IIR or we'll miss events */ - if (iir & I915_DISPLAY_PORT_INTERRUPT) { - u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); - u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ? - HOTPLUG_INT_STATUS_G4X : - HOTPLUG_INT_STATUS_I915); - - DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", - hotplug_status); - if (hotplug_trigger) { - if (hotplug_irq_storm_detect(dev, hotplug_trigger, - IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i915)) - i915_hpd_irq_setup(dev); - queue_work(dev_priv->wq, - &dev_priv->hotplug_work); - } - I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); - I915_READ(PORT_HOTPLUG_STAT); - } + if (iir & I915_DISPLAY_PORT_INTERRUPT) + i9xx_hpd_irq_handler(dev); I915_WRITE(IIR, iir & ~flip_mask); new_iir = I915_READ(IIR); /* Flush posted writes */ if (iir & I915_USER_INTERRUPT) - notify_ring(dev, &dev_priv->ring[RCS]); + notify_ring(&dev_priv->ring[RCS]); if (iir & I915_BSD_USER_INTERRUPT) - notify_ring(dev, &dev_priv->ring[VCS]); + notify_ring(&dev_priv->ring[VCS]); - for_each_pipe(pipe) { + for_each_pipe(dev_priv, pipe) { if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && i915_handle_vblank(dev, pipe, pipe, iir)) flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe); if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) blc_event = true; - } + if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) + i9xx_pipe_crc_irq_handler(dev, pipe); + + if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) + intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); + } if (blc_event || (iir & I915_ASLE_INTERRUPT)) intel_opregion_asle_intr(dev); @@ -3046,102 +4345,91 @@ iir = new_iir; } - i915_update_dri1_breadcrumb(dev); - return ret; } static void i965_irq_uninstall(struct drm_device * dev) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; int pipe; if (!dev_priv) return; - del_timer_sync(&dev_priv->hotplug_reenable_timer); - - I915_WRITE(PORT_HOTPLUG_EN, 0); + i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); I915_WRITE(HWSTAM, 0xffffffff); - for_each_pipe(pipe) + for_each_pipe(dev_priv, pipe) I915_WRITE(PIPESTAT(pipe), 0); I915_WRITE(IMR, 0xffffffff); I915_WRITE(IER, 0x0); - for_each_pipe(pipe) + for_each_pipe(dev_priv, pipe) I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)) & 0x8000ffff); I915_WRITE(IIR, I915_READ(IIR)); } -static void i915_reenable_hotplug_timer_func(unsigned long data) +/** + * intel_irq_init - initializes irq support + * @dev_priv: i915 device instance + * + * This function initializes all the irq support including work items, timers + * and all the vtables. 
It does not set up the interrupt itself though.
+ */
+void intel_irq_init(struct drm_i915_private *dev_priv)
 {
-	drm_i915_private_t *dev_priv = (drm_i915_private_t *)data;
 	struct drm_device *dev = dev_priv->dev;
-	struct drm_mode_config *mode_config = &dev->mode_config;
-	unsigned long irqflags;
-	int i;
 
-	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-	for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
-		struct drm_connector *connector;
-
-		if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
-			continue;
-
-		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
-
-		list_for_each_entry(connector, &mode_config->connector_list, head) {
-			struct intel_connector *intel_connector = to_intel_connector(connector);
-
-			if (intel_connector->encoder->hpd_pin == i) {
-				if (connector->polled != intel_connector->polled)
-					DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
-							 drm_get_connector_name(connector));
-				connector->polled = intel_connector->polled;
-				if (!connector->polled)
-					connector->polled = DRM_CONNECTOR_POLL_HPD;
-			}
-		}
-	}
-	if (dev_priv->display.hpd_irq_setup)
-		dev_priv->display.hpd_irq_setup(dev);
-	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
-}
+	intel_hpd_init_work(dev_priv);
 
-void intel_irq_init(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
-	INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
 	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
 	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
 
-	setup_timer(&dev_priv->gpu_error.hangcheck_timer,
-		    i915_hangcheck_elapsed,
-		    (unsigned long) dev);
-	setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func,
-		    (unsigned long) dev_priv);
+	/* Let's track the enabled rps events */
+	if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
+		/* WaGsvRC0ResidencyMethod:vlv */
+		dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
+	else
+		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
+
+	INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
+			  i915_hangcheck_elapsed);
 
 	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
 
-	dev->driver->get_vblank_counter = i915_get_vblank_counter;
-	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
-	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+	if (IS_GEN2(dev_priv)) {
+		dev->max_vblank_count = 0;
+		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
+	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
 		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
-		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
+		dev->driver->get_vblank_counter = g4x_get_vblank_counter;
+	} else {
+		dev->driver->get_vblank_counter = i915_get_vblank_counter;
+		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
 	}
 
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
-	else
-		dev->driver->get_vblank_timestamp = NULL;
+	/*
+	 * Opt out of the vblank disable timer on everything except gen2.
+	 * Gen2 doesn't have a hardware frame counter and so depends on
+	 * vblank interrupts to produce sane vblank sequence numbers.
+ */ + if (!IS_GEN2(dev_priv)) + dev->vblank_disable_immediate = true; + + dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; - if (IS_VALLEYVIEW(dev)) { + if (IS_CHERRYVIEW(dev_priv)) { + dev->driver->irq_handler = cherryview_irq_handler; + dev->driver->irq_preinstall = cherryview_irq_preinstall; + dev->driver->irq_postinstall = cherryview_irq_postinstall; + dev->driver->irq_uninstall = cherryview_irq_uninstall; + dev->driver->enable_vblank = valleyview_enable_vblank; + dev->driver->disable_vblank = valleyview_disable_vblank; + dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; + } else if (IS_VALLEYVIEW(dev_priv)) { dev->driver->irq_handler = valleyview_irq_handler; dev->driver->irq_preinstall = valleyview_irq_preinstall; dev->driver->irq_postinstall = valleyview_irq_postinstall; @@ -3149,64 +4437,112 @@ dev->driver->enable_vblank = valleyview_enable_vblank; dev->driver->disable_vblank = valleyview_disable_vblank; dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; - } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { - /* Share pre & uninstall handlers with ILK/SNB */ - dev->driver->irq_handler = ivybridge_irq_handler; - dev->driver->irq_preinstall = ironlake_irq_preinstall; - dev->driver->irq_postinstall = ivybridge_irq_postinstall; - dev->driver->irq_uninstall = ironlake_irq_uninstall; - dev->driver->enable_vblank = ivybridge_enable_vblank; - dev->driver->disable_vblank = ivybridge_disable_vblank; - dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup; + } else if (INTEL_INFO(dev_priv)->gen >= 8) { + dev->driver->irq_handler = gen8_irq_handler; + dev->driver->irq_preinstall = gen8_irq_reset; + dev->driver->irq_postinstall = gen8_irq_postinstall; + dev->driver->irq_uninstall = gen8_irq_uninstall; + dev->driver->enable_vblank = gen8_enable_vblank; + dev->driver->disable_vblank = gen8_disable_vblank; + if (IS_BROXTON(dev)) + dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup; + else if (HAS_PCH_SPT(dev)) + dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup; + else + dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; } else if (HAS_PCH_SPLIT(dev)) { dev->driver->irq_handler = ironlake_irq_handler; - dev->driver->irq_preinstall = ironlake_irq_preinstall; + dev->driver->irq_preinstall = ironlake_irq_reset; dev->driver->irq_postinstall = ironlake_irq_postinstall; dev->driver->irq_uninstall = ironlake_irq_uninstall; dev->driver->enable_vblank = ironlake_enable_vblank; dev->driver->disable_vblank = ironlake_disable_vblank; - dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup; + dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; } else { - if (INTEL_INFO(dev)->gen == 2) { + if (INTEL_INFO(dev_priv)->gen == 2) { dev->driver->irq_preinstall = i8xx_irq_preinstall; dev->driver->irq_postinstall = i8xx_irq_postinstall; dev->driver->irq_handler = i8xx_irq_handler; dev->driver->irq_uninstall = i8xx_irq_uninstall; - } else if (INTEL_INFO(dev)->gen == 3) { + } else if (INTEL_INFO(dev_priv)->gen == 3) { dev->driver->irq_preinstall = i915_irq_preinstall; dev->driver->irq_postinstall = i915_irq_postinstall; dev->driver->irq_uninstall = i915_irq_uninstall; dev->driver->irq_handler = i915_irq_handler; - dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; } else { dev->driver->irq_preinstall = i965_irq_preinstall; dev->driver->irq_postinstall = i965_irq_postinstall; dev->driver->irq_uninstall = i965_irq_uninstall; dev->driver->irq_handler = i965_irq_handler; - dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; } + if 
(I915_HAS_HOTPLUG(dev_priv))
+			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
 		dev->driver->enable_vblank = i915_enable_vblank;
 		dev->driver->disable_vblank = i915_disable_vblank;
 	}
 }
 
-void intel_hpd_init(struct drm_device *dev)
+/**
+ * intel_irq_install - enables the hardware interrupt
+ * @dev_priv: i915 device instance
+ *
+ * This function enables the hardware interrupt handling, but leaves hotplug
+ * handling disabled. It is called after intel_irq_init().
+ *
+ * In the driver load and resume code we need working interrupts in a few places
+ * but don't want to deal with the hassle of concurrent probe and hotplug
+ * workers. Hence the split into this two-stage approach.
+ */
+int intel_irq_install(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_mode_config *mode_config = &dev->mode_config;
-	struct drm_connector *connector;
-	int i;
+	/*
+	 * We enable some interrupt sources in our postinstall hooks, so mark
+	 * interrupts as enabled _before_ actually enabling them to avoid
+	 * special cases in our ordering checks.
+	 */
+	dev_priv->pm.irqs_enabled = true;
 
-	for (i = 1; i < HPD_NUM_PINS; i++) {
-		dev_priv->hpd_stats[i].hpd_cnt = 0;
-		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
-	}
-	list_for_each_entry(connector, &mode_config->connector_list, head) {
-		struct intel_connector *intel_connector = to_intel_connector(connector);
-		connector->polled = intel_connector->polled;
-		if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
-			connector->polled = DRM_CONNECTOR_POLL_HPD;
-	}
-	if (dev_priv->display.hpd_irq_setup)
-		dev_priv->display.hpd_irq_setup(dev);
+	return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
+}
+
+/**
+ * intel_irq_uninstall - finalizes all irq handling
+ * @dev_priv: i915 device instance
+ *
+ * This stops interrupt and hotplug handling and unregisters and frees all
+ * resources acquired in the init functions.
+ */
+void intel_irq_uninstall(struct drm_i915_private *dev_priv)
+{
+	drm_irq_uninstall(dev_priv->dev);
+	intel_hpd_cancel_work(dev_priv);
+	dev_priv->pm.irqs_enabled = false;
+}
+
+/**
+ * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
+ * @dev_priv: i915 device instance
+ *
+ * This function is used to disable interrupts at runtime, both in the runtime
+ * pm and the system suspend/resume code.
+ */
+void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
+{
+	dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
+	dev_priv->pm.irqs_enabled = false;
+	synchronize_irq(dev_priv->dev->irq);
+}
+
+/**
+ * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
+ * @dev_priv: i915 device instance
+ *
+ * This function is used to enable interrupts at runtime, both in the runtime
+ * pm and the system suspend/resume code.
+ */
+void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
+{
+	dev_priv->pm.irqs_enabled = true;
+	dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
+	dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
 }
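The two runtime-pm helpers above are the outermost entry points used by the suspend/resume paths. Below is a minimal sketch of the intended call ordering; the my_suspend()/my_resume() wrappers are hypothetical and only the intel_runtime_pm_*_interrupts() calls come from this file.

/* Hypothetical suspend/resume wrappers illustrating the intended ordering. */
static int my_suspend(struct drm_i915_private *dev_priv)
{
	/*
	 * Tear down interrupt delivery before cutting power; the
	 * synchronize_irq() inside the helper guarantees no handler is
	 * still running on another CPU afterwards.
	 */
	intel_runtime_pm_disable_interrupts(dev_priv);

	/* ... save hardware state and power down ... */
	return 0;
}

static int my_resume(struct drm_i915_private *dev_priv)
{
	/* ... power up and restore hardware state ... */

	/*
	 * Re-run the full irq_preinstall/irq_postinstall sequence so that
	 * IMR/IER/IIR are reprogrammed from scratch on the freshly powered
	 * hardware rather than trusting whatever the registers contain.
	 */
	intel_runtime_pm_enable_interrupts(dev_priv);
	return 0;
}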