--- zzzz-none-000/linux-3.10.107/arch/powerpc/sysdev/fsl_pci.c 2017-06-27 09:49:32.000000000 +0000 +++ scorpion-7490-727/linux-3.10.107/arch/powerpc/sysdev/fsl_pci.c 2021-02-04 17:41:59.000000000 +0000 @@ -22,26 +22,32 @@ #include #include #include -#include +#include #include #include #include +#include +#include +#include #include #include #include +#include #include +#include +#include #include #include static int fsl_pcie_bus_fixup, is_mpc83xx_pci; -static void quirk_fsl_pcie_header(struct pci_dev *dev) +static void quirk_fsl_pcie_early(struct pci_dev *dev) { u8 hdr_type; /* if we aren't a PCIe don't bother */ - if (!pci_find_capability(dev, PCI_CAP_ID_EXP)) + if (!pci_is_pcie(dev)) return; /* if we aren't in host mode don't bother */ @@ -62,13 +68,10 @@ u32 val = 0; if (hose->indirect_type & PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK) { - if (hose->ops->read == fsl_indirect_read_config) { - struct pci_bus bus; - bus.number = 0; - bus.sysdata = hose; - bus.ops = hose->ops; - indirect_read_config(&bus, 0, PCIE_LTSSM, 4, &val); - } else + if (hose->ops->read == fsl_indirect_read_config) + __indirect_read_config(hose, hose->first_busno, 0, + PCIE_LTSSM, 4, &val); + else early_read_config_dword(hose, 0, 0, PCIE_LTSSM, &val); if (val < PCIE_LTSSM_L0) return 1; @@ -108,6 +111,18 @@ #define MAX_PHYS_ADDR_BITS 40 static u64 pci64_dma_offset = 1ull << MAX_PHYS_ADDR_BITS; +#ifdef CONFIG_SWIOTLB +static void setup_swiotlb_ops(struct pci_controller *hose) +{ + if (ppc_swiotlb_enable) { + hose->controller_ops.dma_dev_setup = pci_dma_dev_setup_swiotlb; + set_pci_dma_ops(&swiotlb_dma_ops); + } +} +#else +static inline void setup_swiotlb_ops(struct pci_controller *hose) {} +#endif + static int fsl_pci_dma_set_mask(struct device *dev, u64 dma_mask) { if (!dev->dma_mask || !dma_supported(dev, dma_mask)) @@ -118,7 +133,7 @@ * address width of the SoC such that we can address any internal * SoC address from across PCI if needed */ - if ((dev->bus == &pci_bus_type) && + if ((dev_is_pci(dev)) && dma_mask >= DMA_BIT_MASK(MAX_PHYS_ADDR_BITS)) { set_dma_ops(dev, &dma_direct_ops); set_dma_offset(dev, pci64_dma_offset); @@ -145,7 +160,7 @@ flags |= 0x10000000; /* enable relaxed ordering */ for (i = 0; size > 0; i++) { - unsigned int bits = min(ilog2(size), + unsigned int bits = min_t(u32, ilog2(size), __ffs(pci_addr | phys_addr)); if (index + i >= 5) @@ -164,6 +179,19 @@ return i; } +static bool is_kdump(void) +{ + struct device_node *node; + + node = of_find_node_by_type(NULL, "memory"); + if (!node) { + WARN_ON_ONCE(1); + return false; + } + + return of_property_read_bool(node, "linux,usable-memory"); +} + /* atmu setup for fsl pci/pcie controller */ static void setup_pci_atmu(struct pci_controller *hose) { @@ -177,6 +205,16 @@ const char *name = hose->dn->full_name; const u64 *reg; int len; + bool setup_inbound; + + /* + * If this is kdump, we don't want to trigger a bunch of PCI + * errors by closing the window on in-flight DMA. + * + * We still run most of the function's logic so that things like + * hose->dma_window_size still get set. 
+ */ + setup_inbound = !is_kdump(); if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) { if (in_be32(&pci->block_rev1) >= PCIE_IP_REV_2_2) { @@ -189,8 +227,11 @@ /* Disable all windows (except powar0 since it's ignored) */ for(i = 1; i < 5; i++) out_be32(&pci->pow[i].powar, 0); - for (i = start_idx; i < end_idx; i++) - out_be32(&pci->piw[i].piwar, 0); + + if (setup_inbound) { + for (i = start_idx; i < end_idx; i++) + out_be32(&pci->piw[i].piwar, 0); + } /* Setup outbound MEM window */ for(i = 0, j = 1; i < 3; i++) { @@ -263,6 +304,7 @@ /* Setup inbound mem window */ mem = memblock_end_of_DRAM(); + pr_info("%s: end of DRAM %llx\n", __func__, mem); /* * The msi-address-64 property, if it exists, indicates the physical @@ -297,20 +339,22 @@ if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) { /* Size window to exact size if power-of-two or one size up */ if ((1ull << mem_log) != mem) { + mem_log++; if ((1ull << mem_log) > mem) pr_info("%s: Setting PCI inbound window " "greater than memory size\n", name); - mem_log++; } piwar |= ((mem_log - 1) & PIWAR_SZ_MASK); - /* Setup inbound memory window */ - out_be32(&pci->piw[win_idx].pitar, 0x00000000); - out_be32(&pci->piw[win_idx].piwbar, 0x00000000); - out_be32(&pci->piw[win_idx].piwar, piwar); - win_idx--; + if (setup_inbound) { + /* Setup inbound memory window */ + out_be32(&pci->piw[win_idx].pitar, 0x00000000); + out_be32(&pci->piw[win_idx].piwbar, 0x00000000); + out_be32(&pci->piw[win_idx].piwar, piwar); + } + win_idx--; hose->dma_window_base_cur = 0x00000000; hose->dma_window_size = (resource_size_t)sz; @@ -328,13 +372,15 @@ piwar = (piwar & ~PIWAR_SZ_MASK) | (mem_log - 1); - /* Setup inbound memory window */ - out_be32(&pci->piw[win_idx].pitar, 0x00000000); - out_be32(&pci->piw[win_idx].piwbear, - pci64_dma_offset >> 44); - out_be32(&pci->piw[win_idx].piwbar, - pci64_dma_offset >> 12); - out_be32(&pci->piw[win_idx].piwar, piwar); + if (setup_inbound) { + /* Setup inbound memory window */ + out_be32(&pci->piw[win_idx].pitar, 0x00000000); + out_be32(&pci->piw[win_idx].piwbear, + pci64_dma_offset >> 44); + out_be32(&pci->piw[win_idx].piwbar, + pci64_dma_offset >> 12); + out_be32(&pci->piw[win_idx].piwar, piwar); + } /* * install our own dma_set_mask handler to fixup dma_ops @@ -347,12 +393,15 @@ } else { u64 paddr = 0; - /* Setup inbound memory window */ - out_be32(&pci->piw[win_idx].pitar, paddr >> 12); - out_be32(&pci->piw[win_idx].piwbar, paddr >> 12); - out_be32(&pci->piw[win_idx].piwar, (piwar | (mem_log - 1))); - win_idx--; + if (setup_inbound) { + /* Setup inbound memory window */ + out_be32(&pci->piw[win_idx].pitar, paddr >> 12); + out_be32(&pci->piw[win_idx].piwbar, paddr >> 12); + out_be32(&pci->piw[win_idx].piwar, + (piwar | (mem_log - 1))); + } + win_idx--; paddr += 1ull << mem_log; sz -= 1ull << mem_log; @@ -360,11 +409,15 @@ mem_log = ilog2(sz); piwar |= (mem_log - 1); - out_be32(&pci->piw[win_idx].pitar, paddr >> 12); - out_be32(&pci->piw[win_idx].piwbar, paddr >> 12); - out_be32(&pci->piw[win_idx].piwar, piwar); - win_idx--; + if (setup_inbound) { + out_be32(&pci->piw[win_idx].pitar, + paddr >> 12); + out_be32(&pci->piw[win_idx].piwbar, + paddr >> 12); + out_be32(&pci->piw[win_idx].piwar, piwar); + } + win_idx--; paddr += 1ull << mem_log; } @@ -373,7 +426,9 @@ } if (hose->dma_window_size < mem) { -#ifndef CONFIG_SWIOTLB +#ifdef CONFIG_SWIOTLB + ppc_swiotlb_enable = 1; +#else pr_err("%s: ERROR: Memory size exceeds PCI ATMU ability to " "map - enable CONFIG_SWIOTLB to avoid dma errors.\n", name); @@ -448,7 +503,7 @@ } } 
-int __init fsl_add_bridge(struct platform_device *pdev, int is_primary) +int fsl_add_bridge(struct platform_device *pdev, int is_primary) { int len; struct pci_controller *hose; @@ -513,7 +568,8 @@ } else { /* For PCI read PROG to identify controller mode */ early_read_config_byte(hose, 0, 0, PCI_CLASS_PROG, &progif); - if ((progif & 1) == 1) + if ((progif & 1) && + !of_property_read_bool(dev, "fsl,pci-agent-force-enum")) goto no_bridge; } @@ -542,6 +598,9 @@ /* Setup PEX window registers */ setup_pci_atmu(hose); + /* Set up controller operations */ + setup_swiotlb_ops(hose); + return 0; no_bridge: @@ -556,7 +615,8 @@ } #endif /* CONFIG_FSL_SOC_BOOKE || CONFIG_PPC_86xx */ -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, + quirk_fsl_pcie_early); #if defined(CONFIG_PPC_83xx) || defined(CONFIG_PPC_MPC512x) struct mpc83xx_pcie_priv { @@ -635,61 +695,21 @@ return pcie->cfg_type1 + offset; } -static int mpc83xx_pcie_read_config(struct pci_bus *bus, unsigned int devfn, - int offset, int len, u32 *val) -{ - void __iomem *cfg_addr; - - cfg_addr = mpc83xx_pcie_remap_cfg(bus, devfn, offset); - if (!cfg_addr) - return PCIBIOS_DEVICE_NOT_FOUND; - - switch (len) { - case 1: - *val = in_8(cfg_addr); - break; - case 2: - *val = in_le16(cfg_addr); - break; - default: - *val = in_le32(cfg_addr); - break; - } - - return PCIBIOS_SUCCESSFUL; -} - static int mpc83xx_pcie_write_config(struct pci_bus *bus, unsigned int devfn, int offset, int len, u32 val) { struct pci_controller *hose = pci_bus_to_host(bus); - void __iomem *cfg_addr; - - cfg_addr = mpc83xx_pcie_remap_cfg(bus, devfn, offset); - if (!cfg_addr) - return PCIBIOS_DEVICE_NOT_FOUND; /* PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS */ if (offset == PCI_PRIMARY_BUS && bus->number == hose->first_busno) val &= 0xffffff00; - switch (len) { - case 1: - out_8(cfg_addr, val); - break; - case 2: - out_le16(cfg_addr, val); - break; - default: - out_le32(cfg_addr, val); - break; - } - - return PCIBIOS_SUCCESSFUL; + return pci_generic_config_write(bus, devfn, offset, len, val); } static struct pci_ops mpc83xx_pcie_ops = { - .read = mpc83xx_pcie_read_config, + .map_bus = mpc83xx_pcie_remap_cfg, + .read = pci_generic_config_read, .write = mpc83xx_pcie_write_config, }; @@ -843,8 +863,8 @@ in = pcie->cfg_type0 + PEX_RC_INWIN_BASE; for (i = 0; i < 4; i++) { /* not enabled, skip */ - if (!in_le32(&in[i].ar) & PEX_RCIWARn_EN) - continue; + if (!(in_le32(&in[i].ar) & PEX_RCIWARn_EN)) + continue; if (get_immrbase() == in_le32(&in[i].tar)) return (u64)in_le32(&in[i].barh) << 32 | @@ -861,6 +881,14 @@ pci_bus_read_config_dword(hose->bus, PCI_DEVFN(0, 0), PCI_BASE_ADDRESS_0, &base); + + /* + * For PEXCSRBAR, bit 3-0 indicate prefetchable and + * address type. 
So when getting base address, these + * bits should be masked + */ + base &= PCI_BASE_ADDRESS_MEM_MASK; + return base; } #endif @@ -868,12 +896,167 @@ return 0; } +#ifdef CONFIG_E500 +static int mcheck_handle_load(struct pt_regs *regs, u32 inst) +{ + unsigned int rd, ra, rb, d; + + rd = get_rt(inst); + ra = get_ra(inst); + rb = get_rb(inst); + d = get_d(inst); + + switch (get_op(inst)) { + case 31: + switch (get_xop(inst)) { + case OP_31_XOP_LWZX: + case OP_31_XOP_LWBRX: + regs->gpr[rd] = 0xffffffff; + break; + + case OP_31_XOP_LWZUX: + regs->gpr[rd] = 0xffffffff; + regs->gpr[ra] += regs->gpr[rb]; + break; + + case OP_31_XOP_LBZX: + regs->gpr[rd] = 0xff; + break; + + case OP_31_XOP_LBZUX: + regs->gpr[rd] = 0xff; + regs->gpr[ra] += regs->gpr[rb]; + break; + + case OP_31_XOP_LHZX: + case OP_31_XOP_LHBRX: + regs->gpr[rd] = 0xffff; + break; + + case OP_31_XOP_LHZUX: + regs->gpr[rd] = 0xffff; + regs->gpr[ra] += regs->gpr[rb]; + break; + + case OP_31_XOP_LHAX: + regs->gpr[rd] = ~0UL; + break; + + case OP_31_XOP_LHAUX: + regs->gpr[rd] = ~0UL; + regs->gpr[ra] += regs->gpr[rb]; + break; + + default: + return 0; + } + break; + + case OP_LWZ: + regs->gpr[rd] = 0xffffffff; + break; + + case OP_LWZU: + regs->gpr[rd] = 0xffffffff; + regs->gpr[ra] += (s16)d; + break; + + case OP_LBZ: + regs->gpr[rd] = 0xff; + break; + + case OP_LBZU: + regs->gpr[rd] = 0xff; + regs->gpr[ra] += (s16)d; + break; + + case OP_LHZ: + regs->gpr[rd] = 0xffff; + break; + + case OP_LHZU: + regs->gpr[rd] = 0xffff; + regs->gpr[ra] += (s16)d; + break; + + case OP_LHA: + regs->gpr[rd] = ~0UL; + break; + + case OP_LHAU: + regs->gpr[rd] = ~0UL; + regs->gpr[ra] += (s16)d; + break; + + default: + return 0; + } + + return 1; +} + +static int is_in_pci_mem_space(phys_addr_t addr) +{ + struct pci_controller *hose; + struct resource *res; + int i; + + list_for_each_entry(hose, &hose_list, list_node) { + if (!(hose->indirect_type & PPC_INDIRECT_TYPE_EXT_REG)) + continue; + + for (i = 0; i < 3; i++) { + res = &hose->mem_resources[i]; + if ((res->flags & IORESOURCE_MEM) && + addr >= res->start && addr <= res->end) + return 1; + } + } + return 0; +} + +int fsl_pci_mcheck_exception(struct pt_regs *regs) +{ + u32 inst; + int ret; + phys_addr_t addr = 0; + + /* Let KVM/QEMU deal with the exception */ + if (regs->msr & MSR_GS) + return 0; + +#ifdef CONFIG_PHYS_64BIT + addr = mfspr(SPRN_MCARU); + addr <<= 32; +#endif + addr += mfspr(SPRN_MCAR); + + if (is_in_pci_mem_space(addr)) { + if (user_mode(regs)) { + pagefault_disable(); + ret = get_user(regs->nip, &inst); + pagefault_enable(); + } else { + ret = probe_kernel_address((void *)regs->nip, inst); + } + + if (!ret && mcheck_handle_load(regs, inst)) { + regs->nip += 4; + return 1; + } + } + + return 0; +} +#endif + #if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx) static const struct of_device_id pci_ids[] = { { .compatible = "fsl,mpc8540-pci", }, { .compatible = "fsl,mpc8548-pcie", }, { .compatible = "fsl,mpc8610-pci", }, { .compatible = "fsl,mpc8641-pcie", }, + { .compatible = "fsl,qoriq-pcie", }, { .compatible = "fsl,qoriq-pcie-v2.1", }, { .compatible = "fsl,qoriq-pcie-v2.2", }, { .compatible = "fsl,qoriq-pcie-v2.3", }, @@ -924,73 +1107,170 @@ } } -static int fsl_pci_probe(struct platform_device *pdev) +#ifdef CONFIG_PM_SLEEP +static irqreturn_t fsl_pci_pme_handle(int irq, void *dev_id) { - int ret; - struct device_node *node; -#ifdef CONFIG_SWIOTLB - struct pci_controller *hose; -#endif + struct pci_controller *hose = dev_id; + struct ccsr_pci __iomem *pci = hose->private_data; + u32 
dr; - node = pdev->dev.of_node; - ret = fsl_add_bridge(pdev, fsl_pci_primary == node); + dr = in_be32(&pci->pex_pme_mes_dr); + if (!dr) + return IRQ_NONE; -#ifdef CONFIG_SWIOTLB - if (ret == 0) { - hose = pci_find_hose_for_OF_device(pdev->dev.of_node); + out_be32(&pci->pex_pme_mes_dr, dr); - /* - * if we couldn't map all of DRAM via the dma windows - * we need SWIOTLB to handle buffers located outside of - * dma capable memory region - */ - if (memblock_end_of_DRAM() - 1 > hose->dma_window_base_cur + - hose->dma_window_size) - ppc_swiotlb_enable = 1; + return IRQ_HANDLED; +} + +static int fsl_pci_pme_probe(struct pci_controller *hose) +{ + struct ccsr_pci __iomem *pci; + struct pci_dev *dev; + int pme_irq; + int res; + u16 pms; + + /* Get hose's pci_dev */ + dev = list_first_entry(&hose->bus->devices, typeof(*dev), bus_list); + + /* PME Disable */ + pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pms); + pms &= ~PCI_PM_CTRL_PME_ENABLE; + pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pms); + + pme_irq = irq_of_parse_and_map(hose->dn, 0); + if (!pme_irq) { + dev_err(&dev->dev, "Failed to map PME interrupt.\n"); + + return -ENXIO; + } + + res = devm_request_irq(hose->parent, pme_irq, + fsl_pci_pme_handle, + IRQF_SHARED, + "[PCI] PME", hose); + if (res < 0) { + dev_err(&dev->dev, "Unable to request irq %d for PME\n", pme_irq); + irq_dispose_mapping(pme_irq); + + return -ENODEV; } -#endif - mpc85xx_pci_err_probe(pdev); + pci = hose->private_data; + + /* Enable PTOD, ENL23D & EXL23D */ + clrbits32(&pci->pex_pme_mes_disr, + PME_DISR_EN_PTOD | PME_DISR_EN_ENL23D | PME_DISR_EN_EXL23D); + + out_be32(&pci->pex_pme_mes_ier, 0); + setbits32(&pci->pex_pme_mes_ier, + PME_DISR_EN_PTOD | PME_DISR_EN_ENL23D | PME_DISR_EN_EXL23D); + + /* PME Enable */ + pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pms); + pms |= PCI_PM_CTRL_PME_ENABLE; + pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pms); return 0; } -#ifdef CONFIG_PM -static int fsl_pci_resume(struct device *dev) +static void send_pme_turnoff_message(struct pci_controller *hose) { - struct pci_controller *hose; - struct resource pci_rsrc; + struct ccsr_pci __iomem *pci = hose->private_data; + u32 dr; + int i; - hose = pci_find_hose_for_OF_device(dev->of_node); - if (!hose) - return -ENODEV; + /* Send PME_Turn_Off Message Request */ + setbits32(&pci->pex_pmcr, PEX_PMCR_PTOMR); - if (of_address_to_resource(dev->of_node, 0, &pci_rsrc)) { - dev_err(dev, "Get pci register base failed."); - return -ENODEV; + /* Wait trun off done */ + for (i = 0; i < 150; i++) { + dr = in_be32(&pci->pex_pme_mes_dr); + if (dr) { + out_be32(&pci->pex_pme_mes_dr, dr); + break; + } + + udelay(1000); } +} - setup_pci_atmu(hose); +static void fsl_pci_syscore_do_suspend(struct pci_controller *hose) +{ + send_pme_turnoff_message(hose); +} + +static int fsl_pci_syscore_suspend(void) +{ + struct pci_controller *hose, *tmp; + + list_for_each_entry_safe(hose, tmp, &hose_list, list_node) + fsl_pci_syscore_do_suspend(hose); return 0; } -static const struct dev_pm_ops pci_pm_ops = { - .resume = fsl_pci_resume, -}; +static void fsl_pci_syscore_do_resume(struct pci_controller *hose) +{ + struct ccsr_pci __iomem *pci = hose->private_data; + u32 dr; + int i; -#define PCI_PM_OPS (&pci_pm_ops) + /* Send Exit L2 State Message */ + setbits32(&pci->pex_pmcr, PEX_PMCR_EXL2S); -#else + /* Wait exit done */ + for (i = 0; i < 150; i++) { + dr = in_be32(&pci->pex_pme_mes_dr); + if (dr) { + out_be32(&pci->pex_pme_mes_dr, dr); + break; + } + + udelay(1000); + } + + 
setup_pci_atmu(hose); +} + +static void fsl_pci_syscore_resume(void) +{ + struct pci_controller *hose, *tmp; + + list_for_each_entry_safe(hose, tmp, &hose_list, list_node) + fsl_pci_syscore_do_resume(hose); +} -#define PCI_PM_OPS NULL +static struct syscore_ops pci_syscore_pm_ops = { + .suspend = fsl_pci_syscore_suspend, + .resume = fsl_pci_syscore_resume, +}; +#endif +void fsl_pcibios_fixup_phb(struct pci_controller *phb) +{ +#ifdef CONFIG_PM_SLEEP + fsl_pci_pme_probe(phb); #endif +} + +static int fsl_pci_probe(struct platform_device *pdev) +{ + struct device_node *node; + int ret; + + node = pdev->dev.of_node; + ret = fsl_add_bridge(pdev, fsl_pci_primary == node); + + mpc85xx_pci_err_probe(pdev); + + return 0; +} static struct platform_driver fsl_pci_driver = { .driver = { .name = "fsl-pci", - .pm = PCI_PM_OPS, .of_match_table = pci_ids, }, .probe = fsl_pci_probe, @@ -998,6 +1278,9 @@ static int __init fsl_pci_init(void) { +#ifdef CONFIG_PM_SLEEP + register_syscore_ops(&pci_syscore_pm_ops); +#endif return platform_driver_register(&fsl_pci_driver); } arch_initcall(fsl_pci_init);
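
As an illustration of the ATMU arithmetic the hunks above touch (the window-carving loop in setup_one_atmu() and the inbound-window sizing in setup_pci_atmu()), here is a minimal user-space sketch. carve_outbound(), piwar_size_field(), struct atmu_window and the ilog2/__ffs stand-ins are hypothetical helpers for this sketch, not kernel API; only the size/alignment arithmetic is meant to mirror the patched code, not the register programming.

#include <stdint.h>
#include <stdio.h>

struct atmu_window {
	uint64_t pci_addr;
	uint64_t phys_addr;
	unsigned int bits;		/* window size is 2^bits bytes */
};

/* floor(log2(v)), v != 0; stands in for the kernel's ilog2() */
static unsigned int ilog2_u64(uint64_t v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

/* index of the lowest set bit, v != 0; stands in for __ffs() */
static unsigned int ffs_u64(uint64_t v)
{
	unsigned int r = 0;

	while (!(v & 1)) {
		v >>= 1;
		r++;
	}
	return r;
}

/*
 * Outbound side: carve [phys_addr, phys_addr + size) into naturally aligned
 * power-of-two windows, like the loop in setup_one_atmu().  Each window is
 * the smaller of "largest power of two that still fits" and "largest natural
 * alignment of the current PCI/physical addresses".
 */
static int carve_outbound(uint64_t pci_addr, uint64_t phys_addr, uint64_t size,
			  struct atmu_window *win, int max_win)
{
	int i;

	for (i = 0; size > 0 && i < max_win; i++) {
		uint64_t mask = pci_addr | phys_addr;
		unsigned int bits = ilog2_u64(size);
		unsigned int align = mask ? ffs_u64(mask) : 64;

		if (align < bits)
			bits = align;

		win[i].pci_addr = pci_addr;
		win[i].phys_addr = phys_addr;
		win[i].bits = bits;

		pci_addr += 1ull << bits;
		phys_addr += 1ull << bits;
		size -= 1ull << bits;
	}
	return i;
}

/*
 * Inbound side, PCIe case: the window is sized to DRAM, rounded up to the
 * next power of two when DRAM is not already one ("one size up"), and the
 * PIWAR size field encodes log2(window size) - 1.
 */
static unsigned int piwar_size_field(uint64_t mem)
{
	unsigned int mem_log = ilog2_u64(mem);

	if ((1ull << mem_log) != mem)
		mem_log++;

	return mem_log - 1;
}

int main(void)
{
	struct atmu_window win[4];
	int i, n;

	/* a 768 MiB outbound MEM range at 0xc0000000, identity mapped */
	n = carve_outbound(0xc0000000ull, 0xc0000000ull, 0x30000000ull, win, 4);
	for (i = 0; i < n; i++)
		printf("powar%d: pci %#llx phys %#llx size 2^%u\n", i + 1,
		       (unsigned long long)win[i].pci_addr,
		       (unsigned long long)win[i].phys_addr, win[i].bits);

	/* 1.5 GiB of DRAM rounds up to a 2 GiB inbound window */
	i = (int)piwar_size_field(0x60000000ull);
	printf("piwar size field %d, window %#llx\n", i,
	       (unsigned long long)(1ull << (i + 1)));
	return 0;
}

Built with any C99 compiler, the example carves the 768 MiB range into a 512 MiB and a 256 MiB window (powar1/powar2) and reports a size field of 30, i.e. a 2 GiB inbound window. When even the inbound windows cannot cover all of DRAM, the hunk above now sets ppc_swiotlb_enable (if CONFIG_SWIOTLB is built in) instead of only printing an error.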
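
The CONFIG_E500 machine-check path added above recovers from loads that fault on PCI memory space (for example when a device drops off the bus mid-access) by completing the load with all-ones and stepping over the instruction. Below is a reduced sketch of that idea, assuming the standard PowerPC D-form layout (primary opcode in the top 6 bits, destination GPR in the next 5, matching the get_op()/get_rt() helpers used in the patch) and covering only lwz/lbz/lhz. struct fake_regs and emulate_faulted_load() are illustrative stand-ins, not the kernel's pt_regs or mcheck_handle_load().

#include <stdint.h>
#include <stdio.h>

struct fake_regs {
	uint64_t gpr[32];
	uint64_t nip;
};

/*
 * On a machine check taken on a load from PCI memory space, pretend the
 * device returned all-ones (the usual master-abort read value) and step
 * over the faulting instruction.
 */
static int emulate_faulted_load(struct fake_regs *regs, uint32_t inst)
{
	unsigned int op = inst >> 26;		/* primary opcode */
	unsigned int rd = (inst >> 21) & 0x1f;	/* destination GPR */

	switch (op) {
	case 32:			/* lwz: 32-bit load reads 0xffffffff */
		regs->gpr[rd] = 0xffffffff;
		break;
	case 34:			/* lbz: byte load reads 0xff */
		regs->gpr[rd] = 0xff;
		break;
	case 40:			/* lhz: halfword load reads 0xffff */
		regs->gpr[rd] = 0xffff;
		break;
	default:
		return 0;		/* not a load form handled here */
	}

	regs->nip += 4;			/* skip the faulting instruction */
	return 1;
}

int main(void)
{
	struct fake_regs regs = { .nip = 0x1000 };
	uint32_t inst = (32u << 26) | (3u << 21) | (9u << 16);	/* lwz r3,0(r9) */

	if (emulate_faulted_load(&regs, inst))
		printf("r3 = %#llx, nip advanced to %#llx\n",
		       (unsigned long long)regs.gpr[3],
		       (unsigned long long)regs.nip);
	return 0;
}

In the patch itself, fsl_pci_mcheck_exception() only attempts this fixup after checking that the machine-check address (SPRN_MCAR/MCARU) really falls inside one of the controller's PCI memory resources, and mcheck_handle_load() handles the full set of byte/halfword/word and update/indexed load forms.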