--- zzzz-none-000/linux-3.10.107/arch/arm/mach-mvebu/coherency.c	2017-06-27 09:49:32.000000000 +0000
+++ scorpion-7490-727/linux-3.10.107/arch/arm/mach-mvebu/coherency.c	2021-02-04 17:41:59.000000000 +0000
@@ -1,5 +1,6 @@
 /*
- * Coherency fabric (Aurora) support for Armada 370 and XP platforms.
+ * Coherency fabric (Aurora) support for Armada 370, 375, 38x and XP
+ * platforms.
  *
  * Copyright (C) 2012 Marvell
  *
@@ -11,12 +12,14 @@
  * License version 2. This program is licensed "as is" without any
  * warranty of any kind, whether express or implied.
  *
- * The Armada 370 and Armada XP SOCs have a coherency fabric which is
+ * The Armada 370, 375, 38x and XP SOCs have a coherency fabric which is
  * responsible for ensuring hardware coherency between all CPUs and between
  * CPUs and I/O masters. This file initializes the coherency fabric and
  * supplies basic routines for configuring and controlling hardware coherency
  */
 
+#define pr_fmt(fmt) "mvebu-coherency: " fmt
+
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/of_address.h>
@@ -24,136 +27,189 @@
 #include <linux/smp.h>
 #include <linux/dma-mapping.h>
 #include <linux/platform_device.h>
+#include
+#include
+#include
 #include <asm/smp_plat.h>
-#include "armada-370-xp.h"
+#include
+#include
+#include
+#include "coherency.h"
+#include "mvebu-soc-id.h"
 
-/*
- * Some functions in this file are called very early during SMP
- * initialization. At that time the device tree framework is not yet
- * ready, and it is not possible to get the register address to
- * ioremap it. That's why the pointer below is given with an initial
- * value matching its virtual mapping
- */
-static void __iomem *coherency_base = ARMADA_370_XP_REGS_VIRT_BASE + 0x20200;
+unsigned long coherency_phys_base;
+void __iomem *coherency_base;
 static void __iomem *coherency_cpu_base;
+static void __iomem *cpu_config_base;
 
 /* Coherency fabric registers */
-#define COHERENCY_FABRIC_CFG_OFFSET	0x4
-
 #define IO_SYNC_BARRIER_CTL_OFFSET	0x0
 
-static struct of_device_id of_coherency_table[] = {
-	{.compatible = "marvell,coherency-fabric"},
+enum {
+	COHERENCY_FABRIC_TYPE_NONE,
+	COHERENCY_FABRIC_TYPE_ARMADA_370_XP,
+	COHERENCY_FABRIC_TYPE_ARMADA_375,
+	COHERENCY_FABRIC_TYPE_ARMADA_380,
+};
+
+static const struct of_device_id of_coherency_table[] = {
+	{.compatible = "marvell,coherency-fabric",
+	 .data = (void *) COHERENCY_FABRIC_TYPE_ARMADA_370_XP },
+	{.compatible = "marvell,armada-375-coherency-fabric",
+	 .data = (void *) COHERENCY_FABRIC_TYPE_ARMADA_375 },
+	{.compatible = "marvell,armada-380-coherency-fabric",
+	 .data = (void *) COHERENCY_FABRIC_TYPE_ARMADA_380 },
 	{ /* end of list */ },
 };
 
-#ifdef CONFIG_SMP
-int coherency_get_cpu_count(void)
+/* Functions defined in coherency_ll.S */
+int ll_enable_coherency(void);
+void ll_add_cpu_to_smp_group(void);
+
+#define CPU_CONFIG_SHARED_L2 BIT(16)
+
+/*
+ * Disable the "Shared L2 Present" bit in CPU Configuration register
+ * on Armada XP.
+ *
+ * The "Shared L2 Present" bit affects the "level of coherence" value
+ * in the clidr CP15 register. Cache operation functions such as
+ * "flush all" and "invalidate all" operate on all the cache levels
+ * that included in the defined level of coherence. When HW I/O
+ * coherency is used, this bit causes unnecessary flushes of the L2
+ * cache.
+ */
+static void armada_xp_clear_shared_l2(void)
 {
-	int reg, cnt;
+	u32 reg;
 
-	reg = readl(coherency_base + COHERENCY_FABRIC_CFG_OFFSET);
-	cnt = (reg & 0xF) + 1;
+	if (!cpu_config_base)
+		return;
 
-	return cnt;
+	reg = readl(cpu_config_base);
+	reg &= ~CPU_CONFIG_SHARED_L2;
+	writel(reg, cpu_config_base);
 }
-#endif
-
-/* Function defined in coherency_ll.S */
-int ll_set_cpu_coherent(void __iomem *base_addr, unsigned int hw_cpu_id);
 
-int set_cpu_coherent(unsigned int hw_cpu_id, int smp_group_id)
+static int mvebu_hwcc_notifier(struct notifier_block *nb,
+			       unsigned long event, void *__dev)
 {
-	if (!coherency_base) {
-		pr_warn("Can't make CPU %d cache coherent.\n", hw_cpu_id);
-		pr_warn("Coherency fabric is not initialized\n");
-		return 1;
-	}
+	struct device *dev = __dev;
 
-	return ll_set_cpu_coherent(coherency_base, hw_cpu_id);
-}
+	if (event != BUS_NOTIFY_ADD_DEVICE)
+		return NOTIFY_DONE;
+	set_dma_ops(dev, &arm_coherent_dma_ops);
 
-static inline void mvebu_hwcc_sync_io_barrier(void)
-{
-	writel(0x1, coherency_cpu_base + IO_SYNC_BARRIER_CTL_OFFSET);
-	while (readl(coherency_cpu_base + IO_SYNC_BARRIER_CTL_OFFSET) & 0x1);
+	return NOTIFY_OK;
 }
 
-static dma_addr_t mvebu_hwcc_dma_map_page(struct device *dev, struct page *page,
-					  unsigned long offset, size_t size,
-					  enum dma_data_direction dir,
-					  struct dma_attrs *attrs)
-{
-	if (dir != DMA_TO_DEVICE)
-		mvebu_hwcc_sync_io_barrier();
-	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
-}
+static struct notifier_block mvebu_hwcc_nb = {
+	.notifier_call = mvebu_hwcc_notifier,
+};
+static struct notifier_block mvebu_hwcc_pci_nb = {
+	.notifier_call = mvebu_hwcc_notifier,
+};
 
-static void mvebu_hwcc_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
-				      size_t size, enum dma_data_direction dir,
-				      struct dma_attrs *attrs)
+static int armada_xp_clear_shared_l2_notifier_func(struct notifier_block *nfb,
+						   unsigned long action, void *hcpu)
 {
-	if (dir != DMA_TO_DEVICE)
-		mvebu_hwcc_sync_io_barrier();
-}
+	if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
+		armada_xp_clear_shared_l2();
 
-static void mvebu_hwcc_dma_sync(struct device *dev, dma_addr_t dma_handle,
-				size_t size, enum dma_data_direction dir)
-{
-	if (dir != DMA_TO_DEVICE)
-		mvebu_hwcc_sync_io_barrier();
+	return NOTIFY_OK;
 }
 
-static struct dma_map_ops mvebu_hwcc_dma_ops = {
-	.alloc = arm_dma_alloc,
-	.free = arm_dma_free,
-	.mmap = arm_dma_mmap,
-	.map_page = mvebu_hwcc_dma_map_page,
-	.unmap_page = mvebu_hwcc_dma_unmap_page,
-	.get_sgtable = arm_dma_get_sgtable,
-	.map_sg = arm_dma_map_sg,
-	.unmap_sg = arm_dma_unmap_sg,
-	.sync_single_for_cpu = mvebu_hwcc_dma_sync,
-	.sync_single_for_device = mvebu_hwcc_dma_sync,
-	.sync_sg_for_cpu = arm_dma_sync_sg_for_cpu,
-	.sync_sg_for_device = arm_dma_sync_sg_for_device,
-	.set_dma_mask = arm_dma_set_mask,
+static struct notifier_block armada_xp_clear_shared_l2_notifier = {
+	.notifier_call = armada_xp_clear_shared_l2_notifier_func,
+	.priority = 100,
 };
 
-static int mvebu_hwcc_platform_notifier(struct notifier_block *nb,
-					unsigned long event, void *__dev)
+static void __init armada_370_coherency_init(struct device_node *np)
 {
-	struct device *dev = __dev;
+	struct resource res;
+	struct device_node *cpu_config_np;
 
-	if (event != BUS_NOTIFY_ADD_DEVICE)
-		return NOTIFY_DONE;
-	set_dma_ops(dev, &mvebu_hwcc_dma_ops);
+	of_address_to_resource(np, 0, &res);
+	coherency_phys_base = res.start;
+	/*
+	 * Ensure secondary CPUs will see the updated value,
+	 * which they read before they join the coherency
+	 * fabric, and therefore before they are coherent with
+	 * the boot CPU cache.
+	 */
+	sync_cache_w(&coherency_phys_base);
+	coherency_base = of_iomap(np, 0);
+	coherency_cpu_base = of_iomap(np, 1);
+
+	cpu_config_np = of_find_compatible_node(NULL, NULL,
+						"marvell,armada-xp-cpu-config");
+	if (!cpu_config_np)
+		goto exit;
+
+	cpu_config_base = of_iomap(cpu_config_np, 0);
+	if (!cpu_config_base) {
+		of_node_put(cpu_config_np);
+		goto exit;
+	}
 
-	return NOTIFY_OK;
-}
+	of_node_put(cpu_config_np);
 
-static struct notifier_block mvebu_hwcc_platform_nb = {
-	.notifier_call = mvebu_hwcc_platform_notifier,
-};
+	register_cpu_notifier(&armada_xp_clear_shared_l2_notifier);
+
+exit:
+	set_cpu_coherent();
+}
 
 /*
- * Keep track of whether we have IO hardware coherency enabled or not.
- * On Armada 370's we will not be using it for example. We need to make
- * that available [through coherency_available()] so the mbus controller
- * doesn't enable the IO coherency bit in the attribute bits of the
- * chip selects.
+ * This ioremap hook is used on Armada 375/38x to ensure that all MMIO
+ * areas are mapped as MT_UNCACHED instead of MT_DEVICE. This is
+ * needed for the HW I/O coherency mechanism to work properly without
+ * deadlock.
  */
-static int coherency_enabled;
+static void __iomem *
+armada_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
+			 unsigned int mtype, void *caller)
+{
+	mtype = MT_UNCACHED;
+	return __arm_ioremap_caller(phys_addr, size, mtype, caller);
+}
 
-int coherency_available(void)
+static void __init armada_375_380_coherency_init(struct device_node *np)
 {
-	return coherency_enabled;
+	struct device_node *cache_dn;
+
+	coherency_cpu_base = of_iomap(np, 0);
+	arch_ioremap_caller = armada_wa_ioremap_caller;
+
+	/*
+	 * We should switch the PL310 to I/O coherency mode only if
+	 * I/O coherency is actually enabled.
+	 */
+	if (!coherency_available())
+		return;
+
+	/*
+	 * Add the PL310 property "arm,io-coherent". This makes sure the
+	 * outer sync operation is not used, which allows to
+	 * workaround the system erratum that causes deadlocks when
+	 * doing PCIe in an SMP situation on Armada 375 and Armada
+	 * 38x.
+	 */
+	for_each_compatible_node(cache_dn, NULL, "arm,pl310-cache") {
+		struct property *p;
+
+		p = kzalloc(sizeof(*p), GFP_KERNEL);
+		p->name = kstrdup("arm,io-coherent", GFP_KERNEL);
+		of_add_property(cache_dn, p);
+	}
 }
 
-int __init coherency_init(void)
+static int coherency_type(void)
 {
 	struct device_node *np;
+	const struct of_device_id *match;
+	int type;
 
 	/*
 	 * The coherency fabric is needed:
@@ -174,20 +230,82 @@
 	 * SoCs except the Armada 370). Unfortunately, such decisions
 	 * are taken very early in the kernel boot process, at a point
 	 * where we don't know yet on which SoC we are running.
 	 */
 	if (!is_smp())
-		return 0;
+		return COHERENCY_FABRIC_TYPE_NONE;
+
+	np = of_find_matching_node_and_match(NULL, of_coherency_table, &match);
+	if (!np)
+		return COHERENCY_FABRIC_TYPE_NONE;
+
+	type = (int) match->data;
+
+	of_node_put(np);
+
+	return type;
+}
+
+int set_cpu_coherent(void)
+{
+	int type = coherency_type();
+
+	if (type == COHERENCY_FABRIC_TYPE_ARMADA_370_XP) {
+		if (!coherency_base) {
+			pr_warn("Can't make current CPU cache coherent.\n");
+			pr_warn("Coherency fabric is not initialized\n");
+			return 1;
+		}
+
+		armada_xp_clear_shared_l2();
+		ll_add_cpu_to_smp_group();
+		return ll_enable_coherency();
+	}
+
+	return 0;
+}
+
+int coherency_available(void)
+{
+	return coherency_type() != COHERENCY_FABRIC_TYPE_NONE;
+}
+
+int __init coherency_init(void)
+{
+	int type = coherency_type();
+	struct device_node *np;
 
 	np = of_find_matching_node(NULL, of_coherency_table);
-	if (np) {
-		pr_info("Initializing Coherency fabric\n");
-		coherency_base = of_iomap(np, 0);
-		coherency_cpu_base = of_iomap(np, 1);
-		set_cpu_coherent(cpu_logical_map(smp_processor_id()), 0);
-		coherency_enabled = 1;
+
+	if (type == COHERENCY_FABRIC_TYPE_ARMADA_370_XP)
+		armada_370_coherency_init(np);
+	else if (type == COHERENCY_FABRIC_TYPE_ARMADA_375 ||
+		 type == COHERENCY_FABRIC_TYPE_ARMADA_380)
+		armada_375_380_coherency_init(np);
+
+	of_node_put(np);
+
+	return 0;
+}
+
+static int __init coherency_late_init(void)
+{
+	if (coherency_available())
 		bus_register_notifier(&platform_bus_type,
-				      &mvebu_hwcc_platform_nb);
-	}
+				      &mvebu_hwcc_nb);
+	return 0;
+}
+
+postcore_initcall(coherency_late_init);
 
+#if IS_ENABLED(CONFIG_PCI)
+static int __init coherency_pci_init(void)
+{
+	if (coherency_available())
+		bus_register_notifier(&pci_bus_type,
+				      &mvebu_hwcc_pci_nb);
 	return 0;
 }
+
+arch_initcall(coherency_pci_init);
+#endif