--- zzzz-none-000/linux-3.10.107/drivers/of/address.c	2017-06-27 09:49:32.000000000 +0000
+++ scorpion-7490-727/linux-3.10.107/drivers/of/address.c	2021-02-04 17:41:59.000000000 +0000
@@ -5,6 +5,8 @@
 #include <linux/module.h>
 #include <linux/of_address.h>
 #include <linux/pci_regs.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
 #include <linux/string.h>
 
 /* Max address size we deal with */
@@ -91,7 +93,7 @@
 	return IORESOURCE_MEM;
 }
 
-#ifdef CONFIG_PCI
+#ifdef CONFIG_OF_ADDRESS_PCI
 /*
  * PCI bus specific translator
  */
@@ -166,7 +168,9 @@
 {
 	return of_bus_default_translate(addr + 1, offset, na - 1);
 }
+#endif /* CONFIG_OF_ADDRESS_PCI */
 
+#ifdef CONFIG_PCI
 const __be32 *of_get_pci_address(struct device_node *dev, int bar_no, u64 *size,
 			unsigned int *flags)
 {
@@ -224,6 +228,124 @@
 	return __of_address_to_resource(dev, addrp, size, flags, NULL, r);
 }
 EXPORT_SYMBOL_GPL(of_pci_address_to_resource);
+
+int of_pci_range_parser_init(struct of_pci_range_parser *parser,
+				struct device_node *node)
+{
+	const int na = 3, ns = 2;
+	int rlen;
+
+	parser->node = node;
+	parser->pna = of_n_addr_cells(node);
+	parser->np = parser->pna + na + ns;
+
+	parser->range = of_get_property(node, "ranges", &rlen);
+	if (parser->range == NULL)
+		return -ENOENT;
+
+	parser->end = parser->range + rlen / sizeof(__be32);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(of_pci_range_parser_init);
+
+struct of_pci_range *of_pci_range_parser_one(struct of_pci_range_parser *parser,
+						struct of_pci_range *range)
+{
+	const int na = 3, ns = 2;
+
+	if (!range)
+		return NULL;
+
+	if (!parser->range || parser->range + parser->np > parser->end)
+		return NULL;
+
+	range->pci_space = parser->range[0];
+	range->flags = of_bus_pci_get_flags(parser->range);
+	range->pci_addr = of_read_number(parser->range + 1, ns);
+	range->cpu_addr = of_translate_address(parser->node,
+				parser->range + na);
+	range->size = of_read_number(parser->range + parser->pna + na, ns);
+
+	parser->range += parser->np;
+
+	/* Now consume following elements while they are contiguous */
+	while (parser->range + parser->np <= parser->end) {
+		u32 flags, pci_space;
+		u64 pci_addr, cpu_addr, size;
+
+		pci_space = be32_to_cpup(parser->range);
+		flags = of_bus_pci_get_flags(parser->range);
+		pci_addr = of_read_number(parser->range + 1, ns);
+		cpu_addr = of_translate_address(parser->node,
+				parser->range + na);
+		size = of_read_number(parser->range + parser->pna + na, ns);
+
+		if (flags != range->flags)
+			break;
+		if (pci_addr != range->pci_addr + range->size ||
+		    cpu_addr != range->cpu_addr + range->size)
+			break;
+
+		range->size += size;
+		parser->range += parser->np;
+	}
+
+	return range;
+}
+EXPORT_SYMBOL_GPL(of_pci_range_parser_one);
+
+/*
+ * of_pci_range_to_resource - Create a resource from an of_pci_range
+ * @range:	the PCI range that describes the resource
+ * @np:		device node where the range belongs to
+ * @res:	pointer to a valid resource that will be updated to
+ *		reflect the values contained in the range.
+ *
+ * Returns EINVAL if the range cannot be converted to resource.
+ *
+ * Note that if the range is an IO range, the resource will be converted
+ * using pci_address_to_pio() which can fail if it is called too early or
+ * if the range cannot be matched to any host bridge IO space (our case here).
+ * To guard against that we try to register the IO range first.
+ * If that fails we know that pci_address_to_pio() will do too.
+ */
+int of_pci_range_to_resource(struct of_pci_range *range,
+			     struct device_node *np, struct resource *res)
+{
+	int err;
+	res->flags = range->flags;
+	res->parent = res->child = res->sibling = NULL;
+	res->name = np->full_name;
+
+	if (res->flags & IORESOURCE_IO) {
+		unsigned long port;
+		err = pci_register_io_range(range->cpu_addr, range->size);
+		if (err)
+			goto invalid_range;
+		port = pci_address_to_pio(range->cpu_addr);
+		if (port == (unsigned long)-1) {
+			err = -EINVAL;
+			goto invalid_range;
+		}
+		res->start = port;
+	} else {
+		if ((sizeof(resource_size_t) < 8) &&
+		    upper_32_bits(range->cpu_addr)) {
+			err = -EINVAL;
+			goto invalid_range;
+		}
+
+		res->start = range->cpu_addr;
+	}
+	res->end = res->start + range->size - 1;
+	return 0;
+
+invalid_range:
+	res->start = (resource_size_t)OF_BAD_ADDR;
+	res->end = (resource_size_t)OF_BAD_ADDR;
+	return err;
+}
 #endif /* CONFIG_PCI */
 
 /*
@@ -289,7 +411,7 @@
  */
 
 static struct of_bus of_busses[] = {
-#ifdef CONFIG_PCI
+#ifdef CONFIG_OF_ADDRESS_PCI
 	/* PCI */
 	{
 		.name = "pci",
@@ -300,7 +422,7 @@
 		.translate = of_bus_pci_translate,
 		.get_flags = of_bus_pci_get_flags,
 	},
-#endif /* CONFIG_PCI */
+#endif /* CONFIG_OF_ADDRESS_PCI */
 	/* ISA */
 	{
 		.name = "isa",
@@ -334,12 +456,17 @@
 	return NULL;
 }
 
-static int of_empty_ranges_quirk(void)
+static int of_empty_ranges_quirk(struct device_node *np)
 {
 	if (IS_ENABLED(CONFIG_PPC)) {
-		/* To save cycles, we cache the result */
+		/* To save cycles, we cache the result for global "Mac" setting */
 		static int quirk_state = -1;
 
+		/* PA-SEMI sdc DT bug */
+		if (of_device_is_compatible(np, "1682m-sdc"))
+			return true;
+
+		/* Make quirk cached */
 		if (quirk_state < 0)
 			quirk_state =
 				of_machine_is_compatible("Power Macintosh") ||
@@ -358,9 +485,10 @@
 	int rone;
 	u64 offset = OF_BAD_ADDR;
 
-	/* Normally, an absence of a "ranges" property means we are
+	/*
+	 * Normally, an absence of a "ranges" property means we are
 	 * crossing a non-translatable boundary, and thus the addresses
-	 * below the current not cannot be converted to CPU physical ones.
+	 * below the current cannot be converted to CPU physical ones.
 	 * Unfortunately, while this is very clear in the spec, it's not
 	 * what Apple understood, and they do have things like /uni-n or
 	 * /ht nodes with no "ranges" property and a lot of perfectly
@@ -374,8 +502,8 @@
 	 * This code is only enabled on powerpc. --gcl
 	 */
 	ranges = of_get_property(parent, rprop, &rlen);
-	if (ranges == NULL && !of_empty_ranges_quirk()) {
-		pr_err("OF: no ranges; cannot translate\n");
+	if (ranges == NULL && !of_empty_ranges_quirk(parent)) {
+		pr_debug("OF: no ranges; cannot translate\n");
 		return 1;
 	}
 	if (ranges == NULL || rlen == 0) {
@@ -428,7 +556,7 @@
 	int na, ns, pna, pns;
 	u64 result = OF_BAD_ADDR;
 
-	pr_debug("OF: ** translation for device %s **\n", dev->full_name);
+	pr_debug("OF: ** translation for device %s **\n", of_node_full_name(dev));
 
 	/* Increase refcount at current level */
 	of_node_get(dev);
@@ -442,14 +570,13 @@
 	/* Count address cells & copy address locally */
 	bus->count_cells(dev, &na, &ns);
 	if (!OF_CHECK_COUNTS(na, ns)) {
-		printk(KERN_ERR "prom_parse: Bad cell count for %s\n",
-		       dev->full_name);
+		pr_debug("OF: Bad cell count for %s\n", of_node_full_name(dev));
 		goto bail;
 	}
 	memcpy(addr, in_addr, na * 4);
 
 	pr_debug("OF: bus is %s (na=%d, ns=%d) on %s\n",
-	    bus->name, na, ns, parent->full_name);
+	    bus->name, na, ns, of_node_full_name(parent));
 	of_dump_addr("OF: translating address:", addr, na);
 
 	/* Translate */
@@ -471,12 +598,12 @@
 		pbus->count_cells(dev, &pna, &pns);
 		if (!OF_CHECK_COUNTS(pna, pns)) {
 			printk(KERN_ERR "prom_parse: Bad cell count for %s\n",
-			       dev->full_name);
+			       of_node_full_name(dev));
 			break;
 		}
 
 		pr_debug("OF: parent bus is %s (na=%d, ns=%d) on %s\n",
-		    pbus->name, pna, pns, parent->full_name);
+		    pbus->name, pna, pns, of_node_full_name(parent));
 
 		/* Apply bus translation */
 		if (of_translate_one(dev, bus, pbus, addr, na, ns, pna, rprop))
@@ -508,25 +635,6 @@
 }
 EXPORT_SYMBOL(of_translate_dma_address);
 
-bool of_can_translate_address(struct device_node *dev)
-{
-	struct device_node *parent;
-	struct of_bus *bus;
-	int na, ns;
-
-	parent = of_get_parent(dev);
-	if (parent == NULL)
-		return false;
-
-	bus = of_match_bus(parent);
-	bus->count_cells(dev, &na, &ns);
-
-	of_node_put(parent);
-
-	return OF_CHECK_COUNTS(na, ns);
-}
-EXPORT_SYMBOL(of_can_translate_address);
-
 const __be32 *of_get_address(struct device_node *dev, int index, u64 *size,
 		    unsigned int *flags)
 {
@@ -565,6 +673,121 @@
 }
 EXPORT_SYMBOL(of_get_address);
 
+#ifdef PCI_IOBASE
+struct io_range {
+	struct list_head list;
+	phys_addr_t start;
+	resource_size_t size;
+};
+
+static LIST_HEAD(io_range_list);
+static DEFINE_SPINLOCK(io_range_lock);
+#endif
+
+/*
+ * Record the PCI IO range (expressed as CPU physical address + size).
+ * Return a negative value if an error has occurred, zero otherwise
+ */
+int __weak pci_register_io_range(phys_addr_t addr, resource_size_t size)
+{
+	int err = 0;
+
+#ifdef PCI_IOBASE
+	struct io_range *range;
+	resource_size_t allocated_size = 0;
+
+	/* check if the range hasn't been previously recorded */
+	spin_lock(&io_range_lock);
+	list_for_each_entry(range, &io_range_list, list) {
+		if (addr >= range->start && addr + size <= range->start + size) {
+			/* range already registered, bail out */
+			goto end_register;
+		}
+		allocated_size += range->size;
+	}
+
+	/* range not registered yet, check for available space */
+	if (allocated_size + size - 1 > IO_SPACE_LIMIT) {
+		/* if it's too big check if 64K space can be reserved */
+		if (allocated_size + SZ_64K - 1 > IO_SPACE_LIMIT) {
+			err = -E2BIG;
+			goto end_register;
+		}
+
+		size = SZ_64K;
+		pr_warn("Requested IO range too big, new size set to 64K\n");
+	}
+
+	/* add the range to the list */
+	range = kzalloc(sizeof(*range), GFP_ATOMIC);
+	if (!range) {
+		err = -ENOMEM;
+		goto end_register;
+	}
+
+	range->start = addr;
+	range->size = size;
+
+	list_add_tail(&range->list, &io_range_list);
+
+end_register:
+	spin_unlock(&io_range_lock);
+#endif
+
+	return err;
+}
+
+phys_addr_t pci_pio_to_address(unsigned long pio)
+{
+	phys_addr_t address = (phys_addr_t)OF_BAD_ADDR;
+
+#ifdef PCI_IOBASE
+	struct io_range *range;
+	resource_size_t allocated_size = 0;
+
+	if (pio > IO_SPACE_LIMIT)
+		return address;
+
+	spin_lock(&io_range_lock);
+	list_for_each_entry(range, &io_range_list, list) {
+		if (pio >= allocated_size && pio < allocated_size + range->size) {
+			address = range->start + pio - allocated_size;
+			break;
+		}
+		allocated_size += range->size;
+	}
+	spin_unlock(&io_range_lock);
+#endif
+
+	return address;
+}
+
+unsigned long __weak pci_address_to_pio(phys_addr_t address)
+{
+#ifdef PCI_IOBASE
+	struct io_range *res;
+	resource_size_t offset = 0;
+	unsigned long addr = -1;
+
+	spin_lock(&io_range_lock);
+	list_for_each_entry(res, &io_range_list, list) {
+		if (address >= res->start && address < res->start + res->size) {
+			addr = address - res->start + offset;
+			break;
+		}
+		offset += res->size;
+	}
+	spin_unlock(&io_range_lock);
+
+	return addr;
+#else
+	if (address > IO_SPACE_LIMIT)
+		return (unsigned long)-1;
+
+	return (unsigned long) address;
+#endif
+}
+
 static int __of_address_to_resource(struct device_node *dev,
 		const __be32 *addrp, u64 size, unsigned int flags,
 		const char *name, struct resource *r)
@@ -657,3 +880,149 @@
 	return ioremap(res.start, resource_size(&res));
 }
 EXPORT_SYMBOL(of_iomap);
+
+/*
+ * of_io_request_and_map - Requests a resource and maps the memory mapped IO
+ *			   for a given device_node
+ * @device:	the device whose io range will be mapped
+ * @index:	index of the io range
+ * @name:	name of the resource
+ *
+ * Returns a pointer to the requested and mapped memory or an ERR_PTR() encoded
+ * error code on failure. Usage example:
+ *
+ *	base = of_io_request_and_map(node, 0, "foo");
+ *	if (IS_ERR(base))
+ *		return PTR_ERR(base);
+ */
+void __iomem *of_io_request_and_map(struct device_node *np, int index,
+					const char *name)
+{
+	struct resource res;
+	void __iomem *mem;
+
+	if (of_address_to_resource(np, index, &res))
+		return IOMEM_ERR_PTR(-EINVAL);
+
+	if (!request_mem_region(res.start, resource_size(&res), name))
+		return IOMEM_ERR_PTR(-EBUSY);
+
+	mem = ioremap(res.start, resource_size(&res));
+	if (!mem) {
+		release_mem_region(res.start, resource_size(&res));
+		return IOMEM_ERR_PTR(-ENOMEM);
+	}
+
+	return mem;
+}
+EXPORT_SYMBOL(of_io_request_and_map);
+
+/**
+ * of_dma_get_range - Get DMA range info
+ * @np:		device node to get DMA range info
+ * @dma_addr:	pointer to store initial DMA address of DMA range
+ * @paddr:	pointer to store initial CPU address of DMA range
+ * @size:	pointer to store size of DMA range
+ *
+ * Look in bottom up direction for the first "dma-ranges" property
+ * and parse it.
+ * dma-ranges format:
+ *	DMA addr (dma_addr)	: naddr cells
+ *	CPU addr (phys_addr_t)	: pna cells
+ *	size			: nsize cells
+ *
+ * It returns -ENODEV if "dma-ranges" property was not found
+ * for this device in DT.
+ */
+int of_dma_get_range(struct device_node *np, u64 *dma_addr, u64 *paddr, u64 *size)
+{
+	struct device_node *node = of_node_get(np);
+	const __be32 *ranges = NULL;
+	int len, naddr, nsize, pna;
+	int ret = 0;
+	u64 dmaaddr;
+
+	if (!node)
+		return -EINVAL;
+
+	while (1) {
+		naddr = of_n_addr_cells(node);
+		nsize = of_n_size_cells(node);
+		node = of_get_next_parent(node);
+		if (!node)
+			break;
+
+		ranges = of_get_property(node, "dma-ranges", &len);
+
+		/* Ignore empty ranges, they imply no translation required */
+		if (ranges && len > 0)
+			break;
+
+		/*
+		 * At least empty ranges has to be defined for parent node if
+		 * DMA is supported
+		 */
+		if (!ranges)
+			break;
+	}
+
+	if (!ranges) {
+		pr_debug("%s: no dma-ranges found for node(%s)\n",
+			 __func__, np->full_name);
+		ret = -ENODEV;
+		goto out;
+	}
+
+	len /= sizeof(u32);
+
+	pna = of_n_addr_cells(node);
+
+	/* dma-ranges format:
+	 * DMA addr	: naddr cells
+	 * CPU addr	: pna cells
+	 * size		: nsize cells
+	 */
+	dmaaddr = of_read_number(ranges, naddr);
+	*paddr = of_translate_dma_address(np, ranges);
+	if (*paddr == OF_BAD_ADDR) {
+		pr_err("%s: translation of DMA address(%pad) to CPU address failed node(%s)\n",
+		       __func__, dma_addr, np->full_name);
+		ret = -EINVAL;
+		goto out;
+	}
+	*dma_addr = dmaaddr;
+
+	*size = of_read_number(ranges + naddr + pna, nsize);
+
+	pr_debug("dma_addr(%llx) cpu_addr(%llx) size(%llx)\n",
+		 *dma_addr, *paddr, *size);
+
+out:
+	of_node_put(node);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(of_dma_get_range);
+
+/**
+ * of_dma_is_coherent - Check if device is coherent
+ * @np:	device node
+ *
+ * It returns true if "dma-coherent" property was found
+ * for this device in DT.
+ */
+bool of_dma_is_coherent(struct device_node *np)
+{
+	struct device_node *node = of_node_get(np);
+
+	while (node) {
+		if (of_property_read_bool(node, "dma-coherent")) {
+			of_node_put(node);
+			return true;
+		}
+		node = of_get_next_parent(node);
+	}
+	of_node_put(node);
+	return false;
+}
+EXPORT_SYMBOL_GPL(of_dma_is_coherent);
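
The patch above backports a small "ranges"-walking API (of_pci_range_parser_init(), of_pci_range_parser_one() and of_pci_range_to_resource()). As a minimal consumer-side sketch, and not part of the patch itself, a host-bridge driver could walk a node's "ranges" property as follows; the helper name example_parse_host_bridge_ranges() and its error handling are illustrative assumptions, while pci_add_resource_offset(), kzalloc() and kfree() are long-standing kernel facilities:

#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/slab.h>

/*
 * Hypothetical host-bridge helper: walk the "ranges" property with the
 * parser added in this patch and turn each window into a resource.
 */
static int example_parse_host_bridge_ranges(struct device_node *np,
					    struct list_head *resources)
{
	struct of_pci_range_parser parser;
	struct of_pci_range range;

	if (of_pci_range_parser_init(&parser, np))
		return -ENOENT;			/* no "ranges" property */

	while (of_pci_range_parser_one(&parser, &range)) {
		struct resource *res;

		res = kzalloc(sizeof(*res), GFP_KERNEL);
		if (!res)
			return -ENOMEM;

		/* Registers IO ranges and translates CPU addresses for us */
		if (of_pci_range_to_resource(&range, np, res)) {
			kfree(res);
			continue;		/* skip untranslatable entries */
		}

		pci_add_resource_offset(resources, res,
					res->start - range.pci_addr);
	}

	return 0;
}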