--- zzzz-none-000/linux-3.10.107/drivers/base/dma-contiguous.c	2017-06-27 09:49:32.000000000 +0000
+++ scorpion-7490-727/linux-3.10.107/drivers/base/dma-contiguous.c	2021-02-04 17:41:59.000000000 +0000
@@ -24,22 +24,9 @@
 
 #include <linux/memblock.h>
 #include <linux/err.h>
-#include <linux/mm.h>
-#include <linux/mutex.h>
-#include <linux/page-isolation.h>
 #include <linux/sizes.h>
-#include <linux/slab.h>
-#include <linux/swap.h>
-#include <linux/mm_types.h>
 #include <linux/dma-contiguous.h>
-
-struct cma {
-	unsigned long base_pfn;
-	unsigned long count;
-	unsigned long *bitmap;
-};
-
-struct cma *dma_contiguous_default_area;
+#include <linux/cma.h>
 
 #ifdef CONFIG_CMA_SIZE_MBYTES
 #define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
@@ -47,9 +34,11 @@
 #define CMA_SIZE_MBYTES 0
 #endif
 
+struct cma *dma_contiguous_default_area;
+
 /*
  * Default global CMA area size can be defined in kernel's .config.
- * This is usefull mainly for distro maintainers to create a kernel
+ * This is useful mainly for distro maintainers to create a kernel
  * that works correctly for most supported systems.
  * The size can be set in bytes or as a percentage of the total memory
  * in the system.
@@ -57,13 +46,24 @@
  * Users, who want to set the size of global CMA area for their system
  * should use cma= kernel parameter.
  */
-static const phys_addr_t size_bytes = CMA_SIZE_MBYTES * SZ_1M;
+static const phys_addr_t size_bytes = (phys_addr_t)CMA_SIZE_MBYTES * SZ_1M;
 static phys_addr_t size_cmdline = -1;
+static phys_addr_t base_cmdline;
+static phys_addr_t limit_cmdline;
 
 static int __init early_cma(char *p)
 {
 	pr_debug("%s(%s)\n", __func__, p);
 	size_cmdline = memparse(p, &p);
+	if (*p != '@')
+		return 0;
+	base_cmdline = memparse(p + 1, &p);
+	if (*p != '-') {
+		limit_cmdline = base_cmdline + size_cmdline;
+		return 0;
+	}
+	limit_cmdline = memparse(p + 1, &p);
+
 	return 0;
 }
 early_param("cma", early_cma);
@@ -96,7 +96,7 @@
 #endif
 
 /**
- * dma_contiguous_reserve() - reserve area for contiguous memory handling
+ * dma_contiguous_reserve() - reserve area(s) for contiguous memory handling
  * @limit: End address of the reserved memory (optional, 0 for any).
  *
  * This function reserves memory from early allocator. It should be
@@ -107,11 +107,18 @@
 void __init dma_contiguous_reserve(phys_addr_t limit)
 {
 	phys_addr_t selected_size = 0;
+	phys_addr_t selected_base = 0;
+	phys_addr_t selected_limit = limit;
+	bool fixed = false;
 
 	pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);
 
 	if (size_cmdline != -1) {
 		selected_size = size_cmdline;
+		selected_base = base_cmdline;
+		selected_limit = min_not_zero(limit_cmdline, limit);
+		if (base_cmdline + size_cmdline == limit_cmdline)
+			fixed = true;
 	} else {
 #ifdef CONFIG_CMA_SIZE_SEL_MBYTES
 		selected_size = size_bytes;
@@ -124,172 +131,49 @@
 #endif
 	}
 
-	if (selected_size) {
+	if (selected_size && !dma_contiguous_default_area) {
 		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
 			 (unsigned long)selected_size / SZ_1M);
 
-		dma_declare_contiguous(NULL, selected_size, 0, limit);
+		dma_contiguous_reserve_area(selected_size, selected_base,
+					    selected_limit,
+					    &dma_contiguous_default_area,
+					    fixed);
 	}
-};
-
-static DEFINE_MUTEX(cma_mutex);
-
-static __init int cma_activate_area(unsigned long base_pfn, unsigned long count)
-{
-	unsigned long pfn = base_pfn;
-	unsigned i = count >> pageblock_order;
-	struct zone *zone;
-
-	WARN_ON_ONCE(!pfn_valid(pfn));
-	zone = page_zone(pfn_to_page(pfn));
-
-	do {
-		unsigned j;
-		base_pfn = pfn;
-		for (j = pageblock_nr_pages; j; --j, pfn++) {
-			WARN_ON_ONCE(!pfn_valid(pfn));
-			if (page_zone(pfn_to_page(pfn)) != zone)
-				return -EINVAL;
-		}
-		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
-	} while (--i);
-	return 0;
 }
 
-static __init struct cma *cma_create_area(unsigned long base_pfn,
-					  unsigned long count)
-{
-	int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
-	struct cma *cma;
-	int ret = -ENOMEM;
-
-	pr_debug("%s(base %08lx, count %lx)\n", __func__, base_pfn, count);
-
-	cma = kmalloc(sizeof *cma, GFP_KERNEL);
-	if (!cma)
-		return ERR_PTR(-ENOMEM);
-
-	cma->base_pfn = base_pfn;
-	cma->count = count;
-	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
-
-	if (!cma->bitmap)
-		goto no_mem;
-
-	ret = cma_activate_area(base_pfn, count);
-	if (ret)
-		goto error;
-
-	pr_debug("%s: returned %p\n", __func__, (void *)cma);
-	return cma;
-
-error:
-	kfree(cma->bitmap);
-no_mem:
-	kfree(cma);
-	return ERR_PTR(ret);
-}
-
-static struct cma_reserved {
-	phys_addr_t start;
-	unsigned long size;
-	struct device *dev;
-} cma_reserved[MAX_CMA_AREAS] __initdata;
-static unsigned cma_reserved_count __initdata;
-
-static int __init cma_init_reserved_areas(void)
-{
-	struct cma_reserved *r = cma_reserved;
-	unsigned i = cma_reserved_count;
-
-	pr_debug("%s()\n", __func__);
-
-	for (; i; --i, ++r) {
-		struct cma *cma;
-		cma = cma_create_area(PFN_DOWN(r->start),
-				      r->size >> PAGE_SHIFT);
-		if (!IS_ERR(cma))
-			dev_set_cma_area(r->dev, cma);
-	}
-	return 0;
-}
-core_initcall(cma_init_reserved_areas);
-
 /**
- * dma_declare_contiguous() - reserve area for contiguous memory handling
- *			      for particular device
- * @dev:   Pointer to device structure.
- * @size:  Size of the reserved memory.
- * @base:  Start address of the reserved memory (optional, 0 for any).
+ * dma_contiguous_reserve_area() - reserve custom contiguous area
+ * @size: Size of the reserved area (in bytes),
+ * @base: Base address of the reserved area optional, use 0 for any
  * @limit: End address of the reserved memory (optional, 0 for any).
+ * @res_cma: Pointer to store the created cma region.
+ * @fixed: hint about where to place the reserved area
+ *
+ * This function reserves memory from early allocator. It should be
+ * called by arch specific code once the early allocator (memblock or bootmem)
+ * has been activated and all other subsystems have already allocated/reserved
+ * memory. This function allows to create custom reserved areas for specific
+ * devices.
  *
- * This function reserves memory for specified device. It should be
- * called by board specific code when early allocator (memblock or bootmem)
- * is still activate.
+ * If @fixed is true, reserve contiguous area at exactly @base. If false,
+ * reserve in range from @base to @limit.
  */
-int __init dma_declare_contiguous(struct device *dev, phys_addr_t size,
-				  phys_addr_t base, phys_addr_t limit)
+int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
+				       phys_addr_t limit, struct cma **res_cma,
+				       bool fixed)
 {
-	struct cma_reserved *r = &cma_reserved[cma_reserved_count];
-	phys_addr_t alignment;
-
-	pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
-		 (unsigned long)size, (unsigned long)base,
-		 (unsigned long)limit);
-
-	/* Sanity checks */
-	if (cma_reserved_count == ARRAY_SIZE(cma_reserved)) {
-		pr_err("Not enough slots for CMA reserved regions!\n");
-		return -ENOSPC;
-	}
-
-	if (!size)
-		return -EINVAL;
-
-	/* Sanitise input arguments */
-	alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
-	base = ALIGN(base, alignment);
-	size = ALIGN(size, alignment);
-	limit &= ~(alignment - 1);
-
-	/* Reserve memory */
-	if (base) {
-		if (memblock_is_region_reserved(base, size) ||
-		    memblock_reserve(base, size) < 0) {
-			base = -EBUSY;
-			goto err;
-		}
-	} else {
-		/*
-		 * Use __memblock_alloc_base() since
-		 * memblock_alloc_base() panic()s.
-		 */
-		phys_addr_t addr = __memblock_alloc_base(size, alignment, limit);
-		if (!addr) {
-			base = -ENOMEM;
-			goto err;
-		} else {
-			base = addr;
-		}
-	}
+	int ret;
 
-	/*
-	 * Each reserved area must be initialised later, when more kernel
-	 * subsystems (like slab allocator) are available.
-	 */
-	r->start = base;
-	r->size = size;
-	r->dev = dev;
-	cma_reserved_count++;
-	pr_info("CMA: reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
-		(unsigned long)base);
+	ret = cma_declare_contiguous(base, size, limit, 0, 0, fixed, res_cma);
+	if (ret)
+		return ret;
 
 	/* Architecture specific contiguous memory fixup. */
-	dma_contiguous_early_fixup(base, size);
+	dma_contiguous_early_fixup(cma_get_base(*res_cma),
+				   cma_get_size(*res_cma));
+
 	return 0;
-err:
-	pr_err("CMA: failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
-	return base;
 }
 
 /**
@@ -300,57 +184,16 @@
  *
  * This function allocates memory buffer for specified device. It uses
  * device specific contiguous memory area if available or the default
- * global one. Requires architecture specific get_dev_cma_area() helper
+ * global one. Requires architecture specific dev_get_cma_area() helper
  * function.
  */
-struct page *dma_alloc_from_contiguous(struct device *dev, int count,
+struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
 				       unsigned int align)
 {
-	unsigned long mask, pfn, pageno, start = 0;
-	struct cma *cma = dev_get_cma_area(dev);
-	struct page *page = NULL;
-	int ret;
-
-	if (!cma || !cma->count)
-		return NULL;
-
 	if (align > CONFIG_CMA_ALIGNMENT)
 		align = CONFIG_CMA_ALIGNMENT;
 
-	pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
-		 count, align);
-
-	if (!count)
-		return NULL;
-
-	mask = (1 << align) - 1;
-
-	mutex_lock(&cma_mutex);
-
-	for (;;) {
-		pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
-						    start, count, mask);
-		if (pageno >= cma->count)
-			break;
-
-		pfn = cma->base_pfn + pageno;
-		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
-		if (ret == 0) {
-			bitmap_set(cma->bitmap, pageno, count);
-			page = pfn_to_page(pfn);
-			break;
-		} else if (ret != -EBUSY) {
-			break;
-		}
-		pr_debug("%s(): memory range at %p is busy, retrying\n",
-			 __func__, pfn_to_page(pfn));
-		/* try again with a bit different memory target */
-		start = pageno + mask + 1;
-	}
-
-	mutex_unlock(&cma_mutex);
-	pr_debug("%s(): returned %p\n", __func__, page);
-	return page;
+	return cma_alloc(dev_get_cma_area(dev), count, align);
 }
 
 /**
@@ -366,25 +209,72 @@
 bool dma_release_from_contiguous(struct device *dev, struct page *pages,
 				 int count)
 {
-	struct cma *cma = dev_get_cma_area(dev);
-	unsigned long pfn;
+	return cma_release(dev_get_cma_area(dev), pages, count);
+}
+
+/*
+ * Support for reserved memory regions defined in device tree
+ */
+#ifdef CONFIG_OF_RESERVED_MEM
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/of_reserved_mem.h>
+
+#undef pr_fmt
+#define pr_fmt(fmt) fmt
+
+static int rmem_cma_device_init(struct reserved_mem *rmem, struct device *dev)
+{
+	dev_set_cma_area(dev, rmem->priv);
+	return 0;
+}
+
+static void rmem_cma_device_release(struct reserved_mem *rmem,
+				    struct device *dev)
+{
+	dev_set_cma_area(dev, NULL);
+}
+
+static const struct reserved_mem_ops rmem_cma_ops = {
+	.device_init	= rmem_cma_device_init,
+	.device_release = rmem_cma_device_release,
+};
+
+static int __init rmem_cma_setup(struct reserved_mem *rmem)
+{
+	phys_addr_t align = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
+	phys_addr_t mask = align - 1;
+	unsigned long node = rmem->fdt_node;
+	struct cma *cma;
+	int err;
 
-	if (!cma || !pages)
-		return false;
+	if (!of_get_flat_dt_prop(node, "reusable", NULL) ||
+	    of_get_flat_dt_prop(node, "no-map", NULL))
+		return -EINVAL;
 
-	pr_debug("%s(page %p)\n", __func__, (void *)pages);
+	if ((rmem->base & mask) || (rmem->size & mask)) {
+		pr_err("Reserved memory: incorrect alignment of CMA region\n");
+		return -EINVAL;
+	}
 
-	pfn = page_to_pfn(pages);
+	err = cma_init_reserved_mem(rmem->base, rmem->size, 0, &cma);
+	if (err) {
+		pr_err("Reserved memory: unable to setup CMA region\n");
+		return err;
+	}
+	/* Architecture specific contiguous memory fixup. */
+	dma_contiguous_early_fixup(rmem->base, rmem->size);
 
-	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
-		return false;
+	if (of_get_flat_dt_prop(node, "linux,cma-default", NULL))
+		dma_contiguous_set_default(cma);
 
-	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);
+	rmem->ops = &rmem_cma_ops;
+	rmem->priv = cma;
 
-	mutex_lock(&cma_mutex);
-	bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count);
-	free_contig_range(pfn, count);
-	mutex_unlock(&cma_mutex);
+	pr_info("Reserved memory: created CMA memory pool at %pa, size %ld MiB\n",
+		&rmem->base, (unsigned long)rmem->size / SZ_1M);
 
-	return true;
+	return 0;
 }
+RESERVEDMEM_OF_DECLARE(cma, "shared-dma-pool", rmem_cma_setup);
+#endif
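Note: the patch above replaces the driver-local CMA bookkeeping with calls into the common CMA code (cma_declare_contiguous(), cma_alloc(), cma_release()) and adds device-tree "shared-dma-pool" support. As a minimal sketch of the two wrappers that remain in this file after the change, a consumer could look like the following; the helper name, page count handling and error codes are illustrative assumptions, not part of the patch:

#include <linux/device.h>
#include <linux/dma-contiguous.h>
#include <linux/errno.h>

/* Hypothetical helper: allocate and free one contiguous run of pages. */
static int example_cma_roundtrip(struct device *dev, size_t count)
{
	struct page *pages;

	/* align = 0: no alignment beyond a single page is requested */
	pages = dma_alloc_from_contiguous(dev, count, 0);
	if (!pages)
		return -ENOMEM;

	/* ... map and use the buffer here ... */

	if (!dma_release_from_contiguous(dev, pages, count))
		return -EINVAL;

	return 0;
}

Drivers rarely call these wrappers directly; on CMA-backed platforms the DMA mapping backends (e.g. dma_alloc_coherent()) route large allocations through them.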