--- zzzz-none-000/linux-4.4.271/drivers/iommu/io-pgtable-arm.c	2021-06-03 06:22:09.000000000 +0000
+++ hawkeye-5590-750/linux-4.4.271/drivers/iommu/io-pgtable-arm.c	2023-04-19 10:22:28.000000000 +0000
@@ -22,9 +22,11 @@
 
 #include <linux/iommu.h>
 #include <linux/kernel.h>
+#include <linux/scatterlist.h>
 #include <linux/sizes.h>
 #include <linux/slab.h>
 #include <linux/types.h>
+#include
 
 #include <asm/barrier.h>
 
@@ -58,8 +60,10 @@
 	((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1))		\
 	  * (d)->bits_per_level) + (d)->pg_shift)
 
+#define ARM_LPAE_GRANULE(d)	(1UL << (d)->pg_shift)
+
 #define ARM_LPAE_PAGES_PER_PGD(d)					\
-	DIV_ROUND_UP((d)->pgd_size, 1UL << (d)->pg_shift)
+	DIV_ROUND_UP((d)->pgd_size, ARM_LPAE_GRANULE(d))
 
 /*
  * Calculate the index at level l used to map virtual address a using the
@@ -169,7 +173,7 @@
 /* IOPTE accessors */
 #define iopte_deref(pte,d)					\
 	(__va((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)	\
-	& ~((1ULL << (d)->pg_shift) - 1)))
+	& ~(ARM_LPAE_GRANULE(d) - 1ULL)))
 
 #define iopte_type(pte,l)					\
 	(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)
@@ -326,7 +330,7 @@
 	/* Grab a pointer to the next level */
 	pte = *ptep;
 	if (!pte) {
-		cptep = __arm_lpae_alloc_pages(1UL << data->pg_shift,
+		cptep = __arm_lpae_alloc_pages(ARM_LPAE_GRANULE(data),
 					       GFP_ATOMIC, cfg);
 		if (!cptep)
 			return -ENOMEM;
@@ -403,6 +407,63 @@
 	return ret;
 }
 
+static int arm_lpae_map_sg(struct io_pgtable_ops *ops, unsigned long iova,
+			   struct scatterlist *sg, unsigned int nents,
+			   int iommu_prot, size_t *size)
+{
+	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
+	arm_lpae_iopte *ptep = data->pgd;
+	int lvl = ARM_LPAE_START_LVL(data);
+	arm_lpae_iopte prot;
+	struct scatterlist *s;
+	size_t mapped = 0;
+	int i, ret;
+	unsigned int min_pagesz;
+
+	/* If no access, then nothing to do */
+	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
+		goto out_err;
+
+	prot = arm_lpae_prot_to_pte(data, iommu_prot);
+
+	min_pagesz = 1 << __ffs(data->iop.cfg.pgsize_bitmap);
+
+	for_each_sg(sg, s, nents, i) {
+		phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;
+		size_t size = s->length;
+
+		/*
+		 * We are mapping on IOMMU page boundaries, so offset within
+		 * the page must be 0. However, the IOMMU may support pages
+		 * smaller than PAGE_SIZE, so s->offset may still represent
+		 * an offset of that boundary within the CPU page.
+		 */
+		if (!IS_ALIGNED(s->offset, min_pagesz))
+			goto out_err;
+
+		while (size) {
+			size_t pgsize = iommu_pgsize(
+				data->iop.cfg.pgsize_bitmap, iova | phys, size);
+			ret = __arm_lpae_map(data, iova, phys, pgsize, prot,
+					     lvl, ptep);
+			if (ret)
+				goto out_err;
+
+			iova += pgsize;
+			mapped += pgsize;
+			phys += pgsize;
+			size -= pgsize;
+		}
+	}
+
+	return mapped;
+
+out_err:
+	/* Return the size of the partial mapping so that it can be undone */
+	*size = mapped;
+	return 0;
+}
+
 static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
 				    arm_lpae_iopte *ptep)
 {
@@ -412,7 +473,7 @@
 	if (lvl == ARM_LPAE_START_LVL(data))
 		table_size = data->pgd_size;
 	else
-		table_size = 1UL << data->pg_shift;
+		table_size = ARM_LPAE_GRANULE(data);
 
 	start = ptep;
 
@@ -478,7 +539,7 @@
 
 	__arm_lpae_set_pte(ptep, table, cfg);
 	iova &= ~(blk_size - 1);
-	cfg->tlb->tlb_add_flush(iova, blk_size, true, data->iop.cookie);
+	cfg->tlb->tlb_add_flush(iova, blk_size, blk_size, true, data->iop.cookie);
 	return size;
 }
 
@@ -504,12 +565,13 @@
 
 		if (!iopte_leaf(pte, lvl)) {
 			/* Also flush any partial walks */
-			tlb->tlb_add_flush(iova, size, false, cookie);
+			tlb->tlb_add_flush(iova, size, ARM_LPAE_GRANULE(data),
+					   false, cookie);
 			tlb->tlb_sync(cookie);
 			ptep = iopte_deref(pte, data);
 			__arm_lpae_free_pgtable(data, lvl + 1, ptep);
 		} else {
-			tlb->tlb_add_flush(iova, size, true, cookie);
+			tlb->tlb_add_flush(iova, size, size, true, cookie);
 		}
 
 		return size;
@@ -528,7 +590,7 @@
 	return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep);
 }
 
-static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
+static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
 			  size_t size)
 {
 	size_t unmapped;
@@ -575,7 +637,7 @@
 	return 0;
 
 found_translation:
-	iova &= ((1 << data->pg_shift) - 1);
+	iova &= (ARM_LPAE_GRANULE(data) - 1);
 	return ((phys_addr_t)iopte_to_pfn(pte,data) << data->pg_shift) | iova;
 }
 
@@ -652,6 +714,7 @@
 
 	data->iop.ops = (struct io_pgtable_ops) {
 		.map		= arm_lpae_map,
+		.map_sg		= arm_lpae_map_sg,
 		.unmap		= arm_lpae_unmap,
 		.iova_to_phys	= arm_lpae_iova_to_phys,
 	};
@@ -673,7 +736,7 @@
 	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
 	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
 
-	switch (1 << data->pg_shift) {
+	switch (ARM_LPAE_GRANULE(data)) {
 	case SZ_4K:
 		reg |= ARM_LPAE_TCR_TG0_4K;
 		break;
@@ -774,7 +837,7 @@
 
 	sl = ARM_LPAE_START_LVL(data);
 
-	switch (1 << data->pg_shift) {
+	switch (ARM_LPAE_GRANULE(data)) {
 	case SZ_4K:
 		reg |= ARM_LPAE_TCR_TG0_4K;
 		sl++; /* SL0 format is different for 4K granule size */
@@ -894,8 +957,8 @@
 	WARN_ON(cookie != cfg_cookie);
 }
 
-static void dummy_tlb_add_flush(unsigned long iova, size_t size, bool leaf,
-				void *cookie)
+static void dummy_tlb_add_flush(unsigned long iova, size_t size,
+				size_t granule, bool leaf, void *cookie)
 {
 	WARN_ON(cookie != cfg_cookie);
 	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));