--- zzzz-none-000/linux-4.4.60/drivers/iommu/iommu.c	2017-04-08 07:53:53.000000000 +0000
+++ scorpion-7490-727/linux-4.4.60/drivers/iommu/iommu.c	2021-02-04 17:41:59.000000000 +0000
@@ -31,6 +31,7 @@
 #include <linux/err.h>
 #include <linux/pci.h>
 #include <linux/bitops.h>
+#include <linux/property.h>
 #include <trace/events/iommu.h>
 
 static struct kset *iommu_group_kset;
@@ -337,9 +338,9 @@
 	if (!domain || domain->type != IOMMU_DOMAIN_DMA)
 		return 0;
 
-	BUG_ON(!domain->ops->pgsize_bitmap);
+	BUG_ON(!domain->pgsize_bitmap);
 
-	pg_size = 1UL << __ffs(domain->ops->pgsize_bitmap);
+	pg_size = 1UL << __ffs(domain->pgsize_bitmap);
 	INIT_LIST_HEAD(&mappings);
 
 	iommu_get_dm_regions(dev, &mappings);
@@ -435,6 +436,7 @@
 		__iommu_attach_device(group->domain, dev);
 	mutex_unlock(&group->mutex);
 
+	/* Notify any listeners about change to group. */
 	blocking_notifier_call_chain(&group->notifier,
 				     IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);
 
@@ -560,6 +562,19 @@
 EXPORT_SYMBOL_GPL(iommu_group_get);
 
 /**
+ * iommu_group_ref_get - Increment reference on a group
+ * @group: the group to use, must not be NULL
+ *
+ * This function is called by iommu drivers to take additional references on an
+ * existing group. Returns the given group for convenience.
+ */
+struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
+{
+	kobject_get(group->devices_kobj);
+	return group;
+}
+
+/**
  * iommu_group_put - Decrement group reference
  * @group: the group to use
  *
@@ -914,8 +929,9 @@
 	 * result in ADD/DEL notifiers to group->notifier
 	 */
 	if (action == BUS_NOTIFY_ADD_DEVICE) {
-		if (ops->add_device)
-			return ops->add_device(dev);
+		/*if (ops->add_device)
+		 * return ops->add_device(dev);
+		 */
 	} else if (action == BUS_NOTIFY_REMOVED_DEVICE) {
 		if (ops->remove_device && dev->iommu_group) {
 			ops->remove_device(dev);
@@ -1073,6 +1089,9 @@
 
 	domain->ops  = bus->iommu_ops;
 	domain->type = type;
+	/* Assume all sizes by default; the driver may override this later */
+	domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap;
+	memset(domain->name, 0, IOMMU_DOMAIN_NAME_LEN);
 
 	return domain;
 }
@@ -1097,8 +1116,14 @@
 		return -ENODEV;
 
 	ret = domain->ops->attach_dev(domain, dev);
-	if (!ret)
+	if (!ret) {
 		trace_attach_device_to_domain(dev);
+
+		if (!strnlen(domain->name, IOMMU_DOMAIN_NAME_LEN)) {
+			strlcpy(domain->name, dev_name(dev),
+				IOMMU_DOMAIN_NAME_LEN);
+		}
+	}
 	return ret;
 }
 
@@ -1297,7 +1322,7 @@
 		pgsize = (1UL << (pgsize_idx + 1)) - 1;
 
 	/* throw away page sizes not supported by the hardware */
-	pgsize &= domain->ops->pgsize_bitmap;
+	pgsize &= domain->pgsize_bitmap;
 
 	/* make sure we're still sane */
 	BUG_ON(!pgsize);
@@ -1315,17 +1340,18 @@
 	unsigned long orig_iova = iova;
 	unsigned int min_pagesz;
 	size_t orig_size = size;
+	phys_addr_t orig_paddr = paddr;
 	int ret = 0;
 
 	if (unlikely(domain->ops->map == NULL ||
-		     domain->ops->pgsize_bitmap == 0UL))
+		     domain->pgsize_bitmap == 0UL))
 		return -ENODEV;
 
 	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
 		return -EINVAL;
 
 	/* find out the minimum page size supported */
-	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
+	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
 
 	/*
 	 * both the virtual address and the physical one, as well as
@@ -1359,7 +1385,7 @@
 	if (ret)
 		iommu_unmap(domain, orig_iova, orig_size - size);
 	else
-		trace_map(orig_iova, paddr, orig_size);
+		trace_map(domain, orig_iova, orig_paddr, orig_size, prot);
 
 	return ret;
 }
@@ -1372,14 +1398,14 @@
 	unsigned long orig_iova = iova;
 
 	if (unlikely(domain->ops->unmap == NULL ||
-		     domain->ops->pgsize_bitmap == 0UL))
+		     domain->pgsize_bitmap == 0UL))
 		return -ENODEV;
 
 	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
 		return -EINVAL;
 
 	/* find out the minimum page size supported */
-	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
+	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
 
 	/*
 	 * The virtual address, as well as the size of the mapping, must be
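
The hunks above replace every page-size lookup through domain->ops->pgsize_bitmap with the per-domain copy initialised in __iommu_domain_alloc(), so a driver may narrow the supported page sizes of one domain without affecting others. As a minimal sketch of the consumer-side logic (the helper below is hypothetical, not part of this patch): the smallest supported page size is the lowest set bit of the bitmap, and iommu_map()/iommu_unmap() reject arguments that are not aligned to it.

	/*
	 * Hypothetical helper mirroring the checks in iommu_map() above:
	 * the lowest set bit of the per-domain bitmap is the minimum page
	 * size, and iova, paddr and size must all be aligned to it.
	 */
	static bool my_domain_args_aligned(struct iommu_domain *domain,
					   unsigned long iova,
					   phys_addr_t paddr, size_t size)
	{
		unsigned int min_pagesz;

		if (domain->pgsize_bitmap == 0UL)
			return false;	/* domain has no paging support */

		min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

		return IS_ALIGNED(iova | paddr | size, min_pagesz);
	}

For a bitmap of SZ_4K | SZ_2M | SZ_1G this gives min_pagesz == SZ_4K, so a mapping at iova 0x1000 passes while one at iova 0x800 makes iommu_map() return -EINVAL.
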
@@ -1412,11 +1438,23 @@
 		unmapped += unmapped_page;
 	}
 
-	trace_unmap(orig_iova, size, unmapped);
+	trace_unmap(domain, orig_iova, size, unmapped);
 	return unmapped;
 }
 EXPORT_SYMBOL_GPL(iommu_unmap);
 
+size_t iommu_map_sg(struct iommu_domain *domain,
+		    unsigned long iova, struct scatterlist *sg,
+		    unsigned int nents, int prot)
+{
+	size_t mapped;
+
+	mapped = domain->ops->map_sg(domain, iova, sg, nents, prot);
+	trace_map_sg(domain, iova, mapped, prot);
+	return mapped;
+}
+EXPORT_SYMBOL(iommu_map_sg);
+
 size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
 			    struct scatterlist *sg, unsigned int nents, int prot)
 {
@@ -1425,10 +1463,10 @@
 	unsigned int i, min_pagesz;
 	int ret;
 
-	if (unlikely(domain->ops->pgsize_bitmap == 0UL))
+	if (unlikely(domain->pgsize_bitmap == 0UL))
 		return 0;
 
-	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
+	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
 
 	for_each_sg(sg, s, nents, i) {
 		phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;
@@ -1480,6 +1518,47 @@
 }
 EXPORT_SYMBOL_GPL(iommu_domain_window_disable);
 
+/**
+ * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
+ * @domain: the iommu domain where the fault has happened
+ * @dev: the device where the fault has happened
+ * @iova: the faulting address
+ * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...)
+ *
+ * This function should be called by the low-level IOMMU implementations
+ * whenever IOMMU faults happen, to allow high-level users, that are
+ * interested in such events, to know about them.
+ *
+ * This event may be useful for several possible use cases:
+ * - mere logging of the event
+ * - dynamic TLB/PTE loading
+ * - if restarting of the faulting device is required
+ *
+ * Returns 0 on success and an appropriate error code otherwise (if dynamic
+ * PTE/TLB loading will one day be supported, implementations will be able
+ * to tell whether it succeeded or not according to this return value).
+ *
+ * Specifically, -ENOSYS is returned if a fault handler isn't installed
+ * (though fault handlers can also return -ENOSYS, in case they want to
+ * elicit the default behavior of the IOMMU drivers).
+ */
+int report_iommu_fault(struct iommu_domain *domain,
+		       struct device *dev, unsigned long iova, int flags)
+{
+	int ret = -ENOSYS;
+
+	/*
+	 * if upper layers showed interest and installed a fault handler,
+	 * invoke it.
+	 */
+	if (domain->handler)
+		ret = domain->handler(domain, dev, iova, flags,
+				      domain->handler_token);
+
+	trace_io_page_fault(dev, iova, flags);
+	return ret;
+}
+
 static int __init iommu_init(void)
 {
 	iommu_group_kset = kset_create_and_add("iommu_groups",
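
report_iommu_fault() above only forwards a fault to whatever handler the domain's user has registered. A minimal sketch of that consumer side, assuming the stock iommu_set_fault_handler() API of this kernel (the handler name and token are illustrative):

	/*
	 * Illustrative fault handler: log the fault, then return -ENOSYS
	 * so the IOMMU driver falls back to its default behaviour.
	 */
	static int my_fault_handler(struct iommu_domain *domain,
				    struct device *dev, unsigned long iova,
				    int flags, void *token)
	{
		dev_err(dev, "IOMMU %s fault at IOVA %#lx\n",
			(flags & IOMMU_FAULT_WRITE) ? "write" : "read",
			iova);
		return -ENOSYS;
	}

	/* after iommu_attach_device(domain, dev): */
	iommu_set_fault_handler(domain, my_fault_handler, NULL);

A handler that returns 0 instead claims the fault as handled, which is the hook the kernel-doc above reserves for dynamic PTE/TLB loading.
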
@@ -1509,7 +1588,7 @@
 		break;
 	case DOMAIN_ATTR_PAGING:
 		paging  = data;
-		*paging = (domain->ops->pgsize_bitmap != 0UL);
+		*paging = (domain->pgsize_bitmap != 0UL);
 		break;
 	case DOMAIN_ATTR_WINDOWS:
 		count = data;
@@ -1626,3 +1705,60 @@
 
 	return ret;
 }
+
+int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
+		      const struct iommu_ops *ops)
+{
+	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+
+	if (fwspec)
+		return ops == fwspec->ops ? 0 : -EINVAL;
+
+	fwspec = kzalloc(sizeof(*fwspec), GFP_KERNEL);
+	if (!fwspec)
+		return -ENOMEM;
+
+	of_node_get(to_of_node(iommu_fwnode));
+	fwspec->iommu_fwnode = iommu_fwnode;
+	fwspec->ops = ops;
+	dev->iommu_fwspec = fwspec;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(iommu_fwspec_init);
+
+void iommu_fwspec_free(struct device *dev)
+{
+	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+
+	if (fwspec) {
+		fwnode_handle_put(fwspec->iommu_fwnode);
+		kfree(fwspec);
+		dev->iommu_fwspec = NULL;
+	}
+}
+EXPORT_SYMBOL_GPL(iommu_fwspec_free);
+
+int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
+{
+	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+	size_t size;
+	int i;
+
+	if (!fwspec)
+		return -EINVAL;
+
+	size = offsetof(struct iommu_fwspec, ids[fwspec->num_ids + num_ids]);
+	if (size > sizeof(*fwspec)) {
+		fwspec = krealloc(dev->iommu_fwspec, size, GFP_KERNEL);
+		if (!fwspec)
+			return -ENOMEM;
+	}
+
+	for (i = 0; i < num_ids; i++)
+		fwspec->ids[fwspec->num_ids + i] = ids[i];
+
+	fwspec->num_ids += num_ids;
+	dev->iommu_fwspec = fwspec;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
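
The three iommu_fwspec helpers are a backport of the upstream per-device firmware description: iommu_fwspec_init() binds a device to the IOMMU instance named by a fwnode, iommu_fwspec_add_ids() accumulates the device's master IDs (growing the trailing ids[] array via krealloc()), and iommu_fwspec_free() drops the fwnode reference and frees the structure. A minimal sketch of how an IOMMU driver's of_xlate callback would use them; my_iommu_ops and the layout of the "iommus" specifier are assumptions, not part of this patch:

	static const struct iommu_ops my_iommu_ops;	/* the driver's ops */

	/*
	 * Hypothetical of_xlate implementation, called once for each
	 * "iommus" phandle entry of a master device.
	 */
	static int my_iommu_of_xlate(struct device *dev,
				     struct of_phandle_args *args)
	{
		int ret;

		/*
		 * The first call allocates dev->iommu_fwspec; later calls
		 * only verify the device isn't claimed by different ops.
		 */
		ret = iommu_fwspec_init(dev, &args->np->fwnode,
					&my_iommu_ops);
		if (ret)
			return ret;

		/* record the master IDs from the specifier cells */
		return iommu_fwspec_add_ids(dev, args->args,
					    args->args_count);
	}

Note that iommu_fwspec_free() must undo the of_node_get() taken in iommu_fwspec_init(), which is why it calls fwnode_handle_put() on the stored fwnode.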