--- zzzz-none-000/linux-4.4.271/drivers/iommu/iommu.c	2021-06-03 06:22:09.000000000 +0000
+++ hawkeye-5590-750/linux-4.4.271/drivers/iommu/iommu.c	2023-04-19 10:22:28.000000000 +0000
@@ -31,6 +31,7 @@
 #include <linux/err.h>
 #include <linux/pci.h>
 #include <linux/bitops.h>
+#include <linux/property.h>
 #include <trace/events/iommu.h>
 
 static struct kset *iommu_group_kset;
@@ -68,6 +69,58 @@
 			 const char *buf, size_t count);
 };
 
+static DEFINE_MUTEX(iommu_debug_lock);
+static LIST_HEAD(iommu_debug_attachments);
+
+/*
+ * Used by debug tools to display the name of the device(s) associated
+ * with a particular domain.
+ */
+struct iommu_debug_attachment {
+	struct iommu_domain *domain;
+	struct iommu_group *group;
+	struct list_head list;
+};
+
+void iommu_debug_attach_device(struct iommu_domain *domain,
+			       struct device *dev)
+{
+	struct iommu_debug_attachment *attach;
+	struct iommu_group *group;
+
+	group = iommu_group_get(dev);
+	if (!group)
+		return;
+
+	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
+	if (!attach)
+		return;
+
+	attach->domain = domain;
+	attach->group = group;
+	INIT_LIST_HEAD(&attach->list);
+
+	mutex_lock(&iommu_debug_lock);
+	list_add(&attach->list, &iommu_debug_attachments);
+	mutex_unlock(&iommu_debug_lock);
+}
+
+void iommu_debug_domain_remove(struct iommu_domain *domain)
+{
+	struct iommu_debug_attachment *it, *tmp;
+
+	mutex_lock(&iommu_debug_lock);
+	list_for_each_entry_safe(it, tmp, &iommu_debug_attachments, list) {
+		if (it->domain != domain)
+			continue;
+		list_del(&it->list);
+		iommu_group_put(it->group);
+		kfree(it);
+	}
+
+	mutex_unlock(&iommu_debug_lock);
+}
+
 #define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)		\
 struct iommu_group_attribute iommu_group_attr_##_name =	\
 	__ATTR(_name, _mode, _show, _store)
@@ -130,7 +183,7 @@
 static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
 {
-	return sprintf(buf, "%s\n", group->name);
+	return snprintf(buf, PAGE_SIZE, "%s\n", group->name);
 }
 
 static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);
 
@@ -337,9 +390,9 @@
 	if (!domain || domain->type != IOMMU_DOMAIN_DMA)
 		return 0;
 
-	BUG_ON(!domain->ops->pgsize_bitmap);
+	BUG_ON(!domain->pgsize_bitmap);
 
-	pg_size = 1UL << __ffs(domain->ops->pgsize_bitmap);
+	pg_size = 1UL << __ffs(domain->pgsize_bitmap);
 	INIT_LIST_HEAD(&mappings);
 
 	iommu_get_dm_regions(dev, &mappings);
@@ -431,6 +484,7 @@
 	if (ret)
 		goto err_put_group;
 
+	/* Notify any listeners about change to group. */
 	blocking_notifier_call_chain(&group->notifier,
 				     IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);
 
@@ -572,6 +626,19 @@
 EXPORT_SYMBOL_GPL(iommu_group_get);
 
 /**
+ * iommu_group_ref_get - Increment reference on a group
+ * @group: the group to use, must not be NULL
+ *
+ * This function is called by iommu drivers to take additional references on an
+ * existing group. Returns the given group for convenience.
+ */
+struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
+{
+	kobject_get(group->devices_kobj);
+	return group;
+}
+
+/**
  * iommu_group_put - Decrement group reference
  * @group: the group to use
  *
@@ -1085,6 +1152,9 @@
 	domain->ops = bus->iommu_ops;
 	domain->type = type;
+	/* Assume all sizes by default; the driver may override this later */
+	domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap;
+	memset(domain->name, 0, IOMMU_DOMAIN_NAME_LEN);
 
 	return domain;
 }
@@ -1109,8 +1179,15 @@
 		return -ENODEV;
 
 	ret = domain->ops->attach_dev(domain, dev);
-	if (!ret)
+	if (!ret) {
 		trace_attach_device_to_domain(dev);
+		iommu_debug_attach_device(domain, dev);
+
+		if (!strnlen(domain->name, IOMMU_DOMAIN_NAME_LEN)) {
+			strlcpy(domain->name, dev_name(dev),
+				IOMMU_DOMAIN_NAME_LEN);
+		}
+	}
 	return ret;
 }
 
@@ -1146,6 +1223,8 @@
 static void __iommu_detach_device(struct iommu_domain *domain,
 				  struct device *dev)
 {
+	iommu_debug_domain_remove(domain);
+
 	if (unlikely(domain->ops->detach_dev == NULL))
 		return;
 
@@ -1289,8 +1368,8 @@
 }
 EXPORT_SYMBOL_GPL(iommu_iova_to_phys);
 
-static size_t iommu_pgsize(struct iommu_domain *domain,
-			   unsigned long addr_merge, size_t size)
+size_t iommu_pgsize(unsigned long pgsize_bitmap,
+		    unsigned long addr_merge, size_t size)
 {
 	unsigned int pgsize_idx;
 	size_t pgsize;
@@ -1309,7 +1388,7 @@
 	pgsize = (1UL << (pgsize_idx + 1)) - 1;
 
 	/* throw away page sizes not supported by the hardware */
-	pgsize &= domain->ops->pgsize_bitmap;
+	pgsize &= pgsize_bitmap;
 
 	/* make sure we're still sane */
 	BUG_ON(!pgsize);
@@ -1327,17 +1406,18 @@
 	unsigned long orig_iova = iova;
 	unsigned int min_pagesz;
 	size_t orig_size = size;
+	phys_addr_t orig_paddr = paddr;
 	int ret = 0;
 
 	if (unlikely(domain->ops->map == NULL ||
-		     domain->ops->pgsize_bitmap == 0UL))
+		     domain->pgsize_bitmap == 0UL))
 		return -ENODEV;
 
 	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
 		return -EINVAL;
 
 	/* find out the minimum page size supported */
-	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
+	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
 
 	/*
 	 * both the virtual address and the physical one, as well as
@@ -1353,7 +1433,8 @@
 	pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);
 
 	while (size) {
-		size_t pgsize = iommu_pgsize(domain, iova | paddr, size);
+		size_t pgsize = iommu_pgsize(domain->pgsize_bitmap,
+					     iova | paddr, size);
 
 		pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
 			 iova, &paddr, pgsize);
@@ -1371,7 +1452,7 @@
 	if (ret)
 		iommu_unmap(domain, orig_iova, orig_size - size);
 	else
-		trace_map(orig_iova, paddr, orig_size);
+		trace_map(domain, orig_iova, orig_paddr, orig_size, prot);
 
 	return ret;
 }
@@ -1384,14 +1465,14 @@
 	unsigned long orig_iova = iova;
 
 	if (unlikely(domain->ops->unmap == NULL ||
-		     domain->ops->pgsize_bitmap == 0UL))
+		     domain->pgsize_bitmap == 0UL))
 		return -ENODEV;
 
 	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
 		return -EINVAL;
 
 	/* find out the minimum page size supported */
-	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
+	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
 
 	/*
 	 * The virtual address, as well as the size of the mapping, must be
@@ -1411,7 +1492,8 @@
 	 * or we hit an area that isn't mapped.
 	 */
 	while (unmapped < size) {
-		size_t pgsize = iommu_pgsize(domain, iova, size - unmapped);
+		size_t pgsize = iommu_pgsize(domain->pgsize_bitmap,
+					     iova, size - unmapped);
 		unmapped_page = domain->ops->unmap(domain, iova, pgsize);
 		if (!unmapped_page)
 			break;
@@ -1424,11 +1506,23 @@
 		unmapped += unmapped_page;
 	}
 
-	trace_unmap(orig_iova, size, unmapped);
+	trace_unmap(domain, orig_iova, size, unmapped);
 	return unmapped;
 }
 EXPORT_SYMBOL_GPL(iommu_unmap);
 
+size_t iommu_map_sg(struct iommu_domain *domain,
+		    unsigned long iova, struct scatterlist *sg,
+		    unsigned int nents, int prot)
+{
+	size_t mapped;
+
+	mapped = domain->ops->map_sg(domain, iova, sg, nents, prot);
+	trace_map_sg(domain, iova, mapped, prot);
+	return mapped;
+}
+EXPORT_SYMBOL(iommu_map_sg);
+
 size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
 			    struct scatterlist *sg, unsigned int nents, int prot)
 {
@@ -1437,10 +1531,10 @@
 	unsigned int i, min_pagesz;
 	int ret;
 
-	if (unlikely(domain->ops->pgsize_bitmap == 0UL))
+	if (unlikely(domain->pgsize_bitmap == 0UL))
 		return 0;
 
-	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
+	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
 
 	for_each_sg(sg, s, nents, i) {
 		phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;
@@ -1492,6 +1586,47 @@
 }
 EXPORT_SYMBOL_GPL(iommu_domain_window_disable);
 
+/**
+ * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
+ * @domain: the iommu domain where the fault has happened
+ * @dev: the device where the fault has happened
+ * @iova: the faulting address
+ * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...)
+ *
+ * This function should be called by the low-level IOMMU implementations
+ * whenever IOMMU faults happen, to allow high-level users, that are
+ * interested in such events, to know about them.
+ *
+ * This event may be useful for several possible use cases:
+ * - mere logging of the event
+ * - dynamic TLB/PTE loading
+ * - if restarting of the faulting device is required
+ *
+ * Returns 0 on success and an appropriate error code otherwise (if dynamic
+ * PTE/TLB loading will one day be supported, implementations will be able
+ * to tell whether it succeeded or not according to this return value).
+ *
+ * Specifically, -ENOSYS is returned if a fault handler isn't installed
+ * (though fault handlers can also return -ENOSYS, in case they want to
+ * elicit the default behavior of the IOMMU drivers).
+ */
+int report_iommu_fault(struct iommu_domain *domain,
+		       struct device *dev, unsigned long iova, int flags)
+{
+	int ret = -ENOSYS;
+
+	/*
+	 * if upper layers showed interest and installed a fault handler,
+	 * invoke it.
+	 */
+	if (domain->handler)
+		ret = domain->handler(domain, dev, iova, flags,
+				      domain->handler_token);
+
+	trace_io_page_fault(dev, iova, flags);
+	return ret;
+}
+
 static int __init iommu_init(void)
 {
 	iommu_group_kset = kset_create_and_add("iommu_groups",
@@ -1521,7 +1656,7 @@
 		break;
 	case DOMAIN_ATTR_PAGING:
 		paging = data;
-		*paging = (domain->ops->pgsize_bitmap != 0UL);
+		*paging = (domain->pgsize_bitmap != 0UL);
 		break;
 	case DOMAIN_ATTR_WINDOWS:
 		count = data;
@@ -1638,3 +1773,60 @@
 
 	return ret;
 }
+
+int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
+		      const struct iommu_ops *ops)
+{
+	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+
+	if (fwspec)
+		return ops == fwspec->ops ? 0 : -EINVAL;
+
+	fwspec = kzalloc(sizeof(*fwspec), GFP_KERNEL);
+	if (!fwspec)
+		return -ENOMEM;
+
+	of_node_get(to_of_node(iommu_fwnode));
+	fwspec->iommu_fwnode = iommu_fwnode;
+	fwspec->ops = ops;
+	dev->iommu_fwspec = fwspec;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(iommu_fwspec_init);
+
+void iommu_fwspec_free(struct device *dev)
+{
+	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+
+	if (fwspec) {
+		fwnode_handle_put(fwspec->iommu_fwnode);
+		kfree(fwspec);
+		dev->iommu_fwspec = NULL;
+	}
+}
+EXPORT_SYMBOL_GPL(iommu_fwspec_free);
+
+int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
+{
+	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+	size_t size;
+	int i;
+
+	if (!fwspec)
+		return -EINVAL;
+
+	size = offsetof(struct iommu_fwspec, ids[fwspec->num_ids + num_ids]);
+	if (size > sizeof(*fwspec)) {
+		fwspec = krealloc(dev->iommu_fwspec, size, GFP_KERNEL);
+		if (!fwspec)
+			return -ENOMEM;
+	}
+
+	for (i = 0; i < num_ids; i++)
+		fwspec->ids[fwspec->num_ids + i] = ids[i];
+
+	fwspec->num_ids += num_ids;
+	dev->iommu_fwspec = fwspec;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
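
Usage notes follow. These are illustrative sketches written against stock linux-4.4 APIs, not part of the patch itself.

The iommu_pgsize() refactor above only swaps the domain argument for a raw pgsize_bitmap; the selection logic is unchanged. Below is a standalone userspace mirror of that logic for experimenting with bitmaps, with the kernel helpers BUG_ON(), __ffs() and __fls() approximated by compiler builtins; it is a sketch, not kernel code.

#include <assert.h>
#include <stdio.h>
#include <stddef.h>

#define BUG_ON(x)	assert(!(x))
#define __ffs(x)	((unsigned int)__builtin_ctzl(x))
#define __fls(x)	((unsigned int)(8 * sizeof(unsigned long) - 1 - \
					__builtin_clzl(x)))

static size_t iommu_pgsize(unsigned long pgsize_bitmap,
			   unsigned long addr_merge, size_t size)
{
	unsigned int pgsize_idx;
	size_t pgsize;

	/* Max page size that still fits into 'size' */
	pgsize_idx = __fls(size);

	/* Address alignment may force a smaller page */
	if (addr_merge) {
		unsigned int align_pgsize_idx = __ffs(addr_merge);

		if (align_pgsize_idx < pgsize_idx)
			pgsize_idx = align_pgsize_idx;
	}

	/* Build a mask of acceptable sizes, restricted to hardware support */
	pgsize = (1UL << (pgsize_idx + 1)) - 1;
	pgsize &= pgsize_bitmap;
	BUG_ON(!pgsize);

	/* Pick the biggest remaining page */
	return 1UL << __fls(pgsize);
}

int main(void)
{
	unsigned long bitmap = (1UL << 12) | (1UL << 21) | (1UL << 30);

	/* A 2MiB-aligned, 3MiB request starts with one 2MiB page */
	printf("0x%zx\n", iommu_pgsize(bitmap, 0x200000, 0x300000));
	return 0;
}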
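The new iommu_map_sg() wrapper calls domain->ops->map_sg unconditionally, so it relies on every driver populating that callback; in linux-4.4 drivers typically point it at the generic default_iommu_map_sg() walker that this file already provides. A sketch of such an ops table; the my_iommu_* callbacks and the chosen pgsize_bitmap are hypothetical:

#include <linux/iommu.h>
#include <linux/sizes.h>

/* Hypothetical driver callbacks, stubbed for illustration. */
static int my_iommu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot)
{
	return 0;	/* would program one PTE of 'size' bytes */
}

static size_t my_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			     size_t size)
{
	return size;	/* would tear down one PTE of 'size' bytes */
}

static struct iommu_ops my_iommu_ops = {
	.map		= my_iommu_map,
	.unmap		= my_iommu_unmap,
	.map_sg		= default_iommu_map_sg,	/* generic scatterlist walk */
	.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,
};

Note that after this patch the map/unmap paths consult the per-domain copy, domain->pgsize_bitmap, which __iommu_domain_alloc() seeds from ops->pgsize_bitmap.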
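report_iommu_fault() only does useful work once a consumer installs a handler on the domain; iommu_set_fault_handler(), already present in linux-4.4, stores it in domain->handler and domain->handler_token. A minimal sketch of such a consumer; the my_* names are illustrative:

#include <linux/iommu.h>
#include <linux/device.h>

/* Hypothetical fault handler: log and fall back to the driver default. */
static int my_fault_handler(struct iommu_domain *domain, struct device *dev,
			    unsigned long iova, int flags, void *token)
{
	dev_err(dev, "iommu %s fault at iova 0x%lx\n",
		(flags & IOMMU_FAULT_WRITE) ? "write" : "read", iova);

	/* -ENOSYS asks the IOMMU driver for its default behaviour */
	return -ENOSYS;
}

static void my_register_handler(struct iommu_domain *domain, void *token)
{
	iommu_set_fault_handler(domain, my_fault_handler, token);
}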
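The iommu_fwspec trio at the end mirrors the upstream firmware-description API: iommu_fwspec_init() allocates (or validates) dev->iommu_fwspec, iommu_fwspec_add_ids() grows the trailing ids[] array with krealloc(), and iommu_fwspec_free() drops the fwnode reference. A hedged sketch of the expected call sequence, assuming the companion iommu.h changes declare these helpers and the struct device member; the caller name is hypothetical:

#include <linux/iommu.h>
#include <linux/fwnode.h>

/*
 * Hypothetical caller: bind 'dev' to the IOMMU described by 'iommu_fwnode'
 * and record its stream IDs via the three helpers added above.
 */
static int my_parse_iommu(struct device *dev,
			  struct fwnode_handle *iommu_fwnode,
			  const struct iommu_ops *ops,
			  u32 *ids, int num_ids)
{
	int ret;

	ret = iommu_fwspec_init(dev, iommu_fwnode, ops);
	if (ret)
		return ret;	/* -EINVAL if different ops already bound */

	ret = iommu_fwspec_add_ids(dev, ids, num_ids);
	if (ret)
		iommu_fwspec_free(dev);	/* unwind on allocation failure */

	return ret;
}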