--- zzzz-none-000/linux-3.10.107/drivers/xen/xenbus/xenbus_client.c	2017-06-27 09:49:32.000000000 +0000
+++ scorpion-7490-727/linux-3.10.107/drivers/xen/xenbus/xenbus_client.c	2021-02-04 17:41:59.000000000 +0000
@@ -37,7 +37,7 @@
 #include <linux/vmalloc.h>
 #include <linux/export.h>
 #include <asm/xen/hypervisor.h>
-#include <asm/xen/page.h>
+#include <xen/page.h>
 #include <xen/interface/xen.h>
 #include <xen/interface/event_channel.h>
 #include <xen/balloon.h>
@@ -45,23 +45,37 @@
 #include <xen/grant_table.h>
 #include <xen/xenbus.h>
 #include <xen/xen.h>
+#include <xen/features.h>
 
 #include "xenbus_probe.h"
 
+#define XENBUS_PAGES(_grants)	(DIV_ROUND_UP(_grants, XEN_PFN_PER_PAGE))
+
+#define XENBUS_MAX_RING_PAGES	(XENBUS_PAGES(XENBUS_MAX_RING_GRANTS))
+
 struct xenbus_map_node {
 	struct list_head next;
 	union {
-		struct vm_struct *area; /* PV */
-		struct page *page;     /* HVM */
+		struct {
+			struct vm_struct *area;
+		} pv;
+		struct {
+			struct page *pages[XENBUS_MAX_RING_PAGES];
+			unsigned long addrs[XENBUS_MAX_RING_GRANTS];
+			void *addr;
+		} hvm;
 	};
-	grant_handle_t handle;
+	grant_handle_t handles[XENBUS_MAX_RING_GRANTS];
+	unsigned int   nr_handles;
 };
 
 static DEFINE_SPINLOCK(xenbus_valloc_lock);
 static LIST_HEAD(xenbus_valloc_pages);
 
 struct xenbus_ring_ops {
-	int (*map)(struct xenbus_device *dev, int gnt, void **vaddr);
+	int (*map)(struct xenbus_device *dev,
+		   grant_ref_t *gnt_refs, unsigned int nr_grefs,
+		   void **vaddr);
 	int (*unmap)(struct xenbus_device *dev, void *vaddr);
 };
 
@@ -258,7 +272,6 @@
 static void xenbus_va_dev_error(struct xenbus_device *dev, int err,
 				const char *fmt, va_list ap)
 {
-	int ret;
 	unsigned int len;
 	char *printf_buffer = NULL;
 	char *path_buffer = NULL;
@@ -269,9 +282,7 @@
 		goto fail;
 
 	len = sprintf(printf_buffer, "%i ", -err);
-	ret = vsnprintf(printf_buffer+len, PRINTF_BUFFER_SIZE-len, fmt, ap);
-
-	BUG_ON(len + ret > PRINTF_BUFFER_SIZE-1);
+	vsnprintf(printf_buffer+len, PRINTF_BUFFER_SIZE-len, fmt, ap);
 
 	dev_err(&dev->dev, "%s\n", printf_buffer);
 
@@ -357,17 +368,39 @@
 /**
  * xenbus_grant_ring
  * @dev: xenbus device
- * @ring_mfn: mfn of ring to grant
-
- * Grant access to the given @ring_mfn to the peer of the given device. Return
- * 0 on success, or -errno on error. On error, the device will switch to
+ * @vaddr: starting virtual address of the ring
+ * @nr_pages: number of pages to be granted
+ * @grefs: grant reference array to be filled in
+ *
+ * Grant access to the given @vaddr to the peer of the given device.
+ * Then fill in @grefs with grant references. Return 0 on success, or
+ * -errno on error. On error, the device will switch to
  * XenbusStateClosing, and the error will be saved in the store.
  */
-int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn)
+int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr,
+		      unsigned int nr_pages, grant_ref_t *grefs)
 {
-	int err = gnttab_grant_foreign_access(dev->otherend_id, ring_mfn, 0);
-	if (err < 0)
-		xenbus_dev_fatal(dev, err, "granting access to ring page");
+	int err;
+	int i, j;
+
+	for (i = 0; i < nr_pages; i++) {
+		err = gnttab_grant_foreign_access(dev->otherend_id,
+						  virt_to_gfn(vaddr), 0);
+		if (err < 0) {
+			xenbus_dev_fatal(dev, err,
+					 "granting access to ring page");
+			goto fail;
+		}
+		grefs[i] = err;
+
+		vaddr = vaddr + XEN_PAGE_SIZE;
+	}
+
+	return 0;
+
+fail:
+	for (j = 0; j < i; j++)
+		gnttab_end_foreign_access_ref(grefs[j], 0);
 	return err;
 }
 EXPORT_SYMBOL_GPL(xenbus_grant_ring);
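
The reworked xenbus_grant_ring() grants one reference per XEN_PAGE_SIZE chunk of the ring and rolls all grants back if any of them fails. A minimal frontend-side sketch of the new calling convention (the helper name and the two-page ring are illustrative assumptions, not part of this patch):

	/* Hypothetical frontend: grant a ring of 2 * XEN_PAGE_SIZE bytes. */
	static int my_front_grant_ring(struct xenbus_device *dev, void *ring)
	{
		grant_ref_t grefs[2];	/* one reference per XEN_PAGE_SIZE chunk */
		int err;

		err = xenbus_grant_ring(dev, ring, 2, grefs);
		if (err < 0)
			return err;	/* device is already in XenbusStateClosing */

		/* grefs[0..1] would now be written to XenStore, e.g. as
		 * "ring-ref0"/"ring-ref1", for the backend to map. */
		return 0;
	}
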
@@ -400,33 +433,6 @@
 
 
 /**
- * Bind to an existing interdomain event channel in another domain. Returns 0
- * on success and stores the local port in *port. On error, returns -errno,
- * switches the device to XenbusStateClosing, and saves the error in XenStore.
- */
-int xenbus_bind_evtchn(struct xenbus_device *dev, int remote_port, int *port)
-{
-	struct evtchn_bind_interdomain bind_interdomain;
-	int err;
-
-	bind_interdomain.remote_dom = dev->otherend_id;
-	bind_interdomain.remote_port = remote_port;
-
-	err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
-					  &bind_interdomain);
-	if (err)
-		xenbus_dev_fatal(dev, err,
-				 "binding to event channel %d from domain %d",
-				 remote_port, dev->otherend_id);
-	else
-		*port = bind_interdomain.local_port;
-
-	return err;
-}
-EXPORT_SYMBOL_GPL(xenbus_bind_evtchn);
-
-
-/**
  * Free an existing event channel. Returns 0 on success or -errno on error.
  */
 int xenbus_free_evtchn(struct xenbus_device *dev, int port)
@@ -448,62 +454,130 @@
 /**
  * xenbus_map_ring_valloc
  * @dev: xenbus device
- * @gnt_ref: grant reference
+ * @gnt_refs: grant reference array
+ * @nr_grefs: number of grant references
  * @vaddr: pointer to address to be filled out by mapping
  *
- * Based on Rusty Russell's skeleton driver's map_page.
- * Map a page of memory into this domain from another domain's grant table.
- * xenbus_map_ring_valloc allocates a page of virtual address space, maps the
- * page to that address, and sets *vaddr to that address.
- * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
- * or -ENOMEM on error. If an error is returned, device will switch to
+ * Map @nr_grefs pages of memory into this domain from another
+ * domain's grant table. xenbus_map_ring_valloc allocates @nr_grefs
+ * pages of virtual address space, maps the pages to that address, and
+ * sets *vaddr to that address. Returns 0 on success, and GNTST_*
+ * (see xen/include/interface/grant_table.h) or -ENOMEM / -EINVAL on
+ * error. If an error is returned, device will switch to
 * XenbusStateClosing and the error message will be saved in XenStore.
 */
-int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
+int xenbus_map_ring_valloc(struct xenbus_device *dev, grant_ref_t *gnt_refs,
+			   unsigned int nr_grefs, void **vaddr)
 {
-	return ring_ops->map(dev, gnt_ref, vaddr);
+	return ring_ops->map(dev, gnt_refs, nr_grefs, vaddr);
 }
 EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
 
+/* N.B. sizeof(phys_addr_t) doesn't always equal to sizeof(unsigned
+ * long), e.g. 32-on-64. Caller is responsible for preparing the
+ * right array to feed into this function */
+static int __xenbus_map_ring(struct xenbus_device *dev,
+			     grant_ref_t *gnt_refs,
+			     unsigned int nr_grefs,
+			     grant_handle_t *handles,
+			     phys_addr_t *addrs,
+			     unsigned int flags,
+			     bool *leaked)
+{
+	struct gnttab_map_grant_ref map[XENBUS_MAX_RING_GRANTS];
+	struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
+	int i, j;
+	int err = GNTST_okay;
+
+	if (nr_grefs > XENBUS_MAX_RING_GRANTS)
+		return -EINVAL;
+
+	for (i = 0; i < nr_grefs; i++) {
+		memset(&map[i], 0, sizeof(map[i]));
+		gnttab_set_map_op(&map[i], addrs[i], flags, gnt_refs[i],
+				  dev->otherend_id);
+		handles[i] = INVALID_GRANT_HANDLE;
+	}
+
+	gnttab_batch_map(map, i);
+
+	for (i = 0; i < nr_grefs; i++) {
+		if (map[i].status != GNTST_okay) {
+			err = map[i].status;
+			xenbus_dev_fatal(dev, map[i].status,
+					 "mapping in shared page %d from domain %d",
+					 gnt_refs[i], dev->otherend_id);
+			goto fail;
+		} else
+			handles[i] = map[i].handle;
+	}
+
+	return GNTST_okay;
+
+ fail:
+	for (i = j = 0; i < nr_grefs; i++) {
+		if (handles[i] != INVALID_GRANT_HANDLE) {
+			memset(&unmap[j], 0, sizeof(unmap[j]));
+			gnttab_set_unmap_op(&unmap[j], (phys_addr_t)addrs[i],
+					    GNTMAP_host_map, handles[i]);
+			j++;
+		}
+	}
+
+	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, j))
+		BUG();
+
+	*leaked = false;
+	for (i = 0; i < j; i++) {
+		if (unmap[i].status != GNTST_okay) {
+			*leaked = true;
+			break;
+		}
+	}
+
+	return err;
+}
+
 static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
-				     int gnt_ref, void **vaddr)
+				     grant_ref_t *gnt_refs,
+				     unsigned int nr_grefs,
+				     void **vaddr)
 {
-	struct gnttab_map_grant_ref op = {
-		.flags = GNTMAP_host_map | GNTMAP_contains_pte,
-		.ref   = gnt_ref,
-		.dom   = dev->otherend_id,
-	};
 	struct xenbus_map_node *node;
 	struct vm_struct *area;
-	pte_t *pte;
+	pte_t *ptes[XENBUS_MAX_RING_GRANTS];
+	phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
+	int err = GNTST_okay;
+	int i;
+	bool leaked;
 
 	*vaddr = NULL;
 
+	if (nr_grefs > XENBUS_MAX_RING_GRANTS)
+		return -EINVAL;
+
 	node = kzalloc(sizeof(*node), GFP_KERNEL);
 	if (!node)
 		return -ENOMEM;
 
-	area = alloc_vm_area(PAGE_SIZE, &pte);
+	area = alloc_vm_area(XEN_PAGE_SIZE * nr_grefs, ptes);
 	if (!area) {
 		kfree(node);
 		return -ENOMEM;
 	}
 
-	op.host_addr = arbitrary_virt_to_machine(pte).maddr;
-
-	gnttab_batch_map(&op, 1);
+	for (i = 0; i < nr_grefs; i++)
+		phys_addrs[i] = arbitrary_virt_to_machine(ptes[i]).maddr;
 
-	if (op.status != GNTST_okay) {
-		free_vm_area(area);
-		kfree(node);
-		xenbus_dev_fatal(dev, op.status,
-				 "mapping in shared page %d from domain %d",
-				 gnt_ref, dev->otherend_id);
-		return op.status;
-	}
+	err = __xenbus_map_ring(dev, gnt_refs, nr_grefs, node->handles,
				phys_addrs,
+				GNTMAP_host_map | GNTMAP_contains_pte,
+				&leaked);
+	if (err)
+		goto failed;
 
-	node->handle = op.handle;
-	node->area = area;
+	node->nr_handles = nr_grefs;
+	node->pv.area = area;
 
 	spin_lock(&xenbus_valloc_lock);
 	list_add(&node->next, &xenbus_valloc_pages);
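
On the other side, a backend collects the peer's grant references from XenStore and passes the whole array to the extended xenbus_map_ring_valloc(). A minimal sketch, with a hypothetical helper name:

	/* Hypothetical backend: map an nr_grefs-reference ring from the peer. */
	static int my_back_map_ring(struct xenbus_device *dev,
				    grant_ref_t *grefs, unsigned int nr_grefs,
				    void **ring)
	{
		int err = xenbus_map_ring_valloc(dev, grefs, nr_grefs, ring);

		if (err)
			return err;	/* GNTST_*, -ENOMEM or -EINVAL, as documented */
		return 0;	/* tear down later with xenbus_unmap_ring_vfree() */
	}

The N.B. above __xenbus_map_ring() refers to configurations where sizeof(phys_addr_t) != sizeof(unsigned long), e.g. a 32-bit kernel with 64-bit physical addresses: callers must build a genuine phys_addr_t array rather than reinterpreting an unsigned long array in place, which is why the HVM helper below carries two parallel arrays.
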
@@ -511,14 +585,56 @@
 
 	*vaddr = area->addr;
 	return 0;
+
+failed:
+	if (!leaked)
+		free_vm_area(area);
+	else
+		pr_alert("leaking VM area %p size %u page(s)", area, nr_grefs);
+
+	kfree(node);
+	return err;
+}
+
+struct map_ring_valloc_hvm
+{
+	unsigned int idx;
+
+	/* Why do we need two arrays? See comment of __xenbus_map_ring */
+	phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
+	unsigned long addrs[XENBUS_MAX_RING_GRANTS];
+};
+
+static void xenbus_map_ring_setup_grant_hvm(unsigned long gfn,
+					    unsigned int goffset,
+					    unsigned int len,
+					    void *data)
+{
+	struct map_ring_valloc_hvm *info = data;
+	unsigned long vaddr = (unsigned long)gfn_to_virt(gfn);
+
+	info->phys_addrs[info->idx] = vaddr;
+	info->addrs[info->idx] = vaddr;
+
+	info->idx++;
 }
 
 static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
-				      int gnt_ref, void **vaddr)
+				      grant_ref_t *gnt_ref,
+				      unsigned int nr_grefs,
+				      void **vaddr)
 {
 	struct xenbus_map_node *node;
 	int err;
 	void *addr;
+	bool leaked = false;
+	struct map_ring_valloc_hvm info = {
+		.idx = 0,
+	};
+	unsigned int nr_pages = XENBUS_PAGES(nr_grefs);
+
+	if (nr_grefs > XENBUS_MAX_RING_GRANTS)
+		return -EINVAL;
 
 	*vaddr = NULL;
@@ -526,15 +642,29 @@
 	if (!node)
 		return -ENOMEM;
 
-	err = alloc_xenballooned_pages(1, &node->page, false /* lowmem */);
+	err = alloc_xenballooned_pages(nr_pages, node->hvm.pages);
 	if (err)
 		goto out_err;
 
-	addr = pfn_to_kaddr(page_to_pfn(node->page));
+	gnttab_foreach_grant(node->hvm.pages, nr_grefs,
+			     xenbus_map_ring_setup_grant_hvm,
+			     &info);
+
+	err = __xenbus_map_ring(dev, gnt_ref, nr_grefs, node->handles,
+				info.phys_addrs, GNTMAP_host_map, &leaked);
+	node->nr_handles = nr_grefs;
 
-	err = xenbus_map_ring(dev, gnt_ref, &node->handle, addr);
 	if (err)
-		goto out_err_free_ballooned_pages;
+		goto out_free_ballooned_pages;
+
+	addr = vmap(node->hvm.pages, nr_pages, VM_MAP | VM_IOREMAP,
+		    PAGE_KERNEL);
+	if (!addr) {
+		err = -ENOMEM;
+		goto out_xenbus_unmap_ring;
+	}
+
+	node->hvm.addr = addr;
 
 	spin_lock(&xenbus_valloc_lock);
 	list_add(&node->next, &xenbus_valloc_pages);
@@ -543,8 +673,15 @@
 	*vaddr = addr;
 	return 0;
 
- out_err_free_ballooned_pages:
-	free_xenballooned_pages(1, &node->page);
+ out_xenbus_unmap_ring:
+	if (!leaked)
+		xenbus_unmap_ring(dev, node->handles, nr_grefs, info.addrs);
+	else
+		pr_alert("leaking %p size %u page(s)",
+			 addr, nr_pages);
+ out_free_ballooned_pages:
+	if (!leaked)
+		free_xenballooned_pages(nr_pages, node->hvm.pages);
  out_err:
 	kfree(node);
 	return err;
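
The HVM path sizes its ballooned allocation with XENBUS_PAGES(), which converts a count of Xen grants (always 4 KiB-granular) into kernel pages. An illustration of the arithmetic, assuming a 64 KiB kernel page size:

	/* XEN_PFN_PER_PAGE = PAGE_SIZE / XEN_PAGE_SIZE = 65536 / 4096 = 16,
	 * so XENBUS_PAGES(32) = DIV_ROUND_UP(32, 16) = 2: two ballooned pages
	 * back a 32-grant ring. With 4 KiB kernel pages the mapping is 1:1. */
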
@@ -554,35 +691,37 @@
 /**
  * xenbus_map_ring
  * @dev: xenbus device
- * @gnt_ref: grant reference
- * @handle: pointer to grant handle to be filled
- * @vaddr: address to be mapped to
+ * @gnt_refs: grant reference array
+ * @nr_grefs: number of grant references
+ * @handles: pointer to grant handle to be filled
+ * @vaddrs: addresses to be mapped to
+ * @leaked: set if we failed to clean up a failed map; caller must not free vaddrs
 *
- * Map a page of memory into this domain from another domain's grant table.
+ * Map pages of memory into this domain from another domain's grant table.
 * xenbus_map_ring does not allocate the virtual address space (you must do
- * this yourself!). It only maps in the page to the specified address.
+ * this yourself!). It only maps in the pages to the specified address.
 * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
- * or -ENOMEM on error. If an error is returned, device will switch to
- * XenbusStateClosing and the error message will be saved in XenStore.
- */
-int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
-		    grant_handle_t *handle, void *vaddr)
-{
-	struct gnttab_map_grant_ref op;
+ * or -ENOMEM / -EINVAL on error. If an error is returned, device will switch to
+ * XenbusStateClosing and the first error message will be saved in XenStore.
+ * Furthermore, if we fail to map the ring, the caller should check @leaked.
+ * If @leaked is set, xenbus_map_ring failed to clean up and the caller
+ * should not free the address space of @vaddrs.
+ */
+int xenbus_map_ring(struct xenbus_device *dev, grant_ref_t *gnt_refs,
+		    unsigned int nr_grefs, grant_handle_t *handles,
+		    unsigned long *vaddrs, bool *leaked)
+{
+	phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
+	int i;
 
-	gnttab_set_map_op(&op, (unsigned long)vaddr, GNTMAP_host_map, gnt_ref,
-			  dev->otherend_id);
+	if (nr_grefs > XENBUS_MAX_RING_GRANTS)
+		return -EINVAL;
 
-	gnttab_batch_map(&op, 1);
+	for (i = 0; i < nr_grefs; i++)
+		phys_addrs[i] = (unsigned long)vaddrs[i];
 
-	if (op.status != GNTST_okay) {
-		xenbus_dev_fatal(dev, op.status,
-				 "mapping in shared page %d from domain %d",
-				 gnt_ref, dev->otherend_id);
-	} else
-		*handle = op.handle;
-
-	return op.status;
+	return __xenbus_map_ring(dev, gnt_refs, nr_grefs, handles,
+				 phys_addrs, GNTMAP_host_map, leaked);
 }
 EXPORT_SYMBOL_GPL(xenbus_map_ring);
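
Callers of the lower-level xenbus_map_ring() own the virtual address range themselves, so on failure they must consult the new @leaked out-parameter before reclaiming it. A sketch of the intended error handling (free_my_range() stands in for however the caller obtained the range):

	bool leaked;
	int err;

	err = xenbus_map_ring(dev, grefs, nr_grefs, handles, vaddrs, &leaked);
	if (err) {
		if (!leaked)
			free_my_range(vaddrs);	/* safe: nothing is left mapped */
		/* else: cleanup itself failed and some grants may still be
		 * mapped, so the range must be leaked rather than reused */
		return err;
	}
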
@@ -608,14 +747,15 @@
 static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
 {
 	struct xenbus_map_node *node;
-	struct gnttab_unmap_grant_ref op = {
-		.host_addr = (unsigned long)vaddr,
-	};
+	struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
 	unsigned int level;
+	int i;
+	bool leaked = false;
+	int err;
 
 	spin_lock(&xenbus_valloc_lock);
 	list_for_each_entry(node, &xenbus_valloc_pages, next) {
-		if (node->area->addr == vaddr) {
+		if (node->pv.area->addr == vaddr) {
 			list_del(&node->next);
 			goto found;
 		}
@@ -630,22 +770,59 @@
 		return GNTST_bad_virt_addr;
 	}
 
-	op.handle = node->handle;
-	op.host_addr = arbitrary_virt_to_machine(
-		lookup_address((unsigned long)vaddr, &level)).maddr;
+	for (i = 0; i < node->nr_handles; i++) {
+		unsigned long addr;
+
+		memset(&unmap[i], 0, sizeof(unmap[i]));
+		addr = (unsigned long)vaddr + (XEN_PAGE_SIZE * i);
+		unmap[i].host_addr = arbitrary_virt_to_machine(
+			lookup_address(addr, &level)).maddr;
+		unmap[i].dev_bus_addr = 0;
+		unmap[i].handle = node->handles[i];
+	}
 
-	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
+	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, i))
 		BUG();
 
-	if (op.status == GNTST_okay)
-		free_vm_area(node->area);
+	err = GNTST_okay;
+	leaked = false;
+	for (i = 0; i < node->nr_handles; i++) {
+		if (unmap[i].status != GNTST_okay) {
+			leaked = true;
+			xenbus_dev_error(dev, unmap[i].status,
+					 "unmapping page at handle %d error %d",
+					 node->handles[i], unmap[i].status);
+			err = unmap[i].status;
+			break;
+		}
+	}
+
+	if (!leaked)
+		free_vm_area(node->pv.area);
 	else
-		xenbus_dev_error(dev, op.status,
-				 "unmapping page at handle %d error %d",
-				 node->handle, op.status);
+		pr_alert("leaking VM area %p size %u page(s)",
+			 node->pv.area, node->nr_handles);
 
 	kfree(node);
-	return op.status;
+	return err;
+}
+
+struct unmap_ring_vfree_hvm
+{
+	unsigned int idx;
+	unsigned long addrs[XENBUS_MAX_RING_GRANTS];
+};
+
+static void xenbus_unmap_ring_setup_grant_hvm(unsigned long gfn,
+					      unsigned int goffset,
+					      unsigned int len,
+					      void *data)
+{
+	struct unmap_ring_vfree_hvm *info = data;
+
+	info->addrs[info->idx] = (unsigned long)gfn_to_virt(gfn);
+
+	info->idx++;
 }
 
 static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
@@ -653,10 +830,14 @@
 	int rv;
 	struct xenbus_map_node *node;
 	void *addr;
+	struct unmap_ring_vfree_hvm info = {
+		.idx = 0,
+	};
+	unsigned int nr_pages;
 
 	spin_lock(&xenbus_valloc_lock);
 	list_for_each_entry(node, &xenbus_valloc_pages, next) {
-		addr = pfn_to_kaddr(page_to_pfn(node->page));
+		addr = node->hvm.addr;
 		if (addr == vaddr) {
 			list_del(&node->next);
 			goto found;
@@ -672,12 +853,20 @@
 		return GNTST_bad_virt_addr;
 	}
 
-	rv = xenbus_unmap_ring(dev, node->handle, addr);
+	nr_pages = XENBUS_PAGES(node->nr_handles);
 
-	if (!rv)
-		free_xenballooned_pages(1, &node->page);
+	gnttab_foreach_grant(node->hvm.pages, node->nr_handles,
+			     xenbus_unmap_ring_setup_grant_hvm,
+			     &info);
+
+	rv = xenbus_unmap_ring(dev, node->handles, node->nr_handles,
+			       info.addrs);
+	if (!rv) {
+		vunmap(vaddr);
+		free_xenballooned_pages(nr_pages, node->hvm.pages);
+	}
 	else
-		WARN(1, "Leaking %p\n", vaddr);
+		WARN(1, "Leaking %p, size %u page(s)\n", vaddr, nr_pages);
 
 	kfree(node);
 	return rv;
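
Both vfree variants above are reached through xenbus_unmap_ring_vfree(), the counterpart of xenbus_map_ring_valloc(); @vaddr must be the pointer the map call returned. A minimal teardown sketch:

	int err = xenbus_unmap_ring_vfree(dev, vaddr);

	if (err)	/* GNTST_* error; the area may have been leaked */
		dev_warn(&dev->dev, "ring unmap failed: %d\n", err);
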
@@ -686,29 +875,44 @@
 /**
  * xenbus_unmap_ring
  * @dev: xenbus device
- * @handle: grant handle
- * @vaddr: addr to unmap
+ * @handles: grant handle array
+ * @nr_handles: number of handles in the array
+ * @vaddrs: addresses to unmap
 *
- * Unmap a page of memory in this domain that was imported from another domain.
+ * Unmap memory in this domain that was imported from another domain.
 * Returns 0 on success and returns GNTST_* on error
 * (see xen/include/interface/grant_table.h).
 */
 int xenbus_unmap_ring(struct xenbus_device *dev,
-		      grant_handle_t handle, void *vaddr)
+		      grant_handle_t *handles, unsigned int nr_handles,
+		      unsigned long *vaddrs)
 {
-	struct gnttab_unmap_grant_ref op;
+	struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
+	int i;
+	int err;
+
+	if (nr_handles > XENBUS_MAX_RING_GRANTS)
+		return -EINVAL;
 
-	gnttab_set_unmap_op(&op, (unsigned long)vaddr, GNTMAP_host_map, handle);
+	for (i = 0; i < nr_handles; i++)
+		gnttab_set_unmap_op(&unmap[i], vaddrs[i],
+				    GNTMAP_host_map, handles[i]);
 
-	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
+	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, i))
 		BUG();
 
-	if (op.status != GNTST_okay)
-		xenbus_dev_error(dev, op.status,
-				 "unmapping page at handle %d error %d",
-				 handle, op.status);
+	err = GNTST_okay;
+	for (i = 0; i < nr_handles; i++) {
+		if (unmap[i].status != GNTST_okay) {
+			xenbus_dev_error(dev, unmap[i].status,
+					 "unmapping page at handle %d error %d",
+					 handles[i], unmap[i].status);
+			err = unmap[i].status;
+			break;
+		}
+	}
 
-	return op.status;
+	return err;
 }
 EXPORT_SYMBOL_GPL(xenbus_unmap_ring);
@@ -743,7 +947,7 @@
 
 void __init xenbus_ring_ops_init(void)
 {
-	if (xen_pv_domain())
+	if (!xen_feature(XENFEAT_auto_translated_physmap))
 		ring_ops = &ring_ops_pv;
 	else
 		ring_ops = &ring_ops_hvm;
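
The last hunk replaces the xen_pv_domain() test with a feature check: what actually selects the mapping strategy is whether the guest's physmap is auto-translated (true for HVM x86 and for ARM guests), since only non-translated PV guests can use the GNTMAP_contains_pte trick on a vm_area. A condensed view of the resulting dispatch, assuming the ring_ops definitions earlier in this file:

	void __init xenbus_ring_ops_init(void)
	{
		if (!xen_feature(XENFEAT_auto_translated_physmap))
			ring_ops = &ring_ops_pv;	/* alloc_vm_area + GNTMAP_contains_pte */
		else
			ring_ops = &ring_ops_hvm;	/* ballooned pages + vmap */
	}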