--- zzzz-none-000/linux-3.10.107/drivers/vhost/vhost.c	2017-06-27 09:49:32.000000000 +0000
+++ scorpion-7490-727/linux-3.10.107/drivers/vhost/vhost.c	2021-02-04 17:41:59.000000000 +0000
@@ -13,28 +13,105 @@
 #include <linux/eventfd.h>
 #include <linux/vhost.h>
-#include <linux/socket.h> /* memcpy_fromiovec */
+#include <linux/uio.h>
 #include <linux/mm.h>
 #include <linux/mmu_context.h>
 #include <linux/miscdevice.h>
 #include <linux/mutex.h>
-#include <linux/rcupdate.h>
 #include <linux/poll.h>
 #include <linux/file.h>
 #include <linux/highmem.h>
 #include <linux/slab.h>
+#include <linux/vmalloc.h>
 #include <linux/kthread.h>
 #include <linux/cgroup.h>
+#include <linux/module.h>
+#include <linux/sort.h>
 
 #include "vhost.h"
 
+static ushort max_mem_regions = 64;
+module_param(max_mem_regions, ushort, 0444);
+MODULE_PARM_DESC(max_mem_regions,
+	"Maximum number of memory regions in memory map. (default: 64)");
+
 enum {
-	VHOST_MEMORY_MAX_NREGIONS = 64,
 	VHOST_MEMORY_F_LOG = 0x1,
 };
 
-#define vhost_used_event(vq) ((u16 __user *)&vq->avail->ring[vq->num])
-#define vhost_avail_event(vq) ((u16 __user *)&vq->used->ring[vq->num])
+#define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num])
+#define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num])
+
+#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
+static void vhost_vq_reset_user_be(struct vhost_virtqueue *vq)
+{
+	vq->user_be = !virtio_legacy_is_little_endian();
+}
+
+static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
+{
+	struct vhost_vring_state s;
+
+	if (vq->private_data)
+		return -EBUSY;
+
+	if (copy_from_user(&s, argp, sizeof(s)))
+		return -EFAULT;
+
+	if (s.num != VHOST_VRING_LITTLE_ENDIAN &&
+	    s.num != VHOST_VRING_BIG_ENDIAN)
+		return -EINVAL;
+
+	vq->user_be = s.num;
+
+	return 0;
+}
+
+static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
+				   int __user *argp)
+{
+	struct vhost_vring_state s = {
+		.index = idx,
+		.num = vq->user_be
+	};
+
+	if (copy_to_user(argp, &s, sizeof(s)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static void vhost_init_is_le(struct vhost_virtqueue *vq)
+{
+	/* Note for legacy virtio: user_be is initialized at reset time
+	 * according to the host endianness. If userspace does not set an
+	 * explicit endianness, the default behavior is native endian, as
+	 * expected by legacy virtio.
+	 */
+	vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1) || !vq->user_be;
+}
+#else
+static void vhost_vq_reset_user_be(struct vhost_virtqueue *vq)
+{
+}
+
+static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
+{
+	return -ENOIOCTLCMD;
+}
+
+static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
+				   int __user *argp)
+{
+	return -ENOIOCTLCMD;
+}
+
+static void vhost_init_is_le(struct vhost_virtqueue *vq)
+{
+	if (vhost_has_feature(vq, VIRTIO_F_VERSION_1))
+		vq->is_le = true;
+}
+#endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */
 
 static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
 			    poll_table *pt)
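The hunk above introduces VHOST_SET_VRING_ENDIAN/VHOST_GET_VRING_ENDIAN handling behind CONFIG_VHOST_CROSS_ENDIAN_LEGACY. A minimal userspace sketch of driving the new ioctl, assuming a kernel and UAPI <linux/vhost.h> that carry this change; the file descriptor, queue index and helper name are placeholders and not part of the patch:

/* Hypothetical userspace sketch (not part of the patch): select a
 * big-endian legacy ring before a backend is attached.  With a backend
 * already set, the kernel returns -EBUSY as in the hunk above. */
#include <linux/vhost.h>
#include <sys/ioctl.h>
#include <stdio.h>

static int set_ring_big_endian(int vhost_fd, unsigned int queue_idx)
{
	struct vhost_vring_state state = {
		.index = queue_idx,
		.num = VHOST_VRING_BIG_ENDIAN,
	};

	if (ioctl(vhost_fd, VHOST_SET_VRING_ENDIAN, &state) < 0) {
		perror("VHOST_SET_VRING_ENDIAN");
		return -1;
	}
	return 0;
}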
@@ -66,6 +143,7 @@
 	work->flushing = 0;
 	work->queue_seq = work->done_seq = 0;
 }
+EXPORT_SYMBOL_GPL(vhost_work_init);
 
 /* Init poll structure */
 void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
@@ -79,6 +157,7 @@
 
 	vhost_work_init(&poll->work, fn);
 }
+EXPORT_SYMBOL_GPL(vhost_poll_init);
 
 /* Start polling a file. We add ourselves to file's wait queue. The caller must
  * keep a reference to a file until after vhost_poll_stop is called. */
@@ -101,6 +180,7 @@
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(vhost_poll_start);
 
 /* Stop polling a file. After this function returns, it becomes safe to drop the
  * file reference. You must also flush afterwards. */
@@ -111,6 +191,7 @@
 		poll->wqh = NULL;
 	}
 }
+EXPORT_SYMBOL_GPL(vhost_poll_stop);
 
 static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work,
 				unsigned seq)
@@ -123,7 +204,7 @@
 	return left <= 0;
 }
 
-static void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
+void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
 {
 	unsigned seq;
 	int flushing;
@@ -138,6 +219,7 @@
 	spin_unlock_irq(&dev->work_lock);
 	BUG_ON(flushing < 0);
 }
+EXPORT_SYMBOL_GPL(vhost_work_flush);
 
 /* Flush any work that has been scheduled. When calling this, don't hold any
  * locks that are also used by the callback. */
@@ -145,6 +227,7 @@
 {
 	vhost_work_flush(poll->dev, &poll->work);
 }
+EXPORT_SYMBOL_GPL(vhost_poll_flush);
 
 void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
 {
@@ -154,15 +237,19 @@
 	if (list_empty(&work->node)) {
 		list_add_tail(&work->node, &dev->work_list);
 		work->queue_seq++;
+		spin_unlock_irqrestore(&dev->work_lock, flags);
 		wake_up_process(dev->worker);
+	} else {
+		spin_unlock_irqrestore(&dev->work_lock, flags);
 	}
-	spin_unlock_irqrestore(&dev->work_lock, flags);
 }
+EXPORT_SYMBOL_GPL(vhost_work_queue);
 
 void vhost_poll_queue(struct vhost_poll *poll)
 {
 	vhost_work_queue(poll->dev, &poll->work);
 }
+EXPORT_SYMBOL_GPL(vhost_poll_queue);
 
 static void vhost_vq_reset(struct vhost_dev *dev,
 			   struct vhost_virtqueue *vq)
@@ -180,6 +267,7 @@
 	vq->log_used = false;
 	vq->log_addr = -1ull;
 	vq->private_data = NULL;
+	vq->acked_features = 0;
 	vq->log_base = NULL;
 	vq->error_ctx = NULL;
 	vq->error = NULL;
@@ -187,6 +275,9 @@
 	vq->call_ctx = NULL;
 	vq->call = NULL;
 	vq->log_ctx = NULL;
+	vq->memory = NULL;
+	vq->is_le = virtio_legacy_is_little_endian();
+	vhost_vq_reset_user_be(vq);
 }
 
 static int vhost_worker(void *data)
@@ -251,17 +342,16 @@
 /* Helper to allocate iovec buffers for all vqs. */
 static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
 {
+	struct vhost_virtqueue *vq;
 	int i;
 
 	for (i = 0; i < dev->nvqs; ++i) {
-		dev->vqs[i]->indirect = kmalloc(sizeof *dev->vqs[i]->indirect *
-						UIO_MAXIOV, GFP_KERNEL);
-		dev->vqs[i]->log = kmalloc(sizeof *dev->vqs[i]->log * UIO_MAXIOV,
-					   GFP_KERNEL);
-		dev->vqs[i]->heads = kmalloc(sizeof *dev->vqs[i]->heads *
-					     UIO_MAXIOV, GFP_KERNEL);
-		if (!dev->vqs[i]->indirect || !dev->vqs[i]->log ||
-		    !dev->vqs[i]->heads)
+		vq = dev->vqs[i];
+		vq->indirect = kmalloc(sizeof *vq->indirect * UIO_MAXIOV,
+				       GFP_KERNEL);
+		vq->log = kmalloc(sizeof *vq->log * UIO_MAXIOV, GFP_KERNEL);
+		vq->heads = kmalloc(sizeof *vq->heads * UIO_MAXIOV, GFP_KERNEL);
+		if (!vq->indirect || !vq->log || !vq->heads)
 			goto err_nomem;
 	}
 	return 0;
@@ -280,9 +370,10 @@
 		vhost_vq_free_iovecs(dev->vqs[i]);
 }
 
-long vhost_dev_init(struct vhost_dev *dev,
+void vhost_dev_init(struct vhost_dev *dev,
 		    struct vhost_virtqueue **vqs, int nvqs)
 {
+	struct vhost_virtqueue *vq;
 	int i;
 
 	dev->vqs = vqs;
@@ -297,19 +388,19 @@
 	dev->worker = NULL;
 
 	for (i = 0; i < dev->nvqs; ++i) {
-		dev->vqs[i]->log = NULL;
-		dev->vqs[i]->indirect = NULL;
-		dev->vqs[i]->heads = NULL;
-		dev->vqs[i]->dev = dev;
-		mutex_init(&dev->vqs[i]->mutex);
-		vhost_vq_reset(dev, dev->vqs[i]);
-		if (dev->vqs[i]->handle_kick)
-			vhost_poll_init(&dev->vqs[i]->poll,
-					dev->vqs[i]->handle_kick, POLLIN, dev);
+		vq = dev->vqs[i];
+		vq->log = NULL;
+		vq->indirect = NULL;
+		vq->heads = NULL;
+		vq->dev = dev;
+		mutex_init(&vq->mutex);
+		vhost_vq_reset(dev, vq);
+		if (vq->handle_kick)
+			vhost_poll_init(&vq->poll, vq->handle_kick,
+					POLLIN, dev);
 	}
-
-	return 0;
 }
+EXPORT_SYMBOL_GPL(vhost_dev_init);
 
 /* Caller should have device mutex */
 long vhost_dev_check_owner(struct vhost_dev *dev)
@@ -317,6 +408,7 @@
 	/* Are you the owner? If not, I don't think you mean to do that */
 	return dev->mm == current->mm ? 0 : -EPERM;
 }
+EXPORT_SYMBOL_GPL(vhost_dev_check_owner);
 
 struct vhost_attach_cgroups_struct {
 	struct vhost_work work;
@@ -348,6 +440,7 @@
 {
 	return dev->mm;
 }
+EXPORT_SYMBOL_GPL(vhost_dev_has_owner);
 
 /* Caller should have device mutex */
 long vhost_dev_set_owner(struct vhost_dev *dev)
@@ -391,21 +484,31 @@
 err_mm:
 	return err;
 }
+EXPORT_SYMBOL_GPL(vhost_dev_set_owner);
 
 struct vhost_memory *vhost_dev_reset_owner_prepare(void)
 {
 	return kmalloc(offsetof(struct vhost_memory, regions), GFP_KERNEL);
 }
+EXPORT_SYMBOL_GPL(vhost_dev_reset_owner_prepare);
 
 /* Caller should have device mutex */
 void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_memory *memory)
 {
+	int i;
+
 	vhost_dev_cleanup(dev, true);
 
 	/* Restore memory to default empty mapping. */
 	memory->nregions = 0;
-	RCU_INIT_POINTER(dev->memory, memory);
+	dev->memory = memory;
+	/* We don't need VQ locks below since vhost_dev_cleanup makes sure
+	 * VQs aren't running.
+	 */
+	for (i = 0; i < dev->nvqs; ++i)
+		dev->vqs[i]->memory = memory;
 }
+EXPORT_SYMBOL_GPL(vhost_dev_reset_owner);
 
 void vhost_dev_stop(struct vhost_dev *dev)
 {
@@ -418,6 +521,7 @@
 		}
 	}
 }
+EXPORT_SYMBOL_GPL(vhost_dev_stop);
 
 /* Caller should have device mutex if and only if locked is set */
 void vhost_dev_cleanup(struct vhost_dev *dev, bool locked)
@@ -445,10 +549,8 @@
 		fput(dev->log_file);
 	dev->log_file = NULL;
 	/* No one will access memory at this point */
-	kfree(rcu_dereference_protected(dev->memory,
-					locked ==
-						lockdep_is_held(&dev->mutex)));
-	RCU_INIT_POINTER(dev->memory, NULL);
+	kvfree(dev->memory);
+	dev->memory = NULL;
 	WARN_ON(!list_empty(&dev->work_list));
 	if (dev->worker) {
 		kthread_stop(dev->worker);
@@ -458,6 +560,7 @@
 		mmput(dev->mm);
 	dev->mm = NULL;
 }
+EXPORT_SYMBOL_GPL(vhost_dev_cleanup);
 
 static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
 {
@@ -506,11 +609,13 @@
 
 	for (i = 0; i < d->nvqs; ++i) {
 		int ok;
+		bool log;
+
 		mutex_lock(&d->vqs[i]->mutex);
+		log = log_all || vhost_has_feature(d->vqs[i], VHOST_F_LOG_ALL);
 		/* If ring is inactive, will check when it's enabled. */
 		if (d->vqs[i]->private_data)
-			ok = vq_memory_access_ok(d->vqs[i]->log_base, mem,
-						 log_all);
+			ok = vq_memory_access_ok(d->vqs[i]->log_base, mem, log);
 		else
 			ok = 1;
 		mutex_unlock(&d->vqs[i]->mutex);
@@ -520,12 +625,12 @@
 	return 1;
 }
 
-static int vq_access_ok(struct vhost_dev *d, unsigned int num,
+static int vq_access_ok(struct vhost_virtqueue *vq, unsigned int num,
 			struct vring_desc __user *desc,
 			struct vring_avail __user *avail,
 			struct vring_used __user *used)
 {
-	size_t s = vhost_has_feature(d, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
+	size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
 	return access_ok(VERIFY_READ, desc, num * sizeof *desc) &&
 	       access_ok(VERIFY_READ, avail,
 			 sizeof *avail + num * sizeof *avail->ring + s) &&
@@ -537,25 +642,19 @@
 /* Caller should have device mutex but not vq mutex */
 int vhost_log_access_ok(struct vhost_dev *dev)
 {
-	struct vhost_memory *mp;
-
-	mp = rcu_dereference_protected(dev->memory,
-				       lockdep_is_held(&dev->mutex));
-	return memory_access_ok(dev, mp, 1);
+	return memory_access_ok(dev, dev->memory, 1);
 }
+EXPORT_SYMBOL_GPL(vhost_log_access_ok);
 
 /* Verify access for write logging. */
 /* Caller should have vq mutex and device mutex */
-static int vq_log_access_ok(struct vhost_dev *d, struct vhost_virtqueue *vq,
+static int vq_log_access_ok(struct vhost_virtqueue *vq,
 			    void __user *log_base)
 {
-	struct vhost_memory *mp;
-	size_t s = vhost_has_feature(d, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
+	size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
 
-	mp = rcu_dereference_protected(vq->dev->memory,
-				       lockdep_is_held(&vq->mutex));
-	return vq_memory_access_ok(log_base, mp,
-				   vhost_has_feature(vq->dev, VHOST_F_LOG_ALL)) &&
+	return vq_memory_access_ok(log_base, vq->memory,
+				   vhost_has_feature(vq, VHOST_F_LOG_ALL)) &&
 		(!vq->log_used || log_access_ok(log_base, vq->log_addr,
 					sizeof *vq->used +
 					vq->num * sizeof *vq->used->ring + s));
@@ -565,42 +664,69 @@
 /* Caller should have vq mutex and device mutex */
 int vhost_vq_access_ok(struct vhost_virtqueue *vq)
 {
-	return vq_access_ok(vq->dev, vq->num, vq->desc, vq->avail, vq->used) &&
-		vq_log_access_ok(vq->dev, vq, vq->log_base);
+	return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used) &&
+		vq_log_access_ok(vq, vq->log_base);
+}
+EXPORT_SYMBOL_GPL(vhost_vq_access_ok);
+
+static int vhost_memory_reg_sort_cmp(const void *p1, const void *p2)
+{
+	const struct vhost_memory_region *r1 = p1, *r2 = p2;
+	if (r1->guest_phys_addr < r2->guest_phys_addr)
+		return 1;
+	if (r1->guest_phys_addr > r2->guest_phys_addr)
+		return -1;
+	return 0;
+}
+
+static void *vhost_kvzalloc(unsigned long size)
+{
+	void *n = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
+
+	if (!n)
+		n = vzalloc(size);
+	return n;
 }
 
 static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
 {
 	struct vhost_memory mem, *newmem, *oldmem;
 	unsigned long size = offsetof(struct vhost_memory, regions);
+	int i;
 
 	if (copy_from_user(&mem, m, size))
 		return -EFAULT;
 	if (mem.padding)
 		return -EOPNOTSUPP;
-	if (mem.nregions > VHOST_MEMORY_MAX_NREGIONS)
+	if (mem.nregions > max_mem_regions)
 		return -E2BIG;
-	newmem = kmalloc(size + mem.nregions * sizeof *m->regions, GFP_KERNEL);
+	newmem = vhost_kvzalloc(size + mem.nregions * sizeof(*m->regions));
 	if (!newmem)
 		return -ENOMEM;
 
 	memcpy(newmem, &mem, size);
 	if (copy_from_user(newmem->regions, m->regions,
 			   mem.nregions * sizeof *m->regions)) {
-		kfree(newmem);
+		kvfree(newmem);
 		return -EFAULT;
 	}
+	sort(newmem->regions, newmem->nregions, sizeof(*newmem->regions),
+	     vhost_memory_reg_sort_cmp, NULL);
 
-	if (!memory_access_ok(d, newmem,
-			      vhost_has_feature(d, VHOST_F_LOG_ALL))) {
-		kfree(newmem);
+	if (!memory_access_ok(d, newmem, 0)) {
+		kvfree(newmem);
 		return -EFAULT;
 	}
-	oldmem = rcu_dereference_protected(d->memory,
-					   lockdep_is_held(&d->mutex));
-	rcu_assign_pointer(d->memory, newmem);
-	synchronize_rcu();
-	kfree(oldmem);
+	oldmem = d->memory;
+	d->memory = newmem;
+
+	/* All memory accesses are done under some VQ mutex. */
+	for (i = 0; i < d->nvqs; ++i) {
+		mutex_lock(&d->vqs[i]->mutex);
+		d->vqs[i]->memory = newmem;
+		mutex_unlock(&d->vqs[i]->mutex);
+	}
+	kvfree(oldmem);
 	return 0;
 }
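vhost_set_memory() now bounds the table by the max_mem_regions module parameter, allocates it with a kzalloc/vzalloc fallback, and sorts the regions by descending guest_phys_addr before publishing them under the per-VQ mutexes. A minimal userspace sketch of feeding such a table, assuming an already-opened vhost fd with an owner set; the helper name and the single made-up region are placeholders, not part of the patch:

/* Hypothetical userspace sketch (not part of the patch): register one
 * guest memory region.  Tables with nregions > max_mem_regions
 * (default 64) are rejected with -E2BIG by the code above. */
#include <linux/vhost.h>
#include <sys/ioctl.h>
#include <stdint.h>
#include <stdlib.h>

static int set_one_region(int vhost_fd, uint64_t gpa, uint64_t size, void *hva)
{
	struct vhost_memory *mem;
	int ret;

	mem = calloc(1, sizeof(*mem) + sizeof(mem->regions[0]));
	if (!mem)
		return -1;
	mem->nregions = 1;
	mem->regions[0].guest_phys_addr = gpa;
	mem->regions[0].memory_size = size;
	mem->regions[0].userspace_addr = (uint64_t)(uintptr_t)hva;

	ret = ioctl(vhost_fd, VHOST_SET_MEM_TABLE, mem);
	free(mem);
	return ret;
}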
@@ -687,9 +813,13 @@
 			r = -EFAULT;
 			break;
 		}
-		if ((a.avail_user_addr & (sizeof *vq->avail->ring - 1)) ||
-		    (a.used_user_addr & (sizeof *vq->used->ring - 1)) ||
-		    (a.log_guest_addr & (sizeof *vq->used->ring - 1))) {
+
+		/* Make sure it's safe to cast pointers to vring types. */
+		BUILD_BUG_ON(__alignof__ *vq->avail > VRING_AVAIL_ALIGN_SIZE);
+		BUILD_BUG_ON(__alignof__ *vq->used > VRING_USED_ALIGN_SIZE);
+		if ((a.avail_user_addr & (VRING_AVAIL_ALIGN_SIZE - 1)) ||
+		    (a.used_user_addr & (VRING_USED_ALIGN_SIZE - 1)) ||
+		    (a.log_guest_addr & (VRING_USED_ALIGN_SIZE - 1))) {
 			r = -EINVAL;
 			break;
 		}
@@ -698,7 +828,7 @@
 		 * If it is not, we don't as size might not have been setup.
 		 * We will verify when backend is configured. */
 		if (vq->private_data) {
-			if (!vq_access_ok(d, vq->num,
+			if (!vq_access_ok(vq, vq->num,
 				(void __user *)(unsigned long)a.desc_user_addr,
 				(void __user *)(unsigned long)a.avail_user_addr,
 				(void __user *)(unsigned long)a.used_user_addr)) {
@@ -776,6 +906,12 @@
 		} else
 			filep = eventfp;
 		break;
+	case VHOST_SET_VRING_ENDIAN:
+		r = vhost_set_vring_endian(vq, argp);
+		break;
+	case VHOST_GET_VRING_ENDIAN:
+		r = vhost_get_vring_endian(vq, idx, argp);
+		break;
 	default:
 		r = -ENOIOCTLCMD;
 	}
@@ -797,6 +933,7 @@
 		vhost_poll_flush(&vq->poll);
 	return r;
 }
+EXPORT_SYMBOL_GPL(vhost_vring_ioctl);
 
 /* Caller must have device mutex */
 long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
@@ -837,7 +974,7 @@
 			vq = d->vqs[i];
 			mutex_lock(&vq->mutex);
 			/* If ring is inactive, will check when it's enabled. */
-			if (vq->private_data && !vq_log_access_ok(d, vq, base))
+			if (vq->private_data && !vq_log_access_ok(vq, base))
 				r = -EFAULT;
 			else
 				vq->log_base = base;
@@ -878,21 +1015,27 @@
 done:
 	return r;
 }
+EXPORT_SYMBOL_GPL(vhost_dev_ioctl);
 
 static const struct vhost_memory_region *find_region(struct vhost_memory *mem,
 						     __u64 addr, __u32 len)
 {
-	struct vhost_memory_region *reg;
-	int i;
+	const struct vhost_memory_region *reg;
+	int start = 0, end = mem->nregions;
 
-	/* linear search is not brilliant, but we really have on the order of 6
-	 * regions in practice */
-	for (i = 0; i < mem->nregions; ++i) {
-		reg = mem->regions + i;
-		if (reg->guest_phys_addr <= addr &&
-		    reg->guest_phys_addr + reg->memory_size - 1 >= addr)
-			return reg;
+	while (start < end) {
+		int slot = start + (end - start) / 2;
+		reg = mem->regions + slot;
+		if (addr >= reg->guest_phys_addr)
+			end = slot;
+		else
+			start = slot + 1;
 	}
+
+	reg = mem->regions + start;
+	if (addr >= reg->guest_phys_addr &&
+	    reg->guest_phys_addr + reg->memory_size > addr)
+		return reg;
 	return NULL;
 }
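find_region() can binary-search because vhost_set_memory() sorted the regions by descending guest_phys_addr via vhost_memory_reg_sort_cmp(). A small standalone sketch of the same lookup over an ordinary array, with made-up region values, to make the descending-order invariant explicit (not part of the patch):

/* Hypothetical standalone illustration: the probe moves towards the
 * front of the array while the candidate region still starts at or
 * below addr, so the loop ends on the highest-starting region that
 * does not start above addr. */
#include <stdint.h>
#include <stdio.h>

struct region { uint64_t gpa, size; };

static const struct region *lookup(const struct region *r, int n, uint64_t addr)
{
	int start = 0, end = n;

	while (start < end) {
		int slot = start + (end - start) / 2;

		if (addr >= r[slot].gpa)
			end = slot;
		else
			start = slot + 1;
	}
	/* Extra bound check for the standalone sketch. */
	if (start < n && addr >= r[start].gpa && addr - r[start].gpa < r[start].size)
		return &r[start];
	return NULL;
}

int main(void)
{
	/* Sorted descending by gpa, mirroring the kernel's sort order. */
	const struct region map[] = { { 0x100000, 0x1000 }, { 0x0, 0x1000 } };

	printf("%p\n", (const void *)lookup(map, 2, 0x100800));
	return 0;
}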
@@ -969,11 +1112,12 @@
 	BUG();
 	return 0;
 }
+EXPORT_SYMBOL_GPL(vhost_log_write);
 
 static int vhost_update_used_flags(struct vhost_virtqueue *vq)
 {
 	void __user *used;
-	if (__put_user(vq->used_flags, &vq->used->flags) < 0)
+	if (__put_user(cpu_to_vhost16(vq, vq->used_flags), &vq->used->flags) < 0)
 		return -EFAULT;
 	if (unlikely(vq->log_used)) {
 		/* Make sure the flag is seen before log. */
@@ -991,7 +1135,7 @@
 
 static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
 {
-	if (__put_user(vq->avail_idx, vhost_avail_event(vq)))
+	if (__put_user(cpu_to_vhost16(vq, vq->avail_idx), vhost_avail_event(vq)))
 		return -EFAULT;
 	if (unlikely(vq->log_used)) {
 		void __user *used;
@@ -1010,18 +1154,30 @@
 
 int vhost_init_used(struct vhost_virtqueue *vq)
 {
+	__virtio16 last_used_idx;
 	int r;
-	if (!vq->private_data)
+	if (!vq->private_data) {
+		vq->is_le = virtio_legacy_is_little_endian();
 		return 0;
+	}
+
+	vhost_init_is_le(vq);
 
 	r = vhost_update_used_flags(vq);
 	if (r)
 		return r;
 	vq->signalled_used_valid = false;
-	return get_user(vq->last_used_idx, &vq->used->idx);
+	if (!access_ok(VERIFY_READ, &vq->used->idx, sizeof vq->used->idx))
+		return -EFAULT;
+	r = __get_user(last_used_idx, &vq->used->idx);
+	if (r)
+		return r;
+	vq->last_used_idx = vhost16_to_cpu(vq, last_used_idx);
+	return 0;
 }
+EXPORT_SYMBOL_GPL(vhost_init_used);
 
-static int translate_desc(struct vhost_dev *dev, u64 addr, u32 len,
+static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
 			  struct iovec iov[], int iov_size)
 {
 	const struct vhost_memory_region *reg;
@@ -1030,9 +1186,7 @@
 	u64 s = 0;
 	int ret = 0;
 
-	rcu_read_lock();
-
-	mem = rcu_dereference(dev->memory);
+	mem = vq->memory;
 	while ((u64)len > s) {
 		u64 size;
 		if (unlikely(ret >= iov_size)) {
@@ -1054,23 +1208,22 @@
 		++ret;
 	}
 
-	rcu_read_unlock();
 	return ret;
 }
 
 /* Each buffer in the virtqueues is actually a chain of descriptors.  This
  * function returns the next descriptor in the chain,
  * or -1U if we're at the end. */
-static unsigned next_desc(struct vring_desc *desc)
+static unsigned next_desc(struct vhost_virtqueue *vq, struct vring_desc *desc)
 {
 	unsigned int next;
 
 	/* If this descriptor says it doesn't chain, we're done. */
-	if (!(desc->flags & VRING_DESC_F_NEXT))
+	if (!(desc->flags & cpu_to_vhost16(vq, VRING_DESC_F_NEXT)))
 		return -1U;
 
 	/* Check they're not leading us off end of descriptors. */
-	next = desc->next;
+	next = vhost16_to_cpu(vq, desc->next);
 	/* Make sure compiler knows to grab that: we don't want it changing! */
 	/* We will use the result as an index in an array, so most
 	 * architectures only need a compiler barrier here. */
@@ -1079,7 +1232,7 @@
 	return next;
 }
 
-static int get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq,
+static int get_indirect(struct vhost_virtqueue *vq,
 			struct iovec iov[], unsigned int iov_size,
 			unsigned int *out_num, unsigned int *in_num,
 			struct vhost_log *log, unsigned int *log_num,
@@ -1087,29 +1240,32 @@
 {
 	struct vring_desc desc;
 	unsigned int i = 0, count, found = 0;
+	u32 len = vhost32_to_cpu(vq, indirect->len);
+	struct iov_iter from;
 	int ret;
 
 	/* Sanity check */
-	if (unlikely(indirect->len % sizeof desc)) {
+	if (unlikely(len % sizeof desc)) {
 		vq_err(vq, "Invalid length in indirect descriptor: "
 		       "len 0x%llx not multiple of 0x%zx\n",
-		       (unsigned long long)indirect->len,
+		       (unsigned long long)len,
 		       sizeof desc);
 		return -EINVAL;
 	}
 
-	ret = translate_desc(dev, indirect->addr, indirect->len, vq->indirect,
+	ret = translate_desc(vq, vhost64_to_cpu(vq, indirect->addr), len, vq->indirect,
 			     UIO_MAXIOV);
 	if (unlikely(ret < 0)) {
 		vq_err(vq, "Translation failure %d in indirect.\n", ret);
 		return ret;
 	}
+	iov_iter_init(&from, READ, vq->indirect, ret, len);
 
 	/* We will use the result as an address to read from, so most
 	 * architectures only need a compiler barrier here. */
 	read_barrier_depends();
 
-	count = indirect->len / sizeof desc;
+	count = len / sizeof desc;
 	/* Buffers are chained via a 16 bit next field, so
 	 * we can have at most 2^16 of these. */
 	if (unlikely(count > USHRT_MAX + 1)) {
@@ -1126,19 +1282,20 @@
 			       i, count);
 			return -EINVAL;
 		}
-		if (unlikely(memcpy_fromiovec((unsigned char *)&desc,
-					      vq->indirect, sizeof desc))) {
+		if (unlikely(copy_from_iter(&desc, sizeof(desc), &from) !=
+			     sizeof(desc))) {
 			vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
-			       i, (size_t)indirect->addr + i * sizeof desc);
+			       i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
 			return -EINVAL;
 		}
-		if (unlikely(desc.flags & VRING_DESC_F_INDIRECT)) {
+		if (unlikely(desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT))) {
 			vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
-			       i, (size_t)indirect->addr + i * sizeof desc);
+			       i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
 			return -EINVAL;
 		}
 
-		ret = translate_desc(dev, desc.addr, desc.len, iov + iov_count,
+		ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
+				     vhost32_to_cpu(vq, desc.len), iov + iov_count,
 				     iov_size - iov_count);
 		if (unlikely(ret < 0)) {
 			vq_err(vq, "Translation failure %d indirect idx %d\n",
@@ -1146,11 +1303,11 @@
 			return ret;
 		}
 		/* If this is an input descriptor, increment that count. */
-		if (desc.flags & VRING_DESC_F_WRITE) {
+		if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE)) {
 			*in_num += ret;
 			if (unlikely(log)) {
-				log[*log_num].addr = desc.addr;
-				log[*log_num].len = desc.len;
+				log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
+				log[*log_num].len = vhost32_to_cpu(vq, desc.len);
 				++*log_num;
 			}
 		} else {
@@ -1163,7 +1320,7 @@
 			}
 			*out_num += ret;
 		}
-	} while ((i = next_desc(&desc)) != -1);
+	} while ((i = next_desc(vq, &desc)) != -1);
 
 	return 0;
 }
@@ -1175,7 +1332,7 @@
  * This function returns the descriptor number found, or vq->num (which is
  * never a valid descriptor number) if none was found.  A negative code is
  * returned on error. */
-int vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq,
+int vhost_get_vq_desc(struct vhost_virtqueue *vq,
 		      struct iovec iov[], unsigned int iov_size,
 		      unsigned int *out_num, unsigned int *in_num,
 		      struct vhost_log *log, unsigned int *log_num)
@@ -1183,15 +1340,18 @@
 	struct vring_desc desc;
 	unsigned int i, head, found = 0;
 	u16 last_avail_idx;
+	__virtio16 avail_idx;
+	__virtio16 ring_head;
 	int ret;
 
 	/* Check it isn't doing very strange things with descriptor numbers. */
 	last_avail_idx = vq->last_avail_idx;
-	if (unlikely(__get_user(vq->avail_idx, &vq->avail->idx))) {
+	if (unlikely(__get_user(avail_idx, &vq->avail->idx))) {
 		vq_err(vq, "Failed to access avail idx at %p\n",
 		       &vq->avail->idx);
 		return -EFAULT;
 	}
+	vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
 
 	if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) {
 		vq_err(vq, "Guest moved used index from %u to %u",
@@ -1208,14 +1368,16 @@
 
 	/* Grab the next descriptor number they're advertising, and increment
 	 * the index we've seen. */
-	if (unlikely(__get_user(head,
-				&vq->avail->ring[last_avail_idx % vq->num]))) {
+	if (unlikely(__get_user(ring_head,
+		     &vq->avail->ring[last_avail_idx & (vq->num - 1)]))) {
 		vq_err(vq, "Failed to read head: idx %d address %p\n",
 		       last_avail_idx,
 		       &vq->avail->ring[last_avail_idx % vq->num]);
 		return -EFAULT;
 	}
 
+	head = vhost16_to_cpu(vq, ring_head);
+
 	/* If their number is silly, that's an error. */
 	if (unlikely(head >= vq->num)) {
 		vq_err(vq, "Guest says index %u > %u is available",
@@ -1248,8 +1410,8 @@
 			       i, vq->desc + i);
 			return -EFAULT;
 		}
-		if (desc.flags & VRING_DESC_F_INDIRECT) {
-			ret = get_indirect(dev, vq, iov, iov_size,
+		if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT)) {
+			ret = get_indirect(vq, iov, iov_size,
 					   out_num, in_num,
 					   log, log_num, &desc);
 			if (unlikely(ret < 0)) {
@@ -1260,20 +1422,21 @@
 			continue;
 		}
 
-		ret = translate_desc(dev, desc.addr, desc.len, iov + iov_count,
+		ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
+				     vhost32_to_cpu(vq, desc.len), iov + iov_count,
 				     iov_size - iov_count);
 		if (unlikely(ret < 0)) {
 			vq_err(vq, "Translation failure %d descriptor idx %d\n",
 			       ret, i);
 			return ret;
 		}
-		if (desc.flags & VRING_DESC_F_WRITE) {
+		if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE)) {
 			/* If this is an input descriptor,
 			 * increment that count. */
 			*in_num += ret;
 			if (unlikely(log)) {
-				log[*log_num].addr = desc.addr;
-				log[*log_num].len = desc.len;
+				log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
+				log[*log_num].len = vhost32_to_cpu(vq, desc.len);
 				++*log_num;
 			}
 		} else {
@@ -1286,7 +1449,7 @@
 			}
 			*out_num += ret;
 		}
-	} while ((i = next_desc(&desc)) != -1);
+	} while ((i = next_desc(vq, &desc)) != -1);
 
 	/* On success, increment avail index. */
 	vq->last_avail_idx++;
@@ -1296,60 +1459,27 @@
 	BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY));
 	return head;
 }
+EXPORT_SYMBOL_GPL(vhost_get_vq_desc);
 
 /* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
 void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n)
 {
 	vq->last_avail_idx -= n;
 }
+EXPORT_SYMBOL_GPL(vhost_discard_vq_desc);
 
 /* After we've used one of their buffers, we tell them about it.  We'll then
  * want to notify the guest, using eventfd. */
 int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
 {
-	struct vring_used_elem __user *used;
+	struct vring_used_elem heads = {
+		cpu_to_vhost32(vq, head),
+		cpu_to_vhost32(vq, len)
+	};
 
-	/* The virtqueue contains a ring of used buffers.  Get a pointer to the
-	 * next entry in that used ring. */
-	used = &vq->used->ring[vq->last_used_idx % vq->num];
-	if (__put_user(head, &used->id)) {
-		vq_err(vq, "Failed to write used id");
-		return -EFAULT;
-	}
-	if (__put_user(len, &used->len)) {
-		vq_err(vq, "Failed to write used len");
-		return -EFAULT;
-	}
-	/* Make sure buffer is written before we update index. */
-	smp_wmb();
-	if (__put_user(vq->last_used_idx + 1, &vq->used->idx)) {
-		vq_err(vq, "Failed to increment used idx");
-		return -EFAULT;
-	}
-	if (unlikely(vq->log_used)) {
-		/* Make sure data is seen before log. */
-		smp_wmb();
-		/* Log used ring entry write. */
-		log_write(vq->log_base,
-			  vq->log_addr +
-			   ((void __user *)used - (void __user *)vq->used),
-			  sizeof *used);
-		/* Log used index update. */
-		log_write(vq->log_base,
-			  vq->log_addr + offsetof(struct vring_used, idx),
-			  sizeof vq->used->idx);
-		if (vq->log_ctx)
-			eventfd_signal(vq->log_ctx, 1);
-	}
-	vq->last_used_idx++;
-	/* If the driver never bothers to signal in a very long while,
-	 * used index might wrap around. If that happens, invalidate
-	 * signalled_used index we stored. TODO: make sure driver
-	 * signals at least once in 2^16 and remove this. */
-	if (unlikely(vq->last_used_idx == vq->signalled_used))
-		vq->signalled_used_valid = false;
-	return 0;
+	return vhost_add_used_n(vq, &heads, 1);
 }
+EXPORT_SYMBOL_GPL(vhost_add_used);
 
 static int __vhost_add_used_n(struct vhost_virtqueue *vq,
 			    struct vring_used_elem *heads,
@@ -1359,9 +1489,18 @@
 	u16 old, new;
 	int start;
 
-	start = vq->last_used_idx % vq->num;
+	start = vq->last_used_idx & (vq->num - 1);
 	used = vq->used->ring + start;
-	if (__copy_to_user(used, heads, count * sizeof *used)) {
+	if (count == 1) {
+		if (__put_user(heads[0].id, &used->id)) {
+			vq_err(vq, "Failed to write used id");
+			return -EFAULT;
+		}
+		if (__put_user(heads[0].len, &used->len)) {
+			vq_err(vq, "Failed to write used len");
+			return -EFAULT;
+		}
+	} else if (__copy_to_user(used, heads, count * sizeof *used)) {
 		vq_err(vq, "Failed to write used");
 		return -EFAULT;
 	}
@@ -1392,7 +1531,7 @@
 {
 	int start, n, r;
 
-	start = vq->last_used_idx % vq->num;
+	start = vq->last_used_idx & (vq->num - 1);
 	n = vq->num - start;
 	if (n < count) {
 		r = __vhost_add_used_n(vq, heads, n);
@@ -1405,7 +1544,7 @@
 
 	/* Make sure buffer is written before we update index. */
 	smp_wmb();
-	if (put_user(vq->last_used_idx, &vq->used->idx)) {
+	if (__put_user(cpu_to_vhost16(vq, vq->last_used_idx), &vq->used->idx)) {
 		vq_err(vq, "Failed to increment used idx");
 		return -EFAULT;
 	}
@@ -1419,27 +1558,29 @@
 	}
 	return r;
 }
+EXPORT_SYMBOL_GPL(vhost_add_used_n);
 
 static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 {
-	__u16 old, new, event;
+	__u16 old, new;
+	__virtio16 event;
 	bool v;
 	/* Flush out used index updates. This is paired
 	 * with the barrier that the Guest executes when enabling
 	 * interrupts. */
 	smp_mb();
 
-	if (vhost_has_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
+	if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) &&
 	    unlikely(vq->avail_idx == vq->last_avail_idx))
 		return true;
 
-	if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
-		__u16 flags;
+	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
+		__virtio16 flags;
 		if (__get_user(flags, &vq->avail->flags)) {
 			vq_err(vq, "Failed to get flags");
 			return true;
 		}
-		return !(flags & VRING_AVAIL_F_NO_INTERRUPT);
+		return !(flags & cpu_to_vhost16(vq, VRING_AVAIL_F_NO_INTERRUPT));
 	}
 	old = vq->signalled_used;
 	v = vq->signalled_used_valid;
@@ -1449,11 +1590,11 @@
 	if (unlikely(!v))
 		return true;
 
-	if (get_user(event, vhost_used_event(vq))) {
+	if (__get_user(event, vhost_used_event(vq))) {
 		vq_err(vq, "Failed to get used event idx");
 		return true;
 	}
-	return vring_need_event(event, new, old);
+	return vring_need_event(vhost16_to_cpu(vq, event), new, old);
 }
 
 /* This actually signals the guest, using eventfd. */
@@ -1463,6 +1604,7 @@
 	if (vq->call_ctx && vhost_notify(dev, vq))
 		eventfd_signal(vq->call_ctx, 1);
 }
+EXPORT_SYMBOL_GPL(vhost_signal);
 
 /* And here's the combo meal deal.  Supersize me! */
 void vhost_add_used_and_signal(struct vhost_dev *dev,
@@ -1472,6 +1614,7 @@
 	vhost_add_used(vq, head, len);
 	vhost_signal(dev, vq);
 }
+EXPORT_SYMBOL_GPL(vhost_add_used_and_signal);
 
 /* multi-buffer version of vhost_add_used_and_signal */
 void vhost_add_used_and_signal_n(struct vhost_dev *dev,
@@ -1481,17 +1624,18 @@
 	vhost_add_used_n(vq, heads, count);
 	vhost_signal(dev, vq);
 }
+EXPORT_SYMBOL_GPL(vhost_add_used_and_signal_n);
 
 /* OK, now we need to know about added descriptors. */
 bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 {
-	u16 avail_idx;
+	__virtio16 avail_idx;
 	int r;
 
 	if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
 		return false;
 	vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
-	if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
+	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
 		r = vhost_update_used_flags(vq);
 		if (r) {
 			vq_err(vq, "Failed to enable notification at %p: %d\n",
@@ -1516,8 +1660,9 @@
 		return false;
 	}
 
-	return avail_idx != vq->avail_idx;
+	return vhost16_to_cpu(vq, avail_idx) != vq->avail_idx;
 }
+EXPORT_SYMBOL_GPL(vhost_enable_notify);
 
 /* We don't need to be notified again. */
 void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
@@ -1527,10 +1672,28 @@
 	if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
 		return;
 	vq->used_flags |= VRING_USED_F_NO_NOTIFY;
-	if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
+	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
 		r = vhost_update_used_flags(vq);
 		if (r)
 			vq_err(vq, "Failed to enable notification at %p: %d\n",
 			       &vq->used->flags, r);
 	}
 }
+EXPORT_SYMBOL_GPL(vhost_disable_notify);
+
+static int __init vhost_init(void)
+{
+	return 0;
+}
+
+static void __exit vhost_exit(void)
+{
+}
+
+module_init(vhost_init);
+module_exit(vhost_exit);
+
+MODULE_VERSION("0.0.1");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Michael S. Tsirkin");
+MODULE_DESCRIPTION("Host kernel accelerator for virtio");