drivers/vhost/net.c +1 −1

@@ -243,7 +243,7 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
 	int r, nlogs = 0;
 
 	while (datalen > 0) {
-		if (unlikely(seg >= VHOST_NET_MAX_SG)) {
+		if (unlikely(seg >= UIO_MAXIOV)) {
 			r = -ENOBUFS;
 			goto err;
 		}
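The hunk above only changes the cap on seg: the receive path may now gather up to UIO_MAXIOV iovec entries (1024 in the kernel headers) instead of the old skb-frag-derived constant. Below is a minimal userspace sketch of the same gather-with-a-cap pattern, for illustration only; gather_bufs() and the get_buf() callback are hypothetical stand-ins, not vhost code.

/* Sketch: fill a scatter-gather array up to a hard cap, failing with
 * -ENOBUFS when the cap is reached, as the net.c hunk does. */
#include <errno.h>
#include <stddef.h>
#include <sys/uio.h>

#define MAXIOV 1024	/* mirrors the kernel's UIO_MAXIOV */

/* Consume 'datalen' bytes into iov[], one (hypothetical) buffer per slot.
 * Returns the number of segments used, or -ENOBUFS on overflow. */
static int gather_bufs(struct iovec *iov, size_t datalen,
		       void *(*get_buf)(size_t *len))
{
	unsigned seg = 0;

	while (datalen > 0) {
		/* Same check the patch rewrites: bound is the generic
		 * iovec limit, not a net-specific constant. */
		if (seg >= MAXIOV)
			return -ENOBUFS;
		size_t len;
		void *buf = get_buf(&len);
		if (!buf)
			return -ENOBUFS;
		iov[seg].iov_base = buf;
		iov[seg].iov_len = len < datalen ? len : datalen;
		datalen -= iov[seg].iov_len;
		++seg;
	}
	return (int)seg;
}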
drivers/vhost/vhost.c +48 −1

@@ -212,6 +212,45 @@ static int vhost_worker(void *data)
 	}
 }
 
+/* Helper to allocate iovec buffers for all vqs. */
+static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
+{
+	int i;
+
+	for (i = 0; i < dev->nvqs; ++i) {
+		dev->vqs[i].indirect = kmalloc(sizeof *dev->vqs[i].indirect *
+					       UIO_MAXIOV, GFP_KERNEL);
+		dev->vqs[i].log = kmalloc(sizeof *dev->vqs[i].log * UIO_MAXIOV,
+					  GFP_KERNEL);
+		dev->vqs[i].heads = kmalloc(sizeof *dev->vqs[i].heads *
+					    UIO_MAXIOV, GFP_KERNEL);
+		if (!dev->vqs[i].indirect || !dev->vqs[i].log ||
+			!dev->vqs[i].heads)
+			goto err_nomem;
+	}
+	return 0;
+err_nomem:
+	for (; i >= 0; --i) {
+		kfree(dev->vqs[i].indirect);
+		kfree(dev->vqs[i].log);
+		kfree(dev->vqs[i].heads);
+	}
+	return -ENOMEM;
+}
+
+static void vhost_dev_free_iovecs(struct vhost_dev *dev)
+{
+	int i;
+
+	for (i = 0; i < dev->nvqs; ++i) {
+		kfree(dev->vqs[i].indirect);
+		dev->vqs[i].indirect = NULL;
+		kfree(dev->vqs[i].log);
+		dev->vqs[i].log = NULL;
+		kfree(dev->vqs[i].heads);
+		dev->vqs[i].heads = NULL;
+	}
+}
+
 long vhost_dev_init(struct vhost_dev *dev,
 		    struct vhost_virtqueue *vqs, int nvqs)
 {

@@ -229,6 +268,9 @@ long vhost_dev_init(struct vhost_dev *dev,
 	dev->worker = NULL;
 
 	for (i = 0; i < dev->nvqs; ++i) {
+		dev->vqs[i].log = NULL;
+		dev->vqs[i].indirect = NULL;
+		dev->vqs[i].heads = NULL;
 		dev->vqs[i].dev = dev;
 		mutex_init(&dev->vqs[i].mutex);
 		vhost_vq_reset(dev, dev->vqs + i);

@@ -295,6 +337,10 @@ static long vhost_dev_set_owner(struct vhost_dev *dev)
 	if (err)
 		goto err_cgroup;
 
+	err = vhost_dev_alloc_iovecs(dev);
+	if (err)
+		goto err_cgroup;
+
 	return 0;
 err_cgroup:
 	kthread_stop(worker);

@@ -345,6 +391,7 @@ void vhost_dev_cleanup(struct vhost_dev *dev)
 			fput(dev->vqs[i].call);
 		vhost_vq_reset(dev, dev->vqs + i);
 	}
+	vhost_dev_free_iovecs(dev);
 	if (dev->log_ctx)
 		eventfd_ctx_put(dev->log_ctx);
 	dev->log_ctx = NULL;

@@ -947,7 +994,7 @@ static int get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq,
 	}
 
 	ret = translate_desc(dev, indirect->addr, indirect->len, vq->indirect,
-			     ARRAY_SIZE(vq->indirect));
+			     UIO_MAXIOV);
 	if (unlikely(ret < 0)) {
 		vq_err(vq, "Translation failure %d in indirect.\n", ret);
 		return ret;
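vhost_dev_alloc_iovecs() and vhost_dev_free_iovecs() follow an all-or-nothing pattern: allocate three arrays per queue, and on any failure unwind every queue touched so far, including the partially allocated one (kfree(NULL) is a no-op, which is why the rollback loop can start at the failing index). The following standalone userspace sketch shows the same pattern; struct fake_vq, alloc_iovecs(), and NQUEUES are hypothetical names, with malloc/free standing in for kmalloc/kfree.

/* Sketch: allocate-all-or-rollback across an array of queues. */
#include <errno.h>
#include <stdlib.h>
#include <sys/uio.h>

#define MAXIOV 1024

struct fake_vq {
	struct iovec *indirect;
	struct iovec *log;	/* stands in for struct vhost_log */
	struct iovec *heads;	/* stands in for struct vring_used_elem */
};

static int alloc_iovecs(struct fake_vq *vqs, int nvqs)
{
	int i;

	for (i = 0; i < nvqs; ++i) {
		vqs[i].indirect = malloc(sizeof(*vqs[i].indirect) * MAXIOV);
		vqs[i].log      = malloc(sizeof(*vqs[i].log) * MAXIOV);
		vqs[i].heads    = malloc(sizeof(*vqs[i].heads) * MAXIOV);
		if (!vqs[i].indirect || !vqs[i].log || !vqs[i].heads)
			goto err;
	}
	return 0;
err:
	/* Walk back over queue i (partially allocated) and all earlier
	 * ones; free(NULL) is harmless, just like kfree(NULL). */
	for (; i >= 0; --i) {
		free(vqs[i].indirect);
		free(vqs[i].log);
		free(vqs[i].heads);
	}
	return -ENOMEM;
}

Note the ordering in the patch itself: vhost_dev_init() NULLs the three pointers before vhost_dev_set_owner() ever allocates them, so vhost_dev_cleanup() can call vhost_dev_free_iovecs() unconditionally.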
drivers/vhost/vhost.h +8 −10

@@ -15,11 +15,6 @@
 
 struct vhost_device;
 
-enum {
-	/* Enough place for all fragments, head, and virtio net header. */
-	VHOST_NET_MAX_SG = MAX_SKB_FRAGS + 2,
-};
-
 struct vhost_work;
 typedef void (*vhost_work_fn_t)(struct vhost_work *work);

@@ -93,12 +88,15 @@ struct vhost_virtqueue {
 	bool log_used;
 	u64 log_addr;
 
-	struct iovec indirect[VHOST_NET_MAX_SG];
-	struct iovec iov[VHOST_NET_MAX_SG];
-	struct iovec hdr[VHOST_NET_MAX_SG];
+	struct iovec iov[UIO_MAXIOV];
+	/* hdr is used to store the virtio header.
+	 * Since each iovec has >= 1 byte length, we never need more than
+	 * header length entries to store the header. */
+	struct iovec hdr[sizeof(struct virtio_net_hdr_mrg_rxbuf)];
+	struct iovec *indirect;
 	size_t vhost_hlen;
 	size_t sock_hlen;
-	struct vring_used_elem heads[VHOST_NET_MAX_SG];
+	struct vring_used_elem *heads;
 	/* We use a kind of RCU to access private pointer.
 	 * All readers access it from worker, which makes it possible to
 	 * flush the vhost_work instead of synchronize_rcu. Therefore readers do

@@ -109,7 +107,7 @@ struct vhost_virtqueue {
 	void *private_data;
 	/* Log write descriptors */
 	void __user *log_base;
-	struct vhost_log log[VHOST_NET_MAX_SG];
+	struct vhost_log *log;
 };
 
 struct vhost_dev {
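The new hdr[] bound rests on the observation in the struct comment: every iovec entry that holds part of the header covers at least one byte, so a header of hdr_len bytes can span at most hdr_len entries. For struct virtio_net_hdr_mrg_rxbuf that is 12 bytes (the 10-byte virtio_net_hdr plus a 16-bit num_buffers field), hence at most 12 iovec entries. The sketch below is a standalone illustration of that bound, not a kernel helper; hdr_segments() is a hypothetical name.

/* Sketch: count how many leading iov entries a hdr_len-byte header
 * occupies, assuming (as vhost does) each entry has iov_len >= 1. */
#include <stddef.h>
#include <sys/uio.h>

static unsigned hdr_segments(const struct iovec *iov, unsigned n,
			     size_t hdr_len)
{
	unsigned seg = 0;

	while (hdr_len > 0 && seg < n) {
		size_t take = iov[seg].iov_len < hdr_len ? iov[seg].iov_len
							 : hdr_len;
		hdr_len -= take;
		++seg;	/* each entry consumes >= 1 byte, so seg never
			 * exceeds the original hdr_len */
	}
	return seg;
}

With the fixed-size arrays gone, struct vhost_virtqueue keeps only the UIO_MAXIOV-sized iov[] inline; indirect, log, and heads become pointers filled in by vhost_dev_alloc_iovecs(), which is what lets the limit grow from MAX_SKB_FRAGS + 2 to the generic 1024-entry iovec cap without bloating every virtqueue.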