Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit f2683b74 authored by David S. Miller
Browse files


More iov_iter work from Al Viro.

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 98781965 57dd8a07
Loading
Loading
Loading
Loading
+11 −29
Original line number Diff line number Diff line
@@ -338,49 +338,31 @@ static const struct net_proto_family alg_family = {
	.owner	=	THIS_MODULE,
};

int af_alg_make_sg(struct af_alg_sgl *sgl, void __user *addr, int len,
		   int write)
int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len)
{
	unsigned long from = (unsigned long)addr;
	unsigned long npages;
	unsigned off;
	int err;
	int i;

	err = -EFAULT;
	if (!access_ok(write ? VERIFY_READ : VERIFY_WRITE, addr, len))
		goto out;

	off = from & ~PAGE_MASK;
	npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (npages > ALG_MAX_PAGES)
		npages = ALG_MAX_PAGES;
	size_t off;
	ssize_t n;
	int npages, i;

	err = get_user_pages_fast(from, npages, write, sgl->pages);
	if (err < 0)
		goto out;
	n = iov_iter_get_pages(iter, sgl->pages, len, ALG_MAX_PAGES, &off);
	if (n < 0)
		return n;

	npages = err;
	err = -EINVAL;
	npages = PAGE_ALIGN(off + n);
	if (WARN_ON(npages == 0))
		goto out;

	err = 0;
		return -EINVAL;

	sg_init_table(sgl->sg, npages);

	for (i = 0; i < npages; i++) {
	for (i = 0, len = n; i < npages; i++) {
		int plen = min_t(int, len, PAGE_SIZE - off);

		sg_set_page(sgl->sg + i, sgl->pages[i], plen, off);

		off = 0;
		len -= plen;
		err += plen;
	}

out:
	return err;
	return n;
}
EXPORT_SYMBOL_GPL(af_alg_make_sg);

+17 −28
Original line number Diff line number Diff line
@@ -41,8 +41,6 @@ static int hash_sendmsg(struct kiocb *unused, struct socket *sock,
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct hash_ctx *ctx = ask->private;
	unsigned long iovlen;
	const struct iovec *iov;
	long copied = 0;
	int err;

@@ -58,37 +56,28 @@ static int hash_sendmsg(struct kiocb *unused, struct socket *sock,

	ctx->more = 0;

	for (iov = msg->msg_iter.iov, iovlen = msg->msg_iter.nr_segs; iovlen > 0;
	     iovlen--, iov++) {
		unsigned long seglen = iov->iov_len;
		char __user *from = iov->iov_base;
	while (iov_iter_count(&msg->msg_iter)) {
		int len = iov_iter_count(&msg->msg_iter);

		while (seglen) {
			int len = min_t(unsigned long, seglen, limit);
			int newlen;
		if (len > limit)
			len = limit;

			newlen = af_alg_make_sg(&ctx->sgl, from, len, 0);
			if (newlen < 0) {
				err = copied ? 0 : newlen;
		len = af_alg_make_sg(&ctx->sgl, &msg->msg_iter, len);
		if (len < 0) {
			err = copied ? 0 : len;
			goto unlock;
		}

			ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, NULL,
						newlen);
		ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, NULL, len);

			err = af_alg_wait_for_completion(
				crypto_ahash_update(&ctx->req),
		err = af_alg_wait_for_completion(crypto_ahash_update(&ctx->req),
						 &ctx->completion);

		af_alg_free_sg(&ctx->sgl);

		if (err)
			goto unlock;

			seglen -= newlen;
			from += newlen;
			copied += newlen;
		}
		copied += len;
		iov_iter_advance(&msg->msg_iter, len);
	}

	err = 0;
+33 −41
Original line number Diff line number Diff line
@@ -426,19 +426,12 @@ static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock,
		&ctx->req));
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg;
	unsigned long iovlen;
	const struct iovec *iov;
	int err = -EAGAIN;
	int used;
	long copied = 0;

	lock_sock(sk);
	for (iov = msg->msg_iter.iov, iovlen = msg->msg_iter.nr_segs; iovlen > 0;
	     iovlen--, iov++) {
		unsigned long seglen = iov->iov_len;
		char __user *from = iov->iov_base;

		while (seglen) {
	while (iov_iter_count(&msg->msg_iter)) {
		sgl = list_first_entry(&ctx->tsgl,
				       struct skcipher_sg_list, list);
		sg = sgl->sg;
@@ -446,15 +439,16 @@ static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock,
		while (!sg->length)
			sg++;

			if (!ctx->used) {
		used = ctx->used;
		if (!used) {
			err = skcipher_wait_for_data(sk, flags);
			if (err)
				goto unlock;
		}

			used = min_t(unsigned long, ctx->used, seglen);
		used = min_t(unsigned long, used, iov_iter_count(&msg->msg_iter));

			used = af_alg_make_sg(&ctx->rsgl, from, used, 1);
		used = af_alg_make_sg(&ctx->rsgl, &msg->msg_iter, used);
		err = used;
		if (err < 0)
			goto unlock;
@@ -483,10 +477,8 @@ static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock,
			goto unlock;

		copied += used;
			from += used;
			seglen -= used;
		skcipher_pull_sgl(sk, used);
		}
		iov_iter_advance(&msg->msg_iter, used);
	}

	err = 0;
+8 −8
Original line number Diff line number Diff line
@@ -370,12 +370,12 @@ static int __qp_memcpy_to_queue(struct vmci_queue *queue,
			to_copy = size - bytes_copied;

		if (is_iovec) {
			struct iovec *iov = (struct iovec *)src;
			struct msghdr *msg = (struct msghdr *)src;
			int err;

			/* The iovec will track bytes_copied internally. */
			err = memcpy_fromiovec((u8 *)va + page_offset,
					       iov, to_copy);
			err = memcpy_from_msg((u8 *)va + page_offset,
					      msg, to_copy);
			if (err != 0) {
				if (kernel_if->host)
					kunmap(kernel_if->u.h.page[page_index]);
@@ -580,7 +580,7 @@ static int qp_memcpy_from_queue(void *dest,
 */
static int qp_memcpy_to_queue_iov(struct vmci_queue *queue,
				  u64 queue_offset,
				  const void *src,
				  const void *msg,
				  size_t src_offset, size_t size)
{

@@ -588,7 +588,7 @@ static int qp_memcpy_to_queue_iov(struct vmci_queue *queue,
	 * We ignore src_offset because src is really a struct iovec * and will
	 * maintain offset internally.
	 */
	return __qp_memcpy_to_queue(queue, queue_offset, src, size, true);
	return __qp_memcpy_to_queue(queue, queue_offset, msg, size, true);
}

/*
@@ -3223,13 +3223,13 @@ EXPORT_SYMBOL_GPL(vmci_qpair_peek);
 * of bytes enqueued or < 0 on error.
 */
ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
			  void *iov,
			  struct msghdr *msg,
			  size_t iov_size,
			  int buf_type)
{
	ssize_t result;

	if (!qpair || !iov)
	if (!qpair)
		return VMCI_ERROR_INVALID_ARGS;

	qp_lock(qpair);
@@ -3238,7 +3238,7 @@ ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
		result = qp_enqueue_locked(qpair->produce_q,
					   qpair->consume_q,
					   qpair->produce_q_size,
					   iov, iov_size,
					   msg, iov_size,
					   qp_memcpy_to_queue_iov);

		if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
+28 −63
Original line number Diff line number Diff line
@@ -84,10 +84,6 @@ struct vhost_net_ubuf_ref {

struct vhost_net_virtqueue {
	struct vhost_virtqueue vq;
	/* hdr is used to store the virtio header.
	 * Since each iovec has >= 1 byte length, we never need more than
	 * header length entries to store the header. */
	struct iovec hdr[sizeof(struct virtio_net_hdr_mrg_rxbuf)];
	size_t vhost_hlen;
	size_t sock_hlen;
	/* vhost zerocopy support fields below: */
@@ -235,44 +231,6 @@ static bool vhost_sock_zcopy(struct socket *sock)
		sock_flag(sock->sk, SOCK_ZEROCOPY);
}

/* Pop first len bytes from iovec. Return number of segments used. */
static int move_iovec_hdr(struct iovec *from, struct iovec *to,
			  size_t len, int iov_count)
{
	int seg = 0;
	size_t size;

	while (len && seg < iov_count) {
		size = min(from->iov_len, len);
		to->iov_base = from->iov_base;
		to->iov_len = size;
		from->iov_len -= size;
		from->iov_base += size;
		len -= size;
		++from;
		++to;
		++seg;
	}
	return seg;
}
/* Copy iovec entries for len bytes from iovec. */
static void copy_iovec_hdr(const struct iovec *from, struct iovec *to,
			   size_t len, int iovcount)
{
	int seg = 0;
	size_t size;

	while (len && seg < iovcount) {
		size = min(from->iov_len, len);
		to->iov_base = from->iov_base;
		to->iov_len = size;
		len -= size;
		++from;
		++to;
		++seg;
	}
}

/* In case of DMA done not in order in lower device driver for some reason.
 * upend_idx is used to track end of used idx, done_idx is used to track head
 * of used idx. Once lower device DMA done contiguously, we will signal KVM
@@ -336,7 +294,7 @@ static void handle_tx(struct vhost_net *net)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
	struct vhost_virtqueue *vq = &nvq->vq;
	unsigned out, in, s;
	unsigned out, in;
	int head;
	struct msghdr msg = {
		.msg_name = NULL,
@@ -395,16 +353,17 @@ static void handle_tx(struct vhost_net *net)
			break;
		}
		/* Skip header. TODO: support TSO. */
		s = move_iovec_hdr(vq->iov, nvq->hdr, hdr_size, out);
		len = iov_length(vq->iov, out);
		iov_iter_init(&msg.msg_iter, WRITE, vq->iov, out, len);
		iov_iter_advance(&msg.msg_iter, hdr_size);
		/* Sanity check */
		if (!len) {
		if (!iov_iter_count(&msg.msg_iter)) {
			vq_err(vq, "Unexpected header len for TX: "
			       "%zd expected %zd\n",
			       iov_length(nvq->hdr, s), hdr_size);
			       len, hdr_size);
			break;
		}
		len = iov_iter_count(&msg.msg_iter);

		zcopy_used = zcopy && len >= VHOST_GOODCOPY_LEN
				   && (nvq->upend_idx + 1) % UIO_MAXIOV !=
@@ -569,9 +528,9 @@ static void handle_rx(struct vhost_net *net)
		.msg_controllen = 0,
		.msg_flags = MSG_DONTWAIT,
	};
	struct virtio_net_hdr_mrg_rxbuf hdr = {
		.hdr.flags = 0,
		.hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE
	struct virtio_net_hdr hdr = {
		.flags = 0,
		.gso_type = VIRTIO_NET_HDR_GSO_NONE
	};
	size_t total_len = 0;
	int err, mergeable;
@@ -579,6 +538,7 @@ static void handle_rx(struct vhost_net *net)
	size_t vhost_hlen, sock_hlen;
	size_t vhost_len, sock_len;
	struct socket *sock;
	struct iov_iter fixup;

	mutex_lock(&vq->mutex);
	sock = vq->private_data;
@@ -623,14 +583,19 @@ static void handle_rx(struct vhost_net *net)
			break;
		}
		/* We don't need to be notified again. */
		if (unlikely((vhost_hlen)))
			/* Skip header. TODO: support TSO. */
			move_iovec_hdr(vq->iov, nvq->hdr, vhost_hlen, in);
		else
			/* Copy the header for use in VIRTIO_NET_F_MRG_RXBUF:
			 * needed because recvmsg can modify msg_iov. */
			copy_iovec_hdr(vq->iov, nvq->hdr, sock_hlen, in);
		iov_iter_init(&msg.msg_iter, READ, vq->iov, in, sock_len);
		iov_iter_init(&msg.msg_iter, READ, vq->iov, in, vhost_len);
		fixup = msg.msg_iter;
		if (unlikely((vhost_hlen))) {
			/* We will supply the header ourselves
			 * TODO: support TSO.
			 */
			iov_iter_advance(&msg.msg_iter, vhost_hlen);
		} else {
			/* It'll come from socket; we'll need to patch
			 * ->num_buffers over if VIRTIO_NET_F_MRG_RXBUF
			 */
			iov_iter_advance(&fixup, sizeof(hdr));
		}
		err = sock->ops->recvmsg(NULL, sock, &msg,
					 sock_len, MSG_DONTWAIT | MSG_TRUNC);
		/* Userspace might have consumed the packet meanwhile:
@@ -642,18 +607,18 @@ static void handle_rx(struct vhost_net *net)
			vhost_discard_vq_desc(vq, headcount);
			continue;
		}
		/* Supply virtio_net_hdr if VHOST_NET_F_VIRTIO_NET_HDR */
		if (unlikely(vhost_hlen) &&
		    memcpy_toiovecend(nvq->hdr, (unsigned char *)&hdr, 0,
				      vhost_hlen)) {
		    copy_to_iter(&hdr, sizeof(hdr), &fixup) != sizeof(hdr)) {
			vq_err(vq, "Unable to write vnet_hdr at addr %p\n",
			       vq->iov->iov_base);
			break;
		}
		/* TODO: Should check and handle checksum. */
		/* Supply (or replace) ->num_buffers if VIRTIO_NET_F_MRG_RXBUF
		 * TODO: Should check and handle checksum.
		 */
		if (likely(mergeable) &&
		    memcpy_toiovecend(nvq->hdr, (unsigned char *)&headcount,
				      offsetof(typeof(hdr), num_buffers),
				      sizeof hdr.num_buffers)) {
		    copy_to_iter(&headcount, 2, &fixup) != 2) {
			vq_err(vq, "Failed num_buffers write");
			vhost_discard_vq_desc(vq, headcount);
			break;
Loading