Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 16382e17 authored by Linus Torvalds
Browse files
Pull iov_iter updates from Al Viro:

 - bio_{map,copy}_user_iov() series; those are cleanups - fixes from the
   same pile went into mainline (and stable) in late September.

 - fs/iomap.c iov_iter-related fixes

 - new primitive - iov_iter_for_each_range(), which applies a function
   to kernel-mapped segments of an iov_iter.

   Usable for kvec and bvec ones, the latter does kmap()/kunmap() around
   the callback. _Not_ usable for iovec- or pipe-backed iov_iter; the
   latter is not hard to fix if the need ever appears, the former is by
   design.

   Another related primitive will have to wait for the next cycle - it
   passes page + offset + size instead of pointer + size, and that one
   will be usable for everything _except_ kvec. Unfortunately, that one
   didn't get exposure in -next yet, so...

 - a bit more lustre iov_iter work, including a use case for
   iov_iter_for_each_range() (checksum calculation)

 - vhost/scsi leak fix in failure exit

 - misc cleanups and detritectomy...

* 'work.iov_iter' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs: (21 commits)
  iomap_dio_actor(): fix iov_iter bugs
  switch ksocknal_lib_recv_...() to use of iov_iter_for_each_range()
  lustre: switch struct ksock_conn to iov_iter
  vhost/scsi: switch to iov_iter_get_pages()
  fix a page leak in vhost_scsi_iov_to_sgl() error recovery
  new primitive: iov_iter_for_each_range()
  lnet_return_rx_credits_locked: don't abuse list_entry
  xen: don't open-code iov_iter_kvec()
  orangefs: remove detritus from struct orangefs_kiocb_s
  kill iov_shorten()
  bio_alloc_map_data(): do bmd->iter setup right there
  bio_copy_user_iov(): saner bio size calculation
  bio_map_user_iov(): get rid of copying iov_iter
  bio_copy_from_iter(): get rid of copying iov_iter
  move more stuff down into bio_copy_user_iov()
  blk_rq_map_user_iov(): move iov_iter_advance() down
  bio_map_user_iov(): get rid of the iov_for_each()
  bio_map_user_iov(): move alignment check into the main loop
  don't rely upon subsequent bio_add_pc_page() calls failing
  ... and with iov_iter_get_pages_alloc() it becomes even simpler
  ...
parents 93f30c73 cfe057f7
Loading
Loading
Loading
Loading
+73 −119
Original line number Diff line number Diff line
@@ -1062,14 +1062,21 @@ struct bio_map_data {
	struct iovec iov[];
};

static struct bio_map_data *bio_alloc_map_data(unsigned int iov_count,
static struct bio_map_data *bio_alloc_map_data(struct iov_iter *data,
					       gfp_t gfp_mask)
{
	if (iov_count > UIO_MAXIOV)
	struct bio_map_data *bmd;
	if (data->nr_segs > UIO_MAXIOV)
		return NULL;

	return kmalloc(sizeof(struct bio_map_data) +
		       sizeof(struct iovec) * iov_count, gfp_mask);
	bmd = kmalloc(sizeof(struct bio_map_data) +
		       sizeof(struct iovec) * data->nr_segs, gfp_mask);
	if (!bmd)
		return NULL;
	memcpy(bmd->iov, data->iov, sizeof(struct iovec) * data->nr_segs);
	bmd->iter = *data;
	bmd->iter.iov = bmd->iov;
	return bmd;
}

/**
@@ -1080,7 +1087,7 @@ static struct bio_map_data *bio_alloc_map_data(unsigned int iov_count,
 * Copy all pages from iov_iter to bio.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_from_iter(struct bio *bio, struct iov_iter iter)
static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
{
	int i;
	struct bio_vec *bvec;
@@ -1091,9 +1098,9 @@ static int bio_copy_from_iter(struct bio *bio, struct iov_iter iter)
		ret = copy_page_from_iter(bvec->bv_page,
					  bvec->bv_offset,
					  bvec->bv_len,
					  &iter);
					  iter);

		if (!iov_iter_count(&iter))
		if (!iov_iter_count(iter))
			break;

		if (ret < bvec->bv_len)
@@ -1187,40 +1194,18 @@ int bio_uncopy_user(struct bio *bio)
 */
struct bio *bio_copy_user_iov(struct request_queue *q,
			      struct rq_map_data *map_data,
			      const struct iov_iter *iter,
			      struct iov_iter *iter,
			      gfp_t gfp_mask)
{
	struct bio_map_data *bmd;
	struct page *page;
	struct bio *bio;
	int i, ret;
	int nr_pages = 0;
	int i = 0, ret;
	int nr_pages;
	unsigned int len = iter->count;
	unsigned int offset = map_data ? offset_in_page(map_data->offset) : 0;

	for (i = 0; i < iter->nr_segs; i++) {
		unsigned long uaddr;
		unsigned long end;
		unsigned long start;

		uaddr = (unsigned long) iter->iov[i].iov_base;
		end = (uaddr + iter->iov[i].iov_len + PAGE_SIZE - 1)
			>> PAGE_SHIFT;
		start = uaddr >> PAGE_SHIFT;

		/*
		 * Overflow, abort
		 */
		if (end < start)
			return ERR_PTR(-EINVAL);

		nr_pages += end - start;
	}

	if (offset)
		nr_pages++;

	bmd = bio_alloc_map_data(iter->nr_segs, gfp_mask);
	bmd = bio_alloc_map_data(iter, gfp_mask);
	if (!bmd)
		return ERR_PTR(-ENOMEM);

@@ -1230,9 +1215,10 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
	 * shortlived one.
	 */
	bmd->is_our_pages = map_data ? 0 : 1;
	memcpy(bmd->iov, iter->iov, sizeof(struct iovec) * iter->nr_segs);
	bmd->iter = *iter;
	bmd->iter.iov = bmd->iov;

	nr_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE);
	if (nr_pages > BIO_MAX_PAGES)
		nr_pages = BIO_MAX_PAGES;

	ret = -ENOMEM;
	bio = bio_kmalloc(gfp_mask, nr_pages);
@@ -1281,17 +1267,24 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
	if (ret)
		goto cleanup;

	if (map_data)
		map_data->offset += bio->bi_iter.bi_size;

	/*
	 * success
	 */
	if (((iter->type & WRITE) && (!map_data || !map_data->null_mapped)) ||
	    (map_data && map_data->from_user)) {
		ret = bio_copy_from_iter(bio, *iter);
		ret = bio_copy_from_iter(bio, iter);
		if (ret)
			goto cleanup;
	} else {
		iov_iter_advance(iter, bio->bi_iter.bi_size);
	}

	bio->bi_private = bmd;
	if (map_data && map_data->null_mapped)
		bio_set_flag(bio, BIO_NULL_MAPPED);
	return bio;
cleanup:
	if (!map_data)
@@ -1312,88 +1305,48 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
 *	device. Returns an error pointer in case of error.
 */
struct bio *bio_map_user_iov(struct request_queue *q,
			     const struct iov_iter *iter,
			     struct iov_iter *iter,
			     gfp_t gfp_mask)
{
	int j;
	int nr_pages = 0;
	struct page **pages;
	struct bio *bio;
	int cur_page = 0;
	int ret, offset;
	struct iov_iter i;
	struct iovec iov;
	int ret;
	struct bio_vec *bvec;

	iov_for_each(iov, i, *iter) {
		unsigned long uaddr = (unsigned long) iov.iov_base;
		unsigned long len = iov.iov_len;
		unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		unsigned long start = uaddr >> PAGE_SHIFT;

		/*
		 * Overflow, abort
		 */
		if (end < start)
	if (!iov_iter_count(iter))
		return ERR_PTR(-EINVAL);

		nr_pages += end - start;
		/*
		 * buffer must be aligned to at least logical block size for now
		 */
		if (uaddr & queue_dma_alignment(q))
			return ERR_PTR(-EINVAL);
	}

	if (!nr_pages)
		return ERR_PTR(-EINVAL);

	bio = bio_kmalloc(gfp_mask, nr_pages);
	bio = bio_kmalloc(gfp_mask, iov_iter_npages(iter, BIO_MAX_PAGES));
	if (!bio)
		return ERR_PTR(-ENOMEM);

	ret = -ENOMEM;
	pages = kcalloc(nr_pages, sizeof(struct page *), gfp_mask);
	if (!pages)
		goto out;
	while (iov_iter_count(iter)) {
		struct page **pages;
		ssize_t bytes;
		size_t offs, added = 0;
		int npages;

	iov_for_each(iov, i, *iter) {
		unsigned long uaddr = (unsigned long) iov.iov_base;
		unsigned long len = iov.iov_len;
		unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		unsigned long start = uaddr >> PAGE_SHIFT;
		const int local_nr_pages = end - start;
		const int page_limit = cur_page + local_nr_pages;

		ret = get_user_pages_fast(uaddr, local_nr_pages,
				(iter->type & WRITE) != WRITE,
				&pages[cur_page]);
		if (unlikely(ret < local_nr_pages)) {
			for (j = cur_page; j < page_limit; j++) {
				if (!pages[j])
					break;
				put_page(pages[j]);
			}
			ret = -EFAULT;
		bytes = iov_iter_get_pages_alloc(iter, &pages, LONG_MAX, &offs);
		if (unlikely(bytes <= 0)) {
			ret = bytes ? bytes : -EFAULT;
			goto out_unmap;
		}

		offset = offset_in_page(uaddr);
		for (j = cur_page; j < page_limit; j++) {
			unsigned int bytes = PAGE_SIZE - offset;
			unsigned short prev_bi_vcnt = bio->bi_vcnt;
		npages = DIV_ROUND_UP(offs + bytes, PAGE_SIZE);

			if (len <= 0)
				break;
		if (unlikely(offs & queue_dma_alignment(q))) {
			ret = -EINVAL;
			j = 0;
		} else {
			for (j = 0; j < npages; j++) {
				struct page *page = pages[j];
				unsigned int n = PAGE_SIZE - offs;
				unsigned short prev_bi_vcnt = bio->bi_vcnt;

			if (bytes > len)
				bytes = len;
				if (n > bytes)
					n = bytes;

			/*
			 * sorry...
			 */
			if (bio_add_pc_page(q, bio, pages[j], bytes, offset) <
					    bytes)
				if (!bio_add_pc_page(q, bio, page, n, offs))
					break;

				/*
@@ -1401,22 +1354,25 @@ struct bio *bio_map_user_iov(struct request_queue *q,
				 * drop page reference if needed
				 */
				if (bio->bi_vcnt == prev_bi_vcnt)
				put_page(pages[j]);
					put_page(page);

			len -= bytes;
			offset = 0;
				added += n;
				bytes -= n;
				offs = 0;
			}
			iov_iter_advance(iter, added);
		}

		cur_page = j;
		/*
		 * release the pages we didn't map into the bio, if any
		 */
		while (j < page_limit)
		while (j < npages)
			put_page(pages[j++]);
		kvfree(pages);
		/* couldn't stuff something into bio? */
		if (bytes)
			break;
	}

	kfree(pages);

	bio_set_flag(bio, BIO_USER_MAPPED);

	/*
@@ -1432,8 +1388,6 @@ struct bio *bio_map_user_iov(struct request_queue *q,
	bio_for_each_segment_all(bvec, bio, j) {
		put_page(bvec->bv_page);
	}
 out:
	kfree(pages);
	bio_put(bio);
	return ERR_PTR(ret);
}
+0 −7
Original line number Diff line number Diff line
@@ -67,13 +67,6 @@ static int __blk_rq_map_user_iov(struct request *rq,
	bio->bi_opf &= ~REQ_OP_MASK;
	bio->bi_opf |= req_op(rq);

	if (map_data && map_data->null_mapped)
		bio_set_flag(bio, BIO_NULL_MAPPED);

	iov_iter_advance(iter, bio->bi_iter.bi_size);
	if (map_data)
		map_data->offset += bio->bi_iter.bi_size;

	orig_bio = bio;

	/*
+2 −2
Original line number Diff line number Diff line
@@ -1683,10 +1683,10 @@ ksocknal_destroy_conn(struct ksock_conn *conn)
	case SOCKNAL_RX_LNET_PAYLOAD:
		last_rcv = conn->ksnc_rx_deadline -
			   cfs_time_seconds(*ksocknal_tunables.ksnd_timeout);
		CERROR("Completing partial receive from %s[%d], ip %pI4h:%d, with error, wanted: %d, left: %d, last alive is %ld secs ago\n",
		CERROR("Completing partial receive from %s[%d], ip %pI4h:%d, with error, wanted: %zd, left: %d, last alive is %ld secs ago\n",
		       libcfs_id2str(conn->ksnc_peer->ksnp_id), conn->ksnc_type,
		       &conn->ksnc_ipaddr, conn->ksnc_port,
		       conn->ksnc_rx_nob_wanted, conn->ksnc_rx_nob_left,
		       iov_iter_count(&conn->ksnc_rx_to), conn->ksnc_rx_nob_left,
		       cfs_duration_sec(cfs_time_sub(cfs_time_current(),
						     last_rcv)));
		lnet_finalize(conn->ksnc_peer->ksnp_ni,
+2 −7
Original line number Diff line number Diff line
@@ -358,11 +358,7 @@ struct ksock_conn {
	__u8               ksnc_rx_scheduled; /* being progressed */
	__u8               ksnc_rx_state;     /* what is being read */
	int                ksnc_rx_nob_left;  /* # bytes to next hdr/body */
	int                ksnc_rx_nob_wanted;/* bytes actually wanted */
	int                ksnc_rx_niov;      /* # iovec frags */
	struct kvec        *ksnc_rx_iov;      /* the iovec frags */
	int                ksnc_rx_nkiov;     /* # page frags */
	struct bio_vec		*ksnc_rx_kiov;	/* the page frags */
	struct iov_iter    ksnc_rx_to;		/* copy destination */
	union ksock_rxiovspace ksnc_rx_iov_space; /* space for frag descriptors */
	__u32              ksnc_rx_csum;      /* partial checksum for incoming
					       * data
@@ -701,8 +697,7 @@ int ksocknal_lib_setup_sock(struct socket *so);
int ksocknal_lib_send_iov(struct ksock_conn *conn, struct ksock_tx *tx);
int ksocknal_lib_send_kiov(struct ksock_conn *conn, struct ksock_tx *tx);
void ksocknal_lib_eager_ack(struct ksock_conn *conn);
int ksocknal_lib_recv_iov(struct ksock_conn *conn);
int ksocknal_lib_recv_kiov(struct ksock_conn *conn);
int ksocknal_lib_recv(struct ksock_conn *conn);
int ksocknal_lib_get_conn_tunables(struct ksock_conn *conn, int *txmem,
				   int *rxmem, int *nagle);

+32 −125
Original line number Diff line number Diff line
@@ -250,66 +250,16 @@ ksocknal_transmit(struct ksock_conn *conn, struct ksock_tx *tx)
}

static int
ksocknal_recv_iov(struct ksock_conn *conn)
ksocknal_recv_iter(struct ksock_conn *conn)
{
	struct kvec *iov = conn->ksnc_rx_iov;
	int nob;
	int rc;

	LASSERT(conn->ksnc_rx_niov > 0);

	/*
	 * Never touch conn->ksnc_rx_iov or change connection
	 * status inside ksocknal_lib_recv_iov
	 */
	rc = ksocknal_lib_recv_iov(conn);

	if (rc <= 0)
		return rc;

	/* received something... */
	nob = rc;

	conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
	conn->ksnc_rx_deadline =
		cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
	mb();		       /* order with setting rx_started */
	conn->ksnc_rx_started = 1;

	conn->ksnc_rx_nob_wanted -= nob;
	conn->ksnc_rx_nob_left -= nob;

	do {
		LASSERT(conn->ksnc_rx_niov > 0);

		if (nob < (int)iov->iov_len) {
			iov->iov_len -= nob;
			iov->iov_base += nob;
			return -EAGAIN;
		}

		nob -= iov->iov_len;
		conn->ksnc_rx_iov = ++iov;
		conn->ksnc_rx_niov--;
	} while (nob);

	return rc;
}

static int
ksocknal_recv_kiov(struct ksock_conn *conn)
{
	struct bio_vec *kiov = conn->ksnc_rx_kiov;
	int nob;
	int rc;

	LASSERT(conn->ksnc_rx_nkiov > 0);

	/*
	 * Never touch conn->ksnc_rx_kiov or change connection
	 * status inside ksocknal_lib_recv_iov
	 * Never touch conn->ksnc_rx_to or change connection
	 * status inside ksocknal_lib_recv
	 */
	rc = ksocknal_lib_recv_kiov(conn);
	rc = ksocknal_lib_recv(conn);

	if (rc <= 0)
		return rc;
@@ -323,22 +273,11 @@ ksocknal_recv_kiov(struct ksock_conn *conn)
	mb();		       /* order with setting rx_started */
	conn->ksnc_rx_started = 1;

	conn->ksnc_rx_nob_wanted -= nob;
	conn->ksnc_rx_nob_left -= nob;

	do {
		LASSERT(conn->ksnc_rx_nkiov > 0);

		if (nob < (int)kiov->bv_len) {
			kiov->bv_offset += nob;
			kiov->bv_len -= nob;
	iov_iter_advance(&conn->ksnc_rx_to, nob);
	if (iov_iter_count(&conn->ksnc_rx_to))
		return -EAGAIN;
		}

		nob -= kiov->bv_len;
		conn->ksnc_rx_kiov = ++kiov;
		conn->ksnc_rx_nkiov--;
	} while (nob);

	return 1;
}
@@ -348,7 +287,7 @@ ksocknal_receive(struct ksock_conn *conn)
{
	/*
	 * Return 1 on success, 0 on EOF, < 0 on error.
	 * Caller checks ksnc_rx_nob_wanted to determine
	 * Caller checks ksnc_rx_to to determine
	 * progress/completion.
	 */
	int rc;
@@ -365,11 +304,7 @@ ksocknal_receive(struct ksock_conn *conn)
	}

	for (;;) {
		if (conn->ksnc_rx_niov)
			rc = ksocknal_recv_iov(conn);
		else
			rc = ksocknal_recv_kiov(conn);

		rc = ksocknal_recv_iter(conn);
		if (rc <= 0) {
			/* error/EOF or partial receive */
			if (rc == -EAGAIN) {
@@ -383,7 +318,7 @@ ksocknal_receive(struct ksock_conn *conn)

		/* Completed a fragment */

		if (!conn->ksnc_rx_nob_wanted) {
		if (!iov_iter_count(&conn->ksnc_rx_to)) {
			rc = 1;
			break;
		}
@@ -1051,6 +986,7 @@ int
ksocknal_new_packet(struct ksock_conn *conn, int nob_to_skip)
{
	static char ksocknal_slop_buffer[4096];
	struct kvec *kvec = (struct kvec *)&conn->ksnc_rx_iov_space;

	int nob;
	unsigned int niov;
@@ -1071,32 +1007,26 @@ ksocknal_new_packet(struct ksock_conn *conn, int nob_to_skip)
		case  KSOCK_PROTO_V2:
		case  KSOCK_PROTO_V3:
			conn->ksnc_rx_state = SOCKNAL_RX_KSM_HEADER;
			conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
			conn->ksnc_rx_iov[0].iov_base = &conn->ksnc_msg;

			conn->ksnc_rx_nob_wanted = offsetof(struct ksock_msg, ksm_u);
			kvec->iov_base = &conn->ksnc_msg;
			kvec->iov_len = offsetof(struct ksock_msg, ksm_u);
			conn->ksnc_rx_nob_left = offsetof(struct ksock_msg, ksm_u);
			conn->ksnc_rx_iov[0].iov_len = offsetof(struct ksock_msg, ksm_u);
			iov_iter_kvec(&conn->ksnc_rx_to, READ|ITER_KVEC, kvec,
					1, offsetof(struct ksock_msg, ksm_u));
			break;

		case KSOCK_PROTO_V1:
			/* Receiving bare struct lnet_hdr */
			conn->ksnc_rx_state = SOCKNAL_RX_LNET_HEADER;
			conn->ksnc_rx_nob_wanted = sizeof(struct lnet_hdr);
			kvec->iov_base = &conn->ksnc_msg.ksm_u.lnetmsg;
			kvec->iov_len = sizeof(struct lnet_hdr);
			conn->ksnc_rx_nob_left = sizeof(struct lnet_hdr);

			conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
			conn->ksnc_rx_iov[0].iov_base = &conn->ksnc_msg.ksm_u.lnetmsg;
			conn->ksnc_rx_iov[0].iov_len = sizeof(struct lnet_hdr);
			iov_iter_kvec(&conn->ksnc_rx_to, READ|ITER_KVEC, kvec,
					1, sizeof(struct lnet_hdr));
			break;

		default:
			LBUG();
		}
		conn->ksnc_rx_niov = 1;

		conn->ksnc_rx_kiov = NULL;
		conn->ksnc_rx_nkiov = 0;
		conn->ksnc_rx_csum = ~0;
		return 1;
	}
@@ -1107,15 +1037,14 @@ ksocknal_new_packet(struct ksock_conn *conn, int nob_to_skip)
	 */
	conn->ksnc_rx_state = SOCKNAL_RX_SLOP;
	conn->ksnc_rx_nob_left = nob_to_skip;
	conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
	skipped = 0;
	niov = 0;

	do {
		nob = min_t(int, nob_to_skip, sizeof(ksocknal_slop_buffer));

		conn->ksnc_rx_iov[niov].iov_base = ksocknal_slop_buffer;
		conn->ksnc_rx_iov[niov].iov_len  = nob;
		kvec[niov].iov_base = ksocknal_slop_buffer;
		kvec[niov].iov_len  = nob;
		niov++;
		skipped += nob;
		nob_to_skip -= nob;
@@ -1123,16 +1052,14 @@ ksocknal_new_packet(struct ksock_conn *conn, int nob_to_skip)
	} while (nob_to_skip &&    /* mustn't overflow conn's rx iov */
		 niov < sizeof(conn->ksnc_rx_iov_space) / sizeof(struct iovec));

	conn->ksnc_rx_niov = niov;
	conn->ksnc_rx_kiov = NULL;
	conn->ksnc_rx_nkiov = 0;
	conn->ksnc_rx_nob_wanted = skipped;
	iov_iter_kvec(&conn->ksnc_rx_to, READ|ITER_KVEC, kvec, niov, skipped);
	return 0;
}

static int
ksocknal_process_receive(struct ksock_conn *conn)
{
	struct kvec *kvec = (struct kvec *)&conn->ksnc_rx_iov_space;
	struct lnet_hdr *lhdr;
	struct lnet_process_id *id;
	int rc;
@@ -1146,7 +1073,7 @@ ksocknal_process_receive(struct ksock_conn *conn)
		conn->ksnc_rx_state == SOCKNAL_RX_LNET_HEADER ||
		conn->ksnc_rx_state == SOCKNAL_RX_SLOP);
 again:
	if (conn->ksnc_rx_nob_wanted) {
	if (iov_iter_count(&conn->ksnc_rx_to)) {
		rc = ksocknal_receive(conn);

		if (rc <= 0) {
@@ -1171,7 +1098,7 @@ ksocknal_process_receive(struct ksock_conn *conn)
			return (!rc ? -ESHUTDOWN : rc);
		}

		if (conn->ksnc_rx_nob_wanted) {
		if (iov_iter_count(&conn->ksnc_rx_to)) {
			/* short read */
			return -EAGAIN;
		}
@@ -1234,16 +1161,13 @@ ksocknal_process_receive(struct ksock_conn *conn)
		}

		conn->ksnc_rx_state = SOCKNAL_RX_LNET_HEADER;
		conn->ksnc_rx_nob_wanted = sizeof(struct ksock_lnet_msg);
		conn->ksnc_rx_nob_left = sizeof(struct ksock_lnet_msg);

		conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
		conn->ksnc_rx_iov[0].iov_base = &conn->ksnc_msg.ksm_u.lnetmsg;
		conn->ksnc_rx_iov[0].iov_len = sizeof(struct ksock_lnet_msg);
		kvec->iov_base = &conn->ksnc_msg.ksm_u.lnetmsg;
		kvec->iov_len = sizeof(struct ksock_lnet_msg);

		conn->ksnc_rx_niov = 1;
		conn->ksnc_rx_kiov = NULL;
		conn->ksnc_rx_nkiov = 0;
		iov_iter_kvec(&conn->ksnc_rx_to, READ|ITER_KVEC, kvec,
				1, sizeof(struct ksock_lnet_msg));

		goto again;     /* read lnet header now */

@@ -1345,26 +1269,9 @@ ksocknal_recv(struct lnet_ni *ni, void *private, struct lnet_msg *msg,
	LASSERT(to->nr_segs <= LNET_MAX_IOV);

	conn->ksnc_cookie = msg;
	conn->ksnc_rx_nob_wanted = iov_iter_count(to);
	conn->ksnc_rx_nob_left = rlen;

	if (to->type & ITER_KVEC) {
		conn->ksnc_rx_nkiov = 0;
		conn->ksnc_rx_kiov = NULL;
		conn->ksnc_rx_iov = conn->ksnc_rx_iov_space.iov;
		conn->ksnc_rx_niov =
			lnet_extract_iov(LNET_MAX_IOV, conn->ksnc_rx_iov,
					 to->nr_segs, to->kvec,
					 to->iov_offset, iov_iter_count(to));
	} else {
		conn->ksnc_rx_niov = 0;
		conn->ksnc_rx_iov = NULL;
		conn->ksnc_rx_kiov = conn->ksnc_rx_iov_space.kiov;
		conn->ksnc_rx_nkiov =
			lnet_extract_kiov(LNET_MAX_IOV, conn->ksnc_rx_kiov,
					 to->nr_segs, to->bvec,
					 to->iov_offset, iov_iter_count(to));
	}
	conn->ksnc_rx_to = *to;

	LASSERT(conn->ksnc_rx_scheduled);

@@ -2329,12 +2236,12 @@ ksocknal_find_timed_out_conn(struct ksock_peer *peer)
				     conn->ksnc_rx_deadline)) {
			/* Timed out incomplete incoming message */
			ksocknal_conn_addref(conn);
			CNETERR("Timeout receiving from %s (%pI4h:%d), state %d wanted %d left %d\n",
			CNETERR("Timeout receiving from %s (%pI4h:%d), state %d wanted %zd left %d\n",
				libcfs_id2str(peer->ksnp_id),
				&conn->ksnc_ipaddr,
				conn->ksnc_port,
				conn->ksnc_rx_state,
				conn->ksnc_rx_nob_wanted,
				iov_iter_count(&conn->ksnc_rx_to),
				conn->ksnc_rx_nob_left);
			return conn;
		}
Loading