
Commit 1756ac3d authored by Linus Torvalds
* 'virtio' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus: (27 commits)
  drivers/char: Eliminate use after free
  virtio: console: Accept console size along with resize control message
  virtio: console: Store each console's size in the console structure
  virtio: console: Resize console port 0 on config intr only if multiport is off
  virtio: console: Add support for nonblocking write()s
  virtio: console: Rename wait_is_over() to will_read_block()
  virtio: console: Don't always create a port 0 if using multiport
  virtio: console: Use a control message to add ports
  virtio: console: Move code around for future patches
  virtio: console: Remove config work handler
  virtio: console: Don't call hvc_remove() on unplugging console ports
  virtio: console: Return -EPIPE to hvc_console if we lost the connection
  virtio: console: Let host know of port or device add failures
  virtio: console: Add a __send_control_msg() that can send messages without a valid port
  virtio: Revert "virtio: disable multiport console support."
  virtio: add_buf_gfp
  trans_virtio: use virtqueue_xxx wrappers
  virtio-rng: use virtqueue_xxx wrappers
  virtio_ring: remove a level of indirection
  virtio_net: use virtqueue_xxx wrappers
  ...

Fix up conflicts in drivers/net/virtio_net.c due to the new virtqueue_xxx
wrapper changes conflicting with some other cleanups.
parents 98edb6ca 0643e4c6
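
Nearly all of the driver churn merged here is the same mechanical substitution: call sites stop reaching through the per-queue vq_ops function table and call the new virtqueue_* wrappers directly (the "virtio_ring: remove a level of indirection" commit above). A minimal before/after sketch of the pattern, using only calls that appear in the diffs below:

/* Before: every operation takes two pointer hops through vq_ops. */
err = vq->vq_ops->add_buf(vq, sg, out, in, data);
vq->vq_ops->kick(vq);
buf = vq->vq_ops->get_buf(vq, &len);

/* After: direct wrapper calls on the virtqueue itself. */
err = virtqueue_add_buf(vq, sg, out, in, data);
virtqueue_kick(vq);
buf = virtqueue_get_buf(vq, &len);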
+43 −3	drivers/block/virtio_blk.c (file name inferred from hunk contents)
@@ -50,7 +50,7 @@ static void blk_done(struct virtqueue *vq)
 	unsigned long flags;
 
 	spin_lock_irqsave(&vblk->lock, flags);
-	while ((vbr = vblk->vq->vq_ops->get_buf(vblk->vq, &len)) != NULL) {
+	while ((vbr = virtqueue_get_buf(vblk->vq, &len)) != NULL) {
 		int error;
 
 		switch (vbr->status) {
@@ -70,6 +70,8 @@ static void blk_done(struct virtqueue *vq)
 			vbr->req->sense_len = vbr->in_hdr.sense_len;
 			vbr->req->errors = vbr->in_hdr.errors;
 		}
+		if (blk_special_request(vbr->req))
+			vbr->req->errors = (error != 0);
 
 		__blk_end_request_all(vbr->req, error);
 		list_del(&vbr->list);
@@ -103,6 +105,11 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
 		vbr->out_hdr.sector = 0;
 		vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
 		break;
+	case REQ_TYPE_SPECIAL:
+		vbr->out_hdr.type = VIRTIO_BLK_T_GET_ID;
+		vbr->out_hdr.sector = 0;
+		vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
+		break;
 	case REQ_TYPE_LINUX_BLOCK:
 		if (req->cmd[0] == REQ_LB_OP_FLUSH) {
 			vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH;
@@ -151,7 +158,7 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
 		}
 	}
 
-	if (vblk->vq->vq_ops->add_buf(vblk->vq, vblk->sg, out, in, vbr) < 0) {
+	if (virtqueue_add_buf(vblk->vq, vblk->sg, out, in, vbr) < 0) {
 		mempool_free(vbr, vblk->pool);
 		return false;
 	}
@@ -180,7 +187,7 @@ static void do_virtblk_request(struct request_queue *q)
 	}
 
 	if (issued)
-		vblk->vq->vq_ops->kick(vblk->vq);
+		virtqueue_kick(vblk->vq);
 }
 
 static void virtblk_prepare_flush(struct request_queue *q, struct request *req)
@@ -189,12 +196,45 @@ static void virtblk_prepare_flush(struct request_queue *q, struct request *req)
 	req->cmd[0] = REQ_LB_OP_FLUSH;
 }
 
+/* return id (s/n) string for *disk to *id_str
+ */
+static int virtblk_get_id(struct gendisk *disk, char *id_str)
+{
+	struct virtio_blk *vblk = disk->private_data;
+	struct request *req;
+	struct bio *bio;
+
+	bio = bio_map_kern(vblk->disk->queue, id_str, VIRTIO_BLK_ID_BYTES,
+			   GFP_KERNEL);
+	if (IS_ERR(bio))
+		return PTR_ERR(bio);
+
+	req = blk_make_request(vblk->disk->queue, bio, GFP_KERNEL);
+	if (IS_ERR(req)) {
+		bio_put(bio);
+		return PTR_ERR(req);
+	}
+
+	req->cmd_type = REQ_TYPE_SPECIAL;
+	return blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
+}
+
 static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
 			 unsigned cmd, unsigned long data)
 {
 	struct gendisk *disk = bdev->bd_disk;
 	struct virtio_blk *vblk = disk->private_data;
 
+	if (cmd == 0x56424944) { /* 'VBID' */
+		void __user *usr_data = (void __user *)data;
+		char id_str[VIRTIO_BLK_ID_BYTES];
+		int err;
+
+		err = virtblk_get_id(disk, id_str);
+		if (!err && copy_to_user(usr_data, id_str, VIRTIO_BLK_ID_BYTES))
+			err = -EFAULT;
+		return err;
+	}
	/*
	 * Only allow the generic SCSI ioctls if the host can support it.
	 */
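
Beyond the wrapper rename, the virtio_blk hunks above add a virtblk_get_id() helper plus an ioctl (cmd 0x56424944, ASCII 'VBID') that returns the device's ID/serial string to userspace. A hedged userspace sketch of how a caller might exercise it; the 20-byte length assumes VIRTIO_BLK_ID_BYTES is 20, and the device path is illustrative:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	char id[21];				/* assumed VIRTIO_BLK_ID_BYTES (20) + NUL */
	int fd = open("/dev/vda", O_RDONLY);	/* illustrative device node */

	if (fd < 0)
		return 1;
	memset(id, 0, sizeof(id));
	if (ioctl(fd, 0x56424944 /* 'VBID', as in the hunk above */, id) == 0)
		printf("serial: %s\n", id);
	close(fd);
	return 0;
}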
+3 −3	drivers/char/hw_random/virtio-rng.c (file name inferred from hunk contents)
@@ -32,7 +32,7 @@ static bool busy;
 static void random_recv_done(struct virtqueue *vq)
 {
 	/* We can get spurious callbacks, e.g. shared IRQs + virtio_pci. */
-	if (!vq->vq_ops->get_buf(vq, &data_avail))
+	if (!virtqueue_get_buf(vq, &data_avail))
 		return;
 
 	complete(&have_data);
@@ -46,10 +46,10 @@ static void register_buffer(u8 *buf, size_t size)
 	sg_init_one(&sg, buf, size);
 
 	/* There should always be room for one buffer. */
-	if (vq->vq_ops->add_buf(vq, &sg, 0, 1, buf) < 0)
+	if (virtqueue_add_buf(vq, &sg, 0, 1, buf) < 0)
 		BUG();
 
-	vq->vq_ops->kick(vq);
+	virtqueue_kick(vq);
 }
 
 static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait)
+377 −323	drivers/char/virtio_console.c (file name inferred from the commit list above; diff collapsed: preview size limit exceeded)

+23 −23	drivers/net/virtio_net.c
@@ -122,7 +122,7 @@ static void skb_xmit_done(struct virtqueue *svq)
 	struct virtnet_info *vi = svq->vdev->priv;
 
 	/* Suppress further interrupts. */
-	svq->vq_ops->disable_cb(svq);
+	virtqueue_disable_cb(svq);
 
 	/* We were probably waiting for more output buffers. */
 	netif_wake_queue(vi->dev);
@@ -210,7 +210,7 @@ static int receive_mergeable(struct virtnet_info *vi, struct sk_buff *skb)
 			return -EINVAL;
 		}
 
-		page = vi->rvq->vq_ops->get_buf(vi->rvq, &len);
+		page = virtqueue_get_buf(vi->rvq, &len);
 		if (!page) {
 			pr_debug("%s: rx error: %d buffers missing\n",
 				 skb->dev->name, hdr->mhdr.num_buffers);
@@ -340,7 +340,7 @@ static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp)
 
 	skb_to_sgvec(skb, vi->rx_sg + 1, 0, skb->len);
 
-	err = vi->rvq->vq_ops->add_buf(vi->rvq, vi->rx_sg, 0, 2, skb);
+	err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, 2, skb);
 	if (err < 0)
 		dev_kfree_skb(skb);
 
@@ -385,7 +385,7 @@ static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp)
 
 	/* chain first in list head */
 	first->private = (unsigned long)list;
-	err = vi->rvq->vq_ops->add_buf(vi->rvq, vi->rx_sg, 0, MAX_SKB_FRAGS + 2,
+	err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, MAX_SKB_FRAGS + 2,
 				       first);
 	if (err < 0)
 		give_pages(vi, first);
@@ -404,7 +404,7 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi, gfp_t gfp)
 
 	sg_init_one(vi->rx_sg, page_address(page), PAGE_SIZE);
 
-	err = vi->rvq->vq_ops->add_buf(vi->rvq, vi->rx_sg, 0, 1, page);
+	err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, 1, page);
 	if (err < 0)
 		give_pages(vi, page);
 
@@ -433,7 +433,7 @@ static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
 	} while (err > 0);
 	if (unlikely(vi->num > vi->max))
 		vi->max = vi->num;
-	vi->rvq->vq_ops->kick(vi->rvq);
+	virtqueue_kick(vi->rvq);
 	return !oom;
 }
 
@@ -442,7 +442,7 @@ static void skb_recv_done(struct virtqueue *rvq)
 	struct virtnet_info *vi = rvq->vdev->priv;
 	/* Schedule NAPI, Suppress further interrupts if successful. */
 	if (napi_schedule_prep(&vi->napi)) {
-		rvq->vq_ops->disable_cb(rvq);
+		virtqueue_disable_cb(rvq);
 		__napi_schedule(&vi->napi);
 	}
 }
@@ -471,7 +471,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
 
 again:
 	while (received < budget &&
-	       (buf = vi->rvq->vq_ops->get_buf(vi->rvq, &len)) != NULL) {
+	       (buf = virtqueue_get_buf(vi->rvq, &len)) != NULL) {
 		receive_buf(vi->dev, buf, len);
 		--vi->num;
 		received++;
@@ -485,9 +485,9 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
 	/* Out of packets? */
 	if (received < budget) {
 		napi_complete(napi);
-		if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq)) &&
+		if (unlikely(!virtqueue_enable_cb(vi->rvq)) &&
 		    napi_schedule_prep(napi)) {
-			vi->rvq->vq_ops->disable_cb(vi->rvq);
+			virtqueue_disable_cb(vi->rvq);
 			__napi_schedule(napi);
 			goto again;
 		}
@@ -501,7 +501,7 @@ static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
 	struct sk_buff *skb;
 	unsigned int len, tot_sgs = 0;
 
-	while ((skb = vi->svq->vq_ops->get_buf(vi->svq, &len)) != NULL) {
+	while ((skb = virtqueue_get_buf(vi->svq, &len)) != NULL) {
 		pr_debug("Sent skb %p\n", skb);
 		vi->dev->stats.tx_bytes += skb->len;
 		vi->dev->stats.tx_packets++;
@@ -554,7 +554,7 @@ static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
 		sg_set_buf(vi->tx_sg, &hdr->hdr, sizeof hdr->hdr);
 
 	hdr->num_sg = skb_to_sgvec(skb, vi->tx_sg + 1, 0, skb->len) + 1;
-	return vi->svq->vq_ops->add_buf(vi->svq, vi->tx_sg, hdr->num_sg,
+	return virtqueue_add_buf(vi->svq, vi->tx_sg, hdr->num_sg,
 					0, skb);
 }
 
@@ -574,14 +574,14 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (unlikely(capacity < 0)) {
 		netif_stop_queue(dev);
 		dev_warn(&dev->dev, "Unexpected full queue\n");
-		if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
-			vi->svq->vq_ops->disable_cb(vi->svq);
+		if (unlikely(!virtqueue_enable_cb(vi->svq))) {
+			virtqueue_disable_cb(vi->svq);
 			netif_start_queue(dev);
 			goto again;
 		}
 		return NETDEV_TX_BUSY;
 	}
-	vi->svq->vq_ops->kick(vi->svq);
+	virtqueue_kick(vi->svq);
 
 	/* Don't wait up for transmitted skbs to be freed. */
 	skb_orphan(skb);
@@ -591,12 +591,12 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * before it gets out of hand.  Naturally, this wastes entries. */
 	if (capacity < 2+MAX_SKB_FRAGS) {
 		netif_stop_queue(dev);
-		if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
+		if (unlikely(!virtqueue_enable_cb(vi->svq))) {
 			/* More just got used, free them then recheck. */
 			capacity += free_old_xmit_skbs(vi);
 			if (capacity >= 2+MAX_SKB_FRAGS) {
 				netif_start_queue(dev);
-				vi->svq->vq_ops->disable_cb(vi->svq);
+				virtqueue_disable_cb(vi->svq);
 			}
 		}
 	}
@@ -641,7 +641,7 @@ static int virtnet_open(struct net_device *dev)
 	 * now.  virtnet_poll wants re-enable the queue, so we disable here.
 	 * We synchronize against interrupts via NAPI_STATE_SCHED */
 	if (napi_schedule_prep(&vi->napi)) {
-		vi->rvq->vq_ops->disable_cb(vi->rvq);
+		virtqueue_disable_cb(vi->rvq);
 		__napi_schedule(&vi->napi);
 	}
 	return 0;
@@ -678,15 +678,15 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
 		sg_set_buf(&sg[i + 1], sg_virt(s), s->length);
 	sg_set_buf(&sg[out + in - 1], &status, sizeof(status));
 
-	BUG_ON(vi->cvq->vq_ops->add_buf(vi->cvq, sg, out, in, vi) < 0);
+	BUG_ON(virtqueue_add_buf(vi->cvq, sg, out, in, vi) < 0);
 
-	vi->cvq->vq_ops->kick(vi->cvq);
+	virtqueue_kick(vi->cvq);
 
 	/*
 	 * Spin for a response, the kick causes an ioport write, trapping
 	 * into the hypervisor, so the request should be handled immediately.
 	 */
-	while (!vi->cvq->vq_ops->get_buf(vi->cvq, &tmp))
+	while (!virtqueue_get_buf(vi->cvq, &tmp))
 		cpu_relax();
 
 	return status == VIRTIO_NET_OK;
@@ -1003,13 +1003,13 @@ static void free_unused_bufs(struct virtnet_info *vi)
 {
 	void *buf;
 	while (1) {
-		buf = vi->svq->vq_ops->detach_unused_buf(vi->svq);
+		buf = virtqueue_detach_unused_buf(vi->svq);
 		if (!buf)
 			break;
 		dev_kfree_skb(buf);
 	}
 	while (1) {
-		buf = vi->rvq->vq_ops->detach_unused_buf(vi->rvq);
+		buf = virtqueue_detach_unused_buf(vi->rvq);
 		if (!buf)
 			break;
 		if (vi->mergeable_rx_bufs || vi->big_packets)
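
One non-mechanical detail in the virtio_net hunks above: virtqueue_enable_cb(), like the enable_cb op it replaces, returns false when buffers were already pending, so each site that re-enables callbacks immediately rechecks for work instead of sleeping on an interrupt that may never arrive. The RX side of that idiom, condensed from virtnet_poll:

	/* Done polling: re-enable callbacks, but if the host filled the
	 * queue in the meantime, disable them again and keep polling. */
	napi_complete(napi);
	if (unlikely(!virtqueue_enable_cb(vi->rvq)) &&
	    napi_schedule_prep(napi)) {
		virtqueue_disable_cb(vi->rvq);
		__napi_schedule(napi);
		goto again;
	}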
+8 −9	drivers/virtio/virtio_balloon.c (file name inferred from hunk contents)
@@ -75,7 +75,7 @@ static void balloon_ack(struct virtqueue *vq)
 	struct virtio_balloon *vb;
 	unsigned int len;
 
-	vb = vq->vq_ops->get_buf(vq, &len);
+	vb = virtqueue_get_buf(vq, &len);
 	if (vb)
 		complete(&vb->acked);
 }
@@ -89,9 +89,9 @@ static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq)
 	init_completion(&vb->acked);
 
 	/* We should always be able to add one buffer to an empty queue. */
-	if (vq->vq_ops->add_buf(vq, &sg, 1, 0, vb) < 0)
+	if (virtqueue_add_buf(vq, &sg, 1, 0, vb) < 0)
 		BUG();
-	vq->vq_ops->kick(vq);
+	virtqueue_kick(vq);
 
 	/* When host has read buffer, this completes via balloon_ack */
 	wait_for_completion(&vb->acked);
@@ -204,7 +204,7 @@ static void stats_request(struct virtqueue *vq)
 	struct virtio_balloon *vb;
 	unsigned int len;
 
-	vb = vq->vq_ops->get_buf(vq, &len);
+	vb = virtqueue_get_buf(vq, &len);
 	if (!vb)
 		return;
 	vb->need_stats_update = 1;
@@ -221,9 +221,9 @@ static void stats_handle_request(struct virtio_balloon *vb)
 
 	vq = vb->stats_vq;
 	sg_init_one(&sg, vb->stats, sizeof(vb->stats));
-	if (vq->vq_ops->add_buf(vq, &sg, 1, 0, vb) < 0)
+	if (virtqueue_add_buf(vq, &sg, 1, 0, vb) < 0)
 		BUG();
-	vq->vq_ops->kick(vq);
+	virtqueue_kick(vq);
 }
 
 static void virtballoon_changed(struct virtio_device *vdev)
@@ -314,10 +314,9 @@ static int virtballoon_probe(struct virtio_device *vdev)
 		 * use it to signal us later.
 		 */
 		sg_init_one(&sg, vb->stats, sizeof vb->stats);
-		if (vb->stats_vq->vq_ops->add_buf(vb->stats_vq,
-						  &sg, 1, 0, vb) < 0)
+		if (virtqueue_add_buf(vb->stats_vq, &sg, 1, 0, vb) < 0)
 			BUG();
-		vb->stats_vq->vq_ops->kick(vb->stats_vq);
+		virtqueue_kick(vb->stats_vq);
 	}
 
 	vb->thread = kthread_run(balloon, vb, "vballoon");