
Commit ab70537c authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus
* git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus:
  lguest: struct device - replace bus_id with dev_name()
  lguest: move the initial guest page table creation code to the host
  kvm-s390: implement config_changed for virtio on s390
  virtio_console: support console resizing
  virtio: add PCI device release() function
  virtio_blk: fix type warning
  virtio: block: dynamic maximum segments
  virtio: set max_segment_size and max_sectors to infinite.
  virtio: avoid implicit use of Linux page size in balloon interface
  virtio: hand virtio ring alignment as argument to vring_new_virtqueue
  virtio: use KVM_S390_VIRTIO_RING_ALIGN instead of relying on pagesize
  virtio: use LGUEST_VRING_ALIGN instead of relying on pagesize
  virtio: Don't use PAGE_SIZE for vring alignment in virtio_pci.
  virtio: rename 'pagesize' arg to vring_init/vring_size
  virtio: Don't use PAGE_SIZE in virtio_pci.c
  virtio: struct device - replace bus_id with dev_name(), dev_set_name()
  virtio-pci queue allocation not page-aligned
parents 14a3c4ab bda53cd5
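
The common thread in the vring patches below is that the ring layout's alignment becomes an explicit argument instead of an implicit PAGE_SIZE, so a transport can fix it in its device ABI. A minimal sketch of using the reworked helpers from <linux/virtio_ring.h>; MY_VRING_ALIGN and my_setup_ring() are hypothetical names, not from this commit:

#include <linux/virtio_ring.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/errno.h>

/* Hypothetical transport glue: the ring alignment is a property of the
 * device ABI (like LGUEST_VRING_ALIGN or KVM_S390_VIRTIO_RING_ALIGN),
 * not of whatever page size the host happens to use. */
#define MY_VRING_ALIGN	4096

static int my_setup_ring(struct vring *vring, unsigned int num_descs)
{
	/* vring_size() now takes the alignment explicitly... */
	unsigned int bytes = vring_size(num_descs, MY_VRING_ALIGN);
	void *p = kzalloc(PAGE_ALIGN(bytes), GFP_KERNEL);

	if (!p)
		return -ENOMEM;
	/* ...and so does vring_init(), which lays out the descriptor
	 * table, avail ring and used ring in the buffer, padding the
	 * used ring to the given alignment. */
	vring_init(vring, num_descs, p, MY_VRING_ALIGN);
	return 0;
}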
Documentation/lguest/lguest.c +9 −57
@@ -481,51 +481,6 @@ static unsigned long load_initrd(const char *name, unsigned long mem)
 	/* We return the initrd size. */
 	return len;
 }
-
-/* Once we know how much memory we have we can construct simple linear page
- * tables which set virtual == physical which will get the Guest far enough
- * into the boot to create its own.
- *
- * We lay them out of the way, just below the initrd (which is why we need to
- * know its size here). */
-static unsigned long setup_pagetables(unsigned long mem,
-				      unsigned long initrd_size)
-{
-	unsigned long *pgdir, *linear;
-	unsigned int mapped_pages, i, linear_pages;
-	unsigned int ptes_per_page = getpagesize()/sizeof(void *);
-
-	mapped_pages = mem/getpagesize();
-
-	/* Each PTE page can map ptes_per_page pages: how many do we need? */
-	linear_pages = (mapped_pages + ptes_per_page-1)/ptes_per_page;
-
-	/* We put the toplevel page directory page at the top of memory. */
-	pgdir = from_guest_phys(mem) - initrd_size - getpagesize();
-
-	/* Now we use the next linear_pages pages as pte pages */
-	linear = (void *)pgdir - linear_pages*getpagesize();
-
-	/* Linear mapping is easy: put every page's address into the mapping in
-	 * order.  PAGE_PRESENT contains the flags Present, Writable and
-	 * Executable. */
-	for (i = 0; i < mapped_pages; i++)
-		linear[i] = ((i * getpagesize()) | PAGE_PRESENT);
-
-	/* The top level points to the linear page table pages above. */
-	for (i = 0; i < mapped_pages; i += ptes_per_page) {
-		pgdir[i/ptes_per_page]
-			= ((to_guest_phys(linear) + i*sizeof(void *))
-			   | PAGE_PRESENT);
-	}
-
-	verbose("Linear mapping of %u pages in %u pte pages at %#lx\n",
-		mapped_pages, linear_pages, to_guest_phys(linear));
-
-	/* We return the top level (guest-physical) address: the kernel needs
-	 * to know where it is. */
-	return to_guest_phys(pgdir);
-}
 /*:*/
 
 /* Simple routine to roll all the commandline arguments together with spaces
@@ -548,13 +503,13 @@ static void concat(char *dst, char *args[])
 
 /*L:185 This is where we actually tell the kernel to initialize the Guest.  We
  * saw the arguments it expects when we looked at initialize() in lguest_user.c:
- * the base of Guest "physical" memory, the top physical page to allow, the
- * top level pagetable and the entry point for the Guest. */
-static int tell_kernel(unsigned long pgdir, unsigned long start)
+ * the base of Guest "physical" memory, the top physical page to allow and the
+ * entry point for the Guest. */
+static int tell_kernel(unsigned long start)
 {
 	unsigned long args[] = { LHREQ_INITIALIZE,
 				 (unsigned long)guest_base,
-				 guest_limit / getpagesize(), pgdir, start };
+				 guest_limit / getpagesize(), start };
 	int fd;
 
 	verbose("Guest: %p - %p (%#lx)\n",
@@ -1030,7 +985,7 @@ static void update_device_status(struct device *dev)
 		/* Zero out the virtqueues. */
 		for (vq = dev->vq; vq; vq = vq->next) {
 			memset(vq->vring.desc, 0,
-			       vring_size(vq->config.num, getpagesize()));
+			       vring_size(vq->config.num, LGUEST_VRING_ALIGN));
 			lg_last_avail(vq) = 0;
 		}
 	} else if (dev->desc->status & VIRTIO_CONFIG_S_FAILED) {
@@ -1211,7 +1166,7 @@ static void add_virtqueue(struct device *dev, unsigned int num_descs,
 	void *p;
 
 	/* First we need some memory for this virtqueue. */
-	pages = (vring_size(num_descs, getpagesize()) + getpagesize() - 1)
+	pages = (vring_size(num_descs, LGUEST_VRING_ALIGN) + getpagesize() - 1)
 		/ getpagesize();
 	p = get_pages(pages);
 
@@ -1228,7 +1183,7 @@ static void add_virtqueue(struct device *dev, unsigned int num_descs,
 	vq->config.pfn = to_guest_phys(p) / getpagesize();
 
 	/* Initialize the vring. */
-	vring_init(&vq->vring, num_descs, p, getpagesize());
+	vring_init(&vq->vring, num_descs, p, LGUEST_VRING_ALIGN);
 
 	/* Append virtqueue to this device's descriptor.  We use
 	 * device_config() to get the end of the device's current virtqueues;
@@ -1941,7 +1896,7 @@ int main(int argc, char *argv[])
 {
 	/* Memory, top-level pagetable, code startpoint and size of the
 	 * (optional) initrd. */
-	unsigned long mem = 0, pgdir, start, initrd_size = 0;
+	unsigned long mem = 0, start, initrd_size = 0;
 	/* Two temporaries and the /dev/lguest file descriptor. */
 	int i, c, lguest_fd;
 	/* The boot information for the Guest. */
@@ -2040,9 +1995,6 @@ int main(int argc, char *argv[])
 		boot->hdr.type_of_loader = 0xFF;
 	}
 
-	/* Set up the initial linear pagetables, starting below the initrd. */
-	pgdir = setup_pagetables(mem, initrd_size);
-
 	/* The Linux boot header contains an "E820" memory map: ours is a
 	 * simple, single region. */
 	boot->e820_entries = 1;
@@ -2064,7 +2016,7 @@ int main(int argc, char *argv[])
 
 	/* We tell the kernel to initialize the Guest: this returns the open
 	 * /dev/lguest file descriptor. */
-	lguest_fd = tell_kernel(pgdir, start);
+	lguest_fd = tell_kernel(start);
 
 	/* We clone off a thread, which wakes the Launcher whenever one of the
 	 * input file descriptors needs attention.  We call this the Waker, and
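
The deleted setup_pagetables() is easy to sanity-check by hand: a 128 MB guest with 4 KB pages and 4-byte PTEs needs 32 PTE pages plus one top-level page, laid out just below the initrd. A standalone check of that arithmetic, using example values only (the launcher was i386-only, hence 4-byte PTEs):

#include <stdio.h>

/* Standalone check of the deleted setup_pagetables() arithmetic,
 * with example values: 128 MB guest, 4 KB pages, 4-byte PTEs. */
int main(void)
{
	unsigned long page = 4096, mem = 128UL << 20;
	unsigned long ptes_per_page = page / 4;		/* 1024 */
	unsigned long mapped_pages = mem / page;	/* 32768 */
	unsigned long linear_pages =
		(mapped_pages + ptes_per_page - 1) / ptes_per_page;

	/* 32 PTE pages map the whole guest; the page directory sat one
	 * page below the initrd, the PTE pages just below that. */
	printf("%lu pte pages to map %lu pages\n", linear_pages, mapped_pages);
	return 0;
}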
arch/s390/include/asm/kvm_virtio.h +4 −0
@@ -50,6 +50,10 @@ struct kvm_vqconfig {
 #define KVM_S390_VIRTIO_RESET		1
 #define KVM_S390_VIRTIO_SET_STATUS	2
 
+/* The alignment to use between consumer and producer parts of vring.
+ * This is pagesize for historical reasons. */
+#define KVM_S390_VIRTIO_RING_ALIGN	4096
+
 #ifdef __KERNEL__
 /* early virtio console setup */
 #ifdef CONFIG_S390_GUEST
arch/x86/lguest/i386_head.S +0 −15
@@ -30,21 +30,6 @@ ENTRY(lguest_entry)
	movl $lguest_data - __PAGE_OFFSET, %edx
	int $LGUEST_TRAP_ENTRY
 
-	/* The Host put the toplevel pagetable in lguest_data.pgdir.  The movsl
-	 * instruction uses %esi implicitly as the source for the copy we're
-	 * about to do. */
-	movl lguest_data - __PAGE_OFFSET + LGUEST_DATA_pgdir, %esi
-
-	/* Copy first 32 entries of page directory to __PAGE_OFFSET entries.
-	 * This means the first 128M of kernel memory will be mapped at
-	 * PAGE_OFFSET where the kernel expects to run.  This will get it far
-	 * enough through boot to switch to its own pagetables. */
-	movl $32, %ecx
-	movl %esi, %edi
-	addl $((__PAGE_OFFSET >> 22) * 4), %edi
-	rep
-	movsl
-
	/* Set up the initial stack so we can run C code. */
	movl $(init_thread_union+THREAD_SIZE),%esp
 
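
The deleted assembly is dense; in C it amounts to mirroring the first 32 page-directory entries at the __PAGE_OFFSET slots (32 entries times 4 MB per two-level x86-32 PGD entry gives the 128M the comment mentions). A sketch of the equivalent logic, assuming a flat 1024-entry pgdir[] array; the host now writes these entries itself, so the guest no longer needs the copy:

/* C rendering of the removed `rep movsl` copy: mirror the identity
 * mapping of the first 128 MB up at __PAGE_OFFSET, so the kernel can
 * run at its linked virtual address.  pgdir is assumed to be the
 * top-level table the Host installed. */
#define PGD_SHIFT	22	/* 4 MB per PGD entry on 2-level x86-32 */

static void mirror_low_mappings(unsigned long *pgdir,
				unsigned long page_offset)
{
	unsigned int i, base = page_offset >> PGD_SHIFT; /* 768 for 0xC0000000 */

	for (i = 0; i < 32; i++)	/* movl $32, %ecx; rep movsl */
		pgdir[base + i] = pgdir[i];
}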
drivers/block/virtio_blk.c +28 −13
@@ -6,7 +6,6 @@
 #include <linux/virtio_blk.h>
 #include <linux/scatterlist.h>
 
-#define VIRTIO_MAX_SG	(3+MAX_PHYS_SEGMENTS)
 #define PART_BITS 4
 
 static int major, index;
@@ -26,8 +25,11 @@ struct virtio_blk
 
 	mempool_t *pool;
 
+	/* What host tells us, plus 2 for header & tailer. */
+	unsigned int sg_elems;
+
 	/* Scatterlist: can be too big for stack. */
-	struct scatterlist sg[VIRTIO_MAX_SG];
+	struct scatterlist sg[/*sg_elems*/];
 };
 
 struct virtblk_req
@@ -97,8 +99,6 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
 	if (blk_barrier_rq(vbr->req))
 		vbr->out_hdr.type |= VIRTIO_BLK_T_BARRIER;
 
-	/* This init could be done at vblk creation time */
-	sg_init_table(vblk->sg, VIRTIO_MAX_SG);
 	sg_set_buf(&vblk->sg[0], &vbr->out_hdr, sizeof(vbr->out_hdr));
 	num = blk_rq_map_sg(q, vbr->req, vblk->sg+1);
 	sg_set_buf(&vblk->sg[num+1], &vbr->status, sizeof(vbr->status));
@@ -130,7 +130,7 @@ static void do_virtblk_request(struct request_queue *q)
 
 	while ((req = elv_next_request(q)) != NULL) {
 		vblk = req->rq_disk->private_data;
-		BUG_ON(req->nr_phys_segments > ARRAY_SIZE(vblk->sg));
+		BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);
 
 		/* If this request fails, stop queue and wait for something to
 		   finish to restart it. */
@@ -196,12 +196,22 @@ static int virtblk_probe(struct virtio_device *vdev)
 	int err;
 	u64 cap;
 	u32 v;
-	u32 blk_size;
+	u32 blk_size, sg_elems;
 
 	if (index_to_minor(index) >= 1 << MINORBITS)
 		return -ENOSPC;
 
-	vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
+	/* We need to know how many segments before we allocate. */
+	err = virtio_config_val(vdev, VIRTIO_BLK_F_SEG_MAX,
+				offsetof(struct virtio_blk_config, seg_max),
+				&sg_elems);
+	if (err)
+		sg_elems = 1;
+
+	/* We need an extra sg elements at head and tail. */
+	sg_elems += 2;
+	vdev->priv = vblk = kmalloc(sizeof(*vblk) +
+				    sizeof(vblk->sg[0]) * sg_elems, GFP_KERNEL);
 	if (!vblk) {
 		err = -ENOMEM;
 		goto out;
@@ -210,6 +220,8 @@ static int virtblk_probe(struct virtio_device *vdev)
 	INIT_LIST_HEAD(&vblk->reqs);
 	spin_lock_init(&vblk->lock);
 	vblk->vdev = vdev;
+	vblk->sg_elems = sg_elems;
+	sg_init_table(vblk->sg, vblk->sg_elems);
 
 	/* We expect one virtqueue, for output. */
 	vblk->vq = vdev->config->find_vq(vdev, 0, blk_done);
@@ -279,6 +291,13 @@ static int virtblk_probe(struct virtio_device *vdev)
 	}
 	set_capacity(vblk->disk, cap);
 
+	/* We can handle whatever the host told us to handle. */
+	blk_queue_max_phys_segments(vblk->disk->queue, vblk->sg_elems-2);
+	blk_queue_max_hw_segments(vblk->disk->queue, vblk->sg_elems-2);
+
+	/* No real sector limit. */
+	blk_queue_max_sectors(vblk->disk->queue, -1U);
+
 	/* Host can optionally specify maximum segment size and number of
 	 * segments. */
 	err = virtio_config_val(vdev, VIRTIO_BLK_F_SIZE_MAX,
@@ -286,12 +305,8 @@ static int virtblk_probe(struct virtio_device *vdev)
 				&v);
 	if (!err)
 		blk_queue_max_segment_size(vblk->disk->queue, v);
-
-	err = virtio_config_val(vdev, VIRTIO_BLK_F_SEG_MAX,
-				offsetof(struct virtio_blk_config, seg_max),
-				&v);
-	if (!err)
-		blk_queue_max_hw_segments(vblk->disk->queue, v);
+	else
+		blk_queue_max_segment_size(vblk->disk->queue, -1U);
 
 	/* Host can optionally specify the block size of the device */
 	err = virtio_config_val(vdev, VIRTIO_BLK_F_BLK_SIZE,
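
The sg[] change above is the classic trailing flexible-array idiom: one allocation, sized at probe time from what the host reports, instead of a compile-time worst case like the old 3+MAX_PHYS_SEGMENTS. A generic userspace sketch of the pattern; the struct and names are hypothetical, the sizing idiom is the point:

#include <stdlib.h>
#include <string.h>

/* Generic sketch of the flexible-array allocation used by the
 * virtio_blk change: header plus exactly nelems trailing entries. */
struct sg_table_box {
	unsigned int nelems;				/* like vblk->sg_elems */
	struct { void *addr; unsigned int len; } sg[];	/* C99 flexible array */
};

static struct sg_table_box *sg_box_alloc(unsigned int nelems)
{
	/* One allocation, mirroring
	 * kmalloc(sizeof(*vblk) + sizeof(vblk->sg[0]) * sg_elems, ...). */
	struct sg_table_box *box = malloc(sizeof(*box) +
					  sizeof(box->sg[0]) * nelems);

	if (!box)
		return NULL;
	box->nelems = nelems;
	memset(box->sg, 0, sizeof(box->sg[0]) * nelems);
	return box;
}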
drivers/char/hvc_console.c +1 −0
@@ -695,6 +695,7 @@ void hvc_resize(struct hvc_struct *hp, struct winsize ws)
 	hp->ws = ws;
 	schedule_work(&hp->tty_resize);
 }
+EXPORT_SYMBOL_GPL(hvc_resize);
 
 /*
  * This kthread is either polling or interrupt driven.  This is determined by