Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 17f34580 authored by Heiko Carstens, committed by Martin Schwidefsky
Browse files

[S390] Convert to SPARSEMEM & SPARSEMEM_VMEMMAP



Convert s390 to SPARSEMEM and SPARSEMEM_VMEMMAP. We do a select
of SPARSEMEM_VMEMMAP since it is configurable. This is because
SPARSEMEM without SPARSEMEM_VMEMMAP gives us a hell of broken
include dependencies that I don't want to fix.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 53492b1d
Loading
Loading
Loading
Loading
+8 −0
Original line number Original line Diff line number Diff line
@@ -300,6 +300,14 @@ comment "Kernel preemption"


source "kernel/Kconfig.preempt"
source "kernel/Kconfig.preempt"


config ARCH_SPARSEMEM_ENABLE
	def_bool y
	select SPARSEMEM_VMEMMAP_ENABLE
	select SPARSEMEM_VMEMMAP

config ARCH_SPARSEMEM_DEFAULT
	def_bool y

source "mm/Kconfig"
source "mm/Kconfig"


comment "I/O subsystem configuration"
comment "I/O subsystem configuration"
+4 −4
Original line number Original line Diff line number Diff line
@@ -287,7 +287,7 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
	if (rc < 0)
	if (rc < 0)
		goto out_free;
		goto out_free;


	rc = add_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1);
	rc = vmem_add_mapping(seg->start_addr, seg->end - seg->start_addr + 1);


	if (rc)
	if (rc)
		goto out_free;
		goto out_free;
@@ -351,7 +351,7 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
	release_resource(seg->res);
	release_resource(seg->res);
	kfree(seg->res);
	kfree(seg->res);
 out_shared:
 out_shared:
	remove_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1);
	vmem_remove_mapping(seg->start_addr, seg->end - seg->start_addr + 1);
 out_free:
 out_free:
	kfree(seg);
	kfree(seg);
 out:
 out:
@@ -474,7 +474,7 @@ segment_modify_shared (char *name, int do_nonshared)
	rc = 0;
	rc = 0;
	goto out_unlock;
	goto out_unlock;
 out_del:
 out_del:
	remove_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1);
	vmem_remove_mapping(seg->start_addr, seg->end - seg->start_addr + 1);
	list_del(&seg->list);
	list_del(&seg->list);
	dcss_diag(DCSS_PURGESEG, seg->dcss_name, &dummy, &dummy);
	dcss_diag(DCSS_PURGESEG, seg->dcss_name, &dummy, &dummy);
	kfree(seg);
	kfree(seg);
@@ -508,7 +508,7 @@ segment_unload(char *name)
		goto out_unlock;
		goto out_unlock;
	release_resource(seg->res);
	release_resource(seg->res);
	kfree(seg->res);
	kfree(seg->res);
	remove_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1);
	vmem_remove_mapping(seg->start_addr, seg->end - seg->start_addr + 1);
	list_del(&seg->list);
	list_del(&seg->list);
	dcss_diag(DCSS_PURGESEG, seg->dcss_name, &dummy, &dummy);
	dcss_diag(DCSS_PURGESEG, seg->dcss_name, &dummy, &dummy);
	kfree(seg);
	kfree(seg);
+2 −0
Original line number Original line Diff line number Diff line
@@ -106,6 +106,8 @@ void __init paging_init(void)
	__ctl_load(S390_lowcore.kernel_asce, 13, 13);
	__ctl_load(S390_lowcore.kernel_asce, 13, 13);
	__raw_local_irq_ssm(ssm_mask);
	__raw_local_irq_ssm(ssm_mask);


	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_ZONE_DMA
#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
+6 −75
Original line number Original line Diff line number Diff line
@@ -27,43 +27,6 @@ struct memory_segment {


static LIST_HEAD(mem_segs);
static LIST_HEAD(mem_segs);


void __meminit memmap_init(unsigned long size, int nid, unsigned long zone,
			   unsigned long start_pfn)
{
	struct page *start, *end;
	struct page *map_start, *map_end;
	int i;

	start = pfn_to_page(start_pfn);
	end = start + size;

	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
		unsigned long cstart, cend;

		cstart = PFN_DOWN(memory_chunk[i].addr);
		cend = cstart + PFN_DOWN(memory_chunk[i].size);

		map_start = mem_map + cstart;
		map_end = mem_map + cend;

		if (map_start < start)
			map_start = start;
		if (map_end > end)
			map_end = end;

		map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1))
			/ sizeof(struct page);
		map_end += ((PFN_ALIGN((unsigned long) map_end)
			     - (unsigned long) map_end)
			    / sizeof(struct page));

		if (map_start < map_end)
			memmap_init_zone((unsigned long)(map_end - map_start),
					 nid, zone, page_to_pfn(map_start),
					 MEMMAP_EARLY);
	}
}

static void __ref *vmem_alloc_pages(unsigned int order)
static void __ref *vmem_alloc_pages(unsigned int order)
{
{
	if (slab_is_available())
	if (slab_is_available())
@@ -115,7 +78,7 @@ static pte_t __init_refok *vmem_pte_alloc(void)
/*
/*
 * Add a physical memory range to the 1:1 mapping.
 * Add a physical memory range to the 1:1 mapping.
 */
 */
static int vmem_add_range(unsigned long start, unsigned long size, int ro)
static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
{
{
	unsigned long address;
	unsigned long address;
	pgd_t *pg_dir;
	pgd_t *pg_dir;
@@ -209,10 +172,9 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
/*
/*
 * Add a backed mem_map array to the virtual mem_map array.
 * Add a backed mem_map array to the virtual mem_map array.
 */
 */
static int vmem_add_mem_map(unsigned long start, unsigned long size)
int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
{
{
	unsigned long address, start_addr, end_addr;
	unsigned long address, start_addr, end_addr;
	struct page *map_start, *map_end;
	pgd_t *pg_dir;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pmd_t *pm_dir;
@@ -220,11 +182,8 @@ static int vmem_add_mem_map(unsigned long start, unsigned long size)
	pte_t  pte;
	pte_t  pte;
	int ret = -ENOMEM;
	int ret = -ENOMEM;


	map_start = VMEM_MAP + PFN_DOWN(start);
	start_addr = (unsigned long) start;
	map_end	= VMEM_MAP + PFN_DOWN(start + size);
	end_addr = (unsigned long) (start + nr);

	start_addr = (unsigned long) map_start & PAGE_MASK;
	end_addr = PFN_ALIGN((unsigned long) map_end);


	for (address = start_addr; address < end_addr; address += PAGE_SIZE) {
	for (address = start_addr; address < end_addr; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		pg_dir = pgd_offset_k(address);
@@ -268,16 +227,6 @@ static int vmem_add_mem_map(unsigned long start, unsigned long size)
	return ret;
	return ret;
}
}


static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
{
	int ret;

	ret = vmem_add_mem_map(start, size);
	if (ret)
		return ret;
	return vmem_add_range(start, size, ro);
}

/*
/*
 * Add memory segment to the segment list if it doesn't overlap with
 * Add memory segment to the segment list if it doesn't overlap with
 * an already present segment.
 * an already present segment.
@@ -315,7 +264,7 @@ static void __remove_shared_memory(struct memory_segment *seg)
	vmem_remove_range(seg->start, seg->size);
	vmem_remove_range(seg->start, seg->size);
}
}


int remove_shared_memory(unsigned long start, unsigned long size)
int vmem_remove_mapping(unsigned long start, unsigned long size)
{
{
	struct memory_segment *seg;
	struct memory_segment *seg;
	int ret;
	int ret;
@@ -339,11 +288,9 @@ int remove_shared_memory(unsigned long start, unsigned long size)
	return ret;
	return ret;
}
}


int add_shared_memory(unsigned long start, unsigned long size)
int vmem_add_mapping(unsigned long start, unsigned long size)
{
{
	struct memory_segment *seg;
	struct memory_segment *seg;
	struct page *page;
	unsigned long pfn, num_pfn, end_pfn;
	int ret;
	int ret;


	mutex_lock(&vmem_mutex);
	mutex_lock(&vmem_mutex);
@@ -361,21 +308,6 @@ int add_shared_memory(unsigned long start, unsigned long size)
	ret = vmem_add_mem(start, size, 0);
	ret = vmem_add_mem(start, size, 0);
	if (ret)
	if (ret)
		goto out_remove;
		goto out_remove;

	pfn = PFN_DOWN(start);
	num_pfn = PFN_DOWN(size);
	end_pfn = pfn + num_pfn;

	page = pfn_to_page(pfn);
	memset(page, 0, num_pfn * sizeof(struct page));

	for (; pfn < end_pfn; pfn++) {
		page = pfn_to_page(pfn);
		init_page_count(page);
		reset_page_mapcount(page);
		SetPageReserved(page);
		INIT_LIST_HEAD(&page->lru);
	}
	goto out;
	goto out;


out_remove:
out_remove:
@@ -401,7 +333,6 @@ void __init vmem_map_init(void)
	INIT_LIST_HEAD(&init_mm.context.crst_list);
	INIT_LIST_HEAD(&init_mm.context.crst_list);
	INIT_LIST_HEAD(&init_mm.context.pgtable_list);
	INIT_LIST_HEAD(&init_mm.context.pgtable_list);
	init_mm.context.noexec = 0;
	init_mm.context.noexec = 0;
	NODE_DATA(0)->node_mem_map = VMEM_MAP;
	ro_start = ((unsigned long)&_stext) & PAGE_MASK;
	ro_start = ((unsigned long)&_stext) & PAGE_MASK;
	ro_end = PFN_ALIGN((unsigned long)&_eshared);
	ro_end = PFN_ALIGN((unsigned long)&_eshared);
	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
+12 −11
Original line number Original line Diff line number Diff line
@@ -17,6 +17,7 @@
#include <linux/virtio_config.h>
#include <linux/virtio_config.h>
#include <linux/interrupt.h>
#include <linux/interrupt.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_ring.h>
#include <linux/pfn.h>
#include <asm/io.h>
#include <asm/io.h>
#include <asm/kvm_para.h>
#include <asm/kvm_para.h>
#include <asm/kvm_virtio.h>
#include <asm/kvm_virtio.h>
@@ -180,11 +181,10 @@ static struct virtqueue *kvm_find_vq(struct virtio_device *vdev,


	config = kvm_vq_config(kdev->desc)+index;
	config = kvm_vq_config(kdev->desc)+index;


	if (add_shared_memory(config->address,
	err = vmem_add_mapping(config->address,
				vring_size(config->num, PAGE_SIZE))) {
			       vring_size(config->num, PAGE_SIZE));
		err = -ENOMEM;
	if (err)
		goto out;
		goto out;
	}


	vq = vring_new_virtqueue(config->num, vdev, (void *) config->address,
	vq = vring_new_virtqueue(config->num, vdev, (void *) config->address,
				 kvm_notify, callback);
				 kvm_notify, callback);
@@ -202,8 +202,8 @@ static struct virtqueue *kvm_find_vq(struct virtio_device *vdev,
	vq->priv = config;
	vq->priv = config;
	return vq;
	return vq;
unmap:
unmap:
	remove_shared_memory(config->address, vring_size(config->num,
	vmem_remove_mapping(config->address,
			     PAGE_SIZE));
			    vring_size(config->num, PAGE_SIZE));
out:
out:
	return ERR_PTR(err);
	return ERR_PTR(err);
}
}
@@ -213,7 +213,7 @@ static void kvm_del_vq(struct virtqueue *vq)
	struct kvm_vqconfig *config = vq->priv;
	struct kvm_vqconfig *config = vq->priv;


	vring_del_virtqueue(vq);
	vring_del_virtqueue(vq);
	remove_shared_memory(config->address,
	vmem_remove_mapping(config->address,
			    vring_size(config->num, PAGE_SIZE));
			    vring_size(config->num, PAGE_SIZE));
}
}


@@ -318,12 +318,13 @@ static int __init kvm_devices_init(void)
		return rc;
		return rc;
	}
	}


	if (add_shared_memory((max_pfn) << PAGE_SHIFT, PAGE_SIZE)) {
	rc = vmem_add_mapping(PFN_PHYS(max_pfn), PAGE_SIZE);
	if (rc) {
		device_unregister(&kvm_root);
		device_unregister(&kvm_root);
		return -ENOMEM;
		return rc;
	}
	}


	kvm_devices  = (void *) (max_pfn << PAGE_SHIFT);
	kvm_devices = (void *) PFN_PHYS(max_pfn);


	ctl_set_bit(0, 9);
	ctl_set_bit(0, 9);
	register_external_interrupt(0x2603, kvm_extint_handler);
	register_external_interrupt(0x2603, kvm_extint_handler);
Loading