Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 2bf1bef0 authored by Linus Torvalds
Browse files
Pull more s390 updates from Martin Schwidefsky:
 "This is the second batch of s390 patches for the 3.10 merge window.

  Heiko improved the memory detection, this fixes kdump for large memory
  sizes.  Some kvm related memory management work, new ipldev/condev
  keywords in cio and bug fixes."

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
  s390/mem_detect: remove artificial kdump memory types
  s390/mm: add pte invalidation notifier for kvm
  s390/zcrypt: ap bus rescan problem when toggle crypto adapters on/off
  s390/memory hotplug,sclp: get rid of per memory increment usecount
  s390/memory hotplug: provide memory_block_size_bytes() function
  s390/mem_detect: limit memory detection loop to "mem=" parameter
  s390/kdump,bootmem: fix bootmem allocator bitmap size
  s390: get rid of odd global real_memory_size
  s390/kvm: Change the virtual memory mapping location for Virtio devices
  s390/zcore: calculate real memory size using own get_mem_size function
  s390/mem_detect: add DAT sanity check
  s390/mem_detect: fix lockdep irq tracing
  s390/mem_detect: move memory detection code to mm folder
  s390/zfcpdump: exploit new cio_ignore keywords
  s390/cio: add condev keyword to cio_ignore
  s390/cio: add ipldev keyword to cio_ignore
  s390/uaccess: add "fallthrough" comments
parents d7ab7302 996b4a7d
Loading
Loading
Loading
Loading
+7 −5
Original line number Original line Diff line number Diff line
@@ -8,9 +8,9 @@ Command line parameters


  Enable logging of debug information in case of ccw device timeouts.
  Enable logging of debug information in case of ccw device timeouts.


* cio_ignore = {all} |
* cio_ignore = device[,device[,..]]
	       {<device> | <range of devices>} |

	       {!<device> | !<range of devices>}
	device := {all | [!]ipldev | [!]condev | [!]<devno> | [!]<devno>-<devno>}


  The given devices will be ignored by the common I/O-layer; no detection
  The given devices will be ignored by the common I/O-layer; no detection
  and device sensing will be done on any of those devices. The subchannel to 
  and device sensing will be done on any of those devices. The subchannel to 
@@ -24,8 +24,10 @@ Command line parameters
  device numbers (0xabcd or abcd, for 2.4 backward compatibility). If you
  device numbers (0xabcd or abcd, for 2.4 backward compatibility). If you
  give a device number 0xabcd, it will be interpreted as 0.0.abcd.
  give a device number 0xabcd, it will be interpreted as 0.0.abcd.


  You can use the 'all' keyword to ignore all devices.
  You can use the 'all' keyword to ignore all devices. The 'ipldev' and 'condev'
  The '!' operator will cause the I/O-layer to _not_ ignore a device.
  keywords can be used to refer to the CCW based boot device and CCW console
  device respectively (these are probably useful only when combined with the '!'
  operator). The '!' operator will cause the I/O-layer to _not_ ignore a device.
  The command line is parsed from left to right.
  The command line is parsed from left to right.


  For example, 
  For example, 
+56 −10
Original line number Original line Diff line number Diff line
@@ -306,6 +306,7 @@ extern unsigned long MODULES_END;
#define RCP_HC_BIT	0x00200000UL
#define RCP_HC_BIT	0x00200000UL
#define RCP_GR_BIT	0x00040000UL
#define RCP_GR_BIT	0x00040000UL
#define RCP_GC_BIT	0x00020000UL
#define RCP_GC_BIT	0x00020000UL
#define RCP_IN_BIT	0x00008000UL	/* IPTE notify bit */


/* User dirty / referenced bit for KVM's migration feature */
/* User dirty / referenced bit for KVM's migration feature */
#define KVM_UR_BIT	0x00008000UL
#define KVM_UR_BIT	0x00008000UL
@@ -373,6 +374,7 @@ extern unsigned long MODULES_END;
#define RCP_HC_BIT	0x0020000000000000UL
#define RCP_HC_BIT	0x0020000000000000UL
#define RCP_GR_BIT	0x0004000000000000UL
#define RCP_GR_BIT	0x0004000000000000UL
#define RCP_GC_BIT	0x0002000000000000UL
#define RCP_GC_BIT	0x0002000000000000UL
#define RCP_IN_BIT	0x0000800000000000UL	/* IPTE notify bit */


/* User dirty / referenced bit for KVM's migration feature */
/* User dirty / referenced bit for KVM's migration feature */
#define KVM_UR_BIT	0x0000800000000000UL
#define KVM_UR_BIT	0x0000800000000000UL
@@ -746,30 +748,42 @@ struct gmap {


/**
/**
 * struct gmap_rmap - reverse mapping for segment table entries
 * struct gmap_rmap - reverse mapping for segment table entries
 * @next: pointer to the next gmap_rmap structure in the list
 * @gmap: pointer to the gmap_struct
 * @entry: pointer to a segment table entry
 * @entry: pointer to a segment table entry
 * @vmaddr: virtual address in the guest address space
 */
 */
struct gmap_rmap {
struct gmap_rmap {
	struct list_head list;
	struct list_head list;
	struct gmap *gmap;
	unsigned long *entry;
	unsigned long *entry;
	unsigned long vmaddr;
};
};


/**
/**
 * struct gmap_pgtable - gmap information attached to a page table
 * struct gmap_pgtable - gmap information attached to a page table
 * @vmaddr: address of the 1MB segment in the process virtual memory
 * @vmaddr: address of the 1MB segment in the process virtual memory
 * @mapper: list of segment table entries maping a page table
 * @mapper: list of segment table entries mapping a page table
 */
 */
struct gmap_pgtable {
struct gmap_pgtable {
	unsigned long vmaddr;
	unsigned long vmaddr;
	struct list_head mapper;
	struct list_head mapper;
};
};


/**
 * struct gmap_notifier - notify function block for page invalidation
 * @notifier_call: address of callback function
 */
struct gmap_notifier {
	struct list_head list;
	void (*notifier_call)(struct gmap *gmap, unsigned long address);
};

struct gmap *gmap_alloc(struct mm_struct *mm);
struct gmap *gmap_alloc(struct mm_struct *mm);
void gmap_free(struct gmap *gmap);
void gmap_free(struct gmap *gmap);
void gmap_enable(struct gmap *gmap);
void gmap_enable(struct gmap *gmap);
void gmap_disable(struct gmap *gmap);
void gmap_disable(struct gmap *gmap);
int gmap_map_segment(struct gmap *gmap, unsigned long from,
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long length);
		     unsigned long to, unsigned long len);
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
unsigned long __gmap_translate(unsigned long address, struct gmap *);
unsigned long __gmap_translate(unsigned long address, struct gmap *);
unsigned long gmap_translate(unsigned long address, struct gmap *);
unsigned long gmap_translate(unsigned long address, struct gmap *);
@@ -777,6 +791,24 @@ unsigned long __gmap_fault(unsigned long address, struct gmap *);
unsigned long gmap_fault(unsigned long address, struct gmap *);
unsigned long gmap_fault(unsigned long address, struct gmap *);
void gmap_discard(unsigned long from, unsigned long to, struct gmap *);
void gmap_discard(unsigned long from, unsigned long to, struct gmap *);


void gmap_register_ipte_notifier(struct gmap_notifier *);
void gmap_unregister_ipte_notifier(struct gmap_notifier *);
int gmap_ipte_notify(struct gmap *, unsigned long start, unsigned long len);
void gmap_do_ipte_notify(struct mm_struct *, unsigned long addr, pte_t *);

static inline pgste_t pgste_ipte_notify(struct mm_struct *mm,
					unsigned long addr,
					pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	if (pgste_val(pgste) & RCP_IN_BIT) {
		pgste_val(pgste) &= ~RCP_IN_BIT;
		gmap_do_ipte_notify(mm, addr, ptep);
	}
#endif
	return pgste;
}

/*
/*
 * Certain architectures need to do special things when PTEs
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * within a page table are directly modified.  Thus, the following
@@ -1032,8 +1064,10 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
	pte_t pte;
	pte_t pte;


	mm->context.flush_mm = 1;
	mm->context.flush_mm = 1;
	if (mm_has_pgste(mm))
	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(mm, address, ptep, pgste);
	}


	pte = *ptep;
	pte = *ptep;
	if (!mm_exclusive(mm))
	if (!mm_exclusive(mm))
@@ -1052,11 +1086,14 @@ static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
					   unsigned long address,
					   unsigned long address,
					   pte_t *ptep)
					   pte_t *ptep)
{
{
	pgste_t pgste;
	pte_t pte;
	pte_t pte;


	mm->context.flush_mm = 1;
	mm->context.flush_mm = 1;
	if (mm_has_pgste(mm))
	if (mm_has_pgste(mm)) {
		pgste_get_lock(ptep);
		pgste = pgste_get_lock(ptep);
		pgste_ipte_notify(mm, address, ptep, pgste);
	}


	pte = *ptep;
	pte = *ptep;
	if (!mm_exclusive(mm))
	if (!mm_exclusive(mm))
@@ -1082,8 +1119,10 @@ static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
	pgste_t pgste;
	pgste_t pgste;
	pte_t pte;
	pte_t pte;


	if (mm_has_pgste(vma->vm_mm))
	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste);
	}


	pte = *ptep;
	pte = *ptep;
	__ptep_ipte(address, ptep);
	__ptep_ipte(address, ptep);
@@ -1111,8 +1150,11 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
	pgste_t pgste;
	pgste_t pgste;
	pte_t pte;
	pte_t pte;


	if (mm_has_pgste(mm))
	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_get_lock(ptep);
		if (!full)
			pgste = pgste_ipte_notify(mm, address, ptep, pgste);
	}


	pte = *ptep;
	pte = *ptep;
	if (!full)
	if (!full)
@@ -1135,8 +1177,10 @@ static inline pte_t ptep_set_wrprotect(struct mm_struct *mm,


	if (pte_write(pte)) {
	if (pte_write(pte)) {
		mm->context.flush_mm = 1;
		mm->context.flush_mm = 1;
		if (mm_has_pgste(mm))
		if (mm_has_pgste(mm)) {
			pgste = pgste_get_lock(ptep);
			pgste = pgste_get_lock(ptep);
			pgste = pgste_ipte_notify(mm, address, ptep, pgste);
		}


		if (!mm_exclusive(mm))
		if (!mm_exclusive(mm))
			__ptep_ipte(address, ptep);
			__ptep_ipte(address, ptep);
@@ -1160,8 +1204,10 @@ static inline int ptep_set_access_flags(struct vm_area_struct *vma,


	if (pte_same(*ptep, entry))
	if (pte_same(*ptep, entry))
		return 0;
		return 0;
	if (mm_has_pgste(vma->vm_mm))
	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste);
	}


	__ptep_ipte(address, ptep);
	__ptep_ipte(address, ptep);


+3 −6
Original line number Original line Diff line number Diff line
@@ -33,8 +33,6 @@


#define CHUNK_READ_WRITE 0
#define CHUNK_READ_WRITE 0
#define CHUNK_READ_ONLY  1
#define CHUNK_READ_ONLY  1
#define CHUNK_OLDMEM	 4
#define CHUNK_CRASHK	 5


struct mem_chunk {
struct mem_chunk {
	unsigned long addr;
	unsigned long addr;
@@ -43,13 +41,12 @@ struct mem_chunk {
};
};


extern struct mem_chunk memory_chunk[];
extern struct mem_chunk memory_chunk[];
extern unsigned long real_memory_size;
extern int memory_end_set;
extern int memory_end_set;
extern unsigned long memory_end;
extern unsigned long memory_end;


void detect_memory_layout(struct mem_chunk chunk[]);
void detect_memory_layout(struct mem_chunk chunk[], unsigned long maxsize);
void create_mem_hole(struct mem_chunk memory_chunk[], unsigned long addr,
void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr,
		     unsigned long size, int type);
		     unsigned long size);


#define PRIMARY_SPACE_MODE	0
#define PRIMARY_SPACE_MODE	0
#define ACCESS_REGISTER_MODE	1
#define ACCESS_REGISTER_MODE	1
+1 −1
Original line number Original line Diff line number Diff line
@@ -30,7 +30,7 @@ CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w


obj-y	:= bitmap.o traps.o time.o process.o base.o early.o setup.o vtime.o
obj-y	:= bitmap.o traps.o time.o process.o base.o early.o setup.o vtime.o
obj-y	+= processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
obj-y	+= processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
obj-y	+= debug.o irq.o ipl.o dis.o diag.o mem_detect.o sclp.o vdso.o
obj-y	+= debug.o irq.o ipl.o dis.o diag.o sclp.o vdso.o
obj-y	+= sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
obj-y	+= sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
obj-y	+= dumpstack.o
obj-y	+= dumpstack.o


+3 −3
Original line number Original line Diff line number Diff line
@@ -88,8 +88,8 @@ static struct mem_chunk *get_memory_layout(void)
	struct mem_chunk *chunk_array;
	struct mem_chunk *chunk_array;


	chunk_array = kzalloc_panic(MEMORY_CHUNKS * sizeof(struct mem_chunk));
	chunk_array = kzalloc_panic(MEMORY_CHUNKS * sizeof(struct mem_chunk));
	detect_memory_layout(chunk_array);
	detect_memory_layout(chunk_array, 0);
	create_mem_hole(chunk_array, OLDMEM_BASE, OLDMEM_SIZE, CHUNK_CRASHK);
	create_mem_hole(chunk_array, OLDMEM_BASE, OLDMEM_SIZE);
	return chunk_array;
	return chunk_array;
}
}


@@ -344,7 +344,7 @@ static int loads_init(Elf64_Phdr *phdr, u64 loads_offset)
	for (i = 0; i < MEMORY_CHUNKS; i++) {
	for (i = 0; i < MEMORY_CHUNKS; i++) {
		mem_chunk = &chunk_array[i];
		mem_chunk = &chunk_array[i];
		if (mem_chunk->size == 0)
		if (mem_chunk->size == 0)
			break;
			continue;
		if (chunk_array[i].type != CHUNK_READ_WRITE &&
		if (chunk_array[i].type != CHUNK_READ_WRITE &&
		    chunk_array[i].type != CHUNK_READ_ONLY)
		    chunk_array[i].type != CHUNK_READ_ONLY)
			continue;
			continue;
Loading