Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 85eae57b authored by Paolo Bonzini's avatar Paolo Bonzini
Browse files

Merge tag 'kvm-s390-next-4.19-1' of...

Merge tag 'kvm-s390-next-4.19-1' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into HEAD

KVM: s390: Features for 4.19

- initial version for host large page support. Must be enabled with
  module parameter hpage=1 and will conflict with the nested=1
  parameter.
- enable etoken facility for guests
- Fixes
parents 3a1174cd 23758461
Loading
Loading
Loading
Loading
+16 −0
Original line number Diff line number Diff line
@@ -4391,6 +4391,22 @@ all such vmexits.

Do not enable KVM_FEATURE_PV_UNHALT if you disable HLT exits.

7.14 KVM_CAP_S390_HPAGE_1M

Architectures: s390
Parameters: none
Returns: 0 on success, -EINVAL if hpage module parameter was not set
	 or cmma is enabled

With this capability the KVM support for memory backing with 1M pages
through hugetlbfs can be enabled for a VM. After the capability is
enabled, cmma can't be enabled anymore and pfmfi and the storage key
interpretation are disabled. If cmma has already been enabled or the
hpage module parameter is not set to 1, -EINVAL is returned.

While it is generally possible to create a huge page backed VM without
this capability, the VM will not be able to run.

8. Other capabilities.
----------------------

+10 −0
Original line number Diff line number Diff line
@@ -9,6 +9,14 @@
#ifndef _ASM_S390_GMAP_H
#define _ASM_S390_GMAP_H

/* Generic bits for GMAP notification on DAT table entry changes. */
#define GMAP_NOTIFY_SHADOW	0x2
#define GMAP_NOTIFY_MPROT	0x1

/* Status bits only for huge segment entries */
#define _SEGMENT_ENTRY_GMAP_IN		0x8000	/* invalidation notify bit */
#define _SEGMENT_ENTRY_GMAP_UC		0x4000	/* dirty (migration) */

/**
 * struct gmap_struct - guest address space
 * @list: list head for the mm->context gmap list
@@ -132,4 +140,6 @@ void gmap_pte_notify(struct mm_struct *, unsigned long addr, pte_t *,
int gmap_mprotect_notify(struct gmap *, unsigned long start,
			 unsigned long len, int prot);

void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long dirty_bitmap[4],
			     unsigned long gaddr, unsigned long vmaddr);
#endif /* _ASM_S390_GMAP_H */
+4 −1
Original line number Diff line number Diff line
@@ -37,7 +37,10 @@ static inline int prepare_hugepage_range(struct file *file,
	return 0;
}

#define arch_clear_hugepage_flags(page)		do { } while (0)
/*
 * Reset the per-page architecture flag (PG_arch_1) for a huge page.
 * NOTE(review): presumably this keeps stale s390 arch state (storage-key
 * related) from leaking to the frame's next user — confirm against the
 * s390 storage-key handling code.
 */
static inline void arch_clear_hugepage_flags(struct page *page)
{
	clear_bit(PG_arch_1, &page->flags);
}

static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
				  pte_t *ptep, unsigned long sz)
+4 −7
Original line number Diff line number Diff line
@@ -269,6 +269,7 @@ struct kvm_s390_sie_block {
	__u8	reserved1c0[8];		/* 0x01c0 */
#define ECD_HOSTREGMGMT	0x20000000
#define ECD_MEF		0x08000000
#define ECD_ETOKENF	0x02000000
	__u32	ecd;			/* 0x01c8 */
	__u8	reserved1cc[18];	/* 0x01cc */
	__u64	pp;			/* 0x01de */
@@ -655,6 +656,7 @@ struct kvm_vcpu_arch {
	seqcount_t cputm_seqcount;
	__u64 cputm_start;
	bool gs_enabled;
	bool skey_enabled;
};

struct kvm_vm_stat {
@@ -793,12 +795,6 @@ struct kvm_s390_vsie {
	struct page *pages[KVM_MAX_VCPUS];
};

struct kvm_s390_migration_state {
	unsigned long bitmap_size;	/* in bits (number of guest pages) */
	atomic64_t dirty_pages;		/* number of dirty pages */
	unsigned long *pgste_bitmap;
};

struct kvm_arch{
	void *sca;
	int use_esca;
@@ -828,7 +824,8 @@ struct kvm_arch{
	struct kvm_s390_vsie vsie;
	u8 epdx;
	u64 epoch;
	struct kvm_s390_migration_state *migration_state;
	int migration_mode;
	atomic64_t cmma_dirty_pages;
	/* subset of available cpu features enabled by user space */
	DECLARE_BITMAP(cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
	struct kvm_s390_gisa *gisa;
+2 −0
Original line number Diff line number Diff line
@@ -24,6 +24,8 @@ typedef struct {
	unsigned int uses_skeys:1;
	/* The mmu context uses CMM. */
	unsigned int uses_cmm:1;
	/* The gmaps associated with this context are allowed to use huge pages. */
	unsigned int allow_gmap_hpage_1m:1;
} mm_context_t;

#define INIT_MM_CONTEXT(name)						   \
Loading