Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit b3124ec2 authored by Michael Ellerman
Browse files

Merge branch 'fixes' into next

Merge our fixes branch from the 4.18 cycle to resolve some minor
conflicts.
Parents: f7a6947c, cca19f0b
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -237,6 +237,7 @@ endif
cpu-as-$(CONFIG_4xx)		+= -Wa,-m405
cpu-as-$(CONFIG_ALTIVEC)	+= $(call as-option,-Wa$(comma)-maltivec)
cpu-as-$(CONFIG_E200)		+= -Wa,-me200
cpu-as-$(CONFIG_E500)		+= -Wa,-me500
cpu-as-$(CONFIG_PPC_BOOK3S_64)	+= -Wa,-mpower4
cpu-as-$(CONFIG_PPC_E500MC)	+= $(call as-option,-Wa$(comma)-me500mc)

+23 −14
Original line number Diff line number Diff line
@@ -35,9 +35,9 @@ extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(
extern struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
		unsigned long ua, unsigned long entries);
extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned long *hpa);
		unsigned long ua, unsigned int pageshift, unsigned long *hpa);
extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned long *hpa);
		unsigned long ua, unsigned int pageshift, unsigned long *hpa);
extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem);
#endif
@@ -143,23 +143,32 @@ static inline void mm_context_remove_copro(struct mm_struct *mm)
{
	int c;

	c = atomic_dec_if_positive(&mm->context.copros);

	/* Detect imbalance between add and remove */
	WARN_ON(c < 0);

	/*
	 * Need to broadcast a global flush of the full mm before
	 * decrementing active_cpus count, as the next TLBI may be
	 * local and the nMMU and/or PSL need to be cleaned up.
	 * Should be rare enough so that it's acceptable.
	 * When removing the last copro, we need to broadcast a global
	 * flush of the full mm, as the next TLBI may be local and the
	 * nMMU and/or PSL need to be cleaned up.
	 *
	 * Both the 'copros' and 'active_cpus' counts are looked at in
	 * flush_all_mm() to determine the scope (local/global) of the
	 * TLBIs, so we need to flush first before decrementing
	 * 'copros'. If this API is used by several callers for the
	 * same context, it can lead to over-flushing. It's hopefully
	 * not common enough to be a problem.
	 *
	 * Skip on hash, as we don't know how to do the proper flush
	 * for the time being. Invalidations will remain global if
	 * used on hash.
	 * used on hash. Note that we can't drop 'copros' either, as
	 * it could make some invalidations local with no flush
	 * in-between.
	 */
	if (c == 0 && radix_enabled()) {
	if (radix_enabled()) {
		flush_all_mm(mm);

		c = atomic_dec_if_positive(&mm->context.copros);
		/* Detect imbalance between add and remove */
		WARN_ON(c < 0);

		if (c == 0)
			dec_mm_active_cpus(mm);
	}
}
+2 −0
Original line number Diff line number Diff line
@@ -146,7 +146,9 @@ power9_restore_additional_sprs:
	mtspr	SPRN_MMCR1, r4

	ld	r3, STOP_MMCR2(r13)
	ld	r4, PACA_SPRG_VDSO(r13)
	mtspr	SPRN_MMCR2, r3
	mtspr	SPRN_SPRG3, r4
	blr

/*
+1 −1
Original line number Diff line number Diff line
@@ -449,7 +449,7 @@ long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
		/* This only handles v2 IOMMU type, v1 is handled via ioctl() */
		return H_TOO_HARD;

	if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, &hpa)))
	if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, tbl->it_page_shift, &hpa)))
		return H_HARDWARE;

	if (mm_iommu_mapped_inc(mem))
+4 −2
Original line number Diff line number Diff line
@@ -275,7 +275,8 @@ static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
	if (!mem)
		return H_TOO_HARD;

	if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, &hpa)))
	if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, tbl->it_page_shift,
			&hpa)))
		return H_HARDWARE;

	if (WARN_ON_ONCE_RM(mm_iommu_mapped_inc(mem)))
@@ -461,7 +462,8 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,

		mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K);
		if (mem)
			prereg = mm_iommu_ua_to_hpa_rm(mem, ua, &tces) == 0;
			prereg = mm_iommu_ua_to_hpa_rm(mem, ua,
					IOMMU_PAGE_SHIFT_4K, &tces) == 0;
	}

	if (!prereg) {
Loading