Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit f700a420 authored by James Hogan
Browse files

Merge tag 'mips_kvm_4.11_1' into mips-for-linux-next

MIPS dependencies for KVM

Miscellaneous MIPS architecture changes depended on by the MIPS KVM
changes in the KVM tree.

- Move pgd_alloc() out of header.
- Exports so KVM can access page table management and TLBEX functions.
- Add return errors to protected cache ops.
parents 98e58b01 7170bdc7
Loading
Loading
Loading
Loading
+1 −15
Original line number Diff line number Diff line
@@ -43,21 +43,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 * Initialize a new pgd / pmd table with invalid pointers.
 */
extern void pgd_init(unsigned long page);

/*
 * pgd_alloc() is no longer inline: the commit moves the definition out of
 * this header (presumably into arch/mips/mm/pgtable.c, which this change
 * adds to the mm Makefile — confirm) so it can be exported for use by KVM.
 * The scraped diff showed both the removed inline body and the new extern
 * declaration; only the declaration belongs in the post-commit file.
 */
extern pgd_t *pgd_alloc(struct mm_struct *mm);

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
+35 −20
Original line number Diff line number Diff line
@@ -147,49 +147,64 @@ static inline void flush_scache_line(unsigned long addr)
}

/*
 * Issue a "protected" cache op on @addr and return 0 on success or
 * -EFAULT if the access faulted (the .fixup stub at label 3 stores the
 * error into __err and jumps past the cache instruction).  The scraped
 * diff had old and new lines interleaved (two "1:" labels, two operand
 * lists); this is the post-commit, error-returning form.
 */
#define protected_cache_op(op,addr)				\
({								\
	int __err = 0;						\
	__asm__ __volatile__(					\
	"	.set	push			\n"		\
	"	.set	noreorder		\n"		\
	"	.set "MIPS_ISA_ARCH_LEVEL"	\n"		\
	"1:	cache	%1, (%2)		\n"		\
	"2:	.set	pop			\n"		\
	"	.section .fixup,\"ax\"		\n"		\
	"3:	li	%0, %3			\n"		\
	"	j	2b			\n"		\
	"	.previous			\n"		\
	"	.section __ex_table,\"a\"	\n"		\
	"	"STR(PTR)" 1b, 3b		\n"		\
	"	.previous"					\
	: "+r" (__err)						\
	: "i" (op), "r" (addr), "i" (-EFAULT));			\
	__err;							\
})


/*
 * EVA variant of protected_cache_op(): uses the cachee instruction to
 * operate on user (EVA) addresses.  Returns 0 on success or -EFAULT if
 * the access faulted, via the same .fixup/__ex_table scheme.  The
 * scraped diff had old and new lines interleaved (duplicate "1:" labels
 * and operand lists); this is the post-commit form.
 */
#define protected_cachee_op(op,addr)				\
({								\
	int __err = 0;						\
	__asm__ __volatile__(					\
	"	.set	push			\n"		\
	"	.set	noreorder		\n"		\
	"	.set	mips0			\n"		\
	"	.set	eva			\n"		\
	"1:	cachee	%1, (%2)		\n"		\
	"2:	.set	pop			\n"		\
	"	.section .fixup,\"ax\"		\n"		\
	"3:	li	%0, %3			\n"		\
	"	j	2b			\n"		\
	"	.previous			\n"		\
	"	.section __ex_table,\"a\"	\n"		\
	"	"STR(PTR)" 1b, 3b		\n"		\
	"	.previous"					\
	: "+r" (__err)						\
	: "i" (op), "r" (addr), "i" (-EFAULT));			\
	__err;							\
})

/*
 * The next two are for badland addresses like signal trampolines.
 */
static inline void protected_flush_icache_line(unsigned long addr)
static inline int protected_flush_icache_line(unsigned long addr)
{
	switch (boot_cpu_type()) {
	case CPU_LOONGSON2:
		protected_cache_op(Hit_Invalidate_I_Loongson2, addr);
		break;
		return protected_cache_op(Hit_Invalidate_I_Loongson2, addr);

	default:
#ifdef CONFIG_EVA
		protected_cachee_op(Hit_Invalidate_I, addr);
		return protected_cachee_op(Hit_Invalidate_I, addr);
#else
		protected_cache_op(Hit_Invalidate_I, addr);
		return protected_cache_op(Hit_Invalidate_I, addr);
#endif
		break;
	}
}

@@ -199,21 +214,21 @@ static inline void protected_flush_icache_line(unsigned long addr)
 * caches.  We're talking about one cacheline unnecessarily getting invalidated
 * here so the penalty isn't overly hard.
 */
static inline void protected_writeback_dcache_line(unsigned long addr)
static inline int protected_writeback_dcache_line(unsigned long addr)
{
#ifdef CONFIG_EVA
	protected_cachee_op(Hit_Writeback_Inv_D, addr);
	return protected_cachee_op(Hit_Writeback_Inv_D, addr);
#else
	protected_cache_op(Hit_Writeback_Inv_D, addr);
	return protected_cache_op(Hit_Writeback_Inv_D, addr);
#endif
}

static inline void protected_writeback_scache_line(unsigned long addr)
static inline int protected_writeback_scache_line(unsigned long addr)
{
#ifdef CONFIG_EVA
	protected_cachee_op(Hit_Writeback_Inv_SD, addr);
	return protected_cachee_op(Hit_Writeback_Inv_SD, addr);
#else
	protected_cache_op(Hit_Writeback_Inv_SD, addr);
	return protected_cache_op(Hit_Writeback_Inv_SD, addr);
#endif
}

+26 −0
Original line number Diff line number Diff line
/*
 * Declarations for the TLB exception handler builder helpers implemented
 * in tlbex.c, made visible here so other code (per the commit message,
 * KVM) can generate TLB handling code with uasm.
 */
#ifndef __ASM_TLBEX_H
#define __ASM_TLBEX_H

#include <asm/uasm.h>

/*
 * Write random or indexed TLB entry, and care about the hazards from
 * the preceding mtc0 and for the following eret.
 */
enum tlb_write_entry {
	tlb_random,
	tlb_indexed
};

/* NOTE(review): presumably the CP0 register selected by tlbex.c to hold
 * the page table base — confirm against tlbex.c. */
extern int pgd_reg;

/* Emit code into *p that computes the PMD entry address (64-bit page
 * tables); tmp/ptr name the scratch registers to use. */
void build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
		      unsigned int tmp, unsigned int ptr);
/* Emit code that computes the PGD entry address (32-bit page tables). */
void build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr);
/* Emit code that computes the PTE pointer for the faulting address. */
void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr);
/* Emit code that loads the EntryLo values from the PTE pair. */
void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep);
/* Emit the TLB write (tlbwr/tlbwi per wmode) with hazard handling — see
 * the enum comment above. */
void build_tlb_write_entry(u32 **p, struct uasm_label **l,
			   struct uasm_reloc **r,
			   enum tlb_write_entry wmode);

#endif /* __ASM_TLBEX_H */
+5 −0
Original line number Diff line number Diff line
@@ -9,6 +9,9 @@
 * Copyright (C) 2012, 2013  MIPS Technologies, Inc.  All rights reserved.
 */

#ifndef __ASM_UASM_H
#define __ASM_UASM_H

#include <linux/types.h>

#ifdef CONFIG_EXPORT_UASM
@@ -309,3 +312,5 @@ void uasm_il_bltz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
void uasm_il_bne(u32 **p, struct uasm_reloc **r, unsigned int reg1,
		 unsigned int reg2, int lid);
void uasm_il_bnez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);

#endif /* __ASM_UASM_H */
+1 −1
Original line number Diff line number Diff line
@@ -4,7 +4,7 @@

# Core MIPS mm objects.  pgtable.o is added by this commit (it holds the
# now out-of-line pgd_alloc()).  The scraped diff kept both the old and
# new final continuation lines — the old one lacked a trailing backslash,
# which would break the Makefile — so only the new line is retained.
obj-y				+= cache.o dma-default.o extable.o fault.o \
				   gup.o init.o mmap.o page.o page-funcs.o \
				   pgtable.o tlbex.o tlbex-fault.o tlb-funcs.o

ifdef CONFIG_CPU_MICROMIPS
obj-y				+= uasm-micromips.o
Loading