Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 52644d80 authored by Paolo Bonzini, committed by Greg Kroah-Hartman
Browse files

KVM: x86: add tracepoints around __direct_map and FNAME(fetch)



commit 335e192a3fa415e1202c8b9ecdaaecd643f823cc upstream.

These are useful in debugging shadow paging.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
[bwh: Backported to 4.9:
 - Keep using PT_PRESENT_MASK to test page write permission
 - Adjust context]
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 9dc6bc3f
Loading
Loading
Loading
Loading
+6 −7
Original line number Diff line number Diff line
@@ -131,9 +131,6 @@ module_param(dbg, bool, 0644);

#include <trace/events/kvm.h>

#define CREATE_TRACE_POINTS
#include "mmutrace.h"

#define SPTE_HOST_WRITEABLE	(1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
#define SPTE_MMU_WRITEABLE	(1ULL << (PT_FIRST_AVAIL_BITS_SHIFT + 1))

@@ -193,8 +190,12 @@ static u64 __read_mostly shadow_mmio_mask;
static u64 __read_mostly shadow_present_mask;

static void mmu_spte_set(u64 *sptep, u64 spte);
static bool is_executable_pte(u64 spte);
static void mmu_free_roots(struct kvm_vcpu *vcpu);

#define CREATE_TRACE_POINTS
#include "mmutrace.h"

void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask)
{
	shadow_mmio_mask = mmio_mask;
@@ -2667,10 +2668,7 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access,
		ret = RET_PF_EMULATE;

	pgprintk("%s: setting spte %llx\n", __func__, *sptep);
	pgprintk("instantiating %s PTE (%s) at %llx (%llx) addr %p\n",
		 is_large_pte(*sptep)? "2MB" : "4kB",
		 *sptep & PT_PRESENT_MASK ?"RW":"R", gfn,
		 *sptep, sptep);
	trace_kvm_mmu_set_spte(level, gfn, sptep);
	if (!was_rmapped && is_large_pte(*sptep))
		++vcpu->kvm->stat.lpages;

@@ -2781,6 +2779,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, int write,
	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
		return RET_PF_RETRY;

	trace_kvm_mmu_spte_requested(gpa, level, pfn);
	for_each_shadow_entry(vcpu, gpa, it) {
		base_gfn = gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
		if (it.level == level)
+59 −0
Original line number Diff line number Diff line
@@ -322,6 +322,65 @@ TRACE_EVENT(
		  __entry->kvm_gen == __entry->spte_gen
	)
);

/*
 * Fired from mmu_set_spte() after a shadow PTE has been written.
 * Records the new SPTE value, where it lives, and decoded permission
 * bits so shadow-paging decisions can be followed from a trace.
 */
TRACE_EVENT(
	kvm_mmu_set_spte,
	TP_PROTO(int level, gfn_t gfn, u64 *sptep),
	TP_ARGS(level, gfn, sptep),

	TP_STRUCT__entry(
		__field(u64, gfn)
		__field(u64, spte)
		/* Physical address of the SPTE slot (see virt_to_phys below). */
		__field(u64, sptep)
		__field(u8, level)
		/* These depend on page entry type, so compute them now.  */
		__field(bool, r)
		__field(bool, x)
		/* User bit, or -1 when shadow_user_mask is 0 (see assign). */
		__field(u8, u)
	),

	TP_fast_assign(
		__entry->gfn = gfn;
		__entry->spte = *sptep;
		__entry->sptep = virt_to_phys(sptep);
		__entry->level = level;
		/*
		 * When shadow_present_mask is non-zero this forces r true
		 * regardless of the SPTE bits; otherwise fall back to the
		 * architectural present bit.
		 */
		__entry->r = shadow_present_mask || (__entry->spte & PT_PRESENT_MASK);
		__entry->x = is_executable_pte(__entry->spte);
		__entry->u = shadow_user_mask ? !!(__entry->spte & shadow_user_mask) : -1;
	),

	TP_printk("gfn %llx spte %llx (%s%s%s%s) level %d at %llx",
		  __entry->gfn, __entry->spte,
		  __entry->r ? "r" : "-",
		  /*
		   * NOTE(review): "w" is derived from PT_PRESENT_MASK, not
		   * PT_WRITABLE_MASK; the backport note says this is the
		   * deliberate 4.9 convention -- confirm before changing.
		   */
		  __entry->spte & PT_PRESENT_MASK ? "w" : "-",
		  __entry->x ? "x" : "-",
		  /*
		   * NOTE(review): u is a u8, so the -1 stored above becomes
		   * 255 and, after integer promotion, this comparison looks
		   * like it can never be true -- verify against upstream.
		   */
		  __entry->u == -1 ? "" : (__entry->u ? "u" : "-"),
		  __entry->level, __entry->sptep
	)
);

/*
 * Fired just before the shadow-page-table walk in __direct_map() and
 * FNAME(fetch); records the guest frame number and host pfn about to
 * be mapped at the given level.
 */
TRACE_EVENT(
	kvm_mmu_spte_requested,
	TP_PROTO(gpa_t addr, int level, kvm_pfn_t pfn),
	TP_ARGS(addr, level, pfn),

	TP_STRUCT__entry(
		__field(u64, gfn)
		__field(u64, pfn)
		__field(u8, level)
	),

	TP_fast_assign(
		__entry->gfn = addr >> PAGE_SHIFT;
		/*
		 * Merge the gfn's sub-large-page offset bits into the pfn,
		 * presumably so the logged pfn is the one backing this exact
		 * gfn when pfn is huge-page aligned -- confirm intent.
		 */
		__entry->pfn = pfn | (__entry->gfn & (KVM_PAGES_PER_HPAGE(level) - 1));
		__entry->level = level;
	),

	TP_printk("gfn %llx pfn %llx level %d",
		  __entry->gfn, __entry->pfn, __entry->level
	)
);

#endif /* _TRACE_KVMMMU_H */

#undef TRACE_INCLUDE_PATH
+2 −0
Original line number Diff line number Diff line
@@ -626,6 +626,8 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,

	base_gfn = gw->gfn;

	trace_kvm_mmu_spte_requested(addr, gw->level, pfn);

	for (; shadow_walk_okay(&it); shadow_walk_next(&it)) {
		clear_sp_write_flooding_count(it.sptep);
		base_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);