
Commit 6751b8d9 authored by Linus Torvalds

Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf fixes from Ingo Molnar:
 "On the kernel side there's a bunch of ring-buffer ordering fixes for a
  reproducible bug, plus a PEBS constraints regression fix.

  Plus tooling fixes"

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  tools headers UAPI: Sync kvm.h headers with the kernel sources
  perf record: Fix s390 missing module symbol and warning for non-root users
  perf machine: Read also the end of the kernel
  perf test vmlinux-kallsyms: Ignore aliases to _etext when searching on kallsyms
  perf session: Add missing swap ops for namespace events
  perf namespace: Protect reading thread's namespace
  tools headers UAPI: Sync drm/drm.h with the kernel
  tools headers UAPI: Sync drm/i915_drm.h with the kernel
  tools headers UAPI: Sync linux/fs.h with the kernel
  tools headers UAPI: Sync linux/sched.h with the kernel
  tools arch x86: Sync asm/cpufeatures.h with the kernel
  tools include UAPI: Update copy of files related to new fspick, fsmount, fsconfig, fsopen, move_mount and open_tree syscalls
  perf arm64: Fix mksyscalltbl when system kernel headers are ahead of the kernel
  perf data: Fix 'strncat may truncate' build failure with recent gcc
  perf/ring-buffer: Use regular variables for nesting
  perf/ring-buffer: Always use {READ,WRITE}_ONCE() for rb->user_page data
  perf/ring_buffer: Add ordering to rb->nest increment
  perf/ring_buffer: Fix exposing a temporarily decreased data_head
  perf/x86/intel/ds: Fix EVENT vs. UEVENT PEBS constraints
parents af042452 849e96f3
arch/x86/events/intel/ds.c: +14 −14
@@ -684,7 +684,7 @@ struct event_constraint intel_core2_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETURED.ANY */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1),    /* MEM_LOAD_RETIRED.* */
	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
-	INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x01),
+	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x01),
	EVENT_CONSTRAINT_END
};

@@ -693,7 +693,7 @@ struct event_constraint intel_atom_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1),    /* MEM_LOAD_RETIRED.* */
	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
-	INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x01),
+	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x01),
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
	EVENT_CONSTRAINT_END
@@ -701,7 +701,7 @@ struct event_constraint intel_atom_pebs_event_constraints[] = {

struct event_constraint intel_slm_pebs_event_constraints[] = {
	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
-	INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x1),
+	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x1),
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
	EVENT_CONSTRAINT_END
@@ -726,7 +726,7 @@ struct event_constraint intel_nehalem_pebs_event_constraints[] = {
	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf),    /* MEM_LOAD_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf),    /* FP_ASSIST.* */
	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
-	INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f),
+	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f),
	EVENT_CONSTRAINT_END
};

@@ -743,7 +743,7 @@ struct event_constraint intel_westmere_pebs_event_constraints[] = {
	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf),    /* MEM_LOAD_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf),    /* FP_ASSIST.* */
	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
-	INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f),
+	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f),
	EVENT_CONSTRAINT_END
};

@@ -752,7 +752,7 @@ struct event_constraint intel_snb_pebs_event_constraints[] = {
	INTEL_PLD_CONSTRAINT(0x01cd, 0x8),    /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */
	INTEL_PST_CONSTRAINT(0x02cd, 0x8),    /* MEM_TRANS_RETIRED.PRECISE_STORES */
	/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
-	INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
+	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
        INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf),    /* MEM_UOP_RETIRED.* */
        INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf),    /* MEM_LOAD_UOPS_RETIRED.* */
        INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf),    /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
@@ -767,9 +767,9 @@ struct event_constraint intel_ivb_pebs_event_constraints[] = {
        INTEL_PLD_CONSTRAINT(0x01cd, 0x8),    /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */
	INTEL_PST_CONSTRAINT(0x02cd, 0x8),    /* MEM_TRANS_RETIRED.PRECISE_STORES */
	/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
-	INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
+	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
	/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
-	INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2),
+	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf),    /* MEM_UOP_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf),    /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf),    /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
@@ -783,9 +783,9 @@ struct event_constraint intel_hsw_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
	INTEL_PLD_CONSTRAINT(0x01cd, 0xf),    /* MEM_TRANS_RETIRED.* */
	/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
-	INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
+	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
	/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
-	INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2),
+	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */
@@ -806,9 +806,9 @@ struct event_constraint intel_bdw_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
	INTEL_PLD_CONSTRAINT(0x01cd, 0xf),    /* MEM_TRANS_RETIRED.* */
	/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
-	INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
+	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
	/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
-	INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2),
+	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */
@@ -829,9 +829,9 @@ struct event_constraint intel_bdw_pebs_event_constraints[] = {
struct event_constraint intel_skl_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x1c0, 0x2),	/* INST_RETIRED.PREC_DIST */
	/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
-	INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2),
+	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
	/* INST_RETIRED.TOTAL_CYCLES_PS (inv=1, cmask=16) (cycles:p). */
-	INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f),
+	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f),
	INTEL_PLD_CONSTRAINT(0x1cd, 0xf),		      /* MEM_TRANS_RETIRED.* */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_STORES */
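Why each of the fourteen EVENT → UEVENT switches above matters: INTEL_FLAGS_EVENT_CONSTRAINT() compares only the event-select byte plus the flag bits (inv, cmask, ...), ignoring the unit mask, so a code such as 0x108000c0 would constrain any umask of event 0xc0; INTEL_FLAGS_UEVENT_CONSTRAINT() also compares the umask byte and so matches only the intended event. A minimal user-space sketch, not from the patch, decoding the raw codes used above per the architectural PERFEVTSEL layout (the SEL_* names are local to this sketch):

#include <stdio.h>

/* PERFEVTSEL fields per the x86 SDM; names are local to this sketch. */
#define SEL_EVENT 0x000000ffULL		/* event select */
#define SEL_UMASK 0x0000ff00ULL		/* unit mask */
#define SEL_INV   (1ULL << 23)		/* invert counter mask */
#define SEL_CMASK 0xff000000ULL		/* counter mask */

static void decode(unsigned long long code)
{
	printf("%#llx: event=%#llx umask=%#llx inv=%llu cmask=%llu\n",
	       code, code & SEL_EVENT, (code & SEL_UMASK) >> 8,
	       (code & SEL_INV) ? 1ULL : 0ULL, (code & SEL_CMASK) >> 24);
}

int main(void)
{
	decode(0x108000c0ULL);	/* INST_RETIRED.ANY_P, inv=1, cmask=16 */
	decode(0x108001c2ULL);	/* UOPS_RETIRED.ALL, umask=1, inv=1, cmask=16 */
	return 0;
}

Note how 0x108001c2 carries umask 0x01: with the EVENT variant that byte was never compared, which is exactly what these hunks correct.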
kernel/events/internal.h: +2 −2
@@ -24,7 +24,7 @@ struct ring_buffer {
	atomic_t			poll;		/* POLL_ for wakeups */

	local_t				head;		/* write position    */
-	local_t				nest;		/* nested writers    */
+	unsigned int			nest;		/* nested writers    */
	local_t				events;		/* event limit       */
	local_t				wakeup;		/* wakeup stamp      */
	local_t				lost;		/* nr records lost   */
@@ -41,7 +41,7 @@ struct ring_buffer {

	/* AUX area */
	long				aux_head;
-	local_t				aux_nest;
+	unsigned int			aux_nest;
	long				aux_wakeup;	/* last aux_watermark boundary crossed by aux_head */
	unsigned long			aux_pgoff;
	int				aux_nr_pages;
kernel/events/ring_buffer.c: +50 −14
@@ -38,7 +38,12 @@ static void perf_output_get_handle(struct perf_output_handle *handle)
	struct ring_buffer *rb = handle->rb;

	preempt_disable();
-	local_inc(&rb->nest);
+
+	/*
+	 * Avoid an explicit LOAD/STORE such that architectures with memops
+	 * can use them.
+	 */
+	(*(volatile unsigned int *)&rb->nest)++;
	handle->wakeup = local_read(&rb->wakeup);
}

@@ -46,17 +51,35 @@ static void perf_output_put_handle(struct perf_output_handle *handle)
{
	struct ring_buffer *rb = handle->rb;
	unsigned long head;
+	unsigned int nest;

+	/*
+	 * If this isn't the outermost nesting, we don't have to update
+	 * @rb->user_page->data_head.
+	 */
+	nest = READ_ONCE(rb->nest);
+	if (nest > 1) {
+		WRITE_ONCE(rb->nest, nest - 1);
+		goto out;
+	}

again:
+	/*
+	 * In order to avoid publishing a head value that goes backwards,
+	 * we must ensure the load of @rb->head happens after we've
+	 * incremented @rb->nest.
+	 *
+	 * Otherwise we can observe a @rb->head value before one published
+	 * by an IRQ/NMI happening between the load and the increment.
+	 */
+	barrier();
	head = local_read(&rb->head);

	/*
-	 * IRQ/NMI can happen here, which means we can miss a head update.
+	 * IRQ/NMI can happen here and advance @rb->head, causing our
+	 * load above to be stale.
	 */

-	if (!local_dec_and_test(&rb->nest))
-		goto out;

	/*
	 * Since the mmap() consumer (userspace) can run on a different CPU:
	 *
@@ -84,14 +107,23 @@ static void perf_output_put_handle(struct perf_output_handle *handle)
	 * See perf_output_begin().
	 */
	smp_wmb(); /* B, matches C */
-	rb->user_page->data_head = head;
+	WRITE_ONCE(rb->user_page->data_head, head);

	/*
-	 * Now check if we missed an update -- rely on previous implied
-	 * compiler barriers to force a re-read.
+	 * We must publish the head before decrementing the nest count,
+	 * otherwise an IRQ/NMI can publish a more recent head value and our
+	 * write will (temporarily) publish a stale value.
	 */
+	barrier();
+	WRITE_ONCE(rb->nest, 0);
+
+	/*
+	 * Ensure we decrement @rb->nest before we validate the @rb->head.
+	 * Otherwise we cannot be sure we caught the 'last' nested update.
+	 */
+	barrier();
	if (unlikely(head != local_read(&rb->head))) {
-		local_inc(&rb->nest);
+		WRITE_ONCE(rb->nest, 1);
		goto again;
	}

@@ -330,6 +362,7 @@ void *perf_aux_output_begin(struct perf_output_handle *handle,
	struct perf_event *output_event = event;
	unsigned long aux_head, aux_tail;
	struct ring_buffer *rb;
+	unsigned int nest;

	if (output_event->parent)
		output_event = output_event->parent;
@@ -360,13 +393,16 @@ void *perf_aux_output_begin(struct perf_output_handle *handle,
	if (!refcount_inc_not_zero(&rb->aux_refcount))
		goto err;

+	nest = READ_ONCE(rb->aux_nest);
	/*
	 * Nesting is not supported for AUX area, make sure nested
	 * writers are caught early
	 */
-	if (WARN_ON_ONCE(local_xchg(&rb->aux_nest, 1)))
+	if (WARN_ON_ONCE(nest))
		goto err_put;

+	WRITE_ONCE(rb->aux_nest, nest + 1);

	aux_head = rb->aux_head;

	handle->rb = rb;
@@ -394,7 +430,7 @@ void *perf_aux_output_begin(struct perf_output_handle *handle,
		if (!handle->size) { /* A, matches D */
			event->pending_disable = smp_processor_id();
			perf_output_wakeup(handle);
-			local_set(&rb->aux_nest, 0);
+			WRITE_ONCE(rb->aux_nest, 0);
			goto err_put;
		}
	}
@@ -471,7 +507,7 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
		perf_event_aux_event(handle->event, aux_head, size,
				     handle->aux_flags);

-	rb->user_page->aux_head = rb->aux_head;
+	WRITE_ONCE(rb->user_page->aux_head, rb->aux_head);
	if (rb_need_aux_wakeup(rb))
		wakeup = true;

@@ -483,7 +519,7 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)

	handle->event = NULL;

-	local_set(&rb->aux_nest, 0);
+	WRITE_ONCE(rb->aux_nest, 0);
	/* can't be last */
	rb_free_aux(rb);
	ring_buffer_put(rb);
@@ -503,7 +539,7 @@ int perf_aux_output_skip(struct perf_output_handle *handle, unsigned long size)

	rb->aux_head += size;

-	rb->user_page->aux_head = rb->aux_head;
+	WRITE_ONCE(rb->user_page->aux_head, rb->aux_head);
	if (rb_need_aux_wakeup(rb)) {
		perf_output_wakeup(handle);
		handle->wakeup = rb->aux_wakeup + rb->aux_watermark;
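The rb->nest rework above relies on all writers running on the same CPU: nesting only happens when an IRQ/NMI interrupts the current writer, so plain variables ordered by compiler barriers suffice, and only the outermost writer may publish data_head (so it never goes backwards). A rough user-space sketch of that protocol, with assumed names, READ_ONCE/WRITE_ONCE reduced to volatile accesses, and the smp_wmb() that orders the payload against data_head for the remote consumer omitted:

#include <stdio.h>

#define barrier()	 __asm__ __volatile__("" ::: "memory")
#define READ_ONCE(x)	 (*(volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, v) ((*(volatile __typeof__(x) *)&(x)) = (v))

/* Only ever touched by one CPU, so no atomics are needed. */
static unsigned int nest;	/* nested writers */
static unsigned long head;	/* private write position */
static unsigned long data_head;	/* position published to the consumer */

static void output_get(void)
{
	/* A single memop where the ISA has one; an IRQ/NMI on this CPU
	 * sees either the old or the new value, never a torn one. */
	(*(volatile unsigned int *)&nest)++;
}

static void output_put(void)
{
	unsigned long h;
	unsigned int n = READ_ONCE(nest);

	if (n > 1) {				/* nested writer: leave */
		WRITE_ONCE(nest, n - 1);	/* publication to outermost */
		return;
	}
again:
	barrier();			/* load head after observing nest == 1 */
	h = head;
	/* An IRQ/NMI here can advance head; the re-check below catches it. */
	WRITE_ONCE(data_head, h);	/* publish before dropping nest */
	barrier();
	WRITE_ONCE(nest, 0);
	barrier();			/* drop nest before validating head */
	if (h != head) {		/* caught a nested update: republish */
		WRITE_ONCE(nest, 1);
		goto again;
	}
}

int main(void)
{
	output_get();
	head += 16;			/* pretend a 16-byte record was written */
	output_put();
	printf("data_head=%lu nest=%u\n", data_head, nest);
	return 0;
}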
tools/arch/arm64/include/uapi/asm/kvm.h: +43 −0
@@ -35,6 +35,7 @@
#include <linux/psci.h>
#include <linux/types.h>
#include <asm/ptrace.h>
+#include <asm/sve_context.h>

#define __KVM_HAVE_GUEST_DEBUG
#define __KVM_HAVE_IRQ_LINE
@@ -102,6 +103,9 @@ struct kvm_regs {
#define KVM_ARM_VCPU_EL1_32BIT		1 /* CPU running a 32bit VM */
#define KVM_ARM_VCPU_PSCI_0_2		2 /* CPU uses PSCI v0.2 */
#define KVM_ARM_VCPU_PMU_V3		3 /* Support guest PMUv3 */
+#define KVM_ARM_VCPU_SVE		4 /* enable SVE for this CPU */
+#define KVM_ARM_VCPU_PTRAUTH_ADDRESS	5 /* VCPU uses address authentication */
+#define KVM_ARM_VCPU_PTRAUTH_GENERIC	6 /* VCPU uses generic authentication */

struct kvm_vcpu_init {
	__u32 target;
@@ -226,6 +230,45 @@ struct kvm_vcpu_events {
					 KVM_REG_ARM_FW | ((r) & 0xffff))
#define KVM_REG_ARM_PSCI_VERSION	KVM_REG_ARM_FW_REG(0)

+/* SVE registers */
+#define KVM_REG_ARM64_SVE		(0x15 << KVM_REG_ARM_COPROC_SHIFT)
+
+/* Z- and P-regs occupy blocks at the following offsets within this range: */
+#define KVM_REG_ARM64_SVE_ZREG_BASE	0
+#define KVM_REG_ARM64_SVE_PREG_BASE	0x400
+#define KVM_REG_ARM64_SVE_FFR_BASE	0x600
+
+#define KVM_ARM64_SVE_NUM_ZREGS		__SVE_NUM_ZREGS
+#define KVM_ARM64_SVE_NUM_PREGS		__SVE_NUM_PREGS
+
+#define KVM_ARM64_SVE_MAX_SLICES	32
+
+#define KVM_REG_ARM64_SVE_ZREG(n, i)					\
+	(KVM_REG_ARM64 | KVM_REG_ARM64_SVE | KVM_REG_ARM64_SVE_ZREG_BASE | \
+	 KVM_REG_SIZE_U2048 |						\
+	 (((n) & (KVM_ARM64_SVE_NUM_ZREGS - 1)) << 5) |			\
+	 ((i) & (KVM_ARM64_SVE_MAX_SLICES - 1)))
+
+#define KVM_REG_ARM64_SVE_PREG(n, i)					\
+	(KVM_REG_ARM64 | KVM_REG_ARM64_SVE | KVM_REG_ARM64_SVE_PREG_BASE | \
+	 KVM_REG_SIZE_U256 |						\
+	 (((n) & (KVM_ARM64_SVE_NUM_PREGS - 1)) << 5) |			\
+	 ((i) & (KVM_ARM64_SVE_MAX_SLICES - 1)))
+
+#define KVM_REG_ARM64_SVE_FFR(i)					\
+	(KVM_REG_ARM64 | KVM_REG_ARM64_SVE | KVM_REG_ARM64_SVE_FFR_BASE | \
+	 KVM_REG_SIZE_U256 |						\
+	 ((i) & (KVM_ARM64_SVE_MAX_SLICES - 1)))
+
+#define KVM_ARM64_SVE_VQ_MIN __SVE_VQ_MIN
+#define KVM_ARM64_SVE_VQ_MAX __SVE_VQ_MAX
+
+/* Vector lengths pseudo-register: */
+#define KVM_REG_ARM64_SVE_VLS		(KVM_REG_ARM64 | KVM_REG_ARM64_SVE | \
+					 KVM_REG_SIZE_U512 | 0xffff)
+#define KVM_ARM64_SVE_VLS_WORDS	\
+	((KVM_ARM64_SVE_VQ_MAX - KVM_ARM64_SVE_VQ_MIN) / 64 + 1)

/* Device Control API: ARM VGIC */
#define KVM_DEV_ARM_VGIC_GRP_ADDR	0
#define KVM_DEV_ARM_VGIC_GRP_DIST_REGS	1
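The SVE macros above construct 64-bit ONE_REG identifiers: the coprocessor field selects the SVE block, the size field encodes the slice width (U2048 for Z-regs, U256 for P-regs and FFR), bits 9:5 carry the register number and bits 4:0 the slice index. A standalone sketch of how a VMM might form an ID for KVM_GET_ONE_REG; the numeric constants are copied here from the mainline UAPI headers so it builds on its own (treat them as illustrative and include the real <asm/kvm.h> in actual code):

#include <stdint.h>
#include <stdio.h>

#define KVM_REG_ARM64			0x6000000000000000ULL
#define KVM_REG_SIZE_U2048		0x0080000000000000ULL
#define KVM_REG_ARM_COPROC_SHIFT	16
#define KVM_REG_ARM64_SVE		(0x15ULL << KVM_REG_ARM_COPROC_SHIFT)
#define KVM_REG_ARM64_SVE_ZREG_BASE	0
#define KVM_ARM64_SVE_NUM_ZREGS		32	/* __SVE_NUM_ZREGS */
#define KVM_ARM64_SVE_MAX_SLICES	32

#define KVM_REG_ARM64_SVE_ZREG(n, i)					\
	(KVM_REG_ARM64 | KVM_REG_ARM64_SVE | KVM_REG_ARM64_SVE_ZREG_BASE | \
	 KVM_REG_SIZE_U2048 |						\
	 (((n) & (KVM_ARM64_SVE_NUM_ZREGS - 1)) << 5) |			\
	 ((i) & (KVM_ARM64_SVE_MAX_SLICES - 1)))

int main(void)
{
	/* ID for Z-register 1, slice 0 -- goes into struct kvm_one_reg.id. */
	printf("Z1[0] id = %#llx\n",
	       (unsigned long long)KVM_REG_ARM64_SVE_ZREG(1, 0));
	return 0;
}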
tools/arch/powerpc/include/uapi/asm/kvm.h: +46 −0
@@ -482,6 +482,8 @@ struct kvm_ppc_cpu_char {
#define  KVM_REG_PPC_ICP_PPRI_SHIFT	16	/* pending irq priority */
#define  KVM_REG_PPC_ICP_PPRI_MASK	0xff

+#define KVM_REG_PPC_VP_STATE	(KVM_REG_PPC | KVM_REG_SIZE_U128 | 0x8d)

/* Device control API: PPC-specific devices */
#define KVM_DEV_MPIC_GRP_MISC		1
#define   KVM_DEV_MPIC_BASE_ADDR	0	/* 64-bit */
@@ -677,4 +679,48 @@ struct kvm_ppc_cpu_char {
#define  KVM_XICS_PRESENTED		(1ULL << 43)
#define  KVM_XICS_QUEUED		(1ULL << 44)

+/* POWER9 XIVE Native Interrupt Controller */
+#define KVM_DEV_XIVE_GRP_CTRL		1
+#define   KVM_DEV_XIVE_RESET		1
+#define   KVM_DEV_XIVE_EQ_SYNC		2
+#define KVM_DEV_XIVE_GRP_SOURCE		2	/* 64-bit source identifier */
+#define KVM_DEV_XIVE_GRP_SOURCE_CONFIG	3	/* 64-bit source identifier */
+#define KVM_DEV_XIVE_GRP_EQ_CONFIG	4	/* 64-bit EQ identifier */
+#define KVM_DEV_XIVE_GRP_SOURCE_SYNC	5	/* 64-bit source identifier */
+
+/* Layout of 64-bit XIVE source attribute values */
+#define KVM_XIVE_LEVEL_SENSITIVE	(1ULL << 0)
+#define KVM_XIVE_LEVEL_ASSERTED		(1ULL << 1)
+
+/* Layout of 64-bit XIVE source configuration attribute values */
+#define KVM_XIVE_SOURCE_PRIORITY_SHIFT	0
+#define KVM_XIVE_SOURCE_PRIORITY_MASK	0x7
+#define KVM_XIVE_SOURCE_SERVER_SHIFT	3
+#define KVM_XIVE_SOURCE_SERVER_MASK	0xfffffff8ULL
+#define KVM_XIVE_SOURCE_MASKED_SHIFT	32
+#define KVM_XIVE_SOURCE_MASKED_MASK	0x100000000ULL
+#define KVM_XIVE_SOURCE_EISN_SHIFT	33
+#define KVM_XIVE_SOURCE_EISN_MASK	0xfffffffe00000000ULL
+
+/* Layout of 64-bit EQ identifier */
+#define KVM_XIVE_EQ_PRIORITY_SHIFT	0
+#define KVM_XIVE_EQ_PRIORITY_MASK	0x7
+#define KVM_XIVE_EQ_SERVER_SHIFT	3
+#define KVM_XIVE_EQ_SERVER_MASK		0xfffffff8ULL
+
+/* Layout of EQ configuration values (64 bytes) */
+struct kvm_ppc_xive_eq {
+	__u32 flags;
+	__u32 qshift;
+	__u64 qaddr;
+	__u32 qtoggle;
+	__u32 qindex;
+	__u8  pad[40];
+};
+
+#define KVM_XIVE_EQ_ALWAYS_NOTIFY	0x00000001
+
+#define KVM_XIVE_TIMA_PAGE_OFFSET	0
+#define KVM_XIVE_ESB_PAGE_OFFSET	4
+
#endif /* __LINUX_KVM_POWERPC_H */
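The shift/mask pairs above define how the 64-bit source-configuration attribute value for KVM_DEV_XIVE_GRP_SOURCE_CONFIG is laid out (written through the device-control API's KVM_SET_DEVICE_ATTR). A sketch of packing one such value; the constants are repeated so it builds standalone, and the helper name is made up for illustration:

#include <stdint.h>
#include <stdio.h>

/* Values as defined in the header above. */
#define KVM_XIVE_SOURCE_PRIORITY_SHIFT	0
#define KVM_XIVE_SOURCE_PRIORITY_MASK	0x7ULL
#define KVM_XIVE_SOURCE_SERVER_SHIFT	3
#define KVM_XIVE_SOURCE_SERVER_MASK	0xfffffff8ULL
#define KVM_XIVE_SOURCE_MASKED_MASK	0x100000000ULL
#define KVM_XIVE_SOURCE_EISN_SHIFT	33
#define KVM_XIVE_SOURCE_EISN_MASK	0xfffffffe00000000ULL

/* Hypothetical helper: pack a 64-bit source-configuration value. */
static uint64_t xive_source_config(uint8_t prio, uint32_t server,
				   int masked, uint32_t eisn)
{
	uint64_t val = 0;

	val |= ((uint64_t)prio << KVM_XIVE_SOURCE_PRIORITY_SHIFT) &
		KVM_XIVE_SOURCE_PRIORITY_MASK;
	val |= ((uint64_t)server << KVM_XIVE_SOURCE_SERVER_SHIFT) &
		KVM_XIVE_SOURCE_SERVER_MASK;
	if (masked)
		val |= KVM_XIVE_SOURCE_MASKED_MASK;
	val |= ((uint64_t)eisn << KVM_XIVE_SOURCE_EISN_SHIFT) &
		KVM_XIVE_SOURCE_EISN_MASK;
	return val;
}

int main(void)
{
	/* priority 5, server (vCPU id) 1, unmasked, EISN 0x20 */
	printf("%#llx\n",
	       (unsigned long long)xive_source_config(5, 1, 0, 0x20));
	return 0;
}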