Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 81340006 authored by Ingo Molnar
Browse files

Merge branch 'x86/urgent' into x86/mce3



Conflicts:
	arch/x86/kernel/cpu/mcheck/mce_intel.c

Merge reason: merge with an urgent-branch MCE fix.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
parents 1bf7b31e fe955e5c
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -13,7 +13,7 @@
 * touching registers they shouldn't be.
 */

	.code16
	.code16gcc
	.text
	.globl	intcall
	.type	intcall, @function
+2 −0
Original line number Diff line number Diff line
@@ -29,9 +29,11 @@ extern void amd_iommu_detect(void);
extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
extern void amd_iommu_flush_all_domains(void);
extern void amd_iommu_flush_all_devices(void);
extern void amd_iommu_shutdown(void);
#else
static inline int amd_iommu_init(void) { return -ENODEV; }
static inline void amd_iommu_detect(void) { }
static inline void amd_iommu_shutdown(void) { }
#endif

#endif /* _ASM_X86_AMD_IOMMU_H */
+1 −2
Original line number Diff line number Diff line
@@ -257,7 +257,7 @@ typedef struct {

/**
 * atomic64_read - read atomic64 variable
 * @v: pointer of type atomic64_t
 * @ptr: pointer of type atomic64_t
 *
 * Atomically reads the value of @v.
 * Doesn't imply a read memory barrier.
@@ -294,7 +294,6 @@ atomic64_cmpxchg(atomic64_t *ptr, unsigned long long old_val,
 * atomic64_xchg - xchg atomic64 variable
 * @ptr:      pointer to type atomic64_t
 * @new_val:  value to assign
 * @old_val:  old value that was there
 *
 * Atomically xchgs the value of @ptr to @new_val and returns
 * the old value.
+16 −0
Original line number Diff line number Diff line
@@ -434,6 +434,16 @@ static void iommu_flush_tlb(struct amd_iommu *iommu, u16 domid)
	iommu_queue_inv_iommu_pages(iommu, address, domid, 0, 1);
}

/*
 * Flush the entire IO/TLB of a single protection domain, page
 * directory entries included (the PDE flag of the invalidate
 * command is set, unlike the plain iommu_flush_tlb() variant).
 */
static void iommu_flush_tlb_pde(struct amd_iommu *iommu, u16 domid)
{
	u64 flush_address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;

	INC_STATS_COUNTER(domain_flush_single);

	/* s=1 selects "all pages", pde=1 includes page directory entries */
	iommu_queue_inv_iommu_pages(iommu, flush_address, domid, 1, 1);
}

/*
 * This function is used to flush the IO/TLB for a given protection domain
 * on every IOMMU in the system
@@ -1078,7 +1088,13 @@ static void attach_device(struct amd_iommu *iommu,
	amd_iommu_pd_table[devid] = domain;
	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);

       /*
        * We might boot into a crash-kernel here. The crashed kernel
        * left the caches in the IOMMU dirty. So we have to flush
        * here to evict all dirty stuff.
        */
	iommu_queue_inv_dev_entry(iommu, devid);
	iommu_flush_tlb_pde(iommu, domain->id);
}

/*
+19 −7
Original line number Diff line number Diff line
@@ -260,6 +260,14 @@ static void iommu_enable(struct amd_iommu *iommu)

/*
 * Turn one IOMMU completely off by clearing its feature-enable bits.
 *
 * NOTE(review): the ordering — command buffer first, then event
 * interrupts/logging, then the IOMMU core itself — is presumably
 * required so that no in-flight commands or events race with the
 * core disable; confirm against the AMD IOMMU specification.
 */
static void iommu_disable(struct amd_iommu *iommu)
{
	/* Disable command buffer */
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

	/* Disable event logging and event interrupts */
	iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
	iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);

	/* Disable IOMMU hardware itself */
	iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
}

@@ -478,6 +486,10 @@ static void iommu_enable_event_buffer(struct amd_iommu *iommu)
	memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
		    &entry, sizeof(entry));

	/* set head and tail to zero manually */
	writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
}

@@ -1042,6 +1054,7 @@ static void enable_iommus(void)
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		iommu_disable(iommu);
		iommu_set_device_table(iommu);
		iommu_enable_command_buffer(iommu);
		iommu_enable_event_buffer(iommu);
@@ -1066,12 +1079,6 @@ static void disable_iommus(void)

static int amd_iommu_resume(struct sys_device *dev)
{
	/*
	 * Disable IOMMUs before reprogramming the hardware registers.
	 * IOMMU is still enabled from the resume kernel.
	 */
	disable_iommus();

	/* re-load the hardware */
	enable_iommus();

@@ -1079,8 +1086,8 @@ static int amd_iommu_resume(struct sys_device *dev)
	 * we have to flush after the IOMMUs are enabled because a
	 * disabled IOMMU will never execute the commands we send
	 */
	amd_iommu_flush_all_domains();
	amd_iommu_flush_all_devices();
	amd_iommu_flush_all_domains();

	return 0;
}
@@ -1273,6 +1280,11 @@ int __init amd_iommu_init(void)
	goto out;
}

/*
 * Disable every IOMMU in the system.
 *
 * NOTE(review): presumably invoked on the kernel shutdown/kexec path
 * so the hardware is left quiescent for a subsequently booted kernel —
 * the caller is not visible here, confirm at the call site.
 */
void amd_iommu_shutdown(void)
{
	disable_iommus();
}

/****************************************************************************
 *
 * Early detect code. This code runs at IOMMU detection time in the DMA
Loading