
Commit 993ca6e0 authored by Sebastian Andrzej Siewior, committed by Joerg Roedel

iommu/amd: Drop the lock while allocating new irq remap table

The irq_remap_table is allocated while the iommu_table_lock is held with
interrupts disabled.
From looking at the call sites, all callers are in the early device
initialisation path (apic_bsp_setup(), pci_enable_device(),
pci_enable_msi()), so it makes sense to drop the lock, which also
re-enables interrupts, and try to allocate that memory with GFP_KERNEL
instead of GFP_ATOMIC.
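
For context, the GFP flag is dictated by the locking context: with a
spinlock held and interrupts off, the allocator must not sleep, so only
GFP_ATOMIC is legal there, and it may fail under memory pressure because
it cannot wait for reclaim. A minimal sketch of the difference
(demo_lock and demo_alloc are made up for illustration, not part of the
driver):

#include <linux/slab.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);	/* hypothetical lock, illustration only */

static void *demo_alloc(size_t size)
{
	unsigned long flags;
	void *p;

	spin_lock_irqsave(&demo_lock, flags);
	/* Lock held, IRQs off: must not sleep, so GFP_ATOMIC is the
	 * only option, and it can fail when memory is tight. */
	p = kzalloc(size, GFP_ATOMIC);
	spin_unlock_irqrestore(&demo_lock, flags);

	if (!p)
		/* Lock dropped, IRQs on again: GFP_KERNEL may sleep and
		 * trigger reclaim, so it is far more likely to succeed. */
		p = kzalloc(size, GFP_KERNEL);
	return p;
}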

Since the iommu_table_lock is dropped during the allocation, we need to
recheck whether the table exists after the lock has been reacquired. I
*think* it is impossible for the "devid" entry to appear in
irq_lookup_table while the lock is dropped, since the same device can
only be probed once. However, I check for both cases, just to be sure.
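
The shape of the change is the classic "allocate outside the lock,
recheck under the lock" pattern. A minimal sketch with hypothetical
names (obj, obj_lock, obj_table and NR_OBJS are invented for
illustration; none of them exist in the driver):

#include <linux/slab.h>
#include <linux/spinlock.h>

#define NR_OBJS	16			/* hypothetical table size */

struct obj {
	int id;
};

static DEFINE_SPINLOCK(obj_lock);	/* hypothetical lock */
static struct obj *obj_table[NR_OBJS];	/* hypothetical lookup table */

static struct obj *obj_get_or_alloc(unsigned int idx)
{
	struct obj *o, *new_o;
	unsigned long flags;

	spin_lock_irqsave(&obj_lock, flags);
	o = obj_table[idx];
	spin_unlock_irqrestore(&obj_lock, flags);
	if (o)
		return o;

	/* Lock dropped, interrupts on: a sleeping allocation is fine. */
	new_o = kzalloc(sizeof(*new_o), GFP_KERNEL);
	if (!new_o)
		return NULL;

	spin_lock_irqsave(&obj_lock, flags);
	/* Recheck: another CPU may have installed an entry meanwhile. */
	o = obj_table[idx];
	if (!o) {
		obj_table[idx] = new_o;
		o = new_o;
		new_o = NULL;
	}
	spin_unlock_irqrestore(&obj_lock, flags);

	kfree(new_o);	/* loser of the race frees its copy; kfree(NULL) is a no-op */
	return o;
}

This mirrors what the patch below does: __alloc_irq_table() runs without
the lock held, and new_table is freed at the end of alloc_irq_table() if
the recheck finds that another path already installed a table.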

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 2fcc1e8a
+45 −18
@@ -3617,6 +3617,30 @@ static struct irq_remap_table *get_irq_table(u16 devid)
 	return table;
 }
 
+static struct irq_remap_table *__alloc_irq_table(void)
+{
+	struct irq_remap_table *table;
+
+	table = kzalloc(sizeof(*table), GFP_KERNEL);
+	if (!table)
+		return NULL;
+
+	table->table = kmem_cache_alloc(amd_iommu_irq_cache, GFP_KERNEL);
+	if (!table->table) {
+		kfree(table);
+		return NULL;
+	}
+	raw_spin_lock_init(&table->lock);
+
+	if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
+		memset(table->table, 0,
+		       MAX_IRQS_PER_TABLE * sizeof(u32));
+	else
+		memset(table->table, 0,
+		       (MAX_IRQS_PER_TABLE * (sizeof(u64) * 2)));
+	return table;
+}
+
 static void set_remap_table_entry(struct amd_iommu *iommu, u16 devid,
 				  struct irq_remap_table *table)
 {
@@ -3628,6 +3652,7 @@ static void set_remap_table_entry(struct amd_iommu *iommu, u16 devid,
 static struct irq_remap_table *alloc_irq_table(u16 devid)
 {
 	struct irq_remap_table *table = NULL;
+	struct irq_remap_table *new_table = NULL;
 	struct amd_iommu *iommu;
 	unsigned long flags;
 	u16 alias;
@@ -3646,42 +3671,44 @@ static struct irq_remap_table *alloc_irq_table(u16 devid)
 	table = irq_lookup_table[alias];
 	if (table) {
 		set_remap_table_entry(iommu, devid, table);
-		goto out;
+		goto out_wait;
 	}
+	spin_unlock_irqrestore(&iommu_table_lock, flags);
 
 	/* Nothing there yet, allocate new irq remapping table */
-	table = kzalloc(sizeof(*table), GFP_ATOMIC);
-	if (!table)
-		goto out_unlock;
+	new_table = __alloc_irq_table();
+	if (!new_table)
+		return NULL;
 
-	/* Initialize table spin-lock */
-	raw_spin_lock_init(&table->lock);
+	spin_lock_irqsave(&iommu_table_lock, flags);
 
-	table->table = kmem_cache_alloc(amd_iommu_irq_cache, GFP_ATOMIC);
-	if (!table->table) {
-		kfree(table);
-		table = NULL;
+	table = irq_lookup_table[devid];
+	if (table)
 		goto out_unlock;
-	}
 
-	if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
-		memset(table->table, 0,
-		       MAX_IRQS_PER_TABLE * sizeof(u32));
-	else
-		memset(table->table, 0,
-		       (MAX_IRQS_PER_TABLE * (sizeof(u64) * 2)));
+	table = irq_lookup_table[alias];
+	if (table) {
+		set_remap_table_entry(iommu, devid, table);
+		goto out_wait;
+	}
+
+	table = new_table;
+	new_table = NULL;
 
 	set_remap_table_entry(iommu, devid, table);
 	if (devid != alias)
 		set_remap_table_entry(iommu, alias, table);
 
-out:
+out_wait:
 	iommu_completion_wait(iommu);
 
 out_unlock:
 	spin_unlock_irqrestore(&iommu_table_lock, flags);
 
+	if (new_table) {
+		kmem_cache_free(amd_iommu_irq_cache, new_table->table);
+		kfree(new_table);
+	}
 	return table;
 }