
Commit 77bdab46 authored by Suravee Suthikulpanit, committed by Joerg Roedel

iommu/amd: Add support for multiple IRTE formats



This patch enables support for the new 128-bit IOMMU IRTE format,
which can be used for both legacy and vapic interrupt remapping modes.
It replaces the existing operations on IRTEs, which can only support
the older 32-bit IRTE format, with calls to the new struct amd_irte_ops.

It also provides helper functions for setting up, accessing, and
updating interrupt remapping table entries in different modes.

Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 880ac60e
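
The diff below replaces direct manipulation of table entries with calls
through iommu->irte_ops. For reference, here is a sketch of that ops table,
reconstructed from the call sites in this patch (the structure itself comes
from parent commit 880ac60e); member order and parameter names are
assumptions, and the authoritative definition lives in amd_iommu_types.h:

struct amd_irte_ops {
	/* Fill in vector/destination/delivery mode for a new entry. */
	void (*prepare)(void *entry, u32 delivery_mode, u32 dest_mode,
			u8 vector, u32 dest_apicid);
	/* Set or clear the valid bit and write the entry back. */
	void (*activate)(void *entry, u16 devid, u16 index);
	void (*deactivate)(void *entry, u16 devid, u16 index);
	/* Re-target an entry at a new vector/destination. */
	void (*set_affinity)(void *entry, u16 devid, u16 index,
			     u8 vector, u32 dest_apicid);
	/* Allocation bookkeeping inside the per-device remap table. */
	void (*set_allocated)(struct irq_remap_table *table, int index);
	bool (*is_allocated)(struct irq_remap_table *table, int index);
	void (*clear_allocated)(struct irq_remap_table *table, int index);
};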
drivers/iommu/amd_iommu.c +48 −24
@@ -3532,8 +3532,6 @@ static void set_dte_irq_entry(u16 devid, struct irq_remap_table *table)
 	amd_iommu_dev_table[devid].data[2] = dte;
 }
 
-#define IRTE_ALLOCATED (~1U)
-
 static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
 {
 	struct irq_remap_table *table = NULL;
@@ -3579,13 +3577,18 @@ static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
 		goto out;
 	}
 
-	memset(table->table, 0, MAX_IRQS_PER_TABLE * sizeof(u32));
+	if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
+		memset(table->table, 0,
+		       MAX_IRQS_PER_TABLE * sizeof(u32));
+	else
+		memset(table->table, 0,
+		       (MAX_IRQS_PER_TABLE * (sizeof(u64) * 2)));
 
 	if (ioapic) {
 		int i;
 
 		for (i = 0; i < 32; ++i)
-			table->table[i] = IRTE_ALLOCATED;
+			iommu->irte_ops->set_allocated(table, i);
 	}
 
 	irq_lookup_table[devid] = table;
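
The two memset sizes reflect the two entry formats: a legacy IRTE is a
single 32-bit word, while a guest-APIC (GA) entry is 128 bits, i.e. two u64
words per slot. A simplified sketch of the layouts; union irte's field names
appear in the removed code further down, but the irte_ga internals shown
here are illustrative only:

union irte {			/* legacy format: 4 bytes per entry */
	u32 val;
	/* bitfields: valid, int_type, dm, destination, vector, ... */
};

struct irte_ga {		/* GA format: 16 bytes per entry */
	u64 lo;			/* valid, guest mode, destination, ... */
	u64 hi;			/* vector, GA root pointer, ... */
};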
@@ -3611,6 +3614,10 @@ static int alloc_irq_index(u16 devid, int count)
 	struct irq_remap_table *table;
 	unsigned long flags;
 	int index, c;
+	struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
+
+	if (!iommu)
+		return -ENODEV;
 
 	table = get_irq_table(devid, false);
 	if (!table)
@@ -3622,14 +3629,14 @@ static int alloc_irq_index(u16 devid, int count)
 	for (c = 0, index = table->min_index;
 	     index < MAX_IRQS_PER_TABLE;
 	     ++index) {
-		if (table->table[index] == 0)
+		if (!iommu->irte_ops->is_allocated(table, index))
 			c += 1;
 		else
 			c = 0;
 
 		if (c == count)	{
 			for (; c != 0; --c)
-				table->table[index - c + 1] = IRTE_ALLOCATED;
+				iommu->irte_ops->set_allocated(table, index - c + 1);
 
 			index -= count - 1;
 			goto out;
@@ -3715,7 +3722,7 @@ static void free_irte(u16 devid, int index)
 		return;
 
 	spin_lock_irqsave(&table->lock, flags);
-	table->table[index] = 0;
+	iommu->irte_ops->clear_allocated(table, index);
 	spin_unlock_irqrestore(&table->lock, flags);
 
 	iommu_flush_irt(iommu, devid);
@@ -3805,6 +3812,7 @@ static void irte_ga_set_affinity(void *entry, u16 devid, u16 index,
 	modify_irte_ga(devid, index, irte);
 }
 
+#define IRTE_ALLOCATED (~1U)
 static void irte_set_allocated(struct irq_remap_table *table, int index)
 {
 	table->table[index] = IRTE_ALLOCATED;
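
For the 32-bit format, the remaining allocation helpers are essentially the
open-coded tests that the hunks above replaced; a sketch along those lines
(upstream bodies may differ in detail):

static bool irte_is_allocated(struct irq_remap_table *table, int index)
{
	/* Mirrors the old "table->table[index] == 0" free-slot test. */
	return table->table[index] != 0;
}

static void irte_clear_allocated(struct irq_remap_table *table, int index)
{
	/* Mirrors the old "table->table[index] = 0" in free_irte(). */
	table->table[index] = 0;
}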
@@ -3934,19 +3942,17 @@ static void irq_remapping_prepare_irte(struct amd_ir_data *data,
 {
 	struct irq_2_irte *irte_info = &data->irq_2_irte;
 	struct msi_msg *msg = &data->msi_entry;
-	union irte *irte = &data->irte_entry;
 	struct IO_APIC_route_entry *entry;
+	struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
+
+	if (!iommu)
+		return;
 
 	data->irq_2_irte.devid = devid;
 	data->irq_2_irte.index = index + sub_handle;
-
-	/* Setup IRTE for IOMMU */
-	irte->val = 0;
-	irte->fields.vector      = irq_cfg->vector;
-	irte->fields.int_type    = apic->irq_delivery_mode;
-	irte->fields.destination = irq_cfg->dest_apicid;
-	irte->fields.dm          = apic->irq_dest_mode;
-	irte->fields.valid       = 1;
+	iommu->irte_ops->prepare(data->entry, apic->irq_delivery_mode,
+				 apic->irq_dest_mode, irq_cfg->vector,
+				 irq_cfg->dest_apicid);
 
 	switch (info->type) {
 	case X86_IRQ_ALLOC_TYPE_IOAPIC:
@@ -4002,7 +4008,7 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
 {
 	struct irq_alloc_info *info = arg;
 	struct irq_data *irq_data;
-	struct amd_ir_data *data;
+	struct amd_ir_data *data = NULL;
 	struct irq_cfg *cfg;
 	int i, ret, devid;
 	int index = -1;
@@ -4054,6 +4060,16 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
 		if (!data)
 			goto out_free_data;
 
+		if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
+			data->entry = kzalloc(sizeof(union irte), GFP_KERNEL);
+		else
+			data->entry = kzalloc(sizeof(struct irte_ga),
+						     GFP_KERNEL);
+		if (!data->entry) {
+			kfree(data);
+			goto out_free_data;
+		}
+
 		irq_data->hwirq = (devid << 16) + i;
 		irq_data->chip_data = data;
 		irq_data->chip = &amd_ir_chip;
@@ -4090,6 +4106,7 @@ static void irq_remapping_free(struct irq_domain *domain, unsigned int virq,
 			data = irq_data->chip_data;
 			irte_info = &data->irq_2_irte;
 			free_irte(irte_info->devid, irte_info->index);
+			kfree(data->entry);
 			kfree(data);
 		}
 	}
@@ -4101,8 +4118,11 @@ static void irq_remapping_activate(struct irq_domain *domain,
 {
 	struct amd_ir_data *data = irq_data->chip_data;
 	struct irq_2_irte *irte_info = &data->irq_2_irte;
+	struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];
 
-	modify_irte(irte_info->devid, irte_info->index, &data->irte_entry);
+	if (iommu)
+		iommu->irte_ops->activate(data->entry, irte_info->devid,
+					  irte_info->index);
 }
 
 static void irq_remapping_deactivate(struct irq_domain *domain,
@@ -4110,10 +4130,11 @@ static void irq_remapping_deactivate(struct irq_domain *domain,
 {
 	struct amd_ir_data *data = irq_data->chip_data;
 	struct irq_2_irte *irte_info = &data->irq_2_irte;
-	union irte entry;
+	struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];
 
-	entry.val = 0;
-	modify_irte(irte_info->devid, irte_info->index, &entry);
+	if (iommu)
+		iommu->irte_ops->deactivate(data->entry, irte_info->devid,
+					    irte_info->index);
 }
 
 static struct irq_domain_ops amd_ir_domain_ops = {
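
For the 32-bit format, activate/deactivate do what the replaced code did
inline: flip the valid bit and write the entry back via modify_irte(). A
sketch, assuming the union irte fields visible in the removed prepare code:

static void irte_activate(void *entry, u16 devid, u16 index)
{
	union irte *irte = (union irte *) entry;

	irte->fields.valid = 1;			/* mark the entry live */
	modify_irte(devid, index, irte);	/* write back to the table */
}

static void irte_deactivate(void *entry, u16 devid, u16 index)
{
	union irte *irte = (union irte *) entry;

	irte->fields.valid = 0;			/* block the interrupt */
	modify_irte(devid, index, irte);
}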
@@ -4130,8 +4151,12 @@ static int amd_ir_set_affinity(struct irq_data *data,
 	struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
 	struct irq_cfg *cfg = irqd_cfg(data);
 	struct irq_data *parent = data->parent_data;
+	struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];
 	int ret;
 
+	if (!iommu)
+		return -ENODEV;
+
 	ret = parent->chip->irq_set_affinity(parent, mask, force);
 	if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
 		return ret;
@@ -4140,9 +4165,8 @@ static int amd_ir_set_affinity(struct irq_data *data,
 	 * Atomically updates the IRTE with the new destination, vector
 	 * and flushes the interrupt entry cache.
 	 */
-	ir_data->irte_entry.fields.vector = cfg->vector;
-	ir_data->irte_entry.fields.destination = cfg->dest_apicid;
-	modify_irte(irte_info->devid, irte_info->index, &ir_data->irte_entry);
+	iommu->irte_ops->set_affinity(ir_data->entry, irte_info->devid,
+			    irte_info->index, cfg->vector, cfg->dest_apicid);
 
 	/*
 	 * After this point, all the interrupts will start arriving
drivers/iommu/amd_iommu_init.c +2 −0
@@ -1893,8 +1893,10 @@ static void iommu_enable_ga(struct amd_iommu *iommu)
 		/* Fall through */
 	case AMD_IOMMU_GUEST_IR_LEGACY_GA:
 		iommu_feature_enable(iommu, CONTROL_GA_EN);
+		iommu->irte_ops = &irte_128_ops;
 		break;
 	default:
+		iommu->irte_ops = &irte_32_ops;
 		break;
 	}
 #endif
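
Which ops table gets installed keys off amd_iommu_guest_ir, the same mode
variable that AMD_IOMMU_GUEST_IR_GA() tests in the table-allocation hunk
above. Presumably the predicate covers both GA modes, roughly as follows
(illustrative; the real macro lives in amd_iommu_types.h):

/* True when either guest-APIC mode uses the 128-bit IRTE format. */
#define AMD_IOMMU_GUEST_IR_GA(x)	((x) == AMD_IOMMU_GUEST_IR_VAPIC || \
					 (x) == AMD_IOMMU_GUEST_IR_LEGACY_GA)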
drivers/iommu/amd_iommu_types.h +0 −1
@@ -780,7 +780,6 @@ struct irq_2_irte {

 struct amd_ir_data {
 	struct irq_2_irte irq_2_irte;
-	union irte irte_entry;
 	struct msi_msg msi_entry;
 	void *entry;    /* Pointer to union irte or struct irte_ga */
 };