
Commit 368d06cd authored by Joerg Roedel

Merge branch 'x86/amd-avic' into x86/amd

parents 395adae4 d98de49a
+9 −0
@@ -460,6 +460,15 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
			driver will print ACPI tables for AMD IOMMU during
			IOMMU initialization.

	amd_iommu_intr=	[HW,X86-64]
			Specifies one of the following AMD IOMMU interrupt
			remapping modes:
			legacy     - Use legacy interrupt remapping mode.
			vapic      - Use virtual APIC mode, which allows IOMMU
			             to inject interrupts directly into guest.
			             This mode requires kvm-amd.avic=1.
			             (Default when IOMMU HW support is present.)

	amijoy.map=	[HW,JOY] Amiga joystick support
			Map of devices attached to JOY0DAT and JOY1DAT
			Format: <a>,<b>
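
A usage note on the new parameter pair documented above: to exercise the virtual APIC path, both switches would go on the kernel command line together, e.g.

	amd_iommu_intr=vapic kvm-amd.avic=1

(vapic is already the default when IOMMU hardware support is present, so amd_iommu_intr= is mainly useful for forcing legacy mode.)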
+427 −57
@@ -137,6 +137,7 @@ struct iommu_dev_data {
	bool pri_tlp;			  /* PASID TLB required for
					     PPR completions */
	u32 errata;			  /* Bitmap for errata to apply */
	bool use_vapic;			  /* Enable device to use vapic mode */
};

/*
@@ -707,14 +708,74 @@ static void iommu_poll_ppr_log(struct amd_iommu *iommu)
	}
}

#ifdef CONFIG_IRQ_REMAP
static int (*iommu_ga_log_notifier)(u32);

int amd_iommu_register_ga_log_notifier(int (*notifier)(u32))
{
	iommu_ga_log_notifier = notifier;

	return 0;
}
EXPORT_SYMBOL(amd_iommu_register_ga_log_notifier);

static void iommu_poll_ga_log(struct amd_iommu *iommu)
{
	u32 head, tail, cnt = 0;

	if (iommu->ga_log == NULL)
		return;

	head = readl(iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
	tail = readl(iommu->mmio_base + MMIO_GA_TAIL_OFFSET);

	while (head != tail) {
		volatile u64 *raw;
		u64 log_entry;

		raw = (u64 *)(iommu->ga_log + head);
		cnt++;

		/* Avoid memcpy function-call overhead */
		log_entry = *raw;

		/* Update head pointer of hardware ring-buffer */
		head = (head + GA_ENTRY_SIZE) % GA_LOG_SIZE;
		writel(head, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);

		/* Handle GA entry */
		switch (GA_REQ_TYPE(log_entry)) {
		case GA_GUEST_NR:
			if (!iommu_ga_log_notifier)
				break;

			pr_debug("AMD-Vi: %s: devid=%#x, ga_tag=%#x\n",
				 __func__, GA_DEVID(log_entry),
				 GA_TAG(log_entry));

			if (iommu_ga_log_notifier(GA_TAG(log_entry)) != 0)
				pr_err("AMD-Vi: GA log notifier failed.\n");
			break;
		default:
			break;
		}
	}
}
#endif /* CONFIG_IRQ_REMAP */
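
The notifier hook above is how a consumer (in practice, KVM's AVIC code) learns that a device interrupt targeted a guest vCPU that is not currently running. A minimal registration sketch, with a hypothetical handler; the handler name and its lookup are not part of this commit:

	static int example_ga_log_handler(u32 ga_tag)
	{
		/* Resolve ga_tag to the target vCPU and kick it awake so it
		 * can service the pending interrupt; return 0 on success. */
		return 0;
	}

	/* ...then, once during initialization: */
	amd_iommu_register_ga_log_notifier(&example_ga_log_handler);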

#define AMD_IOMMU_INT_MASK	\
	(MMIO_STATUS_EVT_INT_MASK | \
	 MMIO_STATUS_PPR_INT_MASK | \
	 MMIO_STATUS_GALOG_INT_MASK)

irqreturn_t amd_iommu_int_thread(int irq, void *data)
{
	struct amd_iommu *iommu = (struct amd_iommu *) data;
	u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);

-	while (status & (MMIO_STATUS_EVT_INT_MASK | MMIO_STATUS_PPR_INT_MASK)) {
-		/* Enable EVT and PPR interrupts again */
-		writel((MMIO_STATUS_EVT_INT_MASK | MMIO_STATUS_PPR_INT_MASK),
+	while (status & AMD_IOMMU_INT_MASK) {
+		/* Enable EVT and PPR and GA interrupts again */
+		writel(AMD_IOMMU_INT_MASK,
			iommu->mmio_base + MMIO_STATUS_OFFSET);

		if (status & MMIO_STATUS_EVT_INT_MASK) {
@@ -727,6 +788,13 @@ irqreturn_t amd_iommu_int_thread(int irq, void *data)
			iommu_poll_ppr_log(iommu);
		}

#ifdef CONFIG_IRQ_REMAP
		if (status & MMIO_STATUS_GALOG_INT_MASK) {
			pr_devel("AMD-Vi: Processing IOMMU GA Log\n");
			iommu_poll_ga_log(iommu);
		}
#endif

		/*
		 * Hardware bug: ERBT1312
		 * When re-enabling interrupt (by writing 1
@@ -2948,6 +3016,12 @@ static void amd_iommu_detach_device(struct iommu_domain *dom,
	if (!iommu)
		return;

#ifdef CONFIG_IRQ_REMAP
	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
	    (dom->type == IOMMU_DOMAIN_UNMANAGED))
		dev_data->use_vapic = 0;
#endif

	iommu_completion_wait(iommu);
}

@@ -2973,6 +3047,15 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,

	ret = attach_device(dev, domain);

#ifdef CONFIG_IRQ_REMAP
	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) {
		if (dom->type == IOMMU_DOMAIN_UNMANAGED)
			dev_data->use_vapic = 1;
		else
			dev_data->use_vapic = 0;
	}
#endif

	iommu_completion_wait(iommu);

	return ret;
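
Taken together, the attach/detach hooks make use_vapic track the domain type: only devices attached to an unmanaged (passthrough-style) domain are flagged, and irte_ga_prepare() and amd_ir_set_vcpu_affinity() below consult that flag before letting an IRTE enter guest mode.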
@@ -3511,34 +3594,6 @@ EXPORT_SYMBOL(amd_iommu_device_info);
 *
 *****************************************************************************/

-union irte {
-	u32 val;
-	struct {
-		u32 valid	: 1,
-		    no_fault	: 1,
-		    int_type	: 3,
-		    rq_eoi	: 1,
-		    dm		: 1,
-		    rsvd_1	: 1,
-		    destination	: 8,
-		    vector	: 8,
-		    rsvd_2	: 8;
-	} fields;
-};
-
-struct irq_2_irte {
-	u16 devid; /* Device ID for IRTE table */
-	u16 index; /* Index into IRTE table*/
-};
-
-struct amd_ir_data {
-	struct irq_2_irte			irq_2_irte;
-	union irte				irte_entry;
-	union {
-		struct msi_msg			msi_entry;
-	};
-};

static struct irq_chip amd_ir_chip;

#define DTE_IRQ_PHYS_ADDR_MASK	(((1ULL << 45)-1) << 6)
@@ -3560,8 +3615,6 @@ static void set_dte_irq_entry(u16 devid, struct irq_remap_table *table)
	amd_iommu_dev_table[devid].data[2] = dte;
}

-#define IRTE_ALLOCATED (~1U)

static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
{
	struct irq_remap_table *table = NULL;
@@ -3607,13 +3660,18 @@ static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
		goto out;
	}

-	memset(table->table, 0, MAX_IRQS_PER_TABLE * sizeof(u32));
+	if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
+		memset(table->table, 0,
+		       MAX_IRQS_PER_TABLE * sizeof(u32));
+	else
+		memset(table->table, 0,
+		       (MAX_IRQS_PER_TABLE * (sizeof(u64) * 2)));

	if (ioapic) {
		int i;

		for (i = 0; i < 32; ++i)
-			table->table[i] = IRTE_ALLOCATED;
+			iommu->irte_ops->set_allocated(table, i);
	}

	irq_lookup_table[devid] = table;
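
The two memset sizes above reflect the two entry formats: a legacy IRTE is a single 32-bit word while a GA IRTE spans two 64-bit words, so the same MAX_IRQS_PER_TABLE slots need four times the backing store. A sketch of the arithmetic, assuming the types added to amd_iommu_types.h below:

	/* Illustrative only: per-entry sizes implied by the memsets above. */
	size_t legacy_bytes = MAX_IRQS_PER_TABLE * sizeof(u32);      /*  4 bytes per entry */
	size_t ga_bytes     = MAX_IRQS_PER_TABLE * sizeof(u64) * 2;  /* 16 bytes per entry */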
@@ -3639,6 +3697,10 @@ static int alloc_irq_index(u16 devid, int count)
	struct irq_remap_table *table;
	unsigned long flags;
	int index, c;
	struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];

	if (!iommu)
		return -ENODEV;

	table = get_irq_table(devid, false);
	if (!table)
@@ -3650,14 +3712,14 @@ static int alloc_irq_index(u16 devid, int count)
	for (c = 0, index = table->min_index;
	     index < MAX_IRQS_PER_TABLE;
	     ++index) {
-		if (table->table[index] == 0)
+		if (!iommu->irte_ops->is_allocated(table, index))
			c += 1;
		else
			c = 0;

		if (c == count)	{
			for (; c != 0; --c)
-				table->table[index - c + 1] = IRTE_ALLOCATED;
+				iommu->irte_ops->set_allocated(table, index - c + 1);

			index -= count - 1;
			goto out;
@@ -3672,7 +3734,42 @@ static int alloc_irq_index(u16 devid, int count)
	return index;
}

-static int modify_irte(u16 devid, int index, union irte irte)
+static int modify_irte_ga(u16 devid, int index, struct irte_ga *irte,
+			  struct amd_ir_data *data)
{
	struct irq_remap_table *table;
	struct amd_iommu *iommu;
	unsigned long flags;
	struct irte_ga *entry;

	iommu = amd_iommu_rlookup_table[devid];
	if (iommu == NULL)
		return -EINVAL;

	table = get_irq_table(devid, false);
	if (!table)
		return -ENOMEM;

	spin_lock_irqsave(&table->lock, flags);

	entry = (struct irte_ga *)table->table;
	entry = &entry[index];
	entry->lo.fields_remap.valid = 0;
	entry->hi.val = irte->hi.val;
	entry->lo.val = irte->lo.val;
	entry->lo.fields_remap.valid = 1;
	if (data)
		data->ref = entry;

	spin_unlock_irqrestore(&table->lock, flags);

	iommu_flush_irt(iommu, devid);
	iommu_completion_wait(iommu);

	return 0;
}
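
Note the update discipline in modify_irte_ga(): the valid bit is cleared first, both 64-bit halves are written, and valid is set again only at the end, so the IOMMU never consumes a half-updated 128-bit entry. When a struct amd_ir_data is supplied, its ->ref member records where the live entry sits so that amd_iommu_update_ga() below can patch it in place later.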

+static int modify_irte(u16 devid, int index, union irte *irte)
{
	struct irq_remap_table *table;
	struct amd_iommu *iommu;
@@ -3687,7 +3784,7 @@ static int modify_irte(u16 devid, int index, union irte irte)
		return -ENOMEM;

	spin_lock_irqsave(&table->lock, flags);
-	table->table[index] = irte.val;
+	table->table[index] = irte->val;
	spin_unlock_irqrestore(&table->lock, flags);

	iommu_flush_irt(iommu, devid);
@@ -3711,13 +3808,146 @@ static void free_irte(u16 devid, int index)
		return;

	spin_lock_irqsave(&table->lock, flags);
-	table->table[index] = 0;
+	iommu->irte_ops->clear_allocated(table, index);
	spin_unlock_irqrestore(&table->lock, flags);

	iommu_flush_irt(iommu, devid);
	iommu_completion_wait(iommu);
}

static void irte_prepare(void *entry,
			 u32 delivery_mode, u32 dest_mode,
			 u8 vector, u32 dest_apicid, int devid)
{
	union irte *irte = (union irte *) entry;

	irte->val                = 0;
	irte->fields.vector      = vector;
	irte->fields.int_type    = delivery_mode;
	irte->fields.destination = dest_apicid;
	irte->fields.dm          = dest_mode;
	irte->fields.valid       = 1;
}

static void irte_ga_prepare(void *entry,
			    u32 delivery_mode, u32 dest_mode,
			    u8 vector, u32 dest_apicid, int devid)
{
	struct irte_ga *irte = (struct irte_ga *) entry;
	struct iommu_dev_data *dev_data = search_dev_data(devid);

	irte->lo.val                      = 0;
	irte->hi.val                      = 0;
	irte->lo.fields_remap.guest_mode  = dev_data ? dev_data->use_vapic : 0;
	irte->lo.fields_remap.int_type    = delivery_mode;
	irte->lo.fields_remap.dm          = dest_mode;
	irte->hi.fields.vector            = vector;
	irte->lo.fields_remap.destination = dest_apicid;
	irte->lo.fields_remap.valid       = 1;
}

static void irte_activate(void *entry, u16 devid, u16 index)
{
	union irte *irte = (union irte *) entry;

	irte->fields.valid = 1;
	modify_irte(devid, index, irte);
}

static void irte_ga_activate(void *entry, u16 devid, u16 index)
{
	struct irte_ga *irte = (struct irte_ga *) entry;

	irte->lo.fields_remap.valid = 1;
	modify_irte_ga(devid, index, irte, NULL);
}

static void irte_deactivate(void *entry, u16 devid, u16 index)
{
	union irte *irte = (union irte *) entry;

	irte->fields.valid = 0;
	modify_irte(devid, index, irte);
}

static void irte_ga_deactivate(void *entry, u16 devid, u16 index)
{
	struct irte_ga *irte = (struct irte_ga *) entry;

	irte->lo.fields_remap.valid = 0;
	modify_irte_ga(devid, index, irte, NULL);
}

static void irte_set_affinity(void *entry, u16 devid, u16 index,
			      u8 vector, u32 dest_apicid)
{
	union irte *irte = (union irte *) entry;

	irte->fields.vector = vector;
	irte->fields.destination = dest_apicid;
	modify_irte(devid, index, irte);
}

static void irte_ga_set_affinity(void *entry, u16 devid, u16 index,
				 u8 vector, u32 dest_apicid)
{
	struct irte_ga *irte = (struct irte_ga *) entry;
	struct iommu_dev_data *dev_data = search_dev_data(devid);

	if (!dev_data || !dev_data->use_vapic) {
		irte->hi.fields.vector = vector;
		irte->lo.fields_remap.destination = dest_apicid;
		irte->lo.fields_remap.guest_mode = 0;
		modify_irte_ga(devid, index, irte, NULL);
	}
}

#define IRTE_ALLOCATED (~1U)
static void irte_set_allocated(struct irq_remap_table *table, int index)
{
	table->table[index] = IRTE_ALLOCATED;
}

static void irte_ga_set_allocated(struct irq_remap_table *table, int index)
{
	struct irte_ga *ptr = (struct irte_ga *)table->table;
	struct irte_ga *irte = &ptr[index];

	memset(&irte->lo.val, 0, sizeof(u64));
	memset(&irte->hi.val, 0, sizeof(u64));
	irte->hi.fields.vector = 0xff;
}

static bool irte_is_allocated(struct irq_remap_table *table, int index)
{
	union irte *ptr = (union irte *)table->table;
	union irte *irte = &ptr[index];

	return irte->val != 0;
}

static bool irte_ga_is_allocated(struct irq_remap_table *table, int index)
{
	struct irte_ga *ptr = (struct irte_ga *)table->table;
	struct irte_ga *irte = &ptr[index];

	return irte->hi.fields.vector != 0;
}

static void irte_clear_allocated(struct irq_remap_table *table, int index)
{
	table->table[index] = 0;
}

static void irte_ga_clear_allocated(struct irq_remap_table *table, int index)
{
	struct irte_ga *ptr = (struct irte_ga *)table->table;
	struct irte_ga *irte = &ptr[index];

	memset(&irte->lo.val, 0, sizeof(u64));
	memset(&irte->hi.val, 0, sizeof(u64));
}
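
Note the asymmetry in the allocation markers: a legacy slot is reserved by storing the IRTE_ALLOCATED sentinel into its 32-bit word, while a GA slot is reserved by zeroing the entry and parking 0xff in its vector field, which is exactly the condition irte_ga_is_allocated() tests.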

static int get_devid(struct irq_alloc_info *info)
{
	int devid = -1;
@@ -3802,19 +4032,17 @@ static void irq_remapping_prepare_irte(struct amd_ir_data *data,
{
	struct irq_2_irte *irte_info = &data->irq_2_irte;
	struct msi_msg *msg = &data->msi_entry;
-	union irte *irte = &data->irte_entry;
	struct IO_APIC_route_entry *entry;
	struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];

	if (!iommu)
		return;

	data->irq_2_irte.devid = devid;
	data->irq_2_irte.index = index + sub_handle;

	/* Setup IRTE for IOMMU */
	irte->val = 0;
	irte->fields.vector      = irq_cfg->vector;
	irte->fields.int_type    = apic->irq_delivery_mode;
	irte->fields.destination = irq_cfg->dest_apicid;
	irte->fields.dm          = apic->irq_dest_mode;
	irte->fields.valid       = 1;
	iommu->irte_ops->prepare(data->entry, apic->irq_delivery_mode,
				 apic->irq_dest_mode, irq_cfg->vector,
				 irq_cfg->dest_apicid, devid);

	switch (info->type) {
	case X86_IRQ_ALLOC_TYPE_IOAPIC:
@@ -3845,12 +4073,32 @@ static void irq_remapping_prepare_irte(struct amd_ir_data *data,
	}
}

struct amd_irte_ops irte_32_ops = {
	.prepare = irte_prepare,
	.activate = irte_activate,
	.deactivate = irte_deactivate,
	.set_affinity = irte_set_affinity,
	.set_allocated = irte_set_allocated,
	.is_allocated = irte_is_allocated,
	.clear_allocated = irte_clear_allocated,
};

struct amd_irte_ops irte_128_ops = {
	.prepare = irte_ga_prepare,
	.activate = irte_ga_activate,
	.deactivate = irte_ga_deactivate,
	.set_affinity = irte_ga_set_affinity,
	.set_allocated = irte_ga_set_allocated,
	.is_allocated = irte_ga_is_allocated,
	.clear_allocated = irte_ga_clear_allocated,
};
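
These two ops tables give the rest of the driver a single dispatch point for both IRTE formats. The per-IOMMU selection happens in the initialization code, which is collapsed in this view; presumably something along these lines, keyed off amd_iommu_guest_ir:

	/* Sketch only; the actual assignment lives in the collapsed init file. */
	if (AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
		iommu->irte_ops = &irte_128_ops;
	else
		iommu->irte_ops = &irte_32_ops;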

static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
			       unsigned int nr_irqs, void *arg)
{
	struct irq_alloc_info *info = arg;
	struct irq_data *irq_data;
-	struct amd_ir_data *data;
+	struct amd_ir_data *data = NULL;
	struct irq_cfg *cfg;
	int i, ret, devid;
	int index = -1;
@@ -3903,6 +4151,16 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
		if (!data)
			goto out_free_data;

		if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
			data->entry = kzalloc(sizeof(union irte), GFP_KERNEL);
		else
			data->entry = kzalloc(sizeof(struct irte_ga),
						     GFP_KERNEL);
		if (!data->entry) {
			kfree(data);
			goto out_free_data;
		}

		irq_data->hwirq = (devid << 16) + i;
		irq_data->chip_data = data;
		irq_data->chip = &amd_ir_chip;
@@ -3939,6 +4197,7 @@ static void irq_remapping_free(struct irq_domain *domain, unsigned int virq,
			data = irq_data->chip_data;
			irte_info = &data->irq_2_irte;
			free_irte(irte_info->devid, irte_info->index);
			kfree(data->entry);
			kfree(data);
		}
	}
@@ -3950,8 +4209,11 @@ static void irq_remapping_activate(struct irq_domain *domain,
{
	struct amd_ir_data *data = irq_data->chip_data;
	struct irq_2_irte *irte_info = &data->irq_2_irte;
+	struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];

-	modify_irte(irte_info->devid, irte_info->index, data->irte_entry);
+	if (iommu)
+		iommu->irte_ops->activate(data->entry, irte_info->devid,
+					  irte_info->index);
}

static void irq_remapping_deactivate(struct irq_domain *domain,
@@ -3959,10 +4221,11 @@ static void irq_remapping_deactivate(struct irq_domain *domain,
{
	struct amd_ir_data *data = irq_data->chip_data;
	struct irq_2_irte *irte_info = &data->irq_2_irte;
-	union irte entry;
+	struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];

-	entry.val = 0;
-	modify_irte(irte_info->devid, irte_info->index, data->irte_entry);
+	if (iommu)
+		iommu->irte_ops->deactivate(data->entry, irte_info->devid,
+					    irte_info->index);
}

static struct irq_domain_ops amd_ir_domain_ops = {
@@ -3972,6 +4235,70 @@ static struct irq_domain_ops amd_ir_domain_ops = {
	.deactivate = irq_remapping_deactivate,
};

static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
{
	struct amd_iommu *iommu;
	struct amd_iommu_pi_data *pi_data = vcpu_info;
	struct vcpu_data *vcpu_pi_info = pi_data->vcpu_data;
	struct amd_ir_data *ir_data = data->chip_data;
	struct irte_ga *irte = (struct irte_ga *) ir_data->entry;
	struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
	struct iommu_dev_data *dev_data = search_dev_data(irte_info->devid);

	/* Note:
	 * This device has never been set up for guest mode,
	 * so we should not modify the IRTE.
	 */
	if (!dev_data || !dev_data->use_vapic)
		return 0;

	pi_data->ir_data = ir_data;

	/* Note:
	 * SVM tries to set up for VAPIC mode, but we are in
	 * legacy mode. So, we force legacy mode instead.
	 */
	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) {
		pr_debug("AMD-Vi: %s: Fall back to using intr legacy remap\n",
			 __func__);
		pi_data->is_guest_mode = false;
	}

	iommu = amd_iommu_rlookup_table[irte_info->devid];
	if (iommu == NULL)
		return -EINVAL;

	pi_data->prev_ga_tag = ir_data->cached_ga_tag;
	if (pi_data->is_guest_mode) {
		/* Setting */
		irte->hi.fields.ga_root_ptr = (pi_data->base >> 12);
		irte->hi.fields.vector = vcpu_pi_info->vector;
		irte->lo.fields_vapic.guest_mode = 1;
		irte->lo.fields_vapic.ga_tag = pi_data->ga_tag;

		ir_data->cached_ga_tag = pi_data->ga_tag;
	} else {
		/* Un-Setting */
		struct irq_cfg *cfg = irqd_cfg(data);

		irte->hi.val = 0;
		irte->lo.val = 0;
		irte->hi.fields.vector = cfg->vector;
		irte->lo.fields_remap.guest_mode = 0;
		irte->lo.fields_remap.destination = cfg->dest_apicid;
		irte->lo.fields_remap.int_type = apic->irq_delivery_mode;
		irte->lo.fields_remap.dm = apic->irq_dest_mode;

		/*
		 * This communicates the ga_tag back to the caller
		 * so that it can do all the necessary clean up.
		 */
		ir_data->cached_ga_tag = 0;
	}

	return modify_irte_ga(irte_info->devid, irte_info->index, irte, ir_data);
}
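
This callback is reached through the kernel's generic irq_set_vcpu_affinity() helper. A hedged sketch of a hypervisor-side caller flipping an interrupt into guest mode (the variable names are illustrative; the struct members are the ones used above):

	struct amd_iommu_pi_data pi_data = {
		.base          = avic_backing_page_pa, /* for hi.fields.ga_root_ptr */
		.ga_tag        = my_ga_tag,            /* identifies the target vCPU */
		.is_guest_mode = true,
		.vcpu_data     = &my_vcpu_info,        /* supplies ->vector */
	};

	int ret = irq_set_vcpu_affinity(host_irq, &pi_data);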

static int amd_ir_set_affinity(struct irq_data *data,
			       const struct cpumask *mask, bool force)
{
@@ -3979,8 +4306,12 @@ static int amd_ir_set_affinity(struct irq_data *data,
	struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
	struct irq_cfg *cfg = irqd_cfg(data);
	struct irq_data *parent = data->parent_data;
	struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];
	int ret;

	if (!iommu)
		return -ENODEV;

	ret = parent->chip->irq_set_affinity(parent, mask, force);
	if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
		return ret;
@@ -3989,9 +4320,8 @@ static int amd_ir_set_affinity(struct irq_data *data,
	 * Atomically updates the IRTE with the new destination, vector
	 * and flushes the interrupt entry cache.
	 */
-	ir_data->irte_entry.fields.vector = cfg->vector;
-	ir_data->irte_entry.fields.destination = cfg->dest_apicid;
-	modify_irte(irte_info->devid, irte_info->index, ir_data->irte_entry);
+	iommu->irte_ops->set_affinity(ir_data->entry, irte_info->devid,
+			    irte_info->index, cfg->vector, cfg->dest_apicid);

	/*
	 * After this point, all the interrupts will start arriving
@@ -4013,6 +4343,7 @@ static void ir_compose_msi_msg(struct irq_data *irq_data, struct msi_msg *msg)
static struct irq_chip amd_ir_chip = {
	.irq_ack = ir_ack_apic_edge,
	.irq_set_affinity = amd_ir_set_affinity,
	.irq_set_vcpu_affinity = amd_ir_set_vcpu_affinity,
	.irq_compose_msi_msg = ir_compose_msi_msg,
};

@@ -4027,4 +4358,43 @@ int amd_iommu_create_irq_domain(struct amd_iommu *iommu)

	return 0;
}

int amd_iommu_update_ga(int cpu, bool is_run, void *data)
{
	unsigned long flags;
	struct amd_iommu *iommu;
	struct irq_remap_table *irt;
	struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
	int devid = ir_data->irq_2_irte.devid;
	struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
	struct irte_ga *ref = (struct irte_ga *) ir_data->ref;

	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
	    !ref || !entry || !entry->lo.fields_vapic.guest_mode)
		return 0;

	iommu = amd_iommu_rlookup_table[devid];
	if (!iommu)
		return -ENODEV;

	irt = get_irq_table(devid, false);
	if (!irt)
		return -ENODEV;

	spin_lock_irqsave(&irt->lock, flags);

	if (ref->lo.fields_vapic.guest_mode) {
		if (cpu >= 0)
			ref->lo.fields_vapic.destination = cpu;
		ref->lo.fields_vapic.is_run = is_run;
		barrier();
	}

	spin_unlock_irqrestore(&irt->lock, flags);

	iommu_flush_irt(iommu, devid);
	iommu_completion_wait(iommu);
	return 0;
}
EXPORT_SYMBOL(amd_iommu_update_ga);
#endif
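
amd_iommu_update_ga() is the scheduling fast path: instead of rebuilding and re-validating the whole IRTE, it patches only the destination and is_run bits of the live entry recorded in ->ref by modify_irte_ga(). A hedged sketch of the expected call sites (illustrative; the real callers live in KVM's AVIC code):

	/* vCPU is about to run on this physical CPU */
	amd_iommu_update_ga(cpu, true, ir_data);

	/* vCPU is being scheduled out; a negative cpu leaves the
	 * destination untouched, and GA log events signal wake-ups */
	amd_iommu_update_ga(-1, false, ir_data);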
+175 −6 (changes collapsed: preview size limit exceeded)

+1 −0
@@ -38,6 +38,7 @@ extern int amd_iommu_enable(void);
extern void amd_iommu_disable(void);
extern int amd_iommu_reenable(int);
extern int amd_iommu_enable_faulting(void);
extern int amd_iommu_guest_ir;

/* IOMMUv2 specific functions */
struct iommu_domain;
+149 −0
@@ -22,6 +22,7 @@

#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/msi.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
@@ -69,6 +70,8 @@
#define MMIO_EXCL_LIMIT_OFFSET  0x0028
#define MMIO_EXT_FEATURES	0x0030
#define MMIO_PPR_LOG_OFFSET	0x0038
#define MMIO_GA_LOG_BASE_OFFSET	0x00e0
#define MMIO_GA_LOG_TAIL_OFFSET	0x00e8
#define MMIO_CMD_HEAD_OFFSET	0x2000
#define MMIO_CMD_TAIL_OFFSET	0x2008
#define MMIO_EVT_HEAD_OFFSET	0x2010
@@ -76,6 +79,8 @@
#define MMIO_STATUS_OFFSET	0x2020
#define MMIO_PPR_HEAD_OFFSET	0x2030
#define MMIO_PPR_TAIL_OFFSET	0x2038
#define MMIO_GA_HEAD_OFFSET	0x2040
#define MMIO_GA_TAIL_OFFSET	0x2048
#define MMIO_CNTR_CONF_OFFSET	0x4000
#define MMIO_CNTR_REG_OFFSET	0x40000
#define MMIO_REG_END_OFFSET	0x80000
@@ -92,6 +97,7 @@
#define FEATURE_GA		(1ULL<<7)
#define FEATURE_HE		(1ULL<<8)
#define FEATURE_PC		(1ULL<<9)
#define FEATURE_GAM_VAPIC	(1ULL<<21)

#define FEATURE_PASID_SHIFT	32
#define FEATURE_PASID_MASK	(0x1fULL << FEATURE_PASID_SHIFT)
@@ -110,6 +116,9 @@
#define MMIO_STATUS_EVT_INT_MASK	(1 << 1)
#define MMIO_STATUS_COM_WAIT_INT_MASK	(1 << 2)
#define MMIO_STATUS_PPR_INT_MASK	(1 << 6)
#define MMIO_STATUS_GALOG_RUN_MASK	(1 << 8)
#define MMIO_STATUS_GALOG_OVERFLOW_MASK	(1 << 9)
#define MMIO_STATUS_GALOG_INT_MASK	(1 << 10)

/* event logging constants */
#define EVENT_ENTRY_SIZE	0x10
@@ -146,6 +155,10 @@
#define CONTROL_PPFINT_EN       0x0eULL
#define CONTROL_PPR_EN          0x0fULL
#define CONTROL_GT_EN           0x10ULL
#define CONTROL_GA_EN           0x11ULL
#define CONTROL_GAM_EN          0x19ULL
#define CONTROL_GALOG_EN        0x1CULL
#define CONTROL_GAINT_EN        0x1DULL

#define CTRL_INV_TO_MASK	(7 << CONTROL_INV_TIMEOUT)
#define CTRL_INV_TO_NONE	0
@@ -224,6 +237,19 @@

#define PPR_REQ_FAULT		0x01

/* Constants for GA Log handling */
#define GA_LOG_ENTRIES		512
#define GA_LOG_SIZE_SHIFT	56
#define GA_LOG_SIZE_512		(0x8ULL << GA_LOG_SIZE_SHIFT)
#define GA_ENTRY_SIZE		8
#define GA_LOG_SIZE		(GA_ENTRY_SIZE * GA_LOG_ENTRIES)

#define GA_TAG(x)		(u32)(x & 0xffffffffULL)
#define GA_DEVID(x)		(u16)(((x) >> 32) & 0xffffULL)
#define GA_REQ_TYPE(x)		(((x) >> 60) & 0xfULL)

#define GA_GUEST_NR		0x1
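
Taken together, the accessors above say an 8-byte GA log entry carries the GA tag in bits 31:0, the device ID in bits 47:32, and the request type in bits 63:60. An illustrative decode:

	static void example_decode_ga_entry(u64 log_entry)
	{
		if (GA_REQ_TYPE(log_entry) == GA_GUEST_NR) {
			u16 devid  = GA_DEVID(log_entry);  /* requesting device   */
			u32 ga_tag = GA_TAG(log_entry);    /* identifies the vCPU */

			/* hand ga_tag to the registered notifier, as
			 * iommu_poll_ga_log() does above */
		}
	}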

#define PAGE_MODE_NONE    0x00
#define PAGE_MODE_1_LEVEL 0x01
#define PAGE_MODE_2_LEVEL 0x02
@@ -329,6 +355,12 @@
#define IOMMU_CAP_NPCACHE 26
#define IOMMU_CAP_EFR     27

/* IOMMU Feature Reporting Field (for IVHD type 10h) */
#define IOMMU_FEAT_GASUP_SHIFT	6

/* IOMMU Extended Feature Register (EFR) */
#define IOMMU_EFR_GASUP_SHIFT	7

#define MAX_DOMAIN_ID 65536

/* Protection domain flags */
@@ -400,6 +432,7 @@ struct amd_iommu_fault {

struct iommu_domain;
struct irq_domain;
struct amd_irte_ops;

/*
 * This structure contains generic data for  IOMMU protection domains
@@ -490,6 +523,12 @@ struct amd_iommu {
	/* Base of the PPR log, if present */
	u8 *ppr_log;

	/* Base of the GA log, if present */
	u8 *ga_log;

	/* Tail of the GA log, if present */
	u8 *ga_log_tail;

	/* true if interrupts for this IOMMU are already enabled */
	bool int_enabled;

@@ -523,6 +562,8 @@ struct amd_iommu {
#ifdef CONFIG_IRQ_REMAP
	struct irq_domain *ir_domain;
	struct irq_domain *msi_domain;

	struct amd_irte_ops *irte_ops;
#endif
};

@@ -681,4 +722,112 @@ static inline int get_hpet_devid(int id)
	return -EINVAL;
}

enum amd_iommu_intr_mode_type {
	AMD_IOMMU_GUEST_IR_LEGACY,

	/* This mode is not visible to users. It is used when
	 * we cannot fully enable vAPIC and fall back to supporting only
	 * legacy interrupt remapping via 128-bit IRTE.
	 */
	AMD_IOMMU_GUEST_IR_LEGACY_GA,
	AMD_IOMMU_GUEST_IR_VAPIC,
};
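
These values map onto the amd_iommu_intr= settings documented in the first hunk: legacy selects AMD_IOMMU_GUEST_IR_LEGACY and vapic selects AMD_IOMMU_GUEST_IR_VAPIC, while AMD_IOMMU_GUEST_IR_LEGACY_GA is the internal fallback used when GA-capable hardware is present but full vAPIC support cannot be enabled.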

#define AMD_IOMMU_GUEST_IR_GA(x)	(x == AMD_IOMMU_GUEST_IR_VAPIC || \
					 x == AMD_IOMMU_GUEST_IR_LEGACY_GA)

#define AMD_IOMMU_GUEST_IR_VAPIC(x)	(x == AMD_IOMMU_GUEST_IR_VAPIC)

union irte {
	u32 val;
	struct {
		u32 valid	: 1,
		    no_fault	: 1,
		    int_type	: 3,
		    rq_eoi	: 1,
		    dm		: 1,
		    rsvd_1	: 1,
		    destination	: 8,
		    vector	: 8,
		    rsvd_2	: 8;
	} fields;
};

union irte_ga_lo {
	u64 val;

	/* For int remapping */
	struct {
		u64 valid	: 1,
		    no_fault	: 1,
		    /* ------ */
		    int_type	: 3,
		    rq_eoi	: 1,
		    dm		: 1,
		    /* ------ */
		    guest_mode	: 1,
		    destination	: 8,
		    rsvd	: 48;
	} fields_remap;

	/* For guest vAPIC */
	struct {
		u64 valid	: 1,
		    no_fault	: 1,
		    /* ------ */
		    ga_log_intr	: 1,
		    rsvd1	: 3,
		    is_run	: 1,
		    /* ------ */
		    guest_mode	: 1,
		    destination	: 8,
		    rsvd2	: 16,
		    ga_tag	: 32;
	} fields_vapic;
};

union irte_ga_hi {
	u64 val;
	struct {
		u64 vector	: 8,
		    rsvd_1	: 4,
		    ga_root_ptr	: 40,
		    rsvd_2	: 12;
	} fields;
};

struct irte_ga {
	union irte_ga_lo lo;
	union irte_ga_hi hi;
};
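
A hedged sanity note on the two layouts (the checks would need to sit inside a function, since BUILD_BUG_ON expands to a statement); the sizes are where the irte_32_ops and irte_128_ops names come from:

	BUILD_BUG_ON(sizeof(union irte) != sizeof(u32));         /*  32-bit legacy IRTE */
	BUILD_BUG_ON(sizeof(struct irte_ga) != 2 * sizeof(u64)); /* 128-bit GA IRTE     */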

struct irq_2_irte {
	u16 devid; /* Device ID for IRTE table */
	u16 index; /* Index into IRTE table*/
};

struct amd_ir_data {
	u32 cached_ga_tag;
	struct irq_2_irte irq_2_irte;
	struct msi_msg msi_entry;
	void *entry;    /* Pointer to union irte or struct irte_ga */
	void *ref;      /* Pointer to the actual irte */
};

struct amd_irte_ops {
	void (*prepare)(void *, u32, u32, u8, u32, int);
	void (*activate)(void *, u16, u16);
	void (*deactivate)(void *, u16, u16);
	void (*set_affinity)(void *, u16, u16, u8, u32);
	void *(*get)(struct irq_remap_table *, int);
	void (*set_allocated)(struct irq_remap_table *, int);
	bool (*is_allocated)(struct irq_remap_table *, int);
	void (*clear_allocated)(struct irq_remap_table *, int);
};

#ifdef CONFIG_IRQ_REMAP
extern struct amd_irte_ops irte_32_ops;
extern struct amd_irte_ops irte_128_ops;
#endif

#endif /* _ASM_X86_AMD_IOMMU_TYPES_H */