
Commit 96f8e98b authored by Thomas Gleixner, committed by Ingo Molnar

locking, x86, iommu: Annotate irq_2_ir_lock as raw



The irq_2_ir_lock can be taken in atomic context and therefore
cannot be preempted on -rt - annotate it.

In mainline this change documents the low level nature of
the lock - otherwise there's no functional difference. Lockdep
and Sparse checking will work as usual.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 1f5b3c3f
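For background, here is a minimal sketch of the annotation pattern this commit applies (the lock and function names below are hypothetical, not taken from the patch): on PREEMPT_RT, spinlock_t is substituted with a sleeping, rtmutex-based lock, while raw_spinlock_t keeps the classic spin-with-preemption-and-interrupts-disabled semantics, so a lock that can be taken in hard atomic context must use the raw variant.

/* Hypothetical example of the raw spinlock pattern; not part of this patch. */
#include <linux/spinlock.h>

/* Stays a true spinning lock even on PREEMPT_RT. */
static DEFINE_RAW_SPINLOCK(example_lock);

static void example_update(int *shared, int val)
{
	unsigned long flags;

	/* Safe to take from hard-irq / atomic context on all configs. */
	raw_spin_lock_irqsave(&example_lock, flags);
	*shared = val;
	raw_spin_unlock_irqrestore(&example_lock, flags);
}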
+14 −14
@@ -45,7 +45,7 @@ static __init int setup_intremap(char *str)
}
early_param("intremap", setup_intremap);

-static DEFINE_SPINLOCK(irq_2_ir_lock);
+static DEFINE_RAW_SPINLOCK(irq_2_ir_lock);

static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
@@ -62,12 +62,12 @@ int get_irte(int irq, struct irte *entry)
	if (!entry || !irq_iommu)
		return -1;

-	spin_lock_irqsave(&irq_2_ir_lock, flags);
+	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	*entry = *(irq_iommu->iommu->ir_table->base + index);

-	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	return 0;
}

@@ -101,7 +101,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
		return -1;
	}

-	spin_lock_irqsave(&irq_2_ir_lock, flags);
+	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
	do {
		for (i = index; i < index + count; i++)
			if  (table->base[i].present)
@@ -113,7 +113,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
		index = (index + count) % INTR_REMAP_TABLE_ENTRIES;

		if (index == start_index) {
-			spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+			raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
			printk(KERN_ERR "can't allocate an IRTE\n");
			return -1;
		}
@@ -127,7 +127,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = mask;

-	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return index;
}
@@ -152,10 +152,10 @@ int map_irq_to_irte_handle(int irq, u16 *sub_handle)
	if (!irq_iommu)
		return -1;

-	spin_lock_irqsave(&irq_2_ir_lock, flags);
+	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
	*sub_handle = irq_iommu->sub_handle;
	index = irq_iommu->irte_index;
-	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	return index;
}

@@ -167,14 +167,14 @@ int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
	if (!irq_iommu)
		return -1;

-	spin_lock_irqsave(&irq_2_ir_lock, flags);
+	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = subhandle;
	irq_iommu->irte_mask = 0;

-	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return 0;
}
@@ -190,7 +190,7 @@ int modify_irte(int irq, struct irte *irte_modified)
	if (!irq_iommu)
		return -1;

-	spin_lock_irqsave(&irq_2_ir_lock, flags);
+	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	iommu = irq_iommu->iommu;

@@ -202,7 +202,7 @@ int modify_irte(int irq, struct irte *irte_modified)
	__iommu_flush_cache(iommu, irte, sizeof(*irte));

	rc = qi_flush_iec(iommu, index, 0);
-	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}
@@ -270,7 +270,7 @@ int free_irte(int irq)
	if (!irq_iommu)
		return -1;

-	spin_lock_irqsave(&irq_2_ir_lock, flags);
+	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	rc = clear_entries(irq_iommu);

@@ -279,7 +279,7 @@ int free_irte(int irq)
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = 0;

-	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}
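A note on the mechanics of the conversion: DEFINE_RAW_SPINLOCK() changes the variable's type from spinlock_t to raw_spinlock_t, and the spin_lock_*() helpers are type-checked against spinlock_t, so a leftover spin_lock_irqsave() on the raw lock would no longer compile. That is why all fourteen lock and unlock sites on irq_2_ir_lock switch to the raw_spin_*() API in the same patch.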