
Commit 87bbcfde authored by Linus Torvalds

Merge tag 'for-linus-20160216' of git://git.infradead.org/intel-iommu

Pull IOMMU SVM fixes from David Woodhouse:
 "Minor register size and interrupt acknowledgement fixes which only
  showed up in testing on newer hardware, but mostly a fix to the MM
  refcount handling to prevent a recursive refcount issue when mmap() is
  used on the file descriptor associated with a bound PASID"

* tag 'for-linus-20160216' of git://git.infradead.org/intel-iommu:
  iommu/vt-d: Clear PPR bit to ensure we get more page request interrupts
  iommu/vt-d: Fix 64-bit accesses to 32-bit DMAR_GSTS_REG
  iommu/vt-d: Fix mm refcounting to hold mm_count not mm_users
parents e5310a1c 46924008
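
For context, the crux of the mm refcounting fix is which of the two mm_struct reference counts a long-lived kernel user should hold. A minimal sketch of the distinction, using the 4.5-era API (the helper names here are hypothetical, not from the patch):

#include <linux/sched.h>

/* mm->mm_users counts users of the address space: when it drops to zero,
 * exit_mmap() tears down the VMAs and page tables. mm->mm_count pins the
 * mm_struct itself: only when it drops to zero is the structure freed.
 *
 * Holding mm_users indefinitely means exit_mmap() can never run. If the
 * process then mmap()s the very file whose ->release() would drop that
 * reference, the two references form a cycle and the mm can never die --
 * the recursive refcount issue described above. The fix is to hold
 * mm_count for the lifetime of the binding (mmu_notifier registration
 * does this) and take mm_users only transiently, while actually touching
 * the address space.
 */
static void bind_long_lived(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_count);	/* pin the struct, not the mappings */
}

static int touch_address_space(struct mm_struct *mm)
{
	if (!atomic_inc_not_zero(&mm->mm_users))
		return -EINVAL;		/* address space already defunct */
	down_read(&mm->mmap_sem);
	/* ... fault handling, VMA walks, etc. ... */
	up_read(&mm->mmap_sem);
	mmput(mm);			/* drop the transient mm_users ref */
	return 0;
}

static void unbind_long_lived(struct mm_struct *mm)
{
	mmdrop(mm);			/* may free the mm_struct */
}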
drivers/iommu/dmar.c +1 −1
@@ -1353,7 +1353,7 @@ void dmar_disable_qi(struct intel_iommu *iommu)
 
 	raw_spin_lock_irqsave(&iommu->register_lock, flags);
 
-	sts =  dmar_readq(iommu->reg + DMAR_GSTS_REG);
+	sts =  readl(iommu->reg + DMAR_GSTS_REG);
 	if (!(sts & DMA_GSTS_QIES))
 		goto end;
 
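The dmar_readq()-to-readl() conversion here (and the matching one in intel_irq_remapping.c below) is the "register size" fix from the pull message: DMAR_GSTS_REG is a 32-bit register, so it must be read with a 32-bit MMIO access. A brief sketch of the distinction, not from the patch:

#include <linux/io.h>

/* DMAR_GSTS_REG occupies 4 bytes of the register file, so only a 4-byte
 * access is valid. dmar_readq() issues an 8-byte read, which also covers
 * the 4 bytes after the register -- apparently tolerated by older
 * implementations, but it showed up as a bug in testing on newer
 * hardware, per the pull message. */
static u32 gsts_read(void __iomem *reg_base, unsigned long offset)
{
	return readl(reg_base + offset);	/* 32-bit access: correct */
}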
drivers/iommu/intel-svm.c +31 −6
@@ -249,12 +249,30 @@ static void intel_flush_pasid_dev(struct intel_svm *svm, struct intel_svm_dev *s
 static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
 {
 	struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
+	struct intel_svm_dev *sdev;
 
+	/* This might end up being called from exit_mmap(), *before* the page
+	 * tables are cleared. And __mmu_notifier_release() will delete us from
+	 * the list of notifiers so that our invalidate_range() callback doesn't
+	 * get called when the page tables are cleared. So we need to protect
+	 * against hardware accessing those page tables.
+	 *
+	 * We do it by clearing the entry in the PASID table and then flushing
+	 * the IOTLB and the PASID table caches. This might upset hardware;
+	 * perhaps we'll want to point the PASID to a dummy PGD (like the zero
+	 * page) so that we end up taking a fault that the hardware really
+	 * *has* to handle gracefully without affecting other processes.
+	 */
 	svm->iommu->pasid_table[svm->pasid].val = 0;
+	wmb();
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(sdev, &svm->devs, list) {
+		intel_flush_pasid_dev(svm, sdev, svm->pasid);
+		intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm);
+	}
+	rcu_read_unlock();
 
-	/* There's no need to do any flush because we can't get here if there
-	 * are any devices left anyway. */
-	WARN_ON(!list_empty(&svm->devs));
 }
 
 static const struct mmu_notifier_ops intel_mmuops = {
@@ -379,7 +397,6 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
 				goto out;
 			}
 			iommu->pasid_table[svm->pasid].val = (u64)__pa(mm->pgd) | 1;
-			mm = NULL;
 		} else
 			iommu->pasid_table[svm->pasid].val = (u64)__pa(init_mm.pgd) | 1 | (1ULL << 11);
 		wmb();
@@ -442,11 +459,11 @@ int intel_svm_unbind_mm(struct device *dev, int pasid)
 				kfree_rcu(sdev, rcu);
 
 				if (list_empty(&svm->devs)) {
-					mmu_notifier_unregister(&svm->notifier, svm->mm);
 
 					idr_remove(&svm->iommu->pasid_idr, svm->pasid);
 					if (svm->mm)
-						mmput(svm->mm);
+						mmu_notifier_unregister(&svm->notifier, svm->mm);
+
 					/* We mandate that no page faults may be outstanding
 					 * for the PASID when intel_svm_unbind_mm() is called.
 					 * If that is not obeyed, subtle errors will happen.
@@ -507,6 +524,10 @@ static irqreturn_t prq_event_thread(int irq, void *d)
 	struct intel_svm *svm = NULL;
 	int head, tail, handled = 0;
 
+	/* Clear PPR bit before reading head/tail registers, to
+	 * ensure that we get a new interrupt if needed. */
+	writel(DMA_PRS_PPR, iommu->reg + DMAR_PRS_REG);
+
 	tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
 	head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
 	while (head != tail) {
@@ -551,6 +572,9 @@ static irqreturn_t prq_event_thread(int irq, void *d)
 		 * any faults on kernel addresses. */
 		if (!svm->mm)
 			goto bad_req;
+		/* If the mm is already defunct, don't handle faults. */
+		if (!atomic_inc_not_zero(&svm->mm->mm_users))
+			goto bad_req;
 		down_read(&svm->mm->mmap_sem);
 		vma = find_extend_vma(svm->mm, address);
 		if (!vma || address < vma->vm_start)
@@ -567,6 +591,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)
 		result = QI_RESP_SUCCESS;
 	invalid:
 		up_read(&svm->mm->mmap_sem);
+		mmput(svm->mm);
 	bad_req:
 		/* Accounting for major/minor faults? */
 		rcu_read_lock();
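
The writel(DMA_PRS_PPR, ...) added to prq_event_thread() above follows the classic "acknowledge, then scan" interrupt pattern: clear the sticky pending bit before reading the queue head/tail, so that a request arriving after the read sets the bit again and raises a fresh interrupt. Clearing the bit only after draining could silently swallow a late arrival. A generic sketch of the pattern (device, register names, and helpers are hypothetical):

#include <linux/interrupt.h>
#include <linux/io.h>

#define REG_STATUS	0x10		/* hypothetical status register */
#define STATUS_PENDING	(1U << 0)	/* hypothetical write-1-to-clear bit */

struct dev_ctx {
	void __iomem *regs;
	/* ... queue bookkeeping ... */
};

static void drain_queue(struct dev_ctx *ctx);	/* hypothetical */

static irqreturn_t queue_irq_thread(int irq, void *data)
{
	struct dev_ctx *ctx = data;

	/* 1. Acknowledge first: clear the sticky bit before looking at the
	 *    queue. Anything that arrives after this point re-sets the bit
	 *    and re-triggers the interrupt, so no request can be lost. */
	writel(STATUS_PENDING, ctx->regs + REG_STATUS);

	/* 2. Only then drain whatever the head/tail pointers say is queued. */
	drain_queue(ctx);

	return IRQ_HANDLED;
}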
drivers/iommu/intel_irq_remapping.c +1 −1
@@ -629,7 +629,7 @@ static void iommu_disable_irq_remapping(struct intel_iommu *iommu)
 
 	raw_spin_lock_irqsave(&iommu->register_lock, flags);
 
-	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
+	sts = readl(iommu->reg + DMAR_GSTS_REG);
 	if (!(sts & DMA_GSTS_IRES))
 		goto end;
 
include/linux/intel-iommu.h +3 −0
@@ -235,6 +235,9 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
 /* low 64 bit */
 #define dma_frcd_page_addr(d) (d & (((u64)-1) << PAGE_SHIFT))
 
+/* PRS_REG */
+#define DMA_PRS_PPR	((u32)1)
+
 #define IOMMU_WAIT_OP(iommu, offset, op, cond, sts)			\
 do {									\
 	cycles_t start_time = get_cycles();				\