Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 619d144b authored by Patrick Daly's avatar Patrick Daly Committed by Sudarshan Rajagopalan
Browse files

arm64: dma-mapping: Support attach after detach case



Iommu client drivers which use DOMAIN_ATTR_ATOMIC need to call
arm_iommu_detach_device() when their driver enters low power mode to
disable their clock/power votes. On wakeup they call
arm_iommu_attach_device() to reenable the iommu. To support this
usecase, ensure dma allocator state is freed as part of
arm_iommu_release_mapping().

Additionally, move memory allocations for dma allocator state to the
first arm_iommu_attach_device() call to allow clients to set iommu
domain attributes first.  This is expected to reduce memory use.

Change-Id: I0df29ee0de52c6f9b6621ec6f0105201c6ee5996
Signed-off-by: default avatarPatrick Daly <pdaly@codeaurora.org>
Signed-off-by: default avatarSudarshan Rajagopalan <sudaraja@codeaurora.org>
parent b31361a5
Loading
Loading
Loading
Loading
+5 −3
Original line number Diff line number Diff line
@@ -14,14 +14,16 @@
struct dma_iommu_mapping {
	/* iommu specific data */
	struct iommu_domain	*domain;
	/* true once allocator state was set up by the first attach */
	bool			init;
	/* refcount; released via the allocator-specific release callback */
	struct kref		kref;
	/* dma_map_ops selected at init time (bitmap / fast / swiotlb) */
	const struct dma_map_ops *ops;

	/* Protects bitmap */
	spinlock_t		lock;
	void			*bitmap;	/* iova allocation bitmap */
	size_t			bits;		/* size of iova space in pages */
	dma_addr_t		base;		/* first iova of the mapping */

	/* state for the fast (io-pgtable-fast) allocator, if used */
	struct dma_fast_smmu_mapping *fast;
};

+124 −59
Original line number Diff line number Diff line
@@ -1876,23 +1876,42 @@ const struct dma_map_ops iommu_ops = {
 * IO address ranges, which is required to perform memory allocation and
 * mapping with IOMMU aware functions.
 *
 * The client device need to be attached to the mapping with
 * arm_iommu_attach_device function.
 * Clients may use iommu_domain_set_attr() to set additional flags prior
 * to calling arm_iommu_attach_device() to complete initialization.
 */
struct dma_iommu_mapping *
arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size)
{
	unsigned int bits = size >> PAGE_SHIFT;
	struct dma_iommu_mapping *mapping;

	/* Reject zero-sized (or sub-page) iova ranges. */
	if (!bits)
		return ERR_PTR(-EINVAL);

	mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
	if (!mapping)
		return ERR_PTR(-ENOMEM);

	mapping->base = base;
	mapping->bits = bits;

	mapping->domain = iommu_domain_alloc(bus);
	if (!mapping->domain)
		goto err_domain_alloc;

	/*
	 * Allocator state is set up lazily on the first
	 * arm_iommu_attach_device() call so clients can set domain
	 * attributes (e.g. DOMAIN_ATTR_FAST, DOMAIN_ATTR_S1_BYPASS) first.
	 */
	mapping->init = false;
	return mapping;

err_domain_alloc:
	kfree(mapping);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(arm_iommu_create_mapping);

/*
 * Set up the bitmap iova allocator for @mapping.
 * Called once, from the first arm_iommu_attach_device().
 */
static int
bitmap_iommu_init_mapping(struct device *dev, struct dma_iommu_mapping *mapping)
{
	unsigned int bitmap_size = BITS_TO_LONGS(mapping->bits) * sizeof(long);

	/* Try kmalloc quietly first; fall back to vmalloc for large bitmaps. */
	mapping->bitmap = kzalloc(bitmap_size, GFP_KERNEL | __GFP_NOWARN |
							__GFP_NORETRY);
	if (!mapping->bitmap)
		mapping->bitmap = vzalloc(bitmap_size);

	if (!mapping->bitmap)
		return -ENOMEM;

	spin_lock_init(&mapping->lock);
	mapping->ops = &iommu_ops;
	return 0;
}

	mapping->domain = iommu_domain_alloc(bus);
	if (!mapping->domain)
		goto err3;
static void bitmap_iommu_release_mapping(struct kref *kref)
{
	struct dma_iommu_mapping *mapping =
		container_of(kref, struct dma_iommu_mapping, kref);

	kref_init(&mapping->kref);
	return mapping;
err3:
	kvfree(mapping->bitmap);
err2:
	kfree(mapping->bitmap);
	iommu_domain_free(mapping->domain);
	kfree(mapping);
err:
	return ERR_PTR(err);
}
EXPORT_SYMBOL(arm_iommu_create_mapping);

static void release_iommu_mapping(struct kref *kref)
static void bypass_iommu_release_mapping(struct kref *kref)
{
	struct dma_iommu_mapping *mapping =
		container_of(kref, struct dma_iommu_mapping, kref);

	iommu_domain_free(mapping->domain);
	kvfree(mapping->bitmap);
	kfree(mapping);
}

/*
 * arm_iommu_release_mapping
 * @mapping: allocated via arm_iommu_create_mapping()
 *
 * Frees all resources associated with the iommu mapping.
 * The device associated with this mapping must be in the 'detached' state
 */
void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
{
	int s1_bypass = 0, is_fast = 0;
	void (*release)(struct kref *kref);

	if (!mapping)
		return;

	/*
	 * Never attached: no allocator state exists yet, so only the
	 * domain and the mapping struct need to be freed.
	 */
	if (!mapping->init) {
		iommu_domain_free(mapping->domain);
		kfree(mapping);
		return;
	}

	/* Pick the release callback matching the allocator chosen at init. */
	iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_S1_BYPASS,
					&s1_bypass);
	iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_FAST, &is_fast);

	if (s1_bypass)
		release = bypass_iommu_release_mapping;
	else if (is_fast)
		release = fast_smmu_release_mapping;
	else
		release = bitmap_iommu_release_mapping;

	kref_put(&mapping->kref, release);
}
EXPORT_SYMBOL(arm_iommu_release_mapping);

/**
 * arm_iommu_attach_device
 * @dev: valid struct device pointer
 * @mapping: io address space mapping structure (returned from
 *	arm_iommu_create_mapping)
 *
 * Attaches specified io address space mapping to the provided device,
 * this replaces the dma operations (dma_map_ops pointer) with the
 * IOMMU aware version. Only one device in an iommu_group may use this
 * function.
 */
int arm_iommu_attach_device(struct device *dev,
static int arm_iommu_init_mapping(struct device *dev,
			    struct dma_iommu_mapping *mapping)
{
	int err;
	int err = -EINVAL;
	int s1_bypass = 0, is_fast = 0;
	struct iommu_group *group;
	dma_addr_t iova_end;
@@ -1960,7 +1992,7 @@ int arm_iommu_attach_device(struct device *dev,
	group = dev->iommu_group;
	if (!group) {
		dev_err(dev, "No iommu associated with device\n");
		return -ENODEV;
		return -EINVAL;
	}

	if (iommu_get_domain_for_dev(dev)) {
@@ -1968,6 +2000,11 @@ int arm_iommu_attach_device(struct device *dev,
		return -EINVAL;
	}

	if (mapping->init) {
		kref_get(&mapping->kref);
		return 0;
	}

	iova_end = mapping->base + (mapping->bits << PAGE_SHIFT) - 1;
	if (iova_end > dma_get_mask(dev)) {
		dev_err(dev, "dma mask %llx too small for requested iova range %pad to %pad\n",
@@ -1975,21 +2012,54 @@ int arm_iommu_attach_device(struct device *dev,
		return -EINVAL;
	}

	iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_S1_BYPASS,
					&s1_bypass);
	iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_FAST, &is_fast);
	if (is_fast)
		return fast_smmu_attach_device(dev, mapping);

	err = iommu_attach_group(mapping->domain, group);
	if (s1_bypass) {
		mapping->ops = &swiotlb_dma_ops;
		err = 0;
	} else if (is_fast) {
		err = fast_smmu_init_mapping(dev, mapping);
	} else {
		err = bitmap_iommu_init_mapping(dev, mapping);
	}
	if (!err) {
		kref_init(&mapping->kref);
		mapping->init = true;
	}
	return err;
}

/**
 * arm_iommu_attach_device
 * @dev: valid struct device pointer
 * @mapping: io address space mapping structure (returned from
 *	arm_iommu_create_mapping)
 *
 * Attaches specified io address space mapping to the provided device,
 * this replaces the dma operations (dma_map_ops pointer) with the
 * IOMMU aware version.
 *
 * Clients are expected to call arm_iommu_attach_device() prior to sharing
 * the dma_iommu_mapping structure with another device. This ensures
 * initialization is complete.
 */
int arm_iommu_attach_device(struct device *dev,
			    struct dma_iommu_mapping *mapping)
{
	int err;

	err = arm_iommu_init_mapping(dev, mapping);
	if (err)
		return err;

	iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_S1_BYPASS,
					&s1_bypass);
	err = iommu_attach_group(mapping->domain, dev->iommu_group);
	if (err)
		return err;

	kref_get(&mapping->kref);
	dev->archdata.mapping = mapping;
	if (!s1_bypass)
		set_dma_ops(dev, &iommu_ops);
	set_dma_ops(dev, mapping->ops);

	pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
	return 0;
@@ -2006,8 +2076,7 @@ EXPORT_SYMBOL(arm_iommu_attach_device);
void arm_iommu_detach_device(struct device *dev)
{
	struct dma_iommu_mapping *mapping;
	int is_fast, s1_bypass = 0;
	struct iommu_group *group;
	int s1_bypass = 0;

	mapping = to_dma_iommu_mapping(dev);
	if (!mapping) {
@@ -2015,26 +2084,22 @@ void arm_iommu_detach_device(struct device *dev)
		return;
	}

	iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_FAST, &is_fast);
	if (is_fast) {
		fast_smmu_detach_device(dev, mapping);
	if (!dev->iommu_group) {
		dev_err(dev, "No iommu associated with device\n");
		return;
	}

	iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_S1_BYPASS,
					&s1_bypass);

	/*
	 * ION defers dma_unmap calls. Ensure they have all completed prior to
	 * setting dma_ops to NULL.
	 */
	if (msm_dma_unmap_all_for_dev(dev))
		dev_warn(dev, "IOMMU detach with outstanding mappings\n");

	group = dev->iommu_group;
	if (!group) {
		dev_err(dev, "No iommu associated with device\n");
		return;
	}

	iommu_detach_group(mapping->domain, group);
	kref_put(&mapping->kref, release_iommu_mapping);
	iommu_detach_group(mapping->domain, dev->iommu_group);
	dev->archdata.mapping = NULL;
	if (!s1_bypass)
		set_dma_ops(dev, NULL);
+8 −1
Original line number Diff line number Diff line
@@ -2884,9 +2884,16 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
		}
		smmu_domain->secure_vmid = *((int *)data);
		break;
		/*
		 * fast_smmu_unmap_page() and fast_smmu_alloc_iova() both
		 * expect that the bus/clock/regulator are already on. Thus
		 * also force DOMAIN_ATTR_ATOMIC to be set.
		 */
	case DOMAIN_ATTR_FAST:
		if (*((int *)data))
		if (*((int *)data)) {
			smmu_domain->attributes |= 1 << DOMAIN_ATTR_FAST;
			smmu_domain->attributes |= 1 << DOMAIN_ATTR_ATOMIC;
		}
		ret = 0;
		break;
	case DOMAIN_ATTR_USE_UPSTREAM_HINT:
+44 −36
Original line number Diff line number Diff line
@@ -856,33 +856,28 @@ static void fast_smmu_reserve_pci_windows(struct device *dev,
	spin_unlock_irqrestore(&mapping->lock, flags);
}


/**
 * fast_smmu_attach_device
 * fast_smmu_init_mapping
 * @dev: valid struct device pointer
 * @mapping: io address space mapping structure (returned from
 *	fast_smmu_create_mapping)
 *	arm_iommu_create_mapping)
 *
 * Attaches specified io address space mapping to the provided device,
 * this replaces the dma operations (dma_map_ops pointer) with the
 * IOMMU aware version. More than one client might be attached to
 * the same io address space mapping.
 * Called the first time a device is attached to this mapping.
 * Not for dma client use.
 */
int fast_smmu_attach_device(struct device *dev,
int fast_smmu_init_mapping(struct device *dev,
			    struct dma_iommu_mapping *mapping)
{
	int atomic_domain = 1;
	int err;
	struct iommu_domain *domain = mapping->domain;
	struct iommu_group *group;
	struct iommu_pgtbl_info info;
	u64 size = (u64)mapping->bits << PAGE_SHIFT;

	if (mapping->base + size > (SZ_1G * 4ULL))
		return -EINVAL;

	if (iommu_domain_set_attr(domain, DOMAIN_ATTR_ATOMIC,
				  &atomic_domain))
	if (mapping->base + size > (SZ_1G * 4ULL)) {
		dev_err(dev, "Iova end address too large\n");
		return -EINVAL;
	}

	mapping->fast = __fast_smmu_create_mapping_sized(mapping->base, size);
	if (IS_ERR(mapping->fast))
@@ -895,54 +890,67 @@ int fast_smmu_attach_device(struct device *dev,
	group = dev->iommu_group;
	if (!group) {
		dev_err(dev, "No iommu associated with device\n");
		return -ENODEV;
		err = -ENODEV;
		goto release_mapping;
	}

	if (iommu_get_domain_for_dev(dev)) {
		dev_err(dev, "Device already attached to other iommu_domain\n");
		return -EINVAL;
		err = -EINVAL;
		goto release_mapping;
	}

	if (iommu_attach_group(mapping->domain, group))
		return -EINVAL;
	/*
	 * Need to attach prior to calling DOMAIN_ATTR_PGTBL_INFO and then
	 * detach to be in the expected state. Its a bit messy.
	 */
	if (iommu_attach_group(mapping->domain, group)) {
		err = -EINVAL;
		goto release_mapping;
	}

	if (iommu_domain_get_attr(domain, DOMAIN_ATTR_PGTBL_INFO,
				  &info)) {
		dev_err(dev, "Couldn't get page table info\n");
		fast_smmu_detach_device(dev, mapping);
		return -EINVAL;
		err = -EINVAL;
		goto detach_group;
	}
	mapping->fast->pgtbl_pmds = info.pmds;

	if (iommu_domain_get_attr(domain, DOMAIN_ATTR_PAGE_TABLE_IS_COHERENT,
				  &mapping->fast->is_smmu_pt_coherent))
		return -EINVAL;
				  &mapping->fast->is_smmu_pt_coherent)) {
		err = -EINVAL;
		goto detach_group;
	}

	mapping->fast->notifier.notifier_call = fast_smmu_notify;
	av8l_register_notify(&mapping->fast->notifier);

	dev->archdata.mapping = mapping;
	set_dma_ops(dev, &fast_smmu_dma_ops);

	iommu_detach_group(mapping->domain, group);
	mapping->ops = &fast_smmu_dma_ops;
	return 0;

detach_group:
	iommu_detach_group(mapping->domain, group);
release_mapping:
	kfree(mapping->fast->bitmap);
	kfree(mapping->fast);
	return err;
}
EXPORT_SYMBOL(fast_smmu_attach_device);

/**
 * fast_smmu_detach_device
 * @dev: valid struct device pointer
 * fast_smmu_release_mapping
 * @kref: dma_iommu_mapping->kref
 *
 * Detaches the provided device from a previously attached map.
 * This voids the dma operations (dma_map_ops pointer)
 * Cleans up the given iommu mapping.
 */
void fast_smmu_detach_device(struct device *dev,
			     struct dma_iommu_mapping *mapping)
void fast_smmu_release_mapping(struct kref *kref)
{
	iommu_detach_group(mapping->domain, dev->iommu_group);
	dev->archdata.mapping = NULL;
	set_dma_ops(dev, NULL);
	struct dma_iommu_mapping *mapping =
		container_of(kref, struct dma_iommu_mapping, kref);

	kvfree(mapping->fast->bitmap);
	kfree(mapping->fast);
	iommu_domain_free(mapping->domain);
	kfree(mapping);
}
EXPORT_SYMBOL(fast_smmu_detach_device);
+5 −7
Original line number Diff line number Diff line
/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -41,19 +41,17 @@ struct dma_fast_smmu_mapping {
};

#ifdef CONFIG_IOMMU_IO_PGTABLE_FAST
int fast_smmu_attach_device(struct device *dev,
			    struct dma_iommu_mapping *mapping);
void fast_smmu_detach_device(struct device *dev,
int fast_smmu_init_mapping(struct device *dev,
			    struct dma_iommu_mapping *mapping);
void fast_smmu_release_mapping(struct kref *kref);
#else
static inline int fast_smmu_attach_device(struct device *dev,
static inline int fast_smmu_init_mapping(struct device *dev,
					  struct dma_iommu_mapping *mapping)
{
	return -ENODEV;
}

static inline void fast_smmu_detach_device(struct device *dev,
					   struct dma_iommu_mapping *mapping)
static inline void fast_smmu_release_mapping(struct kref *kref)
{
}
#endif