Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 4f085eca authored by Linux Build Service Account's avatar Linux Build Service Account Committed by Gerrit - the friendly Code Review server
Browse files

Merge "arm64: dma-mapping: Allow drivers to use the upstream iova allocator"

parents 7c9dd4b8 1e279926
Loading
Loading
Loading
Loading
+5 −3
Original line number Diff line number Diff line
@@ -14,14 +14,16 @@
struct dma_iommu_mapping {
	/* iommu specific data */
	struct iommu_domain	*domain;
	bool			init;
	struct kref		kref;
	const struct dma_map_ops *ops;

	/* Protects bitmap */
	spinlock_t		lock;
	void			*bitmap;
	size_t			bits;
	dma_addr_t		base;

	spinlock_t		lock;
	struct kref		kref;

	struct dma_fast_smmu_mapping *fast;
};

+174 −60
Original line number Diff line number Diff line
@@ -1906,23 +1906,42 @@ const struct dma_map_ops iommu_ops = {
 * IO address ranges, which is required to perform memory allocation and
 * mapping with IOMMU aware functions.
 *
 * The client device need to be attached to the mapping with
 * arm_iommu_attach_device function.
 * Clients may use iommu_domain_set_attr() to set additional flags prior
 * to calling arm_iommu_attach_device() to complete initialization.
 */
struct dma_iommu_mapping *
arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size)
{
	unsigned int bits = size >> PAGE_SHIFT;
	unsigned int bitmap_size = BITS_TO_LONGS(bits) * sizeof(long);
	struct dma_iommu_mapping *mapping;
	int err = -ENOMEM;

	if (!bitmap_size)
	if (!bits)
		return ERR_PTR(-EINVAL);

	mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
	if (!mapping)
		goto err;
		return ERR_PTR(-ENOMEM);

	mapping->base = base;
	mapping->bits = bits;

	mapping->domain = iommu_domain_alloc(bus);
	if (!mapping->domain)
		goto err_domain_alloc;

	mapping->init = false;
	return mapping;

err_domain_alloc:
	kfree(mapping);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(arm_iommu_create_mapping);

static int
bitmap_iommu_init_mapping(struct device *dev, struct dma_iommu_mapping *mapping)
{
	unsigned int bitmap_size = BITS_TO_LONGS(mapping->bits) * sizeof(long);

	mapping->bitmap = kzalloc(bitmap_size, GFP_KERNEL | __GFP_NOWARN |
							__GFP_NORETRY);
@@ -1930,67 +1949,124 @@ arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size)
		mapping->bitmap = vzalloc(bitmap_size);

	if (!mapping->bitmap)
		goto err2;
		return -ENOMEM;

	mapping->base = base;
	mapping->bits = bits;
	spin_lock_init(&mapping->lock);
	mapping->ops = &iommu_ops;
	return 0;
}

	mapping->domain = iommu_domain_alloc(bus);
	if (!mapping->domain)
		goto err3;
static void bitmap_iommu_release_mapping(struct kref *kref)
{
	struct dma_iommu_mapping *mapping =
		container_of(kref, struct dma_iommu_mapping, kref);

	kref_init(&mapping->kref);
	return mapping;
err3:
	kvfree(mapping->bitmap);
err2:
	kfree(mapping->bitmap);
	iommu_domain_free(mapping->domain);
	kfree(mapping);
err:
	return ERR_PTR(err);
}
EXPORT_SYMBOL(arm_iommu_create_mapping);

static void release_iommu_mapping(struct kref *kref)
/*
 * kref release callback used when the domain is in stage-1 bypass mode.
 * Frees the iommu domain, any iova bitmap (kvfree copes with either a
 * kmalloc'd or vmalloc'd bitmap and is a no-op on NULL — presumably NULL
 * here since bypass mappings never allocate one; TODO confirm), and the
 * mapping structure itself.
 */
static void bypass_iommu_release_mapping(struct kref *kref)
{
	struct dma_iommu_mapping *mapping =
		container_of(kref, struct dma_iommu_mapping, kref);

	iommu_domain_free(mapping->domain);
	kvfree(mapping->bitmap);
	kfree(mapping);
}

void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
static int upstream_iommu_init_mapping(struct device *dev,
					struct dma_iommu_mapping *mapping)
{
	if (mapping)
		kref_put(&mapping->kref, release_iommu_mapping);
	struct iommu_domain *domain = mapping->domain;
	struct iommu_group *group = dev->iommu_group;
	dma_addr_t base = mapping->base;
	u64 size = mapping->bits << PAGE_SHIFT;

	if (iommu_get_dma_cookie(domain))
		return -EINVAL;

	/* Need to attach to get geometry */
	if (iommu_attach_group(domain, group))
		goto out_put_cookie;

	if (iommu_dma_init_domain(domain, base, size, dev))
		goto out_detach_group;

	mapping->ops = &iommu_dma_ops;
	iommu_detach_group(domain, group);
	return 0;

out_detach_group:
	iommu_detach_group(domain, group);
out_put_cookie:
	iommu_put_dma_cookie(domain);
	return -EINVAL;
}
EXPORT_SYMBOL(arm_iommu_release_mapping);

/**
 * arm_iommu_attach_device
 * @dev: valid struct device pointer
 * @mapping: io address space mapping structure (returned from
 *	arm_iommu_create_mapping)
/*
 * kref release callback for mappings using the upstream iova allocator.
 * Drops the dma cookie acquired during init, then frees the domain and
 * the mapping structure.
 */
static void upstream_iommu_release_mapping(struct kref *kref)
{
	struct dma_iommu_mapping *mapping =
		container_of(kref, struct dma_iommu_mapping, kref);

	iommu_put_dma_cookie(mapping->domain);
	iommu_domain_free(mapping->domain);
	kfree(mapping);
}

/*
 * arm_iommu_release_mapping
 * @mapping: allocated via arm_iommu_create_mapping()
 *
 * Attaches specified io address space mapping to the provided device,
 * this replaces the dma operations (dma_map_ops pointer) with the
 * IOMMU aware version. Only one device in an iommu_group may use this
 * function.
 * Frees all resources associated with the iommu mapping.
 * The device associated with this mapping must be in the 'detached' state
 */
int arm_iommu_attach_device(struct device *dev,
void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
{
	int s1_bypass = 0, is_fast = 0, is_upstream = 0;
	void (*release)(struct kref *kref);

	/* NULL mapping is tolerated, matching the previous behaviour. */
	if (!mapping)
		return;

	/*
	 * Mapping was created but never attached: the kref was never
	 * initialized (that happens on first attach), so free the partial
	 * object directly instead of going through kref_put().
	 */
	if (!mapping->init) {
		iommu_domain_free(mapping->domain);
		kfree(mapping);
		return;
	}

	/*
	 * Query the domain attributes to determine which init path was
	 * taken, so the matching release helper can be chosen below.
	 */
	iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_S1_BYPASS,
					&s1_bypass);
	iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_FAST, &is_fast);
	iommu_domain_get_attr(mapping->domain,
				DOMAIN_ATTR_UPSTREAM_IOVA_ALLOCATOR,
				&is_upstream);

	/* Priority order mirrors the init-time selection in attach. */
	if (s1_bypass)
		release = bypass_iommu_release_mapping;
	else if (is_fast)
		release = fast_smmu_release_mapping;
	else if (is_upstream)
		release = upstream_iommu_release_mapping;
	else
		release = bitmap_iommu_release_mapping;

	/* Resources are only freed once the last reference is dropped. */
	kref_put(&mapping->kref, release);
}
EXPORT_SYMBOL(arm_iommu_release_mapping);

static int arm_iommu_init_mapping(struct device *dev,
			    struct dma_iommu_mapping *mapping)
{
	int err;
	int s1_bypass = 0, is_fast = 0;
	int err = -EINVAL;
	int s1_bypass = 0, is_fast = 0, is_upstream = 0;
	struct iommu_group *group;
	dma_addr_t iova_end;

	group = dev->iommu_group;
	if (!group) {
		dev_err(dev, "No iommu associated with device\n");
		return -ENODEV;
		return -EINVAL;
	}

	if (iommu_get_domain_for_dev(dev)) {
@@ -1998,6 +2074,11 @@ int arm_iommu_attach_device(struct device *dev,
		return -EINVAL;
	}

	if (mapping->init) {
		kref_get(&mapping->kref);
		return 0;
	}

	iova_end = mapping->base + (mapping->bits << PAGE_SHIFT) - 1;
	if (iova_end > dma_get_mask(dev)) {
		dev_err(dev, "dma mask %llx too small for requested iova range %pad to %pad\n",
@@ -2005,21 +2086,59 @@ int arm_iommu_attach_device(struct device *dev,
		return -EINVAL;
	}

	iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_S1_BYPASS,
					&s1_bypass);
	iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_FAST, &is_fast);
	if (is_fast)
		return fast_smmu_attach_device(dev, mapping);
	iommu_domain_get_attr(mapping->domain,
				DOMAIN_ATTR_UPSTREAM_IOVA_ALLOCATOR,
				&is_upstream);

	if (s1_bypass) {
		mapping->ops = &swiotlb_dma_ops;
		err = 0;
	} else if (is_fast) {
		err = fast_smmu_init_mapping(dev, mapping);
	} else if (is_upstream) {
		err = upstream_iommu_init_mapping(dev, mapping);
	} else {
		err = bitmap_iommu_init_mapping(dev, mapping);
	}
	if (!err) {
		kref_init(&mapping->kref);
		mapping->init = true;
	}
	return err;
}

/**
 * arm_iommu_attach_device
 * @dev: valid struct device pointer
 * @mapping: io address space mapping structure (returned from
 *	arm_iommu_create_mapping)
 *
 * Attaches specified io address space mapping to the provided device,
 * this replaces the dma operations (dma_map_ops pointer) with the
 * IOMMU aware version.
 *
 * Clients are expected to call arm_iommu_attach_device() prior to sharing
 * the dma_iommu_mapping structure with another device. This ensures
 * initialization is complete.
 */
int arm_iommu_attach_device(struct device *dev,
			    struct dma_iommu_mapping *mapping)
{
	int err;

	err = iommu_attach_group(mapping->domain, group);
	err = arm_iommu_init_mapping(dev, mapping);
	if (err)
		return err;

	iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_S1_BYPASS,
					&s1_bypass);
	err = iommu_attach_group(mapping->domain, dev->iommu_group);
	if (err)
		return err;

	kref_get(&mapping->kref);
	dev->archdata.mapping = mapping;
	if (!s1_bypass)
		set_dma_ops(dev, &iommu_ops);
	set_dma_ops(dev, mapping->ops);

	pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
	return 0;
@@ -2036,8 +2155,7 @@ EXPORT_SYMBOL(arm_iommu_attach_device);
void arm_iommu_detach_device(struct device *dev)
{
	struct dma_iommu_mapping *mapping;
	int is_fast, s1_bypass = 0;
	struct iommu_group *group;
	int s1_bypass = 0;

	mapping = to_dma_iommu_mapping(dev);
	if (!mapping) {
@@ -2045,26 +2163,22 @@ void arm_iommu_detach_device(struct device *dev)
		return;
	}

	iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_FAST, &is_fast);
	if (is_fast) {
		fast_smmu_detach_device(dev, mapping);
	if (!dev->iommu_group) {
		dev_err(dev, "No iommu associated with device\n");
		return;
	}

	iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_S1_BYPASS,
					&s1_bypass);

	/*
	 * ION defers dma_unmap calls. Ensure they have all completed prior to
	 * setting dma_ops to NULL.
	 */
	if (msm_dma_unmap_all_for_dev(dev))
		dev_warn(dev, "IOMMU detach with outstanding mappings\n");

	group = dev->iommu_group;
	if (!group) {
		dev_err(dev, "No iommu associated with device\n");
		return;
	}

	iommu_detach_group(mapping->domain, group);
	kref_put(&mapping->kref, release_iommu_mapping);
	iommu_detach_group(mapping->domain, dev->iommu_group);
	dev->archdata.mapping = NULL;
	if (!s1_bypass)
		set_dma_ops(dev, NULL);
+11 −0
Original line number Diff line number Diff line
@@ -2766,6 +2766,11 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
					& (1 << DOMAIN_ATTR_FAST));
		ret = 0;
		break;
	case DOMAIN_ATTR_UPSTREAM_IOVA_ALLOCATOR:
		*((int *)data) = !!(smmu_domain->attributes
			& (1 << DOMAIN_ATTR_UPSTREAM_IOVA_ALLOCATOR));
		ret = 0;
		break;
	case DOMAIN_ATTR_USE_UPSTREAM_HINT:
		*((int *)data) = !!(smmu_domain->attributes &
				   (1 << DOMAIN_ATTR_USE_UPSTREAM_HINT));
@@ -2918,6 +2923,12 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
		}
		smmu_domain->secure_vmid = *((int *)data);
		break;
	case DOMAIN_ATTR_UPSTREAM_IOVA_ALLOCATOR:
		if (*((int *)data))
			smmu_domain->attributes |=
				1 << DOMAIN_ATTR_UPSTREAM_IOVA_ALLOCATOR;
		ret = 0;
		break;
	case DOMAIN_ATTR_FAST:
		if (*((int *)data))
			smmu_domain->attributes |= 1 << DOMAIN_ATTR_FAST;
+44 −32
Original line number Diff line number Diff line
@@ -855,29 +855,28 @@ static void fast_smmu_reserve_pci_windows(struct device *dev,
	spin_unlock_irqrestore(&mapping->lock, flags);
}


/**
 * fast_smmu_attach_device
 * fast_smmu_init_mapping
 * @dev: valid struct device pointer
 * @mapping: io address space mapping structure (returned from
 *	fast_smmu_create_mapping)
 *	arm_iommu_create_mapping)
 *
 * Attaches specified io address space mapping to the provided device,
 * this replaces the dma operations (dma_map_ops pointer) with the
 * IOMMU aware version. More than one client might be attached to
 * the same io address space mapping.
 * Called the first time a device is attached to this mapping.
 * Not for dma client use.
 */
int fast_smmu_attach_device(struct device *dev,
int fast_smmu_init_mapping(struct device *dev,
			    struct dma_iommu_mapping *mapping)
{
	int atomic_domain = 1;
	int err, atomic_domain = 1;
	struct iommu_domain *domain = mapping->domain;
	struct iommu_group *group;
	struct iommu_pgtbl_info info;
	u64 size = (u64)mapping->bits << PAGE_SHIFT;

	if (mapping->base + size > (SZ_1G * 4ULL))
	if (mapping->base + size > (SZ_1G * 4ULL)) {
		dev_err(dev, "Iova end address too large\n");
		return -EINVAL;
	}

	if (iommu_domain_set_attr(domain, DOMAIN_ATTR_ATOMIC,
				  &atomic_domain))
@@ -894,54 +893,67 @@ int fast_smmu_attach_device(struct device *dev,
	group = dev->iommu_group;
	if (!group) {
		dev_err(dev, "No iommu associated with device\n");
		return -ENODEV;
		err = -ENODEV;
		goto release_mapping;
	}

	if (iommu_get_domain_for_dev(dev)) {
		dev_err(dev, "Device already attached to other iommu_domain\n");
		return -EINVAL;
		err = -EINVAL;
		goto release_mapping;
	}

	if (iommu_attach_group(mapping->domain, group))
		return -EINVAL;
	/*
	 * Need to attach prior to calling DOMAIN_ATTR_PGTBL_INFO and then
	 * detach to be in the expected state. Its a bit messy.
	 */
	if (iommu_attach_group(mapping->domain, group)) {
		err = -EINVAL;
		goto release_mapping;
	}

	if (iommu_domain_get_attr(domain, DOMAIN_ATTR_PGTBL_INFO,
				  &info)) {
		dev_err(dev, "Couldn't get page table info\n");
		fast_smmu_detach_device(dev, mapping);
		return -EINVAL;
		err = -EINVAL;
		goto detach_group;
	}
	mapping->fast->pgtbl_pmds = info.pmds;

	if (iommu_domain_get_attr(domain, DOMAIN_ATTR_PAGE_TABLE_IS_COHERENT,
				  &mapping->fast->is_smmu_pt_coherent))
		return -EINVAL;
				  &mapping->fast->is_smmu_pt_coherent)) {
		err = -EINVAL;
		goto detach_group;
	}

	mapping->fast->notifier.notifier_call = fast_smmu_notify;
	av8l_register_notify(&mapping->fast->notifier);

	dev->archdata.mapping = mapping;
	set_dma_ops(dev, &fast_smmu_dma_ops);

	iommu_detach_group(mapping->domain, group);
	mapping->ops = &fast_smmu_dma_ops;
	return 0;

detach_group:
	iommu_detach_group(mapping->domain, group);
release_mapping:
	kfree(mapping->fast->bitmap);
	kfree(mapping->fast);
	return err;
}
EXPORT_SYMBOL(fast_smmu_attach_device);

/**
 * fast_smmu_detach_device
 * @dev: valid struct device pointer
 * fast_smmu_release_mapping
 * @kref: dma_iommu_mapping->kref
 *
 * Detaches the provided device from a previously attached map.
 * This voids the dma operations (dma_map_ops pointer)
 * Cleans up the given iommu mapping.
 */
void fast_smmu_detach_device(struct device *dev,
			     struct dma_iommu_mapping *mapping)
void fast_smmu_release_mapping(struct kref *kref)
{
	iommu_detach_group(mapping->domain, dev->iommu_group);
	dev->archdata.mapping = NULL;
	set_dma_ops(dev, NULL);
	struct dma_iommu_mapping *mapping =
		container_of(kref, struct dma_iommu_mapping, kref);

	kvfree(mapping->fast->bitmap);
	kfree(mapping->fast);
	iommu_domain_free(mapping->domain);
	kfree(mapping);
}
EXPORT_SYMBOL(fast_smmu_detach_device);
+5 −7
Original line number Diff line number Diff line
/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -41,19 +41,17 @@ struct dma_fast_smmu_mapping {
};

#ifdef CONFIG_IOMMU_IO_PGTABLE_FAST
int fast_smmu_attach_device(struct device *dev,
			    struct dma_iommu_mapping *mapping);
void fast_smmu_detach_device(struct device *dev,
int fast_smmu_init_mapping(struct device *dev,
			    struct dma_iommu_mapping *mapping);
void fast_smmu_release_mapping(struct kref *kref);
#else
static inline int fast_smmu_attach_device(struct device *dev,
static inline int fast_smmu_init_mapping(struct device *dev,
					  struct dma_iommu_mapping *mapping)
{
	return -ENODEV;
}

static inline void fast_smmu_detach_device(struct device *dev,
					   struct dma_iommu_mapping *mapping)
static inline void fast_smmu_release_mapping(struct kref *kref)
{
}
#endif
Loading