Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 1db0ee0e authored by Linux Build Service Account's avatar Linux Build Service Account Committed by Gerrit - the friendly Code Review server
Browse files

Merge "soc: qcom: smem: map only partitions used by local HOST"

parents 09d2ba2e 12def20f
Loading
Loading
Loading
Loading
+173 −61
Original line number Diff line number Diff line
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, Sony Mobile Communications AB.
 * Copyright (c) 2012-2013, 2018-2019 The Linux Foundation. All rights reserved.
 * Copyright (c) 2012-2013, 2018-2020 The Linux Foundation. All rights reserved.
 */

#include <linux/hwspinlock.h>
@@ -192,6 +192,19 @@ struct smem_partition_header {
	__le32 offset_free_cached;
	__le32 reserved[3];
};
/**
 * struct smem_partition_desc - descriptor for one mapped SMEM partition
 * @virt_base:	starting virtual address of the partition; a dedicated
 *		devm_ioremap_wc() mapping that covers only this partition
 *		(not an offset into the region 0 mapping)
 * @phys_base:	starting physical address of the partition, computed as
 *		region 0 aux_base plus the partition table entry's offset
 * @cacheline:	alignment for "cached" entries, taken from the table entry
 * @size:	size of the partition in bytes, taken from the table entry
 */
struct smem_partition_desc {
	void __iomem *virt_base;
	u32 phys_base;
	u32 cacheline;
	u32 size;
};

static const u8 SMEM_PART_MAGIC[] = { 0x24, 0x50, 0x52, 0x54 };

@@ -248,9 +261,9 @@ struct smem_region {
 * struct qcom_smem - device data for the smem device
 * @dev:	device pointer
 * @hwlock:	reference to a hwspinlock
 * @global_partition_entry: pointer to global partition entry when in use
 * @ptable_entries: list of pointers to partitions table entry of current
 *		processor/host
 * @ptable_base: virtual base of partition table
 * @global_partition_desc: descriptor for global partition when in use
 * @partition_desc: list of partition descriptor of current processor/host
 * @item_count: max accepted item number
 * @num_regions: number of @regions
 * @regions:	list of the memory regions defining the shared memory
@@ -260,9 +273,10 @@ struct qcom_smem {

	struct hwspinlock *hwlock;

	struct smem_ptable_entry *global_partition_entry;
	struct smem_ptable_entry *ptable_entries[SMEM_HOST_COUNT];
	u32 item_count;
	struct smem_ptable *ptable_base;
	struct smem_partition_desc global_partition_desc;
	struct smem_partition_desc partition_desc[SMEM_HOST_COUNT];

	unsigned num_regions;
	struct smem_region regions[0];
@@ -274,12 +288,6 @@ static struct qcom_smem *__smem;
/* Timeout (ms) for the trylock of remote spinlocks */
#define HWSPINLOCK_TIMEOUT	1000

/* Resolve a partition-table entry to its partition header, which lives at
 * the entry's offset inside the region 0 virtual mapping.
 */
static struct smem_partition_header *
ptable_entry_to_phdr(struct smem_ptable_entry *entry)
{
	void *region_base = __smem->regions[0].virt_base;

	return region_base + le32_to_cpu(entry->offset);
}

static struct smem_private_entry *
phdr_to_last_uncached_entry(struct smem_partition_header *phdr)
{
@@ -346,7 +354,7 @@ static void *cached_entry_to_item(struct smem_private_entry *e)
}

static int qcom_smem_alloc_private(struct qcom_smem *smem,
				   struct smem_ptable_entry *entry,
				   struct smem_partition_desc *p_desc,
				   unsigned item,
				   size_t size)
{
@@ -356,8 +364,8 @@ static int qcom_smem_alloc_private(struct qcom_smem *smem,
	void *cached;
	void *p_end;

	phdr = ptable_entry_to_phdr(entry);
	p_end = (void *)phdr + le32_to_cpu(entry->size);
	phdr = p_desc->virt_base;
	p_end = (void *)phdr + p_desc->size;

	hdr = phdr_to_first_uncached_entry(phdr);
	end = phdr_to_last_uncached_entry(phdr);
@@ -450,7 +458,7 @@ static int qcom_smem_alloc_global(struct qcom_smem *smem,
 */
int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
{
	struct smem_ptable_entry *entry;
	struct smem_partition_desc *p_desc;
	unsigned long flags;
	int ret;

@@ -472,12 +480,12 @@ int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
	if (ret)
		return ret;

	if (host < SMEM_HOST_COUNT && __smem->ptable_entries[host]) {
		entry = __smem->ptable_entries[host];
		ret = qcom_smem_alloc_private(__smem, entry, item, size);
	} else if (__smem->global_partition_entry) {
		entry = __smem->global_partition_entry;
		ret = qcom_smem_alloc_private(__smem, entry, item, size);
	if (host < SMEM_HOST_COUNT && __smem->partition_desc[host].virt_base) {
		p_desc = &__smem->partition_desc[host];
		ret = qcom_smem_alloc_private(__smem, p_desc, item, size);
	} else if (__smem->global_partition_desc.virt_base) {
		p_desc = &__smem->global_partition_desc;
		ret = qcom_smem_alloc_private(__smem, p_desc, item, size);
	} else {
		ret = qcom_smem_alloc_global(__smem, item, size);
	}
@@ -528,22 +536,20 @@ static void *qcom_smem_get_global(struct qcom_smem *smem,
}

static void *qcom_smem_get_private(struct qcom_smem *smem,
				   struct smem_ptable_entry *entry,
				   struct smem_partition_desc *p_desc,
				   unsigned item,
				   size_t *size)
{
	struct smem_private_entry *e, *end;
	struct smem_partition_header *phdr;
	void *item_ptr, *p_end;
	u32 partition_size;
	size_t cacheline;
	u32 padding_data;
	u32 e_size;

	phdr = ptable_entry_to_phdr(entry);
	partition_size = le32_to_cpu(entry->size);
	p_end = (void *)phdr + partition_size;
	cacheline = le32_to_cpu(entry->cacheline);
	phdr = p_desc->virt_base;
	p_end = (void *)phdr + p_desc->size;
	cacheline = p_desc->cacheline;

	e = phdr_to_first_uncached_entry(phdr);
	end = phdr_to_last_uncached_entry(phdr);
@@ -560,7 +566,7 @@ static void *qcom_smem_get_private(struct qcom_smem *smem,
				e_size = le32_to_cpu(e->size);
				padding_data = le16_to_cpu(e->padding_data);

				if (e_size < partition_size
				if (e_size < p_desc->size
				    && padding_data < e_size)
					*size = e_size - padding_data;
				else
@@ -596,7 +602,7 @@ static void *qcom_smem_get_private(struct qcom_smem *smem,
				e_size = le32_to_cpu(e->size);
				padding_data = le16_to_cpu(e->padding_data);

				if (e_size < partition_size
				if (e_size < p_desc->size
				    && padding_data < e_size)
					*size = e_size - padding_data;
				else
@@ -635,7 +641,7 @@ static void *qcom_smem_get_private(struct qcom_smem *smem,
 */
void *qcom_smem_get(unsigned host, unsigned item, size_t *size)
{
	struct smem_ptable_entry *entry;
	struct smem_partition_desc *p_desc;
	unsigned long flags;
	int ret;
	void *ptr = ERR_PTR(-EPROBE_DEFER);
@@ -652,12 +658,12 @@ void *qcom_smem_get(unsigned host, unsigned item, size_t *size)
	if (ret)
		return ERR_PTR(ret);

	if (host < SMEM_HOST_COUNT && __smem->ptable_entries[host]) {
		entry = __smem->ptable_entries[host];
		ptr = qcom_smem_get_private(__smem, entry, item, size);
	} else if (__smem->global_partition_entry) {
		entry = __smem->global_partition_entry;
		ptr = qcom_smem_get_private(__smem, entry, item, size);
	if (host < SMEM_HOST_COUNT && __smem->partition_desc[host].virt_base) {
		p_desc = &__smem->partition_desc[host];
		ptr = qcom_smem_get_private(__smem, p_desc, item, size);
	} else if (__smem->global_partition_desc.virt_base) {
		p_desc = &__smem->global_partition_desc;
		ptr = qcom_smem_get_private(__smem, p_desc, item, size);
	} else {
		ptr = qcom_smem_get_global(__smem, item, size);
	}
@@ -679,30 +685,30 @@ EXPORT_SYMBOL(qcom_smem_get);
int qcom_smem_get_free_space(unsigned host)
{
	struct smem_partition_header *phdr;
	struct smem_ptable_entry *entry;
	struct smem_partition_desc *p_desc;
	struct smem_header *header;
	unsigned ret;

	if (!__smem)
		return -EPROBE_DEFER;

	if (host < SMEM_HOST_COUNT && __smem->ptable_entries[host]) {
		entry = __smem->ptable_entries[host];
		phdr = ptable_entry_to_phdr(entry);
	if (host < SMEM_HOST_COUNT && __smem->partition_desc[host].virt_base) {
		p_desc = &__smem->partition_desc[host];
		phdr = p_desc->virt_base;

		ret = le32_to_cpu(phdr->offset_free_cached) -
		      le32_to_cpu(phdr->offset_free_uncached);

		if (ret > le32_to_cpu(entry->size))
		if (ret > p_desc->size)
			return -EINVAL;
	} else if (__smem->global_partition_entry) {
		entry = __smem->global_partition_entry;
		phdr = ptable_entry_to_phdr(entry);
	} else if (__smem->global_partition_desc.virt_base) {
		p_desc = &__smem->global_partition_desc;
		phdr = p_desc->virt_base;

		ret = le32_to_cpu(phdr->offset_free_cached) -
		      le32_to_cpu(phdr->offset_free_uncached);

		if (ret > le32_to_cpu(entry->size))
		if (ret > p_desc->size)
			return -EINVAL;
	} else {
		header = __smem->regions[0].virt_base;
@@ -716,6 +722,15 @@ int qcom_smem_get_free_space(unsigned host)
}
EXPORT_SYMBOL(qcom_smem_get_free_space);

/* Return 1 if @addr falls inside the window [@virt_base, @virt_base + @size),
 * 0 otherwise (including when the window is unmapped, i.e. @virt_base NULL).
 */
static int addr_in_range(void *virt_base, unsigned int size, void *addr)
{
	return virt_base && addr >= virt_base && addr < virt_base + size;
}

/**
 * qcom_smem_virt_to_phys() - return the physical address associated
 * with an smem item pointer (previously returned by qcom_smem_get()
@@ -725,17 +740,36 @@ EXPORT_SYMBOL(qcom_smem_get_free_space);
 */
phys_addr_t qcom_smem_virt_to_phys(void *p)
{
	unsigned i;
	struct smem_partition_desc *p_desc;
	struct smem_region *area;
	u64 offset;
	u32 i;

	for (i = 0; i < SMEM_HOST_COUNT; i++) {
		p_desc = &__smem->partition_desc[i];

		if (addr_in_range(p_desc->virt_base, p_desc->size, p)) {
			offset = p - p_desc->virt_base;

			return (phys_addr_t)p_desc->phys_base + offset;
		}
	}

	p_desc = &__smem->global_partition_desc;

	if (addr_in_range(p_desc->virt_base, p_desc->size, p)) {
		offset = p - p_desc->virt_base;

		return (phys_addr_t)p_desc->phys_base + offset;
	}

	for (i = 0; i < __smem->num_regions; i++) {
		struct smem_region *region = &__smem->regions[i];
		area = &__smem->regions[i];

		if (p < region->virt_base)
			continue;
		if (p < region->virt_base + region->size) {
			u64 offset = p - region->virt_base;
		if (addr_in_range(area->virt_base, area->size, p)) {
			offset = p - area->virt_base;

			return (phys_addr_t)region->aux_base + offset;
			return (phys_addr_t)area->aux_base + offset;
		}
	}

@@ -759,7 +793,7 @@ static struct smem_ptable *qcom_smem_get_ptable(struct qcom_smem *smem)
	struct smem_ptable *ptable;
	u32 version;

	ptable = smem->regions[0].virt_base + smem->regions[0].size - SZ_4K;
	ptable = smem->ptable_base;
	if (memcmp(ptable->magic, SMEM_PTABLE_MAGIC, sizeof(ptable->magic)))
		return ERR_PTR(-ENOENT);

@@ -793,11 +827,12 @@ static int qcom_smem_set_global_partition(struct qcom_smem *smem)
	struct smem_partition_header *header;
	struct smem_ptable_entry *entry;
	struct smem_ptable *ptable;
	u32 phys_addr;
	u32 host0, host1, size;
	bool found = false;
	int i;

	if (smem->global_partition_entry) {
	if (smem->global_partition_desc.virt_base) {
		dev_err(smem->dev, "Already found the global partition\n");
		return -EINVAL;
	}
@@ -827,7 +862,12 @@ static int qcom_smem_set_global_partition(struct qcom_smem *smem)
		return -EINVAL;
	}

	header = smem->regions[0].virt_base + le32_to_cpu(entry->offset);
	phys_addr = smem->regions[0].aux_base + le32_to_cpu(entry->offset);
	header = devm_ioremap_wc(smem->dev,
				  phys_addr, le32_to_cpu(entry->size));
	if (!header)
		return -ENOMEM;

	host0 = le16_to_cpu(header->host0);
	host1 = le16_to_cpu(header->host1);

@@ -853,7 +893,10 @@ static int qcom_smem_set_global_partition(struct qcom_smem *smem)
		return -EINVAL;
	}

	smem->global_partition_entry = entry;
	smem->global_partition_desc.virt_base = (void __iomem *)header;
	smem->global_partition_desc.phys_base = phys_addr;
	smem->global_partition_desc.size = le32_to_cpu(entry->size);
	smem->global_partition_desc.cacheline = le32_to_cpu(entry->cacheline);

	return 0;
}
@@ -864,6 +907,7 @@ static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
	struct smem_partition_header *header;
	struct smem_ptable_entry *entry;
	struct smem_ptable *ptable;
	u32 phys_addr;
	unsigned int remote_host;
	u32 host0, host1;
	int i;
@@ -898,14 +942,20 @@ static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
			return -EINVAL;
		}

		if (smem->ptable_entries[remote_host]) {
		if (smem->partition_desc[remote_host].virt_base) {
			dev_err(smem->dev,
				"Already found a partition for host %d\n",
				remote_host);
			return -EINVAL;
		}

		header = smem->regions[0].virt_base + le32_to_cpu(entry->offset);
		phys_addr = smem->regions[0].aux_base +
				le32_to_cpu(entry->offset);
		header = devm_ioremap_wc(smem->dev,
					  phys_addr, le32_to_cpu(entry->size));
		if (!header)
			return -ENOMEM;

		host0 = le16_to_cpu(header->host0);
		host1 = le16_to_cpu(header->host1);

@@ -940,7 +990,13 @@ static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
			return -EINVAL;
		}

		smem->ptable_entries[remote_host] = entry;
		smem->partition_desc[remote_host].virt_base =
						(void __iomem *)header;
		smem->partition_desc[remote_host].phys_base = phys_addr;
		smem->partition_desc[remote_host].size =
						le32_to_cpu(entry->size);
		smem->partition_desc[remote_host].cacheline =
						le32_to_cpu(entry->cacheline);
	}

	return 0;
@@ -973,6 +1029,61 @@ static int qcom_smem_map_memory(struct qcom_smem *smem, struct device *dev,
	return 0;
}

static int qcom_smem_map_toc(struct qcom_smem *smem, struct device *dev,
				const char *name, int i)
{
	struct device_node *np;
	struct resource r;
	int ret;

	np = of_parse_phandle(dev->of_node, name, 0);
	if (!np) {
		dev_err(dev, "No %s specified\n", name);
		return -EINVAL;
	}

	ret = of_address_to_resource(np, 0, &r);
	of_node_put(np);
	if (ret)
		return ret;

	smem->regions[i].aux_base = (u32)r.start;
	smem->regions[i].size = resource_size(&r);
	/* map starting 4K for smem header */
	smem->regions[i].virt_base = devm_ioremap_wc(dev, r.start, SZ_4K);
	/* map last 4k for toc */
	smem->ptable_base = devm_ioremap_wc(dev,
				r.start + resource_size(&r) - SZ_4K, SZ_4K);

	if (!smem->regions[i].virt_base || !smem->ptable_base)
		return -ENOMEM;

	return 0;
}

/* Remap region 0 for a legacy (global-heap) SMEM: read the actual heap size
 * from the SMEM header, drop the initial 4K header-only mapping created by
 * qcom_smem_map_toc(), and remap the full heap. Returns 0 on success or
 * -ENOMEM if the remap fails (the old mapping is already gone by then).
 *
 * NOTE(review): the function name keeps the existing "mamp" spelling because
 * the probe path calls it by that name.
 */
static int qcom_smem_mamp_legacy(struct qcom_smem *smem)
{
	struct smem_header *header;
	u32 phys_addr;
	u32 p_size;

	phys_addr = smem->regions[0].aux_base;
	header = smem->regions[0].virt_base;
	/*
	 * SMEM header fields are little-endian; convert like every other
	 * header access in this driver (previously read raw, which is
	 * wrong on big-endian kernels).
	 */
	p_size = le32_to_cpu(header->available);

	/* unmap previously mapped starting 4k for smem header */
	devm_iounmap(smem->dev, smem->regions[0].virt_base);

	smem->regions[0].size = p_size;
	smem->regions[0].virt_base = devm_ioremap_wc(smem->dev,
						      phys_addr, p_size);

	if (!smem->regions[0].virt_base)
		return -ENOMEM;

	return 0;
}

static int qcom_smem_probe(struct platform_device *pdev)
{
	struct smem_header *header;
@@ -995,7 +1106,7 @@ static int qcom_smem_probe(struct platform_device *pdev)
	smem->dev = &pdev->dev;
	smem->num_regions = num_regions;

	ret = qcom_smem_map_memory(smem, &pdev->dev, "memory-region", 0);
	ret = qcom_smem_map_toc(smem, &pdev->dev, "memory-region", 0);
	if (ret)
		return ret;

@@ -1019,6 +1130,7 @@ static int qcom_smem_probe(struct platform_device *pdev)
		smem->item_count = qcom_smem_get_item_count(smem);
		break;
	case SMEM_GLOBAL_HEAP_VERSION:
		qcom_smem_mamp_legacy(smem);
		smem->item_count = SMEM_ITEM_COUNT;
		break;
	default: