Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 7c7716d1 authored by Linux Build Service Account's avatar Linux Build Service Account Committed by Gerrit - the friendly Code Review server
Browse files

Merge changes...

Merge changes Ieb9231c5,I7c7e15cf,Id9ccab35,Ie1658731,I9668e674,Ifb294696,I983043f3,Idae945d1,Ib31e5aca,Ifacc1ff9,I31e8e0b5,I530cfdce,I85078f35,I38ebb9bb,I35e38b93,I99868cf4,Ia76f9743,If7598c26,Ie2f9e620 into msm-next

* changes:
  dma-mapping: Add dma_remap functions
  mm: Remove __init annotations from free_bootmem_late
  arm: Add option to skip buffer zeroing
  of: reserved_mem: increase max number of reserved regions
  mm: showmem: make the notifiers atomic
  mm: Add notifier framework for showing memory
  arm64: Add dma mapping APIs for other memory types
  soc: qcom: secure_buffer: Fix memory leak
  soc: qcom: secure_buffer: Fix an uninitialized variable
  msm: secure_buffer: Add VMID_CP_CAMERA_PREVIEW
  msm: secure_buffer: fix scm call argument layout
  msm: secure_buffer: Limit continuous time spent in hypervisor
  secure_buffer: Remove all-in-one buffer usage
  msm: secure_buffer: Fix possible NULL pointer dereference
  qcom: secure_buffer: Use enum type to hold VMID definitions
  msm: secure_buffer: Add WLAN support
  drivers: qcom: secure_buffer: Optimize by re-using the same buffer
  msm: secure_buffer: Add missing include
  msm: secure_buffer: fix stub functions
parents a75db7c0 36e68e77
Loading
Loading
Loading
Loading
+3 −3
Original line number Diff line number Diff line
@@ -444,7 +444,7 @@ static int __init atomic_pool_init(void)
	goto out;

remove_mapping:
	dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
	dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP, false);
destroy_genpool:
	gen_pool_destroy(atomic_pool);
	atomic_pool = NULL;
@@ -692,14 +692,14 @@ static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,

		iommu_dma_unmap_page(dev, handle, iosize, 0, attrs);
		dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
		dma_common_free_remap(cpu_addr, size, VM_USERMAP);
		dma_common_free_remap(cpu_addr, size, VM_USERMAP, false);
	} else if (is_vmalloc_addr(cpu_addr)){
		struct vm_struct *area = find_vm_area(cpu_addr);

		if (WARN_ON(!area || !area->pages))
			return;
		iommu_dma_free(dev, area->pages, iosize, &handle);
		dma_common_free_remap(cpu_addr, size, VM_USERMAP);
		dma_common_free_remap(cpu_addr, size, VM_USERMAP, false);
	} else {
		iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
		__free_pages(virt_to_page(cpu_addr), get_order(size));
+4 −2
Original line number Diff line number Diff line
@@ -329,12 +329,14 @@ void *dma_common_contiguous_remap(struct page *page, size_t size,
/*
 * unmaps a range previously mapped by dma_common_*_remap
 */
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags)
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags,
			   bool no_warn)
{
	struct vm_struct *area = find_vm_area(cpu_addr);

	if (!area || (area->flags & vm_flags) != vm_flags) {
		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
		WARN(!no_warn, "trying to free invalid coherent area: %p\n",
			cpu_addr);
		return;
	}

+2 −2
Original line number Diff line number Diff line
/*
 * Device tree based initialization code for reserved memory.
 *
 * Copyright (c) 2013, 2015 The Linux Foundation. All Rights Reserved.
 * Copyright (c) 2013, 2015, 2017 The Linux Foundation. All Rights Reserved.
 * Copyright (c) 2013,2014 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 * Author: Marek Szyprowski <m.szyprowski@samsung.com>
@@ -25,7 +25,7 @@
#include <linux/sort.h>
#include <linux/slab.h>

#define MAX_RESERVED_REGIONS	16
#define MAX_RESERVED_REGIONS	32
static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS];
static int reserved_mem_count;

+151 −107
Original line number Diff line number Diff line
/*
 * Copyright (C) 2011 Google, Inc
 * Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
 * Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -19,6 +19,7 @@
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <soc/qcom/scm.h>
#include <soc/qcom/secure_buffer.h>

@@ -42,11 +43,6 @@ struct mem_prot_info {
	u64 size;
};

struct info_list {
	struct mem_prot_info *list_head;
	u64 list_size;
};

#define MEM_PROT_ASSIGN_ID		0x16
#define MEM_PROTECT_LOCK_ID2		0x0A
#define MEM_PROTECT_LOCK_ID2_FLAT	0x11
@@ -56,14 +52,12 @@ struct info_list {
struct dest_vm_and_perm_info {
	u32 vm;
	u32 perm;
	u32 *ctx;
	u64 ctx;
	u32 ctx_size;
};

struct dest_info_list {
	struct dest_vm_and_perm_info *dest_info;
	u64 list_size;
};
static void *qcom_secure_mem;
#define QCOM_SECURE_MEM_SIZE (512*1024)

static int secure_buffer_change_chunk(u32 chunks,
				u32 nchunks,
@@ -196,141 +190,174 @@ int msm_unsecure_table(struct sg_table *table)

}

static struct dest_info_list *populate_dest_info(int *dest_vmids, int nelements,
								int *dest_perms)
static struct dest_vm_and_perm_info *
populate_dest_info(int *dest_vmids, int nelements, int *dest_perms,
		   size_t *size_in_bytes)
{
	struct dest_vm_and_perm_info *dest_info;
	struct dest_info_list *list;
	int i;
	size_t size;

	dest_info = kmalloc_array(nelements,
			(sizeof(struct dest_vm_and_perm_info)),
				GFP_KERNEL | __GFP_ZERO);
	/* Ensure allocated size is less than PAGE_ALLOC_COSTLY_ORDER */
	size = nelements * sizeof(*dest_info);
	if (size > PAGE_SIZE)
		return NULL;

	dest_info = kzalloc(size, GFP_KERNEL);
	if (!dest_info)
		return NULL;

	for (i = 0; i < nelements; i++) {
		dest_info[i].vm = dest_vmids[i];
		dest_info[i].perm = dest_perms[i];
		dest_info[i].ctx = NULL;
		dest_info[i].ctx = 0x0;
		dest_info[i].ctx_size = 0;
	}
	list = kzalloc(sizeof(struct dest_info_list), GFP_KERNEL);
	if (!list) {
		kfree(dest_info);
		return NULL;
	}

	list->dest_info = dest_info;
	list->list_size = nelements * sizeof(struct dest_vm_and_perm_info);

	return list;
	*size_in_bytes = size;
	return dest_info;
}

static struct info_list *get_info_list_from_table(struct sg_table *table)
/* Must hold secure_buffer_mutex while allocated buffer is in use */
static struct mem_prot_info *get_info_list_from_table(struct sg_table *table,
						      size_t *size_in_bytes)
{
	int i;
	struct scatterlist *sg;
	struct mem_prot_info *info;
	struct info_list *list;
	size_t size;

	info = kmalloc_array(table->nents, (sizeof(struct mem_prot_info)),
					GFP_KERNEL | __GFP_ZERO);
	if (!info)
		return NULL;
	size = table->nents * sizeof(*info);

	for_each_sg(table->sgl, sg, table->nents, i) {
		info[i].addr = page_to_phys(sg_page(sg));
		info[i].size = sg->length;
	if (size >= QCOM_SECURE_MEM_SIZE) {
		pr_err("%s: Not enough memory allocated. Required size %zd\n",
				__func__, size);
		return NULL;
	}

	list = kzalloc(sizeof(struct info_list), GFP_KERNEL);
	if (!list) {
		kfree(info);
	if (!qcom_secure_mem) {
		pr_err("%s is not functional as qcom_secure_mem is not allocated.\n",
				__func__);
		return NULL;
	}

	list->list_head = info;
	list->list_size = table->nents * sizeof(struct mem_prot_info);
	return list;
}
	/* "Allocate" it */
	info = qcom_secure_mem;

static void destroy_info_list(struct info_list *info_list)
{
	kfree(info_list->list_head);
	kfree(info_list);
	for_each_sg(table->sgl, sg, table->nents, i) {
		info[i].addr = page_to_phys(sg_page(sg));
		info[i].size = sg->length;
	}

static void destroy_dest_info_list(struct dest_info_list *dest_list)
{
	kfree(dest_list->dest_info);
	kfree(dest_list);
	*size_in_bytes = size;
	return info;
}

#define BATCH_MAX_SIZE SZ_2M
#define BATCH_MAX_SECTIONS 32

int hyp_assign_table(struct sg_table *table,
			u32 *source_vm_list, int source_nelems,
			int *dest_vmids, int *dest_perms,
			int dest_nelems)
{
	int ret;
	struct info_list *info_list = NULL;
	struct dest_info_list *dest_info_list = NULL;
	int ret = 0;
	struct scm_desc desc = {0};
	u32 *source_vm_copy;
	size_t source_vm_copy_size;
	struct dest_vm_and_perm_info *dest_vm_copy;
	size_t dest_vm_copy_size;
	struct mem_prot_info *sg_table_copy;
	size_t sg_table_copy_size;

	info_list = get_info_list_from_table(table);
	if (!info_list)
		return -ENOMEM;

	dest_info_list = populate_dest_info(dest_vmids, dest_nelems,
							dest_perms);
	if (!dest_info_list) {
		ret = -ENOMEM;
		goto err1;
	}
	int batch_start, batch_end;
	u64 batch_size;

	/*
	 * We can only pass cache-aligned sizes to hypervisor, so we need
	 * to kmalloc and memcpy the source_vm_list here.
	 */
	source_vm_copy = kmalloc_array(
		source_nelems, sizeof(*source_vm_copy), GFP_KERNEL);
	if (!source_vm_copy) {
	source_vm_copy_size = sizeof(*source_vm_copy) * source_nelems;
	source_vm_copy = kzalloc(source_vm_copy_size, GFP_KERNEL);
	if (!source_vm_copy)
		return -ENOMEM;

	memcpy(source_vm_copy, source_vm_list, source_vm_copy_size);


	dest_vm_copy = populate_dest_info(dest_vmids, dest_nelems, dest_perms,
					  &dest_vm_copy_size);
	if (!dest_vm_copy) {
		ret = -ENOMEM;
		goto err2;
		goto out_free;
	}
	memcpy(source_vm_copy, source_vm_list,
	       sizeof(*source_vm_list) * source_nelems);

	desc.args[0] = virt_to_phys(info_list->list_head);
	desc.args[1] = info_list->list_size;
	mutex_lock(&secure_buffer_mutex);

	sg_table_copy = get_info_list_from_table(table, &sg_table_copy_size);
	if (!sg_table_copy) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	desc.args[0] = virt_to_phys(sg_table_copy);
	desc.args[1] = sg_table_copy_size;
	desc.args[2] = virt_to_phys(source_vm_copy);
	desc.args[3] = sizeof(*source_vm_copy) * source_nelems;
	desc.args[4] = virt_to_phys(dest_info_list->dest_info);
	desc.args[5] = dest_info_list->list_size;
	desc.args[3] = source_vm_copy_size;
	desc.args[4] = virt_to_phys(dest_vm_copy);
	desc.args[5] = dest_vm_copy_size;
	desc.args[6] = 0;

	desc.arginfo = SCM_ARGS(7, SCM_RO, SCM_VAL, SCM_RO, SCM_VAL, SCM_RO,
				SCM_VAL, SCM_VAL);

	dmac_flush_range(source_vm_copy, source_vm_copy + source_nelems);
	dmac_flush_range(info_list->list_head, info_list->list_head +
		(info_list->list_size / sizeof(*info_list->list_head)));
	dmac_flush_range(dest_info_list->dest_info, dest_info_list->dest_info +
		(dest_info_list->list_size /
				sizeof(*dest_info_list->dest_info)));
	dmac_flush_range(source_vm_copy,
			 (void *)source_vm_copy + source_vm_copy_size);
	dmac_flush_range(sg_table_copy,
			 (void *)sg_table_copy + sg_table_copy_size);
	dmac_flush_range(dest_vm_copy,
			 (void *)dest_vm_copy + dest_vm_copy_size);

	batch_start = 0;
	while (batch_start < table->nents) {
		/* Ensure no size zero batches */
		batch_size = sg_table_copy[batch_start].size;
		batch_end = batch_start + 1;
		while (1) {
			u64 size;

			if (batch_end >= table->nents)
				break;
			if (batch_end - batch_start >= BATCH_MAX_SECTIONS)
				break;

			size = sg_table_copy[batch_end].size;
			if (size + batch_size >= BATCH_MAX_SIZE)
				break;

			batch_size += size;
			batch_end++;
		}

		desc.args[0] = virt_to_phys(&sg_table_copy[batch_start]);
		desc.args[1] = (batch_end - batch_start) *
				sizeof(sg_table_copy[0]);

		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
				MEM_PROT_ASSIGN_ID), &desc);
	if (ret)
		if (ret) {
			pr_info("%s: Failed to assign memory protection, ret = %d\n",
				__func__, ret);
			break;
		}
		batch_start = batch_end;
	}

out_unlock:
	mutex_unlock(&secure_buffer_mutex);
	kfree(dest_vm_copy);
out_free:
	kfree(source_vm_copy);
err2:
	destroy_dest_info_list(dest_info_list);
err1:
	destroy_info_list(info_list);
	return ret;
}

@@ -338,28 +365,19 @@ int hyp_assign_phys(phys_addr_t addr, u64 size, u32 *source_vm_list,
			int source_nelems, int *dest_vmids,
			int *dest_perms, int dest_nelems)
{
	struct sg_table *table;
	struct sg_table table;
	int ret;

	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;
	ret = sg_alloc_table(table, 1, GFP_KERNEL);
	ret = sg_alloc_table(&table, 1, GFP_KERNEL);
	if (ret)
		goto err1;
		return ret;

	sg_set_page(table->sgl, phys_to_page(addr), size, 0);
	sg_set_page(table.sgl, phys_to_page(addr), size, 0);

	ret = hyp_assign_table(table, source_vm_list, source_nelems, dest_vmids,
						dest_perms, dest_nelems);
	if (ret)
		goto err2;
	ret = hyp_assign_table(&table, source_vm_list, source_nelems,
			       dest_vmids, dest_perms, dest_nelems);

	return ret;
err2:
	sg_free_table(table);
err1:
	kfree(table);
	sg_free_table(&table);
	return ret;
}

@@ -388,6 +406,12 @@ const char *msm_secure_vmid_to_string(int secure_vmid)
		return "VMID_CP_SEC_DISPLAY";
	case VMID_CP_APP:
		return "VMID_CP_APP";
	case VMID_WLAN:
		return "VMID_WLAN";
	case VMID_WLAN_CE:
		return "VMID_WLAN_CE";
	case VMID_CP_CAMERA_PREVIEW:
		return "VMID_CP_CAMERA_PREVIEW";
	case VMID_INVAL:
		return "VMID_INVAL";
	default:
@@ -408,3 +432,23 @@ bool msm_secure_v2_is_supported(void)
	 */
	return version >= MAKE_CP_VERSION(1, 1, 0);
}

/*
 * Allocate the shared buffer used to pass scatterlist/VM info to the
 * hypervisor (see QCOM_SECURE_MEM_SIZE).  Tries a plain kzalloc first;
 * if that fails (512 KiB is an order-7 contiguous allocation and may not
 * be available), falls back to CMA-backed DMA-coherent memory.
 *
 * Returns 0 on success, -ENOMEM if neither allocation succeeds (in which
 * case hyp_assign_table() cannot function).
 */
static int __init alloc_secure_shared_memory(void)
{
	int ret = 0;
	/* Only needed to satisfy dma_alloc_coherent(); the handle is
	 * otherwise discarded — the buffer lives for the life of the kernel
	 * and is never freed. */
	dma_addr_t dma_handle;

	qcom_secure_mem = kzalloc(QCOM_SECURE_MEM_SIZE, GFP_KERNEL);
	if (!qcom_secure_mem) {
		/* Fallback to CMA-DMA memory */
		/*
		 * NOTE(review): allocation is made against the NULL device,
		 * and callers later use virt_to_phys() on this buffer; that
		 * is only valid if the coherent allocation comes from the
		 * linear map (true for CMA) — confirm this holds on all
		 * targets using this driver.
		 */
		qcom_secure_mem = dma_alloc_coherent(NULL, QCOM_SECURE_MEM_SIZE,
						&dma_handle, GFP_KERNEL);
		if (!qcom_secure_mem) {
			pr_err("Couldn't allocate memory for secure use-cases. hyp_assign_table will not work\n");
			return -ENOMEM;
		}
	}

	return ret;
}
/* pure_initcall: run early, before users of qcom_secure_mem can exist. */
pure_initcall(alloc_secure_shared_memory);
+63 −1
Original line number Diff line number Diff line
@@ -69,6 +69,11 @@
 */
#define DMA_ATTR_PRIVILEGED		(1UL << 9)

/*
 * DMA_ATTR_SKIP_ZEROING: Do not zero mapping.
 */
#define DMA_ATTR_SKIP_ZEROING		(1UL << 10)

/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform.
 * It can be given to a device to use as a DMA source or target.  A CPU cannot
@@ -128,6 +133,10 @@ struct dma_map_ops {
	int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
	int (*dma_supported)(struct device *dev, u64 mask);
	int (*set_dma_mask)(struct device *dev, u64 mask);
	void *(*remap)(struct device *dev, void *cpu_addr, dma_addr_t handle,
			size_t size, unsigned long attrs);
	void (*unremap)(struct device *dev, void *remapped_address,
			size_t size);
#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
	u64 (*get_required_mask)(struct device *dev);
#endif
@@ -422,7 +431,8 @@ void *dma_common_contiguous_remap(struct page *page, size_t size,
void *dma_common_pages_remap(struct page **pages, size_t size,
			unsigned long vm_flags, pgprot_t prot,
			const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags,
			   bool nowarn);

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
@@ -581,6 +591,35 @@ static inline int dma_set_mask(struct device *dev, u64 mask)
	return 0;
}
#endif
/*
 * dma_remap - create a new CPU mapping for an existing DMA allocation.
 * @dev:        device that owns the allocation
 * @cpu_addr:   kernel virtual address of the original allocation
 * @dma_handle: bus address of the allocation
 * @size:       size of the region to remap
 * @attrs:      DMA attributes controlling the new mapping
 *
 * Dispatches to the dma_map_ops ->remap hook.  Returns the new mapping,
 * or NULL if the ops in use do not implement remapping.
 */
static inline void *dma_remap(struct device *dev, void *cpu_addr,
		dma_addr_t dma_handle, size_t size, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops->remap) {
		/*
		 * Identify the offending ops table, not ops->remap itself:
		 * ops->remap is known to be NULL here, so printing it with
		 * %pS would convey nothing.
		 */
		WARN_ONCE(1, "Remap function not implemented for ops %p\n",
				ops);
		return NULL;
	}

	return ops->remap(dev, cpu_addr, dma_handle, size, attrs);
}


/*
 * dma_unremap - tear down a mapping previously created by dma_remap().
 * @dev:           device that owns the allocation
 * @remapped_addr: address returned by dma_remap()
 * @size:          size passed to dma_remap()
 *
 * Dispatches to the dma_map_ops ->unremap hook; warns (once) and does
 * nothing if the ops in use do not implement it.
 */
static inline void dma_unremap(struct device *dev, void *remapped_addr,
				size_t size)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops->unremap) {
		/* ops->unremap is NULL here; print the ops table instead. */
		WARN_ONCE(1, "unremap function not implemented for ops %p\n",
				ops);
		return;
	}

	/*
	 * Plain call: `return <void expression>;` from a void function is
	 * a constraint violation in C (it is only valid C++).
	 */
	ops->unremap(dev, remapped_addr, size);
}


static inline u64 dma_get_mask(struct device *dev)
{
@@ -802,6 +841,29 @@ static inline int dma_mmap_wc(struct device *dev,
#define dma_mmap_writecombine dma_mmap_wc
#endif

/*
 * dma_alloc_nonconsistent - allocate a non-cache-coherent DMA buffer.
 *
 * Thin convenience wrapper around dma_alloc_attrs() with
 * DMA_ATTR_NON_CONSISTENT set.  Returns the CPU address of the buffer
 * and fills *dma_handle with its bus address.
 */
static inline void *dma_alloc_nonconsistent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t flag)
{
	return dma_alloc_attrs(dev, size, dma_handle, flag,
			       DMA_ATTR_NON_CONSISTENT);
}

/*
 * dma_free_nonconsistent - free a buffer from dma_alloc_nonconsistent().
 *
 * Counterpart of dma_alloc_nonconsistent(); forwards to dma_free_attrs()
 * with DMA_ATTR_NON_CONSISTENT set.
 */
static inline void dma_free_nonconsistent(struct device *dev, size_t size,
					void *cpu_addr, dma_addr_t dma_handle)
{
	unsigned long attrs = DMA_ATTR_NON_CONSISTENT;

	/*
	 * Plain call, not `return dma_free_attrs(...)`: returning a void
	 * expression from a void function is invalid in C (C++-only).
	 */
	dma_free_attrs(dev, size, cpu_addr, dma_handle, attrs);
}

/*
 * dma_mmap_nonconsistent - map a non-consistent buffer into user space.
 *
 * Intentionally unimplemented stub: always returns -ENODEV, signalling
 * that mmap of non-consistent allocations is not supported here.
 */
static inline int dma_mmap_nonconsistent(struct device *dev,
		struct vm_area_struct *vma, void *cpu_addr,
		dma_addr_t dma_addr, size_t size)
{
	return -ENODEV;
}

#if defined(CONFIG_NEED_DMA_MAP_STATE) || defined(CONFIG_DMA_API_DEBUG)
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)        dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)          __u32 LEN_NAME
Loading