Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit b1f7061c authored by Linux Build Service Account's avatar Linux Build Service Account Committed by Gerrit - the friendly Code Review server
Browse files

Merge "iommu: msm: fix checkpatch errors"

parents df16bd49 6c374de4
Loading
Loading
Loading
Loading
+21 −9
Original line number Diff line number Diff line
@@ -32,7 +32,7 @@
#include <linux/regulator/consumer.h>
#include <linux/notifier.h>
#include <linux/qcom_iommu.h>
#include <asm/sizes.h>
#include <linux/sizes.h>

#include "msm_iommu_hw-v1.h"
#include "msm_iommu_priv.h"
@@ -63,6 +63,7 @@ struct dump_regs_tbl_entry dump_regs_tbl[MAX_DUMP_REGS];
static int __enable_regulators(struct msm_iommu_drvdata *drvdata)
{
	int ret = 0;

	if (drvdata->gdsc) {
		ret = regulator_enable(drvdata->gdsc);
		if (ret)
@@ -312,9 +313,9 @@ void iommu_halt(struct msm_iommu_drvdata const *iommu_drvdata)
		int res;

		SET_MICRO_MMU_CTRL_HALT_REQ(base, 1);
		res = readl_poll_timeout(
			GLB_REG(MICRO_MMU_CTRL, base), val,
			     (val & MMU_CTRL_IDLE) == MMU_CTRL_IDLE, 0, 5000000);
		res = readl_poll_timeout(GLB_REG(MICRO_MMU_CTRL, base), val,
			(val & MMU_CTRL_IDLE) == MMU_CTRL_IDLE,
			0, 5000000);

		if (res)
			check_halt_state(iommu_drvdata);
@@ -415,6 +416,7 @@ static void __reset_iommu(struct msm_iommu_drvdata *iommu_drvdata)
	for (i = 0; i < smt_size; i++)
		SET_SMR_VALID(base, i, 0);

	/* make sure SMR programming is done*/
	mb();
}

@@ -427,6 +429,8 @@ static void __reset_iommu_secure(struct msm_iommu_drvdata *iommu_drvdata)
	SET_NSCR2(base, 0);
	SET_NSGFAR(base, 0);
	SET_NSGFSRRESTORE(base, 0);

	/* make sure reset is done */
	mb();
}

@@ -487,6 +491,7 @@ void program_iommu_bfb_settings(void __iomem *base,
			const struct msm_iommu_bfb_settings *bfb_settings)
{
	unsigned int i;

	if (bfb_settings)
		for (i = 0; i < bfb_settings->length; i++)
			SET_GLOBAL_REG(base, bfb_settings->regs[i],
@@ -514,12 +519,15 @@ static void __reset_context(struct msm_iommu_drvdata *iommu_drvdata, int ctx)
	SET_TTBCR(base, ctx, 0);
	SET_TTBR0(base, ctx, 0);
	SET_TTBR1(base, ctx, 0);

	/* make sure reset is done */
	mb();
}

static void __release_smg(void __iomem *base)
{
	int i, smt_size;

	smt_size = GET_IDR0_NUMSMRG(base);

	/* Invalidate all SMGs */
@@ -717,7 +725,7 @@ static void __program_context(struct msm_iommu_drvdata *iommu_drvdata,

	/* Enable the MMU */
	SET_CB_SCTLR_M(cb_base, ctx, 1);
	mb();
	mb(); /* make sure MMU is enabled */
}

#ifdef CONFIG_IOMMU_PGTABLES_L2
@@ -1131,12 +1139,13 @@ static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,

	spin_lock_irqsave(&msm_iommu_spin_lock, flags);
	SET_ATS1PR(base, ctx, va & CB_ATS1PR_ADDR);
	/* make sure ATS1PR is visible */
	mb();
	for (i = 0; i < IOMMU_USEC_TIMEOUT; i += IOMMU_USEC_STEP)
	for (i = 0; i < IOMMU_USEC_TIMEOUT; i += IOMMU_USEC_STEP) {
		if (GET_CB_ATSR_ACTIVE(base, ctx) == 0)
			break;
		else
		udelay(IOMMU_USEC_STEP);
	}

	if (i >= IOMMU_USEC_TIMEOUT) {
		pr_err("%s: iova to phys timed out on %pa for %s (%s)\n",
@@ -1152,6 +1161,7 @@ static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,

	if (par & CB_PAR_F) {
		unsigned int level = (par & CB_PAR_PLVL) >> CB_PAR_PLVL_SHIFT;

		pr_err("IOMMU translation fault!\n");
		pr_err("name = %s\n", iommu_drvdata->name);
		pr_err("context = %s (%d)\n", ctx_drvdata->name,
@@ -1253,14 +1263,15 @@ static void __print_ctx_regs(struct msm_iommu_drvdata *drvdata, int ctx,
	void __iomem *base = drvdata->base;
	void __iomem *cb_base = drvdata->cb_base;
	bool is_secure = drvdata->sec_id != -1;

	struct msm_iommu_context_reg regs[MAX_DUMP_REGS];
	unsigned int i;

	memset(regs, 0, sizeof(regs));

	for (i = DUMP_REG_FIRST; i < MAX_DUMP_REGS; ++i) {
		struct msm_iommu_context_reg *r = &regs[i];
		unsigned long regaddr = dump_regs_tbl[i].reg_offset;

		if (is_secure &&
			dump_regs_tbl[i].dump_reg_type != DRT_CTX_REG) {
			r->valid = 0;
@@ -1518,6 +1529,7 @@ static int msm_iommu_domain_get_attr(struct iommu_domain *domain,
				enum iommu_attr attr, void *data)
{
	struct msm_iommu_priv *priv = domain->priv;

	switch (attr) {
	case DOMAIN_ATTR_COHERENT_HTW_DISABLE:
		__do_get_redirect(domain, data);
+1 −1
Original line number Diff line number Diff line
@@ -581,7 +581,7 @@ static int msm_iommu_ctx_parse_dt(struct platform_device *pdev,
	/* Calculate the context bank number using the base addresses.
	 * Typically CB0 base address is 0x8000 pages away if the number
	 * of CBs are <=8. So, assume the offset 0x8000 until mentioned
	 * explicitely.
	 * explicitly.
	 */
	cb_offset = drvdata->cb_base - drvdata->base;
	ctx_drvdata->num = ((r->start - rp.start - cb_offset)
+13 −10
Original line number Diff line number Diff line
@@ -21,7 +21,7 @@
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/idr.h>
#include <asm/sizes.h>
#include <linux/sizes.h>
#include <asm/page.h>
#include <linux/qcom_iommu.h>
#include <linux/msm_iommu_domains.h>
@@ -48,10 +48,11 @@ static DEFINE_IDA(domain_nums);
/* Record a human-readable client name on an IOMMU domain (for debug/logs). */
void msm_iommu_set_client_name(struct iommu_domain *domain, char const *name)
{
	struct msm_iommu_priv *drv_priv = domain->priv;

	drv_priv->client_name = name;
}

int msm_use_iommu()
int msm_use_iommu(void)
{
	return iommu_present(&platform_bus_type);
}
@@ -157,6 +158,7 @@ static int msm_iommu_map_iova_phys(struct iommu_domain *domain,
{
	int ret;
	int prot = IOMMU_WRITE | IOMMU_READ;

	prot |= cached ? IOMMU_CACHE : 0;

	ret = iommu_map(domain, iova, phys, size, prot);
@@ -267,6 +269,7 @@ static int add_domain(struct msm_iova_data *node)
	mutex_lock(&domain_mutex);
	while (*p) {
		struct msm_iova_data *tmp;

		parent = *p;

		tmp = rb_entry(parent, struct msm_iova_data, node);
@@ -342,6 +345,7 @@ EXPORT_SYMBOL(msm_find_domain_no);
struct iommu_domain *msm_iommu_domain_find(const char *name)
{
	struct iommu_group *group = iommu_group_find(name);

	if (!group)
		return NULL;
	return iommu_group_get_iommudata(group);
@@ -351,6 +355,7 @@ EXPORT_SYMBOL(msm_iommu_domain_find);
int msm_iommu_domain_no_find(const char *name)
{
	struct iommu_domain *domain = msm_iommu_domain_find(name);

	if (!domain)
		return -EINVAL;
	return msm_find_domain_no(domain);
@@ -473,7 +478,7 @@ int msm_register_domain(struct msm_iova_layout *layout)
	if (!data)
		return -ENOMEM;

	pools = kzalloc(sizeof(struct mem_pool) * layout->npartitions,
	pools = kcalloc(layout->npartitions, sizeof(struct mem_pool),
			GFP_KERNEL);

	if (!pools)
@@ -523,9 +528,8 @@ free_domain_num:
	ida_simple_remove(&domain_nums, data->domain_num);

free_pools:
	for (i = 0; i < layout->npartitions; i++) {
	for (i = 0; i < layout->npartitions; i++)
		kfree(pools[i].bitmap);
	}
	kfree(pools);
free_data:
	kfree(data);
@@ -627,8 +631,6 @@ static int create_and_add_domain(struct iommu_group *group,
		}
		addr_array = kmalloc(array_size, GFP_KERNEL);
		if (!addr_array) {
			pr_err("%s: could not allocate space for partition",
				__func__);
			ret_val = -ENOMEM;
			goto free_mem;
		}
@@ -730,6 +732,7 @@ static int iommu_domain_parse_dt(const struct device_node *dt_node)
	struct msm_iommu_data_entry *grp_list_entry;
	struct msm_iommu_data_entry *tmp;
	struct list_head iommu_group_list;

	INIT_LIST_HEAD(&iommu_group_list);

	for_each_child_of_node(dt_node, node) {
@@ -765,9 +768,9 @@ static int iommu_domain_parse_dt(const struct device_node *dt_node)
		num_contexts = sz / sizeof(unsigned int);

		ret_val = find_and_add_contexts(group, node, num_contexts);
		if (ret_val) {
		if (ret_val)
			goto free_group;
		}

		ret_val = create_and_add_domain(group, node, name);
		if (ret_val) {
			ret_val = -EINVAL;
@@ -891,6 +894,7 @@ static struct platform_driver iommu_domain_driver = {
static int __init msm_subsystem_iommu_init(void)
{
	int ret;

	ret = platform_driver_register(&iommu_domain_driver);
	if (ret != 0)
		pr_err("Failed to register IOMMU domain driver\n");
@@ -904,4 +908,3 @@ static void __exit msm_subsystem_iommu_exit(void)

device_initcall(msm_subsystem_iommu_init);
module_exit(msm_subsystem_iommu_exit);
+3 −6
Original line number Diff line number Diff line
@@ -164,6 +164,7 @@ static struct msm_iommu_map *msm_iommu_lookup(
	struct rb_node *parent = NULL;
	struct msm_iommu_map *entry;
	uint64_t key = domain_no;

	key = key << 32 | partition_no;

	while (*p) {
@@ -238,6 +239,7 @@ static int msm_iommu_map_iommu(struct msm_iommu_meta *meta,
	if (extra) {
		unsigned long extra_iova_addr = data->iova_addr + size;
		unsigned long phys_addr = sg_phys(table->sgl);

		ret = msm_iommu_map_extra(domain, extra_iova_addr, phys_addr,
					extra, SZ_4K, prot);
		if (ret)
@@ -279,8 +281,6 @@ static void msm_iommu_heap_unmap_iommu(struct msm_iommu_map *data)
	WARN_ON(ret < 0);
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
				data->mapped_size);

	return;
}


@@ -391,6 +391,7 @@ static int __msm_map_iommu_common(

	if (!msm_use_iommu()) {
		unsigned long pa = sg_dma_address(table->sgl);

		if (pa == 0)
			pa = sg_phys(table->sgl);
		*iova = pa;
@@ -636,9 +637,5 @@ void ion_unmap_iommu(struct ion_client *client, struct ion_handle *handle,
	table = ion_sg_table(client, handle);

	__msm_unmap_iommu_common(table, domain_num, partition_num);

	return;
}
EXPORT_SYMBOL(ion_unmap_iommu);

+3 −2
Original line number Diff line number Diff line
@@ -208,6 +208,7 @@ static u32 *make_second_level(struct msm_iommu_pt *pt, u32 *fl_pte,
				u32 *fl_pte_shadow)
{
	u32 *sl;

	sl = (u32 *) __get_free_pages(GFP_ATOMIC,
			get_order(SZ_4K));

@@ -218,8 +219,7 @@ static u32 *make_second_level(struct msm_iommu_pt *pt, u32 *fl_pte,
	memset(sl, 0, SZ_4K);
	clean_pte(sl, sl + NUM_SL_PTE + GUARD_PTE, pt->redirect);

	*fl_pte = ((((int)__pa(sl)) & FL_BASE_MASK) | \
			FL_TYPE_TABLE);
	*fl_pte = ((((int)__pa(sl)) & FL_BASE_MASK) | FL_TYPE_TABLE);
	*fl_pte_shadow = *fl_pte & ~0x1FF;

	clean_pte(fl_pte, fl_pte + 1, pt->redirect);
@@ -277,6 +277,7 @@ static inline int fl_16m(u32 *fl_pte, phys_addr_t pa, int pgprot)
{
	int i;
	int ret = 0;

	for (i = 0; i < 16; i++)
		if (*(fl_pte+i)) {
			ret = -EBUSY;
Loading