
Commit ef4e0c21 authored by Mrugesh Katepallewar, committed by Artem Bityutskiy

mtd: davinci_nand: Use managed resources



The davinci_nand driver currently uses the plain kzalloc, ioremap and clk_get
routines. This patch replaces them with their managed counterparts
devm_kzalloc, devm_request_and_ioremap and devm_clk_get, respectively.

Signed-off-by: Mrugesh Katepallewar <mrugesh.mk@ti.com>
Signed-off-by: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
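
For context, the managed-resource ("devres") pattern adopted here ties each
acquisition to the struct device, so the driver core releases it automatically
when probe fails or when the device is unbound. A minimal sketch of a probe
routine written this way is shown below; the function and structure names
(example_probe, example_info) are made up for illustration and are not part of
the driver, only the devm_kzalloc/devm_request_and_ioremap/devm_clk_get calls
mirror what this patch uses.

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/io.h>
	#include <linux/platform_device.h>
	#include <linux/slab.h>

	struct example_info {
		void __iomem	*base;
		struct clk	*clk;
	};

	static int example_probe(struct platform_device *pdev)
	{
		struct example_info *info;
		struct resource *res;

		/* Freed automatically on probe failure or driver detach. */
		info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
		if (!info)
			return -ENOMEM;

		/* Requests the memory region and maps it; unmapped automatically. */
		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		info->base = devm_request_and_ioremap(&pdev->dev, res);
		if (!info->base)
			return -EADDRNOTAVAIL;

		/* Clock reference is dropped automatically, no clk_put() needed. */
		info->clk = devm_clk_get(&pdev->dev, "aemif");
		if (IS_ERR(info->clk))
			return PTR_ERR(info->clk);

		return 0;
	}

With this pattern the remove routine only has to undo what devres does not
track (nand_release() and clk_disable_unprepare() here), which is what the
hunks against nand_davinci_remove() below reflect.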
parent 221b1bd3
+5 −19
@@ -606,7 +606,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
 	if (pdev->id < 0 || pdev->id > 3)
 		return -ENODEV;
 
-	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
 	if (!info) {
 		dev_err(&pdev->dev, "unable to allocate memory\n");
 		ret = -ENOMEM;
@@ -623,11 +623,11 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
 		goto err_nomem;
 	}
 
-	vaddr = ioremap(res1->start, resource_size(res1));
-	base = ioremap(res2->start, resource_size(res2));
+	vaddr = devm_request_and_ioremap(&pdev->dev, res1);
+	base = devm_request_and_ioremap(&pdev->dev, res2);
 	if (!vaddr || !base) {
 		dev_err(&pdev->dev, "ioremap failed\n");
-		ret = -EINVAL;
+		ret = -EADDRNOTAVAIL;
 		goto err_ioremap;
 	}
 
@@ -717,7 +717,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
 	}
 	info->chip.ecc.mode = ecc_mode;
 
-	info->clk = clk_get(&pdev->dev, "aemif");
+	info->clk = devm_clk_get(&pdev->dev, "aemif");
 	if (IS_ERR(info->clk)) {
 		ret = PTR_ERR(info->clk);
 		dev_dbg(&pdev->dev, "unable to get AEMIF clock, err %d\n", ret);
@@ -845,8 +845,6 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
 	clk_disable_unprepare(info->clk);
 
 err_clk_enable:
-	clk_put(info->clk);
-
 	spin_lock_irq(&davinci_nand_lock);
 	if (ecc_mode == NAND_ECC_HW_SYNDROME)
 		ecc4_busy = false;
@@ -855,13 +853,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
 err_ecc:
 err_clk:
 err_ioremap:
-	if (base)
-		iounmap(base);
-	if (vaddr)
-		iounmap(vaddr);
-
 err_nomem:
-	kfree(info);
 	return ret;
 }
 
@@ -874,15 +866,9 @@ static int __exit nand_davinci_remove(struct platform_device *pdev)
 		ecc4_busy = false;
 	spin_unlock_irq(&davinci_nand_lock);
 
-	iounmap(info->base);
-	iounmap(info->vaddr);
-
 	nand_release(&info->mtd);
 
 	clk_disable_unprepare(info->clk);
-	clk_put(info->clk);
-
-	kfree(info);
 
 	return 0;
 }