
Commit fc2bdefd authored by Matt Fleming, committed by Paul Mundt

sh: Plug PMB alloc memory leak



If we fail to allocate a PMB entry in pmb_remap() we must remember to
clear and free any PMB entries that we may have previously allocated,
e.g. if we were allocating a multiple-entry mapping.

Signed-off-by: Matt Fleming <matt@console-pimps.org>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
parent a6325247
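
The fix applies the standard unwind-on-failure idiom for multi-step allocations. Below is a minimal, self-contained C sketch of that idiom, not the kernel's actual PMB code: struct node, alloc_chain() and unwind_chain() are hypothetical stand-ins for struct pmb_entry, pmb_remap() and __pmb_unmap(), with malloc()/free() standing in for pmb_alloc()/pmb_free().

#include <stdlib.h>

/* Hypothetical stand-in for struct pmb_entry: each entry links back
 * to the previously allocated one, so a partial mapping is a chain. */
struct node {
	struct node *link;
};

/* Analogue of __pmb_unmap(): release every entry allocated so far. */
static void unwind_chain(struct node *head)
{
	while (head) {
		struct node *prev = head->link;

		free(head);
		head = prev;
	}
}

/* Analogue of the fixed pmb_remap() flow: a mid-loop failure jumps to
 * a single exit path that unwinds the partial chain, instead of
 * returning immediately and leaking it. */
static struct node *alloc_chain(int wanted)
{
	struct node *head = NULL;
	int i;

	for (i = 0; i < wanted; i++) {
		struct node *n = malloc(sizeof(*n));

		if (!n)
			goto out;	/* before the fix: "return NULL", leaking the chain */

		n->link = head;
		head = n;
	}

	return head;

out:
	unwind_chain(head);
	return NULL;
}

In the patch itself, the chain walk in __pmb_unmap() also clears each hardware slot via clear_pmb_entry() — skipping entries marked PMB_NO_ENTRY — before calling pmb_free(); factoring __pmb_unmap() out of pmb_unmap() lets the error path and the normal unmap path share that logic.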
+24 −6
@@ -33,6 +33,8 @@
 
 #define NR_PMB_ENTRIES	16
 
+static void __pmb_unmap(struct pmb_entry *);
+
 static struct kmem_cache *pmb_cache;
 static unsigned long pmb_map;
 
@@ -218,9 +220,10 @@ static struct {
 long pmb_remap(unsigned long vaddr, unsigned long phys,
 	       unsigned long size, unsigned long flags)
 {
-	struct pmb_entry *pmbp;
+	struct pmb_entry *pmbp, *pmbe;
 	unsigned long wanted;
 	int pmb_flags, i;
+	long err;
 
 	/* Convert typical pgprot value to the PMB equivalent */
 	if (flags & _PAGE_CACHABLE) {
@@ -236,20 +239,22 @@ long pmb_remap(unsigned long vaddr, unsigned long phys,
 
 again:
 	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
-		struct pmb_entry *pmbe;
 		int ret;
 
 		if (size < pmb_sizes[i].size)
 			continue;
 
 		pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag);
-		if (IS_ERR(pmbe))
-			return PTR_ERR(pmbe);
+		if (IS_ERR(pmbe)) {
+			err = PTR_ERR(pmbe);
+			goto out;
+		}
 
 		ret = set_pmb_entry(pmbe);
 		if (ret != 0) {
 			pmb_free(pmbe);
-			return -EBUSY;
+			err = -EBUSY;
+			goto out;
 		}
 
 		phys	+= pmb_sizes[i].size;
@@ -270,6 +275,12 @@ long pmb_remap(unsigned long vaddr, unsigned long phys,
 		goto again;
 
 	return wanted - size;
+
+out:
+	if (pmbp)
+		__pmb_unmap(pmbp);
+
+	return err;
 }
 
 void pmb_unmap(unsigned long addr)
@@ -283,12 +294,19 @@ void pmb_unmap(unsigned long addr)
 	if (unlikely(!pmbe))
 		return;
 
+	__pmb_unmap(pmbe);
+}
+
+static void __pmb_unmap(struct pmb_entry *pmbe)
+{
 	WARN_ON(!test_bit(pmbe->entry, &pmb_map));
 
 	do {
 		struct pmb_entry *pmblink = pmbe;
 
-		clear_pmb_entry(pmbe);
+		if (pmbe->entry != PMB_NO_ENTRY)
+			clear_pmb_entry(pmbe);
+
 		pmbe = pmblink->link;
 
 		pmb_free(pmblink);