Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 546d3c74 authored by Reinette Chatre, committed by Thomas Gleixner
Browse files

x86/intel_rdt: Fix cleanup of plr structure on error



When a resource group enters pseudo-locksetup mode a pseudo_lock_region is
associated with it. When the user writes to the resource group's schemata
file the CBM of the requested pseudo-locked region is entered into the
pseudo_lock_region struct. If any part of pseudo-lock region creation fails
the resource group will remain in pseudo-locksetup mode with the
pseudo_lock_region associated with it.

In case of failure during pseudo-lock region creation care needs to be
taken to ensure that the pseudo_lock_region struct associated with the
resource group is cleared from any pseudo-locking data - especially the
CBM. This is because the existence of a pseudo_lock_region struct with a
CBM is significant in other areas of the code, for example, the display of
bit_usage and initialization of a new resource group.

Fix the error path of pseudo-lock region creation to ensure that the
pseudo_lock_region struct is cleared at each error exit.

Fixes: 018961ae ("x86/intel_rdt: Pseudo-lock region creation/removal core")
Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: fenghua.yu@intel.com
Cc: tony.luck@intel.com
Cc: vikas.shivappa@linux.intel.com
Cc: gavin.hindman@intel.com
Cc: jithu.joseph@intel.com
Cc: dave.hansen@intel.com
Cc: hpa@zytor.com
Link: https://lkml.kernel.org/r/49b4782f6d204d122cee3499e642b2772a98d2b4.1530421026.git.reinette.chatre@intel.com
parent ce730f1c
Loading
Loading
Loading
Loading
+17 −5
--- a/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c
+++ b/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c
@@ -290,6 +290,7 @@ static void pseudo_lock_region_clear(struct pseudo_lock_region *plr)
 static int pseudo_lock_region_init(struct pseudo_lock_region *plr)
 {
 	struct cpu_cacheinfo *ci;
+	int ret;
 	int i;
 
 	/* Pick the first cpu we find that is associated with the cache. */
@@ -298,7 +299,8 @@ static int pseudo_lock_region_init(struct pseudo_lock_region *plr)
 	if (!cpu_online(plr->cpu)) {
 		rdt_last_cmd_printf("cpu %u associated with cache not online\n",
 				    plr->cpu);
-		return -ENODEV;
+		ret = -ENODEV;
+		goto out_region;
 	}
 
 	ci = get_cpu_cacheinfo(plr->cpu);
@@ -312,8 +314,11 @@ static int pseudo_lock_region_init(struct pseudo_lock_region *plr)
 		}
 	}
 
+	ret = -1;
 	rdt_last_cmd_puts("unable to determine cache line size\n");
-	return -1;
+out_region:
+	pseudo_lock_region_clear(plr);
+	return ret;
 }
 
 /**
@@ -365,16 +370,23 @@ static int pseudo_lock_region_alloc(struct pseudo_lock_region *plr)
 	 */
 	if (plr->size > KMALLOC_MAX_SIZE) {
 		rdt_last_cmd_puts("requested region exceeds maximum size\n");
-		return -E2BIG;
+		ret = -E2BIG;
+		goto out_region;
 	}
 
 	plr->kmem = kzalloc(plr->size, GFP_KERNEL);
 	if (!plr->kmem) {
 		rdt_last_cmd_puts("unable to allocate memory\n");
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto out_region;
 	}
 
-	return 0;
+	ret = 0;
+	goto out;
+out_region:
+	pseudo_lock_region_clear(plr);
+out:
+	return ret;
 }
 
 /**