Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 201ea482 authored by Satya Durga Srinivasu Prabhala
Browse files

kernel: Add snapshot of changes to support cpu isolation



This snapshot is taken from msm-4.19 as of commit 5debecbe7195
("trace: filter out spurious preemption and IRQs disable traces").

Change-Id: I222aa448ac68f7365065f62dba9db94925da38a0
Signed-off-by: Satya Durga Srinivasu Prabhala <satyap@codeaurora.org>
parent 8573d7bf
Loading
Loading
Loading
Loading
+5 −0
Original line number Diff line number Diff line
@@ -1156,6 +1156,11 @@ int lock_device_hotplug_sysfs(void)
	return restart_syscall();
}

/*
 * Lockdep annotation: assert that the caller holds device_hotplug_lock.
 * Lets hotplug helpers document and enforce their locking contract.
 */
void lock_device_hotplug_assert(void)
{
	lockdep_assert_held(&device_hotplug_lock);
}

#ifdef CONFIG_BLOCK
static inline int device_is_not_partition(struct device *dev)
{
+34 −0
Original line number Diff line number Diff line
@@ -183,6 +183,32 @@ static struct attribute_group crash_note_cpu_attr_group = {
};
#endif

#ifdef CONFIG_HOTPLUG_CPU
/*
 * "isolate" sysfs attribute: read-only report of whether this CPU is
 * currently in the isolated cpumask (see cpu_isolated()).
 */
static ssize_t isolate_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cpu *c = container_of(dev, struct cpu, dev);

	return scnprintf(buf, PAGE_SIZE-2, "%d\n", cpu_isolated(c->dev.id));
}

static DEVICE_ATTR_RO(isolate);

/* Attribute group exposing the per-CPU "isolate" file. */
static struct attribute *cpu_isolated_attrs[] = {
	&dev_attr_isolate.attr,
	NULL
};

static struct attribute_group cpu_isolated_attr_group = {
	.attrs = cpu_isolated_attrs,
};
#endif

#ifdef CONFIG_SCHED_WALT
static ssize_t sched_load_boost_show(struct device *dev,
				struct device_attribute *attr, char *buf)
@@ -240,6 +266,9 @@ static const struct attribute_group *common_cpu_attr_groups[] = {
#ifdef CONFIG_KEXEC
	&crash_note_cpu_attr_group,
#endif
#ifdef CONFIG_HOTPLUG_CPU
	&cpu_isolated_attr_group,
#endif
#ifdef CONFIG_SCHED_WALT
	&sched_cpu_attr_group,
#endif
@@ -250,6 +279,9 @@ static const struct attribute_group *hotplugable_cpu_attr_groups[] = {
#ifdef CONFIG_KEXEC
	&crash_note_cpu_attr_group,
#endif
#ifdef CONFIG_HOTPLUG_CPU
	&cpu_isolated_attr_group,
#endif
#ifdef CONFIG_SCHED_WALT
	&sched_cpu_attr_group,
#endif
@@ -282,6 +314,7 @@ static struct cpu_attr cpu_attrs[] = {
	_CPU_ATTR(online, &__cpu_online_mask),
	_CPU_ATTR(possible, &__cpu_possible_mask),
	_CPU_ATTR(present, &__cpu_present_mask),
	_CPU_ATTR(core_ctl_isolated, &__cpu_isolated_mask),
};

/*
@@ -531,6 +564,7 @@ static struct attribute *cpu_root_attrs[] = {
	&cpu_attrs[0].attr.attr,
	&cpu_attrs[1].attr.attr,
	&cpu_attrs[2].attr.attr,
	&cpu_attrs[3].attr.attr,
	&dev_attr_kernel_max.attr,
	&dev_attr_offline.attr,
	&dev_attr_isolated.attr,
+25 −0
Original line number Diff line number Diff line
@@ -55,6 +55,7 @@ extern unsigned int nr_cpu_ids;
 *     cpu_present_mask - has bit 'cpu' set iff cpu is populated
 *     cpu_online_mask  - has bit 'cpu' set iff cpu available to scheduler
 *     cpu_active_mask  - has bit 'cpu' set iff cpu available to migration
 *     cpu_isolated_mask- has bit 'cpu' set iff cpu isolated
 *
 *  If !CONFIG_HOTPLUG_CPU, present == possible, and active == online.
 *
@@ -91,10 +92,12 @@ extern struct cpumask __cpu_possible_mask;
extern struct cpumask __cpu_online_mask;
extern struct cpumask __cpu_present_mask;
extern struct cpumask __cpu_active_mask;
extern struct cpumask __cpu_isolated_mask;
#define cpu_possible_mask ((const struct cpumask *)&__cpu_possible_mask)
#define cpu_online_mask   ((const struct cpumask *)&__cpu_online_mask)
#define cpu_present_mask  ((const struct cpumask *)&__cpu_present_mask)
#define cpu_active_mask   ((const struct cpumask *)&__cpu_active_mask)
#define cpu_isolated_mask ((const struct cpumask *)&__cpu_isolated_mask)

extern atomic_t __num_online_cpus;

@@ -114,19 +117,31 @@ static inline unsigned int num_online_cpus(void)
#define num_possible_cpus()	cpumask_weight(cpu_possible_mask)
#define num_present_cpus()	cpumask_weight(cpu_present_mask)
#define num_active_cpus()	cpumask_weight(cpu_active_mask)
/* Number of CPUs currently marked isolated. */
#define num_isolated_cpus()	cpumask_weight(cpu_isolated_mask)
/* Number of CPUs that are online and NOT isolated. */
#define num_online_uniso_cpus()						\
({									\
	cpumask_t mask;							\
									\
	cpumask_andnot(&mask, cpu_online_mask, cpu_isolated_mask);	\
	cpumask_weight(&mask);						\
})
#define cpu_online(cpu)		cpumask_test_cpu((cpu), cpu_online_mask)
#define cpu_possible(cpu)	cpumask_test_cpu((cpu), cpu_possible_mask)
#define cpu_present(cpu)	cpumask_test_cpu((cpu), cpu_present_mask)
#define cpu_active(cpu)		cpumask_test_cpu((cpu), cpu_active_mask)
/* Nonzero iff 'cpu' is set in cpu_isolated_mask. */
#define cpu_isolated(cpu)	cpumask_test_cpu((cpu), cpu_isolated_mask)
#else
/* Uniprocessor stubs: only CPU 0 exists, and it is never isolated. */
#define num_online_cpus()	1U
#define num_possible_cpus()	1U
#define num_present_cpus()	1U
#define num_active_cpus()	1U
#define num_isolated_cpus()	0U
#define num_online_uniso_cpus()	1U
#define cpu_online(cpu)		((cpu) == 0)
#define cpu_possible(cpu)	((cpu) == 0)
#define cpu_present(cpu)	((cpu) == 0)
#define cpu_active(cpu)		((cpu) == 0)
/*
 * Any id other than 0 is a nonexistent CPU on UP and reports as
 * isolated; CPU 0 itself is never isolated.
 */
#define cpu_isolated(cpu)	((cpu) != 0)
#endif

extern cpumask_t cpus_booted_once_mask;
@@ -806,6 +821,7 @@ extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS);
#define for_each_possible_cpu(cpu) for_each_cpu((cpu), cpu_possible_mask)
#define for_each_online_cpu(cpu)   for_each_cpu((cpu), cpu_online_mask)
#define for_each_present_cpu(cpu)  for_each_cpu((cpu), cpu_present_mask)
/* Iterate over every CPU currently marked isolated. */
#define for_each_isolated_cpu(cpu) for_each_cpu((cpu), cpu_isolated_mask)

/* Wrappers for arch boot code to manipulate normally-constant masks */
void init_cpu_present(const struct cpumask *src);
@@ -846,6 +862,15 @@ set_cpu_active(unsigned int cpu, bool active)
		cpumask_clear_cpu(cpu, &__cpu_active_mask);
}

/*
 * Set or clear 'cpu' in the isolated cpumask, mirroring the other
 * set_cpu_*() mask manipulators.
 */
static inline void
set_cpu_isolated(unsigned int cpu, bool isolated)
{
	if (!isolated)
		cpumask_clear_cpu(cpu, &__cpu_isolated_mask);
	else
		cpumask_set_cpu(cpu, &__cpu_isolated_mask);
}


/**
 * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask *
+1 −0
Original line number Diff line number Diff line
@@ -1551,6 +1551,7 @@ static inline bool device_supports_offline(struct device *dev)
extern void lock_device_hotplug(void);
extern void unlock_device_hotplug(void);
extern int lock_device_hotplug_sysfs(void);
extern void lock_device_hotplug_assert(void);
extern int device_offline(struct device *dev);
extern int device_online(struct device *dev);
extern void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
+17 −0
Original line number Diff line number Diff line
@@ -13,6 +13,9 @@

#ifdef CONFIG_LOCKUP_DETECTOR
void lockup_detector_init(void);
extern void watchdog_enable(unsigned int cpu);
extern void watchdog_disable(unsigned int cpu);
extern bool watchdog_configured(unsigned int cpu);
void lockup_detector_soft_poweroff(void);
void lockup_detector_cleanup(void);
bool is_hardlockup(void);
@@ -37,6 +40,20 @@ extern int sysctl_hardlockup_all_cpu_backtrace;
static inline void lockup_detector_init(void) { }
static inline void lockup_detector_soft_poweroff(void) { }
static inline void lockup_detector_cleanup(void) { }
/* No-op stubs for builds without CONFIG_LOCKUP_DETECTOR. */
static inline void watchdog_enable(unsigned int cpu)
{
}
static inline void watchdog_disable(unsigned int cpu)
{
}
static inline bool watchdog_configured(unsigned int cpu)
{
	/*
	 * Pretend the watchdog is always configured.
	 * We will be waiting for the watchdog to be enabled in core isolation
	 */
	return true;
}
#endif /* !CONFIG_LOCKUP_DETECTOR */

#ifdef CONFIG_SOFTLOCKUP_DETECTOR
Loading