
Commit 2dc36ecf authored by Rafael J. Wysocki

Merge branch 'pm-cpufreq' into pm-opp

parents 19445b25 144c8e17
arch/powerpc/include/asm/opal-api.h +12 −0
@@ -361,6 +361,7 @@ enum opal_msg_type {
	OPAL_MSG_HMI_EVT,
	OPAL_MSG_DPO,
	OPAL_MSG_PRD,
+	OPAL_MSG_OCC,
	OPAL_MSG_TYPE_MAX,
};

@@ -700,6 +701,17 @@ struct opal_prd_msg_header {

struct opal_prd_msg;

+#define OCC_RESET                       0
+#define OCC_LOAD                        1
+#define OCC_THROTTLE                    2
+#define OCC_MAX_THROTTLE_STATUS         5
+
+struct opal_occ_msg {
+	__be64 type;
+	__be64 chip;
+	__be64 throttle_status;
+};
+
/*
 * SG entries
 *
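Note: all three fields of the new opal_occ_msg are big-endian (__be64), so a
kernel-side consumer must byte-swap them before use. A minimal sketch of such
a consumer; the handler name is illustrative, only be64_to_cpu() and the OCC_*
constants above come from this patch:

	static void example_handle_occ_msg(const struct opal_occ_msg *msg)
	{
		u64 type = be64_to_cpu(msg->type);
		u64 chip = be64_to_cpu(msg->chip);
		u64 throttle = be64_to_cpu(msg->throttle_status);

		/* OCC_MAX_THROTTLE_STATUS bounds the valid throttle values */
		if (type == OCC_THROTTLE && throttle <= OCC_MAX_THROTTLE_STATUS)
			pr_info("OCC throttle status %llu on chip %llu\n",
				throttle, chip);
	}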
drivers/acpi/processor_perflib.c +1 −3
@@ -784,9 +784,7 @@ acpi_processor_register_performance(struct acpi_processor_performance

EXPORT_SYMBOL(acpi_processor_register_performance);

-void
-acpi_processor_unregister_performance(struct acpi_processor_performance
-				      *performance, unsigned int cpu)
+void acpi_processor_unregister_performance(unsigned int cpu)
{
	struct acpi_processor *pr;

drivers/cpufreq/acpi-cpufreq.c +51 −42
@@ -65,18 +65,21 @@ enum {
#define MSR_K7_HWCR_CPB_DIS	(1ULL << 25)

struct acpi_cpufreq_data {
-	struct acpi_processor_performance *acpi_data;
	struct cpufreq_frequency_table *freq_table;
	unsigned int resume;
	unsigned int cpu_feature;
+	unsigned int acpi_perf_cpu;
	cpumask_var_t freqdomain_cpus;
};

-static DEFINE_PER_CPU(struct acpi_cpufreq_data *, acfreq_data);

/* acpi_perf_data is a pointer to percpu data. */
static struct acpi_processor_performance __percpu *acpi_perf_data;

+static inline struct acpi_processor_performance *to_perf_data(struct acpi_cpufreq_data *data)
+{
+	return per_cpu_ptr(acpi_perf_data, data->acpi_perf_cpu);
+}

static struct cpufreq_driver acpi_cpufreq_driver;

static unsigned int acpi_pstate_strict;
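Note: the theme of this file's changes is that the per-CPU acfreq_data pointer
goes away; each policy now keeps its driver-private data in policy->driver_data,
and only the owning CPU number (acpi_perf_cpu) is recorded so that the per-CPU
ACPI performance data stays reachable. The resulting lookup pattern, assuming a
valid policy pointer is at hand:

	struct acpi_cpufreq_data *data = policy->driver_data;
	struct acpi_processor_performance *perf = to_perf_data(data);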
@@ -144,7 +147,7 @@ static int _store_boost(int val)

static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf)
{
-	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
+	struct acpi_cpufreq_data *data = policy->driver_data;

	return cpufreq_show_cpus(data->freqdomain_cpus, buf);
}
@@ -202,7 +205,7 @@ static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data)
	struct acpi_processor_performance *perf;
	int i;

-	perf = data->acpi_data;
+	perf = to_perf_data(data);

	for (i = 0; i < perf->state_count; i++) {
		if (value == perf->states[i].status)
@@ -221,7 +224,7 @@ static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)
	else
		msr &= INTEL_MSR_RANGE;

-	perf = data->acpi_data;
+	perf = to_perf_data(data);

	cpufreq_for_each_entry(pos, data->freq_table)
		if (msr == perf->states[pos->driver_data].status)
@@ -327,7 +330,8 @@ static void drv_write(struct drv_cmd *cmd)
	put_cpu();
}

-static u32 get_cur_val(const struct cpumask *mask)
+static u32
+get_cur_val(const struct cpumask *mask, struct acpi_cpufreq_data *data)
{
	struct acpi_processor_performance *perf;
	struct drv_cmd cmd;
@@ -335,7 +339,7 @@ static u32 get_cur_val(const struct cpumask *mask)
	if (unlikely(cpumask_empty(mask)))
		return 0;

-	switch (per_cpu(acfreq_data, cpumask_first(mask))->cpu_feature) {
+	switch (data->cpu_feature) {
	case SYSTEM_INTEL_MSR_CAPABLE:
		cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
		cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
@@ -346,7 +350,7 @@ static u32 get_cur_val(const struct cpumask *mask)
		break;
	case SYSTEM_IO_CAPABLE:
		cmd.type = SYSTEM_IO_CAPABLE;
-		perf = per_cpu(acfreq_data, cpumask_first(mask))->acpi_data;
+		perf = to_perf_data(data);
		cmd.addr.io.port = perf->control_register.address;
		cmd.addr.io.bit_width = perf->control_register.bit_width;
		break;
@@ -364,19 +368,24 @@ static u32 get_cur_val(const struct cpumask *mask)

static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
{
-	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, cpu);
+	struct acpi_cpufreq_data *data;
+	struct cpufreq_policy *policy;
	unsigned int freq;
	unsigned int cached_freq;

	pr_debug("get_cur_freq_on_cpu (%d)\n", cpu);

-	if (unlikely(data == NULL ||
-		     data->acpi_data == NULL || data->freq_table == NULL)) {
+	policy = cpufreq_cpu_get(cpu);
+	if (unlikely(!policy))
		return 0;
-	}
+
+	data = policy->driver_data;
+	cpufreq_cpu_put(policy);
+	if (unlikely(!data || !data->freq_table))
+		return 0;

-	cached_freq = data->freq_table[data->acpi_data->state].frequency;
-	freq = extract_freq(get_cur_val(cpumask_of(cpu)), data);
+	cached_freq = data->freq_table[to_perf_data(data)->state].frequency;
+	freq = extract_freq(get_cur_val(cpumask_of(cpu), data), data);
	if (freq != cached_freq) {
		/*
		 * The dreaded BIOS frequency change behind our back.
@@ -397,7 +406,7 @@ static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq,
	unsigned int i;

	for (i = 0; i < 100; i++) {
-		cur_freq = extract_freq(get_cur_val(mask), data);
+		cur_freq = extract_freq(get_cur_val(mask, data), data);
		if (cur_freq == freq)
			return 1;
		udelay(10);
@@ -408,18 +417,17 @@ static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq,
static int acpi_cpufreq_target(struct cpufreq_policy *policy,
			       unsigned int index)
{
-	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
+	struct acpi_cpufreq_data *data = policy->driver_data;
	struct acpi_processor_performance *perf;
	struct drv_cmd cmd;
	unsigned int next_perf_state = 0; /* Index into perf table */
	int result = 0;

-	if (unlikely(data == NULL ||
-	     data->acpi_data == NULL || data->freq_table == NULL)) {
+	if (unlikely(data == NULL || data->freq_table == NULL)) {
		return -ENODEV;
	}

-	perf = data->acpi_data;
+	perf = to_perf_data(data);
	next_perf_state = data->freq_table[index].driver_data;
	if (perf->state == next_perf_state) {
		if (unlikely(data->resume)) {
@@ -482,8 +490,9 @@ out:
static unsigned long
acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
{
-	struct acpi_processor_performance *perf = data->acpi_data;
+	struct acpi_processor_performance *perf;

+	perf = to_perf_data(data);
	if (cpu_khz) {
		/* search the closest match to cpu_khz */
		unsigned int i;
@@ -672,17 +681,17 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
		goto err_free;
	}

-	data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
-	per_cpu(acfreq_data, cpu) = data;
+	perf = per_cpu_ptr(acpi_perf_data, cpu);
+	data->acpi_perf_cpu = cpu;
+	policy->driver_data = data;

	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
		acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;

-	result = acpi_processor_register_performance(data->acpi_data, cpu);
+	result = acpi_processor_register_performance(perf, cpu);
	if (result)
		goto err_free_mask;

-	perf = data->acpi_data;
	policy->shared_type = perf->shared_type;

	/*
@@ -838,26 +847,25 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
err_freqfree:
	kfree(data->freq_table);
err_unreg:
-	acpi_processor_unregister_performance(perf, cpu);
+	acpi_processor_unregister_performance(cpu);
err_free_mask:
	free_cpumask_var(data->freqdomain_cpus);
err_free:
	kfree(data);
-	per_cpu(acfreq_data, cpu) = NULL;
+	policy->driver_data = NULL;

	return result;
}

static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
-	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
+	struct acpi_cpufreq_data *data = policy->driver_data;

	pr_debug("acpi_cpufreq_cpu_exit\n");

	if (data) {
-		per_cpu(acfreq_data, policy->cpu) = NULL;
-		acpi_processor_unregister_performance(data->acpi_data,
-						      policy->cpu);
+		policy->driver_data = NULL;
+		acpi_processor_unregister_performance(data->acpi_perf_cpu);
		free_cpumask_var(data->freqdomain_cpus);
		kfree(data->freq_table);
		kfree(data);
@@ -868,7 +876,7 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)

static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
{
-	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
+	struct acpi_cpufreq_data *data = policy->driver_data;

	pr_debug("acpi_cpufreq_resume\n");

@@ -880,7 +888,9 @@ static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
static struct freq_attr *acpi_cpufreq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	&freqdomain_cpus,
-	NULL,	/* this is a placeholder for cpb, do not remove */
+#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
+	&cpb,
+#endif
	NULL,
};

@@ -953,17 +963,16 @@ static int __init acpi_cpufreq_init(void)
	 * only if configured. This is considered legacy code, which
	 * will probably be removed at some point in the future.
	 */
-	if (check_amd_hwpstate_cpu(0)) {
-		struct freq_attr **iter;
-
-		pr_debug("adding sysfs entry for cpb\n");
+	if (!check_amd_hwpstate_cpu(0)) {
+		struct freq_attr **attr;

-		for (iter = acpi_cpufreq_attr; *iter != NULL; iter++)
-			;
+		pr_debug("CPB unsupported, do not expose it\n");

-		/* make sure there is a terminator behind it */
-		if (iter[1] == NULL)
-			*iter = &cpb;
+		for (attr = acpi_cpufreq_attr; *attr; attr++)
+			if (*attr == &cpb) {
+				*attr = NULL;
+				break;
+			}
	}
#endif
	acpi_cpufreq_boost_init();
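Note: instead of reserving a NULL placeholder slot and appending &cpb at run
time on capable CPUs, the attribute is now compiled into the array whenever
CONFIG_X86_ACPI_CPUFREQ_CPB is set and removed at run time when hardware
P-states are absent. The trimming idiom in isolation (a sketch, not the driver
code verbatim; NULLing an entry terminates the array at that point, which is
safe here because cpb sits directly before the terminator):

	static void example_remove_attr(struct freq_attr **attrs,
					struct freq_attr *victim)
	{
		struct freq_attr **attr;

		for (attr = attrs; *attr; attr++)
			if (*attr == victim) {
				*attr = NULL;	/* array now ends here */
				break;
			}
	}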
drivers/cpufreq/cpufreq.c +145 −177
@@ -112,12 +112,6 @@ static inline bool has_target(void)
	return cpufreq_driver->target_index || cpufreq_driver->target;
}

-/*
- * rwsem to guarantee that cpufreq driver module doesn't unload during critical
- * sections
- */
-static DECLARE_RWSEM(cpufreq_rwsem);

/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy,
		unsigned int event);
@@ -277,10 +271,6 @@ EXPORT_SYMBOL_GPL(cpufreq_generic_get);
 * If corresponding call cpufreq_cpu_put() isn't made, the policy wouldn't be
 * freed as that depends on the kobj count.
 *
- * It also takes a read-lock of 'cpufreq_rwsem' and doesn't put it back if a
- * valid policy is found. This is done to make sure the driver doesn't get
- * unregistered while the policy is being used.
- *
 * Return: A valid policy on success, otherwise NULL on failure.
 */
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
@@ -291,9 +281,6 @@ struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
	if (WARN_ON(cpu >= nr_cpu_ids))
		return NULL;

-	if (!down_read_trylock(&cpufreq_rwsem))
-		return NULL;

	/* get the cpufreq driver */
	read_lock_irqsave(&cpufreq_driver_lock, flags);

@@ -306,9 +293,6 @@ struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

-	if (!policy)
-		up_read(&cpufreq_rwsem);

	return policy;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
@@ -320,13 +304,10 @@ EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
 *
 * This decrements the kobject reference count incremented earlier by calling
 * cpufreq_cpu_get().
- *
- * It also drops the read-lock of 'cpufreq_rwsem' taken at cpufreq_cpu_get().
 */
void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
	kobject_put(&policy->kobj);
-	up_read(&cpufreq_rwsem);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
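Note: with cpufreq_rwsem gone, cpufreq_cpu_get() and cpufreq_cpu_put() reduce
to taking and dropping a kobject reference on the policy. The usual calling
pattern, as the reworked get_cur_freq_on_cpu() in acpi-cpufreq above uses it:

	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

	if (policy) {
		/* ... use the policy ... */
		cpufreq_cpu_put(policy);
	}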

@@ -851,9 +832,6 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret;

-	if (!down_read_trylock(&cpufreq_rwsem))
-		return -EINVAL;

	down_read(&policy->rwsem);

	if (fattr->show)
@@ -862,7 +840,6 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
		ret = -EIO;

	up_read(&policy->rwsem);
-	up_read(&cpufreq_rwsem);

	return ret;
}
@@ -879,9 +856,6 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
	if (!cpu_online(policy->cpu))
		goto unlock;

-	if (!down_read_trylock(&cpufreq_rwsem))
-		goto unlock;

	down_write(&policy->rwsem);

	/* Updating inactive policies is invalid, so avoid doing that. */
@@ -897,8 +871,6 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,

unlock_policy_rwsem:
	up_write(&policy->rwsem);

-	up_read(&cpufreq_rwsem);
unlock:
	put_online_cpus();

@@ -1027,8 +999,7 @@ static void cpufreq_remove_dev_symlink(struct cpufreq_policy *policy)
	}
}

-static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
-				     struct device *dev)
+static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
{
	struct freq_attr **drv_attr;
	int ret = 0;
@@ -1060,11 +1031,10 @@ static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
	return cpufreq_add_dev_symlink(policy);
}

-static void cpufreq_init_policy(struct cpufreq_policy *policy)
+static int cpufreq_init_policy(struct cpufreq_policy *policy)
{
	struct cpufreq_governor *gov = NULL;
	struct cpufreq_policy new_policy;
-	int ret = 0;

	memcpy(&new_policy, policy, sizeof(*policy));

@@ -1083,16 +1053,10 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy)
		cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);

	/* set default policy */
-	ret = cpufreq_set_policy(policy, &new_policy);
-	if (ret) {
-		pr_debug("setting policy failed\n");
-		if (cpufreq_driver->exit)
-			cpufreq_driver->exit(policy);
-	}
+	return cpufreq_set_policy(policy, &new_policy);
}

-static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
-				  unsigned int cpu, struct device *dev)
+static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
{
	int ret = 0;

@@ -1126,33 +1090,15 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
	return 0;
}

-static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
-{
-	struct cpufreq_policy *policy;
-	unsigned long flags;
-
-	read_lock_irqsave(&cpufreq_driver_lock, flags);
-	policy = per_cpu(cpufreq_cpu_data, cpu);
-	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
-
-	if (likely(policy)) {
-		/* Policy should be inactive here */
-		WARN_ON(!policy_is_inactive(policy));
-
-		down_write(&policy->rwsem);
-		policy->cpu = cpu;
-		policy->governor = NULL;
-		up_write(&policy->rwsem);
-	}
-
-	return policy;
-}

-static struct cpufreq_policy *cpufreq_policy_alloc(struct device *dev)
+static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
{
+	struct device *dev = get_cpu_device(cpu);
	struct cpufreq_policy *policy;
	int ret;

+	if (WARN_ON(!dev))
+		return NULL;

	policy = kzalloc(sizeof(*policy), GFP_KERNEL);
	if (!policy)
		return NULL;
@@ -1180,10 +1126,10 @@ static struct cpufreq_policy *cpufreq_policy_alloc(struct device *dev)
	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update);

-	policy->cpu = dev->id;
+	policy->cpu = cpu;

	/* Set this once on allocation */
-	policy->kobj_cpu = dev->id;
+	policy->kobj_cpu = cpu;

	return policy;

@@ -1245,59 +1191,34 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy, bool notify)
	kfree(policy);
}

-/**
- * cpufreq_add_dev - add a CPU device
- *
- * Adds the cpufreq interface for a CPU device.
- *
- * The Oracle says: try running cpufreq registration/unregistration concurrently
- * with with cpu hotplugging and all hell will break loose. Tried to clean this
- * mess up, but more thorough testing is needed. - Mathieu
- */
-static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
+static int cpufreq_online(unsigned int cpu)
{
-	unsigned int j, cpu = dev->id;
-	int ret = -ENOMEM;
	struct cpufreq_policy *policy;
+	bool new_policy;
	unsigned long flags;
-	bool recover_policy = !sif;

	pr_debug("adding CPU %u\n", cpu);

-	if (cpu_is_offline(cpu)) {
-		/*
-		 * Only possible if we are here from the subsys_interface add
-		 * callback.  A hotplug notifier will follow and we will handle
-		 * it as CPU online then.  For now, just create the sysfs link,
-		 * unless there is no policy or the link is already present.
-		 */
-		policy = per_cpu(cpufreq_cpu_data, cpu);
-		return policy && !cpumask_test_and_set_cpu(cpu, policy->real_cpus)
-			? add_cpu_dev_symlink(policy, cpu) : 0;
-	}
+	unsigned int j;
+	int ret;

-	if (!down_read_trylock(&cpufreq_rwsem))
-		return 0;
+	pr_debug("%s: bringing CPU%u online\n", __func__, cpu);

	/* Check if this CPU already has a policy to manage it */
	policy = per_cpu(cpufreq_cpu_data, cpu);
-	if (policy && !policy_is_inactive(policy)) {
+	if (policy) {
		WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
-		ret = cpufreq_add_policy_cpu(policy, cpu, dev);
-		up_read(&cpufreq_rwsem);
-		return ret;
-	}
+		if (!policy_is_inactive(policy))
+			return cpufreq_add_policy_cpu(policy, cpu);

-	/*
-	 * Restore the saved policy when doing light-weight init and fall back
-	 * to the full init if that fails.
-	 */
-	policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL;
-	if (!policy) {
-		recover_policy = false;
-		policy = cpufreq_policy_alloc(dev);
-		if (!policy)
-			goto nomem_out;
+		/* This is the only online CPU for the policy.  Start over. */
+		new_policy = false;
+		down_write(&policy->rwsem);
+		policy->cpu = cpu;
+		policy->governor = NULL;
+		up_write(&policy->rwsem);
+	} else {
+		new_policy = true;
+		policy = cpufreq_policy_alloc(cpu);
+		if (!policy)
+			return -ENOMEM;
	}

	cpumask_copy(policy->cpus, cpumask_of(cpu));
@@ -1308,17 +1229,17 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
	ret = cpufreq_driver->init(policy);
	if (ret) {
		pr_debug("initialization failed\n");
-		goto err_set_policy_cpu;
+		goto out_free_policy;
	}

	down_write(&policy->rwsem);

-	/* related cpus should atleast have policy->cpus */
+	if (new_policy) {
+		/* related_cpus should at least include policy->cpus. */
		cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);

-	/* Remember which CPUs have been present at the policy creation time. */
-	if (!recover_policy)
+		/* Remember CPUs present at the policy creation time. */
		cpumask_and(policy->real_cpus, policy->cpus, cpu_present_mask);
+	}

	/*
	 * affected cpus must always be the one, which are online. We aren't
@@ -1326,7 +1247,7 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
	 */
	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

-	if (!recover_policy) {
+	if (new_policy) {
		policy->user_policy.min = policy->min;
		policy->user_policy.max = policy->max;

@@ -1340,7 +1261,7 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
		policy->cur = cpufreq_driver->get(policy->cpu);
		if (!policy->cur) {
			pr_err("%s: ->get() failed\n", __func__);
-			goto err_get_freq;
+			goto out_exit_policy;
		}
	}

@@ -1387,10 +1308,10 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				     CPUFREQ_START, policy);

-	if (!recover_policy) {
-		ret = cpufreq_add_dev_interface(policy, dev);
+	if (new_policy) {
+		ret = cpufreq_add_dev_interface(policy);
		if (ret)
-			goto err_out_unregister;
+			goto out_exit_policy;
		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				CPUFREQ_CREATE_POLICY, policy);

@@ -1399,9 +1320,16 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	}

-	cpufreq_init_policy(policy);
+	ret = cpufreq_init_policy(policy);
+	if (ret) {
+		pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
+		       __func__, cpu, ret);
+		/* cpufreq_policy_free() will notify based on this */
+		new_policy = false;
+		goto out_exit_policy;
+	}

-	if (!recover_policy) {
+	if (new_policy) {
		policy->user_policy.policy = policy->policy;
		policy->user_policy.governor = policy->governor;
	}
@@ -1409,8 +1337,6 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)

	kobject_uevent(&policy->kobj, KOBJ_ADD);

-	up_read(&cpufreq_rwsem);

	/* Callback for handling stuff after policy is ready */
	if (cpufreq_driver->ready)
		cpufreq_driver->ready(policy);
@@ -1419,24 +1345,47 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)

	return 0;

-err_out_unregister:
-err_get_freq:
+out_exit_policy:
	up_write(&policy->rwsem);

	if (cpufreq_driver->exit)
		cpufreq_driver->exit(policy);
-err_set_policy_cpu:
-	cpufreq_policy_free(policy, recover_policy);
-nomem_out:
-	up_read(&cpufreq_rwsem);
+out_free_policy:
+	cpufreq_policy_free(policy, !new_policy);
	return ret;
}

+/**
+ * cpufreq_add_dev - the cpufreq interface for a CPU device.
+ * @dev: CPU device.
+ * @sif: Subsystem interface structure pointer (not used)
+ */
+static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
+{
+	unsigned cpu = dev->id;
+	int ret;
+
+	dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);
+
+	if (cpu_online(cpu)) {
+		ret = cpufreq_online(cpu);
+	} else {
+		/*
+		 * A hotplug notifier will follow and we will handle it as CPU
+		 * online then.  For now, just create the sysfs link, unless
+		 * there is no policy or the link is already present.
+		 */
+		struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
+
+		ret = policy && !cpumask_test_and_set_cpu(cpu, policy->real_cpus)
+			? add_cpu_dev_symlink(policy, cpu) : 0;
+	}
+
+	return ret;
+}

-static int __cpufreq_remove_dev_prepare(struct device *dev)
+static void cpufreq_offline_prepare(unsigned int cpu)
{
-	unsigned int cpu = dev->id;
-	int ret = 0;
	struct cpufreq_policy *policy;

	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
@@ -1444,11 +1393,11 @@ static int __cpufreq_remove_dev_prepare(struct device *dev)
	policy = cpufreq_cpu_get_raw(cpu);
	if (!policy) {
		pr_debug("%s: No cpu_data found\n", __func__);
-		return -EINVAL;
+		return;
	}

	if (has_target()) {
-		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+		int ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		if (ret)
			pr_err("%s: Failed to stop governor\n", __func__);
	}
@@ -1469,7 +1418,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev)
	/* Start governor again for active policy */
	if (!policy_is_inactive(policy)) {
		if (has_target()) {
-			ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
+			int ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
			if (!ret)
				ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);

@@ -1479,28 +1428,24 @@ static int __cpufreq_remove_dev_prepare(struct device *dev)
	} else if (cpufreq_driver->stop_cpu) {
		cpufreq_driver->stop_cpu(policy);
	}

-	return ret;
}

-static int __cpufreq_remove_dev_finish(struct device *dev)
+static void cpufreq_offline_finish(unsigned int cpu)
{
-	unsigned int cpu = dev->id;
-	int ret;
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

	if (!policy) {
		pr_debug("%s: No cpu_data found\n", __func__);
-		return -EINVAL;
+		return;
	}

	/* Only proceed for inactive policies */
	if (!policy_is_inactive(policy))
-		return 0;
+		return;

	/* If cpu is last user of policy, free policy */
	if (has_target()) {
-		ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+		int ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
		if (ret)
			pr_err("%s: Failed to exit governor\n", __func__);
	}
@@ -1512,8 +1457,6 @@ static int __cpufreq_remove_dev_finish(struct device *dev)
	 */
	if (cpufreq_driver->exit)
		cpufreq_driver->exit(policy);

-	return 0;
}

/**
@@ -1530,8 +1473,8 @@ static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
		return 0;

	if (cpu_online(cpu)) {
-		__cpufreq_remove_dev_prepare(dev);
-		__cpufreq_remove_dev_finish(dev);
+		cpufreq_offline_prepare(cpu);
+		cpufreq_offline_finish(cpu);
	}

	cpumask_clear_cpu(cpu, policy->real_cpus);
@@ -2247,7 +2190,11 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,

	memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));

-	if (new_policy->min > policy->max || new_policy->max < policy->min)
+	/*
+	 * This check works well when we store new min/max freq attributes,
+	 * because new_policy is a copy of policy with one field updated.
+	 */
+	if (new_policy->min > new_policy->max)
		return -EINVAL;

	/* verify the cpu speed can be set within this limit */
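Note: the relaxed check relies on how the sysfs store path builds new_policy:
a byte copy of the current policy with exactly one bound overwritten, so
comparing the copy's own min and max is sufficient. A sketch of that store
path (requested_min is a hypothetical value parsed from the sysfs write):

	struct cpufreq_policy new_policy;
	int ret;

	memcpy(&new_policy, policy, sizeof(*policy));
	new_policy.min = requested_min;		/* the single updated field */
	ret = cpufreq_set_policy(policy, &new_policy);	/* fails if min > max */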
@@ -2296,16 +2243,31 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
	old_gov = policy->governor;
	/* end old governor */
	if (old_gov) {
-		__cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+		if (ret) {
+			/* This can happen due to race with other operations */
+			pr_debug("%s: Failed to Stop Governor: %s (%d)\n",
+				 __func__, old_gov->name, ret);
+			return ret;
+		}

		up_write(&policy->rwsem);
-		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+		ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
		down_write(&policy->rwsem);

+		if (ret) {
+			pr_err("%s: Failed to Exit Governor: %s (%d)\n",
+			       __func__, old_gov->name, ret);
+			return ret;
+		}
	}

	/* start new governor */
	policy->governor = new_policy->governor;
-	if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
-		if (!__cpufreq_governor(policy, CPUFREQ_GOV_START))
+	ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
+	if (!ret) {
+		ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
+		if (!ret)
			goto out;

		up_write(&policy->rwsem);
@@ -2317,11 +2279,13 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
	pr_debug("starting governor %s failed\n", policy->governor->name);
	if (old_gov) {
		policy->governor = old_gov;
-		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
+		if (__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT))
+			policy->governor = NULL;
+		else
+			__cpufreq_governor(policy, CPUFREQ_GOV_START);
	}

-	return -EINVAL;
+	return ret;

 out:
	pr_debug("governor: change or update limits\n");
@@ -2387,28 +2351,24 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
-	struct device *dev;
-
-	dev = get_cpu_device(cpu);
-	if (dev) {
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
-			cpufreq_add_dev(dev, NULL);
+		cpufreq_online(cpu);
		break;

	case CPU_DOWN_PREPARE:
-			__cpufreq_remove_dev_prepare(dev);
+		cpufreq_offline_prepare(cpu);
		break;

	case CPU_POST_DEAD:
-			__cpufreq_remove_dev_finish(dev);
+		cpufreq_offline_finish(cpu);
		break;

	case CPU_DOWN_FAILED:
-			cpufreq_add_dev(dev, NULL);
+		cpufreq_online(cpu);
		break;
	}
-	}
	return NOTIFY_OK;
}

@@ -2515,10 +2475,14 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)

	pr_debug("trying to register driver %s\n", driver_data->name);

+	/* Protect against concurrent CPU online/offline. */
+	get_online_cpus();

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
-		return -EEXIST;
+		ret = -EEXIST;
+		goto out;
	}
	cpufreq_driver = driver_data;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
@@ -2557,7 +2521,10 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
	register_hotcpu_notifier(&cpufreq_cpu_notifier);
	pr_debug("driver %s up and running\n", driver_data->name);

-	return 0;
+out:
+	put_online_cpus();
+	return ret;

err_if_unreg:
	subsys_interface_unregister(&cpufreq_interface);
err_boost_unreg:
@@ -2567,7 +2534,7 @@ err_null_driver:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
-	return ret;
+	goto out;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);

@@ -2588,19 +2555,20 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)

	pr_debug("unregistering driver %s\n", driver->name);

+	/* Protect against concurrent cpu hotplug */
+	get_online_cpus();
	subsys_interface_unregister(&cpufreq_interface);
	if (cpufreq_boost_supported())
		cpufreq_sysfs_remove_file(&boost.attr);

	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);

-	down_write(&cpufreq_rwsem);
	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpufreq_driver = NULL;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
-	up_write(&cpufreq_rwsem);
+	put_online_cpus();

	return 0;
}
drivers/cpufreq/cpufreq_conservative.c +7 −18
@@ -47,7 +47,7 @@ static inline unsigned int get_freq_target(struct cs_dbs_tuners *cs_tuners,
static void cs_check_cpu(int cpu, unsigned int load)
{
	struct cs_cpu_dbs_info_s *dbs_info = &per_cpu(cs_cpu_dbs_info, cpu);
-	struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
+	struct cpufreq_policy *policy = dbs_info->cdbs.shared->policy;
	struct dbs_data *dbs_data = policy->governor_data;
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;

@@ -102,26 +102,15 @@ static void cs_check_cpu(int cpu, unsigned int load)
	}
}

-static void cs_dbs_timer(struct work_struct *work)
+static unsigned int cs_dbs_timer(struct cpu_dbs_info *cdbs,
+				 struct dbs_data *dbs_data, bool modify_all)
{
-	struct cs_cpu_dbs_info_s *dbs_info = container_of(work,
-			struct cs_cpu_dbs_info_s, cdbs.work.work);
-	unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
-	struct cs_cpu_dbs_info_s *core_dbs_info = &per_cpu(cs_cpu_dbs_info,
-			cpu);
-	struct dbs_data *dbs_data = dbs_info->cdbs.cur_policy->governor_data;
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
-	int delay = delay_for_sampling_rate(cs_tuners->sampling_rate);
-	bool modify_all = true;

-	mutex_lock(&core_dbs_info->cdbs.timer_mutex);
-	if (!need_load_eval(&core_dbs_info->cdbs, cs_tuners->sampling_rate))
-		modify_all = false;
-	else
-		dbs_check_cpu(dbs_data, cpu);
+	if (modify_all)
+		dbs_check_cpu(dbs_data, cdbs->shared->policy->cpu);

-	gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy, delay, modify_all);
-	mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
+	return delay_for_sampling_rate(cs_tuners->sampling_rate);
}
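Note: the timer callback sheds its locking and re-queueing duties; the common
governor code (not part of this diff) presumably holds timer_mutex, computes
modify_all via need_load_eval() and re-queues the work using the returned
delay. A sketch of how that caller might consume the new callback, under that
assumption (the field and variable names here are guesses, not from this diff):

	/* assumed caller, somewhere in the common governor code */
	delay = cdata->gov_dbs_timer(cdbs, dbs_data, modify_all);
	gov_queue_work(dbs_data, policy, delay, modify_all);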

static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
@@ -135,7 +124,7 @@ static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
	if (!dbs_info->enable)
		return 0;

-	policy = dbs_info->cdbs.cur_policy;
+	policy = dbs_info->cdbs.shared->policy;

	/*
	 * we only care if our internally tracked freq moves outside the 'valid'