Loading Documentation/devicetree/bindings/arm/msm/hyp_core_ctl.txt 0 → 100644 +15 −0 Original line number Diff line number Diff line Qualcomm Technologies, Inc. Core Control for Hypervisor Required properties: - compatible: should be "qcom,hyp-core-ctl" - reg: An array of u32 values. reg[0] contains the token id to be used for hyp core_ctl system calls to set/get physical CPUs corresponding to the virtual CPUs. reg[1] ... reg[n] indicate the token ids to be used while referring to the virtual CPUs respectively. Example: hyp-core-ctl@346 { compatible = "qcom,hyp-core-ctl"; reg = <0x346 0x347 0x348>; }; drivers/soc/qcom/Kconfig +1 −8 Original line number Diff line number Diff line Loading @@ -846,16 +846,9 @@ endmenu config QCOM_HYP_CORE_CTL bool "CPU reservation scheme for Hypervisor" depends on OKL4_GUEST help This driver reserve the specified CPUS by isolating them. The reserved CPUs can be assigned to the other guest OS by the hypervisor. An offline CPU is considered as a reserved CPU since this OS can't use it. config QCOM_HYP_CORE_CTL_RESERVE_CPUS string "Reserve CPUs for HYP_CORE_CTL" depends on QCOM_HYP_CORE_CTL default "4-5" if ARCH_SDM670 help A compile time knob for specifying the cpumask that contains the CPUs to be reserved by the QCOM_HYP_CORE_CTL driver. 
drivers/soc/qcom/hyp_core_ctl.c +120 −5 Original line number Diff line number Diff line Loading @@ -20,6 +20,24 @@ #include <linux/slab.h> #include <linux/cpuhotplug.h> #include <uapi/linux/sched/types.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/of.h> #include <microvisor/microvisor.h> #define MAX_RESERVE_CPUS (num_possible_cpus()/2) /** * struct hyp_core_ctl_cpumap - vcpu to pcpu mapping for the other guest * @sid: System call id to be used while referring to this vcpu * @pcpu: The physical CPU number corresponding to this vcpu * */ struct hyp_core_ctl_cpu_map { okl4_kcap_t sid; okl4_cpu_id_t pcpu; }; /** * struct hyp_core_ctl_data - The private data structure of this driver Loading @@ -32,6 +50,8 @@ * @our_isolated_cpus: The CPUs isolated by hyp_core_ctl driver. output. * @final_reserved_cpus: The CPUs reserved for the Hypervisor. output. * * @syscall_id: The system call id for manipulating vcpu to pcpu mappings. * @cpumap: The vcpu to pcpu mapping table */ struct hyp_core_ctl_data { spinlock_t lock; Loading @@ -41,6 +61,8 @@ struct hyp_core_ctl_data { cpumask_t reserve_cpus; cpumask_t our_isolated_cpus; cpumask_t final_reserved_cpus; okl4_kcap_t syscall_id; struct hyp_core_ctl_cpu_map cpumap[NR_CPUS]; }; #define CREATE_TRACE_POINTS Loading Loading @@ -190,6 +212,77 @@ static int hyp_core_ctl_hp_online(unsigned int cpu) return 0; } static int hyp_core_ctl_init_reserve_cpus(struct hyp_core_ctl_data *hcd) { struct _okl4_sys_scheduler_affinity_get_return result; int i, ret = 0; cpumask_clear(&hcd->reserve_cpus); for (i = 0; i < MAX_RESERVE_CPUS; i++) { if (hcd->cpumap[i].sid == 0) break; result = _okl4_sys_scheduler_affinity_get(hcd->syscall_id, hcd->cpumap[i].sid); if (result.error != OKL4_ERROR_OK) { pr_err("fail to get pcpu for vcpu%d. 
err=%u\n", i, result.error); ret = -EPERM; break; } hcd->cpumap[i].pcpu = result.cpu_index; cpumask_set_cpu(hcd->cpumap[i].pcpu, &hcd->reserve_cpus); pr_debug("vcpu%u map to pcpu%u\n", i, result.cpu_index); } cpumask_copy(&hcd->final_reserved_cpus, &hcd->reserve_cpus); pr_info("reserve_cpus=%*pbl ret=%d\n", cpumask_pr_args(&hcd->reserve_cpus), ret); return ret; } static int hyp_core_ctl_parse_dt(struct platform_device *pdev, struct hyp_core_ctl_data *hcd) { struct device_node *np = pdev->dev.of_node; int len, ret, i; u32 *reg_values; len = of_property_count_u32_elems(np, "reg"); if (len < 2 || len > MAX_RESERVE_CPUS + 1) { pr_err("incorrect reg dt param. err=%d\n", len); return -EINVAL; } reg_values = kmalloc_array(len, sizeof(*reg_values), GFP_KERNEL); if (!reg_values) return -ENOMEM; ret = of_property_read_u32_array(np, "reg", reg_values, len); if (ret < 0) { pr_err("fail to read reg dt param. err=%d\n", ret); return -EINVAL; } hcd->syscall_id = reg_values[0]; ret = 0; for (i = 1; i < len; i++) { if (reg_values[i] == 0) { ret = -EINVAL; pr_err("incorrect sid for vcpu%d\n", i); } hcd->cpumap[i-1].sid = reg_values[i]; pr_debug("vcpu=%d sid=%u\n", i-1, hcd->cpumap[i-1].sid); } kfree(reg_values); return ret; } static void hyp_core_ctl_enable(bool enable) { spin_lock(&the_hcd->lock); Loading Loading @@ -242,7 +335,7 @@ static struct attribute_group hyp_core_ctl_attr_group = { .name = "hyp_core_ctl", }; static int __init hyp_core_ctl_init(void) static int hyp_core_ctl_probe(struct platform_device *pdev) { int ret; struct hyp_core_ctl_data *hcd; Loading @@ -254,10 +347,15 @@ static int __init hyp_core_ctl_init(void) goto out; } ret = cpulist_parse(CONFIG_QCOM_HYP_CORE_CTL_RESERVE_CPUS, &hcd->reserve_cpus); ret = hyp_core_ctl_parse_dt(pdev, hcd); if (ret < 0) { pr_err("Fail to parse dt. ret=%d\n", ret); goto free_hcd; } ret = hyp_core_ctl_init_reserve_cpus(hcd); if (ret < 0) { pr_err("Incorrect default reserve CPUs. 
ret=%d\n", ret); pr_err("Fail to get reserve CPUs from Hyp. ret=%d\n", ret); goto free_hcd; } Loading Loading @@ -297,4 +395,21 @@ static int __init hyp_core_ctl_init(void) out: return ret; } late_initcall(hyp_core_ctl_init); static const struct of_device_id hyp_core_ctl_match_table[] = { { .compatible = "qcom,hyp-core-ctl" }, {}, }; static struct platform_driver hyp_core_ctl_driver = { .probe = hyp_core_ctl_probe, .driver = { .name = "hyp_core_ctl", .owner = THIS_MODULE, .of_match_table = hyp_core_ctl_match_table, }, }; builtin_platform_driver(hyp_core_ctl_driver); MODULE_DESCRIPTION("Core Control for Hypervisor"); MODULE_LICENSE("GPL v2"); drivers/thermal/cpu_cooling.c +25 −0 Original line number Diff line number Diff line Loading @@ -131,6 +131,24 @@ static DEFINE_IDA(cpufreq_ida); static DEFINE_MUTEX(cooling_list_lock); static LIST_HEAD(cpufreq_cdev_list); static struct cpumask cpus_in_max_cooling_level; static BLOCKING_NOTIFIER_HEAD(cpu_max_cooling_level_notifer); void cpu_cooling_max_level_notifier_register(struct notifier_block *n) { blocking_notifier_chain_register(&cpu_max_cooling_level_notifer, n); } void cpu_cooling_max_level_notifier_unregister(struct notifier_block *n) { blocking_notifier_chain_unregister(&cpu_max_cooling_level_notifer, n); } const struct cpumask *cpu_cooling_get_max_level_cpumask(void) { return &cpus_in_max_cooling_level; } /* Below code defines functions to be used for cpufreq as cooling device */ /** Loading Loading @@ -677,6 +695,9 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev, cpumask_clear_cpu(cpu, &cpus_isolated_by_thermal); } cpumask_set_cpu(cpu, &cpus_in_max_cooling_level); blocking_notifier_call_chain(&cpu_max_cooling_level_notifer, 1, (void *)(long)cpu); return ret; } else if ((prev_state == cpufreq_cdev->max_level) && (state < cpufreq_cdev->max_level)) { Loading @@ -690,6 +711,9 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev, &cpus_isolated_by_thermal)) { 
sched_unisolate_cpu(cpu); } cpumask_clear_cpu(cpu, &cpus_in_max_cooling_level); blocking_notifier_call_chain(&cpu_max_cooling_level_notifer, 0, (void *)(long)cpu); } update_frequency: clip_freq = cpufreq_cdev->freq_table[state].frequency; Loading Loading @@ -1071,6 +1095,7 @@ __cpufreq_cooling_register(struct device_node *np, register_pm_notifier(&cpufreq_cooling_pm_nb); cpumask_clear(&cpus_pending_online); cpumask_clear(&cpus_isolated_by_thermal); cpumask_clear(&cpus_in_max_cooling_level); INIT_WORK(&cpuhp_register_work, register_cdev); queue_work(system_wq, &cpuhp_register_work); } Loading include/linux/cpu_cooling.h +18 −0 Original line number Diff line number Diff line Loading @@ -93,6 +93,9 @@ of_cpufreq_power_cooling_register(struct device_node *np, */ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev); extern void cpu_cooling_max_level_notifier_register(struct notifier_block *n); extern void cpu_cooling_max_level_notifier_unregister(struct notifier_block *n); extern const struct cpumask *cpu_cooling_get_max_level_cpumask(void); #else /* !CONFIG_CPU_THERMAL */ static inline struct thermal_cooling_device * cpufreq_cooling_register(struct cpufreq_policy *policy) Loading Loading @@ -134,6 +137,21 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev) { return; } static inline void cpu_cooling_max_level_notifier_register(struct notifier_block *n) { } static inline void cpu_cooling_max_level_notifier_unregister(struct notifier_block *n) { } static inline const struct cpumask *cpu_cooling_get_max_level_cpumask(void) { return cpu_none_mask; } #endif /* CONFIG_CPU_THERMAL */ #endif /* __CPU_COOLING_H__ */ Loading
Documentation/devicetree/bindings/arm/msm/hyp_core_ctl.txt 0 → 100644 +15 −0 Original line number Diff line number Diff line Qualcomm Technologies, Inc. Core Control for Hypervisor Required properties: - compatible: should be "qcom,hyp-core-ctl" - reg: An array of u32 values. reg[0] contains the token id to be used for hyp core_ctl system calls to set/get physical CPUs corresponding to the virtual CPUs. reg[1] ... reg[n] indicate the token ids to be used while referring to the virtual CPUs respectively. Example: hyp-core-ctl@346 { compatible = "qcom,hyp-core-ctl"; reg = <0x346 0x347 0x348>; };
drivers/soc/qcom/Kconfig +1 −8 Original line number Diff line number Diff line Loading @@ -846,16 +846,9 @@ endmenu config QCOM_HYP_CORE_CTL bool "CPU reservation scheme for Hypervisor" depends on OKL4_GUEST help This driver reserves the specified CPUs by isolating them. The reserved CPUs can be assigned to the other guest OS by the hypervisor. An offline CPU is considered a reserved CPU since this OS can't use it. config QCOM_HYP_CORE_CTL_RESERVE_CPUS string "Reserve CPUs for HYP_CORE_CTL" depends on QCOM_HYP_CORE_CTL default "4-5" if ARCH_SDM670 help A compile-time knob for specifying the cpumask that contains the CPUs to be reserved by the QCOM_HYP_CORE_CTL driver.
drivers/soc/qcom/hyp_core_ctl.c +120 −5 Original line number Diff line number Diff line Loading @@ -20,6 +20,24 @@ #include <linux/slab.h> #include <linux/cpuhotplug.h> #include <uapi/linux/sched/types.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/of.h> #include <microvisor/microvisor.h> #define MAX_RESERVE_CPUS (num_possible_cpus()/2) /** * struct hyp_core_ctl_cpumap - vcpu to pcpu mapping for the other guest * @sid: System call id to be used while referring to this vcpu * @pcpu: The physical CPU number corresponding to this vcpu * */ struct hyp_core_ctl_cpu_map { okl4_kcap_t sid; okl4_cpu_id_t pcpu; }; /** * struct hyp_core_ctl_data - The private data structure of this driver Loading @@ -32,6 +50,8 @@ * @our_isolated_cpus: The CPUs isolated by hyp_core_ctl driver. output. * @final_reserved_cpus: The CPUs reserved for the Hypervisor. output. * * @syscall_id: The system call id for manipulating vcpu to pcpu mappings. * @cpumap: The vcpu to pcpu mapping table */ struct hyp_core_ctl_data { spinlock_t lock; Loading @@ -41,6 +61,8 @@ struct hyp_core_ctl_data { cpumask_t reserve_cpus; cpumask_t our_isolated_cpus; cpumask_t final_reserved_cpus; okl4_kcap_t syscall_id; struct hyp_core_ctl_cpu_map cpumap[NR_CPUS]; }; #define CREATE_TRACE_POINTS Loading Loading @@ -190,6 +212,77 @@ static int hyp_core_ctl_hp_online(unsigned int cpu) return 0; } static int hyp_core_ctl_init_reserve_cpus(struct hyp_core_ctl_data *hcd) { struct _okl4_sys_scheduler_affinity_get_return result; int i, ret = 0; cpumask_clear(&hcd->reserve_cpus); for (i = 0; i < MAX_RESERVE_CPUS; i++) { if (hcd->cpumap[i].sid == 0) break; result = _okl4_sys_scheduler_affinity_get(hcd->syscall_id, hcd->cpumap[i].sid); if (result.error != OKL4_ERROR_OK) { pr_err("fail to get pcpu for vcpu%d. 
err=%u\n", i, result.error); ret = -EPERM; break; } hcd->cpumap[i].pcpu = result.cpu_index; cpumask_set_cpu(hcd->cpumap[i].pcpu, &hcd->reserve_cpus); pr_debug("vcpu%u map to pcpu%u\n", i, result.cpu_index); } cpumask_copy(&hcd->final_reserved_cpus, &hcd->reserve_cpus); pr_info("reserve_cpus=%*pbl ret=%d\n", cpumask_pr_args(&hcd->reserve_cpus), ret); return ret; } static int hyp_core_ctl_parse_dt(struct platform_device *pdev, struct hyp_core_ctl_data *hcd) { struct device_node *np = pdev->dev.of_node; int len, ret, i; u32 *reg_values; len = of_property_count_u32_elems(np, "reg"); if (len < 2 || len > MAX_RESERVE_CPUS + 1) { pr_err("incorrect reg dt param. err=%d\n", len); return -EINVAL; } reg_values = kmalloc_array(len, sizeof(*reg_values), GFP_KERNEL); if (!reg_values) return -ENOMEM; ret = of_property_read_u32_array(np, "reg", reg_values, len); if (ret < 0) { pr_err("fail to read reg dt param. err=%d\n", ret); return -EINVAL; } hcd->syscall_id = reg_values[0]; ret = 0; for (i = 1; i < len; i++) { if (reg_values[i] == 0) { ret = -EINVAL; pr_err("incorrect sid for vcpu%d\n", i); } hcd->cpumap[i-1].sid = reg_values[i]; pr_debug("vcpu=%d sid=%u\n", i-1, hcd->cpumap[i-1].sid); } kfree(reg_values); return ret; } static void hyp_core_ctl_enable(bool enable) { spin_lock(&the_hcd->lock); Loading Loading @@ -242,7 +335,7 @@ static struct attribute_group hyp_core_ctl_attr_group = { .name = "hyp_core_ctl", }; static int __init hyp_core_ctl_init(void) static int hyp_core_ctl_probe(struct platform_device *pdev) { int ret; struct hyp_core_ctl_data *hcd; Loading @@ -254,10 +347,15 @@ static int __init hyp_core_ctl_init(void) goto out; } ret = cpulist_parse(CONFIG_QCOM_HYP_CORE_CTL_RESERVE_CPUS, &hcd->reserve_cpus); ret = hyp_core_ctl_parse_dt(pdev, hcd); if (ret < 0) { pr_err("Fail to parse dt. ret=%d\n", ret); goto free_hcd; } ret = hyp_core_ctl_init_reserve_cpus(hcd); if (ret < 0) { pr_err("Incorrect default reserve CPUs. 
/* Devicetree match table; see the hyp_core_ctl.txt binding document. */
static const struct of_device_id hyp_core_ctl_match_table[] = {
	{ .compatible = "qcom,hyp-core-ctl" },
	{},
};

/*
 * Built-in platform driver: probe runs when a matching DT node exists,
 * replacing the old unconditional late_initcall registration.
 */
static struct platform_driver hyp_core_ctl_driver = {
	.probe = hyp_core_ctl_probe,
	.driver = {
		.name = "hyp_core_ctl",
		.owner = THIS_MODULE,
		.of_match_table = hyp_core_ctl_match_table,
	},
};

builtin_platform_driver(hyp_core_ctl_driver);

MODULE_DESCRIPTION("Core Control for Hypervisor");
MODULE_LICENSE("GPL v2");
/* CPUs currently clamped at their deepest (max) cooling level. */
static struct cpumask cpus_in_max_cooling_level;

/*
 * Notified with event 1 when a CPU enters max cooling level and 0 when it
 * leaves; the notifier data is the cpu number cast to (void *).
 * NOTE(review): "notifer" is a typo in the symbol name, preserved here
 * because it is part of the identifier.
 */
static BLOCKING_NOTIFIER_HEAD(cpu_max_cooling_level_notifer);

/* Register @n for max-cooling-level enter/exit events. */
void cpu_cooling_max_level_notifier_register(struct notifier_block *n)
{
	blocking_notifier_chain_register(&cpu_max_cooling_level_notifer, n);
}

/* Unregister @n from max-cooling-level events. */
void cpu_cooling_max_level_notifier_unregister(struct notifier_block *n)
{
	blocking_notifier_chain_unregister(&cpu_max_cooling_level_notifer, n);
}

/* Return the (live) mask of CPUs currently at max cooling level. */
const struct cpumask *cpu_cooling_get_max_level_cpumask(void)
{
	return &cpus_in_max_cooling_level;
}
/*
 * !CONFIG_CPU_THERMAL stubs: registration is a no-op and the max-level
 * mask is always empty, so callers need no #ifdef of their own.
 */
static inline void cpu_cooling_max_level_notifier_register(struct notifier_block *n)
{
}

static inline void cpu_cooling_max_level_notifier_unregister(struct notifier_block *n)
{
}

static inline const struct cpumask *cpu_cooling_get_max_level_cpumask(void)
{
	return cpu_none_mask;
}