--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -315,7 +315,6 @@ static inline int skip_secondary_calibra
#else
return -ENXIO;
#endif
-
}
/*
@@ -325,20 +324,28 @@ static inline int skip_secondary_calibra
asmlinkage void __cpuinit secondary_start_kernel(void)
{
struct mm_struct *mm = &init_mm;
- unsigned int cpu = smp_processor_id();
+ unsigned int cpu;
pr_debug("CPU%u: Booted secondary processor\n", cpu);
/*
+ * The identity mapping is uncached (strongly ordered), so
+ * switch away from it before attempting any exclusive accesses.
+ */
+ cpu_switch_mm(mm->pgd, mm);
+ enter_lazy_tlb(mm, current);
+ local_flush_tlb_all();
+
+ /*
* All kernel threads share the same mm context; grab a
* reference and switch to it.
*/
+ cpu = smp_processor_id();
atomic_inc(&mm->mm_count);
current->active_mm = mm;
cpumask_set_cpu(cpu, mm_cpumask(mm));
- cpu_switch_mm(mm->pgd, mm);
- enter_lazy_tlb(mm, current);
- local_flush_tlb_all();
+
+ printk("CPU%u: Booted secondary processor\n", cpu);
cpu_init();
preempt_disable();
@@ -349,9 +356,6 @@ asmlinkage void __cpuinit secondary_star
*/
platform_secondary_init(cpu);
- /*
- * Enable local interrupts.
- */
notify_cpu_starting(cpu);
if (skip_secondary_calibrate())
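Note on the reordering above: atomic_inc(&mm->mm_count) and the rest of the early mm bookkeeping are built on LDREX/STREX, and exclusive accesses against the Strongly-ordered identity mapping can fail or fault on ARMv7, which is why cpu_switch_mm() now runs first. As a rough sketch (illustration only, not the kernel's actual atomic.h), atomic_inc() expands to a retry loop like this:

/*
 * Illustration only: approximately what atomic_inc() becomes on ARMv7.
 * LDREX/STREX need Normal memory, so this must run after the switch
 * away from the uncached identity mapping.
 */
static inline void example_atomic_inc(int *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__(
	"1:	ldrex	%0, [%2]\n"	/* exclusive load                 */
	"	add	%0, %0, #1\n"
	"	strex	%1, %0, [%2]\n"	/* %1 == 0 if the store succeeded */
	"	teq	%1, #0\n"
	"	bne	1b"		/* otherwise retry                */
	: "=&r" (result), "=&r" (tmp)
	: "r" (v)
	: "cc", "memory");
}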
--- a/arch/arm/mach-msm/cpufreq.c
+++ b/arch/arm/mach-msm/cpufreq.c
@@ -19,6 +19,7 @@
#include <linux/earlysuspend.h>
#include <linux/init.h>
+#include <linux/module.h>
#include <linux/cpufreq.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
@@ -27,6 +28,7 @@
#include <linux/sched.h>
#include <linux/suspend.h>
#include <mach/socinfo.h>
+#include <mach/cpufreq.h>
#include "acpuclock.h"
@@ -52,10 +54,36 @@ static DEFINE_PER_CPU(struct cpufreq_sus
static int override_cpu;
+struct cpu_freq {
+ uint32_t max;
+ uint32_t min;
+ uint32_t allowed_max;
+ uint32_t allowed_min;
+ uint32_t limits_init;
+};
+
+static DEFINE_PER_CPU(struct cpu_freq, cpu_freq_info);
+
static int set_cpu_freq(struct cpufreq_policy *policy, unsigned int new_freq)
{
int ret = 0;
+ int saved_sched_policy = -EINVAL;
+ int saved_sched_rt_prio = -EINVAL;
struct cpufreq_freqs freqs;
+ struct cpu_freq *limit = &per_cpu(cpu_freq_info, policy->cpu);
+ struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
+
+ if (limit->limits_init) {
+ if (new_freq > limit->allowed_max) {
+ new_freq = limit->allowed_max;
+ pr_debug("max: limiting freq to %d\n", new_freq);
+ }
+
+ if (new_freq < limit->allowed_min) {
+ new_freq = limit->allowed_min;
+ pr_debug("min: limiting freq to %d\n", new_freq);
+ }
+ }
freqs.old = policy->cur;
if (override_cpu) {
@@ -66,15 +94,61 @@ static int set_cpu_freq(struct cpufreq_p
} else
freqs.new = new_freq;
freqs.cpu = policy->cpu;
+
+ /*
+ * Put the caller into SCHED_FIFO priority to avoid cpu starvation
+ * in the acpuclk_set_rate path while increasing frequencies
+ */
+
+ if (freqs.new > freqs.old && current->policy != SCHED_FIFO) {
+ saved_sched_policy = current->policy;
+ saved_sched_rt_prio = current->rt_priority;
+ sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
+ }
+
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+
ret = acpuclk_set_rate(policy->cpu, new_freq, SETRATE_CPUFREQ);
if (!ret)
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ /* Restore priority after clock ramp-up */
+ if (freqs.new > freqs.old && saved_sched_policy >= 0) {
+ param.sched_priority = saved_sched_rt_prio;
+ sched_setscheduler_nocheck(current, saved_sched_policy, &param);
+ }
return ret;
}
#ifdef CONFIG_SMP
+static int __cpuinit msm_cpufreq_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned long)hcpu;
+
+ switch (action) {
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+ per_cpu(cpufreq_suspend, cpu).device_suspended = 0;
+ break;
+ case CPU_DOWN_PREPARE:
+ case CPU_DOWN_PREPARE_FROZEN:
+ mutex_lock(&per_cpu(cpufreq_suspend, cpu).suspend_mutex);
+ per_cpu(cpufreq_suspend, cpu).device_suspended = 1;
+ mutex_unlock(&per_cpu(cpufreq_suspend, cpu).suspend_mutex);
+ break;
+ case CPU_DOWN_FAILED:
+ case CPU_DOWN_FAILED_FROZEN:
+ per_cpu(cpufreq_suspend, cpu).device_suspended = 0;
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block __refdata msm_cpufreq_cpu_notifier = {
+ .notifier_call = msm_cpufreq_cpu_callback,
+};
+
static void set_cpu_work(struct work_struct *work)
{
struct cpufreq_work_struct *cpu_work =
@@ -166,6 +240,72 @@ static int msm_cpufreq_verify(struct cpu
return 0;
}
+static unsigned int msm_cpufreq_get_freq(unsigned int cpu)
+{
+ return acpuclk_get_rate(cpu);
+}
+
+static inline int msm_cpufreq_limits_init(void)
+{
+ int cpu = 0;
+ int i = 0;
+ struct cpufreq_frequency_table *table = NULL;
+ uint32_t min = (uint32_t) -1;
+ uint32_t max = 0;
+ struct cpu_freq *limit = NULL;
+
+ for_each_possible_cpu(cpu) {
+ limit = &per_cpu(cpu_freq_info, cpu);
+ table = cpufreq_frequency_get_table(cpu);
+ if (table == NULL) {
+ pr_err("%s: error reading cpufreq table for cpu %d\n",
+ __func__, cpu);
+ continue;
+ }
+ for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
+ if (table[i].frequency > max)
+ max = table[i].frequency;
+ if (table[i].frequency < min)
+ min = table[i].frequency;
+ }
+ limit->allowed_min = min;
+ limit->allowed_max = max;
+ limit->min = min;
+ limit->max = max;
+ limit->limits_init = 1;
+ }
+
+ return 0;
+}
+
+int msm_cpufreq_set_freq_limits(uint32_t cpu, uint32_t min, uint32_t max)
+{
+ struct cpu_freq *limit = &per_cpu(cpu_freq_info, cpu);
+
+ if (!limit->limits_init)
+ msm_cpufreq_limits_init();
+
+ if ((min != MSM_CPUFREQ_NO_LIMIT) &&
+ min >= limit->min && min <= limit->max)
+ limit->allowed_min = min;
+ else
+ limit->allowed_min = limit->min;
+
+ if ((max != MSM_CPUFREQ_NO_LIMIT) &&
+ max <= limit->max && max >= limit->min)
+ limit->allowed_max = max;
+ else
+ limit->allowed_max = limit->max;
+
+ pr_debug("%s: Limiting cpu %d min = %d, max = %d\n",
+ __func__, cpu,
+ limit->allowed_min, limit->allowed_max);
+
+ return 0;
+}
+EXPORT_SYMBOL(msm_cpufreq_set_freq_limits);
+
static int __cpuinit msm_cpufreq_init(struct cpufreq_policy *policy)
{
int cur_freq;
@@ -301,6 +441,7 @@ static struct cpufreq_driver msm_cpufreq
.init = msm_cpufreq_init,
.verify = msm_cpufreq_verify,
.target = msm_cpufreq_target,
+ .get = msm_cpufreq_get_freq,
.name = "msm",
.attr = msm_freq_attr,
};
@@ -327,6 +468,8 @@ static int __init msm_cpufreq_register(v
msm_cpufreq_wq = create_workqueue("msm-cpufreq");
if (!msm_cpufreq_wq)
return -1;
+
+ register_hotcpu_notifier(&msm_cpufreq_cpu_notifier);
#endif
register_pm_notifier(&msm_cpufreq_pm_notifier);
@@ -334,4 +477,3 @@ static int __init msm_cpufreq_register(v
}
late_initcall(msm_cpufreq_register);
-
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/cpufreq.h
@@ -0,0 +1,42 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_MACH_CPUFREQ_H
+#define __ARCH_ARM_MACH_MSM_MACH_CPUFREQ_H
+
+#define MSM_CPUFREQ_NO_LIMIT 0xFFFFFFFF
+
+#ifdef CONFIG_CPU_FREQ_MSM
+
+/**
+ * msm_cpufreq_set_freq_limits() - Set min/max frequency limits on a cpu
+ *
+ * @cpu: The cpu core for which the limits apply
+ * @min: The min frequency allowed
+ * @max: The max frequency allowed
+ *
+ * If @max or @min is set to MSM_CPUFREQ_NO_LIMIT, the corresponding
+ * bound falls back to the frequency table's max/min for that cpu.
+ *
+ * returns 0 on success, errno on failure
+ */
+extern int msm_cpufreq_set_freq_limits(
+ uint32_t cpu, uint32_t min, uint32_t max);
+#else
+static inline int msm_cpufreq_set_freq_limits(
+ uint32_t cpu, uint32_t min, uint32_t max)
+{
+ return -ENOSYS;
+}
+#endif
+
+#endif /* __ARCH_ARM_MACH_MSM_MACH_CPUFREQ_H */
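For context, a hypothetical caller of the new interface (say, a thermal mitigation driver) would look roughly like the sketch below. The function name, the 1026000 kHz cap, and the cpufreq_update_policy() kick are illustrative assumptions, not part of this patch:

#include <linux/types.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <mach/cpufreq.h>

/* Hypothetical mitigation hook: cap every CPU while hot, lift the cap
 * afterwards.  MSM_CPUFREQ_NO_LIMIT leaves a bound at the frequency
 * table's min/max.
 */
static void example_thermal_limit(bool hot)
{
	uint32_t max = hot ? 1026000 : MSM_CPUFREQ_NO_LIMIT;	/* kHz */
	int cpu;

	for_each_possible_cpu(cpu) {
		msm_cpufreq_set_freq_limits(cpu, MSM_CPUFREQ_NO_LIMIT, max);
		/* The clamp is applied inside set_cpu_freq() on the next
		 * frequency request; forcing a policy re-evaluation makes
		 * it take effect immediately.
		 */
		cpufreq_update_policy(cpu);
	}
}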
--- a/arch/arm/mach-msm/msm-krait-l2-accessors.c
+++ b/arch/arm/mach-msm/msm-krait-l2-accessors.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -29,14 +29,13 @@ u32 set_get_l2_indirect_reg(u32 reg_addr
mb();
asm volatile ("mcr p15, 3, %[l2cpselr], c15, c0, 6\n\t"
+ "isb\n\t"
"mcr p15, 3, %[l2cpdr], c15, c0, 7\n\t"
- :
+ "isb\n\t"
+ "mrc p15, 3, %[l2cpdr_read], c15, c0, 7\n\t"
+ : [l2cpdr_read]"=r" (ret_val)
: [l2cpselr]"r" (reg_addr), [l2cpdr]"r" (val)
);
- isb();
- /* Ensure the value took */
- asm volatile ("mrc p15, 3, %0, c15, c0, 7" : "=r" (ret_val));
-
raw_spin_unlock_irqrestore(&l2_access_lock, flags);
return ret_val;
@@ -53,11 +52,12 @@ void set_l2_indirect_reg(u32 reg_addr, u
raw_spin_lock_irqsave(&l2_access_lock, flags);
mb();
asm volatile ("mcr p15, 3, %[l2cpselr], c15, c0, 6\n\t"
+ "isb\n\t"
"mcr p15, 3, %[l2cpdr], c15, c0, 7\n\t"
+ "isb\n\t"
:
: [l2cpselr]"r" (reg_addr), [l2cpdr]"r" (val)
);
- isb();
raw_spin_unlock_irqrestore(&l2_access_lock, flags);
}
EXPORT_SYMBOL(set_l2_indirect_reg);
@@ -72,6 +72,7 @@ u32 get_l2_indirect_reg(u32 reg_addr)
raw_spin_lock_irqsave(&l2_access_lock, flags);
asm volatile ("mcr p15, 3, %[l2cpselr], c15, c0, 6\n\t"
+ "isb\n\t"
"mrc p15, 3, %[l2cpdr], c15, c0, 7\n\t"
: [l2cpdr]"=r" (val)
: [l2cpselr]"r" (reg_addr)
--- a/arch/arm/mach-msm/msm_rq_stats.c
+++ b/arch/arm/mach-msm/msm_rq_stats.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -11,7 +11,7 @@
*
*/
/*
- * Qualcomm MSM Runqueue Stats Interface for Userspace
+ * Qualcomm MSM Runqueue Stats and CPU Utilization Interface for Userspace
*/
#include <linux/kernel.h>
#include <linux/init.h>
@@ -26,6 +26,9 @@
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/rq_stats.h>
+#include <linux/cpufreq.h>
+#include <linux/kernel_stat.h>
+#include <linux/tick.h>
#ifdef CONFIG_SEC_DVFS_DUAL
#include <linux/cpufreq.h>
@@ -55,6 +58,174 @@ unsigned int get_rq_info(void)
EXPORT_SYMBOL(get_rq_info);
#endif
+struct notifier_block freq_transition;
+struct notifier_block cpu_hotplug;
+
+struct cpu_load_data {
+ cputime64_t prev_cpu_idle;
+ cputime64_t prev_cpu_wall;
+ cputime64_t prev_cpu_iowait;
+ unsigned int avg_load_maxfreq;
+ unsigned int samples;
+ unsigned int window_size;
+ unsigned int cur_freq;
+ unsigned int policy_max;
+ cpumask_var_t related_cpus;
+ struct mutex cpu_load_mutex;
+};
+
+static DEFINE_PER_CPU(struct cpu_load_data, cpuload);
+
+static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
+{
+ u64 idle_time;
+ u64 cur_wall_time;
+ u64 busy_time;
+
+ cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
+
+ busy_time = kstat_cpu(cpu).cpustat.user;
+ busy_time += kstat_cpu(cpu).cpustat.system;
+ busy_time += kstat_cpu(cpu).cpustat.irq;
+ busy_time += kstat_cpu(cpu).cpustat.softirq;
+ busy_time += kstat_cpu(cpu).cpustat.steal;
+ busy_time += kstat_cpu(cpu).cpustat.nice;
+
+ idle_time = cur_wall_time - busy_time;
+ if (wall)
+ *wall = jiffies_to_usecs(cur_wall_time);
+
+ return jiffies_to_usecs(idle_time);
+}
+
+static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
+{
+ u64 idle_time = get_cpu_idle_time_us(cpu, NULL);
+
+ if (idle_time == -1ULL)
+ return get_cpu_idle_time_jiffy(cpu, wall);
+ else
+ idle_time += get_cpu_iowait_time_us(cpu, wall);
+
+ return idle_time;
+}
+
+static inline cputime64_t get_cpu_iowait_time(unsigned int cpu,
+ cputime64_t *wall)
+{
+ u64 iowait_time = get_cpu_iowait_time_us(cpu, wall);
+
+ if (iowait_time == -1ULL)
+ return 0;
+
+ return iowait_time;
+}
+
+static int update_average_load(unsigned int freq, unsigned int cpu)
+{
+
+ struct cpu_load_data *pcpu = &per_cpu(cpuload, cpu);
+ cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time;
+ unsigned int idle_time, wall_time, iowait_time;
+ unsigned int cur_load, load_at_max_freq;
+
+ cur_idle_time = get_cpu_idle_time(cpu, &cur_wall_time);
+ cur_iowait_time = get_cpu_iowait_time(cpu, &cur_wall_time);
+
+ wall_time = (unsigned int) (cur_wall_time - pcpu->prev_cpu_wall);
+ pcpu->prev_cpu_wall = cur_wall_time;
+
+ idle_time = (unsigned int) (cur_idle_time - pcpu->prev_cpu_idle);
+ pcpu->prev_cpu_idle = cur_idle_time;
+
+ iowait_time = (unsigned int) (cur_iowait_time - pcpu->prev_cpu_iowait);
+ pcpu->prev_cpu_iowait = cur_iowait_time;
+
+ if (idle_time >= iowait_time)
+ idle_time -= iowait_time;
+
+ if (unlikely(!wall_time || wall_time < idle_time))
+ return 0;
+
+ cur_load = 100 * (wall_time - idle_time) / wall_time;
+
+ /* Scale the measured load to the policy's max frequency */
+ load_at_max_freq = (cur_load * freq) / pcpu->policy_max;
+
+ if (!pcpu->avg_load_maxfreq) {
+ /* This is the first sample in this window */
+ pcpu->avg_load_maxfreq = load_at_max_freq;
+ pcpu->window_size = wall_time;
+ } else {
+ /*
+ * There is already a sample in this window; fold the new
+ * sample in as a weighted average with the previous value,
+ * weighting each by the wall time it covers.
+ */
+ pcpu->avg_load_maxfreq =
+ ((pcpu->avg_load_maxfreq * pcpu->window_size) +
+ (load_at_max_freq * wall_time)) /
+ (wall_time + pcpu->window_size);
+
+ pcpu->window_size += wall_time;
+ }
+
+ return 0;
+}
+
+static unsigned int report_load_at_max_freq(void)
+{
+ int cpu;
+ struct cpu_load_data *pcpu;
+ unsigned int total_load = 0;
+
+ for_each_online_cpu(cpu) {
+ pcpu = &per_cpu(cpuload, cpu);
+ mutex_lock(&pcpu->cpu_load_mutex);
+ update_average_load(pcpu->cur_freq, cpu);
+ total_load += pcpu->avg_load_maxfreq;
+ pcpu->avg_load_maxfreq = 0;
+ mutex_unlock(&pcpu->cpu_load_mutex);
+ }
+ return total_load;
+}
+
+static int cpufreq_transition_handler(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ struct cpufreq_freqs *freqs = data;
+ struct cpu_load_data *this_cpu = &per_cpu(cpuload, freqs->cpu);
+ int j;
+
+ switch (val) {
+ case CPUFREQ_POSTCHANGE:
+ for_each_cpu(j, this_cpu->related_cpus) {
+ struct cpu_load_data *pcpu = &per_cpu(cpuload, j);
+ mutex_lock(&pcpu->cpu_load_mutex);
+ update_average_load(freqs->old, freqs->cpu);
+ pcpu->cur_freq = freqs->new;
+ mutex_unlock(&pcpu->cpu_load_mutex);
+ }
+ break;
+ }
+ return 0;
+}
+
+static int cpu_hotplug_handler(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ unsigned int cpu = (unsigned long)data;
+ struct cpu_load_data *this_cpu = &per_cpu(cpuload, cpu);
+
+ switch (val) {
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+ this_cpu->avg_load_maxfreq = 0;
+ }
+
+ return NOTIFY_OK;
+}
+
static void def_work_fn(struct work_struct *work)
{
int64_t diff;
@@ -158,7 +329,7 @@ void dual_boost(unsigned int boost_on)
}
#endif
-static ssize_t show_run_queue_avg(struct kobject *kobj,
+static ssize_t run_queue_avg_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
unsigned int val = 0;
@@ -178,6 +349,8 @@ static ssize_t show_run_queue_avg(struct
return snprintf(buf, PAGE_SIZE, "%d.%d\n", val/10, val%10);
}
+static struct kobj_attribute run_queue_avg_attr = __ATTR_RO(run_queue_avg);
+
static ssize_t show_run_queue_poll_ms(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
@@ -212,6 +385,10 @@ static ssize_t store_run_queue_poll_ms(s
return count;
}
+static struct kobj_attribute run_queue_poll_ms_attr =
+ __ATTR(run_queue_poll_ms, S_IWUSR | S_IRUSR, show_run_queue_poll_ms,
+ store_run_queue_poll_ms);
+
static ssize_t show_def_timer_ms(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
@@ -230,67 +407,44 @@ static ssize_t store_def_timer_ms(struct
return count;
}
-#define MSM_RQ_STATS_RO_ATTRIB(att) ({ \
- struct attribute *attrib = NULL; \
- struct kobj_attribute *ptr = NULL; \
- ptr = kzalloc(sizeof(struct kobj_attribute), GFP_KERNEL); \
- if (ptr) { \
- ptr->attr.name = #att; \
- ptr->attr.mode = S_IRUGO; \
- ptr->show = show_##att; \
- ptr->store = NULL; \
- attrib = &ptr->attr; \
- } \
- attrib; })
-
-#define MSM_RQ_STATS_RW_ATTRIB(att) ({ \
- struct attribute *attrib = NULL; \
- struct kobj_attribute *ptr = NULL; \
- ptr = kzalloc(sizeof(struct kobj_attribute), GFP_KERNEL); \
- if (ptr) { \
- ptr->attr.name = #att; \
- ptr->attr.mode = S_IWUSR|S_IRUSR; \
- ptr->show = show_##att; \
- ptr->store = store_##att; \
- attrib = &ptr->attr; \
- } \
- attrib; })
+static struct kobj_attribute def_timer_ms_attr =
+ __ATTR(def_timer_ms, S_IWUSR | S_IRUSR, show_def_timer_ms,
+ store_def_timer_ms);
-static int init_rq_attribs(void)
+static ssize_t show_cpu_normalized_load(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
{
- int i;
- int err = 0;
- const int attr_count = 4;
+ return snprintf(buf, MAX_LONG_SIZE, "%u\n", report_load_at_max_freq());
+}
- struct attribute **attribs =
- kzalloc(sizeof(struct attribute *) * attr_count, GFP_KERNEL);
+static struct kobj_attribute cpu_normalized_load_attr =
+ __ATTR(cpu_normalized_load, S_IWUSR | S_IRUSR, show_cpu_normalized_load,
+ NULL);
+
+static struct attribute *rq_attrs[] = {
+ &cpu_normalized_load_attr.attr,
+ &def_timer_ms_attr.attr,
+ &run_queue_avg_attr.attr,
+ &run_queue_poll_ms_attr.attr,
+ NULL,
+};
+
+static struct attribute_group rq_attr_group = {
+ .attrs = rq_attrs,
+};
- if (!attribs)
- goto rel;
+static int init_rq_attribs(void)
+{
+ int err;
rq_info.rq_avg = 0;
-
- attribs[0] = MSM_RQ_STATS_RW_ATTRIB(def_timer_ms);
- attribs[1] = MSM_RQ_STATS_RO_ATTRIB(run_queue_avg);
- attribs[2] = MSM_RQ_STATS_RW_ATTRIB(run_queue_poll_ms);
- attribs[3] = NULL;
-
- for (i = 0; i < attr_count - 1 ; i++) {
- if (!attribs[i])
- goto rel2;
- }
-
- rq_info.attr_group = kzalloc(sizeof(struct attribute_group),
- GFP_KERNEL);
- if (!rq_info.attr_group)
- goto rel3;
- rq_info.attr_group->attrs = attribs;
+ rq_info.attr_group = &rq_attr_group;
/* Create /sys/devices/system/cpu/cpu0/rq-stats/... */
rq_info.kobj = kobject_create_and_add("rq-stats",
&get_cpu_sysdev(0)->kobj);
if (!rq_info.kobj)
- goto rel3;
+ return -ENOMEM;
err = sysfs_create_group(rq_info.kobj, rq_info.attr_group);
if (err)
@@ -298,24 +452,14 @@ static int init_rq_attribs(void)
else
kobject_uevent(rq_info.kobj, KOBJ_ADD);
- if (!err)
- return err;
-
-rel3:
- kfree(rq_info.attr_group);
- kfree(rq_info.kobj);
-rel2:
- for (i = 0; i < attr_count - 1; i++)
- kfree(attribs[i]);
-rel:
- kfree(attribs);
-
- return -ENOMEM;
+ return err;
}
static int __init msm_rq_stats_init(void)
{
int ret;
+ int i;
+ struct cpufreq_policy cpu_policy;
rq_wq = create_singlethread_workqueue("rq_stats");
BUG_ON(!rq_wq);
@@ -332,6 +476,20 @@ static int __init msm_rq_stats_init(void
ret = init_rq_attribs();
rq_info.init = 1;
+
+ for_each_possible_cpu(i) {
+ struct cpu_load_data *pcpu = &per_cpu(cpuload, i);
+ mutex_init(&pcpu->cpu_load_mutex);
+ cpufreq_get_policy(&cpu_policy, i);
+ pcpu->policy_max = cpu_policy.cpuinfo.max_freq;
+ cpumask_copy(pcpu->related_cpus, cpu_policy.cpus);
+ }
+ freq_transition.notifier_call = cpufreq_transition_handler;
+ cpu_hotplug.notifier_call = cpu_hotplug_handler;
+ cpufreq_register_notifier(&freq_transition,
+ CPUFREQ_TRANSITION_NOTIFIER);
+ register_hotcpu_notifier(&cpu_hotplug);
+
return ret;
}
late_initcall(msm_rq_stats_init);
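The load accounting added above scales each sample to the policy's maximum frequency and folds samples into a wall-time-weighted average per window. A standalone sketch of the same arithmetic, with made-up numbers in the comments (struct and function names here are illustrative):

/* Mirrors the math in update_average_load(); illustration only. */
struct example_sample {
	unsigned int avg_load_maxfreq;	/* running weighted average, %    */
	unsigned int window_size;	/* wall time folded in so far, us */
};

static void example_fold_sample(struct example_sample *s,
				unsigned int wall_time,	 /* us, busy + idle */
				unsigned int idle_time,	 /* us, idle        */
				unsigned int cur_freq,	 /* kHz             */
				unsigned int policy_max) /* kHz             */
{
	/* e.g. 60000 us wall, 30000 us idle -> 50% raw load */
	unsigned int cur_load = 100 * (wall_time - idle_time) / wall_time;

	/* normalise to the max frequency: 50% at 918 MHz on a 1512 MHz
	 * policy reports as 50 * 918000 / 1512000 ~= 30%
	 */
	unsigned int load_at_max_freq = (cur_load * cur_freq) / policy_max;

	if (!s->avg_load_maxfreq) {
		/* first sample in this window */
		s->avg_load_maxfreq = load_at_max_freq;
		s->window_size = wall_time;
	} else {
		/* weight each sample by the wall time it covers */
		s->avg_load_maxfreq =
			((s->avg_load_maxfreq * s->window_size) +
			 (load_at_max_freq * wall_time)) /
			(s->window_size + wall_time);
		s->window_size += wall_time;
	}
}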
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -33,32 +33,6 @@
#include <trace/events/power.h>
#include <linux/semaphore.h>
-unsigned int thermal_max = 1512000;
-
-#if !defined(__MP_DECISION_PATCH__)
-#error "__MP_DECISION_PATCH__ must be defined in cpufreq.c"
-#endif
-/* Description of __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
- *
- * When the kobject of cpufreq's ref count is zero in show/store function,
- * cpufreq_cpu_put() causes a deadlock because the active count of the
- * accessing file is incremented just before calling show/store at
- * fill_read(write)_buffer.
- * (This happens when show/store is called first and then the cpu_down is called
- * before the show/store function is finished)
- * So basically, cpufreq_cpu_put() in show/store must not release the kobject
- * of cpufreq. To make sure that kobj ref count of the cpufreq is not 0 in this
- * case, a per cpu mutex is used.
- * This per cpu mutex wraps the whole show/store function and kobject_put()
- * function in __cpufreq_remove_dev().
- */
- #define __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
-
-
-#ifdef __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
-static DEFINE_PER_CPU(struct mutex, cpufreq_remove_mutex);
-#endif
-
/**
* The "cpufreq driver" - the arch- or hardware-dependent low
* level driver of CPUFreq support, and its spinlock. This lock
@@ -227,27 +201,6 @@ static void cpufreq_cpu_put_sysfs(struct
__cpufreq_cpu_put(data, 1);
}
-#ifdef __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
-/* just peek to see if the cpufreq policy is available.
- * The caller must hold cpufreq_driver_lock
- */
-struct cpufreq_policy *cpufreq_cpu_peek(unsigned int cpu)
-{
- struct cpufreq_policy *data;
-
- if (cpu >= nr_cpu_ids)
- return NULL;
-
- if (!cpufreq_driver)
- return NULL;
-
- /* get the CPU */
- data = per_cpu(cpufreq_cpu_data, cpu);
-
- return data;
-}
-#endif
-
/*********************************************************************
* EXTERNALLY AFFECTING FREQUENCY CHANGES *
*********************************************************************/
@@ -340,30 +293,31 @@ void cpufreq_notify_transition(struct cp
trace_cpu_frequency(freqs->new, freqs->cpu);
srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
CPUFREQ_POSTCHANGE, freqs);
- if (likely(policy) && likely(policy->cpu == freqs->cpu))
+ if (likely(policy) && likely(policy->cpu == freqs->cpu)) {
policy->cur = freqs->new;
+ sysfs_notify(&policy->kobj, NULL, "scaling_cur_freq");
+ }
break;
}
}
EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
-
-#if defined(__MP_DECISION_PATCH__)
-/*
- * cpufreq_notify_utilization - notify CPU userspace abt CPU utilization
+/**
+ * cpufreq_notify_utilization - notify CPU userspace about CPU utilization
* change
*
- * This function calls the sysfs notifiers function.
- * It is called every ondemand load evaluation to compute CPU loading.
+ * This function is called every time the CPU load is evaluated by the
+ * ondemand governor. It notifies userspace of cpu load changes via sysfs.
*/
void cpufreq_notify_utilization(struct cpufreq_policy *policy,
- unsigned int utils)
+ unsigned int util)
{
if (policy)
- policy->utils = utils;
+ policy->util = util;
+
+ if (policy->util >= MIN_CPU_UTIL_NOTIFY)
+ sysfs_notify(&policy->kobj, NULL, "cpu_utilization");
- sysfs_notify(&policy->kobj, NULL, "cpu_utilization");
}
-#endif
/*********************************************************************
* SYSFS INTERFACE *
@@ -445,23 +399,13 @@ static ssize_t show_##file_name \
return sprintf(buf, "%u\n", policy->object); \
}
-#define findmax( a, b ) ( ((a) > (b)) ? (a) : (b) )
-
show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);
-
-static ssize_t show_thermal_max_freq(struct cpufreq_policy *policy, char *buf)
-{
- return sprintf(buf, "%u\n", thermal_max);
-}
-
-#if defined(__MP_DECISION_PATCH__)
-show_one(cpu_utilization, utils);
-#endif
+show_one(cpu_utilization, util);
static int __cpufreq_set_policy(struct cpufreq_policy *data,
struct cpufreq_policy *policy);
@@ -528,27 +472,12 @@ static ssize_t store_scaling_max_freq
cpufreq_set_limit_defered(USER_MAX_START, value);
}
- thermal_max = findmax(policy->max, thermal_max);
-
return count;
}
#else
store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);
#endif
-static ssize_t store_thermal_max_freq
- (struct cpufreq_policy *policy, const char *buf, size_t count)
-{
- unsigned int ret = -EINVAL;
- unsigned int value = 0;
-
- ret = sscanf(buf, "%u", &value);
- if (ret != 1)
- return -EINVAL;
-
- thermal_max = value;
- return count;
-}
/**
* show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
@@ -588,6 +517,9 @@ static ssize_t store_scaling_governor(st
unsigned int ret = -EINVAL;
char str_governor[16];
struct cpufreq_policy new_policy;
+ char *envp[3];
+ char buf1[64];
+ char buf2[64];
ret = cpufreq_get_policy(&new_policy, policy->cpu);
if (ret)
@@ -608,6 +540,15 @@ static ssize_t store_scaling_governor(st
policy->user_policy.policy = policy->policy;
policy->user_policy.governor = policy->governor;
+ sysfs_notify(&policy->kobj, NULL, "scaling_governor");
+
+ snprintf(buf1, sizeof(buf1), "GOV=%s", policy->governor->name);
+ snprintf(buf2, sizeof(buf2), "CPU=%u", policy->cpu);
+ envp[0] = buf1;
+ envp[1] = buf2;
+ envp[2] = NULL;
+ kobject_uevent_env(cpufreq_global_kobject, KOBJ_ADD, envp);
+
if (ret)
return ret;
else
@@ -786,30 +727,24 @@ cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
-#if defined(__MP_DECISION_PATCH__)
cpufreq_freq_attr_ro(cpu_utilization);
-#endif
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);
#ifdef CONFIG_VDD_USERSPACE
-cpufreq_freq_attr_rw(thermal_max_freq);
define_one_global_rw(vdd_levels);
#endif
static struct attribute *default_attrs[] = {
&cpuinfo_min_freq.attr,
&cpuinfo_max_freq.attr,
- &thermal_max_freq.attr,
&cpuinfo_transition_latency.attr,
&scaling_min_freq.attr,
&scaling_max_freq.attr,
-#if defined(__MP_DECISION_PATCH__)
- &cpu_utilization.attr,
-#endif
&affected_cpus.attr,
+ &cpu_utilization.attr,
&related_cpus.attr,
&scaling_governor.attr,
&scaling_driver.attr,
@@ -841,27 +776,6 @@ static ssize_t show(struct kobject *kobj
struct cpufreq_policy *policy = to_policy(kobj);
struct freq_attr *fattr = to_attr(attr);
ssize_t ret = -EINVAL;
-#ifdef __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
- unsigned int cpu;
- unsigned long flags;
-#endif
-
-#ifdef __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
- spin_lock_irqsave(&cpufreq_driver_lock, flags);
- policy = cpufreq_cpu_peek(policy->cpu);
- if (!policy) {
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
- return -EINVAL;
- }
- cpu = policy->cpu;
- if (mutex_trylock(&per_cpu(cpufreq_remove_mutex, cpu)) == 0) {
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
- pr_info("!WARN %s failed because cpu%u is going down\n",
- __func__, cpu);
- return -EINVAL;
- }
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
-#endif
policy = cpufreq_cpu_get_sysfs(policy->cpu);
if (!policy)
goto no_policy;
@@ -878,9 +792,6 @@ static ssize_t show(struct kobject *kobj
fail:
cpufreq_cpu_put_sysfs(policy);
no_policy:
-#ifdef __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
- mutex_unlock(&per_cpu(cpufreq_remove_mutex, cpu));
-#endif
return ret;
}
@@ -890,27 +801,6 @@ static ssize_t store(struct kobject *kob
struct cpufreq_policy *policy = to_policy(kobj);
struct freq_attr *fattr = to_attr(attr);
ssize_t ret = -EINVAL;
-#ifdef __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
- unsigned int cpu;
- unsigned long flags;
-#endif
-
-#ifdef __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
- spin_lock_irqsave(&cpufreq_driver_lock, flags);
- policy = cpufreq_cpu_peek(policy->cpu);
- if (!policy) {
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
- return -EINVAL;
- }
- cpu = policy->cpu;
- if (mutex_trylock(&per_cpu(cpufreq_remove_mutex, cpu)) == 0) {
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
- pr_info("!WARN %s failed because cpu%u is going down\n",
- __func__, cpu);
- return -EINVAL;
- }
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
-#endif
policy = cpufreq_cpu_get_sysfs(policy->cpu);
if (!policy)
goto no_policy;
@@ -927,9 +817,6 @@ static ssize_t store(struct kobject *kob
fail:
cpufreq_cpu_put_sysfs(policy);
no_policy:
-#ifdef __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
- mutex_unlock(&per_cpu(cpufreq_remove_mutex, cpu));
-#endif
return ret;
}
@@ -1390,13 +1277,8 @@ static int __cpufreq_remove_dev(struct s
kobj = &data->kobj;
cmp = &data->kobj_unregister;
unlock_policy_rwsem_write(cpu);
-#ifdef __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
- mutex_lock(&per_cpu(cpufreq_remove_mutex, cpu));
-#endif
kobject_put(kobj);
-#ifdef __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
- mutex_unlock(&per_cpu(cpufreq_remove_mutex, cpu));
-#endif
+
/* we need to make sure that the underlying kobj is actually
* not referenced anymore by anybody before we proceed with
* unloading.
@@ -2098,6 +1980,7 @@ no_policy:
return ret;
}
+
int cpufreq_set_limit(unsigned int flag, unsigned int value)
{
unsigned int max_value = 0;
@@ -2156,6 +2039,12 @@ int cpufreq_set_limit(unsigned int flag,
max_value = user_max_freq_limit;
}
+ /* thermald */
+ if (freq_limit_start_flag & USER_MAX_BIT) {
+ if (max_value > user_max_freq_limit)
+ max_value = user_max_freq_limit;
+ }
+
/* set min freq */
if (freq_limit_start_flag & TOUCH_BOOSTER_FIRST_BIT)
min_value = TOUCH_BOOSTER_FIRST_FREQ_LIMIT;
@@ -2222,6 +2111,7 @@ int cpufreq_set_limit_defered(unsigned i
}
#endif
+
/**
* cpufreq_update_policy - re-evaluate an existing cpufreq policy
* @cpu: CPU which shall be re-evaluated
@@ -2307,7 +2197,7 @@ static int __cpuinit cpufreq_cpu_callbac
}
static struct notifier_block __refdata cpufreq_cpu_notifier = {
- .notifier_call = cpufreq_cpu_callback,
+ .notifier_call = cpufreq_cpu_callback,
};
/*********************************************************************
@@ -2370,14 +2260,14 @@ int cpufreq_register_driver(struct cpufr
}
}
- register_hotcpu_notifier(&cpufreq_cpu_notifier);
- pr_debug("driver %s up and running\n", driver_data->name);
-
#ifdef CONFIG_SEC_DVFS
cpufreq_queue_priv.wq = create_workqueue("cpufreq_queue");
INIT_WORK(&cpufreq_queue_priv.work, cpufreq_set_limit_work);
#endif
+ register_hotcpu_notifier(&cpufreq_cpu_notifier);
+ pr_debug("driver %s up and running\n", driver_data->name);
+
return 0;
err_sysdev_unreg:
sysdev_driver_unregister(&cpu_sysdev_class,
@@ -2436,9 +2326,6 @@ static int __init cpufreq_core_init(void
for_each_possible_cpu(cpu) {
per_cpu(cpufreq_policy_cpu, cpu) = -1;
init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
-#ifdef __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
- mutex_init(&per_cpu(cpufreq_remove_mutex, cpu));
-#endif
}
cpufreq_global_kobject = kobject_create_and_add("cpufreq",
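On the userspace side, the sysfs_notify() calls added for scaling_cur_freq and cpu_utilization are consumed with the usual sysfs poll pattern: read the attribute, poll for POLLPRI | POLLERR, then re-read when woken. A minimal sketch with error handling trimmed (the cpu0 path is the standard cpufreq sysfs location; the program itself is illustrative):

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[16];
	struct pollfd pfd = { .events = POLLPRI | POLLERR };

	pfd.fd = open("/sys/devices/system/cpu/cpu0/cpufreq/cpu_utilization",
		      O_RDONLY);
	if (pfd.fd < 0)
		return 1;

	for (;;) {
		/* reading fetches the value and arms the next notification */
		ssize_t n = pread(pfd.fd, buf, sizeof(buf) - 1, 0);
		if (n <= 0)
			break;
		buf[n] = '\0';
		printf("cpu0 utilization: %s", buf);

		/* sysfs_notify() wakes the poller with POLLPRI | POLLERR */
		if (poll(&pfd, 1, -1) < 0)
			break;
	}
	close(pfd.fd);
	return 0;
}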
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -32,8 +32,6 @@
#define CPUFREQ_TRANSITION_NOTIFIER (0)
#define CPUFREQ_POLICY_NOTIFIER (1)
-#define __MP_DECISION_PATCH__
-
#ifdef CONFIG_CPU_FREQ
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list);
@@ -58,6 +56,10 @@ static inline int cpufreq_unregister_not
#define CPUFREQ_POLICY_POWERSAVE (1)
#define CPUFREQ_POLICY_PERFORMANCE (2)
+/* Minimum CPU utilization (in percent) at which userspace is notified
+ * of cpu utilization changes */
+#define MIN_CPU_UTIL_NOTIFY 40
+
/* Frequency values here are CPU kHz so that hardware which doesn't run
* with some frequencies can complain without having to guess what per
* cent / per mille means.
@@ -98,9 +100,7 @@ struct cpufreq_policy {
unsigned int max; /* in kHz */
unsigned int cur; /* in kHz, only needed if cpufreq
* governors are used */
-#if defined(__MP_DECISION_PATCH__)
- unsigned int utils; /* in %, CPU utilization */
-#endif
+ unsigned int util; /* CPU utilization at max frequency */
unsigned int policy; /* see above */
struct cpufreq_governor *governor; /* see below */
@@ -259,14 +259,9 @@ int cpufreq_register_driver(struct cpufr
int cpufreq_unregister_driver(struct cpufreq_driver *driver_data);
-#if defined(__MP_DECISION_PATCH__)
void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state);
void cpufreq_notify_utilization(struct cpufreq_policy *policy,
unsigned int load);
-#else
-void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state);
-#endif
-
static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy, unsigned int min, unsigned int max)
{
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -49,8 +49,6 @@ static inline s64 div64_s64(s64 dividend
#define div64_long(x,y) div_s64((x),(y))
-#define div64_long(x,y) div_s64((x),(y))
-
#ifndef div_u64_rem
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{