author    Keir Fraser <keir.fraser@citrix.com>  2009-05-19 01:31:26 +0100
committer Keir Fraser <keir.fraser@citrix.com>  2009-05-19 01:31:26 +0100
commit    f7a4f4101decbb3a9b977e79cd5ebe6e200e3fc5 (patch)
tree      1ffcd790429c6c77234d6b3f83f0dcc435c9ad4e /xen/drivers/cpufreq
parent    5bc663c75918a223ae43e685ed69e9f09d108d99 (diff)
x86, cpufreq: fix ondemand governor to take aperf/mperf feedback
The APERF/MPERF MSRs provide feedback about the actual frequency over the
elapsed interval, which can differ from the frequency the governor requested.
Currently the ondemand governor takes that feedback into account only on the
frequency-down path; we should do the same when scaling up.

Signed-off-by: Liu, Jinsong <jinsong.liu@intel.com>
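For background on where the average frequency comes from: MPERF ticks at a
constant rate while APERF ticks at the actual delivered clock, so the ratio
of their deltas over a sampling interval scales the base frequency to the
frequency actually achieved. A minimal sketch of that computation (the
standalone helper below is an illustration, not Xen code; in the tree this
lives behind cpufreq_driver_getavg()):

#include <stdint.h>

/* Illustrative only: derive the average delivered frequency from
 * APERF/MPERF deltas sampled over one governor interval:
 * avg = base * (delta_aperf / delta_mperf). */
static uint64_t apmp_avg_freq(uint64_t base_freq_khz,
                              uint64_t delta_aperf, uint64_t delta_mperf)
{
    if (delta_mperf == 0)       /* no reference ticks; fall back to base */
        return base_freq_khz;
    return base_freq_khz * delta_aperf / delta_mperf;
}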
Diffstat (limited to 'xen/drivers/cpufreq')
-rw-r--r--  xen/drivers/cpufreq/cpufreq_ondemand.c  39
1 file changed, 20 insertions(+), 19 deletions(-)
diff --git a/xen/drivers/cpufreq/cpufreq_ondemand.c b/xen/drivers/cpufreq/cpufreq_ondemand.c
index a4ff4f9848..2e5a1c5832 100644
--- a/xen/drivers/cpufreq/cpufreq_ondemand.c
+++ b/xen/drivers/cpufreq/cpufreq_ondemand.c
@@ -97,9 +97,8 @@ int get_cpufreq_ondemand_para(uint32_t *sampling_rate_max,
 static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 {
-    unsigned int load = 0;
-    uint64_t cur_ns, idle_ns, total_ns;
-
+    uint64_t cur_ns, total_ns;
+    uint64_t max_load_freq = 0;
     struct cpufreq_policy *policy;
     unsigned int j;
@@ -121,30 +120,34 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
         return;
 
     /* Get Idle Time */
-    idle_ns = UINT_MAX;
     for_each_cpu_mask(j, policy->cpus) {
-        uint64_t total_idle_ns;
-        unsigned int tmp_idle_ns;
+        uint64_t idle_ns, total_idle_ns;
+        uint64_t load, load_freq, freq_avg;
         struct cpu_dbs_info_s *j_dbs_info;
 
         j_dbs_info = &per_cpu(cpu_dbs_info, j);
         total_idle_ns = get_cpu_idle_time(j);
-        tmp_idle_ns = total_idle_ns - j_dbs_info->prev_cpu_idle;
+        idle_ns = total_idle_ns - j_dbs_info->prev_cpu_idle;
         j_dbs_info->prev_cpu_idle = total_idle_ns;
 
-        if (tmp_idle_ns < idle_ns)
-            idle_ns = tmp_idle_ns;
-    }
+        if (unlikely(total_ns < idle_ns))
+            continue;
 
-    if (likely(total_ns > idle_ns))
-        load = (100 * (total_ns - idle_ns)) / total_ns;
+        load = 100 * (total_ns - idle_ns) / total_ns;
+
+        freq_avg = cpufreq_driver_getavg(j, GOV_GETAVG);
+
+        load_freq = load * freq_avg;
+        if (load_freq > max_load_freq)
+            max_load_freq = load_freq;
+    }
 
     /* Check for frequency increase */
-    if (load > dbs_tuners_ins.up_threshold) {
+    if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) {
         /* if we are already at full speed then break out early */
         if (policy->cur == policy->max)
             return;
-        __cpufreq_driver_target(policy, policy->max,CPUFREQ_RELATION_H);
+        __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
         return;
     }
@@ -158,12 +161,10 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
      * can support the current CPU usage without triggering the up
      * policy. To be safe, we focus 10 points under the threshold.
      */
-    if (load < (dbs_tuners_ins.up_threshold - 10)) {
-        unsigned int freq_next, freq_cur;
-
-        freq_cur = cpufreq_driver_getavg(policy->cpu, GOV_GETAVG);
+    if (max_load_freq < (dbs_tuners_ins.up_threshold - 10) * policy->cur) {
+        uint64_t freq_next;
 
-        freq_next = (freq_cur * load) / (dbs_tuners_ins.up_threshold - 10);
+        freq_next = max_load_freq / (dbs_tuners_ins.up_threshold - 10);
 
         __cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L);
     }
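To see what the rewritten check buys over the old raw-load test, a numeric
sketch with invented values (illustration only, not code from this patch):

#include <stdint.h>

/* Invented sample: the governor last requested 2000 MHz, but
 * APERF/MPERF feedback says the CPU actually averaged 1600 MHz,
 * and idle-time accounting yields a raw load of 90%. */
int should_scale_to_max(void)
{
    uint64_t cur = 2000000;        /* policy->cur, in kHz */
    uint64_t freq_avg = 1600000;   /* as from cpufreq_driver_getavg() */
    uint64_t load = 90;
    uint64_t up_threshold = 80;    /* dbs_tuners_ins.up_threshold */

    /* Old test: load > up_threshold (90 > 80) would jump to max,
     * even though the busy time was inflated by running slower
     * than requested.
     * New test: 90 * 1600000 = 144e6 vs 80 * 2000000 = 160e6, so
     * the frequency-weighted load stays under the threshold: the
     * already-requested 2000 MHz can absorb this workload. */
    return load * freq_avg > up_threshold * cur;
}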