From 7d56933ac832a951928bcd5dbc454950b9989673 Mon Sep 17 00:00:00 2001
From: Chris Redpath <chris.redpath@arm.com>
Date: Mon, 17 Jun 2013 15:48:15 +0100
Subject: HMP: Use unweighted load for hmp migration decisions

Normal task and runqueue load is scaled according to priority,
producing a weighted load known as the contribution.

We want the CPU time to be allotted according to priority, but
we also want to make big/little decisions based upon raw load.
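
As a rough sketch (not the exact kernel code, and assuming the
PELT sums have saturated so runnable_avg_period ~= LOAD_AVG_MAX,
with NICE_0_LOAD == 1024), the two metrics differ only in the
weight factor:

	weighted   = runnable_avg_sum * se->load.weight / (LOAD_AVG_MAX + 1);
	unweighted = runnable_avg_sum * 1024            / (LOAD_AVG_MAX + 1);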

It is common, for example, for Android apps that follow the dev
guide to end up with all of their long-running or async action
threads at low priority unless they override the AsyncThread
constructor. These threads run at such a low priority that they
become invisible to the hmp_offload routine.
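
To put illustrative numbers on it (using the standard
prio_to_weight[] table, where nice 0 maps to 1024 and nice 19
maps to 15), a fully busy nice-19 thread works out to roughly:

	weighted   ~= 15 * 47742 / 47743   ~= 15    (looks idle)
	unweighted ~= 1024 * 47742 / 47743 ~= 1023  (clearly saturated)

so the weighted figure sits far below any NICE_0_LOAD-based
threshold while the unweighted one shows a saturated CPU.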

Using unweighted load here allows us to maximise CPU usage in busy
situations.
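
For reference, assuming hmp_task_starvation() returns the
running/runnable ratio scaled to 1024 (consistent with the old
"<25% waiting" comment being removed below), the starvation
check works out as:

	starvation = 1024 * time_running / time_runnable
	starvation > 768  =>  waiting < 25%  =>  not starving, no offload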

Signed-off-by: Chris Redpath <chris.redpath@arm.com>
Signed-off-by: Liviu Dudau <Liviu.Dudau@arm.com>
---
 kernel/sched/fair.c |   33 +++++++++++++++++----------------
 1 file changed, 17 insertions(+), 16 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 90f61d8..a90a638 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3841,20 +3841,24 @@ static inline unsigned int hmp_domain_min_load(struct hmp_domain *hmpd,
 						int *min_cpu)
 {
 	int cpu;
-	int min_load = INT_MAX;
-	int min_cpu_temp = NR_CPUS;
+	int min_cpu_runnable_temp = NR_CPUS;
+	unsigned long min_runnable_load = INT_MAX;
+	unsigned long contrib;
 
 	for_each_cpu_mask(cpu, hmpd->cpus) {
-		if (cpu_rq(cpu)->cfs.tg_load_contrib < min_load) {
-			min_load = cpu_rq(cpu)->cfs.tg_load_contrib;
-			min_cpu_temp = cpu;
+		/* don't use the divisor in the loop, just at the end */
+		contrib = cpu_rq(cpu)->avg.runnable_avg_sum * scale_load_down(1024);
+		if (contrib < min_runnable_load) {
+			min_runnable_load = contrib;
+			min_cpu_runnable_temp = cpu;
 		}
 	}
 
 	if (min_cpu)
-		*min_cpu = min_cpu_temp;
+		*min_cpu = min_cpu_runnable_temp;
 
-	return min_load;
+	/* domain will often have at least one empty CPU */
+	return min_runnable_load ? min_runnable_load / (LOAD_AVG_MAX + 1) : 0;
 }
 
 /*
@@ -3882,22 +3886,18 @@ static inline unsigned int hmp_offload_down(int cpu, struct sched_entity *se)
 		return NR_CPUS;
 
 	/* Is the current domain fully loaded? */
-	/* load < ~94% */
+	/* load < ~50% */
 	min_usage = hmp_domain_min_load(hmp_cpu_domain(cpu), NULL);
-	if (min_usage < NICE_0_LOAD-64)
-		return NR_CPUS;
-
-	/* Is the cpu oversubscribed? */
-	/* load < ~194% */
-	if (cpu_rq(cpu)->cfs.tg_load_contrib < 2*NICE_0_LOAD-64)
+	if (min_usage < (NICE_0_LOAD>>1))
 		return NR_CPUS;
 
 	/* Is the task alone on the cpu? */
-	if (cpu_rq(cpu)->nr_running < 2)
+	if (cpu_rq(cpu)->cfs.nr_running < 2)
 		return NR_CPUS;
 
 	/* Is the task actually starving? */
-	if (hmp_task_starvation(se) > 768) /* <25% waiting */
+	/* >=25% of runnable time spent waiting = starving */
+	if (hmp_task_starvation(se) > 768)
 		return NR_CPUS;
 
 	/* Does the slower domain have spare cycles? */
@@ -3908,6 +3908,7 @@ static inline unsigned int hmp_offload_down(int cpu, struct sched_entity *se)
 
 	if (cpumask_test_cpu(dest_cpu, &hmp_slower_domain(cpu)->cpus))
 		return dest_cpu;
+
 	return NR_CPUS;
 }
 #endif /* CONFIG_SCHED_HMP */
-- 
1.7.9.5
