From: Mark Brown <broonie(a)linaro.org>
serial_s3c.h uses upf_t, which is defined in serial_core.h, but does not
include that header itself, meaning that users which include serial_s3c.h
by itself fail to build.
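For illustration (a hypothetical user, not a file from the tree), a
translation unit like the one below currently fails, since upf_t is only
typedef'd in serial_core.h:

	#include <linux/serial_s3c.h>	/* nothing pulls in serial_core.h yet */

	/*
	 * serial_s3c.h itself uses upf_t, so before this patch the build
	 * already stops inside the header with "unknown type name 'upf_t'";
	 * with the added include both the header and hypothetical users
	 * like this one compile.
	 */
	static upf_t example_uart_flags = UPF_BOOT_AUTOCONF;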
Signed-off-by: Mark Brown <broonie(a)linaro.org>
---
This is needed together with patch 2 to fix build failures in -next; I
didn't have the time/enthusiasm to isolate the issue.
include/linux/serial_s3c.h | 2 ++
1 file changed, 2 insertions(+)
diff --git a/include/linux/serial_s3c.h b/include/linux/serial_s3c.h
index 907d9d1d56cf..44e4a6b92ddb 100644
--- a/include/linux/serial_s3c.h
+++ b/include/linux/serial_s3c.h
@@ -29,6 +29,8 @@
#ifndef __ASM_ARM_REGS_SERIAL_H
#define __ASM_ARM_REGS_SERIAL_H
+#include <linux/serial_core.h>
+
#define S3C2410_URXH (0x24)
#define S3C2410_UTXH (0x20)
#define S3C2410_ULCON (0x00)
--
1.9.0
Hello,
We have been using Linaro's OMAP4 kernels (tilt-3.4 tree). When I checked a few months ago for updates to the tilt trees, I saw that not much development is happening there. Can someone point me to a tree I can use with OMAP4? I tried rebasing tilt-3.4 onto linux-linaro-tracking, but there were *lots* of conflicts. Any help/advice/pointers will be appreciated.
Thanks
Ramakrishnan
Whenever we change the frequency of a CPU, we call the PRECHANGE and
POSTCHANGE notifiers. These must be serialized, i.e. PRECHANGE or POSTCHANGE
shouldn't be called twice in a row. The following examples show why this is
important:
Scenario 1:
-----------
One thread is reading the value of cpuinfo_cur_freq, which will call
__cpufreq_cpu_get()->cpufreq_out_of_sync()->cpufreq_notify_transition().
At the same time the ondemand governor is trying to change the frequency of
the CPU and so sends notifications via ->target().
The notifiers are not serialized, so suppose this is what happens:
- PRECHANGE Notification for freq A (from cpuinfo_cur_freq)
- PRECHANGE Notification for freq B (from target())
- Freq changed by target() to B
- POSTCHANGE Notification for freq B
- POSTCHANGE Notification for freq A
Now the last POSTCHANGE notification was for freq A, while the hardware is
actually running at freq B :)
Where would we break then? In adjust_jiffies() in cpufreq.c and in
cpufreq_callback() in arch/arm/kernel/smp.c (which also adjusts jiffies):
everything based on loops_per_jiffy is broken.
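To make that concrete, here is a small standalone sketch (not kernel code;
the scaling rule is only a simplified model of what adjust_jiffies() /
cpufreq_scale() do) showing how the mis-ordered POSTCHANGE notifications
leave loops_per_jiffy calibrated for freq A while the hardware runs at
freq B:

	#include <stdio.h>

	static unsigned long loops_per_jiffy = 4000000;	/* calibrated at freq A */

	/* cpufreq_scale()-like helper: old * mult / div */
	static unsigned long scale(unsigned long old, unsigned int div,
				   unsigned int mult)
	{
		return (unsigned long)(((unsigned long long)old * mult) / div);
	}

	/* simplified POSTCHANGE handler: rescale to the freq it was told about */
	static void postchange(unsigned int old_khz, unsigned int new_khz)
	{
		loops_per_jiffy = scale(loops_per_jiffy, old_khz, new_khz);
	}

	int main(void)
	{
		unsigned int freq_a = 1000000, freq_b = 500000;	/* kHz */

		postchange(freq_a, freq_b);	/* POSTCHANGE for freq B (from target()) */
		postchange(freq_b, freq_a);	/* stale POSTCHANGE for freq A */

		/* hardware runs at freq B, but loops_per_jiffy is back at the freq A value */
		printf("loops_per_jiffy = %lu, should be %lu for freq B\n",
		       loops_per_jiffy, scale(4000000, freq_a, freq_b));
		return 0;
	}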
Scenario 2:
-----------
The governor is changing the frequency and has called
__cpufreq_driver_target(). At the same time we change scaling_{min|max}_freq
from sysfs, which eventually ends up calling the governor's
CPUFREQ_GOV_LIMITS notification, which also calls __cpufreq_driver_target().
Hence we get concurrent calls to ->target().
And platforms have something like this in their ->target() (like
cpufreq-cpu0, omap, exynos, etc.; a rough sketch follows the list):
A. If new freq is more than old: Increase voltage
B. Change freq
C. If new freq is less than old: decrease voltage
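Only as an illustration (cpu_clk, cpu_reg and volt_for() are made-up names;
the flow is modelled loosely on drivers like cpufreq-cpu0), a ->target()
following steps A/B/C looks roughly like this, and nothing in it serializes
concurrent callers:

	static int example_target(struct cpufreq_policy *policy,
				  unsigned int new_freq)
	{
		struct cpufreq_freqs freqs = {
			.old = policy->cur,
			.new = new_freq,
		};
		int ret = 0;

		cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);

		/* A. New freq higher than old: raise the voltage first */
		if (freqs.new > freqs.old) {
			ret = regulator_set_voltage(cpu_reg, volt_for(freqs.new),
						    volt_for(freqs.new));
			if (ret)
				goto out;
		}

		/* B. Change the frequency (cpufreq works in kHz, clk in Hz) */
		ret = clk_set_rate(cpu_clk, freqs.new * 1000);
		if (ret)
			goto out;

		/* C. New freq lower than old: now it is safe to drop the voltage */
		if (freqs.new < freqs.old)
			regulator_set_voltage(cpu_reg, volt_for(freqs.new),
					      volt_for(freqs.new));

	out:
		cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
		return ret;
	}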
Now, suppose the two concurrent calls to ->target() are X and Y, where X is
trying to increase the freq and Y is trying to decrease it, and this is the
sequence that happens due to the race:
X.A: voltage increased for larger freq
Y.A: nothing happened here
Y.B: freq decreased
Y.C: voltage decreased
X.B: freq increased
X.C: nothing happened..
We ended up setting a frequency which is not supported by the voltage we
have set. That will probably make the clock to the CPU unstable, and the
system might not be usable anymore.
This patch adds protection in cpufreq_notify_transition() to serialize
transitions. It emits a WARN() if a POSTCHANGE notification is sent while we
are not in the middle of a transition, and a PRECHANGE notification is forced
to wait while the current transition is in progress.
Signed-off-by: Viresh Kumar <viresh.kumar(a)linaro.org>
---
This was discussed earlier here:
https://lkml.org/lkml/2013/9/25/402
where Rafael asked for a better fix, as he called the V1 fixes "quick and
dirty". This is another approach, much simpler than the previous one. Please
see if this looks fine. There is a TODO note in there, as I wanted some
suggestions on how exactly we should wait for a transition to finish.
drivers/cpufreq/cpufreq.c | 39 +++++++++++++++++++++++++++++++++++++--
include/linux/cpufreq.h | 2 ++
2 files changed, 39 insertions(+), 2 deletions(-)
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 2677ff1..66bdfff 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -324,6 +324,13 @@ static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
}
}
+static void notify_transition_for_each_cpu(struct cpufreq_policy *policy,
+ struct cpufreq_freqs *freqs, unsigned int state)
+{
+ for_each_cpu(freqs->cpu, policy->cpus)
+ __cpufreq_notify_transition(policy, freqs, state);
+}
+
/**
* cpufreq_notify_transition - call notifier chain and adjust_jiffies
* on frequency transition.
@@ -335,8 +342,35 @@ static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
void cpufreq_notify_transition(struct cpufreq_policy *policy,
struct cpufreq_freqs *freqs, unsigned int state)
{
- for_each_cpu(freqs->cpu, policy->cpus)
- __cpufreq_notify_transition(policy, freqs, state);
+ if ((state != CPUFREQ_PRECHANGE) && (state != CPUFREQ_POSTCHANGE))
+ return notify_transition_for_each_cpu(policy, freqs, state);
+
+ /* Serialize pre-post notifications */
+ mutex_lock(&policy->transition_lock);
+ if (unlikely(WARN_ON(!policy->transition_ongoing &&
+ (state == CPUFREQ_POSTCHANGE)))) {
+ mutex_unlock(&policy->transition_lock);
+ return;
+ }
+
+ if (state == CPUFREQ_PRECHANGE) {
+ while (policy->transition_ongoing) {
+ mutex_unlock(&policy->transition_lock);
+ /* TODO: Can we do something better here? */
+ cpu_relax();
+ mutex_lock(&policy->transition_lock);
+ }
+
+ policy->transition_ongoing = true;
+ mutex_unlock(&policy->transition_lock);
+ }
+
+ notify_transition_for_each_cpu(policy, freqs, state);
+
+ if (state == CPUFREQ_POSTCHANGE) {
+ policy->transition_ongoing = false;
+ mutex_unlock(&policy->transition_lock);
+ }
}
EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
@@ -983,6 +1017,7 @@ static struct cpufreq_policy *cpufreq_policy_alloc(void)
INIT_LIST_HEAD(&policy->policy_list);
init_rwsem(&policy->rwsem);
+ mutex_init(&policy->transition_lock);
return policy;
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 31c431e..e5cebce 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -104,6 +104,8 @@ struct cpufreq_policy {
* __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
*/
struct rw_semaphore rwsem;
+ bool transition_ongoing; /* Tracks transition status */
+ struct mutex transition_lock;
};
/* Only for ACPI */
--
1.7.12.rc2.18.g61b472e
This patchset was previously part of the larger tasks packing patchset [1].
I have split the latter into (at least) 3 different patchsets to make things
easier:
-configuration of sched_domain topology (this patchset)
-update and consolidation of cpu_power
-tasks packing algorithm
Based on Peter Z's proposal [2][3], this patchset modifies the way the
sched_domain levels are configured in order to let architectures add specific
levels, like the current BOOK level or the proposed power gating level for
the ARM architecture.
[1] https://lkml.org/lkml/2013/10/18/121
[2] https://lkml.org/lkml/2013/11/5/239
[3] https://lkml.org/lkml/2013/11/5/449
Vincent Guittot (6):
sched: remove unused SCHED_INIT_NODE
sched: rework of sched_domain topology definition
sched: s390: create a dedicated topology table
sched: powerpc: create a dedicated topology table
sched: add a new SD_SHARE_POWERDOMAIN for sched_domain
sched: ARM: create a dedicated scheduler topology table
arch/arm/kernel/topology.c | 26 ++++
arch/ia64/include/asm/topology.h | 24 ----
arch/metag/include/asm/topology.h | 27 -----
arch/powerpc/kernel/smp.c | 35 ++++--
arch/s390/include/asm/topology.h | 13 +-
arch/s390/kernel/topology.c | 25 ++++
arch/tile/include/asm/topology.h | 33 ------
include/linux/sched.h | 30 +++++
include/linux/topology.h | 128 ++------------------
kernel/sched/core.c | 235 ++++++++++++++++++-------------------
10 files changed, 237 insertions(+), 339 deletions(-)
--
1.7.9.5
Hi Guys,
Here is a series that enables KVM in a V8 BE image. It is an addition
on top of the previously posted V7 BE KVM support [1].
It was tested on aarch64 fast models and an APM Mustang board,
and only with kvmtool at this point. With a V8 BE KVM host it was
verified that a V8 BE guest runs fine and a V8 LE guest runs too.
V8 LE KVM was also regression tested with both a V8 LE guest and a
V8 BE guest. Note that for mixed mode Marc's kvmtool was used and
the guest image had a minor change that treats all virtio as LE.
Note that the first two patches are similar to the V7 BE KVM patches;
the last three are new and specific to the V8 image.
Thanks,
Victor
[1] https://lists.cs.columbia.edu/pipermail/kvmarm/2014-February/009446.html
Victor Kamensky (5):
ARM64: KVM: MMIO support BE host running LE code
ARM64: KVM: set and get of sys registers in BE case
ARM64: KVM: store kvm_vcpu_fault_info esr_el2 as word
ARM64: KVM: vgic_elrsr and vgic_eisr need to be byteswapped in BE case
ARM64: KVM: fix vgic_bitmap_get_reg function for BE 64bit case
arch/arm64/include/asm/kvm_emulate.h | 22 ++++++++++++++++++++
arch/arm64/kvm/hyp.S | 9 ++++++++-
arch/arm64/kvm/sys_regs.c | 39 ++++++++++++++++++++++++++++++------
virt/kvm/arm/vgic.c | 27 +++++++++++++++++++++++--
4 files changed, 88 insertions(+), 9 deletions(-)
--
1.8.1.4
Thanks,
Victor
timer_cpu_notify() should return NOTIFY_OK and nothing else; anything else
would trigger a BUG_ON(). The return value of this routine is already checked
correctly, but the check is done after calling init_timer_stats(). The right
order is to check the error case first and then call init_timer_stats().
Let's do it.
Signed-off-by: Viresh Kumar <viresh.kumar(a)linaro.org>
---
kernel/timer.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/kernel/timer.c b/kernel/timer.c
index f64a98c..e8e7839 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1674,9 +1674,9 @@ void __init init_timers(void)
err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE,
(void *)(long)smp_processor_id());
- init_timer_stats();
-
BUG_ON(err != NOTIFY_OK);
+
+ init_timer_stats();
register_cpu_notifier(&timers_nb);
open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
}
--
1.7.12.rc2.18.g61b472e
From: Mark Brown <broonie(a)linaro.org>
Add support for parsing the explicit topology bindings to discover the
topology of the system.
Since it is not currently clear how to map multi-level clusters for the
scheduler, all leaf clusters are presented to the scheduler at the same
level. This should be enough to provide good support for current systems.
Signed-off-by: Mark Brown <broonie(a)linaro.org>
---
This revision of the patch changes the parsing code to error out on any
failures it detects and discard any information already obtained,
reverting to the default flat topology.
arch/arm64/kernel/topology.c | 172 +++++++++++++++++++++++++++++++++++++++++--
1 file changed, 167 insertions(+), 5 deletions(-)
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index 3e06b0b..8e0f29a 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -17,10 +17,161 @@
#include <linux/percpu.h>
#include <linux/node.h>
#include <linux/nodemask.h>
+#include <linux/of.h>
#include <linux/sched.h>
#include <asm/topology.h>
+#ifdef CONFIG_OF
+static int __init get_cpu_for_node(struct device_node *node)
+{
+ struct device_node *cpu_node;
+ int cpu;
+
+ cpu_node = of_parse_phandle(node, "cpu", 0);
+ if (!cpu_node)
+ return -1;
+
+ for_each_possible_cpu(cpu) {
+ if (of_get_cpu_node(cpu, NULL) == cpu_node)
+ return cpu;
+ }
+
+ pr_crit("Unable to find CPU node for %s\n", cpu_node->full_name);
+ return -1;
+}
+
+static int __init parse_core(struct device_node *core, int cluster_id,
+ int core_id)
+{
+ char name[10];
+ bool leaf = true;
+ int i = 0;
+ int cpu;
+ struct device_node *t;
+
+ do {
+ snprintf(name, sizeof(name), "thread%d", i);
+ t = of_get_child_by_name(core, name);
+ if (t) {
+ leaf = false;
+ cpu = get_cpu_for_node(t);
+ if (cpu >= 0) {
+ cpu_topology[cpu].cluster_id = cluster_id;
+ cpu_topology[cpu].core_id = core_id;
+ cpu_topology[cpu].thread_id = i;
+ } else {
+ pr_err("%s: Can't get CPU for thread\n",
+ t->full_name);
+ return -EINVAL;
+ }
+ }
+ i++;
+ } while (t);
+
+ cpu = get_cpu_for_node(core);
+ if (cpu >= 0) {
+ if (!leaf) {
+ pr_err("%s: Core has both threads and CPU\n",
+ core->full_name);
+ return -EINVAL;
+ }
+
+ cpu_topology[cpu].cluster_id = cluster_id;
+ cpu_topology[cpu].core_id = core_id;
+ } else if (leaf) {
+ pr_err("%s: Can't get CPU for leaf core\n", core->full_name);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __init parse_cluster(struct device_node *cluster, int depth)
+{
+ char name[10];
+ bool leaf = true;
+ bool has_cores = false;
+ struct device_node *c;
+ static int __initdata cluster_id;
+ int core_id = 0;
+ int i, ret;
+
+ /*
+ * First check for child clusters; we currently ignore any
+ * information about the nesting of clusters and present the
+ * scheduler with a flat list of them.
+ */
+ i = 0;
+ do {
+ snprintf(name, sizeof(name), "cluster%d", i);
+ c = of_get_child_by_name(cluster, name);
+ if (c) {
+ parse_cluster(c, depth + 1);
+ leaf = false;
+ }
+ i++;
+ } while (c);
+
+ /* Now check for cores */
+ i = 0;
+ do {
+ snprintf(name, sizeof(name), "core%d", i);
+ c = of_get_child_by_name(cluster, name);
+ if (c) {
+ has_cores = true;
+
+ if (depth == 0)
+ pr_err("%s: cpu-map children should be clusters\n",
+ c->full_name);
+
+ if (leaf) {
+ ret = parse_core(c, cluster_id, core_id++);
+ if (ret != 0) {
+ return ret;
+ }
+ } else {
+ pr_err("%s: Non-leaf cluster with core %s\n",
+ cluster->full_name, name);
+ return -EINVAL;
+ }
+ }
+ i++;
+ } while (c);
+
+ if (leaf && !has_cores)
+ pr_warn("%s: empty cluster\n", cluster->full_name);
+
+ if (leaf)
+ cluster_id++;
+
+ return 0;
+}
+
+static int __init parse_dt_topology(void)
+{
+ struct device_node *cn;
+
+ cn = of_find_node_by_path("/cpus");
+ if (!cn) {
+ pr_err("No CPU information found in DT\n");
+ return 0;
+ }
+
+ /*
+ * When topology is provided cpu-map is essentially a root
+ * cluster with restricted subnodes.
+ */
+ cn = of_get_child_by_name(cn, "cpu-map");
+ if (!cn)
+ return 0;
+ return parse_cluster(cn, 0);
+}
+
+#else
+static inline int parse_dt_topology(void) { return 0; }
+#endif
+
/*
* cpu topology table
*/
@@ -74,11 +225,7 @@ void store_cpu_topology(unsigned int cpuid)
update_siblings_masks(cpuid);
}
-/*
- * init_cpu_topology is called at boot when only one cpu is running
- * which prevent simultaneous write access to cpu_topology array
- */
-void __init init_cpu_topology(void)
+static void __init reset_cpu_topology(void)
{
unsigned int cpu;
@@ -93,3 +240,18 @@ void __init init_cpu_topology(void)
cpumask_clear(&cpu_topo->thread_sibling);
}
}
+
+/*
+ * init_cpu_topology is called at boot when only one cpu is running
+ * which prevent simultaneous write access to cpu_topology array
+ */
+void __init init_cpu_topology(void)
+{
+ int ret;
+
+ reset_cpu_topology();
+
+ ret = parse_dt_topology();
+ if (ret != 0)
+ reset_cpu_topology();
+}
--
1.9.0