The housekeeping CPU masks, set up by the "isolcpus" and "nohz_full" boot command line options, are used at boot time to exclude selected CPUs from running some kernel background processes to minimize disturbance to latency sensitive userspace applications. Some of the housekeeping CPU masks are also checked at run time to avoid using those isolated CPUs.
The cpuset subsystem is now able to dynamically create a set of isolated CPUs to be used in isolated cpuset partitions. The long term goal is to make the degree of isolation as close as possible to what can be done statically using those boot command line options.
This patch is a step in that direction by providing a new housekeeping_exclude_cpumask() API to exclude only the given cpumask from the housekeeping cpumasks. Existing boot time "isolcpus" and "nohz_full" cpumask setup, if present, can be overwritten.
Two sets of cpumasks are now kept internally. One set is used by the callers while the other set is being updated before the new set is atomically switched in.
Signed-off-by: Waiman Long longman@redhat.com --- include/linux/sched/isolation.h | 6 +++ kernel/sched/isolation.c | 95 +++++++++++++++++++++++++++++---- 2 files changed, 91 insertions(+), 10 deletions(-)
diff --git a/include/linux/sched/isolation.h b/include/linux/sched/isolation.h index d8501f4709b5..af38d21d0d00 100644 --- a/include/linux/sched/isolation.h +++ b/include/linux/sched/isolation.h @@ -32,6 +32,7 @@ extern bool housekeeping_enabled(enum hk_type type); extern void housekeeping_affine(struct task_struct *t, enum hk_type type); extern bool housekeeping_test_cpu(int cpu, enum hk_type type); extern void __init housekeeping_init(void); +extern int housekeeping_exclude_cpumask(struct cpumask *cpumask, unsigned long hk_flags);
#else
@@ -59,6 +60,11 @@ static inline bool housekeeping_test_cpu(int cpu, enum hk_type type) }
static inline void housekeeping_init(void) { } + +static inline int housekeeping_exclude_cpumask(struct cpumask *cpumask, unsigned long hk_flags) +{ + return -EOPNOTSUPP; +} #endif /* CONFIG_CPU_ISOLATION */
static inline bool housekeeping_cpu(int cpu, enum hk_type type) diff --git a/kernel/sched/isolation.c b/kernel/sched/isolation.c index a4cf17b1fab0..3fb0e8ccce26 100644 --- a/kernel/sched/isolation.c +++ b/kernel/sched/isolation.c @@ -19,8 +19,16 @@ enum hk_flags { DEFINE_STATIC_KEY_FALSE(housekeeping_overridden); EXPORT_SYMBOL_GPL(housekeeping_overridden);
+/* + * The housekeeping cpumasks can now be dynamically updated at run time. + * Two sets of cpumasks are kept. One set can be used while the other set is + * being updated concurrently. + */ +static DEFINE_RAW_SPINLOCK(cpumask_lock); struct housekeeping { - cpumask_var_t cpumasks[HK_TYPE_MAX]; + struct cpumask *cpumask_ptrs[HK_TYPE_MAX]; + cpumask_var_t cpumasks[HK_TYPE_MAX][2]; + unsigned int seq_nrs[HK_TYPE_MAX]; unsigned long flags; };
@@ -38,11 +46,13 @@ int housekeeping_any_cpu(enum hk_type type)
if (static_branch_unlikely(&housekeeping_overridden)) { if (housekeeping.flags & BIT(type)) { - cpu = sched_numa_find_closest(housekeeping.cpumasks[type], smp_processor_id()); + struct cpumask *cpumask = READ_ONCE(housekeeping.cpumask_ptrs[type]); + + cpu = sched_numa_find_closest(cpumask, smp_processor_id()); if (cpu < nr_cpu_ids) return cpu;
- cpu = cpumask_any_and_distribute(housekeeping.cpumasks[type], cpu_online_mask); + cpu = cpumask_any_and_distribute(cpumask, cpu_online_mask); if (likely(cpu < nr_cpu_ids)) return cpu; /* @@ -62,7 +72,7 @@ const struct cpumask *housekeeping_cpumask(enum hk_type type) { if (static_branch_unlikely(&housekeeping_overridden)) if (housekeeping.flags & BIT(type)) - return housekeeping.cpumasks[type]; + return READ_ONCE(housekeeping.cpumask_ptrs[type]); return cpu_possible_mask; } EXPORT_SYMBOL_GPL(housekeeping_cpumask); @@ -71,7 +81,7 @@ void housekeeping_affine(struct task_struct *t, enum hk_type type) { if (static_branch_unlikely(&housekeeping_overridden)) if (housekeeping.flags & BIT(type)) - set_cpus_allowed_ptr(t, housekeeping.cpumasks[type]); + set_cpus_allowed_ptr(t, READ_ONCE(housekeeping.cpumask_ptrs[type])); } EXPORT_SYMBOL_GPL(housekeeping_affine);
@@ -79,7 +89,7 @@ bool housekeeping_test_cpu(int cpu, enum hk_type type) { if (static_branch_unlikely(&housekeeping_overridden)) if (housekeeping.flags & BIT(type)) - return cpumask_test_cpu(cpu, housekeeping.cpumasks[type]); + return cpumask_test_cpu(cpu, READ_ONCE(housekeeping.cpumask_ptrs[type])); return true; } EXPORT_SYMBOL_GPL(housekeeping_test_cpu); @@ -98,7 +108,7 @@ void __init housekeeping_init(void)
for_each_set_bit(type, &housekeeping.flags, HK_TYPE_MAX) { /* We need at least one CPU to handle housekeeping work */ - WARN_ON_ONCE(cpumask_empty(housekeeping.cpumasks[type])); + WARN_ON_ONCE(cpumask_empty(housekeeping.cpumask_ptrs[type])); } }
@@ -106,8 +116,10 @@ static void __init housekeeping_setup_type(enum hk_type type, cpumask_var_t housekeeping_staging) {
- alloc_bootmem_cpumask_var(&housekeeping.cpumasks[type]); - cpumask_copy(housekeeping.cpumasks[type], + alloc_bootmem_cpumask_var(&housekeeping.cpumasks[type][0]); + alloc_bootmem_cpumask_var(&housekeeping.cpumasks[type][1]); + housekeeping.cpumask_ptrs[type] = housekeeping.cpumasks[type][0]; + cpumask_copy(housekeeping.cpumask_ptrs[type], housekeeping_staging); }
@@ -161,7 +173,7 @@ static int __init housekeeping_setup(char *str, unsigned long flags)
for_each_set_bit(type, &iter_flags, HK_TYPE_MAX) { if (!cpumask_equal(housekeeping_staging, - housekeeping.cpumasks[type])) { + housekeeping.cpumask_ptrs[type])) { pr_warn("Housekeeping: nohz_full= must match isolcpus=\n"); goto free_housekeeping_staging; } @@ -251,3 +263,66 @@ static int __init housekeeping_isolcpus_setup(char *str) return housekeeping_setup(str, flags); } __setup("isolcpus=", housekeeping_isolcpus_setup); + +/** + * housekeeping_exclude_cpumask - Update housekeeping cpumasks to exclude only the given cpumask + * @cpumask: new cpumask to be excluded from housekeeping cpumasks + * @hk_flags: bit mask of housekeeping types to be excluded + * Return: 0 if successful, error code if an error happens. + * + * Exclude the given cpumask from the housekeeping cpumasks associated with + * the given hk_flags. If the given cpumask is NULL, no CPU will need to be + * excluded. + */ +int housekeeping_exclude_cpumask(struct cpumask *cpumask, unsigned long hk_flags) +{ + unsigned long type; + +#ifdef CONFIG_CPUMASK_OFFSTACK + /* + * Pre-allocate cpumasks, if needed + */ + for_each_set_bit(type, &hk_flags, HK_TYPE_MAX) { + cpumask_var_t mask0, mask1; + + if (housekeeping.cpumask_ptrs[type]) + continue; + if (!zalloc_cpumask_var(&mask0, GFP_KERNEL) || + !zalloc_cpumask_var(&mask1, GFP_KERNEL)) + return -ENOMEM; + + /* + * cpumasks[type][] should be NULL, still do a swap & free + * dance just in case the cpumasks are allocated but + * cpumask_ptrs not setup somehow. 
+ */ + mask0 = xchg(&housekeeping.cpumasks[type][0], mask0); + mask1 = xchg(&housekeeping.cpumasks[type][1], mask1); + free_cpumask_var(mask0); + free_cpumask_var(mask1); + } +#endif + + raw_spin_lock(&cpumask_lock); + + for_each_set_bit(type, &hk_flags, HK_TYPE_MAX) { + int idx = ++housekeeping.seq_nrs[type] & 1; + struct cpumask *dst_cpumask = housekeeping.cpumasks[type][idx]; + + if (!cpumask) { + cpumask_copy(dst_cpumask, cpu_possible_mask); + housekeeping.flags &= ~BIT(type); + } else { + cpumask_andnot(dst_cpumask, cpu_possible_mask, cpumask); + housekeeping.flags |= BIT(type); + } + WRITE_ONCE(housekeeping.cpumask_ptrs[type], dst_cpumask); + } + raw_spin_unlock(&cpumask_lock); + + if (!housekeeping.flags && static_key_enabled(&housekeeping_overridden)) + static_key_disable(&housekeeping_overridden.key); + else if (housekeeping.flags && !static_key_enabled(&housekeeping_overridden)) + static_key_enable(&housekeeping_overridden.key); + return 0; +}