A cpuset partition is a collection of cpusets consisting of a partition
root and all its descendants, excluding any cpusets that are part of
other partitions. A partition has exclusive access to the set of CPUs
granted to it; cpusets outside the partition cannot use any CPU in
that set.
Currently, creating a partition requires a hierarchical CPU
distribution model where the parent of a partition root must itself be
a partition root. Hence all partition roots have to be clustered
around the cgroup root.
To enable the creation of a remote partition down in the hierarchy
without a parental partition root, we need a way to reserve the CPUs
that will be used in a remote partition. Introduce a new root-only
"cpuset.cpus.reserve" control file in the top cpuset for this particular
purpose.
By default, the new "cpuset.cpus.reserve" control file will track
the subparts_cpus cpumask in the top cpuset. By writing into this new
control file, however, we can reserve additional CPUs that can be used
in a remote partition. Any CPUs in "cpuset.cpus.reserve" will have to
be removed from the effective_cpus of all the cpusets that are not part
of a valid partition.
The prefix "+" and "-" can be used to indicate the addition to or the
subtraction from the existing CPUs in "cpuset.cpus.reserve". A single
"-" character indicate the deletion of all the free reserve CPUs not
allocated to any existing partition.
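For example, assuming a cgroup v2 hierarchy mounted at /sys/fs/cgroup
(a sketch only; the CPU numbers below are illustrative):

  # Reserve CPUs 2-5, then grow and shrink the reserve.
  echo 2-5 > /sys/fs/cgroup/cpuset.cpus.reserve
  echo +6 > /sys/fs/cgroup/cpuset.cpus.reserve
  echo -5,6 > /sys/fs/cgroup/cpuset.cpus.reserve
  # Release all the free reserve CPUs not allocated to any partition.
  echo - > /sys/fs/cgroup/cpuset.cpus.reserve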
Signed-off-by: Waiman Long <longman@redhat.com>
---
kernel/cgroup/cpuset.c | 253 ++++++++++++++++++++++++++++++++++++++---
1 file changed, 239 insertions(+), 14 deletions(-)
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 8604c919e1e4..69abe95a9969 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -208,7 +208,33 @@ struct cpuset {
struct cgroup_file partition_file;
};
-static cpumask_var_t cs_tmp_cpus; /* Temp cpumask for partition */
+/*
+ * Reserved CPUs for partitions.
+ *
+ * By default, CPUs used in partitions are tracked in the parent's
+ * subparts_cpus mask following a hierarchical CPU distribution model.
+ * To enable the creation of a remote partition down in the hierarchy
+ * without a parental partition root, one can write directly to
+ * cpuset.cpus.reserve in the root cgroup to allocate more CPUs that can
+ * be used by remote partitions. Removal of existing reserved CPUs may
+ * also cause some existing partitions to become invalid.
+ *
+ * All the cpumasks below should only be used with cpuset_mutex held.
+ * Modification of cs_reserve_cpus & cs_free_reserve_cpus also requires
+ * holding the callback_lock.
+ *
+ * The relationships among cs_reserve_cpus, cs_free_reserve_cpus and
+ * top_cpuset.subparts_cpus are:
+ *
+ * top_cpuset.subparts_cpus ⊆ cs_reserve_cpus
+ * cs_free_reserve_cpus ⊆ cs_reserve_cpus
+ * top_cpuset.subparts_cpus ∩ cs_free_reserve_cpus = ∅
+ * cs_reserve_cpus - cs_free_reserve_cpus - top_cpuset.subparts_cpus
+ * = CPUs dedicated to remote partitions
+ */
+static cpumask_var_t cs_reserve_cpus; /* Reserved CPUs */
+static cpumask_var_t cs_free_reserve_cpus; /* Unallocated reserved CPUs */
+static cpumask_var_t cs_tmp_cpus; /* Temp cpumask for partition */
/*
* Partition root states:
@@ -1202,13 +1228,13 @@ static void rebuild_sched_domains_locked(void)
* should be the same as the active CPUs, so checking only top_cpuset
* is enough to detect racing CPU offlines.
*/
- if (!top_cpuset.nr_subparts_cpus &&
+ if (cpumask_empty(cs_reserve_cpus) &&
!cpumask_equal(top_cpuset.effective_cpus, cpu_active_mask))
return;
/*
* With subpartition CPUs, however, the effective CPUs of a partition
- * root should be only a subset of the active CPUs. Since a CPU in any
+ * root should only be a subset of the active CPUs. Since a CPU in any
* partition root could be offlined, all must be checked.
*/
if (top_cpuset.nr_subparts_cpus) {
@@ -1275,7 +1301,7 @@ static void update_tasks_cpumask(struct cpuset *cs, struct cpumask *new_cpus)
*/
if ((task->flags & PF_KTHREAD) && kthread_is_per_cpu(task))
continue;
- cpumask_andnot(new_cpus, possible_mask, cs->subparts_cpus);
+ cpumask_andnot(new_cpus, possible_mask, cs_reserve_cpus);
} else {
cpumask_and(new_cpus, possible_mask, cs->effective_cpus);
}
@@ -1406,6 +1432,7 @@ static int update_parent_subparts_cpumask(struct cpuset *cs, int cmd,
int deleting; /* Moving cpus from subparts_cpus to effective_cpus */
int old_prs, new_prs;
int part_error = PERR_NONE; /* Partition error? */
+ bool update_reserve = (parent == &top_cpuset);
lockdep_assert_held(&cpuset_mutex);
@@ -1576,7 +1603,7 @@ static int update_parent_subparts_cpumask(struct cpuset *cs, int cmd,
}
/*
- * Change the parent's subparts_cpus.
+ * Change the parent's subparts_cpus and maybe cs_reserve_cpus.
* Newly added CPUs will be removed from effective_cpus and
* newly deleted ones will be added back to effective_cpus.
*/
@@ -1586,10 +1613,25 @@ static int update_parent_subparts_cpumask(struct cpuset *cs, int cmd,
parent->subparts_cpus, tmp->addmask);
cpumask_andnot(parent->effective_cpus,
parent->effective_cpus, tmp->addmask);
+ if (update_reserve) {
+ cpumask_or(cs_reserve_cpus,
+ cs_reserve_cpus, tmp->addmask);
+ cpumask_andnot(cs_free_reserve_cpus,
+ cs_free_reserve_cpus, tmp->addmask);
+ }
}
if (deleting) {
cpumask_andnot(parent->subparts_cpus,
parent->subparts_cpus, tmp->delmask);
+ /*
+ * The automatic CPU reservation of adjacent partitions
+ * won't add the deleted CPUs back to cs_free_reserve_cpus.
+ * Instead, they are returned to the effective_cpus of the
+ * top cpuset.
+ */
+ if (update_reserve)
+ cpumask_andnot(cs_reserve_cpus,
+ cs_reserve_cpus, tmp->delmask);
/*
* Some of the CPUs in subparts_cpus might have been offlined.
*/
@@ -1783,6 +1825,8 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
if (need_rebuild_sched_domains)
rebuild_sched_domains_locked();
+
+ return;
}
/**
@@ -1955,6 +1999,167 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
return 0;
}
+/**
+ * update_reserve_cpumask - update cs_reserve_cpus
+ * @trialcs: trial cpuset
+ * @buf: buffer of cpu numbers written to this cpuset
+ * Return: 0 if successful, < 0 if error
+ */
+static int update_reserve_cpumask(struct cpuset *trialcs, const char *buf)
+{
+ struct cgroup_subsys_state *css;
+ struct cpuset *cs;
+ bool adding, deleting;
+ struct tmpmasks tmp;
+
+ adding = deleting = false;
+ if (*buf == '+') {
+ adding = true;
+ buf++;
+ } else if (*buf == '-') {
+ deleting = true;
+ buf++;
+ }
+
+ if (!*buf) {
+ if (adding)
+ return -EINVAL;
+
+ if (deleting) {
+ if (cpumask_empty(cs_free_reserve_cpus))
+ return 0;
+ cpumask_copy(trialcs->cpus_allowed, cs_free_reserve_cpus);
+ } else {
+ cpumask_clear(trialcs->cpus_allowed);
+ }
+ } else {
+ int retval = cpulist_parse(buf, trialcs->cpus_allowed);
+
+ if (retval < 0)
+ return retval;
+ }
+
+ if (!adding && !deleting &&
+ cpumask_equal(trialcs->cpus_allowed, cs_reserve_cpus))
+ return 0;
+
+ /* Preserve trialcs->cpus_allowed for now */
+ init_tmpmasks(&tmp, NULL, trialcs->subparts_cpus,
+ trialcs->effective_cpus);
+
+ /*
+ * Compute the addition and removal of CPUs to/from cs_reserve_cpus
+ */
+ if (!adding && !deleting) {
+ adding = cpumask_andnot(tmp.addmask, trialcs->cpus_allowed,
+ cs_reserve_cpus);
+ deleting = cpumask_andnot(tmp.delmask, cs_reserve_cpus,
+ trialcs->cpus_allowed);
+ } else if (adding) {
+ adding = cpumask_andnot(tmp.addmask,
+ trialcs->cpus_allowed, cs_reserve_cpus);
+ cpumask_or(trialcs->cpus_allowed, cs_reserve_cpus, tmp.addmask);
+ } else { /* deleting */
+ deleting = cpumask_and(tmp.delmask,
+ trialcs->cpus_allowed, cs_reserve_cpus);
+ cpumask_andnot(trialcs->cpus_allowed, cs_reserve_cpus, tmp.delmask);
+ }
+
+ if (!adding && !deleting)
+ return 0;
+
+ /*
+ * Invalidate remote partitions if necessary
+ */
+ if (deleting) {
+ /* TODO */
+ }
+
+ /*
+ * Cannot use up all the CPUs in top_cpuset.effective_cpus
+ */
+ if (!deleting && adding &&
+ cpumask_subset(top_cpuset.effective_cpus, tmp.addmask))
+ return -EINVAL;
+
+ spin_lock_irq(&callback_lock);
+ /*
+ * Update top_cpuset.effective_cpus, cs_reserve_cpus &
+ * cs_free_reserve_cpus.
+ */
+ if (adding)
+ cpumask_or(cs_free_reserve_cpus, cs_free_reserve_cpus,
+ tmp.addmask);
+ cpumask_copy(cs_reserve_cpus, trialcs->cpus_allowed);
+ cpumask_andnot(top_cpuset.effective_cpus,
+ cpu_active_mask, cs_reserve_cpus);
+
+ /*
+ * Remove CPUs from cs_free_reserve_cpus first. Anything left
+ * means some partitions have to be made invalid.
+ */
+ if (deleting && cpumask_and(cs_tmp_cpus, cs_free_reserve_cpus,
+ tmp.delmask)) {
+ cpumask_andnot(cs_free_reserve_cpus, cs_free_reserve_cpus,
+ cs_tmp_cpus);
+ deleting = cpumask_andnot(tmp.delmask, tmp.delmask,
+ cs_tmp_cpus);
+ }
+ spin_unlock_irq(&callback_lock);
+
+ /*
+ * Invalidate some adjacent partitions under top cpuset, if necessary
+ */
+ if (deleting && cpumask_and(cs_tmp_cpus, tmp.delmask,
+ top_cpuset.subparts_cpus)) {
+ struct cgroup_subsys_state *css;
+ struct cpuset *cp;
+
+ /*
+ * Temporarily save the remaining CPUs to be deleted in
+ * trialcs->cpus_allowed to be restored back to tmp.delmask
+ * later.
+ */
+ deleting = cpumask_andnot(trialcs->cpus_allowed, tmp.delmask,
+ cs_tmp_cpus);
+ rcu_read_lock();
+ cpuset_for_each_child(cp, css, &top_cpuset)
+ if (is_partition_valid(cp) &&
+ cpumask_intersects(cs_tmp_cpus, cp->cpus_allowed)) {
+ rcu_read_unlock();
+ update_parent_subparts_cpumask(cp, partcmd_invalidate, NULL, &tmp);
+ rcu_read_lock();
+ }
+ rcu_read_unlock();
+ if (deleting)
+ cpumask_copy(tmp.delmask, trialcs->cpus_allowed);
+ }
+
+ /* Can now use all of trialcs */
+ init_tmpmasks(&tmp, trialcs->cpus_allowed, trialcs->subparts_cpus,
+ trialcs->effective_cpus);
+
+ /*
+ * Update effective_cpus of all descendants that are not in
+ * partitions and rebuild sched domains.
+ */
+ rcu_read_lock();
+ cpuset_for_each_child(cs, css, &top_cpuset) {
+ compute_effective_cpumask(tmp.new_cpus, cs, &top_cpuset);
+ if (cpumask_equal(tmp.new_cpus, cs->effective_cpus))
+ continue;
+ if (!css_tryget_online(&cs->css))
+ continue;
+ rcu_read_unlock();
+ update_cpumasks_hier(cs, &tmp, false);
+ rcu_read_lock();
+ css_put(&cs->css);
+ }
+ rcu_read_unlock();
+ rebuild_sched_domains_locked();
+ return 0;
+}
+
/*
* Migrate memory region from one set of nodes to another. This is
* performed asynchronously as it can be called from process migration path
@@ -2743,6 +2948,7 @@ typedef enum {
FILE_EFFECTIVE_CPULIST,
FILE_EFFECTIVE_MEMLIST,
FILE_SUBPARTS_CPULIST,
+ FILE_RESERVE_CPULIST,
FILE_CPU_EXCLUSIVE,
FILE_MEM_EXCLUSIVE,
FILE_MEM_HARDWALL,
@@ -2880,6 +3086,9 @@ static ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
case FILE_CPULIST:
retval = update_cpumask(cs, trialcs, buf);
break;
+ case FILE_RESERVE_CPULIST:
+ retval = update_reserve_cpumask(trialcs, buf);
+ break;
case FILE_MEMLIST:
retval = update_nodemask(cs, trialcs, buf);
break;
@@ -2927,6 +3136,9 @@ static int cpuset_common_seq_show(struct seq_file *sf, void *v)
case FILE_EFFECTIVE_MEMLIST:
seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems));
break;
+ case FILE_RESERVE_CPULIST:
+ seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs_reserve_cpus));
+ break;
case FILE_SUBPARTS_CPULIST:
seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->subparts_cpus));
break;
@@ -3200,6 +3412,14 @@ static struct cftype dfl_files[] = {
.file_offset = offsetof(struct cpuset, partition_file),
},
+ {
+ .name = "cpus.reserve",
+ .seq_show = cpuset_common_seq_show,
+ .write = cpuset_write_resmask,
+ .private = FILE_RESERVE_CPULIST,
+ .flags = CFTYPE_ONLY_ON_ROOT,
+ },
+
{
.name = "cpus.subpartitions",
.seq_show = cpuset_common_seq_show,
@@ -3510,6 +3730,8 @@ int __init cpuset_init(void)
BUG_ON(!alloc_cpumask_var(&top_cpuset.effective_cpus, GFP_KERNEL));
BUG_ON(!zalloc_cpumask_var(&top_cpuset.subparts_cpus, GFP_KERNEL));
BUG_ON(!zalloc_cpumask_var(&cs_tmp_cpus, GFP_KERNEL));
+ BUG_ON(!zalloc_cpumask_var(&cs_reserve_cpus, GFP_KERNEL));
+ BUG_ON(!zalloc_cpumask_var(&cs_free_reserve_cpus, GFP_KERNEL));
cpumask_setall(top_cpuset.cpus_allowed);
nodes_setall(top_cpuset.mems_allowed);
@@ -3788,10 +4010,10 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
mems_updated = !nodes_equal(top_cpuset.effective_mems, new_mems);
/*
- * In the rare case that hotplug removes all the cpus in subparts_cpus,
+ * In the rare case that hotplug removes all the reserve cpus,
* we assumed that cpus are updated.
*/
- if (!cpus_updated && top_cpuset.nr_subparts_cpus)
+ if (!cpus_updated && !cpumask_empty(cs_reserve_cpus))
cpus_updated = true;
/* synchronize cpus_allowed to cpu_active_mask */
@@ -3801,18 +4023,21 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
/*
* Make sure that CPUs allocated to child partitions
- * do not show up in effective_cpus. If no CPU is left,
- * we clear the subparts_cpus & let the child partitions
- * fight for the CPUs again.
+ * do not show up in top_cpuset's effective_cpus. In the
+ * unlikely event that no effective CPU is left in top_cpuset,
+ * we clear all the reserve cpus and let the non-remote child
+ * partitions fight for the CPUs again.
*/
- if (top_cpuset.nr_subparts_cpus) {
- if (cpumask_subset(&new_cpus,
- top_cpuset.subparts_cpus)) {
+ if (!cpumask_empty(cs_reserve_cpus)) {
+
+ if (cpumask_subset(&new_cpus, cs_reserve_cpus)) {
top_cpuset.nr_subparts_cpus = 0;
cpumask_clear(top_cpuset.subparts_cpus);
+ cpumask_clear(cs_free_reserve_cpus);
+ cpumask_clear(cs_reserve_cpus);
} else {
cpumask_andnot(&new_cpus, &new_cpus,
- top_cpuset.subparts_cpus);
+ cs_reserve_cpus);
}
}
cpumask_copy(top_cpuset.effective_cpus, &new_cpus);
--
2.31.1
v2:
- [v1] https://lore.kernel.org/lkml/20230412153758.3088111-1-longman@redhat.com/
- Dropped the special "isolcpus" partition in v1
- Add the root-only "cpuset.cpus.reserve" control file for reserving
CPUs used for remote isolated partitions.
- Update the test_cpuset_prs.sh test script and documentation
accordingly.
This patch series introduces a new category of cpuset partitions called
remote partitions. The existing category, where partition roots have to
be clustered around the root cgroup in a hierarchical way, is now
referred to as adjacent partitions.
A remote partition can be formed far from the root cgroup with no
partition root parent. The only requirement is that the CPUs used in
the partition, as specified in its "cpuset.cpus" file, have to be
present in the "cpuset.cpus" of all its ancestors.
It is relatively rare for applications to require the creation of a
separate scheduling domain ("root"). However, it is more common for
applications to require the use of isolated CPUs ("isolated"),
e.g. DPDK. One can use the "isolcpus" or "nohz_full" boot command line
options to get that statically. Of course, the "isolated" partition is
another way to achieve that dynamically.
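For instance, the static approach might use boot options like the
following (CPU numbers illustrative):

  isolcpus=2-5 nohz_full=2-5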
Modern container orchestration tools like Kubernetes use the cgroup
hierarchy to manage different containers, relying on middleware such as
systemd to help manage it. If a container needs isolated CPUs, they are
hard to obtain with adjacent partitions, as that would require the
administrative parent cgroup to be a partition root too, which tools
like systemd may not be ready to manage.
With this patch series, a new root-cgroup-only "cpuset.cpus.reserve"
file is added to specify the set of CPUs that can be used in partitions
(whether remote or adjacent). To create a remote partition, the set
of CPUs to be used in that partition (the "cpuset.cpus" file of the
partition root) has to be reserved by manually adding them to that
control file first. Then that partition can be activated by writing
"isolated" into its "cpuset.cpus.partition". CPU reservation of adjacent
partitions is done automatically without touching "cpuset.cpus.reserve"
at all.
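As a sketch of the intended remote-partition workflow (cgroup paths and
CPU numbers are illustrative, assuming cgroup v2 mounted at
/sys/fs/cgroup):

  # Reserve the CPUs for the remote partition in the root cgroup.
  echo 4-7 > /sys/fs/cgroup/cpuset.cpus.reserve
  # Assign the reserved CPUs to a cpuset deeper in the hierarchy.
  echo 4-7 > /sys/fs/cgroup/system.slice/app/cpuset.cpus
  # Activate the remote isolated partition.
  echo isolated > /sys/fs/cgroup/system.slice/app/cpuset.cpus.partition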
Currently only remote isolated partitions are supported. We could
support a scheduling partition ("root") in the future if the need arises.
Additional isolation attributes like those with the "isolcpus" or "nohz"
boot command line options may be supported in the isolated partitions
in the future.
Waiman Long (6):
cgroup/cpuset: Extract out CS_CPU_EXCLUSIVE & CS_SCHED_LOAD_BALANCE
handling
cgroup/cpuset: Improve temporary cpumasks handling
cgroup/cpuset: Add cpuset.cpus.reserve for top cpuset
cgroup/cpuset: Introduce remote isolated partition
cgroup/cpuset: Documentation update for partition
cgroup/cpuset: Extend test_cpuset_prs.sh to test remote partition
Documentation/admin-guide/cgroup-v2.rst | 92 ++-
kernel/cgroup/cpuset.c | 749 +++++++++++++++---
.../selftests/cgroup/test_cpuset_prs.sh | 403 ++++++----
3 files changed, 988 insertions(+), 256 deletions(-)
--
2.31.1
From: Mirsad Todorovac <mirsad.todorovac@alu.unizg.hr>
[ Upstream commit 976d3c6778e99390c6d854d140b746d12ea18a51 ]
According to Mirsad, the gpio-sim.sh test appears to FAIL in a
misleading way due to missing initialisation of shell variables:
4.2. Bias settings work correctly
cat: /sys/devices/platform/gpio-sim.0/gpiochip18/sim_gpio0/value: No such file or directory
./gpio-sim.sh: line 393: test: =: unary operator expected
bias setting does not work
GPIO gpio-sim test FAIL
After this change the test passed:
4.2. Bias settings work correctly
GPIO gpio-sim test PASS
His testing environment is AlmaLinux 8.7 on a Lenovo desktop box
running the latest Linux kernel based on v6.2:
Linux 6.2.0-mglru-kmlk-andy-09238-gd2980d8d8265 x86_64
Suggested-by: Mirsad Todorovac <mirsad.todorovac@alu.unizg.hr>
Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Tested-by: Mirsad Goran Todorovac <mirsad.todorovac@alu.unizg.hr>
Signed-off-by: Mirsad Goran Todorovac <mirsad.todorovac@alu.unizg.hr>
Signed-off-by: Bartosz Golaszewski <bartosz.golaszewski@linaro.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
tools/testing/selftests/gpio/gpio-sim.sh | 3 +++
1 file changed, 3 insertions(+)
diff --git a/tools/testing/selftests/gpio/gpio-sim.sh b/tools/testing/selftests/gpio/gpio-sim.sh
index 341e3de008968..bf67b23ed29ac 100755
--- a/tools/testing/selftests/gpio/gpio-sim.sh
+++ b/tools/testing/selftests/gpio/gpio-sim.sh
@@ -389,6 +389,9 @@ create_chip chip
create_bank chip bank
set_num_lines chip bank 8
enable_chip chip
+DEVNAME=`configfs_dev_name chip`
+CHIPNAME=`configfs_chip_name chip bank`
+SYSFS_PATH="/sys/devices/platform/$DEVNAME/$CHIPNAME/sim_gpio0/value"
$BASE_DIR/gpio-mockup-cdev -b pull-up /dev/`configfs_chip_name chip bank` 0
test `cat $SYSFS_PATH` = "1" || fail "bias setting does not work"
remove_chip chip
--
2.39.2
The kunit_add_action() and related functions named the kunit_action_t
parameter 'func' in early drafts; it was later renamed to 'action', but
the doc comments were not updated to match.
Fix these to avoid confusion and 'make htmldocs' warnings.
Fixes: b9dce8a1ed3e ("kunit: Add kunit_add_action() to defer a call until test exit")
Reported-by: Stephen Rothwell <sfr@canb.auug.org.au>
Closes: https://lore.kernel.org/lkml/20230530151840.16a56460@canb.auug.org.au/
Signed-off-by: David Gow <davidgow@google.com>
---
include/kunit/resource.h | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/include/kunit/resource.h b/include/kunit/resource.h
index b64eb783b1bc..c7383e90f5c9 100644
--- a/include/kunit/resource.h
+++ b/include/kunit/resource.h
@@ -393,7 +393,7 @@ typedef void (kunit_action_t)(void *);
/**
* kunit_add_action() - Call a function when the test ends.
* @test: Test case to associate the action with.
- * @func: The function to run on test exit
+ * @action: The function to run on test exit
* @ctx: Data passed into @func
*
* Defer the execution of a function until the test exits, either normally or
@@ -415,7 +415,7 @@ int kunit_add_action(struct kunit *test, kunit_action_t *action, void *ctx);
/**
* kunit_add_action_or_reset() - Call a function when the test ends.
* @test: Test case to associate the action with.
- * @func: The function to run on test exit
+ * @action: The function to run on test exit
* @ctx: Data passed into @func
*
* Defer the execution of a function until the test exits, either normally or
@@ -441,7 +441,7 @@ int kunit_add_action_or_reset(struct kunit *test, kunit_action_t *action,
/**
* kunit_remove_action() - Cancel a matching deferred action.
* @test: Test case the action is associated with.
- * @func: The deferred function to cancel.
+ * @action: The deferred function to cancel.
* @ctx: The context passed to the deferred function to trigger.
*
* Prevent an action deferred via kunit_add_action() from executing when the
@@ -459,7 +459,7 @@ void kunit_remove_action(struct kunit *test,
/**
* kunit_release_action() - Run a matching action call immediately.
* @test: Test case the action is associated with.
- * @func: The deferred function to trigger.
+ * @action: The deferred function to trigger.
* @ctx: The context passed to the deferred function to trigger.
*
* Execute a function deferred via kunit_add_action()) immediately, rather than
--
2.41.0.rc0.172.g3f132b7071-goog
The sample code has a Kconfig entry for tristate configuration. In that
case, it is friendlier to developers for the code to carry a
MODULE_LICENSE, since a missing MODULE_LICENSE causes a modpost error
when the code is built as a loadable kernel module.
Signed-off-by: Takashi Sakamoto <o-takashi@sakamocchi.jp>
---
Documentation/dev-tools/kunit/start.rst | 2 ++
1 file changed, 2 insertions(+)
diff --git a/Documentation/dev-tools/kunit/start.rst b/Documentation/dev-tools/kunit/start.rst
index c736613c9b19..d4f99ef94f71 100644
--- a/Documentation/dev-tools/kunit/start.rst
+++ b/Documentation/dev-tools/kunit/start.rst
@@ -250,6 +250,8 @@ Now we are ready to write the test cases.
};
kunit_test_suite(misc_example_test_suite);
+ MODULE_LICENSE("GPL");
+
2. Add the following lines to ``drivers/misc/Kconfig``:
.. code-block:: kconfig
--
2.39.2
User processes register name_args for events. If an event with the same
name but different args is registered, the trace output of the second
event is printed as the first event. This is incorrect.
Return EADDRINUSE to the user process if an event with the same name
but different args has already been registered.
Signed-off-by: sunliming <sunliming@kylinos.cn>
---
kernel/trace/trace_events_user.c | 36 +++++++++++++++----
.../selftests/user_events/ftrace_test.c | 6 ++++
2 files changed, 36 insertions(+), 6 deletions(-)
diff --git a/kernel/trace/trace_events_user.c b/kernel/trace/trace_events_user.c
index b1ecd7677642..e90161294698 100644
--- a/kernel/trace/trace_events_user.c
+++ b/kernel/trace/trace_events_user.c
@@ -1753,6 +1753,8 @@ static int user_event_parse(struct user_event_group *group, char *name,
int ret;
u32 key;
struct user_event *user;
+ int argc = 0;
+ char **argv;
/* Prevent dyn_event from racing */
mutex_lock(&event_mutex);
@@ -1760,13 +1762,35 @@ static int user_event_parse(struct user_event_group *group, char *name,
mutex_unlock(&event_mutex);
if (user) {
- *newuser = user;
- /*
- * Name is allocated by caller, free it since it already exists.
- * Caller only worries about failure cases for freeing.
- */
- kfree(name);
+ if (args) {
+ argv = argv_split(GFP_KERNEL, args, &argc);
+ if (!argv) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ ret = user_fields_match(user, argc, (const char **)argv);
+ argv_free(argv);
+
+ } else
+ ret = list_empty(&user->fields);
+
+ if (ret) {
+ *newuser = user;
+ /*
+ * Name is allocated by caller, free it since it already exists.
+ * Caller only worries about failure cases for freeing.
+ */
+ kfree(name);
+ } else {
+ ret = -EADDRINUSE;
+ goto error;
+ }
+
return 0;
+error:
+ refcount_dec(&user->refcnt);
+ return ret;
}
user = kzalloc(sizeof(*user), GFP_KERNEL_ACCOUNT);
diff --git a/tools/testing/selftests/user_events/ftrace_test.c b/tools/testing/selftests/user_events/ftrace_test.c
index 7c99cef94a65..6e8c4b47281c 100644
--- a/tools/testing/selftests/user_events/ftrace_test.c
+++ b/tools/testing/selftests/user_events/ftrace_test.c
@@ -228,6 +228,12 @@ TEST_F(user, register_events) {
ASSERT_EQ(0, ioctl(self->data_fd, DIAG_IOCSREG, &reg));
ASSERT_EQ(0, reg.write_index);
+ /* Multiple registers to same name but different args should fail */
+ reg.enable_bit = 29;
+ reg.name_args = (__u64)"__test_event u32 field1;";
+ ASSERT_EQ(-1, ioctl(self->data_fd, DIAG_IOCSREG, &reg));
+ ASSERT_EQ(EADDRINUSE, errno);
+
/* Ensure disabled */
self->enable_fd = open(enable_file, O_RDWR);
ASSERT_NE(-1, self->enable_fd);
--
2.25.1