From: Abhishek Mainkar <abmainkar@nvidia.com>
[ Upstream commit 3a21ffdbc825e0919db9da0e27ee5ff2cc8a863e ]
ACPICA commit 90310989a0790032f5a0140741ff09b545af4bc5
According to section 19.6.134 of the ACPI specification, no argument is required to be passed to the ASL Timer instruction. To account for this, add the AML_NO_OPERAND_RESOLVE flag to the ASL Timer instruction opcode.
Without the flag, the ACPI interpreter reports an error when it interprets the ASL Timer instruction. After adding AML_NO_OPERAND_RESOLVE to the Timer opcode, the issue is no longer observed.
=============================================================
UBSAN: array-index-out-of-bounds in acpica/dswexec.c:401:12
index -1 is out of range for type 'union acpi_operand_object *[9]'
CPU: 37 PID: 1678 Comm: cat Not tainted 6.0.0-dev-th500-6.0.y-1+bcf8c46459e407-generic-64k
HW name: NVIDIA BIOS v1.1.1-d7acbfc-dirty 12/19/2022
Call trace:
 dump_backtrace+0xe0/0x130
 show_stack+0x20/0x60
 dump_stack_lvl+0x68/0x84
 dump_stack+0x18/0x34
 ubsan_epilogue+0x10/0x50
 __ubsan_handle_out_of_bounds+0x80/0x90
 acpi_ds_exec_end_op+0x1bc/0x6d8
 acpi_ps_parse_loop+0x57c/0x618
 acpi_ps_parse_aml+0x1e0/0x4b4
 acpi_ps_execute_method+0x24c/0x2b8
 acpi_ns_evaluate+0x3a8/0x4bc
 acpi_evaluate_object+0x15c/0x37c
 acpi_evaluate_integer+0x54/0x15c
 show_power+0x8c/0x12c [acpi_power_meter]
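The negative index in the splat comes from the interpreter's operand-resolution step. A minimal illustrative sketch of the failure mode (simplified, not the exact ACPICA source; the real logic lives in acpi_ds_exec_end_op() in dswexec.c):

    if (!(op_info->flags & AML_NO_OPERAND_RESOLVE)) {
            /*
             * Timer takes no arguments, so num_operands is 0 here and
             * this computes operands[-1] -- the out-of-bounds access
             * reported above. The new flag skips this path entirely.
             */
            obj_desc = walk_state->operands[walk_state->num_operands - 1];
    }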
Link: https://github.com/acpica/acpica/commit/90310989
Signed-off-by: Abhishek Mainkar <abmainkar@nvidia.com>
Signed-off-by: Bob Moore <robert.moore@intel.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/acpi/acpica/psopcode.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/acpi/acpica/psopcode.c b/drivers/acpi/acpica/psopcode.c
index 09029fe545f14..39e31030e5f49 100644
--- a/drivers/acpi/acpica/psopcode.c
+++ b/drivers/acpi/acpica/psopcode.c
@@ -603,7 +603,7 @@ const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = {
 /* 7E */ ACPI_OP("Timer", ARGP_TIMER_OP, ARGI_TIMER_OP,
		 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
		 AML_TYPE_EXEC_0A_0T_1R,
-		 AML_FLAGS_EXEC_0A_0T_1R),
+		 AML_FLAGS_EXEC_0A_0T_1R | AML_NO_OPERAND_RESOLVE),
/* ACPI 5.0 opcodes */
From: Wander Lairson Costa <wander@redhat.com>
[ Upstream commit d243b34459cea30cfe5f3a9b2feb44e7daff9938 ]
Under PREEMPT_RT, __put_task_struct() indirectly acquires sleeping locks. Therefore, it can't be called from a non-preemptible context.
One practical example is the splat inside inactive_task_timer(), which is called in interrupt context:
CPU: 1 PID: 2848 Comm: life Kdump: loaded Tainted: G W ---------
Hardware name: HP ProLiant DL388p Gen8, BIOS P70 07/15/2012
Call Trace:
 dump_stack_lvl+0x57/0x7d
 mark_lock_irq.cold+0x33/0xba
 mark_lock+0x1e7/0x400
 mark_usage+0x11d/0x140
 __lock_acquire+0x30d/0x930
 lock_acquire.part.0+0x9c/0x210
 rt_spin_lock+0x27/0xe0
 refill_obj_stock+0x3d/0x3a0
 kmem_cache_free+0x357/0x560
 inactive_task_timer+0x1ad/0x340
 __run_hrtimer+0x8a/0x1a0
 __hrtimer_run_queues+0x91/0x130
 hrtimer_interrupt+0x10f/0x220
 __sysvec_apic_timer_interrupt+0x7b/0xd0
 sysvec_apic_timer_interrupt+0x4f/0xd0
 asm_sysvec_apic_timer_interrupt+0x12/0x20
RIP: 0033:0x7fff196bf6f5
Instead of calling __put_task_struct() directly, we defer it using call_rcu(). A more natural approach would be a workqueue, but since we can't allocate dynamic memory from atomic context under PREEMPT_RT, the code would become more complex: we would need to put the work_struct instance in the task_struct and initialize it when we allocate a new task_struct.
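As an illustration, consider a hypothetical atomic-context caller (the hrtimer and my_ctx below are made up for the example):

    /*
     * Hypothetical hrtimer callback, running in hardirq context. With
     * this change, the final reference drop defers the actual free to
     * an RCU callback on PREEMPT_RT instead of acquiring sleeping
     * locks here.
     */
    static enum hrtimer_restart my_timer_fn(struct hrtimer *t)
    {
            struct my_ctx *ctx = container_of(t, struct my_ctx, timer);

            put_task_struct(ctx->task);     /* safe even when !preemptible() */
            return HRTIMER_NORESTART;
    }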
The issue is reproducible with stress-ng:
while true; do
    stress-ng --sched deadline --sched-period 1000000000 \
        --sched-runtime 800000000 --sched-deadline \
        1000000000 --mmapfork 23 -t 20
done
Reported-by: Hu Chunyu <chuhu@redhat.com>
Suggested-by: Oleg Nesterov <oleg@redhat.com>
Suggested-by: Valentin Schneider <vschneid@redhat.com>
Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Wander Lairson Costa <wander@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20230614122323.37957-2-wander@redhat.com
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 include/linux/sched/task.h | 28 +++++++++++++++++++++++++++-
 kernel/fork.c              |  8 ++++++++
 2 files changed, 35 insertions(+), 1 deletion(-)
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index dd35ce28bb908..6b687c155fb6c 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -118,10 +118,36 @@ static inline struct task_struct *get_task_struct(struct task_struct *t)
 }

 extern void __put_task_struct(struct task_struct *t);
+extern void __put_task_struct_rcu_cb(struct rcu_head *rhp);

 static inline void put_task_struct(struct task_struct *t)
 {
-	if (refcount_dec_and_test(&t->usage))
+	if (!refcount_dec_and_test(&t->usage))
+		return;
+
+	/*
+	 * under PREEMPT_RT, we can't call put_task_struct
+	 * in atomic context because it will indirectly
+	 * acquire sleeping locks.
+	 *
+	 * call_rcu() will schedule delayed_put_task_struct_rcu()
+	 * to be called in process context.
+	 *
+	 * __put_task_struct() is called when
+	 * refcount_dec_and_test(&t->usage) succeeds.
+	 *
+	 * This means that it can't "conflict" with
+	 * put_task_struct_rcu_user() which abuses ->rcu the same
+	 * way; rcu_users has a reference so task->usage can't be
+	 * zero after rcu_users 1 -> 0 transition.
+	 *
+	 * delayed_free_task() also uses ->rcu, but it is only called
+	 * when it fails to fork a process. Therefore, there is no
+	 * way it can conflict with put_task_struct().
+	 */
+	if (IS_ENABLED(CONFIG_PREEMPT_RT) && !preemptible())
+		call_rcu(&t->rcu, __put_task_struct_rcu_cb);
+	else
 		__put_task_struct(t);
 }
diff --git a/kernel/fork.c b/kernel/fork.c
index d2e12b6d2b180..f81149739eb9f 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -985,6 +985,14 @@ void __put_task_struct(struct task_struct *tsk)
 }
 EXPORT_SYMBOL_GPL(__put_task_struct);

+void __put_task_struct_rcu_cb(struct rcu_head *rhp)
+{
+	struct task_struct *task = container_of(rhp, struct task_struct, rcu);
+
+	__put_task_struct(task);
+}
+EXPORT_SYMBOL_GPL(__put_task_struct_rcu_cb);
+
 void __init __weak arch_task_cache_init(void) { }
/*
From: Zqiang <qiang.zhang1211@gmail.com>
[ Upstream commit e60c122a1614b4f65b29a7bef9d83b9fd30e937a ]
The rcuscale.holdoff module parameter can be used to delay the start of the rcu_scale_writer() kthread. However, the hung-task timeout will trigger when the timeout specified by rcuscale.holdoff is greater than hung_task_timeout_secs:
runqemu kvm nographic slirp qemuparams="-smp 4 -m 2048M" bootparams="rcuscale.shutdown=0 rcuscale.holdoff=300"
[ 247.071753] INFO: task rcu_scale_write:59 blocked for more than 122 seconds.
[ 247.072529]       Not tainted 6.4.0-rc1-00134-gb9ed6de8d4ff #7
[ 247.073400] "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
[ 247.074331] task:rcu_scale_write state:D stack:30144 pid:59 ppid:2 flags:0x00004000
[ 247.075346] Call Trace:
[ 247.075660]  <TASK>
[ 247.075965]  __schedule+0x635/0x1280
[ 247.076448]  ? __pfx___schedule+0x10/0x10
[ 247.076967]  ? schedule_timeout+0x2dc/0x4d0
[ 247.077471]  ? __pfx_lock_release+0x10/0x10
[ 247.078018]  ? enqueue_timer+0xe2/0x220
[ 247.078522]  schedule+0x84/0x120
[ 247.078957]  schedule_timeout+0x2e1/0x4d0
[ 247.079447]  ? __pfx_schedule_timeout+0x10/0x10
[ 247.080032]  ? __pfx_rcu_scale_writer+0x10/0x10
[ 247.080591]  ? __pfx_process_timeout+0x10/0x10
[ 247.081163]  ? __pfx_sched_set_fifo_low+0x10/0x10
[ 247.081760]  ? __pfx_rcu_scale_writer+0x10/0x10
[ 247.082287]  rcu_scale_writer+0x6b1/0x7f0
[ 247.082773]  ? mark_held_locks+0x29/0xa0
[ 247.083252]  ? __pfx_rcu_scale_writer+0x10/0x10
[ 247.083865]  ? __pfx_rcu_scale_writer+0x10/0x10
[ 247.084412]  kthread+0x179/0x1c0
[ 247.084759]  ? __pfx_kthread+0x10/0x10
[ 247.085098]  ret_from_fork+0x2c/0x50
[ 247.085433]  </TASK>
This commit therefore replaces schedule_timeout_uninterruptible() with schedule_timeout_idle().
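For reference, schedule_timeout_idle() sleeps in TASK_IDLE state, which the hung-task detector ignores; a sketch of the relevant definitions as found in current kernels:

    /* include/linux/sched.h */
    #define TASK_IDLE	(TASK_UNINTERRUPTIBLE | TASK_NOLOAD)

    /* kernel/time/timer.c */
    signed long __sched schedule_timeout_idle(signed long timeout)
    {
            __set_current_state(TASK_IDLE);
            return schedule_timeout(timeout);
    }

The TASK_NOLOAD bit is what keeps the holdoff sleep from being reported as a hung task (and from contributing to load average).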
Signed-off-by: Zqiang <qiang.zhang1211@gmail.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 kernel/rcu/rcuscale.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/kernel/rcu/rcuscale.c b/kernel/rcu/rcuscale.c
index d1221731c7cfd..35aab6cbba583 100644
--- a/kernel/rcu/rcuscale.c
+++ b/kernel/rcu/rcuscale.c
@@ -424,7 +424,7 @@ rcu_scale_writer(void *arg)
 	sched_set_fifo_low(current);

 	if (holdoff)
-		schedule_timeout_uninterruptible(holdoff * HZ);
+		schedule_timeout_idle(holdoff * HZ);

 	/*
 	 * Wait until rcu_end_inkernel_boot() is called for normal GP tests
From: "Paul E. McKenney" paulmck@kernel.org
[ Upstream commit 013608cd0812bdb21fc26d39ed8fdd2fc76e8b9b ]
Kernels built with CONFIG_KASAN=y quarantine newly freed memory in order to better detect use-after-free errors. However, this can exhaust memory more quickly in allocator-heavy tests, which can result in spurious scftorture failure. This commit therefore forgives memory-allocation failure in kernels built with CONFIG_KASAN=y, but continues counting the errors for use in detailed test-result analyses.
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 kernel/scftorture.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/kernel/scftorture.c b/kernel/scftorture.c
index 5d113aa59e773..83c33ba0ca7e0 100644
--- a/kernel/scftorture.c
+++ b/kernel/scftorture.c
@@ -171,7 +171,8 @@ static void scf_torture_stats_print(void)
 		scfs.n_all_wait += scf_stats_p[i].n_all_wait;
 	}
 	if (atomic_read(&n_errs) || atomic_read(&n_mb_in_errs) ||
-	    atomic_read(&n_mb_out_errs) || atomic_read(&n_alloc_errs))
+	    atomic_read(&n_mb_out_errs) ||
+	    (!IS_ENABLED(CONFIG_KASAN) && atomic_read(&n_alloc_errs)))
 		bangstr = "!!! ";
 	pr_alert("%s %sscf_invoked_count %s: %lld resched: %lld single: %lld/%lld single_ofl: %lld/%lld single_rpc: %lld single_rpc_ofl: %lld many: %lld/%lld all: %lld/%lld ",
 		 SCFTORT_FLAG, bangstr, isdone ? "VER" : "ver", invoked_count, scfs.n_resched,
@@ -323,7 +324,8 @@ static void scftorture_invoke_one(struct scf_statistics *scfp, struct torture_ra
 	preempt_disable();
 	if (scfsp->scfs_prim == SCF_PRIM_SINGLE || scfsp->scfs_wait) {
 		scfcp = kmalloc(sizeof(*scfcp), GFP_ATOMIC);
-		if (WARN_ON_ONCE(!scfcp)) {
+		if (!scfcp) {
+			WARN_ON_ONCE(!IS_ENABLED(CONFIG_KASAN));
 			atomic_inc(&n_alloc_errs);
 		} else {
 			scfcp->scfc_cpu = -1;
From: "Jiri Slaby (SUSE)" jirislaby@kernel.org
[ Upstream commit 96b709be183c56293933ef45b8b75f8af268c6de ]
The Lenovo Ideapad Z470 predates Windows 8, so it defaults to using acpi_video for backlight control. But this is not functional on this model.
Add a DMI quirk to use the native backlight interface which works.
Link: https://bugzilla.suse.com/show_bug.cgi?id=1208724
Signed-off-by: Jiri Slaby (SUSE) <jirislaby@kernel.org>
Reviewed-by: Hans de Goede <hdegoede@redhat.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/acpi/video_detect.c | 9 +++++++++
 1 file changed, 9 insertions(+)
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 18cc08c858cf2..0c376edd64fe1 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -445,6 +445,15 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
 		DMI_MATCH(DMI_BOARD_NAME, "Lenovo IdeaPad S405"),
 		},
 	},
+	{
+	 /* https://bugzilla.suse.com/show_bug.cgi?id=1208724 */
+	 .callback = video_detect_force_native,
+	 /* Lenovo Ideapad Z470 */
+	 .matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+		DMI_MATCH(DMI_PRODUCT_VERSION, "IdeaPad Z470"),
+		},
+	},
 	{
 	 /* https://bugzilla.redhat.com/show_bug.cgi?id=1187004 */
 	 .callback = video_detect_force_native,
From: Rob Barnes <robbarnes@google.com>
[ Upstream commit f2d4dced9a584612b25adb559c1350243d2bb544 ]
Remove the 1-second timeout applied to hw_protection_shutdown() after an EC panic. On some platforms this 1-second timeout is insufficient to allow the filesystem to fully sync. Independently, the EC will force a full system reset after a short period, so this backup timeout is unnecessary.
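For context, a paraphrased sketch of the timeout handling behind hw_protection_shutdown() (based on kernel/reboot.c; treat the exact code as an assumption):

    static void hw_failure_emergency_poweroff(int poweroff_delay_ms)
    {
            /* A non-positive delay, such as -1, arms no backup timer. */
            if (poweroff_delay_ms <= 0)
                    return;
            schedule_delayed_work(&hw_failure_emergency_poweroff_work,
                                  msecs_to_jiffies(poweroff_delay_ms));
    }

So passing -1 still begins the orderly shutdown but leaves the forced-poweroff backup disarmed, as intended here.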
Signed-off-by: Rob Barnes <robbarnes@google.com>
Reviewed-by: Guenter Roeck <groeck@chromium.org>
Link: https://lore.kernel.org/r/20230802175847.1.Ie9fc53b6a1f4c6661c5376286a50e0cf...
Signed-off-by: Tzung-Bi Shih <tzungbi@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/platform/chrome/cros_ec_lpc.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/platform/chrome/cros_ec_lpc.c b/drivers/platform/chrome/cros_ec_lpc.c
index 500a61b093e47..356572452898d 100644
--- a/drivers/platform/chrome/cros_ec_lpc.c
+++ b/drivers/platform/chrome/cros_ec_lpc.c
@@ -327,8 +327,8 @@ static void cros_ec_lpc_acpi_notify(acpi_handle device, u32 value, void *data)
 		dev_emerg(ec_dev->dev, "CrOS EC Panic Reported. Shutdown is imminent!");
 		blocking_notifier_call_chain(&ec_dev->panic_notifier, 0, ec_dev);
 		kobject_uevent_env(&ec_dev->dev->kobj, KOBJ_CHANGE, (char **)env);
-		/* Begin orderly shutdown. Force shutdown after 1 second. */
-		hw_protection_shutdown("CrOS EC Panic", 1000);
+		/* Begin orderly shutdown. EC will force reset after a short period. */
+		hw_protection_shutdown("CrOS EC Panic", -1);
 		/* Do not query for other events after a panic is reported */
 		return;
 	}
From: Avadhut Naik <Avadhut.Naik@amd.com>
[ Upstream commit c64016609b6f66b753b5f37929a191477fa584c0 ]
Add new PCI Device IDs required to support AMD's new Family 1Ah-based models 00h-1Fh, 20h and 40h-4Fh.
[ bp: Zap a useless sentence. ]
Co-developed-by: Mario Limonciello <mario.limonciello@amd.com>
Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
Signed-off-by: Avadhut Naik <Avadhut.Naik@amd.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/20230809035244.2722455-2-avadhut.naik@amd.com
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 arch/x86/kernel/amd_nb.c | 8 ++++++++
 include/linux/pci_ids.h  | 2 ++
 2 files changed, 10 insertions(+)
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index 035a3db5330b0..356de955e78dd 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -24,6 +24,8 @@
 #define PCI_DEVICE_ID_AMD_19H_M40H_ROOT		0x14b5
 #define PCI_DEVICE_ID_AMD_19H_M60H_ROOT		0x14d8
 #define PCI_DEVICE_ID_AMD_19H_M70H_ROOT		0x14e8
+#define PCI_DEVICE_ID_AMD_1AH_M00H_ROOT		0x153a
+#define PCI_DEVICE_ID_AMD_1AH_M20H_ROOT		0x1507
 #define PCI_DEVICE_ID_AMD_MI200_ROOT		0x14bb

 #define PCI_DEVICE_ID_AMD_17H_DF_F4		0x1464
@@ -39,6 +41,7 @@
 #define PCI_DEVICE_ID_AMD_19H_M60H_DF_F4	0x14e4
 #define PCI_DEVICE_ID_AMD_19H_M70H_DF_F4	0x14f4
 #define PCI_DEVICE_ID_AMD_19H_M78H_DF_F4	0x12fc
+#define PCI_DEVICE_ID_AMD_1AH_M00H_DF_F4	0x12c4
 #define PCI_DEVICE_ID_AMD_MI200_DF_F4		0x14d4

 /* Protect the PCI config register pairs used for SMN. */
@@ -56,6 +59,8 @@ static const struct pci_device_id amd_root_ids[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_ROOT) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M60H_ROOT) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_ROOT) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M00H_ROOT) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M20H_ROOT) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_ROOT) },
 	{}
 };
@@ -85,6 +90,8 @@ static const struct pci_device_id amd_nb_misc_ids[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M60H_DF_F3) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_DF_F3) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M78H_DF_F3) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M00H_DF_F3) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M20H_DF_F3) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_DF_F3) },
 	{}
 };
@@ -106,6 +113,7 @@ static const struct pci_device_id amd_nb_link_ids[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_DF_F4) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F4) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M00H_DF_F4) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_DF_F4) },
 	{}
 };
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 2dc75df1437fb..8f9a459e16718 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -576,6 +576,8 @@
 #define PCI_DEVICE_ID_AMD_19H_M60H_DF_F3	0x14e3
 #define PCI_DEVICE_ID_AMD_19H_M70H_DF_F3	0x14f3
 #define PCI_DEVICE_ID_AMD_19H_M78H_DF_F3	0x12fb
+#define PCI_DEVICE_ID_AMD_1AH_M00H_DF_F3	0x12c3
+#define PCI_DEVICE_ID_AMD_1AH_M20H_DF_F3	0x16fb
 #define PCI_DEVICE_ID_AMD_MI200_DF_F3		0x14d3
 #define PCI_DEVICE_ID_AMD_CNB17H_F3		0x1703
 #define PCI_DEVICE_ID_AMD_LANCE			0x2000
From: Yicong Yang <yangyicong@hisilicon.com>
[ Upstream commit 0242737dc4eb9f6e9a5ea594b3f93efa0b12f28d ]
Some HiSilicon SMMU PMCG implementations suffer from erratum 162001900: the PMU disable control sometimes fails to disable the counters. This leads to erroneous or inaccurate data, because the counters are still counting the event used in the last perf session when the next session enables them.
Fix this by hardening the global disable process: before disabling the PMU, write an invalid event type (0xffff) to each used counter to forcibly stop it. Correspondingly, restore each event on pmu::pmu_enable().
Signed-off-by: Yicong Yang <yangyicong@hisilicon.com>
Link: https://lore.kernel.org/r/20230814124012.58013-1-yangyicong@huawei.com
Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 Documentation/arch/arm64/silicon-errata.rst |  3 ++
 drivers/acpi/arm64/iort.c                   |  5 ++-
 drivers/perf/arm_smmuv3_pmu.c               | 46 ++++++++++++++++++++-
 include/linux/acpi_iort.h                   |  1 +
 4 files changed, 53 insertions(+), 2 deletions(-)
diff --git a/Documentation/arch/arm64/silicon-errata.rst b/Documentation/arch/arm64/silicon-errata.rst
index bedd3a1d7b423..0ac452333eb4f 100644
--- a/Documentation/arch/arm64/silicon-errata.rst
+++ b/Documentation/arch/arm64/silicon-errata.rst
@@ -198,6 +198,9 @@ stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | Hisilicon      | Hip08 SMMU PMCG | #162001800      | N/A                         |
 +----------------+-----------------+-----------------+-----------------------------+
+| Hisilicon      | Hip08 SMMU PMCG | #162001900      | N/A                         |
+|                | Hip09 SMMU PMCG |                 |                             |
++----------------+-----------------+-----------------+-----------------------------+
 +----------------+-----------------+-----------------+-----------------------------+
 | Qualcomm Tech. | Kryo/Falkor v1  | E1003           | QCOM_FALKOR_ERRATUM_1003    |
 +----------------+-----------------+-----------------+-----------------------------+
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index 56d887323ae52..6496ff5a6ba20 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -1708,7 +1708,10 @@ static void __init arm_smmu_v3_pmcg_init_resources(struct resource *res,
 static struct acpi_platform_list pmcg_plat_info[] __initdata = {
 	/* HiSilicon Hip08 Platform */
 	{"HISI  ", "HIP08   ", 0, ACPI_SIG_IORT, greater_than_or_equal,
-	 "Erratum #162001800", IORT_SMMU_V3_PMCG_HISI_HIP08},
+	 "Erratum #162001800, Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP08},
+	/* HiSilicon Hip09 Platform */
+	{"HISI  ", "HIP09   ", 0, ACPI_SIG_IORT, greater_than_or_equal,
+	 "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
 	{ }
 };
diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c
index 25a269d431e45..0e17c57ddb876 100644
--- a/drivers/perf/arm_smmuv3_pmu.c
+++ b/drivers/perf/arm_smmuv3_pmu.c
@@ -115,6 +115,7 @@
 #define SMMU_PMCG_PA_SHIFT		12

 #define SMMU_PMCG_EVCNTR_RDONLY		BIT(0)
+#define SMMU_PMCG_HARDEN_DISABLE	BIT(1)

 static int cpuhp_state_num;

@@ -159,6 +160,20 @@ static inline void smmu_pmu_enable(struct pmu *pmu)
 	writel(SMMU_PMCG_CR_ENABLE, smmu_pmu->reg_base + SMMU_PMCG_CR);
 }

+static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu,
+				       struct perf_event *event, int idx);
+
+static inline void smmu_pmu_enable_quirk_hip08_09(struct pmu *pmu)
+{
+	struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
+	unsigned int idx;
+
+	for_each_set_bit(idx, smmu_pmu->used_counters, smmu_pmu->num_counters)
+		smmu_pmu_apply_event_filter(smmu_pmu, smmu_pmu->events[idx], idx);
+
+	smmu_pmu_enable(pmu);
+}
+
 static inline void smmu_pmu_disable(struct pmu *pmu)
 {
 	struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
@@ -167,6 +182,22 @@ static inline void smmu_pmu_disable(struct pmu *pmu)
 	writel(0, smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL);
 }

+static inline void smmu_pmu_disable_quirk_hip08_09(struct pmu *pmu)
+{
+	struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
+	unsigned int idx;
+
+	/*
+	 * The global disable of PMU sometimes fail to stop the counting.
+	 * Harden this by writing an invalid event type to each used counter
+	 * to forcibly stop counting.
+	 */
+	for_each_set_bit(idx, smmu_pmu->used_counters, smmu_pmu->num_counters)
+		writel(0xffff, smmu_pmu->reg_base + SMMU_PMCG_EVTYPER(idx));
+
+	smmu_pmu_disable(pmu);
+}
+
 static inline void smmu_pmu_counter_set_value(struct smmu_pmu *smmu_pmu,
					      u32 idx, u64 value)
 {
@@ -765,7 +796,10 @@ static void smmu_pmu_get_acpi_options(struct smmu_pmu *smmu_pmu)
 	switch (model) {
 	case IORT_SMMU_V3_PMCG_HISI_HIP08:
 		/* HiSilicon Erratum 162001800 */
-		smmu_pmu->options |= SMMU_PMCG_EVCNTR_RDONLY;
+		smmu_pmu->options |= SMMU_PMCG_EVCNTR_RDONLY | SMMU_PMCG_HARDEN_DISABLE;
+		break;
+	case IORT_SMMU_V3_PMCG_HISI_HIP09:
+		smmu_pmu->options |= SMMU_PMCG_HARDEN_DISABLE;
 		break;
 	}

@@ -890,6 +924,16 @@ static int smmu_pmu_probe(struct platform_device *pdev)
 	if (!dev->of_node)
 		smmu_pmu_get_acpi_options(smmu_pmu);

+	/*
+	 * For platforms suffer this quirk, the PMU disable sometimes fails to
+	 * stop the counters. This will leads to inaccurate or error counting.
+	 * Forcibly disable the counters with these quirk handler.
+	 */
+	if (smmu_pmu->options & SMMU_PMCG_HARDEN_DISABLE) {
+		smmu_pmu->pmu.pmu_enable = smmu_pmu_enable_quirk_hip08_09;
+		smmu_pmu->pmu.pmu_disable = smmu_pmu_disable_quirk_hip08_09;
+	}
+
 	/* Pick one CPU to be the preferred one to use */
 	smmu_pmu->on_cpu = raw_smp_processor_id();
 	WARN_ON(irq_set_affinity(smmu_pmu->irq, cpumask_of(smmu_pmu->on_cpu)));
diff --git a/include/linux/acpi_iort.h b/include/linux/acpi_iort.h
index ee7cb6aaff718..1cb65592c95dd 100644
--- a/include/linux/acpi_iort.h
+++ b/include/linux/acpi_iort.h
@@ -21,6 +21,7 @@
  */
 #define IORT_SMMU_V3_PMCG_GENERIC	0x00000000 /* Generic SMMUv3 PMCG */
 #define IORT_SMMU_V3_PMCG_HISI_HIP08	0x00000001 /* HiSilicon HIP08 PMCG */
+#define IORT_SMMU_V3_PMCG_HISI_HIP09	0x00000002 /* HiSilicon HIP09 PMCG */

 int iort_register_domain_token(int trans_id, phys_addr_t base,
			       struct fwnode_handle *fw_node);
From: Alexander Gordeev <agordeev@linux.ibm.com>
[ Upstream commit 8ddccc8a7d06f7ea4d8579970c95609d1b1de77b ]
The separate vmalloc area size check against _REGION2_SIZE is needed in case the user provides an insanely large value with the vmalloc= kernel command line parameter. That could lead to an overflow and to selecting 3 page table levels instead of 4.
Use size_add() for the overflow check and get rid of the extra vmalloc area check.
With the current values of CONFIG_MAX_PHYSMEM_BITS and PAGES_PER_SECTION, the sum of the maximal possible sizes of the identity mapping and the vmemmap area (derived from these macros) plus the modules area size MODULES_LEN cannot overflow. Thus, that sum is used as the first addend and the vmalloc area size as the second addend for size_add().
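For reference, size_add() from <linux/overflow.h> saturates at SIZE_MAX instead of wrapping, which is what makes the single comparison against _REGION2_SIZE sufficient (essentially):

    static inline size_t __must_check size_add(size_t addend1, size_t addend2)
    {
            size_t bytes;

            /* Saturate on overflow: SIZE_MAX always exceeds _REGION2_SIZE. */
            if (check_add_overflow(addend1, addend2, &bytes))
                    return SIZE_MAX;
            return bytes;
    }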
Suggested-by: Heiko Carstens <hca@linux.ibm.com>
Acked-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 arch/s390/boot/startup.c | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)
diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c
index 64bd7ac3e35d1..f8d0550e5d2af 100644
--- a/arch/s390/boot/startup.c
+++ b/arch/s390/boot/startup.c
@@ -176,6 +176,7 @@ static unsigned long setup_kernel_memory_layout(void)
 	unsigned long asce_limit;
 	unsigned long rte_size;
 	unsigned long pages;
+	unsigned long vsize;
 	unsigned long vmax;

 	pages = ident_map_size / PAGE_SIZE;
@@ -183,11 +184,9 @@ static unsigned long setup_kernel_memory_layout(void)
 	vmemmap_size = SECTION_ALIGN_UP(pages) * sizeof(struct page);

 	/* choose kernel address space layout: 4 or 3 levels. */
-	vmemmap_start = round_up(ident_map_size, _REGION3_SIZE);
-	if (IS_ENABLED(CONFIG_KASAN) ||
-	    vmalloc_size > _REGION2_SIZE ||
-	    vmemmap_start + vmemmap_size + vmalloc_size + MODULES_LEN >
-		    _REGION2_SIZE) {
+	vsize = round_up(ident_map_size, _REGION3_SIZE) + vmemmap_size + MODULES_LEN;
+	vsize = size_add(vsize, vmalloc_size);
+	if (IS_ENABLED(CONFIG_KASAN) || (vsize > _REGION2_SIZE)) {
 		asce_limit = _REGION1_SIZE;
 		rte_size = _REGION2_SIZE;
 	} else {
From: Ding Xiang <dingxiang@cmss.chinamobile.com>
[ Upstream commit 46862da15e37efedb7d2d21e167f506c0b533772 ]
If memcmp() does not return 0, "zeros" needs to be freed to prevent a memory leak.
Signed-off-by: Ding Xiang <dingxiang@cmss.chinamobile.com>
Link: https://lore.kernel.org/r/20230815074915.245528-1-dingxiang@cmss.chinamobile...
Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 tools/testing/selftests/arm64/signal/testcases/zt_regs.c | 1 +
 1 file changed, 1 insertion(+)
diff --git a/tools/testing/selftests/arm64/signal/testcases/zt_regs.c b/tools/testing/selftests/arm64/signal/testcases/zt_regs.c
index e1eb4d5c027ab..2e384d731618b 100644
--- a/tools/testing/selftests/arm64/signal/testcases/zt_regs.c
+++ b/tools/testing/selftests/arm64/signal/testcases/zt_regs.c
@@ -65,6 +65,7 @@ int zt_regs_run(struct tdescr *td, siginfo_t *si, ucontext_t *uc)
 	if (memcmp(zeros, (char *)zt + ZT_SIG_REGS_OFFSET,
 		   ZT_SIG_REGS_SIZE(zt->nregs)) != 0) {
 		fprintf(stderr, "ZT data invalid\n");
+		free(zeros);
 		return 1;
 	}
From: Xu Yang <xu.yang_2@nxp.com>
[ Upstream commit e89ecd8368860bf05437eabd07d292c316221cfc ]
For i.MX8MP, we cannot ensure that the cycle counter overflows at least 4 times as often as other events. Because the byte counters count for any configured event, they overflow more often, and if a byte counter overflows, the related counters stop, since they share COUNTER_CNTL. We can speed up the cycle counter's overflow frequency by setting the counter parameter (CP) field of the cycle counter. This way, we avoid stopping the byte counters when no interrupt has arrived, and the byte counters can be fetched or updated on each cycle counter overflow interrupt.
Because we initialize the CP field to shorten the counter0 overflow time, the cycle counter starts counting from a fixed base value each time. We need to remove that base from the result as well, so that the cycle counter yields a precise result.
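Concretely, a worked example based on the values in this patch:

    base  = 0xf0 << 24        = 0xF0000000   /* cycle counter start value */
    span  = 2^32 - base       = 0x10000000   /* overflows 16x sooner */
    count = raw & 0x0FFFFFFF                 /* CYCLES_COUNTER_MASK strips the base */

With CP set to 0xf0, the cycle counter starts each period at 0xF0000000 and overflows after only 0x10000000 cycles; masking the raw value with CYCLES_COUNTER_MASK removes that base before it is accumulated into event->count.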
Signed-off-by: Xu Yang <xu.yang_2@nxp.com>
Reviewed-by: Frank Li <Frank.Li@nxp.com>
Link: https://lore.kernel.org/r/20230811015438.1999307-1-xu.yang_2@nxp.com
Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/perf/fsl_imx8_ddr_perf.c | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+)
diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c
index 5222ba1e79d0e..b022755e6ab91 100644
--- a/drivers/perf/fsl_imx8_ddr_perf.c
+++ b/drivers/perf/fsl_imx8_ddr_perf.c
@@ -28,6 +28,8 @@
 #define CNTL_CLEAR_MASK		0xFFFFFFFD
 #define CNTL_OVER_MASK		0xFFFFFFFE

+#define CNTL_CP_SHIFT		16
+#define CNTL_CP_MASK		(0xFF << CNTL_CP_SHIFT)
 #define CNTL_CSV_SHIFT		24
 #define CNTL_CSV_MASK		(0xFFU << CNTL_CSV_SHIFT)

@@ -35,6 +37,8 @@
 #define EVENT_CYCLES_COUNTER	0
 #define NUM_COUNTERS		4

+/* For removing bias if cycle counter CNTL.CP is set to 0xf0 */
+#define CYCLES_COUNTER_MASK	0x0FFFFFFF
 #define AXI_MASKING_REVERT	0xffff0000	/* AXI_MASKING(MSB 16bits) + AXI_ID(LSB 16bits) */

 #define to_ddr_pmu(p) container_of(p, struct ddr_pmu, pmu)
@@ -427,6 +431,17 @@ static void ddr_perf_counter_enable(struct ddr_pmu *pmu, int config,
 		writel(0, pmu->base + reg);
 		val = CNTL_EN | CNTL_CLEAR;
 		val |= FIELD_PREP(CNTL_CSV_MASK, config);
+
+		/*
+		 * On i.MX8MP we need to bias the cycle counter to overflow more often.
+		 * We do this by initializing bits [23:16] of the counter value via the
+		 * COUNTER_CTRL Counter Parameter (CP) field.
+		 */
+		if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER_ENHANCED) {
+			if (counter == EVENT_CYCLES_COUNTER)
+				val |= FIELD_PREP(CNTL_CP_MASK, 0xf0);
+		}
+
 		writel(val, pmu->base + reg);
 	} else {
 		/* Disable counter */
@@ -466,6 +481,12 @@ static void ddr_perf_event_update(struct perf_event *event)
 	int ret;

 	new_raw_count = ddr_perf_read_counter(pmu, counter);
+	/* Remove the bias applied in ddr_perf_counter_enable(). */
+	if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER_ENHANCED) {
+		if (counter == EVENT_CYCLES_COUNTER)
+			new_raw_count &= CYCLES_COUNTER_MASK;
+	}
+
 	local64_add(new_raw_count, &event->count);
/*
From: Hans de Goede <hdegoede@redhat.com>
[ Upstream commit 8cf04bb321f036dd2e523e993897e0789bd5265c ]
Linux defaults to picking the non-working ACPI video backlight interface on the Apple iMac12,1 and iMac12,2.
Add a DMI quirk to pick the working native radeon_bl0 interface instead.
Link: https://gitlab.freedesktop.org/drm/amd/-/issues/1838
Link: https://gitlab.freedesktop.org/drm/amd/-/issues/2753
Signed-off-by: Hans de Goede <hdegoede@redhat.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/acpi/video_detect.c | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 0c376edd64fe1..442396f6ed1f9 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -495,6 +495,24 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
 		DMI_MATCH(DMI_PRODUCT_NAME, "iMac11,3"),
 		},
 	},
+	{
+	 /* https://gitlab.freedesktop.org/drm/amd/-/issues/1838 */
+	 .callback = video_detect_force_native,
+	 /* Apple iMac12,1 */
+	 .matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+		DMI_MATCH(DMI_PRODUCT_NAME, "iMac12,1"),
+		},
+	},
+	{
+	 /* https://gitlab.freedesktop.org/drm/amd/-/issues/2753 */
+	 .callback = video_detect_force_native,
+	 /* Apple iMac12,2 */
+	 .matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+		DMI_MATCH(DMI_PRODUCT_NAME, "iMac12,2"),
+		},
+	},
 	{
 	 /* https://bugzilla.redhat.com/show_bug.cgi?id=1217249 */
 	 .callback = video_detect_force_native,
From: Tomislav Novak <tnovak@meta.com>
[ Upstream commit d11a69873d9a7435fe6a48531e165ab80a8b1221 ]
Arm platforms use is_default_overflow_handler() to determine if the hw_breakpoint code should single-step over the breakpoint trigger or let the custom handler deal with it.
Since bpf_overflow_handler() currently isn't recognized as a default handler, attaching a BPF program to a PERF_TYPE_BREAKPOINT event causes it to keep firing (the instruction triggering the data abort exception is never skipped). For example:
# bpftrace -e 'watchpoint:0x10000:4:w { print("hit") }' -c ./test
Attaching 1 probe...
hit
hit
[...]
^C
(./test performs a single 4-byte store to 0x10000)
This patch replaces the check with uses_default_overflow_handler(), which accounts for the bpf_overflow_handler() case by also testing whether one of the perf_event_output functions gets invoked indirectly, via orig_overflow_handler.
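For context, a paraphrased sketch of why orig_overflow_handler exists (based on perf_event_set_bpf_handler() in kernel/events/core.c; the exact code is an assumption):

    /* Attaching a BPF program swaps the handler and stashes the
     * original, which bpf_overflow_handler() may still call through. */
    event->orig_overflow_handler = READ_ONCE(event->overflow_handler);
    WRITE_ONCE(event->overflow_handler, bpf_overflow_handler);

uses_default_overflow_handler() therefore also checks the stashed handler when CONFIG_BPF_SYSCALL is enabled.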
Signed-off-by: Tomislav Novak <tnovak@meta.com>
Tested-by: Samuel Gosselin <sgosselin@google.com> # arm64
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/linux-arm-kernel/20220923203644.2731604-1-tnovak@fb....
Link: https://lore.kernel.org/r/20230605191923.1219974-1-tnovak@meta.com
Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 arch/arm/kernel/hw_breakpoint.c   |  8 ++++----
 arch/arm64/kernel/hw_breakpoint.c |  4 ++--
 include/linux/perf_event.h        | 22 +++++++++++++++++++---
 3 files changed, 25 insertions(+), 9 deletions(-)
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 054e9199f30db..dc0fb7a813715 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -626,7 +626,7 @@ int hw_breakpoint_arch_parse(struct perf_event *bp,
 	hw->address &= ~alignment_mask;
 	hw->ctrl.len <<= offset;

-	if (is_default_overflow_handler(bp)) {
+	if (uses_default_overflow_handler(bp)) {
 		/*
 		 * Mismatch breakpoints are required for single-stepping
 		 * breakpoints.
@@ -798,7 +798,7 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr,
 		 * Otherwise, insert a temporary mismatch breakpoint so that
 		 * we can single-step over the watchpoint trigger.
 		 */
-		if (!is_default_overflow_handler(wp))
+		if (!uses_default_overflow_handler(wp))
 			continue;
 step:
 		enable_single_step(wp, instruction_pointer(regs));
@@ -811,7 +811,7 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr,
 		info->trigger = addr;
 		pr_debug("watchpoint fired: address = 0x%x\n", info->trigger);
 		perf_bp_event(wp, regs);
-		if (is_default_overflow_handler(wp))
+		if (uses_default_overflow_handler(wp))
 			enable_single_step(wp, instruction_pointer(regs));
 	}

@@ -886,7 +886,7 @@ static void breakpoint_handler(unsigned long unknown, struct pt_regs *regs)
 		info->trigger = addr;
 		pr_debug("breakpoint fired: address = 0x%x\n", addr);
 		perf_bp_event(bp, regs);
-		if (is_default_overflow_handler(bp))
+		if (uses_default_overflow_handler(bp))
 			enable_single_step(bp, addr);
 		goto unlock;
 	}
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index db2a1861bb978..35225632d70ad 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -654,7 +654,7 @@ static int breakpoint_handler(unsigned long unused, unsigned long esr,
 	perf_bp_event(bp, regs);

 	/* Do we need to handle the stepping? */
-	if (is_default_overflow_handler(bp))
+	if (uses_default_overflow_handler(bp))
 		step = 1;
 unlock:
 	rcu_read_unlock();
@@ -733,7 +733,7 @@ static u64 get_distance_from_watchpoint(unsigned long addr, u64 val,
 static int watchpoint_report(struct perf_event *wp, unsigned long addr,
 			     struct pt_regs *regs)
 {
-	int step = is_default_overflow_handler(wp);
+	int step = uses_default_overflow_handler(wp);
 	struct arch_hw_breakpoint *info = counter_arch_bp(wp);

 	info->trigger = addr;
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 2166a69e3bf2e..e657916c9509c 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1316,15 +1316,31 @@ extern int perf_event_output(struct perf_event *event,
 			     struct pt_regs *regs);

 static inline bool
-is_default_overflow_handler(struct perf_event *event)
+__is_default_overflow_handler(perf_overflow_handler_t overflow_handler)
 {
-	if (likely(event->overflow_handler == perf_event_output_forward))
+	if (likely(overflow_handler == perf_event_output_forward))
 		return true;
-	if (unlikely(event->overflow_handler == perf_event_output_backward))
+	if (unlikely(overflow_handler == perf_event_output_backward))
 		return true;
 	return false;
 }

+#define is_default_overflow_handler(event) \
+	__is_default_overflow_handler((event)->overflow_handler)
+
+#ifdef CONFIG_BPF_SYSCALL
+static inline bool uses_default_overflow_handler(struct perf_event *event)
+{
+	if (likely(is_default_overflow_handler(event)))
+		return true;
+
+	return __is_default_overflow_handler(event->orig_overflow_handler);
+}
+#else
+#define uses_default_overflow_handler(event) \
+	is_default_overflow_handler(event)
+#endif
+
 extern void
 perf_event_header__init_id(struct perf_event_header *header,
 			   struct perf_sample_data *data,
From: Mario Limonciello <mario.limonciello@amd.com>
[ Upstream commit 883cf0d4cf288313b71146ddebdf5d647b76c78b ]
If a badly constructed firmware includes multiple `ACPI_TYPE_PACKAGE` objects while evaluating the AMD LPS0 _DSM, there will be a memory leak. Explicitly guard against this.
Suggested-by: Bjorn Helgaas <helgaas@kernel.org>
Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/acpi/x86/s2idle.c | 6 ++++++
 1 file changed, 6 insertions(+)
diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
index ce62e61a9605e..9a179bb5823a0 100644
--- a/drivers/acpi/x86/s2idle.c
+++ b/drivers/acpi/x86/s2idle.c
@@ -113,6 +113,12 @@ static void lpi_device_get_constraints_amd(void)
 		union acpi_object *package = &out_obj->package.elements[i];

 		if (package->type == ACPI_TYPE_PACKAGE) {
+			if (lpi_constraints_table) {
+				acpi_handle_err(lps0_device_handle,
+						"Duplicate constraints list\n");
+				goto free_acpi_buffer;
+			}
+
 			lpi_constraints_table = kcalloc(package->package.count,
 							sizeof(*lpi_constraints_table),
 							GFP_KERNEL);
From: Zhangjin Wu <falcon@tinylab.org>
[ Upstream commit c388c9920da2679f62bec48d00ca9e80e9d0a364 ]
Kernel parameters allow passing two types of strings: one type looks like 'noapic', the other like 'panic=5'. The first type is passed as arguments to the init program, the second type is passed as environment variables of the init program.
When users pass kernel parameters like this:
noapic NOLIBC_TEST=syscall
our nolibc-test program uses the test setting from argv[1] and ignores the one from the NOLIBC_TEST environment variable; in the end, it prints the following line and ignores the whole test setting.
Ignoring unknown test name 'noapic'
Reversing the parsing order does solve the above issue:

    test = getenv("NOLIBC_TEST");
    if (!test)
        test = argv[1];
but it still doesn't work with kernel parameters like the following (without a NOLIBC_TEST environment variable):
noapic FOO=bar
To support all of the potential kernel parameters, let's verify the test setting from both argv[1] and the NOLIBC_TEST environment variable.
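With this change, both styles of command line select the intended tests (illustrative):

    noapic NOLIBC_TEST=syscall    -> 'noapic' fails validation, the environment variable is used
    syscall:1-10 FOO=bar          -> argv[1] is a valid setting and is used directly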
Reviewed-by: Thomas Weißschuh <linux@weissschuh.net>
Signed-off-by: Zhangjin Wu <falcon@tinylab.org>
Signed-off-by: Willy Tarreau <w@1wt.eu>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 tools/testing/selftests/nolibc/nolibc-test.c | 33 ++++++++++++++++++--
 1 file changed, 31 insertions(+), 2 deletions(-)
diff --git a/tools/testing/selftests/nolibc/nolibc-test.c b/tools/testing/selftests/nolibc/nolibc-test.c
index 486334981e601..55628a25df0a3 100644
--- a/tools/testing/selftests/nolibc/nolibc-test.c
+++ b/tools/testing/selftests/nolibc/nolibc-test.c
@@ -939,6 +939,35 @@ static const struct test test_names[] = {
 	{ 0 }
 };

+int is_setting_valid(char *test)
+{
+	int idx, len, test_len, valid = 0;
+	char delimiter;
+
+	if (!test)
+		return valid;
+
+	test_len = strlen(test);
+
+	for (idx = 0; test_names[idx].name; idx++) {
+		len = strlen(test_names[idx].name);
+		if (test_len < len)
+			continue;
+
+		if (strncmp(test, test_names[idx].name, len) != 0)
+			continue;
+
+		delimiter = test[len];
+		if (delimiter != ':' && delimiter != ',' && delimiter != '\0')
+			continue;
+
+		valid = 1;
+		break;
+	}
+
+	return valid;
+}
+
 int main(int argc, char **argv, char **envp)
 {
 	int min = 0;
@@ -964,10 +993,10 @@ int main(int argc, char **argv, char **envp)
 	 * syscall:5-15[:.*],stdlib:8-10
 	 */
 	test = argv[1];
-	if (!test)
+	if (!is_setting_valid(test))
 		test = getenv("NOLIBC_TEST");

-	if (test) {
+	if (is_setting_valid(test)) {
 		char *comma, *colon, *dash, *value;
do {
From: Thomas Weißschuh <linux@weissschuh.net>
[ Upstream commit 9c5e490093e83e165022e0311bd7df5aa06cc860 ]
If read() fails and returns -1 (or returns garbage for some other reason), buf would be accessed out of bounds. Only use the return value of read() after it has been validated.
Signed-off-by: Thomas Weißschuh <linux@weissschuh.net>
Signed-off-by: Willy Tarreau <w@1wt.eu>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 tools/testing/selftests/nolibc/nolibc-test.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tools/testing/selftests/nolibc/nolibc-test.c b/tools/testing/selftests/nolibc/nolibc-test.c
index 55628a25df0a3..8e7750e2eb97c 100644
--- a/tools/testing/selftests/nolibc/nolibc-test.c
+++ b/tools/testing/selftests/nolibc/nolibc-test.c
@@ -769,7 +769,6 @@ static int expect_vfprintf(int llen, size_t c, const char *expected, const char
 	lseek(fd, 0, SEEK_SET);

 	r = read(fd, buf, sizeof(buf) - 1);
-	buf[r] = '\0';

 	fclose(memfile);

@@ -779,6 +778,7 @@ static int expect_vfprintf(int llen, size_t c, const char *expected, const char
 		return 1;
 	}

+	buf[r] = '\0';
 	llen += printf(" \"%s\" = \"%s\"", expected, buf);
 	ret = strncmp(expected, buf, c);