On some platforms, platform devices are created with invalid names, for
example "HID-SENSOR-INT-020b?.39.auto" instead of
"HID-SENSOR-INT-020b.39.auto".
The string contains invalid characters, so the driver meant to handle
this custom sensor fails to load properly. It is also a problem for
user space tools that parse device names from ftrace and dmesg.
This happens because the string real_usage is not NULL-terminated but
is printed with %s to form the device name.
To address this, initialize the real_usage string with 0s.
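As a rough illustration of the failure mode, here is a standalone user
space sketch (not the driver code; the buffer size and copy method are
only stand-ins): a fixed-size buffer filled without a terminating NUL
can print trailing garbage through %s, while a zero-initialized array
is always terminated.

#include <stdio.h>
#include <string.h>

#define USAGE_LEN 26	/* arbitrary stand-in for HID_SENSOR_USAGE_LENGTH */

int main(void)
{
	const char *usage = "HID-SENSOR-INT-020b";
	char bad[USAGE_LEN];		/* uninitialized, like the old code */
	char good[USAGE_LEN] = { 0 };	/* zero-filled, like the fix */

	/* copying exactly strlen() bytes leaves no terminating NUL in "bad" */
	memcpy(bad, usage, strlen(usage));
	memcpy(good, usage, strlen(usage));

	printf("%s.39.auto\n", bad);	/* may print stray bytes after the usage id */
	printf("%s.39.auto\n", good);	/* always "HID-SENSOR-INT-020b.39.auto" */
	return 0;
}

With the one-line change below, real_usage starts out fully zeroed, so
whatever is copied into it is always followed by a NUL before the
device name is built from it.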
Reported-and-tested-by: Todd Brandt <todd.e.brandt(a)linux.intel.com>
Link: https://bugzilla.kernel.org/show_bug.cgi?id=217169
Fixes: 98c062e82451 ("HID: hid-sensor-custom: Allow more custom iio sensors")
Cc: stable(a)vger.kernel.org
Suggested-by: Philipp Jungkamp <p.jungkamp(a)gmx.net>
Signed-off-by: Philipp Jungkamp <p.jungkamp(a)gmx.net>
Signed-off-by: Todd Brandt <todd.e.brandt(a)intel.com>
Reviewed-by: Andi Shyti <andi.shyti(a)kernel.org>
Reviewed-by: Jonathan Cameron <Jonathan.Cameron(a)huawei.com>
---
Changes in v4:
- add the Fixes line
- add patch version change list
Changes in v3:
- update the changelog
- add proper reviewed/signed/suggested links
Changes in v2:
- update the changelog
drivers/hid/hid-sensor-custom.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/hid/hid-sensor-custom.c b/drivers/hid/hid-sensor-custom.c
index 3e3f89e01d81..d85398721659 100644
--- a/drivers/hid/hid-sensor-custom.c
+++ b/drivers/hid/hid-sensor-custom.c
@@ -940,7 +940,7 @@ hid_sensor_register_platform_device(struct platform_device *pdev,
struct hid_sensor_hub_device *hsdev,
const struct hid_sensor_custom_match *match)
{
- char real_usage[HID_SENSOR_USAGE_LENGTH];
+ char real_usage[HID_SENSOR_USAGE_LENGTH] = { 0 };
struct platform_device *custom_pdev;
const char *dev_name;
char *c;
--
2.17.1
Read mmu_invalidate_seq before dropping the mmap_lock so that KVM can
detect if the results of vma_lookup() (e.g. vma_shift) become stale
before it acquires kvm->mmu_lock. This fixes a theoretical bug where a
VMA could be changed by userspace after vma_lookup() and before KVM
reads the mmu_invalidate_seq, causing KVM to install page table entries
based on a (possibly) no-longer-valid vma_shift.
Re-order the MMU cache top-up to earlier in kvm_riscv_gstage_map() so
that it is not done after KVM has read mmu_invalidate_seq (i.e. to
avoid inducing spurious fault retries).
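For orientation, a condensed sketch of the resulting ordering in
kvm_riscv_gstage_map() (not the literal diff below; error paths and the
pfn bookkeeping are simplified), relying on KVM's generic
mmu_invalidate_retry() check under kvm->mmu_lock:

	/* Top up the page-table cache first; it can sleep, so it must
	 * not sit between the mmu_seq read and the retry check below. */
	ret = kvm_mmu_topup_memory_cache(pcache, gstage_pgd_levels);
	if (ret)
		return ret;

	mmap_read_lock(current->mm);
	vma = vma_lookup(current->mm, hva);
	/* ... derive vma_pagesize / the mapping size from the VMA ... */

	/* Sample the invalidation sequence while the VMA cannot change. */
	mmu_seq = kvm->mmu_invalidate_seq;
	mmap_read_unlock(current->mm);

	hfn = gfn_to_pfn_prot(kvm, gfn, is_write, &writable);

	spin_lock(&kvm->mmu_lock);
	if (mmu_invalidate_retry(kvm, mmu_seq)) {
		/* An invalidation ran in between: drop the pfn and let
		 * the guest re-fault so the VMA is looked up again. */
		spin_unlock(&kvm->mmu_lock);
		kvm_release_pfn_clean(hfn);
		return ret;
	}
	/* ... install the g-stage mapping under kvm->mmu_lock ... */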
It's unlikely that any sane userspace currently modifies VMAs in such a
way as to trigger this race. And even with directed testing I was unable
to reproduce it. But a sufficiently motivated host userspace might be
able to exploit this race.
Note that KVM/ARM had the same bug; it was fixed in a separate,
near-identical patch (see Link).
Link: https://lore.kernel.org/kvm/20230313235454.2964067-1-dmatlack@google.com/
Fixes: 9955371cc014 ("RISC-V: KVM: Implement MMU notifiers")
Cc: stable(a)vger.kernel.org
Signed-off-by: David Matlack <dmatlack(a)google.com>
---
Note: Compile-tested only.
arch/riscv/kvm/mmu.c | 25 ++++++++++++++++---------
1 file changed, 16 insertions(+), 9 deletions(-)
diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c
index 78211aed36fa..46d692995830 100644
--- a/arch/riscv/kvm/mmu.c
+++ b/arch/riscv/kvm/mmu.c
@@ -628,6 +628,13 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
!(memslot->flags & KVM_MEM_READONLY)) ? true : false;
unsigned long vma_pagesize, mmu_seq;
+ /* We need minimum second+third level pages */
+ ret = kvm_mmu_topup_memory_cache(pcache, gstage_pgd_levels);
+ if (ret) {
+ kvm_err("Failed to topup G-stage cache\n");
+ return ret;
+ }
+
mmap_read_lock(current->mm);
vma = vma_lookup(current->mm, hva);
@@ -648,6 +655,15 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
if (vma_pagesize == PMD_SIZE || vma_pagesize == PUD_SIZE)
gfn = (gpa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;
+ /*
+ * Read mmu_invalidate_seq so that KVM can detect if the results of
+ * vma_lookup() or gfn_to_pfn_prot() become stale prior to acquiring
+ * kvm->mmu_lock.
+ *
+ * Rely on mmap_read_unlock() for an implicit smp_rmb(), which pairs
+ * with the smp_wmb() in kvm_mmu_invalidate_end().
+ */
+ mmu_seq = kvm->mmu_invalidate_seq;
mmap_read_unlock(current->mm);
if (vma_pagesize != PUD_SIZE &&
@@ -657,15 +673,6 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
return -EFAULT;
}
- /* We need minimum second+third level pages */
- ret = kvm_mmu_topup_memory_cache(pcache, gstage_pgd_levels);
- if (ret) {
- kvm_err("Failed to topup G-stage cache\n");
- return ret;
- }
-
- mmu_seq = kvm->mmu_invalidate_seq;
-
hfn = gfn_to_pfn_prot(kvm, gfn, is_write, &writable);
if (hfn == KVM_PFN_ERR_HWPOISON) {
send_sig_mceerr(BUS_MCEERR_AR, (void __user *)hva,
base-commit: eeac8ede17557680855031c6f305ece2378af326
--
2.40.0.rc2.332.ga46443480c-goog
The crypto_unregister_alg() function expects callers to ensure that any
algorithm that is unregistered has a refcnt of exactly 1, and issues a
BUG_ON() if this is not the case. However, there are in fact drivers that
will call crypto_unregister_alg() without ensuring that the refcnt has been
lowered first, most notably on system shutdown. This causes the BUG_ON() to
trigger, which prevents a clean shutdown and hangs the system.
To avoid such hangs on shutdown, demote the BUG_ON() in
crypto_unregister_alg() to a WARN_ON() with early return. Cc stable because
this problem was observed on a 6.2 kernel, cf the link below.
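For context, here is a hypothetical sketch of the kind of caller
described above: a driver whose shutdown handler unregisters an
algorithm while a transform allocated against it may still be alive, so
cra_refcnt can be above 1 at that point. All names are made up.

#include <linux/crypto.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static struct crypto_alg my_alg;	/* registered earlier via crypto_register_alg() */

static void my_accel_shutdown(struct platform_device *pdev)
{
	/*
	 * If any user still holds a tfm backed by my_alg, cra_refcnt != 1
	 * here. The old BUG_ON() then stopped the CPU and the machine never
	 * finished shutting down; with WARN_ON() plus the early return the
	 * stale reference is only logged.
	 */
	crypto_unregister_alg(&my_alg);
}

static struct platform_driver my_accel_driver = {
	.driver		= { .name = "my-crypto-accel" },
	.shutdown	= my_accel_shutdown,
};
module_platform_driver(my_accel_driver);
MODULE_LICENSE("GPL");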
Link: https://lore.kernel.org/r/87r0tyq8ph.fsf@toke.dk
Cc: stable(a)vger.kernel.org
Signed-off-by: Toke Høiland-Jørgensen <toke(a)redhat.com>
---
v2:
- Return early if the WARN_ON() triggers
crypto/algapi.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/crypto/algapi.c b/crypto/algapi.c
index d08f864f08be..9de0677b3643 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -493,7 +493,9 @@ void crypto_unregister_alg(struct crypto_alg *alg)
if (WARN(ret, "Algorithm %s is not registered", alg->cra_driver_name))
return;
- BUG_ON(refcount_read(&alg->cra_refcnt) != 1);
+ if (WARN_ON(refcount_read(&alg->cra_refcnt) != 1))
+ return;
+
if (alg->cra_destroy)
alg->cra_destroy(alg);
--
2.39.2
Hi, Thorsten here, the Linux kernel's regression tracker.
I noticed a regression report in bugzilla.kernel.org. As many (most?)
kernel developers don't keep an eye on it, I decided to forward it by
mail (note, the reporter *is not* CCed to this mail, see [1]).
Note, it's a stable regression, so it's a bit unclear who's responsible.
I decided to forward it nevertheless, as some of you might want to be
aware of this or might even have an idea what's wrong.
Quoting from https://bugzilla.kernel.org/show_bug.cgi?id=217225 :
> runrin 2023-03-21 17:49:34 UTC
>
> I occasionally manage my fan speed manually by changing the `level' in
> `/proc/acpi/ibm/fan'. As of upgrading to 6.1.20, I am now getting the
> following error when attempting to change the fan speed:
>
> $ echo 'level auto' > /proc/acpi/ibm/fan
> echo: write error: invalid argument
>
> While troubleshooting, I tried double checking that fan_control had been
> set to 1 as noted in the documentation.
>
> I recently upgraded my kernel a few versions at once, so unfortunately i
> can't be sure when this bug originated. It was working with 6.1.15, and
> since upgrading to 6.1.20 it is no longer working.
See the ticket for more details. The reporter was already asked to
bisect; I'll also ask them to consider testing the latest mainline.
[TLDR for the rest of this mail: I'm adding this report to the list of
tracked Linux kernel regressions; the text you find below is based on a
few template paragraphs you might have encountered already in similar
form.]
BTW, let me use this mail to also add the report to the list of tracked
regressions to ensure it doesn't fall through the cracks:
#regzbot introduced: v6.1.15..v6.1.20
https://bugzilla.kernel.org/show_bug.cgi?id=217225
#regzbot title: platform: thinkpad: can no longer alter /proc/acpi/ibm/fan
#regzbot ignore-activity
This isn't a regression? This issue or a fix for it is already being
discussed somewhere else? It was fixed already? You want to clarify when
the regression started to happen? Or point out I got the title or
something else totally wrong? Then just reply and tell me -- ideally
while also telling regzbot about it, as explained by the page listed in
the footer of this mail.
Developers: When fixing the issue, remember to add 'Link:' tags pointing
to the report (e.g. the bugzilla ticket and maybe this mail as well, if
this thread sees some discussion). See the page linked in the footer for
details.
Ciao, Thorsten (wearing his 'the Linux kernel's regression tracker' hat)
--
Everything you wanna know about Linux kernel regression tracking:
https://linux-regtracking.leemhuis.info/about/#tldr
If I did something stupid, please tell me, as explained on that page.
[1] because bugzilla.kernel.org tells users upon registration their
"email address will never be displayed to logged out users"
From: Martin Leung <Martin.Leung(a)amd.com>
[Why & How]
While trying to fix a nullptr dereference on VMs, memory was
accidentally allocated twice for the non-VM case. Remove the extra
link_srv creation, since dc_construct_ctx is called in both the VM and
non-VM cases.
Also add a proper failure check in case the kzalloc fails.
Cc: stable(a)vger.kernel.org
Cc: Mario Limonciello <mario.limonciello(a)amd.com>
Reviewed-by: Leo Ma <Hanghong.Ma(a)amd.com>
Acked-by: Qingqing Zhuo <qingqing.zhuo(a)amd.com>
Signed-off-by: Martin Leung <Martin.Leung(a)amd.com>
---
drivers/gpu/drm/amd/display/dc/core/dc.c | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 40f2e174c524..52564b93f7eb 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -887,7 +887,10 @@ static bool dc_construct_ctx(struct dc *dc,
}
dc->ctx = dc_ctx;
+
dc->link_srv = link_create_link_service();
+ if (!dc->link_srv)
+ return false;
return true;
}
@@ -986,8 +989,6 @@ static bool dc_construct(struct dc *dc,
goto fail;
}
- dc->link_srv = link_create_link_service();
-
dc->res_pool = dc_create_resource_pool(dc, init_params, dc_ctx->dce_version);
if (!dc->res_pool)
goto fail;
--
2.34.1
From: Nicholas Kazlauskas <nicholas.kazlauskas(a)amd.com>
[Why]
While scanning the top_pipe connections we can run into a case where
the bottom pipe is still connected to a top_pipe but with a NULL
plane_state.
[How]
Treat a NULL plane_state the same as the plane being invisible for
pipe cursor disable logic.
Cc: stable(a)vger.kernel.org
Cc: Mario Limonciello <mario.limonciello(a)amd.com>
Reviewed-by: Charlene Liu <Charlene.Liu(a)amd.com>
Acked-by: Qingqing Zhuo <qingqing.zhuo(a)amd.com>
Signed-off-by: Nicholas Kazlauskas <nicholas.kazlauskas(a)amd.com>
---
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 7f9cceb49f4e..46ca88741cb8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -3385,7 +3385,9 @@ static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
for (test_pipe = pipe_ctx->top_pipe; test_pipe;
test_pipe = test_pipe->top_pipe) {
// Skip invisible layer and pipe-split plane on same layer
- if (!test_pipe->plane_state->visible || test_pipe->plane_state->layer_index == cur_layer)
+ if (!test_pipe->plane_state ||
+ !test_pipe->plane_state->visible ||
+ test_pipe->plane_state->layer_index == cur_layer)
continue;
r2 = test_pipe->plane_res.scl_data.recout;
--
2.34.1
It looks like HDMI audio stopped working in 5.17-rc1. I ran a bisect,
which points to 636110411ca726f19ef8e87b0be51bb9a4cdef06. I built
5.17.14 with that commit reverted and it restored HDMI audio, but the
revert no longer applies cleanly from 5.18 onward.
From what I can tell it looks like -ENOTSUPP is returned from
snd_soc_dai_set_stream for hdmi1 and hdmi2 now. I'm not sure if that's
expected, but I made the following change and I have working HDMI
audio now. https://gist.github.com/jmontleon/4780154c309f956d97ca9a304a00da3f
Thank you,
Jason Montleon
Make num_cache_leaves a per-CPU variable. Otherwise,
populate_cache_leaves() fails on systems with an asymmetric number of
subleaves in CPUID leaf 0x4. Intel Meteor Lake is an example of such a
system.
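For reference, a standalone user space sketch (not kernel code) of what
"number of subleaves in CPUID leaf 0x4" means: subleaves are enumerated
until the cache-type field in EAX reads 0, mirroring what
find_num_cache_leaves() does for this leaf.

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned int leaves = 0;

	/* walk CPUID(0x4) subleaves until the cache type (EAX[4:0]) is 0 */
	while (__get_cpuid_count(4, leaves, &eax, &ebx, &ecx, &edx) &&
	       (eax & 0x1f) != 0)
		leaves++;

	printf("CPUID leaf 0x4: %u cache leaves on this CPU\n", leaves);
	return 0;
}

On an affected hybrid system, pinning this to different core types
(e.g. with taskset) can report different counts, which is why a single
global num_cache_leaves taken from one CPU is not enough.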
Cc: Srinivas Pandruvada <srinivas.pandruvada(a)linux.intel.com>
Cc: Len Brown <len.brown(a)intel.com>
Cc: "Rafael J. Wysocki" <rafael.j.wysocki(a)intel.com>
Cc: Zhang Rui <rui.zhang(a)intel.com>
Cc: Chen Yu <yu.c.chen(a)intel.com>
Cc: stable(a)vger.kernel.org
Reviewed-by: Len Brown <len.brown(a)intel.com>
Signed-off-by: Ricardo Neri <ricardo.neri-calderon(a)linux.intel.com>
---
After this change, all CPUs will traverse CPUID leaf 0x4 when booted for
the first time. On systems with symmetric cache topologies this is
useless work.
Creating a list of processor models that have asymmetric cache
topologies was considered. The burden of maintaining such a list would
outweigh the performance benefit of skipping this extra step.
---
arch/x86/kernel/cpu/cacheinfo.c | 48 ++++++++++++++++++++-------------
1 file changed, 29 insertions(+), 19 deletions(-)
diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c
index 4063e8991211..6ad51657c853 100644
--- a/arch/x86/kernel/cpu/cacheinfo.c
+++ b/arch/x86/kernel/cpu/cacheinfo.c
@@ -176,7 +176,18 @@ struct _cpuid4_info_regs {
struct amd_northbridge *nb;
};
-static unsigned short num_cache_leaves;
+static DEFINE_PER_CPU(unsigned short, num_cache_leaves);
+
+static inline unsigned short get_num_cache_leaves(unsigned int cpu)
+{
+ return per_cpu(num_cache_leaves, cpu);
+}
+
+static inline void
+set_num_cache_leaves(unsigned short nr_leaves, unsigned int cpu)
+{
+ per_cpu(num_cache_leaves, cpu) = nr_leaves;
+}
/* AMD doesn't have CPUID4. Emulate it here to report the same
information to the user. This makes some assumptions about the machine:
@@ -716,19 +727,21 @@ void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu)
void init_amd_cacheinfo(struct cpuinfo_x86 *c)
{
+ unsigned int cpu = c->cpu_index;
+
if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
- num_cache_leaves = find_num_cache_leaves(c);
+ set_num_cache_leaves(find_num_cache_leaves(c), cpu);
} else if (c->extended_cpuid_level >= 0x80000006) {
if (cpuid_edx(0x80000006) & 0xf000)
- num_cache_leaves = 4;
+ set_num_cache_leaves(4, cpu);
else
- num_cache_leaves = 3;
+ set_num_cache_leaves(3, cpu);
}
}
void init_hygon_cacheinfo(struct cpuinfo_x86 *c)
{
- num_cache_leaves = find_num_cache_leaves(c);
+ set_num_cache_leaves(find_num_cache_leaves(c), c->cpu_index);
}
void init_intel_cacheinfo(struct cpuinfo_x86 *c)
@@ -738,24 +751,21 @@ void init_intel_cacheinfo(struct cpuinfo_x86 *c)
unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
-#ifdef CONFIG_SMP
unsigned int cpu = c->cpu_index;
-#endif
if (c->cpuid_level > 3) {
- static int is_initialized;
-
- if (is_initialized == 0) {
- /* Init num_cache_leaves from boot CPU */
- num_cache_leaves = find_num_cache_leaves(c);
- is_initialized++;
- }
+ /*
+ * There should be at least one leaf. A non-zero value means
+ * that the number of leaves has been initialized.
+ */
+ if (!get_num_cache_leaves(cpu))
+ set_num_cache_leaves(find_num_cache_leaves(c), cpu);
/*
* Whenever possible use cpuid(4), deterministic cache
* parameters cpuid leaf to find the cache details
*/
- for (i = 0; i < num_cache_leaves; i++) {
+ for (i = 0; i < get_num_cache_leaves(cpu); i++) {
struct _cpuid4_info_regs this_leaf = {};
int retval;
@@ -791,14 +801,14 @@ void init_intel_cacheinfo(struct cpuinfo_x86 *c)
* Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
* trace cache
*/
- if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
+ if ((!get_num_cache_leaves(cpu) || c->x86 == 15) && c->cpuid_level > 1) {
/* supports eax=2 call */
int j, n;
unsigned int regs[4];
unsigned char *dp = (unsigned char *)regs;
int only_trace = 0;
- if (num_cache_leaves != 0 && c->x86 == 15)
+ if (get_num_cache_leaves(cpu) && c->x86 == 15)
only_trace = 1;
/* Number of times to iterate */
@@ -1000,12 +1010,12 @@ int init_cache_level(unsigned int cpu)
{
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
- if (!num_cache_leaves)
+ if (!get_num_cache_leaves(cpu))
return -ENOENT;
if (!this_cpu_ci)
return -EINVAL;
this_cpu_ci->num_levels = 3;
- this_cpu_ci->num_leaves = num_cache_leaves;
+ this_cpu_ci->num_leaves = get_num_cache_leaves(cpu);
return 0;
}
--
2.25.1