The patch below does not apply to the 4.14-stable tree.
If someone wants it applied there, or to any other stable or longterm
tree, then please email the backport, including the original git commit
id to <stable@vger.kernel.org>.
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
From a7d48886cacf8b426e0079bca9639d2657cf2d38 Mon Sep 17 00:00:00 2001
From: Johannes Berg <johannes.berg@intel.com>
Date: Wed, 13 Jan 2021 22:08:03 +0100
Subject: [PATCH] um: defer killing userspace on page table update failures
In some cases we can get to fix_range_common() with mmap_sem held,
and in others we get there without it being held. For example, we
get there with it held from sys_mprotect(), and without it held
from fork_handler().
Avoid any issues in this and simply defer killing the task until
it runs the next time. Do it on the mm so that another task that
shares the same mm can't continue running afterwards.
Cc: stable@vger.kernel.org
Fixes: 468f65976a8d ("um: Fix hung task in fix_range_common()")
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Richard Weinberger <richard@nod.at>
diff --git a/arch/um/include/shared/skas/mm_id.h b/arch/um/include/shared/skas/mm_id.h
index 4337b4ced095..e82e203f5f41 100644
--- a/arch/um/include/shared/skas/mm_id.h
+++ b/arch/um/include/shared/skas/mm_id.h
@@ -12,6 +12,7 @@ struct mm_id {
int pid;
} u;
unsigned long stack;
+ int kill;
};
#endif
diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c
index 89468da6bf88..5be1b0da9f3b 100644
--- a/arch/um/kernel/tlb.c
+++ b/arch/um/kernel/tlb.c
@@ -352,12 +352,11 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
/* This is not an else because ret is modified above */
if (ret) {
+ struct mm_id *mm_idp = &current->mm->context.id;
+
printk(KERN_ERR "fix_range_common: failed, killing current "
"process: %d\n", task_tgid_vnr(current));
- /* We are under mmap_lock, release it such that current can terminate */
- mmap_write_unlock(current->mm);
- force_sig(SIGKILL);
- do_signal(&current->thread.regs);
+ mm_idp->kill = 1;
}
}
diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c
index ed4bbffe8d7a..d910e25c273e 100644
--- a/arch/um/os-Linux/skas/process.c
+++ b/arch/um/os-Linux/skas/process.c
@@ -300,6 +300,7 @@ static int userspace_tramp(void *stack)
}
int userspace_pid[NR_CPUS];
+int kill_userspace_mm[NR_CPUS];
/**
* start_userspace() - prepare a new userspace process
@@ -393,6 +394,8 @@ void userspace(struct uml_pt_regs *regs, unsigned long *aux_fp_regs)
interrupt_end();
while (1) {
+ if (kill_userspace_mm[0])
+ fatal_sigsegv();
/*
* This can legitimately fail if the process loads a
@@ -714,4 +717,5 @@ void reboot_skas(void)
void __switch_mm(struct mm_id *mm_idp)
{
userspace_pid[0] = mm_idp->u.pid;
+ kill_userspace_mm[0] = mm_idp->kill;
}
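For context, the change above is a deferral pattern: the failure path, which may run with mm locks held, only records a kill request on the shared per-mm state, and that request is acted on at the next safe point before the task runs again. Below is a minimal standalone C sketch of that pattern, not the UML code itself; the fake mm context, the function names and the "about to run" hook are all illustrative assumptions.

/*
 * Deferral pattern sketch: record the kill on shared state where it is
 * not safe to act, act on it later where it is.  Illustrative only.
 */
#include <stdio.h>
#include <stdlib.h>

struct fake_mm_context {
	int kill;			/* set when this mm must not run again */
};

static struct fake_mm_context shared_mm;

/* Called from a failure path that may hold locks: just record the request. */
static void mark_mm_for_kill(struct fake_mm_context *mm)
{
	mm->kill = 1;
}

/* Called with no locks held, right before the task would run again. */
static void maybe_kill(struct fake_mm_context *mm)
{
	if (mm->kill) {
		fprintf(stderr, "deferred kill: terminating now\n");
		exit(1);
	}
}

int main(void)
{
	mark_mm_for_kill(&shared_mm);	/* a page table update failed */
	maybe_kill(&shared_mm);		/* the task is killed at its next run */
	return 0;
}

Because the flag lives on the shared mm state rather than on one task, any other task sharing the same mm also stops at its next run, which is the property the commit message calls out.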
The patch below does not apply to the 4.9-stable tree.
If someone wants it applied there, or to any other stable or longterm
tree, then please email the backport, including the original git commit
id to <stable@vger.kernel.org>.
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
From a7d48886cacf8b426e0079bca9639d2657cf2d38 Mon Sep 17 00:00:00 2001
From: Johannes Berg <johannes.berg@intel.com>
Date: Wed, 13 Jan 2021 22:08:03 +0100
Subject: [PATCH] um: defer killing userspace on page table update failures
In some cases we can get to fix_range_common() with mmap_sem held,
and in others we get there without it being held. For example, we
get there with it held from sys_mprotect(), and without it held
from fork_handler().
Avoid any issues in this and simply defer killing the task until
it runs the next time. Do it on the mm so that another task that
shares the same mm can't continue running afterwards.
Cc: stable@vger.kernel.org
Fixes: 468f65976a8d ("um: Fix hung task in fix_range_common()")
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Richard Weinberger <richard@nod.at>
diff --git a/arch/um/include/shared/skas/mm_id.h b/arch/um/include/shared/skas/mm_id.h
index 4337b4ced095..e82e203f5f41 100644
--- a/arch/um/include/shared/skas/mm_id.h
+++ b/arch/um/include/shared/skas/mm_id.h
@@ -12,6 +12,7 @@ struct mm_id {
int pid;
} u;
unsigned long stack;
+ int kill;
};
#endif
diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c
index 89468da6bf88..5be1b0da9f3b 100644
--- a/arch/um/kernel/tlb.c
+++ b/arch/um/kernel/tlb.c
@@ -352,12 +352,11 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
/* This is not an else because ret is modified above */
if (ret) {
+ struct mm_id *mm_idp = &current->mm->context.id;
+
printk(KERN_ERR "fix_range_common: failed, killing current "
"process: %d\n", task_tgid_vnr(current));
- /* We are under mmap_lock, release it such that current can terminate */
- mmap_write_unlock(current->mm);
- force_sig(SIGKILL);
- do_signal(&current->thread.regs);
+ mm_idp->kill = 1;
}
}
diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c
index ed4bbffe8d7a..d910e25c273e 100644
--- a/arch/um/os-Linux/skas/process.c
+++ b/arch/um/os-Linux/skas/process.c
@@ -300,6 +300,7 @@ static int userspace_tramp(void *stack)
}
int userspace_pid[NR_CPUS];
+int kill_userspace_mm[NR_CPUS];
/**
* start_userspace() - prepare a new userspace process
@@ -393,6 +394,8 @@ void userspace(struct uml_pt_regs *regs, unsigned long *aux_fp_regs)
interrupt_end();
while (1) {
+ if (kill_userspace_mm[0])
+ fatal_sigsegv();
/*
* This can legitimately fail if the process loads a
@@ -714,4 +717,5 @@ void reboot_skas(void)
void __switch_mm(struct mm_id *mm_idp)
{
userspace_pid[0] = mm_idp->u.pid;
+ kill_userspace_mm[0] = mm_idp->kill;
}
The patch below does not apply to the 4.4-stable tree.
If someone wants it applied there, or to any other stable or longterm
tree, then please email the backport, including the original git commit
id to <stable@vger.kernel.org>.
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
From a7d48886cacf8b426e0079bca9639d2657cf2d38 Mon Sep 17 00:00:00 2001
From: Johannes Berg <johannes.berg@intel.com>
Date: Wed, 13 Jan 2021 22:08:03 +0100
Subject: [PATCH] um: defer killing userspace on page table update failures
In some cases we can get to fix_range_common() with mmap_sem held,
and in others we get there without it being held. For example, we
get there with it held from sys_mprotect(), and without it held
from fork_handler().
Avoid any issues in this and simply defer killing the task until
it runs the next time. Do it on the mm so that another task that
shares the same mm can't continue running afterwards.
Cc: stable@vger.kernel.org
Fixes: 468f65976a8d ("um: Fix hung task in fix_range_common()")
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Richard Weinberger <richard@nod.at>
diff --git a/arch/um/include/shared/skas/mm_id.h b/arch/um/include/shared/skas/mm_id.h
index 4337b4ced095..e82e203f5f41 100644
--- a/arch/um/include/shared/skas/mm_id.h
+++ b/arch/um/include/shared/skas/mm_id.h
@@ -12,6 +12,7 @@ struct mm_id {
int pid;
} u;
unsigned long stack;
+ int kill;
};
#endif
diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c
index 89468da6bf88..5be1b0da9f3b 100644
--- a/arch/um/kernel/tlb.c
+++ b/arch/um/kernel/tlb.c
@@ -352,12 +352,11 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
/* This is not an else because ret is modified above */
if (ret) {
+ struct mm_id *mm_idp = &current->mm->context.id;
+
printk(KERN_ERR "fix_range_common: failed, killing current "
"process: %d\n", task_tgid_vnr(current));
- /* We are under mmap_lock, release it such that current can terminate */
- mmap_write_unlock(current->mm);
- force_sig(SIGKILL);
- do_signal(&current->thread.regs);
+ mm_idp->kill = 1;
}
}
diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c
index ed4bbffe8d7a..d910e25c273e 100644
--- a/arch/um/os-Linux/skas/process.c
+++ b/arch/um/os-Linux/skas/process.c
@@ -300,6 +300,7 @@ static int userspace_tramp(void *stack)
}
int userspace_pid[NR_CPUS];
+int kill_userspace_mm[NR_CPUS];
/**
* start_userspace() - prepare a new userspace process
@@ -393,6 +394,8 @@ void userspace(struct uml_pt_regs *regs, unsigned long *aux_fp_regs)
interrupt_end();
while (1) {
+ if (kill_userspace_mm[0])
+ fatal_sigsegv();
/*
* This can legitimately fail if the process loads a
@@ -714,4 +717,5 @@ void reboot_skas(void)
void __switch_mm(struct mm_id *mm_idp)
{
userspace_pid[0] = mm_idp->u.pid;
+ kill_userspace_mm[0] = mm_idp->kill;
}
The patch below does not apply to the 4.19-stable tree.
If someone wants it applied there, or to any other stable or longterm
tree, then please email the backport, including the original git commit
id to <stable@vger.kernel.org>.
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
From 47da29763ec9a153b9b685bff9db659e4e09e494 Mon Sep 17 00:00:00 2001
From: Johannes Berg <johannes.berg@intel.com>
Date: Wed, 13 Jan 2021 22:08:02 +0100
Subject: [PATCH] um: mm: check more comprehensively for stub changes
If userspace tries to change the stub, we need to kill it,
because otherwise it can escape the virtual machine. In a
few cases the stub checks weren't good, e.g. if userspace
just tries to
mmap(0x100000 - 0x1000, 0x3000, ...)
it could succeed to get a new private/anonymous mapping
replacing the stubs. Fix this by checking everywhere, and
checking for _overlap_, not just direct changes.
Cc: stable@vger.kernel.org
Fixes: 3963333fe676 ("uml: cover stubs with a VMA")
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Richard Weinberger <richard@nod.at>
diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c
index 61776790cd67..89468da6bf88 100644
--- a/arch/um/kernel/tlb.c
+++ b/arch/um/kernel/tlb.c
@@ -125,6 +125,9 @@ static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
struct host_vm_op *last;
int fd = -1, ret = 0;
+ if (virt + len > STUB_START && virt < STUB_END)
+ return -EINVAL;
+
if (hvc->userspace)
fd = phys_mapping(phys, &offset);
else
@@ -162,7 +165,7 @@ static int add_munmap(unsigned long addr, unsigned long len,
struct host_vm_op *last;
int ret = 0;
- if ((addr >= STUB_START) && (addr < STUB_END))
+ if (addr + len > STUB_START && addr < STUB_END)
return -EINVAL;
if (hvc->index != 0) {
@@ -192,6 +195,9 @@ static int add_mprotect(unsigned long addr, unsigned long len,
struct host_vm_op *last;
int ret = 0;
+ if (addr + len > STUB_START && addr < STUB_END)
+ return -EINVAL;
+
if (hvc->index != 0) {
last = &hvc->ops[hvc->index - 1];
if ((last->type == MPROTECT) &&
@@ -472,6 +478,10 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
struct mm_id *mm_id;
address &= PAGE_MASK;
+
+ if (address >= STUB_START && address < STUB_END)
+ goto kill;
+
pgd = pgd_offset(mm, address);
if (!pgd_present(*pgd))
goto kill;
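The core of this patch is swapping a containment test for an overlap test on the stub region. Below is a standalone C illustration of the difference, using made-up STUB_START/STUB_END values and the mmap(0x100000 - 0x1000, 0x3000, ...) request from the commit message; this is not UML code, only a demonstration of the range check.

/*
 * Containment vs. overlap check against [STUB_START, STUB_END).
 * The constants are illustrative, not the real UML stub addresses.
 */
#include <stdio.h>

#define STUB_START 0x100000UL
#define STUB_END   0x102000UL

/* Old check: rejects only requests whose start address is inside the stubs. */
static int rejected_by_old_check(unsigned long addr, unsigned long len)
{
	(void)len;
	return addr >= STUB_START && addr < STUB_END;
}

/* New check: rejects any request whose [addr, addr + len) overlaps the stubs. */
static int rejected_by_new_check(unsigned long addr, unsigned long len)
{
	return addr + len > STUB_START && addr < STUB_END;
}

int main(void)
{
	unsigned long addr = 0x100000UL - 0x1000UL;	/* starts below the stubs */
	unsigned long len = 0x3000UL;			/* ...but extends across them */

	printf("old check rejects: %d\n", rejected_by_old_check(addr, len));	/* prints 0 */
	printf("new check rejects: %d\n", rejected_by_new_check(addr, len));	/* prints 1 */
	return 0;
}

The old test lets this request through because its start address lies below STUB_START, even though the resulting mapping would cover the stub pages; the overlap test rejects it, which is exactly the escape the commit message describes.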
The patch below does not apply to the 4.9-stable tree.
If someone wants it applied there, or to any other stable or longterm
tree, then please email the backport, including the original git commit
id to <stable@vger.kernel.org>.
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
From 47da29763ec9a153b9b685bff9db659e4e09e494 Mon Sep 17 00:00:00 2001
From: Johannes Berg <johannes.berg@intel.com>
Date: Wed, 13 Jan 2021 22:08:02 +0100
Subject: [PATCH] um: mm: check more comprehensively for stub changes
If userspace tries to change the stub, we need to kill it,
because otherwise it can escape the virtual machine. In a
few cases the stub checks weren't good, e.g. if userspace
just tries to
mmap(0x100000 - 0x1000, 0x3000, ...)
it could succeed to get a new private/anonymous mapping
replacing the stubs. Fix this by checking everywhere, and
checking for _overlap_, not just direct changes.
Cc: stable@vger.kernel.org
Fixes: 3963333fe676 ("uml: cover stubs with a VMA")
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Richard Weinberger <richard@nod.at>
diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c
index 61776790cd67..89468da6bf88 100644
--- a/arch/um/kernel/tlb.c
+++ b/arch/um/kernel/tlb.c
@@ -125,6 +125,9 @@ static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
struct host_vm_op *last;
int fd = -1, ret = 0;
+ if (virt + len > STUB_START && virt < STUB_END)
+ return -EINVAL;
+
if (hvc->userspace)
fd = phys_mapping(phys, &offset);
else
@@ -162,7 +165,7 @@ static int add_munmap(unsigned long addr, unsigned long len,
struct host_vm_op *last;
int ret = 0;
- if ((addr >= STUB_START) && (addr < STUB_END))
+ if (addr + len > STUB_START && addr < STUB_END)
return -EINVAL;
if (hvc->index != 0) {
@@ -192,6 +195,9 @@ static int add_mprotect(unsigned long addr, unsigned long len,
struct host_vm_op *last;
int ret = 0;
+ if (addr + len > STUB_START && addr < STUB_END)
+ return -EINVAL;
+
if (hvc->index != 0) {
last = &hvc->ops[hvc->index - 1];
if ((last->type == MPROTECT) &&
@@ -472,6 +478,10 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
struct mm_id *mm_id;
address &= PAGE_MASK;
+
+ if (address >= STUB_START && address < STUB_END)
+ goto kill;
+
pgd = pgd_offset(mm, address);
if (!pgd_present(*pgd))
goto kill;
The patch below does not apply to the 4.14-stable tree.
If someone wants it applied there, or to any other stable or longterm
tree, then please email the backport, including the original git commit
id to <stable@vger.kernel.org>.
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
From 47da29763ec9a153b9b685bff9db659e4e09e494 Mon Sep 17 00:00:00 2001
From: Johannes Berg <johannes.berg@intel.com>
Date: Wed, 13 Jan 2021 22:08:02 +0100
Subject: [PATCH] um: mm: check more comprehensively for stub changes
If userspace tries to change the stub, we need to kill it,
because otherwise it can escape the virtual machine. In a
few cases the stub checks weren't good, e.g. if userspace
just tries to
mmap(0x100000 - 0x1000, 0x3000, ...)
it could succeed to get a new private/anonymous mapping
replacing the stubs. Fix this by checking everywhere, and
checking for _overlap_, not just direct changes.
Cc: stable@vger.kernel.org
Fixes: 3963333fe676 ("uml: cover stubs with a VMA")
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Richard Weinberger <richard@nod.at>
diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c
index 61776790cd67..89468da6bf88 100644
--- a/arch/um/kernel/tlb.c
+++ b/arch/um/kernel/tlb.c
@@ -125,6 +125,9 @@ static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
struct host_vm_op *last;
int fd = -1, ret = 0;
+ if (virt + len > STUB_START && virt < STUB_END)
+ return -EINVAL;
+
if (hvc->userspace)
fd = phys_mapping(phys, &offset);
else
@@ -162,7 +165,7 @@ static int add_munmap(unsigned long addr, unsigned long len,
struct host_vm_op *last;
int ret = 0;
- if ((addr >= STUB_START) && (addr < STUB_END))
+ if (addr + len > STUB_START && addr < STUB_END)
return -EINVAL;
if (hvc->index != 0) {
@@ -192,6 +195,9 @@ static int add_mprotect(unsigned long addr, unsigned long len,
struct host_vm_op *last;
int ret = 0;
+ if (addr + len > STUB_START && addr < STUB_END)
+ return -EINVAL;
+
if (hvc->index != 0) {
last = &hvc->ops[hvc->index - 1];
if ((last->type == MPROTECT) &&
@@ -472,6 +478,10 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
struct mm_id *mm_id;
address &= PAGE_MASK;
+
+ if (address >= STUB_START && address < STUB_END)
+ goto kill;
+
pgd = pgd_offset(mm, address);
if (!pgd_present(*pgd))
goto kill;
The patch below does not apply to the 4.4-stable tree.
If someone wants it applied there, or to any other stable or longterm
tree, then please email the backport, including the original git commit
id to <stable@vger.kernel.org>.
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
From 47da29763ec9a153b9b685bff9db659e4e09e494 Mon Sep 17 00:00:00 2001
From: Johannes Berg <johannes.berg@intel.com>
Date: Wed, 13 Jan 2021 22:08:02 +0100
Subject: [PATCH] um: mm: check more comprehensively for stub changes
If userspace tries to change the stub, we need to kill it,
because otherwise it can escape the virtual machine. In a
few cases the stub checks weren't good, e.g. if userspace
just tries to
mmap(0x100000 - 0x1000, 0x3000, ...)
it could succeed to get a new private/anonymous mapping
replacing the stubs. Fix this by checking everywhere, and
checking for _overlap_, not just direct changes.
Cc: stable@vger.kernel.org
Fixes: 3963333fe676 ("uml: cover stubs with a VMA")
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Richard Weinberger <richard@nod.at>
diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c
index 61776790cd67..89468da6bf88 100644
--- a/arch/um/kernel/tlb.c
+++ b/arch/um/kernel/tlb.c
@@ -125,6 +125,9 @@ static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
struct host_vm_op *last;
int fd = -1, ret = 0;
+ if (virt + len > STUB_START && virt < STUB_END)
+ return -EINVAL;
+
if (hvc->userspace)
fd = phys_mapping(phys, &offset);
else
@@ -162,7 +165,7 @@ static int add_munmap(unsigned long addr, unsigned long len,
struct host_vm_op *last;
int ret = 0;
- if ((addr >= STUB_START) && (addr < STUB_END))
+ if (addr + len > STUB_START && addr < STUB_END)
return -EINVAL;
if (hvc->index != 0) {
@@ -192,6 +195,9 @@ static int add_mprotect(unsigned long addr, unsigned long len,
struct host_vm_op *last;
int ret = 0;
+ if (addr + len > STUB_START && addr < STUB_END)
+ return -EINVAL;
+
if (hvc->index != 0) {
last = &hvc->ops[hvc->index - 1];
if ((last->type == MPROTECT) &&
@@ -472,6 +478,10 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
struct mm_id *mm_id;
address &= PAGE_MASK;
+
+ if (address >= STUB_START && address < STUB_END)
+ goto kill;
+
pgd = pgd_offset(mm, address);
if (!pgd_present(*pgd))
goto kill;
The patch below does not apply to the 5.10-stable tree.
If someone wants it applied there, or to any other stable or longterm
tree, then please email the backport, including the original git commit
id to <stable@vger.kernel.org>.
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
From 26521412ae22d06caab98721757b2721c6d7c46c Mon Sep 17 00:00:00 2001
From: Sven Schnelle <svens@linux.ibm.com>
Date: Wed, 3 Feb 2021 09:16:45 +0100
Subject: [PATCH] s390: fix kernel asce loading when sie is interrupted
If a machine check is coming in during sie, the PU saves the
control registers to the machine check save area. Afterwards
mcck_int_handler is called, which loads __LC_KERNEL_ASCE into
%cr1. Later the code restores %cr1 from the machine check area,
but that is wrong when SIE was interrupted because the machine
check area still contains the gmap asce. Instead it should return
with either __KERNEL_ASCE in %cr1 when interrupted in SIE or
the previous %cr1 content saved in the machine check save area.
Fixes: 87d598634521 ("s390/mm: remove set_fs / rework address space handling")
Signed-off-by: Sven Schnelle <svens@linux.ibm.com>
Cc: <stable@kernel.org> # v5.8+
Reviewed-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index f7953bb17558..377294969954 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -558,6 +558,7 @@ ENTRY(mcck_int_handler)
lg %r15,__LC_MCCK_STACK
.Lmcck_skip:
la %r11,STACK_FRAME_OVERHEAD(%r15)
+ stctg %c1,%c1,__PT_CR1(%r11)
lctlg %c1,%c1,__LC_KERNEL_ASCE
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
lghi %r14,__LC_GPREGS_SAVE_AREA+64
@@ -573,8 +574,6 @@ ENTRY(mcck_int_handler)
xgr %r10,%r10
mvc __PT_R8(64,%r11),0(%r14)
stmg %r8,%r9,__PT_PSW(%r11)
- la %r14,4095
- mvc __PT_CR1(8,%r11),__LC_CREGS_SAVE_AREA-4095+8(%r14)
xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
lgr %r2,%r11 # pass pointer to pt_regs
The patch below does not apply to the 5.11-stable tree.
If someone wants it applied there, or to any other stable or longterm
tree, then please email the backport, including the original git commit
id to <stable@vger.kernel.org>.
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
From 26521412ae22d06caab98721757b2721c6d7c46c Mon Sep 17 00:00:00 2001
From: Sven Schnelle <svens@linux.ibm.com>
Date: Wed, 3 Feb 2021 09:16:45 +0100
Subject: [PATCH] s390: fix kernel asce loading when sie is interrupted
If a machine check is coming in during sie, the PU saves the
control registers to the machine check save area. Afterwards
mcck_int_handler is called, which loads __LC_KERNEL_ASCE into
%cr1. Later the code restores %cr1 from the machine check area,
but that is wrong when SIE was interrupted because the machine
check area still contains the gmap asce. Instead it should return
with either __KERNEL_ASCE in %cr1 when interrupted in SIE or
the previous %cr1 content saved in the machine check save area.
Fixes: 87d598634521 ("s390/mm: remove set_fs / rework address space handling")
Signed-off-by: Sven Schnelle <svens@linux.ibm.com>
Cc: <stable@kernel.org> # v5.8+
Reviewed-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index f7953bb17558..377294969954 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -558,6 +558,7 @@ ENTRY(mcck_int_handler)
lg %r15,__LC_MCCK_STACK
.Lmcck_skip:
la %r11,STACK_FRAME_OVERHEAD(%r15)
+ stctg %c1,%c1,__PT_CR1(%r11)
lctlg %c1,%c1,__LC_KERNEL_ASCE
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
lghi %r14,__LC_GPREGS_SAVE_AREA+64
@@ -573,8 +574,6 @@ ENTRY(mcck_int_handler)
xgr %r10,%r10
mvc __PT_R8(64,%r11),0(%r14)
stmg %r8,%r9,__PT_PSW(%r11)
- la %r14,4095
- mvc __PT_CR1(8,%r11),__LC_CREGS_SAVE_AREA-4095+8(%r14)
xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
lgr %r2,%r11 # pass pointer to pt_regs