The patch titled
Subject: writeback: safer lock nesting
has been removed from the -mm tree. Its filename was
writeback-safer-lock-nesting.patch
This patch was dropped because it was merged into mainline or a subsystem tree
------------------------------------------------------
From: Greg Thelen <gthelen(a)google.com>
Subject: writeback: safer lock nesting
lock_page_memcg()/unlock_page_memcg() use spin_lock_irqsave/restore() if
the page's memcg is undergoing move accounting, which occurs when a
process leaves its memcg for a new one that has
memory.move_charge_at_immigrate set.
unlocked_inode_to_wb_begin,end() use spin_lock_irq/spin_unlock_irq() if
the given inode is switching writeback domains. Switches occur when
enough writes are issued from a new domain.
This existing pattern is thus suspicious:
  lock_page_memcg(page);
  unlocked_inode_to_wb_begin(inode, &locked);
  ...
  unlocked_inode_to_wb_end(inode, locked);
  unlock_page_memcg(page);
If both inode switch and process memcg migration are in-flight then
unlocked_inode_to_wb_end() will unconditionally enable interrupts while
still holding the lock_page_memcg() irq spinlock. This suggests the
possibility of deadlock if an interrupt occurs before unlock_page_memcg().
truncate
  __cancel_dirty_page
    lock_page_memcg
      unlocked_inode_to_wb_begin
      unlocked_inode_to_wb_end
        <interrupts mistakenly enabled>
                        <interrupt>
                        end_page_writeback
                          test_clear_page_writeback
                            lock_page_memcg
                              <deadlock>
      unlock_page_memcg
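The hazard is the unbalanced IRQ handling when the two sections nest. A
minimal sketch with hypothetical locks (illustration only, not the kernel
code) of why the unconditional IRQ re-enable is unsafe:

  #include <linux/spinlock.h>

  static DEFINE_SPINLOCK(outer_lock);   /* stands in for the memcg move lock */
  static DEFINE_SPINLOCK(inner_lock);   /* stands in for the i_pages lock */

  static void nesting_hazard(void)
  {
          unsigned long flags;

          spin_lock_irqsave(&outer_lock, flags);  /* IRQs are now disabled */

          spin_lock_irq(&inner_lock);             /* fine: IRQs stay disabled */
          /* ... */
          spin_unlock_irq(&inner_lock);           /* unconditionally re-enables IRQs */

          /*
           * Window: an interrupt delivered here can recurse into the outer
           * lock (e.g. end_page_writeback() taking lock_page_memcg()) and
           * deadlock.  Taking the inner lock with spin_lock_irqsave() and
           * restoring the saved flags on unlock (which the patch does via
           * struct wb_lock_cookie) never forces IRQs back on.
           */
          spin_unlock_irqrestore(&outer_lock, flags);
  }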
Due to configuration limitations this deadlock is not currently possible
because we don't mix cgroup writeback (a cgroupv2 feature) and
memory.move_charge_at_immigrate (a cgroupv1 feature).
If the kernel is hacked to always claim inode switching and memcg
moving_account, then this script triggers lockup in less than a minute:
  cd /mnt/cgroup/memory
  mkdir a b
  echo 1 > a/memory.move_charge_at_immigrate
  echo 1 > b/memory.move_charge_at_immigrate
  (
        echo $BASHPID > a/cgroup.procs
        while true; do
          dd if=/dev/zero of=/mnt/big bs=1M count=256
        done
  ) &
  while true; do
    sync
  done &
  sleep 1h &
  SLEEP=$!
  while true; do
    echo $SLEEP > a/cgroup.procs
    echo $SLEEP > b/cgroup.procs
  done
The deadlock does not seem possible, so it's debatable if there's any
reason to modify the kernel. I suggest we should, to prevent future
surprises. And Wang Long said "this deadlock occurs three times in our
environment", so there's more reason to apply this, even to stable.
Stable 4.4 has minor conflicts applying this patch. For a clean 4.4 patch
see "[PATCH for-4.4] writeback: safer lock nesting"
https://lkml.org/lkml/2018/4/11/146
Wang Long said "this deadlock occurs three times in our environment"
[gthelen(a)google.com: v4]
Link: http://lkml.kernel.org/r/20180411084653.254724-1-gthelen@google.com
[akpm(a)linux-foundation.org: comment tweaks, struct initialization simplification]
Change-Id: Ibb773e8045852978f6207074491d262f1b3fb613
Link: http://lkml.kernel.org/r/20180410005908.167976-1-gthelen@google.com
Fixes: 682aa8e1a6a1 ("writeback: implement unlocked_inode_to_wb transaction and use it for stat updates")
Signed-off-by: Greg Thelen <gthelen(a)google.com>
Reported-by: Wang Long <wanglong19(a)meituan.com>
Acked-by: Wang Long <wanglong19(a)meituan.com>
Acked-by: Michal Hocko <mhocko(a)suse.com>
Reviewed-by: Andrew Morton <akpm(a)linux-foundation.org>
Cc: Johannes Weiner <hannes(a)cmpxchg.org>
Cc: Tejun Heo <tj(a)kernel.org>
Cc: Nicholas Piggin <npiggin(a)gmail.com>
Cc: <stable(a)vger.kernel.org> [v4.2+]
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
---
fs/fs-writeback.c | 7 +++---
include/linux/backing-dev-defs.h | 5 ++++
include/linux/backing-dev.h | 30 +++++++++++++++--------------
mm/page-writeback.c | 18 ++++++++---------
4 files changed, 34 insertions(+), 26 deletions(-)
diff -puN fs/fs-writeback.c~writeback-safer-lock-nesting fs/fs-writeback.c
--- a/fs/fs-writeback.c~writeback-safer-lock-nesting
+++ a/fs/fs-writeback.c
@@ -745,11 +745,12 @@ int inode_congested(struct inode *inode,
*/
if (inode && inode_to_wb_is_valid(inode)) {
struct bdi_writeback *wb;
- bool locked, congested;
+ struct wb_lock_cookie lock_cookie = {};
+ bool congested;
- wb = unlocked_inode_to_wb_begin(inode, &locked);
+ wb = unlocked_inode_to_wb_begin(inode, &lock_cookie);
congested = wb_congested(wb, cong_bits);
- unlocked_inode_to_wb_end(inode, locked);
+ unlocked_inode_to_wb_end(inode, &lock_cookie);
return congested;
}
diff -puN include/linux/backing-dev-defs.h~writeback-safer-lock-nesting include/linux/backing-dev-defs.h
--- a/include/linux/backing-dev-defs.h~writeback-safer-lock-nesting
+++ a/include/linux/backing-dev-defs.h
@@ -223,6 +223,11 @@ static inline void set_bdi_congested(str
set_wb_congested(bdi->wb.congested, sync);
}
+struct wb_lock_cookie {
+ bool locked;
+ unsigned long flags;
+};
+
#ifdef CONFIG_CGROUP_WRITEBACK
/**
diff -puN include/linux/backing-dev.h~writeback-safer-lock-nesting include/linux/backing-dev.h
--- a/include/linux/backing-dev.h~writeback-safer-lock-nesting
+++ a/include/linux/backing-dev.h
@@ -347,7 +347,7 @@ static inline struct bdi_writeback *inod
/**
* unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction
* @inode: target inode
- * @lockedp: temp bool output param, to be passed to the end function
+ * @cookie: output param, to be passed to the end function
*
* The caller wants to access the wb associated with @inode but isn't
* holding inode->i_lock, the i_pages lock or wb->list_lock. This
@@ -355,12 +355,12 @@ static inline struct bdi_writeback *inod
* association doesn't change until the transaction is finished with
* unlocked_inode_to_wb_end().
*
- * The caller must call unlocked_inode_to_wb_end() with *@lockdep
- * afterwards and can't sleep during transaction. IRQ may or may not be
- * disabled on return.
+ * The caller must call unlocked_inode_to_wb_end() with *@cookie afterwards and
+ * can't sleep during the transaction. IRQs may or may not be disabled on
+ * return.
*/
static inline struct bdi_writeback *
-unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
+unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
rcu_read_lock();
@@ -368,10 +368,10 @@ unlocked_inode_to_wb_begin(struct inode
* Paired with store_release in inode_switch_wb_work_fn() and
* ensures that we see the new wb if we see cleared I_WB_SWITCH.
*/
- *lockedp = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;
+ cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;
- if (unlikely(*lockedp))
- xa_lock_irq(&inode->i_mapping->i_pages);
+ if (unlikely(cookie->locked))
+ xa_lock_irqsave(&inode->i_mapping->i_pages, cookie->flags);
/*
* Protected by either !I_WB_SWITCH + rcu_read_lock() or the i_pages
@@ -383,12 +383,13 @@ unlocked_inode_to_wb_begin(struct inode
/**
* unlocked_inode_to_wb_end - end inode wb access transaction
* @inode: target inode
- * @locked: *@lockedp from unlocked_inode_to_wb_begin()
+ * @cookie: @cookie from unlocked_inode_to_wb_begin()
*/
-static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
+static inline void unlocked_inode_to_wb_end(struct inode *inode,
+ struct wb_lock_cookie *cookie)
{
- if (unlikely(locked))
- xa_unlock_irq(&inode->i_mapping->i_pages);
+ if (unlikely(cookie->locked))
+ xa_unlock_irqrestore(&inode->i_mapping->i_pages, cookie->flags);
rcu_read_unlock();
}
@@ -435,12 +436,13 @@ static inline struct bdi_writeback *inod
}
static inline struct bdi_writeback *
-unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
+unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
return inode_to_wb(inode);
}
-static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
+static inline void unlocked_inode_to_wb_end(struct inode *inode,
+ struct wb_lock_cookie *cookie)
{
}
diff -puN mm/page-writeback.c~writeback-safer-lock-nesting mm/page-writeback.c
--- a/mm/page-writeback.c~writeback-safer-lock-nesting
+++ a/mm/page-writeback.c
@@ -2502,13 +2502,13 @@ void account_page_redirty(struct page *p
if (mapping && mapping_cap_account_dirty(mapping)) {
struct inode *inode = mapping->host;
struct bdi_writeback *wb;
- bool locked;
+ struct wb_lock_cookie cookie = {};
- wb = unlocked_inode_to_wb_begin(inode, &locked);
+ wb = unlocked_inode_to_wb_begin(inode, &cookie);
current->nr_dirtied--;
dec_node_page_state(page, NR_DIRTIED);
dec_wb_stat(wb, WB_DIRTIED);
- unlocked_inode_to_wb_end(inode, locked);
+ unlocked_inode_to_wb_end(inode, &cookie);
}
}
EXPORT_SYMBOL(account_page_redirty);
@@ -2614,15 +2614,15 @@ void __cancel_dirty_page(struct page *pa
if (mapping_cap_account_dirty(mapping)) {
struct inode *inode = mapping->host;
struct bdi_writeback *wb;
- bool locked;
+ struct wb_lock_cookie cookie = {};
lock_page_memcg(page);
- wb = unlocked_inode_to_wb_begin(inode, &locked);
+ wb = unlocked_inode_to_wb_begin(inode, &cookie);
if (TestClearPageDirty(page))
account_page_cleaned(page, mapping, wb);
- unlocked_inode_to_wb_end(inode, locked);
+ unlocked_inode_to_wb_end(inode, &cookie);
unlock_page_memcg(page);
} else {
ClearPageDirty(page);
@@ -2654,7 +2654,7 @@ int clear_page_dirty_for_io(struct page
if (mapping && mapping_cap_account_dirty(mapping)) {
struct inode *inode = mapping->host;
struct bdi_writeback *wb;
- bool locked;
+ struct wb_lock_cookie cookie = {};
/*
* Yes, Virginia, this is indeed insane.
@@ -2691,14 +2691,14 @@ int clear_page_dirty_for_io(struct page
* always locked coming in here, so we get the desired
* exclusion.
*/
- wb = unlocked_inode_to_wb_begin(inode, &locked);
+ wb = unlocked_inode_to_wb_begin(inode, &cookie);
if (TestClearPageDirty(page)) {
dec_lruvec_page_state(page, NR_FILE_DIRTY);
dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
dec_wb_stat(wb, WB_RECLAIMABLE);
ret = 1;
}
- unlocked_inode_to_wb_end(inode, locked);
+ unlocked_inode_to_wb_end(inode, &cookie);
return ret;
}
return TestClearPageDirty(page);
_
Patches currently in -mm which might be from gthelen(a)google.com are
From: Joerg Roedel <jroedel(a)suse.de>
This reverts commit 28ee90fe6048fa7b7ceaeb8831c0e4e454a4cf89.
This commit is broken for x86, as it unmaps the PTE and PMD
pages and immediately frees them without doing a TLB flush.
Further this lacks synchronization with other page-tables in
the system when the PMD pages are not shared between
mm_structs.
On x86-32 with PAE and PTI patches on-top this patch
triggers the BUG_ON in vmalloc_sync_one() because the kernel
and the process page-table were not synchronized.
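For reference, a rough sketch (an assumption on my part, not the fix from
any tree) of what a safe variant of pmd_free_pte_page() would additionally
need: flush the TLB for the covered range before freeing the PTE page.
Synchronizing the other page-tables is a separate problem this ignores.

  /* Sketch only: like the reverted helper, but with the missing TLB flush. */
  int pmd_free_pte_page_sketch(pmd_t *pmd, unsigned long addr)
  {
          pte_t *pte;

          if (pmd_none(*pmd))
                  return 1;

          pte = (pte_t *)pmd_page_vaddr(*pmd);
          pmd_clear(pmd);

          /* The reverted code freed the page without this flush. */
          flush_tlb_kernel_range(addr, addr + PMD_SIZE);

          free_page((unsigned long)pte);
          return 1;
  }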
Signed-off-by: Joerg Roedel <jroedel(a)suse.de>
---
arch/x86/mm/pgtable.c | 28 ++--------------------------
1 file changed, 2 insertions(+), 26 deletions(-)
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index ae98d4c5e32a..fd02a537a80f 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -787,22 +787,7 @@ int pmd_clear_huge(pmd_t *pmd)
*/
int pud_free_pmd_page(pud_t *pud)
{
- pmd_t *pmd;
- int i;
-
- if (pud_none(*pud))
- return 1;
-
- pmd = (pmd_t *)pud_page_vaddr(*pud);
-
- for (i = 0; i < PTRS_PER_PMD; i++)
- if (!pmd_free_pte_page(&pmd[i]))
- return 0;
-
- pud_clear(pud);
- free_page((unsigned long)pmd);
-
- return 1;
+ return pud_none(*pud);
}
/**
@@ -814,15 +799,6 @@ int pud_free_pmd_page(pud_t *pud)
*/
int pmd_free_pte_page(pmd_t *pmd)
{
- pte_t *pte;
-
- if (pmd_none(*pmd))
- return 1;
-
- pte = (pte_t *)pmd_page_vaddr(*pmd);
- pmd_clear(pmd);
- free_page((unsigned long)pte);
-
- return 1;
+ return pmd_none(*pmd);
}
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
--
2.13.6
From: Dave Hansen <dave.hansen(a)linux.intel.com>
I got a bug report that the following code (roughly) was
causing a SIGSEGV:
mprotect(ptr, size, PROT_EXEC);
mprotect(ptr, size, PROT_NONE);
mprotect(ptr, size, PROT_READ);
*ptr = 100;
The problem is hit when mprotect(PROT_EXEC) implicitly assigns
a protection key to the VMA and makes that key
ACCESS_DENY|WRITE_DENY. The PROT_NONE mprotect() fails to remove
the protection key, and the PROT_NONE->PROT_READ transition
leaves the PTE usable but the pkey still in place, so the memory
stays inaccessible.
To fix this, we ensure that we always "override" the pkey
at mprotect() if the VMA does not have execute-only
permissions, but the VMA has the execute-only pkey.
We had a check for PROT_READ/WRITE, but it did not work
for PROT_NONE. This entirely removes the PROT_* checks,
which ensures that PROT_NONE now works.
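For illustration, a minimal userspace reproducer along those lines (my own
sketch; assumes pkeys-capable hardware and an affected kernel):

  #include <stdio.h>
  #include <string.h>
  #include <sys/mman.h>

  int main(void)
  {
          size_t size = 4096;
          char *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

          memset(ptr, 1, size);

          mprotect(ptr, size, PROT_EXEC); /* silently picks up the exec-only pkey */
          mprotect(ptr, size, PROT_NONE);
          mprotect(ptr, size, PROT_READ);

          /*
           * On an affected kernel this read faults: the PTE allows reads,
           * but the leftover exec-only pkey still denies all access.
           */
          printf("first byte: %d\n", ptr[0]);
          return 0;
  }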
Signed-off-by: Dave Hansen <dave.hansen(a)linux.intel.com>
Fixes: 62b5f7d013f ("mm/core, x86/mm/pkeys: Add execute-only protection keys support")
Reported-by: Shakeel Butt <shakeelb(a)google.com>
Cc: stable(a)vger.kernel.org
Cc: Ram Pai <linuxram(a)us.ibm.com>
Cc: Thomas Gleixner <tglx(a)linutronix.de>
Cc: Dave Hansen <dave.hansen(a)intel.com>
Cc: Michael Ellermen <mpe(a)ellerman.id.au>
Cc: Ingo Molnar <mingo(a)kernel.org>
Cc: Andrew Morton <akpm(a)linux-foundation.org>
Cc: Shuah Khan <shuah(a)kernel.org>
---
b/arch/x86/include/asm/pkeys.h | 12 +++++++++++-
b/arch/x86/mm/pkeys.c | 21 +++++++++++----------
2 files changed, 22 insertions(+), 11 deletions(-)
diff -puN arch/x86/include/asm/pkeys.h~pkeys-abandon-exec-only-pkey-more-aggressively arch/x86/include/asm/pkeys.h
--- a/arch/x86/include/asm/pkeys.h~pkeys-abandon-exec-only-pkey-more-aggressively 2018-04-26 10:42:18.971487371 -0700
+++ b/arch/x86/include/asm/pkeys.h 2018-04-26 10:42:18.977487371 -0700
@@ -2,6 +2,8 @@
#ifndef _ASM_X86_PKEYS_H
#define _ASM_X86_PKEYS_H
+#define ARCH_DEFAULT_PKEY 0
+
#define arch_max_pkey() (boot_cpu_has(X86_FEATURE_OSPKE) ? 16 : 1)
extern int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
@@ -15,7 +17,7 @@ extern int __execute_only_pkey(struct mm
static inline int execute_only_pkey(struct mm_struct *mm)
{
if (!boot_cpu_has(X86_FEATURE_OSPKE))
- return 0;
+ return ARCH_DEFAULT_PKEY;
return __execute_only_pkey(mm);
}
@@ -56,6 +58,14 @@ bool mm_pkey_is_allocated(struct mm_stru
return false;
if (pkey >= arch_max_pkey())
return false;
+ /*
+ * The exec-only pkey is set in the allocation map, but
+ * is not available to any of the user interfaces like
+ * mprotect_pkey().
+ */
+ if (pkey == mm->context.execute_only_pkey)
+ return false;
+
return mm_pkey_allocation_map(mm) & (1U << pkey);
}
diff -puN arch/x86/mm/pkeys.c~pkeys-abandon-exec-only-pkey-more-aggressively arch/x86/mm/pkeys.c
--- a/arch/x86/mm/pkeys.c~pkeys-abandon-exec-only-pkey-more-aggressively 2018-04-26 10:42:18.973487371 -0700
+++ b/arch/x86/mm/pkeys.c 2018-04-26 10:47:34.806486584 -0700
@@ -94,26 +94,27 @@ int __arch_override_mprotect_pkey(struct
*/
if (pkey != -1)
return pkey;
- /*
- * Look for a protection-key-drive execute-only mapping
- * which is now being given permissions that are not
- * execute-only. Move it back to the default pkey.
- */
- if (vma_is_pkey_exec_only(vma) &&
- (prot & (PROT_READ|PROT_WRITE))) {
- return 0;
- }
+
/*
* The mapping is execute-only. Go try to get the
* execute-only protection key. If we fail to do that,
* fall through as if we do not have execute-only
- * support.
+ * support in this mm.
*/
if (prot == PROT_EXEC) {
pkey = execute_only_pkey(vma->vm_mm);
if (pkey > 0)
return pkey;
+ } else if (vma_is_pkey_exec_only(vma)) {
+ /*
+ * Protections are *not* PROT_EXEC, but the mapping
+ * is using the exec-only pkey. This mapping was
+ * PROT_EXEC and will no longer be. Move back to
+ * the default pkey.
+ */
+ return ARCH_DEFAULT_PKEY;
}
+
/*
* This is a vanilla, non-pkey mprotect (or we failed to
* setup execute-only), inherit the pkey from the VMA we
_
From: Dave Hansen <dave.hansen(a)linux.intel.com>
mm_pkey_is_allocated() treats pkey 0 as unallocated. That is
inconsistent with the manpages, and also inconsistent with
mm->context.pkey_allocation_map. Stop special casing it and only
disallow values that are actually bad (< 0).
The end-user visible effect of this is that you can now use
mprotect_pkey() to set pkey=0.
This is a bit nicer than what Ram proposed because it is simpler
and removes special-casing for pkey 0. On the other hand, it does
allow applications to pkey_free() pkey-0, but that's just a silly
thing to do, so we are not going to protect against it.
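A hedged illustration of the user-visible effect (assumes the glibc >= 2.27
pkey_mprotect() wrapper and pkeys-capable hardware):

  #define _GNU_SOURCE
  #include <stdio.h>
  #include <sys/mman.h>

  int main(void)
  {
          size_t size = 4096;
          void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

          /*
           * pkey 0 is the default key every mm starts with; before this
           * change passing it explicitly was rejected as "unallocated"
           * (-EINVAL), now it succeeds.
           */
          if (pkey_mprotect(ptr, size, PROT_READ | PROT_WRITE, 0))
                  perror("pkey_mprotect");
          else
                  puts("pkey_mprotect() with pkey 0 succeeded");
          return 0;
  }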
Signed-off-by: Dave Hansen <dave.hansen(a)linux.intel.com>
Fixes: 58ab9a088dda ("x86/pkeys: Check against max pkey to avoid overflows")
Cc: stable(a)kernel.org
Cc: Ram Pai <linuxram(a)us.ibm.com>
Cc: Thomas Gleixner <tglx(a)linutronix.de>
Cc: Dave Hansen <dave.hansen(a)intel.com>
Cc: Michael Ellermen <mpe(a)ellerman.id.au>
Cc: Ingo Molnar <mingo(a)kernel.org>
Cc: Andrew Morton <akpm(a)linux-foundation.org>
Cc: Shuah Khan <shuah(a)kernel.org>
---
b/arch/x86/include/asm/mmu_context.h | 2 +-
b/arch/x86/include/asm/pkeys.h | 6 +++---
2 files changed, 4 insertions(+), 4 deletions(-)
diff -puN arch/x86/include/asm/mmu_context.h~x86-pkey-0-default-allocated arch/x86/include/asm/mmu_context.h
--- a/arch/x86/include/asm/mmu_context.h~x86-pkey-0-default-allocated 2018-03-26 10:22:33.742170197 -0700
+++ b/arch/x86/include/asm/mmu_context.h 2018-03-26 10:22:33.747170197 -0700
@@ -192,7 +192,7 @@ static inline int init_new_context(struc
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
- /* pkey 0 is the default and always allocated */
+ /* pkey 0 is the default and allocated implicitly */
mm->context.pkey_allocation_map = 0x1;
/* -1 means unallocated or invalid */
mm->context.execute_only_pkey = -1;
diff -puN arch/x86/include/asm/pkeys.h~x86-pkey-0-default-allocated arch/x86/include/asm/pkeys.h
--- a/arch/x86/include/asm/pkeys.h~x86-pkey-0-default-allocated 2018-03-26 10:22:33.744170197 -0700
+++ b/arch/x86/include/asm/pkeys.h 2018-03-26 10:22:33.747170197 -0700
@@ -49,10 +49,10 @@ bool mm_pkey_is_allocated(struct mm_stru
{
/*
* "Allocated" pkeys are those that have been returned
- * from pkey_alloc(). pkey 0 is special, and never
- * returned from pkey_alloc().
+ * from pkey_alloc() or pkey 0 which is allocated
+ * implicitly when the mm is created.
*/
- if (pkey <= 0)
+ if (pkey < 0)
return false;
if (pkey >= arch_max_pkey())
return false;
_
Greetings,
this series is the backport of 18 upstream patches to add the
current s390 spectre mitigation to kernel version 4.4. One
patch less than for 4.14 and 4.9 as there is no nested KVM guest
support (aka vsie) in 4.4.
It follows the x86 approach with array_index_nospec for the v1
spectre attack and retpoline/expoline for v2. As a fallback
there is the ppa-12/ppa-13 based defense which requires a
micro-code update.
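For context, the generic Spectre v1 pattern that array_index_nospec
implements looks roughly like this (generic kernel usage, not code taken
from this series):

  #include <linux/errno.h>
  #include <linux/nospec.h>

  #define NR_ENTRIES 16
  static int table[NR_ENTRIES];

  int read_entry(unsigned long idx)
  {
          if (idx >= NR_ENTRIES)
                  return -EINVAL;
          /* Clamp idx so it cannot index out of bounds under speculation. */
          idx = array_index_nospec(idx, NR_ENTRIES);
          return table[idx];
  }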
Christian Borntraeger (2):
KVM: s390: wire up bpb feature
s390/entry.S: fix spurious zeroing of r0
Eugeniu Rosca (1):
s390: Replace IS_ENABLED(EXPOLINE_*) with
IS_ENABLED(CONFIG_EXPOLINE_*)
Heiko Carstens (1):
s390: enable CPU alternatives unconditionally
Martin Schwidefsky (13):
s390: scrub registers on kernel entry and KVM exit
s390: add optimized array_index_mask_nospec
s390/alternative: use a copy of the facility bit mask
s390: add options to change branch prediction behaviour for the kernel
s390: run user space and KVM guests with modified branch prediction
s390: introduce execute-trampolines for branches
s390: do not bypass BPENTER for interrupt system calls
s390: move nobp parameter functions to nospec-branch.c
s390: add automatic detection of the spectre defense
s390: report spectre mitigation via syslog
s390: add sysfs attributes for spectre
s390: correct nospec auto detection init order
s390: correct module section names for expoline code revert
Vasily Gorbik (1):
s390: introduce CPU alternatives
Documentation/kernel-parameters.txt | 3 +
arch/s390/Kconfig | 47 +++++++
arch/s390/Makefile | 10 ++
arch/s390/include/asm/alternative.h | 149 ++++++++++++++++++++
arch/s390/include/asm/barrier.h | 24 ++++
arch/s390/include/asm/facility.h | 18 +++
arch/s390/include/asm/kvm_host.h | 3 +-
arch/s390/include/asm/lowcore.h | 7 +-
arch/s390/include/asm/nospec-branch.h | 17 +++
arch/s390/include/asm/processor.h | 4 +
arch/s390/include/asm/thread_info.h | 4 +
arch/s390/include/uapi/asm/kvm.h | 3 +
arch/s390/kernel/Makefile | 5 +-
arch/s390/kernel/alternative.c | 112 +++++++++++++++
arch/s390/kernel/early.c | 5 +
arch/s390/kernel/entry.S | 250 ++++++++++++++++++++++++++++++----
arch/s390/kernel/ipl.c | 1 +
arch/s390/kernel/module.c | 65 ++++++++-
arch/s390/kernel/nospec-branch.c | 169 +++++++++++++++++++++++
arch/s390/kernel/processor.c | 18 +++
arch/s390/kernel/setup.c | 14 +-
arch/s390/kernel/smp.c | 7 +-
arch/s390/kernel/vmlinux.lds.S | 37 +++++
arch/s390/kvm/kvm-s390.c | 13 +-
drivers/s390/char/Makefile | 2 +
include/uapi/linux/kvm.h | 1 +
26 files changed, 952 insertions(+), 36 deletions(-)
create mode 100644 arch/s390/include/asm/alternative.h
create mode 100644 arch/s390/include/asm/nospec-branch.h
create mode 100644 arch/s390/kernel/alternative.c
create mode 100644 arch/s390/kernel/nospec-branch.c
--
2.13.5
Update SECONDARY_EXEC_DESC in SECONDARY_VM_EXEC_CONTROL for UMIP
emulation if and only if CR4.UMIP is being modified and UMIP is
not supported by hardware, i.e. we're emulating UMIP. If CR4.UMIP
is not being changed then it's safe to assume that the previous
invocation of vmx_set_cr4() correctly set SECONDARY_EXEC_DESC,
i.e. the desired value is already the current value. This avoids
unnecessary VMREAD/VMWRITE to SECONDARY_VM_EXEC_CONTROL, which
is critical as not all processors support SECONDARY_VM_EXEC_CONTROL.
WARN once and signal a fault if CR4.UMIP is changing and UMIP can't
be emulated, i.e. SECONDARY_EXEC_DESC can't be set. Prior checks
should prevent setting UMIP if it can't be emulated, i.e. UMIP
shouldn't have been advertised to the guest if it can't be emulated,
regardless of whether or not UMIP is supported in bare metal.
Fixes: 0367f205a3b7 ("KVM: vmx: add support for emulating UMIP")
Cc: stable(a)vger.kernel.org #4.16
Reported-by: Paolo Zeppegno <pzeppegno(a)gmail.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson(a)intel.com>
---
arch/x86/kvm/vmx.c | 34 ++++++++++++++++++++--------------
1 file changed, 20 insertions(+), 14 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index aafcc9881e88..1502a2ac7884 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1494,6 +1494,12 @@ static inline bool cpu_has_vmx_vmfunc(void)
SECONDARY_EXEC_ENABLE_VMFUNC;
}
+static bool vmx_umip_emulated(void)
+{
+ return vmcs_config.cpu_based_2nd_exec_ctrl &
+ SECONDARY_EXEC_DESC;
+}
+
static inline bool report_flexpriority(void)
{
return flexpriority_enabled;
@@ -4776,14 +4782,20 @@ static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
else
hw_cr4 |= KVM_PMODE_VM_CR4_ALWAYS_ON;
- if ((cr4 & X86_CR4_UMIP) && !boot_cpu_has(X86_FEATURE_UMIP)) {
- vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
- SECONDARY_EXEC_DESC);
- hw_cr4 &= ~X86_CR4_UMIP;
- } else if (!is_guest_mode(vcpu) ||
- !nested_cpu_has2(get_vmcs12(vcpu), SECONDARY_EXEC_DESC))
- vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL,
- SECONDARY_EXEC_DESC);
+ if (((cr4 ^ kvm_read_cr4(vcpu)) & X86_CR4_UMIP) &&
+ !boot_cpu_has(X86_FEATURE_UMIP)) {
+ if (WARN_ON_ONCE(!vmx_umip_emulated()))
+ return 1;
+
+ if (cr4 & X86_CR4_UMIP) {
+ vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
+ SECONDARY_EXEC_DESC);
+ hw_cr4 &= ~X86_CR4_UMIP;
+ } else if (!is_guest_mode(vcpu) ||
+ !nested_cpu_has2(get_vmcs12(vcpu), SECONDARY_EXEC_DESC))
+ vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL,
+ SECONDARY_EXEC_DESC);
+ }
if (cr4 & X86_CR4_VMXE) {
/*
@@ -9512,12 +9524,6 @@ static bool vmx_xsaves_supported(void)
SECONDARY_EXEC_XSAVES;
}
-static bool vmx_umip_emulated(void)
-{
- return vmcs_config.cpu_based_2nd_exec_ctrl &
- SECONDARY_EXEC_DESC;
-}
-
static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
{
u32 exit_intr_info;
--
2.16.2