This is a note to let you know that I've just added the patch titled
ALSA: usb-audio: Add a quirck for B&W PX headphones
to the 4.4-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
alsa-usb-audio-add-a-quirck-for-b-w-px-headphones.patch
and it can be found in the queue-4.4 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable(a)vger.kernel.org> know about it.
From 240a8af929c7c57dcde28682725b29cf8474e8e5 Mon Sep 17 00:00:00 2001
From: Erik Veijola <erik.veijola(a)gmail.com>
Date: Fri, 23 Feb 2018 14:06:52 +0200
Subject: ALSA: usb-audio: Add a quirck for B&W PX headphones
From: Erik Veijola <erik.veijola(a)gmail.com>
commit 240a8af929c7c57dcde28682725b29cf8474e8e5 upstream.
The capture interface doesn't work and the playback interface only
supports a 48 kHz sampling rate even though it advertises more rates.
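As a rough illustration of that constraint (not part of the patch): once the
quirk is applied, an alsa-lib client opening the playback interface directly
can only negotiate S16_LE / 2 channels / 48000 Hz. The sketch below assumes
the illustrative device name "hw:PX"; use the card name your system reports.

#include <alsa/asoundlib.h>

int main(void)
{
        snd_pcm_t *pcm;
        snd_pcm_hw_params_t *params;

        /* "hw:PX" is an illustrative device name, not defined by the patch. */
        if (snd_pcm_open(&pcm, "hw:PX", SND_PCM_STREAM_PLAYBACK, 0) < 0)
                return 1;

        snd_pcm_hw_params_alloca(&params);
        snd_pcm_hw_params_any(pcm, params);

        /* With the fixed-endpoint quirk only this combination is accepted. */
        if (snd_pcm_hw_params_set_access(pcm, params, SND_PCM_ACCESS_RW_INTERLEAVED) < 0 ||
            snd_pcm_hw_params_set_format(pcm, params, SND_PCM_FORMAT_S16_LE) < 0 ||
            snd_pcm_hw_params_set_channels(pcm, params, 2) < 0 ||
            snd_pcm_hw_params_set_rate(pcm, params, 48000, 0) < 0 ||
            snd_pcm_hw_params(pcm, params) < 0) {
                snd_pcm_close(pcm);
                return 1;
        }

        snd_pcm_close(pcm);
        return 0;
}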
Signed-off-by: Erik Veijola <erik.veijola(a)gmail.com>
Cc: <stable(a)vger.kernel.org>
Signed-off-by: Takashi Iwai <tiwai(a)suse.de>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
sound/usb/quirks-table.h | 47 +++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 47 insertions(+)
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -3277,4 +3277,51 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge
}
},
+{
+ /*
+ * Bower's & Wilkins PX headphones only support the 48 kHz sample rate
+ * even though it advertises more. The capture interface doesn't work
+ * even on windows.
+ */
+ USB_DEVICE(0x19b5, 0x0021),
+ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+ .ifnum = QUIRK_ANY_INTERFACE,
+ .type = QUIRK_COMPOSITE,
+ .data = (const struct snd_usb_audio_quirk[]) {
+ {
+ .ifnum = 0,
+ .type = QUIRK_AUDIO_STANDARD_MIXER,
+ },
+ /* Capture */
+ {
+ .ifnum = 1,
+ .type = QUIRK_IGNORE_INTERFACE,
+ },
+ /* Playback */
+ {
+ .ifnum = 2,
+ .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+ .data = &(const struct audioformat) {
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .channels = 2,
+ .iface = 2,
+ .altsetting = 1,
+ .altset_idx = 1,
+ .attributes = UAC_EP_CS_ATTR_FILL_MAX |
+ UAC_EP_CS_ATTR_SAMPLE_RATE,
+ .endpoint = 0x03,
+ .ep_attr = USB_ENDPOINT_XFER_ISOC,
+ .rates = SNDRV_PCM_RATE_48000,
+ .rate_min = 48000,
+ .rate_max = 48000,
+ .nr_rates = 1,
+ .rate_table = (unsigned int[]) {
+ 48000
+ }
+ }
+ },
+ }
+ }
+},
+
#undef USB_DEVICE_VENDOR_SPEC
Patches currently in stable-queue which might be from erik.veijola(a)gmail.com are
queue-4.4/alsa-usb-audio-add-a-quirck-for-b-w-px-headphones.patch
This is a note to let you know that I've just added the patch titled
ALSA: hda: Add a power_save blacklist
to the 4.4-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
alsa-hda-add-a-power_save-blacklist.patch
and it can be found in the queue-4.4 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable(a)vger.kernel.org> know about it.
From 1ba8f9d308174e647b864c36209b4d7934d99888 Mon Sep 17 00:00:00 2001
From: Hans de Goede <hdegoede(a)redhat.com>
Date: Thu, 22 Feb 2018 14:20:35 +0100
Subject: ALSA: hda: Add a power_save blacklist
From: Hans de Goede <hdegoede(a)redhat.com>
commit 1ba8f9d308174e647b864c36209b4d7934d99888 upstream.
On some boards, setting power_save to a non-zero value leads to clicking /
popping sounds whenever we enter/leave power-saving mode. Ideally we would
figure out how to avoid these sounds, but that is not always feasible.
This commit adds a blacklist for devices where power saving is known to
cause problems and disables it on those devices.
Note that I tried to put this blacklist in userspace first:
https://github.com/systemd/systemd/pull/8128
But the systemd maintainers rightfully pointed out that it would then be
impossible to later remove entries once we actually find a way to
make power saving work on listed boards without issues. Having this list
in the kernel will allow removal of the blacklist entry in the same commit
which fixes the clicks / plops.
The blacklist only applies to the default power_save module-option value;
if a user explicitly sets the module option, the blacklist is not used.
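As a usage note (not part of the patch): since the blacklist only kicks in
when power_save is left at the new default of -1, power saving can still be
forced on a blacklisted board by setting the option explicitly, e.g. with a
modprobe.d snippet (the file name below is illustrative):

# e.g. /etc/modprobe.d/snd-hda-power-save.conf (illustrative file name)
options snd_hda_intel power_save=1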
[ added an ifdef CONFIG_PM for the build error -- tiwai]
BugLink: https://bugzilla.redhat.com/show_bug.cgi?id=1525104
BugLink: https://bugzilla.kernel.org/show_bug.cgi?id=198611
Cc: stable(a)vger.kernel.org
Signed-off-by: Hans de Goede <hdegoede(a)redhat.com>
Signed-off-by: Takashi Iwai <tiwai(a)suse.de>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
sound/pci/hda/hda_intel.c | 38 ++++++++++++++++++++++++++++++++++++--
1 file changed, 36 insertions(+), 2 deletions(-)
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -179,7 +179,7 @@ static const struct kernel_param_ops par
};
#define param_check_xint param_check_int
-static int power_save = CONFIG_SND_HDA_POWER_SAVE_DEFAULT;
+static int power_save = -1;
module_param(power_save, xint, 0644);
MODULE_PARM_DESC(power_save, "Automatic power-saving timeout "
"(in second, 0 = disable).");
@@ -2055,6 +2055,24 @@ out_free:
return err;
}
+#ifdef CONFIG_PM
+/* On some boards setting power_save to a non 0 value leads to clicking /
+ * popping sounds when ever we enter/leave powersaving mode. Ideally we would
+ * figure out how to avoid these sounds, but that is not always feasible.
+ * So we keep a list of devices where we disable powersaving as its known
+ * to causes problems on these devices.
+ */
+static struct snd_pci_quirk power_save_blacklist[] = {
+ /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
+ SND_PCI_QUIRK(0x1849, 0x0c0c, "Asrock B85M-ITX", 0),
+ /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
+ SND_PCI_QUIRK(0x1043, 0x8733, "Asus Prime X370-Pro", 0),
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=198611 */
+ SND_PCI_QUIRK(0x17aa, 0x2227, "Lenovo X1 Carbon 3rd Gen", 0),
+ {}
+};
+#endif /* CONFIG_PM */
+
/* number of codec slots for each chipset: 0 = default slots (i.e. 4) */
static unsigned int azx_max_codecs[AZX_NUM_DRIVERS] = {
[AZX_DRIVER_NVIDIA] = 8,
@@ -2067,6 +2085,7 @@ static int azx_probe_continue(struct azx
struct hdac_bus *bus = azx_bus(chip);
struct pci_dev *pci = chip->pci;
int dev = chip->dev_index;
+ int val;
int err;
hda->probe_continued = 1;
@@ -2142,7 +2161,22 @@ static int azx_probe_continue(struct azx
chip->running = 1;
azx_add_card_list(chip);
- snd_hda_set_power_save(&chip->bus, power_save * 1000);
+
+ val = power_save;
+#ifdef CONFIG_PM
+ if (val == -1) {
+ const struct snd_pci_quirk *q;
+
+ val = CONFIG_SND_HDA_POWER_SAVE_DEFAULT;
+ q = snd_pci_quirk_lookup(chip->pci, power_save_blacklist);
+ if (q && val) {
+ dev_info(chip->card->dev, "device %04x:%04x is on the power_save blacklist, forcing power_save to 0\n",
+ q->subvendor, q->subdevice);
+ val = 0;
+ }
+ }
+#endif /* CONFIG_PM */
+ snd_hda_set_power_save(&chip->bus, val * 1000);
if (azx_has_pm_runtime(chip) || hda->use_vga_switcheroo)
pm_runtime_put_noidle(&pci->dev);
Patches currently in stable-queue which might be from hdegoede(a)redhat.com are
queue-4.4/alsa-hda-add-a-power_save-blacklist.patch
This is a note to let you know that I've just added the patch titled
x86/xen: Zero MSR_IA32_SPEC_CTRL before suspend
to the 4.15-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
x86-xen-zero-msr_ia32_spec_ctrl-before-suspend.patch
and it can be found in the queue-4.15 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable(a)vger.kernel.org> know about it.
From 71c208dd54ab971036d83ff6d9837bae4976e623 Mon Sep 17 00:00:00 2001
From: Juergen Gross <jgross(a)suse.com>
Date: Mon, 26 Feb 2018 15:08:18 +0100
Subject: x86/xen: Zero MSR_IA32_SPEC_CTRL before suspend
From: Juergen Gross <jgross(a)suse.com>
commit 71c208dd54ab971036d83ff6d9837bae4976e623 upstream.
Older Xen versions (4.5 and before) might have problems migrating pv
guests with MSR_IA32_SPEC_CTRL having a non-zero value. So zero that MSR
before suspending and restore it after resuming.
Signed-off-by: Juergen Gross <jgross(a)suse.com>
Signed-off-by: Thomas Gleixner <tglx(a)linutronix.de>
Reviewed-by: Jan Beulich <jbeulich(a)suse.com>
Cc: stable(a)vger.kernel.org
Cc: xen-devel(a)lists.xenproject.org
Cc: boris.ostrovsky(a)oracle.com
Link: https://lkml.kernel.org/r/20180226140818.4849-1-jgross@suse.com
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
arch/x86/xen/suspend.c | 16 ++++++++++++++++
1 file changed, 16 insertions(+)
--- a/arch/x86/xen/suspend.c
+++ b/arch/x86/xen/suspend.c
@@ -1,12 +1,15 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
#include <linux/tick.h>
+#include <linux/percpu-defs.h>
#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <xen/grant_table.h>
#include <xen/events.h>
+#include <asm/cpufeatures.h>
+#include <asm/msr-index.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/page.h>
#include <asm/fixmap.h>
@@ -15,6 +18,8 @@
#include "mmu.h"
#include "pmu.h"
+static DEFINE_PER_CPU(u64, spec_ctrl);
+
void xen_arch_pre_suspend(void)
{
xen_save_time_memory_area();
@@ -35,6 +40,9 @@ void xen_arch_post_suspend(int cancelled
static void xen_vcpu_notify_restore(void *data)
{
+ if (xen_pv_domain() && boot_cpu_has(X86_FEATURE_SPEC_CTRL))
+ wrmsrl(MSR_IA32_SPEC_CTRL, this_cpu_read(spec_ctrl));
+
/* Boot processor notified via generic timekeeping_resume() */
if (smp_processor_id() == 0)
return;
@@ -44,7 +52,15 @@ static void xen_vcpu_notify_restore(void
static void xen_vcpu_notify_suspend(void *data)
{
+ u64 tmp;
+
tick_suspend_local();
+
+ if (xen_pv_domain() && boot_cpu_has(X86_FEATURE_SPEC_CTRL)) {
+ rdmsrl(MSR_IA32_SPEC_CTRL, tmp);
+ this_cpu_write(spec_ctrl, tmp);
+ wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+ }
}
void xen_arch_resume(void)
Patches currently in stable-queue which might be from jgross(a)suse.com are
queue-4.15/x86-xen-zero-msr_ia32_spec_ctrl-before-suspend.patch
This is a note to let you know that I've just added the patch titled
x86/platform/intel-mid: Handle Intel Edison reboot correctly
to the 4.15-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
x86-platform-intel-mid-handle-intel-edison-reboot-correctly.patch
and it can be found in the queue-4.15 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable(a)vger.kernel.org> know about it.
From 028091f82eefd5e84f81cef81a7673016ecbe78b Mon Sep 17 00:00:00 2001
From: Sebastian Panceac <sebastian(a)resin.io>
Date: Wed, 28 Feb 2018 11:40:49 +0200
Subject: x86/platform/intel-mid: Handle Intel Edison reboot correctly
From: Sebastian Panceac <sebastian(a)resin.io>
commit 028091f82eefd5e84f81cef81a7673016ecbe78b upstream.
When the Intel Edison module is powered with 3.3V, the reboot command leaves
the module stuck. If the module is powered at a greater voltage, like 4.4V
(as the Edison Mini Breakout board does), reboot works OK.
The official Intel Edison BSP sends the IPCMSG_COLD_RESET message to the
SCU by default. The IPCMSG_COLD_BOOT message, which is used by the upstream
kernel, is only sent when explicitly selected on the kernel command line.
Use IPCMSG_COLD_RESET unconditionally, which makes reboot work independently
of the power supply voltage.
[ tglx: Massaged changelog ]
Fixes: bda7b072de99 ("x86/platform/intel-mid: Implement power off sequence")
Signed-off-by: Sebastian Panceac <sebastian(a)resin.io>
Signed-off-by: Thomas Gleixner <tglx(a)linutronix.de>
Acked-by: Andy Shevchenko <andy.shevchenko(a)gmail.com>
Cc: stable(a)vger.kernel.org
Link: https://lkml.kernel.org/r/1519810849-15131-1-git-send-email-sebastian@resin…
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
arch/x86/platform/intel-mid/intel-mid.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
--- a/arch/x86/platform/intel-mid/intel-mid.c
+++ b/arch/x86/platform/intel-mid/intel-mid.c
@@ -79,7 +79,7 @@ static void intel_mid_power_off(void)
static void intel_mid_reboot(void)
{
- intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
+ intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 0);
}
static unsigned long __init intel_mid_calibrate_tsc(void)
Patches currently in stable-queue which might be from sebastian(a)resin.io are
queue-4.15/x86-platform-intel-mid-handle-intel-edison-reboot-correctly.patch
This is a note to let you know that I've just added the patch titled
x86/cpu_entry_area: Sync cpu_entry_area to initial_page_table
to the 4.15-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
x86-cpu_entry_area-sync-cpu_entry_area-to-initial_page_table.patch
and it can be found in the queue-4.15 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable(a)vger.kernel.org> know about it.
From 945fd17ab6bab8a4d05da6c3170519fbcfe62ddb Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx(a)linutronix.de>
Date: Wed, 28 Feb 2018 21:14:26 +0100
Subject: x86/cpu_entry_area: Sync cpu_entry_area to initial_page_table
From: Thomas Gleixner <tglx(a)linutronix.de>
commit 945fd17ab6bab8a4d05da6c3170519fbcfe62ddb upstream.
The separation of the cpu_entry_area from the fixmap missed the fact that
on 32bit non-PAE kernels the cpu_entry_area mapping might not be covered in
initial_page_table by the previous synchronizations.
This results in suspend/resume failures because 32bit kernels use the initial
page table for resume. The absence of the cpu_entry_area mapping results in a
triple fault, i.e. an instant reboot.
With PAE enabled this works by chance because the PGD entry which covers
the fixmap and other parts incidentally provides the cpu_entry_area
mapping as well.
Synchronize the initial page table after setting up the cpu entry
area. Instead of adding yet another copy of the same code, move it to a
function and invoke it from the various places.
It needs to be investigated whether the existing calls in setup_arch() and
setup_per_cpu_areas() can be replaced by the later invocation from
setup_cpu_entry_areas(), but that's beyond the scope of this fix.
Fixes: 92a0f81d8957 ("x86/cpu_entry_area: Move it out of the fixmap")
Reported-by: Woody Suwalski <terraluna977(a)gmail.com>
Signed-off-by: Thomas Gleixner <tglx(a)linutronix.de>
Tested-by: Woody Suwalski <terraluna977(a)gmail.com>
Cc: William Grant <william.grant(a)canonical.com>
Cc: stable(a)vger.kernel.org
Link: https://lkml.kernel.org/r/alpine.DEB.2.21.1802282137290.1392@nanos.tec.linu…
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
arch/x86/include/asm/pgtable_32.h | 1 +
arch/x86/include/asm/pgtable_64.h | 1 +
arch/x86/kernel/setup.c | 17 +++++------------
arch/x86/kernel/setup_percpu.c | 17 ++++-------------
arch/x86/mm/cpu_entry_area.c | 6 ++++++
arch/x86/mm/init_32.c | 15 +++++++++++++++
6 files changed, 32 insertions(+), 25 deletions(-)
--- a/arch/x86/include/asm/pgtable_32.h
+++ b/arch/x86/include/asm/pgtable_32.h
@@ -32,6 +32,7 @@ extern pmd_t initial_pg_pmd[];
static inline void pgtable_cache_init(void) { }
static inline void check_pgt_cache(void) { }
void paging_init(void);
+void sync_initial_page_table(void);
/*
* Define this if things work differently on an i386 and an i486:
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -28,6 +28,7 @@ extern pgd_t init_top_pgt[];
#define swapper_pg_dir init_top_pgt
extern void paging_init(void);
+static inline void sync_initial_page_table(void) { }
#define pte_ERROR(e) \
pr_err("%s:%d: bad pte %p(%016lx)\n", \
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1205,20 +1205,13 @@ void __init setup_arch(char **cmdline_p)
kasan_init();
-#ifdef CONFIG_X86_32
- /* sync back kernel address range */
- clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
- swapper_pg_dir + KERNEL_PGD_BOUNDARY,
- KERNEL_PGD_PTRS);
-
/*
- * sync back low identity map too. It is used for example
- * in the 32-bit EFI stub.
+ * Sync back kernel address range.
+ *
+ * FIXME: Can the later sync in setup_cpu_entry_areas() replace
+ * this call?
*/
- clone_pgd_range(initial_page_table,
- swapper_pg_dir + KERNEL_PGD_BOUNDARY,
- min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
-#endif
+ sync_initial_page_table();
tboot_probe();
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -287,24 +287,15 @@ void __init setup_per_cpu_areas(void)
/* Setup cpu initialized, callin, callout masks */
setup_cpu_local_masks();
-#ifdef CONFIG_X86_32
/*
* Sync back kernel address range again. We already did this in
* setup_arch(), but percpu data also needs to be available in
* the smpboot asm. We can't reliably pick up percpu mappings
* using vmalloc_fault(), because exception dispatch needs
* percpu data.
+ *
+ * FIXME: Can the later sync in setup_cpu_entry_areas() replace
+ * this call?
*/
- clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
- swapper_pg_dir + KERNEL_PGD_BOUNDARY,
- KERNEL_PGD_PTRS);
-
- /*
- * sync back low identity map too. It is used for example
- * in the 32-bit EFI stub.
- */
- clone_pgd_range(initial_page_table,
- swapper_pg_dir + KERNEL_PGD_BOUNDARY,
- min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
-#endif
+ sync_initial_page_table();
}
--- a/arch/x86/mm/cpu_entry_area.c
+++ b/arch/x86/mm/cpu_entry_area.c
@@ -163,4 +163,10 @@ void __init setup_cpu_entry_areas(void)
for_each_possible_cpu(cpu)
setup_cpu_entry_area(cpu);
+
+ /*
+ * This is the last essential update to swapper_pgdir which needs
+ * to be synchronized to initial_page_table on 32bit.
+ */
+ sync_initial_page_table();
}
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -453,6 +453,21 @@ static inline void permanent_kmaps_init(
}
#endif /* CONFIG_HIGHMEM */
+void __init sync_initial_page_table(void)
+{
+ clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
+ KERNEL_PGD_PTRS);
+
+ /*
+ * sync back low identity map too. It is used for example
+ * in the 32-bit EFI stub.
+ */
+ clone_pgd_range(initial_page_table,
+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
+ min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
+}
+
void __init native_pagetable_init(void)
{
unsigned long pfn, va;
Patches currently in stable-queue which might be from tglx(a)linutronix.de are
queue-4.15/x86-xen-zero-msr_ia32_spec_ctrl-before-suspend.patch
queue-4.15/x86-cpu_entry_area-sync-cpu_entry_area-to-initial_page_table.patch
queue-4.15/x86-platform-intel-mid-handle-intel-edison-reboot-correctly.patch
queue-4.15/timers-forward-timer-base-before-migrating-timers.patch
queue-4.15/hrtimer-ensure-posix-compliance-relative-clock_realtime-hrtimers.patch
This is a note to let you know that I've just added the patch titled
vfio: disable filesystem-dax page pinning
to the 4.15-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
vfio-disable-filesystem-dax-page-pinning.patch
and it can be found in the queue-4.15 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable(a)vger.kernel.org> know about it.
From 94db151dc89262bfa82922c44e8320cea2334667 Mon Sep 17 00:00:00 2001
From: Dan Williams <dan.j.williams(a)intel.com>
Date: Sun, 4 Feb 2018 10:34:02 -0800
Subject: vfio: disable filesystem-dax page pinning
From: Dan Williams <dan.j.williams(a)intel.com>
commit 94db151dc89262bfa82922c44e8320cea2334667 upstream.
Filesystem-DAX is incompatible with 'longterm' page pinning. Without
page cache indirection a DAX mapping maps filesystem blocks directly.
This means that the filesystem must not modify a file's block map while
any page in a mapping is pinned. In order to prevent the situation of
userspace holding off filesystem operations indefinitely, disallow
'longterm' Filesystem-DAX mappings.
RDMA has the same conflict and the plan there is to add a 'with lease'
mechanism to allow the kernel to notify userspace that the mapping is
being torn down for block-map maintenance. Perhaps something similar can
be put in place for vfio.
Note that xfs and ext4 still report:
"DAX enabled. Warning: EXPERIMENTAL, use at your own risk"
...at mount time, and resolving the dax-dma-vs-truncate problem is one
of the last hurdles to remove that designation.
Acked-by: Alex Williamson <alex.williamson(a)redhat.com>
Cc: Michal Hocko <mhocko(a)suse.com>
Cc: kvm(a)vger.kernel.org
Cc: <stable(a)vger.kernel.org>
Reported-by: Haozhong Zhang <haozhong.zhang(a)intel.com>
Tested-by: Haozhong Zhang <haozhong.zhang(a)intel.com>
Fixes: d475c6346a38 ("dax,ext2: replace XIP read and write with DAX I/O")
Reviewed-by: Christoph Hellwig <hch(a)lst.de>
Signed-off-by: Dan Williams <dan.j.williams(a)intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
drivers/vfio/vfio_iommu_type1.c | 18 +++++++++++++++---
1 file changed, 15 insertions(+), 3 deletions(-)
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -338,11 +338,12 @@ static int vaddr_get_pfn(struct mm_struc
{
struct page *page[1];
struct vm_area_struct *vma;
+ struct vm_area_struct *vmas[1];
int ret;
if (mm == current->mm) {
- ret = get_user_pages_fast(vaddr, 1, !!(prot & IOMMU_WRITE),
- page);
+ ret = get_user_pages_longterm(vaddr, 1, !!(prot & IOMMU_WRITE),
+ page, vmas);
} else {
unsigned int flags = 0;
@@ -351,7 +352,18 @@ static int vaddr_get_pfn(struct mm_struc
down_read(&mm->mmap_sem);
ret = get_user_pages_remote(NULL, mm, vaddr, 1, flags, page,
- NULL, NULL);
+ vmas, NULL);
+ /*
+ * The lifetime of a vaddr_get_pfn() page pin is
+ * userspace-controlled. In the fs-dax case this could
+ * lead to indefinite stalls in filesystem operations.
+ * Disallow attempts to pin fs-dax pages via this
+ * interface.
+ */
+ if (ret > 0 && vma_is_fsdax(vmas[0])) {
+ ret = -EOPNOTSUPP;
+ put_page(page[0]);
+ }
up_read(&mm->mmap_sem);
}
Patches currently in stable-queue which might be from dan.j.williams(a)intel.com are
queue-4.15/dax-fix-vma_is_fsdax-helper.patch
queue-4.15/vfio-disable-filesystem-dax-page-pinning.patch
This is a note to let you know that I've just added the patch titled
timers: Forward timer base before migrating timers
to the 4.15-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
timers-forward-timer-base-before-migrating-timers.patch
and it can be found in the queue-4.15 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable(a)vger.kernel.org> know about it.
From c52232a49e203a65a6e1a670cd5262f59e9364a0 Mon Sep 17 00:00:00 2001
From: Lingutla Chandrasekhar <clingutla(a)codeaurora.org>
Date: Thu, 18 Jan 2018 17:20:22 +0530
Subject: timers: Forward timer base before migrating timers
From: Lingutla Chandrasekhar <clingutla(a)codeaurora.org>
commit c52232a49e203a65a6e1a670cd5262f59e9364a0 upstream.
On CPU hotunplug the enqueued timers of the unplugged CPU are migrated to a
live CPU. This happens from the control thread which initiated the unplug.
If the CPU on which the control thread runs came out of a longer idle
period, then the base clock of that CPU might be stale because the control
thread runs prior to any event which forwards the clock.
In such a case the timers from the unplugged CPU are queued on the live CPU
based on the stale clock, which can cause large delays due to the increased
granularity of the outer timer wheels which are far away from the base clock.
But there is a worse problem than that. The following sequence of events
illustrates it:
- On CPU0, timer1 is queued with expires = 59969 while base->clk = 59131.
The timer is queued at wheel level 2, with a resulting expiry time of 60032
(due to the level granularity; see the arithmetic sketch after this example).
- CPU1 enters idle @60007, with next timer expiry @60020.
- CPU0 is hot-unplugged @60009
- CPU1 exits idle and runs the control thread which migrates the
timers from CPU0
timer1 is now queued in level 0 for immediate handling in the next
softirq because the requested expiry time 59969 is before CPU1's
base->clk of 60007.
- CPU1 runs code which forwards the base clock, which succeeds because the
next expiring timer, which was collected at idle entry time, is still set
to 60020.
So it forwards beyond 60007 and therefore fails to expire the migrated
timer1. That timer only gets expired when the wheel wraps around again, which
takes between 63 and 630 ms depending on the HZ setting.
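For reference, the level-2 expiry rounding in the example above can be
reproduced with simple arithmetic; the sketch below assumes the v4.15
timer-wheel constants (LVL_CLK_DIV = 8, i.e. 64-jiffy granularity at level 2)
and is illustrative only, not code from this patch.

#include <stdio.h>

int main(void)
{
        unsigned long gran = 64;        /* level-2 granularity: 8^2 jiffies */
        unsigned long expires = 59969;  /* requested expiry from the example */
        unsigned long rounded = (expires + gran - 1) / gran * gran;

        /* Prints 60032, the effective expiry quoted in the example. */
        printf("%lu\n", rounded);

        /* A level-0 wrap is 63 jiffies: 63 ms at HZ=1000 and 630 ms at
         * HZ=100, consistent with the 63..630 ms delay range quoted above.
         */
        return 0;
}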
Address both problems by invoking forward_timer_base() for the control CPU's
timer base. All other places, which might run into a similar problem
(mod_timer()/add_timer_on()) already invoke forward_timer_base() to avoid
that.
[ tglx: Massaged comment and changelog ]
Fixes: a683f390b93f ("timers: Forward the wheel clock whenever possible")
Co-developed-by: Neeraj Upadhyay <neeraju(a)codeaurora.org>
Signed-off-by: Neeraj Upadhyay <neeraju(a)codeaurora.org>
Signed-off-by: Lingutla Chandrasekhar <clingutla(a)codeaurora.org>
Signed-off-by: Thomas Gleixner <tglx(a)linutronix.de>
Cc: Anna-Maria Gleixner <anna-maria(a)linutronix.de>
Cc: linux-arm-msm(a)vger.kernel.org
Cc: stable(a)vger.kernel.org
Link: https://lkml.kernel.org/r/20180118115022.6368-1-clingutla@codeaurora.org
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
kernel/time/timer.c | 6 ++++++
1 file changed, 6 insertions(+)
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -1886,6 +1886,12 @@ int timers_dead_cpu(unsigned int cpu)
raw_spin_lock_irq(&new_base->lock);
raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
+ /*
+ * The current CPUs base clock might be stale. Update it
+ * before moving the timers over.
+ */
+ forward_timer_base(new_base);
+
BUG_ON(old_base->running_timer);
for (i = 0; i < WHEEL_SIZE; i++)
Patches currently in stable-queue which might be from clingutla(a)codeaurora.org are
queue-4.15/timers-forward-timer-base-before-migrating-timers.patch
This is a note to let you know that I've just added the patch titled
parisc: Use cr16 interval timers unconditionally on qemu
to the 4.15-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
parisc-use-cr16-interval-timers-unconditionally-on-qemu.patch
and it can be found in the queue-4.15 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable(a)vger.kernel.org> know about it.
From 5ffa8518851f1401817c15d2a7eecc0373c26ff9 Mon Sep 17 00:00:00 2001
From: Helge Deller <deller(a)gmx.de>
Date: Fri, 12 Jan 2018 22:44:00 +0100
Subject: parisc: Use cr16 interval timers unconditionally on qemu
From: Helge Deller <deller(a)gmx.de>
commit 5ffa8518851f1401817c15d2a7eecc0373c26ff9 upstream.
When running on qemu we know that the (emulated) cr16 cpu-internal
clocks are synchronized. So let's use them unconditionally on qemu.
Signed-off-by: Helge Deller <deller(a)gmx.de>
Cc: stable(a)vger.kernel.org # 4.14+
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
arch/parisc/include/asm/processor.h | 2 ++
arch/parisc/kernel/time.c | 2 +-
2 files changed, 3 insertions(+), 1 deletion(-)
--- a/arch/parisc/include/asm/processor.h
+++ b/arch/parisc/include/asm/processor.h
@@ -316,6 +316,8 @@ extern int _parisc_requires_coherency;
#define parisc_requires_coherency() (0)
#endif
+extern int running_on_qemu;
+
#endif /* __ASSEMBLY__ */
#endif /* __ASM_PARISC_PROCESSOR_H */
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -248,7 +248,7 @@ static int __init init_cr16_clocksource(
* different sockets, so mark them unstable and lower rating on
* multi-socket SMP systems.
*/
- if (num_online_cpus() > 1) {
+ if (num_online_cpus() > 1 && !running_on_qemu) {
int cpu;
unsigned long cpu0_loc;
cpu0_loc = per_cpu(cpu_data, 0).cpu_loc;
Patches currently in stable-queue which might be from deller(a)gmx.de are
queue-4.15/parisc-hide-virtual-kernel-memory-layout.patch
queue-4.15/parisc-use-cr16-interval-timers-unconditionally-on-qemu.patch
queue-4.15/parisc-fix-ordering-of-cache-and-tlb-flushes.patch
queue-4.15/parisc-reduce-irq-overhead-when-run-in-qemu.patch
This is a note to let you know that I've just added the patch titled
parisc: Reduce irq overhead when run in qemu
to the 4.15-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
parisc-reduce-irq-overhead-when-run-in-qemu.patch
and it can be found in the queue-4.15 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable(a)vger.kernel.org> know about it.
From 636a415bcc7f4fd020ece8fd5fc648c4cef19c34 Mon Sep 17 00:00:00 2001
From: Helge Deller <deller(a)gmx.de>
Date: Mon, 12 Feb 2018 21:43:55 +0100
Subject: parisc: Reduce irq overhead when run in qemu
From: Helge Deller <deller(a)gmx.de>
commit 636a415bcc7f4fd020ece8fd5fc648c4cef19c34 upstream.
When run under QEMU, calling mfctl(16) creates some overhead because the
qemu timer has to be scaled and moved into the register. This patch
reduces the number of calls to mfctl(16) by moving the calls out of the
loops.
Additionally, increase the minimal time interval to 8000 cycles instead
of 500 to compensate for possible QEMU delays when delivering interrupts.
Signed-off-by: Helge Deller <deller(a)gmx.de>
Cc: stable(a)vger.kernel.org # 4.14+
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
arch/parisc/kernel/time.c | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -76,10 +76,10 @@ irqreturn_t __irq_entry timer_interrupt(
next_tick = cpuinfo->it_value;
/* Calculate how many ticks have elapsed. */
+ now = mfctl(16);
do {
++ticks_elapsed;
next_tick += cpt;
- now = mfctl(16);
} while (next_tick - now > cpt);
/* Store (in CR16 cycles) up to when we are accounting right now. */
@@ -103,16 +103,17 @@ irqreturn_t __irq_entry timer_interrupt(
* if one or the other wrapped. If "now" is "bigger" we'll end up
* with a very large unsigned number.
*/
- while (next_tick - mfctl(16) > cpt)
+ now = mfctl(16);
+ while (next_tick - now > cpt)
next_tick += cpt;
/* Program the IT when to deliver the next interrupt.
* Only bottom 32-bits of next_tick are writable in CR16!
* Timer interrupt will be delivered at least a few hundred cycles
- * after the IT fires, so if we are too close (<= 500 cycles) to the
+ * after the IT fires, so if we are too close (<= 8000 cycles) to the
* next cycle, simply skip it.
*/
- if (next_tick - mfctl(16) <= 500)
+ if (next_tick - now <= 8000)
next_tick += cpt;
mtctl(next_tick, 16);
Patches currently in stable-queue which might be from deller(a)gmx.de are
queue-4.15/parisc-hide-virtual-kernel-memory-layout.patch
queue-4.15/parisc-use-cr16-interval-timers-unconditionally-on-qemu.patch
queue-4.15/parisc-fix-ordering-of-cache-and-tlb-flushes.patch
queue-4.15/parisc-reduce-irq-overhead-when-run-in-qemu.patch
This is a note to let you know that I've just added the patch titled
parisc: Fix ordering of cache and TLB flushes
to the 4.15-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
parisc-fix-ordering-of-cache-and-tlb-flushes.patch
and it can be found in the queue-4.15 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable(a)vger.kernel.org> know about it.
From 0adb24e03a124b79130c9499731936b11ce2677d Mon Sep 17 00:00:00 2001
From: John David Anglin <dave.anglin(a)bell.net>
Date: Tue, 27 Feb 2018 08:16:07 -0500
Subject: parisc: Fix ordering of cache and TLB flushes
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
From: John David Anglin <dave.anglin(a)bell.net>
commit 0adb24e03a124b79130c9499731936b11ce2677d upstream.
The change to flush_kernel_vmap_range() wasn't sufficient to avoid the
SMP stalls. The problem is that some drivers call these routines with
interrupts disabled. Interrupts need to be enabled for flush_tlb_all()
and flush_cache_all() to work. This version adds checks to ensure
interrupts are not disabled before calling routines that need IPI
interrupts. When interrupts are disabled, we now drop into slower code.
The attached change fixes the ordering of cache and TLB flushes in
several cases. When we flush the cache using the existing PTE/TLB
entries, we need to flush the TLB after doing the cache flush. We don't
need to do this when we flush the entire instruction and data caches as
these flushes don't use the existing TLB entries. The same is true for
tmpalias region flushes.
The flush_kernel_vmap_range() and invalidate_kernel_vmap_range()
routines have been updated.
Secondly, we added a new purge_kernel_dcache_range_asm() routine to
pacache.S and use it in invalidate_kernel_vmap_range(). Nominally,
purges are faster than flushes as the cache lines don't have to be
written back to memory.
Hopefully, this is sufficient to resolve the remaining problems due to
cache speculation. So far, testing indicates that this is the case. I
did work up a patch using tmpalias flushes, but there is a performance
hit because we need the physical address for each page, and we also need
to sequence access to the tmpalias flush code. This increases the
probability of stalls.
Signed-off-by: John David Anglin <dave.anglin(a)bell.net>
Cc: stable(a)vger.kernel.org # 4.9+
Signed-off-by: Helge Deller <deller(a)gmx.de>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
arch/parisc/include/asm/cacheflush.h | 1
arch/parisc/kernel/cache.c | 57 +++++++++++++++++++----------------
arch/parisc/kernel/pacache.S | 22 +++++++++++++
3 files changed, 54 insertions(+), 26 deletions(-)
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -26,6 +26,7 @@ void flush_user_icache_range_asm(unsigne
void flush_kernel_icache_range_asm(unsigned long, unsigned long);
void flush_user_dcache_range_asm(unsigned long, unsigned long);
void flush_kernel_dcache_range_asm(unsigned long, unsigned long);
+void purge_kernel_dcache_range_asm(unsigned long, unsigned long);
void flush_kernel_dcache_page_asm(void *);
void flush_kernel_icache_page(void *);
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -465,10 +465,10 @@ EXPORT_SYMBOL(copy_user_page);
int __flush_tlb_range(unsigned long sid, unsigned long start,
unsigned long end)
{
- unsigned long flags, size;
+ unsigned long flags;
- size = (end - start);
- if (size >= parisc_tlb_flush_threshold) {
+ if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
+ end - start >= parisc_tlb_flush_threshold) {
flush_tlb_all();
return 1;
}
@@ -539,13 +539,11 @@ void flush_cache_mm(struct mm_struct *mm
struct vm_area_struct *vma;
pgd_t *pgd;
- /* Flush the TLB to avoid speculation if coherency is required. */
- if (parisc_requires_coherency())
- flush_tlb_all();
-
/* Flushing the whole cache on each cpu takes forever on
rp3440, etc. So, avoid it if the mm isn't too big. */
- if (mm_total_size(mm) >= parisc_cache_flush_threshold) {
+ if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
+ mm_total_size(mm) >= parisc_cache_flush_threshold) {
+ flush_tlb_all();
flush_cache_all();
return;
}
@@ -553,9 +551,9 @@ void flush_cache_mm(struct mm_struct *mm
if (mm->context == mfsp(3)) {
for (vma = mm->mmap; vma; vma = vma->vm_next) {
flush_user_dcache_range_asm(vma->vm_start, vma->vm_end);
- if ((vma->vm_flags & VM_EXEC) == 0)
- continue;
- flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
+ if (vma->vm_flags & VM_EXEC)
+ flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
+ flush_tlb_range(vma, vma->vm_start, vma->vm_end);
}
return;
}
@@ -581,14 +579,9 @@ void flush_cache_mm(struct mm_struct *mm
void flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
- BUG_ON(!vma->vm_mm->context);
-
- /* Flush the TLB to avoid speculation if coherency is required. */
- if (parisc_requires_coherency())
+ if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
+ end - start >= parisc_cache_flush_threshold) {
flush_tlb_range(vma, start, end);
-
- if ((end - start) >= parisc_cache_flush_threshold
- || vma->vm_mm->context != mfsp(3)) {
flush_cache_all();
return;
}
@@ -596,6 +589,7 @@ void flush_cache_range(struct vm_area_st
flush_user_dcache_range_asm(start, end);
if (vma->vm_flags & VM_EXEC)
flush_user_icache_range_asm(start, end);
+ flush_tlb_range(vma, start, end);
}
void
@@ -604,8 +598,7 @@ flush_cache_page(struct vm_area_struct *
BUG_ON(!vma->vm_mm->context);
if (pfn_valid(pfn)) {
- if (parisc_requires_coherency())
- flush_tlb_page(vma, vmaddr);
+ flush_tlb_page(vma, vmaddr);
__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
}
}
@@ -613,21 +606,33 @@ flush_cache_page(struct vm_area_struct *
void flush_kernel_vmap_range(void *vaddr, int size)
{
unsigned long start = (unsigned long)vaddr;
+ unsigned long end = start + size;
- if ((unsigned long)size > parisc_cache_flush_threshold)
+ if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
+ (unsigned long)size >= parisc_cache_flush_threshold) {
+ flush_tlb_kernel_range(start, end);
flush_data_cache();
- else
- flush_kernel_dcache_range_asm(start, start + size);
+ return;
+ }
+
+ flush_kernel_dcache_range_asm(start, end);
+ flush_tlb_kernel_range(start, end);
}
EXPORT_SYMBOL(flush_kernel_vmap_range);
void invalidate_kernel_vmap_range(void *vaddr, int size)
{
unsigned long start = (unsigned long)vaddr;
+ unsigned long end = start + size;
- if ((unsigned long)size > parisc_cache_flush_threshold)
+ if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
+ (unsigned long)size >= parisc_cache_flush_threshold) {
+ flush_tlb_kernel_range(start, end);
flush_data_cache();
- else
- flush_kernel_dcache_range_asm(start, start + size);
+ return;
+ }
+
+ purge_kernel_dcache_range_asm(start, end);
+ flush_tlb_kernel_range(start, end);
}
EXPORT_SYMBOL(invalidate_kernel_vmap_range);
--- a/arch/parisc/kernel/pacache.S
+++ b/arch/parisc/kernel/pacache.S
@@ -1110,6 +1110,28 @@ ENTRY_CFI(flush_kernel_dcache_range_asm)
.procend
ENDPROC_CFI(flush_kernel_dcache_range_asm)
+ENTRY_CFI(purge_kernel_dcache_range_asm)
+ .proc
+ .callinfo NO_CALLS
+ .entry
+
+ ldil L%dcache_stride, %r1
+ ldw R%dcache_stride(%r1), %r23
+ ldo -1(%r23), %r21
+ ANDCM %r26, %r21, %r26
+
+1: cmpb,COND(<<),n %r26, %r25,1b
+ pdc,m %r23(%r26)
+
+ sync
+ syncdma
+ bv %r0(%r2)
+ nop
+ .exit
+
+ .procend
+ENDPROC_CFI(purge_kernel_dcache_range_asm)
+
ENTRY_CFI(flush_user_icache_range_asm)
.proc
.callinfo NO_CALLS
Patches currently in stable-queue which might be from dave.anglin(a)bell.net are
queue-4.15/parisc-fix-ordering-of-cache-and-tlb-flushes.patch