This patch fixes a circular locking dependency reported by the CI and
introduced by commit f21916ec4826 ("s390/vfio-ap: clean up vfio_ap
resources when KVM pointer invalidated"). The lockdep splat only occurs
when starting a Secure Execution guest. Crypto virtualization (vfio_ap) is
not yet supported for SE guests; however, this fix is provided in order to
avoid the CI errors.
The circular lock dependency was introduced when the masks in the guest's
APCB were set while holding the matrix_dev->lock. While that lock is
definitely needed to protect the setting/unsetting of the KVM pointer, it
is not required for setting the masks, so the masks are no longer set
under protection of the matrix_dev->lock.
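In outline, the fix below marks the KVM pointer as being in flux with a new
kvm_busy flag, drops the matrix_dev->lock around the calls that acquire the
kvm->lock, and retakes it to publish the result; other paths either return
-EBUSY or poll the flag. A condensed sketch of the pattern (names taken from
the patch below; the crycbd check, reference counting and error handling are
omitted):

	/* Condensed sketch only -- see the hunks below for the real code.
	 * Writer side, called with matrix_dev->lock held:
	 */
	matrix_mdev->kvm_busy = true;
	mutex_unlock(&matrix_dev->lock);

	/* acquires kvm->lock; must not nest inside matrix_dev->lock */
	kvm_arch_crypto_set_masks(kvm, matrix_mdev->matrix.apm,
				  matrix_mdev->matrix.aqm,
				  matrix_mdev->matrix.adm);

	mutex_lock(&matrix_dev->lock);
	matrix_mdev->kvm = kvm;
	matrix_mdev->kvm_busy = false;

	/* Readers that need a stable pointer (e.g. the PQAP handler) poll: */
	while (matrix_mdev->kvm_busy) {
		mutex_unlock(&matrix_dev->lock);
		msleep(100);
		mutex_lock(&matrix_dev->lock);
	}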
Fixes: f21916ec4826 ("s390/vfio-ap: clean up vfio_ap resources when KVM pointer invalidated")
Cc: stable(a)vger.kernel.org
Signed-off-by: Tony Krowiak <akrowiak(a)linux.ibm.com>
Acked-by: Cornelia Huck <cohuck(a)redhat.com>
---
drivers/s390/crypto/vfio_ap_ops.c | 169 ++++++++++++++++++--------
drivers/s390/crypto/vfio_ap_private.h | 1 +
2 files changed, 122 insertions(+), 48 deletions(-)
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
index 41fc2e4135fe..82b204f573b7 100644
--- a/drivers/s390/crypto/vfio_ap_ops.c
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -294,6 +294,21 @@ static int handle_pqap(struct kvm_vcpu *vcpu)
matrix_mdev = container_of(vcpu->kvm->arch.crypto.pqap_hook,
struct ap_matrix_mdev, pqap_hook);
+ /*
+ * If the KVM pointer is in the process of being set, wait until the
+ * process has completed. Unlock the matrix_dev->lock while waiting to
+ * allow the value of the KVM pointer to be set.
+ */
+ while (matrix_mdev->kvm_busy) {
+ mutex_unlock(&matrix_dev->lock);
+ msleep(100);
+ mutex_lock(&matrix_dev->lock);
+ }
+
+ /* If there is no guest using the mdev, there is nothing to do */
+ if (!matrix_mdev->kvm)
+ goto out_unlock;
+
q = vfio_ap_get_queue(matrix_mdev, apqn);
if (!q)
goto out_unlock;
@@ -350,8 +365,11 @@ static int vfio_ap_mdev_create(struct kobject *kobj, struct mdev_device *mdev)
static int vfio_ap_mdev_remove(struct mdev_device *mdev)
{
struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
-
- if (matrix_mdev->kvm)
+ /*
+ * If the KVM pointer is in flux or the guest is running, disallow
+ * removal of the mdev
+ */
+ if (matrix_mdev->kvm_busy || matrix_mdev->kvm)
return -EBUSY;
mutex_lock(&matrix_dev->lock);
@@ -606,8 +624,11 @@ static ssize_t assign_adapter_store(struct device *dev,
struct mdev_device *mdev = mdev_from_dev(dev);
struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
- /* If the guest is running, disallow assignment of adapter */
- if (matrix_mdev->kvm)
+ /*
+ * If the KVM pointer is in flux or the guest is running, disallow
+ * assignment of adapter
+ */
+ if (matrix_mdev->kvm_busy || matrix_mdev->kvm)
return -EBUSY;
ret = kstrtoul(buf, 0, &apid);
@@ -672,8 +693,11 @@ static ssize_t unassign_adapter_store(struct device *dev,
struct mdev_device *mdev = mdev_from_dev(dev);
struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
- /* If the guest is running, disallow un-assignment of adapter */
- if (matrix_mdev->kvm)
+ /*
+ * If the KVM pointer is in flux or the guest is running, disallow
+ * un-assignment of adapter
+ */
+ if (matrix_mdev->kvm_busy || matrix_mdev->kvm)
return -EBUSY;
ret = kstrtoul(buf, 0, &apid);
@@ -753,8 +777,11 @@ static ssize_t assign_domain_store(struct device *dev,
struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
unsigned long max_apqi = matrix_mdev->matrix.aqm_max;
- /* If the guest is running, disallow assignment of domain */
- if (matrix_mdev->kvm)
+ /*
+ * If the KVM pointer is in flux or the guest is running, disallow
+ * assignment of domain
+ */
+ if (matrix_mdev->kvm_busy || matrix_mdev->kvm)
return -EBUSY;
ret = kstrtoul(buf, 0, &apqi);
@@ -814,8 +841,11 @@ static ssize_t unassign_domain_store(struct device *dev,
struct mdev_device *mdev = mdev_from_dev(dev);
struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
- /* If the guest is running, disallow un-assignment of domain */
- if (matrix_mdev->kvm)
+ /*
+ * If the KVM pointer is in flux or the guest is running, disallow
+ * un-assignment of domain
+ */
+ if (matrix_mdev->kvm_busy || matrix_mdev->kvm)
return -EBUSY;
ret = kstrtoul(buf, 0, &apqi);
@@ -858,8 +888,11 @@ static ssize_t assign_control_domain_store(struct device *dev,
struct mdev_device *mdev = mdev_from_dev(dev);
struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
- /* If the guest is running, disallow assignment of control domain */
- if (matrix_mdev->kvm)
+ /*
+ * If the KVM pointer is in flux or the guest is running, disallow
+ * assignment of control domain
+ */
+ if (matrix_mdev->kvm_busy || matrix_mdev->kvm)
return -EBUSY;
ret = kstrtoul(buf, 0, &id);
@@ -908,8 +941,11 @@ static ssize_t unassign_control_domain_store(struct device *dev,
struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
unsigned long max_domid = matrix_mdev->matrix.adm_max;
- /* If the guest is running, disallow un-assignment of control domain */
- if (matrix_mdev->kvm)
+ /*
+ * If the KVM pointer is in flux or the guest is running, disallow
+ * un-assignment of control domain
+ */
+ if (matrix_mdev->kvm_busy || matrix_mdev->kvm)
return -EBUSY;
ret = kstrtoul(buf, 0, &domid);
@@ -1027,8 +1063,15 @@ static const struct attribute_group *vfio_ap_mdev_attr_groups[] = {
* @matrix_mdev: a mediated matrix device
* @kvm: reference to KVM instance
*
- * Verifies no other mediated matrix device has @kvm and sets a reference to
- * it in @matrix_mdev->kvm.
+ * Sets all data for @matrix_mdev that are needed to manage AP resources
+ * for the guest whose state is represented by @kvm.
+ *
+ * Note: The matrix_dev->lock must be taken prior to calling
+ * this function; however, the lock will be temporarily released while the
+ * guest's AP configuration is set to avoid a potential lockdep splat.
+ * The kvm->lock is taken to set the guest's AP configuration which, under
+ * certain circumstances, will result in a circular lock dependency if this is
+ * done under the matrix_dev->lock.
*
* Return 0 if no other mediated matrix device has a reference to @kvm;
* otherwise, returns an -EPERM.
@@ -1043,9 +1086,18 @@ static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev,
return -EPERM;
}
- matrix_mdev->kvm = kvm;
- kvm_get_kvm(kvm);
- kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook;
+ if (kvm->arch.crypto.crycbd) {
+ matrix_mdev->kvm_busy = true;
+ kvm_get_kvm(kvm);
+ mutex_unlock(&matrix_dev->lock);
+ kvm_arch_crypto_set_masks(kvm,
+ matrix_mdev->matrix.apm,
+ matrix_mdev->matrix.aqm,
+ matrix_mdev->matrix.adm);
+ mutex_lock(&matrix_dev->lock);
+ matrix_mdev->kvm = kvm;
+ matrix_mdev->kvm_busy = false;
+ }
return 0;
}
@@ -1079,51 +1131,54 @@ static int vfio_ap_mdev_iommu_notifier(struct notifier_block *nb,
return NOTIFY_DONE;
}
+/**
+ * vfio_ap_mdev_unset_kvm
+ *
+ * @matrix_mdev: a matrix mediated device
+ *
+ * Performs clean-up of resources no longer needed by @matrix_mdev.
+ *
+ * Note: The matrix_dev->lock must be taken prior to calling
+ * this function; however, the lock will be temporarily released while the
+ * guest's AP configuration is cleared to avoid a potential lockdep splat.
+ * The kvm->lock is taken to clear the guest's AP configuration which, under
+ * certain circumstances, will result in a circular lock dependency if this is
+ * done under the matrix_dev->lock.
+ *
+ */
static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev)
{
- kvm_arch_crypto_clear_masks(matrix_mdev->kvm);
- matrix_mdev->kvm->arch.crypto.pqap_hook = NULL;
- vfio_ap_mdev_reset_queues(matrix_mdev->mdev);
- kvm_put_kvm(matrix_mdev->kvm);
- matrix_mdev->kvm = NULL;
+ if (matrix_mdev->kvm) {
+ matrix_mdev->kvm_busy = true;
+ mutex_unlock(&matrix_dev->lock);
+ kvm_arch_crypto_clear_masks(matrix_mdev->kvm);
+ mutex_lock(&matrix_dev->lock);
+ vfio_ap_mdev_reset_queues(matrix_mdev->mdev);
+ kvm_put_kvm(matrix_mdev->kvm);
+ matrix_mdev->kvm = NULL;
+ matrix_mdev->kvm_busy = false;
+ }
}
static int vfio_ap_mdev_group_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
- int ret, notify_rc = NOTIFY_OK;
+ int notify_rc = NOTIFY_OK;
struct ap_matrix_mdev *matrix_mdev;
if (action != VFIO_GROUP_NOTIFY_SET_KVM)
return NOTIFY_OK;
- matrix_mdev = container_of(nb, struct ap_matrix_mdev, group_notifier);
mutex_lock(&matrix_dev->lock);
+ matrix_mdev = container_of(nb, struct ap_matrix_mdev, group_notifier);
- if (!data) {
- if (matrix_mdev->kvm)
- vfio_ap_mdev_unset_kvm(matrix_mdev);
- goto notify_done;
- }
-
- ret = vfio_ap_mdev_set_kvm(matrix_mdev, data);
- if (ret) {
- notify_rc = NOTIFY_DONE;
- goto notify_done;
- }
-
- /* If there is no CRYCB pointer, then we can't copy the masks */
- if (!matrix_mdev->kvm->arch.crypto.crycbd) {
+ if (!data)
+ vfio_ap_mdev_unset_kvm(matrix_mdev);
+ else if (vfio_ap_mdev_set_kvm(matrix_mdev, data))
notify_rc = NOTIFY_DONE;
- goto notify_done;
- }
-
- kvm_arch_crypto_set_masks(matrix_mdev->kvm, matrix_mdev->matrix.apm,
- matrix_mdev->matrix.aqm,
- matrix_mdev->matrix.adm);
-notify_done:
mutex_unlock(&matrix_dev->lock);
+
return notify_rc;
}
@@ -1258,8 +1313,20 @@ static void vfio_ap_mdev_release(struct mdev_device *mdev)
struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
mutex_lock(&matrix_dev->lock);
+ /*
+ * If the KVM pointer is in the process of being set, wait until the
+ * process has completed. Unlock the matrix_dev->lock while waiting to
+ * allow the value of the KVM pointer to be set.
+ */
+ while (matrix_mdev->kvm_busy) {
+ mutex_unlock(&matrix_dev->lock);
+ msleep(100);
+ mutex_lock(&matrix_dev->lock);
+ }
+
if (matrix_mdev->kvm)
vfio_ap_mdev_unset_kvm(matrix_mdev);
+
mutex_unlock(&matrix_dev->lock);
vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
@@ -1293,6 +1360,7 @@ static ssize_t vfio_ap_mdev_ioctl(struct mdev_device *mdev,
unsigned int cmd, unsigned long arg)
{
int ret;
+ struct ap_matrix_mdev *matrix_mdev;
mutex_lock(&matrix_dev->lock);
switch (cmd) {
@@ -1300,7 +1368,12 @@ static ssize_t vfio_ap_mdev_ioctl(struct mdev_device *mdev,
ret = vfio_ap_mdev_get_device_info(arg);
break;
case VFIO_DEVICE_RESET:
- ret = vfio_ap_mdev_reset_queues(mdev);
+ matrix_mdev = mdev_get_drvdata(mdev);
+
+ if (matrix_mdev->kvm_busy)
+ ret = -EBUSY;
+ else
+ ret = vfio_ap_mdev_reset_queues(mdev);
break;
default:
ret = -EOPNOTSUPP;
diff --git a/drivers/s390/crypto/vfio_ap_private.h b/drivers/s390/crypto/vfio_ap_private.h
index 28e9d9989768..d1c8d25a899e 100644
--- a/drivers/s390/crypto/vfio_ap_private.h
+++ b/drivers/s390/crypto/vfio_ap_private.h
@@ -83,6 +83,7 @@ struct ap_matrix_mdev {
struct ap_matrix matrix;
struct notifier_block group_notifier;
struct notifier_block iommu_notifier;
+ bool kvm_busy;
struct kvm *kvm;
struct kvm_s390_module_hook pqap_hook;
struct mdev_device *mdev;
--
2.21.3
There are a couple of new drivers and support for the new management
interface for mlx under review now. I figured I'll send them separately
if review is done in time; lots of people are waiting for the vdpa tool
patches, so I want to make sure they make this release.
The following changes since commit f40ddce88593482919761f74910f42f4b84c004b:
Linux 5.11 (2021-02-14 14:32:24 -0800)
are available in the Git repository at:
https://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost.git tags/for_linus
for you to fetch changes up to 16c10bede8b3d8594279752bf53153491f3f944f:
virtio-input: add multi-touch support (2021-02-23 07:52:59 -0500)
----------------------------------------------------------------
virtio: features, fixes
new vdpa features to allow creation and deletion of new devices
virtio-blk support per-device queue depth
fixes, cleanups all over the place
Signed-off-by: Michael S. Tsirkin <mst(a)redhat.com>
----------------------------------------------------------------
Colin Xu (1):
virtio_input: Prevent EV_MSC/MSC_TIMESTAMP loop storm for MT.
Dongli Zhang (1):
vhost scsi: alloc vhost_scsi with kvzalloc() to avoid delay
Gustavo A. R. Silva (1):
virtio_net: Fix fall-through warnings for Clang
Jason Wang (17):
virtio-pci: do not access iomem via struct virtio_pci_device directly
virtio-pci: split out modern device
virtio-pci-modern: factor out modern device initialization logic
virtio-pci-modern: introduce vp_modern_remove()
virtio-pci-modern: introduce helper to set config vector
virtio-pci-modern: introduce helpers for setting and getting status
virtio-pci-modern: introduce helpers for setting and getting features
virtio-pci-modern: introduce vp_modern_generation()
virtio-pci-modern: introduce vp_modern_set_queue_vector()
virtio-pci-modern: introduce vp_modern_queue_address()
virtio-pci-modern: introduce helper to set/get queue_enable
virtio-pci-modern: introduce helper for setting/geting queue size
virtio-pci-modern: introduce helper for getting queue nums
virtio-pci-modern: introduce helper to get notification offset
virito-pci-modern: rename map_capability() to vp_modern_map_capability()
virtio-pci: introduce modern device module
virtio_vdpa: don't warn when fail to disable vq
Jiapeng Zhong (1):
virtio-mem: Assign boolean values to a bool variable
Joseph Qi (1):
virtio-blk: support per-device queue depth
Mathias Crombez (1):
virtio-input: add multi-touch support
Parav Pandit (6):
vdpa_sim_net: Make mac address array static
vdpa: Extend routine to accept vdpa device name
vdpa: Define vdpa mgmt device, ops and a netlink interface
vdpa: Enable a user to add and delete a vdpa device
vdpa: Enable user to query vdpa device info
vdpa_sim_net: Add support for user supported devices
Stefano Garzarella (1):
vdpa/mlx5: fix param validation in mlx5_vdpa_get_config()
Xianting Tian (1):
virtio_mmio: fix one typo
drivers/block/virtio_blk.c | 11 +-
drivers/net/virtio_net.c | 1 +
drivers/vdpa/Kconfig | 1 +
drivers/vdpa/ifcvf/ifcvf_main.c | 2 +-
drivers/vdpa/mlx5/net/mlx5_vnet.c | 4 +-
drivers/vdpa/vdpa.c | 503 ++++++++++++++++++++++++++-
drivers/vdpa/vdpa_sim/vdpa_sim.c | 3 +-
drivers/vdpa/vdpa_sim/vdpa_sim.h | 2 +
drivers/vdpa/vdpa_sim/vdpa_sim_net.c | 100 ++++--
drivers/vhost/scsi.c | 9 +-
drivers/virtio/Kconfig | 9 +
drivers/virtio/Makefile | 1 +
drivers/virtio/virtio_input.c | 26 +-
drivers/virtio/virtio_mem.c | 2 +-
drivers/virtio/virtio_mmio.c | 2 +-
drivers/virtio/virtio_pci_common.h | 22 +-
drivers/virtio/virtio_pci_modern.c | 504 ++++-----------------------
drivers/virtio/virtio_pci_modern_dev.c | 599 +++++++++++++++++++++++++++++++++
drivers/virtio/virtio_vdpa.c | 3 +-
include/linux/vdpa.h | 44 ++-
include/linux/virtio_pci_modern.h | 111 ++++++
include/uapi/linux/vdpa.h | 40 +++
22 files changed, 1492 insertions(+), 507 deletions(-)
create mode 100644 drivers/virtio/virtio_pci_modern_dev.c
create mode 100644 include/linux/virtio_pci_modern.h
create mode 100644 include/uapi/linux/vdpa.h
The patch below does not apply to the 4.9-stable tree.
If someone wants it applied there, or to any other stable or longterm
tree, then please email the backport, including the original git commit
id to <stable(a)vger.kernel.org>.
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
From 7ffddd499ba6122b1a07828f023d1d67629aa017 Mon Sep 17 00:00:00 2001
From: Muchun Song <songmuchun(a)bytedance.com>
Date: Thu, 4 Feb 2021 18:32:06 -0800
Subject: [PATCH] mm: hugetlb: fix a race between freeing and dissolving the
page
There is a race condition between __free_huge_page()
and dissolve_free_huge_page().
CPU0:                                CPU1:

// page_count(page) == 1
put_page(page)
  __free_huge_page(page)
                                     dissolve_free_huge_page(page)
                                       spin_lock(&hugetlb_lock)
                                       // PageHuge(page) && !page_count(page)
                                       update_and_free_page(page)
                                       // page is freed to the buddy
                                       spin_unlock(&hugetlb_lock)
    spin_lock(&hugetlb_lock)
    clear_page_huge_active(page)
    enqueue_huge_page(page)
    // It is wrong, the page is already freed
    spin_unlock(&hugetlb_lock)
The race window is between put_page() and dissolve_free_huge_page(); as a
result, __free_huge_page() would corrupt page(s) already in the buddy
allocator.
We should make sure that the page is already on the free list when it is
dissolved.
Link: https://lkml.kernel.org/r/20210115124942.46403-4-songmuchun@bytedance.com
Fixes: c8721bbbdd36 ("mm: memory-hotplug: enable memory hotplug to handle hugepage")
Signed-off-by: Muchun Song <songmuchun(a)bytedance.com>
Reviewed-by: Mike Kravetz <mike.kravetz(a)oracle.com>
Reviewed-by: Oscar Salvador <osalvador(a)suse.de>
Acked-by: Michal Hocko <mhocko(a)suse.com>
Cc: David Hildenbrand <david(a)redhat.com>
Cc: Yang Shi <shy828301(a)gmail.com>
Cc: <stable(a)vger.kernel.org>
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds(a)linux-foundation.org>
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 6f0e242d38ca..c6ee3c28a04e 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -79,6 +79,21 @@ DEFINE_SPINLOCK(hugetlb_lock);
static int num_fault_mutexes;
struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
+static inline bool PageHugeFreed(struct page *head)
+{
+ return page_private(head + 4) == -1UL;
+}
+
+static inline void SetPageHugeFreed(struct page *head)
+{
+ set_page_private(head + 4, -1UL);
+}
+
+static inline void ClearPageHugeFreed(struct page *head)
+{
+ set_page_private(head + 4, 0);
+}
+
/* Forward declaration */
static int hugetlb_acct_memory(struct hstate *h, long delta);
@@ -1028,6 +1043,7 @@ static void enqueue_huge_page(struct hstate *h, struct page *page)
list_move(&page->lru, &h->hugepage_freelists[nid]);
h->free_huge_pages++;
h->free_huge_pages_node[nid]++;
+ SetPageHugeFreed(page);
}
static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
@@ -1044,6 +1060,7 @@ static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
list_move(&page->lru, &h->hugepage_activelist);
set_page_refcounted(page);
+ ClearPageHugeFreed(page);
h->free_huge_pages--;
h->free_huge_pages_node[nid]--;
return page;
@@ -1505,6 +1522,7 @@ static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
spin_lock(&hugetlb_lock);
h->nr_huge_pages++;
h->nr_huge_pages_node[nid]++;
+ ClearPageHugeFreed(page);
spin_unlock(&hugetlb_lock);
}
@@ -1755,6 +1773,7 @@ int dissolve_free_huge_page(struct page *page)
{
int rc = -EBUSY;
+retry:
/* Not to disrupt normal path by vainly holding hugetlb_lock */
if (!PageHuge(page))
return 0;
@@ -1771,6 +1790,26 @@ int dissolve_free_huge_page(struct page *page)
int nid = page_to_nid(head);
if (h->free_huge_pages - h->resv_huge_pages == 0)
goto out;
+
+ /*
+ * We should make sure that the page is already on the free list
+ * when it is dissolved.
+ */
+ if (unlikely(!PageHugeFreed(head))) {
+ spin_unlock(&hugetlb_lock);
+ cond_resched();
+
+ /*
+ * Theoretically, we should return -EBUSY when we
+ * encounter this race. In fact, we have a chance
+ * to successfully dissolve the page if we do a
+ * retry. Because the race window is quite small.
+ * If we seize this opportunity, it is an optimization
+ * for increasing the success rate of dissolving page.
+ */
+ goto retry;
+ }
+
/*
* Move PageHWPoison flag from head page to the raw error page,
* which makes any subpages rather than the error page reusable.
The patch below does not apply to the 4.14-stable tree.
If someone wants it applied there, or to any other stable or longterm
tree, then please email the backport, including the original git commit
id to <stable(a)vger.kernel.org>.
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
From 7ffddd499ba6122b1a07828f023d1d67629aa017 Mon Sep 17 00:00:00 2001
From: Muchun Song <songmuchun(a)bytedance.com>
Date: Thu, 4 Feb 2021 18:32:06 -0800
Subject: [PATCH] mm: hugetlb: fix a race between freeing and dissolving the
page
There is a race condition between __free_huge_page()
and dissolve_free_huge_page().
CPU0:                                CPU1:

// page_count(page) == 1
put_page(page)
  __free_huge_page(page)
                                     dissolve_free_huge_page(page)
                                       spin_lock(&hugetlb_lock)
                                       // PageHuge(page) && !page_count(page)
                                       update_and_free_page(page)
                                       // page is freed to the buddy
                                       spin_unlock(&hugetlb_lock)
    spin_lock(&hugetlb_lock)
    clear_page_huge_active(page)
    enqueue_huge_page(page)
    // It is wrong, the page is already freed
    spin_unlock(&hugetlb_lock)
The race window is between put_page() and dissolve_free_huge_page(); as a
result, __free_huge_page() would corrupt page(s) already in the buddy
allocator.
We should make sure that the page is already on the free list when it is
dissolved.
Link: https://lkml.kernel.org/r/20210115124942.46403-4-songmuchun@bytedance.com
Fixes: c8721bbbdd36 ("mm: memory-hotplug: enable memory hotplug to handle hugepage")
Signed-off-by: Muchun Song <songmuchun(a)bytedance.com>
Reviewed-by: Mike Kravetz <mike.kravetz(a)oracle.com>
Reviewed-by: Oscar Salvador <osalvador(a)suse.de>
Acked-by: Michal Hocko <mhocko(a)suse.com>
Cc: David Hildenbrand <david(a)redhat.com>
Cc: Yang Shi <shy828301(a)gmail.com>
Cc: <stable(a)vger.kernel.org>
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds(a)linux-foundation.org>
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 6f0e242d38ca..c6ee3c28a04e 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -79,6 +79,21 @@ DEFINE_SPINLOCK(hugetlb_lock);
static int num_fault_mutexes;
struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
+static inline bool PageHugeFreed(struct page *head)
+{
+ return page_private(head + 4) == -1UL;
+}
+
+static inline void SetPageHugeFreed(struct page *head)
+{
+ set_page_private(head + 4, -1UL);
+}
+
+static inline void ClearPageHugeFreed(struct page *head)
+{
+ set_page_private(head + 4, 0);
+}
+
/* Forward declaration */
static int hugetlb_acct_memory(struct hstate *h, long delta);
@@ -1028,6 +1043,7 @@ static void enqueue_huge_page(struct hstate *h, struct page *page)
list_move(&page->lru, &h->hugepage_freelists[nid]);
h->free_huge_pages++;
h->free_huge_pages_node[nid]++;
+ SetPageHugeFreed(page);
}
static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
@@ -1044,6 +1060,7 @@ static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
list_move(&page->lru, &h->hugepage_activelist);
set_page_refcounted(page);
+ ClearPageHugeFreed(page);
h->free_huge_pages--;
h->free_huge_pages_node[nid]--;
return page;
@@ -1505,6 +1522,7 @@ static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
spin_lock(&hugetlb_lock);
h->nr_huge_pages++;
h->nr_huge_pages_node[nid]++;
+ ClearPageHugeFreed(page);
spin_unlock(&hugetlb_lock);
}
@@ -1755,6 +1773,7 @@ int dissolve_free_huge_page(struct page *page)
{
int rc = -EBUSY;
+retry:
/* Not to disrupt normal path by vainly holding hugetlb_lock */
if (!PageHuge(page))
return 0;
@@ -1771,6 +1790,26 @@ int dissolve_free_huge_page(struct page *page)
int nid = page_to_nid(head);
if (h->free_huge_pages - h->resv_huge_pages == 0)
goto out;
+
+ /*
+ * We should make sure that the page is already on the free list
+ * when it is dissolved.
+ */
+ if (unlikely(!PageHugeFreed(head))) {
+ spin_unlock(&hugetlb_lock);
+ cond_resched();
+
+ /*
+ * Theoretically, we should return -EBUSY when we
+ * encounter this race. In fact, we have a chance
+ * to successfully dissolve the page if we do a
+ * retry. Because the race window is quite small.
+ * If we seize this opportunity, it is an optimization
+ * for increasing the success rate of dissolving page.
+ */
+ goto retry;
+ }
+
/*
* Move PageHWPoison flag from head page to the raw error page,
* which makes any subpages rather than the error page reusable.
Hello,
We ran automated tests on a recent commit from this kernel tree:
Kernel repo: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable-rc.git
Commit: 9c7a62998dea - KVM: Use kvm_pfn_t for local PFN variable in hva_to_pfn_remapped()
The results of these automated tests are provided below.
Overall result: PASSED
Merge: OK
Compile: OK
Tests: OK
All kernel binaries, config files, and logs are available for download here:
https://arr-cki-prod-datawarehouse-public.s3.amazonaws.com/index.html?prefi…
Please reply to this email if you have any questions about the tests that we
ran or if you have any suggestions on how to make future tests more effective.
,-. ,-.
( C ) ( K ) Continuous
`-',-.`-' Kernel
( I ) Integration
`-'
______________________________________________________________________________
Compile testing
---------------
We compiled the kernel for 4 architectures:
aarch64:
make options: make -j30 INSTALL_MOD_STRIP=1 targz-pkg
ppc64le:
make options: make -j30 INSTALL_MOD_STRIP=1 targz-pkg
s390x:
make options: make -j30 INSTALL_MOD_STRIP=1 targz-pkg
x86_64:
make options: make -j30 INSTALL_MOD_STRIP=1 targz-pkg
Hardware testing
----------------
We booted each kernel and ran the following tests:
aarch64:
Host 1:
⚡ Internal infrastructure issues prevented one or more tests (marked
with ⚡⚡⚡) from running on this architecture.
This is not the fault of the kernel that was tested.
✅ Boot test
✅ ACPI table test
✅ ACPI enabled test
✅ LTP
✅ Loopdev Sanity
✅ Memory: fork_mem
✅ Memory function: memfd_create
✅ AMTU (Abstract Machine Test Utility)
✅ Networking bridge: sanity
✅ Networking socket: fuzz
⏱ Networking: igmp conformance test
⚡⚡⚡ Networking route: pmtu
⚡⚡⚡ Networking route_func - local
⚡⚡⚡ Networking route_func - forward
⚡⚡⚡ Networking TCP: keepalive test
⚡⚡⚡ Networking UDP: socket
⚡⚡⚡ Networking tunnel: geneve basic test
⚡⚡⚡ Networking tunnel: gre basic
⚡⚡⚡ L2TP basic test
⚡⚡⚡ Networking tunnel: vxlan basic
⚡⚡⚡ Networking ipsec: basic netns - transport
⚡⚡⚡ Networking ipsec: basic netns - tunnel
⚡⚡⚡ Libkcapi AF_ALG test
⚡⚡⚡ pciutils: update pci ids test
⚡⚡⚡ ALSA PCM loopback test
⚡⚡⚡ ALSA Control (mixer) Userspace Element test
⚡⚡⚡ storage: SCSI VPD
⚡⚡⚡ trace: ftrace/tracer
🚧 ⚡⚡⚡ CIFS Connectathon
🚧 ⚡⚡⚡ POSIX pjd-fstest suites
🚧 ⚡⚡⚡ Firmware test suite
🚧 ⚡⚡⚡ jvm - jcstress tests
🚧 ⚡⚡⚡ Memory function: kaslr
🚧 ⚡⚡⚡ Ethernet drivers sanity
🚧 ⚡⚡⚡ Networking firewall: basic netfilter test
🚧 ⚡⚡⚡ audit: audit testsuite test
ppc64le:
Host 1:
⚡ Internal infrastructure issues prevented one or more tests (marked
with ⚡⚡⚡) from running on this architecture.
This is not the fault of the kernel that was tested.
⚡⚡⚡ Boot test
⚡⚡⚡ selinux-policy: serge-testsuite
⚡⚡⚡ storage: software RAID testing
🚧 ⚡⚡⚡ xfstests - ext4
🚧 ⚡⚡⚡ xfstests - xfs
🚧 ⚡⚡⚡ xfstests - btrfs
🚧 ⚡⚡⚡ IPMI driver test
🚧 ⚡⚡⚡ IPMItool loop stress test
🚧 ⚡⚡⚡ Storage blktests
🚧 ⚡⚡⚡ Storage block - filesystem fio test
🚧 ⚡⚡⚡ Storage block - queue scheduler test
🚧 ⚡⚡⚡ Storage nvme - tcp
🚧 ⚡⚡⚡ Storage: swraid mdadm raid_module test
Host 2:
⚡ Internal infrastructure issues prevented one or more tests (marked
with ⚡⚡⚡) from running on this architecture.
This is not the fault of the kernel that was tested.
⚡⚡⚡ Boot test
⚡⚡⚡ LTP
⚡⚡⚡ Loopdev Sanity
⚡⚡⚡ Memory: fork_mem
⚡⚡⚡ Memory function: memfd_create
⚡⚡⚡ AMTU (Abstract Machine Test Utility)
⚡⚡⚡ Networking bridge: sanity
⚡⚡⚡ Networking socket: fuzz
⚡⚡⚡ Networking route: pmtu
⚡⚡⚡ Networking route_func - local
⚡⚡⚡ Networking route_func - forward
⚡⚡⚡ Networking TCP: keepalive test
⚡⚡⚡ Networking UDP: socket
⚡⚡⚡ Networking tunnel: geneve basic test
⚡⚡⚡ Networking tunnel: gre basic
⚡⚡⚡ L2TP basic test
⚡⚡⚡ Networking tunnel: vxlan basic
⚡⚡⚡ Networking ipsec: basic netns - tunnel
⚡⚡⚡ Libkcapi AF_ALG test
⚡⚡⚡ pciutils: update pci ids test
⚡⚡⚡ ALSA PCM loopback test
⚡⚡⚡ ALSA Control (mixer) Userspace Element test
⚡⚡⚡ trace: ftrace/tracer
🚧 ⚡⚡⚡ CIFS Connectathon
🚧 ⚡⚡⚡ POSIX pjd-fstest suites
🚧 ⚡⚡⚡ jvm - jcstress tests
🚧 ⚡⚡⚡ Memory function: kaslr
🚧 ⚡⚡⚡ Ethernet drivers sanity
🚧 ⚡⚡⚡ Networking firewall: basic netfilter test
🚧 ⚡⚡⚡ audit: audit testsuite test
s390x:
Host 1:
⚡ Internal infrastructure issues prevented one or more tests (marked
with ⚡⚡⚡) from running on this architecture.
This is not the fault of the kernel that was tested.
✅ Boot test
⚡⚡⚡ LTP
⚡⚡⚡ Loopdev Sanity
⚡⚡⚡ Memory: fork_mem
⚡⚡⚡ Memory function: memfd_create
⚡⚡⚡ AMTU (Abstract Machine Test Utility)
⚡⚡⚡ Networking bridge: sanity
⚡⚡⚡ Networking route: pmtu
⚡⚡⚡ Networking route_func - local
⚡⚡⚡ Networking route_func - forward
⚡⚡⚡ Networking TCP: keepalive test
⚡⚡⚡ Networking UDP: socket
⚡⚡⚡ Networking tunnel: geneve basic test
⚡⚡⚡ Networking tunnel: gre basic
⚡⚡⚡ L2TP basic test
⚡⚡⚡ Networking tunnel: vxlan basic
⚡⚡⚡ Networking ipsec: basic netns - transport
⚡⚡⚡ Networking ipsec: basic netns - tunnel
⚡⚡⚡ Libkcapi AF_ALG test
⚡⚡⚡ trace: ftrace/tracer
🚧 ⚡⚡⚡ CIFS Connectathon
🚧 ⚡⚡⚡ POSIX pjd-fstest suites
🚧 ⚡⚡⚡ jvm - jcstress tests
🚧 ⚡⚡⚡ Memory function: kaslr
🚧 ⚡⚡⚡ Ethernet drivers sanity
🚧 ⚡⚡⚡ Networking firewall: basic netfilter test
🚧 ⚡⚡⚡ audit: audit testsuite test
x86_64:
Host 1:
⚡ Internal infrastructure issues prevented one or more tests (marked
with ⚡⚡⚡) from running on this architecture.
This is not the fault of the kernel that was tested.
⚡⚡⚡ Boot test
⚡⚡⚡ selinux-policy: serge-testsuite
⚡⚡⚡ storage: software RAID testing
🚧 ⚡⚡⚡ CPU: Frequency Driver Test
🚧 ⚡⚡⚡ CPU: Idle Test
🚧 ⚡⚡⚡ xfstests - ext4
🚧 ⚡⚡⚡ xfstests - xfs
🚧 ⚡⚡⚡ xfstests - btrfs
🚧 ⚡⚡⚡ xfstests - nfsv4.2
🚧 ⚡⚡⚡ xfstests - cifsv3.11
🚧 ⚡⚡⚡ IPMI driver test
🚧 ⚡⚡⚡ IPMItool loop stress test
🚧 ⚡⚡⚡ power-management: cpupower/sanity test
🚧 ⚡⚡⚡ Storage blktests
🚧 ⚡⚡⚡ Storage block - filesystem fio test
🚧 ⚡⚡⚡ Storage block - queue scheduler test
🚧 ⚡⚡⚡ Storage nvme - tcp
🚧 ⚡⚡⚡ Storage: swraid mdadm raid_module test
🚧 ⚡⚡⚡ stress: stress-ng
Test sources: https://gitlab.com/cki-project/kernel-tests
💚 Pull requests are welcome for new tests or improvements to existing tests!
Aborted tests
-------------
Tests that didn't complete running successfully are marked with ⚡⚡⚡.
If this was caused by an infrastructure issue, we try to mark that
explicitly in the report.
Waived tests
------------
If the test run included waived tests, they are marked with 🚧. Such tests are
executed but their results are not taken into account. Tests are waived when
their results are not reliable enough, e.g. when they're just introduced or are
being fixed.
Testing timeout
---------------
We aim to provide a report within reasonable timeframe. Tests that haven't
finished running yet are marked with ⏱.
On 2/25/21 8:53 AM, Tony Krowiak wrote:
>
>
> On 2/25/21 6:28 AM, Halil Pasic wrote:
>> On Wed, 24 Feb 2021 22:28:50 -0500
>> Tony Krowiak<akrowiak(a)linux.ibm.com> wrote:
>>
>>>>> static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev)
>>>>> {
>>>>> - kvm_arch_crypto_clear_masks(matrix_mdev->kvm);
>>>>> - matrix_mdev->kvm->arch.crypto.pqap_hook = NULL;
>>>>> - vfio_ap_mdev_reset_queues(matrix_mdev->mdev);
>>>>> - kvm_put_kvm(matrix_mdev->kvm);
>>>>> - matrix_mdev->kvm = NULL;
>>>>> + struct kvm *kvm;
>>>>> +
>>>>> + if (matrix_mdev->kvm) {
>>>>> + kvm = matrix_mdev->kvm;
>>>>> + kvm_get_kvm(kvm);
>>>>> + matrix_mdev->kvm = NULL;
>>>> I think if there were two threads doing the unset in parallel, one
>>>> of them could bail out and carry on before the cleanup is done. But
>>>> since nothing much happens in release after that, I don't see an
>>>> immediate problem.
>>>>
>>>> Another thing to consider is, that setting ->kvm to NULL arms
>>>> vfio_ap_mdev_remove()...
>>> I'm not entirely sure what you mean by this, but my
>>> assumption is that you are talking about the check
>>> for matrix_mdev->kvm != NULL at the start of
>>> that function.
>> Yes I was talking about the check
>>
>> static int vfio_ap_mdev_remove(struct mdev_device *mdev)
>> {
>> struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
>>
>> if (matrix_mdev->kvm)
>> return -EBUSY;
>> ...
>> kfree(matrix_mdev);
>> ...
>> }
>>
>> As you see, we bail out if kvm is still set, otherwise we clean up the
>> matrix_mdev which includes kfree-ing it. And vfio_ap_mdev_remove() is
>> initiated via the sysfs, i.e. can be initiated at any time. If we were
>> to free matrix_mdev in mdev_remove() and then carry on with kvm_unset()
>> with mutex_lock(&matrix_dev->lock); that would be bad.
>
> I agree.
>
>>
>>> The reason
>>> matrix_mdev->kvm is set to NULL before giving up
>>> the matrix_dev->lock is so that functions that check
>>> for the presence of the matrix_mdev->kvm pointer,
>>> such as assign_adapter_store() - will exit if they get
>>> control while the masks are being cleared.
>> I disagree!
>>
>> static ssize_t assign_adapter_store(struct device *dev,
>> struct device_attribute *attr,
>> const char *buf, size_t count)
>> {
>> int ret;
>> unsigned long apid;
>> struct mdev_device *mdev = mdev_from_dev(dev);
>> struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
>>
>> /* If the guest is running, disallow assignment of adapter */
>> if (matrix_mdev->kvm)
>> return -EBUSY;
>>
>> We bail out when kvm != NULL, so having it set to NULL while the
>> masks are being cleared will make these not bail out.
>
> You are correct, I am an idiot.
>
>>> So what we have
>>> here is a catch-22; in other words, we have the case
>>> you pointed out above and the cases related to
>>> assigning/unassigning adapters, domains and
>>> control domains which should exit when a guest
>>> is running.
>> See above.
>
> Ditto.
>
>>> I may have an idea to resolve this. Suppose we add:
>>>
>>> struct ap_matrix_mdev {
>>> ...
>>> bool kvm_busy;
>>> ...
>>> }
>>>
>>> This flag will be set to true at the start of both the
>>> vfio_ap_mdev_set_kvm() and vfio_ap_mdev_unset_kvm()
>>> and set to false at the end. The assignment/unassignment
>>> and remove callback functions can test this flag and
>>> return -EBUSY if the flag is true. That will preclude assigning
>>> or unassigning adapters, domains and control domains when
>>> the KVM pointer is being set/unset. Likewise, removal of the
>>> mediated device will also be prevented while the KVM pointer
>>> is being set/unset.
>>>
>>> In the case of the PQAP handler function, it can wait for the
>>> set/unset of the KVM pointer as follows:
>>>
>>>      while (matrix_mdev->kvm_busy) {
>>>              mutex_unlock(&matrix_dev->lock);
>>>              msleep(100);
>>>              mutex_lock(&matrix_dev->lock);
>>>      }
>>>
>>>      if (!matrix_mdev->kvm)
>>>              goto out_unlock;
>>>
>>> What say you?
>> I'm not sure. Since I disagree with your analysis above it is difficult
>> to deal with the conclusion. I'm not against decoupling the tracking of
>> the state of the mdev_matrix device from the value of the kvm pointer. I
>> think we should first get a common understanding of the problem, before
>> we proceed to the solution.
>
> Regardless of my brain fog regarding the testing of the
> matrix_mdev->kvm pointer, I stand by what I stated
> in the paragraphs just before the code snippet.
>
> The problem is there are 10 functions that depend upon
> the value of the matrix_mdev->kvm pointer that can get
> control while the pointer is being set/unset and the
> matrix_dev->lock is given up to set/clear the masks:
> * vfio_ap_irq_enable: called by handle_pqap() when AQIC is intercepted
> * vfio_ap_irq_disable: called by handle_pqap() when AQIC is intercepted
> * assign_adapter_store: sysfs
> * unassign_adapter_store: sysfs
> * assign_domain_store: sysfs
> * unassign_domain_store: sysfs
> * assign_control_domain_store: sysfs
> * unassign_control_domain_store: sysfs
> * vfio_ap_mdev_remove: sysfs
> * vfio_ap_mdev_release: mdev fd closed by userspace (i.e., qemu)
>
> If we add the proposed flag to indicate when the matrix_mdev->kvm
> pointer is in flux, then we can check that before allowing the functions
> in the list above to proceed.
>
>> Regards,
>> Halil
>
The patch titled
Subject: mm, compaction: make fast_isolate_freepages() stay within zone
has been removed from the -mm tree. Its filename was
mm-compaction-make-fast_isolate_freepages-stay-within-zone.patch
This patch was dropped because it was merged into mainline or a subsystem tree
------------------------------------------------------
From: Vlastimil Babka <vbabka(a)suse.cz>
Subject: mm, compaction: make fast_isolate_freepages() stay within zone
Compaction always operates on pages from a single given zone when
isolating both pages to migrate and freepages. Pageblock boundaries are
intersected with zone boundaries to be safe in case a zone starts or ends in
the middle of a pageblock. The use of pageblock_pfn_to_page() protects
against non-contiguous pageblocks.
The functions fast_isolate_freepages() and fast_isolate_around() don't
currently protect the fast freepage isolation thoroughly enough against
these corner cases, and can result in freepage isolation operating outside
of zone boundaries:
- in fast_isolate_freepages() if we get a pfn from the first pageblock
of a zone that starts in the middle of that pageblock, 'highest' can be
a pfn outside of the zone. If we fail to isolate anything in this
function, we may then call fast_isolate_around() on a pfn outside of the
zone and there effectively do a set_pageblock_skip(page_to_pfn(highest))
which may currently hit a VM_BUG_ON() in some configurations
- fast_isolate_around() checks only the zone end boundary and not
beginning, nor that the pageblock is contiguous (with
pageblock_pfn_to_page()) so it's possible that we end up calling
isolate_freepages_block() on a range of pfn's from two different zones
and end up e.g. isolating freepages under the wrong zone's lock.
This patch should fix the above issues.
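The core of the fix is to clamp the pageblock range to the zone and then
re-validate it with pageblock_pfn_to_page() before isolating anything; in
fast_isolate_around() this looks roughly like the following (condensed from
the hunk below):

	/* Pageblock boundaries, clamped to the zone */
	start_pfn = max(pageblock_start_pfn(pfn), cc->zone->zone_start_pfn);
	end_pfn = min(pageblock_end_pfn(pfn), zone_end_pfn(cc->zone));

	/* Bail out if the pageblock is non-contiguous or not in this zone */
	page = pageblock_pfn_to_page(start_pfn, end_pfn, cc->zone);
	if (!page)
		return;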
Link: https://lkml.kernel.org/r/20210217173300.6394-1-vbabka@suse.cz
Fixes: 5a811889de10 ("mm, compaction: use free lists to quickly locate a migration target")
Signed-off-by: Vlastimil Babka <vbabka(a)suse.cz>
Acked-by: David Rientjes <rientjes(a)google.com>
Acked-by: Mel Gorman <mgorman(a)techsingularity.net>
Cc: Andrea Arcangeli <aarcange(a)redhat.com>
Cc: David Hildenbrand <david(a)redhat.com>
Cc: Michal Hocko <mhocko(a)kernel.org>
Cc: Mike Rapoport <rppt(a)kernel.org>
Cc: <stable(a)vger.kernel.org>
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
---
mm/compaction.c | 16 +++++++++++-----
1 file changed, 11 insertions(+), 5 deletions(-)
--- a/mm/compaction.c~mm-compaction-make-fast_isolate_freepages-stay-within-zone
+++ a/mm/compaction.c
@@ -1284,7 +1284,7 @@ static void
fast_isolate_around(struct compact_control *cc, unsigned long pfn, unsigned long nr_isolated)
{
unsigned long start_pfn, end_pfn;
- struct page *page = pfn_to_page(pfn);
+ struct page *page;
/* Do not search around if there are enough pages already */
if (cc->nr_freepages >= cc->nr_migratepages)
@@ -1295,8 +1295,12 @@ fast_isolate_around(struct compact_contr
return;
/* Pageblock boundaries */
- start_pfn = pageblock_start_pfn(pfn);
- end_pfn = min(pageblock_end_pfn(pfn), zone_end_pfn(cc->zone)) - 1;
+ start_pfn = max(pageblock_start_pfn(pfn), cc->zone->zone_start_pfn);
+ end_pfn = min(pageblock_end_pfn(pfn), zone_end_pfn(cc->zone));
+
+ page = pageblock_pfn_to_page(start_pfn, end_pfn, cc->zone);
+ if (!page)
+ return;
/* Scan before */
if (start_pfn != pfn) {
@@ -1398,7 +1402,8 @@ fast_isolate_freepages(struct compact_co
pfn = page_to_pfn(freepage);
if (pfn >= highest)
- highest = pageblock_start_pfn(pfn);
+ highest = max(pageblock_start_pfn(pfn),
+ cc->zone->zone_start_pfn);
if (pfn >= low_pfn) {
cc->fast_search_fail = 0;
@@ -1468,7 +1473,8 @@ fast_isolate_freepages(struct compact_co
} else {
if (cc->direct_compaction && pfn_valid(min_pfn)) {
page = pageblock_pfn_to_page(min_pfn,
- pageblock_end_pfn(min_pfn),
+ min(pageblock_end_pfn(min_pfn),
+ zone_end_pfn(cc->zone)),
cc->zone);
cc->free_pfn = min_pfn;
}
_
Patches currently in -mm which might be from vbabka(a)suse.cz are
maintainers-add-uapi-directories-to-api-abi-section.patch
The patch titled
Subject: mm/vmscan: restore zone_reclaim_mode ABI
has been removed from the -mm tree. Its filename was
mm-vmscan-restore-zone_reclaim_mode-abi.patch
This patch was dropped because it was merged into mainline or a subsystem tree
------------------------------------------------------
From: Dave Hansen <dave.hansen(a)linux.intel.com>
Subject: mm/vmscan: restore zone_reclaim_mode ABI
I went to go add a new RECLAIM_* mode for the zone_reclaim_mode
sysctl. Like a good kernel developer, I also went to go update the
documentation. I noticed that the bits in the documentation didn't
match the bits in the #defines.
The VM never explicitly checks the RECLAIM_ZONE bit. The bit is,
however, implicitly checked when checking 'node_reclaim_mode==0'. The
RECLAIM_ZONE #define was removed in a cleanup. That, by itself, is
fine.
But, when the bit was removed (bit 0) the _other_ bit locations also got
changed. That's not OK because the bit values are documented to mean one
specific thing. Users surely do not expect the meaning to change from
kernel to kernel.
The end result is that if someone had a script that did:
sysctl vm.zone_reclaim_mode=1
it would have gone from enabling node reclaim for clean unmapped pages to
writing out pages during node reclaim after the commit in question.
That's not great.
Put the bits back the way they were and add a comment so something like
this is a bit harder to do again. Update the documentation to make it
clear that the first bit is ignored.
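Concretely, the bit layout restored by this patch, and the layout the
cleanup had silently shifted to, are as follows (values taken from the diff
below and from the commit being fixed):

	/* Documented ABI, restored by this patch: */
	#define RECLAIM_ZONE	(1<<0)	/* Run shrink_inactive_list on the zone */
	#define RECLAIM_WRITE	(1<<1)	/* Writeout pages during reclaim */
	#define RECLAIM_UNMAP	(1<<2)	/* Unmap pages during reclaim */

	/* After 648b5cf368e0 (bit 0 dropped, remaining bits shifted down):
	 *   RECLAIM_WRITE == (1<<0), RECLAIM_UNMAP == (1<<1)
	 * so "sysctl vm.zone_reclaim_mode=1" went from reclaiming only clean,
	 * unmapped pages to writing out pages during node reclaim.
	 */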
Link: https://lkml.kernel.org/r/20210219172555.FF0CDF23@viggo.jf.intel.com
Signed-off-by: Dave Hansen <dave.hansen(a)linux.intel.com>
Fixes: 648b5cf368e0 ("mm/vmscan: remove unused RECLAIM_OFF/RECLAIM_ZONE")
Reviewed-by: Ben Widawsky <ben.widawsky(a)intel.com>
Reviewed-by: Oscar Salvador <osalvador(a)suse.de>
Acked-by: David Rientjes <rientjes(a)google.com>
Acked-by: Christoph Lameter <cl(a)linux.com>
Cc: Alex Shi <alex.shi(a)linux.alibaba.com>
Cc: Daniel Wagner <dwagner(a)suse.de>
Cc: "Tobin C. Harding" <tobin(a)kernel.org>
Cc: Christoph Lameter <cl(a)linux.com>
Cc: Andrew Morton <akpm(a)linux-foundation.org>
Cc: Huang Ying <ying.huang(a)intel.com>
Cc: Dan Williams <dan.j.williams(a)intel.com>
Cc: Qian Cai <cai(a)lca.pw>
Cc: <stable(a)vger.kernel.org>
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
---
Documentation/admin-guide/sysctl/vm.rst | 10 +++++-----
mm/vmscan.c | 9 +++++++--
2 files changed, 12 insertions(+), 7 deletions(-)
--- a/Documentation/admin-guide/sysctl/vm.rst~mm-vmscan-restore-zone_reclaim_mode-abi
+++ a/Documentation/admin-guide/sysctl/vm.rst
@@ -983,11 +983,11 @@ that benefit from having their data cach
left disabled as the caching effect is likely to be more important than
data locality.
-zone_reclaim may be enabled if it's known that the workload is partitioned
-such that each partition fits within a NUMA node and that accessing remote
-memory would cause a measurable performance reduction. The page allocator
-will then reclaim easily reusable pages (those page cache pages that are
-currently not used) before allocating off node pages.
+Consider enabling one or more zone_reclaim mode bits if it's known that the
+workload is partitioned such that each partition fits within a NUMA node
+and that accessing remote memory would cause a measurable performance
+reduction. The page allocator will take additional actions before
+allocating off node pages.
Allowing zone reclaim to write out pages stops processes that are
writing large amounts of data from dirtying pages on other nodes. Zone
--- a/mm/vmscan.c~mm-vmscan-restore-zone_reclaim_mode-abi
+++ a/mm/vmscan.c
@@ -4085,8 +4085,13 @@ module_init(kswapd_init)
*/
int node_reclaim_mode __read_mostly;
-#define RECLAIM_WRITE (1<<0) /* Writeout pages during reclaim */
-#define RECLAIM_UNMAP (1<<1) /* Unmap pages during reclaim */
+/*
+ * These bit locations are exposed in the vm.zone_reclaim_mode sysctl
+ * ABI. New bits are OK, but existing bits can never change.
+ */
+#define RECLAIM_ZONE (1<<0) /* Run shrink_inactive_list on the zone */
+#define RECLAIM_WRITE (1<<1) /* Writeout pages during reclaim */
+#define RECLAIM_UNMAP (1<<2) /* Unmap pages during reclaim */
/*
* Priority for NODE_RECLAIM. This determines the fraction of pages
_
Patches currently in -mm which might be from dave.hansen(a)linux.intel.com are
The patch titled
Subject: hugetlb: fix copy_huge_page_from_user contig page struct assumption
has been removed from the -mm tree. Its filename was
hugetlb-fix-copy_huge_page_from_user-contig-page-struct-assumption.patch
This patch was dropped because it was merged into mainline or a subsystem tree
------------------------------------------------------
From: Mike Kravetz <mike.kravetz(a)oracle.com>
Subject: hugetlb: fix copy_huge_page_from_user contig page struct assumption
page structs are not guaranteed to be contiguous for gigantic pages. The
routine copy_huge_page_from_user can encounter gigantic pages, yet it
assumes page structs are contiguous when copying pages from user space.
Since page structs for the target gigantic page are not contiguous, the
data copied from user space could overwrite other pages not associated
with the gigantic page and cause data corruption.
Non-contiguous page structs are generally not an issue. However, they can
exist with a specific kernel configuration and hotplug operations. For
example: Configure the kernel with CONFIG_SPARSEMEM and
!CONFIG_SPARSEMEM_VMEMMAP. Then, hotplug add memory for the area where
the gigantic page will be allocated.
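The fix below replaces the plain dst_page + i pointer arithmetic with a walk
via mem_map_next(), which recomputes the struct page pointer from the pfn
whenever the walk may cross a mem_map discontiguity; roughly (a sketch of the
loop shape only, see the diff for the actual change):

	struct page *subpage = dst_page;

	for (i = 0; i < pages_per_huge_page;
	     i++, subpage = mem_map_next(subpage, dst_page, i)) {
		/* map and copy into subpage, never into dst_page + i */
	}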
Link: https://lkml.kernel.org/r/20210217184926.33567-2-mike.kravetz@oracle.com
Fixes: 8fb5debc5fcd ("userfaultfd: hugetlbfs: add hugetlb_mcopy_atomic_pte for userfaultfd support")
Signed-off-by: Mike Kravetz <mike.kravetz(a)oracle.com>
Cc: Zi Yan <ziy(a)nvidia.com>
Cc: Davidlohr Bueso <dbueso(a)suse.de>
Cc: "Kirill A . Shutemov" <kirill.shutemov(a)linux.intel.com>
Cc: Andrea Arcangeli <aarcange(a)redhat.com>
Cc: Matthew Wilcox <willy(a)infradead.org>
Cc: Oscar Salvador <osalvador(a)suse.de>
Cc: Joao Martins <joao.m.martins(a)oracle.com>
Cc: <stable(a)vger.kernel.org>
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
---
mm/memory.c | 10 ++++++----
1 file changed, 6 insertions(+), 4 deletions(-)
--- a/mm/memory.c~hugetlb-fix-copy_huge_page_from_user-contig-page-struct-assumption
+++ a/mm/memory.c
@@ -5177,17 +5177,19 @@ long copy_huge_page_from_user(struct pag
void *page_kaddr;
unsigned long i, rc = 0;
unsigned long ret_val = pages_per_huge_page * PAGE_SIZE;
+ struct page *subpage = dst_page;
- for (i = 0; i < pages_per_huge_page; i++) {
+ for (i = 0; i < pages_per_huge_page;
+ i++, subpage = mem_map_next(subpage, dst_page, i)) {
if (allow_pagefault)
- page_kaddr = kmap(dst_page + i);
+ page_kaddr = kmap(subpage);
else
- page_kaddr = kmap_atomic(dst_page + i);
+ page_kaddr = kmap_atomic(subpage);
rc = copy_from_user(page_kaddr,
(const void __user *)(src + i * PAGE_SIZE),
PAGE_SIZE);
if (allow_pagefault)
- kunmap(dst_page + i);
+ kunmap(subpage);
else
kunmap_atomic(page_kaddr);
_
Patches currently in -mm which might be from mike.kravetz(a)oracle.com are
The patch titled
Subject: hugetlb: fix update_and_free_page contig page struct assumption
has been removed from the -mm tree. Its filename was
hugetlb-fix-update_and_free_page-contig-page-struct-assumption.patch
This patch was dropped because it was merged into mainline or a subsystem tree
------------------------------------------------------
From: Mike Kravetz <mike.kravetz(a)oracle.com>
Subject: hugetlb: fix update_and_free_page contig page struct assumption
page structs are not guaranteed to be contiguous for gigantic pages. The
routine update_and_free_page can encounter a gigantic page, yet it assumes
page structs are contiguous when setting page flags in subpages.
If update_and_free_page encounters non-contiguous page structs, we can see
“BUG: Bad page state in process …” errors.
Non-contiguous page structs are generally not an issue. However, they can
exist with a specific kernel configuration and hotplug operations. For
example: Configure the kernel with CONFIG_SPARSEMEM and
!CONFIG_SPARSEMEM_VMEMMAP. Then, hotplug add memory for the area where
the gigantic page will be allocated. Zi Yan outlined steps to reproduce
here [1].
[1] https://lore.kernel.org/linux-mm/16F7C58B-4D79-41C5-9B64-A1A1628F4AF2@nvidi…
Link: https://lkml.kernel.org/r/20210217184926.33567-1-mike.kravetz@oracle.com
Fixes: 944d9fec8d7a ("hugetlb: add support for gigantic page allocation at runtime")
Signed-off-by: Zi Yan <ziy(a)nvidia.com>
Signed-off-by: Mike Kravetz <mike.kravetz(a)oracle.com>
Cc: Zi Yan <ziy(a)nvidia.com>
Cc: Davidlohr Bueso <dbueso(a)suse.de>
Cc: "Kirill A . Shutemov" <kirill.shutemov(a)linux.intel.com>
Cc: Andrea Arcangeli <aarcange(a)redhat.com>
Cc: Matthew Wilcox <willy(a)infradead.org>
Cc: Oscar Salvador <osalvador(a)suse.de>
Cc: Joao Martins <joao.m.martins(a)oracle.com>
Cc: <stable(a)vger.kernel.org>
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
---
mm/hugetlb.c | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
--- a/mm/hugetlb.c~hugetlb-fix-update_and_free_page-contig-page-struct-assumption
+++ a/mm/hugetlb.c
@@ -1321,14 +1321,16 @@ static inline void destroy_compound_giga
static void update_and_free_page(struct hstate *h, struct page *page)
{
int i;
+ struct page *subpage = page;
if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
return;
h->nr_huge_pages--;
h->nr_huge_pages_node[page_to_nid(page)]--;
- for (i = 0; i < pages_per_huge_page(h); i++) {
- page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
+ for (i = 0; i < pages_per_huge_page(h);
+ i++, subpage = mem_map_next(subpage, page, i)) {
+ subpage->flags &= ~(1 << PG_locked | 1 << PG_error |
1 << PG_referenced | 1 << PG_dirty |
1 << PG_active | 1 << PG_private |
1 << PG_writeback);
_
Patches currently in -mm which might be from mike.kravetz(a)oracle.com are