The patch below does not apply to the 5.10-stable, 5.4-stable, 4.19-stable,
4.14-stable, 4.9-stable, or 4.4-stable trees.
If someone wants it applied there, or to any other stable or longterm
tree, then please email the backport, including the original git commit
id to <stable@vger.kernel.org>.

thanks,

greg k-h

------------------ original commit in Linus's tree ------------------
From 07956b6269d3ed05d854233d5bb776dca91751dd Mon Sep 17 00:00:00 2001
From: Alex Williamson <alex.williamson@redhat.com>
Date: Tue, 16 Feb 2021 15:49:34 -0700
Subject: [PATCH] vfio/type1: Use follow_pte()

follow_pfn() doesn't make sure that we're using the correct page
protections; get the pte with follow_pte() so that we can test
protections and get the pfn from the pte.

Fixes: 5cbf3264bc71 ("vfio/type1: Fix VA->PA translation for PFNMAP VMAs in vaddr_get_pfn()")
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Cornelia Huck <cohuck@redhat.com>
Reviewed-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index b3df383d7028..ed03f3fcb07e 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -24,6 +24,7 @@
 #include <linux/compat.h>
 #include <linux/device.h>
 #include <linux/fs.h>
+#include <linux/highmem.h>
 #include <linux/iommu.h>
 #include <linux/module.h>
 #include <linux/mm.h>
@@ -462,9 +463,11 @@ static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm,
                             unsigned long vaddr, unsigned long *pfn,
                             bool write_fault)
 {
+        pte_t *ptep;
+        spinlock_t *ptl;
         int ret;
 
-        ret = follow_pfn(vma, vaddr, pfn);
+        ret = follow_pte(vma->vm_mm, vaddr, &ptep, &ptl);
         if (ret) {
                 bool unlocked = false;
 
@@ -478,9 +481,17 @@ static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm,
                 if (ret)
                         return ret;
 
-                ret = follow_pfn(vma, vaddr, pfn);
+                ret = follow_pte(vma->vm_mm, vaddr, &ptep, &ptl);
+                if (ret)
+                        return ret;
         }
 
+        if (write_fault && !pte_write(*ptep))
+                ret = -EFAULT;
+        else
+                *pfn = pte_pfn(*ptep);
+
+        pte_unmap_unlock(ptep, ptl);
         return ret;
 }
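
For reference, here is a sketch of how follow_fault_pfn() reads once this
patch is applied, reconstructed from the hunks above; the fault-and-retry
body between the two hunks is elided here, just as it is in the diff:

static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm,
                            unsigned long vaddr, unsigned long *pfn,
                            bool write_fault)
{
        pte_t *ptep;
        spinlock_t *ptl;
        int ret;

        ret = follow_pte(vma->vm_mm, vaddr, &ptep, &ptl);
        if (ret) {
                bool unlocked = false;

                /* ... fault the page in and retry (context elided
                 * between the two hunks), returning early on failure ... */
                if (ret)
                        return ret;

                ret = follow_pte(vma->vm_mm, vaddr, &ptep, &ptl);
                if (ret)
                        return ret;
        }

        /* follow_pte() returns the pte mapped and locked, so the
         * protection test and the pfn read happen race-free under ptl. */
        if (write_fault && !pte_write(*ptep))
                ret = -EFAULT;
        else
                *pfn = pte_pfn(*ptep);

        pte_unmap_unlock(ptep, ptl);
        return ret;
}

The point of the conversion is visible at the bottom: follow_pte() exposes
the pte itself, so a write fault can be refused with -EFAULT when the pte
is not writable, and the pfn is taken via pte_pfn() only after that check;
follow_pfn() offered no way to test the page protections at all.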