From: Austin Zheng <Austin.Zheng(a)amd.com>
An early return is possible if the context has no clk_mgr.
This leads to an invalid power profile being returned, one that
looks identical to a profile with the lowest power level.
Add back the logic that populated the power profile, and overwrite
that value later if needed.
Cc: stable(a)vger.kernel.org
Fixes: fc8c959496fa ("drm/amd/display: Update Interface to Check UCLK DPM")
Reviewed-by: Dillon Varone <dillon.varone(a)amd.com>
Signed-off-by: Austin Zheng <Austin.Zheng(a)amd.com>
Signed-off-by: Hamza Mahfooz <hamza.mahfooz(a)amd.com>
---
drivers/gpu/drm/amd/display/dc/core/dc.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 0c1875d35a95..1dd26d5df6b9 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -6100,11 +6100,11 @@ struct dc_power_profile dc_get_power_profile_for_dc_state(const struct dc_state
{
struct dc_power_profile profile = { 0 };
- if (!context || !context->clk_mgr || !context->clk_mgr->ctx || !context->clk_mgr->ctx->dc)
+ profile.power_level = !context->bw_ctx.bw.dcn.clk.p_state_change_support;
+ if (!context->clk_mgr || !context->clk_mgr->ctx || !context->clk_mgr->ctx->dc)
return profile;
struct dc *dc = context->clk_mgr->ctx->dc;
-
if (dc->res_pool->funcs->get_power_profile)
profile.power_level = dc->res_pool->funcs->get_power_profile(context);
return profile;
--
2.46.1
From: Joshua Aberback <joshua.aberback(a)amd.com>
[Why]
The mechanism to back up and restore plane states doesn't maintain
the refcount, which can cause issues if the refcount of the plane changes
between the backup and restore operations: memory leaks if the
refcount was supposed to go down, or double frees / invalid memory
accesses if the refcount was supposed to go up.
[How]
Cache and re-apply current refcount when restoring plane states.
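A minimal user-space sketch of the pattern, using hypothetical stand-in types
(fake_kref / fake_plane_state are illustrations, not the real kref or
dc_plane_state): a whole-struct assignment would clobber the embedded
refcount, so it is saved first and written back afterwards.

#include <assert.h>

/* stand-in for struct kref */
struct fake_kref { int refcount; };

/* stand-in for struct dc_plane_state: the refcount lives inside the struct */
struct fake_plane_state {
	struct fake_kref refcount;
	int some_hw_setting;
};

static void restore_preserving_refcount(struct fake_plane_state *live,
					const struct fake_plane_state *backup)
{
	/* save the live refcount, restore everything else, then put it back */
	struct fake_kref refcount = live->refcount;

	*live = *backup;
	live->refcount = refcount;
}

int main(void)
{
	struct fake_plane_state live = {
		.refcount = { .refcount = 3 }, .some_hw_setting = 7
	};
	struct fake_plane_state backup = {
		.refcount = { .refcount = 1 }, .some_hw_setting = 42
	};

	restore_preserving_refcount(&live, &backup);
	assert(live.refcount.refcount == 3);	/* refcount preserved */
	assert(live.some_hw_setting == 42);	/* everything else restored */
	return 0;
}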
Cc: stable(a)vger.kernel.org
Reviewed-by: Josip Pavic <josip.pavic(a)amd.com>
Signed-off-by: Joshua Aberback <joshua.aberback(a)amd.com>
Signed-off-by: Hamza Mahfooz <hamza.mahfooz(a)amd.com>
---
drivers/gpu/drm/amd/display/dc/core/dc.c | 3 +++
1 file changed, 3 insertions(+)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 7872c6cabb14..0c1875d35a95 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -3141,7 +3141,10 @@ static void restore_planes_and_stream_state(
return;
for (i = 0; i < status->plane_count; i++) {
+ /* refcount will always be valid, restore everything else */
+ struct kref refcount = status->plane_states[i]->refcount;
*status->plane_states[i] = scratch->plane_states[i];
+ status->plane_states[i]->refcount = refcount;
}
*stream = scratch->stream_state;
}
--
2.46.1
From: Chris Park <chris.park(a)amd.com>
[Why]
There are pipe scaler validation failures when the pipe is a phantom
pipe, which cause a crash in DML validation. Since the scaler parameters
are not as important for a phantom pipe, and we require this plane for
successful MCLK switches, the failure condition can be ignored.
[How]
Ignore scaler validation failures if the pipe being validated is marked
as a phantom pipe.
Cc: stable(a)vger.kernel.org # 6.11+
Reviewed-by: Dillon Varone <dillon.varone(a)amd.com>
Signed-off-by: Chris Park <chris.park(a)amd.com>
Signed-off-by: Hamza Mahfooz <hamza.mahfooz(a)amd.com>
---
drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 8 ++++++++
1 file changed, 8 insertions(+)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 33125b95c3a1..619fad17de55 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1501,6 +1501,10 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
res = spl_calculate_scaler_params(spl_in, spl_out);
// Convert respective out params from SPL to scaler data
translate_SPL_out_params_to_pipe_ctx(pipe_ctx, spl_out);
+
+ /* Ignore scaler failure if pipe context plane is phantom plane */
+ if (!res && plane_state->is_phantom)
+ res = true;
} else {
#endif
/* depends on h_active */
@@ -1571,6 +1575,10 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
&plane_state->scaling_quality);
}
+ /* Ignore scaler failure if pipe context plane is phantom plane */
+ if (!res && plane_state->is_phantom)
+ res = true;
+
if (res && (pipe_ctx->plane_res.scl_data.taps.v_taps != temp.v_taps ||
pipe_ctx->plane_res.scl_data.taps.h_taps != temp.h_taps ||
pipe_ctx->plane_res.scl_data.taps.v_taps_c != temp.v_taps_c ||
--
2.46.1
From: Yihan Zhu <Yihan.Zhu(a)amd.com>
[Why]
No check on the head pipe during the DML to DC HW mapping allows illegal
pipe usage. This results in a wrong pipe topology that completely messes
up the MPCC tree and then causes a display hang.
[How]
Avoid using a pipe that is a head pipe in all of the checks, and avoid
ODM slices during the preferred pipe check.
v2: Add a check that the pipe is a DPP pipe before executing the head
pipe check in the DML2 pipe selection logic, to avoid a NULL pointer
dereference.
Cc: stable(a)vger.kernel.org
Reviewed-by: Nicholas Kazlauskas <nicholas.kazlauskas(a)amd.com>
Signed-off-by: Yihan Zhu <Yihan.Zhu(a)amd.com>
Signed-off-by: Hamza Mahfooz <hamza.mahfooz(a)amd.com>
---
.../display/dc/dml2/dml2_dc_resource_mgmt.c | 23 ++++++++++++++++++-
1 file changed, 22 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
index 6eccf0241d85..1ed21c1b86a5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
@@ -258,12 +258,25 @@ static unsigned int find_preferred_pipe_candidates(const struct dc_state *existi
* However this condition comes with a caveat. We need to ignore pipes that will
* require a change in OPP but still have the same stream id. For example during
* an MPC to ODM transiton.
+ *
+ * Adding check to avoid pipe select on the head pipe by utilizing dc resource
+ * helper function resource_get_primary_dpp_pipe and comparing the pipe index.
*/
if (existing_state) {
for (i = 0; i < pipe_count; i++) {
if (existing_state->res_ctx.pipe_ctx[i].stream && existing_state->res_ctx.pipe_ctx[i].stream->stream_id == stream_id) {
+ struct pipe_ctx *head_pipe =
+ resource_is_pipe_type(&existing_state->res_ctx.pipe_ctx[i], DPP_PIPE) ?
+ resource_get_primary_dpp_pipe(&existing_state->res_ctx.pipe_ctx[i]) :
+ NULL;
+
+ // we should always respect the head pipe from selection
+ if (head_pipe && head_pipe->pipe_idx == i)
+ continue;
if (existing_state->res_ctx.pipe_ctx[i].plane_res.hubp &&
- existing_state->res_ctx.pipe_ctx[i].plane_res.hubp->opp_id != i)
+ existing_state->res_ctx.pipe_ctx[i].plane_res.hubp->opp_id != i &&
+ (existing_state->res_ctx.pipe_ctx[i].prev_odm_pipe ||
+ existing_state->res_ctx.pipe_ctx[i].next_odm_pipe))
continue;
preferred_pipe_candidates[num_preferred_candidates++] = i;
@@ -292,6 +305,14 @@ static unsigned int find_last_resort_pipe_candidates(const struct dc_state *exis
*/
if (existing_state) {
for (i = 0; i < pipe_count; i++) {
+ struct pipe_ctx *head_pipe =
+ resource_is_pipe_type(&existing_state->res_ctx.pipe_ctx[i], DPP_PIPE) ?
+ resource_get_primary_dpp_pipe(&existing_state->res_ctx.pipe_ctx[i]) :
+ NULL;
+
+ // we should always respect the head pipe from selection
+ if (head_pipe && head_pipe->pipe_idx == i)
+ continue;
if ((existing_state->res_ctx.pipe_ctx[i].plane_res.hubp &&
existing_state->res_ctx.pipe_ctx[i].plane_res.hubp->opp_id != i) ||
existing_state->res_ctx.pipe_ctx[i].stream_res.tg)
--
2.46.1
In the bitmap_ip_uadt function, if ip is greater than ip_to, they are swapped.
However, there is no check of whether ip is smaller than map->first_ip, which
causes an out-of-bounds vulnerability. Add the missing lower-bound check to
prevent the out-of-bounds access.
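A simplified user-space model of the issue and the fix, using hypothetical
names (fake_bitmap_ip is an illustration, not the real ipset structure):
without the lower-bound half of the check, an ip below first_ip produces a
huge unsigned bit offset into the bitmap.

#include <stdint.h>
#include <stdio.h>

struct fake_bitmap_ip {
	uint32_t first_ip;	/* lowest address covered by the bitmap */
	uint32_t last_ip;	/* highest address covered by the bitmap */
};

/* returns 0 when the range is valid, -1 when it lies outside the bitmap */
static int check_range(const struct fake_bitmap_ip *map,
		       uint32_t ip, uint32_t ip_to)
{
	if (ip > ip_to) {	/* mirror the swap done by bitmap_ip_uadt() */
		uint32_t tmp = ip;

		ip = ip_to;
		ip_to = tmp;
	}

	/* the lower-bound half of this check is what the fix adds */
	if (ip < map->first_ip || ip_to > map->last_ip)
		return -1;

	/* (ip - map->first_ip) is now a valid bit offset */
	printf("first bit offset: %u\n", (unsigned int)(ip - map->first_ip));
	return 0;
}

int main(void)
{
	struct fake_bitmap_ip map = { .first_ip = 100, .last_ip = 200 };

	/* ip below first_ip: without the added check, 50 - 100 would underflow */
	return check_range(&map, 50, 150) == -1 ? 0 : 1;
}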
Cc: <stable(a)vger.kernel.org>
Reported-by: syzbot+58c872f7790a4d2ac951(a)syzkaller.appspotmail.com
Fixes: 72205fc68bd1 ("netfilter: ipset: bitmap:ip set type support")
Signed-off-by: Jeongjun Park <aha310510(a)gmail.com>
---
net/netfilter/ipset/ip_set_bitmap_ip.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c
index e4fa00abde6a..705c316b001a 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ip.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ip.c
@@ -178,7 +178,7 @@ bitmap_ip_uadt(struct ip_set *set, struct nlattr *tb[],
ip_to = ip;
}
- if (ip_to > map->last_ip)
+ if (ip < map->first_ip || ip_to > map->last_ip)
return -IPSET_ERR_BITMAP_RANGE;
for (; !before(ip_to, ip); ip += map->hosts) {
--
The commit 8396c793ffdf ("mmc: dw_mmc: Fix IDMAC operation with pages
bigger than 4K") increased the max_req_size, even for 4K pages, causing
various issues:
- Panic booting the kernel/rootfs from an SD card on Rockchip RK3566
- Panic booting the kernel/rootfs from an SD card on StarFive JH7100
- "swiotlb buffer is full" and data corruption on StarFive JH7110
At this stage no fix has been found, so it's probably better to just
revert the change.
This reverts commit 8396c793ffdf28bb8aee7cfe0891080f8cab7890.
Cc: stable(a)vger.kernel.org
Cc: Sam Protsenko <semen.protsenko(a)linaro.org>
Fixes: 8396c793ffdf ("mmc: dw_mmc: Fix IDMAC operation with pages bigger than 4K")
Closes: https://lore.kernel.org/linux-mmc/614692b4-1dbe-31b8-a34d-cb6db1909bb7@w6rz…
Closes: https://lore.kernel.org/linux-mmc/CAC8uq=Ppnmv98mpa1CrWLawWoPnu5abtU69v-=G-…
Signed-off-by: Aurelien Jarno <aurelien(a)aurel32.net>
---
drivers/mmc/host/dw_mmc.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
I have posted a patch to fix the issue, but unfortunately it only fixes
the JH7110 case:
https://lore.kernel.org/linux-mmc/20241020142931.138277-1-aurelien@aurel32.…
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 41e451235f637..e9f6e4e622901 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -2957,8 +2957,8 @@ static int dw_mci_init_slot(struct dw_mci *host)
if (host->use_dma == TRANS_MODE_IDMAC) {
mmc->max_segs = host->ring_size;
mmc->max_blk_size = 65535;
- mmc->max_req_size = DW_MCI_DESC_DATA_LENGTH * host->ring_size;
- mmc->max_seg_size = mmc->max_req_size;
+ mmc->max_seg_size = 0x1000;
+ mmc->max_req_size = mmc->max_seg_size * host->ring_size;
mmc->max_blk_count = mmc->max_req_size / 512;
} else if (host->use_dma == TRANS_MODE_EDMAC) {
mmc->max_segs = 64;
--
2.45.2
It turns out that the Allwinner A100/A133 SoC only supports 8K DMA
blocks (13 bits wide), for both the SD/SDIO and eMMC instances.
And while this alone would make for a trivial fix, the H616 falls back to
the A100 compatible string, so we now have to match the H616 compatible
string explicitly against the description advertising 64K DMA blocks.
As the A100 is now compatible with the D1 description, let the A100
compatible string point to that block instead, and introduce an explicit
match against the H616 string, pointing to the old description.
Also remove the redundant setting of clk_delays to NULL on the way.
Fixes: 3536b82e5853 ("mmc: sunxi: add support for A100 mmc controller")
Cc: stable(a)vger.kernel.org
Signed-off-by: Andre Przywara <andre.przywara(a)arm.com>
---
drivers/mmc/host/sunxi-mmc.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index d3bd0ac99ec46..e0ab5fd635e6c 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -1191,10 +1191,9 @@ static const struct sunxi_mmc_cfg sun50i_a64_emmc_cfg = {
.needs_new_timings = true,
};
-static const struct sunxi_mmc_cfg sun50i_a100_cfg = {
+static const struct sunxi_mmc_cfg sun50i_h616_cfg = {
.idma_des_size_bits = 16,
.idma_des_shift = 2,
- .clk_delays = NULL,
.can_calibrate = true,
.mask_data0 = true,
.needs_new_timings = true,
@@ -1217,8 +1216,9 @@ static const struct of_device_id sunxi_mmc_of_match[] = {
{ .compatible = "allwinner,sun20i-d1-mmc", .data = &sun20i_d1_cfg },
{ .compatible = "allwinner,sun50i-a64-mmc", .data = &sun50i_a64_cfg },
{ .compatible = "allwinner,sun50i-a64-emmc", .data = &sun50i_a64_emmc_cfg },
- { .compatible = "allwinner,sun50i-a100-mmc", .data = &sun50i_a100_cfg },
+ { .compatible = "allwinner,sun50i-a100-mmc", .data = &sun20i_d1_cfg },
{ .compatible = "allwinner,sun50i-a100-emmc", .data = &sun50i_a100_emmc_cfg },
+ { .compatible = "allwinner,sun50i-h616-mmc", .data = &sun50i_h616_cfg },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sunxi_mmc_of_match);
--
2.46.2
The quilt patch titled
Subject: selftests: hugetlb_dio: fixup check for initial conditions to skip in the start
has been removed from the -mm tree. Its filename was
selftests-hugetlb_dio-fixup-check-for-initial-conditions-to-skip-in-the-start.patch
This patch was dropped because it was merged into the mm-hotfixes-stable branch
of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
------------------------------------------------------
From: Donet Tom <donettom(a)linux.ibm.com>
Subject: selftests: hugetlb_dio: fixup check for initial conditions to skip in the start
Date: Sun, 10 Nov 2024 00:49:03 -0600
This test verifies that a hugepage, used as a user buffer for DIO
operations, is correctly freed upon unmapping. To test this, we read the
count of free hugepages before and after the mmap, DIO, and munmap
operations, then check if the free hugepage count is the same.
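A condensed, self-contained sketch of that before/after check (the
get_free_hugepages() below is a local stand-in that parses /proc/meminfo,
not the selftest's vm_util helper, and the DIO workload itself is elided):

#include <stdio.h>

static long get_free_hugepages(void)
{
	char line[128];
	long free_hpages = -1;
	FILE *f = fopen("/proc/meminfo", "r");

	if (!f)
		return -1;
	while (fgets(line, sizeof(line), f))
		if (sscanf(line, "HugePages_Free: %ld", &free_hpages) == 1)
			break;
	fclose(f);
	return free_hpages;
}

int main(void)
{
	long before, after;

	/* the reading this patch adds back */
	before = get_free_hugepages();
	if (before <= 0) {
		printf("skip: no free hugepages\n");
		return 0;
	}

	/* ... mmap a hugetlb buffer, do O_DIRECT I/O on it, munmap ... */

	after = get_free_hugepages();
	printf("%s\n", after == before ? "ok: huge pages freed" :
					 "fail: huge pages leaked");
	return after == before ? 0 : 1;
}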
Reading free hugepages before the test was removed by commit 0268d4579901
('selftests: hugetlb_dio: check for initial conditions to skip at the
start'), causing the test to always fail.
This patch adds back reading the free hugepages before starting the test.
With this patch, the tests are now passing.
Test results without this patch:
./tools/testing/selftests/mm/hugetlb_dio
TAP version 13
1..4
# No. Free pages before allocation : 0
# No. Free pages after munmap : 100
not ok 1 : Huge pages not freed!
# No. Free pages before allocation : 0
# No. Free pages after munmap : 100
not ok 2 : Huge pages not freed!
# No. Free pages before allocation : 0
# No. Free pages after munmap : 100
not ok 3 : Huge pages not freed!
# No. Free pages before allocation : 0
# No. Free pages after munmap : 100
not ok 4 : Huge pages not freed!
# Totals: pass:0 fail:4 xfail:0 xpass:0 skip:0 error:0
Test results with this patch:
/tools/testing/selftests/mm/hugetlb_dio
TAP version 13
1..4
# No. Free pages before allocation : 100
# No. Free pages after munmap : 100
ok 1 : Huge pages freed successfully !
# No. Free pages before allocation : 100
# No. Free pages after munmap : 100
ok 2 : Huge pages freed successfully !
# No. Free pages before allocation : 100
# No. Free pages after munmap : 100
ok 3 : Huge pages freed successfully !
# No. Free pages before allocation : 100
# No. Free pages after munmap : 100
ok 4 : Huge pages freed successfully !
# Totals: pass:4 fail:0 xfail:0 xpass:0 skip:0 error:0
Link: https://lkml.kernel.org/r/20241110064903.23626-1-donettom@linux.ibm.com
Fixes: 0268d4579901 ("selftests: hugetlb_dio: check for initial conditions to skip in the start")
Signed-off-by: Donet Tom <donettom(a)linux.ibm.com>
Cc: Muhammad Usama Anjum <usama.anjum(a)collabora.com>
Cc: Shuah Khan <shuah(a)kernel.org>
Cc: <stable(a)vger.kernel.org>
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
---
tools/testing/selftests/mm/hugetlb_dio.c | 7 +++++++
1 file changed, 7 insertions(+)
--- a/tools/testing/selftests/mm/hugetlb_dio.c~selftests-hugetlb_dio-fixup-check-for-initial-conditions-to-skip-in-the-start
+++ a/tools/testing/selftests/mm/hugetlb_dio.c
@@ -44,6 +44,13 @@ void run_dio_using_hugetlb(unsigned int
if (fd < 0)
ksft_exit_fail_perror("Error opening file\n");
+ /* Get the free huge pages before allocation */
+ free_hpage_b = get_free_hugepages();
+ if (free_hpage_b == 0) {
+ close(fd);
+ ksft_exit_skip("No free hugepage, exiting!\n");
+ }
+
/* Allocate a hugetlb page */
orig_buffer = mmap(NULL, h_pagesize, mmap_prot, mmap_flags, -1, 0);
if (orig_buffer == MAP_FAILED) {
_
Patches currently in -mm which might be from donettom(a)linux.ibm.com are
The quilt patch titled
Subject: mm/gup: avoid an unnecessary allocation call for FOLL_LONGTERM cases
has been removed from the -mm tree. Its filename was
mm-gup-avoid-an-unnecessary-allocation-call-for-foll_longterm-cases.patch
This patch was dropped because it was merged into the mm-hotfixes-stable branch
of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
------------------------------------------------------
From: John Hubbard <jhubbard(a)nvidia.com>
Subject: mm/gup: avoid an unnecessary allocation call for FOLL_LONGTERM cases
Date: Mon, 4 Nov 2024 19:29:44 -0800
commit 53ba78de064b ("mm/gup: introduce
check_and_migrate_movable_folios()") created a new constraint on the
pin_user_pages*() API family: a potentially large internal allocation must
now occur, for FOLL_LONGTERM cases.
A user-visible consequence has now appeared: user space can no longer pin
more than 2GB of memory on x86_64. That's because, on a 4KB PAGE_SIZE
system, when user space tries to pin 2GB (indirectly, via a device driver
that calls pin_user_pages()), this requires an allocation of a folio
pointer array of MAX_PAGE_ORDER size, which is the limit for kmalloc().
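As a rough worked example (assuming 4KB pages, 8-byte pointers and the
typical x86_64 kmalloc() ceiling of 4MB): pinning 2GB covers
2GB / 4KB = 524,288 folios, and 524,288 pointers * 8 bytes = 4MB, so any
pin larger than 2GB would need a folio pointer array bigger than kmalloc()
can provide.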
In addition to the directly visible effect described above, there is also
the problem of adding an unnecessary allocation. The **pages array
argument has already been allocated, and there is no need for a redundant
**folios array allocation in this case.
Fix this by avoiding the new allocation entirely. This is done by
referring to either the original page[i] within **pages, or to the
associated folio. Thanks to David Hildenbrand for suggesting this
approach and for providing the initial implementation (which I've tested
and adjusted slightly) as well.
[jhubbard(a)nvidia.com: whitespace tweak, per David]
Link: https://lkml.kernel.org/r/131cf9c8-ebc0-4cbb-b722-22fa8527bf3c@nvidia.com
[jhubbard(a)nvidia.com: bypass pofs_get_folio(), per Oscar]
Link: https://lkml.kernel.org/r/c1587c7f-9155-45be-bd62-1e36c0dd6923@nvidia.com
Link: https://lkml.kernel.org/r/20241105032944.141488-2-jhubbard@nvidia.com
Fixes: 53ba78de064b ("mm/gup: introduce check_and_migrate_movable_folios()")
Signed-off-by: John Hubbard <jhubbard(a)nvidia.com>
Suggested-by: David Hildenbrand <david(a)redhat.com>
Acked-by: David Hildenbrand <david(a)redhat.com>
Reviewed-by: Oscar Salvador <osalvador(a)suse.de>
Cc: Vivek Kasireddy <vivek.kasireddy(a)intel.com>
Cc: Dave Airlie <airlied(a)redhat.com>
Cc: Gerd Hoffmann <kraxel(a)redhat.com>
Cc: Matthew Wilcox <willy(a)infradead.org>
Cc: Christoph Hellwig <hch(a)infradead.org>
Cc: Jason Gunthorpe <jgg(a)nvidia.com>
Cc: Peter Xu <peterx(a)redhat.com>
Cc: Arnd Bergmann <arnd(a)arndb.de>
Cc: Daniel Vetter <daniel.vetter(a)ffwll.ch>
Cc: Dongwon Kim <dongwon.kim(a)intel.com>
Cc: Hugh Dickins <hughd(a)google.com>
Cc: Junxiao Chang <junxiao.chang(a)intel.com>
Cc: <stable(a)vger.kernel.org>
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
---
mm/gup.c | 116 +++++++++++++++++++++++++++++++++++------------------
1 file changed, 77 insertions(+), 39 deletions(-)
--- a/mm/gup.c~mm-gup-avoid-an-unnecessary-allocation-call-for-foll_longterm-cases
+++ a/mm/gup.c
@@ -2273,20 +2273,57 @@ struct page *get_dump_page(unsigned long
#endif /* CONFIG_ELF_CORE */
#ifdef CONFIG_MIGRATION
+
+/*
+ * An array of either pages or folios ("pofs"). Although it may seem tempting to
+ * avoid this complication, by simply interpreting a list of folios as a list of
+ * pages, that approach won't work in the longer term, because eventually the
+ * layouts of struct page and struct folio will become completely different.
+ * Furthermore, this pof approach avoids excessive page_folio() calls.
+ */
+struct pages_or_folios {
+ union {
+ struct page **pages;
+ struct folio **folios;
+ void **entries;
+ };
+ bool has_folios;
+ long nr_entries;
+};
+
+static struct folio *pofs_get_folio(struct pages_or_folios *pofs, long i)
+{
+ if (pofs->has_folios)
+ return pofs->folios[i];
+ return page_folio(pofs->pages[i]);
+}
+
+static void pofs_clear_entry(struct pages_or_folios *pofs, long i)
+{
+ pofs->entries[i] = NULL;
+}
+
+static void pofs_unpin(struct pages_or_folios *pofs)
+{
+ if (pofs->has_folios)
+ unpin_folios(pofs->folios, pofs->nr_entries);
+ else
+ unpin_user_pages(pofs->pages, pofs->nr_entries);
+}
+
/*
* Returns the number of collected folios. Return value is always >= 0.
*/
static unsigned long collect_longterm_unpinnable_folios(
- struct list_head *movable_folio_list,
- unsigned long nr_folios,
- struct folio **folios)
+ struct list_head *movable_folio_list,
+ struct pages_or_folios *pofs)
{
unsigned long i, collected = 0;
struct folio *prev_folio = NULL;
bool drain_allow = true;
- for (i = 0; i < nr_folios; i++) {
- struct folio *folio = folios[i];
+ for (i = 0; i < pofs->nr_entries; i++) {
+ struct folio *folio = pofs_get_folio(pofs, i);
if (folio == prev_folio)
continue;
@@ -2327,16 +2364,15 @@ static unsigned long collect_longterm_un
* Returns -EAGAIN if all folios were successfully migrated or -errno for
* failure (or partial success).
*/
-static int migrate_longterm_unpinnable_folios(
- struct list_head *movable_folio_list,
- unsigned long nr_folios,
- struct folio **folios)
+static int
+migrate_longterm_unpinnable_folios(struct list_head *movable_folio_list,
+ struct pages_or_folios *pofs)
{
int ret;
unsigned long i;
- for (i = 0; i < nr_folios; i++) {
- struct folio *folio = folios[i];
+ for (i = 0; i < pofs->nr_entries; i++) {
+ struct folio *folio = pofs_get_folio(pofs, i);
if (folio_is_device_coherent(folio)) {
/*
@@ -2344,7 +2380,7 @@ static int migrate_longterm_unpinnable_f
* convert the pin on the source folio to a normal
* reference.
*/
- folios[i] = NULL;
+ pofs_clear_entry(pofs, i);
folio_get(folio);
gup_put_folio(folio, 1, FOLL_PIN);
@@ -2363,8 +2399,8 @@ static int migrate_longterm_unpinnable_f
* calling folio_isolate_lru() which takes a reference so the
* folio won't be freed if it's migrating.
*/
- unpin_folio(folios[i]);
- folios[i] = NULL;
+ unpin_folio(folio);
+ pofs_clear_entry(pofs, i);
}
if (!list_empty(movable_folio_list)) {
@@ -2387,12 +2423,26 @@ static int migrate_longterm_unpinnable_f
return -EAGAIN;
err:
- unpin_folios(folios, nr_folios);
+ pofs_unpin(pofs);
putback_movable_pages(movable_folio_list);
return ret;
}
+static long
+check_and_migrate_movable_pages_or_folios(struct pages_or_folios *pofs)
+{
+ LIST_HEAD(movable_folio_list);
+ unsigned long collected;
+
+ collected = collect_longterm_unpinnable_folios(&movable_folio_list,
+ pofs);
+ if (!collected)
+ return 0;
+
+ return migrate_longterm_unpinnable_folios(&movable_folio_list, pofs);
+}
+
/*
* Check whether all folios are *allowed* to be pinned indefinitely (long term).
* Rather confusingly, all folios in the range are required to be pinned via
@@ -2417,16 +2467,13 @@ err:
static long check_and_migrate_movable_folios(unsigned long nr_folios,
struct folio **folios)
{
- unsigned long collected;
- LIST_HEAD(movable_folio_list);
+ struct pages_or_folios pofs = {
+ .folios = folios,
+ .has_folios = true,
+ .nr_entries = nr_folios,
+ };
- collected = collect_longterm_unpinnable_folios(&movable_folio_list,
- nr_folios, folios);
- if (!collected)
- return 0;
-
- return migrate_longterm_unpinnable_folios(&movable_folio_list,
- nr_folios, folios);
+ return check_and_migrate_movable_pages_or_folios(&pofs);
}
/*
@@ -2436,22 +2483,13 @@ static long check_and_migrate_movable_fo
static long check_and_migrate_movable_pages(unsigned long nr_pages,
struct page **pages)
{
- struct folio **folios;
- long i, ret;
-
- folios = kmalloc_array(nr_pages, sizeof(*folios), GFP_KERNEL);
- if (!folios) {
- unpin_user_pages(pages, nr_pages);
- return -ENOMEM;
- }
-
- for (i = 0; i < nr_pages; i++)
- folios[i] = page_folio(pages[i]);
+ struct pages_or_folios pofs = {
+ .pages = pages,
+ .has_folios = false,
+ .nr_entries = nr_pages,
+ };
- ret = check_and_migrate_movable_folios(nr_pages, folios);
-
- kfree(folios);
- return ret;
+ return check_and_migrate_movable_pages_or_folios(&pofs);
}
#else
static long check_and_migrate_movable_pages(unsigned long nr_pages,
_
Patches currently in -mm which might be from jhubbard(a)nvidia.com are