On Thu, Aug 28, 2025 at 12:01:24AM +0200, David Hildenbrand wrote:
Let's make it clearer that we are operating within a single folio by providing both the folio and the page.
This implies that for flush_dcache_folio() we'll now avoid one more page->folio lookup, and that we can safely drop the "nth_page" usage.
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Signed-off-by: David Hildenbrand <david@redhat.com>
 arch/mips/include/asm/cacheflush.h | 11 +++++++----
 arch/mips/mm/cache.c               |  8 ++++----
 2 files changed, 11 insertions(+), 8 deletions(-)
diff --git a/arch/mips/include/asm/cacheflush.h b/arch/mips/include/asm/cacheflush.h
index 5d283ef89d90d..8d79bfc687d21 100644
--- a/arch/mips/include/asm/cacheflush.h
+++ b/arch/mips/include/asm/cacheflush.h
@@ -50,13 +50,14 @@ extern void (*flush_cache_mm)(struct mm_struct *mm);
 extern void (*flush_cache_range)(struct vm_area_struct *vma,
 	unsigned long start, unsigned long end);
 extern void (*flush_cache_page)(struct vm_area_struct *vma,
 	unsigned long page, unsigned long pfn);
-extern void __flush_dcache_pages(struct page *page, unsigned int nr);
+extern void __flush_dcache_folio_pages(struct folio *folio, struct page *page,
+		unsigned int nr);
NIT: It would be good to drop the extern.
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1

 static inline void flush_dcache_folio(struct folio *folio)
 {
 	if (cpu_has_dc_aliases)
-		__flush_dcache_pages(&folio->page, folio_nr_pages(folio));
+		__flush_dcache_folio_pages(folio, folio_page(folio, 0),
+					   folio_nr_pages(folio));
 	else if (!cpu_has_ic_fills_f_dc)
 		folio_set_dcache_dirty(folio);
 }

@@ -64,10 +65,12 @@ static inline void flush_dcache_folio(struct folio *folio)
 static inline void flush_dcache_page(struct page *page)
 {
+	struct folio *folio = page_folio(page);
+
 	if (cpu_has_dc_aliases)
-		__flush_dcache_pages(page, 1);
+		__flush_dcache_folio_pages(folio, page, folio_nr_pages(folio));
Hmmm, shouldn't this be 1 not folio_nr_pages()? Seems that the original implementation only flushed a single page even if contained within a larger folio?
 	else if (!cpu_has_ic_fills_f_dc)
-		folio_set_dcache_dirty(page_folio(page));
+		folio_set_dcache_dirty(folio);
 }

 #define flush_dcache_mmap_lock(mapping)	do { } while (0)

diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index bf9a37c60e9f0..e3b4224c9a406 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -99,9 +99,9 @@ SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
 	return 0;
 }
-void __flush_dcache_pages(struct page *page, unsigned int nr)
+void __flush_dcache_folio_pages(struct folio *folio, struct page *page,
+		unsigned int nr)
 {
-	struct folio *folio = page_folio(page);
 	struct address_space *mapping = folio_flush_mapping(folio);
 	unsigned long addr;
 	unsigned int i;
@@ -117,12 +117,12 @@ void __flush_dcache_pages(struct page *page, unsigned int nr)
 	 * get faulted into the tlb (and thus flushed) anyways.
 	 */
 	for (i = 0; i < nr; i++) {
-		addr = (unsigned long)kmap_local_page(nth_page(page, i));
+		addr = (unsigned long)kmap_local_page(page + i);
 		flush_data_cache_page(addr);
 		kunmap_local((void *)addr);
 	}
 }
-EXPORT_SYMBOL(__flush_dcache_pages);
+EXPORT_SYMBOL(__flush_dcache_folio_pages);

 void __flush_anon_page(struct page *page, unsigned long vmaddr)
 {
-- 
2.50.1