Page Detective uses info level, while dump_page() uses warn level. Add a new function dump_page_lvl() that accepts a log level argument, to be able to dump pages at a specific level. This also enables adding a module-specific prefix to the output of this function.
Signed-off-by: Pasha Tatashin <pasha.tatashin@soleen.com> --- fs/inode.c | 18 +++++++------- include/linux/fs.h | 2 +- include/linux/mmdebug.h | 1 + mm/debug.c | 53 ++++++++++++++++++++++------------------- 4 files changed, 39 insertions(+), 35 deletions(-)
diff --git a/fs/inode.c b/fs/inode.c index 8dabb224f941..1114319d82b2 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -603,7 +603,7 @@ void __remove_inode_hash(struct inode *inode) } EXPORT_SYMBOL(__remove_inode_hash);
-void dump_mapping(const struct address_space *mapping) +void dump_mapping(const char *loglvl, const struct address_space *mapping) { struct inode *host; const struct address_space_operations *a_ops; @@ -619,31 +619,31 @@ void dump_mapping(const struct address_space *mapping) */ if (get_kernel_nofault(host, &mapping->host) || get_kernel_nofault(a_ops, &mapping->a_ops)) { - pr_warn("invalid mapping:%px\n", mapping); + printk("%sinvalid mapping:%px\n", loglvl, mapping); return; }
if (!host) { - pr_warn("aops:%ps\n", a_ops); + printk("%saops:%ps\n", loglvl, a_ops); return; }
if (get_kernel_nofault(dentry_first, &host->i_dentry.first) || get_kernel_nofault(ino, &host->i_ino)) { - pr_warn("aops:%ps invalid inode:%px\n", a_ops, host); + printk("%saops:%ps invalid inode:%px\n", loglvl, a_ops, host); return; }
if (!dentry_first) { - pr_warn("aops:%ps ino:%lx\n", a_ops, ino); + printk("%saops:%ps ino:%lx\n", loglvl, a_ops, ino); return; }
dentry_ptr = container_of(dentry_first, struct dentry, d_u.d_alias); if (get_kernel_nofault(dentry, dentry_ptr) || !dentry.d_parent || !dentry.d_name.name) { - pr_warn("aops:%ps ino:%lx invalid dentry:%px\n", - a_ops, ino, dentry_ptr); + printk("%saops:%ps ino:%lx invalid dentry:%px\n", + loglvl, a_ops, ino, dentry_ptr); return; }
@@ -653,8 +653,8 @@ void dump_mapping(const struct address_space *mapping) * Even if strncpy_from_kernel_nofault() succeeded, * the fname could be unreliable */ - pr_warn("aops:%ps ino:%lx dentry name(?):\"%s\"\n", - a_ops, ino, fname); + printk("%saops:%ps ino:%lx dentry name(?):\"%s\"\n", + loglvl, a_ops, ino, fname); }
void clear_inode(struct inode *inode) diff --git a/include/linux/fs.h b/include/linux/fs.h index a25b72397af5..fa2b04bed9d6 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -3137,7 +3137,7 @@ extern void unlock_new_inode(struct inode *); extern void discard_new_inode(struct inode *); extern unsigned int get_next_ino(void); extern void evict_inodes(struct super_block *sb); -void dump_mapping(const struct address_space *); +void dump_mapping(const char *loglvl, const struct address_space *);
/* * Userspace may rely on the inode number being non-zero. For example, glibc diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h index 39a7714605a7..69849d457f4c 100644 --- a/include/linux/mmdebug.h +++ b/include/linux/mmdebug.h @@ -11,6 +11,7 @@ struct mm_struct; struct vma_iterator;
void dump_page(const struct page *page, const char *reason); +void dump_page_lvl(const char *loglvl, const struct page *page); void dump_vma(const struct vm_area_struct *vma); void dump_mm(const struct mm_struct *mm); void vma_iter_dump_tree(const struct vma_iterator *vmi); diff --git a/mm/debug.c b/mm/debug.c index aa57d3ffd4ed..0df242c77c7c 100644 --- a/mm/debug.c +++ b/mm/debug.c @@ -67,36 +67,38 @@ static const char *page_type_name(unsigned int page_type) return page_type_names[i]; }
-static void __dump_folio(struct folio *folio, struct page *page, - unsigned long pfn, unsigned long idx) +static void __dump_folio(const char *loglvl, struct folio *folio, + struct page *page, unsigned long pfn, + unsigned long idx) { struct address_space *mapping = folio_mapping(folio); int mapcount = atomic_read(&page->_mapcount); char *type = "";
mapcount = page_mapcount_is_type(mapcount) ? 0 : mapcount + 1; - pr_warn("page: refcount:%d mapcount:%d mapping:%p index:%#lx pfn:%#lx\n", - folio_ref_count(folio), mapcount, mapping, - folio->index + idx, pfn); + printk("%spage: refcount:%d mapcount:%d mapping:%p index:%#lx pfn:%#lx\n", + loglvl, folio_ref_count(folio), mapcount, mapping, + folio->index + idx, pfn); if (folio_test_large(folio)) { - pr_warn("head: order:%u mapcount:%d entire_mapcount:%d nr_pages_mapped:%d pincount:%d\n", - folio_order(folio), - folio_mapcount(folio), - folio_entire_mapcount(folio), - folio_nr_pages_mapped(folio), - atomic_read(&folio->_pincount)); + printk("%shead: order:%u mapcount:%d entire_mapcount:%d nr_pages_mapped:%d pincount:%d\n", + loglvl, + folio_order(folio), + folio_mapcount(folio), + folio_entire_mapcount(folio), + folio_nr_pages_mapped(folio), + atomic_read(&folio->_pincount)); }
#ifdef CONFIG_MEMCG if (folio->memcg_data) - pr_warn("memcg:%lx\n", folio->memcg_data); + printk("%smemcg:%lx\n", loglvl, folio->memcg_data); #endif if (folio_test_ksm(folio)) type = "ksm "; else if (folio_test_anon(folio)) type = "anon "; else if (mapping) - dump_mapping(mapping); + dump_mapping(loglvl, mapping); BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);
/* @@ -105,22 +107,22 @@ static void __dump_folio(struct folio *folio, struct page *page, * state for debugging, it should be fine to accept a bit of * inaccuracy here due to racing. */ - pr_warn("%sflags: %pGp%s\n", type, &folio->flags, - is_migrate_cma_folio(folio, pfn) ? " CMA" : ""); + printk("%s%sflags: %pGp%s\n", loglvl, type, &folio->flags, + is_migrate_cma_folio(folio, pfn) ? " CMA" : ""); if (page_has_type(&folio->page)) pr_warn("page_type: %x(%s)\n", folio->page.page_type >> 24, page_type_name(folio->page.page_type));
- print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32, - sizeof(unsigned long), page, - sizeof(struct page), false); + print_hex_dump(loglvl, "raw: ", DUMP_PREFIX_NONE, 32, + sizeof(unsigned long), page, + sizeof(struct page), false); if (folio_test_large(folio)) - print_hex_dump(KERN_WARNING, "head: ", DUMP_PREFIX_NONE, 32, - sizeof(unsigned long), folio, - 2 * sizeof(struct page), false); + print_hex_dump(loglvl, "head: ", DUMP_PREFIX_NONE, 32, + sizeof(unsigned long), folio, + 2 * sizeof(struct page), false); }
-static void __dump_page(const struct page *page) +void dump_page_lvl(const char *loglvl, const struct page *page) { struct folio *foliop, folio; struct page precise; @@ -149,22 +151,23 @@ static void __dump_page(const struct page *page) if (idx > nr_pages) { if (loops-- > 0) goto again; - pr_warn("page does not match folio\n"); + printk("%spage does not match folio\n", loglvl); precise.compound_head &= ~1UL; foliop = (struct folio *)&precise; idx = 0; }
dump: - __dump_folio(foliop, &precise, pfn, idx); + __dump_folio(loglvl, foliop, &precise, pfn, idx); } +EXPORT_SYMBOL_GPL(dump_page_lvl);
void dump_page(const struct page *page, const char *reason) { if (PagePoisoned(page)) pr_warn("page:%p is uninitialized and poisoned", page); else - __dump_page(page); + dump_page_lvl(KERN_WARNING, page); if (reason) pr_warn("page dumped because: %s\n", reason); dump_page_owner(page);