On 2025/4/8 2:01, Johannes Weiner wrote:
...
@@ -2934,6 +2981,7 @@ struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone,
 {
 	struct page *page;
 	unsigned long flags;
+	enum rmqueue_mode rmqm = RMQUEUE_NORMAL;
 
 	do {
 		page = NULL;
@@ -2945,7 +2993,7 @@ struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone,
 		if (alloc_flags & ALLOC_HIGHATOMIC)
 			page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
 		if (!page) {
-			page = __rmqueue(zone, order, migratetype, alloc_flags);
+			page = __rmqueue(zone, order, migratetype, alloc_flags, &rmqm);
 
 			/*
 			 * If the allocation fails, allow OOM handling and
It is not visible in this diff, but zone->lock is taken and released inside the do..while loop, isn't it? Doesn't that mean the freelists can be changed by other CPUs between iterations, so the cached rmqm is stale on a retry?
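
To make the question concrete, here is a rough user-space sketch of the pattern I mean (not the real mm/page_alloc.c code; everything except rmqm and RMQUEUE_NORMAL is a stand-in, and I am assuming, from my reading of the patch, that __rmqueue() advances the cached mode as it falls back): the mode hint is initialized once outside the retry loop, but the lock protecting the freelists is only held within each iteration.

#include <pthread.h>
#include <stddef.h>

enum rmqueue_mode { RMQUEUE_NORMAL /* , ... other modes elided ... */ };

static pthread_mutex_t zone_lock = PTHREAD_MUTEX_INITIALIZER;
static int dummy_page;

/* Stand-in for __rmqueue(): in the patch this would walk the freelists
 * starting at *mode and may advance *mode as it has to fall back. */
static void *fake_rmqueue(enum rmqueue_mode *mode)
{
	(void)mode;
	return &dummy_page;
}

/* Stand-in for check_new_pages(): nonzero means "retry the loop". */
static int fake_check_new_pages(void *page)
{
	return page == NULL;
}

static void *fake_rmqueue_buddy(void)
{
	enum rmqueue_mode rmqm = RMQUEUE_NORMAL;	/* initialized once */
	void *page;

	do {
		pthread_mutex_lock(&zone_lock);
		page = fake_rmqueue(&rmqm);		/* may advance rmqm */
		pthread_mutex_unlock(&zone_lock);
		/*
		 * Lock dropped here: another CPU can refill the earlier
		 * freelists before the next iteration, yet that iteration
		 * still starts from the rmqm cached under the previous
		 * lock hold.
		 */
	} while (fake_check_new_pages(page));

	return page;
}

int main(void)
{
	return fake_rmqueue_buddy() ? 0 : 1;
}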