@@ -2053,8 +2053,7 @@ static void unreserve_highatomic_pageblock(const struct alloc_context *ac)
for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx,
ac->nodemask) {
- /* Preserve at least one pageblock */
- if (zone->nr_reserved_highatomic <= pageblock_nr_pages)
+ if (!zone->nr_reserved_highatomic)
continue;
spin_lock_irqsave(&zone->lock, flags);
@@ -3276,11 +3275,10 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
/*
* If an allocation failed after direct reclaim, it could be because
- * pages are pinned on the per-cpu lists or in high alloc reserves.
+ * pages are pinned on the per-cpu lists.
* Shrink them them and try again
*/
if (!page && !drained) {
- unreserve_highatomic_pageblock(ac);
drain_all_pages(NULL);
drained = true;
goto retry;
@@ -3636,6 +3634,12 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
goto retry;
/*
+	 * Make sure we are not pinning the high-atomic pageblock reserves
+	 * when we are really fighting to satisfy a !costly-order allocation
+	 * and are running out of memory.
+ unreserve_highatomic_pageblock(ac);
+
+ /*
* It doesn't make any sense to retry for the compaction if the order-0
* reclaim is not able to make any progress because the current
* implementation of the compaction depends on the sufficient amount