mm: page_alloc: only check the alloc flags and gfp_mask for dirty once
author Mel Gorman <[email protected]>
Wed, 4 Jun 2014 23:10:12 +0000 (16:10 -0700)
committer Linus Torvalds <[email protected]>
Wed, 4 Jun 2014 23:54:09 +0000 (16:54 -0700)
Whether an allocation must respect per-zone dirty limits depends only on alloc_flags and gfp_mask, yet the check is currently recalculated for every zone in the zonelist.  Hoist it out of the per-zone loop into a single consider_zone_dirty flag computed once per call to get_page_from_freelist().

Signed-off-by: Mel Gorman <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Reviewed-by: Rik van Riel <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Cc: Jan Kara <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: Theodore Ts'o <[email protected]>
Cc: "Paul E. McKenney" <[email protected]>
Cc: Oleg Nesterov <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
mm/page_alloc.c

index da526905b4a571efee3dec741d50fd4020017e7a..30f327a720fd746f1841a36e7db26bb440406716 100644 (file)
@@ -1917,6 +1917,8 @@ get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
        nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
        int zlc_active = 0;             /* set if using zonelist_cache */
        int did_zlc_setup = 0;          /* just call zlc_setup() one time */
+       bool consider_zone_dirty = (alloc_flags & ALLOC_WMARK_LOW) &&
+                               (gfp_mask & __GFP_WRITE);
 
        classzone_idx = zone_idx(preferred_zone);
 zonelist_scan:
@@ -1976,8 +1978,7 @@ zonelist_scan:
                 * will require awareness of zones in the
                 * dirty-throttling and the flusher threads.
                 */
-               if ((alloc_flags & ALLOC_WMARK_LOW) &&
-                   (gfp_mask & __GFP_WRITE) && !zone_dirty_ok(zone))
+               if (consider_zone_dirty && !zone_dirty_ok(zone))
                        continue;
 
                mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];