mm/free_pcppages_bulk: update pcp->count inside
author Aaron Lu <[email protected]>
Thu, 5 Apr 2018 23:24:06 +0000 (16:24 -0700)
committer Linus Torvalds <[email protected]>
Fri, 6 Apr 2018 04:36:26 +0000 (21:36 -0700)
Matthew Wilcox found that all callers of free_pcppages_bulk() currently
update pcp->count immediately afterwards, so it's natural to do it inside
free_pcppages_bulk() itself.

No functionality or performance change is expected from this patch.

Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Aaron Lu <[email protected]>
Suggested-by: Matthew Wilcox <[email protected]>
Acked-by: David Rientjes <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Reviewed-by: Andrew Morton <[email protected]>
Acked-by: Vlastimil Babka <[email protected]>
Cc: Huang Ying <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: Kemi Wang <[email protected]>
Cc: Tim Chen <[email protected]>
Cc: Andi Kleen <[email protected]>
Cc: Mel Gorman <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
mm/page_alloc.c

index 86b7f0430e02ba73f181c68195580d022099eff3..08c195cdf161a822af3eee86d70cfe2e03e4a566 100644
@@ -1112,6 +1112,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
                        page = list_last_entry(list, struct page, lru);
                        /* must delete as __free_one_page list manipulates */
                        list_del(&page->lru);
+                       pcp->count--;
 
                        mt = get_pcppage_migratetype(page);
                        /* MIGRATE_ISOLATE page should not go to pcplists */
@@ -2495,10 +2496,8 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
        local_irq_save(flags);
        batch = READ_ONCE(pcp->batch);
        to_drain = min(pcp->count, batch);
-       if (to_drain > 0) {
+       if (to_drain > 0)
                free_pcppages_bulk(zone, to_drain, pcp);
-               pcp->count -= to_drain;
-       }
        local_irq_restore(flags);
 }
 #endif
@@ -2520,10 +2519,8 @@ static void drain_pages_zone(unsigned int cpu, struct zone *zone)
        pset = per_cpu_ptr(zone->pageset, cpu);
 
        pcp = &pset->pcp;
-       if (pcp->count) {
+       if (pcp->count)
                free_pcppages_bulk(zone, pcp->count, pcp);
-               pcp->count = 0;
-       }
        local_irq_restore(flags);
 }
 
@@ -2747,7 +2744,6 @@ static void free_unref_page_commit(struct page *page, unsigned long pfn)
        if (pcp->count >= pcp->high) {
                unsigned long batch = READ_ONCE(pcp->batch);
                free_pcppages_bulk(zone, batch, pcp);
-               pcp->count -= batch;
        }
 }
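
For context, here is a minimal standalone sketch of the calling pattern this
patch establishes: the bulk-free helper owns the pcp->count bookkeeping, so
callers no longer adjust the counter after the call. The struct layout,
helper name and main() driver below are simplified stand-ins for
illustration, not the kernel implementation.

	/* sketch only: simplified stand-in for the pcp accounting pattern */
	#include <stdio.h>

	struct per_cpu_pages {
		int count;	/* pages currently on the per-cpu list */
		int batch;	/* chunk size used for bulk freeing */
	};

	/* Callee owns the bookkeeping: count drops as each page is freed. */
	static void free_pages_bulk(struct per_cpu_pages *pcp, int to_free)
	{
		while (to_free-- > 0 && pcp->count > 0)
			pcp->count--;	/* one page handed back to the buddy allocator */
	}

	int main(void)
	{
		struct per_cpu_pages pcp = { .count = 10, .batch = 4 };

		/* Caller no longer touches pcp.count after the call. */
		free_pages_bulk(&pcp, pcp.batch);
		printf("remaining: %d\n", pcp.count);	/* prints 6 */
		return 0;
	}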