mm, devm_memremap_pages: add MEMORY_DEVICE_PRIVATE support
author	Dan Williams <[email protected]>	Fri, 28 Dec 2018 08:35:01 +0000 (00:35 -0800)
committer	Linus Torvalds <[email protected]>	Fri, 28 Dec 2018 20:11:47 +0000 (12:11 -0800)
In preparation for consolidating all ZONE_DEVICE enabling via
devm_memremap_pages(), teach it how to handle the constraints of
MEMORY_DEVICE_PRIVATE ranges.
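
As an illustration (not part of this patch): a driver opts into this
path purely by setting pgmap->type before calling
devm_memremap_pages().  A minimal sketch, assuming the dev_pagemap
layout of this series; mydev_add_private_memory() is a hypothetical
name, and the ->ref/->kill and ->page_fault/->page_free hooks that a
DEVICE_PRIVATE pagemap needs are assumed to be wired up elsewhere:

	static int mydev_add_private_memory(struct device *dev,
			struct resource *res, struct percpu_ref *ref)
	{
		struct dev_pagemap *pgmap;
		void *addr;

		pgmap = devm_kzalloc(dev, sizeof(*pgmap), GFP_KERNEL);
		if (!pgmap)
			return -ENOMEM;

		pgmap->type = MEMORY_DEVICE_PRIVATE;
		pgmap->res = *res;	/* CPU-unaddressable device range */
		pgmap->ref = ref;	/* caller-managed page refcount */

		addr = devm_memremap_pages(dev, pgmap);
		return PTR_ERR_OR_ZERO(addr);
	}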

[[email protected]: call move_pfn_range_to_zone for MEMORY_DEVICE_PRIVATE]
Link: http://lkml.kernel.org/r/154275559036.76910.12434636179931292607.stgit@dwillia2-desk3.amr.corp.intel.com
Signed-off-by: Dan Williams <[email protected]>
Reviewed-by: Jérôme Glisse <[email protected]>
Acked-by: Christoph Hellwig <[email protected]>
Reported-by: Logan Gunthorpe <[email protected]>
Reviewed-by: Logan Gunthorpe <[email protected]>
Cc: Balbir Singh <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
diff --git a/kernel/memremap.c b/kernel/memremap.c
index 5e45f0c327a58a5f6f9d1f41d2d19eda9691ebbe..3eef989ef0352b6d07ae44c7ea426ef272b9b0e8 100644
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -98,9 +98,15 @@ static void devm_memremap_pages_release(void *data)
                - align_start;
 
        mem_hotplug_begin();
-       arch_remove_memory(align_start, align_size, pgmap->altmap_valid ?
-                       &pgmap->altmap : NULL);
-       kasan_remove_zero_shadow(__va(align_start), align_size);
+       if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
+               pfn = align_start >> PAGE_SHIFT;
+               __remove_pages(page_zone(pfn_to_page(pfn)), pfn,
+                               align_size >> PAGE_SHIFT, NULL);
+       } else {
+               arch_remove_memory(align_start, align_size,
+                               pgmap->altmap_valid ? &pgmap->altmap : NULL);
+               kasan_remove_zero_shadow(__va(align_start), align_size);
+       }
        mem_hotplug_done();
 
        untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
@@ -187,17 +193,40 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
                goto err_pfn_remap;
 
        mem_hotplug_begin();
-       error = kasan_add_zero_shadow(__va(align_start), align_size);
-       if (error) {
-               mem_hotplug_done();
-               goto err_kasan;
+
+       /*
+        * For device private memory, we call add_pages() as we only
+        * need to allocate and initialize struct page for the device
+        * memory. Moreover, the device memory is inaccessible, so we
+        * do not want to create a linear mapping for it the way
+        * arch_add_memory() would.
+        *
+        * For all other device memory types, which are accessible by
+        * the CPU, we do want the linear mapping and thus use
+        * arch_add_memory().
+        */
+       if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
+               error = add_pages(nid, align_start >> PAGE_SHIFT,
+                               align_size >> PAGE_SHIFT, NULL, false);
+       } else {
+               error = kasan_add_zero_shadow(__va(align_start), align_size);
+               if (error) {
+                       mem_hotplug_done();
+                       goto err_kasan;
+               }
+
+               error = arch_add_memory(nid, align_start, align_size, altmap,
+                               false);
+       }
+
+       if (!error) {
+               struct zone *zone;
+
+               zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
+               move_pfn_range_to_zone(zone, align_start >> PAGE_SHIFT,
+                               align_size >> PAGE_SHIFT, altmap);
        }
 
-       error = arch_add_memory(nid, align_start, align_size, altmap, false);
-       if (!error)
-               move_pfn_range_to_zone(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
-                                       align_start >> PAGE_SHIFT,
-                                       align_size >> PAGE_SHIFT, altmap);
        mem_hotplug_done();
        if (error)
                goto err_add_memory;
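
A note for readers (not part of the patch): once either branch above
succeeds, move_pfn_range_to_zone() has placed every page of the range
in ZONE_DEVICE, whether or not a linear mapping exists.  Roughly, a
consumer may assume:

	/* illustrative check only; holds for both branches above */
	struct page *page = pfn_to_page(align_start >> PAGE_SHIFT);

	WARN_ON(!is_zone_device_page(page));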