x86-64: fall back to regular page vmemmap on allocation failure
author Johannes Weiner <[email protected]>
Mon, 29 Apr 2013 22:07:56 +0000 (15:07 -0700)
committer Linus Torvalds <[email protected]>
Mon, 29 Apr 2013 22:54:35 +0000 (15:54 -0700)
Memory hotplug can happen on a machine that is under load and suffering
from memory shortage and fragmentation, so huge page allocations for the
vmemmap are not guaranteed to succeed.

Try to fall back to regular pages before failing the hotplug event
completely.
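
In short, the per-PMD loop now tries the huge allocation first and only
falls back for the ranges where it fails.  A condensed sketch of the
resulting control flow (the diff below is the authoritative version;
pgd/pud setup is elided here):

	for (addr = start; addr < end; addr = next) {
		next = pmd_addr_end(addr, end);

		if (pmd_none(*pmd)) {
			void *p = vmemmap_alloc_block_buf(PMD_SIZE, node);

			if (p) {
				/* Got a PMD_SIZE buffer: map it as one large page. */
				set_pmd(pmd, __pmd(pte_val(pfn_pte(__pa(p) >> PAGE_SHIFT,
								   PAGE_KERNEL_LARGE))));
				continue;
			}
			/* Huge allocation failed: fall through to base pages. */
		} else if (pmd_large(*pmd)) {
			/* Range is already backed by a huge page. */
			vmemmap_verify((pte_t *)pmd, node, addr, next);
			continue;
		}
		/* Fallback: back this range with regular PAGE_SIZE pages. */
		pr_warn_once("vmemmap: falling back to regular page backing\n");
		if (vmemmap_populate_basepages(addr, next, node))
			return -ENOMEM;
	}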

Signed-off-by: Johannes Weiner <[email protected]>
Cc: Ben Hutchings <[email protected]>
Cc: Bernhard Schmidt <[email protected]>
Cc: Johannes Weiner <[email protected]>
Cc: Russell King <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: "H. Peter Anvin" <[email protected]>
Cc: Benjamin Herrenschmidt <[email protected]>
Cc: "Luck, Tony" <[email protected]>
Cc: Heiko Carstens <[email protected]>
Cc: David Miller <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
arch/x86/mm/init_64.c

index 9f6347c468b005f9fa0239815e3726d16ad89baf..71ff55a1b28781c5d0bef9fbc75c85449dd7ce67 100644
@@ -1303,31 +1303,37 @@ static int __meminit vmemmap_populate_hugepages(unsigned long start,
 
                pmd = pmd_offset(pud, addr);
                if (pmd_none(*pmd)) {
-                       pte_t entry;
                        void *p;
 
                        p = vmemmap_alloc_block_buf(PMD_SIZE, node);
-                       if (!p)
-                               return -ENOMEM;
-
-                       entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
-                                       PAGE_KERNEL_LARGE);
-                       set_pmd(pmd, __pmd(pte_val(entry)));
-
-                       /* check to see if we have contiguous blocks */
-                       if (p_end != p || node_start != node) {
-                               if (p_start)
-                                       printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
-                                              addr_start, addr_end-1, p_start, p_end-1, node_start);
-                               addr_start = addr;
-                               node_start = node;
-                               p_start = p;
-                       }
+                       if (p) {
+                               pte_t entry;
+
+                               entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
+                                               PAGE_KERNEL_LARGE);
+                               set_pmd(pmd, __pmd(pte_val(entry)));
+
+                               /* check to see if we have contiguous blocks */
+                               if (p_end != p || node_start != node) {
+                                       if (p_start)
+                                               printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
+                                                      addr_start, addr_end-1, p_start, p_end-1, node_start);
+                                       addr_start = addr;
+                                       node_start = node;
+                                       p_start = p;
+                               }
 
-                       addr_end = addr + PMD_SIZE;
-                       p_end = p + PMD_SIZE;
-               } else
+                               addr_end = addr + PMD_SIZE;
+                               p_end = p + PMD_SIZE;
+                               continue;
+                       }
+               } else if (pmd_large(*pmd)) {
                        vmemmap_verify((pte_t *)pmd, node, addr, next);
+                       continue;
+               }
+               pr_warn_once("vmemmap: falling back to regular page backing\n");
+               if (vmemmap_populate_basepages(addr, next, node))
+                       return -ENOMEM;
        }
        return 0;
 }
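
For reference, the fallback path vmemmap_populate_basepages() lives in the
generic code in mm/sparse-vmemmap.c and maps the range one PAGE_SIZE entry
at a time.  Roughly (a simplified sketch from memory of the generic code at
the time, not copied from the tree):

	int __meminit vmemmap_populate_basepages(unsigned long start,
						 unsigned long end, int node)
	{
		unsigned long addr;

		for (addr = start; addr < end; addr += PAGE_SIZE) {
			pgd_t *pgd = vmemmap_pgd_populate(addr, node);
			pud_t *pud;
			pmd_t *pmd;
			pte_t *pte;

			if (!pgd)
				return -ENOMEM;
			pud = vmemmap_pud_populate(pgd, addr, node);
			if (!pud)
				return -ENOMEM;
			pmd = vmemmap_pmd_populate(pud, addr, node);
			if (!pmd)
				return -ENOMEM;
			/* Allocate and install a single page of vmemmap backing. */
			pte = vmemmap_pte_populate(pmd, addr, node);
			if (!pte)
				return -ENOMEM;
			vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
		}
		return 0;
	}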