The 4K config uses one full page at level 4 of the page table. Add support for
single fragment allocation in the page table fragment code and use that for the
4K config. This makes both 4K and 64K use the same code path. Later we will
switch pmd to use the page table fragment code. This is done only for 64-bit
platforms, which are the ones using page table fragment support.
Signed-off-by: Aneesh Kumar K.V <[email protected]>
Signed-off-by: Michael Ellerman <[email protected]>
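
Not part of the patch: a minimal standalone sketch of the fragment arithmetic,
assuming the hash page table geometry in effect at the time of this change
(H_PTE_INDEX_SIZE of 9 for the 4K config and 8 for the 64K config). It shows
why the 4K config ends up with exactly one fragment per page, which is the
case the new single-fragment path handles.

/*
 * Illustrative sketch only, not part of the patch. The index sizes
 * below are assumptions matching the hash MMU headers at this time:
 * H_PTE_INDEX_SIZE = 9 (4K pages), 8 (64K pages); 8 bytes per pte entry.
 */
#include <stdio.h>

int main(void)
{
	/* 4K config: PAGE_SIZE = 4096, H_PTE_INDEX_SIZE = 9 */
	unsigned long frag_shift_4k = 9 + 3;
	unsigned long frag_nr_4k = 4096UL >> frag_shift_4k;

	/* 64K config: PAGE_SIZE = 65536, H_PTE_INDEX_SIZE = 8 */
	unsigned long frag_shift_64k = 8 + 3;
	unsigned long frag_nr_64k = 65536UL >> frag_shift_64k;

	/* Prints: 4K: 1 fragment(s) per page, 64K: 32 fragments per page */
	printf("4K: %lu fragment(s) per page, 64K: %lu fragments per page\n",
	       frag_nr_4k, frag_nr_64k);
	return 0;
}
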
#define H_PAGE_4K_PFN 0x0
#define H_PAGE_THP_HUGE 0x0
#define H_PAGE_COMBO 0x0
-#define H_PTE_FRAG_NR 0
-#define H_PTE_FRAG_SIZE_SHIFT 0
+
+/* 8 bytes per pte entry */
+#define H_PTE_FRAG_SIZE_SHIFT (H_PTE_INDEX_SIZE + 3)
+#define H_PTE_FRAG_NR (PAGE_SIZE >> H_PTE_FRAG_SIZE_SHIFT)
/* memory key bits, only 8 keys supported */
#define H_PTE_PKEY_BIT0 0
#ifdef CONFIG_PPC_SUBPAGE_PROT
struct subpage_prot_table spt;
#endif /* CONFIG_PPC_SUBPAGE_PROT */
-#ifdef CONFIG_PPC_64K_PAGES
- /* for 4K PTE fragment support */
+ /*
+ * pagetable fragment support
+ */
void *pte_frag;
-#endif
#ifdef CONFIG_SPAPR_TCE_IOMMU
struct list_head iommu_group_mem_list;
#endif
return (pgtable_t)pmd_page_vaddr(pmd);
}
-#ifdef CONFIG_PPC_4K_PAGES
-static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
- unsigned long address)
-{
- return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
-}
-
-static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
- unsigned long address)
-{
- struct page *page;
- pte_t *pte;
-
- pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO | __GFP_ACCOUNT);
- if (!pte)
- return NULL;
- page = virt_to_page(pte);
- if (!pgtable_page_ctor(page)) {
- __free_page(page);
- return NULL;
- }
- return pte;
-}
-#else /* if CONFIG_PPC_64K_PAGES */
-
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
{
{
return (pgtable_t)pte_fragment_alloc(mm, address, 0);
}
-#endif
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
mm->context.id = index;
-#ifdef CONFIG_PPC_64K_PAGES
mm->context.pte_frag = NULL;
-#endif
#ifdef CONFIG_SPAPR_TCE_IOMMU
mm_iommu_init(mm);
#endif
spin_unlock(&mmu_context_lock);
}
-#ifdef CONFIG_PPC_64K_PAGES
static void destroy_pagetable_page(struct mm_struct *mm)
{
int count;
}
}
-#else
-static inline void destroy_pagetable_page(struct mm_struct *mm)
-{
- return;
-}
-#endif
-
void destroy_context(struct mm_struct *mm)
{
#ifdef CONFIG_SPAPR_TCE_IOMMU
asm volatile("eieio; tlbsync; ptesync" : : : "memory");
}
EXPORT_SYMBOL_GPL(mmu_partition_table_set_entry);
-#ifdef CONFIG_PPC_64K_PAGES
+
static pte_t *get_pte_from_cache(struct mm_struct *mm)
{
void *pte_frag, *ret;
return NULL;
}
+
ret = page_address(page);
+	/*
+	 * If we support only one fragment, just return the
+	 * allocated page.
+	 */
+ if (PTE_FRAG_NR == 1)
+ return ret;
spin_lock(&mm->page_table_lock);
/*
* If we find pgtable_page set, we return
return __alloc_for_ptecache(mm, kernel);
}
-#endif /* CONFIG_PPC_64K_PAGES */
-
void pte_fragment_free(unsigned long *table, int kernel)
{
struct page *page = virt_to_page(table);