/* Turn on the MMU */
mov r0, #DISABLE_DCACHE
- bl enable_mmu_secure
+ bl enable_mmu_svc_mon
/* Enable the data cache. */
ldcopr r9, SCTLR
#define TTBCR_T0SZ_SHIFT U(0)
#define TTBCR_T0SZ_MASK U(0x7)
+/*
+ * HTCR definitions
+ */
+#define HTCR_RES1 ((U(1) << 31) | (U(1) << 23))
+
+#define HTCR_SH0_NON_SHAREABLE (U(0x0) << 12)
+#define HTCR_SH0_OUTER_SHAREABLE (U(0x2) << 12)
+#define HTCR_SH0_INNER_SHAREABLE (U(0x3) << 12)
+
+#define HTCR_RGN0_OUTER_NC (U(0x0) << 10)
+#define HTCR_RGN0_OUTER_WBA (U(0x1) << 10)
+#define HTCR_RGN0_OUTER_WT (U(0x2) << 10)
+#define HTCR_RGN0_OUTER_WBNA (U(0x3) << 10)
+
+#define HTCR_RGN0_INNER_NC (U(0x0) << 8)
+#define HTCR_RGN0_INNER_WBA (U(0x1) << 8)
+#define HTCR_RGN0_INNER_WT (U(0x2) << 8)
+#define HTCR_RGN0_INNER_WBNA (U(0x3) << 8)
+
+#define HTCR_T0SZ_SHIFT U(0)
+#define HTCR_T0SZ_MASK U(0x7)
+
#define MODE_RW_SHIFT U(0x4)
#define MODE_RW_MASK U(0x1)
#define MODE_RW_32 U(0x1)
#define TLBIMVA p15, 0, c8, c7, 1
#define TLBIMVAA p15, 0, c8, c7, 3
#define TLBIMVAAIS p15, 0, c8, c3, 3
+#define TLBIMVAHIS p15, 4, c8, c3, 1
#define BPIALLIS p15, 0, c7, c1, 6
#define BPIALL p15, 0, c7, c5, 6
#define ICIALLU p15, 0, c7, c5, 0
#define CLIDR p15, 1, c0, c0, 1
#define CSSELR p15, 2, c0, c0, 0
#define CCSIDR p15, 1, c0, c0, 0
+#define HTCR p15, 4, c2, c0, 2
+#define HMAIR0 p15, 4, c10, c2, 0
#define DBGOSDLR p14, 0, c1, c3, 4
/* Debug register defines. The format is: coproc, opt1, CRn, CRm, opt2 */
#define CNTVOFF_64 p15, 4, c14
#define VTTBR_64 p15, 6, c2
#define CNTPCT_64 p15, 0, c14
+#define HTTBR_64 p15, 4, c2
/* 64 bit GICv3 CPU Interface system register defines. The format is: coproc, opt1, CRm */
#define ICC_SGI1R_EL1_64 p15, 0, c12
/*
- * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
DEFINE_TLBIOP_PARAM_FUNC(mva, TLBIMVA)
DEFINE_TLBIOP_PARAM_FUNC(mvaa, TLBIMVAA)
DEFINE_TLBIOP_PARAM_FUNC(mvaais, TLBIMVAAIS)
+DEFINE_TLBIOP_PARAM_FUNC(mvahis, TLBIMVAHIS)
/*
* BPI operation prototypes.
#define IS_IN_SECURE() \
(GET_NS_BIT(read_scr()) == 0)
+#define IS_IN_HYP() (GET_M32(read_cpsr()) == MODE32_hyp)
+#define IS_IN_SVC() (GET_M32(read_cpsr()) == MODE32_svc)
+#define IS_IN_MON() (GET_M32(read_cpsr()) == MODE32_mon)
+#define IS_IN_EL2() IS_IN_HYP()
/*
* If EL3 is AArch32, then secure PL1 and monitor mode correspond to EL3
*/
 * TCR definitions
*/
#define TCR_EL3_RES1 ((U(1) << 31) | (U(1) << 23))
+#define TCR_EL2_RES1 ((ULL(1) << 31) | (ULL(1) << 23))
#define TCR_EL1_IPS_SHIFT U(32)
+#define TCR_EL2_PS_SHIFT U(16)
#define TCR_EL3_PS_SHIFT U(16)
#define TCR_TxSZ_MIN ULL(16)
#ifdef AARCH32
/* AArch32 specific translation table API */
+#if !ERROR_DEPRECATED
void enable_mmu_secure(unsigned int flags);
-
void enable_mmu_direct(unsigned int flags);
+#endif
+
+void enable_mmu_svc_mon(unsigned int flags);
+void enable_mmu_hyp(unsigned int flags);
+
+void enable_mmu_direct_svc_mon(unsigned int flags);
+void enable_mmu_direct_hyp(unsigned int flags);
#else
/* AArch64 specific translation table APIs */
void enable_mmu_el1(unsigned int flags);
+void enable_mmu_el2(unsigned int flags);
void enable_mmu_el3(unsigned int flags);
void enable_mmu_direct_el1(unsigned int flags);
+void enable_mmu_direct_el2(unsigned int flags);
void enable_mmu_direct_el3(unsigned int flags);
#endif /* AARCH32 */
* library to detect it at runtime.
*/
#define EL1_EL0_REGIME 1
+#define EL2_REGIME 2
#define EL3_REGIME 3
#define EL_REGIME_INVALID -1
* Function for enabling the MMU in Secure PL1, assuming that the
* page-tables have already been created.
******************************************************************************/
+#if !ERROR_DEPRECATED
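+/*
+ * Deprecated wrappers, kept while ERROR_DEPRECATED is 0. They simply forward
+ * to the new SVC/Monitor API below.
+ */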
void enable_mmu_secure(unsigned int flags)
+{
+ enable_mmu_svc_mon(flags);
+}
+
+void enable_mmu_direct(unsigned int flags)
+{
+ enable_mmu_direct_svc_mon(flags);
+}
+#endif
+
+void enable_mmu_svc_mon(unsigned int flags)
{
unsigned int mair0, ttbcr, sctlr;
uint64_t ttbr0;
isb();
}
-void enable_mmu_direct(unsigned int flags)
+void enable_mmu_direct_svc_mon(unsigned int flags)
{
- enable_mmu_secure(flags);
+ enable_mmu_svc_mon(flags);
}
#include <assert_macros.S>
#include <xlat_tables_v2.h>
- .global enable_mmu_direct
+ .global enable_mmu_direct_svc_mon
+ .global enable_mmu_direct_hyp
-func enable_mmu_direct
+ /* void enable_mmu_direct_svc_mon(unsigned int flags) */
+func enable_mmu_direct_svc_mon
/* Assert that MMU is turned off */
#if ENABLE_ASSERTIONS
ldcopr r1, SCTLR
isb
bx lr
-endfunc enable_mmu_direct
+endfunc enable_mmu_direct_svc_mon
+
+ /* void enable_mmu_direct_hyp(unsigned int flags) */
+func enable_mmu_direct_hyp
+ /* Assert that MMU is turned off */
+#if ENABLE_ASSERTIONS
+ ldcopr r1, HSCTLR
+ tst r1, #HSCTLR_M_BIT
+ ASM_ASSERT(eq)
+#endif
+
+ /* Invalidate TLB entries */
+ TLB_INVALIDATE(r0, TLBIALL)
+
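+	/* Save the flags argument; r0 is reused below as the mmu_cfg_params pointer */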
+ mov r3, r0
+ ldr r0, =mmu_cfg_params
+
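+	/*
+	 * mmu_cfg_params entries are 64-bit wide, so the MMU_CFG_* indices
+	 * are shifted by 3 to form byte offsets into the array.
+	 */
+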
+ /* HMAIR0 */
+ ldr r1, [r0, #(MMU_CFG_MAIR << 3)]
+ stcopr r1, HMAIR0
+
+ /* HTCR */
+ ldr r2, [r0, #(MMU_CFG_TCR << 3)]
+ stcopr r2, HTCR
+
+ /* HTTBR */
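+	/* r1 = low word, r2 = high word of the 64-bit translation table base */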
+ ldr r1, [r0, #(MMU_CFG_TTBR0 << 3)]
+ ldr r2, [r0, #((MMU_CFG_TTBR0 << 3) + 4)]
+ stcopr16 r1, r2, HTTBR_64
+
+ /*
+ * Ensure all translation table writes have drained into memory, the TLB
+ * invalidation is complete, and translation register writes are
+ * committed before enabling the MMU
+ */
+ dsb ish
+ isb
+
+	/* Enable the MMU by honoring flags */
+ ldcopr r1, HSCTLR
+ ldr r2, =(HSCTLR_WXN_BIT | HSCTLR_C_BIT | HSCTLR_M_BIT)
+ orr r1, r1, r2
+
+ /* Clear C bit if requested */
+ tst r3, #DISABLE_DCACHE
+ bicne r1, r1, #HSCTLR_C_BIT
+
+ stcopr r1, HSCTLR
+ isb
+
+ bx lr
+endfunc enable_mmu_direct_hyp
}
#endif /* ENABLE_ASSERTIONS */
-bool is_mmu_enabled_ctx(const xlat_ctx_t *ctx __unused)
+bool is_mmu_enabled_ctx(const xlat_ctx_t *ctx)
{
- return (read_sctlr() & SCTLR_M_BIT) != 0;
+ if (ctx->xlat_regime == EL1_EL0_REGIME) {
+ assert(xlat_arch_current_el() == 1U);
+ return (read_sctlr() & SCTLR_M_BIT) != 0U;
+ } else {
+ assert(ctx->xlat_regime == EL2_REGIME);
+ assert(xlat_arch_current_el() == 2U);
+ return (read_hsctlr() & HSCTLR_M_BIT) != 0U;
+ }
}
bool is_dcache_enabled(void)
{
- return (read_sctlr() & SCTLR_C_BIT) != 0;
+ if (IS_IN_EL2()) {
+ return (read_hsctlr() & HSCTLR_C_BIT) != 0U;
+ } else {
+ return (read_sctlr() & SCTLR_C_BIT) != 0U;
+ }
}
-uint64_t xlat_arch_regime_get_xn_desc(int xlat_regime __unused)
+uint64_t xlat_arch_regime_get_xn_desc(int xlat_regime)
{
- return UPPER_ATTRS(XN);
+ if (xlat_regime == EL1_EL0_REGIME) {
+ return UPPER_ATTRS(XN) | UPPER_ATTRS(PXN);
+ } else {
+ assert(xlat_regime == EL2_REGIME);
+ return UPPER_ATTRS(XN);
+ }
}
-void xlat_arch_tlbi_va(uintptr_t va, int xlat_regime __unused)
+void xlat_arch_tlbi_va(uintptr_t va, int xlat_regime)
{
/*
* Ensure the translation table write has drained into memory before
*/
dsbishst();
- tlbimvaais(TLBI_ADDR(va));
+ if (xlat_regime == EL1_EL0_REGIME) {
+ tlbimvaais(TLBI_ADDR(va));
+ } else {
+ assert(xlat_regime == EL2_REGIME);
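+		/* TLBIMVAHIS: invalidate by MVA, Hyp translation regime, Inner Shareable */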
+ tlbimvahis(TLBI_ADDR(va));
+ }
}
void xlat_arch_tlbi_va_sync(void)
unsigned int xlat_arch_current_el(void)
{
- /*
- * If EL3 is in AArch32 mode, all secure PL1 modes (Monitor, System,
- * SVC, Abort, UND, IRQ and FIQ modes) execute at EL3.
- *
- * The PL1&0 translation regime in AArch32 behaves like the EL1&0 regime
- * in AArch64 except for the XN bits, but we set and unset them at the
- * same time, so there's no difference in practice.
- */
- return 1U;
+ if (IS_IN_HYP()) {
+ return 2U;
+ } else {
+ assert(IS_IN_SVC() || IS_IN_MON());
+ /*
+ * If EL3 is in AArch32 mode, all secure PL1 modes (Monitor,
+ * System, SVC, Abort, UND, IRQ and FIQ modes) execute at EL3.
+ *
+ * The PL1&0 translation regime in AArch32 behaves like the
+ * EL1&0 regime in AArch64 except for the XN bits, but we set
+ * and unset them at the same time, so there's no difference in
+ * practice.
+ */
+ return 1U;
+ }
}
/*******************************************************************************
- * Function for enabling the MMU in Secure PL1, assuming that the page tables
+ * Function for setting up the MMU configuration for PL1 or PL2, assuming that the page tables
* have already been created.
******************************************************************************/
void setup_mmu_cfg(uint64_t *params, unsigned int flags,
uint64_t mair, ttbr0;
uint32_t ttbcr;
- assert(IS_IN_SECURE());
-
/* Set attributes in the right indices of the MAIR */
mair = MAIR0_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
mair |= MAIR0_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,
ATTR_NON_CACHEABLE_INDEX);
/*
- * Configure the control register for stage 1 of the PL1&0 translation
- * regime.
+ * Configure the control register for stage 1 of the PL1&0 or EL2
+ * translation regimes.
*/
/* Use the Long-descriptor translation table format. */
ttbcr = TTBCR_EAE_BIT;
- /*
- * Disable translation table walk for addresses that are translated
- * using TTBR1. Therefore, only TTBR0 is used.
- */
- ttbcr |= TTBCR_EPD1_BIT;
+ if (xlat_regime == EL1_EL0_REGIME) {
+ assert(IS_IN_SVC() || IS_IN_MON());
+ /*
+ * Disable translation table walk for addresses that are
+ * translated using TTBR1. Therefore, only TTBR0 is used.
+ */
+ ttbcr |= TTBCR_EPD1_BIT;
+ } else {
+ assert(xlat_regime == EL2_REGIME);
+ assert(IS_IN_HYP());
+
+ /*
+		 * Set the HTCR bits as well: translation table walks via HTTBR
+		 * are Inner and Outer write-back write-allocate, Inner Shareable.
+ */
+ ttbcr |= HTCR_RES1 |
+ HTCR_SH0_INNER_SHAREABLE | HTCR_RGN0_OUTER_WBA |
+ HTCR_RGN0_INNER_WBA;
+ }
/*
* Limit the input address ranges and memory region sizes translated
#include <xlat_tables_v2.h>
.global enable_mmu_direct_el1
+ .global enable_mmu_direct_el2
.global enable_mmu_direct_el3
/* Macros to read and write to system register for a given EL. */
mrs \gp_reg, \reg_name\()_el\()\el
.endm
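+
+	/*
+	 * Invalidate all TLB entries for the translation regime of the given
+	 * exception level.
+	 */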
+ .macro tlbi_invalidate_all el
+ .if \el == 1
+ TLB_INVALIDATE(vmalle1)
+ .elseif \el == 2
+ TLB_INVALIDATE(alle2)
+ .elseif \el == 3
+ TLB_INVALIDATE(alle3)
+ .else
+ .error "EL must be 1, 2 or 3"
+ .endif
+ .endm
+
+ /* void enable_mmu_direct_el<x>(unsigned int flags) */
.macro define_mmu_enable_func el
func enable_mmu_direct_\()el\el
#if ENABLE_ASSERTIONS
tst x1, #SCTLR_M_BIT
ASM_ASSERT(eq)
#endif
-
- /* Invalidate TLB entries */
- .if \el == 1
- TLB_INVALIDATE(vmalle1)
- .else
- .if \el == 3
- TLB_INVALIDATE(alle3)
- .else
- .error "EL must be 1 or 3"
- .endif
- .endif
+ /* Invalidate all TLB entries */
+ tlbi_invalidate_all \el
mov x7, x0
ldr x0, =mmu_cfg_params
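+ * enable_mmu_direct_el2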
* enable_mmu_direct_el3
*/
define_mmu_enable_func 1
+ define_mmu_enable_func 2
define_mmu_enable_func 3
if (ctx->xlat_regime == EL1_EL0_REGIME) {
assert(xlat_arch_current_el() >= 1U);
return (read_sctlr_el1() & SCTLR_M_BIT) != 0U;
+ } else if (ctx->xlat_regime == EL2_REGIME) {
+ assert(xlat_arch_current_el() >= 2U);
+ return (read_sctlr_el2() & SCTLR_M_BIT) != 0U;
} else {
assert(ctx->xlat_regime == EL3_REGIME);
assert(xlat_arch_current_el() >= 3U);
if (el == 1U) {
return (read_sctlr_el1() & SCTLR_C_BIT) != 0U;
+ } else if (el == 2U) {
+ return (read_sctlr_el2() & SCTLR_C_BIT) != 0U;
} else {
return (read_sctlr_el3() & SCTLR_C_BIT) != 0U;
}
if (xlat_regime == EL1_EL0_REGIME) {
return UPPER_ATTRS(UXN) | UPPER_ATTRS(PXN);
} else {
- assert(xlat_regime == EL3_REGIME);
+ assert((xlat_regime == EL2_REGIME) ||
+ (xlat_regime == EL3_REGIME));
return UPPER_ATTRS(XN);
}
}
if (xlat_regime == EL1_EL0_REGIME) {
assert(xlat_arch_current_el() >= 1U);
tlbivaae1is(TLBI_ADDR(va));
+ } else if (xlat_regime == EL2_REGIME) {
+ assert(xlat_arch_current_el() >= 2U);
+ tlbivae2is(TLBI_ADDR(va));
} else {
assert(xlat_regime == EL3_REGIME);
assert(xlat_arch_current_el() >= 3U);
* that are translated using TTBR1_EL1.
*/
tcr |= TCR_EPD1_BIT | (tcr_ps_bits << TCR_EL1_IPS_SHIFT);
+ } else if (xlat_regime == EL2_REGIME) {
+ tcr |= TCR_EL2_RES1 | (tcr_ps_bits << TCR_EL2_PS_SHIFT);
} else {
assert(xlat_regime == EL3_REGIME);
tcr |= TCR_EL3_RES1 | (tcr_ps_bits << TCR_EL3_PS_SHIFT);
if (current_el == 1U) {
tf_xlat_ctx.xlat_regime = EL1_EL0_REGIME;
+ } else if (current_el == 2U) {
+ tf_xlat_ctx.xlat_regime = EL2_REGIME;
} else {
assert(current_el == 3U);
tf_xlat_ctx.xlat_regime = EL3_REGIME;
#ifdef AARCH32
+#if !ERROR_DEPRECATED
void enable_mmu_secure(unsigned int flags)
+{
+ enable_mmu_svc_mon(flags);
+}
+
+void enable_mmu_direct(unsigned int flags)
+{
+ enable_mmu_direct_svc_mon(flags);
+}
+#endif
+
+void enable_mmu_svc_mon(unsigned int flags)
{
setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
tf_xlat_ctx.va_max_address, EL1_EL0_REGIME);
- enable_mmu_direct(flags);
+ enable_mmu_direct_svc_mon(flags);
+}
+
+void enable_mmu_hyp(unsigned int flags)
+{
+ setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
+ tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
+ tf_xlat_ctx.va_max_address, EL2_REGIME);
+ enable_mmu_direct_hyp(flags);
}
#else
enable_mmu_direct_el1(flags);
}
+void enable_mmu_el2(unsigned int flags)
+{
+ setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
+ tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
+ tf_xlat_ctx.va_max_address, EL2_REGIME);
+ enable_mmu_direct_el2(flags);
+}
+
void enable_mmu_el3(unsigned int flags)
{
setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
desc |= LOWER_ATTRS(AP_NO_ACCESS_UNPRIVILEGED);
}
} else {
- assert(ctx->xlat_regime == EL3_REGIME);
+ assert((ctx->xlat_regime == EL2_REGIME) ||
+ (ctx->xlat_regime == EL3_REGIME));
desc |= LOWER_ATTRS(AP_ONE_VA_RANGE_RES1);
}
assert(ctx != NULL);
assert(!ctx->initialized);
assert((ctx->xlat_regime == EL3_REGIME) ||
+ (ctx->xlat_regime == EL2_REGIME) ||
(ctx->xlat_regime == EL1_EL0_REGIME));
assert(!is_mmu_enabled_ctx(ctx));
tf_printf("DEV");
}
- if (xlat_regime == EL3_REGIME) {
- /* For EL3 only check the AP[2] and XN bits. */
+ if ((xlat_regime == EL3_REGIME) || (xlat_regime == EL2_REGIME)) {
+ /* For EL3 and EL2 only check the AP[2] and XN bits. */
tf_printf(((desc & LOWER_ATTRS(AP_RO)) != 0ULL) ? "-RO" : "-RW");
tf_printf(((desc & UPPER_ATTRS(XN)) != 0ULL) ? "-XN" : "-EXEC");
} else {
if (ctx->xlat_regime == EL1_EL0_REGIME) {
xlat_regime_str = "1&0";
+ } else if (ctx->xlat_regime == EL2_REGIME) {
+ xlat_regime_str = "2";
} else {
assert(ctx->xlat_regime == EL3_REGIME);
xlat_regime_str = "3";
assert(ctx != NULL);
assert(ctx->initialized);
assert((ctx->xlat_regime == EL1_EL0_REGIME) ||
+ (ctx->xlat_regime == EL2_REGIME) ||
(ctx->xlat_regime == EL3_REGIME));
virt_addr_space_size = (unsigned long long)ctx->va_max_address + 1ULL;
void bl32_plat_enable_mmu(uint32_t flags)
{
- enable_mmu_secure(flags);
+ enable_mmu_svc_mon(flags);
}