#include
#include
#include
#include

/*
 * Register use:
 *  x0-x3   Arguments
 *  x9-x15  Scratch
 *  x18     Shadow stack pointer (if enabled)
 *  x19-x28 Globals
 */
tmp             .req x9
tmp2            .req x10
wtmp2           .req w10
index           .req x11
canary          .req x12
ssp             .req x18

cpuid           .req x19
page_table0     .req x20
page_table1     .req x21
size            .req x22
supports_mte    .req x23

.section .text.boot
.globl arm_reset
arm_reset:
.globl _start
.type _start,STT_OBJECT
_start:
    /* This instruction is read by the bootloader to determine image type */
    bl      arm64_elX_to_el1

    /* Initialize VBAR to the temporary exception vector table */
    adrl    tmp, .Learly_exception_base
    msr     vbar_el1, tmp
    isb

    /* Check ID_AA64PFR1_EL1.MTE (bits [11:8]); a nonzero result here means MTE is implemented */
    mrs     tmp2, id_aa64pfr1_el1
    tst     tmp2, #0xe00
    cset    supports_mte, ne

#if WITH_KERNEL_VM
    /* enable caches so atomics and spinlocks work */
    mrs     tmp, sctlr_el1
    orr     tmp, tmp, #(1<<12)  /* Enable icache */
    orr     tmp, tmp, #(1<<2)   /* Enable dcache/ucache */
    orr     tmp, tmp, #(1<<3)   /* Enable Stack Alignment Check EL1 */
    orr     tmp, tmp, #(1<<4)   /* Enable Stack Alignment Check EL0 */
    cbz     supports_mte, .Ldont_set_mte_flags
    orr     tmp, tmp, #(1<<43)  /* Allocation Tag Access in EL1 */
    orr     tmp, tmp, #(1<<42)  /* Allocation Tag Access in EL0 */
    bic     tmp, tmp, #(1<<40)  /* No tag check faults in EL1 */
    orr     tmp, tmp, #(1<<38)  /* Tag check faults in EL0 are synchronous */
.Ldont_set_mte_flags:
    bic     tmp, tmp, #(1<<1)   /* Disable Alignment Checking for EL1 and EL0 */
    msr     sctlr_el1, tmp

    /* set up the mmu according to mmu_initial_mappings */

    /* load the base of the translation table and clear the table */
    adrl    page_table1, arm64_kernel_translation_table

    /* Prepare tt_trampoline page table */
    /* Calculate pagetable physical addresses */
    adrl    page_table0, tt_trampoline

#if WITH_SMP
    /*
     * Stash x0 as it will be clobbered.
     * We place it in size, as x0 contains the size passed to the entry point.
     */
    mov     size, x0

    /* Get the CPU number */
    bl      arm64_curr_cpu_num
    mov     cpuid, x0

    /* Restore registers */
    mov     x0, size

    cbnz    cpuid, .Ltt_trampoline_check_secondary
#endif

    /* Zero the top level kernel page table */
    mov     tmp, #0

    /* walk through all the entries in the translation table, setting them up */
.Lclear_top_page_table_loop:
    str     xzr, [page_table1, tmp, lsl #3]
    add     tmp, tmp, #1
    cmp     tmp, #MMU_KERNEL_PAGE_TABLE_ENTRIES_TOP
    bne     .Lclear_top_page_table_loop

    /* Prepare tt_trampoline page table */
    /* Zero tt_trampoline translation tables */
    mov     tmp, #0
.Lclear_tt_trampoline:
    str     xzr, [page_table0, tmp, lsl #3]
    add     tmp, tmp, #1
    cmp     tmp, #MMU_PAGE_TABLE_ENTRIES_IDENT
    blt     .Lclear_tt_trampoline

    /* Setup mapping at phys -> phys */
    /*
     * Map from the start of the kernel to the end of RAM
     * so we have enough pages for boot_alloc memory.
     */
    adr     index, _start
    lsr     tmp, index, #MMU_IDENT_TOP_SHIFT    /* tmp = paddr index */

    /* Check that the start index falls inside the table */
    cmp     tmp, #MMU_PAGE_TABLE_ENTRIES_IDENT
    b.hs    platform_early_halt

#if ARM64_BOOT_PROTOCOL_X0_MEMSIZE
    /*
     * The physical address of the end of RAM (exclusive) is (_start + x0).
     */
    add     index, index, x0
#elif ARM64_BOOT_PROTOCOL_X0_DTB
    /*
     * The physical address of the end of the kernel (exclusive) that can be used
     * before any dynamic memory allocations are made is &_end.
     * (The rest will be mapped in arm64_early_mmu_init.)
     */
    adrl    index, _end
#else
#error Unknown ARM64_BOOT_PROTOCOL
#endif

    /*
     * `index` is the first byte of memory after the kernel that we don't need
     * mapped at this point. We subtract one and round that down to a multiple
     * of 1 << MMU_IDENT_TOP_SHIFT