
[PATCH 03/14] arm: implement startup code.
arm: implement startup code.

Add xen/arch/arm/xen/start.S, the boot-time entry point: it clears the
initial level-1 translation table, installs 1:1 1 MB section mappings for
the regions described in the boot memory table plus a mapping of the
hypervisor's virtual base onto its load address, programs TTBR0/TTBR1, the
domain access control and memory remap registers, enables the MMU, clears
.bss, sets up per-mode, per-CPU stacks and jumps to start_xen (or, on
secondary CPUs under SMP, start_xen_on_slave_cpu). xen/include/asm-arm/mmu.h
gains the page-table descriptor, TTBR attribute and CP15 accessor
definitions this code relies on.

xen/arch/arm/xen/Makefile | 1 +
xen/arch/arm/xen/start.S | 273 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
xen/include/asm-arm/mmu.h | 198 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Signed-off-by: Jaemin Ryu <jm77.ryu@samsung.com>

diff -r e6ac8b686aa6 xen/arch/arm/xen/Makefile
--- a/xen/arch/arm/xen/Makefile Fri Feb 03 16:07:33 2012 +0900
+++ b/xen/arch/arm/xen/Makefile Fri Feb 03 16:26:34 2012 +0900
@@ -1,3 +1,4 @@
+obj-y += start.o
obj-y += setup.o
obj-y += mm.o
obj-y += irq.o
diff -r e6ac8b686aa6 xen/arch/arm/xen/start.S
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/arm/xen/start.S Fri Feb 03 16:26:34 2012 +0900
@@ -0,0 +1,273 @@
+/*
+ * start.S
+ *
+ * Copyright (C) 2008-2011 Samsung Electronics
+ * Sang-bum Suh <sbuk.suh@samsung.com>
+ * Jaemin Ryu <jm77.ryu@samsung.com>
+ *
+ * Secure Xen on ARM architecture designed by Sang-bum Suh consists of
+ * Xen on ARM and the associated access control.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <xen/config.h>
+#include <asm/cpu-domain.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/system.h>
+#include <asm/mmu.h>
+#include <asm/asm-macros.h>
+
+/*
+ * Initial stack for core 0
+ */
+#define SVC_STACK_SIZE STACK_SIZE
+
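+@ pa: convert the link-time (virtual) address in \rd into a physical
+@ address, using the physical PC as reference (\rs is a scratch register).
+@ This assumes the image is loaded at a 1 MB aligned physical address and
+@ that the call site lies within the image's first 1 MB section.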
+.macro pa rd, rs
+1:
+ adr \rs, 1b
+ lsr \rs, \rs, #20
+ sub \rd, \rd, #XEN_VIRT_START
+ add \rd, \rd, \rs, lsl #20
+.endm
+
+ .section .head
+ENTRY(start)
+ msr cpsr_c, #(PSR_F_BIT | PSR_I_BIT | PSR_MODE_SVC)
+
+#ifdef SMP
+ mrc ACTLR(r2)
+ orr r2, r2, #(ACTLR_SMP) | (ACTLR_FW)
+ mcr ACTLR(r2)
+#endif
+
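+ @ The 16 KB level-1 translation table lives immediately below 'start'.
+ @ Zero all 4096 entries, four words per iteration.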
+ adr r0, start
+ mov r1, r0
+ sub r0, r0, #0x4000
+ mov r2, #0
+1: str r2, [r1, #-4]!
+ str r2, [r1, #-4]!
+ str r2, [r1, #-4]!
+ str r2, [r1, #-4]!
+ cmp r0, r1
+ bne 1b
+
+ ldr r2, =(XEN_VIRT_START >> 20)
+ ldr r7, =(L1E_TYPE_HYPERVISOR)
+
+ @ Start section no.
+ mov r3, pc
+ lsr r3, r3, #20
+
+ @ Initial hypervisor mapping: point XEN_VIRT_START at the 1 MB section we are executing from
+ orr r4, r7, r3, lsl #20
+ str r4, [r0, r2, lsl #2]
+ @add r4, r4, #0x100000
+ @add r2, r2, #1
+ @str r4, [r0, r2, lsl #2]
+
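+ @ Walk the boot memory table (_smemtable.._ememtable). Each entry is
+ @ four words {base, size, type, mmu_flags}; every region is mapped
+ @ 1:1 with 1 MB sections using the flags from the table.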
+ ldr r5, =_smemtable
+ pa r5, r6
+ ldr r6, =_ememtable
+ pa r6, r7
+
+1:
+ cmp r5, r6
+ beq 3f
+
+ @ r1 : base
+ @ r2 : size
+ @ r3 : type
+ @ r4 : mmu_flags
+
+ ldmia r5!, {r1, r2, r3, r4}
+ lsr r1, r1, #20
+ orr r4, r4, r1, lsl #20
+
+ @ Round the size up to a whole number of 1 MB sections
+ add r2, r2, #0x100000
+ sub r2, r2, #1
+ lsr r2, r2, #20
+2:
+ str r4, [r0, r1, lsl #2]
+ add r1, r1, #1
+ add r4, r4, #0x100000
+ adds r2, r2, #-1
+ bhi 2b
+ b 1b
+3:
+
+ @ Load Translation Table Base
+ orr r0, r0, #(TTB_FLAGS)
+ mcr TTBR0(r0)
+ mcr TTBR1(r0)
+
+ @ Grant full access to coprocessors CP10 and CP11 (VFP/NEON) via CPACR
+ mrc p15, 0, r5, c1, c0, 2
+ orr r5, r5, #((3 << (10 * 2)) | (3 << (11 * 2)))
+ mcr p15, 0, r5, c1, c0, 2
+
+ @ Load DAC
+ ldr r5, =0x55555555
+ mcr DACR(r5)
+
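+ @ Program the memory remap registers PRRR and NMRR, used together
+ @ with SCTLR.TRE which is enabled below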
+ ldr r5, =0xFF0A89A8
+ ldr r6, =0x40E040E0
+ mcr p15, 0, r5, c10, c2, 0
+ mcr p15, 0, r6, c10, c2, 1
+
+ @ Turn on MMU
+ ldr r0, =(SCTLR_TRE | SCTLR_SW | SCTLR_Z | SCTLR_I | SCTLR_C | SCTLR_A | SCTLR_M)
+ mcr SCTLR(r0)
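+ @ NOPs to pad the pipeline while the MMU enable takes effect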
+ mov r0, r0
+ mov r0, r0
+ mov r0, r0
+
+ @ Invalidate I/D TLBs
+ mov ip, #0
+ mcr p15, 0, ip, c8, c7, 0
+ dsb
+ isb
+
+ @ Clear BSS section
+ adr r0, 2f
+ ldmia r0, {r1, r2}
+ mov r0, #0
+1:
+ str r0, [r1], #4
+ cmp r1, r2
+ blo 1b
+
+ /* Stack Setup */
+ @ Get processor ID
+ mrc MPIDR(r4)
+ and r4, r4, #15
+
+ @ r4 = cpu_id * STACK_SIZE
+ mov r1, #STACK_SIZE
+ mul r4, r4, r1
+
+ msr cpsr_c, #PSR_MODE_IRQ | PSR_I_BIT | PSR_F_BIT
+ ldr sp, =(irq_stacks + STACK_SIZE)
+ add sp, sp, r4
+
+ msr cpsr_c, #PSR_MODE_ABT | PSR_I_BIT | PSR_F_BIT
+ ldr sp, =(abt_stacks + STACK_SIZE)
+ add sp, sp, r4
+
+ msr cpsr_c, #PSR_MODE_UND | PSR_I_BIT | PSR_F_BIT
+ ldr sp, =(und_stacks + STACK_SIZE)
+ add sp, sp, r4
+
+ msr cpsr_c, #PSR_MODE_SVC | PSR_I_BIT | PSR_F_BIT
+ ldr sp, =(svc_stacks + STACK_SIZE)
+ add sp, sp, r4
+
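+ @ Jump to start_xen through its absolute (virtual) address now that
+ @ the MMU is on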
+ adr r12, 3f
+ ldr pc, [r12]
+
+2: .word _sbss
+ .word _ebss
+
+3:
+ .long start_xen
+
+#ifdef SMP
+ /*
+ * Common entry point for secondary CPUs.
+ *
+ * Ensure that we're in SVC mode, and IRQs are disabled.
+ */
+ .section .head
+ENTRY(slave_cpu_start)
+ msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | PSR_MODE_SVC
+
+ mrc ACTLR(r2)
+ orr r2, r2, #(ACTLR_SMP) | (ACTLR_FW)
+ mcr ACTLR(r2)
+
+ @ Load Translation Table Base
+ adr r4, start
+ sub r4, r4, #0x4000
+ orr r4, r4, #(TTB_FLAGS)
+ mcr TTBR0(r4)
+ mcr TTBR1(r4)
+
+ @ Grant full access to coprocessors CP10 and CP11 (VFP/NEON) via CPACR
+ mrc p15, 0, r5, c1, c0, 2
+ orr r5, r5, #((3 << (10 * 2)) | (3 << (11 * 2)))
+ mcr p15, 0, r5, c1, c0, 2
+
+ @ Load DAC
+ ldr r5, =0x55555555
+ mcr DACR(r5)
+
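+ @ Program the memory remap registers PRRR and NMRR (TEX remap), as on
+ @ the boot CPU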
+ ldr r5, =0xFF0A89A8
+ ldr r6, =0x40E040E0
+ mcr p15, 0, r5, c10, c2, 0
+ mcr p15, 0, r6, c10, c2, 1
+
+ @ Turn on MMU
+ ldr r0, =(SCTLR_TRE | SCTLR_SW | SCTLR_Z | SCTLR_I | SCTLR_C | SCTLR_A | SCTLR_M)
+ mcr SCTLR(r0)
+ mov r0, r0
+ mov r0, r0
+ mov r0, r0
+
+ @ Invalidate I, D TLBs
+ mov ip, #0
+ mcr p15, 0, ip, c8, c7, 0
+ dsb
+ isb
+
+ /* Stack Setup */
+ @ get processor id
+ mrc MPIDR(r4)
+ and r4, r4, #15
+
+ @ r4 = cpu_id * STACK_SIZE
+ mov r1, #STACK_SIZE
+ mul r4, r4, r1
+
+ msr cpsr_c, #PSR_MODE_IRQ | PSR_I_BIT | PSR_F_BIT
+ ldr sp, =(irq_stacks + STACK_SIZE)
+ add sp, sp, r4
+
+ msr cpsr_c, #PSR_MODE_ABT | PSR_I_BIT | PSR_F_BIT
+ ldr sp, =(abt_stacks + STACK_SIZE)
+ add sp, sp, r4
+
+ msr cpsr_c, #PSR_MODE_UND | PSR_I_BIT | PSR_F_BIT
+ ldr sp, =(und_stacks + STACK_SIZE)
+ add sp, sp, r4
+
+ msr cpsr_c, #PSR_MODE_SVC | PSR_I_BIT | PSR_F_BIT
+ ldr sp, =(svc_stacks + STACK_SIZE)
+ add sp, sp, r4
+
+ adr r12, 2f
+ ldmia r12, {lr, pc}
+
+2:
+ .long 2b
+ .long start_xen_on_slave_cpu
+#endif
+
+ .section .bss.stack_aligned,"w"
+svc_stacks: .space (SVC_STACK_SIZE * MAX_PHYS_CPUS)
+irq_stacks: .space (SVC_STACK_SIZE * MAX_PHYS_CPUS)
+und_stacks: .space (SVC_STACK_SIZE * MAX_PHYS_CPUS)
+abt_stacks: .space (SVC_STACK_SIZE * MAX_PHYS_CPUS)
+fiq_stacks: .space (SVC_STACK_SIZE * MAX_PHYS_CPUS)
diff -r e6ac8b686aa6 xen/include/asm-arm/mmu.h
--- a/xen/include/asm-arm/mmu.h Fri Feb 03 16:07:33 2012 +0900
+++ b/xen/include/asm-arm/mmu.h Fri Feb 03 16:26:34 2012 +0900
@@ -1,11 +1,209 @@
#ifndef __ARM_MMU_H__
#define __ARM_MMU_H__

+#include <asm/system.h>
+#include <asm/cpu-domain.h>
+
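+/*
+ * Definitions for first-level descriptors (sections and page tables) in
+ * the ARM short-descriptor translation table format.
+ */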
+#define L1E_FLAG_MASK (0x3FF)
+
+#define L1E_TYPE_FAULT (0x00)
+#define L1E_TYPE_TABLE (0x01)
+#define L1E_TYPE_SECTION (0x02)
+#define L1E_TYPE_MASK (0x03)
+
+#define L1E_BIT4 (1 << 4)
+
+#define L1E_AP_SRW_UNO (0x01 << 10)
+#define L1E_AP_SRW_URO (0x02 << 10)
+#define L1E_AP_SRW_URW (0x03 << 10)
+
+#define L1E_BUFFERABLE (0x04)
+#define L1E_CACHEABLE (0x08)
+
+#define L1E_TEX(x) ((x) << 12)
+#define L1E_APX (1 << 15)
+#define L1E_S (1 << 16)
+#define L1E_nG (1 << 17)
+
+#define L1E_STRONGORDERED (0)
+#define L1E_DEVICE (L1E_TEX(1))
+#define L1E_WRITEBACK (L1E_CACHEABLE | L1E_BUFFERABLE)
+#define L1E_WRITETHROUGH (L1E_CACHEABLE)
+#define L1E_WRITEALLOC (L1E_TEX(1) | L1E_CACHEABLE | L1E_BUFFERABLE)
+#define L1E_SHARED (0)
+
+#define L1E_DOMAIN_HYP (DOMAIN_HYP << 5)
+#define L1E_DOMAIN_SVC (DOMAIN_SVC << 5)
+#define L1E_DOMAIN_USR (DOMAIN_USR << 5)
+#define L1E_DOMAIN_IO (DOMAIN_IO << 5)
+
+#define L1E_WBWA (L1E_TEX(1) | L1E_WRITEBACK)
+
+#define SECTION_SHIFT (20)
+#define SECTION_SIZE (1 << SECTION_SHIFT)
+#define SECTION_MASK (~(SECTION_SIZE - 1))
+
+#define L1E_TYPE_HYPERVISOR (L1E_TYPE_SECTION | L1E_DOMAIN_HYP | L1E_S | L1E_AP_SRW_UNO | L1E_WRITEALLOC)
+#define L1E_TYPE_GUEST (L1E_TYPE_SECTION | L1E_DOMAIN_SVC | L1E_S | L1E_AP_SRW_URW | L1E_WRITEALLOC)
+#define L1E_TYPE_DEVICE (L1E_TYPE_SECTION | L1E_DOMAIN_IO | L1E_S | L1E_AP_SRW_URW | L1E_DEVICE)
+
+/*
+ * Definition for Page Table Entries
+ */
+
+#define L2E_FLAG_MASK (0xFFF)
+
+#define L2E_TYPE_FAULT (0x00)
+#define L2E_TYPE_LARGE (0x01)
+#define L2E_TYPE_SMALL (0x02)
+#define L2E_TYPE_TINY (0x03)
+#define L2E_TYPE_EXT (0x02)
+
+#define L2E_TYPE_MASK (0x03)
+
+#define L2E_BUFFERABLE (0x04)
+#define L2E_CACHEABLE (0x08)
+
+#define L1E_SHIFT (20)
+#define L2E_SHIFT (12)
+
+#define L2E_EXT_XN (1 << 0)
+#define L2E_EXT_AP_MASK (3 << 4)
+#define L2E_EXT_AP0 (1 << 4)
+#define L2E_EXT_AP1 (2 << 4)
+#define L2E_EXT_AP_UNO_SRO (0 << 4)
+#define L2E_EXT_AP_UNO_SRW (L2E_EXT_AP0)
+#define L2E_EXT_AP_URO_SRW (L2E_EXT_AP1)
+#define L2E_EXT_AP_URW_SRW (L2E_EXT_AP1|L2E_EXT_AP0)
+#define L2E_EXT_TEX(x) ((x) << 6)
+#define L2E_EXT_APX (1 << 9)
+#define L2E_EXT_COHERENT (1 << 9)
+#define L2E_EXT_SHARED (1 << 10)
+#define L2E_EXT_NG (1 << 11)
+
+
+#define L1_TABLE_ENTRIES (4096)
+#define L2_TABLE_ENTRIES (256)
+
+#define L1_TABLE_SIZE (0x4000)
+
+#define L2E_GUEST_AP_MASK L2E_EXT_AP_MASK
+#define L2E_GUEST_AP_NO L2E_EXT_AP_UNO_SRW
+#define L2E_GUEST_AP_RO L2E_EXT_AP_URO_SRW
+#define L2E_GUEST_AP_RW L2E_EXT_AP_URW_SRW
+
+#define L1E_GUEST_TABLE (L1E_DOMAIN_SVC | L1E_TYPE_TABLE)
+#define L1E_VECTOR_TABLE (L1E_DOMAIN_SVC | L1E_TYPE_TABLE)
+
+#define L2E_GUEST_PAGE (L2E_EXT_SHARED | L2E_GUEST_AP_RW | L2E_EXT_TEX(1) | L2E_BUFFERABLE | L2E_CACHEABLE | L2E_TYPE_EXT)
+
+#define L2E_VECTOR_PAGE (L2E_GUEST_AP_RO | L2E_EXT_TEX(1) | L2E_BUFFERABLE | L2E_CACHEABLE | L2E_TYPE_EXT)
+#define L2E_GRANT_PAGE (L2E_TYPE_EXT | L2E_EXT_SHARED | L2E_EXT_TEX(1) | L2E_BUFFERABLE | L2E_CACHEABLE | L2E_GUEST_AP_RW)
+#define L2E_SHARED_INFO (L2E_TYPE_EXT | L2E_EXT_TEX(1) | L2E_EXT_XN | L2E_EXT_SHARED | L2E_BUFFERABLE | L2E_CACHEABLE | L2E_GUEST_AP_RW)
+#define L2E_DEVICE (L2E_TYPE_EXT | L2E_EXT_TEX(1) | L2E_EXT_XN | L2E_EXT_SHARED | L2E_GUEST_AP_RW)
+
#define PADDR_BITS 32
#define PADDR_MASK ((1UL << PADDR_BITS) - 1)

#define VADDR_BITS 32
#define VADDR_MASK ((1UL << VADDR_BITS) - 1)

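+/*
+ * TTBR0/TTBR1 attribute bits: cacheability and shareability of the
+ * translation table walk.
+ */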
+#define TTB_S (1 << 1)
+#define TTB_RGN_NC (0 << 3)
+#define TTB_RGN_OC_WBWA (1 << 3)
+#define TTB_RGN_OC_WT (2 << 3)
+#define TTB_RGN_OC_WB (3 << 3)
+#define TTB_NOS (1 << 5)
+#define TTB_IRGN_NC ((0 << 0) | (0 << 6))
+#define TTB_IRGN_WBWA ((0 << 0) | (1 << 6))
+#define TTB_IRGN_WT ((1 << 0) | (0 << 6))
+#define TTB_IRGN_WB ((1 << 0) | (1 << 6))
+
+
+#define TTB_FLAGS (TTB_IRGN_WBWA | TTB_S | TTB_NOS | TTB_RGN_OC_WBWA)
+
+#define TTB_MASK (~0x3FFF)
+
+#ifndef __ASSEMBLY__
+
+#include <asm/types.h>
+
+#define l2e_val(x) ((x).l2e)
+#define l1e_val(x) ((x).l1e)
+
+#define MK_L2E(x, flags) ((l2e_t) { ((unsigned long)(x) & (~L2E_FLAG_MASK)) | (flags) } )
+#define MK_L1E(x, flags) ((l1e_t) { ((unsigned long)(x) & (~L1E_FLAG_MASK)) | (flags) } )
+
+#define l1t_index(x) (((unsigned long)(x) >> L1E_SHIFT) & (L1_TABLE_ENTRIES - 1))
+#define l2t_index(x) (((unsigned long)(x) >> L2E_SHIFT) & (L2_TABLE_ENTRIES - 1))
+
+#define l1_linear_offset_xen(va) \
+ (l1_linear_offset((xen_translation_table), va))
+
+typedef struct { unsigned long l2e; } l2e_t;
+typedef struct { unsigned long l1e; } l1e_t;
+
+static inline l1e_t *l1_linear_offset(l1e_t *l1e, unsigned long virt)
+{
+ return l1e + l1t_index(virt);
+}
+
+static inline l2e_t *l2_linear_offset(l1e_t *l1e, unsigned long virt)
+{
+ l2e_t *l2e;
+
+ l2e = (l2e_t *) (l1e_val(*l1e) & ~L1E_FLAG_MASK);
+ l2e = l2e + l2t_index(virt);
+
+ return l2e;
+}
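+
+/*
+ * Example (illustrative): install a 1 MB hypervisor section mapping of
+ * physical address 'pa' at virtual address 'va':
+ *
+ *	l1e_t *pl1e = l1_linear_offset_xen(va);
+ *	*pl1e = MK_L1E(pa, L1E_TYPE_HYPERVISOR);
+ */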
+
+static inline unsigned int get_dacr(void)
+{
+ unsigned int val;
+
+ asm("mrc p15, 0, %0, c3, c0, 0" : "=r" (val) : : "cc");
+
+ return val;
+}
+
+
+static inline void set_dacr(unsigned long val)
+{
+ asm volatile("mcr p15, 0, %0, c3, c0, 0" : : "r" (val) : "cc");
+}
+
+
+static inline unsigned int get_ttbr(void)
+{
+ unsigned int val;
+
+ asm("mrc p15, 0, %0, c2, c0, 0" : "=r" (val) : : "cc");
+
+ return val;
+}
+
+static inline void set_ttbr(unsigned int ttb)
+{
+ asm volatile("mcr p15, 0, %0, c2, c0, 0" : : "r" (ttb) : "cc");
+
+ isb();
+}
+
+static inline void set_contextidr(unsigned long id)
+{
+ asm("mcr p15, 0, %0, c13, c0, 1" : : "r" (id) : "cc");
+}
+
+static inline unsigned int get_contextidr(void)
+{
+ unsigned int val;
+
+ asm("mrc p15, 0, %0, c13, c0, 1" : "=r" (val) : : "cc");
+
+ return val;
+}
+
+#endif
#endif