/*
* sheevaplug reboot code
*
* R11 is used by the loader as a temporary, so avoid it.
*/
#include "arm.s"
/*
* Turn off MMU, then copy the new kernel to its correct location
* in physical memory. Then jump to the start of the kernel.
*/
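
/*
 * The overall flow, as a hypothetical C-level sketch (the real code
 * must run with the MMU off and without a normal stack, so it cannot
 * be ordinary C):
 *
 *	cachesoff();
 *	mmuinvalidate();
 *	mmudisable();
 *	memmove((void *)entry, (void *)code, size);
 *	cacheuwbinv();
 *	(*(void (*)(void))entry)();
 */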
/* main(PADDR(entry), PADDR(code), size); */
TEXT main(SB), 1, $-4
MOVW $setR12(SB), R12
MOVW R0, p1+0(FP) /* destination, passed in R0 */
/* copy in arguments from frame */
MOVW R0, R8 /* entry point */
MOVW p2+4(FP), R9 /* source */
MOVW n+8(FP), R10 /* byte count */
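/* WAVE, defined in arm.s, writes one progress character to the console uart */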
WAVE('R')
BL cachesoff(SB)
/* now back in 29- or 26-bit addressing, mainly for SB */
/* turn the MMU off */
WAVE('e')
MOVW $KSEGM, R7
MOVW $PHYSDRAM, R0
BL _r15warp(SB)
BIC R7, R12 /* SB */
BIC R7, R13 /* SP */
/* don't care about R14 */
WAVE('b')
BL mmuinvalidate(SB)
WAVE('o')
BL mmudisable(SB)
WAVE('o')
MOVW R9, R4 /* restore regs across function calls */
MOVW R10, R5
MOVW R8, R6
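/*
 * the current stack may lie in memory about to be overwritten by the
 * copy, so switch to a private stack just below the destination image.
 */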
/* set up a new stack for local vars and memmove args */
MOVW R6, SP /* tiny trampoline stack */
SUB $(0x20 + 4), SP /* back up before a.out header */
MOVW R14, -48(SP) /* store return addr */
SUB $48, SP /* allocate stack frame */
MOVW R6, 44(SP) /* save dest/entry */
MOVW R5, 40(SP) /* save count */
WAVE('t')
MOVW R6, 0(SP)
MOVW R6, 4(SP) /* push dest */
MOVW R6, R0
MOVW R4, 8(SP) /* push src */
MOVW R5, 12(SP) /* push size */
BL memmove(SB)
MOVW 44(SP), R6 /* restore R6 (dest/entry) */
MOVW 40(SP), R5 /* restore R5 (count) */
WAVE('-')
/*
* flush caches
*/
BL cacheuwbinv(SB)
WAVE('>')
WAVE('\r')
WAVE('\n')
/*
* jump to kernel entry point. Note the true kernel entry point is
* the virtual address KZERO|R6, but this must wait until
* the MMU is enabled by the kernel in l.s
*/
ORR R6, R6 /* NOP: avoid link bug */
B (R6)
/*
* turn the caches off, double map 0 & KZERO, invalidate TLBs, revert to
* tiny addresses. upon return, it will be safe to turn off the mmu.
*/
TEXT cachesoff(SB), 1, $-4
MOVW $(PsrDirq|PsrDfiq|PsrMsvc), R0
MOVW R0, CPSR
MOVW $KADDR(0x100-4), R7 /* just before this code */
MOVW R14, (R7) /* save link */
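/* cachesoff has no stack frame ($-4), so park the link in memory across the BL below */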
BL cacheuwbinv(SB)
MRC CpSC, 0, R0, C(CpCONTROL), C(0)
BIC $(CpCwb|CpCicache|CpCdcache|CpCalign), R0
MCR CpSC, 0, R0, C(CpCONTROL), C(0)
BARRIERS
/* redo double map of 0, KZERO */
MOVW $(L1+L1X(PHYSDRAM)), R4 /* address of PTE for 0 */
MOVW $PTEDRAM, R2 /* PTE bits */
// MOVW $PTEIO, R2 /* PTE bits */
MOVW $PHYSDRAM, R3
MOVW $512, R5
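/* write 512 1MiB section descriptors, identity-mapping the first 512MiB of DRAM */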
_ptrdbl:
ORR R3, R2, R1 /* first identity-map 0 to 0, etc. */
MOVW R1, (R4)
ADD $4, R4 /* bump PTE address */
ADD $MiB, R3 /* bump pa */
SUB.S $1, R5
BNE _ptrdbl
BARRIERS
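/* invalidate the data and unified TLBs so the new map takes effect */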
MOVW $0, R0
MCR CpSC, 0, R0, C(CpTLB), C(CpTLBinvd), CpTLBinv
MCR CpSC, 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinv
BARRIERS
/* back to 29- or 26-bit addressing, mainly for SB */
MRC CpSC, 0, R0, C(CpCONTROL), C(0)
BIC $(CpCd32|CpCi32), R0
MCR CpSC, 0, R0, C(CpCONTROL), C(0)
BARRIERS
MOVW $KADDR(0x100-4), R7 /* just before this code */
MOVW (R7), R14 /* restore link */
RET
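
/*
 * warp the return address in R14 from its KSEGM virtual form into the
 * window given in R0 (here PHYSDRAM), so that RET continues execution
 * at the caller's physical address.
 */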
TEXT _r15warp(SB), 1, $-4
BIC $KSEGM, R14
ORR R0, R14
RET

TEXT mmudisable(SB), 1, $-4
MRC CpSC, 0, R0, C(CpCONTROL), C(0)
BIC $(CpChv|CpCmmu|CpCdcache|CpCicache|CpCwb), R0
MCR CpSC, 0, R0, C(CpCONTROL), C(0)
BARRIERS
RET

TEXT mmuinvalidate(SB), 1, $-4 /* invalidate all */
MOVW $0, R0
MCR CpSC, 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinv
BARRIERS
RET

TEXT cacheuwbinv(SB), 1, $-4 /* D+I writeback+invalidate */
BARRIERS
MOVW CPSR, R3 /* splhi */
ORR $(PsrDirq), R3, R1
MOVW R1, CPSR
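/*
 * the 'test, clean and invalidate' op sets the condition codes;
 * loop while dirty D-cache lines remain.
 */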
_uwbinv: /* D writeback+invalidate */
MRC CpSC, 0, PC, C(CpCACHE), C(CpCACHEwbi), CpCACHEtest
BNE _uwbinv
MOVW $0, R0 /* I invalidate */
MCR CpSC, 0, R0, C(CpCACHE), C(CpCACHEinvi), CpCACHEall
/* drain L1 write buffer, also drains L2 eviction buffer on sheeva */
BARRIERS
MCR CpSC, CpL2, R0, C(CpTESTCFG), C(CpTCl2flush), CpTCl2all /* writeback all of l2 */
BARRIERS
MCR CpSC, CpL2, R0, C(CpTESTCFG), C(CpTCl2inv), CpTCl2all /* invalidate all of l2 */
BARRIERS
MOVW R3, CPSR /* splx */
RET