/*
 * Xen Test Framework — entry_32.S
 *
 * 32-bit exception, event channel and system call entry stubs.
 */
1 #include <arch/idt.h>
2 #include <arch/page.h>
3 #include <arch/processor.h>
4 #include <arch/segment.h>
5 #include <xtf/asm_macros.h>
6 
7 /*
8 
9 Stack frame layout:
10 
11 | Xen | Hardware | Notes |
12 |-------------------+-------------------+----------------------|
13 | <r> | <r> | <l> |
14 |-------------------+-------------------+----------------------|
15 | %ss | %ss | only on stack switch |
16 | %esp | %esp | only on stack switch |
17 | eflags | eflags | |
18 | upcall_mask / %cs | %cs | |
19 | %eip | %eip | |
20 | %esp-> error_code | %esp-> error_code | if applicable |
21 
22 These stubs push an error_code of zero (if applicable) to make a common layout
23 for the frame. A further word of metadata is then pushed, currently just
24 containing the entry vector.
25 
26 */
27 
/*
 * env_IRET: environment-specific way of returning from an
 * interrupt/exception frame.
 *
 * PV guests cannot execute a real `iret` to return to guest context;
 * they must use Xen's 'iret' hypercall, reached via the
 * HYPERCALL_iret stub.  HVM guests use a real `iretl`.
 */
.macro env_IRET /* Environment specific version of `iret`. */
#if defined(CONFIG_PV)

    jmp HYPERCALL_iret      /* PV guests use the 'iret' hypercall. */

#else
    iretl                   /* HVM guests use a real 'iret' instruction. */
#endif
.endm
37 
/*
 * exception_entry <sym> <vec>: emit the IDT entry stub entry_<sym>
 * for exception vector <vec>.
 *
 * Hardware pushes an error code only for some vectors
 * (X86_EXC_HAVE_EC is the bitmap of those); for the others a dummy
 * zero is pushed so every stub produces the common frame layout
 * described at the top of this file.  The vector number is then
 * pushed as metadata before tail-jumping to the shared handler.
 */
.macro exception_entry sym vec

ENTRY(entry_\sym)

    .if !((1 << \vec) & X86_EXC_HAVE_EC)
    /* Push dummy error code (if needed) to align stack. */
    push $0
    .endif

    /* Push metadata (entry vector). */
    push $\vec

    jmp handle_exception

ENDFUNC(entry_\sym)
.endm
54 
/* Instantiate one entry stub per architectural exception vector. */
exception_entry DE  X86_EXC_DE
exception_entry DB  X86_EXC_DB
exception_entry NMI X86_EXC_NMI
exception_entry BP  X86_EXC_BP
exception_entry OF  X86_EXC_OF
exception_entry BR  X86_EXC_BR
exception_entry UD  X86_EXC_UD
exception_entry NM  X86_EXC_NM
exception_entry DF  X86_EXC_DF
exception_entry TS  X86_EXC_TS
exception_entry NP  X86_EXC_NP
exception_entry SS  X86_EXC_SS
exception_entry GP  X86_EXC_GP
exception_entry PF  X86_EXC_PF
exception_entry MF  X86_EXC_MF
exception_entry AC  X86_EXC_AC
exception_entry MC  X86_EXC_MC
exception_entry XM  X86_EXC_XM
exception_entry VE  X86_EXC_VE
74 
    .align 16
/*
 * Common exception handler.
 *
 * Entered from the exception_entry stubs with error_code and
 * entry_vector already on the stack.  Completes a struct cpu_regs by
 * saving the data segments and (via SAVE_ALL) the GPRs, loads the
 * kernel data segments, and calls do_exception(regs).  On return, all
 * state is restored, the error_code/entry_vector pair is discarded,
 * and control returns to the interrupted context.
 */
handle_exception:

    push %es
    push %ds

    SAVE_ALL

    mov $__KERN_DS, %eax    /* Restore data segments. */
    mov %eax, %ds
    mov %eax, %es

    push %esp               /* struct cpu_regs * */
    call do_exception
    add $4, %esp            /* Pop the cpu_regs pointer argument. */

    RESTORE_ALL

    pop %ds
    pop %es

    add $8, %esp            /* Pop error_code/entry_vector. */

    env_IRET
ENDFUNC(handle_exception)
100 
101 
/*
 * Software interrupt used by exec_user_stub() to return to kernel
 * privilege.  exec_user_param() stashed its %esp in %ebp with a fake
 * return address (the 1: label) on top, so restoring %esp from %ebp
 * and executing `ret` resumes exec_user_param() past its env_IRET.
 */
ENTRY(entry_ret_to_kernel) /* int $X86_VEC_RET2KERN */
    mov %ebp, %esp          /* Restore %esp to exec_user_param()'s context. */
    ret
ENDFUNC(entry_ret_to_kernel)
106 
/*
 * exec_user_param(fn, p1): call fn(p1) at user privilege.
 *
 * Hand-builds an IRET frame targeting exec_user_stub() on the
 * dedicated user stack, passing fn and p1 in registers.  %esp is
 * stashed in %ebp so that entry_ret_to_kernel() can restore this
 * stack frame when the user code executes `int $X86_VEC_RET2KERN`.
 */
ENTRY(exec_user_param)
    /*
     * Stack layout on entry:
     *   2*4(%esp) ulong p1
     *   1*4(%esp) ulong (*fn)(ulong)
     *   0*4(%esp) return address
     */
    push %ebp               /* Preserve callee-saved %ebp. */

    /* Prepare to "call" exec_user_stub(). */
    mov (1+1)*4(%esp), %eax /* Pass fn() in %eax */
    mov (1+2)*4(%esp), %ecx /* Pass p1 in %ecx */
    push $1f                /* Fake return addr as if we'd called exec_user_stub(). */
    mov %esp, %ebp          /* Stash %esp for entry_ret_to_kernel(). */

    /* Prepare an IRET frame. */
    push exec_user_ss       /* SS */
                            /* ESP: top of the user stack page. */
    push $user_stack + PAGE_SIZE
    pushf                   /* EFLAGS */

    /* Apply and/or masks to eflags, adjusting them for user context. */
    mov exec_user_efl_and_mask, %edx
    and %edx, (%esp)
    mov exec_user_efl_or_mask, %edx
    or %edx, (%esp)

    push exec_user_cs       /* CS */
    push $exec_user_stub    /* EIP */

    env_IRET                /* Drop to user privilege. */

1:  /* entry_ret_to_kernel() returns here with a sensible stack. */
    pop %ebp
    ret

ENDFUNC(exec_user_param)
143 
.pushsection .text.user, "ax", @progbits
/*
 * User-mode trampoline, entered via exec_user_param()'s hand-built
 * IRET frame with fn in %eax and p1 in %ecx.
 */
ENTRY(exec_user_stub)
    /*
     * For SMEP/SMAP safety, no shared stack can be used, so all
     * parameters are passed in registers.
     */
    push %ecx               /* Push p1 for fn()'s call frame. */
    call *%eax              /* fn(p1) */

    int $X86_VEC_RET2KERN   /* Return to kernel privilege. */
ENDFUNC(exec_user_stub)
.popsection
156 
/*
 * Xen event channel upcall entry point.
 *
 * No hardware error code exists on this path, so a dummy zero plus a
 * software-defined metadata vector are pushed to reproduce the common
 * frame layout before calling do_evtchn(regs).
 *
 * NOTE(review): the 0x200 metadata value is identical to
 * entry_SYSENTER's below, so regs->entry_vector cannot distinguish
 * the two paths — confirm this duplication is intentional.
 */
ENTRY(entry_EVTCHN)
    push $0                 /* Dummy error code. */
    push $0x200             /* Metadata (software-defined entry vector). */

    push %es
    push %ds

    SAVE_ALL

    mov $__KERN_DS, %eax    /* Restore data segments. */
    mov %eax, %ds
    mov %eax, %es

    push %esp               /* struct cpu_regs * */
    call do_evtchn
    add $4, %esp            /* Pop the cpu_regs pointer argument. */

    RESTORE_ALL

    pop %ds
    pop %es

    add $8, %esp            /* Pop error_code/entry_vector. */

    env_IRET
ENDFUNC(entry_EVTCHN)
183 
184 #if defined(CONFIG_PV)
/*
 * `syscall` entry point (PV guests only).
 *
 * Pushes a dummy error code and 0x100 as a software-defined metadata
 * vector to reproduce the common frame layout, then calls
 * do_syscall(regs).  Returns via the 'iret' hypercall directly: this
 * block is CONFIG_PV-only, so this is exactly what env_IRET would
 * expand to.
 */
ENTRY(entry_SYSCALL)
    push $0                 /* Dummy error code. */
    push $0x100             /* Metadata (software-defined entry vector). */

    push %es
    push %ds

    SAVE_ALL

    mov $__KERN_DS, %eax    /* Restore data segments. */
    mov %eax, %ds
    mov %eax, %es

    push %esp               /* struct cpu_regs * */
    call do_syscall
    add $4, %esp            /* Pop the cpu_regs pointer argument. */

    RESTORE_ALL

    pop %ds
    pop %es

    add $8, %esp            /* Pop error_code/entry_vector. */

    jmp HYPERCALL_iret
ENDFUNC(entry_SYSCALL)
211 
/*
 * `sysenter` entry point (PV guests only).
 *
 * Pushes a dummy error code and 0x200 as a software-defined metadata
 * vector to reproduce the common frame layout, then calls
 * do_sysenter(regs).  Returns via the 'iret' hypercall directly: this
 * block is CONFIG_PV-only, so this is exactly what env_IRET would
 * expand to.
 */
ENTRY(entry_SYSENTER)
    push $0                 /* Dummy error code. */
    push $0x200             /* Metadata (software-defined entry vector). */

    push %es
    push %ds

    SAVE_ALL

    mov $__KERN_DS, %eax    /* Restore data segments. */
    mov %eax, %ds
    mov %eax, %es

    push %esp               /* struct cpu_regs * */
    call do_sysenter
    add $4, %esp            /* Pop the cpu_regs pointer argument. */

    RESTORE_ALL

    pop %ds
    pop %es

    add $8, %esp            /* Pop error_code/entry_vector. */

    jmp HYPERCALL_iret
ENDFUNC(entry_SYSENTER)
238 #endif
239 
240 /*
241  * Local variables:
242  * tab-width: 8
243  * indent-tabs-mode: nil
244  * End:
245  */