3 #include <arch/processor.h>
4 #include <arch/segment.h>
5 #include <xtf/asm_macros.h>
11 | Xen | Hardware | Notes |
12 |-------------------+-------------------+----------------------|
14 |-------------------+-------------------+----------------------|
15 | %ss | %ss | only on stack switch |
16 | %esp | %esp | only on stack switch |
18 | upcall_mask / %cs | %cs | |
20 | %esp-> error_code | %esp-> error_code | if applicable |
22 These stubs push an error_code of zero (if applicable) to make a common layout
23 for the frame. A further word of metadata is then pushed, currently just
24 containing the entry vector.
/*
 * env_IRET - environment-specific way to return from an exception frame.
 *
 * A PV guest must not execute a real `iret`; it uses Xen's 'iret'
 * hypercall instead.  HVM guests run on (virtual) hardware and use the
 * real instruction.
 */
.macro env_IRET /* Environment specific version of `iret`. */
#if defined(CONFIG_PV)
    jmp HYPERCALL_iret /* PV guests use the 'iret' hypercall. */
    iretl /* HVM guests use a real 'iret' instruction. */
/*
 * exception_entry - generate an entry stub for exception vector \vec,
 * labelled via \sym.
 *
 * Hardware pushes an error code only for the vectors in X86_EXC_HAVE_EC;
 * for the others a dummy zero is pushed so every frame has a uniform
 * layout.  A metadata word (the entry vector) is then pushed on top.
 */
.macro exception_entry sym vec
.if !((1 << \vec) & X86_EXC_HAVE_EC)
    /* Push dummy error code (if needed) to align stack. */
    /* Push metadata (entry vector). */
/*
 * Instantiate one entry stub per architectural exception vector, from
 * #DE (Divide Error, vector 0) through #VE (Virtualisation Exception).
 */
exception_entry DE X86_EXC_DE
exception_entry DB X86_EXC_DB
exception_entry NMI X86_EXC_NMI
exception_entry BP X86_EXC_BP
exception_entry OF X86_EXC_OF
exception_entry BR X86_EXC_BR
exception_entry UD X86_EXC_UD
exception_entry NM X86_EXC_NM
exception_entry DF X86_EXC_DF
exception_entry TS X86_EXC_TS
exception_entry NP X86_EXC_NP
exception_entry SS X86_EXC_SS
exception_entry GP X86_EXC_GP
exception_entry PF X86_EXC_PF
exception_entry MF X86_EXC_MF
exception_entry AC X86_EXC_AC
exception_entry MC X86_EXC_MC
exception_entry XM X86_EXC_XM
exception_entry VE X86_EXC_VE
    /*
     * Tail of the common exception handler.
     * NOTE(review): the ENTRY()/register-save portion is not visible in
     * this fragment; presumably %esp points at a struct cpu_regs here,
     * which is passed to a C handler - confirm against the full file.
     */
    mov $__KERN_DS, %eax /* Restore data segments. */

    push %esp /* struct cpu_regs * */

    add $8, %esp /* Pop error_code/entry_vector. */
ENDFUNC(handle_exception)
/*
 * entry_ret_to_kernel - software-interrupt gate used by exec_user_stub()
 * (via `int $X86_VEC_RET2KERN`) to regain kernel privilege.  %ebp was
 * stashed by exec_user_param() before dropping to user mode, so it still
 * identifies that caller's stack context.
 */
ENTRY(entry_ret_to_kernel) /* int $X86_VEC_RET2KERN */
    mov %ebp, %esp /* Restore %esp to exec_user_param()'s context. */
ENDFUNC(entry_ret_to_kernel)
/*
 * exec_user_param - execute fn(p1) at user privilege, returning its
 * result to the kernel-mode caller.
 *
 * Builds an IRET frame by hand (SS, user %esp, EFLAGS, CS, EIP) and
 * drops to exec_user_stub() in .text.user via env_IRET.  The stub
 * re-enters the kernel through entry_ret_to_kernel(), which lands on
 * the `1:` label below with %esp restored from %ebp.
 */
ENTRY(exec_user_param)
 * 1*4(%esp) ulong (*fn)(ulong)
 * 0*4(%esp) return address

    /* Prepare to "call" exec_user_stub(). */
    /* The +1 in the offsets skips a word pushed after entry - NOTE(review):
     * the push is in elided lines; confirm against the full file. */
    mov (1+1)*4(%esp), %eax /* Pass fn() in %eax */
    mov (1+2)*4(%esp), %ecx /* Pass p1 in %ecx */
    push $1f /* Fake return addr as if we'd called exec_user_stub(). */
    mov %esp, %ebp /* Stash %esp for entry_ret_to_kernel(). */

    /* Prepare an IRET frame. */
    push exec_user_ss /* SS */
    /* User stack: top of the dedicated user_stack page. */
    push $user_stack + PAGE_SIZE

    /* Apply and/or masks to eflags.  NOTE(review): the pushed flags word
     * and the and/or applying %edx to it are in elided lines. */
    mov exec_user_efl_and_mask, %edx
    mov exec_user_efl_or_mask, %edx

    push exec_user_cs /* CS */
    push $exec_user_stub /* EIP */

    env_IRET /* Drop to user privilege. */

1: /* entry_ret_to_kernel() returns here with a sensible stack. */
ENDFUNC(exec_user_param)
/*
 * exec_user_stub - user-mode trampoline: calls fn(p1) then re-enters the
 * kernel.  Placed in the user-accessible .text.user section so it is
 * executable at user privilege.
 */
.pushsection .text.user, "ax", @progbits
ENTRY(exec_user_stub)
 * For SMEP/SMAP safety, no shared stack can be used, so all
 * parameters are passed in registers.
    push %ecx /* Push p1 for fn()'s call frame. */
    call *%eax /* fn(p1) */

    int $X86_VEC_RET2KERN /* Return to kernel privilege. */
ENDFUNC(exec_user_stub)
    /*
     * Tail of the event-channel upcall entry point.
     * NOTE(review): the ENTRY()/register-save portion is elided; frame
     * layout appears to match handle_exception() (error_code +
     * entry_vector words on top of struct cpu_regs).
     */
    mov $__KERN_DS, %eax /* Restore data segments. */

    push %esp /* struct cpu_regs * */

    add $8, %esp /* Pop error_code/entry_vector. */
ENDFUNC(entry_EVTCHN)
/* SYSCALL entry is only registered for PV guests. */
#if defined(CONFIG_PV)
    /*
     * Tail of the SYSCALL entry point (ENTRY()/register save elided);
     * same common-frame convention as the exception stubs.
     */
    mov $__KERN_DS, %eax /* Restore data segments. */

    push %esp /* struct cpu_regs * */

    add $8, %esp /* Pop error_code/entry_vector. */
ENDFUNC(entry_SYSCALL)
/*
 * entry_SYSENTER - fast-system-call entry point.  Interior (register
 * save etc.) is elided here; the visible tail follows the same
 * common-frame convention as the exception stubs.
 */
ENTRY(entry_SYSENTER)
    mov $__KERN_DS, %eax /* Restore data segments. */

    push %esp /* struct cpu_regs * */

    add $8, %esp /* Pop error_code/entry_vector. */
ENDFUNC(entry_SYSENTER)
243 * indent-tabs-mode: nil