3 #include <arch/processor.h>
4 #include <arch/segment.h>
5 #include <xtf/asm_macros.h>
9 Stack frame layout: (first aligned to 16-byte boundary)
11 | Xen | Hardware | Notes |
12 |-------------------+-------------------+---------------|
14 |-------------------+-------------------+---------------|
18 | upcall_mask / %cs | %cs | |
20 | error_code | %rsp-> error_code | if applicable |
24 The %rcx and %r11 parameters are because Xen will typically SYSRET to the
25 entry point; they should be restored promptly.
27 The stubs then push an error_code (if required) to make a common layout for
28 the frame, then use the upper 32 bits of the error_code to stash additional
29 metadata. Currently just the entry vector.
33 .macro env_ADJUST_FRAME /* Environment specific exception entry. */
34 #if defined(CONFIG_PV)
35 /* Restore results of Xen SYSRET'ing to this point. */
41 .macro env_IRETQ /* Environment specific version of `iretq`. */
42 #if defined(CONFIG_PV)
44 push $0 /* Indicate that this isn't a SYSRET'able */
45 jmp HYPERCALL_iret /* situation, and use the 'iret' hypercall. */
48 iretq /* HVM guests use a real 'iretq' instruction. */
52 .macro exception_entry sym vec
57 .if !((1 << \vec) & X86_EXC_HAVE_EC)
58 /* Push dummy error code (if needed) to align stack. */
62 /* Store entry vector in the top 32 bits of error_code. */
/*
 * Instantiate one entry stub per architectural exception vector.  The
 * exception_entry macro pushes a dummy error code for vectors the hardware
 * gives none (per X86_EXC_HAVE_EC), so every stub lands on a common frame
 * layout with the vector stashed in the error_code's upper 32 bits.
 */
70 exception_entry DE X86_EXC_DE /* #DE  Divide Error. */
71 exception_entry DB X86_EXC_DB /* #DB  Debug. */
72 exception_entry NMI X86_EXC_NMI /* NMI  Non-Maskable Interrupt. */
73 exception_entry BP X86_EXC_BP /* #BP  Breakpoint. */
74 exception_entry OF X86_EXC_OF /* #OF  Overflow. */
75 exception_entry BR X86_EXC_BR /* #BR  BOUND Range Exceeded. */
76 exception_entry UD X86_EXC_UD /* #UD  Invalid Opcode. */
77 exception_entry NM X86_EXC_NM /* #NM  Device Not Available. */
78 exception_entry DF X86_EXC_DF /* #DF  Double Fault (error code). */
79 exception_entry TS X86_EXC_TS /* #TS  Invalid TSS (error code). */
80 exception_entry NP X86_EXC_NP /* #NP  Segment Not Present (error code). */
81 exception_entry SS X86_EXC_SS /* #SS  Stack-Segment Fault (error code). */
82 exception_entry GP X86_EXC_GP /* #GP  General Protection (error code). */
83 exception_entry PF X86_EXC_PF /* #PF  Page Fault (error code). */
84 exception_entry MF X86_EXC_MF /* #MF  x87 Floating-Point. */
85 exception_entry AC X86_EXC_AC /* #AC  Alignment Check (error code). */
86 exception_entry MC X86_EXC_MC /* #MC  Machine Check. */
87 exception_entry XM X86_EXC_XM /* #XM  SIMD Floating-Point. */
88 exception_entry VE X86_EXC_VE /* #VE  Virtualization Exception. */
95 mov %rsp, %rdi /* struct cpu_regs * */
99 add $8, %rsp /* Pop error_code/entry_vector. */
102 ENDFUNC(handle_exception)
105 ENTRY(entry_ret_to_kernel) /* int $X86_VEC_RET2KERN */
108 mov %rbp, %rsp /* Restore %rsp to exec_user_param()'s context. */
110 ENDFUNC(entry_ret_to_kernel)
112 ENTRY(exec_user_param) /* ulong (*fn)(ulong), ulong p1 */
115 /* Prepare to "call" exec_user_stub(). */
116 push $1f /* Fake return addr as if we'd called exec_user_stub(). */
117 mov %rsp, %rbp /* Stash %rsp for entry_ret_to_kernel(). */
119 /* Prepare an IRET frame. */
120 push exec_user_ss(%rip) /* SS */
122 push $user_stack + PAGE_SIZE
125 /* Apply and/or masks to eflags. */
126 mov exec_user_efl_and_mask(%rip), %rdx
128 mov exec_user_efl_or_mask(%rip), %rdx
131 push exec_user_cs(%rip) /* CS */
132 push $exec_user_stub /* RIP */
134 env_IRETQ /* Drop to user privilege. */
136 1: /* entry_ret_to_kernel() returns here with a sensible stack. */
140 ENDFUNC(exec_user_param)
142 .pushsection .text.user, "ax", @progbits
/*
 * exec_user_stub: runs at user privilege — exec_user_param() IRETs here
 * with %rdi = fn and %rsi = p1.  Kept in the dedicated .text.user section,
 * presumably so user-executable text is separate from kernel text — TODO
 * confirm against the section's mapping/permissions elsewhere in XTF.
 */
143 ENTRY(exec_user_stub)
144 xchg %rdi, %rsi /* Swap p1 to be first parameter to fn(). */
145 call *%rsi /* fn(p1) */
147 int $X86_VEC_RET2KERN /* Return to kernel privilege. */
148 ENDFUNC(exec_user_stub)
159 mov %rsp, %rdi /* struct cpu_regs * */
163 add $8, %rsp /* Pop error_code/entry_vector. */
166 ENDFUNC(entry_EVTCHN)
168 #if defined(CONFIG_PV)
177 mov %rsp, %rdi /* struct cpu_regs * */
182 movq $VGCF_in_syscall, (%rsp) /* Clobber error_code/entry_vector */
185 ENDFUNC(entry_SYSCALL)
187 ENTRY(entry_SYSENTER)
195 mov %rsp, %rdi /* struct cpu_regs * */
200 movq $0, (%rsp) /* Clobber error_code/entry_vector */
203 ENDFUNC(entry_SYSENTER)
204 #endif /* CONFIG_PV */
209 * indent-tabs-mode: nil