SWAPGS_UNSAFE_STACK
movl %esp,%r8d
CFI_REGISTER rsp,r8
- movq PER_CPU_VAR(kernel_stack),%rsp
+ movq PER_CPU_VAR(cpu_tss + TSS_sp0),%rsp
ENABLE_INTERRUPTS(CLBR_NONE)
/* Zero-extending 32-bit regs, do not remove */
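
In this compat entry the user stack pointer is first parked in %r8d (the
movl zero-extends it into %r8), GS is switched to the kernel's, and the
kernel stack is now taken from the per-CPU TSS's sp0 slot rather than the
old kernel_stack variable. The substitution works because on x86-64 the
top-of-stack padding is zero, so the sp0 value maintained at context
switch is exactly the top of the current task's kernel stack. A rough
C-side counterpart from the same era, for orientation only (a sketch, not
part of this patch):

    static inline unsigned long current_top_of_stack(void)
    {
    #ifdef CONFIG_X86_64
            /* sp0 tracks the top of the current task's kernel stack */
            return this_cpu_read_stable(cpu_tss.x86_tss.sp0);
    #else
            /* sp0 is special in and around vm86 mode on 32-bit */
            return this_cpu_read_stable(cpu_current_top_of_stack);
    #endif
    }
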
#else /* !__ASSEMBLY__ */
/* Load thread_info address into "reg" */
+#ifdef CONFIG_X86_32
#define GET_THREAD_INFO(reg) \
- _ASM_MOV PER_CPU_VAR(kernel_stack),reg ; \
+ _ASM_MOV PER_CPU_VAR(cpu_current_top_of_stack),reg ; \
_ASM_SUB $(THREAD_SIZE),reg ;
+#else
+#define GET_THREAD_INFO(reg) \
+ _ASM_MOV PER_CPU_VAR(cpu_tss + TSS_sp0),reg ; \
+ _ASM_SUB $(THREAD_SIZE),reg ;
+#endif
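
GET_THREAD_INFO() keeps the same shape in both branches: load the per-CPU
top-of-kernel-stack value, then step down by THREAD_SIZE to reach the
thread_info that sits at the bottom of the stack. The C-level accessor of
the same vintage does the identical arithmetic; shown here only for
comparison (a sketch, not part of this hunk):

    static inline struct thread_info *current_thread_info(void)
    {
            /* thread_info lives at the bottom of the kernel stack */
            return (struct thread_info *)(current_top_of_stack() - THREAD_SIZE);
    }
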
/*
* ASM operand which evaluates to a 'thread_info' address of
GLOBAL(system_call_after_swapgs)
movq %rsp,PER_CPU_VAR(rsp_scratch)
- movq PER_CPU_VAR(kernel_stack),%rsp
+ movq PER_CPU_VAR(cpu_tss + TSS_sp0),%rsp
/* Construct struct pt_regs on stack */
pushq_cfi $__USER_DS /* pt_regs->ss */
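
At system_call_after_swapgs nothing can be pushed yet, because %rsp still
points at the user stack; the user value is therefore stashed in the
per-CPU rsp_scratch slot, the kernel stack is loaded from cpu_tss +
TSS_sp0, and rsp_scratch is pushed back as pt_regs->sp while pt_regs is
built. rsp_scratch itself is just an ordinary per-CPU variable, roughly
(declaration site assumed, not shown in this excerpt):

    /* scratch slot for the user stack pointer during syscall entry */
    DEFINE_PER_CPU(unsigned long, rsp_scratch);
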
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/segment.h>
+#include <asm/asm-offsets.h>
#include <xen/interface/xen.h>
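
The new <asm/asm-offsets.h> include is what makes TSS_sp0 visible to this
assembly file: it is not a plain macro but a structure offset emitted at
build time by the asm-offsets machinery. Roughly, an entry like the
following (a sketch; the asm-offsets side is outside this excerpt) is
what lets .S code address the sp0 field of struct tss_struct:

    /* emitted into asm-offsets.h so assembly can use cpu_tss + TSS_sp0 */
    OFFSET(TSS_sp0, tss_struct, x86_tss.sp0);
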
* still with the kernel gs, so we can easily switch back
*/
movq %rsp, PER_CPU_VAR(rsp_scratch)
- movq PER_CPU_VAR(kernel_stack), %rsp
+ movq PER_CPU_VAR(cpu_tss + TSS_sp0), %rsp
pushq $__USER_DS
pushq PER_CPU_VAR(rsp_scratch)
* still with the kernel gs, so we can easily switch back
*/
movq %rsp, PER_CPU_VAR(rsp_scratch)
- movq PER_CPU_VAR(kernel_stack), %rsp
+ movq PER_CPU_VAR(cpu_tss + TSS_sp0), %rsp
pushq $__USER32_DS
pushq PER_CPU_VAR(rsp_scratch)
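
Both syscall stubs in the Xen assembly, the 64-bit one pushing $__USER_DS
and the 32-bit compat one pushing $__USER32_DS, receive the identical
substitution: they already run with the kernel GS, so all they need is
the new source for the kernel stack pointer before they build pt_regs the
same way the native entry does.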