#include <asm/asm-offsets.h>
#include <asm/bug.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/exception-64s.h>
#else
#include <asm/exception-64e.h>
#endif
#include <asm/feature-fixups.h>
#include <asm/head-64.h>
#include <asm/hw_irq.h>
#include <asm/kup.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/ptrace.h>

	.align 7

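/*
 * DEBUG_SRR_VALID checks that (H)SRR0/1 still match the NIP/MSR values saved
 * in the interrupt frame, trapping with a once-only warning on mismatch.
 * This verifies the PACASRR_VALID/PACAHSRR_VALID reload optimization used by
 * the exit paths below. The low 2 bits of SRR0 and _NIP are masked off
 * before the comparison.
 */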
.macro DEBUG_SRR_VALID srr
#ifdef CONFIG_PPC_RFI_SRR_DEBUG
	.ifc \srr,srr
	mfspr	r11,SPRN_SRR0
	ld	r12,_NIP(r1)
	clrrdi  r11,r11,2
	clrrdi  r12,r12,2
100:	tdne	r11,r12
	EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
	mfspr	r11,SPRN_SRR1
	ld	r12,_MSR(r1)
100:	tdne	r11,r12
	EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
	.else
	mfspr	r11,SPRN_HSRR0
	ld	r12,_NIP(r1)
	clrrdi  r11,r11,2
	clrrdi  r12,r12,2
100:	tdne	r11,r12
	EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
	mfspr	r11,SPRN_HSRR1
	ld	r12,_MSR(r1)
100:	tdne	r11,r12
	EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
	.endif
#endif
.endm

#ifdef CONFIG_PPC_BOOK3S
.macro system_call_vectored name trapnr
	.globl system_call_vectored_\name
system_call_vectored_\name:
_ASM_NOKPROBE_SYMBOL(system_call_vectored_\name)
	SCV_INTERRUPT_TO_KERNEL
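	/*
	 * Switch to the kernel stack saved in the PACA and build the
	 * interrupt frame. The scv vector entry captured NIP (from LR) and
	 * MSR (from CTR) in r11/r12, and the user r1 becomes the frame's
	 * back-chain.
	 */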
	mr	r10,r1
	ld	r1,PACAKSAVE(r13)
	std	r10,0(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	std	r2,GPR2(r1)
	LOAD_PACA_TOC()
	mfcr	r12
	li	r11,0
	/* Save syscall parameters in r3-r8 */
	SAVE_GPRS(3, 8, r1)
	/* Zero r9-r12; this should only be required when restoring all GPRs */
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
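	/* The vector entry stashed the user r13 in r9 before loading the PACA */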
	std	r9,GPR13(r1)
	SAVE_NVGPRS(r1)
	std	r11,_XER(r1)
	std	r11,_LINK(r1)
	std	r11,_CTR(r1)

	li	r11,\trapnr
	std	r11,_TRAP(r1)
	std	r12,_CCR(r1)
	std	r3,ORIG_GPR3(r1)
	LOAD_REG_IMMEDIATE(r11, STACK_FRAME_REGS_MARKER)
	std	r11,STACK_INT_FRAME_MARKER(r1)		/* "regs" marker */
	/* Calling convention has r3 = regs, r4 = orig r0 */
	addi	r3,r1,STACK_INT_FRAME_REGS
	mr	r4,r0

BEGIN_FTR_SECTION
	HMT_MEDIUM
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/*
	 * scv enters with MSR[EE]=1 and is immediately considered soft-masked.
	 * The entry vector already sets PACAIRQSOFTMASK to IRQS_ALL_DISABLED,
	 * and interrupts may be masked and pending already.
	 * system_call_exception() will call trace_hardirqs_off(), which means
	 * interrupts could already have been blocked before trace_hardirqs_off,
	 * but this is the best we can do.
	 */

	/*
	 * Zero user registers to prevent influencing speculative execution
	 * state of kernel code.
	 */
	SANITIZE_SYSCALL_GPRS()
	bl	CFUNC(system_call_exception)

.Lsyscall_vectored_\name\()_exit:
	addi	r4,r1,STACK_INT_FRAME_REGS
	li	r5,1 /* scv */
	bl	CFUNC(syscall_exit_prepare)
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
.Lsyscall_vectored_\name\()_rst_start:
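	/*
	 * If anything other than the hard-disable flag became pending while
	 * we were soft-masked, restart the exit so the pending work can be
	 * handled before returning to userspace.
	 */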
	lbz	r11,PACAIRQHAPPENED(r13)
	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
	bne-	syscall_vectored_\name\()_restart
	li	r11,IRQS_ENABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	li	r11,0
	stb	r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS

	ld	r2,_CCR(r1)
	ld	r4,_NIP(r1)
	ld	r5,_MSR(r1)

BEGIN_FTR_SECTION
	stdcx.	r0,0,r1			/* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

BEGIN_FTR_SECTION
	HMT_MEDIUM_LOW
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	SANITIZE_RESTORE_NVGPRS()
	cmpdi	r3,0
	bne	.Lsyscall_vectored_\name\()_restore_regs

	/* rfscv returns with LR->NIA and CTR->MSR */
	mtlr	r4
	mtctr	r5

	/*
	 * Could zero these as per ABI, but we may consider a stricter ABI
	 * which preserves these if libc implementations can benefit, so
	 * restore them for now until further measurement is done.
	 */
	REST_GPR(0, r1)
	REST_GPRS(4, 8, r1)
	/* Zero volatile regs that may contain sensitive kernel data */
	ZEROIZE_GPRS(9, 12)
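	/*
	 * XER is volatile across scv. Setting it from the just-restored user
	 * r0 scrubs any kernel data from it without an extra zeroing
	 * instruction.
	 */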
	mtspr	SPRN_XER,r0

	/*
	 * We don't need to restore AMR on the way back to userspace for KUAP.
	 * The value of AMR only matters while we're in the kernel.
	 */
	mtcr	r2
	REST_GPRS(2, 3, r1)
	REST_GPR(13, r1)
	REST_GPR(1, r1)
	RFSCV_TO_USER
	b	.	/* prevent speculative execution */

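	/*
	 * Full register restore (e.g. for ptrace). The user LR and CTR must
	 * be reloaded, so rfscv (which takes NIA and MSR from LR and CTR)
	 * cannot be used; return via SRR0/1 and rfid instead.
	 */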
.Lsyscall_vectored_\name\()_restore_regs:
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r5

	ld	r3,_CTR(r1)
	ld	r4,_LINK(r1)
	ld	r5,_XER(r1)

	HANDLER_RESTORE_NVGPRS()
	REST_GPR(0, r1)
	mtcr	r2
	mtctr	r3
	mtlr	r4
	mtspr	SPRN_XER,r5
	REST_GPRS(2, 13, r1)
	REST_GPR(1, r1)
	RFI_TO_USER
.Lsyscall_vectored_\name\()_rst_end:

syscall_vectored_\name\()_restart:
_ASM_NOKPROBE_SYMBOL(syscall_vectored_\name\()_restart)
	GET_PACA(r13)
	ld	r1,PACA_EXIT_SAVE_R1(r13)
	LOAD_PACA_TOC()
	ld	r3,RESULT(r1)
	addi	r4,r1,STACK_INT_FRAME_REGS
	li	r11,IRQS_ALL_DISABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	bl	CFUNC(syscall_exit_restart)
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
	b	.Lsyscall_vectored_\name\()_rst_start
1:

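/*
 * The exit window above runs soft-masked but with MSR[EE] still set. These
 * tables mark it as soft-masked for the masked-interrupt handling and give
 * the address to restart it at if an interrupt does come in.
 */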
SOFT_MASK_TABLE(.Lsyscall_vectored_\name\()_rst_start, 1b)
RESTART_TABLE(.Lsyscall_vectored_\name\()_rst_start, .Lsyscall_vectored_\name\()_rst_end, syscall_vectored_\name\()_restart)

.endm

system_call_vectored common 0x3000

/*
 * We instantiate another entry copy for the SIGILL variant, with TRAP=0x7ff0,
 * which system_call_exception tests for when r0 is -1 (as set by the vector
 * entry code).
 */
system_call_vectored sigill 0x7ff0

#endif /* CONFIG_PPC_BOOK3S */

	.balign IFETCH_ALIGN_BYTES
	.globl system_call_common_real
system_call_common_real:
_ASM_NOKPROBE_SYMBOL(system_call_common_real)
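	/*
	 * Real-mode entry point: switch to the kernel MSR to turn the MMU on,
	 * then fall through to the virtual-mode handler below.
	 */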
	ld	r10,PACAKMSR(r13)	/* get MSR value for kernel */
	mtmsrd	r10

	.balign IFETCH_ALIGN_BYTES
	.globl system_call_common
system_call_common:
_ASM_NOKPROBE_SYMBOL(system_call_common)
	mr	r10,r1
	ld	r1,PACAKSAVE(r13)
	std	r10,0(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	std	r2,GPR2(r1)
#ifdef CONFIG_PPC_E500
START_BTB_FLUSH_SECTION
	BTB_FLUSH(r10)
END_BTB_FLUSH_SECTION
#endif
	LOAD_PACA_TOC()
	mfcr	r12
	li	r11,0
	/* Save syscall parameters in r3-r8 */
	SAVE_GPRS(3, 8, r1)
	/* Zero r9-r12; this should only be required when restoring all GPRs */
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r9,GPR13(r1)
	SAVE_NVGPRS(r1)
	std	r11,_XER(r1)
	std	r11,_CTR(r1)
	mflr	r10

	/*
	 * This clears CR0.SO (bit 28), which is the error indication on
	 * return from this system call.
	 */
	rldimi	r12,r11,28,(63-28)
	li	r11,0xc00
	std	r10,_LINK(r1)
	std	r11,_TRAP(r1)
	std	r12,_CCR(r1)
	std	r3,ORIG_GPR3(r1)
	LOAD_REG_IMMEDIATE(r11, STACK_FRAME_REGS_MARKER)
	std	r11,STACK_INT_FRAME_MARKER(r1)		/* "regs" marker */
	/* Calling convention has r3 = regs, r4 = orig r0 */
	addi	r3,r1,STACK_INT_FRAME_REGS
	mr	r4,r0

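	/*
	 * SRR0/1 still hold this syscall's NIP and MSR, matching what was
	 * saved to the frame above, so flag them valid: the exit path can
	 * then skip the slow mtspr reload if nothing clobbers them meanwhile.
	 */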
#ifdef CONFIG_PPC_BOOK3S
	li	r11,1
	stb	r11,PACASRR_VALID(r13)
#endif

	/*
	 * We always enter the kernel from userspace with irqs soft-enabled and
	 * nothing pending. system_call_exception() will call
	 * trace_hardirqs_off().
	 */
	li	r11,IRQS_ALL_DISABLED
	stb	r11,PACAIRQSOFTMASK(r13)
#ifdef CONFIG_PPC_BOOK3S
	li	r12,-1 /* Set MSR_EE and MSR_RI */
	mtmsrd	r12,1
#else
	wrteei	1
#endif

	/*
	 * Zero user registers to prevent influencing speculative execution
	 * state of kernel code.
	 */
	SANITIZE_SYSCALL_GPRS()
	bl	CFUNC(system_call_exception)

.Lsyscall_exit:
	addi	r4,r1,STACK_INT_FRAME_REGS
	li	r5,0 /* !scv */
	bl	CFUNC(syscall_exit_prepare)
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
#ifdef CONFIG_PPC_BOOK3S
.Lsyscall_rst_start:
	lbz	r11,PACAIRQHAPPENED(r13)
	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
	bne-	syscall_restart
#endif
	li	r11,IRQS_ENABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	li	r11,0
	stb	r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS

	ld	r2,_CCR(r1)
	ld	r6,_LINK(r1)
	mtlr	r6

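	/*
	 * Skip reloading SRR0/1 if the PACA flag says they still match the
	 * frame, i.e. no other interrupt clobbered them since entry.
	 */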
#ifdef CONFIG_PPC_BOOK3S
	lbz	r4,PACASRR_VALID(r13)
	cmpdi	r4,0
	bne	1f
	li	r4,0
	stb	r4,PACASRR_VALID(r13)
#endif
	ld	r4,_NIP(r1)
	ld	r5,_MSR(r1)
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r5
1:
	DEBUG_SRR_VALID srr

BEGIN_FTR_SECTION
	stdcx.	r0,0,r1			/* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	SANITIZE_RESTORE_NVGPRS()
	cmpdi	r3,0
	bne	.Lsyscall_restore_regs
	/* Zero volatile regs that may contain sensitive kernel data */
	ZEROIZE_GPR(0)
	ZEROIZE_GPRS(4, 12)
	mtctr	r0
	mtspr	SPRN_XER,r0
.Lsyscall_restore_regs_cont:

BEGIN_FTR_SECTION
	HMT_MEDIUM_LOW
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/*
	 * We don't need to restore AMR on the way back to userspace for KUAP.
	 * The value of AMR only matters while we're in the kernel.
	 */
	mtcr	r2
	REST_GPRS(2, 3, r1)
	REST_GPR(13, r1)
	REST_GPR(1, r1)
	RFI_TO_USER
	b	.	/* prevent speculative execution */

.Lsyscall_restore_regs:
	ld	r3,_CTR(r1)
	ld	r4,_XER(r1)
	HANDLER_RESTORE_NVGPRS()
	mtctr	r3
	mtspr	SPRN_XER,r4
	REST_GPR(0, r1)
	REST_GPRS(4, 12, r1)
	b	.Lsyscall_restore_regs_cont
.Lsyscall_rst_end:

#ifdef CONFIG_PPC_BOOK3S
syscall_restart:
_ASM_NOKPROBE_SYMBOL(syscall_restart)
	GET_PACA(r13)
	ld	r1,PACA_EXIT_SAVE_R1(r13)
	LOAD_PACA_TOC()
	ld	r3,RESULT(r1)
	addi	r4,r1,STACK_INT_FRAME_REGS
	li	r11,IRQS_ALL_DISABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	bl	CFUNC(syscall_exit_restart)
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
	b	.Lsyscall_rst_start
1:

SOFT_MASK_TABLE(.Lsyscall_rst_start, 1b)
RESTART_TABLE(.Lsyscall_rst_start, .Lsyscall_rst_end, syscall_restart)
#endif

	/*
	 * If MSR EE/RI was never enabled, IRQs were not reconciled, NVGPRs were
	 * not touched, and no exit work was created, then this can be used.
	 */
	.balign IFETCH_ALIGN_BYTES
	.globl fast_interrupt_return_srr
fast_interrupt_return_srr:
_ASM_NOKPROBE_SYMBOL(fast_interrupt_return_srr)
	kuap_check_amr r3, r4
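	/* MSR_PR of the saved MSR selects the user vs kernel return path */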
	ld	r5,_MSR(r1)
	andi.	r0,r5,MSR_PR
#ifdef CONFIG_PPC_BOOK3S
	beq	1f
	kuap_user_restore r3, r4
	b	.Lfast_user_interrupt_return_srr
1:	kuap_kernel_restore r3, r4
	andi.	r0,r5,MSR_RI
	li	r3,0 /* 0 return value, no EMULATE_STACK_STORE */
	bne+	.Lfast_kernel_interrupt_return_srr
	addi	r3,r1,STACK_INT_FRAME_REGS
	bl	CFUNC(unrecoverable_exception)
	b	. /* should not get here */
#else
	bne	.Lfast_user_interrupt_return_srr
	b	.Lfast_kernel_interrupt_return_srr
#endif

.macro interrupt_return_macro srr
	.balign IFETCH_ALIGN_BYTES
	.globl interrupt_return_\srr
interrupt_return_\srr\():
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\())
	ld	r4,_MSR(r1)
	andi.	r0,r4,MSR_PR
	beq	interrupt_return_\srr\()_kernel
interrupt_return_\srr\()_user: /* make backtraces match the _kernel variant */
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user)
	addi	r3,r1,STACK_INT_FRAME_REGS
	bl	CFUNC(interrupt_exit_user_prepare)
#ifndef CONFIG_INTERRUPT_SANITIZE_REGISTERS
	cmpdi	r3,0
	bne-	.Lrestore_nvgprs_\srr
.Lrestore_nvgprs_\srr\()_cont:
#endif
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
#ifdef CONFIG_PPC_BOOK3S
.Linterrupt_return_\srr\()_user_rst_start:
	lbz	r11,PACAIRQHAPPENED(r13)
	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
	bne-	interrupt_return_\srr\()_user_restart
#endif
	li	r11,IRQS_ENABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	li	r11,0
	stb	r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS

.Lfast_user_interrupt_return_\srr\():
	SANITIZE_RESTORE_NVGPRS()
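	/*
	 * Reload (H)SRR0/1 from the frame only if the PACA flag says they may
	 * have been clobbered since entry; the flag is cleared on the way out
	 * in either case.
	 */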
#ifdef CONFIG_PPC_BOOK3S
	.ifc \srr,srr
	lbz	r4,PACASRR_VALID(r13)
	.else
	lbz	r4,PACAHSRR_VALID(r13)
	.endif
	cmpdi	r4,0
	li	r4,0
	bne	1f
#endif
	ld	r11,_NIP(r1)
	ld	r12,_MSR(r1)
	.ifc \srr,srr
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
1:
#ifdef CONFIG_PPC_BOOK3S
	stb	r4,PACASRR_VALID(r13)
#endif
	.else
	mtspr	SPRN_HSRR0,r11
	mtspr	SPRN_HSRR1,r12
1:
#ifdef CONFIG_PPC_BOOK3S
	stb	r4,PACAHSRR_VALID(r13)
#endif
	.endif
	DEBUG_SRR_VALID \srr

#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
	lbz	r4,PACAIRQSOFTMASK(r13)
	tdnei	r4,IRQS_ENABLED
#endif

BEGIN_FTR_SECTION
	ld	r10,_PPR(r1)
	mtspr	SPRN_PPR,r10
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

BEGIN_FTR_SECTION
	stdcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
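	/*
	 * On CPUs where stcx. checks the reservation address, a larx is used
	 * instead: it displaces any existing reservation, and the new one on
	 * r1 is never paired with a stcx. so it is harmless.
	 */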
	ldarx	r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	ld	r3,_CCR(r1)
	ld	r4,_LINK(r1)
	ld	r5,_CTR(r1)
	ld	r6,_XER(r1)
	li	r0,0

	REST_GPRS(7, 13, r1)

	mtcr	r3
	mtlr	r4
	mtctr	r5
	mtspr	SPRN_XER,r6

	REST_GPRS(2, 6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	.ifc \srr,srr
	RFI_TO_USER
	.else
	HRFI_TO_USER
	.endif
	b	.	/* prevent speculative execution */
.Linterrupt_return_\srr\()_user_rst_end:

#ifndef CONFIG_INTERRUPT_SANITIZE_REGISTERS
.Lrestore_nvgprs_\srr\():
	REST_NVGPRS(r1)
	b	.Lrestore_nvgprs_\srr\()_cont
#endif

#ifdef CONFIG_PPC_BOOK3S
interrupt_return_\srr\()_user_restart:
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user_restart)
	GET_PACA(r13)
	ld	r1,PACA_EXIT_SAVE_R1(r13)
	LOAD_PACA_TOC()
	addi	r3,r1,STACK_INT_FRAME_REGS
	li	r11,IRQS_ALL_DISABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	bl	CFUNC(interrupt_exit_user_restart)
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
	b	.Linterrupt_return_\srr\()_user_rst_start
1:

SOFT_MASK_TABLE(.Linterrupt_return_\srr\()_user_rst_start, 1b)
RESTART_TABLE(.Linterrupt_return_\srr\()_user_rst_start, .Linterrupt_return_\srr\()_user_rst_end, interrupt_return_\srr\()_user_restart)
#endif

	.balign IFETCH_ALIGN_BYTES
interrupt_return_\srr\()_kernel:
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel)
	addi	r3,r1,STACK_INT_FRAME_REGS
	bl	CFUNC(interrupt_exit_kernel_prepare)

	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
.Linterrupt_return_\srr\()_kernel_rst_start:
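	/* Restore the irq soft-mask state of the interrupted context */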
	ld	r11,SOFTE(r1)
	cmpwi	r11,IRQS_ENABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	beq	.Linterrupt_return_\srr\()_soft_enabled

	/*
	 * Returning to a soft-disabled context.
	 * Check if a MUST_HARD_MASK interrupt has become pending, in which
	 * case we need to disable MSR[EE] in the context we return to.
	 *
	 * The MSR[EE] check catches among other things the short incoherency
	 * in hard_irq_disable() between clearing MSR[EE] and setting
	 * PACA_IRQ_HARD_DIS.
	 */
	ld	r12,_MSR(r1)
	andi.	r10,r12,MSR_EE
	beq	.Lfast_kernel_interrupt_return_\srr\() // EE already disabled
	lbz	r11,PACAIRQHAPPENED(r13)
	andi.	r10,r11,PACA_IRQ_MUST_HARD_MASK
	bne	1f // HARD_MASK is pending
	// No HARD_MASK pending, clear possible HARD_DIS set by interrupt
	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
	stb	r11,PACAIRQHAPPENED(r13)
	b	.Lfast_kernel_interrupt_return_\srr\()


1:	/* Must clear MSR_EE from _MSR */
#ifdef CONFIG_PPC_BOOK3S
	li	r10,0
	/* Clear valid before changing _MSR */
	.ifc \srr,srr
	stb	r10,PACASRR_VALID(r13)
	.else
	stb	r10,PACAHSRR_VALID(r13)
	.endif
#endif
	xori	r12,r12,MSR_EE
	std	r12,_MSR(r1)
	b	.Lfast_kernel_interrupt_return_\srr\()

.Linterrupt_return_\srr\()_soft_enabled:
	/*
	 * In the soft-enabled case, we need to double-check that we have no
	 * pending interrupts that might have come in before we reached the
	 * restart section of code, and restart the exit so those can be
	 * handled.
	 *
	 * If there are none, it is possible that the interrupt still has
	 * PACA_IRQ_HARD_DIS set, which needs to be cleared for the
	 * interrupted context. This clear will not clobber a new pending
	 * interrupt coming in, because we're in the restart section, so
	 * any such interrupt would return to the restart location.
	 */
#ifdef CONFIG_PPC_BOOK3S
	lbz	r11,PACAIRQHAPPENED(r13)
	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
	bne-	interrupt_return_\srr\()_kernel_restart
#endif
	li	r11,0
	stb	r11,PACAIRQHAPPENED(r13) // clear the possible HARD_DIS

.Lfast_kernel_interrupt_return_\srr\():
	SANITIZE_RESTORE_NVGPRS()
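	/*
	 * r3 != 0 requests an emulated stack store (stdu) at the end of the
	 * restore; keep the test result in cr1, which is consumed after most
	 * GPRs have been reloaded.
	 */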
	cmpdi	cr1,r3,0
#ifdef CONFIG_PPC_BOOK3S
	.ifc \srr,srr
	lbz	r4,PACASRR_VALID(r13)
	.else
	lbz	r4,PACAHSRR_VALID(r13)
	.endif
	cmpdi	r4,0
	li	r4,0
	bne	1f
#endif
	ld	r11,_NIP(r1)
	ld	r12,_MSR(r1)
	.ifc \srr,srr
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
1:
#ifdef CONFIG_PPC_BOOK3S
	stb	r4,PACASRR_VALID(r13)
#endif
	.else
	mtspr	SPRN_HSRR0,r11
	mtspr	SPRN_HSRR1,r12
1:
#ifdef CONFIG_PPC_BOOK3S
	stb	r4,PACAHSRR_VALID(r13)
#endif
	.endif
	DEBUG_SRR_VALID \srr

BEGIN_FTR_SECTION
	stdcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	ldarx	r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	ld	r3,_LINK(r1)
	ld	r4,_CTR(r1)
	ld	r5,_XER(r1)
	ld	r6,_CCR(r1)
	li	r0,0

	REST_GPRS(7, 12, r1)

	mtlr	r3
	mtctr	r4
	mtspr	SPRN_XER,r5

	/*
	 * Leaving a stale STACK_FRAME_REGS_MARKER on the stack can confuse
	 * the reliable stack unwinder later on. Clear it.
	 */
	std	r0,STACK_INT_FRAME_MARKER(r1)

	REST_GPRS(2, 5, r1)

	bne-	cr1,1f /* emulate stack store */
	mtcr	r6
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	.ifc \srr,srr
	RFI_TO_KERNEL
	.else
	HRFI_TO_KERNEL
	.endif
	b	.	/* prevent speculative execution */

1:	/*
	 * Emulate stack store with update. New r1 value was already calculated
	 * and updated in our interrupt regs by emulate_loadstore, but we can't
	 * store the previous value of r1 to the stack before re-loading our
	 * registers from it, otherwise they could be clobbered. Use
	 * PACA_EXGEN as temporary storage to hold the store data; interrupts
	 * are disabled here, so it won't be clobbered.
	 */
	mtcr	r6
	std	r9,PACA_EXGEN+0(r13)
	addi	r9,r1,INT_FRAME_SIZE /* get original r1 */
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	std	r9,0(r1) /* perform store component of stdu */
	ld	r9,PACA_EXGEN+0(r13)

	.ifc \srr,srr
	RFI_TO_KERNEL
	.else
	HRFI_TO_KERNEL
	.endif
	b	.	/* prevent speculative execution */
.Linterrupt_return_\srr\()_kernel_rst_end:

#ifdef CONFIG_PPC_BOOK3S
interrupt_return_\srr\()_kernel_restart:
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel_restart)
	GET_PACA(r13)
	ld	r1,PACA_EXIT_SAVE_R1(r13)
	LOAD_PACA_TOC()
	addi	r3,r1,STACK_INT_FRAME_REGS
	li	r11,IRQS_ALL_DISABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	bl	CFUNC(interrupt_exit_kernel_restart)
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
	b	.Linterrupt_return_\srr\()_kernel_rst_start
1:

SOFT_MASK_TABLE(.Linterrupt_return_\srr\()_kernel_rst_start, 1b)
RESTART_TABLE(.Linterrupt_return_\srr\()_kernel_rst_start, .Linterrupt_return_\srr\()_kernel_rst_end, interrupt_return_\srr\()_kernel_restart)
#endif

.endm

interrupt_return_macro srr
#ifdef CONFIG_PPC_BOOK3S
interrupt_return_macro hsrr

	.globl __end_soft_masked
__end_soft_masked:
DEFINE_FIXED_SYMBOL(__end_soft_masked, text)
#endif /* CONFIG_PPC_BOOK3S */

#ifdef CONFIG_PPC_BOOK3S
_GLOBAL(ret_from_fork_scv)
	bl	CFUNC(schedule_tail)
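	/*
	 * The fast syscall exit does not restore NVGPRs, and the child runs
	 * with kernel values in them after the context switch, so reload the
	 * user values from the pt_regs frame copied from the parent.
	 */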
	HANDLER_RESTORE_NVGPRS()
	li	r3,0	/* fork() return value */
	b	.Lsyscall_vectored_common_exit
#endif

_GLOBAL(ret_from_fork)
	bl	CFUNC(schedule_tail)
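	/* As above: reload the NVGPRs the context switch clobbered */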
	HANDLER_RESTORE_NVGPRS()
	li	r3,0	/* fork() return value */
	b	.Lsyscall_exit

_GLOBAL(ret_from_kernel_user_thread)
	bl	CFUNC(schedule_tail)
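	/*
	 * copy_thread() put the thread function in r14 and its argument in
	 * r15 (nonvolatile, so they survive schedule_tail). The ELFv2 ABI
	 * additionally wants r12 to hold the entry address for TOC setup.
	 */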
	mtctr	r14
	mr	r3,r15
#ifdef CONFIG_PPC64_ELF_ABI_V2
	mr	r12,r14
#endif
	bctrl
	li	r3,0
	/*
	 * It does not matter whether this returns via the scv or sc path
	 * because it returns as execve() and therefore has no calling ABI
	 * (i.e., it sets registers according to the exec()ed entry point).
	 */
	b	.Lsyscall_exit

_GLOBAL(start_kernel_thread)
	bl	CFUNC(schedule_tail)
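	/* As above: r14 = thread function, r15 = argument, r12 for ELFv2 */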
	mtctr	r14
	mr	r3,r15
#ifdef CONFIG_PPC64_ELF_ABI_V2
	mr	r12,r14
#endif
	bctrl
	/*
	 * This must not return. We actually want to BUG here, not WARN,
	 * because BUG will exit the process, which is what the kernel thread
	 * should have done; that may give some hope of continuing.
	 */
100:	trap
	EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,0