path: root/arch/sparc/kernel/hvtramp.S
author		Sam Ravnborg <sam@ravnborg.org>		2008-12-03 03:11:52 -0800
committer	David S. Miller <davem@davemloft.net>	2008-12-04 09:17:21 -0800
commit		a88b5ba8bd8ac18aad65ee6c6a254e2e74876db3 (patch)
tree		eb3d0ffaf53c3f7ec6083752c2097cecd1cb892a /arch/sparc/kernel/hvtramp.S
parent		d670bd4f803c8b646acd20f3ba21e65458293faf (diff)
download	linux-a88b5ba8bd8ac18aad65ee6c6a254e2e74876db3.tar.gz
sparc,sparc64: unify kernel/
o Move all files from sparc64/kernel/ to sparc/kernel/
  - rename as appropriate
o Update sparc/Makefile to reflect the changes
o Update sparc/kernel/Makefile to include the sparc64 files

NOTE: This commit changes link order on sparc64!

Link order had to change for either sparc32 or sparc64.
Assuming sparc64 sees more testing than sparc32, change the link
order on sparc64, where issues will be caught faster.

Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/sparc/kernel/hvtramp.S')
-rw-r--r--	arch/sparc/kernel/hvtramp.S	140
1 file changed, 140 insertions, 0 deletions
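
The trampoline added below pulls its per-cpu setup out of a descriptor
structure whose field offsets (the HVTRAMP_DESCR_* and HVTRAMP_MAPPING_*
constants) come from C definitions in asm/hvtramp.h. As a reading aid,
here is a minimal C sketch of the layout those offsets imply; the exact
field names and types are an assumption based on that header, not part
of this diff.

#include <linux/types.h>

/* One va->tte mapping the hypervisor should load for the new cpu. */
struct hvtramp_mapping {
	__u64			vaddr;		/* HVTRAMP_MAPPING_VADDR */
	__u64			tte;		/* HVTRAMP_MAPPING_TTE   */
};

/* The client argument handed to hv_cpu_startup in %o0. */
struct hvtramp_descr {
	__u32			cpu;		/* HVTRAMP_DESCR_CPU */
	__u32			num_mappings;	/* HVTRAMP_DESCR_NUM_MAPPINGS */
	__u64			fault_info_va;	/* HVTRAMP_DESCR_FAULT_INFO_VA */
	__u64			fault_info_pa;	/* HVTRAMP_DESCR_FAULT_INFO_PA */
	__u64			thread_reg;	/* HVTRAMP_DESCR_THREAD_REG */
	struct hvtramp_mapping	maps[1];	/* HVTRAMP_DESCR_MAPS: num_mappings entries */
};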
diff --git a/arch/sparc/kernel/hvtramp.S b/arch/sparc/kernel/hvtramp.S
new file mode 100644
index 000000000000..9365432904d6
--- /dev/null
+++ b/arch/sparc/kernel/hvtramp.S
@@ -0,0 +1,140 @@
+/* hvtramp.S: Hypervisor start-cpu trampoline code.
+ *
+ * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
+ */
+
+#include <linux/init.h>
+
+#include <asm/thread_info.h>
+#include <asm/hypervisor.h>
+#include <asm/scratchpad.h>
+#include <asm/spitfire.h>
+#include <asm/hvtramp.h>
+#include <asm/pstate.h>
+#include <asm/ptrace.h>
+#include <asm/head.h>
+#include <asm/asi.h>
+#include <asm/pil.h>
+
+	__CPUINIT
+	.align		8
+	.globl		hv_cpu_startup, hv_cpu_startup_end
+
+	/* This code executes directly out of the hypervisor
+	 * with physical addressing (va==pa).  %o0 contains
+	 * our client argument which for Linux points to
+	 * a descriptor data structure which defines the
+	 * MMU entries we need to load up.
+	 *
+	 * After we set things up we enable the MMU and call
+	 * into the kernel.
+	 *
+	 * First, set up basic privileged cpu state.
+	 */
+hv_cpu_startup:
+	SET_GL(0)					! global register level 0
+	wrpr		%g0, PIL_NORMAL_MAX, %pil	! block normal interrupts
+	wrpr		%g0, 0, %canrestore		! reset the register
+	wrpr		%g0, 0, %otherwin		! window state
+	wrpr		%g0, 6, %cansave
+	wrpr		%g0, 6, %cleanwin
+	wrpr		%g0, 0, %cwp
+	wrpr		%g0, 0, %wstate
+	wrpr		%g0, 0, %tl			! trap level 0
+
+	sethi		%hi(sparc64_ttable_tl0), %g1	! install the kernel
+	wrpr		%g1, %tba			! trap table
+
+	mov		%o0, %l0			! %l0 = descriptor pointer
+
+	lduw		[%l0 + HVTRAMP_DESCR_CPU], %g1	! write our cpu id into
+	mov		SCRATCHPAD_CPUID, %g2		! the cpuid scratchpad slot
+	stxa		%g1, [%g2] ASI_SCRATCHPAD
+
+	ldx		[%l0 + HVTRAMP_DESCR_FAULT_INFO_VA], %g2
+	stxa		%g2, [%g0] ASI_SCRATCHPAD	! fault status VA, slot 0
+
+	mov		0, %l1					! %l1 = mapping index
+	lduw		[%l0 + HVTRAMP_DESCR_NUM_MAPPINGS], %l2	! %l2 = mapping count
+	add		%l0, HVTRAMP_DESCR_MAPS, %l3		! %l3 = current mapping
+
+1:	ldx		[%l3 + HVTRAMP_MAPPING_VADDR], %o0	! arg0: virtual address
+	clr		%o1					! arg1: context zero
+	ldx		[%l3 + HVTRAMP_MAPPING_TTE], %o2	! arg2: the TTE
+	mov		HV_MMU_IMMU | HV_MMU_DMMU, %o3		! arg3: map in both MMUs
+	mov		HV_FAST_MMU_MAP_PERM_ADDR, %o5		! fast trap function number
+	ta		HV_FAST_TRAP
+
+	brnz,pn		%o0, 80f				! non-zero status: fail
+	 nop
+
+	add		%l1, 1, %l1				! next mapping, loop
+	cmp		%l1, %l2				! until all are loaded
+	blt,a,pt	%xcc, 1b
+	 add		%l3, HVTRAMP_MAPPING_SIZE, %l3
+
+	ldx		[%l0 + HVTRAMP_DESCR_FAULT_INFO_PA], %o0	! register the fault
+	mov		HV_FAST_MMU_FAULT_AREA_CONF, %o5		! status area with
+	ta		HV_FAST_TRAP					! the hypervisor
+
+	brnz,pn		%o0, 80f
+	 nop
+
+	wrpr		%g0, (PSTATE_PRIV | PSTATE_PEF), %pstate	! interrupts still off
+
+	ldx		[%l0 + HVTRAMP_DESCR_THREAD_REG], %l6	! stash thread pointer
+
+	mov		1, %o0					! arg0: enable the MMU
+	set		1f, %o1					! arg1: resume at 1f, which
+	mov		HV_FAST_MMU_ENABLE, %o5			! is now a virtual address
+	ta		HV_FAST_TRAP
+
+	ba,pt		%xcc, 80f				! reached only on failure
+	 nop
+
+1:
+	wr		%g0, 0, %fprs				! FPU off for now
+	wr		%g0, ASI_P, %asi			! default ASI: primary
+
+	mov		PRIMARY_CONTEXT, %g7			! select kernel context
+	stxa		%g0, [%g7] ASI_MMU			! (context zero) in both
+	membar		#Sync					! the primary and
+
+	mov		SECONDARY_CONTEXT, %g7			! secondary context
+	stxa		%g0, [%g7] ASI_MMU			! registers
+	membar		#Sync
+
+	mov		%l6, %g6				! %g6 = thread_info
+	ldx		[%g6 + TI_TASK], %g4			! %g4 = current task
+
+	mov		1, %g5					! initial stack: top of the
+	sllx		%g5, THREAD_SHIFT, %g5			! thread_info block, minus
+	sub		%g5, (STACKFRAME_SZ + STACK_BIAS), %g5	! a frame and the bias
+	add		%g6, %g5, %sp
+	mov		0, %fp					! terminate stack traces
+
+	call		init_irqwork_curcpu
+	 nop
+	call		hard_smp_processor_id
+	 nop
+
+	call		sun4v_register_mondo_queues	! takes the cpuid left in %o0
+	 nop
+
+	call		init_cur_cpu_trap
+	 mov		%g6, %o0
+
+	wrpr		%g0, (PSTATE_PRIV | PSTATE_PEF | PSTATE_IE), %pstate	! enable interrupts
+
+	call		smp_callin
+	 nop
+	call		cpu_idle				! should never return
+	 mov		0, %o0
+	call		cpu_panic				! ...but complain if it does
+	 nop
+
+80:	ba,pt		%xcc, 80b				! spin forever on any
+	 nop							! hypervisor call failure
+
+	.align		8
+hv_cpu_startup_end:
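
The "ta HV_FAST_TRAP" sequences above all follow the sun4v fast-trap
calling convention: the function number goes in %o5, arguments in
%o0..%o3, and a status word comes back in %o0 (zero on success, which is
why each call is followed by a brnz to the error loop at 80:). In C
terms, the two MMU setup services used here behave roughly like the
prototypes below; this is a sketch of the convention, with names assumed
from the asm/hypervisor.h wrappers rather than taken from this diff.

/* Map a permanent va->tte translation on the calling cpu.
 * %o0 = vaddr, %o1 = context (0 here), %o2 = tte,
 * %o3 = HV_MMU_IMMU | HV_MMU_DMMU, %o5 = HV_FAST_MMU_MAP_PERM_ADDR.
 */
unsigned long sun4v_mmu_map_perm_addr(unsigned long vaddr,
				      unsigned long context,
				      unsigned long tte,
				      unsigned long flags);

/* Tell the hypervisor where to write MMU fault status.
 * %o0 = physical address of the fault area,
 * %o5 = HV_FAST_MMU_FAULT_AREA_CONF.
 */
unsigned long sun4v_mmu_fault_area_conf(unsigned long raddr);

HV_FAST_MMU_ENABLE is the exception: on success it does not return to
the trap instruction at all, but resumes execution at the address passed
in %o1 (label 1 above), now translated through the freshly loaded
mappings. Only a failed enable falls through to the branch to 80:.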