Diffstat (limited to 'arch/tile/kernel')
 arch/tile/kernel/backtrace.c     |   4
 arch/tile/kernel/compat.c        |  10
 arch/tile/kernel/compat_signal.c |  10
 arch/tile/kernel/entry.S         |  34
 arch/tile/kernel/head_32.S       |   5
 arch/tile/kernel/intvec_32.S     | 101
 arch/tile/kernel/irq.c           |  16
 arch/tile/kernel/messaging.c     |   2
 arch/tile/kernel/process.c       |  50
 arch/tile/kernel/ptrace.c        |  78
 arch/tile/kernel/regs_32.S       |   2
 arch/tile/kernel/setup.c         |  34
 arch/tile/kernel/signal.c        |   6
 arch/tile/kernel/single_step.c   |  73
 arch/tile/kernel/smp.c           |   2
 arch/tile/kernel/stack.c         |  35
 arch/tile/kernel/sys.c           |   9
 arch/tile/kernel/traps.c         |   4
 18 files changed, 283 insertions(+), 192 deletions(-)
diff --git a/arch/tile/kernel/backtrace.c b/arch/tile/kernel/backtrace.c
index d3c41c1ff6bd..55a6a74974b4 100644
--- a/arch/tile/kernel/backtrace.c
+++ b/arch/tile/kernel/backtrace.c
@@ -369,6 +369,10 @@ static void find_caller_pc_and_caller_sp(CallerLocation *location,
 					/* Weird; reserved value, ignore it. */
 					continue;
 				}
+				if (info_operand & ENTRY_POINT_INFO_OP) {
+					/* This info op is ignored by the backtracer. */
+					continue;
+				}
 
 				/* Skip info ops which are not in the
 				 * "one_ago" mode we want right now.
diff --git a/arch/tile/kernel/compat.c b/arch/tile/kernel/compat.c
index b1e06d041555..77739cdd9462 100644
--- a/arch/tile/kernel/compat.c
+++ b/arch/tile/kernel/compat.c
@@ -154,8 +154,14 @@ long tile_compat_sys_msgrcv(int msqid,
 #define compat_sys_fstat64 sys_newfstat
 #define compat_sys_fstatat64 sys_newfstatat
 
-/* Pass full 64-bit values through ptrace. */
-#define compat_sys_ptrace tile_compat_sys_ptrace
+/* The native sys_ptrace dynamically handles compat binaries. */
+#define compat_sys_ptrace sys_ptrace
+
+/* Call the trampolines to manage pt_regs where necessary. */
+#define compat_sys_execve _compat_sys_execve
+#define compat_sys_sigaltstack _compat_sys_sigaltstack
+#define compat_sys_rt_sigreturn _compat_sys_rt_sigreturn
+#define sys_clone _sys_clone
 
 /*
  * Note that we can't include <linux/unistd.h> here since the header
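
The overrides above work because this file generates its syscall table by
token substitution: each syscall name is redefined before the
table-generating header is included, so the corresponding table slot ends
up pointing at the trampoline instead.  A minimal sketch of the pattern,
assuming the usual __SYSCALL plumbing (not shown in this hunk):

    /* Redefine first, then let the unistd header expand the table. */
    #define compat_sys_execve _compat_sys_execve  /* trampoline adds pt_regs */

    #define __SYSCALL(nr, call) [nr] = (call),
    void *compat_sys_call_table[__NR_syscalls] = {
            [0 ... __NR_syscalls - 1] = sys_ni_syscall,
    #include <asm/unistd.h>
    };
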
diff --git a/arch/tile/kernel/compat_signal.c b/arch/tile/kernel/compat_signal.c
index 9c710db43f13..fb64b99959d4 100644
--- a/arch/tile/kernel/compat_signal.c
+++ b/arch/tile/kernel/compat_signal.c
@@ -256,9 +256,9 @@ int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
 	return err;
 }
 
-long _compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
-			     struct compat_sigaltstack __user *uoss_ptr,
-			     struct pt_regs *regs)
+long compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
+			    struct compat_sigaltstack __user *uoss_ptr,
+			    struct pt_regs *regs)
 {
 	stack_t uss, uoss;
 	int ret;
@@ -291,7 +291,7 @@ long _compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
 	return ret;
 }
 
-long _compat_sys_rt_sigreturn(struct pt_regs *regs)
+long compat_sys_rt_sigreturn(struct pt_regs *regs)
 {
 	struct compat_rt_sigframe __user *frame =
 		(struct compat_rt_sigframe __user *) compat_ptr(regs->sp);
@@ -312,7 +312,7 @@ long _compat_sys_rt_sigreturn(struct pt_regs *regs)
 	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0))
 		goto badframe;
 
-	if (_compat_sys_sigaltstack(&frame->uc.uc_stack, NULL, regs) != 0)
+	if (compat_sys_sigaltstack(&frame->uc.uc_stack, NULL, regs) != 0)
 		goto badframe;
 
 	return r0;
diff --git a/arch/tile/kernel/entry.S b/arch/tile/kernel/entry.S
index 3d01383b1b0e..fd8dc42abdcb 100644
--- a/arch/tile/kernel/entry.S
+++ b/arch/tile/kernel/entry.S
@@ -15,7 +15,9 @@
 #include <linux/linkage.h>
 #include <linux/unistd.h>
 #include <asm/irqflags.h>
+#include <asm/processor.h>
 #include <arch/abi.h>
+#include <arch/spr_def.h>
 
 #ifdef __tilegx__
 #define bnzt bnezt
@@ -25,28 +27,6 @@ STD_ENTRY(current_text_addr)
 	{ move r0, lr; jrp lr }
 	STD_ENDPROC(current_text_addr)
 
-STD_ENTRY(_sim_syscall)
-	/*
-	 * Wait for r0-r9 to be ready (and lr on the off chance we
-	 * want the syscall to locate its caller), then make a magic
-	 * simulator syscall.
-	 *
-	 * We carefully stall until the registers are readable in case they
-	 * are the target of a slow load, etc. so that tile-sim will
-	 * definitely be able to read all of them inside the magic syscall.
-	 *
-	 * Technically this is wrong for r3-r9 and lr, since an interrupt
-	 * could come in and restore the registers with a slow load right
-	 * before executing the mtspr. We may need to modify tile-sim to
-	 * explicitly stall for this case, but we do not yet have
-	 * a way to implement such a stall.
-	 */
-	{ and zero, lr, r9 ; and zero, r8, r7 }
-	{ and zero, r6, r5 ; and zero, r4, r3 }
-	{ and zero, r2, r1 ; mtspr SIM_CONTROL, r0 }
-	{ jrp lr }
-	STD_ENDPROC(_sim_syscall)
-
 /*
  * Implement execve().  The i386 code has a note that forking from kernel
  * space results in no copy on write until the execve, so we should be
@@ -102,7 +82,7 @@ STD_ENTRY(KBacktraceIterator_init_current)
 STD_ENTRY(cpu_idle_on_new_stack)
 	{
 	 move sp, r1
-	 mtspr SYSTEM_SAVE_1_0, r2
+	 mtspr SPR_SYSTEM_SAVE_K_0, r2
 	}
 	jal free_thread_info
 	j cpu_idle
@@ -124,15 +104,15 @@ STD_ENTRY(smp_nap)
 STD_ENTRY(_cpu_idle)
 	{
 	 lnk r0
-	 movei r1, 1
+	 movei r1, KERNEL_PL
 	}
 	{
 	 addli r0, r0, _cpu_idle_nap - .
 	 mtspr INTERRUPT_CRITICAL_SECTION, r1
 	}
-	IRQ_ENABLE(r2, r3)         /* unmask, but still with ICS set */
-	mtspr EX_CONTEXT_1_1, r1   /* PL1, ICS clear */
-	mtspr EX_CONTEXT_1_0, r0
+	IRQ_ENABLE(r2, r3)             /* unmask, but still with ICS set */
+	mtspr SPR_EX_CONTEXT_K_1, r1   /* Kernel PL, ICS clear */
+	mtspr SPR_EX_CONTEXT_K_0, r0
 	iret
 	.global _cpu_idle_nap
 _cpu_idle_nap:
diff --git a/arch/tile/kernel/head_32.S b/arch/tile/kernel/head_32.S
index 2b4f6c091701..90e7c4435693 100644
--- a/arch/tile/kernel/head_32.S
+++ b/arch/tile/kernel/head_32.S
@@ -23,6 +23,7 @@
 #include <asm/asm-offsets.h>
 #include <hv/hypervisor.h>
 #include <arch/chip.h>
+#include <arch/spr_def.h>
 
 /*
  * This module contains the entry code for kernel images. It performs the
@@ -76,7 +77,7 @@ ENTRY(_start)
 	}
 1:
 
-	/* Get our processor number and save it away in SAVE_1_0. */
+	/* Get our processor number and save it away in SAVE_K_0. */
 	jal hv_inquire_topology
 	mulll_uu r4, r1, r2        /* r1 == y, r2 == width */
 	add r4, r4, r0             /* r0 == x, so r4 == cpu == y*width + x */
@@ -124,7 +125,7 @@ ENTRY(_start)
 	lw r0, r0
 	lw sp, r1
 	or r4, sp, r4
-	mtspr SYSTEM_SAVE_1_0, r4  /* save ksp0 + cpu */
+	mtspr SPR_SYSTEM_SAVE_K_0, r4  /* save ksp0 + cpu */
 	addi sp, sp, -STACK_TOP_DELTA
 	{
 	  move lr, zero   /* stop backtraces in the called function */
diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S
index 8f58bdff20d7..f5821626247f 100644
--- a/arch/tile/kernel/intvec_32.S
+++ b/arch/tile/kernel/intvec_32.S
@@ -32,8 +32,8 @@
 # error "No support for kernel preemption currently"
 #endif
 
-#if INT_INTCTRL_1 < 32 || INT_INTCTRL_1 >= 48
-# error INT_INTCTRL_1 coded to set high interrupt mask
+#if INT_INTCTRL_K < 32 || INT_INTCTRL_K >= 48
+# error INT_INTCTRL_K coded to set high interrupt mask
 #endif
 
 #define PTREGS_PTR(reg, ptreg) addli reg, sp, C_ABI_SAVE_AREA_SIZE + (ptreg)
@@ -132,8 +132,8 @@ intvec_\vecname:
 
 	/* Temporarily save a register so we have somewhere to work. */
 
-	mtspr   SYSTEM_SAVE_1_1, r0
-	mfspr   r0, EX_CONTEXT_1_1
+	mtspr   SPR_SYSTEM_SAVE_K_1, r0
+	mfspr   r0, SPR_EX_CONTEXT_K_1
 
 	/* The cmpxchg code clears sp to force us to reset it here on fault. */
 	{
@@ -167,18 +167,18 @@ intvec_\vecname:
 	 * The page_fault handler may be downcalled directly by the
 	 * hypervisor even when Linux is running and has ICS set.
 	 *
-	 * In this case the contents of EX_CONTEXT_1_1 reflect the
+	 * In this case the contents of EX_CONTEXT_K_1 reflect the
 	 * previous fault and can't be relied on to choose whether or
 	 * not to reinitialize the stack pointer.  So we add a test
-	 * to see whether SYSTEM_SAVE_1_2 has the high bit set,
+	 * to see whether SYSTEM_SAVE_K_2 has the high bit set,
 	 * and if so we don't reinitialize sp, since we must be coming
 	 * from Linux.  (In fact the precise case is !(val & ~1),
 	 * but any Linux PC has to have the high bit set.)
 	 *
-	 * Note that the hypervisor *always* sets SYSTEM_SAVE_1_2 for
+	 * Note that the hypervisor *always* sets SYSTEM_SAVE_K_2 for
 	 * any path that turns into a downcall to one of our TLB handlers.
 	 */
-	mfspr   r0, SYSTEM_SAVE_1_2
+	mfspr   r0, SPR_SYSTEM_SAVE_K_2
 	{
 	 blz    r0, 0f    /* high bit in S_S_1_2 is for a PC to use */
 	 move   r0, sp
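
A C restatement of the high-bit test above may help: any Linux kernel PC
has the top bit set, while the hypervisor's fault-info values do not
(modulo the low "info" bits), so a signed comparison distinguishes the
two.  A sketch, not kernel code:

    /* blz r0, 0f: skip the sp reinit when SYSTEM_SAVE_K_2 holds a PC. */
    static inline int sp_already_valid(long system_save_k_2)
    {
            return system_save_k_2 < 0;   /* high bit set => Linux PC */
    }
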
@@ -187,12 +187,12 @@ intvec_\vecname:
 
 2:
 	/*
-	 * SYSTEM_SAVE_1_0 holds the cpu number in the low bits, and
+	 * SYSTEM_SAVE_K_0 holds the cpu number in the low bits, and
 	 * the current stack top in the higher bits.  So we recover
 	 * our stack top by just masking off the low bits, then
 	 * point sp at the top aligned address on the actual stack page.
 	 */
-	mfspr   r0, SYSTEM_SAVE_1_0
+	mfspr   r0, SPR_SYSTEM_SAVE_K_0
 	mm      r0, r0, zero, LOG2_THREAD_SIZE, 31
 
 0:
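
The packed encoding described in the comment above can be restated in C.
This is an illustrative sketch only; the LOG2_THREAD_SIZE value is assumed
for the example:

    #define LOG2_THREAD_SIZE 13                    /* assumed for the example */
    #define THREAD_MASK ((1UL << LOG2_THREAD_SIZE) - 1)

    /* What head_32.S stores into SPR_SYSTEM_SAVE_K_0: ksp0 | cpu. */
    static inline unsigned long pack_save_k_0(unsigned long ksp0,
                                              unsigned long cpu)
    {
            return (ksp0 & ~THREAD_MASK) | cpu;
    }

    /* What "mm r0, r0, zero, LOG2_THREAD_SIZE, 31" recovers: the stack top. */
    static inline unsigned long stack_top(unsigned long save_k_0)
    {
            return save_k_0 & ~THREAD_MASK;
    }
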
@@ -254,7 +254,7 @@ intvec_\vecname:
 	 sw     sp, r3
 	 addli  sp, sp, PTREGS_OFFSET_PC - PTREGS_OFFSET_REG(3)
 	}
-	mfspr   r0, EX_CONTEXT_1_0
+	mfspr   r0, SPR_EX_CONTEXT_K_0
 	.ifc \processing,handle_syscall
 	/*
 	 * Bump the saved PC by one bundle so that when we return, we won't
@@ -267,7 +267,7 @@ intvec_\vecname:
 	 sw     sp, r0
 	 addli  sp, sp, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_PC
 	}
-	mfspr   r0, EX_CONTEXT_1_1
+	mfspr   r0, SPR_EX_CONTEXT_K_1
 	{
 	 sw     sp, r0
 	 addi   sp, sp, PTREGS_OFFSET_FAULTNUM - PTREGS_OFFSET_EX1
@@ -289,7 +289,7 @@ intvec_\vecname:
 	 .endif
 	 addli  sp, sp, PTREGS_OFFSET_REG(0) - PTREGS_OFFSET_FAULTNUM
 	}
-	mfspr   r0, SYSTEM_SAVE_1_1    /* Original r0 */
+	mfspr   r0, SPR_SYSTEM_SAVE_K_1    /* Original r0 */
 	{
 	 sw     sp, r0
 	 addi   sp, sp, -PTREGS_OFFSET_REG(0) - 4
@@ -309,12 +309,12 @@ intvec_\vecname:
 	 * See discussion below at "finish_interrupt_save".
 	 */
 	.ifc \c_routine, do_page_fault
-	mfspr   r2, SYSTEM_SAVE_1_3   /* address of page fault */
-	mfspr   r3, SYSTEM_SAVE_1_2   /* info about page fault */
+	mfspr   r2, SPR_SYSTEM_SAVE_K_3   /* address of page fault */
+	mfspr   r3, SPR_SYSTEM_SAVE_K_2   /* info about page fault */
 	.else
 	.ifc \vecnum, INT_DOUBLE_FAULT
 	{
-	 mfspr  r2, SYSTEM_SAVE_1_2   /* double fault info from HV */
+	 mfspr  r2, SPR_SYSTEM_SAVE_K_2   /* double fault info from HV */
 	 movei  r3, 0
 	}
 	.else
@@ -467,7 +467,7 @@ intvec_\vecname:
 	/* Load tp with our per-cpu offset. */
 #ifdef CONFIG_SMP
 	{
-	 mfspr  r20, SYSTEM_SAVE_1_0
+	 mfspr  r20, SPR_SYSTEM_SAVE_K_0
 	 moveli r21, lo16(__per_cpu_offset)
 	}
 	{
@@ -487,7 +487,7 @@ intvec_\vecname:
 	 * We load flags in r32 here so we can jump to .Lrestore_regs
 	 * directly after do_page_fault_ics() if necessary.
 	 */
-	mfspr   r32, EX_CONTEXT_1_1
+	mfspr   r32, SPR_EX_CONTEXT_K_1
 	{
 	 andi   r32, r32, SPR_EX_CONTEXT_1_1__PL_MASK  /* mask off ICS */
 	 PTREGS_PTR(r21, PTREGS_OFFSET_FLAGS)
@@ -957,11 +957,11 @@ STD_ENTRY(interrupt_return)
 	pop_reg_zero r21, r3, sp, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_PC
 	pop_reg_zero lr, r4, sp, PTREGS_OFFSET_REG(52) - PTREGS_OFFSET_EX1
 	{
-	 mtspr  EX_CONTEXT_1_0, r21
+	 mtspr  SPR_EX_CONTEXT_K_0, r21
 	 move   r5, zero
 	}
 	{
-	 mtspr  EX_CONTEXT_1_1, lr
+	 mtspr  SPR_EX_CONTEXT_K_1, lr
 	 andi   lr, lr, SPR_EX_CONTEXT_1_1__PL_MASK  /* mask off ICS */
 	}
 
@@ -1020,7 +1020,7 @@ STD_ENTRY(interrupt_return)
 
 	/* Set r1 to errno if we are returning an error, otherwise zero. */
 	{
-	 moveli r29, 1024
+	 moveli r29, 4096
 	 sub    r1, zero, r0
 	}
 	slt_u   r29, r1, r29
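
The constant change above widens the errno window from 1024 to 4096,
matching the generic convention that return values in -4095..-1 denote
errors.  The visible instructions compute roughly the following (a sketch;
the final select on r29 falls outside this hunk):

    static long errno_of_return(long r0)
    {
            unsigned long r1 = -(unsigned long)r0;  /* sub r1, zero, r0 */
            int is_err = r1 < 4096;                 /* slt_u r29, r1, r29 */
            return is_err ? (long)r1 : 0;           /* 0 negates to 0 anyway */
    }
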
@@ -1199,7 +1199,7 @@ STD_ENTRY(interrupt_return)
 	STD_ENDPROC(interrupt_return)
 
 	/*
-	 * This interrupt variant clears the INT_INTCTRL_1 interrupt mask bit
+	 * This interrupt variant clears the INT_INTCTRL_K interrupt mask bit
 	 * before returning, so we can properly get more downcalls.
 	 */
 	.pushsection .text.handle_interrupt_downcall,"ax"
@@ -1208,11 +1208,11 @@ handle_interrupt_downcall:
 	check_single_stepping normal, .Ldispatch_downcall
 .Ldispatch_downcall:
 
-	/* Clear INTCTRL_1 from the set of interrupts we ever enable. */
+	/* Clear INTCTRL_K from the set of interrupts we ever enable. */
 	GET_INTERRUPTS_ENABLED_MASK_PTR(r30)
 	{
 	 addi   r30, r30, 4
-	 movei  r31, INT_MASK(INT_INTCTRL_1)
+	 movei  r31, INT_MASK(INT_INTCTRL_K)
 	}
 	{
 	 lw     r20, r30
@@ -1227,7 +1227,7 @@ handle_interrupt_downcall:
 	}
 	FEEDBACK_REENTER(handle_interrupt_downcall)
 
-	/* Allow INTCTRL_1 to be enabled next time we enable interrupts. */
+	/* Allow INTCTRL_K to be enabled next time we enable interrupts. */
 	lw      r20, r30
 	or      r20, r20, r31
 	sw      r30, r20
@@ -1472,7 +1472,12 @@ handle_ill:
 	lw      r26, r24
 	sw      r28, r26
 
-	/* Clear TIF_SINGLESTEP */
+	/*
+	 * Clear TIF_SINGLESTEP to prevent recursion if we execute an ill.
+	 * The normal non-arch flow redundantly clears TIF_SINGLESTEP, but we
+	 * need to clear it here and can't really impose on all other arches.
+	 * So what's another write between friends?
+	 */
 	GET_THREAD_INFO(r0)
 
 	addi    r1, r0, THREAD_INFO_FLAGS_OFFSET
@@ -1509,7 +1514,7 @@ handle_ill:
 /* Various stub interrupt handlers and syscall handlers */
 
 STD_ENTRY_LOCAL(_kernel_double_fault)
-	mfspr   r1, EX_CONTEXT_1_0
+	mfspr   r1, SPR_EX_CONTEXT_K_0
 	move    r2, lr
 	move    r3, sp
 	move    r4, r52
@@ -1518,34 +1523,29 @@ STD_ENTRY_LOCAL(_kernel_double_fault)
 	STD_ENDPROC(_kernel_double_fault)
 
 STD_ENTRY_LOCAL(bad_intr)
-	mfspr   r2, EX_CONTEXT_1_0
+	mfspr   r2, SPR_EX_CONTEXT_K_0
 	panic   "Unhandled interrupt %#x: PC %#lx"
 	STD_ENDPROC(bad_intr)
 
 /* Put address of pt_regs in reg and jump. */
 #define PTREGS_SYSCALL(x, reg)                          \
-	STD_ENTRY(x);                                   \
+	STD_ENTRY(_##x);                                \
 	{                                               \
 	 PTREGS_PTR(reg, PTREGS_OFFSET_BASE);           \
-	 j      _##x                                    \
+	 j      x                                       \
 	};                                              \
-	STD_ENDPROC(x)
+	STD_ENDPROC(_##x)
 
 PTREGS_SYSCALL(sys_execve, r3)
 PTREGS_SYSCALL(sys_sigaltstack, r2)
 PTREGS_SYSCALL(sys_rt_sigreturn, r0)
+PTREGS_SYSCALL(sys_cmpxchg_badaddr, r1)
 
-/* Save additional callee-saves to pt_regs, put address in reg and jump. */
-#define PTREGS_SYSCALL_ALL_REGS(x, reg)                 \
-	STD_ENTRY(x);                                   \
-	push_extra_callee_saves reg;                    \
-	j       _##x;                                   \
-	STD_ENDPROC(x)
-
-PTREGS_SYSCALL_ALL_REGS(sys_fork, r0)
-PTREGS_SYSCALL_ALL_REGS(sys_vfork, r0)
-PTREGS_SYSCALL_ALL_REGS(sys_clone, r4)
-PTREGS_SYSCALL_ALL_REGS(sys_cmpxchg_badaddr, r1)
+/* Save additional callee-saves to pt_regs, put address in r4 and jump. */
+STD_ENTRY(_sys_clone)
+	push_extra_callee_saves r4
+	j       sys_clone
+	STD_ENDPROC(_sys_clone)
 
 /*
  * This entrypoint is taken for the cmpxchg and atomic_update fast
@@ -1558,12 +1558,14 @@ PTREGS_SYSCALL_ALL_REGS(sys_cmpxchg_badaddr, r1)
  * to be available to it on entry.  It does not modify any callee-save
  * registers (including "lr").  It does not check what PL it is being
  * called at, so you'd better not call it other than at PL0.
+ * The <atomic.h> wrapper assumes it only clobbers r20-r29, so if
+ * it ever becomes necessary to use more registers, be aware.
  *
  * It does not use the stack, but since it might be re-interrupted by
  * a page fault which would assume the stack was valid, it does
  * save/restore the stack pointer and zero it out to make sure it gets reset.
  * Since we always keep interrupts disabled, the hypervisor won't
- * clobber our EX_CONTEXT_1_x registers, so we don't save/restore them
+ * clobber our EX_CONTEXT_K_x registers, so we don't save/restore them
  * (other than to advance the PC on return).
  *
  * We have to manually validate the user vs kernel address range
@@ -1769,7 +1771,7 @@ ENTRY(sys_cmpxchg)
 	/* Do slow mtspr here so the following "mf" waits less. */
 	{
 	 move   sp, r27
-	 mtspr  EX_CONTEXT_1_0, r28
+	 mtspr  SPR_EX_CONTEXT_K_0, r28
 	}
 	mf
 
@@ -1788,7 +1790,7 @@ ENTRY(sys_cmpxchg)
 	}
 	{
 	 move   sp, r27
-	 mtspr  EX_CONTEXT_1_0, r28
+	 mtspr  SPR_EX_CONTEXT_K_0, r28
 	}
 	iret
 
@@ -1816,7 +1818,7 @@ ENTRY(sys_cmpxchg)
 #endif
 
 	/* Issue the slow SPR here while the tns result is in flight. */
-	mfspr   r28, EX_CONTEXT_1_0
+	mfspr   r28, SPR_EX_CONTEXT_K_0
 
 	{
 	 addi   r28, r28, 8    /* return to the instruction after the swint1 */
@@ -1904,7 +1906,7 @@ ENTRY(sys_cmpxchg)
 .Lcmpxchg64_mismatch:
 	{
 	 move   sp, r27
-	 mtspr  EX_CONTEXT_1_0, r28
+	 mtspr  SPR_EX_CONTEXT_K_0, r28
 	}
 	mf
 	{
@@ -1985,8 +1987,13 @@ int_unalign:
 	int_hand     INT_PERF_COUNT, PERF_COUNT, \
 		     op_handle_perf_interrupt, handle_nmi
 	int_hand     INT_INTCTRL_3, INTCTRL_3, bad_intr
+#if CONFIG_KERNEL_PL == 2
+	dc_dispatch  INT_INTCTRL_2, INTCTRL_2
+	int_hand     INT_INTCTRL_1, INTCTRL_1, bad_intr
+#else
 	int_hand     INT_INTCTRL_2, INTCTRL_2, bad_intr
 	dc_dispatch  INT_INTCTRL_1, INTCTRL_1
+#endif
 	int_hand     INT_INTCTRL_0, INTCTRL_0, bad_intr
 	int_hand     INT_MESSAGE_RCV_DWNCL, MESSAGE_RCV_DWNCL, \
 		     hv_message_intr, handle_interrupt_downcall
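
The #if block above moves the downcall dispatcher to whichever INTCTRL
level the kernel occupies.  Presumably the INT_INTCTRL_K name used
throughout this patch resolves along these lines (a sketch; the real
definition lives in <arch/spr_def.h>):

    #if CONFIG_KERNEL_PL == 2
    # define INT_INTCTRL_K  INT_INTCTRL_2   /* kernel at PL2 */
    #else
    # define INT_INTCTRL_K  INT_INTCTRL_1   /* default: kernel at PL1 */
    #endif
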
diff --git a/arch/tile/kernel/irq.c b/arch/tile/kernel/irq.c
index 9a27d563fc30..e63917687e99 100644
--- a/arch/tile/kernel/irq.c
+++ b/arch/tile/kernel/irq.c
@@ -61,9 +61,9 @@ static DEFINE_SPINLOCK(available_irqs_lock);
 
 #if CHIP_HAS_IPI()
 /* Use SPRs to manipulate device interrupts. */
-#define mask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_SET_1, irq_mask)
-#define unmask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_RESET_1, irq_mask)
-#define clear_irqs(irq_mask) __insn_mtspr(SPR_IPI_EVENT_RESET_1, irq_mask)
+#define mask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_SET_K, irq_mask)
+#define unmask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_RESET_K, irq_mask)
+#define clear_irqs(irq_mask) __insn_mtspr(SPR_IPI_EVENT_RESET_K, irq_mask)
 #else
 /* Use HV to manipulate device interrupts. */
 #define mask_irqs(irq_mask) hv_disable_intr(irq_mask)
@@ -89,16 +89,16 @@ void tile_dev_intr(struct pt_regs *regs, int intnum)
 	 * masked by a previous interrupt.  Then, mask out the ones
 	 * we're going to handle.
 	 */
-	unsigned long masked = __insn_mfspr(SPR_IPI_MASK_1);
-	original_irqs = __insn_mfspr(SPR_IPI_EVENT_1) & ~masked;
-	__insn_mtspr(SPR_IPI_MASK_SET_1, original_irqs);
+	unsigned long masked = __insn_mfspr(SPR_IPI_MASK_K);
+	original_irqs = __insn_mfspr(SPR_IPI_EVENT_K) & ~masked;
+	__insn_mtspr(SPR_IPI_MASK_SET_K, original_irqs);
 #else
 	/*
 	 * Hypervisor performs the equivalent of the Gx code above and
 	 * then puts the pending interrupt mask into a system save reg
 	 * for us to find.
 	 */
-	original_irqs = __insn_mfspr(SPR_SYSTEM_SAVE_1_3);
+	original_irqs = __insn_mfspr(SPR_SYSTEM_SAVE_K_3);
 #endif
 	remaining_irqs = original_irqs;
 
@@ -225,7 +225,7 @@ void __cpuinit setup_irq_regs(void)
 	/* Enable interrupt delivery. */
 	unmask_irqs(~0UL);
 #if CHIP_HAS_IPI()
-	raw_local_irq_unmask(INT_IPI_1);
+	raw_local_irq_unmask(INT_IPI_K);
 #endif
 }
 
diff --git a/arch/tile/kernel/messaging.c b/arch/tile/kernel/messaging.c
index 6d23ed271d10..997e3933f726 100644
--- a/arch/tile/kernel/messaging.c
+++ b/arch/tile/kernel/messaging.c
@@ -34,7 +34,7 @@ void __cpuinit init_messaging(void)
 		panic("hv_register_message_state: error %d", rc);
 
 	/* Make sure downcall interrupts will be enabled. */
-	raw_local_irq_unmask(INT_INTCTRL_1);
+	raw_local_irq_unmask(INT_INTCTRL_K);
 }
 
 void hv_message_intr(struct pt_regs *regs, int intnum)
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index 84c29111756c..8430f45daea6 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -214,9 +214,10 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
 	/*
 	 * Copy the callee-saved registers from the passed pt_regs struct
 	 * into the context-switch callee-saved registers area.
-	 * We have to restore the callee-saved registers since we may
-	 * be cloning a userspace task with userspace register state,
-	 * and we won't be unwinding the same kernel frames to restore them.
+	 * This way when we start the interrupt-return sequence, the
+	 * callee-save registers will be correctly in registers, which
+	 * is how we assume the compiler leaves them as we start doing
+	 * the normal return-from-interrupt path after calling C code.
 	 * Zero out the C ABI save area to mark the top of the stack.
 	 */
 	ksp = (unsigned long) childregs;
@@ -304,15 +305,25 @@ int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
 /* Allow user processes to access the DMA SPRs */
 void grant_dma_mpls(void)
 {
+#if CONFIG_KERNEL_PL == 2
+	__insn_mtspr(SPR_MPL_DMA_CPL_SET_1, 1);
+	__insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_1, 1);
+#else
 	__insn_mtspr(SPR_MPL_DMA_CPL_SET_0, 1);
 	__insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_0, 1);
+#endif
 }
 
 /* Forbid user processes from accessing the DMA SPRs */
 void restrict_dma_mpls(void)
 {
+#if CONFIG_KERNEL_PL == 2
+	__insn_mtspr(SPR_MPL_DMA_CPL_SET_2, 1);
+	__insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_2, 1);
+#else
 	__insn_mtspr(SPR_MPL_DMA_CPL_SET_1, 1);
 	__insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_1, 1);
+#endif
 }
 
 /* Pause the DMA engine, then save off its state registers. */
@@ -523,19 +534,14 @@ struct task_struct *__sched _switch_to(struct task_struct *prev,
 	 * Switch kernel SP, PC, and callee-saved registers.
 	 * In the context of the new task, return the old task pointer
 	 * (i.e. the task that actually called __switch_to).
-	 * Pass the value to use for SYSTEM_SAVE_1_0 when we reset our sp.
+	 * Pass the value to use for SYSTEM_SAVE_K_0 when we reset our sp.
 	 */
 	return __switch_to(prev, next, next_current_ksp0(next));
 }
 
-long _sys_fork(struct pt_regs *regs)
-{
-	return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
-}
-
-long _sys_clone(unsigned long clone_flags, unsigned long newsp,
-		void __user *parent_tidptr, void __user *child_tidptr,
-		struct pt_regs *regs)
+SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
+		void __user *, parent_tidptr, void __user *, child_tidptr,
+		struct pt_regs *, regs)
 {
 	if (!newsp)
 		newsp = regs->sp;
@@ -543,18 +549,13 @@ long _sys_clone(unsigned long clone_flags, unsigned long newsp,
 		       parent_tidptr, child_tidptr);
 }
 
-long _sys_vfork(struct pt_regs *regs)
-{
-	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp,
-		       regs, 0, NULL, NULL);
-}
-
 /*
  * sys_execve() executes a new program.
  */
-long _sys_execve(const char __user *path,
-		 const char __user *const __user *argv,
-		 const char __user *const __user *envp, struct pt_regs *regs)
+SYSCALL_DEFINE4(execve, const char __user *, path,
+		const char __user *const __user *, argv,
+		const char __user *const __user *, envp,
+		struct pt_regs *, regs)
 {
 	long error;
 	char *filename;
@@ -570,9 +571,10 @@ out:
 }
 
 #ifdef CONFIG_COMPAT
-long _compat_sys_execve(const char __user *path,
-			const compat_uptr_t __user *argv,
-			const compat_uptr_t __user *envp, struct pt_regs *regs)
+long compat_sys_execve(const char __user *path,
+		       const compat_uptr_t __user *argv,
+		       const compat_uptr_t __user *envp,
+		       struct pt_regs *regs)
 {
 	long error;
 	char *filename;
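
The conversions above replace hand-written _sys_* C entry points with
SYSCALL_DEFINEn, which emits the canonical sys_* symbol plus type-checking
glue; the remaining _sys_clone asm stub only pushes the extra callee-saves
and jumps to it.  Stripped of that glue, the clone definition is
equivalent to roughly this (sketch):

    asmlinkage long sys_clone(unsigned long clone_flags, unsigned long newsp,
                              void __user *parent_tidptr,
                              void __user *child_tidptr,
                              struct pt_regs *regs)
    {
            if (!newsp)
                    newsp = regs->sp;
            return do_fork(clone_flags, newsp, regs, 0,
                           parent_tidptr, child_tidptr);
    }
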
diff --git a/arch/tile/kernel/ptrace.c b/arch/tile/kernel/ptrace.c
index 7161bd03d2fd..5b20c2874d51 100644
--- a/arch/tile/kernel/ptrace.c
+++ b/arch/tile/kernel/ptrace.c
@@ -32,25 +32,6 @@ void user_disable_single_step(struct task_struct *child)
 }
 
 /*
- * This routine will put a word on the process's privileged stack.
- */
-static void putreg(struct task_struct *task,
-		   unsigned long addr, unsigned long value)
-{
-	unsigned int regno = addr / sizeof(unsigned long);
-	struct pt_regs *childregs = task_pt_regs(task);
-	childregs->regs[regno] = value;
-	childregs->flags |= PT_FLAGS_RESTORE_REGS;
-}
-
-static unsigned long getreg(struct task_struct *task, unsigned long addr)
-{
-	unsigned int regno = addr / sizeof(unsigned long);
-	struct pt_regs *childregs = task_pt_regs(task);
-	return childregs->regs[regno];
-}
-
-/*
  * Called by kernel/ptrace.c when detaching..
  */
 void ptrace_disable(struct task_struct *child)
@@ -66,59 +47,72 @@ void ptrace_disable(struct task_struct *child)
 
 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 {
-	unsigned long __user *datap;
+	unsigned long __user *datap = (long __user __force *)data;
 	unsigned long tmp;
 	int i;
 	long ret = -EIO;
-
-#ifdef CONFIG_COMPAT
-	if (task_thread_info(current)->status & TS_COMPAT)
-		data = (u32)data;
-	if (task_thread_info(child)->status & TS_COMPAT)
-		addr = (u32)addr;
-#endif
-	datap = (unsigned long __user __force *)data;
+	unsigned long *childregs;
+	char *childreg;
 
 	switch (request) {
 
 	case PTRACE_PEEKUSR:  /* Read register from pt_regs. */
-		if (addr & (sizeof(data)-1))
-			break;
 		if (addr < 0 || addr >= PTREGS_SIZE)
 			break;
-		tmp = getreg(child, addr);   /* Read register */
-		ret = put_user(tmp, datap);
+		childreg = (char *)task_pt_regs(child) + addr;
+#ifdef CONFIG_COMPAT
+		if (is_compat_task()) {
+			if (addr & (sizeof(compat_long_t)-1))
+				break;
+			ret = put_user(*(compat_long_t *)childreg,
+				       (compat_long_t __user *)datap);
+		} else
+#endif
+		{
+			if (addr & (sizeof(long)-1))
+				break;
+			ret = put_user(*(long *)childreg, datap);
+		}
 		break;
 
 	case PTRACE_POKEUSR:  /* Write register in pt_regs. */
-		if (addr & (sizeof(data)-1))
-			break;
 		if (addr < 0 || addr >= PTREGS_SIZE)
 			break;
-		putreg(child, addr, data);   /* Write register */
+		childreg = (char *)task_pt_regs(child) + addr;
+#ifdef CONFIG_COMPAT
+		if (is_compat_task()) {
+			if (addr & (sizeof(compat_long_t)-1))
+				break;
+			*(compat_long_t *)childreg = data;
+		} else
+#endif
+		{
+			if (addr & (sizeof(long)-1))
+				break;
+			*(long *)childreg = data;
+		}
 		ret = 0;
 		break;
 
 	case PTRACE_GETREGS:  /* Get all registers from the child. */
 		if (!access_ok(VERIFY_WRITE, datap, PTREGS_SIZE))
 			break;
-		for (i = 0; i < PTREGS_SIZE; i += sizeof(long)) {
-			ret = __put_user(getreg(child, i), datap);
+		childregs = (long *)task_pt_regs(child);
+		for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i) {
+			ret = __put_user(childregs[i], &datap[i]);
 			if (ret != 0)
 				break;
-			datap++;
 		}
 		break;
 
 	case PTRACE_SETREGS:  /* Set all registers in the child. */
 		if (!access_ok(VERIFY_READ, datap, PTREGS_SIZE))
 			break;
-		for (i = 0; i < PTREGS_SIZE; i += sizeof(long)) {
-			ret = __get_user(tmp, datap);
+		childregs = (long *)task_pt_regs(child);
+		for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i) {
+			ret = __get_user(childregs[i], &datap[i]);
 			if (ret != 0)
 				break;
-			putreg(child, i, tmp);
-			datap++;
 		}
 		break;
 
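
From the tracer's side, registers are now addressed by their byte offset
into struct pt_regs, with the access width following the tracee's
personality.  A hypothetical userspace caller (error handling elided):

    #include <sys/ptrace.h>
    #include <sys/types.h>

    /* Read the tracee register 'off' bytes into pt_regs.  The offset must
     * be 4-byte aligned for a compat (32-bit) tracee and native-word
     * aligned otherwise, or the kernel returns -EIO. */
    static long peek_reg(pid_t pid, long off)
    {
            return ptrace(PTRACE_PEEKUSER, pid, (void *)off, 0);
    }
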
diff --git a/arch/tile/kernel/regs_32.S b/arch/tile/kernel/regs_32.S
index e88d6e122783..caa13101c264 100644
--- a/arch/tile/kernel/regs_32.S
+++ b/arch/tile/kernel/regs_32.S
@@ -85,7 +85,7 @@ STD_ENTRY_SECTION(__switch_to, .sched.text)
 	{
 	  /* Update sp and ksp0 simultaneously to avoid backtracer warnings. */
 	  move sp, r13
-	  mtspr SYSTEM_SAVE_1_0, r2
+	  mtspr SPR_SYSTEM_SAVE_K_0, r2
 	}
 	FOR_EACH_CALLEE_SAVED_REG(LOAD_REG)
 .L__switch_to_pc:
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index e7d54c73d5c1..f3a50e74f9a4 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -187,11 +187,11 @@ early_param("vmalloc", parse_vmalloc);
 
 #ifdef CONFIG_HIGHMEM
 /*
- * Determine for each controller where its lowmem is mapped and how
- * much of it is mapped there.  On controller zero, the first few
- * megabytes are mapped at 0xfd000000 as code, so in principle we
- * could start our data mappings higher up, but for now we don't
- * bother, to avoid additional confusion.
+ * Determine for each controller where its lowmem is mapped and how much of
+ * it is mapped there.  On controller zero, the first few megabytes are
+ * already mapped in as code at MEM_SV_INTRPT, so in principle we could
+ * start our data mappings higher up, but for now we don't bother, to avoid
+ * additional confusion.
  *
  * One question is whether, on systems with more than 768 Mb and
  * controllers of different sizes, to map in a proportionate amount of
@@ -311,7 +311,7 @@ static void __init setup_memory(void)
 #endif
 
 	/* We are using a char to hold the cpu_2_node[] mapping */
-	BUG_ON(MAX_NUMNODES > 127);
+	BUILD_BUG_ON(MAX_NUMNODES > 127);
 
 	/* Discover the ranges of memory available to us */
 	for (i = 0; ; ++i) {
@@ -876,6 +876,9 @@ void __cpuinit setup_cpu(int boot)
 #if CHIP_HAS_SN_PROC()
 	raw_local_irq_unmask(INT_SNITLB_MISS);
 #endif
+#ifdef __tilegx__
+	raw_local_irq_unmask(INT_SINGLE_STEP_K);
+#endif
 
 	/*
 	 * Allow user access to many generic SPRs, like the cycle
@@ -893,11 +896,12 @@ void __cpuinit setup_cpu(int boot)
 #endif
 
 	/*
-	 * Set the MPL for interrupt control 0 to user level.
-	 * This includes access to the SYSTEM_SAVE and EX_CONTEXT SPRs,
-	 * as well as the PL 0 interrupt mask.
+	 * Set the MPL for interrupt control 0 & 1 to the corresponding
+	 * values.  This includes access to the SYSTEM_SAVE and EX_CONTEXT
+	 * SPRs, as well as the interrupt mask.
 	 */
 	__insn_mtspr(SPR_MPL_INTCTRL_0_SET_0, 1);
+	__insn_mtspr(SPR_MPL_INTCTRL_1_SET_1, 1);
 
 	/* Initialize IRQ support for this cpu. */
 	setup_irq_regs();
@@ -1033,7 +1037,7 @@ static void __init validate_va(void)
 	 * In addition, make sure we CAN'T use the end of memory, since
 	 * we use the last chunk of each pgd for the pgd_list.
 	 */
-	int i, fc_fd_ok = 0;
+	int i, user_kernel_ok = 0;
 	unsigned long max_va = 0;
 	unsigned long list_va =
 		((PGD_LIST_OFFSET / sizeof(pgd_t)) << PGDIR_SHIFT);
@@ -1044,13 +1048,13 @@ static void __init validate_va(void)
 			break;
 		if (range.start <= MEM_USER_INTRPT &&
 		    range.start + range.size >= MEM_HV_INTRPT)
-			fc_fd_ok = 1;
+			user_kernel_ok = 1;
 		if (range.start == 0)
 			max_va = range.size;
 		BUG_ON(range.start + range.size > list_va);
 	}
-	if (!fc_fd_ok)
-		early_panic("Hypervisor not configured for VAs 0xfc/0xfd\n");
+	if (!user_kernel_ok)
+		early_panic("Hypervisor not configured for user/kernel VAs\n");
 	if (max_va == 0)
 		early_panic("Hypervisor not configured for low VAs\n");
 	if (max_va < KERNEL_HIGH_VADDR)
@@ -1334,6 +1338,10 @@ static void __init pcpu_fc_populate_pte(unsigned long addr)
 	pte_t *pte;
 
 	BUG_ON(pgd_addr_invalid(addr));
+	if (addr < VMALLOC_START || addr >= VMALLOC_END)
+		panic("PCPU addr %#lx outside vmalloc range %#lx..%#lx;"
+		      " try increasing CONFIG_VMALLOC_RESERVE\n",
+		      addr, VMALLOC_START, VMALLOC_END);
 
 	pgd = swapper_pg_dir + pgd_index(addr);
 	pud = pud_offset(pgd, addr);
diff --git a/arch/tile/kernel/signal.c b/arch/tile/kernel/signal.c
index ce183aa1492c..fb28e85ae3ae 100644
--- a/arch/tile/kernel/signal.c
+++ b/arch/tile/kernel/signal.c
@@ -41,8 +41,8 @@
 #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
 
 
-long _sys_sigaltstack(const stack_t __user *uss,
-		      stack_t __user *uoss, struct pt_regs *regs)
+SYSCALL_DEFINE3(sigaltstack, const stack_t __user *, uss,
+		stack_t __user *, uoss, struct pt_regs *, regs)
 {
 	return do_sigaltstack(uss, uoss, regs->sp);
 }
@@ -78,7 +78,7 @@ int restore_sigcontext(struct pt_regs *regs,
 }
 
 /* sigreturn() returns long since it restores r0 in the interrupted code. */
-long _sys_rt_sigreturn(struct pt_regs *regs)
+SYSCALL_DEFINE1(rt_sigreturn, struct pt_regs *, regs)
 {
 	struct rt_sigframe __user *frame =
 		(struct rt_sigframe __user *)(regs->sp);
diff --git a/arch/tile/kernel/single_step.c b/arch/tile/kernel/single_step.c
index 5ec4b9c651f2..1eb3b39e36c7 100644
--- a/arch/tile/kernel/single_step.c
+++ b/arch/tile/kernel/single_step.c
@@ -15,7 +15,7 @@
  * Derived from iLib's single-stepping code.
  */
 
-#ifndef __tilegx__   /* No support for single-step yet. */
+#ifndef __tilegx__   /* Hardware support for single step unavailable. */
 
 /* These functions are only used on the TILE platform */
 #include <linux/slab.h>
@@ -660,4 +660,75 @@ void single_step_once(struct pt_regs *regs)
 		regs->pc += 8;
 }
 
+#else
+#include <linux/smp.h>
+#include <linux/ptrace.h>
+#include <arch/spr_def.h>
+
+static DEFINE_PER_CPU(unsigned long, ss_saved_pc);
+
+
+/*
+ * Called directly on the occasion of an interrupt.
+ *
+ * If the process doesn't have single step set, then we use this as an
+ * opportunity to turn single step off.
+ *
+ * It has been mentioned that we could conditionally turn off single stepping
+ * on each entry into the kernel and rely on single_step_once to turn it
+ * on for the processes that matter (as we already do), but this
+ * implementation is somewhat more efficient in that we muck with registers
+ * once on a bum interrupt rather than on every entry into the kernel.
+ *
+ * If SINGLE_STEP_CONTROL_K has CANCELED set, then an interrupt occurred,
+ * so we have to run through this process again before we can say that an
+ * instruction has executed.
+ *
+ * swint will set CANCELED, but it's a legitimate instruction.  Fortunately
+ * it changes the PC.  If it hasn't changed, then we know that the interrupt
+ * wasn't generated by swint and we'll need to run this process again before
+ * we can say an instruction has executed.
+ *
+ * If either CANCELED == 0 or the PC's changed, we send out SIGTRAPs and get
+ * on with our lives.
+ */
+
+void gx_singlestep_handle(struct pt_regs *regs, int fault_num)
+{
+	unsigned long *ss_pc = &__get_cpu_var(ss_saved_pc);
+	struct thread_info *info = (void *)current_thread_info();
+	int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP);
+	unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);
+
+	if (is_single_step == 0) {
+		__insn_mtspr(SPR_SINGLE_STEP_EN_K_K, 0);
+
+	} else if ((*ss_pc != regs->pc) ||
+		   (!(control & SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK))) {
+
+		ptrace_notify(SIGTRAP);
+		control |= SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK;
+		control |= SPR_SINGLE_STEP_CONTROL_1__INHIBIT_MASK;
+		__insn_mtspr(SPR_SINGLE_STEP_CONTROL_K, control);
+	}
+}
+
+
+/*
+ * Called from need_singlestep.  Set up the control registers and the enable
+ * register, then return back.
+ */
+
+void single_step_once(struct pt_regs *regs)
+{
+	unsigned long *ss_pc = &__get_cpu_var(ss_saved_pc);
+	unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);
+
+	*ss_pc = regs->pc;
+	control |= SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK;
+	control |= SPR_SINGLE_STEP_CONTROL_1__INHIBIT_MASK;
+	__insn_mtspr(SPR_SINGLE_STEP_CONTROL_K, control);
+	__insn_mtspr(SPR_SINGLE_STEP_EN_K_K, 1 << USER_PL);
+}
+
 #endif /* !__tilegx__ */
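
The long comment above reduces to a small predicate; restated in C as a
sketch of the logic in gx_singlestep_handle (not kernel code):

    #include <stdbool.h>

    /* One user instruction has truly executed if either CANCELED is clear
     * (the step completed undisturbed) or the PC moved (swint sets
     * CANCELED but also advances the PC, so it still counts). */
    static bool step_completed(bool canceled, unsigned long saved_pc,
                               unsigned long cur_pc)
    {
            return !canceled || cur_pc != saved_pc;
    }
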
diff --git a/arch/tile/kernel/smp.c b/arch/tile/kernel/smp.c
index 1cb5ec79de04..75255d90aff3 100644
--- a/arch/tile/kernel/smp.c
+++ b/arch/tile/kernel/smp.c
@@ -212,7 +212,7 @@ void __init ipi_init(void)
 
 		tile.x = cpu_x(cpu);
 		tile.y = cpu_y(cpu);
-		if (hv_get_ipi_pte(tile, 1, &pte) != 0)
+		if (hv_get_ipi_pte(tile, KERNEL_PL, &pte) != 0)
 			panic("Failed to initialize IPI for cpu %d\n", cpu);
 
 		offset = hv_pte_get_pfn(pte) << PAGE_SHIFT;
diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c
index ea2e0ce28380..0d54106be3d6 100644
--- a/arch/tile/kernel/stack.c
+++ b/arch/tile/kernel/stack.c
@@ -30,6 +30,10 @@
 #include <arch/abi.h>
 #include <arch/interrupts.h>
 
+#define KBT_ONGOING	0  /* Backtrace still ongoing */
+#define KBT_DONE	1  /* Backtrace cleanly completed */
+#define KBT_RUNNING	2  /* Can't run backtrace on a running task */
+#define KBT_LOOP	3  /* Backtrace entered a loop */
 
 /* Is address on the specified kernel stack? */
 static int in_kernel_stack(struct KBacktraceIterator *kbt, VirtualAddress sp)
@@ -207,11 +211,11 @@ static int KBacktraceIterator_next_item_inclusive(
 	for (;;) {
 		do {
 			if (!KBacktraceIterator_is_sigreturn(kbt))
-				return 1;
+				return KBT_ONGOING;
 		} while (backtrace_next(&kbt->it));
 
 		if (!KBacktraceIterator_restart(kbt))
-			return 0;
+			return KBT_DONE;
 	}
 }
 
@@ -264,7 +268,7 @@ void KBacktraceIterator_init(struct KBacktraceIterator *kbt,
 	kbt->pgtable = NULL;
 	kbt->verbose = 0;   /* override in caller if desired */
 	kbt->profile = 0;   /* override in caller if desired */
-	kbt->end = 0;
+	kbt->end = KBT_ONGOING;
 	kbt->new_context = 0;
 	if (is_current) {
 		HV_PhysAddr pgdir_pa = hv_inquire_context().page_table;
@@ -290,7 +294,7 @@ void KBacktraceIterator_init(struct KBacktraceIterator *kbt,
 	if (regs == NULL) {
 		if (is_current || t->state == TASK_RUNNING) {
 			/* Can't do this; we need registers */
-			kbt->end = 1;
+			kbt->end = KBT_RUNNING;
 			return;
 		}
 		pc = get_switch_to_pc();
@@ -305,26 +309,29 @@ void KBacktraceIterator_init(struct KBacktraceIterator *kbt,
 	}
 
 	backtrace_init(&kbt->it, read_memory_func, kbt, pc, lr, sp, r52);
-	kbt->end = !KBacktraceIterator_next_item_inclusive(kbt);
+	kbt->end = KBacktraceIterator_next_item_inclusive(kbt);
 }
 EXPORT_SYMBOL(KBacktraceIterator_init);
 
 int KBacktraceIterator_end(struct KBacktraceIterator *kbt)
 {
-	return kbt->end;
+	return kbt->end != KBT_ONGOING;
 }
 EXPORT_SYMBOL(KBacktraceIterator_end);
 
 void KBacktraceIterator_next(struct KBacktraceIterator *kbt)
 {
+	VirtualAddress old_pc = kbt->it.pc, old_sp = kbt->it.sp;
 	kbt->new_context = 0;
-	if (!backtrace_next(&kbt->it) &&
-	    !KBacktraceIterator_restart(kbt)) {
-			kbt->end = 1;
-			return;
-		}
-
-	kbt->end = !KBacktraceIterator_next_item_inclusive(kbt);
+	if (!backtrace_next(&kbt->it) && !KBacktraceIterator_restart(kbt)) {
+		kbt->end = KBT_DONE;
+		return;
+	}
+	kbt->end = KBacktraceIterator_next_item_inclusive(kbt);
+	if (old_pc == kbt->it.pc && old_sp == kbt->it.sp) {
+		/* Trapped in a loop; give up. */
+		kbt->end = KBT_LOOP;
+	}
 }
 EXPORT_SYMBOL(KBacktraceIterator_next);
 
@@ -387,6 +394,8 @@ void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
 			break;
 		}
 	}
+	if (kbt->end == KBT_LOOP)
+		pr_err("Stack dump stopped; next frame identical to this one\n");
 	if (headers)
 		pr_err("Stack dump complete\n");
 }
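
With kbt->end now a small state machine rather than a boolean, a typical
walk looks like this (hypothetical caller, mirroring tile_show_stack
above):

    struct KBacktraceIterator kbt;

    KBacktraceIterator_init(&kbt, task, regs);
    for (; !KBacktraceIterator_end(&kbt); KBacktraceIterator_next(&kbt))
            pr_err("  pc %#lx sp %#lx\n", kbt.it.pc, kbt.it.sp);
    if (kbt.end == KBT_LOOP)
            pr_err("  (stopped: next frame identical to this one)\n");
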
diff --git a/arch/tile/kernel/sys.c b/arch/tile/kernel/sys.c
index f0f87eab8c39..7e764669a022 100644
--- a/arch/tile/kernel/sys.c
+++ b/arch/tile/kernel/sys.c
@@ -110,6 +110,15 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
 #define sys_sync_file_range sys_sync_file_range2
 #endif
 
+/* Call the trampolines to manage pt_regs where necessary. */
+#define sys_execve _sys_execve
+#define sys_sigaltstack _sys_sigaltstack
+#define sys_rt_sigreturn _sys_rt_sigreturn
+#define sys_clone _sys_clone
+#ifndef __tilegx__
+#define sys_cmpxchg_badaddr _sys_cmpxchg_badaddr
+#endif
+
 /*
  * Note that we can't include <linux/unistd.h> here since the header
  * guard will defeat us; <asm/unistd.h> checks for __SYSCALL as well.
diff --git a/arch/tile/kernel/traps.c b/arch/tile/kernel/traps.c
index 0f362dc2c57f..5474fc2e77e8 100644
--- a/arch/tile/kernel/traps.c
+++ b/arch/tile/kernel/traps.c
@@ -260,7 +260,7 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
 		address = regs->pc;
 		break;
 	case INT_UNALIGN_DATA:
-#ifndef __tilegx__  /* FIXME: GX: no single-step yet */
+#ifndef __tilegx__  /* Emulated support for single step debugging */
 		if (unaligned_fixup >= 0) {
 			struct single_step_state *state =
 				current_thread_info()->step_state;
@@ -278,7 +278,7 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
 	case INT_DOUBLE_FAULT:
 		/*
 		 * For double fault, "reason" is actually passed as
-		 * SYSTEM_SAVE_1_2, the hypervisor's double-fault info, so
+		 * SYSTEM_SAVE_K_2, the hypervisor's double-fault info, so
 		 * we can provide the original fault number rather than
 		 * the uninteresting "INT_DOUBLE_FAULT" so the user can
 		 * learn what actually struck while PL0 ICS was set.