Diffstat (limited to 'arch/x86')
-rw-r--r--   arch/x86/Kconfig                                                      |   2
-rw-r--r--   arch/x86/entry/entry_32.S                                             | 171
-rw-r--r--   arch/x86/entry/entry_64.S                                             |  15
-rw-r--r--   arch/x86/include/asm/bug.h                                            |  80
-rw-r--r--   arch/x86/include/asm/page_64.h                                        |  16
-rw-r--r--   arch/x86/include/asm/unwind.h                                         |   2
-rw-r--r--   arch/x86/kernel/Makefile                                              |   5
-rw-r--r--   arch/x86/kernel/dumpstack.c                                           |   5
-rw-r--r--   arch/x86/kernel/dumpstack_32.c                                        |  12
-rw-r--r--   arch/x86/kernel/dumpstack_64.c                                        |  10
-rw-r--r--   arch/x86/kernel/ftrace_32.S                                           | 244
-rw-r--r--   arch/x86/kernel/ftrace_64.S (renamed from arch/x86/kernel/mcount_64.S) |   6
-rw-r--r--   arch/x86/kernel/traps.c                                               |  46
-rw-r--r--   arch/x86/kernel/unwind_frame.c                                        | 234
-rw-r--r--   arch/x86/kernel/unwind_guess.c                                        |   4
-rw-r--r--   arch/x86/kernel/vmlinux.lds.S                                         |   1
-rw-r--r--   arch/x86/lib/clear_page_64.S                                          |  17
-rw-r--r--   arch/x86/um/Makefile                                                  |   2
-rw-r--r--   arch/x86/um/bug.c                                                     |  21
19 files changed, 522 insertions, 371 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index a05571937ad3..2b899858532a 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -126,7 +126,7 @@ config X86
 	select HAVE_EBPF_JIT			if X86_64
 	select HAVE_EFFICIENT_UNALIGNED_ACCESS
 	select HAVE_EXIT_THREAD
-	select HAVE_FENTRY			if X86_64
+	select HAVE_FENTRY			if X86_64 || DYNAMIC_FTRACE
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_FUNCTION_TRACER
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 57f7ec35216e..50bc26949e9e 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -35,16 +35,13 @@
 #include <asm/errno.h>
 #include <asm/segment.h>
 #include <asm/smp.h>
-#include <asm/page_types.h>
 #include <asm/percpu.h>
 #include <asm/processor-flags.h>
-#include <asm/ftrace.h>
 #include <asm/irq_vectors.h>
 #include <asm/cpufeatures.h>
 #include <asm/alternative-asm.h>
 #include <asm/asm.h>
 #include <asm/smap.h>
-#include <asm/export.h>
 #include <asm/frame.h>
 
 	.section .entry.text, "ax"
@@ -585,7 +582,7 @@ ENTRY(iret_exc	)
 	 * will soon execute iret and the tracer was already set to
 	 * the irqstate after the IRET:
 	 */
-	DISABLE_INTERRUPTS(CLBR_EAX)
+	DISABLE_INTERRUPTS(CLBR_ANY)
 	lss	(%esp), %esp			/* switch to espfix segment */
 	jmp	.Lrestore_nocheck
 #endif
@@ -886,172 +883,6 @@ BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
 
 #endif /* CONFIG_HYPERV */
 
-#ifdef CONFIG_FUNCTION_TRACER
-#ifdef CONFIG_DYNAMIC_FTRACE
-
-ENTRY(mcount)
-	ret
-END(mcount)
-
-ENTRY(ftrace_caller)
-	pushl	%eax
-	pushl	%ecx
-	pushl	%edx
-	pushl	$0				/* Pass NULL as regs pointer */
-	movl	4*4(%esp), %eax
-	movl	0x4(%ebp), %edx
-	movl	function_trace_op, %ecx
-	subl	$MCOUNT_INSN_SIZE, %eax
-
-.globl ftrace_call
-ftrace_call:
-	call	ftrace_stub
-
-	addl	$4, %esp			/* skip NULL pointer */
-	popl	%edx
-	popl	%ecx
-	popl	%eax
-.Lftrace_ret:
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-.globl ftrace_graph_call
-ftrace_graph_call:
-	jmp	ftrace_stub
-#endif
-
-/* This is weak to keep gas from relaxing the jumps */
-WEAK(ftrace_stub)
-	ret
-END(ftrace_caller)
-
-ENTRY(ftrace_regs_caller)
-	pushf	/* push flags before compare (in cs location) */
-
-	/*
-	 * i386 does not save SS and ESP when coming from kernel.
-	 * Instead, to get sp, &regs->sp is used (see ptrace.h).
-	 * Unfortunately, that means eflags must be at the same location
-	 * as the current return ip is. We move the return ip into the
-	 * ip location, and move flags into the return ip location.
-	 */
-	pushl	4(%esp)				/* save return ip into ip slot */
-
-	pushl	$0				/* Load 0 into orig_ax */
-	pushl	%gs
-	pushl	%fs
-	pushl	%es
-	pushl	%ds
-	pushl	%eax
-	pushl	%ebp
-	pushl	%edi
-	pushl	%esi
-	pushl	%edx
-	pushl	%ecx
-	pushl	%ebx
-
-	movl	13*4(%esp), %eax		/* Get the saved flags */
-	movl	%eax, 14*4(%esp)		/* Move saved flags into regs->flags location */
-						/* clobbering return ip */
-	movl	$__KERNEL_CS, 13*4(%esp)
-
-	movl	12*4(%esp), %eax		/* Load ip (1st parameter) */
-	subl	$MCOUNT_INSN_SIZE, %eax		/* Adjust ip */
-	movl	0x4(%ebp), %edx			/* Load parent ip (2nd parameter) */
-	movl	function_trace_op, %ecx		/* Save ftrace_pos in 3rd parameter */
-	pushl	%esp				/* Save pt_regs as 4th parameter */
-
-GLOBAL(ftrace_regs_call)
-	call	ftrace_stub
-
-	addl	$4, %esp			/* Skip pt_regs */
-	movl	14*4(%esp), %eax		/* Move flags back into cs */
-	movl	%eax, 13*4(%esp)		/* Needed to keep addl	from modifying flags */
-	movl	12*4(%esp), %eax		/* Get return ip from regs->ip */
-	movl	%eax, 14*4(%esp)		/* Put return ip back for ret */
-
-	popl	%ebx
-	popl	%ecx
-	popl	%edx
-	popl	%esi
-	popl	%edi
-	popl	%ebp
-	popl	%eax
-	popl	%ds
-	popl	%es
-	popl	%fs
-	popl	%gs
-	addl	$8, %esp			/* Skip orig_ax and ip */
-	popf					/* Pop flags at end (no addl to corrupt flags) */
-	jmp	.Lftrace_ret
-
-	popf
-	jmp	ftrace_stub
-#else /* ! CONFIG_DYNAMIC_FTRACE */
-
-ENTRY(mcount)
-	cmpl	$__PAGE_OFFSET, %esp
-	jb	ftrace_stub			/* Paging not enabled yet? */
-
-	cmpl	$ftrace_stub, ftrace_trace_function
-	jnz	.Ltrace
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	cmpl	$ftrace_stub, ftrace_graph_return
-	jnz	ftrace_graph_caller
-
-	cmpl	$ftrace_graph_entry_stub, ftrace_graph_entry
-	jnz	ftrace_graph_caller
-#endif
-.globl ftrace_stub
-ftrace_stub:
-	ret
-
-	/* taken from glibc */
-.Ltrace:
-	pushl	%eax
-	pushl	%ecx
-	pushl	%edx
-	movl	0xc(%esp), %eax
-	movl	0x4(%ebp), %edx
-	subl	$MCOUNT_INSN_SIZE, %eax
-
-	call	*ftrace_trace_function
-
-	popl	%edx
-	popl	%ecx
-	popl	%eax
-	jmp	ftrace_stub
-END(mcount)
-#endif /* CONFIG_DYNAMIC_FTRACE */
-EXPORT_SYMBOL(mcount)
-#endif /* CONFIG_FUNCTION_TRACER */
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-ENTRY(ftrace_graph_caller)
-	pushl	%eax
-	pushl	%ecx
-	pushl	%edx
-	movl	0xc(%esp), %eax
-	lea	0x4(%ebp), %edx
-	movl	(%ebp), %ecx
-	subl	$MCOUNT_INSN_SIZE, %eax
-	call	prepare_ftrace_return
-	popl	%edx
-	popl	%ecx
-	popl	%eax
-	ret
-END(ftrace_graph_caller)
-
-.globl return_to_handler
-return_to_handler:
-	pushl	%eax
-	pushl	%edx
-	movl	%ebp, %eax
-	call	ftrace_return_to_handler
-	movl	%eax, %ecx
-	popl	%edx
-	popl	%eax
-	jmp	*%ecx
-#endif
-
 #ifdef CONFIG_TRACING
 ENTRY(trace_page_fault)
 	ASM_CLAC
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 044d18ebc43c..d2b2a2948ffe 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -212,7 +212,7 @@ entry_SYSCALL_64_fastpath:
 	 * If we see that no exit work is required (which we are required
 	 * to check with IRQs off), then we can go straight to SYSRET64.
 	 */
-	DISABLE_INTERRUPTS(CLBR_NONE)
+	DISABLE_INTERRUPTS(CLBR_ANY)
 	TRACE_IRQS_OFF
 	movq	PER_CPU_VAR(current_task), %r11
 	testl	$_TIF_ALLWORK_MASK, TASK_TI_flags(%r11)
@@ -233,7 +233,7 @@ entry_SYSCALL_64_fastpath:
 	 * raise(3) will trigger this, for example.  IRQs are off.
 	 */
 	TRACE_IRQS_ON
-	ENABLE_INTERRUPTS(CLBR_NONE)
+	ENABLE_INTERRUPTS(CLBR_ANY)
 	SAVE_EXTRA_REGS
 	movq	%rsp, %rdi
 	call	syscall_return_slowpath	/* returns with IRQs disabled */
@@ -343,7 +343,7 @@ ENTRY(stub_ptregs_64)
 	 * Called from fast path -- disable IRQs again, pop return address
 	 * and jump to slow path
 	 */
-	DISABLE_INTERRUPTS(CLBR_NONE)
+	DISABLE_INTERRUPTS(CLBR_ANY)
 	TRACE_IRQS_OFF
 	popq	%rax
 	jmp	entry_SYSCALL64_slow_path
@@ -518,7 +518,7 @@ common_interrupt:
 	interrupt do_IRQ
 	/* 0(%rsp): old RSP */
 ret_from_intr:
-	DISABLE_INTERRUPTS(CLBR_NONE)
+	DISABLE_INTERRUPTS(CLBR_ANY)
 	TRACE_IRQS_OFF
 	decl	PER_CPU_VAR(irq_count)
 
@@ -1051,7 +1051,7 @@ END(paranoid_entry)
  * On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it)
  */
 ENTRY(paranoid_exit)
-	DISABLE_INTERRUPTS(CLBR_NONE)
+	DISABLE_INTERRUPTS(CLBR_ANY)
 	TRACE_IRQS_OFF_DEBUG
 	testl	%ebx, %ebx			/* swapgs needed? */
 	jnz	paranoid_exit_no_swapgs
@@ -1156,10 +1156,9 @@ END(error_entry)
  *   0: user gsbase is loaded, we need SWAPGS and standard preparation for return to usermode
  */
 ENTRY(error_exit)
-	movl	%ebx, %eax
-	DISABLE_INTERRUPTS(CLBR_NONE)
+	DISABLE_INTERRUPTS(CLBR_ANY)
 	TRACE_IRQS_OFF
-	testl	%eax, %eax
+	testl	%ebx, %ebx
 	jnz	retint_kernel
 	jmp	retint_user
 END(error_exit)
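
Note on the CLBR_NONE -> CLBR_ANY changes in both entry files: they only matter for paravirt builds. Natively, DISABLE_INTERRUPTS()/ENABLE_INTERRUPTS() are plain cli/sti and the clobber argument is ignored, while under CONFIG_PARAVIRT the macros expand to a patchable call site and the CLBR_* value tells the patching machinery which registers that site may clobber. A simplified sketch of the idea (not the real definitions from asm/irqflags.h and asm/paravirt.h):

	/* Simplified sketch -- the real macros carry more plumbing. */
	#ifndef CONFIG_PARAVIRT
	# define DISABLE_INTERRUPTS(clobbers)	cli	/* clobbers ignored */
	# define ENABLE_INTERRUPTS(clobbers)	sti
	#else
	/*
	 * An indirect call is patched in here.  CLBR_NONE would require the
	 * site to preserve every register around that call; CLBR_ANY says any
	 * caller-saved register may be trashed, which is fine on these slow
	 * paths and avoids extra save/restore code.
	 */
	#endif
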
diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h
index ba38ebbaced3..39e702d90cdb 100644
--- a/arch/x86/include/asm/bug.h
+++ b/arch/x86/include/asm/bug.h
@@ -1,36 +1,82 @@
 #ifndef _ASM_X86_BUG_H
 #define _ASM_X86_BUG_H
 
-#define HAVE_ARCH_BUG
+#include <linux/stringify.h>
 
-#ifdef CONFIG_DEBUG_BUGVERBOSE
+/*
+ * Since some emulators terminate on UD2, we cannot use it for WARN.
+ * Since various instruction decoders disagree on the length of UD1,
+ * we cannot use it either. So use UD0 for WARN.
+ *
+ * (binutils knows about "ud1" but {en,de}codes it as 2 bytes, whereas
+ *  our kernel decoder thinks it takes a ModRM byte, which seems consistent
+ *  with various things like the Intel SDM instruction encoding rules)
+ */
+
+#define ASM_UD0		".byte 0x0f, 0xff"
+#define ASM_UD1		".byte 0x0f, 0xb9" /* + ModRM */
+#define ASM_UD2		".byte 0x0f, 0x0b"
+
+#define INSN_UD0	0xff0f
+#define INSN_UD2	0x0b0f
+
+#define LEN_UD0		2
+
+#ifdef CONFIG_GENERIC_BUG
 
 #ifdef CONFIG_X86_32
-# define __BUG_C0	"2:\t.long 1b, %c0\n"
+# define __BUG_REL(val)	".long " __stringify(val)
 #else
-# define __BUG_C0	"2:\t.long 1b - 2b, %c0 - 2b\n"
+# define __BUG_REL(val)	".long " __stringify(val) " - 2b"
 #endif
 
-#define BUG()							\
-do {								\
-	asm volatile("1:\tud2\n"				\
-		     ".pushsection __bug_table,\"a\"\n"		\
-		     __BUG_C0					\
-		     "\t.word %c1, 0\n"				\
-		     "\t.org 2b+%c2\n"				\
-		     ".popsection"				\
-		     : : "i" (__FILE__), "i" (__LINE__),	\
-		     "i" (sizeof(struct bug_entry)));		\
-	unreachable();						\
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+
+#define _BUG_FLAGS(ins, flags)						\
+do {									\
+	asm volatile("1:\t" ins "\n"					\
+		     ".pushsection __bug_table,\"a\"\n"			\
+		     "2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n"	\
+		     "\t"  __BUG_REL(%c0) "\t# bug_entry::file\n"	\
+		     "\t.word %c1"        "\t# bug_entry::line\n"	\
+		     "\t.word %c2"        "\t# bug_entry::flags\n"	\
+		     "\t.org 2b+%c3\n"					\
+		     ".popsection"					\
+		     : : "i" (__FILE__), "i" (__LINE__),		\
+			 "i" (flags),					\
+			 "i" (sizeof(struct bug_entry)));		\
 } while (0)
 
+#else /* !CONFIG_DEBUG_BUGVERBOSE */
+
+#define _BUG_FLAGS(ins, flags)						\
+do {									\
+	asm volatile("1:\t" ins "\n"					\
+		     ".pushsection __bug_table,\"a\"\n"			\
+		     "2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n"	\
+		     "\t.word %c0"        "\t# bug_entry::flags\n"	\
+		     "\t.org 2b+%c1\n"					\
+		     ".popsection"					\
+		     : : "i" (flags),					\
+			 "i" (sizeof(struct bug_entry)));		\
+} while (0)
+
+#endif /* CONFIG_DEBUG_BUGVERBOSE */
+
 #else
+
+#define _BUG_FLAGS(ins, flags)  asm volatile(ins)
+
+#endif /* CONFIG_GENERIC_BUG */
+
+#define HAVE_ARCH_BUG
 #define BUG()							\
 do {								\
-	asm volatile("ud2");					\
+	_BUG_FLAGS(ASM_UD2, 0);					\
 	unreachable();						\
 } while (0)
-#endif
+
+#define __WARN_FLAGS(flags)	_BUG_FLAGS(ASM_UD0, BUGFLAG_WARNING|(flags))
 
 #include <asm-generic/bug.h>
 
diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
index b3bebf9e5746..b4a0d43248cf 100644
--- a/arch/x86/include/asm/page_64.h
+++ b/arch/x86/include/asm/page_64.h
@@ -4,6 +4,7 @@
 #include <asm/page_64_types.h>
 
 #ifndef __ASSEMBLY__
+#include <asm/alternative.h>
 
 /* duplicated to the one in bootmem.h */
 extern unsigned long max_pfn;
@@ -34,7 +35,20 @@ extern unsigned long __phys_addr_symbol(unsigned long);
 #define pfn_valid(pfn)          ((pfn) < max_pfn)
 #endif
 
-void clear_page(void *page);
+void clear_page_orig(void *page);
+void clear_page_rep(void *page);
+void clear_page_erms(void *page);
+
+static inline void clear_page(void *page)
+{
+	alternative_call_2(clear_page_orig,
+			   clear_page_rep, X86_FEATURE_REP_GOOD,
+			   clear_page_erms, X86_FEATURE_ERMS,
+			   "=D" (page),
+			   "0" (page)
+			   : "memory", "rax", "rcx");
+}
+
 void copy_page(void *to, void *from);
 
 #endif	/* !__ASSEMBLY__ */
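
With alternative_call_2(), every clear_page() call site is patched once at boot to call the variant matching the CPU, instead of bouncing through the old out-of-line clear_page stub that clear_page_64.S used to patch (see its diff further down). A purely illustrative C equivalent of the resulting dispatch, with made-up feature-test variables standing in for the X86_FEATURE_* checks:

	#include <stdbool.h>

	void clear_page_orig(void *page);	/* unrolled 64-byte store loop */
	void clear_page_rep(void *page);	/* rep stosq                   */
	void clear_page_erms(void *page);	/* rep stosb                   */

	/* Hypothetical flags; the real code tests CPU feature bits at patch time. */
	extern bool cpu_has_erms, cpu_has_rep_good;

	static inline void clear_page_equiv(void *page)
	{
		if (cpu_has_erms)		/* X86_FEATURE_ERMS wins if set */
			clear_page_erms(page);
		else if (cpu_has_rep_good)	/* X86_FEATURE_REP_GOOD         */
			clear_page_rep(page);
		else
			clear_page_orig(page);
	}
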
diff --git a/arch/x86/include/asm/unwind.h b/arch/x86/include/asm/unwind.h
index 6fa75b17aec3..9b10dcd51716 100644
--- a/arch/x86/include/asm/unwind.h
+++ b/arch/x86/include/asm/unwind.h
@@ -12,8 +12,10 @@ struct unwind_state {
 	struct task_struct *task;
 	int graph_idx;
 #ifdef CONFIG_FRAME_POINTER
+	bool got_irq;
 	unsigned long *bp, *orig_sp;
 	struct pt_regs *regs;
+	unsigned long ip;
 #else
 	unsigned long *sp;
 #endif
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 84c00592d359..4b994232cb57 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -27,7 +27,7 @@ KASAN_SANITIZE_stacktrace.o := n
 
 OBJECT_FILES_NON_STANDARD_head_$(BITS).o		:= y
 OBJECT_FILES_NON_STANDARD_relocate_kernel_$(BITS).o	:= y
-OBJECT_FILES_NON_STANDARD_mcount_$(BITS).o		:= y
+OBJECT_FILES_NON_STANDARD_ftrace_$(BITS).o		:= y
 OBJECT_FILES_NON_STANDARD_test_nx.o			:= y
 
 # If instrumentation of this dir is enabled, boot hangs during first second.
@@ -46,7 +46,7 @@ obj-$(CONFIG_MODIFY_LDT_SYSCALL)	+= ldt.o
 obj-y			+= setup.o x86_init.o i8259.o irqinit.o jump_label.o
 obj-$(CONFIG_IRQ_WORK)  += irq_work.o
 obj-y			+= probe_roms.o
-obj-$(CONFIG_X86_64)	+= sys_x86_64.o mcount_64.o
+obj-$(CONFIG_X86_64)	+= sys_x86_64.o
 obj-$(CONFIG_X86_ESPFIX64)	+= espfix_64.o
 obj-$(CONFIG_SYSFS)	+= ksysfs.o
 obj-y			+= bootflag.o e820.o
@@ -82,6 +82,7 @@ obj-y				+= apic/
 obj-$(CONFIG_X86_REBOOTFIXUPS)	+= reboot_fixups_32.o
 obj-$(CONFIG_DYNAMIC_FTRACE)	+= ftrace.o
 obj-$(CONFIG_LIVEPATCH)	+= livepatch.o
+obj-$(CONFIG_FUNCTION_TRACER)	+= ftrace_$(BITS).o
 obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
 obj-$(CONFIG_FTRACE_SYSCALLS)	+= ftrace.o
 obj-$(CONFIG_X86_TSC)		+= trace_clock.o
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 09d4ac0d2661..dbce3cca94cb 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -77,7 +77,7 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
 	 * - softirq stack
 	 * - hardirq stack
 	 */
-	for (regs = NULL; stack; stack = stack_info.next_sp) {
+	for (regs = NULL; stack; stack = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
 		const char *stack_name;
 
 		/*
@@ -289,9 +289,6 @@ void die(const char *str, struct pt_regs *regs, long err)
 	unsigned long flags = oops_begin();
 	int sig = SIGSEGV;
 
-	if (!user_mode(regs))
-		report_bug(regs->ip, regs);
-
 	if (__die(str, regs, err))
 		sig = 0;
 	oops_end(flags, regs, sig);
diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
index b0b3a3df7c20..e5f0b40e66d2 100644
--- a/arch/x86/kernel/dumpstack_32.c
+++ b/arch/x86/kernel/dumpstack_32.c
@@ -162,15 +162,3 @@ void show_regs(struct pt_regs *regs)
 	}
 	pr_cont("\n");
 }
-
-int is_valid_bugaddr(unsigned long ip)
-{
-	unsigned short ud2;
-
-	if (ip < PAGE_OFFSET)
-		return 0;
-	if (probe_kernel_address((unsigned short *)ip, ud2))
-		return 0;
-
-	return ud2 == 0x0b0f;
-}
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index a8b117e93b46..3e1471d57487 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -178,13 +178,3 @@ void show_regs(struct pt_regs *regs)
 	}
 	pr_cont("\n");
 }
-
-int is_valid_bugaddr(unsigned long ip)
-{
-	unsigned short ud2;
-
-	if (__copy_from_user(&ud2, (const void __user *) ip, sizeof(ud2)))
-		return 0;
-
-	return ud2 == 0x0b0f;
-}
diff --git a/arch/x86/kernel/ftrace_32.S b/arch/x86/kernel/ftrace_32.S
new file mode 100644
index 000000000000..722a145b4139
--- /dev/null
+++ b/arch/x86/kernel/ftrace_32.S
@@ -0,0 +1,244 @@
+/*
+ *  Copyright (C) 2017  Steven Rostedt, VMware Inc.
+ */
+
+#include <linux/linkage.h>
+#include <asm/page_types.h>
+#include <asm/segment.h>
+#include <asm/export.h>
+#include <asm/ftrace.h>
+
+#ifdef CC_USING_FENTRY
+# define function_hook	__fentry__
+EXPORT_SYMBOL(__fentry__)
+#else
+# define function_hook	mcount
+EXPORT_SYMBOL(mcount)
+#endif
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+/* mcount uses a frame pointer even if CONFIG_FRAME_POINTER is not set */
+#if !defined(CC_USING_FENTRY) || defined(CONFIG_FRAME_POINTER)
+# define USING_FRAME_POINTER
+#endif
+
+#ifdef USING_FRAME_POINTER
+# define MCOUNT_FRAME			1	/* using frame = true  */
+#else
+# define MCOUNT_FRAME			0	/* using frame = false */
+#endif
+
+ENTRY(function_hook)
+	ret
+END(function_hook)
+
+ENTRY(ftrace_caller)
+
+#ifdef USING_FRAME_POINTER
+# ifdef CC_USING_FENTRY
+	/*
+	 * Frame pointers are of ip followed by bp.
+	 * Since fentry is an immediate jump, we are left with
+	 * parent-ip, function-ip. We need to add a frame with
+	 * parent-ip followed by ebp.
+	 */
+	pushl	4(%esp)				/* parent ip */
+	pushl	%ebp
+	movl	%esp, %ebp
+	pushl	2*4(%esp)			/* function ip */
+# endif
+	/* For mcount, the function ip is directly above */
+	pushl	%ebp
+	movl	%esp, %ebp
+#endif
+	pushl	%eax
+	pushl	%ecx
+	pushl	%edx
+	pushl	$0				/* Pass NULL as regs pointer */
+
+#ifdef USING_FRAME_POINTER
+	/* Load parent ebp into edx */
+	movl	4*4(%esp), %edx
+#else
+	/* There's no frame pointer, load the appropriate stack addr instead */
+	lea	4*4(%esp), %edx
+#endif
+
+	movl	(MCOUNT_FRAME+4)*4(%esp), %eax	/* load the rip */
+	/* Get the parent ip */
+	movl	4(%edx), %edx			/* edx has ebp */
+
+	movl	function_trace_op, %ecx
+	subl	$MCOUNT_INSN_SIZE, %eax
+
+.globl ftrace_call
+ftrace_call:
+	call	ftrace_stub
+
+	addl	$4, %esp			/* skip NULL pointer */
+	popl	%edx
+	popl	%ecx
+	popl	%eax
+#ifdef USING_FRAME_POINTER
+	popl	%ebp
+# ifdef CC_USING_FENTRY
+	addl	$4,%esp				/* skip function ip */
+	popl	%ebp				/* this is the orig bp */
+	addl	$4, %esp			/* skip parent ip */
+# endif
+#endif
+.Lftrace_ret:
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+.globl ftrace_graph_call
+ftrace_graph_call:
+	jmp	ftrace_stub
+#endif
+
+/* This is weak to keep gas from relaxing the jumps */
+WEAK(ftrace_stub)
+	ret
+END(ftrace_caller)
+
+ENTRY(ftrace_regs_caller)
+	/*
+	 * i386 does not save SS and ESP when coming from kernel.
+	 * Instead, to get sp, &regs->sp is used (see ptrace.h).
+	 * Unfortunately, that means eflags must be at the same location
+	 * as the current return ip is. We move the return ip into the
+	 * regs->ip location, and move flags into the return ip location.
+	 */
+	pushl	$__KERNEL_CS
+	pushl	4(%esp)				/* Save the return ip */
+	pushl	$0				/* Load 0 into orig_ax */
+	pushl	%gs
+	pushl	%fs
+	pushl	%es
+	pushl	%ds
+	pushl	%eax
+
+	/* Get flags and place them into the return ip slot */
+	pushf
+	popl	%eax
+	movl	%eax, 8*4(%esp)
+
+	pushl	%ebp
+	pushl	%edi
+	pushl	%esi
+	pushl	%edx
+	pushl	%ecx
+	pushl	%ebx
+
+	movl	12*4(%esp), %eax		/* Load ip (1st parameter) */
+	subl	$MCOUNT_INSN_SIZE, %eax		/* Adjust ip */
+#ifdef CC_USING_FENTRY
+	movl	15*4(%esp), %edx		/* Load parent ip (2nd parameter) */
+#else
+	movl	0x4(%ebp), %edx			/* Load parent ip (2nd parameter) */
+#endif
+	movl	function_trace_op, %ecx		/* Save ftrace_pos in 3rd parameter */
+	pushl	%esp				/* Save pt_regs as 4th parameter */
+
+GLOBAL(ftrace_regs_call)
+	call	ftrace_stub
+
+	addl	$4, %esp			/* Skip pt_regs */
+
+	/* restore flags */
+	push	14*4(%esp)
+	popf
+
+	/* Move return ip back to its original location */
+	movl	12*4(%esp), %eax
+	movl	%eax, 14*4(%esp)
+
+	popl	%ebx
+	popl	%ecx
+	popl	%edx
+	popl	%esi
+	popl	%edi
+	popl	%ebp
+	popl	%eax
+	popl	%ds
+	popl	%es
+	popl	%fs
+	popl	%gs
+
+	/* use lea to not affect flags */
+	lea	3*4(%esp), %esp			/* Skip orig_ax, ip and cs */
+
+	jmp	.Lftrace_ret
+#else /* ! CONFIG_DYNAMIC_FTRACE */
+
+ENTRY(function_hook)
+	cmpl	$__PAGE_OFFSET, %esp
+	jb	ftrace_stub			/* Paging not enabled yet? */
+
+	cmpl	$ftrace_stub, ftrace_trace_function
+	jnz	.Ltrace
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	cmpl	$ftrace_stub, ftrace_graph_return
+	jnz	ftrace_graph_caller
+
+	cmpl	$ftrace_graph_entry_stub, ftrace_graph_entry
+	jnz	ftrace_graph_caller
+#endif
+.globl ftrace_stub
+ftrace_stub:
+	ret
+
+	/* taken from glibc */
+.Ltrace:
+	pushl	%eax
+	pushl	%ecx
+	pushl	%edx
+	movl	0xc(%esp), %eax
+	movl	0x4(%ebp), %edx
+	subl	$MCOUNT_INSN_SIZE, %eax
+
+	call	*ftrace_trace_function
+
+	popl	%edx
+	popl	%ecx
+	popl	%eax
+	jmp	ftrace_stub
+END(function_hook)
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ENTRY(ftrace_graph_caller)
+	pushl	%eax
+	pushl	%ecx
+	pushl	%edx
+	movl	3*4(%esp), %eax
+	/* Even with frame pointers, fentry doesn't have one here */
+#ifdef CC_USING_FENTRY
+	lea	4*4(%esp), %edx
+	movl	$0, %ecx
+#else
+	lea	0x4(%ebp), %edx
+	movl	(%ebp), %ecx
+#endif
+	subl	$MCOUNT_INSN_SIZE, %eax
+	call	prepare_ftrace_return
+	popl	%edx
+	popl	%ecx
+	popl	%eax
+	ret
+END(ftrace_graph_caller)
+
+.globl return_to_handler
+return_to_handler:
+	pushl	%eax
+	pushl	%edx
+#ifdef CC_USING_FENTRY
+	movl	$0, %eax
+#else
+	movl	%ebp, %eax
+#endif
+	call	ftrace_return_to_handler
+	movl	%eax, %ecx
+	popl	%edx
+	popl	%eax
+	jmp	*%ecx
+#endif
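
The CC_USING_FENTRY special cases in the new file come down to where the compiler places the hook; a compact illustration of what gcc emits per traced function (simplified, exact sequences vary by compiler version):

	/*
	 * Illustrative only.  With -pg, mcount is called after the frame is
	 * set up, so 0x4(%ebp) already holds the parent's return address.
	 * With -mfentry, __fentry__ is the very first instruction -- no frame
	 * exists yet, which is why ftrace_caller above builds one by hand.
	 */
	void traced_function(void)
	{
		/* -pg (mcount):   push %ebp ; mov %esp,%ebp ; call mcount      */
		/* -mfentry:       call __fentry__ ; push %ebp ; mov %esp,%ebp  */
	}
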
diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/ftrace_64.S
index 7b0d3da52fb4..1dfac634bbf7 100644
--- a/arch/x86/kernel/mcount_64.S
+++ b/arch/x86/kernel/ftrace_64.S
@@ -1,6 +1,4 @@
 /*
- *  linux/arch/x86_64/mcount_64.S
- *
  *  Copyright (C) 2014  Steven Rostedt, Red Hat Inc
  */
 
@@ -13,9 +11,6 @@
 	.code64
 	.section .entry.text, "ax"
 
-
-#ifdef CONFIG_FUNCTION_TRACER
-
 #ifdef CC_USING_FENTRY
 # define function_hook	__fentry__
 EXPORT_SYMBOL(__fentry__)
@@ -297,7 +292,6 @@ trace:
 	jmp fgraph_trace
 END(function_hook)
 #endif /* CONFIG_DYNAMIC_FTRACE */
-#endif /* CONFIG_FUNCTION_TRACER */
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 ENTRY(ftrace_graph_caller)
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 4e496379a871..3995d3a777d4 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -169,6 +169,37 @@ void ist_end_non_atomic(void)
 	preempt_disable();
 }
 
+int is_valid_bugaddr(unsigned long addr)
+{
+	unsigned short ud;
+
+	if (addr < TASK_SIZE_MAX)
+		return 0;
+
+	if (probe_kernel_address((unsigned short *)addr, ud))
+		return 0;
+
+	return ud == INSN_UD0 || ud == INSN_UD2;
+}
+
+static int fixup_bug(struct pt_regs *regs, int trapnr)
+{
+	if (trapnr != X86_TRAP_UD)
+		return 0;
+
+	switch (report_bug(regs->ip, regs)) {
+	case BUG_TRAP_TYPE_NONE:
+	case BUG_TRAP_TYPE_BUG:
+		break;
+
+	case BUG_TRAP_TYPE_WARN:
+		regs->ip += LEN_UD0;
+		return 1;
+	}
+
+	return 0;
+}
+
 static nokprobe_inline int
 do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
 		  struct pt_regs *regs,	long error_code)
@@ -187,12 +218,15 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
 	}
 
 	if (!user_mode(regs)) {
-		if (!fixup_exception(regs, trapnr)) {
-			tsk->thread.error_code = error_code;
-			tsk->thread.trap_nr = trapnr;
-			die(str, regs, error_code);
-		}
-		return 0;
+		if (fixup_exception(regs, trapnr))
+			return 0;
+
+		if (fixup_bug(regs, trapnr))
+			return 0;
+
+		tsk->thread.error_code = error_code;
+		tsk->thread.trap_nr = trapnr;
+		die(str, regs, error_code);
 	}
 
 	return -1;
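
is_valid_bugaddr() now checks the two bytes at the faulting address against INSN_UD0/INSN_UD2 from asm/bug.h, i.e. the opcode bytes of ud0/ud2 read as a little-endian 16-bit value. A standalone userspace sanity check of that encoding (not kernel code; the kernel reads the bytes via probe_kernel_address()):

	#include <assert.h>
	#include <stdint.h>
	#include <string.h>

	#define INSN_UD0	0xff0f		/* bytes 0x0f, 0xff */
	#define INSN_UD2	0x0b0f		/* bytes 0x0f, 0x0b */

	int main(void)
	{
		const unsigned char ud0[] = { 0x0f, 0xff };	/* ASM_UD0 */
		const unsigned char ud2[] = { 0x0f, 0x0b };	/* ASM_UD2 */
		uint16_t v;

		memcpy(&v, ud0, sizeof(v));
		assert(v == INSN_UD0);		/* holds on little-endian x86 */
		memcpy(&v, ud2, sizeof(v));
		assert(v == INSN_UD2);
		return 0;
	}
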
diff --git a/arch/x86/kernel/unwind_frame.c b/arch/x86/kernel/unwind_frame.c
index 08339262b666..fec70fe3b1ec 100644
--- a/arch/x86/kernel/unwind_frame.c
+++ b/arch/x86/kernel/unwind_frame.c
@@ -1,6 +1,8 @@
 #include <linux/sched.h>
 #include <linux/sched/task.h>
 #include <linux/sched/task_stack.h>
+#include <linux/interrupt.h>
+#include <asm/sections.h>
 #include <asm/ptrace.h>
 #include <asm/bitops.h>
 #include <asm/stacktrace.h>
@@ -23,53 +25,53 @@
 	val;						\
 })
 
-static void unwind_dump(struct unwind_state *state, unsigned long *sp)
+static void unwind_dump(struct unwind_state *state)
 {
 	static bool dumped_before = false;
 	bool prev_zero, zero = false;
-	unsigned long word;
+	unsigned long word, *sp;
+	struct stack_info stack_info = {0};
+	unsigned long visit_mask = 0;
 
 	if (dumped_before)
 		return;
 
 	dumped_before = true;
 
-	printk_deferred("unwind stack type:%d next_sp:%p mask:%lx graph_idx:%d\n",
+	printk_deferred("unwind stack type:%d next_sp:%p mask:0x%lx graph_idx:%d\n",
 			state->stack_info.type, state->stack_info.next_sp,
 			state->stack_mask, state->graph_idx);
 
-	for (sp = state->orig_sp; sp < state->stack_info.end; sp++) {
-		word = READ_ONCE_NOCHECK(*sp);
+	for (sp = state->orig_sp; sp; sp = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
+		if (get_stack_info(sp, state->task, &stack_info, &visit_mask))
+			break;
 
-		prev_zero = zero;
-		zero = word == 0;
+		for (; sp < stack_info.end; sp++) {
 
-		if (zero) {
-			if (!prev_zero)
-				printk_deferred("%p: %016x ...\n", sp, 0);
-			continue;
-		}
+			word = READ_ONCE_NOCHECK(*sp);
+
+			prev_zero = zero;
+			zero = word == 0;
+
+			if (zero) {
+				if (!prev_zero)
+					printk_deferred("%p: %0*x ...\n",
+							sp, BITS_PER_LONG/4, 0);
+				continue;
+			}
 
-		printk_deferred("%p: %016lx (%pB)\n", sp, word, (void *)word);
+			printk_deferred("%p: %0*lx (%pB)\n",
+					sp, BITS_PER_LONG/4, word, (void *)word);
+		}
 	}
 }
 
 unsigned long unwind_get_return_address(struct unwind_state *state)
 {
-	unsigned long addr;
-	unsigned long *addr_p = unwind_get_return_address_ptr(state);
-
 	if (unwind_done(state))
 		return 0;
 
-	if (state->regs && user_mode(state->regs))
-		return 0;
-
-	addr = READ_ONCE_TASK_STACK(state->task, *addr_p);
-	addr = ftrace_graph_ret_addr(state->task, &state->graph_idx, addr,
-				     addr_p);
-
-	return __kernel_text_address(addr) ? addr : 0;
+	return __kernel_text_address(state->ip) ? state->ip : 0;
 }
 EXPORT_SYMBOL_GPL(unwind_get_return_address);
 
@@ -82,16 +84,41 @@ static size_t regs_size(struct pt_regs *regs)
 	return sizeof(*regs);
 }
 
+static bool in_entry_code(unsigned long ip)
+{
+	char *addr = (char *)ip;
+
+	if (addr >= __entry_text_start && addr < __entry_text_end)
+		return true;
+
+#if defined(CONFIG_FUNCTION_GRAPH_TRACER) || defined(CONFIG_KASAN)
+	if (addr >= __irqentry_text_start && addr < __irqentry_text_end)
+		return true;
+#endif
+
+	return false;
+}
+
+static inline unsigned long *last_frame(struct unwind_state *state)
+{
+	return (unsigned long *)task_pt_regs(state->task) - 2;
+}
+
 #ifdef CONFIG_X86_32
 #define GCC_REALIGN_WORDS 3
 #else
 #define GCC_REALIGN_WORDS 1
 #endif
 
+static inline unsigned long *last_aligned_frame(struct unwind_state *state)
+{
+	return last_frame(state) - GCC_REALIGN_WORDS;
+}
+
 static bool is_last_task_frame(struct unwind_state *state)
 {
-	unsigned long *last_bp = (unsigned long *)task_pt_regs(state->task) - 2;
-	unsigned long *aligned_bp = last_bp - GCC_REALIGN_WORDS;
+	unsigned long *last_bp = last_frame(state);
+	unsigned long *aligned_bp = last_aligned_frame(state);
 
 	/*
 	 * We have to check for the last task frame at two different locations
@@ -135,26 +162,70 @@ static struct pt_regs *decode_frame_pointer(unsigned long *bp)
 	return (struct pt_regs *)(regs & ~0x1);
 }
 
-static bool update_stack_state(struct unwind_state *state, void *addr,
-			       size_t len)
+static bool update_stack_state(struct unwind_state *state,
+			       unsigned long *next_bp)
 {
 	struct stack_info *info = &state->stack_info;
-	enum stack_type orig_type = info->type;
+	enum stack_type prev_type = info->type;
+	struct pt_regs *regs;
+	unsigned long *frame, *prev_frame_end, *addr_p, addr;
+	size_t len;
+
+	if (state->regs)
+		prev_frame_end = (void *)state->regs + regs_size(state->regs);
+	else
+		prev_frame_end = (void *)state->bp + FRAME_HEADER_SIZE;
+
+	/* Is the next frame pointer an encoded pointer to pt_regs? */
+	regs = decode_frame_pointer(next_bp);
+	if (regs) {
+		frame = (unsigned long *)regs;
+		len = regs_size(regs);
+		state->got_irq = true;
+	} else {
+		frame = next_bp;
+		len = FRAME_HEADER_SIZE;
+	}
 
 	/*
-	 * If addr isn't on the current stack, switch to the next one.
+	 * If the next bp isn't on the current stack, switch to the next one.
 	 *
 	 * We may have to traverse multiple stacks to deal with the possibility
-	 * that 'info->next_sp' could point to an empty stack and 'addr' could
-	 * be on a subsequent stack.
+	 * that info->next_sp could point to an empty stack and the next bp
+	 * could be on a subsequent stack.
 	 */
-	while (!on_stack(info, addr, len))
+	while (!on_stack(info, frame, len))
 		if (get_stack_info(info->next_sp, state->task, info,
 				   &state->stack_mask))
 			return false;
 
-	if (!state->orig_sp || info->type != orig_type)
-		state->orig_sp = addr;
+	/* Make sure it only unwinds up and doesn't overlap the prev frame: */
+	if (state->orig_sp && state->stack_info.type == prev_type &&
+	    frame < prev_frame_end)
+		return false;
+
+	/* Move state to the next frame: */
+	if (regs) {
+		state->regs = regs;
+		state->bp = NULL;
+	} else {
+		state->bp = next_bp;
+		state->regs = NULL;
+	}
+
+	/* Save the return address: */
+	if (state->regs && user_mode(state->regs))
+		state->ip = 0;
+	else {
+		addr_p = unwind_get_return_address_ptr(state);
+		addr = READ_ONCE_TASK_STACK(state->task, *addr_p);
+		state->ip = ftrace_graph_ret_addr(state->task, &state->graph_idx,
+						  addr, addr_p);
+	}
+
+	/* Save the original stack pointer for unwind_dump(): */
+	if (!state->orig_sp)
+		state->orig_sp = frame;
 
 	return true;
 }
@@ -162,14 +233,12 @@ static bool update_stack_state(struct unwind_state *state, void *addr,
 bool unwind_next_frame(struct unwind_state *state)
 {
 	struct pt_regs *regs;
-	unsigned long *next_bp, *next_frame;
-	size_t next_len;
-	enum stack_type prev_type = state->stack_info.type;
+	unsigned long *next_bp;
 
 	if (unwind_done(state))
 		return false;
 
-	/* have we reached the end? */
+	/* Have we reached the end? */
 	if (state->regs && user_mode(state->regs))
 		goto the_end;
 
@@ -197,54 +266,19 @@ bool unwind_next_frame(struct unwind_state *state)
 		 */
 		state->regs = regs;
 		state->bp = NULL;
+		state->ip = 0;
 		return true;
 	}
 
-	/* get the next frame pointer */
+	/* Get the next frame pointer: */
 	if (state->regs)
 		next_bp = (unsigned long *)state->regs->bp;
 	else
-		next_bp = (unsigned long *)READ_ONCE_TASK_STACK(state->task,*state->bp);
-
-	/* is the next frame pointer an encoded pointer to pt_regs? */
-	regs = decode_frame_pointer(next_bp);
-	if (regs) {
-		next_frame = (unsigned long *)regs;
-		next_len = sizeof(*regs);
-	} else {
-		next_frame = next_bp;
-		next_len = FRAME_HEADER_SIZE;
-	}
-
-	/* make sure the next frame's data is accessible */
-	if (!update_stack_state(state, next_frame, next_len)) {
-		/*
-		 * Don't warn on bad regs->bp.  An interrupt in entry code
-		 * might cause a false positive warning.
-		 */
-		if (state->regs)
-			goto the_end;
+		next_bp = (unsigned long *)READ_ONCE_TASK_STACK(state->task, *state->bp);
 
+	/* Move to the next frame if it's safe: */
+	if (!update_stack_state(state, next_bp))
 		goto bad_address;
-	}
-
-	/* Make sure it only unwinds up and doesn't overlap the last frame: */
-	if (state->stack_info.type == prev_type) {
-		if (state->regs && (void *)next_frame < (void *)state->regs + regs_size(state->regs))
-			goto bad_address;
-
-		if (state->bp && (void *)next_frame < (void *)state->bp + FRAME_HEADER_SIZE)
-			goto bad_address;
-	}
-
-	/* move to the next frame */
-	if (regs) {
-		state->regs = regs;
-		state->bp = NULL;
-	} else {
-		state->bp = next_bp;
-		state->regs = NULL;
-	}
 
 	return true;
 
@@ -259,18 +293,29 @@ bad_address:
 	if (state->task != current)
 		goto the_end;
 
+	/*
+	 * Don't warn if the unwinder got lost due to an interrupt in entry
+	 * code or in the C handler before the first frame pointer got set up:
+	 */
+	if (state->got_irq && in_entry_code(state->ip))
+		goto the_end;
+	if (state->regs &&
+	    state->regs->sp >= (unsigned long)last_aligned_frame(state) &&
+	    state->regs->sp < (unsigned long)task_pt_regs(state->task))
+		goto the_end;
+
 	if (state->regs) {
 		printk_deferred_once(KERN_WARNING
 			"WARNING: kernel stack regs at %p in %s:%d has bad 'bp' value %p\n",
 			state->regs, state->task->comm,
-			state->task->pid, next_frame);
-		unwind_dump(state, (unsigned long *)state->regs);
+			state->task->pid, next_bp);
+		unwind_dump(state);
 	} else {
 		printk_deferred_once(KERN_WARNING
 			"WARNING: kernel stack frame pointer at %p in %s:%d has bad value %p\n",
 			state->bp, state->task->comm,
-			state->task->pid, next_frame);
-		unwind_dump(state, state->bp);
+			state->task->pid, next_bp);
+		unwind_dump(state);
 	}
 the_end:
 	state->stack_info.type = STACK_TYPE_UNKNOWN;
@@ -281,35 +326,24 @@ EXPORT_SYMBOL_GPL(unwind_next_frame);
 void __unwind_start(struct unwind_state *state, struct task_struct *task,
 		    struct pt_regs *regs, unsigned long *first_frame)
 {
-	unsigned long *bp, *frame;
-	size_t len;
+	unsigned long *bp;
 
 	memset(state, 0, sizeof(*state));
 	state->task = task;
+	state->got_irq = (regs);
 
-	/* don't even attempt to start from user mode regs */
+	/* Don't even attempt to start from user mode regs: */
 	if (regs && user_mode(regs)) {
 		state->stack_info.type = STACK_TYPE_UNKNOWN;
 		return;
 	}
 
-	/* set up the starting stack frame */
 	bp = get_frame_pointer(task, regs);
-	regs = decode_frame_pointer(bp);
-	if (regs) {
-		state->regs = regs;
-		frame = (unsigned long *)regs;
-		len = sizeof(*regs);
-	} else {
-		state->bp = bp;
-		frame = bp;
-		len = FRAME_HEADER_SIZE;
-	}
 
-	/* initialize stack info and make sure the frame data is accessible */
-	get_stack_info(frame, state->task, &state->stack_info,
+	/* Initialize stack info and make sure the frame data is accessible: */
+	get_stack_info(bp, state->task, &state->stack_info,
 		       &state->stack_mask);
-	update_stack_state(state, frame, len);
+	update_stack_state(state, bp);
 
 	/*
 	 * The caller can provide the address of the first frame directly
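
The rewritten update_stack_state() relies on the frame-pointer encoding used by the entry code: when an interrupt or exception saves a pt_regs frame, the pt_regs address is stored with bit 0 set in place of a regular saved frame pointer, which is what decode_frame_pointer() (visible in the hunk above) undoes. A C sketch of both directions, assuming that bit-0 convention:

	struct pt_regs;

	/* Entry-code side: hand the unwinder a tagged pt_regs pointer. */
	static inline unsigned long encode_frame_pointer_sketch(struct pt_regs *regs)
	{
		return (unsigned long)regs | 0x1;
	}

	/* Unwinder side: mirrors decode_frame_pointer() in unwind_frame.c. */
	static inline struct pt_regs *decode_frame_pointer_sketch(unsigned long bp)
	{
		if (!(bp & 0x1))
			return NULL;		/* ordinary saved %ebp/%rbp */

		return (struct pt_regs *)(bp & ~0x1UL);
	}
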
diff --git a/arch/x86/kernel/unwind_guess.c b/arch/x86/kernel/unwind_guess.c
index 22881ddcbb9f..039f36738e49 100644
--- a/arch/x86/kernel/unwind_guess.c
+++ b/arch/x86/kernel/unwind_guess.c
@@ -34,7 +34,7 @@ bool unwind_next_frame(struct unwind_state *state)
 				return true;
 		}
 
-		state->sp = info->next_sp;
+		state->sp = PTR_ALIGN(info->next_sp, sizeof(long));
 
 	} while (!get_stack_info(state->sp, state->task, info,
 				 &state->stack_mask));
@@ -49,7 +49,7 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
 	memset(state, 0, sizeof(*state));
 
 	state->task = task;
-	state->sp   = first_frame;
+	state->sp   = PTR_ALIGN(first_frame, sizeof(long));
 
 	get_stack_info(first_frame, state->task, &state->stack_info,
 		       &state->stack_mask);
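
PTR_ALIGN(p, a) rounds a pointer up to the next a-byte boundary; the guess unwinder now applies it so that only long-aligned stack slots are ever dereferenced, since info->next_sp is not guaranteed to be aligned. A tiny userspace illustration of the rounding (PTR_ALIGN itself comes from linux/kernel.h; the macro below just re-derives it):

	#include <stdint.h>
	#include <stdio.h>

	#define ALIGN_UP(x, a)	(((x) + ((a) - 1)) & ~((uintptr_t)(a) - 1))
	#define PTR_ALIGN(p, a)	((void *)ALIGN_UP((uintptr_t)(p), (uintptr_t)(a)))

	int main(void)
	{
		long stack[8];
		void *next_sp = (char *)stack + 3;	/* deliberately misaligned */

		printf("%p -> %p\n", next_sp, PTR_ALIGN(next_sp, sizeof(long)));
		return 0;
	}
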
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index c74ae9ce8dc4..c8a3b61be0aa 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -146,6 +146,7 @@ SECTIONS
 		_edata = .;
 	} :data
 
+	BUG_TABLE
 
 	. = ALIGN(PAGE_SIZE);
 	__vvar_page = .;
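
BUG_TABLE here is the generic linker-script macro (asm-generic/vmlinux.lds.h) that collects the __bug_table entries emitted by the new asm/bug.h between start/stop symbols, so that report_bug() can find the entry for a faulting ud0/ud2. A rough sketch of that lookup, with the bug_addr() accessor assumed to resolve the stored (possibly section-relative) address:

	#include <linux/bug.h>

	extern struct bug_entry __start___bug_table[], __stop___bug_table[];

	/* Sketch of the walk lib/bug.c does when the #UD handler calls
	 * report_bug(); bug_addr() is assumed, see the note above. */
	static const struct bug_entry *find_bug_sketch(unsigned long bugaddr)
	{
		const struct bug_entry *bug;

		for (bug = __start___bug_table; bug < __stop___bug_table; bug++)
			if (bug_addr(bug) == bugaddr)
				return bug;

		return NULL;
	}
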
diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
index 5e2af3a88cf5..81b1635d67de 100644
--- a/arch/x86/lib/clear_page_64.S
+++ b/arch/x86/lib/clear_page_64.S
@@ -14,20 +14,15 @@
  * Zero a page.
  * %rdi	- page
  */
-ENTRY(clear_page)
-
-	ALTERNATIVE_2 "jmp clear_page_orig", "", X86_FEATURE_REP_GOOD, \
-		      "jmp clear_page_c_e", X86_FEATURE_ERMS
-
+ENTRY(clear_page_rep)
 	movl $4096/8,%ecx
 	xorl %eax,%eax
 	rep stosq
 	ret
-ENDPROC(clear_page)
-EXPORT_SYMBOL(clear_page)
+ENDPROC(clear_page_rep)
+EXPORT_SYMBOL_GPL(clear_page_rep)
 
 ENTRY(clear_page_orig)
-
 	xorl   %eax,%eax
 	movl   $4096/64,%ecx
 	.p2align 4
@@ -47,10 +42,12 @@ ENTRY(clear_page_orig)
 	nop
 	ret
 ENDPROC(clear_page_orig)
+EXPORT_SYMBOL_GPL(clear_page_orig)
 
-ENTRY(clear_page_c_e)
+ENTRY(clear_page_erms)
 	movl $4096,%ecx
 	xorl %eax,%eax
 	rep stosb
 	ret
-ENDPROC(clear_page_c_e)
+ENDPROC(clear_page_erms)
+EXPORT_SYMBOL_GPL(clear_page_erms)
diff --git a/arch/x86/um/Makefile b/arch/x86/um/Makefile
index 69f0827d5f53..46cbbfe03285 100644
--- a/arch/x86/um/Makefile
+++ b/arch/x86/um/Makefile
@@ -8,7 +8,7 @@ else
 	BITS := 64
 endif
 
-obj-y = bug.o bugs_$(BITS).o delay.o fault.o ldt.o \
+obj-y = bugs_$(BITS).o delay.o fault.o ldt.o \
 	ptrace_$(BITS).o ptrace_user.o setjmp_$(BITS).o signal.o \
 	stub_$(BITS).o stub_segv.o \
 	sys_call_table_$(BITS).o sysrq_$(BITS).o tls_$(BITS).o \
diff --git a/arch/x86/um/bug.c b/arch/x86/um/bug.c
deleted file mode 100644
index e8034e363d83..000000000000
--- a/arch/x86/um/bug.c
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Copyright (C) 2006 Jeff Dike (jdike@addtoit.com)
- * Licensed under the GPL V2
- */
-
-#include <linux/uaccess.h>
-
-/*
- * Mostly copied from i386/x86_86 - eliminated the eip < PAGE_OFFSET because
- * that's not relevant in skas mode.
- */
-
-int is_valid_bugaddr(unsigned long eip)
-{
-	unsigned short ud2;
-
-	if (probe_kernel_address((unsigned short __user *)eip, ud2))
-		return 0;
-
-	return ud2 == 0x0b0f;
-}