author		Linus Torvalds <torvalds@linux-foundation.org>	2019-01-05 11:28:39 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-01-05 11:28:39 -0800
commit		078a5a4faf64fefaf13478a9091782432cad33fa (patch)
tree		35dba51671a912a07bd0b02c64921920da280cf0 /arch
parent		1205b62390eed4e747232d183fbf412a5aecacd9 (diff)
parent		7e0b44e870cf265bb4a73fd25e0508c7363fcbd6 (diff)
download	linux-078a5a4faf64fefaf13478a9091782432cad33fa.tar.gz
Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 fixes from Will Deacon:
 "I'm safely chained back up to my desk, so please pull these arm64
  fixes for -rc1 that address some issues that cropped up during the
  merge window:

   - Prevent KASLR from mapping the top page of the virtual address
     space

   - Fix device-tree probing of SDEI driver

   - Fix incorrect register offset definition in Hisilicon DDRC PMU
     driver

   - Fix compilation issue with older binutils not liking unsigned
     immediates

   - Fix uapi headers so that libc can provide its own sigcontext
     definition

   - Fix handling of private compat syscalls

   - Hook up compat io_pgetevents() syscall for 32-bit tasks

   - Cleanup to arm64 Makefile (including now to avoid silly conflicts)"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: compat: Hook up io_pgetevents() for 32-bit tasks
  arm64: compat: Don't pull syscall number from regs in arm_compat_syscall
  arm64: compat: Avoid sending SIGILL for unallocated syscall numbers
  arm64/sve: Disentangle <uapi/asm/ptrace.h> from <uapi/asm/sigcontext.h>
  arm64/sve: ptrace: Fix SVE_PT_REGS_OFFSET definition
  drivers/perf: hisi: Fixup one DDRC PMU register offset
  arm64: replace arm64-obj-* in Makefile with obj-*
  arm64: kaslr: Reserve size of ARM64_MEMSTART_ALIGN in linear region
  firmware: arm_sdei: Fix DT platform device creation
  firmware: arm_sdei: fix wrong of_node_put() in init function
  arm64: entry: remove unused register aliases
  arm64: smp: Fix compilation error
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm64/include/asm/smp.h	8
-rw-r--r--	arch/arm64/include/asm/unistd.h	7
-rw-r--r--	arch/arm64/include/asm/unistd32.h	2
-rw-r--r--	arch/arm64/include/uapi/asm/ptrace.h	39
-rw-r--r--	arch/arm64/include/uapi/asm/sigcontext.h	56
-rw-r--r--	arch/arm64/include/uapi/asm/sve_context.h	53
-rw-r--r--	arch/arm64/kernel/Makefile	61
-rw-r--r--	arch/arm64/kernel/entry.S	12
-rw-r--r--	arch/arm64/kernel/sys_compat.c	11
-rw-r--r--	arch/arm64/kernel/syscall.c	9
-rw-r--r--	arch/arm64/mm/init.c	2
11 files changed, 151 insertions, 109 deletions
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index 1895561839a9..18553f399e08 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -16,9 +16,11 @@
 #ifndef __ASM_SMP_H
 #define __ASM_SMP_H
 
+#include <linux/const.h>
+
 /* Values for secondary_data.status */
 #define CPU_STUCK_REASON_SHIFT		(8)
-#define CPU_BOOT_STATUS_MASK		((1U << CPU_STUCK_REASON_SHIFT) - 1)
+#define CPU_BOOT_STATUS_MASK		((UL(1) << CPU_STUCK_REASON_SHIFT) - 1)
 
 #define CPU_MMU_OFF			(-1)
 #define CPU_BOOT_SUCCESS		(0)
@@ -29,8 +31,8 @@
 /* Fatal system error detected by secondary CPU, crash the system */
 #define CPU_PANIC_KERNEL		(3)
 
-#define CPU_STUCK_REASON_52_BIT_VA	(1U << CPU_STUCK_REASON_SHIFT)
-#define CPU_STUCK_REASON_NO_GRAN	(2U << CPU_STUCK_REASON_SHIFT)
+#define CPU_STUCK_REASON_52_BIT_VA	(UL(1) << CPU_STUCK_REASON_SHIFT)
+#define CPU_STUCK_REASON_NO_GRAN	(UL(2) << CPU_STUCK_REASON_SHIFT)
 
 #ifndef __ASSEMBLY__
 
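
The switch to UL() above matters because this header is also pulled into assembly files, and older binutils rejects C-style suffixes such as 1U in immediates, which is the compilation issue mentioned in the pull request. A minimal sketch of the mechanism, assuming UL() follows the usual _AC() token-paste pattern from <uapi/linux/const.h> (illustrative, not copied from this tree):

/* In assembly, emit a bare constant that gas accepts; in C, paste the UL
 * suffix so the constant stays unsigned long. */
#ifdef __ASSEMBLY__
#define _AC(X, Y)	X
#else
#define _AC(X, Y)	(X##Y)
#endif
#define UL(x)		(_AC(x, UL))

/* (1U << 8) hands "1U" to the assembler; (UL(1) << 8) hands it plain "1". */
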
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index b13ca091f833..a7b1fc58ffdf 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -40,10 +40,11 @@
  * The following SVCs are ARM private.
  */
 #define __ARM_NR_COMPAT_BASE		0x0f0000
-#define __ARM_NR_compat_cacheflush	(__ARM_NR_COMPAT_BASE+2)
-#define __ARM_NR_compat_set_tls		(__ARM_NR_COMPAT_BASE+5)
+#define __ARM_NR_compat_cacheflush	(__ARM_NR_COMPAT_BASE + 2)
+#define __ARM_NR_compat_set_tls		(__ARM_NR_COMPAT_BASE + 5)
+#define __ARM_NR_COMPAT_END		(__ARM_NR_COMPAT_BASE + 0x800)
 
-#define __NR_compat_syscalls		399
+#define __NR_compat_syscalls		400
 #endif
 
 #define __ARCH_WANT_SYS_CLONE
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index 2cd6dcf8d246..04ee190b90fe 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -819,6 +819,8 @@ __SYSCALL(__NR_pkey_free, sys_pkey_free)
 __SYSCALL(__NR_statx, sys_statx)
 #define __NR_rseq 398
 __SYSCALL(__NR_rseq, sys_rseq)
+#define __NR_io_pgetevents 399
+__SYSCALL(__NR_io_pgetevents, compat_sys_io_pgetevents)
 
 /*
  * Please add new compat syscalls above this comment and update
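
With the entry above in place, a 32-bit task can reach io_pgetevents through compat syscall number 399. A rough userspace probe, built as a 32-bit binary; the hard-coded 399 and the dummy arguments are only illustrative:

#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	/* 399 is the compat number wired up above; with a NULL AIO context
	 * a wired-up syscall is expected to fail with EINVAL, while an
	 * unwired one fails with ENOSYS. */
	long ret = syscall(399, 0L, 0L, 0L, NULL, NULL, NULL);

	if (ret == -1 && errno == ENOSYS)
		puts("io_pgetevents: not available to compat tasks");
	else
		puts("io_pgetevents: reachable");
	return 0;
}
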
diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h
index c2f249bcd829..28d77c9ed531 100644
--- a/arch/arm64/include/uapi/asm/ptrace.h
+++ b/arch/arm64/include/uapi/asm/ptrace.h
@@ -23,7 +23,7 @@
 #include <linux/types.h>
 
 #include <asm/hwcap.h>
-#include <asm/sigcontext.h>
+#include <asm/sve_context.h>
 
 
 /*
@@ -130,9 +130,9 @@ struct user_sve_header {
  */
 
 /* Offset from the start of struct user_sve_header to the register data */
-#define SVE_PT_REGS_OFFSET					\
-	((sizeof(struct sve_context) + (SVE_VQ_BYTES - 1))	\
-		/ SVE_VQ_BYTES * SVE_VQ_BYTES)
+#define SVE_PT_REGS_OFFSET						\
+	((sizeof(struct user_sve_header) + (__SVE_VQ_BYTES - 1))	\
+		/ __SVE_VQ_BYTES * __SVE_VQ_BYTES)
 
 /*
  * The register data content and layout depends on the value of the
@@ -178,39 +178,36 @@ struct user_sve_header {
  * Additional data might be appended in the future.
  */
 
-#define SVE_PT_SVE_ZREG_SIZE(vq)	SVE_SIG_ZREG_SIZE(vq)
-#define SVE_PT_SVE_PREG_SIZE(vq)	SVE_SIG_PREG_SIZE(vq)
-#define SVE_PT_SVE_FFR_SIZE(vq)		SVE_SIG_FFR_SIZE(vq)
+#define SVE_PT_SVE_ZREG_SIZE(vq)	__SVE_ZREG_SIZE(vq)
+#define SVE_PT_SVE_PREG_SIZE(vq)	__SVE_PREG_SIZE(vq)
+#define SVE_PT_SVE_FFR_SIZE(vq)		__SVE_FFR_SIZE(vq)
 #define SVE_PT_SVE_FPSR_SIZE		sizeof(__u32)
 #define SVE_PT_SVE_FPCR_SIZE		sizeof(__u32)
 
-#define __SVE_SIG_TO_PT(offset) \
-	((offset) - SVE_SIG_REGS_OFFSET + SVE_PT_REGS_OFFSET)
-
 #define SVE_PT_SVE_OFFSET		SVE_PT_REGS_OFFSET
 
 #define SVE_PT_SVE_ZREGS_OFFSET \
-	__SVE_SIG_TO_PT(SVE_SIG_ZREGS_OFFSET)
+	(SVE_PT_REGS_OFFSET + __SVE_ZREGS_OFFSET)
 #define SVE_PT_SVE_ZREG_OFFSET(vq, n) \
-	__SVE_SIG_TO_PT(SVE_SIG_ZREG_OFFSET(vq, n))
+	(SVE_PT_REGS_OFFSET + __SVE_ZREG_OFFSET(vq, n))
 #define SVE_PT_SVE_ZREGS_SIZE(vq) \
-	(SVE_PT_SVE_ZREG_OFFSET(vq, SVE_NUM_ZREGS) - SVE_PT_SVE_ZREGS_OFFSET)
+	(SVE_PT_SVE_ZREG_OFFSET(vq, __SVE_NUM_ZREGS) - SVE_PT_SVE_ZREGS_OFFSET)
 
 #define SVE_PT_SVE_PREGS_OFFSET(vq) \
-	__SVE_SIG_TO_PT(SVE_SIG_PREGS_OFFSET(vq))
+	(SVE_PT_REGS_OFFSET + __SVE_PREGS_OFFSET(vq))
 #define SVE_PT_SVE_PREG_OFFSET(vq, n) \
-	__SVE_SIG_TO_PT(SVE_SIG_PREG_OFFSET(vq, n))
+	(SVE_PT_REGS_OFFSET + __SVE_PREG_OFFSET(vq, n))
 #define SVE_PT_SVE_PREGS_SIZE(vq) \
-	(SVE_PT_SVE_PREG_OFFSET(vq, SVE_NUM_PREGS) - \
+	(SVE_PT_SVE_PREG_OFFSET(vq, __SVE_NUM_PREGS) - \
 		SVE_PT_SVE_PREGS_OFFSET(vq))
 
 #define SVE_PT_SVE_FFR_OFFSET(vq) \
-	__SVE_SIG_TO_PT(SVE_SIG_FFR_OFFSET(vq))
+	(SVE_PT_REGS_OFFSET + __SVE_FFR_OFFSET(vq))
 
 #define SVE_PT_SVE_FPSR_OFFSET(vq)				\
 	((SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq) +	\
-			(SVE_VQ_BYTES - 1))			\
-		/ SVE_VQ_BYTES * SVE_VQ_BYTES)
+			(__SVE_VQ_BYTES - 1))			\
+		/ __SVE_VQ_BYTES * __SVE_VQ_BYTES)
 #define SVE_PT_SVE_FPCR_OFFSET(vq) \
 	(SVE_PT_SVE_FPSR_OFFSET(vq) + SVE_PT_SVE_FPSR_SIZE)
 
@@ -221,8 +218,8 @@ struct user_sve_header {
 
 #define SVE_PT_SVE_SIZE(vq, flags)					\
 	((SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE		\
-			- SVE_PT_SVE_OFFSET + (SVE_VQ_BYTES - 1))	\
-		/ SVE_VQ_BYTES * SVE_VQ_BYTES)
+			- SVE_PT_SVE_OFFSET + (__SVE_VQ_BYTES - 1))	\
+		/ __SVE_VQ_BYTES * __SVE_VQ_BYTES)
 
 #define SVE_PT_SIZE(vq, flags)						\
 	 (((flags) & SVE_PT_REGS_MASK) == SVE_PT_REGS_SVE ?		\
diff --git a/arch/arm64/include/uapi/asm/sigcontext.h b/arch/arm64/include/uapi/asm/sigcontext.h
index dca8f8b5168b..5f3c0cec5af9 100644
--- a/arch/arm64/include/uapi/asm/sigcontext.h
+++ b/arch/arm64/include/uapi/asm/sigcontext.h
@@ -130,6 +130,8 @@ struct sve_context {
 
 #endif /* !__ASSEMBLY__ */
 
+#include <asm/sve_context.h>
+
 /*
  * The SVE architecture leaves space for future expansion of the
  * vector length beyond its initial architectural limit of 2048 bits
@@ -138,21 +140,20 @@ struct sve_context {
  * See linux/Documentation/arm64/sve.txt for a description of the VL/VQ
  * terminology.
  */
-#define SVE_VQ_BYTES		16	/* number of bytes per quadword */
+#define SVE_VQ_BYTES		__SVE_VQ_BYTES	/* bytes per quadword */
 
-#define SVE_VQ_MIN		1
-#define SVE_VQ_MAX		512
+#define SVE_VQ_MIN		__SVE_VQ_MIN
+#define SVE_VQ_MAX		__SVE_VQ_MAX
 
-#define SVE_VL_MIN		(SVE_VQ_MIN * SVE_VQ_BYTES)
-#define SVE_VL_MAX		(SVE_VQ_MAX * SVE_VQ_BYTES)
+#define SVE_VL_MIN		__SVE_VL_MIN
+#define SVE_VL_MAX		__SVE_VL_MAX
 
-#define SVE_NUM_ZREGS		32
-#define SVE_NUM_PREGS		16
+#define SVE_NUM_ZREGS		__SVE_NUM_ZREGS
+#define SVE_NUM_PREGS		__SVE_NUM_PREGS
 
-#define sve_vl_valid(vl) \
-	((vl) % SVE_VQ_BYTES == 0 && (vl) >= SVE_VL_MIN && (vl) <= SVE_VL_MAX)
-#define sve_vq_from_vl(vl)	((vl) / SVE_VQ_BYTES)
-#define sve_vl_from_vq(vq)	((vq) * SVE_VQ_BYTES)
+#define sve_vl_valid(vl)	__sve_vl_valid(vl)
+#define sve_vq_from_vl(vl)	__sve_vq_from_vl(vl)
+#define sve_vl_from_vq(vq)	__sve_vl_from_vq(vq)
 
 /*
  * If the SVE registers are currently live for the thread at signal delivery,
@@ -205,34 +206,33 @@ struct sve_context {
  * Additional data might be appended in the future.
  */
 
-#define SVE_SIG_ZREG_SIZE(vq)	((__u32)(vq) * SVE_VQ_BYTES)
-#define SVE_SIG_PREG_SIZE(vq)	((__u32)(vq) * (SVE_VQ_BYTES / 8))
-#define SVE_SIG_FFR_SIZE(vq)	SVE_SIG_PREG_SIZE(vq)
+#define SVE_SIG_ZREG_SIZE(vq)	__SVE_ZREG_SIZE(vq)
+#define SVE_SIG_PREG_SIZE(vq)	__SVE_PREG_SIZE(vq)
+#define SVE_SIG_FFR_SIZE(vq)	__SVE_FFR_SIZE(vq)
 
 #define SVE_SIG_REGS_OFFSET					\
-	((sizeof(struct sve_context) + (SVE_VQ_BYTES - 1))	\
-		/ SVE_VQ_BYTES * SVE_VQ_BYTES)
+	((sizeof(struct sve_context) + (__SVE_VQ_BYTES - 1))	\
+		/ __SVE_VQ_BYTES * __SVE_VQ_BYTES)
 
-#define SVE_SIG_ZREGS_OFFSET	SVE_SIG_REGS_OFFSET
+#define SVE_SIG_ZREGS_OFFSET \
+		(SVE_SIG_REGS_OFFSET + __SVE_ZREGS_OFFSET)
 #define SVE_SIG_ZREG_OFFSET(vq, n) \
-	(SVE_SIG_ZREGS_OFFSET + SVE_SIG_ZREG_SIZE(vq) * (n))
-#define SVE_SIG_ZREGS_SIZE(vq) \
-	(SVE_SIG_ZREG_OFFSET(vq, SVE_NUM_ZREGS) - SVE_SIG_ZREGS_OFFSET)
+		(SVE_SIG_REGS_OFFSET + __SVE_ZREG_OFFSET(vq, n))
+#define SVE_SIG_ZREGS_SIZE(vq) __SVE_ZREGS_SIZE(vq)
 
 #define SVE_SIG_PREGS_OFFSET(vq) \
-	(SVE_SIG_ZREGS_OFFSET + SVE_SIG_ZREGS_SIZE(vq))
+		(SVE_SIG_REGS_OFFSET + __SVE_PREGS_OFFSET(vq))
 #define SVE_SIG_PREG_OFFSET(vq, n) \
-	(SVE_SIG_PREGS_OFFSET(vq) + SVE_SIG_PREG_SIZE(vq) * (n))
-#define SVE_SIG_PREGS_SIZE(vq) \
-	(SVE_SIG_PREG_OFFSET(vq, SVE_NUM_PREGS) - SVE_SIG_PREGS_OFFSET(vq))
+		(SVE_SIG_REGS_OFFSET + __SVE_PREG_OFFSET(vq, n))
+#define SVE_SIG_PREGS_SIZE(vq) __SVE_PREGS_SIZE(vq)
 
 #define SVE_SIG_FFR_OFFSET(vq) \
-	(SVE_SIG_PREGS_OFFSET(vq) + SVE_SIG_PREGS_SIZE(vq))
+		(SVE_SIG_REGS_OFFSET + __SVE_FFR_OFFSET(vq))
 
 #define SVE_SIG_REGS_SIZE(vq) \
-	(SVE_SIG_FFR_OFFSET(vq) + SVE_SIG_FFR_SIZE(vq) - SVE_SIG_REGS_OFFSET)
-
-#define SVE_SIG_CONTEXT_SIZE(vq) (SVE_SIG_REGS_OFFSET + SVE_SIG_REGS_SIZE(vq))
+		(__SVE_FFR_OFFSET(vq) + __SVE_FFR_SIZE(vq))
 
+#define SVE_SIG_CONTEXT_SIZE(vq) \
+		(SVE_SIG_REGS_OFFSET + SVE_SIG_REGS_SIZE(vq))
 
 #endif /* _UAPI__ASM_SIGCONTEXT_H */
diff --git a/arch/arm64/include/uapi/asm/sve_context.h b/arch/arm64/include/uapi/asm/sve_context.h
new file mode 100644
index 000000000000..754ab751b523
--- /dev/null
+++ b/arch/arm64/include/uapi/asm/sve_context.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* Copyright (C) 2017-2018 ARM Limited */
+
+/*
+ * For use by other UAPI headers only.
+ * Do not make direct use of header or its definitions.
+ */
+
+#ifndef _UAPI__ASM_SVE_CONTEXT_H
+#define _UAPI__ASM_SVE_CONTEXT_H
+
+#include <linux/types.h>
+
+#define __SVE_VQ_BYTES		16	/* number of bytes per quadword */
+
+#define __SVE_VQ_MIN		1
+#define __SVE_VQ_MAX		512
+
+#define __SVE_VL_MIN		(__SVE_VQ_MIN * __SVE_VQ_BYTES)
+#define __SVE_VL_MAX		(__SVE_VQ_MAX * __SVE_VQ_BYTES)
+
+#define __SVE_NUM_ZREGS		32
+#define __SVE_NUM_PREGS		16
+
+#define __sve_vl_valid(vl)			\
+	((vl) % __SVE_VQ_BYTES == 0 &&		\
+	 (vl) >= __SVE_VL_MIN &&		\
+	 (vl) <= __SVE_VL_MAX)
+
+#define __sve_vq_from_vl(vl)	((vl) / __SVE_VQ_BYTES)
+#define __sve_vl_from_vq(vq)	((vq) * __SVE_VQ_BYTES)
+
+#define __SVE_ZREG_SIZE(vq)	((__u32)(vq) * __SVE_VQ_BYTES)
+#define __SVE_PREG_SIZE(vq)	((__u32)(vq) * (__SVE_VQ_BYTES / 8))
+#define __SVE_FFR_SIZE(vq)	__SVE_PREG_SIZE(vq)
+
+#define __SVE_ZREGS_OFFSET	0
+#define __SVE_ZREG_OFFSET(vq, n) \
+	(__SVE_ZREGS_OFFSET + __SVE_ZREG_SIZE(vq) * (n))
+#define __SVE_ZREGS_SIZE(vq) \
+	(__SVE_ZREG_OFFSET(vq, __SVE_NUM_ZREGS) - __SVE_ZREGS_OFFSET)
+
+#define __SVE_PREGS_OFFSET(vq) \
+	(__SVE_ZREGS_OFFSET + __SVE_ZREGS_SIZE(vq))
+#define __SVE_PREG_OFFSET(vq, n) \
+	(__SVE_PREGS_OFFSET(vq) + __SVE_PREG_SIZE(vq) * (n))
+#define __SVE_PREGS_SIZE(vq) \
+	(__SVE_PREG_OFFSET(vq, __SVE_NUM_PREGS) - __SVE_PREGS_OFFSET(vq))
+
+#define __SVE_FFR_OFFSET(vq) \
+	(__SVE_PREGS_OFFSET(vq) + __SVE_PREGS_SIZE(vq))
+
+#endif /* ! _UAPI__ASM_SVE_CONTEXT_H */
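
To make the layout encoded by these macros concrete, the following worked example checks the offsets for vq = 4 (a 512-bit vector length); it assumes an arm64 toolchain whose sysroot ships this header:

#include <asm/sve_context.h>

/* vq = 4 quadwords: 64-byte Z registers, 8-byte P registers and FFR. */
_Static_assert(__SVE_ZREG_SIZE(4) == 64, "Z register size");
_Static_assert(__SVE_PREG_SIZE(4) == 8, "P register size");
_Static_assert(__SVE_ZREGS_SIZE(4) == 32 * 64, "Z block is 2048 bytes");
_Static_assert(__SVE_PREGS_OFFSET(4) == 2048, "P block follows the Z block");
_Static_assert(__SVE_FFR_OFFSET(4) == 2048 + 16 * 8, "FFR follows the P block");
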
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index df08d735b21d..cd434d0719c1 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -12,7 +12,7 @@ CFLAGS_REMOVE_insn.o = -pg
 CFLAGS_REMOVE_return_address.o = -pg
 
 # Object file lists.
-arm64-obj-y		:= debug-monitors.o entry.o irq.o fpsimd.o		\
+obj-y			:= debug-monitors.o entry.o irq.o fpsimd.o		\
 			   entry-fpsimd.o process.o ptrace.o setup.o signal.o	\
 			   sys.o stacktrace.o time.o traps.o io.o vdso.o	\
 			   hyp-stub.o psci.o cpu_ops.o insn.o	\
@@ -27,41 +27,40 @@ OBJCOPYFLAGS := --prefix-symbols=__efistub_
 $(obj)/%.stub.o: $(obj)/%.o FORCE
 	$(call if_changed,objcopy)
 
-arm64-obj-$(CONFIG_COMPAT)		+= sys32.o kuser32.o signal32.o 	\
+obj-$(CONFIG_COMPAT)			+= sys32.o kuser32.o signal32.o 	\
 					   sys_compat.o
-arm64-obj-$(CONFIG_FUNCTION_TRACER)	+= ftrace.o entry-ftrace.o
-arm64-obj-$(CONFIG_MODULES)		+= module.o
-arm64-obj-$(CONFIG_ARM64_MODULE_PLTS)	+= module-plts.o
-arm64-obj-$(CONFIG_PERF_EVENTS)		+= perf_regs.o perf_callchain.o
-arm64-obj-$(CONFIG_HW_PERF_EVENTS)	+= perf_event.o
-arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT)	+= hw_breakpoint.o
-arm64-obj-$(CONFIG_CPU_PM)		+= sleep.o suspend.o
-arm64-obj-$(CONFIG_CPU_IDLE)		+= cpuidle.o
-arm64-obj-$(CONFIG_JUMP_LABEL)		+= jump_label.o
-arm64-obj-$(CONFIG_KGDB)		+= kgdb.o
-arm64-obj-$(CONFIG_EFI)			+= efi.o efi-entry.stub.o		\
+obj-$(CONFIG_FUNCTION_TRACER)		+= ftrace.o entry-ftrace.o
+obj-$(CONFIG_MODULES)			+= module.o
+obj-$(CONFIG_ARM64_MODULE_PLTS)		+= module-plts.o
+obj-$(CONFIG_PERF_EVENTS)		+= perf_regs.o perf_callchain.o
+obj-$(CONFIG_HW_PERF_EVENTS)		+= perf_event.o
+obj-$(CONFIG_HAVE_HW_BREAKPOINT)	+= hw_breakpoint.o
+obj-$(CONFIG_CPU_PM)			+= sleep.o suspend.o
+obj-$(CONFIG_CPU_IDLE)			+= cpuidle.o
+obj-$(CONFIG_JUMP_LABEL)		+= jump_label.o
+obj-$(CONFIG_KGDB)			+= kgdb.o
+obj-$(CONFIG_EFI)			+= efi.o efi-entry.stub.o		\
 					   efi-rt-wrapper.o
-arm64-obj-$(CONFIG_PCI)			+= pci.o
-arm64-obj-$(CONFIG_ARMV8_DEPRECATED)	+= armv8_deprecated.o
-arm64-obj-$(CONFIG_ACPI)		+= acpi.o
-arm64-obj-$(CONFIG_ACPI_NUMA)		+= acpi_numa.o
-arm64-obj-$(CONFIG_ARM64_ACPI_PARKING_PROTOCOL)	+= acpi_parking_protocol.o
-arm64-obj-$(CONFIG_PARAVIRT)		+= paravirt.o
-arm64-obj-$(CONFIG_RANDOMIZE_BASE)	+= kaslr.o
-arm64-obj-$(CONFIG_HIBERNATION)		+= hibernate.o hibernate-asm.o
-arm64-obj-$(CONFIG_KEXEC_CORE)		+= machine_kexec.o relocate_kernel.o	\
+obj-$(CONFIG_PCI)			+= pci.o
+obj-$(CONFIG_ARMV8_DEPRECATED)		+= armv8_deprecated.o
+obj-$(CONFIG_ACPI)			+= acpi.o
+obj-$(CONFIG_ACPI_NUMA)			+= acpi_numa.o
+obj-$(CONFIG_ARM64_ACPI_PARKING_PROTOCOL)	+= acpi_parking_protocol.o
+obj-$(CONFIG_PARAVIRT)			+= paravirt.o
+obj-$(CONFIG_RANDOMIZE_BASE)		+= kaslr.o
+obj-$(CONFIG_HIBERNATION)		+= hibernate.o hibernate-asm.o
+obj-$(CONFIG_KEXEC_CORE)		+= machine_kexec.o relocate_kernel.o	\
 					   cpu-reset.o
-arm64-obj-$(CONFIG_KEXEC_FILE)		+= machine_kexec_file.o kexec_image.o
-arm64-obj-$(CONFIG_ARM64_RELOC_TEST)	+= arm64-reloc-test.o
+obj-$(CONFIG_KEXEC_FILE)		+= machine_kexec_file.o kexec_image.o
+obj-$(CONFIG_ARM64_RELOC_TEST)		+= arm64-reloc-test.o
 arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o
-arm64-obj-$(CONFIG_CRASH_DUMP)		+= crash_dump.o
-arm64-obj-$(CONFIG_CRASH_CORE)		+= crash_core.o
-arm64-obj-$(CONFIG_ARM_SDE_INTERFACE)	+= sdei.o
-arm64-obj-$(CONFIG_ARM64_SSBD)		+= ssbd.o
-arm64-obj-$(CONFIG_ARM64_PTR_AUTH)	+= pointer_auth.o
+obj-$(CONFIG_CRASH_DUMP)		+= crash_dump.o
+obj-$(CONFIG_CRASH_CORE)		+= crash_core.o
+obj-$(CONFIG_ARM_SDE_INTERFACE)		+= sdei.o
+obj-$(CONFIG_ARM64_SSBD)		+= ssbd.o
+obj-$(CONFIG_ARM64_PTR_AUTH)		+= pointer_auth.o
 
-obj-y					+= $(arm64-obj-y) vdso/ probes/
-obj-m					+= $(arm64-obj-m)
+obj-y					+= vdso/ probes/
 head-y					:= head.o
 extra-y					+= $(head-y) vmlinux.lds
 
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 763f03dc4d9e..0ec0c46b2c0c 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -392,17 +392,7 @@ alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
 	mov	sp, x19
 	.endm
 
-/*
- * These are the registers used in the syscall handler, and allow us to
- * have in theory up to 7 arguments to a function - x0 to x6.
- *
- * x7 is reserved for the system call number in 32-bit mode.
- */
-wsc_nr	.req	w25		// number of system calls
-xsc_nr	.req	x25		// number of system calls (zero-extended)
-wscno	.req	w26		// syscall number
-xscno	.req	x26		// syscall number (zero-extended)
-stbl	.req	x27		// syscall table pointer
+/* GPRs used by entry code */
 tsk	.req	x28		// current thread_info
 
 /*
diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c
index 21005dfe8406..c832a5c24efc 100644
--- a/arch/arm64/kernel/sys_compat.c
+++ b/arch/arm64/kernel/sys_compat.c
@@ -66,12 +66,11 @@ do_compat_cache_op(unsigned long start, unsigned long end, int flags)
 /*
  * Handle all unrecognised system calls.
  */
-long compat_arm_syscall(struct pt_regs *regs)
+long compat_arm_syscall(struct pt_regs *regs, int scno)
 {
-	unsigned int no = regs->regs[7];
 	void __user *addr;
 
-	switch (no) {
+	switch (scno) {
 	/*
 	 * Flush a region from virtual address 'r0' to virtual address 'r1'
 	 * _exclusive_.  There is no alignment requirement on either address;
@@ -102,12 +101,12 @@ long compat_arm_syscall(struct pt_regs *regs)
 
 	default:
 		/*
-		 * Calls 9f00xx..9f07ff are defined to return -ENOSYS
+		 * Calls 0xf0xxx..0xf07ff are defined to return -ENOSYS
 		 * if not implemented, rather than raising SIGILL. This
 		 * way the calling program can gracefully determine whether
 		 * a feature is supported.
 		 */
-		if ((no & 0xffff) <= 0x7ff)
+		if (scno < __ARM_NR_COMPAT_END)
 			return -ENOSYS;
 		break;
 	}
@@ -116,6 +115,6 @@ long compat_arm_syscall(struct pt_regs *regs)
 		(compat_thumb_mode(regs) ? 2 : 4);
 
 	arm64_notify_die("Oops - bad compat syscall(2)", regs,
-			 SIGILL, ILL_ILLTRP, addr, no);
+			 SIGILL, ILL_ILLTRP, addr, scno);
 	return 0;
 }
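
The combined effect of passing the real syscall number and bounding the check with __ARM_NR_COMPAT_END is that unallocated numbers inside the ARM-private window (0x0f0000 to 0x0f07ff) now fail with -ENOSYS rather than SIGILL. A rough probe of that behaviour, built as a 32-bit (compat) binary; 0x0f0300 is an arbitrary unallocated slot chosen for illustration:

#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	/* 0x0f0300 sits in the ARM-private range but is not implemented,
	 * so the kernel is expected to return -ENOSYS instead of raising
	 * SIGILL. */
	long ret = syscall(0x0f0300);

	if (ret == -1 && errno == ENOSYS)
		puts("unallocated private syscall: -ENOSYS as expected");
	else
		printf("unexpected result: ret=%ld errno=%d\n", ret, errno);
	return 0;
}
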
diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
index 032d22312881..5610ac01c1ec 100644
--- a/arch/arm64/kernel/syscall.c
+++ b/arch/arm64/kernel/syscall.c
@@ -13,16 +13,15 @@
 #include <asm/thread_info.h>
 #include <asm/unistd.h>
 
-long compat_arm_syscall(struct pt_regs *regs);
-
+long compat_arm_syscall(struct pt_regs *regs, int scno);
 long sys_ni_syscall(void);
 
-asmlinkage long do_ni_syscall(struct pt_regs *regs)
+static long do_ni_syscall(struct pt_regs *regs, int scno)
 {
 #ifdef CONFIG_COMPAT
 	long ret;
 	if (is_compat_task()) {
-		ret = compat_arm_syscall(regs);
+		ret = compat_arm_syscall(regs, scno);
 		if (ret != -ENOSYS)
 			return ret;
 	}
@@ -47,7 +46,7 @@ static void invoke_syscall(struct pt_regs *regs, unsigned int scno,
 		syscall_fn = syscall_table[array_index_nospec(scno, sc_nr)];
 		ret = __invoke_syscall(regs, syscall_fn);
 	} else {
-		ret = do_ni_syscall(regs);
+		ret = do_ni_syscall(regs, scno);
 	}
 
 	regs->regs[0] = ret;
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index a8f2e4792ef9..7205a9085b4d 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -439,7 +439,7 @@ void __init arm64_memblock_init(void)
 		 * memory spans, randomize the linear region as well.
 		 */
 		if (memstart_offset_seed > 0 && range >= ARM64_MEMSTART_ALIGN) {
-			range = range / ARM64_MEMSTART_ALIGN + 1;
+			range /= ARM64_MEMSTART_ALIGN;
 			memstart_addr -= ARM64_MEMSTART_ALIGN *
 					 ((range * memstart_offset_seed) >> 16);
 		}
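
A quick worked example of why dropping the "+ 1" reserves headroom at the top of the linear region, taking for illustration range = 4 * ARM64_MEMSTART_ALIGN and the maximum 16-bit seed 0xffff:

/* Old: slots = range / ALIGN + 1 = 5
 *      offset = ALIGN * ((5 * 0xffff) >> 16) = ALIGN * 4
 *      i.e. the displacement can consume the whole range, so the linear
 *      mapping can reach the very top of the virtual address space.
 * New: slots = range / ALIGN = 4
 *      offset = ALIGN * ((4 * 0xffff) >> 16) = ALIGN * 3
 *      i.e. one ARM64_MEMSTART_ALIGN granule is always left unmapped at
 *      the top, which is the KASLR fix called out in the pull request. */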