From 15759cb054efdd45e6db8433a829a5734e6d50f6 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Tue, 5 May 2020 12:47:14 +0530 Subject: powerpc/perf/callchain: Use __get_user_pages_fast in read_user_stack_slow read_user_stack_slow is called with interrupts soft disabled and it copies contents from the page which we find mapped to a specific address. To convert userspace address to pfn, the kernel now uses lockless page table walk. The kernel needs to make sure the pfn value read remains stable and is not released and reused for another process while the contents are read from the page. This can only be achieved by holding a page reference. One of the first approaches I tried was to check the pte value after the kernel copies the contents from the page. But as shown below we can still get it wrong CPU0 CPU1 pte = READ_ONCE(*ptep); pte_clear(pte); put_page(page); page = alloc_page(); memcpy(page_address(page), "secret password", nr); memcpy(buf, kaddr + offset, nb); put_page(page); handle_mm_fault() page = alloc_page(); set_pte(pte, page); if (pte_val(pte) != pte_val(*ptep)) Hence switch to __get_user_pages_fast. Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200505071729.54912-8-aneesh.kumar@linux.ibm.com --- arch/powerpc/perf/callchain_64.c | 46 ++++++++++++---------------------------- 1 file changed, 14 insertions(+), 32 deletions(-) (limited to 'arch/powerpc/perf') diff --git a/arch/powerpc/perf/callchain_64.c b/arch/powerpc/perf/callchain_64.c index df1ffd8b20f2..b63086b663ef 100644 --- a/arch/powerpc/perf/callchain_64.c +++ b/arch/powerpc/perf/callchain_64.c @@ -26,43 +26,25 @@ */ int read_user_stack_slow(void __user *ptr, void *buf, int nb) { - int ret = -EFAULT; - pgd_t *pgdir; - pte_t *ptep, pte; - unsigned int shift; + unsigned long addr = (unsigned long) ptr; unsigned long offset; - unsigned long pfn, flags; + struct page *page; + int nrpages; void *kaddr; - pgdir = current->mm->pgd; - if (!pgdir) - return -EFAULT; + nrpages = __get_user_pages_fast(addr, 1, 1, &page); + if (nrpages == 1) { + kaddr = page_address(page); + + /* align address to page boundary */ + offset = addr & ~PAGE_MASK; - local_irq_save(flags); - ptep = find_current_mm_pte(pgdir, addr, NULL, &shift); - if (!ptep) - goto err_out; - if (!shift) - shift = PAGE_SHIFT; - - /* align address to page boundary */ - offset = addr & ((1UL << shift) - 1); - - pte = READ_ONCE(*ptep); - if (!pte_present(pte) || !pte_user(pte)) - goto err_out; - pfn = pte_pfn(pte); - if (!page_is_ram(pfn)) - goto err_out; - - /* no highmem to worry about here */ - kaddr = pfn_to_kaddr(pfn); - memcpy(buf, kaddr + offset, nb); - ret = 0; -err_out: - local_irq_restore(flags); - return ret; + memcpy(buf, kaddr + offset, nb); + put_page(page); + return 0; + } + return -EFAULT; } static int read_user_stack_64(unsigned long __user *ptr, unsigned long *ret) -- cgit 1.4.1 From 753462512868674a788ecc77bb96752efb818785 Mon Sep 17 00:00:00 2001 From: Jordan Niethe Date: Wed, 6 May 2020 13:40:26 +1000 Subject: powerpc: Use a macro for creating instructions from u32s In preparation for instructions having a more complex data type start using a macro, ppc_inst(), for making an instruction out of a u32. A macro is used so that instructions can be used as initializer elements. Currently this does nothing, but it will allow for creating a data type that can represent prefixed instructions. 
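The ppc_inst() change described above is easiest to see in isolation. Below is a minimal, stand-alone C sketch, not kernel code: patch_word() is a hypothetical stand-in for patch_instruction(), and the only kernel detail reused is the usual PowerPC nop encoding. It shows why an identity macro at every call site is enough to let a later patch swap in a richer instruction type without touching callers again.

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

/* Today ppc_inst() is an identity macro: an instruction is still a bare u32. */
#define ppc_inst(x) (x)

/* 0x60000000 is "ori 0,0,0", the canonical PowerPC nop. */
#define PPC_INST_NOP 0x60000000u

/* Hypothetical stand-in for patch_instruction(): writes one instruction word. */
static void patch_word(u32 *addr, u32 instr)
{
	*addr = instr;
}

int main(void)
{
	u32 slot = 0;

	/* Call sites wrap the raw value in ppc_inst() even though it is a no-op today... */
	patch_word(&slot, ppc_inst(PPC_INST_NOP));

	/*
	 * ...so when a later patch turns ppc_inst() into a struct constructor
	 * (for example to carry a prefixed-instruction suffix), only the macro
	 * and patch_word()'s parameter type have to change, not every caller.
	 */
	printf("patched: 0x%08x\n", (unsigned)slot);
	return 0;
}

As the commit message notes, a macro is used rather than a static inline so that the wrapped value can still appear in static initializers (for example in instruction test tables).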
Signed-off-by: Jordan Niethe [mpe: Change include guard to _ASM_POWERPC_INST_H] Signed-off-by: Michael Ellerman Reviewed-by: Alistair Popple Link: https://lore.kernel.org/r/20200506034050.24806-7-jniethe5@gmail.com --- arch/powerpc/include/asm/code-patching.h | 3 +- arch/powerpc/include/asm/inst.h | 11 ++++++ arch/powerpc/kernel/align.c | 1 + arch/powerpc/kernel/crash_dump.c | 3 +- arch/powerpc/kernel/epapr_paravirt.c | 3 +- arch/powerpc/kernel/hw_breakpoint.c | 3 +- arch/powerpc/kernel/jump_label.c | 3 +- arch/powerpc/kernel/kgdb.c | 5 +-- arch/powerpc/kernel/kprobes.c | 5 +-- arch/powerpc/kernel/module_64.c | 3 +- arch/powerpc/kernel/optprobes.c | 32 +++++++++-------- arch/powerpc/kernel/security.c | 12 ++++--- arch/powerpc/kernel/setup_32.c | 2 +- arch/powerpc/kernel/trace/ftrace.c | 25 +++++++------- arch/powerpc/kernel/uprobes.c | 1 + arch/powerpc/kvm/emulate_loadstore.c | 2 +- arch/powerpc/lib/code-patching.c | 57 ++++++++++++++++--------------- arch/powerpc/lib/feature-fixups.c | 39 ++++++++++----------- arch/powerpc/lib/test_emulate_step.c | 39 ++++++++++----------- arch/powerpc/mm/nohash/8xx.c | 5 +-- arch/powerpc/perf/8xx-pmu.c | 9 ++--- arch/powerpc/platforms/86xx/mpc86xx_smp.c | 3 +- arch/powerpc/platforms/powermac/smp.c | 3 +- arch/powerpc/xmon/xmon.c | 7 ++-- 24 files changed, 156 insertions(+), 120 deletions(-) create mode 100644 arch/powerpc/include/asm/inst.h (limited to 'arch/powerpc/perf') diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h index 351dda7215b6..48e021957ee5 100644 --- a/arch/powerpc/include/asm/code-patching.h +++ b/arch/powerpc/include/asm/code-patching.h @@ -11,6 +11,7 @@ #include #include #include +#include /* Flags for create_branch: * "b" == create_branch(addr, target, 0); @@ -48,7 +49,7 @@ static inline int patch_branch_site(s32 *site, unsigned long target, int flags) static inline int modify_instruction(unsigned int *addr, unsigned int clr, unsigned int set) { - return patch_instruction(addr, (*addr & ~clr) | set); + return patch_instruction(addr, ppc_inst((*addr & ~clr) | set)); } static inline int modify_instruction_site(s32 *site, unsigned int clr, unsigned int set) diff --git a/arch/powerpc/include/asm/inst.h b/arch/powerpc/include/asm/inst.h new file mode 100644 index 000000000000..b2e93946ce68 --- /dev/null +++ b/arch/powerpc/include/asm/inst.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#ifndef _ASM_POWERPC_INST_H +#define _ASM_POWERPC_INST_H + +/* + * Instruction data type for POWER + */ + +#define ppc_inst(x) (x) + +#endif /* _ASM_POWERPC_INST_H */ diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c index 92045ed64976..86e9bf62f18c 100644 --- a/arch/powerpc/kernel/align.c +++ b/arch/powerpc/kernel/align.c @@ -24,6 +24,7 @@ #include #include #include +#include struct aligninfo { unsigned char len; diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c index 05745ddbd229..78e556b131db 100644 --- a/arch/powerpc/kernel/crash_dump.c +++ b/arch/powerpc/kernel/crash_dump.c @@ -18,6 +18,7 @@ #include #include #include +#include #ifdef DEBUG #include @@ -44,7 +45,7 @@ static void __init create_trampoline(unsigned long addr) * branch to "addr" we jump to ("addr" + 32 MB). Although it requires * two instructions it doesn't require any registers. 
*/ - patch_instruction(p, PPC_INST_NOP); + patch_instruction(p, ppc_inst(PPC_INST_NOP)); patch_branch(++p, addr + PHYSICAL_START, 0); } diff --git a/arch/powerpc/kernel/epapr_paravirt.c b/arch/powerpc/kernel/epapr_paravirt.c index 9d32158ce36f..e8eb72a65572 100644 --- a/arch/powerpc/kernel/epapr_paravirt.c +++ b/arch/powerpc/kernel/epapr_paravirt.c @@ -11,6 +11,7 @@ #include #include #include +#include #if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64) extern void epapr_ev_idle(void); @@ -36,7 +37,7 @@ static int __init early_init_dt_scan_epapr(unsigned long node, return -1; for (i = 0; i < (len / 4); i++) { - u32 inst = be32_to_cpu(insts[i]); + u32 inst = ppc_inst(be32_to_cpu(insts[i])); patch_instruction(epapr_hypercall_start + i, inst); #if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64) patch_instruction(epapr_ev_idle_start + i, inst); diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c index 72f461bd70fb..46e09ac8b84a 100644 --- a/arch/powerpc/kernel/hw_breakpoint.c +++ b/arch/powerpc/kernel/hw_breakpoint.c @@ -24,6 +24,7 @@ #include #include #include +#include #include /* @@ -243,7 +244,7 @@ dar_range_overlaps(unsigned long dar, int size, struct arch_hw_breakpoint *info) static bool stepping_handler(struct pt_regs *regs, struct perf_event *bp, struct arch_hw_breakpoint *info) { - unsigned int instr = 0; + unsigned int instr = ppc_inst(0); int ret, type, size; struct instruction_op op; unsigned long addr = info->address; diff --git a/arch/powerpc/kernel/jump_label.c b/arch/powerpc/kernel/jump_label.c index ca37702bde97..daa4afce7ec8 100644 --- a/arch/powerpc/kernel/jump_label.c +++ b/arch/powerpc/kernel/jump_label.c @@ -6,6 +6,7 @@ #include #include #include +#include void arch_jump_label_transform(struct jump_entry *entry, enum jump_label_type type) @@ -15,5 +16,5 @@ void arch_jump_label_transform(struct jump_entry *entry, if (type == JUMP_LABEL_JMP) patch_branch(addr, entry->target, 0); else - patch_instruction(addr, PPC_INST_NOP); + patch_instruction(addr, ppc_inst(PPC_INST_NOP)); } diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c index 7dd55eb1259d..a6b38a19133f 100644 --- a/arch/powerpc/kernel/kgdb.c +++ b/arch/powerpc/kernel/kgdb.c @@ -26,6 +26,7 @@ #include #include #include +#include /* * This table contains the mapping between PowerPC hardware trap types, and @@ -424,7 +425,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt) if (err) return err; - err = patch_instruction(addr, BREAK_INSTR); + err = patch_instruction(addr, ppc_inst(BREAK_INSTR)); if (err) return -EFAULT; @@ -439,7 +440,7 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt) unsigned int instr = *(unsigned int *)bpt->saved_instr; unsigned int *addr = (unsigned int *)bpt->bpt_addr; - err = patch_instruction(addr, instr); + err = patch_instruction(addr, ppc_inst(instr)); if (err) return -EFAULT; diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c index 81efb605113e..2378a7ed4438 100644 --- a/arch/powerpc/kernel/kprobes.c +++ b/arch/powerpc/kernel/kprobes.c @@ -23,6 +23,7 @@ #include #include #include +#include #include DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; @@ -138,13 +139,13 @@ NOKPROBE_SYMBOL(arch_prepare_kprobe); void arch_arm_kprobe(struct kprobe *p) { - patch_instruction(p->addr, BREAKPOINT_INSTRUCTION); + patch_instruction(p->addr, ppc_inst(BREAKPOINT_INSTRUCTION)); } NOKPROBE_SYMBOL(arch_arm_kprobe); void arch_disarm_kprobe(struct kprobe *p) { - patch_instruction(p->addr, p->opcode); + 
patch_instruction(p->addr, ppc_inst(p->opcode)); } NOKPROBE_SYMBOL(arch_disarm_kprobe); diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c index f808159f3dfd..f390451ad915 100644 --- a/arch/powerpc/kernel/module_64.c +++ b/arch/powerpc/kernel/module_64.c @@ -20,6 +20,7 @@ #include #include #include +#include /* FIXME: We don't do .init separately. To do this, we'd need to have a separate r2 value in the init and core section, and stub between @@ -491,7 +492,7 @@ static int restore_r2(const char *name, u32 *instruction, struct module *me) * "link" branches and they don't return, so they don't need the r2 * restore afterwards. */ - if (!instr_is_relative_link_branch(*prev_insn)) + if (!instr_is_relative_link_branch(ppc_inst(*prev_insn))) return 1; if (*instruction != PPC_INST_NOP) { diff --git a/arch/powerpc/kernel/optprobes.c b/arch/powerpc/kernel/optprobes.c index 445b3dad82dc..44006c4ca4f1 100644 --- a/arch/powerpc/kernel/optprobes.c +++ b/arch/powerpc/kernel/optprobes.c @@ -16,6 +16,7 @@ #include #include #include +#include #define TMPL_CALL_HDLR_IDX \ (optprobe_template_call_handler - optprobe_template_entry) @@ -147,13 +148,13 @@ void arch_remove_optimized_kprobe(struct optimized_kprobe *op) void patch_imm32_load_insns(unsigned int val, kprobe_opcode_t *addr) { /* addis r4,0,(insn)@h */ - patch_instruction(addr, PPC_INST_ADDIS | ___PPC_RT(4) | - ((val >> 16) & 0xffff)); + patch_instruction(addr, ppc_inst(PPC_INST_ADDIS | ___PPC_RT(4) | + ((val >> 16) & 0xffff))); addr++; /* ori r4,r4,(insn)@l */ - patch_instruction(addr, PPC_INST_ORI | ___PPC_RA(4) | - ___PPC_RS(4) | (val & 0xffff)); + patch_instruction(addr, ppc_inst(PPC_INST_ORI | ___PPC_RA(4) | + ___PPC_RS(4) | (val & 0xffff))); } /* @@ -163,28 +164,28 @@ void patch_imm32_load_insns(unsigned int val, kprobe_opcode_t *addr) void patch_imm64_load_insns(unsigned long val, kprobe_opcode_t *addr) { /* lis r3,(op)@highest */ - patch_instruction(addr, PPC_INST_ADDIS | ___PPC_RT(3) | - ((val >> 48) & 0xffff)); + patch_instruction(addr, ppc_inst(PPC_INST_ADDIS | ___PPC_RT(3) | + ((val >> 48) & 0xffff))); addr++; /* ori r3,r3,(op)@higher */ - patch_instruction(addr, PPC_INST_ORI | ___PPC_RA(3) | - ___PPC_RS(3) | ((val >> 32) & 0xffff)); + patch_instruction(addr, ppc_inst(PPC_INST_ORI | ___PPC_RA(3) | + ___PPC_RS(3) | ((val >> 32) & 0xffff))); addr++; /* rldicr r3,r3,32,31 */ - patch_instruction(addr, PPC_INST_RLDICR | ___PPC_RA(3) | - ___PPC_RS(3) | __PPC_SH64(32) | __PPC_ME64(31)); + patch_instruction(addr, ppc_inst(PPC_INST_RLDICR | ___PPC_RA(3) | + ___PPC_RS(3) | __PPC_SH64(32) | __PPC_ME64(31))); addr++; /* oris r3,r3,(op)@h */ - patch_instruction(addr, PPC_INST_ORIS | ___PPC_RA(3) | - ___PPC_RS(3) | ((val >> 16) & 0xffff)); + patch_instruction(addr, ppc_inst(PPC_INST_ORIS | ___PPC_RA(3) | + ___PPC_RS(3) | ((val >> 16) & 0xffff))); addr++; /* ori r3,r3,(op)@l */ - patch_instruction(addr, PPC_INST_ORI | ___PPC_RA(3) | - ___PPC_RS(3) | (val & 0xffff)); + patch_instruction(addr, ppc_inst(PPC_INST_ORI | ___PPC_RA(3) | + ___PPC_RS(3) | (val & 0xffff))); } int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p) @@ -230,7 +231,8 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p) size = (TMPL_END_IDX * sizeof(kprobe_opcode_t)) / sizeof(int); pr_devel("Copying template to %p, size %lu\n", buff, size); for (i = 0; i < size; i++) { - rc = patch_instruction(buff + i, *(optprobe_template_entry + i)); + rc = patch_instruction(buff + i, + 
ppc_inst(*(optprobe_template_entry + i))); if (rc < 0) goto error; } diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c index 479325baf6a9..d86701ce116b 100644 --- a/arch/powerpc/kernel/security.c +++ b/arch/powerpc/kernel/security.c @@ -16,6 +16,7 @@ #include #include #include +#include u64 powerpc_security_features __read_mostly = SEC_FTR_DEFAULT; @@ -439,9 +440,11 @@ static void toggle_count_cache_flush(bool enable) enable = false; if (!enable) { - patch_instruction_site(&patch__call_flush_count_cache, PPC_INST_NOP); + patch_instruction_site(&patch__call_flush_count_cache, + ppc_inst(PPC_INST_NOP)); #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE - patch_instruction_site(&patch__call_kvm_flush_link_stack, PPC_INST_NOP); + patch_instruction_site(&patch__call_kvm_flush_link_stack, + ppc_inst(PPC_INST_NOP)); #endif pr_info("link-stack-flush: software flush disabled.\n"); link_stack_flush_enabled = false; @@ -464,7 +467,8 @@ static void toggle_count_cache_flush(bool enable) // If we just need to flush the link stack, patch an early return if (!security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) { - patch_instruction_site(&patch__flush_link_stack_return, PPC_INST_BLR); + patch_instruction_site(&patch__flush_link_stack_return, + ppc_inst(PPC_INST_BLR)); no_count_cache_flush(); return; } @@ -475,7 +479,7 @@ static void toggle_count_cache_flush(bool enable) return; } - patch_instruction_site(&patch__flush_count_cache_return, PPC_INST_BLR); + patch_instruction_site(&patch__flush_count_cache_return, ppc_inst(PPC_INST_BLR)); count_cache_flush_type = COUNT_CACHE_FLUSH_HW; pr_info("count-cache-flush: hardware assisted flush sequence enabled\n"); } diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index 3a43e8e847c8..0536e4aed330 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c @@ -85,7 +85,7 @@ notrace void __init machine_init(u64 dt_ptr) /* Enable early debugging if any specified (see udbg.h) */ udbg_early_init(); - patch_instruction_site(&patch__memcpy_nocache, PPC_INST_NOP); + patch_instruction_site(&patch__memcpy_nocache, ppc_inst(PPC_INST_NOP)); create_cond_branch(&insn, addr, branch_target(addr), 0x820000); patch_instruction(addr, insn); /* replace b by bne cr0 */ diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c index 8799d891320c..00f69b7baa8a 100644 --- a/arch/powerpc/kernel/trace/ftrace.c +++ b/arch/powerpc/kernel/trace/ftrace.c @@ -27,6 +27,7 @@ #include #include #include +#include #ifdef CONFIG_DYNAMIC_FTRACE @@ -161,7 +162,7 @@ __ftrace_make_nop(struct module *mod, #ifdef CONFIG_MPROFILE_KERNEL /* When using -mkernel_profile there is no load to jump over */ - pop = PPC_INST_NOP; + pop = ppc_inst(PPC_INST_NOP); if (probe_kernel_read(&op, (void *)(ip - 4), 4)) { pr_err("Fetching instruction at %lx failed.\n", ip - 4); @@ -169,7 +170,7 @@ __ftrace_make_nop(struct module *mod, } /* We expect either a mflr r0, or a std r0, LRSAVE(r1) */ - if (op != PPC_INST_MFLR && op != PPC_INST_STD_LR) { + if (op != ppc_inst(PPC_INST_MFLR) && op != ppc_inst(PPC_INST_STD_LR)) { pr_err("Unexpected instruction %08x around bl _mcount\n", op); return -EINVAL; } @@ -188,7 +189,7 @@ __ftrace_make_nop(struct module *mod, * Use a b +8 to jump over the load. */ - pop = PPC_INST_BRANCH | 8; /* b +8 */ + pop = ppc_inst(PPC_INST_BRANCH | 8); /* b +8 */ /* * Check what is in the next instruction. 
We can see ld r2,40(r1), but @@ -199,7 +200,7 @@ __ftrace_make_nop(struct module *mod, return -EFAULT; } - if (op != PPC_INST_LD_TOC) { + if (op != ppc_inst(PPC_INST_LD_TOC)) { pr_err("Expected %08x found %08x\n", PPC_INST_LD_TOC, op); return -EINVAL; } @@ -275,7 +276,7 @@ __ftrace_make_nop(struct module *mod, return -EINVAL; } - op = PPC_INST_NOP; + op = ppc_inst(PPC_INST_NOP); if (patch_instruction((unsigned int *)ip, op)) return -EPERM; @@ -420,7 +421,7 @@ static int __ftrace_make_nop_kernel(struct dyn_ftrace *rec, unsigned long addr) } } - if (patch_instruction((unsigned int *)ip, PPC_INST_NOP)) { + if (patch_instruction((unsigned int *)ip, ppc_inst(PPC_INST_NOP))) { pr_err("Patching NOP failed.\n"); return -EPERM; } @@ -442,7 +443,7 @@ int ftrace_make_nop(struct module *mod, if (test_24bit_addr(ip, addr)) { /* within range */ old = ftrace_call_replace(ip, addr, 1); - new = PPC_INST_NOP; + new = ppc_inst(PPC_INST_NOP); return ftrace_modify_code(ip, old, new); } else if (core_kernel_text(ip)) return __ftrace_make_nop_kernel(rec, addr); @@ -496,7 +497,7 @@ expected_nop_sequence(void *ip, unsigned int op0, unsigned int op1) * The load offset is different depending on the ABI. For simplicity * just mask it out when doing the compare. */ - if ((op0 != 0x48000008) || ((op1 & 0xffff0000) != 0xe8410000)) + if (op0 != ppc_inst(0x48000008) || ((op1 & 0xffff0000) != 0xe8410000)) return 0; return 1; } @@ -505,7 +506,7 @@ static int expected_nop_sequence(void *ip, unsigned int op0, unsigned int op1) { /* look for patched "NOP" on ppc64 with -mprofile-kernel */ - if (op0 != PPC_INST_NOP) + if (op0 != ppc_inst(PPC_INST_NOP)) return 0; return 1; } @@ -588,7 +589,7 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) return -EFAULT; /* It should be pointing to a nop */ - if (op != PPC_INST_NOP) { + if (op != ppc_inst(PPC_INST_NOP)) { pr_err("Expected NOP but have %x\n", op); return -EINVAL; } @@ -645,7 +646,7 @@ static int __ftrace_make_call_kernel(struct dyn_ftrace *rec, unsigned long addr) return -EFAULT; } - if (op != PPC_INST_NOP) { + if (op != ppc_inst(PPC_INST_NOP)) { pr_err("Unexpected call sequence at %p: %x\n", ip, op); return -EINVAL; } @@ -676,7 +677,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) */ if (test_24bit_addr(ip, addr)) { /* within range */ - old = PPC_INST_NOP; + old = ppc_inst(PPC_INST_NOP); new = ftrace_call_replace(ip, addr, 1); return ftrace_modify_code(ip, old, new); } else if (core_kernel_text(ip)) diff --git a/arch/powerpc/kernel/uprobes.c b/arch/powerpc/kernel/uprobes.c index 1cfef0e5fec5..31c870287f2b 100644 --- a/arch/powerpc/kernel/uprobes.c +++ b/arch/powerpc/kernel/uprobes.c @@ -14,6 +14,7 @@ #include #include +#include #define UPROBE_TRAP_NR UINT_MAX diff --git a/arch/powerpc/kvm/emulate_loadstore.c b/arch/powerpc/kvm/emulate_loadstore.c index 1139bc56e004..135d0e686622 100644 --- a/arch/powerpc/kvm/emulate_loadstore.c +++ b/arch/powerpc/kvm/emulate_loadstore.c @@ -95,7 +95,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) emulated = EMULATE_FAIL; vcpu->arch.regs.msr = vcpu->arch.shared->msr; - if (analyse_instr(&op, &vcpu->arch.regs, inst) == 0) { + if (analyse_instr(&op, &vcpu->arch.regs, ppc_inst(inst)) == 0) { int type = op.type & INSTR_TYPE_MASK; int size = GETSIZE(op.type); diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c index 6ed3301c0582..6c30ddadd971 100644 --- a/arch/powerpc/lib/code-patching.c +++ b/arch/powerpc/lib/code-patching.c @@ -17,6 +17,7 @@ #include #include #include 
+#include static int __patch_instruction(unsigned int *exec_addr, unsigned int instr, unsigned int *patch_addr) @@ -414,37 +415,37 @@ static void __init test_branch_iform(void) addr = (unsigned long)&instr; /* The simplest case, branch to self, no flags */ - check(instr_is_branch_iform(0x48000000)); + check(instr_is_branch_iform(ppc_inst(0x48000000))); /* All bits of target set, and flags */ - check(instr_is_branch_iform(0x4bffffff)); + check(instr_is_branch_iform(ppc_inst(0x4bffffff))); /* High bit of opcode set, which is wrong */ - check(!instr_is_branch_iform(0xcbffffff)); + check(!instr_is_branch_iform(ppc_inst(0xcbffffff))); /* Middle bits of opcode set, which is wrong */ - check(!instr_is_branch_iform(0x7bffffff)); + check(!instr_is_branch_iform(ppc_inst(0x7bffffff))); /* Simplest case, branch to self with link */ - check(instr_is_branch_iform(0x48000001)); + check(instr_is_branch_iform(ppc_inst(0x48000001))); /* All bits of targets set */ - check(instr_is_branch_iform(0x4bfffffd)); + check(instr_is_branch_iform(ppc_inst(0x4bfffffd))); /* Some bits of targets set */ - check(instr_is_branch_iform(0x4bff00fd)); + check(instr_is_branch_iform(ppc_inst(0x4bff00fd))); /* Must be a valid branch to start with */ - check(!instr_is_branch_iform(0x7bfffffd)); + check(!instr_is_branch_iform(ppc_inst(0x7bfffffd))); /* Absolute branch to 0x100 */ - instr = 0x48000103; + instr = ppc_inst(0x48000103); check(instr_is_branch_to_addr(&instr, 0x100)); /* Absolute branch to 0x420fc */ - instr = 0x480420ff; + instr = ppc_inst(0x480420ff); check(instr_is_branch_to_addr(&instr, 0x420fc)); /* Maximum positive relative branch, + 20MB - 4B */ - instr = 0x49fffffc; + instr = ppc_inst(0x49fffffc); check(instr_is_branch_to_addr(&instr, addr + 0x1FFFFFC)); /* Smallest negative relative branch, - 4B */ - instr = 0x4bfffffc; + instr = ppc_inst(0x4bfffffc); check(instr_is_branch_to_addr(&instr, addr - 4)); /* Largest negative relative branch, - 32 MB */ - instr = 0x4a000000; + instr = ppc_inst(0x4a000000); check(instr_is_branch_to_addr(&instr, addr - 0x2000000)); /* Branch to self, with link */ @@ -478,7 +479,7 @@ static void __init test_branch_iform(void) /* Check flags are masked correctly */ err = create_branch(&instr, &instr, addr, 0xFFFFFFFC); check(instr_is_branch_to_addr(&instr, addr)); - check(instr == 0x48000000); + check(instr == ppc_inst(0x48000000)); } static void __init test_create_function_call(void) @@ -505,28 +506,28 @@ static void __init test_branch_bform(void) addr = (unsigned long)iptr; /* The simplest case, branch to self, no flags */ - check(instr_is_branch_bform(0x40000000)); + check(instr_is_branch_bform(ppc_inst(0x40000000))); /* All bits of target set, and flags */ - check(instr_is_branch_bform(0x43ffffff)); + check(instr_is_branch_bform(ppc_inst(0x43ffffff))); /* High bit of opcode set, which is wrong */ - check(!instr_is_branch_bform(0xc3ffffff)); + check(!instr_is_branch_bform(ppc_inst(0xc3ffffff))); /* Middle bits of opcode set, which is wrong */ - check(!instr_is_branch_bform(0x7bffffff)); + check(!instr_is_branch_bform(ppc_inst(0x7bffffff))); /* Absolute conditional branch to 0x100 */ - instr = 0x43ff0103; + instr = ppc_inst(0x43ff0103); check(instr_is_branch_to_addr(&instr, 0x100)); /* Absolute conditional branch to 0x20fc */ - instr = 0x43ff20ff; + instr = ppc_inst(0x43ff20ff); check(instr_is_branch_to_addr(&instr, 0x20fc)); /* Maximum positive relative conditional branch, + 32 KB - 4B */ - instr = 0x43ff7ffc; + instr = ppc_inst(0x43ff7ffc); check(instr_is_branch_to_addr(&instr, addr 
+ 0x7FFC)); /* Smallest negative relative conditional branch, - 4B */ - instr = 0x43fffffc; + instr = ppc_inst(0x43fffffc); check(instr_is_branch_to_addr(&instr, addr - 4)); /* Largest negative relative conditional branch, - 32 KB */ - instr = 0x43ff8000; + instr = ppc_inst(0x43ff8000); check(instr_is_branch_to_addr(&instr, addr - 0x8000)); /* All condition code bits set & link */ @@ -563,7 +564,7 @@ static void __init test_branch_bform(void) /* Check flags are masked correctly */ err = create_cond_branch(&instr, iptr, addr, 0xFFFFFFFC); check(instr_is_branch_to_addr(&instr, addr)); - check(instr == 0x43FF0000); + check(instr == ppc_inst(0x43FF0000)); } static void __init test_translate_branch(void) @@ -597,7 +598,7 @@ static void __init test_translate_branch(void) patch_instruction(q, instr); check(instr_is_branch_to_addr(p, addr)); check(instr_is_branch_to_addr(q, addr)); - check(*q == 0x4a000000); + check(*q == ppc_inst(0x4a000000)); /* Maximum positive case, move x to x - 32 MB + 4 */ p = buf + 0x2000000; @@ -608,7 +609,7 @@ static void __init test_translate_branch(void) patch_instruction(q, instr); check(instr_is_branch_to_addr(p, addr)); check(instr_is_branch_to_addr(q, addr)); - check(*q == 0x49fffffc); + check(*q == ppc_inst(0x49fffffc)); /* Jump to x + 16 MB moved to x + 20 MB */ p = buf; @@ -654,7 +655,7 @@ static void __init test_translate_branch(void) patch_instruction(q, instr); check(instr_is_branch_to_addr(p, addr)); check(instr_is_branch_to_addr(q, addr)); - check(*q == 0x43ff8000); + check(*q == ppc_inst(0x43ff8000)); /* Maximum positive case, move x to x - 32 KB + 4 */ p = buf + 0x8000; @@ -666,7 +667,7 @@ static void __init test_translate_branch(void) patch_instruction(q, instr); check(instr_is_branch_to_addr(p, addr)); check(instr_is_branch_to_addr(q, addr)); - check(*q == 0x43ff7ffc); + check(*q == ppc_inst(0x43ff7ffc)); /* Jump to x + 12 KB moved to x + 20 KB */ p = buf; diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c index b129d7b4e7dd..6e7479b8887a 100644 --- a/arch/powerpc/lib/feature-fixups.c +++ b/arch/powerpc/lib/feature-fixups.c @@ -21,6 +21,7 @@ #include #include #include +#include struct fixup_entry { unsigned long mask; @@ -89,7 +90,7 @@ static int patch_feature_section(unsigned long value, struct fixup_entry *fcur) } for (; dest < end; dest++) - raw_patch_instruction(dest, PPC_INST_NOP); + raw_patch_instruction(dest, ppc_inst(PPC_INST_NOP)); return 0; } @@ -146,15 +147,15 @@ static void do_stf_entry_barrier_fixups(enum stf_barrier_type types) pr_devel("patching dest %lx\n", (unsigned long)dest); - patch_instruction(dest, instrs[0]); + patch_instruction(dest, ppc_inst(instrs[0])); if (types & STF_BARRIER_FALLBACK) patch_branch(dest + 1, (unsigned long)&stf_barrier_fallback, BRANCH_SET_LINK); else - patch_instruction(dest + 1, instrs[1]); + patch_instruction(dest + 1, ppc_inst(instrs[1])); - patch_instruction(dest + 2, instrs[2]); + patch_instruction(dest + 2, ppc_inst(instrs[2])); } printk(KERN_DEBUG "stf-barrier: patched %d entry locations (%s barrier)\n", i, @@ -207,12 +208,12 @@ static void do_stf_exit_barrier_fixups(enum stf_barrier_type types) pr_devel("patching dest %lx\n", (unsigned long)dest); - patch_instruction(dest, instrs[0]); - patch_instruction(dest + 1, instrs[1]); - patch_instruction(dest + 2, instrs[2]); - patch_instruction(dest + 3, instrs[3]); - patch_instruction(dest + 4, instrs[4]); - patch_instruction(dest + 5, instrs[5]); + patch_instruction(dest, ppc_inst(instrs[0])); + patch_instruction(dest + 1, 
ppc_inst(instrs[1])); + patch_instruction(dest + 2, ppc_inst(instrs[2])); + patch_instruction(dest + 3, ppc_inst(instrs[3])); + patch_instruction(dest + 4, ppc_inst(instrs[4])); + patch_instruction(dest + 5, ppc_inst(instrs[5])); } printk(KERN_DEBUG "stf-barrier: patched %d exit locations (%s barrier)\n", i, (types == STF_BARRIER_NONE) ? "no" : @@ -260,9 +261,9 @@ void do_rfi_flush_fixups(enum l1d_flush_type types) pr_devel("patching dest %lx\n", (unsigned long)dest); - patch_instruction(dest, instrs[0]); - patch_instruction(dest + 1, instrs[1]); - patch_instruction(dest + 2, instrs[2]); + patch_instruction(dest, ppc_inst(instrs[0])); + patch_instruction(dest + 1, ppc_inst(instrs[1])); + patch_instruction(dest + 2, ppc_inst(instrs[2])); } printk(KERN_DEBUG "rfi-flush: patched %d locations (%s flush)\n", i, @@ -295,7 +296,7 @@ void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_ dest = (void *)start + *start; pr_devel("patching dest %lx\n", (unsigned long)dest); - patch_instruction(dest, instr); + patch_instruction(dest, ppc_inst(instr)); } printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i); @@ -338,8 +339,8 @@ void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_ dest = (void *)start + *start; pr_devel("patching dest %lx\n", (unsigned long)dest); - patch_instruction(dest, instr[0]); - patch_instruction(dest + 1, instr[1]); + patch_instruction(dest, ppc_inst(instr[0])); + patch_instruction(dest + 1, ppc_inst(instr[1])); } printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i); @@ -353,7 +354,7 @@ static void patch_btb_flush_section(long *curr) end = (void *)curr + *(curr + 1); for (; start < end; start++) { pr_devel("patching dest %lx\n", (unsigned long)start); - patch_instruction(start, PPC_INST_NOP); + patch_instruction(start, ppc_inst(PPC_INST_NOP)); } } @@ -382,7 +383,7 @@ void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end) for (; start < end; start++) { dest = (void *)start + *start; - raw_patch_instruction(dest, PPC_INST_LWSYNC); + raw_patch_instruction(dest, ppc_inst(PPC_INST_LWSYNC)); } } @@ -400,7 +401,7 @@ static void do_final_fixups(void) length = (__end_interrupts - _stext) / sizeof(int); while (length--) { - raw_patch_instruction(dest, *src); + raw_patch_instruction(dest, ppc_inst(*src)); src++; dest++; } diff --git a/arch/powerpc/lib/test_emulate_step.c b/arch/powerpc/lib/test_emulate_step.c index 53df4146dd32..85d62f16d07a 100644 --- a/arch/powerpc/lib/test_emulate_step.c +++ b/arch/powerpc/lib/test_emulate_step.c @@ -11,6 +11,7 @@ #include #include #include +#include #define IMM_L(i) ((uintptr_t)(i) & 0xffff) #define IMM_DS(i) ((uintptr_t)(i) & 0xfffc) @@ -19,40 +20,40 @@ * Defined with TEST_ prefix so it does not conflict with other * definitions. 
*/ -#define TEST_LD(r, base, i) (PPC_INST_LD | ___PPC_RT(r) | \ +#define TEST_LD(r, base, i) ppc_inst(PPC_INST_LD | ___PPC_RT(r) | \ ___PPC_RA(base) | IMM_DS(i)) -#define TEST_LWZ(r, base, i) (PPC_INST_LWZ | ___PPC_RT(r) | \ +#define TEST_LWZ(r, base, i) ppc_inst(PPC_INST_LWZ | ___PPC_RT(r) | \ ___PPC_RA(base) | IMM_L(i)) -#define TEST_LWZX(t, a, b) (PPC_INST_LWZX | ___PPC_RT(t) | \ +#define TEST_LWZX(t, a, b) ppc_inst(PPC_INST_LWZX | ___PPC_RT(t) | \ ___PPC_RA(a) | ___PPC_RB(b)) -#define TEST_STD(r, base, i) (PPC_INST_STD | ___PPC_RS(r) | \ +#define TEST_STD(r, base, i) ppc_inst(PPC_INST_STD | ___PPC_RS(r) | \ ___PPC_RA(base) | IMM_DS(i)) -#define TEST_LDARX(t, a, b, eh) (PPC_INST_LDARX | ___PPC_RT(t) | \ +#define TEST_LDARX(t, a, b, eh) ppc_inst(PPC_INST_LDARX | ___PPC_RT(t) | \ ___PPC_RA(a) | ___PPC_RB(b) | \ __PPC_EH(eh)) -#define TEST_STDCX(s, a, b) (PPC_INST_STDCX | ___PPC_RS(s) | \ +#define TEST_STDCX(s, a, b) ppc_inst(PPC_INST_STDCX | ___PPC_RS(s) | \ ___PPC_RA(a) | ___PPC_RB(b)) -#define TEST_LFSX(t, a, b) (PPC_INST_LFSX | ___PPC_RT(t) | \ +#define TEST_LFSX(t, a, b) ppc_inst(PPC_INST_LFSX | ___PPC_RT(t) | \ ___PPC_RA(a) | ___PPC_RB(b)) -#define TEST_STFSX(s, a, b) (PPC_INST_STFSX | ___PPC_RS(s) | \ +#define TEST_STFSX(s, a, b) ppc_inst(PPC_INST_STFSX | ___PPC_RS(s) | \ ___PPC_RA(a) | ___PPC_RB(b)) -#define TEST_LFDX(t, a, b) (PPC_INST_LFDX | ___PPC_RT(t) | \ +#define TEST_LFDX(t, a, b) ppc_inst(PPC_INST_LFDX | ___PPC_RT(t) | \ ___PPC_RA(a) | ___PPC_RB(b)) -#define TEST_STFDX(s, a, b) (PPC_INST_STFDX | ___PPC_RS(s) | \ +#define TEST_STFDX(s, a, b) ppc_inst(PPC_INST_STFDX | ___PPC_RS(s) | \ ___PPC_RA(a) | ___PPC_RB(b)) -#define TEST_LVX(t, a, b) (PPC_INST_LVX | ___PPC_RT(t) | \ +#define TEST_LVX(t, a, b) ppc_inst(PPC_INST_LVX | ___PPC_RT(t) | \ ___PPC_RA(a) | ___PPC_RB(b)) -#define TEST_STVX(s, a, b) (PPC_INST_STVX | ___PPC_RS(s) | \ +#define TEST_STVX(s, a, b) ppc_inst(PPC_INST_STVX | ___PPC_RS(s) | \ ___PPC_RA(a) | ___PPC_RB(b)) -#define TEST_LXVD2X(s, a, b) (PPC_INST_LXVD2X | VSX_XX1((s), R##a, R##b)) -#define TEST_STXVD2X(s, a, b) (PPC_INST_STXVD2X | VSX_XX1((s), R##a, R##b)) -#define TEST_ADD(t, a, b) (PPC_INST_ADD | ___PPC_RT(t) | \ +#define TEST_LXVD2X(s, a, b) ppc_inst(PPC_INST_LXVD2X | VSX_XX1((s), R##a, R##b)) +#define TEST_STXVD2X(s, a, b) ppc_inst(PPC_INST_STXVD2X | VSX_XX1((s), R##a, R##b)) +#define TEST_ADD(t, a, b) ppc_inst(PPC_INST_ADD | ___PPC_RT(t) | \ ___PPC_RA(a) | ___PPC_RB(b)) -#define TEST_ADD_DOT(t, a, b) (PPC_INST_ADD | ___PPC_RT(t) | \ +#define TEST_ADD_DOT(t, a, b) ppc_inst(PPC_INST_ADD | ___PPC_RT(t) | \ ___PPC_RA(a) | ___PPC_RB(b) | 0x1) -#define TEST_ADDC(t, a, b) (PPC_INST_ADDC | ___PPC_RT(t) | \ +#define TEST_ADDC(t, a, b) ppc_inst(PPC_INST_ADDC | ___PPC_RT(t) | \ ___PPC_RA(a) | ___PPC_RB(b)) -#define TEST_ADDC_DOT(t, a, b) (PPC_INST_ADDC | ___PPC_RT(t) | \ +#define TEST_ADDC_DOT(t, a, b) ppc_inst(PPC_INST_ADDC | ___PPC_RT(t) | \ ___PPC_RA(a) | ___PPC_RB(b) | 0x1) #define MAX_SUBTESTS 16 @@ -472,7 +473,7 @@ static struct compute_test compute_tests[] = { .subtests = { { .descr = "R0 = LONG_MAX", - .instr = PPC_INST_NOP, + .instr = ppc_inst(PPC_INST_NOP), .regs = { .gpr[0] = LONG_MAX, } diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c index 3189308dece4..b27017109a36 100644 --- a/arch/powerpc/mm/nohash/8xx.c +++ b/arch/powerpc/mm/nohash/8xx.c @@ -11,6 +11,7 @@ #include #include #include +#include #include @@ -101,7 +102,7 @@ static void mmu_patch_addis(s32 *site, long simm) instr &= 0xffff0000; instr |= ((unsigned long)simm) >> 
16; - patch_instruction_site(site, instr); + patch_instruction_site(site, ppc_inst(instr)); } static void mmu_mapin_ram_chunk(unsigned long offset, unsigned long top, pgprot_t prot) @@ -125,7 +126,7 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top) mapped = 0; mmu_mapin_immr(); if (!IS_ENABLED(CONFIG_PIN_TLB_IMMR)) - patch_instruction_site(&patch__dtlbmiss_immr_jmp, PPC_INST_NOP); + patch_instruction_site(&patch__dtlbmiss_immr_jmp, ppc_inst(PPC_INST_NOP)); if (!IS_ENABLED(CONFIG_PIN_TLB_TEXT)) mmu_patch_cmp_limit(&patch__itlbmiss_linmem_top, 0); } else { diff --git a/arch/powerpc/perf/8xx-pmu.c b/arch/powerpc/perf/8xx-pmu.c index 1ad03c55c88c..acc27fc63eb7 100644 --- a/arch/powerpc/perf/8xx-pmu.c +++ b/arch/powerpc/perf/8xx-pmu.c @@ -15,6 +15,7 @@ #include #include #include +#include #define PERF_8xx_ID_CPU_CYCLES 1 #define PERF_8xx_ID_HW_INSTRUCTIONS 2 @@ -170,8 +171,8 @@ static void mpc8xx_pmu_del(struct perf_event *event, int flags) case PERF_8xx_ID_ITLB_LOAD_MISS: if (atomic_dec_return(&itlb_miss_ref) == 0) { /* mfspr r10, SPRN_SPRG_SCRATCH0 */ - unsigned int insn = PPC_INST_MFSPR | __PPC_RS(R10) | - __PPC_SPR(SPRN_SPRG_SCRATCH0); + struct ppc_inst insn = ppc_inst(PPC_INST_MFSPR | __PPC_RS(R10) | + __PPC_SPR(SPRN_SPRG_SCRATCH0)); patch_instruction_site(&patch__itlbmiss_exit_1, insn); #ifndef CONFIG_PIN_TLB_TEXT @@ -182,8 +183,8 @@ static void mpc8xx_pmu_del(struct perf_event *event, int flags) case PERF_8xx_ID_DTLB_LOAD_MISS: if (atomic_dec_return(&dtlb_miss_ref) == 0) { /* mfspr r10, SPRN_DAR */ - unsigned int insn = PPC_INST_MFSPR | __PPC_RS(R10) | - __PPC_SPR(SPRN_DAR); + struct ppc_inst insn = ppc_inst(PPC_INST_MFSPR | __PPC_RS(R10) | + __PPC_SPR(SPRN_DAR)); patch_instruction_site(&patch__dtlbmiss_exit_1, insn); patch_instruction_site(&patch__dtlbmiss_exit_2, insn); diff --git a/arch/powerpc/platforms/86xx/mpc86xx_smp.c b/arch/powerpc/platforms/86xx/mpc86xx_smp.c index 5b91ea5694e3..31540ebf1e29 100644 --- a/arch/powerpc/platforms/86xx/mpc86xx_smp.c +++ b/arch/powerpc/platforms/86xx/mpc86xx_smp.c @@ -17,6 +17,7 @@ #include #include #include +#include #include @@ -82,7 +83,7 @@ smp_86xx_kick_cpu(int nr) mdelay(1); /* Restore the exception vector */ - patch_instruction(vector, save_vector); + patch_instruction(vector, ppc_inst(save_vector)); local_irq_restore(flags); diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c index be2ab5b11e57..44a00990af9d 100644 --- a/arch/powerpc/platforms/powermac/smp.c +++ b/arch/powerpc/platforms/powermac/smp.c @@ -49,6 +49,7 @@ #include #include #include +#include #include "pmac.h" @@ -826,7 +827,7 @@ static int smp_core99_kick_cpu(int nr) mdelay(1); /* Restore our exception vector */ - patch_instruction(vector, save_vector); + patch_instruction(vector, ppc_inst(save_vector)); local_irq_restore(flags); if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu done", 0x347); diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index 0fa3aaeee105..a56dcb004396 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -54,6 +54,7 @@ #include #include #include +#include #ifdef CONFIG_PPC64 #include @@ -946,7 +947,7 @@ static void remove_bpts(void) if ((bp->enabled & (BP_TRAP|BP_CIABR)) != BP_TRAP) continue; if (mread(bp->address, &instr, 4) == 4 - && instr == bpinstr + && instr == ppc_inst(bpinstr) && patch_instruction( (unsigned int *)bp->address, bp->instr[0]) != 0) printf("Couldn't remove breakpoint at %lx\n", @@ -2847,7 +2848,7 @@ generic_inst_dump(unsigned 
long adr, long count, int praddr, { int nr, dotted; unsigned long first_adr; - unsigned int inst, last_inst = 0; + unsigned int inst, last_inst = ppc_inst(0); unsigned char val[4]; dotted = 0; @@ -2860,7 +2861,7 @@ generic_inst_dump(unsigned long adr, long count, int praddr, } break; } - inst = GETWORD(val); + inst = ppc_inst(GETWORD(val)); if (adr > first_adr && inst == last_inst) { if (!dotted) { printf(" ...\n"); -- cgit 1.4.1 From 94afd069d937d84fb4f696eb9a78db4084e43d21 Mon Sep 17 00:00:00 2001 From: Jordan Niethe Date: Wed, 6 May 2020 13:40:31 +1000 Subject: powerpc: Use a datatype for instructions Currently unsigned ints are used to represent instructions on powerpc. This has worked well as instructions have always been 4 byte words. However, ISA v3.1 introduces some changes to instructions that mean this scheme will no longer work as well. This change is Prefixed Instructions. A prefixed instruction is made up of a word prefix followed by a word suffix to make an 8 byte double word instruction. No matter the endianness of the system the prefix always comes first. Prefixed instructions are only planned for powerpc64. Introduce a ppc_inst type to represent both prefixed and word instructions on powerpc64 while keeping it possible to exclusively have word instructions on powerpc32. Signed-off-by: Jordan Niethe [mpe: Fix compile error in emulate_spe()] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200506034050.24806-12-jniethe5@gmail.com --- arch/powerpc/include/asm/code-patching.h | 32 ++++++------ arch/powerpc/include/asm/inst.h | 18 ++++--- arch/powerpc/include/asm/sstep.h | 5 +- arch/powerpc/include/asm/uprobes.h | 5 +- arch/powerpc/kernel/align.c | 9 ++-- arch/powerpc/kernel/crash_dump.c | 2 +- arch/powerpc/kernel/epapr_paravirt.c | 6 +-- arch/powerpc/kernel/hw_breakpoint.c | 4 +- arch/powerpc/kernel/jump_label.c | 2 +- arch/powerpc/kernel/kgdb.c | 4 +- arch/powerpc/kernel/kprobes.c | 8 +-- arch/powerpc/kernel/mce_power.c | 5 +- arch/powerpc/kernel/optprobes.c | 64 ++++++++++++++---------- arch/powerpc/kernel/setup_32.c | 4 +- arch/powerpc/kernel/trace/ftrace.c | 83 ++++++++++++++++--------------- arch/powerpc/kernel/vecemu.c | 5 +- arch/powerpc/lib/code-patching.c | 76 ++++++++++++++-------------- arch/powerpc/lib/feature-fixups.c | 62 ++++++++++++----------- arch/powerpc/lib/sstep.c | 4 +- arch/powerpc/lib/test_emulate_step.c | 9 ++-- arch/powerpc/mm/fault.c | 4 +- arch/powerpc/perf/core-book3s.c | 4 +- arch/powerpc/platforms/86xx/mpc86xx_smp.c | 4 +- arch/powerpc/platforms/powermac/smp.c | 4 +- arch/powerpc/xmon/xmon.c | 22 ++++---- arch/powerpc/xmon/xmon_bpts.h | 6 +-- 26 files changed, 237 insertions(+), 214 deletions(-) (limited to 'arch/powerpc/perf') diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h index 48e021957ee5..eacc9102c251 100644 --- a/arch/powerpc/include/asm/code-patching.h +++ b/arch/powerpc/include/asm/code-patching.h @@ -23,33 +23,33 @@ #define BRANCH_ABSOLUTE 0x2 bool is_offset_in_branch_range(long offset); -int create_branch(unsigned int *instr, const unsigned int *addr, +int create_branch(struct ppc_inst *instr, const struct ppc_inst *addr, unsigned long target, int flags); -int create_cond_branch(unsigned int *instr, const unsigned int *addr, +int create_cond_branch(struct ppc_inst *instr, const struct ppc_inst *addr, unsigned long target, int flags); -int patch_branch(unsigned int *addr, unsigned long target, int flags); -int patch_instruction(unsigned int *addr, unsigned int instr); -int 
raw_patch_instruction(unsigned int *addr, unsigned int instr); +int patch_branch(struct ppc_inst *addr, unsigned long target, int flags); +int patch_instruction(struct ppc_inst *addr, struct ppc_inst instr); +int raw_patch_instruction(struct ppc_inst *addr, struct ppc_inst instr); static inline unsigned long patch_site_addr(s32 *site) { return (unsigned long)site + *site; } -static inline int patch_instruction_site(s32 *site, unsigned int instr) +static inline int patch_instruction_site(s32 *site, struct ppc_inst instr) { - return patch_instruction((unsigned int *)patch_site_addr(site), instr); + return patch_instruction((struct ppc_inst *)patch_site_addr(site), instr); } static inline int patch_branch_site(s32 *site, unsigned long target, int flags) { - return patch_branch((unsigned int *)patch_site_addr(site), target, flags); + return patch_branch((struct ppc_inst *)patch_site_addr(site), target, flags); } static inline int modify_instruction(unsigned int *addr, unsigned int clr, unsigned int set) { - return patch_instruction(addr, ppc_inst((*addr & ~clr) | set)); + return patch_instruction((struct ppc_inst *)addr, ppc_inst((*addr & ~clr) | set)); } static inline int modify_instruction_site(s32 *site, unsigned int clr, unsigned int set) @@ -57,13 +57,13 @@ static inline int modify_instruction_site(s32 *site, unsigned int clr, unsigned return modify_instruction((unsigned int *)patch_site_addr(site), clr, set); } -int instr_is_relative_branch(unsigned int instr); -int instr_is_relative_link_branch(unsigned int instr); -int instr_is_branch_to_addr(const unsigned int *instr, unsigned long addr); -unsigned long branch_target(const unsigned int *instr); -int translate_branch(unsigned int *instr, const unsigned int *dest, - const unsigned int *src); -extern bool is_conditional_branch(unsigned int instr); +int instr_is_relative_branch(struct ppc_inst instr); +int instr_is_relative_link_branch(struct ppc_inst instr); +int instr_is_branch_to_addr(const struct ppc_inst *instr, unsigned long addr); +unsigned long branch_target(const struct ppc_inst *instr); +int translate_branch(struct ppc_inst *instr, const struct ppc_inst *dest, + const struct ppc_inst *src); +extern bool is_conditional_branch(struct ppc_inst instr); #ifdef CONFIG_PPC_BOOK3E_64 void __patch_exception(int exc, unsigned long addr); #define patch_exception(exc, name) do { \ diff --git a/arch/powerpc/include/asm/inst.h b/arch/powerpc/include/asm/inst.h index ff8d58671648..f602ca908936 100644 --- a/arch/powerpc/include/asm/inst.h +++ b/arch/powerpc/include/asm/inst.h @@ -6,26 +6,30 @@ * Instruction data type for POWER */ -#define ppc_inst(x) (x) +struct ppc_inst { + u32 val; +} __packed; -static inline u32 ppc_inst_val(u32 x) +#define ppc_inst(x) ((struct ppc_inst){ .val = x }) + +static inline u32 ppc_inst_val(struct ppc_inst x) { - return x; + return x.val; } -static inline int ppc_inst_primary_opcode(u32 x) +static inline int ppc_inst_primary_opcode(struct ppc_inst x) { return ppc_inst_val(x) >> 26; } -static inline u32 ppc_inst_swab(u32 x) +static inline struct ppc_inst ppc_inst_swab(struct ppc_inst x) { return ppc_inst(swab32(ppc_inst_val(x))); } -static inline bool ppc_inst_equal(u32 x, u32 y) +static inline bool ppc_inst_equal(struct ppc_inst x, struct ppc_inst y) { - return x == y; + return ppc_inst_val(x) == ppc_inst_val(y); } #endif /* _ASM_POWERPC_INST_H */ diff --git a/arch/powerpc/include/asm/sstep.h b/arch/powerpc/include/asm/sstep.h index 26d729562fe2..c3ce903ac488 100644 --- a/arch/powerpc/include/asm/sstep.h +++ 
b/arch/powerpc/include/asm/sstep.h @@ -2,6 +2,7 @@ /* * Copyright (C) 2004 Paul Mackerras , IBM */ +#include struct pt_regs; @@ -132,7 +133,7 @@ union vsx_reg { * otherwise. */ extern int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, - unsigned int instr); + struct ppc_inst instr); /* * Emulate an instruction that can be executed just by updating @@ -149,7 +150,7 @@ void emulate_update_regs(struct pt_regs *reg, struct instruction_op *op); * 0 if it could not be emulated, or -1 for an instruction that * should not be emulated (rfid, mtmsrd clearing MSR_RI, etc.). */ -extern int emulate_step(struct pt_regs *regs, unsigned int instr); +extern int emulate_step(struct pt_regs *regs, struct ppc_inst instr); /* * Emulate a load or store instruction by reading/writing the diff --git a/arch/powerpc/include/asm/uprobes.h b/arch/powerpc/include/asm/uprobes.h index 2bbdf27d09b5..7e3b329ba2d3 100644 --- a/arch/powerpc/include/asm/uprobes.h +++ b/arch/powerpc/include/asm/uprobes.h @@ -11,6 +11,7 @@ #include #include +#include typedef ppc_opcode_t uprobe_opcode_t; @@ -23,8 +24,8 @@ typedef ppc_opcode_t uprobe_opcode_t; struct arch_uprobe { union { - u32 insn; - u32 ixol; + struct ppc_inst insn; + struct ppc_inst ixol; }; }; diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c index a63216da8cf1..9b35d6160507 100644 --- a/arch/powerpc/kernel/align.c +++ b/arch/powerpc/kernel/align.c @@ -105,7 +105,7 @@ static struct aligninfo spe_aligninfo[32] = { * so we don't need the address swizzling. */ static int emulate_spe(struct pt_regs *regs, unsigned int reg, - unsigned int instr) + struct ppc_inst ppc_instr) { int ret; union { @@ -116,8 +116,9 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg, } data, temp; unsigned char __user *p, *addr; unsigned long *evr = ¤t->thread.evr[reg]; - unsigned int nb, flags; + unsigned int nb, flags, instr; + instr = ppc_inst_val(ppc_instr); instr = (instr >> 1) & 0x1f; /* DAR has the operand effective address */ @@ -294,7 +295,7 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg, int fix_alignment(struct pt_regs *regs) { - unsigned int instr; + struct ppc_inst instr; struct instruction_op op; int r, type; @@ -304,7 +305,7 @@ int fix_alignment(struct pt_regs *regs) */ CHECK_FULL_REGS(regs); - if (unlikely(__get_user(instr, (unsigned int __user *)regs->nip))) + if (unlikely(__get_user(instr.val, (unsigned int __user *)regs->nip))) return -EFAULT; if ((regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE)) { /* We don't handle PPC little-endian any more... */ diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c index 78e556b131db..72bafb47e757 100644 --- a/arch/powerpc/kernel/crash_dump.c +++ b/arch/powerpc/kernel/crash_dump.c @@ -35,7 +35,7 @@ void __init reserve_kdump_trampoline(void) static void __init create_trampoline(unsigned long addr) { - unsigned int *p = (unsigned int *)addr; + struct ppc_inst *p = (struct ppc_inst *)addr; /* The maximum range of a single instruction branch, is the current * instruction's address + (32 MB - 4) bytes. 
For the trampoline we diff --git a/arch/powerpc/kernel/epapr_paravirt.c b/arch/powerpc/kernel/epapr_paravirt.c index e8eb72a65572..2ed14d4a47f5 100644 --- a/arch/powerpc/kernel/epapr_paravirt.c +++ b/arch/powerpc/kernel/epapr_paravirt.c @@ -37,10 +37,10 @@ static int __init early_init_dt_scan_epapr(unsigned long node, return -1; for (i = 0; i < (len / 4); i++) { - u32 inst = ppc_inst(be32_to_cpu(insts[i])); - patch_instruction(epapr_hypercall_start + i, inst); + struct ppc_inst inst = ppc_inst(be32_to_cpu(insts[i])); + patch_instruction((struct ppc_inst *)(epapr_hypercall_start + i), inst); #if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64) - patch_instruction(epapr_ev_idle_start + i, inst); + patch_instruction((struct ppc_inst *)(epapr_ev_idle_start + i), inst); #endif } diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c index 46e09ac8b84a..2db9a7ac7bcb 100644 --- a/arch/powerpc/kernel/hw_breakpoint.c +++ b/arch/powerpc/kernel/hw_breakpoint.c @@ -244,12 +244,12 @@ dar_range_overlaps(unsigned long dar, int size, struct arch_hw_breakpoint *info) static bool stepping_handler(struct pt_regs *regs, struct perf_event *bp, struct arch_hw_breakpoint *info) { - unsigned int instr = ppc_inst(0); + struct ppc_inst instr = ppc_inst(0); int ret, type, size; struct instruction_op op; unsigned long addr = info->address; - if (__get_user_inatomic(instr, (unsigned int *)regs->nip)) + if (__get_user_inatomic(instr.val, (unsigned int *)regs->nip)) goto fail; ret = analyse_instr(&op, regs, instr); diff --git a/arch/powerpc/kernel/jump_label.c b/arch/powerpc/kernel/jump_label.c index daa4afce7ec8..144858027fa3 100644 --- a/arch/powerpc/kernel/jump_label.c +++ b/arch/powerpc/kernel/jump_label.c @@ -11,7 +11,7 @@ void arch_jump_label_transform(struct jump_entry *entry, enum jump_label_type type) { - u32 *addr = (u32 *)(unsigned long)entry->code; + struct ppc_inst *addr = (struct ppc_inst *)(unsigned long)entry->code; if (type == JUMP_LABEL_JMP) patch_branch(addr, entry->target, 0); diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c index a6b38a19133f..652b2852bea3 100644 --- a/arch/powerpc/kernel/kgdb.c +++ b/arch/powerpc/kernel/kgdb.c @@ -419,7 +419,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt) { int err; unsigned int instr; - unsigned int *addr = (unsigned int *)bpt->bpt_addr; + struct ppc_inst *addr = (struct ppc_inst *)bpt->bpt_addr; err = probe_kernel_address(addr, instr); if (err) @@ -438,7 +438,7 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt) { int err; unsigned int instr = *(unsigned int *)bpt->saved_instr; - unsigned int *addr = (unsigned int *)bpt->bpt_addr; + struct ppc_inst *addr = (struct ppc_inst *)bpt->bpt_addr; err = patch_instruction(addr, ppc_inst(instr)); if (err) diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c index 92fa3070d905..a08ae5803622 100644 --- a/arch/powerpc/kernel/kprobes.c +++ b/arch/powerpc/kernel/kprobes.c @@ -106,7 +106,7 @@ kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset) int arch_prepare_kprobe(struct kprobe *p) { int ret = 0; - kprobe_opcode_t insn = *p->addr; + struct ppc_inst insn = *(struct ppc_inst *)p->addr; if ((unsigned long)p->addr & 0x03) { printk("Attempt to register kprobe at an unaligned address\n"); @@ -139,13 +139,13 @@ NOKPROBE_SYMBOL(arch_prepare_kprobe); void arch_arm_kprobe(struct kprobe *p) { - patch_instruction(p->addr, ppc_inst(BREAKPOINT_INSTRUCTION)); + patch_instruction((struct ppc_inst *)p->addr, 
ppc_inst(BREAKPOINT_INSTRUCTION)); } NOKPROBE_SYMBOL(arch_arm_kprobe); void arch_disarm_kprobe(struct kprobe *p) { - patch_instruction(p->addr, ppc_inst(p->opcode)); + patch_instruction((struct ppc_inst *)p->addr, ppc_inst(p->opcode)); } NOKPROBE_SYMBOL(arch_disarm_kprobe); @@ -217,7 +217,7 @@ NOKPROBE_SYMBOL(arch_prepare_kretprobe); static int try_to_emulate(struct kprobe *p, struct pt_regs *regs) { int ret; - unsigned int insn = *p->ainsn.insn; + struct ppc_inst insn = *(struct ppc_inst *)p->ainsn.insn; /* regs->nip is also adjusted if emulate_step returns 1 */ ret = emulate_step(regs, insn); diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c index 1d18991f3854..08b355f80d9e 100644 --- a/arch/powerpc/kernel/mce_power.c +++ b/arch/powerpc/kernel/mce_power.c @@ -20,6 +20,7 @@ #include #include #include +#include /* * Convert an address related to an mm to a PFN. NOTE: we are in real @@ -369,7 +370,7 @@ static int mce_find_instr_ea_and_phys(struct pt_regs *regs, uint64_t *addr, * in real-mode is tricky and can lead to recursive * faults */ - int instr; + struct ppc_inst instr; unsigned long pfn, instr_addr; struct instruction_op op; struct pt_regs tmp = *regs; @@ -377,7 +378,7 @@ static int mce_find_instr_ea_and_phys(struct pt_regs *regs, uint64_t *addr, pfn = addr_to_pfn(regs, regs->nip); if (pfn != ULONG_MAX) { instr_addr = (pfn << PAGE_SHIFT) + (regs->nip & ~PAGE_MASK); - instr = *(unsigned int *)(instr_addr); + instr = *(struct ppc_inst *)(instr_addr); if (!analyse_instr(&op, &tmp, instr)) { pfn = addr_to_pfn(regs, op.ea); *addr = op.ea; diff --git a/arch/powerpc/kernel/optprobes.c b/arch/powerpc/kernel/optprobes.c index 44006c4ca4f1..5a71fef71c22 100644 --- a/arch/powerpc/kernel/optprobes.c +++ b/arch/powerpc/kernel/optprobes.c @@ -100,8 +100,9 @@ static unsigned long can_optimize(struct kprobe *p) * Ensure that the instruction is not a conditional branch, * and that can be emulated. 
*/ - if (!is_conditional_branch(*p->ainsn.insn) && - analyse_instr(&op, ®s, *p->ainsn.insn) == 1) { + if (!is_conditional_branch(*(struct ppc_inst *)p->ainsn.insn) && + analyse_instr(&op, ®s, + *(struct ppc_inst *)p->ainsn.insn) == 1) { emulate_update_regs(®s, &op); nip = regs.nip; } @@ -148,13 +149,15 @@ void arch_remove_optimized_kprobe(struct optimized_kprobe *op) void patch_imm32_load_insns(unsigned int val, kprobe_opcode_t *addr) { /* addis r4,0,(insn)@h */ - patch_instruction(addr, ppc_inst(PPC_INST_ADDIS | ___PPC_RT(4) | - ((val >> 16) & 0xffff))); + patch_instruction((struct ppc_inst *)addr, + ppc_inst(PPC_INST_ADDIS | ___PPC_RT(4) | + ((val >> 16) & 0xffff))); addr++; /* ori r4,r4,(insn)@l */ - patch_instruction(addr, ppc_inst(PPC_INST_ORI | ___PPC_RA(4) | - ___PPC_RS(4) | (val & 0xffff))); + patch_instruction((struct ppc_inst *)addr, + ppc_inst(PPC_INST_ORI | ___PPC_RA(4) | + ___PPC_RS(4) | (val & 0xffff))); } /* @@ -164,34 +167,39 @@ void patch_imm32_load_insns(unsigned int val, kprobe_opcode_t *addr) void patch_imm64_load_insns(unsigned long val, kprobe_opcode_t *addr) { /* lis r3,(op)@highest */ - patch_instruction(addr, ppc_inst(PPC_INST_ADDIS | ___PPC_RT(3) | - ((val >> 48) & 0xffff))); + patch_instruction((struct ppc_inst *)addr, + ppc_inst(PPC_INST_ADDIS | ___PPC_RT(3) | + ((val >> 48) & 0xffff))); addr++; /* ori r3,r3,(op)@higher */ - patch_instruction(addr, ppc_inst(PPC_INST_ORI | ___PPC_RA(3) | - ___PPC_RS(3) | ((val >> 32) & 0xffff))); + patch_instruction((struct ppc_inst *)addr, + ppc_inst(PPC_INST_ORI | ___PPC_RA(3) | + ___PPC_RS(3) | ((val >> 32) & 0xffff))); addr++; /* rldicr r3,r3,32,31 */ - patch_instruction(addr, ppc_inst(PPC_INST_RLDICR | ___PPC_RA(3) | - ___PPC_RS(3) | __PPC_SH64(32) | __PPC_ME64(31))); + patch_instruction((struct ppc_inst *)addr, + ppc_inst(PPC_INST_RLDICR | ___PPC_RA(3) | + ___PPC_RS(3) | __PPC_SH64(32) | __PPC_ME64(31))); addr++; /* oris r3,r3,(op)@h */ - patch_instruction(addr, ppc_inst(PPC_INST_ORIS | ___PPC_RA(3) | - ___PPC_RS(3) | ((val >> 16) & 0xffff))); + patch_instruction((struct ppc_inst *)addr, + ppc_inst(PPC_INST_ORIS | ___PPC_RA(3) | + ___PPC_RS(3) | ((val >> 16) & 0xffff))); addr++; /* ori r3,r3,(op)@l */ - patch_instruction(addr, ppc_inst(PPC_INST_ORI | ___PPC_RA(3) | - ___PPC_RS(3) | (val & 0xffff))); + patch_instruction((struct ppc_inst *)addr, + ppc_inst(PPC_INST_ORI | ___PPC_RA(3) | + ___PPC_RS(3) | (val & 0xffff))); } int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p) { - kprobe_opcode_t *buff, branch_op_callback, branch_emulate_step; - kprobe_opcode_t *op_callback_addr, *emulate_step_addr; + struct ppc_inst branch_op_callback, branch_emulate_step; + kprobe_opcode_t *op_callback_addr, *emulate_step_addr, *buff; long b_offset; unsigned long nip, size; int rc, i; @@ -231,7 +239,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p) size = (TMPL_END_IDX * sizeof(kprobe_opcode_t)) / sizeof(int); pr_devel("Copying template to %p, size %lu\n", buff, size); for (i = 0; i < size; i++) { - rc = patch_instruction(buff + i, + rc = patch_instruction((struct ppc_inst *)(buff + i), ppc_inst(*(optprobe_template_entry + i))); if (rc < 0) goto error; @@ -254,20 +262,22 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p) } rc = create_branch(&branch_op_callback, - (unsigned int *)buff + TMPL_CALL_HDLR_IDX, + (struct ppc_inst *)(buff + TMPL_CALL_HDLR_IDX), (unsigned long)op_callback_addr, BRANCH_SET_LINK); rc |= create_branch(&branch_emulate_step, - 
(unsigned int *)buff + TMPL_EMULATE_IDX, + (struct ppc_inst *)(buff + TMPL_EMULATE_IDX), (unsigned long)emulate_step_addr, BRANCH_SET_LINK); if (rc) goto error; - patch_instruction(buff + TMPL_CALL_HDLR_IDX, branch_op_callback); - patch_instruction(buff + TMPL_EMULATE_IDX, branch_emulate_step); + patch_instruction((struct ppc_inst *)(buff + TMPL_CALL_HDLR_IDX), + branch_op_callback); + patch_instruction((struct ppc_inst *)(buff + TMPL_EMULATE_IDX), + branch_emulate_step); /* * 3. load instruction to be emulated into relevant register, and @@ -277,7 +287,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p) /* * 4. branch back from trampoline */ - patch_branch(buff + TMPL_RET_IDX, (unsigned long)nip, 0); + patch_branch((struct ppc_inst *)(buff + TMPL_RET_IDX), (unsigned long)nip, 0); flush_icache_range((unsigned long)buff, (unsigned long)(&buff[TMPL_END_IDX])); @@ -309,7 +319,7 @@ int arch_check_optimized_kprobe(struct optimized_kprobe *op) void arch_optimize_kprobes(struct list_head *oplist) { - unsigned int instr; + struct ppc_inst instr; struct optimized_kprobe *op; struct optimized_kprobe *tmp; @@ -321,9 +331,9 @@ void arch_optimize_kprobes(struct list_head *oplist) memcpy(op->optinsn.copied_insn, op->kp.addr, RELATIVEJUMP_SIZE); create_branch(&instr, - (unsigned int *)op->kp.addr, + (struct ppc_inst *)op->kp.addr, (unsigned long)op->optinsn.insn, 0); - patch_instruction(op->kp.addr, instr); + patch_instruction((struct ppc_inst *)op->kp.addr, instr); list_del_init(&op->list); } } diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index 0536e4aed330..15f0a7c84944 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c @@ -74,8 +74,8 @@ EXPORT_SYMBOL(DMA_MODE_WRITE); */ notrace void __init machine_init(u64 dt_ptr) { - unsigned int *addr = (unsigned int *)patch_site_addr(&patch__memset_nocache); - unsigned int insn; + struct ppc_inst *addr = (struct ppc_inst *)patch_site_addr(&patch__memset_nocache); + struct ppc_inst insn; /* Configure static keys first, now that we're relocated. */ setup_feature_keys(); diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c index cbb19af4a72a..3117ed675735 100644 --- a/arch/powerpc/kernel/trace/ftrace.c +++ b/arch/powerpc/kernel/trace/ftrace.c @@ -41,23 +41,23 @@ #define NUM_FTRACE_TRAMPS 8 static unsigned long ftrace_tramps[NUM_FTRACE_TRAMPS]; -static unsigned int +static struct ppc_inst ftrace_call_replace(unsigned long ip, unsigned long addr, int link) { - unsigned int op; + struct ppc_inst op; addr = ppc_function_entry((void *)addr); /* if (link) set op to 'bl' else 'b' */ - create_branch(&op, (unsigned int *)ip, addr, link ? 1 : 0); + create_branch(&op, (struct ppc_inst *)ip, addr, link ? 
1 : 0); return op; } static int -ftrace_modify_code(unsigned long ip, unsigned int old, unsigned int new) +ftrace_modify_code(unsigned long ip, struct ppc_inst old, struct ppc_inst new) { - unsigned int replaced; + struct ppc_inst replaced; /* * Note: @@ -79,7 +79,7 @@ ftrace_modify_code(unsigned long ip, unsigned int old, unsigned int new) } /* replace the text with the new text */ - if (patch_instruction((unsigned int *)ip, new)) + if (patch_instruction((struct ppc_inst *)ip, new)) return -EPERM; return 0; @@ -90,24 +90,24 @@ ftrace_modify_code(unsigned long ip, unsigned int old, unsigned int new) */ static int test_24bit_addr(unsigned long ip, unsigned long addr) { - unsigned int op; + struct ppc_inst op; addr = ppc_function_entry((void *)addr); /* use the create_branch to verify that this offset can be branched */ - return create_branch(&op, (unsigned int *)ip, addr, 0) == 0; + return create_branch(&op, (struct ppc_inst *)ip, addr, 0) == 0; } -static int is_bl_op(unsigned int op) +static int is_bl_op(struct ppc_inst op) { return (ppc_inst_val(op) & 0xfc000003) == 0x48000001; } -static int is_b_op(unsigned int op) +static int is_b_op(struct ppc_inst op) { return (ppc_inst_val(op) & 0xfc000003) == 0x48000000; } -static unsigned long find_bl_target(unsigned long ip, unsigned int op) +static unsigned long find_bl_target(unsigned long ip, struct ppc_inst op) { int offset; @@ -127,7 +127,7 @@ __ftrace_make_nop(struct module *mod, { unsigned long entry, ptr, tramp; unsigned long ip = rec->ip; - unsigned int op, pop; + struct ppc_inst op, pop; /* read where this goes */ if (probe_kernel_read(&op, (void *)ip, sizeof(int))) { @@ -208,7 +208,7 @@ __ftrace_make_nop(struct module *mod, } #endif /* CONFIG_MPROFILE_KERNEL */ - if (patch_instruction((unsigned int *)ip, pop)) { + if (patch_instruction((struct ppc_inst *)ip, pop)) { pr_err("Patching NOP failed.\n"); return -EPERM; } @@ -221,7 +221,7 @@ static int __ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr) { - unsigned int op; + struct ppc_inst op; unsigned int jmp[4]; unsigned long ip = rec->ip; unsigned long tramp; @@ -280,7 +280,7 @@ __ftrace_make_nop(struct module *mod, op = ppc_inst(PPC_INST_NOP); - if (patch_instruction((unsigned int *)ip, op)) + if (patch_instruction((struct ppc_inst *)ip, op)) return -EPERM; return 0; @@ -291,7 +291,7 @@ __ftrace_make_nop(struct module *mod, static unsigned long find_ftrace_tramp(unsigned long ip) { int i; - unsigned int instr; + struct ppc_inst instr; /* * We have the compiler generated long_branch tramps at the end @@ -328,9 +328,10 @@ static int add_ftrace_tramp(unsigned long tramp) */ static int setup_mcount_compiler_tramp(unsigned long tramp) { - int i, op; + int i; + struct ppc_inst op; unsigned long ptr; - unsigned int instr; + struct ppc_inst instr; static unsigned long ftrace_plt_tramps[NUM_FTRACE_TRAMPS]; /* Is this a known long jump tramp? 
*/ @@ -379,7 +380,7 @@ static int setup_mcount_compiler_tramp(unsigned long tramp) return -1; } - if (patch_branch((unsigned int *)tramp, ptr, 0)) { + if (patch_branch((struct ppc_inst *)tramp, ptr, 0)) { pr_debug("REL24 out of range!\n"); return -1; } @@ -395,7 +396,7 @@ static int setup_mcount_compiler_tramp(unsigned long tramp) static int __ftrace_make_nop_kernel(struct dyn_ftrace *rec, unsigned long addr) { unsigned long tramp, ip = rec->ip; - unsigned int op; + struct ppc_inst op; /* Read where this goes */ if (probe_kernel_read(&op, (void *)ip, sizeof(int))) { @@ -423,7 +424,7 @@ static int __ftrace_make_nop_kernel(struct dyn_ftrace *rec, unsigned long addr) } } - if (patch_instruction((unsigned int *)ip, ppc_inst(PPC_INST_NOP))) { + if (patch_instruction((struct ppc_inst *)ip, ppc_inst(PPC_INST_NOP))) { pr_err("Patching NOP failed.\n"); return -EPERM; } @@ -435,7 +436,7 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr) { unsigned long ip = rec->ip; - unsigned int old, new; + struct ppc_inst old, new; /* * If the calling address is more that 24 bits away, @@ -488,7 +489,7 @@ int ftrace_make_nop(struct module *mod, */ #ifndef CONFIG_MPROFILE_KERNEL static int -expected_nop_sequence(void *ip, unsigned int op0, unsigned int op1) +expected_nop_sequence(void *ip, struct ppc_inst op0, struct ppc_inst op1) { /* * We expect to see: @@ -506,7 +507,7 @@ expected_nop_sequence(void *ip, unsigned int op0, unsigned int op1) } #else static int -expected_nop_sequence(void *ip, unsigned int op0, unsigned int op1) +expected_nop_sequence(void *ip, struct ppc_inst op0, struct ppc_inst op1) { /* look for patched "NOP" on ppc64 with -mprofile-kernel */ if (!ppc_inst_equal(op0, ppc_inst(PPC_INST_NOP))) @@ -518,8 +519,8 @@ expected_nop_sequence(void *ip, unsigned int op0, unsigned int op1) static int __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) { - unsigned int op[2]; - unsigned int instr; + struct ppc_inst op[2]; + struct ppc_inst instr; void *ip = (void *)rec->ip; unsigned long entry, ptr, tramp; struct module *mod = rec->arch.mod; @@ -584,7 +585,7 @@ static int __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) { int err; - unsigned int op; + struct ppc_inst op; unsigned long ip = rec->ip; /* read where this goes */ @@ -604,7 +605,7 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) } /* create the branch to the trampoline */ - err = create_branch(&op, (unsigned int *)ip, + err = create_branch(&op, (struct ppc_inst *)ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK); if (err) { pr_err("REL24 out of range!\n"); @@ -613,7 +614,7 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) pr_devel("write to %lx\n", rec->ip); - if (patch_instruction((unsigned int *)ip, op)) + if (patch_instruction((struct ppc_inst *)ip, op)) return -EPERM; return 0; @@ -623,7 +624,7 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) static int __ftrace_make_call_kernel(struct dyn_ftrace *rec, unsigned long addr) { - unsigned int op; + struct ppc_inst op; void *ip = (void *)rec->ip; unsigned long tramp, entry, ptr; @@ -671,7 +672,7 @@ static int __ftrace_make_call_kernel(struct dyn_ftrace *rec, unsigned long addr) int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) { unsigned long ip = rec->ip; - unsigned int old, new; + struct ppc_inst old, new; /* * If the calling address is more that 24 bits away, @@ -710,7 +711,7 @@ static int __ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, unsigned long 
addr) { - unsigned int op; + struct ppc_inst op; unsigned long ip = rec->ip; unsigned long entry, ptr, tramp; struct module *mod = rec->arch.mod; @@ -758,7 +759,7 @@ __ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, /* The new target may be within range */ if (test_24bit_addr(ip, addr)) { /* within range */ - if (patch_branch((unsigned int *)ip, addr, BRANCH_SET_LINK)) { + if (patch_branch((struct ppc_inst *)ip, addr, BRANCH_SET_LINK)) { pr_err("REL24 out of range!\n"); return -EINVAL; } @@ -786,12 +787,12 @@ __ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, } /* Ensure branch is within 24 bits */ - if (create_branch(&op, (unsigned int *)ip, tramp, BRANCH_SET_LINK)) { + if (create_branch(&op, (struct ppc_inst *)ip, tramp, BRANCH_SET_LINK)) { pr_err("Branch out of range\n"); return -EINVAL; } - if (patch_branch((unsigned int *)ip, tramp, BRANCH_SET_LINK)) { + if (patch_branch((struct ppc_inst *)ip, tramp, BRANCH_SET_LINK)) { pr_err("REL24 out of range!\n"); return -EINVAL; } @@ -804,7 +805,7 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, unsigned long addr) { unsigned long ip = rec->ip; - unsigned int old, new; + struct ppc_inst old, new; /* * If the calling address is more that 24 bits away, @@ -844,10 +845,10 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, int ftrace_update_ftrace_func(ftrace_func_t func) { unsigned long ip = (unsigned long)(&ftrace_call); - unsigned int old, new; + struct ppc_inst old, new; int ret; - old = *(unsigned int *)&ftrace_call; + old = *(struct ppc_inst *)&ftrace_call; new = ftrace_call_replace(ip, (unsigned long)func, 1); ret = ftrace_modify_code(ip, old, new); @@ -855,7 +856,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func) /* Also update the regs callback function */ if (!ret) { ip = (unsigned long)(&ftrace_regs_call); - old = *(unsigned int *)&ftrace_regs_call; + old = *(struct ppc_inst *)&ftrace_regs_call; new = ftrace_call_replace(ip, (unsigned long)func, 1); ret = ftrace_modify_code(ip, old, new); } @@ -929,7 +930,7 @@ int ftrace_enable_ftrace_graph_caller(void) unsigned long ip = (unsigned long)(&ftrace_graph_call); unsigned long addr = (unsigned long)(&ftrace_graph_caller); unsigned long stub = (unsigned long)(&ftrace_graph_stub); - unsigned int old, new; + struct ppc_inst old, new; old = ftrace_call_replace(ip, stub, 0); new = ftrace_call_replace(ip, addr, 0); @@ -942,7 +943,7 @@ int ftrace_disable_ftrace_graph_caller(void) unsigned long ip = (unsigned long)(&ftrace_graph_call); unsigned long addr = (unsigned long)(&ftrace_graph_caller); unsigned long stub = (unsigned long)(&ftrace_graph_stub); - unsigned int old, new; + struct ppc_inst old, new; old = ftrace_call_replace(ip, addr, 0); new = ftrace_call_replace(ip, stub, 0); diff --git a/arch/powerpc/kernel/vecemu.c b/arch/powerpc/kernel/vecemu.c index a544590b90e5..3dd70eeb10c5 100644 --- a/arch/powerpc/kernel/vecemu.c +++ b/arch/powerpc/kernel/vecemu.c @@ -261,11 +261,12 @@ static unsigned int rfin(unsigned int x) int emulate_altivec(struct pt_regs *regs) { - unsigned int instr, i, word; + struct ppc_inst instr; + unsigned int i, word; unsigned int va, vb, vc, vd; vector128 *vrs; - if (get_user(instr, (unsigned int __user *) regs->nip)) + if (get_user(instr.val, (unsigned int __user *)regs->nip)) return -EFAULT; word = ppc_inst_val(instr); diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c index d298bb16936e..1dff9d9d6645 100644 --- a/arch/powerpc/lib/code-patching.c +++ 
b/arch/powerpc/lib/code-patching.c @@ -19,12 +19,12 @@ #include #include -static int __patch_instruction(unsigned int *exec_addr, unsigned int instr, - unsigned int *patch_addr) +static int __patch_instruction(struct ppc_inst *exec_addr, struct ppc_inst instr, + struct ppc_inst *patch_addr) { int err = 0; - __put_user_asm(instr, patch_addr, err, "stw"); + __put_user_asm(ppc_inst_val(instr), patch_addr, err, "stw"); if (err) return err; @@ -34,7 +34,7 @@ static int __patch_instruction(unsigned int *exec_addr, unsigned int instr, return 0; } -int raw_patch_instruction(unsigned int *addr, unsigned int instr) +int raw_patch_instruction(struct ppc_inst *addr, struct ppc_inst instr) { return __patch_instruction(addr, instr, addr); } @@ -137,10 +137,10 @@ static inline int unmap_patch_area(unsigned long addr) return 0; } -static int do_patch_instruction(unsigned int *addr, unsigned int instr) +static int do_patch_instruction(struct ppc_inst *addr, struct ppc_inst instr) { int err; - unsigned int *patch_addr = NULL; + struct ppc_inst *patch_addr = NULL; unsigned long flags; unsigned long text_poke_addr; unsigned long kaddr = (unsigned long)addr; @@ -161,8 +161,7 @@ static int do_patch_instruction(unsigned int *addr, unsigned int instr) goto out; } - patch_addr = (unsigned int *)(text_poke_addr) + - ((kaddr & ~PAGE_MASK) / sizeof(unsigned int)); + patch_addr = (struct ppc_inst *)(text_poke_addr + (kaddr & ~PAGE_MASK)); __patch_instruction(addr, instr, patch_addr); @@ -177,14 +176,14 @@ out: } #else /* !CONFIG_STRICT_KERNEL_RWX */ -static int do_patch_instruction(unsigned int *addr, unsigned int instr) +static int do_patch_instruction(struct ppc_inst *addr, struct ppc_inst instr) { return raw_patch_instruction(addr, instr); } #endif /* CONFIG_STRICT_KERNEL_RWX */ -int patch_instruction(unsigned int *addr, unsigned int instr) +int patch_instruction(struct ppc_inst *addr, struct ppc_inst instr) { /* Make sure we aren't patching a freed init section */ if (init_mem_is_free && init_section_contains(addr, 4)) { @@ -195,9 +194,9 @@ int patch_instruction(unsigned int *addr, unsigned int instr) } NOKPROBE_SYMBOL(patch_instruction); -int patch_branch(unsigned int *addr, unsigned long target, int flags) +int patch_branch(struct ppc_inst *addr, unsigned long target, int flags) { - unsigned int instr; + struct ppc_inst instr; create_branch(&instr, addr, target, flags); return patch_instruction(addr, instr); @@ -229,7 +228,7 @@ bool is_offset_in_branch_range(long offset) * Helper to check if a given instruction is a conditional branch * Derived from the conditional checks in analyse_instr() */ -bool is_conditional_branch(unsigned int instr) +bool is_conditional_branch(struct ppc_inst instr) { unsigned int opcode = ppc_inst_primary_opcode(instr); @@ -247,13 +246,13 @@ bool is_conditional_branch(unsigned int instr) } NOKPROBE_SYMBOL(is_conditional_branch); -int create_branch(unsigned int *instr, - const unsigned int *addr, +int create_branch(struct ppc_inst *instr, + const struct ppc_inst *addr, unsigned long target, int flags) { long offset; - *instr = 0; + *instr = ppc_inst(0); offset = target; if (! (flags & BRANCH_ABSOLUTE)) offset = offset - (unsigned long)addr; @@ -263,12 +262,12 @@ int create_branch(unsigned int *instr, return 1; /* Mask out the flags and target, so they don't step on each other. 
*/ - *instr = 0x48000000 | (flags & 0x3) | (offset & 0x03FFFFFC); + *instr = ppc_inst(0x48000000 | (flags & 0x3) | (offset & 0x03FFFFFC)); return 0; } -int create_cond_branch(unsigned int *instr, const unsigned int *addr, +int create_cond_branch(struct ppc_inst *instr, const struct ppc_inst *addr, unsigned long target, int flags) { long offset; @@ -282,27 +281,27 @@ int create_cond_branch(unsigned int *instr, const unsigned int *addr, return 1; /* Mask out the flags and target, so they don't step on each other. */ - *instr = 0x40000000 | (flags & 0x3FF0003) | (offset & 0xFFFC); + *instr = ppc_inst(0x40000000 | (flags & 0x3FF0003) | (offset & 0xFFFC)); return 0; } -static unsigned int branch_opcode(unsigned int instr) +static unsigned int branch_opcode(struct ppc_inst instr) { return ppc_inst_primary_opcode(instr) & 0x3F; } -static int instr_is_branch_iform(unsigned int instr) +static int instr_is_branch_iform(struct ppc_inst instr) { return branch_opcode(instr) == 18; } -static int instr_is_branch_bform(unsigned int instr) +static int instr_is_branch_bform(struct ppc_inst instr) { return branch_opcode(instr) == 16; } -int instr_is_relative_branch(unsigned int instr) +int instr_is_relative_branch(struct ppc_inst instr) { if (ppc_inst_val(instr) & BRANCH_ABSOLUTE) return 0; @@ -310,12 +309,12 @@ int instr_is_relative_branch(unsigned int instr) return instr_is_branch_iform(instr) || instr_is_branch_bform(instr); } -int instr_is_relative_link_branch(unsigned int instr) +int instr_is_relative_link_branch(struct ppc_inst instr) { return instr_is_relative_branch(instr) && (ppc_inst_val(instr) & BRANCH_SET_LINK); } -static unsigned long branch_iform_target(const unsigned int *instr) +static unsigned long branch_iform_target(const struct ppc_inst *instr) { signed long imm; @@ -331,7 +330,7 @@ static unsigned long branch_iform_target(const unsigned int *instr) return (unsigned long)imm; } -static unsigned long branch_bform_target(const unsigned int *instr) +static unsigned long branch_bform_target(const struct ppc_inst *instr) { signed long imm; @@ -347,7 +346,7 @@ static unsigned long branch_bform_target(const unsigned int *instr) return (unsigned long)imm; } -unsigned long branch_target(const unsigned int *instr) +unsigned long branch_target(const struct ppc_inst *instr) { if (instr_is_branch_iform(*instr)) return branch_iform_target(instr); @@ -357,7 +356,7 @@ unsigned long branch_target(const unsigned int *instr) return 0; } -int instr_is_branch_to_addr(const unsigned int *instr, unsigned long addr) +int instr_is_branch_to_addr(const struct ppc_inst *instr, unsigned long addr) { if (instr_is_branch_iform(*instr) || instr_is_branch_bform(*instr)) return branch_target(instr) == addr; @@ -365,8 +364,8 @@ int instr_is_branch_to_addr(const unsigned int *instr, unsigned long addr) return 0; } -int translate_branch(unsigned int *instr, const unsigned int *dest, - const unsigned int *src) +int translate_branch(struct ppc_inst *instr, const struct ppc_inst *dest, + const struct ppc_inst *src) { unsigned long target; @@ -392,7 +391,7 @@ void __patch_exception(int exc, unsigned long addr) * instruction of the exception, not the first one */ - patch_branch(ibase + (exc / 4) + 1, addr, 0); + patch_branch((struct ppc_inst *)(ibase + (exc / 4) + 1), addr, 0); } #endif @@ -409,7 +408,7 @@ static void __init test_trampoline(void) static void __init test_branch_iform(void) { int err; - unsigned int instr; + struct ppc_inst instr; unsigned long addr; addr = (unsigned long)&instr; @@ -484,12 +483,12 @@ static void 
__init test_branch_iform(void) static void __init test_create_function_call(void) { - unsigned int *iptr; + struct ppc_inst *iptr; unsigned long dest; - unsigned int instr; + struct ppc_inst instr; /* Check we can create a function call */ - iptr = (unsigned int *)ppc_function_entry(test_trampoline); + iptr = (struct ppc_inst *)ppc_function_entry(test_trampoline); dest = ppc_function_entry(test_create_function_call); create_branch(&instr, iptr, dest, BRANCH_SET_LINK); patch_instruction(iptr, instr); @@ -500,7 +499,8 @@ static void __init test_branch_bform(void) { int err; unsigned long addr; - unsigned int *iptr, instr, flags; + struct ppc_inst *iptr, instr; + unsigned int flags; iptr = &instr; addr = (unsigned long)iptr; @@ -570,8 +570,8 @@ static void __init test_branch_bform(void) static void __init test_translate_branch(void) { unsigned long addr; - unsigned int *p, *q; - unsigned int instr; + struct ppc_inst *p, *q; + struct ppc_inst instr; void *buf; buf = vmalloc(PAGE_ALIGN(0x2000000 + 1)); diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c index 6e7479b8887a..fd978b8ee6d6 100644 --- a/arch/powerpc/lib/feature-fixups.c +++ b/arch/powerpc/lib/feature-fixups.c @@ -32,26 +32,26 @@ struct fixup_entry { long alt_end_off; }; -static unsigned int *calc_addr(struct fixup_entry *fcur, long offset) +static struct ppc_inst *calc_addr(struct fixup_entry *fcur, long offset) { /* * We store the offset to the code as a negative offset from * the start of the alt_entry, to support the VDSO. This * routine converts that back into an actual address. */ - return (unsigned int *)((unsigned long)fcur + offset); + return (struct ppc_inst *)((unsigned long)fcur + offset); } -static int patch_alt_instruction(unsigned int *src, unsigned int *dest, - unsigned int *alt_start, unsigned int *alt_end) +static int patch_alt_instruction(struct ppc_inst *src, struct ppc_inst *dest, + struct ppc_inst *alt_start, struct ppc_inst *alt_end) { int err; - unsigned int instr; + struct ppc_inst instr; instr = *src; if (instr_is_relative_branch(*src)) { - unsigned int *target = (unsigned int *)branch_target(src); + struct ppc_inst *target = (struct ppc_inst *)branch_target(src); /* Branch within the section doesn't need translating */ if (target < alt_start || target > alt_end) { @@ -68,7 +68,7 @@ static int patch_alt_instruction(unsigned int *src, unsigned int *dest, static int patch_feature_section(unsigned long value, struct fixup_entry *fcur) { - unsigned int *start, *end, *alt_start, *alt_end, *src, *dest; + struct ppc_inst *start, *end, *alt_start, *alt_end, *src, *dest; start = calc_addr(fcur, fcur->start_off); end = calc_addr(fcur, fcur->end_off); @@ -147,15 +147,17 @@ static void do_stf_entry_barrier_fixups(enum stf_barrier_type types) pr_devel("patching dest %lx\n", (unsigned long)dest); - patch_instruction(dest, ppc_inst(instrs[0])); + patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0])); if (types & STF_BARRIER_FALLBACK) - patch_branch(dest + 1, (unsigned long)&stf_barrier_fallback, + patch_branch((struct ppc_inst *)(dest + 1), + (unsigned long)&stf_barrier_fallback, BRANCH_SET_LINK); else - patch_instruction(dest + 1, ppc_inst(instrs[1])); + patch_instruction((struct ppc_inst *)(dest + 1), + ppc_inst(instrs[1])); - patch_instruction(dest + 2, ppc_inst(instrs[2])); + patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2])); } printk(KERN_DEBUG "stf-barrier: patched %d entry locations (%s barrier)\n", i, @@ -208,12 +210,12 @@ static void 
do_stf_exit_barrier_fixups(enum stf_barrier_type types) pr_devel("patching dest %lx\n", (unsigned long)dest); - patch_instruction(dest, ppc_inst(instrs[0])); - patch_instruction(dest + 1, ppc_inst(instrs[1])); - patch_instruction(dest + 2, ppc_inst(instrs[2])); - patch_instruction(dest + 3, ppc_inst(instrs[3])); - patch_instruction(dest + 4, ppc_inst(instrs[4])); - patch_instruction(dest + 5, ppc_inst(instrs[5])); + patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0])); + patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instrs[1])); + patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2])); + patch_instruction((struct ppc_inst *)(dest + 3), ppc_inst(instrs[3])); + patch_instruction((struct ppc_inst *)(dest + 4), ppc_inst(instrs[4])); + patch_instruction((struct ppc_inst *)(dest + 5), ppc_inst(instrs[5])); } printk(KERN_DEBUG "stf-barrier: patched %d exit locations (%s barrier)\n", i, (types == STF_BARRIER_NONE) ? "no" : @@ -261,9 +263,9 @@ void do_rfi_flush_fixups(enum l1d_flush_type types) pr_devel("patching dest %lx\n", (unsigned long)dest); - patch_instruction(dest, ppc_inst(instrs[0])); - patch_instruction(dest + 1, ppc_inst(instrs[1])); - patch_instruction(dest + 2, ppc_inst(instrs[2])); + patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0])); + patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instrs[1])); + patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2])); } printk(KERN_DEBUG "rfi-flush: patched %d locations (%s flush)\n", i, @@ -296,7 +298,7 @@ void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_ dest = (void *)start + *start; pr_devel("patching dest %lx\n", (unsigned long)dest); - patch_instruction(dest, ppc_inst(instr)); + patch_instruction((struct ppc_inst *)dest, ppc_inst(instr)); } printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i); @@ -339,8 +341,8 @@ void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_ dest = (void *)start + *start; pr_devel("patching dest %lx\n", (unsigned long)dest); - patch_instruction(dest, ppc_inst(instr[0])); - patch_instruction(dest + 1, ppc_inst(instr[1])); + patch_instruction((struct ppc_inst *)dest, ppc_inst(instr[0])); + patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instr[1])); } printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i); @@ -354,7 +356,7 @@ static void patch_btb_flush_section(long *curr) end = (void *)curr + *(curr + 1); for (; start < end; start++) { pr_devel("patching dest %lx\n", (unsigned long)start); - patch_instruction(start, ppc_inst(PPC_INST_NOP)); + patch_instruction((struct ppc_inst *)start, ppc_inst(PPC_INST_NOP)); } } @@ -373,7 +375,7 @@ void do_btb_flush_fixups(void) void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end) { long *start, *end; - unsigned int *dest; + struct ppc_inst *dest; if (!(value & CPU_FTR_LWSYNC)) return ; @@ -390,18 +392,18 @@ void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end) static void do_final_fixups(void) { #if defined(CONFIG_PPC64) && defined(CONFIG_RELOCATABLE) - int *src, *dest; + struct ppc_inst *src, *dest; unsigned long length; if (PHYSICAL_START == 0) return; - src = (int *)(KERNELBASE + PHYSICAL_START); - dest = (int *)KERNELBASE; - length = (__end_interrupts - _stext) / sizeof(int); + src = (struct ppc_inst *)(KERNELBASE + PHYSICAL_START); + dest = (struct ppc_inst *)KERNELBASE; + length = (__end_interrupts - _stext) / sizeof(struct ppc_inst); while (length--) { - 
raw_patch_instruction(dest, ppc_inst(*src)); + raw_patch_instruction(dest, *src); src++; dest++; } diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c index 7f7be154da7e..95a56bb1ba3f 100644 --- a/arch/powerpc/lib/sstep.c +++ b/arch/powerpc/lib/sstep.c @@ -1163,7 +1163,7 @@ static nokprobe_inline int trap_compare(long v1, long v2) * otherwise. */ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, - unsigned int instr) + struct ppc_inst instr) { unsigned int opcode, ra, rb, rc, rd, spr, u; unsigned long int imm; @@ -3103,7 +3103,7 @@ NOKPROBE_SYMBOL(emulate_loadstore); * or -1 if the instruction is one that should not be stepped, * such as an rfid, or a mtmsrd that would clear MSR_RI. */ -int emulate_step(struct pt_regs *regs, unsigned int instr) +int emulate_step(struct pt_regs *regs, struct ppc_inst instr) { struct instruction_op op; int r, err, type; diff --git a/arch/powerpc/lib/test_emulate_step.c b/arch/powerpc/lib/test_emulate_step.c index b928b21feac1..46af80279ebc 100644 --- a/arch/powerpc/lib/test_emulate_step.c +++ b/arch/powerpc/lib/test_emulate_step.c @@ -462,7 +462,7 @@ struct compute_test { struct { char *descr; unsigned long flags; - unsigned int instr; + struct ppc_inst instr; struct pt_regs regs; } subtests[MAX_SUBTESTS + 1]; }; @@ -843,7 +843,7 @@ static struct compute_test compute_tests[] = { }; static int __init emulate_compute_instr(struct pt_regs *regs, - unsigned int instr) + struct ppc_inst instr) { struct instruction_op op; @@ -861,7 +861,7 @@ static int __init emulate_compute_instr(struct pt_regs *regs, } static int __init execute_compute_instr(struct pt_regs *regs, - unsigned int instr) + struct ppc_inst instr) { extern int exec_instr(struct pt_regs *regs); extern s32 patch__exec_instr; @@ -892,7 +892,8 @@ static void __init run_tests_compute(void) unsigned long flags; struct compute_test *test; struct pt_regs *regs, exp, got; - unsigned int i, j, k, instr; + unsigned int i, j, k; + struct ppc_inst instr; bool ignore_gpr, ignore_xer, ignore_ccr, passed; for (i = 0; i < ARRAY_SIZE(compute_tests); i++) { diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 2c23c3076b1e..4f0ef68a7d31 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -47,7 +47,7 @@ * Check whether the instruction inst is a store using * an update addressing form which will update r1. 
*/ -static bool store_updates_sp(unsigned int inst) +static bool store_updates_sp(struct ppc_inst inst) { /* check for 1 in the rA field */ if (((ppc_inst_val(inst) >> 16) & 0x1f) != 1) @@ -305,7 +305,7 @@ static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address, if ((flags & FAULT_FLAG_WRITE) && (flags & FAULT_FLAG_USER) && access_ok(nip, sizeof(*nip))) { - unsigned int inst; + struct ppc_inst inst; if (!probe_user_read(&inst, nip, sizeof(inst))) return !store_updates_sp(inst); diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index 3dcfecf858f3..13b9dd5e4a76 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -421,14 +421,14 @@ static __u64 power_pmu_bhrb_to(u64 addr) if (probe_kernel_read(&instr, (void *)addr, sizeof(instr))) return 0; - return branch_target(&instr); + return branch_target((struct ppc_inst *)&instr); } /* Userspace: need copy instruction here then translate it */ if (probe_user_read(&instr, (unsigned int __user *)addr, sizeof(instr))) return 0; - target = branch_target(&instr); + target = branch_target((struct ppc_inst *)&instr); if ((!target) || (instr & BRANCH_ABSOLUTE)) return target; diff --git a/arch/powerpc/platforms/86xx/mpc86xx_smp.c b/arch/powerpc/platforms/86xx/mpc86xx_smp.c index 31540ebf1e29..dba3aa73c062 100644 --- a/arch/powerpc/platforms/86xx/mpc86xx_smp.c +++ b/arch/powerpc/platforms/86xx/mpc86xx_smp.c @@ -73,7 +73,7 @@ smp_86xx_kick_cpu(int nr) /* Setup fake reset vector to call __secondary_start_mpc86xx. */ target = (unsigned long) __secondary_start_mpc86xx; - patch_branch(vector, target, BRANCH_SET_LINK); + patch_branch((struct ppc_inst *)vector, target, BRANCH_SET_LINK); /* Kick that CPU */ smp_86xx_release_core(nr); @@ -83,7 +83,7 @@ smp_86xx_kick_cpu(int nr) mdelay(1); /* Restore the exception vector */ - patch_instruction(vector, ppc_inst(save_vector)); + patch_instruction((struct ppc_inst *)vector, ppc_inst(save_vector)); local_irq_restore(flags); diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c index 44a00990af9d..9969c07035b6 100644 --- a/arch/powerpc/platforms/powermac/smp.c +++ b/arch/powerpc/platforms/powermac/smp.c @@ -814,7 +814,7 @@ static int smp_core99_kick_cpu(int nr) * b __secondary_start_pmac_0 + nr*8 */ target = (unsigned long) __secondary_start_pmac_0 + nr * 8; - patch_branch(vector, target, BRANCH_SET_LINK); + patch_branch((struct ppc_inst *)vector, target, BRANCH_SET_LINK); /* Put some life in our friend */ pmac_call_feature(PMAC_FTR_RESET_CPU, NULL, nr, 0); @@ -827,7 +827,7 @@ static int smp_core99_kick_cpu(int nr) mdelay(1); /* Restore our exception vector */ - patch_instruction(vector, ppc_inst(save_vector)); + patch_instruction((struct ppc_inst *)vector, ppc_inst(save_vector)); local_irq_restore(flags); if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu done", 0x347); diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index 4cf998518047..2e3b15813cf1 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -100,7 +100,7 @@ static long *xmon_fault_jmp[NR_CPUS]; /* Breakpoint stuff */ struct bpt { unsigned long address; - unsigned int *instr; + struct ppc_inst *instr; atomic_t ref_count; int enabled; unsigned long pad; @@ -876,8 +876,8 @@ static struct bpt *new_breakpoint(unsigned long a) for (bp = bpts; bp < &bpts[NBPTS]; ++bp) { if (!bp->enabled && atomic_read(&bp->ref_count) == 0) { bp->address = a; - bp->instr = bpt_table + ((bp - bpts) * BPT_WORDS); - 
patch_instruction(bp->instr + 1, bpinstr); + bp->instr = (void *)(bpt_table + ((bp - bpts) * BPT_WORDS)); + patch_instruction(bp->instr + 1, ppc_inst(bpinstr)); return bp; } } @@ -889,7 +889,7 @@ static struct bpt *new_breakpoint(unsigned long a) static void insert_bpts(void) { int i; - unsigned int instr; + struct ppc_inst instr; struct bpt *bp; bp = bpts; @@ -911,8 +911,8 @@ static void insert_bpts(void) patch_instruction(bp->instr, instr); if (bp->enabled & BP_CIABR) continue; - if (patch_instruction((unsigned int *)bp->address, - bpinstr) != 0) { + if (patch_instruction((struct ppc_inst *)bp->address, + ppc_inst(bpinstr)) != 0) { printf("Couldn't write instruction at %lx, " "disabling breakpoint there\n", bp->address); bp->enabled &= ~BP_TRAP; @@ -940,7 +940,7 @@ static void remove_bpts(void) { int i; struct bpt *bp; - unsigned instr; + struct ppc_inst instr; bp = bpts; for (i = 0; i < NBPTS; ++i, ++bp) { @@ -949,7 +949,7 @@ static void remove_bpts(void) if (mread(bp->address, &instr, 4) == 4 && ppc_inst_equal(instr, ppc_inst(bpinstr)) && patch_instruction( - (unsigned int *)bp->address, bp->instr[0]) != 0) + (struct ppc_inst *)bp->address, bp->instr[0]) != 0) printf("Couldn't remove breakpoint at %lx\n", bp->address); } @@ -1156,7 +1156,7 @@ static int do_step(struct pt_regs *regs) */ static int do_step(struct pt_regs *regs) { - unsigned int instr; + struct ppc_inst instr; int stepped; force_enable_xmon(); @@ -1322,7 +1322,7 @@ csum(void) */ static long check_bp_loc(unsigned long addr) { - unsigned int instr; + struct ppc_inst instr; addr &= ~3; if (!is_kernel_addr(addr)) { @@ -2848,7 +2848,7 @@ generic_inst_dump(unsigned long adr, long count, int praddr, { int nr, dotted; unsigned long first_adr; - unsigned int inst, last_inst = ppc_inst(0); + struct ppc_inst inst, last_inst = ppc_inst(0); unsigned char val[4]; dotted = 0; diff --git a/arch/powerpc/xmon/xmon_bpts.h b/arch/powerpc/xmon/xmon_bpts.h index b7e94375db86..57e6fb03de48 100644 --- a/arch/powerpc/xmon/xmon_bpts.h +++ b/arch/powerpc/xmon/xmon_bpts.h @@ -4,11 +4,11 @@ #define NBPTS 256 #ifndef __ASSEMBLY__ -#define BPT_SIZE (sizeof(unsigned int) * 2) -#define BPT_WORDS (BPT_SIZE / sizeof(unsigned int)) +#include +#define BPT_SIZE (sizeof(struct ppc_inst) * 2) +#define BPT_WORDS (BPT_SIZE / sizeof(struct ppc_inst)) extern unsigned int bpt_table[NBPTS * BPT_WORDS]; - #endif /* __ASSEMBLY__ */ #endif /* XMON_BPTS_H */ -- cgit 1.4.1 From 1251288e64ba44969e1c4d59e5ee88a6e873447b Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 19 May 2020 05:49:17 +0000 Subject: powerpc/8xx: Remove now unused TLB miss functions The code to setup linear and IMMR mapping via huge TLB entries is not called anymore. Remove it. Also remove the handling of removed code exits in the perf driver. 
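For background, each 8xx TLB miss handler is left with a single exit patch site that the perf driver toggles when a miss-counting event is added or removed. A rough sketch of that mechanism, assuming the usual asm/code-patching.h helpers (this is an illustration only, not the driver code; the real hunks are in arch/powerpc/perf/8xx-pmu.c below):

	/* illustration only: route the remaining ITLB miss exit through the
	 * perf counting stub while a miss-counting event is active */
	static void itlb_miss_counting_on(void)
	{
		unsigned long target = patch_site_addr(&patch__itlbmiss_perf);

		patch_branch_site(&patch__itlbmiss_exit_1, target, 0);
	}

With ITLBMissLinear, DTLBMissIMMR and DTLBMissLinear gone, the extra patch__itlbmiss_exit_2, patch__dtlbmiss_exit_2 and patch__dtlbmiss_exit_3 sites have no remaining users, which is why the head_8xx.S and 8xx-pmu.c hunks below simply drop them.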
Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/75750d25849cb8e73ca519866bb892d7eb9649c0.1589866984.git.christophe.leroy@csgroup.eu --- arch/powerpc/include/asm/nohash/32/mmu-8xx.h | 8 +-- arch/powerpc/kernel/head_8xx.S | 83 ---------------------------- arch/powerpc/perf/8xx-pmu.c | 10 ---- 3 files changed, 1 insertion(+), 100 deletions(-) (limited to 'arch/powerpc/perf') diff --git a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h index 4d3ef3841b00..e82368838416 100644 --- a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h +++ b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h @@ -240,13 +240,7 @@ static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize) } /* patch sites */ -extern s32 patch__itlbmiss_linmem_top, patch__itlbmiss_linmem_top8; -extern s32 patch__dtlbmiss_linmem_top, patch__dtlbmiss_immr_jmp; -extern s32 patch__fixupdar_linmem_top; -extern s32 patch__dtlbmiss_romem_top, patch__dtlbmiss_romem_top8; - -extern s32 patch__itlbmiss_exit_1, patch__itlbmiss_exit_2; -extern s32 patch__dtlbmiss_exit_1, patch__dtlbmiss_exit_2, patch__dtlbmiss_exit_3; +extern s32 patch__itlbmiss_exit_1, patch__dtlbmiss_exit_1; extern s32 patch__itlbmiss_perf, patch__dtlbmiss_perf; #endif /* !__ASSEMBLY__ */ diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S index d1546f379757..fb5d17187772 100644 --- a/arch/powerpc/kernel/head_8xx.S +++ b/arch/powerpc/kernel/head_8xx.S @@ -278,33 +278,6 @@ InstructionTLBMiss: rfi #endif -#ifndef CONFIG_PIN_TLB_TEXT -ITLBMissLinear: - mtcr r11 -#if defined(CONFIG_STRICT_KERNEL_RWX) && CONFIG_ETEXT_SHIFT < 23 - patch_site 0f, patch__itlbmiss_linmem_top8 - - mfspr r10, SPRN_SRR0 -0: subis r11, r10, (PAGE_OFFSET - 0x80000000)@ha - rlwinm r11, r11, 4, MI_PS8MEG ^ MI_PS512K - ori r11, r11, MI_PS512K | MI_SVALID - rlwinm r10, r10, 0, 0x0ff80000 /* 8xx supports max 256Mb RAM */ -#else - /* Set 8M byte page and mark it valid */ - li r11, MI_PS8MEG | MI_SVALID - rlwinm r10, r10, 20, 0x0f800000 /* 8xx supports max 256Mb RAM */ -#endif - mtspr SPRN_MI_TWC, r11 - ori r10, r10, 0xf0 | MI_SPS16K | _PAGE_SH | _PAGE_DIRTY | \ - _PAGE_PRESENT - mtspr SPRN_MI_RPN, r10 /* Update TLB entry */ - -0: mfspr r10, SPRN_SPRG_SCRATCH0 - mfspr r11, SPRN_SPRG_SCRATCH1 - rfi - patch_site 0b, patch__itlbmiss_exit_2 -#endif - . 
= 0x1200 DataStoreTLBMiss: mtspr SPRN_DAR, r10 @@ -371,62 +344,6 @@ DataStoreTLBMiss: rfi patch_site 0b, patch__dtlbmiss_exit_1 -DTLBMissIMMR: - mtcr r11 - /* Set 512k byte guarded page and mark it valid */ - li r10, MD_PS512K | MD_GUARDED | MD_SVALID - mtspr SPRN_MD_TWC, r10 - mfspr r10, SPRN_IMMR /* Get current IMMR */ - rlwinm r10, r10, 0, 0xfff80000 /* Get 512 kbytes boundary */ - ori r10, r10, 0xf0 | MD_SPS16K | _PAGE_SH | _PAGE_DIRTY | \ - _PAGE_PRESENT | _PAGE_NO_CACHE - mtspr SPRN_MD_RPN, r10 /* Update TLB entry */ - - li r11, RPN_PATTERN - -0: mfspr r10, SPRN_DAR - mtspr SPRN_DAR, r11 /* Tag DAR */ - mfspr r11, SPRN_M_TW - rfi - patch_site 0b, patch__dtlbmiss_exit_2 - -DTLBMissLinear: - mtcr r11 - rlwinm r10, r10, 20, 0x0f800000 /* 8xx supports max 256Mb RAM */ -#if defined(CONFIG_STRICT_KERNEL_RWX) && CONFIG_DATA_SHIFT < 23 - patch_site 0f, patch__dtlbmiss_romem_top8 - -0: subis r11, r10, (PAGE_OFFSET - 0x80000000)@ha - rlwinm r11, r11, 0, 0xff800000 - neg r10, r11 - or r11, r11, r10 - rlwinm r11, r11, 4, MI_PS8MEG ^ MI_PS512K - ori r11, r11, MI_PS512K | MI_SVALID - mfspr r10, SPRN_MD_EPN - rlwinm r10, r10, 0, 0x0ff80000 /* 8xx supports max 256Mb RAM */ -#else - /* Set 8M byte page and mark it valid */ - li r11, MD_PS8MEG | MD_SVALID -#endif - mtspr SPRN_MD_TWC, r11 -#ifdef CONFIG_STRICT_KERNEL_RWX - patch_site 0f, patch__dtlbmiss_romem_top - -0: subis r11, r10, 0 - rlwimi r10, r11, 11, _PAGE_RO -#endif - ori r10, r10, 0xf0 | MD_SPS16K | _PAGE_SH | _PAGE_DIRTY | \ - _PAGE_PRESENT - mtspr SPRN_MD_RPN, r10 /* Update TLB entry */ - - li r11, RPN_PATTERN - -0: mfspr r10, SPRN_DAR - mtspr SPRN_DAR, r11 /* Tag DAR */ - mfspr r11, SPRN_M_TW - rfi - patch_site 0b, patch__dtlbmiss_exit_3 - /* This is an instruction TLB error on the MPC8xx. This could be due * to many reasons, such as executing guarded memory or illegal instruction * addresses. There is nothing to do but handle a big time error fault. 
diff --git a/arch/powerpc/perf/8xx-pmu.c b/arch/powerpc/perf/8xx-pmu.c index acc27fc63eb7..e53c3c161257 100644 --- a/arch/powerpc/perf/8xx-pmu.c +++ b/arch/powerpc/perf/8xx-pmu.c @@ -100,9 +100,6 @@ static int mpc8xx_pmu_add(struct perf_event *event, int flags) unsigned long target = patch_site_addr(&patch__itlbmiss_perf); patch_branch_site(&patch__itlbmiss_exit_1, target, 0); -#ifndef CONFIG_PIN_TLB_TEXT - patch_branch_site(&patch__itlbmiss_exit_2, target, 0); -#endif } val = itlb_miss_counter; break; @@ -111,8 +108,6 @@ static int mpc8xx_pmu_add(struct perf_event *event, int flags) unsigned long target = patch_site_addr(&patch__dtlbmiss_perf); patch_branch_site(&patch__dtlbmiss_exit_1, target, 0); - patch_branch_site(&patch__dtlbmiss_exit_2, target, 0); - patch_branch_site(&patch__dtlbmiss_exit_3, target, 0); } val = dtlb_miss_counter; break; @@ -175,9 +170,6 @@ static void mpc8xx_pmu_del(struct perf_event *event, int flags) __PPC_SPR(SPRN_SPRG_SCRATCH0)); patch_instruction_site(&patch__itlbmiss_exit_1, insn); -#ifndef CONFIG_PIN_TLB_TEXT - patch_instruction_site(&patch__itlbmiss_exit_2, insn); -#endif } break; case PERF_8xx_ID_DTLB_LOAD_MISS: @@ -187,8 +179,6 @@ static void mpc8xx_pmu_del(struct perf_event *event, int flags) __PPC_SPR(SPRN_DAR)); patch_instruction_site(&patch__dtlbmiss_exit_1, insn); - patch_instruction_site(&patch__dtlbmiss_exit_2, insn); - patch_instruction_site(&patch__dtlbmiss_exit_3, insn); } break; } -- cgit 1.4.1 From b4ac18eead28611ff470d0f47a35c4e0ac080d9c Mon Sep 17 00:00:00 2001 From: Kajol Jain Date: Mon, 25 May 2020 16:13:03 +0530 Subject: powerpc/perf/hv-24x7: Fix inconsistent output values incase multiple hv-24x7 events run Commit 2b206ee6b0df ("powerpc/perf/hv-24x7: Display change in counter values") added code to print the _change_ in the counter value rather than the raw value for 24x7 counters. In the case of transactions, the event count is set to 0 at the beginning of the transaction. It also sets the event's prev_count to the raw value at the time of initialization. Because the event count is set to 0, we see some weird behaviour whenever we run multiple 24x7 events at a time. For example: command#: ./perf stat -e "{hv_24x7/PM_MCS01_128B_RD_DISP_PORT01,chip=0/, hv_24x7/PM_MCS01_128B_RD_DISP_PORT01,chip=1/}" -C 0 -I 1000 sleep 100 1.000121704 120 hv_24x7/PM_MCS01_128B_RD_DISP_PORT01,chip=0/ 1.000121704 5 hv_24x7/PM_MCS01_128B_RD_DISP_PORT01,chip=1/ 2.000357733 8 hv_24x7/PM_MCS01_128B_RD_DISP_PORT01,chip=0/ 2.000357733 10 hv_24x7/PM_MCS01_128B_RD_DISP_PORT01,chip=1/ 3.000495215 18,446,744,073,709,551,616 hv_24x7/PM_MCS01_128B_RD_DISP_PORT01,chip=0/ 3.000495215 18,446,744,073,709,551,616 hv_24x7/PM_MCS01_128B_RD_DISP_PORT01,chip=1/ 4.000641884 56 hv_24x7/PM_MCS01_128B_RD_DISP_PORT01,chip=0/ 4.000641884 18,446,744,073,709,551,616 hv_24x7/PM_MCS01_128B_RD_DISP_PORT01,chip=1/ 5.000791887 18,446,744,073,709,551,616 hv_24x7/PM_MCS01_128B_RD_DISP_PORT01,chip=0/ We get these large values when we use -I. Because event_count is set to 0, in the interval case the overall event_count no longer increases monotonically: the new delta can be smaller than the previous count. When the intervals are printed, the resulting negative value shows up as these large numbers. This patch removes the part of 'h_24x7_event_read' where we set event_count to 0. There won't be much impact, as we already set event->hw.prev_count to the raw value at initialization time in order to print the change value.
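The 18,446,744,073,709,551,616-style numbers are simply unsigned wrap-around in the interval arithmetic. A minimal stand-alone illustration of the effect (nothing below is perf or kernel code, and the two totals are made-up values):

	#include <inttypes.h>
	#include <stdio.h>

	int main(void)
	{
		/* totals reported for two consecutive -I intervals */
		uint64_t prev_total = 120;
		uint64_t new_total = 18;	/* went "backwards" after the count was reset to 0 */
		/* the interval delta is effectively an unsigned 64-bit subtraction */
		uint64_t delta = new_total - prev_total;

		printf("%" PRIu64 "\n", delta);	/* 18446744073709551514, i.e. just below 2^64 */
		return 0;
	}

Keeping event->count monotonic, i.e. not zeroing it at the start of a transaction, avoids the negative delta and hence the wrap-around.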
With this patch, on a Power9 platform: command#: ./perf stat -e "{hv_24x7/PM_MCS01_128B_RD_DISP_PORT01,chip=0/, hv_24x7/PM_MCS01_128B_RD_DISP_PORT01,chip=1/}" -C 0 -I 1000 sleep 100 1.000117685 93 hv_24x7/PM_MCS01_128B_RD_DISP_PORT01,chip=0/ 1.000117685 1 hv_24x7/PM_MCS01_128B_RD_DISP_PORT01,chip=1/ 2.000349331 98 hv_24x7/PM_MCS01_128B_RD_DISP_PORT01,chip=0/ 2.000349331 2 hv_24x7/PM_MCS01_128B_RD_DISP_PORT01,chip=1/ 3.000495900 131 hv_24x7/PM_MCS01_128B_RD_DISP_PORT01,chip=0/ 3.000495900 4 hv_24x7/PM_MCS01_128B_RD_DISP_PORT01,chip=1/ 4.000645920 204 hv_24x7/PM_MCS01_128B_RD_DISP_PORT01,chip=0/ 4.000645920 61 hv_24x7/PM_MCS01_128B_RD_DISP_PORT01,chip=1/ 4.284169997 22 hv_24x7/PM_MCS01_128B_RD_DISP_PORT01,chip=0/ Suggested-by: Sukadev Bhattiprolu Signed-off-by: Kajol Jain Tested-by: Madhavan Srinivasan Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200525104308.9814-2-kjain@linux.ibm.com --- arch/powerpc/perf/hv-24x7.c | 10 ---------- 1 file changed, 10 deletions(-) (limited to 'arch/powerpc/perf') diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c index 573e0b309c0c..48e8f4b17b91 100644 --- a/arch/powerpc/perf/hv-24x7.c +++ b/arch/powerpc/perf/hv-24x7.c @@ -1400,16 +1400,6 @@ static void h_24x7_event_read(struct perf_event *event) h24x7hw = &get_cpu_var(hv_24x7_hw); h24x7hw->events[i] = event; put_cpu_var(h24x7hw); - /* - * Clear the event count so we can compute the _change_ - * in the 24x7 raw counter value at the end of the txn. - * - * Note that we could alternatively read the 24x7 value - * now and save its value in event->hw.prev_count. But - * that would require issuing a hcall, which would then - * defeat the purpose of using the txn interface. - */ - local64_set(&event->count, 0); } put_cpu_var(hv_24x7_reqb); -- cgit 1.4.1 From 8ba21426738207711347335b2cf3e99c690fc777 Mon Sep 17 00:00:00 2001 From: Kajol Jain Date: Mon, 25 May 2020 16:13:04 +0530 Subject: powerpc/hv-24x7: Add rtas call in hv-24x7 driver to get processor details For hv_24x7 socket/chip level events, the specific chip-id to which the requested data belongs has to be passed as part of the pmu event. But details such as the number of chips per socket in the system are not exposed. This patch implements read_24x7_sys_info() to get system parameter values like the number of sockets, cores per chip and chips per socket. An rtas_call with the token "PROCESSOR_MODULE_INFO" is used to get these values. A subsequent patch exports these values via sysfs. The patch also makes these parameters default to 1.
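For reference, this is the rtas_data_buf layout that read_24x7_sys_info() below relies on, as inferred from the parsing code itself rather than quoted from the PAPR+ text (all fields are big-endian 16-bit values at byte offsets into the buffer):

	offset 0: length of the returned data
	offset 2: number of module types (ntypes)
	offset 4: number of sockets
	offset 6: chips per socket
	offset 8: cores per chip

If the call fails, the returned length is too short, or ntypes is zero, the three values keep their defaults of 1.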
Signed-off-by: Kajol Jain Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200525104308.9814-3-kjain@linux.ibm.com --- arch/powerpc/include/asm/rtas.h | 6 ++++ arch/powerpc/perf/hv-24x7.c | 62 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 68 insertions(+) (limited to 'arch/powerpc/perf') diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h index 3c1887351c71..7ef81b3e33f6 100644 --- a/arch/powerpc/include/asm/rtas.h +++ b/arch/powerpc/include/asm/rtas.h @@ -483,5 +483,11 @@ static inline void rtas_initialize(void) { }; extern int call_rtas(const char *, int, int, unsigned long *, ...); +#ifdef CONFIG_HV_PERF_CTRS +void read_24x7_sys_info(void); +#else +static inline void read_24x7_sys_info(void) { } +#endif + #endif /* __KERNEL__ */ #endif /* _POWERPC_RTAS_H */ diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c index 48e8f4b17b91..fc16d979c191 100644 --- a/arch/powerpc/perf/hv-24x7.c +++ b/arch/powerpc/perf/hv-24x7.c @@ -20,6 +20,7 @@ #include #include +#include #include "hv-24x7.h" #include "hv-24x7-catalog.h" #include "hv-common.h" @@ -57,6 +58,65 @@ static bool is_physical_domain(unsigned domain) } } +/* + * The Processor Module Information system parameter allows transferring + * of certain processor module information from the platform to the OS. + * Refer PAPR+ document to get parameter token value as '43'. + */ + +#define PROCESSOR_MODULE_INFO 43 + +static u32 phys_sockets; /* Physical sockets */ +static u32 phys_chipspersocket; /* Physical chips per socket*/ +static u32 phys_coresperchip; /* Physical cores per chip */ + +/* + * read_24x7_sys_info() + * Retrieve the number of sockets and chips per socket and cores per + * chip details through the get-system-parameter rtas call. + */ +void read_24x7_sys_info(void) +{ + int call_status, len, ntypes; + + spin_lock(&rtas_data_buf_lock); + + /* + * Making system parameter: chips and sockets and cores per chip + * default to 1. + */ + phys_sockets = 1; + phys_chipspersocket = 1; + phys_coresperchip = 1; + + call_status = rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1, + NULL, + PROCESSOR_MODULE_INFO, + __pa(rtas_data_buf), + RTAS_DATA_BUF_SIZE); + + if (call_status != 0) { + pr_err("Error calling get-system-parameter %d\n", + call_status); + } else { + len = be16_to_cpup((__be16 *)&rtas_data_buf[0]); + if (len < 8) + goto out; + + ntypes = be16_to_cpup((__be16 *)&rtas_data_buf[2]); + + if (!ntypes) + goto out; + + phys_sockets = be16_to_cpup((__be16 *)&rtas_data_buf[4]); + phys_chipspersocket = be16_to_cpup((__be16 *)&rtas_data_buf[6]); + phys_coresperchip = be16_to_cpup((__be16 *)&rtas_data_buf[8]); + } + +out: + spin_unlock(&rtas_data_buf_lock); +} + /* Domains for which more than one result element are returned for each event. */ static bool domain_needs_aggregation(unsigned int domain) { @@ -1605,6 +1665,8 @@ static int hv_24x7_init(void) if (r) return r; + read_24x7_sys_info(); + return 0; } -- cgit 1.4.1 From 60beb65da1efd4cc23d05141181c39b98487950f Mon Sep 17 00:00:00 2001 From: Kajol Jain Date: Mon, 25 May 2020 16:13:05 +0530 Subject: powerpc/hv-24x7: Add sysfs files inside hv-24x7 device to show processor details To expose the system dependent parameter like total number of sockets and numbers of chips per socket, patch adds two sysfs files. "sockets" and "chips" are added to /sys/devices/hv_24x7/interface/ of the "hv_24x7" pmu. 
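Once the attributes are in place they can be read like any other sysfs file, e.g. cat /sys/devices/hv_24x7/interface/sockets. A throwaway user-space sketch (not part of the patch; the three names follow the DEVICE_ATTR_RO() entries added in the hunk below, which also cover chips per socket and cores per chip):

	#include <stdio.h>

	static void show(const char *name)
	{
		char path[128], buf[32];
		FILE *f;

		snprintf(path, sizeof(path), "/sys/devices/hv_24x7/interface/%s", name);
		f = fopen(path, "r");
		if (!f)
			return;
		if (fgets(buf, sizeof(buf), f))
			printf("%s: %s", name, buf);
		fclose(f);
	}

	int main(void)
	{
		show("sockets");
		show("chipspersocket");
		show("coresperchip");
		return 0;
	}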
Signed-off-by: Kajol Jain Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200525104308.9814-4-kjain@linux.ibm.com --- arch/powerpc/perf/hv-24x7.c | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) (limited to 'arch/powerpc/perf') diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c index fc16d979c191..db213eb7cb02 100644 --- a/arch/powerpc/perf/hv-24x7.c +++ b/arch/powerpc/perf/hv-24x7.c @@ -446,6 +446,24 @@ static ssize_t device_show_string(struct device *dev, return sprintf(buf, "%s\n", (char *)d->var); } +static ssize_t sockets_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", phys_sockets); +} + +static ssize_t chipspersocket_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", phys_chipspersocket); +} + +static ssize_t coresperchip_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", phys_coresperchip); +} + static struct attribute *device_str_attr_create_(char *name, char *str) { struct dev_ext_attribute *attr = kzalloc(sizeof(*attr), GFP_KERNEL); @@ -1092,6 +1110,9 @@ PAGE_0_ATTR(catalog_len, "%lld\n", (unsigned long long)be32_to_cpu(page_0->length) * 4096); static BIN_ATTR_RO(catalog, 0/* real length varies */); static DEVICE_ATTR_RO(domains); +static DEVICE_ATTR_RO(sockets); +static DEVICE_ATTR_RO(chipspersocket); +static DEVICE_ATTR_RO(coresperchip); static struct bin_attribute *if_bin_attrs[] = { &bin_attr_catalog, @@ -1102,6 +1123,9 @@ static struct attribute *if_attrs[] = { &dev_attr_catalog_len.attr, &dev_attr_catalog_version.attr, &dev_attr_domains.attr, + &dev_attr_sockets.attr, + &dev_attr_chipspersocket.attr, + &dev_attr_coresperchip.attr, NULL, }; -- cgit 1.4.1