Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/debug.S              | 19
-rw-r--r--  arch/arm/kernel/etm.c                |  4
-rw-r--r--  arch/arm/kernel/kprobes-decode.c     | 10
-rw-r--r--  arch/arm/kernel/perf_event.c         | 33
-rw-r--r--  arch/arm/kernel/perf_event_v6.c      |  2
-rw-r--r--  arch/arm/kernel/perf_event_v7.c      | 26
-rw-r--r--  arch/arm/kernel/perf_event_xscale.c  |  4
-rw-r--r--  arch/arm/kernel/sleep.S              | 14
8 files changed, 67 insertions(+), 45 deletions(-)
diff --git a/arch/arm/kernel/debug.S b/arch/arm/kernel/debug.S
index d2d983be096d..bcd66e00bdbe 100644
--- a/arch/arm/kernel/debug.S
+++ b/arch/arm/kernel/debug.S
@@ -25,7 +25,7 @@
 		.macro	addruart, rp, rv
 		.endm
 
-#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
+#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) || defined(CONFIG_CPU_V7)
 
 		.macro	senduart, rd, rx
 		mcr	p14, 0, \rd, c0, c5, 0
@@ -49,23 +49,6 @@
 1002:
 		.endm
 
-#elif defined(CONFIG_CPU_V7)
-
-		.macro	senduart, rd, rx
-		mcr	p14, 0, \rd, c0, c5, 0
-		.endm
-
-		.macro	busyuart, rd, rx
-busy:		mrc	p14, 0, pc, c0, c1, 0
-		bcs	busy
-		.endm
-
-		.macro	waituart, rd, rx
-wait:		mrc	p14, 0, pc, c0, c1, 0
-		bcs	wait
-
-		.endm
-
 #elif defined(CONFIG_CPU_XSCALE)
 
 		.macro	senduart, rd, rx
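
The CONFIG_CPU_V7 branch removed above duplicated the V6/V6K DCC macros
(the same CP14 transmit and status accesses), so the two cases are folded
into a single condition. For orientation, senduart/busyuart implement the
usual transmit busy-wait; a minimal C sketch of that pattern, where
dcc_can_send() and dcc_write() are hypothetical stand-ins for the mrc/mcr
p14 accesses in the macros:

    /* Illustrative only: C rendering of the DCC polling above. The
     * helpers are hypothetical; the real accessors are the CP14
     * coprocessor instructions in the assembly macros. */
    static void dcc_putc(char c)
    {
            while (!dcc_can_send())   /* busyuart: spin until TX is free */
                    ;
            dcc_write(c);             /* senduart: write the TX register */
    }
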
diff --git a/arch/arm/kernel/etm.c b/arch/arm/kernel/etm.c
index 052b509e2d5f..1bec8b5f22f0 100644
--- a/arch/arm/kernel/etm.c
+++ b/arch/arm/kernel/etm.c
@@ -338,7 +338,7 @@ static struct miscdevice etb_miscdev = {
 	.fops = &etb_fops,
 };
 
-static int __init etb_probe(struct amba_device *dev, const struct amba_id *id)
+static int __devinit etb_probe(struct amba_device *dev, const struct amba_id *id)
 {
 	struct tracectx *t = &tracer;
 	int ret = 0;
@@ -530,7 +530,7 @@ static ssize_t trace_mode_store(struct kobject *kobj,
 static struct kobj_attribute trace_mode_attr =
 	__ATTR(trace_mode, 0644, trace_mode_show, trace_mode_store);
 
-static int __init etm_probe(struct amba_device *dev, const struct amba_id *id)
+static int __devinit etm_probe(struct amba_device *dev, const struct amba_id *id)
 {
 	struct tracectx *t = &tracer;
 	int ret = 0;
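
AMBA devices can be bound after boot, so probe routines must not live in
__init text, which is discarded once init memory is freed; __devinit keeps
them available whenever hotplug support is enabled. A sketch of the driver
registration this affects, with fields following struct amba_driver (the
etb_remove and etb_ids names are assumed here, not shown in this hunk):

    /* Sketch: AMBA driver registration. If .probe pointed at __init
     * code, a post-boot bind would call into freed init memory. */
    static struct amba_driver etb_driver = {
            .drv        = { .name = "etb" },
            .probe      = etb_probe,    /* now __devinit, per the hunk above */
            .remove     = etb_remove,   /* assumed counterpart */
            .id_table   = etb_ids,      /* assumed AMBA id table */
    };
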
diff --git a/arch/arm/kernel/kprobes-decode.c b/arch/arm/kernel/kprobes-decode.c
index 8f6ed43861f1..23891317dc4b 100644
--- a/arch/arm/kernel/kprobes-decode.c
+++ b/arch/arm/kernel/kprobes-decode.c
@@ -594,7 +594,8 @@ static void __kprobes emulate_ldr(struct kprobe *p, struct pt_regs *regs)
 	long cpsr = regs->ARM_cpsr;
 
 	fnr.dr = insnslot_llret_3arg_rflags(rnv, 0, rmv, cpsr, i_fn);
-	regs->uregs[rn] = fnr.r0;  /* Save Rn in case of writeback. */
+	if (rn != 15)
+		regs->uregs[rn] = fnr.r0;  /* Save Rn in case of writeback. */
 	rdv = fnr.r1;
 
 	if (rd == 15) {
@@ -622,10 +623,11 @@ static void __kprobes emulate_str(struct kprobe *p, struct pt_regs *regs)
 	long rdv = (rd == 15) ? iaddr + str_pc_offset : regs->uregs[rd];
 	long rnv = (rn == 15) ? iaddr +  8 : regs->uregs[rn];
 	long rmv = regs->uregs[rm];  /* rm/rmv may be invalid, don't care. */
+	long rnv_wb;
 
-	/* Save Rn in case of writeback. */
-	regs->uregs[rn] =
-		insnslot_3arg_rflags(rnv, rdv, rmv, regs->ARM_cpsr, i_fn);
+	rnv_wb = insnslot_3arg_rflags(rnv, rdv, rmv, regs->ARM_cpsr, i_fn);
+	if (rn != 15)
+		regs->uregs[rn] = rnv_wb;  /* Save Rn in case of writeback. */
 }
 
 static void __kprobes emulate_mrrc(struct kprobe *p, struct pt_regs *regs)
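
In struct pt_regs, uregs[15] is the saved PC; the emulation paths set it
explicitly when Rd is the PC, so unconditionally storing the base-register
writeback value into uregs[rn] would corrupt the saved PC whenever Rn is
15. The guard introduced above, as a standalone sketch:

    #include <asm/ptrace.h>

    /* Sketch of the writeback guard: register 15 is the saved PC in
     * pt_regs and must never receive base-register writeback. */
    static void writeback_base(struct pt_regs *regs, int rn, long val)
    {
            if (rn != 15)
                    regs->uregs[rn] = val;
    }
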
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 22e194eb8536..69cfee0fe00f 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -79,6 +79,7 @@ struct arm_pmu {
 	void		(*write_counter)(int idx, u32 val);
 	void		(*start)(void);
 	void		(*stop)(void);
+	void		(*reset)(void *);
 	const unsigned	(*cache_map)[PERF_COUNT_HW_CACHE_MAX]
 				    [PERF_COUNT_HW_CACHE_OP_MAX]
 				    [PERF_COUNT_HW_CACHE_RESULT_MAX];
@@ -204,11 +205,9 @@ armpmu_event_set_period(struct perf_event *event,
 static u64
 armpmu_event_update(struct perf_event *event,
 		    struct hw_perf_event *hwc,
-		    int idx)
+		    int idx, int overflow)
 {
-	int shift = 64 - 32;
-	s64 prev_raw_count, new_raw_count;
-	u64 delta;
+	u64 delta, prev_raw_count, new_raw_count;
 
 again:
 	prev_raw_count = local64_read(&hwc->prev_count);
@@ -218,8 +217,13 @@ again:
 			     new_raw_count) != prev_raw_count)
 		goto again;
 
-	delta = (new_raw_count << shift) - (prev_raw_count << shift);
-	delta >>= shift;
+	new_raw_count &= armpmu->max_period;
+	prev_raw_count &= armpmu->max_period;
+
+	if (overflow)
+		delta = armpmu->max_period - prev_raw_count + new_raw_count;
+	else
+		delta = new_raw_count - prev_raw_count;
 
 	local64_add(delta, &event->count);
 	local64_sub(delta, &hwc->period_left);
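
The old shift-by-(64 - 32) trick sign-extended the counter difference and
was only correct for counters that wrap at exactly 2^32; masking both
samples with max_period and letting the interrupt handlers pass an
explicit overflow flag makes the delta correct for any counter width. A
standalone rendering of the new computation, assuming a 32-bit counter:

    #include <stdint.h>

    /* Mirrors the delta logic above with max_period = 0xffffffff.
     * Example: pmu_delta(0xfffffff0, 0x10, 1) == 0x1f, i.e. count up
     * to the top of the range, then from zero to the new value. */
    static uint64_t pmu_delta(uint64_t prev, uint64_t new, int overflow)
    {
            const uint64_t max_period = 0xffffffffULL;

            prev &= max_period;
            new  &= max_period;
            return overflow ? max_period - prev + new : new - prev;
    }
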
@@ -236,7 +240,7 @@ armpmu_read(struct perf_event *event)
 	if (hwc->idx < 0)
 		return;
 
-	armpmu_event_update(event, hwc, hwc->idx);
+	armpmu_event_update(event, hwc, hwc->idx, 0);
 }
 
 static void
@@ -254,7 +258,7 @@ armpmu_stop(struct perf_event *event, int flags)
 	if (!(hwc->state & PERF_HES_STOPPED)) {
 		armpmu->disable(hwc, hwc->idx);
 		barrier(); /* why? */
-		armpmu_event_update(event, hwc, hwc->idx);
+		armpmu_event_update(event, hwc, hwc->idx, 0);
 		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
 	}
 }
@@ -624,6 +628,19 @@ static struct pmu pmu = {
 #include "perf_event_v6.c"
 #include "perf_event_v7.c"
 
+/*
+ * Ensure the PMU has sane values out of reset.
+ * This requires SMP to be available, so exists as a separate initcall.
+ */
+static int __init
+armpmu_reset(void)
+{
+	if (armpmu && armpmu->reset)
+		return on_each_cpu(armpmu->reset, NULL, 1);
+	return 0;
+}
+arch_initcall(armpmu_reset);
+
 static int __init
 init_hw_perf_events(void)
 {
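
on_each_cpu() cross-calls the reset handler on every online CPU, which is
only possible once SMP is up, hence the dedicated arch_initcall rather
than resetting from the PMU probe path. The new hook's void (*)(void *)
signature is exactly what on_each_cpu() expects; a minimal sketch of an
implementation (the real ARMv7 one, armv7pmu_reset, appears later in this
diff):

    /* Sketch: per-CPU reset callback for the new arm_pmu::reset hook.
     * The info argument is the NULL passed by armpmu_reset() above. */
    static void examplepmu_reset(void *info)
    {
            /* put counters and interrupt enables into a known state */
    }
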
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index 6fc2d228db55..f1e8dd94afe8 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -474,7 +474,7 @@ armv6pmu_handle_irq(int irq_num,
 			continue;
 
 		hwc = &event->hw;
-		armpmu_event_update(event, hwc, idx);
+		armpmu_event_update(event, hwc, idx, 1);
 		data.period = event->hw.last_period;
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 2e1402556fa0..4960686afb58 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -466,6 +466,7 @@ static inline unsigned long armv7_pmnc_read(void)
 static inline void armv7_pmnc_write(unsigned long val)
 {
 	val &= ARMV7_PMNC_MASK;
+	isb();
 	asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
 }
 
@@ -502,6 +503,7 @@ static inline int armv7_pmnc_select_counter(unsigned int idx)
 
 	val = (idx - ARMV7_EVENT_CNT_TO_CNTx) & ARMV7_SELECT_MASK;
 	asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val));
+	isb();
 
 	return idx;
 }
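
Changes to the CP15 performance-monitor registers are only guaranteed to
be seen by subsequent instructions after an instruction synchronization
barrier: the isb() after the counter-select write makes sure following
counter accesses target the newly selected counter, and the one ahead of
the PMNC write makes sure preceding instructions have completed before
counters are started or stopped. The select-then-access pattern, sketched
with the kernel's isb() macro:

    /* Sketch of the serialized counter select used above. */
    static inline void pmu_select_counter(u32 val)
    {
            asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r"(val));
            isb();  /* later counter accesses now see this selection */
    }
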
@@ -780,7 +782,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 			continue;
 
 		hwc = &event->hw;
-		armpmu_event_update(event, hwc, idx);
+		armpmu_event_update(event, hwc, idx, 1);
 		data.period = event->hw.last_period;
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;
@@ -847,6 +849,18 @@ static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc,
 	}
 }
 
+static void armv7pmu_reset(void *info)
+{
+	u32 idx, nb_cnt = armpmu->num_events;
+
+	/* The counter and interrupt enable registers are unknown at reset. */
+	for (idx = 1; idx < nb_cnt; ++idx)
+		armv7pmu_disable_event(NULL, idx);
+
+	/* Initialize & Reset PMNC: C and P bits */
+	armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
+}
+
 static struct arm_pmu armv7pmu = {
 	.handle_irq		= armv7pmu_handle_irq,
 	.enable			= armv7pmu_enable_event,
@@ -856,17 +870,15 @@ static struct arm_pmu armv7pmu = {
 	.get_event_idx		= armv7pmu_get_event_idx,
 	.start			= armv7pmu_start,
 	.stop			= armv7pmu_stop,
+	.reset			= armv7pmu_reset,
 	.raw_event_mask		= 0xFF,
 	.max_period		= (1LLU << 32) - 1,
 };
 
-static u32 __init armv7_reset_read_pmnc(void)
+static u32 __init armv7_read_num_pmnc_events(void)
 {
 	u32 nb_cnt;
 
-	/* Initialize & Reset PMNC: C and P bits */
-	armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
-
 	/* Read the nb of CNTx counters supported from PMNC */
 	nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;
 
@@ -880,7 +892,7 @@ static const struct arm_pmu *__init armv7_a8_pmu_init(void)
 	armv7pmu.name		= "ARMv7 Cortex-A8";
 	armv7pmu.cache_map	= &armv7_a8_perf_cache_map;
 	armv7pmu.event_map	= &armv7_a8_perf_map;
-	armv7pmu.num_events	= armv7_reset_read_pmnc();
+	armv7pmu.num_events	= armv7_read_num_pmnc_events();
 	return &armv7pmu;
 }
 
@@ -890,7 +902,7 @@ static const struct arm_pmu *__init armv7_a9_pmu_init(void)
 	armv7pmu.name		= "ARMv7 Cortex-A9";
 	armv7pmu.cache_map	= &armv7_a9_perf_cache_map;
 	armv7pmu.event_map	= &armv7_a9_perf_map;
-	armv7pmu.num_events	= armv7_reset_read_pmnc();
+	armv7pmu.num_events	= armv7_read_num_pmnc_events();
 	return &armv7pmu;
 }
 #else
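
Moving the PMNC P (event-counter reset) and C (cycle-counter reset) bits
into armv7pmu_reset() means every CPU is initialized through the new reset
hook rather than only the CPU that ran the probe, and the renamed
armv7_read_num_pmnc_events() becomes a side-effect-free query of the PMNC
N field. A sketch of that query (on ARMv7 the N field sits in PMNC bits
[15:11]; the driver counts the dedicated cycle counter on top):

    /* Sketch of the counter-count query: the PMNC N field gives the
     * number of configurable event counters, and the dedicated cycle
     * counter is added on top. */
    static u32 num_pmnc_events(u32 pmnc)
    {
            return ((pmnc >> 11) & 0x1f) + 1;  /* N field + cycle counter */
    }
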
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index 28cd3b025bc3..39affbe4fdb2 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -246,7 +246,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
 			continue;
 
 		hwc = &event->hw;
-		armpmu_event_update(event, hwc, idx);
+		armpmu_event_update(event, hwc, idx, 1);
 		data.period = event->hw.last_period;
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;
@@ -578,7 +578,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 			continue;
 
 		hwc = &event->hw;
-		armpmu_event_update(event, hwc, idx);
+		armpmu_event_update(event, hwc, idx, 1);
 		data.period = event->hw.last_period;
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;
diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S
index bfad698a02e7..6398ead9d1c0 100644
--- a/arch/arm/kernel/sleep.S
+++ b/arch/arm/kernel/sleep.S
@@ -119,11 +119,19 @@ ENTRY(cpu_resume)
 #else
 	ldr	r0, sleep_save_sp	@ stack phys addr
 #endif
-	msr	cpsr_c, #PSR_I_BIT | PSR_F_BIT | SVC_MODE @ set SVC, irqs off
+	setmode	PSR_I_BIT | PSR_F_BIT | SVC_MODE, r1  @ set SVC, irqs off
 #ifdef MULTI_CPU
-	ldmia	r0!, {r1, sp, lr, pc}	@ load v:p, stack, return fn, resume fn
+	@ load v:p, stack, return fn, resume fn
+  ARM(	ldmia	r0!, {r1, sp, lr, pc}	)
+THUMB(	ldmia	r0!, {r1, r2, r3, r4}	)
+THUMB(	mov	sp, r2			)
+THUMB(	mov	lr, r3			)
+THUMB(	bx	r4			)
 #else
-	ldmia	r0!, {r1, sp, lr}	@ load v:p, stack, return fn
+	@ load v:p, stack, return fn
+  ARM(	ldmia	r0!, {r1, sp, lr}	)
+THUMB(	ldmia	r0!, {r1, r2, lr}	)
+THUMB(	mov	sp, r2			)
 	b	cpu_do_resume
 #endif
 ENDPROC(cpu_resume)
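
The Thumb-2 variants are needed because LDM in Thumb-2 may not include SP
in its register list, nor both LR and PC at once, so the values are loaded
into low registers and transferred with mov/bx; the ARM encoding can still
load v:p offset, stack pointer, return address and resume address in a
single ldmia. Similarly, Thumb-2 has no immediate form of msr cpsr_c,
which is why the raw msr is replaced by the setmode macro: it takes a
scratch register (r1 here, clobbered before the ldmia reloads it) and
expands to the appropriate sequence for each instruction set.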