From 36cd3c9f925b9307236505ae7ad1ad7ac4d4357c Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Thu, 9 Apr 2009 18:48:34 +0200 Subject: mutex: have non-spinning mutexes on s390 by default Impact: performance regression fix for s390 The adaptive spinning mutexes will not always do what one would expect on virtualized architectures like s390. Especially the cpu_relax() loop in mutex_spin_on_owner might hurt if the mutex holding cpu has been scheduled away by the hypervisor. We would end up in a cpu_relax() loop when there is no chance that the state of the mutex changes until the target cpu has been scheduled again by the hypervisor. For that reason we should change the default behaviour to no-spin on s390. We do have an instruction which allows to yield the current cpu in favour of a different target cpu. Also we have an instruction which allows us to figure out if the target cpu is physically backed. However we need to do some performance tests until we can come up with a solution that will do the right thing on s390. Signed-off-by: Heiko Carstens Acked-by: Peter Zijlstra Cc: Martin Schwidefsky Cc: Christian Borntraeger LKML-Reference: <20090409184834.7a0df7b2@osiris.boeblingen.de.ibm.com> Signed-off-by: Ingo Molnar --- arch/s390/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/s390') diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index dcb667c4375a..2eca5fe0e75b 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -82,6 +82,7 @@ config S390 select USE_GENERIC_SMP_HELPERS if SMP select HAVE_SYSCALL_WRAPPERS select HAVE_FUNCTION_TRACER + select HAVE_DEFAULT_NO_SPIN_MUTEXES select HAVE_OPROFILE select HAVE_KPROBES select HAVE_KRETPROBES -- cgit 1.4.1 From 25097bf153391f7be4c591d47061b3dc4990dac2 Mon Sep 17 00:00:00 2001 From: Christian Ehrhardt Date: Tue, 14 Apr 2009 15:36:16 +0200 Subject: [S390] s390: move machine flags to lowcore Currently the storage of the machine flags is a globally exported unsigned long long variable. By moving the storage location into the lowcore struct we allow assembler code to check machine_flags directly even without needing a register. Addtionally the lowcore and therefore the machine flags too will be in cache most of the time. Signed-off-by: Christian Ehrhardt Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/cpuid.h | 25 +++++++++++++++++++++++++ arch/s390/include/asm/kvm_host.h | 1 + arch/s390/include/asm/lowcore.h | 12 ++++++++---- arch/s390/include/asm/processor.h | 17 +++-------------- arch/s390/include/asm/ptrace.h | 2 -- arch/s390/include/asm/setup.h | 24 ++++++++++++------------ arch/s390/include/asm/thread_info.h | 3 ++- arch/s390/kernel/early.c | 3 +++ arch/s390/kernel/setup.c | 4 +--- arch/s390/kernel/smp.c | 1 + 10 files changed, 56 insertions(+), 36 deletions(-) create mode 100644 arch/s390/include/asm/cpuid.h (limited to 'arch/s390') diff --git a/arch/s390/include/asm/cpuid.h b/arch/s390/include/asm/cpuid.h new file mode 100644 index 000000000000..07836a2e5222 --- /dev/null +++ b/arch/s390/include/asm/cpuid.h @@ -0,0 +1,25 @@ +/* + * Copyright IBM Corp. 2000,2009 + * Author(s): Hartmut Penner , + * Martin Schwidefsky + * Christian Ehrhardt + */ + +#ifndef _ASM_S390_CPUID_H_ +#define _ASM_S390_CPUID_H_ + +/* + * CPU type and hardware bug flags. Kept separately for each CPU. + * Members of this structure are referenced in head.S, so think twice + * before touching them. 
[mj] + */ + +typedef struct +{ + unsigned int version : 8; + unsigned int ident : 24; + unsigned int machine : 16; + unsigned int unused : 16; +} __attribute__ ((packed)) cpuid_t; + +#endif /* _ASM_S390_CPUID_H_ */ diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index c6e674f5fca9..54ea39f96ecd 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -15,6 +15,7 @@ #define ASM_KVM_HOST_H #include #include +#include #define KVM_MAX_VCPUS 64 #define KVM_MEMORY_SLOTS 32 diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h index b349f1c7fdfa..3aeca492b147 100644 --- a/arch/s390/include/asm/lowcore.h +++ b/arch/s390/include/asm/lowcore.h @@ -66,6 +66,7 @@ #define __LC_USER_EXEC_ASCE 0x02ac #define __LC_CPUID 0x02b0 #define __LC_INT_CLOCK 0x02c8 +#define __LC_MACHINE_FLAGS 0x02d8 #define __LC_IRB 0x0300 #define __LC_PFAULT_INTPARM 0x0080 #define __LC_CPU_TIMER_SAVE_AREA 0x00d8 @@ -110,6 +111,7 @@ #define __LC_CPUID 0x0320 #define __LC_INT_CLOCK 0x0340 #define __LC_VDSO_PER_CPU 0x0350 +#define __LC_MACHINE_FLAGS 0x0358 #define __LC_IRB 0x0380 #define __LC_PASTE 0x03c0 #define __LC_PFAULT_INTPARM 0x11b8 @@ -127,9 +129,9 @@ #ifndef __ASSEMBLY__ -#include +#include +#include #include -#include void restart_int_handler(void); void ext_int_handler(void); @@ -277,7 +279,8 @@ struct _lowcore __u32 ext_call_fast; /* 0x02c4 */ __u64 int_clock; /* 0x02c8 */ __u64 clock_comparator; /* 0x02d0 */ - __u8 pad_0x02d8[0x0300-0x02d8]; /* 0x02d8 */ + __u32 machine_flags; /* 0x02d8 */ + __u8 pad_0x02dc[0x0300-0x02dc]; /* 0x02dc */ /* Interrupt response block */ __u8 irb[64]; /* 0x0300 */ @@ -381,7 +384,8 @@ struct _lowcore __u64 int_clock; /* 0x0340 */ __u64 clock_comparator; /* 0x0348 */ __u64 vdso_per_cpu_data; /* 0x0350 */ - __u8 pad_0x0358[0x0380-0x0358]; /* 0x0358 */ + __u64 machine_flags; /* 0x0358 */ + __u8 pad_0x0360[0x0380-0x0360]; /* 0x0360 */ /* Interrupt response block. */ __u8 irb[64]; /* 0x0380 */ diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h index 61862b3ac794..c139fa7b8e89 100644 --- a/arch/s390/include/asm/processor.h +++ b/arch/s390/include/asm/processor.h @@ -14,7 +14,10 @@ #define __ASM_S390_PROCESSOR_H #include +#include +#include #include +#include #ifdef __KERNEL__ /* @@ -23,20 +26,6 @@ */ #define current_text_addr() ({ void *pc; asm("basr %0,0" : "=a" (pc)); pc; }) -/* - * CPU type and hardware bug flags. Kept separately for each CPU. - * Members of this structure are referenced in head.S, so think twice - * before touching them. 
[mj] - */ - -typedef struct -{ - unsigned int version : 8; - unsigned int ident : 24; - unsigned int machine : 16; - unsigned int unused : 16; -} __attribute__ ((packed)) cpuid_t; - static inline void get_cpu_id(cpuid_t *ptr) { asm volatile("stidp 0(%1)" : "=m" (*ptr) : "a" (ptr)); diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h index f1b051630c50..539263fc9ab9 100644 --- a/arch/s390/include/asm/ptrace.h +++ b/arch/s390/include/asm/ptrace.h @@ -313,8 +313,6 @@ typedef struct #ifdef __KERNEL__ -#include -#include /* * The pt_regs struct defines the way the registers are stored on diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h index e8bd6ac22c99..38b0fc221ed7 100644 --- a/arch/s390/include/asm/setup.h +++ b/arch/s390/include/asm/setup.h @@ -14,6 +14,7 @@ #ifdef __KERNEL__ +#include #include #define PARMAREA 0x10400 @@ -63,7 +64,6 @@ extern unsigned int s390_noexec; /* * Machine features detected in head.S */ -extern unsigned long machine_flags; #define MACHINE_FLAG_VM (1UL << 0) #define MACHINE_FLAG_IEEE (1UL << 1) @@ -77,28 +77,28 @@ extern unsigned long machine_flags; #define MACHINE_FLAG_HPAGE (1UL << 10) #define MACHINE_FLAG_PFMF (1UL << 11) -#define MACHINE_IS_VM (machine_flags & MACHINE_FLAG_VM) -#define MACHINE_IS_KVM (machine_flags & MACHINE_FLAG_KVM) -#define MACHINE_HAS_DIAG9C (machine_flags & MACHINE_FLAG_DIAG9C) +#define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM) +#define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM) +#define MACHINE_HAS_DIAG9C (S390_lowcore.machine_flags & MACHINE_FLAG_DIAG9C) #ifndef __s390x__ -#define MACHINE_HAS_IEEE (machine_flags & MACHINE_FLAG_IEEE) -#define MACHINE_HAS_CSP (machine_flags & MACHINE_FLAG_CSP) +#define MACHINE_HAS_IEEE (S390_lowcore.machine_flags & MACHINE_FLAG_IEEE) +#define MACHINE_HAS_CSP (S390_lowcore.machine_flags & MACHINE_FLAG_CSP) #define MACHINE_HAS_IDTE (0) #define MACHINE_HAS_DIAG44 (1) -#define MACHINE_HAS_MVPG (machine_flags & MACHINE_FLAG_MVPG) +#define MACHINE_HAS_MVPG (S390_lowcore.machine_flags & MACHINE_FLAG_MVPG) #define MACHINE_HAS_MVCOS (0) #define MACHINE_HAS_HPAGE (0) #define MACHINE_HAS_PFMF (0) #else /* __s390x__ */ #define MACHINE_HAS_IEEE (1) #define MACHINE_HAS_CSP (1) -#define MACHINE_HAS_IDTE (machine_flags & MACHINE_FLAG_IDTE) -#define MACHINE_HAS_DIAG44 (machine_flags & MACHINE_FLAG_DIAG44) +#define MACHINE_HAS_IDTE (S390_lowcore.machine_flags & MACHINE_FLAG_IDTE) +#define MACHINE_HAS_DIAG44 (S390_lowcore.machine_flags & MACHINE_FLAG_DIAG44) #define MACHINE_HAS_MVPG (1) -#define MACHINE_HAS_MVCOS (machine_flags & MACHINE_FLAG_MVCOS) -#define MACHINE_HAS_HPAGE (machine_flags & MACHINE_FLAG_HPAGE) -#define MACHINE_HAS_PFMF (machine_flags & MACHINE_FLAG_PFMF) +#define MACHINE_HAS_MVCOS (S390_lowcore.machine_flags & MACHINE_FLAG_MVCOS) +#define MACHINE_HAS_HPAGE (S390_lowcore.machine_flags & MACHINE_FLAG_HPAGE) +#define MACHINE_HAS_PFMF (S390_lowcore.machine_flags & MACHINE_FLAG_PFMF) #endif /* __s390x__ */ #define ZFCPDUMP_HSA_SIZE (32UL<<20) diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h index c544aa524535..461f2abd2e6f 100644 --- a/arch/s390/include/asm/thread_info.h +++ b/arch/s390/include/asm/thread_info.h @@ -31,8 +31,9 @@ #define ASYNC_SIZE (PAGE_SIZE << ASYNC_ORDER) #ifndef __ASSEMBLY__ -#include #include +#include +#include /* * low level task data that entry.S needs immediate access to diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index 
4d221c81c849..d4e1e5b6cfda 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -34,6 +34,8 @@ char kernel_nss_name[NSS_NAME_SIZE + 1]; +static unsigned long machine_flags; + static void __init setup_boot_command_line(void); @@ -391,5 +393,6 @@ void __init startup_init(void) setup_hpage(); sclp_facilities_detect(); detect_memory_layout(memory_chunk); + S390_lowcore.machine_flags = machine_flags; lockdep_on(); } diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 06201b93cbbf..163bdfe5a6be 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -82,9 +82,6 @@ EXPORT_SYMBOL(console_devno); unsigned int console_irq = -1; EXPORT_SYMBOL(console_irq); -unsigned long machine_flags; -EXPORT_SYMBOL(machine_flags); - unsigned long elf_hwcap = 0; char elf_platform[ELF_PLATFORM_SIZE]; @@ -426,6 +423,7 @@ setup_lowcore(void) __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0) + PAGE_SIZE; lc->current_task = (unsigned long) init_thread_union.thread_info.task; lc->thread_info = (unsigned long) &init_thread_union; + lc->machine_flags = S390_lowcore.machine_flags; #ifndef CONFIG_64BIT if (MACHINE_HAS_IEEE) { lc->extended_save_area_addr = (__u32) diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 006ed5016eb4..796630240715 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -571,6 +571,7 @@ int __cpuinit __cpu_up(unsigned int cpu) cpu_lowcore->current_task = (unsigned long) idle; cpu_lowcore->cpu_nr = cpu; cpu_lowcore->kernel_asce = S390_lowcore.kernel_asce; + cpu_lowcore->machine_flags = S390_lowcore.machine_flags; eieio(); while (signal_processor(cpu, sigp_restart) == sigp_busy) -- cgit 1.4.1 From a93e11f9b9604134373ae790bce738d62109de60 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Tue, 14 Apr 2009 15:36:17 +0200 Subject: [S390] wire up preadv/pwritev system calls Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/unistd.h | 4 +++- arch/s390/kernel/compat_wrapper.S | 18 ++++++++++++++++++ arch/s390/kernel/syscalls.S | 2 ++ 3 files changed, 23 insertions(+), 1 deletion(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h index c8ad350d1444..f0f19e6ace6c 100644 --- a/arch/s390/include/asm/unistd.h +++ b/arch/s390/include/asm/unistd.h @@ -265,7 +265,9 @@ #define __NR_pipe2 325 #define __NR_dup3 326 #define __NR_epoll_create1 327 -#define NR_syscalls 328 +#define __NR_preadv 328 +#define __NR_pwritev 329 +#define NR_syscalls 330 /* * There are some system calls that are not present on 64 bit, some diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S index 87cf5a79a351..fb38af6316bb 100644 --- a/arch/s390/kernel/compat_wrapper.S +++ b/arch/s390/kernel/compat_wrapper.S @@ -1805,3 +1805,21 @@ compat_sys_keyctl_wrapper: llgfr %r5,%r5 # u32 llgfr %r6,%r6 # u32 jg compat_sys_keyctl # branch to system call + + .globl compat_sys_preadv_wrapper +compat_sys_preadv_wrapper: + llgfr %r2,%r2 # unsigned long + llgtr %r3,%r3 # compat_iovec * + llgfr %r4,%r4 # unsigned long + llgfr %r5,%r5 # u32 + llgfr %r6,%r6 # u32 + jg compat_sys_preadv # branch to system call + + .globl compat_sys_pwritev_wrapper +compat_sys_pwritev_wrapper: + llgfr %r2,%r2 # unsigned long + llgtr %r3,%r3 # compat_iovec * + llgfr %r4,%r4 # unsigned long + llgfr %r5,%r5 # u32 + llgfr %r6,%r6 # u32 + jg compat_sys_pwritev # branch to system call diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S index fe5b25a988ab..2c7739fe70b1 100644 
--- a/arch/s390/kernel/syscalls.S +++ b/arch/s390/kernel/syscalls.S @@ -336,3 +336,5 @@ SYSCALL(sys_inotify_init1,sys_inotify_init1,sys_inotify_init1_wrapper) SYSCALL(sys_pipe2,sys_pipe2,sys_pipe2_wrapper) /* 325 */ SYSCALL(sys_dup3,sys_dup3,sys_dup3_wrapper) SYSCALL(sys_epoll_create1,sys_epoll_create1,sys_epoll_create1_wrapper) +SYSCALL(sys_preadv,sys_preadv,compat_sys_preadv_wrapper) +SYSCALL(sys_pwritev,sys_pwritev,compat_sys_pwritev_wrapper) -- cgit 1.4.1 From 81f64b87731aa33eef6b88af9d92c3398d48cf41 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Tue, 14 Apr 2009 15:36:18 +0200 Subject: [S390] call nmi_enter/nmi_exit on machine checks nmi_enter/nmi_exit includes the lockdep calls and various other calls which were missing so far. Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/nmi.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c index 4bfdc421d7e9..28cf196ba775 100644 --- a/arch/s390/kernel/nmi.c +++ b/arch/s390/kernel/nmi.c @@ -10,6 +10,7 @@ #include #include +#include #include #include #include @@ -253,7 +254,7 @@ void notrace s390_do_machine_check(struct pt_regs *regs) struct mci *mci; int umode; - lockdep_off(); + nmi_enter(); s390_idle_check(); mci = (struct mci *) &S390_lowcore.mcck_interruption_code; @@ -363,7 +364,7 @@ void notrace s390_do_machine_check(struct pt_regs *regs) mcck->warning = 1; set_thread_flag(TIF_MCCK_PENDING); } - lockdep_on(); + nmi_exit(); } static int __init machine_check_init(void) -- cgit 1.4.1 From 0436230148c55e3afbe5c57775a1fb44ba4834ac Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Tue, 14 Apr 2009 15:36:19 +0200 Subject: [S390] stp synchronization retry timer Add a timer that retries the clock synchronization via the server time protocol if there is a usable clock but the synchronization failed. Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/time.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'arch/s390') diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index f72d41068dc2..05f93e77870b 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c @@ -1423,6 +1423,7 @@ static void *stp_page; static void stp_work_fn(struct work_struct *work); static DEFINE_MUTEX(stp_work_mutex); static DECLARE_WORK(stp_work, stp_work_fn); +static struct timer_list stp_timer; static int __init early_parse_stp(char *p) { @@ -1454,10 +1455,16 @@ static void __init stp_reset(void) } } +static void stp_timeout(unsigned long dummy) +{ + queue_work(time_sync_wq, &stp_work); +} + static int __init stp_init(void) { if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags)) return 0; + setup_timer(&stp_timer, stp_timeout, 0UL); time_init_wq(); if (!stp_online) return 0; @@ -1565,6 +1572,7 @@ static void stp_work_fn(struct work_struct *work) if (!stp_online) { chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000); + del_timer_sync(&stp_timer); goto out_unlock; } @@ -1586,6 +1594,13 @@ static void stp_work_fn(struct work_struct *work) stop_machine(stp_sync_clock, &stp_sync, &cpu_online_map); put_online_cpus(); + if (!check_sync_clock()) + /* + * There is a usable clock but the synchonization failed. + * Retry after a second. 
+ */ + mod_timer(&stp_timer, jiffies + HZ); + out_unlock: mutex_unlock(&stp_work_mutex); } -- cgit 1.4.1 From b6ecfa9273e27b5c7ba04655eb44f78bf4db5b64 Mon Sep 17 00:00:00 2001 From: Jan Glauber Date: Tue, 14 Apr 2009 15:36:20 +0200 Subject: [S390] extend virtual timer interface by mod_virt_timer_periodic In case mod_virt_timer is used to add a non pending timer the timer is always added as a one-shot timer. If mod_virt_timer is used for periodic timers they may therfore be degraded to one-shot timers. Add mod_virt_timer_periodic to the interface to allow safe re-programming of the interval value. Signed-off-by: Jan Glauber Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/timer.h | 1 + arch/s390/kernel/vtime.c | 57 +++++++++++++++++++++++++------------------ 2 files changed, 34 insertions(+), 24 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/timer.h b/arch/s390/include/asm/timer.h index e4bcab739c19..814243cafdfe 100644 --- a/arch/s390/include/asm/timer.h +++ b/arch/s390/include/asm/timer.h @@ -41,6 +41,7 @@ extern void init_virt_timer(struct vtimer_list *timer); extern void add_virt_timer(void *new); extern void add_virt_timer_periodic(void *new); extern int mod_virt_timer(struct vtimer_list *timer, __u64 expires); +extern int mod_virt_timer_periodic(struct vtimer_list *timer, __u64 expires); extern int del_virt_timer(struct vtimer_list *timer); extern void init_cpu_vtimer(void); diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c index ecf0304e61c1..694b44374a2b 100644 --- a/arch/s390/kernel/vtime.c +++ b/arch/s390/kernel/vtime.c @@ -425,17 +425,7 @@ void add_virt_timer_periodic(void *new) } EXPORT_SYMBOL(add_virt_timer_periodic); -/* - * If we change a pending timer the function must be called on the CPU - * where the timer is running on, e.g. by smp_call_function_single() - * - * The original mod_timer adds the timer if it is not pending. For - * compatibility we do the same. The timer will be added on the current - * CPU as a oneshot timer. 
- * - * returns whether it has modified a pending timer (1) or not (0) - */ -int mod_virt_timer(struct vtimer_list *timer, __u64 expires) +int __mod_vtimer(struct vtimer_list *timer, __u64 expires, int periodic) { struct vtimer_queue *vq; unsigned long flags; @@ -444,39 +434,35 @@ int mod_virt_timer(struct vtimer_list *timer, __u64 expires) BUG_ON(!timer->function); BUG_ON(!expires || expires > VTIMER_MAX_SLICE); - /* - * This is a common optimization triggered by the - * networking code - if the timer is re-modified - * to be the same thing then just return: - */ if (timer->expires == expires && vtimer_pending(timer)) return 1; cpu = get_cpu(); vq = &per_cpu(virt_cpu_timer, cpu); - /* check if we run on the right CPU */ - BUG_ON(timer->cpu != cpu); - /* disable interrupts before test if timer is pending */ spin_lock_irqsave(&vq->lock, flags); /* if timer isn't pending add it on the current CPU */ if (!vtimer_pending(timer)) { spin_unlock_irqrestore(&vq->lock, flags); - /* we do not activate an interval timer with mod_virt_timer */ - timer->interval = 0; + + if (periodic) + timer->interval = expires; + else + timer->interval = 0; timer->expires = expires; timer->cpu = cpu; internal_add_vtimer(timer); return 0; } + /* check if we run on the right CPU */ + BUG_ON(timer->cpu != cpu); + list_del_init(&timer->entry); timer->expires = expires; - - /* also change the interval if we have an interval timer */ - if (timer->interval) + if (periodic) timer->interval = expires; /* the timer can't expire anymore so we can release the lock */ @@ -484,8 +470,31 @@ int mod_virt_timer(struct vtimer_list *timer, __u64 expires) internal_add_vtimer(timer); return 1; } + +/* + * If we change a pending timer the function must be called on the CPU + * where the timer is running on. + * + * returns whether it has modified a pending timer (1) or not (0) + */ +int mod_virt_timer(struct vtimer_list *timer, __u64 expires) +{ + return __mod_vtimer(timer, expires, 0); +} EXPORT_SYMBOL(mod_virt_timer); +/* + * If we change a pending timer the function must be called on the CPU + * where the timer is running on. + * + * returns whether it has modified a pending timer (1) or not (0) + */ +int mod_virt_timer_periodic(struct vtimer_list *timer, __u64 expires) +{ + return __mod_vtimer(timer, expires, 1); +} +EXPORT_SYMBOL(mod_virt_timer_periodic); + /* * delete a virtual timer * -- cgit 1.4.1 From 43ae8a1b32735c662ba7ebf3509c4f670f75e3d5 Mon Sep 17 00:00:00 2001 From: Gerald Schaefer Date: Tue, 14 Apr 2009 15:36:21 +0200 Subject: [S390] appldata: Use new mod_virt_timer_periodic() function. mod_virt_timer() was used to modify/add cpu timers for cpus that were set online. This resulted in a one-shot timer for every cpu that was newly added or previously set offline, instead of an interval timer, which broke the appldata vtime interval setup. To fix this, the new mod_virt_timer_periodic() function is used, which adds interval timers instead of one-shot timers. 
Signed-off-by: Gerald Schaefer Signed-off-by: Martin Schwidefsky --- arch/s390/appldata/appldata_base.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/s390') diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c index 27b70d8a359c..aeb3cff95f63 100644 --- a/arch/s390/appldata/appldata_base.c +++ b/arch/s390/appldata/appldata_base.c @@ -176,7 +176,7 @@ static void __appldata_mod_vtimer_wrap(void *p) { struct vtimer_list *timer; u64 expires; } *args = p; - mod_virt_timer(args->timer, args->expires); + mod_virt_timer_periodic(args->timer, args->expires); } #define APPLDATA_ADD_TIMER 0 -- cgit 1.4.1 From 88e012856d05a5d00ae80c691fb7aa5adda268d7 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Tue, 14 Apr 2009 15:36:25 +0200 Subject: [S390] smp: fix cpu_possible_map initialization The cpu_possible_map by default is initialized with all ones in s390. If the kernel paramert possible_cpus= is passed the cpu_possible_map is supposed to have x bits set. However the current code just sets the x bits without clearing the NR_CPUS bits that were already set. So we end up with an unchanged map that has all bits set. To fix this just clear the map before setting any new bits. This broke with def6cfb70bab83c0094bc0cedd27c4eda563043e "[S390] cpumask: Use accessors code." Cc: Rusty Russell Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/smp.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch/s390') diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 796630240715..a985a3ba4401 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -591,7 +591,8 @@ static int __init setup_possible_cpus(char *s) int pcpus, cpu; pcpus = simple_strtoul(s, NULL, 0); - for (cpu = 0; cpu < pcpus && cpu < nr_cpu_ids; cpu++) + init_cpu_possible(cpumask_of(0)); + for (cpu = 1; cpu < pcpus && cpu < nr_cpu_ids; cpu++) set_cpu_possible(cpu, true); return 0; } -- cgit 1.4.1 From b86ccca49cd8f22086c1d135ab3051cf48fb1688 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Tue, 14 Apr 2009 15:36:26 +0200 Subject: [S390] fix idle time accounting The steal time is calculated by subtracting the time the virtual cpu has been running on a physical cpu from the wall clock time. To make that work all wall time needs to be added to the steal time field first before the virtual cpu time is subtracted. The time between the last clock update and the load of the enabled wait psw needs to be added to the steal_time field as well to make the sum over all cpu accounting numbers match the wall clock. Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/vtime.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/s390') diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c index 694b44374a2b..c0870a61f90f 100644 --- a/arch/s390/kernel/vtime.c +++ b/arch/s390/kernel/vtime.c @@ -134,6 +134,8 @@ void vtime_start_cpu(void) /* Account time spent with enabled wait psw loaded as idle time. */ idle_time = S390_lowcore.int_clock - idle->idle_enter; account_idle_time(idle_time); + S390_lowcore.steal_timer += + idle->idle_enter - S390_lowcore.last_update_clock; S390_lowcore.last_update_clock = S390_lowcore.int_clock; /* Account system time spent going idle. 
*/ -- cgit 1.4.1 From 5b409ed17bb32c8316b1f456466c70529454573a Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Tue, 14 Apr 2009 15:36:27 +0200 Subject: [S390] cpu hotplug and accounting values Reset the cpu timer to the maximum value and correctly initialize the cpu accounting values in the lowcore when the cpu is started. Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/asm-offsets.c | 2 ++ arch/s390/kernel/entry.S | 13 +++++++++++++ arch/s390/kernel/entry64.S | 13 +++++++++++++ 3 files changed, 28 insertions(+) (limited to 'arch/s390') diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c index 67a60016babb..fa9905ce7d0b 100644 --- a/arch/s390/kernel/asm-offsets.c +++ b/arch/s390/kernel/asm-offsets.c @@ -27,6 +27,8 @@ int main(void) DEFINE(__TI_flags, offsetof(struct thread_info, flags)); DEFINE(__TI_cpu, offsetof(struct thread_info, cpu)); DEFINE(__TI_precount, offsetof(struct thread_info, preempt_count)); + DEFINE(__TI_user_timer, offsetof(struct thread_info, user_timer)); + DEFINE(__TI_system_timer, offsetof(struct thread_info, system_timer)); BLANK(); DEFINE(__PT_ARGS, offsetof(struct pt_regs, args)); DEFINE(__PT_PSW, offsetof(struct pt_regs, psw)); diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 1268aa2991bf..f3e275934213 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -837,16 +837,29 @@ mcck_return: __CPUINIT .globl restart_int_handler restart_int_handler: + basr %r1,0 +restart_base: + spt restart_vtime-restart_base(%r1) + stck __LC_LAST_UPDATE_CLOCK + mvc __LC_LAST_UPDATE_TIMER(8),restart_vtime-restart_base(%r1) + mvc __LC_EXIT_TIMER(8),restart_vtime-restart_base(%r1) l %r15,__LC_SAVE_AREA+60 # load ksp lctl %c0,%c15,__LC_CREGS_SAVE_AREA # get new ctl regs lam %a0,%a15,__LC_AREGS_SAVE_AREA lm %r6,%r15,__SF_GPRS(%r15) # load registers from clone + l %r1,__LC_THREAD_INFO + mvc __LC_USER_TIMER(8),__TI_user_timer(%r1) + mvc __LC_SYSTEM_TIMER(8),__TI_system_timer(%r1) + xc __LC_STEAL_TIMER(8),__LC_STEAL_TIMER stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on basr %r14,0 l %r14,restart_addr-.(%r14) br %r14 # branch to start_secondary restart_addr: .long start_secondary + .align 8 +restart_vtime: + .long 0x7fffffff,0xffffffff .previous #else /* diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S index c6fbde13971a..84a105838e03 100644 --- a/arch/s390/kernel/entry64.S +++ b/arch/s390/kernel/entry64.S @@ -831,14 +831,27 @@ mcck_return: __CPUINIT .globl restart_int_handler restart_int_handler: + basr %r1,0 +restart_base: + spt restart_vtime-restart_base(%r1) + stck __LC_LAST_UPDATE_CLOCK + mvc __LC_LAST_UPDATE_TIMER(8),restart_vtime-restart_base(%r1) + mvc __LC_EXIT_TIMER(8),restart_vtime-restart_base(%r1) lg %r15,__LC_SAVE_AREA+120 # load ksp lghi %r10,__LC_CREGS_SAVE_AREA lctlg %c0,%c15,0(%r10) # get new ctl regs lghi %r10,__LC_AREGS_SAVE_AREA lam %a0,%a15,0(%r10) lmg %r6,%r15,__SF_GPRS(%r15) # load registers from clone + lg %r1,__LC_THREAD_INFO + mvc __LC_USER_TIMER(8),__TI_user_timer(%r1) + mvc __LC_SYSTEM_TIMER(8),__TI_system_timer(%r1) + xc __LC_STEAL_TIMER(8),__LC_STEAL_TIMER stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on jg start_secondary + .align 8 +restart_vtime: + .long 0x7fffffff,0xffffffff .previous #else /* -- cgit 1.4.1 From b6112ccbff5ec580d46b584ecc3c3a773b830da2 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Tue, 14 Apr 2009 15:36:28 +0200 Subject: [S390] add read_persistent_clock Add a read_persistent_clock function that does not just return 0. 
Since timekeeping_init calls the function before time_init has been called move reset_tod_clock to early.c to make sure that the TOD clock is running when read_persistent_clock is invoked. Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/timex.h | 5 +++++ arch/s390/kernel/early.c | 16 ++++++++++++++++ arch/s390/kernel/head.S | 7 +++++-- arch/s390/kernel/time.c | 28 +++++++++------------------- 4 files changed, 35 insertions(+), 21 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h index d744c3d62de5..cc21e3e20fd7 100644 --- a/arch/s390/include/asm/timex.h +++ b/arch/s390/include/asm/timex.h @@ -11,6 +11,9 @@ #ifndef _ASM_S390_TIMEX_H #define _ASM_S390_TIMEX_H +/* The value of the TOD clock for 1.1.1970. */ +#define TOD_UNIX_EPOCH 0x7d91048bca000000ULL + /* Inline functions for clock register access. */ static inline int set_clock(__u64 time) { @@ -85,4 +88,6 @@ int get_sync_clock(unsigned long long *clock); void init_cpu_timer(void); unsigned long long monotonic_clock(void); +extern u64 sched_clock_base_cc; + #endif diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index d4e1e5b6cfda..cf09948faad6 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -38,6 +38,21 @@ static unsigned long machine_flags; static void __init setup_boot_command_line(void); +/* + * Get the TOD clock running. + */ +static void __init reset_tod_clock(void) +{ + u64 time; + + if (store_clock(&time) == 0) + return; + /* TOD clock not running. Set the clock to Unix Epoch. */ + if (set_clock(TOD_UNIX_EPOCH) != 0 || store_clock(&time) != 0) + disabled_wait(0); + + sched_clock_base_cc = TOD_UNIX_EPOCH; +} #ifdef CONFIG_SHARED_KERNEL int __init savesys_ipl_nss(char *cmd, const int cmdlen); @@ -372,6 +387,7 @@ static void __init setup_boot_command_line(void) */ void __init startup_init(void) { + reset_tod_clock(); ipl_save_parameters(); rescue_initrd(); clear_bss_section(); diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S index 1046c2c9f8d1..16f8975325ed 100644 --- a/arch/s390/kernel/head.S +++ b/arch/s390/kernel/head.S @@ -471,6 +471,8 @@ startup:basr %r13,0 # get base .LPG0: xc 0x200(256),0x200 # partially clear lowcore xc 0x300(256),0x300 + l %r1,5f-.LPG0(%r13) + stck 0(%r1) #ifndef CONFIG_MARCH_G5 # check processor version against MARCH_{G5,Z900,Z990,Z9_109,Z10} @@ -496,9 +498,10 @@ startup:basr %r13,0 # get base brct %r0,0b #endif - l %r13,0f-.LPG0(%r13) + l %r13,4f-.LPG0(%r13) b 0(%r13) -0: .long startup_continue +4: .long startup_continue +5: .long sched_clock_base_cc # # params at 10400 (setup.h) diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index 05f93e77870b..369ff02c4ab2 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c @@ -52,9 +52,6 @@ #define USECS_PER_JIFFY ((unsigned long) 1000000/HZ) #define CLK_TICKS_PER_JIFFY ((unsigned long) USECS_PER_JIFFY << 12) -/* The value of the TOD clock for 1.1.1970. */ -#define TOD_UNIX_EPOCH 0x7d91048bca000000ULL - /* * Create a small time difference between the timer interrupts * on the different cpus to avoid lock contention. @@ -63,9 +60,10 @@ #define TICK_SIZE tick +u64 sched_clock_base_cc = -1; /* Force to data section. 
*/ + static ext_int_info_t ext_int_info_cc; static ext_int_info_t ext_int_etr_cc; -static u64 sched_clock_base_cc; static DEFINE_PER_CPU(struct clock_event_device, comparators); @@ -195,22 +193,12 @@ static void timing_alert_interrupt(__u16 code) static void etr_reset(void); static void stp_reset(void); -/* - * Get the TOD clock running. - */ -static u64 __init reset_tod_clock(void) +unsigned long read_persistent_clock(void) { - u64 time; + struct timespec ts; - etr_reset(); - stp_reset(); - if (store_clock(&time) == 0) - return time; - /* TOD clock not running. Set the clock to Unix Epoch. */ - if (set_clock(TOD_UNIX_EPOCH) != 0 || store_clock(&time) != 0) - panic("TOD clock not operational."); - - return TOD_UNIX_EPOCH; + tod_to_timeval(get_clock() - TOD_UNIX_EPOCH, &ts); + return ts.tv_sec; } static cycle_t read_tod_clock(void) @@ -265,7 +253,9 @@ void update_vsyscall_tz(void) */ void __init time_init(void) { - sched_clock_base_cc = reset_tod_clock(); + /* Reset time synchronization interfaces. */ + etr_reset(); + stp_reset(); /* set xtime */ tod_to_timeval(sched_clock_base_cc - TOD_UNIX_EPOCH, &xtime); -- cgit 1.4.1 From ab96e798cbd562a53edd802272e49a5100b29efb Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Tue, 14 Apr 2009 15:36:29 +0200 Subject: [S390] boot cputime accounting Start the cpu time accounting very early to catch the cpu time spent for the initial kernel setup. To make the output of /proc/uptime match the sum of all cpu accounting values of the boot cpu reset xtime and wall_to_monotonic to sane values based on the TOD clock. The values set by timekeeping_init are off by up to a second. Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/head.S | 8 +++++++- arch/s390/kernel/setup.c | 8 ++++++++ arch/s390/kernel/time.c | 36 ++++++++++++++++++++++++++++-------- arch/s390/kernel/vtime.c | 8 -------- 4 files changed, 43 insertions(+), 17 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S index 16f8975325ed..bba14494ee00 100644 --- a/arch/s390/kernel/head.S +++ b/arch/s390/kernel/head.S @@ -473,7 +473,10 @@ startup:basr %r13,0 # get base xc 0x300(256),0x300 l %r1,5f-.LPG0(%r13) stck 0(%r1) - + spt 6f-.LPG0(%r13) + mvc __LC_LAST_UPDATE_CLOCK(8),0(%r1) + mvc __LC_LAST_UPDATE_TIMER(8),6f-.LPG0(%r13) + mvc __LC_EXIT_TIMER(8),5f-.LPG0(%r13) #ifndef CONFIG_MARCH_G5 # check processor version against MARCH_{G5,Z900,Z990,Z9_109,Z10} stidp __LC_CPUID # store cpuid @@ -500,8 +503,11 @@ startup:basr %r13,0 # get base l %r13,4f-.LPG0(%r13) b 0(%r13) + .align 4 4: .long startup_continue 5: .long sched_clock_base_cc + .align 8 +6: .long 0x7fffffff,0xffffffff # # params at 10400 (setup.h) diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 163bdfe5a6be..7402b6a39ead 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -434,6 +434,14 @@ setup_lowcore(void) #else lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0]; #endif + lc->sync_enter_timer = S390_lowcore.sync_enter_timer; + lc->async_enter_timer = S390_lowcore.async_enter_timer; + lc->exit_timer = S390_lowcore.exit_timer; + lc->user_timer = S390_lowcore.user_timer; + lc->system_timer = S390_lowcore.system_timer; + lc->steal_timer = S390_lowcore.steal_timer; + lc->last_update_timer = S390_lowcore.last_update_timer; + lc->last_update_clock = S390_lowcore.last_update_clock; set_prefix((u32)(unsigned long) lc); lowcore_ptr[0] = lc; } diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index 369ff02c4ab2..6ded50dfa75a 100644 --- 
a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c @@ -253,32 +253,52 @@ void update_vsyscall_tz(void) */ void __init time_init(void) { + struct timespec ts; + unsigned long flags; + cycle_t now; + /* Reset time synchronization interfaces. */ etr_reset(); stp_reset(); - /* set xtime */ - tod_to_timeval(sched_clock_base_cc - TOD_UNIX_EPOCH, &xtime); - set_normalized_timespec(&wall_to_monotonic, - -xtime.tv_sec, -xtime.tv_nsec); - /* request the clock comparator external interrupt */ if (register_early_external_interrupt(0x1004, clock_comparator_interrupt, &ext_int_info_cc) != 0) panic("Couldn't request external interrupt 0x1004"); - if (clocksource_register(&clocksource_tod) != 0) - panic("Could not register TOD clock source"); - /* request the timing alert external interrupt */ if (register_early_external_interrupt(0x1406, timing_alert_interrupt, &ext_int_etr_cc) != 0) panic("Couldn't request external interrupt 0x1406"); + if (clocksource_register(&clocksource_tod) != 0) + panic("Could not register TOD clock source"); + + /* + * The TOD clock is an accurate clock. The xtime should be + * initialized in a way that the difference between TOD and + * xtime is reasonably small. Too bad that timekeeping_init + * sets xtime.tv_nsec to zero. In addition the clock source + * change from the jiffies clock source to the TOD clock + * source add another error of up to 1/HZ second. The same + * function sets wall_to_monotonic to a value that is too + * small for /proc/uptime to be accurate. + * Reset xtime and wall_to_monotonic to sane values. + */ + write_seqlock_irqsave(&xtime_lock, flags); + now = get_clock(); + tod_to_timeval(now - TOD_UNIX_EPOCH, &xtime); + clocksource_tod.cycle_last = now; + clocksource_tod.raw_time = xtime; + tod_to_timeval(sched_clock_base_cc - TOD_UNIX_EPOCH, &ts); + set_normalized_timespec(&wall_to_monotonic, -ts.tv_sec, -ts.tv_nsec); + write_sequnlock_irqrestore(&xtime_lock, flags); + /* Enable TOD clock interrupts on the boot cpu. */ init_cpu_timer(); + /* Enable cpu timer interrupts on the boot cpu. */ vtime_init(); } diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c index c0870a61f90f..38ea92ff04f9 100644 --- a/arch/s390/kernel/vtime.c +++ b/arch/s390/kernel/vtime.c @@ -527,16 +527,8 @@ EXPORT_SYMBOL(del_virt_timer); */ void init_cpu_vtimer(void) { - struct thread_info *ti = current_thread_info(); struct vtimer_queue *vq; - S390_lowcore.user_timer = ti->user_timer; - S390_lowcore.system_timer = ti->system_timer; - - /* kick the virtual timer */ - asm volatile ("STCK %0" : "=m" (S390_lowcore.last_update_clock)); - asm volatile ("STPT %0" : "=m" (S390_lowcore.last_update_timer)); - /* initialize per cpu vtimer structure */ vq = &__get_cpu_var(virt_cpu_timer); INIT_LIST_HEAD(&vq->list); -- cgit 1.4.1 From 0112fc2229847feb6c4eb011e6833d8f1742a375 Mon Sep 17 00:00:00 2001 From: Oleg Drokin Date: Wed, 8 Apr 2009 20:05:42 +0400 Subject: Separate out common fstatat code into vfs_fstatat This is a version incorporating Christoph's suggestion. Separate out common *fstatat functionality into a single function instead of duplicating it all over the code. 
Signed-off-by: Oleg Drokin Signed-off-by: Al Viro --- arch/arm/kernel/sys_oabi-compat.c | 19 ++++--------- arch/s390/kernel/compat_linux.c | 18 ++++--------- arch/sparc/kernel/sys_sparc32.c | 19 ++++--------- arch/x86/ia32/sys_ia32.c | 19 ++++--------- fs/compat.c | 19 ++++--------- fs/stat.c | 56 +++++++++++++++++++-------------------- include/linux/fs.h | 1 + 7 files changed, 54 insertions(+), 97 deletions(-) (limited to 'arch/s390') diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c index e04173c7e621..d59a0cd537f0 100644 --- a/arch/arm/kernel/sys_oabi-compat.c +++ b/arch/arm/kernel/sys_oabi-compat.c @@ -177,21 +177,12 @@ asmlinkage long sys_oabi_fstatat64(int dfd, int flag) { struct kstat stat; - int error = -EINVAL; + int error; - if ((flag & ~AT_SYMLINK_NOFOLLOW) != 0) - goto out; - - if (flag & AT_SYMLINK_NOFOLLOW) - error = vfs_lstat_fd(dfd, filename, &stat); - else - error = vfs_stat_fd(dfd, filename, &stat); - - if (!error) - error = cp_oldabi_stat64(&stat, statbuf); - -out: - return error; + error = vfs_fstatat(dfd, filename, &stat, flag); + if (error) + return error; + return cp_oldabi_stat64(&stat, statbuf); } struct oabi_flock64 { diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c index 6cc87d8c8682..002c70d3cb75 100644 --- a/arch/s390/kernel/compat_linux.c +++ b/arch/s390/kernel/compat_linux.c @@ -702,20 +702,12 @@ asmlinkage long sys32_fstatat64(unsigned int dfd, char __user *filename, struct stat64_emu31 __user* statbuf, int flag) { struct kstat stat; - int error = -EINVAL; - - if ((flag & ~AT_SYMLINK_NOFOLLOW) != 0) - goto out; - - if (flag & AT_SYMLINK_NOFOLLOW) - error = vfs_lstat_fd(dfd, filename, &stat); - else - error = vfs_stat_fd(dfd, filename, &stat); + int error; - if (!error) - error = cp_stat64(statbuf, &stat); -out: - return error; + error = vfs_fstatat(dfd, filename, &stat, flag); + if (error) + return error; + return cp_stat64(statbuf, &stat); } /* diff --git a/arch/sparc/kernel/sys_sparc32.c b/arch/sparc/kernel/sys_sparc32.c index e800503879e4..f5000a460c05 100644 --- a/arch/sparc/kernel/sys_sparc32.c +++ b/arch/sparc/kernel/sys_sparc32.c @@ -206,21 +206,12 @@ asmlinkage long compat_sys_fstatat64(unsigned int dfd, char __user *filename, struct compat_stat64 __user * statbuf, int flag) { struct kstat stat; - int error = -EINVAL; - - if ((flag & ~AT_SYMLINK_NOFOLLOW) != 0) - goto out; - - if (flag & AT_SYMLINK_NOFOLLOW) - error = vfs_lstat_fd(dfd, filename, &stat); - else - error = vfs_stat_fd(dfd, filename, &stat); - - if (!error) - error = cp_compat_stat64(&stat, statbuf); + int error; -out: - return error; + error = vfs_fstatat(dfd, filename, &stat, flag); + if (error) + return error; + return cp_compat_stat64(&stat, statbuf); } asmlinkage long compat_sys_sysfs(int option, u32 arg1, u32 arg2) diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c index efac92fd1efb..085a8c35f149 100644 --- a/arch/x86/ia32/sys_ia32.c +++ b/arch/x86/ia32/sys_ia32.c @@ -129,21 +129,12 @@ asmlinkage long sys32_fstatat(unsigned int dfd, char __user *filename, struct stat64 __user *statbuf, int flag) { struct kstat stat; - int error = -EINVAL; + int error; - if ((flag & ~AT_SYMLINK_NOFOLLOW) != 0) - goto out; - - if (flag & AT_SYMLINK_NOFOLLOW) - error = vfs_lstat_fd(dfd, filename, &stat); - else - error = vfs_stat_fd(dfd, filename, &stat); - - if (!error) - error = cp_stat64(statbuf, &stat); - -out: - return error; + error = vfs_fstatat(dfd, filename, &stat, flag); + if (error) + return error; + return 
cp_stat64(statbuf, &stat); } /* diff --git a/fs/compat.c b/fs/compat.c index 3f84d5f15889..dda72e267092 100644 --- a/fs/compat.c +++ b/fs/compat.c @@ -204,21 +204,12 @@ asmlinkage long compat_sys_newfstatat(unsigned int dfd, char __user *filename, struct compat_stat __user *statbuf, int flag) { struct kstat stat; - int error = -EINVAL; - - if ((flag & ~AT_SYMLINK_NOFOLLOW) != 0) - goto out; - - if (flag & AT_SYMLINK_NOFOLLOW) - error = vfs_lstat_fd(dfd, filename, &stat); - else - error = vfs_stat_fd(dfd, filename, &stat); - - if (!error) - error = cp_compat_stat(&stat, statbuf); + int error; -out: - return error; + error = vfs_fstatat(dfd, filename, &stat, flag); + if (error) + return error; + return cp_compat_stat(&stat, statbuf); } #endif diff --git a/fs/stat.c b/fs/stat.c index 2db740a0cfb5..54711662b855 100644 --- a/fs/stat.c +++ b/fs/stat.c @@ -109,6 +109,24 @@ int vfs_fstat(unsigned int fd, struct kstat *stat) EXPORT_SYMBOL(vfs_fstat); +int vfs_fstatat(int dfd, char __user *filename, struct kstat *stat, int flag) +{ + int error = -EINVAL; + + if ((flag & ~AT_SYMLINK_NOFOLLOW) != 0) + goto out; + + if (flag & AT_SYMLINK_NOFOLLOW) + error = vfs_lstat_fd(dfd, filename, stat); + else + error = vfs_stat_fd(dfd, filename, stat); +out: + return error; +} + +EXPORT_SYMBOL(vfs_fstatat); + + #ifdef __ARCH_WANT_OLD_STAT /* @@ -264,21 +282,12 @@ SYSCALL_DEFINE4(newfstatat, int, dfd, char __user *, filename, struct stat __user *, statbuf, int, flag) { struct kstat stat; - int error = -EINVAL; - - if ((flag & ~AT_SYMLINK_NOFOLLOW) != 0) - goto out; - - if (flag & AT_SYMLINK_NOFOLLOW) - error = vfs_lstat_fd(dfd, filename, &stat); - else - error = vfs_stat_fd(dfd, filename, &stat); - - if (!error) - error = cp_new_stat(&stat, statbuf); + int error; -out: - return error; + error = vfs_fstatat(dfd, filename, &stat, flag); + if (error) + return error; + return cp_new_stat(&stat, statbuf); } #endif @@ -404,21 +413,12 @@ SYSCALL_DEFINE4(fstatat64, int, dfd, char __user *, filename, struct stat64 __user *, statbuf, int, flag) { struct kstat stat; - int error = -EINVAL; - - if ((flag & ~AT_SYMLINK_NOFOLLOW) != 0) - goto out; - - if (flag & AT_SYMLINK_NOFOLLOW) - error = vfs_lstat_fd(dfd, filename, &stat); - else - error = vfs_stat_fd(dfd, filename, &stat); - - if (!error) - error = cp_new_stat64(&stat, statbuf); + int error; -out: - return error; + error = vfs_fstatat(dfd, filename, &stat, flag); + if (error) + return error; + return cp_new_stat64(&stat, statbuf); } #endif /* __ARCH_WANT_STAT64 */ diff --git a/include/linux/fs.h b/include/linux/fs.h index e766be0d4329..257f4d37ad23 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2302,6 +2302,7 @@ extern int vfs_lstat(char __user *, struct kstat *); extern int vfs_stat_fd(int dfd, char __user *, struct kstat *); extern int vfs_lstat_fd(int dfd, char __user *, struct kstat *); extern int vfs_fstat(unsigned int, struct kstat *); +extern int vfs_fstatat(int , char __user *, struct kstat *, int); extern int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd, unsigned long arg); -- cgit 1.4.1 From 8e19608e8b5c001e4a66ce482edc474f05fb7355 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Tue, 21 Apr 2009 12:24:00 -0700 Subject: clocksource: pass clocksource to read() callback Pass clocksource pointer to the read() callback for clocksources. This allows us to share the callback between multiple instances. 
[hugh@veritas.com: fix powerpc build of clocksource pass clocksource mods] [akpm@linux-foundation.org: cleanup] Signed-off-by: Magnus Damm Acked-by: John Stultz Cc: Thomas Gleixner Signed-off-by: Hugh Dickins Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm/mach-at91/at91rm9200_time.c | 2 +- arch/arm/mach-at91/at91sam926x_time.c | 2 +- arch/arm/mach-davinci/time.c | 2 +- arch/arm/mach-imx/time.c | 2 +- arch/arm/mach-ixp4xx/common.c | 2 +- arch/arm/mach-msm/timer.c | 4 ++-- arch/arm/mach-netx/time.c | 2 +- arch/arm/mach-ns9xxx/time-ns9360.c | 2 +- arch/arm/mach-omap1/time.c | 2 +- arch/arm/mach-omap2/timer-gp.c | 2 +- arch/arm/mach-pxa/time.c | 2 +- arch/arm/mach-realview/core.c | 2 +- arch/arm/mach-versatile/core.c | 2 +- arch/arm/plat-mxc/time.c | 2 +- arch/arm/plat-omap/common.c | 4 ++-- arch/arm/plat-orion/time.c | 2 +- arch/avr32/kernel/time.c | 2 +- arch/blackfin/kernel/time-ts.c | 12 ++++++------ arch/ia64/kernel/cyclone.c | 2 +- arch/ia64/kernel/time.c | 4 ++-- arch/ia64/sn/kernel/sn2/timer.c | 2 +- arch/m68knommu/platform/68328/timers.c | 2 +- arch/m68knommu/platform/coldfire/dma_timer.c | 2 +- arch/m68knommu/platform/coldfire/pit.c | 2 +- arch/m68knommu/platform/coldfire/timers.c | 2 +- arch/mips/kernel/cevt-txx9.c | 2 +- arch/mips/kernel/csrc-bcm1480.c | 2 +- arch/mips/kernel/csrc-ioasic.c | 6 +++--- arch/mips/kernel/csrc-r4k.c | 2 +- arch/mips/kernel/csrc-sb1250.c | 2 +- arch/mips/kernel/i8253.c | 2 +- arch/mips/nxp/pnx8550/common/time.c | 2 +- arch/mips/sgi-ip27/ip27-timer.c | 2 +- arch/powerpc/kernel/time.c | 8 ++++---- arch/s390/kernel/time.c | 2 +- arch/sh/kernel/time_32.c | 2 +- arch/sh/kernel/timers/timer-tmu.c | 2 +- arch/sparc/kernel/time_64.c | 7 ++++++- arch/um/kernel/time.c | 2 +- arch/x86/kernel/hpet.c | 6 +++--- arch/x86/kernel/i8253.c | 2 +- arch/x86/kernel/kvmclock.c | 7 ++++++- arch/x86/kernel/tsc.c | 2 +- arch/x86/kernel/vmiclock_32.c | 2 +- arch/x86/lguest/boot.c | 2 +- arch/x86/xen/time.c | 7 ++++++- drivers/char/hpet.c | 2 +- drivers/clocksource/acpi_pm.c | 12 ++++++------ drivers/clocksource/cyclone.c | 2 +- drivers/clocksource/scx200_hrt.c | 2 +- drivers/clocksource/tcb_clksrc.c | 2 +- include/linux/clocksource.h | 6 +++--- kernel/time/clocksource.c | 8 ++++---- kernel/time/jiffies.c | 2 +- 54 files changed, 94 insertions(+), 79 deletions(-) (limited to 'arch/s390') diff --git a/arch/arm/mach-at91/at91rm9200_time.c b/arch/arm/mach-at91/at91rm9200_time.c index 1ff1bda0a894..309f3511aa20 100644 --- a/arch/arm/mach-at91/at91rm9200_time.c +++ b/arch/arm/mach-at91/at91rm9200_time.c @@ -85,7 +85,7 @@ static struct irqaction at91rm9200_timer_irq = { .handler = at91rm9200_timer_interrupt }; -static cycle_t read_clk32k(void) +static cycle_t read_clk32k(struct clocksource *cs) { return read_CRTR(); } diff --git a/arch/arm/mach-at91/at91sam926x_time.c b/arch/arm/mach-at91/at91sam926x_time.c index b63e1d5f1bad..4bd56aee4370 100644 --- a/arch/arm/mach-at91/at91sam926x_time.c +++ b/arch/arm/mach-at91/at91sam926x_time.c @@ -31,7 +31,7 @@ static u32 pit_cnt; /* access only w/system irq blocked */ * Clocksource: just a monotonic counter of MCK/16 cycles. * We don't care whether or not PIT irqs are enabled. 
*/ -static cycle_t read_pit_clk(void) +static cycle_t read_pit_clk(struct clocksource *cs) { unsigned long flags; u32 elapsed; diff --git a/arch/arm/mach-davinci/time.c b/arch/arm/mach-davinci/time.c index f8bcd29d17a6..6c227d4ba998 100644 --- a/arch/arm/mach-davinci/time.c +++ b/arch/arm/mach-davinci/time.c @@ -238,7 +238,7 @@ static void __init timer_init(void) /* * clocksource */ -static cycle_t read_cycles(void) +static cycle_t read_cycles(struct clocksource *cs) { struct timer_s *t = &timers[TID_CLOCKSOURCE]; diff --git a/arch/arm/mach-imx/time.c b/arch/arm/mach-imx/time.c index aff0ebcfa847..5aef18b599e5 100644 --- a/arch/arm/mach-imx/time.c +++ b/arch/arm/mach-imx/time.c @@ -73,7 +73,7 @@ static void __init imx_timer_hardware_init(void) IMX_TCTL(TIMER_BASE) = TCTL_FRR | TCTL_CLK_PCLK1 | TCTL_TEN; } -cycle_t imx_get_cycles(void) +cycle_t imx_get_cycles(struct clocksource *cs) { return IMX_TCN(TIMER_BASE); } diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c index f4656d2ac8a8..1e93dfee7543 100644 --- a/arch/arm/mach-ixp4xx/common.c +++ b/arch/arm/mach-ixp4xx/common.c @@ -401,7 +401,7 @@ void __init ixp4xx_sys_init(void) /* * clocksource */ -cycle_t ixp4xx_get_cycles(void) +cycle_t ixp4xx_get_cycles(struct clocksource *cs) { return *IXP4XX_OSTS; } diff --git a/arch/arm/mach-msm/timer.c b/arch/arm/mach-msm/timer.c index 444d9c0f5ca6..4855b8ca5101 100644 --- a/arch/arm/mach-msm/timer.c +++ b/arch/arm/mach-msm/timer.c @@ -57,12 +57,12 @@ static irqreturn_t msm_timer_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } -static cycle_t msm_gpt_read(void) +static cycle_t msm_gpt_read(struct clocksource *cs) { return readl(MSM_GPT_BASE + TIMER_COUNT_VAL); } -static cycle_t msm_dgt_read(void) +static cycle_t msm_dgt_read(struct clocksource *cs) { return readl(MSM_DGT_BASE + TIMER_COUNT_VAL) >> MSM_DGT_SHIFT; } diff --git a/arch/arm/mach-netx/time.c b/arch/arm/mach-netx/time.c index f201fddb594f..82801dbf0579 100644 --- a/arch/arm/mach-netx/time.c +++ b/arch/arm/mach-netx/time.c @@ -104,7 +104,7 @@ static struct irqaction netx_timer_irq = { .handler = netx_timer_interrupt, }; -cycle_t netx_get_cycles(void) +cycle_t netx_get_cycles(struct clocksource *cs) { return readl(NETX_GPIO_COUNTER_CURRENT(TIMER_CLOCKSOURCE)); } diff --git a/arch/arm/mach-ns9xxx/time-ns9360.c b/arch/arm/mach-ns9xxx/time-ns9360.c index 41df69721769..77281260358a 100644 --- a/arch/arm/mach-ns9xxx/time-ns9360.c +++ b/arch/arm/mach-ns9xxx/time-ns9360.c @@ -25,7 +25,7 @@ #define TIMER_CLOCKEVENT 1 static u32 latch; -static cycle_t ns9360_clocksource_read(void) +static cycle_t ns9360_clocksource_read(struct clocksource *cs) { return __raw_readl(SYS_TR(TIMER_CLOCKSOURCE)); } diff --git a/arch/arm/mach-omap1/time.c b/arch/arm/mach-omap1/time.c index 495a32c287b4..4d56408d3cff 100644 --- a/arch/arm/mach-omap1/time.c +++ b/arch/arm/mach-omap1/time.c @@ -198,7 +198,7 @@ static struct irqaction omap_mpu_timer2_irq = { .handler = omap_mpu_timer2_interrupt, }; -static cycle_t mpu_read(void) +static cycle_t mpu_read(struct clocksource *cs) { return ~omap_mpu_timer_read(1); } diff --git a/arch/arm/mach-omap2/timer-gp.c b/arch/arm/mach-omap2/timer-gp.c index 9fc13a2cc3f4..1cb2c0909c2b 100644 --- a/arch/arm/mach-omap2/timer-gp.c +++ b/arch/arm/mach-omap2/timer-gp.c @@ -138,7 +138,7 @@ static inline void __init omap2_gp_clocksource_init(void) {} * clocksource */ static struct omap_dm_timer *gpt_clocksource; -static cycle_t clocksource_read_cycles(void) +static cycle_t clocksource_read_cycles(struct clocksource *cs) 
{ return (cycle_t)omap_dm_timer_read_counter(gpt_clocksource); } diff --git a/arch/arm/mach-pxa/time.c b/arch/arm/mach-pxa/time.c index 8eb3830fbb0b..750c448db672 100644 --- a/arch/arm/mach-pxa/time.c +++ b/arch/arm/mach-pxa/time.c @@ -125,7 +125,7 @@ static struct clock_event_device ckevt_pxa_osmr0 = { .set_mode = pxa_osmr0_set_mode, }; -static cycle_t pxa_read_oscr(void) +static cycle_t pxa_read_oscr(struct clocksource *cs) { return OSCR; } diff --git a/arch/arm/mach-realview/core.c b/arch/arm/mach-realview/core.c index 9ab947c14f26..942e1a7eb9b2 100644 --- a/arch/arm/mach-realview/core.c +++ b/arch/arm/mach-realview/core.c @@ -715,7 +715,7 @@ static struct irqaction realview_timer_irq = { .handler = realview_timer_interrupt, }; -static cycle_t realview_get_cycles(void) +static cycle_t realview_get_cycles(struct clocksource *cs) { return ~readl(timer3_va_base + TIMER_VALUE); } diff --git a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c index 565776680d8c..1f929c391af7 100644 --- a/arch/arm/mach-versatile/core.c +++ b/arch/arm/mach-versatile/core.c @@ -948,7 +948,7 @@ static struct irqaction versatile_timer_irq = { .handler = versatile_timer_interrupt, }; -static cycle_t versatile_get_cycles(void) +static cycle_t versatile_get_cycles(struct clocksource *cs) { return ~readl(TIMER3_VA_BASE + TIMER_VALUE); } diff --git a/arch/arm/plat-mxc/time.c b/arch/arm/plat-mxc/time.c index ef1b3cd85bd3..dab3357196fb 100644 --- a/arch/arm/plat-mxc/time.c +++ b/arch/arm/plat-mxc/time.c @@ -36,7 +36,7 @@ static enum clock_event_mode clockevent_mode = CLOCK_EVT_MODE_UNUSED; /* clock source */ -static cycle_t mxc_get_cycles(void) +static cycle_t mxc_get_cycles(struct clocksource *cs) { return __raw_readl(TIMER_BASE + MXC_TCN); } diff --git a/arch/arm/plat-omap/common.c b/arch/arm/plat-omap/common.c index d1797147732f..433021f3d7cc 100644 --- a/arch/arm/plat-omap/common.c +++ b/arch/arm/plat-omap/common.c @@ -185,7 +185,7 @@ console_initcall(omap_add_serial_console); #include -static cycle_t omap_32k_read(void) +static cycle_t omap_32k_read(struct clocksource *cs) { return omap_readl(TIMER_32K_SYNCHRONIZED); } @@ -207,7 +207,7 @@ unsigned long long sched_clock(void) { unsigned long long ret; - ret = (unsigned long long)omap_32k_read(); + ret = (unsigned long long)omap_32k_read(&clocksource_32k); ret = (ret * clocksource_32k.mult_orig) >> clocksource_32k.shift; return ret; } diff --git a/arch/arm/plat-orion/time.c b/arch/arm/plat-orion/time.c index 6fa2923e6dca..2faf9dba4ef7 100644 --- a/arch/arm/plat-orion/time.c +++ b/arch/arm/plat-orion/time.c @@ -41,7 +41,7 @@ static u32 ticks_per_jiffy; /* * Clocksource handling. 
*/ -static cycle_t orion_clksrc_read(void) +static cycle_t orion_clksrc_read(struct clocksource *cs) { return 0xffffffff - readl(TIMER0_VAL); } diff --git a/arch/avr32/kernel/time.c b/arch/avr32/kernel/time.c index 0ff46bf873b0..f27aa3b259fa 100644 --- a/arch/avr32/kernel/time.c +++ b/arch/avr32/kernel/time.c @@ -18,7 +18,7 @@ #include -static cycle_t read_cycle_count(void) +static cycle_t read_cycle_count(struct clocksource *cs) { return (cycle_t)sysreg_read(COUNT); } diff --git a/arch/blackfin/kernel/time-ts.c b/arch/blackfin/kernel/time-ts.c index 0ed2badfd746..27646121280a 100644 --- a/arch/blackfin/kernel/time-ts.c +++ b/arch/blackfin/kernel/time-ts.c @@ -58,16 +58,11 @@ static inline unsigned long long cycles_2_ns(cycle_t cyc) return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR; } -static cycle_t read_cycles(void) +static cycle_t read_cycles(struct clocksource *cs) { return __bfin_cycles_off + (get_cycles() << __bfin_cycles_mod); } -unsigned long long sched_clock(void) -{ - return cycles_2_ns(read_cycles()); -} - static struct clocksource clocksource_bfin = { .name = "bfin_cycles", .rating = 350, @@ -77,6 +72,11 @@ static struct clocksource clocksource_bfin = { .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; +unsigned long long sched_clock(void) +{ + return cycles_2_ns(read_cycles(&clocksource_bfin)); +} + static int __init bfin_clocksource_init(void) { set_cyc2ns_scale(get_cclk() / 1000); diff --git a/arch/ia64/kernel/cyclone.c b/arch/ia64/kernel/cyclone.c index 790ef0d87e12..71e35864d2e2 100644 --- a/arch/ia64/kernel/cyclone.c +++ b/arch/ia64/kernel/cyclone.c @@ -21,7 +21,7 @@ void __init cyclone_setup(void) static void __iomem *cyclone_mc; -static cycle_t read_cyclone(void) +static cycle_t read_cyclone(struct clocksource *cs) { return (cycle_t)readq((void __iomem *)cyclone_mc); } diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c index 641c8b61c4f1..604c1a35db33 100644 --- a/arch/ia64/kernel/time.c +++ b/arch/ia64/kernel/time.c @@ -33,7 +33,7 @@ #include "fsyscall_gtod_data.h" -static cycle_t itc_get_cycles(void); +static cycle_t itc_get_cycles(struct clocksource *cs); struct fsyscall_gtod_data_t fsyscall_gtod_data = { .lock = SEQLOCK_UNLOCKED, @@ -383,7 +383,7 @@ ia64_init_itm (void) } } -static cycle_t itc_get_cycles(void) +static cycle_t itc_get_cycles(struct clocksource *cs) { u64 lcycle, now, ret; diff --git a/arch/ia64/sn/kernel/sn2/timer.c b/arch/ia64/sn/kernel/sn2/timer.c index cf67fc562054..21d6f09e3447 100644 --- a/arch/ia64/sn/kernel/sn2/timer.c +++ b/arch/ia64/sn/kernel/sn2/timer.c @@ -23,7 +23,7 @@ extern unsigned long sn_rtc_cycles_per_second; -static cycle_t read_sn2(void) +static cycle_t read_sn2(struct clocksource *cs) { return (cycle_t)readq(RTC_COUNTER_ADDR); } diff --git a/arch/m68knommu/platform/68328/timers.c b/arch/m68knommu/platform/68328/timers.c index 6bafefa546e5..309f725995bf 100644 --- a/arch/m68knommu/platform/68328/timers.c +++ b/arch/m68knommu/platform/68328/timers.c @@ -75,7 +75,7 @@ static struct irqaction m68328_timer_irq = { /***************************************************************************/ -static cycle_t m68328_read_clk(void) +static cycle_t m68328_read_clk(struct clocksource *cs) { unsigned long flags; u32 cycles; diff --git a/arch/m68knommu/platform/coldfire/dma_timer.c b/arch/m68knommu/platform/coldfire/dma_timer.c index 772578b1084f..a5f562823d7a 100644 --- a/arch/m68knommu/platform/coldfire/dma_timer.c +++ b/arch/m68knommu/platform/coldfire/dma_timer.c @@ -34,7 +34,7 @@ #define DMA_DTMR_CLK_DIV_16 (2 << 1) #define 
DMA_DTMR_ENABLE (1 << 0) -static cycle_t cf_dt_get_cycles(void) +static cycle_t cf_dt_get_cycles(struct clocksource *cs) { return __raw_readl(DTCN0); } diff --git a/arch/m68knommu/platform/coldfire/pit.c b/arch/m68knommu/platform/coldfire/pit.c index 2a12e7fa9748..61b96211f8ff 100644 --- a/arch/m68knommu/platform/coldfire/pit.c +++ b/arch/m68knommu/platform/coldfire/pit.c @@ -125,7 +125,7 @@ static struct irqaction pit_irq = { /***************************************************************************/ -static cycle_t pit_read_clk(void) +static cycle_t pit_read_clk(struct clocksource *cs) { unsigned long flags; u32 cycles; diff --git a/arch/m68knommu/platform/coldfire/timers.c b/arch/m68knommu/platform/coldfire/timers.c index 454f25493491..1ba8a3731653 100644 --- a/arch/m68knommu/platform/coldfire/timers.c +++ b/arch/m68knommu/platform/coldfire/timers.c @@ -78,7 +78,7 @@ static struct irqaction mcftmr_timer_irq = { /***************************************************************************/ -static cycle_t mcftmr_read_clk(void) +static cycle_t mcftmr_read_clk(struct clocksource *cs) { unsigned long flags; u32 cycles; diff --git a/arch/mips/kernel/cevt-txx9.c b/arch/mips/kernel/cevt-txx9.c index eccf7d6096bd..2e911e3da8d3 100644 --- a/arch/mips/kernel/cevt-txx9.c +++ b/arch/mips/kernel/cevt-txx9.c @@ -22,7 +22,7 @@ static struct txx9_tmr_reg __iomem *txx9_cs_tmrptr; -static cycle_t txx9_cs_read(void) +static cycle_t txx9_cs_read(struct clocksource *cs) { return __raw_readl(&txx9_cs_tmrptr->trr); } diff --git a/arch/mips/kernel/csrc-bcm1480.c b/arch/mips/kernel/csrc-bcm1480.c index 868745e7184b..51489f8a825e 100644 --- a/arch/mips/kernel/csrc-bcm1480.c +++ b/arch/mips/kernel/csrc-bcm1480.c @@ -28,7 +28,7 @@ #include -static cycle_t bcm1480_hpt_read(void) +static cycle_t bcm1480_hpt_read(struct clocksource *cs) { return (cycle_t) __raw_readq(IOADDR(A_SCD_ZBBUS_CYCLE_COUNT)); } diff --git a/arch/mips/kernel/csrc-ioasic.c b/arch/mips/kernel/csrc-ioasic.c index 1d5f63cf8997..b551f48d3a07 100644 --- a/arch/mips/kernel/csrc-ioasic.c +++ b/arch/mips/kernel/csrc-ioasic.c @@ -25,7 +25,7 @@ #include #include -static cycle_t dec_ioasic_hpt_read(void) +static cycle_t dec_ioasic_hpt_read(struct clocksource *cs) { return ioasic_read(IO_REG_FCTR); } @@ -47,13 +47,13 @@ void __init dec_ioasic_clocksource_init(void) while (!ds1287_timer_state()) ; - start = dec_ioasic_hpt_read(); + start = dec_ioasic_hpt_read(&clocksource_dec); while (i--) while (!ds1287_timer_state()) ; - end = dec_ioasic_hpt_read(); + end = dec_ioasic_hpt_read(&clocksource_dec); freq = (end - start) * 10; printk(KERN_INFO "I/O ASIC clock frequency %dHz\n", freq); diff --git a/arch/mips/kernel/csrc-r4k.c b/arch/mips/kernel/csrc-r4k.c index f1a2893931ed..e95a3cd48eea 100644 --- a/arch/mips/kernel/csrc-r4k.c +++ b/arch/mips/kernel/csrc-r4k.c @@ -10,7 +10,7 @@ #include -static cycle_t c0_hpt_read(void) +static cycle_t c0_hpt_read(struct clocksource *cs) { return read_c0_count(); } diff --git a/arch/mips/kernel/csrc-sb1250.c b/arch/mips/kernel/csrc-sb1250.c index 92212bbb8e45..d14d3d1907fa 100644 --- a/arch/mips/kernel/csrc-sb1250.c +++ b/arch/mips/kernel/csrc-sb1250.c @@ -33,7 +33,7 @@ * The HPT is free running from SB1250_HPT_VALUE down to 0 then starts over * again. 
*/ -static cycle_t sb1250_hpt_read(void) +static cycle_t sb1250_hpt_read(struct clocksource *cs) { unsigned int count; diff --git a/arch/mips/kernel/i8253.c b/arch/mips/kernel/i8253.c index 689719e34f08..ed20e7fe65e3 100644 --- a/arch/mips/kernel/i8253.c +++ b/arch/mips/kernel/i8253.c @@ -128,7 +128,7 @@ void __init setup_pit_timer(void) * to just read by itself. So use jiffies to emulate a free * running counter: */ -static cycle_t pit_read(void) +static cycle_t pit_read(struct clocksource *cs) { unsigned long flags; int count; diff --git a/arch/mips/nxp/pnx8550/common/time.c b/arch/mips/nxp/pnx8550/common/time.c index cf293b279098..8df43e9e4d90 100644 --- a/arch/mips/nxp/pnx8550/common/time.c +++ b/arch/mips/nxp/pnx8550/common/time.c @@ -35,7 +35,7 @@ static unsigned long cpj; -static cycle_t hpt_read(void) +static cycle_t hpt_read(struct clocksource *cs) { return read_c0_count2(); } diff --git a/arch/mips/sgi-ip27/ip27-timer.c b/arch/mips/sgi-ip27/ip27-timer.c index f024057a35f8..f10a7cd64f7e 100644 --- a/arch/mips/sgi-ip27/ip27-timer.c +++ b/arch/mips/sgi-ip27/ip27-timer.c @@ -159,7 +159,7 @@ static void __init hub_rt_clock_event_global_init(void) setup_irq(irq, &hub_rt_irqaction); } -static cycle_t hub_rt_read(void) +static cycle_t hub_rt_read(struct clocksource *cs) { return REMOTE_HUB_L(cputonasid(0), PI_RT_COUNT); } diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 926ea864e34f..48571ac56fb7 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -77,7 +77,7 @@ #include #include -static cycle_t rtc_read(void); +static cycle_t rtc_read(struct clocksource *); static struct clocksource clocksource_rtc = { .name = "rtc", .rating = 400, @@ -88,7 +88,7 @@ static struct clocksource clocksource_rtc = { .read = rtc_read, }; -static cycle_t timebase_read(void); +static cycle_t timebase_read(struct clocksource *); static struct clocksource clocksource_timebase = { .name = "timebase", .rating = 400, @@ -766,12 +766,12 @@ unsigned long read_persistent_clock(void) } /* clocksource code */ -static cycle_t rtc_read(void) +static cycle_t rtc_read(struct clocksource *cs) { return (cycle_t)get_rtc(); } -static cycle_t timebase_read(void) +static cycle_t timebase_read(struct clocksource *cs) { return (cycle_t)get_tb(); } diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index 6ded50dfa75a..ef596d020573 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c @@ -201,7 +201,7 @@ unsigned long read_persistent_clock(void) return ts.tv_sec; } -static cycle_t read_tod_clock(void) +static cycle_t read_tod_clock(struct clocksource *cs) { return get_clock(); } diff --git a/arch/sh/kernel/time_32.c b/arch/sh/kernel/time_32.c index c34e1e0f9b02..1700d2465f6c 100644 --- a/arch/sh/kernel/time_32.c +++ b/arch/sh/kernel/time_32.c @@ -208,7 +208,7 @@ unsigned long long sched_clock(void) if (!clocksource_sh.rating) return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ); - cycles = clocksource_sh.read(); + cycles = clocksource_sh.read(&clocksource_sh); return cyc2ns(&clocksource_sh, cycles); } #endif diff --git a/arch/sh/kernel/timers/timer-tmu.c b/arch/sh/kernel/timers/timer-tmu.c index c5d3396f5960..fe8d8930ccb6 100644 --- a/arch/sh/kernel/timers/timer-tmu.c +++ b/arch/sh/kernel/timers/timer-tmu.c @@ -81,7 +81,7 @@ static int tmu_timer_stop(void) */ static int tmus_are_scaled; -static cycle_t tmu_timer_read(void) +static cycle_t tmu_timer_read(struct clocksource *cs) { return ((cycle_t)(~_tmu_read(TMU1)))<<tmus_are_scaled; } diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c --- a/arch/sparc/kernel/time_64.c +++ b/arch/sparc/kernel/time_64.c +static cycle_t clocksource_tick_read(struct clocksource *cs) +{ + return tick_ops->get_tick(); +} + void __init
time_init(void) { unsigned long freq = sparc64_init_timers(); @@ -827,7 +832,7 @@ void __init time_init(void) clocksource_tick.mult = clocksource_hz2mult(freq, clocksource_tick.shift); - clocksource_tick.read = tick_ops->get_tick; + clocksource_tick.read = clocksource_tick_read; printk("clocksource: mult[%x] shift[%d]\n", clocksource_tick.mult, clocksource_tick.shift); diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c index b13a87a3ec95..c8b9c469fcd7 100644 --- a/arch/um/kernel/time.c +++ b/arch/um/kernel/time.c @@ -65,7 +65,7 @@ static irqreturn_t um_timer(int irq, void *dev) return IRQ_HANDLED; } -static cycle_t itimer_read(void) +static cycle_t itimer_read(struct clocksource *cs) { return os_nsecs() / 1000; } diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index 648b3a2a3a44..3f0019e0a229 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c @@ -722,7 +722,7 @@ static int hpet_cpuhp_notify(struct notifier_block *n, /* * Clock source related code */ -static cycle_t read_hpet(void) +static cycle_t read_hpet(struct clocksource *cs) { return (cycle_t)hpet_readl(HPET_COUNTER); } @@ -756,7 +756,7 @@ static int hpet_clocksource_register(void) hpet_restart_counter(); /* Verify whether hpet counter works */ - t1 = read_hpet(); + t1 = hpet_readl(HPET_COUNTER); rdtscll(start); /* @@ -770,7 +770,7 @@ static int hpet_clocksource_register(void) rdtscll(now); } while ((now - start) < 200000UL); - if (t1 == read_hpet()) { + if (t1 == hpet_readl(HPET_COUNTER)) { printk(KERN_WARNING "HPET counter not counting. HPET disabled\n"); return -ENODEV; diff --git a/arch/x86/kernel/i8253.c b/arch/x86/kernel/i8253.c index 3475440baa54..c2e0bb0890d4 100644 --- a/arch/x86/kernel/i8253.c +++ b/arch/x86/kernel/i8253.c @@ -129,7 +129,7 @@ void __init setup_pit_timer(void) * to just read by itself. So use jiffies to emulate a free * running counter: */ -static cycle_t pit_read(void) +static cycle_t pit_read(struct clocksource *cs) { static int old_count; static u32 old_jifs; diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c index 137f2e8132df..223af43f1526 100644 --- a/arch/x86/kernel/kvmclock.c +++ b/arch/x86/kernel/kvmclock.c @@ -77,6 +77,11 @@ static cycle_t kvm_clock_read(void) return ret; } +static cycle_t kvm_clock_get_cycles(struct clocksource *cs) +{ + return kvm_clock_read(); +} + /* * If we don't do that, there is the possibility that the guest * will calibrate under heavy load - thus, getting a lower lpj - @@ -107,7 +112,7 @@ static void kvm_get_preset_lpj(void) static struct clocksource kvm_clock = { .name = "kvm-clock", - .read = kvm_clock_read, + .read = kvm_clock_get_cycles, .rating = 400, .mask = CLOCKSOURCE_MASK(64), .mult = 1 << KVM_SCALE, diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 7a567ebe6361..d57de05dc430 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -699,7 +699,7 @@ static struct clocksource clocksource_tsc; * code, which is necessary to support wrapping clocksources like pm * timer. 
*/ -static cycle_t read_tsc(void) +static cycle_t read_tsc(struct clocksource *cs) { cycle_t ret = (cycle_t)get_cycles(); diff --git a/arch/x86/kernel/vmiclock_32.c b/arch/x86/kernel/vmiclock_32.c index d303369a7bad..2b3eb82efeeb 100644 --- a/arch/x86/kernel/vmiclock_32.c +++ b/arch/x86/kernel/vmiclock_32.c @@ -283,7 +283,7 @@ void __devinit vmi_time_ap_init(void) /** vmi clocksource */ static struct clocksource clocksource_vmi; -static cycle_t read_real_cycles(void) +static cycle_t read_real_cycles(struct clocksource *cs) { cycle_t ret = (cycle_t)vmi_timer_ops.get_cycle_counter(VMI_CYCLES_REAL); return max(ret, clocksource_vmi.cycle_last); diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index a2085368a3dc..ca7ec44bafc3 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c @@ -663,7 +663,7 @@ static unsigned long lguest_tsc_khz(void) /* If we can't use the TSC, the kernel falls back to our lower-priority * "lguest_clock", where we read the time value given to us by the Host. */ -static cycle_t lguest_clock_read(void) +static cycle_t lguest_clock_read(struct clocksource *cs) { unsigned long sec, nsec; diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c index 14f240623497..0a5aa44299a5 100644 --- a/arch/x86/xen/time.c +++ b/arch/x86/xen/time.c @@ -213,6 +213,11 @@ cycle_t xen_clocksource_read(void) return ret; } +static cycle_t xen_clocksource_get_cycles(struct clocksource *cs) +{ + return xen_clocksource_read(); +} + static void xen_read_wallclock(struct timespec *ts) { struct shared_info *s = HYPERVISOR_shared_info; @@ -241,7 +246,7 @@ int xen_set_wallclock(unsigned long now) static struct clocksource xen_clocksource __read_mostly = { .name = "xen", .rating = 400, - .read = xen_clocksource_read, + .read = xen_clocksource_get_cycles, .mask = ~0, .mult = 1<<XEN_SHIFT, /* time directly scaled to nsec */ .shift = XEN_SHIFT, diff --git a/drivers/clocksource/acpi_pm.c b/drivers/clocksource/acpi_pm.c --- a/drivers/clocksource/acpi_pm.c +++ b/drivers/clocksource/acpi_pm.c -static cycle_t acpi_pm_read_slow(void) +static cycle_t acpi_pm_read_slow(struct clocksource *cs) { return (cycle_t)acpi_pm_read_verified(); } -static cycle_t acpi_pm_read(void) +static cycle_t acpi_pm_read(struct clocksource *cs) { return (cycle_t)read_pmtmr(); } - value2 = clocksource_acpi_pm.read(); + value2 = clocksource_acpi_pm.read(&clocksource_acpi_pm); if (value2 == value1) continue; if (value2 > value1) diff --git a/drivers/clocksource/cyclone.c b/drivers/clocksource/cyclone.c index 8615059a8729..64e528e8bfa6 100644 --- a/drivers/clocksource/cyclone.c +++ b/drivers/clocksource/cyclone.c @@ -19,7 +19,7 @@ int use_cyclone = 0; static void __iomem *cyclone_ptr; -static cycle_t read_cyclone(void) +static cycle_t read_cyclone(struct clocksource *cs) { return (cycle_t)readl(cyclone_ptr); } diff --git a/drivers/clocksource/scx200_hrt.c b/drivers/clocksource/scx200_hrt.c index b92da677aa5d..27f4d9637b62 100644 --- a/drivers/clocksource/scx200_hrt.c +++ b/drivers/clocksource/scx200_hrt.c @@ -43,7 +43,7 @@ MODULE_PARM_DESC(ppm, "+-adjust to actual XO freq (ppm)"); /* The base timer frequency, * 27 if selected */ #define HRT_FREQ 1000000 -static cycle_t read_hrt(void) +static cycle_t read_hrt(struct clocksource *cs) { /* Read the timer value */ return (cycle_t) inl(scx200_cb_base + SCx200_TIMER_OFFSET); diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c index 254f1064d973..01b886e68822 100644 --- a/drivers/clocksource/tcb_clksrc.c +++ b/drivers/clocksource/tcb_clksrc.c @@ -39,7 +39,7 @@ static void __iomem *tcaddr; -static cycle_t tc_get_cycles(void) +static cycle_t tc_get_cycles(struct clocksource *cs) { unsigned long flags; u32 lower, upper; diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index 573819ef4cc0..0d96cde9ee5d 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h @@ -143,7 +143,7 @@ extern u64 timecounter_cyc2time(struct timecounter *tc, * 400-499: Perfect * The ideal clocksource. A must-use where * available.
- * @read: returns a cycle value + * @read: returns a cycle value, passes clocksource as argument * @mask: bitmask for two's complement * subtraction of non 64 bit counters * @mult: cycle to nanosecond multiplier (adjusted by NTP) @@ -162,7 +162,7 @@ struct clocksource { char *name; struct list_head list; int rating; - cycle_t (*read)(void); + cycle_t (*read)(struct clocksource *cs); cycle_t mask; u32 mult; u32 mult_orig; @@ -271,7 +271,7 @@ static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant) */ static inline cycle_t clocksource_read(struct clocksource *cs) { - return cs->read(); + return cs->read(cs); } /** diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index c46c931a7fe7..ecfd7b5187e0 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c @@ -181,12 +181,12 @@ static void clocksource_watchdog(unsigned long data) resumed = test_and_clear_bit(0, &watchdog_resumed); - wdnow = watchdog->read(); + wdnow = watchdog->read(watchdog); wd_nsec = cyc2ns(watchdog, (wdnow - watchdog_last) & watchdog->mask); watchdog_last = wdnow; list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) { - csnow = cs->read(); + csnow = cs->read(cs); if (unlikely(resumed)) { cs->wd_last = csnow; @@ -247,7 +247,7 @@ static void clocksource_check_watchdog(struct clocksource *cs) list_add(&cs->wd_list, &watchdog_list); if (!started && watchdog) { - watchdog_last = watchdog->read(); + watchdog_last = watchdog->read(watchdog); watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL; add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask)); @@ -268,7 +268,7 @@ static void clocksource_check_watchdog(struct clocksource *cs) cse->flags &= ~CLOCK_SOURCE_WATCHDOG; /* Start if list is not empty */ if (!list_empty(&watchdog_list)) { - watchdog_last = watchdog->read(); + watchdog_last = watchdog->read(watchdog); watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL; add_timer_on(&watchdog_timer, diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c index 06f197560f3b..c3f6c30816e3 100644 --- a/kernel/time/jiffies.c +++ b/kernel/time/jiffies.c @@ -50,7 +50,7 @@ */ #define JIFFIES_SHIFT 8 -static cycle_t jiffies_read(void) +static cycle_t jiffies_read(struct clocksource *cs) { return (cycle_t) jiffies; } -- cgit 1.4.1
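After the hunks above, every clocksource ->read callback receives the clocksource itself as an argument. The sketch below is illustrative only and not part of any patch in this log; the device, register base and names (example_counter_base, "example-counter") are hypothetical. It shows the shape of a driver-side read callback against the new cycle_t (*read)(struct clocksource *cs) prototype.

#include <linux/clocksource.h>
#include <linux/io.h>

/* Hypothetical memory-mapped free-running counter, for illustration only. */
static void __iomem *example_counter_base;

static cycle_t example_counter_read(struct clocksource *cs)
{
	/*
	 * The clocksource is now passed in, so per-instance driver state
	 * could also be reached via container_of(cs, ...) instead of a
	 * file-scope global like example_counter_base.
	 */
	return (cycle_t)readl(example_counter_base);
}

static struct clocksource example_clocksource = {
	.name	= "example-counter",
	.rating	= 200,
	.read	= example_counter_read,
	.mask	= CLOCKSOURCE_MASK(32),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

Registration would still fill in mult/shift for the counter frequency (for example with clocksource_hz2mult(), as the sparc hunk above does) before calling clocksource_register().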