author    Mauro Carvalho Chehab <mchehab@s-opensource.com>  2016-10-05 16:42:36 -0300
committer Mauro Carvalho Chehab <mchehab@s-opensource.com>  2016-10-05 16:43:53 -0300
commit    9fce0c226536fc36c7fb0a80000ca38a995be43e (patch)
tree      d1d03d4a831bb143ffd401cefa0f44f636ac0084 /kernel/sched/cputime.c
parent    3cc2691227203c00cac1d82d6b0772224d5c87b2 (diff)
parent    c8d2bc9bc39ebea8437fd974fdbc21847bb897a3 (diff)
download  linux-9fce0c226536fc36c7fb0a80000ca38a995be43e.tar.gz
Merge tag 'v4.8' into patchwork
Linux 4.8

* tag 'v4.8': (1761 commits)
  Linux 4.8
  ARM: 8618/1: decompressor: reset ttbcr fields to use TTBR0 on ARMv7
  MIPS: CM: Fix mips_cm_max_vp_width for non-MT kernels on MT systems
  include/linux/property.h: fix typo/compile error
  ocfs2: fix deadlock on mmapped page in ocfs2_write_begin_nolock()
  mm: workingset: fix crash in shadow node shrinker caused by replace_page_cache_page()
  MAINTAINERS: Switch to kernel.org email address for Javi Merino
  x86/entry/64: Fix context tracking state warning when load_gs_index fails
  x86/boot: Initialize FPU and X86_FEATURE_ALWAYS even if we don't have CPUID
  x86/vdso: Fix building on big endian host
  x86/boot: Fix another __read_cr4() case on 486
  sctp: fix the issue sctp_diag uses lock_sock in rcu_read_lock
  sctp: change to check peer prsctp_capable when using prsctp polices
  sctp: remove prsctp_param from sctp_chunk
  sctp: move sent_count to the memory hole in sctp_chunk
  tg3: Avoid NULL pointer dereference in tg3_io_error_detected()
  x86/init: Fix cr4_init_shadow() on CR4-less machines
  MIPS: Fix detection of unsupported highmem with cache aliases
  MIPS: Malta: Fix IOCU disable switch read for MIPS64
  MIPS: Fix BUILD_ROLLBACK_PROLOGUE for microMIPS
  ...
Diffstat (limited to 'kernel/sched/cputime.c')
-rw-r--r--  kernel/sched/cputime.c | 41
1 file changed, 33 insertions, 8 deletions
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 1934f658c036..a846cf89eb96 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -263,6 +263,11 @@ void account_idle_time(cputime_t cputime)
 		cpustat[CPUTIME_IDLE] += (__force u64) cputime;
 }
 
+/*
+ * When a guest is interrupted for a longer amount of time, missed clock
+ * ticks are not redelivered later. Due to that, this function may on
+ * occasion account more time than the calling functions think has elapsed.
+ */
 static __always_inline cputime_t steal_account_process_time(cputime_t maxtime)
 {
 #ifdef CONFIG_PARAVIRT
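
The hunk above is truncated at the CONFIG_PARAVIRT guard, but the function
follows the usual clamp-and-account pattern for paravirt steal time. A minimal
sketch of that pattern (editor's illustration, not the kernel's code; the
paravirt steal-clock read and the per-runqueue bookkeeping are folded into
parameters):

static unsigned long steal_account_sketch(unsigned long maxtime,
					  unsigned long *prev_steal,
					  unsigned long steal_clock_now)
{
	/* Steal time accumulated since we last looked. */
	unsigned long steal = steal_clock_now - *prev_steal;

	/* Clamp: never account more than the caller can absorb. */
	if (steal > maxtime)
		steal = maxtime;

	/* Advance only by what was accounted; the rest stays pending. */
	*prev_steal += steal;
	return steal;
}

Passing ULONG_MAX as maxtime, as the hunks below do, effectively disables the
clamp so that all pending steal time is drained in a single call.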
@@ -371,7 +376,7 @@ static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
 	 * idle, or potentially user or system time. Due to rounding,
 	 * other time can exceed ticks occasionally.
 	 */
-	other = account_other_time(cputime);
+	other = account_other_time(ULONG_MAX);
 	if (other >= cputime)
 		return;
 	cputime -= other;
@@ -486,7 +491,7 @@ void account_process_tick(struct task_struct *p, int user_tick)
 	}
 
 	cputime = cputime_one_jiffy;
-	steal = steal_account_process_time(cputime);
+	steal = steal_account_process_time(ULONG_MAX);
 
 	if (steal >= cputime)
 		return;
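
Why ULONG_MAX rather than one tick's worth? If the guest was preempted across
several ticks, clamping steal time to a single tick defers most of it while
the current tick still gets charged as user or system time. Illustrative
numbers only, not taken from the commit:

/*
 * One tick = 10ms; the host stole 35ms of CPU since the last tick.
 *
 *   old: steal = min(35ms, cputime) = 10ms accounted as steal; the
 *        remaining 25ms stays pending and can only drain at one
 *        tick's worth per tick.
 *   new: steal = min(35ms, ULONG_MAX) = 35ms accounted at once;
 *        steal >= cputime, so this tick is not also charged to
 *        user or system time.
 *
 * The same reasoning applies to the irqtime_account_process_tick()
 * hunk above and the account_idle_ticks() hunk below.
 */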
@@ -508,13 +513,21 @@ void account_process_tick(struct task_struct *p, int user_tick)
  */
 void account_idle_ticks(unsigned long ticks)
 {
+	cputime_t cputime, steal;
 
 	if (sched_clock_irqtime) {
 		irqtime_account_idle_ticks(ticks);
 		return;
 	}
 
-	account_idle_time(jiffies_to_cputime(ticks));
+	cputime = jiffies_to_cputime(ticks);
+	steal = steal_account_process_time(ULONG_MAX);
+
+	if (steal >= cputime)
+		return;
+
+	cputime -= steal;
+	account_idle_time(cputime);
 }
 
 /*
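
Concretely, the new account_idle_ticks() discounts stolen time from what would
otherwise all be recorded as idle. A worked example with made-up values:

/*
 * ticks = 4, one tick = 10ms, so cputime = 40ms; the host stole
 * 15ms of that window.
 *
 *   steal   = 15ms                 (steal < cputime)
 *   cputime = 40ms - 15ms = 25ms   ->  account_idle_time(25ms)
 *
 * If steal >= cputime, the whole window was stolen and no idle
 * time is accounted at all.
 */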
@@ -606,19 +619,25 @@ static void cputime_adjust(struct task_cputime *curr,
 	stime = curr->stime;
 	utime = curr->utime;
 
-	if (utime == 0) {
-		stime = rtime;
+	/*
+	 * If either stime or both stime and utime are 0, assume all runtime is
+	 * userspace. Once a task gets some ticks, the monotonicity code at
+	 * 'update' will ensure things converge to the observed ratio.
+	 */
+	if (stime == 0) {
+		utime = rtime;
 		goto update;
 	}
 
-	if (stime == 0) {
-		utime = rtime;
+	if (utime == 0) {
+		stime = rtime;
 		goto update;
 	}
 
 	stime = scale_stime((__force u64)stime, (__force u64)rtime,
 			    (__force u64)(stime + utime));
 
+update:
 	/*
 	 * Make sure stime doesn't go backwards; this preserves monotonicity
 	 * for utime because rtime is monotonic.
@@ -641,7 +660,6 @@ static void cputime_adjust(struct task_cputime *curr,
 		stime = rtime - utime;
 	}
 
-update:
 	prev->stime = stime;
 	prev->utime = utime;
 out:
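
Two things happen across these last two hunks: the stime == 0 and utime == 0
shortcuts are reordered to match the new comment, and the update: label moves
above the monotonicity clamp, so even the shortcut paths pass through the
"never go backwards" checks before prev->stime and prev->utime are stored.
The scale_stime() call itself is a proportional split, conceptually
stime' = rtime * stime / (stime + utime). A sketch of that arithmetic
(editor's illustration; the kernel's scale_stime() avoids 64-bit multiply
overflow without relying on 128-bit types):

typedef unsigned long long u64;	/* stand-in for the kernel's u64 */

/* Proportional split, assuming the product fits in 128 bits. */
static u64 scale_stime_sketch(u64 stime, u64 rtime, u64 total)
{
	return (u64)(((unsigned __int128)stime * rtime) / total);
}

/*
 * Example: sampled stime = 30, utime = 10, measured rtime = 100.
 *   stime' = 100 * 30 / 40 = 75
 *   utime' = rtime - stime' = 25
 * The sampled 3:1 ratio is preserved while the sum matches rtime.
 */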
@@ -686,6 +704,13 @@ static cputime_t get_vtime_delta(struct task_struct *tsk)
 	unsigned long now = READ_ONCE(jiffies);
 	cputime_t delta, other;
 
+	/*
+	 * Unlike tick based timing, vtime based timing never has lost
+	 * ticks, so there is no need for steal time accounting to make up
+	 * for lost ticks. Vtime accounts a rounded version of actual
+	 * elapsed time. Limit account_other_time to prevent rounding
+	 * errors from causing elapsed vtime to go negative.
+	 */
 	delta = jiffies_to_cputime(now - tsk->vtime_snap);
 	other = account_other_time(delta);
 	WARN_ON_ONCE(tsk->vtime_snap_whence == VTIME_INACTIVE);
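
The clamp matters here because get_vtime_delta() goes on to return the elapsed
time minus the stolen/IRQ component; bounding account_other_time() by delta
keeps that subtraction non-negative. The function body is truncated above; the
tail below is the editor's reconstruction of the pattern, not a verbatim quote:

	delta = jiffies_to_cputime(now - tsk->vtime_snap);
	other = account_other_time(delta);	/* other <= delta by the clamp */
	tsk->vtime_snap = now;

	return delta - other;			/* therefore never negative */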