Diffstat (limited to 'arch/i386/kernel')
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c  |  7
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/powernow-k8.c   | 43
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/powernow-k8.h   | 32
-rw-r--r--  arch/i386/kernel/cpu/intel_cacheinfo.c       | 20
-rw-r--r--  arch/i386/kernel/cpu/transmeta.c             |  6
-rw-r--r--  arch/i386/kernel/machine_kexec.c             | 22
-rw-r--r--  arch/i386/kernel/mpparse.c                   | 10
-rw-r--r--  arch/i386/kernel/numaq.c                     |  9
-rw-r--r--  arch/i386/kernel/process.c                   |  2
-rw-r--r--  arch/i386/kernel/syscall_table.S             |  2
10 files changed, 100 insertions, 53 deletions
diff --git a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
index 963e17aa205d..60a9e54dd20e 100644
--- a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -442,6 +442,13 @@ acpi_cpufreq_cpu_init (
 			(u32) data->acpi_data.states[i].transition_latency);
 
 	cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu);
+	
+	/*
+	 * the first call to ->target() should result in us actually
+	 * writing something to the appropriate registers.
+	 */
+	data->resume = 1;
+	
 	return (result);
 
  err_freqfree:
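
The only functional change to acpi-cpufreq.c is setting data->resume at init
time, so that the driver's first ->target() call actually programs the P-state
registers instead of trusting a cached frequency that may no longer match the
hardware. A minimal standalone sketch of that force-first-write pattern
follows; the names and state here are made up for illustration, not taken from
the driver.

    #include <stdio.h>

    /* Hypothetical driver state mirroring the idea behind data->resume. */
    struct drv_state {
        unsigned int cached_freq;   /* what we think the hardware is set to */
        int          resume;        /* force a write on the next request    */
    };

    static void write_hw(unsigned int freq)
    {
        printf("programming hardware to %u kHz\n", freq);
    }

    static void set_target(struct drv_state *s, unsigned int freq)
    {
        /* Skip the hardware write only when the cached value is trusted. */
        if (!s->resume && s->cached_freq == freq)
            return;
        write_hw(freq);
        s->cached_freq = freq;
        s->resume = 0;
    }

    int main(void)
    {
        struct drv_state s = { .cached_freq = 1000000, .resume = 1 };

        set_target(&s, 1000000);  /* written anyway: resume forces it   */
        set_target(&s, 1000000);  /* skipped: cache now matches reality */
        return 0;
    }
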
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
index 10cc096c0ade..ab6e0611303d 100644
--- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
@@ -1,5 +1,5 @@
 /*
- *   (c) 2003, 2004 Advanced Micro Devices, Inc.
+ *   (c) 2003, 2004, 2005 Advanced Micro Devices, Inc.
  *  Your use of this code is subject to the terms and conditions of the
  *  GNU general public license version 2. See "COPYING" or
  *  http://www.gnu.org/licenses/gpl.html
@@ -44,7 +44,7 @@
 
 #define PFX "powernow-k8: "
 #define BFX PFX "BIOS error: "
-#define VERSION "version 1.40.2"
+#define VERSION "version 1.50.3"
 #include "powernow-k8.h"
 
 /* serialize freq changes  */
@@ -110,14 +110,13 @@ static int query_current_values_with_pending_wait(struct powernow_k8_data *data)
 	u32 lo, hi;
 	u32 i = 0;
 
-	lo = MSR_S_LO_CHANGE_PENDING;
-	while (lo & MSR_S_LO_CHANGE_PENDING) {
+	do {
 		if (i++ > 0x1000000) {
 			printk(KERN_ERR PFX "detected change pending stuck\n");
 			return 1;
 		}
 		rdmsr(MSR_FIDVID_STATUS, lo, hi);
-	}
+	} while (lo & MSR_S_LO_CHANGE_PENDING);
 
 	data->currvid = hi & MSR_S_HI_CURRENT_VID;
 	data->currfid = lo & MSR_S_LO_CURRENT_FID;
@@ -232,7 +231,7 @@ static int write_new_vid(struct powernow_k8_data *data, u32 vid)
 /*
  * Reduce the vid by the max of step or reqvid.
  * Decreasing vid codes represent increasing voltages:
- * vid of 0 is 1.550V, vid of 0x1e is 0.800V, vid of 0x1f is off.
+ * vid of 0 is 1.550V, vid of 0x1e is 0.800V, vid of VID_OFF is off.
  */
 static int decrease_vid_code_by_step(struct powernow_k8_data *data, u32 reqvid, u32 step)
 {
@@ -467,7 +466,7 @@ static int check_supported_cpu(unsigned int cpu)
 	eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
 	if (((eax & CPUID_USE_XFAM_XMOD) != CPUID_USE_XFAM_XMOD) ||
 	    ((eax & CPUID_XFAM) != CPUID_XFAM_K8) ||
-	    ((eax & CPUID_XMOD) > CPUID_XMOD_REV_E)) {
+	    ((eax & CPUID_XMOD) > CPUID_XMOD_REV_F)) {
 		printk(KERN_INFO PFX "Processor cpuid %x not supported\n", eax);
 		goto out;
 	}
@@ -696,6 +695,7 @@ static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned
 
 	data->irt = (data->acpi_data.states[index].control >> IRT_SHIFT) & IRT_MASK;
 	data->rvo = (data->acpi_data.states[index].control >> RVO_SHIFT) & RVO_MASK;
+	data->exttype = (data->acpi_data.states[index].control >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK;
 	data->plllock = (data->acpi_data.states[index].control >> PLL_L_SHIFT) & PLL_L_MASK;
 	data->vidmvs = 1 << ((data->acpi_data.states[index].control >> MVS_SHIFT) & MVS_MASK);
 	data->vstable = (data->acpi_data.states[index].control >> VST_SHIFT) & VST_MASK;
@@ -735,8 +735,16 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
 	}
 
 	for (i = 0; i < data->acpi_data.state_count; i++) {
-		u32 fid = data->acpi_data.states[i].control & FID_MASK;
-		u32 vid = (data->acpi_data.states[i].control >> VID_SHIFT) & VID_MASK;
+		u32 fid;
+		u32 vid;
+
+		if (data->exttype) {
+			fid = data->acpi_data.states[i].status & FID_MASK;
+			vid = (data->acpi_data.states[i].status >> VID_SHIFT) & VID_MASK;
+		} else {
+			fid = data->acpi_data.states[i].control & FID_MASK;
+			vid = (data->acpi_data.states[i].control >> VID_SHIFT) & VID_MASK;
+		}
 
 		dprintk("   %d : fid 0x%x, vid 0x%x\n", i, fid, vid);
 
@@ -753,7 +761,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
 		}
 
 		/* verify voltage is OK - BIOSs are using "off" to indicate invalid */
-		if (vid == 0x1f) {
+		if (vid == VID_OFF) {
 			dprintk("invalid vid %u, ignoring\n", vid);
 			powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID;
 			continue;
@@ -930,15 +938,6 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi
 
 	down(&fidvid_sem);
 
-	for_each_cpu_mask(i, cpu_core_map[pol->cpu]) {
-		/* make sure the sibling is initialized */
-		if (!powernow_data[i]) {
-                        ret = 0;
-                        up(&fidvid_sem);
-                        goto err_out;
-                }
-	}
-
 	powernow_k8_acpi_pst_values(data, newstate);
 
 	if (transition_frequency(data, newstate)) {
@@ -978,7 +977,7 @@ static int __init powernowk8_cpu_init(struct cpufreq_policy *pol)
 {
 	struct powernow_k8_data *data;
 	cpumask_t oldmask = CPU_MASK_ALL;
-	int rc;
+	int rc, i;
 
 	if (!check_supported_cpu(pol->cpu))
 		return -ENODEV;
@@ -1064,7 +1063,9 @@ static int __init powernowk8_cpu_init(struct cpufreq_policy *pol)
 	printk("cpu_init done, current fid 0x%x, vid 0x%x\n",
 	       data->currfid, data->currvid);
 
-	powernow_data[pol->cpu] = data;
+	for_each_cpu_mask(i, cpu_core_map[pol->cpu]) {
+		powernow_data[i] = data;
+	}
 
 	return 0;
 
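
The powernow-k8.c changes restructure the change-pending wait as a do/while so
the status MSR is read before the pending bit is tested, pick up fid/vid from
the ACPI status field when the extended PST interface is reported
(data->exttype), and register the same powernow_data for every core in
cpu_core_map at init time rather than rejecting transitions when a sibling is
uninitialized. The bounded polling loop has the same shape as the standalone
sketch below; the read function and spin limit are stand-ins, not the driver's
MSR access.

    #include <stdio.h>

    #define CHANGE_PENDING 0x80000000u
    #define MAX_SPINS      0x1000000u

    /* Stand-in for rdmsr(MSR_FIDVID_STATUS, lo, hi); returns a fake status. */
    static unsigned int read_status(void)
    {
        static int calls;
        return (calls++ < 3) ? CHANGE_PENDING : 0;  /* clears after a few reads */
    }

    /* Poll until the pending bit clears, but give up after MAX_SPINS reads. */
    static int wait_not_pending(void)
    {
        unsigned int lo, i = 0;

        do {
            if (i++ > MAX_SPINS) {
                fprintf(stderr, "change pending stuck\n");
                return 1;
            }
            lo = read_status();
        } while (lo & CHANGE_PENDING);

        return 0;
    }

    int main(void)
    {
        return wait_not_pending();
    }
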
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.h b/arch/i386/kernel/cpu/cpufreq/powernow-k8.h
index 9ed5bf221cb7..b1e85bb36396 100644
--- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.h
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.h
@@ -1,5 +1,5 @@
 /*
- *  (c) 2003, 2004 Advanced Micro Devices, Inc.
+ *  (c) 2003, 2004, 2005 Advanced Micro Devices, Inc.
  *  Your use of this code is subject to the terms and conditions of the
  *  GNU general public license version 2. See "COPYING" or
  *  http://www.gnu.org/licenses/gpl.html
@@ -19,6 +19,7 @@ struct powernow_k8_data {
 	u32 vidmvs;  /* usable value calculated from mvs */
 	u32 vstable; /* voltage stabilization time, units 20 us */
 	u32 plllock; /* pll lock time, units 1 us */
+        u32 exttype; /* extended interface = 1 */
 
 	/* keep track of the current fid / vid */
 	u32 currvid, currfid;
@@ -41,7 +42,7 @@ struct powernow_k8_data {
 #define CPUID_XFAM			0x0ff00000	/* extended family */
 #define CPUID_XFAM_K8			0
 #define CPUID_XMOD			0x000f0000	/* extended model */
-#define CPUID_XMOD_REV_E		0x00020000
+#define CPUID_XMOD_REV_F		0x00040000
 #define CPUID_USE_XFAM_XMOD		0x00000f00
 #define CPUID_GET_MAX_CAPABILITIES	0x80000000
 #define CPUID_FREQ_VOLT_CAPABILITIES	0x80000007
@@ -57,25 +58,26 @@ struct powernow_k8_data {
 
 /* Field definitions within the FID VID Low Control MSR : */
 #define MSR_C_LO_INIT_FID_VID     0x00010000
-#define MSR_C_LO_NEW_VID          0x00001f00
-#define MSR_C_LO_NEW_FID          0x0000002f
+#define MSR_C_LO_NEW_VID          0x00003f00
+#define MSR_C_LO_NEW_FID          0x0000003f
 #define MSR_C_LO_VID_SHIFT        8
 
 /* Field definitions within the FID VID High Control MSR : */
-#define MSR_C_HI_STP_GNT_TO       0x000fffff
+#define MSR_C_HI_STP_GNT_TO 	  0x000fffff
 
 /* Field definitions within the FID VID Low Status MSR : */
-#define MSR_S_LO_CHANGE_PENDING   0x80000000	/* cleared when completed */
-#define MSR_S_LO_MAX_RAMP_VID     0x1f000000
+#define MSR_S_LO_CHANGE_PENDING   0x80000000   /* cleared when completed */
+#define MSR_S_LO_MAX_RAMP_VID     0x3f000000
 #define MSR_S_LO_MAX_FID          0x003f0000
 #define MSR_S_LO_START_FID        0x00003f00
 #define MSR_S_LO_CURRENT_FID      0x0000003f
 
 /* Field definitions within the FID VID High Status MSR : */
-#define MSR_S_HI_MAX_WORKING_VID  0x001f0000
-#define MSR_S_HI_START_VID        0x00001f00
-#define MSR_S_HI_CURRENT_VID      0x0000001f
-#define MSR_C_HI_STP_GNT_BENIGN   0x00000001
+#define MSR_S_HI_MIN_WORKING_VID  0x3f000000
+#define MSR_S_HI_MAX_WORKING_VID  0x003f0000
+#define MSR_S_HI_START_VID        0x00003f00
+#define MSR_S_HI_CURRENT_VID      0x0000003f
+#define MSR_C_HI_STP_GNT_BENIGN	  0x00000001
 
 /*
  * There are restrictions frequencies have to follow:
@@ -99,13 +101,15 @@ struct powernow_k8_data {
 #define MIN_FREQ_RESOLUTION  200 /* fids jump by 2 matching freq jumps by 200 */
 
 #define MAX_FID 0x2a	/* Spec only gives FID values as far as 5 GHz */
-#define LEAST_VID 0x1e	/* Lowest (numerically highest) useful vid value */
+#define LEAST_VID 0x3e	/* Lowest (numerically highest) useful vid value */
 
 #define MIN_FREQ 800	/* Min and max freqs, per spec */
 #define MAX_FREQ 5000
 
 #define INVALID_FID_MASK 0xffffffc1  /* not a valid fid if these bits are set */
-#define INVALID_VID_MASK 0xffffffe0  /* not a valid vid if these bits are set */
+#define INVALID_VID_MASK 0xffffffc0  /* not a valid vid if these bits are set */
+
+#define VID_OFF 0x3f
 
 #define STOP_GRANT_5NS 1 /* min poss memory access latency for voltage change */
 
@@ -121,12 +125,14 @@ struct powernow_k8_data {
                                                                                                     
 #define IRT_SHIFT      30
 #define RVO_SHIFT      28
+#define EXT_TYPE_SHIFT 27
 #define PLL_L_SHIFT    20
 #define MVS_SHIFT      18
 #define VST_SHIFT      11
 #define VID_SHIFT       6
 #define IRT_MASK        3
 #define RVO_MASK        3
+#define EXT_TYPE_MASK   1
 #define PLL_L_MASK   0x7f
 #define MVS_MASK        3
 #define VST_MASK     0x7f
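
The header changes widen every VID field from five bits (0x1f) to six bits
(0x3f) for the rev F parts, add the EXT_TYPE bit carried in the ACPI control
value, and give the all-ones "off" code a name, VID_OFF. A standalone decode
of a PST control dword using these shifts and masks is sketched below; the
sample value and the 6-bit FID_MASK/VID_MASK definitions are assumptions for
illustration, not copied from the header.

    #include <stdio.h>

    #define IRT_SHIFT      30
    #define RVO_SHIFT      28
    #define EXT_TYPE_SHIFT 27
    #define PLL_L_SHIFT    20
    #define MVS_SHIFT      18
    #define VST_SHIFT      11
    #define VID_SHIFT       6
    #define IRT_MASK        3
    #define RVO_MASK        3
    #define EXT_TYPE_MASK   1
    #define PLL_L_MASK   0x7f
    #define MVS_MASK        3
    #define VST_MASK     0x7f
    #define VID_MASK     0x3f   /* assumed 6-bit width */
    #define FID_MASK     0x3f   /* assumed 6-bit width */

    int main(void)
    {
        unsigned int control = 0x12345678;  /* arbitrary sample value */

        printf("irt     %u\n",  (control >> IRT_SHIFT)      & IRT_MASK);
        printf("rvo     %u\n",  (control >> RVO_SHIFT)      & RVO_MASK);
        printf("exttype %u\n",  (control >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK);
        printf("plllock %u\n",  (control >> PLL_L_SHIFT)    & PLL_L_MASK);
        printf("mvs     %u\n",  (control >> MVS_SHIFT)      & MVS_MASK);
        printf("vst     %u\n",  (control >> VST_SHIFT)      & VST_MASK);
        printf("vid     0x%x\n", (control >> VID_SHIFT)     & VID_MASK);
        printf("fid     0x%x\n",  control                   & FID_MASK);
        return 0;
    }
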
diff --git a/arch/i386/kernel/cpu/intel_cacheinfo.c b/arch/i386/kernel/cpu/intel_cacheinfo.c
index 1d768b263269..6c55b50cf048 100644
--- a/arch/i386/kernel/cpu/intel_cacheinfo.c
+++ b/arch/i386/kernel/cpu/intel_cacheinfo.c
@@ -128,7 +128,7 @@ static int __devinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_le
 	cpuid_count(4, index, &eax, &ebx, &ecx, &edx);
 	cache_eax.full = eax;
 	if (cache_eax.split.type == CACHE_TYPE_NULL)
-		return -1;
+		return -EIO; /* better error ? */
 
 	this_leaf->eax.full = eax;
 	this_leaf->ebx.full = ebx;
@@ -334,6 +334,7 @@ static int __devinit detect_cache_attributes(unsigned int cpu)
 	struct _cpuid4_info	*this_leaf;
 	unsigned long 		j;
 	int 			retval;
+	cpumask_t		oldmask;
 
 	if (num_cache_leaves == 0)
 		return -ENOENT;
@@ -345,19 +346,26 @@ static int __devinit detect_cache_attributes(unsigned int cpu)
 	memset(cpuid4_info[cpu], 0,
 	    sizeof(struct _cpuid4_info) * num_cache_leaves);
 
+	oldmask = current->cpus_allowed;
+	retval = set_cpus_allowed(current, cpumask_of_cpu(cpu));
+	if (retval)
+		goto out;
+
 	/* Do cpuid and store the results */
+	retval = 0;
 	for (j = 0; j < num_cache_leaves; j++) {
 		this_leaf = CPUID4_INFO_IDX(cpu, j);
 		retval = cpuid4_cache_lookup(j, this_leaf);
 		if (unlikely(retval < 0))
-			goto err_out;
+			break;
 		cache_shared_cpu_map_setup(cpu, j);
 	}
-	return 0;
+	set_cpus_allowed(current, oldmask);
 
-err_out:
-	free_cache_attributes(cpu);
-	return -ENOMEM;
+out:
+	if (retval)
+		free_cache_attributes(cpu);
+	return retval;
 }
 
 #ifdef CONFIG_SYSFS
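
detect_cache_attributes() now binds the calling task to the CPU being probed
before issuing cpuid, restores the saved affinity afterwards, and propagates
the real return code (with cpuid4_cache_lookup() returning -EIO instead of a
bare -1) rather than mapping every failure to -ENOMEM. The pin, probe, restore
pattern looks like this userspace analogue built on sched_setaffinity(); the
kernel path uses set_cpus_allowed(), and the probe function here is a
placeholder.

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    /* Run fn() with the current thread bound to `cpu`, then restore affinity. */
    static int run_on_cpu(int cpu, int (*fn)(void))
    {
        cpu_set_t oldmask, newmask;
        int ret;

        if (sched_getaffinity(0, sizeof(oldmask), &oldmask))
            return -1;

        CPU_ZERO(&newmask);
        CPU_SET(cpu, &newmask);
        if (sched_setaffinity(0, sizeof(newmask), &newmask))
            return -1;

        ret = fn();

        sched_setaffinity(0, sizeof(oldmask), &oldmask);  /* always restore */
        return ret;
    }

    static int probe(void)
    {
        printf("probing on cpu %d\n", sched_getcpu());
        return 0;
    }

    int main(void)
    {
        return run_on_cpu(0, probe);
    }
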
diff --git a/arch/i386/kernel/cpu/transmeta.c b/arch/i386/kernel/cpu/transmeta.c
index f57e5ee94943..fc426380366b 100644
--- a/arch/i386/kernel/cpu/transmeta.c
+++ b/arch/i386/kernel/cpu/transmeta.c
@@ -76,6 +76,12 @@ static void __init init_transmeta(struct cpuinfo_x86 *c)
 #define USER686 (X86_FEATURE_TSC|X86_FEATURE_CX8|X86_FEATURE_CMOV)
         if ( c->x86 == 5 && (c->x86_capability[0] & USER686) == USER686 )
 		c->x86 = 6;
+
+#ifdef CONFIG_SYSCTL
+	/* randomize_va_space slows us down enormously;
+	   it probably triggers retranslation of x86->native bytecode */
+	randomize_va_space = 0;
+#endif
 }
 
 static void transmeta_identify(struct cpuinfo_x86 * c)
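
init_transmeta() now clears randomize_va_space by default, since per-exec
address-space randomization keeps invalidating Crusoe's x86-to-native
translations. The setting remains an ordinary sysctl, so it can still be
inspected or re-enabled from userspace; a small check, assuming the usual
/proc/sys/kernel/randomize_va_space path:

    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen("/proc/sys/kernel/randomize_va_space", "r");
        int val;

        if (!f) {
            perror("randomize_va_space");
            return 1;
        }
        if (fscanf(f, "%d", &val) != 1)
            val = -1;                     /* unreadable value */
        fclose(f);

        printf("randomize_va_space = %d\n", val);
        return 0;
    }
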
diff --git a/arch/i386/kernel/machine_kexec.c b/arch/i386/kernel/machine_kexec.c
index 52ed18d8b511..cb699a2aa1f8 100644
--- a/arch/i386/kernel/machine_kexec.c
+++ b/arch/i386/kernel/machine_kexec.c
@@ -16,6 +16,7 @@
 #include <asm/io.h>
 #include <asm/apic.h>
 #include <asm/cpufeature.h>
+#include <asm/desc.h>
 
 static inline unsigned long read_cr3(void)
 {
@@ -90,33 +91,32 @@ static void identity_map_page(unsigned long address)
 }
 #endif
 
-
 static void set_idt(void *newidt, __u16 limit)
 {
-	unsigned char curidt[6];
+	struct Xgt_desc_struct curidt;
 
 	/* ia32 supports unaliged loads & stores */
-	(*(__u16 *)(curidt)) = limit;
-	(*(__u32 *)(curidt +2)) = (unsigned long)(newidt);
+	curidt.size    = limit;
+	curidt.address = (unsigned long)newidt;
 
 	__asm__ __volatile__ (
-		"lidt %0\n"
-		: "=m" (curidt)
+		"lidtl %0\n"
+		: : "m" (curidt)
 		);
 };
 
 
 static void set_gdt(void *newgdt, __u16 limit)
 {
-	unsigned char curgdt[6];
+	struct Xgt_desc_struct curgdt;
 
 	/* ia32 supports unaligned loads & stores */
-	(*(__u16 *)(curgdt)) = limit;
-	(*(__u32 *)(curgdt +2)) = (unsigned long)(newgdt);
+	curgdt.size    = limit;
+	curgdt.address = (unsigned long)newgdt;
 
 	__asm__ __volatile__ (
-		"lgdt %0\n"
-		: "=m" (curgdt)
+		"lgdtl %0\n"
+		: : "m" (curgdt)
 		);
 };
 
diff --git a/arch/i386/kernel/mpparse.c b/arch/i386/kernel/mpparse.c
index af917f609c7d..ce838abb27d8 100644
--- a/arch/i386/kernel/mpparse.c
+++ b/arch/i386/kernel/mpparse.c
@@ -1116,7 +1116,15 @@ int mp_register_gsi (u32 gsi, int edge_level, int active_high_low)
 		 */
 		int irq = gsi;
 		if (gsi < MAX_GSI_NUM) {
-			gsi = pci_irq++;
+			if (gsi > 15)
+				gsi = pci_irq++;
+#ifdef CONFIG_ACPI_BUS
+			/*
+			 * Don't assign IRQ used by ACPI SCI
+			 */
+			if (gsi == acpi_fadt.sci_int)
+				gsi = pci_irq++;
+#endif
 			gsi_to_irq[irq] = gsi;
 		} else {
 			printk(KERN_ERR "GSI %u is too high\n", gsi);
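
mp_register_gsi() now leaves GSIs 0 through 15 on their legacy ISA IRQs and
skips over the ACPI SCI when handing out pci_irq numbers, so the SCI's
interrupt is never reassigned to a PCI device. The assignment decision reduces
to roughly the simulation below; the starting pci_irq and the SCI number are
sample values, not read from the FADT.

    #include <stdio.h>

    #define MAX_GSI_NUM 4096

    static unsigned int pci_irq = 16;   /* next free IRQ to hand out  */
    static unsigned int sci_int = 9;    /* sample ACPI SCI interrupt  */

    static unsigned int assign_irq(unsigned int gsi)
    {
        if (gsi >= MAX_GSI_NUM)
            return gsi;            /* too high; caller reports an error */
        if (gsi > 15)
            gsi = pci_irq++;       /* remap above the legacy ISA range  */
        if (gsi == sci_int)
            gsi = pci_irq++;       /* never hand out the SCI's IRQ      */
        return gsi;
    }

    int main(void)
    {
        printf("gsi 4  -> irq %u\n", assign_irq(4));
        printf("gsi 20 -> irq %u\n", assign_irq(20));
        printf("gsi 21 -> irq %u\n", assign_irq(21));
        return 0;
    }
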
diff --git a/arch/i386/kernel/numaq.c b/arch/i386/kernel/numaq.c
index e51edf0a6564..5f5b075f860a 100644
--- a/arch/i386/kernel/numaq.c
+++ b/arch/i386/kernel/numaq.c
@@ -31,6 +31,7 @@
 #include <linux/nodemask.h>
 #include <asm/numaq.h>
 #include <asm/topology.h>
+#include <asm/processor.h>
 
 #define	MB_TO_PAGES(addr) ((addr) << (20 - PAGE_SHIFT))
 
@@ -77,3 +78,11 @@ int __init get_memcfg_numaq(void)
 	smp_dump_qct();
 	return 1;
 }
+
+static int __init numaq_dsc_disable(void)
+{
+	printk(KERN_DEBUG "NUMAQ: disabling TSC\n");
+	tsc_disable = 1;
+	return 0;
+}
+core_initcall(numaq_dsc_disable);
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index d9492058aaf3..e3f362e8af5b 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -917,6 +917,8 @@ asmlinkage int sys_get_thread_area(struct user_desc __user *u_info)
 	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
 		return -EINVAL;
 
+	memset(&info, 0, sizeof(info));
+
 	desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
 
 	info.entry_number = idx;
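
In sys_get_thread_area(), zeroing info before filling it in guarantees that
padding bytes and any fields left untouched do not carry stale kernel stack
contents out through the copy to userspace. The same hygiene applies to any
partially populated struct that is copied to a less trusted consumer, for
example:

    #include <stdio.h>
    #include <string.h>

    struct reply {
        unsigned char type;    /* set below                      */
        /* 3 bytes of padding typically land here                */
        unsigned int  value;   /* set below                      */
        unsigned int  flags;   /* NOT set below: must be zeroed  */
    };

    static void fill_reply(struct reply *r)
    {
        memset(r, 0, sizeof(*r));  /* no stale bytes escape, padding included */
        r->type  = 1;
        r->value = 42;
    }

    int main(void)
    {
        struct reply r;

        fill_reply(&r);
        printf("type=%u value=%u flags=%u\n", r.type, r.value, r.flags);
        return 0;
    }
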
diff --git a/arch/i386/kernel/syscall_table.S b/arch/i386/kernel/syscall_table.S
index 468500a7e894..9b21a31d4f4e 100644
--- a/arch/i386/kernel/syscall_table.S
+++ b/arch/i386/kernel/syscall_table.S
@@ -251,7 +251,7 @@ ENTRY(sys_call_table)
 	.long sys_io_submit
 	.long sys_io_cancel
 	.long sys_fadvise64	/* 250 */
-	.long sys_set_zone_reclaim
+	.long sys_ni_syscall
 	.long sys_exit_group
 	.long sys_lookup_dcookie
 	.long sys_epoll_create