 arch/powerpc/kvm/book3s_64_mmu.c | 39 +++++++++++++++++++----------------
 1 file changed, 23 insertions(+), 16 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index 83da1f868fd5..774a253ca4e1 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -38,7 +38,7 @@
 
 static void kvmppc_mmu_book3s_64_reset_msr(struct kvm_vcpu *vcpu)
 {
-	kvmppc_set_msr(vcpu, MSR_SF);
+	kvmppc_set_msr(vcpu, vcpu->arch.intr_msr);
 }
 
 static struct kvmppc_slb *kvmppc_mmu_book3s_64_find_slbe(
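The first hunk stops hard-coding MSR_SF as the MSR a vcpu gets on reset. Once guests may run little-endian, the reset/interrupt MSR becomes per-vcpu state (vcpu->arch.intr_msr), which can carry MSR_LE on top of MSR_SF. A minimal sketch of how such a field could be populated; the predicate and call site are illustrative, not the kernel's:

	/* Sketch (assumption): intr_msr holds the MSR bits the guest expects
	 * whenever KVM synthesizes an interrupt or resets the vcpu. */
	vcpu->arch.intr_msr = MSR_SF;            /* 64-bit mode by default */
	if (guest_is_little_endian)              /* hypothetical predicate */
		vcpu->arch.intr_msr |= MSR_LE;   /* take interrupts in LE mode */
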
@@ -226,7 +226,7 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 	/* Magic page override */
 	if (unlikely(mp_ea) &&
 	    unlikely((eaddr & ~0xfffULL) == (mp_ea & ~0xfffULL)) &&
-	    !(vcpu->arch.shared->msr & MSR_PR)) {
+	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
 		gpte->eaddr = eaddr;
 		gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data);
 		gpte->raddr = vcpu->arch.magic_page_pa | (gpte->raddr & 0xfff);
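From here on, the hunks replace direct loads of vcpu->arch.shared->msr with the kvmppc_get_msr() accessor. The shared (magic) page lives in guest memory and is kept in the guest's byte order once little-endian guests are possible, so a raw field read is only correct when host and guest endianness happen to match. A hedged sketch of what the accessor has to do; the shared_big_endian flag name is an assumption here, and the kernel generates its real accessors from wrapper macros in the KVM headers:

	/* Sketch of a guest-endian shared-page read (kernel context). */
	static inline u64 kvmppc_get_msr_sketch(struct kvm_vcpu *vcpu)
	{
		if (vcpu->arch.shared_big_endian)
			return be64_to_cpu((__force __be64)vcpu->arch.shared->msr);
		return le64_to_cpu((__force __le64)vcpu->arch.shared->msr);
	}
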
@@ -269,18 +269,21 @@ do_second:
 		goto no_page_found;
 	}
 
-	if ((vcpu->arch.shared->msr & MSR_PR) && slbe->Kp)
+	if ((kvmppc_get_msr(vcpu) & MSR_PR) && slbe->Kp)
 		key = 4;
-	else if (!(vcpu->arch.shared->msr & MSR_PR) && slbe->Ks)
+	else if (!(kvmppc_get_msr(vcpu) & MSR_PR) && slbe->Ks)
 		key = 4;
 
 	for (i=0; i<16; i+=2) {
+		u64 pte0 = be64_to_cpu(pteg[i]);
+		u64 pte1 = be64_to_cpu(pteg[i + 1]);
+
 		/* Check all relevant fields of 1st dword */
-		if ((pteg[i] & v_mask) == v_val) {
+		if ((pte0 & v_mask) == v_val) {
 			/* If large page bit is set, check pgsize encoding */
 			if (slbe->large &&
 			    (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) {
-				pgsize = decode_pagesize(slbe, pteg[i+1]);
+				pgsize = decode_pagesize(slbe, pte1);
 				if (pgsize < 0)
 					continue;
 			}
@@ -297,8 +300,8 @@ do_second:
 		goto do_second;
 	}
 
-	v = pteg[i];
-	r = pteg[i+1];
+	v = be64_to_cpu(pteg[i]);
+	r = be64_to_cpu(pteg[i+1]);
 	pp = (r & HPTE_R_PP) | key;
 	if (r & HPTE_R_PP0)
 		pp |= 8;
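The PTEG is copied in from guest memory, where hashed page table entries are architecturally big-endian; converting each doubleword with be64_to_cpu() before any field test keeps the walk correct on a little-endian host. The same conversion is applied to the final v/r pair above. A self-contained sketch of the matching step, with illustrative names:

	/* Sketch: scan one PTEG (8 HPTEs, two big-endian dwords each) for an
	 * entry whose first dword matches v_val under v_mask. */
	static int find_hpte(__be64 pteg[16], u64 v_mask, u64 v_val)
	{
		int i;

		for (i = 0; i < 16; i += 2) {
			u64 pte0 = be64_to_cpu(pteg[i]); /* dword 0: valid bit, AVPN */

			if ((pte0 & v_mask) == v_val)
				return i;                /* caller decodes dword 1 */
		}
		return -1;
	}
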
@@ -310,6 +313,9 @@ do_second:
 	gpte->raddr = (r & HPTE_R_RPN & ~eaddr_mask) | (eaddr & eaddr_mask);
 	gpte->page_size = pgsize;
 	gpte->may_execute = ((r & HPTE_R_N) ? false : true);
+	if (unlikely(vcpu->arch.disable_kernel_nx) &&
+	    !(kvmppc_get_msr(vcpu) & MSR_PR))
+		gpte->may_execute = true;
 	gpte->may_read = false;
 	gpte->may_write = false;
 
@@ -342,14 +348,14 @@ do_second:
 		 * non-PAPR platforms such as mac99, and this is
 		 * what real hardware does.
 		 */
-		char __user *addr = (char __user *) &pteg[i+1];
+		char __user *addr = (char __user *) (ptegp + (i + 1) * sizeof(u64));
 		r |= HPTE_R_R;
 		put_user(r >> 8, addr + 6);
 	}
 	if (iswrite && gpte->may_write && !(r & HPTE_R_C)) {
 		/* Set the dirty flag */
 		/* Use a single byte write */
-		char __user *addr = (char __user *) &pteg[i+1];
+		char __user *addr = (char __user *) (ptegp + (i + 1) * sizeof(u64));
 		r |= HPTE_R_C;
 		put_user(r, addr + 7);
 	}
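Both write-back sites in the hunk above also stop taking the address of the local, already-byteswapped pteg[] copy and instead compute the guest address from ptegp, since the R/C update must land in guest memory in big-endian layout. The single-byte stores then hit fixed offsets inside the big-endian second dword: byte 6 holds bits 15:8 (HPTE_R_R is 0x100) and byte 7 holds bits 7:0 (HPTE_R_C is 0x80), so nothing else in the HPTE is rewritten:

	/* ptegp, i and r as in the function above; offsets assume the
	 * big-endian in-memory HPTE layout. */
	char __user *addr = (char __user *)(ptegp + (i + 1) * sizeof(u64));

	put_user(r >> 8, addr + 6);   /* byte containing HPTE_R_R (0x100) */
	put_user(r, addr + 7);        /* byte containing HPTE_R_C (0x080) */
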
@@ -479,7 +485,7 @@ static void kvmppc_mmu_book3s_64_slbia(struct kvm_vcpu *vcpu)
 		vcpu->arch.slb[i].origv = 0;
 	}
 
-	if (vcpu->arch.shared->msr & MSR_IR) {
+	if (kvmppc_get_msr(vcpu) & MSR_IR) {
 		kvmppc_mmu_flush_segments(vcpu);
 		kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
 	}
@@ -563,7 +569,7 @@ static int segment_contains_magic_page(struct kvm_vcpu *vcpu, ulong esid)
 {
 	ulong mp_ea = vcpu->arch.magic_page_ea;
 
-	return mp_ea && !(vcpu->arch.shared->msr & MSR_PR) &&
+	return mp_ea && !(kvmppc_get_msr(vcpu) & MSR_PR) &&
 		(mp_ea >> SID_SHIFT) == esid;
 }
 #endif
@@ -576,8 +582,9 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
 	u64 gvsid = esid;
 	ulong mp_ea = vcpu->arch.magic_page_ea;
 	int pagesize = MMU_PAGE_64K;
+	u64 msr = kvmppc_get_msr(vcpu);
 
-	if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
+	if (msr & (MSR_DR|MSR_IR)) {
 		slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea);
 		if (slb) {
 			gvsid = slb->vsid;
@@ -590,7 +597,7 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
 		}
 	}
 
-	switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
+	switch (msr & (MSR_DR|MSR_IR)) {
 	case 0:
 		gvsid = VSID_REAL | esid;
 		break;
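Caching the MSR once in the local msr above avoids repeated accessor calls, and the switch dispatches on the translation bits: with both MSR_DR and MSR_IR clear the guest runs in real mode, so the effective segment ID is mapped onto a reserved VSID_REAL range rather than through an SLB lookup. The remaining cases (data-only, instruction-only, both) follow in the unchanged code. A compact restatement of the case-0 idea:

	/* Sketch: with translation off (MSR_DR|MSR_IR == 0) there is no SLB
	 * lookup; the shadow MMU tags an identity mapping instead. */
	if (!(msr & (MSR_DR | MSR_IR)))
		gvsid = VSID_REAL | esid;
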
@@ -623,7 +630,7 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
 		gvsid |= VSID_64K;
 #endif
 
-	if (vcpu->arch.shared->msr & MSR_PR)
+	if (kvmppc_get_msr(vcpu) & MSR_PR)
 		gvsid |= VSID_PR;
 
 	*vsid = gvsid;
@@ -633,7 +640,7 @@ no_slb:
 	/* Catch magic page case */
 	if (unlikely(mp_ea) &&
 	    unlikely(esid == (mp_ea >> SID_SHIFT)) &&
-	    !(vcpu->arch.shared->msr & MSR_PR)) {
+	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
 		*vsid = VSID_REAL | esid;
 		return 0;
 	}