author     Varun Sethi <Varun.Sethi@freescale.com>   2012-04-25 01:26:43 +0000
committer  Alexander Graf <agraf@suse.de>            2012-05-06 16:19:08 +0200
commit     185e4188dab6456409cad66c579501dd89487188 (patch)
tree       f9d57c7a37d12de21667fdd11312ee5559820c2b /arch/powerpc
parent     6e35994d1f6831af1e5577e28c363c9137d7d597 (diff)
KVM: PPC: bookehv: Use a Macro for saving/restoring guest registers to/from their 64 bit copies.
Introduced PPC_STD/PPC_LD macros for saving/restoring guest registers to/from their 64 bit copies.

Signed-off-by: Varun Sethi <Varun.Sethi@freescale.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
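As a sketch of the effect, expanding the VCPU_SHARED_MSR call site from the first bookehv_interrupts.S hunk with the definitions added to kvm_asm.h gives:

    PPC_STD(r6, VCPU_SHARED_MSR, r11)
        /* CONFIG_64BIT */   std r6, (VCPU_SHARED_MSR)(r11)
        /* 32-bit       */   stw r6, (VCPU_SHARED_MSR + 4)(r11)

i.e. on 32-bit builds the store/load targets the word at offset + 4 of the 64-bit shared field, which is what the open-coded #ifdef blocks removed below did by hand.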
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/include/asm/kvm_asm.h     |  8
-rw-r--r--  arch/powerpc/kvm/bookehv_interrupts.S  | 24
2 files changed, 12 insertions(+), 20 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index 097815233284..7d4018dd0e11 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -20,6 +20,14 @@
#ifndef __POWERPC_KVM_ASM_H__
#define __POWERPC_KVM_ASM_H__
+#ifdef CONFIG_64BIT
+#define PPC_STD(sreg, offset, areg) std sreg, (offset)(areg)
+#define PPC_LD(treg, offset, areg) ld treg, (offset)(areg)
+#else
+#define PPC_STD(sreg, offset, areg) stw sreg, (offset+4)(areg)
+#define PPC_LD(treg, offset, areg) lwz treg, (offset+4)(areg)
+#endif
+
/* IVPR must be 64KiB-aligned. */
#define VCPU_SIZE_ORDER 4
#define VCPU_SIZE_LOG (VCPU_SIZE_ORDER + 12)
diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S
index 909e96e0650c..41d34850f826 100644
--- a/arch/powerpc/kvm/bookehv_interrupts.S
+++ b/arch/powerpc/kvm/bookehv_interrupts.S
@@ -93,11 +93,7 @@
#endif
oris r8, r6, MSR_CE@h
-#ifdef CONFIG_64BIT
- std r6, (VCPU_SHARED_MSR)(r11)
-#else
- stw r6, (VCPU_SHARED_MSR + 4)(r11)
-#endif
+ PPC_STD(r6, VCPU_SHARED_MSR, r11)
ori r8, r8, MSR_ME | MSR_RI
PPC_STL r5, VCPU_PC(r4)
@@ -335,11 +331,7 @@ _GLOBAL(kvmppc_resume_host)
stw r5, VCPU_SHARED_MAS0(r11)
mfspr r7, SPRN_MAS2
stw r6, VCPU_SHARED_MAS1(r11)
-#ifdef CONFIG_64BIT
- std r7, (VCPU_SHARED_MAS2)(r11)
-#else
- stw r7, (VCPU_SHARED_MAS2 + 4)(r11)
-#endif
+ PPC_STD(r7, VCPU_SHARED_MAS2, r11)
mfspr r5, SPRN_MAS3
mfspr r6, SPRN_MAS4
stw r5, VCPU_SHARED_MAS7_3+4(r11)
@@ -527,11 +519,7 @@ lightweight_exit:
stw r3, VCPU_HOST_MAS6(r4)
lwz r3, VCPU_SHARED_MAS0(r11)
lwz r5, VCPU_SHARED_MAS1(r11)
-#ifdef CONFIG_64BIT
- ld r6, (VCPU_SHARED_MAS2)(r11)
-#else
- lwz r6, (VCPU_SHARED_MAS2 + 4)(r11)
-#endif
+ PPC_LD(r6, VCPU_SHARED_MAS2, r11)
lwz r7, VCPU_SHARED_MAS7_3+4(r11)
lwz r8, VCPU_SHARED_MAS4(r11)
mtspr SPRN_MAS0, r3
@@ -565,11 +553,7 @@ lightweight_exit:
PPC_LL r6, VCPU_CTR(r4)
PPC_LL r7, VCPU_CR(r4)
PPC_LL r8, VCPU_PC(r4)
-#ifdef CONFIG_64BIT
- ld r9, (VCPU_SHARED_MSR)(r11)
-#else
- lwz r9, (VCPU_SHARED_MSR + 4)(r11)
-#endif
+ PPC_LD(r9, VCPU_SHARED_MSR, r11)
PPC_LL r0, VCPU_GPR(r0)(r4)
PPC_LL r1, VCPU_GPR(r1)(r4)
PPC_LL r2, VCPU_GPR(r2)(r4)