| author | Catalin Marinas <catalin.marinas@arm.com> | 2009-07-24 12:32:57 +0100 |
|---|---|---|
| committer | Catalin Marinas <catalin.marinas@arm.com> | 2009-07-24 12:32:57 +0100 |
| commit | 07f33a035ddda78095bed64f39db54334776841d (patch) | |
| tree | 279fca43986fe10f7dc4c7b05c851f79ebd7cb0d /arch/arm/vfp | |
| parent | 8b592783a2e8b7721a99730bd549aab5208f36af (diff) | |
Thumb-2: Implement the unified VFP support
This patch converts the VFP assembly files to the ARM/Thumb-2 unified
assembly syntax, replacing the ARM-only computed-branch jump tables
(`add pc, pc, ...`) with the Thumb-2-safe `tbl_branch` macro.
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
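As background: with `.syntax unified`, a single assembly source can build for either ARM or Thumb-2, with conditional execution expressed through `it` blocks that the assembler checks in both states but only encodes in Thumb-2. A minimal sketch, not from the patch (the `clamp_demo` label is hypothetical):

```asm
	.syntax unified			@ accept unified ARM/Thumb-2 mnemonics
	.text
clamp_demo:				@ hypothetical label, not part of vfphw.S
	cmp	r0, #0
	it	lt			@ required in Thumb-2, emits no code in ARM
	movlt	r0, #0			@ conditional move, legal in both states
	bx	lr
```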
Diffstat (limited to 'arch/arm/vfp')
| -rw-r--r-- | arch/arm/vfp/vfphw.S | 48 |
1 file changed, 32 insertions(+), 16 deletions(-)
```diff
diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S
index 1aeae38725dd..66dc2d03b7fc 100644
--- a/arch/arm/vfp/vfphw.S
+++ b/arch/arm/vfp/vfphw.S
@@ -209,40 +209,55 @@ ENDPROC(vfp_save_state)
 last_VFP_context_address:
 	.word	last_VFP_context
 
-ENTRY(vfp_get_float)
-	add	pc, pc, r0, lsl #3
+	.macro	tbl_branch, base, tmp, shift
+#ifdef CONFIG_THUMB2_KERNEL
+	adr	\tmp, 1f
+	add	\tmp, \tmp, \base, lsl \shift
+	mov	pc, \tmp
+#else
+	add	pc, pc, \base, lsl \shift
 	mov	r0, r0
+#endif
+1:
+	.endm
+
+ENTRY(vfp_get_float)
+	tbl_branch r0, r3, #3
 	.irp	dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
-	mrc	p10, 0, r0, c\dr, c0, 0	@ fmrs	r0, s0
+1:	mrc	p10, 0, r0, c\dr, c0, 0	@ fmrs	r0, s0
 	mov	pc, lr
-	mrc	p10, 0, r0, c\dr, c0, 4	@ fmrs	r0, s1
+	.org	1b + 8
+1:	mrc	p10, 0, r0, c\dr, c0, 4	@ fmrs	r0, s1
 	mov	pc, lr
+	.org	1b + 8
 	.endr
 ENDPROC(vfp_get_float)
 
 ENTRY(vfp_put_float)
-	add	pc, pc, r1, lsl #3
-	mov	r0, r0
+	tbl_branch r1, r3, #3
 	.irp	dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
-	mcr	p10, 0, r0, c\dr, c0, 0	@ fmsr	r0, s0
+1:	mcr	p10, 0, r0, c\dr, c0, 0	@ fmsr	r0, s0
 	mov	pc, lr
-	mcr	p10, 0, r0, c\dr, c0, 4	@ fmsr	r0, s1
+	.org	1b + 8
+1:	mcr	p10, 0, r0, c\dr, c0, 4	@ fmsr	r0, s1
 	mov	pc, lr
+	.org	1b + 8
 	.endr
 ENDPROC(vfp_put_float)
 
 ENTRY(vfp_get_double)
-	add	pc, pc, r0, lsl #3
-	mov	r0, r0
+	tbl_branch r0, r3, #3
 	.irp	dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
-	fmrrd	r0, r1, d\dr
+1:	fmrrd	r0, r1, d\dr
 	mov	pc, lr
+	.org	1b + 8
 	.endr
 #ifdef CONFIG_VFPv3
 	@ d16 - d31 registers
 	.irp	dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
-	mrrc	p11, 3, r0, r1, c\dr	@ fmrrd	r0, r1, d\dr
+1:	mrrc	p11, 3, r0, r1, c\dr	@ fmrrd	r0, r1, d\dr
 	mov	pc, lr
+	.org	1b + 8
 	.endr
 #endif
@@ -253,17 +268,18 @@ ENTRY(vfp_get_double)
 ENDPROC(vfp_get_double)
 
 ENTRY(vfp_put_double)
-	add	pc, pc, r2, lsl #3
-	mov	r0, r0
+	tbl_branch r2, r3, #3
 	.irp	dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
-	fmdrr	d\dr, r0, r1
+1:	fmdrr	d\dr, r0, r1
 	mov	pc, lr
+	.org	1b + 8
 	.endr
 #ifdef CONFIG_VFPv3
 	@ d16 - d31 registers
 	.irp	dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
-	mcrr	p11, 3, r1, r2, c\dr	@ fmdrr	r1, r2, d\dr
+1:	mcrr	p11, 3, r1, r2, c\dr	@ fmdrr	r1, r2, d\dr
 	mov	pc, lr
+	.org	1b + 8
 	.endr
 #endif
 ENDPROC(vfp_put_double)
```
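The crux of the change: the old dispatch `add pc, pc, r0, lsl #3` assumes ARM state, where reading `pc` yields the current instruction's address plus 8 and every jump-table entry is exactly two 4-byte instructions. Neither holds in Thumb-2, where instructions are 2 or 4 bytes wide, so `tbl_branch` computes the entry address with `adr` instead, and `.org 1b + 8` pads each `.irp` iteration back to a fixed 8-byte stride. A standalone sketch of the same pattern, with a hypothetical `demo` routine (only the `tbl_branch` body mirrors the patch):

```asm
	.syntax unified
	.text

	.macro	tbl_branch, base, tmp, shift	@ Thumb-2-safe arm of the patch's macro
	adr	\tmp, 1f			@ 1f = first table entry, just below
	add	\tmp, \tmp, \base, lsl \shift	@ entry address = table + index * stride
	mov	pc, \tmp			@ jump into the table
1:
	.endm

demo:						@ hypothetical: r0 = 10 * r0 for r0 in 0..3
	tbl_branch r0, r3, #3			@ stride = 1 << 3 = 8 bytes per entry
	.irp	n, 0, 1, 2, 3
1:	movs	r0, #(10 * \n)			@ entry body: 2 or 4 bytes in Thumb-2
	bx	lr
	.org	1b + 8				@ pad each entry to exactly 8 bytes
	.endr
```

For ARM-only builds the patch keeps the cheaper `add pc, pc` form behind `#ifdef CONFIG_THUMB2_KERNEL`, with `mov r0, r0` as the filler that makes entry 0 land 8 bytes past the `add`.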