 * ARM micro operations
 *
 * Copyright (c) 2003 Fabrice Bellard
+ * Copyright (c) 2005 CodeSourcery, LLC
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
    cpu_loop_exit();
}

-/* thread support */
-
-spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
-
-void cpu_lock(void)
-{
-    spin_lock(&global_cpu_lock);
-}
-
-void cpu_unlock(void)
-{
-    spin_unlock(&global_cpu_lock);
-}
+/* VFP support.  We follow the convention used for VFP instructions:
+   single-precision routines have an "s" suffix, double-precision
+   routines a "d" suffix.  */
+
+#define VFP_OP(name, p) void OPPROTO op_vfp_##name##p(void)
+
+#define VFP_BINOP(name, op) \
+VFP_OP(name, s) \
+{ \
+    FT0s = FT0s op FT1s; \
+} \
+VFP_OP(name, d) \
+{ \
+    FT0d = FT0d op FT1d; \
+}
+VFP_BINOP(add, +)
+VFP_BINOP(sub, -)
+VFP_BINOP(mul, *)
+VFP_BINOP(div, /)
+#undef VFP_BINOP
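
For readers unfamiliar with the token pasting, here is a standalone sketch (not part of the patch) of the pair of micro-ops that VFP_BINOP(add, +) stamps out. FT0s, FT1s, FT0d and FT1d are the VFP working values used by the real ops; the toy definitions below exist only so the sketch compiles on its own, and OPPROTO is elided.

    #include <stdio.h>

    static float  FT0s, FT1s;   /* toy stand-ins for the single-precision working values */
    static double FT0d, FT1d;   /* toy stand-ins for the double-precision working values */

    /* What the preprocessor produces for VFP_BINOP(add, +), OPPROTO elided. */
    void op_vfp_adds(void) { FT0s = FT0s + FT1s; }
    void op_vfp_addd(void) { FT0d = FT0d + FT1d; }

    int main(void)
    {
        FT0s = 1.5f;
        FT1s = 2.25f;
        op_vfp_adds();
        printf("%f\n", FT0s);   /* prints 3.750000 */
        return 0;
    }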
+
+#define VFP_HELPER(name) \
+VFP_OP(name, s) \
+{ \
+    do_vfp_##name##s(); \
+} \
+VFP_OP(name, d) \
+{ \
+    do_vfp_##name##d(); \
+}
+VFP_HELPER(abs)
+VFP_HELPER(sqrt)
+VFP_HELPER(cmp)
+VFP_HELPER(cmpe)
+#undef VFP_HELPER
+
+/* XXX: Will this do the right thing for NaNs?  It should invert the
+   sign bit without looking at the rest of the value.  */
+VFP_OP(neg, s)
+{
+    FT0s = -FT0s;
+}
+
+VFP_OP(neg, d)
+{
+    FT0d = -FT0d;
+}
+
+VFP_OP(F1_ld0, s)
+{
+    FT1s = 0.0f;
+}
+
+VFP_OP(F1_ld0, d)
+{
+    FT1d = 0.0;
+}
+
+/* Helper routines to perform bitwise copies between float and int.  */
+static inline float vfp_itos(uint32_t i)
+{
+    union {
+        uint32_t i;
+        float s;
+    } v;
+
+    v.i = i;
+    return v.s;
+}
+
+static inline uint32_t vfp_stoi(float s)
+{
+    union {
+        uint32_t i;
+        float s;
+    } v;
+
+    v.s = s;
+    return v.i;
+}
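
These two helpers do not convert values; they reinterpret the same 32 bits as either an integer or a single-precision float, which is how raw VFP register contents are shuttled through the integer temporaries. A standalone sketch of the same union-based bit copy, assuming the host's float is IEEE 754 binary32 (the helper name float_bits is made up for this example):

    #include <stdint.h>
    #include <stdio.h>

    /* Same union-based bit copy as vfp_stoi, under a made-up name. */
    static uint32_t float_bits(float s)
    {
        union {
            uint32_t i;
            float s;
        } v;

        v.s = s;
        return v.i;
    }

    int main(void)
    {
        /* 1.0f is 0x3f800000 in IEEE 754 single precision. */
        printf("%08x\n", (unsigned)float_bits(1.0f));   /* prints 3f800000 */
        return 0;
    }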
+
+/* Integer to float conversion.  */
+VFP_OP(uito, s)
+{
+    FT0s = (float)(uint32_t)vfp_stoi(FT0s);
+}
+
+VFP_OP(uito, d)
+{
+    FT0d = (double)(uint32_t)vfp_stoi(FT0s);
+}
+
+VFP_OP(sito, s)
+{
+    FT0s = (float)(int32_t)vfp_stoi(FT0s);
+}
+
+VFP_OP(sito, d)
+{
+    FT0d = (double)(int32_t)vfp_stoi(FT0s);
+}
+
+/* Float to integer conversion.  */
+VFP_OP(toui, s)
+{
+    FT0s = vfp_itos((uint32_t)FT0s);
+}
+
+VFP_OP(toui, d)
+{
+    FT0s = vfp_itos((uint32_t)FT0d);
+}
+
+VFP_OP(tosi, s)
+{
+    FT0s = vfp_itos((int32_t)FT0s);
+}
+
+VFP_OP(tosi, d)
+{
+    FT0s = vfp_itos((int32_t)FT0d);
+}
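
In each of these ops the 32-bit integer operand or result passes through FT0s as a raw bit pattern: vfp_stoi/vfp_itos unpack and repack the bits, and the cast in the middle performs the actual numeric conversion. A standalone sketch of the single-precision signed case, assuming the usual two's-complement reinterpretation (the name sito_s is made up for this example):

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the body of op_vfp_sitos: reinterpret the bits, then convert. */
    static float sito_s(uint32_t raw_bits)
    {
        /* Assumes the usual two's-complement reinterpretation of 0xffffffff as -1. */
        return (float)(int32_t)raw_bits;
    }

    int main(void)
    {
        printf("%f\n", sito_s(0xffffffffu));   /* prints -1.000000 */
        return 0;
    }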
+
+/* TODO: Set rounding mode properly.  */
+VFP_OP(touiz, s)
+{
+    FT0s = vfp_itos((uint32_t)FT0s);
+}
+
+VFP_OP(touiz, d)
+{
+    FT0s = vfp_itos((uint32_t)FT0d);
+}
+
+VFP_OP(tosiz, s)
+{
+    FT0s = vfp_itos((int32_t)FT0s);
+}
+
+VFP_OP(tosiz, d)
+{
+    FT0s = vfp_itos((int32_t)FT0d);
+}
+
+/* Floating point conversion.  */
+VFP_OP(fcvtd, s)
+{
+    FT0d = (double)FT0s;
+}
+
+VFP_OP(fcvts, d)
+{
+    FT0s = (float)FT0d;
+}
+
+/* Get and put values from registers.  */
+VFP_OP(getreg_F0, d)
+{
+    FT0d = *(double *)((char *) env + PARAM1);
+}
+
+VFP_OP(getreg_F0, s)
+{
+    FT0s = *(float *)((char *) env + PARAM1);
+}
+
+VFP_OP(getreg_F1, d)
+{
+    FT1d = *(double *)((char *) env + PARAM1);
+}
+
+VFP_OP(getreg_F1, s)
+{
+    FT1s = *(float *)((char *) env + PARAM1);
+}
+
+VFP_OP(setreg_F0, d)
+{
+    *(double *)((char *) env + PARAM1) = FT0d;
+}
+
+VFP_OP(setreg_F0, s)
+{
+    *(float *)((char *) env + PARAM1) = FT0s;
+}
+
+VFP_OP(foobar, d)
+{
+    FT0d = env->vfp.regs.s[3];
+}
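
PARAM1 is the op's constant parameter patched in at translation time; these ops treat it as a byte offset from env, the CPU state pointer, to the VFP register being read or written. The standalone sketch below shows the same pointer arithmetic on a made-up struct, with the offset computed via offsetof:

    #include <stddef.h>
    #include <stdio.h>

    struct toy_state {
        int dummy;
        float s_regs[32];   /* stand-in for a register file inside the CPU state */
    };

    int main(void)
    {
        struct toy_state st = { 0, { 0 } };
        size_t offset = offsetof(struct toy_state, s_regs[3]);

        st.s_regs[3] = 2.5f;
        /* Same access pattern as the getreg/setreg ops: byte offset from the base. */
        printf("%f\n", *(float *)((char *)&st + offset));   /* prints 2.500000 */
        return 0;
    }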
+
+void OPPROTO op_vfp_movl_T0_fpscr(void)
+{
+    do_vfp_get_fpscr();
+}
+
+void OPPROTO op_vfp_movl_T0_fpscr_flags(void)
+{
+    T0 = env->vfp.fpscr & (0xf << 28);
+}
+
+void OPPROTO op_vfp_movl_fpscr_T0(void)
+{
+    do_vfp_set_fpscr();
+}
+
+/* Move between FT0s and T0.  */
+void OPPROTO op_vfp_mrs(void)
+{
+    T0 = vfp_stoi(FT0s);
+}
+
+void OPPROTO op_vfp_msr(void)
+{
+    FT0s = vfp_itos(T0);
+}
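
The _flags variant masks FPSCR down to bits 31..28, which hold the N, Z, C and V condition flags. A tiny standalone check of that mask (the FPSCR value below is made up):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t fpscr = 0x60000013;            /* made-up FPSCR value: Z and C set */
        uint32_t flags = fpscr & (0xfu << 28);  /* keep only the N, Z, C, V bits */

        printf("%08x\n", (unsigned)flags);      /* prints 60000000 */
        return 0;
    }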
+
+/* Move between FT0d and {T0,T1}.  */
+void OPPROTO op_vfp_mrrd(void)
+{
+    CPU_DoubleU u;
+
+    u.d = FT0d;
+    T0 = u.l.lower;
+    T1 = u.l.upper;
+}
+
+void OPPROTO op_vfp_mdrr(void)
+{
+    CPU_DoubleU u;
+
+    u.l.lower = T0;
+    u.l.upper = T1;
+    FT0d = u.d;
+}
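
CPU_DoubleU overlays a double with two 32-bit words so that a 64-bit value can travel through the two 32-bit temporaries T0 and T1, with the field order chosen to match the host. The standalone sketch below uses a simplified union laid out for a little-endian IEEE 754 host (the type name toy_double_u is made up):

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-in for CPU_DoubleU on a little-endian host. */
    typedef union {
        double d;
        struct {
            uint32_t lower;   /* low 32 bits of the bit pattern */
            uint32_t upper;   /* high 32 bits of the bit pattern */
        } l;
    } toy_double_u;

    int main(void)
    {
        toy_double_u u;

        u.d = 1.0;   /* 0x3ff0000000000000 in IEEE 754 binary64 */
        printf("upper=%08x lower=%08x\n", (unsigned)u.l.upper, (unsigned)u.l.lower);
        /* expected on a little-endian IEEE host: upper=3ff00000 lower=00000000 */
        return 0;
    }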
+
+/* Floating point load/store.  Address is in T1.  */
+void OPPROTO op_vfp_lds(void)
+{
+    FT0s = ldfl((void *)T1);
+}
+
+void OPPROTO op_vfp_ldd(void)
+{
+    FT0d = ldfq((void *)T1);
+}
+
+void OPPROTO op_vfp_sts(void)
+{
+    stfl((void *)T1, FT0s);
+}
+
+void OPPROTO op_vfp_std(void)
+{
+    stfq((void *)T1, FT0d);
+}
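
The load/store ops cast the address in T1 directly to a host pointer and use the ldfl/ldfq/stfl/stfq accessors to move the raw float or double. A standalone sketch of a comparable raw float load using memcpy (the name toy_ldfl is made up):

    #include <stdio.h>
    #include <string.h>

    /* Toy equivalent of a raw single-precision load from an arbitrary address. */
    static float toy_ldfl(const void *addr)
    {
        float f;

        memcpy(&f, addr, sizeof(f));   /* byte copy, safe for unaligned addresses */
        return f;
    }

    int main(void)
    {
        float buf[2] = { 1.5f, -2.0f };

        printf("%f\n", toy_ldfl(&buf[1]));   /* prints -2.000000 */
        return 0;
    }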