Commit 35cf7c7e183775b3a850730804c8d64f56b9c02e
1 parent
56fdd213
target-ppc: Add vmaddfp and vnmsubfp instructions
Signed-off-by: Nathan Froyd <froydnj@codesourcery.com>
Signed-off-by: Aurelien Jarno <aurelien@aurel32.net>
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@6570 c046a42c-6fe2-441c-8c8c-71466251a162
Showing 3 changed files with 40 additions and 0 deletions
target-ppc/helper.h
| @@ -236,6 +236,8 @@ DEF_HELPER_3(vaddfp, void, avr, avr, avr) | @@ -236,6 +236,8 @@ DEF_HELPER_3(vaddfp, void, avr, avr, avr) | ||
| 236 | DEF_HELPER_3(vsubfp, void, avr, avr, avr) | 236 | DEF_HELPER_3(vsubfp, void, avr, avr, avr) |
| 237 | DEF_HELPER_3(vmaxfp, void, avr, avr, avr) | 237 | DEF_HELPER_3(vmaxfp, void, avr, avr, avr) |
| 238 | DEF_HELPER_3(vminfp, void, avr, avr, avr) | 238 | DEF_HELPER_3(vminfp, void, avr, avr, avr) |
| 239 | +DEF_HELPER_4(vmaddfp, void, avr, avr, avr, avr) | ||
| 240 | +DEF_HELPER_4(vnmsubfp, void, avr, avr, avr, avr) | ||
| 239 | DEF_HELPER_2(vlogefp, void, avr, avr) | 241 | DEF_HELPER_2(vlogefp, void, avr, avr) |
| 240 | DEF_HELPER_2(vrfim, void, avr, avr) | 242 | DEF_HELPER_2(vrfim, void, avr, avr) |
| 241 | DEF_HELPER_2(vrfin, void, avr, avr) | 243 | DEF_HELPER_2(vrfin, void, avr, avr) |
target-ppc/op_helper.c
| @@ -2220,6 +2220,24 @@ VCMP(gtsw, >, s32) | @@ -2220,6 +2220,24 @@ VCMP(gtsw, >, s32) | ||
| 2220 | #undef VCMP_DO | 2220 | #undef VCMP_DO |
| 2221 | #undef VCMP | 2221 | #undef VCMP |
| 2222 | 2222 | ||
| 2223 | +void helper_vmaddfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) | ||
| 2224 | +{ | ||
| 2225 | + int i; | ||
| 2226 | + for (i = 0; i < ARRAY_SIZE(r->f); i++) { | ||
| 2227 | + HANDLE_NAN3(r->f[i], a->f[i], b->f[i], c->f[i]) { | ||
| 2228 | + /* Need to do the computation in higher precision and round | ||
| 2229 | + * once at the end. */ | ||
| 2230 | + float64 af, bf, cf, t; | ||
| 2231 | + af = float32_to_float64(a->f[i], &env->vec_status); | ||
| 2232 | + bf = float32_to_float64(b->f[i], &env->vec_status); | ||
| 2233 | + cf = float32_to_float64(c->f[i], &env->vec_status); | ||
| 2234 | + t = float64_mul(af, cf, &env->vec_status); | ||
| 2235 | + t = float64_add(t, bf, &env->vec_status); | ||
| 2236 | + r->f[i] = float64_to_float32(t, &env->vec_status); | ||
| 2237 | + } | ||
| 2238 | + } | ||
| 2239 | +} | ||
| 2240 | + | ||
| 2223 | void helper_vmhaddshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) | 2241 | void helper_vmhaddshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) |
| 2224 | { | 2242 | { |
| 2225 | int sat = 0; | 2243 | int sat = 0; |
| @@ -2456,6 +2474,25 @@ VMUL(uh, u16, u32) | @@ -2456,6 +2474,25 @@ VMUL(uh, u16, u32) | ||
| 2456 | #undef VMUL_DO | 2474 | #undef VMUL_DO |
| 2457 | #undef VMUL | 2475 | #undef VMUL |
| 2458 | 2476 | ||
/* Altivec vnmsubfp: per-lane negative multiply-subtract,
 * r->f[i] = -(a->f[i] * c->f[i] - b->f[i]).  NaN operands are screened
 * out by HANDLE_NAN3 before any arithmetic runs (semantics of that
 * macro assumed from its use elsewhere in this file — confirm). */
void helper_vnmsubfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN3(r->f[i], a->f[i], b->f[i], c->f[i]) {
            /* Need to do the computation in higher precision and round
             * once at the end. */
            float64 af, bf, cf, t;
            af = float32_to_float64(a->f[i], &env->vec_status);
            bf = float32_to_float64(b->f[i], &env->vec_status);
            cf = float32_to_float64(c->f[i], &env->vec_status);
            t = float64_mul(af, cf, &env->vec_status);
            t = float64_sub(t, bf, &env->vec_status);
            /* Final negation implements the "negative" in vnmsubfp. */
            t = float64_chs(t);
            r->f[i] = float64_to_float32(t, &env->vec_status);
        }
    }
}
| 2495 | + | ||
| 2459 | void helper_vperm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) | 2496 | void helper_vperm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) |
| 2460 | { | 2497 | { |
| 2461 | ppc_avr_t result; | 2498 | ppc_avr_t result; |
target-ppc/translate.c
| @@ -6584,6 +6584,7 @@ GEN_VAFORM_PAIRED(vmsumubm, vmsummbm, 18) | @@ -6584,6 +6584,7 @@ GEN_VAFORM_PAIRED(vmsumubm, vmsummbm, 18) | ||
| 6584 | GEN_VAFORM_PAIRED(vmsumuhm, vmsumuhs, 19) | 6584 | GEN_VAFORM_PAIRED(vmsumuhm, vmsumuhs, 19) |
| 6585 | GEN_VAFORM_PAIRED(vmsumshm, vmsumshs, 20) | 6585 | GEN_VAFORM_PAIRED(vmsumshm, vmsumshs, 20) |
| 6586 | GEN_VAFORM_PAIRED(vsel, vperm, 21) | 6586 | GEN_VAFORM_PAIRED(vsel, vperm, 21) |
| 6587 | +GEN_VAFORM_PAIRED(vmaddfp, vnmsubfp, 23) | ||
| 6587 | 6588 | ||
| 6588 | /*** SPE extension ***/ | 6589 | /*** SPE extension ***/ |
| 6589 | /* Register moves */ | 6590 | /* Register moves */ |