Commit e2eb279809e0e2e158d65dd7b448c70bd773f6b7
1 parent: cf7055bd
target-alpha: use CPU_Float/CPU_Double instead of ugly casts
Signed-off-by: Aurelien Jarno <aurelien@aurel32.net>
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@5771 c046a42c-6fe2-441c-8c8c-71466251a162
Showing 1 changed file with 34 additions and 26 deletions.
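The diff below relies on QEMU's CPU_FloatU and CPU_DoubleU helper unions (defined in cpu-all.h). As a minimal sketch of the shape the converted code depends on, with field names taken from their usage in the diff (the real QEMU definitions may carry additional members, e.g. split 32-bit halves of the 64-bit value):

/* Sketch only: fields f/l and d/ll match the usage in the diff;
 * see QEMU's cpu-all.h for the authoritative definitions. */
typedef union {
    float32  f;   /* value viewed as a (soft)float single */
    uint32_t l;   /* the same bits viewed as a 32-bit integer */
} CPU_FloatU;

typedef union {
    float64  d;   /* value viewed as a (soft)float double */
    uint64_t ll;  /* the same bits viewed as a 64-bit integer */
} CPU_DoubleU;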
target-alpha/op_helper.c
@@ -345,13 +345,13 @@ uint64_t helper_cmpbge (uint64_t op1, uint64_t op2)
 /* F floating (VAX) */
 static always_inline uint64_t float32_to_f (float32 fa)
 {
-    uint32_t a;
     uint64_t r, exp, mant, sig;
+    CPU_FloatU a;
 
-    a = *(uint32_t*)(&fa);
-    sig = ((uint64_t)a & 0x80000000) << 32;
-    exp = (a >> 23) & 0xff;
-    mant = ((uint64_t)a & 0x007fffff) << 29;
+    a.f = fa;
+    sig = ((uint64_t)a.l & 0x80000000) << 32;
+    exp = (a.l >> 23) & 0xff;
+    mant = ((uint64_t)a.l & 0x007fffff) << 29;
 
     if (exp == 255) {
         /* NaN or infinity */
@@ -378,7 +378,8 @@ static always_inline uint64_t float32_to_f (float32 fa)
 
 static always_inline float32 f_to_float32 (uint64_t a)
 {
-    uint32_t r, exp, mant_sig;
+    uint32_t exp, mant_sig;
+    CPU_FloatU r;
 
     exp = ((a >> 55) & 0x80) | ((a >> 52) & 0x7f);
     mant_sig = ((a >> 32) & 0x80000000) | ((a >> 29) & 0x007fffff);
@@ -390,12 +391,12 @@ static always_inline float32 f_to_float32 (uint64_t a)
 
     if (exp < 3) {
         /* Underflow */
-        r = 0;
+        r.l = 0;
     } else {
-        r = ((exp - 2) << 23) | mant_sig;
+        r.l = ((exp - 2) << 23) | mant_sig;
     }
 
-    return *(float32*)(&a);
+    return r.f;
 }
 
 uint32_t helper_f_to_memory (uint64_t a)
@@ -471,12 +472,13 @@ uint64_t helper_sqrtf (uint64_t t)
 /* G floating (VAX) */
 static always_inline uint64_t float64_to_g (float64 fa)
 {
-    uint64_t a, r, exp, mant, sig;
+    uint64_t r, exp, mant, sig;
+    CPU_DoubleU a;
 
-    a = *(uint64_t*)(&fa);
-    sig = a & 0x8000000000000000ull;
-    exp = (a >> 52) & 0x7ff;
-    mant = a & 0x000fffffffffffffull;
+    a.d = fa;
+    sig = a.ll & 0x8000000000000000ull;
+    exp = (a.ll >> 52) & 0x7ff;
+    mant = a.ll & 0x000fffffffffffffull;
 
     if (exp == 2047) {
         /* NaN or infinity */
@@ -503,7 +505,8 @@ static always_inline uint64_t float64_to_g (float64 fa)
 
 static always_inline float64 g_to_float64 (uint64_t a)
 {
-    uint64_t r, exp, mant_sig;
+    uint64_t exp, mant_sig;
+    CPU_DoubleU r;
 
     exp = (a >> 52) & 0x7ff;
     mant_sig = a & 0x800fffffffffffffull;
@@ -515,12 +518,12 @@ static always_inline float64 g_to_float64 (uint64_t a)
 
     if (exp < 3) {
         /* Underflow */
-        r = 0;
+        r.ll = 0;
     } else {
-        r = ((exp - 2) << 52) | mant_sig;
+        r.ll = ((exp - 2) << 52) | mant_sig;
     }
 
-    return *(float64*)(&a);
+    return r.d;
 }
 
 uint64_t helper_g_to_memory (uint64_t a)
@@ -596,21 +599,22 @@ uint64_t helper_sqrtg (uint64_t a)
 /* S floating (single) */
 static always_inline uint64_t float32_to_s (float32 fa)
 {
-    uint32_t a;
+    CPU_FloatU a;
     uint64_t r;
 
-    a = *(uint32_t*)(&fa);
+    a.f = fa;
 
-    r = (((uint64_t)(a & 0xc0000000)) << 32) | (((uint64_t)(a & 0x3fffffff)) << 29);
-    if (((a & 0x7f800000) != 0x7f800000) && (!(a & 0x40000000)))
+    r = (((uint64_t)(a.l & 0xc0000000)) << 32) | (((uint64_t)(a.l & 0x3fffffff)) << 29);
+    if (((a.l & 0x7f800000) != 0x7f800000) && (!(a.l & 0x40000000)))
         r |= 0x7ll << 59;
     return r;
 }
 
 static always_inline float32 s_to_float32 (uint64_t a)
 {
-    uint32_t r = ((a >> 32) & 0xc0000000) | ((a >> 29) & 0x3fffffff);
-    return *(float32*)(&r);
+    CPU_FloatU r;
+    r.l = ((a >> 32) & 0xc0000000) | ((a >> 29) & 0x3fffffff);
+    return r.f;
 }
 
 uint32_t helper_s_to_memory (uint64_t a)
@@ -680,13 +684,17 @@ uint64_t helper_sqrts (uint64_t a)
 static always_inline float64 t_to_float64 (uint64_t a)
 {
     /* Memory format is the same as float64 */
-    return *(float64*)(&a);
+    CPU_DoubleU r;
+    r.ll = a;
+    return r.d;
 }
 
 static always_inline uint64_t float64_to_t (float64 fa)
 {
     /* Memory format is the same as float64 */
-    return *(uint64*)(&fa);
+    CPU_DoubleU r;
+    r.d = fa;
+    return r.ll;
 }
 
 uint64_t helper_addt (uint64_t a, uint64_t b)
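For context on the "ugly casts": reinterpreting an object through an incompatible pointer type, as in *(uint32_t*)(&fa), runs afoul of C's strict-aliasing rules and is fragile under optimization, whereas writing one union member and reading the bits back through another is the pattern GCC documents as supported. As a side effect, the rewrite also corrects the old f_to_float32/g_to_float64 returns, which reinterpreted the input a rather than the computed r. A small standalone illustration of the same technique with plain host types (not QEMU code, just the pattern):

#include <stdint.h>
#include <stdio.h>

/* Same idea as CPU_FloatU, using a plain host float for illustration. */
typedef union {
    float    f;
    uint32_t l;
} FloatBits;

/* Decompose an IEEE-754 single into sign/exponent/mantissa fields,
 * mirroring how float32_to_f() now reads the bits through a union. */
static void decompose(float value)
{
    FloatBits a;
    a.f = value;                          /* write one member ...            */
    uint32_t sig  = a.l >> 31;            /* ... read the bits back          */
    uint32_t exp  = (a.l >> 23) & 0xff;   /* through the other member        */
    uint32_t mant = a.l & 0x007fffff;
    printf("%g -> sign=%u exp=%u mant=0x%06x\n", (double)value, sig, exp, mant);
}

int main(void)
{
    decompose(1.0f);
    decompose(-0.5f);
    return 0;
}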