Commit 0487d6a8b4e15383d0651eea1e4e03ded44308b2

Authored by j_mayer
1 parent 75d62a58

PowerPC 2.03 SPE extension - first pass.


git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@2519 c046a42c-6fe2-441c-8c8c-71466251a162
target-ppc/cpu.h
... ... @@ -26,15 +26,20 @@
26 26 #if defined (TARGET_PPC64)
27 27 typedef uint64_t ppc_gpr_t;
28 28 #define TARGET_LONG_BITS 64
  29 +#define TARGET_GPR_BITS 64
29 30 #define REGX "%016" PRIx64
30   -#elif defined(TARGET_E500)
  31 +/* We can safely use the PowerPC SPE extension when compiling for PowerPC 64 */
  32 +#define TARGET_PPCSPE
  33 +#elif defined(TARGET_PPCSPE)
31 34 /* GPR are 64 bits: used by vector extension */
32 35 typedef uint64_t ppc_gpr_t;
33 36 #define TARGET_LONG_BITS 32
  37 +#define TARGET_GPR_BITS 64
34 38 #define REGX "%08" PRIx32
35 39 #else
36 40 typedef uint32_t ppc_gpr_t;
37 41 #define TARGET_LONG_BITS 32
  42 +#define TARGET_GPR_BITS 32
38 43 #define REGX "%08" PRIx32
39 44 #endif
40 45  
... ... @@ -297,7 +302,7 @@ enum {
297 302 /* ld/st with reservation instructions */
298 303 /* cache control instructions */
299 304 /* spr/msr access instructions */
300   - PPC_INSNS_BASE = 0x00000001,
  305 + PPC_INSNS_BASE = 0x0000000000000001ULL,
301 306 #define PPC_INTEGER PPC_INSNS_BASE
302 307 #define PPC_FLOW PPC_INSNS_BASE
303 308 #define PPC_MEM PPC_INSNS_BASE
... ... @@ -305,68 +310,72 @@ enum {
305 310 #define PPC_CACHE PPC_INSNS_BASE
306 311 #define PPC_MISC PPC_INSNS_BASE
307 312 /* floating point operations instructions */
308   - PPC_FLOAT = 0x00000002,
  313 + PPC_FLOAT = 0x0000000000000002ULL,
309 314 /* more floating point operations instructions */
310   - PPC_FLOAT_EXT = 0x00000004,
  315 + PPC_FLOAT_EXT = 0x0000000000000004ULL,
311 316 /* external control instructions */
312   - PPC_EXTERN = 0x00000008,
  317 + PPC_EXTERN = 0x0000000000000008ULL,
313 318 /* segment register access instructions */
314   - PPC_SEGMENT = 0x00000010,
  319 + PPC_SEGMENT = 0x0000000000000010ULL,
315 320 /* Optional cache control instructions */
316   - PPC_CACHE_OPT = 0x00000020,
  321 + PPC_CACHE_OPT = 0x0000000000000020ULL,
317 322 /* Optional floating point op instructions */
318   - PPC_FLOAT_OPT = 0x00000040,
  323 + PPC_FLOAT_OPT = 0x0000000000000040ULL,
319 324 /* Optional memory control instructions */
320   - PPC_MEM_TLBIA = 0x00000080,
321   - PPC_MEM_TLBIE = 0x00000100,
322   - PPC_MEM_TLBSYNC = 0x00000200,
  325 + PPC_MEM_TLBIA = 0x0000000000000080ULL,
  326 + PPC_MEM_TLBIE = 0x0000000000000100ULL,
  327 + PPC_MEM_TLBSYNC = 0x0000000000000200ULL,
323 328 /* eieio & sync */
324   - PPC_MEM_SYNC = 0x00000400,
  329 + PPC_MEM_SYNC = 0x0000000000000400ULL,
325 330 /* PowerPC 6xx TLB management instructions */
326   - PPC_6xx_TLB = 0x00000800,
  331 + PPC_6xx_TLB = 0x0000000000000800ULL,
327 332 /* Altivec support */
328   - PPC_ALTIVEC = 0x00001000,
  333 + PPC_ALTIVEC = 0x0000000000001000ULL,
329 334 /* Time base support */
330   - PPC_TB = 0x00002000,
  335 + PPC_TB = 0x0000000000002000ULL,
331 336 /* Embedded PowerPC dedicated instructions */
332   - PPC_EMB_COMMON = 0x00004000,
  337 + PPC_EMB_COMMON = 0x0000000000004000ULL,
333 338 /* PowerPC 40x exception model */
334   - PPC_40x_EXCP = 0x00008000,
  339 + PPC_40x_EXCP = 0x0000000000008000ULL,
335 340 /* PowerPC 40x specific instructions */
336   - PPC_40x_SPEC = 0x00010000,
  341 + PPC_40x_SPEC = 0x0000000000010000ULL,
337 342 /* PowerPC 405 Mac instructions */
338   - PPC_405_MAC = 0x00020000,
  343 + PPC_405_MAC = 0x0000000000020000ULL,
339 344 /* PowerPC 440 specific instructions */
340   - PPC_440_SPEC = 0x00040000,
  345 + PPC_440_SPEC = 0x0000000000040000ULL,
341 346 /* Specific extensions */
342 347 /* Power-to-PowerPC bridge (601) */
343   - PPC_POWER_BR = 0x00080000,
  348 + PPC_POWER_BR = 0x0000000000080000ULL,
344 349 /* PowerPC 602 specific */
345   - PPC_602_SPEC = 0x00100000,
  350 + PPC_602_SPEC = 0x0000000000100000ULL,
346 351 /* Deprecated instructions */
347 352 /* Original POWER instruction set */
348   - PPC_POWER = 0x00200000,
  353 + PPC_POWER = 0x0000000000200000ULL,
349 354 /* POWER2 instruction set extension */
350   - PPC_POWER2 = 0x00400000,
  355 + PPC_POWER2 = 0x0000000000400000ULL,
351 356 /* Power RTC support */
352   - PPC_POWER_RTC = 0x00800000,
  357 + PPC_POWER_RTC = 0x0000000000800000ULL,
353 358 /* 64 bits PowerPC instructions */
354 359 /* 64 bits PowerPC instruction set */
355   - PPC_64B = 0x01000000,
  360 + PPC_64B = 0x0000000001000000ULL,
356 361 /* 64 bits hypervisor extensions */
357   - PPC_64H = 0x02000000,
  362 + PPC_64H = 0x0000000002000000ULL,
358 363 /* 64 bits PowerPC "bridge" features */
359   - PPC_64_BRIDGE = 0x04000000,
  364 + PPC_64_BRIDGE = 0x0000000004000000ULL,
360 365 /* BookE (embedded) PowerPC specification */
361   - PPC_BOOKE = 0x08000000,
  366 + PPC_BOOKE = 0x0000000008000000ULL,
362 367 /* eieio */
363   - PPC_MEM_EIEIO = 0x10000000,
  368 + PPC_MEM_EIEIO = 0x0000000010000000ULL,
364 369 /* e500 vector instructions */
365   - PPC_E500_VECTOR = 0x20000000,
  370 + PPC_E500_VECTOR = 0x0000000020000000ULL,
366 371 /* PowerPC 4xx dedicated instructions */
367   - PPC_4xx_COMMON = 0x40000000,
  372 + PPC_4xx_COMMON = 0x0000000040000000ULL,
368 373 /* PowerPC 2.03 specification extensions */
369   - PPC_203 = 0x80000000,
  374 + PPC_203 = 0x0000000080000000ULL,
  375 + /* PowerPC 2.03 SPE extension */
  376 + PPC_SPE = 0x0000000100000000ULL,
  377 + /* PowerPC 2.03 SPE floating-point extension */
  378 + PPC_SPEFPU = 0x0000000200000000ULL,
370 379 };
371 380  
372 381 /* CPU run-time flags (MMU and exception model) */
... ... @@ -618,10 +627,10 @@ struct CPUPPCState {
618 627 /* First are the most commonly used resources
619 628 * during translated code execution
620 629 */
621   -#if TARGET_LONG_BITS > HOST_LONG_BITS
  630 +#if TARGET_GPR_BITS > HOST_LONG_BITS
622 631 /* temporary fixed-point registers
623 632 * used to emulate 64 bits target on 32 bits hosts
624   - */
  633 + */
625 634 target_ulong t0, t1, t2;
626 635 #endif
627 636 ppc_avr_t t0_avr, t1_avr, t2_avr;
... ... @@ -683,6 +692,7 @@ struct CPUPPCState {
683 692 uint32_t vscr;
684 693 /* SPE registers */
685 694 ppc_gpr_t spe_acc;
  695 + float_status spe_status;
686 696 uint32_t spe_fscr;
687 697  
688 698 /* Internal devices resources */
... ... @@ -1192,6 +1202,8 @@ enum {
1192 1202 #define EXCP_970_MAINT 0x1600 /* Maintenance exception */
1193 1203 #define EXCP_970_THRM 0x1800 /* Thermal exception */
1194 1204 #define EXCP_970_VPUA 0x1700 /* VPU assist exception */
  1205 +/* SPE related exceptions */
  1206 +#define EXCP_NO_SPE 0x0F20 /* SPE unavailable exception */
1195 1207 /* End of exception vectors area */
1196 1208 #define EXCP_PPC_MAX 0x4000
1197 1209 /* Qemu exceptions: special cases we want to stop translation */
... ...
target-ppc/exec.h
... ... @@ -39,10 +39,10 @@ register unsigned long T1 asm(AREG2);
39 39 register unsigned long T2 asm(AREG3);
40 40 #endif
41 41 /* We may, sometime, need 64 bits registers on 32 bits target */
42   -#if defined(TARGET_PPC64) || (HOST_LONG_BITS == 64)
  42 +#if defined(TARGET_PPC64) || defined(TARGET_PPCSPE) || (HOST_LONG_BITS == 64)
43 43 #define T0_64 T0
44   -#define T1_64 T0
45   -#define T2_64 T0
  44 +#define T1_64 T1
  45 +#define T2_64 T2
46 46 #else
47 47 /* no registers can be used */
48 48 #define T0_64 (env->t0)
... ...
target-ppc/op.c
... ... @@ -1326,106 +1326,14 @@ void OPPROTO op_andi_T1 (void)
1326 1326 /* count leading zero */
1327 1327 void OPPROTO op_cntlzw (void)
1328 1328 {
1329   - int cnt;
1330   -
1331   - cnt = 0;
1332   - if (!(T0 & 0xFFFF0000UL)) {
1333   - cnt += 16;
1334   - T0 <<= 16;
1335   - }
1336   - if (!(T0 & 0xFF000000UL)) {
1337   - cnt += 8;
1338   - T0 <<= 8;
1339   - }
1340   - if (!(T0 & 0xF0000000UL)) {
1341   - cnt += 4;
1342   - T0 <<= 4;
1343   - }
1344   - if (!(T0 & 0xC0000000UL)) {
1345   - cnt += 2;
1346   - T0 <<= 2;
1347   - }
1348   - if (!(T0 & 0x80000000UL)) {
1349   - cnt++;
1350   - T0 <<= 1;
1351   - }
1352   - if (!(T0 & 0x80000000UL)) {
1353   - cnt++;
1354   - }
1355   - T0 = cnt;
  1329 + T0 = _do_cntlzw(T0);
1356 1330 RETURN();
1357 1331 }
1358 1332  
1359 1333 #if defined(TARGET_PPC64)
1360 1334 void OPPROTO op_cntlzd (void)
1361 1335 {
1362   -#if HOST_LONG_BITS == 64
1363   - int cnt;
1364   -
1365   - cnt = 0;
1366   - if (!(T0 & 0xFFFFFFFF00000000ULL)) {
1367   - cnt += 32;
1368   - T0 <<= 32;
1369   - }
1370   - if (!(T0 & 0xFFFF000000000000ULL)) {
1371   - cnt += 16;
1372   - T0 <<= 16;
1373   - }
1374   - if (!(T0 & 0xFF00000000000000ULL)) {
1375   - cnt += 8;
1376   - T0 <<= 8;
1377   - }
1378   - if (!(T0 & 0xF000000000000000ULL)) {
1379   - cnt += 4;
1380   - T0 <<= 4;
1381   - }
1382   - if (!(T0 & 0xC000000000000000ULL)) {
1383   - cnt += 2;
1384   - T0 <<= 2;
1385   - }
1386   - if (!(T0 & 0x8000000000000000ULL)) {
1387   - cnt++;
1388   - T0 <<= 1;
1389   - }
1390   - if (!(T0 & 0x8000000000000000ULL)) {
1391   - cnt++;
1392   - }
1393   - T0 = cnt;
1394   -#else
1395   - uint32_t tmp;
1396   -
1397   - /* Make it easier on 32 bits host machines */
1398   - if (!(T0 >> 32)) {
1399   - tmp = T0;
1400   - T0 = 32;
1401   - } else {
1402   - tmp = T0 >> 32;
1403   - T0 = 0;
1404   - }
1405   - if (!(tmp & 0xFFFF0000UL)) {
1406   - T0 += 16;
1407   - tmp <<= 16;
1408   - }
1409   - if (!(tmp & 0xFF000000UL)) {
1410   - T0 += 8;
1411   - tmp <<= 8;
1412   - }
1413   - if (!(tmp & 0xF0000000UL)) {
1414   - T0 += 4;
1415   - tmp <<= 4;
1416   - }
1417   - if (!(tmp & 0xC0000000UL)) {
1418   - T0 += 2;
1419   - tmp <<= 2;
1420   - }
1421   - if (!(tmp & 0x80000000UL)) {
1422   - T0++;
1423   - tmp <<= 1;
1424   - }
1425   - if (!(tmp & 0x80000000UL)) {
1426   - T0++;
1427   - }
1428   -#endif
  1336 + T0 = _do_cntlzd(T0);
1429 1337 RETURN();
1430 1338 }
1431 1339 #endif
... ... @@ -2462,4 +2370,723 @@ void OPPROTO op_store_booke_tsr (void)
2462 2370 store_booke_tsr(env, T0);
2463 2371 RETURN();
2464 2372 }
  2373 +
2465 2374 #endif /* !defined(CONFIG_USER_ONLY) */
  2375 +
  2376 +#if defined(TARGET_PPCSPE)
  2377 +/* SPE extension */
  2378 +void OPPROTO op_splatw_T1_64 (void)
  2379 +{
  2380 + T1_64 = (T1_64 << 32) | (T1_64 & 0x00000000FFFFFFFFULL);
  2381 +}
  2382 +
  2383 +void OPPROTO op_splatwi_T0_64 (void)
  2384 +{
  2385 + uint64_t tmp = PARAM1;
  2386 +
  2387 + T0_64 = (tmp << 32) | tmp;
  2388 +}
  2389 +
  2390 +void OPPROTO op_splatwi_T1_64 (void)
  2391 +{
  2392 + uint64_t tmp = PARAM1;
  2393 +
  2394 + T1_64 = (tmp << 32) | tmp;
  2395 +}
  2396 +
  2397 +void OPPROTO op_extsh_T1_64 (void)
  2398 +{
  2399 + T1_64 = (int32_t)((int16_t)T1_64);
  2400 + RETURN();
  2401 +}
  2402 +
  2403 +void OPPROTO op_sli16_T1_64 (void)
  2404 +{
  2405 + T1_64 = T1_64 << 16;
  2406 + RETURN();
  2407 +}
  2408 +
  2409 +void OPPROTO op_sli32_T1_64 (void)
  2410 +{
  2411 + T1_64 = T1_64 << 32;
  2412 + RETURN();
  2413 +}
  2414 +
  2415 +void OPPROTO op_srli32_T1_64 (void)
  2416 +{
  2417 + T1_64 = T1_64 >> 32;
  2418 + RETURN();
  2419 +}
  2420 +
  2421 +void OPPROTO op_evsel (void)
  2422 +{
  2423 + do_evsel();
  2424 + RETURN();
  2425 +}
  2426 +
  2427 +void OPPROTO op_evaddw (void)
  2428 +{
  2429 + do_evaddw();
  2430 + RETURN();
  2431 +}
  2432 +
  2433 +void OPPROTO op_evsubfw (void)
  2434 +{
  2435 + do_evsubfw();
  2436 + RETURN();
  2437 +}
  2438 +
  2439 +void OPPROTO op_evneg (void)
  2440 +{
  2441 + do_evneg();
  2442 + RETURN();
  2443 +}
  2444 +
  2445 +void OPPROTO op_evabs (void)
  2446 +{
  2447 + do_evabs();
  2448 + RETURN();
  2449 +}
  2450 +
  2451 +void OPPROTO op_evextsh (void)
  2452 +{
  2453 + T0_64 = ((uint64_t)((int32_t)(int16_t)(T0_64 >> 32)) << 32) |
  2454 + (uint64_t)((int32_t)(int16_t)T0_64);
  2455 + RETURN();
  2456 +}
  2457 +
  2458 +void OPPROTO op_evextsb (void)
  2459 +{
  2460 + T0_64 = ((uint64_t)((int32_t)(int8_t)(T0_64 >> 32)) << 32) |
  2461 + (uint64_t)((int32_t)(int8_t)T0_64);
  2462 + RETURN();
  2463 +}
  2464 +
  2465 +void OPPROTO op_evcntlzw (void)
  2466 +{
  2467 + do_evcntlzw();
  2468 + RETURN();
  2469 +}
  2470 +
  2471 +void OPPROTO op_evrndw (void)
  2472 +{
  2473 + do_evrndw();
  2474 + RETURN();
  2475 +}
  2476 +
  2477 +void OPPROTO op_brinc (void)
  2478 +{
  2479 + do_brinc();
  2480 + RETURN();
  2481 +}
  2482 +
  2483 +void OPPROTO op_evcntlsw (void)
  2484 +{
  2485 + do_evcntlsw();
  2486 + RETURN();
  2487 +}
  2488 +
  2489 +void OPPROTO op_evand (void)
  2490 +{
  2491 + T0_64 &= T1_64;
  2492 + RETURN();
  2493 +}
  2494 +
  2495 +void OPPROTO op_evandc (void)
  2496 +{
  2497 + T0_64 &= ~T1_64;
  2498 + RETURN();
  2499 +}
  2500 +
  2501 +void OPPROTO op_evor (void)
  2502 +{
  2503 + T0_64 |= T1_64;
  2504 + RETURN();
  2505 +}
  2506 +
  2507 +void OPPROTO op_evxor (void)
  2508 +{
  2509 + T0_64 ^= T1_64;
  2510 + RETURN();
  2511 +}
  2512 +
  2513 +void OPPROTO op_eveqv (void)
  2514 +{
  2515 + T0_64 = ~(T0_64 ^ T1_64);
  2516 + RETURN();
  2517 +}
  2518 +
  2519 +void OPPROTO op_evnor (void)
  2520 +{
  2521 + T0_64 = ~(T0_64 | T1_64);
  2522 + RETURN();
  2523 +}
  2524 +
  2525 +void OPPROTO op_evorc (void)
  2526 +{
  2527 + T0_64 |= ~T1_64;
  2528 + RETURN();
  2529 +}
  2530 +
  2531 +void OPPROTO op_evnand (void)
  2532 +{
  2533 + T0_64 = ~(T0_64 & T1_64);
  2534 + RETURN();
  2535 +}
  2536 +
  2537 +void OPPROTO op_evsrws (void)
  2538 +{
  2539 + do_evsrws();
  2540 + RETURN();
  2541 +}
  2542 +
  2543 +void OPPROTO op_evsrwu (void)
  2544 +{
  2545 + do_evsrwu();
  2546 + RETURN();
  2547 +}
  2548 +
  2549 +void OPPROTO op_evslw (void)
  2550 +{
  2551 + do_evslw();
  2552 + RETURN();
  2553 +}
  2554 +
  2555 +void OPPROTO op_evrlw (void)
  2556 +{
  2557 + do_evrlw();
  2558 + RETURN();
  2559 +}
  2560 +
  2561 +void OPPROTO op_evmergelo (void)
  2562 +{
  2563 + T0_64 = (T0_64 << 32) | (T1_64 & 0x00000000FFFFFFFFULL);
  2564 + RETURN();
  2565 +}
  2566 +
  2567 +void OPPROTO op_evmergehi (void)
  2568 +{
  2569 + T0_64 = (T0_64 & 0xFFFFFFFF00000000ULL) | (T1_64 >> 32);
  2570 + RETURN();
  2571 +}
  2572 +
  2573 +void OPPROTO op_evmergelohi (void)
  2574 +{
  2575 + T0_64 = (T0_64 << 32) | (T1_64 >> 32);
  2576 + RETURN();
  2577 +}
  2578 +
  2579 +void OPPROTO op_evmergehilo (void)
  2580 +{
  2581 + T0_64 = (T0_64 & 0xFFFFFFFF00000000ULL) | (T1_64 & 0x00000000FFFFFFFFULL);
  2582 + RETURN();
  2583 +}
  2584 +
  2585 +void OPPROTO op_evcmpgts (void)
  2586 +{
  2587 + do_evcmpgts();
  2588 + RETURN();
  2589 +}
  2590 +
  2591 +void OPPROTO op_evcmpgtu (void)
  2592 +{
  2593 + do_evcmpgtu();
  2594 + RETURN();
  2595 +}
  2596 +
  2597 +void OPPROTO op_evcmplts (void)
  2598 +{
  2599 + do_evcmplts();
  2600 + RETURN();
  2601 +}
  2602 +
  2603 +void OPPROTO op_evcmpltu (void)
  2604 +{
  2605 + do_evcmpltu();
  2606 + RETURN();
  2607 +}
  2608 +
  2609 +void OPPROTO op_evcmpeq (void)
  2610 +{
  2611 + do_evcmpeq();
  2612 + RETURN();
  2613 +}
  2614 +
  2615 +void OPPROTO op_evfssub (void)
  2616 +{
  2617 + do_evfssub();
  2618 + RETURN();
  2619 +}
  2620 +
  2621 +void OPPROTO op_evfsadd (void)
  2622 +{
  2623 + do_evfsadd();
  2624 + RETURN();
  2625 +}
  2626 +
  2627 +void OPPROTO op_evfsnabs (void)
  2628 +{
  2629 + do_evfsnabs();
  2630 + RETURN();
  2631 +}
  2632 +
  2633 +void OPPROTO op_evfsabs (void)
  2634 +{
  2635 + do_evfsabs();
  2636 + RETURN();
  2637 +}
  2638 +
  2639 +void OPPROTO op_evfsneg (void)
  2640 +{
  2641 + do_evfsneg();
  2642 + RETURN();
  2643 +}
  2644 +
  2645 +void OPPROTO op_evfsdiv (void)
  2646 +{
  2647 + do_evfsdiv();
  2648 + RETURN();
  2649 +}
  2650 +
  2651 +void OPPROTO op_evfsmul (void)
  2652 +{
  2653 + do_evfsmul();
  2654 + RETURN();
  2655 +}
  2656 +
  2657 +void OPPROTO op_evfscmplt (void)
  2658 +{
  2659 + do_evfscmplt();
  2660 + RETURN();
  2661 +}
  2662 +
  2663 +void OPPROTO op_evfscmpgt (void)
  2664 +{
  2665 + do_evfscmpgt();
  2666 + RETURN();
  2667 +}
  2668 +
  2669 +void OPPROTO op_evfscmpeq (void)
  2670 +{
  2671 + do_evfscmpeq();
  2672 + RETURN();
  2673 +}
  2674 +
  2675 +void OPPROTO op_evfscfsi (void)
  2676 +{
  2677 + do_evfscfsi();
  2678 + RETURN();
  2679 +}
  2680 +
  2681 +void OPPROTO op_evfscfui (void)
  2682 +{
  2683 + do_evfscfui();
  2684 + RETURN();
  2685 +}
  2686 +
  2687 +void OPPROTO op_evfscfsf (void)
  2688 +{
  2689 + do_evfscfsf();
  2690 + RETURN();
  2691 +}
  2692 +
  2693 +void OPPROTO op_evfscfuf (void)
  2694 +{
  2695 + do_evfscfuf();
  2696 + RETURN();
  2697 +}
  2698 +
  2699 +void OPPROTO op_evfsctsi (void)
  2700 +{
  2701 + do_evfsctsi();
  2702 + RETURN();
  2703 +}
  2704 +
  2705 +void OPPROTO op_evfsctui (void)
  2706 +{
  2707 + do_evfsctui();
  2708 + RETURN();
  2709 +}
  2710 +
  2711 +void OPPROTO op_evfsctsf (void)
  2712 +{
  2713 + do_evfsctsf();
  2714 + RETURN();
  2715 +}
  2716 +
  2717 +void OPPROTO op_evfsctuf (void)
  2718 +{
  2719 + do_evfsctuf();
  2720 + RETURN();
  2721 +}
  2722 +
  2723 +void OPPROTO op_evfsctuiz (void)
  2724 +{
  2725 + do_evfsctuiz();
  2726 + RETURN();
  2727 +}
  2728 +
  2729 +void OPPROTO op_evfsctsiz (void)
  2730 +{
  2731 + do_evfsctsiz();
  2732 + RETURN();
  2733 +}
  2734 +
  2735 +void OPPROTO op_evfststlt (void)
  2736 +{
  2737 + do_evfststlt();
  2738 + RETURN();
  2739 +}
  2740 +
  2741 +void OPPROTO op_evfststgt (void)
  2742 +{
  2743 + do_evfststgt();
  2744 + RETURN();
  2745 +}
  2746 +
  2747 +void OPPROTO op_evfststeq (void)
  2748 +{
  2749 + do_evfststeq();
  2750 + RETURN();
  2751 +}
  2752 +
  2753 +void OPPROTO op_efssub (void)
  2754 +{
  2755 + T0_64 = _do_efssub(T0_64, T1_64);
  2756 + RETURN();
  2757 +}
  2758 +
  2759 +void OPPROTO op_efsadd (void)
  2760 +{
  2761 + T0_64 = _do_efsadd(T0_64, T1_64);
  2762 + RETURN();
  2763 +}
  2764 +
  2765 +void OPPROTO op_efsnabs (void)
  2766 +{
  2767 + T0_64 = _do_efsnabs(T0_64);
  2768 + RETURN();
  2769 +}
  2770 +
  2771 +void OPPROTO op_efsabs (void)
  2772 +{
  2773 + T0_64 = _do_efsabs(T0_64);
  2774 + RETURN();
  2775 +}
  2776 +
  2777 +void OPPROTO op_efsneg (void)
  2778 +{
  2779 + T0_64 = _do_efsneg(T0_64);
  2780 + RETURN();
  2781 +}
  2782 +
  2783 +void OPPROTO op_efsdiv (void)
  2784 +{
  2785 + T0_64 = _do_efsdiv(T0_64, T1_64);
  2786 + RETURN();
  2787 +}
  2788 +
  2789 +void OPPROTO op_efsmul (void)
  2790 +{
  2791 + T0_64 = _do_efsmul(T0_64, T1_64);
  2792 + RETURN();
  2793 +}
  2794 +
  2795 +void OPPROTO op_efscmplt (void)
  2796 +{
  2797 + do_efscmplt();
  2798 + RETURN();
  2799 +}
  2800 +
  2801 +void OPPROTO op_efscmpgt (void)
  2802 +{
  2803 + do_efscmpgt();
  2804 + RETURN();
  2805 +}
  2806 +
  2807 +void OPPROTO op_efscfd (void)
  2808 +{
  2809 + do_efscfd();
  2810 + RETURN();
  2811 +}
  2812 +
  2813 +void OPPROTO op_efscmpeq (void)
  2814 +{
  2815 + do_efscmpeq();
  2816 + RETURN();
  2817 +}
  2818 +
  2819 +void OPPROTO op_efscfsi (void)
  2820 +{
  2821 + do_efscfsi();
  2822 + RETURN();
  2823 +}
  2824 +
  2825 +void OPPROTO op_efscfui (void)
  2826 +{
  2827 + do_efscfui();
  2828 + RETURN();
  2829 +}
  2830 +
  2831 +void OPPROTO op_efscfsf (void)
  2832 +{
  2833 + do_efscfsf();
  2834 + RETURN();
  2835 +}
  2836 +
  2837 +void OPPROTO op_efscfuf (void)
  2838 +{
  2839 + do_efscfuf();
  2840 + RETURN();
  2841 +}
  2842 +
  2843 +void OPPROTO op_efsctsi (void)
  2844 +{
  2845 + do_efsctsi();
  2846 + RETURN();
  2847 +}
  2848 +
  2849 +void OPPROTO op_efsctui (void)
  2850 +{
  2851 + do_efsctui();
  2852 + RETURN();
  2853 +}
  2854 +
  2855 +void OPPROTO op_efsctsf (void)
  2856 +{
  2857 + do_efsctsf();
  2858 + RETURN();
  2859 +}
  2860 +
  2861 +void OPPROTO op_efsctuf (void)
  2862 +{
  2863 + do_efsctuf();
  2864 + RETURN();
  2865 +}
  2866 +
  2867 +void OPPROTO op_efsctsiz (void)
  2868 +{
  2869 + do_efsctsiz();
  2870 + RETURN();
  2871 +}
  2872 +
  2873 +void OPPROTO op_efsctuiz (void)
  2874 +{
  2875 + do_efsctuiz();
  2876 + RETURN();
  2877 +}
  2878 +
  2879 +void OPPROTO op_efststlt (void)
  2880 +{
  2881 + T0 = _do_efststlt(T0_64, T1_64);
  2882 + RETURN();
  2883 +}
  2884 +
  2885 +void OPPROTO op_efststgt (void)
  2886 +{
  2887 + T0 = _do_efststgt(T0_64, T1_64);
  2888 + RETURN();
  2889 +}
  2890 +
  2891 +void OPPROTO op_efststeq (void)
  2892 +{
  2893 + T0 = _do_efststeq(T0_64, T1_64);
  2894 + RETURN();
  2895 +}
  2896 +
  2897 +void OPPROTO op_efdsub (void)
  2898 +{
  2899 + union {
  2900 + uint64_t u;
  2901 + float64 f;
  2902 + } u1, u2;
  2903 + u1.u = T0_64;
  2904 + u2.u = T1_64;
  2905 + u1.f = float64_sub(u1.f, u2.f, &env->spe_status);
  2906 + T0_64 = u1.u;
  2907 + RETURN();
  2908 +}
  2909 +
  2910 +void OPPROTO op_efdadd (void)
  2911 +{
  2912 + union {
  2913 + uint64_t u;
  2914 + float64 f;
  2915 + } u1, u2;
  2916 + u1.u = T0_64;
  2917 + u2.u = T1_64;
  2918 + u1.f = float64_add(u1.f, u2.f, &env->spe_status);
  2919 + T0_64 = u1.u;
  2920 + RETURN();
  2921 +}
  2922 +
  2923 +void OPPROTO op_efdcfsid (void)
  2924 +{
  2925 + do_efdcfsi();
  2926 + RETURN();
  2927 +}
  2928 +
  2929 +void OPPROTO op_efdcfuid (void)
  2930 +{
  2931 + do_efdcfui();
  2932 + RETURN();
  2933 +}
  2934 +
  2935 +void OPPROTO op_efdnabs (void)
  2936 +{
  2937 + T0_64 |= 0x8000000000000000ULL;
  2938 + RETURN();
  2939 +}
  2940 +
  2941 +void OPPROTO op_efdabs (void)
  2942 +{
  2943 + T0_64 &= ~0x8000000000000000ULL;
  2944 + RETURN();
  2945 +}
  2946 +
  2947 +void OPPROTO op_efdneg (void)
  2948 +{
  2949 + T0_64 ^= 0x8000000000000000ULL;
  2950 + RETURN();
  2951 +}
  2952 +
  2953 +void OPPROTO op_efddiv (void)
  2954 +{
  2955 + union {
  2956 + uint64_t u;
  2957 + float64 f;
  2958 + } u1, u2;
  2959 + u1.u = T0_64;
  2960 + u2.u = T1_64;
  2961 + u1.f = float64_div(u1.f, u2.f, &env->spe_status);
  2962 + T0_64 = u1.u;
  2963 + RETURN();
  2964 +}
  2965 +
  2966 +void OPPROTO op_efdmul (void)
  2967 +{
  2968 + union {
  2969 + uint64_t u;
  2970 + float64 f;
  2971 + } u1, u2;
  2972 + u1.u = T0_64;
  2973 + u2.u = T1_64;
  2974 + u1.f = float64_mul(u1.f, u2.f, &env->spe_status);
  2975 + T0_64 = u1.u;
  2976 + RETURN();
  2977 +}
  2978 +
  2979 +void OPPROTO op_efdctsidz (void)
  2980 +{
  2981 + do_efdctsiz();
  2982 + RETURN();
  2983 +}
  2984 +
  2985 +void OPPROTO op_efdctuidz (void)
  2986 +{
  2987 + do_efdctuiz();
  2988 + RETURN();
  2989 +}
  2990 +
  2991 +void OPPROTO op_efdcmplt (void)
  2992 +{
  2993 + do_efdcmplt();
  2994 + RETURN();
  2995 +}
  2996 +
  2997 +void OPPROTO op_efdcmpgt (void)
  2998 +{
  2999 + do_efdcmpgt();
  3000 + RETURN();
  3001 +}
  3002 +
  3003 +void OPPROTO op_efdcfs (void)
  3004 +{
  3005 + do_efdcfs();
  3006 + RETURN();
  3007 +}
  3008 +
  3009 +void OPPROTO op_efdcmpeq (void)
  3010 +{
  3011 + do_efdcmpeq();
  3012 + RETURN();
  3013 +}
  3014 +
  3015 +void OPPROTO op_efdcfsi (void)
  3016 +{
  3017 + do_efdcfsi();
  3018 + RETURN();
  3019 +}
  3020 +
  3021 +void OPPROTO op_efdcfui (void)
  3022 +{
  3023 + do_efdcfui();
  3024 + RETURN();
  3025 +}
  3026 +
  3027 +void OPPROTO op_efdcfsf (void)
  3028 +{
  3029 + do_efdcfsf();
  3030 + RETURN();
  3031 +}
  3032 +
  3033 +void OPPROTO op_efdcfuf (void)
  3034 +{
  3035 + do_efdcfuf();
  3036 + RETURN();
  3037 +}
  3038 +
  3039 +void OPPROTO op_efdctsi (void)
  3040 +{
  3041 + do_efdctsi();
  3042 + RETURN();
  3043 +}
  3044 +
  3045 +void OPPROTO op_efdctui (void)
  3046 +{
  3047 + do_efdctui();
  3048 + RETURN();
  3049 +}
  3050 +
  3051 +void OPPROTO op_efdctsf (void)
  3052 +{
  3053 + do_efdctsf();
  3054 + RETURN();
  3055 +}
  3056 +
  3057 +void OPPROTO op_efdctuf (void)
  3058 +{
  3059 + do_efdctuf();
  3060 + RETURN();
  3061 +}
  3062 +
  3063 +void OPPROTO op_efdctuiz (void)
  3064 +{
  3065 + do_efdctuiz();
  3066 + RETURN();
  3067 +}
  3068 +
  3069 +void OPPROTO op_efdctsiz (void)
  3070 +{
  3071 + do_efdctsiz();
  3072 + RETURN();
  3073 +}
  3074 +
  3075 +void OPPROTO op_efdtstlt (void)
  3076 +{
  3077 + T0 = _do_efdtstlt(T0_64, T1_64);
  3078 + RETURN();
  3079 +}
  3080 +
  3081 +void OPPROTO op_efdtstgt (void)
  3082 +{
  3083 + T0 = _do_efdtstgt(T0_64, T1_64);
  3084 + RETURN();
  3085 +}
  3086 +
  3087 +void OPPROTO op_efdtsteq (void)
  3088 +{
  3089 + T0 = _do_efdtsteq(T0_64, T1_64);
  3090 + RETURN();
  3091 +}
  3092 +#endif /* defined(TARGET_PPCSPE) */
... ...
target-ppc/op_helper.c
... ... @@ -19,12 +19,17 @@
19 19 */
20 20 #include "exec.h"
21 21  
  22 +#include "op_helper.h"
  23 +
22 24 #define MEMSUFFIX _raw
  25 +#include "op_helper.h"
23 26 #include "op_helper_mem.h"
24 27 #if !defined(CONFIG_USER_ONLY)
25 28 #define MEMSUFFIX _user
  29 +#include "op_helper.h"
26 30 #include "op_helper_mem.h"
27 31 #define MEMSUFFIX _kernel
  32 +#include "op_helper.h"
28 33 #include "op_helper_mem.h"
29 34 #endif
30 35  
... ... @@ -229,7 +234,7 @@ void do_mul64 (uint64_t *plow, uint64_t *phigh)
229 234 mul64(plow, phigh, T0, T1);
230 235 }
231 236  
232   -static void imul64(uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b)
  237 +static void imul64 (uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b)
233 238 {
234 239 int sa, sb;
235 240 sa = (a < 0);
... ... @@ -1119,6 +1124,868 @@ void do_440_dlmzb (void)
1119 1124 T0 = i;
1120 1125 }
1121 1126  
  1127 +#if defined(TARGET_PPCSPE)
  1128 +/* SPE extension helpers */
  1129 +/* Use a table to make this quicker */
/* Nibble lookup table: hbrev[n] is the 4-bit bit-reversal of n */
static uint8_t hbrev[16] = {
    0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
    0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF,
};

/* Reverse the bit order of an 8-bit value using two nibble lookups */
static inline uint8_t byte_reverse (uint8_t val)
{
    uint8_t hi_nibble = hbrev[(val >> 4) & 0xF];
    uint8_t lo_nibble = hbrev[val & 0xF];

    return (uint8_t)(hi_nibble | (lo_nibble << 4));
}

/* Reverse the bit order of a 32-bit value: reverse each byte and swap
 * byte positions end-for-end.
 */
static inline uint32_t word_reverse (uint32_t val)
{
    uint32_t ret = 0;
    int i;

    for (i = 0; i < 4; i++)
        ret |= (uint32_t)byte_reverse(val >> (24 - 8 * i)) << (8 * i);

    return ret;
}
  1145 +
  1146 +#define MASKBITS 16 // Random value - to be fixed
/* NOTE(review): do_brinc implements the SPE "brinc" (bit-reversed
 * increment) used for FFT addressing: the masked low bits of rA are
 * bit-reversed, incremented, reversed back, and merged into rA.
 * Two points to confirm against the SPE programming model:
 *  - the mask comes from the compile-time MASKBITS placeholder (marked
 *    "to be fixed" above) rather than being derived from the rB operand;
 *  - 'b', the masked rB value, is computed but never used afterwards —
 *    presumably rB should participate in the mask/merge; verify.
 */
  1147 +void do_brinc (void)
  1148 +{
  1149 +    uint32_t a, b, d, mask;
  1150 +
  1151 +    mask = (uint32_t)(-1UL) >> MASKBITS; /* low-order MASKBITS... wait: low (32 - MASKBITS) bits set */
  1152 +    b = T1_64 & mask; /* masked rB — currently unused, see note above */
  1153 +    a = T0_64 & mask; /* masked rA (the index being incremented) */
  1154 +    d = word_reverse(1 + word_reverse(a | ~mask));
  1155 +    T0_64 = (T0_64 & ~mask) | (d & mask);
  1156 +}
  1157 +
  1158 +#define DO_SPE_OP2(name) \
  1159 +void do_ev##name (void) \
  1160 +{ \
  1161 + T0_64 = ((uint64_t)_do_e##name(T0_64 >> 32, T1_64 >> 32) << 32) | \
  1162 + (uint64_t)_do_e##name(T0_64, T1_64); \
  1163 +}
  1164 +
  1165 +#define DO_SPE_OP1(name) \
  1166 +void do_ev##name (void) \
  1167 +{ \
  1168 + T0_64 = ((uint64_t)_do_e##name(T0_64 >> 32) << 32) | \
  1169 + (uint64_t)_do_e##name(T0_64); \
  1170 +}
  1171 +
  1172 +/* Fixed-point vector arithmetic */
/* evabs element op: two's-complement absolute value of one 32-bit element.
 * Per SPE, |0x80000000| stays 0x80000000 (no overflow trap).
 * Fix: the original merely cleared the sign bit (val &= ~0x80000000),
 * which is the float-style abs; for a fixed-point value it must negate,
 * e.g. |0xFFFFFFFF| (-1) is 1, not 0x7FFFFFFF.
 */
static inline uint32_t _do_eabs (uint32_t val)
{
    if ((val & 0x80000000) && val != 0x80000000)
        val = -val;

    return val;
}
  1180 +
/* evaddw element op: modulo-2^32 addition of one 32-bit element pair */
static inline uint32_t _do_eaddw (uint32_t op1, uint32_t op2)
{
    uint32_t sum = op1 + op2;

    return sum;
}
  1185 +
  1186 +static inline int _do_ecntlsw (uint32_t val)
  1187 +{
  1188 + if (val & 0x80000000)
  1189 + return _do_cntlzw(~val);
  1190 + else
  1191 + return _do_cntlzw(val);
  1192 +}
  1193 +
  1194 +static inline int _do_ecntlzw (uint32_t val)
  1195 +{
  1196 + return _do_cntlzw(val);
  1197 +}
  1198 +
/* evneg element op: two's-complement negation of one 32-bit element.
 * Per SPE, negating 0x80000000 yields 0x80000000.
 * Fix: the original XORed the sign bit (val ^= 0x80000000), which is the
 * float-style negate; for a fixed-point value it must compute -val,
 * e.g. -(1) is 0xFFFFFFFF, not 0x80000001.
 */
static inline uint32_t _do_eneg (uint32_t val)
{
    if (val != 0x80000000)
        val = -val;

    return val;
}
  1206 +
  1207 +static inline uint32_t _do_erlw (uint32_t op1, uint32_t op2)
  1208 +{
  1209 + return rotl32(op1, op2);
  1210 +}
  1211 +
/* evrndw element op: round one 32-bit element to the nearest multiple of
 * 0x10000, i.e. round the upper 16-bit half and clear the lower half.
 * Fix: the rounding constant must be 0x00008000.  The original
 * 0x000080000000 does not fit in 32 bits and truncated to 0x80000000,
 * flipping the sign bit instead of rounding.
 */
static inline uint32_t _do_erndw (uint32_t val)
{
    return (val + 0x00008000) & 0xFFFF0000;
}
  1216 +
/* evslw element op: logical shift left of one 32-bit element.
 * The architecture takes a 6-bit shift amount; amounts 32..63 produce 0.
 * Fix: "op1 << (op2 & 0x3F)" shifts a 32-bit value by up to 63, which is
 * undefined behavior in C for counts >= 32; guard it explicitly.
 */
static inline uint32_t _do_eslw (uint32_t op1, uint32_t op2)
{
    uint32_t n = op2 & 0x3F;

    return n < 32 ? op1 << n : 0;
}
  1222 +
/* evsrws element op: arithmetic shift right of one 32-bit element.
 * The architecture takes a 6-bit shift amount; amounts 32..63 fill the
 * result with copies of the sign bit.
 * Fix: "op1 >> (op2 & 0x3F)" shifts by up to 63, which is undefined
 * behavior in C for counts >= 32; guard it explicitly.  (Right-shifting
 * a negative value remains implementation-defined, as in the original.)
 */
static inline int32_t _do_esrws (int32_t op1, uint32_t op2)
{
    uint32_t n = op2 & 0x3F;

    if (n > 31)
        return op1 < 0 ? -1 : 0; /* all sign bits */

    return op1 >> n;
}
  1228 +
/* evsrwu element op: logical shift right of one 32-bit element.
 * The architecture takes a 6-bit shift amount; amounts 32..63 produce 0.
 * Fix: "op1 >> (op2 & 0x3F)" shifts by up to 63, which is undefined
 * behavior in C for counts >= 32; guard it explicitly.
 */
static inline uint32_t _do_esrwu (uint32_t op1, uint32_t op2)
{
    uint32_t n = op2 & 0x3F;

    return n < 32 ? op1 >> n : 0;
}
  1234 +
/* evsubfw element op: subtract-from — result is op2 - op1 (modulo 2^32) */
static inline uint32_t _do_esubfw (uint32_t sub, uint32_t from)
{
    uint32_t diff = from - sub;

    return diff;
}
  1239 +
  1240 +/* evabs */
  1241 +DO_SPE_OP1(abs);
  1242 +/* evaddw */
  1243 +DO_SPE_OP2(addw);
  1244 +/* evcntlsw */
  1245 +DO_SPE_OP1(cntlsw);
  1246 +/* evcntlzw */
  1247 +DO_SPE_OP1(cntlzw);
  1248 +/* evneg */
  1249 +DO_SPE_OP1(neg);
  1250 +/* evrlw */
  1251 +DO_SPE_OP2(rlw);
  1252 +/* evrnd */
  1253 +DO_SPE_OP1(rndw);
  1254 +/* evslw */
  1255 +DO_SPE_OP2(slw);
  1256 +/* evsrws */
  1257 +DO_SPE_OP2(srws);
  1258 +/* evsrwu */
  1259 +DO_SPE_OP2(srwu);
  1260 +/* evsubfw */
  1261 +DO_SPE_OP2(subfw);
  1262 +
  1263 +/* evsel is a little bit more complicated... */
/* evsel element op: pick op1 when the condition bit n is set, op2
 * otherwise.
 */
static inline uint32_t _do_esel (uint32_t op1, uint32_t op2, int n)
{
    return n ? op1 : op2;
}
  1271 +
  1272 +void do_evsel (void)
  1273 +{
  1274 + T0_64 = ((uint64_t)_do_esel(T0_64 >> 32, T1_64 >> 32, T0 >> 3) << 32) |
  1275 + (uint64_t)_do_esel(T0_64, T1_64, (T0 >> 2) & 1);
  1276 +}
  1277 +
  1278 +/* Fixed-point vector comparisons */
  1279 +#define DO_SPE_CMP(name) \
  1280 +void do_ev##name (void) \
  1281 +{ \
  1282 + T0 = _do_evcmp_merge((uint64_t)_do_e##name(T0_64 >> 32, \
  1283 + T1_64 >> 32) << 32, \
  1284 + _do_e##name(T0_64, T1_64)); \
  1285 +}
  1286 +
/* Pack two 0/1 element-comparison results into a 4-bit CR-style field:
 * bit3 = high element, bit2 = low element, bit1 = either, bit0 = both.
 */
static inline uint32_t _do_evcmp_merge (int t0, int t1)
{
    uint32_t field = 0;

    if (t0)
        field |= 1 << 3;
    if (t1)
        field |= 1 << 2;
    if (t0 || t1)
        field |= 1 << 1;
    if (t0 && t1)
        field |= 1;

    return field;
}
/* Element comparison predicates: each returns 1 when the relation holds
 * for the 32-bit element pair, 0 otherwise.  The signed and unsigned
 * variants differ only in the operand types.
 */
static inline int _do_ecmpeq (uint32_t op1, uint32_t op2)
{
    return (op1 == op2);
}

static inline int _do_ecmpgts (int32_t op1, int32_t op2)
{
    return (op1 > op2);
}

static inline int _do_ecmpgtu (uint32_t op1, uint32_t op2)
{
    return (op1 > op2);
}

static inline int _do_ecmplts (int32_t op1, int32_t op2)
{
    return (op1 < op2);
}

static inline int _do_ecmpltu (uint32_t op1, uint32_t op2)
{
    return (op1 < op2);
}
  1315 +
  1316 +/* evcmpeq */
  1317 +DO_SPE_CMP(cmpeq);
  1318 +/* evcmpgts */
  1319 +DO_SPE_CMP(cmpgts);
  1320 +/* evcmpgtu */
  1321 +DO_SPE_CMP(cmpgtu);
  1322 +/* evcmplts */
  1323 +DO_SPE_CMP(cmplts);
  1324 +/* evcmpltu */
  1325 +DO_SPE_CMP(cmpltu);
  1326 +
  1327 +/* Single precision floating-point conversions from/to integer */
  1328 +static inline uint32_t _do_efscfsi (int32_t val)
  1329 +{
  1330 + union {
  1331 + uint32_t u;
  1332 + float32 f;
  1333 + } u;
  1334 +
  1335 + u.f = int32_to_float32(val, &env->spe_status);
  1336 +
  1337 + return u.u;
  1338 +}
  1339 +
  1340 +static inline uint32_t _do_efscfui (uint32_t val)
  1341 +{
  1342 + union {
  1343 + uint32_t u;
  1344 + float32 f;
  1345 + } u;
  1346 +
  1347 + u.f = uint32_to_float32(val, &env->spe_status);
  1348 +
  1349 + return u.u;
  1350 +}
  1351 +
  1352 +static inline int32_t _do_efsctsi (uint32_t val)
  1353 +{
  1354 + union {
  1355 + int32_t u;
  1356 + float32 f;
  1357 + } u;
  1358 +
  1359 + u.u = val;
  1360 + /* NaN are not treated the same way IEEE 754 does */
  1361 + if (unlikely(isnan(u.f)))
  1362 + return 0;
  1363 +
  1364 + return float32_to_int32(u.f, &env->spe_status);
  1365 +}
  1366 +
  1367 +static inline uint32_t _do_efsctui (uint32_t val)
  1368 +{
  1369 + union {
  1370 + int32_t u;
  1371 + float32 f;
  1372 + } u;
  1373 +
  1374 + u.u = val;
  1375 + /* NaN are not treated the same way IEEE 754 does */
  1376 + if (unlikely(isnan(u.f)))
  1377 + return 0;
  1378 +
  1379 + return float32_to_uint32(u.f, &env->spe_status);
  1380 +}
  1381 +
  1382 +static inline int32_t _do_efsctsiz (uint32_t val)
  1383 +{
  1384 + union {
  1385 + int32_t u;
  1386 + float32 f;
  1387 + } u;
  1388 +
  1389 + u.u = val;
  1390 + /* NaN are not treated the same way IEEE 754 does */
  1391 + if (unlikely(isnan(u.f)))
  1392 + return 0;
  1393 +
  1394 + return float32_to_int32_round_to_zero(u.f, &env->spe_status);
  1395 +}
  1396 +
  1397 +static inline uint32_t _do_efsctuiz (uint32_t val)
  1398 +{
  1399 + union {
  1400 + int32_t u;
  1401 + float32 f;
  1402 + } u;
  1403 +
  1404 + u.u = val;
  1405 + /* NaN are not treated the same way IEEE 754 does */
  1406 + if (unlikely(isnan(u.f)))
  1407 + return 0;
  1408 +
  1409 + return float32_to_uint32_round_to_zero(u.f, &env->spe_status);
  1410 +}
  1411 +
  1412 +void do_efscfsi (void)
  1413 +{
  1414 + T0_64 = _do_efscfsi(T0_64);
  1415 +}
  1416 +
  1417 +void do_efscfui (void)
  1418 +{
  1419 + T0_64 = _do_efscfui(T0_64);
  1420 +}
  1421 +
  1422 +void do_efsctsi (void)
  1423 +{
  1424 + T0_64 = _do_efsctsi(T0_64);
  1425 +}
  1426 +
  1427 +void do_efsctui (void)
  1428 +{
  1429 + T0_64 = _do_efsctui(T0_64);
  1430 +}
  1431 +
  1432 +void do_efsctsiz (void)
  1433 +{
  1434 + T0_64 = _do_efsctsiz(T0_64);
  1435 +}
  1436 +
  1437 +void do_efsctuiz (void)
  1438 +{
  1439 + T0_64 = _do_efsctuiz(T0_64);
  1440 +}
  1441 +
  1442 +/* Single precision floating-point conversion to/from fractional */
  1443 +static inline uint32_t _do_efscfsf (uint32_t val)
  1444 +{
  1445 + union {
  1446 + uint32_t u;
  1447 + float32 f;
  1448 + } u;
  1449 + float32 tmp;
  1450 +
  1451 + u.f = int32_to_float32(val, &env->spe_status);
  1452 + tmp = int64_to_float32(1ULL << 32, &env->spe_status);
  1453 + u.f = float32_div(u.f, tmp, &env->spe_status);
  1454 +
  1455 + return u.u;
  1456 +}
  1457 +
  1458 +static inline uint32_t _do_efscfuf (uint32_t val)
  1459 +{
  1460 + union {
  1461 + uint32_t u;
  1462 + float32 f;
  1463 + } u;
  1464 + float32 tmp;
  1465 +
  1466 + u.f = uint32_to_float32(val, &env->spe_status);
  1467 + tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
  1468 + u.f = float32_div(u.f, tmp, &env->spe_status);
  1469 +
  1470 + return u.u;
  1471 +}
  1472 +
  1473 +static inline int32_t _do_efsctsf (uint32_t val)
  1474 +{
  1475 + union {
  1476 + int32_t u;
  1477 + float32 f;
  1478 + } u;
  1479 + float32 tmp;
  1480 +
  1481 + u.u = val;
  1482 + /* NaN are not treated the same way IEEE 754 does */
  1483 + if (unlikely(isnan(u.f)))
  1484 + return 0;
  1485 + tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
  1486 + u.f = float32_mul(u.f, tmp, &env->spe_status);
  1487 +
  1488 + return float32_to_int32(u.f, &env->spe_status);
  1489 +}
  1490 +
  1491 +static inline uint32_t _do_efsctuf (uint32_t val)
  1492 +{
  1493 + union {
  1494 + int32_t u;
  1495 + float32 f;
  1496 + } u;
  1497 + float32 tmp;
  1498 +
  1499 + u.u = val;
  1500 + /* NaN are not treated the same way IEEE 754 does */
  1501 + if (unlikely(isnan(u.f)))
  1502 + return 0;
  1503 + tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
  1504 + u.f = float32_mul(u.f, tmp, &env->spe_status);
  1505 +
  1506 + return float32_to_uint32(u.f, &env->spe_status);
  1507 +}
  1508 +
  1509 +static inline int32_t _do_efsctsfz (uint32_t val)
  1510 +{
  1511 + union {
  1512 + int32_t u;
  1513 + float32 f;
  1514 + } u;
  1515 + float32 tmp;
  1516 +
  1517 + u.u = val;
  1518 + /* NaN are not treated the same way IEEE 754 does */
  1519 + if (unlikely(isnan(u.f)))
  1520 + return 0;
  1521 + tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
  1522 + u.f = float32_mul(u.f, tmp, &env->spe_status);
  1523 +
  1524 + return float32_to_int32_round_to_zero(u.f, &env->spe_status);
  1525 +}
  1526 +
  1527 +static inline uint32_t _do_efsctufz (uint32_t val)
  1528 +{
  1529 + union {
  1530 + int32_t u;
  1531 + float32 f;
  1532 + } u;
  1533 + float32 tmp;
  1534 +
  1535 + u.u = val;
  1536 + /* NaN are not treated the same way IEEE 754 does */
  1537 + if (unlikely(isnan(u.f)))
  1538 + return 0;
  1539 + tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
  1540 + u.f = float32_mul(u.f, tmp, &env->spe_status);
  1541 +
  1542 + return float32_to_uint32_round_to_zero(u.f, &env->spe_status);
  1543 +}
  1544 +
  1545 +void do_efscfsf (void)
  1546 +{
  1547 + T0_64 = _do_efscfsf(T0_64);
  1548 +}
  1549 +
  1550 +void do_efscfuf (void)
  1551 +{
  1552 + T0_64 = _do_efscfuf(T0_64);
  1553 +}
  1554 +
  1555 +void do_efsctsf (void)
  1556 +{
  1557 + T0_64 = _do_efsctsf(T0_64);
  1558 +}
  1559 +
  1560 +void do_efsctuf (void)
  1561 +{
  1562 + T0_64 = _do_efsctuf(T0_64);
  1563 +}
  1564 +
  1565 +void do_efsctsfz (void)
  1566 +{
  1567 + T0_64 = _do_efsctsfz(T0_64);
  1568 +}
  1569 +
  1570 +void do_efsctufz (void)
  1571 +{
  1572 + T0_64 = _do_efsctufz(T0_64);
  1573 +}
  1574 +
  1575 +/* Double precision floating point helpers */
  1576 +static inline int _do_efdcmplt (uint64_t op1, uint64_t op2)
  1577 +{
  1578 + /* XXX: TODO: test special values (NaN, infinites, ...) */
  1579 + return _do_efdtstlt(op1, op2);
  1580 +}
  1581 +
  1582 +static inline int _do_efdcmpgt (uint64_t op1, uint64_t op2)
  1583 +{
  1584 + /* XXX: TODO: test special values (NaN, infinites, ...) */
  1585 + return _do_efdtstgt(op1, op2);
  1586 +}
  1587 +
  1588 +static inline int _do_efdcmpeq (uint64_t op1, uint64_t op2)
  1589 +{
  1590 + /* XXX: TODO: test special values (NaN, infinites, ...) */
  1591 + return _do_efdtsteq(op1, op2);
  1592 +}
  1593 +
  1594 +void do_efdcmplt (void)
  1595 +{
  1596 + T0 = _do_efdcmplt(T0_64, T1_64);
  1597 +}
  1598 +
  1599 +void do_efdcmpgt (void)
  1600 +{
  1601 + T0 = _do_efdcmpgt(T0_64, T1_64);
  1602 +}
  1603 +
  1604 +void do_efdcmpeq (void)
  1605 +{
  1606 + T0 = _do_efdcmpeq(T0_64, T1_64);
  1607 +}
  1608 +
  1609 +/* Double precision floating-point conversion to/from integer */
  1610 +static inline uint64_t _do_efdcfsi (int64_t val)
  1611 +{
  1612 + union {
  1613 + uint64_t u;
  1614 + float64 f;
  1615 + } u;
  1616 +
  1617 + u.f = int64_to_float64(val, &env->spe_status);
  1618 +
  1619 + return u.u;
  1620 +}
  1621 +
  1622 +static inline uint64_t _do_efdcfui (uint64_t val)
  1623 +{
  1624 + union {
  1625 + uint64_t u;
  1626 + float64 f;
  1627 + } u;
  1628 +
  1629 + u.f = uint64_to_float64(val, &env->spe_status);
  1630 +
  1631 + return u.u;
  1632 +}
  1633 +
  1634 +static inline int64_t _do_efdctsi (uint64_t val)
  1635 +{
  1636 + union {
  1637 + int64_t u;
  1638 + float64 f;
  1639 + } u;
  1640 +
  1641 + u.u = val;
  1642 + /* NaN are not treated the same way IEEE 754 does */
  1643 + if (unlikely(isnan(u.f)))
  1644 + return 0;
  1645 +
  1646 + return float64_to_int64(u.f, &env->spe_status);
  1647 +}
  1648 +
  1649 +static inline uint64_t _do_efdctui (uint64_t val)
  1650 +{
  1651 + union {
  1652 + int64_t u;
  1653 + float64 f;
  1654 + } u;
  1655 +
  1656 + u.u = val;
  1657 + /* NaN are not treated the same way IEEE 754 does */
  1658 + if (unlikely(isnan(u.f)))
  1659 + return 0;
  1660 +
  1661 + return float64_to_uint64(u.f, &env->spe_status);
  1662 +}
  1663 +
  1664 +static inline int64_t _do_efdctsiz (uint64_t val)
  1665 +{
  1666 + union {
  1667 + int64_t u;
  1668 + float64 f;
  1669 + } u;
  1670 +
  1671 + u.u = val;
  1672 + /* NaN are not treated the same way IEEE 754 does */
  1673 + if (unlikely(isnan(u.f)))
  1674 + return 0;
  1675 +
  1676 + return float64_to_int64_round_to_zero(u.f, &env->spe_status);
  1677 +}
  1678 +
  1679 +static inline uint64_t _do_efdctuiz (uint64_t val)
  1680 +{
  1681 + union {
  1682 + int64_t u;
  1683 + float64 f;
  1684 + } u;
  1685 +
  1686 + u.u = val;
  1687 + /* NaN are not treated the same way IEEE 754 does */
  1688 + if (unlikely(isnan(u.f)))
  1689 + return 0;
  1690 +
  1691 + return float64_to_uint64_round_to_zero(u.f, &env->spe_status);
  1692 +}
  1693 +
  1694 +void do_efdcfsi (void)
  1695 +{
  1696 + T0_64 = _do_efdcfsi(T0_64);
  1697 +}
  1698 +
  1699 +void do_efdcfui (void)
  1700 +{
  1701 + T0_64 = _do_efdcfui(T0_64);
  1702 +}
  1703 +
  1704 +void do_efdctsi (void)
  1705 +{
  1706 + T0_64 = _do_efdctsi(T0_64);
  1707 +}
  1708 +
  1709 +void do_efdctui (void)
  1710 +{
  1711 + T0_64 = _do_efdctui(T0_64);
  1712 +}
  1713 +
  1714 +void do_efdctsiz (void)
  1715 +{
  1716 + T0_64 = _do_efdctsiz(T0_64);
  1717 +}
  1718 +
  1719 +void do_efdctuiz (void)
  1720 +{
  1721 + T0_64 = _do_efdctuiz(T0_64);
  1722 +}
  1723 +
  1724 +/* Double precision floating-point conversion to/from fractional */
  1725 +static inline uint64_t _do_efdcfsf (int64_t val)
  1726 +{
  1727 + union {
  1728 + uint64_t u;
  1729 + float64 f;
  1730 + } u;
  1731 + float64 tmp;
  1732 +
  1733 + u.f = int32_to_float64(val, &env->spe_status);
  1734 + tmp = int64_to_float64(1ULL << 32, &env->spe_status);
  1735 + u.f = float64_div(u.f, tmp, &env->spe_status);
  1736 +
  1737 + return u.u;
  1738 +}
  1739 +
  1740 +static inline uint64_t _do_efdcfuf (uint64_t val)
  1741 +{
  1742 + union {
  1743 + uint64_t u;
  1744 + float64 f;
  1745 + } u;
  1746 + float64 tmp;
  1747 +
  1748 + u.f = uint32_to_float64(val, &env->spe_status);
  1749 + tmp = int64_to_float64(1ULL << 32, &env->spe_status);
  1750 + u.f = float64_div(u.f, tmp, &env->spe_status);
  1751 +
  1752 + return u.u;
  1753 +}
  1754 +
  1755 +static inline int64_t _do_efdctsf (uint64_t val)
  1756 +{
  1757 + union {
  1758 + int64_t u;
  1759 + float64 f;
  1760 + } u;
  1761 + float64 tmp;
  1762 +
  1763 + u.u = val;
  1764 + /* NaN are not treated the same way IEEE 754 does */
  1765 + if (unlikely(isnan(u.f)))
  1766 + return 0;
  1767 + tmp = uint64_to_float64(1ULL << 32, &env->spe_status);
  1768 + u.f = float64_mul(u.f, tmp, &env->spe_status);
  1769 +
  1770 + return float64_to_int32(u.f, &env->spe_status);
  1771 +}
  1772 +
  1773 +static inline uint64_t _do_efdctuf (uint64_t val)
  1774 +{
  1775 + union {
  1776 + int64_t u;
  1777 + float64 f;
  1778 + } u;
  1779 + float64 tmp;
  1780 +
  1781 + u.u = val;
  1782 + /* NaN are not treated the same way IEEE 754 does */
  1783 + if (unlikely(isnan(u.f)))
  1784 + return 0;
  1785 + tmp = uint64_to_float64(1ULL << 32, &env->spe_status);
  1786 + u.f = float64_mul(u.f, tmp, &env->spe_status);
  1787 +
  1788 + return float64_to_uint32(u.f, &env->spe_status);
  1789 +}
  1790 +
  1791 +static inline int64_t _do_efdctsfz (uint64_t val)
  1792 +{
  1793 + union {
  1794 + int64_t u;
  1795 + float64 f;
  1796 + } u;
  1797 + float64 tmp;
  1798 +
  1799 + u.u = val;
  1800 + /* NaN are not treated the same way IEEE 754 does */
  1801 + if (unlikely(isnan(u.f)))
  1802 + return 0;
  1803 + tmp = uint64_to_float64(1ULL << 32, &env->spe_status);
  1804 + u.f = float64_mul(u.f, tmp, &env->spe_status);
  1805 +
  1806 + return float64_to_int32_round_to_zero(u.f, &env->spe_status);
  1807 +}
  1808 +
  1809 +static inline uint64_t _do_efdctufz (uint64_t val)
  1810 +{
  1811 + union {
  1812 + int64_t u;
  1813 + float64 f;
  1814 + } u;
  1815 + float64 tmp;
  1816 +
  1817 + u.u = val;
  1818 + /* NaN are not treated the same way IEEE 754 does */
  1819 + if (unlikely(isnan(u.f)))
  1820 + return 0;
  1821 + tmp = uint64_to_float64(1ULL << 32, &env->spe_status);
  1822 + u.f = float64_mul(u.f, tmp, &env->spe_status);
  1823 +
  1824 + return float64_to_uint32_round_to_zero(u.f, &env->spe_status);
  1825 +}
  1826 +
  1827 +void do_efdcfsf (void)
  1828 +{
  1829 + T0_64 = _do_efdcfsf(T0_64);
  1830 +}
  1831 +
  1832 +void do_efdcfuf (void)
  1833 +{
  1834 + T0_64 = _do_efdcfuf(T0_64);
  1835 +}
  1836 +
  1837 +void do_efdctsf (void)
  1838 +{
  1839 + T0_64 = _do_efdctsf(T0_64);
  1840 +}
  1841 +
  1842 +void do_efdctuf (void)
  1843 +{
  1844 + T0_64 = _do_efdctuf(T0_64);
  1845 +}
  1846 +
  1847 +void do_efdctsfz (void)
  1848 +{
  1849 + T0_64 = _do_efdctsfz(T0_64);
  1850 +}
  1851 +
  1852 +void do_efdctufz (void)
  1853 +{
  1854 + T0_64 = _do_efdctufz(T0_64);
  1855 +}
  1856 +
  1857 +/* Floating point conversion between single and double precision */
  1858 +static inline uint32_t _do_efscfd (uint64_t val)
  1859 +{
  1860 + union {
  1861 + uint64_t u;
  1862 + float64 f;
  1863 + } u1;
  1864 + union {
  1865 + uint32_t u;
  1866 + float32 f;
  1867 + } u2;
  1868 +
  1869 + u1.u = val;
  1870 + u2.f = float64_to_float32(u1.f, &env->spe_status);
  1871 +
  1872 + return u2.u;
  1873 +}
  1874 +
  1875 +static inline uint64_t _do_efdcfs (uint32_t val)
  1876 +{
  1877 + union {
  1878 + uint64_t u;
  1879 + float64 f;
  1880 + } u2;
  1881 + union {
  1882 + uint32_t u;
  1883 + float32 f;
  1884 + } u1;
  1885 +
  1886 + u1.u = val;
  1887 + u2.f = float32_to_float64(u1.f, &env->spe_status);
  1888 +
  1889 + return u2.u;
  1890 +}
  1891 +
  1892 +void do_efscfd (void)
  1893 +{
  1894 + T0_64 = _do_efscfd(T0_64);
  1895 +}
  1896 +
  1897 +void do_efdcfs (void)
  1898 +{
  1899 + T0_64 = _do_efdcfs(T0_64);
  1900 +}
  1901 +
  1902 +/* Single precision fixed-point vector arithmetic */
  1903 +/* evfsabs */
  1904 +DO_SPE_OP1(fsabs);
  1905 +/* evfsnabs */
  1906 +DO_SPE_OP1(fsnabs);
  1907 +/* evfsneg */
  1908 +DO_SPE_OP1(fsneg);
  1909 +/* evfsadd */
  1910 +DO_SPE_OP2(fsadd);
  1911 +/* evfssub */
  1912 +DO_SPE_OP2(fssub);
  1913 +/* evfsmul */
  1914 +DO_SPE_OP2(fsmul);
  1915 +/* evfsdiv */
  1916 +DO_SPE_OP2(fsdiv);
  1917 +
  1918 +/* Single-precision floating-point comparisons */
  1919 +static inline int _do_efscmplt (uint32_t op1, uint32_t op2)
  1920 +{
  1921 + /* XXX: TODO: test special values (NaN, infinites, ...) */
  1922 + return _do_efststlt(op1, op2);
  1923 +}
  1924 +
  1925 +static inline int _do_efscmpgt (uint32_t op1, uint32_t op2)
  1926 +{
  1927 + /* XXX: TODO: test special values (NaN, infinites, ...) */
  1928 + return _do_efststgt(op1, op2);
  1929 +}
  1930 +
  1931 +static inline int _do_efscmpeq (uint32_t op1, uint32_t op2)
  1932 +{
  1933 + /* XXX: TODO: test special values (NaN, infinites, ...) */
  1934 + return _do_efststeq(op1, op2);
  1935 +}
  1936 +
  1937 +void do_efscmplt (void)
  1938 +{
  1939 + T0 = _do_efscmplt(T0_64, T1_64);
  1940 +}
  1941 +
  1942 +void do_efscmpgt (void)
  1943 +{
  1944 + T0 = _do_efscmpgt(T0_64, T1_64);
  1945 +}
  1946 +
  1947 +void do_efscmpeq (void)
  1948 +{
  1949 + T0 = _do_efscmpeq(T0_64, T1_64);
  1950 +}
  1951 +
  1952 +/* Single-precision floating-point vector comparisons */
  1953 +/* evfscmplt */
  1954 +DO_SPE_CMP(fscmplt);
  1955 +/* evfscmpgt */
  1956 +DO_SPE_CMP(fscmpgt);
  1957 +/* evfscmpeq */
  1958 +DO_SPE_CMP(fscmpeq);
  1959 +/* evfststlt */
  1960 +DO_SPE_CMP(fststlt);
  1961 +/* evfststgt */
  1962 +DO_SPE_CMP(fststgt);
  1963 +/* evfststeq */
  1964 +DO_SPE_CMP(fststeq);
  1965 +
  1966 +/* Single-precision floating-point vector conversions */
  1967 +/* evfscfsi */
  1968 +DO_SPE_OP1(fscfsi);
  1969 +/* evfscfui */
  1970 +DO_SPE_OP1(fscfui);
  1971 +/* evfscfuf */
  1972 +DO_SPE_OP1(fscfuf);
  1973 +/* evfscfsf */
  1974 +DO_SPE_OP1(fscfsf);
  1975 +/* evfsctsi */
  1976 +DO_SPE_OP1(fsctsi);
  1977 +/* evfsctui */
  1978 +DO_SPE_OP1(fsctui);
  1979 +/* evfsctsiz */
  1980 +DO_SPE_OP1(fsctsiz);
  1981 +/* evfsctuiz */
  1982 +DO_SPE_OP1(fsctuiz);
  1983 +/* evfsctsf */
  1984 +DO_SPE_OP1(fsctsf);
  1985 +/* evfsctuf */
  1986 +DO_SPE_OP1(fsctuf);
  1987 +#endif /* defined(TARGET_PPCSPE) */
  1988 +
1122 1989 /*****************************************************************************/
1123 1990 /* Softmmu support */
1124 1991 #if !defined (CONFIG_USER_ONLY)
... ...
target-ppc/op_helper.h
... ... @@ -100,6 +100,7 @@ void do_fctiwz (void);
100 100 void do_fcmpu (void);
101 101 void do_fcmpo (void);
102 102  
  103 +/* Misc */
103 104 void do_tw (int flags);
104 105 #if defined(TARGET_PPC64)
105 106 void do_td (int flags);
... ... @@ -157,11 +158,291 @@ void do_4xx_tlbwe_lo (void);
157 158 void do_4xx_tlbwe_hi (void);
158 159 #endif
159 160  
  161 +/* PowerPC 440 specific helpers */
160 162 void do_440_dlmzb (void);
161 163  
  164 +/* PowerPC 403 specific helpers */
162 165 #if !defined(CONFIG_USER_ONLY)
163 166 void do_load_403_pb (int num);
164 167 void do_store_403_pb (int num);
165 168 #endif
166 169  
  170 +#if defined(TARGET_PPCSPE)
  171 +/* SPE extension helpers */
  172 +void do_brinc (void);
  173 +/* Fixed-point vector helpers */
  174 +void do_evabs (void);
  175 +void do_evaddw (void);
  176 +void do_evcntlsw (void);
  177 +void do_evcntlzw (void);
  178 +void do_evneg (void);
  179 +void do_evrlw (void);
  180 +void do_evsel (void);
  181 +void do_evrndw (void);
  182 +void do_evslw (void);
  183 +void do_evsrws (void);
  184 +void do_evsrwu (void);
  185 +void do_evsubfw (void);
  186 +void do_evcmpeq (void);
  187 +void do_evcmpgts (void);
  188 +void do_evcmpgtu (void);
  189 +void do_evcmplts (void);
  190 +void do_evcmpltu (void);
  191 +
  192 +/* Single precision floating-point helpers */
  193 +void do_efscmplt (void);
  194 +void do_efscmpgt (void);
  195 +void do_efscmpeq (void);
  196 +void do_efscfsf (void);
  197 +void do_efscfuf (void);
  198 +void do_efsctsf (void);
  199 +void do_efsctuf (void);
  200 +
  201 +void do_efscfsi (void);
  202 +void do_efscfui (void);
  203 +void do_efsctsi (void);
  204 +void do_efsctui (void);
  205 +void do_efsctsiz (void);
  206 +void do_efsctuiz (void);
  207 +
  208 +/* Double precision floating-point helpers */
  209 +void do_efdcmplt (void);
  210 +void do_efdcmpgt (void);
  211 +void do_efdcmpeq (void);
  212 +void do_efdcfsf (void);
  213 +void do_efdcfuf (void);
  214 +void do_efdctsf (void);
  215 +void do_efdctuf (void);
  216 +
  217 +void do_efdcfsi (void);
  218 +void do_efdcfui (void);
  219 +void do_efdctsi (void);
  220 +void do_efdctui (void);
  221 +void do_efdctsiz (void);
  222 +void do_efdctuiz (void);
  223 +
  224 +void do_efdcfs (void);
  225 +void do_efscfd (void);
  226 +
  227 +/* Floating-point vector helpers */
  228 +void do_evfsabs (void);
  229 +void do_evfsnabs (void);
  230 +void do_evfsneg (void);
  231 +void do_evfsadd (void);
  232 +void do_evfssub (void);
  233 +void do_evfsmul (void);
  234 +void do_evfsdiv (void);
  235 +void do_evfscmplt (void);
  236 +void do_evfscmpgt (void);
  237 +void do_evfscmpeq (void);
  238 +void do_evfststlt (void);
  239 +void do_evfststgt (void);
  240 +void do_evfststeq (void);
  241 +void do_evfscfsi (void);
  242 +void do_evfscfui (void);
  243 +void do_evfscfsf (void);
  244 +void do_evfscfuf (void);
  245 +void do_evfsctsf (void);
  246 +void do_evfsctuf (void);
  247 +void do_evfsctsi (void);
  248 +void do_evfsctui (void);
  249 +void do_evfsctsiz (void);
  250 +void do_evfsctuiz (void);
  251 +#endif /* defined(TARGET_PPCSPE) */
  252 +
  253 +/* Inlined helpers: used in micro-operation as well as helpers */
  254 +/* Generic fixed-point helpers */
/* Count leading zero bits in a 32-bit word; returns 32 when val is 0 */
static inline int _do_cntlzw (uint32_t val)
{
    uint32_t probe = 0x80000000UL;
    int zeros;

    for (zeros = 0; zeros < 32; zeros++) {
        if (val & probe)
            break;
        probe >>= 1;
    }

    return zeros;
}
  283 +
/* Count leading zero bits in a 64-bit word; returns 64 when val is 0.
 * On 32-bit hosts the count is built from two 32-bit counts.
 * Fixes: the 32-bit branch called the undefined name `cntlzw` instead of
 * `_do_cntlzw`, and declared an unused local `tmp`.
 */
static inline int _do_cntlzd (uint64_t val)
{
    int cnt = 0;
#if HOST_LONG_BITS == 64
    if (!(val & 0xFFFFFFFF00000000ULL)) {
        cnt += 32;
        val <<= 32;
    }
    if (!(val & 0xFFFF000000000000ULL)) {
        cnt += 16;
        val <<= 16;
    }
    if (!(val & 0xFF00000000000000ULL)) {
        cnt += 8;
        val <<= 8;
    }
    if (!(val & 0xF000000000000000ULL)) {
        cnt += 4;
        val <<= 4;
    }
    if (!(val & 0xC000000000000000ULL)) {
        cnt += 2;
        val <<= 2;
    }
    if (!(val & 0x8000000000000000ULL)) {
        cnt++;
        val <<= 1;
    }
    if (!(val & 0x8000000000000000ULL)) {
        cnt++;
    }
#else
    /* Make it easier on 32 bits host machines */
    if (!(val >> 32))
        cnt = _do_cntlzw(val) + 32;
    else
        cnt = _do_cntlzw(val >> 32);
#endif
    return cnt;
}
  325 +
  326 +#if defined(TARGET_PPCSPE)
  327 +/* SPE extension */
  328 +/* Single precision floating-point helpers */
/* Sign-bit manipulations on the raw IEEE-754 single precision pattern:
 * no rounding or exception state is touched.
 */
static inline uint32_t _do_efsabs (uint32_t val)
{
    /* Force the sign bit clear */
    return val & 0x7FFFFFFFUL;
}
static inline uint32_t _do_efsnabs (uint32_t val)
{
    /* Force the sign bit set */
    return val | 0x80000000UL;
}
static inline uint32_t _do_efsneg (uint32_t val)
{
    /* Flip the sign bit */
    return val ^ 0x80000000UL;
}
  341 +static inline uint32_t _do_efsadd (uint32_t op1, uint32_t op2)
  342 +{
  343 + union {
  344 + uint32_t u;
  345 + float32 f;
  346 + } u1, u2;
  347 + u1.u = op1;
  348 + u2.u = op2;
  349 + u1.f = float32_add(u1.f, u2.f, &env->spe_status);
  350 + return u1.u;
  351 +}
  352 +static inline uint32_t _do_efssub (uint32_t op1, uint32_t op2)
  353 +{
  354 + union {
  355 + uint32_t u;
  356 + float32 f;
  357 + } u1, u2;
  358 + u1.u = op1;
  359 + u2.u = op2;
  360 + u1.f = float32_sub(u1.f, u2.f, &env->spe_status);
  361 + return u1.u;
  362 +}
  363 +static inline uint32_t _do_efsmul (uint32_t op1, uint32_t op2)
  364 +{
  365 + union {
  366 + uint32_t u;
  367 + float32 f;
  368 + } u1, u2;
  369 + u1.u = op1;
  370 + u2.u = op2;
  371 + u1.f = float32_mul(u1.f, u2.f, &env->spe_status);
  372 + return u1.u;
  373 +}
  374 +static inline uint32_t _do_efsdiv (uint32_t op1, uint32_t op2)
  375 +{
  376 + union {
  377 + uint32_t u;
  378 + float32 f;
  379 + } u1, u2;
  380 + u1.u = op1;
  381 + u2.u = op2;
  382 + u1.f = float32_div(u1.f, u2.f, &env->spe_status);
  383 + return u1.u;
  384 +}
  385 +
  386 +static inline int _do_efststlt (uint32_t op1, uint32_t op2)
  387 +{
  388 + union {
  389 + uint32_t u;
  390 + float32 f;
  391 + } u1, u2;
  392 + u1.u = op1;
  393 + u2.u = op2;
  394 + return float32_lt(u1.f, u2.f, &env->spe_status) ? 1 : 0;
  395 +}
  396 +static inline int _do_efststgt (uint32_t op1, uint32_t op2)
  397 +{
  398 + union {
  399 + uint32_t u;
  400 + float32 f;
  401 + } u1, u2;
  402 + u1.u = op1;
  403 + u2.u = op2;
  404 + return float32_le(u1.f, u2.f, &env->spe_status) ? 0 : 1;
  405 +}
  406 +static inline int _do_efststeq (uint32_t op1, uint32_t op2)
  407 +{
  408 + union {
  409 + uint32_t u;
  410 + float32 f;
  411 + } u1, u2;
  412 + u1.u = op1;
  413 + u2.u = op2;
  414 + return float32_eq(u1.f, u2.f, &env->spe_status) ? 1 : 0;
  415 +}
  416 +/* Double precision floating-point helpers */
  417 +static inline int _do_efdtstlt (uint64_t op1, uint64_t op2)
  418 +{
  419 + union {
  420 + uint64_t u;
  421 + float64 f;
  422 + } u1, u2;
  423 + u1.u = op1;
  424 + u2.u = op2;
  425 + return float64_lt(u1.f, u2.f, &env->spe_status) ? 1 : 0;
  426 +}
  427 +static inline int _do_efdtstgt (uint64_t op1, uint64_t op2)
  428 +{
  429 + union {
  430 + uint64_t u;
  431 + float64 f;
  432 + } u1, u2;
  433 + u1.u = op1;
  434 + u2.u = op2;
  435 + return float64_le(u1.f, u2.f, &env->spe_status) ? 0 : 1;
  436 +}
  437 +static inline int _do_efdtsteq (uint64_t op1, uint64_t op2)
  438 +{
  439 + union {
  440 + uint64_t u;
  441 + float64 f;
  442 + } u1, u2;
  443 + u1.u = op1;
  444 + u2.u = op2;
  445 + return float64_eq(u1.f, u2.f, &env->spe_status) ? 1 : 0;
  446 +}
  447 +#endif /* defined(TARGET_PPCSPE) */
167 448 #endif
... ...
target-ppc/op_mem.h
... ... @@ -37,12 +37,7 @@ static inline uint32_t glue(ld32r, MEMSUFFIX) (target_ulong EA)
37 37 ((tmp & 0x0000FF00) << 8) | ((tmp & 0x000000FF) << 24);
38 38 }
39 39  
40   -#if defined(TARGET_PPC64)
41   -static inline int64_t glue(ldsl, MEMSUFFIX) (target_ulong EA)
42   -{
43   - return (int32_t)glue(ldl, MEMSUFFIX)(EA);
44   -}
45   -
  40 +#if defined(TARGET_PPC64) || defined(TARGET_PPCSPE)
46 41 static inline uint64_t glue(ld64r, MEMSUFFIX) (target_ulong EA)
47 42 {
48 43 uint64_t tmp = glue(ldq, MEMSUFFIX)(EA);
... ... @@ -55,6 +50,13 @@ static inline uint64_t glue(ld64r, MEMSUFFIX) (target_ulong EA)
55 50 ((tmp & 0x000000000000FF00ULL) << 40) |
56 51 ((tmp & 0x00000000000000FFULL) << 54);
57 52 }
  53 +#endif
  54 +
  55 +#if defined(TARGET_PPC64)
  56 +static inline int64_t glue(ldsl, MEMSUFFIX) (target_ulong EA)
  57 +{
  58 + return (int32_t)glue(ldl, MEMSUFFIX)(EA);
  59 +}
58 60  
59 61 static inline int64_t glue(ld32rs, MEMSUFFIX) (target_ulong EA)
60 62 {
... ... @@ -77,7 +79,7 @@ static inline void glue(st32r, MEMSUFFIX) (target_ulong EA, uint32_t data)
77 79 glue(stl, MEMSUFFIX)(EA, tmp);
78 80 }
79 81  
80   -#if defined(TARGET_PPC64)
  82 +#if defined(TARGET_PPC64) || defined(TARGET_PPCSPE)
81 83 static inline void glue(st64r, MEMSUFFIX) (target_ulong EA, uint64_t data)
82 84 {
83 85 uint64_t tmp = ((data & 0xFF00000000000000ULL) >> 56) |
... ... @@ -839,4 +841,262 @@ void OPPROTO glue(op_POWER2_stfq_le, MEMSUFFIX) (void)
839 841 RETURN();
840 842 }
841 843  
  844 +#if defined(TARGET_PPCSPE)
  845 +/* SPE extension */
  846 +#define _PPC_SPE_LD_OP(name, op) \
  847 +void OPPROTO glue(glue(op_spe_l, name), MEMSUFFIX) (void) \
  848 +{ \
  849 + T1_64 = glue(op, MEMSUFFIX)((uint32_t)T0); \
  850 + RETURN(); \
  851 +}
  852 +
  853 +#if defined(TARGET_PPC64)
  854 +#define _PPC_SPE_LD_OP_64(name, op) \
  855 +void OPPROTO glue(glue(glue(op_spe_l, name), _64), MEMSUFFIX) (void) \
  856 +{ \
  857 + T1_64 = glue(op, MEMSUFFIX)((uint64_t)T0); \
  858 + RETURN(); \
  859 +}
  860 +#define PPC_SPE_LD_OP(name, op) \
  861 +_PPC_SPE_LD_OP(name, op); \
  862 +_PPC_SPE_LD_OP_64(name, op)
  863 +#else
  864 +#define PPC_SPE_LD_OP(name, op) \
  865 +_PPC_SPE_LD_OP(name, op)
  866 +#endif
  867 +
  868 +
  869 +#define _PPC_SPE_ST_OP(name, op) \
  870 +void OPPROTO glue(glue(op_spe_st, name), MEMSUFFIX) (void) \
  871 +{ \
  872 + glue(op, MEMSUFFIX)((uint32_t)T0, T1_64); \
  873 + RETURN(); \
  874 +}
  875 +
  876 +#if defined(TARGET_PPC64)
  877 +#define _PPC_SPE_ST_OP_64(name, op) \
  878 +void OPPROTO glue(glue(glue(op_spe_st, name), _64), MEMSUFFIX) (void) \
  879 +{ \
  880 + glue(op, MEMSUFFIX)((uint64_t)T0, T1_64); \
  881 + RETURN(); \
  882 +}
  883 +#define PPC_SPE_ST_OP(name, op) \
  884 +_PPC_SPE_ST_OP(name, op); \
  885 +_PPC_SPE_ST_OP_64(name, op)
  886 +#else
  887 +#define PPC_SPE_ST_OP(name, op) \
  888 +_PPC_SPE_ST_OP(name, op)
  889 +#endif
  890 +
  891 +#if !defined(TARGET_PPC64)
  892 +PPC_SPE_LD_OP(dd, ldq);
  893 +PPC_SPE_ST_OP(dd, stq);
  894 +PPC_SPE_LD_OP(dd_le, ld64r);
  895 +PPC_SPE_ST_OP(dd_le, st64r);
  896 +#endif
  897 +static inline uint64_t glue(spe_ldw, MEMSUFFIX) (target_ulong EA)
  898 +{
  899 + uint64_t ret;
  900 + ret = (uint64_t)glue(ldl, MEMSUFFIX)(EA) << 32;
  901 + ret |= (uint64_t)glue(ldl, MEMSUFFIX)(EA + 4);
  902 + return ret;
  903 +}
  904 +PPC_SPE_LD_OP(dw, spe_ldw);
  905 +static inline void glue(spe_stdw, MEMSUFFIX) (target_ulong EA, uint64_t data)
  906 +{
  907 + glue(stl, MEMSUFFIX)(EA, data >> 32);
  908 + glue(stl, MEMSUFFIX)(EA + 4, data);
  909 +}
  910 +PPC_SPE_ST_OP(dw, spe_stdw);
  911 +static inline uint64_t glue(spe_ldw_le, MEMSUFFIX) (target_ulong EA)
  912 +{
  913 + uint64_t ret;
  914 + ret = (uint64_t)glue(ld32r, MEMSUFFIX)(EA) << 32;
  915 + ret |= (uint64_t)glue(ld32r, MEMSUFFIX)(EA + 4);
  916 + return ret;
  917 +}
  918 +PPC_SPE_LD_OP(dw_le, spe_ldw_le);
  919 +static inline void glue(spe_stdw_le, MEMSUFFIX) (target_ulong EA,
  920 + uint64_t data)
  921 +{
  922 + glue(st32r, MEMSUFFIX)(EA, data >> 32);
  923 + glue(st32r, MEMSUFFIX)(EA + 4, data);
  924 +}
  925 +PPC_SPE_ST_OP(dw_le, spe_stdw_le);
  926 +static inline uint64_t glue(spe_ldh, MEMSUFFIX) (target_ulong EA)
  927 +{
  928 + uint64_t ret;
  929 + ret = (uint64_t)glue(lduw, MEMSUFFIX)(EA) << 48;
  930 + ret |= (uint64_t)glue(lduw, MEMSUFFIX)(EA + 2) << 32;
  931 + ret |= (uint64_t)glue(lduw, MEMSUFFIX)(EA + 4) << 16;
  932 + ret |= (uint64_t)glue(lduw, MEMSUFFIX)(EA + 6);
  933 + return ret;
  934 +}
  935 +PPC_SPE_LD_OP(dh, spe_ldh);
  936 +static inline void glue(spe_stdh, MEMSUFFIX) (target_ulong EA, uint64_t data)
  937 +{
  938 + glue(stw, MEMSUFFIX)(EA, data >> 48);
  939 + glue(stw, MEMSUFFIX)(EA + 2, data >> 32);
  940 + glue(stw, MEMSUFFIX)(EA + 4, data >> 16);
  941 + glue(stw, MEMSUFFIX)(EA + 6, data);
  942 +}
  943 +PPC_SPE_ST_OP(dh, spe_stdh);
  944 +static inline uint64_t glue(spe_ldh_le, MEMSUFFIX) (target_ulong EA)
  945 +{
  946 + uint64_t ret;
  947 + ret = (uint64_t)glue(ld16r, MEMSUFFIX)(EA) << 48;
  948 + ret |= (uint64_t)glue(ld16r, MEMSUFFIX)(EA + 2) << 32;
  949 + ret |= (uint64_t)glue(ld16r, MEMSUFFIX)(EA + 4) << 16;
  950 + ret |= (uint64_t)glue(ld16r, MEMSUFFIX)(EA + 6);
  951 + return ret;
  952 +}
  953 +PPC_SPE_LD_OP(dh_le, spe_ldh_le);
  954 +static inline void glue(spe_stdh_le, MEMSUFFIX) (target_ulong EA,
  955 + uint64_t data)
  956 +{
  957 + glue(st16r, MEMSUFFIX)(EA, data >> 48);
  958 + glue(st16r, MEMSUFFIX)(EA + 2, data >> 32);
  959 + glue(st16r, MEMSUFFIX)(EA + 4, data >> 16);
  960 + glue(st16r, MEMSUFFIX)(EA + 6, data);
  961 +}
  962 +PPC_SPE_ST_OP(dh_le, spe_stdh_le);
  963 +static inline uint64_t glue(spe_lwhe, MEMSUFFIX) (target_ulong EA)
  964 +{
  965 + uint64_t ret;
  966 + ret = (uint64_t)glue(lduw, MEMSUFFIX)(EA) << 48;
  967 + ret |= (uint64_t)glue(lduw, MEMSUFFIX)(EA + 2) << 16;
  968 + return ret;
  969 +}
  970 +PPC_SPE_LD_OP(whe, spe_lwhe);
  971 +static inline void glue(spe_stwhe, MEMSUFFIX) (target_ulong EA, uint64_t data)
  972 +{
  973 + glue(stw, MEMSUFFIX)(EA, data >> 48);
  974 + glue(stw, MEMSUFFIX)(EA + 2, data >> 16);
  975 +}
  976 +PPC_SPE_ST_OP(whe, spe_stwhe);
  977 +static inline uint64_t glue(spe_lwhe_le, MEMSUFFIX) (target_ulong EA)
  978 +{
  979 + uint64_t ret;
  980 + ret = (uint64_t)glue(ld16r, MEMSUFFIX)(EA) << 48;
  981 + ret |= (uint64_t)glue(ld16r, MEMSUFFIX)(EA + 2) << 16;
  982 + return ret;
  983 +}
  984 +PPC_SPE_LD_OP(whe_le, spe_lwhe_le);
  985 +static inline void glue(spe_stwhe_le, MEMSUFFIX) (target_ulong EA,
  986 + uint64_t data)
  987 +{
  988 + glue(st16r, MEMSUFFIX)(EA, data >> 48);
  989 + glue(st16r, MEMSUFFIX)(EA + 2, data >> 16);
  990 +}
  991 +PPC_SPE_ST_OP(whe_le, spe_stwhe_le);
  992 +static inline uint64_t glue(spe_lwhou, MEMSUFFIX) (target_ulong EA)
  993 +{
  994 + uint64_t ret;
  995 + ret = (uint64_t)glue(lduw, MEMSUFFIX)(EA) << 32;
  996 + ret |= (uint64_t)glue(lduw, MEMSUFFIX)(EA + 2);
  997 + return ret;
  998 +}
  999 +PPC_SPE_LD_OP(whou, spe_lwhou);
  1000 +static inline uint64_t glue(spe_lwhos, MEMSUFFIX) (target_ulong EA)
  1001 +{
  1002 + uint64_t ret;
  1003 + ret = ((uint64_t)((int32_t)glue(ldsw, MEMSUFFIX)(EA))) << 32;
  1004 + ret |= (uint64_t)((int32_t)glue(ldsw, MEMSUFFIX)(EA + 2));
  1005 + return ret;
  1006 +}
  1007 +PPC_SPE_LD_OP(whos, spe_lwhos);
  1008 +static inline void glue(spe_stwho, MEMSUFFIX) (target_ulong EA, uint64_t data)
  1009 +{
  1010 + glue(stw, MEMSUFFIX)(EA, data >> 32);
  1011 + glue(stw, MEMSUFFIX)(EA + 2, data);
  1012 +}
  1013 +PPC_SPE_ST_OP(who, spe_stwho);
  1014 +static inline uint64_t glue(spe_lwhou_le, MEMSUFFIX) (target_ulong EA)
  1015 +{
  1016 + uint64_t ret;
  1017 + ret = (uint64_t)glue(ld16r, MEMSUFFIX)(EA) << 32;
  1018 + ret |= (uint64_t)glue(ld16r, MEMSUFFIX)(EA + 2);
  1019 + return ret;
  1020 +}
  1021 +PPC_SPE_LD_OP(whou_le, spe_lwhou_le);
  1022 +static inline uint64_t glue(spe_lwhos_le, MEMSUFFIX) (target_ulong EA)
  1023 +{
  1024 + uint64_t ret;
  1025 + ret = ((uint64_t)((int32_t)glue(ld16rs, MEMSUFFIX)(EA))) << 32;
  1026 + ret |= (uint64_t)((int32_t)glue(ld16rs, MEMSUFFIX)(EA + 2));
  1027 + return ret;
  1028 +}
  1029 +PPC_SPE_LD_OP(whos_le, spe_lwhos_le);
/* evstwho, little-endian variant: store the odd (lower) half of each
 * 32-bit element as byte-reversed half-words. */
static inline void glue(spe_stwho_le, MEMSUFFIX) (target_ulong EA,
                                                  uint64_t data)
{
    glue(st16r, MEMSUFFIX)(EA, data >> 32);
    glue(st16r, MEMSUFFIX)(EA + 2, data);
}
PPC_SPE_ST_OP(who_le, spe_stwho_le);
#if !defined(TARGET_PPC64)
/* evstwwo: store the odd (lower) word of the 64-bit source.
 * Only generated on 32-bit targets: on PPC64 these ops are aliased to
 * the plain stw ops in translate.c. */
static inline void glue(spe_stwwo, MEMSUFFIX) (target_ulong EA, uint64_t data)
{
    glue(stl, MEMSUFFIX)(EA, data);
}
PPC_SPE_ST_OP(wwo, spe_stwwo);
/* Little-endian variant: byte-reversed 32-bit store. */
static inline void glue(spe_stwwo_le, MEMSUFFIX) (target_ulong EA,
                                                  uint64_t data)
{
    glue(st32r, MEMSUFFIX)(EA, data);
}
PPC_SPE_ST_OP(wwo_le, spe_stwwo_le);
#endif
/* Base half-word splat load: load one half-word and replicate it into
 * bits 63:48 and 31:16. Used directly for evlhhousplat and as the base
 * for the lhe/lhx variants built in translate.c. */
static inline uint64_t glue(spe_lh, MEMSUFFIX) (target_ulong EA)
{
    uint16_t tmp;
    tmp = glue(lduw, MEMSUFFIX)(EA);
    return ((uint64_t)tmp << 48) | ((uint64_t)tmp << 16);
}
PPC_SPE_LD_OP(h, spe_lh);
/* Little-endian variant: byte-reversed half-word load. */
static inline uint64_t glue(spe_lh_le, MEMSUFFIX) (target_ulong EA)
{
    uint16_t tmp;
    tmp = glue(ld16r, MEMSUFFIX)(EA);
    return ((uint64_t)tmp << 48) | ((uint64_t)tmp << 16);
}
PPC_SPE_LD_OP(h_le, spe_lh_le);
/* evlwwsplat: load one word and replicate it into both 32-bit elements. */
static inline uint64_t glue(spe_lwwsplat, MEMSUFFIX) (target_ulong EA)
{
    uint32_t tmp;
    tmp = glue(ldl, MEMSUFFIX)(EA);
    return ((uint64_t)tmp << 32) | (uint64_t)tmp;
}
PPC_SPE_LD_OP(wwsplat, spe_lwwsplat);
/* Little-endian variant: byte-reversed 32-bit load. */
static inline uint64_t glue(spe_lwwsplat_le, MEMSUFFIX) (target_ulong EA)
{
    uint32_t tmp;
    tmp = glue(ld32r, MEMSUFFIX)(EA);
    return ((uint64_t)tmp << 32) | (uint64_t)tmp;
}
PPC_SPE_LD_OP(wwsplat_le, spe_lwwsplat_le);
/* evlwhsplat: load two half-words; replicate hw(EA) into both halves of
 * the upper word (bits 63:48 and 47:32) and hw(EA + 2) into both halves
 * of the lower word (bits 31:16 and 15:0). */
static inline uint64_t glue(spe_lwhsplat, MEMSUFFIX) (target_ulong EA)
{
    uint64_t ret;
    uint16_t tmp;
    tmp = glue(lduw, MEMSUFFIX)(EA);
    ret = ((uint64_t)tmp << 48) | ((uint64_t)tmp << 32);
    tmp = glue(lduw, MEMSUFFIX)(EA + 2);
    ret |= ((uint64_t)tmp << 16) | (uint64_t)tmp;
    return ret;
}
PPC_SPE_LD_OP(whsplat, spe_lwhsplat);
/* Little-endian variant: byte-reversed half-word loads. */
static inline uint64_t glue(spe_lwhsplat_le, MEMSUFFIX) (target_ulong EA)
{
    uint64_t ret;
    uint16_t tmp;
    tmp = glue(ld16r, MEMSUFFIX)(EA);
    ret = ((uint64_t)tmp << 48) | ((uint64_t)tmp << 32);
    tmp = glue(ld16r, MEMSUFFIX)(EA + 2);
    ret |= ((uint64_t)tmp << 16) | (uint64_t)tmp;
    return ret;
}
PPC_SPE_LD_OP(whsplat_le, spe_lwhsplat_le);
#endif /* defined(TARGET_PPCSPE) */
  1101 +
842 1102 #undef MEMSUFFIX
... ...
target-ppc/op_template.h
... ... @@ -57,6 +57,48 @@ void OPPROTO glue(op_store_T2_gpr_gpr, REG) (void)
57 57 }
58 58 #endif
59 59  
#if defined(TARGET_PPCSPE)
/* SPE operates on 64-bit GPRs even when target_ulong is 32 bits: these
 * micro-ops move the full 64-bit GPR to/from the dedicated 64-bit
 * temporaries T0_64/T1_64 (one op is expanded per register via REG). */
void OPPROTO glue(op_load_gpr64_T0_gpr, REG) (void)
{
    T0_64 = regs->gpr[REG];
    RETURN();
}

void OPPROTO glue(op_load_gpr64_T1_gpr, REG) (void)
{
    T1_64 = regs->gpr[REG];
    RETURN();
}

#if 0 // unused
void OPPROTO glue(op_load_gpr64_T2_gpr, REG) (void)
{
    T2_64 = regs->gpr[REG];
    RETURN();
}
#endif

/* Write-back counterparts: 64-bit temporary -> GPR */
void OPPROTO glue(op_store_T0_gpr64_gpr, REG) (void)
{
    regs->gpr[REG] = T0_64;
    RETURN();
}

void OPPROTO glue(op_store_T1_gpr64_gpr, REG) (void)
{
    regs->gpr[REG] = T1_64;
    RETURN();
}

#if 0 // unused
void OPPROTO glue(op_store_T2_gpr64_gpr, REG) (void)
{
    regs->gpr[REG] = T2_64;
    RETURN();
}
#endif
#endif /* defined(TARGET_PPCSPE) */
  101 +
60 102 #if REG <= 7
61 103 /* Condition register moves */
62 104 void OPPROTO glue(op_load_crf_T0_crf, REG) (void)
... ...
target-ppc/translate.c
... ... @@ -160,6 +160,9 @@ typedef struct DisasContext {
160 160 int sf_mode;
161 161 #endif
162 162 int fpu_enabled;
  163 +#if defined(TARGET_PPCSPE)
  164 + int spe_enabled;
  165 +#endif
163 166 ppc_spr_t *spr_cb; /* Needed to check rights for mfspr/mtspr */
164 167 int singlestep_enabled;
165 168 } DisasContext;
... ... @@ -168,7 +171,7 @@ struct opc_handler_t {
168 171 /* invalid bits */
169 172 uint32_t inval;
170 173 /* instruction type */
171   - uint32_t type;
  174 + uint64_t type;
172 175 /* handler */
173 176 void (*handler)(DisasContext *ctx);
174 177 #if defined(DO_PPC_STATISTICS)
... ... @@ -4468,6 +4471,814 @@ GEN_HANDLER(icbt_440, 0x1F, 0x16, 0x00, 0x03E00001, PPC_BOOKE)
4468 4471 */
4469 4472 }
4470 4473  
  4474 +#if defined(TARGET_PPCSPE)
  4475 +/*** SPE extension ***/
  4476 +
/* Register moves */
/* Expand one generator per GPR (GEN32) for the 64-bit register moves
 * defined in op_template.h: full GPR <-> T0_64/T1_64. */
GEN32(gen_op_load_gpr64_T0, gen_op_load_gpr64_T0_gpr);
GEN32(gen_op_load_gpr64_T1, gen_op_load_gpr64_T1_gpr);
#if 0 // unused
GEN32(gen_op_load_gpr64_T2, gen_op_load_gpr64_T2_gpr);
#endif

GEN32(gen_op_store_T0_gpr64, gen_op_store_T0_gpr64_gpr);
GEN32(gen_op_store_T1_gpr64, gen_op_store_T1_gpr64_gpr);
#if 0 // unused
GEN32(gen_op_store_T2_gpr64, gen_op_store_T2_gpr64_gpr);
#endif
  4489 +
/* GEN_SPE: SPE opcodes are registered in pairs sharing opc2/opc3;
 * the Rc bit of the instruction selects which of the two handlers runs
 * (Rc=0 -> name0, Rc=1 -> name1). */
#define GEN_SPE(name0, name1, opc2, opc3, inval, type)                        \
GEN_HANDLER(name0##_##name1, 0x04, opc2, opc3, inval, type)                   \
{                                                                             \
    if (Rc(ctx->opcode))                                                      \
        gen_##name1(ctx);                                                     \
    else                                                                      \
        gen_##name0(ctx);                                                     \
}

/* Handler for undefined SPE opcodes: raise the invalid-opcode trap */
static inline void gen_speundef (DisasContext *ctx)
{
    RET_INVAL(ctx);
}
  4504 +
  4505 +/* SPE load and stores */
  4506 +static inline void gen_addr_spe_imm_index (DisasContext *ctx, int sh)
  4507 +{
  4508 + target_long simm = rB(ctx->opcode);
  4509 +
  4510 + if (rA(ctx->opcode) == 0) {
  4511 + gen_set_T0(simm << sh);
  4512 + } else {
  4513 + gen_op_load_gpr_T0(rA(ctx->opcode));
  4514 + if (likely(simm != 0))
  4515 + gen_op_addi(simm << sh);
  4516 + }
  4517 +}
  4518 +
/* Invoke the load/store op variant selected by the translation-time
 * memory index (privilege level / endianness / 64-bit mode). */
#define op_spe_ldst(name) (*gen_op_##name[ctx->mem_idx])()
#if defined(CONFIG_USER_ONLY)
#if defined(TARGET_PPC64)
/* User-only, 64-bit target: raw accessors, {BE,LE} x {32,64}-bit modes */
#define OP_SPE_LD_TABLE(name)                                                 \
static GenOpFunc *gen_op_spe_l##name[] = {                                    \
    &gen_op_spe_l##name##_raw,                                                \
    &gen_op_spe_l##name##_le_raw,                                             \
    &gen_op_spe_l##name##_64_raw,                                             \
    &gen_op_spe_l##name##_le_64_raw,                                          \
};
#define OP_SPE_ST_TABLE(name)                                                 \
static GenOpFunc *gen_op_spe_st##name[] = {                                   \
    &gen_op_spe_st##name##_raw,                                               \
    &gen_op_spe_st##name##_le_raw,                                            \
    &gen_op_spe_st##name##_64_raw,                                            \
    &gen_op_spe_st##name##_le_64_raw,                                         \
};
#else /* defined(TARGET_PPC64) */
/* User-only, 32-bit target: raw accessors, {BE,LE} */
#define OP_SPE_LD_TABLE(name)                                                 \
static GenOpFunc *gen_op_spe_l##name[] = {                                    \
    &gen_op_spe_l##name##_raw,                                                \
    &gen_op_spe_l##name##_le_raw,                                             \
};
#define OP_SPE_ST_TABLE(name)                                                 \
static GenOpFunc *gen_op_spe_st##name[] = {                                   \
    &gen_op_spe_st##name##_raw,                                               \
    &gen_op_spe_st##name##_le_raw,                                            \
};
#endif /* defined(TARGET_PPC64) */
#else /* defined(CONFIG_USER_ONLY) */
#if defined(TARGET_PPC64)
/* Softmmu, 64-bit target: {user,kernel} x {BE,LE} x {32,64}-bit modes */
#define OP_SPE_LD_TABLE(name)                                                 \
static GenOpFunc *gen_op_spe_l##name[] = {                                    \
    &gen_op_spe_l##name##_user,                                               \
    &gen_op_spe_l##name##_le_user,                                            \
    &gen_op_spe_l##name##_kernel,                                             \
    &gen_op_spe_l##name##_le_kernel,                                          \
    &gen_op_spe_l##name##_64_user,                                            \
    &gen_op_spe_l##name##_le_64_user,                                         \
    &gen_op_spe_l##name##_64_kernel,                                          \
    &gen_op_spe_l##name##_le_64_kernel,                                       \
};
#define OP_SPE_ST_TABLE(name)                                                 \
static GenOpFunc *gen_op_spe_st##name[] = {                                   \
    &gen_op_spe_st##name##_user,                                              \
    &gen_op_spe_st##name##_le_user,                                           \
    &gen_op_spe_st##name##_kernel,                                            \
    &gen_op_spe_st##name##_le_kernel,                                         \
    &gen_op_spe_st##name##_64_user,                                           \
    &gen_op_spe_st##name##_le_64_user,                                        \
    &gen_op_spe_st##name##_64_kernel,                                         \
    &gen_op_spe_st##name##_le_64_kernel,                                      \
};
#else /* defined(TARGET_PPC64) */
/* Softmmu, 32-bit target: {user,kernel} x {BE,LE} */
#define OP_SPE_LD_TABLE(name)                                                 \
static GenOpFunc *gen_op_spe_l##name[] = {                                    \
    &gen_op_spe_l##name##_user,                                               \
    &gen_op_spe_l##name##_le_user,                                            \
    &gen_op_spe_l##name##_kernel,                                             \
    &gen_op_spe_l##name##_le_kernel,                                          \
};
#define OP_SPE_ST_TABLE(name)                                                 \
static GenOpFunc *gen_op_spe_st##name[] = {                                   \
    &gen_op_spe_st##name##_user,                                              \
    &gen_op_spe_st##name##_le_user,                                           \
    &gen_op_spe_st##name##_kernel,                                            \
    &gen_op_spe_st##name##_le_kernel,                                         \
};
#endif /* defined(TARGET_PPC64) */
#endif /* defined(CONFIG_USER_ONLY) */
  4589 +
/* evl<name>: load using the scaled immediate offset form, with the
 * mandatory MSR[SPE] availability check; result lands in T1 and is
 * written back to the full 64-bit rD. */
#define GEN_SPE_LD(name, sh)                                                  \
static inline void gen_evl##name (DisasContext *ctx)                          \
{                                                                             \
    if (unlikely(!ctx->spe_enabled)) {                                        \
        RET_EXCP(ctx, EXCP_NO_SPE, 0);                                        \
        return;                                                               \
    }                                                                         \
    gen_addr_spe_imm_index(ctx, sh);                                          \
    op_spe_ldst(spe_l##name);                                                 \
    gen_op_store_T1_gpr64(rD(ctx->opcode));                                   \
}

/* evl<name>x: indexed (rA|0 + rB) form of the same load */
#define GEN_SPE_LDX(name)                                                     \
static inline void gen_evl##name##x (DisasContext *ctx)                       \
{                                                                             \
    if (unlikely(!ctx->spe_enabled)) {                                        \
        RET_EXCP(ctx, EXCP_NO_SPE, 0);                                        \
        return;                                                               \
    }                                                                         \
    gen_addr_reg_index(ctx);                                                  \
    op_spe_ldst(spe_l##name);                                                 \
    gen_op_store_T1_gpr64(rD(ctx->opcode));                                   \
}

/* Emit the dispatch table plus both addressing forms of a load */
#define GEN_SPEOP_LD(name, sh)                                                \
OP_SPE_LD_TABLE(name);                                                        \
GEN_SPE_LD(name, sh);                                                         \
GEN_SPE_LDX(name)

/* evst<name>: store using the scaled immediate offset form; the 64-bit
 * source register rS is fetched into T1_64 before the store op runs. */
#define GEN_SPE_ST(name, sh)                                                  \
static inline void gen_evst##name (DisasContext *ctx)                         \
{                                                                             \
    if (unlikely(!ctx->spe_enabled)) {                                        \
        RET_EXCP(ctx, EXCP_NO_SPE, 0);                                        \
        return;                                                               \
    }                                                                         \
    gen_addr_spe_imm_index(ctx, sh);                                          \
    gen_op_load_gpr64_T1(rS(ctx->opcode));                                    \
    op_spe_ldst(spe_st##name);                                                \
}

/* evst<name>x: indexed (rA|0 + rB) form of the same store */
#define GEN_SPE_STX(name)                                                     \
static inline void gen_evst##name##x (DisasContext *ctx)                      \
{                                                                             \
    if (unlikely(!ctx->spe_enabled)) {                                        \
        RET_EXCP(ctx, EXCP_NO_SPE, 0);                                        \
        return;                                                               \
    }                                                                         \
    gen_addr_reg_index(ctx);                                                  \
    gen_op_load_gpr64_T1(rS(ctx->opcode));                                    \
    op_spe_ldst(spe_st##name);                                                \
}

/* Emit the dispatch table plus both addressing forms of a store */
#define GEN_SPEOP_ST(name, sh)                                                \
OP_SPE_ST_TABLE(name);                                                        \
GEN_SPE_ST(name, sh);                                                         \
GEN_SPE_STX(name)

/* Convenience: emit the load and the store sides together */
#define GEN_SPEOP_LDST(name, sh)                                              \
GEN_SPEOP_LD(name, sh);                                                       \
GEN_SPEOP_ST(name, sh)
  4651 +
/* SPE arithmetic and logic */
/* Two-operand form: rD = op(rA, rB), all through the 64-bit temps,
 * guarded by the MSR[SPE] availability check. */
#define GEN_SPEOP_ARITH2(name)                                                \
static inline void gen_##name (DisasContext *ctx)                             \
{                                                                             \
    if (unlikely(!ctx->spe_enabled)) {                                        \
        RET_EXCP(ctx, EXCP_NO_SPE, 0);                                        \
        return;                                                               \
    }                                                                         \
    gen_op_load_gpr64_T0(rA(ctx->opcode));                                    \
    gen_op_load_gpr64_T1(rB(ctx->opcode));                                    \
    gen_op_##name();                                                          \
    gen_op_store_T0_gpr64(rD(ctx->opcode));                                   \
}

/* One-operand form: rD = op(rA) */
#define GEN_SPEOP_ARITH1(name)                                                \
static inline void gen_##name (DisasContext *ctx)                             \
{                                                                             \
    if (unlikely(!ctx->spe_enabled)) {                                        \
        RET_EXCP(ctx, EXCP_NO_SPE, 0);                                        \
        return;                                                               \
    }                                                                         \
    gen_op_load_gpr64_T0(rA(ctx->opcode));                                    \
    gen_op_##name();                                                          \
    gen_op_store_T0_gpr64(rD(ctx->opcode));                                   \
}

/* Comparison form: result goes to the CR field crfD, not to a GPR */
#define GEN_SPEOP_COMP(name)                                                  \
static inline void gen_##name (DisasContext *ctx)                             \
{                                                                             \
    if (unlikely(!ctx->spe_enabled)) {                                        \
        RET_EXCP(ctx, EXCP_NO_SPE, 0);                                        \
        return;                                                               \
    }                                                                         \
    gen_op_load_gpr64_T0(rA(ctx->opcode));                                    \
    gen_op_load_gpr64_T1(rB(ctx->opcode));                                    \
    gen_op_##name();                                                          \
    gen_op_store_T0_crf(crfD(ctx->opcode));                                   \
}
  4690 +
/* Logical */
/* Element-wise logical / shift / merge operations on both 32-bit halves */
GEN_SPEOP_ARITH2(evand);
GEN_SPEOP_ARITH2(evandc);
GEN_SPEOP_ARITH2(evxor);
GEN_SPEOP_ARITH2(evor);
GEN_SPEOP_ARITH2(evnor);
GEN_SPEOP_ARITH2(eveqv);
GEN_SPEOP_ARITH2(evorc);
GEN_SPEOP_ARITH2(evnand);
GEN_SPEOP_ARITH2(evsrwu);
GEN_SPEOP_ARITH2(evsrws);
GEN_SPEOP_ARITH2(evslw);
GEN_SPEOP_ARITH2(evrlw);
GEN_SPEOP_ARITH2(evmergehi);
GEN_SPEOP_ARITH2(evmergelo);
GEN_SPEOP_ARITH2(evmergehilo);
GEN_SPEOP_ARITH2(evmergelohi);

/* Arithmetic */
GEN_SPEOP_ARITH2(evaddw);
GEN_SPEOP_ARITH2(evsubfw);
GEN_SPEOP_ARITH1(evabs);
GEN_SPEOP_ARITH1(evneg);
GEN_SPEOP_ARITH1(evextsb);
GEN_SPEOP_ARITH1(evextsh);
GEN_SPEOP_ARITH1(evrndw);
GEN_SPEOP_ARITH1(evcntlzw);
GEN_SPEOP_ARITH1(evcntlsw);
/* brinc (bit-reversed increment): hand-written because, unlike the
 * other SPE ops above, it must NOT raise the SPE-unavailable trap. */
static inline void gen_brinc (DisasContext *ctx)
{
    /* Note: brinc is usable even if SPE is disabled */
    gen_op_load_gpr64_T0(rA(ctx->opcode));
    gen_op_load_gpr64_T1(rB(ctx->opcode));
    gen_op_brinc();
    gen_op_store_T0_gpr64(rD(ctx->opcode));
}
  4727 +
/* Immediate arithmetic form: reuse the two-operand op with the 5-bit
 * rA field splatted into both halves of T1_64; the register operand
 * comes from rB. */
#define GEN_SPEOP_ARITH_IMM2(name)                                            \
static inline void gen_##name##i (DisasContext *ctx)                          \
{                                                                             \
    if (unlikely(!ctx->spe_enabled)) {                                        \
        RET_EXCP(ctx, EXCP_NO_SPE, 0);                                        \
        return;                                                               \
    }                                                                         \
    gen_op_load_gpr64_T0(rB(ctx->opcode));                                    \
    gen_op_splatwi_T1_64(rA(ctx->opcode));                                    \
    gen_op_##name();                                                          \
    gen_op_store_T0_gpr64(rD(ctx->opcode));                                   \
}

/* Immediate logic form: same trick but the register operand comes from
 * rA and the immediate (shift/rotate count) from the rB field. */
#define GEN_SPEOP_LOGIC_IMM2(name)                                            \
static inline void gen_##name##i (DisasContext *ctx)                          \
{                                                                             \
    if (unlikely(!ctx->spe_enabled)) {                                        \
        RET_EXCP(ctx, EXCP_NO_SPE, 0);                                        \
        return;                                                               \
    }                                                                         \
    gen_op_load_gpr64_T0(rA(ctx->opcode));                                    \
    gen_op_splatwi_T1_64(rB(ctx->opcode));                                    \
    gen_op_##name();                                                          \
    gen_op_store_T0_gpr64(rD(ctx->opcode));                                   \
}

GEN_SPEOP_ARITH_IMM2(evaddw);
#define gen_evaddiw gen_evaddwi
GEN_SPEOP_ARITH_IMM2(evsubfw);
#define gen_evsubifw gen_evsubfwi
GEN_SPEOP_LOGIC_IMM2(evslw);
GEN_SPEOP_LOGIC_IMM2(evsrwu);
#define gen_evsrwis gen_evsrwsi
GEN_SPEOP_LOGIC_IMM2(evsrws);
#define gen_evsrwiu gen_evsrwui
GEN_SPEOP_LOGIC_IMM2(evrlw);

/* evsplati: sign-extend the 5-bit rA field (shift into and back out of
 * the top bits) and splat it into both words of rD.
 * No SPE-enabled check here — NOTE(review): the other SPE ops trap when
 * MSR[SPE] is clear; confirm this omission is intentional. */
static inline void gen_evsplati (DisasContext *ctx)
{
    int32_t imm = (int32_t)(rA(ctx->opcode) << 27) >> 27;

    gen_op_splatwi_T0_64(imm);
    gen_op_store_T0_gpr64(rD(ctx->opcode));
}

/* evsplatfi: place the 5-bit immediate in the top bits of each word
 * (fractional splat) */
static inline void gen_evsplatfi (DisasContext *ctx)
{
    uint32_t imm = rA(ctx->opcode) << 27;

    gen_op_splatwi_T0_64(imm);
    gen_op_store_T0_gpr64(rD(ctx->opcode));
}
  4780 +
/* Comparison */
GEN_SPEOP_COMP(evcmpgtu);
GEN_SPEOP_COMP(evcmpgts);
GEN_SPEOP_COMP(evcmpltu);
GEN_SPEOP_COMP(evcmplts);
GEN_SPEOP_COMP(evcmpeq);

/* Opcode table for the SPE integer/logical group (primary 0x04,
 * opc3 0x08); the trailing "//" markers are the author's progress
 * annotations and carry no code meaning. */
GEN_SPE(evaddw,         speundef,      0x00, 0x08, 0x00000000, PPC_SPE); ////
GEN_SPE(evaddiw,        speundef,      0x01, 0x08, 0x00000000, PPC_SPE);
GEN_SPE(evsubfw,        speundef,      0x02, 0x08, 0x00000000, PPC_SPE); ////
GEN_SPE(evsubifw,       speundef,      0x03, 0x08, 0x00000000, PPC_SPE);
GEN_SPE(evabs,          evneg,         0x04, 0x08, 0x0000F800, PPC_SPE); ////
GEN_SPE(evextsb,        evextsh,       0x05, 0x08, 0x0000F800, PPC_SPE); ////
GEN_SPE(evrndw,         evcntlzw,      0x06, 0x08, 0x0000F800, PPC_SPE); ////
GEN_SPE(evcntlsw,       brinc,         0x07, 0x08, 0x00000000, PPC_SPE); //
GEN_SPE(speundef,       evand,         0x08, 0x08, 0x00000000, PPC_SPE); ////
GEN_SPE(evandc,         speundef,      0x09, 0x08, 0x00000000, PPC_SPE); ////
GEN_SPE(evxor,          evor,          0x0B, 0x08, 0x00000000, PPC_SPE); ////
GEN_SPE(evnor,          eveqv,         0x0C, 0x08, 0x00000000, PPC_SPE); ////
GEN_SPE(speundef,       evorc,         0x0D, 0x08, 0x00000000, PPC_SPE); ////
GEN_SPE(evnand,         speundef,      0x0F, 0x08, 0x00000000, PPC_SPE); ////
GEN_SPE(evsrwu,         evsrws,        0x10, 0x08, 0x00000000, PPC_SPE); ////
GEN_SPE(evsrwiu,        evsrwis,       0x11, 0x08, 0x00000000, PPC_SPE);
GEN_SPE(evslw,          speundef,      0x12, 0x08, 0x00000000, PPC_SPE); ////
GEN_SPE(evslwi,         speundef,      0x13, 0x08, 0x00000000, PPC_SPE);
GEN_SPE(evrlw,          evsplati,      0x14, 0x08, 0x00000000, PPC_SPE); //
GEN_SPE(evrlwi,         evsplatfi,     0x15, 0x08, 0x00000000, PPC_SPE);
GEN_SPE(evmergehi,      evmergelo,     0x16, 0x08, 0x00000000, PPC_SPE); ////
GEN_SPE(evmergehilo,    evmergelohi,   0x17, 0x08, 0x00000000, PPC_SPE); ////
GEN_SPE(evcmpgtu,       evcmpgts,      0x18, 0x08, 0x00600000, PPC_SPE); ////
GEN_SPE(evcmpltu,       evcmplts,      0x19, 0x08, 0x00600000, PPC_SPE); ////
GEN_SPE(evcmpeq,        speundef,      0x1A, 0x08, 0x00600000, PPC_SPE); ////
  4813 +
/* evsel: per-word select between rA and rB, driven by two CR bits.
 * The CR field index is encoded in the low 3 opcode bits and is loaded
 * into T0 (32-bit temp); the 64-bit operands then go through the
 * separate T0_64/T1_64 temporaries, so T0 is not clobbered. */
static inline void gen_evsel (DisasContext *ctx)
{
    if (unlikely(!ctx->spe_enabled)) {
        RET_EXCP(ctx, EXCP_NO_SPE, 0);
        return;
    }
    gen_op_load_crf_T0(ctx->opcode & 0x7);
    gen_op_load_gpr64_T0(rA(ctx->opcode));
    gen_op_load_gpr64_T1(rB(ctx->opcode));
    gen_op_evsel();
    gen_op_store_T0_gpr64(rD(ctx->opcode));
}

/* Four opc2 slots (0x1c-0x1f) alias to the same evsel implementation:
 * the low bits of opc2 are part of the CR field number. */
GEN_HANDLER(evsel0, 0x04, 0x1c, 0x09, 0x00000000, PPC_SPE)
{
    gen_evsel(ctx);
}
GEN_HANDLER(evsel1, 0x04, 0x1d, 0x09, 0x00000000, PPC_SPE)
{
    gen_evsel(ctx);
}
GEN_HANDLER(evsel2, 0x04, 0x1e, 0x09, 0x00000000, PPC_SPE)
{
    gen_evsel(ctx);
}
GEN_HANDLER(evsel3, 0x04, 0x1f, 0x09, 0x00000000, PPC_SPE)
{
    gen_evsel(ctx);
}
  4843 +
/* Load and stores */
#if defined(TARGET_PPC64)
/* In that case, we already have 64 bits load & stores
 * so, spe_ldd is equivalent to ld and spe_stdd is equivalent to std
 */
#if defined(CONFIG_USER_ONLY)
#define gen_op_spe_ldd_raw gen_op_ld_raw
#define gen_op_spe_ldd_64_raw gen_op_ld_64_raw
#define gen_op_spe_ldd_le_raw gen_op_ld_le_raw
#define gen_op_spe_ldd_le_64_raw gen_op_ld_le_64_raw
/* Bug fix: the stdd alias must map to the std (store) op; the original
 * aliased it to gen_op_ld_raw, turning evstdd into a load. */
#define gen_op_spe_stdd_raw gen_op_std_raw
#define gen_op_spe_stdd_64_raw gen_op_std_64_raw
#define gen_op_spe_stdd_le_raw gen_op_std_le_raw
#define gen_op_spe_stdd_le_64_raw gen_op_std_le_64_raw
#else /* defined(CONFIG_USER_ONLY) */
#define gen_op_spe_ldd_kernel gen_op_ld_kernel
#define gen_op_spe_ldd_64_kernel gen_op_ld_64_kernel
/* Bug fix: the little-endian kernel aliases must use the _le ops,
 * mirroring the user-mode entries below; the original mapped them to
 * the big-endian accessors. */
#define gen_op_spe_ldd_le_kernel gen_op_ld_le_kernel
#define gen_op_spe_ldd_le_64_kernel gen_op_ld_le_64_kernel
#define gen_op_spe_ldd_user gen_op_ld_user
#define gen_op_spe_ldd_64_user gen_op_ld_64_user
#define gen_op_spe_ldd_le_user gen_op_ld_le_user
#define gen_op_spe_ldd_le_64_user gen_op_ld_le_64_user
#define gen_op_spe_stdd_kernel gen_op_std_kernel
#define gen_op_spe_stdd_64_kernel gen_op_std_64_kernel
#define gen_op_spe_stdd_le_kernel gen_op_std_le_kernel
#define gen_op_spe_stdd_le_64_kernel gen_op_std_le_64_kernel
#define gen_op_spe_stdd_user gen_op_std_user
#define gen_op_spe_stdd_64_user gen_op_std_64_user
#define gen_op_spe_stdd_le_user gen_op_std_le_user
#define gen_op_spe_stdd_le_64_user gen_op_std_le_64_user
#endif /* defined(CONFIG_USER_ONLY) */
#endif /* defined(TARGET_PPC64) */
/* Instantiate the load/store pairs; the second argument is the address
 * scale (log2 of the access size) for the immediate-offset form. */
GEN_SPEOP_LDST(dd, 3);
GEN_SPEOP_LDST(dw, 3);
GEN_SPEOP_LDST(dh, 3);
GEN_SPEOP_LDST(whe, 2);
GEN_SPEOP_LD(whou, 2);
GEN_SPEOP_LD(whos, 2);
GEN_SPEOP_ST(who, 2);
  4884 +
#if defined(TARGET_PPC64)
/* In that case, spe_stwwo is equivalent to stw */
#if defined(CONFIG_USER_ONLY)
#define gen_op_spe_stwwo_raw gen_op_stw_raw
#define gen_op_spe_stwwo_le_raw gen_op_stw_le_raw
#define gen_op_spe_stwwo_64_raw gen_op_stw_64_raw
#define gen_op_spe_stwwo_le_64_raw gen_op_stw_le_64_raw
#else
#define gen_op_spe_stwwo_user gen_op_stw_user
#define gen_op_spe_stwwo_le_user gen_op_stw_le_user
#define gen_op_spe_stwwo_64_user gen_op_stw_64_user
#define gen_op_spe_stwwo_le_64_user gen_op_stw_le_64_user
#define gen_op_spe_stwwo_kernel gen_op_stw_kernel
#define gen_op_spe_stwwo_le_kernel gen_op_stw_le_kernel
#define gen_op_spe_stwwo_64_kernel gen_op_stw_64_kernel
#define gen_op_spe_stwwo_le_64_kernel gen_op_stw_le_64_kernel
#endif
#endif
/* evstwwe stores the even (upper) word: shift the 64-bit source right
 * by 32, then reuse the odd-word (wwo) store. */
#define _GEN_OP_SPE_STWWE(suffix)                                             \
static inline void gen_op_spe_stwwe_##suffix (void)                           \
{                                                                             \
    gen_op_srli32_T1_64();                                                    \
    gen_op_spe_stwwo_##suffix();                                              \
}
#define _GEN_OP_SPE_STWWE_LE(suffix)                                          \
static inline void gen_op_spe_stwwe_le_##suffix (void)                        \
{                                                                             \
    gen_op_srli32_T1_64();                                                    \
    gen_op_spe_stwwo_le_##suffix();                                           \
}
#if defined(TARGET_PPC64)
/* 64-bit targets also need the _64 access-mode variants */
#define GEN_OP_SPE_STWWE(suffix)                                              \
_GEN_OP_SPE_STWWE(suffix);                                                    \
_GEN_OP_SPE_STWWE_LE(suffix);                                                 \
static inline void gen_op_spe_stwwe_64_##suffix (void)                        \
{                                                                             \
    gen_op_srli32_T1_64();                                                    \
    gen_op_spe_stwwo_64_##suffix();                                           \
}                                                                             \
static inline void gen_op_spe_stwwe_le_64_##suffix (void)                     \
{                                                                             \
    gen_op_srli32_T1_64();                                                    \
    gen_op_spe_stwwo_le_64_##suffix();                                        \
}
#else
#define GEN_OP_SPE_STWWE(suffix)                                              \
_GEN_OP_SPE_STWWE(suffix);                                                    \
_GEN_OP_SPE_STWWE_LE(suffix)
#endif
#if defined(CONFIG_USER_ONLY)
GEN_OP_SPE_STWWE(raw);
#else /* defined(CONFIG_USER_ONLY) */
GEN_OP_SPE_STWWE(kernel);
GEN_OP_SPE_STWWE(user);
#endif /* defined(CONFIG_USER_ONLY) */
GEN_SPEOP_ST(wwe, 2);
GEN_SPEOP_ST(wwo, 2);
  4942 +
/* Build a splat load from a base load op: run the load, then replicate
 * the loaded value across the element lanes via splatw. */
#define GEN_SPE_LDSPLAT(name, op, suffix)                                     \
static inline void gen_op_spe_l##name##_##suffix (void)                       \
{                                                                             \
    gen_op_##op##_##suffix();                                                 \
    gen_op_splatw_T1_64();                                                    \
}

/* lhe: base half-word load followed by sli16 (presumably a left shift
 * by 16 of each element to reach the even half — TODO confirm). */
#define GEN_OP_SPE_LHE(suffix)                                                \
static inline void gen_op_spe_lhe_##suffix (void)                             \
{                                                                             \
    gen_op_spe_lh_##suffix();                                                 \
    gen_op_sli16_T1_64();                                                     \
}

/* lhx: base half-word load followed by per-element sign extension */
#define GEN_OP_SPE_LHX(suffix)                                                \
static inline void gen_op_spe_lhx_##suffix (void)                             \
{                                                                             \
    gen_op_spe_lh_##suffix();                                                 \
    gen_op_extsh_T1_64();                                                     \
}
  4963 +
/* Instantiate the lhe/lhx helpers and the three half-word splat loads
 * for every access-mode suffix the dispatch tables expect. */
#if defined(CONFIG_USER_ONLY)
GEN_OP_SPE_LHE(raw);
GEN_SPE_LDSPLAT(hhesplat, spe_lhe, raw);
GEN_OP_SPE_LHE(le_raw);
GEN_SPE_LDSPLAT(hhesplat, spe_lhe, le_raw);
GEN_SPE_LDSPLAT(hhousplat, spe_lh, raw);
GEN_SPE_LDSPLAT(hhousplat, spe_lh, le_raw);
GEN_OP_SPE_LHX(raw);
GEN_SPE_LDSPLAT(hhossplat, spe_lhx, raw);
GEN_OP_SPE_LHX(le_raw);
GEN_SPE_LDSPLAT(hhossplat, spe_lhx, le_raw);
#if defined(TARGET_PPC64)
GEN_OP_SPE_LHE(64_raw);
GEN_SPE_LDSPLAT(hhesplat, spe_lhe, 64_raw);
GEN_OP_SPE_LHE(le_64_raw);
GEN_SPE_LDSPLAT(hhesplat, spe_lhe, le_64_raw);
GEN_SPE_LDSPLAT(hhousplat, spe_lh, 64_raw);
GEN_SPE_LDSPLAT(hhousplat, spe_lh, le_64_raw);
GEN_OP_SPE_LHX(64_raw);
GEN_SPE_LDSPLAT(hhossplat, spe_lhx, 64_raw);
GEN_OP_SPE_LHX(le_64_raw);
GEN_SPE_LDSPLAT(hhossplat, spe_lhx, le_64_raw);
#endif
#else
GEN_OP_SPE_LHE(kernel);
GEN_OP_SPE_LHE(user);
GEN_SPE_LDSPLAT(hhesplat, spe_lhe, kernel);
GEN_SPE_LDSPLAT(hhesplat, spe_lhe, user);
GEN_OP_SPE_LHE(le_kernel);
GEN_OP_SPE_LHE(le_user);
GEN_SPE_LDSPLAT(hhesplat, spe_lhe, le_kernel);
GEN_SPE_LDSPLAT(hhesplat, spe_lhe, le_user);
GEN_SPE_LDSPLAT(hhousplat, spe_lh, kernel);
GEN_SPE_LDSPLAT(hhousplat, spe_lh, user);
GEN_SPE_LDSPLAT(hhousplat, spe_lh, le_kernel);
GEN_SPE_LDSPLAT(hhousplat, spe_lh, le_user);
GEN_OP_SPE_LHX(kernel);
GEN_OP_SPE_LHX(user);
GEN_SPE_LDSPLAT(hhossplat, spe_lhx, kernel);
GEN_SPE_LDSPLAT(hhossplat, spe_lhx, user);
GEN_OP_SPE_LHX(le_kernel);
GEN_OP_SPE_LHX(le_user);
GEN_SPE_LDSPLAT(hhossplat, spe_lhx, le_kernel);
GEN_SPE_LDSPLAT(hhossplat, spe_lhx, le_user);
#if defined(TARGET_PPC64)
GEN_OP_SPE_LHE(64_kernel);
GEN_OP_SPE_LHE(64_user);
GEN_SPE_LDSPLAT(hhesplat, spe_lhe, 64_kernel);
GEN_SPE_LDSPLAT(hhesplat, spe_lhe, 64_user);
GEN_OP_SPE_LHE(le_64_kernel);
GEN_OP_SPE_LHE(le_64_user);
GEN_SPE_LDSPLAT(hhesplat, spe_lhe, le_64_kernel);
GEN_SPE_LDSPLAT(hhesplat, spe_lhe, le_64_user);
GEN_SPE_LDSPLAT(hhousplat, spe_lh, 64_kernel);
GEN_SPE_LDSPLAT(hhousplat, spe_lh, 64_user);
GEN_SPE_LDSPLAT(hhousplat, spe_lh, le_64_kernel);
GEN_SPE_LDSPLAT(hhousplat, spe_lh, le_64_user);
GEN_OP_SPE_LHX(64_kernel);
GEN_OP_SPE_LHX(64_user);
GEN_SPE_LDSPLAT(hhossplat, spe_lhx, 64_kernel);
GEN_SPE_LDSPLAT(hhossplat, spe_lhx, 64_user);
GEN_OP_SPE_LHX(le_64_kernel);
GEN_OP_SPE_LHX(le_64_user);
GEN_SPE_LDSPLAT(hhossplat, spe_lhx, le_64_kernel);
GEN_SPE_LDSPLAT(hhossplat, spe_lhx, le_64_user);
#endif
#endif
GEN_SPEOP_LD(hhesplat, 1);
GEN_SPEOP_LD(hhousplat, 1);
GEN_SPEOP_LD(hhossplat, 1);
GEN_SPEOP_LD(wwsplat, 2);
GEN_SPEOP_LD(whsplat, 2);
  5036 +
/* SPE load/store opcode table (primary 0x04, opc3 0x0C); Rc=0 selects
 * the indexed form, Rc=1 the immediate-offset form. */
GEN_SPE(evlddx,         evldd,         0x00, 0x0C, 0x00000000, PPC_SPE); //
GEN_SPE(evldwx,         evldw,         0x01, 0x0C, 0x00000000, PPC_SPE); //
GEN_SPE(evldhx,         evldh,         0x02, 0x0C, 0x00000000, PPC_SPE); //
GEN_SPE(evlhhesplatx,   evlhhesplat,   0x04, 0x0C, 0x00000000, PPC_SPE); //
GEN_SPE(evlhhousplatx,  evlhhousplat,  0x06, 0x0C, 0x00000000, PPC_SPE); //
GEN_SPE(evlhhossplatx,  evlhhossplat,  0x07, 0x0C, 0x00000000, PPC_SPE); //
GEN_SPE(evlwhex,        evlwhe,        0x08, 0x0C, 0x00000000, PPC_SPE); //
GEN_SPE(evlwhoux,       evlwhou,       0x0A, 0x0C, 0x00000000, PPC_SPE); //
GEN_SPE(evlwhosx,       evlwhos,       0x0B, 0x0C, 0x00000000, PPC_SPE); //
GEN_SPE(evlwwsplatx,    evlwwsplat,    0x0C, 0x0C, 0x00000000, PPC_SPE); //
GEN_SPE(evlwhsplatx,    evlwhsplat,    0x0E, 0x0C, 0x00000000, PPC_SPE); //
GEN_SPE(evstddx,        evstdd,        0x10, 0x0C, 0x00000000, PPC_SPE); //
GEN_SPE(evstdwx,        evstdw,        0x11, 0x0C, 0x00000000, PPC_SPE); //
GEN_SPE(evstdhx,        evstdh,        0x12, 0x0C, 0x00000000, PPC_SPE); //
GEN_SPE(evstwhex,       evstwhe,       0x18, 0x0C, 0x00000000, PPC_SPE); //
GEN_SPE(evstwhox,       evstwho,       0x1A, 0x0C, 0x00000000, PPC_SPE); //
GEN_SPE(evstwwex,       evstwwe,       0x1C, 0x0C, 0x00000000, PPC_SPE); //
GEN_SPE(evstwwox,       evstwwo,       0x1E, 0x0C, 0x00000000, PPC_SPE); //
  5055 +
/* Multiply and add - TODO */
/* The whole multiply-accumulate group is compiled out until the
 * corresponding gen_* handlers are implemented; the table is kept here
 * so the opcode assignments are not lost. */
#if 0
GEN_SPE(speundef,       evmhessf,      0x01, 0x10, 0x00000000, PPC_SPE);
GEN_SPE(speundef,       evmhossf,      0x03, 0x10, 0x00000000, PPC_SPE);
GEN_SPE(evmheumi,       evmhesmi,      0x04, 0x10, 0x00000000, PPC_SPE);
GEN_SPE(speundef,       evmhesmf,      0x05, 0x10, 0x00000000, PPC_SPE);
GEN_SPE(evmhoumi,       evmhosmi,      0x06, 0x10, 0x00000000, PPC_SPE);
GEN_SPE(speundef,       evmhosmf,      0x07, 0x10, 0x00000000, PPC_SPE);
GEN_SPE(speundef,       evmhessfa,     0x11, 0x10, 0x00000000, PPC_SPE);
GEN_SPE(speundef,       evmhossfa,     0x13, 0x10, 0x00000000, PPC_SPE);
GEN_SPE(evmheumia,      evmhesmia,     0x14, 0x10, 0x00000000, PPC_SPE);
GEN_SPE(speundef,       evmhesmfa,     0x15, 0x10, 0x00000000, PPC_SPE);
GEN_SPE(evmhoumia,      evmhosmia,     0x16, 0x10, 0x00000000, PPC_SPE);
GEN_SPE(speundef,       evmhosmfa,     0x17, 0x10, 0x00000000, PPC_SPE);

GEN_SPE(speundef,       evmwhssf,      0x03, 0x11, 0x00000000, PPC_SPE);
GEN_SPE(evmwlumi,       speundef,      0x04, 0x11, 0x00000000, PPC_SPE);
GEN_SPE(evmwhumi,       evmwhsmi,      0x06, 0x11, 0x00000000, PPC_SPE);
GEN_SPE(speundef,       evmwhsmf,      0x07, 0x11, 0x00000000, PPC_SPE);
GEN_SPE(speundef,       evmwssf,       0x09, 0x11, 0x00000000, PPC_SPE);
GEN_SPE(evmwumi,        evmwsmi,       0x0C, 0x11, 0x00000000, PPC_SPE);
GEN_SPE(speundef,       evmwsmf,       0x0D, 0x11, 0x00000000, PPC_SPE);
GEN_SPE(speundef,       evmwhssfa,     0x13, 0x11, 0x00000000, PPC_SPE);
GEN_SPE(evmwlumia,      speundef,      0x14, 0x11, 0x00000000, PPC_SPE);
GEN_SPE(evmwhumia,      evmwhsmia,     0x16, 0x11, 0x00000000, PPC_SPE);
GEN_SPE(speundef,       evmwhsmfa,     0x17, 0x11, 0x00000000, PPC_SPE);
GEN_SPE(speundef,       evmwssfa,      0x19, 0x11, 0x00000000, PPC_SPE);
GEN_SPE(evmwumia,       evmwsmia,      0x1C, 0x11, 0x00000000, PPC_SPE);
GEN_SPE(speundef,       evmwsmfa,      0x1D, 0x11, 0x00000000, PPC_SPE);

GEN_SPE(evadduiaaw,     evaddsiaaw,    0x00, 0x13, 0x0000F800, PPC_SPE);
GEN_SPE(evsubfusiaaw,   evsubfssiaaw,  0x01, 0x13, 0x0000F800, PPC_SPE);
GEN_SPE(evaddumiaaw,    evaddsmiaaw,   0x04, 0x13, 0x0000F800, PPC_SPE);
GEN_SPE(evsubfumiaaw,   evsubfsmiaaw,  0x05, 0x13, 0x0000F800, PPC_SPE);
GEN_SPE(evdivws,        evdivwu,       0x06, 0x13, 0x00000000, PPC_SPE);
GEN_SPE(evmra,          speundef,      0x07, 0x13, 0x0000F800, PPC_SPE);

GEN_SPE(evmheusiaaw,    evmhessiaaw,   0x00, 0x14, 0x00000000, PPC_SPE);
GEN_SPE(speundef,       evmhessfaaw,   0x01, 0x14, 0x00000000, PPC_SPE);
GEN_SPE(evmhousiaaw,    evmhossiaaw,   0x02, 0x14, 0x00000000, PPC_SPE);
GEN_SPE(speundef,       evmhossfaaw,   0x03, 0x14, 0x00000000, PPC_SPE);
GEN_SPE(evmheumiaaw,    evmhesmiaaw,   0x04, 0x14, 0x00000000, PPC_SPE);
GEN_SPE(speundef,       evmhesmfaaw,   0x05, 0x14, 0x00000000, PPC_SPE);
GEN_SPE(evmhoumiaaw,    evmhosmiaaw,   0x06, 0x14, 0x00000000, PPC_SPE);
GEN_SPE(speundef,       evmhosmfaaw,   0x07, 0x14, 0x00000000, PPC_SPE);
GEN_SPE(evmhegumiaa,    evmhegsmiaa,   0x14, 0x14, 0x00000000, PPC_SPE);
GEN_SPE(speundef,       evmhegsmfaa,   0x15, 0x14, 0x00000000, PPC_SPE);
GEN_SPE(evmhogumiaa,    evmhogsmiaa,   0x16, 0x14, 0x00000000, PPC_SPE);
GEN_SPE(speundef,       evmhogsmfaa,   0x17, 0x14, 0x00000000, PPC_SPE);

GEN_SPE(evmwlusiaaw,    evmwlssiaaw,   0x00, 0x15, 0x00000000, PPC_SPE);
GEN_SPE(evmwlumiaaw,    evmwlsmiaaw,   0x04, 0x15, 0x00000000, PPC_SPE);
GEN_SPE(speundef,       evmwssfaa,     0x09, 0x15, 0x00000000, PPC_SPE);
GEN_SPE(evmwumiaa,      evmwsmiaa,     0x0C, 0x15, 0x00000000, PPC_SPE);
GEN_SPE(speundef,       evmwsmfaa,     0x0D, 0x15, 0x00000000, PPC_SPE);

GEN_SPE(evmheusianw,    evmhessianw,   0x00, 0x16, 0x00000000, PPC_SPE);
GEN_SPE(speundef,       evmhessfanw,   0x01, 0x16, 0x00000000, PPC_SPE);
GEN_SPE(evmhousianw,    evmhossianw,   0x02, 0x16, 0x00000000, PPC_SPE);
GEN_SPE(speundef,       evmhossfanw,   0x03, 0x16, 0x00000000, PPC_SPE);
GEN_SPE(evmheumianw,    evmhesmianw,   0x04, 0x16, 0x00000000, PPC_SPE);
GEN_SPE(speundef,       evmhesmfanw,   0x05, 0x16, 0x00000000, PPC_SPE);
GEN_SPE(evmhoumianw,    evmhosmianw,   0x06, 0x16, 0x00000000, PPC_SPE);
GEN_SPE(speundef,       evmhosmfanw,   0x07, 0x16, 0x00000000, PPC_SPE);
GEN_SPE(evmhegumian,    evmhegsmian,   0x14, 0x16, 0x00000000, PPC_SPE);
GEN_SPE(speundef,       evmhegsmfan,   0x15, 0x16, 0x00000000, PPC_SPE);
/* NOTE(review): opcode 0x16/0x16 is named evmhogumian/evmhogsmian in
 * the SPE ISA; "evmhigumian" looks like a typo — verify before
 * enabling this table. */
GEN_SPE(evmhigumian,    evmhigsmian,   0x16, 0x16, 0x00000000, PPC_SPE);
GEN_SPE(speundef,       evmhogsmfan,   0x17, 0x16, 0x00000000, PPC_SPE);

GEN_SPE(evmwlusianw,    evmwlssianw,   0x00, 0x17, 0x00000000, PPC_SPE);
GEN_SPE(evmwlumianw,    evmwlsmianw,   0x04, 0x17, 0x00000000, PPC_SPE);
GEN_SPE(speundef,       evmwssfan,     0x09, 0x17, 0x00000000, PPC_SPE);
GEN_SPE(evmwumian,      evmwsmian,     0x0C, 0x17, 0x00000000, PPC_SPE);
GEN_SPE(speundef,       evmwsmfan,     0x0D, 0x17, 0x00000000, PPC_SPE);
#endif
  5131 +
  5132 +/*** SPE floating-point extension ***/
  5133 +/* Unary FP conversion helper: rD = gen_op_<name>(rB) on 64-bit GPR views */ \
  5134 +#define GEN_SPEFPUOP_CONV(name) \
  5135 +static inline void gen_##name (DisasContext *ctx) \
  5136 +{ \
  5137 + gen_op_load_gpr64_T0(rB(ctx->opcode)); /* T0 <- 64-bit rB */ \
  5138 + gen_op_##name(); /* T0 <- converted value */ \
  5139 + gen_op_store_T0_gpr64(rD(ctx->opcode)); /* 64-bit rD <- T0 */ \
  5140 +}
  5140 +
  5141 +/* Single precision floating-point vector operations */
  5142 +/* Arithmetic */
  5143 +GEN_SPEOP_ARITH2(evfsadd);
  5144 +GEN_SPEOP_ARITH2(evfssub);
  5145 +GEN_SPEOP_ARITH2(evfsmul);
  5146 +GEN_SPEOP_ARITH2(evfsdiv);
  5147 +GEN_SPEOP_ARITH1(evfsabs);
  5148 +GEN_SPEOP_ARITH1(evfsnabs);
  5149 +GEN_SPEOP_ARITH1(evfsneg);
  5150 +/* Conversion */
  5151 +GEN_SPEFPUOP_CONV(evfscfui);
  5152 +GEN_SPEFPUOP_CONV(evfscfsi);
  5153 +GEN_SPEFPUOP_CONV(evfscfuf);
  5154 +GEN_SPEFPUOP_CONV(evfscfsf);
  5155 +GEN_SPEFPUOP_CONV(evfsctui);
  5156 +GEN_SPEFPUOP_CONV(evfsctsi);
  5157 +GEN_SPEFPUOP_CONV(evfsctuf);
  5158 +GEN_SPEFPUOP_CONV(evfsctsf);
  5159 +GEN_SPEFPUOP_CONV(evfsctuiz);
  5160 +GEN_SPEFPUOP_CONV(evfsctsiz);
  5161 +/* Comparison */
  5162 +GEN_SPEOP_COMP(evfscmpgt);
  5163 +GEN_SPEOP_COMP(evfscmplt);
  5164 +GEN_SPEOP_COMP(evfscmpeq);
  5165 +GEN_SPEOP_COMP(evfststgt);
  5166 +GEN_SPEOP_COMP(evfststlt);
  5167 +GEN_SPEOP_COMP(evfststeq);
  5168 +
  5169 +/* Opcode definitions: evfs* vector ops share sub-opcode space 0x0A.
  5170 +   NOTE(review): 5th GEN_SPE argument is presumably an invalid-bits mask —
  5171 +   confirm against the GEN_SPE macro definition. */
  5172 +GEN_SPE(evfsadd, evfssub, 0x00, 0x0A, 0x00000000, PPC_SPEFPU); //
  5173 +GEN_SPE(evfsabs, evfsnabs, 0x02, 0x0A, 0x0000F800, PPC_SPEFPU); //
  5174 +GEN_SPE(evfsneg, speundef, 0x03, 0x0A, 0x0000F800, PPC_SPEFPU); //
  5175 +GEN_SPE(evfsmul, evfsdiv, 0x04, 0x0A, 0x00000000, PPC_SPEFPU); //
  5176 +GEN_SPE(evfscmpgt, evfscmplt, 0x06, 0x0A, 0x00600000, PPC_SPEFPU); //
  5177 +GEN_SPE(evfscmpeq, speundef, 0x07, 0x0A, 0x00600000, PPC_SPEFPU); //
  5178 +GEN_SPE(evfscfui, evfscfsi, 0x08, 0x0A, 0x00180000, PPC_SPEFPU); //
  5179 +GEN_SPE(evfscfuf, evfscfsf, 0x09, 0x0A, 0x00180000, PPC_SPEFPU); //
  5180 +GEN_SPE(evfsctui, evfsctsi, 0x0A, 0x0A, 0x00180000, PPC_SPEFPU); //
  5181 +GEN_SPE(evfsctuf, evfsctsf, 0x0B, 0x0A, 0x00180000, PPC_SPEFPU); //
  5182 +GEN_SPE(evfsctuiz, speundef, 0x0C, 0x0A, 0x00180000, PPC_SPEFPU); //
  5183 +GEN_SPE(evfsctsiz, speundef, 0x0D, 0x0A, 0x00180000, PPC_SPEFPU); //
  5184 +GEN_SPE(evfststgt, evfststlt, 0x0E, 0x0A, 0x00600000, PPC_SPEFPU); //
  5185 +GEN_SPE(evfststeq, speundef, 0x0F, 0x0A, 0x00600000, PPC_SPEFPU); //
  5184 +
  5185 +/* Single precision floating-point operations */
  5186 +/* Arithmetic */
  5187 +GEN_SPEOP_ARITH2(efsadd);
  5188 +GEN_SPEOP_ARITH2(efssub);
  5189 +GEN_SPEOP_ARITH2(efsmul);
  5190 +GEN_SPEOP_ARITH2(efsdiv);
  5191 +GEN_SPEOP_ARITH1(efsabs);
  5192 +GEN_SPEOP_ARITH1(efsnabs);
  5193 +GEN_SPEOP_ARITH1(efsneg);
  5194 +/* Conversion */
  5195 +GEN_SPEFPUOP_CONV(efscfui);
  5196 +GEN_SPEFPUOP_CONV(efscfsi);
  5197 +GEN_SPEFPUOP_CONV(efscfuf);
  5198 +GEN_SPEFPUOP_CONV(efscfsf);
  5199 +GEN_SPEFPUOP_CONV(efsctui);
  5200 +GEN_SPEFPUOP_CONV(efsctsi);
  5201 +GEN_SPEFPUOP_CONV(efsctuf);
  5202 +GEN_SPEFPUOP_CONV(efsctsf);
  5203 +GEN_SPEFPUOP_CONV(efsctuiz);
  5204 +GEN_SPEFPUOP_CONV(efsctsiz);
  5205 +GEN_SPEFPUOP_CONV(efscfd);
  5206 +/* Comparison */
  5207 +GEN_SPEOP_COMP(efscmpgt);
  5208 +GEN_SPEOP_COMP(efscmplt);
  5209 +GEN_SPEOP_COMP(efscmpeq);
  5210 +GEN_SPEOP_COMP(efststgt);
  5211 +GEN_SPEOP_COMP(efststlt);
  5212 +GEN_SPEOP_COMP(efststeq);
  5213 +
  5214 +/* Opcodes definitions */
  5215 +GEN_SPE(efsadd, efssub, 0x00, 0x0A, 0x00000000, PPC_SPEFPU); //
  5216 +GEN_SPE(efsabs, efsnabs, 0x02, 0x0B, 0x0000F800, PPC_SPEFPU); //
  5217 +GEN_SPE(efsneg, speundef, 0x03, 0x0B, 0x0000F800, PPC_SPEFPU); //
  5218 +GEN_SPE(efsmul, efsdiv, 0x04, 0x0B, 0x00000000, PPC_SPEFPU); //
  5219 +GEN_SPE(efscmpgt, efscmplt, 0x06, 0x0B, 0x00600000, PPC_SPEFPU); //
  5220 +GEN_SPE(efscmpeq, efscfd, 0x07, 0x0B, 0x00600000, PPC_SPEFPU); //
  5221 +GEN_SPE(efscfui, efscfsi, 0x08, 0x0B, 0x00180000, PPC_SPEFPU); //
  5222 +GEN_SPE(efscfuf, efscfsf, 0x09, 0x0B, 0x00180000, PPC_SPEFPU); //
  5223 +GEN_SPE(efsctui, efsctsi, 0x0A, 0x0B, 0x00180000, PPC_SPEFPU); //
  5224 +GEN_SPE(efsctuf, efsctsf, 0x0B, 0x0B, 0x00180000, PPC_SPEFPU); //
  5225 +GEN_SPE(efsctuiz, efsctsiz, 0x0C, 0x0B, 0x00180000, PPC_SPEFPU); //
  5226 +GEN_SPE(efststgt, efststlt, 0x0E, 0x0B, 0x00600000, PPC_SPEFPU); //
  5227 +GEN_SPE(efststeq, speundef, 0x0F, 0x0B, 0x00600000, PPC_SPEFPU); //
  5228 +
  5229 +/* Double precision floating-point operations */
  5230 +/* Arithmetic */
  5231 +GEN_SPEOP_ARITH2(efdadd);
  5232 +GEN_SPEOP_ARITH2(efdsub);
  5233 +GEN_SPEOP_ARITH2(efdmul);
  5234 +GEN_SPEOP_ARITH2(efddiv);
  5235 +GEN_SPEOP_ARITH1(efdabs);
  5236 +GEN_SPEOP_ARITH1(efdnabs);
  5237 +GEN_SPEOP_ARITH1(efdneg);
  5238 +/* Conversion */
  5239 +
  5240 +GEN_SPEFPUOP_CONV(efdcfui);
  5241 +GEN_SPEFPUOP_CONV(efdcfsi);
  5242 +GEN_SPEFPUOP_CONV(efdcfuf);
  5243 +GEN_SPEFPUOP_CONV(efdcfsf);
  5244 +GEN_SPEFPUOP_CONV(efdctui);
  5245 +GEN_SPEFPUOP_CONV(efdctsi);
  5246 +GEN_SPEFPUOP_CONV(efdctuf);
  5247 +GEN_SPEFPUOP_CONV(efdctsf);
  5248 +GEN_SPEFPUOP_CONV(efdctuiz);
  5249 +GEN_SPEFPUOP_CONV(efdctsiz);
  5250 +GEN_SPEFPUOP_CONV(efdcfs);
  5251 +GEN_SPEFPUOP_CONV(efdcfuid);
  5252 +GEN_SPEFPUOP_CONV(efdcfsid);
  5253 +GEN_SPEFPUOP_CONV(efdctuidz);
  5254 +GEN_SPEFPUOP_CONV(efdctsidz);
  5255 +/* Comparison */
  5256 +GEN_SPEOP_COMP(efdcmpgt);
  5257 +GEN_SPEOP_COMP(efdcmplt);
  5258 +GEN_SPEOP_COMP(efdcmpeq);
  5259 +GEN_SPEOP_COMP(efdtstgt);
  5260 +GEN_SPEOP_COMP(efdtstlt);
  5261 +GEN_SPEOP_COMP(efdtsteq);
  5262 +
  5263 +/* Opcode definitions: efd* ops occupy the 0x10-0x1F range of space 0x0B */
  5264 +GEN_SPE(efdadd, efdsub, 0x10, 0x0B, 0x00000000, PPC_SPEFPU); //
  5265 +GEN_SPE(efdcfuid, efdcfsid, 0x11, 0x0B, 0x00180000, PPC_SPEFPU); //
  5266 +GEN_SPE(efdabs, efdnabs, 0x12, 0x0B, 0x0000F800, PPC_SPEFPU); //
  5267 +GEN_SPE(efdneg, speundef, 0x13, 0x0B, 0x0000F800, PPC_SPEFPU); //
  5268 +GEN_SPE(efdmul, efddiv, 0x14, 0x0B, 0x00000000, PPC_SPEFPU); //
  5269 +GEN_SPE(efdctuidz, efdctsidz, 0x15, 0x0B, 0x00180000, PPC_SPEFPU); //
  5270 +GEN_SPE(efdcmpgt, efdcmplt, 0x16, 0x0B, 0x00600000, PPC_SPEFPU); //
  5271 +GEN_SPE(efdcmpeq, efdcfs, 0x17, 0x0B, 0x00600000, PPC_SPEFPU); //
  5272 +GEN_SPE(efdcfui, efdcfsi, 0x18, 0x0B, 0x00180000, PPC_SPEFPU); //
  5273 +GEN_SPE(efdcfuf, efdcfsf, 0x19, 0x0B, 0x00180000, PPC_SPEFPU); //
  5274 +GEN_SPE(efdctui, efdctsi, 0x1A, 0x0B, 0x00180000, PPC_SPEFPU); //
  5275 +GEN_SPE(efdctuf, efdctsf, 0x1B, 0x0B, 0x00180000, PPC_SPEFPU); //
  5276 +GEN_SPE(efdctuiz, speundef, 0x1C, 0x0B, 0x00180000, PPC_SPEFPU); //
  5277 +GEN_SPE(efdctsiz, speundef, 0x1D, 0x0B, 0x00180000, PPC_SPEFPU); //
  5278 +GEN_SPE(efdtstgt, efdtstlt, 0x1E, 0x0B, 0x00600000, PPC_SPEFPU); //
  5279 +GEN_SPE(efdtsteq, speundef, 0x1F, 0x0B, 0x00600000, PPC_SPEFPU); //
  5280 +#endif
  5281 +
4471 5282 /* End opcode list */
4472 5283 GEN_OPCODE_MARK(end);
4473 5284  
... ... @@ -4604,9 +5415,9 @@ void cpu_dump_statistics (CPUState *env, FILE*f,
4604 5415 }
4605 5416  
4606 5417 /*****************************************************************************/
4607   -static inline int
4608   -gen_intermediate_code_internal (CPUState *env, TranslationBlock *tb,
4609   - int search_pc)
  5418 +static inline int gen_intermediate_code_internal (CPUState *env,
  5419 + TranslationBlock *tb,
  5420 + int search_pc)
4610 5421 {
4611 5422 DisasContext ctx, *ctxp = &ctx;
4612 5423 opc_handler_t **table, *handler;
... ... @@ -4639,6 +5450,9 @@ gen_intermediate_code_internal (CPUState *env, TranslationBlock *tb,
4639 5450 ctx.sf_mode = msr_sf;
4640 5451 #endif
4641 5452 ctx.fpu_enabled = msr_fp;
  5453 +#if defined(TARGET_PPCSPE)
  5454 + ctx.spe_enabled = msr_spe;
  5455 +#endif
4642 5456 ctx.singlestep_enabled = env->singlestep_enabled;
4643 5457 #if defined (DO_SINGLE_STEP) && 0
4644 5458 /* Single step trace mode */
... ...
target-ppc/translate_init.c
... ... @@ -30,7 +30,7 @@ struct ppc_def_t {
30 30 const unsigned char *name;
31 31 uint32_t pvr;
32 32 uint32_t pvr_mask;
33   - uint32_t insns_flags;
  33 + uint64_t insns_flags;
34 34 uint32_t flags;
35 35 uint64_t msr_mask;
36 36 };
... ... @@ -2424,7 +2424,8 @@ static int create_ppc_opcodes (CPUPPCState *env, ppc_def_t *def)
2424 2424  
2425 2425 fill_new_table(env->opcodes, 0x40);
2426 2426 #if defined(PPC_DUMP_CPU)
2427   - printf("* PowerPC instructions for PVR %08x: %s flags %08x %08x\n",
  2427 + printf("* PowerPC instructions for PVR %08x: %s flags %016" PRIx64
  2428 + " %08x\n",
2428 2429 def->pvr, def->name, def->insns_flags, def->flags);
2429 2430 #endif
2430 2431 if (&opc_start < &opc_end) {
... ...