Commit c280d62ef35cad1f3674ae83dec256c4a14031af

Authored by Filip Navara
1 parent a699b51a

Convert disas_iwmmxt_insn not to use cpu_T.

Signed-off-by: Filip Navara <filip.navara@gmail.com>
Showing 1 changed file with 219 additions and 252 deletions
target-arm/translate.c
@@ -182,19 +182,9 @@ static void store_reg(DisasContext *s, int reg, TCGv var)
182 182
183 183
184 /* Basic operations. */ 184 /* Basic operations. */
185 -#define gen_op_movl_T0_T1() tcg_gen_mov_i32(cpu_T[0], cpu_T[1])  
186 -#define gen_op_movl_T0_im(im) tcg_gen_movi_i32(cpu_T[0], im)  
187 #define gen_op_movl_T1_im(im) tcg_gen_movi_i32(cpu_T[1], im) 185 #define gen_op_movl_T1_im(im) tcg_gen_movi_i32(cpu_T[1], im)
188 186
189 #define gen_op_addl_T1_im(im) tcg_gen_addi_i32(cpu_T[1], cpu_T[1], im) 187 #define gen_op_addl_T1_im(im) tcg_gen_addi_i32(cpu_T[1], cpu_T[1], im)
190 -#define gen_op_addl_T0_T1() tcg_gen_add_i32(cpu_T[0], cpu_T[0], cpu_T[1])  
191 -  
192 -#define gen_op_andl_T0_T1() tcg_gen_and_i32(cpu_T[0], cpu_T[0], cpu_T[1])  
193 -#define gen_op_orl_T0_T1() tcg_gen_or_i32(cpu_T[0], cpu_T[0], cpu_T[1])  
194 -#define gen_op_notl_T1() tcg_gen_not_i32(cpu_T[1], cpu_T[1])  
195 -  
196 -#define gen_op_shll_T1_im(im) tcg_gen_shli_i32(cpu_T[1], cpu_T[1], im)  
197 -#define gen_op_shrl_T1_im(im) tcg_gen_shri_i32(cpu_T[1], cpu_T[1], im)  
198 188
199 /* Value extensions. */ 189 /* Value extensions. */
200 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var) 190 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
@@ -441,11 +431,6 @@ static inline void tcg_gen_bic_i32(TCGv dest, TCGv t0, TCGv t1)
441 tcg_gen_and_i32(dest, t0, tmp); 431 tcg_gen_and_i32(dest, t0, tmp);
442 dead_tmp(tmp); 432 dead_tmp(tmp);
443 } 433 }
444 -static inline void gen_op_bicl_T0_T1(void)  
445 -{  
446 - gen_op_notl_T1();  
447 - gen_op_andl_T0_T1();  
448 -}  
449 434
450 /* FIXME: Implement this natively. */ 435 /* FIXME: Implement this natively. */
451 #define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1) 436 #define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)
@@ -839,11 +824,6 @@ static inline void gen_st32(TCGv val, TCGv addr, int index)
839 dead_tmp(val); 824 dead_tmp(val);
840 } 825 }
841 826
842 -static inline void gen_movl_T0_reg(DisasContext *s, int reg)  
843 -{  
844 - load_reg_var(s, cpu_T[0], reg);  
845 -}  
846 -  
847 static inline void gen_movl_T1_reg(DisasContext *s, int reg) 827 static inline void gen_movl_T1_reg(DisasContext *s, int reg)
848 { 828 {
849 load_reg_var(s, cpu_T[1], reg); 829 load_reg_var(s, cpu_T[1], reg);
@@ -870,11 +850,6 @@ static inline void gen_movl_reg_TN(DisasContext *s, int reg, int t)
870 } 850 }
871 } 851 }
872 852
873 -static inline void gen_movl_reg_T0(DisasContext *s, int reg)  
874 -{  
875 - gen_movl_reg_TN(s, reg, 0);  
876 -}  
877 -  
878 static inline void gen_movl_reg_T1(DisasContext *s, int reg) 853 static inline void gen_movl_reg_T1(DisasContext *s, int reg)
879 { 854 {
880 gen_movl_reg_TN(s, reg, 1); 855 gen_movl_reg_TN(s, reg, 1);
@@ -1177,19 +1152,16 @@ static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
1177 tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg])); 1152 tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1178 } 1153 }
1179 1154
1180 -static inline void gen_op_iwmmxt_movl_wCx_T0(int reg) 1155 +static inline TCGv iwmmxt_load_creg(int reg)
1181 { 1156 {
1182 - tcg_gen_st_i32(cpu_T[0], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg])); 1157 + TCGv var = new_tmp();
  1158 + tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
  1159 + return var;
1183 } 1160 }
1184 1161
1185 -static inline void gen_op_iwmmxt_movl_T0_wCx(int reg) 1162 +static inline void iwmmxt_store_creg(int reg, TCGv var)
1186 { 1163 {
1187 - tcg_gen_ld_i32(cpu_T[0], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));  
1188 -}  
1189 -  
1190 -static inline void gen_op_iwmmxt_movl_T1_wCx(int reg)  
1191 -{  
1192 - tcg_gen_ld_i32(cpu_T[1], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg])); 1164 + tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1193 } 1165 }
1194 1166
1195 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn) 1167 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
@@ -1302,40 +1274,6 @@ IWMMXT_OP_ENV(packsw)
1302 IWMMXT_OP_ENV(packsl) 1274 IWMMXT_OP_ENV(packsl)
1303 IWMMXT_OP_ENV(packsq) 1275 IWMMXT_OP_ENV(packsq)
1304 1276
1305 -static inline void gen_op_iwmmxt_align_M0_T0_wRn(int rn)  
1306 -{  
1307 - iwmmxt_load_reg(cpu_V1, rn);  
1308 - gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, cpu_T[0]);  
1309 -}  
1310 -  
1311 -static inline void gen_op_iwmmxt_insr_M0_T0_T1(int shift)  
1312 -{  
1313 - TCGv tmp = tcg_const_i32(shift);  
1314 - gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1], tmp);  
1315 -}  
1316 -  
1317 -static inline void gen_op_iwmmxt_extrsb_T0_M0(int shift)  
1318 -{  
1319 - tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);  
1320 - tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);  
1321 - tcg_gen_ext8s_i32(cpu_T[0], cpu_T[0]);  
1322 -}  
1323 -  
1324 -static inline void gen_op_iwmmxt_extrsw_T0_M0(int shift)  
1325 -{  
1326 - tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);  
1327 - tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);  
1328 - tcg_gen_ext16s_i32(cpu_T[0], cpu_T[0]);  
1329 -}  
1330 -  
1331 -static inline void gen_op_iwmmxt_extru_T0_M0(int shift, uint32_t mask)  
1332 -{  
1333 - tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);  
1334 - tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);  
1335 - if (mask != ~0u)  
1336 - tcg_gen_andi_i32(cpu_T[0], cpu_T[0], mask);  
1337 -}  
1338 -  
1339 static void gen_op_iwmmxt_set_mup(void) 1277 static void gen_op_iwmmxt_set_mup(void)
1340 { 1278 {
1341 TCGv tmp; 1279 TCGv tmp;
@@ -1366,65 +1304,59 @@ static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1366 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1); 1304 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1367 } 1305 }
1368 1306
1369 -static void gen_iwmmxt_movl_T0_T1_wRn(int rn)  
1370 -{  
1371 - iwmmxt_load_reg(cpu_V0, rn);  
1372 - tcg_gen_trunc_i64_i32(cpu_T[0], cpu_V0);  
1373 - tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);  
1374 - tcg_gen_trunc_i64_i32(cpu_T[1], cpu_V0);  
1375 -}  
1376 -  
1377 -static void gen_iwmmxt_movl_wRn_T0_T1(int rn)  
1378 -{  
1379 - tcg_gen_concat_i32_i64(cpu_V0, cpu_T[0], cpu_T[1]);  
1380 - iwmmxt_store_reg(cpu_V0, rn);  
1381 -}  
1382 -  
1383 -static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn) 1307 +static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
1384 { 1308 {
1385 int rd; 1309 int rd;
1386 uint32_t offset; 1310 uint32_t offset;
  1311 + TCGv tmp;
1387 1312
1388 rd = (insn >> 16) & 0xf; 1313 rd = (insn >> 16) & 0xf;
1389 - gen_movl_T1_reg(s, rd); 1314 + tmp = load_reg(s, rd);
1390 1315
1391 offset = (insn & 0xff) << ((insn >> 7) & 2); 1316 offset = (insn & 0xff) << ((insn >> 7) & 2);
1392 if (insn & (1 << 24)) { 1317 if (insn & (1 << 24)) {
1393 /* Pre indexed */ 1318 /* Pre indexed */
1394 if (insn & (1 << 23)) 1319 if (insn & (1 << 23))
1395 - gen_op_addl_T1_im(offset); 1320 + tcg_gen_addi_i32(tmp, tmp, offset);
1396 else 1321 else
1397 - gen_op_addl_T1_im(-offset);  
1398 - 1322 + tcg_gen_addi_i32(tmp, tmp, -offset);
  1323 + tcg_gen_mov_i32(dest, tmp);
1399 if (insn & (1 << 21)) 1324 if (insn & (1 << 21))
1400 - gen_movl_reg_T1(s, rd); 1325 + store_reg(s, rd, tmp);
  1326 + else
  1327 + dead_tmp(tmp);
1401 } else if (insn & (1 << 21)) { 1328 } else if (insn & (1 << 21)) {
1402 /* Post indexed */ 1329 /* Post indexed */
  1330 + tcg_gen_mov_i32(dest, tmp);
1403 if (insn & (1 << 23)) 1331 if (insn & (1 << 23))
1404 - gen_op_movl_T0_im(offset); 1332 + tcg_gen_addi_i32(tmp, tmp, offset);
1405 else 1333 else
1406 - gen_op_movl_T0_im(- offset);  
1407 - gen_op_addl_T0_T1();  
1408 - gen_movl_reg_T0(s, rd); 1334 + tcg_gen_addi_i32(tmp, tmp, -offset);
  1335 + store_reg(s, rd, tmp);
1409 } else if (!(insn & (1 << 23))) 1336 } else if (!(insn & (1 << 23)))
1410 return 1; 1337 return 1;
1411 return 0; 1338 return 0;
1412 } 1339 }
1413 1340
1414 -static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask) 1341 +static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
1415 { 1342 {
1416 int rd = (insn >> 0) & 0xf; 1343 int rd = (insn >> 0) & 0xf;
  1344 + TCGv tmp;
1417 1345
1418 - if (insn & (1 << 8))  
1419 - if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) 1346 + if (insn & (1 << 8)) {
  1347 + if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
1420 return 1; 1348 return 1;
1421 - else  
1422 - gen_op_iwmmxt_movl_T0_wCx(rd);  
1423 - else  
1424 - gen_iwmmxt_movl_T0_T1_wRn(rd);  
1425 -  
1426 - gen_op_movl_T1_im(mask);  
1427 - gen_op_andl_T0_T1(); 1349 + } else {
  1350 + tmp = iwmmxt_load_creg(rd);
  1351 + }
  1352 + } else {
  1353 + tmp = new_tmp();
  1354 + iwmmxt_load_reg(cpu_V0, rd);
  1355 + tcg_gen_trunc_i64_i32(tmp, cpu_V0);
  1356 + }
  1357 + tcg_gen_andi_i32(tmp, tmp, mask);
  1358 + tcg_gen_mov_i32(dest, tmp);
  1359 + dead_tmp(tmp);
1428 return 0; 1360 return 0;
1429 } 1361 }
1430 1362
@@ -1434,7 +1366,8 @@ static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
1434 { 1366 {
1435 int rd, wrd; 1367 int rd, wrd;
1436 int rdhi, rdlo, rd0, rd1, i; 1368 int rdhi, rdlo, rd0, rd1, i;
1437 - TCGv tmp; 1369 + TCGv addr;
  1370 + TCGv tmp, tmp2, tmp3;
1438 1371
1439 if ((insn & 0x0e000e00) == 0x0c000000) { 1372 if ((insn & 0x0e000e00) == 0x0c000000) {
1440 if ((insn & 0x0fe00ff0) == 0x0c400000) { 1373 if ((insn & 0x0fe00ff0) == 0x0c400000) {
@@ -1442,41 +1375,43 @@ static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
1442 rdlo = (insn >> 12) & 0xf; 1375 rdlo = (insn >> 12) & 0xf;
1443 rdhi = (insn >> 16) & 0xf; 1376 rdhi = (insn >> 16) & 0xf;
1444 if (insn & ARM_CP_RW_BIT) { /* TMRRC */ 1377 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
1445 - gen_iwmmxt_movl_T0_T1_wRn(wrd);  
1446 - gen_movl_reg_T0(s, rdlo);  
1447 - gen_movl_reg_T1(s, rdhi); 1378 + iwmmxt_load_reg(cpu_V0, wrd);
  1379 + tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
  1380 + tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
  1381 + tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
1448 } else { /* TMCRR */ 1382 } else { /* TMCRR */
1449 - gen_movl_T0_reg(s, rdlo);  
1450 - gen_movl_T1_reg(s, rdhi);  
1451 - gen_iwmmxt_movl_wRn_T0_T1(wrd); 1383 + tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
  1384 + iwmmxt_store_reg(cpu_V0, wrd);
1452 gen_op_iwmmxt_set_mup(); 1385 gen_op_iwmmxt_set_mup();
1453 } 1386 }
1454 return 0; 1387 return 0;
1455 } 1388 }
1456 1389
1457 wrd = (insn >> 12) & 0xf; 1390 wrd = (insn >> 12) & 0xf;
1458 - if (gen_iwmmxt_address(s, insn)) 1391 + addr = new_tmp();
  1392 + if (gen_iwmmxt_address(s, insn, addr)) {
  1393 + dead_tmp(addr);
1459 return 1; 1394 return 1;
  1395 + }
1460 if (insn & ARM_CP_RW_BIT) { 1396 if (insn & ARM_CP_RW_BIT) {
1461 if ((insn >> 28) == 0xf) { /* WLDRW wCx */ 1397 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
1462 - tmp = gen_ld32(cpu_T[1], IS_USER(s));  
1463 - tcg_gen_mov_i32(cpu_T[0], tmp);  
1464 - dead_tmp(tmp);  
1465 - gen_op_iwmmxt_movl_wCx_T0(wrd); 1398 + tmp = new_tmp();
  1399 + tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
  1400 + iwmmxt_store_creg(wrd, tmp);
1466 } else { 1401 } else {
1467 i = 1; 1402 i = 1;
1468 if (insn & (1 << 8)) { 1403 if (insn & (1 << 8)) {
1469 if (insn & (1 << 22)) { /* WLDRD */ 1404 if (insn & (1 << 22)) { /* WLDRD */
1470 - tcg_gen_qemu_ld64(cpu_M0, cpu_T[1], IS_USER(s)); 1405 + tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
1471 i = 0; 1406 i = 0;
1472 } else { /* WLDRW wRd */ 1407 } else { /* WLDRW wRd */
1473 - tmp = gen_ld32(cpu_T[1], IS_USER(s)); 1408 + tmp = gen_ld32(addr, IS_USER(s));
1474 } 1409 }
1475 } else { 1410 } else {
1476 if (insn & (1 << 22)) { /* WLDRH */ 1411 if (insn & (1 << 22)) { /* WLDRH */
1477 - tmp = gen_ld16u(cpu_T[1], IS_USER(s)); 1412 + tmp = gen_ld16u(addr, IS_USER(s));
1478 } else { /* WLDRB */ 1413 } else { /* WLDRB */
1479 - tmp = gen_ld8u(cpu_T[1], IS_USER(s)); 1414 + tmp = gen_ld8u(addr, IS_USER(s));
1480 } 1415 }
1481 } 1416 }
1482 if (i) { 1417 if (i) {
@@ -1487,28 +1422,26 @@ static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
1487 } 1422 }
1488 } else { 1423 } else {
1489 if ((insn >> 28) == 0xf) { /* WSTRW wCx */ 1424 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
1490 - gen_op_iwmmxt_movl_T0_wCx(wrd);  
1491 - tmp = new_tmp();  
1492 - tcg_gen_mov_i32(tmp, cpu_T[0]);  
1493 - gen_st32(tmp, cpu_T[1], IS_USER(s)); 1425 + tmp = iwmmxt_load_creg(wrd);
  1426 + gen_st32(tmp, addr, IS_USER(s));
1494 } else { 1427 } else {
1495 gen_op_iwmmxt_movq_M0_wRn(wrd); 1428 gen_op_iwmmxt_movq_M0_wRn(wrd);
1496 tmp = new_tmp(); 1429 tmp = new_tmp();
1497 if (insn & (1 << 8)) { 1430 if (insn & (1 << 8)) {
1498 if (insn & (1 << 22)) { /* WSTRD */ 1431 if (insn & (1 << 22)) { /* WSTRD */
1499 dead_tmp(tmp); 1432 dead_tmp(tmp);
1500 - tcg_gen_qemu_st64(cpu_M0, cpu_T[1], IS_USER(s)); 1433 + tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
1501 } else { /* WSTRW wRd */ 1434 } else { /* WSTRW wRd */
1502 tcg_gen_trunc_i64_i32(tmp, cpu_M0); 1435 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1503 - gen_st32(tmp, cpu_T[1], IS_USER(s)); 1436 + gen_st32(tmp, addr, IS_USER(s));
1504 } 1437 }
1505 } else { 1438 } else {
1506 if (insn & (1 << 22)) { /* WSTRH */ 1439 if (insn & (1 << 22)) { /* WSTRH */
1507 tcg_gen_trunc_i64_i32(tmp, cpu_M0); 1440 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1508 - gen_st16(tmp, cpu_T[1], IS_USER(s)); 1441 + gen_st16(tmp, addr, IS_USER(s));
1509 } else { /* WSTRB */ 1442 } else { /* WSTRB */
1510 tcg_gen_trunc_i64_i32(tmp, cpu_M0); 1443 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1511 - gen_st8(tmp, cpu_T[1], IS_USER(s)); 1444 + gen_st8(tmp, addr, IS_USER(s));
1512 } 1445 }
1513 } 1446 }
1514 } 1447 }
@@ -1544,18 +1477,19 @@ static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
1544 gen_op_iwmmxt_set_cup(); 1477 gen_op_iwmmxt_set_cup();
1545 /* Fall through. */ 1478 /* Fall through. */
1546 case ARM_IWMMXT_wCSSF: 1479 case ARM_IWMMXT_wCSSF:
1547 - gen_op_iwmmxt_movl_T0_wCx(wrd);  
1548 - gen_movl_T1_reg(s, rd);  
1549 - gen_op_bicl_T0_T1();  
1550 - gen_op_iwmmxt_movl_wCx_T0(wrd); 1480 + tmp = iwmmxt_load_creg(wrd);
  1481 + tmp2 = load_reg(s, rd);
  1482 + tcg_gen_bic_i32(tmp, tmp, tmp2);
  1483 + dead_tmp(tmp2);
  1484 + iwmmxt_store_creg(wrd, tmp);
1551 break; 1485 break;
1552 case ARM_IWMMXT_wCGR0: 1486 case ARM_IWMMXT_wCGR0:
1553 case ARM_IWMMXT_wCGR1: 1487 case ARM_IWMMXT_wCGR1:
1554 case ARM_IWMMXT_wCGR2: 1488 case ARM_IWMMXT_wCGR2:
1555 case ARM_IWMMXT_wCGR3: 1489 case ARM_IWMMXT_wCGR3:
1556 gen_op_iwmmxt_set_cup(); 1490 gen_op_iwmmxt_set_cup();
1557 - gen_movl_reg_T0(s, rd);  
1558 - gen_op_iwmmxt_movl_wCx_T0(wrd); 1491 + tmp = load_reg(s, rd);
  1492 + iwmmxt_store_creg(wrd, tmp);
1559 break; 1493 break;
1560 default: 1494 default:
1561 return 1; 1495 return 1;
@@ -1577,8 +1511,8 @@ static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
1577 return 1; 1511 return 1;
1578 rd = (insn >> 12) & 0xf; 1512 rd = (insn >> 12) & 0xf;
1579 wrd = (insn >> 16) & 0xf; 1513 wrd = (insn >> 16) & 0xf;
1580 - gen_op_iwmmxt_movl_T0_wCx(wrd);  
1581 - gen_movl_reg_T0(s, rd); 1514 + tmp = iwmmxt_load_creg(wrd);
  1515 + store_reg(s, rd, tmp);
1582 break; 1516 break;
1583 case 0x300: /* WANDN */ 1517 case 0x300: /* WANDN */
1584 wrd = (insn >> 12) & 0xf; 1518 wrd = (insn >> 12) & 0xf;
@@ -1755,132 +1689,145 @@ static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
1755 rd0 = (insn >> 16) & 0xf; 1689 rd0 = (insn >> 16) & 0xf;
1756 rd1 = (insn >> 0) & 0xf; 1690 rd1 = (insn >> 0) & 0xf;
1757 gen_op_iwmmxt_movq_M0_wRn(rd0); 1691 gen_op_iwmmxt_movq_M0_wRn(rd0);
1758 - gen_op_iwmmxt_movl_T0_wCx(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));  
1759 - gen_op_movl_T1_im(7);  
1760 - gen_op_andl_T0_T1();  
1761 - gen_op_iwmmxt_align_M0_T0_wRn(rd1); 1692 + tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
  1693 + tcg_gen_andi_i32(tmp, tmp, 7);
  1694 + iwmmxt_load_reg(cpu_V1, rd1);
  1695 + gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
  1696 + dead_tmp(tmp);
1762 gen_op_iwmmxt_movq_wRn_M0(wrd); 1697 gen_op_iwmmxt_movq_wRn_M0(wrd);
1763 gen_op_iwmmxt_set_mup(); 1698 gen_op_iwmmxt_set_mup();
1764 break; 1699 break;
1765 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */ 1700 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
  1701 + if (((insn >> 6) & 3) == 3)
  1702 + return 1;
1766 rd = (insn >> 12) & 0xf; 1703 rd = (insn >> 12) & 0xf;
1767 wrd = (insn >> 16) & 0xf; 1704 wrd = (insn >> 16) & 0xf;
1768 - gen_movl_T0_reg(s, rd); 1705 + tmp = load_reg(s, rd);
1769 gen_op_iwmmxt_movq_M0_wRn(wrd); 1706 gen_op_iwmmxt_movq_M0_wRn(wrd);
1770 switch ((insn >> 6) & 3) { 1707 switch ((insn >> 6) & 3) {
1771 case 0: 1708 case 0:
1772 - gen_op_movl_T1_im(0xff);  
1773 - gen_op_iwmmxt_insr_M0_T0_T1((insn & 7) << 3); 1709 + tmp2 = tcg_const_i32(0xff);
  1710 + tmp3 = tcg_const_i32((insn & 7) << 3);
1774 break; 1711 break;
1775 case 1: 1712 case 1:
1776 - gen_op_movl_T1_im(0xffff);  
1777 - gen_op_iwmmxt_insr_M0_T0_T1((insn & 3) << 4); 1713 + tmp2 = tcg_const_i32(0xffff);
  1714 + tmp3 = tcg_const_i32((insn & 3) << 4);
1778 break; 1715 break;
1779 case 2: 1716 case 2:
1780 - gen_op_movl_T1_im(0xffffffff);  
1781 - gen_op_iwmmxt_insr_M0_T0_T1((insn & 1) << 5); 1717 + tmp2 = tcg_const_i32(0xffffffff);
  1718 + tmp3 = tcg_const_i32((insn & 1) << 5);
1782 break; 1719 break;
1783 - case 3:  
1784 - return 1; 1720 + default:
  1721 + TCGV_UNUSED(tmp2);
  1722 + TCGV_UNUSED(tmp3);
1785 } 1723 }
  1724 + gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
  1725 + tcg_temp_free(tmp3);
  1726 + tcg_temp_free(tmp2);
  1727 + dead_tmp(tmp);
1786 gen_op_iwmmxt_movq_wRn_M0(wrd); 1728 gen_op_iwmmxt_movq_wRn_M0(wrd);
1787 gen_op_iwmmxt_set_mup(); 1729 gen_op_iwmmxt_set_mup();
1788 break; 1730 break;
1789 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */ 1731 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1790 rd = (insn >> 12) & 0xf; 1732 rd = (insn >> 12) & 0xf;
1791 wrd = (insn >> 16) & 0xf; 1733 wrd = (insn >> 16) & 0xf;
1792 - if (rd == 15) 1734 + if (rd == 15 || ((insn >> 22) & 3) == 3)
1793 return 1; 1735 return 1;
1794 gen_op_iwmmxt_movq_M0_wRn(wrd); 1736 gen_op_iwmmxt_movq_M0_wRn(wrd);
  1737 + tmp = new_tmp();
1795 switch ((insn >> 22) & 3) { 1738 switch ((insn >> 22) & 3) {
1796 case 0: 1739 case 0:
1797 - if (insn & 8)  
1798 - gen_op_iwmmxt_extrsb_T0_M0((insn & 7) << 3);  
1799 - else {  
1800 - gen_op_iwmmxt_extru_T0_M0((insn & 7) << 3, 0xff); 1740 + tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
  1741 + tcg_gen_trunc_i64_i32(tmp, cpu_M0);
  1742 + if (insn & 8) {
  1743 + tcg_gen_ext8s_i32(tmp, tmp);
  1744 + } else {
  1745 + tcg_gen_andi_i32(tmp, tmp, 0xff);
1801 } 1746 }
1802 break; 1747 break;
1803 case 1: 1748 case 1:
1804 - if (insn & 8)  
1805 - gen_op_iwmmxt_extrsw_T0_M0((insn & 3) << 4);  
1806 - else {  
1807 - gen_op_iwmmxt_extru_T0_M0((insn & 3) << 4, 0xffff); 1749 + tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
  1750 + tcg_gen_trunc_i64_i32(tmp, cpu_M0);
  1751 + if (insn & 8) {
  1752 + tcg_gen_ext16s_i32(tmp, tmp);
  1753 + } else {
  1754 + tcg_gen_andi_i32(tmp, tmp, 0xffff);
1808 } 1755 }
1809 break; 1756 break;
1810 case 2: 1757 case 2:
1811 - gen_op_iwmmxt_extru_T0_M0((insn & 1) << 5, ~0u); 1758 + tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
  1759 + tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1812 break; 1760 break;
1813 - case 3:  
1814 - return 1;  
1815 } 1761 }
1816 - gen_movl_reg_T0(s, rd); 1762 + store_reg(s, rd, tmp);
1817 break; 1763 break;
1818 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */ 1764 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
1819 - if ((insn & 0x000ff008) != 0x0003f000) 1765 + if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1820 return 1; 1766 return 1;
1821 - gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF); 1767 + tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1822 switch ((insn >> 22) & 3) { 1768 switch ((insn >> 22) & 3) {
1823 case 0: 1769 case 0:
1824 - gen_op_shrl_T1_im(((insn & 7) << 2) + 0); 1770 + tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
1825 break; 1771 break;
1826 case 1: 1772 case 1:
1827 - gen_op_shrl_T1_im(((insn & 3) << 3) + 4); 1773 + tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
1828 break; 1774 break;
1829 case 2: 1775 case 2:
1830 - gen_op_shrl_T1_im(((insn & 1) << 4) + 12); 1776 + tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
1831 break; 1777 break;
1832 - case 3:  
1833 - return 1;  
1834 } 1778 }
1835 - gen_op_shll_T1_im(28);  
1836 - gen_set_nzcv(cpu_T[1]); 1779 + tcg_gen_shli_i32(tmp, tmp, 28);
  1780 + gen_set_nzcv(tmp);
  1781 + dead_tmp(tmp);
1837 break; 1782 break;
1838 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */ 1783 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
  1784 + if (((insn >> 6) & 3) == 3)
  1785 + return 1;
1839 rd = (insn >> 12) & 0xf; 1786 rd = (insn >> 12) & 0xf;
1840 wrd = (insn >> 16) & 0xf; 1787 wrd = (insn >> 16) & 0xf;
1841 - gen_movl_T0_reg(s, rd); 1788 + tmp = load_reg(s, rd);
1842 switch ((insn >> 6) & 3) { 1789 switch ((insn >> 6) & 3) {
1843 case 0: 1790 case 0:
1844 - gen_helper_iwmmxt_bcstb(cpu_M0, cpu_T[0]); 1791 + gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
1845 break; 1792 break;
1846 case 1: 1793 case 1:
1847 - gen_helper_iwmmxt_bcstw(cpu_M0, cpu_T[0]); 1794 + gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
1848 break; 1795 break;
1849 case 2: 1796 case 2:
1850 - gen_helper_iwmmxt_bcstl(cpu_M0, cpu_T[0]); 1797 + gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
1851 break; 1798 break;
1852 - case 3:  
1853 - return 1;  
1854 } 1799 }
  1800 + dead_tmp(tmp);
1855 gen_op_iwmmxt_movq_wRn_M0(wrd); 1801 gen_op_iwmmxt_movq_wRn_M0(wrd);
1856 gen_op_iwmmxt_set_mup(); 1802 gen_op_iwmmxt_set_mup();
1857 break; 1803 break;
1858 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */ 1804 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
1859 - if ((insn & 0x000ff00f) != 0x0003f000) 1805 + if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1860 return 1; 1806 return 1;
1861 - gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);  
1862 - gen_op_movl_T0_T1(); 1807 + tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
  1808 + tmp2 = new_tmp();
  1809 + tcg_gen_mov_i32(tmp2, tmp);
1863 switch ((insn >> 22) & 3) { 1810 switch ((insn >> 22) & 3) {
1864 case 0: 1811 case 0:
1865 for (i = 0; i < 7; i ++) { 1812 for (i = 0; i < 7; i ++) {
1866 - gen_op_shll_T1_im(4);  
1867 - gen_op_andl_T0_T1(); 1813 + tcg_gen_shli_i32(tmp2, tmp2, 4);
  1814 + tcg_gen_and_i32(tmp, tmp, tmp2);
1868 } 1815 }
1869 break; 1816 break;
1870 case 1: 1817 case 1:
1871 for (i = 0; i < 3; i ++) { 1818 for (i = 0; i < 3; i ++) {
1872 - gen_op_shll_T1_im(8);  
1873 - gen_op_andl_T0_T1(); 1819 + tcg_gen_shli_i32(tmp2, tmp2, 8);
  1820 + tcg_gen_and_i32(tmp, tmp, tmp2);
1874 } 1821 }
1875 break; 1822 break;
1876 case 2: 1823 case 2:
1877 - gen_op_shll_T1_im(16);  
1878 - gen_op_andl_T0_T1(); 1824 + tcg_gen_shli_i32(tmp2, tmp2, 16);
  1825 + tcg_gen_and_i32(tmp, tmp, tmp2);
1879 break; 1826 break;
1880 - case 3:  
1881 - return 1;  
1882 } 1827 }
1883 - gen_set_nzcv(cpu_T[0]); 1828 + gen_set_nzcv(tmp);
  1829 + dead_tmp(tmp2);
  1830 + dead_tmp(tmp);
1884 break; 1831 break;
1885 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */ 1832 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1886 wrd = (insn >> 12) & 0xf; 1833 wrd = (insn >> 12) & 0xf;
@@ -1903,52 +1850,52 @@ static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
1903 gen_op_iwmmxt_set_mup(); 1850 gen_op_iwmmxt_set_mup();
1904 break; 1851 break;
1905 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */ 1852 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
1906 - if ((insn & 0x000ff00f) != 0x0003f000) 1853 + if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1907 return 1; 1854 return 1;
1908 - gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);  
1909 - gen_op_movl_T0_T1(); 1855 + tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
  1856 + tmp2 = new_tmp();
  1857 + tcg_gen_mov_i32(tmp2, tmp);
1910 switch ((insn >> 22) & 3) { 1858 switch ((insn >> 22) & 3) {
1911 case 0: 1859 case 0:
1912 for (i = 0; i < 7; i ++) { 1860 for (i = 0; i < 7; i ++) {
1913 - gen_op_shll_T1_im(4);  
1914 - gen_op_orl_T0_T1(); 1861 + tcg_gen_shli_i32(tmp2, tmp2, 4);
  1862 + tcg_gen_or_i32(tmp, tmp, tmp2);
1915 } 1863 }
1916 break; 1864 break;
1917 case 1: 1865 case 1:
1918 for (i = 0; i < 3; i ++) { 1866 for (i = 0; i < 3; i ++) {
1919 - gen_op_shll_T1_im(8);  
1920 - gen_op_orl_T0_T1(); 1867 + tcg_gen_shli_i32(tmp2, tmp2, 8);
  1868 + tcg_gen_or_i32(tmp, tmp, tmp2);
1921 } 1869 }
1922 break; 1870 break;
1923 case 2: 1871 case 2:
1924 - gen_op_shll_T1_im(16);  
1925 - gen_op_orl_T0_T1(); 1872 + tcg_gen_shli_i32(tmp2, tmp2, 16);
  1873 + tcg_gen_or_i32(tmp, tmp, tmp2);
1926 break; 1874 break;
1927 - case 3:  
1928 - return 1;  
1929 } 1875 }
1930 - gen_set_nzcv(cpu_T[0]); 1876 + gen_set_nzcv(tmp);
  1877 + dead_tmp(tmp2);
  1878 + dead_tmp(tmp);
1931 break; 1879 break;
1932 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */ 1880 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1933 rd = (insn >> 12) & 0xf; 1881 rd = (insn >> 12) & 0xf;
1934 rd0 = (insn >> 16) & 0xf; 1882 rd0 = (insn >> 16) & 0xf;
1935 - if ((insn & 0xf) != 0) 1883 + if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
1936 return 1; 1884 return 1;
1937 gen_op_iwmmxt_movq_M0_wRn(rd0); 1885 gen_op_iwmmxt_movq_M0_wRn(rd0);
  1886 + tmp = new_tmp();
1938 switch ((insn >> 22) & 3) { 1887 switch ((insn >> 22) & 3) {
1939 case 0: 1888 case 0:
1940 - gen_helper_iwmmxt_msbb(cpu_T[0], cpu_M0); 1889 + gen_helper_iwmmxt_msbb(tmp, cpu_M0);
1941 break; 1890 break;
1942 case 1: 1891 case 1:
1943 - gen_helper_iwmmxt_msbw(cpu_T[0], cpu_M0); 1892 + gen_helper_iwmmxt_msbw(tmp, cpu_M0);
1944 break; 1893 break;
1945 case 2: 1894 case 2:
1946 - gen_helper_iwmmxt_msbl(cpu_T[0], cpu_M0); 1895 + gen_helper_iwmmxt_msbl(tmp, cpu_M0);
1947 break; 1896 break;
1948 - case 3:  
1949 - return 1;  
1950 } 1897 }
1951 - gen_movl_reg_T0(s, rd); 1898 + store_reg(s, rd, tmp);
1952 break; 1899 break;
1953 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */ 1900 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1954 case 0x906: case 0xb06: case 0xd06: case 0xf06: 1901 case 0x906: case 0xb06: case 0xd06: case 0xf06:
@@ -2046,100 +1993,120 @@ static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
2046 break; 1993 break;
2047 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */ 1994 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2048 case 0x214: case 0x614: case 0xa14: case 0xe14: 1995 case 0x214: case 0x614: case 0xa14: case 0xe14:
  1996 + if (((insn >> 22) & 3) == 0)
  1997 + return 1;
2049 wrd = (insn >> 12) & 0xf; 1998 wrd = (insn >> 12) & 0xf;
2050 rd0 = (insn >> 16) & 0xf; 1999 rd0 = (insn >> 16) & 0xf;
2051 gen_op_iwmmxt_movq_M0_wRn(rd0); 2000 gen_op_iwmmxt_movq_M0_wRn(rd0);
2052 - if (gen_iwmmxt_shift(insn, 0xff)) 2001 + tmp = new_tmp();
  2002 + if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
  2003 + dead_tmp(tmp);
2053 return 1; 2004 return 1;
  2005 + }
2054 switch ((insn >> 22) & 3) { 2006 switch ((insn >> 22) & 3) {
2055 - case 0:  
2056 - return 1;  
2057 case 1: 2007 case 1:
2058 - gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]); 2008 + gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
2059 break; 2009 break;
2060 case 2: 2010 case 2:
2061 - gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, cpu_T[0]); 2011 + gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
2062 break; 2012 break;
2063 case 3: 2013 case 3:
2064 - gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]); 2014 + gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
2065 break; 2015 break;
2066 } 2016 }
  2017 + dead_tmp(tmp);
2067 gen_op_iwmmxt_movq_wRn_M0(wrd); 2018 gen_op_iwmmxt_movq_wRn_M0(wrd);
2068 gen_op_iwmmxt_set_mup(); 2019 gen_op_iwmmxt_set_mup();
2069 gen_op_iwmmxt_set_cup(); 2020 gen_op_iwmmxt_set_cup();
2070 break; 2021 break;
2071 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */ 2022 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2072 case 0x014: case 0x414: case 0x814: case 0xc14: 2023 case 0x014: case 0x414: case 0x814: case 0xc14:
  2024 + if (((insn >> 22) & 3) == 0)
  2025 + return 1;
2073 wrd = (insn >> 12) & 0xf; 2026 wrd = (insn >> 12) & 0xf;
2074 rd0 = (insn >> 16) & 0xf; 2027 rd0 = (insn >> 16) & 0xf;
2075 gen_op_iwmmxt_movq_M0_wRn(rd0); 2028 gen_op_iwmmxt_movq_M0_wRn(rd0);
2076 - if (gen_iwmmxt_shift(insn, 0xff)) 2029 + tmp = new_tmp();
  2030 + if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
  2031 + dead_tmp(tmp);
2077 return 1; 2032 return 1;
  2033 + }
2078 switch ((insn >> 22) & 3) { 2034 switch ((insn >> 22) & 3) {
2079 - case 0:  
2080 - return 1;  
2081 case 1: 2035 case 1:
2082 - gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]); 2036 + gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
2083 break; 2037 break;
2084 case 2: 2038 case 2:
2085 - gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, cpu_T[0]); 2039 + gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
2086 break; 2040 break;
2087 case 3: 2041 case 3:
2088 - gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]); 2042 + gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
2089 break; 2043 break;
2090 } 2044 }
  2045 + dead_tmp(tmp);
2091 gen_op_iwmmxt_movq_wRn_M0(wrd); 2046 gen_op_iwmmxt_movq_wRn_M0(wrd);
2092 gen_op_iwmmxt_set_mup(); 2047 gen_op_iwmmxt_set_mup();
2093 gen_op_iwmmxt_set_cup(); 2048 gen_op_iwmmxt_set_cup();
2094 break; 2049 break;
2095 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */ 2050 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2096 case 0x114: case 0x514: case 0x914: case 0xd14: 2051 case 0x114: case 0x514: case 0x914: case 0xd14:
  2052 + if (((insn >> 22) & 3) == 0)
  2053 + return 1;
2097 wrd = (insn >> 12) & 0xf; 2054 wrd = (insn >> 12) & 0xf;
2098 rd0 = (insn >> 16) & 0xf; 2055 rd0 = (insn >> 16) & 0xf;
2099 gen_op_iwmmxt_movq_M0_wRn(rd0); 2056 gen_op_iwmmxt_movq_M0_wRn(rd0);
2100 - if (gen_iwmmxt_shift(insn, 0xff)) 2057 + tmp = new_tmp();
  2058 + if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
  2059 + dead_tmp(tmp);
2101 return 1; 2060 return 1;
  2061 + }
2102 switch ((insn >> 22) & 3) { 2062 switch ((insn >> 22) & 3) {
2103 - case 0:  
2104 - return 1;  
2105 case 1: 2063 case 1:
2106 - gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]); 2064 + gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
2107 break; 2065 break;
2108 case 2: 2066 case 2:
2109 - gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, cpu_T[0]); 2067 + gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
2110 break; 2068 break;
2111 case 3: 2069 case 3:
2112 - gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]); 2070 + gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
2113 break; 2071 break;
2114 } 2072 }
  2073 + dead_tmp(tmp);
2115 gen_op_iwmmxt_movq_wRn_M0(wrd); 2074 gen_op_iwmmxt_movq_wRn_M0(wrd);
2116 gen_op_iwmmxt_set_mup(); 2075 gen_op_iwmmxt_set_mup();
2117 gen_op_iwmmxt_set_cup(); 2076 gen_op_iwmmxt_set_cup();
2118 break; 2077 break;
2119 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */ 2078 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2120 case 0x314: case 0x714: case 0xb14: case 0xf14: 2079 case 0x314: case 0x714: case 0xb14: case 0xf14:
  2080 + if (((insn >> 22) & 3) == 0)
  2081 + return 1;
2121 wrd = (insn >> 12) & 0xf; 2082 wrd = (insn >> 12) & 0xf;
2122 rd0 = (insn >> 16) & 0xf; 2083 rd0 = (insn >> 16) & 0xf;
2123 gen_op_iwmmxt_movq_M0_wRn(rd0); 2084 gen_op_iwmmxt_movq_M0_wRn(rd0);
  2085 + tmp = new_tmp();
2124 switch ((insn >> 22) & 3) { 2086 switch ((insn >> 22) & 3) {
2125 - case 0:  
2126 - return 1;  
2127 case 1: 2087 case 1:
2128 - if (gen_iwmmxt_shift(insn, 0xf)) 2088 + if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
  2089 + dead_tmp(tmp);
2129 return 1; 2090 return 1;
2130 - gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]); 2091 + }
  2092 + gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
2131 break; 2093 break;
2132 case 2: 2094 case 2:
2133 - if (gen_iwmmxt_shift(insn, 0x1f)) 2095 + if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
  2096 + dead_tmp(tmp);
2134 return 1; 2097 return 1;
2135 - gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, cpu_T[0]); 2098 + }
  2099 + gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
2136 break; 2100 break;
2137 case 3: 2101 case 3:
2138 - if (gen_iwmmxt_shift(insn, 0x3f)) 2102 + if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
  2103 + dead_tmp(tmp);
2139 return 1; 2104 return 1;
2140 - gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]); 2105 + }
  2106 + gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
2141 break; 2107 break;
2142 } 2108 }
  2109 + dead_tmp(tmp);
2143 gen_op_iwmmxt_movq_wRn_M0(wrd); 2110 gen_op_iwmmxt_movq_wRn_M0(wrd);
2144 gen_op_iwmmxt_set_mup(); 2111 gen_op_iwmmxt_set_mup();
2145 gen_op_iwmmxt_set_cup(); 2112 gen_op_iwmmxt_set_cup();
@@ -2212,8 +2179,10 @@ static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn) @@ -2212,8 +2179,10 @@ static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
2212 rd0 = (insn >> 16) & 0xf; 2179 rd0 = (insn >> 16) & 0xf;
2213 rd1 = (insn >> 0) & 0xf; 2180 rd1 = (insn >> 0) & 0xf;
2214 gen_op_iwmmxt_movq_M0_wRn(rd0); 2181 gen_op_iwmmxt_movq_M0_wRn(rd0);
2215 - gen_op_movl_T0_im((insn >> 20) & 3);  
2216 - gen_op_iwmmxt_align_M0_T0_wRn(rd1); 2182 + tmp = tcg_const_i32((insn >> 20) & 3);
  2183 + iwmmxt_load_reg(cpu_V1, rd1);
  2184 + gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
  2185 + tcg_temp_free(tmp);
2217 gen_op_iwmmxt_movq_wRn_M0(wrd); 2186 gen_op_iwmmxt_movq_wRn_M0(wrd);
2218 gen_op_iwmmxt_set_mup(); 2187 gen_op_iwmmxt_set_mup();
2219 break; 2188 break;
@@ -2267,8 +2236,9 @@ static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn) @@ -2267,8 +2236,9 @@ static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
2267 wrd = (insn >> 12) & 0xf; 2236 wrd = (insn >> 12) & 0xf;
2268 rd0 = (insn >> 16) & 0xf; 2237 rd0 = (insn >> 16) & 0xf;
2269 gen_op_iwmmxt_movq_M0_wRn(rd0); 2238 gen_op_iwmmxt_movq_M0_wRn(rd0);
2270 - gen_op_movl_T0_im(((insn >> 16) & 0xf0) | (insn & 0x0f));  
2271 - gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, cpu_T[0]); 2239 + tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
  2240 + gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
  2241 + tcg_temp_free(tmp);
2272 gen_op_iwmmxt_movq_wRn_M0(wrd); 2242 gen_op_iwmmxt_movq_wRn_M0(wrd);
2273 gen_op_iwmmxt_set_mup(); 2243 gen_op_iwmmxt_set_mup();
2274 gen_op_iwmmxt_set_cup(); 2244 gen_op_iwmmxt_set_cup();
@@ -2320,15 +2290,13 @@ static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn) @@ -2320,15 +2290,13 @@ static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
2320 case 0x408: case 0x508: case 0x608: case 0x708: 2290 case 0x408: case 0x508: case 0x608: case 0x708:
2321 case 0x808: case 0x908: case 0xa08: case 0xb08: 2291 case 0x808: case 0x908: case 0xa08: case 0xb08:
2322 case 0xc08: case 0xd08: case 0xe08: case 0xf08: 2292 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
  2293 + if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
  2294 + return 1;
2323 wrd = (insn >> 12) & 0xf; 2295 wrd = (insn >> 12) & 0xf;
2324 rd0 = (insn >> 16) & 0xf; 2296 rd0 = (insn >> 16) & 0xf;
2325 rd1 = (insn >> 0) & 0xf; 2297 rd1 = (insn >> 0) & 0xf;
2326 gen_op_iwmmxt_movq_M0_wRn(rd0); 2298 gen_op_iwmmxt_movq_M0_wRn(rd0);
2327 - if (!(insn & (1 << 20)))  
2328 - return 1;  
2329 switch ((insn >> 22) & 3) { 2299 switch ((insn >> 22) & 3) {
2330 - case 0:  
2331 - return 1;  
2332 case 1: 2300 case 1:
2333 if (insn & (1 << 21)) 2301 if (insn & (1 << 21))
2334 gen_op_iwmmxt_packsw_M0_wRn(rd1); 2302 gen_op_iwmmxt_packsw_M0_wRn(rd1);
@@ -2362,30 +2330,29 @@ static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn) @@ -2362,30 +2330,29 @@ static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
2362 if (rd0 == 0xf || rd1 == 0xf) 2330 if (rd0 == 0xf || rd1 == 0xf)
2363 return 1; 2331 return 1;
2364 gen_op_iwmmxt_movq_M0_wRn(wrd); 2332 gen_op_iwmmxt_movq_M0_wRn(wrd);
  2333 + tmp = load_reg(s, rd0);
  2334 + tmp2 = load_reg(s, rd1);
2365 switch ((insn >> 16) & 0xf) { 2335 switch ((insn >> 16) & 0xf) {
2366 case 0x0: /* TMIA */ 2336 case 0x0: /* TMIA */
2367 - gen_movl_T0_reg(s, rd0);  
2368 - gen_movl_T1_reg(s, rd1);  
2369 - gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]); 2337 + gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2370 break; 2338 break;
2371 case 0x8: /* TMIAPH */ 2339 case 0x8: /* TMIAPH */
2372 - gen_movl_T0_reg(s, rd0);  
2373 - gen_movl_T1_reg(s, rd1);  
2374 - gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]); 2340 + gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2375 break; 2341 break;
2376 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */ 2342 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2377 - gen_movl_T1_reg(s, rd0);  
2378 if (insn & (1 << 16)) 2343 if (insn & (1 << 16))
2379 - gen_op_shrl_T1_im(16);  
2380 - gen_op_movl_T0_T1();  
2381 - gen_movl_T1_reg(s, rd1); 2344 + tcg_gen_shri_i32(tmp, tmp, 16);
2382 if (insn & (1 << 17)) 2345 if (insn & (1 << 17))
2383 - gen_op_shrl_T1_im(16);  
2384 - gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]); 2346 + tcg_gen_shri_i32(tmp2, tmp2, 16);
  2347 + gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2385 break; 2348 break;
2386 default: 2349 default:
  2350 + dead_tmp(tmp2);
  2351 + dead_tmp(tmp);
2387 return 1; 2352 return 1;
2388 } 2353 }
  2354 + dead_tmp(tmp2);
  2355 + dead_tmp(tmp);
2389 gen_op_iwmmxt_movq_wRn_M0(wrd); 2356 gen_op_iwmmxt_movq_wRn_M0(wrd);
2390 gen_op_iwmmxt_set_mup(); 2357 gen_op_iwmmxt_set_mup();
2391 break; 2358 break;