Commit 3bd7da9e1851f024919c07eba4c29acf209ecd6e

Author: bellard
Parent: cec6843e

convert remaining segment handling to TCG

git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@4514 c046a42c-6fe2-441c-8c8c-71466251a162
target-i386/TODO
@@ -4,7 +4,6 @@ Correctness issues:
 - SVM: rework the implementation: simplify code, move most intercept
   tests as dynamic, correct segment access, verify exception safety,
   cpu save/restore, SMM save/restore.
-- arpl eflags computation is invalid
 - x86_64: fxsave/fxrestore intel/amd differences
 - x86_64: lcall/ljmp intel/amd differences ?
 - x86_64: cmpxchgl intel/amd differences ?
@@ -32,6 +31,8 @@ Optimizations/Features:
 - add VMX support
 - add AVX support
 - add SSE5 support
+- faster EFLAGS update: consider SZAP, C, O can be updated separately
+  with a bit field in CC_OP and more state variables.
 - evaluate x87 stack pointer statically
 - find a way to avoid translating several time the same TB if CR0.TS
   is set or not.
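The new "faster EFLAGS update" entry proposes tracking the SZAP, C and O flag groups independently instead of recomputing all of EFLAGS from a single CC_OP. A minimal sketch of that idea follows; every name in it is hypothetical and does not exist in the QEMU source:

    /* Hypothetical encoding: upper bits of CC_OP record which flag groups
     * are currently tracked lazily from their own source values, so an
     * instruction that only changes C does not invalidate the cached
     * SZAP and O state. */
    enum {
        CC_GRP_SZAP = 1 << 8,   /* S, Z, A, P derived from cc_szap_src */
        CC_GRP_C    = 1 << 9,   /* C derived from cc_c_src             */
        CC_GRP_O    = 1 << 10,  /* O derived from cc_o_src             */
    };

    #define CC_SUBOP(op)  ((op) & 0xff)   /* per-group operation code  */
    #define CC_GROUPS(op) ((op) & ~0xff)  /* which groups are lazy     */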
target-i386/op.c
@@ -147,45 +147,6 @@
 
 #endif
 
-/* segment handling */
-
-/* faster VM86 version */
-void OPPROTO op_movl_seg_T0_vm(void)
-{
-    int selector;
-    SegmentCache *sc;
-
-    selector = T0 & 0xffff;
-    /* env->segs[] access */
-    sc = (SegmentCache *)((char *)env + PARAM1);
-    sc->selector = selector;
-    sc->base = (selector << 4);
-}
-
-void OPPROTO op_movl_T0_seg(void)
-{
-    T0 = env->segs[PARAM1].selector;
-}
-
-void OPPROTO op_arpl(void)
-{
-    if ((T0 & 3) < (T1 & 3)) {
-        /* XXX: emulate bug or 0xff3f0000 oring as in bochs ? */
-        T0 = (T0 & ~3) | (T1 & 3);
-        T1 = CC_Z;
-    } else {
-        T1 = 0;
-    }
-    FORCE_RET();
-}
-
-void OPPROTO op_arpl_update(void)
-{
-    int eflags;
-    eflags = cc_table[CC_OP].compute_all();
-    CC_SRC = (eflags & ~CC_Z) | T1;
-}
-
 void OPPROTO op_movl_T0_env(void)
 {
     T0 = *(uint32_t *)((char *)env + PARAM1);
target-i386/translate.c
@@ -2167,6 +2167,22 @@ static void gen_setcc(DisasContext *s, int b)
     }
 }
 
+static inline void gen_op_movl_T0_seg(int seg_reg)
+{
+    tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
+                     offsetof(CPUX86State,segs[seg_reg].selector));
+}
+
+static inline void gen_op_movl_seg_T0_vm(int seg_reg)
+{
+    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
+    tcg_gen_st32_tl(cpu_T[0], cpu_env,
+                    offsetof(CPUX86State,segs[seg_reg].selector));
+    tcg_gen_shli_tl(cpu_T[0], cpu_T[0], 4);
+    tcg_gen_st_tl(cpu_T[0], cpu_env,
+                  offsetof(CPUX86State,segs[seg_reg].base));
+}
+
 /* move T0 to seg_reg and compute if the CPU state may change. Never
    call this function with seg_reg == R_CS */
 static void gen_movl_seg_T0(DisasContext *s, int seg_reg, target_ulong cur_eip)
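For reference, the inlined gen_op_movl_seg_T0_vm above emits the same real-mode/vm86 semantics as the micro-op removed from op.c: the low 16 bits of T0 become the selector and the segment base is simply selector << 4. A self-contained C sketch of that behaviour (seg_cache is a stand-in for QEMU's SegmentCache, not the real type):

    #include <stdint.h>

    struct seg_cache {        /* stand-in for the fields used here */
        uint32_t selector;
        uint32_t base;
    };

    /* Same effect as the removed op_movl_seg_T0_vm micro-op. */
    static void movl_seg_vm(struct seg_cache *sc, uint32_t t0)
    {
        uint32_t selector = t0 & 0xffff;  /* only the low 16 bits count */
        sc->selector = selector;
        sc->base = selector << 4;         /* real-mode base = selector * 16 */
    }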
@@ -2185,7 +2201,7 @@ static void gen_movl_seg_T0(DisasContext *s, int seg_reg, target_ulong cur_eip)
         if (seg_reg == R_SS || (s->code32 && seg_reg < R_FS))
             s->is_jmp = 3;
     } else {
-        gen_op_movl_seg_T0_vm(offsetof(CPUX86State,segs[seg_reg]));
+        gen_op_movl_seg_T0_vm(seg_reg);
         if (seg_reg == R_SS)
             s->is_jmp = 3;
     }
@@ -4085,7 +4101,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
                                    cpu_T[1],
                                    tcg_const_i32(s->pc - pc_start));
         } else {
-            gen_op_movl_seg_T0_vm(offsetof(CPUX86State,segs[R_CS]));
+            gen_op_movl_seg_T0_vm(R_CS);
             gen_op_movl_T0_T1();
             gen_op_jmp_T0();
         }
@@ -5575,7 +5591,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
             /* pop selector */
             gen_op_addl_A0_im(2 << s->dflag);
             gen_op_ld_T0_A0(1 + s->dflag + s->mem_index);
-            gen_op_movl_seg_T0_vm(offsetof(CPUX86State,segs[R_CS]));
+            gen_op_movl_seg_T0_vm(R_CS);
             /* add stack offset */
             gen_stack_update(s, val + (4 << s->dflag));
         }
@@ -6578,9 +6594,10 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
         } else
 #endif
         {
+            int label1;
             if (!s->pe || s->vm86)
                 goto illegal_op;
-            ot = dflag ? OT_LONG : OT_WORD;
+            ot = OT_WORD;
             modrm = ldub_code(s->pc++);
             reg = (modrm >> 3) & 7;
             mod = (modrm >> 6) & 3;
@@ -6592,16 +6609,26 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
                 gen_op_mov_TN_reg(ot, 0, rm);
             }
             gen_op_mov_TN_reg(ot, 1, reg);
-            if (s->cc_op != CC_OP_DYNAMIC)
-                gen_op_set_cc_op(s->cc_op);
-            gen_op_arpl();
-            s->cc_op = CC_OP_EFLAGS;
+            tcg_gen_andi_tl(cpu_tmp0, cpu_T[0], 3);
+            tcg_gen_andi_tl(cpu_T[1], cpu_T[1], 3);
+            tcg_gen_movi_tl(cpu_T3, 0);
+            label1 = gen_new_label();
+            tcg_gen_brcond_tl(TCG_COND_GE, cpu_tmp0, cpu_T[1], label1);
+            tcg_gen_andi_tl(cpu_T[0], cpu_T[0], ~3);
+            tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
+            tcg_gen_movi_tl(cpu_T3, CC_Z);
+            gen_set_label(label1);
             if (mod != 3) {
                 gen_op_st_T0_A0(ot + s->mem_index);
             } else {
                 gen_op_mov_reg_T0(ot, rm);
             }
-            gen_op_arpl_update();
+            if (s->cc_op != CC_OP_DYNAMIC)
+                gen_op_set_cc_op(s->cc_op);
+            gen_compute_eflags(cpu_cc_src);
+            tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
+            tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T3);
+            s->cc_op = CC_OP_EFLAGS;
         }
         break;
     case 0x102: /* lar */
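The inline TCG sequence for ARPL above reproduces the behaviour of the removed op_arpl/op_arpl_update micro-ops: if the RPL (low two bits) of the destination selector is below the RPL of the source, the destination RPL is raised to the source RPL and ZF is set; otherwise ZF is cleared and the selector is left unchanged. All other flags are preserved. Roughly, in plain C, with the lazily computed EFLAGS passed in explicitly (a sketch, not QEMU API):

    #include <stdint.h>

    #define CC_Z 0x0040  /* ZF bit in EFLAGS, as used by the micro-ops above */

    /* ARPL: adjust the RPL of *dst to that of src when it is lower, and
       return the new EFLAGS with only ZF changed. 'eflags' is the lazily
       computed flags value (cc_table[CC_OP].compute_all() in the removed
       micro-op). */
    static uint32_t arpl(uint16_t *dst, uint16_t src, uint32_t eflags)
    {
        uint32_t zf = 0;
        if ((*dst & 3) < (src & 3)) {        /* destination RPL too low?   */
            *dst = (*dst & ~3) | (src & 3);  /* raise it to the source RPL */
            zf = CC_Z;                       /* ZF = 1                     */
        }
        return (eflags & ~CC_Z) | zf;        /* only ZF is updated         */
    }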