Commit 2c1794c42ef9d23dc6aeb5e07673f2fcd885b9eb

Authored by bellard
1 parent 8a4c1cc4

more generic ljmp and lcall - fixed REPNZ usage for non-compare string ops (FreeDOS boot loader fix)


git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@340 c046a42c-6fe2-441c-8c8c-71466251a162
exec-i386.h
... ... @@ -123,8 +123,12 @@ typedef struct CCTable {
123 123 extern CCTable cc_table[];
124 124  
125 125 void load_seg(int seg_reg, int selector, unsigned cur_eip);
126   -void jmp_seg(int selector, unsigned int new_eip);
  126 +void helper_ljmp_protected_T0_T1(void);
  127 +void helper_lcall_real_T0_T1(int shift, int next_eip);
  128 +void helper_lcall_protected_T0_T1(int shift, int next_eip);
  129 +void helper_iret_real(int shift);
127 130 void helper_iret_protected(int shift);
  131 +void helper_lret_protected(int shift, int addend);
128 132 void helper_lldt_T0(void);
129 133 void helper_ltr_T0(void);
130 134 void helper_movl_crN_T0(int reg);
... ...
helper-i386.c
... ... @@ -185,7 +185,7 @@ static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
185 185  
186 186 /* protected mode interrupt */
187 187 static void do_interrupt_protected(int intno, int is_int, int error_code,
188   - unsigned int next_eip)
  188 + unsigned int next_eip)
189 189 {
190 190 SegmentCache *dt;
191 191 uint8_t *ptr, *ssp;
... ... @@ -378,20 +378,19 @@ static void do_interrupt_real(int intno, int is_int, int error_code,
378 378 ptr = dt->base + intno * 4;
379 379 offset = lduw(ptr);
380 380 selector = lduw(ptr + 2);
381   - esp = env->regs[R_ESP] & 0xffff;
382   - ssp = env->segs[R_SS].base + esp;
  381 + esp = env->regs[R_ESP];
  382 + ssp = env->segs[R_SS].base;
383 383 if (is_int)
384 384 old_eip = next_eip;
385 385 else
386 386 old_eip = env->eip;
387 387 old_cs = env->segs[R_CS].selector;
388   - ssp -= 2;
389   - stw(ssp, compute_eflags());
390   - ssp -= 2;
391   - stw(ssp, old_cs);
392   - ssp -= 2;
393   - stw(ssp, old_eip);
394   - esp -= 6;
  388 + esp -= 2;
  389 + stw(ssp + (esp & 0xffff), compute_eflags());
  390 + esp -= 2;
  391 + stw(ssp + (esp & 0xffff), old_cs);
  392 + esp -= 2;
  393 + stw(ssp + (esp & 0xffff), old_eip);
395 394  
396 395 /* update processor state */
397 396 env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
... ... @@ -733,47 +732,275 @@ void load_seg(int seg_reg, int selector, unsigned int cur_eip)
733 732 }
734 733  
735 734 /* protected mode jump */
736   -void jmp_seg(int selector, unsigned int new_eip)
  735 +void helper_ljmp_protected_T0_T1(void)
737 736 {
  737 + int new_cs, new_eip;
738 738 SegmentCache sc1;
739 739 uint32_t e1, e2, cpl, dpl, rpl;
740 740  
741   - if ((selector & 0xfffc) == 0) {
  741 + new_cs = T0;
  742 + new_eip = T1;
  743 + if ((new_cs & 0xfffc) == 0)
742 744 raise_exception_err(EXCP0D_GPF, 0);
  745 + if (load_segment(&e1, &e2, new_cs) != 0)
  746 + raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
  747 + cpl = env->segs[R_CS].selector & 3;
  748 + if (e2 & DESC_S_MASK) {
  749 + if (!(e2 & DESC_CS_MASK))
  750 + raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
  751 + dpl = (e2 >> DESC_DPL_SHIFT) & 3;
  752 + if (e2 & DESC_CS_MASK) {
  753 + /* conforming code segment */
  754 + if (dpl > cpl)
  755 + raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
  756 + } else {
  757 + /* non conforming code segment */
  758 + rpl = new_cs & 3;
  759 + if (rpl > cpl)
  760 + raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
  761 + if (dpl != cpl)
  762 + raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
  763 + }
  764 + if (!(e2 & DESC_P_MASK))
  765 + raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
  766 + load_seg_cache(&sc1, e1, e2);
  767 + if (new_eip > sc1.limit)
  768 + raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
  769 + env->segs[R_CS].base = sc1.base;
  770 + env->segs[R_CS].limit = sc1.limit;
  771 + env->segs[R_CS].flags = sc1.flags;
  772 + env->segs[R_CS].selector = (new_cs & 0xfffc) | cpl;
  773 + EIP = new_eip;
  774 + } else {
  775 + cpu_abort(env, "jmp to call/task gate not supported 0x%04x:0x%08x",
  776 + new_cs, new_eip);
743 777 }
  778 +}
744 779  
745   - if (load_segment(&e1, &e2, selector) != 0)
746   - raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
  780 +/* real mode call */
  781 +void helper_lcall_real_T0_T1(int shift, int next_eip)
  782 +{
  783 + int new_cs, new_eip;
  784 + uint32_t esp, esp_mask;
  785 + uint8_t *ssp;
  786 +
  787 + new_cs = T0;
  788 + new_eip = T1;
  789 + esp = env->regs[R_ESP];
  790 + esp_mask = 0xffffffff;
  791 + if (!(env->segs[R_SS].flags & DESC_B_MASK))
  792 + esp_mask = 0xffff;
  793 + ssp = env->segs[R_SS].base;
  794 + if (shift) {
  795 + esp -= 4;
  796 + stl(ssp + (esp & esp_mask), env->segs[R_CS].selector);
  797 + esp -= 4;
  798 + stl(ssp + (esp & esp_mask), next_eip);
  799 + } else {
  800 + esp -= 2;
  801 + stw(ssp + (esp & esp_mask), env->segs[R_CS].selector);
  802 + esp -= 2;
  803 + stw(ssp + (esp & esp_mask), next_eip);
  804 + }
  805 +
  806 + if (!(env->segs[R_SS].flags & DESC_B_MASK))
  807 + env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
  808 + else
  809 + env->regs[R_ESP] = esp;
  810 + env->eip = new_eip;
  811 + env->segs[R_CS].selector = new_cs;
  812 + env->segs[R_CS].base = (uint8_t *)(new_cs << 4);
  813 +}
  814 +
  815 +/* protected mode call */
  816 +void helper_lcall_protected_T0_T1(int shift, int next_eip)
  817 +{
  818 + int new_cs, new_eip;
  819 + SegmentCache sc1;
  820 + uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
  821 + uint32_t ss, ss_e1, ss_e2, push_size, sp, type, ss_dpl;
  822 + uint32_t old_ss, old_esp, val, i;
  823 + uint8_t *ssp, *old_ssp;
  824 +
  825 + new_cs = T0;
  826 + new_eip = T1;
  827 + if ((new_cs & 0xfffc) == 0)
  828 + raise_exception_err(EXCP0D_GPF, 0);
  829 + if (load_segment(&e1, &e2, new_cs) != 0)
  830 + raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
747 831 cpl = env->segs[R_CS].selector & 3;
748 832 if (e2 & DESC_S_MASK) {
749 833 if (!(e2 & DESC_CS_MASK))
750   - raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
  834 + raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
751 835 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
752 836 if (e2 & DESC_CS_MASK) {
753 837 /* conforming code segment */
754 838 if (dpl > cpl)
755   - raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
  839 + raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
756 840 } else {
757 841 /* non conforming code segment */
758   - rpl = selector & 3;
  842 + rpl = new_cs & 3;
759 843 if (rpl > cpl)
760   - raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
  844 + raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
761 845 if (dpl != cpl)
762   - raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
  846 + raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
763 847 }
764 848 if (!(e2 & DESC_P_MASK))
765   - raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
  849 + raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
  850 +
  851 + sp = env->regs[R_ESP];
  852 + if (!(env->segs[R_SS].flags & DESC_B_MASK))
  853 + sp &= 0xffff;
  854 + ssp = env->segs[R_SS].base + sp;
  855 + if (shift) {
  856 + ssp -= 4;
  857 + stl(ssp, env->segs[R_CS].selector);
  858 + ssp -= 4;
  859 + stl(ssp, next_eip);
  860 + } else {
  861 + ssp -= 2;
  862 + stw(ssp, env->segs[R_CS].selector);
  863 + ssp -= 2;
  864 + stw(ssp, next_eip);
  865 + }
  866 + sp -= (4 << shift);
  867 +
766 868 load_seg_cache(&sc1, e1, e2);
767 869 if (new_eip > sc1.limit)
768   - raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
  870 + raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
  871 + /* from this point, not restartable */
  872 + if (!(env->segs[R_SS].flags & DESC_B_MASK))
  873 + env->regs[R_ESP] = (env->regs[R_ESP] & 0xffff0000) | (sp & 0xffff);
  874 + else
  875 + env->regs[R_ESP] = sp;
769 876 env->segs[R_CS].base = sc1.base;
770 877 env->segs[R_CS].limit = sc1.limit;
771 878 env->segs[R_CS].flags = sc1.flags;
772   - env->segs[R_CS].selector = (selector & 0xfffc) | cpl;
  879 + env->segs[R_CS].selector = (new_cs & 0xfffc) | cpl;
773 880 EIP = new_eip;
774 881 } else {
775   - cpu_abort(env, "jmp to call/task gate not supported 0x%04x:0x%08x",
776   - selector, new_eip);
  882 + /* check gate type */
  883 + type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
  884 + switch(type) {
  885 + case 1: /* available 286 TSS */
  886 + case 9: /* available 386 TSS */
  887 + case 5: /* task gate */
  888 + cpu_abort(env, "task gate not supported");
  889 + break;
  890 + case 4: /* 286 call gate */
  891 + case 12: /* 386 call gate */
  892 + break;
  893 + default:
  894 + raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
  895 + break;
  896 + }
  897 + shift = type >> 3;
  898 +
  899 + dpl = (e2 >> DESC_DPL_SHIFT) & 3;
  900 + rpl = new_cs & 3;
  901 + if (dpl < cpl || dpl < rpl)
  902 + raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
  903 + /* check valid bit */
  904 + if (!(e2 & DESC_P_MASK))
  905 + raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
  906 + selector = e1 >> 16;
  907 + offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
  908 + if ((selector & 0xfffc) == 0)
  909 + raise_exception_err(EXCP0D_GPF, 0);
  910 +
  911 + if (load_segment(&e1, &e2, selector) != 0)
  912 + raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
  913 + if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
  914 + raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
  915 + dpl = (e2 >> DESC_DPL_SHIFT) & 3;
  916 + if (dpl > cpl)
  917 + raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
  918 + if (!(e2 & DESC_P_MASK))
  919 + raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
  920 +
  921 + if (!(e2 & DESC_C_MASK) && dpl < cpl) {
  921 + /* to inner privilege */
  923 + get_ss_esp_from_tss(&ss, &sp, dpl);
  924 + if ((ss & 0xfffc) == 0)
  925 + raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
  926 + if ((ss & 3) != dpl)
  927 + raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
  928 + if (load_segment(&ss_e1, &ss_e2, ss) != 0)
  929 + raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
  930 + ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
  931 + if (ss_dpl != dpl)
  932 + raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
  933 + if (!(ss_e2 & DESC_S_MASK) ||
  934 + (ss_e2 & DESC_CS_MASK) ||
  935 + !(ss_e2 & DESC_W_MASK))
  936 + raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
  937 + if (!(ss_e2 & DESC_P_MASK))
  938 + raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
  939 +
  940 + param_count = e2 & 0x1f;
  941 + push_size = ((param_count * 2) + 8) << shift;
  942 +
  943 + old_esp = env->regs[R_ESP];
  944 + old_ss = env->segs[R_SS].selector;
  945 + if (!(env->segs[R_SS].flags & DESC_B_MASK))
  946 + old_esp &= 0xffff;
  947 + old_ssp = env->segs[R_SS].base + old_esp;
  948 +
  949 + /* XXX: from this point not restartable */
  950 + load_seg(R_SS, ss, env->eip);
  951 +
  952 + if (!(env->segs[R_SS].flags & DESC_B_MASK))
  953 + sp &= 0xffff;
  954 + ssp = env->segs[R_SS].base + sp;
  955 + if (shift) {
  956 + ssp -= 4;
  957 + stl(ssp, old_ss);
  958 + ssp -= 4;
  959 + stl(ssp, old_esp);
  960 + ssp -= 4 * param_count;
  961 + for(i = 0; i < param_count; i++) {
  962 + val = ldl(old_ssp + i * 4);
  963 + stl(ssp + i * 4, val);
  964 + }
  965 + } else {
  966 + ssp -= 2;
  967 + stw(ssp, old_ss);
  968 + ssp -= 2;
  969 + stw(ssp, old_esp);
  970 + ssp -= 2 * param_count;
  971 + for(i = 0; i < param_count; i++) {
  972 + val = lduw(old_ssp + i * 2);
  973 + stw(ssp + i * 2, val);
  974 + }
  975 + }
  976 + } else {
  977 + /* to same privilege */
  978 + if (!(env->segs[R_SS].flags & DESC_B_MASK))
  979 + sp &= 0xffff;
  980 + ssp = env->segs[R_SS].base + sp;
  981 + push_size = (4 << shift);
  982 + }
  983 +
  984 + if (shift) {
  985 + ssp -= 4;
  986 + stl(ssp, env->segs[R_CS].selector);
  987 + ssp -= 4;
  988 + stl(ssp, next_eip);
  989 + } else {
  990 + ssp -= 2;
  991 + stw(ssp, env->segs[R_CS].selector);
  992 + ssp -= 2;
  993 + stw(ssp, next_eip);
  994 + }
  995 +
  996 + sp -= push_size;
  997 + load_seg(R_CS, selector, env->eip);
  998 + /* from this point, not restartable if same privilege */
  999 + if (!(env->segs[R_SS].flags & DESC_B_MASK))
  1000 + env->regs[R_ESP] = (env->regs[R_ESP] & 0xffff0000) | (sp & 0xffff);
  1001 + else
  1002 + env->regs[R_ESP] = sp;
  1003 + EIP = offset;
777 1004 }
778 1005 }
779 1006  
... ... @@ -820,7 +1047,7 @@ void helper_iret_real(int shift)
820 1047 }
821 1048  
822 1049 /* protected mode iret */
823   -void helper_iret_protected(int shift)
  1050 +static inline void helper_ret_protected(int shift, int is_iret, int addend)
824 1051 {
825 1052 uint32_t sp, new_cs, new_eip, new_eflags, new_esp, new_ss;
826 1053 uint32_t new_es, new_ds, new_fs, new_gs;
... ... @@ -834,14 +1061,16 @@ void helper_iret_protected(int shift)
834 1061 ssp = env->segs[R_SS].base + sp;
835 1062 if (shift == 1) {
836 1063 /* 32 bits */
837   - new_eflags = ldl(ssp + 8);
  1064 + if (is_iret)
  1065 + new_eflags = ldl(ssp + 8);
838 1066 new_cs = ldl(ssp + 4) & 0xffff;
839 1067 new_eip = ldl(ssp);
840   - if (new_eflags & VM_MASK)
  1068 + if (is_iret && (new_eflags & VM_MASK))
841 1069 goto return_to_vm86;
842 1070 } else {
843 1071 /* 16 bits */
844   - new_eflags = lduw(ssp + 4);
  1072 + if (is_iret)
  1073 + new_eflags = lduw(ssp + 4);
845 1074 new_cs = lduw(ssp + 2);
846 1075 new_eip = lduw(ssp);
847 1076 }
... ... @@ -870,17 +1099,18 @@ void helper_iret_protected(int shift)
870 1099 if (rpl == cpl) {
871 1100 /* return to same privilege level */
872 1101 load_seg(R_CS, new_cs, env->eip);
873   - new_esp = sp + (6 << shift);
  1102 + new_esp = sp + (4 << shift) + ((2 * is_iret) << shift) + addend;
874 1103 } else {
875   - /* return to differentr priviledge level */
  1104 + /* return to different privilege level */
  1105 + ssp += (4 << shift) + ((2 * is_iret) << shift) + addend;
876 1106 if (shift == 1) {
877 1107 /* 32 bits */
878   - new_esp = ldl(ssp + 12);
879   - new_ss = ldl(ssp + 16) & 0xffff;
  1108 + new_esp = ldl(ssp);
  1109 + new_ss = ldl(ssp + 4) & 0xffff;
880 1110 } else {
881 1111 /* 16 bits */
882   - new_esp = lduw(ssp + 6);
883   - new_ss = lduw(ssp + 8);
  1112 + new_esp = lduw(ssp);
  1113 + new_ss = lduw(ssp + 2);
884 1114 }
885 1115  
886 1116 if ((new_ss & 3) != rpl)
... ... @@ -906,13 +1136,15 @@ void helper_iret_protected(int shift)
906 1136 env->regs[R_ESP] = (env->regs[R_ESP] & 0xffff0000) |
907 1137 (new_esp & 0xffff);
908 1138 env->eip = new_eip;
909   - if (cpl == 0)
910   - eflags_mask = FL_UPDATE_CPL0_MASK;
911   - else
912   - eflags_mask = FL_UPDATE_MASK32;
913   - if (shift == 0)
914   - eflags_mask &= 0xffff;
915   - load_eflags(new_eflags, eflags_mask);
  1139 + if (is_iret) {
  1140 + if (cpl == 0)
  1141 + eflags_mask = FL_UPDATE_CPL0_MASK;
  1142 + else
  1143 + eflags_mask = FL_UPDATE_MASK32;
  1144 + if (shift == 0)
  1145 + eflags_mask &= 0xffff;
  1146 + load_eflags(new_eflags, eflags_mask);
  1147 + }
916 1148 return;
917 1149  
918 1150 return_to_vm86:
... ... @@ -936,6 +1168,16 @@ void helper_iret_protected(int shift)
936 1168 env->regs[R_ESP] = new_esp;
937 1169 }
938 1170  
  1171 +void helper_iret_protected(int shift)
  1172 +{
  1173 + helper_ret_protected(shift, 1, 0);
  1174 +}
  1175 +
  1176 +void helper_lret_protected(int shift, int addend)
  1177 +{
  1178 + helper_ret_protected(shift, 0, addend);
  1179 +}
  1180 +
939 1181 void helper_movl_crN_T0(int reg)
940 1182 {
941 1183 env->cr[reg] = T0;
... ...
op-i386.c
... ... @@ -948,9 +948,19 @@ void OPPROTO op_lar(void)
948 948 }
949 949  
950 950 /* T0: segment, T1:eip */
951   -void OPPROTO op_ljmp_T0_T1(void)
  951 +void OPPROTO op_ljmp_protected_T0_T1(void)
952 952 {
953   - jmp_seg(T0 & 0xffff, T1);
  953 + helper_ljmp_protected_T0_T1();
  954 +}
  955 +
  956 +void OPPROTO op_lcall_real_T0_T1(void)
  957 +{
  958 + helper_lcall_real_T0_T1(PARAM1, PARAM2);
  959 +}
  960 +
  961 +void OPPROTO op_lcall_protected_T0_T1(void)
  962 +{
  963 + helper_lcall_protected_T0_T1(PARAM1, PARAM2);
954 964 }
955 965  
956 966 void OPPROTO op_iret_real(void)
... ... @@ -963,6 +973,11 @@ void OPPROTO op_iret_protected(void)
963 973 helper_iret_protected(PARAM1);
964 974 }
965 975  
  976 +void OPPROTO op_lret_protected(void)
  977 +{
  978 + helper_lret_protected(PARAM1, PARAM2);
  979 +}
  980 +
966 981 void OPPROTO op_lldt_T0(void)
967 982 {
968 983 helper_lldt_T0();
... ...
translate-i386.c
... ... @@ -1832,19 +1832,18 @@ long disas_insn(DisasContext *s, uint8_t *pc_start)
1832 1832 s->is_jmp = 1;
1833 1833 break;
1834 1834 case 3: /* lcall Ev */
1835   - /* push return segment + offset */
1836   - gen_op_movl_T0_seg(R_CS);
1837   - gen_push_T0(s);
1838   - next_eip = s->pc - s->cs_base;
1839   - gen_op_movl_T0_im(next_eip);
1840   - gen_push_T0(s);
1841   -
1842 1835 gen_op_ld_T1_A0[ot]();
1843 1836 gen_op_addl_A0_im(1 << (ot - OT_WORD + 1));
1844 1837 gen_op_lduw_T0_A0();
1845   - gen_movl_seg_T0(s, R_CS, pc_start - s->cs_base);
1846   - gen_op_movl_T0_T1();
1847   - gen_op_jmp_T0();
  1838 + do_lcall:
  1839 + if (s->pe && !s->vm86) {
  1840 + if (s->cc_op != CC_OP_DYNAMIC)
  1841 + gen_op_set_cc_op(s->cc_op);
  1842 + gen_op_jmp_im(pc_start - s->cs_base);
  1843 + gen_op_lcall_protected_T0_T1(dflag, s->pc - s->cs_base);
  1844 + } else {
  1845 + gen_op_lcall_real_T0_T1(dflag, s->pc - s->cs_base);
  1846 + }
1848 1847 s->is_jmp = 1;
1849 1848 break;
1850 1849 case 4: /* jmp Ev */
... ... @@ -1857,10 +1856,12 @@ long disas_insn(DisasContext *s, uint8_t *pc_start)
1857 1856 gen_op_ld_T1_A0[ot]();
1858 1857 gen_op_addl_A0_im(1 << (ot - OT_WORD + 1));
1859 1858 gen_op_lduw_T0_A0();
  1859 + do_ljmp:
1860 1860 if (s->pe && !s->vm86) {
1861   - /* we compute EIP to handle the exception case */
  1861 + if (s->cc_op != CC_OP_DYNAMIC)
  1862 + gen_op_set_cc_op(s->cc_op);
1862 1863 gen_op_jmp_im(pc_start - s->cs_base);
1863   - gen_op_ljmp_T0_T1();
  1864 + gen_op_ljmp_protected_T0_T1();
1864 1865 } else {
1865 1866 gen_op_movl_seg_T0_vm(offsetof(CPUX86State,segs[R_CS]));
1866 1867 gen_op_movl_T0_T1();
... ... @@ -2867,7 +2868,7 @@ long disas_insn(DisasContext *s, uint8_t *pc_start)
2867 2868 else
2868 2869 ot = dflag ? OT_LONG : OT_WORD;
2869 2870  
2870   - if (prefixes & PREFIX_REPZ) {
  2871 + if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
2871 2872 gen_string_ds(s, ot, gen_op_movs + 9);
2872 2873 } else {
2873 2874 gen_string_ds(s, ot, gen_op_movs);
... ... @@ -2881,7 +2882,7 @@ long disas_insn(DisasContext *s, uint8_t *pc_start)
2881 2882 else
2882 2883 ot = dflag ? OT_LONG : OT_WORD;
2883 2884  
2884   - if (prefixes & PREFIX_REPZ) {
  2885 + if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
2885 2886 gen_string_es(s, ot, gen_op_stos + 9);
2886 2887 } else {
2887 2888 gen_string_es(s, ot, gen_op_stos);
... ... @@ -2893,7 +2894,7 @@ long disas_insn(DisasContext *s, uint8_t *pc_start)
2893 2894 ot = OT_BYTE;
2894 2895 else
2895 2896 ot = dflag ? OT_LONG : OT_WORD;
2896   - if (prefixes & PREFIX_REPZ) {
  2897 + if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
2897 2898 gen_string_ds(s, ot, gen_op_lods + 9);
2898 2899 } else {
2899 2900 gen_string_ds(s, ot, gen_op_lods);
... ... @@ -2952,7 +2953,7 @@ long disas_insn(DisasContext *s, uint8_t *pc_start)
2952 2953 ot = OT_BYTE;
2953 2954 else
2954 2955 ot = dflag ? OT_LONG : OT_WORD;
2955   - if (prefixes & PREFIX_REPZ) {
  2956 + if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
2956 2957 gen_string_es(s, ot, gen_op_ins + 9);
2957 2958 } else {
2958 2959 gen_string_es(s, ot, gen_op_ins);
... ... @@ -2969,7 +2970,7 @@ long disas_insn(DisasContext *s, uint8_t *pc_start)
2969 2970 ot = OT_BYTE;
2970 2971 else
2971 2972 ot = dflag ? OT_LONG : OT_WORD;
2972   - if (prefixes & PREFIX_REPZ) {
  2973 + if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
2973 2974 gen_string_ds(s, ot, gen_op_outs + 9);
2974 2975 } else {
2975 2976 gen_string_ds(s, ot, gen_op_outs);
... ... @@ -3062,20 +3063,27 @@ long disas_insn(DisasContext *s, uint8_t *pc_start)
3062 3063 val = ldsw(s->pc);
3063 3064 s->pc += 2;
3064 3065 do_lret:
3065   - gen_stack_A0(s);
3066   - /* pop offset */
3067   - gen_op_ld_T0_A0[1 + s->dflag]();
3068   - if (s->dflag == 0)
3069   - gen_op_andl_T0_ffff();
3070   - /* NOTE: keeping EIP updated is not a problem in case of
3071   - exception */
3072   - gen_op_jmp_T0();
3073   - /* pop selector */
3074   - gen_op_addl_A0_im(2 << s->dflag);
3075   - gen_op_ld_T0_A0[1 + s->dflag]();
3076   - gen_movl_seg_T0(s, R_CS, pc_start - s->cs_base);
3077   - /* add stack offset */
3078   - gen_stack_update(s, val + (4 << s->dflag));
  3066 + if (s->pe && !s->vm86) {
  3067 + if (s->cc_op != CC_OP_DYNAMIC)
  3068 + gen_op_set_cc_op(s->cc_op);
  3069 + gen_op_jmp_im(pc_start - s->cs_base);
  3070 + gen_op_lret_protected(s->dflag, val);
  3071 + } else {
  3072 + gen_stack_A0(s);
  3073 + /* pop offset */
  3074 + gen_op_ld_T0_A0[1 + s->dflag]();
  3075 + if (s->dflag == 0)
  3076 + gen_op_andl_T0_ffff();
  3077 + /* NOTE: keeping EIP updated is not a problem in case of
  3078 + exception */
  3079 + gen_op_jmp_T0();
  3080 + /* pop selector */
  3081 + gen_op_addl_A0_im(2 << s->dflag);
  3082 + gen_op_ld_T0_A0[1 + s->dflag]();
  3083 + gen_op_movl_seg_T0_vm(offsetof(CPUX86State,segs[R_CS]));
  3084 + /* add stack offset */
  3085 + gen_stack_update(s, val + (4 << s->dflag));
  3086 + }
3079 3087 s->is_jmp = 1;
3080 3088 break;
3081 3089 case 0xcb: /* lret */
... ... @@ -3114,26 +3122,15 @@ long disas_insn(DisasContext *s, uint8_t *pc_start)
3114 3122 case 0x9a: /* lcall im */
3115 3123 {
3116 3124 unsigned int selector, offset;
3117   - /* XXX: not restartable */
3118 3125  
3119 3126 ot = dflag ? OT_LONG : OT_WORD;
3120 3127 offset = insn_get(s, ot);
3121 3128 selector = insn_get(s, OT_WORD);
3122 3129  
3123   - /* push return segment + offset */
3124   - gen_op_movl_T0_seg(R_CS);
3125   - gen_push_T0(s);
3126   - next_eip = s->pc - s->cs_base;
3127   - gen_op_movl_T0_im(next_eip);
3128   - gen_push_T0(s);
3129   -
3130   - /* change cs and pc */
3131 3130 gen_op_movl_T0_im(selector);
3132   - gen_movl_seg_T0(s, R_CS, pc_start - s->cs_base);
3133   - gen_op_jmp_im((unsigned long)offset);
3134   - s->is_jmp = 1;
  3131 + gen_op_movl_T1_im(offset);
3135 3132 }
3136   - break;
  3133 + goto do_lcall;
3137 3134 case 0xe9: /* jmp */
3138 3135 ot = dflag ? OT_LONG : OT_WORD;
3139 3136 val = insn_get(s, ot);
... ... @@ -3150,20 +3147,10 @@ long disas_insn(DisasContext *s, uint8_t *pc_start)
3150 3147 offset = insn_get(s, ot);
3151 3148 selector = insn_get(s, OT_WORD);
3152 3149  
3153   - /* change cs and pc */
3154 3150 gen_op_movl_T0_im(selector);
3155   - if (s->pe && !s->vm86) {
3156   - /* we compute EIP to handle the exception case */
3157   - gen_op_jmp_im(pc_start - s->cs_base);
3158   - gen_op_movl_T1_im(offset);
3159   - gen_op_ljmp_T0_T1();
3160   - } else {
3161   - gen_op_movl_seg_T0_vm(offsetof(CPUX86State,segs[R_CS]));
3162   - gen_op_jmp_im((unsigned long)offset);
3163   - }
3164   - s->is_jmp = 1;
  3151 + gen_op_movl_T1_im(offset);
3165 3152 }
3166   - break;
  3153 + goto do_ljmp;
3167 3154 case 0xeb: /* jmp Jb */
3168 3155 val = (int8_t)insn_get(s, OT_BYTE);
3169 3156 val += s->pc - s->cs_base;
... ...