Commit 239fbd8623f627c1e6c0f2b41e2c5686c89d4aa6
1 parent
0573fbfc
Add missing svm.h header, and add a Changelog entry.
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@3211 c046a42c-6fe2-441c-8c8c-71466251a162
Showing
2 changed files
with
359 additions
and
0 deletions
Changelog
| ... | ... | @@ -11,6 +11,7 @@ |
| 11 | 11 | - MIPS64 support (Aurelien Jarno, Thiemo Seufer) |
| 12 | 12 | - Preliminary Alpha guest support (J. Mayer) |
| 13 | 13 | - Read-only support for Parallels disk images (Alex Beregszaszi) |
| 14 | + - SVM (x86 virtualization) support (Alexander Graf) | |
| 14 | 15 | |
| 15 | 16 | version 0.9.0: |
| 16 | 17 | ... | ... |
target-i386/svm.h
0 → 100644
| 1 | +#ifndef __SVM_H | |
| 2 | +#define __SVM_H | |
| 3 | + | |
/* Intercept bit numbers for vmcb_control_area.intercept.
   We shift all the intercept bits so we can OR them with the
   TB flags later on.
   NOTE(review): HF_HIF_SHIFT is defined in the target's cpu.h, which is
   not visible here -- this header assumes it is already in scope. */
enum {
    INTERCEPT_INTR = HF_HIF_SHIFT,   /* first bit placed just above the hflags */
    INTERCEPT_NMI,
    INTERCEPT_SMI,
    INTERCEPT_INIT,
    INTERCEPT_VINTR,
    INTERCEPT_SELECTIVE_CR0,
    INTERCEPT_STORE_IDTR,
    INTERCEPT_STORE_GDTR,
    INTERCEPT_STORE_LDTR,
    INTERCEPT_STORE_TR,
    INTERCEPT_LOAD_IDTR,
    INTERCEPT_LOAD_GDTR,
    INTERCEPT_LOAD_LDTR,
    INTERCEPT_LOAD_TR,
    INTERCEPT_RDTSC,
    INTERCEPT_RDPMC,
    INTERCEPT_PUSHF,
    INTERCEPT_POPF,
    INTERCEPT_CPUID,
    INTERCEPT_RSM,
    INTERCEPT_IRET,
    INTERCEPT_INTn,
    INTERCEPT_INVD,
    INTERCEPT_PAUSE,
    INTERCEPT_HLT,
    INTERCEPT_INVLPG,
    INTERCEPT_INVLPGA,
    INTERCEPT_IOIO_PROT,
    INTERCEPT_MSR_PROT,
    INTERCEPT_TASK_SWITCH,
    INTERCEPT_FERR_FREEZE,
    INTERCEPT_SHUTDOWN,
    INTERCEPT_VMRUN,
    INTERCEPT_VMMCALL,
    INTERCEPT_VMLOAD,
    INTERCEPT_VMSAVE,
    INTERCEPT_STGI,
    INTERCEPT_CLGI,
    INTERCEPT_SKINIT,
    INTERCEPT_RDTSCP,
    INTERCEPT_ICEBP,
    INTERCEPT_WBINVD,
};
/* This is not really an intercept but rather a placeholder to
   show that we are in an SVM (just like a hidden flag, but keeps the
   TBs clean) */
#define INTERCEPT_SVM 63
#define INTERCEPT_SVM_MASK (1ULL << INTERCEPT_SVM)

/* VMCB control area: intercept settings, guest interrupt state and
   #VMEXIT information.
   NOTE(review): the field layout is assumed to match the hardware VMCB
   control area from AMD's documentation; offsets are fixed by the
   packed attribute and the reserved padding -- not re-verified here. */
struct __attribute__ ((__packed__)) vmcb_control_area {
    uint16_t intercept_cr_read;      /* bit n set: intercept reads of CRn */
    uint16_t intercept_cr_write;     /* bit n set: intercept writes of CRn */
    uint16_t intercept_dr_read;      /* bit n set: intercept reads of DRn */
    uint16_t intercept_dr_write;     /* bit n set: intercept writes of DRn */
    uint32_t intercept_exceptions;   /* bit n set: intercept exception vector n */
    uint64_t intercept;              /* main intercept vector, see enum above */
    uint8_t reserved_1[44];
    uint64_t iopm_base_pa;           /* phys addr of I/O permission map */
    uint64_t msrpm_base_pa;          /* phys addr of MSR permission map */
    uint64_t tsc_offset;             /* added to host TSC for guest RDTSC */
    uint32_t asid;                   /* guest address space identifier */
    uint8_t tlb_ctl;                 /* TLB_CONTROL_* below */
    uint8_t reserved_2[3];
    uint32_t int_ctl;                /* virtual interrupt control, V_* masks below */
    uint32_t int_vector;             /* virtual interrupt vector */
    uint32_t int_state;              /* guest interruptibility state */
    uint8_t reserved_3[4];
    uint32_t exit_code;              /* SVM_EXIT_* reason of the last #VMEXIT */
    uint32_t exit_code_hi;
    uint64_t exit_info_1;            /* exit-qualifying information */
    uint64_t exit_info_2;
    uint32_t exit_int_info;          /* event pending at #VMEXIT, SVM_EXITINTINFO_* */
    uint32_t exit_int_info_err;
    uint64_t nested_ctl;             /* nested paging enable */
    uint8_t reserved_4[16];
    uint32_t event_inj;              /* event to inject on VMRUN, SVM_EVTINJ_* */
    uint32_t event_inj_err;          /* error code for the injected event */
    uint64_t nested_cr3;             /* nested-paging CR3 */
    uint64_t lbr_ctl;                /* LBR virtualization control */
    uint8_t reserved_5[832];         /* pad control area to its fixed size */
};
| 88 | + | |
| 89 | + | |
/* Values for vmcb_control_area.tlb_ctl. */
#define TLB_CONTROL_DO_NOTHING 0
#define TLB_CONTROL_FLUSH_ALL_ASID 1

/* Bit fields of vmcb_control_area.int_ctl (virtual interrupt control). */
#define V_TPR_MASK 0x0f                 /* virtual task priority */

#define V_IRQ_SHIFT 8                   /* virtual interrupt pending */
#define V_IRQ_MASK (1 << V_IRQ_SHIFT)

#define V_INTR_PRIO_SHIFT 16            /* priority of the pending virtual irq */
#define V_INTR_PRIO_MASK (0x0f << V_INTR_PRIO_SHIFT)

#define V_IGN_TPR_SHIFT 20              /* deliver irq regardless of V_TPR */
#define V_IGN_TPR_MASK (1 << V_IGN_TPR_SHIFT)

#define V_INTR_MASKING_SHIFT 24         /* virtualize EFLAGS.IF and TPR */
#define V_INTR_MASKING_MASK (1 << V_INTR_MASKING_SHIFT)

/* Bit in vmcb_control_area.int_state: guest is in an interrupt shadow. */
#define SVM_INTERRUPT_SHADOW_MASK 1

/* Decoding of exit_info_1 for SVM_EXIT_IOIO (I/O instruction intercepts). */
#define SVM_IOIO_STR_SHIFT 2            /* string instruction (INS/OUTS) */
#define SVM_IOIO_REP_SHIFT 3            /* REP prefixed */
#define SVM_IOIO_SIZE_SHIFT 4           /* operand size field */
#define SVM_IOIO_ASIZE_SHIFT 7          /* address size field */

#define SVM_IOIO_TYPE_MASK 1            /* 1 = IN, 0 = OUT */
#define SVM_IOIO_STR_MASK (1 << SVM_IOIO_STR_SHIFT)
#define SVM_IOIO_REP_MASK (1 << SVM_IOIO_REP_SHIFT)
#define SVM_IOIO_SIZE_MASK (7 << SVM_IOIO_SIZE_SHIFT)
#define SVM_IOIO_ASIZE_MASK (7 << SVM_IOIO_ASIZE_SHIFT)

/* Image of one segment register in the VMCB save area. */
struct __attribute__ ((__packed__)) vmcb_seg {
    uint16_t selector;
    uint16_t attrib;    /* packed descriptor attribute bits, SVM_SELECTOR_* */
    uint32_t limit;
    uint64_t base;
};
| 126 | + | |
/* VMCB save area: the guest register state loaded/stored by VMRUN and
   #VMEXIT.
   NOTE(review): layout assumed to follow the hardware VMCB save area;
   the reserved arrays fix the field offsets -- not re-verified here. */
struct __attribute__ ((__packed__)) vmcb_save_area {
    struct vmcb_seg es;
    struct vmcb_seg cs;
    struct vmcb_seg ss;
    struct vmcb_seg ds;
    struct vmcb_seg fs;
    struct vmcb_seg gs;
    struct vmcb_seg gdtr;   /* only base/limit are meaningful for *dtr */
    struct vmcb_seg ldtr;
    struct vmcb_seg idtr;
    struct vmcb_seg tr;
    uint8_t reserved_1[43];
    uint8_t cpl;            /* current privilege level of the guest */
    uint8_t reserved_2[4];
    uint64_t efer;
    uint8_t reserved_3[112];
    uint64_t cr4;
    uint64_t cr3;
    uint64_t cr0;
    uint64_t dr7;
    uint64_t dr6;
    uint64_t rflags;
    uint64_t rip;
    uint8_t reserved_4[88];
    uint64_t rsp;
    uint8_t reserved_5[24];
    uint64_t rax;
    uint64_t star;
    uint64_t lstar;
    uint64_t cstar;
    uint64_t sfmask;
    uint64_t kernel_gs_base;
    uint64_t sysenter_cs;
    uint64_t sysenter_esp;
    uint64_t sysenter_eip;
    uint64_t cr2;
    /* qemu: cr8 added to reuse this as hsave (host save area);
       the following reserved block shrinks by 8 to keep offsets intact */
    uint64_t cr8;
    uint8_t reserved_6[32 - 8]; /* originally 32 */
    uint64_t g_pat;
    uint64_t dbgctl;
    uint64_t br_from;       /* last branch record: source */
    uint64_t br_to;         /* last branch record: destination */
    uint64_t last_excp_from;
    uint64_t last_excp_to;
};
| 173 | + | |
/* Complete VMCB: control area followed by the guest save state. */
struct __attribute__ ((__packed__)) vmcb {
    struct vmcb_control_area control;
    struct vmcb_save_area save;
};
| 178 | + | |
/* CPUID: SVM feature bit (in the ECX feature flags) and the SVM leaf. */
#define SVM_CPUID_FEATURE_SHIFT 2
#define SVM_CPUID_FUNC 0x8000000a

/* EFER.SVME: global enable bit for SVM. */
#define MSR_EFER_SVME_MASK (1ULL << 12)

/* Bit positions inside vmcb_seg.attrib (packed descriptor attributes). */
#define SVM_SELECTOR_S_SHIFT 4
#define SVM_SELECTOR_DPL_SHIFT 5
#define SVM_SELECTOR_P_SHIFT 7
#define SVM_SELECTOR_AVL_SHIFT 8
#define SVM_SELECTOR_L_SHIFT 9
#define SVM_SELECTOR_DB_SHIFT 10
#define SVM_SELECTOR_G_SHIFT 11

#define SVM_SELECTOR_TYPE_MASK (0xf)
#define SVM_SELECTOR_S_MASK (1 << SVM_SELECTOR_S_SHIFT)
#define SVM_SELECTOR_DPL_MASK (3 << SVM_SELECTOR_DPL_SHIFT)
#define SVM_SELECTOR_P_MASK (1 << SVM_SELECTOR_P_SHIFT)
#define SVM_SELECTOR_AVL_MASK (1 << SVM_SELECTOR_AVL_SHIFT)
#define SVM_SELECTOR_L_MASK (1 << SVM_SELECTOR_L_SHIFT)
#define SVM_SELECTOR_DB_MASK (1 << SVM_SELECTOR_DB_SHIFT)
#define SVM_SELECTOR_G_MASK (1 << SVM_SELECTOR_G_SHIFT)

/* Type-field bits within SVM_SELECTOR_TYPE_MASK. */
#define SVM_SELECTOR_WRITE_MASK (1 << 1)
#define SVM_SELECTOR_READ_MASK SVM_SELECTOR_WRITE_MASK
#define SVM_SELECTOR_CODE_MASK (1 << 3)

/* Bits of intercept_cr_read / intercept_cr_write. */
#define INTERCEPT_CR0_MASK 1
#define INTERCEPT_CR3_MASK (1 << 3)
#define INTERCEPT_CR4_MASK (1 << 4)

/* Bits of intercept_dr_read / intercept_dr_write. */
#define INTERCEPT_DR0_MASK 1
#define INTERCEPT_DR1_MASK (1 << 1)
#define INTERCEPT_DR2_MASK (1 << 2)
#define INTERCEPT_DR3_MASK (1 << 3)
#define INTERCEPT_DR4_MASK (1 << 4)
#define INTERCEPT_DR5_MASK (1 << 5)
#define INTERCEPT_DR6_MASK (1 << 6)
#define INTERCEPT_DR7_MASK (1 << 7)

/* Fields of vmcb_control_area.event_inj (event injection). */
#define SVM_EVTINJ_VEC_MASK 0xff

#define SVM_EVTINJ_TYPE_SHIFT 8
#define SVM_EVTINJ_TYPE_MASK (7 << SVM_EVTINJ_TYPE_SHIFT)

#define SVM_EVTINJ_TYPE_INTR (0 << SVM_EVTINJ_TYPE_SHIFT)
#define SVM_EVTINJ_TYPE_NMI (2 << SVM_EVTINJ_TYPE_SHIFT)
#define SVM_EVTINJ_TYPE_EXEPT (3 << SVM_EVTINJ_TYPE_SHIFT)
#define SVM_EVTINJ_TYPE_SOFT (4 << SVM_EVTINJ_TYPE_SHIFT)

/* Was (1 << 31): left-shifting a signed 1 into the sign bit is undefined
   behavior in C; use an unsigned constant for the same 0x80000000 value. */
#define SVM_EVTINJ_VALID (1U << 31)
#define SVM_EVTINJ_VALID_ERR (1 << 11)

/* exit_int_info uses the same encoding as event_inj. */
#define SVM_EXITINTINFO_VEC_MASK SVM_EVTINJ_VEC_MASK

#define SVM_EXITINTINFO_TYPE_INTR SVM_EVTINJ_TYPE_INTR
#define SVM_EXITINTINFO_TYPE_NMI SVM_EVTINJ_TYPE_NMI
#define SVM_EXITINTINFO_TYPE_EXEPT SVM_EVTINJ_TYPE_EXEPT
#define SVM_EXITINTINFO_TYPE_SOFT SVM_EVTINJ_TYPE_SOFT

#define SVM_EXITINTINFO_VALID SVM_EVTINJ_VALID
#define SVM_EXITINTINFO_VALID_ERR SVM_EVTINJ_VALID_ERR
| 240 | + | |
/* #VMEXIT reason codes, stored in vmcb_control_area.exit_code. */

/* CRn read/write intercepts: base + register number. */
#define SVM_EXIT_READ_CR0 	0x000
#define SVM_EXIT_READ_CR3 	0x003
#define SVM_EXIT_READ_CR4 	0x004
#define SVM_EXIT_READ_CR8 	0x008
#define SVM_EXIT_WRITE_CR0 	0x010
#define SVM_EXIT_WRITE_CR3 	0x013
#define SVM_EXIT_WRITE_CR4 	0x014
#define SVM_EXIT_WRITE_CR8 	0x018
/* DRn read/write intercepts. */
#define SVM_EXIT_READ_DR0 	0x020
#define SVM_EXIT_READ_DR1 	0x021
#define SVM_EXIT_READ_DR2 	0x022
#define SVM_EXIT_READ_DR3 	0x023
#define SVM_EXIT_READ_DR4 	0x024
#define SVM_EXIT_READ_DR5 	0x025
#define SVM_EXIT_READ_DR6 	0x026
#define SVM_EXIT_READ_DR7 	0x027
#define SVM_EXIT_WRITE_DR0 	0x030
#define SVM_EXIT_WRITE_DR1 	0x031
#define SVM_EXIT_WRITE_DR2 	0x032
#define SVM_EXIT_WRITE_DR3 	0x033
#define SVM_EXIT_WRITE_DR4 	0x034
#define SVM_EXIT_WRITE_DR5 	0x035
#define SVM_EXIT_WRITE_DR6 	0x036
#define SVM_EXIT_WRITE_DR7 	0x037
/* Exception intercepts: SVM_EXIT_EXCP_BASE + exception vector. */
#define SVM_EXIT_EXCP_BASE 	0x040
#define SVM_EXIT_INTR 		0x060
#define SVM_EXIT_NMI 		0x061
#define SVM_EXIT_SMI 		0x062
#define SVM_EXIT_INIT 		0x063
#define SVM_EXIT_VINTR 		0x064
#define SVM_EXIT_CR0_SEL_WRITE 	0x065
#define SVM_EXIT_IDTR_READ 	0x066
#define SVM_EXIT_GDTR_READ 	0x067
#define SVM_EXIT_LDTR_READ 	0x068
#define SVM_EXIT_TR_READ 	0x069
#define SVM_EXIT_IDTR_WRITE 	0x06a
#define SVM_EXIT_GDTR_WRITE 	0x06b
#define SVM_EXIT_LDTR_WRITE 	0x06c
#define SVM_EXIT_TR_WRITE 	0x06d
#define SVM_EXIT_RDTSC 		0x06e
#define SVM_EXIT_RDPMC 		0x06f
#define SVM_EXIT_PUSHF 		0x070
#define SVM_EXIT_POPF 		0x071
#define SVM_EXIT_CPUID 		0x072
#define SVM_EXIT_RSM 		0x073
#define SVM_EXIT_IRET 		0x074
#define SVM_EXIT_SWINT 		0x075
#define SVM_EXIT_INVD 		0x076
#define SVM_EXIT_PAUSE 		0x077
#define SVM_EXIT_HLT 		0x078
#define SVM_EXIT_INVLPG 	0x079
#define SVM_EXIT_INVLPGA 	0x07a
#define SVM_EXIT_IOIO 		0x07b
#define SVM_EXIT_MSR 		0x07c
#define SVM_EXIT_TASK_SWITCH 	0x07d
#define SVM_EXIT_FERR_FREEZE 	0x07e
#define SVM_EXIT_SHUTDOWN 	0x07f
#define SVM_EXIT_VMRUN 		0x080
#define SVM_EXIT_VMMCALL 	0x081
#define SVM_EXIT_VMLOAD 	0x082
#define SVM_EXIT_VMSAVE 	0x083
#define SVM_EXIT_STGI 		0x084
#define SVM_EXIT_CLGI 		0x085
#define SVM_EXIT_SKINIT 	0x086
#define SVM_EXIT_RDTSCP 	0x087
#define SVM_EXIT_ICEBP 		0x088
#define SVM_EXIT_WBINVD 	0x089
/* only included in documentation, maybe wrong */
#define SVM_EXIT_MONITOR 	0x08a
#define SVM_EXIT_MWAIT 		0x08b
/* Nested page fault. */
#define SVM_EXIT_NPF  		0x400

/* Invalid guest state: VMRUN failed. */
#define SVM_EXIT_ERR		-1

#define SVM_CR0_SELECTIVE_MASK (1 << 3 | 1) /* TS and MP */

/* Raw opcode bytes for the SVM instructions (the 0F 01 /r group), for
   inline assembly on hosts whose assembler does not know these mnemonics.
   NOTE(review): byte values taken as-is from the original -- assumed to
   match the architectural encodings, not re-verified here. */
#define SVM_VMLOAD ".byte 0x0f, 0x01, 0xda"
#define SVM_VMRUN  ".byte 0x0f, 0x01, 0xd8"
#define SVM_VMSAVE ".byte 0x0f, 0x01, 0xdb"
#define SVM_CLGI   ".byte 0x0f, 0x01, 0xdd"
#define SVM_STGI   ".byte 0x0f, 0x01, 0xdc"
#define SVM_INVLPGA ".byte 0x0f, 0x01, 0xdf"

/* function references -- implemented elsewhere in the target code. */

void helper_stgi(void);   /* was (): empty parens declare no prototype in C */
void vmexit(uint64_t exit_code, uint64_t exit_info_1);
int svm_check_intercept_param(uint32_t type, uint64_t param);

/* Convenience wrapper: intercept check with no parameter payload. */
static inline int svm_check_intercept(unsigned int type)
{
    return svm_check_intercept_param(type, 0);
}
| 332 | + | |
| 333 | + | |
/* Test intercept bits in the CPU state.  The mask argument is now
   parenthesized: with the old expansion, INTERCEPTED(a | b) parsed as
   ((env->intercept & a) | b) because | binds looser than &. */
#define INTERCEPTED(mask)       (env->intercept & (mask))
#define INTERCEPTEDw(var, mask) (env->intercept ## var & (mask))
#define INTERCEPTEDl(var, mask) (env->intercept ## var & (mask))
| 337 | + | |
/* Load segment R_<seg_index> into the segment cache from the VMCB at
   physical address addr, converting the attribute bits with
   vmcb2cpu_attrib().  addr is parenthesized against operator-precedence
   surprises in the expansion. */
#define SVM_LOAD_SEG(addr, seg_index, seg) \
    cpu_x86_load_seg_cache(env, \
                    R_##seg_index, \
                    lduw_phys((addr) + offsetof(struct vmcb, save.seg.selector)),\
                    ldq_phys((addr) + offsetof(struct vmcb, save.seg.base)),\
                    ldl_phys((addr) + offsetof(struct vmcb, save.seg.limit)),\
                    vmcb2cpu_attrib(lduw_phys((addr) + offsetof(struct vmcb, save.seg.attrib)), ldq_phys((addr) + offsetof(struct vmcb, save.seg.base)), ldl_phys((addr) + offsetof(struct vmcb, save.seg.limit))))

/* Copy a VMCB segment into env->seg_qemu directly (for dtr/tr style
   fields that bypass the segment cache).  Wrapped in do { } while (0)
   so the multi-statement body stays a single statement after an
   unbraced if/else. */
#define SVM_LOAD_SEG2(addr, seg_qemu, seg_vmcb) \
    do { \
        env->seg_qemu.selector = lduw_phys((addr) + offsetof(struct vmcb, save.seg_vmcb.selector)); \
        env->seg_qemu.base = ldq_phys((addr) + offsetof(struct vmcb, save.seg_vmcb.base)); \
        env->seg_qemu.limit = ldl_phys((addr) + offsetof(struct vmcb, save.seg_vmcb.limit)); \
        env->seg_qemu.flags = vmcb2cpu_attrib(lduw_phys((addr) + offsetof(struct vmcb, save.seg_vmcb.attrib)), env->seg_qemu.base, env->seg_qemu.limit); \
    } while (0)

/* Store env->seg_qemu back into the VMCB segment seg_vmcb, converting
   the flags with cpu2vmcb_attrib().  Same do/while(0) hardening. */
#define SVM_SAVE_SEG(addr, seg_qemu, seg_vmcb) \
    do { \
        stw_phys((addr) + offsetof(struct vmcb, save.seg_vmcb.selector), env->seg_qemu.selector); \
        stq_phys((addr) + offsetof(struct vmcb, save.seg_vmcb.base), env->seg_qemu.base); \
        stl_phys((addr) + offsetof(struct vmcb, save.seg_vmcb.limit), env->seg_qemu.limit); \
        stw_phys((addr) + offsetof(struct vmcb, save.seg_vmcb.attrib), cpu2vmcb_attrib(env->seg_qemu.flags)); \
    } while (0)

| 357 | + | |
| 358 | +#endif | ... | ... |