Changeset 92508 in vbox for trunk/src/VBox/VMM/VMMR3
Timestamp: Nov 19, 2021 1:59:23 AM
File: 1 edited
Legend:
    ' '  unmodified
    '+'  added
    '-'  removed
trunk/src/VBox/VMM/VMMR3/NEMR3Native-linux.cpp
--- r92465
+++ r92508


+/** @callback_method_impl{FNVMMEMTRENDEZVOUS} */
+static DECLCALLBACK(VBOXSTRICTRC) nemR3LnxFixThreadPoke(PVM pVM, PVMCPU pVCpu, void *pvUser)
+{
+    RT_NOREF(pVM, pvUser);
+    int rc = RTThreadControlPokeSignal(pVCpu->hThread, true /*fEnable*/);
+    AssertLogRelRC(rc);
+    return VINF_SUCCESS;
+}
+
+
 /**
  * Try initialize the native API.
…
         STAMR3RegisterF(pVM, &pNemCpu->StatImportOnReturnSkipped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of skipped state imports on loop return", "/NEM/CPU%u/ImportOnReturnSkipped", idCpu);
         STAMR3RegisterF(pVM, &pNemCpu->StatQueryCpuTick, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TSC queries", "/NEM/CPU%u/QueryCpuTick", idCpu);
+        STAMR3RegisterF(pVM, &pNemCpu->StatExitTotal, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "All exits", "/NEM/CPU%u/Exit", idCpu);
+        STAMR3RegisterF(pVM, &pNemCpu->StatExitIo, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_IO", "/NEM/CPU%u/Exit/Io", idCpu);
+        STAMR3RegisterF(pVM, &pNemCpu->StatExitMmio, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_MMIO", "/NEM/CPU%u/Exit/Mmio", idCpu);
+        STAMR3RegisterF(pVM, &pNemCpu->StatExitSetTpr, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_SET_TRP", "/NEM/CPU%u/Exit/SetTpr", idCpu);
+        STAMR3RegisterF(pVM, &pNemCpu->StatExitTprAccess, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_TPR_ACCESS", "/NEM/CPU%u/Exit/TprAccess", idCpu);
+        STAMR3RegisterF(pVM, &pNemCpu->StatExitRdMsr, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_RDMSR", "/NEM/CPU%u/Exit/RdMsr", idCpu);
+        STAMR3RegisterF(pVM, &pNemCpu->StatExitWrMsr, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_WRMSR", "/NEM/CPU%u/Exit/WrMsr", idCpu);
+        STAMR3RegisterF(pVM, &pNemCpu->StatExitIrqWindowOpen, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_IRQ_WINDOWS_OPEN", "/NEM/CPU%u/Exit/IrqWindowOpen", idCpu);
+        STAMR3RegisterF(pVM, &pNemCpu->StatExitHalt, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_HLT", "/NEM/CPU%u/Exit/Hlt", idCpu);
+        STAMR3RegisterF(pVM, &pNemCpu->StatExitIntr, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_INTR", "/NEM/CPU%u/Exit/Intr", idCpu);
+        STAMR3RegisterF(pVM, &pNemCpu->StatExitHypercall, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_HYPERCALL", "/NEM/CPU%u/Exit/Hypercall", idCpu);
+        STAMR3RegisterF(pVM, &pNemCpu->StatExitDebug, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_DEBUG", "/NEM/CPU%u/Exit/Debug", idCpu);
+        STAMR3RegisterF(pVM, &pNemCpu->StatExitBusLock, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_BUS_LOCK", "/NEM/CPU%u/Exit/BusLock", idCpu);
     }
+
+    /*
+     * Make RTThreadPoke work again (disabled for avoiding unnecessary
+     * critical section issues in ring-0).
+     */
+    VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, nemR3LnxFixThreadPoke, NULL);

     /*
…
  * Worker that imports selected state from KVM.
  */
-static int nemHCLnxImportState(PVMCPUCC pVCpu, uint64_t fWhat, struct kvm_run *pRun)
-{
-    RT_NOREF(pVCpu, fWhat, pRun);
-    return VERR_NOT_IMPLEMENTED;
+static int nemHCLnxImportState(PVMCPUCC pVCpu, uint64_t fWhat, PCPUMCTX pCtx, struct kvm_run *pRun)
+{
+    fWhat &= pVCpu->cpum.GstCtx.fExtrn;
+    if (!fWhat)
+        return VINF_SUCCESS;
+
+    /*
+     * Stuff that goes into kvm_run::s.regs.regs:
+     */
+    if (fWhat & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_GPRS_MASK))
+    {
+        if (fWhat & CPUMCTX_EXTRN_RIP)
+            pCtx->rip      = pRun->s.regs.regs.rip;
+        if (fWhat & CPUMCTX_EXTRN_RFLAGS)
+            pCtx->rflags.u = pRun->s.regs.regs.rflags;
+
+        if (fWhat & CPUMCTX_EXTRN_RAX)
+            pCtx->rax      = pRun->s.regs.regs.rax;
+        if (fWhat & CPUMCTX_EXTRN_RCX)
+            pCtx->rcx      = pRun->s.regs.regs.rcx;
+        if (fWhat & CPUMCTX_EXTRN_RDX)
+            pCtx->rdx      = pRun->s.regs.regs.rdx;
+        if (fWhat & CPUMCTX_EXTRN_RBX)
+            pCtx->rbx      = pRun->s.regs.regs.rbx;
+        if (fWhat & CPUMCTX_EXTRN_RSP)
+            pCtx->rsp      = pRun->s.regs.regs.rsp;
+        if (fWhat & CPUMCTX_EXTRN_RBP)
+            pCtx->rbp      = pRun->s.regs.regs.rbp;
+        if (fWhat & CPUMCTX_EXTRN_RSI)
+            pCtx->rsi      = pRun->s.regs.regs.rsi;
+        if (fWhat & CPUMCTX_EXTRN_RDI)
+            pCtx->rdi      = pRun->s.regs.regs.rdi;
+        if (fWhat & CPUMCTX_EXTRN_R8_R15)
+        {
+            pCtx->r8  = pRun->s.regs.regs.r8;
+            pCtx->r9  = pRun->s.regs.regs.r9;
+            pCtx->r10 = pRun->s.regs.regs.r10;
+            pCtx->r11 = pRun->s.regs.regs.r11;
+            pCtx->r12 = pRun->s.regs.regs.r12;
+            pCtx->r13 = pRun->s.regs.regs.r13;
+            pCtx->r14 = pRun->s.regs.regs.r14;
+            pCtx->r15 = pRun->s.regs.regs.r15;
+        }
+    }
+
+    /*
+     * Stuff that goes into kvm_run::s.regs.sregs:
+     */
+    /** @todo apic_base */
+
+    bool fMaybeChangedMode = false;
+    bool fUpdateCr3        = false;
+    if (fWhat & (  CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TABLE_MASK | CPUMCTX_EXTRN_CR_MASK
+                 | CPUMCTX_EXTRN_EFER | CPUMCTX_EXTRN_APIC_TPR))
+    {
+        /** @todo what about Attr.n.u4LimitHigh? */
+#define NEM_LNX_IMPORT_SEG(a_CtxSeg, a_KvmSeg) do { \
+            (a_CtxSeg).u64Base              = (a_KvmSeg).base; \
+            (a_CtxSeg).u32Limit             = (a_KvmSeg).limit; \
+            (a_CtxSeg).ValidSel = (a_CtxSeg).Sel = (a_KvmSeg).selector; \
+            (a_CtxSeg).Attr.n.u4Type        = (a_KvmSeg).type; \
+            (a_CtxSeg).Attr.n.u1DescType    = (a_KvmSeg).s; \
+            (a_CtxSeg).Attr.n.u2Dpl         = (a_KvmSeg).dpl; \
+            (a_CtxSeg).Attr.n.u1Present     = (a_KvmSeg).present; \
+            (a_CtxSeg).Attr.n.u1Available   = (a_KvmSeg).avl; \
+            (a_CtxSeg).Attr.n.u1Long        = (a_KvmSeg).l; \
+            (a_CtxSeg).Attr.n.u1DefBig      = (a_KvmSeg).db; \
+            (a_CtxSeg).Attr.n.u1Granularity = (a_KvmSeg).g; \
+            (a_CtxSeg).Attr.n.u1Unusable    = (a_KvmSeg).unusable; \
+            (a_CtxSeg).fFlags               = CPUMSELREG_FLAGS_VALID; \
+            CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &(a_CtxSeg)); \
+        } while (0)
+
+        if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
+        {
+            if (fWhat & CPUMCTX_EXTRN_ES)
+                NEM_LNX_IMPORT_SEG(pCtx->es, pRun->s.regs.sregs.es);
+            if (fWhat & CPUMCTX_EXTRN_CS)
+                NEM_LNX_IMPORT_SEG(pCtx->cs, pRun->s.regs.sregs.cs);
+            if (fWhat & CPUMCTX_EXTRN_SS)
+                NEM_LNX_IMPORT_SEG(pCtx->ss, pRun->s.regs.sregs.ss);
+            if (fWhat & CPUMCTX_EXTRN_DS)
+                NEM_LNX_IMPORT_SEG(pCtx->ds, pRun->s.regs.sregs.ds);
+            if (fWhat & CPUMCTX_EXTRN_FS)
+                NEM_LNX_IMPORT_SEG(pCtx->fs, pRun->s.regs.sregs.fs);
+            if (fWhat & CPUMCTX_EXTRN_GS)
+                NEM_LNX_IMPORT_SEG(pCtx->gs, pRun->s.regs.sregs.gs);
+        }
+        if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
+        {
+            if (fWhat & CPUMCTX_EXTRN_GDTR)
+            {
+                pCtx->gdtr.pGdt  = pRun->s.regs.sregs.gdt.base;
+                pCtx->gdtr.cbGdt = pRun->s.regs.sregs.gdt.limit;
+            }
+            if (fWhat & CPUMCTX_EXTRN_IDTR)
+            {
+                pCtx->idtr.pIdt  = pRun->s.regs.sregs.idt.base;
+                pCtx->idtr.cbIdt = pRun->s.regs.sregs.idt.limit;
+            }
+            if (fWhat & CPUMCTX_EXTRN_LDTR)
+                NEM_LNX_IMPORT_SEG(pCtx->ldtr, pRun->s.regs.sregs.ldt);
+            if (fWhat & CPUMCTX_EXTRN_TR)
+                NEM_LNX_IMPORT_SEG(pCtx->tr, pRun->s.regs.sregs.tr);
+        }
+        if (fWhat & CPUMCTX_EXTRN_CR_MASK)
+        {
+            if (fWhat & CPUMCTX_EXTRN_CR0)
+            {
+                if (pVCpu->cpum.GstCtx.cr0 != pRun->s.regs.sregs.cr0)
+                {
+                    CPUMSetGuestCR0(pVCpu, pRun->s.regs.sregs.cr0);
+                    fMaybeChangedMode = true;
+                }
+            }
+            if (fWhat & CPUMCTX_EXTRN_CR2)
+                pCtx->cr2 = pRun->s.regs.sregs.cr2;
+            if (fWhat & CPUMCTX_EXTRN_CR3)
+            {
+                if (pCtx->cr3 != pRun->s.regs.sregs.cr3)
+                {
+                    CPUMSetGuestCR3(pVCpu, pRun->s.regs.sregs.cr3);
+                    fUpdateCr3 = true;
+                }
+            }
+            if (fWhat & CPUMCTX_EXTRN_CR4)
+            {
+                if (pCtx->cr4 != pRun->s.regs.sregs.cr4)
+                {
+                    CPUMSetGuestCR4(pVCpu, pRun->s.regs.sregs.cr4);
+                    fMaybeChangedMode = true;
+                }
+            }
+        }
+        if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
+            APICSetTpr(pVCpu, (uint8_t)pRun->s.regs.sregs.cr8 << 4);
+        if (fWhat & CPUMCTX_EXTRN_EFER)
+        {
+            if (pCtx->msrEFER != pRun->s.regs.sregs.efer)
+            {
+                Log7(("NEM/%u: MSR EFER changed %RX64 -> %RX64\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.msrEFER, pRun->s.regs.sregs.efer));
+                if ((pRun->s.regs.sregs.efer ^ pVCpu->cpum.GstCtx.msrEFER) & MSR_K6_EFER_NXE)
+                    PGMNotifyNxeChanged(pVCpu, RT_BOOL(pRun->s.regs.sregs.efer & MSR_K6_EFER_NXE));
+                pCtx->msrEFER = pRun->s.regs.sregs.efer;
+                fMaybeChangedMode = true;
+            }
+        }
+
+        /** @todo apic_base */
+#undef NEM_LNX_IMPORT_SEG
+    }
+
+    /*
+     * Debug registers.
+     */
+    if (fWhat & CPUMCTX_EXTRN_DR_MASK)
+    {
+        struct kvm_debugregs DbgRegs = {{0}};
+        int rc = ioctl(pVCpu->nem.s.fdVCpu, KVM_GET_DEBUGREGS, &DbgRegs);
+        AssertMsgReturn(rc == 0, ("rc=%d errno=%d\n", rc, errno), VERR_NEM_IPE_3);
+
+        if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
+        {
+            pCtx->dr[0] = DbgRegs.db[0];
+            pCtx->dr[1] = DbgRegs.db[1];
+            pCtx->dr[2] = DbgRegs.db[2];
+            pCtx->dr[3] = DbgRegs.db[3];
+        }
+        if (fWhat & CPUMCTX_EXTRN_DR6)
+            pCtx->dr[6] = DbgRegs.dr6;
+        if (fWhat & CPUMCTX_EXTRN_DR7)
+            pCtx->dr[7] = DbgRegs.dr7;
+    }
+
+    /*
+     * FPU, SSE, AVX, ++.
+     */
+    if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx))
+    {
+        if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE))
+        {
+            /* Partial state is annoying as we have to do merging - is this possible at all? */
+            struct kvm_xsave XSave;
+            int rc = ioctl(pVCpu->nem.s.fdVCpu, KVM_GET_XSAVE, &XSave);
+            AssertMsgReturn(rc == 0, ("rc=%d errno=%d\n", rc, errno), VERR_NEM_IPE_3);
+
+            if (fWhat & CPUMCTX_EXTRN_X87)
+                memcpy(&pCtx->XState.x87, &XSave, sizeof(pCtx->XState.x87));
+            if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
+            {
+                /** @todo */
+            }
+            if (fWhat & CPUMCTX_EXTRN_OTHER_XSAVE)
+            {
+                /** @todo */
+            }
+        }
+
+        if (fWhat & CPUMCTX_EXTRN_XCRx)
+        {
+            struct kvm_xcrs Xcrs =
+            {   /*.nr_xcrs = */ 2,
+                /*.flags = */   0,
+                /*.xcrs= */ {
+                    { /*.xcr =*/ 0, /*.reserved=*/ 0, /*.value=*/ pCtx->aXcr[0] },
+                    { /*.xcr =*/ 1, /*.reserved=*/ 0, /*.value=*/ pCtx->aXcr[1] },
+                }
+            };
+
+            int rc = ioctl(pVCpu->nem.s.fdVCpu, KVM_GET_XCRS, &Xcrs);
+            AssertMsgReturn(rc == 0, ("rc=%d errno=%d\n", rc, errno), VERR_NEM_IPE_3);
+
+            pCtx->aXcr[0] = Xcrs.xcrs[0].value;
+            pCtx->aXcr[1] = Xcrs.xcrs[1].value;
+        }
+    }
+
+    /*
+     * MSRs.
+     */
+    if (fWhat & (  CPUMCTX_EXTRN_KERNEL_GS_BASE | CPUMCTX_EXTRN_SYSCALL_MSRS | CPUMCTX_EXTRN_SYSENTER_MSRS
+                 | CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
+    {
+        union
+        {
+            struct kvm_msrs Core;
+            uint64_t padding[2 + sizeof(struct kvm_msr_entry) * 32];
+        }                   uBuf;
+        uint64_t           *pauDsts[32];
+        uint32_t            iMsr     = 0;
+        PCPUMCTXMSRS const  pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pVCpu);
+
+#define ADD_MSR(a_Msr, a_uValue) do { \
+            Assert(iMsr < 32); \
+            uBuf.Core.entries[iMsr].index    = (a_Msr); \
+            uBuf.Core.entries[iMsr].reserved = 0; \
+            uBuf.Core.entries[iMsr].data     = UINT64_MAX; \
+            pauDsts[iMsr] = &(a_uValue); \
+            iMsr += 1; \
+        } while (0)
+
+        if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
+            ADD_MSR(MSR_K8_KERNEL_GS_BASE, pCtx->msrKERNELGSBASE);
+        if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
+        {
+            ADD_MSR(MSR_K6_STAR,    pCtx->msrSTAR);
+            ADD_MSR(MSR_K8_LSTAR,   pCtx->msrLSTAR);
+            ADD_MSR(MSR_K8_CSTAR,   pCtx->msrCSTAR);
+            ADD_MSR(MSR_K8_SF_MASK, pCtx->msrSFMASK);
+        }
+        if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
+        {
+            ADD_MSR(MSR_IA32_SYSENTER_CS,  pCtx->SysEnter.cs);
+            ADD_MSR(MSR_IA32_SYSENTER_EIP, pCtx->SysEnter.eip);
+            ADD_MSR(MSR_IA32_SYSENTER_ESP, pCtx->SysEnter.esp);
+        }
+        if (fWhat & CPUMCTX_EXTRN_TSC_AUX)
+            ADD_MSR(MSR_K8_TSC_AUX, pCtxMsrs->msr.TscAux);
+        if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
+        {
+            ADD_MSR(MSR_IA32_CR_PAT, pCtx->msrPAT);
+            /** @todo What do we _have_ to add here?
+             * We also have: Mttr*, MiscEnable, FeatureControl. */
+        }
+
+        uBuf.Core.pad   = 0;
+        uBuf.Core.nmsrs = iMsr;
+        int rc = ioctl(pVCpu->nem.s.fdVCpu, KVM_GET_MSRS, &uBuf);
+        AssertMsgReturn(rc == (int)iMsr,
+                        ("rc=%d iMsr=%d (->%#x) errno=%d\n",
+                         rc, iMsr, (uint32_t)rc < iMsr ? uBuf.Core.entries[rc].index : 0, errno),
+                        VERR_NEM_IPE_3);
+
+        while (iMsr-- > 0)
+            *pauDsts[iMsr] = uBuf.Core.entries[iMsr].data;
+#undef ADD_MSR
+    }
+
+    /*
+     * Interruptibility state.
+     */
+    if (fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
+    {
+        fWhat |= CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI; /* always do both, see export and interrupt FF handling */
+
+        struct kvm_vcpu_events KvmEvents = {0};
+        int rcLnx = ioctl(pVCpu->nem.s.fdVCpu, KVM_GET_VCPU_EVENTS, &KvmEvents);
+        AssertLogRelMsgReturn(rcLnx == 0, ("rcLnx=%d errno=%d\n", rcLnx, errno), VERR_NEM_IPE_3);
+
+        if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RIP)
+            pVCpu->cpum.GstCtx.rip = pRun->s.regs.regs.rip;
+
+        if (KvmEvents.interrupt.shadow)
+            EMSetInhibitInterruptsPC(pVCpu, pVCpu->cpum.GstCtx.rip);
+        else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
+            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
+
+        if (KvmEvents.nmi.masked)
+            VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
+        else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
+            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
+    }
+
+    /*
+     * Update the external mask.
+     */
+    pCtx->fExtrn &= ~fWhat;
+    pVCpu->cpum.GstCtx.fExtrn &= ~fWhat;
+    if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL))
+        pVCpu->cpum.GstCtx.fExtrn = 0;
+
+    /*
+     * We sometimes need to update PGM on the guest status.
+     */
+    if (!fMaybeChangedMode && !fUpdateCr3)
+    { /* likely */ }
+    else
+    {
+        /*
+         * Make sure we got all the state PGM might need.
+         */
+        Log7(("nemHCLnxImportState: fMaybeChangedMode=%d fUpdateCr3=%d fExtrnNeeded=%#RX64\n", fMaybeChangedMode, fUpdateCr3,
+              pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_EFER) ));
+        if (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_EFER))
+        {
+            if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_CR0)
+            {
+                if (pVCpu->cpum.GstCtx.cr0 != pRun->s.regs.sregs.cr0)
+                {
+                    CPUMSetGuestCR0(pVCpu, pRun->s.regs.sregs.cr0);
+                    fMaybeChangedMode = true;
+                }
+            }
+            if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_CR3)
+            {
+                if (pCtx->cr3 != pRun->s.regs.sregs.cr3)
+                {
+                    CPUMSetGuestCR3(pVCpu, pRun->s.regs.sregs.cr3);
+                    fUpdateCr3 = true;
+                }
+            }
+            if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_CR4)
+            {
+                if (pCtx->cr4 != pRun->s.regs.sregs.cr4)
+                {
+                    CPUMSetGuestCR4(pVCpu, pRun->s.regs.sregs.cr4);
+                    fMaybeChangedMode = true;
+                }
+            }
+            if (fWhat & CPUMCTX_EXTRN_EFER)
+            {
+                if (pCtx->msrEFER != pRun->s.regs.sregs.efer)
+                {
+                    Log7(("NEM/%u: MSR EFER changed %RX64 -> %RX64\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.msrEFER, pRun->s.regs.sregs.efer));
+                    if ((pRun->s.regs.sregs.efer ^ pVCpu->cpum.GstCtx.msrEFER) & MSR_K6_EFER_NXE)
+                        PGMNotifyNxeChanged(pVCpu, RT_BOOL(pRun->s.regs.sregs.efer & MSR_K6_EFER_NXE));
+                    pCtx->msrEFER = pRun->s.regs.sregs.efer;
+                    fMaybeChangedMode = true;
+                }
+            }
+
+            pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_EFER);
+            if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL))
+                pVCpu->cpum.GstCtx.fExtrn = 0;
+        }
+
+        /*
+         * Notify PGM about the changes.
+         */
+        if (fMaybeChangedMode)
+        {
+            int rc = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER);
+            AssertMsgReturn(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_NEM_IPE_1);
+        }
+
+        if (fUpdateCr3)
+        {
+            int rc = PGMUpdateCR3(pVCpu, pVCpu->cpum.GstCtx.cr3, false /*fPdpesMapped*/);
+            if (rc == VINF_SUCCESS)
+            { /* likely */ }
+            else
+                AssertMsgFailedReturn(("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_NEM_IPE_2);
+        }
+    }
+
+    return VINF_SUCCESS;
 }

…
 {
     STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnDemand);
-
-    RT_NOREF(pVCpu, fWhat);
-    return nemHCLnxImportState(pVCpu, fWhat, pVCpu->nem.s.pRun);
+    return nemHCLnxImportState(pVCpu, fWhat, &pVCpu->cpum.GstCtx, pVCpu->nem.s.pRun);
 }

…
 static int nemHCLnxExportState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, struct kvm_run *pRun)
 {
-    uint64_t const fExtrn = pCtx->fExtrn;
-    Assert((fExtrn & CPUMCTX_EXTRN_ALL) != CPUMCTX_EXTRN_ALL);
+    uint64_t const fExtrn = ~pCtx->fExtrn & CPUMCTX_EXTRN_ALL;
+    Assert((~fExtrn & CPUMCTX_EXTRN_ALL) != CPUMCTX_EXTRN_ALL);

     /*
      * Stuff that goes into kvm_run::s.regs.regs:
      */
-    if (   (fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_GPRS_MASK))
-        != (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_GPRS_MASK))
-    {
-        if (!(fExtrn & CPUMCTX_EXTRN_RIP))
+    if (fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_GPRS_MASK))
+    {
+        if (fExtrn & CPUMCTX_EXTRN_RIP)
             pRun->s.regs.regs.rip = pCtx->rip;
-        if (!(fExtrn & CPUMCTX_EXTRN_RFLAGS))
+        if (fExtrn & CPUMCTX_EXTRN_RFLAGS)
             pRun->s.regs.regs.rflags = pCtx->rflags.u;

-        if (!(fExtrn & CPUMCTX_EXTRN_RAX))
+        if (fExtrn & CPUMCTX_EXTRN_RAX)
             pRun->s.regs.regs.rax = pCtx->rax;
-        if (!(fExtrn & CPUMCTX_EXTRN_RCX))
+        if (fExtrn & CPUMCTX_EXTRN_RCX)
             pRun->s.regs.regs.rcx = pCtx->rcx;
-        if (!(fExtrn & CPUMCTX_EXTRN_RDX))
+        if (fExtrn & CPUMCTX_EXTRN_RDX)
             pRun->s.regs.regs.rdx = pCtx->rdx;
-        if (!(fExtrn & CPUMCTX_EXTRN_RBX))
+        if (fExtrn & CPUMCTX_EXTRN_RBX)
             pRun->s.regs.regs.rbx = pCtx->rbx;
-        if (!(fExtrn & CPUMCTX_EXTRN_RSP))
+        if (fExtrn & CPUMCTX_EXTRN_RSP)
             pRun->s.regs.regs.rsp = pCtx->rsp;
-        if (!(fExtrn & CPUMCTX_EXTRN_RBP))
+        if (fExtrn & CPUMCTX_EXTRN_RBP)
             pRun->s.regs.regs.rbp = pCtx->rbp;
-        if (!(fExtrn & CPUMCTX_EXTRN_RSI))
+        if (fExtrn & CPUMCTX_EXTRN_RSI)
             pRun->s.regs.regs.rsi = pCtx->rsi;
-        if (!(fExtrn & CPUMCTX_EXTRN_RDI))
+        if (fExtrn & CPUMCTX_EXTRN_RDI)
             pRun->s.regs.regs.rdi = pCtx->rdi;
-        if (!(fExtrn & CPUMCTX_EXTRN_R8_R15))
+        if (fExtrn & CPUMCTX_EXTRN_R8_R15)
         {
             pRun->s.regs.regs.r8 = pCtx->r8;
…
      */
     /** @todo apic_base */
-    if (   (fExtrn & (CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TABLE_MASK | CPUMCTX_EXTRN_CR_MASK | CPUMCTX_EXTRN_EFER | CPUMCTX_EXTRN_APIC_TPR))
-        != (CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TABLE_MASK | CPUMCTX_EXTRN_CR_MASK | CPUMCTX_EXTRN_EFER | CPUMCTX_EXTRN_APIC_TPR))
+    if (fExtrn & (  CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TABLE_MASK | CPUMCTX_EXTRN_CR_MASK
+                  | CPUMCTX_EXTRN_EFER | CPUMCTX_EXTRN_APIC_TPR))
     {
 #define NEM_LNX_EXPORT_SEG(a_KvmSeg, a_CtxSeg) do { \
…
         } while (0)

-        if ((fExtrn & CPUMCTX_EXTRN_SREG_MASK) != CPUMCTX_EXTRN_SREG_MASK)
+        if (fExtrn & CPUMCTX_EXTRN_SREG_MASK)
         {
-            if (!(fExtrn & CPUMCTX_EXTRN_ES))
+            if (fExtrn & CPUMCTX_EXTRN_ES)
                 NEM_LNX_EXPORT_SEG(pRun->s.regs.sregs.es, pCtx->es);
-            if (!(fExtrn & CPUMCTX_EXTRN_CS))
+            if (fExtrn & CPUMCTX_EXTRN_CS)
                 NEM_LNX_EXPORT_SEG(pRun->s.regs.sregs.cs, pCtx->cs);
-            if (!(fExtrn & CPUMCTX_EXTRN_SS))
+            if (fExtrn & CPUMCTX_EXTRN_SS)
                 NEM_LNX_EXPORT_SEG(pRun->s.regs.sregs.ss, pCtx->ss);
-            if (!(fExtrn & CPUMCTX_EXTRN_DS))
+            if (fExtrn & CPUMCTX_EXTRN_DS)
                 NEM_LNX_EXPORT_SEG(pRun->s.regs.sregs.ds, pCtx->ds);
-            if (!(fExtrn & CPUMCTX_EXTRN_FS))
+            if (fExtrn & CPUMCTX_EXTRN_FS)
                 NEM_LNX_EXPORT_SEG(pRun->s.regs.sregs.fs, pCtx->fs);
-            if (!(fExtrn & CPUMCTX_EXTRN_GS))
+            if (fExtrn & CPUMCTX_EXTRN_GS)
                 NEM_LNX_EXPORT_SEG(pRun->s.regs.sregs.gs, pCtx->gs);
         }
-        if ((fExtrn & CPUMCTX_EXTRN_TABLE_MASK) != CPUMCTX_EXTRN_TABLE_MASK)
+        if (fExtrn & CPUMCTX_EXTRN_TABLE_MASK)
         {
-            if (!(fExtrn & CPUMCTX_EXTRN_GDTR))
+            if (fExtrn & CPUMCTX_EXTRN_GDTR)
             {
                 pRun->s.regs.sregs.gdt.base = pCtx->gdtr.pGdt;
…
                 pRun->s.regs.sregs.gdt.padding[2] = 0;
             }
-            if (!(fExtrn & CPUMCTX_EXTRN_IDTR))
+            if (fExtrn & CPUMCTX_EXTRN_IDTR)
             {
                 pRun->s.regs.sregs.idt.base = pCtx->idtr.pIdt;
…
                 pRun->s.regs.sregs.idt.padding[2] = 0;
             }
-            if (!(fExtrn & CPUMCTX_EXTRN_LDTR))
+            if (fExtrn & CPUMCTX_EXTRN_LDTR)
                 NEM_LNX_EXPORT_SEG(pRun->s.regs.sregs.ldt, pCtx->ldtr);
-            if (!(fExtrn & CPUMCTX_EXTRN_TR))
+            if (fExtrn & CPUMCTX_EXTRN_TR)
                 NEM_LNX_EXPORT_SEG(pRun->s.regs.sregs.tr, pCtx->tr);
         }
-        if ((fExtrn & CPUMCTX_EXTRN_CR_MASK) != CPUMCTX_EXTRN_CR_MASK)
+        if (fExtrn & CPUMCTX_EXTRN_CR_MASK)
         {
-            if (!(fExtrn & CPUMCTX_EXTRN_CR0))
+            if (fExtrn & CPUMCTX_EXTRN_CR0)
                 pRun->s.regs.sregs.cr0 = pCtx->cr0;
-            if (!(fExtrn & CPUMCTX_EXTRN_CR2))
+            if (fExtrn & CPUMCTX_EXTRN_CR2)
                 pRun->s.regs.sregs.cr2 = pCtx->cr2;
-            if (!(fExtrn & CPUMCTX_EXTRN_CR3))
+            if (fExtrn & CPUMCTX_EXTRN_CR3)
                 pRun->s.regs.sregs.cr3 = pCtx->cr3;
-            if (!(fExtrn & CPUMCTX_EXTRN_CR4))
+            if (fExtrn & CPUMCTX_EXTRN_CR4)
                 pRun->s.regs.sregs.cr4 = pCtx->cr4;
         }
-        if (!(fExtrn & CPUMCTX_EXTRN_APIC_TPR))
+        if (fExtrn & CPUMCTX_EXTRN_APIC_TPR)
             pRun->s.regs.sregs.cr8 = CPUMGetGuestCR8(pVCpu);
-        if (!(fExtrn & CPUMCTX_EXTRN_EFER))
+        if (fExtrn & CPUMCTX_EXTRN_EFER)
             pRun->s.regs.sregs.efer = pCtx->msrEFER;

         /** @todo apic_base */
-        /** @todo interrupt_bitmap - IRQ injection? */
+
+        RT_ZERO(pRun->s.regs.sregs.interrupt_bitmap); /* this is an alternative interrupt injection interface */
+
         pRun->kvm_dirty_regs |= KVM_SYNC_X86_SREGS;
     }
…
      * Debug registers.
      */
-    if ((fExtrn & CPUMCTX_EXTRN_DR_MASK) != CPUMCTX_EXTRN_DR_MASK)
+    if (fExtrn & CPUMCTX_EXTRN_DR_MASK)
     {
         struct kvm_debugregs DbgRegs = {{0}};

-        if (fExtrn & CPUMCTX_EXTRN_DR_MASK)
+        if ((fExtrn & CPUMCTX_EXTRN_DR_MASK) != CPUMCTX_EXTRN_DR_MASK)
         {
             /* Partial debug state, we must get DbgRegs first so we can merge: */
…
         }

-        if (!(fExtrn & CPUMCTX_EXTRN_DR0_DR3))
+        if (fExtrn & CPUMCTX_EXTRN_DR0_DR3)
         {
             DbgRegs.db[0] = pCtx->dr[0];
…
             DbgRegs.db[3] = pCtx->dr[3];
         }
-        if (!(fExtrn & CPUMCTX_EXTRN_DR6))
+        if (fExtrn & CPUMCTX_EXTRN_DR6)
             DbgRegs.dr6 = pCtx->dr[6];
-        if (!(fExtrn & CPUMCTX_EXTRN_DR7))
+        if (fExtrn & CPUMCTX_EXTRN_DR7)
             DbgRegs.dr7 = pCtx->dr[7];
…
      * FPU, SSE, AVX, ++.
      */
-    if (   (fExtrn & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx))
-        != (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx))
-    {
-        if (   (fExtrn & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE))
-            != (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE))
-        {
-            if (fExtrn & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE))
+    if (fExtrn & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx))
+    {
+        if (fExtrn & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE))
+        {
+            if (   (fExtrn & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE))
+                != (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE))
             {
                 /* Partial state is annoying as we have to do merging - is this possible at all? */
…
                 AssertMsgReturn(rc == 0, ("rc=%d errno=%d\n", rc, errno), VERR_NEM_IPE_3);

-                if (!(fExtrn & CPUMCTX_EXTRN_X87))
+                if (fExtrn & CPUMCTX_EXTRN_X87)
                     memcpy(&pCtx->XState.x87, &XSave, sizeof(pCtx->XState.x87));
-                if (!(fExtrn & CPUMCTX_EXTRN_SSE_AVX))
+                if (fExtrn & CPUMCTX_EXTRN_SSE_AVX)
                 {
                     /** @todo */
                 }
-                if (!(fExtrn & CPUMCTX_EXTRN_OTHER_XSAVE))
+                if (fExtrn & CPUMCTX_EXTRN_OTHER_XSAVE)
                 {
                     /** @todo */
…
             }
         }

-        if (!(fExtrn & CPUMCTX_EXTRN_XCRx))
+        if (fExtrn & CPUMCTX_EXTRN_XCRx)
         {
             struct kvm_xcrs Xcrs =
…
      * MSRs.
      */
-    if (   (fExtrn & (CPUMCTX_EXTRN_KERNEL_GS_BASE | CPUMCTX_EXTRN_SYSCALL_MSRS | CPUMCTX_EXTRN_SYSENTER_MSRS | CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
-        != (CPUMCTX_EXTRN_KERNEL_GS_BASE | CPUMCTX_EXTRN_SYSCALL_MSRS | CPUMCTX_EXTRN_SYSENTER_MSRS | CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
+    if (fExtrn & (  CPUMCTX_EXTRN_KERNEL_GS_BASE | CPUMCTX_EXTRN_SYSCALL_MSRS | CPUMCTX_EXTRN_SYSENTER_MSRS
+                  | CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
     {
         union
…
         } while (0)

-        if (!(fExtrn & CPUMCTX_EXTRN_KERNEL_GS_BASE))
+        if (fExtrn & CPUMCTX_EXTRN_KERNEL_GS_BASE)
             ADD_MSR(MSR_K8_KERNEL_GS_BASE, pCtx->msrKERNELGSBASE);
-        if (!(fExtrn & CPUMCTX_EXTRN_SYSCALL_MSRS))
+        if (fExtrn & CPUMCTX_EXTRN_SYSCALL_MSRS)
         {
             ADD_MSR(MSR_K6_STAR, pCtx->msrSTAR);
…
             ADD_MSR(MSR_K8_SF_MASK, pCtx->msrSFMASK);
         }
-        if (!(fExtrn & CPUMCTX_EXTRN_SYSENTER_MSRS))
+        if (fExtrn & CPUMCTX_EXTRN_SYSENTER_MSRS)
         {
             ADD_MSR(MSR_IA32_SYSENTER_CS, pCtx->SysEnter.cs);
…
             ADD_MSR(MSR_IA32_SYSENTER_ESP, pCtx->SysEnter.esp);
         }
-        if (!(fExtrn & CPUMCTX_EXTRN_TSC_AUX))
+        if (fExtrn & CPUMCTX_EXTRN_TSC_AUX)
             ADD_MSR(MSR_K8_TSC_AUX, pCtxMsrs->msr.TscAux);
-        if (!(fExtrn & CPUMCTX_EXTRN_OTHER_MSRS))
+        if (fExtrn & CPUMCTX_EXTRN_OTHER_MSRS)
         {
             ADD_MSR(MSR_IA32_CR_PAT, pCtx->msrPAT);
…

     /*
+     * Interruptibility state.
+     *
+     * Note! This I/O control function sets most fields passed in, so when
+     *       raising an interrupt, NMI, SMI or exception, this must be done
+     *       by the code doing the rasing or we'll overwrite it here.
+     */
+    if (fExtrn & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
+    {
+        Assert(   (fExtrn & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
+               == (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI));
+
+        struct kvm_vcpu_events KvmEvents = {0};
+
+        KvmEvents.flags = KVM_VCPUEVENT_VALID_SHADOW;
+        if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
+        {
+            if (pRun->s.regs.regs.rip == EMGetInhibitInterruptsPC(pVCpu))
+                KvmEvents.interrupt.shadow = KVM_X86_SHADOW_INT_MOV_SS | KVM_X86_SHADOW_INT_STI;
+            else
+                VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
+        }
+
+        /* No flag - this is updated unconditionally. */
+        if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
+            KvmEvents.nmi.masked = 1;
+
+        int rcLnx = ioctl(pVCpu->nem.s.fdVCpu, KVM_SET_VCPU_EVENTS, &KvmEvents);
+        AssertLogRelMsgReturn(rcLnx == 0, ("rcLnx=%d errno=%d\n", rcLnx, errno), VERR_NEM_IPE_3);
+    }
+
+    /*
      * KVM now owns all the state.
      */
-    pCtx->fExtrn = (fExtrn & ~CPUMCTX_EXTRN_KEEPER_MASK) | CPUMCTX_EXTRN_KEEPER_NEM | CPUMCTX_EXTRN_ALL;
+    pCtx->fExtrn = CPUMCTX_EXTRN_KEEPER_NEM | CPUMCTX_EXTRN_ALL;

     RT_NOREF(pVM);
…
 void nemR3NativeNotifyFF(PVM pVM, PVMCPU pVCpu, uint32_t fFlags)
 {
-    RT_NOREF(pVM, pVCpu, fFlags);
-}
-
-
-static VBOXSTRICTRC nemHCLnxHandleInterruptFF(PVM pVM, PVMCPU pVCpu)
-{
-    RT_NOREF(pVM, pVCpu);
+    int rc = RTThreadPoke(pVCpu->hThread);
+    LogFlow(("nemR3NativeNotifyFF: #%u -> %Rrc\n", pVCpu->idCpu, rc));
+    AssertRC(rc);
+    RT_NOREF(pVM, fFlags);
+}
+
+
+/**
+ * Deals with pending interrupt FFs prior to executing guest code.
+ */
+static VBOXSTRICTRC nemHCLnxHandleInterruptFF(PVM pVM, PVMCPU pVCpu, struct kvm_run *pRun)
+{
+    Assert(!TRPMHasTrap(pVCpu));
+    RT_NOREF_PV(pVM);
+
+    /*
+     * First update APIC. We ASSUME this won't need TPR/CR8.
+     */
+    if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
+    {
+        APICUpdatePendingInterrupts(pVCpu);
+        if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
+                                      | VMCPU_FF_INTERRUPT_NMI  | VMCPU_FF_INTERRUPT_SMI))
+            return VINF_SUCCESS;
+    }
+
+    /*
+     * We don't currently implement SMIs.
+     */
+    AssertReturn(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI), VERR_NEM_IPE_0);
+
+    /*
+     * In KVM the CPUMCTX_EXTRN_INHIBIT_INT and CPUMCTX_EXTRN_INHIBIT_NMI states
+     * are tied together with interrupt and NMI delivery, so we must get and
+     * synchronize these all in one go and set both CPUMCTX_EXTRN_INHIBIT_XXX flags.
+     * If we don't we may lose the interrupt/NMI we marked pending here when the
+     * state is exported again before execution.
+     */
+    struct kvm_vcpu_events KvmEvents = {0};
+    int rcLnx = ioctl(pVCpu->nem.s.fdVCpu, KVM_GET_VCPU_EVENTS, &KvmEvents);
+    AssertLogRelMsgReturn(rcLnx == 0, ("rcLnx=%d errno=%d\n", rcLnx, errno), VERR_NEM_IPE_5);
+
+    if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RIP))
+        pRun->s.regs.regs.rip = pVCpu->cpum.GstCtx.rip;
+
+    KvmEvents.flags |= KVM_VCPUEVENT_VALID_SHADOW;
+    if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_INHIBIT_INT))
+    {
+        if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
+            KvmEvents.interrupt.shadow = 0;
+        else if (EMGetInhibitInterruptsPC(pVCpu) == pRun->s.regs.regs.rip)
+            KvmEvents.interrupt.shadow = KVM_X86_SHADOW_INT_MOV_SS | KVM_X86_SHADOW_INT_STI;
+        else
+        {
+            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
+            KvmEvents.interrupt.shadow = 0;
+        }
+    }
+    else if (KvmEvents.interrupt.shadow)
+        EMSetInhibitInterruptsPC(pVCpu, pRun->s.regs.regs.rip);
+    else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
+        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
+
+    if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_INHIBIT_NMI))
+        KvmEvents.nmi.masked = VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS) ? 1 : 0;
+    else if (KvmEvents.nmi.masked)
+        VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
+    else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
+        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
+
+    /* KVM will own the INT + NMI inhibit state soon: */
+    pVCpu->cpum.GstCtx.fExtrn = (pVCpu->cpum.GstCtx.fExtrn & ~CPUMCTX_EXTRN_KEEPER_MASK)
+                              | CPUMCTX_EXTRN_KEEPER_NEM | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI;
+
+    /*
+     * NMI? Try deliver it first.
+     */
+    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
+    {
+#if 0
+        int rcLnx = ioctl(pVCpu->nem.s.fdVm, KVM_NMI, 0UL);
+        AssertLogRelMsgReturn(rcLnx == 0, ("rcLnx=%d errno=%d\n", rcLnx, errno), VERR_NEM_IPE_5);
+#else
+        KvmEvents.flags      |= KVM_VCPUEVENT_VALID_NMI_PENDING;
+        KvmEvents.nmi.pending = 1;
+#endif
+        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
+        Log8(("Queuing NMI on %u\n", pVCpu->idCpu));
+    }
+
+    /*
+     * APIC or PIC interrupt?
+     */
+    if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
+    {
+        if (pRun->s.regs.regs.rflags & X86_EFL_IF)
+        {
+            if (KvmEvents.interrupt.shadow == 0)
+            {
+                /*
+                 * If CR8 is in KVM, update the VBox copy so PDMGetInterrupt will
+                 * work correctly.
+                 */
+                if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_APIC_TPR)
+                    APICSetTpr(pVCpu, (uint8_t)pRun->cr8 << 4);
+
+                uint8_t bInterrupt;
+                int rc = PDMGetInterrupt(pVCpu, &bInterrupt);
+                if (RT_SUCCESS(rc))
+                {
+#if 0
+                    int rcLnx = ioctl(pVCpu->nem.s.fdVm, KVM_INTERRUPT, (unsigned long)bInterrupt);
+                    AssertLogRelMsgReturn(rcLnx == 0, ("rcLnx=%d errno=%d\n", rcLnx, errno), VERR_NEM_IPE_5);
+#else
+                    KvmEvents.interrupt.nr       = bInterrupt;
+                    KvmEvents.interrupt.soft     = false;
+                    KvmEvents.interrupt.injected = true;
+#endif
+                    Log8(("Queuing interrupt %#x on %u: %04x:%08RX64 efl=%#x\n", bInterrupt, pVCpu->idCpu,
+                          pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eflags));
+                }
+                else if (rc == VERR_APIC_INTR_MASKED_BY_TPR) /** @todo this isn't extremely efficient if we get a lot of exits... */
+                    Log8(("VERR_APIC_INTR_MASKED_BY_TPR\n")); /* We'll get a TRP exit - no interrupt window needed. */
+                else
+                    Log8(("PDMGetInterrupt failed -> %Rrc\n", rc));
+            }
+            else
+            {
+                pRun->request_interrupt_window = 1;
+                Log8(("Interrupt window pending on %u (#2)\n", pVCpu->idCpu));
+            }
+        }
+        else
+        {
+            pRun->request_interrupt_window = 1;
+            Log8(("Interrupt window pending on %u (#1)\n", pVCpu->idCpu));
+        }
+    }
+
+    /*
+     * Now, update the state.
+     */
+    /** @todo skip when possible... */
+    rcLnx = ioctl(pVCpu->nem.s.fdVCpu, KVM_SET_VCPU_EVENTS, &KvmEvents);
+    AssertLogRelMsgReturn(rcLnx == 0, ("rcLnx=%d errno=%d\n", rcLnx, errno), VERR_NEM_IPE_5);
+
     return VINF_SUCCESS;
 }


+/**
+ * Handles KVM_EXIT_IO.
+ */
 static VBOXSTRICTRC nemHCLnxHandleExitIo(PVMCC pVM, PVMCPUCC pVCpu, struct kvm_run *pRun)
 {
…
     Assert(pRun->io.data_offset < pVM->nem.s.cbVCpuMmap);
     Assert(pRun->io.data_offset + pRun->io.size * pRun->io.count <= pVM->nem.s.cbVCpuMmap);
+
+    /*
+     * We cannot actually act on the exit history here, because the I/O port
+     * exit is stateful and the instruction will be completed in the next
+     * KVM_RUN call. There seems no way to avoid this.
+     */
+    EMHistoryAddExit(pVCpu,
+                     pRun->io.count == 1
+                     ? (  pRun->io.direction == KVM_EXIT_IO_IN
+                        ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_READ)
+                        : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_WRITE))
+                     : (  pRun->io.direction == KVM_EXIT_IO_IN
+                        ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_STR_READ)
+                        : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_STR_WRITE)),
+                     pRun->s.regs.regs.rip + pRun->s.regs.sregs.cs.base, ASMReadTSC());

     /*
…


+/**
+ * Handles KVM_EXIT_MMIO.
+ */
+static VBOXSTRICTRC nemHCLnxHandleExitMmio(PVMCC pVM, PVMCPUCC pVCpu, struct kvm_run *pRun)
+{
+    /*
+     * Input validation.
+     */
+    Assert(pRun->mmio.len <= sizeof(pRun->mmio.data));
+    Assert(pRun->mmio.is_write <= 1);
+
+    /*
+     * We cannot actually act on the exit history here, because the MMIO port
+     * exit is stateful and the instruction will be completed in the next
+     * KVM_RUN call. There seems no way to circumvent this.
+     */
+    EMHistoryAddExit(pVCpu,
+                     pRun->mmio.is_write
+                     ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
+                     : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
+                     pRun->s.regs.regs.rip + pRun->s.regs.sregs.cs.base, ASMReadTSC());
+
+    /*
+     * Do the requested job.
+     */
+    VBOXSTRICTRC rcStrict;
+    if (pRun->mmio.is_write)
+    {
+        rcStrict = PGMPhysWrite(pVM, pRun->mmio.phys_addr, pRun->mmio.data, pRun->mmio.len, PGMACCESSORIGIN_HM);
+        Log4(("MmioExit/%u: %04x:%08RX64: WRITE %#x LB %u, %.*Rhxs -> rcStrict=%Rrc\n",
+              pVCpu->idCpu, pRun->s.regs.sregs.cs.selector, pRun->s.regs.regs.rip,
+              pRun->mmio.phys_addr, pRun->mmio.len, pRun->mmio.len, pRun->mmio.data, VBOXSTRICTRC_VAL(rcStrict) ));
+    }
+    else
+    {
+        rcStrict = PGMPhysRead(pVM, pRun->mmio.phys_addr, pRun->mmio.data, pRun->mmio.len, PGMACCESSORIGIN_HM);
+        Log4(("MmioExit/%u: %04x:%08RX64: READ %#x LB %u -> %.*Rhxs rcStrict=%Rrc\n",
+              pVCpu->idCpu, pRun->s.regs.sregs.cs.selector, pRun->s.regs.regs.rip,
+              pRun->mmio.phys_addr, pRun->mmio.len, pRun->mmio.len, pRun->mmio.data, VBOXSTRICTRC_VAL(rcStrict) ));
+    }
+    return rcStrict;
+}
+
+
 static VBOXSTRICTRC nemHCLnxHandleExit(PVMCC pVM, PVMCPUCC pVCpu, struct kvm_run *pRun)
 {
+    STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitTotal);
     switch (pRun->exit_reason)
     {
…

         case KVM_EXIT_IO:
+            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitIo);
             return nemHCLnxHandleExitIo(pVM, pVCpu, pRun);

-        case KVM_EXIT_HYPERCALL:
+        case KVM_EXIT_MMIO:
+            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMmio);
+            return nemHCLnxHandleExitMmio(pVM, pVCpu, pRun);
+
+        case KVM_EXIT_IRQ_WINDOW_OPEN:
+            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitIrqWindowOpen);
+            Log5(("IrqWinOpen/%u: %d\n", pVCpu->idCpu, pRun->request_interrupt_window));
+            pRun->request_interrupt_window = 0;
+            return VINF_SUCCESS;
+
+        case KVM_EXIT_SET_TPR:
+            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitSetTpr);
             AssertFailed();
             break;

-        case KVM_EXIT_DEBUG:
+        case KVM_EXIT_TPR_ACCESS:
+            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitTprAccess);
             AssertFailed();
             break;

-        case KVM_EXIT_HLT:
+        case KVM_EXIT_X86_RDMSR:
+            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitRdMsr);
             AssertFailed();
             break;

-        case KVM_EXIT_MMIO:
+        case KVM_EXIT_X86_WRMSR:
+            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitWrMsr);
             AssertFailed();
             break;

-        case KVM_EXIT_IRQ_WINDOW_OPEN:
+        case KVM_EXIT_HLT:
+            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitHalt);
+            Log5(("Halt/%u\n", pVCpu->idCpu));
+            return VINF_EM_HALT;
+
+        case KVM_EXIT_INTR: /* EINTR */
+            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitIntr);
+            Log5(("Intr/%u\n", pVCpu->idCpu));
+            return VINF_SUCCESS;
+
+        case KVM_EXIT_HYPERCALL:
+            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitHypercall);
             AssertFailed();
             break;

-        case KVM_EXIT_X86_RDMSR:
-            AssertFailed();
-            break;
-
-        case KVM_EXIT_X86_WRMSR:
-            AssertFailed();
-            break;
-
-        case KVM_EXIT_INTR: /* EINTR */
-            return VINF_SUCCESS;
-
-        case KVM_EXIT_SET_TPR:
-            AssertFailed();
-            break;
-        case KVM_EXIT_TPR_ACCESS:
-            AssertFailed();
-            break;
-        case KVM_EXIT_NMI:
+        case KVM_EXIT_DEBUG:
+            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitDebug);
             AssertFailed();
             break;
…
             break;
         case KVM_EXIT_X86_BUS_LOCK:
+            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitBusLock);
             AssertFailed();
             break;
…
          * Foreign and unknowns.
          */
+        case KVM_EXIT_NMI:
+            AssertLogRelMsgFailedReturn(("KVM_EXIT_NMI on VCpu #%u at %04x:%RX64!\n", pVCpu->idCpu, pRun->s.regs.sregs.cs.selector, pRun->s.regs.regs.rip), VERR_NEM_IPE_1);
         case KVM_EXIT_EPR:
             AssertLogRelMsgFailedReturn(("KVM_EXIT_EPR on VCpu #%u at %04x:%RX64!\n", pVCpu->idCpu, pRun->s.regs.sregs.cs.selector, pRun->s.regs.regs.rip), VERR_NEM_IPE_1);
…
     {
         /* Try inject interrupt. */
-        rcStrict = nemHCLnxHandleInterruptFF(pVM, pVCpu);
+        rcStrict = nemHCLnxHandleInterruptFF(pVM, pVCpu, pRun);
         if (rcStrict == VINF_SUCCESS)
         { /* likely */ }
…
          * Ensure KVM has the whole state.
          */
-        if (   (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL)
-            != CPUMCTX_EXTRN_ALL)
+        if ((pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL) != CPUMCTX_EXTRN_ALL)
         {
             int rc2 = nemHCLnxExportState(pVM, pVCpu, &pVCpu->cpum.GstCtx, pRun);
…
             TMNotifyEndOfExecution(pVM, pVCpu, ASMReadTSC());

-            LogFlow(("NEM/%u: Exit @ %04x:%08RX64 IF=%d EFL=%#RX64 CR8=%#x Reason=%#x IrqReady=%d Flags=%#x\n", pVCpu->idCpu,
-                     pRun->s.regs.sregs.cs.selector, pRun->s.regs.regs.rip, pRun->if_flag,
-                     pRun->s.regs.regs.rflags, pRun->s.regs.sregs.cr8, pRun->exit_reason,
-                     pRun->ready_for_interrupt_injection, pRun->flags));
+#ifdef LOG_ENABLED
+            if (LogIsFlowEnabled())
+            {
+                struct kvm_mp_state MpState = {UINT32_MAX};
+                ioctl(pVCpu->nem.s.fdVCpu, KVM_GET_MP_STATE, &MpState);
+                LogFlow(("NEM/%u: Exit @ %04x:%08RX64 IF=%d EFL=%#RX64 CR8=%#x Reason=%#x IrqReady=%d Flags=%#x %#lx\n", pVCpu->idCpu,
+                         pRun->s.regs.sregs.cs.selector, pRun->s.regs.regs.rip, pRun->if_flag,
+                         pRun->s.regs.regs.rflags, pRun->s.regs.sregs.cr8, pRun->exit_reason,
+                         pRun->ready_for_interrupt_injection, pRun->flags, MpState.mp_state));
+            }
+#endif
             if (RT_LIKELY(rcLnx == 0 || errno == EINTR))
             {
…
             if (pVCpu->cpum.GstCtx.fExtrn & fImport)
             {
-                int rc2 = nemHCLnxImportState(pVCpu, fImport, pRun);
+                int rc2 = nemHCLnxImportState(pVCpu, fImport, &pVCpu->cpum.GstCtx, pRun);
                 if (RT_SUCCESS(rc2))
                     pVCpu->cpum.GstCtx.fExtrn &= ~fImport;