Changeset 70462 in vbox
- Timestamp: Jan 5, 2018 8:11:02 AM (7 years ago)
- Location: trunk
- Files: 3 edited
trunk/include/VBox/vmm/hm.h (r70299 → r70462)

 /** @name All-context SVM helpers.
+ *
+ * These are SVM functions (based on AMD specs.) that may be used by IEM/REM and
+ * not VirtualBox functions that are used for hardware-assisted SVM. Those are
+ * declared below under the !IN_RC section.
  * @{ */
 VMM_INT_DECL(TRPMEVENT) HMSvmEventToTrpmEventType(PCSVMEVENT pSvmEvent);
…
                                             PSVMIOIOEXITINFO pIoExitInfo);
 VMM_INT_DECL(VBOXSTRICTRC) HMSvmVmmcall(PVMCPU pVCpu, PCPUMCTX pCtx, bool *pfRipUpdated);
-/** @} */
-
-/** @name Nested hardware virtualization.
- * @{
- */
-#ifdef VBOX_WITH_NESTED_HWVIRT
-VMM_INT_DECL(void) HMSvmNstGstVmExitNotify(PVMCPU pVCpu, PCPUMCTX pCtx);
-#endif
 /** @} */

…
 VMM_INT_DECL(bool) HMAreMsrBitmapsAvailable(PVM pVM);
 VMM_INT_DECL(PGMMODE) HMGetShwPagingMode(PVM pVM);
+VMM_INT_DECL(void) HMSvmNstGstVmExitNotify(PVMCPU pVCpu, PCPUMCTX pCtx);
+VMM_INT_DECL(bool) HMSvmIsVGifActive(PVM pVM);
 #else /* Nops in RC: */
 # define HMFlushTLB(pVCpu) do { } while (0)
…
 # define HMAreMsrBitmapsAvailable(pVM) false
 # define HMFlushTLBOnAllVCpus(pVM) do { } while (0)
+# define HMSvmNstGstVmExitNotify(pVCpu, pCtx) do { } while (0)
+# define HMSvmIsVGifActive(pVM) false
 #endif
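The RC stubs added at the bottom of this hunk keep callers context-agnostic: all-context code can use the new helpers without wrapping every call in #ifdef IN_RC. Below is a minimal sketch of that pattern, assuming the usual VBox VMM headers; the wrapper function is hypothetical and not part of this changeset:

#include <VBox/vmm/hm.h>

/* Hypothetical all-context helper: ring-0/ring-3 builds link against the real
 * HMSvmIsVGifActive(), while raw-mode (RC) builds see the macro stub that
 * expands to 'false', so the first branch compiles away there. */
static bool exampleNeedsSoftwareGifTracking(PVM pVM)
{
    if (HMSvmIsVGifActive(pVM))   /* 'false' in RC, real VGIF check in R0/R3 */
        return false;             /* hardware maintains the virtual GIF */
    return true;                  /* caller must track GIF in software */
}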
trunk/include/VBox/vmm/hm_svm.h (r70444 → r70462)

     } while (0)

+/*
+ * These functions are only here because the inline functions in cpum.h calls them.
+ * Don't add any more functions here unless there is no other option.
+ */
 VMM_INT_DECL(bool) HMIsGuestSvmCtrlInterceptSet(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t fIntercept);
 VMM_INT_DECL(bool) HMIsGuestSvmReadCRxInterceptSet(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCr);
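The new comment explains why these prototypes live in hm_svm.h rather than hm.h: inline functions in cpum.h call them, so the declarations must be visible from a public header. A rough sketch of the shape of such a caller, assuming the VBox headers (VBox/vmm/hm_svm.h, plus iprt/cdefs.h for DECLINLINE); this is not the actual cpum.h code, only an illustration of the dependency:

/* Hypothetical inline wrapper in the style of the cpum.h helpers: because it is
 * an inline in a public header, the HMIsGuestSvm*InterceptSet() prototype must
 * already be in scope, which is exactly what hm_svm.h provides. */
DECLINLINE(bool) exampleIsCtrlInterceptSet(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t fIntercept)
{
    /* Defer to HM, which knows the current nested-guest VMCB state. */
    return HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, fIntercept);
}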
trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp (r70457 → r70462)

     return VERR_NOT_FOUND;
 }
-#endif /* !IN_RC */
-
-
   [HMSvmVmmcall(), HMSvmEventToTrpmEventType(), HMSvmGetMsrpmOffsetAndBit() and
    HMSvmIsIOInterceptActive() are deleted here and re-added verbatim below the
    relocated #endif /* !IN_RC */; they are listed once at the end of this diff.]
-
-#ifdef VBOX_WITH_NESTED_HWVIRT
+
+
 /**
  * Notification callback for when a \#VMEXIT happens outside SVM R0 code (e.g.
…
     }
 }
+
+
+/**
+ * Checks if the Virtual GIF (Global Interrupt Flag) feature is supported and
+ * enabled for the VM.
+ *
+ * @returns @c true if VGIF is enabled, @c false otherwise.
+ * @param   pVM         The cross context VM structure.
+ */
+VMM_INT_DECL(bool) HMSvmIsVGifActive(PVM pVM)
+{
+    bool const fVGif    = RT_BOOL(pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_VGIF);
+    bool const fUseVGif = fVGif && pVM->hm.s.svm.fVGif;
+
+    return HMIsEnabled(pVM) && fVGif && fUseVGif;
+}
+#endif /* !IN_RC */

The four all-context SVM helpers re-added below the relocated #endif /* !IN_RC */ (unchanged, listed once here):

/**
 * Performs the operations necessary that are part of the vmmcall instruction
 * execution in the guest.
 *
 * @returns Strict VBox status code (i.e. informational status codes too).
 * @retval  VINF_SUCCESS on successful handling, no \#UD needs to be thrown,
 *          update RIP and eflags.RF depending on @a pfUpdatedRipAndRF and
 *          continue guest execution.
 * @retval  VINF_GIM_HYPERCALL_CONTINUING continue hypercall without updating
 *          RIP.
 * @retval  VINF_GIM_R3_HYPERCALL re-start the hypercall from ring-3.
 *
 * @param   pVCpu               The cross context virtual CPU structure.
 * @param   pCtx                Pointer to the guest-CPU context.
 * @param   pfUpdatedRipAndRF   Whether the guest RIP/EIP has been updated as
 *                              part of handling the VMMCALL operation.
 */
VMM_INT_DECL(VBOXSTRICTRC) HMSvmVmmcall(PVMCPU pVCpu, PCPUMCTX pCtx, bool *pfUpdatedRipAndRF)
{
#ifndef IN_RC
    /*
     * TPR patched instruction emulation for 32-bit guests.
     */
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    if (pVM->hm.s.fTprPatchingAllowed)
    {
        int rc = hmSvmEmulateMovTpr(pVCpu, pCtx, pfUpdatedRipAndRF);
        if (RT_SUCCESS(rc))
            return VINF_SUCCESS;

        if (rc != VERR_NOT_FOUND)
        {
            Log(("hmSvmExitVmmCall: hmSvmEmulateMovTpr returns %Rrc\n", rc));
            return rc;
        }
    }
#endif

    /*
     * Paravirtualized hypercalls.
     */
    *pfUpdatedRipAndRF = false;
    if (pVCpu->hm.s.fHypercallsEnabled)
        return GIMHypercall(pVCpu, pCtx);

    return VERR_NOT_AVAILABLE;
}


/**
 * Converts an SVM event type to a TRPM event type.
 *
 * @returns The TRPM event type.
 * @retval  TRPM_32BIT_HACK if the specified type of event isn't among the set
 *          of recognized trap types.
 *
 * @param   pEvent      Pointer to the SVM event.
 */
VMM_INT_DECL(TRPMEVENT) HMSvmEventToTrpmEventType(PCSVMEVENT pEvent)
{
    uint8_t const uType = pEvent->n.u3Type;
    switch (uType)
    {
        case SVM_EVENT_EXTERNAL_IRQ:    return TRPM_HARDWARE_INT;
        case SVM_EVENT_SOFTWARE_INT:    return TRPM_SOFTWARE_INT;
        case SVM_EVENT_EXCEPTION:
        case SVM_EVENT_NMI:             return TRPM_TRAP;
        default:
            break;
    }
    AssertMsgFailed(("HMSvmEventToTrpmEvent: Invalid pending-event type %#x\n", uType));
    return TRPM_32BIT_HACK;
}


/**
 * Gets the MSR permission bitmap byte and bit offset for the specified MSR.
 *
 * @returns VBox status code.
 * @param   idMsr       The MSR being requested.
 * @param   pbOffMsrpm  Where to store the byte offset in the MSR permission
 *                      bitmap for @a idMsr.
 * @param   puMsrpmBit  Where to store the bit offset starting at the byte
 *                      returned in @a pbOffMsrpm.
 */
VMM_INT_DECL(int) HMSvmGetMsrpmOffsetAndBit(uint32_t idMsr, uint16_t *pbOffMsrpm, uint32_t *puMsrpmBit)
{
    Assert(pbOffMsrpm);
    Assert(puMsrpmBit);

    /*
     * MSRPM Layout:
     * Byte offset          MSR range
     * 0x000  - 0x7ff       0x00000000 - 0x00001fff
     * 0x800  - 0xfff       0xc0000000 - 0xc0001fff
     * 0x1000 - 0x17ff      0xc0010000 - 0xc0011fff
     * 0x1800 - 0x1fff      Reserved
     *
     * Each MSR is represented by 2 permission bits (read and write).
     */
    if (idMsr <= 0x00001fff)
    {
        /* Pentium-compatible MSRs. */
        *pbOffMsrpm = 0;
        *puMsrpmBit = idMsr << 1;
        return VINF_SUCCESS;
    }

    if (   idMsr >= 0xc0000000
        && idMsr <= 0xc0001fff)
    {
        /* AMD Sixth Generation x86 Processor MSRs. */
        *pbOffMsrpm = 0x800;
        *puMsrpmBit = (idMsr - 0xc0000000) << 1;
        return VINF_SUCCESS;
    }

    if (   idMsr >= 0xc0010000
        && idMsr <= 0xc0011fff)
    {
        /* AMD Seventh and Eighth Generation Processor MSRs. */
        *pbOffMsrpm = 0x1000;
        *puMsrpmBit = (idMsr - 0xc0010000) << 1;
        return VINF_SUCCESS;
    }

    *pbOffMsrpm = 0;
    *puMsrpmBit = 0;
    return VERR_OUT_OF_RANGE;
}


/**
 * Determines whether an IOIO intercept is active for the nested-guest or not.
 *
 * @param   pvIoBitmap      Pointer to the nested-guest IO bitmap.
 * @param   u16Port         The IO port being accessed.
 * @param   enmIoType       The type of IO access.
 * @param   cbReg           The IO operand size in bytes.
 * @param   cAddrSizeBits   The address size bits (for 16, 32 or 64).
 * @param   iEffSeg         The effective segment number.
 * @param   fRep            Whether this is a repeating IO instruction (REP prefix).
 * @param   fStrIo          Whether this is a string IO instruction.
 * @param   pIoExitInfo     Pointer to the SVMIOIOEXITINFO struct to be filled.
 *                          Optional, can be NULL.
 */
VMM_INT_DECL(bool) HMSvmIsIOInterceptActive(void *pvIoBitmap, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
                                            uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo,
                                            PSVMIOIOEXITINFO pIoExitInfo)
{
    Assert(cAddrSizeBits == 16 || cAddrSizeBits == 32 || cAddrSizeBits == 64);
    Assert(cbReg == 1 || cbReg == 2 || cbReg == 4 || cbReg == 8);

    /*
     * The IOPM layout:
     * Each bit represents one 8-bit port. That makes a total of 0..65535 bits or
     * two 4K pages.
     *
     * For IO instructions that access more than a single byte, the permission bits
     * for all bytes are checked; if any bit is set to 1, the IO access is intercepted.
     *
     * Since it's possible to do a 32-bit IO access at port 65534 (accessing 4 bytes),
     * we need 3 extra bits beyond the second 4K page.
     */
    static const uint16_t s_auSizeMasks[] = { 0, 1, 3, 0, 0xf, 0, 0, 0 };

    uint16_t const offIopm   = u16Port >> 3;
    uint16_t const fSizeMask = s_auSizeMasks[(cAddrSizeBits >> SVM_IOIO_OP_SIZE_SHIFT) & 7];
    uint8_t  const cShift    = u16Port - (offIopm << 3);
    uint16_t const fIopmMask = (1 << cShift) | (fSizeMask << cShift);

    uint8_t const *pbIopm = (uint8_t *)pvIoBitmap;
    Assert(pbIopm);
    pbIopm += offIopm;
    uint16_t const u16Iopm = *(uint16_t *)pbIopm;
    if (u16Iopm & fIopmMask)
    {
        if (pIoExitInfo)
        {
            static const uint32_t s_auIoOpSize[] =
            { SVM_IOIO_32_BIT_OP, SVM_IOIO_8_BIT_OP, SVM_IOIO_16_BIT_OP, 0, SVM_IOIO_32_BIT_OP, 0, 0, 0 };

            static const uint32_t s_auIoAddrSize[] =
            { 0, SVM_IOIO_16_BIT_ADDR, SVM_IOIO_32_BIT_ADDR, 0, SVM_IOIO_64_BIT_ADDR, 0, 0, 0 };

            pIoExitInfo->u         = s_auIoOpSize[cbReg & 7];
            pIoExitInfo->u        |= s_auIoAddrSize[(cAddrSizeBits >> 4) & 7];
            pIoExitInfo->n.u1STR   = fStrIo;
            pIoExitInfo->n.u1REP   = fRep;
            pIoExitInfo->n.u3SEG   = iEffSeg & 7;
            pIoExitInfo->n.u1Type  = enmIoType;
            pIoExitInfo->n.u16Port = u16Port;
        }
        return true;
    }

    /** @todo remove later (for debugging as VirtualBox always traps all IO
     *        intercepts). */
    AssertMsgFailed(("iemSvmHandleIOIntercept: We expect an IO intercept here!\n"));
    return false;
}
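To make the MSRPM layout documented in HMSvmGetMsrpmOffsetAndBit() concrete, here is a small standalone sketch (plain C, outside the VBox tree; the MSR is just an example) that works out where the read/write permission bits for MSR 0xc0000080 (EFER) land:

#include <stdint.h>
#include <stdio.h>

/* Standalone illustration of the MSRPM layout described above: MSRs
 * 0xc0000000..0xc0001fff occupy bitmap bytes 0x800..0xfff, two bits per MSR
 * (first bit = read intercept, second bit = write intercept). */
int main(void)
{
    uint32_t const idMsr     = UINT32_C(0xc0000080);                 /* EFER, example MSR */
    uint16_t const offMsrpm  = 0x800;                                /* base byte of this MSR range */
    uint32_t const uMsrpmBit = (idMsr - UINT32_C(0xc0000000)) << 1;  /* 2 bits per MSR -> 0x100 */

    /* Convert the bit offset into an absolute byte/bit position in the 8K bitmap. */
    uint32_t const offByte   = offMsrpm + uMsrpmBit / 8;             /* 0x800 + 0x20 = 0x820 */
    uint32_t const iReadBit  = uMsrpmBit % 8;                        /* bit 0: read intercept */
    uint32_t const iWriteBit = iReadBit + 1;                         /* bit 1: write intercept */

    printf("EFER: MSRPM byte %#x, read bit %u, write bit %u\n",
           (unsigned)offByte, (unsigned)iReadBit, (unsigned)iWriteBit);
    return 0;
}

The IO permission bitmap lookup in HMSvmIsIOInterceptActive() follows the same idea with one bit per port: the byte offset is u16Port >> 3, and a multi-byte access ORs in the neighbouring bits, so an intercept on any byte of the access reports the intercept as active.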