Changeset 17106 in vbox for trunk/src/VBox/VMM/VMMGC
- Timestamp: Feb 25, 2009, 12:35:15 AM (16 years ago)
- svn:sync-xref-src-repo-rev: 43338
- File: 1 edited
trunk/src/VBox/VMM/VMMGC/SELMGC.cpp (r17035 → r17106)

 
 /**
+ * Read wrapper used by selmRCGuestTSSWriteHandler.
+ * @returns VBox status code (appropriate for trap handling and GC return).
+ * @param   pVM     The VM handle
+ * @param   pvDst   Where to put the bits we read.
+ * @param   pvSrc   Guest address to read from.
+ * @param   cb      The number of bytes to read.
+ */
+DECLINLINE(int) selmRCReadTssBits(PVM pVM, void *pvDst, void const *pvSrc, size_t cb)
+{
+    int rc = MMGCRamRead(pVM, pvDst, (void *)pvSrc, cb);
+    if (RT_SUCCESS(rc))
+        return VINF_SUCCESS;
+
+    /** @todo use different fallback? */
+    rc = PGMPrefetchPage(pVM, (uintptr_t)pvSrc);
+    AssertMsg(rc == VINF_SUCCESS, ("PGMPrefetchPage %p failed with %Rrc\n", &pvSrc, rc));
+    if (rc == VINF_SUCCESS)
+    {
+        rc = MMGCRamRead(pVM, pvDst, (void *)pvSrc, cb);
+        AssertMsg(rc == VINF_SUCCESS, ("MMGCRamRead %p failed with %Rrc\n", &pvSrc, rc));
+    }
+    return rc;
+}
+
+/**
  * \#PF Virtual Handler callback for Guest write access to the Guest's own current TSS.
  *
…
 
     /*
-     * Try emulate the access and compare the R0 ss:esp with the shadow tss values.
-     *
-     * Note, that it's safe to access the TSS after a successfull instruction emulation,
-     * even if the stuff that was changed wasn't the ss0 or esp0 bits. The CPU insists
-     * on the TSS being all one physical page, so ASSUMING that we're not trapping
-     * I/O map accesses this is safe.
+     * Try emulate the access.
      */
     uint32_t cb;
…
     if (RT_SUCCESS(rc) && cb)
     {
-        PCVBOXTSS pGuestTSS = (PVBOXTSS)pVM->selm.s.GCPtrGuestTss;
-        if (    pGuestTSS->esp0 != pVM->selm.s.Tss.esp1
-            ||  pGuestTSS->ss0  != (pVM->selm.s.Tss.ss1 & ~1)) /* undo raw-r0 */
+        rc = VINF_SUCCESS;
+
+        /*
+         * If it's on the same page as the esp0 and ss0 fields or actually one of them,
+         * then check if any of these has changed.
+         */
+        PCVBOXTSS pGuestTss = (PVBOXTSS)pVM->selm.s.GCPtrGuestTss;
+        if (    PAGE_ADDRESS(&pGuestTss->esp0) == PAGE_ADDRESS(&pGuestTss->padding_ss0)
+            &&  PAGE_ADDRESS(&pGuestTss->esp0) == PAGE_ADDRESS((uint8_t *)pGuestTss + offRange)
+            &&  (   pGuestTss->esp0 !=  pVM->selm.s.Tss.esp1
+                 || pGuestTss->ss0  != (pVM->selm.s.Tss.ss1 & ~1)) /* undo raw-r0 */
+           )
         {
             Log(("selmRCGuestTSSWriteHandler: R0 stack: %RTsel:%RGv -> %RTsel:%RGv\n",
-                 (RTSEL)(pVM->selm.s.Tss.ss1 & ~1), (RTGCPTR)pVM->selm.s.Tss.esp1, (RTSEL)pGuestTSS->ss0, (RTGCPTR)pGuestTSS->esp0));
-            pVM->selm.s.Tss.esp1 = pGuestTSS->esp0;
-            pVM->selm.s.Tss.ss1  = pGuestTSS->ss0 | 1;
+                 (RTSEL)(pVM->selm.s.Tss.ss1 & ~1), (RTGCPTR)pVM->selm.s.Tss.esp1, (RTSEL)pGuestTss->ss0, (RTGCPTR)pGuestTss->esp0));
+            pVM->selm.s.Tss.esp1 = pGuestTss->esp0;
+            pVM->selm.s.Tss.ss1  = pGuestTss->ss0 | 1;
             STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSHandledChanged);
         }
-        if (CPUMGetGuestCR4(pVM) & X86_CR4_VME)
-        {
-            uint32_t offIntRedirBitmap = pGuestTSS->offIoBitmap - sizeof(pVM->selm.s.Tss.IntRedirBitmap);
-
-            /** @todo not sure how the partial case is handled; probably not allowed */
-            if (    offIntRedirBitmap <= offRange
-                &&  offIntRedirBitmap + sizeof(pVM->selm.s.Tss.IntRedirBitmap) >= offRange + cb
-                &&  offIntRedirBitmap + sizeof(pVM->selm.s.Tss.IntRedirBitmap) <= pVM->selm.s.cbGuestTss)
+        /* Handle misaligned TSS in a safe manner (just in case). */
+        else if (   offRange >= RT_UOFFSETOF(VBOXTSS, esp0)
+                 && offRange < RT_UOFFSETOF(VBOXTSS, padding_ss0))
+        {
+            struct
             {
-                Log(("offIoBitmap=%x offIntRedirBitmap=%x cbTSS=%x\n", pGuestTSS->offIoBitmap, offIntRedirBitmap, pVM->selm.s.cbGuestTss));
-                /** @todo only update the changed part. */
-                for (uint32_t i = 0; i < sizeof(pVM->selm.s.Tss.IntRedirBitmap) / 8;i++)
+                uint32_t esp0;
+                uint16_t ss0;
+                uint16_t padding_ss0;
+            } s;
+            AssertCompileSize(s, 8);
+            rc = selmRCReadTssBits(pVM, &s, &pGuestTss->esp0, sizeof(s));
+            if (    rc == VINF_SUCCESS
+                &&  (   s.esp0 !=  pVM->selm.s.Tss.esp1
+                     || s.ss0  != (pVM->selm.s.Tss.ss1 & ~1)) /* undo raw-r0 */
+               )
+            {
+                Log(("selmRCGuestTSSWriteHandler: R0 stack: %RTsel:%RGv -> %RTsel:%RGv\n",
+                     (RTSEL)(pVM->selm.s.Tss.ss1 & ~1), (RTGCPTR)pVM->selm.s.Tss.esp1, (RTSEL)s.ss0, (RTGCPTR)s.esp0));
+                pVM->selm.s.Tss.esp1 = s.esp0;
+                pVM->selm.s.Tss.ss1  = s.ss0 | 1;
+                STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSHandledChanged);
+            }
+        }
+
+        /*
+         * If VME is enabled we need to check if the interrupt redirection bitmap
+         * needs updating.
+         */
+        if (    offRange >= RT_UOFFSETOF(VBOXTSS, offIoBitmap)
+            &&  (CPUMGetGuestCR4(pVM) & X86_CR4_VME))
+        {
+            if (offRange - RT_UOFFSETOF(VBOXTSS, offIoBitmap) < sizeof(pGuestTss->offIoBitmap))
+            {
+                uint16_t offIoBitmap = pGuestTss->offIoBitmap;
+                if (offIoBitmap != pVM->selm.s.offGuestIoBitmap)
                 {
-                    rc = MMGCRamRead(pVM, &pVM->selm.s.Tss.IntRedirBitmap[i * 8], (uint8_t *)pGuestTSS + offIntRedirBitmap + i * 8, 8);
-                    if (RT_FAILURE(rc))
+                    Log(("TSS offIoBitmap changed: old=%#x new=%#x -> resync in ring-3\n", pVM->selm.s.offGuestIoBitmap, offIoBitmap));
+                    VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
+                    VM_FF_SET(pVM, VM_FF_TO_R3);
+                }
+                else
+                    Log(("TSS offIoBitmap: old=%#x new=%#x [unchanged]\n", pVM->selm.s.offGuestIoBitmap, offIoBitmap));
+            }
+            else
+            {
+                /** @todo not sure how the partial case is handled; probably not allowed */
+                uint32_t offIntRedirBitmap = pVM->selm.s.offGuestIoBitmap - sizeof(pVM->selm.s.Tss.IntRedirBitmap);
+                if (    offIntRedirBitmap <= offRange
+                    &&  offIntRedirBitmap + sizeof(pVM->selm.s.Tss.IntRedirBitmap) >= offRange + cb
+                    &&  offIntRedirBitmap + sizeof(pVM->selm.s.Tss.IntRedirBitmap) <= pVM->selm.s.cbGuestTss)
+                {
+                    Log(("TSS IntRedirBitmap Changed: offIoBitmap=%x offIntRedirBitmap=%x cbTSS=%x offRange=%x cb=%x\n",
+                         pVM->selm.s.offGuestIoBitmap, offIntRedirBitmap, pVM->selm.s.cbGuestTss, offRange, cb));
+
+                    /** @todo only update the changed part. */
+                    for (uint32_t i = 0; i < sizeof(pVM->selm.s.Tss.IntRedirBitmap) / 8; i++)
                     {
-                        /* Shadow page table might be out of sync */
-                        rc = PGMPrefetchPage(pVM, (RTGCPTR)(RTRCUINTPTR)((uint8_t *)pGuestTSS + offIntRedirBitmap + i*8));
-                        if (RT_FAILURE(rc))
-                        {
-                            AssertMsg(rc == VINF_SUCCESS, ("PGMPrefetchPage %RGv failed with %Rrc\n", (RTGCPTR)((uintptr_t)pGuestTSS + offIntRedirBitmap + i*8), rc));
+                        rc = selmRCReadTssBits(pVM, &pVM->selm.s.Tss.IntRedirBitmap[i * 8],
+                                               (uint8_t *)pGuestTss + offIntRedirBitmap + i * 8, 8);
+                        if (rc != VINF_SUCCESS)
                             break;
-                        }
-                        rc = MMGCRamRead(pVM, &pVM->selm.s.Tss.IntRedirBitmap[i * 8], (uint8_t *)pGuestTSS + offIntRedirBitmap + i * 8, 8);
                     }
-                    AssertMsg(rc == VINF_SUCCESS, ("MMGCRamRead %RGv failed with %Rrc\n", (RTGCPTR)((uintptr_t)pGuestTSS + offIntRedirBitmap + i * 8), rc));
+                    STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSRedir);
                 }
-                STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSRedir);
-            }
+            }
         }
+
+        /* Return to ring-3 for a full resync if any of the above fails... (?) */
+        if (rc != VINF_SUCCESS)
+        {
+            VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
+            VM_FF_SET(pVM, VM_FF_TO_R3);
+            if (RT_SUCCESS(rc))
+                rc = VINF_SUCCESS;
+        }
+
         STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSHandled);
     }
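As a side note (not part of the changeset itself): the point of the rework above is that the guest TSS can no longer be assumed to sit entirely on a single present page. The handler therefore reads esp0/ss0 directly only when the written range and those fields share a page, and otherwise goes through the new selmRCReadTssBits() wrapper, which retries the read after prefetching the page. The standalone C sketch below illustrates only the page/offset arithmetic behind that decision; MYTSS, MY_PAGE_ADDRESS, canReadSs0Esp0Directly and writeHitsSs0Esp0 are made-up illustration names, not VirtualBox APIs, and the struct is a simplified stand-in for VBOXTSS.

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for the VBox PAGE_ADDRESS macro and TSS layout. */
    #define MY_PAGE_SIZE        4096u
    #define MY_PAGE_ADDRESS(p)  ((uintptr_t)(p) & ~(uintptr_t)(MY_PAGE_SIZE - 1))

    typedef struct MYTSS
    {
        uint16_t selPrev;       /* 0x00 */
        uint16_t padding1;      /* 0x02 */
        uint32_t esp0;          /* 0x04: ring-0 stack pointer */
        uint16_t ss0;           /* 0x08: ring-0 stack segment */
        uint16_t padding_ss0;   /* 0x0a */
        /* ... the rest of the TSS is irrelevant for this sketch ... */
    } MYTSS;

    /* 1 if the faulting write (offRange bytes into the TSS) lands on the same
       page as the esp0/ss0 fields, so they can be read directly after the
       instruction has been emulated. */
    static int canReadSs0Esp0Directly(const MYTSS *pTss, uint32_t offRange)
    {
        return MY_PAGE_ADDRESS(&pTss->esp0) == MY_PAGE_ADDRESS(&pTss->padding_ss0)
            && MY_PAGE_ADDRESS(&pTss->esp0) == MY_PAGE_ADDRESS((const uint8_t *)pTss + offRange);
    }

    /* 1 if the write overlaps the esp0/ss0 area itself; the changeset handles
       that case with a byte-wise fallback read instead of a direct access. */
    static int writeHitsSs0Esp0(uint32_t offRange)
    {
        return offRange >= offsetof(MYTSS, esp0)
            && offRange <  offsetof(MYTSS, padding_ss0);
    }

    int main(void)
    {
        MYTSS Tss = { 0 };
        Tss.esp0 = 0x00a0f000u;
        Tss.ss0  = 0x10;

        printf("direct read possible: %d\n", canReadSs0Esp0Directly(&Tss, (uint32_t)offsetof(MYTSS, ss0)));
        printf("write hits esp0/ss0:  %d\n", writeHitsSs0Esp0((uint32_t)offsetof(MYTSS, ss0)));
        return 0;
    }

The real handler operates on guest-context virtual addresses and uses PAGE_ADDRESS and RT_UOFFSETOF on VBOXTSS; the sketch merely mirrors the shape of those checks under the simplified layout above.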