Changeset 12657 in vbox for trunk/src/VBox/VMM/VMMAll
- Timestamp: Sep 22, 2008 6:29:06 PM
- svn:sync-xref-src-repo-rev: 36948
- Location: trunk/src/VBox/VMM/VMMAll
- Files: 3 edited
trunk/src/VBox/VMM/VMMAll/CPUMAllA.asm
r10687 → r12657

Whitespace-only cleanup; no functional change. Blank separator lines are added between the ENDPROC of one routine and the ";;" doc comment of the next (after CPUMLoadFPUAsm, CPUMSaveFPUAsm, CPUMSetFCW and CPUMSetMXCSR), and trailing whitespace is stripped inside CPUMLoadXMMAsm and CPUMSaveXMMAsm, including the RT_ARCH_AMD64 blocks that test MSR_K6_EFER_LMA before loading or saving xmm8..xmm15, and in the "; Set the MXCSR" comment line (see the note below on the LMA test).
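For context, xmm8..xmm15 are only architecturally visible in 64-bit (long) mode, which is why these save/load paths check MSR_K6_EFER_LMA before touching them. A minimal C sketch of that decision, assuming the usual EFER layout (LMA is bit 10); the helper name is made up for illustration and is not part of CPUM:

```c
#include <stdint.h>

#define MSR_K6_EFER_LMA  UINT64_C(0x0000000000000400)  /* EFER.LMA, bit 10: long mode active */

/* Hypothetical helper: how many guest XMM registers need saving/restoring. */
static unsigned cXmmRegsToHandle(uint64_t uGuestEfer)
{
    /* xmm8..xmm15 only exist when the guest runs in long mode. */
    return (uGuestEfer & MSR_K6_EFER_LMA) ? 16 : 8;
}
```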
trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp
r12600 → r12657

- The file doc comment is corrected from "CPUM - CPU Monitor(/Manager) - Get s and Sets." to "CPUM - CPU Monitor(/Manager) - Getters and Setters."
- In CPUMHyperSetCtxCore the raw-mode pointer field pHyperCoreGC is renamed pHyperCoreRC, and the hypervisor context core is now reached through CTX_SUFF(pHyperCore) instead of CTXALLSUFF(pHyperCore). The same CTXALLSUFF -> CTX_SUFF replacement runs through CPUMGetHyperCtxCore and every CPUMSetHyper*/CPUMGetHyper* accessor for cs, ds, es, fs, gs, ss, esp, eflags, eip, the general registers and rip; a sketch of how such context-suffix macros can be built follows below.
- The dead "#if 0 /* these are not correct. */" block containing CPUMGetHyperCR0, CPUMGetHyperCR2, CPUMGetHyperCR3 and CPUMGetHyperCR4 is deleted.
- Blank-line spacing between the small accessor functions is normalised to two blank lines; the CPUMSetHyperGDTR/IDTR/CR3/TR/LDTR and hyper DR0..DR7 accessors are otherwise untouched.
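As a rough illustration of the pattern behind CTX_SUFF (not the actual VirtualBox definition, which lives in the VMM headers): the same logical pointer is stored once per context (R3, R0, RC), and a token-pasting macro picks the member that is valid for the context being compiled. A minimal sketch, with a demo prefix on the struct and macro names to make clear they are stand-ins:

```c
#include <stdint.h>

typedef struct CPUMCTXCORE CPUMCTXCORE;

/* One logical object, addressable from three contexts. */
typedef struct DEMOCPUM
{
    CPUMCTXCORE *pHyperCoreR3;   /* ring-3 (host process) address  */
    uintptr_t    pHyperCoreR0;   /* ring-0 (host kernel) address   */
    uint32_t     pHyperCoreRC;   /* raw-mode/guest context address */
} DEMOCPUM;

/* Pick the member matching the compilation context by pasting a suffix. */
#if defined(IN_RING3)
# define DEMO_CTX_SUFF(a_Name)  a_Name##R3
#elif defined(IN_RING0)
# define DEMO_CTX_SUFF(a_Name)  a_Name##R0
#else
# define DEMO_CTX_SUFF(a_Name)  a_Name##RC
#endif

/* Usage, mirroring the accessors in this changeset:
 *     pDemo->DEMO_CTX_SUFF(pHyperCore)
 * expands to pDemo->pHyperCoreR3 in a ring-3 build, and so on. */
```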
Continuing with the guest-state accessors:

- In CPUMSetGuestCtxCore the selector to-do note now reads "/** @todo #1410 requires selectors to be checked. (huh? 1410?) */".
- In the IN_GC-only CR0 handling, the nested "#ifdef VBOX_STRICT" / "#endif" pair is indented as "# ifdef" / "# endif", and the closing "#endif" of the block is annotated "#endif /* IN_GC */".
- CPUMGetGuestMsr renames its local from val to u64 and re-indents the switch cases; the returned values and the deliberate skipping of the fs/gs base MSRs are unchanged.
- The "//@todo: crx should be an array" note above CPUMGetGuestCRx becomes a doxygen "///@todo" comment.
- CPUMGetGuestDRx now redirects indices 4 and 5 to 6 and 7 ("if (iReg == 4 || iReg == 5) iReg += 2;") before indexing the dr[] array, so DR4 reads DR6 and DR5 reads DR7; see the sketch after this list.
- CPUMGetGuestCpuIdExtGCPtr, CPUMGetGuestCpuIdCentaurGCPtr and CPUMGetGuestCpuIdDefGCPtr now parenthesise their casts: "return (RCPTRTYPE(PCCPUMCPUID))VM_GUEST_ADDR(...);".
- The remaining guest CRx/segment/GPR setters and getters, the CPUID leaf-count helpers, the feature-bit query, the CPU vendor getter and CPUMSetGuestDR1..DR7 only pick up the normalised blank-line spacing.
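A self-contained sketch of the aliasing logic added to CPUMGetGuestDRx (and, symmetrically, to CPUMSetGuestDRx below): indices 4 and 5 are redirected to 6 and 7 before the dr[] array is indexed. Architecturally DR4/DR5 alias DR6/DR7 when CR4.DE is clear; the code in this changeset applies the aliasing unconditionally. The struct and status constants here are simplified stand-ins, not the real CPUM types:

```c
#include <stdint.h>

#define VINF_SUCCESS            0
#define VERR_INVALID_PARAMETER  (-2)
#define USE_REG_DR7             7

typedef struct DEMOGUESTCTX
{
    uint64_t dr[8];   /* DR0..DR7; slots 4 and 5 are never read directly */
} DEMOGUESTCTX;

static int demoGetGuestDRx(const DEMOGUESTCTX *pCtx, uint32_t iReg, uint64_t *pValue)
{
    if (iReg > USE_REG_DR7)
        return VERR_INVALID_PARAMETER;
    /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
    if (iReg == 4 || iReg == 5)
        iReg += 2;
    *pValue = pCtx->dr[iReg];
    return VINF_SUCCESS;
}
```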
The remaining hunks in this file:

- CPUMSetGuestDRx gets the same DR4/DR5 to DR6/DR7 redirection before writing dr[iReg] and calling CPUMRecalcHyperDRx.
- The misspelled CPUMDeactivateGuestDebugtate is renamed CPUMDeactivateGuestDebugState.
- In CPUMGetGuestCPL the comment about conforming selectors is re-wrapped: reading the CPL from the hidden SS attributes (ssHid.Attr.n.u2Dpl) only seems to apply to AMD-V, while on VT-x SS does need to be consulted (an ACP2 regression during install after a far call to ring 2 is cited). A simplified sketch of the CPL derivation follows after this list.
- CPUMGetGuestMode folds a split "else" / "if" onto one "else if" line, the lazy FPU sync block gains a labelled "#endif /* !IN_RING3 */", and the changed-flags accessors and debug-state helpers pick up the usual two-blank-line spacing.
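For orientation, a rough sketch of the privilege-level derivation that the re-wrapped comment in CPUMGetGuestCPL is describing: in real mode the CPL is effectively 0, in virtual-8086 mode it is 3, and otherwise it is taken from the DPL field of the hidden SS attributes. This is a simplified illustration with made-up parameter names, not the full function:

```c
#include <stdint.h>

#define X86_CR0_PE   UINT32_C(0x00000001)   /* protection enable */
#define X86_EFL_VM   UINT32_C(0x00020000)   /* virtual-8086 mode */

/* Simplified: derive the current privilege level from CR0, EFLAGS and SS.DPL. */
static unsigned demoGetCpl(uint32_t cr0, uint32_t eflags, unsigned uSsDpl)
{
    if (!(cr0 & X86_CR0_PE))
        return 0;          /* real mode: treated as CPL 0 */
    if (eflags & X86_EFL_VM)
        return 3;          /* virtual-8086 mode always runs at CPL 3 */
    return uSsDpl & 3;     /* protected mode: CPL equals SS.DPL */
}
```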
trunk/src/VBox/VMM/VMMAll/EMAll.cpp
r12307 → r12657

Mostly whitespace cleanup, plus a status-macro modernisation at the end (see below). Trailing whitespace is trimmed in the emRamRead and emRamWrite comments explaining why the page pool cache can fault here (it flushed one of the shadow mappings used by the trapping instruction and either flushed the TLB or the CPU reused it), around emRamWrite's write-permission guard (the "!(fFlags & X86_PTE_RW) && (CPUMGetGuestCR0(pVM) & X86_CR0_WP)" test that returns VERR_ACCESS_DENIED, sketched just below), in the emGetMnemonic doc comment (the mnemonic strings are left out of the GC/R0 DIS tables for space reasons), and in the wrapped parameter list of emInterpretLockOrXorAnd.
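The emRamWrite hunk quotes the guard that refuses emulated writes to read-only guest pages: a write through a PTE without X86_PTE_RW is only rejected when CR0.WP is set, since supervisor writes to read-only pages are architecturally allowed with WP clear. A compilable sketch of that check, using stand-in status constants and a made-up helper name rather than the real PGM/CPUM APIs:

```c
#include <stdint.h>

#define X86_PTE_RW   UINT64_C(0x0000000000000002)   /* page-table entry: writable */
#define X86_CR0_WP   UINT32_C(0x00010000)           /* CR0 write-protect bit      */

#define DEMO_VINF_SUCCESS         0
#define DEMO_VERR_ACCESS_DENIED  (-1)

/* Hypothetical helper: may a supervisor-mode emulated write hit this page? */
static int demoCheckEmulatedWrite(uint64_t fPteFlags, uint32_t cr0)
{
    if (   !(fPteFlags & X86_PTE_RW)
        && (cr0 & X86_CR0_WP))
        return DEMO_VERR_ACCESS_DENIED;   /* read-only page and WP enforced */
    return DEMO_VINF_SUCCESS;
}
```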
The rest of the EMAll.cpp hunks:

- The same trailing-whitespace trimming continues through emInterpretLockBitTest (its signature, the IN_GC AssertMsgReturn on the faulting address, and the failure Log), the MOVS/STOS "if (!cTransfers) return VINF_SUCCESS;" early-out, the CMPXCHG eflags handling, and the MOV CRx paths, including the IN_GC-only rule that CR0.WP and CR0.AM changes force a reschedule to ring 3 by returning VERR_EM_INTERPRETER when those bits differ from the old value (a small sketch of that masked comparison follows).
- In the MOV DRx interpreter, VBOX_SUCCESS(rc) is replaced with RT_SUCCESS(rc) in both checks around CPUMSetGuestDRx, and the note about not failing on illegal DR7 bits becomes a doxygen "/** @todo ... */" comment.
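The reschedule rule above compares only the WP and AM bits of the old and new CR0 values; everything else is handled in place. A minimal sketch of that masked comparison, with a made-up function name:

```c
#include <stdint.h>
#include <stdbool.h>

#define X86_CR0_WP   UINT32_C(0x00010000)   /* write protect  (bit 16) */
#define X86_CR0_AM   UINT32_C(0x00040000)   /* alignment mask (bit 18) */

/* Hypothetical helper: did a CR0 write flip WP or AM, i.e. must we go back to ring 3? */
static bool demoCr0NeedsReschedule(uint64_t oldCr0, uint64_t newCr0)
{
    const uint64_t fMask = X86_CR0_WP | X86_CR0_AM;
    return (newCr0 & fMask) != (oldCr0 & fMask);
}
```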