Changeset 47660 in vbox for trunk/src/VBox/VMM
- Timestamp: Aug 12, 2013 12:37:34 AM
- Location: trunk/src/VBox/VMM
- Files: 13 edited
Legend: lines prefixed with - were removed, lines prefixed with + were added; unprefixed lines are unchanged context, and … marks unchanged code omitted between hunks.
trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp
(diff from r47652)

A MAYBE_LOAD_DRx helper macro is introduced for updating DRx values in raw-mode and ring-0 contexts:

+/** @def MAYBE_LOAD_DRx
+ * Macro for updating DRx values in raw-mode and ring-0 contexts.
+ */
+#ifdef IN_RING0
+# if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
+#  ifndef VBOX_WITH_HYBRID_32BIT_KERNEL
+#   define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
+    do { \
+        if (!CPUMIsGuestInLongModeEx(&(a_pVCpu)->cpum.s.Guest)) \
+            a_fnLoad(a_uValue); \
+        else \
+            (a_pVCpu)->cpum.s.fUseFlags |= CPUM_SYNC_DEBUG_REGS_HYPER; \
+    } while (0)
+#  else
+#   define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
+    do { \
+        /** @todo we're not loading the correct guest value here! */ \
+        a_fnLoad(a_uValue); \
+    } while (0)
+#  endif
+# else
+#  define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
+    do { \
+        a_fnLoad(a_uValue); \
+    } while (0)
+# endif
+
+#elif defined(IN_RC)
+# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
+    do { \
+        if ((a_pVCpu)->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER) \
+        { a_fnLoad(a_uValue); } \
+    } while (0)
+
+#else
+# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) do { } while (0)
+#endif

The hyper DRx setters now actually load the register instead of carrying a stale todo; DR1-DR3 follow the same pattern as DR0, DR6 merely drops the todo, and DR7 only loads in raw-mode context:

 VMMDECL(void) CPUMSetHyperDR0(PVMCPU pVCpu, RTGCUINTREG uDr0)
 {
     pVCpu->cpum.s.Hyper.dr[0] = uDr0;
-    /** @todo in GC we must load it! */
+    MAYBE_LOAD_DRx(pVCpu, ASMSetDR0, uDr0);
 }
…
 VMMDECL(void) CPUMSetHyperDR7(PVMCPU pVCpu, RTGCUINTREG uDr7)
 {
     pVCpu->cpum.s.Hyper.dr[7] = uDr7;
-    /** @todo in GC we must load it! */
+#ifdef IN_RC
+    MAYBE_LOAD_DRx(pVCpu, ASMSetDR7, uDr7);
+#endif
 }

CPUMRecalcHyperDRx gains an iGstReg parameter, so the guest DRx setters now pass the register number (DR1-DR3, DR7 and the generic CPUMSetGuestDRx are analogous), while DR6 no longer triggers a recalc at all:

 VMMDECL(int) CPUMSetGuestDR0(PVMCPU pVCpu, uint64_t uDr0)
 {
     pVCpu->cpum.s.Guest.dr[0] = uDr0;
-    return CPUMRecalcHyperDRx(pVCpu);
+    return CPUMRecalcHyperDRx(pVCpu, 0);
 }
…
 VMMDECL(int) CPUMSetGuestDR6(PVMCPU pVCpu, uint64_t uDr6)
 {
     pVCpu->cpum.s.Guest.dr[6] = uDr6;
-    return CPUMRecalcHyperDRx(pVCpu);
+    return VINF_SUCCESS; /* No need to recalc. */
 }

CPUMRecalcHyperDRx itself is rewritten:

+/**
+ * Recalculates the hypervisor DRx register values based on current guest
+ * registers and DBGF breakpoints, updating changed registers depending on
+ * the context.
+ *
+ * This is called whenever a guest DRx register is modified (any context)
+ * and when DBGF sets a hardware breakpoint (ring-3 only, rendezvous).
+ *
+ * In raw-mode context this function will reload any (hyper) DRx registers
+ * which come out with a different value.  It may also have to save the
+ * host debug registers if that hasn't been done already.  In this context
+ * though, we'll be intercepting and emulating all DRx accesses, so the
+ * hypervisor DRx values are only important when breakpoints are actually
+ * enabled.
+ *
+ * In ring-0 (HM) context DR0-3 will be relocated by us, while DR7 will be
+ * reloaded by the HM code if it changes.  Furthermore, we will only use
+ * the combined register set when the VBox debugger is actually using
+ * hardware BPs; when it isn't we'll keep the guest DR0-3 + (maybe) DR6
+ * loaded (DR6 doesn't concern us here).
+ *
+ * In ring-3 we won't be loading anything, so we'll calculate hypervisor
+ * values all the time.
+ *
+ * @returns VINF_SUCCESS.
+ * @param   pVCpu    Pointer to the VMCPU.
+ * @param   iGstReg  The guest debug register number that was modified.
+ *                   UINT8_MAX if not a guest register.
+ */
+VMMDECL(int) CPUMRecalcHyperDRx(PVMCPU pVCpu, uint8_t iGstReg)
 {
     PVM pVM = pVCpu->CTX_SUFF(pVM);

     /*
      * Compare the DR7s first.
      *
-     * We only care about the enabled flags. The GE and LE flags are always
-     * set and we don't care if the guest doesn't set them. GD is virtualized
-     * when we dispatch #DB, we never enable it.
+     * We only care about the enabled flags.  GD is virtualized when we
+     * dispatch the #DB, we never enable it.  The DBGF DR7 value will
+     * always have the LE and GE bits set, so no need to check and disable
+     * stuff if they're cleared like we have to for the guest DR7.
      */
+    RTGCUINTREG uGstDr7 = CPUMGetGuestDR7(pVCpu);
+    if (!(uGstDr7 & (X86_DR7_LE | X86_DR7_GE)))
+        uGstDr7 = 0;
+    else if (!(uGstDr7 & X86_DR7_LE))
+        uGstDr7 &= ~X86_DR7_LE_ALL;
+    else if (!(uGstDr7 & X86_DR7_GE))
+        uGstDr7 &= ~X86_DR7_GE_ALL;
+
     const RTGCUINTREG uDbgfDr7 = DBGFBpGetDR7(pVM);
-#ifdef CPUM_VIRTUALIZE_DRX
-    const RTGCUINTREG uGstDr7 = CPUMGetGuestDR7(pVCpu);
-#else
-    const RTGCUINTREG uGstDr7 = 0;
-#endif
-    if ((uGstDr7 | uDbgfDr7) & X86_DR7_ENABLED_MASK)
+    if ((HMIsEnabled(pVCpu->CTX_SUFF(pVM)) ? uDbgfDr7 : (uGstDr7 | uDbgfDr7)) & X86_DR7_ENABLED_MASK)
     {
+        Assert(!CPUMIsGuestDebugStateActive(pVCpu));
+
         /*
-         * Ok, something is enabled. Recalc each of the breakpoints.
-         * Straight forward code, not optimized/minimized in any way.
+         * Ok, something is enabled.  Recalc each of the breakpoints,
+         * taking the VM debugger ones over the guest ones.  In raw-mode
+         * context we will not allow breakpoints with values inside the
+         * hypervisor area.
          */
         RTGCUINTREG uNewDr7 = X86_DR7_GE | X86_DR7_LE | X86_DR7_RA1_MASK;
…
         /* bp 0 */
…
         else if (uGstDr7 & (X86_DR7_L0 | X86_DR7_G0))
         {
-            uNewDr7 |= uGstDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
             uNewDr0 = CPUMGetGuestDR0(pVCpu);
+#ifdef IN_RC
+            if (MMHyperIsInsideArea(pVM, uNewDr0))
+                uNewDr0 = 0;
+            else
+#endif
+                uNewDr7 |= uGstDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
         }
         else
-            uNewDr0 = pVCpu->cpum.s.Hyper.dr[0];
+            uNewDr0 = 0;

(Breakpoints 1-3 get the identical treatment with the L1/G1, L2/G2 and L3/G3 enable bits and the matching RW/LEN masks; each now falls back to 0 instead of the stale hyper value.)

         /*
          * Apply the updates.
          */
 #ifdef IN_RC
-        if (!(pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS))
-        {
-            /** @todo save host DBx registers. */
-        }
+        /* Make sure to save host registers first. */
+        if (!(pVCpu->cpum.s.fUseFlags & (CPUM_USE_DEBUG_REGS_HOST | CPUM_USE_DEBUG_REGS_HYPER)))
+        {
+            Assert(!(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HOST));
+            pVCpu->cpum.s.Host.dr6 = ASMGetDR6();
+            pVCpu->cpum.s.Host.dr7 = ASMGetDR7();
+            pVCpu->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS_HYPER;
+            ASMSetDR6(X86_DR6_INIT_VAL);
+        }
+        if (!(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HOST))
+        {
+            pVCpu->cpum.s.Host.dr0 = ASMGetDR0();
+            pVCpu->cpum.s.Host.dr1 = ASMGetDR1();
+            pVCpu->cpum.s.Host.dr2 = ASMGetDR2();
+            pVCpu->cpum.s.Host.dr3 = ASMGetDR3();
+            pVCpu->cpum.s.fUseFlags |= CPUM_USED_DEBUG_REGS_HOST | CPUM_USE_DEBUG_REGS_HYPER | CPUM_USED_DEBUG_REGS_HYPER;
+
+            /* We haven't loaded any hyper DRxes yet, so we'll have to load them all now. */
+            pVCpu->cpum.s.Hyper.dr[0] = uNewDr0;
+            ASMSetDR0(uNewDr0);
+            pVCpu->cpum.s.Hyper.dr[1] = uNewDr1;
+            ASMSetDR1(uNewDr1);
+            pVCpu->cpum.s.Hyper.dr[2] = uNewDr2;
+            ASMSetDR2(uNewDr2);
+            pVCpu->cpum.s.Hyper.dr[3] = uNewDr3;
+            ASMSetDR3(uNewDr3);
+            pVCpu->cpum.s.Hyper.dr[7] = uNewDr7;
+            ASMSetDR7(uNewDr7);
+        }
+        else
 #endif
-        /** @todo Should this not be setting CPUM_USE_DEBUG_REGS_HYPER?
-         *        (CPUM_VIRTUALIZE_DRX is never defined). */
-        pVCpu->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS;
-        if (uNewDr3 != pVCpu->cpum.s.Hyper.dr[3])
-            CPUMSetHyperDR3(pVCpu, uNewDr3);
-        if (uNewDr2 != pVCpu->cpum.s.Hyper.dr[2])
-            CPUMSetHyperDR2(pVCpu, uNewDr2);
-        if (uNewDr1 != pVCpu->cpum.s.Hyper.dr[1])
-            CPUMSetHyperDR1(pVCpu, uNewDr1);
-        if (uNewDr0 != pVCpu->cpum.s.Hyper.dr[0])
-            CPUMSetHyperDR0(pVCpu, uNewDr0);
-        if (uNewDr7 != pVCpu->cpum.s.Hyper.dr[7])
-            CPUMSetHyperDR7(pVCpu, uNewDr7);
+        {
+            pVCpu->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS_HYPER;
+            if (uNewDr3 != pVCpu->cpum.s.Hyper.dr[3])
+                CPUMSetHyperDR3(pVCpu, uNewDr3);
+            if (uNewDr2 != pVCpu->cpum.s.Hyper.dr[2])
+                CPUMSetHyperDR2(pVCpu, uNewDr2);
+            if (uNewDr1 != pVCpu->cpum.s.Hyper.dr[1])
+                CPUMSetHyperDR1(pVCpu, uNewDr1);
+            if (uNewDr0 != pVCpu->cpum.s.Hyper.dr[0])
+                CPUMSetHyperDR0(pVCpu, uNewDr0);
+            if (uNewDr7 != pVCpu->cpum.s.Hyper.dr[7])
+                CPUMSetHyperDR7(pVCpu, uNewDr7);
+        }
     }
+#ifdef IN_RING0
+    else if (CPUMIsGuestDebugStateActive(pVCpu))
+    {
+        /*
+         * Reload the register that was modified.  Normally this won't
+         * happen as we won't intercept DRx writes when not having the
+         * hyper debug state loaded, but in case we do for some reason
+         * we'll simply deal with it.
+         */
+        switch (iGstReg)
+        {
+            case 0: ASMSetDR0(CPUMGetGuestDR0(pVCpu)); break;
+            case 1: ASMSetDR1(CPUMGetGuestDR1(pVCpu)); break;
+            case 2: ASMSetDR2(CPUMGetGuestDR2(pVCpu)); break;
+            case 3: ASMSetDR3(CPUMGetGuestDR3(pVCpu)); break;
+            default:
+                AssertReturn(iGstReg != UINT8_MAX, VERR_INTERNAL_ERROR_3);
+        }
+    }
+#endif
     else
     {
+        /*
+         * No active debug state any more.  In raw-mode this means we have
+         * to make sure DR7 has everything disabled now, if we armed it
+         * already.
+         *
+         * In ring-0 this only happens when we decided to lazy load the
+         * debug state because it wasn't active, and that didn't change
+         * with the latest changes, so nothing to do here.
+         */
 #ifdef IN_RC
-        if (pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS)
-        {
-            /** @todo restore host DBx registers. */
-        }
+        if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER)
+        {
+            ASMSetDR7(X86_DR7_INIT_VAL);
+            if (pVCpu->cpum.s.Hyper.dr[0])
+                ASMSetDR0(0);
+            if (pVCpu->cpum.s.Hyper.dr[1])
+                ASMSetDR1(0);
+            if (pVCpu->cpum.s.Hyper.dr[2])
+                ASMSetDR2(0);
+            if (pVCpu->cpum.s.Hyper.dr[3])
+                ASMSetDR3(0);
+            pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_DEBUG_REGS_HYPER;
+        }
+#elif defined(IN_RING0)
+        Assert(!CPUMIsHyperDebugStateActive(pVCpu)); /* (can only change while in ring-3) */
 #endif
-        pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS;
+        pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS_HYPER;
+
+        /* Clear all the registers. */
+        pVCpu->cpum.s.Hyper.dr[7] = X86_DR7_RA1_MASK;
+        pVCpu->cpum.s.Hyper.dr[3] = 0;
+        pVCpu->cpum.s.Hyper.dr[2] = 0;
+        pVCpu->cpum.s.Hyper.dr[1] = 0;
+        pVCpu->cpum.s.Hyper.dr[0] = 0;
     }
     Log2(("CPUMRecalcHyperDRx: fUseFlags=%#x %RGr %RGr %RGr %RGr %RGr %RGr\n",
           pVCpu->cpum.s.fUseFlags, pVCpu->cpum.s.Hyper.dr[0], pVCpu->cpum.s.Hyper.dr[1],
           pVCpu->cpum.s.Hyper.dr[2], pVCpu->cpum.s.Hyper.dr[3], pVCpu->cpum.s.Hyper.dr[6],
           pVCpu->cpum.s.Hyper.dr[7]));

     return VINF_SUCCESS;
 }

The boolean status getters switch to RT_BOOL, and the debug-state getters now test the new USED flags (CPUMIsHostUsingSysCall and CPUMIsGuestFPUStateActive get the same RT_BOOL treatment):

 VMMDECL(bool) CPUMIsHostUsingSysEnter(PVM pVM)
 {
-    return (pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSENTER) != 0;
+    return RT_BOOL(pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSENTER);
 }
…
 VMMDECL(bool) CPUMIsGuestDebugStateActive(PVMCPU pVCpu)
 {
-    return (pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS) != 0;
+    return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_GUEST);
 }
…
 VMMDECL(bool) CPUMIsHyperDebugStateActive(PVMCPU pVCpu)
 {
-    return (pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS_HYPER) != 0;
+    return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER);
 }

CPUMDeactivateGuestFPUState gains an assertion and a review note:

 /**
  * Deactivate the FPU/XMM state of the guest OS.
  * @param   pVCpu       Pointer to the VMCPU.
+ *
+ * @todo    r=bird: Why is this needed?  Looks like a workaround for
+ *          mishandled FPU state management.
  */
 VMMDECL(void) CPUMDeactivateGuestFPUState(PVMCPU pVCpu)
 {
+    Assert(!(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU));
     pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_FPU;
 }

CPUMDeactivateGuestDebugState is reduced to an assertion (and flagged as no longer making sense as an API), while CPUMDeactivateHyperDebugState is removed entirely:

+ * @todo This API doesn't make sense any more.
  */
 VMMDECL(void) CPUMDeactivateGuestDebugState(PVMCPU pVCpu)
 {
-    pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS;
+    Assert(!(pVCpu->cpum.s.fUseFlags & (CPUM_USED_DEBUG_REGS_GUEST | CPUM_USED_DEBUG_REGS_HYPER | CPUM_USED_DEBUG_REGS_HOST)));
 }
-
-/**
- * Mark the hypervisor's debug state as inactive.
- *
- * @returns boolean
- * @param   pVM         Pointer to the VM.
- */
-VMMDECL(void) CPUMDeactivateHyperDebugState(PVMCPU pVCpu)
-{
-    pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS_HYPER;
-}
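The guest-DR7 cleanup at the top of the rewritten function is subtle, so here is a minimal standalone C sketch of the same policy. This is an illustration, not VBox code: the DR7_* constants are defined locally from the architectural bit layout and are only assumed to match VBox's X86_DR7_* values.

    #include <stdint.h>

    /* Architectural DR7 bits (assumed to match VBox's X86_DR7_* defines). */
    #define DR7_LE_ALL   UINT32_C(0x00000055)  /* L0/L1/L2/L3: local enables.  */
    #define DR7_GE_ALL   UINT32_C(0x000000AA)  /* G0/G1/G2/G3: global enables. */
    #define DR7_ENABLED  (DR7_LE_ALL | DR7_GE_ALL)
    #define DR7_LE       UINT32_C(0x00000100)  /* Local exact breakpoint enable.  */
    #define DR7_GE       UINT32_C(0x00000200)  /* Global exact breakpoint enable. */

    /*
     * Mirrors the guest DR7 sanitizing in CPUMRecalcHyperDRx: enable bits the
     * guest set without also setting LE/GE are dropped, so they cannot leak
     * into the combined hypervisor DR7 that DBGF relies on.
     */
    static uint32_t sanitizeGuestDr7(uint32_t uGstDr7)
    {
        if (!(uGstDr7 & (DR7_LE | DR7_GE)))
            return 0;                   /* Neither exact-enable bit: ignore it all. */
        if (!(uGstDr7 & DR7_LE))
            uGstDr7 &= ~DR7_LE_ALL;     /* LE clear: drop the local enables.  */
        else if (!(uGstDr7 & DR7_GE))
            uGstDr7 &= ~DR7_GE_ALL;     /* GE clear: drop the global enables. */
        return uGstDr7;
    }

    /* The recalc is only worth doing when something is actually enabled: */
    static int anyBreakpointArmed(uint32_t uGstDr7, uint32_t uDbgfDr7)
    {
        return ((sanitizeGuestDr7(uGstDr7) | uDbgfDr7) & DR7_ENABLED) != 0;
    }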
trunk/src/VBox/VMM/VMMR0/CPUMR0.cpp
(diff from r47064)

 static void cpumR0UnmapLocalApics(void);
 #endif
+static int  cpumR0SaveHostDebugState(PVMCPU pVCpu);
…
The CPUM_MANUAL_XMM_RESTORE flag is renamed to CPUM_USED_MANUAL_XMM_RESTORE throughout the FPU save/restore paths, for example:

 # if defined(VBOX_WITH_HYBRID_32BIT_KERNEL) || defined(VBOX_WITH_KERNEL_USING_XMM) /** @todo remove the #else here and move cpumHandleLazyFPUAsm back to VMMGC after branching out 3.0!!. */
-    Assert(!(pVCpu->cpum.s.fUseFlags & CPUM_MANUAL_XMM_RESTORE));
+    Assert(!(pVCpu->cpum.s.fUseFlags & CPUM_USED_MANUAL_XMM_RESTORE));

(and likewise at the nine other CPUM_MANUAL_XMM_RESTORE sites in the FPU code; no functional change).

The old CPUMR0SaveGuestDebugState / CPUMR0SaveHostDebugState / CPUMR0LoadHostDebugState trio is replaced.  Saving the host state becomes an internal helper:

+/**
+ * Saves the host debug state, setting CPUM_USED_DEBUG_REGS_HOST and loading
+ * DR7 with safe values.
+ *
+ * @returns VBox status code.
+ * @param   pVCpu   Pointer to the VMCPU.
+ */
+static int cpumR0SaveHostDebugState(PVMCPU pVCpu)
 {
-    NOREF(pVM);
-
-    /* Save the host state. */
+    /*
+     * Save the host state.
+     */
 #ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
     AssertCompile((uintptr_t)&pVCpu->cpum.s.Host.dr3 - (uintptr_t)&pVCpu->cpum.s.Host.dr0 == sizeof(uint64_t) * 3);
     cpumR0SaveDRx(&pVCpu->cpum.s.Host.dr0);
…
     /** @todo dr7 might already have been changed to 0x400; don't care right now as it's harmless. */
     pVCpu->cpum.s.Host.dr7 = ASMGetDR7();
-    /* Make sure DR7 is harmless or else we could trigger breakpoints when restoring dr0-3 (!) */
-    ASMSetDR7(X86_DR7_INIT_VAL);
+
+    /* Preemption paranoia. */
+    ASMAtomicOrU32(&pVCpu->cpum.s.fUseFlags, CPUM_USED_DEBUG_REGS_HOST);
+
+    /*
+     * Make sure DR7 is harmless or else we could trigger breakpoints when
+     * loading guest or hypervisor DRx values later.
+     */
+    if (pVCpu->cpum.s.Host.dr7 != X86_DR7_INIT_VAL)
+        ASMSetDR7(X86_DR7_INIT_VAL);

     return VINF_SUCCESS;
 }

Saving the guest DRx values and restoring the host state is merged into a single entry point:

+/**
+ * Saves the guest DRx state residing in host registers and restores the host
+ * register values.
+ *
+ * The guest DRx state is only saved if CPUMR0LoadGuestDebugState was called,
+ * since it's assumed that we're shadowing the guest DRx register values
+ * accurately when using the combined hypervisor debug register values
+ * (CPUMR0LoadHyperDebugState).
+ *
+ * @returns true if either guest or hypervisor debug registers were loaded.
+ * @param   pVCpu   The cross context CPU structure for the calling EMT.
+ * @param   fDR6    Whether to include DR6 or not.
+ * @thread  EMT(pVCpu)
+ */
+VMMR0DECL(bool) CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(PVMCPU pVCpu, bool fDR6)
+{
+    bool const fDrXLoaded = RT_BOOL(pVCpu->cpum.s.fUseFlags & (CPUM_USED_DEBUG_REGS_GUEST | CPUM_USED_DEBUG_REGS_HYPER));
+
+    /*
+     * Do we need to save the guest DRx registers loaded into host registers?
+     * (DR7 and DR6 (if fDR6 is true) are left to the caller.)
+     */
+    if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_GUEST)
+    {
+#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+        if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.s.Guest))
+        {
+            uint64_t uDr6 = pVCpu->cpum.s.Guest.dr[6];
+            HMR0SaveDebugState(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.s.Guest);
+            if (!fDR6)
+                pVCpu->cpum.s.Guest.dr[6] = uDr6;
+        }
+        else
+#endif
+        {
+#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
+            cpumR0SaveDRx(&pVCpu->cpum.s.Guest.dr[0]);
+#else
+            pVCpu->cpum.s.Guest.dr[0] = ASMGetDR0();
+            pVCpu->cpum.s.Guest.dr[1] = ASMGetDR1();
+            pVCpu->cpum.s.Guest.dr[2] = ASMGetDR2();
+            pVCpu->cpum.s.Guest.dr[3] = ASMGetDR3();
+#endif
+            if (fDR6)
+                pVCpu->cpum.s.Guest.dr[6] = ASMGetDR6();
+        }
+    }
+    ASMAtomicAndU32(&pVCpu->cpum.s.fUseFlags, ~(  CPUM_USED_DEBUG_REGS_GUEST | CPUM_USED_DEBUG_REGS_HYPER
+                                                | CPUM_SYNC_DEBUG_REGS_GUEST | CPUM_SYNC_DEBUG_REGS_HYPER));
+
+    /*
+     * Restore the host's debug state.  DR0-3, DR6 and only then DR7!
+     */
+    if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HOST)
+    {
+        /* A bit of paranoia first... */
+        uint64_t uCurDR7 = ASMGetDR7();
+        if (uCurDR7 != X86_DR7_INIT_VAL)
+            ASMSetDR7(X86_DR7_INIT_VAL);
+
+#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
+        AssertCompile((uintptr_t)&pVCpu->cpum.s.Host.dr3 - (uintptr_t)&pVCpu->cpum.s.Host.dr0 == sizeof(uint64_t) * 3);
+        cpumR0LoadDRx(&pVCpu->cpum.s.Host.dr0);
+#else
+        ASMSetDR0(pVCpu->cpum.s.Host.dr0);
+        ASMSetDR1(pVCpu->cpum.s.Host.dr1);
+        ASMSetDR2(pVCpu->cpum.s.Host.dr2);
+        ASMSetDR3(pVCpu->cpum.s.Host.dr3);
+#endif
+        /** @todo consider only updating if they differ, esp. DR6.  Need to
+         *        figure out how expensive DRx reads are compared to writes. */
+        ASMSetDR6(pVCpu->cpum.s.Host.dr6);
+        ASMSetDR7(pVCpu->cpum.s.Host.dr7);
+
+        ASMAtomicAndU32(&pVCpu->cpum.s.fUseFlags, ~CPUM_USED_DEBUG_REGS_HOST);
+    }
+
+    return fDrXLoaded;
+}

CPUMR0LoadGuestDebugState and CPUMR0LoadHyperDebugState lose the pVM/pCtx parameters and the int return, and the hyper variant no longer fails on 64-bit guests or hybrid kernels:

+/**
+ * Lazily sync in the debug state.
+ *
+ * @param   pVCpu   The cross context CPU structure for the calling EMT.
+ * @param   fDR6    Whether to include DR6 or not.
+ * @thread  EMT(pVCpu)
+ */
+VMMR0DECL(void) CPUMR0LoadGuestDebugState(PVMCPU pVCpu, bool fDR6)
+{
+    /*
+     * Save the host state and disarm all host BPs.
+     */
+    cpumR0SaveHostDebugState(pVCpu);
+    Assert(ASMGetDR7() == X86_DR7_INIT_VAL);
+
+    /*
+     * Activate the guest state DR0-3.
+     * DR7 and DR6 (if fDR6 is true) are left to the caller.
+     */
+#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+    if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.s.Guest))
+        ASMAtomicOrU32(&pVCpu->cpum.s.fUseFlags, CPUM_SYNC_DEBUG_REGS_GUEST); /* Postpone it to the world switch. */
+    else
+#endif
+    {
+#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
+        cpumR0LoadDRx(&pVCpu->cpum.s.Guest.dr[0]);
+#else
+        ASMSetDR0(pVCpu->cpum.s.Guest.dr[0]);
+        ASMSetDR1(pVCpu->cpum.s.Guest.dr[1]);
+        ASMSetDR2(pVCpu->cpum.s.Guest.dr[2]);
+        ASMSetDR3(pVCpu->cpum.s.Guest.dr[3]);
+#endif
+        if (fDR6)
+            ASMSetDR6(pVCpu->cpum.s.Guest.dr[6]);
+
+        ASMAtomicOrU32(&pVCpu->cpum.s.fUseFlags, CPUM_USED_DEBUG_REGS_GUEST);
+    }
+}

+/**
+ * Lazily sync in the hypervisor debug state.
+ *
+ * @param   pVCpu   The cross context CPU structure for the calling EMT.
+ * @param   fDR6    Whether to include DR6 or not.
+ * @thread  EMT(pVCpu)
+ */
+VMMR0DECL(void) CPUMR0LoadHyperDebugState(PVMCPU pVCpu, bool fDR6)
+{
+    /*
+     * Save the host state and disarm all host BPs.
+     */
+    cpumR0SaveHostDebugState(pVCpu);
+    Assert(ASMGetDR7() == X86_DR7_INIT_VAL);
+
+    /*
+     * Make sure the hypervisor values are up to date.
+     */
+    CPUMRecalcHyperDRx(pVCpu, UINT8_MAX /* no loading, please */);
+
+    /*
+     * Activate the guest state DR0-3.
+     * DR7 and DR6 (if fDR6 is true) are left to the caller.
+     */
+#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+    if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.s.Guest))
+        ASMAtomicOrU32(&pVCpu->cpum.s.fUseFlags, CPUM_SYNC_DEBUG_REGS_HYPER); /* Postpone it. */
+    else
+#endif
+    {
+#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
+        cpumR0LoadDRx(&pVCpu->cpum.s.Hyper.dr[0]);
+#else
+        ASMSetDR0(pVCpu->cpum.s.Hyper.dr[0]);
+        ASMSetDR1(pVCpu->cpum.s.Hyper.dr[1]);
+        ASMSetDR2(pVCpu->cpum.s.Hyper.dr[2]);
+        ASMSetDR3(pVCpu->cpum.s.Hyper.dr[3]);
+#endif
+        if (fDR6)
+            ASMSetDR6(X86_DR6_INIT_VAL);
+
+        ASMAtomicOrU32(&pVCpu->cpum.s.fUseFlags, CPUM_USED_DEBUG_REGS_HYPER);
+    }
+}
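Taken together, the new CPUMR0 entry points implement a save-once / restore-once protocol driven by the USED flags. The following compilable C sketch models that protocol; the flag values, the struct and the loader callbacks are hypothetical stand-ins, not the real CPUM definitions.

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative stand-ins for the CPUM_USED_DEBUG_REGS_* flags. */
    #define DBG_USED_HOST   UINT32_C(0x01)
    #define DBG_USED_GUEST  UINT32_C(0x02)
    #define DBG_USED_HYPER  UINT32_C(0x04)

    typedef struct DbgState
    {
        uint32_t fUseFlags;
        uint64_t aHostDr[8];   /* Saved host DR0-7.  */
        uint64_t aGuestDr[8];  /* Guest DR0-7 image. */
    } DbgState;

    /* Model of CPUMR0LoadGuestDebugState: stash the host registers exactly
       once, then arm the guest values and remember that we did. */
    static void loadGuestDrx(DbgState *pState, void (*pfnSaveHost)(uint64_t *),
                             void (*pfnLoadDrx)(const uint64_t *))
    {
        if (!(pState->fUseFlags & DBG_USED_HOST))
        {
            pfnSaveHost(pState->aHostDr);  /* Also parks DR7 at a harmless value. */
            pState->fUseFlags |= DBG_USED_HOST;
        }
        pfnLoadDrx(pState->aGuestDr);
        pState->fUseFlags |= DBG_USED_GUEST;
    }

    /* Model of CPUMR0DebugStateMaybeSaveGuestAndRestoreHost: only read the
       guest values back if they were really loaded, then restore the host. */
    static bool maybeSaveGuestAndRestoreHost(DbgState *pState,
                                             void (*pfnSaveDrx)(uint64_t *),
                                             void (*pfnLoadDrx)(const uint64_t *))
    {
        bool const fWasLoaded = (pState->fUseFlags & (DBG_USED_GUEST | DBG_USED_HYPER)) != 0;
        if (pState->fUseFlags & DBG_USED_GUEST)
            pfnSaveDrx(pState->aGuestDr);           /* Hyper values need no saving. */
        pState->fUseFlags &= ~(DBG_USED_GUEST | DBG_USED_HYPER);

        if (pState->fUseFlags & DBG_USED_HOST)
        {
            pfnLoadDrx(pState->aHostDr);            /* DR0-3, DR6 first, DR7 last. */
            pState->fUseFlags &= ~DBG_USED_HOST;
        }
        return fWasLoaded;
    }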
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
(diff from r47652)

hmR0SvmLoadGuestDebugRegs: the unconditional DR6/DR7 scrubbing is replaced by assertions, and the intercept/loading decision is rewritten:

     if (!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_DEBUG))
         return;
-
-    /** @todo Turn these into assertions if possible. */
-    pCtx->dr[6] |= X86_DR6_INIT_VAL;    /* Set reserved bits to 1. */
-    pCtx->dr[6] &= ~RT_BIT(12);         /* MBZ. */
-
-    pCtx->dr[7] &= 0xffffffff;          /* Upper 32 bits MBZ. */
-    pCtx->dr[7] &= ~(RT_BIT(11) | RT_BIT(12) | RT_BIT(14) | RT_BIT(15)); /* MBZ. */
-    pCtx->dr[7] |= X86_DR7_INIT_VAL;    /* MB1. */
-
-    /* Update DR6, DR7 with the guest values. */
-    pVmcb->guest.u64DR7 = pCtx->dr[7];
-    pVmcb->guest.u64DR6 = pCtx->dr[6];
-    pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
+    Assert((pCtx->dr[6] & X86_DR6_RA1_MASK) == X86_DR6_RA1_MASK); Assert((pCtx->dr[6] & X86_DR6_RAZ_MASK) == 0);
+    Assert((pCtx->dr[7] & X86_DR7_RA1_MASK) == X86_DR7_RA1_MASK); Assert((pCtx->dr[7] & X86_DR7_RAZ_MASK) == 0);

     bool fInterceptDB = false;
     bool fInterceptMovDRx = false;
-    if (DBGFIsStepping(pVCpu))
-    {
-        /* AMD-V doesn't have any monitor-trap flag equivalent.  Instead, enable tracing in the guest and trap #DB. */
+
+    /*
+     * Anyone single stepping on the host side?  If so, we'll have to use the
+     * trap flag in the guest EFLAGS since AMD-V doesn't have a monitor trap
+     * flag at the VMM level like the VT-x implementations do.
+     */
+    bool const fStepping = pVCpu->hm.s.fSingleInstruction || DBGFIsStepping(pVCpu);
+    if (fStepping)
+    {
         pVmcb->guest.u64RFlags |= X86_EFL_TF;
         fInterceptDB = true;
+        fInterceptMovDRx = true; /* Need clean DR6, no guest mess. */
     }

     PVM pVM = pVCpu->CTX_SUFF(pVM);
-    if (CPUMGetHyperDR7(pVCpu) & (X86_DR7_ENABLED_MASK | X86_DR7_GD))
-    {
+    if (fStepping || (CPUMGetHyperDR7(pVCpu) & X86_DR7_ENABLED_MASK))
+    {
+        /*
+         * Use the combined guest and host DRx values found in the hypervisor
+         * register set because the debugger has breakpoints active or someone
+         * is single stepping on the host side.
+         *
+         * Note! DBGF expects a clean DR6 state before executing guest code.
+         */
         if (!CPUMIsHyperDebugStateActive(pVCpu))
-        {
-            int rc = CPUMR0LoadHyperDebugState(pVM, pVCpu, pCtx, true /* include DR6 */);
-            AssertRC(rc);
-
-            /* Update DR6, DR7 with the hypervisor values. */
+            CPUMR0LoadHyperDebugState(pVCpu, false /* include DR6 */);
+        Assert(!CPUMIsGuestDebugStateActive(pVCpu));
+        Assert(CPUMIsHyperDebugStateActive(pVCpu));
+
+        /* Update DR6 & DR7.  (The other DRx values are handled by CPUM one way or the other.) */
+        if (   pVmcb->guest.u64DR6 != X86_DR6_INIT_VAL
+            || pVmcb->guest.u64DR7 != CPUMGetHyperDR7(pVCpu))
+        {
             pVmcb->guest.u64DR7 = CPUMGetHyperDR7(pVCpu);
-            pVmcb->guest.u64DR6 = CPUMGetHyperDR6(pVCpu);
+            pVmcb->guest.u64DR6 = X86_DR6_INIT_VAL;
             pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
         }
-        Assert(CPUMIsHyperDebugStateActive(pVCpu));
+
+        /** @todo If we cared, we could optimize to allow the guest to read
+         *        registers with the same values. */
+        fInterceptDB = true;
         fInterceptMovDRx = true;
-    }
-    else if (pCtx->dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD))
-    {
-        if (!CPUMIsGuestDebugStateActive(pVCpu))
-        {
-            int rc = CPUMR0LoadGuestDebugState(pVM, pVCpu, pCtx, true /* include DR6 */);
-            AssertRC(rc);
-            STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
-        }
-        Assert(CPUMIsGuestDebugStateActive(pVCpu));
-        Assert(fInterceptMovDRx == false);
-    }
-    else if (!CPUMIsGuestDebugStateActive(pVCpu))
-    {
-        /* For the first time we would need to intercept MOV DRx accesses even when the guest debug registers aren't loaded. */
-        fInterceptMovDRx = true;
-    }
-
+        Log5(("hm: Loaded hyper DRx\n"));
+    }
+    else
+    {
+        /*
+         * Update DR6, DR7 with the guest values if necessary.
+         */
+        if (   pVmcb->guest.u64DR7 != pCtx->dr[7]
+            || pVmcb->guest.u64DR6 != pCtx->dr[6])
+        {
+            pVmcb->guest.u64DR7 = pCtx->dr[7];
+            pVmcb->guest.u64DR6 = pCtx->dr[6];
+            pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
+        }
+
+        /*
+         * If the guest has enabled debug registers, we need to load them
+         * prior to executing guest code so they'll trigger at the right time.
+         */
+        if (pCtx->dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD)) /** @todo Why GD? */
+        {
+            if (!CPUMIsGuestDebugStateActive(pVCpu))
+            {
+                CPUMR0LoadGuestDebugState(pVCpu, false /* include DR6 */);
+                STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
+            }
+            Assert(!CPUMIsHyperDebugStateActive(pVCpu));
+            Assert(CPUMIsGuestDebugStateActive(pVCpu));
+            Log5(("hm: Loaded guest DRx\n"));
+        }
+        /*
+         * If no debugging is enabled, we'll lazy load DR0-3.
+         */
+        else if (!CPUMIsGuestDebugStateActive(pVCpu))
+            fInterceptMovDRx = true;
+    }
+
+    /*
+     * Set up the intercepts.
+     */
     if (fInterceptDB)
         hmR0SvmAddXcptIntercept(pVmcb, X86_XCPT_DB);
…
Saving the guest state back now distinguishes whether the hyper state is loaded:

     /*
      * Guest debug registers.
      */
-    pMixedCtx->dr[6] = pVmcb->guest.u64DR6;
-    pMixedCtx->dr[7] = pVmcb->guest.u64DR7;
+    if (!CPUMIsHyperDebugStateActive(pVCpu))
+    {
+        pMixedCtx->dr[6] = pVmcb->guest.u64DR6;
+        pMixedCtx->dr[7] = pVmcb->guest.u64DR7;
+    }
+    else
+    {
+        Assert(pVmcb->guest.u64DR7 == CPUMGetHyperDR7(pVCpu));
+        CPUMSetHyperDR6(pVCpu, pVmcb->guest.u64DR6);
+    }
…
The leave path uses the new combined save/restore call:

-    /* Restore host debug registers if necessary and resync on next R0 reentry. */
-    if (CPUMIsGuestDebugStateActive(pVCpu))
-    {
-        CPUMR0SaveGuestDebugState(pVM, pVCpu, pCtx, true /* save DR6 */);
-        Assert(!CPUMIsGuestDebugStateActive(pVCpu));
-        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
-    }
-    else if (CPUMIsHyperDebugStateActive(pVCpu))
-    {
-        CPUMR0LoadHostDebugState(pVM, pVCpu);
-        Assert(!CPUMIsHyperDebugStateActive(pVCpu));
+    /*
+     * Restore host debug registers if necessary and resync on next R0 reentry.
+     */
 #ifdef VBOX_STRICT
+    if (CPUMIsHyperDebugStateActive(pVCpu))
+    {
         PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
         Assert(pVmcb->ctrl.u16InterceptRdDRx == 0xffff);
         Assert(pVmcb->ctrl.u16InterceptWrDRx == 0xffff);
+    }
 #endif
-    }
+    if (CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, false /* save DR6 */))
+        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
+    Assert(!CPUMIsHyperDebugStateActive(pVCpu));
+    Assert(!CPUMIsGuestDebugStateActive(pVCpu));

     STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatEntry);
…
(The HMSVM_ASSERT_PREEMPT_CPUID and HMSVM_VALIDATE_EXIT_HANDLER_PARAMS macros are only reindented; no functional change.)

hmR0SvmExitReadDRx (handles both DRx reads and writes):

     /* We should -not- get this VM-exit if the guest is debugging. */
-    if (CPUMIsGuestDebugStateActive(pVCpu))
-    {
-        AssertMsgFailed(("hmR0SvmExitReadDRx: Unexpected exit. pVCpu=%p pCtx=%p\n", pVCpu, pCtx));
-        return VERR_SVM_UNEXPECTED_EXIT;
-    }
-
-    if (   !DBGFIsStepping(pVCpu)
-        && !CPUMIsHyperDebugStateActive(pVCpu))
-    {
+    AssertMsgReturn(!CPUMIsGuestDebugStateActive(pVCpu),
+                    ("hmR0SvmExitReadDRx: Unexpected exit. pVCpu=%p pCtx=%p\n", pVCpu, pCtx),
+                    VERR_SVM_UNEXPECTED_EXIT);
+
+    /*
+     * Lazy DR0-3 loading?
+     */
+    if (!CPUMIsHyperDebugStateActive(pVCpu))
+    {
+        Assert(!DBGFIsStepping(pVCpu)); Assert(!pVCpu->hm.s.fSingleInstruction);
+        Log5(("hmR0SvmExitReadDRx: Lazy loading guest debug registers\n"));
+
         /* Don't intercept DRx read and writes. */
         PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
…
         /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
-        PVM pVM = pVCpu->CTX_SUFF(pVM);
-        int rc = CPUMR0LoadGuestDebugState(pVM, pVCpu, pCtx, true /* include DR6 */);
-        AssertRC(rc);
+        CPUMR0LoadGuestDebugState(pVCpu, false /* include DR6 */);
         Assert(CPUMIsGuestDebugStateActive(pVCpu));

         STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxContextSwitch);
-        return rc;
-    }
-
+        return VINF_SUCCESS;
+    }
+
+    /*
+     * Interpret the reading/writing of DRx.
+     */
     /** @todo Decode assist. */
-    VBOXSTRICTRC rc2 = EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0 /* pvFault */);
-    int rc = VBOXSTRICTRC_VAL(rc2);
+    VBOXSTRICTRC rc = EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0 /* pvFault */);
+    Log5(("hmR0SvmExitReadDRx: Emulated DRx access: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
     if (RT_LIKELY(rc == VINF_SUCCESS))
     {
         /* Not necessary for read accesses but whatever doesn't hurt for now, will be fixed with decode assist. */
+        /** @todo CPUM should set this flag! */
         pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
     }
     else
         Assert(rc == VERR_EM_INTERPRETER);
-    return rc;
+    return VBOXSTRICTRC_TODO(rc);
 }
…
hmR0SvmExitXcptDB:

     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDB);

+    /* If we set the trap flag above, we have to clear it. */
+    /** @todo HM should remember what it does and possibly do this elsewhere! */
+    if (pVCpu->hm.s.fSingleInstruction || DBGFIsStepping(pVCpu))
+        pCtx->eflags.Bits.u1TF = 0;
+
     /* This can be a fault-type #DB (instruction breakpoint) or a trap-type #DB (data breakpoint).  However, for both cases
        DR6 and DR7 are updated to what the exception handler expects.  See AMD spec. 15.12.2 "#DB (Debug)". */
-    PVM pVM = pVCpu->CTX_SUFF(pVM);
-    int rc = DBGFRZTrap01Handler(pVM, pVCpu, CPUMCTX2CORE(pCtx), pCtx->dr[6]);
+    PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
+    PVM      pVM   = pVCpu->CTX_SUFF(pVM);
+    int rc = DBGFRZTrap01Handler(pVM, pVCpu, CPUMCTX2CORE(pCtx), pVmcb->guest.u64DR6);
     if (rc == VINF_EM_RAW_GUEST_TRAP)
     {
+        Log5(("hmR0SvmExitXcptDB: DR6=%#RX64 -> guest trap\n", pVmcb->guest.u64DR6));
+        if (CPUMIsHyperDebugStateActive(pVCpu))
+            CPUMSetGuestDR6(pVCpu, CPUMGetGuestDR6(pVCpu) | pVmcb->guest.u64DR6);
+
         /* Reflect the exception back to the guest. */
         hmR0SvmSetPendingXcptDB(pVCpu);
…
     }

+    /*
+     * Update DR6.
+     */
+    if (CPUMIsHyperDebugStateActive(pVCpu))
+    {
+        Log5(("hmR0SvmExitXcptDB: DR6=%#RX64 -> %Rrc\n", pVmcb->guest.u64DR6, rc));
+        pVmcb->guest.u64DR6 = X86_DR6_INIT_VAL;
+        pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
+    }
+    else
+    {
+        AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc));
+        Assert(!pVCpu->hm.s.fSingleInstruction && !DBGFIsStepping(pVCpu));
+    }
+
     return rc;
 }
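The intercept selection above boils down to a small decision table. A simplified, self-contained C model of it follows; the type and function names are hypothetical and the DR7 mask is a local stand-in:

    #include <stdbool.h>
    #include <stdint.h>

    #define DR7_ENABLED_MASK 0xFFu /* L0-3/G0-3, stand-in for X86_DR7_ENABLED_MASK */

    typedef struct DrxPlan {
        bool fInterceptDB;     /* trap #DB in the hypervisor             */
        bool fInterceptMovDRx; /* trap MOV DRx so CPUM can lazy-load     */
        bool fLoadHyperDrx;    /* arm combined debugger + guest values   */
        bool fLoadGuestDrx;    /* arm the guest's own values             */
    } DrxPlan;

    /* Mirrors the r47660 logic: host-side stepping or armed DBGF breakpoints
       force the hyper state and full interception; otherwise the guest state
       is armed eagerly when enabled, or lazily (via MOV DRx exits) when not. */
    static DrxPlan planDrxIntercepts(bool fStepping, uint32_t uHyperDr7, uint32_t uGuestDr7)
    {
        DrxPlan p = { false, false, false, false };
        if (fStepping || (uHyperDr7 & DR7_ENABLED_MASK))
        {
            p.fLoadHyperDrx    = true;
            p.fInterceptDB     = true;  /* the #DB may belong to the debugger */
            p.fInterceptMovDRx = true;  /* keep the guest away from real DRx  */
        }
        else if (uGuestDr7 & DR7_ENABLED_MASK)
            p.fLoadGuestDrx    = true;  /* guest owns DRx, no interception    */
        else
            p.fInterceptMovDRx = true;  /* lazy-load on the first DRx access  */
        return p;
    }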
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
(diff from r47653)

hmR0VmxLoadGuestDebugRegs gets the same restructuring as the SVM code:

-    int rc = VERR_INTERNAL_ERROR_5;
-    PVM pVM = pVCpu->CTX_SUFF(pVM);
+    int  rc;
+    PVM  pVM = pVCpu->CTX_SUFF(pVM);
     bool fInterceptDB = false;
     bool fInterceptMovDRx = false;
-    if (DBGFIsStepping(pVCpu) || pVCpu->hm.s.fSingleInstruction)
+    if (pVCpu->hm.s.fSingleInstruction || DBGFIsStepping(pVCpu))
     {
         /* If the CPU supports the monitor trap flag, use it for single stepping in DBGF and avoid intercepting #DB. */
…
     }

-    if (CPUMGetHyperDR7(pVCpu) & (X86_DR7_ENABLED_MASK | X86_DR7_GD))
-    {
+    if (fInterceptDB || (CPUMGetHyperDR7(pVCpu) & X86_DR7_ENABLED_MASK))
+    {
+        /*
+         * Use the combined guest and host DRx values found in the hypervisor
+         * register set because the debugger has breakpoints active or someone
+         * is single stepping on the host side without a monitor trap flag.
+         *
+         * Note! DBGF expects a clean DR6 state before executing guest code.
+         */
         if (!CPUMIsHyperDebugStateActive(pVCpu))
-        {
-            rc = CPUMR0LoadHyperDebugState(pVM, pVCpu, pMixedCtx, true /* include DR6 */);
-            AssertRC(rc);
-        }
+            CPUMR0LoadHyperDebugState(pVCpu, true /* include DR6 */);
+        Assert(!CPUMIsGuestDebugStateActive(pVCpu));
         Assert(CPUMIsHyperDebugStateActive(pVCpu));
+
+        /* Update DR7.  (The other DRx values are handled by CPUM one way or the other.) */
+        rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, (uint32_t)CPUMGetHyperDR7(pVCpu));
+        AssertRCReturn(rc, rc);
+
+        fInterceptDB = true;
         fInterceptMovDRx = true;
     }
-    else if (pMixedCtx->dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD))
-    {
-        if (!CPUMIsGuestDebugStateActive(pVCpu))
-        {
-            rc = CPUMR0LoadGuestDebugState(pVM, pVCpu, pMixedCtx, true /* include DR6 */);
-            AssertRC(rc);
-            STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
-        }
-        Assert(CPUMIsGuestDebugStateActive(pVCpu));
-        Assert(fInterceptMovDRx == false);
-    }
-    else if (!CPUMIsGuestDebugStateActive(pVCpu))
-    {
-        /* For the first time we would need to intercept MOV DRx accesses even when the guest debug registers aren't loaded. */
-        fInterceptMovDRx = true;
-    }
-
-    /* Update the exception bitmap regarding intercepting #DB generated by the guest. */
+    else
+    {
+        /*
+         * If the guest has enabled debug registers, we need to load them
+         * prior to executing guest code so they'll trigger at the right time.
+         */
+        if (pMixedCtx->dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD)) /** @todo Why GD? */
+        {
+            if (!CPUMIsGuestDebugStateActive(pVCpu))
+            {
+                CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
+                STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
+            }
+            Assert(!CPUMIsHyperDebugStateActive(pVCpu));
+            Assert(CPUMIsGuestDebugStateActive(pVCpu));
+        }
+        /*
+         * If no debugging is enabled, we'll lazy load DR0-3.
+         */
+        else if (!CPUMIsGuestDebugStateActive(pVCpu))
+            fInterceptMovDRx = true;
+
+        /* The guest's view of its DR7 is unblemished.  Use a 32-bit write as the upper 32 bits are MBZ. */
+        rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, pMixedCtx->dr[7]);
+        AssertRCReturn(rc, rc);
+    }
+
+    /*
+     * Update the exception bitmap regarding intercepting #DB generated by the guest.
+     */
     if (fInterceptDB)
         pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_DB);
…
+    rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
+    AssertRCReturn(rc, rc);
+
+    /*
+     * Update the processor-based VM-execution controls regarding intercepting MOV DRx instructions.
+     */
     if (fInterceptMovDRx)
         pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT;
     else
         pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT;
-
-    rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
-    AssertRCReturn(rc, rc);
     rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
     AssertRCReturn(rc, rc);

-    /* The guest's view of its DR7 is unblemished.  Use 32-bit write as upper 32-bits MBZ as asserted above. */
-    rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, (uint32_t)pMixedCtx->dr[7]);
-    AssertRCReturn(rc, rc);
-
     pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_DEBUG;
-    return rc;
+    return VINF_SUCCESS;
 }
…
Reading DR7 back from the VMCS is skipped while the hyper state is loaded:

     if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_DEBUG))
     {
-        /* Upper 32-bits are always zero.  See Intel spec. 2.7.3 "Loading and Storing Debug Registers". */
-        uint32_t u32Val;
-        int rc = VMXReadVmcs32(VMX_VMCS_GUEST_DR7, &u32Val);   AssertRCReturn(rc, rc);
-        pMixedCtx->dr[7] = u32Val;
+        if (!CPUMIsHyperDebugStateActive(pVCpu))
+        {
+            /* Upper 32-bits are always zero.  See Intel spec. 2.7.3 "Loading and Storing Debug Registers". */
+            uint32_t u32Val;
+            int rc = VMXReadVmcs32(VMX_VMCS_GUEST_DR7, &u32Val);   AssertRCReturn(rc, rc);
+            pMixedCtx->dr[7] = u32Val;
+        }

         pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_DEBUG;
…
The leave path switches to the combined save/restore call:

     if (CPUMIsGuestDebugStateActive(pVCpu))
     {
-        CPUMR0SaveGuestDebugState(pVM, pVCpu, pMixedCtx, true /* save DR6 */);
-        Assert(!CPUMIsGuestDebugStateActive(pVCpu));
+        CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, false /* save DR6 */);
         pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
     }
     else if (CPUMIsHyperDebugStateActive(pVCpu))
     {
-        CPUMR0LoadHostDebugState(pVM, pVCpu);
-        Assert(!CPUMIsHyperDebugStateActive(pVCpu));
+        CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, false /* save DR6 */);
         Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT);
     }
+    Assert(!CPUMIsGuestDebugStateActive(pVCpu));
+    Assert(!CPUMIsHyperDebugStateActive(pVCpu));

     STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatEntry);
…
A review note is added to the I/O breakpoint check:

      * bootsector testcase for asserting the correct behavior (as well as
      * correctness of this code). */
+    /** @todo r=bird: DR0-3 are normally in host registers when the guest is
+     *        using them, so we're testing against potentially stale values here! */
     STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxIoCheck);
     uint32_t uIOPortLast = uIOPort + cbValue - 1;
…
The MOV DRx exit handler calls the void loader directly:

         /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
-        PVM pVM = pVCpu->CTX_SUFF(pVM);
-        rc = CPUMR0LoadGuestDebugState(pVM, pVCpu, pMixedCtx, true /* include DR6 */);
-        AssertRC(rc);
+        CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
         Assert(CPUMIsGuestDebugStateActive(pVCpu));
…
The #DB exit handler is reworked:

     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDB);

+    /*
+     * Get the DR6-like values from the exit qualification and pass them to
+     * DBGF for processing.
+     */
     int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
     AssertRCReturn(rc, rc);

     /* Refer Intel spec. Table 27-1. "Exit Qualifications for debug exceptions" for the format. */
     uint64_t uDR6 = X86_DR6_INIT_VAL;
     uDR6 |= (  pVmxTransient->uExitQualification
              & (X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3 | X86_DR6_BD | X86_DR6_BS));
-    PVM pVM = pVCpu->CTX_SUFF(pVM);
-    rc = DBGFRZTrap01Handler(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), uDR6);
+
+    rc = DBGFRZTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pMixedCtx), uDR6);
     if (rc == VINF_EM_RAW_GUEST_TRAP)
     {
-        /* DR6, DR7.GD and IA32_DEBUGCTL.LBR are not updated yet.  See Intel spec. 27.1 "Architectural State before a VM-Exit". */
-        pMixedCtx->dr[6] = uDR6;
-
+        /*
+         * The exception was for the guest.  Update DR6, DR7.GD and
+         * IA32_DEBUGCTL.LBR before forwarding it.
+         * (See Intel spec. 27.1 "Architectural State before a VM-Exit".)
+         */
+        pMixedCtx->dr[6] |= uDR6;
         if (CPUMIsGuestDebugStateActive(pVCpu))
             ASMSetDR6(pMixedCtx->dr[6]);

         rc = hmR0VmxSaveGuestDR7(pVCpu, pMixedCtx);
+        AssertRCReturn(rc, rc);

         /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
…
         /* Paranoia. */
-        pMixedCtx->dr[7] &= 0xffffffff;                                           /* Upper 32 bits MBZ. */
-        pMixedCtx->dr[7] &= ~(RT_BIT(11) | RT_BIT(12) | RT_BIT(14) | RT_BIT(15)); /* MBZ. */
-        pMixedCtx->dr[7] |= 0x400;                                                /* MB1. */
-
-        rc |= VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, (uint32_t)pMixedCtx->dr[7]);
-        AssertRCReturn(rc, rc);
-
-        int rc2 = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
-        rc2    |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
-        rc2    |= hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
-        AssertRCReturn(rc2, rc2);
+        pMixedCtx->dr[7] &= ~X86_DR7_RAZ_MASK;
+        pMixedCtx->dr[7] |= X86_DR7_RA1_MASK;
+
+        rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, (uint32_t)pMixedCtx->dr[7]);
+        AssertRCReturn(rc, rc);
+
+        /*
+         * Raise #DB in the guest.
+         */
+        rc  = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
+        rc |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
+        rc |= hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
+        AssertRCReturn(rc, rc);
         hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
                                pVmxTransient->cbInstr, pVmxTransient->uExitIntrErrorCode, 0 /* GCPtrFaultAddress */);
-        rc = VINF_SUCCESS;
-    }
+        return VINF_SUCCESS;
+    }
+
+    /*
+     * Not a guest trap, must be a hypervisor related debug event then.
+     * Update DR6 in case someone is interested in it.
+     */
+    AssertMsg(rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_DBG_BREAKPOINT, ("%Rrc\n", rc));
+    AssertReturn(CPUMIsHyperDebugStateActive(pVCpu), VERR_HM_IPE_5);
+    CPUMSetHyperDR6(pVCpu, uDR6);

     return rc;
trunk/src/VBox/VMM/VMMR3/DBGFBp.cpp
(diff from r44528)

The EMT rendezvous worker that re-arms hardware breakpoints now passes UINT8_MAX ("no guest register modified") to the extended CPUMRecalcHyperDRx:

 {
     NOREF(pVM); NOREF(pvUser);
-    return CPUMRecalcHyperDRx(pVCpu);
+    return CPUMRecalcHyperDRx(pVCpu, UINT8_MAX);
 }
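DBGF arms and disarms hardware breakpoints from ring-3 by rendezvousing all EMTs so that each VCPU recalculates its hypervisor DRx state. A minimal sketch of that calling pattern follows; it assumes the VMMR3EmtRendezvous API and callback shape of this era of the code base, so treat the flag and the prototypes as illustrative:

    /* Per-VCPU worker: no guest DRx was modified, hence UINT8_MAX. */
    static DECLCALLBACK(VBOXSTRICTRC) dbgfR3BpRecalcDrxOnCpu(PVM pVM, PVMCPU pVCpu, void *pvUser)
    {
        NOREF(pVM); NOREF(pvUser);
        return CPUMRecalcHyperDRx(pVCpu, UINT8_MAX);
    }

    /* Ring-3 caller, executed once per breakpoint arm/disarm. */
    static int dbgfR3BpRecalcAllDrx(PVM pVM)
    {
        return VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE,
                                  dbgfR3BpRecalcDrxOnCpu, NULL /*pvUser*/);
    }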
trunk/src/VBox/VMM/VMMR3/EM.cpp
(diff from r47619)

emR3Debug gains a release log statement for the fatal-error default case, and two log strings are fixed from "enmr3Debug" to "emR3Debug":

         default: /** @todo don't use default for guru, but make special error codes! */
+        {
+            LogRel(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
             rc = DBGFR3Event(pVM, DBGFEVENT_FATAL_ERROR);
             break;
+        }
…
             rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
             TMR3NotifyResume(pVM, pVCpu);
-            Log2(("EMR3ExecuteVM: enmr3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
+            Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
             break;
…
             rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
-            Log2(("EMR3ExecuteVM: enmr3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
+            Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
             if (rc != VINF_SUCCESS)
             {
trunk/src/VBox/VMM/VMMRC/TRPMRCHandlers.cpp
(diff from r46420)

The raw-mode #DB handler now merges the new DR6 bits into the guest value instead of overwriting it, since DR6 status bits are sticky:

     int rc = DBGFRZTrap01Handler(pVM, pVCpu, pRegFrame, uDr6);
     if (rc == VINF_EM_RAW_GUEST_TRAP)
-        CPUMSetGuestDR6(pVCpu, uDr6);
+        CPUMSetGuestDR6(pVCpu, CPUMGetGuestDR6(pVCpu) | uDr6);

     rc = trpmGCExitTrap(pVM, pVCpu, rc, pRegFrame);
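Why OR instead of assignment matters is easy to demonstrate. A tiny self-contained C example (the values are illustrative, B0/B1 are DR6 bits 0 and 1):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t uGuestDr6 = UINT64_C(0xFFFF0FF0) | 0x1; /* B0 still set from an earlier hit. */
        uint64_t uNewDr6   = UINT64_C(0xFFFF0FF0) | 0x2; /* The current #DB reports B1.       */

        /* Plain assignment would lose B0, which the guest may not have sampled yet: */
        assert((uNewDr6 & 0x1) == 0);

        /* OR-merging keeps both hit bits, matching real DR6 semantics where the
           CPU sets status bits but never clears them on its own: */
        uGuestDr6 |= uNewDr6;
        assert((uGuestDr6 & 0x3) == 0x3);
        return 0;
    }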
trunk/src/VBox/VMM/VMMRZ/DBGFRZ.cpp
(diff from r44528)

DBGFRZTrap01Handler now treats an unexplained #DB inside hypervisor code as a fatal error instead of merely asserting that only single stepping is implemented:

  * @param   pVCpu       Pointer to the VMCPU.
  * @param   pRegFrame   Pointer to the register frame for the trap.
- * @param   uDr6        The DR6 register value.
+ * @param   uDr6        The DR6 hypervisor register value.
  */
 VMMRZ_INT_DECL(int) DBGFRZTrap01Handler(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCUINTREG uDr6)
…
     }

-#ifdef IN_RC
     /*
-     * Currently we only implement single stepping in the guest,
-     * so we'll bitch if this is not a BS event.
+     * Either an ICEBP in hypervisor code or a guest related debug exception
+     * of sorts.
      */
-    AssertMsg(uDr6 & X86_DR6_BS, ("hey! we're not doing guest BPs yet! dr6=%RTreg %04x:%RGv\n",
-                                  uDr6, pRegFrame->cs.Sel, pRegFrame->rip));
-#endif
+    if (RT_UNLIKELY(fInHyper))
+    {
+        LogFlow(("DBGFRZTrap01Handler: unhandled bp at %04x:%RGv\n", pRegFrame->cs.Sel, pRegFrame->rip));
+        return VERR_DBGF_HYPER_DB_XCPT;
+    }

     LogFlow(("DBGFRZTrap01Handler: guest debug event %RTreg at %04x:%RGv!\n", uDr6, pRegFrame->cs.Sel, pRegFrame->rip));
-    return fInHyper ? VERR_DBGF_HYPER_DB_XCPT : VINF_EM_RAW_GUEST_TRAP;
+    return VINF_EM_RAW_GUEST_TRAP;
 }
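The resulting routing of a #DB can be summarized in a few lines of C. This sketch is modelled on the handler after this change; the status-code values are local stand-ins (the real ones live in VBox/err.h) and the inputs are simplified:

    /* Stand-in status codes (the real values live in VBox/err.h). */
    #define VINF_EM_DBG_BREAKPOINT        1
    #define VINF_EM_DBG_HYPER_BREAKPOINT  2
    #define VINF_EM_RAW_GUEST_TRAP        3
    #define VERR_DBGF_HYPER_DB_XCPT      (-4)

    /* DBGF-owned events are reported as breakpoints (hyper or guest flavour),
       an unexpected #DB in hypervisor code is now fatal, and everything else
       is forwarded to the guest. */
    static int routeTrap01(int fInHyper, int fDbgfOwnsIt)
    {
        if (fDbgfOwnsIt)
            return fInHyper ? VINF_EM_DBG_HYPER_BREAKPOINT : VINF_EM_DBG_BREAKPOINT;
        if (fInHyper)
            return VERR_DBGF_HYPER_DB_XCPT;
        return VINF_EM_RAW_GUEST_TRAP;
    }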
trunk/src/VBox/VMM/VMMSwitcher/AMD64andLegacy.mac
r41985 r47660

 ;
-; Copyright (C) 2006-2012 Oracle Corporation
+; Copyright (C) 2006-2013 Oracle Corporation
 ;
 ; This file is part of VirtualBox Open Source Edition (OSE), as
… …

     ; debug registers.
-    test esi, CPUM_USE_DEBUG_REGS | CPUM_USE_DEBUG_REGS_HOST
-    jz   htg_debug_regs_no
-    jmp  htg_debug_regs_save
+    test esi, CPUM_USE_DEBUG_REGS_HYPER | CPUM_USE_DEBUG_REGS_HOST
+    jnz  htg_debug_regs_save
 htg_debug_regs_no:
     DEBUG_CHAR('a')                     ; trashes esi
… …
     mov  rax, dr7                       ; not sure, but if I read the docs right this will trap if GD is set. FIXME!!!
     mov  [rdx + r8 + CPUMCPU.Host.dr7], rax
-    xor  eax, eax                       ; clear everything. (bit 12? is read as 1...)
-    mov  dr7, rax
+    mov  ecx, X86_DR7_INIT_VAL
+    cmp  eax, ecx
+    je   .htg_debug_regs_dr7_disabled
+    mov  dr7, rcx
+.htg_debug_regs_dr7_disabled:
     mov  rax, dr6                       ; just in case we save the state register too.
     mov  [rdx + r8 + CPUMCPU.Host.dr6], rax
     ; save host DR0-3?
-    test esi, CPUM_USE_DEBUG_REGS
-    jz   near htg_debug_regs_no
+    test esi, CPUM_USE_DEBUG_REGS_HYPER
+    jz   htg_debug_regs_no
     DEBUG_S_CHAR('S');
     mov  rax, dr0
… …
     mov  rax, dr3
     mov  [rdx + r8 + CPUMCPU.Host.dr3], rax
+    or   dword [rdx + r8 + CPUMCPU.fUseFlags], CPUM_USED_DEBUG_REGS_HOST
     jmp  htg_debug_regs_no
… …
 GLOBALNAME JmpGCTarget
     DEBUG_CHAR('-')
-    ;mov eax, 0ffff0000h
-    ;.delay_loop:
-    ;nop
-    ;dec eax
-    ;nop
-    ;jnz .delay_loop
     ; load final cr3 and do far jump to load cs.
     mov  cr3, ebp                       ; ebp set above
… …

     ; debug registers
-    test esi, CPUM_USE_DEBUG_REGS
+    test esi, CPUM_USE_DEBUG_REGS_HYPER
     jnz  htg_debug_regs_guest
 htg_debug_regs_guest_done:
… …
     mov  ebx, [edx + CPUMCPU.Hyper.dr + 8*3]
     mov  dr3, ebx
-    ;mov eax, [edx + CPUMCPU.Hyper.dr + 8*6]
-    mov  ecx, 0ffff0ff0h
+    mov  ecx, X86_DR6_INIT_VAL
     mov  dr6, ecx
     mov  eax, [edx + CPUMCPU.Hyper.dr + 8*7]
     mov  dr7, eax
+    or   dword [edx + CPUMCPU.fUseFlags], CPUM_USED_DEBUG_REGS_HYPER
     jmp  htg_debug_regs_guest_done
… …
     ; FPU context is saved before restore of host saving (another) branch.

+    ; Disable debug registers if active so they cannot trigger while switching.
+    test dword [edi + CPUMCPU.fUseFlags], CPUM_USED_DEBUG_REGS_HYPER
+    jz   .gth_disabled_dr7
+    mov  eax, X86_DR7_INIT_VAL
+    mov  dr7, eax
+.gth_disabled_dr7:

     ;;
… …
     ;mov cr2, rcx

-    ; restore debug registers (if modified) (esi must still be fUseFlags!)
-    ; (must be done after cr4 reload because of the debug extension.)
-    test esi, CPUM_USE_DEBUG_REGS | CPUM_USE_DEBUG_REGS_HOST
-    jz   short gth_debug_regs_no
-    jmp  gth_debug_regs_restore
-gth_debug_regs_no:
-
     ; Restore MSRs
     mov  rbx, rdx
… …
     mov  rdx, rbx

-    ; restore general registers.
+    ; Restore debug registers (if modified). (ESI must still be fUseFlags! Must be done late, at least after CR4!)
+    test esi, CPUM_USE_DEBUG_REGS_HOST | CPUM_USED_DEBUG_REGS_HOST | CPUM_USE_DEBUG_REGS_HYPER
+    jnz  gth_debug_regs_restore
+gth_debug_regs_done:
+    and  dword [edx + CPUMCPU.fUseFlags], ~(CPUM_USED_DEBUG_REGS_HOST | CPUM_USED_DEBUG_REGS_HYPER)
+
+    ; Restore general registers.
     mov  eax, edi                       ; restore return code. eax = return code !!
     ; mov rax, [rdx + r8 + CPUMCPU.Host.rax] - scratch + return code
… …
 gth_debug_regs_restore:
     DEBUG_S_CHAR('d')
-    xor  eax, eax
-    mov  dr7, rax                       ; paranoia or not?
-    test esi, CPUM_USE_DEBUG_REGS
-    jz   short gth_debug_regs_dr7
+    mov  rax, dr7                       ; Some DR7 paranoia first...
+    mov  ecx, X86_DR7_INIT_VAL
+    cmp  rax, rcx
+    je   .gth_debug_skip_dr7_disabling
+    mov  dr7, rcx
+.gth_debug_skip_dr7_disabling:
+    test esi, CPUM_USED_DEBUG_REGS_HOST
+    jz   .gth_debug_regs_dr7
+
     DEBUG_S_CHAR('r')
     mov  rax, [rdx + r8 + CPUMCPU.Host.dr0]
… …
     mov  rax, [rdx + r8 + CPUMCPU.Host.dr3]
     mov  dr3, rax
-gth_debug_regs_dr7:
+.gth_debug_regs_dr7:
     mov  rbx, [rdx + r8 + CPUMCPU.Host.dr6]
     mov  dr6, rbx
     mov  rcx, [rdx + r8 + CPUMCPU.Host.dr7]
     mov  dr7, rcx
+
+    ; We clear the USED flags in the main code path.
-    jmp  gth_debug_regs_no
+    jmp  gth_debug_regs_done

 ENDPROC vmmRCToHostAsm
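Several hunks above introduce the same "DR7 paranoia" idiom: read DR7 first and write the disabled value only when it is not already X86_DR7_INIT_VAL, since a MOV to DR7 is serializing and comparatively expensive. A hedged C equivalent using the IPRT wrappers (the helper name is invented for illustration; X86_DR7_INIT_VAL is the all-breakpoints-off value with the reserved read-as-one bit set):

    #include <iprt/x86.h>               /* X86_DR7_INIT_VAL */
    #include <iprt/asm-amd64-x86.h>     /* ASMGetDR7, ASMSetDR7 */

    /* Disarm DR7 only when it is actually armed, mirroring the
       .htg_debug_regs_dr7_disabled / .gth_debug_skip_dr7_disabling paths. */
    static void dbgDisarmDr7IfNeeded(void)
    {
        RTCCUINTREG uDr7 = ASMGetDR7();
        if (uDr7 != X86_DR7_INIT_VAL)
            ASMSetDR7(X86_DR7_INIT_VAL);    /* skip the costly write when already disabled */
    }

The same compare-before-write shape appears both when saving the host state on entry and in gth_debug_regs_restore on the way back to the host.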
trunk/src/VBox/VMM/VMMSwitcher/LegacyandAMD64.mac
r47652 r47660

     mov  esi, [rdx + CPUMCPU.fUseFlags] ; esi == use flags.
     test esi, CPUM_SYNC_FPU_STATE
-    jz   near gth_fpu_no
+    jz   near htg_fpu_no

 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
… …
     and  dword [rdx + CPUMCPU.fUseFlags], ~CPUM_SYNC_FPU_STATE

-gth_fpu_no:
+htg_fpu_no:
     ; Check if we need to restore the guest debug state
-    test esi, CPUM_SYNC_DEBUG_STATE
-    jz   near gth_debug_no
+    test esi, CPUM_SYNC_DEBUG_REGS_GUEST | CPUM_SYNC_DEBUG_REGS_HYPER
+    jz   htg_debug_done

 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
     mov  dword [rdx + CPUMCPU.Guest.dr + 4*8], 8
 %endif
+    test esi, CPUM_SYNC_DEBUG_REGS_HYPER
+    jnz  htg_debug_hyper
+
+    ; Guest values in DRx, letting the guest access them directly.
     mov  rax, qword [rdx + CPUMCPU.Guest.dr + 0*8]
     mov  dr0, rax
… …
     mov  dr6, rax                       ; not required for AMD-V

-    and  dword [rdx + CPUMCPU.fUseFlags], ~CPUM_SYNC_DEBUG_STATE
-
-gth_debug_no:
+    and  dword [rdx + CPUMCPU.fUseFlags], ~CPUM_SYNC_DEBUG_REGS_GUEST
+    or   dword [rdx + CPUMCPU.fUseFlags], CPUM_USED_DEBUG_REGS_GUEST
+    jmp  htg_debug_done
+
+htg_debug_hyper:
+    ; Combined values in DRx, intercepting all accesses.
+    mov  rax, qword [rdx + CPUMCPU.Hyper.dr + 0*8]
+    mov  dr0, rax
+    mov  rax, qword [rdx + CPUMCPU.Hyper.dr + 1*8]
+    mov  dr1, rax
+    mov  rax, qword [rdx + CPUMCPU.Hyper.dr + 2*8]
+    mov  dr2, rax
+    mov  rax, qword [rdx + CPUMCPU.Hyper.dr + 3*8]
+    mov  dr3, rax
+    mov  rax, qword [rdx + CPUMCPU.Hyper.dr + 6*8]
+    mov  dr6, rax                       ; not required for AMD-V
+
+    and  dword [rdx + CPUMCPU.fUseFlags], ~CPUM_SYNC_DEBUG_REGS_HYPER
+    or   dword [rdx + CPUMCPU.fUseFlags], CPUM_USED_DEBUG_REGS_HYPER
+
+htg_debug_done:

 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
     mov  dword [rdx + CPUMCPU.Guest.dr + 4*8], 9
 %endif
+
+    ;
+    ; "Call" the specified helper function.
+    ;

     ; parameter for all helper functions (pCtx)
     DEBUG64_CHAR('9')
     lea  rsi, [rdx + CPUMCPU.Guest.fpu]
-    lea  rax, [gth_return wrt rip]
+    lea  rax, [htg_return wrt rip]
     push rax                            ; return address
… …
     jz   NAME(HMRCTestSwitcher64)
     mov  eax, VERR_HM_INVALID_HM64ON32OP
-gth_return:
+htg_return:
     DEBUG64_CHAR('r')
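Where the old code knew only one CPUM_SYNC_DEBUG_STATE flag, the 32->64 helper switcher now distinguishes two cases: guest values loaded for direct access, or combined hypervisor values with all DRx accesses intercepted. In either case it converts the one-shot SYNC request into the matching USED state flag. Roughly, in C; the struct and load helper are simplified stand-ins for CPUMCPU and the MOV DRx sequences:

    #include <stdint.h>

    /* Flag values as laid out by this changeset (see the CPUMInternal.h hunk below). */
    #define CPUM_USED_DEBUG_REGS_HYPER  (1U << 8)
    #define CPUM_USED_DEBUG_REGS_GUEST  (1U << 9)
    #define CPUM_SYNC_DEBUG_REGS_GUEST  (1U << 17)
    #define CPUM_SYNC_DEBUG_REGS_HYPER  (1U << 18)

    typedef struct DBGSTATE             /* simplified stand-in for CPUMCPU */
    {
        uint32_t fUseFlags;
        uint64_t au64GuestDr[8];
        uint64_t au64HyperDr[8];
    } DBGSTATE;

    /* Stands in for the MOV DR0..DR3/DR6 block in the switcher. */
    static void loadDr0to3AndDr6(const uint64_t *pau64Dr) { (void)pau64Dr; }

    static void switcherSyncDebugRegs(DBGSTATE *pState)
    {
        if (!(pState->fUseFlags & (CPUM_SYNC_DEBUG_REGS_GUEST | CPUM_SYNC_DEBUG_REGS_HYPER)))
            return;                                     /* htg_debug_done */

        if (pState->fUseFlags & CPUM_SYNC_DEBUG_REGS_HYPER)
        {
            /* Combined values in DRx, intercepting all accesses. */
            loadDr0to3AndDr6(pState->au64HyperDr);
            pState->fUseFlags &= ~CPUM_SYNC_DEBUG_REGS_HYPER;
            pState->fUseFlags |= CPUM_USED_DEBUG_REGS_HYPER;
        }
        else
        {
            /* Guest values in DRx, letting the guest access them directly. */
            loadDr0to3AndDr6(pState->au64GuestDr);
            pState->fUseFlags &= ~CPUM_SYNC_DEBUG_REGS_GUEST;
            pState->fUseFlags |= CPUM_USED_DEBUG_REGS_GUEST;
        }
    }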
trunk/src/VBox/VMM/VMMSwitcher/PAEand32Bit.mac
r41985 r47660

 ;
-; Copyright (C) 2006-2012 Oracle Corporation
+; Copyright (C) 2006-2013 Oracle Corporation
 ;
 ; This file is part of VirtualBox Open Source Edition (OSE), as
… …

     ; debug registers.
-    test esi, CPUM_USE_DEBUG_REGS | CPUM_USE_DEBUG_REGS_HOST
+    test esi, CPUM_USE_DEBUG_REGS_HYPER | CPUM_USE_DEBUG_REGS_HOST
     jnz  htg_debug_regs_save_dr7and6
 htg_debug_regs_no:
… …

     ; debug registers
-    test esi, CPUM_USE_DEBUG_REGS
+    test esi, CPUM_USE_DEBUG_REGS_HYPER
     jnz  htg_debug_regs_guest
 htg_debug_regs_guest_done:
… …
     mov  eax, dr3
     mov  [edx + CPUMCPU.Host.dr3], eax
+    or   dword [edi + CPUMCPU.fUseFlags], CPUM_USED_DEBUG_REGS_HOST

     ; load hyper DR0-7
… …
     mov  ebx, [edx + CPUMCPU.Hyper.dr + 8*3]
     mov  dr3, ebx
-    ;mov eax, [edx + CPUMCPU.Hyper.dr + 8*6]
-    mov  ecx, 0ffff0ff0h
+    mov  ecx, X86_DR6_INIT_VAL
     mov  dr6, ecx
     mov  eax, [edx + CPUMCPU.Hyper.dr + 8*7]
     mov  dr7, eax
+    or   dword [edi + CPUMCPU.fUseFlags], CPUM_USED_DEBUG_REGS_HYPER
     jmp  htg_debug_regs_guest_done
… …
     ; No need to save CRx here. They are set dynamically according to Guest/Host requirements.
     ; FPU context is saved before restore of host saving (another) branch.
+
+    ; Disable debug regsiters if active so they cannot trigger while switching.
+    test dword [edi + CPUMCPU.fUseFlags], CPUM_USED_DEBUG_REGS_HYPER
+    jz   .gth_disabled_dr7
+    mov  eax, X86_DR7_INIT_VAL
+    mov  dr7, eax
+.gth_disabled_dr7:

 %ifdef VBOX_WITH_NMI
… …
     ; restore debug registers (if modified) (esi must still be fUseFlags!)
     ; (must be done after cr4 reload because of the debug extension.)
-    test esi, CPUM_USE_DEBUG_REGS | CPUM_USE_DEBUG_REGS_HOST
-    jz   short gth_debug_regs_no
-    jmp  gth_debug_regs_restore
-gth_debug_regs_no:
+    test esi, CPUM_USE_DEBUG_REGS_HYPER | CPUM_USE_DEBUG_REGS_HOST | CPUM_USED_DEBUG_REGS_HOST
+    jnz  gth_debug_regs_restore
+gth_debug_regs_done:

     ; restore general registers.
… …
 gth_debug_regs_restore:
     DEBUG_S_CHAR('d')
-    xor  eax, eax
-    mov  dr7, eax                       ; paranoia or not?
-    test esi, CPUM_USE_DEBUG_REGS
-    jz   short gth_debug_regs_dr7
+    mov  eax, dr7                       ; Some DR7 paranoia first...
+    mov  ecx, X86_DR7_INIT_VAL
+    cmp  eax, ecx
+    je   .gth_debug_skip_dr7_disabling
+    mov  dr7, ecx
+.gth_debug_skip_dr7_disabling:
+    test esi, CPUM_USED_DEBUG_REGS_HOST
+    jz   .gth_debug_regs_dr7
+
     DEBUG_S_CHAR('r')
     mov  eax, [edx + CPUMCPU.Host.dr0]
… …
     mov  eax, [edx + CPUMCPU.Host.dr3]
     mov  dr3, eax
-gth_debug_regs_dr7:
+.gth_debug_regs_dr7:
     mov  ebx, [edx + CPUMCPU.Host.dr6]
     mov  dr6, ebx
     mov  ecx, [edx + CPUMCPU.Host.dr7]
     mov  dr7, ecx
-    jmp  gth_debug_regs_no
+
+    and  dword [edx + CPUMCPU.fUseFlags], ~(CPUM_USED_DEBUG_REGS_HOST | CPUM_USED_DEBUG_REGS_HYPER)
+    jmp  gth_debug_regs_done

 ENDPROC vmmRCToHostAsm
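PAEand32Bit.mac receives the 32-bit twin of the AMD64andLegacy.mac changes. The detail worth isolating is the USE versus USED split on the host side: DR7 and DR6 are saved (and DR7 disarmed) whenever either debug-register user is active, while DR0-3 are saved, and CPUM_USED_DEBUG_REGS_HOST recorded, only when hypervisor breakpoints are actually wanted. A hypothetical C condensation of that save path, with the outer either-flag check omitted and an array standing in for the CPUMCPU.Host.drN fields:

    #include <iprt/types.h>
    #include <iprt/x86.h>               /* X86_DR7_INIT_VAL */
    #include <iprt/asm-amd64-x86.h>     /* ASMGetDR0..ASMGetDR7, ASMSetDR7 */

    #define CPUM_USE_DEBUG_REGS_HYPER   (1U << 7)   /* bit values per this changeset */
    #define CPUM_USED_DEBUG_REGS_HOST   (1U << 6)

    /* Hypothetical condensation of the host-to-guest debug register prologue. */
    static void saveHostDebugRegs(uint64_t au64HostDr[8], uint32_t *pfUseFlags)
    {
        au64HostDr[7] = ASMGetDR7();
        if (au64HostDr[7] != X86_DR7_INIT_VAL)
            ASMSetDR7(X86_DR7_INIT_VAL);    /* keep host breakpoints from firing mid-switch */
        au64HostDr[6] = ASMGetDR6();

        if (*pfUseFlags & CPUM_USE_DEBUG_REGS_HYPER)
        {
            au64HostDr[0] = ASMGetDR0();
            au64HostDr[1] = ASMGetDR1();
            au64HostDr[2] = ASMGetDR2();
            au64HostDr[3] = ASMGetDR3();
            *pfUseFlags |= CPUM_USED_DEBUG_REGS_HOST;   /* restore DR0-3 on the way back */
        }
    }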
trunk/src/VBox/VMM/include/CPUMInternal.h
r41932 r47660

 /** Use flags (CPUM::fUseFlags).
- * (Don't forget to sync this with CPUMInternal.mac !)
+ * (Don't forget to sync this with CPUMInternal.mac !)
  * @{ */
 /** Used the FPU, SSE or such stuff. */
… …
  * REM syncing is clearing this, lazy FPU is setting it. */
 #define CPUM_USED_FPU_SINCE_REM         RT_BIT(1)
+/** The XMM state was manually restored. (AMD only) */
+#define CPUM_USED_MANUAL_XMM_RESTORE    RT_BIT(2)
+
 /** Host OS is using SYSENTER and we must NULL the CS. */
-#define CPUM_USE_SYSENTER               RT_BIT(2)
+#define CPUM_USE_SYSENTER               RT_BIT(3)
 /** Host OS is using SYSENTER and we must NULL the CS. */
-#define CPUM_USE_SYSCALL                RT_BIT(3)
-/** Debug registers are used by host and must be disabled. */
-#define CPUM_USE_DEBUG_REGS_HOST        RT_BIT(4)
-/** Enabled use of debug registers in guest context. */
-#define CPUM_USE_DEBUG_REGS             RT_BIT(5)
-/** The XMM state was manually restored. (AMD only) */
-#define CPUM_MANUAL_XMM_RESTORE         RT_BIT(6)
-/** Sync the FPU state on entry (32->64 switcher only). */
-#define CPUM_SYNC_FPU_STATE             RT_BIT(7)
-/** Sync the debug state on entry (32->64 switcher only). */
-#define CPUM_SYNC_DEBUG_STATE           RT_BIT(8)
-/** Enabled use of hypervisor debug registers in guest context. */
-#define CPUM_USE_DEBUG_REGS_HYPER       RT_BIT(9)
+#define CPUM_USE_SYSCALL                RT_BIT(4)
+
+/** Debug registers are used by host and that DR7 and DR6 must be saved and
+ * disabled when switching to raw-mode. */
+#define CPUM_USE_DEBUG_REGS_HOST        RT_BIT(5)
+/** Records that we've saved the host DRx registers.
+ * In ring-0 this means all (DR0-7), while in raw-mode context this means DR0-3
+ * since DR6 and DR7 are covered by CPUM_USE_DEBUG_REGS_HOST. */
+#define CPUM_USED_DEBUG_REGS_HOST       RT_BIT(6)
+/** Set to indicate that we should save host DR0-7 and load the hypervisor debug
+ * registers in the raw-mode world switchers. (See CPUMRecalcHyperDRx.) */
+#define CPUM_USE_DEBUG_REGS_HYPER       RT_BIT(7)
+/** Used in ring-0 to indicate that we have loaded the hypervisor debug
+ * registers. */
+#define CPUM_USED_DEBUG_REGS_HYPER      RT_BIT(8)
+/** Used in ring-0 to indicate that we have loaded the guest debug
+ * registers (DR0-3 and maybe DR6) for direct use by the guest.
+ * DR7 (and AMD-V DR6) are handled via the VMCB. */
+#define CPUM_USED_DEBUG_REGS_GUEST      RT_BIT(9)
+
+
+/** Sync the FPU state on next entry (32->64 switcher only). */
+#define CPUM_SYNC_FPU_STATE             RT_BIT(16)
+/** Sync the debug state on next entry (32->64 switcher only). */
+#define CPUM_SYNC_DEBUG_REGS_GUEST      RT_BIT(17)
+/** Sync the debug state on next entry (32->64 switcher only).
+ * Almost the same as CPUM_USE_DEBUG_REGS_HYPER in the raw-mode switchers. */
+#define CPUM_SYNC_DEBUG_REGS_HYPER      RT_BIT(18)
 /** @} */
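This header reshuffle is the heart of the changeset: bits 0-9 now describe current state, with USE_* meaning "wanted" and USED_* meaning "currently loaded", while bits 16-18 carry one-shot SYNC_* requests for the 32->64 switcher. A purely illustrative decoder makes the three families explicit (flag values copied from the new header):

    #include <stdint.h>
    #include <stdio.h>

    #define CPUM_USE_DEBUG_REGS_HOST    (1U << 5)
    #define CPUM_USED_DEBUG_REGS_HOST   (1U << 6)
    #define CPUM_USE_DEBUG_REGS_HYPER   (1U << 7)
    #define CPUM_USED_DEBUG_REGS_HYPER  (1U << 8)
    #define CPUM_USED_DEBUG_REGS_GUEST  (1U << 9)
    #define CPUM_SYNC_DEBUG_REGS_GUEST  (1U << 17)
    #define CPUM_SYNC_DEBUG_REGS_HYPER  (1U << 18)

    /* USE_* = policy requests, USED_* = what is loaded right now,
       SYNC_* = deferred work for the 32->64 switcher. */
    static void cpumPrintDebugRegFlags(uint32_t fUseFlags)
    {
        printf("host wants DRx: %d, host DRx saved: %d\n",
               !!(fUseFlags & CPUM_USE_DEBUG_REGS_HOST),
               !!(fUseFlags & CPUM_USED_DEBUG_REGS_HOST));
        printf("hyper DRx wanted: %d, hyper DRx loaded: %d, guest DRx loaded: %d\n",
               !!(fUseFlags & CPUM_USE_DEBUG_REGS_HYPER),
               !!(fUseFlags & CPUM_USED_DEBUG_REGS_HYPER),
               !!(fUseFlags & CPUM_USED_DEBUG_REGS_GUEST));
        printf("pending sync: guest=%d hyper=%d\n",
               !!(fUseFlags & CPUM_SYNC_DEBUG_REGS_GUEST),
               !!(fUseFlags & CPUM_SYNC_DEBUG_REGS_HYPER));
    }

Separating the request bits from the loaded-state bits is what lets the switchers above clear only the USED flags on the way back to the host without forgetting a still-pending USE request.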
trunk/src/VBox/VMM/include/CPUMInternal.mac
r44528 r47660

 %include "VBox/asmdefs.mac"

+
 %define CPUM_USED_FPU                   RT_BIT(0)
 %define CPUM_USED_FPU_SINCE_REM         RT_BIT(1)
-%define CPUM_USE_SYSENTER               RT_BIT(2)
-%define CPUM_USE_SYSCALL                RT_BIT(3)
-%define CPUM_USE_DEBUG_REGS_HOST        RT_BIT(4)
-%define CPUM_USE_DEBUG_REGS             RT_BIT(5)
-%define CPUM_SYNC_FPU_STATE             RT_BIT(7)
-%define CPUM_SYNC_DEBUG_STATE           RT_BIT(8)
+%define CPUM_USED_MANUAL_XMM_RESTORE    RT_BIT(2)
+%define CPUM_USE_SYSENTER               RT_BIT(3)
+%define CPUM_USE_SYSCALL                RT_BIT(4)
+%define CPUM_USE_DEBUG_REGS_HOST        RT_BIT(5)
+%define CPUM_USED_DEBUG_REGS_HOST       RT_BIT(6)
+%define CPUM_USE_DEBUG_REGS_HYPER       RT_BIT(7)
+%define CPUM_USED_DEBUG_REGS_HYPER      RT_BIT(8)
+%define CPUM_USED_DEBUG_REGS_GUEST      RT_BIT(9)
+%define CPUM_SYNC_FPU_STATE             RT_BIT(16)
+%define CPUM_SYNC_DEBUG_REGS_GUEST      RT_BIT(17)
+%define CPUM_SYNC_DEBUG_REGS_HYPER      RT_BIT(18)
+

 %define CPUM_HANDLER_DS                 1
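The assembly side must mirror the header bit for bit, as the comment in CPUMInternal.h warns. One way to make that duplication fail loudly instead of silently is a block of compile-time checks pinning the literal values; VBox's own AssertCompile would be the natural vehicle, but plain C11 static_assert keeps this sketch standalone (flag values copied from the hunks above):

    #include <assert.h>     /* static_assert (C11) */
    #include <stdint.h>

    #define CPUM_USED_DEBUG_REGS_GUEST  (1U << 9)
    #define CPUM_SYNC_DEBUG_REGS_GUEST  (1U << 17)
    #define CPUM_SYNC_DEBUG_REGS_HYPER  (1U << 18)

    /* Pin the bit positions the NASM side (CPUMInternal.mac) hard-codes, so a
       future renumbering in one file breaks the build instead of diverging. */
    static_assert(CPUM_USED_DEBUG_REGS_GUEST == UINT32_C(0x00000200), "sync with CPUMInternal.mac");
    static_assert(CPUM_SYNC_DEBUG_REGS_GUEST == UINT32_C(0x00020000), "sync with CPUMInternal.mac");
    static_assert(CPUM_SYNC_DEBUG_REGS_HYPER == UINT32_C(0x00040000), "sync with CPUMInternal.mac");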