Changeset 86477 in vbox for trunk/src/VBox/VMM/VMMAll
- Timestamp: Oct 7, 2020, 8:10:05 PM (5 years ago)
- svn:sync-xref-src-repo-rev: 140787
- File: 1 edited
Legend:
- Unmodified
- Added
- Removed
trunk/src/VBox/VMM/VMMAll/PGMAllBth.h
r86476 r86477 464 464 } 465 465 466 /* Take the big lock now before we update flags. */ 467 *pfLockTaken = true; 468 pgmLock(pVM); 469 466 470 /* 467 471 * Set the accessed and dirty flags. 468 472 */ 469 /** @todo Use atomics here as we don't own the lock and stuff: */ 473 /** @todo Should probably use cmpxchg logic here as we're potentially racing 474 * other CPUs in SMP configs. (the lock isn't enough, since we take it 475 * after walking and the page tables could be stale already) */ 470 476 # if PGM_GST_TYPE == PGM_TYPE_AMD64 471 GstWalk.Pml4e.u |= X86_PML4E_A; 472 GstWalk.pPml4e->u |= X86_PML4E_A; 473 GstWalk.Pdpe.u |= X86_PDPE_A; 474 GstWalk.pPdpe->u |= X86_PDPE_A; 477 if (!(GstWalk.Pml4e.u & X86_PML4E_A)) 478 { 479 GstWalk.Pml4e.u |= X86_PML4E_A; 480 GST_ATOMIC_OR(&GstWalk.pPml4e->u, X86_PML4E_A); 481 } 482 if (!(GstWalk.Pdpe.u & X86_PDPE_A)) 483 { 484 GstWalk.Pdpe.u |= X86_PDPE_A; 485 GST_ATOMIC_OR(&GstWalk.pPdpe->u, X86_PDPE_A); 486 } 475 487 # endif 476 488 if (GstWalk.Core.fBigPage) … … 479 491 if (uErr & X86_TRAP_PF_RW) 480 492 { 481 GstWalk.Pde.u |= X86_PDE4M_A | X86_PDE4M_D; 482 GstWalk.pPde->u |= X86_PDE4M_A | X86_PDE4M_D; 493 if ((GstWalk.Pde.u & (X86_PDE4M_A | X86_PDE4M_D)) != (X86_PDE4M_A | X86_PDE4M_D)) 494 { 495 GstWalk.Pde.u |= X86_PDE4M_A | X86_PDE4M_D; 496 GST_ATOMIC_OR(&GstWalk.pPde->u, X86_PDE4M_A | X86_PDE4M_D); 497 } 483 498 } 484 499 else 485 500 { 486 GstWalk.Pde.u |= X86_PDE4M_A; 487 GstWalk.pPde->u |= X86_PDE4M_A; 501 if (!(GstWalk.Pde.u & X86_PDE4M_A)) 502 { 503 GstWalk.Pde.u |= X86_PDE4M_A; 504 GST_ATOMIC_OR(&GstWalk.pPde->u, X86_PDE4M_A); 505 } 488 506 } 489 507 } … … 491 509 { 492 510 Assert(!(GstWalk.Pde.u & X86_PDE_PS)); 493 GstWalk.Pde.u |= X86_PDE_A; 494 GstWalk.pPde->u |= X86_PDE_A; 511 if (!(GstWalk.Pde.u & X86_PDE_A)) 512 { 513 GstWalk.Pde.u |= X86_PDE_A; 514 GST_ATOMIC_OR(&GstWalk.pPde->u, X86_PDE_A); 515 } 516 495 517 if (uErr & X86_TRAP_PF_RW) 496 518 { 497 519 # ifdef VBOX_WITH_STATISTICS 498 if ( 
!GstWalk.Pte.n.u1Dirty)520 if (GstWalk.Pte.u & X86_PTE_D) 499 521 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,DirtiedPage)); 500 522 else 501 523 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageAlreadyDirty)); 502 524 # endif 503 GstWalk.Pte.u |= X86_PTE_A | X86_PTE_D; 504 GstWalk.pPte->u |= X86_PTE_A | X86_PTE_D; 525 if ((GstWalk.Pte.u & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D)) 526 { 527 GstWalk.Pte.u |= X86_PTE_A | X86_PTE_D; 528 GST_ATOMIC_OR(&GstWalk.pPte->u, X86_PTE_A | X86_PTE_D); 529 } 505 530 } 506 531 else 507 532 { 508 GstWalk.Pte.u |= X86_PTE_A; 509 GstWalk.pPte->u |= X86_PTE_A; 533 if (!(GstWalk.Pte.u & X86_PTE_A)) 534 { 535 GstWalk.Pte.u |= X86_PTE_A; 536 GST_ATOMIC_OR(&GstWalk.pPte->u, X86_PTE_A); 537 } 510 538 } 511 539 Assert(GstWalk.Pte.u == GstWalk.pPte->u); … … 515 543 # else /* !PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */ 516 544 GSTPDE const PdeSrcDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A}; /** @todo eliminate this */ 517 # endif /* !PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */518 545 519 546 /* Take the big lock now. */ 520 547 *pfLockTaken = true; 521 548 pgmLock(pVM); 549 # endif /* !PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */ 522 550 523 551 # ifdef PGM_WITH_MMIO_OPTIMIZATIONS
Note: See TracChangeset for help on using the changeset viewer.