Changeset 100868 in vbox for trunk/src/VBox/VMM
- Timestamp: Aug 14, 2023 12:49:27 AM
- Location: trunk/src/VBox/VMM
- Files: 4 edited
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
Changes from r100860 to r100868:

Log level documentation (lines 79-101): the "Memory writes" and "Memory reads" descriptions are dropped from levels 8 and 9 of the IEM log group (those levels are now left blank), and a new paragraph documents the IEM_MEM group that takes over the memory detail logging:

    * The "IEM_MEM" log group covers most of memory related details logging,
    * except for errors and exceptions:
    * - Level 1 (Log)  : Reads.
    * - Level 2 (Log2) : Read fallbacks.
    * - Level 3 (Log3) : MemMap read.
    * - Level 4 (Log4) : MemMap read fallbacks.
    * - Level 5 (Log5) : Writes
    * - Level 6 (Log6) : Write fallbacks.
    * - Level 7 (Log7) : MemMap writes and read-writes.
    * - Level 8 (Log8) : MemMap write and read-write fallbacks.
    * - Level 9 (Log9) : Stack reads.
    * - Level 10 (Log10): Stack read fallbacks.
    * - Level 11 (Log11): Stack writes.
    * - Level 12 (Log12): Stack write fallbacks.
    * - Flow (LogFlow)  :

LOG_GROUP switch: the memory access section (from around line 5480) is now compiled under the new group, and the original group is restored at the end of the section (around line 8080):

    #undef LOG_GROUP
    #define LOG_GROUP LOG_GROUP_IEM_MEM
    ...
    #undef LOG_GROUP
    #define LOG_GROUP LOG_GROUP_IEM

Statements kept in the IEM group: because plain Log*() in that section now lands in IEM_MEM, the error and PGM status messages that belong to the IEM group were converted to the explicit-group form. This affects the "-> #PF" messages in iemMemPageTranslateAndCheckAccess, iemMemMap and iemMemMapJmp, the PGMPhysWrite / PGMPhysSimpleWriteGCPhys status messages in iemMemBounceBufferCommitAndUnmap, the PGMPhysRead / PGMPhysSimpleReadGCPhys messages in iemMemBounceBufferMapPhys, and the selector messages in iemMemFetchSelDesc, for example:

    - Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
    + LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));

The remaining memory detail statements were renumbered to match the IEM_MEM level table above:

    - The IEM_LOG_MEMORY_WRITES dumps ("IEM Wrote %RGp: ..."): Log -> Log5.
    - "IEM WR ..." in iemMemMap / iemMemMapJmp: Log8 -> Log6; "IEM RD ...": Log9 -> Log2.
    - "IEM RD dword/tword/dqword/qqword ..." in the data fetch helpers: Log9 -> Log.
    - "IEM WR dqword/qqword ..." in the data store helpers: Log8 -> Log5.
trunk/src/VBox/VMM/VMMAll/IEMAllMemRWTmpl.cpp.h
Changes from r100860 to r100868:

The template workers in this file (the non-inlined, safe code paths) get their log levels renumbered for the IEM_MEM group; the messages themselves are unchanged:

    fetch workers      ("IEM RD ..."):        Log9 -> Log2
    store workers      ("IEM WR ..."):        Log8 -> Log6
    read-only mapping  ("IEM RO/map ..."):    Log9 -> Log4
    stack push workers ("IEM WR ... SS|..."): Log8 -> Log12
    stack pop workers  ("IEM RD ... SS|..."): Log9 -> Log10

For example:

    - Log9(("IEM RD " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, *puDst));
    + Log2(("IEM RD " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, *puDst));
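Because these are template workers, each renumbered statement fans out to every access width for which the file is instantiated. The instantiation follows the usual include-template pattern; roughly (a hypothetical example, the real TMPL_MEM_* values are set by the including file and are not part of this changeset):

    /* Hypothetical instantiation for 32-bit accesses. */
    #define TMPL_MEM_TYPE        uint32_t
    #define TMPL_MEM_TYPE_ALIGN  3
    #define TMPL_MEM_FN_SUFF     U32
    #define TMPL_MEM_FMT_TYPE    "%#010x"
    #define TMPL_MEM_FMT_DESC    "dword"
    #include "IEMAllMemRWTmpl.cpp.h"   /* emits the ...U32 fetch/store/push/pop workers */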
trunk/src/VBox/VMM/VMMAll/IEMAllMemRWTmplInline.cpp.h
Changes from r100866 to r100868:

The "/** @todo fix logging */" note at the top is removed and the logging in the inlined TLB fast paths is brought in line with the new IEM_MEM level table. All statements now use the explicit-group *Ex forms with LOG_GROUP_IEM_MEM, so they stay in that group regardless of the includer's LOG_GROUP:

    fetch fast path   ("IEM RD ..."):        Log9 -> LogEx(LOG_GROUP_IEM_MEM, ...)
    store fast path   ("IEM WR ..."):        Log9 -> Log5Ex(LOG_GROUP_IEM_MEM, ...)
    read-write map    ("IEM RW/map ..."):    Log8 -> Log7Ex(LOG_GROUP_IEM_MEM, ...)
    write-only map    ("IEM WO/map ..."):    Log8 -> Log7Ex(LOG_GROUP_IEM_MEM, ...)
    read-only map     ("IEM RO/map ..."):    Log9 -> Log3Ex(LOG_GROUP_IEM_MEM, ...)
    stack push        ("IEM WR ... SS|..."): Log8 -> Log11Ex(LOG_GROUP_IEM_MEM, ...)
    stack pop         ("IEM RD ... SS|..."): Log9 -> Log9Ex(LOG_GROUP_IEM_MEM, ...)

The "falling back" statements on the slow paths, previously Log10Func(), likewise become explicit-group statements (LogEx, Log6Ex, Log8Ex for both mapping flavours, Log4Ex, Log12Ex and Log10Ex respectively) that spell the function name out with LOG_FN_FMT / LOG_FN_NAME.
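One detail of that conversion: the old fall-back statements used the Log10Func() wrapper, which prefixes the message with the calling function's name; the new explicit-group statements reproduce that by hand with the LOG_FN_FMT / LOG_FN_NAME helpers from iprt/log.h, for example (the stack-pop fall-back from this file):

    - Log10Func(("%RX64 falling back\n", uOldRsp));
    + Log10Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RX64 falling back\n", LOG_FN_NAME, uOldRsp));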
trunk/src/VBox/VMM/include/IEMInline.h
Changes from r100860 to r100868:

The Log() statements in this header are converted to LogEx(LOG_GROUP_IEM, ...) so they keep logging under the IEM group no matter which LOG_GROUP the including code is compiled with. This covers the six "IEM: rcPassUp=..." messages in the status-code pass-up handling and the bad-selector messages in iemMemSegCheckWriteAccessEx and iemMemSegCheckReadAccessEx, for example:

    - Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
    + LogEx(LOG_GROUP_IEM,("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
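With the split in place, the memory detail logging can be turned on independently of the rest of IEM at run time through the usual log group strings. Assuming the new group is registered under the lowercase name iem_mem (its registration lives in VBox/log.h, outside this changeset), something along these lines would enable IEM errors plus the stack-write details:

    VBOX_LOG="+iem.e+iem_mem.e.l11.l12"    # hypothetical group string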