- Timestamp:
- Jun 13, 2009 11:56:48 PM (16 years ago)
- Location:
- trunk/src/VBox/VMM
- Files:
- 6 edited
Legend:
- Unmodified
- Added
- Removed
trunk/src/VBox/VMM/VMM.cpp
r20533 r20545 380 380 STAM_REG(pVM, &pVM->vmm.s.StatRZCallVMSetError, STAMTYPE_COUNTER, "/VMM/RZCallR3/VMSetError", STAMUNIT_OCCURENCES, "Number of VMMCALLHOST_VM_SET_ERROR calls."); 381 381 STAM_REG(pVM, &pVM->vmm.s.StatRZCallVMSetRuntimeError, STAMTYPE_COUNTER, "/VMM/RZCallR3/VMRuntimeError", STAMUNIT_OCCURENCES, "Number of VMMCALLHOST_VM_SET_RUNTIME_ERROR calls."); 382 383 #ifdef VBOX_WITH_STATISTICS 384 for (VMCPUID i = 0; i < pVM->cCPUs; i++) 385 { 386 STAMR3RegisterF(pVM, &pVM->aCpus[i].vmm.s.CallHostR0JmpBuf.cbUsedMax, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Max amount of stack used.", "/VMM/Stack/CPU%u/Max", i); 387 STAMR3RegisterF(pVM, &pVM->aCpus[i].vmm.s.CallHostR0JmpBuf.cbUsedAvg, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Average stack usage.", "/VMM/Stack/CPU%u/Avg", i); 388 STAMR3RegisterF(pVM, &pVM->aCpus[i].vmm.s.CallHostR0JmpBuf.cUsedTotal, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of stack usages.", "/VMM/Stack/CPU%u/Uses", i); 389 } 390 #endif 382 391 } 383 392 -
trunk/src/VBox/VMM/VMMInternal.h
r20533 r20545 170 170 /** EBP/RBP at the time of the jump to ring 3. */ 171 171 RTHCUINTREG SavedEbp; 172 173 /** Stats: Max amount of stack used. */ 174 uint32_t cbUsedMax; 175 /** Stats: Average stack usage. (Avg = cbUsedTotal / cUsedTotal) */ 176 uint32_t cbUsedAvg; 177 /** Stats: Total amount of stack used. */ 178 uint64_t cbUsedTotal; 179 /** Stats: Number of stack usages. */ 180 uint64_t cUsedTotal; 172 181 } VMMR0JMPBUF; 173 182 /** Pointer to a ring-0 jump buffer. */ -
trunk/src/VBox/VMM/VMMInternal.mac
r20543 r20545 78 78 .SavedEbp resq 1 79 79 %endif 80 81 ; Statistics 82 .cbUsedMax resd 1 83 .cbUsedAvg resd 1 84 .cbUsedTotal resq 1 85 .cUsedTotal resq 1 80 86 endstruc 81 87 -
trunk/src/VBox/VMM/VMMR0/VMMR0JmpA-amd64.asm
r20543 r20545 27 27 %include "iprt/err.mac" 28 28 %include "VBox/param.mac" 29 30 31 ;******************************************************************************* 32 ;* Defined Constants And Macros * 33 ;******************************************************************************* 34 %define RESUME_MAGIC 07eadf00dh 35 %define STACK_PADDING 0eeeeeeeeeeeeeeeeh 36 37 38 ; For vmmR0LoggerWrapper. (The other architecture(s) use(s) C99 variadict macros.) 39 extern NAME(RTLogLogger) 29 40 30 41 … … 94 105 mov rdi, r15 95 106 mov rcx, VMM_STACK_SIZE / 8 96 mov rax, 00eeeeeeeeffeeeeeeeh107 mov rax, qword 0eeeeeeeffeeeeeeeh 97 108 repne stosq 98 109 mov [rdi - 10h], rbx … … 205 216 ; 206 217 %ifdef VBOX_STRICT 207 pop eax ; magic208 cmp eax, 0f00dbed0h218 pop rax ; magic 219 cmp rax, RESUME_MAGIC 209 220 je .magic_ok 210 221 mov ecx, 0123h … … 252 263 pushf 253 264 %ifdef VBOX_STRICT 254 push dword 0f00dbed0h265 push RESUME_MAGIC 255 266 %endif 256 267 … … 333 344 %ifdef VBOX_STRICT 334 345 pop rax ; magic 335 cmp eax, 0f00dbed0h346 cmp rax, RESUME_MAGIC 336 347 je .magic_ok 337 348 mov ecx, 0123h -
trunk/src/VBox/VMM/VMMR0/VMMR0JmpA-x86.asm
r20543 r20545 27 27 %include "iprt/err.mac" 28 28 %include "VBox/param.mac" 29 30 31 ;******************************************************************************* 32 ;* Defined Constants And Macros * 33 ;******************************************************************************* 34 %define RESUME_MAGIC 07eadf00dh 35 %define STACK_PADDING 0eeeeeeeeh 29 36 30 37 … … 81 88 mov edi, esi 82 89 mov ecx, VMM_STACK_SIZE / 4 83 mov eax, 0eeeeeeeeh90 mov eax, STACK_PADDING 84 91 repne stosd 85 92 %endif … … 96 103 mov esp, esi ; Switch stack! 97 104 call eax 98 and dword [esi + 1ch], byte 0 ; clearmarker.105 and dword [esi + 1ch], byte 0 ; reset marker. 99 106 100 107 %ifdef VBOX_STRICT 101 mov esi, [ebx + VMMR0JMPBUF.pvSavedStack] 102 cmp [esi], 0eeeeeeeeh ; Check for stack overflow 108 ; Calc stack usage and check for overflows. 109 mov edi, [ebx + VMMR0JMPBUF.pvSavedStack] 110 cmp dword [edi], STACK_PADDING ; Check for obvious stack overflow. 103 111 jne .stack_overflow 104 cmp [esi + 04h], 0eeeeeeeeh 105 jne .stack_overflow 106 cmp [esi + 08h], 0eeeeeeeeh 107 jne .stack_overflow 108 cmp [esi + 0ch], 0eeeeeeeeh 109 jne .stack_overflow 110 cmp [esi + 10h], 0eeeeeeeeh 111 jne .stack_overflow 112 cmp [esi + 20h], 0eeeeeeeeh 113 jne .stack_overflow 114 cmp [esi + 30h], 0eeeeeeeeh 115 jne .stack_overflow 116 mov dword [esi], 0h ; Reset the marker 117 %endif 112 mov esi, eax ; save eax 113 mov eax, STACK_PADDING 114 mov ecx, VMM_STACK_SIZE / 4 115 cld 116 repe scasd 117 mov eax, esi ; restore eax in case of overflow (esi remains used) 118 mov edi, VMM_STACK_SIZE 119 shl ecx, 2 ; *4 120 sub edi, ecx 121 cmp edi, VMM_STACK_SIZE - 64 ; Less than 64 bytes left -> overflow as well. 122 jae .stack_overflow_almost 123 124 ; Update stack usage statistics. 125 cmp edi, [ebx + VMMR0JMPBUF.cbUsedMax] ; New max usage? 126 jle .no_used_max 127 mov [ebx + VMMR0JMPBUF.cbUsedMax], edi 128 .no_used_max: 129 ; To simplify the average stuff, just historize before we hit div errors. 
130 inc dword [ebx + VMMR0JMPBUF.cUsedTotal] 131 test [ebx + VMMR0JMPBUF.cUsedTotal], dword 0c0000000h 132 jz .no_historize 133 mov dword [ebx + VMMR0JMPBUF.cUsedTotal], 2 134 mov ecx, [ebx + VMMR0JMPBUF.cbUsedAvg] 135 mov [ebx + VMMR0JMPBUF.cbUsedTotal], ecx 136 mov dword [ebx + VMMR0JMPBUF.cbUsedTotal + 4], 0 137 .no_historize: 138 add [ebx + VMMR0JMPBUF.cbUsedTotal], edi 139 adc dword [ebx + VMMR0JMPBUF.cbUsedTotal + 4], 0 140 mov eax, [ebx + VMMR0JMPBUF.cbUsedTotal] 141 mov edx, [ebx + VMMR0JMPBUF.cbUsedTotal + 4] 142 mov ecx, [ebx + VMMR0JMPBUF.cUsedTotal] 143 div ecx 144 mov [ebx + VMMR0JMPBUF.cbUsedAvg], eax 145 146 mov eax, esi ; restore eax (final, esi released) 147 148 mov edi, [ebx + VMMR0JMPBUF.pvSavedStack] 149 mov dword [edi], 0h ; Reset the overflow marker. 150 %endif ; VBOX_STRICT 118 151 119 152 %else ; !VMM_R0_SWITCH_STACK … … 147 180 .stack_overflow: 148 181 mov eax, VERR_INTERNAL_ERROR_5 182 mov edx, ebx 183 jmp .proper_return 184 185 .stack_overflow_almost: 186 mov eax, VERR_INTERNAL_ERROR 187 mov edx, ebx 149 188 jmp .proper_return 150 189 … … 168 207 mov eax, [xDX + VMMR0JMPBUF.pvSavedStack] 169 208 %ifdef RT_STRICT 170 cmp dword [eax], 0eeeeeeeeh209 cmp dword [eax], STACK_PADDING 171 210 %endif 172 211 lea eax, [eax + VMM_STACK_SIZE - 32] … … 215 254 %ifdef VBOX_STRICT 216 255 pop eax ; magic 217 cmp eax, 0f00dbed0h256 cmp eax, RESUME_MAGIC 218 257 je .magic_ok 219 258 mov ecx, 0123h … … 249 288 pushf 250 289 %ifdef VBOX_STRICT 251 push dword 0f00dbed0h290 push RESUME_MAGIC 252 291 %endif 253 292 … … 318 357 %ifdef VBOX_STRICT 319 358 pop eax ; magic 320 cmp eax, 0f00dbed0h359 cmp eax, RESUME_MAGIC 321 360 je .magic_ok 322 361 mov ecx, 0123h -
trunk/src/VBox/VMM/testcase/tstVMMR0CallHost-1.cpp
r20543 r20545 23 23 * Header Files * 24 24 *******************************************************************************/ 25 #include <VBox/err.h> 26 #include <VBox/param.h> 27 #include <iprt/alloca.h> 25 28 #include <iprt/initterm.h> 26 29 #include <iprt/string.h> 27 30 #include <iprt/stream.h> 28 #include <iprt/alloca.h>29 31 #include <iprt/test.h> 30 #include <VBox/err.h>31 32 32 33 #define IN_VMM_R0 … … 51 52 /** The number of jumps we've done. */ 52 53 static unsigned volatile g_cJmps; 54 /** Number of bytes allocated last time we called foo(). */ 55 static size_t volatile g_cbFoo; 56 /** Number of bytes used last time we called foo(). */ 57 static intptr_t volatile g_cbFooUsed; 53 58 54 59 … … 56 61 { 57 62 /* allocate a buffer which we fill up to the end. */ 58 size_t cb = (i % 5555) + 32; 63 size_t cb = (i % 1555) + 32; 64 g_cbFoo = cb; 59 65 char *pv = (char *)alloca(cb); 60 66 RTStrPrintf(pv, cb, "i=%d%*s\n", i, cb, ""); 67 #ifdef VMM_R0_SWITCH_STACK 68 g_cbFooUsed = VMM_STACK_SIZE - ((uintptr_t)pv - (uintptr_t)g_Jmp.pvSavedStack); 69 RTTESTI_CHECK_MSG_RET(g_cbFooUsed < VMM_STACK_SIZE - 128, ("%#x - (%p - %p) -> %#x; cb=%#x i=%d\n", VMM_STACK_SIZE, pv, g_Jmp.pvSavedStack, g_cbFooUsed, cb, i), -15); 70 #elif defined(RT_ARCH_AMD64) 71 g_cbFooUsed = (uintptr_t)g_Jmp.rsp - (uintptr_t)pv; 72 RTTESTI_CHECK_MSG_RET(g_cbFooUsed < VMM_STACK_SIZE - 128, ("%p - %p -> %#x; cb=%#x i=%d\n", g_Jmp.rsp, pv, g_cbFooUsed, cb, i), -15); 73 #elif defined(RT_ARCH_X86) 74 g_cbFooUsed = (uintptr_t)g_Jmp.esp - (uintptr_t)pv; 75 RTTESTI_CHECK_MSG_RET(g_cbFooUsed < VMM_STACK_SIZE - 128, ("%p - %p -> %#x; cb=%#x i=%d\n", g_Jmp.esp, pv, g_cbFooUsed, cb, i), -15); 76 #endif 61 77 62 78 /* Do long jmps every 7th time */ … … 92 108 #endif 93 109 110 RTR0PTR R0PtrSaved = g_Jmp.pvSavedStack; 111 RT_ZERO(g_Jmp); 112 g_Jmp.pvSavedStack = R0PtrSaved; 113 memset((void *)g_Jmp.pvSavedStack, '\0', VMM_STACK_SIZE); 114 g_cbFoo = 0; 94 115 g_cJmps = 0; 116 g_cbFooUsed = 0; 117 95 118 for (int 
i = iFrom, iItr = 0; i != iTo; i += iInc, iItr++) 96 119 { 97 120 int rc = vmmR0CallHostSetJmp(&g_Jmp, (PFNVMMR0SETJMP)tst2, (PVM)i, 0); 98 RTTESTI_CHECK_MSG_RETV(rc == 0 || rc == 42, ("i=%d rc=%d setjmp \n", i, rc));121 RTTESTI_CHECK_MSG_RETV(rc == 0 || rc == 42, ("i=%d rc=%d setjmp; cbFoo=%#x cbFooUsed=%#x\n", i, rc, g_cbFoo, g_cbFooUsed)); 99 122 100 123 #ifdef VMM_R0_SWITCH_STACK … … 113 136 } 114 137 RTTESTI_CHECK_MSG_RETV(g_cJmps, ("No jumps!")); 138 if (g_Jmp.cbUsedAvg || g_Jmp.cUsedTotal) 139 RTTestIPrintf(RTTESTLVL_ALWAYS, "cbUsedAvg=%#x cbUsedMax=%#x cUsedTotal=%#llx\n", 140 g_Jmp.cbUsedAvg, g_Jmp.cbUsedMax, g_Jmp.cUsedTotal); 115 141 } 116 142 … … 131 157 RTTestBanner(hTest); 132 158 133 g_Jmp.pvSavedStack = (RTR0PTR)RTTestGuardedAllocTail(hTest, 8192);159 g_Jmp.pvSavedStack = (RTR0PTR)RTTestGuardedAllocTail(hTest, VMM_STACK_SIZE); 134 160 135 161 /*
Note:
See TracChangeset
for help on using the changeset viewer.