Changeset 21498 in vbox for trunk/src/VBox/Additions/common/VBoxGuest/VBoxGuest.cpp
Timestamp: Jul 10, 2009 8:26:23 PM
Files: 1 edited
trunk/src/VBox/Additions/common/VBoxGuest/VBoxGuest.cpp
--- trunk/src/VBox/Additions/common/VBoxGuest/VBoxGuest.cpp (r21491)
+++ trunk/src/VBox/Additions/common/VBoxGuest/VBoxGuest.cpp (r21498)

@@ vboxGuestInitFixateGuestMappings @@
 static int vboxGuestInitFixateGuestMappings(PVBOXGUESTDEVEXT pDevExt)
 {
-    /** @todo implement this using RTR0MemObjReserveKernel() (it needs to be implemented everywhere too). */
+    /*
+     * Query the required space.
+     */
+    VMMDevReqHypervisorInfo *pReq;
+    int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_GetHypervisorInfo);
+    if (RT_FAILURE(rc))
+        return rc;
+    pReq->hypervisorStart = 0;
+    pReq->hypervisorSize  = 0;
+    rc = VbglGRPerform(&pReq->header);
+    if (RT_FAILURE(rc)) /* this shouldn't happen! */
+    {
+        VbglGRFree(&pReq->header);
+        return rc;
+    }
+
+    /*
+     * The VMM will report back if there is nothing it wants to map, like
+     * for instance in VT-x and AMD-V mode.
+     */
+    if (pReq->hypervisorSize == 0)
+        Log(("vboxGuestInitFixateGuestMappings: nothing to do\n"));
+    else
+    {
+        /*
+         * We have to try several times since the host can be picky
+         * about certain addresses.
+         */
+        RTR0MEMOBJ  hFictive     = NIL_RTR0MEMOBJ;
+        uint32_t    cbHypervisor = pReq->hypervisorSize;
+        RTR0MEMOBJ  ahTries[5];
+        uint32_t    iTry;
+        Log(("vboxGuestInitFixateGuestMappings: cbHypervisor=%#x\n", cbHypervisor));
+        for (iTry = 0; iTry < RT_ELEMENTS(ahTries); iTry++)
+        {
+            /*
+             * Reserve space, or if that isn't supported, create an object
+             * for some fictive physical memory and map that into kernel
+             * space.
+             *
+             * To make the code a bit uglier, most systems cannot help with
+             * 4MB alignment, so we have to deal with that in addition to
+             * having two ways of getting the memory.
+             */
+            uint32_t    uAlignment = _4M;
+            RTR0MEMOBJ  hObj;
+            rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M), uAlignment);
+            if (rc == VERR_NOT_SUPPORTED)
+            {
+                uAlignment = PAGE_SIZE;
+                rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M) + _4M, uAlignment);
+            }
+            if (rc == VERR_NOT_SUPPORTED)
+            {
+                if (hFictive == NIL_RTR0MEMOBJ)
+                {
+                    rc = RTR0MemObjEnterPhys(&hObj, VBOXGUEST_HYPERVISOR_PHYSICAL_START, cbHypervisor + _4M);
+                    if (RT_FAILURE(rc))
+                        break;
+                    hFictive = hObj;
+                }
+                uAlignment = _4M;
+                rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
+                if (rc == VERR_NOT_SUPPORTED)
+                {
+                    uAlignment = PAGE_SIZE;
+                    rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
+                }
+            }
+            if (RT_FAILURE(rc))
+            {
+                LogRel(("VBoxGuest: Failed to reserve memory for the hypervisor: rc=%Rrc (cbHypervisor=%#x uAlignment=%#x iTry=%u)\n",
+                        rc, cbHypervisor, uAlignment, iTry));
+                break;
+            }
+
+            /*
+             * Try to set it.
+             */
+            pReq->header.requestType = VMMDevReq_SetHypervisorInfo;
+            pReq->header.rc          = VERR_INTERNAL_ERROR;
+            pReq->hypervisorSize     = cbHypervisor;
+            pReq->hypervisorStart    = (uintptr_t)RTR0MemObjAddress(hObj);
+            if (   uAlignment == PAGE_SIZE
+                && pReq->hypervisorStart & (_4M - 1))
+                pReq->hypervisorStart = RT_ALIGN_32(pReq->hypervisorStart, _4M);
+            AssertMsg(RT_ALIGN_32(pReq->hypervisorStart, _4M) == pReq->hypervisorStart, ("%#x\n", pReq->hypervisorStart));
+
+            rc = VbglGRPerform(&pReq->header);
+            if (RT_SUCCESS(rc))
+            {
+                pDevExt->hGuestMappings = hFictive != NIL_RTR0MEMOBJ ? hFictive : hObj;
+                Log(("vboxGuestInitFixateGuestMappings: %p LB %#x; uAlignment=%#x iTry=%u hGuestMappings=%p (%s)\n",
+                     RTR0MemObjAddress(pDevExt->hGuestMappings),
+                     RTR0MemObjSize(pDevExt->hGuestMappings),
+                     uAlignment, iTry, pDevExt->hGuestMappings, hFictive != NIL_RTR0MEMOBJ ? "fictive" : "reservation"));
+                break;
+            }
+            ahTries[iTry] = hObj;
+        }
+
+        /*
+         * Clean up failed attempts.
+         */
+        while (iTry-- > 0)
+            RTR0MemObjFree(ahTries[iTry], false /* fFreeMappings */);
+        if (   RT_FAILURE(rc)
+            && hFictive != NIL_RTR0MEMOBJ)
+            RTR0MemObjFree(hFictive, false /* fFreeMappings */);
+    }
+    VbglGRFree(&pReq->header);
+
+    /*
+     * We ignore failed attempts for now.
+     */
     return VINF_SUCCESS;
 }

@@ vboxGuestTermUnfixGuestMappings (new) @@
 /**
- * Initializes the interrupt filter mask.
+ * Undo what vboxGuestInitFixateGuestMappings did.
+ *
+ * @param   pDevExt     The device extension.
+ */
+static void vboxGuestTermUnfixGuestMappings(PVBOXGUESTDEVEXT pDevExt)
+{
+    if (pDevExt->hGuestMappings != NIL_RTR0MEMOBJ)
+    {
+        /*
+         * Tell the host that we're going to free the memory we reserved
+         * for it, then free it up. (Leak the memory if anything goes
+         * wrong here.)
+         */
+        VMMDevReqHypervisorInfo *pReq;
+        int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_SetHypervisorInfo);
+        if (RT_SUCCESS(rc))
+        {
+            pReq->hypervisorStart = 0;
+            pReq->hypervisorSize  = 0;
+            rc = VbglGRPerform(&pReq->header);
+            VbglGRFree(&pReq->header);
+        }
+        if (RT_SUCCESS(rc))
+        {
+            rc = RTR0MemObjFree(pDevExt->hGuestMappings, true /* fFreeMappings */);
+            AssertRC(rc);
+        }
+        else
+            LogRel(("vboxGuestTermUnfixGuestMappings: Failed to unfix the guest mappings! rc=%Rrc\n", rc));
+
+        pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
+    }
+}
+
+
+/**
+ * Sets the interrupt filter mask during initialization and termination.
  *
  * This will ASSUME that we're the ones in charge of the mask, so

@@ vboxGuestSetFilterMask (renamed from vboxGuestInitFilterMask) @@
  * @param   fMask       The new mask.
  */
-static int vboxGuestInitFilterMask(PVBOXGUESTDEVEXT pDevExt, uint32_t fMask)
+static int vboxGuestSetFilterMask(PVBOXGUESTDEVEXT pDevExt, uint32_t fMask)
 {
     VMMDevCtlGuestFilterMask *pReq;

@@ vboxGuestSetFilterMask @@
     {
         pReq->u32OrMask = fMask;
-        pReq->u32NotMask = ~fMask; /* It's an AND mask. */
+        pReq->u32NotMask = ~fMask;
         rc = VbglGRPerform(&pReq->header);
         if (   RT_FAILURE(rc)
             || RT_FAILURE(pReq->header.rc))
-            LogRel(("vboxGuestInitCtlFilterMask: failed with rc=%Rrc and VMMDev rc=%Rrc\n",
+            LogRel(("vboxGuestSetFilterMask: failed with rc=%Rrc and VMMDev rc=%Rrc\n",
                     rc, pReq->header.rc));
         VbglGRFree(&pReq->header);

@@ VBoxGuestInitDevExt @@
     pDevExt->pVMMDevMemory = NULL;
     pDevExt->fFixedEvents = fFixedEvents;
+    pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
     pDevExt->pIrqAckEvents = NULL;
     pDevExt->PhysIrqAckEvents = NIL_RTCCPHYS;

@@ VBoxGuestInitDevExt @@
     if (RT_SUCCESS(rc))
     {
-        rc = vboxGuestInitFilterMask(pDevExt, fFixedEvents);
+        rc = vboxGuestSetFilterMask(pDevExt, fFixedEvents);
         if (RT_SUCCESS(rc))
         {

@@ VBoxGuestDeleteDevExt @@
     int rc2;
     Log(("VBoxGuestDeleteDevExt:\n"));
-
-    /** @todo tell VMMDev that the guest additions are no longer running (clear all capability masks).
-     * Like calling VBoxGuestSetGuestCapabilities. This wasn't done initially since it was not
-     * relevant for OS/2. On Solaris modules can be unloaded, so we should implement it.
-     */
-
+    LogRel(("VBoxGuest: The additions driver is terminating.\n"));
+
+    /*
+     * Unfix the guest mappings, filter all events and clear
+     * all capabilities.
+     */
+    vboxGuestTermUnfixGuestMappings(pDevExt);
+    VBoxGuestSetGuestCapabilities(0, UINT32_MAX);
+    vboxGuestSetFilterMask(pDevExt, 0);
+
+    /*
+     * Clean up resources.
+     */
     rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
     rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);
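
Everything new in this change talks to the host through the same VMMDev request lifecycle: allocate a typed request with VbglGRAlloc, fill in its body, submit it with VbglGRPerform, check both the transport status and the status the host wrote into the request header, then release it with VbglGRFree. The sketch below distills that pattern; the helper name vboxGuestTellHostNothingMapped is hypothetical (it is not part of the changeset), and it assumes the driver's usual VBoxGuestLib/VMMDev headers are in scope.

/* Hedged sketch of the VbglGR request pattern used throughout this changeset.
 * vboxGuestTellHostNothingMapped() is an illustrative name only. */
static int vboxGuestTellHostNothingMapped(void)
{
    VMMDevReqHypervisorInfo *pReq;

    /* 1. Allocate a request of the wanted type from the guest library heap. */
    int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo),
                         VMMDevReq_SetHypervisorInfo);
    if (RT_SUCCESS(rc))
    {
        /* 2. Fill in the request body. */
        pReq->hypervisorStart = 0;
        pReq->hypervisorSize  = 0;

        /* 3. Submit it, checking both status codes: the transport rc returned
              by VbglGRPerform and the rc the host stored in the header. */
        rc = VbglGRPerform(&pReq->header);
        if (RT_SUCCESS(rc) && RT_FAILURE(pReq->header.rc))
            rc = pReq->header.rc;

        /* 4. Always free the request, whatever the outcome. */
        VbglGRFree(&pReq->header);
    }
    return rc;
}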
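The fallback path in vboxGuestInitFixateGuestMappings also leans on a small alignment argument: when RTR0MemObjReserveKernel cannot honor 4MB alignment, the code reserves RT_ALIGN_32(cbHypervisor, _4M) + _4M bytes at mere page alignment and then rounds the start address up to the next 4MB boundary; the extra 4MB of slack guarantees the aligned block still fits inside the reservation. Below is a self-contained sketch of that arithmetic with made-up size and address values (the MY_* macros stand in for IPRT's _4M and RT_ALIGN_32).

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define MY_4M       UINT32_C(0x400000)
/* Same rounding as IPRT's RT_ALIGN_32: round u32 up to a power-of-two boundary. */
#define MY_ALIGN_32(u32, uAlign)    (((u32) + ((uAlign) - 1)) & ~(uint32_t)((uAlign) - 1))

int main(void)
{
    uint32_t cbHypervisor = UINT32_C(0x2a0000);   /* made-up size requested by the host */
    uint32_t cbReserved   = MY_ALIGN_32(cbHypervisor, MY_4M) + MY_4M; /* page-aligned fallback */
    uint32_t uStart       = UINT32_C(0x80321000); /* made-up page-aligned reservation start */

    /* Round the start up to a 4MB boundary, as the driver does when the
       reservation itself could only be page aligned. */
    uint32_t uFixed = (uStart & (MY_4M - 1)) ? MY_ALIGN_32(uStart, MY_4M) : uStart;

    /* The extra 4MB of slack guarantees the aligned block still fits. */
    assert(uFixed + MY_ALIGN_32(cbHypervisor, MY_4M) <= uStart + cbReserved);
    printf("reserved %#x bytes at %#x; 4MB-aligned block starts at %#x\n",
           cbReserved, uStart, uFixed);
    return 0;
}

Note that freeing is later done on the whole reservation handle, which is why the driver stores hObj (or the fictive mapping) in pDevExt->hGuestMappings rather than the rounded-up address.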