source: vbox/trunk/src/VBox/VMM/VMMR0/PGMR0DynMap.cpp @ 14484

Last change on this file: r14484, checked in by vboxsync on 2008-11-21:

#1865: More code.

1/* $Id: PGMR0DynMap.cpp 14484 2008-11-21 19:23:59Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, ring-0 dynamic mapping cache.
4 */
5
6/*
7 * Copyright (C) 2008 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/*******************************************************************************
23* Header Files *
24*******************************************************************************/
25#include <VBox/pgm.h>
26#include "../PGMInternal.h"
27#include <VBox/vm.h>
28#include <VBox/sup.h>
29#include <VBox/err.h>
30#include <iprt/asm.h>
31#include <iprt/alloc.h>
32#include <iprt/assert.h>
33#include <iprt/cpuset.h>
34#include <iprt/memobj.h>
35#include <iprt/mp.h>
36#include <iprt/spinlock.h>
37#include <iprt/semaphore.h>
38
39
40/*******************************************************************************
41* Defined Constants And Macros *
42*******************************************************************************/
43/** The max size of the mapping cache (in pages). */
44#define PGMR0DYNMAP_MAX_PAGES ((8*_1M) >> PAGE_SHIFT)
45/* * The max segment size. */
46/** @todo #define PGMR0DYNMAP_SEG_MAX_PAGES (_1M >> PAGE_SHIFT) */
47/** The number of pages we reserve per CPU. */
48#define PGMR0DYNMAP_PAGES_PER_CPU 64
49/** Calculates the overload threshold. Currently set at 50%. */
50#define PGMR0DYNMAP_CALC_OVERLOAD(cPages) ((cPages) / 2)
51
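/* Worked example (editorial illustration, assuming the usual 4 KB PAGE_SIZE):
 * PGMR0DYNMAP_MAX_PAGES = (8 * _1M) >> 12 = 2048 pages (8 MB of mappings);
 * a 4-CPU host reserves 4 * 64 = 256 pages up front, and
 * PGMR0DYNMAP_CALC_OVERLOAD(256) = 128, i.e. the cache counts as overloaded
 * once half of its pages are in use. */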
52
53/*******************************************************************************
54* Structures and Typedefs *
55*******************************************************************************/
56/**
57 * Ring-0 dynamic mapping cache segment.
58 *
59 * The dynamic mapping cache can be extended with additional segments if the
60 * load is found to be too high. This is done the next time a VM is created, under
61 * the protection of the init mutex. The arrays are reallocated and the new
62 * segment is added to the end of them. Nothing is rehashed of course, as the
63 * indexes / addresses must remain unchanged.
64 *
65 * This structure is only modified while owning the init mutex or during module
66 * init / term.
67 */
68typedef struct PGMR0DYNMAPSEG
69{
70 /** Pointer to the next segment. */
71 struct PGMR0DYNMAPSEG *pNext;
72 /** The memory object for the virtual address range that we're abusing. */
73 RTR0MEMOBJ hMemObj;
74 /** The start page in the cache. (I.e. index into the arrays.) */
75 uint16_t iPage;
76 /** The number of pages this segment contributes. */
77 uint16_t cPages;
78 /** The number of page tables. */
79 uint16_t cPTs;
80 /** The memory objects for the page tables. */
81 RTR0MEMOBJ ahMemObjPT[1];
82} PGMR0DYNMAPSEG;
83/** Pointer to a ring-0 dynamic mapping cache segment. */
84typedef PGMR0DYNMAPSEG *PPGMR0DYNMAPSEG;
85
86
87/**
88 * Ring-0 dynamic mapping cache entry.
89 *
90 * This structure tracks a single page in the ring-0 dynamic mapping cache.
91 */
92typedef struct PGMR0DYNMAPENTRY
93{
94 /** The physical address of the currently mapped page.
95 * This is duplicated for three reasons: cache locality, cache policy of the PT
96 * mappings and sanity checks. */
97 RTHCPHYS HCPhys;
98 /** Pointer to the page. */
99 void *pvPage;
100 /** The number of references. */
101 int32_t volatile cRefs;
102 /** PTE pointer union. */
103 union PGMR0DYNMAPENTRY_PPTE
104 {
105 /** PTE pointer, 32-bit legacy version. */
106 PX86PTE pLegacy;
107 /** PTE pointer, PAE version. */
108 PX86PTEPAE pPae;
109 } uPte;
110 /** CPUs that haven't invalidated this entry after its last update. */
111 RTCPUSET PendingSet;
112} PGMR0DYNMAPENTRY;
113/** Pointer to a ring-0 dynamic mapping cache entry. */
114typedef PGMR0DYNMAPENTRY *PPGMR0DYNMAPENTRY;
115
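/* Editorial note on PendingSet: whenever a cache slot is (re)pointed at a new
 * physical page, RTCpuSetFill() marks every CPU as pending. Each CPU then
 * clears its own bit and runs ASMInvalidatePage() the first time it uses the
 * slot (see pgmR0DynMapPage and PGMDynMapMigrateAutoSet below), so stale TLB
 * entries are flushed lazily instead of with a cross-CPU shootdown on every
 * remap. */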
116
117/**
118 * Ring-0 dynamic mapping cache.
119 *
120 * This is initialized during VMMR0 module init but no segments are allocated at
121 * that time. Segments will be added when the first VM is started and removed
122 * again when the last VM shuts down, thus avoiding memory consumption while dormant.
123 * At module termination, the remaining bits will be freed up.
124 */
125typedef struct PGMR0DYNMAP
126{
127 /** The usual magic number / eye catcher (PGMR0DYNMAP_MAGIC). */
128 uint32_t u32Magic;
129 /** Spinlock serializing the normal operation of the cache. */
130 RTSPINLOCK hSpinlock;
131 /** Array for tracking and managing the pages. */
132 PPGMR0DYNMAPENTRY paPages;
133 /** The cache size given as a number of pages. */
134 uint32_t cPages;
135 /** Whether it's 32-bit legacy or PAE/AMD64 paging mode. */
136 bool fLegacyMode;
137 /** The current load. */
138 uint32_t cLoad;
139 /** The max load.
140 * This is maintained in order to trigger the addition of more mapping space. */
141 uint32_t cMaxLoad;
142 /** Initialization / termination lock. */
143 RTSEMFASTMUTEX hInitLock;
144 /** The number of users (protected by hInitLock). */
145 uint32_t cUsers;
146 /** Array containing a copy of the original page tables.
147 * The entries are either X86PTE or X86PTEPAE according to fLegacyMode. */
148 void *pvSavedPTEs;
149 /** List of segments. */
150 PPGMR0DYNMAPSEG pSegHead;
151 /** The paging mode. */
152 SUPPAGINGMODE enmPgMode;
153} PGMR0DYNMAP;
154/** Pointer to the ring-0 dynamic mapping cache */
155typedef PGMR0DYNMAP *PPGMR0DYNMAP;
156
157/** PGMR0DYNMAP::u32Magic. (Jens Christian Bugge Wesseltoft) */
158#define PGMR0DYNMAP_MAGIC 0x19640201
159
160
161/*******************************************************************************
162* Global Variables *
163*******************************************************************************/
164/** Pointer to the ring-0 dynamic mapping cache. */
165static PPGMR0DYNMAP g_pPGMR0DynMap;
166
167
168/*******************************************************************************
169* Internal Functions *
170*******************************************************************************/
171static void pgmR0DynMapReleasePage(PPGMR0DYNMAP pThis, uint32_t iPage, uint32_t cRefs);
172static int pgmR0DynMapSetup(PPGMR0DYNMAP pThis);
173static int pgmR0DynMapGrow(PPGMR0DYNMAP pThis);
174static void pgmR0DynMapTearDown(PPGMR0DYNMAP pThis);
175
176
177/**
178 * Initializes the ring-0 dynamic mapping cache.
179 *
180 * @returns VBox status code.
181 */
182VMMR0DECL(int) PGMR0DynMapInit(void)
183{
184#ifndef DEBUG_bird
185 return VINF_SUCCESS;
186#else
187 Assert(!g_pPGMR0DynMap);
188
189 /*
190 * Create and initialize the cache instance.
191 */
192 PPGMR0DYNMAP pThis = (PPGMR0DYNMAP)RTMemAllocZ(sizeof(*pThis));
193 AssertLogRelReturn(pThis, VERR_NO_MEMORY);
194 int rc = VINF_SUCCESS;
195 pThis->enmPgMode = SUPR0GetPagingMode();
196 switch (pThis->enmPgMode)
197 {
198 case SUPPAGINGMODE_32_BIT:
199 case SUPPAGINGMODE_32_BIT_GLOBAL:
200 pThis->fLegacyMode = true;
201 break;
202 case SUPPAGINGMODE_PAE:
203 case SUPPAGINGMODE_PAE_GLOBAL:
204 case SUPPAGINGMODE_PAE_NX:
205 case SUPPAGINGMODE_PAE_GLOBAL_NX:
206 case SUPPAGINGMODE_AMD64:
207 case SUPPAGINGMODE_AMD64_GLOBAL:
208 case SUPPAGINGMODE_AMD64_NX:
209 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
210 pThis->fLegacyMode = false;
211 break;
212 default:
213 rc = VERR_INTERNAL_ERROR;
214 break;
215 }
216 if (RT_SUCCESS(rc))
217 {
218 rc = RTSemFastMutexCreate(&pThis->hInitLock);
219 if (RT_SUCCESS(rc))
220 {
221 rc = RTSpinlockCreate(&pThis->hSpinlock);
222 if (RT_SUCCESS(rc))
223 {
224 pThis->u32Magic = PGMR0DYNMAP_MAGIC;
225 g_pPGMR0DynMap = pThis;
226 return VINF_SUCCESS;
227 }
228 RTSemFastMutexDestroy(pThis->hInitLock);
229 }
230 }
231 RTMemFree(pThis);
232 return rc;
233#endif
234}
235
236
237/**
238 * Terminates the ring-0 dynamic mapping cache.
239 */
240VMMR0DECL(void) PGMR0DynMapTerm(void)
241{
242#ifdef DEBUG_bird
243 /*
244 * Destroy the cache.
245 *
246 * There are not supposed to be any races here; the loader should
247 * make sure about that. So, don't bother locking anything.
248 *
249 * The VM objects should all be destroyed by now, so there are no
250 * dangling users or anything like that to clean up. This routine
251 * is just a mirror image of PGMR0DynMapInit.
252 */
253 PPGMR0DYNMAP pThis = g_pPGMR0DynMap;
254 if (pThis)
255 {
256 AssertPtr(pThis);
257 g_pPGMR0DynMap = NULL;
258
259 AssertLogRelMsg(!pThis->cUsers && !pThis->paPages && !pThis->cPages,
260 ("cUsers=%d paPages=%p cPages=%#x\n",
261 pThis->cUsers, pThis->paPages, pThis->cPages));
262
263 /* Free the associated resources. */
264 RTSemFastMutexDestroy(pThis->hInitLock);
265 pThis->hInitLock = NIL_RTSEMFASTMUTEX;
266 RTSpinlockDestroy(pThis->hSpinlock);
267 pThis->hSpinlock = NIL_RTSPINLOCK;
268 pThis->u32Magic = UINT32_MAX;
269 RTMemFree(pThis);
270 }
271#endif
272}
273
274
275/**
276 * Initializes the dynamic mapping cache for a new VM.
277 *
278 * @returns VBox status code.
279 * @param pVM Pointer to the shared VM structure.
280 */
281VMMR0DECL(int) PGMR0DynMapInitVM(PVM pVM)
282{
283#ifndef DEBUG_bird
284 return VINF_SUCCESS;
285#else
286 /*
287 * Initialize the auto sets.
288 */
289 VMCPUID idCpu = pVM->cCPUs;
290 while (idCpu-- > 0)
291 {
292 PPGMMAPSET pSet = &pVM->aCpus[idCpu].pgm.s.AutoSet;
293 uint32_t j = RT_ELEMENTS(pSet->aEntries);
294 while (j-- > 0)
295 {
296 pSet->aEntries[j].iPage = UINT16_MAX;
297 pSet->aEntries[j].cRefs = 0;
298 }
299 pSet->cEntries = PGMMAPSET_CLOSED;
300 }
301
302 /*
303 * Do we need the cache? Skip the last bit if we don't.
304 */
305 Assert(!pVM->pgm.s.pvR0DynMapUsed);
306 pVM->pgm.s.pvR0DynMapUsed = NULL;
307 if (!HWACCMIsEnabled(pVM))
308 return VINF_SUCCESS;
309
310 /*
311 * Reference and if necessary setup or grow the cache.
312 */
313 PPGMR0DYNMAP pThis = g_pPGMR0DynMap;
314 AssertPtrReturn(pThis, VERR_INTERNAL_ERROR);
315 int rc = RTSemFastMutexRequest(pThis->hInitLock);
316 AssertLogRelRCReturn(rc, rc);
317
318 pThis->cUsers++;
319 if (pThis->cUsers == 1)
320 rc = pgmR0DynMapSetup(pThis);
321 else if (pThis->cMaxLoad > PGMR0DYNMAP_CALC_OVERLOAD(pThis->cPages))
322 rc = pgmR0DynMapGrow(pThis);
323 if (RT_FAILURE(rc))
324 pThis->cUsers--;
325
326 RTSemFastMutexRelease(pThis->hInitLock);
327
328 return rc;
329#endif
330}
331
332
333/**
334 * Terminates the dynamic mapping cache usage for a VM.
335 *
336 * @param pVM Pointer to the shared VM structure.
337 */
338VMMR0DECL(void) PGMR0DynMapTermVM(PVM pVM)
339{
340#ifdef DEBUG_bird
341 /*
342 * Return immediately if we're not using the cache.
343 */
344 if (!pVM->pgm.s.pvR0DynMapUsed)
345 return;
346
347 PPGMR0DYNMAP pThis = g_pPGMR0DynMap;
348 AssertPtrReturnVoid(pThis);
349
350 int rc = RTSemFastMutexRequest(pThis->hInitLock);
351 AssertLogRelRCReturnVoid(rc);
352
353 if (pVM->pgm.s.pvR0DynMapUsed == pThis)
354 {
355 pVM->pgm.s.pvR0DynMapUsed = NULL;
356
357 /*
358 * Clean up and check the auto sets.
359 */
360 VMCPUID idCpu = pVM->cCPUs;
361 while (idCpu-- > 0)
362 {
363 PPGMMAPSET pSet = &pVM->aCpus[idCpu].pgm.s.AutoSet;
364 uint32_t j = pSet->cEntries;
365 if (j <= RT_ELEMENTS(pSet->aEntries))
366 {
367 /*
368 * The set is open, close it.
369 */
370 while (j-- > 0)
371 {
372 int32_t cRefs = pSet->aEntries[j].cRefs;
373 uint32_t iPage = pSet->aEntries[j].iPage;
374 LogRel(("PGMR0DynMapTermVM: %d dangling refs to %#x\n", cRefs, iPage));
375 if (iPage < pThis->cPages && cRefs > 0)
376 pgmR0DynMapReleasePage(pThis, iPage, cRefs);
377 else
378 AssertMsgFailed(("cRefs=%d iPage=%#x cPages=%u\n", cRefs, iPage, pThis->cPages));
379
380 pSet->aEntries[j].iPage = UINT16_MAX;
381 pSet->aEntries[j].cRefs = 0;
382 }
383 pSet->cEntries = PGMMAPSET_CLOSED;
384 }
385
386 j = RT_ELEMENTS(pSet->aEntries);
387 while (j-- > 0)
388 {
389 Assert(pSet->aEntries[j].iPage == UINT16_MAX);
390 Assert(!pSet->aEntries[j].cRefs);
391 }
392 }
393
394 /*
395 * Release our reference to the mapping cache.
396 */
397 Assert(pThis->cUsers > 0);
398 pThis->cUsers--;
399 if (!pThis->cUsers)
400 pgmR0DynMapTearDown(pThis);
401 }
402 else
403 AssertMsgFailed(("pvR0DynMapUsed=%p pThis=%p\n", pVM->pgm.s.pvR0DynMapUsed, pThis));
404
405 RTSemFastMutexRelease(pThis->hInitLock);
406#endif
407}
408
409
410/**
411 * Calculate the new cache size based on cMaxLoad statistics.
412 *
413 * @returns Number of pages.
414 * @param pThis The dynamic mapping cache instance.
415 */
416static uint32_t pgmR0DynMapCalcNewSize(PPGMR0DYNMAP pThis)
417{
418 /*
419 * cCpus * PGMR0DYNMAP_PAGES_PER_CPU.
420 */
421 RTCPUID cCpus = RTMpGetCount();
422 uint32_t cPages = cCpus * PGMR0DYNMAP_PAGES_PER_CPU;
423
424 /* adjust against cMaxLoad. */
425 AssertMsg(pThis->cMaxLoad <= PGMR0DYNMAP_MAX_PAGES, ("%#x\n", pThis->cMaxLoad));
426 if (pThis->cMaxLoad > PGMR0DYNMAP_MAX_PAGES)
427 pThis->cMaxLoad = 0;
428
429 while (pThis->cMaxLoad >= PGMR0DYNMAP_CALC_OVERLOAD(cPages))
430 cPages += PGMR0DYNMAP_PAGES_PER_CPU;
431
432 /* adjust against max size. */
433 if (cPages > PGMR0DYNMAP_MAX_PAGES)
434 cPages = PGMR0DYNMAP_MAX_PAGES;
435
436 return cPages;
437}
438
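/* Worked example (editorial illustration): on a 2-CPU host the initial size is
 * 2 * 64 = 128 pages. If the previous run peaked at cMaxLoad = 200, the loop
 * above grows the cache in 64-page steps until the 50% threshold clears that
 * peak: 128, 192, 256, 320, 384, 448 pages (threshold 224 > 200). The result
 * is then clipped to PGMR0DYNMAP_MAX_PAGES (2048). */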
439
440/**
441 * Adds a new segment of the specified size.
442 *
443 * @returns VBox status code.
444 * @param pThis The dynamic mapping cache instance.
445 * @param cPages The size of the new segment, given as a page count.
446 */
447static int pgmR0DynMapAddSeg(PPGMR0DYNMAP pThis, uint32_t cPages)
448{
449#if 0
450 int rc2;
451
452 /*
453 * Do the array reallocation first.
454 * (Too lazy to clean these up on failure.)
455 */
456 void *pv = RTMemRealloc(pThis->paPages, sizeof(pThis->paPages[0]) * (pThis->cPages + cPages));
457 if (!pv)
458 return VERR_NO_MEMORY;
459 pThis->paPages = (PPGMR0DYNMAPENTRY)pv;
460
461 pv = RTMemRealloc(pThis->pvSavedPTEs, (pThis->fLegacyMode ? sizeof(X86PGUINT) : sizeof(X86PGPAEUINT)) * (pThis->cPages + cPages));
462 if (!pv)
463 return VERR_NO_MEMORY;
464 pThis->pvSavedPTEs = pv;
465
466 /*
467 * Allocate the segment structure and pages memory.
468 */
469 uint32_t cPTs = cPages / (pThis->fLegacyMode ? X86_PG_ENTRIES : X86_PG_PAE_ENTRIES) + 2;
470 PPGMR0DYNMAPSEG pSeg = (PPGMR0DYNMAPSEG)RTMemAllocZ(RT_UOFFSETOF(PGMR0DYNMAPSEG, ahMemObjPT[cPTs]));
471 if (!pSeg)
472 return VERR_NO_MEMORY;
473 pSeg->pNext = NULL;
474 pSeg->cPages = cPages;
475 pSeg->iPage = pThis->cPages;
476 pSeg->cPTs = 0;
477 int rc = RTR0MemObjAllocPage(&pSeg->hMemObj, cPages << PAGE_SHIFT, false);
478 if (RT_SUCCESS(rc))
479 {
480 /*
481 * Walk the paging hierarchy and map the relevant page tables.
482 */
483 uint8_t *pbPage = RTR0MemObjAddress(pSeg->hMemObj);
484 AssertMsg(VALID_PTR(pbPage) && !((uintptr_t)pbPage & PAGE_OFFSET_MASK), ("%p\n", pbPage));
485 uint32_t iPage = pThis->cPages;
486 uint32_t iEndPage = iPage + cPages;
487 struct
488 {
489 RTHCPHYS HCPhys; /**< The entry that's currently mapped */
490 RTHCPHYS fPhysMask; /**< Mask for extracting HCPhys from uEntry. */
491 RTR0MEMOBJ hMemObj;
492 RTR0MEMOBJ hMapObj;
493 uint64_t fPtrMask;
494 uint32_t fPtrShift;
495 uint64_t fAndMask;
496 uint64_t fResMask;
497 union
498 {
499 void *pv;
500 } u;
501 } a[4];
502 RTCCUINTREG cr4 = ASMGetCR4();
503 uint32_t cLevels;
504 switch (pThis->enmPgMode)
505 {
506 case SUPPAGINGMODE_32_BIT:
507 case SUPPAGINGMODE_32_BIT_GLOBAL:
508 cLevels = 2;
509 a[0].fAndMask = X86_PDE_P | X86_PDE_RW | (cr4 & X86_CR4_PSE ? X86_PDE_PS : 0);
510 a[0].fResMask = X86_PDE_P | X86_PDE_RW;
511 a[0].fPtrMask = X86_PD_MASK;
512 a[0].fPtrShift = X86_PD_SHIFT;
513 a[1].fAndMask = X86_PTE_P | X86_PTE_RW;
514 a[1].fResMask = X86_PTE_P | X86_PTE_RW;
515 a[1].fPtrMask = X86_PT_MASK;
516 a[1].fPtrShift = X86_PT_SHIFT;
517 break;
518
519 case SUPPAGINGMODE_PAE:
520 case SUPPAGINGMODE_PAE_GLOBAL:
521 case SUPPAGINGMODE_PAE_NX:
522 case SUPPAGINGMODE_PAE_GLOBAL_NX:
523 cLevels = 3;
524 a[0].fAndMask = X86_PDPE_P;
525 a[0].fResMask = X86_PDPE_P;
526 a[0].fPtrMask = X86_PDPT_MASK_PAE;
527 a[0].fPtrShift = X86_PDPT_SHIFT;
528 a[1].fAndMask = X86_PDE_P | X86_PDE_RW | (cr4 & X86_CR4_PSE ? X86_PDE_PS : 0);
529 a[1].fResMask = X86_PDE_P | X86_PDE_RW;
530 a[1].fPtrMask = X86_PD_MASK;
531 a[1].fPtrShift = X86_PD_SHIFT;
532 a[2].fAndMask = X86_PTE_P | X86_PTE_RW;
533 a[2].fResMask = X86_PTE_P | X86_PTE_RW;
534 a[2].fPtrMask = X86_PT_MASK;
535 a[2].fPtrShift = X86_PT_SHIFT;
536 break;
537
538 case SUPPAGINGMODE_AMD64:
539 case SUPPAGINGMODE_AMD64_GLOBAL:
540 case SUPPAGINGMODE_AMD64_NX:
541 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
542 cLevels = 3;
543 a[0].fAndMask = X86_PML4E_P | X86_PML4E_RW;
544 a[0].fResMask = X86_PML4E_P | X86_PML4E_RW;
545 a[0].fPtrMask = X86_PML4_MASK;
546 a[0].fPtrShift = X86_PML4_SHIFT;
547 a[1].fAndMask = X86_PDPE_P | X86_PDPE_RW /** @todo check for X86_PDPT_PS support. */;
548 a[1].fResMask = X86_PDPE_P | X86_PDPE_RW;
549 a[1].fPtrMask = X86_PDPT_MASK_AMD64;
550 a[1].fPtrShift = X86_PDPT_SHIFT;
551 a[2].fAndMask = X86_PDE_P | X86_PDE_RW | (cr4 & X86_CR4_PSE ? X86_PDE_PS : 0);
552 a[2].fResMask = X86_PDE_P | X86_PDE_RW;
553 a[2].fPtrMask = X86_PD_MASK;
554 a[2].fPtrShift = X86_PD_SHIFT;
555 a[3].fAndMask = X86_PTE_P | X86_PTE_RW;
556 a[3].fResMask = X86_PTE_P | X86_PTE_RW;
557 a[3].fPtrMask = X86_PT_MASK;
558 a[3].fPtrShift = X86_PT_SHIFT;
559 break;
560 default:
561 cLevels = 0;
562 break;
563 }
564 for (uint32_t i = 0; i < RT_ELEMENTS(a); i++)
565 {
566 a[i].HCPhys = NIL_RTHCPHYS;
567 a[i].hMapObj = a[i].hMemObj = NIL_RTR0MEMOBJ;
568 a[i].u.pv = NULL;
569 }
570
571 for (; iPage < iEndPage && RT_SUCCESS(rc); iPage++, pbPage += PAGE_SIZE)
572 {
573 /* Initialize it */
574 pThis->paPages[iPage].HCPhys = NIL_RTHCPHYS;
575 pThis->paPages[iPage].pvPage = pbPage;
576 pThis->paPages[iPage].cRefs = 0;
577 pThis->paPages[iPage].uPte.pPae = NULL;
578 RTCpuSetFill(&pThis->paPages[iPage].PendingSet);
579
580 /*
581 * Map its page table.
582 *
583 * This is a bit ASSUMPTIVE; it should really do a clean run through
584 * the tables every time something was mapped and disable preemption
585 * and/or interrupts.
586 */
587 X86PGPAEUINT uEntry = ASMGetCR3();
588 for (unsigned i = 0; i < cLevels && RT_SUCCESS(rc); i++)
589 {
590 RTHCPHYS HCPhys = uEntry & a[i].fPhysMask;
591 if (a[i].HCPhys != HCPhys)
592 {
593 if (i + 1 != cLevels)
594 {
595 RTR0MemObjFree(a[i].hMemObj, true /* fFreeMappings */);
596 a[i].hMemObj = a[i].hMapObj = NIL_RTR0MEMOBJ;
597 }
598 rc = RTR0MemObjEnterPhys(&a[i].hMemObj, HCPhys, PAGE_SIZE);
599 if (RT_SUCCESS(rc))
600 rc = RTR0MemObjMapKernel(&a[i].hMapObj, a[i].hMemObj, &a[i].u.pv, 0, RTMEM_PROT_WRITE | RTMEM_PROT_READ);
601 if (RT_FAILURE(rc))
602 break;
603 }
604
605 }
606
607
608 } /* for each page */
609
610 for (iPage = 0; i < cLevels; )
611
612 rc2 = RTR0MemObjFree(hMemObjCR3, true /* fFreeMappings */); AssertRC(rc2);
613
614 rc2 = RTR0MemObjFree(pSeg->hMemObj, true /* fFreeMappings */); AssertRC(rc2);
615 }
616 RTMemFree(pSeg);
617 return rc;
618#else
619 return VERR_NOT_IMPLEMENTED;
620#endif
621}
622
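/* Editorial sketch only: pgmR0DynMapAddSeg above is still disabled (#if 0), so
 * this is NOT the implementation, merely an outline of the bookkeeping it has
 * to do. The page-tracking arrays are reallocated in place (existing indexes
 * stay valid, nothing is rehashed) and the new segment claims the index range
 * [cPages, cPages + cNewPages). Page allocation, page table mapping and the
 * saving of the original PTEs are deliberately left out. */
#if 0 /* illustration */
static int pgmR0DynMapAddSegSketch(PPGMR0DYNMAP pThis, PPGMR0DYNMAPSEG pSeg, uint32_t cNewPages)
{
    void *pv = RTMemRealloc(pThis->paPages, sizeof(pThis->paPages[0]) * (pThis->cPages + cNewPages));
    if (!pv)
        return VERR_NO_MEMORY;
    pThis->paPages = (PPGMR0DYNMAPENTRY)pv;

    pv = RTMemRealloc(pThis->pvSavedPTEs,
                      (pThis->fLegacyMode ? sizeof(X86PGUINT) : sizeof(X86PGPAEUINT)) * (pThis->cPages + cNewPages));
    if (!pv)
        return VERR_NO_MEMORY;
    pThis->pvSavedPTEs = pv;

    pSeg->iPage     = (uint16_t)pThis->cPages;  /* first index owned by the new segment */
    pSeg->cPages    = (uint16_t)cNewPages;
    pSeg->pNext     = pThis->pSegHead;          /* assumption: head insertion; the real code may differ */
    pThis->pSegHead = pSeg;
    pThis->cPages  += cNewPages;
    return VINF_SUCCESS;
}
#endif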
623
624/**
625 * Called by PGMR0DynMapInitVM under the init lock.
626 *
627 * @returns VBox status code.
628 * @param pThis The dynamic mapping cache instance.
629 */
630static int pgmR0DynMapSetup(PPGMR0DYNMAP pThis)
631{
632 /*
633 * Calc the size and add a segment of that size.
634 */
635 uint32_t cPages = pgmR0DynMapCalcNewSize(pThis);
636 AssertReturn(cPages, VERR_INTERNAL_ERROR);
637 return pgmR0DynMapAddSeg(pThis, cPages);
638}
639
640
641/**
642 * Called by PGMR0DynMapInitVM under the init lock.
643 *
644 * @returns VBox status code.
645 * @param pThis The dynamic mapping cache instance.
646 */
647static int pgmR0DynMapGrow(PPGMR0DYNMAP pThis)
648{
649 /*
650 * Calc the new target size and add a segment of the appropriate size.
651 */
652 uint32_t cPages = pgmR0DynMapCalcNewSize(pThis);
653 if (pThis->cPages >= cPages)
654 return VINF_SUCCESS;
655
656 uint32_t cAdd = cPages - pThis->cPages;
657 return pgmR0DynMapAddSeg(pThis, cAdd);
658}
659
660
661/**
662 * Shoots down the TLBs for all the cache pages, pgmR0DynMapTearDown helper.
663 *
664 * @param idCpu The current CPU.
665 * @param pvUser1 The dynamic mapping cache instance.
666 * @param pvUser2 Unused, NULL.
667 */
668static DECLCALLBACK(void) pgmR0DynMapShootDownTlbs(RTCPUID idCpu, void *pvUser1, void *pvUser2)
669{
670 Assert(!pvUser2);
671 PPGMR0DYNMAP pThis = (PPGMR0DYNMAP)pvUser1;
672 Assert(pThis == g_pPGMR0DynMap);
673 PPGMR0DYNMAPENTRY paPages = pThis->paPages;
674 uint32_t iPage = pThis->cPages;
675 while (iPage-- > 0)
676 ASMInvalidatePage(paPages[iPage].pvPage);
677}
678
679
680/**
681 * Called by PGMR0DynMapTermVM under the init lock.
682 *
683 * @returns VBox status code.
684 * @param pThis The dynamic mapping cache instance.
685 */
686static void pgmR0DynMapTearDown(PPGMR0DYNMAP pThis)
687{
688 /*
689 * Restore the original page table entries
690 */
691 PPGMR0DYNMAPENTRY paPages = pThis->paPages;
692 uint32_t iPage = pThis->cPages;
693 if (pThis->fLegacyMode)
694 {
695 X86PGUINT const *paSavedPTEs = (X86PGUINT const *)pThis->pvSavedPTEs;
696 while (iPage-- > 0)
697 {
698 X86PGUINT uOld = paPages[iPage].uPte.pLegacy->u;
699 X86PGUINT uOld2 = uOld; NOREF(uOld2);
700 X86PGUINT uNew = paSavedPTEs[iPage];
701 while (!ASMAtomicCmpXchgExU32(&paPages[iPage].uPte.pLegacy->u, uNew, uOld, &uOld))
702 AssertMsgFailed(("uOld=%#x uOld2=%#x uNew=%#x\n", uOld, uOld2, uNew));
703 }
704 }
705 else
706 {
707 X86PGPAEUINT const *paSavedPTEs = (X86PGPAEUINT const *)pThis->pvSavedPTEs;
708 while (iPage-- > 0)
709 {
710 X86PGPAEUINT uOld = paPages[iPage].uPte.pPae->u;
711 X86PGPAEUINT uOld2 = uOld; NOREF(uOld2);
712 X86PGPAEUINT uNew = paSavedPTEs[iPage];
713 while (!ASMAtomicCmpXchgExU64(&paPages[iPage].uPte.pPae->u, uNew, uOld, &uOld))
714 AssertMsgFailed(("uOld=%#llx uOld2=%#llx uNew=%#llx\n", uOld, uOld2, uNew));
715 }
716 }
717
718 /*
719 * Shoot down the TLBs on all CPUs before freeing them.
720 * If RTMpOnAll fails, make sure the TLBs are invalidated on the current CPU at least.
721 */
722 int rc = RTMpOnAll(pgmR0DynMapShootDownTlbs, pThis, NULL);
723 AssertRC(rc);
724 if (RT_FAILURE(rc))
725 {
726 iPage = pThis->cPages;
727 while (iPage-- > 0)
728 ASMInvalidatePage(paPages[iPage].pvPage);
729 }
730
731 /*
732 * Free the segments.
733 */
734 while (pThis->pSegHead)
735 {
736 PPGMR0DYNMAPSEG pSeg = pThis->pSegHead;
737 pThis->pSegHead = pSeg->pNext;
738
739 uint32_t iPT = pSeg->cPTs;
740 while (iPT-- > 0)
741 {
742 rc = RTR0MemObjFree(pSeg->ahMemObjPT[iPT], true /* fFreeMappings */); AssertRC(rc);
743 pSeg->ahMemObjPT[iPT] = NIL_RTR0MEMOBJ;
744 }
745 rc = RTR0MemObjFree(pSeg->hMemObj, true /* fFreeMappings */); AssertRC(rc);
746 pSeg->hMemObj = NIL_RTR0MEMOBJ;
747 pSeg->pNext = NULL;
748 pSeg->iPage = UINT16_MAX;
749 pSeg->cPages = 0;
750 pSeg->cPTs = 0;
751 RTMemFree(pSeg);
752 }
753
754 /*
755 * Free the arrays and restore the initial state.
756 * The cMaxLoad value is left behind for the next setup.
757 */
758 RTMemFree(pThis->paPages);
759 pThis->paPages = NULL;
760 RTMemFree(pThis->pvSavedPTEs);
761 pThis->pvSavedPTEs = NULL;
762 pThis->cPages = 0;
763 pThis->cLoad = 0;
764}
765
766
767/**
768 * Release references to a page, caller owns the spin lock.
769 *
770 * @param pThis The dynamic mapping cache instance.
771 * @param iPage The page.
772 * @param cRefs The number of references to release.
773 */
774DECLINLINE(void) pgmR0DynMapReleasePageLocked(PPGMR0DYNMAP pThis, uint32_t iPage, int32_t cRefs)
775{
776 cRefs = ASMAtomicSubS32(&pThis->paPages[iPage].cRefs, cRefs);
777 AssertMsg(cRefs >= 0, ("%d\n", cRefs));
778 if (!cRefs)
779 pThis->cLoad--;
780}
781
782
783/**
784 * Release references to a page, caller does not own the spin lock.
785 *
786 * @param pThis The dynamic mapping cache instance.
787 * @param iPage The page.
788 * @param cRefs The number of references to release.
789 */
790static void pgmR0DynMapReleasePage(PPGMR0DYNMAP pThis, uint32_t iPage, uint32_t cRefs)
791{
792 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
793 RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
794 pgmR0DynMapReleasePageLocked(pThis, iPage, cRefs);
795 RTSpinlockRelease(pThis->hSpinlock, &Tmp);
796}
797
798
799/**
800 * pgmR0DynMapPage worker that deals with the tedious bits.
801 *
802 * @returns The page index on success, UINT32_MAX on failure.
803 * @param pThis The dynamic mapping cache instance.
804 * @param HCPhys The address of the page to be mapped.
805 * @param iPage The page index pgmR0DynMapPage hashed HCPhys to.
806 */
807static uint32_t pgmR0DynMapPageSlow(PPGMR0DYNMAP pThis, RTHCPHYS HCPhys, uint32_t iPage)
808{
809 /*
810 * Check if any of the first 5 pages are unreferenced since the caller
811 * already has made sure they aren't matching.
812 */
813 uint32_t const cPages = pThis->cPages;
814 PPGMR0DYNMAPENTRY paPages = pThis->paPages;
815 uint32_t iFreePage;
816 if (!paPages[iPage].cRefs)
817 iFreePage = iPage;
818 else if (!paPages[(iPage + 1) % cPages].cRefs)
819 iFreePage = (iPage + 1) % cPages;
820 else if (!paPages[(iPage + 2) % cPages].cRefs)
821 iFreePage = (iPage + 2) % cPages;
822 else if (!paPages[(iPage + 3) % cPages].cRefs)
823 iFreePage = (iPage + 3) % cPages;
824 else if (!paPages[(iPage + 4) % cPages].cRefs)
825 iFreePage = (iPage + 4) % cPages;
826 else
827 {
828 /*
829 * Search for an unused or matching entry.
830 */
831 iFreePage = (iPage + 5) % pThis->cPages;
832 for (;;)
833 {
834 if (paPages[iFreePage].HCPhys == HCPhys)
835 return iFreePage;
836 if (!paPages[iFreePage].cRefs)
837 break;
838
839 /* advance */
840 iFreePage = (iFreePage + 1) % cPages;
841 if (RT_UNLIKELY(iFreePage == iPage))
842 return UINT32_MAX;
843 }
844 }
845
846 /*
847 * Setup the new entry.
848 */
849 paPages[iFreePage].HCPhys = HCPhys;
850 RTCpuSetFill(&paPages[iFreePage].PendingSet);
851 if (pThis->fLegacyMode)
852 {
853 X86PGUINT uOld = paPages[iFreePage].uPte.pLegacy->u;
854 X86PGUINT uOld2 = uOld; NOREF(uOld2);
855 X86PGUINT uNew = (uOld & (X86_PTE_G | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT))
856 | X86_PTE_P | X86_PTE_A | X86_PTE_D
857 | (HCPhys & X86_PTE_PG_MASK);
858 while (!ASMAtomicCmpXchgExU32(&paPages[iFreePage].uPte.pLegacy->u, uNew, uOld, &uOld))
859 AssertMsgFailed(("uOld=%#x uOld2=%#x uNew=%#x\n", uOld, uOld2, uNew));
860 }
861 else
862 {
863 X86PGPAEUINT uOld = paPages[iFreePage].uPte.pPae->u;
864 X86PGPAEUINT uOld2 = uOld; NOREF(uOld2);
865 X86PGPAEUINT uNew = (uOld & (X86_PTE_G | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT))
866 | X86_PTE_P | X86_PTE_A | X86_PTE_D
867 | (HCPhys & X86_PTE_PAE_PG_MASK);
868 while (!ASMAtomicCmpXchgExU64(&paPages[iFreePage].uPte.pPae->u, uNew, uOld, &uOld))
869 AssertMsgFailed(("uOld=%#llx uOld2=%#llx uNew=%#llx\n", uOld, uOld2, uNew));
870 }
871 return iFreePage;
872}
873
874
875/**
876 * Maps a page into the pool.
877 *
878 * @returns Pointer to the mapping.
879 * @param pThis The dynamic mapping cache instance.
880 * @param HCPhys The address of the page to be mapped.
881 * @param piPage Where to store the page index.
882 */
883DECLINLINE(void *) pgmR0DynMapPage(PPGMR0DYNMAP pThis, RTHCPHYS HCPhys, uint32_t *piPage)
884{
885 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
886 RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
887 AssertMsg(!(HCPhys & PAGE_OFFSET_MASK), ("HCPhys=%RHp\n", HCPhys));
888
889 /*
890 * Find an entry, if possible a matching one. The HCPhys address is hashed
891 * down to a page index; collisions are handled by linear searching. Optimize
892 * for a hit in the first 5 pages.
893 *
894 * Do the cheap hits here and defer the tedious searching and inserting
895 * to a helper function.
896 */
897 uint32_t const cPages = pThis->cPages;
898 uint32_t iPage = (HCPhys >> PAGE_SHIFT) % cPages;
899 PPGMR0DYNMAPENTRY paPages = pThis->paPages;
900 if (paPages[iPage].HCPhys != HCPhys)
901 {
902 uint32_t iPage2 = (iPage + 1) % cPages;
903 if (paPages[iPage2].HCPhys != HCPhys)
904 {
905 iPage2 = (iPage + 2) % cPages;
906 if (paPages[iPage2].HCPhys != HCPhys)
907 {
908 iPage2 = (iPage + 3) % cPages;
909 if (paPages[iPage2].HCPhys != HCPhys)
910 {
911 iPage2 = (iPage + 4) % cPages;
912 if (paPages[iPage2].HCPhys != HCPhys)
913 {
914 iPage = pgmR0DynMapPageSlow(pThis, HCPhys, iPage);
915 if (RT_UNLIKELY(iPage == UINT32_MAX))
916 {
917 RTSpinlockRelease(pThis->hSpinlock, &Tmp);
918 return NULL;
919 }
920 }
921 else
922 iPage = iPage2;
923 }
924 else
925 iPage = iPage2;
926 }
927 else
928 iPage = iPage2;
929 }
930 else
931 iPage = iPage2;
932 }
933
934 /*
935 * Reference it, update statistics and get the return address.
936 */
937 if (ASMAtomicIncS32(&paPages[iPage].cRefs) == 1)
938 {
939 pThis->cLoad++;
940 if (pThis->cLoad > pThis->cMaxLoad)
941 pThis->cMaxLoad = pThis->cLoad;
942 Assert(pThis->cLoad <= pThis->cPages);
943 }
944 void *pvPage = paPages[iPage].pvPage;
945
946 /*
947 * Invalidate the entry?
948 */
949 RTCPUID idRealCpu = RTMpCpuId();
950 bool fInvalidateIt = RTCpuSetIsMember(&paPages[iPage].PendingSet, idRealCpu);
951 if (fInvalidateIt)
952 RTCpuSetDel(&paPages[iPage].PendingSet, idRealCpu);
953
954 RTSpinlockRelease(pThis->hSpinlock, &Tmp);
955
956 /*
957 * Do the actual invalidation outside the spinlock.
958 */
959 ASMInvalidatePage(pvPage);
960
961 *piPage = iPage;
962 return pvPage;
963}
964
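/* Editorial illustration of the hashing above, assuming a 256-page cache:
 * HCPhys 0x12345000 hashes to (0x12345000 >> PAGE_SHIFT) % 256 = 0x45. On a
 * collision, entries 0x46..0x49 are probed inline and the search then falls
 * back to pgmR0DynMapPageSlow, which scans linearly modulo the cache size. */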
965
966/**
967 * Signals the start of a new set of mappings.
968 *
969 * Mostly for strictness. PGMDynMapHCPage won't work unless this
970 * API is called.
971 *
972 * @param pVCpu The shared data for the current virtual CPU.
973 */
974VMMDECL(void) PGMDynMapStartAutoSet(PVMCPU pVCpu)
975{
976 Assert(pVCpu->pgm.s.AutoSet.cEntries == PGMMAPSET_CLOSED);
977 pVCpu->pgm.s.AutoSet.cEntries = 0;
978}
979
980
981/**
982 * Releases the dynamic memory mappings made by PGMDynMapHCPage and associates
983 * since the PGMDynMapStartAutoSet call.
984 *
985 * @param pVCpu The shared data for the current virtual CPU.
986 */
987VMMDECL(void) PGMDynMapReleaseAutoSet(PVMCPU pVCpu)
988{
989 PPGMMAPSET pSet = &pVCpu->pgm.s.AutoSet;
990
991 /* close the set */
992 uint32_t i = pVCpu->pgm.s.AutoSet.cEntries;
993 AssertMsg(i <= RT_ELEMENTS(pVCpu->pgm.s.AutoSet.aEntries), ("%#x (%u)\n", i, i));
994 pVCpu->pgm.s.AutoSet.cEntries = PGMMAPSET_CLOSED;
995
996 /* release any pages we're referencing. */
997 if (i != 0 && RT_LIKELY(i <= RT_ELEMENTS(pVCpu->pgm.s.AutoSet.aEntries)))
998 {
999 PPGMR0DYNMAP pThis = g_pPGMR0DynMap;
1000 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1001 RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
1002
1003 while (i-- > 0)
1004 {
1005 uint32_t iPage = pSet->aEntries[i].iPage;
1006 Assert(iPage < pThis->cPages);
1007 int32_t cRefs = pSet->aEntries[i].cRefs;
1008 Assert(cRefs > 0);
1009 pgmR0DynMapReleasePageLocked(pThis, iPage, cRefs);
1010
1011 pSet->aEntries[i].iPage = UINT16_MAX;
1012 pSet->aEntries[i].cRefs = 0;
1013 }
1014
1015 Assert(pThis->cLoad <= pThis->cPages);
1016 RTSpinlockRelease(pThis->hSpinlock, &Tmp);
1017 }
1018}
1019
1020
1021/**
1022 * Migrates the automatic mapping set of the current vCPU if necessary.
1023 *
1024 * This is called when re-entering the hardware assisted execution mode after a
1025 * nip down to ring-3. We run the risk that the CPU might have changed, so we
1026 * make sure all the cache entries currently in the auto set are valid on the
1027 * new CPU. If the CPU didn't change, nothing will happen since this CPU was
1028 * already removed from each entry's pending invalidation set when the pages were mapped.
1029 *
1030 * @param pVCpu The shared data for the current virtual CPU.
1031 * @thread EMT
1032 */
1033VMMDECL(void) PGMDynMapMigrateAutoSet(PVMCPU pVCpu)
1034{
1035 PPGMMAPSET pSet = &pVCpu->pgm.s.AutoSet;
1036 uint32_t i = pVCpu->pgm.s.AutoSet.cEntries;
1037 AssertMsg(i <= RT_ELEMENTS(pVCpu->pgm.s.AutoSet.aEntries), ("%#x (%u)\n", i, i));
1038 if (i != 0 && RT_LIKELY(i <= RT_ELEMENTS(pVCpu->pgm.s.AutoSet.aEntries)))
1039 {
1040 PPGMR0DYNMAP pThis = g_pPGMR0DynMap;
1041 RTCPUID idRealCpu = RTMpCpuId();
1042
1043 while (i-- > 0)
1044 {
1045 Assert(pSet->aEntries[i].cRefs > 0);
1046 uint32_t iPage = pSet->aEntries[i].iPage;
1047 Assert(iPage < pThis->cPages);
1048 if (RTCpuSetIsMember(&pThis->paPages[iPage].PendingSet, idRealCpu))
1049 {
1050 RTCpuSetDel(&pThis->paPages[iPage].PendingSet, idRealCpu);
1051 ASMInvalidatePage(pThis->paPages[iPage].pvPage);
1052 }
1053 }
1054 }
1055}
1056
1057
1058/**
1059 * As a final resort for a full auto set, try to merge duplicate entries.
1060 *
1061 * @param pSet The set.
1062 */
1063static void pgmDynMapOptimizeAutoSet(PPGMMAPSET pSet)
1064{
1065 for (uint32_t i = 0 ; i < pSet->cEntries; i++)
1066 {
1067 uint16_t const iPage = pSet->aEntries[i].iPage;
1068 uint32_t j = i + 1;
1069 while (j < pSet->cEntries)
1070 {
1071 if (pSet->aEntries[j].iPage != iPage)
1072 j++;
1073 else
1074 {
1075 /* merge j with i removing j. */
1076 pSet->aEntries[i].cRefs += pSet->aEntries[j].cRefs;
1077 pSet->cEntries--;
1078 if (j < pSet->cEntries)
1079 {
1080 pSet->aEntries[j] = pSet->aEntries[pSet->cEntries];
1081 pSet->aEntries[pSet->cEntries].iPage = UINT16_MAX;
1082 pSet->aEntries[pSet->cEntries].cRefs = 0;
1083 }
1084 else
1085 {
1086 pSet->aEntries[j].iPage = UINT16_MAX;
1087 pSet->aEntries[j].cRefs = 0;
1088 }
1089 }
1090 }
1091 }
1092}
1093
1094
1095/* documented elsewhere - a bit of a mess. */
1096VMMDECL(int) PGMDynMapHCPage(PVM pVM, RTHCPHYS HCPhys, void **ppv)
1097{
1098 /*
1099 * Validate state.
1100 */
1101 AssertMsgReturn(pVM->pgm.s.pvR0DynMapUsed == g_pPGMR0DynMap,
1102 ("%p != %p\n", pVM->pgm.s.pvR0DynMapUsed, g_pPGMR0DynMap),
1103 VERR_ACCESS_DENIED);
1104 AssertMsg(!(HCPhys & PAGE_OFFSET_MASK), ("HCPhys=%RHp\n", HCPhys));
1105 PVMCPU pVCpu = VMMGetCpu(pVM);
1106 AssertPtrReturn(pVCpu, VERR_INTERNAL_ERROR);
1107 PPGMMAPSET pSet = &pVCpu->pgm.s.AutoSet;
1108 AssertMsgReturn(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries),
1109 ("%#x (%u)\n", pSet->cEntries, pSet->cEntries), VERR_WRONG_ORDER);
1110
1111 /*
1112 * Map it.
1113 */
1114 uint32_t iPage;
1115 void *pvPage = pgmR0DynMapPage(g_pPGMR0DynMap, HCPhys, &iPage);
1116 if (RT_UNLIKELY(!pvPage))
1117 {
1118 static uint32_t s_cBitched = 0;
1119 if (++s_cBitched < 10)
1120 LogRel(("PGMDynMapHCPage: cLoad=%u/%u cPages=%u\n",
1121 g_pPGMR0DynMap->cLoad, g_pPGMR0DynMap->cMaxLoad, g_pPGMR0DynMap->cPages));
1122 return VERR_PGM_DYNMAP_FAILED;
1123 }
1124
1125 /*
1126 * Add the page to the auto reference set.
1127 * If it's less than half full, don't bother looking for duplicates.
1128 */
1129 if (pSet->cEntries < RT_ELEMENTS(pSet->aEntries) / 2)
1130 {
1131 pSet->aEntries[pSet->cEntries].cRefs = 1;
1132 pSet->aEntries[pSet->cEntries].iPage = iPage;
pSet->cEntries++;
1133 }
1134 else
1135 {
1136 Assert(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries));
1137 int32_t i = pSet->cEntries;
1138 while (i-- > 0)
1139 if (pSet->aEntries[i].iPage == iPage)
1140 {
1141 pSet->aEntries[i].cRefs++;
1142 break;
1143 }
1144 if (i < 0)
1145 {
1146 if (RT_UNLIKELY(pSet->cEntries >= RT_ELEMENTS(pSet->aEntries)))
1147 pgmDynMapOptimizeAutoSet(pSet);
1148 if (RT_LIKELY(pSet->cEntries < RT_ELEMENTS(pSet->aEntries)))
1149 {
1150 pSet->aEntries[pSet->cEntries].cRefs = 1;
1151 pSet->aEntries[pSet->cEntries].iPage = iPage;
pSet->cEntries++;
1152 }
1153 else
1154 {
1155 /* We're screwed. */
1156 pgmR0DynMapReleasePage(g_pPGMR0DynMap, iPage, 1);
1157
1158 static uint32_t s_cBitched = 0;
1159 if (++s_cBitched < 10)
1160 LogRel(("PGMDynMapHCPage: set is full!\n"));
1161 return VERR_PGM_DYNMAP_FULL_SET;
1162 }
1163 }
1164 }
1165
1166 return VINF_SUCCESS;
1167}
1168
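/* Editorial sketch of the intended calling pattern for the auto-set APIs above
 * (illustrative only; the caller below is hypothetical and error handling is
 * reduced to the bare minimum): */
#if 0 /* illustration */
static int pgmR0DynMapUsageSketch(PVM pVM, PVMCPU pVCpu, RTHCPHYS HCPhys)
{
    PGMDynMapStartAutoSet(pVCpu);        /* open the auto set for this burst of work */

    void *pvPage;
    int rc = PGMDynMapHCPage(pVM, HCPhys, &pvPage);
    if (RT_SUCCESS(rc))
    {
        /* ... read or write the page through pvPage ... */
    }

    PGMDynMapReleaseAutoSet(pVCpu);      /* drop every reference taken since the start call */
    return rc;
}
#endif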