VirtualBox

Changeset 71906 in vbox


Timestamp: Apr 19, 2018 4:53:56 AM
Author: vboxsync
Message:

VMM/HMSVMR0: Clean up VMCB initialization.
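
In short, the change replaces the per-VCPU loop that fully initialized every VMCB with code that initializes the VMCB of VCPU 0 once and then copies the finished control area and MSR permission bitmap to the remaining VCPUs, pointing each copy at its own MSR-bitmap physical address. A minimal sketch of that pattern in plain C, using simplified stand-in types and sizes rather than the real VirtualBox declarations:

    /* Sketch of the initialize-once-then-copy pattern this changeset introduces.
       All types, names and sizes below are simplified stand-ins, not VirtualBox declarations. */
    #include <stdint.h>
    #include <string.h>

    #define MSR_BITMAP_SIZE 0x2000u   /* assumed two 4K pages, mirroring SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT */

    typedef struct VMCBCTRL
    {
        uint32_t u32InterceptXcpt;    /* exception intercept bits */
        uint64_t u64MSRPMPhysAddr;    /* physical address of the MSR permission bitmap */
        /* ... further control fields ... */
    } VMCBCTRL;

    typedef struct VCPU
    {
        VMCBCTRL VmcbCtrl;                       /* this VCPU's VMCB control area */
        uint8_t  abMsrBitmap[MSR_BITMAP_SIZE];   /* this VCPU's MSR permission bitmap */
        uint64_t HCPhysMsrBitmap;                /* its host-physical address */
    } VCPU;

    static void SetupVmcbs(VCPU *paCpus, unsigned cCpus)
    {
        /* 1. Fully set up the VMCB control area of VCPU 0 only. */
        VMCBCTRL *pCtrl0 = &paCpus[0].VmcbCtrl;
        pCtrl0->u32InterceptXcpt |= (1u << 17) | (1u << 1);   /* always trap #AC and #DB */
        pCtrl0->u64MSRPMPhysAddr  = paCpus[0].HCPhysMsrBitmap;
        /* ... intercept controls, pause filter, MSR passthrough setup, etc. ... */

        /* 2. Copy the finished control area and MSR bitmap to the remaining VCPUs,
              then fix up the one field that must stay per-VCPU. */
        for (unsigned i = 1; i < cCpus; i++)
        {
            memcpy(&paCpus[i].VmcbCtrl, pCtrl0, sizeof(paCpus[i].VmcbCtrl));
            memcpy(paCpus[i].abMsrBitmap, paCpus[0].abMsrBitmap, sizeof(paCpus[i].abMsrBitmap));
            paCpus[i].VmcbCtrl.u64MSRPMPhysAddr = paCpus[i].HCPhysMsrBitmap;
        }
    }

The actual code in the diff below additionally validates each VCPU's pVmcb pointer and asserts that the GIM provider's #UD trap setting is uniform across VCPUs before copying.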

File:
1 edited

  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

    r71859 r71906  
    395 395 static RTHCPHYS             g_HCPhysIOBitmap;
    396 396 /** Pointer to the IO bitmap. */
    397     static R0PTRTYPE(void *)        g_pvIOBitmap;
        397 static R0PTRTYPE(void *)    g_pvIOBitmap;
    398 398
    399 399 #ifdef VBOX_STRICT
     
    906 906 #endif
    907 907
    908     for (VMCPUID i = 0; i < pVM->cCpus; i++)
    909     {
    910         PVMCPU   pVCpu = &pVM->aCpus[i];
    911         PSVMVMCB pVmcb = pVM->aCpus[i].hm.s.svm.pVmcb;
    912 
    913         AssertMsgReturn(pVmcb, ("Invalid pVmcb for vcpu[%u]\n", i), VERR_SVM_INVALID_PVMCB);
     908    PVMCPU       pVCpu = &pVM->aCpus[0];
     909    PSVMVMCB     pVmcb = pVCpu->hm.s.svm.pVmcb;
     910    AssertMsgReturn(pVmcb, ("Invalid pVmcb for vcpu[0]\n"), VERR_SVM_INVALID_PVMCB);
     911    PSVMVMCBCTRL pVmcbCtrl = &pVmcb->ctrl;
     912
     913    /* Always trap #AC for reasons of security. */
     914    pVmcbCtrl->u32InterceptXcpt |= RT_BIT_32(X86_XCPT_AC);
     915
     916    /* Always trap #DB for reasons of security. */
     917    pVmcbCtrl->u32InterceptXcpt |= RT_BIT_32(X86_XCPT_DB);
     918
     919    /* Trap exceptions unconditionally (debug purposes). */
     920 #ifdef HMSVM_ALWAYS_TRAP_PF
     921    pVmcbCtrl->u32InterceptXcpt |=   RT_BIT(X86_XCPT_PF);
     922 #endif
     923 #ifdef HMSVM_ALWAYS_TRAP_ALL_XCPTS
     924    /* If you add any exceptions here, make sure to update hmR0SvmHandleExit(). */
     925    pVmcbCtrl->u32InterceptXcpt |= 0
     926                                 | RT_BIT(X86_XCPT_BP)
     927                                 | RT_BIT(X86_XCPT_DE)
     928                                 | RT_BIT(X86_XCPT_NM)
     929                                 | RT_BIT(X86_XCPT_UD)
     930                                 | RT_BIT(X86_XCPT_NP)
     931                                 | RT_BIT(X86_XCPT_SS)
     932                                 | RT_BIT(X86_XCPT_GP)
     933                                 | RT_BIT(X86_XCPT_PF)
     934                                 | RT_BIT(X86_XCPT_MF)
     935                                 ;
     936 #endif
     937
     938    /* Apply the exceptions intercepts needed by the GIM provider. */
     939    if (pVCpu->hm.s.fGIMTrapXcptUD)
     940        pVmcbCtrl->u32InterceptXcpt |= RT_BIT(X86_XCPT_UD);
     941
     942    /* Set up unconditional intercepts and conditions. */
     943    pVmcbCtrl->u64InterceptCtrl = HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS
     944                                | SVM_CTRL_INTERCEPT_VMMCALL;
     945
     946 #ifdef HMSVM_ALWAYS_TRAP_TASK_SWITCH
     947    pVmcbCtrl->u64InterceptCtrl |= SVM_CTRL_INTERCEPT_TASK_SWITCH;
     948 #endif
     949
     950 #ifdef VBOX_WITH_NESTED_HWVIRT
     951    /* Virtualized VMSAVE/VMLOAD. */
     952    pVmcbCtrl->LbrVirt.n.u1VirtVmsaveVmload = fUseVirtVmsaveVmload;
     953    if (!fUseVirtVmsaveVmload)
     954    {
     955        pVmcbCtrl->u64InterceptCtrl |= SVM_CTRL_INTERCEPT_VMSAVE
     956                                    |  SVM_CTRL_INTERCEPT_VMLOAD;
     957    }
     958
     959    /* Virtual GIF. */
     960    pVmcbCtrl->IntCtrl.n.u1VGifEnable = fUseVGif;
     961    if (!fUseVGif)
     962    {
     963        pVmcbCtrl->u64InterceptCtrl |= SVM_CTRL_INTERCEPT_CLGI
     964                                    |  SVM_CTRL_INTERCEPT_STGI;
     965    }
     966 #endif
     967
     968    /* CR4 writes must always be intercepted for tracking PGM mode changes. */
     969    pVmcbCtrl->u16InterceptWrCRx = RT_BIT(4);
     970
     971    /* Intercept all DRx reads and writes by default. Changed later on. */
     972    pVmcbCtrl->u16InterceptRdDRx = 0xffff;
     973    pVmcbCtrl->u16InterceptWrDRx = 0xffff;
     974
     975    /* Virtualize masking of INTR interrupts. (reads/writes from/to CR8 go to the V_TPR register) */
     976    pVmcbCtrl->IntCtrl.n.u1VIntrMasking = 1;
     977
     978    /* Ignore the priority in the virtual TPR. This is necessary for delivering PIC style (ExtInt) interrupts
     979       and we currently deliver both PIC and APIC interrupts alike. See hmR0SvmInjectPendingEvent() */
     980    pVmcbCtrl->IntCtrl.n.u1IgnoreTPR = 1;
     981
     982    /* Set the IO permission bitmap physical addresses. */
     983    pVmcbCtrl->u64IOPMPhysAddr = g_HCPhysIOBitmap;
     984
     985    /* LBR virtualization. */
     986    pVmcbCtrl->LbrVirt.n.u1LbrVirt = fUseLbrVirt;
     987
     988    /* The host ASID is MBZ; for the guest we start with ASID 1. */
     989    pVmcbCtrl->TLBCtrl.n.u32ASID = 1;
     990
     991    /* Setup Nested Paging. This doesn't change throughout the execution time of the VM. */
     992    pVmcbCtrl->NestedPagingCtrl.n.u1NestedPaging = pVM->hm.s.fNestedPaging;
     993
     994    /* Without Nested Paging, we need additional intercepts. */
     995    if (!pVM->hm.s.fNestedPaging)
     996    {
     997        /* CR3 reads/writes must be intercepted; our shadow values differ from the guest values. */
     998        pVmcbCtrl->u16InterceptRdCRx |= RT_BIT(3);
     999        pVmcbCtrl->u16InterceptWrCRx |= RT_BIT(3);
     1000
     1001        /* Intercept INVLPG and task switches (may change CR3, EFLAGS, LDT). */
     1002        pVmcbCtrl->u64InterceptCtrl |= SVM_CTRL_INTERCEPT_INVLPG
     1003                                    |  SVM_CTRL_INTERCEPT_TASK_SWITCH;
     1004
     1005        /* Page faults must be intercepted to implement shadow paging. */
     1006        pVmcbCtrl->u32InterceptXcpt |= RT_BIT(X86_XCPT_PF);
     1007    }
     1008
     1009    /* Setup Pause Filter for guest pause-loop (spinlock) exiting. */
     1010    if (fUsePauseFilter)
     1011    {
     1012        Assert(pVM->hm.s.svm.cPauseFilter > 0);
     1013        pVmcbCtrl->u16PauseFilterCount = pVM->hm.s.svm.cPauseFilter;
     1014        if (fPauseFilterThreshold)
     1015            pVmcbCtrl->u16PauseFilterThreshold = pVM->hm.s.svm.cPauseFilterThresholdTicks;
     1016        pVmcbCtrl->u64InterceptCtrl |= SVM_CTRL_INTERCEPT_PAUSE;
     1017    }
     1018
     1019    /*
     1020     * Setup the MSR permission bitmap.
     1021     * The following MSRs are saved/restored automatically during the world-switch.
     1022     * Don't intercept guest read/write accesses to these MSRs.
     1023     */
     1024    uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.svm.pvMsrBitmap;
     1025    PCPUMCTX pCtx        = &pVCpu->cpum.GstCtx;
     1026    hmR0SvmSetMsrPermission(pCtx, pbMsrBitmap, MSR_K8_LSTAR,          SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
     1027    hmR0SvmSetMsrPermission(pCtx, pbMsrBitmap, MSR_K8_CSTAR,          SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
     1028    hmR0SvmSetMsrPermission(pCtx, pbMsrBitmap, MSR_K6_STAR,           SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
     1029    hmR0SvmSetMsrPermission(pCtx, pbMsrBitmap, MSR_K8_SF_MASK,        SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
     1030    hmR0SvmSetMsrPermission(pCtx, pbMsrBitmap, MSR_K8_FS_BASE,        SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
     1031    hmR0SvmSetMsrPermission(pCtx, pbMsrBitmap, MSR_K8_GS_BASE,        SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
     1032    hmR0SvmSetMsrPermission(pCtx, pbMsrBitmap, MSR_K8_KERNEL_GS_BASE, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
     1033    hmR0SvmSetMsrPermission(pCtx, pbMsrBitmap, MSR_IA32_SYSENTER_CS,  SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
     1034    hmR0SvmSetMsrPermission(pCtx, pbMsrBitmap, MSR_IA32_SYSENTER_ESP, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
     1035    hmR0SvmSetMsrPermission(pCtx, pbMsrBitmap, MSR_IA32_SYSENTER_EIP, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
     1036    pVmcbCtrl->u64MSRPMPhysAddr = pVCpu->hm.s.svm.HCPhysMsrBitmap;
     1037
     1038    /* Initialize the #VMEXIT history array with end-of-array markers (UINT16_MAX). */
     1039    Assert(!pVCpu->hm.s.idxExitHistoryFree);
     1040    HMCPU_EXIT_HISTORY_RESET(pVCpu);
     1041
     1042    /* Initially all VMCB clean bits MBZ indicating that everything should be loaded from the VMCB in memory. */
     1043    Assert(pVmcbCtrl->u32VmcbCleanBits == 0);
     1044
     1045    for (VMCPUID i = 1; i < pVM->cCpus; i++)
     1046    {
     1047        PVMCPU       pVCpuCur = &pVM->aCpus[i];
     1048        PSVMVMCB     pVmcbCur = pVM->aCpus[i].hm.s.svm.pVmcb;
     1049        AssertMsgReturn(pVmcbCur, ("Invalid pVmcb for vcpu[%u]\n", i), VERR_SVM_INVALID_PVMCB);
     1050        PSVMVMCBCTRL pVmcbCtrlCur = &pVmcbCur->ctrl;
     1051
     1052        /* Copy the VMCB control area. */
     1053        memcpy(pVmcbCtrlCur, pVmcbCtrl, sizeof(*pVmcbCtrlCur));
     1054
     1055        /* Copy the MSR bitmap and setup the VCPU-specific host physical address. */
     1056        uint8_t *pbMsrBitmapCur = (uint8_t *)pVCpuCur->hm.s.svm.pvMsrBitmap;
     1057        memcpy(pbMsrBitmapCur, pbMsrBitmap, SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT);
     1058        pVmcbCtrlCur->u64MSRPMPhysAddr = pVCpuCur->hm.s.svm.HCPhysMsrBitmap;
    914 1059
    915 1060        /* Initialize the #VMEXIT history array with end-of-array markers (UINT16_MAX). */
    916         Assert(!pVCpu->hm.s.idxExitHistoryFree);
    917         HMCPU_EXIT_HISTORY_RESET(pVCpu);
    918 
    919         /* Always trap #AC for reasons of security. */
    920         pVmcb->ctrl.u32InterceptXcpt |= RT_BIT_32(X86_XCPT_AC);
    921 
    922         /* Always trap #DB for reasons of security. */
    923         pVmcb->ctrl.u32InterceptXcpt |= RT_BIT_32(X86_XCPT_DB);
    924 
    925         /* Trap exceptions unconditionally (debug purposes). */
    926 #ifdef HMSVM_ALWAYS_TRAP_PF
    927         pVmcb->ctrl.u32InterceptXcpt |=   RT_BIT(X86_XCPT_PF);
    928 #endif
    929 #ifdef HMSVM_ALWAYS_TRAP_ALL_XCPTS
    930         /* If you add any exceptions here, make sure to update hmR0SvmHandleExit(). */
    931         pVmcb->ctrl.u32InterceptXcpt |= 0
    932                                      | RT_BIT(X86_XCPT_BP)
    933                                      | RT_BIT(X86_XCPT_DE)
    934                                      | RT_BIT(X86_XCPT_NM)
    935                                      | RT_BIT(X86_XCPT_UD)
    936                                      | RT_BIT(X86_XCPT_NP)
    937                                      | RT_BIT(X86_XCPT_SS)
    938                                      | RT_BIT(X86_XCPT_GP)
    939                                      | RT_BIT(X86_XCPT_PF)
    940                                      | RT_BIT(X86_XCPT_MF)
    941                                      ;
    942 #endif
    943 
    944         /* Set up unconditional intercepts and conditions. */
    945         pVmcb->ctrl.u64InterceptCtrl =   HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS
    946                                        | SVM_CTRL_INTERCEPT_VMMCALL;
    947 
    948         /* CR4 writes must always be intercepted for tracking PGM mode changes. */
    949         pVmcb->ctrl.u16InterceptWrCRx = RT_BIT(4);
    950 
    951         /* Intercept all DRx reads and writes by default. Changed later on. */
    952         pVmcb->ctrl.u16InterceptRdDRx = 0xffff;
    953         pVmcb->ctrl.u16InterceptWrDRx = 0xffff;
    954 
    955         /* Virtualize masking of INTR interrupts. (reads/writes from/to CR8 go to the V_TPR register) */
    956         pVmcb->ctrl.IntCtrl.n.u1VIntrMasking = 1;
    957 
    958         /* Ignore the priority in the virtual TPR. This is necessary for delivering PIC style (ExtInt) interrupts
    959            and we currently deliver both PIC and APIC interrupts alike. See hmR0SvmInjectPendingEvent() */
    960         pVmcb->ctrl.IntCtrl.n.u1IgnoreTPR   = 1;
    961 
    962         /* Set IO and MSR bitmap permission bitmap physical addresses. */
    963         pVmcb->ctrl.u64IOPMPhysAddr  = g_HCPhysIOBitmap;
    964         pVmcb->ctrl.u64MSRPMPhysAddr = pVCpu->hm.s.svm.HCPhysMsrBitmap;
    965 
    966         /* LBR virtualization. */
    967         if (fUseLbrVirt)
    968         {
    969             pVmcb->ctrl.LbrVirt.n.u1LbrVirt = fUseLbrVirt;
    970             pVmcb->guest.u64DBGCTL = MSR_IA32_DEBUGCTL_LBR;
    971         }
    972         else
    973             Assert(pVmcb->ctrl.LbrVirt.n.u1LbrVirt == 0);
    974 
    975 #ifdef VBOX_WITH_NESTED_HWVIRT
    976         /* Virtualized VMSAVE/VMLOAD. */
    977         pVmcb->ctrl.LbrVirt.n.u1VirtVmsaveVmload = fUseVirtVmsaveVmload;
    978         if (!fUseVirtVmsaveVmload)
    979         {
    980             pVmcb->ctrl.u64InterceptCtrl |= SVM_CTRL_INTERCEPT_VMSAVE
    981                                          |  SVM_CTRL_INTERCEPT_VMLOAD;
    982         }
    983 
    984         /* Virtual GIF. */
    985         pVmcb->ctrl.IntCtrl.n.u1VGifEnable = fUseVGif;
    986         if (!fUseVGif)
    987         {
    988             pVmcb->ctrl.u64InterceptCtrl |= SVM_CTRL_INTERCEPT_CLGI
    989                                          |  SVM_CTRL_INTERCEPT_STGI;
    990         }
    991 #endif
     1061        Assert(!pVCpuCur->hm.s.idxExitHistoryFree);
     1062        HMCPU_EXIT_HISTORY_RESET(pVCpuCur);
    992 1063
    993 1064        /* Initially all VMCB clean bits MBZ indicating that everything should be loaded from the VMCB in memory. */
    994         Assert(pVmcb->ctrl.u32VmcbCleanBits == 0);
    995 
    996         /* The host ASID MBZ, for the guest start with 1. */
    997         pVmcb->ctrl.TLBCtrl.n.u32ASID = 1;
    998 
    999         /*
    1000          * Setup the PAT MSR (applicable for Nested Paging only).
    1001          *
    1002          * While guests can modify and see the modified values through the shadow values,
    1003          * we shall not honor any guest modifications of this MSR to ensure caching is always
    1004          * enabled similar to how we always run with CR0.CD and NW bits cleared.
    1005          */
    1006         pVmcb->guest.u64PAT = MSR_IA32_CR_PAT_INIT_VAL;
    1007 
    1008         /* Setup Nested Paging. This doesn't change throughout the execution time of the VM. */
    1009         pVmcb->ctrl.NestedPagingCtrl.n.u1NestedPaging = pVM->hm.s.fNestedPaging;
    1010 
    1011         /* Without Nested Paging, we need additional intercepts. */
    1012         if (!pVM->hm.s.fNestedPaging)
    1013         {
    1014             /* CR3 reads/writes must be intercepted; our shadow values differ from the guest values. */
    1015             pVmcb->ctrl.u16InterceptRdCRx |= RT_BIT(3);
    1016             pVmcb->ctrl.u16InterceptWrCRx |= RT_BIT(3);
    1017 
    1018             /* Intercept INVLPG and task switches (may change CR3, EFLAGS, LDT). */
    1019             pVmcb->ctrl.u64InterceptCtrl |= SVM_CTRL_INTERCEPT_INVLPG
    1020                                          |  SVM_CTRL_INTERCEPT_TASK_SWITCH;
    1021 
    1022             /* Page faults must be intercepted to implement shadow paging. */
    1023             pVmcb->ctrl.u32InterceptXcpt |= RT_BIT(X86_XCPT_PF);
    1024         }
    1025 
    1026 #ifdef HMSVM_ALWAYS_TRAP_TASK_SWITCH
    1027         pVmcb->ctrl.u64InterceptCtrl |= SVM_CTRL_INTERCEPT_TASK_SWITCH;
    1028 #endif
    1029 
    1030         /* Apply the exceptions intercepts needed by the GIM provider. */
    1031         if (pVCpu->hm.s.fGIMTrapXcptUD)
    1032             pVmcb->ctrl.u32InterceptXcpt |= RT_BIT(X86_XCPT_UD);
    1033 
    1034         /* Setup Pause Filter for guest pause-loop (spinlock) exiting. */
    1035         if (fUsePauseFilter)
    1036         {
    1037             Assert(pVM->hm.s.svm.cPauseFilter > 0);
    1038             pVmcb->ctrl.u16PauseFilterCount = pVM->hm.s.svm.cPauseFilter;
    1039             if (fPauseFilterThreshold)
    1040                 pVmcb->ctrl.u16PauseFilterThreshold = pVM->hm.s.svm.cPauseFilterThresholdTicks;
    1041             pVmcb->ctrl.u64InterceptCtrl |= SVM_CTRL_INTERCEPT_PAUSE;
    1042         }
    1043 
    1044         /*
    1045          * The following MSRs are saved/restored automatically during the world-switch.
    1046          * Don't intercept guest read/write accesses to these MSRs.
    1047          */
    1048         uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.svm.pvMsrBitmap;
    1049         PCPUMCTX pCtx        = CPUMQueryGuestCtxPtr(pVCpu);
    1050         hmR0SvmSetMsrPermission(pCtx, pbMsrBitmap, MSR_K8_LSTAR,          SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    1051         hmR0SvmSetMsrPermission(pCtx, pbMsrBitmap, MSR_K8_CSTAR,          SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    1052         hmR0SvmSetMsrPermission(pCtx, pbMsrBitmap, MSR_K6_STAR,           SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    1053         hmR0SvmSetMsrPermission(pCtx, pbMsrBitmap, MSR_K8_SF_MASK,        SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    1054         hmR0SvmSetMsrPermission(pCtx, pbMsrBitmap, MSR_K8_FS_BASE,        SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    1055         hmR0SvmSetMsrPermission(pCtx, pbMsrBitmap, MSR_K8_GS_BASE,        SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    1056         hmR0SvmSetMsrPermission(pCtx, pbMsrBitmap, MSR_K8_KERNEL_GS_BASE, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    1057         hmR0SvmSetMsrPermission(pCtx, pbMsrBitmap, MSR_IA32_SYSENTER_CS,  SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    1058         hmR0SvmSetMsrPermission(pCtx, pbMsrBitmap, MSR_IA32_SYSENTER_ESP, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    1059         hmR0SvmSetMsrPermission(pCtx, pbMsrBitmap, MSR_IA32_SYSENTER_EIP, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    1060         pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_IOPM_MSRPM;
     1065        Assert(pVmcbCtrlCur->u32VmcbCleanBits == 0);
     1066
     1067        /* Verify our assumption that GIM providers trap #UD uniformly across VCPUs. */
     1068        Assert(pVCpuCur->hm.s.fGIMTrapXcptUD == pVCpu->hm.s.fGIMTrapXcptUD);
    1061 1069    }
    1062 1070
     
    1752 1760    pVmcb->guest.u64KernelGSBase = pCtx->msrKERNELGSBASE;
    1753 1761
    1754     /* We don't honor guest modifications to its PAT MSR (similar to ignoring CR0.CD, NW bits). */
     1762    /*
     1763     * Setup the PAT MSR (applicable for Nested Paging only).
     1764     *
     1765     * While guests can modify and see the modified values through the shadow values,
     1766     * we shall not honor any guest modifications of this MSR to ensure caching is always
     1767     * enabled similar to how we always run with CR0.CD and NW bits cleared.
     1768     */
     1769    pVmcb->guest.u64PAT = MSR_IA32_CR_PAT_INIT_VAL;
     1770
     1771    /* Enable the last branch record bit if LBR virtualization is enabled. */
     1772    if (pVmcb->ctrl.LbrVirt.n.u1LbrVirt)
     1773        pVmcb->guest.u64DBGCTL = MSR_IA32_DEBUGCTL_LBR;
    1755 1774 }
    1756 1775