Changeset 87428 in vbox


Timestamp: Jan 26, 2021 10:59:27 AM (4 years ago)
Author: vboxsync
svn:sync-xref-src-repo-rev: 142434

Message:
VMM/HMR0A.asm: Refactored hmR0VMXStartVM into a more regular frame setup.
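The pattern behind the refactoring, before reading the diff: instead of interleaved push/pop pairs whose stack offsets the old RESTORE_STATE_VM64 macro had to track implicitly, the function now establishes a conventional rbp-anchored frame, reserves every scratch slot up front, and names each slot with a %define'd rbp-relative offset. A minimal NASM sketch of that shape (slot names and sizes here are illustrative, not the real HMR0A.asm layout):

    ; Hypothetical "regular frame" skeleton -- not the actual VirtualBox code.
    %define frm_fFlags  -008h               ; example slot: saved flags
    %define frm_pCtx    -010h               ; example slot: a context pointer
    %assign cbFrame      010h               ; total size of all named slots

    example_func:
            push    rbp
            mov     rbp, rsp                ; rbp anchors the frame
            sub     rsp, cbFrame            ; reserve every slot in one step

            mov     [rbp + frm_pCtx], rdi   ; slots are rbp-relative, so later
                                            ; pushes and calls cannot shift them
            ; ... body: rsp is free to move here ...
            mov     rdi, [rbp + frm_pCtx]   ; any exit path reloads from the
                                            ; same fixed offset
            leave                           ; mov rsp, rbp / pop rbp
            ret

Because every saved value lives at a fixed offset from rbp, the success and failure exits below can share one restore macro (RESTORE_STATE_VMX) instead of each replaying a brittle sequence of pops.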

File (1 edited):
  • trunk/src/VBox/VMM/VMMR0/HMR0A.asm

Legend: unchanged lines have no prefix; added lines are prefixed with "+", removed lines with "-".

--- trunk/src/VBox/VMM/VMMR0/HMR0A.asm (r87417)
+++ trunk/src/VBox/VMM/VMMR0/HMR0A.asm (r87428)
@@ -47,4 +47,7 @@
 ; Determine skipping restoring of GDTR, IDTR, TR across VMX non-root operation.
 ;
+; @note This is normally done by hmR0VmxExportHostSegmentRegs and VMXRestoreHostState,
+;       so much of this is untested code.
+; @{
 %define VMX_SKIP_GDTR
 %define VMX_SKIP_TR
@@ -54,10 +57,11 @@
  ; risk loading a stale LDT value or something invalid.
  %define HM_64_BIT_USE_NULL_SEL
- ; Darwin (Mavericks) uses IDTR limit to store the CPU Id so we need to restore it always.
+ ; Darwin (Mavericks) uses IDTR limit to store the CPU number so we need to always restore it.
  ; See @bugref{6875}.
+ %undef VMX_SKIP_IDTR
 %else
  %define VMX_SKIP_IDTR
 %endif
-
+;; @}

 ;; @def CALLEE_PRESERVED_REGISTER_COUNT
@@ -143,22 +147,13 @@
 %endmacro

+
 ;; @def PUSH_RELEVANT_SEGMENT_REGISTERS
 ; Macro saving all segment registers on the stack.
 ; @param 1  Full width register name.
 ; @param 2  16-bit register name for \a 1.
-
-;; @def POP_RELEVANT_SEGMENT_REGISTERS
-; Macro restoring all segment registers on the stack.
-; @param 1  Full width register name.
-; @param 2  16-bit register name for \a 1.
-%ifdef VBOX_SKIP_RESTORE_SEG
- %macro PUSH_RELEVANT_SEGMENT_REGISTERS 2
- %endmacro
-
- %macro POP_RELEVANT_SEGMENT_REGISTERS 2
- %endmacro
-%else       ; !VBOX_SKIP_RESTORE_SEG
- ; Trashes, rax, rdx & rcx.
- %macro PUSH_RELEVANT_SEGMENT_REGISTERS 2
+; @clobbers rax, rdx, rcx
+%macro PUSH_RELEVANT_SEGMENT_REGISTERS 2
+ %ifndef VBOX_SKIP_RESTORE_SEG
+  %error untested code. probably does not work any more!
   %ifndef HM_64_BIT_USE_NULL_SEL
         mov     %2, es
@@ -187,8 +182,15 @@
         push    gs
   %endif
- %endmacro
-
- ; trashes, rax, rdx & rcx
- %macro POP_RELEVANT_SEGMENT_REGISTERS 2
+ %endif   ; !VBOX_SKIP_RESTORE_SEG
+%endmacro ; PUSH_RELEVANT_SEGMENT_REGISTERS
+
+;; @def POP_RELEVANT_SEGMENT_REGISTERS
+; Macro restoring all segment registers on the stack.
+; @param 1  Full width register name.
+; @param 2  16-bit register name for \a 1.
+; @clobbers rax, rdx, rcx
+%macro POP_RELEVANT_SEGMENT_REGISTERS 2
+ %ifndef VBOX_SKIP_RESTORE_SEG
+  %error untested code. probably does not work any more!
         ; Note: do not step through this code with a debugger!
   %ifndef HM_64_BIT_USE_NULL_SEL
@@ -223,6 +225,6 @@
         mov     es, %2
   %endif
- %endmacro
-%endif ; VBOX_SKIP_RESTORE_SEG
+ %endif   ; !VBOX_SKIP_RESTORE_SEG
+%endmacro ; POP_RELEVANT_SEGMENT_REGISTERS


@@ -740,104 +742,4 @@


-;; @def RESTORE_STATE_VM64
-; Macro restoring essential host state and updating guest state
-; for 64-bit host, 64-bit guest for VT-x.
-;
-%macro RESTORE_STATE_VM64 0
-        ; Restore base and limit of the IDTR & GDTR.
- %ifndef VMX_SKIP_IDTR
-        lidt    [xSP]
-        add     xSP, xCB * 2
- %endif
- %ifndef VMX_SKIP_GDTR
-        lgdt    [xSP]
-        add     xSP, xCB * 2
- %endif
-
-        ; Save the guest state.
-        push    xDI
- %ifndef VMX_SKIP_TR
-        mov     xDI, [xSP + xCB * 3]        ; pCtx (*3 to skip the saved xDI, TR, LDTR)
- %else
-        mov     xDI, [xSP + xCB * 2]        ; pCtx (*2 to skip the saved xDI, LDTR)
- %endif
-
-        mov     qword [xDI + CPUMCTX.eax], rax
-        mov     rax, SPECTRE_FILLER
-        mov     qword [xDI + CPUMCTX.ebx], rbx
-        mov     rbx, rax
-        mov     qword [xDI + CPUMCTX.ecx], rcx
-        mov     rcx, rax
-        mov     qword [xDI + CPUMCTX.edx], rdx
-        mov     rdx, rax
-        mov     qword [xDI + CPUMCTX.esi], rsi
-        mov     rsi, rax
-        mov     qword [xDI + CPUMCTX.ebp], rbp
-        mov     rbp, rax
-        mov     qword [xDI + CPUMCTX.r8],  r8
-        mov     r8, rax
-        mov     qword [xDI + CPUMCTX.r9],  r9
-        mov     r9, rax
-        mov     qword [xDI + CPUMCTX.r10], r10
-        mov     r10, rax
-        mov     qword [xDI + CPUMCTX.r11], r11
-        mov     r11, rax
-        mov     qword [xDI + CPUMCTX.r12], r12
-        mov     r12, rax
-        mov     qword [xDI + CPUMCTX.r13], r13
-        mov     r13, rax
-        mov     qword [xDI + CPUMCTX.r14], r14
-        mov     r14, rax
-        mov     qword [xDI + CPUMCTX.r15], r15
-        mov     r15, rax
-        mov     rax, cr2
-        mov     qword [xDI + CPUMCTX.cr2], rax
-
-        pop     xAX                                 ; The guest rdi we pushed above
-        mov     qword [xDI + CPUMCTX.edi], rax
-
-        ; Fight spectre.
-        INDIRECT_BRANCH_PREDICTION_BARRIER_CTX xDI, CPUMCTX_WSF_IBPB_EXIT
-
- %ifndef VMX_SKIP_TR
-        ; Restore TSS selector; must mark it as not busy before using ltr!
-        ; ASSUME that this is supposed to be 'BUSY' (saves 20-30 ticks on the T42p).
-        ; @todo get rid of sgdt
-        pop     xBX         ; Saved TR
-        sub     xSP, xCB * 2
-        sgdt    [xSP]
-        mov     xAX, xBX
-        and     eax, X86_SEL_MASK_OFF_RPL           ; mask away TI and RPL bits leaving only the descriptor offset
-        add     xAX, [xSP + 2]                      ; eax <- GDTR.address + descriptor offset
-        and     dword [xAX + 4], ~RT_BIT(9)         ; clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit)
-        ltr     bx
-        add     xSP, xCB * 2
- %endif
-
-        pop     xAX         ; Saved LDTR
-        cmp     eax, 0
-        je      %%skip_ldt_write64
-        lldt    ax
-
-%%skip_ldt_write64:
-        pop     xSI         ; pCtx (needed in rsi by the macros below)
-
-        ; Restore segment registers.
-        POP_RELEVANT_SEGMENT_REGISTERS xAX, ax
-
-        ; Restore the host XCR0 if necessary.
-        pop     xCX
-        test    ecx, ecx
-        jnz     %%xcr0_after_skip
-        pop     xAX
-        pop     xDX
-        xsetbv                              ; ecx is already zero.
-%%xcr0_after_skip:
-
-        ; Restore general purpose registers.
-        POP_CALLEE_PRESERVED_REGISTERS
-%endmacro
-
-
 ;;
 ; Prepares for and executes VMLAUNCH/VMRESUME (64-bit guest mode)
@@ -852,83 +754,73 @@
         push    xBP
         mov     xBP, xSP
-
+        SEH64_SET_FRAME_xBP 0
         pushf
         cli

+%define frm_fRFlags         -008h
+%define frm_pGstCtx         -010h              ; Where we stash guest CPU context for use after the vmrun.
+%define frm_uHostXcr0       -020h              ; 128-bit
+%define frm_saved_gdtr      -036h              ; 16+64:  Only used when VMX_SKIP_GDTR isn't defined
+%define frm_saved_tr        -034h              ; 16-bit: Only used when VMX_SKIP_TR isn't defined
+%define frm_fNoRestoreXcr0  -030h              ; 32-bit: Non-zero if we should skip XCR0 restoring.
+%define frm_saved_idtr      -046h              ; 16+64:  Only used when VMX_SKIP_IDTR isn't defined
+%define frm_saved_ldtr      -044h              ; 16-bit: always saved.
+%define frm_rcError         -040h              ; 32-bit: Error status code (not used in the success path)
+%define frm_guest_rax       -048h              ; Temporary storage slot for guest RAX.
+%assign cbFrame              050h
+        sub     rsp, cbFrame - 8
+
         ; Save all general purpose host registers.
-%assign cbFrame 8
         PUSH_CALLEE_PRESERVED_REGISTERS
+        ;PUSH_RELEVANT_SEGMENT_REGISTERS xAX, ax - currently broken
         SEH64_END_PROLOGUE

-        ; First we have to save some final CPU context registers.
-        lea     r10, [.vmlaunch64_done wrt rip]
-        mov     rax, VMX_VMCS_HOST_RIP      ; return address (too difficult to continue after VMLAUNCH?)
-        vmwrite rax, r10
-        ; Note: ASSUMES success!
-
         ;
         ; Unify the input parameter registers: rsi=pVCpu, bl=fResume, rdi=&pVCpu->cpum.GstCtx;
         ;
-%ifdef ASM_CALL64_GCC
+ %ifdef ASM_CALL64_GCC
         mov     ebx, edx        ; fResume
-%else
+ %else
         mov     rsi, rdx        ; pVCpu
         mov     ebx, r8d        ; fResume
-%endif
+ %endif
         lea     rdi, [rsi + VMCPU.cpum.GstCtx]
+        mov     [rbp + frm_pGstCtx], rdi

         ;
         ; Save the host XCR0 and load the guest one if necessary.
-        ; Note! Trashes rdx and rcx.
-        ;
+        ; Note! Trashes rax, rdx and rcx.
+        ;
+        mov     ecx, 3fh                    ; indicate that we need not restore XCR0
         test    byte [rsi + VMCPU.hm + HMCPU.fLoadSaveGuestXcr0], 1
-        jz      .xcr0_before_skip
+        jz      .xcr0_before_done

         xor     ecx, ecx
         xgetbv                              ; save the host one on the stack
-        push    xDX
-        push    xAX
+        mov     [rbp + frm_uHostXcr0], eax
+        mov     [rbp + frm_uHostXcr0 + 4], edx

         mov     eax, [rdi + CPUMCTX.aXcr]   ; load the guest one
         mov     edx, [rdi + CPUMCTX.aXcr + 4]
-        xor     ecx, ecx                    ; paranoia
+        xor     ecx, ecx                    ; paranoia; indicate that we must restore XCR0 (popped into ecx, thus 0)
         xsetbv
-
-        push    0                           ; indicate that we must restore XCR0 (popped into ecx, thus 0)
-        jmp     .xcr0_before_done
-
-.xcr0_before_skip:
-        push    3fh                         ; indicate that we need not
 .xcr0_before_done:
-
-        ;
-        ; Save segment registers.
-        ; Note! Trashes rdx & rcx, so we moved it here (amd64 case).
-        ;
-        PUSH_RELEVANT_SEGMENT_REGISTERS xAX, ax
-
-        ; Save the pCtx pointer.
-        push    rdi
+        mov     [rbp + frm_fNoRestoreXcr0], ecx ; only 32-bit!

         ; Save host LDTR.
-        xor     eax, eax
-        sldt    ax
-        push    xAX
-
-%ifndef VMX_SKIP_TR
+        sldt    [rbp + frm_saved_ldtr]
+
+ %ifndef VMX_SKIP_TR
         ; The host TR limit is reset to 0x67; save & restore it manually.
-        str     eax
-        push    xAX
-%endif
-
-%ifndef VMX_SKIP_GDTR
+        str     word [rbp + frm_saved_tr]
+ %endif
+
+ %ifndef VMX_SKIP_GDTR
         ; VT-x only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
-        sub     xSP, xCB * 2
-        sgdt    [xSP]
-%endif
-%ifndef VMX_SKIP_IDTR
-        sub     xSP, xCB * 2
-        sidt    [xSP]
-%endif
+        sgdt    [rbp + frm_saved_gdtr]
+ %endif
+ %ifndef VMX_SKIP_IDTR
+        sidt    [rbp + frm_saved_idtr]
+ %endif

         ; Load CR2 if necessary (may be expensive as writing CR2 is a synchronizing instruction).
@@ -940,10 +832,18 @@

 .skip_cr2_write:
-        mov     eax, VMX_VMCS_HOST_RSP
-        vmwrite xAX, xSP
-        ; Note: ASSUMES success!
-        ; Don't mess with ESP anymore!!!
-
-        ; Fight spectre and similar.
+        ; Set the vmlaunch/vmresume "return" host RIP and RSP values.
+        lea     rcx, [hmR0VMXStartVMHostRIP wrt rip] ;; @todo It is only strictly necessary to write VMX_VMCS_HOST_RIP when
+        mov     eax, VMX_VMCS_HOST_RIP               ;;       the VMXVMCSINFO::pfnStartVM function changes (eventually
+        vmwrite rax, rcx                             ;;       take the Windows/SSE stuff into account then)...
+ %ifdef VBOX_STRICT
+        jna     hmR0VMXStartVMHostRIP.vmwrite_failed
+ %endif
+        mov     edx, VMX_VMCS_HOST_RSP               ;; @todo The HOST_RSP value is unlikely to change much, so if vmwrite
+        vmwrite rdx, rsp                             ;;       can be noticeably more expensive than a memory read, we could
+ %ifdef VBOX_STRICT                                  ;;       easily optimize this one away almost completely by comparing
+        jna     hmR0VMXStartVMHostRIP.vmwrite_failed ;;       rsp with a shadow copy of VMX_VMCS_HOST_RSP.
+ %endif
+
+        ; Fight spectre and similar. Trashes rax, rcx, and rdx.
         INDIRECT_BRANCH_PREDICTION_AND_L1_CACHE_BARRIER rdi, CPUMCTX_WSF_IBPB_ENTRY, CPUMCTX_WSF_L1D_ENTRY, CPUMCTX_WSF_MDS_ENTRY

@@ -971,33 +871,161 @@

         vmresume
-        jc      near .vmxstart64_invalid_vmcs_ptr
-        jz      near .vmxstart64_start_failed
-        jmp     .vmlaunch64_done        ; here if vmresume detected a failure
+        jc      hmR0VMXStartVMHostRIP.vmxstart64_invalid_vmcs_ptr
+        jz      hmR0VMXStartVMHostRIP.vmxstart64_start_failed
+        jmp     hmR0VMXStartVMHostRIP   ; here if vmresume detected a failure

 .vmlaunch64_launch:
         vmlaunch
-        jc      near .vmxstart64_invalid_vmcs_ptr
-        jz      near .vmxstart64_start_failed
-        jmp     .vmlaunch64_done        ; here if vmlaunch detected a failure
+        jc      hmR0VMXStartVMHostRIP.vmxstart64_invalid_vmcs_ptr
+        jz      hmR0VMXStartVMHostRIP.vmxstart64_start_failed
+        jmp     hmR0VMXStartVMHostRIP   ; here if vmlaunch detected a failure

 ALIGNCODE(64)
-.vmlaunch64_done:
-        RESTORE_STATE_VM64
+GLOBALNAME hmR0VMXStartVMHostRIP
+
+;;
+; Common restore logic for success and error paths.  We duplicate this because we
+; don't want to waste writing the VINF_SUCCESS return value to the stack in the
+; regular code path.
+;
+; @param    1   Zero if regular return, non-zero if error return.  Controls label emission.
+;
+; @note Important that this does not modify cbFrame or rsp.
+%macro RESTORE_STATE_VMX 1
+        ; Restore base and limit of the IDTR & GDTR.
+ %ifndef VMX_SKIP_IDTR
+        lidt    [rsp + cbFrame + frm_saved_idtr]
+ %endif
+ %ifndef VMX_SKIP_GDTR
+        lgdt    [rsp + cbFrame + frm_saved_gdtr]
+ %endif
+
+        ; Save the guest state and restore the non-volatile registers.  We use rax=pGstCtx here.
+        mov     [rsp + cbFrame + frm_guest_rax], rax
+        mov     rax, [rsp + cbFrame + frm_pGstCtx]
+
+        mov     qword [rax + CPUMCTX.ebp], rbp
+        lea     rbp, [rsp + cbFrame]    ; re-establish the frame pointer as early as possible.
+        mov     qword [rax + CPUMCTX.ecx], rcx
+        mov     rcx, SPECTRE_FILLER
+        mov     qword [rax + CPUMCTX.edx], rdx
+        mov     rdx, [rbp + frm_guest_rax]
+        mov     qword [rax + CPUMCTX.eax], rdx
+        mov     rdx, rcx
+        mov     qword [rax + CPUMCTX.r8],  r8
+        mov     r8, rcx
+        mov     qword [rax + CPUMCTX.r9],  r9
+        mov     r9, rcx
+        mov     qword [rax + CPUMCTX.r10], r10
+        mov     r10, rcx
+        mov     qword [rax + CPUMCTX.r11], r11
+        mov     r11, rcx
+        mov     qword [rax + CPUMCTX.esi], rsi
+ %ifdef ASM_CALL64_MSC
+        mov     rsi, [rbp + frm_saved_rsi]
+ %else
+        mov     rsi, rcx
+ %endif
+        mov     qword [rax + CPUMCTX.edi], rdi
+ %ifdef ASM_CALL64_MSC
+        mov     rdi, [rbp + frm_saved_rdi]
+ %else
+        mov     rdi, rcx
+ %endif
+        mov     qword [rax + CPUMCTX.ebx], rbx
+        mov     rbx, [rbp + frm_saved_rbx]
+        mov     qword [rax + CPUMCTX.r12], r12
+        mov     r12,  [rbp + frm_saved_r12]
+        mov     qword [rax + CPUMCTX.r13], r13
+        mov     r13,  [rbp + frm_saved_r13]
+        mov     qword [rax + CPUMCTX.r14], r14
+        mov     r14,  [rbp + frm_saved_r14]
+        mov     qword [rax + CPUMCTX.r15], r15
+        mov     r15,  [rbp + frm_saved_r15]
+
+        mov     rdx, cr2
+        mov     qword [rax + CPUMCTX.cr2], rdx
+        mov     rdx, rcx
+
+ %if %1 = 0 ; Skip this in failure branch (=> guru)
+        ; Fight spectre.
+        INDIRECT_BRANCH_PREDICTION_BARRIER_CTX rax, CPUMCTX_WSF_IBPB_EXIT
+ %endif
+
+ %ifndef VMX_SKIP_TR
+        ; Restore TSS selector; must mark it as not busy before using ltr!
+        ; ASSUME that this is supposed to be 'BUSY' (saves 20-30 ticks on the T42p).
+  %ifndef VMX_SKIP_GDTR
+        lgdt    [rbp + frm_saved_gdtr]
+  %endif
+        movzx   eax, word [rbp + frm_saved_tr]
+        mov     ecx, eax
+        and     eax, X86_SEL_MASK_OFF_RPL           ; mask away TI and RPL bits leaving only the descriptor offset
+        add     rax, [rbp + frm_saved_gdtr + 2]     ; eax <- GDTR.address + descriptor offset
+        and     dword [rax + 4], ~RT_BIT(9)         ; clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit)
+        ltr     cx
+ %endif
+        movzx   edx, word [rbp + frm_saved_ldtr]
+        test    edx, edx
+        jz      %%skip_ldt_write
+        lldt    dx
+%%skip_ldt_write:
+
+ %if %1 != 0
+.return_after_vmwrite_error:
+ %endif
+        ; Restore segment registers.
+        ;POP_RELEVANT_SEGMENT_REGISTERS rax, ax - currently broken.
+
+        ; Restore the host XCR0 if necessary.
+        mov     ecx, [rbp + frm_fNoRestoreXcr0]
+        test    ecx, ecx
+        jnz     %%xcr0_after_skip
+        mov     eax, [rbp + frm_uHostXcr0]
+        mov     edx, [rbp + frm_uHostXcr0 + 4]
+        xsetbv                              ; ecx is already zero.
+%%xcr0_after_skip:
+
+%endmacro ; RESTORE_STATE_VMX
+
+        RESTORE_STATE_VMX 0
         mov     eax, VINF_SUCCESS

 .vmstart64_end:
+        lea     rsp, [rbp + frm_fRFlags]
         popf
-        pop     xBP
+        leave
         ret

+        ;
+        ; Error returns.
+        ;
+ %ifdef VBOX_STRICT
+.vmwrite_failed:
+        mov     dword [rsp + cbFrame + frm_rcError], VERR_VMX_INVALID_VMCS_FIELD
+        jz      .return_after_vmwrite_error
+        mov     dword [rsp + cbFrame + frm_rcError], VERR_VMX_INVALID_VMCS_PTR
+        jmp     .return_after_vmwrite_error
+ %endif
 .vmxstart64_invalid_vmcs_ptr:
-        RESTORE_STATE_VM64
-        mov     eax, VERR_VMX_INVALID_VMCS_PTR_TO_START_VM
+        mov     dword [rsp + cbFrame + frm_rcError], VERR_VMX_INVALID_VMCS_PTR_TO_START_VM
+        jmp     .vmstart64_error_return
+.vmxstart64_start_failed:
+        mov     dword [rsp + cbFrame + frm_rcError], VERR_VMX_UNABLE_TO_START_VM
+.vmstart64_error_return:
+        RESTORE_STATE_VMX 1
+        mov     eax, [rbp + frm_rcError]
         jmp     .vmstart64_end
-
-.vmxstart64_start_failed:
-        RESTORE_STATE_VM64
-        mov     eax, VERR_VMX_UNABLE_TO_START_VM
-        jmp     .vmstart64_end
+ %undef frm_fRFlags
+ %undef frm_pGstCtx
+ %undef frm_uHostXcr0
+ %undef frm_saved_gdtr
+ %undef frm_saved_tr
+ %undef frm_fNoRestoreXcr0
+ %undef frm_saved_idtr
+ %undef frm_saved_ldtr
+ %undef frm_rcError
+ %undef frm_guest_rax
+ %undef cbFrame
 ENDPROC hmR0VMXStartVM

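A note on the TR reload in RESTORE_STATE_VMX above: VT-x resets the host TR limit on VM-exit, and ltr raises #GP if the referenced TSS descriptor is already marked busy, which is why the code clears the busy bit in the GDT entry before reloading. A stand-alone sketch of the same technique (hypothetical helper routine; X86_SEL_MASK_OFF_RPL and RT_BIT are the VBox macros used in the listing):

    ; Hypothetical helper mirroring the busy-bit trick; input: cx = saved TR.
    reload_host_tr:
            sub     rsp, 10h
            sgdt    [rsp]                       ; 2-byte limit, then 8-byte base
            movzx   eax, cx
            and     eax, X86_SEL_MASK_OFF_RPL   ; selector -> descriptor offset
            add     rax, [rsp + 2]              ; rax = GDT base + offset
            and     dword [rax + 4], ~RT_BIT(9) ; clear busy bit in the type field
            ltr     cx                          ; safe now; CPU marks it busy again
            add     rsp, 10h
            ret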
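The XCR0 handling follows the same frame-slot discipline: xgetbv and xsetbv transfer the extended control register selected by ecx through edx:eax, the host value is parked in frm_uHostXcr0, and the sentinel stored in frm_fNoRestoreXcr0 (3fh = skip, 0 = restore) tells the exit path what to do. A condensed sketch of that round trip (the guest value is assumed to arrive in r8d/r9d; names are illustrative):

    ; Illustrative XCR0 switch; ecx selects XCR index 0 throughout.
            xor     ecx, ecx
            xgetbv                              ; edx:eax = host XCR0
            mov     [rbp + frm_uHostXcr0], eax
            mov     [rbp + frm_uHostXcr0 + 4], edx
            mov     eax, r8d                    ; guest XCR0, low half
            mov     edx, r9d                    ; guest XCR0, high half
            xsetbv                              ; load guest XCR0 (ecx still 0)
            ; ... guest runs ...
            mov     eax, [rbp + frm_uHostXcr0]
            mov     edx, [rbp + frm_uHostXcr0 + 4]
            xor     ecx, ecx
            xsetbv                              ; restore host XCR0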