VirtualBox: vbox/trunk/src/libs/openssl-1.1.0g/crypto/modes/asm/ghash-armv4.pl (r69881, "Update OpenSSL to 1.1.0g", bugref:8070)

#! /usr/bin/env perl
# Copyright 2010-2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html

#
# ====================================================================
# Written by Andy Polyakov <[email protected]> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# April 2010
#
# The module implements the "4-bit" GCM GHASH function and the
# underlying single multiplication operation in GF(2^128). "4-bit"
# means that it uses a 256-byte per-key table [+32 bytes shared table].
# There is no experimental performance data available yet. The only
# approximation that can be made at this point is based on code size.
# The inner loop is 32 instructions long and on a single-issue core
# should execute in <40 cycles. Having verified that gcc 3.4 didn't
# unroll the corresponding loop, this assembler loop body was found to
# be ~3x smaller than the compiler-generated one...
#
# July 2010
#
# Rescheduling for dual-issue pipeline resulted in 8.5% improvement on
# Cortex A8 core and ~25 cycles per processed byte (which was observed
# to be ~3 times faster than gcc-generated code:-)
#
# February 2011
#
# Profiler-assisted and platform-specific optimization resulted in 7%
# improvement on Cortex A8 core and ~23.5 cycles per byte.
#
# March 2011
#
# Add NEON implementation featuring polynomial multiplication, i.e. no
# lookup tables involved. On Cortex A8 it was measured to process one
# byte in 15 cycles or 55% faster than integer-only code.
#
# April 2014
#
# Switch to the multiplication algorithm suggested in the paper
# referred to below and combine it with the reduction algorithm from
# the x86 module. Performance improvement over the previous version
# varies from 65% on Snapdragon S4 to 110% on Cortex A9. In absolute
# terms Cortex A8 processes one byte in 8.45 cycles, A9 - in 10.2,
# A15 - in 7.63, Snapdragon S4 - in 9.33.
#
# Câmara, D.; Gouvêa, C. P. L.; López, J. & Dahab, R.: Fast Software
# Polynomial Multiplication on ARM Processors using the NEON Engine.
#
# http://conradoplg.cryptoland.net/files/2010/12/mocrysen13.pdf

# ====================================================================
# Note about the "528B" variant. In the ARM case it makes less sense
# to implement it, for the following reasons:
#
# - the performance improvement won't be anywhere near 50%, because the
#   128-bit shift operation is neatly fused with the 128-bit xor here,
#   and the "528B" variant would eliminate only 4-5 instructions out of
#   32 in the inner loop (meaning that the estimated improvement is
#   ~15%);
# - ARM-based systems are often embedded ones and the extra memory
#   consumption might be unappreciated (for so little improvement);
#
# Byte order [in]dependence. =========================================
#
# The caller is expected to maintain a specific *dword* order in
# Htable, namely with the *least* significant dword of the 128-bit
# value at the *lower* address. This differs completely from the C
# code and has everything to do with the ldm instruction and the order
# in which dwords are "consumed" by the algorithm. *Byte* order within
# these dwords is in turn whatever the *native* byte order is on the
# current platform. See gcm128.c for a working example...

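# Below is a minimal bitwise reference model of the GF(2^128)
# multiplication that both the 4-bit and NEON paths implement. It is a
# hypothetical helper added for illustration only and is never called
# by this script; blocks are taken as 128-bit big-endian integers, so
# "bit 0" in the GCM spec maps to the integer's most significant bit.
use Math::BigInt;
sub gf128_mul_ref {
 my ($x,$y)=@_;				# Math::BigInt operands
 my $R=Math::BigInt->new("0xe1")->blsft(120);	# reflected x^128+x^7+x^2+x+1
 my $z=Math::BigInt->bzero();
 my $v=$x->copy();
 for my $i (0..127) {
	# xor V into Z whenever bit i of Y (MSB first) is set
	$z->bxor($v) if $y->copy()->brsft(127-$i)->band(1)->is_one();
	my $carry=$v->copy()->band(1)->is_one();
	$v->brsft(1);			# V>>=1 ...
	$v->bxor($R) if $carry;		# ... reduced by the field polynomial
 }
 return $z;
}
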
$flavour = shift;
if ($flavour=~/\w[\w\-]*\.\w+$/) { $output=$flavour; undef $flavour; }
else { while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {} }

if ($flavour && $flavour ne "void") {
 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
 ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
 ( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
 die "can't locate arm-xlate.pl";

 open STDOUT,"| \"$^X\" $xlate $flavour $output";
} else {
 open STDOUT,">$output";
}

$Xi="r0"; # argument block
$Htbl="r1";
$inp="r2";
$len="r3";

$Zll="r4"; # variables
$Zlh="r5";
$Zhl="r6";
$Zhh="r7";
$Tll="r8";
$Tlh="r9";
$Thl="r10";
$Thh="r11";
$nlo="r12";
################# r13 is stack pointer
$nhi="r14";
################# r15 is program counter

$rem_4bit=$inp; # used in gcm_gmult_4bit
$cnt=$len;

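# Zsmash emits the code that stores Z (Zll..Zhh) back to Xi[0..15] in
# big-endian byte order: rev+str on little-endian ARMv7+, plain str on
# big-endian, and four strb stores per word elsewhere. One of the
# instructions passed as arguments is emitted after each store.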
sub Zsmash {
 my $i=12;
 my @args=@_;
 for ($Zll,$Zlh,$Zhl,$Zhh) {
 $code.=<<___;
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
 rev $_,$_
 str $_,[$Xi,#$i]
#elif defined(__ARMEB__)
 str $_,[$Xi,#$i]
#else
 mov $Tlh,$_,lsr#8
 strb $_,[$Xi,#$i+3]
 mov $Thl,$_,lsr#16
 strb $Tlh,[$Xi,#$i+2]
 mov $Thh,$_,lsr#24
 strb $Thl,[$Xi,#$i+1]
 strb $Thh,[$Xi,#$i]
#endif
___
 $code.="\t".shift(@args)."\n";
 $i-=4;
 }
}

$code=<<___;
#include "arm_arch.h"

.text
#if defined(__thumb2__) || defined(__clang__)
.syntax unified
#endif
#if defined(__thumb2__)
.thumb
#else
.code 32
#endif

#ifdef __clang__
#define ldrplb ldrbpl
#define ldrneb ldrbne
#endif
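@ (unified syntax puts the width suffix before the condition, i.e.
@ ldrbpl/ldrbne, so map the pre-unified spellings clang would reject)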

.type rem_4bit,%object
.align 5
rem_4bit:
.short 0x0000,0x1C20,0x3840,0x2460
.short 0x7080,0x6CA0,0x48C0,0x54E0
.short 0xE100,0xFD20,0xD940,0xC560
.short 0x9180,0x8DA0,0xA9C0,0xB5E0
.size rem_4bit,.-rem_4bit
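@ (each entry is the carry-less product i*0x1C20, the reduction term
@ for the four bits shifted out of Z in one 4-bit step; it gets xored
@ into the top 16 bits of Z)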

.type rem_4bit_get,%function
rem_4bit_get:
#if defined(__thumb2__)
 adr $rem_4bit,rem_4bit
#else
 sub $rem_4bit,pc,#8+32 @ &rem_4bit
#endif
 b .Lrem_4bit_got
 nop
 nop
.size rem_4bit_get,.-rem_4bit_get

.global gcm_ghash_4bit
.type gcm_ghash_4bit,%function
.align 4
gcm_ghash_4bit:
#if defined(__thumb2__)
 adr r12,rem_4bit
#else
 sub r12,pc,#8+48 @ &rem_4bit
#endif
 add $len,$inp,$len @ $len to point at the end
 stmdb sp!,{r3-r11,lr} @ save $len/end too

 ldmia r12,{r4-r11} @ copy rem_4bit ...
 stmdb sp!,{r4-r11} @ ... to stack

 ldrb $nlo,[$inp,#15]
 ldrb $nhi,[$Xi,#15]
.Louter:
 eor $nlo,$nlo,$nhi
 and $nhi,$nlo,#0xf0
 and $nlo,$nlo,#0x0f
 mov $cnt,#14

 add $Zhh,$Htbl,$nlo,lsl#4
 ldmia $Zhh,{$Zll-$Zhh} @ load Htbl[nlo]
 add $Thh,$Htbl,$nhi
 ldrb $nlo,[$inp,#14]

 and $nhi,$Zll,#0xf @ rem
 ldmia $Thh,{$Tll-$Thh} @ load Htbl[nhi]
 add $nhi,$nhi,$nhi
 eor $Zll,$Tll,$Zll,lsr#4
 ldrh $Tll,[sp,$nhi] @ rem_4bit[rem]
 eor $Zll,$Zll,$Zlh,lsl#28
 ldrb $nhi,[$Xi,#14]
 eor $Zlh,$Tlh,$Zlh,lsr#4
 eor $Zlh,$Zlh,$Zhl,lsl#28
 eor $Zhl,$Thl,$Zhl,lsr#4
 eor $Zhl,$Zhl,$Zhh,lsl#28
 eor $Zhh,$Thh,$Zhh,lsr#4
 eor $nlo,$nlo,$nhi
 and $nhi,$nlo,#0xf0
 and $nlo,$nlo,#0x0f
 eor $Zhh,$Zhh,$Tll,lsl#16

.Linner:
 add $Thh,$Htbl,$nlo,lsl#4
 and $nlo,$Zll,#0xf @ rem
 subs $cnt,$cnt,#1
 add $nlo,$nlo,$nlo
 ldmia $Thh,{$Tll-$Thh} @ load Htbl[nlo]
 eor $Zll,$Tll,$Zll,lsr#4
 eor $Zll,$Zll,$Zlh,lsl#28
 eor $Zlh,$Tlh,$Zlh,lsr#4
 eor $Zlh,$Zlh,$Zhl,lsl#28
 ldrh $Tll,[sp,$nlo] @ rem_4bit[rem]
 eor $Zhl,$Thl,$Zhl,lsr#4
#ifdef __thumb2__
 it pl
#endif
 ldrplb $nlo,[$inp,$cnt]
 eor $Zhl,$Zhl,$Zhh,lsl#28
 eor $Zhh,$Thh,$Zhh,lsr#4

 add $Thh,$Htbl,$nhi
 and $nhi,$Zll,#0xf @ rem
 eor $Zhh,$Zhh,$Tll,lsl#16 @ ^= rem_4bit[rem]
 add $nhi,$nhi,$nhi
 ldmia $Thh,{$Tll-$Thh} @ load Htbl[nhi]
 eor $Zll,$Tll,$Zll,lsr#4
#ifdef __thumb2__
 it pl
#endif
 ldrplb $Tll,[$Xi,$cnt]
 eor $Zll,$Zll,$Zlh,lsl#28
 eor $Zlh,$Tlh,$Zlh,lsr#4
 ldrh $Tlh,[sp,$nhi]
 eor $Zlh,$Zlh,$Zhl,lsl#28
 eor $Zhl,$Thl,$Zhl,lsr#4
 eor $Zhl,$Zhl,$Zhh,lsl#28
#ifdef __thumb2__
 it pl
#endif
 eorpl $nlo,$nlo,$Tll
 eor $Zhh,$Thh,$Zhh,lsr#4
#ifdef __thumb2__
 itt pl
#endif
 andpl $nhi,$nlo,#0xf0
 andpl $nlo,$nlo,#0x0f
 eor $Zhh,$Zhh,$Tlh,lsl#16 @ ^= rem_4bit[rem]
 bpl .Linner

 ldr $len,[sp,#32] @ re-load $len/end
 add $inp,$inp,#16
 mov $nhi,$Zll
___
 &Zsmash("cmp\t$inp,$len","\n".
 "#ifdef __thumb2__\n".
 " it ne\n".
 "#endif\n".
 " ldrneb $nlo,[$inp,#15]");
$code.=<<___;
 bne .Louter

 add sp,sp,#36
#if __ARM_ARCH__>=5
 ldmia sp!,{r4-r11,pc}
#else
 ldmia sp!,{r4-r11,lr}
 tst lr,#1
 moveq pc,lr @ be binary compatible with V4, yet
 bx lr @ interoperable with Thumb ISA:-)
#endif
.size gcm_ghash_4bit,.-gcm_ghash_4bit

.global gcm_gmult_4bit
.type gcm_gmult_4bit,%function
gcm_gmult_4bit:
 stmdb sp!,{r4-r11,lr}
 ldrb $nlo,[$Xi,#15]
 b rem_4bit_get
.Lrem_4bit_got:
 and $nhi,$nlo,#0xf0
 and $nlo,$nlo,#0x0f
 mov $cnt,#14

 add $Zhh,$Htbl,$nlo,lsl#4
 ldmia $Zhh,{$Zll-$Zhh} @ load Htbl[nlo]
 ldrb $nlo,[$Xi,#14]

 add $Thh,$Htbl,$nhi
 and $nhi,$Zll,#0xf @ rem
 ldmia $Thh,{$Tll-$Thh} @ load Htbl[nhi]
 add $nhi,$nhi,$nhi
 eor $Zll,$Tll,$Zll,lsr#4
 ldrh $Tll,[$rem_4bit,$nhi] @ rem_4bit[rem]
 eor $Zll,$Zll,$Zlh,lsl#28
 eor $Zlh,$Tlh,$Zlh,lsr#4
 eor $Zlh,$Zlh,$Zhl,lsl#28
 eor $Zhl,$Thl,$Zhl,lsr#4
 eor $Zhl,$Zhl,$Zhh,lsl#28
 eor $Zhh,$Thh,$Zhh,lsr#4
 and $nhi,$nlo,#0xf0
 eor $Zhh,$Zhh,$Tll,lsl#16
 and $nlo,$nlo,#0x0f

.Loop:
 add $Thh,$Htbl,$nlo,lsl#4
 and $nlo,$Zll,#0xf @ rem
 subs $cnt,$cnt,#1
 add $nlo,$nlo,$nlo
 ldmia $Thh,{$Tll-$Thh} @ load Htbl[nlo]
 eor $Zll,$Tll,$Zll,lsr#4
 eor $Zll,$Zll,$Zlh,lsl#28
 eor $Zlh,$Tlh,$Zlh,lsr#4
 eor $Zlh,$Zlh,$Zhl,lsl#28
 ldrh $Tll,[$rem_4bit,$nlo] @ rem_4bit[rem]
 eor $Zhl,$Thl,$Zhl,lsr#4
#ifdef __thumb2__
 it pl
#endif
 ldrplb $nlo,[$Xi,$cnt]
 eor $Zhl,$Zhl,$Zhh,lsl#28
 eor $Zhh,$Thh,$Zhh,lsr#4

 add $Thh,$Htbl,$nhi
 and $nhi,$Zll,#0xf @ rem
 eor $Zhh,$Zhh,$Tll,lsl#16 @ ^= rem_4bit[rem]
 add $nhi,$nhi,$nhi
 ldmia $Thh,{$Tll-$Thh} @ load Htbl[nhi]
 eor $Zll,$Tll,$Zll,lsr#4
 eor $Zll,$Zll,$Zlh,lsl#28
 eor $Zlh,$Tlh,$Zlh,lsr#4
 ldrh $Tll,[$rem_4bit,$nhi] @ rem_4bit[rem]
 eor $Zlh,$Zlh,$Zhl,lsl#28
 eor $Zhl,$Thl,$Zhl,lsr#4
 eor $Zhl,$Zhl,$Zhh,lsl#28
 eor $Zhh,$Thh,$Zhh,lsr#4
#ifdef __thumb2__
 itt pl
#endif
 andpl $nhi,$nlo,#0xf0
 andpl $nlo,$nlo,#0x0f
 eor $Zhh,$Zhh,$Tll,lsl#16 @ ^= rem_4bit[rem]
 bpl .Loop
___
 &Zsmash();
$code.=<<___;
#if __ARM_ARCH__>=5
 ldmia sp!,{r4-r11,pc}
#else
 ldmia sp!,{r4-r11,lr}
 tst lr,#1
 moveq pc,lr @ be binary compatible with V4, yet
 bx lr @ interoperable with Thumb ISA:-)
#endif
.size gcm_gmult_4bit,.-gcm_gmult_4bit
___
{
my ($Xl,$Xm,$Xh,$IN)=map("q$_",(0..3));
my ($t0,$t1,$t2,$t3)=map("q$_",(8..12));
my ($Hlo,$Hhi,$Hhl,$k48,$k32,$k16)=map("d$_",(26..31));

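# clmul64x64 emits a 64x64->128-bit carry-less multiply built from
# eight vmull.p8 (8-bit polynomial) multiplies, after Câmara et al.
# referred to above: partial products of byte-rotated operands are
# summed in pairs (L=E+F, M=G+H, N=I+J, plus K), masked, shifted into
# position and xored into the base product D = A*B.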
sub clmul64x64 {
my ($r,$a,$b)=@_;
$code.=<<___;
 vext.8 $t0#lo, $a, $a, #1 @ A1
 vmull.p8 $t0, $t0#lo, $b @ F = A1*B
 vext.8 $r#lo, $b, $b, #1 @ B1
 vmull.p8 $r, $a, $r#lo @ E = A*B1
 vext.8 $t1#lo, $a, $a, #2 @ A2
 vmull.p8 $t1, $t1#lo, $b @ H = A2*B
 vext.8 $t3#lo, $b, $b, #2 @ B2
 vmull.p8 $t3, $a, $t3#lo @ G = A*B2
 vext.8 $t2#lo, $a, $a, #3 @ A3
 veor $t0, $t0, $r @ L = E + F
 vmull.p8 $t2, $t2#lo, $b @ J = A3*B
 vext.8 $r#lo, $b, $b, #3 @ B3
 veor $t1, $t1, $t3 @ M = G + H
 vmull.p8 $r, $a, $r#lo @ I = A*B3
 veor $t0#lo, $t0#lo, $t0#hi @ t0 = (L) (P0 + P1) << 8
 vand $t0#hi, $t0#hi, $k48
 vext.8 $t3#lo, $b, $b, #4 @ B4
 veor $t1#lo, $t1#lo, $t1#hi @ t1 = (M) (P2 + P3) << 16
 vand $t1#hi, $t1#hi, $k32
 vmull.p8 $t3, $a, $t3#lo @ K = A*B4
 veor $t2, $t2, $r @ N = I + J
 veor $t0#lo, $t0#lo, $t0#hi
 veor $t1#lo, $t1#lo, $t1#hi
 veor $t2#lo, $t2#lo, $t2#hi @ t2 = (N) (P4 + P5) << 24
 vand $t2#hi, $t2#hi, $k16
 vext.8 $t0, $t0, $t0, #15
 veor $t3#lo, $t3#lo, $t3#hi @ t3 = (K) (P6 + P7) << 32
 vmov.i64 $t3#hi, #0
 vext.8 $t1, $t1, $t1, #14
 veor $t2#lo, $t2#lo, $t2#hi
 vmull.p8 $r, $a, $b @ D = A*B
 vext.8 $t3, $t3, $t3, #12
 vext.8 $t2, $t2, $t2, #13
 veor $t0, $t0, $t1
 veor $t2, $t2, $t3
 veor $r, $r, $t0
 veor $r, $r, $t2
___
}

$code.=<<___;
#if __ARM_MAX_ARCH__>=7
.arch armv7-a
.fpu neon

.global gcm_init_neon
.type gcm_init_neon,%function
.align 4
gcm_init_neon:
 vld1.64 $IN#hi,[r1]! @ load H
 vmov.i8 $t0,#0xe1
 vld1.64 $IN#lo,[r1]
 vshl.i64 $t0#hi,#57
 vshr.u64 $t0#lo,#63 @ t0=0xc2....01
 vdup.8 $t1,$IN#hi[7]
 vshr.u64 $Hlo,$IN#lo,#63
 vshr.s8 $t1,#7 @ broadcast carry bit
 vshl.i64 $IN,$IN,#1
 vand $t0,$t0,$t1
 vorr $IN#hi,$Hlo @ H<<<=1
 veor $IN,$IN,$t0 @ twisted H
 vstmia r0,{$IN}

 ret @ bx lr
.size gcm_init_neon,.-gcm_init_neon

.global gcm_gmult_neon
.type gcm_gmult_neon,%function
.align 4
gcm_gmult_neon:
 vld1.64 $IN#hi,[$Xi]! @ load Xi
 vld1.64 $IN#lo,[$Xi]!
 vmov.i64 $k48,#0x0000ffffffffffff
 vldmia $Htbl,{$Hlo-$Hhi} @ load twisted H
 vmov.i64 $k32,#0x00000000ffffffff
#ifdef __ARMEL__
 vrev64.8 $IN,$IN
#endif
 vmov.i64 $k16,#0x000000000000ffff
 veor $Hhl,$Hlo,$Hhi @ Karatsuba pre-processing
 mov $len,#16
 b .Lgmult_neon
.size gcm_gmult_neon,.-gcm_gmult_neon

.global gcm_ghash_neon
.type gcm_ghash_neon,%function
.align 4
gcm_ghash_neon:
 vld1.64 $Xl#hi,[$Xi]! @ load Xi
 vld1.64 $Xl#lo,[$Xi]!
 vmov.i64 $k48,#0x0000ffffffffffff
 vldmia $Htbl,{$Hlo-$Hhi} @ load twisted H
 vmov.i64 $k32,#0x00000000ffffffff
#ifdef __ARMEL__
 vrev64.8 $Xl,$Xl
#endif
 vmov.i64 $k16,#0x000000000000ffff
 veor $Hhl,$Hlo,$Hhi @ Karatsuba pre-processing

.Loop_neon:
 vld1.64 $IN#hi,[$inp]! @ load inp
 vld1.64 $IN#lo,[$inp]!
#ifdef __ARMEL__
 vrev64.8 $IN,$IN
#endif
 veor $IN,$Xl @ inp^=Xi
.Lgmult_neon:
___
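# Karatsuba: the 128x128-bit product is assembled from just three 64x64
# carry-less multiplies, Xl = H.lo*Xi.lo, Xh = H.hi*Xi.hi and Xm over
# the xored halves; the middle term Xm^Xl^Xh is folded in afterwards.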
 &clmul64x64 ($Xl,$Hlo,"$IN#lo"); # H.lo·Xi.lo
$code.=<<___;
 veor $IN#lo,$IN#lo,$IN#hi @ Karatsuba pre-processing
___
 &clmul64x64 ($Xm,$Hhl,"$IN#lo"); # (H.lo+H.hi)·(Xi.lo+Xi.hi)
 &clmul64x64 ($Xh,$Hhi,"$IN#hi"); # H.hi·Xi.hi
$code.=<<___;
 veor $Xm,$Xm,$Xl @ Karatsuba post-processing
 veor $Xm,$Xm,$Xh
 veor $Xl#hi,$Xl#hi,$Xm#lo
 veor $Xh#lo,$Xh#lo,$Xm#hi @ Xh|Xl - 256-bit result

 @ equivalent of reduction_avx from ghash-x86_64.pl
 vshl.i64 $t1,$Xl,#57 @ 1st phase
 vshl.i64 $t2,$Xl,#62
 veor $t2,$t2,$t1 @
 vshl.i64 $t1,$Xl,#63
 veor $t2, $t2, $t1 @
 veor $Xl#hi,$Xl#hi,$t2#lo @
 veor $Xh#lo,$Xh#lo,$t2#hi

 vshr.u64 $t2,$Xl,#1 @ 2nd phase
 veor $Xh,$Xh,$Xl
 veor $Xl,$Xl,$t2 @
 vshr.u64 $t2,$t2,#6
 vshr.u64 $Xl,$Xl,#1 @
 veor $Xl,$Xl,$Xh @
 veor $Xl,$Xl,$t2 @
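 @ (the shift counts 57/62/63 and 1/2/7 realize the reflected
 @ reduction of the 256-bit product modulo x^128+x^7+x^2+x+1)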

 subs $len,#16
 bne .Loop_neon

#ifdef __ARMEL__
 vrev64.8 $Xl,$Xl
#endif
 sub $Xi,#16
 vst1.64 $Xl#hi,[$Xi]! @ write out Xi
 vst1.64 $Xl#lo,[$Xi]

 ret @ bx lr
.size gcm_ghash_neon,.-gcm_ghash_neon
#endif
___
}
$code.=<<___;
.asciz "GHASH for ARMv4/NEON, CRYPTOGAMS by <appro\@openssl.org>"
.align 2
___

foreach (split("\n",$code)) {
 s/\`([^\`]*)\`/eval $1/geo;

 s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/geo or
 s/\bret\b/bx lr/go or
 s/\bbx\s+lr\b/.word\t0xe12fff1e/go; # make it possible to compile with -march=armv4

 print $_,"\n";
}
close STDOUT; # enforce flush