
src/pkg/crypto/md5/md5block_amd64p32.s

// Original source:
//	http://www.zorinaq.com/papers/md5-amd64.html
//	http://www.zorinaq.com/papers/md5-amd64.tar.bz2
//
// Translated from Perl generating GNU assembly into
// #defines generating 6a assembly by the Go Authors.
//
// Restrictions to make code safe for Native Client:
// replace BP with R11, reloaded before use at return.
// replace R15 with R11.

#include "textflag.h"

// MD5 optimized for AMD64.
//
// Author: Marc Bevand <bevand_m (at) epita.fr>
// Licence: I hereby disclaim the copyright on this code and place it
// in the public domain.

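// block applies the MD5 compression function to whole 64-byte chunks
// of p, updating the four-word state in dig. The matching Go
// declaration in this package's Go code is of the form
//
//	func block(dig *digest, p []byte)
//
// On amd64p32 pointers are 32 bits wide, so dig sits at 0(FP) and the
// slice header of p (data, len, cap) fills bytes 4 through 15, giving
// the $0-16 argument frame below. Any trailing partial chunk of p is
// ignored: the length is rounded down to a multiple of 64.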
TEXT	·block(SB),NOSPLIT,$0-16
	MOVL	dig+0(FP),	R11
	MOVL	p+4(FP),	SI
	MOVL	p_len+8(FP), DX
	SHRQ	$6,		DX
	SHLQ	$6,		DX	// round length down to a whole number of 64-byte chunks

	LEAQ	(SI)(DX*1),	DI	// DI = end of the last whole chunk
	MOVL	(0*4)(R11),	AX	// load state a, b, c, d
	MOVL	(1*4)(R11),	BX
	MOVL	(2*4)(R11),	CX
	MOVL	(3*4)(R11),	DX

	CMPQ	SI,		DI
	JEQ	end

loop:
	MOVL	AX,		R12	// save state for the feedforward below
	MOVL	BX,		R13
	MOVL	CX,		R14
	MOVL	DX,		R11	// R11 stands in for R15 here (NaCl note above)

	MOVL	(0*4)(SI),	R8	// preload the first message word X[0]
	MOVL	DX,		R9

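// ROUND1 is one step of MD5 round 1, built on the round function
//	F(b,c,d) = (b AND c) OR (NOT b AND d)
// computed in the equivalent form d XOR (b AND (c XOR d)), which
// needs one less temporary register. Each step computes
//	a += F(b,c,d) + X[k] + const; a = a<<<shift; a += b
// The current message word is already in R8 and R9 holds d; the
// index argument prefetches the word for the following step (the
// last step's prefetch is a dummy, overwritten before round 2).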
#define ROUND1(a, b, c, d, index, const, shift) \
	XORL	c,		R9; \
	LEAL	const(a)(R8*1),	a; \
	ANDL	b,		R9; \
	XORL	d,		R9; \
	MOVL	(index*4)(SI),	R8; \
	ADDL	R9,		a; \
	ROLL	$shift,		a; \
	MOVL	c,		R9; \
	ADDL	b,		a

	ROUND1(AX,BX,CX,DX, 1,0xd76aa478, 7);
	ROUND1(DX,AX,BX,CX, 2,0xe8c7b756,12);
	ROUND1(CX,DX,AX,BX, 3,0x242070db,17);
	ROUND1(BX,CX,DX,AX, 4,0xc1bdceee,22);
	ROUND1(AX,BX,CX,DX, 5,0xf57c0faf, 7);
	ROUND1(DX,AX,BX,CX, 6,0x4787c62a,12);
	ROUND1(CX,DX,AX,BX, 7,0xa8304613,17);
	ROUND1(BX,CX,DX,AX, 8,0xfd469501,22);
	ROUND1(AX,BX,CX,DX, 9,0x698098d8, 7);
	ROUND1(DX,AX,BX,CX,10,0x8b44f7af,12);
	ROUND1(CX,DX,AX,BX,11,0xffff5bb1,17);
	ROUND1(BX,CX,DX,AX,12,0x895cd7be,22);
	ROUND1(AX,BX,CX,DX,13,0x6b901122, 7);
	ROUND1(DX,AX,BX,CX,14,0xfd987193,12);
	ROUND1(CX,DX,AX,BX,15,0xa679438e,17);
	ROUND1(BX,CX,DX,AX, 0,0x49b40821,22);

	MOVL	(1*4)(SI),	R8	// preload X[1] for round 2
	MOVL	DX,		R9	// R9, R10 = d
	MOVL	DX,		R10

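// ROUND2 is one step of MD5 round 2, built on the round function
//	G(b,c,d) = (b AND d) OR (c AND NOT d)
// R9 and R10 both hold d on entry; the macro leaves c in both,
// since c becomes the next step's d. As in ROUND1, the index
// argument prefetches the message word for the following step.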
#define ROUND2(a, b, c, d, index, const, shift) \
	NOTL	R9; \
	LEAL	const(a)(R8*1),	a; \
	ANDL	b,		R10; \
	ANDL	c,		R9; \
	MOVL	(index*4)(SI),	R8; \
	ORL	R9,		R10; \
	MOVL	c,		R9; \
	ADDL	R10,		a; \
	MOVL	c,		R10; \
	ROLL	$shift,		a; \
	ADDL	b,		a

	ROUND2(AX,BX,CX,DX, 6,0xf61e2562, 5);
	ROUND2(DX,AX,BX,CX,11,0xc040b340, 9);
	ROUND2(CX,DX,AX,BX, 0,0x265e5a51,14);
	ROUND2(BX,CX,DX,AX, 5,0xe9b6c7aa,20);
	ROUND2(AX,BX,CX,DX,10,0xd62f105d, 5);
	ROUND2(DX,AX,BX,CX,15, 0x2441453, 9);
	ROUND2(CX,DX,AX,BX, 4,0xd8a1e681,14);
	ROUND2(BX,CX,DX,AX, 9,0xe7d3fbc8,20);
	ROUND2(AX,BX,CX,DX,14,0x21e1cde6, 5);
	ROUND2(DX,AX,BX,CX, 3,0xc33707d6, 9);
	ROUND2(CX,DX,AX,BX, 8,0xf4d50d87,14);
	ROUND2(BX,CX,DX,AX,13,0x455a14ed,20);
	ROUND2(AX,BX,CX,DX, 2,0xa9e3e905, 5);
	ROUND2(DX,AX,BX,CX, 7,0xfcefa3f8, 9);
	ROUND2(CX,DX,AX,BX,12,0x676f02d9,14);
	ROUND2(BX,CX,DX,AX, 0,0x8d2a4c8a,20);

	MOVL	(5*4)(SI),	R8	// preload X[5] for round 3
	MOVL	CX,		R9	// R9 = c

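// ROUND3 is one step of MD5 round 3, built on the round function
//	H(b,c,d) = b XOR c XOR d
// R9 holds c on entry and b on exit, which is the next step's c.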
#define ROUND3(a, b, c, d, index, const, shift) \
	LEAL	const(a)(R8*1),	a; \
	MOVL	(index*4)(SI),	R8; \
	XORL	d,		R9; \
	XORL	b,		R9; \
	ADDL	R9,		a; \
	ROLL	$shift,		a; \
	MOVL	b,		R9; \
	ADDL	b,		a

	ROUND3(AX,BX,CX,DX, 8,0xfffa3942, 4);
	ROUND3(DX,AX,BX,CX,11,0x8771f681,11);
	ROUND3(CX,DX,AX,BX,14,0x6d9d6122,16);
	ROUND3(BX,CX,DX,AX, 1,0xfde5380c,23);
	ROUND3(AX,BX,CX,DX, 4,0xa4beea44, 4);
	ROUND3(DX,AX,BX,CX, 7,0x4bdecfa9,11);
	ROUND3(CX,DX,AX,BX,10,0xf6bb4b60,16);
	ROUND3(BX,CX,DX,AX,13,0xbebfbc70,23);
	ROUND3(AX,BX,CX,DX, 0,0x289b7ec6, 4);
	ROUND3(DX,AX,BX,CX, 3,0xeaa127fa,11);
	ROUND3(CX,DX,AX,BX, 6,0xd4ef3085,16);
	ROUND3(BX,CX,DX,AX, 9, 0x4881d05,23);
	ROUND3(AX,BX,CX,DX,12,0xd9d4d039, 4);
	ROUND3(DX,AX,BX,CX,15,0xe6db99e5,11);
	ROUND3(CX,DX,AX,BX, 2,0x1fa27cf8,16);
	ROUND3(BX,CX,DX,AX, 0,0xc4ac5665,23);

	MOVL	(0*4)(SI),	R8	// preload X[0] for round 4
	MOVL	$0xffffffff,	R9
	XORL	DX,		R9	// R9 = NOT d

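// ROUND4 is one step of MD5 round 4, built on the round function
//	I(b,c,d) = c XOR (b OR NOT d)
// R9 holds NOT d on entry; each step recomputes NOT c, which is
// NOT d for the step that follows.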
#define ROUND4(a, b, c, d, index, const, shift) \
	LEAL	const(a)(R8*1),	a; \
	ORL	b,		R9; \
	XORL	c,		R9; \
	ADDL	R9,		a; \
	MOVL	(index*4)(SI),	R8; \
	MOVL	$0xffffffff,	R9; \
	ROLL	$shift,		a; \
	XORL	c,		R9; \
	ADDL	b,		a

	ROUND4(AX,BX,CX,DX, 7,0xf4292244, 6);
	ROUND4(DX,AX,BX,CX,14,0x432aff97,10);
	ROUND4(CX,DX,AX,BX, 5,0xab9423a7,15);
	ROUND4(BX,CX,DX,AX,12,0xfc93a039,21);
	ROUND4(AX,BX,CX,DX, 3,0x655b59c3, 6);
	ROUND4(DX,AX,BX,CX,10,0x8f0ccc92,10);
	ROUND4(CX,DX,AX,BX, 1,0xffeff47d,15);
	ROUND4(BX,CX,DX,AX, 8,0x85845dd1,21);
	ROUND4(AX,BX,CX,DX,15,0x6fa87e4f, 6);
	ROUND4(DX,AX,BX,CX, 6,0xfe2ce6e0,10);
	ROUND4(CX,DX,AX,BX,13,0xa3014314,15);
	ROUND4(BX,CX,DX,AX, 4,0x4e0811a1,21);
	ROUND4(AX,BX,CX,DX,11,0xf7537e82, 6);
	ROUND4(DX,AX,BX,CX, 2,0xbd3af235,10);
	ROUND4(CX,DX,AX,BX, 9,0x2ad7d2bb,15);
	ROUND4(BX,CX,DX,AX, 0,0xeb86d391,21);

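// Feed the original a, b, c, d forward into the updated state
// (R12-R14 and R11 hold the values saved at the top of the loop)
// and advance SI to the next 64-byte chunk.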
	ADDL	R12,	AX
	ADDL	R13,	BX
	ADDL	R14,	CX
	ADDL	R11,	DX

	ADDQ	$64,		SI
	CMPQ	SI,		DI
	JB	loop

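// The loop clobbered R11 (it replaces R15 as the d save), so the dig
// pointer must be reloaded from the stack before the state is stored
// back; this is the "reloaded before use at return" restriction
// noted at the top of the file.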
end:
	MOVL	dig+0(FP),	R11
	MOVL	AX,		(0*4)(R11)
	MOVL	BX,		(1*4)(R11)
	MOVL	CX,		(2*4)(R11)
	MOVL	DX,		(3*4)(R11)
	RET
