#14371: Add openssl to main repository
diff --git a/jni/openssl/crypto/md5/asm/md5-586.pl b/jni/openssl/crypto/md5/asm/md5-586.pl
new file mode 100644
index 0000000..6cb66bb
--- /dev/null
+++ b/jni/openssl/crypto/md5/asm/md5-586.pl
@@ -0,0 +1,307 @@
+#!/usr/local/bin/perl
+
+# Normal is the
+# md5_block_x86(MD5_CTX *c, ULONG *X);
+# version, non-normal is the
+# md5_block_x86(MD5_CTX *c, ULONG *X,int blocks);
+
+$normal=0;
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+push(@INC,"${dir}","${dir}../../perlasm");
+require "x86asm.pl";
+
+&asm_init($ARGV[0],$0);
+
+$A="eax";
+$B="ebx";
+$C="ecx";
+$D="edx";
+$tmp1="edi";
+$tmp2="ebp";
+$X="esi";
+
+# What we need to load into $tmp for the next round
+%Ltmp1=("R0",&Np($C), "R1",&Np($C), "R2",&Np($C), "R3",&Np($D));
+@xo=(
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,	# R0
+ 1, 6, 11, 0, 5, 10, 15, 4, 9, 14, 3, 8, 13, 2, 7, 12,	# R1
+ 5, 8, 11, 14, 1, 4, 7, 10, 13, 0, 3, 6, 9, 12, 15, 2,	# R2
+ 0, 7, 14, 5, 12, 3, 10, 1, 8, 15, 6, 13, 4, 11, 2, 9,	# R3
+ );
+
+&md5_block("md5_block_asm_data_order");
+&asm_finish();
+
+sub Np
+	{
+	local($p)=@_;
+	local(%n)=($A,$D,$B,$A,$C,$B,$D,$C);
+	return($n{$p});
+	}
+
+sub R0
+	{
+	local($pos,$a,$b,$c,$d,$K,$ki,$s,$t)=@_;
+
+	&mov($tmp1,$C)  if $pos < 0;
+	&mov($tmp2,&DWP($xo[$ki]*4,$K,"",0)) if $pos < 0; # very first one 
+
+	# body proper
+
+	&comment("R0 $ki");
+	&xor($tmp1,$d); # F function - part 2
+
+	&and($tmp1,$b); # F function - part 3
+	&lea($a,&DWP($t,$a,$tmp2,1));
+
+	&xor($tmp1,$d); # F function - part 4
+
+	&add($a,$tmp1);
+	&mov($tmp1,&Np($c)) if $pos < 1;	# next tmp1 for R0
+	&mov($tmp1,&Np($c)) if $pos == 1;	# next tmp1 for R1
+
+	&rotl($a,$s);
+
+	&mov($tmp2,&DWP($xo[$ki+1]*4,$K,"",0)) if ($pos != 2);
+
+	&add($a,$b);
+	}
+
+sub R1
+	{
+	local($pos,$a,$b,$c,$d,$K,$ki,$s,$t)=@_;
+
+	&comment("R1 $ki");
+
+	&lea($a,&DWP($t,$a,$tmp2,1));
+
+	&xor($tmp1,$b); # G function - part 2
+	&and($tmp1,$d); # G function - part 3
+
+	&mov($tmp2,&DWP($xo[$ki+1]*4,$K,"",0)) if ($pos != 2);
+	&xor($tmp1,$c);			# G function - part 4
+
+	&add($a,$tmp1);
+	&mov($tmp1,&Np($c)) if $pos < 1;	# G function - part 1
+	&mov($tmp1,&Np($c)) if $pos == 1;	# G function - part 1
+
+	&rotl($a,$s);
+
+	&add($a,$b);
+	}
+
+sub R2
+	{
+	local($n,$pos,$a,$b,$c,$d,$K,$ki,$s,$t)=@_;
+	# This one is different, only 3 logical operations
+
+if (($n & 1) == 0)
+	{
+	&comment("R2 $ki");
+	# make sure to do 'D' first, not 'B', else we clash with
+	# the last add from the previous round.
+
+	&xor($tmp1,$d); # H function - part 2
+
+	&xor($tmp1,$b); # H function - part 3
+	&lea($a,&DWP($t,$a,$tmp2,1));
+
+	&add($a,$tmp1);
+
+	&rotl($a,$s);
+
+	&mov($tmp2,&DWP($xo[$ki+1]*4,$K,"",0));
+	&mov($tmp1,&Np($c));
+	}
+else
+	{
+	&comment("R2 $ki");
+	# make sure to do 'D' first, not 'B', else we clash with
+	# the last add from the previous round.
+
+	&lea($a,&DWP($t,$a,$tmp2,1));
+
+	&add($b,$c);			# MOVED FORWARD
+	&xor($tmp1,$d); # H function - part 2
+
+	&xor($tmp1,$b); # H function - part 3
+	&mov($tmp2,&DWP($xo[$ki+1]*4,$K,"",0)) if ($pos != 2);
+
+	&add($a,$tmp1);
+	&mov($tmp1,&Np($c)) if $pos < 1;	# H function - part 1
+	&mov($tmp1,-1) if $pos == 1;		# I function - part 1
+
+	&rotl($a,$s);
+
+	&add($a,$b);
+	}
+	}
+
+sub R3
+	{
+	local($pos,$a,$b,$c,$d,$K,$ki,$s,$t)=@_;
+
+	&comment("R3 $ki");
+
+	# &not($tmp1)
+	&xor($tmp1,$d) if $pos < 0; 	# I function - part 2
+
+	&or($tmp1,$b);				# I function - part 3
+	&lea($a,&DWP($t,$a,$tmp2,1));
+
+	&xor($tmp1,$c); 			# I function - part 4
+	&mov($tmp2,&DWP($xo[$ki+1]*4,$K,"",0))	if $pos != 2; # load X/k value
+	&mov($tmp2,&wparam(0)) if $pos == 2;
+
+	&add($a,$tmp1);
+	&mov($tmp1,-1) if $pos < 1;	# H function - part 1
+	&add($K,64) if $pos >=1 && !$normal;
+
+	&rotl($a,$s);
+
+	&xor($tmp1,&Np($d)) if $pos <= 0; 	# I function - part = first time
+	&mov($tmp1,&DWP( 0,$tmp2,"",0)) if $pos > 0;
+	&add($a,$b);
+	}
+
+
+sub md5_block
+	{
+	local($name)=@_;
+
+	&function_begin_B($name,"",3);
+
+	# parameter 1 is the MD5_CTX structure.
+	# A	0
+	# B	4
+	# C	8
+	# D 	12
+
+	&push("esi");
+	 &push("edi");
+	&mov($tmp1,	&wparam(0)); # edi
+	 &mov($X,	&wparam(1)); # esi
+	&mov($C,	&wparam(2));
+	 &push("ebp");
+	&shl($C,	6);
+	&push("ebx");
+	 &add($C,	$X); # offset we end at
+	&sub($C,	64);
+	 &mov($A,	&DWP( 0,$tmp1,"",0));
+	&push($C);	# Put on the TOS
+	 &mov($B,	&DWP( 4,$tmp1,"",0));
+	&mov($C,	&DWP( 8,$tmp1,"",0));
+	 &mov($D,	&DWP(12,$tmp1,"",0));
+
+	&set_label("start") unless $normal;
+	&comment("");
+	&comment("R0 section");
+
+	&R0(-2,$A,$B,$C,$D,$X, 0, 7,0xd76aa478);
+	&R0( 0,$D,$A,$B,$C,$X, 1,12,0xe8c7b756);
+	&R0( 0,$C,$D,$A,$B,$X, 2,17,0x242070db);
+	&R0( 0,$B,$C,$D,$A,$X, 3,22,0xc1bdceee);
+	&R0( 0,$A,$B,$C,$D,$X, 4, 7,0xf57c0faf);
+	&R0( 0,$D,$A,$B,$C,$X, 5,12,0x4787c62a);
+	&R0( 0,$C,$D,$A,$B,$X, 6,17,0xa8304613);
+	&R0( 0,$B,$C,$D,$A,$X, 7,22,0xfd469501);
+	&R0( 0,$A,$B,$C,$D,$X, 8, 7,0x698098d8);
+	&R0( 0,$D,$A,$B,$C,$X, 9,12,0x8b44f7af);
+	&R0( 0,$C,$D,$A,$B,$X,10,17,0xffff5bb1);
+	&R0( 0,$B,$C,$D,$A,$X,11,22,0x895cd7be);
+	&R0( 0,$A,$B,$C,$D,$X,12, 7,0x6b901122);
+	&R0( 0,$D,$A,$B,$C,$X,13,12,0xfd987193);
+	&R0( 0,$C,$D,$A,$B,$X,14,17,0xa679438e);
+	&R0( 1,$B,$C,$D,$A,$X,15,22,0x49b40821);
+
+	&comment("");
+	&comment("R1 section");
+	&R1(-1,$A,$B,$C,$D,$X,16, 5,0xf61e2562);
+	&R1( 0,$D,$A,$B,$C,$X,17, 9,0xc040b340);
+	&R1( 0,$C,$D,$A,$B,$X,18,14,0x265e5a51);
+	&R1( 0,$B,$C,$D,$A,$X,19,20,0xe9b6c7aa);
+	&R1( 0,$A,$B,$C,$D,$X,20, 5,0xd62f105d);
+	&R1( 0,$D,$A,$B,$C,$X,21, 9,0x02441453);
+	&R1( 0,$C,$D,$A,$B,$X,22,14,0xd8a1e681);
+	&R1( 0,$B,$C,$D,$A,$X,23,20,0xe7d3fbc8);
+	&R1( 0,$A,$B,$C,$D,$X,24, 5,0x21e1cde6);
+	&R1( 0,$D,$A,$B,$C,$X,25, 9,0xc33707d6);
+	&R1( 0,$C,$D,$A,$B,$X,26,14,0xf4d50d87);
+	&R1( 0,$B,$C,$D,$A,$X,27,20,0x455a14ed);
+	&R1( 0,$A,$B,$C,$D,$X,28, 5,0xa9e3e905);
+	&R1( 0,$D,$A,$B,$C,$X,29, 9,0xfcefa3f8);
+	&R1( 0,$C,$D,$A,$B,$X,30,14,0x676f02d9);
+	&R1( 1,$B,$C,$D,$A,$X,31,20,0x8d2a4c8a);
+
+	&comment("");
+	&comment("R2 section");
+	&R2( 0,-1,$A,$B,$C,$D,$X,32, 4,0xfffa3942);
+	&R2( 1, 0,$D,$A,$B,$C,$X,33,11,0x8771f681);
+	&R2( 2, 0,$C,$D,$A,$B,$X,34,16,0x6d9d6122);
+	&R2( 3, 0,$B,$C,$D,$A,$X,35,23,0xfde5380c);
+	&R2( 4, 0,$A,$B,$C,$D,$X,36, 4,0xa4beea44);
+	&R2( 5, 0,$D,$A,$B,$C,$X,37,11,0x4bdecfa9);
+	&R2( 6, 0,$C,$D,$A,$B,$X,38,16,0xf6bb4b60);
+	&R2( 7, 0,$B,$C,$D,$A,$X,39,23,0xbebfbc70);
+	&R2( 8, 0,$A,$B,$C,$D,$X,40, 4,0x289b7ec6);
+	&R2( 9, 0,$D,$A,$B,$C,$X,41,11,0xeaa127fa);
+	&R2(10, 0,$C,$D,$A,$B,$X,42,16,0xd4ef3085);
+	&R2(11, 0,$B,$C,$D,$A,$X,43,23,0x04881d05);
+	&R2(12, 0,$A,$B,$C,$D,$X,44, 4,0xd9d4d039);
+	&R2(13, 0,$D,$A,$B,$C,$X,45,11,0xe6db99e5);
+	&R2(14, 0,$C,$D,$A,$B,$X,46,16,0x1fa27cf8);
+	&R2(15, 1,$B,$C,$D,$A,$X,47,23,0xc4ac5665);
+
+	&comment("");
+	&comment("R3 section");
+	&R3(-1,$A,$B,$C,$D,$X,48, 6,0xf4292244);
+	&R3( 0,$D,$A,$B,$C,$X,49,10,0x432aff97);
+	&R3( 0,$C,$D,$A,$B,$X,50,15,0xab9423a7);
+	&R3( 0,$B,$C,$D,$A,$X,51,21,0xfc93a039);
+	&R3( 0,$A,$B,$C,$D,$X,52, 6,0x655b59c3);
+	&R3( 0,$D,$A,$B,$C,$X,53,10,0x8f0ccc92);
+	&R3( 0,$C,$D,$A,$B,$X,54,15,0xffeff47d);
+	&R3( 0,$B,$C,$D,$A,$X,55,21,0x85845dd1);
+	&R3( 0,$A,$B,$C,$D,$X,56, 6,0x6fa87e4f);
+	&R3( 0,$D,$A,$B,$C,$X,57,10,0xfe2ce6e0);
+	&R3( 0,$C,$D,$A,$B,$X,58,15,0xa3014314);
+	&R3( 0,$B,$C,$D,$A,$X,59,21,0x4e0811a1);
+	&R3( 0,$A,$B,$C,$D,$X,60, 6,0xf7537e82);
+	&R3( 0,$D,$A,$B,$C,$X,61,10,0xbd3af235);
+	&R3( 0,$C,$D,$A,$B,$X,62,15,0x2ad7d2bb);
+	&R3( 2,$B,$C,$D,$A,$X,63,21,0xeb86d391);
+
+	# &mov($tmp2,&wparam(0));	# done in the last R3
+	# &mov($tmp1,	&DWP( 0,$tmp2,"",0)); # done in the last R3
+
+	&add($A,$tmp1);
+	 &mov($tmp1,	&DWP( 4,$tmp2,"",0));
+
+	&add($B,$tmp1);
+	&mov($tmp1,	&DWP( 8,$tmp2,"",0));
+
+	&add($C,$tmp1);
+	&mov($tmp1,	&DWP(12,$tmp2,"",0));
+
+	&add($D,$tmp1);
+	&mov(&DWP( 0,$tmp2,"",0),$A);
+
+	&mov(&DWP( 4,$tmp2,"",0),$B);
+	&mov($tmp1,&swtmp(0)) unless $normal;
+
+	&mov(&DWP( 8,$tmp2,"",0),$C);
+	 &mov(&DWP(12,$tmp2,"",0),$D);
+
+	&cmp($tmp1,$X) unless $normal;			# check count
+	 &jae(&label("start")) unless $normal;
+
+	&pop("eax"); # pop the temp variable off the stack
+	 &pop("ebx");
+	&pop("ebp");
+	 &pop("edi");
+	&pop("esi");
+	 &ret();
+	&function_end_B($name);
+	}
+
diff --git a/jni/openssl/crypto/md5/asm/md5-ia64.S b/jni/openssl/crypto/md5/asm/md5-ia64.S
new file mode 100644
index 0000000..e7de08d
--- /dev/null
+++ b/jni/openssl/crypto/md5/asm/md5-ia64.S
@@ -0,0 +1,992 @@
+/* Copyright (c) 2005 Hewlett-Packard Development Company, L.P.
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.  */
+
+//	Common registers are assigned as follows:
+//
+//	COMMON
+//
+//	t0		Const Tbl Ptr	TPtr
+//	t1		Round Constant	TRound
+//	t4		Block residual	LenResid
+//	t5		Residual Data	DTmp
+//
+//	{in,out}0	Block 0 Cycle	RotateM0
+//	{in,out}1	Block Value 12	M12
+//	{in,out}2	Block Value 8	M8
+//	{in,out}3	Block Value 4	M4
+//	{in,out}4	Block Value 0	M0
+//	{in,out}5	Block 1 Cycle	RotateM1
+//	{in,out}6	Block Value 13	M13
+//	{in,out}7	Block Value 9	M9
+//	{in,out}8	Block Value 5	M5
+//	{in,out}9	Block Value 1	M1
+//	{in,out}10	Block 2 Cycle	RotateM2
+//	{in,out}11	Block Value 14	M14
+//	{in,out}12	Block Value 10	M10
+//	{in,out}13	Block Value 6	M6
+//	{in,out}14	Block Value 2	M2
+//	{in,out}15	Block 3 Cycle	RotateM3
+//	{in,out}16	Block Value 15	M15
+//	{in,out}17	Block Value 11	M11
+//	{in,out}18	Block Value 7	M7
+//	{in,out}19	Block Value 3	M3
+//	{in,out}20	Scratch			Z
+//	{in,out}21	Scratch			Y
+//	{in,out}22	Scratch			X
+//	{in,out}23	Scratch			W
+//	{in,out}24	Digest A		A
+//	{in,out}25	Digest B		B
+//	{in,out}26	Digest C		C
+//	{in,out}27	Digest D		D
+//	{in,out}28	Active Data Ptr	DPtr
+//	in28		Dummy Value		-
+//	out28		Dummy Value		-
+//	bt0			Coroutine Link	QUICK_RTN
+//
+///	These predicates are used for computing the padding block(s) and
+///	are shared between the driver and digest co-routines
+//
+//	pt0			Extra Pad Block	pExtra
+//	pt1			Load next word	pLoad
+//	pt2			Skip next word	pSkip
+//	pt3			Search for Pad	pNoPad
+//	pt4			Pad Word 0		pPad0
+//	pt5			Pad Word 1		pPad1
+//	pt6			Pad Word 2		pPad2
+//	pt7			Pad Word 3		pPad3
+
+#define	DTmp		r19
+#define	LenResid	r18
+#define	QUICK_RTN	b6
+#define	TPtr		r14
+#define	TRound		r15
+#define	pExtra		p6
+#define	pLoad		p7
+#define	pNoPad		p9
+#define	pPad0		p10
+#define	pPad1		p11
+#define	pPad2		p12
+#define	pPad3		p13
+#define	pSkip		p8
+
+#define	A_		out24
+#define	B_		out25
+#define	C_		out26
+#define	D_		out27
+#define	DPtr_		out28
+#define	M0_		out4
+#define	M1_		out9
+#define	M10_		out12
+#define	M11_		out17
+#define	M12_		out1
+#define	M13_		out6
+#define	M14_		out11
+#define	M15_		out16
+#define	M2_		out14
+#define	M3_		out19
+#define	M4_		out3
+#define	M5_		out8
+#define	M6_		out13
+#define	M7_		out18
+#define	M8_		out2
+#define	M9_		out7
+#define	RotateM0_	out0
+#define	RotateM1_	out5
+#define	RotateM2_	out10
+#define	RotateM3_	out15
+#define	W_		out23
+#define	X_		out22
+#define	Y_		out21
+#define	Z_		out20
+
+#define	A		in24
+#define	B		in25
+#define	C		in26
+#define	D		in27
+#define	DPtr		in28
+#define	M0		in4
+#define	M1		in9
+#define	M10		in12
+#define	M11		in17
+#define	M12		in1
+#define	M13		in6
+#define	M14		in11
+#define	M15		in16
+#define	M2		in14
+#define	M3		in19
+#define	M4		in3
+#define	M5		in8
+#define	M6		in13
+#define	M7		in18
+#define	M8		in2
+#define	M9		in7
+#define	RotateM0	in0
+#define	RotateM1	in5
+#define	RotateM2	in10
+#define	RotateM3	in15
+#define	W		in23
+#define	X		in22
+#define	Y		in21
+#define	Z		in20
+
+/* register stack configuration for md5_block_asm_data_order(): */
+#define	MD5_NINP	3
+#define	MD5_NLOC	0
+#define MD5_NOUT	29
+#define MD5_NROT	0
+
+/* register stack configuration for helpers: */
+#define	_NINPUTS	MD5_NOUT
+#define	_NLOCALS	0
+#define _NOUTPUT	0
+#define	_NROTATE	24	/* this must be <= _NINPUTS */
+
+#if defined(_HPUX_SOURCE) && !defined(_LP64)
+#define	ADDP	addp4
+#else
+#define	ADDP	add
+#endif
+
+#if defined(_HPUX_SOURCE) || defined(B_ENDIAN)
+#define HOST_IS_BIG_ENDIAN
+#endif
+
+//	Macros for getting the left and right portions of little-endian words
+
+#define	GETLW(dst, src, align)	dep.z dst = src, 32 - 8 * align, 8 * align
+#define	GETRW(dst, src, align)	extr.u dst = src, 8 * align, 32 - 8 * align
+
+//	MD5 driver
+//
+//		Reads an input block, then calls the digest block
+//		subroutine and adds the results to the accumulated
+//		digest.  It allocates 32 outs which the subroutine
+//		uses as its inputs and rotating
+//		registers. Initializes the round constant pointer and
+//		takes care of saving/restoring ar.lc
+//
+///	INPUT
+//
+//	in0		Context Ptr		CtxPtr0
+//	in1		Input Data Ptr		DPtrIn
+//	in2		Integral Blocks		BlockCount
+//	rp		Return Address		-
+//
+///	CODE
+//
+//	v2		Input Align		InAlign
+//	t0		Shared w/digest		-
+//	t1		Shared w/digest		-
+//	t2		Shared w/digest		-
+//	t3		Shared w/digest		-
+//	t4		Shared w/digest		-
+//	t5		Shared w/digest		-
+//	t6		PFS Save		PFSSave
+//	t7		ar.lc Save		LCSave
+//	t8		Saved PR		PRSave
+//	t9		2nd CtxPtr		CtxPtr1
+//	t10		Table Base		CTable
+//	t11		Table[0]		CTable0
+//	t13		Accumulator A		AccumA
+//	t14		Accumulator B		AccumB
+//	t15		Accumulator C		AccumC
+//	t16		Accumulator D		AccumD
+//	pt0		Shared w/digest		-
+//	pt1		Shared w/digest		-
+//	pt2		Shared w/digest		-
+//	pt3		Shared w/digest		-
+//	pt4		Shared w/digest		-
+//	pt5		Shared w/digest		-
+//	pt6		Shared w/digest		-
+//	pt7		Shared w/digest		-
+//	pt8		Not Aligned		pOff
+//	pt8		Blocks Left		pAgain
+
+#define	AccumA		r27
+#define	AccumB		r28
+#define	AccumC		r29
+#define	AccumD		r30
+#define	CTable		r24
+#define	CTable0		r25
+#define	CtxPtr0		in0
+#define	CtxPtr1		r23
+#define	DPtrIn		in1
+#define	BlockCount	in2
+#define	InAlign		r10
+#define	LCSave		r21
+#define	PFSSave		r20
+#define	PRSave		r22
+#define	pAgain		p63
+#define	pOff		p63
+
+	.text
+
+/* md5_block_asm_data_order(MD5_CTX *c, const void *data, size_t num)
+
+     where:
+      c: a pointer to a structure of this type:
+
+	   typedef struct MD5state_st
+	     {
+	       MD5_LONG A,B,C,D;
+	       MD5_LONG Nl,Nh;
+	       MD5_LONG data[MD5_LBLOCK];
+	       unsigned int num;
+	     }
+	   MD5_CTX;
+
+      data: a pointer to the input data (may be misaligned)
+      num:  the number of 16-byte blocks to hash (i.e., the length
+            of DATA is 16*NUM).
+
+   */
+
+	.type	md5_block_asm_data_order, @function
+	.global	md5_block_asm_data_order
+	.align	32
+	.proc	md5_block_asm_data_order
+md5_block_asm_data_order:
+.md5_block:
+	.prologue
+{	.mmi
+	.save	ar.pfs, PFSSave
+	alloc	PFSSave = ar.pfs, MD5_NINP, MD5_NLOC, MD5_NOUT, MD5_NROT
+	ADDP	CtxPtr1 = 8, CtxPtr0
+	mov	CTable = ip
+}
+{	.mmi
+	ADDP	DPtrIn = 0, DPtrIn
+	ADDP	CtxPtr0 = 0, CtxPtr0
+	.save	ar.lc, LCSave
+	mov	LCSave = ar.lc
+}
+;;
+{	.mmi
+	add	CTable = .md5_tbl_data_order#-.md5_block#, CTable
+	and	InAlign = 0x3, DPtrIn
+}
+
+{	.mmi
+	ld4	AccumA = [CtxPtr0], 4
+	ld4	AccumC = [CtxPtr1], 4
+	.save pr, PRSave
+	mov	PRSave = pr
+	.body
+}
+;;
+{	.mmi
+	ld4	AccumB = [CtxPtr0]
+	ld4	AccumD = [CtxPtr1]
+	dep	DPtr_ = 0, DPtrIn, 0, 2
+} ;;
+#ifdef HOST_IS_BIG_ENDIAN
+	rum	psr.be;;	// switch to little-endian
+#endif
+{	.mmb
+	ld4	CTable0 = [CTable], 4
+	cmp.ne	pOff, p0 = 0, InAlign
+(pOff)	br.cond.spnt.many .md5_unaligned
+} ;;
+
+//	The FF load/compute loop rotates values three times, so that
+//	loading into M12 here produces the M0 value, M13 -> M1, etc.
+
+.md5_block_loop0:
+{	.mmi
+	ld4	M12_ = [DPtr_], 4
+	mov	TPtr = CTable
+	mov	TRound = CTable0
+} ;;
+{	.mmi
+	ld4	M13_ = [DPtr_], 4
+	mov	A_ = AccumA
+	mov	B_ = AccumB
+} ;;
+{	.mmi
+	ld4	M14_ = [DPtr_], 4
+	mov	C_ = AccumC
+	mov	D_ = AccumD
+} ;;
+{	.mmb
+	ld4	M15_ = [DPtr_], 4
+	add	BlockCount = -1, BlockCount
+	br.call.sptk.many QUICK_RTN = md5_digest_block0
+} ;;
+
+//	Now, we add the new digest values and do some clean-up
+//	before checking if there's another full block to process
+
+{	.mmi
+	add	AccumA = AccumA, A_
+	add	AccumB = AccumB, B_
+	cmp.ne	pAgain, p0 = 0, BlockCount
+}
+{	.mib
+	add	AccumC = AccumC, C_
+	add	AccumD = AccumD, D_
+(pAgain) br.cond.dptk.many .md5_block_loop0
+} ;;
+
+.md5_exit:
+#ifdef HOST_IS_BIG_ENDIAN
+	sum	psr.be;;	// switch back to big-endian mode
+#endif
+{	.mmi
+	st4	[CtxPtr0] = AccumB, -4
+	st4	[CtxPtr1] = AccumD, -4
+	mov	pr = PRSave, 0x1ffff ;;
+}
+{	.mmi
+	st4	[CtxPtr0] = AccumA
+	st4	[CtxPtr1] = AccumC
+	mov	ar.lc = LCSave
+} ;;
+{	.mib
+	mov	ar.pfs = PFSSave
+	br.ret.sptk.few	rp
+} ;;
+
+#define	MD5UNALIGNED(offset)						\
+.md5_process##offset:							\
+{	.mib ;								\
+	nop	0x0	;						\
+	GETRW(DTmp, DTmp, offset) ;					\
+} ;;									\
+.md5_block_loop##offset:						\
+{	.mmi ;								\
+	ld4	Y_ = [DPtr_], 4 ;					\
+	mov	TPtr = CTable ;						\
+	mov	TRound = CTable0 ;					\
+} ;;									\
+{	.mmi ;								\
+	ld4	M13_ = [DPtr_], 4 ;					\
+	mov	A_ = AccumA ;						\
+	mov	B_ = AccumB ;						\
+} ;;									\
+{	.mii ;								\
+	ld4	M14_ = [DPtr_], 4 ;					\
+	GETLW(W_, Y_, offset) ;						\
+	mov	C_ = AccumC ;						\
+}									\
+{	.mmi ;								\
+	mov	D_ = AccumD ;;						\
+	or	M12_ = W_, DTmp ;					\
+	GETRW(DTmp, Y_, offset) ;					\
+}									\
+{	.mib ;								\
+	ld4	M15_ = [DPtr_], 4 ;					\
+	add	BlockCount = -1, BlockCount ;				\
+	br.call.sptk.many QUICK_RTN = md5_digest_block##offset;		\
+} ;;									\
+{	.mmi ;								\
+	add	AccumA = AccumA, A_ ;					\
+	add	AccumB = AccumB, B_ ;					\
+	cmp.ne	pAgain, p0 = 0, BlockCount ;				\
+}									\
+{	.mib ;								\
+	add	AccumC = AccumC, C_ ;					\
+	add	AccumD = AccumD, D_ ;					\
+(pAgain) br.cond.dptk.many .md5_block_loop##offset ;			\
+} ;;									\
+{	.mib ;								\
+	nop	0x0 ;							\
+	nop	0x0 ;							\
+	br.cond.sptk.many .md5_exit ;					\
+} ;;
+
+	.align	32
+.md5_unaligned:
+//
+//	Because variable shifts are expensive, we special case each of
+//	the four alignments. In practice, this won't hurt too much
+//	since only one working set of code will be loaded.
+//
+{	.mib
+	ld4	DTmp = [DPtr_], 4
+	cmp.eq	pOff, p0 = 1, InAlign
+(pOff)	br.cond.dpnt.many .md5_process1
+} ;;
+{	.mib
+	cmp.eq	pOff, p0 = 2, InAlign
+	nop	0x0
+(pOff)	br.cond.dpnt.many .md5_process2
+} ;;
+	MD5UNALIGNED(3)
+	MD5UNALIGNED(1)
+	MD5UNALIGNED(2)
+
+	.endp md5_block_asm_data_order
+
+
+// MD5 Perform the F function and load
+//
+// Passed the first 4 words (M0 - M3) and initial (A, B, C, D) values,
+// computes the FF() round of functions, then branches to the common
+// digest code to finish up with GG(), HH, and II().
+//
+// INPUT
+//
+// rp Return Address -
+//
+// CODE
+//
+// v0 PFS bit bucket PFS
+// v1 Loop Trip Count LTrip
+// pt0 Load next word pMore
+
+/* For F round: */
+#define LTrip	r9
+#define PFS	r8
+#define pMore	p6
+
+/* For GHI rounds: */
+#define T	r9
+#define U	r10
+#define V	r11
+
+#define COMPUTE(a, b, s, M, R)			\
+{						\
+	.mii ;					\
+	ld4 TRound = [TPtr], 4 ;		\
+	dep.z Y = Z, 32, 32 ;;			\
+	shrp Z = Z, Y, 64 - s ;			\
+} ;;						\
+{						\
+	.mmi ;					\
+	add a = Z, b ;				\
+	mov R = M ;				\
+	nop 0x0 ;				\
+} ;;
+
+#define LOOP(a, b, s, M, R, label)		\
+{	.mii ;					\
+	ld4 TRound = [TPtr], 4 ;		\
+	dep.z Y = Z, 32, 32 ;;			\
+	shrp Z = Z, Y, 64 - s ;			\
+} ;;						\
+{	.mib ;					\
+	add a = Z, b ;				\
+	mov R = M ;				\
+	br.ctop.sptk.many label ;		\
+} ;;
+
+// G(B, C, D) = (B & D) | (C & ~D)
+
+#define G(a, b, c, d, M)			\
+{	.mmi ;					\
+	add Z = M, TRound ;			\
+	and Y = b, d ;				\
+	andcm X = c, d ;			\
+} ;;						\
+{	.mii ;					\
+	add Z = Z, a ;				\
+	or Y = Y, X ;;				\
+	add Z = Z, Y ;				\
+} ;;
+
+// H(B, C, D) = B ^ C ^ D
+
+#define H(a, b, c, d, M)			\
+{	.mmi ;					\
+	add Z = M, TRound ;			\
+	xor Y = b, c ;				\
+	nop 0x0 ;				\
+} ;;						\
+{	.mii ;					\
+	add Z = Z, a ;				\
+	xor Y = Y, d ;;				\
+	add Z = Z, Y ;				\
+} ;;
+
+// I(B, C, D) = C ^ (B | ~D)
+//
+// However, since we have an andcm operator, we use the fact that
+//
+// Y ^ Z == ~Y ^ ~Z
+//
+// to rewrite the expression as
+//
+// I(B, C, D) = ~C ^ (~B & D)
+
+#define I(a, b, c, d, M)			\
+{	.mmi ;					\
+	add Z = M, TRound ;			\
+	andcm Y = d, b ;			\
+	andcm X = -1, c ;			\
+} ;;						\
+{	.mii ;					\
+	add Z = Z, a ;				\
+	xor Y = Y, X ;;				\
+	add Z = Z, Y ;				\
+} ;;
+
+#define GG4(label)				\
+	G(A, B, C, D, M0)			\
+	COMPUTE(A, B, 5, M0, RotateM0)		\
+	G(D, A, B, C, M1)			\
+	COMPUTE(D, A, 9, M1, RotateM1)		\
+	G(C, D, A, B, M2)			\
+	COMPUTE(C, D, 14, M2, RotateM2)		\
+	G(B, C, D, A, M3)			\
+	LOOP(B, C, 20, M3, RotateM3, label)
+
+#define HH4(label)				\
+	H(A, B, C, D, M0)			\
+	COMPUTE(A, B, 4, M0, RotateM0)		\
+	H(D, A, B, C, M1)			\
+	COMPUTE(D, A, 11, M1, RotateM1)		\
+	H(C, D, A, B, M2)			\
+	COMPUTE(C, D, 16, M2, RotateM2)		\
+	H(B, C, D, A, M3)			\
+	LOOP(B, C, 23, M3, RotateM3, label)
+
+#define II4(label)				\
+	I(A, B, C, D, M0)			\
+	COMPUTE(A, B, 6, M0, RotateM0)		\
+	I(D, A, B, C, M1)			\
+	COMPUTE(D, A, 10, M1, RotateM1)		\
+	I(C, D, A, B, M2)			\
+	COMPUTE(C, D, 15, M2, RotateM2)		\
+	I(B, C, D, A, M3)			\
+	LOOP(B, C, 21, M3, RotateM3, label)
+
+#define FFLOAD(a, b, c, d, M, N, s)		\
+{	.mii ;					\
+(pMore) ld4 N = [DPtr], 4 ;			\
+	add Z = M, TRound ;			\
+	and Y = c, b ;				\
+}						\
+{	.mmi ;					\
+	andcm X = d, b ;;			\
+	add Z = Z, a ;				\
+	or Y = Y, X ;				\
+} ;;						\
+{	.mii ;					\
+	ld4 TRound = [TPtr], 4 ;		\
+	add Z = Z, Y ;;				\
+	dep.z Y = Z, 32, 32 ;			\
+} ;;						\
+{	.mii ;					\
+	nop 0x0 ;				\
+	shrp Z = Z, Y, 64 - s ;;		\
+	add a = Z, b ;				\
+} ;;
+
+#define FFLOOP(a, b, c, d, M, N, s, dest)	\
+{	.mii ;					\
+(pMore)	ld4 N = [DPtr], 4 ;			\
+	add Z = M, TRound ;			\
+	and Y = c, b ;				\
+}						\
+{	.mmi ;					\
+	andcm X = d, b ;;			\
+	add Z = Z, a ;				\
+	or Y = Y, X ;				\
+} ;;						\
+{	.mii ;					\
+	ld4 TRound = [TPtr], 4 ;		\
+	add Z = Z, Y ;;				\
+	dep.z Y = Z, 32, 32 ;			\
+} ;;						\
+{	.mii ;					\
+	nop 0x0 ;				\
+	shrp Z = Z, Y, 64 - s ;;		\
+	add a = Z, b ;				\
+}						\
+{	.mib ;					\
+	cmp.ne pMore, p0 = 0, LTrip ;		\
+	add LTrip = -1, LTrip ;			\
+	br.ctop.dptk.many dest ;		\
+} ;;
+
+	.type md5_digest_block0, @function
+	.align 32
+
+	.proc md5_digest_block0
+	.prologue
+md5_digest_block0:
+	.altrp QUICK_RTN
+	.body
+{	.mmi
+	alloc PFS = ar.pfs, _NINPUTS, _NLOCALS, _NOUTPUT, _NROTATE
+	mov LTrip = 2
+	mov ar.lc = 3
+} ;;
+{	.mii
+	cmp.eq pMore, p0 = r0, r0
+	mov ar.ec = 0
+	nop 0x0
+} ;;
+
+.md5_FF_round0:
+	FFLOAD(A, B, C, D, M12, RotateM0, 7)
+	FFLOAD(D, A, B, C, M13, RotateM1, 12)
+	FFLOAD(C, D, A, B, M14, RotateM2, 17)
+	FFLOOP(B, C, D, A, M15, RotateM3, 22, .md5_FF_round0)
+	//
+	// !!! Fall through to md5_digest_GHI
+	//
+	.endp md5_digest_block0
+
+	.type md5_digest_GHI, @function
+	.align 32
+
+	.proc md5_digest_GHI
+	.prologue
+	.regstk _NINPUTS, _NLOCALS, _NOUTPUT, _NROTATE
+md5_digest_GHI:
+	.altrp QUICK_RTN
+	.body
+//
+// The following sequence shuffles the block constants round for the
+// next round:
+//
+// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
+// 1 6 11 0 5 10 15 4 9 14 3 8 13 2 7 12
+//
+{	.mmi
+	mov Z = M0
+	mov Y = M15
+	mov ar.lc = 3
+}
+{	.mmi
+	mov X = M2
+	mov W = M9
+	mov V = M4
+} ;;
+
+{	.mmi
+	mov M0 = M1
+	mov M15 = M12
+	mov ar.ec = 1
+}
+{	.mmi
+	mov M2 = M11
+	mov M9 = M14
+	mov M4 = M5
+} ;;
+
+{	.mmi
+	mov M1 = M6
+	mov M12 = M13
+	mov U = M3
+}
+{	.mmi
+	mov M11 = M8
+	mov M14 = M7
+	mov M5 = M10
+} ;;
+
+{	.mmi
+	mov M6 = Y
+	mov M13 = X
+	mov M3 = Z
+}
+{	.mmi
+	mov M8 = W
+	mov M7 = V
+	mov M10 = U
+} ;;
+
+.md5_GG_round:
+	GG4(.md5_GG_round)
+
+// The following sequence shuffles the block constants round for the
+// next round:
+//
+// 1 6 11 0 5 10 15 4 9 14 3 8 13 2 7 12
+// 5 8 11 14 1 4 7 10 13 0 3 6 9 12 15 2
+
+{	.mmi
+	mov Z = M0
+	mov Y = M1
+	mov ar.lc = 3
+}
+{	.mmi
+	mov X = M3
+	mov W = M5
+	mov V = M6
+} ;;
+
+{	.mmi
+	mov M0 = M4
+	mov M1 = M11
+	mov ar.ec = 1
+}
+{	.mmi
+	mov M3 = M9
+	mov U = M8
+	mov T = M13
+} ;;
+
+{	.mmi
+	mov M4 = Z
+	mov M11 = Y
+	mov M5 = M7
+}
+{	.mmi
+	mov M6 = M14
+	mov M8 = M12
+	mov M13 = M15
+} ;;
+
+{	.mmi
+	mov M7 = W
+	mov M14 = V
+	nop 0x0
+}
+{	.mmi
+	mov M9 = X
+	mov M12 = U
+	mov M15 = T
+} ;;
+
+.md5_HH_round:
+	HH4(.md5_HH_round)
+
+// The following sequence shuffles the block constants round for the
+// next round:
+//
+// 5 8 11 14 1 4 7 10 13 0 3 6 9 12 15 2
+// 0 7 14 5 12 3 10 1 8 15 6 13 4 11 2 9
+
+{	.mmi
+	mov Z = M0
+	mov Y = M15
+	mov ar.lc = 3
+}
+{	.mmi
+	mov X = M10
+	mov W = M1
+	mov V = M4
+} ;;
+
+{	.mmi
+	mov M0 = M9
+	mov M15 = M12
+	mov ar.ec = 1
+}
+{	.mmi
+	mov M10 = M11
+	mov M1 = M6
+	mov M4 = M13
+} ;;
+
+{	.mmi
+	mov M9 = M14
+	mov M12 = M5
+	mov U = M3
+}
+{	.mmi
+	mov M11 = M8
+	mov M6 = M7
+	mov M13 = M2
+} ;;
+
+{	.mmi
+	mov M14 = Y
+	mov M5 = X
+	mov M3 = Z
+}
+{	.mmi
+	mov M8 = W
+	mov M7 = V
+	mov M2 = U
+} ;;
+
+.md5_II_round:
+	II4(.md5_II_round)
+
+{	.mib
+	nop 0x0
+	nop 0x0
+	br.ret.sptk.many QUICK_RTN
+} ;;
+
+	.endp md5_digest_GHI
+
+#define FFLOADU(a, b, c, d, M, P, N, s, offset)	\
+{	.mii ;					\
+(pMore) ld4 N = [DPtr], 4 ;			\
+	add Z = M, TRound ;			\
+	and Y = c, b ;				\
+}						\
+{	.mmi ;					\
+	andcm X = d, b ;;			\
+	add Z = Z, a ;				\
+	or Y = Y, X ;				\
+} ;;						\
+{	.mii ;					\
+	ld4 TRound = [TPtr], 4 ;		\
+	GETLW(W, P, offset) ;			\
+	add Z = Z, Y ;				\
+} ;;						\
+{	.mii ;					\
+	or W = W, DTmp ;			\
+	dep.z Y = Z, 32, 32 ;;			\
+	shrp Z = Z, Y, 64 - s ;			\
+} ;;						\
+{	.mii ;					\
+	add a = Z, b ;				\
+	GETRW(DTmp, P, offset) ;		\
+	mov P = W ;				\
+} ;;
+
+#define FFLOOPU(a, b, c, d, M, P, N, s, offset)		\
+{	.mii ;						\
+(pMore) ld4 N = [DPtr], 4 ;				\
+	add Z = M, TRound ;				\
+	and Y = c, b ;					\
+}							\
+{	.mmi ;						\
+	andcm X = d, b ;;				\
+	add Z = Z, a ;					\
+	or Y = Y, X ;					\
+} ;;							\
+{	.mii ;						\
+	ld4 TRound = [TPtr], 4 ;			\
+(pMore) GETLW(W, P, offset) 	;			\
+	add Z = Z, Y ;					\
+} ;;							\
+{	.mii ;						\
+(pMore) or W = W, DTmp ;				\
+	dep.z Y = Z, 32, 32 ;;				\
+	shrp Z = Z, Y, 64 - s ;				\
+} ;;							\
+{	.mii ;						\
+	add a = Z, b ;					\
+(pMore) GETRW(DTmp, P, offset) 	;			\
+(pMore) mov P = W ;					\
+}							\
+{	.mib ;						\
+	cmp.ne pMore, p0 = 0, LTrip ;			\
+	add LTrip = -1, LTrip ;				\
+	br.ctop.sptk.many .md5_FF_round##offset ;	\
+} ;;
+
+#define MD5FBLOCK(offset)						\
+	.type md5_digest_block##offset, @function ;			\
+									\
+	.align 32 ;							\
+	.proc md5_digest_block##offset ;				\
+	.prologue ;							\
+	.altrp QUICK_RTN ;						\
+	.body ;								\
+md5_digest_block##offset:						\
+{	.mmi ;								\
+	alloc PFS = ar.pfs, _NINPUTS, _NLOCALS, _NOUTPUT, _NROTATE ;	\
+	mov LTrip = 2 ;							\
+	mov ar.lc = 3 ;							\
+} ;;									\
+{	.mii ;								\
+	cmp.eq pMore, p0 = r0, r0 ;					\
+	mov ar.ec = 0 ;							\
+	nop 0x0 ;							\
+} ;;									\
+									\
+	.pred.rel "mutex", pLoad, pSkip ;				\
+.md5_FF_round##offset:							\
+	FFLOADU(A, B, C, D, M12, M13, RotateM0, 7, offset)		\
+	FFLOADU(D, A, B, C, M13, M14, RotateM1, 12, offset)		\
+	FFLOADU(C, D, A, B, M14, M15, RotateM2, 17, offset)		\
+	FFLOOPU(B, C, D, A, M15, RotateM0, RotateM3, 22, offset)	\
+									\
+{	.mib ;								\
+	nop 0x0 ;							\
+	nop 0x0 ;							\
+	br.cond.sptk.many md5_digest_GHI ;				\
+} ;;									\
+	.endp md5_digest_block##offset
+
+MD5FBLOCK(1)
+MD5FBLOCK(2)
+MD5FBLOCK(3)
+
+	.align 64
+	.type md5_constants, @object
+md5_constants:
+.md5_tbl_data_order:			// To ensure little-endian data
+					// order, code as bytes.
+	data1 0x78, 0xa4, 0x6a, 0xd7	//     0
+	data1 0x56, 0xb7, 0xc7, 0xe8	//     1
+	data1 0xdb, 0x70, 0x20, 0x24	//     2
+	data1 0xee, 0xce, 0xbd, 0xc1	//     3
+	data1 0xaf, 0x0f, 0x7c, 0xf5	//     4
+	data1 0x2a, 0xc6, 0x87, 0x47	//     5
+	data1 0x13, 0x46, 0x30, 0xa8	//     6
+	data1 0x01, 0x95, 0x46, 0xfd	//     7
+	data1 0xd8, 0x98, 0x80, 0x69	//     8
+	data1 0xaf, 0xf7, 0x44, 0x8b	//     9
+	data1 0xb1, 0x5b, 0xff, 0xff	//    10
+	data1 0xbe, 0xd7, 0x5c, 0x89	//    11
+	data1 0x22, 0x11, 0x90, 0x6b	//    12
+	data1 0x93, 0x71, 0x98, 0xfd	//    13
+	data1 0x8e, 0x43, 0x79, 0xa6	//    14
+	data1 0x21, 0x08, 0xb4, 0x49	//    15
+	data1 0x62, 0x25, 0x1e, 0xf6	//    16
+	data1 0x40, 0xb3, 0x40, 0xc0	//    17
+	data1 0x51, 0x5a, 0x5e, 0x26	//    18
+	data1 0xaa, 0xc7, 0xb6, 0xe9	//    19
+	data1 0x5d, 0x10, 0x2f, 0xd6	//    20
+	data1 0x53, 0x14, 0x44, 0x02	//    21
+	data1 0x81, 0xe6, 0xa1, 0xd8	//    22
+	data1 0xc8, 0xfb, 0xd3, 0xe7	//    23
+	data1 0xe6, 0xcd, 0xe1, 0x21	//    24
+	data1 0xd6, 0x07, 0x37, 0xc3	//    25
+	data1 0x87, 0x0d, 0xd5, 0xf4	//    26
+	data1 0xed, 0x14, 0x5a, 0x45	//    27
+	data1 0x05, 0xe9, 0xe3, 0xa9	//    28
+	data1 0xf8, 0xa3, 0xef, 0xfc	//    29
+	data1 0xd9, 0x02, 0x6f, 0x67	//    30
+	data1 0x8a, 0x4c, 0x2a, 0x8d	//    31
+	data1 0x42, 0x39, 0xfa, 0xff	//    32
+	data1 0x81, 0xf6, 0x71, 0x87	//    33
+	data1 0x22, 0x61, 0x9d, 0x6d	//    34
+	data1 0x0c, 0x38, 0xe5, 0xfd	//    35
+	data1 0x44, 0xea, 0xbe, 0xa4	//    36
+	data1 0xa9, 0xcf, 0xde, 0x4b	//    37
+	data1 0x60, 0x4b, 0xbb, 0xf6	//    38
+	data1 0x70, 0xbc, 0xbf, 0xbe	//    39
+	data1 0xc6, 0x7e, 0x9b, 0x28	//    40
+	data1 0xfa, 0x27, 0xa1, 0xea	//    41
+	data1 0x85, 0x30, 0xef, 0xd4	//    42
+	data1 0x05, 0x1d, 0x88, 0x04	//    43
+	data1 0x39, 0xd0, 0xd4, 0xd9	//    44
+	data1 0xe5, 0x99, 0xdb, 0xe6	//    45
+	data1 0xf8, 0x7c, 0xa2, 0x1f	//    46
+	data1 0x65, 0x56, 0xac, 0xc4	//    47
+	data1 0x44, 0x22, 0x29, 0xf4	//    48
+	data1 0x97, 0xff, 0x2a, 0x43	//    49
+	data1 0xa7, 0x23, 0x94, 0xab	//    50
+	data1 0x39, 0xa0, 0x93, 0xfc	//    51
+	data1 0xc3, 0x59, 0x5b, 0x65	//    52
+	data1 0x92, 0xcc, 0x0c, 0x8f	//    53
+	data1 0x7d, 0xf4, 0xef, 0xff	//    54
+	data1 0xd1, 0x5d, 0x84, 0x85	//    55
+	data1 0x4f, 0x7e, 0xa8, 0x6f	//    56
+	data1 0xe0, 0xe6, 0x2c, 0xfe	//    57
+	data1 0x14, 0x43, 0x01, 0xa3	//    58
+	data1 0xa1, 0x11, 0x08, 0x4e	//    59
+	data1 0x82, 0x7e, 0x53, 0xf7	//    60
+	data1 0x35, 0xf2, 0x3a, 0xbd	//    61
+	data1 0xbb, 0xd2, 0xd7, 0x2a	//    62
+	data1 0x91, 0xd3, 0x86, 0xeb	//    63
+.size	md5_constants#,64*4
diff --git a/jni/openssl/crypto/md5/asm/md5-x86_64.pl b/jni/openssl/crypto/md5/asm/md5-x86_64.pl
new file mode 100755
index 0000000..8678854
--- /dev/null
+++ b/jni/openssl/crypto/md5/asm/md5-x86_64.pl
@@ -0,0 +1,369 @@
+#!/usr/bin/perl -w
+#
+# MD5 optimized for AMD64.
+#
+# Author: Marc Bevand <bevand_m (at) epita.fr>
+# Licence: I hereby disclaim the copyright on this code and place it
+# in the public domain.
+#
+
+use strict;
+
+my $code;
+
+# round1_step() does:
+#   dst = x + ((dst + F(x,y,z) + X[k] + T_i) <<< s)
+#   %r10d = X[k_next]
+#   %r11d = z' (copy of z for the next step)
+# Each round1_step() takes about 5.3 clocks (9 instructions, 1.7 IPC)
+# Emit one round-1 step: dst = x + ((dst + F(x,y,z) + X[k] + T_i) <<< s),
+# where F(x,y,z) = (x & y) | (~x & z).  %r10d always carries X[k_next] and
+# %r11d a copy of z into the following step; the very first step of the
+# round ($pos == -1) also emits the X[0] prefetch and the initial z' copy.
+sub round1_step
+{
+    my ($pos, $dst, $x, $y, $z, $k_next, $T_i, $s) = @_;
+    if ($pos == -1) {
+	$code .= " mov	0*4(%rsi),	%r10d		/* (NEXT STEP) X[0] */\n";
+	$code .= " mov	%edx,		%r11d		/* (NEXT STEP) z' = %edx */\n";
+    }
+    $code .= <<EOF;
+	xor	$y,		%r11d		/* y ^ ... */
+	lea	$T_i($dst,%r10d),$dst		/* Const + dst + ... */
+	and	$x,		%r11d		/* x & ... */
+	xor	$z,		%r11d		/* z ^ ... */
+	mov	$k_next*4(%rsi),%r10d		/* (NEXT STEP) X[$k_next] */
+	add	%r11d,		$dst		/* dst += ... */
+	rol	\$$s,		$dst		/* dst <<< s */
+	mov	$y,		%r11d		/* (NEXT STEP) z' = $y */
+	add	$x,		$dst		/* dst += x */
+EOF
+}
+
+# round2_step() does:
+#   dst = x + ((dst + G(x,y,z) + X[k] + T_i) <<< s)
+#   %r10d = X[k_next]
+#   %r11d = z' (copy of z for the next step)
+#   %r12d = z' (copy of z for the next step)
+# Each round2_step() takes about 5.4 clocks (11 instructions, 2.0 IPC)
+# Emit one round-2 step: dst = x + ((dst + G(x,y,z) + X[k] + T_i) <<< s),
+# where G(x,y,z) = (x & z) | (y & ~z).  Two copies of z are kept (%r11d
+# and %r12d) so the two AND legs of G can be computed independently; the
+# first step of the round ($pos == -1) emits the X[1] prefetch and both
+# initial copies.
+sub round2_step
+{
+    my ($pos, $dst, $x, $y, $z, $k_next, $T_i, $s) = @_;
+    if ($pos == -1) {
+	$code .= " mov	1*4(%rsi),	%r10d		/* (NEXT STEP) X[1] */\n";
+	$code .= " mov	%edx,		%r11d		/* (NEXT STEP) z' = %edx */\n";
+	$code .= " mov	%edx,		%r12d		/* (NEXT STEP) z' = %edx */\n";
+    }
+    $code .= <<EOF;
+	not	%r11d				/* not z */
+	lea	$T_i($dst,%r10d),$dst		/* Const + dst + ... */
+	and	$x,		%r12d		/* x & z */
+	and	$y,		%r11d		/* y & (not z) */
+	mov	$k_next*4(%rsi),%r10d		/* (NEXT STEP) X[$k_next] */
+	or	%r11d,		%r12d		/* (y & (not z)) | (x & z) */
+	mov	$y,		%r11d		/* (NEXT STEP) z' = $y */
+	add	%r12d,		$dst		/* dst += ... */
+	mov	$y,		%r12d		/* (NEXT STEP) z' = $y */
+	rol	\$$s,		$dst		/* dst <<< s */
+	add	$x,		$dst		/* dst += x */
+EOF
+}
+
+# round3_step() does:
+#   dst = x + ((dst + H(x,y,z) + X[k] + T_i) <<< s)
+#   %r10d = X[k_next]
+#   %r11d = y' (copy of y for the next step)
+# Each round3_step() takes about 4.2 clocks (8 instructions, 1.9 IPC)
+# Emit one round-3 step: dst = x + ((dst + H(x,y,z) + X[k] + T_i) <<< s),
+# where H(x,y,z) = x ^ y ^ z.  %r11d carries a copy of y into the next
+# step; the first step of the round ($pos == -1) also emits the X[5]
+# prefetch and the initial y' copy.
+sub round3_step
+{
+    my ($pos, $dst, $x, $y, $z, $k_next, $T_i, $s) = @_;
+    if ($pos == -1) {
+	$code .= " mov	5*4(%rsi),	%r10d		/* (NEXT STEP) X[5] */\n";
+	$code .= " mov	%ecx,		%r11d		/* (NEXT STEP) y' = %ecx */\n";
+    }
+    $code .= <<EOF;
+	lea	$T_i($dst,%r10d),$dst		/* Const + dst + ... */
+	mov	$k_next*4(%rsi),%r10d		/* (NEXT STEP) X[$k_next] */
+	xor	$z,		%r11d		/* z ^ ... */
+	xor	$x,		%r11d		/* x ^ ... */
+	add	%r11d,		$dst		/* dst += ... */
+	rol	\$$s,		$dst		/* dst <<< s */
+	mov	$x,		%r11d		/* (NEXT STEP) y' = $x */
+	add	$x,		$dst		/* dst += x */
+EOF
+}
+
+# round4_step() does:
+#   dst = x + ((dst + I(x,y,z) + X[k] + T_i) <<< s)
+#   %r10d = X[k_next]
+#   %r11d = not z' (copy of not z for the next step)
+# Each round4_step() takes about 5.2 clocks (9 instructions, 1.7 IPC)
+# Emit one round-4 step: dst = x + ((dst + I(x,y,z) + X[k] + T_i) <<< s),
+# where I(x,y,z) = y ^ (x | ~z).  ~z is materialized by XOR-ing z with
+# 0xffffffff and kept in %r11d for the next step; the first step of the
+# round ($pos == -1) emits the X[0] prefetch and the initial ~z value.
+sub round4_step
+{
+    my ($pos, $dst, $x, $y, $z, $k_next, $T_i, $s) = @_;
+    if ($pos == -1) {
+	$code .= " mov	0*4(%rsi),	%r10d		/* (NEXT STEP) X[0] */\n";
+	$code .= " mov	\$0xffffffff,	%r11d\n";
+	$code .= " xor	%edx,		%r11d		/* (NEXT STEP) not z' = not %edx*/\n";
+    }
+    $code .= <<EOF;
+	lea	$T_i($dst,%r10d),$dst		/* Const + dst + ... */
+	or	$x,		%r11d		/* x | ... */
+	xor	$y,		%r11d		/* y ^ ... */
+	add	%r11d,		$dst		/* dst += ... */
+	mov	$k_next*4(%rsi),%r10d		/* (NEXT STEP) X[$k_next] */
+	mov	\$0xffffffff,	%r11d
+	rol	\$$s,		$dst		/* dst <<< s */
+	xor	$y,		%r11d		/* (NEXT STEP) not z' = not $y */
+	add	$x,		$dst		/* dst += x */
+EOF
+}
+
+# Command-line handling, as in the other perlasm drivers: the first
+# argument is the assembler "flavour" (elf, macosx, mingw64, nasm, ...),
+# the second the output file; a single argument containing a dot is taken
+# to be the output file itself.
+my $flavour = shift;
+my $output  = shift;
+if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
+
+my $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
+
+# Locate the x86_64-xlate.pl translator either next to this script or in
+# the shared ../../perlasm directory.
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; my $dir=$1; my $xlate;
+( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
+die "can't locate x86_64-xlate.pl";
+
+# $flavour and/or $output may legitimately be undef here; silence the
+# "uninitialized" warnings from interpolating them into the pipe command.
+no warnings qw(uninitialized);
+# Pipe everything we print through the translator.  The open was
+# previously unchecked: if the translator could not be started, the
+# script would silently emit nothing.  Fail loudly instead.
+open STDOUT,"| $^X $xlate $flavour $output" or die "can't call $xlate: $!";
+
+# Function prologue and per-block setup (SysV AMD64 calling convention):
+#   rdi = MD5_CTX *ctx, rsi = data pointer, rdx = number of 64-byte blocks.
+# Pushes the five callee-saved registers used by the unrolled rounds
+# (rbp, rbx, r12, r14, r15 = 40 bytes, matching the "add \$40,%rsp" in the
+# epilogue), loads the chaining values A..D into eax..edx, and snapshots
+# them into r8d/r9d/r14d/r15d at the top of each block iteration.
+# NOTE: the text below is the emitted assembly (a runtime string) and must
+# not be edited for style.
+$code .= <<EOF;
+.text
+.align 16
+
+.globl md5_block_asm_data_order
+.type md5_block_asm_data_order,\@function,3
+md5_block_asm_data_order:
+	push	%rbp
+	push	%rbx
+	push	%r12
+	push	%r14
+	push	%r15
+.Lprologue:
+
+	# rdi = arg #1 (ctx, MD5_CTX pointer)
+	# rsi = arg #2 (ptr, data pointer)
+	# rdx = arg #3 (nbr, number of 16-word blocks to process)
+	mov	%rdi,		%rbp	# rbp = ctx
+	shl	\$6,		%rdx	# rdx = nbr in bytes
+	lea	(%rsi,%rdx),	%rdi	# rdi = end
+	mov	0*4(%rbp),	%eax	# eax = ctx->A
+	mov	1*4(%rbp),	%ebx	# ebx = ctx->B
+	mov	2*4(%rbp),	%ecx	# ecx = ctx->C
+	mov	3*4(%rbp),	%edx	# edx = ctx->D
+	# end is 'rdi'
+	# ptr is 'rsi'
+	# A is 'eax'
+	# B is 'ebx'
+	# C is 'ecx'
+	# D is 'edx'
+
+	cmp	%rdi,		%rsi		# cmp end with ptr
+	je	.Lend				# jmp if ptr == end
+
+	# BEGIN of loop over 16-word blocks
+.Lloop:	# save old values of A, B, C, D
+	mov	%eax,		%r8d
+	mov	%ebx,		%r9d
+	mov	%ecx,		%r14d
+	mov	%edx,		%r15d
+EOF
+# The 64 MD5 steps, fully unrolled.  Arguments per call:
+#   (pos, dst, x, y, z, k_next, T_i, s)
+# pos == -1 marks a round's first step (emits that round's setup code),
+# pos == 1 its last (prefetches X[0]/X[1]/X[5] for the following round).
+# Message-word indices and T_i constants follow RFC 1321 section 3.4.
+# Round 1: F(x,y,z) = (x & y) | (~x & z), shifts 7/12/17/22
+round1_step(-1,'%eax','%ebx','%ecx','%edx', '1','0xd76aa478', '7');
+round1_step( 0,'%edx','%eax','%ebx','%ecx', '2','0xe8c7b756','12');
+round1_step( 0,'%ecx','%edx','%eax','%ebx', '3','0x242070db','17');
+round1_step( 0,'%ebx','%ecx','%edx','%eax', '4','0xc1bdceee','22');
+round1_step( 0,'%eax','%ebx','%ecx','%edx', '5','0xf57c0faf', '7');
+round1_step( 0,'%edx','%eax','%ebx','%ecx', '6','0x4787c62a','12');
+round1_step( 0,'%ecx','%edx','%eax','%ebx', '7','0xa8304613','17');
+round1_step( 0,'%ebx','%ecx','%edx','%eax', '8','0xfd469501','22');
+round1_step( 0,'%eax','%ebx','%ecx','%edx', '9','0x698098d8', '7');
+round1_step( 0,'%edx','%eax','%ebx','%ecx','10','0x8b44f7af','12');
+round1_step( 0,'%ecx','%edx','%eax','%ebx','11','0xffff5bb1','17');
+round1_step( 0,'%ebx','%ecx','%edx','%eax','12','0x895cd7be','22');
+round1_step( 0,'%eax','%ebx','%ecx','%edx','13','0x6b901122', '7');
+round1_step( 0,'%edx','%eax','%ebx','%ecx','14','0xfd987193','12');
+round1_step( 0,'%ecx','%edx','%eax','%ebx','15','0xa679438e','17');
+round1_step( 1,'%ebx','%ecx','%edx','%eax', '0','0x49b40821','22');
+
+# Round 2: G(x,y,z) = (x & z) | (y & ~z), shifts 5/9/14/20
+round2_step(-1,'%eax','%ebx','%ecx','%edx', '6','0xf61e2562', '5');
+round2_step( 0,'%edx','%eax','%ebx','%ecx','11','0xc040b340', '9');
+round2_step( 0,'%ecx','%edx','%eax','%ebx', '0','0x265e5a51','14');
+round2_step( 0,'%ebx','%ecx','%edx','%eax', '5','0xe9b6c7aa','20');
+round2_step( 0,'%eax','%ebx','%ecx','%edx','10','0xd62f105d', '5');
+round2_step( 0,'%edx','%eax','%ebx','%ecx','15', '0x2441453', '9');
+round2_step( 0,'%ecx','%edx','%eax','%ebx', '4','0xd8a1e681','14');
+round2_step( 0,'%ebx','%ecx','%edx','%eax', '9','0xe7d3fbc8','20');
+round2_step( 0,'%eax','%ebx','%ecx','%edx','14','0x21e1cde6', '5');
+round2_step( 0,'%edx','%eax','%ebx','%ecx', '3','0xc33707d6', '9');
+round2_step( 0,'%ecx','%edx','%eax','%ebx', '8','0xf4d50d87','14');
+round2_step( 0,'%ebx','%ecx','%edx','%eax','13','0x455a14ed','20');
+round2_step( 0,'%eax','%ebx','%ecx','%edx', '2','0xa9e3e905', '5');
+round2_step( 0,'%edx','%eax','%ebx','%ecx', '7','0xfcefa3f8', '9');
+round2_step( 0,'%ecx','%edx','%eax','%ebx','12','0x676f02d9','14');
+round2_step( 1,'%ebx','%ecx','%edx','%eax', '0','0x8d2a4c8a','20');
+
+# Round 3: H(x,y,z) = x ^ y ^ z, shifts 4/11/16/23
+round3_step(-1,'%eax','%ebx','%ecx','%edx', '8','0xfffa3942', '4');
+round3_step( 0,'%edx','%eax','%ebx','%ecx','11','0x8771f681','11');
+round3_step( 0,'%ecx','%edx','%eax','%ebx','14','0x6d9d6122','16');
+round3_step( 0,'%ebx','%ecx','%edx','%eax', '1','0xfde5380c','23');
+round3_step( 0,'%eax','%ebx','%ecx','%edx', '4','0xa4beea44', '4');
+round3_step( 0,'%edx','%eax','%ebx','%ecx', '7','0x4bdecfa9','11');
+round3_step( 0,'%ecx','%edx','%eax','%ebx','10','0xf6bb4b60','16');
+round3_step( 0,'%ebx','%ecx','%edx','%eax','13','0xbebfbc70','23');
+round3_step( 0,'%eax','%ebx','%ecx','%edx', '0','0x289b7ec6', '4');
+round3_step( 0,'%edx','%eax','%ebx','%ecx', '3','0xeaa127fa','11');
+round3_step( 0,'%ecx','%edx','%eax','%ebx', '6','0xd4ef3085','16');
+round3_step( 0,'%ebx','%ecx','%edx','%eax', '9', '0x4881d05','23');
+round3_step( 0,'%eax','%ebx','%ecx','%edx','12','0xd9d4d039', '4');
+round3_step( 0,'%edx','%eax','%ebx','%ecx','15','0xe6db99e5','11');
+round3_step( 0,'%ecx','%edx','%eax','%ebx', '2','0x1fa27cf8','16');
+round3_step( 1,'%ebx','%ecx','%edx','%eax', '0','0xc4ac5665','23');
+
+# Round 4: I(x,y,z) = y ^ (x | ~z), shifts 6/10/15/21
+round4_step(-1,'%eax','%ebx','%ecx','%edx', '7','0xf4292244', '6');
+round4_step( 0,'%edx','%eax','%ebx','%ecx','14','0x432aff97','10');
+round4_step( 0,'%ecx','%edx','%eax','%ebx', '5','0xab9423a7','15');
+round4_step( 0,'%ebx','%ecx','%edx','%eax','12','0xfc93a039','21');
+round4_step( 0,'%eax','%ebx','%ecx','%edx', '3','0x655b59c3', '6');
+round4_step( 0,'%edx','%eax','%ebx','%ecx','10','0x8f0ccc92','10');
+round4_step( 0,'%ecx','%edx','%eax','%ebx', '1','0xffeff47d','15');
+round4_step( 0,'%ebx','%ecx','%edx','%eax', '8','0x85845dd1','21');
+round4_step( 0,'%eax','%ebx','%ecx','%edx','15','0x6fa87e4f', '6');
+round4_step( 0,'%edx','%eax','%ebx','%ecx', '6','0xfe2ce6e0','10');
+round4_step( 0,'%ecx','%edx','%eax','%ebx','13','0xa3014314','15');
+round4_step( 0,'%ebx','%ecx','%edx','%eax', '4','0x4e0811a1','21');
+round4_step( 0,'%eax','%ebx','%ecx','%edx','11','0xf7537e82', '6');
+round4_step( 0,'%edx','%eax','%ebx','%ecx', '2','0xbd3af235','10');
+round4_step( 0,'%ecx','%edx','%eax','%ebx', '9','0x2ad7d2bb','15');
+round4_step( 1,'%ebx','%ecx','%edx','%eax', '0','0xeb86d391','21');
+# End of one 16-word block: fold the saved chaining values back into
+# A..D, advance the data pointer by 64 bytes, and loop until ptr == end.
+# The epilogue stores A..D back into the context and restores the five
+# callee-saved registers pushed in the prologue (5 * 8 = 40 bytes).
+# NOTE: emitted assembly below is a runtime string; do not restyle.
+$code .= <<EOF;
+	# add old values of A, B, C, D
+	add	%r8d,	%eax
+	add	%r9d,	%ebx
+	add	%r14d,	%ecx
+	add	%r15d,	%edx
+
+	# loop control
+	add	\$64,		%rsi		# ptr += 64
+	cmp	%rdi,		%rsi		# cmp end with ptr
+	jb	.Lloop				# jmp if ptr < end
+	# END of loop over 16-word blocks
+
+.Lend:
+	mov	%eax,		0*4(%rbp)	# ctx->A = A
+	mov	%ebx,		1*4(%rbp)	# ctx->B = B
+	mov	%ecx,		2*4(%rbp)	# ctx->C = C
+	mov	%edx,		3*4(%rbp)	# ctx->D = D
+
+	mov	(%rsp),%r15
+	mov	8(%rsp),%r14
+	mov	16(%rsp),%r12
+	mov	24(%rsp),%rbx
+	mov	32(%rsp),%rbp
+	add	\$40,%rsp
+.Lepilogue:
+	ret
+.size md5_block_asm_data_order,.-md5_block_asm_data_order
+EOF
+
+# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
+#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
+# Win64 structured-exception-handling support, emitted only for Windows
+# targets: a language-specific handler that, when the fault lies between
+# .Lprologue and .Lepilogue, recovers the five callee-saved registers
+# pushed by md5_block_asm_data_order and repairs the CONTEXT record, then
+# continues the search; plus the .pdata/.xdata records registering it.
+# NOTE(review): $rec and $frame document the handler's signature but are
+# not interpolated into the assembly below.
+if ($win64) {
+my $rec="%rcx";
+my $frame="%rdx";
+my $context="%r8";
+my $disp="%r9";
+
+$code.=<<___;
+.extern	__imp_RtlVirtualUnwind
+.type	se_handler,\@abi-omnipotent
+.align	16
+se_handler:
+	push	%rsi
+	push	%rdi
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	pushfq
+	sub	\$64,%rsp
+
+	mov	120($context),%rax	# pull context->Rax
+	mov	248($context),%rbx	# pull context->Rip
+
+	lea	.Lprologue(%rip),%r10
+	cmp	%r10,%rbx		# context->Rip<.Lprologue
+	jb	.Lin_prologue
+
+	mov	152($context),%rax	# pull context->Rsp
+
+	lea	.Lepilogue(%rip),%r10
+	cmp	%r10,%rbx		# context->Rip>=.Lepilogue
+	jae	.Lin_prologue
+
+	lea	40(%rax),%rax
+
+	mov	-8(%rax),%rbp
+	mov	-16(%rax),%rbx
+	mov	-24(%rax),%r12
+	mov	-32(%rax),%r14
+	mov	-40(%rax),%r15
+	mov	%rbx,144($context)	# restore context->Rbx
+	mov	%rbp,160($context)	# restore context->Rbp
+	mov	%r12,216($context)	# restore context->R12
+	mov	%r14,232($context)	# restore context->R14
+	mov	%r15,240($context)	# restore context->R15
+
+.Lin_prologue:
+	mov	8(%rax),%rdi
+	mov	16(%rax),%rsi
+	mov	%rax,152($context)	# restore context->Rsp
+	mov	%rsi,168($context)	# restore context->Rsi
+	mov	%rdi,176($context)	# restore context->Rdi
+
+	mov	40($disp),%rdi		# disp->ContextRecord
+	mov	$context,%rsi		# context
+	mov	\$154,%ecx		# sizeof(CONTEXT)
+	.long	0xa548f3fc		# cld; rep movsq
+
+	mov	$disp,%rsi
+	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
+	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
+	mov	0(%rsi),%r8		# arg3, disp->ControlPc
+	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
+	mov	40(%rsi),%r10		# disp->ContextRecord
+	lea	56(%rsi),%r11		# &disp->HandlerData
+	lea	24(%rsi),%r12		# &disp->EstablisherFrame
+	mov	%r10,32(%rsp)		# arg5
+	mov	%r11,40(%rsp)		# arg6
+	mov	%r12,48(%rsp)		# arg7
+	mov	%rcx,56(%rsp)		# arg8, (NULL)
+	call	*__imp_RtlVirtualUnwind(%rip)
+
+	mov	\$1,%eax		# ExceptionContinueSearch
+	add	\$64,%rsp
+	popfq
+	pop	%r15
+	pop	%r14
+	pop	%r13
+	pop	%r12
+	pop	%rbp
+	pop	%rbx
+	pop	%rdi
+	pop	%rsi
+	ret
+.size	se_handler,.-se_handler
+
+.section	.pdata
+.align	4
+	.rva	.LSEH_begin_md5_block_asm_data_order
+	.rva	.LSEH_end_md5_block_asm_data_order
+	.rva	.LSEH_info_md5_block_asm_data_order
+
+.section	.xdata
+.align	8
+.LSEH_info_md5_block_asm_data_order:
+	.byte	9,0,0,0
+	.rva	se_handler
+___
+}
+
+# Emit the accumulated assembly through the xlate pipe opened above.
+print $code;
+
+# STDOUT is a piped write handle: buffered write errors (and a failing
+# translator) only surface at close time, so an unchecked close could
+# silently produce truncated output.  Fail loudly instead.
+close STDOUT or die "error closing STDOUT: $!";