Update OpenSSL to 1.0.1g

Heartbleed (CVE-2014-0160) is fixed in 1.0.1g; the bleeding has stopped.

Refs #45190
diff --git a/jni/libopenssl/crypto/rc4/asm/rc4-586.pl b/jni/libopenssl/crypto/rc4/asm/rc4-586.pl
index 38a44a7..5c9ac6a 100644
--- a/jni/libopenssl/crypto/rc4/asm/rc4-586.pl
+++ b/jni/libopenssl/crypto/rc4/asm/rc4-586.pl
@@ -28,6 +28,34 @@
 #
 #					<appro@fy.chalmers.se>
 
+# May 2011
+#
+# Optimize for Core2 and Westmere [and incidentally Opteron]. Current
+# performance in cycles per processed byte (less is better) and the
+# improvement relative to the previous version of this module are:
+#
+# Pentium	10.2			# original numbers
+# Pentium III	7.8(*)
+# Intel P4	7.5
+#
+# Opteron	6.1/+20%		# new MMX numbers
+# Core2		5.3/+67%(**)
+# Westmere	5.1/+94%(**)
+# Sandy Bridge	5.0/+8%
+# Atom		12.6/+6%
+#
+# (*)	PIII can actually deliver 6.6 cycles per byte with MMX code,
+#	but that specific code performs poorly on Core2. And vice
+#	versa, the MMX/SSE code below, delivering 5.8/7.1 on Core2,
+#	performs poorly on PIII, at 8.0/14.5:-( As PIII is no longer
+#	a "hot" CPU, I chose to discard the PIII-specific code path
+#	and opt for the original IALU-only code, which is why the
+#	MMX/SSE code path is guarded by the SSE2 bit (see below),
+#	not MMX/SSE.
+# (**)	Performance vs. block size on Core2 and Westmere has a maximum
+#	at ... a 64-byte block size, and it is quite a maximum: 40-60%
+#	better than at the largest, 8KB, block size. The improvement
+#	coefficients above are for the largest block size.
+
 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
 push(@INC,"${dir}","${dir}../../perlasm");
 require "x86asm.pl";
@@ -62,6 +90,68 @@
 	&$func	($out,&DWP(0,$dat,$ty,4));
 }
 
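+# Note: "$alt=0" below is an assignment, not a comparison; it sets $alt
+# to 0, so the "else" code path further down is the one actually used.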
+if ($alt=0) {
+  # >20% faster on Atom and Sandy Bridge[!], 8% faster on Opteron,
+  # but ~40% slower on Core2 and Westmere... An attempt to add movz
+  # brings Opteron down by 25%, and Atom and Sandy Bridge by 15%, yet
+  # on Core2 with movz it's almost 20% slower than the alternative
+  # code below... Yes, it's a total mess...
+  my @XX=($xx,$out);
+  $RC4_loop_mmx = sub {		# SSE actually...
+    my $i=shift;
+    my $j=$i<=0?0:$i>>1;
+    my $mm=$i<=0?"mm0":"mm".($i&1);
+
+	&add	(&LB($yy),&LB($tx));
+	&lea	(@XX[1],&DWP(1,@XX[0]));
+	&pxor	("mm2","mm0")				if ($i==0);
+	&psllq	("mm1",8)				if ($i==0);
+	&and	(@XX[1],0xff);
+	&pxor	("mm0","mm0")				if ($i<=0);
+	&mov	($ty,&DWP(0,$dat,$yy,4));
+	&mov	(&DWP(0,$dat,$yy,4),$tx);
+	&pxor	("mm1","mm2")				if ($i==0);
+	&mov	(&DWP(0,$dat,$XX[0],4),$ty);
+	&add	(&LB($ty),&LB($tx));
+	&movd	(@XX[0],"mm7")				if ($i==0);
+	&mov	($tx,&DWP(0,$dat,@XX[1],4));
+	&pxor	("mm1","mm1")				if ($i==1);
+	&movq	("mm2",&QWP(0,$inp))			if ($i==1);
+	&movq	(&QWP(-8,(@XX[0],$inp)),"mm1")		if ($i==0);
+	&pinsrw	($mm,&DWP(0,$dat,$ty,4),$j);
+
+	push	(@XX,shift(@XX))			if ($i>=0);
+  }
+} else {
+  # Using pinsrw here improves performance on Intel CPUs by 2-3%, but
+  # brings AMD down by 7%...
+  $RC4_loop_mmx = sub {
+    my $i=shift;
+
+	&add	(&LB($yy),&LB($tx));
+	&psllq	("mm1",8*(($i-1)&7))			if (abs($i)!=1);
+	&mov	($ty,&DWP(0,$dat,$yy,4));
+	&mov	(&DWP(0,$dat,$yy,4),$tx);
+	&mov	(&DWP(0,$dat,$xx,4),$ty);
+	&inc	($xx);
+	&add	($ty,$tx);
+	&movz	($xx,&LB($xx));				# (*)
+	&movz	($ty,&LB($ty));				# (*)
+	&pxor	("mm2",$i==1?"mm0":"mm1")		if ($i>=0);
+	&movq	("mm0",&QWP(0,$inp))			if ($i<=0);
+	&movq	(&QWP(-8,($out,$inp)),"mm2")		if ($i==0);
+	&mov	($tx,&DWP(0,$dat,$xx,4));
+	&movd	($i>0?"mm1":"mm2",&DWP(0,$dat,$ty,4));
+
+	# (*)	This is the key to Core2 and Westmere performance.
+	#	Without movz the out-of-order execution logic confuses
+	#	itself and fails to reorder loads and stores. The problem
+	#	appears to be fixed in Sandy Bridge...
+  }
+}
+
+&external_label("OPENSSL_ia32cap_P");
+
 # void RC4(RC4_KEY *key,size_t len,const unsigned char *inp,unsigned char *out);
 &function_begin("RC4");
 	&mov	($dat,&wparam(0));	# load key schedule pointer
@@ -94,11 +184,56 @@
 	&and	($ty,-4);		# how many 4-byte chunks?
 	&jz	(&label("loop1"));
 
+	&test	($ty,-8);
+	&mov	(&wparam(3),$out);	# $out as accumulator in these loops
+	&jz	(&label("go4loop4"));
+
+	&picmeup($out,"OPENSSL_ia32cap_P");
+	&bt	(&DWP(0,$out),26);	# check SSE2 bit [could have been MMX]
+	&jnc	(&label("go4loop4"));
+
+	&mov	($out,&wparam(3))	if (!$alt);
+	&movd	("mm7",&wparam(3))	if ($alt);
+	&and	($ty,-8);
+	&lea	($ty,&DWP(-8,$inp,$ty));
+	&mov	(&DWP(-4,$dat),$ty);	# save input+(len/8)*8-8
+
+	&$RC4_loop_mmx(-1);
+	&jmp(&label("loop_mmx_enter"));
+
+	&set_label("loop_mmx",16);
+		&$RC4_loop_mmx(0);
+	&set_label("loop_mmx_enter");
+		for 	($i=1;$i<8;$i++) { &$RC4_loop_mmx($i); }
+		&mov	($ty,$yy);
+		&xor	($yy,$yy);		# this is the second key to Core2
+		&mov	(&LB($yy),&LB($ty));	# and Westmere performance...
+		&cmp	($inp,&DWP(-4,$dat));
+		&lea	($inp,&DWP(8,$inp));
+	&jb	(&label("loop_mmx"));
+
+    if ($alt) {
+	&movd	($out,"mm7");
+	&pxor	("mm2","mm0");
+	&psllq	("mm1",8);
+	&pxor	("mm1","mm2");
+	&movq	(&QWP(-8,$out,$inp),"mm1");
+    } else {
+	&psllq	("mm1",56);
+	&pxor	("mm2","mm1");
+	&movq	(&QWP(-8,$out,$inp),"mm2");
+    }
+	&emms	();
+
+	&cmp	($inp,&wparam(1));	# compare to input+len
+	&je	(&label("done"));
+	&jmp	(&label("loop1"));
+
+&set_label("go4loop4",16);
 	&lea	($ty,&DWP(-4,$inp,$ty));
 	&mov	(&wparam(2),$ty);	# save input+(len/4)*4-4
-	&mov	(&wparam(3),$out);	# $out as accumulator in this loop
 
-	&set_label("loop4",16);
+	&set_label("loop4");
 		for ($i=0;$i<4;$i++) { RC4_loop($i); }
 		&ror	($out,8);
 		&xor	($out,&DWP(0,$inp));
@@ -151,7 +286,7 @@
 
 &set_label("done");
 	&dec	(&LB($xx));
-	&mov	(&BP(-4,$dat),&LB($yy));	# save key->y
+	&mov	(&DWP(-4,$dat),$yy);		# save key->y
 	&mov	(&BP(-8,$dat),&LB($xx));	# save key->x
 &set_label("abort");
 &function_end("RC4");
@@ -164,10 +299,8 @@
 $ido="ecx";
 $idx="edx";
 
-&external_label("OPENSSL_ia32cap_P");
-
 # void RC4_set_key(RC4_KEY *key,int len,const unsigned char *data);
-&function_begin("RC4_set_key");
+&function_begin("private_RC4_set_key");
 	&mov	($out,&wparam(0));		# load key
 	&mov	($idi,&wparam(1));		# load len
 	&mov	($inp,&wparam(2));		# load data
@@ -245,7 +378,7 @@
 	&xor	("eax","eax");
 	&mov	(&DWP(-8,$out),"eax");		# key->x=0;
 	&mov	(&DWP(-4,$out),"eax");		# key->y=0;
-&function_end("RC4_set_key");
+&function_end("private_RC4_set_key");
 
 # const char *RC4_options(void);
 &function_begin_B("RC4_options");
@@ -254,14 +387,21 @@
 	&blindpop("eax");
 	&lea	("eax",&DWP(&label("opts")."-".&label("pic_point"),"eax"));
 	&picmeup("edx","OPENSSL_ia32cap_P");
-	&bt	(&DWP(0,"edx"),20);
-	&jnc	(&label("skip"));
-	  &add	("eax",12);
-	&set_label("skip");
+	&mov	("edx",&DWP(0,"edx"));
+	&bt	("edx",20);
+	&jc	(&label("1xchar"));
+	&bt	("edx",26);
+	&jnc	(&label("ret"));
+	&add	("eax",25);
+	&ret	();
+&set_label("1xchar");
+	&add	("eax",12);
+&set_label("ret");
 	&ret	();
 &set_label("opts",64);
 &asciz	("rc4(4x,int)");
 &asciz	("rc4(1x,char)");
+&asciz	("rc4(8x,mmx)");
 &asciz	("RC4 for x86, CRYPTOGAMS by <appro\@openssl.org>");
 &align	(64);
 &function_end_B("RC4_options");
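For reference, the option-string selection implemented by the updated RC4_options body above can be sketched in plain Perl (a rough equivalent, not part of the module; $cap stands for the dword loaded from OPENSSL_ia32cap_P, whose bit 26 is the SSE2 flag also checked in the RC4 body):

	sub rc4_options_sketch {
	    my ($cap) = @_;				# OPENSSL_ia32cap_P[0]
	    return "rc4(1x,char)" if ($cap & (1<<20));	# 1x,char path
	    return "rc4(8x,mmx)"  if ($cap & (1<<26));	# SSE2 => 8x MMX/SSE loop
	    return "rc4(4x,int)";			# default IALU path
	}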
diff --git a/jni/libopenssl/crypto/rc4/asm/rc4-md5-x86_64.S b/jni/libopenssl/crypto/rc4/asm/rc4-md5-x86_64.S
new file mode 100644
index 0000000..aab3c6d
--- /dev/null
+++ b/jni/libopenssl/crypto/rc4/asm/rc4-md5-x86_64.S
@@ -0,0 +1,1259 @@
+.text	
+.align	16
+
+.globl	rc4_md5_enc
+.type	rc4_md5_enc,@function
+rc4_md5_enc:
+	cmpq	$0,%r9
+	je	.Labort
+	pushq	%rbx
+	pushq	%rbp
+	pushq	%r12
+	pushq	%r13
+	pushq	%r14
+	pushq	%r15
+	subq	$40,%rsp
+.Lbody:
+	movq	%rcx,%r11
+	movq	%r9,%r12
+	movq	%rsi,%r13
+	movq	%rdx,%r14
+	movq	%r8,%r15
+	xorq	%rbp,%rbp
+	xorq	%rcx,%rcx
+
+	leaq	8(%rdi),%rdi
+	movb	-8(%rdi),%bpl
+	movb	-4(%rdi),%cl
+
+	incb	%bpl
+	subq	%r13,%r14
+	movl	(%rdi,%rbp,4),%eax
+	addb	%al,%cl
+	leaq	(%rdi,%rbp,4),%rsi
+	shlq	$6,%r12
+	addq	%r15,%r12
+	movq	%r12,16(%rsp)
+
+	movq	%r11,24(%rsp)
+	movl	0(%r11),%r8d
+	movl	4(%r11),%r9d
+	movl	8(%r11),%r10d
+	movl	12(%r11),%r11d
+	jmp	.Loop
+
+.align	16
+.Loop:
+	movl	%r8d,0(%rsp)
+	movl	%r9d,4(%rsp)
+	movl	%r10d,8(%rsp)
+	movl	%r11d,%r12d
+	movl	%r11d,12(%rsp)
+	pxor	%xmm0,%xmm0
+	movl	(%rdi,%rcx,4),%edx
+	xorl	%r10d,%r12d
+	movl	%eax,(%rdi,%rcx,4)
+	andl	%r9d,%r12d
+	addl	0(%r15),%r8d
+	addb	%dl,%al
+	movl	4(%rsi),%ebx
+	addl	$3614090360,%r8d
+	xorl	%r11d,%r12d
+	movzbl	%al,%eax
+	movl	%edx,0(%rsi)
+	addl	%r12d,%r8d
+	addb	%bl,%cl
+	roll	$7,%r8d
+	movl	%r10d,%r12d
+	movd	(%rdi,%rax,4),%xmm0
+
+	addl	%r9d,%r8d
+	pxor	%xmm1,%xmm1
+	movl	(%rdi,%rcx,4),%edx
+	xorl	%r9d,%r12d
+	movl	%ebx,(%rdi,%rcx,4)
+	andl	%r8d,%r12d
+	addl	4(%r15),%r11d
+	addb	%dl,%bl
+	movl	8(%rsi),%eax
+	addl	$3905402710,%r11d
+	xorl	%r10d,%r12d
+	movzbl	%bl,%ebx
+	movl	%edx,4(%rsi)
+	addl	%r12d,%r11d
+	addb	%al,%cl
+	roll	$12,%r11d
+	movl	%r9d,%r12d
+	movd	(%rdi,%rbx,4),%xmm1
+
+	addl	%r8d,%r11d
+	movl	(%rdi,%rcx,4),%edx
+	xorl	%r8d,%r12d
+	movl	%eax,(%rdi,%rcx,4)
+	andl	%r11d,%r12d
+	addl	8(%r15),%r10d
+	addb	%dl,%al
+	movl	12(%rsi),%ebx
+	addl	$606105819,%r10d
+	xorl	%r9d,%r12d
+	movzbl	%al,%eax
+	movl	%edx,8(%rsi)
+	addl	%r12d,%r10d
+	addb	%bl,%cl
+	roll	$17,%r10d
+	movl	%r8d,%r12d
+	pinsrw	$1,(%rdi,%rax,4),%xmm0
+
+	addl	%r11d,%r10d
+	movl	(%rdi,%rcx,4),%edx
+	xorl	%r11d,%r12d
+	movl	%ebx,(%rdi,%rcx,4)
+	andl	%r10d,%r12d
+	addl	12(%r15),%r9d
+	addb	%dl,%bl
+	movl	16(%rsi),%eax
+	addl	$3250441966,%r9d
+	xorl	%r8d,%r12d
+	movzbl	%bl,%ebx
+	movl	%edx,12(%rsi)
+	addl	%r12d,%r9d
+	addb	%al,%cl
+	roll	$22,%r9d
+	movl	%r11d,%r12d
+	pinsrw	$1,(%rdi,%rbx,4),%xmm1
+
+	addl	%r10d,%r9d
+	movl	(%rdi,%rcx,4),%edx
+	xorl	%r10d,%r12d
+	movl	%eax,(%rdi,%rcx,4)
+	andl	%r9d,%r12d
+	addl	16(%r15),%r8d
+	addb	%dl,%al
+	movl	20(%rsi),%ebx
+	addl	$4118548399,%r8d
+	xorl	%r11d,%r12d
+	movzbl	%al,%eax
+	movl	%edx,16(%rsi)
+	addl	%r12d,%r8d
+	addb	%bl,%cl
+	roll	$7,%r8d
+	movl	%r10d,%r12d
+	pinsrw	$2,(%rdi,%rax,4),%xmm0
+
+	addl	%r9d,%r8d
+	movl	(%rdi,%rcx,4),%edx
+	xorl	%r9d,%r12d
+	movl	%ebx,(%rdi,%rcx,4)
+	andl	%r8d,%r12d
+	addl	20(%r15),%r11d
+	addb	%dl,%bl
+	movl	24(%rsi),%eax
+	addl	$1200080426,%r11d
+	xorl	%r10d,%r12d
+	movzbl	%bl,%ebx
+	movl	%edx,20(%rsi)
+	addl	%r12d,%r11d
+	addb	%al,%cl
+	roll	$12,%r11d
+	movl	%r9d,%r12d
+	pinsrw	$2,(%rdi,%rbx,4),%xmm1
+
+	addl	%r8d,%r11d
+	movl	(%rdi,%rcx,4),%edx
+	xorl	%r8d,%r12d
+	movl	%eax,(%rdi,%rcx,4)
+	andl	%r11d,%r12d
+	addl	24(%r15),%r10d
+	addb	%dl,%al
+	movl	28(%rsi),%ebx
+	addl	$2821735955,%r10d
+	xorl	%r9d,%r12d
+	movzbl	%al,%eax
+	movl	%edx,24(%rsi)
+	addl	%r12d,%r10d
+	addb	%bl,%cl
+	roll	$17,%r10d
+	movl	%r8d,%r12d
+	pinsrw	$3,(%rdi,%rax,4),%xmm0
+
+	addl	%r11d,%r10d
+	movl	(%rdi,%rcx,4),%edx
+	xorl	%r11d,%r12d
+	movl	%ebx,(%rdi,%rcx,4)
+	andl	%r10d,%r12d
+	addl	28(%r15),%r9d
+	addb	%dl,%bl
+	movl	32(%rsi),%eax
+	addl	$4249261313,%r9d
+	xorl	%r8d,%r12d
+	movzbl	%bl,%ebx
+	movl	%edx,28(%rsi)
+	addl	%r12d,%r9d
+	addb	%al,%cl
+	roll	$22,%r9d
+	movl	%r11d,%r12d
+	pinsrw	$3,(%rdi,%rbx,4),%xmm1
+
+	addl	%r10d,%r9d
+	movl	(%rdi,%rcx,4),%edx
+	xorl	%r10d,%r12d
+	movl	%eax,(%rdi,%rcx,4)
+	andl	%r9d,%r12d
+	addl	32(%r15),%r8d
+	addb	%dl,%al
+	movl	36(%rsi),%ebx
+	addl	$1770035416,%r8d
+	xorl	%r11d,%r12d
+	movzbl	%al,%eax
+	movl	%edx,32(%rsi)
+	addl	%r12d,%r8d
+	addb	%bl,%cl
+	roll	$7,%r8d
+	movl	%r10d,%r12d
+	pinsrw	$4,(%rdi,%rax,4),%xmm0
+
+	addl	%r9d,%r8d
+	movl	(%rdi,%rcx,4),%edx
+	xorl	%r9d,%r12d
+	movl	%ebx,(%rdi,%rcx,4)
+	andl	%r8d,%r12d
+	addl	36(%r15),%r11d
+	addb	%dl,%bl
+	movl	40(%rsi),%eax
+	addl	$2336552879,%r11d
+	xorl	%r10d,%r12d
+	movzbl	%bl,%ebx
+	movl	%edx,36(%rsi)
+	addl	%r12d,%r11d
+	addb	%al,%cl
+	roll	$12,%r11d
+	movl	%r9d,%r12d
+	pinsrw	$4,(%rdi,%rbx,4),%xmm1
+
+	addl	%r8d,%r11d
+	movl	(%rdi,%rcx,4),%edx
+	xorl	%r8d,%r12d
+	movl	%eax,(%rdi,%rcx,4)
+	andl	%r11d,%r12d
+	addl	40(%r15),%r10d
+	addb	%dl,%al
+	movl	44(%rsi),%ebx
+	addl	$4294925233,%r10d
+	xorl	%r9d,%r12d
+	movzbl	%al,%eax
+	movl	%edx,40(%rsi)
+	addl	%r12d,%r10d
+	addb	%bl,%cl
+	roll	$17,%r10d
+	movl	%r8d,%r12d
+	pinsrw	$5,(%rdi,%rax,4),%xmm0
+
+	addl	%r11d,%r10d
+	movl	(%rdi,%rcx,4),%edx
+	xorl	%r11d,%r12d
+	movl	%ebx,(%rdi,%rcx,4)
+	andl	%r10d,%r12d
+	addl	44(%r15),%r9d
+	addb	%dl,%bl
+	movl	48(%rsi),%eax
+	addl	$2304563134,%r9d
+	xorl	%r8d,%r12d
+	movzbl	%bl,%ebx
+	movl	%edx,44(%rsi)
+	addl	%r12d,%r9d
+	addb	%al,%cl
+	roll	$22,%r9d
+	movl	%r11d,%r12d
+	pinsrw	$5,(%rdi,%rbx,4),%xmm1
+
+	addl	%r10d,%r9d
+	movl	(%rdi,%rcx,4),%edx
+	xorl	%r10d,%r12d
+	movl	%eax,(%rdi,%rcx,4)
+	andl	%r9d,%r12d
+	addl	48(%r15),%r8d
+	addb	%dl,%al
+	movl	52(%rsi),%ebx
+	addl	$1804603682,%r8d
+	xorl	%r11d,%r12d
+	movzbl	%al,%eax
+	movl	%edx,48(%rsi)
+	addl	%r12d,%r8d
+	addb	%bl,%cl
+	roll	$7,%r8d
+	movl	%r10d,%r12d
+	pinsrw	$6,(%rdi,%rax,4),%xmm0
+
+	addl	%r9d,%r8d
+	movl	(%rdi,%rcx,4),%edx
+	xorl	%r9d,%r12d
+	movl	%ebx,(%rdi,%rcx,4)
+	andl	%r8d,%r12d
+	addl	52(%r15),%r11d
+	addb	%dl,%bl
+	movl	56(%rsi),%eax
+	addl	$4254626195,%r11d
+	xorl	%r10d,%r12d
+	movzbl	%bl,%ebx
+	movl	%edx,52(%rsi)
+	addl	%r12d,%r11d
+	addb	%al,%cl
+	roll	$12,%r11d
+	movl	%r9d,%r12d
+	pinsrw	$6,(%rdi,%rbx,4),%xmm1
+
+	addl	%r8d,%r11d
+	movl	(%rdi,%rcx,4),%edx
+	xorl	%r8d,%r12d
+	movl	%eax,(%rdi,%rcx,4)
+	andl	%r11d,%r12d
+	addl	56(%r15),%r10d
+	addb	%dl,%al
+	movl	60(%rsi),%ebx
+	addl	$2792965006,%r10d
+	xorl	%r9d,%r12d
+	movzbl	%al,%eax
+	movl	%edx,56(%rsi)
+	addl	%r12d,%r10d
+	addb	%bl,%cl
+	roll	$17,%r10d
+	movl	%r8d,%r12d
+	pinsrw	$7,(%rdi,%rax,4),%xmm0
+
+	addl	%r11d,%r10d
+	movdqu	(%r13),%xmm2
+	movl	(%rdi,%rcx,4),%edx
+	xorl	%r11d,%r12d
+	movl	%ebx,(%rdi,%rcx,4)
+	andl	%r10d,%r12d
+	addl	60(%r15),%r9d
+	addb	%dl,%bl
+	movl	64(%rsi),%eax
+	addl	$1236535329,%r9d
+	xorl	%r8d,%r12d
+	movzbl	%bl,%ebx
+	movl	%edx,60(%rsi)
+	addl	%r12d,%r9d
+	addb	%al,%cl
+	roll	$22,%r9d
+	movl	%r10d,%r12d
+	pinsrw	$7,(%rdi,%rbx,4),%xmm1
+
+	addl	%r10d,%r9d
+	psllq	$8,%xmm1
+	pxor	%xmm0,%xmm2
+	pxor	%xmm1,%xmm2
+	pxor	%xmm0,%xmm0
+	movl	(%rdi,%rcx,4),%edx
+	xorl	%r9d,%r12d
+	movl	%eax,(%rdi,%rcx,4)
+	andl	%r11d,%r12d
+	addl	4(%r15),%r8d
+	addb	%dl,%al
+	movl	68(%rsi),%ebx
+	addl	$4129170786,%r8d
+	xorl	%r10d,%r12d
+	movzbl	%al,%eax
+	movl	%edx,64(%rsi)
+	addl	%r12d,%r8d
+	addb	%bl,%cl
+	roll	$5,%r8d
+	movl	%r9d,%r12d
+	movd	(%rdi,%rax,4),%xmm0
+
+	addl	%r9d,%r8d
+	pxor	%xmm1,%xmm1
+	movl	(%rdi,%rcx,4),%edx
+	xorl	%r8d,%r12d
+	movl	%ebx,(%rdi,%rcx,4)
+	andl	%r10d,%r12d
+	addl	24(%r15),%r11d
+	addb	%dl,%bl
+	movl	72(%rsi),%eax
+	addl	$3225465664,%r11d
+	xorl	%r9d,%r12d
+	movzbl	%bl,%ebx
+	movl	%edx,68(%rsi)
+	addl	%r12d,%r11d
+	addb	%al,%cl
+	roll	$9,%r11d
+	movl	%r8d,%r12d
+	movd	(%rdi,%rbx,4),%xmm1
+
+	addl	%r8d,%r11d
+	movl	(%rdi,%rcx,4),%edx
+	xorl	%r11d,%r12d
+	movl	%eax,(%rdi,%rcx,4)
+	andl	%r9d,%r12d
+	addl	44(%r15),%r10d
+	addb	%dl,%al
+	movl	76(%rsi),%ebx
+	addl	$643717713,%r10d
+	xorl	%r8d,%r12d
+	movzbl	%al,%eax
+	movl	%edx,72(%rsi)
+	addl	%r12d,%r10d
+	addb	%bl,%cl
+	roll	$14,%r10d
+	movl	%r11d,%r12d
+	pinsrw	$1,(%rdi,%rax,4),%xmm0
+
+	addl	%r11d,%r10d
+	movl	(%rdi,%rcx,4),%edx
+	xorl	%r10d,%r12d
+	movl	%ebx,(%rdi,%rcx,4)
+	andl	%r8d,%r12d
+	addl	0(%r15),%r9d
+	addb	%dl,%bl
+	movl	80(%rsi),%eax
+	addl	$3921069994,%r9d
+	xorl	%r11d,%r12d
+	movzbl	%bl,%ebx
+	movl	%edx,76(%rsi)
+	addl	%r12d,%r9d
+	addb	%al,%cl
+	roll	$20,%r9d
+	movl	%r10d,%r12d
+	pinsrw	$1,(%rdi,%rbx,4),%xmm1
+
+	addl	%r10d,%r9d
+	movl	(%rdi,%rcx,4),%edx
+	xorl	%r9d,%r12d
+	movl	%eax,(%rdi,%rcx,4)
+	andl	%r11d,%r12d
+	addl	20(%r15),%r8d
+	addb	%dl,%al
+	movl	84(%rsi),%ebx
+	addl	$3593408605,%r8d
+	xorl	%r10d,%r12d
+	movzbl	%al,%eax
+	movl	%edx,80(%rsi)
+	addl	%r12d,%r8d
+	addb	%bl,%cl
+	roll	$5,%r8d
+	movl	%r9d,%r12d
+	pinsrw	$2,(%rdi,%rax,4),%xmm0
+
+	addl	%r9d,%r8d
+	movl	(%rdi,%rcx,4),%edx
+	xorl	%r8d,%r12d
+	movl	%ebx,(%rdi,%rcx,4)
+	andl	%r10d,%r12d
+	addl	40(%r15),%r11d
+	addb	%dl,%bl
+	movl	88(%rsi),%eax
+	addl	$38016083,%r11d
+	xorl	%r9d,%r12d
+	movzbl	%bl,%ebx
+	movl	%edx,84(%rsi)
+	addl	%r12d,%r11d
+	addb	%al,%cl
+	roll	$9,%r11d
+	movl	%r8d,%r12d
+	pinsrw	$2,(%rdi,%rbx,4),%xmm1
+
+	addl	%r8d,%r11d
+	movl	(%rdi,%rcx,4),%edx
+	xorl	%r11d,%r12d
+	movl	%eax,(%rdi,%rcx,4)
+	andl	%r9d,%r12d
+	addl	60(%r15),%r10d
+	addb	%dl,%al
+	movl	92(%rsi),%ebx
+	addl	$3634488961,%r10d
+	xorl	%r8d,%r12d
+	movzbl	%al,%eax
+	movl	%edx,88(%rsi)
+	addl	%r12d,%r10d
+	addb	%bl,%cl
+	roll	$14,%r10d
+	movl	%r11d,%r12d
+	pinsrw	$3,(%rdi,%rax,4),%xmm0
+
+	addl	%r11d,%r10d
+	movl	(%rdi,%rcx,4),%edx
+	xorl	%r10d,%r12d
+	movl	%ebx,(%rdi,%rcx,4)
+	andl	%r8d,%r12d
+	addl	16(%r15),%r9d
+	addb	%dl,%bl
+	movl	96(%rsi),%eax
+	addl	$3889429448,%r9d
+	xorl	%r11d,%r12d
+	movzbl	%bl,%ebx
+	movl	%edx,92(%rsi)
+	addl	%r12d,%r9d
+	addb	%al,%cl
+	roll	$20,%r9d
+	movl	%r10d,%r12d
+	pinsrw	$3,(%rdi,%rbx,4),%xmm1
+
+	addl	%r10d,%r9d
+	movl	(%rdi,%rcx,4),%edx
+	xorl	%r9d,%r12d
+	movl	%eax,(%rdi,%rcx,4)
+	andl	%r11d,%r12d
+	addl	36(%r15),%r8d
+	addb	%dl,%al
+	movl	100(%rsi),%ebx
+	addl	$568446438,%r8d
+	xorl	%r10d,%r12d
+	movzbl	%al,%eax
+	movl	%edx,96(%rsi)
+	addl	%r12d,%r8d
+	addb	%bl,%cl
+	roll	$5,%r8d
+	movl	%r9d,%r12d
+	pinsrw	$4,(%rdi,%rax,4),%xmm0
+
+	addl	%r9d,%r8d
+	movl	(%rdi,%rcx,4),%edx
+	xorl	%r8d,%r12d
+	movl	%ebx,(%rdi,%rcx,4)
+	andl	%r10d,%r12d
+	addl	56(%r15),%r11d
+	addb	%dl,%bl
+	movl	104(%rsi),%eax
+	addl	$3275163606,%r11d
+	xorl	%r9d,%r12d
+	movzbl	%bl,%ebx
+	movl	%edx,100(%rsi)
+	addl	%r12d,%r11d
+	addb	%al,%cl
+	roll	$9,%r11d
+	movl	%r8d,%r12d
+	pinsrw	$4,(%rdi,%rbx,4),%xmm1
+
+	addl	%r8d,%r11d
+	movl	(%rdi,%rcx,4),%edx
+	xorl	%r11d,%r12d
+	movl	%eax,(%rdi,%rcx,4)
+	andl	%r9d,%r12d
+	addl	12(%r15),%r10d
+	addb	%dl,%al
+	movl	108(%rsi),%ebx
+	addl	$4107603335,%r10d
+	xorl	%r8d,%r12d
+	movzbl	%al,%eax
+	movl	%edx,104(%rsi)
+	addl	%r12d,%r10d
+	addb	%bl,%cl
+	roll	$14,%r10d
+	movl	%r11d,%r12d
+	pinsrw	$5,(%rdi,%rax,4),%xmm0
+
+	addl	%r11d,%r10d
+	movl	(%rdi,%rcx,4),%edx
+	xorl	%r10d,%r12d
+	movl	%ebx,(%rdi,%rcx,4)
+	andl	%r8d,%r12d
+	addl	32(%r15),%r9d
+	addb	%dl,%bl
+	movl	112(%rsi),%eax
+	addl	$1163531501,%r9d
+	xorl	%r11d,%r12d
+	movzbl	%bl,%ebx
+	movl	%edx,108(%rsi)
+	addl	%r12d,%r9d
+	addb	%al,%cl
+	roll	$20,%r9d
+	movl	%r10d,%r12d
+	pinsrw	$5,(%rdi,%rbx,4),%xmm1
+
+	addl	%r10d,%r9d
+	movl	(%rdi,%rcx,4),%edx
+	xorl	%r9d,%r12d
+	movl	%eax,(%rdi,%rcx,4)
+	andl	%r11d,%r12d
+	addl	52(%r15),%r8d
+	addb	%dl,%al
+	movl	116(%rsi),%ebx
+	addl	$2850285829,%r8d
+	xorl	%r10d,%r12d
+	movzbl	%al,%eax
+	movl	%edx,112(%rsi)
+	addl	%r12d,%r8d
+	addb	%bl,%cl
+	roll	$5,%r8d
+	movl	%r9d,%r12d
+	pinsrw	$6,(%rdi,%rax,4),%xmm0
+
+	addl	%r9d,%r8d
+	movl	(%rdi,%rcx,4),%edx
+	xorl	%r8d,%r12d
+	movl	%ebx,(%rdi,%rcx,4)
+	andl	%r10d,%r12d
+	addl	8(%r15),%r11d
+	addb	%dl,%bl
+	movl	120(%rsi),%eax
+	addl	$4243563512,%r11d
+	xorl	%r9d,%r12d
+	movzbl	%bl,%ebx
+	movl	%edx,116(%rsi)
+	addl	%r12d,%r11d
+	addb	%al,%cl
+	roll	$9,%r11d
+	movl	%r8d,%r12d
+	pinsrw	$6,(%rdi,%rbx,4),%xmm1
+
+	addl	%r8d,%r11d
+	movl	(%rdi,%rcx,4),%edx
+	xorl	%r11d,%r12d
+	movl	%eax,(%rdi,%rcx,4)
+	andl	%r9d,%r12d
+	addl	28(%r15),%r10d
+	addb	%dl,%al
+	movl	124(%rsi),%ebx
+	addl	$1735328473,%r10d
+	xorl	%r8d,%r12d
+	movzbl	%al,%eax
+	movl	%edx,120(%rsi)
+	addl	%r12d,%r10d
+	addb	%bl,%cl
+	roll	$14,%r10d
+	movl	%r11d,%r12d
+	pinsrw	$7,(%rdi,%rax,4),%xmm0
+
+	addl	%r11d,%r10d
+	movdqu	16(%r13),%xmm3
+	addb	$32,%bpl
+	movl	(%rdi,%rcx,4),%edx
+	xorl	%r10d,%r12d
+	movl	%ebx,(%rdi,%rcx,4)
+	andl	%r8d,%r12d
+	addl	48(%r15),%r9d
+	addb	%dl,%bl
+	movl	0(%rdi,%rbp,4),%eax
+	addl	$2368359562,%r9d
+	xorl	%r11d,%r12d
+	movzbl	%bl,%ebx
+	movl	%edx,124(%rsi)
+	addl	%r12d,%r9d
+	addb	%al,%cl
+	roll	$20,%r9d
+	movl	%r11d,%r12d
+	pinsrw	$7,(%rdi,%rbx,4),%xmm1
+
+	addl	%r10d,%r9d
+	movq	%rcx,%rsi
+	xorq	%rcx,%rcx
+	movb	%sil,%cl
+	leaq	(%rdi,%rbp,4),%rsi
+	psllq	$8,%xmm1
+	pxor	%xmm0,%xmm3
+	pxor	%xmm1,%xmm3
+	pxor	%xmm0,%xmm0
+	movl	(%rdi,%rcx,4),%edx
+	xorl	%r10d,%r12d
+	movl	%eax,(%rdi,%rcx,4)
+	xorl	%r9d,%r12d
+	addl	20(%r15),%r8d
+	addb	%dl,%al
+	movl	4(%rsi),%ebx
+	addl	$4294588738,%r8d
+	movzbl	%al,%eax
+	addl	%r12d,%r8d
+	movl	%edx,0(%rsi)
+	addb	%bl,%cl
+	roll	$4,%r8d
+	movl	%r10d,%r12d
+	movd	(%rdi,%rax,4),%xmm0
+
+	addl	%r9d,%r8d
+	pxor	%xmm1,%xmm1
+	movl	(%rdi,%rcx,4),%edx
+	xorl	%r9d,%r12d
+	movl	%ebx,(%rdi,%rcx,4)
+	xorl	%r8d,%r12d
+	addl	32(%r15),%r11d
+	addb	%dl,%bl
+	movl	8(%rsi),%eax
+	addl	$2272392833,%r11d
+	movzbl	%bl,%ebx
+	addl	%r12d,%r11d
+	movl	%edx,4(%rsi)
+	addb	%al,%cl
+	roll	$11,%r11d
+	movl	%r9d,%r12d
+	movd	(%rdi,%rbx,4),%xmm1
+
+	addl	%r8d,%r11d
+	movl	(%rdi,%rcx,4),%edx
+	xorl	%r8d,%r12d
+	movl	%eax,(%rdi,%rcx,4)
+	xorl	%r11d,%r12d
+	addl	44(%r15),%r10d
+	addb	%dl,%al
+	movl	12(%rsi),%ebx
+	addl	$1839030562,%r10d
+	movzbl	%al,%eax
+	addl	%r12d,%r10d
+	movl	%edx,8(%rsi)
+	addb	%bl,%cl
+	roll	$16,%r10d
+	movl	%r8d,%r12d
+	pinsrw	$1,(%rdi,%rax,4),%xmm0
+
+	addl	%r11d,%r10d
+	movl	(%rdi,%rcx,4),%edx
+	xorl	%r11d,%r12d
+	movl	%ebx,(%rdi,%rcx,4)
+	xorl	%r10d,%r12d
+	addl	56(%r15),%r9d
+	addb	%dl,%bl
+	movl	16(%rsi),%eax
+	addl	$4259657740,%r9d
+	movzbl	%bl,%ebx
+	addl	%r12d,%r9d
+	movl	%edx,12(%rsi)
+	addb	%al,%cl
+	roll	$23,%r9d
+	movl	%r11d,%r12d
+	pinsrw	$1,(%rdi,%rbx,4),%xmm1
+
+	addl	%r10d,%r9d
+	movl	(%rdi,%rcx,4),%edx
+	xorl	%r10d,%r12d
+	movl	%eax,(%rdi,%rcx,4)
+	xorl	%r9d,%r12d
+	addl	4(%r15),%r8d
+	addb	%dl,%al
+	movl	20(%rsi),%ebx
+	addl	$2763975236,%r8d
+	movzbl	%al,%eax
+	addl	%r12d,%r8d
+	movl	%edx,16(%rsi)
+	addb	%bl,%cl
+	roll	$4,%r8d
+	movl	%r10d,%r12d
+	pinsrw	$2,(%rdi,%rax,4),%xmm0
+
+	addl	%r9d,%r8d
+	movl	(%rdi,%rcx,4),%edx
+	xorl	%r9d,%r12d
+	movl	%ebx,(%rdi,%rcx,4)
+	xorl	%r8d,%r12d
+	addl	16(%r15),%r11d
+	addb	%dl,%bl
+	movl	24(%rsi),%eax
+	addl	$1272893353,%r11d
+	movzbl	%bl,%ebx
+	addl	%r12d,%r11d
+	movl	%edx,20(%rsi)
+	addb	%al,%cl
+	roll	$11,%r11d
+	movl	%r9d,%r12d
+	pinsrw	$2,(%rdi,%rbx,4),%xmm1
+
+	addl	%r8d,%r11d
+	movl	(%rdi,%rcx,4),%edx
+	xorl	%r8d,%r12d
+	movl	%eax,(%rdi,%rcx,4)
+	xorl	%r11d,%r12d
+	addl	28(%r15),%r10d
+	addb	%dl,%al
+	movl	28(%rsi),%ebx
+	addl	$4139469664,%r10d
+	movzbl	%al,%eax
+	addl	%r12d,%r10d
+	movl	%edx,24(%rsi)
+	addb	%bl,%cl
+	roll	$16,%r10d
+	movl	%r8d,%r12d
+	pinsrw	$3,(%rdi,%rax,4),%xmm0
+
+	addl	%r11d,%r10d
+	movl	(%rdi,%rcx,4),%edx
+	xorl	%r11d,%r12d
+	movl	%ebx,(%rdi,%rcx,4)
+	xorl	%r10d,%r12d
+	addl	40(%r15),%r9d
+	addb	%dl,%bl
+	movl	32(%rsi),%eax
+	addl	$3200236656,%r9d
+	movzbl	%bl,%ebx
+	addl	%r12d,%r9d
+	movl	%edx,28(%rsi)
+	addb	%al,%cl
+	roll	$23,%r9d
+	movl	%r11d,%r12d
+	pinsrw	$3,(%rdi,%rbx,4),%xmm1
+
+	addl	%r10d,%r9d
+	movl	(%rdi,%rcx,4),%edx
+	xorl	%r10d,%r12d
+	movl	%eax,(%rdi,%rcx,4)
+	xorl	%r9d,%r12d
+	addl	52(%r15),%r8d
+	addb	%dl,%al
+	movl	36(%rsi),%ebx
+	addl	$681279174,%r8d
+	movzbl	%al,%eax
+	addl	%r12d,%r8d
+	movl	%edx,32(%rsi)
+	addb	%bl,%cl
+	roll	$4,%r8d
+	movl	%r10d,%r12d
+	pinsrw	$4,(%rdi,%rax,4),%xmm0
+
+	addl	%r9d,%r8d
+	movl	(%rdi,%rcx,4),%edx
+	xorl	%r9d,%r12d
+	movl	%ebx,(%rdi,%rcx,4)
+	xorl	%r8d,%r12d
+	addl	0(%r15),%r11d
+	addb	%dl,%bl
+	movl	40(%rsi),%eax
+	addl	$3936430074,%r11d
+	movzbl	%bl,%ebx
+	addl	%r12d,%r11d
+	movl	%edx,36(%rsi)
+	addb	%al,%cl
+	roll	$11,%r11d
+	movl	%r9d,%r12d
+	pinsrw	$4,(%rdi,%rbx,4),%xmm1
+
+	addl	%r8d,%r11d
+	movl	(%rdi,%rcx,4),%edx
+	xorl	%r8d,%r12d
+	movl	%eax,(%rdi,%rcx,4)
+	xorl	%r11d,%r12d
+	addl	12(%r15),%r10d
+	addb	%dl,%al
+	movl	44(%rsi),%ebx
+	addl	$3572445317,%r10d
+	movzbl	%al,%eax
+	addl	%r12d,%r10d
+	movl	%edx,40(%rsi)
+	addb	%bl,%cl
+	roll	$16,%r10d
+	movl	%r8d,%r12d
+	pinsrw	$5,(%rdi,%rax,4),%xmm0
+
+	addl	%r11d,%r10d
+	movl	(%rdi,%rcx,4),%edx
+	xorl	%r11d,%r12d
+	movl	%ebx,(%rdi,%rcx,4)
+	xorl	%r10d,%r12d
+	addl	24(%r15),%r9d
+	addb	%dl,%bl
+	movl	48(%rsi),%eax
+	addl	$76029189,%r9d
+	movzbl	%bl,%ebx
+	addl	%r12d,%r9d
+	movl	%edx,44(%rsi)
+	addb	%al,%cl
+	roll	$23,%r9d
+	movl	%r11d,%r12d
+	pinsrw	$5,(%rdi,%rbx,4),%xmm1
+
+	addl	%r10d,%r9d
+	movl	(%rdi,%rcx,4),%edx
+	xorl	%r10d,%r12d
+	movl	%eax,(%rdi,%rcx,4)
+	xorl	%r9d,%r12d
+	addl	36(%r15),%r8d
+	addb	%dl,%al
+	movl	52(%rsi),%ebx
+	addl	$3654602809,%r8d
+	movzbl	%al,%eax
+	addl	%r12d,%r8d
+	movl	%edx,48(%rsi)
+	addb	%bl,%cl
+	roll	$4,%r8d
+	movl	%r10d,%r12d
+	pinsrw	$6,(%rdi,%rax,4),%xmm0
+
+	addl	%r9d,%r8d
+	movl	(%rdi,%rcx,4),%edx
+	xorl	%r9d,%r12d
+	movl	%ebx,(%rdi,%rcx,4)
+	xorl	%r8d,%r12d
+	addl	48(%r15),%r11d
+	addb	%dl,%bl
+	movl	56(%rsi),%eax
+	addl	$3873151461,%r11d
+	movzbl	%bl,%ebx
+	addl	%r12d,%r11d
+	movl	%edx,52(%rsi)
+	addb	%al,%cl
+	roll	$11,%r11d
+	movl	%r9d,%r12d
+	pinsrw	$6,(%rdi,%rbx,4),%xmm1
+
+	addl	%r8d,%r11d
+	movl	(%rdi,%rcx,4),%edx
+	xorl	%r8d,%r12d
+	movl	%eax,(%rdi,%rcx,4)
+	xorl	%r11d,%r12d
+	addl	60(%r15),%r10d
+	addb	%dl,%al
+	movl	60(%rsi),%ebx
+	addl	$530742520,%r10d
+	movzbl	%al,%eax
+	addl	%r12d,%r10d
+	movl	%edx,56(%rsi)
+	addb	%bl,%cl
+	roll	$16,%r10d
+	movl	%r8d,%r12d
+	pinsrw	$7,(%rdi,%rax,4),%xmm0
+
+	addl	%r11d,%r10d
+	movdqu	32(%r13),%xmm4
+	movl	(%rdi,%rcx,4),%edx
+	xorl	%r11d,%r12d
+	movl	%ebx,(%rdi,%rcx,4)
+	xorl	%r10d,%r12d
+	addl	8(%r15),%r9d
+	addb	%dl,%bl
+	movl	64(%rsi),%eax
+	addl	$3299628645,%r9d
+	movzbl	%bl,%ebx
+	addl	%r12d,%r9d
+	movl	%edx,60(%rsi)
+	addb	%al,%cl
+	roll	$23,%r9d
+	movl	$-1,%r12d
+	pinsrw	$7,(%rdi,%rbx,4),%xmm1
+
+	addl	%r10d,%r9d
+	psllq	$8,%xmm1
+	pxor	%xmm0,%xmm4
+	pxor	%xmm1,%xmm4
+	pxor	%xmm0,%xmm0
+	movl	(%rdi,%rcx,4),%edx
+	xorl	%r11d,%r12d
+	movl	%eax,(%rdi,%rcx,4)
+	orl	%r9d,%r12d
+	addl	0(%r15),%r8d
+	addb	%dl,%al
+	movl	68(%rsi),%ebx
+	addl	$4096336452,%r8d
+	movzbl	%al,%eax
+	xorl	%r10d,%r12d
+	movl	%edx,64(%rsi)
+	addl	%r12d,%r8d
+	addb	%bl,%cl
+	roll	$6,%r8d
+	movl	$-1,%r12d
+	movd	(%rdi,%rax,4),%xmm0
+
+	addl	%r9d,%r8d
+	pxor	%xmm1,%xmm1
+	movl	(%rdi,%rcx,4),%edx
+	xorl	%r10d,%r12d
+	movl	%ebx,(%rdi,%rcx,4)
+	orl	%r8d,%r12d
+	addl	28(%r15),%r11d
+	addb	%dl,%bl
+	movl	72(%rsi),%eax
+	addl	$1126891415,%r11d
+	movzbl	%bl,%ebx
+	xorl	%r9d,%r12d
+	movl	%edx,68(%rsi)
+	addl	%r12d,%r11d
+	addb	%al,%cl
+	roll	$10,%r11d
+	movl	$-1,%r12d
+	movd	(%rdi,%rbx,4),%xmm1
+
+	addl	%r8d,%r11d
+	movl	(%rdi,%rcx,4),%edx
+	xorl	%r9d,%r12d
+	movl	%eax,(%rdi,%rcx,4)
+	orl	%r11d,%r12d
+	addl	56(%r15),%r10d
+	addb	%dl,%al
+	movl	76(%rsi),%ebx
+	addl	$2878612391,%r10d
+	movzbl	%al,%eax
+	xorl	%r8d,%r12d
+	movl	%edx,72(%rsi)
+	addl	%r12d,%r10d
+	addb	%bl,%cl
+	roll	$15,%r10d
+	movl	$-1,%r12d
+	pinsrw	$1,(%rdi,%rax,4),%xmm0
+
+	addl	%r11d,%r10d
+	movl	(%rdi,%rcx,4),%edx
+	xorl	%r8d,%r12d
+	movl	%ebx,(%rdi,%rcx,4)
+	orl	%r10d,%r12d
+	addl	20(%r15),%r9d
+	addb	%dl,%bl
+	movl	80(%rsi),%eax
+	addl	$4237533241,%r9d
+	movzbl	%bl,%ebx
+	xorl	%r11d,%r12d
+	movl	%edx,76(%rsi)
+	addl	%r12d,%r9d
+	addb	%al,%cl
+	roll	$21,%r9d
+	movl	$-1,%r12d
+	pinsrw	$1,(%rdi,%rbx,4),%xmm1
+
+	addl	%r10d,%r9d
+	movl	(%rdi,%rcx,4),%edx
+	xorl	%r11d,%r12d
+	movl	%eax,(%rdi,%rcx,4)
+	orl	%r9d,%r12d
+	addl	48(%r15),%r8d
+	addb	%dl,%al
+	movl	84(%rsi),%ebx
+	addl	$1700485571,%r8d
+	movzbl	%al,%eax
+	xorl	%r10d,%r12d
+	movl	%edx,80(%rsi)
+	addl	%r12d,%r8d
+	addb	%bl,%cl
+	roll	$6,%r8d
+	movl	$-1,%r12d
+	pinsrw	$2,(%rdi,%rax,4),%xmm0
+
+	addl	%r9d,%r8d
+	movl	(%rdi,%rcx,4),%edx
+	xorl	%r10d,%r12d
+	movl	%ebx,(%rdi,%rcx,4)
+	orl	%r8d,%r12d
+	addl	12(%r15),%r11d
+	addb	%dl,%bl
+	movl	88(%rsi),%eax
+	addl	$2399980690,%r11d
+	movzbl	%bl,%ebx
+	xorl	%r9d,%r12d
+	movl	%edx,84(%rsi)
+	addl	%r12d,%r11d
+	addb	%al,%cl
+	roll	$10,%r11d
+	movl	$-1,%r12d
+	pinsrw	$2,(%rdi,%rbx,4),%xmm1
+
+	addl	%r8d,%r11d
+	movl	(%rdi,%rcx,4),%edx
+	xorl	%r9d,%r12d
+	movl	%eax,(%rdi,%rcx,4)
+	orl	%r11d,%r12d
+	addl	40(%r15),%r10d
+	addb	%dl,%al
+	movl	92(%rsi),%ebx
+	addl	$4293915773,%r10d
+	movzbl	%al,%eax
+	xorl	%r8d,%r12d
+	movl	%edx,88(%rsi)
+	addl	%r12d,%r10d
+	addb	%bl,%cl
+	roll	$15,%r10d
+	movl	$-1,%r12d
+	pinsrw	$3,(%rdi,%rax,4),%xmm0
+
+	addl	%r11d,%r10d
+	movl	(%rdi,%rcx,4),%edx
+	xorl	%r8d,%r12d
+	movl	%ebx,(%rdi,%rcx,4)
+	orl	%r10d,%r12d
+	addl	4(%r15),%r9d
+	addb	%dl,%bl
+	movl	96(%rsi),%eax
+	addl	$2240044497,%r9d
+	movzbl	%bl,%ebx
+	xorl	%r11d,%r12d
+	movl	%edx,92(%rsi)
+	addl	%r12d,%r9d
+	addb	%al,%cl
+	roll	$21,%r9d
+	movl	$-1,%r12d
+	pinsrw	$3,(%rdi,%rbx,4),%xmm1
+
+	addl	%r10d,%r9d
+	movl	(%rdi,%rcx,4),%edx
+	xorl	%r11d,%r12d
+	movl	%eax,(%rdi,%rcx,4)
+	orl	%r9d,%r12d
+	addl	32(%r15),%r8d
+	addb	%dl,%al
+	movl	100(%rsi),%ebx
+	addl	$1873313359,%r8d
+	movzbl	%al,%eax
+	xorl	%r10d,%r12d
+	movl	%edx,96(%rsi)
+	addl	%r12d,%r8d
+	addb	%bl,%cl
+	roll	$6,%r8d
+	movl	$-1,%r12d
+	pinsrw	$4,(%rdi,%rax,4),%xmm0
+
+	addl	%r9d,%r8d
+	movl	(%rdi,%rcx,4),%edx
+	xorl	%r10d,%r12d
+	movl	%ebx,(%rdi,%rcx,4)
+	orl	%r8d,%r12d
+	addl	60(%r15),%r11d
+	addb	%dl,%bl
+	movl	104(%rsi),%eax
+	addl	$4264355552,%r11d
+	movzbl	%bl,%ebx
+	xorl	%r9d,%r12d
+	movl	%edx,100(%rsi)
+	addl	%r12d,%r11d
+	addb	%al,%cl
+	roll	$10,%r11d
+	movl	$-1,%r12d
+	pinsrw	$4,(%rdi,%rbx,4),%xmm1
+
+	addl	%r8d,%r11d
+	movl	(%rdi,%rcx,4),%edx
+	xorl	%r9d,%r12d
+	movl	%eax,(%rdi,%rcx,4)
+	orl	%r11d,%r12d
+	addl	24(%r15),%r10d
+	addb	%dl,%al
+	movl	108(%rsi),%ebx
+	addl	$2734768916,%r10d
+	movzbl	%al,%eax
+	xorl	%r8d,%r12d
+	movl	%edx,104(%rsi)
+	addl	%r12d,%r10d
+	addb	%bl,%cl
+	roll	$15,%r10d
+	movl	$-1,%r12d
+	pinsrw	$5,(%rdi,%rax,4),%xmm0
+
+	addl	%r11d,%r10d
+	movl	(%rdi,%rcx,4),%edx
+	xorl	%r8d,%r12d
+	movl	%ebx,(%rdi,%rcx,4)
+	orl	%r10d,%r12d
+	addl	52(%r15),%r9d
+	addb	%dl,%bl
+	movl	112(%rsi),%eax
+	addl	$1309151649,%r9d
+	movzbl	%bl,%ebx
+	xorl	%r11d,%r12d
+	movl	%edx,108(%rsi)
+	addl	%r12d,%r9d
+	addb	%al,%cl
+	roll	$21,%r9d
+	movl	$-1,%r12d
+	pinsrw	$5,(%rdi,%rbx,4),%xmm1
+
+	addl	%r10d,%r9d
+	movl	(%rdi,%rcx,4),%edx
+	xorl	%r11d,%r12d
+	movl	%eax,(%rdi,%rcx,4)
+	orl	%r9d,%r12d
+	addl	16(%r15),%r8d
+	addb	%dl,%al
+	movl	116(%rsi),%ebx
+	addl	$4149444226,%r8d
+	movzbl	%al,%eax
+	xorl	%r10d,%r12d
+	movl	%edx,112(%rsi)
+	addl	%r12d,%r8d
+	addb	%bl,%cl
+	roll	$6,%r8d
+	movl	$-1,%r12d
+	pinsrw	$6,(%rdi,%rax,4),%xmm0
+
+	addl	%r9d,%r8d
+	movl	(%rdi,%rcx,4),%edx
+	xorl	%r10d,%r12d
+	movl	%ebx,(%rdi,%rcx,4)
+	orl	%r8d,%r12d
+	addl	44(%r15),%r11d
+	addb	%dl,%bl
+	movl	120(%rsi),%eax
+	addl	$3174756917,%r11d
+	movzbl	%bl,%ebx
+	xorl	%r9d,%r12d
+	movl	%edx,116(%rsi)
+	addl	%r12d,%r11d
+	addb	%al,%cl
+	roll	$10,%r11d
+	movl	$-1,%r12d
+	pinsrw	$6,(%rdi,%rbx,4),%xmm1
+
+	addl	%r8d,%r11d
+	movl	(%rdi,%rcx,4),%edx
+	xorl	%r9d,%r12d
+	movl	%eax,(%rdi,%rcx,4)
+	orl	%r11d,%r12d
+	addl	8(%r15),%r10d
+	addb	%dl,%al
+	movl	124(%rsi),%ebx
+	addl	$718787259,%r10d
+	movzbl	%al,%eax
+	xorl	%r8d,%r12d
+	movl	%edx,120(%rsi)
+	addl	%r12d,%r10d
+	addb	%bl,%cl
+	roll	$15,%r10d
+	movl	$-1,%r12d
+	pinsrw	$7,(%rdi,%rax,4),%xmm0
+
+	addl	%r11d,%r10d
+	movdqu	48(%r13),%xmm5
+	addb	$32,%bpl
+	movl	(%rdi,%rcx,4),%edx
+	xorl	%r8d,%r12d
+	movl	%ebx,(%rdi,%rcx,4)
+	orl	%r10d,%r12d
+	addl	36(%r15),%r9d
+	addb	%dl,%bl
+	movl	0(%rdi,%rbp,4),%eax
+	addl	$3951481745,%r9d
+	movzbl	%bl,%ebx
+	xorl	%r11d,%r12d
+	movl	%edx,124(%rsi)
+	addl	%r12d,%r9d
+	addb	%al,%cl
+	roll	$21,%r9d
+	movl	$-1,%r12d
+	pinsrw	$7,(%rdi,%rbx,4),%xmm1
+
+	addl	%r10d,%r9d
+	movq	%rbp,%rsi
+	xorq	%rbp,%rbp
+	movb	%sil,%bpl
+	movq	%rcx,%rsi
+	xorq	%rcx,%rcx
+	movb	%sil,%cl
+	leaq	(%rdi,%rbp,4),%rsi
+	psllq	$8,%xmm1
+	pxor	%xmm0,%xmm5
+	pxor	%xmm1,%xmm5
+	addl	0(%rsp),%r8d
+	addl	4(%rsp),%r9d
+	addl	8(%rsp),%r10d
+	addl	12(%rsp),%r11d
+
+	movdqu	%xmm2,(%r14,%r13,1)
+	movdqu	%xmm3,16(%r14,%r13,1)
+	movdqu	%xmm4,32(%r14,%r13,1)
+	movdqu	%xmm5,48(%r14,%r13,1)
+	leaq	64(%r15),%r15
+	leaq	64(%r13),%r13
+	cmpq	16(%rsp),%r15
+	jb	.Loop
+
+	movq	24(%rsp),%r12
+	subb	%al,%cl
+	movl	%r8d,0(%r12)
+	movl	%r9d,4(%r12)
+	movl	%r10d,8(%r12)
+	movl	%r11d,12(%r12)
+	subb	$1,%bpl
+	movl	%ebp,-8(%rdi)
+	movl	%ecx,-4(%rdi)
+
+	movq	40(%rsp),%r15
+	movq	48(%rsp),%r14
+	movq	56(%rsp),%r13
+	movq	64(%rsp),%r12
+	movq	72(%rsp),%rbp
+	movq	80(%rsp),%rbx
+	leaq	88(%rsp),%rsp
+.Lepilogue:
+.Labort:
+	.byte	0xf3,0xc3
+.size	rc4_md5_enc,.-rc4_md5_enc
diff --git a/jni/libopenssl/crypto/rc4/asm/rc4-md5-x86_64.pl b/jni/libopenssl/crypto/rc4/asm/rc4-md5-x86_64.pl
new file mode 100644
index 0000000..272fa91
--- /dev/null
+++ b/jni/libopenssl/crypto/rc4/asm/rc4-md5-x86_64.pl
@@ -0,0 +1,632 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# June 2011
+#
+# This is an RC4+MD5 "stitch" implementation. The idea, as spelled out
+# in http://download.intel.com/design/intarch/papers/323686.pdf, is
+# that since both algorithms exhibit instruction-level parallelism,
+# ILP, below the theoretical maximum, interleaving them allows better
+# utilization of processor resources and therefore better performance.
+# The RC4 instruction sequence is virtually identical to rc4-x86_64.pl,
+# which is heavily based on a submission by Maxim Perminov, Maxim
+# Locktyukhin and Jim Guilford of Intel. MD5 is a fresh implementation
+# aiming to minimize register usage; it is used as the "main thread"
+# with RC4 woven into it, one RC4 round per one MD5 round. In addition
+# to the stitched subroutine the script can generate standalone
+# replacements for md5_block_asm_data_order and RC4. Below are
+# performance numbers in cycles per processed byte (less is better)
+# for the standalone subroutines, their sum, and the stitched one:
+#
+#		RC4	MD5	RC4+MD5	stitch	gain
+# Opteron	6.5(*)	5.4	11.9	7.0	+70%(*)
+# Core2		6.5	5.8	12.3	7.7	+60%
+# Westmere	4.3	5.2	9.5	7.0	+36%
+# Sandy Bridge	4.2	5.5	9.7	6.8	+43%
+# Atom		9.3	6.5	15.8	11.1	+42%
+#
+# (*)	rc4-x86_64.pl delivers 5.3 on Opteron, so real improvement
+#	is +53%...
+
+my ($rc4,$md5)=(1,1);	# what to generate?
+my $D="#" if (!$md5);	# if set to "#", MD5 is stitched into RC4(),
+			# but its result is discarded. The idea here is
+			# to be able to use 'openssl speed rc4' for
+			# benchmarking the stitched subroutine...
+
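+# (Throughout the generated code below, lines tagged "#rc4#" or "#md5#"
+# belong to the respective algorithm. At the bottom of this script the
+# tag is stripped for each algorithm that is enabled; a tag left in
+# place keeps its line as an assembler comment.)
+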
+my $flavour = shift;
+my $output  = shift;
+if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
+
+my $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; my $dir=$1; my $xlate;
+( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
+die "can't locate x86_64-xlate.pl";
+
+open OUT,"| \"$^X\" $xlate $flavour $output";
+*STDOUT=*OUT;
+
+my ($dat,$in0,$out,$ctx,$inp,$len, $func,$nargs);
+
+if ($rc4 && !$md5) {
+  ($dat,$len,$in0,$out) = ("%rdi","%rsi","%rdx","%rcx");
+  $func="RC4";				$nargs=4;
+} elsif ($md5 && !$rc4) {
+  ($ctx,$inp,$len) = ("%rdi","%rsi","%rdx");
+  $func="md5_block_asm_data_order";	$nargs=3;
+} else {
+  ($dat,$in0,$out,$ctx,$inp,$len) = ("%rdi","%rsi","%rdx","%rcx","%r8","%r9");
+  $func="rc4_md5_enc";			$nargs=6;
+  # void rc4_md5_enc(
+  #		RC4_KEY *key,		#
+  #		const void *in0,	# RC4 input
+  #		void *out,		# RC4 output
+  #		MD5_CTX *ctx,		#
+  #		const void *inp,	# MD5 input
+  #		size_t len);		# number of 64-byte blocks
+}
+
+my @K=(	0xd76aa478,0xe8c7b756,0x242070db,0xc1bdceee,
+	0xf57c0faf,0x4787c62a,0xa8304613,0xfd469501,
+	0x698098d8,0x8b44f7af,0xffff5bb1,0x895cd7be,
+	0x6b901122,0xfd987193,0xa679438e,0x49b40821,
+
+	0xf61e2562,0xc040b340,0x265e5a51,0xe9b6c7aa,
+	0xd62f105d,0x02441453,0xd8a1e681,0xe7d3fbc8,
+	0x21e1cde6,0xc33707d6,0xf4d50d87,0x455a14ed,
+	0xa9e3e905,0xfcefa3f8,0x676f02d9,0x8d2a4c8a,
+
+	0xfffa3942,0x8771f681,0x6d9d6122,0xfde5380c,
+	0xa4beea44,0x4bdecfa9,0xf6bb4b60,0xbebfbc70,
+	0x289b7ec6,0xeaa127fa,0xd4ef3085,0x04881d05,
+	0xd9d4d039,0xe6db99e5,0x1fa27cf8,0xc4ac5665,
+
+	0xf4292244,0x432aff97,0xab9423a7,0xfc93a039,
+	0x655b59c3,0x8f0ccc92,0xffeff47d,0x85845dd1,
+	0x6fa87e4f,0xfe2ce6e0,0xa3014314,0x4e0811a1,
+	0xf7537e82,0xbd3af235,0x2ad7d2bb,0xeb86d391	);
+
+my @V=("%r8d","%r9d","%r10d","%r11d");	# MD5 registers
+my $tmp="%r12d";
+
+my @XX=("%rbp","%rsi");			# RC4 registers
+my @TX=("%rax","%rbx");
+my $YY="%rcx";
+my $TY="%rdx";
+
+my $MOD=32;				# 16, 32 or 64
+
+$code.=<<___;
+.text
+.align 16
+
+.globl	$func
+.type	$func,\@function,$nargs
+$func:
+	cmp	\$0,$len
+	je	.Labort
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	sub	\$40,%rsp
+.Lbody:
+___
+if ($rc4) {
+$code.=<<___;
+$D#md5#	mov	$ctx,%r11		# reassign arguments
+	mov	$len,%r12
+	mov	$in0,%r13
+	mov	$out,%r14
+$D#md5#	mov	$inp,%r15
+___
+    $ctx="%r11"	if ($md5);		# reassign arguments
+    $len="%r12";
+    $in0="%r13";
+    $out="%r14";
+    $inp="%r15"	if ($md5);
+    $inp=$in0	if (!$md5);
+$code.=<<___;
+	xor	$XX[0],$XX[0]
+	xor	$YY,$YY
+
+	lea	8($dat),$dat
+	mov	-8($dat),$XX[0]#b
+	mov	-4($dat),$YY#b
+
+	inc	$XX[0]#b
+	sub	$in0,$out
+	movl	($dat,$XX[0],4),$TX[0]#d
+___
+$code.=<<___ if (!$md5);
+	xor	$TX[1],$TX[1]
+	test	\$-128,$len
+	jz	.Loop1
+	sub	$XX[0],$TX[1]
+	and	\$`$MOD-1`,$TX[1]
+	jz	.Loop${MOD}_is_hot
+	sub	$TX[1],$len
+.Loop${MOD}_warmup:
+	add	$TX[0]#b,$YY#b
+	movl	($dat,$YY,4),$TY#d
+	movl	$TX[0]#d,($dat,$YY,4)
+	movl	$TY#d,($dat,$XX[0],4)
+	add	$TY#b,$TX[0]#b
+	inc	$XX[0]#b
+	movl	($dat,$TX[0],4),$TY#d
+	movl	($dat,$XX[0],4),$TX[0]#d
+	xorb	($in0),$TY#b
+	movb	$TY#b,($out,$in0)
+	lea	1($in0),$in0
+	dec	$TX[1]
+	jnz	.Loop${MOD}_warmup
+
+	mov	$YY,$TX[1]
+	xor	$YY,$YY
+	mov	$TX[1]#b,$YY#b
+
+.Loop${MOD}_is_hot:
+	mov	$len,32(%rsp)		# save original $len
+	shr	\$6,$len		# number of 64-byte blocks
+___
+  if ($D && !$md5) {			# stitch in dummy MD5
+    $md5=1;
+    $ctx="%r11";
+    $inp="%r15";
+    $code.=<<___;
+	mov	%rsp,$ctx
+	mov	$in0,$inp
+___
+  }
+}
+$code.=<<___;
+#rc4#	add	$TX[0]#b,$YY#b
+#rc4#	lea	($dat,$XX[0],4),$XX[1]
+	shl	\$6,$len
+	add	$inp,$len		# pointer to the end of input
+	mov	$len,16(%rsp)
+
+#md5#	mov	$ctx,24(%rsp)		# save pointer to MD5_CTX
+#md5#	mov	0*4($ctx),$V[0]		# load current hash value from MD5_CTX
+#md5#	mov	1*4($ctx),$V[1]
+#md5#	mov	2*4($ctx),$V[2]
+#md5#	mov	3*4($ctx),$V[3]
+	jmp	.Loop
+
+.align	16
+.Loop:
+#md5#	mov	$V[0],0*4(%rsp)		# put aside current hash value
+#md5#	mov	$V[1],1*4(%rsp)
+#md5#	mov	$V[2],2*4(%rsp)
+#md5#	mov	$V[3],$tmp		# forward reference
+#md5#	mov	$V[3],3*4(%rsp)
+___
+
+sub R0 {
+  my ($i,$a,$b,$c,$d)=@_;
+  my @rot0=(7,12,17,22);
+  my $j=$i%16;
+  my $k=$i%$MOD;
+  my $xmm="%xmm".($j&1);
+    $code.="	movdqu	($in0),%xmm2\n"		if ($rc4 && $j==15);
+    $code.="	add	\$$MOD,$XX[0]#b\n"	if ($rc4 && $j==15 && $k==$MOD-1);
+    $code.="	pxor	$xmm,$xmm\n"		if ($rc4 && $j<=1);
+    $code.=<<___;
+#rc4#	movl	($dat,$YY,4),$TY#d
+#md5#	xor	$c,$tmp
+#rc4#	movl	$TX[0]#d,($dat,$YY,4)
+#md5#	and	$b,$tmp
+#md5#	add	4*`$j`($inp),$a
+#rc4#	add	$TY#b,$TX[0]#b
+#rc4#	movl	`4*(($k+1)%$MOD)`(`$k==$MOD-1?"$dat,$XX[0],4":"$XX[1]"`),$TX[1]#d
+#md5#	add	\$$K[$i],$a
+#md5#	xor	$d,$tmp
+#rc4#	movz	$TX[0]#b,$TX[0]#d
+#rc4#	movl	$TY#d,4*$k($XX[1])
+#md5#	add	$tmp,$a
+#rc4#	add	$TX[1]#b,$YY#b
+#md5#	rol	\$$rot0[$j%4],$a
+#md5#	mov	`$j==15?"$b":"$c"`,$tmp		# forward reference
+#rc4#	pinsrw	\$`($j>>1)&7`,($dat,$TX[0],4),$xmm\n
+#md5#	add	$b,$a
+___
+    $code.=<<___ if ($rc4 && $j==15 && $k==$MOD-1);
+	mov	$YY,$XX[1]
+	xor	$YY,$YY				# key to partial-register performance
+	mov	$XX[1]#b,$YY#b
+	lea	($dat,$XX[0],4),$XX[1]
+___
+    $code.=<<___ if ($rc4 && $j==15);
+	psllq	\$8,%xmm1
+	pxor	%xmm0,%xmm2
+	pxor	%xmm1,%xmm2
+___
+}
+sub R1 {
+  my ($i,$a,$b,$c,$d)=@_;
+  my @rot1=(5,9,14,20);
+  my $j=$i%16;
+  my $k=$i%$MOD;
+  my $xmm="%xmm".($j&1);
+    $code.="	movdqu	16($in0),%xmm3\n"	if ($rc4 && $j==15);
+    $code.="	add	\$$MOD,$XX[0]#b\n"	if ($rc4 && $j==15 && $k==$MOD-1);
+    $code.="	pxor	$xmm,$xmm\n"		if ($rc4 && $j<=1);
+    $code.=<<___;
+#rc4#	movl	($dat,$YY,4),$TY#d
+#md5#	xor	$b,$tmp
+#rc4#	movl	$TX[0]#d,($dat,$YY,4)
+#md5#	and	$d,$tmp
+#md5#	add	4*`((1+5*$j)%16)`($inp),$a
+#rc4#	add	$TY#b,$TX[0]#b
+#rc4#	movl	`4*(($k+1)%$MOD)`(`$k==$MOD-1?"$dat,$XX[0],4":"$XX[1]"`),$TX[1]#d
+#md5#	add	\$$K[$i],$a
+#md5#	xor	$c,$tmp
+#rc4#	movz	$TX[0]#b,$TX[0]#d
+#rc4#	movl	$TY#d,4*$k($XX[1])
+#md5#	add	$tmp,$a
+#rc4#	add	$TX[1]#b,$YY#b
+#md5#	rol	\$$rot1[$j%4],$a
+#md5#	mov	`$j==15?"$c":"$b"`,$tmp		# forward reference
+#rc4#	pinsrw	\$`($j>>1)&7`,($dat,$TX[0],4),$xmm\n
+#md5#	add	$b,$a
+___
+    $code.=<<___ if ($rc4 && $j==15 && $k==$MOD-1);
+	mov	$YY,$XX[1]
+	xor	$YY,$YY				# key to partial-register performance
+	mov	$XX[1]#b,$YY#b
+	lea	($dat,$XX[0],4),$XX[1]
+___
+    $code.=<<___ if ($rc4 && $j==15);
+	psllq	\$8,%xmm1
+	pxor	%xmm0,%xmm3
+	pxor	%xmm1,%xmm3
+___
+}
+sub R2 {
+  my ($i,$a,$b,$c,$d)=@_;
+  my @rot2=(4,11,16,23);
+  my $j=$i%16;
+  my $k=$i%$MOD;
+  my $xmm="%xmm".($j&1);
+    $code.="	movdqu	32($in0),%xmm4\n"	if ($rc4 && $j==15);
+    $code.="	add	\$$MOD,$XX[0]#b\n"	if ($rc4 && $j==15 && $k==$MOD-1);
+    $code.="	pxor	$xmm,$xmm\n"		if ($rc4 && $j<=1);
+    $code.=<<___;
+#rc4#	movl	($dat,$YY,4),$TY#d
+#md5#	xor	$c,$tmp
+#rc4#	movl	$TX[0]#d,($dat,$YY,4)
+#md5#	xor	$b,$tmp
+#md5#	add	4*`((5+3*$j)%16)`($inp),$a
+#rc4#	add	$TY#b,$TX[0]#b
+#rc4#	movl	`4*(($k+1)%$MOD)`(`$k==$MOD-1?"$dat,$XX[0],4":"$XX[1]"`),$TX[1]#d
+#md5#	add	\$$K[$i],$a
+#rc4#	movz	$TX[0]#b,$TX[0]#d
+#md5#	add	$tmp,$a
+#rc4#	movl	$TY#d,4*$k($XX[1])
+#rc4#	add	$TX[1]#b,$YY#b
+#md5#	rol	\$$rot2[$j%4],$a
+#md5#	mov	`$j==15?"\\\$-1":"$c"`,$tmp	# forward reference
+#rc4#	pinsrw	\$`($j>>1)&7`,($dat,$TX[0],4),$xmm\n
+#md5#	add	$b,$a
+___
+    $code.=<<___ if ($rc4 && $j==15 && $k==$MOD-1);
+	mov	$YY,$XX[1]
+	xor	$YY,$YY				# key to partial-register performance
+	mov	$XX[1]#b,$YY#b
+	lea	($dat,$XX[0],4),$XX[1]
+___
+    $code.=<<___ if ($rc4 && $j==15);
+	psllq	\$8,%xmm1
+	pxor	%xmm0,%xmm4
+	pxor	%xmm1,%xmm4
+___
+}
+sub R3 {
+  my ($i,$a,$b,$c,$d)=@_;
+  my @rot3=(6,10,15,21);
+  my $j=$i%16;
+  my $k=$i%$MOD;
+  my $xmm="%xmm".($j&1);
+    $code.="	movdqu	48($in0),%xmm5\n"	if ($rc4 && $j==15);
+    $code.="	add	\$$MOD,$XX[0]#b\n"	if ($rc4 && $j==15 && $k==$MOD-1);
+    $code.="	pxor	$xmm,$xmm\n"		if ($rc4 && $j<=1);
+    $code.=<<___;
+#rc4#	movl	($dat,$YY,4),$TY#d
+#md5#	xor	$d,$tmp
+#rc4#	movl	$TX[0]#d,($dat,$YY,4)
+#md5#	or	$b,$tmp
+#md5#	add	4*`((7*$j)%16)`($inp),$a
+#rc4#	add	$TY#b,$TX[0]#b
+#rc4#	movl	`4*(($k+1)%$MOD)`(`$k==$MOD-1?"$dat,$XX[0],4":"$XX[1]"`),$TX[1]#d
+#md5#	add	\$$K[$i],$a
+#rc4#	movz	$TX[0]#b,$TX[0]#d
+#md5#	xor	$c,$tmp
+#rc4#	movl	$TY#d,4*$k($XX[1])
+#md5#	add	$tmp,$a
+#rc4#	add	$TX[1]#b,$YY#b
+#md5#	rol	\$$rot3[$j%4],$a
+#md5#	mov	\$-1,$tmp			# forward reference
+#rc4#	pinsrw	\$`($j>>1)&7`,($dat,$TX[0],4),$xmm\n
+#md5#	add	$b,$a
+___
+    $code.=<<___ if ($rc4 && $j==15);
+	mov	$XX[0],$XX[1]
+	xor	$XX[0],$XX[0]			# key to partial-register performance
+	mov	$XX[1]#b,$XX[0]#b
+	mov	$YY,$XX[1]
+	xor	$YY,$YY				# key to partial-register performance
+	mov	$XX[1]#b,$YY#b
+	lea	($dat,$XX[0],4),$XX[1]
+	psllq	\$8,%xmm1
+	pxor	%xmm0,%xmm5
+	pxor	%xmm1,%xmm5
+___
+}
+
+my $i=0;
+for(;$i<16;$i++) { R0($i,@V); unshift(@V,pop(@V)); push(@TX,shift(@TX)); }
+for(;$i<32;$i++) { R1($i,@V); unshift(@V,pop(@V)); push(@TX,shift(@TX)); }
+for(;$i<48;$i++) { R2($i,@V); unshift(@V,pop(@V)); push(@TX,shift(@TX)); }
+for(;$i<64;$i++) { R3($i,@V); unshift(@V,pop(@V)); push(@TX,shift(@TX)); }
+
+$code.=<<___;
+#md5#	add	0*4(%rsp),$V[0]		# accumulate hash value
+#md5#	add	1*4(%rsp),$V[1]
+#md5#	add	2*4(%rsp),$V[2]
+#md5#	add	3*4(%rsp),$V[3]
+
+#rc4#	movdqu	%xmm2,($out,$in0)	# write RC4 output
+#rc4#	movdqu	%xmm3,16($out,$in0)
+#rc4#	movdqu	%xmm4,32($out,$in0)
+#rc4#	movdqu	%xmm5,48($out,$in0)
+#md5#	lea	64($inp),$inp
+#rc4#	lea	64($in0),$in0
+	cmp	16(%rsp),$inp		# are we done?
+	jb	.Loop
+
+#md5#	mov	24(%rsp),$len		# restore pointer to MD5_CTX
+#rc4#	sub	$TX[0]#b,$YY#b		# correct $YY
+#md5#	mov	$V[0],0*4($len)		# write MD5_CTX
+#md5#	mov	$V[1],1*4($len)
+#md5#	mov	$V[2],2*4($len)
+#md5#	mov	$V[3],3*4($len)
+___
+$code.=<<___ if ($rc4 && (!$md5 || $D));
+	mov	32(%rsp),$len		# restore original $len
+	and	\$63,$len		# remaining bytes
+	jnz	.Loop1
+	jmp	.Ldone
+	
+.align	16
+.Loop1:
+	add	$TX[0]#b,$YY#b
+	movl	($dat,$YY,4),$TY#d
+	movl	$TX[0]#d,($dat,$YY,4)
+	movl	$TY#d,($dat,$XX[0],4)
+	add	$TY#b,$TX[0]#b
+	inc	$XX[0]#b
+	movl	($dat,$TX[0],4),$TY#d
+	movl	($dat,$XX[0],4),$TX[0]#d
+	xorb	($in0),$TY#b
+	movb	$TY#b,($out,$in0)
+	lea	1($in0),$in0
+	dec	$len
+	jnz	.Loop1
+
+.Ldone:
+___
+$code.=<<___;
+#rc4#	sub	\$1,$XX[0]#b
+#rc4#	movl	$XX[0]#d,-8($dat)
+#rc4#	movl	$YY#d,-4($dat)
+
+	mov	40(%rsp),%r15
+	mov	48(%rsp),%r14
+	mov	56(%rsp),%r13
+	mov	64(%rsp),%r12
+	mov	72(%rsp),%rbp
+	mov	80(%rsp),%rbx
+	lea	88(%rsp),%rsp
+.Lepilogue:
+.Labort:
+	ret
+.size $func,.-$func
+___
+
+if ($rc4 && $D) {	# sole purpose of this section is to provide
+			# option to use the generated module as drop-in
+			# replacement for rc4-x86_64.pl for debugging
+			# and testing purposes...
+my ($idx,$ido)=("%r8","%r9");
+my ($dat,$len,$inp)=("%rdi","%rsi","%rdx");
+
+$code.=<<___;
+.globl	RC4_set_key
+.type	RC4_set_key,\@function,3
+.align	16
+RC4_set_key:
+	lea	8($dat),$dat
+	lea	($inp,$len),$inp
+	neg	$len
+	mov	$len,%rcx
+	xor	%eax,%eax
+	xor	$ido,$ido
+	xor	%r10,%r10
+	xor	%r11,%r11
+	jmp	.Lw1stloop
+
+.align	16
+.Lw1stloop:
+	mov	%eax,($dat,%rax,4)
+	add	\$1,%al
+	jnc	.Lw1stloop
+
+	xor	$ido,$ido
+	xor	$idx,$idx
+.align	16
+.Lw2ndloop:
+	mov	($dat,$ido,4),%r10d
+	add	($inp,$len,1),$idx#b
+	add	%r10b,$idx#b
+	add	\$1,$len
+	mov	($dat,$idx,4),%r11d
+	cmovz	%rcx,$len
+	mov	%r10d,($dat,$idx,4)
+	mov	%r11d,($dat,$ido,4)
+	add	\$1,$ido#b
+	jnc	.Lw2ndloop
+
+	xor	%eax,%eax
+	mov	%eax,-8($dat)
+	mov	%eax,-4($dat)
+	ret
+.size	RC4_set_key,.-RC4_set_key
+
+.globl	RC4_options
+.type	RC4_options,\@abi-omnipotent
+.align	16
+RC4_options:
+	lea	.Lopts(%rip),%rax
+	ret
+.align	64
+.Lopts:
+.asciz	"rc4(64x,int)"
+.align	64
+.size	RC4_options,.-RC4_options
+___
+}
+# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
+#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
+if ($win64) {
+my $rec="%rcx";
+my $frame="%rdx";
+my $context="%r8";
+my $disp="%r9";
+
+$code.=<<___;
+.extern	__imp_RtlVirtualUnwind
+.type	se_handler,\@abi-omnipotent
+.align	16
+se_handler:
+	push	%rsi
+	push	%rdi
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	pushfq
+	sub	\$64,%rsp
+
+	mov	120($context),%rax	# pull context->Rax
+	mov	248($context),%rbx	# pull context->Rip
+
+	lea	.Lbody(%rip),%r10
+	cmp	%r10,%rbx		# context->Rip<.Lbody
+	jb	.Lin_prologue
+
+	mov	152($context),%rax	# pull context->Rsp
+
+	lea	.Lepilogue(%rip),%r10
+	cmp	%r10,%rbx		# context->Rip>=.Lepilogue
+	jae	.Lin_prologue
+
+	mov	40(%rax),%r15
+	mov	48(%rax),%r14
+	mov	56(%rax),%r13
+	mov	64(%rax),%r12
+	mov	72(%rax),%rbp
+	mov	80(%rax),%rbx
+	lea	88(%rax),%rax
+
+	mov	%rbx,144($context)	# restore context->Rbx
+	mov	%rbp,160($context)	# restore context->Rbp
+	mov	%r12,216($context)	# restore context->R12
+	mov	%r13,224($context)	# restore context->R13
+	mov	%r14,232($context)	# restore context->R14
+	mov	%r15,240($context)	# restore context->R15
+
+.Lin_prologue:
+	mov	8(%rax),%rdi
+	mov	16(%rax),%rsi
+	mov	%rax,152($context)	# restore context->Rsp
+	mov	%rsi,168($context)	# restore context->Rsi
+	mov	%rdi,176($context)	# restore context->Rdi
+
+	mov	40($disp),%rdi		# disp->ContextRecord
+	mov	$context,%rsi		# context
+	mov	\$154,%ecx		# sizeof(CONTEXT) in qwords
+	.long	0xa548f3fc		# cld; rep movsq
+
+	mov	$disp,%rsi
+	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
+	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
+	mov	0(%rsi),%r8		# arg3, disp->ControlPc
+	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
+	mov	40(%rsi),%r10		# disp->ContextRecord
+	lea	56(%rsi),%r11		# &disp->HandlerData
+	lea	24(%rsi),%r12		# &disp->EstablisherFrame
+	mov	%r10,32(%rsp)		# arg5
+	mov	%r11,40(%rsp)		# arg6
+	mov	%r12,48(%rsp)		# arg7
+	mov	%rcx,56(%rsp)		# arg8, (NULL)
+	call	*__imp_RtlVirtualUnwind(%rip)
+
+	mov	\$1,%eax		# ExceptionContinueSearch
+	add	\$64,%rsp
+	popfq
+	pop	%r15
+	pop	%r14
+	pop	%r13
+	pop	%r12
+	pop	%rbp
+	pop	%rbx
+	pop	%rdi
+	pop	%rsi
+	ret
+.size	se_handler,.-se_handler
+
+.section	.pdata
+.align	4
+	.rva	.LSEH_begin_$func
+	.rva	.LSEH_end_$func
+	.rva	.LSEH_info_$func
+
+.section	.xdata
+.align	8
+.LSEH_info_$func:
+	.byte	9,0,0,0
+	.rva	se_handler
+___
+}
+
+sub reg_part {
+my ($reg,$conv)=@_;
+    if ($reg =~ /%r[0-9]+/)     { $reg .= $conv; }
+    elsif ($conv eq "b")        { $reg =~ s/%[er]([^x]+)x?/%$1l/;       }
+    elsif ($conv eq "w")        { $reg =~ s/%[er](.+)/%$1/;             }
+    elsif ($conv eq "d")        { $reg =~ s/%[er](.+)/%e$1/;            }
+    return $reg;
+}
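+# e.g. reg_part("%rax","b") -> "%al", reg_part("%rsi","d") -> "%esi",
+#      reg_part("%r12","b") -> "%r12b"; this is how the "#b"/"#w"/"#d"
+#      register suffixes used throughout the code above are resolved.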
+
+$code =~ s/(%[a-z0-9]+)#([bwd])/reg_part($1,$2)/gem;
+$code =~ s/\`([^\`]*)\`/eval $1/gem;
+$code =~ s/pinsrw\s+\$0,/movd	/gm;
+
+$code =~ s/#md5#//gm	if ($md5);
+$code =~ s/#rc4#//gm	if ($rc4);
+
+print $code;
+
+close STDOUT;
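As a reading aid for the "stitch" described at the top of rc4-md5-x86_64.pl: each iteration of the generated .Loop interleaves one MD5 step with one RC4 byte, so the two independent dependency chains can fill the execution ports. A rough, standalone Perl sketch of the work done per stitched step (illustration only, using the textbook MD5 round-0 and RC4 PRGA formulas; the names are made up and nothing here is part of the module):

	# one MD5 round-0 step: a = b + ((a + F(b,c,d) + m + k) <<< s)
	sub md5_step0 {
	    my ($a,$b,$c,$d,$m,$k,$s) = @_;
	    $a = ($a + (($b & $c) | (~$b & $d)) + $m + $k) & 0xffffffff;
	    return ($b + (($a << $s) | ($a >> (32 - $s)))) & 0xffffffff;
	}

	# one RC4 PRGA step over state @$S; $x and $y are references to the
	# two byte indices kept in the RC4_KEY
	sub rc4_step {
	    my ($S,$x,$y) = @_;
	    $$x = ($$x + 1) & 0xff;
	    $$y = ($$y + $S->[$$x]) & 0xff;
	    @{$S}[$$x,$$y] = @{$S}[$$y,$$x];			# swap S[x],S[y]
	    return $S->[($S->[$$x] + $S->[$$y]) & 0xff];	# keystream byte
	}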
diff --git a/jni/libopenssl/crypto/rc4/asm/rc4-parisc.pl b/jni/libopenssl/crypto/rc4/asm/rc4-parisc.pl
new file mode 100644
index 0000000..ad7e656
--- /dev/null
+++ b/jni/libopenssl/crypto/rc4/asm/rc4-parisc.pl
@@ -0,0 +1,314 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# RC4 for PA-RISC.
+
+# June 2009.
+#
+# Performance is 33% better than gcc 3.2 generated code on PA-7100LC.
+# For reference, the [4x] unrolled loop is >40% faster than the folded
+# one. It's possible to unroll the loop 8 times on PA-RISC 2.0, but the
+# improvement is believed to be insufficient to justify the effort...
+#
+# Special thanks to polarhome.com for providing HP-UX account.
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+
+$flavour = shift;
+$output = shift;
+open STDOUT,">$output";
+
+if ($flavour =~ /64/) {
+	$LEVEL		="2.0W";
+	$SIZE_T		=8;
+	$FRAME_MARKER	=80;
+	$SAVED_RP	=16;
+	$PUSH		="std";
+	$PUSHMA		="std,ma";
+	$POP		="ldd";
+	$POPMB		="ldd,mb";
+} else {
+	$LEVEL		="1.0";
+	$SIZE_T		=4;
+	$FRAME_MARKER	=48;
+	$SAVED_RP	=20;
+	$PUSH		="stw";
+	$PUSHMA		="stwm";
+	$POP		="ldw";
+	$POPMB		="ldwm";
+}
+
+$FRAME=4*$SIZE_T+$FRAME_MARKER;	# 4 saved regs + frame marker
+				#                [+ argument transfer]
+$SZ=1;				# defaults to RC4_CHAR
+if (open CONF,"<${dir}../../opensslconf.h") {
+    while(<CONF>) {
+	if (m/#\s*define\s+RC4_INT\s+(.*)/) {
+	    $SZ = ($1=~/char$/) ? 1 : 4;
+	    last;
+	}
+    }
+    close CONF;
+}
+
+if ($SZ==1) {	# RC4_CHAR
+    $LD="ldb";
+    $LDX="ldbx";
+    $MKX="addl";
+    $ST="stb";
+} else {	# RC4_INT (~5% faster than RC4_CHAR on PA-7100LC)
+    $LD="ldw";
+    $LDX="ldwx,s";
+    $MKX="sh2addl";
+    $ST="stw";
+}
+
+$key="%r26";
+$len="%r25";
+$inp="%r24";
+$out="%r23";
+
+@XX=("%r19","%r20");
+@TX=("%r21","%r22");
+$YY="%r28";
+$TY="%r29";
+
+$acc="%r1";
+$ix="%r2";
+$iy="%r3";
+$dat0="%r4";
+$dat1="%r5";
+$rem="%r6";
+$mask="%r31";
+
+sub unrolledloopbody {
+for ($i=0;$i<4;$i++) {
+$code.=<<___;
+	ldo	1($XX[0]),$XX[1]
+	`sprintf("$LDX	%$TY(%$key),%$dat1") if ($i>0)`	
+	and	$mask,$XX[1],$XX[1]
+	$LDX	$YY($key),$TY
+	$MKX	$YY,$key,$ix
+	$LDX	$XX[1]($key),$TX[1]
+	$MKX	$XX[0],$key,$iy
+	$ST	$TX[0],0($ix)
+	comclr,<> $XX[1],$YY,%r0	; conditional
+	copy	$TX[0],$TX[1]		; move
+	`sprintf("%sdep	%$dat1,%d,8,%$acc",$i==1?"z":"",8*($i-1)+7) if ($i>0)`
+	$ST	$TY,0($iy)
+	addl	$TX[0],$TY,$TY
+	addl	$TX[1],$YY,$YY
+	and	$mask,$TY,$TY
+	and	$mask,$YY,$YY
+___
+push(@TX,shift(@TX)); push(@XX,shift(@XX));	# "rotate" registers
+} }
+
+sub foldedloop {
+my ($label,$count)=@_;
+$code.=<<___;
+$label
+	$MKX	$YY,$key,$iy
+	$LDX	$YY($key),$TY
+	$MKX	$XX[0],$key,$ix
+	$ST	$TX[0],0($iy)
+	ldo	1($XX[0]),$XX[0]
+	$ST	$TY,0($ix)
+	addl	$TX[0],$TY,$TY
+	ldbx	$inp($out),$dat1
+	and	$mask,$TY,$TY
+	and	$mask,$XX[0],$XX[0]
+	$LDX	$TY($key),$acc
+	$LDX	$XX[0]($key),$TX[0]
+	ldo	1($out),$out
+	xor	$dat1,$acc,$acc
+	addl	$TX[0],$YY,$YY
+	stb	$acc,-1($out)
+	addib,<> -1,$count,$label	; $count is always small
+	and	$mask,$YY,$YY
+___
+}
+
+$code=<<___;
+	.LEVEL	$LEVEL
+	.SPACE	\$TEXT\$
+	.SUBSPA	\$CODE\$,QUAD=0,ALIGN=8,ACCESS=0x2C,CODE_ONLY
+
+	.EXPORT	RC4,ENTRY,ARGW0=GR,ARGW1=GR,ARGW2=GR,ARGW3=GR
+RC4
+	.PROC
+	.CALLINFO	FRAME=`$FRAME-4*$SIZE_T`,NO_CALLS,SAVE_RP,ENTRY_GR=6
+	.ENTRY
+	$PUSH	%r2,-$SAVED_RP(%sp)	; standard prologue
+	$PUSHMA	%r3,$FRAME(%sp)
+	$PUSH	%r4,`-$FRAME+1*$SIZE_T`(%sp)
+	$PUSH	%r5,`-$FRAME+2*$SIZE_T`(%sp)
+	$PUSH	%r6,`-$FRAME+3*$SIZE_T`(%sp)
+
+	cmpib,*= 0,$len,L\$abort
+	sub	$inp,$out,$inp		; distance between $inp and $out
+
+	$LD	`0*$SZ`($key),$XX[0]
+	$LD	`1*$SZ`($key),$YY
+	ldo	`2*$SZ`($key),$key
+
+	ldi	0xff,$mask
+	ldi	3,$dat0		
+
+	ldo	1($XX[0]),$XX[0]	; warm up loop
+	and	$mask,$XX[0],$XX[0]
+	$LDX	$XX[0]($key),$TX[0]
+	addl	$TX[0],$YY,$YY
+	cmpib,*>>= 6,$len,L\$oop1	; is $len large enough to bother?
+	and	$mask,$YY,$YY
+
+	and,<>	$out,$dat0,$rem		; is $out aligned?
+	b	L\$alignedout
+	subi	4,$rem,$rem
+	sub	$len,$rem,$len
+___
+&foldedloop("L\$alignout",$rem);	# process till $out is aligned
+
+$code.=<<___;
+L\$alignedout				; $len is at least 4 here
+	and,<>	$inp,$dat0,$acc		; is $inp aligned?
+	b	L\$oop4
+	sub	$inp,$acc,$rem		; align $inp
+
+	sh3addl	$acc,%r0,$acc
+	subi	32,$acc,$acc
+	mtctl	$acc,%cr11		; load %sar with vshd align factor
+	ldwx	$rem($out),$dat0
+	ldo	4($rem),$rem
+L\$oop4misalignedinp
+___
+&unrolledloopbody();
+$code.=<<___;
+	$LDX	$TY($key),$ix
+	ldwx	$rem($out),$dat1
+	ldo	-4($len),$len
+	or	$ix,$acc,$acc		; last piece, no need to dep
+	vshd	$dat0,$dat1,$iy		; align data
+	copy	$dat1,$dat0
+	xor	$iy,$acc,$acc
+	stw	$acc,0($out)
+	cmpib,*<< 3,$len,L\$oop4misalignedinp
+	ldo	4($out),$out
+	cmpib,*= 0,$len,L\$done
+	nop
+	b	L\$oop1
+	nop
+
+	.ALIGN	8
+L\$oop4
+___
+&unrolledloopbody();
+$code.=<<___;
+	$LDX	$TY($key),$ix
+	ldwx	$inp($out),$dat0
+	ldo	-4($len),$len
+	or	$ix,$acc,$acc		; last piece, no need to dep
+	xor	$dat0,$acc,$acc
+	stw	$acc,0($out)
+	cmpib,*<< 3,$len,L\$oop4
+	ldo	4($out),$out
+	cmpib,*= 0,$len,L\$done
+	nop
+___
+&foldedloop("L\$oop1",$len);
+$code.=<<___;
+L\$done
+	$POP	`-$FRAME-$SAVED_RP`(%sp),%r2
+	ldo	-1($XX[0]),$XX[0]	; chill out loop
+	sub	$YY,$TX[0],$YY
+	and	$mask,$XX[0],$XX[0]
+	and	$mask,$YY,$YY
+	$ST	$XX[0],`-2*$SZ`($key)
+	$ST	$YY,`-1*$SZ`($key)
+	$POP	`-$FRAME+1*$SIZE_T`(%sp),%r4
+	$POP	`-$FRAME+2*$SIZE_T`(%sp),%r5
+	$POP	`-$FRAME+3*$SIZE_T`(%sp),%r6
+L\$abort
+	bv	(%r2)
+	.EXIT
+	$POPMB	-$FRAME(%sp),%r3
+	.PROCEND
+___
+
+$code.=<<___;
+
+	.EXPORT	private_RC4_set_key,ENTRY,ARGW0=GR,ARGW1=GR,ARGW2=GR
+	.ALIGN	8
+private_RC4_set_key
+	.PROC
+	.CALLINFO	NO_CALLS
+	.ENTRY
+	$ST	%r0,`0*$SZ`($key)
+	$ST	%r0,`1*$SZ`($key)
+	ldo	`2*$SZ`($key),$key
+	copy	%r0,@XX[0]
+L\$1st
+	$ST	@XX[0],0($key)
+	ldo	1(@XX[0]),@XX[0]
+	bb,>=	@XX[0],`31-8`,L\$1st	; @XX[0]<256
+	ldo	$SZ($key),$key
+
+	ldo	`-256*$SZ`($key),$key	; rewind $key
+	addl	$len,$inp,$inp		; $inp to point at the end
+	sub	%r0,$len,%r23		; inverse index
+	copy	%r0,@XX[0]
+	copy	%r0,@XX[1]
+	ldi	0xff,$mask
+
+L\$2nd
+	$LDX	@XX[0]($key),@TX[0]
+	ldbx	%r23($inp),@TX[1]
+	addi,nuv 1,%r23,%r23		; increment and conditional
+	sub	%r0,$len,%r23		; inverse index
+	addl	@TX[0],@XX[1],@XX[1]
+	addl	@TX[1],@XX[1],@XX[1]
+	and	$mask,@XX[1],@XX[1]
+	$MKX	@XX[0],$key,$TY
+	$LDX	@XX[1]($key),@TX[1]
+	$MKX	@XX[1],$key,$YY
+	ldo	1(@XX[0]),@XX[0]
+	$ST	@TX[0],0($YY)
+	bb,>=	@XX[0],`31-8`,L\$2nd	; @XX[0]<256
+	$ST	@TX[1],0($TY)
+
+	bv,n	(%r2)
+	.EXIT
+	nop
+	.PROCEND
+
+	.EXPORT	RC4_options,ENTRY
+	.ALIGN	8
+RC4_options
+	.PROC
+	.CALLINFO	NO_CALLS
+	.ENTRY
+	blr	%r0,%r28
+	ldi	3,%r1
+L\$pic
+	andcm	%r28,%r1,%r28
+	bv	(%r2)
+	.EXIT
+	ldo	L\$opts-L\$pic(%r28),%r28
+	.PROCEND
+	.ALIGN	8
+L\$opts
+	.STRINGZ "rc4(4x,`$SZ==1?"char":"int"`)"
+	.STRINGZ "RC4 for PA-RISC, CRYPTOGAMS by <appro\@openssl.org>"
+___
+$code =~ s/\`([^\`]*)\`/eval $1/gem;
+$code =~ s/cmpib,\*/comib,/gm	if ($SIZE_T==4);
+$code =~ s/\bbv\b/bve/gm	if ($SIZE_T==8);
+
+print $code;
+close STDOUT;
diff --git a/jni/libopenssl/crypto/rc4/asm/rc4-s390x.pl b/jni/libopenssl/crypto/rc4/asm/rc4-s390x.pl
index 96681fa..7528ece 100644
--- a/jni/libopenssl/crypto/rc4/asm/rc4-s390x.pl
+++ b/jni/libopenssl/crypto/rc4/asm/rc4-s390x.pl
@@ -13,6 +13,29 @@
 # "cluster" Address Generation Interlocks, so that one pipeline stall
 # resolves several dependencies.
 
+# November 2010.
+#
+# Adapt for -m31 build. If the kernel supports what is called the
+# "highgprs" feature on Linux [see /proc/cpuinfo], it's possible to
+# use 64-bit instructions and achieve "64-bit" performance even in a
+# 31-bit legacy application context. The feature is not specific to
+# any particular processor, as long as it's a "z-CPU". The latter
+# implies that the code remains z/Architecture specific. On z990 it
+# was measured to perform 50% better than code generated by gcc 4.3.
+
+$flavour = shift;
+
+if ($flavour =~ /3[12]/) {
+	$SIZE_T=4;
+	$g="";
+} else {
+	$SIZE_T=8;
+	$g="g";
+}
+
+while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
+open STDOUT,">$output";
+
 $rp="%r14";
 $sp="%r15";
 $code=<<___;
@@ -39,7 +62,12 @@
 .type	RC4,\@function
 .align	64
 RC4:
-	stmg	%r6,%r11,48($sp)
+	stm${g}	%r6,%r11,6*$SIZE_T($sp)
+___
+$code.=<<___ if ($flavour =~ /3[12]/);
+	llgfr	$len,$len
+___
+$code.=<<___;
 	llgc	$XX[0],0($key)
 	llgc	$YY,1($key)
 	la	$XX[0],1($XX[0])
@@ -90,7 +118,7 @@
 	xgr	$acc,$TX[1]
 	stg	$acc,0($out)
 	la	$out,8($out)
-	brct	$cnt,.Loop8
+	brctg	$cnt,.Loop8
 
 .Lshort:
 	lghi	$acc,7
@@ -122,7 +150,7 @@
 	ahi	$XX[0],-1
 	stc	$XX[0],0($key)
 	stc	$YY,1($key)
-	lmg	%r6,%r11,48($sp)
+	lm${g}	%r6,%r11,6*$SIZE_T($sp)
 	br	$rp
 .size	RC4,.-RC4
 .string	"RC4 for s390x, CRYPTOGAMS by <appro\@openssl.org>"
@@ -143,11 +171,11 @@
 $iinp="%r8";
 
 $code.=<<___;
-.globl	RC4_set_key
-.type	RC4_set_key,\@function
+.globl	private_RC4_set_key
+.type	private_RC4_set_key,\@function
 .align	64
-RC4_set_key:
-	stmg	%r6,%r8,48($sp)
+private_RC4_set_key:
+	stm${g}	%r6,%r8,6*$SIZE_T($sp)
 	lhi	$cnt,256
 	la	$idx,0(%r0)
 	sth	$idx,0($key)
@@ -180,9 +208,9 @@
 	la	$iinp,0(%r0)
 	j	.L2ndloop
 .Ldone:
-	lmg	%r6,%r8,48($sp)
+	lm${g}	%r6,%r8,6*$SIZE_T($sp)
 	br	$rp
-.size	RC4_set_key,.-RC4_set_key
+.size	private_RC4_set_key,.-private_RC4_set_key
 
 ___
 }
@@ -203,3 +231,4 @@
 ___
 
 print $code;
+close STDOUT;	# force flush
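
As a quick aside to the rc4-s390x.pl changes above: the stm${g}/lm${g} pattern is what lets a single source serve both the 31-bit and 64-bit builds. Below is a minimal Perl sketch of that flavour-driven suffix convention (illustration only, not part of the patch; it simply mirrors the $SIZE_T/$g handling and the stm${g}/lm${g} lines shown in the diff above).

#!/usr/bin/env perl
# Illustration only: mimic rc4-s390x.pl's flavour handling.  A 31-bit
# flavour selects 4-byte slots and plain stm/lm; anything else selects
# 8-byte slots and the 64-bit stmg/lmg forms via the "g" suffix.
my $flavour = shift || "64";
my ($SIZE_T, $g) = ($flavour =~ /3[12]/) ? (4, "") : (8, "g");
# Registers %r6-%r11 are saved/restored at 6*$SIZE_T bytes off %r15,
# matching the stm${g}/lm${g} lines in the patch above.
printf "\tstm%s\t%%r6,%%r11,%d(%%r15)\n", $g, 6*$SIZE_T;
printf "\tlm%s\t%%r6,%%r11,%d(%%r15)\n",  $g, 6*$SIZE_T;
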
diff --git a/jni/libopenssl/crypto/rc4/asm/rc4-x86_64.S b/jni/libopenssl/crypto/rc4/asm/rc4-x86_64.S
new file mode 100644
index 0000000..af16158
--- /dev/null
+++ b/jni/libopenssl/crypto/rc4/asm/rc4-x86_64.S
@@ -0,0 +1,615 @@
+.text	
+
+
+.globl	RC4
+.type	RC4,@function
+.align	16
+RC4:	orq	%rsi,%rsi
+	jne	.Lentry
+	.byte	0xf3,0xc3
+.Lentry:
+	pushq	%rbx
+	pushq	%r12
+	pushq	%r13
+.Lprologue:
+	movq	%rsi,%r11
+	movq	%rdx,%r12
+	movq	%rcx,%r13
+	xorq	%r10,%r10
+	xorq	%rcx,%rcx
+
+	leaq	8(%rdi),%rdi
+	movb	-8(%rdi),%r10b
+	movb	-4(%rdi),%cl
+	cmpl	$-1,256(%rdi)
+	je	.LRC4_CHAR
+	movl	OPENSSL_ia32cap_P(%rip),%r8d
+	xorq	%rbx,%rbx
+	incb	%r10b
+	subq	%r10,%rbx
+	subq	%r12,%r13
+	movl	(%rdi,%r10,4),%eax
+	testq	$-16,%r11
+	jz	.Lloop1
+	btl	$30,%r8d
+	jc	.Lintel
+	andq	$7,%rbx
+	leaq	1(%r10),%rsi
+	jz	.Loop8
+	subq	%rbx,%r11
+.Loop8_warmup:
+	addb	%al,%cl
+	movl	(%rdi,%rcx,4),%edx
+	movl	%eax,(%rdi,%rcx,4)
+	movl	%edx,(%rdi,%r10,4)
+	addb	%dl,%al
+	incb	%r10b
+	movl	(%rdi,%rax,4),%edx
+	movl	(%rdi,%r10,4),%eax
+	xorb	(%r12),%dl
+	movb	%dl,(%r13,%r12,1)
+	leaq	1(%r12),%r12
+	decq	%rbx
+	jnz	.Loop8_warmup
+
+	leaq	1(%r10),%rsi
+	jmp	.Loop8
+.align	16
+.Loop8:
+	addb	%al,%cl
+	movl	(%rdi,%rcx,4),%edx
+	movl	%eax,(%rdi,%rcx,4)
+	movl	0(%rdi,%rsi,4),%ebx
+	rorq	$8,%r8
+	movl	%edx,0(%rdi,%r10,4)
+	addb	%al,%dl
+	movb	(%rdi,%rdx,4),%r8b
+	addb	%bl,%cl
+	movl	(%rdi,%rcx,4),%edx
+	movl	%ebx,(%rdi,%rcx,4)
+	movl	4(%rdi,%rsi,4),%eax
+	rorq	$8,%r8
+	movl	%edx,4(%rdi,%r10,4)
+	addb	%bl,%dl
+	movb	(%rdi,%rdx,4),%r8b
+	addb	%al,%cl
+	movl	(%rdi,%rcx,4),%edx
+	movl	%eax,(%rdi,%rcx,4)
+	movl	8(%rdi,%rsi,4),%ebx
+	rorq	$8,%r8
+	movl	%edx,8(%rdi,%r10,4)
+	addb	%al,%dl
+	movb	(%rdi,%rdx,4),%r8b
+	addb	%bl,%cl
+	movl	(%rdi,%rcx,4),%edx
+	movl	%ebx,(%rdi,%rcx,4)
+	movl	12(%rdi,%rsi,4),%eax
+	rorq	$8,%r8
+	movl	%edx,12(%rdi,%r10,4)
+	addb	%bl,%dl
+	movb	(%rdi,%rdx,4),%r8b
+	addb	%al,%cl
+	movl	(%rdi,%rcx,4),%edx
+	movl	%eax,(%rdi,%rcx,4)
+	movl	16(%rdi,%rsi,4),%ebx
+	rorq	$8,%r8
+	movl	%edx,16(%rdi,%r10,4)
+	addb	%al,%dl
+	movb	(%rdi,%rdx,4),%r8b
+	addb	%bl,%cl
+	movl	(%rdi,%rcx,4),%edx
+	movl	%ebx,(%rdi,%rcx,4)
+	movl	20(%rdi,%rsi,4),%eax
+	rorq	$8,%r8
+	movl	%edx,20(%rdi,%r10,4)
+	addb	%bl,%dl
+	movb	(%rdi,%rdx,4),%r8b
+	addb	%al,%cl
+	movl	(%rdi,%rcx,4),%edx
+	movl	%eax,(%rdi,%rcx,4)
+	movl	24(%rdi,%rsi,4),%ebx
+	rorq	$8,%r8
+	movl	%edx,24(%rdi,%r10,4)
+	addb	%al,%dl
+	movb	(%rdi,%rdx,4),%r8b
+	addb	$8,%sil
+	addb	%bl,%cl
+	movl	(%rdi,%rcx,4),%edx
+	movl	%ebx,(%rdi,%rcx,4)
+	movl	-4(%rdi,%rsi,4),%eax
+	rorq	$8,%r8
+	movl	%edx,28(%rdi,%r10,4)
+	addb	%bl,%dl
+	movb	(%rdi,%rdx,4),%r8b
+	addb	$8,%r10b
+	rorq	$8,%r8
+	subq	$8,%r11
+
+	xorq	(%r12),%r8
+	movq	%r8,(%r13,%r12,1)
+	leaq	8(%r12),%r12
+
+	testq	$-8,%r11
+	jnz	.Loop8
+	cmpq	$0,%r11
+	jne	.Lloop1
+	jmp	.Lexit
+
+.align	16
+.Lintel:
+	testq	$-32,%r11
+	jz	.Lloop1
+	andq	$15,%rbx
+	jz	.Loop16_is_hot
+	subq	%rbx,%r11
+.Loop16_warmup:
+	addb	%al,%cl
+	movl	(%rdi,%rcx,4),%edx
+	movl	%eax,(%rdi,%rcx,4)
+	movl	%edx,(%rdi,%r10,4)
+	addb	%dl,%al
+	incb	%r10b
+	movl	(%rdi,%rax,4),%edx
+	movl	(%rdi,%r10,4),%eax
+	xorb	(%r12),%dl
+	movb	%dl,(%r13,%r12,1)
+	leaq	1(%r12),%r12
+	decq	%rbx
+	jnz	.Loop16_warmup
+
+	movq	%rcx,%rbx
+	xorq	%rcx,%rcx
+	movb	%bl,%cl
+
+.Loop16_is_hot:
+	leaq	(%rdi,%r10,4),%rsi
+	addb	%al,%cl
+	movl	(%rdi,%rcx,4),%edx
+	pxor	%xmm0,%xmm0
+	movl	%eax,(%rdi,%rcx,4)
+	addb	%dl,%al
+	movl	4(%rsi),%ebx
+	movzbl	%al,%eax
+	movl	%edx,0(%rsi)
+	addb	%bl,%cl
+	pinsrw	$0,(%rdi,%rax,4),%xmm0
+	jmp	.Loop16_enter
+.align	16
+.Loop16:
+	addb	%al,%cl
+	movl	(%rdi,%rcx,4),%edx
+	pxor	%xmm0,%xmm2
+	psllq	$8,%xmm1
+	pxor	%xmm0,%xmm0
+	movl	%eax,(%rdi,%rcx,4)
+	addb	%dl,%al
+	movl	4(%rsi),%ebx
+	movzbl	%al,%eax
+	movl	%edx,0(%rsi)
+	pxor	%xmm1,%xmm2
+	addb	%bl,%cl
+	pinsrw	$0,(%rdi,%rax,4),%xmm0
+	movdqu	%xmm2,(%r13,%r12,1)
+	leaq	16(%r12),%r12
+.Loop16_enter:
+	movl	(%rdi,%rcx,4),%edx
+	pxor	%xmm1,%xmm1
+	movl	%ebx,(%rdi,%rcx,4)
+	addb	%dl,%bl
+	movl	8(%rsi),%eax
+	movzbl	%bl,%ebx
+	movl	%edx,4(%rsi)
+	addb	%al,%cl
+	pinsrw	$0,(%rdi,%rbx,4),%xmm1
+	movl	(%rdi,%rcx,4),%edx
+	movl	%eax,(%rdi,%rcx,4)
+	addb	%dl,%al
+	movl	12(%rsi),%ebx
+	movzbl	%al,%eax
+	movl	%edx,8(%rsi)
+	addb	%bl,%cl
+	pinsrw	$1,(%rdi,%rax,4),%xmm0
+	movl	(%rdi,%rcx,4),%edx
+	movl	%ebx,(%rdi,%rcx,4)
+	addb	%dl,%bl
+	movl	16(%rsi),%eax
+	movzbl	%bl,%ebx
+	movl	%edx,12(%rsi)
+	addb	%al,%cl
+	pinsrw	$1,(%rdi,%rbx,4),%xmm1
+	movl	(%rdi,%rcx,4),%edx
+	movl	%eax,(%rdi,%rcx,4)
+	addb	%dl,%al
+	movl	20(%rsi),%ebx
+	movzbl	%al,%eax
+	movl	%edx,16(%rsi)
+	addb	%bl,%cl
+	pinsrw	$2,(%rdi,%rax,4),%xmm0
+	movl	(%rdi,%rcx,4),%edx
+	movl	%ebx,(%rdi,%rcx,4)
+	addb	%dl,%bl
+	movl	24(%rsi),%eax
+	movzbl	%bl,%ebx
+	movl	%edx,20(%rsi)
+	addb	%al,%cl
+	pinsrw	$2,(%rdi,%rbx,4),%xmm1
+	movl	(%rdi,%rcx,4),%edx
+	movl	%eax,(%rdi,%rcx,4)
+	addb	%dl,%al
+	movl	28(%rsi),%ebx
+	movzbl	%al,%eax
+	movl	%edx,24(%rsi)
+	addb	%bl,%cl
+	pinsrw	$3,(%rdi,%rax,4),%xmm0
+	movl	(%rdi,%rcx,4),%edx
+	movl	%ebx,(%rdi,%rcx,4)
+	addb	%dl,%bl
+	movl	32(%rsi),%eax
+	movzbl	%bl,%ebx
+	movl	%edx,28(%rsi)
+	addb	%al,%cl
+	pinsrw	$3,(%rdi,%rbx,4),%xmm1
+	movl	(%rdi,%rcx,4),%edx
+	movl	%eax,(%rdi,%rcx,4)
+	addb	%dl,%al
+	movl	36(%rsi),%ebx
+	movzbl	%al,%eax
+	movl	%edx,32(%rsi)
+	addb	%bl,%cl
+	pinsrw	$4,(%rdi,%rax,4),%xmm0
+	movl	(%rdi,%rcx,4),%edx
+	movl	%ebx,(%rdi,%rcx,4)
+	addb	%dl,%bl
+	movl	40(%rsi),%eax
+	movzbl	%bl,%ebx
+	movl	%edx,36(%rsi)
+	addb	%al,%cl
+	pinsrw	$4,(%rdi,%rbx,4),%xmm1
+	movl	(%rdi,%rcx,4),%edx
+	movl	%eax,(%rdi,%rcx,4)
+	addb	%dl,%al
+	movl	44(%rsi),%ebx
+	movzbl	%al,%eax
+	movl	%edx,40(%rsi)
+	addb	%bl,%cl
+	pinsrw	$5,(%rdi,%rax,4),%xmm0
+	movl	(%rdi,%rcx,4),%edx
+	movl	%ebx,(%rdi,%rcx,4)
+	addb	%dl,%bl
+	movl	48(%rsi),%eax
+	movzbl	%bl,%ebx
+	movl	%edx,44(%rsi)
+	addb	%al,%cl
+	pinsrw	$5,(%rdi,%rbx,4),%xmm1
+	movl	(%rdi,%rcx,4),%edx
+	movl	%eax,(%rdi,%rcx,4)
+	addb	%dl,%al
+	movl	52(%rsi),%ebx
+	movzbl	%al,%eax
+	movl	%edx,48(%rsi)
+	addb	%bl,%cl
+	pinsrw	$6,(%rdi,%rax,4),%xmm0
+	movl	(%rdi,%rcx,4),%edx
+	movl	%ebx,(%rdi,%rcx,4)
+	addb	%dl,%bl
+	movl	56(%rsi),%eax
+	movzbl	%bl,%ebx
+	movl	%edx,52(%rsi)
+	addb	%al,%cl
+	pinsrw	$6,(%rdi,%rbx,4),%xmm1
+	movl	(%rdi,%rcx,4),%edx
+	movl	%eax,(%rdi,%rcx,4)
+	addb	%dl,%al
+	movl	60(%rsi),%ebx
+	movzbl	%al,%eax
+	movl	%edx,56(%rsi)
+	addb	%bl,%cl
+	pinsrw	$7,(%rdi,%rax,4),%xmm0
+	addb	$16,%r10b
+	movdqu	(%r12),%xmm2
+	movl	(%rdi,%rcx,4),%edx
+	movl	%ebx,(%rdi,%rcx,4)
+	addb	%dl,%bl
+	movzbl	%bl,%ebx
+	movl	%edx,60(%rsi)
+	leaq	(%rdi,%r10,4),%rsi
+	pinsrw	$7,(%rdi,%rbx,4),%xmm1
+	movl	(%rsi),%eax
+	movq	%rcx,%rbx
+	xorq	%rcx,%rcx
+	subq	$16,%r11
+	movb	%bl,%cl
+	testq	$-16,%r11
+	jnz	.Loop16
+
+	psllq	$8,%xmm1
+	pxor	%xmm0,%xmm2
+	pxor	%xmm1,%xmm2
+	movdqu	%xmm2,(%r13,%r12,1)
+	leaq	16(%r12),%r12
+
+	cmpq	$0,%r11
+	jne	.Lloop1
+	jmp	.Lexit
+
+.align	16
+.Lloop1:
+	addb	%al,%cl
+	movl	(%rdi,%rcx,4),%edx
+	movl	%eax,(%rdi,%rcx,4)
+	movl	%edx,(%rdi,%r10,4)
+	addb	%dl,%al
+	incb	%r10b
+	movl	(%rdi,%rax,4),%edx
+	movl	(%rdi,%r10,4),%eax
+	xorb	(%r12),%dl
+	movb	%dl,(%r13,%r12,1)
+	leaq	1(%r12),%r12
+	decq	%r11
+	jnz	.Lloop1
+	jmp	.Lexit
+
+.align	16
+.LRC4_CHAR:
+	addb	$1,%r10b
+	movzbl	(%rdi,%r10,1),%eax
+	testq	$-8,%r11
+	jz	.Lcloop1
+	jmp	.Lcloop8
+.align	16
+.Lcloop8:
+	movl	(%r12),%r8d
+	movl	4(%r12),%r9d
+	addb	%al,%cl
+	leaq	1(%r10),%rsi
+	movzbl	(%rdi,%rcx,1),%edx
+	movzbl	%sil,%esi
+	movzbl	(%rdi,%rsi,1),%ebx
+	movb	%al,(%rdi,%rcx,1)
+	cmpq	%rsi,%rcx
+	movb	%dl,(%rdi,%r10,1)
+	jne	.Lcmov0			
+	movq	%rax,%rbx
+.Lcmov0:
+	addb	%al,%dl
+	xorb	(%rdi,%rdx,1),%r8b
+	rorl	$8,%r8d
+	addb	%bl,%cl
+	leaq	1(%rsi),%r10
+	movzbl	(%rdi,%rcx,1),%edx
+	movzbl	%r10b,%r10d
+	movzbl	(%rdi,%r10,1),%eax
+	movb	%bl,(%rdi,%rcx,1)
+	cmpq	%r10,%rcx
+	movb	%dl,(%rdi,%rsi,1)
+	jne	.Lcmov1			
+	movq	%rbx,%rax
+.Lcmov1:
+	addb	%bl,%dl
+	xorb	(%rdi,%rdx,1),%r8b
+	rorl	$8,%r8d
+	addb	%al,%cl
+	leaq	1(%r10),%rsi
+	movzbl	(%rdi,%rcx,1),%edx
+	movzbl	%sil,%esi
+	movzbl	(%rdi,%rsi,1),%ebx
+	movb	%al,(%rdi,%rcx,1)
+	cmpq	%rsi,%rcx
+	movb	%dl,(%rdi,%r10,1)
+	jne	.Lcmov2			
+	movq	%rax,%rbx
+.Lcmov2:
+	addb	%al,%dl
+	xorb	(%rdi,%rdx,1),%r8b
+	rorl	$8,%r8d
+	addb	%bl,%cl
+	leaq	1(%rsi),%r10
+	movzbl	(%rdi,%rcx,1),%edx
+	movzbl	%r10b,%r10d
+	movzbl	(%rdi,%r10,1),%eax
+	movb	%bl,(%rdi,%rcx,1)
+	cmpq	%r10,%rcx
+	movb	%dl,(%rdi,%rsi,1)
+	jne	.Lcmov3			
+	movq	%rbx,%rax
+.Lcmov3:
+	addb	%bl,%dl
+	xorb	(%rdi,%rdx,1),%r8b
+	rorl	$8,%r8d
+	addb	%al,%cl
+	leaq	1(%r10),%rsi
+	movzbl	(%rdi,%rcx,1),%edx
+	movzbl	%sil,%esi
+	movzbl	(%rdi,%rsi,1),%ebx
+	movb	%al,(%rdi,%rcx,1)
+	cmpq	%rsi,%rcx
+	movb	%dl,(%rdi,%r10,1)
+	jne	.Lcmov4			
+	movq	%rax,%rbx
+.Lcmov4:
+	addb	%al,%dl
+	xorb	(%rdi,%rdx,1),%r9b
+	rorl	$8,%r9d
+	addb	%bl,%cl
+	leaq	1(%rsi),%r10
+	movzbl	(%rdi,%rcx,1),%edx
+	movzbl	%r10b,%r10d
+	movzbl	(%rdi,%r10,1),%eax
+	movb	%bl,(%rdi,%rcx,1)
+	cmpq	%r10,%rcx
+	movb	%dl,(%rdi,%rsi,1)
+	jne	.Lcmov5			
+	movq	%rbx,%rax
+.Lcmov5:
+	addb	%bl,%dl
+	xorb	(%rdi,%rdx,1),%r9b
+	rorl	$8,%r9d
+	addb	%al,%cl
+	leaq	1(%r10),%rsi
+	movzbl	(%rdi,%rcx,1),%edx
+	movzbl	%sil,%esi
+	movzbl	(%rdi,%rsi,1),%ebx
+	movb	%al,(%rdi,%rcx,1)
+	cmpq	%rsi,%rcx
+	movb	%dl,(%rdi,%r10,1)
+	jne	.Lcmov6			
+	movq	%rax,%rbx
+.Lcmov6:
+	addb	%al,%dl
+	xorb	(%rdi,%rdx,1),%r9b
+	rorl	$8,%r9d
+	addb	%bl,%cl
+	leaq	1(%rsi),%r10
+	movzbl	(%rdi,%rcx,1),%edx
+	movzbl	%r10b,%r10d
+	movzbl	(%rdi,%r10,1),%eax
+	movb	%bl,(%rdi,%rcx,1)
+	cmpq	%r10,%rcx
+	movb	%dl,(%rdi,%rsi,1)
+	jne	.Lcmov7			
+	movq	%rbx,%rax
+.Lcmov7:
+	addb	%bl,%dl
+	xorb	(%rdi,%rdx,1),%r9b
+	rorl	$8,%r9d
+	leaq	-8(%r11),%r11
+	movl	%r8d,(%r13)
+	leaq	8(%r12),%r12
+	movl	%r9d,4(%r13)
+	leaq	8(%r13),%r13
+
+	testq	$-8,%r11
+	jnz	.Lcloop8
+	cmpq	$0,%r11
+	jne	.Lcloop1
+	jmp	.Lexit
+.align	16
+.Lcloop1:
+	addb	%al,%cl
+	movzbl	%cl,%ecx
+	movzbl	(%rdi,%rcx,1),%edx
+	movb	%al,(%rdi,%rcx,1)
+	movb	%dl,(%rdi,%r10,1)
+	addb	%al,%dl
+	addb	$1,%r10b
+	movzbl	%dl,%edx
+	movzbl	%r10b,%r10d
+	movzbl	(%rdi,%rdx,1),%edx
+	movzbl	(%rdi,%r10,1),%eax
+	xorb	(%r12),%dl
+	leaq	1(%r12),%r12
+	movb	%dl,(%r13)
+	leaq	1(%r13),%r13
+	subq	$1,%r11
+	jnz	.Lcloop1
+	jmp	.Lexit
+
+.align	16
+.Lexit:
+	subb	$1,%r10b
+	movl	%r10d,-8(%rdi)
+	movl	%ecx,-4(%rdi)
+
+	movq	(%rsp),%r13
+	movq	8(%rsp),%r12
+	movq	16(%rsp),%rbx
+	addq	$24,%rsp
+.Lepilogue:
+	.byte	0xf3,0xc3
+.size	RC4,.-RC4
+.globl	private_RC4_set_key
+.type	private_RC4_set_key,@function
+.align	16
+private_RC4_set_key:
+	leaq	8(%rdi),%rdi
+	leaq	(%rdx,%rsi,1),%rdx
+	negq	%rsi
+	movq	%rsi,%rcx
+	xorl	%eax,%eax
+	xorq	%r9,%r9
+	xorq	%r10,%r10
+	xorq	%r11,%r11
+
+	movl	OPENSSL_ia32cap_P(%rip),%r8d
+	btl	$20,%r8d
+	jc	.Lc1stloop
+	jmp	.Lw1stloop
+
+.align	16
+.Lw1stloop:
+	movl	%eax,(%rdi,%rax,4)
+	addb	$1,%al
+	jnc	.Lw1stloop
+
+	xorq	%r9,%r9
+	xorq	%r8,%r8
+.align	16
+.Lw2ndloop:
+	movl	(%rdi,%r9,4),%r10d
+	addb	(%rdx,%rsi,1),%r8b
+	addb	%r10b,%r8b
+	addq	$1,%rsi
+	movl	(%rdi,%r8,4),%r11d
+	cmovzq	%rcx,%rsi
+	movl	%r10d,(%rdi,%r8,4)
+	movl	%r11d,(%rdi,%r9,4)
+	addb	$1,%r9b
+	jnc	.Lw2ndloop
+	jmp	.Lexit_key
+
+.align	16
+.Lc1stloop:
+	movb	%al,(%rdi,%rax,1)
+	addb	$1,%al
+	jnc	.Lc1stloop
+
+	xorq	%r9,%r9
+	xorq	%r8,%r8
+.align	16
+.Lc2ndloop:
+	movb	(%rdi,%r9,1),%r10b
+	addb	(%rdx,%rsi,1),%r8b
+	addb	%r10b,%r8b
+	addq	$1,%rsi
+	movb	(%rdi,%r8,1),%r11b
+	jnz	.Lcnowrap
+	movq	%rcx,%rsi
+.Lcnowrap:
+	movb	%r10b,(%rdi,%r8,1)
+	movb	%r11b,(%rdi,%r9,1)
+	addb	$1,%r9b
+	jnc	.Lc2ndloop
+	movl	$-1,256(%rdi)
+
+.align	16
+.Lexit_key:
+	xorl	%eax,%eax
+	movl	%eax,-8(%rdi)
+	movl	%eax,-4(%rdi)
+	.byte	0xf3,0xc3
+.size	private_RC4_set_key,.-private_RC4_set_key
+
+.globl	RC4_options
+.type	RC4_options,@function
+.align	16
+RC4_options:
+	leaq	.Lopts(%rip),%rax
+	movl	OPENSSL_ia32cap_P(%rip),%edx
+	btl	$20,%edx
+	jc	.L8xchar
+	btl	$30,%edx
+	jnc	.Ldone
+	addq	$25,%rax
+	.byte	0xf3,0xc3
+.L8xchar:
+	addq	$12,%rax
+.Ldone:
+	.byte	0xf3,0xc3
+.align	64
+.Lopts:
+.byte	114,99,52,40,56,120,44,105,110,116,41,0
+.byte	114,99,52,40,56,120,44,99,104,97,114,41,0
+.byte	114,99,52,40,49,54,120,44,105,110,116,41,0
+.byte	82,67,52,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
+.align	64
+.size	RC4_options,.-RC4_options
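
For orientation in the generated rc4-x86_64.S above: RC4_options() picks its answer string off two OPENSSL_ia32cap_P bits, with bit 20 flagging the P4/RC4_CHAR path and bit 30 flagging other Intel cores that take the new 16x path. Here is a small Perl sketch of that dispatch (illustration only; the bit meanings are the ones used by the assembly above).

#!/usr/bin/env perl
# Illustration only: the string-selection logic of RC4_options() in
# rc4-x86_64.S, expressed as plain Perl.  Bit 20 of OPENSSL_ia32cap_P
# selects the RC4_CHAR path; otherwise bit 30 (Intel) selects the 16x
# int path; the default is the 8x int path.
sub rc4_options {
    my $ia32cap = shift;                  # low 32 bits of OPENSSL_ia32cap_P
    return "rc4(8x,char)" if ($ia32cap & (1 << 20));
    return "rc4(16x,int)" if ($ia32cap & (1 << 30));
    return "rc4(8x,int)";
}
print rc4_options(0),       "\n";         # rc4(8x,int)
print rc4_options(1 << 30), "\n";         # rc4(16x,int)
print rc4_options(1 << 20), "\n";         # rc4(8x,char)
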
diff --git a/jni/libopenssl/crypto/rc4/asm/rc4-x86_64.pl b/jni/libopenssl/crypto/rc4/asm/rc4-x86_64.pl
old mode 100755
new mode 100644
index 677be5f..20722d3
--- a/jni/libopenssl/crypto/rc4/asm/rc4-x86_64.pl
+++ b/jni/libopenssl/crypto/rc4/asm/rc4-x86_64.pl
@@ -7,6 +7,8 @@
 # details see http://www.openssl.org/~appro/cryptogams/.
 # ====================================================================
 #
+# July 2004
+#
 # 2.22x RC4 tune-up:-) It should be noted though that my hand [as in
 # "hand-coded assembler"] doesn't stand for the whole improvement
 # coefficient. It turned out that eliminating RC4_CHAR from config
@@ -19,6 +21,8 @@
 # to operate on partial registers, it turned out to be the best bet.
 # At least for AMD... How IA32E would perform remains to be seen...
 
+# November 2004
+#
 # As was shown by Marc Bevand reordering of couple of load operations
 # results in even higher performance gain of 3.3x:-) At least on
 # Opteron... For reference, 1x in this case is RC4_CHAR C-code
@@ -26,6 +30,8 @@
 # Latter means that if you want to *estimate* what to expect from
 # *your* Opteron, then multiply 54 by 3.3 and clock frequency in GHz.
 
+# November 2004
+#
 # Intel P4 EM64T core was found to run the AMD64 code really slow...
 # The only way to achieve comparable performance on P4 was to keep
 # RC4_CHAR. Kind of ironic, huh? As it's apparently impossible to
@@ -33,10 +39,14 @@
 # on either AMD and Intel platforms, I implement both cases. See
 # rc4_skey.c for further details...
 
+# April 2005
+#
 # P4 EM64T core appears to be "allergic" to 64-bit inc/dec. Replacing 
 # those with add/sub results in 50% performance improvement of folded
 # loop...
 
+# May 2005
+#
 # As was shown by Zou Nanhai loop unrolling can improve Intel EM64T
 # performance by >30% [unlike P4 32-bit case that is]. But this is
 # provided that loads are reordered even more aggressively! Both code
@@ -46,10 +56,12 @@
 # achieves respectful 432MBps on 2.8GHz processor now. For reference.
 # If executed on Xeon, current RC4_CHAR code-path is 2.7x faster than
 # RC4_INT code-path. While if executed on Opteron, it's only 25%
-# slower than the RC4_INT one [meaning that if CPU µ-arch detection
+# slower than the RC4_INT one [meaning that if CPU µ-arch detection
 # is not implemented, then this final RC4_CHAR code-path should be
 # preferred, as it provides better *all-round* performance].
 
+# March 2007
+#
 # Intel Core2 was observed to perform poorly on both code paths:-( It
 # apparently suffers from some kind of partial register stall, which
 # occurs in 64-bit mode only [as virtually identical 32-bit loop was
@@ -58,6 +70,37 @@
 # fit for Core2 and therefore the code was modified to skip cloop8 on
 # this CPU.
 
+# May 2010
+#
+# Intel Westmere was observed to perform suboptimally. Adding yet
+# another movzb to cloop1 improved performance by almost 50%! Core2
+# performance is improved too, but nominally...
+
+# May 2011
+#
+# The only code path that was not modified is the P4-specific one. Non-P4
+# Intel code path optimization is heavily based on submission by Maxim
+# Perminov, Maxim Locktyukhin and Jim Guilford of Intel. I've used
+# some of the ideas even in an attempt to optimize the original RC4_INT
+# code path... Current performance in cycles per processed byte (less
+# is better) and improvement coefficients relative to previous
+# version of this module are:
+#
+# Opteron	5.3/+0%(*)
+# P4		6.5
+# Core2		6.2/+15%(**)
+# Westmere	4.2/+60%
+# Sandy Bridge	4.2/+120%
+# Atom		9.3/+80%
+#
+# (*)	But the corresponding loop has fewer instructions, which should have
+#	positive effect on upcoming Bulldozer, which has one less ALU.
+#	For reference, Intel code runs at 6.8 cpb rate on Opteron.
+# (**)	Note that Core2 result is ~15% lower than corresponding result
+#	for 32-bit code, meaning that it's possible to improve it,
+#	but more than likely at the cost of the others (see rc4-586.pl
+#	to get the idea)...
+
 $flavour = shift;
 $output  = shift;
 if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
@@ -69,20 +112,18 @@
 ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
 die "can't locate x86_64-xlate.pl";
 
-open STDOUT,"| $^X $xlate $flavour $output";
+open OUT,"| \"$^X\" $xlate $flavour $output";
+*STDOUT=*OUT;
 
 $dat="%rdi";	    # arg1
 $len="%rsi";	    # arg2
 $inp="%rdx";	    # arg3
 $out="%rcx";	    # arg4
 
-@XX=("%r8","%r10");
-@TX=("%r9","%r11");
-$YY="%r12";
-$TY="%r13";
-
+{
 $code=<<___;
 .text
+.extern	OPENSSL_ia32cap_P
 
 .globl	RC4
 .type	RC4,\@function,4
@@ -95,48 +136,173 @@
 	push	%r12
 	push	%r13
 .Lprologue:
+	mov	$len,%r11
+	mov	$inp,%r12
+	mov	$out,%r13
+___
+my $len="%r11";		# reassign input arguments
+my $inp="%r12";
+my $out="%r13";
 
-	add	\$8,$dat
-	movl	-8($dat),$XX[0]#d
-	movl	-4($dat),$YY#d
+my @XX=("%r10","%rsi");
+my @TX=("%rax","%rbx");
+my $YY="%rcx";
+my $TY="%rdx";
+
+$code.=<<___;
+	xor	$XX[0],$XX[0]
+	xor	$YY,$YY
+
+	lea	8($dat),$dat
+	mov	-8($dat),$XX[0]#b
+	mov	-4($dat),$YY#b
 	cmpl	\$-1,256($dat)
 	je	.LRC4_CHAR
+	mov	OPENSSL_ia32cap_P(%rip),%r8d
+	xor	$TX[1],$TX[1]
 	inc	$XX[0]#b
+	sub	$XX[0],$TX[1]
+	sub	$inp,$out
 	movl	($dat,$XX[0],4),$TX[0]#d
-	test	\$-8,$len
+	test	\$-16,$len
 	jz	.Lloop1
-	jmp	.Lloop8
+	bt	\$30,%r8d	# Intel CPU?
+	jc	.Lintel
+	and	\$7,$TX[1]
+	lea	1($XX[0]),$XX[1]
+	jz	.Loop8
+	sub	$TX[1],$len
+.Loop8_warmup:
+	add	$TX[0]#b,$YY#b
+	movl	($dat,$YY,4),$TY#d
+	movl	$TX[0]#d,($dat,$YY,4)
+	movl	$TY#d,($dat,$XX[0],4)
+	add	$TY#b,$TX[0]#b
+	inc	$XX[0]#b
+	movl	($dat,$TX[0],4),$TY#d
+	movl	($dat,$XX[0],4),$TX[0]#d
+	xorb	($inp),$TY#b
+	movb	$TY#b,($out,$inp)
+	lea	1($inp),$inp
+	dec	$TX[1]
+	jnz	.Loop8_warmup
+
+	lea	1($XX[0]),$XX[1]
+	jmp	.Loop8
 .align	16
-.Lloop8:
+.Loop8:
 ___
 for ($i=0;$i<8;$i++) {
+$code.=<<___ if ($i==7);
+	add	\$8,$XX[1]#b
+___
 $code.=<<___;
 	add	$TX[0]#b,$YY#b
-	mov	$XX[0],$XX[1]
 	movl	($dat,$YY,4),$TY#d
-	ror	\$8,%rax			# ror is redundant when $i=0
-	inc	$XX[1]#b
-	movl	($dat,$XX[1],4),$TX[1]#d
-	cmp	$XX[1],$YY
 	movl	$TX[0]#d,($dat,$YY,4)
-	cmove	$TX[0],$TX[1]
-	movl	$TY#d,($dat,$XX[0],4)
+	movl	`4*($i==7?-1:$i)`($dat,$XX[1],4),$TX[1]#d
+	ror	\$8,%r8				# ror is redundant when $i=0
+	movl	$TY#d,4*$i($dat,$XX[0],4)
 	add	$TX[0]#b,$TY#b
-	movb	($dat,$TY,4),%al
+	movb	($dat,$TY,4),%r8b
 ___
-push(@TX,shift(@TX)); push(@XX,shift(@XX));	# "rotate" registers
+push(@TX,shift(@TX)); #push(@XX,shift(@XX));	# "rotate" registers
 }
 $code.=<<___;
-	ror	\$8,%rax
+	add	\$8,$XX[0]#b
+	ror	\$8,%r8
 	sub	\$8,$len
 
-	xor	($inp),%rax
-	add	\$8,$inp
-	mov	%rax,($out)
-	add	\$8,$out
+	xor	($inp),%r8
+	mov	%r8,($out,$inp)
+	lea	8($inp),$inp
 
 	test	\$-8,$len
-	jnz	.Lloop8
+	jnz	.Loop8
+	cmp	\$0,$len
+	jne	.Lloop1
+	jmp	.Lexit
+
+.align	16
+.Lintel:
+	test	\$-32,$len
+	jz	.Lloop1
+	and	\$15,$TX[1]
+	jz	.Loop16_is_hot
+	sub	$TX[1],$len
+.Loop16_warmup:
+	add	$TX[0]#b,$YY#b
+	movl	($dat,$YY,4),$TY#d
+	movl	$TX[0]#d,($dat,$YY,4)
+	movl	$TY#d,($dat,$XX[0],4)
+	add	$TY#b,$TX[0]#b
+	inc	$XX[0]#b
+	movl	($dat,$TX[0],4),$TY#d
+	movl	($dat,$XX[0],4),$TX[0]#d
+	xorb	($inp),$TY#b
+	movb	$TY#b,($out,$inp)
+	lea	1($inp),$inp
+	dec	$TX[1]
+	jnz	.Loop16_warmup
+
+	mov	$YY,$TX[1]
+	xor	$YY,$YY
+	mov	$TX[1]#b,$YY#b
+
+.Loop16_is_hot:
+	lea	($dat,$XX[0],4),$XX[1]
+___
+sub RC4_loop {
+  my $i=shift;
+  my $j=$i<0?0:$i;
+  my $xmm="%xmm".($j&1);
+
+    $code.="	add	\$16,$XX[0]#b\n"		if ($i==15);
+    $code.="	movdqu	($inp),%xmm2\n"			if ($i==15);
+    $code.="	add	$TX[0]#b,$YY#b\n"		if ($i<=0);
+    $code.="	movl	($dat,$YY,4),$TY#d\n";
+    $code.="	pxor	%xmm0,%xmm2\n"			if ($i==0);
+    $code.="	psllq	\$8,%xmm1\n"			if ($i==0);
+    $code.="	pxor	$xmm,$xmm\n"			if ($i<=1);
+    $code.="	movl	$TX[0]#d,($dat,$YY,4)\n";
+    $code.="	add	$TY#b,$TX[0]#b\n";
+    $code.="	movl	`4*($j+1)`($XX[1]),$TX[1]#d\n"	if ($i<15);
+    $code.="	movz	$TX[0]#b,$TX[0]#d\n";
+    $code.="	movl	$TY#d,4*$j($XX[1])\n";
+    $code.="	pxor	%xmm1,%xmm2\n"			if ($i==0);
+    $code.="	lea	($dat,$XX[0],4),$XX[1]\n"	if ($i==15);
+    $code.="	add	$TX[1]#b,$YY#b\n"		if ($i<15);
+    $code.="	pinsrw	\$`($j>>1)&7`,($dat,$TX[0],4),$xmm\n";
+    $code.="	movdqu	%xmm2,($out,$inp)\n"		if ($i==0);
+    $code.="	lea	16($inp),$inp\n"		if ($i==0);
+    $code.="	movl	($XX[1]),$TX[1]#d\n"		if ($i==15);
+}
+	RC4_loop(-1);
+$code.=<<___;
+	jmp	.Loop16_enter
+.align	16
+.Loop16:
+___
+
+for ($i=0;$i<16;$i++) {
+    $code.=".Loop16_enter:\n"		if ($i==1);
+	RC4_loop($i);
+	push(@TX,shift(@TX)); 		# "rotate" registers
+}
+$code.=<<___;
+	mov	$YY,$TX[1]
+	xor	$YY,$YY			# keyword to partial register
+	sub	\$16,$len
+	mov	$TX[1]#b,$YY#b
+	test	\$-16,$len
+	jnz	.Loop16
+
+	psllq	\$8,%xmm1
+	pxor	%xmm0,%xmm2
+	pxor	%xmm1,%xmm2
+	movdqu	%xmm2,($out,$inp)
+	lea	16($inp),$inp
+
 	cmp	\$0,$len
 	jne	.Lloop1
 	jmp	.Lexit
@@ -152,9 +318,8 @@
 	movl	($dat,$TX[0],4),$TY#d
 	movl	($dat,$XX[0],4),$TX[0]#d
 	xorb	($inp),$TY#b
-	inc	$inp
-	movb	$TY#b,($out)
-	inc	$out
+	movb	$TY#b,($out,$inp)
+	lea	1($inp),$inp
 	dec	$len
 	jnz	.Lloop1
 	jmp	.Lexit
@@ -165,13 +330,11 @@
 	movzb	($dat,$XX[0]),$TX[0]#d
 	test	\$-8,$len
 	jz	.Lcloop1
-	cmpl	\$0,260($dat)
-	jnz	.Lcloop1
 	jmp	.Lcloop8
 .align	16
 .Lcloop8:
-	mov	($inp),%eax
-	mov	4($inp),%ebx
+	mov	($inp),%r8d
+	mov	4($inp),%r9d
 ___
 # unroll 2x4-wise, because 64-bit rotates kill Intel P4...
 for ($i=0;$i<4;$i++) {
@@ -188,8 +351,8 @@
 	mov	$TX[0],$TX[1]
 .Lcmov$i:
 	add	$TX[0]#b,$TY#b
-	xor	($dat,$TY),%al
-	ror	\$8,%eax
+	xor	($dat,$TY),%r8b
+	ror	\$8,%r8d
 ___
 push(@TX,shift(@TX)); push(@XX,shift(@XX));	# "rotate" registers
 }
@@ -207,16 +370,16 @@
 	mov	$TX[0],$TX[1]
 .Lcmov$i:
 	add	$TX[0]#b,$TY#b
-	xor	($dat,$TY),%bl
-	ror	\$8,%ebx
+	xor	($dat,$TY),%r9b
+	ror	\$8,%r9d
 ___
 push(@TX,shift(@TX)); push(@XX,shift(@XX));	# "rotate" registers
 }
 $code.=<<___;
 	lea	-8($len),$len
-	mov	%eax,($out)
+	mov	%r8d,($out)
 	lea	8($inp),$inp
-	mov	%ebx,4($out)
+	mov	%r9d,4($out)
 	lea	8($out),$out
 
 	test	\$-8,$len
@@ -229,6 +392,7 @@
 .align	16
 .Lcloop1:
 	add	$TX[0]#b,$YY#b
+	movzb	$YY#b,$YY#d
 	movzb	($dat,$YY),$TY#d
 	movb	$TX[0]#b,($dat,$YY)
 	movb	$TY#b,($dat,$XX[0])
@@ -260,16 +424,16 @@
 	ret
 .size	RC4,.-RC4
 ___
+}
 
 $idx="%r8";
 $ido="%r9";
 
 $code.=<<___;
-.extern	OPENSSL_ia32cap_P
-.globl	RC4_set_key
-.type	RC4_set_key,\@function,3
+.globl	private_RC4_set_key
+.type	private_RC4_set_key,\@function,3
 .align	16
-RC4_set_key:
+private_RC4_set_key:
 	lea	8($dat),$dat
 	lea	($inp,$len),$inp
 	neg	$len
@@ -280,12 +444,9 @@
 	xor	%r11,%r11
 
 	mov	OPENSSL_ia32cap_P(%rip),$idx#d
-	bt	\$20,$idx#d
-	jnc	.Lw1stloop
-	bt	\$30,$idx#d
-	setc	$ido#b
-	mov	$ido#d,260($dat)
-	jmp	.Lc1stloop
+	bt	\$20,$idx#d	# RC4_CHAR?
+	jc	.Lc1stloop
+	jmp	.Lw1stloop
 
 .align	16
 .Lw1stloop:
@@ -339,7 +500,7 @@
 	mov	%eax,-8($dat)
 	mov	%eax,-4($dat)
 	ret
-.size	RC4_set_key,.-RC4_set_key
+.size	private_RC4_set_key,.-private_RC4_set_key
 
 .globl	RC4_options
 .type	RC4_options,\@abi-omnipotent
@@ -348,18 +509,20 @@
 	lea	.Lopts(%rip),%rax
 	mov	OPENSSL_ia32cap_P(%rip),%edx
 	bt	\$20,%edx
-	jnc	.Ldone
-	add	\$12,%rax
+	jc	.L8xchar
 	bt	\$30,%edx
 	jnc	.Ldone
-	add	\$13,%rax
+	add	\$25,%rax
+	ret
+.L8xchar:
+	add	\$12,%rax
 .Ldone:
 	ret
 .align	64
 .Lopts:
 .asciz	"rc4(8x,int)"
 .asciz	"rc4(8x,char)"
-.asciz	"rc4(1x,char)"
+.asciz	"rc4(16x,int)"
 .asciz	"RC4 for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
 .align	64
 .size	RC4_options,.-RC4_options
@@ -482,22 +645,32 @@
 	.rva	.LSEH_end_RC4
 	.rva	.LSEH_info_RC4
 
-	.rva	.LSEH_begin_RC4_set_key
-	.rva	.LSEH_end_RC4_set_key
-	.rva	.LSEH_info_RC4_set_key
+	.rva	.LSEH_begin_private_RC4_set_key
+	.rva	.LSEH_end_private_RC4_set_key
+	.rva	.LSEH_info_private_RC4_set_key
 
 .section	.xdata
 .align	8
 .LSEH_info_RC4:
 	.byte	9,0,0,0
 	.rva	stream_se_handler
-.LSEH_info_RC4_set_key:
+.LSEH_info_private_RC4_set_key:
 	.byte	9,0,0,0
 	.rva	key_se_handler
 ___
 }
 
-$code =~ s/#([bwd])/$1/gm;
+sub reg_part {
+my ($reg,$conv)=@_;
+    if ($reg =~ /%r[0-9]+/)	{ $reg .= $conv; }
+    elsif ($conv eq "b")	{ $reg =~ s/%[er]([^x]+)x?/%$1l/;	}
+    elsif ($conv eq "w")	{ $reg =~ s/%[er](.+)/%$1/;		}
+    elsif ($conv eq "d")	{ $reg =~ s/%[er](.+)/%e$1/;		}
+    return $reg;
+}
+
+$code =~ s/(%[a-z0-9]+)#([bwd])/reg_part($1,$2)/gem;
+$code =~ s/\`([^\`]*)\`/eval $1/gem;
 
 print $code;