#14371: Add OpenSSL to the main repository
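
This change vendors the OpenSSL sources under `jni/openssl/`. As a quick orientation for reviewers, here is a minimal sketch of how the imported AES API (declared in `jni/openssl/crypto/aes/aes.h`) is called. The key and plaintext values are made-up placeholders and the snippet is not part of the imported files; it assumes the OpenSSL headers are on the include path and the crypto sources are linked into the build.

```c
/* Usage sketch only -- not part of the import. Key/plaintext are placeholders. */
#include <string.h>
#include <openssl/aes.h>

int main(void)
{
    unsigned char key_bytes[16] = "0123456789abcdef";         /* 128-bit example key */
    unsigned char plain[AES_BLOCK_SIZE] = "exactly 16 bytes"; /* one 16-byte block */
    unsigned char cipher[AES_BLOCK_SIZE], decrypted[AES_BLOCK_SIZE];
    AES_KEY enc_key, dec_key;

    /* Key schedules: the setters return 0 on success, negative on bad input. */
    if (AES_set_encrypt_key(key_bytes, 128, &enc_key) != 0 ||
        AES_set_decrypt_key(key_bytes, 128, &dec_key) != 0)
        return 1;

    AES_encrypt(plain, cipher, &enc_key);     /* encrypt a single block */
    AES_decrypt(cipher, decrypted, &dec_key); /* and invert it */

    return memcmp(plain, decrypted, AES_BLOCK_SIZE) != 0;
}
```

Note that the mode wrappers being added (`AES_cbc_encrypt`, `AES_cfb128_encrypt`, ...) forward to the generic `CRYPTO_*128_*` helpers from `openssl/modes.h`, so `aes_core.c` only needs to provide the block primitives and the key schedule.
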
diff --git a/jni/openssl/crypto/aes/README b/jni/openssl/crypto/aes/README
new file mode 100644
index 0000000..0f9620a
--- /dev/null
+++ b/jni/openssl/crypto/aes/README
@@ -0,0 +1,3 @@
+This is an OpenSSL-compatible version of AES (also called Rijndael).
+aes_core.c is basically the same as rijndael-alg-fst.c but with an
+API that looks like the rest of the OpenSSL symmetric cipher suite.
diff --git a/jni/openssl/crypto/aes/aes.h b/jni/openssl/crypto/aes/aes.h
new file mode 100644
index 0000000..031abf0
--- /dev/null
+++ b/jni/openssl/crypto/aes/aes.h
@@ -0,0 +1,147 @@
+/* crypto/aes/aes.h -*- mode:C; c-file-style: "eay" -*- */
+/* ====================================================================
+ * Copyright (c) 1998-2002 The OpenSSL Project.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer. 
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * 3. All advertising materials mentioning features or use of this
+ *    software must display the following acknowledgment:
+ *    "This product includes software developed by the OpenSSL Project
+ *    for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
+ *
+ * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
+ *    endorse or promote products derived from this software without
+ *    prior written permission. For written permission, please contact
+ *    openssl-core@openssl.org.
+ *
+ * 5. Products derived from this software may not be called "OpenSSL"
+ *    nor may "OpenSSL" appear in their names without prior written
+ *    permission of the OpenSSL Project.
+ *
+ * 6. Redistributions of any form whatsoever must retain the following
+ *    acknowledgment:
+ *    "This product includes software developed by the OpenSSL Project
+ *    for use in the OpenSSL Toolkit (http://www.openssl.org/)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
+ * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR
+ * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ * ====================================================================
+ *
+ */
+
+#ifndef HEADER_AES_H
+#define HEADER_AES_H
+
+#include <openssl/opensslconf.h>
+
+#ifdef OPENSSL_NO_AES
+#error AES is disabled.
+#endif
+
+#include <stddef.h>
+
+#define AES_ENCRYPT	1
+#define AES_DECRYPT	0
+
+/* Because array size can't be a const in C, the following two are macros.
+   AES_MAXNR is the maximum number of rounds; AES_BLOCK_SIZE is in bytes. */
+#define AES_MAXNR 14
+#define AES_BLOCK_SIZE 16
+
+#ifdef  __cplusplus
+extern "C" {
+#endif
+
+/* This should be a hidden type, but EVP requires that the size be known */
+struct aes_key_st {
+#ifdef AES_LONG
+    unsigned long rd_key[4 *(AES_MAXNR + 1)];
+#else
+    unsigned int rd_key[4 *(AES_MAXNR + 1)];
+#endif
+    int rounds;
+};
+typedef struct aes_key_st AES_KEY;
+
+const char *AES_options(void);
+
+int AES_set_encrypt_key(const unsigned char *userKey, const int bits,
+	AES_KEY *key);
+int AES_set_decrypt_key(const unsigned char *userKey, const int bits,
+	AES_KEY *key);
+
+int private_AES_set_encrypt_key(const unsigned char *userKey, const int bits,
+	AES_KEY *key);
+int private_AES_set_decrypt_key(const unsigned char *userKey, const int bits,
+	AES_KEY *key);
+
+void AES_encrypt(const unsigned char *in, unsigned char *out,
+	const AES_KEY *key);
+void AES_decrypt(const unsigned char *in, unsigned char *out,
+	const AES_KEY *key);
+
+void AES_ecb_encrypt(const unsigned char *in, unsigned char *out,
+	const AES_KEY *key, const int enc);
+void AES_cbc_encrypt(const unsigned char *in, unsigned char *out,
+	size_t length, const AES_KEY *key,
+	unsigned char *ivec, const int enc);
+void AES_cfb128_encrypt(const unsigned char *in, unsigned char *out,
+	size_t length, const AES_KEY *key,
+	unsigned char *ivec, int *num, const int enc);
+void AES_cfb1_encrypt(const unsigned char *in, unsigned char *out,
+	size_t length, const AES_KEY *key,
+	unsigned char *ivec, int *num, const int enc);
+void AES_cfb8_encrypt(const unsigned char *in, unsigned char *out,
+	size_t length, const AES_KEY *key,
+	unsigned char *ivec, int *num, const int enc);
+void AES_ofb128_encrypt(const unsigned char *in, unsigned char *out,
+	size_t length, const AES_KEY *key,
+	unsigned char *ivec, int *num);
+void AES_ctr128_encrypt(const unsigned char *in, unsigned char *out,
+	size_t length, const AES_KEY *key,
+	unsigned char ivec[AES_BLOCK_SIZE],
+	unsigned char ecount_buf[AES_BLOCK_SIZE],
+	unsigned int *num);
+/* NB: the IV is _two_ blocks long */
+void AES_ige_encrypt(const unsigned char *in, unsigned char *out,
+		     size_t length, const AES_KEY *key,
+		     unsigned char *ivec, const int enc);
+/* NB: the IV is _four_ blocks long */
+void AES_bi_ige_encrypt(const unsigned char *in, unsigned char *out,
+			size_t length, const AES_KEY *key,
+			const AES_KEY *key2, const unsigned char *ivec,
+			const int enc);
+
+int AES_wrap_key(AES_KEY *key, const unsigned char *iv,
+		unsigned char *out,
+		const unsigned char *in, unsigned int inlen);
+int AES_unwrap_key(AES_KEY *key, const unsigned char *iv,
+		unsigned char *out,
+		const unsigned char *in, unsigned int inlen);
+
+
+#ifdef  __cplusplus
+}
+#endif
+
+#endif /* !HEADER_AES_H */
diff --git a/jni/openssl/crypto/aes/aes_cbc.c b/jni/openssl/crypto/aes/aes_cbc.c
new file mode 100644
index 0000000..227f756
--- /dev/null
+++ b/jni/openssl/crypto/aes/aes_cbc.c
@@ -0,0 +1,63 @@
+/* crypto/aes/aes_cbc.c -*- mode:C; c-file-style: "eay" -*- */
+/* ====================================================================
+ * Copyright (c) 1998-2002 The OpenSSL Project.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer. 
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * 3. All advertising materials mentioning features or use of this
+ *    software must display the following acknowledgment:
+ *    "This product includes software developed by the OpenSSL Project
+ *    for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
+ *
+ * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
+ *    endorse or promote products derived from this software without
+ *    prior written permission. For written permission, please contact
+ *    openssl-core@openssl.org.
+ *
+ * 5. Products derived from this software may not be called "OpenSSL"
+ *    nor may "OpenSSL" appear in their names without prior written
+ *    permission of the OpenSSL Project.
+ *
+ * 6. Redistributions of any form whatsoever must retain the following
+ *    acknowledgment:
+ *    "This product includes software developed by the OpenSSL Project
+ *    for use in the OpenSSL Toolkit (http://www.openssl.org/)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
+ * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR
+ * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ * ====================================================================
+ *
+ */
+
+#include <openssl/aes.h>
+#include <openssl/modes.h>
+
+void AES_cbc_encrypt(const unsigned char *in, unsigned char *out,
+		     size_t len, const AES_KEY *key,
+		     unsigned char *ivec, const int enc) {
+
+	if (enc)
+		CRYPTO_cbc128_encrypt(in,out,len,key,ivec,(block128_f)AES_encrypt);
+	else
+		CRYPTO_cbc128_decrypt(in,out,len,key,ivec,(block128_f)AES_decrypt);
+}
diff --git a/jni/openssl/crypto/aes/aes_cfb.c b/jni/openssl/crypto/aes/aes_cfb.c
new file mode 100644
index 0000000..0c6d058
--- /dev/null
+++ b/jni/openssl/crypto/aes/aes_cfb.c
@@ -0,0 +1,81 @@
+/* crypto/aes/aes_cfb.c -*- mode:C; c-file-style: "eay" -*- */
+/* ====================================================================
+ * Copyright (c) 2002-2006 The OpenSSL Project.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer. 
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * 3. All advertising materials mentioning features or use of this
+ *    software must display the following acknowledgment:
+ *    "This product includes software developed by the OpenSSL Project
+ *    for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
+ *
+ * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
+ *    endorse or promote products derived from this software without
+ *    prior written permission. For written permission, please contact
+ *    openssl-core@openssl.org.
+ *
+ * 5. Products derived from this software may not be called "OpenSSL"
+ *    nor may "OpenSSL" appear in their names without prior written
+ *    permission of the OpenSSL Project.
+ *
+ * 6. Redistributions of any form whatsoever must retain the following
+ *    acknowledgment:
+ *    "This product includes software developed by the OpenSSL Project
+ *    for use in the OpenSSL Toolkit (http://www.openssl.org/)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
+ * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR
+ * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ * ====================================================================
+ *
+ */
+
+#include <openssl/aes.h>
+#include <openssl/modes.h>
+
+/* The input and output are encrypted as though 128-bit CFB mode is being
+ * used.  The extra state information to record how much of the
+ * 128-bit block we have used is contained in *num.
+ */
+
+void AES_cfb128_encrypt(const unsigned char *in, unsigned char *out,
+	size_t length, const AES_KEY *key,
+	unsigned char *ivec, int *num, const int enc) {
+
+	CRYPTO_cfb128_encrypt(in,out,length,key,ivec,num,enc,(block128_f)AES_encrypt);
+}
+
+/* N.B. This expects the input to be packed, MS bit first */
+void AES_cfb1_encrypt(const unsigned char *in, unsigned char *out,
+		      size_t length, const AES_KEY *key,
+		      unsigned char *ivec, int *num, const int enc)
+    {
+    CRYPTO_cfb128_1_encrypt(in,out,length,key,ivec,num,enc,(block128_f)AES_encrypt);
+    }
+
+void AES_cfb8_encrypt(const unsigned char *in, unsigned char *out,
+		      size_t length, const AES_KEY *key,
+		      unsigned char *ivec, int *num, const int enc)
+    {
+    CRYPTO_cfb128_8_encrypt(in,out,length,key,ivec,num,enc,(block128_f)AES_encrypt);
+    }
+
diff --git a/jni/openssl/crypto/aes/aes_core.c b/jni/openssl/crypto/aes/aes_core.c
new file mode 100644
index 0000000..8f5210a
--- /dev/null
+++ b/jni/openssl/crypto/aes/aes_core.c
@@ -0,0 +1,1358 @@
+/* crypto/aes/aes_core.c -*- mode:C; c-file-style: "eay" -*- */
+/**
+ * rijndael-alg-fst.c
+ *
+ * @version 3.0 (December 2000)
+ *
+ * Optimised ANSI C code for the Rijndael cipher (now AES)
+ *
+ * @author Vincent Rijmen <vincent.rijmen@esat.kuleuven.ac.be>
+ * @author Antoon Bosselaers <antoon.bosselaers@esat.kuleuven.ac.be>
+ * @author Paulo Barreto <paulo.barreto@terra.com.br>
+ *
+ * This code is hereby placed in the public domain.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ''AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* Note: rewritten a little bit to provide error control and an OpenSSL-
+   compatible API */
+
+#ifndef AES_DEBUG
+# ifndef NDEBUG
+#  define NDEBUG
+# endif
+#endif
+#include <assert.h>
+
+#include <stdlib.h>
+#include <openssl/aes.h>
+#include "aes_locl.h"
+
+#ifndef AES_ASM
+/*
+Te0[x] = S [x].[02, 01, 01, 03];
+Te1[x] = S [x].[03, 02, 01, 01];
+Te2[x] = S [x].[01, 03, 02, 01];
+Te3[x] = S [x].[01, 01, 03, 02];
+
+Td0[x] = Si[x].[0e, 09, 0d, 0b];
+Td1[x] = Si[x].[0b, 0e, 09, 0d];
+Td2[x] = Si[x].[0d, 0b, 0e, 09];
+Td3[x] = Si[x].[09, 0d, 0b, 0e];
+Td4[x] = Si[x].[01];
+*/
+
+static const u32 Te0[256] = {
+    0xc66363a5U, 0xf87c7c84U, 0xee777799U, 0xf67b7b8dU,
+    0xfff2f20dU, 0xd66b6bbdU, 0xde6f6fb1U, 0x91c5c554U,
+    0x60303050U, 0x02010103U, 0xce6767a9U, 0x562b2b7dU,
+    0xe7fefe19U, 0xb5d7d762U, 0x4dababe6U, 0xec76769aU,
+    0x8fcaca45U, 0x1f82829dU, 0x89c9c940U, 0xfa7d7d87U,
+    0xeffafa15U, 0xb25959ebU, 0x8e4747c9U, 0xfbf0f00bU,
+    0x41adadecU, 0xb3d4d467U, 0x5fa2a2fdU, 0x45afafeaU,
+    0x239c9cbfU, 0x53a4a4f7U, 0xe4727296U, 0x9bc0c05bU,
+    0x75b7b7c2U, 0xe1fdfd1cU, 0x3d9393aeU, 0x4c26266aU,
+    0x6c36365aU, 0x7e3f3f41U, 0xf5f7f702U, 0x83cccc4fU,
+    0x6834345cU, 0x51a5a5f4U, 0xd1e5e534U, 0xf9f1f108U,
+    0xe2717193U, 0xabd8d873U, 0x62313153U, 0x2a15153fU,
+    0x0804040cU, 0x95c7c752U, 0x46232365U, 0x9dc3c35eU,
+    0x30181828U, 0x379696a1U, 0x0a05050fU, 0x2f9a9ab5U,
+    0x0e070709U, 0x24121236U, 0x1b80809bU, 0xdfe2e23dU,
+    0xcdebeb26U, 0x4e272769U, 0x7fb2b2cdU, 0xea75759fU,
+    0x1209091bU, 0x1d83839eU, 0x582c2c74U, 0x341a1a2eU,
+    0x361b1b2dU, 0xdc6e6eb2U, 0xb45a5aeeU, 0x5ba0a0fbU,
+    0xa45252f6U, 0x763b3b4dU, 0xb7d6d661U, 0x7db3b3ceU,
+    0x5229297bU, 0xdde3e33eU, 0x5e2f2f71U, 0x13848497U,
+    0xa65353f5U, 0xb9d1d168U, 0x00000000U, 0xc1eded2cU,
+    0x40202060U, 0xe3fcfc1fU, 0x79b1b1c8U, 0xb65b5bedU,
+    0xd46a6abeU, 0x8dcbcb46U, 0x67bebed9U, 0x7239394bU,
+    0x944a4adeU, 0x984c4cd4U, 0xb05858e8U, 0x85cfcf4aU,
+    0xbbd0d06bU, 0xc5efef2aU, 0x4faaaae5U, 0xedfbfb16U,
+    0x864343c5U, 0x9a4d4dd7U, 0x66333355U, 0x11858594U,
+    0x8a4545cfU, 0xe9f9f910U, 0x04020206U, 0xfe7f7f81U,
+    0xa05050f0U, 0x783c3c44U, 0x259f9fbaU, 0x4ba8a8e3U,
+    0xa25151f3U, 0x5da3a3feU, 0x804040c0U, 0x058f8f8aU,
+    0x3f9292adU, 0x219d9dbcU, 0x70383848U, 0xf1f5f504U,
+    0x63bcbcdfU, 0x77b6b6c1U, 0xafdada75U, 0x42212163U,
+    0x20101030U, 0xe5ffff1aU, 0xfdf3f30eU, 0xbfd2d26dU,
+    0x81cdcd4cU, 0x180c0c14U, 0x26131335U, 0xc3ecec2fU,
+    0xbe5f5fe1U, 0x359797a2U, 0x884444ccU, 0x2e171739U,
+    0x93c4c457U, 0x55a7a7f2U, 0xfc7e7e82U, 0x7a3d3d47U,
+    0xc86464acU, 0xba5d5de7U, 0x3219192bU, 0xe6737395U,
+    0xc06060a0U, 0x19818198U, 0x9e4f4fd1U, 0xa3dcdc7fU,
+    0x44222266U, 0x542a2a7eU, 0x3b9090abU, 0x0b888883U,
+    0x8c4646caU, 0xc7eeee29U, 0x6bb8b8d3U, 0x2814143cU,
+    0xa7dede79U, 0xbc5e5ee2U, 0x160b0b1dU, 0xaddbdb76U,
+    0xdbe0e03bU, 0x64323256U, 0x743a3a4eU, 0x140a0a1eU,
+    0x924949dbU, 0x0c06060aU, 0x4824246cU, 0xb85c5ce4U,
+    0x9fc2c25dU, 0xbdd3d36eU, 0x43acacefU, 0xc46262a6U,
+    0x399191a8U, 0x319595a4U, 0xd3e4e437U, 0xf279798bU,
+    0xd5e7e732U, 0x8bc8c843U, 0x6e373759U, 0xda6d6db7U,
+    0x018d8d8cU, 0xb1d5d564U, 0x9c4e4ed2U, 0x49a9a9e0U,
+    0xd86c6cb4U, 0xac5656faU, 0xf3f4f407U, 0xcfeaea25U,
+    0xca6565afU, 0xf47a7a8eU, 0x47aeaee9U, 0x10080818U,
+    0x6fbabad5U, 0xf0787888U, 0x4a25256fU, 0x5c2e2e72U,
+    0x381c1c24U, 0x57a6a6f1U, 0x73b4b4c7U, 0x97c6c651U,
+    0xcbe8e823U, 0xa1dddd7cU, 0xe874749cU, 0x3e1f1f21U,
+    0x964b4bddU, 0x61bdbddcU, 0x0d8b8b86U, 0x0f8a8a85U,
+    0xe0707090U, 0x7c3e3e42U, 0x71b5b5c4U, 0xcc6666aaU,
+    0x904848d8U, 0x06030305U, 0xf7f6f601U, 0x1c0e0e12U,
+    0xc26161a3U, 0x6a35355fU, 0xae5757f9U, 0x69b9b9d0U,
+    0x17868691U, 0x99c1c158U, 0x3a1d1d27U, 0x279e9eb9U,
+    0xd9e1e138U, 0xebf8f813U, 0x2b9898b3U, 0x22111133U,
+    0xd26969bbU, 0xa9d9d970U, 0x078e8e89U, 0x339494a7U,
+    0x2d9b9bb6U, 0x3c1e1e22U, 0x15878792U, 0xc9e9e920U,
+    0x87cece49U, 0xaa5555ffU, 0x50282878U, 0xa5dfdf7aU,
+    0x038c8c8fU, 0x59a1a1f8U, 0x09898980U, 0x1a0d0d17U,
+    0x65bfbfdaU, 0xd7e6e631U, 0x844242c6U, 0xd06868b8U,
+    0x824141c3U, 0x299999b0U, 0x5a2d2d77U, 0x1e0f0f11U,
+    0x7bb0b0cbU, 0xa85454fcU, 0x6dbbbbd6U, 0x2c16163aU,
+};
+static const u32 Te1[256] = {
+    0xa5c66363U, 0x84f87c7cU, 0x99ee7777U, 0x8df67b7bU,
+    0x0dfff2f2U, 0xbdd66b6bU, 0xb1de6f6fU, 0x5491c5c5U,
+    0x50603030U, 0x03020101U, 0xa9ce6767U, 0x7d562b2bU,
+    0x19e7fefeU, 0x62b5d7d7U, 0xe64dababU, 0x9aec7676U,
+    0x458fcacaU, 0x9d1f8282U, 0x4089c9c9U, 0x87fa7d7dU,
+    0x15effafaU, 0xebb25959U, 0xc98e4747U, 0x0bfbf0f0U,
+    0xec41adadU, 0x67b3d4d4U, 0xfd5fa2a2U, 0xea45afafU,
+    0xbf239c9cU, 0xf753a4a4U, 0x96e47272U, 0x5b9bc0c0U,
+    0xc275b7b7U, 0x1ce1fdfdU, 0xae3d9393U, 0x6a4c2626U,
+    0x5a6c3636U, 0x417e3f3fU, 0x02f5f7f7U, 0x4f83ccccU,
+    0x5c683434U, 0xf451a5a5U, 0x34d1e5e5U, 0x08f9f1f1U,
+    0x93e27171U, 0x73abd8d8U, 0x53623131U, 0x3f2a1515U,
+    0x0c080404U, 0x5295c7c7U, 0x65462323U, 0x5e9dc3c3U,
+    0x28301818U, 0xa1379696U, 0x0f0a0505U, 0xb52f9a9aU,
+    0x090e0707U, 0x36241212U, 0x9b1b8080U, 0x3ddfe2e2U,
+    0x26cdebebU, 0x694e2727U, 0xcd7fb2b2U, 0x9fea7575U,
+    0x1b120909U, 0x9e1d8383U, 0x74582c2cU, 0x2e341a1aU,
+    0x2d361b1bU, 0xb2dc6e6eU, 0xeeb45a5aU, 0xfb5ba0a0U,
+    0xf6a45252U, 0x4d763b3bU, 0x61b7d6d6U, 0xce7db3b3U,
+    0x7b522929U, 0x3edde3e3U, 0x715e2f2fU, 0x97138484U,
+    0xf5a65353U, 0x68b9d1d1U, 0x00000000U, 0x2cc1ededU,
+    0x60402020U, 0x1fe3fcfcU, 0xc879b1b1U, 0xedb65b5bU,
+    0xbed46a6aU, 0x468dcbcbU, 0xd967bebeU, 0x4b723939U,
+    0xde944a4aU, 0xd4984c4cU, 0xe8b05858U, 0x4a85cfcfU,
+    0x6bbbd0d0U, 0x2ac5efefU, 0xe54faaaaU, 0x16edfbfbU,
+    0xc5864343U, 0xd79a4d4dU, 0x55663333U, 0x94118585U,
+    0xcf8a4545U, 0x10e9f9f9U, 0x06040202U, 0x81fe7f7fU,
+    0xf0a05050U, 0x44783c3cU, 0xba259f9fU, 0xe34ba8a8U,
+    0xf3a25151U, 0xfe5da3a3U, 0xc0804040U, 0x8a058f8fU,
+    0xad3f9292U, 0xbc219d9dU, 0x48703838U, 0x04f1f5f5U,
+    0xdf63bcbcU, 0xc177b6b6U, 0x75afdadaU, 0x63422121U,
+    0x30201010U, 0x1ae5ffffU, 0x0efdf3f3U, 0x6dbfd2d2U,
+    0x4c81cdcdU, 0x14180c0cU, 0x35261313U, 0x2fc3ececU,
+    0xe1be5f5fU, 0xa2359797U, 0xcc884444U, 0x392e1717U,
+    0x5793c4c4U, 0xf255a7a7U, 0x82fc7e7eU, 0x477a3d3dU,
+    0xacc86464U, 0xe7ba5d5dU, 0x2b321919U, 0x95e67373U,
+    0xa0c06060U, 0x98198181U, 0xd19e4f4fU, 0x7fa3dcdcU,
+    0x66442222U, 0x7e542a2aU, 0xab3b9090U, 0x830b8888U,
+    0xca8c4646U, 0x29c7eeeeU, 0xd36bb8b8U, 0x3c281414U,
+    0x79a7dedeU, 0xe2bc5e5eU, 0x1d160b0bU, 0x76addbdbU,
+    0x3bdbe0e0U, 0x56643232U, 0x4e743a3aU, 0x1e140a0aU,
+    0xdb924949U, 0x0a0c0606U, 0x6c482424U, 0xe4b85c5cU,
+    0x5d9fc2c2U, 0x6ebdd3d3U, 0xef43acacU, 0xa6c46262U,
+    0xa8399191U, 0xa4319595U, 0x37d3e4e4U, 0x8bf27979U,
+    0x32d5e7e7U, 0x438bc8c8U, 0x596e3737U, 0xb7da6d6dU,
+    0x8c018d8dU, 0x64b1d5d5U, 0xd29c4e4eU, 0xe049a9a9U,
+    0xb4d86c6cU, 0xfaac5656U, 0x07f3f4f4U, 0x25cfeaeaU,
+    0xafca6565U, 0x8ef47a7aU, 0xe947aeaeU, 0x18100808U,
+    0xd56fbabaU, 0x88f07878U, 0x6f4a2525U, 0x725c2e2eU,
+    0x24381c1cU, 0xf157a6a6U, 0xc773b4b4U, 0x5197c6c6U,
+    0x23cbe8e8U, 0x7ca1ddddU, 0x9ce87474U, 0x213e1f1fU,
+    0xdd964b4bU, 0xdc61bdbdU, 0x860d8b8bU, 0x850f8a8aU,
+    0x90e07070U, 0x427c3e3eU, 0xc471b5b5U, 0xaacc6666U,
+    0xd8904848U, 0x05060303U, 0x01f7f6f6U, 0x121c0e0eU,
+    0xa3c26161U, 0x5f6a3535U, 0xf9ae5757U, 0xd069b9b9U,
+    0x91178686U, 0x5899c1c1U, 0x273a1d1dU, 0xb9279e9eU,
+    0x38d9e1e1U, 0x13ebf8f8U, 0xb32b9898U, 0x33221111U,
+    0xbbd26969U, 0x70a9d9d9U, 0x89078e8eU, 0xa7339494U,
+    0xb62d9b9bU, 0x223c1e1eU, 0x92158787U, 0x20c9e9e9U,
+    0x4987ceceU, 0xffaa5555U, 0x78502828U, 0x7aa5dfdfU,
+    0x8f038c8cU, 0xf859a1a1U, 0x80098989U, 0x171a0d0dU,
+    0xda65bfbfU, 0x31d7e6e6U, 0xc6844242U, 0xb8d06868U,
+    0xc3824141U, 0xb0299999U, 0x775a2d2dU, 0x111e0f0fU,
+    0xcb7bb0b0U, 0xfca85454U, 0xd66dbbbbU, 0x3a2c1616U,
+};
+static const u32 Te2[256] = {
+    0x63a5c663U, 0x7c84f87cU, 0x7799ee77U, 0x7b8df67bU,
+    0xf20dfff2U, 0x6bbdd66bU, 0x6fb1de6fU, 0xc55491c5U,
+    0x30506030U, 0x01030201U, 0x67a9ce67U, 0x2b7d562bU,
+    0xfe19e7feU, 0xd762b5d7U, 0xabe64dabU, 0x769aec76U,
+    0xca458fcaU, 0x829d1f82U, 0xc94089c9U, 0x7d87fa7dU,
+    0xfa15effaU, 0x59ebb259U, 0x47c98e47U, 0xf00bfbf0U,
+    0xadec41adU, 0xd467b3d4U, 0xa2fd5fa2U, 0xafea45afU,
+    0x9cbf239cU, 0xa4f753a4U, 0x7296e472U, 0xc05b9bc0U,
+    0xb7c275b7U, 0xfd1ce1fdU, 0x93ae3d93U, 0x266a4c26U,
+    0x365a6c36U, 0x3f417e3fU, 0xf702f5f7U, 0xcc4f83ccU,
+    0x345c6834U, 0xa5f451a5U, 0xe534d1e5U, 0xf108f9f1U,
+    0x7193e271U, 0xd873abd8U, 0x31536231U, 0x153f2a15U,
+    0x040c0804U, 0xc75295c7U, 0x23654623U, 0xc35e9dc3U,
+    0x18283018U, 0x96a13796U, 0x050f0a05U, 0x9ab52f9aU,
+    0x07090e07U, 0x12362412U, 0x809b1b80U, 0xe23ddfe2U,
+    0xeb26cdebU, 0x27694e27U, 0xb2cd7fb2U, 0x759fea75U,
+    0x091b1209U, 0x839e1d83U, 0x2c74582cU, 0x1a2e341aU,
+    0x1b2d361bU, 0x6eb2dc6eU, 0x5aeeb45aU, 0xa0fb5ba0U,
+    0x52f6a452U, 0x3b4d763bU, 0xd661b7d6U, 0xb3ce7db3U,
+    0x297b5229U, 0xe33edde3U, 0x2f715e2fU, 0x84971384U,
+    0x53f5a653U, 0xd168b9d1U, 0x00000000U, 0xed2cc1edU,
+    0x20604020U, 0xfc1fe3fcU, 0xb1c879b1U, 0x5bedb65bU,
+    0x6abed46aU, 0xcb468dcbU, 0xbed967beU, 0x394b7239U,
+    0x4ade944aU, 0x4cd4984cU, 0x58e8b058U, 0xcf4a85cfU,
+    0xd06bbbd0U, 0xef2ac5efU, 0xaae54faaU, 0xfb16edfbU,
+    0x43c58643U, 0x4dd79a4dU, 0x33556633U, 0x85941185U,
+    0x45cf8a45U, 0xf910e9f9U, 0x02060402U, 0x7f81fe7fU,
+    0x50f0a050U, 0x3c44783cU, 0x9fba259fU, 0xa8e34ba8U,
+    0x51f3a251U, 0xa3fe5da3U, 0x40c08040U, 0x8f8a058fU,
+    0x92ad3f92U, 0x9dbc219dU, 0x38487038U, 0xf504f1f5U,
+    0xbcdf63bcU, 0xb6c177b6U, 0xda75afdaU, 0x21634221U,
+    0x10302010U, 0xff1ae5ffU, 0xf30efdf3U, 0xd26dbfd2U,
+    0xcd4c81cdU, 0x0c14180cU, 0x13352613U, 0xec2fc3ecU,
+    0x5fe1be5fU, 0x97a23597U, 0x44cc8844U, 0x17392e17U,
+    0xc45793c4U, 0xa7f255a7U, 0x7e82fc7eU, 0x3d477a3dU,
+    0x64acc864U, 0x5de7ba5dU, 0x192b3219U, 0x7395e673U,
+    0x60a0c060U, 0x81981981U, 0x4fd19e4fU, 0xdc7fa3dcU,
+    0x22664422U, 0x2a7e542aU, 0x90ab3b90U, 0x88830b88U,
+    0x46ca8c46U, 0xee29c7eeU, 0xb8d36bb8U, 0x143c2814U,
+    0xde79a7deU, 0x5ee2bc5eU, 0x0b1d160bU, 0xdb76addbU,
+    0xe03bdbe0U, 0x32566432U, 0x3a4e743aU, 0x0a1e140aU,
+    0x49db9249U, 0x060a0c06U, 0x246c4824U, 0x5ce4b85cU,
+    0xc25d9fc2U, 0xd36ebdd3U, 0xacef43acU, 0x62a6c462U,
+    0x91a83991U, 0x95a43195U, 0xe437d3e4U, 0x798bf279U,
+    0xe732d5e7U, 0xc8438bc8U, 0x37596e37U, 0x6db7da6dU,
+    0x8d8c018dU, 0xd564b1d5U, 0x4ed29c4eU, 0xa9e049a9U,
+    0x6cb4d86cU, 0x56faac56U, 0xf407f3f4U, 0xea25cfeaU,
+    0x65afca65U, 0x7a8ef47aU, 0xaee947aeU, 0x08181008U,
+    0xbad56fbaU, 0x7888f078U, 0x256f4a25U, 0x2e725c2eU,
+    0x1c24381cU, 0xa6f157a6U, 0xb4c773b4U, 0xc65197c6U,
+    0xe823cbe8U, 0xdd7ca1ddU, 0x749ce874U, 0x1f213e1fU,
+    0x4bdd964bU, 0xbddc61bdU, 0x8b860d8bU, 0x8a850f8aU,
+    0x7090e070U, 0x3e427c3eU, 0xb5c471b5U, 0x66aacc66U,
+    0x48d89048U, 0x03050603U, 0xf601f7f6U, 0x0e121c0eU,
+    0x61a3c261U, 0x355f6a35U, 0x57f9ae57U, 0xb9d069b9U,
+    0x86911786U, 0xc15899c1U, 0x1d273a1dU, 0x9eb9279eU,
+    0xe138d9e1U, 0xf813ebf8U, 0x98b32b98U, 0x11332211U,
+    0x69bbd269U, 0xd970a9d9U, 0x8e89078eU, 0x94a73394U,
+    0x9bb62d9bU, 0x1e223c1eU, 0x87921587U, 0xe920c9e9U,
+    0xce4987ceU, 0x55ffaa55U, 0x28785028U, 0xdf7aa5dfU,
+    0x8c8f038cU, 0xa1f859a1U, 0x89800989U, 0x0d171a0dU,
+    0xbfda65bfU, 0xe631d7e6U, 0x42c68442U, 0x68b8d068U,
+    0x41c38241U, 0x99b02999U, 0x2d775a2dU, 0x0f111e0fU,
+    0xb0cb7bb0U, 0x54fca854U, 0xbbd66dbbU, 0x163a2c16U,
+};
+static const u32 Te3[256] = {
+    0x6363a5c6U, 0x7c7c84f8U, 0x777799eeU, 0x7b7b8df6U,
+    0xf2f20dffU, 0x6b6bbdd6U, 0x6f6fb1deU, 0xc5c55491U,
+    0x30305060U, 0x01010302U, 0x6767a9ceU, 0x2b2b7d56U,
+    0xfefe19e7U, 0xd7d762b5U, 0xababe64dU, 0x76769aecU,
+    0xcaca458fU, 0x82829d1fU, 0xc9c94089U, 0x7d7d87faU,
+    0xfafa15efU, 0x5959ebb2U, 0x4747c98eU, 0xf0f00bfbU,
+    0xadadec41U, 0xd4d467b3U, 0xa2a2fd5fU, 0xafafea45U,
+    0x9c9cbf23U, 0xa4a4f753U, 0x727296e4U, 0xc0c05b9bU,
+    0xb7b7c275U, 0xfdfd1ce1U, 0x9393ae3dU, 0x26266a4cU,
+    0x36365a6cU, 0x3f3f417eU, 0xf7f702f5U, 0xcccc4f83U,
+    0x34345c68U, 0xa5a5f451U, 0xe5e534d1U, 0xf1f108f9U,
+    0x717193e2U, 0xd8d873abU, 0x31315362U, 0x15153f2aU,
+    0x04040c08U, 0xc7c75295U, 0x23236546U, 0xc3c35e9dU,
+    0x18182830U, 0x9696a137U, 0x05050f0aU, 0x9a9ab52fU,
+    0x0707090eU, 0x12123624U, 0x80809b1bU, 0xe2e23ddfU,
+    0xebeb26cdU, 0x2727694eU, 0xb2b2cd7fU, 0x75759feaU,
+    0x09091b12U, 0x83839e1dU, 0x2c2c7458U, 0x1a1a2e34U,
+    0x1b1b2d36U, 0x6e6eb2dcU, 0x5a5aeeb4U, 0xa0a0fb5bU,
+    0x5252f6a4U, 0x3b3b4d76U, 0xd6d661b7U, 0xb3b3ce7dU,
+    0x29297b52U, 0xe3e33eddU, 0x2f2f715eU, 0x84849713U,
+    0x5353f5a6U, 0xd1d168b9U, 0x00000000U, 0xeded2cc1U,
+    0x20206040U, 0xfcfc1fe3U, 0xb1b1c879U, 0x5b5bedb6U,
+    0x6a6abed4U, 0xcbcb468dU, 0xbebed967U, 0x39394b72U,
+    0x4a4ade94U, 0x4c4cd498U, 0x5858e8b0U, 0xcfcf4a85U,
+    0xd0d06bbbU, 0xefef2ac5U, 0xaaaae54fU, 0xfbfb16edU,
+    0x4343c586U, 0x4d4dd79aU, 0x33335566U, 0x85859411U,
+    0x4545cf8aU, 0xf9f910e9U, 0x02020604U, 0x7f7f81feU,
+    0x5050f0a0U, 0x3c3c4478U, 0x9f9fba25U, 0xa8a8e34bU,
+    0x5151f3a2U, 0xa3a3fe5dU, 0x4040c080U, 0x8f8f8a05U,
+    0x9292ad3fU, 0x9d9dbc21U, 0x38384870U, 0xf5f504f1U,
+    0xbcbcdf63U, 0xb6b6c177U, 0xdada75afU, 0x21216342U,
+    0x10103020U, 0xffff1ae5U, 0xf3f30efdU, 0xd2d26dbfU,
+    0xcdcd4c81U, 0x0c0c1418U, 0x13133526U, 0xecec2fc3U,
+    0x5f5fe1beU, 0x9797a235U, 0x4444cc88U, 0x1717392eU,
+    0xc4c45793U, 0xa7a7f255U, 0x7e7e82fcU, 0x3d3d477aU,
+    0x6464acc8U, 0x5d5de7baU, 0x19192b32U, 0x737395e6U,
+    0x6060a0c0U, 0x81819819U, 0x4f4fd19eU, 0xdcdc7fa3U,
+    0x22226644U, 0x2a2a7e54U, 0x9090ab3bU, 0x8888830bU,
+    0x4646ca8cU, 0xeeee29c7U, 0xb8b8d36bU, 0x14143c28U,
+    0xdede79a7U, 0x5e5ee2bcU, 0x0b0b1d16U, 0xdbdb76adU,
+    0xe0e03bdbU, 0x32325664U, 0x3a3a4e74U, 0x0a0a1e14U,
+    0x4949db92U, 0x06060a0cU, 0x24246c48U, 0x5c5ce4b8U,
+    0xc2c25d9fU, 0xd3d36ebdU, 0xacacef43U, 0x6262a6c4U,
+    0x9191a839U, 0x9595a431U, 0xe4e437d3U, 0x79798bf2U,
+    0xe7e732d5U, 0xc8c8438bU, 0x3737596eU, 0x6d6db7daU,
+    0x8d8d8c01U, 0xd5d564b1U, 0x4e4ed29cU, 0xa9a9e049U,
+    0x6c6cb4d8U, 0x5656faacU, 0xf4f407f3U, 0xeaea25cfU,
+    0x6565afcaU, 0x7a7a8ef4U, 0xaeaee947U, 0x08081810U,
+    0xbabad56fU, 0x787888f0U, 0x25256f4aU, 0x2e2e725cU,
+    0x1c1c2438U, 0xa6a6f157U, 0xb4b4c773U, 0xc6c65197U,
+    0xe8e823cbU, 0xdddd7ca1U, 0x74749ce8U, 0x1f1f213eU,
+    0x4b4bdd96U, 0xbdbddc61U, 0x8b8b860dU, 0x8a8a850fU,
+    0x707090e0U, 0x3e3e427cU, 0xb5b5c471U, 0x6666aaccU,
+    0x4848d890U, 0x03030506U, 0xf6f601f7U, 0x0e0e121cU,
+    0x6161a3c2U, 0x35355f6aU, 0x5757f9aeU, 0xb9b9d069U,
+    0x86869117U, 0xc1c15899U, 0x1d1d273aU, 0x9e9eb927U,
+    0xe1e138d9U, 0xf8f813ebU, 0x9898b32bU, 0x11113322U,
+    0x6969bbd2U, 0xd9d970a9U, 0x8e8e8907U, 0x9494a733U,
+    0x9b9bb62dU, 0x1e1e223cU, 0x87879215U, 0xe9e920c9U,
+    0xcece4987U, 0x5555ffaaU, 0x28287850U, 0xdfdf7aa5U,
+    0x8c8c8f03U, 0xa1a1f859U, 0x89898009U, 0x0d0d171aU,
+    0xbfbfda65U, 0xe6e631d7U, 0x4242c684U, 0x6868b8d0U,
+    0x4141c382U, 0x9999b029U, 0x2d2d775aU, 0x0f0f111eU,
+    0xb0b0cb7bU, 0x5454fca8U, 0xbbbbd66dU, 0x16163a2cU,
+};
+
+static const u32 Td0[256] = {
+    0x51f4a750U, 0x7e416553U, 0x1a17a4c3U, 0x3a275e96U,
+    0x3bab6bcbU, 0x1f9d45f1U, 0xacfa58abU, 0x4be30393U,
+    0x2030fa55U, 0xad766df6U, 0x88cc7691U, 0xf5024c25U,
+    0x4fe5d7fcU, 0xc52acbd7U, 0x26354480U, 0xb562a38fU,
+    0xdeb15a49U, 0x25ba1b67U, 0x45ea0e98U, 0x5dfec0e1U,
+    0xc32f7502U, 0x814cf012U, 0x8d4697a3U, 0x6bd3f9c6U,
+    0x038f5fe7U, 0x15929c95U, 0xbf6d7aebU, 0x955259daU,
+    0xd4be832dU, 0x587421d3U, 0x49e06929U, 0x8ec9c844U,
+    0x75c2896aU, 0xf48e7978U, 0x99583e6bU, 0x27b971ddU,
+    0xbee14fb6U, 0xf088ad17U, 0xc920ac66U, 0x7dce3ab4U,
+    0x63df4a18U, 0xe51a3182U, 0x97513360U, 0x62537f45U,
+    0xb16477e0U, 0xbb6bae84U, 0xfe81a01cU, 0xf9082b94U,
+    0x70486858U, 0x8f45fd19U, 0x94de6c87U, 0x527bf8b7U,
+    0xab73d323U, 0x724b02e2U, 0xe31f8f57U, 0x6655ab2aU,
+    0xb2eb2807U, 0x2fb5c203U, 0x86c57b9aU, 0xd33708a5U,
+    0x302887f2U, 0x23bfa5b2U, 0x02036abaU, 0xed16825cU,
+    0x8acf1c2bU, 0xa779b492U, 0xf307f2f0U, 0x4e69e2a1U,
+    0x65daf4cdU, 0x0605bed5U, 0xd134621fU, 0xc4a6fe8aU,
+    0x342e539dU, 0xa2f355a0U, 0x058ae132U, 0xa4f6eb75U,
+    0x0b83ec39U, 0x4060efaaU, 0x5e719f06U, 0xbd6e1051U,
+    0x3e218af9U, 0x96dd063dU, 0xdd3e05aeU, 0x4de6bd46U,
+    0x91548db5U, 0x71c45d05U, 0x0406d46fU, 0x605015ffU,
+    0x1998fb24U, 0xd6bde997U, 0x894043ccU, 0x67d99e77U,
+    0xb0e842bdU, 0x07898b88U, 0xe7195b38U, 0x79c8eedbU,
+    0xa17c0a47U, 0x7c420fe9U, 0xf8841ec9U, 0x00000000U,
+    0x09808683U, 0x322bed48U, 0x1e1170acU, 0x6c5a724eU,
+    0xfd0efffbU, 0x0f853856U, 0x3daed51eU, 0x362d3927U,
+    0x0a0fd964U, 0x685ca621U, 0x9b5b54d1U, 0x24362e3aU,
+    0x0c0a67b1U, 0x9357e70fU, 0xb4ee96d2U, 0x1b9b919eU,
+    0x80c0c54fU, 0x61dc20a2U, 0x5a774b69U, 0x1c121a16U,
+    0xe293ba0aU, 0xc0a02ae5U, 0x3c22e043U, 0x121b171dU,
+    0x0e090d0bU, 0xf28bc7adU, 0x2db6a8b9U, 0x141ea9c8U,
+    0x57f11985U, 0xaf75074cU, 0xee99ddbbU, 0xa37f60fdU,
+    0xf701269fU, 0x5c72f5bcU, 0x44663bc5U, 0x5bfb7e34U,
+    0x8b432976U, 0xcb23c6dcU, 0xb6edfc68U, 0xb8e4f163U,
+    0xd731dccaU, 0x42638510U, 0x13972240U, 0x84c61120U,
+    0x854a247dU, 0xd2bb3df8U, 0xaef93211U, 0xc729a16dU,
+    0x1d9e2f4bU, 0xdcb230f3U, 0x0d8652ecU, 0x77c1e3d0U,
+    0x2bb3166cU, 0xa970b999U, 0x119448faU, 0x47e96422U,
+    0xa8fc8cc4U, 0xa0f03f1aU, 0x567d2cd8U, 0x223390efU,
+    0x87494ec7U, 0xd938d1c1U, 0x8ccaa2feU, 0x98d40b36U,
+    0xa6f581cfU, 0xa57ade28U, 0xdab78e26U, 0x3fadbfa4U,
+    0x2c3a9de4U, 0x5078920dU, 0x6a5fcc9bU, 0x547e4662U,
+    0xf68d13c2U, 0x90d8b8e8U, 0x2e39f75eU, 0x82c3aff5U,
+    0x9f5d80beU, 0x69d0937cU, 0x6fd52da9U, 0xcf2512b3U,
+    0xc8ac993bU, 0x10187da7U, 0xe89c636eU, 0xdb3bbb7bU,
+    0xcd267809U, 0x6e5918f4U, 0xec9ab701U, 0x834f9aa8U,
+    0xe6956e65U, 0xaaffe67eU, 0x21bccf08U, 0xef15e8e6U,
+    0xbae79bd9U, 0x4a6f36ceU, 0xea9f09d4U, 0x29b07cd6U,
+    0x31a4b2afU, 0x2a3f2331U, 0xc6a59430U, 0x35a266c0U,
+    0x744ebc37U, 0xfc82caa6U, 0xe090d0b0U, 0x33a7d815U,
+    0xf104984aU, 0x41ecdaf7U, 0x7fcd500eU, 0x1791f62fU,
+    0x764dd68dU, 0x43efb04dU, 0xccaa4d54U, 0xe49604dfU,
+    0x9ed1b5e3U, 0x4c6a881bU, 0xc12c1fb8U, 0x4665517fU,
+    0x9d5eea04U, 0x018c355dU, 0xfa877473U, 0xfb0b412eU,
+    0xb3671d5aU, 0x92dbd252U, 0xe9105633U, 0x6dd64713U,
+    0x9ad7618cU, 0x37a10c7aU, 0x59f8148eU, 0xeb133c89U,
+    0xcea927eeU, 0xb761c935U, 0xe11ce5edU, 0x7a47b13cU,
+    0x9cd2df59U, 0x55f2733fU, 0x1814ce79U, 0x73c737bfU,
+    0x53f7cdeaU, 0x5ffdaa5bU, 0xdf3d6f14U, 0x7844db86U,
+    0xcaaff381U, 0xb968c43eU, 0x3824342cU, 0xc2a3405fU,
+    0x161dc372U, 0xbce2250cU, 0x283c498bU, 0xff0d9541U,
+    0x39a80171U, 0x080cb3deU, 0xd8b4e49cU, 0x6456c190U,
+    0x7bcb8461U, 0xd532b670U, 0x486c5c74U, 0xd0b85742U,
+};
+static const u32 Td1[256] = {
+    0x5051f4a7U, 0x537e4165U, 0xc31a17a4U, 0x963a275eU,
+    0xcb3bab6bU, 0xf11f9d45U, 0xabacfa58U, 0x934be303U,
+    0x552030faU, 0xf6ad766dU, 0x9188cc76U, 0x25f5024cU,
+    0xfc4fe5d7U, 0xd7c52acbU, 0x80263544U, 0x8fb562a3U,
+    0x49deb15aU, 0x6725ba1bU, 0x9845ea0eU, 0xe15dfec0U,
+    0x02c32f75U, 0x12814cf0U, 0xa38d4697U, 0xc66bd3f9U,
+    0xe7038f5fU, 0x9515929cU, 0xebbf6d7aU, 0xda955259U,
+    0x2dd4be83U, 0xd3587421U, 0x2949e069U, 0x448ec9c8U,
+    0x6a75c289U, 0x78f48e79U, 0x6b99583eU, 0xdd27b971U,
+    0xb6bee14fU, 0x17f088adU, 0x66c920acU, 0xb47dce3aU,
+    0x1863df4aU, 0x82e51a31U, 0x60975133U, 0x4562537fU,
+    0xe0b16477U, 0x84bb6baeU, 0x1cfe81a0U, 0x94f9082bU,
+    0x58704868U, 0x198f45fdU, 0x8794de6cU, 0xb7527bf8U,
+    0x23ab73d3U, 0xe2724b02U, 0x57e31f8fU, 0x2a6655abU,
+    0x07b2eb28U, 0x032fb5c2U, 0x9a86c57bU, 0xa5d33708U,
+    0xf2302887U, 0xb223bfa5U, 0xba02036aU, 0x5ced1682U,
+    0x2b8acf1cU, 0x92a779b4U, 0xf0f307f2U, 0xa14e69e2U,
+    0xcd65daf4U, 0xd50605beU, 0x1fd13462U, 0x8ac4a6feU,
+    0x9d342e53U, 0xa0a2f355U, 0x32058ae1U, 0x75a4f6ebU,
+    0x390b83ecU, 0xaa4060efU, 0x065e719fU, 0x51bd6e10U,
+    0xf93e218aU, 0x3d96dd06U, 0xaedd3e05U, 0x464de6bdU,
+    0xb591548dU, 0x0571c45dU, 0x6f0406d4U, 0xff605015U,
+    0x241998fbU, 0x97d6bde9U, 0xcc894043U, 0x7767d99eU,
+    0xbdb0e842U, 0x8807898bU, 0x38e7195bU, 0xdb79c8eeU,
+    0x47a17c0aU, 0xe97c420fU, 0xc9f8841eU, 0x00000000U,
+    0x83098086U, 0x48322bedU, 0xac1e1170U, 0x4e6c5a72U,
+    0xfbfd0effU, 0x560f8538U, 0x1e3daed5U, 0x27362d39U,
+    0x640a0fd9U, 0x21685ca6U, 0xd19b5b54U, 0x3a24362eU,
+    0xb10c0a67U, 0x0f9357e7U, 0xd2b4ee96U, 0x9e1b9b91U,
+    0x4f80c0c5U, 0xa261dc20U, 0x695a774bU, 0x161c121aU,
+    0x0ae293baU, 0xe5c0a02aU, 0x433c22e0U, 0x1d121b17U,
+    0x0b0e090dU, 0xadf28bc7U, 0xb92db6a8U, 0xc8141ea9U,
+    0x8557f119U, 0x4caf7507U, 0xbbee99ddU, 0xfda37f60U,
+    0x9ff70126U, 0xbc5c72f5U, 0xc544663bU, 0x345bfb7eU,
+    0x768b4329U, 0xdccb23c6U, 0x68b6edfcU, 0x63b8e4f1U,
+    0xcad731dcU, 0x10426385U, 0x40139722U, 0x2084c611U,
+    0x7d854a24U, 0xf8d2bb3dU, 0x11aef932U, 0x6dc729a1U,
+    0x4b1d9e2fU, 0xf3dcb230U, 0xec0d8652U, 0xd077c1e3U,
+    0x6c2bb316U, 0x99a970b9U, 0xfa119448U, 0x2247e964U,
+    0xc4a8fc8cU, 0x1aa0f03fU, 0xd8567d2cU, 0xef223390U,
+    0xc787494eU, 0xc1d938d1U, 0xfe8ccaa2U, 0x3698d40bU,
+    0xcfa6f581U, 0x28a57adeU, 0x26dab78eU, 0xa43fadbfU,
+    0xe42c3a9dU, 0x0d507892U, 0x9b6a5fccU, 0x62547e46U,
+    0xc2f68d13U, 0xe890d8b8U, 0x5e2e39f7U, 0xf582c3afU,
+    0xbe9f5d80U, 0x7c69d093U, 0xa96fd52dU, 0xb3cf2512U,
+    0x3bc8ac99U, 0xa710187dU, 0x6ee89c63U, 0x7bdb3bbbU,
+    0x09cd2678U, 0xf46e5918U, 0x01ec9ab7U, 0xa8834f9aU,
+    0x65e6956eU, 0x7eaaffe6U, 0x0821bccfU, 0xe6ef15e8U,
+    0xd9bae79bU, 0xce4a6f36U, 0xd4ea9f09U, 0xd629b07cU,
+    0xaf31a4b2U, 0x312a3f23U, 0x30c6a594U, 0xc035a266U,
+    0x37744ebcU, 0xa6fc82caU, 0xb0e090d0U, 0x1533a7d8U,
+    0x4af10498U, 0xf741ecdaU, 0x0e7fcd50U, 0x2f1791f6U,
+    0x8d764dd6U, 0x4d43efb0U, 0x54ccaa4dU, 0xdfe49604U,
+    0xe39ed1b5U, 0x1b4c6a88U, 0xb8c12c1fU, 0x7f466551U,
+    0x049d5eeaU, 0x5d018c35U, 0x73fa8774U, 0x2efb0b41U,
+    0x5ab3671dU, 0x5292dbd2U, 0x33e91056U, 0x136dd647U,
+    0x8c9ad761U, 0x7a37a10cU, 0x8e59f814U, 0x89eb133cU,
+    0xeecea927U, 0x35b761c9U, 0xede11ce5U, 0x3c7a47b1U,
+    0x599cd2dfU, 0x3f55f273U, 0x791814ceU, 0xbf73c737U,
+    0xea53f7cdU, 0x5b5ffdaaU, 0x14df3d6fU, 0x867844dbU,
+    0x81caaff3U, 0x3eb968c4U, 0x2c382434U, 0x5fc2a340U,
+    0x72161dc3U, 0x0cbce225U, 0x8b283c49U, 0x41ff0d95U,
+    0x7139a801U, 0xde080cb3U, 0x9cd8b4e4U, 0x906456c1U,
+    0x617bcb84U, 0x70d532b6U, 0x74486c5cU, 0x42d0b857U,
+};
+static const u32 Td2[256] = {
+    0xa75051f4U, 0x65537e41U, 0xa4c31a17U, 0x5e963a27U,
+    0x6bcb3babU, 0x45f11f9dU, 0x58abacfaU, 0x03934be3U,
+    0xfa552030U, 0x6df6ad76U, 0x769188ccU, 0x4c25f502U,
+    0xd7fc4fe5U, 0xcbd7c52aU, 0x44802635U, 0xa38fb562U,
+    0x5a49deb1U, 0x1b6725baU, 0x0e9845eaU, 0xc0e15dfeU,
+    0x7502c32fU, 0xf012814cU, 0x97a38d46U, 0xf9c66bd3U,
+    0x5fe7038fU, 0x9c951592U, 0x7aebbf6dU, 0x59da9552U,
+    0x832dd4beU, 0x21d35874U, 0x692949e0U, 0xc8448ec9U,
+    0x896a75c2U, 0x7978f48eU, 0x3e6b9958U, 0x71dd27b9U,
+    0x4fb6bee1U, 0xad17f088U, 0xac66c920U, 0x3ab47dceU,
+    0x4a1863dfU, 0x3182e51aU, 0x33609751U, 0x7f456253U,
+    0x77e0b164U, 0xae84bb6bU, 0xa01cfe81U, 0x2b94f908U,
+    0x68587048U, 0xfd198f45U, 0x6c8794deU, 0xf8b7527bU,
+    0xd323ab73U, 0x02e2724bU, 0x8f57e31fU, 0xab2a6655U,
+    0x2807b2ebU, 0xc2032fb5U, 0x7b9a86c5U, 0x08a5d337U,
+    0x87f23028U, 0xa5b223bfU, 0x6aba0203U, 0x825ced16U,
+    0x1c2b8acfU, 0xb492a779U, 0xf2f0f307U, 0xe2a14e69U,
+    0xf4cd65daU, 0xbed50605U, 0x621fd134U, 0xfe8ac4a6U,
+    0x539d342eU, 0x55a0a2f3U, 0xe132058aU, 0xeb75a4f6U,
+    0xec390b83U, 0xefaa4060U, 0x9f065e71U, 0x1051bd6eU,
+    0x8af93e21U, 0x063d96ddU, 0x05aedd3eU, 0xbd464de6U,
+    0x8db59154U, 0x5d0571c4U, 0xd46f0406U, 0x15ff6050U,
+    0xfb241998U, 0xe997d6bdU, 0x43cc8940U, 0x9e7767d9U,
+    0x42bdb0e8U, 0x8b880789U, 0x5b38e719U, 0xeedb79c8U,
+    0x0a47a17cU, 0x0fe97c42U, 0x1ec9f884U, 0x00000000U,
+    0x86830980U, 0xed48322bU, 0x70ac1e11U, 0x724e6c5aU,
+    0xfffbfd0eU, 0x38560f85U, 0xd51e3daeU, 0x3927362dU,
+    0xd9640a0fU, 0xa621685cU, 0x54d19b5bU, 0x2e3a2436U,
+    0x67b10c0aU, 0xe70f9357U, 0x96d2b4eeU, 0x919e1b9bU,
+    0xc54f80c0U, 0x20a261dcU, 0x4b695a77U, 0x1a161c12U,
+    0xba0ae293U, 0x2ae5c0a0U, 0xe0433c22U, 0x171d121bU,
+    0x0d0b0e09U, 0xc7adf28bU, 0xa8b92db6U, 0xa9c8141eU,
+    0x198557f1U, 0x074caf75U, 0xddbbee99U, 0x60fda37fU,
+    0x269ff701U, 0xf5bc5c72U, 0x3bc54466U, 0x7e345bfbU,
+    0x29768b43U, 0xc6dccb23U, 0xfc68b6edU, 0xf163b8e4U,
+    0xdccad731U, 0x85104263U, 0x22401397U, 0x112084c6U,
+    0x247d854aU, 0x3df8d2bbU, 0x3211aef9U, 0xa16dc729U,
+    0x2f4b1d9eU, 0x30f3dcb2U, 0x52ec0d86U, 0xe3d077c1U,
+    0x166c2bb3U, 0xb999a970U, 0x48fa1194U, 0x642247e9U,
+    0x8cc4a8fcU, 0x3f1aa0f0U, 0x2cd8567dU, 0x90ef2233U,
+    0x4ec78749U, 0xd1c1d938U, 0xa2fe8ccaU, 0x0b3698d4U,
+    0x81cfa6f5U, 0xde28a57aU, 0x8e26dab7U, 0xbfa43fadU,
+    0x9de42c3aU, 0x920d5078U, 0xcc9b6a5fU, 0x4662547eU,
+    0x13c2f68dU, 0xb8e890d8U, 0xf75e2e39U, 0xaff582c3U,
+    0x80be9f5dU, 0x937c69d0U, 0x2da96fd5U, 0x12b3cf25U,
+    0x993bc8acU, 0x7da71018U, 0x636ee89cU, 0xbb7bdb3bU,
+    0x7809cd26U, 0x18f46e59U, 0xb701ec9aU, 0x9aa8834fU,
+    0x6e65e695U, 0xe67eaaffU, 0xcf0821bcU, 0xe8e6ef15U,
+    0x9bd9bae7U, 0x36ce4a6fU, 0x09d4ea9fU, 0x7cd629b0U,
+    0xb2af31a4U, 0x23312a3fU, 0x9430c6a5U, 0x66c035a2U,
+    0xbc37744eU, 0xcaa6fc82U, 0xd0b0e090U, 0xd81533a7U,
+    0x984af104U, 0xdaf741ecU, 0x500e7fcdU, 0xf62f1791U,
+    0xd68d764dU, 0xb04d43efU, 0x4d54ccaaU, 0x04dfe496U,
+    0xb5e39ed1U, 0x881b4c6aU, 0x1fb8c12cU, 0x517f4665U,
+    0xea049d5eU, 0x355d018cU, 0x7473fa87U, 0x412efb0bU,
+    0x1d5ab367U, 0xd25292dbU, 0x5633e910U, 0x47136dd6U,
+    0x618c9ad7U, 0x0c7a37a1U, 0x148e59f8U, 0x3c89eb13U,
+    0x27eecea9U, 0xc935b761U, 0xe5ede11cU, 0xb13c7a47U,
+    0xdf599cd2U, 0x733f55f2U, 0xce791814U, 0x37bf73c7U,
+    0xcdea53f7U, 0xaa5b5ffdU, 0x6f14df3dU, 0xdb867844U,
+    0xf381caafU, 0xc43eb968U, 0x342c3824U, 0x405fc2a3U,
+    0xc372161dU, 0x250cbce2U, 0x498b283cU, 0x9541ff0dU,
+    0x017139a8U, 0xb3de080cU, 0xe49cd8b4U, 0xc1906456U,
+    0x84617bcbU, 0xb670d532U, 0x5c74486cU, 0x5742d0b8U,
+};
+static const u32 Td3[256] = {
+    0xf4a75051U, 0x4165537eU, 0x17a4c31aU, 0x275e963aU,
+    0xab6bcb3bU, 0x9d45f11fU, 0xfa58abacU, 0xe303934bU,
+    0x30fa5520U, 0x766df6adU, 0xcc769188U, 0x024c25f5U,
+    0xe5d7fc4fU, 0x2acbd7c5U, 0x35448026U, 0x62a38fb5U,
+    0xb15a49deU, 0xba1b6725U, 0xea0e9845U, 0xfec0e15dU,
+    0x2f7502c3U, 0x4cf01281U, 0x4697a38dU, 0xd3f9c66bU,
+    0x8f5fe703U, 0x929c9515U, 0x6d7aebbfU, 0x5259da95U,
+    0xbe832dd4U, 0x7421d358U, 0xe0692949U, 0xc9c8448eU,
+    0xc2896a75U, 0x8e7978f4U, 0x583e6b99U, 0xb971dd27U,
+    0xe14fb6beU, 0x88ad17f0U, 0x20ac66c9U, 0xce3ab47dU,
+    0xdf4a1863U, 0x1a3182e5U, 0x51336097U, 0x537f4562U,
+    0x6477e0b1U, 0x6bae84bbU, 0x81a01cfeU, 0x082b94f9U,
+    0x48685870U, 0x45fd198fU, 0xde6c8794U, 0x7bf8b752U,
+    0x73d323abU, 0x4b02e272U, 0x1f8f57e3U, 0x55ab2a66U,
+    0xeb2807b2U, 0xb5c2032fU, 0xc57b9a86U, 0x3708a5d3U,
+    0x2887f230U, 0xbfa5b223U, 0x036aba02U, 0x16825cedU,
+    0xcf1c2b8aU, 0x79b492a7U, 0x07f2f0f3U, 0x69e2a14eU,
+    0xdaf4cd65U, 0x05bed506U, 0x34621fd1U, 0xa6fe8ac4U,
+    0x2e539d34U, 0xf355a0a2U, 0x8ae13205U, 0xf6eb75a4U,
+    0x83ec390bU, 0x60efaa40U, 0x719f065eU, 0x6e1051bdU,
+    0x218af93eU, 0xdd063d96U, 0x3e05aeddU, 0xe6bd464dU,
+    0x548db591U, 0xc45d0571U, 0x06d46f04U, 0x5015ff60U,
+    0x98fb2419U, 0xbde997d6U, 0x4043cc89U, 0xd99e7767U,
+    0xe842bdb0U, 0x898b8807U, 0x195b38e7U, 0xc8eedb79U,
+    0x7c0a47a1U, 0x420fe97cU, 0x841ec9f8U, 0x00000000U,
+    0x80868309U, 0x2bed4832U, 0x1170ac1eU, 0x5a724e6cU,
+    0x0efffbfdU, 0x8538560fU, 0xaed51e3dU, 0x2d392736U,
+    0x0fd9640aU, 0x5ca62168U, 0x5b54d19bU, 0x362e3a24U,
+    0x0a67b10cU, 0x57e70f93U, 0xee96d2b4U, 0x9b919e1bU,
+    0xc0c54f80U, 0xdc20a261U, 0x774b695aU, 0x121a161cU,
+    0x93ba0ae2U, 0xa02ae5c0U, 0x22e0433cU, 0x1b171d12U,
+    0x090d0b0eU, 0x8bc7adf2U, 0xb6a8b92dU, 0x1ea9c814U,
+    0xf1198557U, 0x75074cafU, 0x99ddbbeeU, 0x7f60fda3U,
+    0x01269ff7U, 0x72f5bc5cU, 0x663bc544U, 0xfb7e345bU,
+    0x4329768bU, 0x23c6dccbU, 0xedfc68b6U, 0xe4f163b8U,
+    0x31dccad7U, 0x63851042U, 0x97224013U, 0xc6112084U,
+    0x4a247d85U, 0xbb3df8d2U, 0xf93211aeU, 0x29a16dc7U,
+    0x9e2f4b1dU, 0xb230f3dcU, 0x8652ec0dU, 0xc1e3d077U,
+    0xb3166c2bU, 0x70b999a9U, 0x9448fa11U, 0xe9642247U,
+    0xfc8cc4a8U, 0xf03f1aa0U, 0x7d2cd856U, 0x3390ef22U,
+    0x494ec787U, 0x38d1c1d9U, 0xcaa2fe8cU, 0xd40b3698U,
+    0xf581cfa6U, 0x7ade28a5U, 0xb78e26daU, 0xadbfa43fU,
+    0x3a9de42cU, 0x78920d50U, 0x5fcc9b6aU, 0x7e466254U,
+    0x8d13c2f6U, 0xd8b8e890U, 0x39f75e2eU, 0xc3aff582U,
+    0x5d80be9fU, 0xd0937c69U, 0xd52da96fU, 0x2512b3cfU,
+    0xac993bc8U, 0x187da710U, 0x9c636ee8U, 0x3bbb7bdbU,
+    0x267809cdU, 0x5918f46eU, 0x9ab701ecU, 0x4f9aa883U,
+    0x956e65e6U, 0xffe67eaaU, 0xbccf0821U, 0x15e8e6efU,
+    0xe79bd9baU, 0x6f36ce4aU, 0x9f09d4eaU, 0xb07cd629U,
+    0xa4b2af31U, 0x3f23312aU, 0xa59430c6U, 0xa266c035U,
+    0x4ebc3774U, 0x82caa6fcU, 0x90d0b0e0U, 0xa7d81533U,
+    0x04984af1U, 0xecdaf741U, 0xcd500e7fU, 0x91f62f17U,
+    0x4dd68d76U, 0xefb04d43U, 0xaa4d54ccU, 0x9604dfe4U,
+    0xd1b5e39eU, 0x6a881b4cU, 0x2c1fb8c1U, 0x65517f46U,
+    0x5eea049dU, 0x8c355d01U, 0x877473faU, 0x0b412efbU,
+    0x671d5ab3U, 0xdbd25292U, 0x105633e9U, 0xd647136dU,
+    0xd7618c9aU, 0xa10c7a37U, 0xf8148e59U, 0x133c89ebU,
+    0xa927eeceU, 0x61c935b7U, 0x1ce5ede1U, 0x47b13c7aU,
+    0xd2df599cU, 0xf2733f55U, 0x14ce7918U, 0xc737bf73U,
+    0xf7cdea53U, 0xfdaa5b5fU, 0x3d6f14dfU, 0x44db8678U,
+    0xaff381caU, 0x68c43eb9U, 0x24342c38U, 0xa3405fc2U,
+    0x1dc37216U, 0xe2250cbcU, 0x3c498b28U, 0x0d9541ffU,
+    0xa8017139U, 0x0cb3de08U, 0xb4e49cd8U, 0x56c19064U,
+    0xcb84617bU, 0x32b670d5U, 0x6c5c7448U, 0xb85742d0U,
+};
+static const u8 Td4[256] = {
+    0x52U, 0x09U, 0x6aU, 0xd5U, 0x30U, 0x36U, 0xa5U, 0x38U,
+    0xbfU, 0x40U, 0xa3U, 0x9eU, 0x81U, 0xf3U, 0xd7U, 0xfbU,
+    0x7cU, 0xe3U, 0x39U, 0x82U, 0x9bU, 0x2fU, 0xffU, 0x87U,
+    0x34U, 0x8eU, 0x43U, 0x44U, 0xc4U, 0xdeU, 0xe9U, 0xcbU,
+    0x54U, 0x7bU, 0x94U, 0x32U, 0xa6U, 0xc2U, 0x23U, 0x3dU,
+    0xeeU, 0x4cU, 0x95U, 0x0bU, 0x42U, 0xfaU, 0xc3U, 0x4eU,
+    0x08U, 0x2eU, 0xa1U, 0x66U, 0x28U, 0xd9U, 0x24U, 0xb2U,
+    0x76U, 0x5bU, 0xa2U, 0x49U, 0x6dU, 0x8bU, 0xd1U, 0x25U,
+    0x72U, 0xf8U, 0xf6U, 0x64U, 0x86U, 0x68U, 0x98U, 0x16U,
+    0xd4U, 0xa4U, 0x5cU, 0xccU, 0x5dU, 0x65U, 0xb6U, 0x92U,
+    0x6cU, 0x70U, 0x48U, 0x50U, 0xfdU, 0xedU, 0xb9U, 0xdaU,
+    0x5eU, 0x15U, 0x46U, 0x57U, 0xa7U, 0x8dU, 0x9dU, 0x84U,
+    0x90U, 0xd8U, 0xabU, 0x00U, 0x8cU, 0xbcU, 0xd3U, 0x0aU,
+    0xf7U, 0xe4U, 0x58U, 0x05U, 0xb8U, 0xb3U, 0x45U, 0x06U,
+    0xd0U, 0x2cU, 0x1eU, 0x8fU, 0xcaU, 0x3fU, 0x0fU, 0x02U,
+    0xc1U, 0xafU, 0xbdU, 0x03U, 0x01U, 0x13U, 0x8aU, 0x6bU,
+    0x3aU, 0x91U, 0x11U, 0x41U, 0x4fU, 0x67U, 0xdcU, 0xeaU,
+    0x97U, 0xf2U, 0xcfU, 0xceU, 0xf0U, 0xb4U, 0xe6U, 0x73U,
+    0x96U, 0xacU, 0x74U, 0x22U, 0xe7U, 0xadU, 0x35U, 0x85U,
+    0xe2U, 0xf9U, 0x37U, 0xe8U, 0x1cU, 0x75U, 0xdfU, 0x6eU,
+    0x47U, 0xf1U, 0x1aU, 0x71U, 0x1dU, 0x29U, 0xc5U, 0x89U,
+    0x6fU, 0xb7U, 0x62U, 0x0eU, 0xaaU, 0x18U, 0xbeU, 0x1bU,
+    0xfcU, 0x56U, 0x3eU, 0x4bU, 0xc6U, 0xd2U, 0x79U, 0x20U,
+    0x9aU, 0xdbU, 0xc0U, 0xfeU, 0x78U, 0xcdU, 0x5aU, 0xf4U,
+    0x1fU, 0xddU, 0xa8U, 0x33U, 0x88U, 0x07U, 0xc7U, 0x31U,
+    0xb1U, 0x12U, 0x10U, 0x59U, 0x27U, 0x80U, 0xecU, 0x5fU,
+    0x60U, 0x51U, 0x7fU, 0xa9U, 0x19U, 0xb5U, 0x4aU, 0x0dU,
+    0x2dU, 0xe5U, 0x7aU, 0x9fU, 0x93U, 0xc9U, 0x9cU, 0xefU,
+    0xa0U, 0xe0U, 0x3bU, 0x4dU, 0xaeU, 0x2aU, 0xf5U, 0xb0U,
+    0xc8U, 0xebU, 0xbbU, 0x3cU, 0x83U, 0x53U, 0x99U, 0x61U,
+    0x17U, 0x2bU, 0x04U, 0x7eU, 0xbaU, 0x77U, 0xd6U, 0x26U,
+    0xe1U, 0x69U, 0x14U, 0x63U, 0x55U, 0x21U, 0x0cU, 0x7dU,
+};
+static const u32 rcon[] = {
+	0x01000000, 0x02000000, 0x04000000, 0x08000000,
+	0x10000000, 0x20000000, 0x40000000, 0x80000000,
+	0x1B000000, 0x36000000, /* for 128-bit blocks, Rijndael never uses more than 10 rcon values */
+};
+
+/**
+ * Expand the cipher key into the encryption key schedule.
+ */
+int private_AES_set_encrypt_key(const unsigned char *userKey, const int bits,
+			AES_KEY *key) {
+
+	u32 *rk;
+   	int i = 0;
+	u32 temp;
+
+	if (!userKey || !key)
+		return -1;
+	if (bits != 128 && bits != 192 && bits != 256)
+		return -2;
+
+	rk = key->rd_key;
+
+	if (bits==128)
+		key->rounds = 10;
+	else if (bits==192)
+		key->rounds = 12;
+	else
+		key->rounds = 14;
+
+	rk[0] = GETU32(userKey     );
+	rk[1] = GETU32(userKey +  4);
+	rk[2] = GETU32(userKey +  8);
+	rk[3] = GETU32(userKey + 12);
+	if (bits == 128) {
+		while (1) {
+			temp  = rk[3];
+			rk[4] = rk[0] ^
+				(Te2[(temp >> 16) & 0xff] & 0xff000000) ^
+				(Te3[(temp >>  8) & 0xff] & 0x00ff0000) ^
+				(Te0[(temp      ) & 0xff] & 0x0000ff00) ^
+				(Te1[(temp >> 24)       ] & 0x000000ff) ^
+				rcon[i];
+			rk[5] = rk[1] ^ rk[4];
+			rk[6] = rk[2] ^ rk[5];
+			rk[7] = rk[3] ^ rk[6];
+			if (++i == 10) {
+				return 0;
+			}
+			rk += 4;
+		}
+	}
+	rk[4] = GETU32(userKey + 16);
+	rk[5] = GETU32(userKey + 20);
+	if (bits == 192) {
+		while (1) {
+			temp = rk[ 5];
+			rk[ 6] = rk[ 0] ^
+				(Te2[(temp >> 16) & 0xff] & 0xff000000) ^
+				(Te3[(temp >>  8) & 0xff] & 0x00ff0000) ^
+				(Te0[(temp      ) & 0xff] & 0x0000ff00) ^
+				(Te1[(temp >> 24)       ] & 0x000000ff) ^
+				rcon[i];
+			rk[ 7] = rk[ 1] ^ rk[ 6];
+			rk[ 8] = rk[ 2] ^ rk[ 7];
+			rk[ 9] = rk[ 3] ^ rk[ 8];
+			if (++i == 8) {
+				return 0;
+			}
+			rk[10] = rk[ 4] ^ rk[ 9];
+			rk[11] = rk[ 5] ^ rk[10];
+			rk += 6;
+		}
+	}
+	rk[6] = GETU32(userKey + 24);
+	rk[7] = GETU32(userKey + 28);
+	if (bits == 256) {
+		while (1) {
+			temp = rk[ 7];
+			rk[ 8] = rk[ 0] ^
+				(Te2[(temp >> 16) & 0xff] & 0xff000000) ^
+				(Te3[(temp >>  8) & 0xff] & 0x00ff0000) ^
+				(Te0[(temp      ) & 0xff] & 0x0000ff00) ^
+				(Te1[(temp >> 24)       ] & 0x000000ff) ^
+				rcon[i];
+			rk[ 9] = rk[ 1] ^ rk[ 8];
+			rk[10] = rk[ 2] ^ rk[ 9];
+			rk[11] = rk[ 3] ^ rk[10];
+			if (++i == 7) {
+				return 0;
+			}
+			temp = rk[11];
+			rk[12] = rk[ 4] ^
+				(Te2[(temp >> 24)       ] & 0xff000000) ^
+				(Te3[(temp >> 16) & 0xff] & 0x00ff0000) ^
+				(Te0[(temp >>  8) & 0xff] & 0x0000ff00) ^
+				(Te1[(temp      ) & 0xff] & 0x000000ff);
+			rk[13] = rk[ 5] ^ rk[12];
+			rk[14] = rk[ 6] ^ rk[13];
+			rk[15] = rk[ 7] ^ rk[14];
+
+			rk += 8;
+        	}
+	}
+	return 0;
+}
+
+/**
+ * Expand the cipher key into the decryption key schedule.
+ */
+int private_AES_set_decrypt_key(const unsigned char *userKey, const int bits,
+			 AES_KEY *key) {
+
+        u32 *rk;
+	int i, j, status;
+	u32 temp;
+
+	/* first, start with an encryption schedule */
+	status = private_AES_set_encrypt_key(userKey, bits, key);
+	if (status < 0)
+		return status;
+
+	rk = key->rd_key;
+
+	/* invert the order of the round keys: */
+	for (i = 0, j = 4*(key->rounds); i < j; i += 4, j -= 4) {
+		temp = rk[i    ]; rk[i    ] = rk[j    ]; rk[j    ] = temp;
+		temp = rk[i + 1]; rk[i + 1] = rk[j + 1]; rk[j + 1] = temp;
+		temp = rk[i + 2]; rk[i + 2] = rk[j + 2]; rk[j + 2] = temp;
+		temp = rk[i + 3]; rk[i + 3] = rk[j + 3]; rk[j + 3] = temp;
+	}
+	/* apply the inverse MixColumn transform to all round keys but the first and the last: */
+	for (i = 1; i < (key->rounds); i++) {
+		rk += 4;
+		rk[0] =
+			Td0[Te1[(rk[0] >> 24)       ] & 0xff] ^
+			Td1[Te1[(rk[0] >> 16) & 0xff] & 0xff] ^
+			Td2[Te1[(rk[0] >>  8) & 0xff] & 0xff] ^
+			Td3[Te1[(rk[0]      ) & 0xff] & 0xff];
+		rk[1] =
+			Td0[Te1[(rk[1] >> 24)       ] & 0xff] ^
+			Td1[Te1[(rk[1] >> 16) & 0xff] & 0xff] ^
+			Td2[Te1[(rk[1] >>  8) & 0xff] & 0xff] ^
+			Td3[Te1[(rk[1]      ) & 0xff] & 0xff];
+		rk[2] =
+			Td0[Te1[(rk[2] >> 24)       ] & 0xff] ^
+			Td1[Te1[(rk[2] >> 16) & 0xff] & 0xff] ^
+			Td2[Te1[(rk[2] >>  8) & 0xff] & 0xff] ^
+			Td3[Te1[(rk[2]      ) & 0xff] & 0xff];
+		rk[3] =
+			Td0[Te1[(rk[3] >> 24)       ] & 0xff] ^
+			Td1[Te1[(rk[3] >> 16) & 0xff] & 0xff] ^
+			Td2[Te1[(rk[3] >>  8) & 0xff] & 0xff] ^
+			Td3[Te1[(rk[3]      ) & 0xff] & 0xff];
+	}
+	return 0;
+}
+
+/*
+ * Encrypt a single block
+ * in and out can overlap
+ */
+void AES_encrypt(const unsigned char *in, unsigned char *out,
+		 const AES_KEY *key) {
+
+	const u32 *rk;
+	u32 s0, s1, s2, s3, t0, t1, t2, t3;
+#ifndef FULL_UNROLL
+	int r;
+#endif /* ?FULL_UNROLL */
+
+	assert(in && out && key);
+	rk = key->rd_key;
+
+	/*
+	 * map byte array block to cipher state
+	 * and add initial round key:
+	 */
+	s0 = GETU32(in     ) ^ rk[0];
+	s1 = GETU32(in +  4) ^ rk[1];
+	s2 = GETU32(in +  8) ^ rk[2];
+	s3 = GETU32(in + 12) ^ rk[3];
+#ifdef FULL_UNROLL
+	/* round 1: */
+   	t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >>  8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[ 4];
+   	t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >>  8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[ 5];
+   	t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >>  8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[ 6];
+   	t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >>  8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[ 7];
+   	/* round 2: */
+   	s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >>  8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[ 8];
+   	s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >>  8) & 0xff] ^ Te3[t0 & 0xff] ^ rk[ 9];
+   	s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >>  8) & 0xff] ^ Te3[t1 & 0xff] ^ rk[10];
+   	s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >>  8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[11];
+	/* round 3: */
+   	t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >>  8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[12];
+   	t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >>  8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[13];
+   	t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >>  8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[14];
+   	t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >>  8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[15];
+   	/* round 4: */
+   	s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >>  8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[16];
+   	s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >>  8) & 0xff] ^ Te3[t0 & 0xff] ^ rk[17];
+   	s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >>  8) & 0xff] ^ Te3[t1 & 0xff] ^ rk[18];
+   	s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >>  8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[19];
+	/* round 5: */
+   	t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >>  8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[20];
+   	t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >>  8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[21];
+   	t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >>  8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[22];
+   	t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >>  8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[23];
+   	/* round 6: */
+   	s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >>  8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[24];
+   	s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >>  8) & 0xff] ^ Te3[t0 & 0xff] ^ rk[25];
+   	s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >>  8) & 0xff] ^ Te3[t1 & 0xff] ^ rk[26];
+   	s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >>  8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[27];
+	/* round 7: */
+   	t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >>  8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[28];
+   	t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >>  8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[29];
+   	t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >>  8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[30];
+   	t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >>  8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[31];
+   	/* round 8: */
+   	s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >>  8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[32];
+   	s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >>  8) & 0xff] ^ Te3[t0 & 0xff] ^ rk[33];
+   	s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >>  8) & 0xff] ^ Te3[t1 & 0xff] ^ rk[34];
+   	s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >>  8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[35];
+	/* round 9: */
+   	t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >>  8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[36];
+   	t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >>  8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[37];
+   	t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >>  8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[38];
+   	t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >>  8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[39];
+    if (key->rounds > 10) {
+        /* round 10: */
+        s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >>  8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[40];
+        s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >>  8) & 0xff] ^ Te3[t0 & 0xff] ^ rk[41];
+        s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >>  8) & 0xff] ^ Te3[t1 & 0xff] ^ rk[42];
+        s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >>  8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[43];
+        /* round 11: */
+        t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >>  8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[44];
+        t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >>  8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[45];
+        t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >>  8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[46];
+        t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >>  8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[47];
+        if (key->rounds > 12) {
+            /* round 12: */
+            s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >>  8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[48];
+            s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >>  8) & 0xff] ^ Te3[t0 & 0xff] ^ rk[49];
+            s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >>  8) & 0xff] ^ Te3[t1 & 0xff] ^ rk[50];
+            s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >>  8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[51];
+            /* round 13: */
+            t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >>  8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[52];
+            t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >>  8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[53];
+            t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >>  8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[54];
+            t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >>  8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[55];
+        }
+    }
+    rk += key->rounds << 2;
+#else  /* !FULL_UNROLL */
+    /*
+     * Nr - 1 full rounds:
+     */
+    r = key->rounds >> 1;
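+    /*
+     * (illustrative comment, not in the upstream source) Each pass of the
+     * loop below computes two rounds, breaking out halfway through the
+     * final pass, so exactly Nr - 1 full rounds are done here and the
+     * last round, which omits MixColumns, is applied after the loop.
+     */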
+    for (;;) {
+        t0 =
+            Te0[(s0 >> 24)       ] ^
+            Te1[(s1 >> 16) & 0xff] ^
+            Te2[(s2 >>  8) & 0xff] ^
+            Te3[(s3      ) & 0xff] ^
+            rk[4];
+        t1 =
+            Te0[(s1 >> 24)       ] ^
+            Te1[(s2 >> 16) & 0xff] ^
+            Te2[(s3 >>  8) & 0xff] ^
+            Te3[(s0      ) & 0xff] ^
+            rk[5];
+        t2 =
+            Te0[(s2 >> 24)       ] ^
+            Te1[(s3 >> 16) & 0xff] ^
+            Te2[(s0 >>  8) & 0xff] ^
+            Te3[(s1      ) & 0xff] ^
+            rk[6];
+        t3 =
+            Te0[(s3 >> 24)       ] ^
+            Te1[(s0 >> 16) & 0xff] ^
+            Te2[(s1 >>  8) & 0xff] ^
+            Te3[(s2      ) & 0xff] ^
+            rk[7];
+
+        rk += 8;
+        if (--r == 0) {
+            break;
+        }
+
+        s0 =
+            Te0[(t0 >> 24)       ] ^
+            Te1[(t1 >> 16) & 0xff] ^
+            Te2[(t2 >>  8) & 0xff] ^
+            Te3[(t3      ) & 0xff] ^
+            rk[0];
+        s1 =
+            Te0[(t1 >> 24)       ] ^
+            Te1[(t2 >> 16) & 0xff] ^
+            Te2[(t3 >>  8) & 0xff] ^
+            Te3[(t0      ) & 0xff] ^
+            rk[1];
+        s2 =
+            Te0[(t2 >> 24)       ] ^
+            Te1[(t3 >> 16) & 0xff] ^
+            Te2[(t0 >>  8) & 0xff] ^
+            Te3[(t1      ) & 0xff] ^
+            rk[2];
+        s3 =
+            Te0[(t3 >> 24)       ] ^
+            Te1[(t0 >> 16) & 0xff] ^
+            Te2[(t1 >>  8) & 0xff] ^
+            Te3[(t2      ) & 0xff] ^
+            rk[3];
+    }
+#endif /* ?FULL_UNROLL */
+    /*
+	 * apply last round and
+	 * map cipher state to byte array block:
+	 */
+	s0 =
+		(Te2[(t0 >> 24)       ] & 0xff000000) ^
+		(Te3[(t1 >> 16) & 0xff] & 0x00ff0000) ^
+		(Te0[(t2 >>  8) & 0xff] & 0x0000ff00) ^
+		(Te1[(t3      ) & 0xff] & 0x000000ff) ^
+		rk[0];
+	PUTU32(out     , s0);
+	s1 =
+		(Te2[(t1 >> 24)       ] & 0xff000000) ^
+		(Te3[(t2 >> 16) & 0xff] & 0x00ff0000) ^
+		(Te0[(t3 >>  8) & 0xff] & 0x0000ff00) ^
+		(Te1[(t0      ) & 0xff] & 0x000000ff) ^
+		rk[1];
+	PUTU32(out +  4, s1);
+	s2 =
+		(Te2[(t2 >> 24)       ] & 0xff000000) ^
+		(Te3[(t3 >> 16) & 0xff] & 0x00ff0000) ^
+		(Te0[(t0 >>  8) & 0xff] & 0x0000ff00) ^
+		(Te1[(t1      ) & 0xff] & 0x000000ff) ^
+		rk[2];
+	PUTU32(out +  8, s2);
+	s3 =
+		(Te2[(t3 >> 24)       ] & 0xff000000) ^
+		(Te3[(t0 >> 16) & 0xff] & 0x00ff0000) ^
+		(Te0[(t1 >>  8) & 0xff] & 0x0000ff00) ^
+		(Te1[(t2      ) & 0xff] & 0x000000ff) ^
+		rk[3];
+	PUTU32(out + 12, s3);
+}
+
+/*
+ * Decrypt a single block
+ * in and out can overlap
+ */
+void AES_decrypt(const unsigned char *in, unsigned char *out,
+		 const AES_KEY *key) {
+
+	const u32 *rk;
+	u32 s0, s1, s2, s3, t0, t1, t2, t3;
+#ifndef FULL_UNROLL
+	int r;
+#endif /* ?FULL_UNROLL */
+
+	assert(in && out && key);
+	rk = key->rd_key;
+
+	/*
+	 * map byte array block to cipher state
+	 * and add initial round key:
+	 */
+    s0 = GETU32(in     ) ^ rk[0];
+    s1 = GETU32(in +  4) ^ rk[1];
+    s2 = GETU32(in +  8) ^ rk[2];
+    s3 = GETU32(in + 12) ^ rk[3];
+#ifdef FULL_UNROLL
+    /* round 1: */
+    t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >>  8) & 0xff] ^ Td3[s1 & 0xff] ^ rk[ 4];
+    t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >>  8) & 0xff] ^ Td3[s2 & 0xff] ^ rk[ 5];
+    t2 = Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >>  8) & 0xff] ^ Td3[s3 & 0xff] ^ rk[ 6];
+    t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >>  8) & 0xff] ^ Td3[s0 & 0xff] ^ rk[ 7];
+    /* round 2: */
+    s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >>  8) & 0xff] ^ Td3[t1 & 0xff] ^ rk[ 8];
+    s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >>  8) & 0xff] ^ Td3[t2 & 0xff] ^ rk[ 9];
+    s2 = Td0[t2 >> 24] ^ Td1[(t1 >> 16) & 0xff] ^ Td2[(t0 >>  8) & 0xff] ^ Td3[t3 & 0xff] ^ rk[10];
+    s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >>  8) & 0xff] ^ Td3[t0 & 0xff] ^ rk[11];
+    /* round 3: */
+    t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >>  8) & 0xff] ^ Td3[s1 & 0xff] ^ rk[12];
+    t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >>  8) & 0xff] ^ Td3[s2 & 0xff] ^ rk[13];
+    t2 = Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >>  8) & 0xff] ^ Td3[s3 & 0xff] ^ rk[14];
+    t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >>  8) & 0xff] ^ Td3[s0 & 0xff] ^ rk[15];
+    /* round 4: */
+    s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >>  8) & 0xff] ^ Td3[t1 & 0xff] ^ rk[16];
+    s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >>  8) & 0xff] ^ Td3[t2 & 0xff] ^ rk[17];
+    s2 = Td0[t2 >> 24] ^ Td1[(t1 >> 16) & 0xff] ^ Td2[(t0 >>  8) & 0xff] ^ Td3[t3 & 0xff] ^ rk[18];
+    s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >>  8) & 0xff] ^ Td3[t0 & 0xff] ^ rk[19];
+    /* round 5: */
+    t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >>  8) & 0xff] ^ Td3[s1 & 0xff] ^ rk[20];
+    t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >>  8) & 0xff] ^ Td3[s2 & 0xff] ^ rk[21];
+    t2 = Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >>  8) & 0xff] ^ Td3[s3 & 0xff] ^ rk[22];
+    t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >>  8) & 0xff] ^ Td3[s0 & 0xff] ^ rk[23];
+    /* round 6: */
+    s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >>  8) & 0xff] ^ Td3[t1 & 0xff] ^ rk[24];
+    s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >>  8) & 0xff] ^ Td3[t2 & 0xff] ^ rk[25];
+    s2 = Td0[t2 >> 24] ^ Td1[(t1 >> 16) & 0xff] ^ Td2[(t0 >>  8) & 0xff] ^ Td3[t3 & 0xff] ^ rk[26];
+    s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >>  8) & 0xff] ^ Td3[t0 & 0xff] ^ rk[27];
+    /* round 7: */
+    t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >>  8) & 0xff] ^ Td3[s1 & 0xff] ^ rk[28];
+    t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >>  8) & 0xff] ^ Td3[s2 & 0xff] ^ rk[29];
+    t2 = Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >>  8) & 0xff] ^ Td3[s3 & 0xff] ^ rk[30];
+    t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >>  8) & 0xff] ^ Td3[s0 & 0xff] ^ rk[31];
+    /* round 8: */
+    s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >>  8) & 0xff] ^ Td3[t1 & 0xff] ^ rk[32];
+    s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >>  8) & 0xff] ^ Td3[t2 & 0xff] ^ rk[33];
+    s2 = Td0[t2 >> 24] ^ Td1[(t1 >> 16) & 0xff] ^ Td2[(t0 >>  8) & 0xff] ^ Td3[t3 & 0xff] ^ rk[34];
+    s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >>  8) & 0xff] ^ Td3[t0 & 0xff] ^ rk[35];
+    /* round 9: */
+    t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >>  8) & 0xff] ^ Td3[s1 & 0xff] ^ rk[36];
+    t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >>  8) & 0xff] ^ Td3[s2 & 0xff] ^ rk[37];
+    t2 = Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >>  8) & 0xff] ^ Td3[s3 & 0xff] ^ rk[38];
+    t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >>  8) & 0xff] ^ Td3[s0 & 0xff] ^ rk[39];
+    if (key->rounds > 10) {
+        /* round 10: */
+        s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >>  8) & 0xff] ^ Td3[t1 & 0xff] ^ rk[40];
+        s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >>  8) & 0xff] ^ Td3[t2 & 0xff] ^ rk[41];
+        s2 = Td0[t2 >> 24] ^ Td1[(t1 >> 16) & 0xff] ^ Td2[(t0 >>  8) & 0xff] ^ Td3[t3 & 0xff] ^ rk[42];
+        s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >>  8) & 0xff] ^ Td3[t0 & 0xff] ^ rk[43];
+        /* round 11: */
+        t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >>  8) & 0xff] ^ Td3[s1 & 0xff] ^ rk[44];
+        t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >>  8) & 0xff] ^ Td3[s2 & 0xff] ^ rk[45];
+        t2 = Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >>  8) & 0xff] ^ Td3[s3 & 0xff] ^ rk[46];
+        t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >>  8) & 0xff] ^ Td3[s0 & 0xff] ^ rk[47];
+        if (key->rounds > 12) {
+            /* round 12: */
+            s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >>  8) & 0xff] ^ Td3[t1 & 0xff] ^ rk[48];
+            s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >>  8) & 0xff] ^ Td3[t2 & 0xff] ^ rk[49];
+            s2 = Td0[t2 >> 24] ^ Td1[(t1 >> 16) & 0xff] ^ Td2[(t0 >>  8) & 0xff] ^ Td3[t3 & 0xff] ^ rk[50];
+            s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >>  8) & 0xff] ^ Td3[t0 & 0xff] ^ rk[51];
+            /* round 13: */
+            t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >>  8) & 0xff] ^ Td3[s1 & 0xff] ^ rk[52];
+            t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >>  8) & 0xff] ^ Td3[s2 & 0xff] ^ rk[53];
+            t2 = Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >>  8) & 0xff] ^ Td3[s3 & 0xff] ^ rk[54];
+            t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >>  8) & 0xff] ^ Td3[s0 & 0xff] ^ rk[55];
+        }
+    }
+	rk += key->rounds << 2;
+#else  /* !FULL_UNROLL */
+    /*
+     * Nr - 1 full rounds:
+     */
+    r = key->rounds >> 1;
+    for (;;) {
+        t0 =
+            Td0[(s0 >> 24)       ] ^
+            Td1[(s3 >> 16) & 0xff] ^
+            Td2[(s2 >>  8) & 0xff] ^
+            Td3[(s1      ) & 0xff] ^
+            rk[4];
+        t1 =
+            Td0[(s1 >> 24)       ] ^
+            Td1[(s0 >> 16) & 0xff] ^
+            Td2[(s3 >>  8) & 0xff] ^
+            Td3[(s2      ) & 0xff] ^
+            rk[5];
+        t2 =
+            Td0[(s2 >> 24)       ] ^
+            Td1[(s1 >> 16) & 0xff] ^
+            Td2[(s0 >>  8) & 0xff] ^
+            Td3[(s3      ) & 0xff] ^
+            rk[6];
+        t3 =
+            Td0[(s3 >> 24)       ] ^
+            Td1[(s2 >> 16) & 0xff] ^
+            Td2[(s1 >>  8) & 0xff] ^
+            Td3[(s0      ) & 0xff] ^
+            rk[7];
+
+        rk += 8;
+        if (--r == 0) {
+            break;
+        }
+
+        s0 =
+            Td0[(t0 >> 24)       ] ^
+            Td1[(t3 >> 16) & 0xff] ^
+            Td2[(t2 >>  8) & 0xff] ^
+            Td3[(t1      ) & 0xff] ^
+            rk[0];
+        s1 =
+            Td0[(t1 >> 24)       ] ^
+            Td1[(t0 >> 16) & 0xff] ^
+            Td2[(t3 >>  8) & 0xff] ^
+            Td3[(t2      ) & 0xff] ^
+            rk[1];
+        s2 =
+            Td0[(t2 >> 24)       ] ^
+            Td1[(t1 >> 16) & 0xff] ^
+            Td2[(t0 >>  8) & 0xff] ^
+            Td3[(t3      ) & 0xff] ^
+            rk[2];
+        s3 =
+            Td0[(t3 >> 24)       ] ^
+            Td1[(t2 >> 16) & 0xff] ^
+            Td2[(t1 >>  8) & 0xff] ^
+            Td3[(t0      ) & 0xff] ^
+            rk[3];
+    }
+#endif /* ?FULL_UNROLL */
+    /*
+	 * apply last round and
+	 * map cipher state to byte array block:
+	 */
+   	s0 =
+   		(Td4[(t0 >> 24)       ] << 24) ^
+   		(Td4[(t3 >> 16) & 0xff] << 16) ^
+   		(Td4[(t2 >>  8) & 0xff] <<  8) ^
+   		(Td4[(t1      ) & 0xff])       ^
+   		rk[0];
+	PUTU32(out     , s0);
+   	s1 =
+   		(Td4[(t1 >> 24)       ] << 24) ^
+   		(Td4[(t0 >> 16) & 0xff] << 16) ^
+   		(Td4[(t3 >>  8) & 0xff] <<  8) ^
+   		(Td4[(t2      ) & 0xff])       ^
+   		rk[1];
+	PUTU32(out +  4, s1);
+   	s2 =
+   		(Td4[(t2 >> 24)       ] << 24) ^
+   		(Td4[(t1 >> 16) & 0xff] << 16) ^
+   		(Td4[(t0 >>  8) & 0xff] <<  8) ^
+   		(Td4[(t3      ) & 0xff])       ^
+   		rk[2];
+	PUTU32(out +  8, s2);
+   	s3 =
+   		(Td4[(t3 >> 24)       ] << 24) ^
+   		(Td4[(t2 >> 16) & 0xff] << 16) ^
+   		(Td4[(t1 >>  8) & 0xff] <<  8) ^
+   		(Td4[(t0      ) & 0xff])       ^
+   		rk[3];
+	PUTU32(out + 12, s3);
+}
+
+#else /* AES_ASM */
+
+static const u8 Te4[256] = {
+    0x63U, 0x7cU, 0x77U, 0x7bU, 0xf2U, 0x6bU, 0x6fU, 0xc5U,
+    0x30U, 0x01U, 0x67U, 0x2bU, 0xfeU, 0xd7U, 0xabU, 0x76U,
+    0xcaU, 0x82U, 0xc9U, 0x7dU, 0xfaU, 0x59U, 0x47U, 0xf0U,
+    0xadU, 0xd4U, 0xa2U, 0xafU, 0x9cU, 0xa4U, 0x72U, 0xc0U,
+    0xb7U, 0xfdU, 0x93U, 0x26U, 0x36U, 0x3fU, 0xf7U, 0xccU,
+    0x34U, 0xa5U, 0xe5U, 0xf1U, 0x71U, 0xd8U, 0x31U, 0x15U,
+    0x04U, 0xc7U, 0x23U, 0xc3U, 0x18U, 0x96U, 0x05U, 0x9aU,
+    0x07U, 0x12U, 0x80U, 0xe2U, 0xebU, 0x27U, 0xb2U, 0x75U,
+    0x09U, 0x83U, 0x2cU, 0x1aU, 0x1bU, 0x6eU, 0x5aU, 0xa0U,
+    0x52U, 0x3bU, 0xd6U, 0xb3U, 0x29U, 0xe3U, 0x2fU, 0x84U,
+    0x53U, 0xd1U, 0x00U, 0xedU, 0x20U, 0xfcU, 0xb1U, 0x5bU,
+    0x6aU, 0xcbU, 0xbeU, 0x39U, 0x4aU, 0x4cU, 0x58U, 0xcfU,
+    0xd0U, 0xefU, 0xaaU, 0xfbU, 0x43U, 0x4dU, 0x33U, 0x85U,
+    0x45U, 0xf9U, 0x02U, 0x7fU, 0x50U, 0x3cU, 0x9fU, 0xa8U,
+    0x51U, 0xa3U, 0x40U, 0x8fU, 0x92U, 0x9dU, 0x38U, 0xf5U,
+    0xbcU, 0xb6U, 0xdaU, 0x21U, 0x10U, 0xffU, 0xf3U, 0xd2U,
+    0xcdU, 0x0cU, 0x13U, 0xecU, 0x5fU, 0x97U, 0x44U, 0x17U,
+    0xc4U, 0xa7U, 0x7eU, 0x3dU, 0x64U, 0x5dU, 0x19U, 0x73U,
+    0x60U, 0x81U, 0x4fU, 0xdcU, 0x22U, 0x2aU, 0x90U, 0x88U,
+    0x46U, 0xeeU, 0xb8U, 0x14U, 0xdeU, 0x5eU, 0x0bU, 0xdbU,
+    0xe0U, 0x32U, 0x3aU, 0x0aU, 0x49U, 0x06U, 0x24U, 0x5cU,
+    0xc2U, 0xd3U, 0xacU, 0x62U, 0x91U, 0x95U, 0xe4U, 0x79U,
+    0xe7U, 0xc8U, 0x37U, 0x6dU, 0x8dU, 0xd5U, 0x4eU, 0xa9U,
+    0x6cU, 0x56U, 0xf4U, 0xeaU, 0x65U, 0x7aU, 0xaeU, 0x08U,
+    0xbaU, 0x78U, 0x25U, 0x2eU, 0x1cU, 0xa6U, 0xb4U, 0xc6U,
+    0xe8U, 0xddU, 0x74U, 0x1fU, 0x4bU, 0xbdU, 0x8bU, 0x8aU,
+    0x70U, 0x3eU, 0xb5U, 0x66U, 0x48U, 0x03U, 0xf6U, 0x0eU,
+    0x61U, 0x35U, 0x57U, 0xb9U, 0x86U, 0xc1U, 0x1dU, 0x9eU,
+    0xe1U, 0xf8U, 0x98U, 0x11U, 0x69U, 0xd9U, 0x8eU, 0x94U,
+    0x9bU, 0x1eU, 0x87U, 0xe9U, 0xceU, 0x55U, 0x28U, 0xdfU,
+    0x8cU, 0xa1U, 0x89U, 0x0dU, 0xbfU, 0xe6U, 0x42U, 0x68U,
+    0x41U, 0x99U, 0x2dU, 0x0fU, 0xb0U, 0x54U, 0xbbU, 0x16U
+};
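+/*
+ * Illustrative note (not in the upstream source): this Te4 is the plain
+ * AES S-box; in the AES_ASM build the round transformations live in
+ * assembler and the C code only needs the S-box for key expansion below.
+ */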
+static const u32 rcon[] = {
+	0x01000000, 0x02000000, 0x04000000, 0x08000000,
+	0x10000000, 0x20000000, 0x40000000, 0x80000000,
+	0x1B000000, 0x36000000, /* for 128-bit blocks, Rijndael never uses more than 10 rcon values */
+};
+
+/**
+ * Expand the cipher key into the encryption key schedule.
+ */
+int private_AES_set_encrypt_key(const unsigned char *userKey, const int bits,
+			AES_KEY *key) {
+	u32 *rk;
+   	int i = 0;
+	u32 temp;
+
+	if (!userKey || !key)
+		return -1;
+	if (bits != 128 && bits != 192 && bits != 256)
+		return -2;
+
+	rk = key->rd_key;
+
+	if (bits==128)
+		key->rounds = 10;
+	else if (bits==192)
+		key->rounds = 12;
+	else
+		key->rounds = 14;
+
+	rk[0] = GETU32(userKey     );
+	rk[1] = GETU32(userKey +  4);
+	rk[2] = GETU32(userKey +  8);
+	rk[3] = GETU32(userKey + 12);
+	if (bits == 128) {
+		while (1) {
+			temp  = rk[3];
+			rk[4] = rk[0] ^
+				(Te4[(temp >> 16) & 0xff] << 24) ^
+				(Te4[(temp >>  8) & 0xff] << 16) ^
+				(Te4[(temp      ) & 0xff] << 8) ^
+				(Te4[(temp >> 24)       ]) ^
+				rcon[i];
+			rk[5] = rk[1] ^ rk[4];
+			rk[6] = rk[2] ^ rk[5];
+			rk[7] = rk[3] ^ rk[6];
+			if (++i == 10) {
+				return 0;
+			}
+			rk += 4;
+		}
+	}
+	rk[4] = GETU32(userKey + 16);
+	rk[5] = GETU32(userKey + 20);
+	if (bits == 192) {
+		while (1) {
+			temp = rk[ 5];
+			rk[ 6] = rk[ 0] ^
+				(Te4[(temp >> 16) & 0xff] << 24) ^
+				(Te4[(temp >>  8) & 0xff] << 16) ^
+				(Te4[(temp      ) & 0xff] << 8) ^
+				(Te4[(temp >> 24)       ]) ^
+				rcon[i];
+			rk[ 7] = rk[ 1] ^ rk[ 6];
+			rk[ 8] = rk[ 2] ^ rk[ 7];
+			rk[ 9] = rk[ 3] ^ rk[ 8];
+			if (++i == 8) {
+				return 0;
+			}
+			rk[10] = rk[ 4] ^ rk[ 9];
+			rk[11] = rk[ 5] ^ rk[10];
+			rk += 6;
+		}
+	}
+	rk[6] = GETU32(userKey + 24);
+	rk[7] = GETU32(userKey + 28);
+	if (bits == 256) {
+		while (1) {
+			temp = rk[ 7];
+			rk[ 8] = rk[ 0] ^
+				(Te4[(temp >> 16) & 0xff] << 24) ^
+				(Te4[(temp >>  8) & 0xff] << 16) ^
+				(Te4[(temp      ) & 0xff] << 8) ^
+				(Te4[(temp >> 24)       ]) ^
+				rcon[i];
+			rk[ 9] = rk[ 1] ^ rk[ 8];
+			rk[10] = rk[ 2] ^ rk[ 9];
+			rk[11] = rk[ 3] ^ rk[10];
+			if (++i == 7) {
+				return 0;
+			}
+			temp = rk[11];
+			rk[12] = rk[ 4] ^
+				(Te4[(temp >> 24)       ] << 24) ^
+				(Te4[(temp >> 16) & 0xff] << 16) ^
+				(Te4[(temp >>  8) & 0xff] << 8) ^
+				(Te4[(temp      ) & 0xff]);
+			rk[13] = rk[ 5] ^ rk[12];
+			rk[14] = rk[ 6] ^ rk[13];
+			rk[15] = rk[ 7] ^ rk[14];
+
+			rk += 8;
+        	}
+	}
+	return 0;
+}
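+
+/*
+ * Illustrative note (not in the upstream source): the expanded schedule in
+ * rd_key holds 4*(rounds+1) 32-bit words: 44 for a 128-bit key, 52 for
+ * 192 and 60 for 256.  Applications normally reach this routine through
+ * the AES_set_encrypt_key() wrapper in aes_misc.c.
+ */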
+
+/**
+ * Expand the cipher key into the decryption key schedule.
+ */
+int private_AES_set_decrypt_key(const unsigned char *userKey, const int bits,
+			 AES_KEY *key) {
+
+        u32 *rk;
+	int i, j, status;
+	u32 temp;
+
+	/* first, start with an encryption schedule */
+	status = private_AES_set_encrypt_key(userKey, bits, key);
+	if (status < 0)
+		return status;
+
+	rk = key->rd_key;
+
+	/* invert the order of the round keys: */
+	for (i = 0, j = 4*(key->rounds); i < j; i += 4, j -= 4) {
+		temp = rk[i    ]; rk[i    ] = rk[j    ]; rk[j    ] = temp;
+		temp = rk[i + 1]; rk[i + 1] = rk[j + 1]; rk[j + 1] = temp;
+		temp = rk[i + 2]; rk[i + 2] = rk[j + 2]; rk[j + 2] = temp;
+		temp = rk[i + 3]; rk[i + 3] = rk[j + 3]; rk[j + 3] = temp;
+	}
+	/* apply the inverse MixColumn transform to all round keys but the first and the last: */
+	for (i = 1; i < (key->rounds); i++) {
+		rk += 4;
+		for (j = 0; j < 4; j++) {
+			u32 tp1, tp2, tp4, tp8, tp9, tpb, tpd, tpe, m;
+
+			tp1 = rk[j];
+			m = tp1 & 0x80808080;
+			tp2 = ((tp1 & 0x7f7f7f7f) << 1) ^
+				((m - (m >> 7)) & 0x1b1b1b1b);
+			m = tp2 & 0x80808080;
+			tp4 = ((tp2 & 0x7f7f7f7f) << 1) ^
+				((m - (m >> 7)) & 0x1b1b1b1b);
+			m = tp4 & 0x80808080;
+			tp8 = ((tp4 & 0x7f7f7f7f) << 1) ^
+				((m - (m >> 7)) & 0x1b1b1b1b);
+			tp9 = tp8 ^ tp1;
+			tpb = tp9 ^ tp2;
+			tpd = tp9 ^ tp4;
+			tpe = tp8 ^ tp4 ^ tp2;
+#if defined(ROTATE)
+			rk[j] = tpe ^ ROTATE(tpd,16) ^
+				ROTATE(tp9,24) ^ ROTATE(tpb,8);
+#else
+			rk[j] = tpe ^ (tpd >> 16) ^ (tpd << 16) ^ 
+				(tp9 >> 8) ^ (tp9 << 24) ^
+				(tpb >> 24) ^ (tpb << 8);
+#endif
+		}
+	}
+	return 0;
+}
+
+#endif /* AES_ASM */
diff --git a/jni/openssl/crypto/aes/aes_ctr.c b/jni/openssl/crypto/aes/aes_ctr.c
new file mode 100644
index 0000000..7c9d165
--- /dev/null
+++ b/jni/openssl/crypto/aes/aes_ctr.c
@@ -0,0 +1,61 @@
+/* crypto/aes/aes_ctr.c -*- mode:C; c-file-style: "eay" -*- */
+/* ====================================================================
+ * Copyright (c) 1998-2002 The OpenSSL Project.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer. 
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * 3. All advertising materials mentioning features or use of this
+ *    software must display the following acknowledgment:
+ *    "This product includes software developed by the OpenSSL Project
+ *    for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
+ *
+ * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
+ *    endorse or promote products derived from this software without
+ *    prior written permission. For written permission, please contact
+ *    openssl-core@openssl.org.
+ *
+ * 5. Products derived from this software may not be called "OpenSSL"
+ *    nor may "OpenSSL" appear in their names without prior written
+ *    permission of the OpenSSL Project.
+ *
+ * 6. Redistributions of any form whatsoever must retain the following
+ *    acknowledgment:
+ *    "This product includes software developed by the OpenSSL Project
+ *    for use in the OpenSSL Toolkit (http://www.openssl.org/)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
+ * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR
+ * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ * ====================================================================
+ *
+ */
+
+#include <openssl/aes.h>
+#include <openssl/modes.h>
+
+void AES_ctr128_encrypt(const unsigned char *in, unsigned char *out,
+			size_t length, const AES_KEY *key,
+			unsigned char ivec[AES_BLOCK_SIZE],
+			unsigned char ecount_buf[AES_BLOCK_SIZE],
+			unsigned int *num) {
+	CRYPTO_ctr128_encrypt(in,out,length,key,ivec,ecount_buf,num,(block128_f)AES_encrypt);
+}
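+
+/*
+ * Usage sketch (illustrative, not part of the upstream file): ecount_buf
+ * and *num carry partial keystream-block state between calls, so both
+ * should be zeroed before the first call for a given key/IV:
+ *
+ *     unsigned char ecount[AES_BLOCK_SIZE] = {0};
+ *     unsigned int num = 0;
+ *     AES_ctr128_encrypt(in, out, len, &key, ivec, ecount, &num);
+ *
+ * where in, out, len, key and ivec stand in for caller-supplied data.
+ */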
diff --git a/jni/openssl/crypto/aes/aes_ecb.c b/jni/openssl/crypto/aes/aes_ecb.c
new file mode 100644
index 0000000..28aa561
--- /dev/null
+++ b/jni/openssl/crypto/aes/aes_ecb.c
@@ -0,0 +1,73 @@
+/* crypto/aes/aes_ecb.c -*- mode:C; c-file-style: "eay" -*- */
+/* ====================================================================
+ * Copyright (c) 1998-2002 The OpenSSL Project.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer. 
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * 3. All advertising materials mentioning features or use of this
+ *    software must display the following acknowledgment:
+ *    "This product includes software developed by the OpenSSL Project
+ *    for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
+ *
+ * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
+ *    endorse or promote products derived from this software without
+ *    prior written permission. For written permission, please contact
+ *    openssl-core@openssl.org.
+ *
+ * 5. Products derived from this software may not be called "OpenSSL"
+ *    nor may "OpenSSL" appear in their names without prior written
+ *    permission of the OpenSSL Project.
+ *
+ * 6. Redistributions of any form whatsoever must retain the following
+ *    acknowledgment:
+ *    "This product includes software developed by the OpenSSL Project
+ *    for use in the OpenSSL Toolkit (http://www.openssl.org/)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
+ * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR
+ * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ * ====================================================================
+ *
+ */
+
+#ifndef AES_DEBUG
+# ifndef NDEBUG
+#  define NDEBUG
+# endif
+#endif
+#include <assert.h>
+
+#include <openssl/aes.h>
+#include "aes_locl.h"
+
+void AES_ecb_encrypt(const unsigned char *in, unsigned char *out,
+		     const AES_KEY *key, const int enc) {
+
+        assert(in && out && key);
+	assert((AES_ENCRYPT == enc)||(AES_DECRYPT == enc));
+
+	if (AES_ENCRYPT == enc)
+		AES_encrypt(in, out, key);
+	else
+		AES_decrypt(in, out, key);
+}
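+
+/*
+ * Note (illustrative, not part of the upstream file): this call handles
+ * exactly one 16-byte block; enc selects AES_encrypt() or AES_decrypt().
+ * A caller with a longer buffer loops over it in AES_BLOCK_SIZE steps:
+ *
+ *     for (i = 0; i + AES_BLOCK_SIZE <= len; i += AES_BLOCK_SIZE)
+ *             AES_ecb_encrypt(in + i, out + i, &key, AES_ENCRYPT);
+ */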
+
diff --git a/jni/openssl/crypto/aes/aes_ige.c b/jni/openssl/crypto/aes/aes_ige.c
new file mode 100644
index 0000000..c161351
--- /dev/null
+++ b/jni/openssl/crypto/aes/aes_ige.c
@@ -0,0 +1,323 @@
+/* crypto/aes/aes_ige.c -*- mode:C; c-file-style: "eay" -*- */
+/* ====================================================================
+ * Copyright (c) 2006 The OpenSSL Project.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer. 
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * 3. All advertising materials mentioning features or use of this
+ *    software must display the following acknowledgment:
+ *    "This product includes software developed by the OpenSSL Project
+ *    for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
+ *
+ * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
+ *    endorse or promote products derived from this software without
+ *    prior written permission. For written permission, please contact
+ *    openssl-core@openssl.org.
+ *
+ * 5. Products derived from this software may not be called "OpenSSL"
+ *    nor may "OpenSSL" appear in their names without prior written
+ *    permission of the OpenSSL Project.
+ *
+ * 6. Redistributions of any form whatsoever must retain the following
+ *    acknowledgment:
+ *    "This product includes software developed by the OpenSSL Project
+ *    for use in the OpenSSL Toolkit (http://www.openssl.org/)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
+ * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR
+ * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ * ====================================================================
+ *
+ */
+
+#include "cryptlib.h"
+
+#include <openssl/aes.h>
+#include "aes_locl.h"
+
+#define N_WORDS (AES_BLOCK_SIZE / sizeof(unsigned long))
+typedef struct {
+        unsigned long data[N_WORDS];
+} aes_block_t;
+
+/* XXX: probably some better way to do this */
+#if defined(__i386__) || defined(__x86_64__)
+#define UNALIGNED_MEMOPS_ARE_FAST 1
+#else
+#define UNALIGNED_MEMOPS_ARE_FAST 0
+#endif
+
+#if UNALIGNED_MEMOPS_ARE_FAST
+#define load_block(d, s)        (d) = *(const aes_block_t *)(s)
+#define store_block(d, s)       *(aes_block_t *)(d) = (s)
+#else
+#define load_block(d, s)        memcpy((d).data, (s), AES_BLOCK_SIZE)
+#define store_block(d, s)       memcpy((d), (s).data, AES_BLOCK_SIZE)
+#endif
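+/* Illustrative note (not in the upstream source): either variant moves
+ * exactly AES_BLOCK_SIZE bytes; the struct assignment is only used where
+ * unaligned word accesses are known to be cheap. */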
+
+/* N.B. The IV for this mode is _twice_ the block size */
+
+void AES_ige_encrypt(const unsigned char *in, unsigned char *out,
+					 size_t length, const AES_KEY *key,
+					 unsigned char *ivec, const int enc)
+	{
+	size_t n;
+	size_t len = length;
+
+	OPENSSL_assert(in && out && key && ivec);
+	OPENSSL_assert((AES_ENCRYPT == enc)||(AES_DECRYPT == enc));
+	OPENSSL_assert((length%AES_BLOCK_SIZE) == 0);
+
+	len = length / AES_BLOCK_SIZE;
+
+	if (AES_ENCRYPT == enc)
+		{
+		if (in != out &&
+		    (UNALIGNED_MEMOPS_ARE_FAST || ((size_t)in|(size_t)out|(size_t)ivec)%sizeof(long)==0))
+			{
+			aes_block_t *ivp = (aes_block_t *)ivec;
+			aes_block_t *iv2p = (aes_block_t *)(ivec + AES_BLOCK_SIZE);
+
+			while (len)
+				{
+				aes_block_t *inp = (aes_block_t *)in;
+				aes_block_t *outp = (aes_block_t *)out;
+
+				for(n=0 ; n < N_WORDS; ++n)
+					outp->data[n] = inp->data[n] ^ ivp->data[n];
+				AES_encrypt((unsigned char *)outp->data, (unsigned char *)outp->data, key);
+				for(n=0 ; n < N_WORDS; ++n)
+					outp->data[n] ^= iv2p->data[n];
+				ivp = outp;
+				iv2p = inp;
+				--len;
+				in += AES_BLOCK_SIZE;
+				out += AES_BLOCK_SIZE;
+				}
+			memcpy(ivec, ivp->data, AES_BLOCK_SIZE);
+			memcpy(ivec + AES_BLOCK_SIZE, iv2p->data, AES_BLOCK_SIZE);
+			}
+		else
+			{
+			aes_block_t tmp, tmp2;
+			aes_block_t iv;
+			aes_block_t iv2;
+
+			load_block(iv, ivec);
+			load_block(iv2, ivec + AES_BLOCK_SIZE);
+
+			while (len)
+				{
+				load_block(tmp, in);
+				for(n=0 ; n < N_WORDS; ++n)
+					tmp2.data[n] = tmp.data[n] ^ iv.data[n];
+				AES_encrypt((unsigned char *)tmp2.data, (unsigned char *)tmp2.data, key);
+				for(n=0 ; n < N_WORDS; ++n)
+					tmp2.data[n] ^= iv2.data[n];
+				store_block(out, tmp2);
+				iv = tmp2;
+				iv2 = tmp;
+				--len;
+				in += AES_BLOCK_SIZE;
+				out += AES_BLOCK_SIZE;
+				}
+			memcpy(ivec, iv.data, AES_BLOCK_SIZE);
+			memcpy(ivec + AES_BLOCK_SIZE, iv2.data, AES_BLOCK_SIZE);
+			}
+		}
+	else
+		{
+		if (in != out &&
+		    (UNALIGNED_MEMOPS_ARE_FAST || ((size_t)in|(size_t)out|(size_t)ivec)%sizeof(long)==0))
+			{
+			aes_block_t *ivp = (aes_block_t *)ivec;
+			aes_block_t *iv2p = (aes_block_t *)(ivec + AES_BLOCK_SIZE);
+
+			while (len)
+				{
+				aes_block_t tmp;
+				aes_block_t *inp = (aes_block_t *)in;
+				aes_block_t *outp = (aes_block_t *)out;
+
+				for(n=0 ; n < N_WORDS; ++n)
+					tmp.data[n] = inp->data[n] ^ iv2p->data[n];
+				AES_decrypt((unsigned char *)tmp.data, (unsigned char *)outp->data, key);
+				for(n=0 ; n < N_WORDS; ++n)
+					outp->data[n] ^= ivp->data[n];
+				ivp = inp;
+				iv2p = outp;
+				--len;
+				in += AES_BLOCK_SIZE;
+				out += AES_BLOCK_SIZE;
+				}
+			memcpy(ivec, ivp->data, AES_BLOCK_SIZE);
+			memcpy(ivec + AES_BLOCK_SIZE, iv2p->data, AES_BLOCK_SIZE);
+			}
+		else
+			{
+			aes_block_t tmp, tmp2;
+			aes_block_t iv;
+			aes_block_t iv2;
+
+			load_block(iv, ivec);
+			load_block(iv2, ivec + AES_BLOCK_SIZE);
+
+			while (len)
+				{
+				load_block(tmp, in);
+				tmp2 = tmp;
+				for(n=0 ; n < N_WORDS; ++n)
+					tmp.data[n] ^= iv2.data[n];
+				AES_decrypt((unsigned char *)tmp.data, (unsigned char *)tmp.data, key);
+				for(n=0 ; n < N_WORDS; ++n)
+					tmp.data[n] ^= iv.data[n];
+				store_block(out, tmp);
+				iv = tmp2;
+				iv2 = tmp;
+				--len;
+				in += AES_BLOCK_SIZE;
+				out += AES_BLOCK_SIZE;
+				}
+			memcpy(ivec, iv.data, AES_BLOCK_SIZE);
+			memcpy(ivec + AES_BLOCK_SIZE, iv2.data, AES_BLOCK_SIZE);
+			}
+		}
+	}
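+
+/*
+ * Usage note (illustrative, not in the upstream source): length must be a
+ * multiple of AES_BLOCK_SIZE and ivec must supply 2*AES_BLOCK_SIZE bytes;
+ * both IV halves are updated on return, so consecutive calls chain.  With
+ * initial_iv standing in for caller-provided IV material:
+ *
+ *     unsigned char iv[2 * AES_BLOCK_SIZE];
+ *     memcpy(iv, initial_iv, sizeof(iv));
+ *     AES_ige_encrypt(in, out, len, &key, iv, AES_ENCRYPT);
+ */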
+
+/*
+ * Note that it's effectively impossible to do biIGE in anything other
+ * than a single pass, so no provision is made for chaining.
+ */
+
+/* N.B. The IV for this mode is _four times_ the block size */
+
+void AES_bi_ige_encrypt(const unsigned char *in, unsigned char *out,
+						size_t length, const AES_KEY *key,
+						const AES_KEY *key2, const unsigned char *ivec,
+						const int enc)
+	{
+	size_t n;
+	size_t len = length;
+	unsigned char tmp[AES_BLOCK_SIZE];
+	unsigned char tmp2[AES_BLOCK_SIZE];
+	unsigned char tmp3[AES_BLOCK_SIZE];
+	unsigned char prev[AES_BLOCK_SIZE];
+	const unsigned char *iv;
+	const unsigned char *iv2;
+
+	OPENSSL_assert(in && out && key && ivec);
+	OPENSSL_assert((AES_ENCRYPT == enc)||(AES_DECRYPT == enc));
+	OPENSSL_assert((length%AES_BLOCK_SIZE) == 0);
+
+	if (AES_ENCRYPT == enc)
+		{
+		/* XXX: Do a separate case for when in != out (strictly should
+		   check for overlap, too) */
+
+		/* First the forward pass */ 
+		iv = ivec;
+		iv2 = ivec + AES_BLOCK_SIZE;
+		while (len >= AES_BLOCK_SIZE)
+			{
+			for(n=0 ; n < AES_BLOCK_SIZE ; ++n)
+				out[n] = in[n] ^ iv[n];
+			AES_encrypt(out, out, key);
+			for(n=0 ; n < AES_BLOCK_SIZE ; ++n)
+				out[n] ^= iv2[n];
+			iv = out;
+			memcpy(prev, in, AES_BLOCK_SIZE);
+			iv2 = prev;
+			len -= AES_BLOCK_SIZE;
+			in += AES_BLOCK_SIZE;
+			out += AES_BLOCK_SIZE;
+			}
+
+		/* And now backwards */
+		iv = ivec + AES_BLOCK_SIZE*2;
+		iv2 = ivec + AES_BLOCK_SIZE*3;
+		len = length;
+		while(len >= AES_BLOCK_SIZE)
+			{
+			out -= AES_BLOCK_SIZE;
+			/* XXX: reduce copies by alternating between buffers */
+			memcpy(tmp, out, AES_BLOCK_SIZE);
+			for(n=0 ; n < AES_BLOCK_SIZE ; ++n)
+				out[n] ^= iv[n];
+			/*			hexdump(stdout, "out ^ iv", out, AES_BLOCK_SIZE); */
+			AES_encrypt(out, out, key);
+			/*			hexdump(stdout,"enc", out, AES_BLOCK_SIZE); */
+			/*			hexdump(stdout,"iv2", iv2, AES_BLOCK_SIZE); */
+			for(n=0 ; n < AES_BLOCK_SIZE ; ++n)
+				out[n] ^= iv2[n];
+			/*			hexdump(stdout,"out", out, AES_BLOCK_SIZE); */
+			iv = out;
+			memcpy(prev, tmp, AES_BLOCK_SIZE);
+			iv2 = prev;
+			len -= AES_BLOCK_SIZE;
+			}
+		}
+	else
+		{
+		/* First backwards */
+		iv = ivec + AES_BLOCK_SIZE*2;
+		iv2 = ivec + AES_BLOCK_SIZE*3;
+		in += length;
+		out += length;
+		while (len >= AES_BLOCK_SIZE)
+			{
+			in -= AES_BLOCK_SIZE;
+			out -= AES_BLOCK_SIZE;
+			memcpy(tmp, in, AES_BLOCK_SIZE);
+			memcpy(tmp2, in, AES_BLOCK_SIZE);
+			for(n=0 ; n < AES_BLOCK_SIZE ; ++n)
+				tmp[n] ^= iv2[n];
+			AES_decrypt(tmp, out, key);
+			for(n=0 ; n < AES_BLOCK_SIZE ; ++n)
+				out[n] ^= iv[n];
+			memcpy(tmp3, tmp2, AES_BLOCK_SIZE);
+			iv = tmp3;
+			iv2 = out;
+			len -= AES_BLOCK_SIZE;
+			}
+
+		/* And now forwards */
+		iv = ivec;
+		iv2 = ivec + AES_BLOCK_SIZE;
+		len = length;
+		while (len >= AES_BLOCK_SIZE)
+			{
+			memcpy(tmp, out, AES_BLOCK_SIZE);
+			memcpy(tmp2, out, AES_BLOCK_SIZE);
+			for(n=0 ; n < AES_BLOCK_SIZE ; ++n)
+				tmp[n] ^= iv2[n];
+			AES_decrypt(tmp, out, key);
+			for(n=0 ; n < AES_BLOCK_SIZE ; ++n)
+				out[n] ^= iv[n];
+			memcpy(tmp3, tmp2, AES_BLOCK_SIZE);
+			iv = tmp3;
+			iv2 = out;
+			len -= AES_BLOCK_SIZE;
+			in += AES_BLOCK_SIZE;
+			out += AES_BLOCK_SIZE;
+			}
+		}
+	}
diff --git a/jni/openssl/crypto/aes/aes_locl.h b/jni/openssl/crypto/aes/aes_locl.h
new file mode 100644
index 0000000..054b442
--- /dev/null
+++ b/jni/openssl/crypto/aes/aes_locl.h
@@ -0,0 +1,89 @@
+/* crypto/aes/aes.h -*- mode:C; c-file-style: "eay" -*- */
+/* ====================================================================
+ * Copyright (c) 1998-2002 The OpenSSL Project.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer. 
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * 3. All advertising materials mentioning features or use of this
+ *    software must display the following acknowledgment:
+ *    "This product includes software developed by the OpenSSL Project
+ *    for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
+ *
+ * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
+ *    endorse or promote products derived from this software without
+ *    prior written permission. For written permission, please contact
+ *    openssl-core@openssl.org.
+ *
+ * 5. Products derived from this software may not be called "OpenSSL"
+ *    nor may "OpenSSL" appear in their names without prior written
+ *    permission of the OpenSSL Project.
+ *
+ * 6. Redistributions of any form whatsoever must retain the following
+ *    acknowledgment:
+ *    "This product includes software developed by the OpenSSL Project
+ *    for use in the OpenSSL Toolkit (http://www.openssl.org/)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
+ * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR
+ * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ * ====================================================================
+ *
+ */
+
+#ifndef HEADER_AES_LOCL_H
+#define HEADER_AES_LOCL_H
+
+#include <openssl/e_os2.h>
+
+#ifdef OPENSSL_NO_AES
+#error AES is disabled.
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#if defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_AMD64) || defined(_M_X64))
+# define SWAP(x) (_lrotl(x, 8) & 0x00ff00ff | _lrotr(x, 8) & 0xff00ff00)
+# define GETU32(p) SWAP(*((u32 *)(p)))
+# define PUTU32(ct, st) { *((u32 *)(ct)) = SWAP((st)); }
+#else
+# define GETU32(pt) (((u32)(pt)[0] << 24) ^ ((u32)(pt)[1] << 16) ^ ((u32)(pt)[2] <<  8) ^ ((u32)(pt)[3]))
+# define PUTU32(ct, st) { (ct)[0] = (u8)((st) >> 24); (ct)[1] = (u8)((st) >> 16); (ct)[2] = (u8)((st) >>  8); (ct)[3] = (u8)(st); }
+#endif
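+
+/* Illustrative note (not in the upstream source): GETU32/PUTU32 are
+ * big-endian 32-bit load/store helpers, e.g. GETU32 over the bytes
+ * {0x01,0x02,0x03,0x04} yields 0x01020304 on any host. */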
+
+#ifdef AES_LONG
+typedef unsigned long u32;
+#else
+typedef unsigned int u32;
+#endif
+typedef unsigned short u16;
+typedef unsigned char u8;
+
+#define MAXKC   (256/32)
+#define MAXKB   (256/8)
+#define MAXNR   14
+
+/* This controls loop-unrolling in aes_core.c */
+#undef FULL_UNROLL
+
+#endif /* !HEADER_AES_LOCL_H */
diff --git a/jni/openssl/crypto/aes/aes_misc.c b/jni/openssl/crypto/aes/aes_misc.c
new file mode 100644
index 0000000..f083488
--- /dev/null
+++ b/jni/openssl/crypto/aes/aes_misc.c
@@ -0,0 +1,85 @@
+/* crypto/aes/aes_misc.c -*- mode:C; c-file-style: "eay" -*- */
+/* ====================================================================
+ * Copyright (c) 1998-2002 The OpenSSL Project.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer. 
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * 3. All advertising materials mentioning features or use of this
+ *    software must display the following acknowledgment:
+ *    "This product includes software developed by the OpenSSL Project
+ *    for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
+ *
+ * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
+ *    endorse or promote products derived from this software without
+ *    prior written permission. For written permission, please contact
+ *    openssl-core@openssl.org.
+ *
+ * 5. Products derived from this software may not be called "OpenSSL"
+ *    nor may "OpenSSL" appear in their names without prior written
+ *    permission of the OpenSSL Project.
+ *
+ * 6. Redistributions of any form whatsoever must retain the following
+ *    acknowledgment:
+ *    "This product includes software developed by the OpenSSL Project
+ *    for use in the OpenSSL Toolkit (http://www.openssl.org/)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
+ * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR
+ * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ * ====================================================================
+ *
+ */
+
+#include <openssl/opensslv.h>
+#include <openssl/crypto.h>
+#include <openssl/aes.h>
+#include "aes_locl.h"
+
+const char AES_version[]="AES" OPENSSL_VERSION_PTEXT;
+
+const char *AES_options(void) {
+#ifdef FULL_UNROLL
+        return "aes(full)";
+#else   
+        return "aes(partial)";
+#endif
+}
+
+/* FIPS wrapper functions to block low level AES calls in FIPS mode */
+
+int AES_set_encrypt_key(const unsigned char *userKey, const int bits,
+			AES_KEY *key)
+	{
+#ifdef OPENSSL_FIPS
+	fips_cipher_abort(AES);
+#endif
+	return private_AES_set_encrypt_key(userKey, bits, key);
+	}
+
+int AES_set_decrypt_key(const unsigned char *userKey, const int bits,
+			AES_KEY *key)
+	{
+#ifdef OPENSSL_FIPS
+	fips_cipher_abort(AES);
+#endif
+	return private_AES_set_decrypt_key(userKey, bits, key);
+	}
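+
+/*
+ * Usage sketch (illustrative, not part of the upstream file): outside FIPS
+ * builds these wrappers simply forward to the private_ key-schedule
+ * routines, so a typical caller does
+ *
+ *     AES_KEY ks;
+ *     if (AES_set_encrypt_key(key128, 128, &ks) == 0)
+ *             AES_encrypt(plaintext, ciphertext, &ks);
+ *
+ * where key128, plaintext and ciphertext are caller-supplied 16-byte buffers.
+ */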
diff --git a/jni/openssl/crypto/aes/aes_ofb.c b/jni/openssl/crypto/aes/aes_ofb.c
new file mode 100644
index 0000000..50bf0b8
--- /dev/null
+++ b/jni/openssl/crypto/aes/aes_ofb.c
@@ -0,0 +1,60 @@
+/* crypto/aes/aes_ofb.c -*- mode:C; c-file-style: "eay" -*- */
+/* ====================================================================
+ * Copyright (c) 2002-2006 The OpenSSL Project.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer. 
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * 3. All advertising materials mentioning features or use of this
+ *    software must display the following acknowledgment:
+ *    "This product includes software developed by the OpenSSL Project
+ *    for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
+ *
+ * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
+ *    endorse or promote products derived from this software without
+ *    prior written permission. For written permission, please contact
+ *    openssl-core@openssl.org.
+ *
+ * 5. Products derived from this software may not be called "OpenSSL"
+ *    nor may "OpenSSL" appear in their names without prior written
+ *    permission of the OpenSSL Project.
+ *
+ * 6. Redistributions of any form whatsoever must retain the following
+ *    acknowledgment:
+ *    "This product includes software developed by the OpenSSL Project
+ *    for use in the OpenSSL Toolkit (http://www.openssl.org/)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
+ * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR
+ * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ * ====================================================================
+ *
+ */
+
+#include <openssl/aes.h>
+#include <openssl/modes.h>
+
+void AES_ofb128_encrypt(const unsigned char *in, unsigned char *out,
+	size_t length, const AES_KEY *key,
+	unsigned char *ivec, int *num)
+{
+	CRYPTO_ofb128_encrypt(in,out,length,key,ivec,num,(block128_f)AES_encrypt);
+}
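+
+/*
+ * Usage note (illustrative, not part of the upstream file): *num must be 0
+ * for a fresh 16-byte ivec, and since OFB only XORs a keystream the same
+ * call both encrypts and decrypts; the schedule is always built with
+ * AES_set_encrypt_key(), never the decrypt schedule.
+ */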
diff --git a/jni/openssl/crypto/aes/aes_wrap.c b/jni/openssl/crypto/aes/aes_wrap.c
new file mode 100644
index 0000000..e2d73d3
--- /dev/null
+++ b/jni/openssl/crypto/aes/aes_wrap.c
@@ -0,0 +1,259 @@
+/* crypto/aes/aes_wrap.c */
+/* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL
+ * project.
+ */
+/* ====================================================================
+ * Copyright (c) 2008 The OpenSSL Project.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer. 
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * 3. All advertising materials mentioning features or use of this
+ *    software must display the following acknowledgment:
+ *    "This product includes software developed by the OpenSSL Project
+ *    for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)"
+ *
+ * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
+ *    endorse or promote products derived from this software without
+ *    prior written permission. For written permission, please contact
+ *    licensing@OpenSSL.org.
+ *
+ * 5. Products derived from this software may not be called "OpenSSL"
+ *    nor may "OpenSSL" appear in their names without prior written
+ *    permission of the OpenSSL Project.
+ *
+ * 6. Redistributions of any form whatsoever must retain the following
+ *    acknowledgment:
+ *    "This product includes software developed by the OpenSSL Project
+ *    for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
+ * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR
+ * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ * ====================================================================
+ */
+
+#include "cryptlib.h"
+#include <openssl/aes.h>
+#include <openssl/bio.h>
+
+static const unsigned char default_iv[] = {
+  0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6,
+};
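+/* 0xA6 repeated eight times is the default initial value defined by the
+ * AES Key Wrap specification (RFC 3394); it is used when iv is NULL. */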
+
+int AES_wrap_key(AES_KEY *key, const unsigned char *iv,
+		unsigned char *out,
+		const unsigned char *in, unsigned int inlen)
+	{
+	unsigned char *A, B[16], *R;
+	unsigned int i, j, t;
+	if ((inlen & 0x7) || (inlen < 8))
+		return -1;
+	A = B;
+	t = 1;
+	memcpy(out + 8, in, inlen);
+	if (!iv)
+		iv = default_iv;
+
+	memcpy(A, iv, 8);
+
+	for (j = 0; j < 6; j++)
+		{
+		R = out + 8;
+		for (i = 0; i < inlen; i += 8, t++, R += 8)
+			{
+			memcpy(B + 8, R, 8);
+			AES_encrypt(B, B, key);
+			A[7] ^= (unsigned char)(t & 0xff);
+			if (t > 0xff)	
+				{
+				A[6] ^= (unsigned char)((t >> 8) & 0xff);
+				A[5] ^= (unsigned char)((t >> 16) & 0xff);
+				A[4] ^= (unsigned char)((t >> 24) & 0xff);
+				}
+			memcpy(R, B + 8, 8);
+			}
+		}
+	memcpy(out, A, 8);
+	return inlen + 8;
+	}
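+
+/*
+ * Illustrative note (not in the upstream source): in must be a multiple of
+ * 8 bytes (and at least 8), out must provide inlen + 8 bytes, and the
+ * return value is inlen + 8 on success or -1 on a bad length.
+ */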
+
+int AES_unwrap_key(AES_KEY *key, const unsigned char *iv,
+		unsigned char *out,
+		const unsigned char *in, unsigned int inlen)
+	{
+	unsigned char *A, B[16], *R;
+	unsigned int i, j, t;
+	inlen -= 8;
+	if (inlen & 0x7)
+		return -1;
+	if (inlen < 8)
+		return -1;
+	A = B;
+	t =  6 * (inlen >> 3);
+	memcpy(A, in, 8);
+	memcpy(out, in + 8, inlen);
+	for (j = 0; j < 6; j++)
+		{
+		R = out + inlen - 8;
+		for (i = 0; i < inlen; i += 8, t--, R -= 8)
+			{
+			A[7] ^= (unsigned char)(t & 0xff);
+			if (t > 0xff)	
+				{
+				A[6] ^= (unsigned char)((t >> 8) & 0xff);
+				A[5] ^= (unsigned char)((t >> 16) & 0xff);
+				A[4] ^= (unsigned char)((t >> 24) & 0xff);
+				}
+			memcpy(B + 8, R, 8);
+			AES_decrypt(B, B, key);
+			memcpy(R, B + 8, 8);
+			}
+		}
+	if (!iv)
+		iv = default_iv;
+	if (memcmp(A, iv, 8))
+		{
+		OPENSSL_cleanse(out, inlen);
+		return 0;
+		}
+	return inlen;
+	}
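+
+/*
+ * Illustrative note (not in the upstream source): here inlen counts the
+ * whole wrapped blob including its 8-byte integrity block; on success the
+ * plaintext length (inlen - 8) is returned, 0 is returned and out is
+ * cleansed if the integrity check fails, and -1 signals a bad length.
+ */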
+
+#ifdef AES_WRAP_TEST
+
+int AES_wrap_unwrap_test(const unsigned char *kek, int keybits,
+			 const unsigned char *iv,
+			 const unsigned char *eout,
+			 const unsigned char *key, int keylen)
+	{
+	unsigned char *otmp = NULL, *ptmp = NULL;
+	int r, ret = 0;
+	AES_KEY wctx;
+	otmp = OPENSSL_malloc(keylen + 8);
+	ptmp = OPENSSL_malloc(keylen);
+	if (!otmp || !ptmp)
+		return 0;
+	if (AES_set_encrypt_key(kek, keybits, &wctx))
+		goto err;
+	r = AES_wrap_key(&wctx, iv, otmp, key, keylen);
+	if (r <= 0)
+		goto err;
+
+	if (eout && memcmp(eout, otmp, keylen))
+		goto err;
+		
+	if (AES_set_decrypt_key(kek, keybits, &wctx))
+		goto err;
+	r = AES_unwrap_key(&wctx, iv, ptmp, otmp, r);
+
+	if (memcmp(key, ptmp, keylen))
+		goto err;
+
+	ret = 1;
+
+	err:
+	if (otmp)
+		OPENSSL_free(otmp);
+	if (ptmp)
+		OPENSSL_free(ptmp);
+
+	return ret;
+
+	}
+
+
+
+int main(int argc, char **argv)
+{
+
+static const unsigned char kek[] = {
+  0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+  0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+  0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+  0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f
+};
+
+static const unsigned char key[] = {
+  0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
+  0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff,
+  0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+  0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f
+};
+
+static const unsigned char e1[] = {
+  0x1f, 0xa6, 0x8b, 0x0a, 0x81, 0x12, 0xb4, 0x47,
+  0xae, 0xf3, 0x4b, 0xd8, 0xfb, 0x5a, 0x7b, 0x82,
+  0x9d, 0x3e, 0x86, 0x23, 0x71, 0xd2, 0xcf, 0xe5
+};
+
+static const unsigned char e2[] = {
+  0x96, 0x77, 0x8b, 0x25, 0xae, 0x6c, 0xa4, 0x35,
+  0xf9, 0x2b, 0x5b, 0x97, 0xc0, 0x50, 0xae, 0xd2,
+  0x46, 0x8a, 0xb8, 0xa1, 0x7a, 0xd8, 0x4e, 0x5d
+};
+
+static const unsigned char e3[] = {
+  0x64, 0xe8, 0xc3, 0xf9, 0xce, 0x0f, 0x5b, 0xa2,
+  0x63, 0xe9, 0x77, 0x79, 0x05, 0x81, 0x8a, 0x2a,
+  0x93, 0xc8, 0x19, 0x1e, 0x7d, 0x6e, 0x8a, 0xe7
+};
+
+static const unsigned char e4[] = {
+  0x03, 0x1d, 0x33, 0x26, 0x4e, 0x15, 0xd3, 0x32,
+  0x68, 0xf2, 0x4e, 0xc2, 0x60, 0x74, 0x3e, 0xdc,
+  0xe1, 0xc6, 0xc7, 0xdd, 0xee, 0x72, 0x5a, 0x93,
+  0x6b, 0xa8, 0x14, 0x91, 0x5c, 0x67, 0x62, 0xd2
+};
+
+static const unsigned char e5[] = {
+  0xa8, 0xf9, 0xbc, 0x16, 0x12, 0xc6, 0x8b, 0x3f,
+  0xf6, 0xe6, 0xf4, 0xfb, 0xe3, 0x0e, 0x71, 0xe4,
+  0x76, 0x9c, 0x8b, 0x80, 0xa3, 0x2c, 0xb8, 0x95,
+  0x8c, 0xd5, 0xd1, 0x7d, 0x6b, 0x25, 0x4d, 0xa1
+};
+
+static const unsigned char e6[] = {
+  0x28, 0xc9, 0xf4, 0x04, 0xc4, 0xb8, 0x10, 0xf4,
+  0xcb, 0xcc, 0xb3, 0x5c, 0xfb, 0x87, 0xf8, 0x26,
+  0x3f, 0x57, 0x86, 0xe2, 0xd8, 0x0e, 0xd3, 0x26,
+  0xcb, 0xc7, 0xf0, 0xe7, 0x1a, 0x99, 0xf4, 0x3b,
+  0xfb, 0x98, 0x8b, 0x9b, 0x7a, 0x02, 0xdd, 0x21
+};
+
+	AES_KEY wctx, xctx;
+	int ret;
+	ret = AES_wrap_unwrap_test(kek, 128, NULL, e1, key, 16);
+	fprintf(stderr, "Key test result %d\n", ret);
+	ret = AES_wrap_unwrap_test(kek, 192, NULL, e2, key, 16);
+	fprintf(stderr, "Key test result %d\n", ret);
+	ret = AES_wrap_unwrap_test(kek, 256, NULL, e3, key, 16);
+	fprintf(stderr, "Key test result %d\n", ret);
+	ret = AES_wrap_unwrap_test(kek, 192, NULL, e4, key, 24);
+	fprintf(stderr, "Key test result %d\n", ret);
+	ret = AES_wrap_unwrap_test(kek, 256, NULL, e5, key, 24);
+	fprintf(stderr, "Key test result %d\n", ret);
+	ret = AES_wrap_unwrap_test(kek, 256, NULL, e6, key, 32);
+	fprintf(stderr, "Key test result %d\n", ret);
+}
+	
+	
+#endif
diff --git a/jni/openssl/crypto/aes/aes_x86core.c b/jni/openssl/crypto/aes/aes_x86core.c
new file mode 100644
index 0000000..d323e26
--- /dev/null
+++ b/jni/openssl/crypto/aes/aes_x86core.c
@@ -0,0 +1,1063 @@
+/* crypto/aes/aes_core.c -*- mode:C; c-file-style: "eay" -*- */
+/**
+ * rijndael-alg-fst.c
+ *
+ * @version 3.0 (December 2000)
+ *
+ * Optimised ANSI C code for the Rijndael cipher (now AES)
+ *
+ * @author Vincent Rijmen <vincent.rijmen@esat.kuleuven.ac.be>
+ * @author Antoon Bosselaers <antoon.bosselaers@esat.kuleuven.ac.be>
+ * @author Paulo Barreto <paulo.barreto@terra.com.br>
+ *
+ * This code is hereby placed in the public domain.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ''AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This is an experimental x86[_64] derivative. It assumes little-endian
+ * byte order and expects the CPU to sustain unaligned memory references.
+ * It is used as playground for cache-time attack mitigations and
+ * serves as reference C implementation for x86[_64] assembler.
+ *
+ *					<appro@fy.chalmers.se>
+ */
+
+
+#ifndef AES_DEBUG
+# ifndef NDEBUG
+#  define NDEBUG
+# endif
+#endif
+#include <assert.h>
+
+#include <stdlib.h>
+#include <openssl/aes.h>
+#include "aes_locl.h"
+
+/*
+ * These two parameters control which table, 256-byte or 2KB, is
+ * referenced in the outer and inner rounds respectively.
+ */
+#define AES_COMPACT_IN_OUTER_ROUNDS
+#ifdef  AES_COMPACT_IN_OUTER_ROUNDS
+/* AES_COMPACT_IN_OUTER_ROUNDS costs ~30% in performance, while
+ * adding AES_COMPACT_IN_INNER_ROUNDS reduces benchmark results
+ * *further*, by a factor of ~2. */
+# undef  AES_COMPACT_IN_INNER_ROUNDS
+#endif
+
+#if 1
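+/* Illustrative comment (not in the upstream source): reading one word per
+ * cache line across the whole 256-byte table warms the cache before the
+ * table is used, a simple cache-timing countermeasure; the volatile result
+ * keeps the compiler from discarding the loads. */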
+static void prefetch256(const void *table)
+{
+	volatile unsigned long *t=(void *)table,ret;
+	unsigned long sum;
+	int i;
+
+	/* 32 is common least cache-line size */
+	for (sum=0,i=0;i<256/sizeof(t[0]);i+=32/sizeof(t[0]))	sum ^= t[i];
+
+	ret = sum;
+}
+#else
+# define prefetch256(t)
+#endif
+
+#undef GETU32
+#define GETU32(p) (*((u32*)(p)))
+
+#if (defined(_WIN32) || defined(_WIN64)) && !defined(__MINGW32__)
+typedef unsigned __int64 u64;
+#define U64(C)	C##UI64
+#elif defined(__arch64__)
+typedef unsigned long u64;
+#define U64(C)	C##UL
+#else
+typedef unsigned long long u64;
+#define U64(C)	C##ULL
+#endif
+
+#undef ROTATE
+#if defined(_MSC_VER) || defined(__ICC)
+# define ROTATE(a,n)	_lrotl(a,n)
+#elif defined(__GNUC__) && __GNUC__>=2
+# if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
+#   define ROTATE(a,n)	({ register unsigned int ret;	\
+				asm (			\
+				"roll %1,%0"		\
+				: "=r"(ret)		\
+				: "I"(n), "0"(a)	\
+				: "cc");		\
+			   ret;				\
+			})
+# endif
+#endif
+/*
+Te [x] = S [x].[02, 01, 01, 03, 02, 01, 01, 03];
+Te0[x] = S [x].[02, 01, 01, 03];
+Te1[x] = S [x].[03, 02, 01, 01];
+Te2[x] = S [x].[01, 03, 02, 01];
+Te3[x] = S [x].[01, 01, 03, 02];
+*/
+#define Te0 (u32)((u64*)((u8*)Te+0))
+#define Te1 (u32)((u64*)((u8*)Te+3))
+#define Te2 (u32)((u64*)((u8*)Te+2))
+#define Te3 (u32)((u64*)((u8*)Te+1))
+/*
+Td [x] = Si[x].[0e, 09, 0d, 0b, 0e, 09, 0d, 0b];
+Td0[x] = Si[x].[0e, 09, 0d, 0b];
+Td1[x] = Si[x].[0b, 0e, 09, 0d];
+Td2[x] = Si[x].[0d, 0b, 0e, 09];
+Td3[x] = Si[x].[09, 0d, 0b, 0e];
+Td4[x] = Si[x].[01];
+*/
+#define Td0 (u32)((u64*)((u8*)Td+0))
+#define Td1 (u32)((u64*)((u8*)Td+3))
+#define Td2 (u32)((u64*)((u8*)Td+2))
+#define Td3 (u32)((u64*)((u8*)Td+1))
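+
+/*
+ * Note: every 64-bit entry in Te[] and Td[] below stores its 32-bit
+ * column pattern twice, so Te0..Te3 (and Td0..Td3) are simply
+ * unaligned 32-bit views of the same entry at byte offsets 0, 3, 2
+ * and 1, each yielding the value in byte-rotated form. A single 2KB
+ * table thus stands in for the four classical 1KB tables, at the cost
+ * of the unaligned references mentioned at the top of this file.
+ */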
+
+static const u64 Te[256] = {
+    U64(0xa56363c6a56363c6), U64(0x847c7cf8847c7cf8),
+    U64(0x997777ee997777ee), U64(0x8d7b7bf68d7b7bf6),
+    U64(0x0df2f2ff0df2f2ff), U64(0xbd6b6bd6bd6b6bd6),
+    U64(0xb16f6fdeb16f6fde), U64(0x54c5c59154c5c591),
+    U64(0x5030306050303060), U64(0x0301010203010102),
+    U64(0xa96767cea96767ce), U64(0x7d2b2b567d2b2b56),
+    U64(0x19fefee719fefee7), U64(0x62d7d7b562d7d7b5),
+    U64(0xe6abab4de6abab4d), U64(0x9a7676ec9a7676ec),
+    U64(0x45caca8f45caca8f), U64(0x9d82821f9d82821f),
+    U64(0x40c9c98940c9c989), U64(0x877d7dfa877d7dfa),
+    U64(0x15fafaef15fafaef), U64(0xeb5959b2eb5959b2),
+    U64(0xc947478ec947478e), U64(0x0bf0f0fb0bf0f0fb),
+    U64(0xecadad41ecadad41), U64(0x67d4d4b367d4d4b3),
+    U64(0xfda2a25ffda2a25f), U64(0xeaafaf45eaafaf45),
+    U64(0xbf9c9c23bf9c9c23), U64(0xf7a4a453f7a4a453),
+    U64(0x967272e4967272e4), U64(0x5bc0c09b5bc0c09b),
+    U64(0xc2b7b775c2b7b775), U64(0x1cfdfde11cfdfde1),
+    U64(0xae93933dae93933d), U64(0x6a26264c6a26264c),
+    U64(0x5a36366c5a36366c), U64(0x413f3f7e413f3f7e),
+    U64(0x02f7f7f502f7f7f5), U64(0x4fcccc834fcccc83),
+    U64(0x5c3434685c343468), U64(0xf4a5a551f4a5a551),
+    U64(0x34e5e5d134e5e5d1), U64(0x08f1f1f908f1f1f9),
+    U64(0x937171e2937171e2), U64(0x73d8d8ab73d8d8ab),
+    U64(0x5331316253313162), U64(0x3f15152a3f15152a),
+    U64(0x0c0404080c040408), U64(0x52c7c79552c7c795),
+    U64(0x6523234665232346), U64(0x5ec3c39d5ec3c39d),
+    U64(0x2818183028181830), U64(0xa1969637a1969637),
+    U64(0x0f05050a0f05050a), U64(0xb59a9a2fb59a9a2f),
+    U64(0x0907070e0907070e), U64(0x3612122436121224),
+    U64(0x9b80801b9b80801b), U64(0x3de2e2df3de2e2df),
+    U64(0x26ebebcd26ebebcd), U64(0x6927274e6927274e),
+    U64(0xcdb2b27fcdb2b27f), U64(0x9f7575ea9f7575ea),
+    U64(0x1b0909121b090912), U64(0x9e83831d9e83831d),
+    U64(0x742c2c58742c2c58), U64(0x2e1a1a342e1a1a34),
+    U64(0x2d1b1b362d1b1b36), U64(0xb26e6edcb26e6edc),
+    U64(0xee5a5ab4ee5a5ab4), U64(0xfba0a05bfba0a05b),
+    U64(0xf65252a4f65252a4), U64(0x4d3b3b764d3b3b76),
+    U64(0x61d6d6b761d6d6b7), U64(0xceb3b37dceb3b37d),
+    U64(0x7b2929527b292952), U64(0x3ee3e3dd3ee3e3dd),
+    U64(0x712f2f5e712f2f5e), U64(0x9784841397848413),
+    U64(0xf55353a6f55353a6), U64(0x68d1d1b968d1d1b9),
+    U64(0x0000000000000000), U64(0x2cededc12cededc1),
+    U64(0x6020204060202040), U64(0x1ffcfce31ffcfce3),
+    U64(0xc8b1b179c8b1b179), U64(0xed5b5bb6ed5b5bb6),
+    U64(0xbe6a6ad4be6a6ad4), U64(0x46cbcb8d46cbcb8d),
+    U64(0xd9bebe67d9bebe67), U64(0x4b3939724b393972),
+    U64(0xde4a4a94de4a4a94), U64(0xd44c4c98d44c4c98),
+    U64(0xe85858b0e85858b0), U64(0x4acfcf854acfcf85),
+    U64(0x6bd0d0bb6bd0d0bb), U64(0x2aefefc52aefefc5),
+    U64(0xe5aaaa4fe5aaaa4f), U64(0x16fbfbed16fbfbed),
+    U64(0xc5434386c5434386), U64(0xd74d4d9ad74d4d9a),
+    U64(0x5533336655333366), U64(0x9485851194858511),
+    U64(0xcf45458acf45458a), U64(0x10f9f9e910f9f9e9),
+    U64(0x0602020406020204), U64(0x817f7ffe817f7ffe),
+    U64(0xf05050a0f05050a0), U64(0x443c3c78443c3c78),
+    U64(0xba9f9f25ba9f9f25), U64(0xe3a8a84be3a8a84b),
+    U64(0xf35151a2f35151a2), U64(0xfea3a35dfea3a35d),
+    U64(0xc0404080c0404080), U64(0x8a8f8f058a8f8f05),
+    U64(0xad92923fad92923f), U64(0xbc9d9d21bc9d9d21),
+    U64(0x4838387048383870), U64(0x04f5f5f104f5f5f1),
+    U64(0xdfbcbc63dfbcbc63), U64(0xc1b6b677c1b6b677),
+    U64(0x75dadaaf75dadaaf), U64(0x6321214263212142),
+    U64(0x3010102030101020), U64(0x1affffe51affffe5),
+    U64(0x0ef3f3fd0ef3f3fd), U64(0x6dd2d2bf6dd2d2bf),
+    U64(0x4ccdcd814ccdcd81), U64(0x140c0c18140c0c18),
+    U64(0x3513132635131326), U64(0x2fececc32fececc3),
+    U64(0xe15f5fbee15f5fbe), U64(0xa2979735a2979735),
+    U64(0xcc444488cc444488), U64(0x3917172e3917172e),
+    U64(0x57c4c49357c4c493), U64(0xf2a7a755f2a7a755),
+    U64(0x827e7efc827e7efc), U64(0x473d3d7a473d3d7a),
+    U64(0xac6464c8ac6464c8), U64(0xe75d5dbae75d5dba),
+    U64(0x2b1919322b191932), U64(0x957373e6957373e6),
+    U64(0xa06060c0a06060c0), U64(0x9881811998818119),
+    U64(0xd14f4f9ed14f4f9e), U64(0x7fdcdca37fdcdca3),
+    U64(0x6622224466222244), U64(0x7e2a2a547e2a2a54),
+    U64(0xab90903bab90903b), U64(0x8388880b8388880b),
+    U64(0xca46468cca46468c), U64(0x29eeeec729eeeec7),
+    U64(0xd3b8b86bd3b8b86b), U64(0x3c1414283c141428),
+    U64(0x79dedea779dedea7), U64(0xe25e5ebce25e5ebc),
+    U64(0x1d0b0b161d0b0b16), U64(0x76dbdbad76dbdbad),
+    U64(0x3be0e0db3be0e0db), U64(0x5632326456323264),
+    U64(0x4e3a3a744e3a3a74), U64(0x1e0a0a141e0a0a14),
+    U64(0xdb494992db494992), U64(0x0a06060c0a06060c),
+    U64(0x6c2424486c242448), U64(0xe45c5cb8e45c5cb8),
+    U64(0x5dc2c29f5dc2c29f), U64(0x6ed3d3bd6ed3d3bd),
+    U64(0xefacac43efacac43), U64(0xa66262c4a66262c4),
+    U64(0xa8919139a8919139), U64(0xa4959531a4959531),
+    U64(0x37e4e4d337e4e4d3), U64(0x8b7979f28b7979f2),
+    U64(0x32e7e7d532e7e7d5), U64(0x43c8c88b43c8c88b),
+    U64(0x5937376e5937376e), U64(0xb76d6ddab76d6dda),
+    U64(0x8c8d8d018c8d8d01), U64(0x64d5d5b164d5d5b1),
+    U64(0xd24e4e9cd24e4e9c), U64(0xe0a9a949e0a9a949),
+    U64(0xb46c6cd8b46c6cd8), U64(0xfa5656acfa5656ac),
+    U64(0x07f4f4f307f4f4f3), U64(0x25eaeacf25eaeacf),
+    U64(0xaf6565caaf6565ca), U64(0x8e7a7af48e7a7af4),
+    U64(0xe9aeae47e9aeae47), U64(0x1808081018080810),
+    U64(0xd5baba6fd5baba6f), U64(0x887878f0887878f0),
+    U64(0x6f25254a6f25254a), U64(0x722e2e5c722e2e5c),
+    U64(0x241c1c38241c1c38), U64(0xf1a6a657f1a6a657),
+    U64(0xc7b4b473c7b4b473), U64(0x51c6c69751c6c697),
+    U64(0x23e8e8cb23e8e8cb), U64(0x7cdddda17cdddda1),
+    U64(0x9c7474e89c7474e8), U64(0x211f1f3e211f1f3e),
+    U64(0xdd4b4b96dd4b4b96), U64(0xdcbdbd61dcbdbd61),
+    U64(0x868b8b0d868b8b0d), U64(0x858a8a0f858a8a0f),
+    U64(0x907070e0907070e0), U64(0x423e3e7c423e3e7c),
+    U64(0xc4b5b571c4b5b571), U64(0xaa6666ccaa6666cc),
+    U64(0xd8484890d8484890), U64(0x0503030605030306),
+    U64(0x01f6f6f701f6f6f7), U64(0x120e0e1c120e0e1c),
+    U64(0xa36161c2a36161c2), U64(0x5f35356a5f35356a),
+    U64(0xf95757aef95757ae), U64(0xd0b9b969d0b9b969),
+    U64(0x9186861791868617), U64(0x58c1c19958c1c199),
+    U64(0x271d1d3a271d1d3a), U64(0xb99e9e27b99e9e27),
+    U64(0x38e1e1d938e1e1d9), U64(0x13f8f8eb13f8f8eb),
+    U64(0xb398982bb398982b), U64(0x3311112233111122),
+    U64(0xbb6969d2bb6969d2), U64(0x70d9d9a970d9d9a9),
+    U64(0x898e8e07898e8e07), U64(0xa7949433a7949433),
+    U64(0xb69b9b2db69b9b2d), U64(0x221e1e3c221e1e3c),
+    U64(0x9287871592878715), U64(0x20e9e9c920e9e9c9),
+    U64(0x49cece8749cece87), U64(0xff5555aaff5555aa),
+    U64(0x7828285078282850), U64(0x7adfdfa57adfdfa5),
+    U64(0x8f8c8c038f8c8c03), U64(0xf8a1a159f8a1a159),
+    U64(0x8089890980898909), U64(0x170d0d1a170d0d1a),
+    U64(0xdabfbf65dabfbf65), U64(0x31e6e6d731e6e6d7),
+    U64(0xc6424284c6424284), U64(0xb86868d0b86868d0),
+    U64(0xc3414182c3414182), U64(0xb0999929b0999929),
+    U64(0x772d2d5a772d2d5a), U64(0x110f0f1e110f0f1e),
+    U64(0xcbb0b07bcbb0b07b), U64(0xfc5454a8fc5454a8),
+    U64(0xd6bbbb6dd6bbbb6d), U64(0x3a16162c3a16162c)
+};
+
+static const u8 Te4[256] = {
+    0x63U, 0x7cU, 0x77U, 0x7bU, 0xf2U, 0x6bU, 0x6fU, 0xc5U,
+    0x30U, 0x01U, 0x67U, 0x2bU, 0xfeU, 0xd7U, 0xabU, 0x76U,
+    0xcaU, 0x82U, 0xc9U, 0x7dU, 0xfaU, 0x59U, 0x47U, 0xf0U,
+    0xadU, 0xd4U, 0xa2U, 0xafU, 0x9cU, 0xa4U, 0x72U, 0xc0U,
+    0xb7U, 0xfdU, 0x93U, 0x26U, 0x36U, 0x3fU, 0xf7U, 0xccU,
+    0x34U, 0xa5U, 0xe5U, 0xf1U, 0x71U, 0xd8U, 0x31U, 0x15U,
+    0x04U, 0xc7U, 0x23U, 0xc3U, 0x18U, 0x96U, 0x05U, 0x9aU,
+    0x07U, 0x12U, 0x80U, 0xe2U, 0xebU, 0x27U, 0xb2U, 0x75U,
+    0x09U, 0x83U, 0x2cU, 0x1aU, 0x1bU, 0x6eU, 0x5aU, 0xa0U,
+    0x52U, 0x3bU, 0xd6U, 0xb3U, 0x29U, 0xe3U, 0x2fU, 0x84U,
+    0x53U, 0xd1U, 0x00U, 0xedU, 0x20U, 0xfcU, 0xb1U, 0x5bU,
+    0x6aU, 0xcbU, 0xbeU, 0x39U, 0x4aU, 0x4cU, 0x58U, 0xcfU,
+    0xd0U, 0xefU, 0xaaU, 0xfbU, 0x43U, 0x4dU, 0x33U, 0x85U,
+    0x45U, 0xf9U, 0x02U, 0x7fU, 0x50U, 0x3cU, 0x9fU, 0xa8U,
+    0x51U, 0xa3U, 0x40U, 0x8fU, 0x92U, 0x9dU, 0x38U, 0xf5U,
+    0xbcU, 0xb6U, 0xdaU, 0x21U, 0x10U, 0xffU, 0xf3U, 0xd2U,
+    0xcdU, 0x0cU, 0x13U, 0xecU, 0x5fU, 0x97U, 0x44U, 0x17U,
+    0xc4U, 0xa7U, 0x7eU, 0x3dU, 0x64U, 0x5dU, 0x19U, 0x73U,
+    0x60U, 0x81U, 0x4fU, 0xdcU, 0x22U, 0x2aU, 0x90U, 0x88U,
+    0x46U, 0xeeU, 0xb8U, 0x14U, 0xdeU, 0x5eU, 0x0bU, 0xdbU,
+    0xe0U, 0x32U, 0x3aU, 0x0aU, 0x49U, 0x06U, 0x24U, 0x5cU,
+    0xc2U, 0xd3U, 0xacU, 0x62U, 0x91U, 0x95U, 0xe4U, 0x79U,
+    0xe7U, 0xc8U, 0x37U, 0x6dU, 0x8dU, 0xd5U, 0x4eU, 0xa9U,
+    0x6cU, 0x56U, 0xf4U, 0xeaU, 0x65U, 0x7aU, 0xaeU, 0x08U,
+    0xbaU, 0x78U, 0x25U, 0x2eU, 0x1cU, 0xa6U, 0xb4U, 0xc6U,
+    0xe8U, 0xddU, 0x74U, 0x1fU, 0x4bU, 0xbdU, 0x8bU, 0x8aU,
+    0x70U, 0x3eU, 0xb5U, 0x66U, 0x48U, 0x03U, 0xf6U, 0x0eU,
+    0x61U, 0x35U, 0x57U, 0xb9U, 0x86U, 0xc1U, 0x1dU, 0x9eU,
+    0xe1U, 0xf8U, 0x98U, 0x11U, 0x69U, 0xd9U, 0x8eU, 0x94U,
+    0x9bU, 0x1eU, 0x87U, 0xe9U, 0xceU, 0x55U, 0x28U, 0xdfU,
+    0x8cU, 0xa1U, 0x89U, 0x0dU, 0xbfU, 0xe6U, 0x42U, 0x68U,
+    0x41U, 0x99U, 0x2dU, 0x0fU, 0xb0U, 0x54U, 0xbbU, 0x16U
+};
+
+static const u64 Td[256] = {
+    U64(0x50a7f45150a7f451), U64(0x5365417e5365417e),
+    U64(0xc3a4171ac3a4171a), U64(0x965e273a965e273a),
+    U64(0xcb6bab3bcb6bab3b), U64(0xf1459d1ff1459d1f),
+    U64(0xab58faacab58faac), U64(0x9303e34b9303e34b),
+    U64(0x55fa302055fa3020), U64(0xf66d76adf66d76ad),
+    U64(0x9176cc889176cc88), U64(0x254c02f5254c02f5),
+    U64(0xfcd7e54ffcd7e54f), U64(0xd7cb2ac5d7cb2ac5),
+    U64(0x8044352680443526), U64(0x8fa362b58fa362b5),
+    U64(0x495ab1de495ab1de), U64(0x671bba25671bba25),
+    U64(0x980eea45980eea45), U64(0xe1c0fe5de1c0fe5d),
+    U64(0x02752fc302752fc3), U64(0x12f04c8112f04c81),
+    U64(0xa397468da397468d), U64(0xc6f9d36bc6f9d36b),
+    U64(0xe75f8f03e75f8f03), U64(0x959c9215959c9215),
+    U64(0xeb7a6dbfeb7a6dbf), U64(0xda595295da595295),
+    U64(0x2d83bed42d83bed4), U64(0xd3217458d3217458),
+    U64(0x2969e0492969e049), U64(0x44c8c98e44c8c98e),
+    U64(0x6a89c2756a89c275), U64(0x78798ef478798ef4),
+    U64(0x6b3e58996b3e5899), U64(0xdd71b927dd71b927),
+    U64(0xb64fe1beb64fe1be), U64(0x17ad88f017ad88f0),
+    U64(0x66ac20c966ac20c9), U64(0xb43ace7db43ace7d),
+    U64(0x184adf63184adf63), U64(0x82311ae582311ae5),
+    U64(0x6033519760335197), U64(0x457f5362457f5362),
+    U64(0xe07764b1e07764b1), U64(0x84ae6bbb84ae6bbb),
+    U64(0x1ca081fe1ca081fe), U64(0x942b08f9942b08f9),
+    U64(0x5868487058684870), U64(0x19fd458f19fd458f),
+    U64(0x876cde94876cde94), U64(0xb7f87b52b7f87b52),
+    U64(0x23d373ab23d373ab), U64(0xe2024b72e2024b72),
+    U64(0x578f1fe3578f1fe3), U64(0x2aab55662aab5566),
+    U64(0x0728ebb20728ebb2), U64(0x03c2b52f03c2b52f),
+    U64(0x9a7bc5869a7bc586), U64(0xa50837d3a50837d3),
+    U64(0xf2872830f2872830), U64(0xb2a5bf23b2a5bf23),
+    U64(0xba6a0302ba6a0302), U64(0x5c8216ed5c8216ed),
+    U64(0x2b1ccf8a2b1ccf8a), U64(0x92b479a792b479a7),
+    U64(0xf0f207f3f0f207f3), U64(0xa1e2694ea1e2694e),
+    U64(0xcdf4da65cdf4da65), U64(0xd5be0506d5be0506),
+    U64(0x1f6234d11f6234d1), U64(0x8afea6c48afea6c4),
+    U64(0x9d532e349d532e34), U64(0xa055f3a2a055f3a2),
+    U64(0x32e18a0532e18a05), U64(0x75ebf6a475ebf6a4),
+    U64(0x39ec830b39ec830b), U64(0xaaef6040aaef6040),
+    U64(0x069f715e069f715e), U64(0x51106ebd51106ebd),
+    U64(0xf98a213ef98a213e), U64(0x3d06dd963d06dd96),
+    U64(0xae053eddae053edd), U64(0x46bde64d46bde64d),
+    U64(0xb58d5491b58d5491), U64(0x055dc471055dc471),
+    U64(0x6fd406046fd40604), U64(0xff155060ff155060),
+    U64(0x24fb981924fb9819), U64(0x97e9bdd697e9bdd6),
+    U64(0xcc434089cc434089), U64(0x779ed967779ed967),
+    U64(0xbd42e8b0bd42e8b0), U64(0x888b8907888b8907),
+    U64(0x385b19e7385b19e7), U64(0xdbeec879dbeec879),
+    U64(0x470a7ca1470a7ca1), U64(0xe90f427ce90f427c),
+    U64(0xc91e84f8c91e84f8), U64(0x0000000000000000),
+    U64(0x8386800983868009), U64(0x48ed2b3248ed2b32),
+    U64(0xac70111eac70111e), U64(0x4e725a6c4e725a6c),
+    U64(0xfbff0efdfbff0efd), U64(0x5638850f5638850f),
+    U64(0x1ed5ae3d1ed5ae3d), U64(0x27392d3627392d36),
+    U64(0x64d90f0a64d90f0a), U64(0x21a65c6821a65c68),
+    U64(0xd1545b9bd1545b9b), U64(0x3a2e36243a2e3624),
+    U64(0xb1670a0cb1670a0c), U64(0x0fe757930fe75793),
+    U64(0xd296eeb4d296eeb4), U64(0x9e919b1b9e919b1b),
+    U64(0x4fc5c0804fc5c080), U64(0xa220dc61a220dc61),
+    U64(0x694b775a694b775a), U64(0x161a121c161a121c),
+    U64(0x0aba93e20aba93e2), U64(0xe52aa0c0e52aa0c0),
+    U64(0x43e0223c43e0223c), U64(0x1d171b121d171b12),
+    U64(0x0b0d090e0b0d090e), U64(0xadc78bf2adc78bf2),
+    U64(0xb9a8b62db9a8b62d), U64(0xc8a91e14c8a91e14),
+    U64(0x8519f1578519f157), U64(0x4c0775af4c0775af),
+    U64(0xbbdd99eebbdd99ee), U64(0xfd607fa3fd607fa3),
+    U64(0x9f2601f79f2601f7), U64(0xbcf5725cbcf5725c),
+    U64(0xc53b6644c53b6644), U64(0x347efb5b347efb5b),
+    U64(0x7629438b7629438b), U64(0xdcc623cbdcc623cb),
+    U64(0x68fcedb668fcedb6), U64(0x63f1e4b863f1e4b8),
+    U64(0xcadc31d7cadc31d7), U64(0x1085634210856342),
+    U64(0x4022971340229713), U64(0x2011c6842011c684),
+    U64(0x7d244a857d244a85), U64(0xf83dbbd2f83dbbd2),
+    U64(0x1132f9ae1132f9ae), U64(0x6da129c76da129c7),
+    U64(0x4b2f9e1d4b2f9e1d), U64(0xf330b2dcf330b2dc),
+    U64(0xec52860dec52860d), U64(0xd0e3c177d0e3c177),
+    U64(0x6c16b32b6c16b32b), U64(0x99b970a999b970a9),
+    U64(0xfa489411fa489411), U64(0x2264e9472264e947),
+    U64(0xc48cfca8c48cfca8), U64(0x1a3ff0a01a3ff0a0),
+    U64(0xd82c7d56d82c7d56), U64(0xef903322ef903322),
+    U64(0xc74e4987c74e4987), U64(0xc1d138d9c1d138d9),
+    U64(0xfea2ca8cfea2ca8c), U64(0x360bd498360bd498),
+    U64(0xcf81f5a6cf81f5a6), U64(0x28de7aa528de7aa5),
+    U64(0x268eb7da268eb7da), U64(0xa4bfad3fa4bfad3f),
+    U64(0xe49d3a2ce49d3a2c), U64(0x0d9278500d927850),
+    U64(0x9bcc5f6a9bcc5f6a), U64(0x62467e5462467e54),
+    U64(0xc2138df6c2138df6), U64(0xe8b8d890e8b8d890),
+    U64(0x5ef7392e5ef7392e), U64(0xf5afc382f5afc382),
+    U64(0xbe805d9fbe805d9f), U64(0x7c93d0697c93d069),
+    U64(0xa92dd56fa92dd56f), U64(0xb31225cfb31225cf),
+    U64(0x3b99acc83b99acc8), U64(0xa77d1810a77d1810),
+    U64(0x6e639ce86e639ce8), U64(0x7bbb3bdb7bbb3bdb),
+    U64(0x097826cd097826cd), U64(0xf418596ef418596e),
+    U64(0x01b79aec01b79aec), U64(0xa89a4f83a89a4f83),
+    U64(0x656e95e6656e95e6), U64(0x7ee6ffaa7ee6ffaa),
+    U64(0x08cfbc2108cfbc21), U64(0xe6e815efe6e815ef),
+    U64(0xd99be7bad99be7ba), U64(0xce366f4ace366f4a),
+    U64(0xd4099fead4099fea), U64(0xd67cb029d67cb029),
+    U64(0xafb2a431afb2a431), U64(0x31233f2a31233f2a),
+    U64(0x3094a5c63094a5c6), U64(0xc066a235c066a235),
+    U64(0x37bc4e7437bc4e74), U64(0xa6ca82fca6ca82fc),
+    U64(0xb0d090e0b0d090e0), U64(0x15d8a73315d8a733),
+    U64(0x4a9804f14a9804f1), U64(0xf7daec41f7daec41),
+    U64(0x0e50cd7f0e50cd7f), U64(0x2ff691172ff69117),
+    U64(0x8dd64d768dd64d76), U64(0x4db0ef434db0ef43),
+    U64(0x544daacc544daacc), U64(0xdf0496e4df0496e4),
+    U64(0xe3b5d19ee3b5d19e), U64(0x1b886a4c1b886a4c),
+    U64(0xb81f2cc1b81f2cc1), U64(0x7f5165467f516546),
+    U64(0x04ea5e9d04ea5e9d), U64(0x5d358c015d358c01),
+    U64(0x737487fa737487fa), U64(0x2e410bfb2e410bfb),
+    U64(0x5a1d67b35a1d67b3), U64(0x52d2db9252d2db92),
+    U64(0x335610e9335610e9), U64(0x1347d66d1347d66d),
+    U64(0x8c61d79a8c61d79a), U64(0x7a0ca1377a0ca137),
+    U64(0x8e14f8598e14f859), U64(0x893c13eb893c13eb),
+    U64(0xee27a9ceee27a9ce), U64(0x35c961b735c961b7),
+    U64(0xede51ce1ede51ce1), U64(0x3cb1477a3cb1477a),
+    U64(0x59dfd29c59dfd29c), U64(0x3f73f2553f73f255),
+    U64(0x79ce141879ce1418), U64(0xbf37c773bf37c773),
+    U64(0xeacdf753eacdf753), U64(0x5baafd5f5baafd5f),
+    U64(0x146f3ddf146f3ddf), U64(0x86db447886db4478),
+    U64(0x81f3afca81f3afca), U64(0x3ec468b93ec468b9),
+    U64(0x2c3424382c342438), U64(0x5f40a3c25f40a3c2),
+    U64(0x72c31d1672c31d16), U64(0x0c25e2bc0c25e2bc),
+    U64(0x8b493c288b493c28), U64(0x41950dff41950dff),
+    U64(0x7101a8397101a839), U64(0xdeb30c08deb30c08),
+    U64(0x9ce4b4d89ce4b4d8), U64(0x90c1566490c15664),
+    U64(0x6184cb7b6184cb7b), U64(0x70b632d570b632d5),
+    U64(0x745c6c48745c6c48), U64(0x4257b8d04257b8d0)
+};
+static const u8 Td4[256] = {
+    0x52U, 0x09U, 0x6aU, 0xd5U, 0x30U, 0x36U, 0xa5U, 0x38U,
+    0xbfU, 0x40U, 0xa3U, 0x9eU, 0x81U, 0xf3U, 0xd7U, 0xfbU,
+    0x7cU, 0xe3U, 0x39U, 0x82U, 0x9bU, 0x2fU, 0xffU, 0x87U,
+    0x34U, 0x8eU, 0x43U, 0x44U, 0xc4U, 0xdeU, 0xe9U, 0xcbU,
+    0x54U, 0x7bU, 0x94U, 0x32U, 0xa6U, 0xc2U, 0x23U, 0x3dU,
+    0xeeU, 0x4cU, 0x95U, 0x0bU, 0x42U, 0xfaU, 0xc3U, 0x4eU,
+    0x08U, 0x2eU, 0xa1U, 0x66U, 0x28U, 0xd9U, 0x24U, 0xb2U,
+    0x76U, 0x5bU, 0xa2U, 0x49U, 0x6dU, 0x8bU, 0xd1U, 0x25U,
+    0x72U, 0xf8U, 0xf6U, 0x64U, 0x86U, 0x68U, 0x98U, 0x16U,
+    0xd4U, 0xa4U, 0x5cU, 0xccU, 0x5dU, 0x65U, 0xb6U, 0x92U,
+    0x6cU, 0x70U, 0x48U, 0x50U, 0xfdU, 0xedU, 0xb9U, 0xdaU,
+    0x5eU, 0x15U, 0x46U, 0x57U, 0xa7U, 0x8dU, 0x9dU, 0x84U,
+    0x90U, 0xd8U, 0xabU, 0x00U, 0x8cU, 0xbcU, 0xd3U, 0x0aU,
+    0xf7U, 0xe4U, 0x58U, 0x05U, 0xb8U, 0xb3U, 0x45U, 0x06U,
+    0xd0U, 0x2cU, 0x1eU, 0x8fU, 0xcaU, 0x3fU, 0x0fU, 0x02U,
+    0xc1U, 0xafU, 0xbdU, 0x03U, 0x01U, 0x13U, 0x8aU, 0x6bU,
+    0x3aU, 0x91U, 0x11U, 0x41U, 0x4fU, 0x67U, 0xdcU, 0xeaU,
+    0x97U, 0xf2U, 0xcfU, 0xceU, 0xf0U, 0xb4U, 0xe6U, 0x73U,
+    0x96U, 0xacU, 0x74U, 0x22U, 0xe7U, 0xadU, 0x35U, 0x85U,
+    0xe2U, 0xf9U, 0x37U, 0xe8U, 0x1cU, 0x75U, 0xdfU, 0x6eU,
+    0x47U, 0xf1U, 0x1aU, 0x71U, 0x1dU, 0x29U, 0xc5U, 0x89U,
+    0x6fU, 0xb7U, 0x62U, 0x0eU, 0xaaU, 0x18U, 0xbeU, 0x1bU,
+    0xfcU, 0x56U, 0x3eU, 0x4bU, 0xc6U, 0xd2U, 0x79U, 0x20U,
+    0x9aU, 0xdbU, 0xc0U, 0xfeU, 0x78U, 0xcdU, 0x5aU, 0xf4U,
+    0x1fU, 0xddU, 0xa8U, 0x33U, 0x88U, 0x07U, 0xc7U, 0x31U,
+    0xb1U, 0x12U, 0x10U, 0x59U, 0x27U, 0x80U, 0xecU, 0x5fU,
+    0x60U, 0x51U, 0x7fU, 0xa9U, 0x19U, 0xb5U, 0x4aU, 0x0dU,
+    0x2dU, 0xe5U, 0x7aU, 0x9fU, 0x93U, 0xc9U, 0x9cU, 0xefU,
+    0xa0U, 0xe0U, 0x3bU, 0x4dU, 0xaeU, 0x2aU, 0xf5U, 0xb0U,
+    0xc8U, 0xebU, 0xbbU, 0x3cU, 0x83U, 0x53U, 0x99U, 0x61U,
+    0x17U, 0x2bU, 0x04U, 0x7eU, 0xbaU, 0x77U, 0xd6U, 0x26U,
+    0xe1U, 0x69U, 0x14U, 0x63U, 0x55U, 0x21U, 0x0cU, 0x7dU
+};
+
+static const u32 rcon[] = {
+    0x00000001U, 0x00000002U, 0x00000004U, 0x00000008U,
+    0x00000010U, 0x00000020U, 0x00000040U, 0x00000080U,
+    0x0000001bU, 0x00000036U, /* for 128-bit blocks, Rijndael never uses more than 10 rcon values */
+};
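+
+/*
+ * Note: rcon[] follows the little-endian word convention used
+ * throughout this file, so rcon[i] is XORed into the lowest byte of
+ * the round-key word, i.e. the first key byte of the column, as the
+ * key schedule requires.
+ */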
+
+/**
+ * Expand the cipher key into the encryption key schedule.
+ */
+int AES_set_encrypt_key(const unsigned char *userKey, const int bits,
+			AES_KEY *key) {
+
+	u32 *rk;
+   	int i = 0;
+	u32 temp;
+
+	if (!userKey || !key)
+		return -1;
+	if (bits != 128 && bits != 192 && bits != 256)
+		return -2;
+
+	rk = key->rd_key;
+
+	if (bits==128)
+		key->rounds = 10;
+	else if (bits==192)
+		key->rounds = 12;
+	else
+		key->rounds = 14;
+
+	rk[0] = GETU32(userKey     );
+	rk[1] = GETU32(userKey +  4);
+	rk[2] = GETU32(userKey +  8);
+	rk[3] = GETU32(userKey + 12);
+	if (bits == 128) {
+		while (1) {
+			temp  = rk[3];
+			rk[4] = rk[0] ^
+				(Te4[(temp >>  8) & 0xff]      ) ^
+				(Te4[(temp >> 16) & 0xff] <<  8) ^
+				(Te4[(temp >> 24)       ] << 16) ^
+				(Te4[(temp      ) & 0xff] << 24) ^
+				rcon[i];
+			rk[5] = rk[1] ^ rk[4];
+			rk[6] = rk[2] ^ rk[5];
+			rk[7] = rk[3] ^ rk[6];
+			if (++i == 10) {
+				return 0;
+			}
+			rk += 4;
+		}
+	}
+	rk[4] = GETU32(userKey + 16);
+	rk[5] = GETU32(userKey + 20);
+	if (bits == 192) {
+		while (1) {
+			temp = rk[ 5];
+			rk[ 6] = rk[ 0] ^
+				(Te4[(temp >>  8) & 0xff]      ) ^
+				(Te4[(temp >> 16) & 0xff] <<  8) ^
+				(Te4[(temp >> 24)       ] << 16) ^
+				(Te4[(temp      ) & 0xff] << 24) ^
+				rcon[i];
+			rk[ 7] = rk[ 1] ^ rk[ 6];
+			rk[ 8] = rk[ 2] ^ rk[ 7];
+			rk[ 9] = rk[ 3] ^ rk[ 8];
+			if (++i == 8) {
+				return 0;
+			}
+			rk[10] = rk[ 4] ^ rk[ 9];
+			rk[11] = rk[ 5] ^ rk[10];
+			rk += 6;
+		}
+	}
+	rk[6] = GETU32(userKey + 24);
+	rk[7] = GETU32(userKey + 28);
+	if (bits == 256) {
+		while (1) {
+			temp = rk[ 7];
+			rk[ 8] = rk[ 0] ^
+				(Te4[(temp >>  8) & 0xff]      ) ^
+				(Te4[(temp >> 16) & 0xff] <<  8) ^
+				(Te4[(temp >> 24)       ] << 16) ^
+				(Te4[(temp      ) & 0xff] << 24) ^
+				rcon[i];
+			rk[ 9] = rk[ 1] ^ rk[ 8];
+			rk[10] = rk[ 2] ^ rk[ 9];
+			rk[11] = rk[ 3] ^ rk[10];
+			if (++i == 7) {
+				return 0;
+			}
+			temp = rk[11];
+			rk[12] = rk[ 4] ^
+				(Te4[(temp      ) & 0xff]      ) ^
+				(Te4[(temp >>  8) & 0xff] <<  8) ^
+				(Te4[(temp >> 16) & 0xff] << 16) ^
+				(Te4[(temp >> 24)       ] << 24);
+			rk[13] = rk[ 5] ^ rk[12];
+			rk[14] = rk[ 6] ^ rk[13];
+			rk[15] = rk[ 7] ^ rk[14];
+
+			rk += 8;
+		}
+	}
+	return 0;
+}
+
+/**
+ * Expand the cipher key into the decryption key schedule.
+ */
+int AES_set_decrypt_key(const unsigned char *userKey, const int bits,
+			 AES_KEY *key) {
+
+        u32 *rk;
+	int i, j, status;
+	u32 temp;
+
+	/* first, start with an encryption schedule */
+	status = AES_set_encrypt_key(userKey, bits, key);
+	if (status < 0)
+		return status;
+
+	rk = key->rd_key;
+
+	/* invert the order of the round keys: */
+	for (i = 0, j = 4*(key->rounds); i < j; i += 4, j -= 4) {
+		temp = rk[i    ]; rk[i    ] = rk[j    ]; rk[j    ] = temp;
+		temp = rk[i + 1]; rk[i + 1] = rk[j + 1]; rk[j + 1] = temp;
+		temp = rk[i + 2]; rk[i + 2] = rk[j + 2]; rk[j + 2] = temp;
+		temp = rk[i + 3]; rk[i + 3] = rk[j + 3]; rk[j + 3] = temp;
+	}
+	/* apply the inverse MixColumn transform to all round keys but the first and the last: */
+	for (i = 1; i < (key->rounds); i++) {
+		rk += 4;
+#if 1
+		for (j = 0; j < 4; j++) {
+			u32 tp1, tp2, tp4, tp8, tp9, tpb, tpd, tpe, m;
+
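+			/*
+			 * tpN is the round-key word with every byte
+			 * multiplied by N in GF(2^8); 9, b, d and e are the
+			 * InvMixColumns coefficients. (m - (m >> 7)) &
+			 * 0x1b1b1b1b yields 0x1b in every byte whose top bit
+			 * was set, so each doubling step below is a
+			 * branchless, table-free "xtime" on four bytes at
+			 * once.
+			 */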
+			tp1 = rk[j];
+			m = tp1 & 0x80808080;
+			tp2 = ((tp1 & 0x7f7f7f7f) << 1) ^
+				((m - (m >> 7)) & 0x1b1b1b1b);
+			m = tp2 & 0x80808080;
+			tp4 = ((tp2 & 0x7f7f7f7f) << 1) ^
+				((m - (m >> 7)) & 0x1b1b1b1b);
+			m = tp4 & 0x80808080;
+			tp8 = ((tp4 & 0x7f7f7f7f) << 1) ^
+				((m - (m >> 7)) & 0x1b1b1b1b);
+			tp9 = tp8 ^ tp1;
+			tpb = tp9 ^ tp2;
+			tpd = tp9 ^ tp4;
+			tpe = tp8 ^ tp4 ^ tp2;
+#if defined(ROTATE)
+			rk[j] = tpe ^ ROTATE(tpd,16) ^
+				ROTATE(tp9,8) ^ ROTATE(tpb,24);
+#else
+			rk[j] = tpe ^ (tpd >> 16) ^ (tpd << 16) ^ 
+				(tp9 >> 24) ^ (tp9 << 8) ^
+				(tpb >> 8) ^ (tpb << 24);
+#endif
+		}
+#else
+		rk[0] =
+			Td0[Te2[(rk[0]      ) & 0xff] & 0xff] ^
+			Td1[Te2[(rk[0] >>  8) & 0xff] & 0xff] ^
+			Td2[Te2[(rk[0] >> 16) & 0xff] & 0xff] ^
+			Td3[Te2[(rk[0] >> 24)       ] & 0xff];
+		rk[1] =
+			Td0[Te2[(rk[1]      ) & 0xff] & 0xff] ^
+			Td1[Te2[(rk[1] >>  8) & 0xff] & 0xff] ^
+			Td2[Te2[(rk[1] >> 16) & 0xff] & 0xff] ^
+			Td3[Te2[(rk[1] >> 24)       ] & 0xff];
+		rk[2] =
+			Td0[Te2[(rk[2]      ) & 0xff] & 0xff] ^
+			Td1[Te2[(rk[2] >>  8) & 0xff] & 0xff] ^
+			Td2[Te2[(rk[2] >> 16) & 0xff] & 0xff] ^
+			Td3[Te2[(rk[2] >> 24)       ] & 0xff];
+		rk[3] =
+			Td0[Te2[(rk[3]      ) & 0xff] & 0xff] ^
+			Td1[Te2[(rk[3] >>  8) & 0xff] & 0xff] ^
+			Td2[Te2[(rk[3] >> 16) & 0xff] & 0xff] ^
+			Td3[Te2[(rk[3] >> 24)       ] & 0xff];
+#endif
+	}
+	return 0;
+}
+
+/*
+ * Encrypt a single block
+ * in and out can overlap
+ */
+void AES_encrypt(const unsigned char *in, unsigned char *out,
+		 const AES_KEY *key) {
+
+	const u32 *rk;
+	u32 s0, s1, s2, s3, t[4];
+	int r;
+
+	assert(in && out && key);
+	rk = key->rd_key;
+
+	/*
+	 * map byte array block to cipher state
+	 * and add initial round key:
+	 */
+	s0 = GETU32(in     ) ^ rk[0];
+	s1 = GETU32(in +  4) ^ rk[1];
+	s2 = GETU32(in +  8) ^ rk[2];
+	s3 = GETU32(in + 12) ^ rk[3];
+
+#if defined(AES_COMPACT_IN_OUTER_ROUNDS)
+	prefetch256(Te4);
+
+	t[0] =	Te4[(s0      ) & 0xff]       ^
+		Te4[(s1 >>  8) & 0xff] <<  8 ^
+		Te4[(s2 >> 16) & 0xff] << 16 ^
+		Te4[(s3 >> 24)       ] << 24;
+	t[1] =	Te4[(s1      ) & 0xff]       ^
+		Te4[(s2 >>  8) & 0xff] <<  8 ^
+		Te4[(s3 >> 16) & 0xff] << 16 ^
+		Te4[(s0 >> 24)       ] << 24;
+	t[2] =	Te4[(s2      ) & 0xff]       ^
+		Te4[(s3 >>  8) & 0xff] <<  8 ^
+		Te4[(s0 >> 16) & 0xff] << 16 ^
+		Te4[(s1 >> 24)       ] << 24;
+	t[3] =	Te4[(s3      ) & 0xff]       ^
+		Te4[(s0 >>  8) & 0xff] <<  8 ^
+		Te4[(s1 >> 16) & 0xff] << 16 ^
+		Te4[(s2 >> 24)       ] << 24;
+
+	/* now do the linear transform using words */
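+	/*
+	 * (Explanatory note: this is MixColumns applied word-wise, with
+	 * r2 = xtime(r0); it is needed here because the compact Te4
+	 * table holds only the raw S-box, unlike the full Te table
+	 * which has the MixColumns coefficients folded in.)
+	 */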
+	{	int i;
+		u32 r0, r1, r2;
+
+		for (i = 0; i < 4; i++) {
+			r0 = t[i];
+			r1 = r0 & 0x80808080;
+			r2 = ((r0 & 0x7f7f7f7f) << 1) ^
+				((r1 - (r1 >> 7)) & 0x1b1b1b1b);
+#if defined(ROTATE)
+			t[i] = r2 ^ ROTATE(r2,24) ^ ROTATE(r0,24) ^
+				ROTATE(r0,16) ^ ROTATE(r0,8);
+#else
+			t[i] = r2 ^ ((r2 ^ r0) << 24) ^ ((r2 ^ r0) >> 8) ^
+				(r0 << 16) ^ (r0 >> 16) ^
+				(r0 << 8) ^ (r0 >> 24);
+#endif
+			t[i] ^= rk[4+i];
+		}
+	}
+#else
+	t[0] =	Te0[(s0      ) & 0xff] ^
+		Te1[(s1 >>  8) & 0xff] ^
+		Te2[(s2 >> 16) & 0xff] ^
+		Te3[(s3 >> 24)       ] ^
+		rk[4];
+	t[1] =	Te0[(s1      ) & 0xff] ^
+		Te1[(s2 >>  8) & 0xff] ^
+		Te2[(s3 >> 16) & 0xff] ^
+		Te3[(s0 >> 24)       ] ^
+		rk[5];
+	t[2] =	Te0[(s2      ) & 0xff] ^
+		Te1[(s3 >>  8) & 0xff] ^
+		Te2[(s0 >> 16) & 0xff] ^
+		Te3[(s1 >> 24)       ] ^
+		rk[6];
+	t[3] =	Te0[(s3      ) & 0xff] ^
+		Te1[(s0 >>  8) & 0xff] ^
+		Te2[(s1 >> 16) & 0xff] ^
+		Te3[(s2 >> 24)       ] ^
+		rk[7];
+#endif
+	s0 = t[0]; s1 = t[1]; s2 = t[2]; s3 = t[3];
+
+    /*
+     * Nr - 2 full rounds:
+     */
+    for (rk+=8,r=key->rounds-2; r>0; rk+=4,r--) {
+#if defined(AES_COMPACT_IN_INNER_ROUNDS)
+	t[0] =	Te4[(s0      ) & 0xff]       ^
+		Te4[(s1 >>  8) & 0xff] <<  8 ^
+		Te4[(s2 >> 16) & 0xff] << 16 ^
+		Te4[(s3 >> 24)       ] << 24;
+	t[1] =	Te4[(s1      ) & 0xff]       ^
+		Te4[(s2 >>  8) & 0xff] <<  8 ^
+		Te4[(s3 >> 16) & 0xff] << 16 ^
+		Te4[(s0 >> 24)       ] << 24;
+	t[2] =	Te4[(s2      ) & 0xff]       ^
+		Te4[(s3 >>  8) & 0xff] <<  8 ^
+		Te4[(s0 >> 16) & 0xff] << 16 ^
+		Te4[(s1 >> 24)       ] << 24;
+	t[3] =	Te4[(s3      ) & 0xff]       ^
+		Te4[(s0 >>  8) & 0xff] <<  8 ^
+		Te4[(s1 >> 16) & 0xff] << 16 ^
+		Te4[(s2 >> 24)       ] << 24;
+
+	/* now do the linear transform using words */
+	{	int i;
+		u32 r0, r1, r2;
+
+		for (i = 0; i < 4; i++) {
+			r0 = t[i];
+			r1 = r0 & 0x80808080;
+			r2 = ((r0 & 0x7f7f7f7f) << 1) ^
+				((r1 - (r1 >> 7)) & 0x1b1b1b1b);
+#if defined(ROTATE)
+			t[i] = r2 ^ ROTATE(r2,24) ^ ROTATE(r0,24) ^
+				ROTATE(r0,16) ^ ROTATE(r0,8);
+#else
+			t[i] = r2 ^ ((r2 ^ r0) << 24) ^ ((r2 ^ r0) >> 8) ^
+				(r0 << 16) ^ (r0 >> 16) ^
+				(r0 << 8) ^ (r0 >> 24);
+#endif
+			t[i] ^= rk[i];
+		}
+	}
+#else
+	t[0] =	Te0[(s0      ) & 0xff] ^
+		Te1[(s1 >>  8) & 0xff] ^
+		Te2[(s2 >> 16) & 0xff] ^
+		Te3[(s3 >> 24)       ] ^
+		rk[0];
+	t[1] =	Te0[(s1      ) & 0xff] ^
+		Te1[(s2 >>  8) & 0xff] ^
+		Te2[(s3 >> 16) & 0xff] ^
+		Te3[(s0 >> 24)       ] ^
+		rk[1];
+	t[2] =	Te0[(s2      ) & 0xff] ^
+		Te1[(s3 >>  8) & 0xff] ^
+		Te2[(s0 >> 16) & 0xff] ^
+		Te3[(s1 >> 24)       ] ^
+		rk[2];
+	t[3] =	Te0[(s3      ) & 0xff] ^
+		Te1[(s0 >>  8) & 0xff] ^
+		Te2[(s1 >> 16) & 0xff] ^
+		Te3[(s2 >> 24)       ] ^
+		rk[3];
+#endif
+	s0 = t[0]; s1 = t[1]; s2 = t[2]; s3 = t[3];
+    }
+    /*
+	 * apply last round and
+	 * map cipher state to byte array block:
+	 */
+#if defined(AES_COMPACT_IN_OUTER_ROUNDS)
+	prefetch256(Te4);
+
+	*(u32*)(out+0) =
+		Te4[(s0      ) & 0xff]       ^
+		Te4[(s1 >>  8) & 0xff] <<  8 ^
+		Te4[(s2 >> 16) & 0xff] << 16 ^
+		Te4[(s3 >> 24)       ] << 24 ^
+		rk[0];
+	*(u32*)(out+4) =
+		Te4[(s1      ) & 0xff]       ^
+		Te4[(s2 >>  8) & 0xff] <<  8 ^
+		Te4[(s3 >> 16) & 0xff] << 16 ^
+		Te4[(s0 >> 24)       ] << 24 ^
+		rk[1];
+	*(u32*)(out+8) =
+		Te4[(s2      ) & 0xff]       ^
+		Te4[(s3 >>  8) & 0xff] <<  8 ^
+		Te4[(s0 >> 16) & 0xff] << 16 ^
+		Te4[(s1 >> 24)       ] << 24 ^
+		rk[2];
+	*(u32*)(out+12) =
+		Te4[(s3      ) & 0xff]       ^
+		Te4[(s0 >>  8) & 0xff] <<  8 ^
+		Te4[(s1 >> 16) & 0xff] << 16 ^
+		Te4[(s2 >> 24)       ] << 24 ^
+		rk[3];
+#else
+	*(u32*)(out+0) =
+		(Te2[(s0      ) & 0xff] & 0x000000ffU) ^
+		(Te3[(s1 >>  8) & 0xff] & 0x0000ff00U) ^
+		(Te0[(s2 >> 16) & 0xff] & 0x00ff0000U) ^
+		(Te1[(s3 >> 24)       ] & 0xff000000U) ^
+		rk[0];
+	*(u32*)(out+4) =
+		(Te2[(s1      ) & 0xff] & 0x000000ffU) ^
+		(Te3[(s2 >>  8) & 0xff] & 0x0000ff00U) ^
+		(Te0[(s3 >> 16) & 0xff] & 0x00ff0000U) ^
+		(Te1[(s0 >> 24)       ] & 0xff000000U) ^
+		rk[1];
+	*(u32*)(out+8) =
+		(Te2[(s2      ) & 0xff] & 0x000000ffU) ^
+		(Te3[(s3 >>  8) & 0xff] & 0x0000ff00U) ^
+		(Te0[(s0 >> 16) & 0xff] & 0x00ff0000U) ^
+		(Te1[(s1 >> 24)       ] & 0xff000000U) ^
+		rk[2];
+	*(u32*)(out+12) =
+		(Te2[(s3      ) & 0xff] & 0x000000ffU) ^
+		(Te3[(s0 >>  8) & 0xff] & 0x0000ff00U) ^
+		(Te0[(s1 >> 16) & 0xff] & 0x00ff0000U) ^
+		(Te1[(s2 >> 24)       ] & 0xff000000U) ^
+		rk[3];
+#endif
+}
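+
+/*
+ * Usage sketch (illustrative only, not part of the original source):
+ * expand a 128-bit key and encrypt one 16-byte block with the
+ * functions defined above. AES_set_encrypt_key() returns 0 on
+ * success, -1 for NULL arguments and -2 for an unsupported key size.
+ *
+ *	unsigned char k[16] = {0}, in[16] = {0}, out[16];
+ *	AES_KEY ks;
+ *
+ *	if (AES_set_encrypt_key(k, 128, &ks) == 0)
+ *		AES_encrypt(in, out, &ks);
+ */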
+
+/*
+ * Decrypt a single block
+ * in and out can overlap
+ */
+void AES_decrypt(const unsigned char *in, unsigned char *out,
+		 const AES_KEY *key) {
+
+	const u32 *rk;
+	u32 s0, s1, s2, s3, t[4];
+	int r;
+
+	assert(in && out && key);
+	rk = key->rd_key;
+
+	/*
+	 * map byte array block to cipher state
+	 * and add initial round key:
+	 */
+	s0 = GETU32(in     ) ^ rk[0];
+	s1 = GETU32(in +  4) ^ rk[1];
+	s2 = GETU32(in +  8) ^ rk[2];
+	s3 = GETU32(in + 12) ^ rk[3];
+
+#if defined(AES_COMPACT_IN_OUTER_ROUNDS)
+	prefetch256(Td4);
+
+        t[0] =	Td4[(s0      ) & 0xff]       ^
+		Td4[(s3 >>  8) & 0xff] <<  8 ^
+		Td4[(s2 >> 16) & 0xff] << 16 ^
+		Td4[(s1 >> 24)       ] << 24;
+        t[1] =	Td4[(s1      ) & 0xff]       ^
+		Td4[(s0 >>  8) & 0xff] <<  8 ^
+		Td4[(s3 >> 16) & 0xff] << 16 ^
+		Td4[(s2 >> 24)       ] << 24;
+        t[2] =	Td4[(s2      ) & 0xff]       ^
+		Td4[(s1 >>  8) & 0xff] <<  8 ^
+		Td4[(s0 >> 16) & 0xff] << 16 ^
+		Td4[(s3 >> 24)       ] << 24;
+        t[3] =	Td4[(s3      ) & 0xff]       ^
+		Td4[(s2 >>  8) & 0xff] <<  8 ^
+		Td4[(s1 >> 16) & 0xff] << 16 ^
+		Td4[(s0 >> 24)       ] << 24;
+
+	/* now do the linear transform using words */ 
+	{	int i;
+		u32 tp1, tp2, tp4, tp8, tp9, tpb, tpd, tpe, m;
+
+		for (i = 0; i < 4; i++) {
+			tp1 = t[i];
+			m = tp1 & 0x80808080;
+			tp2 = ((tp1 & 0x7f7f7f7f) << 1) ^
+				((m - (m >> 7)) & 0x1b1b1b1b);
+			m = tp2 & 0x80808080;
+			tp4 = ((tp2 & 0x7f7f7f7f) << 1) ^
+				((m - (m >> 7)) & 0x1b1b1b1b);
+			m = tp4 & 0x80808080;
+			tp8 = ((tp4 & 0x7f7f7f7f) << 1) ^
+				((m - (m >> 7)) & 0x1b1b1b1b);
+			tp9 = tp8 ^ tp1;
+			tpb = tp9 ^ tp2;
+			tpd = tp9 ^ tp4;
+			tpe = tp8 ^ tp4 ^ tp2;
+#if defined(ROTATE)
+			t[i] = tpe ^ ROTATE(tpd,16) ^
+				ROTATE(tp9,8) ^ ROTATE(tpb,24);
+#else
+			t[i] = tpe ^ (tpd >> 16) ^ (tpd << 16) ^ 
+				(tp9 >> 24) ^ (tp9 << 8) ^
+				(tpb >> 8) ^ (tpb << 24);
+#endif
+			t[i] ^= rk[4+i];
+		}
+	}
+#else
+	t[0] =	Td0[(s0      ) & 0xff] ^
+		Td1[(s3 >>  8) & 0xff] ^
+		Td2[(s2 >> 16) & 0xff] ^
+		Td3[(s1 >> 24)       ] ^
+		rk[4];
+	t[1] =	Td0[(s1      ) & 0xff] ^
+		Td1[(s0 >>  8) & 0xff] ^
+		Td2[(s3 >> 16) & 0xff] ^
+		Td3[(s2 >> 24)       ] ^
+		rk[5];
+	t[2] =	Td0[(s2      ) & 0xff] ^
+		Td1[(s1 >>  8) & 0xff] ^
+		Td2[(s0 >> 16) & 0xff] ^
+		Td3[(s3 >> 24)       ] ^
+		rk[6];
+	t[3] =	Td0[(s3      ) & 0xff] ^
+		Td1[(s2 >>  8) & 0xff] ^
+		Td2[(s1 >> 16) & 0xff] ^
+		Td3[(s0 >> 24)       ] ^
+		rk[7];
+#endif
+	s0 = t[0]; s1 = t[1]; s2 = t[2]; s3 = t[3];
+
+    /*
+     * Nr - 2 full rounds:
+     */
+    for (rk+=8,r=key->rounds-2; r>0; rk+=4,r--) {
+#if defined(AES_COMPACT_IN_INNER_ROUNDS)
+        t[0] =	Td4[(s0      ) & 0xff]       ^
+		Td4[(s3 >>  8) & 0xff] <<  8 ^
+		Td4[(s2 >> 16) & 0xff] << 16 ^
+		Td4[(s1 >> 24)       ] << 24;
+        t[1] =	Td4[(s1      ) & 0xff]       ^
+		Td4[(s0 >>  8) & 0xff] <<  8 ^
+		Td4[(s3 >> 16) & 0xff] << 16 ^
+		Td4[(s2 >> 24)       ] << 24;
+        t[2] =	Td4[(s2      ) & 0xff]       ^
+		Td4[(s1 >>  8) & 0xff] <<  8 ^
+		Td4[(s0 >> 16) & 0xff] << 16 ^
+		Td4[(s3 >> 24)       ] << 24;
+        t[3] =	Td4[(s3      ) & 0xff]       ^
+		Td4[(s2 >>  8) & 0xff] <<  8 ^
+		Td4[(s1 >> 16) & 0xff] << 16 ^
+		Td4[(s0 >> 24)       ] << 24;
+
+	/* now do the linear transform using words */ 
+	{	int i;
+		u32 tp1, tp2, tp4, tp8, tp9, tpb, tpd, tpe, m;
+
+		for (i = 0; i < 4; i++) {
+			tp1 = t[i];
+			m = tp1 & 0x80808080;
+			tp2 = ((tp1 & 0x7f7f7f7f) << 1) ^
+				((m - (m >> 7)) & 0x1b1b1b1b);
+			m = tp2 & 0x80808080;
+			tp4 = ((tp2 & 0x7f7f7f7f) << 1) ^
+				((m - (m >> 7)) & 0x1b1b1b1b);
+			m = tp4 & 0x80808080;
+			tp8 = ((tp4 & 0x7f7f7f7f) << 1) ^
+				((m - (m >> 7)) & 0x1b1b1b1b);
+			tp9 = tp8 ^ tp1;
+			tpb = tp9 ^ tp2;
+			tpd = tp9 ^ tp4;
+			tpe = tp8 ^ tp4 ^ tp2;
+#if defined(ROTATE)
+			t[i] = tpe ^ ROTATE(tpd,16) ^
+				ROTATE(tp9,8) ^ ROTATE(tpb,24);
+#else
+			t[i] = tpe ^ (tpd >> 16) ^ (tpd << 16) ^ 
+				(tp9 >> 24) ^ (tp9 << 8) ^
+				(tpb >> 8) ^ (tpb << 24);
+#endif
+			t[i] ^= rk[i];
+		}
+	}
+#else
+	t[0] =	Td0[(s0      ) & 0xff] ^
+		Td1[(s3 >>  8) & 0xff] ^
+		Td2[(s2 >> 16) & 0xff] ^
+		Td3[(s1 >> 24)       ] ^
+		rk[0];
+	t[1] =	Td0[(s1      ) & 0xff] ^
+		Td1[(s0 >>  8) & 0xff] ^
+		Td2[(s3 >> 16) & 0xff] ^
+		Td3[(s2 >> 24)       ] ^
+		rk[1];
+	t[2] =	Td0[(s2      ) & 0xff] ^
+		Td1[(s1 >>  8) & 0xff] ^
+		Td2[(s0 >> 16) & 0xff] ^
+		Td3[(s3 >> 24)       ] ^
+		rk[2];
+	t[3] =	Td0[(s3      ) & 0xff] ^
+		Td1[(s2 >>  8) & 0xff] ^
+		Td2[(s1 >> 16) & 0xff] ^
+		Td3[(s0 >> 24)       ] ^
+		rk[3];
+#endif
+	s0 = t[0]; s1 = t[1]; s2 = t[2]; s3 = t[3];
+    }
+    /*
+	 * apply last round and
+	 * map cipher state to byte array block:
+	 */
+	prefetch256(Td4);
+
+	*(u32*)(out+0) =
+		(Td4[(s0      ) & 0xff])	^
+		(Td4[(s3 >>  8) & 0xff] <<  8) ^
+		(Td4[(s2 >> 16) & 0xff] << 16) ^
+		(Td4[(s1 >> 24)       ] << 24) ^
+		rk[0];
+	*(u32*)(out+4) =
+		(Td4[(s1      ) & 0xff])	 ^
+		(Td4[(s0 >>  8) & 0xff] <<  8) ^
+		(Td4[(s3 >> 16) & 0xff] << 16) ^
+		(Td4[(s2 >> 24)       ] << 24) ^
+		rk[1];
+	*(u32*)(out+8) =
+		(Td4[(s2      ) & 0xff])	 ^
+		(Td4[(s1 >>  8) & 0xff] <<  8) ^
+		(Td4[(s0 >> 16) & 0xff] << 16) ^
+		(Td4[(s3 >> 24)       ] << 24) ^
+		rk[2];
+	*(u32*)(out+12) =
+		(Td4[(s3      ) & 0xff])	 ^
+		(Td4[(s2 >>  8) & 0xff] <<  8) ^
+		(Td4[(s1 >> 16) & 0xff] << 16) ^
+		(Td4[(s0 >> 24)       ] << 24) ^
+		rk[3];
+}
diff --git a/jni/openssl/crypto/aes/asm/aes-586.pl b/jni/openssl/crypto/aes/asm/aes-586.pl
new file mode 100755
index 0000000..687ed81
--- /dev/null
+++ b/jni/openssl/crypto/aes/asm/aes-586.pl
@@ -0,0 +1,2980 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# Version 4.3.
+#
+# You might fail to appreciate this module's performance on the first
+# try. If compared to the "vanilla" linux-ia32-icc target, i.e. considered
+# to be *the* best Intel C compiler without -KPIC, performance appears
+# to be virtually identical... But try to re-configure with shared
+# library support... Aha! Intel compiler "suddenly" lags behind by 30%
+# [on P4, more on others]:-) And if compared to position-independent
+# code generated by GNU C, this code performs *more* than *twice* as
+# fast! Yes, all this buzz about PIC means that unlike other hand-
+# coded implementations, this one was explicitly designed to be safe
+# to use even in shared library context... This also means that this
+# code isn't necessarily absolutely fastest "ever," because in order
+# to achieve position independence an extra register has to be
+# off-loaded to stack, which affects the benchmark result.
+#
+# Special note about instruction choice. Do you recall RC4_INT code
+# performing poorly on P4? It might be the time to figure out why.
+# RC4_INT code implies effective address calculations in base+offset*4
+# form. Trouble is that it seems that offset scaling turned to be
+# critical path... At least eliminating scaling resulted in 2.8x RC4
+# performance improvement [as you might recall]. As AES code is hungry
+# for scaling too, I [try to] avoid the latter by favoring off-by-2
+# shifts and masking the result with 0xFF<<2 instead of "boring" 0xFF.
+#
+# As was shown by Dean Gaudet <dean@arctic.org>, the above note turned
+# void. Performance improvement with off-by-2 shifts was observed on
+# intermediate implementation, which was spilling yet another register
+# to stack... Final offset*4 code below runs just a tad faster on P4,
+# but exhibits up to 10% improvement on other cores.
+#
+# Second version is "monolithic" replacement for aes_core.c, which in
+# addition to AES_[de|en]crypt implements private_AES_set_[de|en]cryption_key.
+# This made it possible to implement little-endian variant of the
+# algorithm without modifying the base C code. Motivating factor for
+# the undertaken effort was that it appeared that in tight IA-32
+# register window little-endian flavor could achieve slightly higher
+# Instruction Level Parallelism, and it indeed resulted in up to 15%
+# better performance on most recent µ-archs...
+#
+# Third version adds AES_cbc_encrypt implementation, which resulted in
+# up to 40% performance improvement of CBC benchmark results. 40% was
+# observed on P4 core, where the "overall" improvement coefficient, i.e. if
+# compared to PIC generated by GCC and in CBC mode, was observed to be
+# as large as 4x:-) CBC performance is virtually identical to ECB now
+# and on some platforms even better, e.g. 17.6 "small" cycles/byte on
+# Opteron, because certain function prologues and epilogues are
+# effectively taken out of the loop...
+#
+# Version 3.2 implements compressed tables and prefetch of these tables
+# in CBC[!] mode. The former means that 3/4 of table references are now
+# misaligned, which unfortunately has a negative impact on older IA-32
+# implementations: Pentium suffered a 30% penalty, PIII - 10%.
+#
+# Version 3.3 avoids L1 cache aliasing between the stack frame and the
+# S-boxes, and 3.4 - L1 cache aliasing even with the key schedule. The
+# latter is achieved by copying the key schedule to a controlled place on
+# the stack. This unfortunately has a rather strong impact on small-block
+# CBC performance, ~2x deterioration on a 16-byte block compared to 3.3.
+#
+# Version 3.5 checks if there is L1 cache aliasing between the user-supplied
+# key schedule and the S-boxes and abstains from copying the former if
+# there is none. This allows the end user to consciously retain small-block
+# performance by aligning the key schedule in a specific manner.
+#
+# Version 3.6 compresses Td4 to 256 bytes and prefetches it in ECB.
+#
+# Current ECB performance numbers for 128-bit key in CPU cycles per
+# processed byte [measure commonly used by AES benchmarkers] are:
+#
+#		small footprint		fully unrolled
+# P4		24			22
+# AMD K8	20			19
+# PIII		25			23
+# Pentium	81			78
+#
+# Version 3.7 reimplements the outer rounds as "compact," meaning that the
+# first and last rounds reference the compact 256-byte S-box. This means
+# that the first round consumes a lot more CPU cycles and that encrypt
+# and decrypt performance become asymmetric. Encrypt performance
+# drops by 10-12%, while decrypt - by 20-25%:-( The 256-byte S-box is
+# aggressively pre-fetched.
+#
+# Version 4.0 effectively rolls back to 3.6 and instead implements
+# an additional set of functions, _[x86|sse]_AES_[en|de]crypt_compact,
+# which exclusively use the 256-byte S-box. These functions are to be
+# called in modes not concealing plain text, such as ECB, or when
+# we're asked to process a smaller amount of data [or unconditionally
+# on a hyper-threading CPU]. Currently they are called unconditionally from
+# AES_[en|de]crypt, which affects all modes but CBC. The CBC routine
+# still needs to be modified to switch between slower and faster
+# mode when appropriate... But in either case benchmark landscape
+# changes dramatically and below numbers are CPU cycles per processed
+# byte for 128-bit key.
+#
+#		ECB encrypt	ECB decrypt	CBC large chunk
+# P4		56[60]		84[100]		23
+# AMD K8	48[44]		70[79]		18
+# PIII		41[50]		61[91]		24
+# Core 2	32[38]		45[70]		18.5
+# Pentium	120		160		77
+#
+# Version 4.1 switches to compact S-box even in key schedule setup.
+#
+# Version 4.2 prefetches the compact S-box in every SSE round, or in other
+# words every cache-line is *guaranteed* to be accessed within a ~50-
+# cycle window. Why just SSE? Because it's needed on a hyper-threading
+# CPU! Which is also why it's prefetched with a 64-byte stride. The best
+# part is that it has no negative effect on performance:-)
+#
+# Version 4.3 implements switch between compact and non-compact block
+# functions in AES_cbc_encrypt depending on how much data was asked
+# to be processed in one stroke.
+#
+######################################################################
+# Timing attacks are classified in two classes: synchronous when
+# attacker consciously initiates cryptographic operation and collects
+# timing data of various character afterwards, and asynchronous when
+# malicious code is executed on same CPU simultaneously with AES,
+# instruments itself and performs statistical analysis of this data.
+#
+# As far as synchronous attacks go, the root of the AES timing
+# vulnerability is twofold. Firstly, of the 256 S-box elements at most
+# 160 are referred to in a single 128-bit block operation. Well, in a C
+# implementation with 4 distinct tables it's actually as little as 40
+# references per 256-element table, but anyway... Secondly, even
+# though S-box elements are clustered into a smaller number of cache-
+# lines, smaller than 160 and even 40, it turned out that for certain
+# plain-text pattern[s], or simply put chosen plain-text and a given key,
+# a few cache-lines remain unaccessed during the block operation. Now, if
+# the attacker can figure out this access pattern, he can deduce the key
+# [or at least part of it]. The natural way to mitigate this kind of
+# attack is to minimize the number of cache-lines in the S-box and/or
+# prefetch them to ensure that every one is accessed, for more uniform
+# timing. But note that *if* the plain-text was concealed in such a way
+# that the input to the block function is distributed *uniformly*, then
+# the attack wouldn't apply. Now note that some encryption modes, most
+# notably CBC, do mask the plain-text in this exact way [secure cipher
+# output is distributed uniformly]. Yes, one still might find input that
+# would reveal information about a given key, but if the number of
+# candidate inputs to be tried is larger than the number of possible key
+# combinations, then the attack becomes infeasible. This is why the revised
+# AES_cbc_encrypt "dares" to switch to the larger S-box when a larger chunk
+# of data is to be processed in one stroke. The current size limit of
+# 512 bytes is chosen to provide the same [diminishingly low] probability
+# for a cache-line to remain untouched in a large-chunk operation with the
+# large S-box as for a single block operation with the compact S-box, and
+# surely needs more careful consideration...
+#
+# As for asynchronous attacks. There are two flavours: attacker code
+# being interleaved with AES on a hyper-threading CPU at *instruction*
+# level, and two processes time-sharing a single core. As for the latter.
+# Two vectors. 1. Given that the attacker process has higher priority,
+# yield execution to the process performing AES just before the timer fires
+# off the scheduler, immediately regain control of the CPU and analyze the
+# cache state. For this attack to be efficient the attacker would have to
+# effectively slow down the operation by several *orders* of magnitude,
+# by the ratio of the time slice to the duration of a handful of AES rounds,
+# which is unlikely to remain unnoticed. Not to mention that this also means
+# that he would spend correspondingly more time collecting enough
+# statistical data to mount the attack. It's probably appropriate to
+# say that if an adversary reckons that this attack is beneficial and
+# worth the risk of being noticed, you probably have larger problems than
+# his having the mere opportunity. In other words the suggested code design
+# expects you to preclude/mitigate this attack by overall system security
+# design. 2. The attacker manages to make his code interrupt-driven. In
+# order for this kind of attack to be feasible, the interrupt rate has to be
+# high enough, again comparable to the duration of a handful of AES rounds.
+# But is there an interrupt source of such rate? Hardly; not even a 1Gbps
+# NIC generates interrupts at such a raging rate...
+#
+# And now back to the former, hyper-threading CPU or more specifically
+# Intel P4. Recall that asynchronous attack implies that malicious
+# code instruments itself. And naturally the instrumentation granularity
+# has to be noticeably lower than the duration of the codepath accessing
+# the S-box. That is, given that all cache-lines are accessed during that
+# time.
+# Current implementation accesses *all* cache-lines within ~50 cycles
+# window, which is actually *less* than RDTSC latency on Intel P4!
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+push(@INC,"${dir}","${dir}../../perlasm");
+require "x86asm.pl";
+
+&asm_init($ARGV[0],"aes-586.pl",$x86only = $ARGV[$#ARGV] eq "386");
+&static_label("AES_Te");
+&static_label("AES_Td");
+
+$s0="eax";
+$s1="ebx";
+$s2="ecx";
+$s3="edx";
+$key="edi";
+$acc="esi";
+$tbl="ebp";
+
+# stack frame layout in _[x86|sse]_AES_* routines, frame is allocated
+# by caller
+$__ra=&DWP(0,"esp");	# return address
+$__s0=&DWP(4,"esp");	# s0 backing store
+$__s1=&DWP(8,"esp");	# s1 backing store
+$__s2=&DWP(12,"esp");	# s2 backing store
+$__s3=&DWP(16,"esp");	# s3 backing store
+$__key=&DWP(20,"esp");	# pointer to key schedule
+$__end=&DWP(24,"esp");	# pointer to end of key schedule
+$__tbl=&DWP(28,"esp");	# %ebp backing store
+
+# stack frame layout in AES_[en|de]crypt routines, which differs from
+# the above by 4 bytes and overlaps at the %ebp backing store
+$_tbl=&DWP(24,"esp");
+$_esp=&DWP(28,"esp");
+
+sub _data_word() { my $i; while(defined($i=shift)) { &data_word($i,$i); } }
+
+$speed_limit=512;	# chunks smaller than $speed_limit are
+			# processed with compact routine in CBC mode
+$small_footprint=1;	# $small_footprint=1 code is ~5% slower [on
+			# recent µ-archs], but ~5 times smaller!
+			# I favor compact code to minimize cache
+			# contention and in hope to "collect" 5% back
+			# in real-life applications...
+
+$vertical_spin=0;	# shift "vertically" defaults to 0, because of
+			# its proof-of-concept status...
+# Note that there is no decvert(), as well as last encryption round is
+# performed with "horizontal" shifts. This is because this "vertical"
+# implementation [one which groups shifts on a given $s[i] to form a
+# "column," unlike "horizontal" one, which groups shifts on different
+# $s[i] to form a "row"] is work in progress. It was observed to run a
+# few percent faster on Intel cores, but not AMD. On the AMD K8 core it's
+# a whole 12% slower:-( So we face a trade-off... Shall it be resolved
+# some day? Till then the code is considered experimental and by
+# default remains dormant...
+
+sub encvert()
+{ my ($te,@s) = @_;
+  my $v0 = $acc, $v1 = $key;
+
+	&mov	($v0,$s[3]);				# copy s3
+	&mov	(&DWP(4,"esp"),$s[2]);			# save s2
+	&mov	($v1,$s[0]);				# copy s0
+	&mov	(&DWP(8,"esp"),$s[1]);			# save s1
+
+	&movz	($s[2],&HB($s[0]));
+	&and	($s[0],0xFF);
+	&mov	($s[0],&DWP(0,$te,$s[0],8));		# s0>>0
+	&shr	($v1,16);
+	&mov	($s[3],&DWP(3,$te,$s[2],8));		# s0>>8
+	&movz	($s[1],&HB($v1));
+	&and	($v1,0xFF);
+	&mov	($s[2],&DWP(2,$te,$v1,8));		# s0>>16
+	 &mov	($v1,$v0);
+	&mov	($s[1],&DWP(1,$te,$s[1],8));		# s0>>24
+
+	&and	($v0,0xFF);
+	&xor	($s[3],&DWP(0,$te,$v0,8));		# s3>>0
+	&movz	($v0,&HB($v1));
+	&shr	($v1,16);
+	&xor	($s[2],&DWP(3,$te,$v0,8));		# s3>>8
+	&movz	($v0,&HB($v1));
+	&and	($v1,0xFF);
+	&xor	($s[1],&DWP(2,$te,$v1,8));		# s3>>16
+	 &mov	($v1,&DWP(4,"esp"));			# restore s2
+	&xor	($s[0],&DWP(1,$te,$v0,8));		# s3>>24
+
+	&mov	($v0,$v1);
+	&and	($v1,0xFF);
+	&xor	($s[2],&DWP(0,$te,$v1,8));		# s2>>0
+	&movz	($v1,&HB($v0));
+	&shr	($v0,16);
+	&xor	($s[1],&DWP(3,$te,$v1,8));		# s2>>8
+	&movz	($v1,&HB($v0));
+	&and	($v0,0xFF);
+	&xor	($s[0],&DWP(2,$te,$v0,8));		# s2>>16
+	 &mov	($v0,&DWP(8,"esp"));			# restore s1
+	&xor	($s[3],&DWP(1,$te,$v1,8));		# s2>>24
+
+	&mov	($v1,$v0);
+	&and	($v0,0xFF);
+	&xor	($s[1],&DWP(0,$te,$v0,8));		# s1>>0
+	&movz	($v0,&HB($v1));
+	&shr	($v1,16);
+	&xor	($s[0],&DWP(3,$te,$v0,8));		# s1>>8
+	&movz	($v0,&HB($v1));
+	&and	($v1,0xFF);
+	&xor	($s[3],&DWP(2,$te,$v1,8));		# s1>>16
+	 &mov	($key,$__key);				# reincarnate v1 as key
+	&xor	($s[2],&DWP(1,$te,$v0,8));		# s1>>24
+}
+
+# Another experimental routine, which features "horizontal spin," but
+# eliminates one reference to stack. Strangely enough runs slower...
+sub enchoriz()
+{ my $v0 = $key, $v1 = $acc;
+
+	&movz	($v0,&LB($s0));			#  3, 2, 1, 0*
+	&rotr	($s2,8);			#  8,11,10, 9
+	&mov	($v1,&DWP(0,$te,$v0,8));	#  0
+	&movz	($v0,&HB($s1));			#  7, 6, 5*, 4
+	&rotr	($s3,16);			# 13,12,15,14
+	&xor	($v1,&DWP(3,$te,$v0,8));	#  5
+	&movz	($v0,&HB($s2));			#  8,11,10*, 9
+	&rotr	($s0,16);			#  1, 0, 3, 2
+	&xor	($v1,&DWP(2,$te,$v0,8));	# 10
+	&movz	($v0,&HB($s3));			# 13,12,15*,14
+	&xor	($v1,&DWP(1,$te,$v0,8));	# 15, t[0] collected
+	&mov	($__s0,$v1);			# t[0] saved
+
+	&movz	($v0,&LB($s1));			#  7, 6, 5, 4*
+	&shr	($s1,16);			#  -, -, 7, 6
+	&mov	($v1,&DWP(0,$te,$v0,8));	#  4
+	&movz	($v0,&LB($s3));			# 13,12,15,14*
+	&xor	($v1,&DWP(2,$te,$v0,8));	# 14
+	&movz	($v0,&HB($s0));			#  1, 0, 3*, 2
+	&and	($s3,0xffff0000);		# 13,12, -, -
+	&xor	($v1,&DWP(1,$te,$v0,8));	#  3
+	&movz	($v0,&LB($s2));			#  8,11,10, 9*
+	&or	($s3,$s1);			# 13,12, 7, 6
+	&xor	($v1,&DWP(3,$te,$v0,8));	#  9, t[1] collected
+	&mov	($s1,$v1);			#  s[1]=t[1]
+
+	&movz	($v0,&LB($s0));			#  1, 0, 3, 2*
+	&shr	($s2,16);			#  -, -, 8,11
+	&mov	($v1,&DWP(2,$te,$v0,8));	#  2
+	&movz	($v0,&HB($s3));			# 13,12, 7*, 6
+	&xor	($v1,&DWP(1,$te,$v0,8));	#  7
+	&movz	($v0,&HB($s2));			#  -, -, 8*,11
+	&xor	($v1,&DWP(0,$te,$v0,8));	#  8
+	&mov	($v0,$s3);
+	&shr	($v0,24);			# 13
+	&xor	($v1,&DWP(3,$te,$v0,8));	# 13, t[2] collected
+
+	&movz	($v0,&LB($s2));			#  -, -, 8,11*
+	&shr	($s0,24);			#  1*
+	&mov	($s2,&DWP(1,$te,$v0,8));	# 11
+	&xor	($s2,&DWP(3,$te,$s0,8));	#  1
+	&mov	($s0,$__s0);			# s[0]=t[0]
+	&movz	($v0,&LB($s3));			# 13,12, 7, 6*
+	&shr	($s3,16);			#   ,  ,13,12
+	&xor	($s2,&DWP(2,$te,$v0,8));	#  6
+	&mov	($key,$__key);			# reincarnate v0 as key
+	&and	($s3,0xff);			#   ,  ,13,12*
+	&mov	($s3,&DWP(0,$te,$s3,8));	# 12
+	&xor	($s3,$s2);			# s[2]=t[3] collected
+	&mov	($s2,$v1);			# s[2]=t[2]
+}
+
+# More experimental code... SSE one... Even though this one eliminates
+# *all* references to stack, it's not faster...
+sub sse_encbody()
+{
+	&movz	($acc,&LB("eax"));		#  0
+	&mov	("ecx",&DWP(0,$tbl,$acc,8));	#  0
+	&pshufw	("mm2","mm0",0x0d);		#  7, 6, 3, 2
+	&movz	("edx",&HB("eax"));		#  1
+	&mov	("edx",&DWP(3,$tbl,"edx",8));	#  1
+	&shr	("eax",16);			#  5, 4
+
+	&movz	($acc,&LB("ebx"));		# 10
+	&xor	("ecx",&DWP(2,$tbl,$acc,8));	# 10
+	&pshufw	("mm6","mm4",0x08);		# 13,12, 9, 8
+	&movz	($acc,&HB("ebx"));		# 11
+	&xor	("edx",&DWP(1,$tbl,$acc,8));	# 11
+	&shr	("ebx",16);			# 15,14
+
+	&movz	($acc,&HB("eax"));		#  5
+	&xor	("ecx",&DWP(3,$tbl,$acc,8));	#  5
+	&movq	("mm3",QWP(16,$key));
+	&movz	($acc,&HB("ebx"));		# 15
+	&xor	("ecx",&DWP(1,$tbl,$acc,8));	# 15
+	&movd	("mm0","ecx");			# t[0] collected
+
+	&movz	($acc,&LB("eax"));		#  4
+	&mov	("ecx",&DWP(0,$tbl,$acc,8));	#  4
+	&movd	("eax","mm2");			#  7, 6, 3, 2
+	&movz	($acc,&LB("ebx"));		# 14
+	&xor	("ecx",&DWP(2,$tbl,$acc,8));	# 14
+	&movd	("ebx","mm6");			# 13,12, 9, 8
+
+	&movz	($acc,&HB("eax"));		#  3
+	&xor	("ecx",&DWP(1,$tbl,$acc,8));	#  3
+	&movz	($acc,&HB("ebx"));		#  9
+	&xor	("ecx",&DWP(3,$tbl,$acc,8));	#  9
+	&movd	("mm1","ecx");			# t[1] collected
+
+	&movz	($acc,&LB("eax"));		#  2
+	&mov	("ecx",&DWP(2,$tbl,$acc,8));	#  2
+	&shr	("eax",16);			#  7, 6
+	&punpckldq	("mm0","mm1");		# t[0,1] collected
+	&movz	($acc,&LB("ebx"));		#  8
+	&xor	("ecx",&DWP(0,$tbl,$acc,8));	#  8
+	&shr	("ebx",16);			# 13,12
+
+	&movz	($acc,&HB("eax"));		#  7
+	&xor	("ecx",&DWP(1,$tbl,$acc,8));	#  7
+	&pxor	("mm0","mm3");
+	&movz	("eax",&LB("eax"));		#  6
+	&xor	("edx",&DWP(2,$tbl,"eax",8));	#  6
+	&pshufw	("mm1","mm0",0x08);		#  5, 4, 1, 0
+	&movz	($acc,&HB("ebx"));		# 13
+	&xor	("ecx",&DWP(3,$tbl,$acc,8));	# 13
+	&xor	("ecx",&DWP(24,$key));		# t[2]
+	&movd	("mm4","ecx");			# t[2] collected
+	&movz	("ebx",&LB("ebx"));		# 12
+	&xor	("edx",&DWP(0,$tbl,"ebx",8));	# 12
+	&shr	("ecx",16);
+	&movd	("eax","mm1");			#  5, 4, 1, 0
+	&mov	("ebx",&DWP(28,$key));		# t[3]
+	&xor	("ebx","edx");
+	&movd	("mm5","ebx");			# t[3] collected
+	&and	("ebx",0xffff0000);
+	&or	("ebx","ecx");
+
+	&punpckldq	("mm4","mm5");		# t[2,3] collected
+}
+
+######################################################################
+# "Compact" block function
+######################################################################
+
+sub enccompact()
+{ my $Fn = mov;
+  while ($#_>5) { pop(@_); $Fn=sub{}; }
+  my ($i,$te,@s)=@_;
+  my $tmp = $key;
+  my $out = $i==3?$s[0]:$acc;
+
+	# $Fn is used in the first compact round and its purpose is to
+	# avoid restoring some values from the stack, so that after
+	# 4x enccompact with the extra argument the $key value is left there...
+	if ($i==3)  {	&$Fn	($key,$__key);			}##%edx
+	else        {	&mov	($out,$s[0]);			}
+			&and	($out,0xFF);
+	if ($i==1)  {	&shr	($s[0],16);			}#%ebx[1]
+	if ($i==2)  {	&shr	($s[0],24);			}#%ecx[2]
+			&movz	($out,&BP(-128,$te,$out,1));
+
+	if ($i==3)  {	$tmp=$s[1];				}##%eax
+			&movz	($tmp,&HB($s[1]));
+			&movz	($tmp,&BP(-128,$te,$tmp,1));
+			&shl	($tmp,8);
+			&xor	($out,$tmp);
+
+	if ($i==3)  {	$tmp=$s[2]; &mov ($s[1],$__s0);		}##%ebx
+	else        {	&mov	($tmp,$s[2]);
+			&shr	($tmp,16);			}
+	if ($i==2)  {	&and	($s[1],0xFF);			}#%edx[2]
+			&and	($tmp,0xFF);
+			&movz	($tmp,&BP(-128,$te,$tmp,1));
+			&shl	($tmp,16);
+			&xor	($out,$tmp);
+
+	if ($i==3)  {	$tmp=$s[3]; &mov ($s[2],$__s1);		}##%ecx
+	elsif($i==2){	&movz	($tmp,&HB($s[3]));		}#%ebx[2]
+	else        {	&mov	($tmp,$s[3]);
+			&shr	($tmp,24);			}
+			&movz	($tmp,&BP(-128,$te,$tmp,1));
+			&shl	($tmp,24);
+			&xor	($out,$tmp);
+	if ($i<2)   {	&mov	(&DWP(4+4*$i,"esp"),$out);	}
+	if ($i==3)  {	&mov	($s[3],$acc);			}
+	&comment();
+}
+
+sub enctransform()
+{ my @s = ($s0,$s1,$s2,$s3);
+  my $i = shift;
+  my $tmp = $tbl;
+  my $r2  = $key ;
+
+	&mov	($acc,$s[$i]);
+	&and	($acc,0x80808080);
+	&mov	($tmp,$acc);
+	&shr	($tmp,7);
+	&lea	($r2,&DWP(0,$s[$i],$s[$i]));
+	&sub	($acc,$tmp);
+	&and	($r2,0xfefefefe);
+	&and	($acc,0x1b1b1b1b);
+	&mov	($tmp,$s[$i]);
+	&xor	($acc,$r2);	# r2
+
+	&xor	($s[$i],$acc);	# r0 ^ r2
+	&rotl	($s[$i],24);
+	&xor	($s[$i],$acc);	# ROTATE(r2^r0,24) ^ r2
+	&rotr	($tmp,16);
+	&xor	($s[$i],$tmp);
+	&rotr	($tmp,8);
+	&xor	($s[$i],$tmp);
+}
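+
+# (Explanatory note: the sequence above computes, per 32-bit word,
+#  r2 ^ ROTATE(r2^r0,24) ^ ROTATE(r0,16) ^ ROTATE(r0,8) with
+#  r2 = xtime(r0), i.e. the same word-wise MixColumns transform used
+#  by the compact rounds in aes_x86core.c.)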
+
+&function_begin_B("_x86_AES_encrypt_compact");
+	# note that caller is expected to allocate stack frame for me!
+	&mov	($__key,$key);			# save key
+
+	&xor	($s0,&DWP(0,$key));		# xor with key
+	&xor	($s1,&DWP(4,$key));
+	&xor	($s2,&DWP(8,$key));
+	&xor	($s3,&DWP(12,$key));
+
+	&mov	($acc,&DWP(240,$key));		# load key->rounds
+	&lea	($acc,&DWP(-2,$acc,$acc));
+	&lea	($acc,&DWP(0,$key,$acc,8));
+	&mov	($__end,$acc);			# end of key schedule
+
+	# prefetch Te4
+	&mov	($key,&DWP(0-128,$tbl));
+	&mov	($acc,&DWP(32-128,$tbl));
+	&mov	($key,&DWP(64-128,$tbl));
+	&mov	($acc,&DWP(96-128,$tbl));
+	&mov	($key,&DWP(128-128,$tbl));
+	&mov	($acc,&DWP(160-128,$tbl));
+	&mov	($key,&DWP(192-128,$tbl));
+	&mov	($acc,&DWP(224-128,$tbl));
+
+	&set_label("loop",16);
+
+		&enccompact(0,$tbl,$s0,$s1,$s2,$s3,1);
+		&enccompact(1,$tbl,$s1,$s2,$s3,$s0,1);
+		&enccompact(2,$tbl,$s2,$s3,$s0,$s1,1);
+		&enccompact(3,$tbl,$s3,$s0,$s1,$s2,1);
+		&enctransform(2);
+		&enctransform(3);
+		&enctransform(0);
+		&enctransform(1);
+		&mov 	($key,$__key);
+		&mov	($tbl,$__tbl);
+		&add	($key,16);		# advance rd_key
+		&xor	($s0,&DWP(0,$key));
+		&xor	($s1,&DWP(4,$key));
+		&xor	($s2,&DWP(8,$key));
+		&xor	($s3,&DWP(12,$key));
+
+	&cmp	($key,$__end);
+	&mov	($__key,$key);
+	&jb	(&label("loop"));
+
+	&enccompact(0,$tbl,$s0,$s1,$s2,$s3);
+	&enccompact(1,$tbl,$s1,$s2,$s3,$s0);
+	&enccompact(2,$tbl,$s2,$s3,$s0,$s1);
+	&enccompact(3,$tbl,$s3,$s0,$s1,$s2);
+
+	&xor	($s0,&DWP(16,$key));
+	&xor	($s1,&DWP(20,$key));
+	&xor	($s2,&DWP(24,$key));
+	&xor	($s3,&DWP(28,$key));
+
+	&ret	();
+&function_end_B("_x86_AES_encrypt_compact");
+
+######################################################################
+# "Compact" SSE block function.
+######################################################################
+#
+# Performance is not actually extraordinary in comparison to pure
+# x86 code. In particular encrypt performance is virtually the same.
+# Decrypt performance on the other hand is 15-20% better on newer
+# µ-archs [but we're thankful for *any* improvement here], and ~50%
+# better on PIII:-) An additional advantage is that this code
+# eliminates redundant references to the stack and thus relieves
+# pressure on the memory bus.
+#
+# MMX register layout                           lsb
+# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+# |          mm4          |          mm0          |
+# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+# |     s3    |     s2    |     s1    |     s0    |    
+# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+# |15|14|13|12|11|10| 9| 8| 7| 6| 5| 4| 3| 2| 1| 0|
+# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+#
+# Indexes translate as s[N/4]>>(8*(N%4)), e.g. 5 means s1>>8.
+# In these terms the encryption and decryption "compact" permutation
+# matrices can be depicted as follows:
+#
+# encryption              lsb	# decryption              lsb
+# +----++----+----+----+----+	# +----++----+----+----+----+
+# | t0 || 15 | 10 |  5 |  0 |	# | t0 ||  7 | 10 | 13 |  0 |
+# +----++----+----+----+----+	# +----++----+----+----+----+
+# | t1 ||  3 | 14 |  9 |  4 |	# | t1 || 11 | 14 |  1 |  4 |
+# +----++----+----+----+----+	# +----++----+----+----+----+
+# | t2 ||  7 |  2 | 13 |  8 |	# | t2 || 15 |  2 |  5 |  8 |
+# +----++----+----+----+----+	# +----++----+----+----+----+
+# | t3 || 11 |  6 |  1 | 12 |	# | t3 ||  3 |  6 |  9 | 12 |
+# +----++----+----+----+----+	# +----++----+----+----+----+
+#
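+# As an illustrative C sketch (not generated code), the first row of
+# the encryption matrix says the bytes of t0 are gathered as
+#
+#	t0 = S[ s0        & 0xff]       |
+#	     S[(s1 >>  8) & 0xff] <<  8 |
+#	     S[(s2 >> 16) & 0xff] << 16 |
+#	     S[(s3 >> 24) & 0xff] << 24;	/* indexes 0, 5, 10, 15 */
+#
+# with S being the 256-byte S-box copy (Te4) referenced as
+# BP(-128,$tbl,...) below, i.e. SubBytes combined with ShiftRows; the
+# decryption matrix reads the same way with Td4.  The SSE code gathers
+# these bytes with pshufw/movd plus byte extracts instead of shifting
+# the state words.
+#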
+######################################################################
+# Why not xmm registers? Short answer: it was actually tested and
+# was not any faster but, on the contrary, slower, most notably on
+# Intel CPUs. Longer answer: the main advantage of using mm registers
+# is that movd latency is lower, especially on Intel P4. While there
+# are twice as many arithmetic instructions, they can be scheduled
+# every cycle rather than every second one, as they would be when
+# operating on xmm registers, so that "arithmetic throughput" remains
+# virtually the same. And finally the code can be executed even on
+# older SSE-only CPUs:-)
+
+sub sse_enccompact()
+{
+	&pshufw	("mm1","mm0",0x08);		#  5, 4, 1, 0
+	&pshufw	("mm5","mm4",0x0d);		# 15,14,11,10
+	&movd	("eax","mm1");			#  5, 4, 1, 0
+	&movd	("ebx","mm5");			# 15,14,11,10
+
+	&movz	($acc,&LB("eax"));		#  0
+	&movz	("ecx",&BP(-128,$tbl,$acc,1));	#  0
+	&pshufw	("mm2","mm0",0x0d);		#  7, 6, 3, 2
+	&movz	("edx",&HB("eax"));		#  1
+	&movz	("edx",&BP(-128,$tbl,"edx",1));	#  1
+	&shl	("edx",8);			#  1
+	&shr	("eax",16);			#  5, 4
+
+	&movz	($acc,&LB("ebx"));		# 10
+	&movz	($acc,&BP(-128,$tbl,$acc,1));	# 10
+	&shl	($acc,16);			# 10
+	&or	("ecx",$acc);			# 10
+	&pshufw	("mm6","mm4",0x08);		# 13,12, 9, 8
+	&movz	($acc,&HB("ebx"));		# 11
+	&movz	($acc,&BP(-128,$tbl,$acc,1));	# 11
+	&shl	($acc,24);			# 11
+	&or	("edx",$acc);			# 11
+	&shr	("ebx",16);			# 15,14
+
+	&movz	($acc,&HB("eax"));		#  5
+	&movz	($acc,&BP(-128,$tbl,$acc,1));	#  5
+	&shl	($acc,8);			#  5
+	&or	("ecx",$acc);			#  5
+	&movz	($acc,&HB("ebx"));		# 15
+	&movz	($acc,&BP(-128,$tbl,$acc,1));	# 15
+	&shl	($acc,24);			# 15
+	&or	("ecx",$acc);			# 15
+	&movd	("mm0","ecx");			# t[0] collected
+
+	&movz	($acc,&LB("eax"));		#  4
+	&movz	("ecx",&BP(-128,$tbl,$acc,1));	#  4
+	&movd	("eax","mm2");			#  7, 6, 3, 2
+	&movz	($acc,&LB("ebx"));		# 14
+	&movz	($acc,&BP(-128,$tbl,$acc,1));	# 14
+	&shl	($acc,16);			# 14
+	&or	("ecx",$acc);			# 14
+
+	&movd	("ebx","mm6");			# 13,12, 9, 8
+	&movz	($acc,&HB("eax"));		#  3
+	&movz	($acc,&BP(-128,$tbl,$acc,1));	#  3
+	&shl	($acc,24);			#  3
+	&or	("ecx",$acc);			#  3
+	&movz	($acc,&HB("ebx"));		#  9
+	&movz	($acc,&BP(-128,$tbl,$acc,1));	#  9
+	&shl	($acc,8);			#  9
+	&or	("ecx",$acc);			#  9
+	&movd	("mm1","ecx");			# t[1] collected
+
+	&movz	($acc,&LB("ebx"));		#  8
+	&movz	("ecx",&BP(-128,$tbl,$acc,1));	#  8
+	&shr	("ebx",16);			# 13,12
+	&movz	($acc,&LB("eax"));		#  2
+	&movz	($acc,&BP(-128,$tbl,$acc,1));	#  2
+	&shl	($acc,16);			#  2
+	&or	("ecx",$acc);			#  2
+	&shr	("eax",16);			#  7, 6
+
+	&punpckldq	("mm0","mm1");		# t[0,1] collected
+
+	&movz	($acc,&HB("eax"));		#  7
+	&movz	($acc,&BP(-128,$tbl,$acc,1));	#  7
+	&shl	($acc,24);			#  7
+	&or	("ecx",$acc);			#  7
+	&and	("eax",0xff);			#  6
+	&movz	("eax",&BP(-128,$tbl,"eax",1));	#  6
+	&shl	("eax",16);			#  6
+	&or	("edx","eax");			#  6
+	&movz	($acc,&HB("ebx"));		# 13
+	&movz	($acc,&BP(-128,$tbl,$acc,1));	# 13
+	&shl	($acc,8);			# 13
+	&or	("ecx",$acc);			# 13
+	&movd	("mm4","ecx");			# t[2] collected
+	&and	("ebx",0xff);			# 12
+	&movz	("ebx",&BP(-128,$tbl,"ebx",1));	# 12
+	&or	("edx","ebx");			# 12
+	&movd	("mm5","edx");			# t[3] collected
+
+	&punpckldq	("mm4","mm5");		# t[2,3] collected
+}
+
+					if (!$x86only) {
+&function_begin_B("_sse_AES_encrypt_compact");
+	&pxor	("mm0",&QWP(0,$key));	#  7, 6, 5, 4, 3, 2, 1, 0
+	&pxor	("mm4",&QWP(8,$key));	# 15,14,13,12,11,10, 9, 8
+
+	# note that caller is expected to allocate stack frame for me!
+	&mov	($acc,&DWP(240,$key));		# load key->rounds
+	&lea	($acc,&DWP(-2,$acc,$acc));
+	&lea	($acc,&DWP(0,$key,$acc,8));
+	&mov	($__end,$acc);			# end of key schedule
+
+	&mov	($s0,0x1b1b1b1b);		# magic constant
+	&mov	(&DWP(8,"esp"),$s0);
+	&mov	(&DWP(12,"esp"),$s0);
+
+	# prefetch Te4
+	&mov	($s0,&DWP(0-128,$tbl));
+	&mov	($s1,&DWP(32-128,$tbl));
+	&mov	($s2,&DWP(64-128,$tbl));
+	&mov	($s3,&DWP(96-128,$tbl));
+	&mov	($s0,&DWP(128-128,$tbl));
+	&mov	($s1,&DWP(160-128,$tbl));
+	&mov	($s2,&DWP(192-128,$tbl));
+	&mov	($s3,&DWP(224-128,$tbl));
+
+	&set_label("loop",16);
+		&sse_enccompact();
+		&add	($key,16);
+		&cmp	($key,$__end);
+		&ja	(&label("out"));
+
+		&movq	("mm2",&QWP(8,"esp"));
+		&pxor	("mm3","mm3");		&pxor	("mm7","mm7");
+		&movq	("mm1","mm0");		&movq	("mm5","mm4");	# r0
+		&pcmpgtb("mm3","mm0");		&pcmpgtb("mm7","mm4");
+		&pand	("mm3","mm2");		&pand	("mm7","mm2");
+		&pshufw	("mm2","mm0",0xb1);	&pshufw	("mm6","mm4",0xb1);# ROTATE(r0,16)
+		&paddb	("mm0","mm0");		&paddb	("mm4","mm4");
+		&pxor	("mm0","mm3");		&pxor	("mm4","mm7");	# = r2
+		&pshufw	("mm3","mm2",0xb1);	&pshufw	("mm7","mm6",0xb1);# r0
+		&pxor	("mm1","mm0");		&pxor	("mm5","mm4");	# r0^r2
+		&pxor	("mm0","mm2");		&pxor	("mm4","mm6");	# ^= ROTATE(r0,16)
+
+		&movq	("mm2","mm3");		&movq	("mm6","mm7");
+		&pslld	("mm3",8);		&pslld	("mm7",8);
+		&psrld	("mm2",24);		&psrld	("mm6",24);
+		&pxor	("mm0","mm3");		&pxor	("mm4","mm7");	# ^= r0<<8
+		&pxor	("mm0","mm2");		&pxor	("mm4","mm6");	# ^= r0>>24
+
+		&movq	("mm3","mm1");		&movq	("mm7","mm5");
+		&movq	("mm2",&QWP(0,$key));	&movq	("mm6",&QWP(8,$key));
+		&psrld	("mm1",8);		&psrld	("mm5",8);
+		&mov	($s0,&DWP(0-128,$tbl));
+		&pslld	("mm3",24);		&pslld	("mm7",24);
+		&mov	($s1,&DWP(64-128,$tbl));
+		&pxor	("mm0","mm1");		&pxor	("mm4","mm5");	# ^= (r2^r0)<<8
+		&mov	($s2,&DWP(128-128,$tbl));
+		&pxor	("mm0","mm3");		&pxor	("mm4","mm7");	# ^= (r2^r0)>>24
+		&mov	($s3,&DWP(192-128,$tbl));
+
+		&pxor	("mm0","mm2");		&pxor	("mm4","mm6");
+	&jmp	(&label("loop"));
+
+	&set_label("out",16);
+	&pxor	("mm0",&QWP(0,$key));
+	&pxor	("mm4",&QWP(8,$key));
+
+	&ret	();
+&function_end_B("_sse_AES_encrypt_compact");
+					}
+
+######################################################################
+# Vanilla block function.
+######################################################################
+
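+# The "vanilla" path indexes the full AES_Te table emitted further down
+# with _data_word().  Judging by the scale-8 addressing used below, each
+# 32-bit entry is stored twice, so the misaligned loads
+# DWP(0..3,$te,$idx,8) return Te[idx] rotated left by 0, 24, 16 and 8
+# bits respectively, avoiding explicit rotate instructions.  Roughly,
+# per output word (illustrative C, ROTL being a left rotate):
+#
+#	t0 = Te[ s0        & 0xff]            ^		/* DWP(0,...) */
+#	     ROTL(Te[(s1 >>  8) & 0xff],  8)  ^		/* DWP(3,...) */
+#	     ROTL(Te[(s2 >> 16) & 0xff], 16)  ^		/* DWP(2,...) */
+#	     ROTL(Te[(s3 >> 24) & 0xff], 24);		/* DWP(1,...) */
+#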
+sub encstep()
+{ my ($i,$te,@s) = @_;
+  my $tmp = $key;
+  my $out = $i==3?$s[0]:$acc;
+
+	# lines marked with #%e?x[i] denote "reordered" instructions...
+	if ($i==3)  {	&mov	($key,$__key);			}##%edx
+	else        {	&mov	($out,$s[0]);
+			&and	($out,0xFF);			}
+	if ($i==1)  {	&shr	($s[0],16);			}#%ebx[1]
+	if ($i==2)  {	&shr	($s[0],24);			}#%ecx[2]
+			&mov	($out,&DWP(0,$te,$out,8));
+
+	if ($i==3)  {	$tmp=$s[1];				}##%eax
+			&movz	($tmp,&HB($s[1]));
+			&xor	($out,&DWP(3,$te,$tmp,8));
+
+	if ($i==3)  {	$tmp=$s[2]; &mov ($s[1],$__s0);		}##%ebx
+	else        {	&mov	($tmp,$s[2]);
+			&shr	($tmp,16);			}
+	if ($i==2)  {	&and	($s[1],0xFF);			}#%edx[2]
+			&and	($tmp,0xFF);
+			&xor	($out,&DWP(2,$te,$tmp,8));
+
+	if ($i==3)  {	$tmp=$s[3]; &mov ($s[2],$__s1);		}##%ecx
+	elsif($i==2){	&movz	($tmp,&HB($s[3]));		}#%ebx[2]
+	else        {	&mov	($tmp,$s[3]); 
+			&shr	($tmp,24)			}
+			&xor	($out,&DWP(1,$te,$tmp,8));
+	if ($i<2)   {	&mov	(&DWP(4+4*$i,"esp"),$out);	}
+	if ($i==3)  {	&mov	($s[3],$acc);			}
+			&comment();
+}
+
+sub enclast()
+{ my ($i,$te,@s)=@_;
+  my $tmp = $key;
+  my $out = $i==3?$s[0]:$acc;
+
+	if ($i==3)  {	&mov	($key,$__key);			}##%edx
+	else        {	&mov	($out,$s[0]);			}
+			&and	($out,0xFF);
+	if ($i==1)  {	&shr	($s[0],16);			}#%ebx[1]
+	if ($i==2)  {	&shr	($s[0],24);			}#%ecx[2]
+			&mov	($out,&DWP(2,$te,$out,8));
+			&and	($out,0x000000ff);
+
+	if ($i==3)  {	$tmp=$s[1];				}##%eax
+			&movz	($tmp,&HB($s[1]));
+			&mov	($tmp,&DWP(0,$te,$tmp,8));
+			&and	($tmp,0x0000ff00);
+			&xor	($out,$tmp);
+
+	if ($i==3)  {	$tmp=$s[2]; &mov ($s[1],$__s0);		}##%ebx
+	else        {	&mov	($tmp,$s[2]);
+			&shr	($tmp,16);			}
+	if ($i==2)  {	&and	($s[1],0xFF);			}#%edx[2]
+			&and	($tmp,0xFF);
+			&mov	($tmp,&DWP(0,$te,$tmp,8));
+			&and	($tmp,0x00ff0000);
+			&xor	($out,$tmp);
+
+	if ($i==3)  {	$tmp=$s[3]; &mov ($s[2],$__s1);		}##%ecx
+	elsif($i==2){	&movz	($tmp,&HB($s[3]));		}#%ebx[2]
+	else        {	&mov	($tmp,$s[3]);
+			&shr	($tmp,24);			}
+			&mov	($tmp,&DWP(2,$te,$tmp,8));
+			&and	($tmp,0xff000000);
+			&xor	($out,$tmp);
+	if ($i<2)   {	&mov	(&DWP(4+4*$i,"esp"),$out);	}
+	if ($i==3)  {	&mov	($s[3],$acc);			}
+}
+
+&function_begin_B("_x86_AES_encrypt");
+	if ($vertical_spin) {
+		# I need high parts of volatile registers to be accessible...
+		&exch	($s1="edi",$key="ebx");
+		&mov	($s2="esi",$acc="ecx");
+	}
+
+	# note that caller is expected to allocate stack frame for me!
+	&mov	($__key,$key);			# save key
+
+	&xor	($s0,&DWP(0,$key));		# xor with key
+	&xor	($s1,&DWP(4,$key));
+	&xor	($s2,&DWP(8,$key));
+	&xor	($s3,&DWP(12,$key));
+
+	&mov	($acc,&DWP(240,$key));		# load key->rounds
+
+	if ($small_footprint) {
+	    &lea	($acc,&DWP(-2,$acc,$acc));
+	    &lea	($acc,&DWP(0,$key,$acc,8));
+	    &mov	($__end,$acc);		# end of key schedule
+
+	    &set_label("loop",16);
+		if ($vertical_spin) {
+		    &encvert($tbl,$s0,$s1,$s2,$s3);
+		} else {
+		    &encstep(0,$tbl,$s0,$s1,$s2,$s3);
+		    &encstep(1,$tbl,$s1,$s2,$s3,$s0);
+		    &encstep(2,$tbl,$s2,$s3,$s0,$s1);
+		    &encstep(3,$tbl,$s3,$s0,$s1,$s2);
+		}
+		&add	($key,16);		# advance rd_key
+		&xor	($s0,&DWP(0,$key));
+		&xor	($s1,&DWP(4,$key));
+		&xor	($s2,&DWP(8,$key));
+		&xor	($s3,&DWP(12,$key));
+	    &cmp	($key,$__end);
+	    &mov	($__key,$key);
+	    &jb		(&label("loop"));
+	}
+	else {
+	    &cmp	($acc,10);
+	    &jle	(&label("10rounds"));
+	    &cmp	($acc,12);
+	    &jle	(&label("12rounds"));
+
+	&set_label("14rounds",4);
+	    for ($i=1;$i<3;$i++) {
+		if ($vertical_spin) {
+		    &encvert($tbl,$s0,$s1,$s2,$s3);
+		} else {
+		    &encstep(0,$tbl,$s0,$s1,$s2,$s3);
+		    &encstep(1,$tbl,$s1,$s2,$s3,$s0);
+		    &encstep(2,$tbl,$s2,$s3,$s0,$s1);
+		    &encstep(3,$tbl,$s3,$s0,$s1,$s2);
+		}
+		&xor	($s0,&DWP(16*$i+0,$key));
+		&xor	($s1,&DWP(16*$i+4,$key));
+		&xor	($s2,&DWP(16*$i+8,$key));
+		&xor	($s3,&DWP(16*$i+12,$key));
+	    }
+	    &add	($key,32);
+	    &mov	($__key,$key);		# advance rd_key
+	&set_label("12rounds",4);
+	    for ($i=1;$i<3;$i++) {
+		if ($vertical_spin) {
+		    &encvert($tbl,$s0,$s1,$s2,$s3);
+		} else {
+		    &encstep(0,$tbl,$s0,$s1,$s2,$s3);
+		    &encstep(1,$tbl,$s1,$s2,$s3,$s0);
+		    &encstep(2,$tbl,$s2,$s3,$s0,$s1);
+		    &encstep(3,$tbl,$s3,$s0,$s1,$s2);
+		}
+		&xor	($s0,&DWP(16*$i+0,$key));
+		&xor	($s1,&DWP(16*$i+4,$key));
+		&xor	($s2,&DWP(16*$i+8,$key));
+		&xor	($s3,&DWP(16*$i+12,$key));
+	    }
+	    &add	($key,32);
+	    &mov	($__key,$key);		# advance rd_key
+	&set_label("10rounds",4);
+	    for ($i=1;$i<10;$i++) {
+		if ($vertical_spin) {
+		    &encvert($tbl,$s0,$s1,$s2,$s3);
+		} else {
+		    &encstep(0,$tbl,$s0,$s1,$s2,$s3);
+		    &encstep(1,$tbl,$s1,$s2,$s3,$s0);
+		    &encstep(2,$tbl,$s2,$s3,$s0,$s1);
+		    &encstep(3,$tbl,$s3,$s0,$s1,$s2);
+		}
+		&xor	($s0,&DWP(16*$i+0,$key));
+		&xor	($s1,&DWP(16*$i+4,$key));
+		&xor	($s2,&DWP(16*$i+8,$key));
+		&xor	($s3,&DWP(16*$i+12,$key));
+	    }
+	}
+
+	if ($vertical_spin) {
+	    # "reincarnate" some registers for "horizontal" spin...
+	    &mov	($s1="ebx",$key="edi");
+	    &mov	($s2="ecx",$acc="esi");
+	}
+	&enclast(0,$tbl,$s0,$s1,$s2,$s3);
+	&enclast(1,$tbl,$s1,$s2,$s3,$s0);
+	&enclast(2,$tbl,$s2,$s3,$s0,$s1);
+	&enclast(3,$tbl,$s3,$s0,$s1,$s2);
+
+	&add	($key,$small_footprint?16:160);
+	&xor	($s0,&DWP(0,$key));
+	&xor	($s1,&DWP(4,$key));
+	&xor	($s2,&DWP(8,$key));
+	&xor	($s3,&DWP(12,$key));
+
+	&ret	();
+
+&set_label("AES_Te",64);	# Yes! I keep it in the code segment!
+	&_data_word(0xa56363c6, 0x847c7cf8, 0x997777ee, 0x8d7b7bf6);
+	&_data_word(0x0df2f2ff, 0xbd6b6bd6, 0xb16f6fde, 0x54c5c591);
+	&_data_word(0x50303060, 0x03010102, 0xa96767ce, 0x7d2b2b56);
+	&_data_word(0x19fefee7, 0x62d7d7b5, 0xe6abab4d, 0x9a7676ec);
+	&_data_word(0x45caca8f, 0x9d82821f, 0x40c9c989, 0x877d7dfa);
+	&_data_word(0x15fafaef, 0xeb5959b2, 0xc947478e, 0x0bf0f0fb);
+	&_data_word(0xecadad41, 0x67d4d4b3, 0xfda2a25f, 0xeaafaf45);
+	&_data_word(0xbf9c9c23, 0xf7a4a453, 0x967272e4, 0x5bc0c09b);
+	&_data_word(0xc2b7b775, 0x1cfdfde1, 0xae93933d, 0x6a26264c);
+	&_data_word(0x5a36366c, 0x413f3f7e, 0x02f7f7f5, 0x4fcccc83);
+	&_data_word(0x5c343468, 0xf4a5a551, 0x34e5e5d1, 0x08f1f1f9);
+	&_data_word(0x937171e2, 0x73d8d8ab, 0x53313162, 0x3f15152a);
+	&_data_word(0x0c040408, 0x52c7c795, 0x65232346, 0x5ec3c39d);
+	&_data_word(0x28181830, 0xa1969637, 0x0f05050a, 0xb59a9a2f);
+	&_data_word(0x0907070e, 0x36121224, 0x9b80801b, 0x3de2e2df);
+	&_data_word(0x26ebebcd, 0x6927274e, 0xcdb2b27f, 0x9f7575ea);
+	&_data_word(0x1b090912, 0x9e83831d, 0x742c2c58, 0x2e1a1a34);
+	&_data_word(0x2d1b1b36, 0xb26e6edc, 0xee5a5ab4, 0xfba0a05b);
+	&_data_word(0xf65252a4, 0x4d3b3b76, 0x61d6d6b7, 0xceb3b37d);
+	&_data_word(0x7b292952, 0x3ee3e3dd, 0x712f2f5e, 0x97848413);
+	&_data_word(0xf55353a6, 0x68d1d1b9, 0x00000000, 0x2cededc1);
+	&_data_word(0x60202040, 0x1ffcfce3, 0xc8b1b179, 0xed5b5bb6);
+	&_data_word(0xbe6a6ad4, 0x46cbcb8d, 0xd9bebe67, 0x4b393972);
+	&_data_word(0xde4a4a94, 0xd44c4c98, 0xe85858b0, 0x4acfcf85);
+	&_data_word(0x6bd0d0bb, 0x2aefefc5, 0xe5aaaa4f, 0x16fbfbed);
+	&_data_word(0xc5434386, 0xd74d4d9a, 0x55333366, 0x94858511);
+	&_data_word(0xcf45458a, 0x10f9f9e9, 0x06020204, 0x817f7ffe);
+	&_data_word(0xf05050a0, 0x443c3c78, 0xba9f9f25, 0xe3a8a84b);
+	&_data_word(0xf35151a2, 0xfea3a35d, 0xc0404080, 0x8a8f8f05);
+	&_data_word(0xad92923f, 0xbc9d9d21, 0x48383870, 0x04f5f5f1);
+	&_data_word(0xdfbcbc63, 0xc1b6b677, 0x75dadaaf, 0x63212142);
+	&_data_word(0x30101020, 0x1affffe5, 0x0ef3f3fd, 0x6dd2d2bf);
+	&_data_word(0x4ccdcd81, 0x140c0c18, 0x35131326, 0x2fececc3);
+	&_data_word(0xe15f5fbe, 0xa2979735, 0xcc444488, 0x3917172e);
+	&_data_word(0x57c4c493, 0xf2a7a755, 0x827e7efc, 0x473d3d7a);
+	&_data_word(0xac6464c8, 0xe75d5dba, 0x2b191932, 0x957373e6);
+	&_data_word(0xa06060c0, 0x98818119, 0xd14f4f9e, 0x7fdcdca3);
+	&_data_word(0x66222244, 0x7e2a2a54, 0xab90903b, 0x8388880b);
+	&_data_word(0xca46468c, 0x29eeeec7, 0xd3b8b86b, 0x3c141428);
+	&_data_word(0x79dedea7, 0xe25e5ebc, 0x1d0b0b16, 0x76dbdbad);
+	&_data_word(0x3be0e0db, 0x56323264, 0x4e3a3a74, 0x1e0a0a14);
+	&_data_word(0xdb494992, 0x0a06060c, 0x6c242448, 0xe45c5cb8);
+	&_data_word(0x5dc2c29f, 0x6ed3d3bd, 0xefacac43, 0xa66262c4);
+	&_data_word(0xa8919139, 0xa4959531, 0x37e4e4d3, 0x8b7979f2);
+	&_data_word(0x32e7e7d5, 0x43c8c88b, 0x5937376e, 0xb76d6dda);
+	&_data_word(0x8c8d8d01, 0x64d5d5b1, 0xd24e4e9c, 0xe0a9a949);
+	&_data_word(0xb46c6cd8, 0xfa5656ac, 0x07f4f4f3, 0x25eaeacf);
+	&_data_word(0xaf6565ca, 0x8e7a7af4, 0xe9aeae47, 0x18080810);
+	&_data_word(0xd5baba6f, 0x887878f0, 0x6f25254a, 0x722e2e5c);
+	&_data_word(0x241c1c38, 0xf1a6a657, 0xc7b4b473, 0x51c6c697);
+	&_data_word(0x23e8e8cb, 0x7cdddda1, 0x9c7474e8, 0x211f1f3e);
+	&_data_word(0xdd4b4b96, 0xdcbdbd61, 0x868b8b0d, 0x858a8a0f);
+	&_data_word(0x907070e0, 0x423e3e7c, 0xc4b5b571, 0xaa6666cc);
+	&_data_word(0xd8484890, 0x05030306, 0x01f6f6f7, 0x120e0e1c);
+	&_data_word(0xa36161c2, 0x5f35356a, 0xf95757ae, 0xd0b9b969);
+	&_data_word(0x91868617, 0x58c1c199, 0x271d1d3a, 0xb99e9e27);
+	&_data_word(0x38e1e1d9, 0x13f8f8eb, 0xb398982b, 0x33111122);
+	&_data_word(0xbb6969d2, 0x70d9d9a9, 0x898e8e07, 0xa7949433);
+	&_data_word(0xb69b9b2d, 0x221e1e3c, 0x92878715, 0x20e9e9c9);
+	&_data_word(0x49cece87, 0xff5555aa, 0x78282850, 0x7adfdfa5);
+	&_data_word(0x8f8c8c03, 0xf8a1a159, 0x80898909, 0x170d0d1a);
+	&_data_word(0xdabfbf65, 0x31e6e6d7, 0xc6424284, 0xb86868d0);
+	&_data_word(0xc3414182, 0xb0999929, 0x772d2d5a, 0x110f0f1e);
+	&_data_word(0xcbb0b07b, 0xfc5454a8, 0xd6bbbb6d, 0x3a16162c);
+
+#Te4	# four copies of Te4 to choose from to avoid L1 aliasing
+	&data_byte(0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5);
+	&data_byte(0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76);
+	&data_byte(0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0);
+	&data_byte(0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0);
+	&data_byte(0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc);
+	&data_byte(0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15);
+	&data_byte(0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a);
+	&data_byte(0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75);
+	&data_byte(0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0);
+	&data_byte(0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84);
+	&data_byte(0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b);
+	&data_byte(0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf);
+	&data_byte(0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85);
+	&data_byte(0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8);
+	&data_byte(0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5);
+	&data_byte(0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2);
+	&data_byte(0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17);
+	&data_byte(0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73);
+	&data_byte(0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88);
+	&data_byte(0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb);
+	&data_byte(0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c);
+	&data_byte(0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79);
+	&data_byte(0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9);
+	&data_byte(0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08);
+	&data_byte(0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6);
+	&data_byte(0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a);
+	&data_byte(0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e);
+	&data_byte(0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e);
+	&data_byte(0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94);
+	&data_byte(0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf);
+	&data_byte(0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68);
+	&data_byte(0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16);
+
+	&data_byte(0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5);
+	&data_byte(0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76);
+	&data_byte(0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0);
+	&data_byte(0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0);
+	&data_byte(0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc);
+	&data_byte(0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15);
+	&data_byte(0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a);
+	&data_byte(0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75);
+	&data_byte(0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0);
+	&data_byte(0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84);
+	&data_byte(0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b);
+	&data_byte(0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf);
+	&data_byte(0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85);
+	&data_byte(0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8);
+	&data_byte(0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5);
+	&data_byte(0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2);
+	&data_byte(0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17);
+	&data_byte(0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73);
+	&data_byte(0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88);
+	&data_byte(0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb);
+	&data_byte(0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c);
+	&data_byte(0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79);
+	&data_byte(0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9);
+	&data_byte(0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08);
+	&data_byte(0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6);
+	&data_byte(0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a);
+	&data_byte(0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e);
+	&data_byte(0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e);
+	&data_byte(0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94);
+	&data_byte(0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf);
+	&data_byte(0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68);
+	&data_byte(0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16);
+
+	&data_byte(0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5);
+	&data_byte(0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76);
+	&data_byte(0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0);
+	&data_byte(0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0);
+	&data_byte(0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc);
+	&data_byte(0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15);
+	&data_byte(0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a);
+	&data_byte(0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75);
+	&data_byte(0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0);
+	&data_byte(0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84);
+	&data_byte(0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b);
+	&data_byte(0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf);
+	&data_byte(0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85);
+	&data_byte(0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8);
+	&data_byte(0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5);
+	&data_byte(0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2);
+	&data_byte(0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17);
+	&data_byte(0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73);
+	&data_byte(0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88);
+	&data_byte(0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb);
+	&data_byte(0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c);
+	&data_byte(0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79);
+	&data_byte(0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9);
+	&data_byte(0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08);
+	&data_byte(0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6);
+	&data_byte(0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a);
+	&data_byte(0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e);
+	&data_byte(0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e);
+	&data_byte(0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94);
+	&data_byte(0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf);
+	&data_byte(0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68);
+	&data_byte(0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16);
+
+	&data_byte(0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5);
+	&data_byte(0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76);
+	&data_byte(0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0);
+	&data_byte(0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0);
+	&data_byte(0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc);
+	&data_byte(0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15);
+	&data_byte(0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a);
+	&data_byte(0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75);
+	&data_byte(0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0);
+	&data_byte(0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84);
+	&data_byte(0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b);
+	&data_byte(0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf);
+	&data_byte(0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85);
+	&data_byte(0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8);
+	&data_byte(0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5);
+	&data_byte(0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2);
+	&data_byte(0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17);
+	&data_byte(0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73);
+	&data_byte(0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88);
+	&data_byte(0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb);
+	&data_byte(0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c);
+	&data_byte(0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79);
+	&data_byte(0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9);
+	&data_byte(0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08);
+	&data_byte(0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6);
+	&data_byte(0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a);
+	&data_byte(0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e);
+	&data_byte(0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e);
+	&data_byte(0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94);
+	&data_byte(0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf);
+	&data_byte(0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68);
+	&data_byte(0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16);
+#rcon:
+	&data_word(0x00000001, 0x00000002, 0x00000004, 0x00000008);
+	&data_word(0x00000010, 0x00000020, 0x00000040, 0x00000080);
+	&data_word(0x0000001b, 0x00000036, 0x00000000, 0x00000000);
+	&data_word(0x00000000, 0x00000000, 0x00000000, 0x00000000);
+&function_end_B("_x86_AES_encrypt");
+
+# void AES_encrypt (const unsigned char *inp,unsigned char *out,const AES_KEY *key);
+&function_begin("AES_encrypt");
+	&mov	($acc,&wparam(0));		# load inp
+	&mov	($key,&wparam(2));		# load key
+
+	&mov	($s0,"esp");
+	&sub	("esp",36);
+	&and	("esp",-64);			# align to cache-line
+
+	# place stack frame just "above" the key schedule
+	&lea	($s1,&DWP(-64-63,$key));
+	&sub	($s1,"esp");
+	&neg	($s1);
+	&and	($s1,0x3C0);	# modulo 1024, but aligned to cache-line
+	&sub	("esp",$s1);
+	&add	("esp",4);	# 4 is reserved for caller's return address
+	&mov	($_esp,$s0);			# save stack pointer
+
+	&call   (&label("pic_point"));          # make it PIC!
+	&set_label("pic_point");
+	&blindpop($tbl);
+	&picmeup($s0,"OPENSSL_ia32cap_P",$tbl,&label("pic_point")) if (!$x86only);
+	&lea    ($tbl,&DWP(&label("AES_Te")."-".&label("pic_point"),$tbl));
+
+	# pick Te4 copy which can't "overlap" with stack frame or key schedule
+	&lea	($s1,&DWP(768-4,"esp"));
+	&sub	($s1,$tbl);
+	&and	($s1,0x300);
+	&lea	($tbl,&DWP(2048+128,$tbl,$s1));
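+	# rough arithmetic of the selection above: the four Te4 copies live
+	# at AES_Te+2048+0x000/0x100/0x200/0x300, and $s1 = (%esp+764 -
+	# AES_Te) & 0x300 picks one of them based on the stack/table
+	# distance; the extra +128 bias apparently keeps the -128..+96
+	# displacements used by the compact routines within signed-byte
+	# range for shorter encodings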
+
+					if (!$x86only) {
+	&bt	(&DWP(0,$s0),25);	# check for SSE bit
+	&jnc	(&label("x86"));
+
+	&movq	("mm0",&QWP(0,$acc));
+	&movq	("mm4",&QWP(8,$acc));
+	&call	("_sse_AES_encrypt_compact");
+	&mov	("esp",$_esp);			# restore stack pointer
+	&mov	($acc,&wparam(1));		# load out
+	&movq	(&QWP(0,$acc),"mm0");		# write output data
+	&movq	(&QWP(8,$acc),"mm4");
+	&emms	();
+	&function_end_A();
+					}
+	&set_label("x86",16);
+	&mov	($_tbl,$tbl);
+	&mov	($s0,&DWP(0,$acc));		# load input data
+	&mov	($s1,&DWP(4,$acc));
+	&mov	($s2,&DWP(8,$acc));
+	&mov	($s3,&DWP(12,$acc));
+	&call	("_x86_AES_encrypt_compact");
+	&mov	("esp",$_esp);			# restore stack pointer
+	&mov	($acc,&wparam(1));		# load out
+	&mov	(&DWP(0,$acc),$s0);		# write output data
+	&mov	(&DWP(4,$acc),$s1);
+	&mov	(&DWP(8,$acc),$s2);
+	&mov	(&DWP(12,$acc),$s3);
+&function_end("AES_encrypt");
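+# For reference, a minimal caller of the routine generated above
+# (a sketch assuming the usual <openssl/aes.h> declarations; error
+# handling omitted):
+#
+#	#include <openssl/aes.h>
+#
+#	unsigned char key[16] = {0};
+#	unsigned char in[AES_BLOCK_SIZE] = {0}, out[AES_BLOCK_SIZE];
+#	AES_KEY ks;
+#
+#	AES_set_encrypt_key(key, 128, &ks);	/* 0 on success */
+#	AES_encrypt(in, out, &ks);		/* one 16-byte block */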
+
+#--------------------------------------------------------------------#
+
+######################################################################
+# "Compact" block function
+######################################################################
+
+sub deccompact()
+{ my $Fn = \&mov;
+  while ($#_>5) { pop(@_); $Fn=sub{}; }
+  my ($i,$td,@s)=@_;
+  my $tmp = $key;
+  my $out = $i==3?$s[0]:$acc;
+
+	# $Fn is used in the first compact round; its purpose is to skip
+	# restoring some values from the stack, so that after 4x deccompact
+	# with the extra argument the $key, $s0 and $s1 values are still
+	# left there...
+	if($i==3)   {	&$Fn	($key,$__key);			}
+	else        {	&mov	($out,$s[0]);			}
+			&and	($out,0xFF);
+			&movz	($out,&BP(-128,$td,$out,1));
+
+	if ($i==3)  {	$tmp=$s[1];				}
+			&movz	($tmp,&HB($s[1]));
+			&movz	($tmp,&BP(-128,$td,$tmp,1));
+			&shl	($tmp,8);
+			&xor	($out,$tmp);
+
+	if ($i==3)  {	$tmp=$s[2]; &mov ($s[1],$acc);		}
+	else        {	mov	($tmp,$s[2]);			}
+			&shr	($tmp,16);
+			&and	($tmp,0xFF);
+			&movz	($tmp,&BP(-128,$td,$tmp,1));
+			&shl	($tmp,16);
+			&xor	($out,$tmp);
+
+	if ($i==3)  {	$tmp=$s[3]; &$Fn ($s[2],$__s1);		}
+	else        {	&mov	($tmp,$s[3]);			}
+			&shr	($tmp,24);
+			&movz	($tmp,&BP(-128,$td,$tmp,1));
+			&shl	($tmp,24);
+			&xor	($out,$tmp);
+	if ($i<2)   {	&mov	(&DWP(4+4*$i,"esp"),$out);	}
+	if ($i==3)  {	&$Fn	($s[3],$__s0);			}
+}
+
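+# For the decrypt side the InvMixColumns coefficients {0e,0b,0d,09} are
+# built from repeated doublings; per byte x, with
+# xtime(x) = (x<<1) ^ (x & 0x80 ? 0x1b : 0), the sketch is:
+#
+#	tp2 = xtime(x); tp4 = xtime(tp2); tp8 = xtime(tp4);
+#	0x09*x = tp8 ^ x;
+#	0x0b*x = tp8 ^ tp2 ^ x;
+#	0x0d*x = tp8 ^ tp4 ^ x;
+#	0x0e*x = tp8 ^ tp4 ^ tp2;
+#
+# dectransform() below computes tp2/tp4/tp8 for all four bytes of a
+# state word at once (same packed-xtime trick as enctransform) and
+# folds the four products together with rotates.
+#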
+# must be called with 2,3,0,1 as argument sequence!!!
+sub dectransform()
+{ my @s = ($s0,$s1,$s2,$s3);
+  my $i = shift;
+  my $tmp = $key;
+  my $tp2 = @s[($i+2)%4]; $tp2 = @s[2] if ($i==1);
+  my $tp4 = @s[($i+3)%4]; $tp4 = @s[3] if ($i==1);
+  my $tp8 = $tbl;
+
+	&mov	($acc,$s[$i]);
+	&and	($acc,0x80808080);
+	&mov	($tmp,$acc);
+	&shr	($tmp,7);
+	&lea	($tp2,&DWP(0,$s[$i],$s[$i]));
+	&sub	($acc,$tmp);
+	&and	($tp2,0xfefefefe);
+	&and	($acc,0x1b1b1b1b);
+	&xor	($acc,$tp2);
+	&mov	($tp2,$acc);
+
+	&and	($acc,0x80808080);
+	&mov	($tmp,$acc);
+	&shr	($tmp,7);
+	&lea	($tp4,&DWP(0,$tp2,$tp2));
+	&sub	($acc,$tmp);
+	&and	($tp4,0xfefefefe);
+	&and	($acc,0x1b1b1b1b);
+	 &xor	($tp2,$s[$i]);	# tp2^tp1
+	&xor	($acc,$tp4);
+	&mov	($tp4,$acc);
+
+	&and	($acc,0x80808080);
+	&mov	($tmp,$acc);
+	&shr	($tmp,7);
+	&lea	($tp8,&DWP(0,$tp4,$tp4));
+	&sub	($acc,$tmp);
+	&and	($tp8,0xfefefefe);
+	&and	($acc,0x1b1b1b1b);
+	 &xor	($tp4,$s[$i]);	# tp4^tp1
+	 &rotl	($s[$i],8);	# = ROTATE(tp1,8)
+	&xor	($tp8,$acc);
+
+	&xor	($s[$i],$tp2);
+	&xor	($tp2,$tp8);
+	&rotl	($tp2,24);
+	&xor	($s[$i],$tp4);
+	&xor	($tp4,$tp8);
+	&rotl	($tp4,16);
+	&xor	($s[$i],$tp8);	# ^= tp8^(tp4^tp1)^(tp2^tp1)
+	&rotl	($tp8,8);
+	&xor	($s[$i],$tp2);	# ^= ROTATE(tp8^tp2^tp1,24)
+	&xor	($s[$i],$tp4);	# ^= ROTATE(tp8^tp4^tp1,16)
+	 &mov	($s[0],$__s0)			if($i==2); #prefetch $s0
+	 &mov	($s[1],$__s1)			if($i==3); #prefetch $s1
+	 &mov	($s[2],$__s2)			if($i==1);
+	&xor	($s[$i],$tp8);	# ^= ROTATE(tp8,8)
+
+	&mov	($s[3],$__s3)			if($i==1);
+	&mov	(&DWP(4+4*$i,"esp"),$s[$i])	if($i>=2);
+}
+
+&function_begin_B("_x86_AES_decrypt_compact");
+	# note that caller is expected to allocate stack frame for me!
+	&mov	($__key,$key);			# save key
+
+	&xor	($s0,&DWP(0,$key));		# xor with key
+	&xor	($s1,&DWP(4,$key));
+	&xor	($s2,&DWP(8,$key));
+	&xor	($s3,&DWP(12,$key));
+
+	&mov	($acc,&DWP(240,$key));		# load key->rounds
+
+	&lea	($acc,&DWP(-2,$acc,$acc));
+	&lea	($acc,&DWP(0,$key,$acc,8));
+	&mov	($__end,$acc);			# end of key schedule
+
+	# prefetch Td4
+	&mov	($key,&DWP(0-128,$tbl));
+	&mov	($acc,&DWP(32-128,$tbl));
+	&mov	($key,&DWP(64-128,$tbl));
+	&mov	($acc,&DWP(96-128,$tbl));
+	&mov	($key,&DWP(128-128,$tbl));
+	&mov	($acc,&DWP(160-128,$tbl));
+	&mov	($key,&DWP(192-128,$tbl));
+	&mov	($acc,&DWP(224-128,$tbl));
+
+	&set_label("loop",16);
+
+		&deccompact(0,$tbl,$s0,$s3,$s2,$s1,1);
+		&deccompact(1,$tbl,$s1,$s0,$s3,$s2,1);
+		&deccompact(2,$tbl,$s2,$s1,$s0,$s3,1);
+		&deccompact(3,$tbl,$s3,$s2,$s1,$s0,1);
+		&dectransform(2);
+		&dectransform(3);
+		&dectransform(0);
+		&dectransform(1);
+		&mov 	($key,$__key);
+		&mov	($tbl,$__tbl);
+		&add	($key,16);		# advance rd_key
+		&xor	($s0,&DWP(0,$key));
+		&xor	($s1,&DWP(4,$key));
+		&xor	($s2,&DWP(8,$key));
+		&xor	($s3,&DWP(12,$key));
+
+	&cmp	($key,$__end);
+	&mov	($__key,$key);
+	&jb	(&label("loop"));
+
+	&deccompact(0,$tbl,$s0,$s3,$s2,$s1);
+	&deccompact(1,$tbl,$s1,$s0,$s3,$s2);
+	&deccompact(2,$tbl,$s2,$s1,$s0,$s3);
+	&deccompact(3,$tbl,$s3,$s2,$s1,$s0);
+
+	&xor	($s0,&DWP(16,$key));
+	&xor	($s1,&DWP(20,$key));
+	&xor	($s2,&DWP(24,$key));
+	&xor	($s3,&DWP(28,$key));
+
+	&ret	();
+&function_end_B("_x86_AES_decrypt_compact");
+
+######################################################################
+# "Compact" SSE block function.
+######################################################################
+
+sub sse_deccompact()
+{
+	&pshufw	("mm1","mm0",0x0c);		#  7, 6, 1, 0
+	&movd	("eax","mm1");			#  7, 6, 1, 0
+
+	&pshufw	("mm5","mm4",0x09);		# 13,12,11,10
+	&movz	($acc,&LB("eax"));		#  0
+	&movz	("ecx",&BP(-128,$tbl,$acc,1));	#  0
+	&movd	("ebx","mm5");			# 13,12,11,10
+	&movz	("edx",&HB("eax"));		#  1
+	&movz	("edx",&BP(-128,$tbl,"edx",1));	#  1
+	&shl	("edx",8);			#  1
+
+	&pshufw	("mm2","mm0",0x06);		#  3, 2, 5, 4
+	&movz	($acc,&LB("ebx"));		# 10
+	&movz	($acc,&BP(-128,$tbl,$acc,1));	# 10
+	&shl	($acc,16);			# 10
+	&or	("ecx",$acc);			# 10
+	&shr	("eax",16);			#  7, 6
+	&movz	($acc,&HB("ebx"));		# 11
+	&movz	($acc,&BP(-128,$tbl,$acc,1));	# 11
+	&shl	($acc,24);			# 11
+	&or	("edx",$acc);			# 11
+	&shr	("ebx",16);			# 13,12
+
+	&pshufw	("mm6","mm4",0x03);		# 9, 8,15,14
+	&movz	($acc,&HB("eax"));		#  7
+	&movz	($acc,&BP(-128,$tbl,$acc,1));	#  7
+	&shl	($acc,24);			#  7
+	&or	("ecx",$acc);			#  7
+	&movz	($acc,&HB("ebx"));		# 13
+	&movz	($acc,&BP(-128,$tbl,$acc,1));	# 13
+	&shl	($acc,8);			# 13
+	&or	("ecx",$acc);			# 13
+	&movd	("mm0","ecx");			# t[0] collected
+
+	&movz	($acc,&LB("eax"));		#  6
+	&movd	("eax","mm2");			#  3, 2, 5, 4
+	&movz	("ecx",&BP(-128,$tbl,$acc,1));	#  6
+	&shl	("ecx",16);			#  6
+	&movz	($acc,&LB("ebx"));		# 12
+	&movd	("ebx","mm6");			#  9, 8,15,14
+	&movz	($acc,&BP(-128,$tbl,$acc,1));	# 12
+	&or	("ecx",$acc);			# 12
+
+	&movz	($acc,&LB("eax"));		#  4
+	&movz	($acc,&BP(-128,$tbl,$acc,1));	#  4
+	&or	("edx",$acc);			#  4
+	&movz	($acc,&LB("ebx"));		# 14
+	&movz	($acc,&BP(-128,$tbl,$acc,1));	# 14
+	&shl	($acc,16);			# 14
+	&or	("edx",$acc);			# 14
+	&movd	("mm1","edx");			# t[1] collected
+
+	&movz	($acc,&HB("eax"));		#  5
+	&movz	("edx",&BP(-128,$tbl,$acc,1));	#  5
+	&shl	("edx",8);			#  5
+	&movz	($acc,&HB("ebx"));		# 15
+	&shr	("eax",16);			#  3, 2
+	&movz	($acc,&BP(-128,$tbl,$acc,1));	# 15
+	&shl	($acc,24);			# 15
+	&or	("edx",$acc);			# 15
+	&shr	("ebx",16);			#  9, 8
+
+	&punpckldq	("mm0","mm1");		# t[0,1] collected
+
+	&movz	($acc,&HB("ebx"));		#  9
+	&movz	($acc,&BP(-128,$tbl,$acc,1));	#  9
+	&shl	($acc,8);			#  9
+	&or	("ecx",$acc);			#  9
+	&and	("ebx",0xff);			#  8
+	&movz	("ebx",&BP(-128,$tbl,"ebx",1));	#  8
+	&or	("edx","ebx");			#  8
+	&movz	($acc,&LB("eax"));		#  2
+	&movz	($acc,&BP(-128,$tbl,$acc,1));	#  2
+	&shl	($acc,16);			#  2
+	&or	("edx",$acc);			#  2
+	&movd	("mm4","edx");			# t[2] collected
+	&movz	("eax",&HB("eax"));		#  3
+	&movz	("eax",&BP(-128,$tbl,"eax",1));	#  3
+	&shl	("eax",24);			#  3
+	&or	("ecx","eax");			#  3
+	&movd	("mm5","ecx");			# t[3] collected
+
+	&punpckldq	("mm4","mm5");		# t[2,3] collected
+}
+
+					if (!$x86only) {
+&function_begin_B("_sse_AES_decrypt_compact");
+	&pxor	("mm0",&QWP(0,$key));	#  7, 6, 5, 4, 3, 2, 1, 0
+	&pxor	("mm4",&QWP(8,$key));	# 15,14,13,12,11,10, 9, 8
+
+	# note that caller is expected to allocate stack frame for me!
+	&mov	($acc,&DWP(240,$key));		# load key->rounds
+	&lea	($acc,&DWP(-2,$acc,$acc));
+	&lea	($acc,&DWP(0,$key,$acc,8));
+	&mov	($__end,$acc);			# end of key schedule
+
+	&mov	($s0,0x1b1b1b1b);		# magic constant
+	&mov	(&DWP(8,"esp"),$s0);
+	&mov	(&DWP(12,"esp"),$s0);
+
+	# prefetch Td4
+	&mov	($s0,&DWP(0-128,$tbl));
+	&mov	($s1,&DWP(32-128,$tbl));
+	&mov	($s2,&DWP(64-128,$tbl));
+	&mov	($s3,&DWP(96-128,$tbl));
+	&mov	($s0,&DWP(128-128,$tbl));
+	&mov	($s1,&DWP(160-128,$tbl));
+	&mov	($s2,&DWP(192-128,$tbl));
+	&mov	($s3,&DWP(224-128,$tbl));
+
+	&set_label("loop",16);
+		&sse_deccompact();
+		&add	($key,16);
+		&cmp	($key,$__end);
+		&ja	(&label("out"));
+
+		# ROTATE(x^y,N) == ROTATE(x,N)^ROTATE(y,N)
+		&movq	("mm3","mm0");		&movq	("mm7","mm4");
+		&movq	("mm2","mm0",1);	&movq	("mm6","mm4",1);
+		&movq	("mm1","mm0");		&movq	("mm5","mm4");
+		&pshufw	("mm0","mm0",0xb1);	&pshufw	("mm4","mm4",0xb1);# = ROTATE(tp0,16)
+		&pslld	("mm2",8);		&pslld	("mm6",8);
+		&psrld	("mm3",8);		&psrld	("mm7",8);
+		&pxor	("mm0","mm2");		&pxor	("mm4","mm6");	# ^= tp0<<8
+		&pxor	("mm0","mm3");		&pxor	("mm4","mm7");	# ^= tp0>>8
+		&pslld	("mm2",16);		&pslld	("mm6",16);
+		&psrld	("mm3",16);		&psrld	("mm7",16);
+		&pxor	("mm0","mm2");		&pxor	("mm4","mm6");	# ^= tp0<<24
+		&pxor	("mm0","mm3");		&pxor	("mm4","mm7");	# ^= tp0>>24
+
+		&movq	("mm3",&QWP(8,"esp"));
+		&pxor	("mm2","mm2");		&pxor	("mm6","mm6");
+		&pcmpgtb("mm2","mm1");		&pcmpgtb("mm6","mm5");
+		&pand	("mm2","mm3");		&pand	("mm6","mm3");
+		&paddb	("mm1","mm1");		&paddb	("mm5","mm5");
+		&pxor	("mm1","mm2");		&pxor	("mm5","mm6");	# tp2
+		&movq	("mm3","mm1");		&movq	("mm7","mm5");
+		&movq	("mm2","mm1");		&movq	("mm6","mm5");
+		&pxor	("mm0","mm1");		&pxor	("mm4","mm5");	# ^= tp2
+		&pslld	("mm3",24);		&pslld	("mm7",24);
+		&psrld	("mm2",8);		&psrld	("mm6",8);
+		&pxor	("mm0","mm3");		&pxor	("mm4","mm7");	# ^= tp2<<24
+		&pxor	("mm0","mm2");		&pxor	("mm4","mm6");	# ^= tp2>>8
+
+		&movq	("mm2",&QWP(8,"esp"));
+		&pxor	("mm3","mm3");		&pxor	("mm7","mm7");
+		&pcmpgtb("mm3","mm1");		&pcmpgtb("mm7","mm5");
+		&pand	("mm3","mm2");		&pand	("mm7","mm2");
+		&paddb	("mm1","mm1");		&paddb	("mm5","mm5");
+		&pxor	("mm1","mm3");		&pxor	("mm5","mm7");	# tp4
+		&pshufw	("mm3","mm1",0xb1);	&pshufw	("mm7","mm5",0xb1);
+		&pxor	("mm0","mm1");		&pxor	("mm4","mm5");	# ^= tp4
+		&pxor	("mm0","mm3");		&pxor	("mm4","mm7");	# ^= ROTATE(tp4,16)	
+
+		&pxor	("mm3","mm3");		&pxor	("mm7","mm7");
+		&pcmpgtb("mm3","mm1");		&pcmpgtb("mm7","mm5");
+		&pand	("mm3","mm2");		&pand	("mm7","mm2");
+		&paddb	("mm1","mm1");		&paddb	("mm5","mm5");
+		&pxor	("mm1","mm3");		&pxor	("mm5","mm7");	# tp8
+		&pxor	("mm0","mm1");		&pxor	("mm4","mm5");	# ^= tp8
+		&movq	("mm3","mm1");		&movq	("mm7","mm5");
+		&pshufw	("mm2","mm1",0xb1);	&pshufw	("mm6","mm5",0xb1);
+		&pxor	("mm0","mm2");		&pxor	("mm4","mm6");	# ^= ROTATE(tp8,16)
+		&pslld	("mm1",8);		&pslld	("mm5",8);
+		&psrld	("mm3",8);		&psrld	("mm7",8);
+		&movq	("mm2",&QWP(0,$key));	&movq	("mm6",&QWP(8,$key));
+		&pxor	("mm0","mm1");		&pxor	("mm4","mm5");	# ^= tp8<<8
+		&pxor	("mm0","mm3");		&pxor	("mm4","mm7");	# ^= tp8>>8
+		&mov	($s0,&DWP(0-128,$tbl));
+		&pslld	("mm1",16);		&pslld	("mm5",16);
+		&mov	($s1,&DWP(64-128,$tbl));
+		&psrld	("mm3",16);		&psrld	("mm7",16);
+		&mov	($s2,&DWP(128-128,$tbl));
+		&pxor	("mm0","mm1");		&pxor	("mm4","mm5");	# ^= tp8<<24
+		&mov	($s3,&DWP(192-128,$tbl));
+		&pxor	("mm0","mm3");		&pxor	("mm4","mm7");	# ^= tp8>>24
+
+		&pxor	("mm0","mm2");		&pxor	("mm4","mm6");
+	&jmp	(&label("loop"));
+
+	&set_label("out",16);
+	&pxor	("mm0",&QWP(0,$key));
+	&pxor	("mm4",&QWP(8,$key));
+
+	&ret	();
+&function_end_B("_sse_AES_decrypt_compact");
+					}
+
+######################################################################
+# Vanilla block function.
+######################################################################
+
+sub decstep()
+{ my ($i,$td,@s) = @_;
+  my $tmp = $key;
+  my $out = $i==3?$s[0]:$acc;
+
+	# no instructions are reordered, as performance appears
+	# optimal... or rather, all attempts to reorder didn't
+	# result in better performance [which, by the way, is not a
+	# bit lower than encryption].
+	if($i==3)   {	&mov	($key,$__key);			}
+	else        {	&mov	($out,$s[0]);			}
+			&and	($out,0xFF);
+			&mov	($out,&DWP(0,$td,$out,8));
+
+	if ($i==3)  {	$tmp=$s[1];				}
+			&movz	($tmp,&HB($s[1]));
+			&xor	($out,&DWP(3,$td,$tmp,8));
+
+	if ($i==3)  {	$tmp=$s[2]; &mov ($s[1],$acc);		}
+	else        {	&mov	($tmp,$s[2]);			}
+			&shr	($tmp,16);
+			&and	($tmp,0xFF);
+			&xor	($out,&DWP(2,$td,$tmp,8));
+
+	if ($i==3)  {	$tmp=$s[3]; &mov ($s[2],$__s1);		}
+	else        {	&mov	($tmp,$s[3]);			}
+			&shr	($tmp,24);
+			&xor	($out,&DWP(1,$td,$tmp,8));
+	if ($i<2)   {	&mov	(&DWP(4+4*$i,"esp"),$out);	}
+	if ($i==3)  {	&mov	($s[3],$__s0);			}
+			&comment();
+}
+
+sub declast()
+{ my ($i,$td,@s)=@_;
+  my $tmp = $key;
+  my $out = $i==3?$s[0]:$acc;
+
+	if($i==0)   {	&lea	($td,&DWP(2048+128,$td));
+			&mov	($tmp,&DWP(0-128,$td));
+			&mov	($acc,&DWP(32-128,$td));
+			&mov	($tmp,&DWP(64-128,$td));
+			&mov	($acc,&DWP(96-128,$td));
+			&mov	($tmp,&DWP(128-128,$td));
+			&mov	($acc,&DWP(160-128,$td));
+			&mov	($tmp,&DWP(192-128,$td));
+			&mov	($acc,&DWP(224-128,$td));
+			&lea	($td,&DWP(-128,$td));		}
+	if($i==3)   {	&mov	($key,$__key);			}
+	else        {	&mov	($out,$s[0]);			}
+			&and	($out,0xFF);
+			&movz	($out,&BP(0,$td,$out,1));
+
+	if ($i==3)  {	$tmp=$s[1];				}
+			&movz	($tmp,&HB($s[1]));
+			&movz	($tmp,&BP(0,$td,$tmp,1));
+			&shl	($tmp,8);
+			&xor	($out,$tmp);
+
+	if ($i==3)  {	$tmp=$s[2]; &mov ($s[1],$acc);		}
+	else        {	mov	($tmp,$s[2]);			}
+			&shr	($tmp,16);
+			&and	($tmp,0xFF);
+			&movz	($tmp,&BP(0,$td,$tmp,1));
+			&shl	($tmp,16);
+			&xor	($out,$tmp);
+
+	if ($i==3)  {	$tmp=$s[3]; &mov ($s[2],$__s1);		}
+	else        {	&mov	($tmp,$s[3]);			}
+			&shr	($tmp,24);
+			&movz	($tmp,&BP(0,$td,$tmp,1));
+			&shl	($tmp,24);
+			&xor	($out,$tmp);
+	if ($i<2)   {	&mov	(&DWP(4+4*$i,"esp"),$out);	}
+	if ($i==3)  {	&mov	($s[3],$__s0);
+			&lea	($td,&DWP(-2048,$td));		}
+}
+
+&function_begin_B("_x86_AES_decrypt");
+	# note that caller is expected to allocate stack frame for me!
+	&mov	($__key,$key);			# save key
+
+	&xor	($s0,&DWP(0,$key));		# xor with key
+	&xor	($s1,&DWP(4,$key));
+	&xor	($s2,&DWP(8,$key));
+	&xor	($s3,&DWP(12,$key));
+
+	&mov	($acc,&DWP(240,$key));		# load key->rounds
+
+	if ($small_footprint) {
+	    &lea	($acc,&DWP(-2,$acc,$acc));
+	    &lea	($acc,&DWP(0,$key,$acc,8));
+	    &mov	($__end,$acc);		# end of key schedule
+	    &set_label("loop",16);
+		&decstep(0,$tbl,$s0,$s3,$s2,$s1);
+		&decstep(1,$tbl,$s1,$s0,$s3,$s2);
+		&decstep(2,$tbl,$s2,$s1,$s0,$s3);
+		&decstep(3,$tbl,$s3,$s2,$s1,$s0);
+		&add	($key,16);		# advance rd_key
+		&xor	($s0,&DWP(0,$key));
+		&xor	($s1,&DWP(4,$key));
+		&xor	($s2,&DWP(8,$key));
+		&xor	($s3,&DWP(12,$key));
+	    &cmp	($key,$__end);
+	    &mov	($__key,$key);
+	    &jb		(&label("loop"));
+	}
+	else {
+	    &cmp	($acc,10);
+	    &jle	(&label("10rounds"));
+	    &cmp	($acc,12);
+	    &jle	(&label("12rounds"));
+
+	&set_label("14rounds",4);
+	    for ($i=1;$i<3;$i++) {
+		&decstep(0,$tbl,$s0,$s3,$s2,$s1);
+		&decstep(1,$tbl,$s1,$s0,$s3,$s2);
+		&decstep(2,$tbl,$s2,$s1,$s0,$s3);
+		&decstep(3,$tbl,$s3,$s2,$s1,$s0);
+		&xor	($s0,&DWP(16*$i+0,$key));
+		&xor	($s1,&DWP(16*$i+4,$key));
+		&xor	($s2,&DWP(16*$i+8,$key));
+		&xor	($s3,&DWP(16*$i+12,$key));
+	    }
+	    &add	($key,32);
+	    &mov	($__key,$key);		# advance rd_key
+	&set_label("12rounds",4);
+	    for ($i=1;$i<3;$i++) {
+		&decstep(0,$tbl,$s0,$s3,$s2,$s1);
+		&decstep(1,$tbl,$s1,$s0,$s3,$s2);
+		&decstep(2,$tbl,$s2,$s1,$s0,$s3);
+		&decstep(3,$tbl,$s3,$s2,$s1,$s0);
+		&xor	($s0,&DWP(16*$i+0,$key));
+		&xor	($s1,&DWP(16*$i+4,$key));
+		&xor	($s2,&DWP(16*$i+8,$key));
+		&xor	($s3,&DWP(16*$i+12,$key));
+	    }
+	    &add	($key,32);
+	    &mov	($__key,$key);		# advance rd_key
+	&set_label("10rounds",4);
+	    for ($i=1;$i<10;$i++) {
+		&decstep(0,$tbl,$s0,$s3,$s2,$s1);
+		&decstep(1,$tbl,$s1,$s0,$s3,$s2);
+		&decstep(2,$tbl,$s2,$s1,$s0,$s3);
+		&decstep(3,$tbl,$s3,$s2,$s1,$s0);
+		&xor	($s0,&DWP(16*$i+0,$key));
+		&xor	($s1,&DWP(16*$i+4,$key));
+		&xor	($s2,&DWP(16*$i+8,$key));
+		&xor	($s3,&DWP(16*$i+12,$key));
+	    }
+	}
+
+	&declast(0,$tbl,$s0,$s3,$s2,$s1);
+	&declast(1,$tbl,$s1,$s0,$s3,$s2);
+	&declast(2,$tbl,$s2,$s1,$s0,$s3);
+	&declast(3,$tbl,$s3,$s2,$s1,$s0);
+
+	&add	($key,$small_footprint?16:160);
+	&xor	($s0,&DWP(0,$key));
+	&xor	($s1,&DWP(4,$key));
+	&xor	($s2,&DWP(8,$key));
+	&xor	($s3,&DWP(12,$key));
+
+	&ret	();
+
+&set_label("AES_Td",64);	# Yes! I keep it in the code segment!
+	&_data_word(0x50a7f451, 0x5365417e, 0xc3a4171a, 0x965e273a);
+	&_data_word(0xcb6bab3b, 0xf1459d1f, 0xab58faac, 0x9303e34b);
+	&_data_word(0x55fa3020, 0xf66d76ad, 0x9176cc88, 0x254c02f5);
+	&_data_word(0xfcd7e54f, 0xd7cb2ac5, 0x80443526, 0x8fa362b5);
+	&_data_word(0x495ab1de, 0x671bba25, 0x980eea45, 0xe1c0fe5d);
+	&_data_word(0x02752fc3, 0x12f04c81, 0xa397468d, 0xc6f9d36b);
+	&_data_word(0xe75f8f03, 0x959c9215, 0xeb7a6dbf, 0xda595295);
+	&_data_word(0x2d83bed4, 0xd3217458, 0x2969e049, 0x44c8c98e);
+	&_data_word(0x6a89c275, 0x78798ef4, 0x6b3e5899, 0xdd71b927);
+	&_data_word(0xb64fe1be, 0x17ad88f0, 0x66ac20c9, 0xb43ace7d);
+	&_data_word(0x184adf63, 0x82311ae5, 0x60335197, 0x457f5362);
+	&_data_word(0xe07764b1, 0x84ae6bbb, 0x1ca081fe, 0x942b08f9);
+	&_data_word(0x58684870, 0x19fd458f, 0x876cde94, 0xb7f87b52);
+	&_data_word(0x23d373ab, 0xe2024b72, 0x578f1fe3, 0x2aab5566);
+	&_data_word(0x0728ebb2, 0x03c2b52f, 0x9a7bc586, 0xa50837d3);
+	&_data_word(0xf2872830, 0xb2a5bf23, 0xba6a0302, 0x5c8216ed);
+	&_data_word(0x2b1ccf8a, 0x92b479a7, 0xf0f207f3, 0xa1e2694e);
+	&_data_word(0xcdf4da65, 0xd5be0506, 0x1f6234d1, 0x8afea6c4);
+	&_data_word(0x9d532e34, 0xa055f3a2, 0x32e18a05, 0x75ebf6a4);
+	&_data_word(0x39ec830b, 0xaaef6040, 0x069f715e, 0x51106ebd);
+	&_data_word(0xf98a213e, 0x3d06dd96, 0xae053edd, 0x46bde64d);
+	&_data_word(0xb58d5491, 0x055dc471, 0x6fd40604, 0xff155060);
+	&_data_word(0x24fb9819, 0x97e9bdd6, 0xcc434089, 0x779ed967);
+	&_data_word(0xbd42e8b0, 0x888b8907, 0x385b19e7, 0xdbeec879);
+	&_data_word(0x470a7ca1, 0xe90f427c, 0xc91e84f8, 0x00000000);
+	&_data_word(0x83868009, 0x48ed2b32, 0xac70111e, 0x4e725a6c);
+	&_data_word(0xfbff0efd, 0x5638850f, 0x1ed5ae3d, 0x27392d36);
+	&_data_word(0x64d90f0a, 0x21a65c68, 0xd1545b9b, 0x3a2e3624);
+	&_data_word(0xb1670a0c, 0x0fe75793, 0xd296eeb4, 0x9e919b1b);
+	&_data_word(0x4fc5c080, 0xa220dc61, 0x694b775a, 0x161a121c);
+	&_data_word(0x0aba93e2, 0xe52aa0c0, 0x43e0223c, 0x1d171b12);
+	&_data_word(0x0b0d090e, 0xadc78bf2, 0xb9a8b62d, 0xc8a91e14);
+	&_data_word(0x8519f157, 0x4c0775af, 0xbbdd99ee, 0xfd607fa3);
+	&_data_word(0x9f2601f7, 0xbcf5725c, 0xc53b6644, 0x347efb5b);
+	&_data_word(0x7629438b, 0xdcc623cb, 0x68fcedb6, 0x63f1e4b8);
+	&_data_word(0xcadc31d7, 0x10856342, 0x40229713, 0x2011c684);
+	&_data_word(0x7d244a85, 0xf83dbbd2, 0x1132f9ae, 0x6da129c7);
+	&_data_word(0x4b2f9e1d, 0xf330b2dc, 0xec52860d, 0xd0e3c177);
+	&_data_word(0x6c16b32b, 0x99b970a9, 0xfa489411, 0x2264e947);
+	&_data_word(0xc48cfca8, 0x1a3ff0a0, 0xd82c7d56, 0xef903322);
+	&_data_word(0xc74e4987, 0xc1d138d9, 0xfea2ca8c, 0x360bd498);
+	&_data_word(0xcf81f5a6, 0x28de7aa5, 0x268eb7da, 0xa4bfad3f);
+	&_data_word(0xe49d3a2c, 0x0d927850, 0x9bcc5f6a, 0x62467e54);
+	&_data_word(0xc2138df6, 0xe8b8d890, 0x5ef7392e, 0xf5afc382);
+	&_data_word(0xbe805d9f, 0x7c93d069, 0xa92dd56f, 0xb31225cf);
+	&_data_word(0x3b99acc8, 0xa77d1810, 0x6e639ce8, 0x7bbb3bdb);
+	&_data_word(0x097826cd, 0xf418596e, 0x01b79aec, 0xa89a4f83);
+	&_data_word(0x656e95e6, 0x7ee6ffaa, 0x08cfbc21, 0xe6e815ef);
+	&_data_word(0xd99be7ba, 0xce366f4a, 0xd4099fea, 0xd67cb029);
+	&_data_word(0xafb2a431, 0x31233f2a, 0x3094a5c6, 0xc066a235);
+	&_data_word(0x37bc4e74, 0xa6ca82fc, 0xb0d090e0, 0x15d8a733);
+	&_data_word(0x4a9804f1, 0xf7daec41, 0x0e50cd7f, 0x2ff69117);
+	&_data_word(0x8dd64d76, 0x4db0ef43, 0x544daacc, 0xdf0496e4);
+	&_data_word(0xe3b5d19e, 0x1b886a4c, 0xb81f2cc1, 0x7f516546);
+	&_data_word(0x04ea5e9d, 0x5d358c01, 0x737487fa, 0x2e410bfb);
+	&_data_word(0x5a1d67b3, 0x52d2db92, 0x335610e9, 0x1347d66d);
+	&_data_word(0x8c61d79a, 0x7a0ca137, 0x8e14f859, 0x893c13eb);
+	&_data_word(0xee27a9ce, 0x35c961b7, 0xede51ce1, 0x3cb1477a);
+	&_data_word(0x59dfd29c, 0x3f73f255, 0x79ce1418, 0xbf37c773);
+	&_data_word(0xeacdf753, 0x5baafd5f, 0x146f3ddf, 0x86db4478);
+	&_data_word(0x81f3afca, 0x3ec468b9, 0x2c342438, 0x5f40a3c2);
+	&_data_word(0x72c31d16, 0x0c25e2bc, 0x8b493c28, 0x41950dff);
+	&_data_word(0x7101a839, 0xdeb30c08, 0x9ce4b4d8, 0x90c15664);
+	&_data_word(0x6184cb7b, 0x70b632d5, 0x745c6c48, 0x4257b8d0);
+
+#Td4:	# four copies of Td4 to choose from to avoid L1 aliasing
+	&data_byte(0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38);
+	&data_byte(0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb);
+	&data_byte(0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87);
+	&data_byte(0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb);
+	&data_byte(0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d);
+	&data_byte(0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e);
+	&data_byte(0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2);
+	&data_byte(0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25);
+	&data_byte(0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16);
+	&data_byte(0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92);
+	&data_byte(0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda);
+	&data_byte(0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84);
+	&data_byte(0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a);
+	&data_byte(0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06);
+	&data_byte(0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02);
+	&data_byte(0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b);
+	&data_byte(0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea);
+	&data_byte(0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73);
+	&data_byte(0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85);
+	&data_byte(0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e);
+	&data_byte(0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89);
+	&data_byte(0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b);
+	&data_byte(0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20);
+	&data_byte(0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4);
+	&data_byte(0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31);
+	&data_byte(0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f);
+	&data_byte(0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d);
+	&data_byte(0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef);
+	&data_byte(0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0);
+	&data_byte(0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61);
+	&data_byte(0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26);
+	&data_byte(0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d);
+
+	&data_byte(0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38);
+	&data_byte(0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb);
+	&data_byte(0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87);
+	&data_byte(0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb);
+	&data_byte(0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d);
+	&data_byte(0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e);
+	&data_byte(0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2);
+	&data_byte(0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25);
+	&data_byte(0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16);
+	&data_byte(0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92);
+	&data_byte(0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda);
+	&data_byte(0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84);
+	&data_byte(0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a);
+	&data_byte(0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06);
+	&data_byte(0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02);
+	&data_byte(0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b);
+	&data_byte(0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea);
+	&data_byte(0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73);
+	&data_byte(0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85);
+	&data_byte(0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e);
+	&data_byte(0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89);
+	&data_byte(0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b);
+	&data_byte(0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20);
+	&data_byte(0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4);
+	&data_byte(0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31);
+	&data_byte(0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f);
+	&data_byte(0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d);
+	&data_byte(0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef);
+	&data_byte(0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0);
+	&data_byte(0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61);
+	&data_byte(0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26);
+	&data_byte(0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d);
+
+	&data_byte(0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38);
+	&data_byte(0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb);
+	&data_byte(0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87);
+	&data_byte(0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb);
+	&data_byte(0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d);
+	&data_byte(0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e);
+	&data_byte(0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2);
+	&data_byte(0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25);
+	&data_byte(0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16);
+	&data_byte(0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92);
+	&data_byte(0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda);
+	&data_byte(0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84);
+	&data_byte(0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a);
+	&data_byte(0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06);
+	&data_byte(0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02);
+	&data_byte(0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b);
+	&data_byte(0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea);
+	&data_byte(0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73);
+	&data_byte(0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85);
+	&data_byte(0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e);
+	&data_byte(0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89);
+	&data_byte(0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b);
+	&data_byte(0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20);
+	&data_byte(0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4);
+	&data_byte(0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31);
+	&data_byte(0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f);
+	&data_byte(0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d);
+	&data_byte(0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef);
+	&data_byte(0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0);
+	&data_byte(0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61);
+	&data_byte(0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26);
+	&data_byte(0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d);
+
+	&data_byte(0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38);
+	&data_byte(0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb);
+	&data_byte(0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87);
+	&data_byte(0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb);
+	&data_byte(0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d);
+	&data_byte(0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e);
+	&data_byte(0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2);
+	&data_byte(0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25);
+	&data_byte(0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16);
+	&data_byte(0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92);
+	&data_byte(0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda);
+	&data_byte(0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84);
+	&data_byte(0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a);
+	&data_byte(0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06);
+	&data_byte(0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02);
+	&data_byte(0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b);
+	&data_byte(0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea);
+	&data_byte(0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73);
+	&data_byte(0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85);
+	&data_byte(0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e);
+	&data_byte(0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89);
+	&data_byte(0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b);
+	&data_byte(0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20);
+	&data_byte(0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4);
+	&data_byte(0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31);
+	&data_byte(0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f);
+	&data_byte(0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d);
+	&data_byte(0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef);
+	&data_byte(0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0);
+	&data_byte(0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61);
+	&data_byte(0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26);
+	&data_byte(0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d);
+&function_end_B("_x86_AES_decrypt");
+
+# void AES_decrypt (const unsigned char *inp,unsigned char *out,const AES_KEY *key);
+&function_begin("AES_decrypt");
+	&mov	($acc,&wparam(0));		# load inp
+	&mov	($key,&wparam(2));		# load key
+
+	&mov	($s0,"esp");
+	&sub	("esp",36);
+	&and	("esp",-64);			# align to cache-line
+
+	# place stack frame just "above" the key schedule
+	&lea	($s1,&DWP(-64-63,$key));
+	&sub	($s1,"esp");
+	&neg	($s1);
+	&and	($s1,0x3C0);	# modulo 1024, but aligned to cache-line
+	&sub	("esp",$s1);
+	&add	("esp",4);	# 4 is reserved for caller's return address
+	&mov	($_esp,$s0);	# save stack pointer
+
+	&call   (&label("pic_point"));          # make it PIC!
+	&set_label("pic_point");
+	&blindpop($tbl);
+	&picmeup($s0,"OPENSSL_ia32cap_P",$tbl,&label("pic_point")) if(!$x86only);
+	&lea    ($tbl,&DWP(&label("AES_Td")."-".&label("pic_point"),$tbl));
+
+	# pick Td4 copy which can't "overlap" with stack frame or key schedule
+	&lea	($s1,&DWP(768-4,"esp"));
+	&sub	($s1,$tbl);
+	&and	($s1,0x300);
+	&lea	($tbl,&DWP(2048+128,$tbl,$s1));
+
+					if (!$x86only) {
+	&bt	(&DWP(0,$s0),25);	# check for SSE bit
+	&jnc	(&label("x86"));
+
+	&movq	("mm0",&QWP(0,$acc));
+	&movq	("mm4",&QWP(8,$acc));
+	&call	("_sse_AES_decrypt_compact");
+	&mov	("esp",$_esp);			# restore stack pointer
+	&mov	($acc,&wparam(1));		# load out
+	&movq	(&QWP(0,$acc),"mm0");		# write output data
+	&movq	(&QWP(8,$acc),"mm4");
+	&emms	();
+	&function_end_A();
+					}
+	&set_label("x86",16);
+	&mov	($_tbl,$tbl);
+	&mov	($s0,&DWP(0,$acc));		# load input data
+	&mov	($s1,&DWP(4,$acc));
+	&mov	($s2,&DWP(8,$acc));
+	&mov	($s3,&DWP(12,$acc));
+	&call	("_x86_AES_decrypt_compact");
+	&mov	("esp",$_esp);			# restore stack pointer
+	&mov	($acc,&wparam(1));		# load out
+	&mov	(&DWP(0,$acc),$s0);		# write output data
+	&mov	(&DWP(4,$acc),$s1);
+	&mov	(&DWP(8,$acc),$s2);
+	&mov	(&DWP(12,$acc),$s3);
+&function_end("AES_decrypt");
+
+# void AES_cbc_encrypt (const unsigned char *inp, unsigned char *out,
+#			size_t length, const AES_KEY *key,
+#			unsigned char *ivp,const int enc);
+{
+# stack frame layout
+#             -4(%esp)		# return address	 0(%esp)
+#              0(%esp)		# s0 backing store	 4(%esp)	
+#              4(%esp)		# s1 backing store	 8(%esp)
+#              8(%esp)		# s2 backing store	12(%esp)
+#             12(%esp)		# s3 backing store	16(%esp)
+#             16(%esp)		# key backup		20(%esp)
+#             20(%esp)		# end of key schedule	24(%esp)
+#             24(%esp)		# %ebp backup		28(%esp)
+#             28(%esp)		# %esp backup
+my $_inp=&DWP(32,"esp");	# copy of wparam(0)
+my $_out=&DWP(36,"esp");	# copy of wparam(1)
+my $_len=&DWP(40,"esp");	# copy of wparam(2)
+my $_key=&DWP(44,"esp");	# copy of wparam(3)
+my $_ivp=&DWP(48,"esp");	# copy of wparam(4)
+my $_tmp=&DWP(52,"esp");	# volatile variable
+#
+my $ivec=&DWP(60,"esp");	# ivec[16]
+my $aes_key=&DWP(76,"esp");	# copy of aes_key
+my $mark=&DWP(76+240,"esp");	# copy of aes_key->rounds
+
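+# For reference, a simplified C sketch of what the fast CBC encryption path
+# below computes (it omits the partial-block tail handling, the fast/slow
+# path selection and in-place decryption; cbc_enc_ref is an illustrative
+# name, AES_encrypt/memcpy are the real calls):
+#
+#	#include <string.h>
+#	#include <openssl/aes.h>
+#
+#	static void cbc_enc_ref(const unsigned char *in, unsigned char *out,
+#				size_t len, const AES_KEY *key,
+#				unsigned char ivec[16])
+#	{
+#		unsigned char tmp[16];
+#		while (len >= 16) {
+#			for (int i = 0; i < 16; i++)
+#				tmp[i] = in[i] ^ ivec[i];	/* xor with iv	*/
+#			AES_encrypt(tmp, out, key);		/* one block	*/
+#			memcpy(ivec, out, 16);			/* chain output	*/
+#			in += 16; out += 16; len -= 16;
+#		}
+#	}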
+&function_begin("AES_cbc_encrypt");
+	&mov	($s2 eq "ecx"? $s2 : "",&wparam(2));	# load len
+	&cmp	($s2,0);
+	&je	(&label("drop_out"));
+
+	&call   (&label("pic_point"));		# make it PIC!
+	&set_label("pic_point");
+	&blindpop($tbl);
+	&picmeup($s0,"OPENSSL_ia32cap_P",$tbl,&label("pic_point")) if(!$x86only);
+
+	&cmp	(&wparam(5),0);
+	&lea    ($tbl,&DWP(&label("AES_Te")."-".&label("pic_point"),$tbl));
+	&jne	(&label("picked_te"));
+	&lea	($tbl,&DWP(&label("AES_Td")."-".&label("AES_Te"),$tbl));
+	&set_label("picked_te");
+
+	# one can argue whether this is required
+	&pushf	();
+	&cld	();
+
+	&cmp	($s2,$speed_limit);
+	&jb	(&label("slow_way"));
+	&test	($s2,15);
+	&jnz	(&label("slow_way"));
+					if (!$x86only) {
+	&bt	(&DWP(0,$s0),28);	# check for hyper-threading bit
+	&jc	(&label("slow_way"));
+					}
+	# pre-allocate aligned stack frame...
+	&lea	($acc,&DWP(-80-244,"esp"));
+	&and	($acc,-64);
+
+	# ... and make sure it doesn't alias with $tbl modulo 4096
+	&mov	($s0,$tbl);
+	&lea	($s1,&DWP(2048+256,$tbl));
+	&mov	($s3,$acc);
+	&and	($s0,0xfff);		# s = %ebp&0xfff
+	&and	($s1,0xfff);		# e = (%ebp+2048+256)&0xfff
+	&and	($s3,0xfff);		# p = %esp&0xfff
+
+	&cmp	($s3,$s1);		# if (p>=e) %esp -= (p-e);
+	&jb	(&label("tbl_break_out"));
+	&sub	($s3,$s1);
+	&sub	($acc,$s3);
+	&jmp	(&label("tbl_ok"));
+	&set_label("tbl_break_out",4);	# else %esp -= (p-s)&0xfff + framesz;
+	&sub	($s3,$s0);
+	&and	($s3,0xfff);
+	&add	($s3,384);
+	&sub	($acc,$s3);
+	&set_label("tbl_ok",4);
+
+	&lea	($s3,&wparam(0));	# obtain pointer to parameter block
+	&exch	("esp",$acc);		# allocate stack frame
+	&add	("esp",4);		# reserve for return address!
+	&mov	($_tbl,$tbl);		# save %ebp
+	&mov	($_esp,$acc);		# save %esp
+
+	&mov	($s0,&DWP(0,$s3));	# load inp
+	&mov	($s1,&DWP(4,$s3));	# load out
+	#&mov	($s2,&DWP(8,$s3));	# load len
+	&mov	($key,&DWP(12,$s3));	# load key
+	&mov	($acc,&DWP(16,$s3));	# load ivp
+	&mov	($s3,&DWP(20,$s3));	# load enc flag
+
+	&mov	($_inp,$s0);		# save copy of inp
+	&mov	($_out,$s1);		# save copy of out
+	&mov	($_len,$s2);		# save copy of len
+	&mov	($_key,$key);		# save copy of key
+	&mov	($_ivp,$acc);		# save copy of ivp
+
+	&mov	($mark,0);		# copy of aes_key->rounds = 0;
+	# do we copy key schedule to stack?
+	&mov	($s1 eq "ebx" ? $s1 : "",$key);
+	&mov	($s2 eq "ecx" ? $s2 : "",244/4);
+	&sub	($s1,$tbl);
+	&mov	("esi",$key);
+	&and	($s1,0xfff);
+	&lea	("edi",$aes_key);
+	&cmp	($s1,2048+256);
+	&jb	(&label("do_copy"));
+	&cmp	($s1,4096-244);
+	&jb	(&label("skip_copy"));
+	&set_label("do_copy",4);
+		&mov	($_key,"edi");
+		&data_word(0xA5F3F689);	# rep movsd
+	&set_label("skip_copy");
+
+	&mov	($key,16);
+	&set_label("prefetch_tbl",4);
+		&mov	($s0,&DWP(0,$tbl));
+		&mov	($s1,&DWP(32,$tbl));
+		&mov	($s2,&DWP(64,$tbl));
+		&mov	($acc,&DWP(96,$tbl));
+		&lea	($tbl,&DWP(128,$tbl));
+		&sub	($key,1);
+	&jnz	(&label("prefetch_tbl"));
+	&sub	($tbl,2048);
+
+	&mov	($acc,$_inp);
+	&mov	($key,$_ivp);
+
+	&cmp	($s3,0);
+	&je	(&label("fast_decrypt"));
+
+#----------------------------- ENCRYPT -----------------------------#
+	&mov	($s0,&DWP(0,$key));		# load iv
+	&mov	($s1,&DWP(4,$key));
+
+	&set_label("fast_enc_loop",16);
+		&mov	($s2,&DWP(8,$key));
+		&mov	($s3,&DWP(12,$key));
+
+		&xor	($s0,&DWP(0,$acc));	# xor input data
+		&xor	($s1,&DWP(4,$acc));
+		&xor	($s2,&DWP(8,$acc));
+		&xor	($s3,&DWP(12,$acc));
+
+		&mov	($key,$_key);		# load key
+		&call	("_x86_AES_encrypt");
+
+		&mov	($acc,$_inp);		# load inp
+		&mov	($key,$_out);		# load out
+
+		&mov	(&DWP(0,$key),$s0);	# save output data
+		&mov	(&DWP(4,$key),$s1);
+		&mov	(&DWP(8,$key),$s2);
+		&mov	(&DWP(12,$key),$s3);
+
+		&lea	($acc,&DWP(16,$acc));	# advance inp
+		&mov	($s2,$_len);		# load len
+		&mov	($_inp,$acc);		# save inp
+		&lea	($s3,&DWP(16,$key));	# advance out
+		&mov	($_out,$s3);		# save out
+		&sub	($s2,16);		# decrease len
+		&mov	($_len,$s2);		# save len
+	&jnz	(&label("fast_enc_loop"));
+	&mov	($acc,$_ivp);		# load ivp
+	&mov	($s2,&DWP(8,$key));	# restore last 2 dwords
+	&mov	($s3,&DWP(12,$key));
+	&mov	(&DWP(0,$acc),$s0);	# save ivec
+	&mov	(&DWP(4,$acc),$s1);
+	&mov	(&DWP(8,$acc),$s2);
+	&mov	(&DWP(12,$acc),$s3);
+
+	&cmp	($mark,0);		# was the key schedule copied?
+	&mov	("edi",$_key);
+	&je	(&label("skip_ezero"));
+	# zero copy of key schedule
+	&mov	("ecx",240/4);
+	&xor	("eax","eax");
+	&align	(4);
+	&data_word(0xABF3F689);	# rep stosd
+	&set_label("skip_ezero")
+	&mov	("esp",$_esp);
+	&popf	();
+    &set_label("drop_out");
+	&function_end_A();
+	&pushf	();			# kludge, never executed
+
+#----------------------------- DECRYPT -----------------------------#
+&set_label("fast_decrypt",16);
+
+	&cmp	($acc,$_out);
+	&je	(&label("fast_dec_in_place"));	# in-place processing...
+
+	&mov	($_tmp,$key);
+
+	&align	(4);
+	&set_label("fast_dec_loop",16);
+		&mov	($s0,&DWP(0,$acc));	# read input
+		&mov	($s1,&DWP(4,$acc));
+		&mov	($s2,&DWP(8,$acc));
+		&mov	($s3,&DWP(12,$acc));
+
+		&mov	($key,$_key);		# load key
+		&call	("_x86_AES_decrypt");
+
+		&mov	($key,$_tmp);		# load ivp
+		&mov	($acc,$_len);		# load len
+		&xor	($s0,&DWP(0,$key));	# xor iv
+		&xor	($s1,&DWP(4,$key));
+		&xor	($s2,&DWP(8,$key));
+		&xor	($s3,&DWP(12,$key));
+
+		&mov	($key,$_out);		# load out
+		&mov	($acc,$_inp);		# load inp
+
+		&mov	(&DWP(0,$key),$s0);	# write output
+		&mov	(&DWP(4,$key),$s1);
+		&mov	(&DWP(8,$key),$s2);
+		&mov	(&DWP(12,$key),$s3);
+
+		&mov	($s2,$_len);		# load len
+		&mov	($_tmp,$acc);		# save ivp
+		&lea	($acc,&DWP(16,$acc));	# advance inp
+		&mov	($_inp,$acc);		# save inp
+		&lea	($key,&DWP(16,$key));	# advance out
+		&mov	($_out,$key);		# save out
+		&sub	($s2,16);		# decrease len
+		&mov	($_len,$s2);		# save len
+	&jnz	(&label("fast_dec_loop"));
+	&mov	($key,$_tmp);		# load temp ivp
+	&mov	($acc,$_ivp);		# load user ivp
+	&mov	($s0,&DWP(0,$key));	# load iv
+	&mov	($s1,&DWP(4,$key));
+	&mov	($s2,&DWP(8,$key));
+	&mov	($s3,&DWP(12,$key));
+	&mov	(&DWP(0,$acc),$s0);	# copy back to user
+	&mov	(&DWP(4,$acc),$s1);
+	&mov	(&DWP(8,$acc),$s2);
+	&mov	(&DWP(12,$acc),$s3);
+	&jmp	(&label("fast_dec_out"));
+
+    &set_label("fast_dec_in_place",16);
+	&set_label("fast_dec_in_place_loop");
+		&mov	($s0,&DWP(0,$acc));	# read input
+		&mov	($s1,&DWP(4,$acc));
+		&mov	($s2,&DWP(8,$acc));
+		&mov	($s3,&DWP(12,$acc));
+
+		&lea	($key,$ivec);
+		&mov	(&DWP(0,$key),$s0);	# copy to temp
+		&mov	(&DWP(4,$key),$s1);
+		&mov	(&DWP(8,$key),$s2);
+		&mov	(&DWP(12,$key),$s3);
+
+		&mov	($key,$_key);		# load key
+		&call	("_x86_AES_decrypt");
+
+		&mov	($key,$_ivp);		# load ivp
+		&mov	($acc,$_out);		# load out
+		&xor	($s0,&DWP(0,$key));	# xor iv
+		&xor	($s1,&DWP(4,$key));
+		&xor	($s2,&DWP(8,$key));
+		&xor	($s3,&DWP(12,$key));
+
+		&mov	(&DWP(0,$acc),$s0);	# write output
+		&mov	(&DWP(4,$acc),$s1);
+		&mov	(&DWP(8,$acc),$s2);
+		&mov	(&DWP(12,$acc),$s3);
+
+		&lea	($acc,&DWP(16,$acc));	# advance out
+		&mov	($_out,$acc);		# save out
+
+		&lea	($acc,$ivec);
+		&mov	($s0,&DWP(0,$acc));	# read temp
+		&mov	($s1,&DWP(4,$acc));
+		&mov	($s2,&DWP(8,$acc));
+		&mov	($s3,&DWP(12,$acc));
+
+		&mov	(&DWP(0,$key),$s0);	# copy iv
+		&mov	(&DWP(4,$key),$s1);
+		&mov	(&DWP(8,$key),$s2);
+		&mov	(&DWP(12,$key),$s3);
+
+		&mov	($acc,$_inp);		# load inp
+		&mov	($s2,$_len);		# load len
+		&lea	($acc,&DWP(16,$acc));	# advance inp
+		&mov	($_inp,$acc);		# save inp
+		&sub	($s2,16);		# decrease len
+		&mov	($_len,$s2);		# save len
+	&jnz	(&label("fast_dec_in_place_loop"));
+
+    &set_label("fast_dec_out",4);
+	&cmp	($mark,0);		# was the key schedule copied?
+	&mov	("edi",$_key);
+	&je	(&label("skip_dzero"));
+	# zero copy of key schedule
+	&mov	("ecx",240/4);
+	&xor	("eax","eax");
+	&align	(4);
+	&data_word(0xABF3F689);	# rep stosd
+	&set_label("skip_dzero")
+	&mov	("esp",$_esp);
+	&popf	();
+	&function_end_A();
+	&pushf	();			# kludge, never executed
+
+#--------------------------- SLOW ROUTINE ---------------------------#
+&set_label("slow_way",16);
+
+	&mov	($s0,&DWP(0,$s0)) if (!$x86only);# load OPENSSL_ia32cap
+	&mov	($key,&wparam(3));	# load key
+
+	# pre-allocate aligned stack frame...
+	&lea	($acc,&DWP(-80,"esp"));
+	&and	($acc,-64);
+
+	# ... and make sure it doesn't alias with $key modulo 1024
+	&lea	($s1,&DWP(-80-63,$key));
+	&sub	($s1,$acc);
+	&neg	($s1);
+	&and	($s1,0x3C0);	# modulo 1024, but aligned to cache-line
+	&sub	($acc,$s1);
+
+	# pick S-box copy which can't overlap with stack frame or $key
+	&lea	($s1,&DWP(768,$acc));
+	&sub	($s1,$tbl);
+	&and	($s1,0x300);
+	&lea	($tbl,&DWP(2048+128,$tbl,$s1));
+
+	&lea	($s3,&wparam(0));	# pointer to parameter block
+
+	&exch	("esp",$acc);
+	&add	("esp",4);		# reserve for return address!
+	&mov	($_tbl,$tbl);		# save %ebp
+	&mov	($_esp,$acc);		# save %esp
+	&mov	($_tmp,$s0);		# save OPENSSL_ia32cap
+
+	&mov	($s0,&DWP(0,$s3));	# load inp
+	&mov	($s1,&DWP(4,$s3));	# load out
+	#&mov	($s2,&DWP(8,$s3));	# load len
+	#&mov	($key,&DWP(12,$s3));	# load key
+	&mov	($acc,&DWP(16,$s3));	# load ivp
+	&mov	($s3,&DWP(20,$s3));	# load enc flag
+
+	&mov	($_inp,$s0);		# save copy of inp
+	&mov	($_out,$s1);		# save copy of out
+	&mov	($_len,$s2);		# save copy of len
+	&mov	($_key,$key);		# save copy of key
+	&mov	($_ivp,$acc);		# save copy of ivp
+
+	&mov	($key,$acc);
+	&mov	($acc,$s0);
+
+	&cmp	($s3,0);
+	&je	(&label("slow_decrypt"));
+
+#--------------------------- SLOW ENCRYPT ---------------------------#
+	&cmp	($s2,16);
+	&mov	($s3,$s1);
+	&jb	(&label("slow_enc_tail"));
+
+					if (!$x86only) {
+	&bt	($_tmp,25);		# check for SSE bit
+	&jnc	(&label("slow_enc_x86"));
+
+	&movq	("mm0",&QWP(0,$key));	# load iv
+	&movq	("mm4",&QWP(8,$key));
+
+	&set_label("slow_enc_loop_sse",16);
+		&pxor	("mm0",&QWP(0,$acc));	# xor input data
+		&pxor	("mm4",&QWP(8,$acc));
+
+		&mov	($key,$_key);
+		&call	("_sse_AES_encrypt_compact");
+
+		&mov	($acc,$_inp);		# load inp
+		&mov	($key,$_out);		# load out
+		&mov	($s2,$_len);		# load len
+
+		&movq	(&QWP(0,$key),"mm0");	# save output data
+		&movq	(&QWP(8,$key),"mm4");
+
+		&lea	($acc,&DWP(16,$acc));	# advance inp
+		&mov	($_inp,$acc);		# save inp
+		&lea	($s3,&DWP(16,$key));	# advance out
+		&mov	($_out,$s3);		# save out
+		&sub	($s2,16);		# decrease len
+		&cmp	($s2,16);
+		&mov	($_len,$s2);		# save len
+	&jae	(&label("slow_enc_loop_sse"));
+	&test	($s2,15);
+	&jnz	(&label("slow_enc_tail"));
+	&mov	($acc,$_ivp);		# load ivp
+	&movq	(&QWP(0,$acc),"mm0");	# save ivec
+	&movq	(&QWP(8,$acc),"mm4");
+	&emms	();
+	&mov	("esp",$_esp);
+	&popf	();
+	&function_end_A();
+	&pushf	();			# kludge, never executed
+					}
+    &set_label("slow_enc_x86",16);
+	&mov	($s0,&DWP(0,$key));	# load iv
+	&mov	($s1,&DWP(4,$key));
+
+	&set_label("slow_enc_loop_x86",4);
+		&mov	($s2,&DWP(8,$key));
+		&mov	($s3,&DWP(12,$key));
+
+		&xor	($s0,&DWP(0,$acc));	# xor input data
+		&xor	($s1,&DWP(4,$acc));
+		&xor	($s2,&DWP(8,$acc));
+		&xor	($s3,&DWP(12,$acc));
+
+		&mov	($key,$_key);		# load key
+		&call	("_x86_AES_encrypt_compact");
+
+		&mov	($acc,$_inp);		# load inp
+		&mov	($key,$_out);		# load out
+
+		&mov	(&DWP(0,$key),$s0);	# save output data
+		&mov	(&DWP(4,$key),$s1);
+		&mov	(&DWP(8,$key),$s2);
+		&mov	(&DWP(12,$key),$s3);
+
+		&mov	($s2,$_len);		# load len
+		&lea	($acc,&DWP(16,$acc));	# advance inp
+		&mov	($_inp,$acc);		# save inp
+		&lea	($s3,&DWP(16,$key));	# advance out
+		&mov	($_out,$s3);		# save out
+		&sub	($s2,16);		# decrease len
+		&cmp	($s2,16);
+		&mov	($_len,$s2);		# save len
+	&jae	(&label("slow_enc_loop_x86"));
+	&test	($s2,15);
+	&jnz	(&label("slow_enc_tail"));
+	&mov	($acc,$_ivp);		# load ivp
+	&mov	($s2,&DWP(8,$key));	# restore last dwords
+	&mov	($s3,&DWP(12,$key));
+	&mov	(&DWP(0,$acc),$s0);	# save ivec
+	&mov	(&DWP(4,$acc),$s1);
+	&mov	(&DWP(8,$acc),$s2);
+	&mov	(&DWP(12,$acc),$s3);
+
+	&mov	("esp",$_esp);
+	&popf	();
+	&function_end_A();
+	&pushf	();			# kludge, never executed
+
+    &set_label("slow_enc_tail",16);
+	&emms	()	if (!$x86only);
+	&mov	($key eq "edi"? $key:"",$s3);	# load out to edi
+	&mov	($s1,16);
+	&sub	($s1,$s2);
+	&cmp	($key,$acc eq "esi"? $acc:"");	# compare with inp
+	&je	(&label("enc_in_place"));
+	&align	(4);
+	&data_word(0xA4F3F689);	# rep movsb	# copy input
+	&jmp	(&label("enc_skip_in_place"));
+    &set_label("enc_in_place");
+	&lea	($key,&DWP(0,$key,$s2));
+    &set_label("enc_skip_in_place");
+	&mov	($s2,$s1);
+	&xor	($s0,$s0);
+	&align	(4);
+	&data_word(0xAAF3F689);	# rep stosb	# zero tail
+
+	&mov	($key,$_ivp);			# restore ivp
+	&mov	($acc,$s3);			# output as input
+	&mov	($s0,&DWP(0,$key));
+	&mov	($s1,&DWP(4,$key));
+	&mov	($_len,16);			# len=16
+	&jmp	(&label("slow_enc_loop_x86"));	# one more spin...
+
+#--------------------------- SLOW DECRYPT ---------------------------#
+&set_label("slow_decrypt",16);
+					if (!$x86only) {
+	&bt	($_tmp,25);		# check for SSE bit
+	&jnc	(&label("slow_dec_loop_x86"));
+
+	&set_label("slow_dec_loop_sse",4);
+		&movq	("mm0",&QWP(0,$acc));	# read input
+		&movq	("mm4",&QWP(8,$acc));
+
+		&mov	($key,$_key);
+		&call	("_sse_AES_decrypt_compact");
+
+		&mov	($acc,$_inp);		# load inp
+		&lea	($s0,$ivec);
+		&mov	($s1,$_out);		# load out
+		&mov	($s2,$_len);		# load len
+		&mov	($key,$_ivp);		# load ivp
+
+		&movq	("mm1",&QWP(0,$acc));	# re-read input
+		&movq	("mm5",&QWP(8,$acc));
+
+		&pxor	("mm0",&QWP(0,$key));	# xor iv
+		&pxor	("mm4",&QWP(8,$key));
+
+		&movq	(&QWP(0,$key),"mm1");	# copy input to iv
+		&movq	(&QWP(8,$key),"mm5");
+
+		&sub	($s2,16);		# decrease len
+		&jc	(&label("slow_dec_partial_sse"));
+
+		&movq	(&QWP(0,$s1),"mm0");	# write output
+		&movq	(&QWP(8,$s1),"mm4");
+
+		&lea	($s1,&DWP(16,$s1));	# advance out
+		&mov	($_out,$s1);		# save out
+		&lea	($acc,&DWP(16,$acc));	# advance inp
+		&mov	($_inp,$acc);		# save inp
+		&mov	($_len,$s2);		# save len
+	&jnz	(&label("slow_dec_loop_sse"));
+	&emms	();
+	&mov	("esp",$_esp);
+	&popf	();
+	&function_end_A();
+	&pushf	();			# kludge, never executed
+
+    &set_label("slow_dec_partial_sse",16);
+	&movq	(&QWP(0,$s0),"mm0");	# save output to temp
+	&movq	(&QWP(8,$s0),"mm4");
+	&emms	();
+
+	&add	($s2 eq "ecx" ? "ecx":"",16);
+	&mov	("edi",$s1);		# out
+	&mov	("esi",$s0);		# temp
+	&align	(4);
+	&data_word(0xA4F3F689);		# rep movsb # copy partial output
+
+	&mov	("esp",$_esp);
+	&popf	();
+	&function_end_A();
+	&pushf	();			# kludge, never executed
+					}
+	&set_label("slow_dec_loop_x86",16);
+		&mov	($s0,&DWP(0,$acc));	# read input
+		&mov	($s1,&DWP(4,$acc));
+		&mov	($s2,&DWP(8,$acc));
+		&mov	($s3,&DWP(12,$acc));
+
+		&lea	($key,$ivec);
+		&mov	(&DWP(0,$key),$s0);	# copy to temp
+		&mov	(&DWP(4,$key),$s1);
+		&mov	(&DWP(8,$key),$s2);
+		&mov	(&DWP(12,$key),$s3);
+
+		&mov	($key,$_key);		# load key
+		&call	("_x86_AES_decrypt_compact");
+
+		&mov	($key,$_ivp);		# load ivp
+		&mov	($acc,$_len);		# load len
+		&xor	($s0,&DWP(0,$key));	# xor iv
+		&xor	($s1,&DWP(4,$key));
+		&xor	($s2,&DWP(8,$key));
+		&xor	($s3,&DWP(12,$key));
+
+		&sub	($acc,16);
+		&jc	(&label("slow_dec_partial_x86"));
+
+		&mov	($_len,$acc);		# save len
+		&mov	($acc,$_out);		# load out
+
+		&mov	(&DWP(0,$acc),$s0);	# write output
+		&mov	(&DWP(4,$acc),$s1);
+		&mov	(&DWP(8,$acc),$s2);
+		&mov	(&DWP(12,$acc),$s3);
+
+		&lea	($acc,&DWP(16,$acc));	# advance out
+		&mov	($_out,$acc);		# save out
+
+		&lea	($acc,$ivec);
+		&mov	($s0,&DWP(0,$acc));	# read temp
+		&mov	($s1,&DWP(4,$acc));
+		&mov	($s2,&DWP(8,$acc));
+		&mov	($s3,&DWP(12,$acc));
+
+		&mov	(&DWP(0,$key),$s0);	# copy it to iv
+		&mov	(&DWP(4,$key),$s1);
+		&mov	(&DWP(8,$key),$s2);
+		&mov	(&DWP(12,$key),$s3);
+
+		&mov	($acc,$_inp);		# load inp
+		&lea	($acc,&DWP(16,$acc));	# advance inp
+		&mov	($_inp,$acc);		# save inp
+	&jnz	(&label("slow_dec_loop_x86"));
+	&mov	("esp",$_esp);
+	&popf	();
+	&function_end_A();
+	&pushf	();			# kludge, never executed
+
+    &set_label("slow_dec_partial_x86",16);
+	&lea	($acc,$ivec);
+	&mov	(&DWP(0,$acc),$s0);	# save output to temp
+	&mov	(&DWP(4,$acc),$s1);
+	&mov	(&DWP(8,$acc),$s2);
+	&mov	(&DWP(12,$acc),$s3);
+
+	&mov	($acc,$_inp);
+	&mov	($s0,&DWP(0,$acc));	# re-read input
+	&mov	($s1,&DWP(4,$acc));
+	&mov	($s2,&DWP(8,$acc));
+	&mov	($s3,&DWP(12,$acc));
+
+	&mov	(&DWP(0,$key),$s0);	# copy it to iv
+	&mov	(&DWP(4,$key),$s1);
+	&mov	(&DWP(8,$key),$s2);
+	&mov	(&DWP(12,$key),$s3);
+
+	&mov	("ecx",$_len);
+	&mov	("edi",$_out);
+	&lea	("esi",$ivec);
+	&align	(4);
+	&data_word(0xA4F3F689);		# rep movsb # copy partial output
+
+	&mov	("esp",$_esp);
+	&popf	();
+&function_end("AES_cbc_encrypt");
+}
+
+#------------------------------------------------------------------#
+
+sub enckey()
+{
+	&movz	("esi",&LB("edx"));		# rk[i]>>0
+	&movz	("ebx",&BP(-128,$tbl,"esi",1));
+	&movz	("esi",&HB("edx"));		# rk[i]>>8
+	&shl	("ebx",24);
+	&xor	("eax","ebx");
+
+	&movz	("ebx",&BP(-128,$tbl,"esi",1));
+	&shr	("edx",16);
+	&movz	("esi",&LB("edx"));		# rk[i]>>16
+	&xor	("eax","ebx");
+
+	&movz	("ebx",&BP(-128,$tbl,"esi",1));
+	&movz	("esi",&HB("edx"));		# rk[i]>>24
+	&shl	("ebx",8);
+	&xor	("eax","ebx");
+
+	&movz	("ebx",&BP(-128,$tbl,"esi",1));
+	&shl	("ebx",16);
+	&xor	("eax","ebx");
+
+	&xor	("eax",&DWP(1024-128,$tbl,"ecx",4));	# rcon
+}
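+
+# enckey() above is the FIPS-197 key-expansion core applied to the word in
+# %edx: rotate its bytes, push each through the S-box (the BP(-128,$tbl,...)
+# loads index the Te4 copy that $tbl points 128 bytes into) and fold in the
+# round constant.  Roughly, in C (sbox/rcon/rk are illustrative names; the
+# word uses the little-endian in-memory layout used throughout this file):
+#
+#	uint32_t w = rk[3], t;			/* previous round-key word   */
+#	t  = (uint32_t)sbox[(w >>  8) & 0xff];
+#	t |= (uint32_t)sbox[(w >> 16) & 0xff] <<  8;
+#	t |= (uint32_t)sbox[(w >> 24) & 0xff] << 16;
+#	t |= (uint32_t)sbox[ w        & 0xff] << 24;
+#	t ^= rcon[i];				/* 1,2,4,... in the low byte */
+#	rk[4] = rk[0] ^ t;			/* rk[5..7] chain by XOR     */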
+
+&function_begin("_x86_AES_set_encrypt_key");
+	&mov	("esi",&wparam(1));		# user supplied key
+	&mov	("edi",&wparam(3));		# private key schedule
+
+	&test	("esi",-1);
+	&jz	(&label("badpointer"));
+	&test	("edi",-1);
+	&jz	(&label("badpointer"));
+
+	&call	(&label("pic_point"));
+	&set_label("pic_point");
+	&blindpop($tbl);
+	&lea	($tbl,&DWP(&label("AES_Te")."-".&label("pic_point"),$tbl));
+	&lea	($tbl,&DWP(2048+128,$tbl));
+
+	# prefetch Te4
+	&mov	("eax",&DWP(0-128,$tbl));
+	&mov	("ebx",&DWP(32-128,$tbl));
+	&mov	("ecx",&DWP(64-128,$tbl));
+	&mov	("edx",&DWP(96-128,$tbl));
+	&mov	("eax",&DWP(128-128,$tbl));
+	&mov	("ebx",&DWP(160-128,$tbl));
+	&mov	("ecx",&DWP(192-128,$tbl));
+	&mov	("edx",&DWP(224-128,$tbl));
+
+	&mov	("ecx",&wparam(2));		# number of bits in key
+	&cmp	("ecx",128);
+	&je	(&label("10rounds"));
+	&cmp	("ecx",192);
+	&je	(&label("12rounds"));
+	&cmp	("ecx",256);
+	&je	(&label("14rounds"));
+	&mov	("eax",-2);			# invalid number of bits
+	&jmp	(&label("exit"));
+
+    &set_label("10rounds");
+	&mov	("eax",&DWP(0,"esi"));		# copy first 4 dwords
+	&mov	("ebx",&DWP(4,"esi"));
+	&mov	("ecx",&DWP(8,"esi"));
+	&mov	("edx",&DWP(12,"esi"));
+	&mov	(&DWP(0,"edi"),"eax");
+	&mov	(&DWP(4,"edi"),"ebx");
+	&mov	(&DWP(8,"edi"),"ecx");
+	&mov	(&DWP(12,"edi"),"edx");
+
+	&xor	("ecx","ecx");
+	&jmp	(&label("10shortcut"));
+
+	&align	(4);
+	&set_label("10loop");
+		&mov	("eax",&DWP(0,"edi"));		# rk[0]
+		&mov	("edx",&DWP(12,"edi"));		# rk[3]
+	&set_label("10shortcut");
+		&enckey	();
+
+		&mov	(&DWP(16,"edi"),"eax");		# rk[4]
+		&xor	("eax",&DWP(4,"edi"));
+		&mov	(&DWP(20,"edi"),"eax");		# rk[5]
+		&xor	("eax",&DWP(8,"edi"));
+		&mov	(&DWP(24,"edi"),"eax");		# rk[6]
+		&xor	("eax",&DWP(12,"edi"));
+		&mov	(&DWP(28,"edi"),"eax");		# rk[7]
+		&inc	("ecx");
+		&add	("edi",16);
+		&cmp	("ecx",10);
+	&jl	(&label("10loop"));
+
+	&mov	(&DWP(80,"edi"),10);		# setup number of rounds
+	&xor	("eax","eax");
+	&jmp	(&label("exit"));
+		
+    &set_label("12rounds");
+	&mov	("eax",&DWP(0,"esi"));		# copy first 6 dwords
+	&mov	("ebx",&DWP(4,"esi"));
+	&mov	("ecx",&DWP(8,"esi"));
+	&mov	("edx",&DWP(12,"esi"));
+	&mov	(&DWP(0,"edi"),"eax");
+	&mov	(&DWP(4,"edi"),"ebx");
+	&mov	(&DWP(8,"edi"),"ecx");
+	&mov	(&DWP(12,"edi"),"edx");
+	&mov	("ecx",&DWP(16,"esi"));
+	&mov	("edx",&DWP(20,"esi"));
+	&mov	(&DWP(16,"edi"),"ecx");
+	&mov	(&DWP(20,"edi"),"edx");
+
+	&xor	("ecx","ecx");
+	&jmp	(&label("12shortcut"));
+
+	&align	(4);
+	&set_label("12loop");
+		&mov	("eax",&DWP(0,"edi"));		# rk[0]
+		&mov	("edx",&DWP(20,"edi"));		# rk[5]
+	&set_label("12shortcut");
+		&enckey	();
+
+		&mov	(&DWP(24,"edi"),"eax");		# rk[6]
+		&xor	("eax",&DWP(4,"edi"));
+		&mov	(&DWP(28,"edi"),"eax");		# rk[7]
+		&xor	("eax",&DWP(8,"edi"));
+		&mov	(&DWP(32,"edi"),"eax");		# rk[8]
+		&xor	("eax",&DWP(12,"edi"));
+		&mov	(&DWP(36,"edi"),"eax");		# rk[9]
+
+		&cmp	("ecx",7);
+		&je	(&label("12break"));
+		&inc	("ecx");
+
+		&xor	("eax",&DWP(16,"edi"));
+		&mov	(&DWP(40,"edi"),"eax");		# rk[10]
+		&xor	("eax",&DWP(20,"edi"));
+		&mov	(&DWP(44,"edi"),"eax");		# rk[11]
+
+		&add	("edi",24);
+	&jmp	(&label("12loop"));
+
+	&set_label("12break");
+	&mov	(&DWP(72,"edi"),12);		# setup number of rounds
+	&xor	("eax","eax");
+	&jmp	(&label("exit"));
+
+    &set_label("14rounds");
+	&mov	("eax",&DWP(0,"esi"));		# copy first 8 dwords
+	&mov	("ebx",&DWP(4,"esi"));
+	&mov	("ecx",&DWP(8,"esi"));
+	&mov	("edx",&DWP(12,"esi"));
+	&mov	(&DWP(0,"edi"),"eax");
+	&mov	(&DWP(4,"edi"),"ebx");
+	&mov	(&DWP(8,"edi"),"ecx");
+	&mov	(&DWP(12,"edi"),"edx");
+	&mov	("eax",&DWP(16,"esi"));
+	&mov	("ebx",&DWP(20,"esi"));
+	&mov	("ecx",&DWP(24,"esi"));
+	&mov	("edx",&DWP(28,"esi"));
+	&mov	(&DWP(16,"edi"),"eax");
+	&mov	(&DWP(20,"edi"),"ebx");
+	&mov	(&DWP(24,"edi"),"ecx");
+	&mov	(&DWP(28,"edi"),"edx");
+
+	&xor	("ecx","ecx");
+	&jmp	(&label("14shortcut"));
+
+	&align	(4);
+	&set_label("14loop");
+		&mov	("edx",&DWP(28,"edi"));		# rk[7]
+	&set_label("14shortcut");
+		&mov	("eax",&DWP(0,"edi"));		# rk[0]
+
+		&enckey	();
+
+		&mov	(&DWP(32,"edi"),"eax");		# rk[8]
+		&xor	("eax",&DWP(4,"edi"));
+		&mov	(&DWP(36,"edi"),"eax");		# rk[9]
+		&xor	("eax",&DWP(8,"edi"));
+		&mov	(&DWP(40,"edi"),"eax");		# rk[10]
+		&xor	("eax",&DWP(12,"edi"));
+		&mov	(&DWP(44,"edi"),"eax");		# rk[11]
+
+		&cmp	("ecx",6);
+		&je	(&label("14break"));
+		&inc	("ecx");
+
+		&mov	("edx","eax");
+		&mov	("eax",&DWP(16,"edi"));		# rk[4]
+		&movz	("esi",&LB("edx"));		# rk[11]>>0
+		&movz	("ebx",&BP(-128,$tbl,"esi",1));
+		&movz	("esi",&HB("edx"));		# rk[11]>>8
+		&xor	("eax","ebx");
+
+		&movz	("ebx",&BP(-128,$tbl,"esi",1));
+		&shr	("edx",16);
+		&shl	("ebx",8);
+		&movz	("esi",&LB("edx"));		# rk[11]>>16
+		&xor	("eax","ebx");
+
+		&movz	("ebx",&BP(-128,$tbl,"esi",1));
+		&movz	("esi",&HB("edx"));		# rk[11]>>24
+		&shl	("ebx",16);
+		&xor	("eax","ebx");
+
+		&movz	("ebx",&BP(-128,$tbl,"esi",1));
+		&shl	("ebx",24);
+		&xor	("eax","ebx");
+
+		&mov	(&DWP(48,"edi"),"eax");		# rk[12]
+		&xor	("eax",&DWP(20,"edi"));
+		&mov	(&DWP(52,"edi"),"eax");		# rk[13]
+		&xor	("eax",&DWP(24,"edi"));
+		&mov	(&DWP(56,"edi"),"eax");		# rk[14]
+		&xor	("eax",&DWP(28,"edi"));
+		&mov	(&DWP(60,"edi"),"eax");		# rk[15]
+
+		&add	("edi",32);
+	&jmp	(&label("14loop"));
+
+	&set_label("14break");
+	&mov	(&DWP(48,"edi"),14);		# setup number of rounds
+	&xor	("eax","eax");
+	&jmp	(&label("exit"));
+
+    &set_label("badpointer");
+	&mov	("eax",-1);
+    &set_label("exit");
+&function_end("_x86_AES_set_encrypt_key");
+
+# int private_AES_set_encrypt_key(const unsigned char *userKey, const int bits,
+#                        AES_KEY *key)
+&function_begin_B("private_AES_set_encrypt_key");
+	&call	("_x86_AES_set_encrypt_key");
+	&ret	();
+&function_end_B("private_AES_set_encrypt_key");
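+
+# Typical use of the resulting key schedule, as a C sketch (AES_set_encrypt_key
+# is the public entry point that normally forwards to the routine above; the
+# return codes match the values set in %eax: -1 for a NULL pointer, -2 for an
+# unsupported bit count, 0 on success):
+#
+#	unsigned char userKey[16] = {0};	/* example key		*/
+#	unsigned char in[16] = {0}, out[16];
+#	AES_KEY ks;
+#	if (AES_set_encrypt_key(userKey, 128, &ks) != 0)
+#		return;				/* bad bits or pointer	*/
+#	AES_encrypt(in, out, &ks);		/* one 16-byte block	*/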
+
+sub deckey()
+{ my ($i,$key,$tp1,$tp2,$tp4,$tp8) = @_;
+  my $tmp = $tbl;
+
+	&mov	($acc,$tp1);
+	&and	($acc,0x80808080);
+	&mov	($tmp,$acc);
+	&shr	($tmp,7);
+	&lea	($tp2,&DWP(0,$tp1,$tp1));
+	&sub	($acc,$tmp);
+	&and	($tp2,0xfefefefe);
+	&and	($acc,0x1b1b1b1b);
+	&xor	($acc,$tp2);
+	&mov	($tp2,$acc);
+
+	&and	($acc,0x80808080);
+	&mov	($tmp,$acc);
+	&shr	($tmp,7);
+	&lea	($tp4,&DWP(0,$tp2,$tp2));
+	&sub	($acc,$tmp);
+	&and	($tp4,0xfefefefe);
+	&and	($acc,0x1b1b1b1b);
+	 &xor	($tp2,$tp1);	# tp2^tp1
+	&xor	($acc,$tp4);
+	&mov	($tp4,$acc);
+
+	&and	($acc,0x80808080);
+	&mov	($tmp,$acc);
+	&shr	($tmp,7);
+	&lea	($tp8,&DWP(0,$tp4,$tp4));
+	 &xor	($tp4,$tp1);	# tp4^tp1
+	&sub	($acc,$tmp);
+	&and	($tp8,0xfefefefe);
+	&and	($acc,0x1b1b1b1b);
+	 &rotl	($tp1,8);	# = ROTATE(tp1,8)
+	&xor	($tp8,$acc);
+
+	&mov	($tmp,&DWP(4*($i+1),$key));	# modulo-scheduled load
+
+	&xor	($tp1,$tp2);
+	&xor	($tp2,$tp8);
+	&xor	($tp1,$tp4);
+	&rotl	($tp2,24);
+	&xor	($tp4,$tp8);
+	&xor	($tp1,$tp8);	# ^= tp8^(tp4^tp1)^(tp2^tp1)
+	&rotl	($tp4,16);
+	&xor	($tp1,$tp2);	# ^= ROTATE(tp8^tp2^tp1,24)
+	&rotl	($tp8,8);
+	&xor	($tp1,$tp4);	# ^= ROTATE(tp8^tp4^tp1,16)
+	&mov	($tp2,$tmp);
+	&xor	($tp1,$tp8);	# ^= ROTATE(tp8,8)
+
+	&mov	(&DWP(4*$i,$key),$tp1);
+}
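+
+# deckey() above applies InvMixColumns to one round-key word.  The mask
+# constants implement four parallel GF(2^8) doublings ("xtime") in a single
+# 32-bit operation; a C sketch of that step (xtime4 is an illustrative name;
+# the code forms the 0x1b reduction mask via subtract-and-AND, which is
+# equivalent):
+#
+#	static uint32_t xtime4(uint32_t w)
+#	{
+#		uint32_t hi = w & 0x80808080;	/* bytes about to overflow   */
+#		return ((w & 0x7f7f7f7f) << 1)	/* shift each byte left      */
+#		     ^ ((hi >> 7) * 0x1b);	/* reduce mod x^8+x^4+x^3+x+1 */
+#	}
+#
+# With tp2=xtime4(tp1), tp4=xtime4(tp2) and tp8=xtime4(tp4), the rotate/XOR
+# tail combines tp9=tp1^tp8, tpb=tp9^tp2, tpd=tp9^tp4 and tpe=tp2^tp4^tp8
+# (the 09/0b/0d/0e multiples) into the InvMixColumns result for all four
+# byte positions of the word.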
+
+# int private_AES_set_decrypt_key(const unsigned char *userKey, const int bits,
+#                        AES_KEY *key)
+&function_begin_B("private_AES_set_decrypt_key");
+	&call	("_x86_AES_set_encrypt_key");
+	&cmp	("eax",0);
+	&je	(&label("proceed"));
+	&ret	();
+
+    &set_label("proceed");
+	&push	("ebp");
+	&push	("ebx");
+	&push	("esi");
+	&push	("edi");
+
+	&mov	("esi",&wparam(2));
+	&mov	("ecx",&DWP(240,"esi"));	# pull number of rounds
+	&lea	("ecx",&DWP(0,"","ecx",4));
+	&lea	("edi",&DWP(0,"esi","ecx",4));	# pointer to last chunk
+
+	&set_label("invert",4);			# invert order of chunks
+		&mov	("eax",&DWP(0,"esi"));
+		&mov	("ebx",&DWP(4,"esi"));
+		&mov	("ecx",&DWP(0,"edi"));
+		&mov	("edx",&DWP(4,"edi"));
+		&mov	(&DWP(0,"edi"),"eax");
+		&mov	(&DWP(4,"edi"),"ebx");
+		&mov	(&DWP(0,"esi"),"ecx");
+		&mov	(&DWP(4,"esi"),"edx");
+		&mov	("eax",&DWP(8,"esi"));
+		&mov	("ebx",&DWP(12,"esi"));
+		&mov	("ecx",&DWP(8,"edi"));
+		&mov	("edx",&DWP(12,"edi"));
+		&mov	(&DWP(8,"edi"),"eax");
+		&mov	(&DWP(12,"edi"),"ebx");
+		&mov	(&DWP(8,"esi"),"ecx");
+		&mov	(&DWP(12,"esi"),"edx");
+		&add	("esi",16);
+		&sub	("edi",16);
+		&cmp	("esi","edi");
+	&jne	(&label("invert"));
+
+	&mov	($key,&wparam(2));
+	&mov	($acc,&DWP(240,$key));		# pull number of rounds
+	&lea	($acc,&DWP(-2,$acc,$acc));
+	&lea	($acc,&DWP(0,$key,$acc,8));
+	&mov	(&wparam(2),$acc);
+
+	&mov	($s0,&DWP(16,$key));		# modulo-scheduled load
+	&set_label("permute",4);		# permute the key schedule
+		&add	($key,16);
+		&deckey	(0,$key,$s0,$s1,$s2,$s3);
+		&deckey	(1,$key,$s1,$s2,$s3,$s0);
+		&deckey	(2,$key,$s2,$s3,$s0,$s1);
+		&deckey	(3,$key,$s3,$s0,$s1,$s2);
+		&cmp	($key,&wparam(2));
+	&jb	(&label("permute"));
+
+	&xor	("eax","eax");			# return success
+&function_end("private_AES_set_decrypt_key");
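+
+# The routine above builds the decryption schedule in two passes: it first
+# expands the encryption schedule, then reverses the order of the
+# (rounds+1) 16-byte round keys, and finally applies deckey() (i.e.
+# InvMixColumns) to every word of round keys 1..rounds-1, leaving the
+# first and last round keys untouched.  In C terms (illustrative names):
+#
+#	for (i = 1; i < rounds; i++)
+#		for (j = 0; j < 4; j++)
+#			rk[4*i + j] = InvMixColumn(rk[4*i + j]);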
+&asciz("AES for x86, CRYPTOGAMS by <appro\@openssl.org>");
+
+&asm_finish();
diff --git a/jni/openssl/crypto/aes/asm/aes-armv4.pl b/jni/openssl/crypto/aes/asm/aes-armv4.pl
new file mode 100644
index 0000000..86b86c4
--- /dev/null
+++ b/jni/openssl/crypto/aes/asm/aes-armv4.pl
@@ -0,0 +1,1134 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# AES for ARMv4
+
+# January 2007.
+#
+# The code uses a single 1K S-box and is >2 times faster than code
+# generated by gcc-3.4.1. This is thanks to a feature of the ARMv4 ISA
+# that allows a logical or arithmetic operation to be merged with a
+# shift or rotate in a single instruction, emitting the combined result
+# every cycle. The module is endian-neutral. Performance is ~42
+# cycles/byte for a 128-bit key [on a single-issue Xscale PXA250 core].
+
+# May 2007.
+#
+# AES_set_[en|de]crypt_key is added.
+
+# July 2010.
+#
+# Rescheduling for dual-issue pipeline resulted in 12% improvement on
+# Cortex A8 core and ~25 cycles per byte processed with 128-bit key.
+
+# February 2011.
+#
+# Profiler-assisted and platform-specific optimization resulted in 16%
+# improvement on Cortex A8 core and ~21.5 cycles per byte.
+
+while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
+open STDOUT,">$output";
+
+$s0="r0";
+$s1="r1";
+$s2="r2";
+$s3="r3";
+$t1="r4";
+$t2="r5";
+$t3="r6";
+$i1="r7";
+$i2="r8";
+$i3="r9";
+
+$tbl="r10";
+$key="r11";
+$rounds="r12";
+
+$code=<<___;
+#include "arm_arch.h"
+.text
+.code	32
+
+.type	AES_Te,%object
+.align	5
+AES_Te:
+.word	0xc66363a5, 0xf87c7c84, 0xee777799, 0xf67b7b8d
+.word	0xfff2f20d, 0xd66b6bbd, 0xde6f6fb1, 0x91c5c554
+.word	0x60303050, 0x02010103, 0xce6767a9, 0x562b2b7d
+.word	0xe7fefe19, 0xb5d7d762, 0x4dababe6, 0xec76769a
+.word	0x8fcaca45, 0x1f82829d, 0x89c9c940, 0xfa7d7d87
+.word	0xeffafa15, 0xb25959eb, 0x8e4747c9, 0xfbf0f00b
+.word	0x41adadec, 0xb3d4d467, 0x5fa2a2fd, 0x45afafea
+.word	0x239c9cbf, 0x53a4a4f7, 0xe4727296, 0x9bc0c05b
+.word	0x75b7b7c2, 0xe1fdfd1c, 0x3d9393ae, 0x4c26266a
+.word	0x6c36365a, 0x7e3f3f41, 0xf5f7f702, 0x83cccc4f
+.word	0x6834345c, 0x51a5a5f4, 0xd1e5e534, 0xf9f1f108
+.word	0xe2717193, 0xabd8d873, 0x62313153, 0x2a15153f
+.word	0x0804040c, 0x95c7c752, 0x46232365, 0x9dc3c35e
+.word	0x30181828, 0x379696a1, 0x0a05050f, 0x2f9a9ab5
+.word	0x0e070709, 0x24121236, 0x1b80809b, 0xdfe2e23d
+.word	0xcdebeb26, 0x4e272769, 0x7fb2b2cd, 0xea75759f
+.word	0x1209091b, 0x1d83839e, 0x582c2c74, 0x341a1a2e
+.word	0x361b1b2d, 0xdc6e6eb2, 0xb45a5aee, 0x5ba0a0fb
+.word	0xa45252f6, 0x763b3b4d, 0xb7d6d661, 0x7db3b3ce
+.word	0x5229297b, 0xdde3e33e, 0x5e2f2f71, 0x13848497
+.word	0xa65353f5, 0xb9d1d168, 0x00000000, 0xc1eded2c
+.word	0x40202060, 0xe3fcfc1f, 0x79b1b1c8, 0xb65b5bed
+.word	0xd46a6abe, 0x8dcbcb46, 0x67bebed9, 0x7239394b
+.word	0x944a4ade, 0x984c4cd4, 0xb05858e8, 0x85cfcf4a
+.word	0xbbd0d06b, 0xc5efef2a, 0x4faaaae5, 0xedfbfb16
+.word	0x864343c5, 0x9a4d4dd7, 0x66333355, 0x11858594
+.word	0x8a4545cf, 0xe9f9f910, 0x04020206, 0xfe7f7f81
+.word	0xa05050f0, 0x783c3c44, 0x259f9fba, 0x4ba8a8e3
+.word	0xa25151f3, 0x5da3a3fe, 0x804040c0, 0x058f8f8a
+.word	0x3f9292ad, 0x219d9dbc, 0x70383848, 0xf1f5f504
+.word	0x63bcbcdf, 0x77b6b6c1, 0xafdada75, 0x42212163
+.word	0x20101030, 0xe5ffff1a, 0xfdf3f30e, 0xbfd2d26d
+.word	0x81cdcd4c, 0x180c0c14, 0x26131335, 0xc3ecec2f
+.word	0xbe5f5fe1, 0x359797a2, 0x884444cc, 0x2e171739
+.word	0x93c4c457, 0x55a7a7f2, 0xfc7e7e82, 0x7a3d3d47
+.word	0xc86464ac, 0xba5d5de7, 0x3219192b, 0xe6737395
+.word	0xc06060a0, 0x19818198, 0x9e4f4fd1, 0xa3dcdc7f
+.word	0x44222266, 0x542a2a7e, 0x3b9090ab, 0x0b888883
+.word	0x8c4646ca, 0xc7eeee29, 0x6bb8b8d3, 0x2814143c
+.word	0xa7dede79, 0xbc5e5ee2, 0x160b0b1d, 0xaddbdb76
+.word	0xdbe0e03b, 0x64323256, 0x743a3a4e, 0x140a0a1e
+.word	0x924949db, 0x0c06060a, 0x4824246c, 0xb85c5ce4
+.word	0x9fc2c25d, 0xbdd3d36e, 0x43acacef, 0xc46262a6
+.word	0x399191a8, 0x319595a4, 0xd3e4e437, 0xf279798b
+.word	0xd5e7e732, 0x8bc8c843, 0x6e373759, 0xda6d6db7
+.word	0x018d8d8c, 0xb1d5d564, 0x9c4e4ed2, 0x49a9a9e0
+.word	0xd86c6cb4, 0xac5656fa, 0xf3f4f407, 0xcfeaea25
+.word	0xca6565af, 0xf47a7a8e, 0x47aeaee9, 0x10080818
+.word	0x6fbabad5, 0xf0787888, 0x4a25256f, 0x5c2e2e72
+.word	0x381c1c24, 0x57a6a6f1, 0x73b4b4c7, 0x97c6c651
+.word	0xcbe8e823, 0xa1dddd7c, 0xe874749c, 0x3e1f1f21
+.word	0x964b4bdd, 0x61bdbddc, 0x0d8b8b86, 0x0f8a8a85
+.word	0xe0707090, 0x7c3e3e42, 0x71b5b5c4, 0xcc6666aa
+.word	0x904848d8, 0x06030305, 0xf7f6f601, 0x1c0e0e12
+.word	0xc26161a3, 0x6a35355f, 0xae5757f9, 0x69b9b9d0
+.word	0x17868691, 0x99c1c158, 0x3a1d1d27, 0x279e9eb9
+.word	0xd9e1e138, 0xebf8f813, 0x2b9898b3, 0x22111133
+.word	0xd26969bb, 0xa9d9d970, 0x078e8e89, 0x339494a7
+.word	0x2d9b9bb6, 0x3c1e1e22, 0x15878792, 0xc9e9e920
+.word	0x87cece49, 0xaa5555ff, 0x50282878, 0xa5dfdf7a
+.word	0x038c8c8f, 0x59a1a1f8, 0x09898980, 0x1a0d0d17
+.word	0x65bfbfda, 0xd7e6e631, 0x844242c6, 0xd06868b8
+.word	0x824141c3, 0x299999b0, 0x5a2d2d77, 0x1e0f0f11
+.word	0x7bb0b0cb, 0xa85454fc, 0x6dbbbbd6, 0x2c16163a
+@ Te4[256]
+.byte	0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5
+.byte	0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76
+.byte	0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0
+.byte	0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0
+.byte	0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc
+.byte	0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15
+.byte	0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a
+.byte	0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75
+.byte	0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0
+.byte	0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84
+.byte	0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b
+.byte	0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf
+.byte	0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85
+.byte	0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8
+.byte	0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5
+.byte	0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2
+.byte	0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17
+.byte	0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73
+.byte	0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88
+.byte	0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb
+.byte	0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c
+.byte	0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79
+.byte	0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9
+.byte	0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08
+.byte	0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6
+.byte	0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a
+.byte	0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e
+.byte	0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e
+.byte	0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94
+.byte	0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf
+.byte	0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68
+.byte	0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
+@ rcon[]
+.word	0x01000000, 0x02000000, 0x04000000, 0x08000000
+.word	0x10000000, 0x20000000, 0x40000000, 0x80000000
+.word	0x1B000000, 0x36000000, 0, 0, 0, 0, 0, 0
+.size	AES_Te,.-AES_Te
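+
+@ Layout note: each word of AES_Te packs one S-box output premultiplied by
+@ the MixColumns coefficients, Te0[x] = (02*S[x])<<24 | S[x]<<16 | S[x]<<8
+@ | (03*S[x]); e.g. S[0x00]=0x63 gives the 0xc66363a5 above.  The rotated
+@ tables Te1..Te3 are not stored; the code derives them with the ",ror#"
+@ operand on the combining eor instructions below.  Te4[256] is the plain
+@ S-box, used for the last round, and the trailing words are the key
+@ schedule round constants (rcon).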
+
+@ void AES_encrypt(const unsigned char *in, unsigned char *out,
+@ 		 const AES_KEY *key) {
+.global AES_encrypt
+.type   AES_encrypt,%function
+.align	5
+AES_encrypt:
+	sub	r3,pc,#8		@ AES_encrypt
+	stmdb   sp!,{r1,r4-r12,lr}
+	mov	$rounds,r0		@ inp
+	mov	$key,r2
+	sub	$tbl,r3,#AES_encrypt-AES_Te	@ Te
+#if __ARM_ARCH__<7
+	ldrb	$s0,[$rounds,#3]	@ load input data in endian-neutral
+	ldrb	$t1,[$rounds,#2]	@ manner...
+	ldrb	$t2,[$rounds,#1]
+	ldrb	$t3,[$rounds,#0]
+	orr	$s0,$s0,$t1,lsl#8
+	ldrb	$s1,[$rounds,#7]
+	orr	$s0,$s0,$t2,lsl#16
+	ldrb	$t1,[$rounds,#6]
+	orr	$s0,$s0,$t3,lsl#24
+	ldrb	$t2,[$rounds,#5]
+	ldrb	$t3,[$rounds,#4]
+	orr	$s1,$s1,$t1,lsl#8
+	ldrb	$s2,[$rounds,#11]
+	orr	$s1,$s1,$t2,lsl#16
+	ldrb	$t1,[$rounds,#10]
+	orr	$s1,$s1,$t3,lsl#24
+	ldrb	$t2,[$rounds,#9]
+	ldrb	$t3,[$rounds,#8]
+	orr	$s2,$s2,$t1,lsl#8
+	ldrb	$s3,[$rounds,#15]
+	orr	$s2,$s2,$t2,lsl#16
+	ldrb	$t1,[$rounds,#14]
+	orr	$s2,$s2,$t3,lsl#24
+	ldrb	$t2,[$rounds,#13]
+	ldrb	$t3,[$rounds,#12]
+	orr	$s3,$s3,$t1,lsl#8
+	orr	$s3,$s3,$t2,lsl#16
+	orr	$s3,$s3,$t3,lsl#24
+#else
+	ldr	$s0,[$rounds,#0]
+	ldr	$s1,[$rounds,#4]
+	ldr	$s2,[$rounds,#8]
+	ldr	$s3,[$rounds,#12]
+#ifdef __ARMEL__
+	rev	$s0,$s0
+	rev	$s1,$s1
+	rev	$s2,$s2
+	rev	$s3,$s3
+#endif
+#endif
+	bl	_armv4_AES_encrypt
+
+	ldr	$rounds,[sp],#4		@ pop out
+#if __ARM_ARCH__>=7
+#ifdef __ARMEL__
+	rev	$s0,$s0
+	rev	$s1,$s1
+	rev	$s2,$s2
+	rev	$s3,$s3
+#endif
+	str	$s0,[$rounds,#0]
+	str	$s1,[$rounds,#4]
+	str	$s2,[$rounds,#8]
+	str	$s3,[$rounds,#12]
+#else
+	mov	$t1,$s0,lsr#24		@ write output in endian-neutral
+	mov	$t2,$s0,lsr#16		@ manner...
+	mov	$t3,$s0,lsr#8
+	strb	$t1,[$rounds,#0]
+	strb	$t2,[$rounds,#1]
+	mov	$t1,$s1,lsr#24
+	strb	$t3,[$rounds,#2]
+	mov	$t2,$s1,lsr#16
+	strb	$s0,[$rounds,#3]
+	mov	$t3,$s1,lsr#8
+	strb	$t1,[$rounds,#4]
+	strb	$t2,[$rounds,#5]
+	mov	$t1,$s2,lsr#24
+	strb	$t3,[$rounds,#6]
+	mov	$t2,$s2,lsr#16
+	strb	$s1,[$rounds,#7]
+	mov	$t3,$s2,lsr#8
+	strb	$t1,[$rounds,#8]
+	strb	$t2,[$rounds,#9]
+	mov	$t1,$s3,lsr#24
+	strb	$t3,[$rounds,#10]
+	mov	$t2,$s3,lsr#16
+	strb	$s2,[$rounds,#11]
+	mov	$t3,$s3,lsr#8
+	strb	$t1,[$rounds,#12]
+	strb	$t2,[$rounds,#13]
+	strb	$t3,[$rounds,#14]
+	strb	$s3,[$rounds,#15]
+#endif
+#if __ARM_ARCH__>=5
+	ldmia	sp!,{r4-r12,pc}
+#else
+	ldmia   sp!,{r4-r12,lr}
+	tst	lr,#1
+	moveq	pc,lr			@ be binary compatible with V4, yet
+	bx	lr			@ interoperable with Thumb ISA:-)
+#endif
+.size	AES_encrypt,.-AES_encrypt
+
+.type   _armv4_AES_encrypt,%function
+.align	2
+_armv4_AES_encrypt:
+	str	lr,[sp,#-4]!		@ push lr
+	ldmia	$key!,{$t1-$i1}
+	eor	$s0,$s0,$t1
+	ldr	$rounds,[$key,#240-16]
+	eor	$s1,$s1,$t2
+	eor	$s2,$s2,$t3
+	eor	$s3,$s3,$i1
+	sub	$rounds,$rounds,#1
+	mov	lr,#255
+
+	and	$i1,lr,$s0
+	and	$i2,lr,$s0,lsr#8
+	and	$i3,lr,$s0,lsr#16
+	mov	$s0,$s0,lsr#24
+.Lenc_loop:
+	ldr	$t1,[$tbl,$i1,lsl#2]	@ Te3[s0>>0]
+	and	$i1,lr,$s1,lsr#16	@ i0
+	ldr	$t2,[$tbl,$i2,lsl#2]	@ Te2[s0>>8]
+	and	$i2,lr,$s1
+	ldr	$t3,[$tbl,$i3,lsl#2]	@ Te1[s0>>16]
+	and	$i3,lr,$s1,lsr#8
+	ldr	$s0,[$tbl,$s0,lsl#2]	@ Te0[s0>>24]
+	mov	$s1,$s1,lsr#24
+
+	ldr	$i1,[$tbl,$i1,lsl#2]	@ Te1[s1>>16]
+	ldr	$i2,[$tbl,$i2,lsl#2]	@ Te3[s1>>0]
+	ldr	$i3,[$tbl,$i3,lsl#2]	@ Te2[s1>>8]
+	eor	$s0,$s0,$i1,ror#8
+	ldr	$s1,[$tbl,$s1,lsl#2]	@ Te0[s1>>24]
+	and	$i1,lr,$s2,lsr#8	@ i0
+	eor	$t2,$t2,$i2,ror#8
+	and	$i2,lr,$s2,lsr#16	@ i1
+	eor	$t3,$t3,$i3,ror#8
+	and	$i3,lr,$s2
+	ldr	$i1,[$tbl,$i1,lsl#2]	@ Te2[s2>>8]
+	eor	$s1,$s1,$t1,ror#24
+	ldr	$i2,[$tbl,$i2,lsl#2]	@ Te1[s2>>16]
+	mov	$s2,$s2,lsr#24
+
+	ldr	$i3,[$tbl,$i3,lsl#2]	@ Te3[s2>>0]
+	eor	$s0,$s0,$i1,ror#16
+	ldr	$s2,[$tbl,$s2,lsl#2]	@ Te0[s2>>24]
+	and	$i1,lr,$s3		@ i0
+	eor	$s1,$s1,$i2,ror#8
+	and	$i2,lr,$s3,lsr#8	@ i1
+	eor	$t3,$t3,$i3,ror#16
+	and	$i3,lr,$s3,lsr#16	@ i2
+	ldr	$i1,[$tbl,$i1,lsl#2]	@ Te3[s3>>0]
+	eor	$s2,$s2,$t2,ror#16
+	ldr	$i2,[$tbl,$i2,lsl#2]	@ Te2[s3>>8]
+	mov	$s3,$s3,lsr#24
+
+	ldr	$i3,[$tbl,$i3,lsl#2]	@ Te1[s3>>16]
+	eor	$s0,$s0,$i1,ror#24
+	ldr	$i1,[$key],#16
+	eor	$s1,$s1,$i2,ror#16
+	ldr	$s3,[$tbl,$s3,lsl#2]	@ Te0[s3>>24]
+	eor	$s2,$s2,$i3,ror#8
+	ldr	$t1,[$key,#-12]
+	eor	$s3,$s3,$t3,ror#8
+
+	ldr	$t2,[$key,#-8]
+	eor	$s0,$s0,$i1
+	ldr	$t3,[$key,#-4]
+	and	$i1,lr,$s0
+	eor	$s1,$s1,$t1
+	and	$i2,lr,$s0,lsr#8
+	eor	$s2,$s2,$t2
+	and	$i3,lr,$s0,lsr#16
+	eor	$s3,$s3,$t3
+	mov	$s0,$s0,lsr#24
+
+	subs	$rounds,$rounds,#1
+	bne	.Lenc_loop
+
+	add	$tbl,$tbl,#2
+
+	ldrb	$t1,[$tbl,$i1,lsl#2]	@ Te4[s0>>0]
+	and	$i1,lr,$s1,lsr#16	@ i0
+	ldrb	$t2,[$tbl,$i2,lsl#2]	@ Te4[s0>>8]
+	and	$i2,lr,$s1
+	ldrb	$t3,[$tbl,$i3,lsl#2]	@ Te4[s0>>16]
+	and	$i3,lr,$s1,lsr#8
+	ldrb	$s0,[$tbl,$s0,lsl#2]	@ Te4[s0>>24]
+	mov	$s1,$s1,lsr#24
+
+	ldrb	$i1,[$tbl,$i1,lsl#2]	@ Te4[s1>>16]
+	ldrb	$i2,[$tbl,$i2,lsl#2]	@ Te4[s1>>0]
+	ldrb	$i3,[$tbl,$i3,lsl#2]	@ Te4[s1>>8]
+	eor	$s0,$i1,$s0,lsl#8
+	ldrb	$s1,[$tbl,$s1,lsl#2]	@ Te4[s1>>24]
+	and	$i1,lr,$s2,lsr#8	@ i0
+	eor	$t2,$i2,$t2,lsl#8
+	and	$i2,lr,$s2,lsr#16	@ i1
+	eor	$t3,$i3,$t3,lsl#8
+	and	$i3,lr,$s2
+	ldrb	$i1,[$tbl,$i1,lsl#2]	@ Te4[s2>>8]
+	eor	$s1,$t1,$s1,lsl#24
+	ldrb	$i2,[$tbl,$i2,lsl#2]	@ Te4[s2>>16]
+	mov	$s2,$s2,lsr#24
+
+	ldrb	$i3,[$tbl,$i3,lsl#2]	@ Te4[s2>>0]
+	eor	$s0,$i1,$s0,lsl#8
+	ldrb	$s2,[$tbl,$s2,lsl#2]	@ Te4[s2>>24]
+	and	$i1,lr,$s3		@ i0
+	eor	$s1,$s1,$i2,lsl#16
+	and	$i2,lr,$s3,lsr#8	@ i1
+	eor	$t3,$i3,$t3,lsl#8
+	and	$i3,lr,$s3,lsr#16	@ i2
+	ldrb	$i1,[$tbl,$i1,lsl#2]	@ Te4[s3>>0]
+	eor	$s2,$t2,$s2,lsl#24
+	ldrb	$i2,[$tbl,$i2,lsl#2]	@ Te4[s3>>8]
+	mov	$s3,$s3,lsr#24
+
+	ldrb	$i3,[$tbl,$i3,lsl#2]	@ Te4[s3>>16]
+	eor	$s0,$i1,$s0,lsl#8
+	ldr	$i1,[$key,#0]
+	ldrb	$s3,[$tbl,$s3,lsl#2]	@ Te4[s3>>24]
+	eor	$s1,$s1,$i2,lsl#8
+	ldr	$t1,[$key,#4]
+	eor	$s2,$s2,$i3,lsl#16
+	ldr	$t2,[$key,#8]
+	eor	$s3,$t3,$s3,lsl#24
+	ldr	$t3,[$key,#12]
+
+	eor	$s0,$s0,$i1
+	eor	$s1,$s1,$t1
+	eor	$s2,$s2,$t2
+	eor	$s3,$s3,$t3
+
+	sub	$tbl,$tbl,#2
+	ldr	pc,[sp],#4		@ pop and return
+.size	_armv4_AES_encrypt,.-_armv4_AES_encrypt
+
+.global private_AES_set_encrypt_key
+.type   private_AES_set_encrypt_key,%function
+.align	5
+private_AES_set_encrypt_key:
+_armv4_AES_set_encrypt_key:
+	sub	r3,pc,#8		@ AES_set_encrypt_key
+	teq	r0,#0
+	moveq	r0,#-1
+	beq	.Labrt
+	teq	r2,#0
+	moveq	r0,#-1
+	beq	.Labrt
+
+	teq	r1,#128
+	beq	.Lok
+	teq	r1,#192
+	beq	.Lok
+	teq	r1,#256
+	movne	r0,#-1
+	bne	.Labrt
+
+.Lok:	stmdb   sp!,{r4-r12,lr}
+	sub	$tbl,r3,#_armv4_AES_set_encrypt_key-AES_Te-1024	@ Te4
+
+	mov	$rounds,r0		@ inp
+	mov	lr,r1			@ bits
+	mov	$key,r2			@ key
+
+#if __ARM_ARCH__<7
+	ldrb	$s0,[$rounds,#3]	@ load input data in endian-neutral
+	ldrb	$t1,[$rounds,#2]	@ manner...
+	ldrb	$t2,[$rounds,#1]
+	ldrb	$t3,[$rounds,#0]
+	orr	$s0,$s0,$t1,lsl#8
+	ldrb	$s1,[$rounds,#7]
+	orr	$s0,$s0,$t2,lsl#16
+	ldrb	$t1,[$rounds,#6]
+	orr	$s0,$s0,$t3,lsl#24
+	ldrb	$t2,[$rounds,#5]
+	ldrb	$t3,[$rounds,#4]
+	orr	$s1,$s1,$t1,lsl#8
+	ldrb	$s2,[$rounds,#11]
+	orr	$s1,$s1,$t2,lsl#16
+	ldrb	$t1,[$rounds,#10]
+	orr	$s1,$s1,$t3,lsl#24
+	ldrb	$t2,[$rounds,#9]
+	ldrb	$t3,[$rounds,#8]
+	orr	$s2,$s2,$t1,lsl#8
+	ldrb	$s3,[$rounds,#15]
+	orr	$s2,$s2,$t2,lsl#16
+	ldrb	$t1,[$rounds,#14]
+	orr	$s2,$s2,$t3,lsl#24
+	ldrb	$t2,[$rounds,#13]
+	ldrb	$t3,[$rounds,#12]
+	orr	$s3,$s3,$t1,lsl#8
+	str	$s0,[$key],#16
+	orr	$s3,$s3,$t2,lsl#16
+	str	$s1,[$key,#-12]
+	orr	$s3,$s3,$t3,lsl#24
+	str	$s2,[$key,#-8]
+	str	$s3,[$key,#-4]
+#else
+	ldr	$s0,[$rounds,#0]
+	ldr	$s1,[$rounds,#4]
+	ldr	$s2,[$rounds,#8]
+	ldr	$s3,[$rounds,#12]
+#ifdef __ARMEL__
+	rev	$s0,$s0
+	rev	$s1,$s1
+	rev	$s2,$s2
+	rev	$s3,$s3
+#endif
+	str	$s0,[$key],#16
+	str	$s1,[$key,#-12]
+	str	$s2,[$key,#-8]
+	str	$s3,[$key,#-4]
+#endif
+
+	teq	lr,#128
+	bne	.Lnot128
+	mov	$rounds,#10
+	str	$rounds,[$key,#240-16]
+	add	$t3,$tbl,#256			@ rcon
+	mov	lr,#255
+
+.L128_loop:
+	and	$t2,lr,$s3,lsr#24
+	and	$i1,lr,$s3,lsr#16
+	ldrb	$t2,[$tbl,$t2]
+	and	$i2,lr,$s3,lsr#8
+	ldrb	$i1,[$tbl,$i1]
+	and	$i3,lr,$s3
+	ldrb	$i2,[$tbl,$i2]
+	orr	$t2,$t2,$i1,lsl#24
+	ldrb	$i3,[$tbl,$i3]
+	orr	$t2,$t2,$i2,lsl#16
+	ldr	$t1,[$t3],#4			@ rcon[i++]
+	orr	$t2,$t2,$i3,lsl#8
+	eor	$t2,$t2,$t1
+	eor	$s0,$s0,$t2			@ rk[4]=rk[0]^...
+	eor	$s1,$s1,$s0			@ rk[5]=rk[1]^rk[4]
+	str	$s0,[$key],#16
+	eor	$s2,$s2,$s1			@ rk[6]=rk[2]^rk[5]
+	str	$s1,[$key,#-12]
+	eor	$s3,$s3,$s2			@ rk[7]=rk[3]^rk[6]
+	str	$s2,[$key,#-8]
+	subs	$rounds,$rounds,#1
+	str	$s3,[$key,#-4]
+	bne	.L128_loop
+	sub	r2,$key,#176
+	b	.Ldone
+
+.Lnot128:
+#if __ARM_ARCH__<7
+	ldrb	$i2,[$rounds,#19]
+	ldrb	$t1,[$rounds,#18]
+	ldrb	$t2,[$rounds,#17]
+	ldrb	$t3,[$rounds,#16]
+	orr	$i2,$i2,$t1,lsl#8
+	ldrb	$i3,[$rounds,#23]
+	orr	$i2,$i2,$t2,lsl#16
+	ldrb	$t1,[$rounds,#22]
+	orr	$i2,$i2,$t3,lsl#24
+	ldrb	$t2,[$rounds,#21]
+	ldrb	$t3,[$rounds,#20]
+	orr	$i3,$i3,$t1,lsl#8
+	orr	$i3,$i3,$t2,lsl#16
+	str	$i2,[$key],#8
+	orr	$i3,$i3,$t3,lsl#24
+	str	$i3,[$key,#-4]
+#else
+	ldr	$i2,[$rounds,#16]
+	ldr	$i3,[$rounds,#20]
+#ifdef __ARMEL__
+	rev	$i2,$i2
+	rev	$i3,$i3
+#endif
+	str	$i2,[$key],#8
+	str	$i3,[$key,#-4]
+#endif
+
+	teq	lr,#192
+	bne	.Lnot192
+	mov	$rounds,#12
+	str	$rounds,[$key,#240-24]
+	add	$t3,$tbl,#256			@ rcon
+	mov	lr,#255
+	mov	$rounds,#8
+
+.L192_loop:
+	and	$t2,lr,$i3,lsr#24
+	and	$i1,lr,$i3,lsr#16
+	ldrb	$t2,[$tbl,$t2]
+	and	$i2,lr,$i3,lsr#8
+	ldrb	$i1,[$tbl,$i1]
+	and	$i3,lr,$i3
+	ldrb	$i2,[$tbl,$i2]
+	orr	$t2,$t2,$i1,lsl#24
+	ldrb	$i3,[$tbl,$i3]
+	orr	$t2,$t2,$i2,lsl#16
+	ldr	$t1,[$t3],#4			@ rcon[i++]
+	orr	$t2,$t2,$i3,lsl#8
+	eor	$i3,$t2,$t1
+	eor	$s0,$s0,$i3			@ rk[6]=rk[0]^...
+	eor	$s1,$s1,$s0			@ rk[7]=rk[1]^rk[6]
+	str	$s0,[$key],#24
+	eor	$s2,$s2,$s1			@ rk[8]=rk[2]^rk[7]
+	str	$s1,[$key,#-20]
+	eor	$s3,$s3,$s2			@ rk[9]=rk[3]^rk[8]
+	str	$s2,[$key,#-16]
+	subs	$rounds,$rounds,#1
+	str	$s3,[$key,#-12]
+	subeq	r2,$key,#216
+	beq	.Ldone
+
+	ldr	$i1,[$key,#-32]
+	ldr	$i2,[$key,#-28]
+	eor	$i1,$i1,$s3			@ rk[10]=rk[4]^rk[9]
+	eor	$i3,$i2,$i1			@ rk[11]=rk[5]^rk[10]
+	str	$i1,[$key,#-8]
+	str	$i3,[$key,#-4]
+	b	.L192_loop
+
+.Lnot192:
+#if __ARM_ARCH__<7
+	ldrb	$i2,[$rounds,#27]
+	ldrb	$t1,[$rounds,#26]
+	ldrb	$t2,[$rounds,#25]
+	ldrb	$t3,[$rounds,#24]
+	orr	$i2,$i2,$t1,lsl#8
+	ldrb	$i3,[$rounds,#31]
+	orr	$i2,$i2,$t2,lsl#16
+	ldrb	$t1,[$rounds,#30]
+	orr	$i2,$i2,$t3,lsl#24
+	ldrb	$t2,[$rounds,#29]
+	ldrb	$t3,[$rounds,#28]
+	orr	$i3,$i3,$t1,lsl#8
+	orr	$i3,$i3,$t2,lsl#16
+	str	$i2,[$key],#8
+	orr	$i3,$i3,$t3,lsl#24
+	str	$i3,[$key,#-4]
+#else
+	ldr	$i2,[$rounds,#24]
+	ldr	$i3,[$rounds,#28]
+#ifdef __ARMEL__
+	rev	$i2,$i2
+	rev	$i3,$i3
+#endif
+	str	$i2,[$key],#8
+	str	$i3,[$key,#-4]
+#endif
+
+	mov	$rounds,#14
+	str	$rounds,[$key,#240-32]
+	add	$t3,$tbl,#256			@ rcon
+	mov	lr,#255
+	mov	$rounds,#7
+
+.L256_loop:
+	and	$t2,lr,$i3,lsr#24
+	and	$i1,lr,$i3,lsr#16
+	ldrb	$t2,[$tbl,$t2]
+	and	$i2,lr,$i3,lsr#8
+	ldrb	$i1,[$tbl,$i1]
+	and	$i3,lr,$i3
+	ldrb	$i2,[$tbl,$i2]
+	orr	$t2,$t2,$i1,lsl#24
+	ldrb	$i3,[$tbl,$i3]
+	orr	$t2,$t2,$i2,lsl#16
+	ldr	$t1,[$t3],#4			@ rcon[i++]
+	orr	$t2,$t2,$i3,lsl#8
+	eor	$i3,$t2,$t1
+	eor	$s0,$s0,$i3			@ rk[8]=rk[0]^...
+	eor	$s1,$s1,$s0			@ rk[9]=rk[1]^rk[8]
+	str	$s0,[$key],#32
+	eor	$s2,$s2,$s1			@ rk[10]=rk[2]^rk[9]
+	str	$s1,[$key,#-28]
+	eor	$s3,$s3,$s2			@ rk[11]=rk[3]^rk[10]
+	str	$s2,[$key,#-24]
+	subs	$rounds,$rounds,#1
+	str	$s3,[$key,#-20]
+	subeq	r2,$key,#256
+	beq	.Ldone
+
+	and	$t2,lr,$s3
+	and	$i1,lr,$s3,lsr#8
+	ldrb	$t2,[$tbl,$t2]
+	and	$i2,lr,$s3,lsr#16
+	ldrb	$i1,[$tbl,$i1]
+	and	$i3,lr,$s3,lsr#24
+	ldrb	$i2,[$tbl,$i2]
+	orr	$t2,$t2,$i1,lsl#8
+	ldrb	$i3,[$tbl,$i3]
+	orr	$t2,$t2,$i2,lsl#16
+	ldr	$t1,[$key,#-48]
+	orr	$t2,$t2,$i3,lsl#24
+
+	ldr	$i1,[$key,#-44]
+	ldr	$i2,[$key,#-40]
+	eor	$t1,$t1,$t2			@ rk[12]=rk[4]^...
+	ldr	$i3,[$key,#-36]
+	eor	$i1,$i1,$t1			@ rk[13]=rk[5]^rk[12]
+	str	$t1,[$key,#-16]
+	eor	$i2,$i2,$i1			@ rk[14]=rk[6]^rk[13]
+	str	$i1,[$key,#-12]
+	eor	$i3,$i3,$i2			@ rk[15]=rk[7]^rk[14]
+	str	$i2,[$key,#-8]
+	str	$i3,[$key,#-4]
+	b	.L256_loop
+
+.Ldone:	mov	r0,#0
+	ldmia   sp!,{r4-r12,lr}
+.Labrt:	tst	lr,#1
+	moveq	pc,lr			@ be binary compatible with V4, yet
+	bx	lr			@ interoperable with Thumb ISA:-)
+.size	private_AES_set_encrypt_key,.-private_AES_set_encrypt_key
+
+.global private_AES_set_decrypt_key
+.type   private_AES_set_decrypt_key,%function
+.align	5
+private_AES_set_decrypt_key:
+	str	lr,[sp,#-4]!            @ push lr
+	bl	_armv4_AES_set_encrypt_key
+	teq	r0,#0
+	ldrne	lr,[sp],#4              @ pop lr
+	bne	.Labrt
+
+	stmdb   sp!,{r4-r12}
+
+	ldr	$rounds,[r2,#240]	@ AES_set_encrypt_key preserves r2,
+	mov	$key,r2			@ which is AES_KEY *key
+	mov	$i1,r2
+	add	$i2,r2,$rounds,lsl#4
+
+.Linv:	ldr	$s0,[$i1]
+	ldr	$s1,[$i1,#4]
+	ldr	$s2,[$i1,#8]
+	ldr	$s3,[$i1,#12]
+	ldr	$t1,[$i2]
+	ldr	$t2,[$i2,#4]
+	ldr	$t3,[$i2,#8]
+	ldr	$i3,[$i2,#12]
+	str	$s0,[$i2],#-16
+	str	$s1,[$i2,#16+4]
+	str	$s2,[$i2,#16+8]
+	str	$s3,[$i2,#16+12]
+	str	$t1,[$i1],#16
+	str	$t2,[$i1,#-12]
+	str	$t3,[$i1,#-8]
+	str	$i3,[$i1,#-4]
+	teq	$i1,$i2
+	bne	.Linv
+___
+$mask80=$i1;
+$mask1b=$i2;
+$mask7f=$i3;
+$code.=<<___;
+	ldr	$s0,[$key,#16]!		@ prefetch tp1
+	mov	$mask80,#0x80
+	mov	$mask1b,#0x1b
+	orr	$mask80,$mask80,#0x8000
+	orr	$mask1b,$mask1b,#0x1b00
+	orr	$mask80,$mask80,$mask80,lsl#16
+	orr	$mask1b,$mask1b,$mask1b,lsl#16
+	sub	$rounds,$rounds,#1
+	mvn	$mask7f,$mask80
+	mov	$rounds,$rounds,lsl#2	@ (rounds-1)*4
+
+.Lmix:	and	$t1,$s0,$mask80
+	and	$s1,$s0,$mask7f
+	sub	$t1,$t1,$t1,lsr#7
+	and	$t1,$t1,$mask1b
+	eor	$s1,$t1,$s1,lsl#1	@ tp2
+
+	and	$t1,$s1,$mask80
+	and	$s2,$s1,$mask7f
+	sub	$t1,$t1,$t1,lsr#7
+	and	$t1,$t1,$mask1b
+	eor	$s2,$t1,$s2,lsl#1	@ tp4
+
+	and	$t1,$s2,$mask80
+	and	$s3,$s2,$mask7f
+	sub	$t1,$t1,$t1,lsr#7
+	and	$t1,$t1,$mask1b
+	eor	$s3,$t1,$s3,lsl#1	@ tp8
+
+	eor	$t1,$s1,$s2
+	eor	$t2,$s0,$s3		@ tp9
+	eor	$t1,$t1,$s3		@ tpe
+	eor	$t1,$t1,$s1,ror#24
+	eor	$t1,$t1,$t2,ror#24	@ ^= ROTATE(tpb=tp9^tp2,8)
+	eor	$t1,$t1,$s2,ror#16
+	eor	$t1,$t1,$t2,ror#16	@ ^= ROTATE(tpd=tp9^tp4,16)
+	eor	$t1,$t1,$t2,ror#8	@ ^= ROTATE(tp9,24)
+
+	ldr	$s0,[$key,#4]		@ prefetch tp1
+	str	$t1,[$key],#4
+	subs	$rounds,$rounds,#1
+	bne	.Lmix
+
+	mov	r0,#0
+#if __ARM_ARCH__>=5
+	ldmia	sp!,{r4-r12,pc}
+#else
+	ldmia   sp!,{r4-r12,lr}
+	tst	lr,#1
+	moveq	pc,lr			@ be binary compatible with V4, yet
+	bx	lr			@ interoperable with Thumb ISA:-)
+#endif
+.size	private_AES_set_decrypt_key,.-private_AES_set_decrypt_key
+
+.type	AES_Td,%object
+.align	5
+AES_Td:
+.word	0x51f4a750, 0x7e416553, 0x1a17a4c3, 0x3a275e96
+.word	0x3bab6bcb, 0x1f9d45f1, 0xacfa58ab, 0x4be30393
+.word	0x2030fa55, 0xad766df6, 0x88cc7691, 0xf5024c25
+.word	0x4fe5d7fc, 0xc52acbd7, 0x26354480, 0xb562a38f
+.word	0xdeb15a49, 0x25ba1b67, 0x45ea0e98, 0x5dfec0e1
+.word	0xc32f7502, 0x814cf012, 0x8d4697a3, 0x6bd3f9c6
+.word	0x038f5fe7, 0x15929c95, 0xbf6d7aeb, 0x955259da
+.word	0xd4be832d, 0x587421d3, 0x49e06929, 0x8ec9c844
+.word	0x75c2896a, 0xf48e7978, 0x99583e6b, 0x27b971dd
+.word	0xbee14fb6, 0xf088ad17, 0xc920ac66, 0x7dce3ab4
+.word	0x63df4a18, 0xe51a3182, 0x97513360, 0x62537f45
+.word	0xb16477e0, 0xbb6bae84, 0xfe81a01c, 0xf9082b94
+.word	0x70486858, 0x8f45fd19, 0x94de6c87, 0x527bf8b7
+.word	0xab73d323, 0x724b02e2, 0xe31f8f57, 0x6655ab2a
+.word	0xb2eb2807, 0x2fb5c203, 0x86c57b9a, 0xd33708a5
+.word	0x302887f2, 0x23bfa5b2, 0x02036aba, 0xed16825c
+.word	0x8acf1c2b, 0xa779b492, 0xf307f2f0, 0x4e69e2a1
+.word	0x65daf4cd, 0x0605bed5, 0xd134621f, 0xc4a6fe8a
+.word	0x342e539d, 0xa2f355a0, 0x058ae132, 0xa4f6eb75
+.word	0x0b83ec39, 0x4060efaa, 0x5e719f06, 0xbd6e1051
+.word	0x3e218af9, 0x96dd063d, 0xdd3e05ae, 0x4de6bd46
+.word	0x91548db5, 0x71c45d05, 0x0406d46f, 0x605015ff
+.word	0x1998fb24, 0xd6bde997, 0x894043cc, 0x67d99e77
+.word	0xb0e842bd, 0x07898b88, 0xe7195b38, 0x79c8eedb
+.word	0xa17c0a47, 0x7c420fe9, 0xf8841ec9, 0x00000000
+.word	0x09808683, 0x322bed48, 0x1e1170ac, 0x6c5a724e
+.word	0xfd0efffb, 0x0f853856, 0x3daed51e, 0x362d3927
+.word	0x0a0fd964, 0x685ca621, 0x9b5b54d1, 0x24362e3a
+.word	0x0c0a67b1, 0x9357e70f, 0xb4ee96d2, 0x1b9b919e
+.word	0x80c0c54f, 0x61dc20a2, 0x5a774b69, 0x1c121a16
+.word	0xe293ba0a, 0xc0a02ae5, 0x3c22e043, 0x121b171d
+.word	0x0e090d0b, 0xf28bc7ad, 0x2db6a8b9, 0x141ea9c8
+.word	0x57f11985, 0xaf75074c, 0xee99ddbb, 0xa37f60fd
+.word	0xf701269f, 0x5c72f5bc, 0x44663bc5, 0x5bfb7e34
+.word	0x8b432976, 0xcb23c6dc, 0xb6edfc68, 0xb8e4f163
+.word	0xd731dcca, 0x42638510, 0x13972240, 0x84c61120
+.word	0x854a247d, 0xd2bb3df8, 0xaef93211, 0xc729a16d
+.word	0x1d9e2f4b, 0xdcb230f3, 0x0d8652ec, 0x77c1e3d0
+.word	0x2bb3166c, 0xa970b999, 0x119448fa, 0x47e96422
+.word	0xa8fc8cc4, 0xa0f03f1a, 0x567d2cd8, 0x223390ef
+.word	0x87494ec7, 0xd938d1c1, 0x8ccaa2fe, 0x98d40b36
+.word	0xa6f581cf, 0xa57ade28, 0xdab78e26, 0x3fadbfa4
+.word	0x2c3a9de4, 0x5078920d, 0x6a5fcc9b, 0x547e4662
+.word	0xf68d13c2, 0x90d8b8e8, 0x2e39f75e, 0x82c3aff5
+.word	0x9f5d80be, 0x69d0937c, 0x6fd52da9, 0xcf2512b3
+.word	0xc8ac993b, 0x10187da7, 0xe89c636e, 0xdb3bbb7b
+.word	0xcd267809, 0x6e5918f4, 0xec9ab701, 0x834f9aa8
+.word	0xe6956e65, 0xaaffe67e, 0x21bccf08, 0xef15e8e6
+.word	0xbae79bd9, 0x4a6f36ce, 0xea9f09d4, 0x29b07cd6
+.word	0x31a4b2af, 0x2a3f2331, 0xc6a59430, 0x35a266c0
+.word	0x744ebc37, 0xfc82caa6, 0xe090d0b0, 0x33a7d815
+.word	0xf104984a, 0x41ecdaf7, 0x7fcd500e, 0x1791f62f
+.word	0x764dd68d, 0x43efb04d, 0xccaa4d54, 0xe49604df
+.word	0x9ed1b5e3, 0x4c6a881b, 0xc12c1fb8, 0x4665517f
+.word	0x9d5eea04, 0x018c355d, 0xfa877473, 0xfb0b412e
+.word	0xb3671d5a, 0x92dbd252, 0xe9105633, 0x6dd64713
+.word	0x9ad7618c, 0x37a10c7a, 0x59f8148e, 0xeb133c89
+.word	0xcea927ee, 0xb761c935, 0xe11ce5ed, 0x7a47b13c
+.word	0x9cd2df59, 0x55f2733f, 0x1814ce79, 0x73c737bf
+.word	0x53f7cdea, 0x5ffdaa5b, 0xdf3d6f14, 0x7844db86
+.word	0xcaaff381, 0xb968c43e, 0x3824342c, 0xc2a3405f
+.word	0x161dc372, 0xbce2250c, 0x283c498b, 0xff0d9541
+.word	0x39a80171, 0x080cb3de, 0xd8b4e49c, 0x6456c190
+.word	0x7bcb8461, 0xd532b670, 0x486c5c74, 0xd0b85742
+@ Td4[256]
+.byte	0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38
+.byte	0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb
+.byte	0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87
+.byte	0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb
+.byte	0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d
+.byte	0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e
+.byte	0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2
+.byte	0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25
+.byte	0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16
+.byte	0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92
+.byte	0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda
+.byte	0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84
+.byte	0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a
+.byte	0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06
+.byte	0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02
+.byte	0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b
+.byte	0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea
+.byte	0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73
+.byte	0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85
+.byte	0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e
+.byte	0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89
+.byte	0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b
+.byte	0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20
+.byte	0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4
+.byte	0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31
+.byte	0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f
+.byte	0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d
+.byte	0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef
+.byte	0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0
+.byte	0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61
+.byte	0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26
+.byte	0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
+.size	AES_Td,.-AES_Td
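+
+@ Layout note: Td0[x] packs the InvMixColumns multiples of the inverse
+@ S-box, Td0[x] = (0e*Si[x])<<24 | (09*Si[x])<<16 | (0d*Si[x])<<8 |
+@ (0b*Si[x]); e.g. Si[0x00]=0x52 gives the 0x51f4a750 above.  Td4[256] is
+@ the plain inverse S-box, used for the last decryption round.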
+
+@ void AES_decrypt(const unsigned char *in, unsigned char *out,
+@ 		 const AES_KEY *key) {
+.global AES_decrypt
+.type   AES_decrypt,%function
+.align	5
+AES_decrypt:
+	sub	r3,pc,#8		@ AES_decrypt
+	stmdb   sp!,{r1,r4-r12,lr}
+	mov	$rounds,r0		@ inp
+	mov	$key,r2
+	sub	$tbl,r3,#AES_decrypt-AES_Td		@ Td
+#if __ARM_ARCH__<7
+	ldrb	$s0,[$rounds,#3]	@ load input data in endian-neutral
+	ldrb	$t1,[$rounds,#2]	@ manner...
+	ldrb	$t2,[$rounds,#1]
+	ldrb	$t3,[$rounds,#0]
+	orr	$s0,$s0,$t1,lsl#8
+	ldrb	$s1,[$rounds,#7]
+	orr	$s0,$s0,$t2,lsl#16
+	ldrb	$t1,[$rounds,#6]
+	orr	$s0,$s0,$t3,lsl#24
+	ldrb	$t2,[$rounds,#5]
+	ldrb	$t3,[$rounds,#4]
+	orr	$s1,$s1,$t1,lsl#8
+	ldrb	$s2,[$rounds,#11]
+	orr	$s1,$s1,$t2,lsl#16
+	ldrb	$t1,[$rounds,#10]
+	orr	$s1,$s1,$t3,lsl#24
+	ldrb	$t2,[$rounds,#9]
+	ldrb	$t3,[$rounds,#8]
+	orr	$s2,$s2,$t1,lsl#8
+	ldrb	$s3,[$rounds,#15]
+	orr	$s2,$s2,$t2,lsl#16
+	ldrb	$t1,[$rounds,#14]
+	orr	$s2,$s2,$t3,lsl#24
+	ldrb	$t2,[$rounds,#13]
+	ldrb	$t3,[$rounds,#12]
+	orr	$s3,$s3,$t1,lsl#8
+	orr	$s3,$s3,$t2,lsl#16
+	orr	$s3,$s3,$t3,lsl#24
+#else
+	ldr	$s0,[$rounds,#0]
+	ldr	$s1,[$rounds,#4]
+	ldr	$s2,[$rounds,#8]
+	ldr	$s3,[$rounds,#12]
+#ifdef __ARMEL__
+	rev	$s0,$s0
+	rev	$s1,$s1
+	rev	$s2,$s2
+	rev	$s3,$s3
+#endif
+#endif
+	bl	_armv4_AES_decrypt
+
+	ldr	$rounds,[sp],#4		@ pop out
+#if __ARM_ARCH__>=7
+#ifdef __ARMEL__
+	rev	$s0,$s0
+	rev	$s1,$s1
+	rev	$s2,$s2
+	rev	$s3,$s3
+#endif
+	str	$s0,[$rounds,#0]
+	str	$s1,[$rounds,#4]
+	str	$s2,[$rounds,#8]
+	str	$s3,[$rounds,#12]
+#else
+	mov	$t1,$s0,lsr#24		@ write output in endian-neutral
+	mov	$t2,$s0,lsr#16		@ manner...
+	mov	$t3,$s0,lsr#8
+	strb	$t1,[$rounds,#0]
+	strb	$t2,[$rounds,#1]
+	mov	$t1,$s1,lsr#24
+	strb	$t3,[$rounds,#2]
+	mov	$t2,$s1,lsr#16
+	strb	$s0,[$rounds,#3]
+	mov	$t3,$s1,lsr#8
+	strb	$t1,[$rounds,#4]
+	strb	$t2,[$rounds,#5]
+	mov	$t1,$s2,lsr#24
+	strb	$t3,[$rounds,#6]
+	mov	$t2,$s2,lsr#16
+	strb	$s1,[$rounds,#7]
+	mov	$t3,$s2,lsr#8
+	strb	$t1,[$rounds,#8]
+	strb	$t2,[$rounds,#9]
+	mov	$t1,$s3,lsr#24
+	strb	$t3,[$rounds,#10]
+	mov	$t2,$s3,lsr#16
+	strb	$s2,[$rounds,#11]
+	mov	$t3,$s3,lsr#8
+	strb	$t1,[$rounds,#12]
+	strb	$t2,[$rounds,#13]
+	strb	$t3,[$rounds,#14]
+	strb	$s3,[$rounds,#15]
+#endif
+#if __ARM_ARCH__>=5
+	ldmia	sp!,{r4-r12,pc}
+#else
+	ldmia   sp!,{r4-r12,lr}
+	tst	lr,#1
+	moveq	pc,lr			@ be binary compatible with V4, yet
+	bx	lr			@ interoperable with Thumb ISA:-)
+#endif
+.size	AES_decrypt,.-AES_decrypt
+
+.type   _armv4_AES_decrypt,%function
+.align	2
+_armv4_AES_decrypt:
+	str	lr,[sp,#-4]!		@ push lr
+	ldmia	$key!,{$t1-$i1}
+	eor	$s0,$s0,$t1
+	ldr	$rounds,[$key,#240-16]
+	eor	$s1,$s1,$t2
+	eor	$s2,$s2,$t3
+	eor	$s3,$s3,$i1
+	sub	$rounds,$rounds,#1
+	mov	lr,#255
+
+	and	$i1,lr,$s0,lsr#16
+	and	$i2,lr,$s0,lsr#8
+	and	$i3,lr,$s0
+	mov	$s0,$s0,lsr#24
+.Ldec_loop:
+	ldr	$t1,[$tbl,$i1,lsl#2]	@ Td1[s0>>16]
+	and	$i1,lr,$s1		@ i0
+	ldr	$t2,[$tbl,$i2,lsl#2]	@ Td2[s0>>8]
+	and	$i2,lr,$s1,lsr#16
+	ldr	$t3,[$tbl,$i3,lsl#2]	@ Td3[s0>>0]
+	and	$i3,lr,$s1,lsr#8
+	ldr	$s0,[$tbl,$s0,lsl#2]	@ Td0[s0>>24]
+	mov	$s1,$s1,lsr#24
+
+	ldr	$i1,[$tbl,$i1,lsl#2]	@ Td3[s1>>0]
+	ldr	$i2,[$tbl,$i2,lsl#2]	@ Td1[s1>>16]
+	ldr	$i3,[$tbl,$i3,lsl#2]	@ Td2[s1>>8]
+	eor	$s0,$s0,$i1,ror#24
+	ldr	$s1,[$tbl,$s1,lsl#2]	@ Td0[s1>>24]
+	and	$i1,lr,$s2,lsr#8	@ i0
+	eor	$t2,$i2,$t2,ror#8
+	and	$i2,lr,$s2		@ i1
+	eor	$t3,$i3,$t3,ror#8
+	and	$i3,lr,$s2,lsr#16
+	ldr	$i1,[$tbl,$i1,lsl#2]	@ Td2[s2>>8]
+	eor	$s1,$s1,$t1,ror#8
+	ldr	$i2,[$tbl,$i2,lsl#2]	@ Td3[s2>>0]
+	mov	$s2,$s2,lsr#24
+
+	ldr	$i3,[$tbl,$i3,lsl#2]	@ Td1[s2>>16]
+	eor	$s0,$s0,$i1,ror#16
+	ldr	$s2,[$tbl,$s2,lsl#2]	@ Td0[s2>>24]
+	and	$i1,lr,$s3,lsr#16	@ i0
+	eor	$s1,$s1,$i2,ror#24
+	and	$i2,lr,$s3,lsr#8	@ i1
+	eor	$t3,$i3,$t3,ror#8
+	and	$i3,lr,$s3		@ i2
+	ldr	$i1,[$tbl,$i1,lsl#2]	@ Td1[s3>>16]
+	eor	$s2,$s2,$t2,ror#8
+	ldr	$i2,[$tbl,$i2,lsl#2]	@ Td2[s3>>8]
+	mov	$s3,$s3,lsr#24
+
+	ldr	$i3,[$tbl,$i3,lsl#2]	@ Td3[s3>>0]
+	eor	$s0,$s0,$i1,ror#8
+	ldr	$i1,[$key],#16
+	eor	$s1,$s1,$i2,ror#16
+	ldr	$s3,[$tbl,$s3,lsl#2]	@ Td0[s3>>24]
+	eor	$s2,$s2,$i3,ror#24
+
+	ldr	$t1,[$key,#-12]
+	eor	$s0,$s0,$i1
+	ldr	$t2,[$key,#-8]
+	eor	$s3,$s3,$t3,ror#8
+	ldr	$t3,[$key,#-4]
+	and	$i1,lr,$s0,lsr#16
+	eor	$s1,$s1,$t1
+	and	$i2,lr,$s0,lsr#8
+	eor	$s2,$s2,$t2
+	and	$i3,lr,$s0
+	eor	$s3,$s3,$t3
+	mov	$s0,$s0,lsr#24
+
+	subs	$rounds,$rounds,#1
+	bne	.Ldec_loop
+
+	add	$tbl,$tbl,#1024
+
+	ldr	$t2,[$tbl,#0]		@ prefetch Td4
+	ldr	$t3,[$tbl,#32]
+	ldr	$t1,[$tbl,#64]
+	ldr	$t2,[$tbl,#96]
+	ldr	$t3,[$tbl,#128]
+	ldr	$t1,[$tbl,#160]
+	ldr	$t2,[$tbl,#192]
+	ldr	$t3,[$tbl,#224]
+
+	ldrb	$s0,[$tbl,$s0]		@ Td4[s0>>24]
+	ldrb	$t1,[$tbl,$i1]		@ Td4[s0>>16]
+	and	$i1,lr,$s1		@ i0
+	ldrb	$t2,[$tbl,$i2]		@ Td4[s0>>8]
+	and	$i2,lr,$s1,lsr#16
+	ldrb	$t3,[$tbl,$i3]		@ Td4[s0>>0]
+	and	$i3,lr,$s1,lsr#8
+
+	ldrb	$i1,[$tbl,$i1]		@ Td4[s1>>0]
+	ldrb	$s1,[$tbl,$s1,lsr#24]	@ Td4[s1>>24]
+	ldrb	$i2,[$tbl,$i2]		@ Td4[s1>>16]
+	eor	$s0,$i1,$s0,lsl#24
+	ldrb	$i3,[$tbl,$i3]		@ Td4[s1>>8]
+	eor	$s1,$t1,$s1,lsl#8
+	and	$i1,lr,$s2,lsr#8	@ i0
+	eor	$t2,$t2,$i2,lsl#8
+	and	$i2,lr,$s2		@ i1
+	ldrb	$i1,[$tbl,$i1]		@ Td4[s2>>8]
+	eor	$t3,$t3,$i3,lsl#8
+	ldrb	$i2,[$tbl,$i2]		@ Td4[s2>>0]
+	and	$i3,lr,$s2,lsr#16
+
+	ldrb	$s2,[$tbl,$s2,lsr#24]	@ Td4[s2>>24]
+	eor	$s0,$s0,$i1,lsl#8
+	ldrb	$i3,[$tbl,$i3]		@ Td4[s2>>16]
+	eor	$s1,$i2,$s1,lsl#16
+	and	$i1,lr,$s3,lsr#16	@ i0
+	eor	$s2,$t2,$s2,lsl#16
+	and	$i2,lr,$s3,lsr#8	@ i1
+	ldrb	$i1,[$tbl,$i1]		@ Td4[s3>>16]
+	eor	$t3,$t3,$i3,lsl#16
+	ldrb	$i2,[$tbl,$i2]		@ Td4[s3>>8]
+	and	$i3,lr,$s3		@ i2
+
+	ldrb	$i3,[$tbl,$i3]		@ Td4[s3>>0]
+	ldrb	$s3,[$tbl,$s3,lsr#24]	@ Td4[s3>>24]
+	eor	$s0,$s0,$i1,lsl#16
+	ldr	$i1,[$key,#0]
+	eor	$s1,$s1,$i2,lsl#8
+	ldr	$t1,[$key,#4]
+	eor	$s2,$i3,$s2,lsl#8
+	ldr	$t2,[$key,#8]
+	eor	$s3,$t3,$s3,lsl#24
+	ldr	$t3,[$key,#12]
+
+	eor	$s0,$s0,$i1
+	eor	$s1,$s1,$t1
+	eor	$s2,$s2,$t2
+	eor	$s3,$s3,$t3
+
+	sub	$tbl,$tbl,#1024
+	ldr	pc,[sp],#4		@ pop and return
+.size	_armv4_AES_decrypt,.-_armv4_AES_decrypt
+.asciz	"AES for ARMv4, CRYPTOGAMS by <appro\@openssl.org>"
+.align	2
+___
+
+$code =~ s/\bbx\s+lr\b/.word\t0xe12fff1e/gm;	# make it possible to compile with -march=armv4
+print $code;
+close STDOUT;	# enforce flush
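The substitution above works because 0xe12fff1e is the machine encoding of 'bx lr': emitting the instruction as a literal .word keeps the generated code byte-for-byte identical while letting assemblers driven with -march=armv4, which reject the 'bx' mnemonic, still process the output. A minimal, self-contained Perl sketch of the same rewrite follows; the sample input is made up purely for illustration and is not taken from the generated code:

	# Stand-alone illustration of the bx-lr rewrite performed by the script above.
	my $asm = "\ttst\tlr,#1\n\tmoveq\tpc,lr\n\tbx\tlr\n";	# hypothetical input fragment
	$asm =~ s/\bbx\s+lr\b/.word\t0xe12fff1e/gm;		# same pattern as in aes-armv4.pl
	print $asm;	# the "bx lr" line now reads ".word 0xe12fff1e"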
diff --git a/jni/openssl/crypto/aes/asm/aes-armv4.s b/jni/openssl/crypto/aes/asm/aes-armv4.s
new file mode 100644
index 0000000..2697d4c
--- /dev/null
+++ b/jni/openssl/crypto/aes/asm/aes-armv4.s
@@ -0,0 +1,1071 @@
+#include "arm_arch.h"
+.text
+.code	32
+
+.type	AES_Te,%object
+.align	5
+AES_Te:
+.word	0xc66363a5, 0xf87c7c84, 0xee777799, 0xf67b7b8d
+.word	0xfff2f20d, 0xd66b6bbd, 0xde6f6fb1, 0x91c5c554
+.word	0x60303050, 0x02010103, 0xce6767a9, 0x562b2b7d
+.word	0xe7fefe19, 0xb5d7d762, 0x4dababe6, 0xec76769a
+.word	0x8fcaca45, 0x1f82829d, 0x89c9c940, 0xfa7d7d87
+.word	0xeffafa15, 0xb25959eb, 0x8e4747c9, 0xfbf0f00b
+.word	0x41adadec, 0xb3d4d467, 0x5fa2a2fd, 0x45afafea
+.word	0x239c9cbf, 0x53a4a4f7, 0xe4727296, 0x9bc0c05b
+.word	0x75b7b7c2, 0xe1fdfd1c, 0x3d9393ae, 0x4c26266a
+.word	0x6c36365a, 0x7e3f3f41, 0xf5f7f702, 0x83cccc4f
+.word	0x6834345c, 0x51a5a5f4, 0xd1e5e534, 0xf9f1f108
+.word	0xe2717193, 0xabd8d873, 0x62313153, 0x2a15153f
+.word	0x0804040c, 0x95c7c752, 0x46232365, 0x9dc3c35e
+.word	0x30181828, 0x379696a1, 0x0a05050f, 0x2f9a9ab5
+.word	0x0e070709, 0x24121236, 0x1b80809b, 0xdfe2e23d
+.word	0xcdebeb26, 0x4e272769, 0x7fb2b2cd, 0xea75759f
+.word	0x1209091b, 0x1d83839e, 0x582c2c74, 0x341a1a2e
+.word	0x361b1b2d, 0xdc6e6eb2, 0xb45a5aee, 0x5ba0a0fb
+.word	0xa45252f6, 0x763b3b4d, 0xb7d6d661, 0x7db3b3ce
+.word	0x5229297b, 0xdde3e33e, 0x5e2f2f71, 0x13848497
+.word	0xa65353f5, 0xb9d1d168, 0x00000000, 0xc1eded2c
+.word	0x40202060, 0xe3fcfc1f, 0x79b1b1c8, 0xb65b5bed
+.word	0xd46a6abe, 0x8dcbcb46, 0x67bebed9, 0x7239394b
+.word	0x944a4ade, 0x984c4cd4, 0xb05858e8, 0x85cfcf4a
+.word	0xbbd0d06b, 0xc5efef2a, 0x4faaaae5, 0xedfbfb16
+.word	0x864343c5, 0x9a4d4dd7, 0x66333355, 0x11858594
+.word	0x8a4545cf, 0xe9f9f910, 0x04020206, 0xfe7f7f81
+.word	0xa05050f0, 0x783c3c44, 0x259f9fba, 0x4ba8a8e3
+.word	0xa25151f3, 0x5da3a3fe, 0x804040c0, 0x058f8f8a
+.word	0x3f9292ad, 0x219d9dbc, 0x70383848, 0xf1f5f504
+.word	0x63bcbcdf, 0x77b6b6c1, 0xafdada75, 0x42212163
+.word	0x20101030, 0xe5ffff1a, 0xfdf3f30e, 0xbfd2d26d
+.word	0x81cdcd4c, 0x180c0c14, 0x26131335, 0xc3ecec2f
+.word	0xbe5f5fe1, 0x359797a2, 0x884444cc, 0x2e171739
+.word	0x93c4c457, 0x55a7a7f2, 0xfc7e7e82, 0x7a3d3d47
+.word	0xc86464ac, 0xba5d5de7, 0x3219192b, 0xe6737395
+.word	0xc06060a0, 0x19818198, 0x9e4f4fd1, 0xa3dcdc7f
+.word	0x44222266, 0x542a2a7e, 0x3b9090ab, 0x0b888883
+.word	0x8c4646ca, 0xc7eeee29, 0x6bb8b8d3, 0x2814143c
+.word	0xa7dede79, 0xbc5e5ee2, 0x160b0b1d, 0xaddbdb76
+.word	0xdbe0e03b, 0x64323256, 0x743a3a4e, 0x140a0a1e
+.word	0x924949db, 0x0c06060a, 0x4824246c, 0xb85c5ce4
+.word	0x9fc2c25d, 0xbdd3d36e, 0x43acacef, 0xc46262a6
+.word	0x399191a8, 0x319595a4, 0xd3e4e437, 0xf279798b
+.word	0xd5e7e732, 0x8bc8c843, 0x6e373759, 0xda6d6db7
+.word	0x018d8d8c, 0xb1d5d564, 0x9c4e4ed2, 0x49a9a9e0
+.word	0xd86c6cb4, 0xac5656fa, 0xf3f4f407, 0xcfeaea25
+.word	0xca6565af, 0xf47a7a8e, 0x47aeaee9, 0x10080818
+.word	0x6fbabad5, 0xf0787888, 0x4a25256f, 0x5c2e2e72
+.word	0x381c1c24, 0x57a6a6f1, 0x73b4b4c7, 0x97c6c651
+.word	0xcbe8e823, 0xa1dddd7c, 0xe874749c, 0x3e1f1f21
+.word	0x964b4bdd, 0x61bdbddc, 0x0d8b8b86, 0x0f8a8a85
+.word	0xe0707090, 0x7c3e3e42, 0x71b5b5c4, 0xcc6666aa
+.word	0x904848d8, 0x06030305, 0xf7f6f601, 0x1c0e0e12
+.word	0xc26161a3, 0x6a35355f, 0xae5757f9, 0x69b9b9d0
+.word	0x17868691, 0x99c1c158, 0x3a1d1d27, 0x279e9eb9
+.word	0xd9e1e138, 0xebf8f813, 0x2b9898b3, 0x22111133
+.word	0xd26969bb, 0xa9d9d970, 0x078e8e89, 0x339494a7
+.word	0x2d9b9bb6, 0x3c1e1e22, 0x15878792, 0xc9e9e920
+.word	0x87cece49, 0xaa5555ff, 0x50282878, 0xa5dfdf7a
+.word	0x038c8c8f, 0x59a1a1f8, 0x09898980, 0x1a0d0d17
+.word	0x65bfbfda, 0xd7e6e631, 0x844242c6, 0xd06868b8
+.word	0x824141c3, 0x299999b0, 0x5a2d2d77, 0x1e0f0f11
+.word	0x7bb0b0cb, 0xa85454fc, 0x6dbbbbd6, 0x2c16163a
+@ Te4[256]
+.byte	0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5
+.byte	0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76
+.byte	0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0
+.byte	0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0
+.byte	0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc
+.byte	0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15
+.byte	0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a
+.byte	0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75
+.byte	0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0
+.byte	0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84
+.byte	0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b
+.byte	0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf
+.byte	0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85
+.byte	0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8
+.byte	0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5
+.byte	0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2
+.byte	0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17
+.byte	0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73
+.byte	0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88
+.byte	0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb
+.byte	0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c
+.byte	0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79
+.byte	0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9
+.byte	0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08
+.byte	0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6
+.byte	0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a
+.byte	0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e
+.byte	0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e
+.byte	0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94
+.byte	0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf
+.byte	0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68
+.byte	0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
+@ rcon[]
+.word	0x01000000, 0x02000000, 0x04000000, 0x08000000
+.word	0x10000000, 0x20000000, 0x40000000, 0x80000000
+.word	0x1B000000, 0x36000000, 0, 0, 0, 0, 0, 0
+.size	AES_Te,.-AES_Te
+
+@ void AES_encrypt(const unsigned char *in, unsigned char *out,
+@ 		 const AES_KEY *key) {
+.global AES_encrypt
+.type   AES_encrypt,%function
+.align	5
+AES_encrypt:
+	sub	r3,pc,#8		@ AES_encrypt
+	stmdb   sp!,{r1,r4-r12,lr}
+	mov	r12,r0		@ inp
+	mov	r11,r2
+	sub	r10,r3,#AES_encrypt-AES_Te	@ Te
+#if __ARM_ARCH__<7
+	ldrb	r0,[r12,#3]	@ load input data in endian-neutral
+	ldrb	r4,[r12,#2]	@ manner...
+	ldrb	r5,[r12,#1]
+	ldrb	r6,[r12,#0]
+	orr	r0,r0,r4,lsl#8
+	ldrb	r1,[r12,#7]
+	orr	r0,r0,r5,lsl#16
+	ldrb	r4,[r12,#6]
+	orr	r0,r0,r6,lsl#24
+	ldrb	r5,[r12,#5]
+	ldrb	r6,[r12,#4]
+	orr	r1,r1,r4,lsl#8
+	ldrb	r2,[r12,#11]
+	orr	r1,r1,r5,lsl#16
+	ldrb	r4,[r12,#10]
+	orr	r1,r1,r6,lsl#24
+	ldrb	r5,[r12,#9]
+	ldrb	r6,[r12,#8]
+	orr	r2,r2,r4,lsl#8
+	ldrb	r3,[r12,#15]
+	orr	r2,r2,r5,lsl#16
+	ldrb	r4,[r12,#14]
+	orr	r2,r2,r6,lsl#24
+	ldrb	r5,[r12,#13]
+	ldrb	r6,[r12,#12]
+	orr	r3,r3,r4,lsl#8
+	orr	r3,r3,r5,lsl#16
+	orr	r3,r3,r6,lsl#24
+#else
+	ldr	r0,[r12,#0]
+	ldr	r1,[r12,#4]
+	ldr	r2,[r12,#8]
+	ldr	r3,[r12,#12]
+#ifdef __ARMEL__
+	rev	r0,r0
+	rev	r1,r1
+	rev	r2,r2
+	rev	r3,r3
+#endif
+#endif
+	bl	_armv4_AES_encrypt
+
+	ldr	r12,[sp],#4		@ pop out
+#if __ARM_ARCH__>=7
+#ifdef __ARMEL__
+	rev	r0,r0
+	rev	r1,r1
+	rev	r2,r2
+	rev	r3,r3
+#endif
+	str	r0,[r12,#0]
+	str	r1,[r12,#4]
+	str	r2,[r12,#8]
+	str	r3,[r12,#12]
+#else
+	mov	r4,r0,lsr#24		@ write output in endian-neutral
+	mov	r5,r0,lsr#16		@ manner...
+	mov	r6,r0,lsr#8
+	strb	r4,[r12,#0]
+	strb	r5,[r12,#1]
+	mov	r4,r1,lsr#24
+	strb	r6,[r12,#2]
+	mov	r5,r1,lsr#16
+	strb	r0,[r12,#3]
+	mov	r6,r1,lsr#8
+	strb	r4,[r12,#4]
+	strb	r5,[r12,#5]
+	mov	r4,r2,lsr#24
+	strb	r6,[r12,#6]
+	mov	r5,r2,lsr#16
+	strb	r1,[r12,#7]
+	mov	r6,r2,lsr#8
+	strb	r4,[r12,#8]
+	strb	r5,[r12,#9]
+	mov	r4,r3,lsr#24
+	strb	r6,[r12,#10]
+	mov	r5,r3,lsr#16
+	strb	r2,[r12,#11]
+	mov	r6,r3,lsr#8
+	strb	r4,[r12,#12]
+	strb	r5,[r12,#13]
+	strb	r6,[r12,#14]
+	strb	r3,[r12,#15]
+#endif
+#if __ARM_ARCH__>=5
+	ldmia	sp!,{r4-r12,pc}
+#else
+	ldmia   sp!,{r4-r12,lr}
+	tst	lr,#1
+	moveq	pc,lr			@ be binary compatible with V4, yet
+	.word	0xe12fff1e			@ interoperable with Thumb ISA:-)
+#endif
+.size	AES_encrypt,.-AES_encrypt
+
+.type   _armv4_AES_encrypt,%function
+.align	2
+_armv4_AES_encrypt:
+	str	lr,[sp,#-4]!		@ push lr
+	ldmia	r11!,{r4-r7}
+	eor	r0,r0,r4
+	ldr	r12,[r11,#240-16]
+	eor	r1,r1,r5
+	eor	r2,r2,r6
+	eor	r3,r3,r7
+	sub	r12,r12,#1
+	mov	lr,#255
+
+	and	r7,lr,r0
+	and	r8,lr,r0,lsr#8
+	and	r9,lr,r0,lsr#16
+	mov	r0,r0,lsr#24
+.Lenc_loop:
+	ldr	r4,[r10,r7,lsl#2]	@ Te3[s0>>0]
+	and	r7,lr,r1,lsr#16	@ i0
+	ldr	r5,[r10,r8,lsl#2]	@ Te2[s0>>8]
+	and	r8,lr,r1
+	ldr	r6,[r10,r9,lsl#2]	@ Te1[s0>>16]
+	and	r9,lr,r1,lsr#8
+	ldr	r0,[r10,r0,lsl#2]	@ Te0[s0>>24]
+	mov	r1,r1,lsr#24
+
+	ldr	r7,[r10,r7,lsl#2]	@ Te1[s1>>16]
+	ldr	r8,[r10,r8,lsl#2]	@ Te3[s1>>0]
+	ldr	r9,[r10,r9,lsl#2]	@ Te2[s1>>8]
+	eor	r0,r0,r7,ror#8
+	ldr	r1,[r10,r1,lsl#2]	@ Te0[s1>>24]
+	and	r7,lr,r2,lsr#8	@ i0
+	eor	r5,r5,r8,ror#8
+	and	r8,lr,r2,lsr#16	@ i1
+	eor	r6,r6,r9,ror#8
+	and	r9,lr,r2
+	ldr	r7,[r10,r7,lsl#2]	@ Te2[s2>>8]
+	eor	r1,r1,r4,ror#24
+	ldr	r8,[r10,r8,lsl#2]	@ Te1[s2>>16]
+	mov	r2,r2,lsr#24
+
+	ldr	r9,[r10,r9,lsl#2]	@ Te3[s2>>0]
+	eor	r0,r0,r7,ror#16
+	ldr	r2,[r10,r2,lsl#2]	@ Te0[s2>>24]
+	and	r7,lr,r3		@ i0
+	eor	r1,r1,r8,ror#8
+	and	r8,lr,r3,lsr#8	@ i1
+	eor	r6,r6,r9,ror#16
+	and	r9,lr,r3,lsr#16	@ i2
+	ldr	r7,[r10,r7,lsl#2]	@ Te3[s3>>0]
+	eor	r2,r2,r5,ror#16
+	ldr	r8,[r10,r8,lsl#2]	@ Te2[s3>>8]
+	mov	r3,r3,lsr#24
+
+	ldr	r9,[r10,r9,lsl#2]	@ Te1[s3>>16]
+	eor	r0,r0,r7,ror#24
+	ldr	r7,[r11],#16
+	eor	r1,r1,r8,ror#16
+	ldr	r3,[r10,r3,lsl#2]	@ Te0[s3>>24]
+	eor	r2,r2,r9,ror#8
+	ldr	r4,[r11,#-12]
+	eor	r3,r3,r6,ror#8
+
+	ldr	r5,[r11,#-8]
+	eor	r0,r0,r7
+	ldr	r6,[r11,#-4]
+	and	r7,lr,r0
+	eor	r1,r1,r4
+	and	r8,lr,r0,lsr#8
+	eor	r2,r2,r5
+	and	r9,lr,r0,lsr#16
+	eor	r3,r3,r6
+	mov	r0,r0,lsr#24
+
+	subs	r12,r12,#1
+	bne	.Lenc_loop
+
+	add	r10,r10,#2
+
+	ldrb	r4,[r10,r7,lsl#2]	@ Te4[s0>>0]
+	and	r7,lr,r1,lsr#16	@ i0
+	ldrb	r5,[r10,r8,lsl#2]	@ Te4[s0>>8]
+	and	r8,lr,r1
+	ldrb	r6,[r10,r9,lsl#2]	@ Te4[s0>>16]
+	and	r9,lr,r1,lsr#8
+	ldrb	r0,[r10,r0,lsl#2]	@ Te4[s0>>24]
+	mov	r1,r1,lsr#24
+
+	ldrb	r7,[r10,r7,lsl#2]	@ Te4[s1>>16]
+	ldrb	r8,[r10,r8,lsl#2]	@ Te4[s1>>0]
+	ldrb	r9,[r10,r9,lsl#2]	@ Te4[s1>>8]
+	eor	r0,r7,r0,lsl#8
+	ldrb	r1,[r10,r1,lsl#2]	@ Te4[s1>>24]
+	and	r7,lr,r2,lsr#8	@ i0
+	eor	r5,r8,r5,lsl#8
+	and	r8,lr,r2,lsr#16	@ i1
+	eor	r6,r9,r6,lsl#8
+	and	r9,lr,r2
+	ldrb	r7,[r10,r7,lsl#2]	@ Te4[s2>>8]
+	eor	r1,r4,r1,lsl#24
+	ldrb	r8,[r10,r8,lsl#2]	@ Te4[s2>>16]
+	mov	r2,r2,lsr#24
+
+	ldrb	r9,[r10,r9,lsl#2]	@ Te4[s2>>0]
+	eor	r0,r7,r0,lsl#8
+	ldrb	r2,[r10,r2,lsl#2]	@ Te4[s2>>24]
+	and	r7,lr,r3		@ i0
+	eor	r1,r1,r8,lsl#16
+	and	r8,lr,r3,lsr#8	@ i1
+	eor	r6,r9,r6,lsl#8
+	and	r9,lr,r3,lsr#16	@ i2
+	ldrb	r7,[r10,r7,lsl#2]	@ Te4[s3>>0]
+	eor	r2,r5,r2,lsl#24
+	ldrb	r8,[r10,r8,lsl#2]	@ Te4[s3>>8]
+	mov	r3,r3,lsr#24
+
+	ldrb	r9,[r10,r9,lsl#2]	@ Te4[s3>>16]
+	eor	r0,r7,r0,lsl#8
+	ldr	r7,[r11,#0]
+	ldrb	r3,[r10,r3,lsl#2]	@ Te4[s3>>24]
+	eor	r1,r1,r8,lsl#8
+	ldr	r4,[r11,#4]
+	eor	r2,r2,r9,lsl#16
+	ldr	r5,[r11,#8]
+	eor	r3,r6,r3,lsl#24
+	ldr	r6,[r11,#12]
+
+	eor	r0,r0,r7
+	eor	r1,r1,r4
+	eor	r2,r2,r5
+	eor	r3,r3,r6
+
+	sub	r10,r10,#2
+	ldr	pc,[sp],#4		@ pop and return
+.size	_armv4_AES_encrypt,.-_armv4_AES_encrypt
+
+.global private_AES_set_encrypt_key
+.type   private_AES_set_encrypt_key,%function
+.align	5
+private_AES_set_encrypt_key:
+_armv4_AES_set_encrypt_key:
+	sub	r3,pc,#8		@ AES_set_encrypt_key
+	teq	r0,#0
+	moveq	r0,#-1
+	beq	.Labrt
+	teq	r2,#0
+	moveq	r0,#-1
+	beq	.Labrt
+
+	teq	r1,#128
+	beq	.Lok
+	teq	r1,#192
+	beq	.Lok
+	teq	r1,#256
+	movne	r0,#-1
+	bne	.Labrt
+
+.Lok:	stmdb   sp!,{r4-r12,lr}
+	sub	r10,r3,#_armv4_AES_set_encrypt_key-AES_Te-1024	@ Te4
+
+	mov	r12,r0		@ inp
+	mov	lr,r1			@ bits
+	mov	r11,r2			@ key
+
+#if __ARM_ARCH__<7
+	ldrb	r0,[r12,#3]	@ load input data in endian-neutral
+	ldrb	r4,[r12,#2]	@ manner...
+	ldrb	r5,[r12,#1]
+	ldrb	r6,[r12,#0]
+	orr	r0,r0,r4,lsl#8
+	ldrb	r1,[r12,#7]
+	orr	r0,r0,r5,lsl#16
+	ldrb	r4,[r12,#6]
+	orr	r0,r0,r6,lsl#24
+	ldrb	r5,[r12,#5]
+	ldrb	r6,[r12,#4]
+	orr	r1,r1,r4,lsl#8
+	ldrb	r2,[r12,#11]
+	orr	r1,r1,r5,lsl#16
+	ldrb	r4,[r12,#10]
+	orr	r1,r1,r6,lsl#24
+	ldrb	r5,[r12,#9]
+	ldrb	r6,[r12,#8]
+	orr	r2,r2,r4,lsl#8
+	ldrb	r3,[r12,#15]
+	orr	r2,r2,r5,lsl#16
+	ldrb	r4,[r12,#14]
+	orr	r2,r2,r6,lsl#24
+	ldrb	r5,[r12,#13]
+	ldrb	r6,[r12,#12]
+	orr	r3,r3,r4,lsl#8
+	str	r0,[r11],#16
+	orr	r3,r3,r5,lsl#16
+	str	r1,[r11,#-12]
+	orr	r3,r3,r6,lsl#24
+	str	r2,[r11,#-8]
+	str	r3,[r11,#-4]
+#else
+	ldr	r0,[r12,#0]
+	ldr	r1,[r12,#4]
+	ldr	r2,[r12,#8]
+	ldr	r3,[r12,#12]
+#ifdef __ARMEL__
+	rev	r0,r0
+	rev	r1,r1
+	rev	r2,r2
+	rev	r3,r3
+#endif
+	str	r0,[r11],#16
+	str	r1,[r11,#-12]
+	str	r2,[r11,#-8]
+	str	r3,[r11,#-4]
+#endif
+
+	teq	lr,#128
+	bne	.Lnot128
+	mov	r12,#10
+	str	r12,[r11,#240-16]
+	add	r6,r10,#256			@ rcon
+	mov	lr,#255
+
+.L128_loop:
+	and	r5,lr,r3,lsr#24
+	and	r7,lr,r3,lsr#16
+	ldrb	r5,[r10,r5]
+	and	r8,lr,r3,lsr#8
+	ldrb	r7,[r10,r7]
+	and	r9,lr,r3
+	ldrb	r8,[r10,r8]
+	orr	r5,r5,r7,lsl#24
+	ldrb	r9,[r10,r9]
+	orr	r5,r5,r8,lsl#16
+	ldr	r4,[r6],#4			@ rcon[i++]
+	orr	r5,r5,r9,lsl#8
+	eor	r5,r5,r4
+	eor	r0,r0,r5			@ rk[4]=rk[0]^...
+	eor	r1,r1,r0			@ rk[5]=rk[1]^rk[4]
+	str	r0,[r11],#16
+	eor	r2,r2,r1			@ rk[6]=rk[2]^rk[5]
+	str	r1,[r11,#-12]
+	eor	r3,r3,r2			@ rk[7]=rk[3]^rk[6]
+	str	r2,[r11,#-8]
+	subs	r12,r12,#1
+	str	r3,[r11,#-4]
+	bne	.L128_loop
+	sub	r2,r11,#176
+	b	.Ldone
+
+.Lnot128:
+#if __ARM_ARCH__<7
+	ldrb	r8,[r12,#19]
+	ldrb	r4,[r12,#18]
+	ldrb	r5,[r12,#17]
+	ldrb	r6,[r12,#16]
+	orr	r8,r8,r4,lsl#8
+	ldrb	r9,[r12,#23]
+	orr	r8,r8,r5,lsl#16
+	ldrb	r4,[r12,#22]
+	orr	r8,r8,r6,lsl#24
+	ldrb	r5,[r12,#21]
+	ldrb	r6,[r12,#20]
+	orr	r9,r9,r4,lsl#8
+	orr	r9,r9,r5,lsl#16
+	str	r8,[r11],#8
+	orr	r9,r9,r6,lsl#24
+	str	r9,[r11,#-4]
+#else
+	ldr	r8,[r12,#16]
+	ldr	r9,[r12,#20]
+#ifdef __ARMEL__
+	rev	r8,r8
+	rev	r9,r9
+#endif
+	str	r8,[r11],#8
+	str	r9,[r11,#-4]
+#endif
+
+	teq	lr,#192
+	bne	.Lnot192
+	mov	r12,#12
+	str	r12,[r11,#240-24]
+	add	r6,r10,#256			@ rcon
+	mov	lr,#255
+	mov	r12,#8
+
+.L192_loop:
+	and	r5,lr,r9,lsr#24
+	and	r7,lr,r9,lsr#16
+	ldrb	r5,[r10,r5]
+	and	r8,lr,r9,lsr#8
+	ldrb	r7,[r10,r7]
+	and	r9,lr,r9
+	ldrb	r8,[r10,r8]
+	orr	r5,r5,r7,lsl#24
+	ldrb	r9,[r10,r9]
+	orr	r5,r5,r8,lsl#16
+	ldr	r4,[r6],#4			@ rcon[i++]
+	orr	r5,r5,r9,lsl#8
+	eor	r9,r5,r4
+	eor	r0,r0,r9			@ rk[6]=rk[0]^...
+	eor	r1,r1,r0			@ rk[7]=rk[1]^rk[6]
+	str	r0,[r11],#24
+	eor	r2,r2,r1			@ rk[8]=rk[2]^rk[7]
+	str	r1,[r11,#-20]
+	eor	r3,r3,r2			@ rk[9]=rk[3]^rk[8]
+	str	r2,[r11,#-16]
+	subs	r12,r12,#1
+	str	r3,[r11,#-12]
+	subeq	r2,r11,#216
+	beq	.Ldone
+
+	ldr	r7,[r11,#-32]
+	ldr	r8,[r11,#-28]
+	eor	r7,r7,r3			@ rk[10]=rk[4]^rk[9]
+	eor	r9,r8,r7			@ rk[11]=rk[5]^rk[10]
+	str	r7,[r11,#-8]
+	str	r9,[r11,#-4]
+	b	.L192_loop
+
+.Lnot192:
+#if __ARM_ARCH__<7
+	ldrb	r8,[r12,#27]
+	ldrb	r4,[r12,#26]
+	ldrb	r5,[r12,#25]
+	ldrb	r6,[r12,#24]
+	orr	r8,r8,r4,lsl#8
+	ldrb	r9,[r12,#31]
+	orr	r8,r8,r5,lsl#16
+	ldrb	r4,[r12,#30]
+	orr	r8,r8,r6,lsl#24
+	ldrb	r5,[r12,#29]
+	ldrb	r6,[r12,#28]
+	orr	r9,r9,r4,lsl#8
+	orr	r9,r9,r5,lsl#16
+	str	r8,[r11],#8
+	orr	r9,r9,r6,lsl#24
+	str	r9,[r11,#-4]
+#else
+	ldr	r8,[r12,#24]
+	ldr	r9,[r12,#28]
+#ifdef __ARMEL__
+	rev	r8,r8
+	rev	r9,r9
+#endif
+	str	r8,[r11],#8
+	str	r9,[r11,#-4]
+#endif
+
+	mov	r12,#14
+	str	r12,[r11,#240-32]
+	add	r6,r10,#256			@ rcon
+	mov	lr,#255
+	mov	r12,#7
+
+.L256_loop:
+	and	r5,lr,r9,lsr#24
+	and	r7,lr,r9,lsr#16
+	ldrb	r5,[r10,r5]
+	and	r8,lr,r9,lsr#8
+	ldrb	r7,[r10,r7]
+	and	r9,lr,r9
+	ldrb	r8,[r10,r8]
+	orr	r5,r5,r7,lsl#24
+	ldrb	r9,[r10,r9]
+	orr	r5,r5,r8,lsl#16
+	ldr	r4,[r6],#4			@ rcon[i++]
+	orr	r5,r5,r9,lsl#8
+	eor	r9,r5,r4
+	eor	r0,r0,r9			@ rk[8]=rk[0]^...
+	eor	r1,r1,r0			@ rk[9]=rk[1]^rk[8]
+	str	r0,[r11],#32
+	eor	r2,r2,r1			@ rk[10]=rk[2]^rk[9]
+	str	r1,[r11,#-28]
+	eor	r3,r3,r2			@ rk[11]=rk[3]^rk[10]
+	str	r2,[r11,#-24]
+	subs	r12,r12,#1
+	str	r3,[r11,#-20]
+	subeq	r2,r11,#256
+	beq	.Ldone
+
+	and	r5,lr,r3
+	and	r7,lr,r3,lsr#8
+	ldrb	r5,[r10,r5]
+	and	r8,lr,r3,lsr#16
+	ldrb	r7,[r10,r7]
+	and	r9,lr,r3,lsr#24
+	ldrb	r8,[r10,r8]
+	orr	r5,r5,r7,lsl#8
+	ldrb	r9,[r10,r9]
+	orr	r5,r5,r8,lsl#16
+	ldr	r4,[r11,#-48]
+	orr	r5,r5,r9,lsl#24
+
+	ldr	r7,[r11,#-44]
+	ldr	r8,[r11,#-40]
+	eor	r4,r4,r5			@ rk[12]=rk[4]^...
+	ldr	r9,[r11,#-36]
+	eor	r7,r7,r4			@ rk[13]=rk[5]^rk[12]
+	str	r4,[r11,#-16]
+	eor	r8,r8,r7			@ rk[14]=rk[6]^rk[13]
+	str	r7,[r11,#-12]
+	eor	r9,r9,r8			@ rk[15]=rk[7]^rk[14]
+	str	r8,[r11,#-8]
+	str	r9,[r11,#-4]
+	b	.L256_loop
+
+.Ldone:	mov	r0,#0
+	ldmia   sp!,{r4-r12,lr}
+.Labrt:	tst	lr,#1
+	moveq	pc,lr			@ be binary compatible with V4, yet
+	.word	0xe12fff1e			@ interoperable with Thumb ISA:-)
+.size	private_AES_set_encrypt_key,.-private_AES_set_encrypt_key
+
+.global private_AES_set_decrypt_key
+.type   private_AES_set_decrypt_key,%function
+.align	5
+private_AES_set_decrypt_key:
+	str	lr,[sp,#-4]!            @ push lr
+	bl	_armv4_AES_set_encrypt_key
+	teq	r0,#0
+	ldrne	lr,[sp],#4              @ pop lr
+	bne	.Labrt
+
+	stmdb   sp!,{r4-r12}
+
+	ldr	r12,[r2,#240]	@ AES_set_encrypt_key preserves r2,
+	mov	r11,r2			@ which is AES_KEY *key
+	mov	r7,r2
+	add	r8,r2,r12,lsl#4
+
+.Linv:	ldr	r0,[r7]
+	ldr	r1,[r7,#4]
+	ldr	r2,[r7,#8]
+	ldr	r3,[r7,#12]
+	ldr	r4,[r8]
+	ldr	r5,[r8,#4]
+	ldr	r6,[r8,#8]
+	ldr	r9,[r8,#12]
+	str	r0,[r8],#-16
+	str	r1,[r8,#16+4]
+	str	r2,[r8,#16+8]
+	str	r3,[r8,#16+12]
+	str	r4,[r7],#16
+	str	r5,[r7,#-12]
+	str	r6,[r7,#-8]
+	str	r9,[r7,#-4]
+	teq	r7,r8
+	bne	.Linv
+	ldr	r0,[r11,#16]!		@ prefetch tp1
+	mov	r7,#0x80
+	mov	r8,#0x1b
+	orr	r7,r7,#0x8000
+	orr	r8,r8,#0x1b00
+	orr	r7,r7,r7,lsl#16
+	orr	r8,r8,r8,lsl#16
+	sub	r12,r12,#1
+	mvn	r9,r7
+	mov	r12,r12,lsl#2	@ (rounds-1)*4
+
+.Lmix:	and	r4,r0,r7
+	and	r1,r0,r9
+	sub	r4,r4,r4,lsr#7
+	and	r4,r4,r8
+	eor	r1,r4,r1,lsl#1	@ tp2
+
+	and	r4,r1,r7
+	and	r2,r1,r9
+	sub	r4,r4,r4,lsr#7
+	and	r4,r4,r8
+	eor	r2,r4,r2,lsl#1	@ tp4
+
+	and	r4,r2,r7
+	and	r3,r2,r9
+	sub	r4,r4,r4,lsr#7
+	and	r4,r4,r8
+	eor	r3,r4,r3,lsl#1	@ tp8
+
+	eor	r4,r1,r2
+	eor	r5,r0,r3		@ tp9
+	eor	r4,r4,r3		@ tpe
+	eor	r4,r4,r1,ror#24
+	eor	r4,r4,r5,ror#24	@ ^= ROTATE(tpb=tp9^tp2,8)
+	eor	r4,r4,r2,ror#16
+	eor	r4,r4,r5,ror#16	@ ^= ROTATE(tpd=tp9^tp4,16)
+	eor	r4,r4,r5,ror#8	@ ^= ROTATE(tp9,24)
+
+	ldr	r0,[r11,#4]		@ prefetch tp1
+	str	r4,[r11],#4
+	subs	r12,r12,#1
+	bne	.Lmix
+
+	mov	r0,#0
+#if __ARM_ARCH__>=5
+	ldmia	sp!,{r4-r12,pc}
+#else
+	ldmia   sp!,{r4-r12,lr}
+	tst	lr,#1
+	moveq	pc,lr			@ be binary compatible with V4, yet
+	.word	0xe12fff1e			@ interoperable with Thumb ISA:-)
+#endif
+.size	private_AES_set_decrypt_key,.-private_AES_set_decrypt_key
+
+.type	AES_Td,%object
+.align	5
+AES_Td:
+.word	0x51f4a750, 0x7e416553, 0x1a17a4c3, 0x3a275e96
+.word	0x3bab6bcb, 0x1f9d45f1, 0xacfa58ab, 0x4be30393
+.word	0x2030fa55, 0xad766df6, 0x88cc7691, 0xf5024c25
+.word	0x4fe5d7fc, 0xc52acbd7, 0x26354480, 0xb562a38f
+.word	0xdeb15a49, 0x25ba1b67, 0x45ea0e98, 0x5dfec0e1
+.word	0xc32f7502, 0x814cf012, 0x8d4697a3, 0x6bd3f9c6
+.word	0x038f5fe7, 0x15929c95, 0xbf6d7aeb, 0x955259da
+.word	0xd4be832d, 0x587421d3, 0x49e06929, 0x8ec9c844
+.word	0x75c2896a, 0xf48e7978, 0x99583e6b, 0x27b971dd
+.word	0xbee14fb6, 0xf088ad17, 0xc920ac66, 0x7dce3ab4
+.word	0x63df4a18, 0xe51a3182, 0x97513360, 0x62537f45
+.word	0xb16477e0, 0xbb6bae84, 0xfe81a01c, 0xf9082b94
+.word	0x70486858, 0x8f45fd19, 0x94de6c87, 0x527bf8b7
+.word	0xab73d323, 0x724b02e2, 0xe31f8f57, 0x6655ab2a
+.word	0xb2eb2807, 0x2fb5c203, 0x86c57b9a, 0xd33708a5
+.word	0x302887f2, 0x23bfa5b2, 0x02036aba, 0xed16825c
+.word	0x8acf1c2b, 0xa779b492, 0xf307f2f0, 0x4e69e2a1
+.word	0x65daf4cd, 0x0605bed5, 0xd134621f, 0xc4a6fe8a
+.word	0x342e539d, 0xa2f355a0, 0x058ae132, 0xa4f6eb75
+.word	0x0b83ec39, 0x4060efaa, 0x5e719f06, 0xbd6e1051
+.word	0x3e218af9, 0x96dd063d, 0xdd3e05ae, 0x4de6bd46
+.word	0x91548db5, 0x71c45d05, 0x0406d46f, 0x605015ff
+.word	0x1998fb24, 0xd6bde997, 0x894043cc, 0x67d99e77
+.word	0xb0e842bd, 0x07898b88, 0xe7195b38, 0x79c8eedb
+.word	0xa17c0a47, 0x7c420fe9, 0xf8841ec9, 0x00000000
+.word	0x09808683, 0x322bed48, 0x1e1170ac, 0x6c5a724e
+.word	0xfd0efffb, 0x0f853856, 0x3daed51e, 0x362d3927
+.word	0x0a0fd964, 0x685ca621, 0x9b5b54d1, 0x24362e3a
+.word	0x0c0a67b1, 0x9357e70f, 0xb4ee96d2, 0x1b9b919e
+.word	0x80c0c54f, 0x61dc20a2, 0x5a774b69, 0x1c121a16
+.word	0xe293ba0a, 0xc0a02ae5, 0x3c22e043, 0x121b171d
+.word	0x0e090d0b, 0xf28bc7ad, 0x2db6a8b9, 0x141ea9c8
+.word	0x57f11985, 0xaf75074c, 0xee99ddbb, 0xa37f60fd
+.word	0xf701269f, 0x5c72f5bc, 0x44663bc5, 0x5bfb7e34
+.word	0x8b432976, 0xcb23c6dc, 0xb6edfc68, 0xb8e4f163
+.word	0xd731dcca, 0x42638510, 0x13972240, 0x84c61120
+.word	0x854a247d, 0xd2bb3df8, 0xaef93211, 0xc729a16d
+.word	0x1d9e2f4b, 0xdcb230f3, 0x0d8652ec, 0x77c1e3d0
+.word	0x2bb3166c, 0xa970b999, 0x119448fa, 0x47e96422
+.word	0xa8fc8cc4, 0xa0f03f1a, 0x567d2cd8, 0x223390ef
+.word	0x87494ec7, 0xd938d1c1, 0x8ccaa2fe, 0x98d40b36
+.word	0xa6f581cf, 0xa57ade28, 0xdab78e26, 0x3fadbfa4
+.word	0x2c3a9de4, 0x5078920d, 0x6a5fcc9b, 0x547e4662
+.word	0xf68d13c2, 0x90d8b8e8, 0x2e39f75e, 0x82c3aff5
+.word	0x9f5d80be, 0x69d0937c, 0x6fd52da9, 0xcf2512b3
+.word	0xc8ac993b, 0x10187da7, 0xe89c636e, 0xdb3bbb7b
+.word	0xcd267809, 0x6e5918f4, 0xec9ab701, 0x834f9aa8
+.word	0xe6956e65, 0xaaffe67e, 0x21bccf08, 0xef15e8e6
+.word	0xbae79bd9, 0x4a6f36ce, 0xea9f09d4, 0x29b07cd6
+.word	0x31a4b2af, 0x2a3f2331, 0xc6a59430, 0x35a266c0
+.word	0x744ebc37, 0xfc82caa6, 0xe090d0b0, 0x33a7d815
+.word	0xf104984a, 0x41ecdaf7, 0x7fcd500e, 0x1791f62f
+.word	0x764dd68d, 0x43efb04d, 0xccaa4d54, 0xe49604df
+.word	0x9ed1b5e3, 0x4c6a881b, 0xc12c1fb8, 0x4665517f
+.word	0x9d5eea04, 0x018c355d, 0xfa877473, 0xfb0b412e
+.word	0xb3671d5a, 0x92dbd252, 0xe9105633, 0x6dd64713
+.word	0x9ad7618c, 0x37a10c7a, 0x59f8148e, 0xeb133c89
+.word	0xcea927ee, 0xb761c935, 0xe11ce5ed, 0x7a47b13c
+.word	0x9cd2df59, 0x55f2733f, 0x1814ce79, 0x73c737bf
+.word	0x53f7cdea, 0x5ffdaa5b, 0xdf3d6f14, 0x7844db86
+.word	0xcaaff381, 0xb968c43e, 0x3824342c, 0xc2a3405f
+.word	0x161dc372, 0xbce2250c, 0x283c498b, 0xff0d9541
+.word	0x39a80171, 0x080cb3de, 0xd8b4e49c, 0x6456c190
+.word	0x7bcb8461, 0xd532b670, 0x486c5c74, 0xd0b85742
+@ Td4[256]
+.byte	0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38
+.byte	0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb
+.byte	0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87
+.byte	0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb
+.byte	0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d
+.byte	0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e
+.byte	0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2
+.byte	0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25
+.byte	0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16
+.byte	0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92
+.byte	0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda
+.byte	0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84
+.byte	0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a
+.byte	0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06
+.byte	0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02
+.byte	0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b
+.byte	0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea
+.byte	0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73
+.byte	0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85
+.byte	0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e
+.byte	0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89
+.byte	0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b
+.byte	0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20
+.byte	0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4
+.byte	0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31
+.byte	0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f
+.byte	0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d
+.byte	0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef
+.byte	0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0
+.byte	0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61
+.byte	0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26
+.byte	0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
+.size	AES_Td,.-AES_Td
+
+@ void AES_decrypt(const unsigned char *in, unsigned char *out,
+@ 		 const AES_KEY *key) {
+.global AES_decrypt
+.type   AES_decrypt,%function
+.align	5
+AES_decrypt:
+	sub	r3,pc,#8		@ AES_decrypt
+	stmdb   sp!,{r1,r4-r12,lr}
+	mov	r12,r0		@ inp
+	mov	r11,r2
+	sub	r10,r3,#AES_decrypt-AES_Td		@ Td
+#if __ARM_ARCH__<7
+	ldrb	r0,[r12,#3]	@ load input data in endian-neutral
+	ldrb	r4,[r12,#2]	@ manner...
+	ldrb	r5,[r12,#1]
+	ldrb	r6,[r12,#0]
+	orr	r0,r0,r4,lsl#8
+	ldrb	r1,[r12,#7]
+	orr	r0,r0,r5,lsl#16
+	ldrb	r4,[r12,#6]
+	orr	r0,r0,r6,lsl#24
+	ldrb	r5,[r12,#5]
+	ldrb	r6,[r12,#4]
+	orr	r1,r1,r4,lsl#8
+	ldrb	r2,[r12,#11]
+	orr	r1,r1,r5,lsl#16
+	ldrb	r4,[r12,#10]
+	orr	r1,r1,r6,lsl#24
+	ldrb	r5,[r12,#9]
+	ldrb	r6,[r12,#8]
+	orr	r2,r2,r4,lsl#8
+	ldrb	r3,[r12,#15]
+	orr	r2,r2,r5,lsl#16
+	ldrb	r4,[r12,#14]
+	orr	r2,r2,r6,lsl#24
+	ldrb	r5,[r12,#13]
+	ldrb	r6,[r12,#12]
+	orr	r3,r3,r4,lsl#8
+	orr	r3,r3,r5,lsl#16
+	orr	r3,r3,r6,lsl#24
+#else
+	ldr	r0,[r12,#0]
+	ldr	r1,[r12,#4]
+	ldr	r2,[r12,#8]
+	ldr	r3,[r12,#12]
+#ifdef __ARMEL__
+	rev	r0,r0
+	rev	r1,r1
+	rev	r2,r2
+	rev	r3,r3
+#endif
+#endif
+	bl	_armv4_AES_decrypt
+
+	ldr	r12,[sp],#4		@ pop out
+#if __ARM_ARCH__>=7
+#ifdef __ARMEL__
+	rev	r0,r0
+	rev	r1,r1
+	rev	r2,r2
+	rev	r3,r3
+#endif
+	str	r0,[r12,#0]
+	str	r1,[r12,#4]
+	str	r2,[r12,#8]
+	str	r3,[r12,#12]
+#else
+	mov	r4,r0,lsr#24		@ write output in endian-neutral
+	mov	r5,r0,lsr#16		@ manner...
+	mov	r6,r0,lsr#8
+	strb	r4,[r12,#0]
+	strb	r5,[r12,#1]
+	mov	r4,r1,lsr#24
+	strb	r6,[r12,#2]
+	mov	r5,r1,lsr#16
+	strb	r0,[r12,#3]
+	mov	r6,r1,lsr#8
+	strb	r4,[r12,#4]
+	strb	r5,[r12,#5]
+	mov	r4,r2,lsr#24
+	strb	r6,[r12,#6]
+	mov	r5,r2,lsr#16
+	strb	r1,[r12,#7]
+	mov	r6,r2,lsr#8
+	strb	r4,[r12,#8]
+	strb	r5,[r12,#9]
+	mov	r4,r3,lsr#24
+	strb	r6,[r12,#10]
+	mov	r5,r3,lsr#16
+	strb	r2,[r12,#11]
+	mov	r6,r3,lsr#8
+	strb	r4,[r12,#12]
+	strb	r5,[r12,#13]
+	strb	r6,[r12,#14]
+	strb	r3,[r12,#15]
+#endif
+#if __ARM_ARCH__>=5
+	ldmia	sp!,{r4-r12,pc}
+#else
+	ldmia   sp!,{r4-r12,lr}
+	tst	lr,#1
+	moveq	pc,lr			@ be binary compatible with V4, yet
+	.word	0xe12fff1e			@ interoperable with Thumb ISA:-)
+#endif
+.size	AES_decrypt,.-AES_decrypt
+
+.type   _armv4_AES_decrypt,%function
+.align	2
+_armv4_AES_decrypt:
+	str	lr,[sp,#-4]!		@ push lr
+	ldmia	r11!,{r4-r7}
+	eor	r0,r0,r4
+	ldr	r12,[r11,#240-16]
+	eor	r1,r1,r5
+	eor	r2,r2,r6
+	eor	r3,r3,r7
+	sub	r12,r12,#1
+	mov	lr,#255
+
+	and	r7,lr,r0,lsr#16
+	and	r8,lr,r0,lsr#8
+	and	r9,lr,r0
+	mov	r0,r0,lsr#24
+.Ldec_loop:
+	ldr	r4,[r10,r7,lsl#2]	@ Td1[s0>>16]
+	and	r7,lr,r1		@ i0
+	ldr	r5,[r10,r8,lsl#2]	@ Td2[s0>>8]
+	and	r8,lr,r1,lsr#16
+	ldr	r6,[r10,r9,lsl#2]	@ Td3[s0>>0]
+	and	r9,lr,r1,lsr#8
+	ldr	r0,[r10,r0,lsl#2]	@ Td0[s0>>24]
+	mov	r1,r1,lsr#24
+
+	ldr	r7,[r10,r7,lsl#2]	@ Td3[s1>>0]
+	ldr	r8,[r10,r8,lsl#2]	@ Td1[s1>>16]
+	ldr	r9,[r10,r9,lsl#2]	@ Td2[s1>>8]
+	eor	r0,r0,r7,ror#24
+	ldr	r1,[r10,r1,lsl#2]	@ Td0[s1>>24]
+	and	r7,lr,r2,lsr#8	@ i0
+	eor	r5,r8,r5,ror#8
+	and	r8,lr,r2		@ i1
+	eor	r6,r9,r6,ror#8
+	and	r9,lr,r2,lsr#16
+	ldr	r7,[r10,r7,lsl#2]	@ Td2[s2>>8]
+	eor	r1,r1,r4,ror#8
+	ldr	r8,[r10,r8,lsl#2]	@ Td3[s2>>0]
+	mov	r2,r2,lsr#24
+
+	ldr	r9,[r10,r9,lsl#2]	@ Td1[s2>>16]
+	eor	r0,r0,r7,ror#16
+	ldr	r2,[r10,r2,lsl#2]	@ Td0[s2>>24]
+	and	r7,lr,r3,lsr#16	@ i0
+	eor	r1,r1,r8,ror#24
+	and	r8,lr,r3,lsr#8	@ i1
+	eor	r6,r9,r6,ror#8
+	and	r9,lr,r3		@ i2
+	ldr	r7,[r10,r7,lsl#2]	@ Td1[s3>>16]
+	eor	r2,r2,r5,ror#8
+	ldr	r8,[r10,r8,lsl#2]	@ Td2[s3>>8]
+	mov	r3,r3,lsr#24
+
+	ldr	r9,[r10,r9,lsl#2]	@ Td3[s3>>0]
+	eor	r0,r0,r7,ror#8
+	ldr	r7,[r11],#16
+	eor	r1,r1,r8,ror#16
+	ldr	r3,[r10,r3,lsl#2]	@ Td0[s3>>24]
+	eor	r2,r2,r9,ror#24
+
+	ldr	r4,[r11,#-12]
+	eor	r0,r0,r7
+	ldr	r5,[r11,#-8]
+	eor	r3,r3,r6,ror#8
+	ldr	r6,[r11,#-4]
+	and	r7,lr,r0,lsr#16
+	eor	r1,r1,r4
+	and	r8,lr,r0,lsr#8
+	eor	r2,r2,r5
+	and	r9,lr,r0
+	eor	r3,r3,r6
+	mov	r0,r0,lsr#24
+
+	subs	r12,r12,#1
+	bne	.Ldec_loop
+
+	add	r10,r10,#1024
+
+	ldr	r5,[r10,#0]		@ prefetch Td4
+	ldr	r6,[r10,#32]
+	ldr	r4,[r10,#64]
+	ldr	r5,[r10,#96]
+	ldr	r6,[r10,#128]
+	ldr	r4,[r10,#160]
+	ldr	r5,[r10,#192]
+	ldr	r6,[r10,#224]
+
+	ldrb	r0,[r10,r0]		@ Td4[s0>>24]
+	ldrb	r4,[r10,r7]		@ Td4[s0>>16]
+	and	r7,lr,r1		@ i0
+	ldrb	r5,[r10,r8]		@ Td4[s0>>8]
+	and	r8,lr,r1,lsr#16
+	ldrb	r6,[r10,r9]		@ Td4[s0>>0]
+	and	r9,lr,r1,lsr#8
+
+	ldrb	r7,[r10,r7]		@ Td4[s1>>0]
+	ldrb	r1,[r10,r1,lsr#24]	@ Td4[s1>>24]
+	ldrb	r8,[r10,r8]		@ Td4[s1>>16]
+	eor	r0,r7,r0,lsl#24
+	ldrb	r9,[r10,r9]		@ Td4[s1>>8]
+	eor	r1,r4,r1,lsl#8
+	and	r7,lr,r2,lsr#8	@ i0
+	eor	r5,r5,r8,lsl#8
+	and	r8,lr,r2		@ i1
+	ldrb	r7,[r10,r7]		@ Td4[s2>>8]
+	eor	r6,r6,r9,lsl#8
+	ldrb	r8,[r10,r8]		@ Td4[s2>>0]
+	and	r9,lr,r2,lsr#16
+
+	ldrb	r2,[r10,r2,lsr#24]	@ Td4[s2>>24]
+	eor	r0,r0,r7,lsl#8
+	ldrb	r9,[r10,r9]		@ Td4[s2>>16]
+	eor	r1,r8,r1,lsl#16
+	and	r7,lr,r3,lsr#16	@ i0
+	eor	r2,r5,r2,lsl#16
+	and	r8,lr,r3,lsr#8	@ i1
+	ldrb	r7,[r10,r7]		@ Td4[s3>>16]
+	eor	r6,r6,r9,lsl#16
+	ldrb	r8,[r10,r8]		@ Td4[s3>>8]
+	and	r9,lr,r3		@ i2
+
+	ldrb	r9,[r10,r9]		@ Td4[s3>>0]
+	ldrb	r3,[r10,r3,lsr#24]	@ Td4[s3>>24]
+	eor	r0,r0,r7,lsl#16
+	ldr	r7,[r11,#0]
+	eor	r1,r1,r8,lsl#8
+	ldr	r4,[r11,#4]
+	eor	r2,r9,r2,lsl#8
+	ldr	r5,[r11,#8]
+	eor	r3,r6,r3,lsl#24
+	ldr	r6,[r11,#12]
+
+	eor	r0,r0,r7
+	eor	r1,r1,r4
+	eor	r2,r2,r5
+	eor	r3,r3,r6
+
+	sub	r10,r10,#1024
+	ldr	pc,[sp],#4		@ pop and return
+.size	_armv4_AES_decrypt,.-_armv4_AES_decrypt
+.asciz	"AES for ARMv4, CRYPTOGAMS by <appro@openssl.org>"
+.align	2
diff --git a/jni/openssl/crypto/aes/asm/aes-ia64.S b/jni/openssl/crypto/aes/asm/aes-ia64.S
new file mode 100644
index 0000000..7f6c4c3
--- /dev/null
+++ b/jni/openssl/crypto/aes/asm/aes-ia64.S
@@ -0,0 +1,1123 @@
+// ====================================================================
+// Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+// project. Rights for redistribution and usage in source and binary
+// forms are granted according to the OpenSSL license.
+// ====================================================================
+//
+// What's wrong with compiler generated code? Compiler never uses
+// variable 'shr' which is pairable with 'extr'/'dep' instructions.
+// Then it uses 'zxt' which is an I-type, but can be replaced with
+// 'and' which in turn can be assigned to M-port [there are twice as
+// many M-ports as I-ports on Itanium 2]. By sacrificing a few
+// registers for small constants (255, 24 and 16) to be used with
+// 'shr' and 'and' instructions I can achieve better ILP, Instruction
+// Level Parallelism, and performance. This code outperforms GCC 3.3
+// generated code by over a factor of 2 (two), GCC 3.4 - by 70% and
+// HP C - by 40%. Measured in the best-case scenario, i.e. aligned
+// big-endian input, ECB timing on Itanium 2 is (18 + 13*rounds)
+// ticks per block, or 9.25 CPU cycles per byte for a 128-bit key.
+
+// Version 1.2 mitigates the hazard of cache-timing attacks by
+// a) compressing S-boxes from 8KB to 2KB+256B, b) scheduling
+// references to S-boxes for L2 cache latency, c) prefetching T[ed]4
+// prior to the last round. As a result performance dropped to (26 + 15*rounds)
+// ticks per block, or 11 cycles per byte processed with a 128-bit key.
+// This is a ~16% deterioration. For reference, the Itanium 2 L1 cache has a
+// 64-byte line size and L2 - 128 bytes...
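+//
+// For concreteness (working the stated figures through; a 128-bit key uses
+// 10 rounds): the original code needs 18 + 13*10 = 148 ticks per 16-byte
+// block, i.e. 148/16 = 9.25 cycles per byte, while version 1.2 needs
+// 26 + 15*10 = 176 ticks, i.e. 11 cycles per byte; the throughput ratio
+// 148/176 ~= 0.84 is the ~16% deterioration quoted above.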
+
+.ident	"aes-ia64.S, version 1.2"
+.ident	"IA-64 ISA artwork by Andy Polyakov <appro@fy.chalmers.se>"
+.explicit
+.text
+
+rk0=r8;     rk1=r9;
+
+pfssave=r2;
+lcsave=r10;
+prsave=r3;
+maskff=r11;
+twenty4=r14;
+sixteen=r15;
+
+te00=r16;   te11=r17;   te22=r18;   te33=r19;
+te01=r20;   te12=r21;   te23=r22;   te30=r23;
+te02=r24;   te13=r25;   te20=r26;   te31=r27;
+te03=r28;   te10=r29;   te21=r30;   te32=r31;
+
+// these are rotating...
+t0=r32;     s0=r33;
+t1=r34;     s1=r35;
+t2=r36;     s2=r37;
+t3=r38;     s3=r39;
+
+te0=r40;    te1=r41;    te2=r42;    te3=r43;
+
+#if defined(_HPUX_SOURCE) && !defined(_LP64)
+# define ADDP	addp4
+#else
+# define ADDP	add
+#endif
+
+// Offsets from Te0
+#define TE0	0
+#define TE2	2
+#if defined(_HPUX_SOURCE) || defined(B_ENDIAN)
+#define TE1	3
+#define TE3	1
+#else
+#define TE1	1
+#define TE3	3
+#endif
+
+// This implies that AES_KEY comprises 32-bit key schedule elements
+// even on LP64 platforms.
+#ifndef	KSZ
+# define KSZ	4
+# define LDKEY	ld4
+#endif
+
+.proc	_ia64_AES_encrypt#
+// Input:	rk0-rk1
+//		te0
+//		te3	as AES_KEY->rounds!!!
+//		s0-s3
+//		maskff,twenty4,sixteen
+// Output:	r16,r20,r24,r28 as s0-s3
+// Clobber:	r16-r31,rk0-rk1,r32-r43
+.align	32
+_ia64_AES_encrypt:
+	.prologue
+	.altrp	b6
+	.body
+{ .mmi;	alloc	r16=ar.pfs,12,0,0,8
+	LDKEY	t0=[rk0],2*KSZ
+	mov	pr.rot=1<<16	}
+{ .mmi;	LDKEY	t1=[rk1],2*KSZ
+	add	te1=TE1,te0
+	add	te3=-3,te3	};;
+{ .mib;	LDKEY	t2=[rk0],2*KSZ
+	mov	ar.ec=2		}
+{ .mib;	LDKEY	t3=[rk1],2*KSZ
+	add	te2=TE2,te0
+	brp.loop.imp	.Le_top,.Le_end-16	};;
+
+{ .mmi;	xor	s0=s0,t0
+	xor	s1=s1,t1
+	mov	ar.lc=te3	}
+{ .mmi;	xor	s2=s2,t2
+	xor	s3=s3,t3
+	add	te3=TE3,te0	};;
+
+.align	32
+.Le_top:
+{ .mmi;	(p0)	LDKEY	t0=[rk0],2*KSZ		// 0/0:rk[0]
+	(p0)	and	te33=s3,maskff		// 0/0:s3&0xff
+	(p0)	extr.u	te22=s2,8,8	}	// 0/0:s2>>8&0xff
+{ .mmi; (p0)	LDKEY	t1=[rk1],2*KSZ		// 0/1:rk[1]
+	(p0)	and	te30=s0,maskff		// 0/1:s0&0xff
+	(p0)	shr.u	te00=s0,twenty4	};;	// 0/0:s0>>24
+{ .mmi;	(p0)	LDKEY	t2=[rk0],2*KSZ		// 1/2:rk[2]
+	(p0)	shladd	te33=te33,3,te3		// 1/0:te0+s0>>24
+	(p0)	extr.u	te23=s3,8,8	}	// 1/1:s3>>8&0xff
+{ .mmi;	(p0)	LDKEY	t3=[rk1],2*KSZ		// 1/3:rk[3]
+	(p0)	shladd	te30=te30,3,te3		// 1/1:te3+s0
+	(p0)	shr.u	te01=s1,twenty4	};;	// 1/1:s1>>24
+{ .mmi;	(p0)	ld4	te33=[te33]		// 2/0:te3[s3&0xff]
+	(p0)	shladd	te22=te22,3,te2		// 2/0:te2+s2>>8&0xff
+	(p0)	extr.u	te20=s0,8,8	}	// 2/2:s0>>8&0xff
+{ .mmi;	(p0)	ld4	te30=[te30]		// 2/1:te3[s0]
+	(p0)	shladd	te23=te23,3,te2		// 2/1:te2+s3>>8
+	(p0)	shr.u	te02=s2,twenty4	};;	// 2/2:s2>>24
+{ .mmi;	(p0)	ld4	te22=[te22]		// 3/0:te2[s2>>8]
+	(p0)	shladd	te20=te20,3,te2		// 3/2:te2+s0>>8
+	(p0)	extr.u	te21=s1,8,8	}	// 3/3:s1>>8&0xff
+{ .mmi;	(p0)	ld4	te23=[te23]		// 3/1:te2[s3>>8]
+	(p0)	shladd	te00=te00,3,te0		// 3/0:te0+s0>>24
+	(p0)	shr.u	te03=s3,twenty4	};;	// 3/3:s3>>24
+{ .mmi;	(p0)	ld4	te20=[te20]		// 4/2:te2[s0>>8]
+	(p0)	shladd	te21=te21,3,te2		// 4/3:te3+s2
+	(p0)	extr.u	te11=s1,16,8	}	// 4/0:s1>>16&0xff
+{ .mmi;	(p0)	ld4	te00=[te00]		// 4/0:te0[s0>>24]
+	(p0)	shladd	te01=te01,3,te0		// 4/1:te0+s1>>24
+	(p0)	shr.u	te13=s3,sixteen	};;	// 4/2:s3>>16
+{ .mmi;	(p0)	ld4	te21=[te21]		// 5/3:te2[s1>>8]
+	(p0)	shladd	te11=te11,3,te1		// 5/0:te1+s1>>16
+	(p0)	extr.u	te12=s2,16,8	}	// 5/1:s2>>16&0xff
+{ .mmi;	(p0)	ld4	te01=[te01]		// 5/1:te0[s1>>24]
+	(p0)	shladd	te02=te02,3,te0		// 5/2:te0+s2>>24
+	(p0)	and	te31=s1,maskff	};;	// 5/2:s1&0xff
+{ .mmi;	(p0)	ld4	te11=[te11]		// 6/0:te1[s1>>16]
+	(p0)	shladd	te12=te12,3,te1		// 6/1:te1+s2>>16
+	(p0)	extr.u	te10=s0,16,8	}	// 6/3:s0>>16&0xff
+{ .mmi;	(p0)	ld4	te02=[te02]		// 6/2:te0[s2>>24]
+	(p0)	shladd	te03=te03,3,te0		// 6/3:te1+s0>>16
+	(p0)	and	te32=s2,maskff	};;	// 6/3:s2&0xff
+
+{ .mmi;	(p0)	ld4	te12=[te12]		// 7/1:te1[s2>>16]
+	(p0)	shladd	te31=te31,3,te3		// 7/2:te3+s1&0xff
+	(p0)	and	te13=te13,maskff}	// 7/2:s3>>16&0xff
+{ .mmi;	(p0)	ld4	te03=[te03]		// 7/3:te0[s3>>24]
+	(p0)	shladd	te32=te32,3,te3		// 7/3:te3+s2
+	(p0)	xor	t0=t0,te33	};;	// 7/0:
+{ .mmi;	(p0)	ld4	te31=[te31]		// 8/2:te3[s1]
+	(p0)	shladd	te13=te13,3,te1		// 8/2:te1+s3>>16
+	(p0)	xor	t0=t0,te22	}	// 8/0:
+{ .mmi;	(p0)	ld4	te32=[te32]		// 8/3:te3[s2]
+	(p0)	shladd	te10=te10,3,te1		// 8/3:te1+s0>>16
+	(p0)	xor	t1=t1,te30	};;	// 8/1:
+{ .mmi;	(p0)	ld4	te13=[te13]		// 9/2:te1[s3>>16]
+	(p0)	ld4	te10=[te10]		// 9/3:te1[s0>>16]
+	(p0)	xor	t0=t0,te00	};;	// 9/0:		!L2 scheduling
+{ .mmi;	(p0)	xor	t1=t1,te23		// 10[9]/1:	
+	(p0)	xor	t2=t2,te20		// 10[9]/2:
+	(p0)	xor	t3=t3,te21	};;	// 10[9]/3:
+{ .mmi;	(p0)	xor	t0=t0,te11		// 11[10]/0:done!
+	(p0)	xor	t1=t1,te01		// 11[10]/1:
+	(p0)	xor	t2=t2,te02	};;	// 11[10]/2:	!L2 scheduling
+{ .mmi;	(p0)	xor	t3=t3,te03		// 12[10]/3:
+	(p16)	cmp.eq	p0,p17=r0,r0 	};;	// 12[10]/clear (p17)
+{ .mmi;	(p0)	xor	t1=t1,te12		// 13[11]/1:done!
+	(p0)	xor	t2=t2,te31		// 13[11]/2:
+	(p0)	xor	t3=t3,te32	}	// 13[11]/3:
+{ .mmi;	(p17)	add	te0=2048,te0		// 13[11]/
+	(p17)	add	te1=2048+64-TE1,te1};;	// 13[11]/
+{ .mib;	(p0)	xor	t2=t2,te13		// 14[12]/2:done!
+	(p17)	add	te2=2048+128-TE2,te2}	// 14[12]/
+{ .mib;	(p0)	xor	t3=t3,te10		// 14[12]/3:done!
+	(p17)	add	te3=2048+192-TE3,te3	// 14[12]/
+	br.ctop.sptk	.Le_top		};;
+.Le_end:
+
+
+{ .mmi;	ld8	te12=[te0]		// prefetch Te4
+	ld8	te31=[te1]	}
+{ .mmi;	ld8	te10=[te2]
+	ld8	te32=[te3]	}
+
+{ .mmi;	LDKEY	t0=[rk0],2*KSZ		// 0/0:rk[0]
+	and	te33=s3,maskff		// 0/0:s3&0xff
+	extr.u	te22=s2,8,8	}	// 0/0:s2>>8&0xff
+{ .mmi; LDKEY	t1=[rk1],2*KSZ		// 0/1:rk[1]
+	and	te30=s0,maskff		// 0/1:s0&0xff
+	shr.u	te00=s0,twenty4	};;	// 0/0:s0>>24
+{ .mmi;	LDKEY	t2=[rk0],2*KSZ		// 1/2:rk[2]
+	add	te33=te33,te0		// 1/0:te0+s0>>24
+	extr.u	te23=s3,8,8	}	// 1/1:s3>>8&0xff
+{ .mmi;	LDKEY	t3=[rk1],2*KSZ		// 1/3:rk[3]
+	add	te30=te30,te0		// 1/1:te0+s0
+	shr.u	te01=s1,twenty4	};;	// 1/1:s1>>24
+{ .mmi;	ld1	te33=[te33]		// 2/0:te0[s3&0xff]
+	add	te22=te22,te0		// 2/0:te0+s2>>8&0xff
+	extr.u	te20=s0,8,8	}	// 2/2:s0>>8&0xff
+{ .mmi;	ld1	te30=[te30]		// 2/1:te0[s0]
+	add	te23=te23,te0		// 2/1:te0+s3>>8
+	shr.u	te02=s2,twenty4	};;	// 2/2:s2>>24
+{ .mmi;	ld1	te22=[te22]		// 3/0:te0[s2>>8]
+	add	te20=te20,te0		// 3/2:te0+s0>>8
+	extr.u	te21=s1,8,8	}	// 3/3:s1>>8&0xff
+{ .mmi;	ld1	te23=[te23]		// 3/1:te0[s3>>8]
+	add	te00=te00,te0		// 3/0:te0+s0>>24
+	shr.u	te03=s3,twenty4	};;	// 3/3:s3>>24
+{ .mmi;	ld1	te20=[te20]		// 4/2:te0[s0>>8]
+	add	te21=te21,te0		// 4/3:te0+s2
+	extr.u	te11=s1,16,8	}	// 4/0:s1>>16&0xff
+{ .mmi;	ld1	te00=[te00]		// 4/0:te0[s0>>24]
+	add	te01=te01,te0		// 4/1:te0+s1>>24
+	shr.u	te13=s3,sixteen	};;	// 4/2:s3>>16
+{ .mmi;	ld1	te21=[te21]		// 5/3:te0[s1>>8]
+	add	te11=te11,te0		// 5/0:te0+s1>>16
+	extr.u	te12=s2,16,8	}	// 5/1:s2>>16&0xff
+{ .mmi;	ld1	te01=[te01]		// 5/1:te0[s1>>24]
+	add	te02=te02,te0		// 5/2:te0+s2>>24
+	and	te31=s1,maskff	};;	// 5/2:s1&0xff
+{ .mmi;	ld1	te11=[te11]		// 6/0:te0[s1>>16]
+	add	te12=te12,te0		// 6/1:te0+s2>>16
+	extr.u	te10=s0,16,8	}	// 6/3:s0>>16&0xff
+{ .mmi;	ld1	te02=[te02]		// 6/2:te0[s2>>24]
+	add	te03=te03,te0		// 6/3:te0+s0>>16
+	and	te32=s2,maskff	};;	// 6/3:s2&0xff
+
+{ .mmi;	ld1	te12=[te12]		// 7/1:te0[s2>>16]
+	add	te31=te31,te0		// 7/2:te0+s1&0xff
+	dep	te33=te22,te33,8,8}	// 7/0:
+{ .mmi;	ld1	te03=[te03]		// 7/3:te0[s3>>24]
+	add	te32=te32,te0		// 7/3:te0+s2
+	and	te13=te13,maskff};;	// 7/2:s3>>16&0xff
+{ .mmi;	ld1	te31=[te31]		// 8/2:te0[s1]
+	add	te13=te13,te0		// 8/2:te0+s3>>16
+	dep	te30=te23,te30,8,8}	// 8/1:
+{ .mmi;	ld1	te32=[te32]		// 8/3:te0[s2]
+	add	te10=te10,te0		// 8/3:te0+s0>>16
+	shl	te00=te00,twenty4};;	// 8/0:
+{ .mii;	ld1	te13=[te13]		// 9/2:te0[s3>>16]
+	dep	te33=te11,te33,16,8	// 9/0:
+	shl	te01=te01,twenty4};;	// 9/1:
+{ .mii;	ld1	te10=[te10]		// 10/3:te0[s0>>16]
+	dep	te31=te20,te31,8,8	// 10/2:
+	shl	te02=te02,twenty4};;	// 10/2:
+{ .mii;	xor	t0=t0,te33		// 11/0:
+	dep	te32=te21,te32,8,8	// 11/3:
+	shl	te12=te12,sixteen};;	// 11/1:
+{ .mii;	xor	r16=t0,te00		// 12/0:done!
+	dep	te31=te13,te31,16,8	// 12/2:
+	shl	te03=te03,twenty4};;	// 12/3:
+{ .mmi;	xor	t1=t1,te01		// 13/1:
+	xor	t2=t2,te02		// 13/2:
+	dep	te32=te10,te32,16,8};;	// 13/3:
+{ .mmi;	xor	t1=t1,te30		// 14/1:
+	xor	r24=t2,te31		// 14/2:done!
+	xor	t3=t3,te32	};;	// 14/3:
+{ .mib;	xor	r20=t1,te12		// 15/1:done!
+	xor	r28=t3,te03		// 15/3:done!
+	br.ret.sptk	b6	};;
+.endp	_ia64_AES_encrypt#
+
+// void AES_encrypt (const void *in,void *out,const AES_KEY *key);
+.global	AES_encrypt#
+.proc	AES_encrypt#
+.align	32
+AES_encrypt:
+	.prologue
+	.save	ar.pfs,pfssave
+{ .mmi;	alloc	pfssave=ar.pfs,3,1,12,0
+	and	out0=3,in0
+	mov	r3=ip			}
+{ .mmi;	ADDP	in0=0,in0
+	mov	loc0=psr.um
+	ADDP	out11=KSZ*60,in2	};;	// &AES_KEY->rounds
+
+{ .mmi;	ld4	out11=[out11]			// AES_KEY->rounds
+	add	out8=(AES_Te#-AES_encrypt#),r3	// Te0
+	.save	pr,prsave
+	mov	prsave=pr		}
+{ .mmi;	rum	1<<3				// clear um.ac
+	.save	ar.lc,lcsave
+	mov	lcsave=ar.lc		};;
+
+	.body
+#if defined(_HPUX_SOURCE)	// HPUX is big-endian, cut 15+15 cycles...
+{ .mib; cmp.ne	p6,p0=out0,r0
+	add	out0=4,in0
+(p6)	br.dpnt.many	.Le_i_unaligned	};;
+
+{ .mmi;	ld4	out1=[in0],8		// s0
+	and	out9=3,in1
+	mov	twenty4=24		}
+{ .mmi;	ld4	out3=[out0],8		// s1
+	ADDP	rk0=0,in2
+	mov	sixteen=16		};;
+{ .mmi;	ld4	out5=[in0]		// s2
+	cmp.ne	p6,p0=out9,r0
+	mov	maskff=0xff		}
+{ .mmb;	ld4	out7=[out0]		// s3
+	ADDP	rk1=KSZ,in2
+	br.call.sptk.many	b6=_ia64_AES_encrypt	};;
+
+{ .mib;	ADDP	in0=4,in1
+	ADDP	in1=0,in1
+(p6)	br.spnt	.Le_o_unaligned		};;
+
+{ .mii;	mov	psr.um=loc0
+	mov	ar.pfs=pfssave
+	mov	ar.lc=lcsave		};;
+{ .mmi;	st4	[in1]=r16,8		// s0
+	st4	[in0]=r20,8		// s1
+	mov	pr=prsave,0x1ffff	};;
+{ .mmb;	st4	[in1]=r24		// s2
+	st4	[in0]=r28		// s3
+	br.ret.sptk.many	b0	};;
+#endif
+
+.align	32
+.Le_i_unaligned:
+{ .mmi;	add	out0=1,in0
+	add	out2=2,in0
+	add	out4=3,in0	};;
+{ .mmi;	ld1	r16=[in0],4
+	ld1	r17=[out0],4	}//;;
+{ .mmi;	ld1	r18=[out2],4
+	ld1	out1=[out4],4	};;	// s0
+{ .mmi;	ld1	r20=[in0],4
+	ld1	r21=[out0],4	}//;;
+{ .mmi;	ld1	r22=[out2],4
+	ld1	out3=[out4],4	};;	// s1
+{ .mmi;	ld1	r24=[in0],4
+	ld1	r25=[out0],4	}//;;
+{ .mmi;	ld1	r26=[out2],4
+	ld1	out5=[out4],4	};;	// s2
+{ .mmi;	ld1	r28=[in0]
+	ld1	r29=[out0]	}//;;
+{ .mmi;	ld1	r30=[out2]
+	ld1	out7=[out4]	};;	// s3
+
+{ .mii;
+	dep	out1=r16,out1,24,8	//;;
+	dep	out3=r20,out3,24,8	}//;;
+{ .mii;	ADDP	rk0=0,in2
+	dep	out5=r24,out5,24,8	//;;
+	dep	out7=r28,out7,24,8	};;
+{ .mii;	ADDP	rk1=KSZ,in2
+	dep	out1=r17,out1,16,8	//;;
+	dep	out3=r21,out3,16,8	}//;;
+{ .mii;	mov	twenty4=24
+	dep	out5=r25,out5,16,8	//;;
+	dep	out7=r29,out7,16,8	};;
+{ .mii;	mov	sixteen=16
+	dep	out1=r18,out1,8,8	//;;
+	dep	out3=r22,out3,8,8	}//;;
+{ .mii;	mov	maskff=0xff
+	dep	out5=r26,out5,8,8	//;;
+	dep	out7=r30,out7,8,8	};;
+
+{ .mib;	br.call.sptk.many	b6=_ia64_AES_encrypt	};;
+
+.Le_o_unaligned:
+{ .mii;	ADDP	out0=0,in1
+	extr.u	r17=r16,8,8			// s0
+	shr.u	r19=r16,twenty4		}//;;
+{ .mii;	ADDP	out1=1,in1
+	extr.u	r18=r16,16,8
+	shr.u	r23=r20,twenty4		}//;;	// s1
+{ .mii;	ADDP	out2=2,in1
+	extr.u	r21=r20,8,8
+	shr.u	r22=r20,sixteen		}//;;
+{ .mii;	ADDP	out3=3,in1
+	extr.u	r25=r24,8,8			// s2
+	shr.u	r27=r24,twenty4		};;
+{ .mii;	st1	[out3]=r16,4
+	extr.u	r26=r24,16,8
+	shr.u	r31=r28,twenty4		}//;;	// s3
+{ .mii;	st1	[out2]=r17,4
+	extr.u	r29=r28,8,8
+	shr.u	r30=r28,sixteen		}//;;
+
+{ .mmi;	st1	[out1]=r18,4
+	st1	[out0]=r19,4		};;
+{ .mmi;	st1	[out3]=r20,4
+	st1	[out2]=r21,4		}//;;
+{ .mmi;	st1	[out1]=r22,4
+	st1	[out0]=r23,4		};;
+{ .mmi;	st1	[out3]=r24,4
+	st1	[out2]=r25,4
+	mov	pr=prsave,0x1ffff	}//;;
+{ .mmi;	st1	[out1]=r26,4
+	st1	[out0]=r27,4
+	mov	ar.pfs=pfssave		};;
+{ .mmi;	st1	[out3]=r28
+	st1	[out2]=r29
+	mov	ar.lc=lcsave		}//;;
+{ .mmi;	st1	[out1]=r30
+	st1	[out0]=r31		}
+{ .mfb;	mov	psr.um=loc0			// restore user mask
+	br.ret.sptk.many	b0	};;
+.endp	AES_encrypt#
+
+// *AES_decrypt are autogenerated by the following script:
+#if 0
+#!/usr/bin/env perl
+print "// *AES_decrypt are autogenerated by the following script:\n#if 0\n";
+open(PROG,'<'.$0); while(<PROG>) { print; } close(PROG);
+print "#endif\n";
+while(<>) {
+	$process=1	if (/\.proc\s+_ia64_AES_encrypt/);
+	next		if (!$process);
+
+	#s/te00=s0/td00=s0/;	s/te00/td00/g;
+	s/te11=s1/td13=s3/;	s/te11/td13/g;
+	#s/te22=s2/td22=s2/;	s/te22/td22/g;
+	s/te33=s3/td31=s1/;	s/te33/td31/g;
+
+	#s/te01=s1/td01=s1/;	s/te01/td01/g;
+	s/te12=s2/td10=s0/;	s/te12/td10/g;
+	#s/te23=s3/td23=s3/;	s/te23/td23/g;
+	s/te30=s0/td32=s2/;	s/te30/td32/g;
+
+	#s/te02=s2/td02=s2/;	s/te02/td02/g;
+	s/te13=s3/td11=s1/;	s/te13/td11/g;
+	#s/te20=s0/td20=s0/;	s/te20/td20/g;
+	s/te31=s1/td33=s3/;	s/te31/td33/g;
+
+	#s/te03=s3/td03=s3/;	s/te03/td03/g;
+	s/te10=s0/td12=s2/;	s/te10/td12/g;
+	#s/te21=s1/td21=s1/;	s/te21/td21/g;
+	s/te32=s2/td30=s0/;	s/te32/td30/g;
+
+	s/td/te/g;
+
+	s/AES_encrypt/AES_decrypt/g;
+	s/\.Le_/.Ld_/g;
+	s/AES_Te#/AES_Td#/g;
+
+	print;
+
+	exit		if (/\.endp\s+AES_decrypt/);
+}
+#endif
+.proc	_ia64_AES_decrypt#
+// Input:	rk0-rk1
+//		te0
+//		te3	as AES_KEY->rounds!!!
+//		s0-s3
+//		maskff,twenty4,sixteen
+// Output:	r16,r20,r24,r28 as s0-s3
+// Clobber:	r16-r31,rk0-rk1,r32-r43
+.align	32
+_ia64_AES_decrypt:
+	.prologue
+	.altrp	b6
+	.body
+{ .mmi;	alloc	r16=ar.pfs,12,0,0,8
+	LDKEY	t0=[rk0],2*KSZ
+	mov	pr.rot=1<<16	}
+{ .mmi;	LDKEY	t1=[rk1],2*KSZ
+	add	te1=TE1,te0
+	add	te3=-3,te3	};;
+{ .mib;	LDKEY	t2=[rk0],2*KSZ
+	mov	ar.ec=2		}
+{ .mib;	LDKEY	t3=[rk1],2*KSZ
+	add	te2=TE2,te0
+	brp.loop.imp	.Ld_top,.Ld_end-16	};;
+
+{ .mmi;	xor	s0=s0,t0
+	xor	s1=s1,t1
+	mov	ar.lc=te3	}
+{ .mmi;	xor	s2=s2,t2
+	xor	s3=s3,t3
+	add	te3=TE3,te0	};;
+
+.align	32
+.Ld_top:
+{ .mmi;	(p0)	LDKEY	t0=[rk0],2*KSZ		// 0/0:rk[0]
+	(p0)	and	te31=s1,maskff		// 0/0:s3&0xff
+	(p0)	extr.u	te22=s2,8,8	}	// 0/0:s2>>8&0xff
+{ .mmi; (p0)	LDKEY	t1=[rk1],2*KSZ		// 0/1:rk[1]
+	(p0)	and	te32=s2,maskff		// 0/1:s0&0xff
+	(p0)	shr.u	te00=s0,twenty4	};;	// 0/0:s0>>24
+{ .mmi;	(p0)	LDKEY	t2=[rk0],2*KSZ		// 1/2:rk[2]
+	(p0)	shladd	te31=te31,3,te3		// 1/0:te0+s0>>24
+	(p0)	extr.u	te23=s3,8,8	}	// 1/1:s3>>8&0xff
+{ .mmi;	(p0)	LDKEY	t3=[rk1],2*KSZ		// 1/3:rk[3]
+	(p0)	shladd	te32=te32,3,te3		// 1/1:te3+s0
+	(p0)	shr.u	te01=s1,twenty4	};;	// 1/1:s1>>24
+{ .mmi;	(p0)	ld4	te31=[te31]		// 2/0:te3[s3&0xff]
+	(p0)	shladd	te22=te22,3,te2		// 2/0:te2+s2>>8&0xff
+	(p0)	extr.u	te20=s0,8,8	}	// 2/2:s0>>8&0xff
+{ .mmi;	(p0)	ld4	te32=[te32]		// 2/1:te3[s0]
+	(p0)	shladd	te23=te23,3,te2		// 2/1:te2+s3>>8
+	(p0)	shr.u	te02=s2,twenty4	};;	// 2/2:s2>>24
+{ .mmi;	(p0)	ld4	te22=[te22]		// 3/0:te2[s2>>8]
+	(p0)	shladd	te20=te20,3,te2		// 3/2:te2+s0>>8
+	(p0)	extr.u	te21=s1,8,8	}	// 3/3:s1>>8&0xff
+{ .mmi;	(p0)	ld4	te23=[te23]		// 3/1:te2[s3>>8]
+	(p0)	shladd	te00=te00,3,te0		// 3/0:te0+s0>>24
+	(p0)	shr.u	te03=s3,twenty4	};;	// 3/3:s3>>24
+{ .mmi;	(p0)	ld4	te20=[te20]		// 4/2:te2[s0>>8]
+	(p0)	shladd	te21=te21,3,te2		// 4/3:te3+s2
+	(p0)	extr.u	te13=s3,16,8	}	// 4/0:s1>>16&0xff
+{ .mmi;	(p0)	ld4	te00=[te00]		// 4/0:te0[s0>>24]
+	(p0)	shladd	te01=te01,3,te0		// 4/1:te0+s1>>24
+	(p0)	shr.u	te11=s1,sixteen	};;	// 4/2:s3>>16
+{ .mmi;	(p0)	ld4	te21=[te21]		// 5/3:te2[s1>>8]
+	(p0)	shladd	te13=te13,3,te1		// 5/0:te1+s1>>16
+	(p0)	extr.u	te10=s0,16,8	}	// 5/1:s2>>16&0xff
+{ .mmi;	(p0)	ld4	te01=[te01]		// 5/1:te0[s1>>24]
+	(p0)	shladd	te02=te02,3,te0		// 5/2:te0+s2>>24
+	(p0)	and	te33=s3,maskff	};;	// 5/2:s1&0xff
+{ .mmi;	(p0)	ld4	te13=[te13]		// 6/0:te1[s1>>16]
+	(p0)	shladd	te10=te10,3,te1		// 6/1:te1+s2>>16
+	(p0)	extr.u	te12=s2,16,8	}	// 6/3:s0>>16&0xff
+{ .mmi;	(p0)	ld4	te02=[te02]		// 6/2:te0[s2>>24]
+	(p0)	shladd	te03=te03,3,te0		// 6/3:te1+s0>>16
+	(p0)	and	te30=s0,maskff	};;	// 6/3:s2&0xff
+
+{ .mmi;	(p0)	ld4	te10=[te10]		// 7/1:te1[s2>>16]
+	(p0)	shladd	te33=te33,3,te3		// 7/2:te3+s1&0xff
+	(p0)	and	te11=te11,maskff}	// 7/2:s3>>16&0xff
+{ .mmi;	(p0)	ld4	te03=[te03]		// 7/3:te0[s3>>24]
+	(p0)	shladd	te30=te30,3,te3		// 7/3:te3+s2
+	(p0)	xor	t0=t0,te31	};;	// 7/0:
+{ .mmi;	(p0)	ld4	te33=[te33]		// 8/2:te3[s1]
+	(p0)	shladd	te11=te11,3,te1		// 8/2:te1+s3>>16
+	(p0)	xor	t0=t0,te22	}	// 8/0:
+{ .mmi;	(p0)	ld4	te30=[te30]		// 8/3:te3[s2]
+	(p0)	shladd	te12=te12,3,te1		// 8/3:te1+s0>>16
+	(p0)	xor	t1=t1,te32	};;	// 8/1:
+{ .mmi;	(p0)	ld4	te11=[te11]		// 9/2:te1[s3>>16]
+	(p0)	ld4	te12=[te12]		// 9/3:te1[s0>>16]
+	(p0)	xor	t0=t0,te00	};;	// 9/0:		!L2 scheduling
+{ .mmi;	(p0)	xor	t1=t1,te23		// 10[9]/1:	
+	(p0)	xor	t2=t2,te20		// 10[9]/2:
+	(p0)	xor	t3=t3,te21	};;	// 10[9]/3:
+{ .mmi;	(p0)	xor	t0=t0,te13		// 11[10]/0:done!
+	(p0)	xor	t1=t1,te01		// 11[10]/1:
+	(p0)	xor	t2=t2,te02	};;	// 11[10]/2:	!L2 scheduling
+{ .mmi;	(p0)	xor	t3=t3,te03		// 12[10]/3:
+	(p16)	cmp.eq	p0,p17=r0,r0 	};;	// 12[10]/clear (p17)
+{ .mmi;	(p0)	xor	t1=t1,te10		// 13[11]/1:done!
+	(p0)	xor	t2=t2,te33		// 13[11]/2:
+	(p0)	xor	t3=t3,te30	}	// 13[11]/3:
+{ .mmi;	(p17)	add	te0=2048,te0		// 13[11]/
+	(p17)	add	te1=2048+64-TE1,te1};;	// 13[11]/
+{ .mib;	(p0)	xor	t2=t2,te11		// 14[12]/2:done!
+	(p17)	add	te2=2048+128-TE2,te2}	// 14[12]/
+{ .mib;	(p0)	xor	t3=t3,te12		// 14[12]/3:done!
+	(p17)	add	te3=2048+192-TE3,te3	// 14[12]/
+	br.ctop.sptk	.Ld_top		};;
+.Ld_end:
+
+
+{ .mmi;	ld8	te10=[te0]		// prefetch Td4
+	ld8	te33=[te1]	}
+{ .mmi;	ld8	te12=[te2]
+	ld8	te30=[te3]	}
+
+{ .mmi;	LDKEY	t0=[rk0],2*KSZ		// 0/0:rk[0]
+	and	te31=s1,maskff		// 0/0:s3&0xff
+	extr.u	te22=s2,8,8	}	// 0/0:s2>>8&0xff
+{ .mmi; LDKEY	t1=[rk1],2*KSZ		// 0/1:rk[1]
+	and	te32=s2,maskff		// 0/1:s0&0xff
+	shr.u	te00=s0,twenty4	};;	// 0/0:s0>>24
+{ .mmi;	LDKEY	t2=[rk0],2*KSZ		// 1/2:rk[2]
+	add	te31=te31,te0		// 1/0:te0+s0>>24
+	extr.u	te23=s3,8,8	}	// 1/1:s3>>8&0xff
+{ .mmi;	LDKEY	t3=[rk1],2*KSZ		// 1/3:rk[3]
+	add	te32=te32,te0		// 1/1:te0+s0
+	shr.u	te01=s1,twenty4	};;	// 1/1:s1>>24
+{ .mmi;	ld1	te31=[te31]		// 2/0:te0[s3&0xff]
+	add	te22=te22,te0		// 2/0:te0+s2>>8&0xff
+	extr.u	te20=s0,8,8	}	// 2/2:s0>>8&0xff
+{ .mmi;	ld1	te32=[te32]		// 2/1:te0[s0]
+	add	te23=te23,te0		// 2/1:te0+s3>>8
+	shr.u	te02=s2,twenty4	};;	// 2/2:s2>>24
+{ .mmi;	ld1	te22=[te22]		// 3/0:te0[s2>>8]
+	add	te20=te20,te0		// 3/2:te0+s0>>8
+	extr.u	te21=s1,8,8	}	// 3/3:s1>>8&0xff
+{ .mmi;	ld1	te23=[te23]		// 3/1:te0[s3>>8]
+	add	te00=te00,te0		// 3/0:te0+s0>>24
+	shr.u	te03=s3,twenty4	};;	// 3/3:s3>>24
+{ .mmi;	ld1	te20=[te20]		// 4/2:te0[s0>>8]
+	add	te21=te21,te0		// 4/3:te0+s2
+	extr.u	te13=s3,16,8	}	// 4/0:s1>>16&0xff
+{ .mmi;	ld1	te00=[te00]		// 4/0:te0[s0>>24]
+	add	te01=te01,te0		// 4/1:te0+s1>>24
+	shr.u	te11=s1,sixteen	};;	// 4/2:s3>>16
+{ .mmi;	ld1	te21=[te21]		// 5/3:te0[s1>>8]
+	add	te13=te13,te0		// 5/0:te0+s1>>16
+	extr.u	te10=s0,16,8	}	// 5/1:s2>>16&0xff
+{ .mmi;	ld1	te01=[te01]		// 5/1:te0[s1>>24]
+	add	te02=te02,te0		// 5/2:te0+s2>>24
+	and	te33=s3,maskff	};;	// 5/2:s1&0xff
+{ .mmi;	ld1	te13=[te13]		// 6/0:te0[s1>>16]
+	add	te10=te10,te0		// 6/1:te0+s2>>16
+	extr.u	te12=s2,16,8	}	// 6/3:s0>>16&0xff
+{ .mmi;	ld1	te02=[te02]		// 6/2:te0[s2>>24]
+	add	te03=te03,te0		// 6/3:te0+s0>>16
+	and	te30=s0,maskff	};;	// 6/3:s2&0xff
+
+{ .mmi;	ld1	te10=[te10]		// 7/1:te0[s2>>16]
+	add	te33=te33,te0		// 7/2:te0+s1&0xff
+	dep	te31=te22,te31,8,8}	// 7/0:
+{ .mmi;	ld1	te03=[te03]		// 7/3:te0[s3>>24]
+	add	te30=te30,te0		// 7/3:te0+s2
+	and	te11=te11,maskff};;	// 7/2:s3>>16&0xff
+{ .mmi;	ld1	te33=[te33]		// 8/2:te0[s1]
+	add	te11=te11,te0		// 8/2:te0+s3>>16
+	dep	te32=te23,te32,8,8}	// 8/1:
+{ .mmi;	ld1	te30=[te30]		// 8/3:te0[s2]
+	add	te12=te12,te0		// 8/3:te0+s0>>16
+	shl	te00=te00,twenty4};;	// 8/0:
+{ .mii;	ld1	te11=[te11]		// 9/2:te0[s3>>16]
+	dep	te31=te13,te31,16,8	// 9/0:
+	shl	te01=te01,twenty4};;	// 9/1:
+{ .mii;	ld1	te12=[te12]		// 10/3:te0[s0>>16]
+	dep	te33=te20,te33,8,8	// 10/2:
+	shl	te02=te02,twenty4};;	// 10/2:
+{ .mii;	xor	t0=t0,te31		// 11/0:
+	dep	te30=te21,te30,8,8	// 11/3:
+	shl	te10=te10,sixteen};;	// 11/1:
+{ .mii;	xor	r16=t0,te00		// 12/0:done!
+	dep	te33=te11,te33,16,8	// 12/2:
+	shl	te03=te03,twenty4};;	// 12/3:
+{ .mmi;	xor	t1=t1,te01		// 13/1:
+	xor	t2=t2,te02		// 13/2:
+	dep	te30=te12,te30,16,8};;	// 13/3:
+{ .mmi;	xor	t1=t1,te32		// 14/1:
+	xor	r24=t2,te33		// 14/2:done!
+	xor	t3=t3,te30	};;	// 14/3:
+{ .mib;	xor	r20=t1,te10		// 15/1:done!
+	xor	r28=t3,te03		// 15/3:done!
+	br.ret.sptk	b6	};;
+.endp	_ia64_AES_decrypt#
+
+// void AES_decrypt (const void *in,void *out,const AES_KEY *key);
+.global	AES_decrypt#
+.proc	AES_decrypt#
+.align	32
+AES_decrypt:
+	.prologue
+	.save	ar.pfs,pfssave
+{ .mmi;	alloc	pfssave=ar.pfs,3,1,12,0
+	and	out0=3,in0
+	mov	r3=ip			}
+{ .mmi;	ADDP	in0=0,in0
+	mov	loc0=psr.um
+	ADDP	out11=KSZ*60,in2	};;	// &AES_KEY->rounds
+
+{ .mmi;	ld4	out11=[out11]			// AES_KEY->rounds
+	add	out8=(AES_Td#-AES_decrypt#),r3	// Te0
+	.save	pr,prsave
+	mov	prsave=pr		}
+{ .mmi;	rum	1<<3				// clear um.ac
+	.save	ar.lc,lcsave
+	mov	lcsave=ar.lc		};;
+
+	.body
+#if defined(_HPUX_SOURCE)	// HPUX is big-endian, cut 15+15 cycles...
+{ .mib; cmp.ne	p6,p0=out0,r0
+	add	out0=4,in0
+(p6)	br.dpnt.many	.Ld_i_unaligned	};;
+
+{ .mmi;	ld4	out1=[in0],8		// s0
+	and	out9=3,in1
+	mov	twenty4=24		}
+{ .mmi;	ld4	out3=[out0],8		// s1
+	ADDP	rk0=0,in2
+	mov	sixteen=16		};;
+{ .mmi;	ld4	out5=[in0]		// s2
+	cmp.ne	p6,p0=out9,r0
+	mov	maskff=0xff		}
+{ .mmb;	ld4	out7=[out0]		// s3
+	ADDP	rk1=KSZ,in2
+	br.call.sptk.many	b6=_ia64_AES_decrypt	};;
+
+{ .mib;	ADDP	in0=4,in1
+	ADDP	in1=0,in1
+(p6)	br.spnt	.Ld_o_unaligned		};;
+
+{ .mii;	mov	psr.um=loc0
+	mov	ar.pfs=pfssave
+	mov	ar.lc=lcsave		};;
+{ .mmi;	st4	[in1]=r16,8		// s0
+	st4	[in0]=r20,8		// s1
+	mov	pr=prsave,0x1ffff	};;
+{ .mmb;	st4	[in1]=r24		// s2
+	st4	[in0]=r28		// s3
+	br.ret.sptk.many	b0	};;
+#endif
+
+.align	32
+.Ld_i_unaligned:
+{ .mmi;	add	out0=1,in0
+	add	out2=2,in0
+	add	out4=3,in0	};;
+{ .mmi;	ld1	r16=[in0],4
+	ld1	r17=[out0],4	}//;;
+{ .mmi;	ld1	r18=[out2],4
+	ld1	out1=[out4],4	};;	// s0
+{ .mmi;	ld1	r20=[in0],4
+	ld1	r21=[out0],4	}//;;
+{ .mmi;	ld1	r22=[out2],4
+	ld1	out3=[out4],4	};;	// s1
+{ .mmi;	ld1	r24=[in0],4
+	ld1	r25=[out0],4	}//;;
+{ .mmi;	ld1	r26=[out2],4
+	ld1	out5=[out4],4	};;	// s2
+{ .mmi;	ld1	r28=[in0]
+	ld1	r29=[out0]	}//;;
+{ .mmi;	ld1	r30=[out2]
+	ld1	out7=[out4]	};;	// s3
+
+{ .mii;
+	dep	out1=r16,out1,24,8	//;;
+	dep	out3=r20,out3,24,8	}//;;
+{ .mii;	ADDP	rk0=0,in2
+	dep	out5=r24,out5,24,8	//;;
+	dep	out7=r28,out7,24,8	};;
+{ .mii;	ADDP	rk1=KSZ,in2
+	dep	out1=r17,out1,16,8	//;;
+	dep	out3=r21,out3,16,8	}//;;
+{ .mii;	mov	twenty4=24
+	dep	out5=r25,out5,16,8	//;;
+	dep	out7=r29,out7,16,8	};;
+{ .mii;	mov	sixteen=16
+	dep	out1=r18,out1,8,8	//;;
+	dep	out3=r22,out3,8,8	}//;;
+{ .mii;	mov	maskff=0xff
+	dep	out5=r26,out5,8,8	//;;
+	dep	out7=r30,out7,8,8	};;
+
+{ .mib;	br.call.sptk.many	b6=_ia64_AES_decrypt	};;
+
+.Ld_o_unaligned:
+{ .mii;	ADDP	out0=0,in1
+	extr.u	r17=r16,8,8			// s0
+	shr.u	r19=r16,twenty4		}//;;
+{ .mii;	ADDP	out1=1,in1
+	extr.u	r18=r16,16,8
+	shr.u	r23=r20,twenty4		}//;;	// s1
+{ .mii;	ADDP	out2=2,in1
+	extr.u	r21=r20,8,8
+	shr.u	r22=r20,sixteen		}//;;
+{ .mii;	ADDP	out3=3,in1
+	extr.u	r25=r24,8,8			// s2
+	shr.u	r27=r24,twenty4		};;
+{ .mii;	st1	[out3]=r16,4
+	extr.u	r26=r24,16,8
+	shr.u	r31=r28,twenty4		}//;;	// s3
+{ .mii;	st1	[out2]=r17,4
+	extr.u	r29=r28,8,8
+	shr.u	r30=r28,sixteen		}//;;
+
+{ .mmi;	st1	[out1]=r18,4
+	st1	[out0]=r19,4		};;
+{ .mmi;	st1	[out3]=r20,4
+	st1	[out2]=r21,4		}//;;
+{ .mmi;	st1	[out1]=r22,4
+	st1	[out0]=r23,4		};;
+{ .mmi;	st1	[out3]=r24,4
+	st1	[out2]=r25,4
+	mov	pr=prsave,0x1ffff	}//;;
+{ .mmi;	st1	[out1]=r26,4
+	st1	[out0]=r27,4
+	mov	ar.pfs=pfssave		};;
+{ .mmi;	st1	[out3]=r28
+	st1	[out2]=r29
+	mov	ar.lc=lcsave		}//;;
+{ .mmi;	st1	[out1]=r30
+	st1	[out0]=r31		}
+{ .mfb;	mov	psr.um=loc0			// restore user mask
+	br.ret.sptk.many	b0	};;
+.endp	AES_decrypt#
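For orientation, the routine above is the public AES_decrypt entry point whose C prototype is noted in the comment at its top. A minimal caller-side sketch of that API (single raw block, purely illustrative; the names and return convention are the standard <openssl/aes.h> ones, and any error handling beyond key setup is omitted):

```c
#include <openssl/aes.h>

/* Illustrative only: decrypt one 16-byte block with a 128-bit key. */
int decrypt_one_block(const unsigned char key_bytes[16],
                      const unsigned char in[AES_BLOCK_SIZE],
                      unsigned char out[AES_BLOCK_SIZE])
{
    AES_KEY key;

    if (AES_set_decrypt_key(key_bytes, 128, &key) != 0)
        return -1;                  /* key schedule setup failed */
    AES_decrypt(in, out, &key);     /* one block, no chaining mode */
    return 0;
}
```

Real callers would normally go through a chaining-mode helper such as AES_cbc_encrypt rather than raw single-block calls.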
+
+// leave it in .text segment...
+.align	64
+.global	AES_Te#
+.type	AES_Te#,@object
+AES_Te:	data4	0xc66363a5,0xc66363a5, 0xf87c7c84,0xf87c7c84
+	data4	0xee777799,0xee777799, 0xf67b7b8d,0xf67b7b8d
+	data4	0xfff2f20d,0xfff2f20d, 0xd66b6bbd,0xd66b6bbd
+	data4	0xde6f6fb1,0xde6f6fb1, 0x91c5c554,0x91c5c554
+	data4	0x60303050,0x60303050, 0x02010103,0x02010103
+	data4	0xce6767a9,0xce6767a9, 0x562b2b7d,0x562b2b7d
+	data4	0xe7fefe19,0xe7fefe19, 0xb5d7d762,0xb5d7d762
+	data4	0x4dababe6,0x4dababe6, 0xec76769a,0xec76769a
+	data4	0x8fcaca45,0x8fcaca45, 0x1f82829d,0x1f82829d
+	data4	0x89c9c940,0x89c9c940, 0xfa7d7d87,0xfa7d7d87
+	data4	0xeffafa15,0xeffafa15, 0xb25959eb,0xb25959eb
+	data4	0x8e4747c9,0x8e4747c9, 0xfbf0f00b,0xfbf0f00b
+	data4	0x41adadec,0x41adadec, 0xb3d4d467,0xb3d4d467
+	data4	0x5fa2a2fd,0x5fa2a2fd, 0x45afafea,0x45afafea
+	data4	0x239c9cbf,0x239c9cbf, 0x53a4a4f7,0x53a4a4f7
+	data4	0xe4727296,0xe4727296, 0x9bc0c05b,0x9bc0c05b
+	data4	0x75b7b7c2,0x75b7b7c2, 0xe1fdfd1c,0xe1fdfd1c
+	data4	0x3d9393ae,0x3d9393ae, 0x4c26266a,0x4c26266a
+	data4	0x6c36365a,0x6c36365a, 0x7e3f3f41,0x7e3f3f41
+	data4	0xf5f7f702,0xf5f7f702, 0x83cccc4f,0x83cccc4f
+	data4	0x6834345c,0x6834345c, 0x51a5a5f4,0x51a5a5f4
+	data4	0xd1e5e534,0xd1e5e534, 0xf9f1f108,0xf9f1f108
+	data4	0xe2717193,0xe2717193, 0xabd8d873,0xabd8d873
+	data4	0x62313153,0x62313153, 0x2a15153f,0x2a15153f
+	data4	0x0804040c,0x0804040c, 0x95c7c752,0x95c7c752
+	data4	0x46232365,0x46232365, 0x9dc3c35e,0x9dc3c35e
+	data4	0x30181828,0x30181828, 0x379696a1,0x379696a1
+	data4	0x0a05050f,0x0a05050f, 0x2f9a9ab5,0x2f9a9ab5
+	data4	0x0e070709,0x0e070709, 0x24121236,0x24121236
+	data4	0x1b80809b,0x1b80809b, 0xdfe2e23d,0xdfe2e23d
+	data4	0xcdebeb26,0xcdebeb26, 0x4e272769,0x4e272769
+	data4	0x7fb2b2cd,0x7fb2b2cd, 0xea75759f,0xea75759f
+	data4	0x1209091b,0x1209091b, 0x1d83839e,0x1d83839e
+	data4	0x582c2c74,0x582c2c74, 0x341a1a2e,0x341a1a2e
+	data4	0x361b1b2d,0x361b1b2d, 0xdc6e6eb2,0xdc6e6eb2
+	data4	0xb45a5aee,0xb45a5aee, 0x5ba0a0fb,0x5ba0a0fb
+	data4	0xa45252f6,0xa45252f6, 0x763b3b4d,0x763b3b4d
+	data4	0xb7d6d661,0xb7d6d661, 0x7db3b3ce,0x7db3b3ce
+	data4	0x5229297b,0x5229297b, 0xdde3e33e,0xdde3e33e
+	data4	0x5e2f2f71,0x5e2f2f71, 0x13848497,0x13848497
+	data4	0xa65353f5,0xa65353f5, 0xb9d1d168,0xb9d1d168
+	data4	0x00000000,0x00000000, 0xc1eded2c,0xc1eded2c
+	data4	0x40202060,0x40202060, 0xe3fcfc1f,0xe3fcfc1f
+	data4	0x79b1b1c8,0x79b1b1c8, 0xb65b5bed,0xb65b5bed
+	data4	0xd46a6abe,0xd46a6abe, 0x8dcbcb46,0x8dcbcb46
+	data4	0x67bebed9,0x67bebed9, 0x7239394b,0x7239394b
+	data4	0x944a4ade,0x944a4ade, 0x984c4cd4,0x984c4cd4
+	data4	0xb05858e8,0xb05858e8, 0x85cfcf4a,0x85cfcf4a
+	data4	0xbbd0d06b,0xbbd0d06b, 0xc5efef2a,0xc5efef2a
+	data4	0x4faaaae5,0x4faaaae5, 0xedfbfb16,0xedfbfb16
+	data4	0x864343c5,0x864343c5, 0x9a4d4dd7,0x9a4d4dd7
+	data4	0x66333355,0x66333355, 0x11858594,0x11858594
+	data4	0x8a4545cf,0x8a4545cf, 0xe9f9f910,0xe9f9f910
+	data4	0x04020206,0x04020206, 0xfe7f7f81,0xfe7f7f81
+	data4	0xa05050f0,0xa05050f0, 0x783c3c44,0x783c3c44
+	data4	0x259f9fba,0x259f9fba, 0x4ba8a8e3,0x4ba8a8e3
+	data4	0xa25151f3,0xa25151f3, 0x5da3a3fe,0x5da3a3fe
+	data4	0x804040c0,0x804040c0, 0x058f8f8a,0x058f8f8a
+	data4	0x3f9292ad,0x3f9292ad, 0x219d9dbc,0x219d9dbc
+	data4	0x70383848,0x70383848, 0xf1f5f504,0xf1f5f504
+	data4	0x63bcbcdf,0x63bcbcdf, 0x77b6b6c1,0x77b6b6c1
+	data4	0xafdada75,0xafdada75, 0x42212163,0x42212163
+	data4	0x20101030,0x20101030, 0xe5ffff1a,0xe5ffff1a
+	data4	0xfdf3f30e,0xfdf3f30e, 0xbfd2d26d,0xbfd2d26d
+	data4	0x81cdcd4c,0x81cdcd4c, 0x180c0c14,0x180c0c14
+	data4	0x26131335,0x26131335, 0xc3ecec2f,0xc3ecec2f
+	data4	0xbe5f5fe1,0xbe5f5fe1, 0x359797a2,0x359797a2
+	data4	0x884444cc,0x884444cc, 0x2e171739,0x2e171739
+	data4	0x93c4c457,0x93c4c457, 0x55a7a7f2,0x55a7a7f2
+	data4	0xfc7e7e82,0xfc7e7e82, 0x7a3d3d47,0x7a3d3d47
+	data4	0xc86464ac,0xc86464ac, 0xba5d5de7,0xba5d5de7
+	data4	0x3219192b,0x3219192b, 0xe6737395,0xe6737395
+	data4	0xc06060a0,0xc06060a0, 0x19818198,0x19818198
+	data4	0x9e4f4fd1,0x9e4f4fd1, 0xa3dcdc7f,0xa3dcdc7f
+	data4	0x44222266,0x44222266, 0x542a2a7e,0x542a2a7e
+	data4	0x3b9090ab,0x3b9090ab, 0x0b888883,0x0b888883
+	data4	0x8c4646ca,0x8c4646ca, 0xc7eeee29,0xc7eeee29
+	data4	0x6bb8b8d3,0x6bb8b8d3, 0x2814143c,0x2814143c
+	data4	0xa7dede79,0xa7dede79, 0xbc5e5ee2,0xbc5e5ee2
+	data4	0x160b0b1d,0x160b0b1d, 0xaddbdb76,0xaddbdb76
+	data4	0xdbe0e03b,0xdbe0e03b, 0x64323256,0x64323256
+	data4	0x743a3a4e,0x743a3a4e, 0x140a0a1e,0x140a0a1e
+	data4	0x924949db,0x924949db, 0x0c06060a,0x0c06060a
+	data4	0x4824246c,0x4824246c, 0xb85c5ce4,0xb85c5ce4
+	data4	0x9fc2c25d,0x9fc2c25d, 0xbdd3d36e,0xbdd3d36e
+	data4	0x43acacef,0x43acacef, 0xc46262a6,0xc46262a6
+	data4	0x399191a8,0x399191a8, 0x319595a4,0x319595a4
+	data4	0xd3e4e437,0xd3e4e437, 0xf279798b,0xf279798b
+	data4	0xd5e7e732,0xd5e7e732, 0x8bc8c843,0x8bc8c843
+	data4	0x6e373759,0x6e373759, 0xda6d6db7,0xda6d6db7
+	data4	0x018d8d8c,0x018d8d8c, 0xb1d5d564,0xb1d5d564
+	data4	0x9c4e4ed2,0x9c4e4ed2, 0x49a9a9e0,0x49a9a9e0
+	data4	0xd86c6cb4,0xd86c6cb4, 0xac5656fa,0xac5656fa
+	data4	0xf3f4f407,0xf3f4f407, 0xcfeaea25,0xcfeaea25
+	data4	0xca6565af,0xca6565af, 0xf47a7a8e,0xf47a7a8e
+	data4	0x47aeaee9,0x47aeaee9, 0x10080818,0x10080818
+	data4	0x6fbabad5,0x6fbabad5, 0xf0787888,0xf0787888
+	data4	0x4a25256f,0x4a25256f, 0x5c2e2e72,0x5c2e2e72
+	data4	0x381c1c24,0x381c1c24, 0x57a6a6f1,0x57a6a6f1
+	data4	0x73b4b4c7,0x73b4b4c7, 0x97c6c651,0x97c6c651
+	data4	0xcbe8e823,0xcbe8e823, 0xa1dddd7c,0xa1dddd7c
+	data4	0xe874749c,0xe874749c, 0x3e1f1f21,0x3e1f1f21
+	data4	0x964b4bdd,0x964b4bdd, 0x61bdbddc,0x61bdbddc
+	data4	0x0d8b8b86,0x0d8b8b86, 0x0f8a8a85,0x0f8a8a85
+	data4	0xe0707090,0xe0707090, 0x7c3e3e42,0x7c3e3e42
+	data4	0x71b5b5c4,0x71b5b5c4, 0xcc6666aa,0xcc6666aa
+	data4	0x904848d8,0x904848d8, 0x06030305,0x06030305
+	data4	0xf7f6f601,0xf7f6f601, 0x1c0e0e12,0x1c0e0e12
+	data4	0xc26161a3,0xc26161a3, 0x6a35355f,0x6a35355f
+	data4	0xae5757f9,0xae5757f9, 0x69b9b9d0,0x69b9b9d0
+	data4	0x17868691,0x17868691, 0x99c1c158,0x99c1c158
+	data4	0x3a1d1d27,0x3a1d1d27, 0x279e9eb9,0x279e9eb9
+	data4	0xd9e1e138,0xd9e1e138, 0xebf8f813,0xebf8f813
+	data4	0x2b9898b3,0x2b9898b3, 0x22111133,0x22111133
+	data4	0xd26969bb,0xd26969bb, 0xa9d9d970,0xa9d9d970
+	data4	0x078e8e89,0x078e8e89, 0x339494a7,0x339494a7
+	data4	0x2d9b9bb6,0x2d9b9bb6, 0x3c1e1e22,0x3c1e1e22
+	data4	0x15878792,0x15878792, 0xc9e9e920,0xc9e9e920
+	data4	0x87cece49,0x87cece49, 0xaa5555ff,0xaa5555ff
+	data4	0x50282878,0x50282878, 0xa5dfdf7a,0xa5dfdf7a
+	data4	0x038c8c8f,0x038c8c8f, 0x59a1a1f8,0x59a1a1f8
+	data4	0x09898980,0x09898980, 0x1a0d0d17,0x1a0d0d17
+	data4	0x65bfbfda,0x65bfbfda, 0xd7e6e631,0xd7e6e631
+	data4	0x844242c6,0x844242c6, 0xd06868b8,0xd06868b8
+	data4	0x824141c3,0x824141c3, 0x299999b0,0x299999b0
+	data4	0x5a2d2d77,0x5a2d2d77, 0x1e0f0f11,0x1e0f0f11
+	data4	0x7bb0b0cb,0x7bb0b0cb, 0xa85454fc,0xa85454fc
+	data4	0x6dbbbbd6,0x6dbbbbd6, 0x2c16163a,0x2c16163a
+// Te4:
+	data1	0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5
+	data1	0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76
+	data1	0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0
+	data1	0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0
+	data1	0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc
+	data1	0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15
+	data1	0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a
+	data1	0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75
+	data1	0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0
+	data1	0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84
+	data1	0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b
+	data1	0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf
+	data1	0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85
+	data1	0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8
+	data1	0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5
+	data1	0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2
+	data1	0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17
+	data1	0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73
+	data1	0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88
+	data1	0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb
+	data1	0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c
+	data1	0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79
+	data1	0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9
+	data1	0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08
+	data1	0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6
+	data1	0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a
+	data1	0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e
+	data1	0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e
+	data1	0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94
+	data1	0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf
+	data1	0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68
+	data1	0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
+.size	AES_Te#,2048+256	// HP-UX assembler fails to ".-AES_Te#"
+
+.align	64
+.global	AES_Td#
+.type	AES_Td#,@object
+AES_Td:	data4	0x51f4a750,0x51f4a750, 0x7e416553,0x7e416553
+	data4	0x1a17a4c3,0x1a17a4c3, 0x3a275e96,0x3a275e96
+	data4	0x3bab6bcb,0x3bab6bcb, 0x1f9d45f1,0x1f9d45f1
+	data4	0xacfa58ab,0xacfa58ab, 0x4be30393,0x4be30393
+	data4	0x2030fa55,0x2030fa55, 0xad766df6,0xad766df6
+	data4	0x88cc7691,0x88cc7691, 0xf5024c25,0xf5024c25
+	data4	0x4fe5d7fc,0x4fe5d7fc, 0xc52acbd7,0xc52acbd7
+	data4	0x26354480,0x26354480, 0xb562a38f,0xb562a38f
+	data4	0xdeb15a49,0xdeb15a49, 0x25ba1b67,0x25ba1b67
+	data4	0x45ea0e98,0x45ea0e98, 0x5dfec0e1,0x5dfec0e1
+	data4	0xc32f7502,0xc32f7502, 0x814cf012,0x814cf012
+	data4	0x8d4697a3,0x8d4697a3, 0x6bd3f9c6,0x6bd3f9c6
+	data4	0x038f5fe7,0x038f5fe7, 0x15929c95,0x15929c95
+	data4	0xbf6d7aeb,0xbf6d7aeb, 0x955259da,0x955259da
+	data4	0xd4be832d,0xd4be832d, 0x587421d3,0x587421d3
+	data4	0x49e06929,0x49e06929, 0x8ec9c844,0x8ec9c844
+	data4	0x75c2896a,0x75c2896a, 0xf48e7978,0xf48e7978
+	data4	0x99583e6b,0x99583e6b, 0x27b971dd,0x27b971dd
+	data4	0xbee14fb6,0xbee14fb6, 0xf088ad17,0xf088ad17
+	data4	0xc920ac66,0xc920ac66, 0x7dce3ab4,0x7dce3ab4
+	data4	0x63df4a18,0x63df4a18, 0xe51a3182,0xe51a3182
+	data4	0x97513360,0x97513360, 0x62537f45,0x62537f45
+	data4	0xb16477e0,0xb16477e0, 0xbb6bae84,0xbb6bae84
+	data4	0xfe81a01c,0xfe81a01c, 0xf9082b94,0xf9082b94
+	data4	0x70486858,0x70486858, 0x8f45fd19,0x8f45fd19
+	data4	0x94de6c87,0x94de6c87, 0x527bf8b7,0x527bf8b7
+	data4	0xab73d323,0xab73d323, 0x724b02e2,0x724b02e2
+	data4	0xe31f8f57,0xe31f8f57, 0x6655ab2a,0x6655ab2a
+	data4	0xb2eb2807,0xb2eb2807, 0x2fb5c203,0x2fb5c203
+	data4	0x86c57b9a,0x86c57b9a, 0xd33708a5,0xd33708a5
+	data4	0x302887f2,0x302887f2, 0x23bfa5b2,0x23bfa5b2
+	data4	0x02036aba,0x02036aba, 0xed16825c,0xed16825c
+	data4	0x8acf1c2b,0x8acf1c2b, 0xa779b492,0xa779b492
+	data4	0xf307f2f0,0xf307f2f0, 0x4e69e2a1,0x4e69e2a1
+	data4	0x65daf4cd,0x65daf4cd, 0x0605bed5,0x0605bed5
+	data4	0xd134621f,0xd134621f, 0xc4a6fe8a,0xc4a6fe8a
+	data4	0x342e539d,0x342e539d, 0xa2f355a0,0xa2f355a0
+	data4	0x058ae132,0x058ae132, 0xa4f6eb75,0xa4f6eb75
+	data4	0x0b83ec39,0x0b83ec39, 0x4060efaa,0x4060efaa
+	data4	0x5e719f06,0x5e719f06, 0xbd6e1051,0xbd6e1051
+	data4	0x3e218af9,0x3e218af9, 0x96dd063d,0x96dd063d
+	data4	0xdd3e05ae,0xdd3e05ae, 0x4de6bd46,0x4de6bd46
+	data4	0x91548db5,0x91548db5, 0x71c45d05,0x71c45d05
+	data4	0x0406d46f,0x0406d46f, 0x605015ff,0x605015ff
+	data4	0x1998fb24,0x1998fb24, 0xd6bde997,0xd6bde997
+	data4	0x894043cc,0x894043cc, 0x67d99e77,0x67d99e77
+	data4	0xb0e842bd,0xb0e842bd, 0x07898b88,0x07898b88
+	data4	0xe7195b38,0xe7195b38, 0x79c8eedb,0x79c8eedb
+	data4	0xa17c0a47,0xa17c0a47, 0x7c420fe9,0x7c420fe9
+	data4	0xf8841ec9,0xf8841ec9, 0x00000000,0x00000000
+	data4	0x09808683,0x09808683, 0x322bed48,0x322bed48
+	data4	0x1e1170ac,0x1e1170ac, 0x6c5a724e,0x6c5a724e
+	data4	0xfd0efffb,0xfd0efffb, 0x0f853856,0x0f853856
+	data4	0x3daed51e,0x3daed51e, 0x362d3927,0x362d3927
+	data4	0x0a0fd964,0x0a0fd964, 0x685ca621,0x685ca621
+	data4	0x9b5b54d1,0x9b5b54d1, 0x24362e3a,0x24362e3a
+	data4	0x0c0a67b1,0x0c0a67b1, 0x9357e70f,0x9357e70f
+	data4	0xb4ee96d2,0xb4ee96d2, 0x1b9b919e,0x1b9b919e
+	data4	0x80c0c54f,0x80c0c54f, 0x61dc20a2,0x61dc20a2
+	data4	0x5a774b69,0x5a774b69, 0x1c121a16,0x1c121a16
+	data4	0xe293ba0a,0xe293ba0a, 0xc0a02ae5,0xc0a02ae5
+	data4	0x3c22e043,0x3c22e043, 0x121b171d,0x121b171d
+	data4	0x0e090d0b,0x0e090d0b, 0xf28bc7ad,0xf28bc7ad
+	data4	0x2db6a8b9,0x2db6a8b9, 0x141ea9c8,0x141ea9c8
+	data4	0x57f11985,0x57f11985, 0xaf75074c,0xaf75074c
+	data4	0xee99ddbb,0xee99ddbb, 0xa37f60fd,0xa37f60fd
+	data4	0xf701269f,0xf701269f, 0x5c72f5bc,0x5c72f5bc
+	data4	0x44663bc5,0x44663bc5, 0x5bfb7e34,0x5bfb7e34
+	data4	0x8b432976,0x8b432976, 0xcb23c6dc,0xcb23c6dc
+	data4	0xb6edfc68,0xb6edfc68, 0xb8e4f163,0xb8e4f163
+	data4	0xd731dcca,0xd731dcca, 0x42638510,0x42638510
+	data4	0x13972240,0x13972240, 0x84c61120,0x84c61120
+	data4	0x854a247d,0x854a247d, 0xd2bb3df8,0xd2bb3df8
+	data4	0xaef93211,0xaef93211, 0xc729a16d,0xc729a16d
+	data4	0x1d9e2f4b,0x1d9e2f4b, 0xdcb230f3,0xdcb230f3
+	data4	0x0d8652ec,0x0d8652ec, 0x77c1e3d0,0x77c1e3d0
+	data4	0x2bb3166c,0x2bb3166c, 0xa970b999,0xa970b999
+	data4	0x119448fa,0x119448fa, 0x47e96422,0x47e96422
+	data4	0xa8fc8cc4,0xa8fc8cc4, 0xa0f03f1a,0xa0f03f1a
+	data4	0x567d2cd8,0x567d2cd8, 0x223390ef,0x223390ef
+	data4	0x87494ec7,0x87494ec7, 0xd938d1c1,0xd938d1c1
+	data4	0x8ccaa2fe,0x8ccaa2fe, 0x98d40b36,0x98d40b36
+	data4	0xa6f581cf,0xa6f581cf, 0xa57ade28,0xa57ade28
+	data4	0xdab78e26,0xdab78e26, 0x3fadbfa4,0x3fadbfa4
+	data4	0x2c3a9de4,0x2c3a9de4, 0x5078920d,0x5078920d
+	data4	0x6a5fcc9b,0x6a5fcc9b, 0x547e4662,0x547e4662
+	data4	0xf68d13c2,0xf68d13c2, 0x90d8b8e8,0x90d8b8e8
+	data4	0x2e39f75e,0x2e39f75e, 0x82c3aff5,0x82c3aff5
+	data4	0x9f5d80be,0x9f5d80be, 0x69d0937c,0x69d0937c
+	data4	0x6fd52da9,0x6fd52da9, 0xcf2512b3,0xcf2512b3
+	data4	0xc8ac993b,0xc8ac993b, 0x10187da7,0x10187da7
+	data4	0xe89c636e,0xe89c636e, 0xdb3bbb7b,0xdb3bbb7b
+	data4	0xcd267809,0xcd267809, 0x6e5918f4,0x6e5918f4
+	data4	0xec9ab701,0xec9ab701, 0x834f9aa8,0x834f9aa8
+	data4	0xe6956e65,0xe6956e65, 0xaaffe67e,0xaaffe67e
+	data4	0x21bccf08,0x21bccf08, 0xef15e8e6,0xef15e8e6
+	data4	0xbae79bd9,0xbae79bd9, 0x4a6f36ce,0x4a6f36ce
+	data4	0xea9f09d4,0xea9f09d4, 0x29b07cd6,0x29b07cd6
+	data4	0x31a4b2af,0x31a4b2af, 0x2a3f2331,0x2a3f2331
+	data4	0xc6a59430,0xc6a59430, 0x35a266c0,0x35a266c0
+	data4	0x744ebc37,0x744ebc37, 0xfc82caa6,0xfc82caa6
+	data4	0xe090d0b0,0xe090d0b0, 0x33a7d815,0x33a7d815
+	data4	0xf104984a,0xf104984a, 0x41ecdaf7,0x41ecdaf7
+	data4	0x7fcd500e,0x7fcd500e, 0x1791f62f,0x1791f62f
+	data4	0x764dd68d,0x764dd68d, 0x43efb04d,0x43efb04d
+	data4	0xccaa4d54,0xccaa4d54, 0xe49604df,0xe49604df
+	data4	0x9ed1b5e3,0x9ed1b5e3, 0x4c6a881b,0x4c6a881b
+	data4	0xc12c1fb8,0xc12c1fb8, 0x4665517f,0x4665517f
+	data4	0x9d5eea04,0x9d5eea04, 0x018c355d,0x018c355d
+	data4	0xfa877473,0xfa877473, 0xfb0b412e,0xfb0b412e
+	data4	0xb3671d5a,0xb3671d5a, 0x92dbd252,0x92dbd252
+	data4	0xe9105633,0xe9105633, 0x6dd64713,0x6dd64713
+	data4	0x9ad7618c,0x9ad7618c, 0x37a10c7a,0x37a10c7a
+	data4	0x59f8148e,0x59f8148e, 0xeb133c89,0xeb133c89
+	data4	0xcea927ee,0xcea927ee, 0xb761c935,0xb761c935
+	data4	0xe11ce5ed,0xe11ce5ed, 0x7a47b13c,0x7a47b13c
+	data4	0x9cd2df59,0x9cd2df59, 0x55f2733f,0x55f2733f
+	data4	0x1814ce79,0x1814ce79, 0x73c737bf,0x73c737bf
+	data4	0x53f7cdea,0x53f7cdea, 0x5ffdaa5b,0x5ffdaa5b
+	data4	0xdf3d6f14,0xdf3d6f14, 0x7844db86,0x7844db86
+	data4	0xcaaff381,0xcaaff381, 0xb968c43e,0xb968c43e
+	data4	0x3824342c,0x3824342c, 0xc2a3405f,0xc2a3405f
+	data4	0x161dc372,0x161dc372, 0xbce2250c,0xbce2250c
+	data4	0x283c498b,0x283c498b, 0xff0d9541,0xff0d9541
+	data4	0x39a80171,0x39a80171, 0x080cb3de,0x080cb3de
+	data4	0xd8b4e49c,0xd8b4e49c, 0x6456c190,0x6456c190
+	data4	0x7bcb8461,0x7bcb8461, 0xd532b670,0xd532b670
+	data4	0x486c5c74,0x486c5c74, 0xd0b85742,0xd0b85742
+// Td4:
+	data1	0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38
+	data1	0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb
+	data1	0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87
+	data1	0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb
+	data1	0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d
+	data1	0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e
+	data1	0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2
+	data1	0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25
+	data1	0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16
+	data1	0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92
+	data1	0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda
+	data1	0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84
+	data1	0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a
+	data1	0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06
+	data1	0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02
+	data1	0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b
+	data1	0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea
+	data1	0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73
+	data1	0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85
+	data1	0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e
+	data1	0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89
+	data1	0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b
+	data1	0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20
+	data1	0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4
+	data1	0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31
+	data1	0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f
+	data1	0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d
+	data1	0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef
+	data1	0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0
+	data1	0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61
+	data1	0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26
+	data1	0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
+.size	AES_Td#,2048+256	// HP-UX assembler fails to ".-AES_Td#"
diff --git a/jni/openssl/crypto/aes/asm/aes-mips.pl b/jni/openssl/crypto/aes/asm/aes-mips.pl
new file mode 100644
index 0000000..2ce6def
--- /dev/null
+++ b/jni/openssl/crypto/aes/asm/aes-mips.pl
@@ -0,0 +1,1611 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# AES for MIPS
+
+# October 2010
+#
+# Code uses 1K[+256B] S-box and on single-issue core [such as R5000]
+# spends ~68 cycles per byte processed with 128-bit key. This is ~16%
+# faster than gcc-generated code, which is not very impressive. But
+# recall that compressed S-box requires extra processing, namely
+# additional rotations. Rotations are implemented with lwl/lwr pairs,
+# which are normally used for loading unaligned data. Another cool
+# thing about this module is its endian neutrality, which means that
+# it processes data without ever changing byte order...
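The "compressed S-box plus rotations" remark above is the key trick: only the 1KB Te0 table is stored, and the Te1..Te3 values are byte rotations of Te0 entries, which the assembly below obtains for free through its lwl/lwr addressing. A hedged C sketch of one round column under that scheme, following the big-endian word convention of OpenSSL's portable aes_core.c (Te0, the state words s0..s3 and the round-key word rk are assumed to be laid out as there):

```c
#include <stdint.h>

#define ROR32(x, n) (((x) >> (n)) | ((x) << (32 - (n))))

/* One AES round column using only the 1KB Te0 table:
 * Te1[x] == ROR32(Te0[x], 8), Te2[x] == ROR32(Te0[x], 16),
 * Te3[x] == ROR32(Te0[x], 24), so three of the lookups get rotated. */
static uint32_t round_column(const uint32_t Te0[256],
                             uint32_t s0, uint32_t s1,
                             uint32_t s2, uint32_t s3, uint32_t rk)
{
    return Te0[(s0 >> 24) & 0xff]
         ^ ROR32(Te0[(s1 >> 16) & 0xff],  8)
         ^ ROR32(Te0[(s2 >>  8) & 0xff], 16)
         ^ ROR32(Te0[ s3        & 0xff], 24)
         ^ rk;
}
```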
+
+######################################################################
+# There are a number of MIPS ABIs in use; O32 and N32/64 are the most
+# widely used. Then there is a new contender: NUBI. It appears that if
+# one picks the latter, it's possible to arrange code in an ABI-neutral
+# manner. Therefore let's stick to the NUBI register layout:
+#
+($zero,$at,$t0,$t1,$t2)=map("\$$_",(0..2,24,25));
+($a0,$a1,$a2,$a3,$a4,$a5,$a6,$a7)=map("\$$_",(4..11));
+($s0,$s1,$s2,$s3,$s4,$s5,$s6,$s7,$s8,$s9,$s10,$s11)=map("\$$_",(12..23));
+($gp,$tp,$sp,$fp,$ra)=map("\$$_",(3,28..31));
+#
+# The return value is placed in $a0. The following coding rules
+# facilitate interoperability:
+#
+# - never ever touch $tp, "thread pointer", former $gp;
+# - copy return value to $t0, former $v0 [or to $a0 if you're adapting
+#   old code];
+# - on O32 populate $a4-$a7 with 'lw $aN,4*N($sp)' if necessary;
+#
+# For reference here is register layout for N32/64 MIPS ABIs:
+#
+# ($zero,$at,$v0,$v1)=map("\$$_",(0..3));
+# ($a0,$a1,$a2,$a3,$a4,$a5,$a6,$a7)=map("\$$_",(4..11));
+# ($t0,$t1,$t2,$t3,$t8,$t9)=map("\$$_",(12..15,24,25));
+# ($s0,$s1,$s2,$s3,$s4,$s5,$s6,$s7)=map("\$$_",(16..23));
+# ($gp,$sp,$fp,$ra)=map("\$$_",(28..31));
+#
+$flavour = shift; # supported flavours are o32,n32,64,nubi32,nubi64
+
+if ($flavour =~ /64|n32/i) {
+	$PTR_ADD="dadd";	# incidentally works even on n32
+	$PTR_SUB="dsub";	# incidentally works even on n32
+	$REG_S="sd";
+	$REG_L="ld";
+	$PTR_SLL="dsll";	# incidentally works even on n32
+	$SZREG=8;
+} else {
+	$PTR_ADD="add";
+	$PTR_SUB="sub";
+	$REG_S="sw";
+	$REG_L="lw";
+	$PTR_SLL="sll";
+	$SZREG=4;
+}
+$pf = ($flavour =~ /nubi/i) ? $t0 : $t2;
+#
+# <appro@openssl.org>
+#
+######################################################################
+
+$big_endian=(`echo MIPSEL | $ENV{CC} -E -P -`=~/MIPSEL/)?1:0;
+
+for (@ARGV) {	$output=$_ if (/^\w[\w\-]*\.\w+$/);	}
+open STDOUT,">$output";
+
+if (!defined($big_endian))
+{    $big_endian=(unpack('L',pack('N',1))==1);   }
+
+while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
+open STDOUT,">$output";
+
+my ($MSB,$LSB)=(0,3);	# automatically converted to little-endian
+
+$code.=<<___;
+.text
+#ifdef OPENSSL_FIPSCANISTER
+# include <openssl/fipssyms.h>
+#endif
+
+#if !defined(__vxworks) || defined(__pic__)
+.option	pic2
+#endif
+.set	noat
+___
+
+{{{
+my $FRAMESIZE=16*$SZREG;
+my $SAVED_REGS_MASK = ($flavour =~ /nubi/i) ? 0xc0fff008 : 0xc0ff0000;
+
+my ($inp,$out,$key,$Tbl,$s0,$s1,$s2,$s3)=($a0,$a1,$a2,$a3,$a4,$a5,$a6,$a7);
+my ($i0,$i1,$i2,$i3)=($at,$t0,$t1,$t2);
+my ($t0,$t1,$t2,$t3,$t4,$t5,$t6,$t7,$t8,$t9,$t10,$t11) = map("\$$_",(12..23));
+my ($key0,$cnt)=($gp,$fp);
+
+# instruction ordering is "stolen" from the output of the MIPSpro
+# assembler invoked with -mips3 -O3 arguments...
+$code.=<<___;
+.align	5
+.ent	_mips_AES_encrypt
+_mips_AES_encrypt:
+	.frame	$sp,0,$ra
+	.set	reorder
+	lw	$t0,0($key)
+	lw	$t1,4($key)
+	lw	$t2,8($key)
+	lw	$t3,12($key)
+	lw	$cnt,240($key)
+	$PTR_ADD $key0,$key,16
+
+	xor	$s0,$t0
+	xor	$s1,$t1
+	xor	$s2,$t2
+	xor	$s3,$t3
+
+	sub	$cnt,1
+	_xtr	$i0,$s1,16-2
+.Loop_enc:
+	_xtr	$i1,$s2,16-2
+	_xtr	$i2,$s3,16-2
+	_xtr	$i3,$s0,16-2
+	and	$i0,0x3fc
+	and	$i1,0x3fc
+	and	$i2,0x3fc
+	and	$i3,0x3fc
+	$PTR_ADD $i0,$Tbl
+	$PTR_ADD $i1,$Tbl
+	$PTR_ADD $i2,$Tbl
+	$PTR_ADD $i3,$Tbl
+	lwl	$t0,3($i0)		# Te1[s1>>16]
+	lwl	$t1,3($i1)		# Te1[s2>>16]
+	lwl	$t2,3($i2)		# Te1[s3>>16]
+	lwl	$t3,3($i3)		# Te1[s0>>16]
+	lwr	$t0,2($i0)		# Te1[s1>>16]
+	lwr	$t1,2($i1)		# Te1[s2>>16]
+	lwr	$t2,2($i2)		# Te1[s3>>16]
+	lwr	$t3,2($i3)		# Te1[s0>>16]
+
+	_xtr	$i0,$s2,8-2
+	_xtr	$i1,$s3,8-2
+	_xtr	$i2,$s0,8-2
+	_xtr	$i3,$s1,8-2
+	and	$i0,0x3fc
+	and	$i1,0x3fc
+	and	$i2,0x3fc
+	and	$i3,0x3fc
+	$PTR_ADD $i0,$Tbl
+	$PTR_ADD $i1,$Tbl
+	$PTR_ADD $i2,$Tbl
+	$PTR_ADD $i3,$Tbl
+	lwl	$t4,2($i0)		# Te2[s2>>8]
+	lwl	$t5,2($i1)		# Te2[s3>>8]
+	lwl	$t6,2($i2)		# Te2[s0>>8]
+	lwl	$t7,2($i3)		# Te2[s1>>8]
+	lwr	$t4,1($i0)		# Te2[s2>>8]
+	lwr	$t5,1($i1)		# Te2[s3>>8]
+	lwr	$t6,1($i2)		# Te2[s0>>8]
+	lwr	$t7,1($i3)		# Te2[s1>>8]
+
+	_xtr	$i0,$s3,0-2
+	_xtr	$i1,$s0,0-2
+	_xtr	$i2,$s1,0-2
+	_xtr	$i3,$s2,0-2
+	and	$i0,0x3fc
+	and	$i1,0x3fc
+	and	$i2,0x3fc
+	and	$i3,0x3fc
+	$PTR_ADD $i0,$Tbl
+	$PTR_ADD $i1,$Tbl
+	$PTR_ADD $i2,$Tbl
+	$PTR_ADD $i3,$Tbl
+	lwl	$t8,1($i0)		# Te3[s3]
+	lwl	$t9,1($i1)		# Te3[s0]
+	lwl	$t10,1($i2)		# Te3[s1]
+	lwl	$t11,1($i3)		# Te3[s2]
+	lwr	$t8,0($i0)		# Te3[s3]
+	lwr	$t9,0($i1)		# Te3[s0]
+	lwr	$t10,0($i2)		# Te3[s1]
+	lwr	$t11,0($i3)		# Te3[s2]
+
+	_xtr	$i0,$s0,24-2
+	_xtr	$i1,$s1,24-2
+	_xtr	$i2,$s2,24-2
+	_xtr	$i3,$s3,24-2
+	and	$i0,0x3fc
+	and	$i1,0x3fc
+	and	$i2,0x3fc
+	and	$i3,0x3fc
+	$PTR_ADD $i0,$Tbl
+	$PTR_ADD $i1,$Tbl
+	$PTR_ADD $i2,$Tbl
+	$PTR_ADD $i3,$Tbl
+	xor	$t0,$t4
+	xor	$t1,$t5
+	xor	$t2,$t6
+	xor	$t3,$t7
+	lw	$t4,0($i0)		# Te0[s0>>24]
+	lw	$t5,0($i1)		# Te0[s1>>24]
+	lw	$t6,0($i2)		# Te0[s2>>24]
+	lw	$t7,0($i3)		# Te0[s3>>24]
+
+	lw	$s0,0($key0)
+	lw	$s1,4($key0)
+	lw	$s2,8($key0)
+	lw	$s3,12($key0)
+
+	xor	$t0,$t8
+	xor	$t1,$t9
+	xor	$t2,$t10
+	xor	$t3,$t11
+
+	xor	$t0,$t4
+	xor	$t1,$t5
+	xor	$t2,$t6
+	xor	$t3,$t7
+
+	sub	$cnt,1
+	$PTR_ADD $key0,16
+	xor	$s0,$t0
+	xor	$s1,$t1
+	xor	$s2,$t2
+	xor	$s3,$t3
+	.set	noreorder
+	bnez	$cnt,.Loop_enc
+	_xtr	$i0,$s1,16-2
+
+	.set	reorder
+	_xtr	$i1,$s2,16-2
+	_xtr	$i2,$s3,16-2
+	_xtr	$i3,$s0,16-2
+	and	$i0,0x3fc
+	and	$i1,0x3fc
+	and	$i2,0x3fc
+	and	$i3,0x3fc
+	$PTR_ADD $i0,$Tbl
+	$PTR_ADD $i1,$Tbl
+	$PTR_ADD $i2,$Tbl
+	$PTR_ADD $i3,$Tbl
+	lbu	$t0,2($i0)		# Te4[s1>>16]
+	lbu	$t1,2($i1)		# Te4[s2>>16]
+	lbu	$t2,2($i2)		# Te4[s3>>16]
+	lbu	$t3,2($i3)		# Te4[s0>>16]
+
+	_xtr	$i0,$s2,8-2
+	_xtr	$i1,$s3,8-2
+	_xtr	$i2,$s0,8-2
+	_xtr	$i3,$s1,8-2
+	and	$i0,0x3fc
+	and	$i1,0x3fc
+	and	$i2,0x3fc
+	and	$i3,0x3fc
+	$PTR_ADD $i0,$Tbl
+	$PTR_ADD $i1,$Tbl
+	$PTR_ADD $i2,$Tbl
+	$PTR_ADD $i3,$Tbl
+	lbu	$t4,2($i0)		# Te4[s2>>8]
+	lbu	$t5,2($i1)		# Te4[s3>>8]
+	lbu	$t6,2($i2)		# Te4[s0>>8]
+	lbu	$t7,2($i3)		# Te4[s1>>8]
+
+	_xtr	$i0,$s0,24-2
+	_xtr	$i1,$s1,24-2
+	_xtr	$i2,$s2,24-2
+	_xtr	$i3,$s3,24-2
+	and	$i0,0x3fc
+	and	$i1,0x3fc
+	and	$i2,0x3fc
+	and	$i3,0x3fc
+	$PTR_ADD $i0,$Tbl
+	$PTR_ADD $i1,$Tbl
+	$PTR_ADD $i2,$Tbl
+	$PTR_ADD $i3,$Tbl
+	lbu	$t8,2($i0)		# Te4[s0>>24]
+	lbu	$t9,2($i1)		# Te4[s1>>24]
+	lbu	$t10,2($i2)		# Te4[s2>>24]
+	lbu	$t11,2($i3)		# Te4[s3>>24]
+
+	_xtr	$i0,$s3,0-2
+	_xtr	$i1,$s0,0-2
+	_xtr	$i2,$s1,0-2
+	_xtr	$i3,$s2,0-2
+	and	$i0,0x3fc
+	and	$i1,0x3fc
+	and	$i2,0x3fc
+	and	$i3,0x3fc
+
+	_ins	$t0,16
+	_ins	$t1,16
+	_ins	$t2,16
+	_ins	$t3,16
+
+	_ins	$t4,8
+	_ins	$t5,8
+	_ins	$t6,8
+	_ins	$t7,8
+
+	xor	$t0,$t4
+	xor	$t1,$t5
+	xor	$t2,$t6
+	xor	$t3,$t7
+
+	$PTR_ADD $i0,$Tbl
+	$PTR_ADD $i1,$Tbl
+	$PTR_ADD $i2,$Tbl
+	$PTR_ADD $i3,$Tbl
+	lbu	$t4,2($i0)		# Te4[s3]
+	lbu	$t5,2($i1)		# Te4[s0]
+	lbu	$t6,2($i2)		# Te4[s1]
+	lbu	$t7,2($i3)		# Te4[s2]
+
+	_ins	$t8,24
+	_ins	$t9,24
+	_ins	$t10,24
+	_ins	$t11,24
+
+	lw	$s0,0($key0)
+	lw	$s1,4($key0)
+	lw	$s2,8($key0)
+	lw	$s3,12($key0)
+
+	xor	$t0,$t8
+	xor	$t1,$t9
+	xor	$t2,$t10
+	xor	$t3,$t11
+
+	_ins	$t4,0
+	_ins	$t5,0
+	_ins	$t6,0
+	_ins	$t7,0
+
+	xor	$t0,$t4
+	xor	$t1,$t5
+	xor	$t2,$t6
+	xor	$t3,$t7
+
+	xor	$s0,$t0
+	xor	$s1,$t1
+	xor	$s2,$t2
+	xor	$s3,$t3
+
+	jr	$ra
+.end	_mips_AES_encrypt
+
+.align	5
+.globl	AES_encrypt
+.ent	AES_encrypt
+AES_encrypt:
+	.frame	$sp,$FRAMESIZE,$ra
+	.mask	$SAVED_REGS_MASK,-$SZREG
+	.set	noreorder
+___
+$code.=<<___ if ($flavour =~ /o32/i);	# o32 PIC-ification
+	.cpload	$pf
+___
+$code.=<<___;
+	$PTR_SUB $sp,$FRAMESIZE
+	$REG_S	$ra,$FRAMESIZE-1*$SZREG($sp)
+	$REG_S	$fp,$FRAMESIZE-2*$SZREG($sp)
+	$REG_S	$s11,$FRAMESIZE-3*$SZREG($sp)
+	$REG_S	$s10,$FRAMESIZE-4*$SZREG($sp)
+	$REG_S	$s9,$FRAMESIZE-5*$SZREG($sp)
+	$REG_S	$s8,$FRAMESIZE-6*$SZREG($sp)
+	$REG_S	$s7,$FRAMESIZE-7*$SZREG($sp)
+	$REG_S	$s6,$FRAMESIZE-8*$SZREG($sp)
+	$REG_S	$s5,$FRAMESIZE-9*$SZREG($sp)
+	$REG_S	$s4,$FRAMESIZE-10*$SZREG($sp)
+___
+$code.=<<___ if ($flavour =~ /nubi/i);	# optimize non-nubi prologue
+	$REG_S	\$15,$FRAMESIZE-11*$SZREG($sp)
+	$REG_S	\$14,$FRAMESIZE-12*$SZREG($sp)
+	$REG_S	\$13,$FRAMESIZE-13*$SZREG($sp)
+	$REG_S	\$12,$FRAMESIZE-14*$SZREG($sp)
+	$REG_S	$gp,$FRAMESIZE-15*$SZREG($sp)
+___
+$code.=<<___ if ($flavour !~ /o32/i);	# non-o32 PIC-ification
+	.cplocal	$Tbl
+	.cpsetup	$pf,$zero,AES_encrypt
+___
+$code.=<<___;
+	.set	reorder
+	la	$Tbl,AES_Te		# PIC-ified 'load address'
+
+	lwl	$s0,0+$MSB($inp)
+	lwl	$s1,4+$MSB($inp)
+	lwl	$s2,8+$MSB($inp)
+	lwl	$s3,12+$MSB($inp)
+	lwr	$s0,0+$LSB($inp)
+	lwr	$s1,4+$LSB($inp)
+	lwr	$s2,8+$LSB($inp)
+	lwr	$s3,12+$LSB($inp)
+
+	bal	_mips_AES_encrypt
+
+	swr	$s0,0+$LSB($out)
+	swr	$s1,4+$LSB($out)
+	swr	$s2,8+$LSB($out)
+	swr	$s3,12+$LSB($out)
+	swl	$s0,0+$MSB($out)
+	swl	$s1,4+$MSB($out)
+	swl	$s2,8+$MSB($out)
+	swl	$s3,12+$MSB($out)
+
+	.set	noreorder
+	$REG_L	$ra,$FRAMESIZE-1*$SZREG($sp)
+	$REG_L	$fp,$FRAMESIZE-2*$SZREG($sp)
+	$REG_L	$s11,$FRAMESIZE-3*$SZREG($sp)
+	$REG_L	$s10,$FRAMESIZE-4*$SZREG($sp)
+	$REG_L	$s9,$FRAMESIZE-5*$SZREG($sp)
+	$REG_L	$s8,$FRAMESIZE-6*$SZREG($sp)
+	$REG_L	$s7,$FRAMESIZE-7*$SZREG($sp)
+	$REG_L	$s6,$FRAMESIZE-8*$SZREG($sp)
+	$REG_L	$s5,$FRAMESIZE-9*$SZREG($sp)
+	$REG_L	$s4,$FRAMESIZE-10*$SZREG($sp)
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+	$REG_L	\$15,$FRAMESIZE-11*$SZREG($sp)
+	$REG_L	\$14,$FRAMESIZE-12*$SZREG($sp)
+	$REG_L	\$13,$FRAMESIZE-13*$SZREG($sp)
+	$REG_L	\$12,$FRAMESIZE-14*$SZREG($sp)
+	$REG_L	$gp,$FRAMESIZE-15*$SZREG($sp)
+___
+$code.=<<___;
+	jr	$ra
+	$PTR_ADD $sp,$FRAMESIZE
+.end	AES_encrypt
+___
+
+$code.=<<___;
+.align	5
+.ent	_mips_AES_decrypt
+_mips_AES_decrypt:
+	.frame	$sp,0,$ra
+	.set	reorder
+	lw	$t0,0($key)
+	lw	$t1,4($key)
+	lw	$t2,8($key)
+	lw	$t3,12($key)
+	lw	$cnt,240($key)
+	$PTR_ADD $key0,$key,16
+
+	xor	$s0,$t0
+	xor	$s1,$t1
+	xor	$s2,$t2
+	xor	$s3,$t3
+
+	sub	$cnt,1
+	_xtr	$i0,$s3,16-2
+.Loop_dec:
+	_xtr	$i1,$s0,16-2
+	_xtr	$i2,$s1,16-2
+	_xtr	$i3,$s2,16-2
+	and	$i0,0x3fc
+	and	$i1,0x3fc
+	and	$i2,0x3fc
+	and	$i3,0x3fc
+	$PTR_ADD $i0,$Tbl
+	$PTR_ADD $i1,$Tbl
+	$PTR_ADD $i2,$Tbl
+	$PTR_ADD $i3,$Tbl
+	lwl	$t0,3($i0)		# Td1[s3>>16]
+	lwl	$t1,3($i1)		# Td1[s0>>16]
+	lwl	$t2,3($i2)		# Td1[s1>>16]
+	lwl	$t3,3($i3)		# Td1[s2>>16]
+	lwr	$t0,2($i0)		# Td1[s3>>16]
+	lwr	$t1,2($i1)		# Td1[s0>>16]
+	lwr	$t2,2($i2)		# Td1[s1>>16]
+	lwr	$t3,2($i3)		# Td1[s2>>16]
+
+	_xtr	$i0,$s2,8-2
+	_xtr	$i1,$s3,8-2
+	_xtr	$i2,$s0,8-2
+	_xtr	$i3,$s1,8-2
+	and	$i0,0x3fc
+	and	$i1,0x3fc
+	and	$i2,0x3fc
+	and	$i3,0x3fc
+	$PTR_ADD $i0,$Tbl
+	$PTR_ADD $i1,$Tbl
+	$PTR_ADD $i2,$Tbl
+	$PTR_ADD $i3,$Tbl
+	lwl	$t4,2($i0)		# Td2[s2>>8]
+	lwl	$t5,2($i1)		# Td2[s3>>8]
+	lwl	$t6,2($i2)		# Td2[s0>>8]
+	lwl	$t7,2($i3)		# Td2[s1>>8]
+	lwr	$t4,1($i0)		# Td2[s2>>8]
+	lwr	$t5,1($i1)		# Td2[s3>>8]
+	lwr	$t6,1($i2)		# Td2[s0>>8]
+	lwr	$t7,1($i3)		# Td2[s1>>8]
+
+	_xtr	$i0,$s1,0-2
+	_xtr	$i1,$s2,0-2
+	_xtr	$i2,$s3,0-2
+	_xtr	$i3,$s0,0-2
+	and	$i0,0x3fc
+	and	$i1,0x3fc
+	and	$i2,0x3fc
+	and	$i3,0x3fc
+	$PTR_ADD $i0,$Tbl
+	$PTR_ADD $i1,$Tbl
+	$PTR_ADD $i2,$Tbl
+	$PTR_ADD $i3,$Tbl
+	lwl	$t8,1($i0)		# Td3[s1]
+	lwl	$t9,1($i1)		# Td3[s2]
+	lwl	$t10,1($i2)		# Td3[s3]
+	lwl	$t11,1($i3)		# Td3[s0]
+	lwr	$t8,0($i0)		# Td3[s1]
+	lwr	$t9,0($i1)		# Td3[s2]
+	lwr	$t10,0($i2)		# Td3[s3]
+	lwr	$t11,0($i3)		# Td3[s0]
+
+	_xtr	$i0,$s0,24-2
+	_xtr	$i1,$s1,24-2
+	_xtr	$i2,$s2,24-2
+	_xtr	$i3,$s3,24-2
+	and	$i0,0x3fc
+	and	$i1,0x3fc
+	and	$i2,0x3fc
+	and	$i3,0x3fc
+	$PTR_ADD $i0,$Tbl
+	$PTR_ADD $i1,$Tbl
+	$PTR_ADD $i2,$Tbl
+	$PTR_ADD $i3,$Tbl
+
+	xor	$t0,$t4
+	xor	$t1,$t5
+	xor	$t2,$t6
+	xor	$t3,$t7
+
+
+	lw	$t4,0($i0)		# Td0[s0>>24]
+	lw	$t5,0($i1)		# Td0[s1>>24]
+	lw	$t6,0($i2)		# Td0[s2>>24]
+	lw	$t7,0($i3)		# Td0[s3>>24]
+
+	lw	$s0,0($key0)
+	lw	$s1,4($key0)
+	lw	$s2,8($key0)
+	lw	$s3,12($key0)
+
+	xor	$t0,$t8
+	xor	$t1,$t9
+	xor	$t2,$t10
+	xor	$t3,$t11
+
+	xor	$t0,$t4
+	xor	$t1,$t5
+	xor	$t2,$t6
+	xor	$t3,$t7
+
+	sub	$cnt,1
+	$PTR_ADD $key0,16
+	xor	$s0,$t0
+	xor	$s1,$t1
+	xor	$s2,$t2
+	xor	$s3,$t3
+	.set	noreorder
+	bnez	$cnt,.Loop_dec
+	_xtr	$i0,$s3,16-2
+
+	.set	reorder
+	lw	$t4,1024($Tbl)		# prefetch Td4
+	lw	$t5,1024+32($Tbl)
+	lw	$t6,1024+64($Tbl)
+	lw	$t7,1024+96($Tbl)
+	lw	$t8,1024+128($Tbl)
+	lw	$t9,1024+160($Tbl)
+	lw	$t10,1024+192($Tbl)
+	lw	$t11,1024+224($Tbl)
+
+	_xtr	$i0,$s3,16
+	_xtr	$i1,$s0,16
+	_xtr	$i2,$s1,16
+	_xtr	$i3,$s2,16
+	and	$i0,0xff
+	and	$i1,0xff
+	and	$i2,0xff
+	and	$i3,0xff
+	$PTR_ADD $i0,$Tbl
+	$PTR_ADD $i1,$Tbl
+	$PTR_ADD $i2,$Tbl
+	$PTR_ADD $i3,$Tbl
+	lbu	$t0,1024($i0)		# Td4[s3>>16]
+	lbu	$t1,1024($i1)		# Td4[s0>>16]
+	lbu	$t2,1024($i2)		# Td4[s1>>16]
+	lbu	$t3,1024($i3)		# Td4[s2>>16]
+
+	_xtr	$i0,$s2,8
+	_xtr	$i1,$s3,8
+	_xtr	$i2,$s0,8
+	_xtr	$i3,$s1,8
+	and	$i0,0xff
+	and	$i1,0xff
+	and	$i2,0xff
+	and	$i3,0xff
+	$PTR_ADD $i0,$Tbl
+	$PTR_ADD $i1,$Tbl
+	$PTR_ADD $i2,$Tbl
+	$PTR_ADD $i3,$Tbl
+	lbu	$t4,1024($i0)		# Td4[s2>>8]
+	lbu	$t5,1024($i1)		# Td4[s3>>8]
+	lbu	$t6,1024($i2)		# Td4[s0>>8]
+	lbu	$t7,1024($i3)		# Td4[s1>>8]
+
+	_xtr	$i0,$s0,24
+	_xtr	$i1,$s1,24
+	_xtr	$i2,$s2,24
+	_xtr	$i3,$s3,24
+	$PTR_ADD $i0,$Tbl
+	$PTR_ADD $i1,$Tbl
+	$PTR_ADD $i2,$Tbl
+	$PTR_ADD $i3,$Tbl
+	lbu	$t8,1024($i0)		# Td4[s0>>24]
+	lbu	$t9,1024($i1)		# Td4[s1>>24]
+	lbu	$t10,1024($i2)		# Td4[s2>>24]
+	lbu	$t11,1024($i3)		# Td4[s3>>24]
+
+	_xtr	$i0,$s1,0
+	_xtr	$i1,$s2,0
+	_xtr	$i2,$s3,0
+	_xtr	$i3,$s0,0
+
+	_ins	$t0,16
+	_ins	$t1,16
+	_ins	$t2,16
+	_ins	$t3,16
+
+	_ins	$t4,8
+	_ins	$t5,8
+	_ins	$t6,8
+	_ins	$t7,8
+
+	xor	$t0,$t4
+	xor	$t1,$t5
+	xor	$t2,$t6
+	xor	$t3,$t7
+
+	$PTR_ADD $i0,$Tbl
+	$PTR_ADD $i1,$Tbl
+	$PTR_ADD $i2,$Tbl
+	$PTR_ADD $i3,$Tbl
+	lbu	$t4,1024($i0)		# Td4[s1]
+	lbu	$t5,1024($i1)		# Td4[s2]
+	lbu	$t6,1024($i2)		# Td4[s3]
+	lbu	$t7,1024($i3)		# Td4[s0]
+
+	_ins	$t8,24
+	_ins	$t9,24
+	_ins	$t10,24
+	_ins	$t11,24
+
+	lw	$s0,0($key0)
+	lw	$s1,4($key0)
+	lw	$s2,8($key0)
+	lw	$s3,12($key0)
+
+	_ins	$t4,0
+	_ins	$t5,0
+	_ins	$t6,0
+	_ins	$t7,0
+
+
+	xor	$t0,$t8
+	xor	$t1,$t9
+	xor	$t2,$t10
+	xor	$t3,$t11
+
+	xor	$t0,$t4
+	xor	$t1,$t5
+	xor	$t2,$t6
+	xor	$t3,$t7
+
+	xor	$s0,$t0
+	xor	$s1,$t1
+	xor	$s2,$t2
+	xor	$s3,$t3
+
+	jr	$ra
+.end	_mips_AES_decrypt
+
+.align	5
+.globl	AES_decrypt
+.ent	AES_decrypt
+AES_decrypt:
+	.frame	$sp,$FRAMESIZE,$ra
+	.mask	$SAVED_REGS_MASK,-$SZREG
+	.set	noreorder
+___
+$code.=<<___ if ($flavour =~ /o32/i);	# o32 PIC-ification
+	.cpload	$pf
+___
+$code.=<<___;
+	$PTR_SUB $sp,$FRAMESIZE
+	$REG_S	$ra,$FRAMESIZE-1*$SZREG($sp)
+	$REG_S	$fp,$FRAMESIZE-2*$SZREG($sp)
+	$REG_S	$s11,$FRAMESIZE-3*$SZREG($sp)
+	$REG_S	$s10,$FRAMESIZE-4*$SZREG($sp)
+	$REG_S	$s9,$FRAMESIZE-5*$SZREG($sp)
+	$REG_S	$s8,$FRAMESIZE-6*$SZREG($sp)
+	$REG_S	$s7,$FRAMESIZE-7*$SZREG($sp)
+	$REG_S	$s6,$FRAMESIZE-8*$SZREG($sp)
+	$REG_S	$s5,$FRAMESIZE-9*$SZREG($sp)
+	$REG_S	$s4,$FRAMESIZE-10*$SZREG($sp)
+___
+$code.=<<___ if ($flavour =~ /nubi/i);	# optimize non-nubi prologue
+	$REG_S	\$15,$FRAMESIZE-11*$SZREG($sp)
+	$REG_S	\$14,$FRAMESIZE-12*$SZREG($sp)
+	$REG_S	\$13,$FRAMESIZE-13*$SZREG($sp)
+	$REG_S	\$12,$FRAMESIZE-14*$SZREG($sp)
+	$REG_S	$gp,$FRAMESIZE-15*$SZREG($sp)
+___
+$code.=<<___ if ($flavour !~ /o32/i);	# non-o32 PIC-ification
+	.cplocal	$Tbl
+	.cpsetup	$pf,$zero,AES_decrypt
+___
+$code.=<<___;
+	.set	reorder
+	la	$Tbl,AES_Td		# PIC-ified 'load address'
+
+	lwl	$s0,0+$MSB($inp)
+	lwl	$s1,4+$MSB($inp)
+	lwl	$s2,8+$MSB($inp)
+	lwl	$s3,12+$MSB($inp)
+	lwr	$s0,0+$LSB($inp)
+	lwr	$s1,4+$LSB($inp)
+	lwr	$s2,8+$LSB($inp)
+	lwr	$s3,12+$LSB($inp)
+
+	bal	_mips_AES_decrypt
+
+	swr	$s0,0+$LSB($out)
+	swr	$s1,4+$LSB($out)
+	swr	$s2,8+$LSB($out)
+	swr	$s3,12+$LSB($out)
+	swl	$s0,0+$MSB($out)
+	swl	$s1,4+$MSB($out)
+	swl	$s2,8+$MSB($out)
+	swl	$s3,12+$MSB($out)
+
+	.set	noreorder
+	$REG_L	$ra,$FRAMESIZE-1*$SZREG($sp)
+	$REG_L	$fp,$FRAMESIZE-2*$SZREG($sp)
+	$REG_L	$s11,$FRAMESIZE-3*$SZREG($sp)
+	$REG_L	$s10,$FRAMESIZE-4*$SZREG($sp)
+	$REG_L	$s9,$FRAMESIZE-5*$SZREG($sp)
+	$REG_L	$s8,$FRAMESIZE-6*$SZREG($sp)
+	$REG_L	$s7,$FRAMESIZE-7*$SZREG($sp)
+	$REG_L	$s6,$FRAMESIZE-8*$SZREG($sp)
+	$REG_L	$s5,$FRAMESIZE-9*$SZREG($sp)
+	$REG_L	$s4,$FRAMESIZE-10*$SZREG($sp)
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+	$REG_L	\$15,$FRAMESIZE-11*$SZREG($sp)
+	$REG_L	\$14,$FRAMESIZE-12*$SZREG($sp)
+	$REG_L	\$13,$FRAMESIZE-13*$SZREG($sp)
+	$REG_L	\$12,$FRAMESIZE-14*$SZREG($sp)
+	$REG_L	$gp,$FRAMESIZE-15*$SZREG($sp)
+___
+$code.=<<___;
+	jr	$ra
+	$PTR_ADD $sp,$FRAMESIZE
+.end	AES_decrypt
+___
+}}}
+
+{{{
+my $FRAMESIZE=8*$SZREG;
+my $SAVED_REGS_MASK = ($flavour =~ /nubi/i) ? 0xc000f008 : 0xc0000000;
+
+my ($inp,$bits,$key,$Tbl)=($a0,$a1,$a2,$a3);
+my ($rk0,$rk1,$rk2,$rk3,$rk4,$rk5,$rk6,$rk7)=($a4,$a5,$a6,$a7,$s0,$s1,$s2,$s3);
+my ($i0,$i1,$i2,$i3)=($at,$t0,$t1,$t2);
+my ($rcon,$cnt)=($gp,$fp);
+
+$code.=<<___;
+.align	5
+.ent	_mips_AES_set_encrypt_key
+_mips_AES_set_encrypt_key:
+	.frame	$sp,0,$ra
+	.set	noreorder
+	beqz	$inp,.Lekey_done
+	li	$t0,-1
+	beqz	$key,.Lekey_done
+	$PTR_ADD $rcon,$Tbl,1024+256
+
+	.set	reorder
+	lwl	$rk0,0+$MSB($inp)	# load 128 bits
+	lwl	$rk1,4+$MSB($inp)
+	lwl	$rk2,8+$MSB($inp)
+	lwl	$rk3,12+$MSB($inp)
+	li	$at,128
+	lwr	$rk0,0+$LSB($inp)
+	lwr	$rk1,4+$LSB($inp)
+	lwr	$rk2,8+$LSB($inp)
+	lwr	$rk3,12+$LSB($inp)
+	.set	noreorder
+	beq	$bits,$at,.L128bits
+	li	$cnt,10
+
+	.set	reorder
+	lwl	$rk4,16+$MSB($inp)	# load 192 bits
+	lwl	$rk5,20+$MSB($inp)
+	li	$at,192
+	lwr	$rk4,16+$LSB($inp)
+	lwr	$rk5,20+$LSB($inp)
+	.set	noreorder
+	beq	$bits,$at,.L192bits
+	li	$cnt,8
+
+	.set	reorder
+	lwl	$rk6,24+$MSB($inp)	# load 256 bits
+	lwl	$rk7,28+$MSB($inp)
+	li	$at,256
+	lwr	$rk6,24+$LSB($inp)
+	lwr	$rk7,28+$LSB($inp)
+	.set	noreorder
+	beq	$bits,$at,.L256bits
+	li	$cnt,7
+
+	b	.Lekey_done
+	li	$t0,-2
+
+.align	4
+.L128bits:
+	.set	reorder
+	srl	$i0,$rk3,16
+	srl	$i1,$rk3,8
+	and	$i0,0xff
+	and	$i1,0xff
+	and	$i2,$rk3,0xff
+	srl	$i3,$rk3,24
+	$PTR_ADD $i0,$Tbl
+	$PTR_ADD $i1,$Tbl
+	$PTR_ADD $i2,$Tbl
+	$PTR_ADD $i3,$Tbl
+	lbu	$i0,1024($i0)
+	lbu	$i1,1024($i1)
+	lbu	$i2,1024($i2)
+	lbu	$i3,1024($i3)
+
+	sw	$rk0,0($key)
+	sw	$rk1,4($key)
+	sw	$rk2,8($key)
+	sw	$rk3,12($key)
+	sub	$cnt,1
+	$PTR_ADD $key,16
+
+	_bias	$i0,24
+	_bias	$i1,16
+	_bias	$i2,8
+	_bias	$i3,0
+
+	xor	$rk0,$i0
+	lw	$i0,0($rcon)
+	xor	$rk0,$i1
+	xor	$rk0,$i2
+	xor	$rk0,$i3
+	xor	$rk0,$i0
+
+	xor	$rk1,$rk0
+	xor	$rk2,$rk1
+	xor	$rk3,$rk2
+
+	.set	noreorder
+	bnez	$cnt,.L128bits
+	$PTR_ADD $rcon,4
+
+	sw	$rk0,0($key)
+	sw	$rk1,4($key)
+	sw	$rk2,8($key)
+	li	$cnt,10
+	sw	$rk3,12($key)
+	li	$t0,0
+	sw	$cnt,80($key)
+	b	.Lekey_done
+	$PTR_SUB $key,10*16
+
+.align	4
+.L192bits:
+	.set	reorder
+	srl	$i0,$rk5,16
+	srl	$i1,$rk5,8
+	and	$i0,0xff
+	and	$i1,0xff
+	and	$i2,$rk5,0xff
+	srl	$i3,$rk5,24
+	$PTR_ADD $i0,$Tbl
+	$PTR_ADD $i1,$Tbl
+	$PTR_ADD $i2,$Tbl
+	$PTR_ADD $i3,$Tbl
+	lbu	$i0,1024($i0)
+	lbu	$i1,1024($i1)
+	lbu	$i2,1024($i2)
+	lbu	$i3,1024($i3)
+
+	sw	$rk0,0($key)
+	sw	$rk1,4($key)
+	sw	$rk2,8($key)
+	sw	$rk3,12($key)
+	sw	$rk4,16($key)
+	sw	$rk5,20($key)
+	sub	$cnt,1
+	$PTR_ADD $key,24
+
+	_bias	$i0,24
+	_bias	$i1,16
+	_bias	$i2,8
+	_bias	$i3,0
+
+	xor	$rk0,$i0
+	lw	$i0,0($rcon)
+	xor	$rk0,$i1
+	xor	$rk0,$i2
+	xor	$rk0,$i3
+	xor	$rk0,$i0
+
+	xor	$rk1,$rk0
+	xor	$rk2,$rk1
+	xor	$rk3,$rk2
+	xor	$rk4,$rk3
+	xor	$rk5,$rk4
+
+	.set	noreorder
+	bnez	$cnt,.L192bits
+	$PTR_ADD $rcon,4
+
+	sw	$rk0,0($key)
+	sw	$rk1,4($key)
+	sw	$rk2,8($key)
+	li	$cnt,12
+	sw	$rk3,12($key)
+	li	$t0,0
+	sw	$cnt,48($key)
+	b	.Lekey_done
+	$PTR_SUB $key,12*16
+
+.align	4
+.L256bits:
+	.set	reorder
+	srl	$i0,$rk7,16
+	srl	$i1,$rk7,8
+	and	$i0,0xff
+	and	$i1,0xff
+	and	$i2,$rk7,0xff
+	srl	$i3,$rk7,24
+	$PTR_ADD $i0,$Tbl
+	$PTR_ADD $i1,$Tbl
+	$PTR_ADD $i2,$Tbl
+	$PTR_ADD $i3,$Tbl
+	lbu	$i0,1024($i0)
+	lbu	$i1,1024($i1)
+	lbu	$i2,1024($i2)
+	lbu	$i3,1024($i3)
+
+	sw	$rk0,0($key)
+	sw	$rk1,4($key)
+	sw	$rk2,8($key)
+	sw	$rk3,12($key)
+	sw	$rk4,16($key)
+	sw	$rk5,20($key)
+	sw	$rk6,24($key)
+	sw	$rk7,28($key)
+	sub	$cnt,1
+
+	_bias	$i0,24
+	_bias	$i1,16
+	_bias	$i2,8
+	_bias	$i3,0
+
+	xor	$rk0,$i0
+	lw	$i0,0($rcon)
+	xor	$rk0,$i1
+	xor	$rk0,$i2
+	xor	$rk0,$i3
+	xor	$rk0,$i0
+
+	xor	$rk1,$rk0
+	xor	$rk2,$rk1
+	xor	$rk3,$rk2
+	beqz	$cnt,.L256bits_done
+
+	srl	$i0,$rk3,24
+	srl	$i1,$rk3,16
+	srl	$i2,$rk3,8
+	and	$i3,$rk3,0xff
+	and	$i1,0xff
+	and	$i2,0xff
+	$PTR_ADD $i0,$Tbl
+	$PTR_ADD $i1,$Tbl
+	$PTR_ADD $i2,$Tbl
+	$PTR_ADD $i3,$Tbl
+	lbu	$i0,1024($i0)
+	lbu	$i1,1024($i1)
+	lbu	$i2,1024($i2)
+	lbu	$i3,1024($i3)
+	sll	$i0,24
+	sll	$i1,16
+	sll	$i2,8
+
+	xor	$rk4,$i0
+	xor	$rk4,$i1
+	xor	$rk4,$i2
+	xor	$rk4,$i3
+
+	xor	$rk5,$rk4
+	xor	$rk6,$rk5
+	xor	$rk7,$rk6
+
+	$PTR_ADD $key,32
+	.set	noreorder
+	b	.L256bits
+	$PTR_ADD $rcon,4
+
+.L256bits_done:
+	sw	$rk0,32($key)
+	sw	$rk1,36($key)
+	sw	$rk2,40($key)
+	li	$cnt,14
+	sw	$rk3,44($key)
+	li	$t0,0
+	sw	$cnt,48($key)
+	$PTR_SUB $key,12*16
+
+.Lekey_done:
+	jr	$ra
+	nop
+.end	_mips_AES_set_encrypt_key
+
+.globl	AES_set_encrypt_key
+.ent	AES_set_encrypt_key
+AES_set_encrypt_key:
+	.frame	$sp,$FRAMESIZE,$ra
+	.mask	$SAVED_REGS_MASK,-$SZREG
+	.set	noreorder
+___
+$code.=<<___ if ($flavour =~ /o32/i);	# o32 PIC-ification
+	.cpload	$pf
+___
+$code.=<<___;
+	$PTR_SUB $sp,$FRAMESIZE
+	$REG_S	$ra,$FRAMESIZE-1*$SZREG($sp)
+	$REG_S	$fp,$FRAMESIZE-2*$SZREG($sp)
+___
+$code.=<<___ if ($flavour =~ /nubi/i);	# optimize non-nubi prologue
+	$REG_S	$s3,$FRAMESIZE-3*$SZREG($sp)
+	$REG_S	$s2,$FRAMESIZE-4*$SZREG($sp)
+	$REG_S	$s1,$FRAMESIZE-5*$SZREG($sp)
+	$REG_S	$s0,$FRAMESIZE-6*$SZREG($sp)
+	$REG_S	$gp,$FRAMESIZE-7*$SZREG($sp)
+___
+$code.=<<___ if ($flavour !~ /o32/i);	# non-o32 PIC-ification
+	.cplocal	$Tbl
+	.cpsetup	$pf,$zero,AES_set_encrypt_key
+___
+$code.=<<___;
+	.set	reorder
+	la	$Tbl,AES_Te		# PIC-ified 'load address'
+
+	bal	_mips_AES_set_encrypt_key
+
+	.set	noreorder
+	move	$a0,$t0
+	$REG_L	$ra,$FRAMESIZE-1*$SZREG($sp)
+	$REG_L	$fp,$FRAMESIZE-2*$SZREG($sp)
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+	$REG_L	$s3,$FRAMESIZE-3*$SZREG($sp)
+	$REG_L	$s2,$FRAMESIZE-4*$SZREG($sp)
+	$REG_L	$s1,$FRAMESIZE-5*$SZREG($sp)
+	$REG_L	$s0,$FRAMESIZE-6*$SZREG($sp)
+	$REG_L	$gp,$FRAMESIZE-7*$SZREG($sp)
+___
+$code.=<<___;
+	jr	$ra
+	$PTR_ADD $sp,$FRAMESIZE
+.end	AES_set_encrypt_key
+___
+
+my ($head,$tail)=($inp,$bits);
+my ($tp1,$tp2,$tp4,$tp8,$tp9,$tpb,$tpd,$tpe)=($a4,$a5,$a6,$a7,$s0,$s1,$s2,$s3);
+my ($m,$x80808080,$x7f7f7f7f,$x1b1b1b1b)=($at,$t0,$t1,$t2);
+$code.=<<___;
+.align	5
+.globl	AES_set_decrypt_key
+.ent	AES_set_decrypt_key
+AES_set_decrypt_key:
+	.frame	$sp,$FRAMESIZE,$ra
+	.mask	$SAVED_REGS_MASK,-$SZREG
+	.set	noreorder
+___
+$code.=<<___ if ($flavour =~ /o32/i);	# o32 PIC-ification
+	.cpload	$pf
+___
+$code.=<<___;
+	$PTR_SUB $sp,$FRAMESIZE
+	$REG_S	$ra,$FRAMESIZE-1*$SZREG($sp)
+	$REG_S	$fp,$FRAMESIZE-2*$SZREG($sp)
+___
+$code.=<<___ if ($flavour =~ /nubi/i);	# optimize non-nubi prologue
+	$REG_S	$s3,$FRAMESIZE-3*$SZREG($sp)
+	$REG_S	$s2,$FRAMESIZE-4*$SZREG($sp)
+	$REG_S	$s1,$FRAMESIZE-5*$SZREG($sp)
+	$REG_S	$s0,$FRAMESIZE-6*$SZREG($sp)
+	$REG_S	$gp,$FRAMESIZE-7*$SZREG($sp)
+___
+$code.=<<___ if ($flavour !~ /o32/i);	# non-o32 PIC-ification
+	.cplocal	$Tbl
+	.cpsetup	$pf,$zero,AES_set_decrypt_key
+___
+$code.=<<___;
+	.set	reorder
+	la	$Tbl,AES_Te		# PIC-ified 'load address'
+
+	bal	_mips_AES_set_encrypt_key
+
+	bltz	$t0,.Ldkey_done
+
+	sll	$at,$cnt,4
+	$PTR_ADD $head,$key,0
+	$PTR_ADD $tail,$key,$at
+.align	4
+.Lswap:
+	lw	$rk0,0($head)
+	lw	$rk1,4($head)
+	lw	$rk2,8($head)
+	lw	$rk3,12($head)
+	lw	$rk4,0($tail)
+	lw	$rk5,4($tail)
+	lw	$rk6,8($tail)
+	lw	$rk7,12($tail)
+	sw	$rk0,0($tail)
+	sw	$rk1,4($tail)
+	sw	$rk2,8($tail)
+	sw	$rk3,12($tail)
+	$PTR_ADD $head,16
+	$PTR_SUB $tail,16
+	sw	$rk4,-16($head)
+	sw	$rk5,-12($head)
+	sw	$rk6,-8($head)
+	sw	$rk7,-4($head)
+	bne	$head,$tail,.Lswap
+
+	lw	$tp1,16($key)		# modulo-scheduled
+	lui	$x80808080,0x8080
+	sub	$cnt,1
+	or	$x80808080,0x8080
+	sll	$cnt,2
+	$PTR_ADD $key,16
+	lui	$x1b1b1b1b,0x1b1b
+	nor	$x7f7f7f7f,$zero,$x80808080
+	or	$x1b1b1b1b,0x1b1b
+.align	4
+.Lmix:
+	and	$m,$tp1,$x80808080
+	and	$tp2,$tp1,$x7f7f7f7f
+	srl	$tp4,$m,7
+	addu	$tp2,$tp2		# tp2<<1
+	subu	$m,$tp4
+	and	$m,$x1b1b1b1b
+	xor	$tp2,$m
+
+	and	$m,$tp2,$x80808080
+	and	$tp4,$tp2,$x7f7f7f7f
+	srl	$tp8,$m,7
+	addu	$tp4,$tp4		# tp4<<1
+	subu	$m,$tp8
+	and	$m,$x1b1b1b1b
+	xor	$tp4,$m
+
+	and	$m,$tp4,$x80808080
+	and	$tp8,$tp4,$x7f7f7f7f
+	srl	$tp9,$m,7
+	addu	$tp8,$tp8		# tp8<<1
+	subu	$m,$tp9
+	and	$m,$x1b1b1b1b
+	xor	$tp8,$m
+
+	xor	$tp9,$tp8,$tp1
+	xor	$tpe,$tp8,$tp4
+	xor	$tpb,$tp9,$tp2
+	xor	$tpd,$tp9,$tp4
+
+	_ror	$tp1,$tpd,16
+	 xor	$tpe,$tp2
+	_ror	$tp2,$tpd,-16
+	xor	$tpe,$tp1
+	_ror	$tp1,$tp9,8
+	xor	$tpe,$tp2
+	_ror	$tp2,$tp9,-24
+	xor	$tpe,$tp1
+	_ror	$tp1,$tpb,24
+	xor	$tpe,$tp2
+	_ror	$tp2,$tpb,-8
+	xor	$tpe,$tp1
+	lw	$tp1,4($key)		# modulo-scheduled
+	xor	$tpe,$tp2
+	sub	$cnt,1
+	sw	$tpe,0($key)
+	$PTR_ADD $key,4
+	bnez	$cnt,.Lmix
+
+	li	$t0,0
+.Ldkey_done:
+	.set	noreorder
+	move	$a0,$t0
+	$REG_L	$ra,$FRAMESIZE-1*$SZREG($sp)
+	$REG_L	$fp,$FRAMESIZE-2*$SZREG($sp)
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+	$REG_L	$s3,$FRAMESIZE-3*$SZREG($sp)
+	$REG_L	$s2,$FRAMESIZE-4*$SZREG($sp)
+	$REG_L	$s1,$FRAMESIZE-5*$SZREG($sp)
+	$REG_L	$s0,$FRAMESIZE-6*$SZREG($sp)
+	$REG_L	$gp,$FRAMESIZE-7*$SZREG($sp)
+___
+$code.=<<___;
+	jr	$ra
+	$PTR_ADD $sp,$FRAMESIZE
+.end	AES_set_decrypt_key
+___
+}}}
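The .Lmix loop above prepares the round keys for the equivalent inverse cipher by applying InvMixColumns to each word; the 0x80808080/0x7f7f7f7f/0x1b1b1b1b masks implement GF(2^8) doubling of all four bytes of a word at once (SIMD within a register). A rough C sketch of one word of that transform, using the word-rotation convention of OpenSSL's portable aes_core.c (in the assembly the rotation counts are produced by the byte-order-aware _ror macro instead):

```c
#include <stdint.h>

#define ROL32(x, n) (((x) << (n)) | ((x) >> (32 - (n))))

/* Double every byte of a packed word in GF(2^8), four bytes at a time. */
static uint32_t xtime4(uint32_t x)
{
    uint32_t hi = x & 0x80808080u;         /* bytes whose top bit is set        */
    uint32_t lo = (x & 0x7f7f7f7fu) << 1;  /* shift the low seven bits of each  */
    return lo ^ ((hi >> 7) * 0x1b);        /* fold 0x1b back in where hi was set */
}

/* InvMixColumns applied to one round-key word. */
static uint32_t inv_mix_word(uint32_t tp1)
{
    uint32_t tp2 = xtime4(tp1);
    uint32_t tp4 = xtime4(tp2);
    uint32_t tp8 = xtime4(tp4);
    uint32_t tp9 = tp8 ^ tp1;              /* x9 = x8 ^ x1      */
    uint32_t tpb = tp9 ^ tp2;              /* xb = x9 ^ x2      */
    uint32_t tpd = tp9 ^ tp4;              /* xd = x9 ^ x4      */
    uint32_t tpe = tp8 ^ tp4 ^ tp2;        /* xe = x8 ^ x4 ^ x2 */

    return tpe ^ ROL32(tpd, 16) ^ ROL32(tp9, 8) ^ ROL32(tpb, 24);
}
```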
+
+######################################################################
+# Tables are kept in endian-neutral manner
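+# (for example, the first Te0 entry below is emitted as the bytes
+# c6 63 63 a5; a big-endian core loads that word as 0xc66363a5 and a
+# little-endian one as 0xa56363c6, and the byte-order-dependent shifts
+# generated for _xtr/_ins compensate for the difference)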
+$code.=<<___;
+.rdata
+.align	6
+AES_Te:
+.byte	0xc6,0x63,0x63,0xa5,	0xf8,0x7c,0x7c,0x84	# Te0
+.byte	0xee,0x77,0x77,0x99,	0xf6,0x7b,0x7b,0x8d
+.byte	0xff,0xf2,0xf2,0x0d,	0xd6,0x6b,0x6b,0xbd
+.byte	0xde,0x6f,0x6f,0xb1,	0x91,0xc5,0xc5,0x54
+.byte	0x60,0x30,0x30,0x50,	0x02,0x01,0x01,0x03
+.byte	0xce,0x67,0x67,0xa9,	0x56,0x2b,0x2b,0x7d
+.byte	0xe7,0xfe,0xfe,0x19,	0xb5,0xd7,0xd7,0x62
+.byte	0x4d,0xab,0xab,0xe6,	0xec,0x76,0x76,0x9a
+.byte	0x8f,0xca,0xca,0x45,	0x1f,0x82,0x82,0x9d
+.byte	0x89,0xc9,0xc9,0x40,	0xfa,0x7d,0x7d,0x87
+.byte	0xef,0xfa,0xfa,0x15,	0xb2,0x59,0x59,0xeb
+.byte	0x8e,0x47,0x47,0xc9,	0xfb,0xf0,0xf0,0x0b
+.byte	0x41,0xad,0xad,0xec,	0xb3,0xd4,0xd4,0x67
+.byte	0x5f,0xa2,0xa2,0xfd,	0x45,0xaf,0xaf,0xea
+.byte	0x23,0x9c,0x9c,0xbf,	0x53,0xa4,0xa4,0xf7
+.byte	0xe4,0x72,0x72,0x96,	0x9b,0xc0,0xc0,0x5b
+.byte	0x75,0xb7,0xb7,0xc2,	0xe1,0xfd,0xfd,0x1c
+.byte	0x3d,0x93,0x93,0xae,	0x4c,0x26,0x26,0x6a
+.byte	0x6c,0x36,0x36,0x5a,	0x7e,0x3f,0x3f,0x41
+.byte	0xf5,0xf7,0xf7,0x02,	0x83,0xcc,0xcc,0x4f
+.byte	0x68,0x34,0x34,0x5c,	0x51,0xa5,0xa5,0xf4
+.byte	0xd1,0xe5,0xe5,0x34,	0xf9,0xf1,0xf1,0x08
+.byte	0xe2,0x71,0x71,0x93,	0xab,0xd8,0xd8,0x73
+.byte	0x62,0x31,0x31,0x53,	0x2a,0x15,0x15,0x3f
+.byte	0x08,0x04,0x04,0x0c,	0x95,0xc7,0xc7,0x52
+.byte	0x46,0x23,0x23,0x65,	0x9d,0xc3,0xc3,0x5e
+.byte	0x30,0x18,0x18,0x28,	0x37,0x96,0x96,0xa1
+.byte	0x0a,0x05,0x05,0x0f,	0x2f,0x9a,0x9a,0xb5
+.byte	0x0e,0x07,0x07,0x09,	0x24,0x12,0x12,0x36
+.byte	0x1b,0x80,0x80,0x9b,	0xdf,0xe2,0xe2,0x3d
+.byte	0xcd,0xeb,0xeb,0x26,	0x4e,0x27,0x27,0x69
+.byte	0x7f,0xb2,0xb2,0xcd,	0xea,0x75,0x75,0x9f
+.byte	0x12,0x09,0x09,0x1b,	0x1d,0x83,0x83,0x9e
+.byte	0x58,0x2c,0x2c,0x74,	0x34,0x1a,0x1a,0x2e
+.byte	0x36,0x1b,0x1b,0x2d,	0xdc,0x6e,0x6e,0xb2
+.byte	0xb4,0x5a,0x5a,0xee,	0x5b,0xa0,0xa0,0xfb
+.byte	0xa4,0x52,0x52,0xf6,	0x76,0x3b,0x3b,0x4d
+.byte	0xb7,0xd6,0xd6,0x61,	0x7d,0xb3,0xb3,0xce
+.byte	0x52,0x29,0x29,0x7b,	0xdd,0xe3,0xe3,0x3e
+.byte	0x5e,0x2f,0x2f,0x71,	0x13,0x84,0x84,0x97
+.byte	0xa6,0x53,0x53,0xf5,	0xb9,0xd1,0xd1,0x68
+.byte	0x00,0x00,0x00,0x00,	0xc1,0xed,0xed,0x2c
+.byte	0x40,0x20,0x20,0x60,	0xe3,0xfc,0xfc,0x1f
+.byte	0x79,0xb1,0xb1,0xc8,	0xb6,0x5b,0x5b,0xed
+.byte	0xd4,0x6a,0x6a,0xbe,	0x8d,0xcb,0xcb,0x46
+.byte	0x67,0xbe,0xbe,0xd9,	0x72,0x39,0x39,0x4b
+.byte	0x94,0x4a,0x4a,0xde,	0x98,0x4c,0x4c,0xd4
+.byte	0xb0,0x58,0x58,0xe8,	0x85,0xcf,0xcf,0x4a
+.byte	0xbb,0xd0,0xd0,0x6b,	0xc5,0xef,0xef,0x2a
+.byte	0x4f,0xaa,0xaa,0xe5,	0xed,0xfb,0xfb,0x16
+.byte	0x86,0x43,0x43,0xc5,	0x9a,0x4d,0x4d,0xd7
+.byte	0x66,0x33,0x33,0x55,	0x11,0x85,0x85,0x94
+.byte	0x8a,0x45,0x45,0xcf,	0xe9,0xf9,0xf9,0x10
+.byte	0x04,0x02,0x02,0x06,	0xfe,0x7f,0x7f,0x81
+.byte	0xa0,0x50,0x50,0xf0,	0x78,0x3c,0x3c,0x44
+.byte	0x25,0x9f,0x9f,0xba,	0x4b,0xa8,0xa8,0xe3
+.byte	0xa2,0x51,0x51,0xf3,	0x5d,0xa3,0xa3,0xfe
+.byte	0x80,0x40,0x40,0xc0,	0x05,0x8f,0x8f,0x8a
+.byte	0x3f,0x92,0x92,0xad,	0x21,0x9d,0x9d,0xbc
+.byte	0x70,0x38,0x38,0x48,	0xf1,0xf5,0xf5,0x04
+.byte	0x63,0xbc,0xbc,0xdf,	0x77,0xb6,0xb6,0xc1
+.byte	0xaf,0xda,0xda,0x75,	0x42,0x21,0x21,0x63
+.byte	0x20,0x10,0x10,0x30,	0xe5,0xff,0xff,0x1a
+.byte	0xfd,0xf3,0xf3,0x0e,	0xbf,0xd2,0xd2,0x6d
+.byte	0x81,0xcd,0xcd,0x4c,	0x18,0x0c,0x0c,0x14
+.byte	0x26,0x13,0x13,0x35,	0xc3,0xec,0xec,0x2f
+.byte	0xbe,0x5f,0x5f,0xe1,	0x35,0x97,0x97,0xa2
+.byte	0x88,0x44,0x44,0xcc,	0x2e,0x17,0x17,0x39
+.byte	0x93,0xc4,0xc4,0x57,	0x55,0xa7,0xa7,0xf2
+.byte	0xfc,0x7e,0x7e,0x82,	0x7a,0x3d,0x3d,0x47
+.byte	0xc8,0x64,0x64,0xac,	0xba,0x5d,0x5d,0xe7
+.byte	0x32,0x19,0x19,0x2b,	0xe6,0x73,0x73,0x95
+.byte	0xc0,0x60,0x60,0xa0,	0x19,0x81,0x81,0x98
+.byte	0x9e,0x4f,0x4f,0xd1,	0xa3,0xdc,0xdc,0x7f
+.byte	0x44,0x22,0x22,0x66,	0x54,0x2a,0x2a,0x7e
+.byte	0x3b,0x90,0x90,0xab,	0x0b,0x88,0x88,0x83
+.byte	0x8c,0x46,0x46,0xca,	0xc7,0xee,0xee,0x29
+.byte	0x6b,0xb8,0xb8,0xd3,	0x28,0x14,0x14,0x3c
+.byte	0xa7,0xde,0xde,0x79,	0xbc,0x5e,0x5e,0xe2
+.byte	0x16,0x0b,0x0b,0x1d,	0xad,0xdb,0xdb,0x76
+.byte	0xdb,0xe0,0xe0,0x3b,	0x64,0x32,0x32,0x56
+.byte	0x74,0x3a,0x3a,0x4e,	0x14,0x0a,0x0a,0x1e
+.byte	0x92,0x49,0x49,0xdb,	0x0c,0x06,0x06,0x0a
+.byte	0x48,0x24,0x24,0x6c,	0xb8,0x5c,0x5c,0xe4
+.byte	0x9f,0xc2,0xc2,0x5d,	0xbd,0xd3,0xd3,0x6e
+.byte	0x43,0xac,0xac,0xef,	0xc4,0x62,0x62,0xa6
+.byte	0x39,0x91,0x91,0xa8,	0x31,0x95,0x95,0xa4
+.byte	0xd3,0xe4,0xe4,0x37,	0xf2,0x79,0x79,0x8b
+.byte	0xd5,0xe7,0xe7,0x32,	0x8b,0xc8,0xc8,0x43
+.byte	0x6e,0x37,0x37,0x59,	0xda,0x6d,0x6d,0xb7
+.byte	0x01,0x8d,0x8d,0x8c,	0xb1,0xd5,0xd5,0x64
+.byte	0x9c,0x4e,0x4e,0xd2,	0x49,0xa9,0xa9,0xe0
+.byte	0xd8,0x6c,0x6c,0xb4,	0xac,0x56,0x56,0xfa
+.byte	0xf3,0xf4,0xf4,0x07,	0xcf,0xea,0xea,0x25
+.byte	0xca,0x65,0x65,0xaf,	0xf4,0x7a,0x7a,0x8e
+.byte	0x47,0xae,0xae,0xe9,	0x10,0x08,0x08,0x18
+.byte	0x6f,0xba,0xba,0xd5,	0xf0,0x78,0x78,0x88
+.byte	0x4a,0x25,0x25,0x6f,	0x5c,0x2e,0x2e,0x72
+.byte	0x38,0x1c,0x1c,0x24,	0x57,0xa6,0xa6,0xf1
+.byte	0x73,0xb4,0xb4,0xc7,	0x97,0xc6,0xc6,0x51
+.byte	0xcb,0xe8,0xe8,0x23,	0xa1,0xdd,0xdd,0x7c
+.byte	0xe8,0x74,0x74,0x9c,	0x3e,0x1f,0x1f,0x21
+.byte	0x96,0x4b,0x4b,0xdd,	0x61,0xbd,0xbd,0xdc
+.byte	0x0d,0x8b,0x8b,0x86,	0x0f,0x8a,0x8a,0x85
+.byte	0xe0,0x70,0x70,0x90,	0x7c,0x3e,0x3e,0x42
+.byte	0x71,0xb5,0xb5,0xc4,	0xcc,0x66,0x66,0xaa
+.byte	0x90,0x48,0x48,0xd8,	0x06,0x03,0x03,0x05
+.byte	0xf7,0xf6,0xf6,0x01,	0x1c,0x0e,0x0e,0x12
+.byte	0xc2,0x61,0x61,0xa3,	0x6a,0x35,0x35,0x5f
+.byte	0xae,0x57,0x57,0xf9,	0x69,0xb9,0xb9,0xd0
+.byte	0x17,0x86,0x86,0x91,	0x99,0xc1,0xc1,0x58
+.byte	0x3a,0x1d,0x1d,0x27,	0x27,0x9e,0x9e,0xb9
+.byte	0xd9,0xe1,0xe1,0x38,	0xeb,0xf8,0xf8,0x13
+.byte	0x2b,0x98,0x98,0xb3,	0x22,0x11,0x11,0x33
+.byte	0xd2,0x69,0x69,0xbb,	0xa9,0xd9,0xd9,0x70
+.byte	0x07,0x8e,0x8e,0x89,	0x33,0x94,0x94,0xa7
+.byte	0x2d,0x9b,0x9b,0xb6,	0x3c,0x1e,0x1e,0x22
+.byte	0x15,0x87,0x87,0x92,	0xc9,0xe9,0xe9,0x20
+.byte	0x87,0xce,0xce,0x49,	0xaa,0x55,0x55,0xff
+.byte	0x50,0x28,0x28,0x78,	0xa5,0xdf,0xdf,0x7a
+.byte	0x03,0x8c,0x8c,0x8f,	0x59,0xa1,0xa1,0xf8
+.byte	0x09,0x89,0x89,0x80,	0x1a,0x0d,0x0d,0x17
+.byte	0x65,0xbf,0xbf,0xda,	0xd7,0xe6,0xe6,0x31
+.byte	0x84,0x42,0x42,0xc6,	0xd0,0x68,0x68,0xb8
+.byte	0x82,0x41,0x41,0xc3,	0x29,0x99,0x99,0xb0
+.byte	0x5a,0x2d,0x2d,0x77,	0x1e,0x0f,0x0f,0x11
+.byte	0x7b,0xb0,0xb0,0xcb,	0xa8,0x54,0x54,0xfc
+.byte	0x6d,0xbb,0xbb,0xd6,	0x2c,0x16,0x16,0x3a
+
+.byte	0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5	# Te4
+.byte	0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76
+.byte	0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0
+.byte	0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0
+.byte	0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc
+.byte	0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15
+.byte	0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a
+.byte	0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75
+.byte	0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0
+.byte	0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84
+.byte	0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b
+.byte	0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf
+.byte	0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85
+.byte	0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8
+.byte	0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5
+.byte	0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2
+.byte	0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17
+.byte	0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73
+.byte	0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88
+.byte	0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb
+.byte	0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c
+.byte	0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79
+.byte	0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9
+.byte	0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08
+.byte	0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6
+.byte	0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a
+.byte	0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e
+.byte	0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e
+.byte	0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94
+.byte	0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf
+.byte	0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68
+.byte	0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
+
+.byte	0x01,0x00,0x00,0x00,	0x02,0x00,0x00,0x00	# rcon
+.byte	0x04,0x00,0x00,0x00,	0x08,0x00,0x00,0x00
+.byte	0x10,0x00,0x00,0x00,	0x20,0x00,0x00,0x00
+.byte	0x40,0x00,0x00,0x00,	0x80,0x00,0x00,0x00
+.byte	0x1B,0x00,0x00,0x00,	0x36,0x00,0x00,0x00
+
+.align	6
+AES_Td:
+.byte	0x51,0xf4,0xa7,0x50,	0x7e,0x41,0x65,0x53	# Td0
+.byte	0x1a,0x17,0xa4,0xc3,	0x3a,0x27,0x5e,0x96
+.byte	0x3b,0xab,0x6b,0xcb,	0x1f,0x9d,0x45,0xf1
+.byte	0xac,0xfa,0x58,0xab,	0x4b,0xe3,0x03,0x93
+.byte	0x20,0x30,0xfa,0x55,	0xad,0x76,0x6d,0xf6
+.byte	0x88,0xcc,0x76,0x91,	0xf5,0x02,0x4c,0x25
+.byte	0x4f,0xe5,0xd7,0xfc,	0xc5,0x2a,0xcb,0xd7
+.byte	0x26,0x35,0x44,0x80,	0xb5,0x62,0xa3,0x8f
+.byte	0xde,0xb1,0x5a,0x49,	0x25,0xba,0x1b,0x67
+.byte	0x45,0xea,0x0e,0x98,	0x5d,0xfe,0xc0,0xe1
+.byte	0xc3,0x2f,0x75,0x02,	0x81,0x4c,0xf0,0x12
+.byte	0x8d,0x46,0x97,0xa3,	0x6b,0xd3,0xf9,0xc6
+.byte	0x03,0x8f,0x5f,0xe7,	0x15,0x92,0x9c,0x95
+.byte	0xbf,0x6d,0x7a,0xeb,	0x95,0x52,0x59,0xda
+.byte	0xd4,0xbe,0x83,0x2d,	0x58,0x74,0x21,0xd3
+.byte	0x49,0xe0,0x69,0x29,	0x8e,0xc9,0xc8,0x44
+.byte	0x75,0xc2,0x89,0x6a,	0xf4,0x8e,0x79,0x78
+.byte	0x99,0x58,0x3e,0x6b,	0x27,0xb9,0x71,0xdd
+.byte	0xbe,0xe1,0x4f,0xb6,	0xf0,0x88,0xad,0x17
+.byte	0xc9,0x20,0xac,0x66,	0x7d,0xce,0x3a,0xb4
+.byte	0x63,0xdf,0x4a,0x18,	0xe5,0x1a,0x31,0x82
+.byte	0x97,0x51,0x33,0x60,	0x62,0x53,0x7f,0x45
+.byte	0xb1,0x64,0x77,0xe0,	0xbb,0x6b,0xae,0x84
+.byte	0xfe,0x81,0xa0,0x1c,	0xf9,0x08,0x2b,0x94
+.byte	0x70,0x48,0x68,0x58,	0x8f,0x45,0xfd,0x19
+.byte	0x94,0xde,0x6c,0x87,	0x52,0x7b,0xf8,0xb7
+.byte	0xab,0x73,0xd3,0x23,	0x72,0x4b,0x02,0xe2
+.byte	0xe3,0x1f,0x8f,0x57,	0x66,0x55,0xab,0x2a
+.byte	0xb2,0xeb,0x28,0x07,	0x2f,0xb5,0xc2,0x03
+.byte	0x86,0xc5,0x7b,0x9a,	0xd3,0x37,0x08,0xa5
+.byte	0x30,0x28,0x87,0xf2,	0x23,0xbf,0xa5,0xb2
+.byte	0x02,0x03,0x6a,0xba,	0xed,0x16,0x82,0x5c
+.byte	0x8a,0xcf,0x1c,0x2b,	0xa7,0x79,0xb4,0x92
+.byte	0xf3,0x07,0xf2,0xf0,	0x4e,0x69,0xe2,0xa1
+.byte	0x65,0xda,0xf4,0xcd,	0x06,0x05,0xbe,0xd5
+.byte	0xd1,0x34,0x62,0x1f,	0xc4,0xa6,0xfe,0x8a
+.byte	0x34,0x2e,0x53,0x9d,	0xa2,0xf3,0x55,0xa0
+.byte	0x05,0x8a,0xe1,0x32,	0xa4,0xf6,0xeb,0x75
+.byte	0x0b,0x83,0xec,0x39,	0x40,0x60,0xef,0xaa
+.byte	0x5e,0x71,0x9f,0x06,	0xbd,0x6e,0x10,0x51
+.byte	0x3e,0x21,0x8a,0xf9,	0x96,0xdd,0x06,0x3d
+.byte	0xdd,0x3e,0x05,0xae,	0x4d,0xe6,0xbd,0x46
+.byte	0x91,0x54,0x8d,0xb5,	0x71,0xc4,0x5d,0x05
+.byte	0x04,0x06,0xd4,0x6f,	0x60,0x50,0x15,0xff
+.byte	0x19,0x98,0xfb,0x24,	0xd6,0xbd,0xe9,0x97
+.byte	0x89,0x40,0x43,0xcc,	0x67,0xd9,0x9e,0x77
+.byte	0xb0,0xe8,0x42,0xbd,	0x07,0x89,0x8b,0x88
+.byte	0xe7,0x19,0x5b,0x38,	0x79,0xc8,0xee,0xdb
+.byte	0xa1,0x7c,0x0a,0x47,	0x7c,0x42,0x0f,0xe9
+.byte	0xf8,0x84,0x1e,0xc9,	0x00,0x00,0x00,0x00
+.byte	0x09,0x80,0x86,0x83,	0x32,0x2b,0xed,0x48
+.byte	0x1e,0x11,0x70,0xac,	0x6c,0x5a,0x72,0x4e
+.byte	0xfd,0x0e,0xff,0xfb,	0x0f,0x85,0x38,0x56
+.byte	0x3d,0xae,0xd5,0x1e,	0x36,0x2d,0x39,0x27
+.byte	0x0a,0x0f,0xd9,0x64,	0x68,0x5c,0xa6,0x21
+.byte	0x9b,0x5b,0x54,0xd1,	0x24,0x36,0x2e,0x3a
+.byte	0x0c,0x0a,0x67,0xb1,	0x93,0x57,0xe7,0x0f
+.byte	0xb4,0xee,0x96,0xd2,	0x1b,0x9b,0x91,0x9e
+.byte	0x80,0xc0,0xc5,0x4f,	0x61,0xdc,0x20,0xa2
+.byte	0x5a,0x77,0x4b,0x69,	0x1c,0x12,0x1a,0x16
+.byte	0xe2,0x93,0xba,0x0a,	0xc0,0xa0,0x2a,0xe5
+.byte	0x3c,0x22,0xe0,0x43,	0x12,0x1b,0x17,0x1d
+.byte	0x0e,0x09,0x0d,0x0b,	0xf2,0x8b,0xc7,0xad
+.byte	0x2d,0xb6,0xa8,0xb9,	0x14,0x1e,0xa9,0xc8
+.byte	0x57,0xf1,0x19,0x85,	0xaf,0x75,0x07,0x4c
+.byte	0xee,0x99,0xdd,0xbb,	0xa3,0x7f,0x60,0xfd
+.byte	0xf7,0x01,0x26,0x9f,	0x5c,0x72,0xf5,0xbc
+.byte	0x44,0x66,0x3b,0xc5,	0x5b,0xfb,0x7e,0x34
+.byte	0x8b,0x43,0x29,0x76,	0xcb,0x23,0xc6,0xdc
+.byte	0xb6,0xed,0xfc,0x68,	0xb8,0xe4,0xf1,0x63
+.byte	0xd7,0x31,0xdc,0xca,	0x42,0x63,0x85,0x10
+.byte	0x13,0x97,0x22,0x40,	0x84,0xc6,0x11,0x20
+.byte	0x85,0x4a,0x24,0x7d,	0xd2,0xbb,0x3d,0xf8
+.byte	0xae,0xf9,0x32,0x11,	0xc7,0x29,0xa1,0x6d
+.byte	0x1d,0x9e,0x2f,0x4b,	0xdc,0xb2,0x30,0xf3
+.byte	0x0d,0x86,0x52,0xec,	0x77,0xc1,0xe3,0xd0
+.byte	0x2b,0xb3,0x16,0x6c,	0xa9,0x70,0xb9,0x99
+.byte	0x11,0x94,0x48,0xfa,	0x47,0xe9,0x64,0x22
+.byte	0xa8,0xfc,0x8c,0xc4,	0xa0,0xf0,0x3f,0x1a
+.byte	0x56,0x7d,0x2c,0xd8,	0x22,0x33,0x90,0xef
+.byte	0x87,0x49,0x4e,0xc7,	0xd9,0x38,0xd1,0xc1
+.byte	0x8c,0xca,0xa2,0xfe,	0x98,0xd4,0x0b,0x36
+.byte	0xa6,0xf5,0x81,0xcf,	0xa5,0x7a,0xde,0x28
+.byte	0xda,0xb7,0x8e,0x26,	0x3f,0xad,0xbf,0xa4
+.byte	0x2c,0x3a,0x9d,0xe4,	0x50,0x78,0x92,0x0d
+.byte	0x6a,0x5f,0xcc,0x9b,	0x54,0x7e,0x46,0x62
+.byte	0xf6,0x8d,0x13,0xc2,	0x90,0xd8,0xb8,0xe8
+.byte	0x2e,0x39,0xf7,0x5e,	0x82,0xc3,0xaf,0xf5
+.byte	0x9f,0x5d,0x80,0xbe,	0x69,0xd0,0x93,0x7c
+.byte	0x6f,0xd5,0x2d,0xa9,	0xcf,0x25,0x12,0xb3
+.byte	0xc8,0xac,0x99,0x3b,	0x10,0x18,0x7d,0xa7
+.byte	0xe8,0x9c,0x63,0x6e,	0xdb,0x3b,0xbb,0x7b
+.byte	0xcd,0x26,0x78,0x09,	0x6e,0x59,0x18,0xf4
+.byte	0xec,0x9a,0xb7,0x01,	0x83,0x4f,0x9a,0xa8
+.byte	0xe6,0x95,0x6e,0x65,	0xaa,0xff,0xe6,0x7e
+.byte	0x21,0xbc,0xcf,0x08,	0xef,0x15,0xe8,0xe6
+.byte	0xba,0xe7,0x9b,0xd9,	0x4a,0x6f,0x36,0xce
+.byte	0xea,0x9f,0x09,0xd4,	0x29,0xb0,0x7c,0xd6
+.byte	0x31,0xa4,0xb2,0xaf,	0x2a,0x3f,0x23,0x31
+.byte	0xc6,0xa5,0x94,0x30,	0x35,0xa2,0x66,0xc0
+.byte	0x74,0x4e,0xbc,0x37,	0xfc,0x82,0xca,0xa6
+.byte	0xe0,0x90,0xd0,0xb0,	0x33,0xa7,0xd8,0x15
+.byte	0xf1,0x04,0x98,0x4a,	0x41,0xec,0xda,0xf7
+.byte	0x7f,0xcd,0x50,0x0e,	0x17,0x91,0xf6,0x2f
+.byte	0x76,0x4d,0xd6,0x8d,	0x43,0xef,0xb0,0x4d
+.byte	0xcc,0xaa,0x4d,0x54,	0xe4,0x96,0x04,0xdf
+.byte	0x9e,0xd1,0xb5,0xe3,	0x4c,0x6a,0x88,0x1b
+.byte	0xc1,0x2c,0x1f,0xb8,	0x46,0x65,0x51,0x7f
+.byte	0x9d,0x5e,0xea,0x04,	0x01,0x8c,0x35,0x5d
+.byte	0xfa,0x87,0x74,0x73,	0xfb,0x0b,0x41,0x2e
+.byte	0xb3,0x67,0x1d,0x5a,	0x92,0xdb,0xd2,0x52
+.byte	0xe9,0x10,0x56,0x33,	0x6d,0xd6,0x47,0x13
+.byte	0x9a,0xd7,0x61,0x8c,	0x37,0xa1,0x0c,0x7a
+.byte	0x59,0xf8,0x14,0x8e,	0xeb,0x13,0x3c,0x89
+.byte	0xce,0xa9,0x27,0xee,	0xb7,0x61,0xc9,0x35
+.byte	0xe1,0x1c,0xe5,0xed,	0x7a,0x47,0xb1,0x3c
+.byte	0x9c,0xd2,0xdf,0x59,	0x55,0xf2,0x73,0x3f
+.byte	0x18,0x14,0xce,0x79,	0x73,0xc7,0x37,0xbf
+.byte	0x53,0xf7,0xcd,0xea,	0x5f,0xfd,0xaa,0x5b
+.byte	0xdf,0x3d,0x6f,0x14,	0x78,0x44,0xdb,0x86
+.byte	0xca,0xaf,0xf3,0x81,	0xb9,0x68,0xc4,0x3e
+.byte	0x38,0x24,0x34,0x2c,	0xc2,0xa3,0x40,0x5f
+.byte	0x16,0x1d,0xc3,0x72,	0xbc,0xe2,0x25,0x0c
+.byte	0x28,0x3c,0x49,0x8b,	0xff,0x0d,0x95,0x41
+.byte	0x39,0xa8,0x01,0x71,	0x08,0x0c,0xb3,0xde
+.byte	0xd8,0xb4,0xe4,0x9c,	0x64,0x56,0xc1,0x90
+.byte	0x7b,0xcb,0x84,0x61,	0xd5,0x32,0xb6,0x70
+.byte	0x48,0x6c,0x5c,0x74,	0xd0,0xb8,0x57,0x42
+
+.byte	0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38	# Td4
+.byte	0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb
+.byte	0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87
+.byte	0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb
+.byte	0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d
+.byte	0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e
+.byte	0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2
+.byte	0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25
+.byte	0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16
+.byte	0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92
+.byte	0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda
+.byte	0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84
+.byte	0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a
+.byte	0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06
+.byte	0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02
+.byte	0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b
+.byte	0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea
+.byte	0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73
+.byte	0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85
+.byte	0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e
+.byte	0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89
+.byte	0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b
+.byte	0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20
+.byte	0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4
+.byte	0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31
+.byte	0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f
+.byte	0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d
+.byte	0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef
+.byte	0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0
+.byte	0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61
+.byte	0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26
+.byte	0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
+___
+
+foreach (split("\n",$code)) {
+	s/\`([^\`]*)\`/eval $1/ge;
+
+	# made-up _instructions, _xtr, _ins, _ror and _bias, cope
+	# with byte order dependencies...
+	if (/^\s+_/) {
+	    s/(_[a-z]+\s+)(\$[0-9]+),([^,]+)(#.*)*$/$1$2,$2,$3/;
+
+	    s/_xtr\s+(\$[0-9]+),(\$[0-9]+),([0-9]+(\-2)*)/
+		sprintf("srl\t$1,$2,%d",$big_endian ?	eval($3)
+					:		eval("24-$3"))/e or
+	    s/_ins\s+(\$[0-9]+),(\$[0-9]+),([0-9]+)/
+		sprintf("sll\t$1,$2,%d",$big_endian ?	eval($3)
+					:		eval("24-$3"))/e or
+	    s/_ror\s+(\$[0-9]+),(\$[0-9]+),(\-?[0-9]+)/
+		sprintf("srl\t$1,$2,%d",$big_endian ?	eval($3)
+					:		eval("$3*-1"))/e or
+	    s/_bias\s+(\$[0-9]+),(\$[0-9]+),([0-9]+)/
+		sprintf("sll\t$1,$2,%d",$big_endian ?	eval($3)
+					:		eval("($3-16)&31"))/e;
+
+	    s/srl\s+(\$[0-9]+),(\$[0-9]+),\-([0-9]+)/
+		sprintf("sll\t$1,$2,$3")/e				or
+	    s/srl\s+(\$[0-9]+),(\$[0-9]+),0/
+		sprintf("and\t$1,$2,0xff")/e				or
+	    s/(sll\s+\$[0-9]+,\$[0-9]+,0)/#$1/;
+	}
+
+	# convert lwl/lwr and swr/swl to little-endian order
+	if (!$big_endian && /^\s+[sl]w[lr]\s+/) {
+	    s/([sl]wl.*)([0-9]+)\((\$[0-9]+)\)/
+		sprintf("$1%d($3)",eval("$2-$2%4+($2%4-1)&3"))/e	or
+	    s/([sl]wr.*)([0-9]+)\((\$[0-9]+)\)/
+		sprintf("$1%d($3)",eval("$2-$2%4+($2%4+1)&3"))/e;
+	}
+
+	print $_,"\n";
+}
+
+close STDOUT;
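
Note on the postprocessing loop above: the _xtr/_ins rewrites pick the real MIPS shift count from the target byte order. A big-endian build keeps the count exactly as written in the template, a little-endian build mirrors it to 24-N, and the follow-up substitutions clean up the degenerate results (an srl by 0 becomes "and dst,src,0xff", an srl by a negative count becomes the matching sll). A minimal C sketch of that selection rule, with made-up names and purely for illustration:

    #include <stdio.h>

    /* Same rule as the s/_xtr/.../ and s/_ins/.../ substitutions above:
     * keep the shift count on a big-endian target, mirror it to 24-N on
     * a little-endian one.  The masking itself (0xff or 0x3fc) is a
     * separate instruction in the template, so only the count is shown. */
    static int mips_shift_count(int n, int big_endian)
    {
        return big_endian ? n : 24 - n;
    }

    int main(void)
    {
        for (int n = 0; n <= 24; n += 8)
            printf("template %2d -> big-endian %2d, little-endian %2d\n",
                   n, mips_shift_count(n, 1), mips_shift_count(n, 0));
        return 0;
    }
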
diff --git a/jni/openssl/crypto/aes/asm/aes-mips.s b/jni/openssl/crypto/aes/asm/aes-mips.s
new file mode 100644
index 0000000..0c1d85a
--- /dev/null
+++ b/jni/openssl/crypto/aes/asm/aes-mips.s
@@ -0,0 +1,1341 @@
+.text
+#ifdef OPENSSL_FIPSCANISTER
+# include <openssl/fipssyms.h>
+#endif
+
+#if !defined(__vxworks) || defined(__pic__)
+.option	pic2
+#endif
+.set	noat
+.align	5
+.ent	_mips_AES_encrypt
+_mips_AES_encrypt:
+	.frame	$29,0,$31
+	.set	reorder
+	lw	$12,0($6)
+	lw	$13,4($6)
+	lw	$14,8($6)
+	lw	$15,12($6)
+	lw	$30,240($6)
+	add $3,$6,16
+
+	xor	$8,$12
+	xor	$9,$13
+	xor	$10,$14
+	xor	$11,$15
+
+	sub	$30,1
+	srl	$1,$9,6
+.Loop_enc:
+	srl	$2,$10,6
+	srl	$24,$11,6
+	srl	$25,$8,6
+	and	$1,0x3fc
+	and	$2,0x3fc
+	and	$24,0x3fc
+	and	$25,0x3fc
+	add $1,$7
+	add $2,$7
+	add $24,$7
+	add $25,$7
+	lwl	$12,2($1)		# Te1[s1>>16]
+	lwl	$13,2($2)		# Te1[s2>>16]
+	lwl	$14,2($24)		# Te1[s3>>16]
+	lwl	$15,2($25)		# Te1[s0>>16]
+	lwr	$12,3($1)		# Te1[s1>>16]
+	lwr	$13,3($2)		# Te1[s2>>16]
+	lwr	$14,3($24)		# Te1[s3>>16]
+	lwr	$15,3($25)		# Te1[s0>>16]
+
+	srl	$1,$10,14
+	srl	$2,$11,14
+	srl	$24,$8,14
+	srl	$25,$9,14
+	and	$1,0x3fc
+	and	$2,0x3fc
+	and	$24,0x3fc
+	and	$25,0x3fc
+	add $1,$7
+	add $2,$7
+	add $24,$7
+	add $25,$7
+	lwl	$16,1($1)		# Te2[s2>>8]
+	lwl	$17,1($2)		# Te2[s3>>8]
+	lwl	$18,1($24)		# Te2[s0>>8]
+	lwl	$19,1($25)		# Te2[s1>>8]
+	lwr	$16,2($1)		# Te2[s2>>8]
+	lwr	$17,2($2)		# Te2[s3>>8]
+	lwr	$18,2($24)		# Te2[s0>>8]
+	lwr	$19,2($25)		# Te2[s1>>8]
+
+	srl	$1,$11,22
+	srl	$2,$8,22
+	srl	$24,$9,22
+	srl	$25,$10,22
+	and	$1,0x3fc
+	and	$2,0x3fc
+	and	$24,0x3fc
+	and	$25,0x3fc
+	add $1,$7
+	add $2,$7
+	add $24,$7
+	add $25,$7
+	lwl	$20,0($1)		# Te3[s3]
+	lwl	$21,0($2)		# Te3[s0]
+	lwl	$22,0($24)		# Te3[s1]
+	lwl	$23,0($25)		# Te3[s2]
+	lwr	$20,1($1)		# Te3[s3]
+	lwr	$21,1($2)		# Te3[s0]
+	lwr	$22,1($24)		# Te3[s1]
+	lwr	$23,1($25)		# Te3[s2]
+
+	sll	$1,$8,2
+	sll	$2,$9,2
+	sll	$24,$10,2
+	sll	$25,$11,2
+	and	$1,0x3fc
+	and	$2,0x3fc
+	and	$24,0x3fc
+	and	$25,0x3fc
+	add $1,$7
+	add $2,$7
+	add $24,$7
+	add $25,$7
+	xor	$12,$16
+	xor	$13,$17
+	xor	$14,$18
+	xor	$15,$19
+	lw	$16,0($1)		# Te0[s0>>24]
+	lw	$17,0($2)		# Te0[s1>>24]
+	lw	$18,0($24)		# Te0[s2>>24]
+	lw	$19,0($25)		# Te0[s3>>24]
+
+	lw	$8,0($3)
+	lw	$9,4($3)
+	lw	$10,8($3)
+	lw	$11,12($3)
+
+	xor	$12,$20
+	xor	$13,$21
+	xor	$14,$22
+	xor	$15,$23
+
+	xor	$12,$16
+	xor	$13,$17
+	xor	$14,$18
+	xor	$15,$19
+
+	sub	$30,1
+	add $3,16
+	xor	$8,$12
+	xor	$9,$13
+	xor	$10,$14
+	xor	$11,$15
+	.set	noreorder
+	bnez	$30,.Loop_enc
+	srl	$1,$9,6
+
+	.set	reorder
+	srl	$2,$10,6
+	srl	$24,$11,6
+	srl	$25,$8,6
+	and	$1,0x3fc
+	and	$2,0x3fc
+	and	$24,0x3fc
+	and	$25,0x3fc
+	add $1,$7
+	add $2,$7
+	add $24,$7
+	add $25,$7
+	lbu	$12,2($1)		# Te4[s1>>16]
+	lbu	$13,2($2)		# Te4[s2>>16]
+	lbu	$14,2($24)		# Te4[s3>>16]
+	lbu	$15,2($25)		# Te4[s0>>16]
+
+	srl	$1,$10,14
+	srl	$2,$11,14
+	srl	$24,$8,14
+	srl	$25,$9,14
+	and	$1,0x3fc
+	and	$2,0x3fc
+	and	$24,0x3fc
+	and	$25,0x3fc
+	add $1,$7
+	add $2,$7
+	add $24,$7
+	add $25,$7
+	lbu	$16,2($1)		# Te4[s2>>8]
+	lbu	$17,2($2)		# Te4[s3>>8]
+	lbu	$18,2($24)		# Te4[s0>>8]
+	lbu	$19,2($25)		# Te4[s1>>8]
+
+	sll	$1,$8,2
+	sll	$2,$9,2
+	sll	$24,$10,2
+	sll	$25,$11,2
+	and	$1,0x3fc
+	and	$2,0x3fc
+	and	$24,0x3fc
+	and	$25,0x3fc
+	add $1,$7
+	add $2,$7
+	add $24,$7
+	add $25,$7
+	lbu	$20,2($1)		# Te4[s0>>24]
+	lbu	$21,2($2)		# Te4[s1>>24]
+	lbu	$22,2($24)		# Te4[s2>>24]
+	lbu	$23,2($25)		# Te4[s3>>24]
+
+	srl	$1,$11,22
+	srl	$2,$8,22
+	srl	$24,$9,22
+	srl	$25,$10,22
+	and	$1,0x3fc
+	and	$2,0x3fc
+	and	$24,0x3fc
+	and	$25,0x3fc
+
+	sll	$12,$12,8
+	sll	$13,$13,8
+	sll	$14,$14,8
+	sll	$15,$15,8
+
+	sll	$16,$16,16
+	sll	$17,$17,16
+	sll	$18,$18,16
+	sll	$19,$19,16
+
+	xor	$12,$16
+	xor	$13,$17
+	xor	$14,$18
+	xor	$15,$19
+
+	add $1,$7
+	add $2,$7
+	add $24,$7
+	add $25,$7
+	lbu	$16,2($1)		# Te4[s3]
+	lbu	$17,2($2)		# Te4[s0]
+	lbu	$18,2($24)		# Te4[s1]
+	lbu	$19,2($25)		# Te4[s2]
+
+	#sll	$20,$20,0
+	#sll	$21,$21,0
+	#sll	$22,$22,0
+	#sll	$23,$23,0
+
+	lw	$8,0($3)
+	lw	$9,4($3)
+	lw	$10,8($3)
+	lw	$11,12($3)
+
+	xor	$12,$20
+	xor	$13,$21
+	xor	$14,$22
+	xor	$15,$23
+
+	sll	$16,$16,24
+	sll	$17,$17,24
+	sll	$18,$18,24
+	sll	$19,$19,24
+
+	xor	$12,$16
+	xor	$13,$17
+	xor	$14,$18
+	xor	$15,$19
+
+	xor	$8,$12
+	xor	$9,$13
+	xor	$10,$14
+	xor	$11,$15
+
+	jr	$31
+.end	_mips_AES_encrypt
+
+.align	5
+.globl	AES_encrypt
+.ent	AES_encrypt
+AES_encrypt:
+	.frame	$29,64,$31
+	.mask	3237937152,-4
+	.set	noreorder
+	sub $29,64
+	sw	$31,64-1*4($29)
+	sw	$30,64-2*4($29)
+	sw	$23,64-3*4($29)
+	sw	$22,64-4*4($29)
+	sw	$21,64-5*4($29)
+	sw	$20,64-6*4($29)
+	sw	$19,64-7*4($29)
+	sw	$18,64-8*4($29)
+	sw	$17,64-9*4($29)
+	sw	$16,64-10*4($29)
+	.cplocal	$7
+	.cpsetup	$25,$0,AES_encrypt
+	.set	reorder
+	la	$7,AES_Te		# PIC-ified 'load address'
+
+	lwl	$8,0+3($4)
+	lwl	$9,4+3($4)
+	lwl	$10,8+3($4)
+	lwl	$11,12+3($4)
+	lwr	$8,0+0($4)
+	lwr	$9,4+0($4)
+	lwr	$10,8+0($4)
+	lwr	$11,12+0($4)
+
+	bal	_mips_AES_encrypt
+
+	swr	$8,0+0($5)
+	swr	$9,4+0($5)
+	swr	$10,8+0($5)
+	swr	$11,12+0($5)
+	swl	$8,0+3($5)
+	swl	$9,4+3($5)
+	swl	$10,8+3($5)
+	swl	$11,12+3($5)
+
+	.set	noreorder
+	lw	$31,64-1*4($29)
+	lw	$30,64-2*4($29)
+	lw	$23,64-3*4($29)
+	lw	$22,64-4*4($29)
+	lw	$21,64-5*4($29)
+	lw	$20,64-6*4($29)
+	lw	$19,64-7*4($29)
+	lw	$18,64-8*4($29)
+	lw	$17,64-9*4($29)
+	lw	$16,64-10*4($29)
+	jr	$31
+	add $29,64
+.end	AES_encrypt
+.align	5
+.ent	_mips_AES_decrypt
+_mips_AES_decrypt:
+	.frame	$29,0,$31
+	.set	reorder
+	lw	$12,0($6)
+	lw	$13,4($6)
+	lw	$14,8($6)
+	lw	$15,12($6)
+	lw	$30,240($6)
+	add $3,$6,16
+
+	xor	$8,$12
+	xor	$9,$13
+	xor	$10,$14
+	xor	$11,$15
+
+	sub	$30,1
+	srl	$1,$11,6
+.Loop_dec:
+	srl	$2,$8,6
+	srl	$24,$9,6
+	srl	$25,$10,6
+	and	$1,0x3fc
+	and	$2,0x3fc
+	and	$24,0x3fc
+	and	$25,0x3fc
+	add $1,$7
+	add $2,$7
+	add $24,$7
+	add $25,$7
+	lwl	$12,2($1)		# Td1[s3>>16]
+	lwl	$13,2($2)		# Td1[s0>>16]
+	lwl	$14,2($24)		# Td1[s1>>16]
+	lwl	$15,2($25)		# Td1[s2>>16]
+	lwr	$12,3($1)		# Td1[s3>>16]
+	lwr	$13,3($2)		# Td1[s0>>16]
+	lwr	$14,3($24)		# Td1[s1>>16]
+	lwr	$15,3($25)		# Td1[s2>>16]
+
+	srl	$1,$10,14
+	srl	$2,$11,14
+	srl	$24,$8,14
+	srl	$25,$9,14
+	and	$1,0x3fc
+	and	$2,0x3fc
+	and	$24,0x3fc
+	and	$25,0x3fc
+	add $1,$7
+	add $2,$7
+	add $24,$7
+	add $25,$7
+	lwl	$16,1($1)		# Td2[s2>>8]
+	lwl	$17,1($2)		# Td2[s3>>8]
+	lwl	$18,1($24)		# Td2[s0>>8]
+	lwl	$19,1($25)		# Td2[s1>>8]
+	lwr	$16,2($1)		# Td2[s2>>8]
+	lwr	$17,2($2)		# Td2[s3>>8]
+	lwr	$18,2($24)		# Td2[s0>>8]
+	lwr	$19,2($25)		# Td2[s1>>8]
+
+	srl	$1,$9,22
+	srl	$2,$10,22
+	srl	$24,$11,22
+	srl	$25,$8,22
+	and	$1,0x3fc
+	and	$2,0x3fc
+	and	$24,0x3fc
+	and	$25,0x3fc
+	add $1,$7
+	add $2,$7
+	add $24,$7
+	add $25,$7
+	lwl	$20,0($1)		# Td3[s1]
+	lwl	$21,0($2)		# Td3[s2]
+	lwl	$22,0($24)		# Td3[s3]
+	lwl	$23,0($25)		# Td3[s0]
+	lwr	$20,1($1)		# Td3[s1]
+	lwr	$21,1($2)		# Td3[s2]
+	lwr	$22,1($24)		# Td3[s3]
+	lwr	$23,1($25)		# Td3[s0]
+
+	sll	$1,$8,2
+	sll	$2,$9,2
+	sll	$24,$10,2
+	sll	$25,$11,2
+	and	$1,0x3fc
+	and	$2,0x3fc
+	and	$24,0x3fc
+	and	$25,0x3fc
+	add $1,$7
+	add $2,$7
+	add $24,$7
+	add $25,$7
+
+	xor	$12,$16
+	xor	$13,$17
+	xor	$14,$18
+	xor	$15,$19
+
+
+	lw	$16,0($1)		# Td0[s0>>24]
+	lw	$17,0($2)		# Td0[s1>>24]
+	lw	$18,0($24)		# Td0[s2>>24]
+	lw	$19,0($25)		# Td0[s3>>24]
+
+	lw	$8,0($3)
+	lw	$9,4($3)
+	lw	$10,8($3)
+	lw	$11,12($3)
+
+	xor	$12,$20
+	xor	$13,$21
+	xor	$14,$22
+	xor	$15,$23
+
+	xor	$12,$16
+	xor	$13,$17
+	xor	$14,$18
+	xor	$15,$19
+
+	sub	$30,1
+	add $3,16
+	xor	$8,$12
+	xor	$9,$13
+	xor	$10,$14
+	xor	$11,$15
+	.set	noreorder
+	bnez	$30,.Loop_dec
+	srl	$1,$11,6
+
+	.set	reorder
+	lw	$16,1024($7)		# prefetch Td4
+	lw	$17,1024+32($7)
+	lw	$18,1024+64($7)
+	lw	$19,1024+96($7)
+	lw	$20,1024+128($7)
+	lw	$21,1024+160($7)
+	lw	$22,1024+192($7)
+	lw	$23,1024+224($7)
+
+	srl	$1,$11,8
+	srl	$2,$8,8
+	srl	$24,$9,8
+	srl	$25,$10,8
+	and	$1,0xff
+	and	$2,0xff
+	and	$24,0xff
+	and	$25,0xff
+	add $1,$7
+	add $2,$7
+	add $24,$7
+	add $25,$7
+	lbu	$12,1024($1)		# Td4[s3>>16]
+	lbu	$13,1024($2)		# Td4[s0>>16]
+	lbu	$14,1024($24)		# Td4[s1>>16]
+	lbu	$15,1024($25)		# Td4[s2>>16]
+
+	srl	$1,$10,16
+	srl	$2,$11,16
+	srl	$24,$8,16
+	srl	$25,$9,16
+	and	$1,0xff
+	and	$2,0xff
+	and	$24,0xff
+	and	$25,0xff
+	add $1,$7
+	add $2,$7
+	add $24,$7
+	add $25,$7
+	lbu	$16,1024($1)		# Td4[s2>>8]
+	lbu	$17,1024($2)		# Td4[s3>>8]
+	lbu	$18,1024($24)		# Td4[s0>>8]
+	lbu	$19,1024($25)		# Td4[s1>>8]
+
+	and	$1,$8,0xff
+	and	$2,$9,0xff
+	and	$24,$10,0xff
+	and	$25,$11,0xff
+	add $1,$7
+	add $2,$7
+	add $24,$7
+	add $25,$7
+	lbu	$20,1024($1)		# Td4[s0>>24]
+	lbu	$21,1024($2)		# Td4[s1>>24]
+	lbu	$22,1024($24)		# Td4[s2>>24]
+	lbu	$23,1024($25)		# Td4[s3>>24]
+
+	srl	$1,$9,24
+	srl	$2,$10,24
+	srl	$24,$11,24
+	srl	$25,$8,24
+
+	sll	$12,$12,8
+	sll	$13,$13,8
+	sll	$14,$14,8
+	sll	$15,$15,8
+
+	sll	$16,$16,16
+	sll	$17,$17,16
+	sll	$18,$18,16
+	sll	$19,$19,16
+
+	xor	$12,$16
+	xor	$13,$17
+	xor	$14,$18
+	xor	$15,$19
+
+	add $1,$7
+	add $2,$7
+	add $24,$7
+	add $25,$7
+	lbu	$16,1024($1)		# Td4[s1]
+	lbu	$17,1024($2)		# Td4[s2]
+	lbu	$18,1024($24)		# Td4[s3]
+	lbu	$19,1024($25)		# Td4[s0]
+
+	#sll	$20,$20,0
+	#sll	$21,$21,0
+	#sll	$22,$22,0
+	#sll	$23,$23,0
+
+	lw	$8,0($3)
+	lw	$9,4($3)
+	lw	$10,8($3)
+	lw	$11,12($3)
+
+	sll	$16,$16,24
+	sll	$17,$17,24
+	sll	$18,$18,24
+	sll	$19,$19,24
+
+
+	xor	$12,$20
+	xor	$13,$21
+	xor	$14,$22
+	xor	$15,$23
+
+	xor	$12,$16
+	xor	$13,$17
+	xor	$14,$18
+	xor	$15,$19
+
+	xor	$8,$12
+	xor	$9,$13
+	xor	$10,$14
+	xor	$11,$15
+
+	jr	$31
+.end	_mips_AES_decrypt
+
+.align	5
+.globl	AES_decrypt
+.ent	AES_decrypt
+AES_decrypt:
+	.frame	$29,64,$31
+	.mask	3237937152,-4
+	.set	noreorder
+	sub $29,64
+	sw	$31,64-1*4($29)
+	sw	$30,64-2*4($29)
+	sw	$23,64-3*4($29)
+	sw	$22,64-4*4($29)
+	sw	$21,64-5*4($29)
+	sw	$20,64-6*4($29)
+	sw	$19,64-7*4($29)
+	sw	$18,64-8*4($29)
+	sw	$17,64-9*4($29)
+	sw	$16,64-10*4($29)
+	.cplocal	$7
+	.cpsetup	$25,$0,AES_decrypt
+	.set	reorder
+	la	$7,AES_Td		# PIC-ified 'load address'
+
+	lwl	$8,0+3($4)
+	lwl	$9,4+3($4)
+	lwl	$10,8+3($4)
+	lwl	$11,12+3($4)
+	lwr	$8,0+0($4)
+	lwr	$9,4+0($4)
+	lwr	$10,8+0($4)
+	lwr	$11,12+0($4)
+
+	bal	_mips_AES_decrypt
+
+	swr	$8,0+0($5)
+	swr	$9,4+0($5)
+	swr	$10,8+0($5)
+	swr	$11,12+0($5)
+	swl	$8,0+3($5)
+	swl	$9,4+3($5)
+	swl	$10,8+3($5)
+	swl	$11,12+3($5)
+
+	.set	noreorder
+	lw	$31,64-1*4($29)
+	lw	$30,64-2*4($29)
+	lw	$23,64-3*4($29)
+	lw	$22,64-4*4($29)
+	lw	$21,64-5*4($29)
+	lw	$20,64-6*4($29)
+	lw	$19,64-7*4($29)
+	lw	$18,64-8*4($29)
+	lw	$17,64-9*4($29)
+	lw	$16,64-10*4($29)
+	jr	$31
+	add $29,64
+.end	AES_decrypt
+.align	5
+.ent	_mips_AES_set_encrypt_key
+_mips_AES_set_encrypt_key:
+	.frame	$29,0,$31
+	.set	noreorder
+	beqz	$4,.Lekey_done
+	li	$2,-1
+	beqz	$6,.Lekey_done
+	add $3,$7,1024+256
+
+	.set	reorder
+	lwl	$8,0+3($4)	# load 128 bits
+	lwl	$9,4+3($4)
+	lwl	$10,8+3($4)
+	lwl	$11,12+3($4)
+	li	$1,128
+	lwr	$8,0+0($4)
+	lwr	$9,4+0($4)
+	lwr	$10,8+0($4)
+	lwr	$11,12+0($4)
+	.set	noreorder
+	beq	$5,$1,.L128bits
+	li	$30,10
+
+	.set	reorder
+	lwl	$12,16+3($4)	# load 192 bits
+	lwl	$13,20+3($4)
+	li	$1,192
+	lwr	$12,16+0($4)
+	lwr	$13,20+0($4)
+	.set	noreorder
+	beq	$5,$1,.L192bits
+	li	$30,8
+
+	.set	reorder
+	lwl	$14,24+3($4)	# load 256 bits
+	lwl	$15,28+3($4)
+	li	$1,256
+	lwr	$14,24+0($4)
+	lwr	$15,28+0($4)
+	.set	noreorder
+	beq	$5,$1,.L256bits
+	li	$30,7
+
+	b	.Lekey_done
+	li	$2,-2
+
+.align	4
+.L128bits:
+	.set	reorder
+	srl	$1,$11,16
+	srl	$2,$11,8
+	and	$1,0xff
+	and	$2,0xff
+	and	$24,$11,0xff
+	srl	$25,$11,24
+	add $1,$7
+	add $2,$7
+	add $24,$7
+	add $25,$7
+	lbu	$1,1024($1)
+	lbu	$2,1024($2)
+	lbu	$24,1024($24)
+	lbu	$25,1024($25)
+
+	sw	$8,0($6)
+	sw	$9,4($6)
+	sw	$10,8($6)
+	sw	$11,12($6)
+	sub	$30,1
+	add $6,16
+
+	sll	$1,$1,8
+	#sll	$2,$2,0
+	sll	$24,$24,24
+	sll	$25,$25,16
+
+	xor	$8,$1
+	lw	$1,0($3)
+	xor	$8,$2
+	xor	$8,$24
+	xor	$8,$25
+	xor	$8,$1
+
+	xor	$9,$8
+	xor	$10,$9
+	xor	$11,$10
+
+	.set	noreorder
+	bnez	$30,.L128bits
+	add $3,4
+
+	sw	$8,0($6)
+	sw	$9,4($6)
+	sw	$10,8($6)
+	li	$30,10
+	sw	$11,12($6)
+	li	$2,0
+	sw	$30,80($6)
+	b	.Lekey_done
+	sub $6,10*16
+
+.align	4
+.L192bits:
+	.set	reorder
+	srl	$1,$13,16
+	srl	$2,$13,8
+	and	$1,0xff
+	and	$2,0xff
+	and	$24,$13,0xff
+	srl	$25,$13,24
+	add $1,$7
+	add $2,$7
+	add $24,$7
+	add $25,$7
+	lbu	$1,1024($1)
+	lbu	$2,1024($2)
+	lbu	$24,1024($24)
+	lbu	$25,1024($25)
+
+	sw	$8,0($6)
+	sw	$9,4($6)
+	sw	$10,8($6)
+	sw	$11,12($6)
+	sw	$12,16($6)
+	sw	$13,20($6)
+	sub	$30,1
+	add $6,24
+
+	sll	$1,$1,8
+	#sll	$2,$2,0
+	sll	$24,$24,24
+	sll	$25,$25,16
+
+	xor	$8,$1
+	lw	$1,0($3)
+	xor	$8,$2
+	xor	$8,$24
+	xor	$8,$25
+	xor	$8,$1
+
+	xor	$9,$8
+	xor	$10,$9
+	xor	$11,$10
+	xor	$12,$11
+	xor	$13,$12
+
+	.set	noreorder
+	bnez	$30,.L192bits
+	add $3,4
+
+	sw	$8,0($6)
+	sw	$9,4($6)
+	sw	$10,8($6)
+	li	$30,12
+	sw	$11,12($6)
+	li	$2,0
+	sw	$30,48($6)
+	b	.Lekey_done
+	sub $6,12*16
+
+.align	4
+.L256bits:
+	.set	reorder
+	srl	$1,$15,16
+	srl	$2,$15,8
+	and	$1,0xff
+	and	$2,0xff
+	and	$24,$15,0xff
+	srl	$25,$15,24
+	add $1,$7
+	add $2,$7
+	add $24,$7
+	add $25,$7
+	lbu	$1,1024($1)
+	lbu	$2,1024($2)
+	lbu	$24,1024($24)
+	lbu	$25,1024($25)
+
+	sw	$8,0($6)
+	sw	$9,4($6)
+	sw	$10,8($6)
+	sw	$11,12($6)
+	sw	$12,16($6)
+	sw	$13,20($6)
+	sw	$14,24($6)
+	sw	$15,28($6)
+	sub	$30,1
+
+	sll	$1,$1,8
+	#sll	$2,$2,0
+	sll	$24,$24,24
+	sll	$25,$25,16
+
+	xor	$8,$1
+	lw	$1,0($3)
+	xor	$8,$2
+	xor	$8,$24
+	xor	$8,$25
+	xor	$8,$1
+
+	xor	$9,$8
+	xor	$10,$9
+	xor	$11,$10
+	beqz	$30,.L256bits_done
+
+	srl	$1,$11,24
+	srl	$2,$11,16
+	srl	$24,$11,8
+	and	$25,$11,0xff
+	and	$2,0xff
+	and	$24,0xff
+	add $1,$7
+	add $2,$7
+	add $24,$7
+	add $25,$7
+	lbu	$1,1024($1)
+	lbu	$2,1024($2)
+	lbu	$24,1024($24)
+	lbu	$25,1024($25)
+	sll	$1,24
+	sll	$2,16
+	sll	$24,8
+
+	xor	$12,$1
+	xor	$12,$2
+	xor	$12,$24
+	xor	$12,$25
+
+	xor	$13,$12
+	xor	$14,$13
+	xor	$15,$14
+
+	add $6,32
+	.set	noreorder
+	b	.L256bits
+	add $3,4
+
+.L256bits_done:
+	sw	$8,32($6)
+	sw	$9,36($6)
+	sw	$10,40($6)
+	li	$30,14
+	sw	$11,44($6)
+	li	$2,0
+	sw	$30,48($6)
+	sub $6,12*16
+
+.Lekey_done:
+	jr	$31
+	nop
+.end	_mips_AES_set_encrypt_key
+
+.globl	AES_set_encrypt_key
+.ent	AES_set_encrypt_key
+AES_set_encrypt_key:
+	.frame	$29,32,$31
+	.mask	3221225472,-4
+	.set	noreorder
+	sub $29,32
+	sw	$31,32-1*4($29)
+	sw	$30,32-2*4($29)
+	.cplocal	$7
+	.cpsetup	$25,$0,AES_set_encrypt_key
+	.set	reorder
+	la	$7,AES_Te		# PIC-ified 'load address'
+
+	bal	_mips_AES_set_encrypt_key
+
+	.set	noreorder
+	move	$4,$2
+	lw	$31,32-1*4($29)
+	lw	$30,32-2*4($29)
+	jr	$31
+	add $29,32
+.end	AES_set_encrypt_key
+.align	5
+.globl	AES_set_decrypt_key
+.ent	AES_set_decrypt_key
+AES_set_decrypt_key:
+	.frame	$29,32,$31
+	.mask	3221225472,-4
+	.set	noreorder
+	sub $29,32
+	sw	$31,32-1*4($29)
+	sw	$30,32-2*4($29)
+	.cplocal	$7
+	.cpsetup	$25,$0,AES_set_decrypt_key
+	.set	reorder
+	la	$7,AES_Te		# PIC-ified 'load address'
+
+	bal	_mips_AES_set_encrypt_key
+
+	bltz	$2,.Ldkey_done
+
+	sll	$1,$30,4
+	add $4,$6,0
+	add $5,$6,$1
+.align	4
+.Lswap:
+	lw	$8,0($4)
+	lw	$9,4($4)
+	lw	$10,8($4)
+	lw	$11,12($4)
+	lw	$12,0($5)
+	lw	$13,4($5)
+	lw	$14,8($5)
+	lw	$15,12($5)
+	sw	$8,0($5)
+	sw	$9,4($5)
+	sw	$10,8($5)
+	sw	$11,12($5)
+	add $4,16
+	sub $5,16
+	sw	$12,-16($4)
+	sw	$13,-12($4)
+	sw	$14,-8($4)
+	sw	$15,-4($4)
+	bne	$4,$5,.Lswap
+
+	lw	$8,16($6)		# modulo-scheduled
+	lui	$2,0x8080
+	sub	$30,1
+	or	$2,0x8080
+	sll	$30,2
+	add $6,16
+	lui	$25,0x1b1b
+	nor	$24,$0,$2
+	or	$25,0x1b1b
+.align	4
+.Lmix:
+	and	$1,$8,$2
+	and	$9,$8,$24
+	srl	$10,$1,7
+	addu	$9,$9		# tp2<<1
+	subu	$1,$10
+	and	$1,$25
+	xor	$9,$1
+
+	and	$1,$9,$2
+	and	$10,$9,$24
+	srl	$11,$1,7
+	addu	$10,$10		# tp4<<1
+	subu	$1,$11
+	and	$1,$25
+	xor	$10,$1
+
+	and	$1,$10,$2
+	and	$11,$10,$24
+	srl	$12,$1,7
+	addu	$11,$11		# tp8<<1
+	subu	$1,$12
+	and	$1,$25
+	xor	$11,$1
+
+	xor	$12,$11,$8
+	xor	$15,$11,$10
+	xor	$13,$12,$9
+	xor	$14,$12,$10
+
+	sll	$8,$14,16
+	 xor	$15,$9
+	srl	$9,$14,16
+	xor	$15,$8
+	sll	$8,$12,8
+	xor	$15,$9
+	srl	$9,$12,24
+	xor	$15,$8
+	sll	$8,$13,24
+	xor	$15,$9
+	srl	$9,$13,8
+	xor	$15,$8
+	lw	$8,4($6)		# modulo-scheduled
+	xor	$15,$9
+	sub	$30,1
+	sw	$15,0($6)
+	add $6,4
+	bnez	$30,.Lmix
+
+	li	$2,0
+.Ldkey_done:
+	.set	noreorder
+	move	$4,$2
+	lw	$31,32-1*4($29)
+	lw	$30,32-2*4($29)
+	jr	$31
+	add $29,32
+.end	AES_set_decrypt_key
+.rdata
+.align	6
+AES_Te:
+.byte	0xc6,0x63,0x63,0xa5,	0xf8,0x7c,0x7c,0x84	# Te0
+.byte	0xee,0x77,0x77,0x99,	0xf6,0x7b,0x7b,0x8d
+.byte	0xff,0xf2,0xf2,0x0d,	0xd6,0x6b,0x6b,0xbd
+.byte	0xde,0x6f,0x6f,0xb1,	0x91,0xc5,0xc5,0x54
+.byte	0x60,0x30,0x30,0x50,	0x02,0x01,0x01,0x03
+.byte	0xce,0x67,0x67,0xa9,	0x56,0x2b,0x2b,0x7d
+.byte	0xe7,0xfe,0xfe,0x19,	0xb5,0xd7,0xd7,0x62
+.byte	0x4d,0xab,0xab,0xe6,	0xec,0x76,0x76,0x9a
+.byte	0x8f,0xca,0xca,0x45,	0x1f,0x82,0x82,0x9d
+.byte	0x89,0xc9,0xc9,0x40,	0xfa,0x7d,0x7d,0x87
+.byte	0xef,0xfa,0xfa,0x15,	0xb2,0x59,0x59,0xeb
+.byte	0x8e,0x47,0x47,0xc9,	0xfb,0xf0,0xf0,0x0b
+.byte	0x41,0xad,0xad,0xec,	0xb3,0xd4,0xd4,0x67
+.byte	0x5f,0xa2,0xa2,0xfd,	0x45,0xaf,0xaf,0xea
+.byte	0x23,0x9c,0x9c,0xbf,	0x53,0xa4,0xa4,0xf7
+.byte	0xe4,0x72,0x72,0x96,	0x9b,0xc0,0xc0,0x5b
+.byte	0x75,0xb7,0xb7,0xc2,	0xe1,0xfd,0xfd,0x1c
+.byte	0x3d,0x93,0x93,0xae,	0x4c,0x26,0x26,0x6a
+.byte	0x6c,0x36,0x36,0x5a,	0x7e,0x3f,0x3f,0x41
+.byte	0xf5,0xf7,0xf7,0x02,	0x83,0xcc,0xcc,0x4f
+.byte	0x68,0x34,0x34,0x5c,	0x51,0xa5,0xa5,0xf4
+.byte	0xd1,0xe5,0xe5,0x34,	0xf9,0xf1,0xf1,0x08
+.byte	0xe2,0x71,0x71,0x93,	0xab,0xd8,0xd8,0x73
+.byte	0x62,0x31,0x31,0x53,	0x2a,0x15,0x15,0x3f
+.byte	0x08,0x04,0x04,0x0c,	0x95,0xc7,0xc7,0x52
+.byte	0x46,0x23,0x23,0x65,	0x9d,0xc3,0xc3,0x5e
+.byte	0x30,0x18,0x18,0x28,	0x37,0x96,0x96,0xa1
+.byte	0x0a,0x05,0x05,0x0f,	0x2f,0x9a,0x9a,0xb5
+.byte	0x0e,0x07,0x07,0x09,	0x24,0x12,0x12,0x36
+.byte	0x1b,0x80,0x80,0x9b,	0xdf,0xe2,0xe2,0x3d
+.byte	0xcd,0xeb,0xeb,0x26,	0x4e,0x27,0x27,0x69
+.byte	0x7f,0xb2,0xb2,0xcd,	0xea,0x75,0x75,0x9f
+.byte	0x12,0x09,0x09,0x1b,	0x1d,0x83,0x83,0x9e
+.byte	0x58,0x2c,0x2c,0x74,	0x34,0x1a,0x1a,0x2e
+.byte	0x36,0x1b,0x1b,0x2d,	0xdc,0x6e,0x6e,0xb2
+.byte	0xb4,0x5a,0x5a,0xee,	0x5b,0xa0,0xa0,0xfb
+.byte	0xa4,0x52,0x52,0xf6,	0x76,0x3b,0x3b,0x4d
+.byte	0xb7,0xd6,0xd6,0x61,	0x7d,0xb3,0xb3,0xce
+.byte	0x52,0x29,0x29,0x7b,	0xdd,0xe3,0xe3,0x3e
+.byte	0x5e,0x2f,0x2f,0x71,	0x13,0x84,0x84,0x97
+.byte	0xa6,0x53,0x53,0xf5,	0xb9,0xd1,0xd1,0x68
+.byte	0x00,0x00,0x00,0x00,	0xc1,0xed,0xed,0x2c
+.byte	0x40,0x20,0x20,0x60,	0xe3,0xfc,0xfc,0x1f
+.byte	0x79,0xb1,0xb1,0xc8,	0xb6,0x5b,0x5b,0xed
+.byte	0xd4,0x6a,0x6a,0xbe,	0x8d,0xcb,0xcb,0x46
+.byte	0x67,0xbe,0xbe,0xd9,	0x72,0x39,0x39,0x4b
+.byte	0x94,0x4a,0x4a,0xde,	0x98,0x4c,0x4c,0xd4
+.byte	0xb0,0x58,0x58,0xe8,	0x85,0xcf,0xcf,0x4a
+.byte	0xbb,0xd0,0xd0,0x6b,	0xc5,0xef,0xef,0x2a
+.byte	0x4f,0xaa,0xaa,0xe5,	0xed,0xfb,0xfb,0x16
+.byte	0x86,0x43,0x43,0xc5,	0x9a,0x4d,0x4d,0xd7
+.byte	0x66,0x33,0x33,0x55,	0x11,0x85,0x85,0x94
+.byte	0x8a,0x45,0x45,0xcf,	0xe9,0xf9,0xf9,0x10
+.byte	0x04,0x02,0x02,0x06,	0xfe,0x7f,0x7f,0x81
+.byte	0xa0,0x50,0x50,0xf0,	0x78,0x3c,0x3c,0x44
+.byte	0x25,0x9f,0x9f,0xba,	0x4b,0xa8,0xa8,0xe3
+.byte	0xa2,0x51,0x51,0xf3,	0x5d,0xa3,0xa3,0xfe
+.byte	0x80,0x40,0x40,0xc0,	0x05,0x8f,0x8f,0x8a
+.byte	0x3f,0x92,0x92,0xad,	0x21,0x9d,0x9d,0xbc
+.byte	0x70,0x38,0x38,0x48,	0xf1,0xf5,0xf5,0x04
+.byte	0x63,0xbc,0xbc,0xdf,	0x77,0xb6,0xb6,0xc1
+.byte	0xaf,0xda,0xda,0x75,	0x42,0x21,0x21,0x63
+.byte	0x20,0x10,0x10,0x30,	0xe5,0xff,0xff,0x1a
+.byte	0xfd,0xf3,0xf3,0x0e,	0xbf,0xd2,0xd2,0x6d
+.byte	0x81,0xcd,0xcd,0x4c,	0x18,0x0c,0x0c,0x14
+.byte	0x26,0x13,0x13,0x35,	0xc3,0xec,0xec,0x2f
+.byte	0xbe,0x5f,0x5f,0xe1,	0x35,0x97,0x97,0xa2
+.byte	0x88,0x44,0x44,0xcc,	0x2e,0x17,0x17,0x39
+.byte	0x93,0xc4,0xc4,0x57,	0x55,0xa7,0xa7,0xf2
+.byte	0xfc,0x7e,0x7e,0x82,	0x7a,0x3d,0x3d,0x47
+.byte	0xc8,0x64,0x64,0xac,	0xba,0x5d,0x5d,0xe7
+.byte	0x32,0x19,0x19,0x2b,	0xe6,0x73,0x73,0x95
+.byte	0xc0,0x60,0x60,0xa0,	0x19,0x81,0x81,0x98
+.byte	0x9e,0x4f,0x4f,0xd1,	0xa3,0xdc,0xdc,0x7f
+.byte	0x44,0x22,0x22,0x66,	0x54,0x2a,0x2a,0x7e
+.byte	0x3b,0x90,0x90,0xab,	0x0b,0x88,0x88,0x83
+.byte	0x8c,0x46,0x46,0xca,	0xc7,0xee,0xee,0x29
+.byte	0x6b,0xb8,0xb8,0xd3,	0x28,0x14,0x14,0x3c
+.byte	0xa7,0xde,0xde,0x79,	0xbc,0x5e,0x5e,0xe2
+.byte	0x16,0x0b,0x0b,0x1d,	0xad,0xdb,0xdb,0x76
+.byte	0xdb,0xe0,0xe0,0x3b,	0x64,0x32,0x32,0x56
+.byte	0x74,0x3a,0x3a,0x4e,	0x14,0x0a,0x0a,0x1e
+.byte	0x92,0x49,0x49,0xdb,	0x0c,0x06,0x06,0x0a
+.byte	0x48,0x24,0x24,0x6c,	0xb8,0x5c,0x5c,0xe4
+.byte	0x9f,0xc2,0xc2,0x5d,	0xbd,0xd3,0xd3,0x6e
+.byte	0x43,0xac,0xac,0xef,	0xc4,0x62,0x62,0xa6
+.byte	0x39,0x91,0x91,0xa8,	0x31,0x95,0x95,0xa4
+.byte	0xd3,0xe4,0xe4,0x37,	0xf2,0x79,0x79,0x8b
+.byte	0xd5,0xe7,0xe7,0x32,	0x8b,0xc8,0xc8,0x43
+.byte	0x6e,0x37,0x37,0x59,	0xda,0x6d,0x6d,0xb7
+.byte	0x01,0x8d,0x8d,0x8c,	0xb1,0xd5,0xd5,0x64
+.byte	0x9c,0x4e,0x4e,0xd2,	0x49,0xa9,0xa9,0xe0
+.byte	0xd8,0x6c,0x6c,0xb4,	0xac,0x56,0x56,0xfa
+.byte	0xf3,0xf4,0xf4,0x07,	0xcf,0xea,0xea,0x25
+.byte	0xca,0x65,0x65,0xaf,	0xf4,0x7a,0x7a,0x8e
+.byte	0x47,0xae,0xae,0xe9,	0x10,0x08,0x08,0x18
+.byte	0x6f,0xba,0xba,0xd5,	0xf0,0x78,0x78,0x88
+.byte	0x4a,0x25,0x25,0x6f,	0x5c,0x2e,0x2e,0x72
+.byte	0x38,0x1c,0x1c,0x24,	0x57,0xa6,0xa6,0xf1
+.byte	0x73,0xb4,0xb4,0xc7,	0x97,0xc6,0xc6,0x51
+.byte	0xcb,0xe8,0xe8,0x23,	0xa1,0xdd,0xdd,0x7c
+.byte	0xe8,0x74,0x74,0x9c,	0x3e,0x1f,0x1f,0x21
+.byte	0x96,0x4b,0x4b,0xdd,	0x61,0xbd,0xbd,0xdc
+.byte	0x0d,0x8b,0x8b,0x86,	0x0f,0x8a,0x8a,0x85
+.byte	0xe0,0x70,0x70,0x90,	0x7c,0x3e,0x3e,0x42
+.byte	0x71,0xb5,0xb5,0xc4,	0xcc,0x66,0x66,0xaa
+.byte	0x90,0x48,0x48,0xd8,	0x06,0x03,0x03,0x05
+.byte	0xf7,0xf6,0xf6,0x01,	0x1c,0x0e,0x0e,0x12
+.byte	0xc2,0x61,0x61,0xa3,	0x6a,0x35,0x35,0x5f
+.byte	0xae,0x57,0x57,0xf9,	0x69,0xb9,0xb9,0xd0
+.byte	0x17,0x86,0x86,0x91,	0x99,0xc1,0xc1,0x58
+.byte	0x3a,0x1d,0x1d,0x27,	0x27,0x9e,0x9e,0xb9
+.byte	0xd9,0xe1,0xe1,0x38,	0xeb,0xf8,0xf8,0x13
+.byte	0x2b,0x98,0x98,0xb3,	0x22,0x11,0x11,0x33
+.byte	0xd2,0x69,0x69,0xbb,	0xa9,0xd9,0xd9,0x70
+.byte	0x07,0x8e,0x8e,0x89,	0x33,0x94,0x94,0xa7
+.byte	0x2d,0x9b,0x9b,0xb6,	0x3c,0x1e,0x1e,0x22
+.byte	0x15,0x87,0x87,0x92,	0xc9,0xe9,0xe9,0x20
+.byte	0x87,0xce,0xce,0x49,	0xaa,0x55,0x55,0xff
+.byte	0x50,0x28,0x28,0x78,	0xa5,0xdf,0xdf,0x7a
+.byte	0x03,0x8c,0x8c,0x8f,	0x59,0xa1,0xa1,0xf8
+.byte	0x09,0x89,0x89,0x80,	0x1a,0x0d,0x0d,0x17
+.byte	0x65,0xbf,0xbf,0xda,	0xd7,0xe6,0xe6,0x31
+.byte	0x84,0x42,0x42,0xc6,	0xd0,0x68,0x68,0xb8
+.byte	0x82,0x41,0x41,0xc3,	0x29,0x99,0x99,0xb0
+.byte	0x5a,0x2d,0x2d,0x77,	0x1e,0x0f,0x0f,0x11
+.byte	0x7b,0xb0,0xb0,0xcb,	0xa8,0x54,0x54,0xfc
+.byte	0x6d,0xbb,0xbb,0xd6,	0x2c,0x16,0x16,0x3a
+
+.byte	0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5	# Te4
+.byte	0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76
+.byte	0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0
+.byte	0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0
+.byte	0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc
+.byte	0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15
+.byte	0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a
+.byte	0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75
+.byte	0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0
+.byte	0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84
+.byte	0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b
+.byte	0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf
+.byte	0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85
+.byte	0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8
+.byte	0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5
+.byte	0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2
+.byte	0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17
+.byte	0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73
+.byte	0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88
+.byte	0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb
+.byte	0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c
+.byte	0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79
+.byte	0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9
+.byte	0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08
+.byte	0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6
+.byte	0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a
+.byte	0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e
+.byte	0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e
+.byte	0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94
+.byte	0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf
+.byte	0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68
+.byte	0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
+
+.byte	0x01,0x00,0x00,0x00,	0x02,0x00,0x00,0x00	# rcon
+.byte	0x04,0x00,0x00,0x00,	0x08,0x00,0x00,0x00
+.byte	0x10,0x00,0x00,0x00,	0x20,0x00,0x00,0x00
+.byte	0x40,0x00,0x00,0x00,	0x80,0x00,0x00,0x00
+.byte	0x1B,0x00,0x00,0x00,	0x36,0x00,0x00,0x00
+
+.align	6
+AES_Td:
+.byte	0x51,0xf4,0xa7,0x50,	0x7e,0x41,0x65,0x53	# Td0
+.byte	0x1a,0x17,0xa4,0xc3,	0x3a,0x27,0x5e,0x96
+.byte	0x3b,0xab,0x6b,0xcb,	0x1f,0x9d,0x45,0xf1
+.byte	0xac,0xfa,0x58,0xab,	0x4b,0xe3,0x03,0x93
+.byte	0x20,0x30,0xfa,0x55,	0xad,0x76,0x6d,0xf6
+.byte	0x88,0xcc,0x76,0x91,	0xf5,0x02,0x4c,0x25
+.byte	0x4f,0xe5,0xd7,0xfc,	0xc5,0x2a,0xcb,0xd7
+.byte	0x26,0x35,0x44,0x80,	0xb5,0x62,0xa3,0x8f
+.byte	0xde,0xb1,0x5a,0x49,	0x25,0xba,0x1b,0x67
+.byte	0x45,0xea,0x0e,0x98,	0x5d,0xfe,0xc0,0xe1
+.byte	0xc3,0x2f,0x75,0x02,	0x81,0x4c,0xf0,0x12
+.byte	0x8d,0x46,0x97,0xa3,	0x6b,0xd3,0xf9,0xc6
+.byte	0x03,0x8f,0x5f,0xe7,	0x15,0x92,0x9c,0x95
+.byte	0xbf,0x6d,0x7a,0xeb,	0x95,0x52,0x59,0xda
+.byte	0xd4,0xbe,0x83,0x2d,	0x58,0x74,0x21,0xd3
+.byte	0x49,0xe0,0x69,0x29,	0x8e,0xc9,0xc8,0x44
+.byte	0x75,0xc2,0x89,0x6a,	0xf4,0x8e,0x79,0x78
+.byte	0x99,0x58,0x3e,0x6b,	0x27,0xb9,0x71,0xdd
+.byte	0xbe,0xe1,0x4f,0xb6,	0xf0,0x88,0xad,0x17
+.byte	0xc9,0x20,0xac,0x66,	0x7d,0xce,0x3a,0xb4
+.byte	0x63,0xdf,0x4a,0x18,	0xe5,0x1a,0x31,0x82
+.byte	0x97,0x51,0x33,0x60,	0x62,0x53,0x7f,0x45
+.byte	0xb1,0x64,0x77,0xe0,	0xbb,0x6b,0xae,0x84
+.byte	0xfe,0x81,0xa0,0x1c,	0xf9,0x08,0x2b,0x94
+.byte	0x70,0x48,0x68,0x58,	0x8f,0x45,0xfd,0x19
+.byte	0x94,0xde,0x6c,0x87,	0x52,0x7b,0xf8,0xb7
+.byte	0xab,0x73,0xd3,0x23,	0x72,0x4b,0x02,0xe2
+.byte	0xe3,0x1f,0x8f,0x57,	0x66,0x55,0xab,0x2a
+.byte	0xb2,0xeb,0x28,0x07,	0x2f,0xb5,0xc2,0x03
+.byte	0x86,0xc5,0x7b,0x9a,	0xd3,0x37,0x08,0xa5
+.byte	0x30,0x28,0x87,0xf2,	0x23,0xbf,0xa5,0xb2
+.byte	0x02,0x03,0x6a,0xba,	0xed,0x16,0x82,0x5c
+.byte	0x8a,0xcf,0x1c,0x2b,	0xa7,0x79,0xb4,0x92
+.byte	0xf3,0x07,0xf2,0xf0,	0x4e,0x69,0xe2,0xa1
+.byte	0x65,0xda,0xf4,0xcd,	0x06,0x05,0xbe,0xd5
+.byte	0xd1,0x34,0x62,0x1f,	0xc4,0xa6,0xfe,0x8a
+.byte	0x34,0x2e,0x53,0x9d,	0xa2,0xf3,0x55,0xa0
+.byte	0x05,0x8a,0xe1,0x32,	0xa4,0xf6,0xeb,0x75
+.byte	0x0b,0x83,0xec,0x39,	0x40,0x60,0xef,0xaa
+.byte	0x5e,0x71,0x9f,0x06,	0xbd,0x6e,0x10,0x51
+.byte	0x3e,0x21,0x8a,0xf9,	0x96,0xdd,0x06,0x3d
+.byte	0xdd,0x3e,0x05,0xae,	0x4d,0xe6,0xbd,0x46
+.byte	0x91,0x54,0x8d,0xb5,	0x71,0xc4,0x5d,0x05
+.byte	0x04,0x06,0xd4,0x6f,	0x60,0x50,0x15,0xff
+.byte	0x19,0x98,0xfb,0x24,	0xd6,0xbd,0xe9,0x97
+.byte	0x89,0x40,0x43,0xcc,	0x67,0xd9,0x9e,0x77
+.byte	0xb0,0xe8,0x42,0xbd,	0x07,0x89,0x8b,0x88
+.byte	0xe7,0x19,0x5b,0x38,	0x79,0xc8,0xee,0xdb
+.byte	0xa1,0x7c,0x0a,0x47,	0x7c,0x42,0x0f,0xe9
+.byte	0xf8,0x84,0x1e,0xc9,	0x00,0x00,0x00,0x00
+.byte	0x09,0x80,0x86,0x83,	0x32,0x2b,0xed,0x48
+.byte	0x1e,0x11,0x70,0xac,	0x6c,0x5a,0x72,0x4e
+.byte	0xfd,0x0e,0xff,0xfb,	0x0f,0x85,0x38,0x56
+.byte	0x3d,0xae,0xd5,0x1e,	0x36,0x2d,0x39,0x27
+.byte	0x0a,0x0f,0xd9,0x64,	0x68,0x5c,0xa6,0x21
+.byte	0x9b,0x5b,0x54,0xd1,	0x24,0x36,0x2e,0x3a
+.byte	0x0c,0x0a,0x67,0xb1,	0x93,0x57,0xe7,0x0f
+.byte	0xb4,0xee,0x96,0xd2,	0x1b,0x9b,0x91,0x9e
+.byte	0x80,0xc0,0xc5,0x4f,	0x61,0xdc,0x20,0xa2
+.byte	0x5a,0x77,0x4b,0x69,	0x1c,0x12,0x1a,0x16
+.byte	0xe2,0x93,0xba,0x0a,	0xc0,0xa0,0x2a,0xe5
+.byte	0x3c,0x22,0xe0,0x43,	0x12,0x1b,0x17,0x1d
+.byte	0x0e,0x09,0x0d,0x0b,	0xf2,0x8b,0xc7,0xad
+.byte	0x2d,0xb6,0xa8,0xb9,	0x14,0x1e,0xa9,0xc8
+.byte	0x57,0xf1,0x19,0x85,	0xaf,0x75,0x07,0x4c
+.byte	0xee,0x99,0xdd,0xbb,	0xa3,0x7f,0x60,0xfd
+.byte	0xf7,0x01,0x26,0x9f,	0x5c,0x72,0xf5,0xbc
+.byte	0x44,0x66,0x3b,0xc5,	0x5b,0xfb,0x7e,0x34
+.byte	0x8b,0x43,0x29,0x76,	0xcb,0x23,0xc6,0xdc
+.byte	0xb6,0xed,0xfc,0x68,	0xb8,0xe4,0xf1,0x63
+.byte	0xd7,0x31,0xdc,0xca,	0x42,0x63,0x85,0x10
+.byte	0x13,0x97,0x22,0x40,	0x84,0xc6,0x11,0x20
+.byte	0x85,0x4a,0x24,0x7d,	0xd2,0xbb,0x3d,0xf8
+.byte	0xae,0xf9,0x32,0x11,	0xc7,0x29,0xa1,0x6d
+.byte	0x1d,0x9e,0x2f,0x4b,	0xdc,0xb2,0x30,0xf3
+.byte	0x0d,0x86,0x52,0xec,	0x77,0xc1,0xe3,0xd0
+.byte	0x2b,0xb3,0x16,0x6c,	0xa9,0x70,0xb9,0x99
+.byte	0x11,0x94,0x48,0xfa,	0x47,0xe9,0x64,0x22
+.byte	0xa8,0xfc,0x8c,0xc4,	0xa0,0xf0,0x3f,0x1a
+.byte	0x56,0x7d,0x2c,0xd8,	0x22,0x33,0x90,0xef
+.byte	0x87,0x49,0x4e,0xc7,	0xd9,0x38,0xd1,0xc1
+.byte	0x8c,0xca,0xa2,0xfe,	0x98,0xd4,0x0b,0x36
+.byte	0xa6,0xf5,0x81,0xcf,	0xa5,0x7a,0xde,0x28
+.byte	0xda,0xb7,0x8e,0x26,	0x3f,0xad,0xbf,0xa4
+.byte	0x2c,0x3a,0x9d,0xe4,	0x50,0x78,0x92,0x0d
+.byte	0x6a,0x5f,0xcc,0x9b,	0x54,0x7e,0x46,0x62
+.byte	0xf6,0x8d,0x13,0xc2,	0x90,0xd8,0xb8,0xe8
+.byte	0x2e,0x39,0xf7,0x5e,	0x82,0xc3,0xaf,0xf5
+.byte	0x9f,0x5d,0x80,0xbe,	0x69,0xd0,0x93,0x7c
+.byte	0x6f,0xd5,0x2d,0xa9,	0xcf,0x25,0x12,0xb3
+.byte	0xc8,0xac,0x99,0x3b,	0x10,0x18,0x7d,0xa7
+.byte	0xe8,0x9c,0x63,0x6e,	0xdb,0x3b,0xbb,0x7b
+.byte	0xcd,0x26,0x78,0x09,	0x6e,0x59,0x18,0xf4
+.byte	0xec,0x9a,0xb7,0x01,	0x83,0x4f,0x9a,0xa8
+.byte	0xe6,0x95,0x6e,0x65,	0xaa,0xff,0xe6,0x7e
+.byte	0x21,0xbc,0xcf,0x08,	0xef,0x15,0xe8,0xe6
+.byte	0xba,0xe7,0x9b,0xd9,	0x4a,0x6f,0x36,0xce
+.byte	0xea,0x9f,0x09,0xd4,	0x29,0xb0,0x7c,0xd6
+.byte	0x31,0xa4,0xb2,0xaf,	0x2a,0x3f,0x23,0x31
+.byte	0xc6,0xa5,0x94,0x30,	0x35,0xa2,0x66,0xc0
+.byte	0x74,0x4e,0xbc,0x37,	0xfc,0x82,0xca,0xa6
+.byte	0xe0,0x90,0xd0,0xb0,	0x33,0xa7,0xd8,0x15
+.byte	0xf1,0x04,0x98,0x4a,	0x41,0xec,0xda,0xf7
+.byte	0x7f,0xcd,0x50,0x0e,	0x17,0x91,0xf6,0x2f
+.byte	0x76,0x4d,0xd6,0x8d,	0x43,0xef,0xb0,0x4d
+.byte	0xcc,0xaa,0x4d,0x54,	0xe4,0x96,0x04,0xdf
+.byte	0x9e,0xd1,0xb5,0xe3,	0x4c,0x6a,0x88,0x1b
+.byte	0xc1,0x2c,0x1f,0xb8,	0x46,0x65,0x51,0x7f
+.byte	0x9d,0x5e,0xea,0x04,	0x01,0x8c,0x35,0x5d
+.byte	0xfa,0x87,0x74,0x73,	0xfb,0x0b,0x41,0x2e
+.byte	0xb3,0x67,0x1d,0x5a,	0x92,0xdb,0xd2,0x52
+.byte	0xe9,0x10,0x56,0x33,	0x6d,0xd6,0x47,0x13
+.byte	0x9a,0xd7,0x61,0x8c,	0x37,0xa1,0x0c,0x7a
+.byte	0x59,0xf8,0x14,0x8e,	0xeb,0x13,0x3c,0x89
+.byte	0xce,0xa9,0x27,0xee,	0xb7,0x61,0xc9,0x35
+.byte	0xe1,0x1c,0xe5,0xed,	0x7a,0x47,0xb1,0x3c
+.byte	0x9c,0xd2,0xdf,0x59,	0x55,0xf2,0x73,0x3f
+.byte	0x18,0x14,0xce,0x79,	0x73,0xc7,0x37,0xbf
+.byte	0x53,0xf7,0xcd,0xea,	0x5f,0xfd,0xaa,0x5b
+.byte	0xdf,0x3d,0x6f,0x14,	0x78,0x44,0xdb,0x86
+.byte	0xca,0xaf,0xf3,0x81,	0xb9,0x68,0xc4,0x3e
+.byte	0x38,0x24,0x34,0x2c,	0xc2,0xa3,0x40,0x5f
+.byte	0x16,0x1d,0xc3,0x72,	0xbc,0xe2,0x25,0x0c
+.byte	0x28,0x3c,0x49,0x8b,	0xff,0x0d,0x95,0x41
+.byte	0x39,0xa8,0x01,0x71,	0x08,0x0c,0xb3,0xde
+.byte	0xd8,0xb4,0xe4,0x9c,	0x64,0x56,0xc1,0x90
+.byte	0x7b,0xcb,0x84,0x61,	0xd5,0x32,0xb6,0x70
+.byte	0x48,0x6c,0x5c,0x74,	0xd0,0xb8,0x57,0x42
+
+.byte	0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38	# Td4
+.byte	0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb
+.byte	0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87
+.byte	0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb
+.byte	0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d
+.byte	0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e
+.byte	0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2
+.byte	0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25
+.byte	0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16
+.byte	0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92
+.byte	0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda
+.byte	0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84
+.byte	0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a
+.byte	0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06
+.byte	0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02
+.byte	0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b
+.byte	0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea
+.byte	0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73
+.byte	0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85
+.byte	0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e
+.byte	0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89
+.byte	0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b
+.byte	0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20
+.byte	0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4
+.byte	0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31
+.byte	0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f
+.byte	0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d
+.byte	0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef
+.byte	0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0
+.byte	0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61
+.byte	0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26
+.byte	0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
diff --git a/jni/openssl/crypto/aes/asm/aes-parisc.pl b/jni/openssl/crypto/aes/asm/aes-parisc.pl
new file mode 100644
index 0000000..c36b6a2
--- /dev/null
+++ b/jni/openssl/crypto/aes/asm/aes-parisc.pl
@@ -0,0 +1,1021 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# AES for PA-RISC.
+#
+# June 2009.
+#
+# The module is mechanical transliteration of aes-sparcv9.pl, but with
+# a twist: S-boxes are compressed even further down to 1K+256B. On
+# PA-7100LC performance is ~40% better than gcc 3.2 generated code and
+# is about 33 cycles per byte processed with 128-bit key. Newer CPUs
+# perform at 16 cycles per byte. It's not faster than code generated
+# by vendor compiler, but recall that it has compressed S-boxes, which
+# requires extra processing.
+#
+# Special thanks to polarhome.com for providing HP-UX account.
+
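
The compressed S-boxes mentioned in the header above are the 1KB Te0 table plus the 256-byte Te4 S-box emitted as L\$AES_Te further down; the Te1/Te2/Te3 values are recovered at run time by rotating the loaded Te0 words (the _ror steps in the round loop), which is the extra processing the comment refers to. A minimal C sketch of that relationship, with made-up names and purely for illustration, assuming the standard 256-entry Te0 table whose first word is 0xc66363a5 as in the data below:

    #include <stdint.h>

    /* 32-bit rotate right (1 <= n <= 31). */
    static uint32_t rotr32(uint32_t x, unsigned n)
    {
        return (x >> n) | (x << (32 - n));
    }

    /* With only Te0 stored, the other round tables are derived on the fly,
     * e.g. Te0[0] = 0xc66363a5 gives Te1[0] = 0xa5c66363. */
    uint32_t te1(const uint32_t Te0[256], uint8_t i) { return rotr32(Te0[i],  8); }
    uint32_t te2(const uint32_t Te0[256], uint8_t i) { return rotr32(Te0[i], 16); }
    uint32_t te3(const uint32_t Te0[256], uint8_t i) { return rotr32(Te0[i], 24); }
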
+$flavour = shift;
+$output = shift;
+open STDOUT,">$output";
+
+if ($flavour =~ /64/) {
+	$LEVEL		="2.0W";
+	$SIZE_T		=8;
+	$FRAME_MARKER	=80;
+	$SAVED_RP	=16;
+	$PUSH		="std";
+	$PUSHMA		="std,ma";
+	$POP		="ldd";
+	$POPMB		="ldd,mb";
+} else {
+	$LEVEL		="1.0";
+	$SIZE_T		=4;
+	$FRAME_MARKER	=48;
+	$SAVED_RP	=20;
+	$PUSH		="stw";
+	$PUSHMA		="stwm";
+	$POP		="ldw";
+	$POPMB		="ldwm";
+}
+
+$FRAME=16*$SIZE_T+$FRAME_MARKER;# 16 saved regs + frame marker
+				#                 [+ argument transfer]
+$inp="%r26";	# arg0
+$out="%r25";	# arg1
+$key="%r24";	# arg2
+
+($s0,$s1,$s2,$s3) = ("%r1","%r2","%r3","%r4");
+($t0,$t1,$t2,$t3) = ("%r5","%r6","%r7","%r8");
+
+($acc0, $acc1, $acc2, $acc3, $acc4, $acc5, $acc6, $acc7,
+ $acc8, $acc9,$acc10,$acc11,$acc12,$acc13,$acc14,$acc15) =
+("%r9","%r10","%r11","%r12","%r13","%r14","%r15","%r16",
+"%r17","%r18","%r19","%r20","%r21","%r22","%r23","%r26");
+
+$tbl="%r28";
+$rounds="%r29";
+
+$code=<<___;
+	.LEVEL	$LEVEL
+	.SPACE	\$TEXT\$
+	.SUBSPA	\$CODE\$,QUAD=0,ALIGN=8,ACCESS=0x2C,CODE_ONLY
+
+	.EXPORT	AES_encrypt,ENTRY,ARGW0=GR,ARGW1=GR,ARGW2=GR
+	.ALIGN	64
+AES_encrypt
+	.PROC
+	.CALLINFO	FRAME=`$FRAME-16*$SIZE_T`,NO_CALLS,SAVE_RP,ENTRY_GR=18
+	.ENTRY
+	$PUSH	%r2,-$SAVED_RP(%sp)	; standard prologue
+	$PUSHMA	%r3,$FRAME(%sp)
+	$PUSH	%r4,`-$FRAME+1*$SIZE_T`(%sp)
+	$PUSH	%r5,`-$FRAME+2*$SIZE_T`(%sp)
+	$PUSH	%r6,`-$FRAME+3*$SIZE_T`(%sp)
+	$PUSH	%r7,`-$FRAME+4*$SIZE_T`(%sp)
+	$PUSH	%r8,`-$FRAME+5*$SIZE_T`(%sp)
+	$PUSH	%r9,`-$FRAME+6*$SIZE_T`(%sp)
+	$PUSH	%r10,`-$FRAME+7*$SIZE_T`(%sp)
+	$PUSH	%r11,`-$FRAME+8*$SIZE_T`(%sp)
+	$PUSH	%r12,`-$FRAME+9*$SIZE_T`(%sp)
+	$PUSH	%r13,`-$FRAME+10*$SIZE_T`(%sp)
+	$PUSH	%r14,`-$FRAME+11*$SIZE_T`(%sp)
+	$PUSH	%r15,`-$FRAME+12*$SIZE_T`(%sp)
+	$PUSH	%r16,`-$FRAME+13*$SIZE_T`(%sp)
+	$PUSH	%r17,`-$FRAME+14*$SIZE_T`(%sp)
+	$PUSH	%r18,`-$FRAME+15*$SIZE_T`(%sp)
+
+	blr	%r0,$tbl
+	ldi	3,$t0
+L\$enc_pic
+	andcm	$tbl,$t0,$tbl
+	ldo	L\$AES_Te-L\$enc_pic($tbl),$tbl
+
+	and	$inp,$t0,$t0
+	sub	$inp,$t0,$inp
+	ldw	0($inp),$s0
+	ldw	4($inp),$s1
+	ldw	8($inp),$s2
+	comib,=	0,$t0,L\$enc_inp_aligned
+	ldw	12($inp),$s3
+
+	sh3addl	$t0,%r0,$t0
+	subi	32,$t0,$t0
+	mtctl	$t0,%cr11
+	ldw	16($inp),$t1
+	vshd	$s0,$s1,$s0
+	vshd	$s1,$s2,$s1
+	vshd	$s2,$s3,$s2
+	vshd	$s3,$t1,$s3
+
+L\$enc_inp_aligned
+	bl	_parisc_AES_encrypt,%r31
+	nop
+
+	extru,<> $out,31,2,%r0
+	b	L\$enc_out_aligned
+	nop
+
+	_srm	$s0,24,$acc0
+	_srm	$s0,16,$acc1
+	stb	$acc0,0($out)
+	_srm	$s0,8,$acc2
+	stb	$acc1,1($out)
+	_srm	$s1,24,$acc4
+	stb	$acc2,2($out)
+	_srm	$s1,16,$acc5
+	stb	$s0,3($out)
+	_srm	$s1,8,$acc6
+	stb	$acc4,4($out)
+	_srm	$s2,24,$acc0
+	stb	$acc5,5($out)
+	_srm	$s2,16,$acc1
+	stb	$acc6,6($out)
+	_srm	$s2,8,$acc2
+	stb	$s1,7($out)
+	_srm	$s3,24,$acc4
+	stb	$acc0,8($out)
+	_srm	$s3,16,$acc5
+	stb	$acc1,9($out)
+	_srm	$s3,8,$acc6
+	stb	$acc2,10($out)
+	stb	$s2,11($out)
+	stb	$acc4,12($out)
+	stb	$acc5,13($out)
+	stb	$acc6,14($out)
+	b	L\$enc_done
+	stb	$s3,15($out)
+
+L\$enc_out_aligned
+	stw	$s0,0($out)
+	stw	$s1,4($out)
+	stw	$s2,8($out)
+	stw	$s3,12($out)
+
+L\$enc_done
+	$POP	`-$FRAME-$SAVED_RP`(%sp),%r2	; standard epilogue
+	$POP	`-$FRAME+1*$SIZE_T`(%sp),%r4
+	$POP	`-$FRAME+2*$SIZE_T`(%sp),%r5
+	$POP	`-$FRAME+3*$SIZE_T`(%sp),%r6
+	$POP	`-$FRAME+4*$SIZE_T`(%sp),%r7
+	$POP	`-$FRAME+5*$SIZE_T`(%sp),%r8
+	$POP	`-$FRAME+6*$SIZE_T`(%sp),%r9
+	$POP	`-$FRAME+7*$SIZE_T`(%sp),%r10
+	$POP	`-$FRAME+8*$SIZE_T`(%sp),%r11
+	$POP	`-$FRAME+9*$SIZE_T`(%sp),%r12
+	$POP	`-$FRAME+10*$SIZE_T`(%sp),%r13
+	$POP	`-$FRAME+11*$SIZE_T`(%sp),%r14
+	$POP	`-$FRAME+12*$SIZE_T`(%sp),%r15
+	$POP	`-$FRAME+13*$SIZE_T`(%sp),%r16
+	$POP	`-$FRAME+14*$SIZE_T`(%sp),%r17
+	$POP	`-$FRAME+15*$SIZE_T`(%sp),%r18
+	bv	(%r2)
+	.EXIT
+	$POPMB	-$FRAME(%sp),%r3
+	.PROCEND
+
+	.ALIGN	16
+_parisc_AES_encrypt
+	.PROC
+	.CALLINFO	MILLICODE
+	.ENTRY
+	ldw	240($key),$rounds
+	ldw	0($key),$t0
+	ldw	4($key),$t1
+	ldw	8($key),$t2
+	_srm	$rounds,1,$rounds
+	xor	$t0,$s0,$s0
+	ldw	12($key),$t3
+	_srm	$s0,24,$acc0
+	xor	$t1,$s1,$s1
+	ldw	16($key),$t0
+	_srm	$s1,16,$acc1
+	xor	$t2,$s2,$s2
+	ldw	20($key),$t1
+	xor	$t3,$s3,$s3
+	ldw	24($key),$t2
+	ldw	28($key),$t3
+L\$enc_loop
+	_srm	$s2,8,$acc2
+	ldwx,s	$acc0($tbl),$acc0
+	_srm	$s3,0,$acc3
+	ldwx,s	$acc1($tbl),$acc1
+	_srm	$s1,24,$acc4
+	ldwx,s	$acc2($tbl),$acc2
+	_srm	$s2,16,$acc5
+	ldwx,s	$acc3($tbl),$acc3
+	_srm	$s3,8,$acc6
+	ldwx,s	$acc4($tbl),$acc4
+	_srm	$s0,0,$acc7
+	ldwx,s	$acc5($tbl),$acc5
+	_srm	$s2,24,$acc8
+	ldwx,s	$acc6($tbl),$acc6
+	_srm	$s3,16,$acc9
+	ldwx,s	$acc7($tbl),$acc7
+	_srm	$s0,8,$acc10
+	ldwx,s	$acc8($tbl),$acc8
+	_srm	$s1,0,$acc11
+	ldwx,s	$acc9($tbl),$acc9
+	_srm	$s3,24,$acc12
+	ldwx,s	$acc10($tbl),$acc10
+	_srm	$s0,16,$acc13
+	ldwx,s	$acc11($tbl),$acc11
+	_srm	$s1,8,$acc14
+	ldwx,s	$acc12($tbl),$acc12
+	_srm	$s2,0,$acc15
+	ldwx,s	$acc13($tbl),$acc13
+	ldwx,s	$acc14($tbl),$acc14
+	ldwx,s	$acc15($tbl),$acc15
+	addib,= -1,$rounds,L\$enc_last
+	ldo	32($key),$key
+
+		_ror	$acc1,8,$acc1
+		xor	$acc0,$t0,$t0
+	ldw	0($key),$s0
+		_ror	$acc2,16,$acc2
+		xor	$acc1,$t0,$t0
+	ldw	4($key),$s1
+		_ror	$acc3,24,$acc3
+		xor	$acc2,$t0,$t0
+	ldw	8($key),$s2
+		_ror	$acc5,8,$acc5
+		xor	$acc3,$t0,$t0
+	ldw	12($key),$s3
+		_ror	$acc6,16,$acc6
+		xor	$acc4,$t1,$t1
+		_ror	$acc7,24,$acc7
+		xor	$acc5,$t1,$t1
+		_ror	$acc9,8,$acc9
+		xor	$acc6,$t1,$t1
+		_ror	$acc10,16,$acc10
+		xor	$acc7,$t1,$t1
+		_ror	$acc11,24,$acc11
+		xor	$acc8,$t2,$t2
+		_ror	$acc13,8,$acc13
+		xor	$acc9,$t2,$t2
+		_ror	$acc14,16,$acc14
+		xor	$acc10,$t2,$t2
+		_ror	$acc15,24,$acc15
+		xor	$acc11,$t2,$t2
+		xor	$acc12,$acc14,$acc14
+		xor	$acc13,$t3,$t3
+	_srm	$t0,24,$acc0
+		xor	$acc14,$t3,$t3
+	_srm	$t1,16,$acc1
+		xor	$acc15,$t3,$t3
+
+	_srm	$t2,8,$acc2
+	ldwx,s	$acc0($tbl),$acc0
+	_srm	$t3,0,$acc3
+	ldwx,s	$acc1($tbl),$acc1
+	_srm	$t1,24,$acc4
+	ldwx,s	$acc2($tbl),$acc2
+	_srm	$t2,16,$acc5
+	ldwx,s	$acc3($tbl),$acc3
+	_srm	$t3,8,$acc6
+	ldwx,s	$acc4($tbl),$acc4
+	_srm	$t0,0,$acc7
+	ldwx,s	$acc5($tbl),$acc5
+	_srm	$t2,24,$acc8
+	ldwx,s	$acc6($tbl),$acc6
+	_srm	$t3,16,$acc9
+	ldwx,s	$acc7($tbl),$acc7
+	_srm	$t0,8,$acc10
+	ldwx,s	$acc8($tbl),$acc8
+	_srm	$t1,0,$acc11
+	ldwx,s	$acc9($tbl),$acc9
+	_srm	$t3,24,$acc12
+	ldwx,s	$acc10($tbl),$acc10
+	_srm	$t0,16,$acc13
+	ldwx,s	$acc11($tbl),$acc11
+	_srm	$t1,8,$acc14
+	ldwx,s	$acc12($tbl),$acc12
+	_srm	$t2,0,$acc15
+	ldwx,s	$acc13($tbl),$acc13
+		_ror	$acc1,8,$acc1
+	ldwx,s	$acc14($tbl),$acc14
+
+		_ror	$acc2,16,$acc2
+		xor	$acc0,$s0,$s0
+	ldwx,s	$acc15($tbl),$acc15
+		_ror	$acc3,24,$acc3
+		xor	$acc1,$s0,$s0
+	ldw	16($key),$t0
+		_ror	$acc5,8,$acc5
+		xor	$acc2,$s0,$s0
+	ldw	20($key),$t1
+		_ror	$acc6,16,$acc6
+		xor	$acc3,$s0,$s0
+	ldw	24($key),$t2
+		_ror	$acc7,24,$acc7
+		xor	$acc4,$s1,$s1
+	ldw	28($key),$t3
+		_ror	$acc9,8,$acc9
+		xor	$acc5,$s1,$s1
+	ldw	1024+0($tbl),%r0		; prefetch te4
+		_ror	$acc10,16,$acc10
+		xor	$acc6,$s1,$s1
+	ldw	1024+32($tbl),%r0		; prefetch te4
+		_ror	$acc11,24,$acc11
+		xor	$acc7,$s1,$s1
+	ldw	1024+64($tbl),%r0		; prefetch te4
+		_ror	$acc13,8,$acc13
+		xor	$acc8,$s2,$s2
+	ldw	1024+96($tbl),%r0		; prefetch te4
+		_ror	$acc14,16,$acc14
+		xor	$acc9,$s2,$s2
+	ldw	1024+128($tbl),%r0		; prefetch te4
+		_ror	$acc15,24,$acc15
+		xor	$acc10,$s2,$s2
+	ldw	1024+160($tbl),%r0		; prefetch te4
+	_srm	$s0,24,$acc0
+		xor	$acc11,$s2,$s2
+	ldw	1024+192($tbl),%r0		; prefetch te4
+		xor	$acc12,$acc14,$acc14
+		xor	$acc13,$s3,$s3
+	ldw	1024+224($tbl),%r0		; prefetch te4
+	_srm	$s1,16,$acc1
+		xor	$acc14,$s3,$s3
+	b	L\$enc_loop
+		xor	$acc15,$s3,$s3
+
+	.ALIGN	16
+L\$enc_last
+	ldo	1024($tbl),$rounds
+		_ror	$acc1,8,$acc1
+		xor	$acc0,$t0,$t0
+	ldw	0($key),$s0
+		_ror	$acc2,16,$acc2
+		xor	$acc1,$t0,$t0
+	ldw	4($key),$s1
+		_ror	$acc3,24,$acc3
+		xor	$acc2,$t0,$t0
+	ldw	8($key),$s2
+		_ror	$acc5,8,$acc5
+		xor	$acc3,$t0,$t0
+	ldw	12($key),$s3
+		_ror	$acc6,16,$acc6
+		xor	$acc4,$t1,$t1
+		_ror	$acc7,24,$acc7
+		xor	$acc5,$t1,$t1
+		_ror	$acc9,8,$acc9
+		xor	$acc6,$t1,$t1
+		_ror	$acc10,16,$acc10
+		xor	$acc7,$t1,$t1
+		_ror	$acc11,24,$acc11
+		xor	$acc8,$t2,$t2
+		_ror	$acc13,8,$acc13
+		xor	$acc9,$t2,$t2
+		_ror	$acc14,16,$acc14
+		xor	$acc10,$t2,$t2
+		_ror	$acc15,24,$acc15
+		xor	$acc11,$t2,$t2
+		xor	$acc12,$acc14,$acc14
+		xor	$acc13,$t3,$t3
+	_srm	$t0,24,$acc0
+		xor	$acc14,$t3,$t3
+	_srm	$t1,16,$acc1
+		xor	$acc15,$t3,$t3
+
+	_srm	$t2,8,$acc2
+	ldbx	$acc0($rounds),$acc0
+	_srm	$t1,24,$acc4
+	ldbx	$acc1($rounds),$acc1
+	_srm	$t2,16,$acc5
+	_srm	$t3,0,$acc3
+	ldbx	$acc2($rounds),$acc2
+	ldbx	$acc3($rounds),$acc3
+	_srm	$t3,8,$acc6
+	ldbx	$acc4($rounds),$acc4
+	_srm	$t2,24,$acc8
+	ldbx	$acc5($rounds),$acc5
+	_srm	$t3,16,$acc9
+	_srm	$t0,0,$acc7
+	ldbx	$acc6($rounds),$acc6
+	ldbx	$acc7($rounds),$acc7
+	_srm	$t0,8,$acc10
+	ldbx	$acc8($rounds),$acc8
+	_srm	$t3,24,$acc12
+	ldbx	$acc9($rounds),$acc9
+	_srm	$t0,16,$acc13
+	_srm	$t1,0,$acc11
+	ldbx	$acc10($rounds),$acc10
+	_srm	$t1,8,$acc14
+	ldbx	$acc11($rounds),$acc11
+	ldbx	$acc12($rounds),$acc12
+	ldbx	$acc13($rounds),$acc13
+	_srm	$t2,0,$acc15
+	ldbx	$acc14($rounds),$acc14
+
+		dep	$acc0,7,8,$acc3
+	ldbx	$acc15($rounds),$acc15
+		dep	$acc4,7,8,$acc7
+		dep	$acc1,15,8,$acc3
+		dep	$acc5,15,8,$acc7
+		dep	$acc2,23,8,$acc3
+		dep	$acc6,23,8,$acc7
+		xor	$acc3,$s0,$s0
+		xor	$acc7,$s1,$s1
+		dep	$acc8,7,8,$acc11
+		dep	$acc12,7,8,$acc15
+		dep	$acc9,15,8,$acc11
+		dep	$acc13,15,8,$acc15
+		dep	$acc10,23,8,$acc11
+		dep	$acc14,23,8,$acc15
+		xor	$acc11,$s2,$s2
+
+	bv	(%r31)
+	.EXIT
+		xor	$acc15,$s3,$s3
+	.PROCEND
+
+	.ALIGN	64
+L\$AES_Te
+	.WORD	0xc66363a5, 0xf87c7c84, 0xee777799, 0xf67b7b8d
+	.WORD	0xfff2f20d, 0xd66b6bbd, 0xde6f6fb1, 0x91c5c554
+	.WORD	0x60303050, 0x02010103, 0xce6767a9, 0x562b2b7d
+	.WORD	0xe7fefe19, 0xb5d7d762, 0x4dababe6, 0xec76769a
+	.WORD	0x8fcaca45, 0x1f82829d, 0x89c9c940, 0xfa7d7d87
+	.WORD	0xeffafa15, 0xb25959eb, 0x8e4747c9, 0xfbf0f00b
+	.WORD	0x41adadec, 0xb3d4d467, 0x5fa2a2fd, 0x45afafea
+	.WORD	0x239c9cbf, 0x53a4a4f7, 0xe4727296, 0x9bc0c05b
+	.WORD	0x75b7b7c2, 0xe1fdfd1c, 0x3d9393ae, 0x4c26266a
+	.WORD	0x6c36365a, 0x7e3f3f41, 0xf5f7f702, 0x83cccc4f
+	.WORD	0x6834345c, 0x51a5a5f4, 0xd1e5e534, 0xf9f1f108
+	.WORD	0xe2717193, 0xabd8d873, 0x62313153, 0x2a15153f
+	.WORD	0x0804040c, 0x95c7c752, 0x46232365, 0x9dc3c35e
+	.WORD	0x30181828, 0x379696a1, 0x0a05050f, 0x2f9a9ab5
+	.WORD	0x0e070709, 0x24121236, 0x1b80809b, 0xdfe2e23d
+	.WORD	0xcdebeb26, 0x4e272769, 0x7fb2b2cd, 0xea75759f
+	.WORD	0x1209091b, 0x1d83839e, 0x582c2c74, 0x341a1a2e
+	.WORD	0x361b1b2d, 0xdc6e6eb2, 0xb45a5aee, 0x5ba0a0fb
+	.WORD	0xa45252f6, 0x763b3b4d, 0xb7d6d661, 0x7db3b3ce
+	.WORD	0x5229297b, 0xdde3e33e, 0x5e2f2f71, 0x13848497
+	.WORD	0xa65353f5, 0xb9d1d168, 0x00000000, 0xc1eded2c
+	.WORD	0x40202060, 0xe3fcfc1f, 0x79b1b1c8, 0xb65b5bed
+	.WORD	0xd46a6abe, 0x8dcbcb46, 0x67bebed9, 0x7239394b
+	.WORD	0x944a4ade, 0x984c4cd4, 0xb05858e8, 0x85cfcf4a
+	.WORD	0xbbd0d06b, 0xc5efef2a, 0x4faaaae5, 0xedfbfb16
+	.WORD	0x864343c5, 0x9a4d4dd7, 0x66333355, 0x11858594
+	.WORD	0x8a4545cf, 0xe9f9f910, 0x04020206, 0xfe7f7f81
+	.WORD	0xa05050f0, 0x783c3c44, 0x259f9fba, 0x4ba8a8e3
+	.WORD	0xa25151f3, 0x5da3a3fe, 0x804040c0, 0x058f8f8a
+	.WORD	0x3f9292ad, 0x219d9dbc, 0x70383848, 0xf1f5f504
+	.WORD	0x63bcbcdf, 0x77b6b6c1, 0xafdada75, 0x42212163
+	.WORD	0x20101030, 0xe5ffff1a, 0xfdf3f30e, 0xbfd2d26d
+	.WORD	0x81cdcd4c, 0x180c0c14, 0x26131335, 0xc3ecec2f
+	.WORD	0xbe5f5fe1, 0x359797a2, 0x884444cc, 0x2e171739
+	.WORD	0x93c4c457, 0x55a7a7f2, 0xfc7e7e82, 0x7a3d3d47
+	.WORD	0xc86464ac, 0xba5d5de7, 0x3219192b, 0xe6737395
+	.WORD	0xc06060a0, 0x19818198, 0x9e4f4fd1, 0xa3dcdc7f
+	.WORD	0x44222266, 0x542a2a7e, 0x3b9090ab, 0x0b888883
+	.WORD	0x8c4646ca, 0xc7eeee29, 0x6bb8b8d3, 0x2814143c
+	.WORD	0xa7dede79, 0xbc5e5ee2, 0x160b0b1d, 0xaddbdb76
+	.WORD	0xdbe0e03b, 0x64323256, 0x743a3a4e, 0x140a0a1e
+	.WORD	0x924949db, 0x0c06060a, 0x4824246c, 0xb85c5ce4
+	.WORD	0x9fc2c25d, 0xbdd3d36e, 0x43acacef, 0xc46262a6
+	.WORD	0x399191a8, 0x319595a4, 0xd3e4e437, 0xf279798b
+	.WORD	0xd5e7e732, 0x8bc8c843, 0x6e373759, 0xda6d6db7
+	.WORD	0x018d8d8c, 0xb1d5d564, 0x9c4e4ed2, 0x49a9a9e0
+	.WORD	0xd86c6cb4, 0xac5656fa, 0xf3f4f407, 0xcfeaea25
+	.WORD	0xca6565af, 0xf47a7a8e, 0x47aeaee9, 0x10080818
+	.WORD	0x6fbabad5, 0xf0787888, 0x4a25256f, 0x5c2e2e72
+	.WORD	0x381c1c24, 0x57a6a6f1, 0x73b4b4c7, 0x97c6c651
+	.WORD	0xcbe8e823, 0xa1dddd7c, 0xe874749c, 0x3e1f1f21
+	.WORD	0x964b4bdd, 0x61bdbddc, 0x0d8b8b86, 0x0f8a8a85
+	.WORD	0xe0707090, 0x7c3e3e42, 0x71b5b5c4, 0xcc6666aa
+	.WORD	0x904848d8, 0x06030305, 0xf7f6f601, 0x1c0e0e12
+	.WORD	0xc26161a3, 0x6a35355f, 0xae5757f9, 0x69b9b9d0
+	.WORD	0x17868691, 0x99c1c158, 0x3a1d1d27, 0x279e9eb9
+	.WORD	0xd9e1e138, 0xebf8f813, 0x2b9898b3, 0x22111133
+	.WORD	0xd26969bb, 0xa9d9d970, 0x078e8e89, 0x339494a7
+	.WORD	0x2d9b9bb6, 0x3c1e1e22, 0x15878792, 0xc9e9e920
+	.WORD	0x87cece49, 0xaa5555ff, 0x50282878, 0xa5dfdf7a
+	.WORD	0x038c8c8f, 0x59a1a1f8, 0x09898980, 0x1a0d0d17
+	.WORD	0x65bfbfda, 0xd7e6e631, 0x844242c6, 0xd06868b8
+	.WORD	0x824141c3, 0x299999b0, 0x5a2d2d77, 0x1e0f0f11
+	.WORD	0x7bb0b0cb, 0xa85454fc, 0x6dbbbbd6, 0x2c16163a
+	.BYTE	0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5
+	.BYTE	0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76
+	.BYTE	0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0
+	.BYTE	0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0
+	.BYTE	0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc
+	.BYTE	0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15
+	.BYTE	0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a
+	.BYTE	0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75
+	.BYTE	0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0
+	.BYTE	0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84
+	.BYTE	0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b
+	.BYTE	0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf
+	.BYTE	0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85
+	.BYTE	0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8
+	.BYTE	0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5
+	.BYTE	0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2
+	.BYTE	0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17
+	.BYTE	0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73
+	.BYTE	0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88
+	.BYTE	0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb
+	.BYTE	0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c
+	.BYTE	0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79
+	.BYTE	0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9
+	.BYTE	0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08
+	.BYTE	0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6
+	.BYTE	0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a
+	.BYTE	0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e
+	.BYTE	0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e
+	.BYTE	0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94
+	.BYTE	0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf
+	.BYTE	0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68
+	.BYTE	0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
+___
+
+$code.=<<___;
+	.EXPORT	AES_decrypt,ENTRY,ARGW0=GR,ARGW1=GR,ARGW2=GR
+	.ALIGN	16
+AES_decrypt
+	.PROC
+	.CALLINFO	FRAME=`$FRAME-16*$SIZE_T`,NO_CALLS,SAVE_RP,ENTRY_GR=18
+	.ENTRY
+	$PUSH	%r2,-$SAVED_RP(%sp)	; standard prologue
+	$PUSHMA	%r3,$FRAME(%sp)
+	$PUSH	%r4,`-$FRAME+1*$SIZE_T`(%sp)
+	$PUSH	%r5,`-$FRAME+2*$SIZE_T`(%sp)
+	$PUSH	%r6,`-$FRAME+3*$SIZE_T`(%sp)
+	$PUSH	%r7,`-$FRAME+4*$SIZE_T`(%sp)
+	$PUSH	%r8,`-$FRAME+5*$SIZE_T`(%sp)
+	$PUSH	%r9,`-$FRAME+6*$SIZE_T`(%sp)
+	$PUSH	%r10,`-$FRAME+7*$SIZE_T`(%sp)
+	$PUSH	%r11,`-$FRAME+8*$SIZE_T`(%sp)
+	$PUSH	%r12,`-$FRAME+9*$SIZE_T`(%sp)
+	$PUSH	%r13,`-$FRAME+10*$SIZE_T`(%sp)
+	$PUSH	%r14,`-$FRAME+11*$SIZE_T`(%sp)
+	$PUSH	%r15,`-$FRAME+12*$SIZE_T`(%sp)
+	$PUSH	%r16,`-$FRAME+13*$SIZE_T`(%sp)
+	$PUSH	%r17,`-$FRAME+14*$SIZE_T`(%sp)
+	$PUSH	%r18,`-$FRAME+15*$SIZE_T`(%sp)
+
+	blr	%r0,$tbl
+	ldi	3,$t0
+L\$dec_pic
+	andcm	$tbl,$t0,$tbl
+	ldo	L\$AES_Td-L\$dec_pic($tbl),$tbl
+
+	and	$inp,$t0,$t0
+	sub	$inp,$t0,$inp
+	ldw	0($inp),$s0
+	ldw	4($inp),$s1
+	ldw	8($inp),$s2
+	comib,=	0,$t0,L\$dec_inp_aligned
+	ldw	12($inp),$s3
+
+	sh3addl	$t0,%r0,$t0
+	subi	32,$t0,$t0
+	mtctl	$t0,%cr11
+	ldw	16($inp),$t1
+	vshd	$s0,$s1,$s0
+	vshd	$s1,$s2,$s1
+	vshd	$s2,$s3,$s2
+	vshd	$s3,$t1,$s3
+
+L\$dec_inp_aligned
+	bl	_parisc_AES_decrypt,%r31
+	nop
+
+	extru,<> $out,31,2,%r0
+	b	L\$dec_out_aligned
+	nop
+
+	_srm	$s0,24,$acc0
+	_srm	$s0,16,$acc1
+	stb	$acc0,0($out)
+	_srm	$s0,8,$acc2
+	stb	$acc1,1($out)
+	_srm	$s1,24,$acc4
+	stb	$acc2,2($out)
+	_srm	$s1,16,$acc5
+	stb	$s0,3($out)
+	_srm	$s1,8,$acc6
+	stb	$acc4,4($out)
+	_srm	$s2,24,$acc0
+	stb	$acc5,5($out)
+	_srm	$s2,16,$acc1
+	stb	$acc6,6($out)
+	_srm	$s2,8,$acc2
+	stb	$s1,7($out)
+	_srm	$s3,24,$acc4
+	stb	$acc0,8($out)
+	_srm	$s3,16,$acc5
+	stb	$acc1,9($out)
+	_srm	$s3,8,$acc6
+	stb	$acc2,10($out)
+	stb	$s2,11($out)
+	stb	$acc4,12($out)
+	stb	$acc5,13($out)
+	stb	$acc6,14($out)
+	b	L\$dec_done
+	stb	$s3,15($out)
+
+L\$dec_out_aligned
+	stw	$s0,0($out)
+	stw	$s1,4($out)
+	stw	$s2,8($out)
+	stw	$s3,12($out)
+
+L\$dec_done
+	$POP	`-$FRAME-$SAVED_RP`(%sp),%r2	; standard epilogue
+	$POP	`-$FRAME+1*$SIZE_T`(%sp),%r4
+	$POP	`-$FRAME+2*$SIZE_T`(%sp),%r5
+	$POP	`-$FRAME+3*$SIZE_T`(%sp),%r6
+	$POP	`-$FRAME+4*$SIZE_T`(%sp),%r7
+	$POP	`-$FRAME+5*$SIZE_T`(%sp),%r8
+	$POP	`-$FRAME+6*$SIZE_T`(%sp),%r9
+	$POP	`-$FRAME+7*$SIZE_T`(%sp),%r10
+	$POP	`-$FRAME+8*$SIZE_T`(%sp),%r11
+	$POP	`-$FRAME+9*$SIZE_T`(%sp),%r12
+	$POP	`-$FRAME+10*$SIZE_T`(%sp),%r13
+	$POP	`-$FRAME+11*$SIZE_T`(%sp),%r14
+	$POP	`-$FRAME+12*$SIZE_T`(%sp),%r15
+	$POP	`-$FRAME+13*$SIZE_T`(%sp),%r16
+	$POP	`-$FRAME+14*$SIZE_T`(%sp),%r17
+	$POP	`-$FRAME+15*$SIZE_T`(%sp),%r18
+	bv	(%r2)
+	.EXIT
+	$POPMB	-$FRAME(%sp),%r3
+	.PROCEND
+
+	.ALIGN	16
+_parisc_AES_decrypt
+	.PROC
+	.CALLINFO	MILLICODE
+	.ENTRY
+	ldw	240($key),$rounds
+	ldw	0($key),$t0
+	ldw	4($key),$t1
+	ldw	8($key),$t2
+	ldw	12($key),$t3
+	_srm	$rounds,1,$rounds
+	xor	$t0,$s0,$s0
+	ldw	16($key),$t0
+	xor	$t1,$s1,$s1
+	ldw	20($key),$t1
+	_srm	$s0,24,$acc0
+	xor	$t2,$s2,$s2
+	ldw	24($key),$t2
+	xor	$t3,$s3,$s3
+	ldw	28($key),$t3
+	_srm	$s3,16,$acc1
+L\$dec_loop
+	_srm	$s2,8,$acc2
+	ldwx,s	$acc0($tbl),$acc0
+	_srm	$s1,0,$acc3
+	ldwx,s	$acc1($tbl),$acc1
+	_srm	$s1,24,$acc4
+	ldwx,s	$acc2($tbl),$acc2
+	_srm	$s0,16,$acc5
+	ldwx,s	$acc3($tbl),$acc3
+	_srm	$s3,8,$acc6
+	ldwx,s	$acc4($tbl),$acc4
+	_srm	$s2,0,$acc7
+	ldwx,s	$acc5($tbl),$acc5
+	_srm	$s2,24,$acc8
+	ldwx,s	$acc6($tbl),$acc6
+	_srm	$s1,16,$acc9
+	ldwx,s	$acc7($tbl),$acc7
+	_srm	$s0,8,$acc10
+	ldwx,s	$acc8($tbl),$acc8
+	_srm	$s3,0,$acc11
+	ldwx,s	$acc9($tbl),$acc9
+	_srm	$s3,24,$acc12
+	ldwx,s	$acc10($tbl),$acc10
+	_srm	$s2,16,$acc13
+	ldwx,s	$acc11($tbl),$acc11
+	_srm	$s1,8,$acc14
+	ldwx,s	$acc12($tbl),$acc12
+	_srm	$s0,0,$acc15
+	ldwx,s	$acc13($tbl),$acc13
+	ldwx,s	$acc14($tbl),$acc14
+	ldwx,s	$acc15($tbl),$acc15
+	addib,= -1,$rounds,L\$dec_last
+	ldo	32($key),$key
+
+		_ror	$acc1,8,$acc1
+		xor	$acc0,$t0,$t0
+	ldw	0($key),$s0
+		_ror	$acc2,16,$acc2
+		xor	$acc1,$t0,$t0
+	ldw	4($key),$s1
+		_ror	$acc3,24,$acc3
+		xor	$acc2,$t0,$t0
+	ldw	8($key),$s2
+		_ror	$acc5,8,$acc5
+		xor	$acc3,$t0,$t0
+	ldw	12($key),$s3
+		_ror	$acc6,16,$acc6
+		xor	$acc4,$t1,$t1
+		_ror	$acc7,24,$acc7
+		xor	$acc5,$t1,$t1
+		_ror	$acc9,8,$acc9
+		xor	$acc6,$t1,$t1
+		_ror	$acc10,16,$acc10
+		xor	$acc7,$t1,$t1
+		_ror	$acc11,24,$acc11
+		xor	$acc8,$t2,$t2
+		_ror	$acc13,8,$acc13
+		xor	$acc9,$t2,$t2
+		_ror	$acc14,16,$acc14
+		xor	$acc10,$t2,$t2
+		_ror	$acc15,24,$acc15
+		xor	$acc11,$t2,$t2
+		xor	$acc12,$acc14,$acc14
+		xor	$acc13,$t3,$t3
+	_srm	$t0,24,$acc0
+		xor	$acc14,$t3,$t3
+		xor	$acc15,$t3,$t3
+	_srm	$t3,16,$acc1
+
+	_srm	$t2,8,$acc2
+	ldwx,s	$acc0($tbl),$acc0
+	_srm	$t1,0,$acc3
+	ldwx,s	$acc1($tbl),$acc1
+	_srm	$t1,24,$acc4
+	ldwx,s	$acc2($tbl),$acc2
+	_srm	$t0,16,$acc5
+	ldwx,s	$acc3($tbl),$acc3
+	_srm	$t3,8,$acc6
+	ldwx,s	$acc4($tbl),$acc4
+	_srm	$t2,0,$acc7
+	ldwx,s	$acc5($tbl),$acc5
+	_srm	$t2,24,$acc8
+	ldwx,s	$acc6($tbl),$acc6
+	_srm	$t1,16,$acc9
+	ldwx,s	$acc7($tbl),$acc7
+	_srm	$t0,8,$acc10
+	ldwx,s	$acc8($tbl),$acc8
+	_srm	$t3,0,$acc11
+	ldwx,s	$acc9($tbl),$acc9
+	_srm	$t3,24,$acc12
+	ldwx,s	$acc10($tbl),$acc10
+	_srm	$t2,16,$acc13
+	ldwx,s	$acc11($tbl),$acc11
+	_srm	$t1,8,$acc14
+	ldwx,s	$acc12($tbl),$acc12
+	_srm	$t0,0,$acc15
+	ldwx,s	$acc13($tbl),$acc13
+		_ror	$acc1,8,$acc1
+	ldwx,s	$acc14($tbl),$acc14
+
+		_ror	$acc2,16,$acc2
+		xor	$acc0,$s0,$s0
+	ldwx,s	$acc15($tbl),$acc15
+		_ror	$acc3,24,$acc3
+		xor	$acc1,$s0,$s0
+	ldw	16($key),$t0
+		_ror	$acc5,8,$acc5
+		xor	$acc2,$s0,$s0
+	ldw	20($key),$t1
+		_ror	$acc6,16,$acc6
+		xor	$acc3,$s0,$s0
+	ldw	24($key),$t2
+		_ror	$acc7,24,$acc7
+		xor	$acc4,$s1,$s1
+	ldw	28($key),$t3
+		_ror	$acc9,8,$acc9
+		xor	$acc5,$s1,$s1
+	ldw	1024+0($tbl),%r0		; prefetch td4
+		_ror	$acc10,16,$acc10
+		xor	$acc6,$s1,$s1
+	ldw	1024+32($tbl),%r0		; prefetch td4
+		_ror	$acc11,24,$acc11
+		xor	$acc7,$s1,$s1
+	ldw	1024+64($tbl),%r0		; prefetch td4
+		_ror	$acc13,8,$acc13
+		xor	$acc8,$s2,$s2
+	ldw	1024+96($tbl),%r0		; prefetch td4
+		_ror	$acc14,16,$acc14
+		xor	$acc9,$s2,$s2
+	ldw	1024+128($tbl),%r0		; prefetch td4
+		_ror	$acc15,24,$acc15
+		xor	$acc10,$s2,$s2
+	ldw	1024+160($tbl),%r0		; prefetch td4
+	_srm	$s0,24,$acc0
+		xor	$acc11,$s2,$s2
+	ldw	1024+192($tbl),%r0		; prefetch td4
+		xor	$acc12,$acc14,$acc14
+		xor	$acc13,$s3,$s3
+	ldw	1024+224($tbl),%r0		; prefetch td4
+		xor	$acc14,$s3,$s3
+		xor	$acc15,$s3,$s3
+	b	L\$dec_loop
+	_srm	$s3,16,$acc1
+
+	.ALIGN	16
+L\$dec_last
+	ldo	1024($tbl),$rounds
+		_ror	$acc1,8,$acc1
+		xor	$acc0,$t0,$t0
+	ldw	0($key),$s0
+		_ror	$acc2,16,$acc2
+		xor	$acc1,$t0,$t0
+	ldw	4($key),$s1
+		_ror	$acc3,24,$acc3
+		xor	$acc2,$t0,$t0
+	ldw	8($key),$s2
+		_ror	$acc5,8,$acc5
+		xor	$acc3,$t0,$t0
+	ldw	12($key),$s3
+		_ror	$acc6,16,$acc6
+		xor	$acc4,$t1,$t1
+		_ror	$acc7,24,$acc7
+		xor	$acc5,$t1,$t1
+		_ror	$acc9,8,$acc9
+		xor	$acc6,$t1,$t1
+		_ror	$acc10,16,$acc10
+		xor	$acc7,$t1,$t1
+		_ror	$acc11,24,$acc11
+		xor	$acc8,$t2,$t2
+		_ror	$acc13,8,$acc13
+		xor	$acc9,$t2,$t2
+		_ror	$acc14,16,$acc14
+		xor	$acc10,$t2,$t2
+		_ror	$acc15,24,$acc15
+		xor	$acc11,$t2,$t2
+		xor	$acc12,$acc14,$acc14
+		xor	$acc13,$t3,$t3
+	_srm	$t0,24,$acc0
+		xor	$acc14,$t3,$t3
+		xor	$acc15,$t3,$t3
+	_srm	$t3,16,$acc1
+
+	_srm	$t2,8,$acc2
+	ldbx	$acc0($rounds),$acc0
+	_srm	$t1,24,$acc4
+	ldbx	$acc1($rounds),$acc1
+	_srm	$t0,16,$acc5
+	_srm	$t1,0,$acc3
+	ldbx	$acc2($rounds),$acc2
+	ldbx	$acc3($rounds),$acc3
+	_srm	$t3,8,$acc6
+	ldbx	$acc4($rounds),$acc4
+	_srm	$t2,24,$acc8
+	ldbx	$acc5($rounds),$acc5
+	_srm	$t1,16,$acc9
+	_srm	$t2,0,$acc7
+	ldbx	$acc6($rounds),$acc6
+	ldbx	$acc7($rounds),$acc7
+	_srm	$t0,8,$acc10
+	ldbx	$acc8($rounds),$acc8
+	_srm	$t3,24,$acc12
+	ldbx	$acc9($rounds),$acc9
+	_srm	$t2,16,$acc13
+	_srm	$t3,0,$acc11
+	ldbx	$acc10($rounds),$acc10
+	_srm	$t1,8,$acc14
+	ldbx	$acc11($rounds),$acc11
+	ldbx	$acc12($rounds),$acc12
+	ldbx	$acc13($rounds),$acc13
+	_srm	$t0,0,$acc15
+	ldbx	$acc14($rounds),$acc14
+
+		dep	$acc0,7,8,$acc3
+	ldbx	$acc15($rounds),$acc15
+		dep	$acc4,7,8,$acc7
+		dep	$acc1,15,8,$acc3
+		dep	$acc5,15,8,$acc7
+		dep	$acc2,23,8,$acc3
+		dep	$acc6,23,8,$acc7
+		xor	$acc3,$s0,$s0
+		xor	$acc7,$s1,$s1
+		dep	$acc8,7,8,$acc11
+		dep	$acc12,7,8,$acc15
+		dep	$acc9,15,8,$acc11
+		dep	$acc13,15,8,$acc15
+		dep	$acc10,23,8,$acc11
+		dep	$acc14,23,8,$acc15
+		xor	$acc11,$s2,$s2
+
+	bv	(%r31)
+	.EXIT
+		xor	$acc15,$s3,$s3
+	.PROCEND
+
+	.ALIGN	64
+L\$AES_Td
+	.WORD	0x51f4a750, 0x7e416553, 0x1a17a4c3, 0x3a275e96
+	.WORD	0x3bab6bcb, 0x1f9d45f1, 0xacfa58ab, 0x4be30393
+	.WORD	0x2030fa55, 0xad766df6, 0x88cc7691, 0xf5024c25
+	.WORD	0x4fe5d7fc, 0xc52acbd7, 0x26354480, 0xb562a38f
+	.WORD	0xdeb15a49, 0x25ba1b67, 0x45ea0e98, 0x5dfec0e1
+	.WORD	0xc32f7502, 0x814cf012, 0x8d4697a3, 0x6bd3f9c6
+	.WORD	0x038f5fe7, 0x15929c95, 0xbf6d7aeb, 0x955259da
+	.WORD	0xd4be832d, 0x587421d3, 0x49e06929, 0x8ec9c844
+	.WORD	0x75c2896a, 0xf48e7978, 0x99583e6b, 0x27b971dd
+	.WORD	0xbee14fb6, 0xf088ad17, 0xc920ac66, 0x7dce3ab4
+	.WORD	0x63df4a18, 0xe51a3182, 0x97513360, 0x62537f45
+	.WORD	0xb16477e0, 0xbb6bae84, 0xfe81a01c, 0xf9082b94
+	.WORD	0x70486858, 0x8f45fd19, 0x94de6c87, 0x527bf8b7
+	.WORD	0xab73d323, 0x724b02e2, 0xe31f8f57, 0x6655ab2a
+	.WORD	0xb2eb2807, 0x2fb5c203, 0x86c57b9a, 0xd33708a5
+	.WORD	0x302887f2, 0x23bfa5b2, 0x02036aba, 0xed16825c
+	.WORD	0x8acf1c2b, 0xa779b492, 0xf307f2f0, 0x4e69e2a1
+	.WORD	0x65daf4cd, 0x0605bed5, 0xd134621f, 0xc4a6fe8a
+	.WORD	0x342e539d, 0xa2f355a0, 0x058ae132, 0xa4f6eb75
+	.WORD	0x0b83ec39, 0x4060efaa, 0x5e719f06, 0xbd6e1051
+	.WORD	0x3e218af9, 0x96dd063d, 0xdd3e05ae, 0x4de6bd46
+	.WORD	0x91548db5, 0x71c45d05, 0x0406d46f, 0x605015ff
+	.WORD	0x1998fb24, 0xd6bde997, 0x894043cc, 0x67d99e77
+	.WORD	0xb0e842bd, 0x07898b88, 0xe7195b38, 0x79c8eedb
+	.WORD	0xa17c0a47, 0x7c420fe9, 0xf8841ec9, 0x00000000
+	.WORD	0x09808683, 0x322bed48, 0x1e1170ac, 0x6c5a724e
+	.WORD	0xfd0efffb, 0x0f853856, 0x3daed51e, 0x362d3927
+	.WORD	0x0a0fd964, 0x685ca621, 0x9b5b54d1, 0x24362e3a
+	.WORD	0x0c0a67b1, 0x9357e70f, 0xb4ee96d2, 0x1b9b919e
+	.WORD	0x80c0c54f, 0x61dc20a2, 0x5a774b69, 0x1c121a16
+	.WORD	0xe293ba0a, 0xc0a02ae5, 0x3c22e043, 0x121b171d
+	.WORD	0x0e090d0b, 0xf28bc7ad, 0x2db6a8b9, 0x141ea9c8
+	.WORD	0x57f11985, 0xaf75074c, 0xee99ddbb, 0xa37f60fd
+	.WORD	0xf701269f, 0x5c72f5bc, 0x44663bc5, 0x5bfb7e34
+	.WORD	0x8b432976, 0xcb23c6dc, 0xb6edfc68, 0xb8e4f163
+	.WORD	0xd731dcca, 0x42638510, 0x13972240, 0x84c61120
+	.WORD	0x854a247d, 0xd2bb3df8, 0xaef93211, 0xc729a16d
+	.WORD	0x1d9e2f4b, 0xdcb230f3, 0x0d8652ec, 0x77c1e3d0
+	.WORD	0x2bb3166c, 0xa970b999, 0x119448fa, 0x47e96422
+	.WORD	0xa8fc8cc4, 0xa0f03f1a, 0x567d2cd8, 0x223390ef
+	.WORD	0x87494ec7, 0xd938d1c1, 0x8ccaa2fe, 0x98d40b36
+	.WORD	0xa6f581cf, 0xa57ade28, 0xdab78e26, 0x3fadbfa4
+	.WORD	0x2c3a9de4, 0x5078920d, 0x6a5fcc9b, 0x547e4662
+	.WORD	0xf68d13c2, 0x90d8b8e8, 0x2e39f75e, 0x82c3aff5
+	.WORD	0x9f5d80be, 0x69d0937c, 0x6fd52da9, 0xcf2512b3
+	.WORD	0xc8ac993b, 0x10187da7, 0xe89c636e, 0xdb3bbb7b
+	.WORD	0xcd267809, 0x6e5918f4, 0xec9ab701, 0x834f9aa8
+	.WORD	0xe6956e65, 0xaaffe67e, 0x21bccf08, 0xef15e8e6
+	.WORD	0xbae79bd9, 0x4a6f36ce, 0xea9f09d4, 0x29b07cd6
+	.WORD	0x31a4b2af, 0x2a3f2331, 0xc6a59430, 0x35a266c0
+	.WORD	0x744ebc37, 0xfc82caa6, 0xe090d0b0, 0x33a7d815
+	.WORD	0xf104984a, 0x41ecdaf7, 0x7fcd500e, 0x1791f62f
+	.WORD	0x764dd68d, 0x43efb04d, 0xccaa4d54, 0xe49604df
+	.WORD	0x9ed1b5e3, 0x4c6a881b, 0xc12c1fb8, 0x4665517f
+	.WORD	0x9d5eea04, 0x018c355d, 0xfa877473, 0xfb0b412e
+	.WORD	0xb3671d5a, 0x92dbd252, 0xe9105633, 0x6dd64713
+	.WORD	0x9ad7618c, 0x37a10c7a, 0x59f8148e, 0xeb133c89
+	.WORD	0xcea927ee, 0xb761c935, 0xe11ce5ed, 0x7a47b13c
+	.WORD	0x9cd2df59, 0x55f2733f, 0x1814ce79, 0x73c737bf
+	.WORD	0x53f7cdea, 0x5ffdaa5b, 0xdf3d6f14, 0x7844db86
+	.WORD	0xcaaff381, 0xb968c43e, 0x3824342c, 0xc2a3405f
+	.WORD	0x161dc372, 0xbce2250c, 0x283c498b, 0xff0d9541
+	.WORD	0x39a80171, 0x080cb3de, 0xd8b4e49c, 0x6456c190
+	.WORD	0x7bcb8461, 0xd532b670, 0x486c5c74, 0xd0b85742
+	.BYTE	0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38
+	.BYTE	0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb
+	.BYTE	0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87
+	.BYTE	0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb
+	.BYTE	0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d
+	.BYTE	0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e
+	.BYTE	0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2
+	.BYTE	0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25
+	.BYTE	0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16
+	.BYTE	0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92
+	.BYTE	0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda
+	.BYTE	0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84
+	.BYTE	0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a
+	.BYTE	0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06
+	.BYTE	0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02
+	.BYTE	0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b
+	.BYTE	0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea
+	.BYTE	0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73
+	.BYTE	0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85
+	.BYTE	0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e
+	.BYTE	0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89
+	.BYTE	0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b
+	.BYTE	0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20
+	.BYTE	0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4
+	.BYTE	0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31
+	.BYTE	0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f
+	.BYTE	0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d
+	.BYTE	0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef
+	.BYTE	0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0
+	.BYTE	0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61
+	.BYTE	0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26
+	.BYTE	0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
+	.STRINGZ "AES for PA-RISC, CRYPTOGAMS by <appro\@openssl.org>"
+___
+
+foreach (split("\n",$code)) {
+	s/\`([^\`]*)\`/eval $1/ge;
+
+	# translate made-up instructions: _ror, _srm
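+	# e.g. (register names purely illustrative): "_ror %r1,8,%r1" becomes
+	# "shd %r1,%r1,8,%r1", a rotate right by 8, while "_srm %r1,24,%r2"
+	# becomes "extru %r1,7,8,%r2" in 32-bit builds ("extrd,u %r1,39,8,%r2"
+	# in 64-bit ones), i.e. "shift right and mask" one byte out of a word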
+	s/_ror(\s+)(%r[0-9]+),/shd$1$2,$2,/				or
+
+	s/_srm(\s+%r[0-9]+),([0-9]+),/
+		$SIZE_T==4 ? sprintf("extru%s,%d,8,",$1,31-$2)
+		:            sprintf("extrd,u%s,%d,8,",$1,63-$2)/e;
+
+	s/,\*/,/ if ($SIZE_T==4);
+	print $_,"\n";
+}
+close STDOUT;
diff --git a/jni/openssl/crypto/aes/asm/aes-ppc.pl b/jni/openssl/crypto/aes/asm/aes-ppc.pl
new file mode 100644
index 0000000..7c52cbe
--- /dev/null
+++ b/jni/openssl/crypto/aes/asm/aes-ppc.pl
@@ -0,0 +1,1365 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# Needs more work: key setup, CBC routine...
+#
+# ppc_AES_[en|de]crypt perform at 18 cycles per byte processed with
+# 128-bit key, which is ~40% better than 64-bit code generated by gcc
+# 4.0. But these are not the ones currently used! Their "compact"
+# counterparts are, for security reasons. ppc_AES_encrypt_compact runs
+# at 1/2 of ppc_AES_encrypt speed, while ppc_AES_decrypt_compact runs
+# at 1/3 of ppc_AES_decrypt.
+
+# February 2010
+#
+# Rescheduling instructions to favour Power6 pipeline gave 10%
+# performance improvement on the platform in question (and marginal
+# improvement even on others). It should be noted that Power6 fails
+# to process a byte in 18 cycles, only in 23, because it fails to issue
+# 4 load instructions in two cycles, only in 3. As a result, non-compact
+# block subroutines are 25% slower than one would expect. Compact
+# functions scale better, because they have pure computational part,
+# which scales perfectly with clock frequency. To be specific,
+# ppc_AES_encrypt_compact operates at 42 cycles per byte, while
+# ppc_AES_decrypt_compact operates at 55 (in a 64-bit build).
+
+$flavour = shift;
+
+if ($flavour =~ /64/) {
+	$SIZE_T	=8;
+	$LRSAVE	=2*$SIZE_T;
+	$STU	="stdu";
+	$POP	="ld";
+	$PUSH	="std";
+} elsif ($flavour =~ /32/) {
+	$SIZE_T	=4;
+	$LRSAVE	=$SIZE_T;
+	$STU	="stwu";
+	$POP	="lwz";
+	$PUSH	="stw";
+} else { die "nonsense $flavour"; }
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
+die "can't locate ppc-xlate.pl";
+
+open STDOUT,"| $^X $xlate $flavour ".shift || die "can't call $xlate: $!";
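+# typical invocation (assumed): perl aes-ppc.pl linux64 aes-ppc.s;
+# the flavour string only needs to contain "32" or "64" here, the
+# ABI/syntax specifics are handled by ppc-xlate.pl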
+
+$FRAME=32*$SIZE_T;
+
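+# emit every table word twice (an 8-byte pair); the non-compact rounds
+# index the table by byte*8 and also load at offsets +1/+2/+3 into the
+# pair, which yields byte-rotated copies of an entry for free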
+sub _data_word()
+{ my $i;
+    while(defined($i=shift)) { $code.=sprintf"\t.long\t0x%08x,0x%08x\n",$i,$i; }
+}
+
+$sp="r1";
+$toc="r2";
+$inp="r3";
+$out="r4";
+$key="r5";
+
+$Tbl0="r3";
+$Tbl1="r6";
+$Tbl2="r7";
+$Tbl3="r2";
+
+$s0="r8";
+$s1="r9";
+$s2="r10";
+$s3="r11";
+
+$t0="r12";
+$t1="r13";
+$t2="r14";
+$t3="r15";
+
+$acc00="r16";
+$acc01="r17";
+$acc02="r18";
+$acc03="r19";
+
+$acc04="r20";
+$acc05="r21";
+$acc06="r22";
+$acc07="r23";
+
+$acc08="r24";
+$acc09="r25";
+$acc10="r26";
+$acc11="r27";
+
+$acc12="r28";
+$acc13="r29";
+$acc14="r30";
+$acc15="r31";
+
+# stay away from TLS pointer
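+# (r13 is the thread pointer in the 64-bit ELF ABI, r2 in 32-bit Linux)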
+if ($SIZE_T==8)	{ die if ($t1 ne "r13");  $t1="r0";		}
+else		{ die if ($Tbl3 ne "r2"); $Tbl3=$t0; $t0="r0";	}
+$mask80=$Tbl2;
+$mask1b=$Tbl3;
+
+$code.=<<___;
+.machine	"any"
+.text
+
+.align	7
+LAES_Te:
+	mflr	r0
+	bcl	20,31,\$+4
+	mflr	$Tbl0	;    vvvvv "distance" between . and 1st data entry
+	addi	$Tbl0,$Tbl0,`128-8`
+	mtlr	r0
+	blr
+	.long	0
+	.byte	0,12,0x14,0,0,0,0,0
+	.space	`64-9*4`
+LAES_Td:
+	mflr	r0
+	bcl	20,31,\$+4
+	mflr	$Tbl0	;    vvvvvvvv "distance" between . and 1st data entry
+	addi	$Tbl0,$Tbl0,`128-64-8+2048+256`
+	mtlr	r0
+	blr
+	.long	0
+	.byte	0,12,0x14,0,0,0,0,0
+	.space	`128-64-9*4`
+___
+&_data_word(
+	0xc66363a5, 0xf87c7c84, 0xee777799, 0xf67b7b8d,
+	0xfff2f20d, 0xd66b6bbd, 0xde6f6fb1, 0x91c5c554,
+	0x60303050, 0x02010103, 0xce6767a9, 0x562b2b7d,
+	0xe7fefe19, 0xb5d7d762, 0x4dababe6, 0xec76769a,
+	0x8fcaca45, 0x1f82829d, 0x89c9c940, 0xfa7d7d87,
+	0xeffafa15, 0xb25959eb, 0x8e4747c9, 0xfbf0f00b,
+	0x41adadec, 0xb3d4d467, 0x5fa2a2fd, 0x45afafea,
+	0x239c9cbf, 0x53a4a4f7, 0xe4727296, 0x9bc0c05b,
+	0x75b7b7c2, 0xe1fdfd1c, 0x3d9393ae, 0x4c26266a,
+	0x6c36365a, 0x7e3f3f41, 0xf5f7f702, 0x83cccc4f,
+	0x6834345c, 0x51a5a5f4, 0xd1e5e534, 0xf9f1f108,
+	0xe2717193, 0xabd8d873, 0x62313153, 0x2a15153f,
+	0x0804040c, 0x95c7c752, 0x46232365, 0x9dc3c35e,
+	0x30181828, 0x379696a1, 0x0a05050f, 0x2f9a9ab5,
+	0x0e070709, 0x24121236, 0x1b80809b, 0xdfe2e23d,
+	0xcdebeb26, 0x4e272769, 0x7fb2b2cd, 0xea75759f,
+	0x1209091b, 0x1d83839e, 0x582c2c74, 0x341a1a2e,
+	0x361b1b2d, 0xdc6e6eb2, 0xb45a5aee, 0x5ba0a0fb,
+	0xa45252f6, 0x763b3b4d, 0xb7d6d661, 0x7db3b3ce,
+	0x5229297b, 0xdde3e33e, 0x5e2f2f71, 0x13848497,
+	0xa65353f5, 0xb9d1d168, 0x00000000, 0xc1eded2c,
+	0x40202060, 0xe3fcfc1f, 0x79b1b1c8, 0xb65b5bed,
+	0xd46a6abe, 0x8dcbcb46, 0x67bebed9, 0x7239394b,
+	0x944a4ade, 0x984c4cd4, 0xb05858e8, 0x85cfcf4a,
+	0xbbd0d06b, 0xc5efef2a, 0x4faaaae5, 0xedfbfb16,
+	0x864343c5, 0x9a4d4dd7, 0x66333355, 0x11858594,
+	0x8a4545cf, 0xe9f9f910, 0x04020206, 0xfe7f7f81,
+	0xa05050f0, 0x783c3c44, 0x259f9fba, 0x4ba8a8e3,
+	0xa25151f3, 0x5da3a3fe, 0x804040c0, 0x058f8f8a,
+	0x3f9292ad, 0x219d9dbc, 0x70383848, 0xf1f5f504,
+	0x63bcbcdf, 0x77b6b6c1, 0xafdada75, 0x42212163,
+	0x20101030, 0xe5ffff1a, 0xfdf3f30e, 0xbfd2d26d,
+	0x81cdcd4c, 0x180c0c14, 0x26131335, 0xc3ecec2f,
+	0xbe5f5fe1, 0x359797a2, 0x884444cc, 0x2e171739,
+	0x93c4c457, 0x55a7a7f2, 0xfc7e7e82, 0x7a3d3d47,
+	0xc86464ac, 0xba5d5de7, 0x3219192b, 0xe6737395,
+	0xc06060a0, 0x19818198, 0x9e4f4fd1, 0xa3dcdc7f,
+	0x44222266, 0x542a2a7e, 0x3b9090ab, 0x0b888883,
+	0x8c4646ca, 0xc7eeee29, 0x6bb8b8d3, 0x2814143c,
+	0xa7dede79, 0xbc5e5ee2, 0x160b0b1d, 0xaddbdb76,
+	0xdbe0e03b, 0x64323256, 0x743a3a4e, 0x140a0a1e,
+	0x924949db, 0x0c06060a, 0x4824246c, 0xb85c5ce4,
+	0x9fc2c25d, 0xbdd3d36e, 0x43acacef, 0xc46262a6,
+	0x399191a8, 0x319595a4, 0xd3e4e437, 0xf279798b,
+	0xd5e7e732, 0x8bc8c843, 0x6e373759, 0xda6d6db7,
+	0x018d8d8c, 0xb1d5d564, 0x9c4e4ed2, 0x49a9a9e0,
+	0xd86c6cb4, 0xac5656fa, 0xf3f4f407, 0xcfeaea25,
+	0xca6565af, 0xf47a7a8e, 0x47aeaee9, 0x10080818,
+	0x6fbabad5, 0xf0787888, 0x4a25256f, 0x5c2e2e72,
+	0x381c1c24, 0x57a6a6f1, 0x73b4b4c7, 0x97c6c651,
+	0xcbe8e823, 0xa1dddd7c, 0xe874749c, 0x3e1f1f21,
+	0x964b4bdd, 0x61bdbddc, 0x0d8b8b86, 0x0f8a8a85,
+	0xe0707090, 0x7c3e3e42, 0x71b5b5c4, 0xcc6666aa,
+	0x904848d8, 0x06030305, 0xf7f6f601, 0x1c0e0e12,
+	0xc26161a3, 0x6a35355f, 0xae5757f9, 0x69b9b9d0,
+	0x17868691, 0x99c1c158, 0x3a1d1d27, 0x279e9eb9,
+	0xd9e1e138, 0xebf8f813, 0x2b9898b3, 0x22111133,
+	0xd26969bb, 0xa9d9d970, 0x078e8e89, 0x339494a7,
+	0x2d9b9bb6, 0x3c1e1e22, 0x15878792, 0xc9e9e920,
+	0x87cece49, 0xaa5555ff, 0x50282878, 0xa5dfdf7a,
+	0x038c8c8f, 0x59a1a1f8, 0x09898980, 0x1a0d0d17,
+	0x65bfbfda, 0xd7e6e631, 0x844242c6, 0xd06868b8,
+	0x824141c3, 0x299999b0, 0x5a2d2d77, 0x1e0f0f11,
+	0x7bb0b0cb, 0xa85454fc, 0x6dbbbbd6, 0x2c16163a);
+$code.=<<___;
+.byte	0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5
+.byte	0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76
+.byte	0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0
+.byte	0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0
+.byte	0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc
+.byte	0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15
+.byte	0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a
+.byte	0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75
+.byte	0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0
+.byte	0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84
+.byte	0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b
+.byte	0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf
+.byte	0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85
+.byte	0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8
+.byte	0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5
+.byte	0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2
+.byte	0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17
+.byte	0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73
+.byte	0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88
+.byte	0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb
+.byte	0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c
+.byte	0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79
+.byte	0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9
+.byte	0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08
+.byte	0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6
+.byte	0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a
+.byte	0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e
+.byte	0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e
+.byte	0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94
+.byte	0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf
+.byte	0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68
+.byte	0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
+___
+&_data_word(
+	0x51f4a750, 0x7e416553, 0x1a17a4c3, 0x3a275e96,
+	0x3bab6bcb, 0x1f9d45f1, 0xacfa58ab, 0x4be30393,
+	0x2030fa55, 0xad766df6, 0x88cc7691, 0xf5024c25,
+	0x4fe5d7fc, 0xc52acbd7, 0x26354480, 0xb562a38f,
+	0xdeb15a49, 0x25ba1b67, 0x45ea0e98, 0x5dfec0e1,
+	0xc32f7502, 0x814cf012, 0x8d4697a3, 0x6bd3f9c6,
+	0x038f5fe7, 0x15929c95, 0xbf6d7aeb, 0x955259da,
+	0xd4be832d, 0x587421d3, 0x49e06929, 0x8ec9c844,
+	0x75c2896a, 0xf48e7978, 0x99583e6b, 0x27b971dd,
+	0xbee14fb6, 0xf088ad17, 0xc920ac66, 0x7dce3ab4,
+	0x63df4a18, 0xe51a3182, 0x97513360, 0x62537f45,
+	0xb16477e0, 0xbb6bae84, 0xfe81a01c, 0xf9082b94,
+	0x70486858, 0x8f45fd19, 0x94de6c87, 0x527bf8b7,
+	0xab73d323, 0x724b02e2, 0xe31f8f57, 0x6655ab2a,
+	0xb2eb2807, 0x2fb5c203, 0x86c57b9a, 0xd33708a5,
+	0x302887f2, 0x23bfa5b2, 0x02036aba, 0xed16825c,
+	0x8acf1c2b, 0xa779b492, 0xf307f2f0, 0x4e69e2a1,
+	0x65daf4cd, 0x0605bed5, 0xd134621f, 0xc4a6fe8a,
+	0x342e539d, 0xa2f355a0, 0x058ae132, 0xa4f6eb75,
+	0x0b83ec39, 0x4060efaa, 0x5e719f06, 0xbd6e1051,
+	0x3e218af9, 0x96dd063d, 0xdd3e05ae, 0x4de6bd46,
+	0x91548db5, 0x71c45d05, 0x0406d46f, 0x605015ff,
+	0x1998fb24, 0xd6bde997, 0x894043cc, 0x67d99e77,
+	0xb0e842bd, 0x07898b88, 0xe7195b38, 0x79c8eedb,
+	0xa17c0a47, 0x7c420fe9, 0xf8841ec9, 0x00000000,
+	0x09808683, 0x322bed48, 0x1e1170ac, 0x6c5a724e,
+	0xfd0efffb, 0x0f853856, 0x3daed51e, 0x362d3927,
+	0x0a0fd964, 0x685ca621, 0x9b5b54d1, 0x24362e3a,
+	0x0c0a67b1, 0x9357e70f, 0xb4ee96d2, 0x1b9b919e,
+	0x80c0c54f, 0x61dc20a2, 0x5a774b69, 0x1c121a16,
+	0xe293ba0a, 0xc0a02ae5, 0x3c22e043, 0x121b171d,
+	0x0e090d0b, 0xf28bc7ad, 0x2db6a8b9, 0x141ea9c8,
+	0x57f11985, 0xaf75074c, 0xee99ddbb, 0xa37f60fd,
+	0xf701269f, 0x5c72f5bc, 0x44663bc5, 0x5bfb7e34,
+	0x8b432976, 0xcb23c6dc, 0xb6edfc68, 0xb8e4f163,
+	0xd731dcca, 0x42638510, 0x13972240, 0x84c61120,
+	0x854a247d, 0xd2bb3df8, 0xaef93211, 0xc729a16d,
+	0x1d9e2f4b, 0xdcb230f3, 0x0d8652ec, 0x77c1e3d0,
+	0x2bb3166c, 0xa970b999, 0x119448fa, 0x47e96422,
+	0xa8fc8cc4, 0xa0f03f1a, 0x567d2cd8, 0x223390ef,
+	0x87494ec7, 0xd938d1c1, 0x8ccaa2fe, 0x98d40b36,
+	0xa6f581cf, 0xa57ade28, 0xdab78e26, 0x3fadbfa4,
+	0x2c3a9de4, 0x5078920d, 0x6a5fcc9b, 0x547e4662,
+	0xf68d13c2, 0x90d8b8e8, 0x2e39f75e, 0x82c3aff5,
+	0x9f5d80be, 0x69d0937c, 0x6fd52da9, 0xcf2512b3,
+	0xc8ac993b, 0x10187da7, 0xe89c636e, 0xdb3bbb7b,
+	0xcd267809, 0x6e5918f4, 0xec9ab701, 0x834f9aa8,
+	0xe6956e65, 0xaaffe67e, 0x21bccf08, 0xef15e8e6,
+	0xbae79bd9, 0x4a6f36ce, 0xea9f09d4, 0x29b07cd6,
+	0x31a4b2af, 0x2a3f2331, 0xc6a59430, 0x35a266c0,
+	0x744ebc37, 0xfc82caa6, 0xe090d0b0, 0x33a7d815,
+	0xf104984a, 0x41ecdaf7, 0x7fcd500e, 0x1791f62f,
+	0x764dd68d, 0x43efb04d, 0xccaa4d54, 0xe49604df,
+	0x9ed1b5e3, 0x4c6a881b, 0xc12c1fb8, 0x4665517f,
+	0x9d5eea04, 0x018c355d, 0xfa877473, 0xfb0b412e,
+	0xb3671d5a, 0x92dbd252, 0xe9105633, 0x6dd64713,
+	0x9ad7618c, 0x37a10c7a, 0x59f8148e, 0xeb133c89,
+	0xcea927ee, 0xb761c935, 0xe11ce5ed, 0x7a47b13c,
+	0x9cd2df59, 0x55f2733f, 0x1814ce79, 0x73c737bf,
+	0x53f7cdea, 0x5ffdaa5b, 0xdf3d6f14, 0x7844db86,
+	0xcaaff381, 0xb968c43e, 0x3824342c, 0xc2a3405f,
+	0x161dc372, 0xbce2250c, 0x283c498b, 0xff0d9541,
+	0x39a80171, 0x080cb3de, 0xd8b4e49c, 0x6456c190,
+	0x7bcb8461, 0xd532b670, 0x486c5c74, 0xd0b85742);
+$code.=<<___;
+.byte	0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38
+.byte	0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb
+.byte	0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87
+.byte	0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb
+.byte	0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d
+.byte	0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e
+.byte	0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2
+.byte	0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25
+.byte	0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16
+.byte	0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92
+.byte	0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda
+.byte	0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84
+.byte	0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a
+.byte	0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06
+.byte	0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02
+.byte	0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b
+.byte	0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea
+.byte	0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73
+.byte	0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85
+.byte	0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e
+.byte	0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89
+.byte	0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b
+.byte	0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20
+.byte	0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4
+.byte	0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31
+.byte	0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f
+.byte	0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d
+.byte	0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef
+.byte	0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0
+.byte	0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61
+.byte	0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26
+.byte	0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
+
+
+.globl	.AES_encrypt
+.align	7
+.AES_encrypt:
+	$STU	$sp,-$FRAME($sp)
+	mflr	r0
+
+	$PUSH	$toc,`$FRAME-$SIZE_T*20`($sp)
+	$PUSH	r13,`$FRAME-$SIZE_T*19`($sp)
+	$PUSH	r14,`$FRAME-$SIZE_T*18`($sp)
+	$PUSH	r15,`$FRAME-$SIZE_T*17`($sp)
+	$PUSH	r16,`$FRAME-$SIZE_T*16`($sp)
+	$PUSH	r17,`$FRAME-$SIZE_T*15`($sp)
+	$PUSH	r18,`$FRAME-$SIZE_T*14`($sp)
+	$PUSH	r19,`$FRAME-$SIZE_T*13`($sp)
+	$PUSH	r20,`$FRAME-$SIZE_T*12`($sp)
+	$PUSH	r21,`$FRAME-$SIZE_T*11`($sp)
+	$PUSH	r22,`$FRAME-$SIZE_T*10`($sp)
+	$PUSH	r23,`$FRAME-$SIZE_T*9`($sp)
+	$PUSH	r24,`$FRAME-$SIZE_T*8`($sp)
+	$PUSH	r25,`$FRAME-$SIZE_T*7`($sp)
+	$PUSH	r26,`$FRAME-$SIZE_T*6`($sp)
+	$PUSH	r27,`$FRAME-$SIZE_T*5`($sp)
+	$PUSH	r28,`$FRAME-$SIZE_T*4`($sp)
+	$PUSH	r29,`$FRAME-$SIZE_T*3`($sp)
+	$PUSH	r30,`$FRAME-$SIZE_T*2`($sp)
+	$PUSH	r31,`$FRAME-$SIZE_T*1`($sp)
+	$PUSH	r0,`$FRAME+$LRSAVE`($sp)
+
+	andi.	$t0,$inp,3
+	andi.	$t1,$out,3
+	or.	$t0,$t0,$t1
+	bne	Lenc_unaligned
+
+Lenc_unaligned_ok:
+	lwz	$s0,0($inp)
+	lwz	$s1,4($inp)
+	lwz	$s2,8($inp)
+	lwz	$s3,12($inp)
+	bl	LAES_Te
+	bl	Lppc_AES_encrypt_compact
+	stw	$s0,0($out)
+	stw	$s1,4($out)
+	stw	$s2,8($out)
+	stw	$s3,12($out)
+	b	Lenc_done
+
+Lenc_unaligned:
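+	# take the byte-wise path only if the 16-byte input or output block
+	# would straddle a 4KB page boundary; otherwise misaligned word
+	# accesses are used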
+	subfic	$t0,$inp,4096
+	subfic	$t1,$out,4096
+	andi.	$t0,$t0,4096-16
+	beq	Lenc_xpage
+	andi.	$t1,$t1,4096-16
+	bne	Lenc_unaligned_ok
+
+Lenc_xpage:
+	lbz	$acc00,0($inp)
+	lbz	$acc01,1($inp)
+	lbz	$acc02,2($inp)
+	lbz	$s0,3($inp)
+	lbz	$acc04,4($inp)
+	lbz	$acc05,5($inp)
+	lbz	$acc06,6($inp)
+	lbz	$s1,7($inp)
+	lbz	$acc08,8($inp)
+	lbz	$acc09,9($inp)
+	lbz	$acc10,10($inp)
+	insrwi	$s0,$acc00,8,0
+	lbz	$s2,11($inp)
+	insrwi	$s1,$acc04,8,0
+	lbz	$acc12,12($inp)
+	insrwi	$s0,$acc01,8,8
+	lbz	$acc13,13($inp)
+	insrwi	$s1,$acc05,8,8
+	lbz	$acc14,14($inp)
+	insrwi	$s0,$acc02,8,16
+	lbz	$s3,15($inp)
+	insrwi	$s1,$acc06,8,16
+	insrwi	$s2,$acc08,8,0
+	insrwi	$s3,$acc12,8,0
+	insrwi	$s2,$acc09,8,8
+	insrwi	$s3,$acc13,8,8
+	insrwi	$s2,$acc10,8,16
+	insrwi	$s3,$acc14,8,16
+
+	bl	LAES_Te
+	bl	Lppc_AES_encrypt_compact
+
+	extrwi	$acc00,$s0,8,0
+	extrwi	$acc01,$s0,8,8
+	stb	$acc00,0($out)
+	extrwi	$acc02,$s0,8,16
+	stb	$acc01,1($out)
+	stb	$acc02,2($out)
+	extrwi	$acc04,$s1,8,0
+	stb	$s0,3($out)
+	extrwi	$acc05,$s1,8,8
+	stb	$acc04,4($out)
+	extrwi	$acc06,$s1,8,16
+	stb	$acc05,5($out)
+	stb	$acc06,6($out)
+	extrwi	$acc08,$s2,8,0
+	stb	$s1,7($out)
+	extrwi	$acc09,$s2,8,8
+	stb	$acc08,8($out)
+	extrwi	$acc10,$s2,8,16
+	stb	$acc09,9($out)
+	stb	$acc10,10($out)
+	extrwi	$acc12,$s3,8,0
+	stb	$s2,11($out)
+	extrwi	$acc13,$s3,8,8
+	stb	$acc12,12($out)
+	extrwi	$acc14,$s3,8,16
+	stb	$acc13,13($out)
+	stb	$acc14,14($out)
+	stb	$s3,15($out)
+
+Lenc_done:
+	$POP	r0,`$FRAME+$LRSAVE`($sp)
+	$POP	$toc,`$FRAME-$SIZE_T*20`($sp)
+	$POP	r13,`$FRAME-$SIZE_T*19`($sp)
+	$POP	r14,`$FRAME-$SIZE_T*18`($sp)
+	$POP	r15,`$FRAME-$SIZE_T*17`($sp)
+	$POP	r16,`$FRAME-$SIZE_T*16`($sp)
+	$POP	r17,`$FRAME-$SIZE_T*15`($sp)
+	$POP	r18,`$FRAME-$SIZE_T*14`($sp)
+	$POP	r19,`$FRAME-$SIZE_T*13`($sp)
+	$POP	r20,`$FRAME-$SIZE_T*12`($sp)
+	$POP	r21,`$FRAME-$SIZE_T*11`($sp)
+	$POP	r22,`$FRAME-$SIZE_T*10`($sp)
+	$POP	r23,`$FRAME-$SIZE_T*9`($sp)
+	$POP	r24,`$FRAME-$SIZE_T*8`($sp)
+	$POP	r25,`$FRAME-$SIZE_T*7`($sp)
+	$POP	r26,`$FRAME-$SIZE_T*6`($sp)
+	$POP	r27,`$FRAME-$SIZE_T*5`($sp)
+	$POP	r28,`$FRAME-$SIZE_T*4`($sp)
+	$POP	r29,`$FRAME-$SIZE_T*3`($sp)
+	$POP	r30,`$FRAME-$SIZE_T*2`($sp)
+	$POP	r31,`$FRAME-$SIZE_T*1`($sp)
+	mtlr	r0
+	addi	$sp,$sp,$FRAME
+	blr
+	.long	0
+	.byte	0,12,4,1,0x80,18,3,0
+	.long	0
+
+.align	5
+Lppc_AES_encrypt:
+	lwz	$acc00,240($key)
+	addi	$Tbl1,$Tbl0,3
+	lwz	$t0,0($key)
+	addi	$Tbl2,$Tbl0,2
+	lwz	$t1,4($key)
+	addi	$Tbl3,$Tbl0,1
+	lwz	$t2,8($key)
+	addi	$acc00,$acc00,-1
+	lwz	$t3,12($key)
+	addi	$key,$key,16
+	xor	$s0,$s0,$t0
+	xor	$s1,$s1,$t1
+	xor	$s2,$s2,$t2
+	xor	$s3,$s3,$t3
+	mtctr	$acc00
+.align	4
+Lenc_loop:
+	rlwinm	$acc00,$s0,`32-24+3`,21,28
+	rlwinm	$acc01,$s1,`32-24+3`,21,28
+	rlwinm	$acc02,$s2,`32-24+3`,21,28
+	rlwinm	$acc03,$s3,`32-24+3`,21,28
+	lwz	$t0,0($key)
+	rlwinm	$acc04,$s1,`32-16+3`,21,28
+	lwz	$t1,4($key)
+	rlwinm	$acc05,$s2,`32-16+3`,21,28
+	lwz	$t2,8($key)
+	rlwinm	$acc06,$s3,`32-16+3`,21,28
+	lwz	$t3,12($key)
+	rlwinm	$acc07,$s0,`32-16+3`,21,28
+	lwzx	$acc00,$Tbl0,$acc00
+	rlwinm	$acc08,$s2,`32-8+3`,21,28
+	lwzx	$acc01,$Tbl0,$acc01
+	rlwinm	$acc09,$s3,`32-8+3`,21,28
+	lwzx	$acc02,$Tbl0,$acc02
+	rlwinm	$acc10,$s0,`32-8+3`,21,28
+	lwzx	$acc03,$Tbl0,$acc03
+	rlwinm	$acc11,$s1,`32-8+3`,21,28
+	lwzx	$acc04,$Tbl1,$acc04
+	rlwinm	$acc12,$s3,`0+3`,21,28
+	lwzx	$acc05,$Tbl1,$acc05
+	rlwinm	$acc13,$s0,`0+3`,21,28
+	lwzx	$acc06,$Tbl1,$acc06
+	rlwinm	$acc14,$s1,`0+3`,21,28
+	lwzx	$acc07,$Tbl1,$acc07
+	rlwinm	$acc15,$s2,`0+3`,21,28
+	lwzx	$acc08,$Tbl2,$acc08
+	xor	$t0,$t0,$acc00
+	lwzx	$acc09,$Tbl2,$acc09
+	xor	$t1,$t1,$acc01
+	lwzx	$acc10,$Tbl2,$acc10
+	xor	$t2,$t2,$acc02
+	lwzx	$acc11,$Tbl2,$acc11
+	xor	$t3,$t3,$acc03
+	lwzx	$acc12,$Tbl3,$acc12
+	xor	$t0,$t0,$acc04
+	lwzx	$acc13,$Tbl3,$acc13
+	xor	$t1,$t1,$acc05
+	lwzx	$acc14,$Tbl3,$acc14
+	xor	$t2,$t2,$acc06
+	lwzx	$acc15,$Tbl3,$acc15
+	xor	$t3,$t3,$acc07
+	xor	$t0,$t0,$acc08
+	xor	$t1,$t1,$acc09
+	xor	$t2,$t2,$acc10
+	xor	$t3,$t3,$acc11
+	xor	$s0,$t0,$acc12
+	xor	$s1,$t1,$acc13
+	xor	$s2,$t2,$acc14
+	xor	$s3,$t3,$acc15
+	addi	$key,$key,16
+	bdnz-	Lenc_loop
+
+	addi	$Tbl2,$Tbl0,2048
+	nop
+	lwz	$t0,0($key)
+	rlwinm	$acc00,$s0,`32-24`,24,31
+	lwz	$t1,4($key)
+	rlwinm	$acc01,$s1,`32-24`,24,31
+	lwz	$t2,8($key)
+	rlwinm	$acc02,$s2,`32-24`,24,31
+	lwz	$t3,12($key)
+	rlwinm	$acc03,$s3,`32-24`,24,31
+	lwz	$acc08,`2048+0`($Tbl0)	! prefetch Te4
+	rlwinm	$acc04,$s1,`32-16`,24,31
+	lwz	$acc09,`2048+32`($Tbl0)
+	rlwinm	$acc05,$s2,`32-16`,24,31
+	lwz	$acc10,`2048+64`($Tbl0)
+	rlwinm	$acc06,$s3,`32-16`,24,31
+	lwz	$acc11,`2048+96`($Tbl0)
+	rlwinm	$acc07,$s0,`32-16`,24,31
+	lwz	$acc12,`2048+128`($Tbl0)
+	rlwinm	$acc08,$s2,`32-8`,24,31
+	lwz	$acc13,`2048+160`($Tbl0)
+	rlwinm	$acc09,$s3,`32-8`,24,31
+	lwz	$acc14,`2048+192`($Tbl0)
+	rlwinm	$acc10,$s0,`32-8`,24,31
+	lwz	$acc15,`2048+224`($Tbl0)
+	rlwinm	$acc11,$s1,`32-8`,24,31
+	lbzx	$acc00,$Tbl2,$acc00
+	rlwinm	$acc12,$s3,`0`,24,31
+	lbzx	$acc01,$Tbl2,$acc01
+	rlwinm	$acc13,$s0,`0`,24,31
+	lbzx	$acc02,$Tbl2,$acc02
+	rlwinm	$acc14,$s1,`0`,24,31
+	lbzx	$acc03,$Tbl2,$acc03
+	rlwinm	$acc15,$s2,`0`,24,31
+	lbzx	$acc04,$Tbl2,$acc04
+	rlwinm	$s0,$acc00,24,0,7
+	lbzx	$acc05,$Tbl2,$acc05
+	rlwinm	$s1,$acc01,24,0,7
+	lbzx	$acc06,$Tbl2,$acc06
+	rlwinm	$s2,$acc02,24,0,7
+	lbzx	$acc07,$Tbl2,$acc07
+	rlwinm	$s3,$acc03,24,0,7
+	lbzx	$acc08,$Tbl2,$acc08
+	rlwimi	$s0,$acc04,16,8,15
+	lbzx	$acc09,$Tbl2,$acc09
+	rlwimi	$s1,$acc05,16,8,15
+	lbzx	$acc10,$Tbl2,$acc10
+	rlwimi	$s2,$acc06,16,8,15
+	lbzx	$acc11,$Tbl2,$acc11
+	rlwimi	$s3,$acc07,16,8,15
+	lbzx	$acc12,$Tbl2,$acc12
+	rlwimi	$s0,$acc08,8,16,23
+	lbzx	$acc13,$Tbl2,$acc13
+	rlwimi	$s1,$acc09,8,16,23
+	lbzx	$acc14,$Tbl2,$acc14
+	rlwimi	$s2,$acc10,8,16,23
+	lbzx	$acc15,$Tbl2,$acc15
+	rlwimi	$s3,$acc11,8,16,23
+	or	$s0,$s0,$acc12
+	or	$s1,$s1,$acc13
+	or	$s2,$s2,$acc14
+	or	$s3,$s3,$acc15
+	xor	$s0,$s0,$t0
+	xor	$s1,$s1,$t1
+	xor	$s2,$s2,$t2
+	xor	$s3,$s3,$t3
+	blr
+	.long	0
+	.byte	0,12,0x14,0,0,0,0,0
+
+.align	4
+Lppc_AES_encrypt_compact:
+	lwz	$acc00,240($key)
+	addi	$Tbl1,$Tbl0,2048
+	lwz	$t0,0($key)
+	lis	$mask80,0x8080
+	lwz	$t1,4($key)
+	lis	$mask1b,0x1b1b
+	lwz	$t2,8($key)
+	ori	$mask80,$mask80,0x8080
+	lwz	$t3,12($key)
+	ori	$mask1b,$mask1b,0x1b1b
+	addi	$key,$key,16
+	mtctr	$acc00
+.align	4
+Lenc_compact_loop:
+	xor	$s0,$s0,$t0
+	xor	$s1,$s1,$t1
+	rlwinm	$acc00,$s0,`32-24`,24,31
+	xor	$s2,$s2,$t2
+	rlwinm	$acc01,$s1,`32-24`,24,31
+	xor	$s3,$s3,$t3
+	rlwinm	$acc02,$s2,`32-24`,24,31
+	rlwinm	$acc03,$s3,`32-24`,24,31
+	rlwinm	$acc04,$s1,`32-16`,24,31
+	rlwinm	$acc05,$s2,`32-16`,24,31
+	rlwinm	$acc06,$s3,`32-16`,24,31
+	rlwinm	$acc07,$s0,`32-16`,24,31
+	lbzx	$acc00,$Tbl1,$acc00
+	rlwinm	$acc08,$s2,`32-8`,24,31
+	lbzx	$acc01,$Tbl1,$acc01
+	rlwinm	$acc09,$s3,`32-8`,24,31
+	lbzx	$acc02,$Tbl1,$acc02
+	rlwinm	$acc10,$s0,`32-8`,24,31
+	lbzx	$acc03,$Tbl1,$acc03
+	rlwinm	$acc11,$s1,`32-8`,24,31
+	lbzx	$acc04,$Tbl1,$acc04
+	rlwinm	$acc12,$s3,`0`,24,31
+	lbzx	$acc05,$Tbl1,$acc05
+	rlwinm	$acc13,$s0,`0`,24,31
+	lbzx	$acc06,$Tbl1,$acc06
+	rlwinm	$acc14,$s1,`0`,24,31
+	lbzx	$acc07,$Tbl1,$acc07
+	rlwinm	$acc15,$s2,`0`,24,31
+	lbzx	$acc08,$Tbl1,$acc08
+	rlwinm	$s0,$acc00,24,0,7
+	lbzx	$acc09,$Tbl1,$acc09
+	rlwinm	$s1,$acc01,24,0,7
+	lbzx	$acc10,$Tbl1,$acc10
+	rlwinm	$s2,$acc02,24,0,7
+	lbzx	$acc11,$Tbl1,$acc11
+	rlwinm	$s3,$acc03,24,0,7
+	lbzx	$acc12,$Tbl1,$acc12
+	rlwimi	$s0,$acc04,16,8,15
+	lbzx	$acc13,$Tbl1,$acc13
+	rlwimi	$s1,$acc05,16,8,15
+	lbzx	$acc14,$Tbl1,$acc14
+	rlwimi	$s2,$acc06,16,8,15
+	lbzx	$acc15,$Tbl1,$acc15
+	rlwimi	$s3,$acc07,16,8,15
+	rlwimi	$s0,$acc08,8,16,23
+	rlwimi	$s1,$acc09,8,16,23
+	rlwimi	$s2,$acc10,8,16,23
+	rlwimi	$s3,$acc11,8,16,23
+	lwz	$t0,0($key)
+	or	$s0,$s0,$acc12
+	lwz	$t1,4($key)
+	or	$s1,$s1,$acc13
+	lwz	$t2,8($key)
+	or	$s2,$s2,$acc14
+	lwz	$t3,12($key)
+	or	$s3,$s3,$acc15
+
+	addi	$key,$key,16
+	bdz	Lenc_compact_done
+
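+	# MixColumns: the 0x80808080/0x1b1b1b1b masks compute xtime (times 2
+	# in GF(2^8)) of all four bytes of a word at once; the rotates below
+	# then fold the doubled and original bytes together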
+	and	$acc00,$s0,$mask80	# r1=r0&0x80808080
+	and	$acc01,$s1,$mask80
+	and	$acc02,$s2,$mask80
+	and	$acc03,$s3,$mask80
+	srwi	$acc04,$acc00,7		# r1>>7
+	andc	$acc08,$s0,$mask80	# r0&0x7f7f7f7f
+	srwi	$acc05,$acc01,7
+	andc	$acc09,$s1,$mask80
+	srwi	$acc06,$acc02,7
+	andc	$acc10,$s2,$mask80
+	srwi	$acc07,$acc03,7
+	andc	$acc11,$s3,$mask80
+	sub	$acc00,$acc00,$acc04	# r1-(r1>>7)
+	sub	$acc01,$acc01,$acc05
+	sub	$acc02,$acc02,$acc06
+	sub	$acc03,$acc03,$acc07
+	add	$acc08,$acc08,$acc08	# (r0&0x7f7f7f7f)<<1
+	add	$acc09,$acc09,$acc09
+	add	$acc10,$acc10,$acc10
+	add	$acc11,$acc11,$acc11
+	and	$acc00,$acc00,$mask1b	# (r1-(r1>>7))&0x1b1b1b1b
+	and	$acc01,$acc01,$mask1b
+	and	$acc02,$acc02,$mask1b
+	and	$acc03,$acc03,$mask1b
+	xor	$acc00,$acc00,$acc08	# r2
+	xor	$acc01,$acc01,$acc09
+	 rotlwi	$acc12,$s0,16		# ROTATE(r0,16)
+	xor	$acc02,$acc02,$acc10
+	 rotlwi	$acc13,$s1,16
+	xor	$acc03,$acc03,$acc11
+	 rotlwi	$acc14,$s2,16
+
+	xor	$s0,$s0,$acc00		# r0^r2
+	rotlwi	$acc15,$s3,16
+	xor	$s1,$s1,$acc01
+	rotrwi	$s0,$s0,24		# ROTATE(r2^r0,24)
+	xor	$s2,$s2,$acc02
+	rotrwi	$s1,$s1,24
+	xor	$s3,$s3,$acc03
+	rotrwi	$s2,$s2,24
+	xor	$s0,$s0,$acc00		# ROTATE(r2^r0,24)^r2
+	rotrwi	$s3,$s3,24
+	xor	$s1,$s1,$acc01
+	xor	$s2,$s2,$acc02
+	xor	$s3,$s3,$acc03
+	rotlwi	$acc08,$acc12,8		# ROTATE(r0,24)
+	xor	$s0,$s0,$acc12		#
+	rotlwi	$acc09,$acc13,8
+	xor	$s1,$s1,$acc13
+	rotlwi	$acc10,$acc14,8
+	xor	$s2,$s2,$acc14
+	rotlwi	$acc11,$acc15,8
+	xor	$s3,$s3,$acc15
+	xor	$s0,$s0,$acc08		#
+	xor	$s1,$s1,$acc09
+	xor	$s2,$s2,$acc10
+	xor	$s3,$s3,$acc11
+
+	b	Lenc_compact_loop
+.align	4
+Lenc_compact_done:
+	xor	$s0,$s0,$t0
+	xor	$s1,$s1,$t1
+	xor	$s2,$s2,$t2
+	xor	$s3,$s3,$t3
+	blr
+	.long	0
+	.byte	0,12,0x14,0,0,0,0,0
+
+.globl	.AES_decrypt
+.align	7
+.AES_decrypt:
+	$STU	$sp,-$FRAME($sp)
+	mflr	r0
+
+	$PUSH	$toc,`$FRAME-$SIZE_T*20`($sp)
+	$PUSH	r13,`$FRAME-$SIZE_T*19`($sp)
+	$PUSH	r14,`$FRAME-$SIZE_T*18`($sp)
+	$PUSH	r15,`$FRAME-$SIZE_T*17`($sp)
+	$PUSH	r16,`$FRAME-$SIZE_T*16`($sp)
+	$PUSH	r17,`$FRAME-$SIZE_T*15`($sp)
+	$PUSH	r18,`$FRAME-$SIZE_T*14`($sp)
+	$PUSH	r19,`$FRAME-$SIZE_T*13`($sp)
+	$PUSH	r20,`$FRAME-$SIZE_T*12`($sp)
+	$PUSH	r21,`$FRAME-$SIZE_T*11`($sp)
+	$PUSH	r22,`$FRAME-$SIZE_T*10`($sp)
+	$PUSH	r23,`$FRAME-$SIZE_T*9`($sp)
+	$PUSH	r24,`$FRAME-$SIZE_T*8`($sp)
+	$PUSH	r25,`$FRAME-$SIZE_T*7`($sp)
+	$PUSH	r26,`$FRAME-$SIZE_T*6`($sp)
+	$PUSH	r27,`$FRAME-$SIZE_T*5`($sp)
+	$PUSH	r28,`$FRAME-$SIZE_T*4`($sp)
+	$PUSH	r29,`$FRAME-$SIZE_T*3`($sp)
+	$PUSH	r30,`$FRAME-$SIZE_T*2`($sp)
+	$PUSH	r31,`$FRAME-$SIZE_T*1`($sp)
+	$PUSH	r0,`$FRAME+$LRSAVE`($sp)
+
+	andi.	$t0,$inp,3
+	andi.	$t1,$out,3
+	or.	$t0,$t0,$t1
+	bne	Ldec_unaligned
+
+Ldec_unaligned_ok:
+	lwz	$s0,0($inp)
+	lwz	$s1,4($inp)
+	lwz	$s2,8($inp)
+	lwz	$s3,12($inp)
+	bl	LAES_Td
+	bl	Lppc_AES_decrypt_compact
+	stw	$s0,0($out)
+	stw	$s1,4($out)
+	stw	$s2,8($out)
+	stw	$s3,12($out)
+	b	Ldec_done
+
+Ldec_unaligned:
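+	# same page-boundary test as in .AES_encrypt above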
+	subfic	$t0,$inp,4096
+	subfic	$t1,$out,4096
+	andi.	$t0,$t0,4096-16
+	beq	Ldec_xpage
+	andi.	$t1,$t1,4096-16
+	bne	Ldec_unaligned_ok
+
+Ldec_xpage:
+	lbz	$acc00,0($inp)
+	lbz	$acc01,1($inp)
+	lbz	$acc02,2($inp)
+	lbz	$s0,3($inp)
+	lbz	$acc04,4($inp)
+	lbz	$acc05,5($inp)
+	lbz	$acc06,6($inp)
+	lbz	$s1,7($inp)
+	lbz	$acc08,8($inp)
+	lbz	$acc09,9($inp)
+	lbz	$acc10,10($inp)
+	insrwi	$s0,$acc00,8,0
+	lbz	$s2,11($inp)
+	insrwi	$s1,$acc04,8,0
+	lbz	$acc12,12($inp)
+	insrwi	$s0,$acc01,8,8
+	lbz	$acc13,13($inp)
+	insrwi	$s1,$acc05,8,8
+	lbz	$acc14,14($inp)
+	insrwi	$s0,$acc02,8,16
+	lbz	$s3,15($inp)
+	insrwi	$s1,$acc06,8,16
+	insrwi	$s2,$acc08,8,0
+	insrwi	$s3,$acc12,8,0
+	insrwi	$s2,$acc09,8,8
+	insrwi	$s3,$acc13,8,8
+	insrwi	$s2,$acc10,8,16
+	insrwi	$s3,$acc14,8,16
+
+	bl	LAES_Td
+	bl	Lppc_AES_decrypt_compact
+
+	extrwi	$acc00,$s0,8,0
+	extrwi	$acc01,$s0,8,8
+	stb	$acc00,0($out)
+	extrwi	$acc02,$s0,8,16
+	stb	$acc01,1($out)
+	stb	$acc02,2($out)
+	extrwi	$acc04,$s1,8,0
+	stb	$s0,3($out)
+	extrwi	$acc05,$s1,8,8
+	stb	$acc04,4($out)
+	extrwi	$acc06,$s1,8,16
+	stb	$acc05,5($out)
+	stb	$acc06,6($out)
+	extrwi	$acc08,$s2,8,0
+	stb	$s1,7($out)
+	extrwi	$acc09,$s2,8,8
+	stb	$acc08,8($out)
+	extrwi	$acc10,$s2,8,16
+	stb	$acc09,9($out)
+	stb	$acc10,10($out)
+	extrwi	$acc12,$s3,8,0
+	stb	$s2,11($out)
+	extrwi	$acc13,$s3,8,8
+	stb	$acc12,12($out)
+	extrwi	$acc14,$s3,8,16
+	stb	$acc13,13($out)
+	stb	$acc14,14($out)
+	stb	$s3,15($out)
+
+Ldec_done:
+	$POP	r0,`$FRAME+$LRSAVE`($sp)
+	$POP	$toc,`$FRAME-$SIZE_T*20`($sp)
+	$POP	r13,`$FRAME-$SIZE_T*19`($sp)
+	$POP	r14,`$FRAME-$SIZE_T*18`($sp)
+	$POP	r15,`$FRAME-$SIZE_T*17`($sp)
+	$POP	r16,`$FRAME-$SIZE_T*16`($sp)
+	$POP	r17,`$FRAME-$SIZE_T*15`($sp)
+	$POP	r18,`$FRAME-$SIZE_T*14`($sp)
+	$POP	r19,`$FRAME-$SIZE_T*13`($sp)
+	$POP	r20,`$FRAME-$SIZE_T*12`($sp)
+	$POP	r21,`$FRAME-$SIZE_T*11`($sp)
+	$POP	r22,`$FRAME-$SIZE_T*10`($sp)
+	$POP	r23,`$FRAME-$SIZE_T*9`($sp)
+	$POP	r24,`$FRAME-$SIZE_T*8`($sp)
+	$POP	r25,`$FRAME-$SIZE_T*7`($sp)
+	$POP	r26,`$FRAME-$SIZE_T*6`($sp)
+	$POP	r27,`$FRAME-$SIZE_T*5`($sp)
+	$POP	r28,`$FRAME-$SIZE_T*4`($sp)
+	$POP	r29,`$FRAME-$SIZE_T*3`($sp)
+	$POP	r30,`$FRAME-$SIZE_T*2`($sp)
+	$POP	r31,`$FRAME-$SIZE_T*1`($sp)
+	mtlr	r0
+	addi	$sp,$sp,$FRAME
+	blr
+	.long	0
+	.byte	0,12,4,1,0x80,18,3,0
+	.long	0
+
+.align	5
+Lppc_AES_decrypt:
+	lwz	$acc00,240($key)
+	addi	$Tbl1,$Tbl0,3
+	lwz	$t0,0($key)
+	addi	$Tbl2,$Tbl0,2
+	lwz	$t1,4($key)
+	addi	$Tbl3,$Tbl0,1
+	lwz	$t2,8($key)
+	addi	$acc00,$acc00,-1
+	lwz	$t3,12($key)
+	addi	$key,$key,16
+	xor	$s0,$s0,$t0
+	xor	$s1,$s1,$t1
+	xor	$s2,$s2,$t2
+	xor	$s3,$s3,$t3
+	mtctr	$acc00
+.align	4
+Ldec_loop:
+	rlwinm	$acc00,$s0,`32-24+3`,21,28
+	rlwinm	$acc01,$s1,`32-24+3`,21,28
+	rlwinm	$acc02,$s2,`32-24+3`,21,28
+	rlwinm	$acc03,$s3,`32-24+3`,21,28
+	lwz	$t0,0($key)
+	rlwinm	$acc04,$s3,`32-16+3`,21,28
+	lwz	$t1,4($key)
+	rlwinm	$acc05,$s0,`32-16+3`,21,28
+	lwz	$t2,8($key)
+	rlwinm	$acc06,$s1,`32-16+3`,21,28
+	lwz	$t3,12($key)
+	rlwinm	$acc07,$s2,`32-16+3`,21,28
+	lwzx	$acc00,$Tbl0,$acc00
+	rlwinm	$acc08,$s2,`32-8+3`,21,28
+	lwzx	$acc01,$Tbl0,$acc01
+	rlwinm	$acc09,$s3,`32-8+3`,21,28
+	lwzx	$acc02,$Tbl0,$acc02
+	rlwinm	$acc10,$s0,`32-8+3`,21,28
+	lwzx	$acc03,$Tbl0,$acc03
+	rlwinm	$acc11,$s1,`32-8+3`,21,28
+	lwzx	$acc04,$Tbl1,$acc04
+	rlwinm	$acc12,$s1,`0+3`,21,28
+	lwzx	$acc05,$Tbl1,$acc05
+	rlwinm	$acc13,$s2,`0+3`,21,28
+	lwzx	$acc06,$Tbl1,$acc06
+	rlwinm	$acc14,$s3,`0+3`,21,28
+	lwzx	$acc07,$Tbl1,$acc07
+	rlwinm	$acc15,$s0,`0+3`,21,28
+	lwzx	$acc08,$Tbl2,$acc08
+	xor	$t0,$t0,$acc00
+	lwzx	$acc09,$Tbl2,$acc09
+	xor	$t1,$t1,$acc01
+	lwzx	$acc10,$Tbl2,$acc10
+	xor	$t2,$t2,$acc02
+	lwzx	$acc11,$Tbl2,$acc11
+	xor	$t3,$t3,$acc03
+	lwzx	$acc12,$Tbl3,$acc12
+	xor	$t0,$t0,$acc04
+	lwzx	$acc13,$Tbl3,$acc13
+	xor	$t1,$t1,$acc05
+	lwzx	$acc14,$Tbl3,$acc14
+	xor	$t2,$t2,$acc06
+	lwzx	$acc15,$Tbl3,$acc15
+	xor	$t3,$t3,$acc07
+	xor	$t0,$t0,$acc08
+	xor	$t1,$t1,$acc09
+	xor	$t2,$t2,$acc10
+	xor	$t3,$t3,$acc11
+	xor	$s0,$t0,$acc12
+	xor	$s1,$t1,$acc13
+	xor	$s2,$t2,$acc14
+	xor	$s3,$t3,$acc15
+	addi	$key,$key,16
+	bdnz-	Ldec_loop
+
+	addi	$Tbl2,$Tbl0,2048
+	nop
+	lwz	$t0,0($key)
+	rlwinm	$acc00,$s0,`32-24`,24,31
+	lwz	$t1,4($key)
+	rlwinm	$acc01,$s1,`32-24`,24,31
+	lwz	$t2,8($key)
+	rlwinm	$acc02,$s2,`32-24`,24,31
+	lwz	$t3,12($key)
+	rlwinm	$acc03,$s3,`32-24`,24,31
+	lwz	$acc08,`2048+0`($Tbl0)	! prefetch Td4
+	rlwinm	$acc04,$s3,`32-16`,24,31
+	lwz	$acc09,`2048+32`($Tbl0)
+	rlwinm	$acc05,$s0,`32-16`,24,31
+	lwz	$acc10,`2048+64`($Tbl0)
+	lbzx	$acc00,$Tbl2,$acc00
+	lwz	$acc11,`2048+96`($Tbl0)
+	lbzx	$acc01,$Tbl2,$acc01
+	lwz	$acc12,`2048+128`($Tbl0)
+	rlwinm	$acc06,$s1,`32-16`,24,31
+	lwz	$acc13,`2048+160`($Tbl0)
+	rlwinm	$acc07,$s2,`32-16`,24,31
+	lwz	$acc14,`2048+192`($Tbl0)
+	rlwinm	$acc08,$s2,`32-8`,24,31
+	lwz	$acc15,`2048+224`($Tbl0)
+	rlwinm	$acc09,$s3,`32-8`,24,31
+	lbzx	$acc02,$Tbl2,$acc02
+	rlwinm	$acc10,$s0,`32-8`,24,31
+	lbzx	$acc03,$Tbl2,$acc03
+	rlwinm	$acc11,$s1,`32-8`,24,31
+	lbzx	$acc04,$Tbl2,$acc04
+	rlwinm	$acc12,$s1,`0`,24,31
+	lbzx	$acc05,$Tbl2,$acc05
+	rlwinm	$acc13,$s2,`0`,24,31
+	lbzx	$acc06,$Tbl2,$acc06
+	rlwinm	$acc14,$s3,`0`,24,31
+	lbzx	$acc07,$Tbl2,$acc07
+	rlwinm	$acc15,$s0,`0`,24,31
+	lbzx	$acc08,$Tbl2,$acc08
+	rlwinm	$s0,$acc00,24,0,7
+	lbzx	$acc09,$Tbl2,$acc09
+	rlwinm	$s1,$acc01,24,0,7
+	lbzx	$acc10,$Tbl2,$acc10
+	rlwinm	$s2,$acc02,24,0,7
+	lbzx	$acc11,$Tbl2,$acc11
+	rlwinm	$s3,$acc03,24,0,7
+	lbzx	$acc12,$Tbl2,$acc12
+	rlwimi	$s0,$acc04,16,8,15
+	lbzx	$acc13,$Tbl2,$acc13
+	rlwimi	$s1,$acc05,16,8,15
+	lbzx	$acc14,$Tbl2,$acc14
+	rlwimi	$s2,$acc06,16,8,15
+	lbzx	$acc15,$Tbl2,$acc15
+	rlwimi	$s3,$acc07,16,8,15
+	rlwimi	$s0,$acc08,8,16,23
+	rlwimi	$s1,$acc09,8,16,23
+	rlwimi	$s2,$acc10,8,16,23
+	rlwimi	$s3,$acc11,8,16,23
+	or	$s0,$s0,$acc12
+	or	$s1,$s1,$acc13
+	or	$s2,$s2,$acc14
+	or	$s3,$s3,$acc15
+	xor	$s0,$s0,$t0
+	xor	$s1,$s1,$t1
+	xor	$s2,$s2,$t2
+	xor	$s3,$s3,$t3
+	blr
+	.long	0
+	.byte	0,12,0x14,0,0,0,0,0
+
+.align	4
+Lppc_AES_decrypt_compact:
+	lwz	$acc00,240($key)
+	addi	$Tbl1,$Tbl0,2048
+	lwz	$t0,0($key)
+	lis	$mask80,0x8080
+	lwz	$t1,4($key)
+	lis	$mask1b,0x1b1b
+	lwz	$t2,8($key)
+	ori	$mask80,$mask80,0x8080
+	lwz	$t3,12($key)
+	ori	$mask1b,$mask1b,0x1b1b
+	addi	$key,$key,16
+___
+$code.=<<___ if ($SIZE_T==8);
+	insrdi	$mask80,$mask80,32,0
+	insrdi	$mask1b,$mask1b,32,0
+___
+$code.=<<___;
+	mtctr	$acc00
+.align	4
+Ldec_compact_loop:
+	xor	$s0,$s0,$t0
+	xor	$s1,$s1,$t1
+	rlwinm	$acc00,$s0,`32-24`,24,31
+	xor	$s2,$s2,$t2
+	rlwinm	$acc01,$s1,`32-24`,24,31
+	xor	$s3,$s3,$t3
+	rlwinm	$acc02,$s2,`32-24`,24,31
+	rlwinm	$acc03,$s3,`32-24`,24,31
+	rlwinm	$acc04,$s3,`32-16`,24,31
+	rlwinm	$acc05,$s0,`32-16`,24,31
+	rlwinm	$acc06,$s1,`32-16`,24,31
+	rlwinm	$acc07,$s2,`32-16`,24,31
+	lbzx	$acc00,$Tbl1,$acc00
+	rlwinm	$acc08,$s2,`32-8`,24,31
+	lbzx	$acc01,$Tbl1,$acc01
+	rlwinm	$acc09,$s3,`32-8`,24,31
+	lbzx	$acc02,$Tbl1,$acc02
+	rlwinm	$acc10,$s0,`32-8`,24,31
+	lbzx	$acc03,$Tbl1,$acc03
+	rlwinm	$acc11,$s1,`32-8`,24,31
+	lbzx	$acc04,$Tbl1,$acc04
+	rlwinm	$acc12,$s1,`0`,24,31
+	lbzx	$acc05,$Tbl1,$acc05
+	rlwinm	$acc13,$s2,`0`,24,31
+	lbzx	$acc06,$Tbl1,$acc06
+	rlwinm	$acc14,$s3,`0`,24,31
+	lbzx	$acc07,$Tbl1,$acc07
+	rlwinm	$acc15,$s0,`0`,24,31
+	lbzx	$acc08,$Tbl1,$acc08
+	rlwinm	$s0,$acc00,24,0,7
+	lbzx	$acc09,$Tbl1,$acc09
+	rlwinm	$s1,$acc01,24,0,7
+	lbzx	$acc10,$Tbl1,$acc10
+	rlwinm	$s2,$acc02,24,0,7
+	lbzx	$acc11,$Tbl1,$acc11
+	rlwinm	$s3,$acc03,24,0,7
+	lbzx	$acc12,$Tbl1,$acc12
+	rlwimi	$s0,$acc04,16,8,15
+	lbzx	$acc13,$Tbl1,$acc13
+	rlwimi	$s1,$acc05,16,8,15
+	lbzx	$acc14,$Tbl1,$acc14
+	rlwimi	$s2,$acc06,16,8,15
+	lbzx	$acc15,$Tbl1,$acc15
+	rlwimi	$s3,$acc07,16,8,15
+	rlwimi	$s0,$acc08,8,16,23
+	rlwimi	$s1,$acc09,8,16,23
+	rlwimi	$s2,$acc10,8,16,23
+	rlwimi	$s3,$acc11,8,16,23
+	lwz	$t0,0($key)
+	or	$s0,$s0,$acc12
+	lwz	$t1,4($key)
+	or	$s1,$s1,$acc13
+	lwz	$t2,8($key)
+	or	$s2,$s2,$acc14
+	lwz	$t3,12($key)
+	or	$s3,$s3,$acc15
+
+	addi	$key,$key,16
+	bdz	Ldec_compact_done
+___
+$code.=<<___ if ($SIZE_T==8);
+	# vectorized permutation improves decrypt performance by 10%
+	insrdi	$s0,$s1,32,0
+	insrdi	$s2,$s3,32,0
+
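+	# InvMixColumns: r2, r4 and r8 below are the times-2, times-4 and
+	# times-8 multiples of the state bytes, each obtained by one more
+	# masked xtime step; the final xors and rotates assemble the
+	# {9,11,13,14} coefficients from them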
+	and	$acc00,$s0,$mask80	# r1=r0&0x80808080
+	and	$acc02,$s2,$mask80
+	srdi	$acc04,$acc00,7		# r1>>7
+	srdi	$acc06,$acc02,7
+	andc	$acc08,$s0,$mask80	# r0&0x7f7f7f7f
+	andc	$acc10,$s2,$mask80
+	sub	$acc00,$acc00,$acc04	# r1-(r1>>7)
+	sub	$acc02,$acc02,$acc06
+	add	$acc08,$acc08,$acc08	# (r0&0x7f7f7f7f)<<1
+	add	$acc10,$acc10,$acc10
+	and	$acc00,$acc00,$mask1b	# (r1-(r1>>7))&0x1b1b1b1b
+	and	$acc02,$acc02,$mask1b
+	xor	$acc00,$acc00,$acc08	# r2
+	xor	$acc02,$acc02,$acc10
+
+	and	$acc04,$acc00,$mask80	# r1=r2&0x80808080
+	and	$acc06,$acc02,$mask80
+	srdi	$acc08,$acc04,7		# r1>>7
+	srdi	$acc10,$acc06,7
+	andc	$acc12,$acc00,$mask80	# r2&0x7f7f7f7f
+	andc	$acc14,$acc02,$mask80
+	sub	$acc04,$acc04,$acc08	# r1-(r1>>7)
+	sub	$acc06,$acc06,$acc10
+	add	$acc12,$acc12,$acc12	# (r2&0x7f7f7f7f)<<1
+	add	$acc14,$acc14,$acc14
+	and	$acc04,$acc04,$mask1b	# (r1-(r1>>7))&0x1b1b1b1b
+	and	$acc06,$acc06,$mask1b
+	xor	$acc04,$acc04,$acc12	# r4
+	xor	$acc06,$acc06,$acc14
+
+	and	$acc08,$acc04,$mask80	# r1=r4&0x80808080
+	and	$acc10,$acc06,$mask80
+	srdi	$acc12,$acc08,7		# r1>>7
+	srdi	$acc14,$acc10,7
+	sub	$acc08,$acc08,$acc12	# r1-(r1>>7)
+	sub	$acc10,$acc10,$acc14
+	andc	$acc12,$acc04,$mask80	# r4&0x7f7f7f7f
+	andc	$acc14,$acc06,$mask80
+	add	$acc12,$acc12,$acc12	# (r4&0x7f7f7f7f)<<1
+	add	$acc14,$acc14,$acc14
+	and	$acc08,$acc08,$mask1b	# (r1-(r1>>7))&0x1b1b1b1b
+	and	$acc10,$acc10,$mask1b
+	xor	$acc08,$acc08,$acc12	# r8
+	xor	$acc10,$acc10,$acc14
+
+	xor	$acc00,$acc00,$s0	# r2^r0
+	xor	$acc02,$acc02,$s2
+	xor	$acc04,$acc04,$s0	# r4^r0
+	xor	$acc06,$acc06,$s2
+
+	extrdi	$acc01,$acc00,32,0
+	extrdi	$acc03,$acc02,32,0
+	extrdi	$acc05,$acc04,32,0
+	extrdi	$acc07,$acc06,32,0
+	extrdi	$acc09,$acc08,32,0
+	extrdi	$acc11,$acc10,32,0
+___
+$code.=<<___ if ($SIZE_T==4);
+	and	$acc00,$s0,$mask80	# r1=r0&0x80808080
+	and	$acc01,$s1,$mask80
+	and	$acc02,$s2,$mask80
+	and	$acc03,$s3,$mask80
+	srwi	$acc04,$acc00,7		# r1>>7
+	andc	$acc08,$s0,$mask80	# r0&0x7f7f7f7f
+	srwi	$acc05,$acc01,7
+	andc	$acc09,$s1,$mask80
+	srwi	$acc06,$acc02,7
+	andc	$acc10,$s2,$mask80
+	srwi	$acc07,$acc03,7
+	andc	$acc11,$s3,$mask80
+	sub	$acc00,$acc00,$acc04	# r1-(r1>>7)
+	sub	$acc01,$acc01,$acc05
+	sub	$acc02,$acc02,$acc06
+	sub	$acc03,$acc03,$acc07
+	add	$acc08,$acc08,$acc08	# (r0&0x7f7f7f7f)<<1
+	add	$acc09,$acc09,$acc09
+	add	$acc10,$acc10,$acc10
+	add	$acc11,$acc11,$acc11
+	and	$acc00,$acc00,$mask1b	# (r1-(r1>>7))&0x1b1b1b1b
+	and	$acc01,$acc01,$mask1b
+	and	$acc02,$acc02,$mask1b
+	and	$acc03,$acc03,$mask1b
+	xor	$acc00,$acc00,$acc08	# r2
+	xor	$acc01,$acc01,$acc09
+	xor	$acc02,$acc02,$acc10
+	xor	$acc03,$acc03,$acc11
+
+	and	$acc04,$acc00,$mask80	# r1=r2&0x80808080
+	and	$acc05,$acc01,$mask80
+	and	$acc06,$acc02,$mask80
+	and	$acc07,$acc03,$mask80
+	srwi	$acc08,$acc04,7		# r1>>7
+	andc	$acc12,$acc00,$mask80	# r2&0x7f7f7f7f
+	srwi	$acc09,$acc05,7
+	andc	$acc13,$acc01,$mask80
+	srwi	$acc10,$acc06,7
+	andc	$acc14,$acc02,$mask80
+	srwi	$acc11,$acc07,7
+	andc	$acc15,$acc03,$mask80
+	sub	$acc04,$acc04,$acc08	# r1-(r1>>7)
+	sub	$acc05,$acc05,$acc09
+	sub	$acc06,$acc06,$acc10
+	sub	$acc07,$acc07,$acc11
+	add	$acc12,$acc12,$acc12	# (r2&0x7f7f7f7f)<<1
+	add	$acc13,$acc13,$acc13
+	add	$acc14,$acc14,$acc14
+	add	$acc15,$acc15,$acc15
+	and	$acc04,$acc04,$mask1b	# (r1-(r1>>7))&0x1b1b1b1b
+	and	$acc05,$acc05,$mask1b
+	and	$acc06,$acc06,$mask1b
+	and	$acc07,$acc07,$mask1b
+	xor	$acc04,$acc04,$acc12	# r4
+	xor	$acc05,$acc05,$acc13
+	xor	$acc06,$acc06,$acc14
+	xor	$acc07,$acc07,$acc15
+
+	and	$acc08,$acc04,$mask80	# r1=r4&0x80808080
+	and	$acc09,$acc05,$mask80
+	srwi	$acc12,$acc08,7		# r1>>7
+	and	$acc10,$acc06,$mask80
+	srwi	$acc13,$acc09,7
+	and	$acc11,$acc07,$mask80
+	srwi	$acc14,$acc10,7
+	sub	$acc08,$acc08,$acc12	# r1-(r1>>7)
+	srwi	$acc15,$acc11,7
+	sub	$acc09,$acc09,$acc13
+	sub	$acc10,$acc10,$acc14
+	sub	$acc11,$acc11,$acc15
+	andc	$acc12,$acc04,$mask80	# r4&0x7f7f7f7f
+	andc	$acc13,$acc05,$mask80
+	andc	$acc14,$acc06,$mask80
+	andc	$acc15,$acc07,$mask80
+	add	$acc12,$acc12,$acc12	# (r4&0x7f7f7f7f)<<1
+	add	$acc13,$acc13,$acc13
+	add	$acc14,$acc14,$acc14
+	add	$acc15,$acc15,$acc15
+	and	$acc08,$acc08,$mask1b	# (r1-(r1>>7))&0x1b1b1b1b
+	and	$acc09,$acc09,$mask1b
+	and	$acc10,$acc10,$mask1b
+	and	$acc11,$acc11,$mask1b
+	xor	$acc08,$acc08,$acc12	# r8
+	xor	$acc09,$acc09,$acc13
+	xor	$acc10,$acc10,$acc14
+	xor	$acc11,$acc11,$acc15
+
+	xor	$acc00,$acc00,$s0	# r2^r0
+	xor	$acc01,$acc01,$s1
+	xor	$acc02,$acc02,$s2
+	xor	$acc03,$acc03,$s3
+	xor	$acc04,$acc04,$s0	# r4^r0
+	xor	$acc05,$acc05,$s1
+	xor	$acc06,$acc06,$s2
+	xor	$acc07,$acc07,$s3
+___
+$code.=<<___;
+	rotrwi	$s0,$s0,8		# = ROTATE(r0,8)
+	rotrwi	$s1,$s1,8
+	xor	$s0,$s0,$acc00		# ^= r2^r0
+	rotrwi	$s2,$s2,8
+	xor	$s1,$s1,$acc01
+	rotrwi	$s3,$s3,8
+	xor	$s2,$s2,$acc02
+	xor	$s3,$s3,$acc03
+	xor	$acc00,$acc00,$acc08
+	xor	$acc01,$acc01,$acc09
+	xor	$acc02,$acc02,$acc10
+	xor	$acc03,$acc03,$acc11
+	xor	$s0,$s0,$acc04		# ^= r4^r0
+	rotrwi	$acc00,$acc00,24
+	xor	$s1,$s1,$acc05
+	rotrwi	$acc01,$acc01,24
+	xor	$s2,$s2,$acc06
+	rotrwi	$acc02,$acc02,24
+	xor	$s3,$s3,$acc07
+	rotrwi	$acc03,$acc03,24
+	xor	$acc04,$acc04,$acc08
+	xor	$acc05,$acc05,$acc09
+	xor	$acc06,$acc06,$acc10
+	xor	$acc07,$acc07,$acc11
+	xor	$s0,$s0,$acc08		# ^= r8 [^((r4^r0)^(r2^r0)=r4^r2)]
+	rotrwi	$acc04,$acc04,16
+	xor	$s1,$s1,$acc09
+	rotrwi	$acc05,$acc05,16
+	xor	$s2,$s2,$acc10
+	rotrwi	$acc06,$acc06,16
+	xor	$s3,$s3,$acc11
+	rotrwi	$acc07,$acc07,16
+	xor	$s0,$s0,$acc00		# ^= ROTATE(r8^r2^r0,24)
+	rotrwi	$acc08,$acc08,8
+	xor	$s1,$s1,$acc01
+	rotrwi	$acc09,$acc09,8
+	xor	$s2,$s2,$acc02
+	rotrwi	$acc10,$acc10,8
+	xor	$s3,$s3,$acc03
+	rotrwi	$acc11,$acc11,8
+	xor	$s0,$s0,$acc04		# ^= ROTATE(r8^r4^r0,16)
+	xor	$s1,$s1,$acc05
+	xor	$s2,$s2,$acc06
+	xor	$s3,$s3,$acc07
+	xor	$s0,$s0,$acc08		# ^= ROTATE(r8,8)	
+	xor	$s1,$s1,$acc09	
+	xor	$s2,$s2,$acc10	
+	xor	$s3,$s3,$acc11	
+
+	b	Ldec_compact_loop
+.align	4
+Ldec_compact_done:
+	xor	$s0,$s0,$t0
+	xor	$s1,$s1,$t1
+	xor	$s2,$s2,$t2
+	xor	$s3,$s3,$t3
+	blr
+	.long	0
+	.byte	0,12,0x14,0,0,0,0,0
+
+.asciz	"AES for PPC, CRYPTOGAMS by <appro\@openssl.org>"
+.align	7
+___
+
+$code =~ s/\`([^\`]*)\`/eval $1/gem;
+print $code;
+close STDOUT;
diff --git a/jni/openssl/crypto/aes/asm/aes-s390x.pl b/jni/openssl/crypto/aes/asm/aes-s390x.pl
new file mode 100644
index 0000000..445a1e6
--- /dev/null
+++ b/jni/openssl/crypto/aes/asm/aes-s390x.pl
@@ -0,0 +1,2254 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# AES for s390x.
+
+# April 2007.
+#
+# Software performance improvement over gcc-generated code is ~70% and
+# in absolute terms is ~73 cycles per byte processed with 128-bit key.
+# You're likely to exclaim "why so slow?" Keep in mind that z-CPUs are
+# *strictly* in-order and an issued instruction [in this case a load
+# from memory is the critical one] has to complete before execution
+# flow proceeds. S-boxes are compressed to 2KB[+256B].
+#
+# As for hardware acceleration support: it's basically a "teaser," as
+# it can and should be improved in several ways. Most notably, support
+# for CBC is not utilized, nor are multiple blocks ever processed.
+# Then the software key schedule can be postponed till hardware support
+# detection... Performance improvement over assembler is reportedly
+# ~2.5x, but can reach >8x [naturally on larger chunks] if proper
+# support is implemented.
+
+# May 2007.
+#
+# Implement AES_set_[en|de]crypt_key. Key schedule setup is avoided
+# for 128-bit keys, if hardware support is detected.
+
+# January 2009.
+#
+# Add support for hardware AES192/256 and reschedule instructions to
+# minimize/avoid Address Generation Interlock hazard and to favour
+# dual-issue z10 pipeline. This gave ~25% improvement on z10 and
+# almost 50% on z9. The gain is smaller on z10 because, being dual-
+# issue, z10 makes it impossible to eliminate the interlock condition:
+# the critical path is not long enough. Yet it spends ~24 cycles per byte
+# processed with 128-bit key.
+#
+# Unlike the previous version, hardware support detection takes place
+# only at the moment of key schedule setup, which is denoted in
+# key->rounds. This is done because deferred key setup can't be made
+# MT-safe, at least not for keys longer than 128 bits.
+#
+# Add AES_cbc_encrypt, which gives an incredible performance improvement;
+# it was measured to be ~6.6x. It's less than the previously mentioned
+# 8x, because the software implementation was optimized.
+
+# May 2010.
+#
+# Add AES_ctr32_encrypt. If hardware-assisted, it provides up to 4.3x
+# performance improvement over the "generic" counter mode routine
+# relying on single-block, also hardware-assisted, AES_encrypt. "Up to"
+# refers to the fact that the exact throughput depends on the current
+# stack frame alignment within a 4KB page. In the worst case you get
+# ~75% of the maximum, but *on average* it would be as much as ~98%.
+# Meaning that the worst case is unlikely; it's like hitting a ravine
+# on a plateau.
+
+# November 2010.
+#
+# Adapt for -m31 build. If the kernel supports the so-called "highgprs"
+# feature on Linux [see /proc/cpuinfo], it's possible to use 64-bit
+# instructions and achieve "64-bit" performance even in 31-bit legacy
+# application context. The feature is not specific to any particular
+# processor, as long as it's a "z-CPU". The latter implies that the code
+# remains z/Architecture specific. On z990 it was measured to perform
+# 2x better than code generated by gcc 4.3.
+
+# December 2010.
+#
+# Add support for z196 "cipher message with counter" instruction.
+# Note however that it's disengaged, because it was measured to
+# perform ~12% worse than vanilla km-based code...
+
+# February 2011.
+#
+# Add AES_xts_[en|de]crypt. This includes support for z196 km-xts-aes
+# instructions, which deliver ~70% improvement at 8KB block size over
+# vanilla km-based code, and ~37% at a more likely 512-byte block size.
+
+$flavour = shift;
+
+if ($flavour =~ /3[12]/) {
+	$SIZE_T=4;
+	$g="";
+} else {
+	$SIZE_T=8;
+	$g="g";
+}
+
+while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
+open STDOUT,">$output";
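+# typical invocation (assumed): perl aes-s390x.pl linux64 aes-s390x.S;
+# a flavour containing "31" or "32" selects the 31-bit ABI, anything
+# else the 64-bit one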
+
+$softonly=0;	# allow hardware support
+
+$t0="%r0";	$mask="%r0";
+$t1="%r1";
+$t2="%r2";	$inp="%r2";
+$t3="%r3";	$out="%r3";	$bits="%r3";
+$key="%r4";
+$i1="%r5";
+$i2="%r6";
+$i3="%r7";
+$s0="%r8";
+$s1="%r9";
+$s2="%r10";
+$s3="%r11";
+$tbl="%r12";
+$rounds="%r13";
+$ra="%r14";
+$sp="%r15";
+
+$stdframe=16*$SIZE_T+4*8;
+
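+# each table word is emitted twice (an 8-byte pair) so that byte indices
+# scaled by 8 (the <<3 shifts and the 0xff<<3 mask in the rounds below)
+# can address rotated views of the same entry at different offsets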
+sub _data_word()
+{ my $i;
+    while(defined($i=shift)) { $code.=sprintf".long\t0x%08x,0x%08x\n",$i,$i; }
+}
+
+$code=<<___;
+.text
+
+.type	AES_Te,\@object
+.align	256
+AES_Te:
+___
+&_data_word(
+	0xc66363a5, 0xf87c7c84, 0xee777799, 0xf67b7b8d,
+	0xfff2f20d, 0xd66b6bbd, 0xde6f6fb1, 0x91c5c554,
+	0x60303050, 0x02010103, 0xce6767a9, 0x562b2b7d,
+	0xe7fefe19, 0xb5d7d762, 0x4dababe6, 0xec76769a,
+	0x8fcaca45, 0x1f82829d, 0x89c9c940, 0xfa7d7d87,
+	0xeffafa15, 0xb25959eb, 0x8e4747c9, 0xfbf0f00b,
+	0x41adadec, 0xb3d4d467, 0x5fa2a2fd, 0x45afafea,
+	0x239c9cbf, 0x53a4a4f7, 0xe4727296, 0x9bc0c05b,
+	0x75b7b7c2, 0xe1fdfd1c, 0x3d9393ae, 0x4c26266a,
+	0x6c36365a, 0x7e3f3f41, 0xf5f7f702, 0x83cccc4f,
+	0x6834345c, 0x51a5a5f4, 0xd1e5e534, 0xf9f1f108,
+	0xe2717193, 0xabd8d873, 0x62313153, 0x2a15153f,
+	0x0804040c, 0x95c7c752, 0x46232365, 0x9dc3c35e,
+	0x30181828, 0x379696a1, 0x0a05050f, 0x2f9a9ab5,
+	0x0e070709, 0x24121236, 0x1b80809b, 0xdfe2e23d,
+	0xcdebeb26, 0x4e272769, 0x7fb2b2cd, 0xea75759f,
+	0x1209091b, 0x1d83839e, 0x582c2c74, 0x341a1a2e,
+	0x361b1b2d, 0xdc6e6eb2, 0xb45a5aee, 0x5ba0a0fb,
+	0xa45252f6, 0x763b3b4d, 0xb7d6d661, 0x7db3b3ce,
+	0x5229297b, 0xdde3e33e, 0x5e2f2f71, 0x13848497,
+	0xa65353f5, 0xb9d1d168, 0x00000000, 0xc1eded2c,
+	0x40202060, 0xe3fcfc1f, 0x79b1b1c8, 0xb65b5bed,
+	0xd46a6abe, 0x8dcbcb46, 0x67bebed9, 0x7239394b,
+	0x944a4ade, 0x984c4cd4, 0xb05858e8, 0x85cfcf4a,
+	0xbbd0d06b, 0xc5efef2a, 0x4faaaae5, 0xedfbfb16,
+	0x864343c5, 0x9a4d4dd7, 0x66333355, 0x11858594,
+	0x8a4545cf, 0xe9f9f910, 0x04020206, 0xfe7f7f81,
+	0xa05050f0, 0x783c3c44, 0x259f9fba, 0x4ba8a8e3,
+	0xa25151f3, 0x5da3a3fe, 0x804040c0, 0x058f8f8a,
+	0x3f9292ad, 0x219d9dbc, 0x70383848, 0xf1f5f504,
+	0x63bcbcdf, 0x77b6b6c1, 0xafdada75, 0x42212163,
+	0x20101030, 0xe5ffff1a, 0xfdf3f30e, 0xbfd2d26d,
+	0x81cdcd4c, 0x180c0c14, 0x26131335, 0xc3ecec2f,
+	0xbe5f5fe1, 0x359797a2, 0x884444cc, 0x2e171739,
+	0x93c4c457, 0x55a7a7f2, 0xfc7e7e82, 0x7a3d3d47,
+	0xc86464ac, 0xba5d5de7, 0x3219192b, 0xe6737395,
+	0xc06060a0, 0x19818198, 0x9e4f4fd1, 0xa3dcdc7f,
+	0x44222266, 0x542a2a7e, 0x3b9090ab, 0x0b888883,
+	0x8c4646ca, 0xc7eeee29, 0x6bb8b8d3, 0x2814143c,
+	0xa7dede79, 0xbc5e5ee2, 0x160b0b1d, 0xaddbdb76,
+	0xdbe0e03b, 0x64323256, 0x743a3a4e, 0x140a0a1e,
+	0x924949db, 0x0c06060a, 0x4824246c, 0xb85c5ce4,
+	0x9fc2c25d, 0xbdd3d36e, 0x43acacef, 0xc46262a6,
+	0x399191a8, 0x319595a4, 0xd3e4e437, 0xf279798b,
+	0xd5e7e732, 0x8bc8c843, 0x6e373759, 0xda6d6db7,
+	0x018d8d8c, 0xb1d5d564, 0x9c4e4ed2, 0x49a9a9e0,
+	0xd86c6cb4, 0xac5656fa, 0xf3f4f407, 0xcfeaea25,
+	0xca6565af, 0xf47a7a8e, 0x47aeaee9, 0x10080818,
+	0x6fbabad5, 0xf0787888, 0x4a25256f, 0x5c2e2e72,
+	0x381c1c24, 0x57a6a6f1, 0x73b4b4c7, 0x97c6c651,
+	0xcbe8e823, 0xa1dddd7c, 0xe874749c, 0x3e1f1f21,
+	0x964b4bdd, 0x61bdbddc, 0x0d8b8b86, 0x0f8a8a85,
+	0xe0707090, 0x7c3e3e42, 0x71b5b5c4, 0xcc6666aa,
+	0x904848d8, 0x06030305, 0xf7f6f601, 0x1c0e0e12,
+	0xc26161a3, 0x6a35355f, 0xae5757f9, 0x69b9b9d0,
+	0x17868691, 0x99c1c158, 0x3a1d1d27, 0x279e9eb9,
+	0xd9e1e138, 0xebf8f813, 0x2b9898b3, 0x22111133,
+	0xd26969bb, 0xa9d9d970, 0x078e8e89, 0x339494a7,
+	0x2d9b9bb6, 0x3c1e1e22, 0x15878792, 0xc9e9e920,
+	0x87cece49, 0xaa5555ff, 0x50282878, 0xa5dfdf7a,
+	0x038c8c8f, 0x59a1a1f8, 0x09898980, 0x1a0d0d17,
+	0x65bfbfda, 0xd7e6e631, 0x844242c6, 0xd06868b8,
+	0x824141c3, 0x299999b0, 0x5a2d2d77, 0x1e0f0f11,
+	0x7bb0b0cb, 0xa85454fc, 0x6dbbbbd6, 0x2c16163a);
+$code.=<<___;
+# Te4[256]
+.byte	0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5
+.byte	0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76
+.byte	0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0
+.byte	0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0
+.byte	0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc
+.byte	0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15
+.byte	0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a
+.byte	0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75
+.byte	0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0
+.byte	0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84
+.byte	0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b
+.byte	0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf
+.byte	0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85
+.byte	0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8
+.byte	0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5
+.byte	0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2
+.byte	0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17
+.byte	0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73
+.byte	0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88
+.byte	0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb
+.byte	0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c
+.byte	0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79
+.byte	0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9
+.byte	0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08
+.byte	0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6
+.byte	0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a
+.byte	0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e
+.byte	0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e
+.byte	0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94
+.byte	0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf
+.byte	0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68
+.byte	0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
+# rcon[]
+.long	0x01000000, 0x02000000, 0x04000000, 0x08000000
+.long	0x10000000, 0x20000000, 0x40000000, 0x80000000
+.long	0x1B000000, 0x36000000, 0, 0, 0, 0, 0, 0
+.align	256
+.size	AES_Te,.-AES_Te
+
+# void AES_encrypt(const unsigned char *inp, unsigned char *out,
+# 		 const AES_KEY *key) {
+.globl	AES_encrypt
+.type	AES_encrypt,\@function
+AES_encrypt:
+___
+$code.=<<___ if (!$softonly);
+	l	%r0,240($key)
+	lhi	%r1,16
+	clr	%r0,%r1
+	jl	.Lesoft
+
+	la	%r1,0($key)
+	#la	%r2,0($inp)
+	la	%r4,0($out)
+	lghi	%r3,16		# single block length
+	.long	0xb92e0042	# km %r4,%r2
+	brc	1,.-4		# can this happen?
+	br	%r14
+.align	64
+.Lesoft:
+___
+$code.=<<___;
+	stm${g}	%r3,$ra,3*$SIZE_T($sp)
+
+	llgf	$s0,0($inp)
+	llgf	$s1,4($inp)
+	llgf	$s2,8($inp)
+	llgf	$s3,12($inp)
+
+	larl	$tbl,AES_Te
+	bras	$ra,_s390x_AES_encrypt
+
+	l${g}	$out,3*$SIZE_T($sp)
+	st	$s0,0($out)
+	st	$s1,4($out)
+	st	$s2,8($out)
+	st	$s3,12($out)
+
+	lm${g}	%r6,$ra,6*$SIZE_T($sp)
+	br	$ra
+.size	AES_encrypt,.-AES_encrypt
+
+.type   _s390x_AES_encrypt,\@function
+.align	16
+_s390x_AES_encrypt:
+	st${g}	$ra,15*$SIZE_T($sp)
+	x	$s0,0($key)
+	x	$s1,4($key)
+	x	$s2,8($key)
+	x	$s3,12($key)
+	l	$rounds,240($key)
+	llill	$mask,`0xff<<3`
+	aghi	$rounds,-1
+	j	.Lenc_loop
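+	# Each iteration of .Lenc_loop performs one full AES round on the state
+	# in $s0..$s3: the four bytes of every state word are extracted already
+	# shifted left by 3 bits (table entries are 8 bytes wide), looked up in
+	# AES_Te at byte offsets 0..3, combined by XOR and finally XORed with
+	# the next round key, with $key advancing by 16 bytes per round.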
+.align	16
+.Lenc_loop:
+	sllg	$t1,$s0,`0+3`
+	srlg	$t2,$s0,`8-3`
+	srlg	$t3,$s0,`16-3`
+	srl	$s0,`24-3`
+	nr	$s0,$mask
+	ngr	$t1,$mask
+	nr	$t2,$mask
+	nr	$t3,$mask
+
+	srlg	$i1,$s1,`16-3`	# i0
+	sllg	$i2,$s1,`0+3`
+	srlg	$i3,$s1,`8-3`
+	srl	$s1,`24-3`
+	nr	$i1,$mask
+	nr	$s1,$mask
+	ngr	$i2,$mask
+	nr	$i3,$mask
+
+	l	$s0,0($s0,$tbl)	# Te0[s0>>24]
+	l	$t1,1($t1,$tbl)	# Te3[s0>>0]
+	l	$t2,2($t2,$tbl) # Te2[s0>>8]
+	l	$t3,3($t3,$tbl)	# Te1[s0>>16]
+
+	x	$s0,3($i1,$tbl)	# Te1[s1>>16]
+	l	$s1,0($s1,$tbl)	# Te0[s1>>24]
+	x	$t2,1($i2,$tbl)	# Te3[s1>>0]
+	x	$t3,2($i3,$tbl)	# Te2[s1>>8]
+
+	srlg	$i1,$s2,`8-3`	# i0
+	srlg	$i2,$s2,`16-3`	# i1
+	nr	$i1,$mask
+	nr	$i2,$mask
+	sllg	$i3,$s2,`0+3`
+	srl	$s2,`24-3`
+	nr	$s2,$mask
+	ngr	$i3,$mask
+
+	xr	$s1,$t1
+	srlg	$ra,$s3,`8-3`	# i1
+	sllg	$t1,$s3,`0+3`	# i0
+	nr	$ra,$mask
+	la	$key,16($key)
+	ngr	$t1,$mask
+
+	x	$s0,2($i1,$tbl)	# Te2[s2>>8]
+	x	$s1,3($i2,$tbl)	# Te1[s2>>16]
+	l	$s2,0($s2,$tbl)	# Te0[s2>>24]
+	x	$t3,1($i3,$tbl)	# Te3[s2>>0]
+
+	srlg	$i3,$s3,`16-3`	# i2
+	xr	$s2,$t2
+	srl	$s3,`24-3`
+	nr	$i3,$mask
+	nr	$s3,$mask
+
+	x	$s0,0($key)
+	x	$s1,4($key)
+	x	$s2,8($key)
+	x	$t3,12($key)
+
+	x	$s0,1($t1,$tbl)	# Te3[s3>>0]
+	x	$s1,2($ra,$tbl)	# Te2[s3>>8]
+	x	$s2,3($i3,$tbl)	# Te1[s3>>16]
+	l	$s3,0($s3,$tbl)	# Te0[s3>>24]
+	xr	$s3,$t3
+
+	brct	$rounds,.Lenc_loop
+	.align	16
+
+	sllg	$t1,$s0,`0+3`
+	srlg	$t2,$s0,`8-3`
+	ngr	$t1,$mask
+	srlg	$t3,$s0,`16-3`
+	srl	$s0,`24-3`
+	nr	$s0,$mask
+	nr	$t2,$mask
+	nr	$t3,$mask
+
+	srlg	$i1,$s1,`16-3`	# i0
+	sllg	$i2,$s1,`0+3`
+	ngr	$i2,$mask
+	srlg	$i3,$s1,`8-3`
+	srl	$s1,`24-3`
+	nr	$i1,$mask
+	nr	$s1,$mask
+	nr	$i3,$mask
+
+	llgc	$s0,2($s0,$tbl)	# Te4[s0>>24]
+	llgc	$t1,2($t1,$tbl)	# Te4[s0>>0]
+	sll	$s0,24
+	llgc	$t2,2($t2,$tbl)	# Te4[s0>>8]
+	llgc	$t3,2($t3,$tbl)	# Te4[s0>>16]
+	sll	$t2,8
+	sll	$t3,16
+
+	llgc	$i1,2($i1,$tbl)	# Te4[s1>>16]
+	llgc	$s1,2($s1,$tbl)	# Te4[s1>>24]
+	llgc	$i2,2($i2,$tbl)	# Te4[s1>>0]
+	llgc	$i3,2($i3,$tbl)	# Te4[s1>>8]
+	sll	$i1,16
+	sll	$s1,24
+	sll	$i3,8
+	or	$s0,$i1
+	or	$s1,$t1
+	or	$t2,$i2
+	or	$t3,$i3
+	
+	srlg	$i1,$s2,`8-3`	# i0
+	srlg	$i2,$s2,`16-3`	# i1
+	nr	$i1,$mask
+	nr	$i2,$mask
+	sllg	$i3,$s2,`0+3`
+	srl	$s2,`24-3`
+	ngr	$i3,$mask
+	nr	$s2,$mask
+
+	sllg	$t1,$s3,`0+3`	# i0
+	srlg	$ra,$s3,`8-3`	# i1
+	ngr	$t1,$mask
+
+	llgc	$i1,2($i1,$tbl)	# Te4[s2>>8]
+	llgc	$i2,2($i2,$tbl)	# Te4[s2>>16]
+	sll	$i1,8
+	llgc	$s2,2($s2,$tbl)	# Te4[s2>>24]
+	llgc	$i3,2($i3,$tbl)	# Te4[s2>>0]
+	sll	$i2,16
+	nr	$ra,$mask
+	sll	$s2,24
+	or	$s0,$i1
+	or	$s1,$i2
+	or	$s2,$t2
+	or	$t3,$i3
+
+	srlg	$i3,$s3,`16-3`	# i2
+	srl	$s3,`24-3`
+	nr	$i3,$mask
+	nr	$s3,$mask
+
+	l	$t0,16($key)
+	l	$t2,20($key)
+
+	llgc	$i1,2($t1,$tbl)	# Te4[s3>>0]
+	llgc	$i2,2($ra,$tbl)	# Te4[s3>>8]
+	llgc	$i3,2($i3,$tbl)	# Te4[s3>>16]
+	llgc	$s3,2($s3,$tbl)	# Te4[s3>>24]
+	sll	$i2,8
+	sll	$i3,16
+	sll	$s3,24
+	or	$s0,$i1
+	or	$s1,$i2
+	or	$s2,$i3
+	or	$s3,$t3
+
+	l${g}	$ra,15*$SIZE_T($sp)
+	xr	$s0,$t0
+	xr	$s1,$t2
+	x	$s2,24($key)
+	x	$s3,28($key)
+
+	br	$ra	
+.size	_s390x_AES_encrypt,.-_s390x_AES_encrypt
+___
+
+$code.=<<___;
+.type	AES_Td,\@object
+.align	256
+AES_Td:
+___
+&_data_word(
+	0x51f4a750, 0x7e416553, 0x1a17a4c3, 0x3a275e96,
+	0x3bab6bcb, 0x1f9d45f1, 0xacfa58ab, 0x4be30393,
+	0x2030fa55, 0xad766df6, 0x88cc7691, 0xf5024c25,
+	0x4fe5d7fc, 0xc52acbd7, 0x26354480, 0xb562a38f,
+	0xdeb15a49, 0x25ba1b67, 0x45ea0e98, 0x5dfec0e1,
+	0xc32f7502, 0x814cf012, 0x8d4697a3, 0x6bd3f9c6,
+	0x038f5fe7, 0x15929c95, 0xbf6d7aeb, 0x955259da,
+	0xd4be832d, 0x587421d3, 0x49e06929, 0x8ec9c844,
+	0x75c2896a, 0xf48e7978, 0x99583e6b, 0x27b971dd,
+	0xbee14fb6, 0xf088ad17, 0xc920ac66, 0x7dce3ab4,
+	0x63df4a18, 0xe51a3182, 0x97513360, 0x62537f45,
+	0xb16477e0, 0xbb6bae84, 0xfe81a01c, 0xf9082b94,
+	0x70486858, 0x8f45fd19, 0x94de6c87, 0x527bf8b7,
+	0xab73d323, 0x724b02e2, 0xe31f8f57, 0x6655ab2a,
+	0xb2eb2807, 0x2fb5c203, 0x86c57b9a, 0xd33708a5,
+	0x302887f2, 0x23bfa5b2, 0x02036aba, 0xed16825c,
+	0x8acf1c2b, 0xa779b492, 0xf307f2f0, 0x4e69e2a1,
+	0x65daf4cd, 0x0605bed5, 0xd134621f, 0xc4a6fe8a,
+	0x342e539d, 0xa2f355a0, 0x058ae132, 0xa4f6eb75,
+	0x0b83ec39, 0x4060efaa, 0x5e719f06, 0xbd6e1051,
+	0x3e218af9, 0x96dd063d, 0xdd3e05ae, 0x4de6bd46,
+	0x91548db5, 0x71c45d05, 0x0406d46f, 0x605015ff,
+	0x1998fb24, 0xd6bde997, 0x894043cc, 0x67d99e77,
+	0xb0e842bd, 0x07898b88, 0xe7195b38, 0x79c8eedb,
+	0xa17c0a47, 0x7c420fe9, 0xf8841ec9, 0x00000000,
+	0x09808683, 0x322bed48, 0x1e1170ac, 0x6c5a724e,
+	0xfd0efffb, 0x0f853856, 0x3daed51e, 0x362d3927,
+	0x0a0fd964, 0x685ca621, 0x9b5b54d1, 0x24362e3a,
+	0x0c0a67b1, 0x9357e70f, 0xb4ee96d2, 0x1b9b919e,
+	0x80c0c54f, 0x61dc20a2, 0x5a774b69, 0x1c121a16,
+	0xe293ba0a, 0xc0a02ae5, 0x3c22e043, 0x121b171d,
+	0x0e090d0b, 0xf28bc7ad, 0x2db6a8b9, 0x141ea9c8,
+	0x57f11985, 0xaf75074c, 0xee99ddbb, 0xa37f60fd,
+	0xf701269f, 0x5c72f5bc, 0x44663bc5, 0x5bfb7e34,
+	0x8b432976, 0xcb23c6dc, 0xb6edfc68, 0xb8e4f163,
+	0xd731dcca, 0x42638510, 0x13972240, 0x84c61120,
+	0x854a247d, 0xd2bb3df8, 0xaef93211, 0xc729a16d,
+	0x1d9e2f4b, 0xdcb230f3, 0x0d8652ec, 0x77c1e3d0,
+	0x2bb3166c, 0xa970b999, 0x119448fa, 0x47e96422,
+	0xa8fc8cc4, 0xa0f03f1a, 0x567d2cd8, 0x223390ef,
+	0x87494ec7, 0xd938d1c1, 0x8ccaa2fe, 0x98d40b36,
+	0xa6f581cf, 0xa57ade28, 0xdab78e26, 0x3fadbfa4,
+	0x2c3a9de4, 0x5078920d, 0x6a5fcc9b, 0x547e4662,
+	0xf68d13c2, 0x90d8b8e8, 0x2e39f75e, 0x82c3aff5,
+	0x9f5d80be, 0x69d0937c, 0x6fd52da9, 0xcf2512b3,
+	0xc8ac993b, 0x10187da7, 0xe89c636e, 0xdb3bbb7b,
+	0xcd267809, 0x6e5918f4, 0xec9ab701, 0x834f9aa8,
+	0xe6956e65, 0xaaffe67e, 0x21bccf08, 0xef15e8e6,
+	0xbae79bd9, 0x4a6f36ce, 0xea9f09d4, 0x29b07cd6,
+	0x31a4b2af, 0x2a3f2331, 0xc6a59430, 0x35a266c0,
+	0x744ebc37, 0xfc82caa6, 0xe090d0b0, 0x33a7d815,
+	0xf104984a, 0x41ecdaf7, 0x7fcd500e, 0x1791f62f,
+	0x764dd68d, 0x43efb04d, 0xccaa4d54, 0xe49604df,
+	0x9ed1b5e3, 0x4c6a881b, 0xc12c1fb8, 0x4665517f,
+	0x9d5eea04, 0x018c355d, 0xfa877473, 0xfb0b412e,
+	0xb3671d5a, 0x92dbd252, 0xe9105633, 0x6dd64713,
+	0x9ad7618c, 0x37a10c7a, 0x59f8148e, 0xeb133c89,
+	0xcea927ee, 0xb761c935, 0xe11ce5ed, 0x7a47b13c,
+	0x9cd2df59, 0x55f2733f, 0x1814ce79, 0x73c737bf,
+	0x53f7cdea, 0x5ffdaa5b, 0xdf3d6f14, 0x7844db86,
+	0xcaaff381, 0xb968c43e, 0x3824342c, 0xc2a3405f,
+	0x161dc372, 0xbce2250c, 0x283c498b, 0xff0d9541,
+	0x39a80171, 0x080cb3de, 0xd8b4e49c, 0x6456c190,
+	0x7bcb8461, 0xd532b670, 0x486c5c74, 0xd0b85742);
+$code.=<<___;
+# Td4[256]
+.byte	0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38
+.byte	0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb
+.byte	0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87
+.byte	0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb
+.byte	0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d
+.byte	0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e
+.byte	0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2
+.byte	0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25
+.byte	0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16
+.byte	0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92
+.byte	0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda
+.byte	0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84
+.byte	0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a
+.byte	0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06
+.byte	0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02
+.byte	0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b
+.byte	0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea
+.byte	0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73
+.byte	0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85
+.byte	0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e
+.byte	0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89
+.byte	0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b
+.byte	0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20
+.byte	0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4
+.byte	0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31
+.byte	0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f
+.byte	0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d
+.byte	0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef
+.byte	0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0
+.byte	0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61
+.byte	0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26
+.byte	0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
+.size	AES_Td,.-AES_Td
+
+# void AES_decrypt(const unsigned char *inp, unsigned char *out,
+# 		 const AES_KEY *key) {
+.globl	AES_decrypt
+.type	AES_decrypt,\@function
+AES_decrypt:
+___
+$code.=<<___ if (!$softonly);
+	l	%r0,240($key)
+	lhi	%r1,16
+	clr	%r0,%r1
+	jl	.Ldsoft
+
+	la	%r1,0($key)
+	#la	%r2,0($inp)
+	la	%r4,0($out)
+	lghi	%r3,16		# single block length
+	.long	0xb92e0042	# km %r4,%r2
+	brc	1,.-4		# can this happen?
+	br	%r14
+.align	64
+.Ldsoft:
+___
+$code.=<<___;
+	stm${g}	%r3,$ra,3*$SIZE_T($sp)
+
+	llgf	$s0,0($inp)
+	llgf	$s1,4($inp)
+	llgf	$s2,8($inp)
+	llgf	$s3,12($inp)
+
+	larl	$tbl,AES_Td
+	bras	$ra,_s390x_AES_decrypt
+
+	l${g}	$out,3*$SIZE_T($sp)
+	st	$s0,0($out)
+	st	$s1,4($out)
+	st	$s2,8($out)
+	st	$s3,12($out)
+
+	lm${g}	%r6,$ra,6*$SIZE_T($sp)
+	br	$ra
+.size	AES_decrypt,.-AES_decrypt
+
+.type   _s390x_AES_decrypt,\@function
+.align	16
+_s390x_AES_decrypt:
+	st${g}	$ra,15*$SIZE_T($sp)
+	x	$s0,0($key)
+	x	$s1,4($key)
+	x	$s2,8($key)
+	x	$s3,12($key)
+	l	$rounds,240($key)
+	llill	$mask,`0xff<<3`
+	aghi	$rounds,-1
+	j	.Ldec_loop
+.align	16
+.Ldec_loop:
+	srlg	$t1,$s0,`16-3`
+	srlg	$t2,$s0,`8-3`
+	sllg	$t3,$s0,`0+3`
+	srl	$s0,`24-3`
+	nr	$s0,$mask
+	nr	$t1,$mask
+	nr	$t2,$mask
+	ngr	$t3,$mask
+
+	sllg	$i1,$s1,`0+3`	# i0
+	srlg	$i2,$s1,`16-3`
+	srlg	$i3,$s1,`8-3`
+	srl	$s1,`24-3`
+	ngr	$i1,$mask
+	nr	$s1,$mask
+	nr	$i2,$mask
+	nr	$i3,$mask
+
+	l	$s0,0($s0,$tbl)	# Td0[s0>>24]
+	l	$t1,3($t1,$tbl)	# Td1[s0>>16]
+	l	$t2,2($t2,$tbl)	# Td2[s0>>8]
+	l	$t3,1($t3,$tbl)	# Td3[s0>>0]
+
+	x	$s0,1($i1,$tbl)	# Td3[s1>>0]
+	l	$s1,0($s1,$tbl)	# Td0[s1>>24]
+	x	$t2,3($i2,$tbl)	# Td1[s1>>16]
+	x	$t3,2($i3,$tbl)	# Td2[s1>>8]
+
+	srlg	$i1,$s2,`8-3`	# i0
+	sllg	$i2,$s2,`0+3`	# i1
+	srlg	$i3,$s2,`16-3`
+	srl	$s2,`24-3`
+	nr	$i1,$mask
+	ngr	$i2,$mask
+	nr	$s2,$mask
+	nr	$i3,$mask
+
+	xr	$s1,$t1
+	srlg	$ra,$s3,`8-3`	# i1
+	srlg	$t1,$s3,`16-3`	# i0
+	nr	$ra,$mask
+	la	$key,16($key)
+	nr	$t1,$mask
+
+	x	$s0,2($i1,$tbl)	# Td2[s2>>8]
+	x	$s1,1($i2,$tbl)	# Td3[s2>>0]
+	l	$s2,0($s2,$tbl)	# Td0[s2>>24]
+	x	$t3,3($i3,$tbl)	# Td1[s2>>16]
+
+	sllg	$i3,$s3,`0+3`	# i2
+	srl	$s3,`24-3`
+	ngr	$i3,$mask
+	nr	$s3,$mask
+
+	xr	$s2,$t2
+	x	$s0,0($key)
+	x	$s1,4($key)
+	x	$s2,8($key)
+	x	$t3,12($key)
+
+	x	$s0,3($t1,$tbl)	# Td1[s3>>16]
+	x	$s1,2($ra,$tbl)	# Td2[s3>>8]
+	x	$s2,1($i3,$tbl)	# Td3[s3>>0]
+	l	$s3,0($s3,$tbl)	# Td0[s3>>24]
+	xr	$s3,$t3
+
+	brct	$rounds,.Ldec_loop
+	.align	16
+
+	l	$t1,`2048+0`($tbl)	# prefetch Td4
+	l	$t2,`2048+64`($tbl)
+	l	$t3,`2048+128`($tbl)
+	l	$i1,`2048+192`($tbl)
+	llill	$mask,0xff
+
+	srlg	$i3,$s0,24	# i0
+	srlg	$t1,$s0,16
+	srlg	$t2,$s0,8
+	nr	$s0,$mask	# i3
+	nr	$t1,$mask
+
+	srlg	$i1,$s1,24
+	nr	$t2,$mask
+	srlg	$i2,$s1,16
+	srlg	$ra,$s1,8
+	nr	$s1,$mask	# i0
+	nr	$i2,$mask
+	nr	$ra,$mask
+
+	llgc	$i3,2048($i3,$tbl)	# Td4[s0>>24]
+	llgc	$t1,2048($t1,$tbl)	# Td4[s0>>16]
+	llgc	$t2,2048($t2,$tbl)	# Td4[s0>>8]
+	sll	$t1,16
+	llgc	$t3,2048($s0,$tbl)	# Td4[s0>>0]
+	sllg	$s0,$i3,24
+	sll	$t2,8
+
+	llgc	$s1,2048($s1,$tbl)	# Td4[s1>>0]
+	llgc	$i1,2048($i1,$tbl)	# Td4[s1>>24]
+	llgc	$i2,2048($i2,$tbl)	# Td4[s1>>16]
+	sll	$i1,24
+	llgc	$i3,2048($ra,$tbl)	# Td4[s1>>8]
+	sll	$i2,16
+	sll	$i3,8
+	or	$s0,$s1
+	or	$t1,$i1
+	or	$t2,$i2
+	or	$t3,$i3
+
+	srlg	$i1,$s2,8	# i0
+	srlg	$i2,$s2,24
+	srlg	$i3,$s2,16
+	nr	$s2,$mask	# i1
+	nr	$i1,$mask
+	nr	$i3,$mask
+	llgc	$i1,2048($i1,$tbl)	# Td4[s2>>8]
+	llgc	$s1,2048($s2,$tbl)	# Td4[s2>>0]
+	llgc	$i2,2048($i2,$tbl)	# Td4[s2>>24]
+	llgc	$i3,2048($i3,$tbl)	# Td4[s2>>16]
+	sll	$i1,8
+	sll	$i2,24
+	or	$s0,$i1
+	sll	$i3,16
+	or	$t2,$i2
+	or	$t3,$i3
+
+	srlg	$i1,$s3,16	# i0
+	srlg	$i2,$s3,8	# i1
+	srlg	$i3,$s3,24
+	nr	$s3,$mask	# i2
+	nr	$i1,$mask
+	nr	$i2,$mask
+
+	l${g}	$ra,15*$SIZE_T($sp)
+	or	$s1,$t1
+	l	$t0,16($key)
+	l	$t1,20($key)
+
+	llgc	$i1,2048($i1,$tbl)	# Td4[s3>>16]
+	llgc	$i2,2048($i2,$tbl)	# Td4[s3>>8]
+	sll	$i1,16
+	llgc	$s2,2048($s3,$tbl)	# Td4[s3>>0]
+	llgc	$s3,2048($i3,$tbl)	# Td4[s3>>24]
+	sll	$i2,8
+	sll	$s3,24
+	or	$s0,$i1
+	or	$s1,$i2
+	or	$s2,$t2
+	or	$s3,$t3
+
+	xr	$s0,$t0
+	xr	$s1,$t1
+	x	$s2,24($key)
+	x	$s3,28($key)
+
+	br	$ra	
+.size	_s390x_AES_decrypt,.-_s390x_AES_decrypt
+___
+
+$code.=<<___;
+# void AES_set_encrypt_key(const unsigned char *in, int bits,
+# 		 AES_KEY *key) {
+.globl	private_AES_set_encrypt_key
+.type	private_AES_set_encrypt_key,\@function
+.align	16
+private_AES_set_encrypt_key:
+_s390x_AES_set_encrypt_key:
+	lghi	$t0,0
+	cl${g}r	$inp,$t0
+	je	.Lminus1
+	cl${g}r	$key,$t0
+	je	.Lminus1
+
+	lghi	$t0,128
+	clr	$bits,$t0
+	je	.Lproceed
+	lghi	$t0,192
+	clr	$bits,$t0
+	je	.Lproceed
+	lghi	$t0,256
+	clr	$bits,$t0
+	je	.Lproceed
+	lghi	%r2,-2
+	br	%r14
+
+.align	16
+.Lproceed:
+___
+$code.=<<___ if (!$softonly);
+	# convert bits to km code, [128,192,256]->[18,19,20]
+	lhi	%r5,-128
+	lhi	%r0,18
+	ar	%r5,$bits
+	srl	%r5,6
+	ar	%r5,%r0
+
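+	# Probe for hardware AES: OPENSSL_s390xcap_P caches the facility bits,
+	# and, if the message-security assist is present, the instruction is
+	# queried with function code 0 for its capability vector so that the
+	# bit for the AES function code computed above (18/19/20) can be
+	# tested.  If it is supported, the raw key is simply copied into the
+	# AES_KEY structure and the km function code is stored at 240($key);
+	# otherwise the software key schedule at .Lekey_internal is used.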
+	larl	%r1,OPENSSL_s390xcap_P
+	lg	%r0,0(%r1)
+	tmhl	%r0,0x4000	# check for message-security assist
+	jz	.Lekey_internal
+
+	lghi	%r0,0		# query capability vector
+	la	%r1,16($sp)
+	.long	0xb92f0042	# kmc %r4,%r2
+
+	llihh	%r1,0x8000
+	srlg	%r1,%r1,0(%r5)
+	ng	%r1,16($sp)
+	jz	.Lekey_internal
+
+	lmg	%r0,%r1,0($inp)	# just copy 128 bits...
+	stmg	%r0,%r1,0($key)
+	lhi	%r0,192
+	cr	$bits,%r0
+	jl	1f
+	lg	%r1,16($inp)
+	stg	%r1,16($key)
+	je	1f
+	lg	%r1,24($inp)
+	stg	%r1,24($key)
+1:	st	$bits,236($key)	# save bits [for debugging purposes]
+	lgr	$t0,%r5
+	st	%r5,240($key)	# save km code
+	lghi	%r2,0
+	br	%r14
+___
+$code.=<<___;
+.align	16
+.Lekey_internal:
+	stm${g}	%r4,%r13,4*$SIZE_T($sp)	# all non-volatile regs and $key
+
+	larl	$tbl,AES_Te+2048
+
+	llgf	$s0,0($inp)
+	llgf	$s1,4($inp)
+	llgf	$s2,8($inp)
+	llgf	$s3,12($inp)
+	st	$s0,0($key)
+	st	$s1,4($key)
+	st	$s2,8($key)
+	st	$s3,12($key)
+	lghi	$t0,128
+	cr	$bits,$t0
+	jne	.Lnot128
+
+	llill	$mask,0xff
+	lghi	$t3,0			# i=0
+	lghi	$rounds,10
+	st	$rounds,240($key)
+
+	llgfr	$t2,$s3			# temp=rk[3]
+	srlg	$i1,$s3,8
+	srlg	$i2,$s3,16
+	srlg	$i3,$s3,24
+	nr	$t2,$mask
+	nr	$i1,$mask
+	nr	$i2,$mask
+
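+	# Each iteration below expands one AES-128 round key as in the standard
+	# key schedule:
+	#	temp  = SubWord(RotWord(rk[3])) ^ rcon[i]
+	#	rk[4] = rk[0] ^ temp
+	#	rk[5] = rk[1] ^ rk[4]
+	#	rk[6] = rk[2] ^ rk[5]
+	#	rk[7] = rk[3] ^ rk[6]
+	# SubWord uses the byte-sized Te4 table at AES_Te+2048 (where $tbl
+	# points), and RotWord is folded into the icm byte placements.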
+.align	16
+.L128_loop:
+	la	$t2,0($t2,$tbl)
+	la	$i1,0($i1,$tbl)
+	la	$i2,0($i2,$tbl)
+	la	$i3,0($i3,$tbl)
+	icm	$t2,2,0($t2)		# Te4[rk[3]>>0]<<8
+	icm	$t2,4,0($i1)		# Te4[rk[3]>>8]<<16
+	icm	$t2,8,0($i2)		# Te4[rk[3]>>16]<<24
+	icm	$t2,1,0($i3)		# Te4[rk[3]>>24]
+	x	$t2,256($t3,$tbl)	# rcon[i]
+	xr	$s0,$t2			# rk[4]=rk[0]^...
+	xr	$s1,$s0			# rk[5]=rk[1]^rk[4]
+	xr	$s2,$s1			# rk[6]=rk[2]^rk[5]
+	xr	$s3,$s2			# rk[7]=rk[3]^rk[6]
+
+	llgfr	$t2,$s3			# temp=rk[3]
+	srlg	$i1,$s3,8
+	srlg	$i2,$s3,16
+	nr	$t2,$mask
+	nr	$i1,$mask
+	srlg	$i3,$s3,24
+	nr	$i2,$mask
+
+	st	$s0,16($key)
+	st	$s1,20($key)
+	st	$s2,24($key)
+	st	$s3,28($key)
+	la	$key,16($key)		# key+=4
+	la	$t3,4($t3)		# i++
+	brct	$rounds,.L128_loop
+	lghi	$t0,10
+	lghi	%r2,0
+	lm${g}	%r4,%r13,4*$SIZE_T($sp)
+	br	$ra
+
+.align	16
+.Lnot128:
+	llgf	$t0,16($inp)
+	llgf	$t1,20($inp)
+	st	$t0,16($key)
+	st	$t1,20($key)
+	lghi	$t0,192
+	cr	$bits,$t0
+	jne	.Lnot192
+
+	llill	$mask,0xff
+	lghi	$t3,0			# i=0
+	lghi	$rounds,12
+	st	$rounds,240($key)
+	lghi	$rounds,8
+
+	srlg	$i1,$t1,8
+	srlg	$i2,$t1,16
+	srlg	$i3,$t1,24
+	nr	$t1,$mask
+	nr	$i1,$mask
+	nr	$i2,$mask
+
+.align	16
+.L192_loop:
+	la	$t1,0($t1,$tbl)
+	la	$i1,0($i1,$tbl)
+	la	$i2,0($i2,$tbl)
+	la	$i3,0($i3,$tbl)
+	icm	$t1,2,0($t1)		# Te4[rk[5]>>0]<<8
+	icm	$t1,4,0($i1)		# Te4[rk[5]>>8]<<16
+	icm	$t1,8,0($i2)		# Te4[rk[5]>>16]<<24
+	icm	$t1,1,0($i3)		# Te4[rk[5]>>24]
+	x	$t1,256($t3,$tbl)	# rcon[i]
+	xr	$s0,$t1			# rk[6]=rk[0]^...
+	xr	$s1,$s0			# rk[7]=rk[1]^rk[6]
+	xr	$s2,$s1			# rk[8]=rk[2]^rk[7]
+	xr	$s3,$s2			# rk[9]=rk[3]^rk[8]
+
+	st	$s0,24($key)
+	st	$s1,28($key)
+	st	$s2,32($key)
+	st	$s3,36($key)
+	brct	$rounds,.L192_continue
+	lghi	$t0,12
+	lghi	%r2,0
+	lm${g}	%r4,%r13,4*$SIZE_T($sp)
+	br	$ra
+
+.align	16
+.L192_continue:
+	lgr	$t1,$s3
+	x	$t1,16($key)		# rk[10]=rk[4]^rk[9]
+	st	$t1,40($key)
+	x	$t1,20($key)		# rk[11]=rk[5]^rk[10]
+	st	$t1,44($key)
+
+	srlg	$i1,$t1,8
+	srlg	$i2,$t1,16
+	srlg	$i3,$t1,24
+	nr	$t1,$mask
+	nr	$i1,$mask
+	nr	$i2,$mask
+
+	la	$key,24($key)		# key+=6
+	la	$t3,4($t3)		# i++
+	j	.L192_loop
+
+.align	16
+.Lnot192:
+	llgf	$t0,24($inp)
+	llgf	$t1,28($inp)
+	st	$t0,24($key)
+	st	$t1,28($key)
+	llill	$mask,0xff
+	lghi	$t3,0			# i=0
+	lghi	$rounds,14
+	st	$rounds,240($key)
+	lghi	$rounds,7
+
+	srlg	$i1,$t1,8
+	srlg	$i2,$t1,16
+	srlg	$i3,$t1,24
+	nr	$t1,$mask
+	nr	$i1,$mask
+	nr	$i2,$mask
+
+.align	16
+.L256_loop:
+	la	$t1,0($t1,$tbl)
+	la	$i1,0($i1,$tbl)
+	la	$i2,0($i2,$tbl)
+	la	$i3,0($i3,$tbl)
+	icm	$t1,2,0($t1)		# Te4[rk[7]>>0]<<8
+	icm	$t1,4,0($i1)		# Te4[rk[7]>>8]<<16
+	icm	$t1,8,0($i2)		# Te4[rk[7]>>16]<<24
+	icm	$t1,1,0($i3)		# Te4[rk[7]>>24]
+	x	$t1,256($t3,$tbl)	# rcon[i]
+	xr	$s0,$t1			# rk[8]=rk[0]^...
+	xr	$s1,$s0			# rk[9]=rk[1]^rk[8]
+	xr	$s2,$s1			# rk[10]=rk[2]^rk[9]
+	xr	$s3,$s2			# rk[11]=rk[3]^rk[10]
+	st	$s0,32($key)
+	st	$s1,36($key)
+	st	$s2,40($key)
+	st	$s3,44($key)
+	brct	$rounds,.L256_continue
+	lghi	$t0,14
+	lghi	%r2,0
+	lm${g}	%r4,%r13,4*$SIZE_T($sp)
+	br	$ra
+
+.align	16
+.L256_continue:
+	lgr	$t1,$s3			# temp=rk[11]
+	srlg	$i1,$s3,8
+	srlg	$i2,$s3,16
+	srlg	$i3,$s3,24
+	nr	$t1,$mask
+	nr	$i1,$mask
+	nr	$i2,$mask
+	la	$t1,0($t1,$tbl)
+	la	$i1,0($i1,$tbl)
+	la	$i2,0($i2,$tbl)
+	la	$i3,0($i3,$tbl)
+	llgc	$t1,0($t1)		# Te4[rk[11]>>0]
+	icm	$t1,2,0($i1)		# Te4[rk[11]>>8]<<8
+	icm	$t1,4,0($i2)		# Te4[rk[11]>>16]<<16
+	icm	$t1,8,0($i3)		# Te4[rk[11]>>24]<<24
+	x	$t1,16($key)		# rk[12]=rk[4]^...
+	st	$t1,48($key)
+	x	$t1,20($key)		# rk[13]=rk[5]^rk[12]
+	st	$t1,52($key)
+	x	$t1,24($key)		# rk[14]=rk[6]^rk[13]
+	st	$t1,56($key)
+	x	$t1,28($key)		# rk[15]=rk[7]^rk[14]
+	st	$t1,60($key)
+
+	srlg	$i1,$t1,8
+	srlg	$i2,$t1,16
+	srlg	$i3,$t1,24
+	nr	$t1,$mask
+	nr	$i1,$mask
+	nr	$i2,$mask
+
+	la	$key,32($key)		# key+=8
+	la	$t3,4($t3)		# i++
+	j	.L256_loop
+
+.Lminus1:
+	lghi	%r2,-1
+	br	$ra
+.size	private_AES_set_encrypt_key,.-private_AES_set_encrypt_key
+
+# void AES_set_decrypt_key(const unsigned char *in, int bits,
+# 		 AES_KEY *key) {
+.globl	private_AES_set_decrypt_key
+.type	private_AES_set_decrypt_key,\@function
+.align	16
+private_AES_set_decrypt_key:
+	#st${g}	$key,4*$SIZE_T($sp)	# I rely on AES_set_encrypt_key to
+	st${g}	$ra,14*$SIZE_T($sp)	# save non-volatile registers and $key!
+	bras	$ra,_s390x_AES_set_encrypt_key
+	#l${g}	$key,4*$SIZE_T($sp)
+	l${g}	$ra,14*$SIZE_T($sp)
+	ltgr	%r2,%r2
+	bnzr	$ra
+___
+$code.=<<___ if (!$softonly);
+	#l	$t0,240($key)
+	lhi	$t1,16
+	cr	$t0,$t1
+	jl	.Lgo
+	oill	$t0,0x80	# set "decrypt" bit
+	st	$t0,240($key)
+	br	$ra
+___
+$code.=<<___;
+.align	16
+.Lgo:	lgr	$rounds,$t0	#llgf	$rounds,240($key)
+	la	$i1,0($key)
+	sllg	$i2,$rounds,4
+	la	$i2,0($i2,$key)
+	srl	$rounds,1
+	lghi	$t1,-16
+
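+	# First step of turning the encryption schedule into a decryption one:
+	# reverse the order of the round keys in place by swapping the 16-byte
+	# round key at the front with the one at the back, moving inwards.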
+.align	16
+.Linv:	lmg	$s0,$s1,0($i1)
+	lmg	$s2,$s3,0($i2)
+	stmg	$s0,$s1,0($i2)
+	stmg	$s2,$s3,0($i1)
+	la	$i1,16($i1)
+	la	$i2,0($t1,$i2)
+	brct	$rounds,.Linv
+___
+$mask80=$i1;
+$mask1b=$i2;
+$maskfe=$i3;
+$code.=<<___;
+	llgf	$rounds,240($key)
+	aghi	$rounds,-1
+	sll	$rounds,2	# (rounds-1)*4
+	llilh	$mask80,0x8080
+	llilh	$mask1b,0x1b1b
+	llilh	$maskfe,0xfefe
+	oill	$mask80,0x8080
+	oill	$mask1b,0x1b1b
+	oill	$maskfe,0xfefe
+
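+	# Second step: apply InvMixColumns to every round-key word except those
+	# of the first and last round keys.  tp2, tp4 and tp8 are tp1 times x,
+	# x^2 and x^3 in GF(2^8), computed on all four bytes of a word at once
+	# with the 0x80808080/0x1b1b1b1b/0xfefefefe masks; the rotations below
+	# then combine them into InvMixColumns(tp1).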
+.align	16
+.Lmix:	l	$s0,16($key)	# tp1
+	lr	$s1,$s0
+	ngr	$s1,$mask80
+	srlg	$t1,$s1,7
+	slr	$s1,$t1
+	nr	$s1,$mask1b
+	sllg	$t1,$s0,1
+	nr	$t1,$maskfe
+	xr	$s1,$t1		# tp2
+
+	lr	$s2,$s1
+	ngr	$s2,$mask80
+	srlg	$t1,$s2,7
+	slr	$s2,$t1
+	nr	$s2,$mask1b
+	sllg	$t1,$s1,1
+	nr	$t1,$maskfe
+	xr	$s2,$t1		# tp4
+
+	lr	$s3,$s2
+	ngr	$s3,$mask80
+	srlg	$t1,$s3,7
+	slr	$s3,$t1
+	nr	$s3,$mask1b
+	sllg	$t1,$s2,1
+	nr	$t1,$maskfe
+	xr	$s3,$t1		# tp8
+
+	xr	$s1,$s0		# tp2^tp1
+	xr	$s2,$s0		# tp4^tp1
+	rll	$s0,$s0,24	# = ROTATE(tp1,8)
+	xr	$s2,$s3		# ^=tp8
+	xr	$s0,$s1		# ^=tp2^tp1
+	xr	$s1,$s3		# tp2^tp1^tp8
+	xr	$s0,$s2		# ^=tp4^tp1^tp8
+	rll	$s1,$s1,8
+	rll	$s2,$s2,16
+	xr	$s0,$s1		# ^= ROTATE(tp8^tp2^tp1,24)
+	rll	$s3,$s3,24
+	xr	$s0,$s2    	# ^= ROTATE(tp8^tp4^tp1,16)
+	xr	$s0,$s3		# ^= ROTATE(tp8,8)
+
+	st	$s0,16($key)
+	la	$key,4($key)
+	brct	$rounds,.Lmix
+
+	lm${g}	%r6,%r13,6*$SIZE_T($sp)# as was saved by AES_set_encrypt_key!
+	lghi	%r2,0
+	br	$ra
+.size	private_AES_set_decrypt_key,.-private_AES_set_decrypt_key
+___
+
+########################################################################
+# void AES_cbc_encrypt(const unsigned char *in, unsigned char *out,
+#                     size_t length, const AES_KEY *key,
+#                     unsigned char *ivec, const int enc)
+{
+my $inp="%r2";
+my $out="%r4";	# length and out are swapped
+my $len="%r3";
+my $key="%r5";
+my $ivp="%r6";
+
+$code.=<<___;
+.globl	AES_cbc_encrypt
+.type	AES_cbc_encrypt,\@function
+.align	16
+AES_cbc_encrypt:
+	xgr	%r3,%r4		# flip %r3 and %r4, out and len
+	xgr	%r4,%r3
+	xgr	%r3,%r4
+___
+$code.=<<___ if (!$softonly);
+	lhi	%r0,16
+	cl	%r0,240($key)
+	jh	.Lcbc_software
+
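+	# Hardware path: the KMC (cipher message with chaining) parameter block
+	# at 16($sp) is the 16-byte chaining value (IV) followed by the key;
+	# 32 key bytes are copied to cover the largest key size.  KMC handles
+	# the whole-block part and updates the chaining value in the parameter
+	# block, which is copied back to *ivec at .Lkmc_done; a trailing
+	# partial block, if any, is dealt with at .Lkmc_truncated.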
+	lg	%r0,0($ivp)	# copy ivec
+	lg	%r1,8($ivp)
+	stmg	%r0,%r1,16($sp)
+	lmg	%r0,%r1,0($key)	# copy key, cover 256 bit
+	stmg	%r0,%r1,32($sp)
+	lmg	%r0,%r1,16($key)
+	stmg	%r0,%r1,48($sp)
+	l	%r0,240($key)	# load kmc code
+	lghi	$key,15		# res=len%16, len-=res;
+	ngr	$key,$len
+	sl${g}r	$len,$key
+	la	%r1,16($sp)	# parameter block - ivec || key
+	jz	.Lkmc_truncated
+	.long	0xb92f0042	# kmc %r4,%r2
+	brc	1,.-4		# pay attention to "partial completion"
+	ltr	$key,$key
+	jnz	.Lkmc_truncated
+.Lkmc_done:
+	lmg	%r0,%r1,16($sp)	# copy ivec to caller
+	stg	%r0,0($ivp)
+	stg	%r1,8($ivp)
+	br	$ra
+.align	16
+.Lkmc_truncated:
+	ahi	$key,-1		# it's the way it's encoded in mvc
+	tmll	%r0,0x80
+	jnz	.Lkmc_truncated_dec
+	lghi	%r1,0
+	stg	%r1,16*$SIZE_T($sp)
+	stg	%r1,16*$SIZE_T+8($sp)
+	bras	%r1,1f
+	mvc	16*$SIZE_T(1,$sp),0($inp)
+1:	ex	$key,0(%r1)
+	la	%r1,16($sp)	# restore parameter block
+	la	$inp,16*$SIZE_T($sp)
+	lghi	$len,16
+	.long	0xb92f0042	# kmc %r4,%r2
+	j	.Lkmc_done
+.align	16
+.Lkmc_truncated_dec:
+	st${g}	$out,4*$SIZE_T($sp)
+	la	$out,16*$SIZE_T($sp)
+	lghi	$len,16
+	.long	0xb92f0042	# kmc %r4,%r2
+	l${g}	$out,4*$SIZE_T($sp)
+	bras	%r1,2f
+	mvc	0(1,$out),16*$SIZE_T($sp)
+2:	ex	$key,0(%r1)
+	j	.Lkmc_done
+.align	16
+.Lcbc_software:
+___
+$code.=<<___;
+	stm${g}	$key,$ra,5*$SIZE_T($sp)
+	lhi	%r0,0
+	cl	%r0,`$stdframe+$SIZE_T-4`($sp)
+	je	.Lcbc_decrypt
+
+	larl	$tbl,AES_Te
+
+	llgf	$s0,0($ivp)
+	llgf	$s1,4($ivp)
+	llgf	$s2,8($ivp)
+	llgf	$s3,12($ivp)
+
+	lghi	$t0,16
+	sl${g}r	$len,$t0
+	brc	4,.Lcbc_enc_tail	# if borrow
+.Lcbc_enc_loop:
+	stm${g}	$inp,$out,2*$SIZE_T($sp)
+	x	$s0,0($inp)
+	x	$s1,4($inp)
+	x	$s2,8($inp)
+	x	$s3,12($inp)
+	lgr	%r4,$key
+
+	bras	$ra,_s390x_AES_encrypt
+
+	lm${g}	$inp,$key,2*$SIZE_T($sp)
+	st	$s0,0($out)
+	st	$s1,4($out)
+	st	$s2,8($out)
+	st	$s3,12($out)
+
+	la	$inp,16($inp)
+	la	$out,16($out)
+	lghi	$t0,16
+	lt${g}r	$len,$len
+	jz	.Lcbc_enc_done
+	sl${g}r	$len,$t0
+	brc	4,.Lcbc_enc_tail	# if borrow
+	j	.Lcbc_enc_loop
+.align	16
+.Lcbc_enc_done:
+	l${g}	$ivp,6*$SIZE_T($sp)
+	st	$s0,0($ivp)
+	st	$s1,4($ivp)	
+	st	$s2,8($ivp)
+	st	$s3,12($ivp)
+
+	lm${g}	%r7,$ra,7*$SIZE_T($sp)
+	br	$ra
+
+.align	16
+.Lcbc_enc_tail:
+	aghi	$len,15
+	lghi	$t0,0
+	stg	$t0,16*$SIZE_T($sp)
+	stg	$t0,16*$SIZE_T+8($sp)
+	bras	$t1,3f
+	mvc	16*$SIZE_T(1,$sp),0($inp)
+3:	ex	$len,0($t1)
+	lghi	$len,0
+	la	$inp,16*$SIZE_T($sp)
+	j	.Lcbc_enc_loop
+
+.align	16
+.Lcbc_decrypt:
+	larl	$tbl,AES_Td
+
+	lg	$t0,0($ivp)
+	lg	$t1,8($ivp)
+	stmg	$t0,$t1,16*$SIZE_T($sp)
+
+.Lcbc_dec_loop:
+	stm${g}	$inp,$out,2*$SIZE_T($sp)
+	llgf	$s0,0($inp)
+	llgf	$s1,4($inp)
+	llgf	$s2,8($inp)
+	llgf	$s3,12($inp)
+	lgr	%r4,$key
+
+	bras	$ra,_s390x_AES_decrypt
+
+	lm${g}	$inp,$key,2*$SIZE_T($sp)
+	sllg	$s0,$s0,32
+	sllg	$s2,$s2,32
+	lr	$s0,$s1
+	lr	$s2,$s3
+
+	lg	$t0,0($inp)
+	lg	$t1,8($inp)
+	xg	$s0,16*$SIZE_T($sp)
+	xg	$s2,16*$SIZE_T+8($sp)
+	lghi	$s1,16
+	sl${g}r	$len,$s1
+	brc	4,.Lcbc_dec_tail	# if borrow
+	brc	2,.Lcbc_dec_done	# if zero
+	stg	$s0,0($out)
+	stg	$s2,8($out)
+	stmg	$t0,$t1,16*$SIZE_T($sp)
+
+	la	$inp,16($inp)
+	la	$out,16($out)
+	j	.Lcbc_dec_loop
+
+.Lcbc_dec_done:
+	stg	$s0,0($out)
+	stg	$s2,8($out)
+.Lcbc_dec_exit:
+	lm${g}	%r6,$ra,6*$SIZE_T($sp)
+	stmg	$t0,$t1,0($ivp)
+
+	br	$ra
+
+.align	16
+.Lcbc_dec_tail:
+	aghi	$len,15
+	stg	$s0,16*$SIZE_T($sp)
+	stg	$s2,16*$SIZE_T+8($sp)
+	bras	$s1,4f
+	mvc	0(1,$out),16*$SIZE_T($sp)
+4:	ex	$len,0($s1)
+	j	.Lcbc_dec_exit
+.size	AES_cbc_encrypt,.-AES_cbc_encrypt
+___
+}
+########################################################################
+# void AES_ctr32_encrypt(const unsigned char *in, unsigned char *out,
+#                     size_t blocks, const AES_KEY *key,
+#                     const unsigned char *ivec)
+{
+my $inp="%r2";
+my $out="%r4";	# blocks and out are swapped
+my $len="%r3";
+my $key="%r5";	my $iv0="%r5";
+my $ivp="%r6";
+my $fp ="%r7";
+
+$code.=<<___;
+.globl	AES_ctr32_encrypt
+.type	AES_ctr32_encrypt,\@function
+.align	16
+AES_ctr32_encrypt:
+	xgr	%r3,%r4		# flip %r3 and %r4, $out and $len
+	xgr	%r4,%r3
+	xgr	%r3,%r4
+	llgfr	$len,$len	# safe in ctr32 subroutine even in 64-bit case
+___
+$code.=<<___ if (!$softonly);
+	l	%r0,240($key)
+	lhi	%r1,16
+	clr	%r0,%r1
+	jl	.Lctr32_software
+
+	stm${g}	%r6,$s3,6*$SIZE_T($sp)
+
+	slgr	$out,$inp
+	la	%r1,0($key)	# %r1 is permanent copy of $key
+	lg	$iv0,0($ivp)	# load ivec
+	lg	$ivp,8($ivp)
+
+	# prepare and allocate stack frame at the top of 4K page
+	# with 1K reserved for eventual signal handling
+	lghi	$s0,-1024-256-16	# guarantee at least a 256-byte buffer
+	lghi	$s1,-4096
+	algr	$s0,$sp
+	lgr	$fp,$sp
+	ngr	$s0,$s1		# align at page boundary
+	slgr	$fp,$s0		# total buffer size
+	lgr	$s2,$sp
+	lghi	$s1,1024+16	# sl[g]fi is extended-immediate facility
+	slgr	$fp,$s1		# deduct reservation to get usable buffer size
+	# buffer size is at least 256 and at most 3072+256-16
+
+	la	$sp,1024($s0)	# alloca
+	srlg	$fp,$fp,4	# convert bytes to blocks, minimum 16
+	st${g}	$s2,0($sp)	# back-chain
+	st${g}	$fp,$SIZE_T($sp)
+
+	slgr	$len,$fp
+	brc	1,.Lctr32_hw_switch	# not zero, no borrow
+	algr	$fp,$len	# input is shorter than allocated buffer
+	lghi	$len,0
+	st${g}	$fp,$SIZE_T($sp)
+
+.Lctr32_hw_switch:
+___
+$code.=<<___ if (0);	######### kmctr code was measured to be ~12% slower
+	larl	$s0,OPENSSL_s390xcap_P
+	lg	$s0,8($s0)
+	tmhh	$s0,0x0004	# check for message-security-assist-4
+	jz	.Lctr32_km_loop
+
+	llgfr	$s0,%r0
+	lgr	$s1,%r1
+	lghi	%r0,0
+	la	%r1,16($sp)
+	.long	0xb92d2042	# kmctr %r4,%r2,%r2
+
+	llihh	%r0,0x8000	# check if kmctr supports the function code
+	srlg	%r0,%r0,0($s0)
+	ng	%r0,16($sp)
+	lgr	%r0,$s0
+	lgr	%r1,$s1
+	jz	.Lctr32_km_loop
+
+####### kmctr code
+	algr	$out,$inp	# restore $out
+	lgr	$s1,$len	# $s1 undertakes $len
+	j	.Lctr32_kmctr_loop
+.align	16
+.Lctr32_kmctr_loop:
+	la	$s2,16($sp)
+	lgr	$s3,$fp
+.Lctr32_kmctr_prepare:
+	stg	$iv0,0($s2)
+	stg	$ivp,8($s2)
+	la	$s2,16($s2)
+	ahi	$ivp,1		# 32-bit increment, preserves upper half
+	brct	$s3,.Lctr32_kmctr_prepare
+
+	#la	$inp,0($inp)	# inp
+	sllg	$len,$fp,4	# len
+	#la	$out,0($out)	# out
+	la	$s2,16($sp)	# iv
+	.long	0xb92da042	# kmctr $out,$s2,$inp
+	brc	1,.-4		# pay attention to "partial completion"
+
+	slgr	$s1,$fp
+	brc	1,.Lctr32_kmctr_loop	# not zero, no borrow
+	algr	$fp,$s1
+	lghi	$s1,0
+	brc	4+1,.Lctr32_kmctr_loop	# not zero
+
+	l${g}	$sp,0($sp)
+	lm${g}	%r6,$s3,6*$SIZE_T($sp)
+	br	$ra
+.align	16
+___
+$code.=<<___;
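+	# KM provides only ECB, so CTR mode is built by hand: a batch of $fp
+	# counter blocks (the leading 8 IV bytes followed by the second 8
+	# bytes, whose low 32 bits are incremented per block) is written to
+	# the stack buffer, encrypted in place with a single km call, and then
+	# XORed against the input in .Lctr32_km_xor to produce the output.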
+.Lctr32_km_loop:
+	la	$s2,16($sp)
+	lgr	$s3,$fp
+.Lctr32_km_prepare:
+	stg	$iv0,0($s2)
+	stg	$ivp,8($s2)
+	la	$s2,16($s2)
+	ahi	$ivp,1		# 32-bit increment, preserves upper half
+	brct	$s3,.Lctr32_km_prepare
+
+	la	$s0,16($sp)	# inp
+	sllg	$s1,$fp,4	# len
+	la	$s2,16($sp)	# out
+	.long	0xb92e00a8	# km %r10,%r8
+	brc	1,.-4		# pay attention to "partial completion"
+
+	la	$s2,16($sp)
+	lgr	$s3,$fp
+	slgr	$s2,$inp
+.Lctr32_km_xor:
+	lg	$s0,0($inp)
+	lg	$s1,8($inp)
+	xg	$s0,0($s2,$inp)
+	xg	$s1,8($s2,$inp)
+	stg	$s0,0($out,$inp)
+	stg	$s1,8($out,$inp)
+	la	$inp,16($inp)
+	brct	$s3,.Lctr32_km_xor
+
+	slgr	$len,$fp
+	brc	1,.Lctr32_km_loop	# not zero, no borrow
+	algr	$fp,$len
+	lghi	$len,0
+	brc	4+1,.Lctr32_km_loop	# not zero
+
+	l${g}	$s0,0($sp)
+	l${g}	$s1,$SIZE_T($sp)
+	la	$s2,16($sp)
+.Lctr32_km_zap:
+	stg	$s0,0($s2)
+	stg	$s0,8($s2)
+	la	$s2,16($s2)
+	brct	$s1,.Lctr32_km_zap
+
+	la	$sp,0($s0)
+	lm${g}	%r6,$s3,6*$SIZE_T($sp)
+	br	$ra
+.align	16
+.Lctr32_software:
+___
+$code.=<<___;
+	stm${g}	$key,$ra,5*$SIZE_T($sp)
+	sl${g}r	$inp,$out
+	larl	$tbl,AES_Te
+	llgf	$t1,12($ivp)
+
+.Lctr32_loop:
+	stm${g}	$inp,$out,2*$SIZE_T($sp)
+	llgf	$s0,0($ivp)
+	llgf	$s1,4($ivp)
+	llgf	$s2,8($ivp)
+	lgr	$s3,$t1
+	st	$t1,16*$SIZE_T($sp)
+	lgr	%r4,$key
+
+	bras	$ra,_s390x_AES_encrypt
+
+	lm${g}	$inp,$ivp,2*$SIZE_T($sp)
+	llgf	$t1,16*$SIZE_T($sp)
+	x	$s0,0($inp,$out)
+	x	$s1,4($inp,$out)
+	x	$s2,8($inp,$out)
+	x	$s3,12($inp,$out)
+	stm	$s0,$s3,0($out)
+
+	la	$out,16($out)
+	ahi	$t1,1		# 32-bit increment
+	brct	$len,.Lctr32_loop
+
+	lm${g}	%r6,$ra,6*$SIZE_T($sp)
+	br	$ra
+.size	AES_ctr32_encrypt,.-AES_ctr32_encrypt
+___
+}
+
+########################################################################
+# void AES_xts_encrypt(const char *inp,char *out,size_t len,
+#	const AES_KEY *key1, const AES_KEY *key2,
+#	const unsigned char iv[16]);
+#
+{
+my $inp="%r2";
+my $out="%r4";	# len and out are swapped
+my $len="%r3";
+my $key1="%r5";	# $i1
+my $key2="%r6";	# $i2
+my $fp="%r7";	# $i3
+my $tweak=16*$SIZE_T+16;	# or $stdframe-16, bottom of the frame...
+
+$code.=<<___;
+.type	_s390x_xts_km,\@function
+.align	16
+_s390x_xts_km:
+___
+$code.=<<___ if(1);
+	llgfr	$s0,%r0			# put aside the function code
+	lghi	$s1,0x7f
+	nr	$s1,%r0
+	lghi	%r0,0			# query capability vector
+	la	%r1,2*$SIZE_T($sp)
+	.long	0xb92e0042		# km %r4,%r2
+	llihh	%r1,0x8000
+	srlg	%r1,%r1,32($s1)		# check for 32+function code
+	ng	%r1,2*$SIZE_T($sp)
+	lgr	%r0,$s0			# restore the function code
+	la	%r1,0($key1)		# restore $key1
+	jz	.Lxts_km_vanilla
+
+	lmg	$i2,$i3,$tweak($sp)	# put aside the tweak value
+	algr	$out,$inp
+
+	oill	%r0,32			# switch to xts function code
+	aghi	$s1,-18			#
+	sllg	$s1,$s1,3		# (function code - 18)*8, 0 or 16
+	la	%r1,$tweak-16($sp)
+	slgr	%r1,$s1			# parameter block position
+	lmg	$s0,$s3,0($key1)	# load 256 bits of key material,
+	stmg	$s0,$s3,0(%r1)		# and copy it to parameter block.
+					# yes, it contains junk and overlaps
+					# with the tweak in 128-bit case.
+					# it's done to avoid conditional
+					# branch.
+	stmg	$i2,$i3,$tweak($sp)	# "re-seat" the tweak value
+
+	.long	0xb92e0042		# km %r4,%r2
+	brc	1,.-4			# pay attention to "partial completion"
+
+	lrvg	$s0,$tweak+0($sp)	# load the last tweak
+	lrvg	$s1,$tweak+8($sp)
+	stmg	%r0,%r3,$tweak-32(%r1)	# wipe copy of the key
+
+	nill	%r0,0xffdf		# switch back to original function code
+	la	%r1,0($key1)		# restore pointer to $key1
+	slgr	$out,$inp
+
+	llgc	$len,2*$SIZE_T-1($sp)
+	nill	$len,0x0f		# $len%=16
+	br	$ra
+	
+.align	16
+.Lxts_km_vanilla:
+___
+$code.=<<___;
+	# prepare and allocate stack frame at the top of 4K page
+	# with 1K reserved for eventual signal handling
+	lghi	$s0,-1024-256-16	# guarantee at least a 256-byte buffer
+	lghi	$s1,-4096
+	algr	$s0,$sp
+	lgr	$fp,$sp
+	ngr	$s0,$s1		# align at page boundary
+	slgr	$fp,$s0		# total buffer size
+	lgr	$s2,$sp
+	lghi	$s1,1024+16	# sl[g]fi is extended-immediate facility
+	slgr	$fp,$s1		# deduct reservation to get usable buffer size
+	# buffer size is at least 256 and at most 3072+256-16
+
+	la	$sp,1024($s0)	# alloca
+	nill	$fp,0xfff0	# round to 16*n
+	st${g}	$s2,0($sp)	# back-chain
+	nill	$len,0xfff0	# redundant
+	st${g}	$fp,$SIZE_T($sp)
+
+	slgr	$len,$fp
+	brc	1,.Lxts_km_go	# not zero, no borrow
+	algr	$fp,$len	# input is shorter than allocated buffer
+	lghi	$len,0
+	st${g}	$fp,$SIZE_T($sp)
+
+.Lxts_km_go:
+	lrvg	$s0,$tweak+0($s2)	# load the tweak value in little-endian
+	lrvg	$s1,$tweak+8($s2)
+
+	la	$s2,16($sp)		# vector of ascending tweak values
+	slgr	$s2,$inp
+	srlg	$s3,$fp,4
+	j	.Lxts_km_start
+
+.Lxts_km_loop:
+	la	$s2,16($sp)
+	slgr	$s2,$inp
+	srlg	$s3,$fp,4
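+	# Advance the tweak for each block: the tweak is kept in $s1:$s0 as a
+	# 128-bit little-endian value ($s0 = low half) and is multiplied by x
+	# in GF(2^128), roughly
+	#	carry = $s1>>63; $s1 = ($s1<<1)|($s0>>63); $s0 = ($s0<<1)^(carry?0x87:0);
+	# The byte-reversed result is stored into the per-block tweak vector
+	# and XORed into the data before and after the km call.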
+.Lxts_km_prepare:
+	lghi	$i1,0x87
+	srag	$i2,$s1,63		# broadcast upper bit
+	ngr	$i1,$i2			# rem
+	srlg	$i2,$s0,63		# carry bit from lower half
+	sllg	$s0,$s0,1
+	sllg	$s1,$s1,1
+	xgr	$s0,$i1
+	ogr	$s1,$i2
+.Lxts_km_start:
+	lrvgr	$i1,$s0			# flip byte order
+	lrvgr	$i2,$s1
+	stg	$i1,0($s2,$inp)
+	stg	$i2,8($s2,$inp)
+	xg	$i1,0($inp)
+	xg	$i2,8($inp)
+	stg	$i1,0($out,$inp)
+	stg	$i2,8($out,$inp)
+	la	$inp,16($inp)
+	brct	$s3,.Lxts_km_prepare
+
+	slgr	$inp,$fp		# rewind $inp
+	la	$s2,0($out,$inp)
+	lgr	$s3,$fp
+	.long	0xb92e00aa		# km $s2,$s2
+	brc	1,.-4			# pay attention to "partial completion"
+
+	la	$s2,16($sp)
+	slgr	$s2,$inp
+	srlg	$s3,$fp,4
+.Lxts_km_xor:
+	lg	$i1,0($out,$inp)
+	lg	$i2,8($out,$inp)
+	xg	$i1,0($s2,$inp)
+	xg	$i2,8($s2,$inp)
+	stg	$i1,0($out,$inp)
+	stg	$i2,8($out,$inp)
+	la	$inp,16($inp)
+	brct	$s3,.Lxts_km_xor
+
+	slgr	$len,$fp
+	brc	1,.Lxts_km_loop		# not zero, no borrow
+	algr	$fp,$len
+	lghi	$len,0
+	brc	4+1,.Lxts_km_loop	# not zero
+
+	l${g}	$i1,0($sp)		# back-chain
+	llgf	$fp,`2*$SIZE_T-4`($sp)	# bytes used
+	la	$i2,16($sp)
+	srlg	$fp,$fp,4
+.Lxts_km_zap:
+	stg	$i1,0($i2)
+	stg	$i1,8($i2)
+	la	$i2,16($i2)
+	brct	$fp,.Lxts_km_zap
+
+	la	$sp,0($i1)
+	llgc	$len,2*$SIZE_T-1($i1)
+	nill	$len,0x0f		# $len%=16
+	bzr	$ra
+
+	# generate one more tweak...
+	lghi	$i1,0x87
+	srag	$i2,$s1,63		# broadcast upper bit
+	ngr	$i1,$i2			# rem
+	srlg	$i2,$s0,63		# carry bit from lower half
+	sllg	$s0,$s0,1
+	sllg	$s1,$s1,1
+	xgr	$s0,$i1
+	ogr	$s1,$i2
+
+	ltr	$len,$len		# clear zero flag
+	br	$ra
+.size	_s390x_xts_km,.-_s390x_xts_km
+
+.globl	AES_xts_encrypt
+.type	AES_xts_encrypt,\@function
+.align	16
+AES_xts_encrypt:
+	xgr	%r3,%r4			# flip %r3 and %r4, $out and $len
+	xgr	%r4,%r3
+	xgr	%r3,%r4
+___
+$code.=<<___ if ($SIZE_T==4);
+	llgfr	$len,$len
+___
+$code.=<<___;
+	st${g}	$len,1*$SIZE_T($sp)	# save copy of $len
+	srag	$len,$len,4		# formally wrong, because it expands
+					# sign byte, but who can afford asking
+					# to process more than 2^63-1 bytes?
+					# I use it, because it sets condition
+					# code...
+	bcr	8,$ra			# abort if zero (i.e. less than 16)
+___
+$code.=<<___ if (!$softonly);
+	llgf	%r0,240($key2)
+	lhi	%r1,16
+	clr	%r0,%r1
+	jl	.Lxts_enc_software
+
+	stm${g}	%r6,$s3,6*$SIZE_T($sp)
+	st${g}	$ra,14*$SIZE_T($sp)
+
+	sllg	$len,$len,4		# $len&=~15
+	slgr	$out,$inp
+
+	# generate the tweak value
+	l${g}	$s3,$stdframe($sp)	# pointer to iv
+	la	$s2,$tweak($sp)
+	lmg	$s0,$s1,0($s3)
+	lghi	$s3,16
+	stmg	$s0,$s1,0($s2)
+	la	%r1,0($key2)		# $key2 is not needed anymore
+	.long	0xb92e00aa		# km $s2,$s2, generate the tweak
+	brc	1,.-4			# can this happen?
+
+	l	%r0,240($key1)
+	la	%r1,0($key1)		# $key1 is not needed anymore
+	bras	$ra,_s390x_xts_km
+	jz	.Lxts_enc_km_done
+
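+	# A non-zero residue means ciphertext stealing: _s390x_xts_km has
+	# already produced one extra tweak in $s0/$s1.  The loop below swaps
+	# the final partial plaintext block with the tail of the last full
+	# ciphertext block, and the reassembled block at $i3 is then encrypted
+	# once more under that extra tweak.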
+	aghi	$inp,-16		# take one step back
+	la	$i3,0($out,$inp)	# put aside real $out
+.Lxts_enc_km_steal:
+	llgc	$i1,16($inp)
+	llgc	$i2,0($out,$inp)
+	stc	$i1,0($out,$inp)
+	stc	$i2,16($out,$inp)
+	la	$inp,1($inp)
+	brct	$len,.Lxts_enc_km_steal
+
+	la	$s2,0($i3)
+	lghi	$s3,16
+	lrvgr	$i1,$s0			# flip byte order
+	lrvgr	$i2,$s1
+	xg	$i1,0($s2)
+	xg	$i2,8($s2)
+	stg	$i1,0($s2)
+	stg	$i2,8($s2)
+	.long	0xb92e00aa		# km $s2,$s2
+	brc	1,.-4			# can this happen?
+	lrvgr	$i1,$s0			# flip byte order
+	lrvgr	$i2,$s1
+	xg	$i1,0($i3)
+	xg	$i2,8($i3)
+	stg	$i1,0($i3)
+	stg	$i2,8($i3)
+
+.Lxts_enc_km_done:
+	l${g}	$ra,14*$SIZE_T($sp)
+	st${g}	$sp,$tweak+0($sp)	# wipe tweak
+	st${g}	$sp,$tweak+8($sp)
+	lm${g}	%r6,$s3,6*$SIZE_T($sp)
+	br	$ra
+.align	16
+.Lxts_enc_software:
+___
+$code.=<<___;
+	stm${g}	%r6,$ra,6*$SIZE_T($sp)
+
+	slgr	$out,$inp
+
+	xgr	$s0,$s0			# clear upper half
+	xgr	$s1,$s1
+	lrv	$s0,$stdframe+4($sp)	# load secno
+	lrv	$s1,$stdframe+0($sp)
+	xgr	$s2,$s2
+	xgr	$s3,$s3
+	stm${g}	%r2,%r5,2*$SIZE_T($sp)
+	la	$key,0($key2)
+	larl	$tbl,AES_Te
+	bras	$ra,_s390x_AES_encrypt	# generate the tweak
+	lm${g}	%r2,%r5,2*$SIZE_T($sp)
+	stm	$s0,$s3,$tweak($sp)	# save the tweak
+	j	.Lxts_enc_enter
+
+.align	16
+.Lxts_enc_loop:
+	lrvg	$s1,$tweak+0($sp)	# load the tweak in little-endian
+	lrvg	$s3,$tweak+8($sp)
+	lghi	%r1,0x87
+	srag	%r0,$s3,63		# broadcast upper bit
+	ngr	%r1,%r0			# rem
+	srlg	%r0,$s1,63		# carry bit from lower half
+	sllg	$s1,$s1,1
+	sllg	$s3,$s3,1
+	xgr	$s1,%r1
+	ogr	$s3,%r0
+	lrvgr	$s1,$s1			# flip byte order
+	lrvgr	$s3,$s3
+	srlg	$s0,$s1,32		# smash the tweak to 4x32-bits 
+	stg	$s1,$tweak+0($sp)	# save the tweak
+	llgfr	$s1,$s1
+	srlg	$s2,$s3,32
+	stg	$s3,$tweak+8($sp)
+	llgfr	$s3,$s3
+	la	$inp,16($inp)		# $inp+=16
+.Lxts_enc_enter:
+	x	$s0,0($inp)		# ^=*($inp)
+	x	$s1,4($inp)
+	x	$s2,8($inp)
+	x	$s3,12($inp)
+	stm${g}	%r2,%r3,2*$SIZE_T($sp)	# only two registers are changing
+	la	$key,0($key1)
+	bras	$ra,_s390x_AES_encrypt
+	lm${g}	%r2,%r5,2*$SIZE_T($sp)
+	x	$s0,$tweak+0($sp)	# ^=tweak
+	x	$s1,$tweak+4($sp)
+	x	$s2,$tweak+8($sp)
+	x	$s3,$tweak+12($sp)
+	st	$s0,0($out,$inp)
+	st	$s1,4($out,$inp)
+	st	$s2,8($out,$inp)
+	st	$s3,12($out,$inp)
+	brct${g}	$len,.Lxts_enc_loop
+
+	llgc	$len,`2*$SIZE_T-1`($sp)
+	nill	$len,0x0f		# $len%16
+	jz	.Lxts_enc_done
+
+	la	$i3,0($inp,$out)	# put aside real $out
+.Lxts_enc_steal:
+	llgc	%r0,16($inp)
+	llgc	%r1,0($out,$inp)
+	stc	%r0,0($out,$inp)
+	stc	%r1,16($out,$inp)
+	la	$inp,1($inp)
+	brct	$len,.Lxts_enc_steal
+	la	$out,0($i3)		# restore real $out
+
+	# generate last tweak...
+	lrvg	$s1,$tweak+0($sp)	# load the tweak in little-endian
+	lrvg	$s3,$tweak+8($sp)
+	lghi	%r1,0x87
+	srag	%r0,$s3,63		# broadcast upper bit
+	ngr	%r1,%r0			# rem
+	srlg	%r0,$s1,63		# carry bit from lower half
+	sllg	$s1,$s1,1
+	sllg	$s3,$s3,1
+	xgr	$s1,%r1
+	ogr	$s3,%r0
+	lrvgr	$s1,$s1			# flip byte order
+	lrvgr	$s3,$s3
+	srlg	$s0,$s1,32		# smash the tweak to 4x32-bits 
+	stg	$s1,$tweak+0($sp)	# save the tweak
+	llgfr	$s1,$s1
+	srlg	$s2,$s3,32
+	stg	$s3,$tweak+8($sp)
+	llgfr	$s3,$s3
+
+	x	$s0,0($out)		# ^=*(inp)|stolen cipher-text
+	x	$s1,4($out)
+	x	$s2,8($out)
+	x	$s3,12($out)
+	st${g}	$out,4*$SIZE_T($sp)
+	la	$key,0($key1)
+	bras	$ra,_s390x_AES_encrypt
+	l${g}	$out,4*$SIZE_T($sp)
+	x	$s0,`$tweak+0`($sp)	# ^=tweak
+	x	$s1,`$tweak+4`($sp)
+	x	$s2,`$tweak+8`($sp)
+	x	$s3,`$tweak+12`($sp)
+	st	$s0,0($out)
+	st	$s1,4($out)
+	st	$s2,8($out)
+	st	$s3,12($out)
+
+.Lxts_enc_done:
+	stg	$sp,$tweak+0($sp)	# wipe tweak
+	stg	$sp,$tweak+8($sp)
+	lm${g}	%r6,$ra,6*$SIZE_T($sp)
+	br	$ra
+.size	AES_xts_encrypt,.-AES_xts_encrypt
+___
+# void AES_xts_decrypt(const char *inp,char *out,size_t len,
+#	const AES_KEY *key1, const AES_KEY *key2,u64 secno);
+#
+$code.=<<___;
+.globl	AES_xts_decrypt
+.type	AES_xts_decrypt,\@function
+.align	16
+AES_xts_decrypt:
+	xgr	%r3,%r4			# flip %r3 and %r4, $out and $len
+	xgr	%r4,%r3
+	xgr	%r3,%r4
+___
+$code.=<<___ if ($SIZE_T==4);
+	llgfr	$len,$len
+___
+$code.=<<___;
+	st${g}	$len,1*$SIZE_T($sp)	# save copy of $len
+	aghi	$len,-16
+	bcr	4,$ra			# abort if less than zero. formally
+					# wrong, because $len is unsigned,
+					# but who can afford asking to
+					# process more than 2^63-1 bytes?
+	tmll	$len,0x0f
+	jnz	.Lxts_dec_proceed
+	aghi	$len,16
+.Lxts_dec_proceed:
+___
+$code.=<<___ if (!$softonly);
+	llgf	%r0,240($key2)
+	lhi	%r1,16
+	clr	%r0,%r1
+	jl	.Lxts_dec_software
+
+	stm${g}	%r6,$s3,6*$SIZE_T($sp)
+	st${g}	$ra,14*$SIZE_T($sp)
+
+	nill	$len,0xfff0		# $len&=~15
+	slgr	$out,$inp
+
+	# generate the tweak value
+	l${g}	$s3,$stdframe($sp)	# pointer to iv
+	la	$s2,$tweak($sp)
+	lmg	$s0,$s1,0($s3)
+	lghi	$s3,16
+	stmg	$s0,$s1,0($s2)
+	la	%r1,0($key2)		# $key2 is not needed past this point
+	.long	0xb92e00aa		# km $s2,$s2, generate the tweak
+	brc	1,.-4			# can this happen?
+
+	l	%r0,240($key1)
+	la	%r1,0($key1)		# $key1 is not needed anymore
+
+	ltgr	$len,$len
+	jz	.Lxts_dec_km_short
+	bras	$ra,_s390x_xts_km
+	jz	.Lxts_dec_km_done
+
+	lrvgr	$s2,$s0			# make copy in reverse byte order
+	lrvgr	$s3,$s1
+	j	.Lxts_dec_km_2ndtweak
+
+.Lxts_dec_km_short:
+	llgc	$len,`2*$SIZE_T-1`($sp)
+	nill	$len,0x0f		# $len%=16
+	lrvg	$s0,$tweak+0($sp)	# load the tweak
+	lrvg	$s1,$tweak+8($sp)
+	lrvgr	$s2,$s0			# make copy in reverse byte order
+	lrvgr	$s3,$s1
+
+.Lxts_dec_km_2ndtweak:
+	lghi	$i1,0x87
+	srag	$i2,$s1,63		# broadcast upper bit
+	ngr	$i1,$i2			# rem
+	srlg	$i2,$s0,63		# carry bit from lower half
+	sllg	$s0,$s0,1
+	sllg	$s1,$s1,1
+	xgr	$s0,$i1
+	ogr	$s1,$i2
+	lrvgr	$i1,$s0			# flip byte order
+	lrvgr	$i2,$s1
+
+	xg	$i1,0($inp)
+	xg	$i2,8($inp)
+	stg	$i1,0($out,$inp)
+	stg	$i2,8($out,$inp)
+	la	$i2,0($out,$inp)
+	lghi	$i3,16
+	.long	0xb92e0066		# km $i2,$i2
+	brc	1,.-4			# can this happen?
+	lrvgr	$i1,$s0
+	lrvgr	$i2,$s1
+	xg	$i1,0($out,$inp)
+	xg	$i2,8($out,$inp)
+	stg	$i1,0($out,$inp)
+	stg	$i2,8($out,$inp)
+
+	la	$i3,0($out,$inp)	# put aside real $out
+.Lxts_dec_km_steal:
+	llgc	$i1,16($inp)
+	llgc	$i2,0($out,$inp)
+	stc	$i1,0($out,$inp)
+	stc	$i2,16($out,$inp)
+	la	$inp,1($inp)
+	brct	$len,.Lxts_dec_km_steal
+
+	lgr	$s0,$s2
+	lgr	$s1,$s3
+	xg	$s0,0($i3)
+	xg	$s1,8($i3)
+	stg	$s0,0($i3)
+	stg	$s1,8($i3)
+	la	$s0,0($i3)
+	lghi	$s1,16
+	.long	0xb92e0088		# km $s0,$s0
+	brc	1,.-4			# can this happen?
+	xg	$s2,0($i3)
+	xg	$s3,8($i3)
+	stg	$s2,0($i3)
+	stg	$s3,8($i3)
+.Lxts_dec_km_done:
+	l${g}	$ra,14*$SIZE_T($sp)
+	st${g}	$sp,$tweak+0($sp)	# wipe tweak
+	st${g}	$sp,$tweak+8($sp)
+	lm${g}	%r6,$s3,6*$SIZE_T($sp)
+	br	$ra
+.align	16
+.Lxts_dec_software:
+___
+$code.=<<___;
+	stm${g}	%r6,$ra,6*$SIZE_T($sp)
+
+	srlg	$len,$len,4
+	slgr	$out,$inp
+
+	xgr	$s0,$s0			# clear upper half
+	xgr	$s1,$s1
+	lrv	$s0,$stdframe+4($sp)	# load secno
+	lrv	$s1,$stdframe+0($sp)
+	xgr	$s2,$s2
+	xgr	$s3,$s3
+	stm${g}	%r2,%r5,2*$SIZE_T($sp)
+	la	$key,0($key2)
+	larl	$tbl,AES_Te
+	bras	$ra,_s390x_AES_encrypt	# generate the tweak
+	lm${g}	%r2,%r5,2*$SIZE_T($sp)
+	larl	$tbl,AES_Td
+	lt${g}r	$len,$len
+	stm	$s0,$s3,$tweak($sp)	# save the tweak
+	jz	.Lxts_dec_short
+	j	.Lxts_dec_enter
+
+.align	16
+.Lxts_dec_loop:
+	lrvg	$s1,$tweak+0($sp)	# load the tweak in little-endian
+	lrvg	$s3,$tweak+8($sp)
+	lghi	%r1,0x87
+	srag	%r0,$s3,63		# broadcast upper bit
+	ngr	%r1,%r0			# rem
+	srlg	%r0,$s1,63		# carry bit from lower half
+	sllg	$s1,$s1,1
+	sllg	$s3,$s3,1
+	xgr	$s1,%r1
+	ogr	$s3,%r0
+	lrvgr	$s1,$s1			# flip byte order
+	lrvgr	$s3,$s3
+	srlg	$s0,$s1,32		# smash the tweak to 4x32-bits 
+	stg	$s1,$tweak+0($sp)	# save the tweak
+	llgfr	$s1,$s1
+	srlg	$s2,$s3,32
+	stg	$s3,$tweak+8($sp)
+	llgfr	$s3,$s3
+.Lxts_dec_enter:
+	x	$s0,0($inp)		# tweak^=*(inp)
+	x	$s1,4($inp)
+	x	$s2,8($inp)
+	x	$s3,12($inp)
+	stm${g}	%r2,%r3,2*$SIZE_T($sp)	# only two registers are changing
+	la	$key,0($key1)
+	bras	$ra,_s390x_AES_decrypt
+	lm${g}	%r2,%r5,2*$SIZE_T($sp)
+	x	$s0,$tweak+0($sp)	# ^=tweak
+	x	$s1,$tweak+4($sp)
+	x	$s2,$tweak+8($sp)
+	x	$s3,$tweak+12($sp)
+	st	$s0,0($out,$inp)
+	st	$s1,4($out,$inp)
+	st	$s2,8($out,$inp)
+	st	$s3,12($out,$inp)
+	la	$inp,16($inp)
+	brct${g}	$len,.Lxts_dec_loop
+
+	llgc	$len,`2*$SIZE_T-1`($sp)
+	nill	$len,0x0f		# $len%16
+	jz	.Lxts_dec_done
+
+	# generate pair of tweaks...
+	lrvg	$s1,$tweak+0($sp)	# load the tweak in little-endian
+	lrvg	$s3,$tweak+8($sp)
+	lghi	%r1,0x87
+	srag	%r0,$s3,63		# broadcast upper bit
+	ngr	%r1,%r0			# rem
+	srlg	%r0,$s1,63		# carry bit from lower half
+	sllg	$s1,$s1,1
+	sllg	$s3,$s3,1
+	xgr	$s1,%r1
+	ogr	$s3,%r0
+	lrvgr	$i2,$s1			# flip byte order
+	lrvgr	$i3,$s3
+	stmg	$i2,$i3,$tweak($sp)	# save the 1st tweak
+	j	.Lxts_dec_2ndtweak
+
+.align	16
+.Lxts_dec_short:
+	llgc	$len,`2*$SIZE_T-1`($sp)
+	nill	$len,0x0f		# $len%16
+	lrvg	$s1,$tweak+0($sp)	# load the tweak in little-endian
+	lrvg	$s3,$tweak+8($sp)
+.Lxts_dec_2ndtweak:
+	lghi	%r1,0x87
+	srag	%r0,$s3,63		# broadcast upper bit
+	ngr	%r1,%r0			# rem
+	srlg	%r0,$s1,63		# carry bit from lower half
+	sllg	$s1,$s1,1
+	sllg	$s3,$s3,1
+	xgr	$s1,%r1
+	ogr	$s3,%r0
+	lrvgr	$s1,$s1			# flip byte order
+	lrvgr	$s3,$s3
+	srlg	$s0,$s1,32		# smash the tweak to 4x32-bits
+	stg	$s1,$tweak-16+0($sp)	# save the 2nd tweak
+	llgfr	$s1,$s1
+	srlg	$s2,$s3,32
+	stg	$s3,$tweak-16+8($sp)
+	llgfr	$s3,$s3
+
+	x	$s0,0($inp)		# tweak_the_2nd^=*(inp)
+	x	$s1,4($inp)
+	x	$s2,8($inp)
+	x	$s3,12($inp)
+	stm${g}	%r2,%r3,2*$SIZE_T($sp)
+	la	$key,0($key1)
+	bras	$ra,_s390x_AES_decrypt
+	lm${g}	%r2,%r5,2*$SIZE_T($sp)
+	x	$s0,$tweak-16+0($sp)	# ^=tweak_the_2nd
+	x	$s1,$tweak-16+4($sp)
+	x	$s2,$tweak-16+8($sp)
+	x	$s3,$tweak-16+12($sp)
+	st	$s0,0($out,$inp)
+	st	$s1,4($out,$inp)
+	st	$s2,8($out,$inp)
+	st	$s3,12($out,$inp)
+
+	la	$i3,0($out,$inp)	# put aside real $out
+.Lxts_dec_steal:
+	llgc	%r0,16($inp)
+	llgc	%r1,0($out,$inp)
+	stc	%r0,0($out,$inp)
+	stc	%r1,16($out,$inp)
+	la	$inp,1($inp)
+	brct	$len,.Lxts_dec_steal
+	la	$out,0($i3)		# restore real $out
+
+	lm	$s0,$s3,$tweak($sp)	# load the 1st tweak
+	x	$s0,0($out)		# tweak^=*(inp)|stolen cipher-text
+	x	$s1,4($out)
+	x	$s2,8($out)
+	x	$s3,12($out)
+	st${g}	$out,4*$SIZE_T($sp)
+	la	$key,0($key1)
+	bras	$ra,_s390x_AES_decrypt
+	l${g}	$out,4*$SIZE_T($sp)
+	x	$s0,$tweak+0($sp)	# ^=tweak
+	x	$s1,$tweak+4($sp)
+	x	$s2,$tweak+8($sp)
+	x	$s3,$tweak+12($sp)
+	st	$s0,0($out)
+	st	$s1,4($out)
+	st	$s2,8($out)
+	st	$s3,12($out)
+	stg	$sp,$tweak-16+0($sp)	# wipe 2nd tweak
+	stg	$sp,$tweak-16+8($sp)
+.Lxts_dec_done:
+	stg	$sp,$tweak+0($sp)	# wipe tweak
+	stg	$sp,$tweak+8($sp)
+	lm${g}	%r6,$ra,6*$SIZE_T($sp)
+	br	$ra
+.size	AES_xts_decrypt,.-AES_xts_decrypt
+___
+}
+$code.=<<___;
+.string	"AES for s390x, CRYPTOGAMS by <appro\@openssl.org>"
+.comm	OPENSSL_s390xcap_P,16,8
+___
+
+$code =~ s/\`([^\`]*)\`/eval $1/gem;
+print $code;
+close STDOUT;	# force flush
diff --git a/jni/openssl/crypto/aes/asm/aes-sparcv9.pl b/jni/openssl/crypto/aes/asm/aes-sparcv9.pl
new file mode 100755
index 0000000..403c4d1
--- /dev/null
+++ b/jni/openssl/crypto/aes/asm/aes-sparcv9.pl
@@ -0,0 +1,1182 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project. Rights for redistribution and usage in source and binary
+# forms are granted according to the OpenSSL license.
+# ====================================================================
+#
+# Version 1.1
+#
+# The major reason for undertaking this effort was to mitigate the hazard
+# of cache-timing attacks. This is [currently and initially!] addressed in
+# two ways. 1. S-boxes are compressed from 5KB to 2KB+256B each.
+# 2. References to them are scheduled for L2 cache latency, meaning
+# that the tables don't have to reside in L1 cache. Once again, this
+# is an initial draft and one should expect more countermeasures to
+# be implemented...
+#
+# Version 1.1 prefetches T[ed]4 in order to mitigate attack on last
+# round.
+#
+# Even though performance was not the primary goal [on the contrary,
+# the extra shifts "induced" by the compressed S-box and the longer loop
+# epilogue "induced" by scheduling for L2 have a negative effect on
+# performance], the code turned out to run in ~23 cycles per processed
+# byte en-/decrypted with a 128-bit key. This is a pretty good result
+# for code with the mentioned qualities on an UltraSPARC core. Compared
+# to Sun C generated code, my encrypt procedure runs just a few percent
+# faster, while the decrypt one is a whole 50% faster [yes, Sun C failed
+# to generate an optimal decrypt procedure]. Compared to GNU C generated
+# code, both procedures are more than 60% faster:-)
+
+$bits=32;
+for (@ARGV)	{ $bits=64 if (/\-m64/ || /\-xarch\=v9/); }
+if ($bits==64)	{ $bias=2047; $frame=192; }
+else		{ $bias=0;    $frame=112; }
+$locals=16;
+
+$acc0="%l0";
+$acc1="%o0";
+$acc2="%o1";
+$acc3="%o2";
+
+$acc4="%l1";
+$acc5="%o3";
+$acc6="%o4";
+$acc7="%o5";
+
+$acc8="%l2";
+$acc9="%o7";
+$acc10="%g1";
+$acc11="%g2";
+
+$acc12="%l3";
+$acc13="%g3";
+$acc14="%g4";
+$acc15="%g5";
+
+$t0="%l4";
+$t1="%l5";
+$t2="%l6";
+$t3="%l7";
+
+$s0="%i0";
+$s1="%i1";
+$s2="%i2";
+$s3="%i3";
+$tbl="%i4";
+$key="%i5";
+$rounds="%i7";	# aliases with return address, which is off-loaded to stack
+
+sub _data_word()
+{ my $i;
+    while(defined($i=shift)) { $code.=sprintf"\t.long\t0x%08x,0x%08x\n",$i,$i; }
+}
+
+$code.=<<___ if ($bits==64);
+.register	%g2,#scratch
+.register	%g3,#scratch
+___
+$code.=<<___;
+.section	".text",#alloc,#execinstr
+
+.align	256
+AES_Te:
+___
+&_data_word(
+	0xc66363a5, 0xf87c7c84, 0xee777799, 0xf67b7b8d,
+	0xfff2f20d, 0xd66b6bbd, 0xde6f6fb1, 0x91c5c554,
+	0x60303050, 0x02010103, 0xce6767a9, 0x562b2b7d,
+	0xe7fefe19, 0xb5d7d762, 0x4dababe6, 0xec76769a,
+	0x8fcaca45, 0x1f82829d, 0x89c9c940, 0xfa7d7d87,
+	0xeffafa15, 0xb25959eb, 0x8e4747c9, 0xfbf0f00b,
+	0x41adadec, 0xb3d4d467, 0x5fa2a2fd, 0x45afafea,
+	0x239c9cbf, 0x53a4a4f7, 0xe4727296, 0x9bc0c05b,
+	0x75b7b7c2, 0xe1fdfd1c, 0x3d9393ae, 0x4c26266a,
+	0x6c36365a, 0x7e3f3f41, 0xf5f7f702, 0x83cccc4f,
+	0x6834345c, 0x51a5a5f4, 0xd1e5e534, 0xf9f1f108,
+	0xe2717193, 0xabd8d873, 0x62313153, 0x2a15153f,
+	0x0804040c, 0x95c7c752, 0x46232365, 0x9dc3c35e,
+	0x30181828, 0x379696a1, 0x0a05050f, 0x2f9a9ab5,
+	0x0e070709, 0x24121236, 0x1b80809b, 0xdfe2e23d,
+	0xcdebeb26, 0x4e272769, 0x7fb2b2cd, 0xea75759f,
+	0x1209091b, 0x1d83839e, 0x582c2c74, 0x341a1a2e,
+	0x361b1b2d, 0xdc6e6eb2, 0xb45a5aee, 0x5ba0a0fb,
+	0xa45252f6, 0x763b3b4d, 0xb7d6d661, 0x7db3b3ce,
+	0x5229297b, 0xdde3e33e, 0x5e2f2f71, 0x13848497,
+	0xa65353f5, 0xb9d1d168, 0x00000000, 0xc1eded2c,
+	0x40202060, 0xe3fcfc1f, 0x79b1b1c8, 0xb65b5bed,
+	0xd46a6abe, 0x8dcbcb46, 0x67bebed9, 0x7239394b,
+	0x944a4ade, 0x984c4cd4, 0xb05858e8, 0x85cfcf4a,
+	0xbbd0d06b, 0xc5efef2a, 0x4faaaae5, 0xedfbfb16,
+	0x864343c5, 0x9a4d4dd7, 0x66333355, 0x11858594,
+	0x8a4545cf, 0xe9f9f910, 0x04020206, 0xfe7f7f81,
+	0xa05050f0, 0x783c3c44, 0x259f9fba, 0x4ba8a8e3,
+	0xa25151f3, 0x5da3a3fe, 0x804040c0, 0x058f8f8a,
+	0x3f9292ad, 0x219d9dbc, 0x70383848, 0xf1f5f504,
+	0x63bcbcdf, 0x77b6b6c1, 0xafdada75, 0x42212163,
+	0x20101030, 0xe5ffff1a, 0xfdf3f30e, 0xbfd2d26d,
+	0x81cdcd4c, 0x180c0c14, 0x26131335, 0xc3ecec2f,
+	0xbe5f5fe1, 0x359797a2, 0x884444cc, 0x2e171739,
+	0x93c4c457, 0x55a7a7f2, 0xfc7e7e82, 0x7a3d3d47,
+	0xc86464ac, 0xba5d5de7, 0x3219192b, 0xe6737395,
+	0xc06060a0, 0x19818198, 0x9e4f4fd1, 0xa3dcdc7f,
+	0x44222266, 0x542a2a7e, 0x3b9090ab, 0x0b888883,
+	0x8c4646ca, 0xc7eeee29, 0x6bb8b8d3, 0x2814143c,
+	0xa7dede79, 0xbc5e5ee2, 0x160b0b1d, 0xaddbdb76,
+	0xdbe0e03b, 0x64323256, 0x743a3a4e, 0x140a0a1e,
+	0x924949db, 0x0c06060a, 0x4824246c, 0xb85c5ce4,
+	0x9fc2c25d, 0xbdd3d36e, 0x43acacef, 0xc46262a6,
+	0x399191a8, 0x319595a4, 0xd3e4e437, 0xf279798b,
+	0xd5e7e732, 0x8bc8c843, 0x6e373759, 0xda6d6db7,
+	0x018d8d8c, 0xb1d5d564, 0x9c4e4ed2, 0x49a9a9e0,
+	0xd86c6cb4, 0xac5656fa, 0xf3f4f407, 0xcfeaea25,
+	0xca6565af, 0xf47a7a8e, 0x47aeaee9, 0x10080818,
+	0x6fbabad5, 0xf0787888, 0x4a25256f, 0x5c2e2e72,
+	0x381c1c24, 0x57a6a6f1, 0x73b4b4c7, 0x97c6c651,
+	0xcbe8e823, 0xa1dddd7c, 0xe874749c, 0x3e1f1f21,
+	0x964b4bdd, 0x61bdbddc, 0x0d8b8b86, 0x0f8a8a85,
+	0xe0707090, 0x7c3e3e42, 0x71b5b5c4, 0xcc6666aa,
+	0x904848d8, 0x06030305, 0xf7f6f601, 0x1c0e0e12,
+	0xc26161a3, 0x6a35355f, 0xae5757f9, 0x69b9b9d0,
+	0x17868691, 0x99c1c158, 0x3a1d1d27, 0x279e9eb9,
+	0xd9e1e138, 0xebf8f813, 0x2b9898b3, 0x22111133,
+	0xd26969bb, 0xa9d9d970, 0x078e8e89, 0x339494a7,
+	0x2d9b9bb6, 0x3c1e1e22, 0x15878792, 0xc9e9e920,
+	0x87cece49, 0xaa5555ff, 0x50282878, 0xa5dfdf7a,
+	0x038c8c8f, 0x59a1a1f8, 0x09898980, 0x1a0d0d17,
+	0x65bfbfda, 0xd7e6e631, 0x844242c6, 0xd06868b8,
+	0x824141c3, 0x299999b0, 0x5a2d2d77, 0x1e0f0f11,
+	0x7bb0b0cb, 0xa85454fc, 0x6dbbbbd6, 0x2c16163a);
+$code.=<<___;
+	.byte	0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5
+	.byte	0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76
+	.byte	0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0
+	.byte	0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0
+	.byte	0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc
+	.byte	0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15
+	.byte	0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a
+	.byte	0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75
+	.byte	0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0
+	.byte	0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84
+	.byte	0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b
+	.byte	0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf
+	.byte	0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85
+	.byte	0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8
+	.byte	0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5
+	.byte	0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2
+	.byte	0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17
+	.byte	0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73
+	.byte	0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88
+	.byte	0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb
+	.byte	0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c
+	.byte	0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79
+	.byte	0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9
+	.byte	0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08
+	.byte	0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6
+	.byte	0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a
+	.byte	0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e
+	.byte	0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e
+	.byte	0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94
+	.byte	0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf
+	.byte	0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68
+	.byte	0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
+.type	AES_Te,#object
+.size	AES_Te,(.-AES_Te)
+
+.align	64
+.skip	16
+_sparcv9_AES_encrypt:
+	save	%sp,-$frame-$locals,%sp
+	stx	%i7,[%sp+$bias+$frame+0]	! off-load return address
+	ld	[$key+240],$rounds
+	ld	[$key+0],$t0
+	ld	[$key+4],$t1			!
+	ld	[$key+8],$t2
+	srl	$rounds,1,$rounds
+	xor	$t0,$s0,$s0
+	ld	[$key+12],$t3
+	srl	$s0,21,$acc0
+	xor	$t1,$s1,$s1
+	ld	[$key+16],$t0
+	srl	$s1,13,$acc1			!
+	xor	$t2,$s2,$s2
+	ld	[$key+20],$t1
+	xor	$t3,$s3,$s3
+	ld	[$key+24],$t2
+	and	$acc0,2040,$acc0
+	ld	[$key+28],$t3
+	nop
+.Lenc_loop:
+	srl	$s2,5,$acc2			!
+	and	$acc1,2040,$acc1
+	ldx	[$tbl+$acc0],$acc0
+	sll	$s3,3,$acc3
+	and	$acc2,2040,$acc2
+	ldx	[$tbl+$acc1],$acc1
+	srl	$s1,21,$acc4
+	and	$acc3,2040,$acc3
+	ldx	[$tbl+$acc2],$acc2		!
+	srl	$s2,13,$acc5
+	and	$acc4,2040,$acc4
+	ldx	[$tbl+$acc3],$acc3
+	srl	$s3,5,$acc6
+	and	$acc5,2040,$acc5
+	ldx	[$tbl+$acc4],$acc4
+	fmovs	%f0,%f0
+	sll	$s0,3,$acc7			!
+	and	$acc6,2040,$acc6
+	ldx	[$tbl+$acc5],$acc5
+	srl	$s2,21,$acc8
+	and	$acc7,2040,$acc7
+	ldx	[$tbl+$acc6],$acc6
+	srl	$s3,13,$acc9
+	and	$acc8,2040,$acc8
+	ldx	[$tbl+$acc7],$acc7		!
+	srl	$s0,5,$acc10
+	and	$acc9,2040,$acc9
+	ldx	[$tbl+$acc8],$acc8
+	sll	$s1,3,$acc11
+	and	$acc10,2040,$acc10
+	ldx	[$tbl+$acc9],$acc9
+	fmovs	%f0,%f0
+	srl	$s3,21,$acc12			!
+	and	$acc11,2040,$acc11
+	ldx	[$tbl+$acc10],$acc10
+	srl	$s0,13,$acc13
+	and	$acc12,2040,$acc12
+	ldx	[$tbl+$acc11],$acc11
+	srl	$s1,5,$acc14
+	and	$acc13,2040,$acc13
+	ldx	[$tbl+$acc12],$acc12		!
+	sll	$s2,3,$acc15
+	and	$acc14,2040,$acc14
+	ldx	[$tbl+$acc13],$acc13
+	and	$acc15,2040,$acc15
+	add	$key,32,$key
+	ldx	[$tbl+$acc14],$acc14
+	fmovs	%f0,%f0
+	subcc	$rounds,1,$rounds		!
+	ldx	[$tbl+$acc15],$acc15
+	bz,a,pn	%icc,.Lenc_last
+	add	$tbl,2048,$rounds
+
+		srlx	$acc1,8,$acc1
+		xor	$acc0,$t0,$t0
+	ld	[$key+0],$s0
+	fmovs	%f0,%f0
+		srlx	$acc2,16,$acc2		!
+		xor	$acc1,$t0,$t0
+	ld	[$key+4],$s1
+		srlx	$acc3,24,$acc3
+		xor	$acc2,$t0,$t0
+	ld	[$key+8],$s2
+		srlx	$acc5,8,$acc5
+		xor	$acc3,$t0,$t0
+	ld	[$key+12],$s3			!
+		srlx	$acc6,16,$acc6
+		xor	$acc4,$t1,$t1
+	fmovs	%f0,%f0
+		srlx	$acc7,24,$acc7
+		xor	$acc5,$t1,$t1
+		srlx	$acc9,8,$acc9
+		xor	$acc6,$t1,$t1
+		srlx	$acc10,16,$acc10	!
+		xor	$acc7,$t1,$t1
+		srlx	$acc11,24,$acc11
+		xor	$acc8,$t2,$t2
+		srlx	$acc13,8,$acc13
+		xor	$acc9,$t2,$t2
+		srlx	$acc14,16,$acc14
+		xor	$acc10,$t2,$t2
+		srlx	$acc15,24,$acc15	!
+		xor	$acc11,$t2,$t2
+		xor	$acc12,$acc14,$acc14
+		xor	$acc13,$t3,$t3
+	srl	$t0,21,$acc0
+		xor	$acc14,$t3,$t3
+	srl	$t1,13,$acc1
+		xor	$acc15,$t3,$t3
+
+	and	$acc0,2040,$acc0		!
+	srl	$t2,5,$acc2
+	and	$acc1,2040,$acc1
+	ldx	[$tbl+$acc0],$acc0
+	sll	$t3,3,$acc3
+	and	$acc2,2040,$acc2
+	ldx	[$tbl+$acc1],$acc1
+	fmovs	%f0,%f0
+	srl	$t1,21,$acc4			!
+	and	$acc3,2040,$acc3
+	ldx	[$tbl+$acc2],$acc2
+	srl	$t2,13,$acc5
+	and	$acc4,2040,$acc4
+	ldx	[$tbl+$acc3],$acc3
+	srl	$t3,5,$acc6
+	and	$acc5,2040,$acc5
+	ldx	[$tbl+$acc4],$acc4		!
+	sll	$t0,3,$acc7
+	and	$acc6,2040,$acc6
+	ldx	[$tbl+$acc5],$acc5
+	srl	$t2,21,$acc8
+	and	$acc7,2040,$acc7
+	ldx	[$tbl+$acc6],$acc6
+	fmovs	%f0,%f0
+	srl	$t3,13,$acc9			!
+	and	$acc8,2040,$acc8
+	ldx	[$tbl+$acc7],$acc7
+	srl	$t0,5,$acc10
+	and	$acc9,2040,$acc9
+	ldx	[$tbl+$acc8],$acc8
+	sll	$t1,3,$acc11
+	and	$acc10,2040,$acc10
+	ldx	[$tbl+$acc9],$acc9		!
+	srl	$t3,21,$acc12
+	and	$acc11,2040,$acc11
+	ldx	[$tbl+$acc10],$acc10
+	srl	$t0,13,$acc13
+	and	$acc12,2040,$acc12
+	ldx	[$tbl+$acc11],$acc11
+	fmovs	%f0,%f0
+	srl	$t1,5,$acc14			!
+	and	$acc13,2040,$acc13
+	ldx	[$tbl+$acc12],$acc12
+	sll	$t2,3,$acc15
+	and	$acc14,2040,$acc14
+	ldx	[$tbl+$acc13],$acc13
+		srlx	$acc1,8,$acc1
+	and	$acc15,2040,$acc15
+	ldx	[$tbl+$acc14],$acc14		!
+
+		srlx	$acc2,16,$acc2
+		xor	$acc0,$s0,$s0
+	ldx	[$tbl+$acc15],$acc15
+		srlx	$acc3,24,$acc3
+		xor	$acc1,$s0,$s0
+	ld	[$key+16],$t0
+	fmovs	%f0,%f0
+		srlx	$acc5,8,$acc5		!
+		xor	$acc2,$s0,$s0
+	ld	[$key+20],$t1
+		srlx	$acc6,16,$acc6
+		xor	$acc3,$s0,$s0
+	ld	[$key+24],$t2
+		srlx	$acc7,24,$acc7
+		xor	$acc4,$s1,$s1
+	ld	[$key+28],$t3			!
+		srlx	$acc9,8,$acc9
+		xor	$acc5,$s1,$s1
+	ldx	[$tbl+2048+0],%g0		! prefetch te4
+		srlx	$acc10,16,$acc10
+		xor	$acc6,$s1,$s1
+	ldx	[$tbl+2048+32],%g0		! prefetch te4
+		srlx	$acc11,24,$acc11
+		xor	$acc7,$s1,$s1
+	ldx	[$tbl+2048+64],%g0		! prefetch te4
+		srlx	$acc13,8,$acc13
+		xor	$acc8,$s2,$s2
+	ldx	[$tbl+2048+96],%g0		! prefetch te4
+		srlx	$acc14,16,$acc14	!
+		xor	$acc9,$s2,$s2
+	ldx	[$tbl+2048+128],%g0		! prefetch te4
+		srlx	$acc15,24,$acc15
+		xor	$acc10,$s2,$s2
+	ldx	[$tbl+2048+160],%g0		! prefetch te4
+	srl	$s0,21,$acc0
+		xor	$acc11,$s2,$s2
+	ldx	[$tbl+2048+192],%g0		! prefetch te4
+		xor	$acc12,$acc14,$acc14
+		xor	$acc13,$s3,$s3
+	ldx	[$tbl+2048+224],%g0		! prefetch te4
+	srl	$s1,13,$acc1			!
+		xor	$acc14,$s3,$s3
+		xor	$acc15,$s3,$s3
+	ba	.Lenc_loop
+	and	$acc0,2040,$acc0
+
+.align	32
+.Lenc_last:
+		srlx	$acc1,8,$acc1		!
+		xor	$acc0,$t0,$t0
+	ld	[$key+0],$s0
+		srlx	$acc2,16,$acc2
+		xor	$acc1,$t0,$t0
+	ld	[$key+4],$s1
+		srlx	$acc3,24,$acc3
+		xor	$acc2,$t0,$t0
+	ld	[$key+8],$s2			!
+		srlx	$acc5,8,$acc5
+		xor	$acc3,$t0,$t0
+	ld	[$key+12],$s3
+		srlx	$acc6,16,$acc6
+		xor	$acc4,$t1,$t1
+		srlx	$acc7,24,$acc7
+		xor	$acc5,$t1,$t1
+		srlx	$acc9,8,$acc9		!
+		xor	$acc6,$t1,$t1
+		srlx	$acc10,16,$acc10
+		xor	$acc7,$t1,$t1
+		srlx	$acc11,24,$acc11
+		xor	$acc8,$t2,$t2
+		srlx	$acc13,8,$acc13
+		xor	$acc9,$t2,$t2
+		srlx	$acc14,16,$acc14	!
+		xor	$acc10,$t2,$t2
+		srlx	$acc15,24,$acc15
+		xor	$acc11,$t2,$t2
+		xor	$acc12,$acc14,$acc14
+		xor	$acc13,$t3,$t3
+	srl	$t0,24,$acc0
+		xor	$acc14,$t3,$t3
+	srl	$t1,16,$acc1			!
+		xor	$acc15,$t3,$t3
+
+	srl	$t2,8,$acc2
+	and	$acc1,255,$acc1
+	ldub	[$rounds+$acc0],$acc0
+	srl	$t1,24,$acc4
+	and	$acc2,255,$acc2
+	ldub	[$rounds+$acc1],$acc1
+	srl	$t2,16,$acc5			!
+	and	$t3,255,$acc3
+	ldub	[$rounds+$acc2],$acc2
+	ldub	[$rounds+$acc3],$acc3
+	srl	$t3,8,$acc6
+	and	$acc5,255,$acc5
+	ldub	[$rounds+$acc4],$acc4
+	fmovs	%f0,%f0
+	srl	$t2,24,$acc8			!
+	and	$acc6,255,$acc6
+	ldub	[$rounds+$acc5],$acc5
+	srl	$t3,16,$acc9
+	and	$t0,255,$acc7
+	ldub	[$rounds+$acc6],$acc6
+	ldub	[$rounds+$acc7],$acc7
+	fmovs	%f0,%f0
+	srl	$t0,8,$acc10			!
+	and	$acc9,255,$acc9
+	ldub	[$rounds+$acc8],$acc8
+	srl	$t3,24,$acc12
+	and	$acc10,255,$acc10
+	ldub	[$rounds+$acc9],$acc9
+	srl	$t0,16,$acc13
+	and	$t1,255,$acc11
+	ldub	[$rounds+$acc10],$acc10		!
+	srl	$t1,8,$acc14
+	and	$acc13,255,$acc13
+	ldub	[$rounds+$acc11],$acc11
+	ldub	[$rounds+$acc12],$acc12
+	and	$acc14,255,$acc14
+	ldub	[$rounds+$acc13],$acc13
+	and	$t2,255,$acc15
+	ldub	[$rounds+$acc14],$acc14		!
+
+		sll	$acc0,24,$acc0
+		xor	$acc3,$s0,$s0
+	ldub	[$rounds+$acc15],$acc15
+		sll	$acc1,16,$acc1
+		xor	$acc0,$s0,$s0
+	ldx	[%sp+$bias+$frame+0],%i7	! restore return address
+	fmovs	%f0,%f0
+		sll	$acc2,8,$acc2		!
+		xor	$acc1,$s0,$s0
+		sll	$acc4,24,$acc4
+		xor	$acc2,$s0,$s0
+		sll	$acc5,16,$acc5
+		xor	$acc7,$s1,$s1
+		sll	$acc6,8,$acc6
+		xor	$acc4,$s1,$s1
+		sll	$acc8,24,$acc8		!
+		xor	$acc5,$s1,$s1
+		sll	$acc9,16,$acc9
+		xor	$acc11,$s2,$s2
+		sll	$acc10,8,$acc10
+		xor	$acc6,$s1,$s1
+		sll	$acc12,24,$acc12
+		xor	$acc8,$s2,$s2
+		sll	$acc13,16,$acc13	!
+		xor	$acc9,$s2,$s2
+		sll	$acc14,8,$acc14
+		xor	$acc10,$s2,$s2
+		xor	$acc12,$acc14,$acc14
+		xor	$acc13,$s3,$s3
+		xor	$acc14,$s3,$s3
+		xor	$acc15,$s3,$s3
+
+	ret
+	restore
+.type	_sparcv9_AES_encrypt,#function
+.size	_sparcv9_AES_encrypt,(.-_sparcv9_AES_encrypt)
+
+.align	32
+.globl	AES_encrypt
+AES_encrypt:
+	or	%o0,%o1,%g1
+	andcc	%g1,3,%g0
+	bnz,pn	%xcc,.Lunaligned_enc
+	save	%sp,-$frame,%sp
+
+	ld	[%i0+0],%o0
+	ld	[%i0+4],%o1
+	ld	[%i0+8],%o2
+	ld	[%i0+12],%o3
+
+1:	call	.+8
+	add	%o7,AES_Te-1b,%o4
+	call	_sparcv9_AES_encrypt
+	mov	%i2,%o5
+
+	st	%o0,[%i1+0]
+	st	%o1,[%i1+4]
+	st	%o2,[%i1+8]
+	st	%o3,[%i1+12]
+
+	ret
+	restore
+
+.align	32
+.Lunaligned_enc:
+	ldub	[%i0+0],%l0
+	ldub	[%i0+1],%l1
+	ldub	[%i0+2],%l2
+
+	sll	%l0,24,%l0
+	ldub	[%i0+3],%l3
+	sll	%l1,16,%l1
+	ldub	[%i0+4],%l4
+	sll	%l2,8,%l2
+	or	%l1,%l0,%l0
+	ldub	[%i0+5],%l5
+	sll	%l4,24,%l4
+	or	%l3,%l2,%l2
+	ldub	[%i0+6],%l6
+	sll	%l5,16,%l5
+	or	%l0,%l2,%o0
+	ldub	[%i0+7],%l7
+
+	sll	%l6,8,%l6
+	or	%l5,%l4,%l4
+	ldub	[%i0+8],%l0
+	or	%l7,%l6,%l6
+	ldub	[%i0+9],%l1
+	or	%l4,%l6,%o1
+	ldub	[%i0+10],%l2
+
+	sll	%l0,24,%l0
+	ldub	[%i0+11],%l3
+	sll	%l1,16,%l1
+	ldub	[%i0+12],%l4
+	sll	%l2,8,%l2
+	or	%l1,%l0,%l0
+	ldub	[%i0+13],%l5
+	sll	%l4,24,%l4
+	or	%l3,%l2,%l2
+	ldub	[%i0+14],%l6
+	sll	%l5,16,%l5
+	or	%l0,%l2,%o2
+	ldub	[%i0+15],%l7
+
+	sll	%l6,8,%l6
+	or	%l5,%l4,%l4
+	or	%l7,%l6,%l6
+	or	%l4,%l6,%o3
+
+1:	call	.+8
+	add	%o7,AES_Te-1b,%o4
+	call	_sparcv9_AES_encrypt
+	mov	%i2,%o5
+
+	srl	%o0,24,%l0
+	srl	%o0,16,%l1
+	stb	%l0,[%i1+0]
+	srl	%o0,8,%l2
+	stb	%l1,[%i1+1]
+	stb	%l2,[%i1+2]
+	srl	%o1,24,%l4
+	stb	%o0,[%i1+3]
+
+	srl	%o1,16,%l5
+	stb	%l4,[%i1+4]
+	srl	%o1,8,%l6
+	stb	%l5,[%i1+5]
+	stb	%l6,[%i1+6]
+	srl	%o2,24,%l0
+	stb	%o1,[%i1+7]
+
+	srl	%o2,16,%l1
+	stb	%l0,[%i1+8]
+	srl	%o2,8,%l2
+	stb	%l1,[%i1+9]
+	stb	%l2,[%i1+10]
+	srl	%o3,24,%l4
+	stb	%o2,[%i1+11]
+
+	srl	%o3,16,%l5
+	stb	%l4,[%i1+12]
+	srl	%o3,8,%l6
+	stb	%l5,[%i1+13]
+	stb	%l6,[%i1+14]
+	stb	%o3,[%i1+15]
+
+	ret
+	restore
+.type	AES_encrypt,#function
+.size	AES_encrypt,(.-AES_encrypt)
+
+___
+
+$code.=<<___;
+.align	256
+AES_Td:
+___
+&_data_word(
+	0x51f4a750, 0x7e416553, 0x1a17a4c3, 0x3a275e96,
+	0x3bab6bcb, 0x1f9d45f1, 0xacfa58ab, 0x4be30393,
+	0x2030fa55, 0xad766df6, 0x88cc7691, 0xf5024c25,
+	0x4fe5d7fc, 0xc52acbd7, 0x26354480, 0xb562a38f,
+	0xdeb15a49, 0x25ba1b67, 0x45ea0e98, 0x5dfec0e1,
+	0xc32f7502, 0x814cf012, 0x8d4697a3, 0x6bd3f9c6,
+	0x038f5fe7, 0x15929c95, 0xbf6d7aeb, 0x955259da,
+	0xd4be832d, 0x587421d3, 0x49e06929, 0x8ec9c844,
+	0x75c2896a, 0xf48e7978, 0x99583e6b, 0x27b971dd,
+	0xbee14fb6, 0xf088ad17, 0xc920ac66, 0x7dce3ab4,
+	0x63df4a18, 0xe51a3182, 0x97513360, 0x62537f45,
+	0xb16477e0, 0xbb6bae84, 0xfe81a01c, 0xf9082b94,
+	0x70486858, 0x8f45fd19, 0x94de6c87, 0x527bf8b7,
+	0xab73d323, 0x724b02e2, 0xe31f8f57, 0x6655ab2a,
+	0xb2eb2807, 0x2fb5c203, 0x86c57b9a, 0xd33708a5,
+	0x302887f2, 0x23bfa5b2, 0x02036aba, 0xed16825c,
+	0x8acf1c2b, 0xa779b492, 0xf307f2f0, 0x4e69e2a1,
+	0x65daf4cd, 0x0605bed5, 0xd134621f, 0xc4a6fe8a,
+	0x342e539d, 0xa2f355a0, 0x058ae132, 0xa4f6eb75,
+	0x0b83ec39, 0x4060efaa, 0x5e719f06, 0xbd6e1051,
+	0x3e218af9, 0x96dd063d, 0xdd3e05ae, 0x4de6bd46,
+	0x91548db5, 0x71c45d05, 0x0406d46f, 0x605015ff,
+	0x1998fb24, 0xd6bde997, 0x894043cc, 0x67d99e77,
+	0xb0e842bd, 0x07898b88, 0xe7195b38, 0x79c8eedb,
+	0xa17c0a47, 0x7c420fe9, 0xf8841ec9, 0x00000000,
+	0x09808683, 0x322bed48, 0x1e1170ac, 0x6c5a724e,
+	0xfd0efffb, 0x0f853856, 0x3daed51e, 0x362d3927,
+	0x0a0fd964, 0x685ca621, 0x9b5b54d1, 0x24362e3a,
+	0x0c0a67b1, 0x9357e70f, 0xb4ee96d2, 0x1b9b919e,
+	0x80c0c54f, 0x61dc20a2, 0x5a774b69, 0x1c121a16,
+	0xe293ba0a, 0xc0a02ae5, 0x3c22e043, 0x121b171d,
+	0x0e090d0b, 0xf28bc7ad, 0x2db6a8b9, 0x141ea9c8,
+	0x57f11985, 0xaf75074c, 0xee99ddbb, 0xa37f60fd,
+	0xf701269f, 0x5c72f5bc, 0x44663bc5, 0x5bfb7e34,
+	0x8b432976, 0xcb23c6dc, 0xb6edfc68, 0xb8e4f163,
+	0xd731dcca, 0x42638510, 0x13972240, 0x84c61120,
+	0x854a247d, 0xd2bb3df8, 0xaef93211, 0xc729a16d,
+	0x1d9e2f4b, 0xdcb230f3, 0x0d8652ec, 0x77c1e3d0,
+	0x2bb3166c, 0xa970b999, 0x119448fa, 0x47e96422,
+	0xa8fc8cc4, 0xa0f03f1a, 0x567d2cd8, 0x223390ef,
+	0x87494ec7, 0xd938d1c1, 0x8ccaa2fe, 0x98d40b36,
+	0xa6f581cf, 0xa57ade28, 0xdab78e26, 0x3fadbfa4,
+	0x2c3a9de4, 0x5078920d, 0x6a5fcc9b, 0x547e4662,
+	0xf68d13c2, 0x90d8b8e8, 0x2e39f75e, 0x82c3aff5,
+	0x9f5d80be, 0x69d0937c, 0x6fd52da9, 0xcf2512b3,
+	0xc8ac993b, 0x10187da7, 0xe89c636e, 0xdb3bbb7b,
+	0xcd267809, 0x6e5918f4, 0xec9ab701, 0x834f9aa8,
+	0xe6956e65, 0xaaffe67e, 0x21bccf08, 0xef15e8e6,
+	0xbae79bd9, 0x4a6f36ce, 0xea9f09d4, 0x29b07cd6,
+	0x31a4b2af, 0x2a3f2331, 0xc6a59430, 0x35a266c0,
+	0x744ebc37, 0xfc82caa6, 0xe090d0b0, 0x33a7d815,
+	0xf104984a, 0x41ecdaf7, 0x7fcd500e, 0x1791f62f,
+	0x764dd68d, 0x43efb04d, 0xccaa4d54, 0xe49604df,
+	0x9ed1b5e3, 0x4c6a881b, 0xc12c1fb8, 0x4665517f,
+	0x9d5eea04, 0x018c355d, 0xfa877473, 0xfb0b412e,
+	0xb3671d5a, 0x92dbd252, 0xe9105633, 0x6dd64713,
+	0x9ad7618c, 0x37a10c7a, 0x59f8148e, 0xeb133c89,
+	0xcea927ee, 0xb761c935, 0xe11ce5ed, 0x7a47b13c,
+	0x9cd2df59, 0x55f2733f, 0x1814ce79, 0x73c737bf,
+	0x53f7cdea, 0x5ffdaa5b, 0xdf3d6f14, 0x7844db86,
+	0xcaaff381, 0xb968c43e, 0x3824342c, 0xc2a3405f,
+	0x161dc372, 0xbce2250c, 0x283c498b, 0xff0d9541,
+	0x39a80171, 0x080cb3de, 0xd8b4e49c, 0x6456c190,
+	0x7bcb8461, 0xd532b670, 0x486c5c74, 0xd0b85742);
+$code.=<<___;
+	.byte	0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38
+	.byte	0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb
+	.byte	0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87
+	.byte	0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb
+	.byte	0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d
+	.byte	0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e
+	.byte	0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2
+	.byte	0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25
+	.byte	0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16
+	.byte	0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92
+	.byte	0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda
+	.byte	0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84
+	.byte	0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a
+	.byte	0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06
+	.byte	0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02
+	.byte	0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b
+	.byte	0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea
+	.byte	0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73
+	.byte	0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85
+	.byte	0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e
+	.byte	0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89
+	.byte	0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b
+	.byte	0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20
+	.byte	0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4
+	.byte	0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31
+	.byte	0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f
+	.byte	0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d
+	.byte	0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef
+	.byte	0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0
+	.byte	0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61
+	.byte	0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26
+	.byte	0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
+.type	AES_Td,#object
+.size	AES_Td,(.-AES_Td)
+
+.align	64
+.skip	16
+_sparcv9_AES_decrypt:
+	save	%sp,-$frame-$locals,%sp
+	stx	%i7,[%sp+$bias+$frame+0]	! off-load return address
+	ld	[$key+240],$rounds
+	ld	[$key+0],$t0
+	ld	[$key+4],$t1			!
+	ld	[$key+8],$t2
+	ld	[$key+12],$t3
+	srl	$rounds,1,$rounds
+	xor	$t0,$s0,$s0
+	ld	[$key+16],$t0
+	xor	$t1,$s1,$s1
+	ld	[$key+20],$t1
+	srl	$s0,21,$acc0			!
+	xor	$t2,$s2,$s2
+	ld	[$key+24],$t2
+	xor	$t3,$s3,$s3
+	and	$acc0,2040,$acc0
+	ld	[$key+28],$t3
+	srl	$s3,13,$acc1
+	nop
+.Ldec_loop:
+	srl	$s2,5,$acc2			!
+	and	$acc1,2040,$acc1
+	ldx	[$tbl+$acc0],$acc0
+	sll	$s1,3,$acc3
+	and	$acc2,2040,$acc2
+	ldx	[$tbl+$acc1],$acc1
+	srl	$s1,21,$acc4
+	and	$acc3,2040,$acc3
+	ldx	[$tbl+$acc2],$acc2		!
+	srl	$s0,13,$acc5
+	and	$acc4,2040,$acc4
+	ldx	[$tbl+$acc3],$acc3
+	srl	$s3,5,$acc6
+	and	$acc5,2040,$acc5
+	ldx	[$tbl+$acc4],$acc4
+	fmovs	%f0,%f0
+	sll	$s2,3,$acc7			!
+	and	$acc6,2040,$acc6
+	ldx	[$tbl+$acc5],$acc5
+	srl	$s2,21,$acc8
+	and	$acc7,2040,$acc7
+	ldx	[$tbl+$acc6],$acc6
+	srl	$s1,13,$acc9
+	and	$acc8,2040,$acc8
+	ldx	[$tbl+$acc7],$acc7		!
+	srl	$s0,5,$acc10
+	and	$acc9,2040,$acc9
+	ldx	[$tbl+$acc8],$acc8
+	sll	$s3,3,$acc11
+	and	$acc10,2040,$acc10
+	ldx	[$tbl+$acc9],$acc9
+	fmovs	%f0,%f0
+	srl	$s3,21,$acc12			!
+	and	$acc11,2040,$acc11
+	ldx	[$tbl+$acc10],$acc10
+	srl	$s2,13,$acc13
+	and	$acc12,2040,$acc12
+	ldx	[$tbl+$acc11],$acc11
+	srl	$s1,5,$acc14
+	and	$acc13,2040,$acc13
+	ldx	[$tbl+$acc12],$acc12		!
+	sll	$s0,3,$acc15
+	and	$acc14,2040,$acc14
+	ldx	[$tbl+$acc13],$acc13
+	and	$acc15,2040,$acc15
+	add	$key,32,$key
+	ldx	[$tbl+$acc14],$acc14
+	fmovs	%f0,%f0
+	subcc	$rounds,1,$rounds		!
+	ldx	[$tbl+$acc15],$acc15
+	bz,a,pn	%icc,.Ldec_last
+	add	$tbl,2048,$rounds
+
+		srlx	$acc1,8,$acc1
+		xor	$acc0,$t0,$t0
+	ld	[$key+0],$s0
+	fmovs	%f0,%f0
+		srlx	$acc2,16,$acc2		!
+		xor	$acc1,$t0,$t0
+	ld	[$key+4],$s1
+		srlx	$acc3,24,$acc3
+		xor	$acc2,$t0,$t0
+	ld	[$key+8],$s2
+		srlx	$acc5,8,$acc5
+		xor	$acc3,$t0,$t0
+	ld	[$key+12],$s3			!
+		srlx	$acc6,16,$acc6
+		xor	$acc4,$t1,$t1
+	fmovs	%f0,%f0
+		srlx	$acc7,24,$acc7
+		xor	$acc5,$t1,$t1
+		srlx	$acc9,8,$acc9
+		xor	$acc6,$t1,$t1
+		srlx	$acc10,16,$acc10	!
+		xor	$acc7,$t1,$t1
+		srlx	$acc11,24,$acc11
+		xor	$acc8,$t2,$t2
+		srlx	$acc13,8,$acc13
+		xor	$acc9,$t2,$t2
+		srlx	$acc14,16,$acc14
+		xor	$acc10,$t2,$t2
+		srlx	$acc15,24,$acc15	!
+		xor	$acc11,$t2,$t2
+		xor	$acc12,$acc14,$acc14
+		xor	$acc13,$t3,$t3
+	srl	$t0,21,$acc0
+		xor	$acc14,$t3,$t3
+		xor	$acc15,$t3,$t3
+	srl	$t3,13,$acc1
+
+	and	$acc0,2040,$acc0		!
+	srl	$t2,5,$acc2
+	and	$acc1,2040,$acc1
+	ldx	[$tbl+$acc0],$acc0
+	sll	$t1,3,$acc3
+	and	$acc2,2040,$acc2
+	ldx	[$tbl+$acc1],$acc1
+	fmovs	%f0,%f0
+	srl	$t1,21,$acc4			!
+	and	$acc3,2040,$acc3
+	ldx	[$tbl+$acc2],$acc2
+	srl	$t0,13,$acc5
+	and	$acc4,2040,$acc4
+	ldx	[$tbl+$acc3],$acc3
+	srl	$t3,5,$acc6
+	and	$acc5,2040,$acc5
+	ldx	[$tbl+$acc4],$acc4		!
+	sll	$t2,3,$acc7
+	and	$acc6,2040,$acc6
+	ldx	[$tbl+$acc5],$acc5
+	srl	$t2,21,$acc8
+	and	$acc7,2040,$acc7
+	ldx	[$tbl+$acc6],$acc6
+	fmovs	%f0,%f0
+	srl	$t1,13,$acc9			!
+	and	$acc8,2040,$acc8
+	ldx	[$tbl+$acc7],$acc7
+	srl	$t0,5,$acc10
+	and	$acc9,2040,$acc9
+	ldx	[$tbl+$acc8],$acc8
+	sll	$t3,3,$acc11
+	and	$acc10,2040,$acc10
+	ldx	[$tbl+$acc9],$acc9		!
+	srl	$t3,21,$acc12
+	and	$acc11,2040,$acc11
+	ldx	[$tbl+$acc10],$acc10
+	srl	$t2,13,$acc13
+	and	$acc12,2040,$acc12
+	ldx	[$tbl+$acc11],$acc11
+	fmovs	%f0,%f0
+	srl	$t1,5,$acc14			!
+	and	$acc13,2040,$acc13
+	ldx	[$tbl+$acc12],$acc12
+	sll	$t0,3,$acc15
+	and	$acc14,2040,$acc14
+	ldx	[$tbl+$acc13],$acc13
+		srlx	$acc1,8,$acc1
+	and	$acc15,2040,$acc15
+	ldx	[$tbl+$acc14],$acc14		!
+
+		srlx	$acc2,16,$acc2
+		xor	$acc0,$s0,$s0
+	ldx	[$tbl+$acc15],$acc15
+		srlx	$acc3,24,$acc3
+		xor	$acc1,$s0,$s0
+	ld	[$key+16],$t0
+	fmovs	%f0,%f0
+		srlx	$acc5,8,$acc5		!
+		xor	$acc2,$s0,$s0
+	ld	[$key+20],$t1
+		srlx	$acc6,16,$acc6
+		xor	$acc3,$s0,$s0
+	ld	[$key+24],$t2
+		srlx	$acc7,24,$acc7
+		xor	$acc4,$s1,$s1
+	ld	[$key+28],$t3			!
+		srlx	$acc9,8,$acc9
+		xor	$acc5,$s1,$s1
+	ldx	[$tbl+2048+0],%g0		! prefetch td4
+		srlx	$acc10,16,$acc10
+		xor	$acc6,$s1,$s1
+	ldx	[$tbl+2048+32],%g0		! prefetch td4
+		srlx	$acc11,24,$acc11
+		xor	$acc7,$s1,$s1
+	ldx	[$tbl+2048+64],%g0		! prefetch td4
+		srlx	$acc13,8,$acc13
+		xor	$acc8,$s2,$s2
+	ldx	[$tbl+2048+96],%g0		! prefetch td4
+		srlx	$acc14,16,$acc14	!
+		xor	$acc9,$s2,$s2
+	ldx	[$tbl+2048+128],%g0		! prefetch td4
+		srlx	$acc15,24,$acc15
+		xor	$acc10,$s2,$s2
+	ldx	[$tbl+2048+160],%g0		! prefetch td4
+	srl	$s0,21,$acc0
+		xor	$acc11,$s2,$s2
+	ldx	[$tbl+2048+192],%g0		! prefetch td4
+		xor	$acc12,$acc14,$acc14
+		xor	$acc13,$s3,$s3
+	ldx	[$tbl+2048+224],%g0		! prefetch td4
+	and	$acc0,2040,$acc0		!
+		xor	$acc14,$s3,$s3
+		xor	$acc15,$s3,$s3
+	ba	.Ldec_loop
+	srl	$s3,13,$acc1
+
+.align	32
+.Ldec_last:
+		srlx	$acc1,8,$acc1		!
+		xor	$acc0,$t0,$t0
+	ld	[$key+0],$s0
+		srlx	$acc2,16,$acc2
+		xor	$acc1,$t0,$t0
+	ld	[$key+4],$s1
+		srlx	$acc3,24,$acc3
+		xor	$acc2,$t0,$t0
+	ld	[$key+8],$s2			!
+		srlx	$acc5,8,$acc5
+		xor	$acc3,$t0,$t0
+	ld	[$key+12],$s3
+		srlx	$acc6,16,$acc6
+		xor	$acc4,$t1,$t1
+		srlx	$acc7,24,$acc7
+		xor	$acc5,$t1,$t1
+		srlx	$acc9,8,$acc9		!
+		xor	$acc6,$t1,$t1
+		srlx	$acc10,16,$acc10
+		xor	$acc7,$t1,$t1
+		srlx	$acc11,24,$acc11
+		xor	$acc8,$t2,$t2
+		srlx	$acc13,8,$acc13
+		xor	$acc9,$t2,$t2
+		srlx	$acc14,16,$acc14	!
+		xor	$acc10,$t2,$t2
+		srlx	$acc15,24,$acc15
+		xor	$acc11,$t2,$t2
+		xor	$acc12,$acc14,$acc14
+		xor	$acc13,$t3,$t3
+	srl	$t0,24,$acc0
+		xor	$acc14,$t3,$t3
+		xor	$acc15,$t3,$t3		!
+	srl	$t3,16,$acc1
+
+	srl	$t2,8,$acc2
+	and	$acc1,255,$acc1
+	ldub	[$rounds+$acc0],$acc0
+	srl	$t1,24,$acc4
+	and	$acc2,255,$acc2
+	ldub	[$rounds+$acc1],$acc1
+	srl	$t0,16,$acc5			!
+	and	$t1,255,$acc3
+	ldub	[$rounds+$acc2],$acc2
+	ldub	[$rounds+$acc3],$acc3
+	srl	$t3,8,$acc6
+	and	$acc5,255,$acc5
+	ldub	[$rounds+$acc4],$acc4
+	fmovs	%f0,%f0
+	srl	$t2,24,$acc8			!
+	and	$acc6,255,$acc6
+	ldub	[$rounds+$acc5],$acc5
+	srl	$t1,16,$acc9
+	and	$t2,255,$acc7
+	ldub	[$rounds+$acc6],$acc6
+	ldub	[$rounds+$acc7],$acc7
+	fmovs	%f0,%f0
+	srl	$t0,8,$acc10			!
+	and	$acc9,255,$acc9
+	ldub	[$rounds+$acc8],$acc8
+	srl	$t3,24,$acc12
+	and	$acc10,255,$acc10
+	ldub	[$rounds+$acc9],$acc9
+	srl	$t2,16,$acc13
+	and	$t3,255,$acc11
+	ldub	[$rounds+$acc10],$acc10		!
+	srl	$t1,8,$acc14
+	and	$acc13,255,$acc13
+	ldub	[$rounds+$acc11],$acc11
+	ldub	[$rounds+$acc12],$acc12
+	and	$acc14,255,$acc14
+	ldub	[$rounds+$acc13],$acc13
+	and	$t0,255,$acc15
+	ldub	[$rounds+$acc14],$acc14		!
+
+		sll	$acc0,24,$acc0
+		xor	$acc3,$s0,$s0
+	ldub	[$rounds+$acc15],$acc15
+		sll	$acc1,16,$acc1
+		xor	$acc0,$s0,$s0
+	ldx	[%sp+$bias+$frame+0],%i7	! restore return address
+	fmovs	%f0,%f0
+		sll	$acc2,8,$acc2		!
+		xor	$acc1,$s0,$s0
+		sll	$acc4,24,$acc4
+		xor	$acc2,$s0,$s0
+		sll	$acc5,16,$acc5
+		xor	$acc7,$s1,$s1
+		sll	$acc6,8,$acc6
+		xor	$acc4,$s1,$s1
+		sll	$acc8,24,$acc8		!
+		xor	$acc5,$s1,$s1
+		sll	$acc9,16,$acc9
+		xor	$acc11,$s2,$s2
+		sll	$acc10,8,$acc10
+		xor	$acc6,$s1,$s1
+		sll	$acc12,24,$acc12
+		xor	$acc8,$s2,$s2
+		sll	$acc13,16,$acc13	!
+		xor	$acc9,$s2,$s2
+		sll	$acc14,8,$acc14
+		xor	$acc10,$s2,$s2
+		xor	$acc12,$acc14,$acc14
+		xor	$acc13,$s3,$s3
+		xor	$acc14,$s3,$s3
+		xor	$acc15,$s3,$s3
+
+	ret
+	restore
+.type	_sparcv9_AES_decrypt,#function
+.size	_sparcv9_AES_decrypt,(.-_sparcv9_AES_decrypt)
+
+.align	32
+.globl	AES_decrypt
+AES_decrypt:
+	or	%o0,%o1,%g1
+	andcc	%g1,3,%g0
+	bnz,pn	%xcc,.Lunaligned_dec
+	save	%sp,-$frame,%sp
+
+	ld	[%i0+0],%o0
+	ld	[%i0+4],%o1
+	ld	[%i0+8],%o2
+	ld	[%i0+12],%o3
+
+1:	call	.+8
+	add	%o7,AES_Td-1b,%o4
+	call	_sparcv9_AES_decrypt
+	mov	%i2,%o5
+
+	st	%o0,[%i1+0]
+	st	%o1,[%i1+4]
+	st	%o2,[%i1+8]
+	st	%o3,[%i1+12]
+
+	ret
+	restore
+
+.align	32
+.Lunaligned_dec:
+	ldub	[%i0+0],%l0
+	ldub	[%i0+1],%l1
+	ldub	[%i0+2],%l2
+
+	sll	%l0,24,%l0
+	ldub	[%i0+3],%l3
+	sll	%l1,16,%l1
+	ldub	[%i0+4],%l4
+	sll	%l2,8,%l2
+	or	%l1,%l0,%l0
+	ldub	[%i0+5],%l5
+	sll	%l4,24,%l4
+	or	%l3,%l2,%l2
+	ldub	[%i0+6],%l6
+	sll	%l5,16,%l5
+	or	%l0,%l2,%o0
+	ldub	[%i0+7],%l7
+
+	sll	%l6,8,%l6
+	or	%l5,%l4,%l4
+	ldub	[%i0+8],%l0
+	or	%l7,%l6,%l6
+	ldub	[%i0+9],%l1
+	or	%l4,%l6,%o1
+	ldub	[%i0+10],%l2
+
+	sll	%l0,24,%l0
+	ldub	[%i0+11],%l3
+	sll	%l1,16,%l1
+	ldub	[%i0+12],%l4
+	sll	%l2,8,%l2
+	or	%l1,%l0,%l0
+	ldub	[%i0+13],%l5
+	sll	%l4,24,%l4
+	or	%l3,%l2,%l2
+	ldub	[%i0+14],%l6
+	sll	%l5,16,%l5
+	or	%l0,%l2,%o2
+	ldub	[%i0+15],%l7
+
+	sll	%l6,8,%l6
+	or	%l5,%l4,%l4
+	or	%l7,%l6,%l6
+	or	%l4,%l6,%o3
+
+1:	call	.+8
+	add	%o7,AES_Td-1b,%o4
+	call	_sparcv9_AES_decrypt
+	mov	%i2,%o5
+
+	srl	%o0,24,%l0
+	srl	%o0,16,%l1
+	stb	%l0,[%i1+0]
+	srl	%o0,8,%l2
+	stb	%l1,[%i1+1]
+	stb	%l2,[%i1+2]
+	srl	%o1,24,%l4
+	stb	%o0,[%i1+3]
+
+	srl	%o1,16,%l5
+	stb	%l4,[%i1+4]
+	srl	%o1,8,%l6
+	stb	%l5,[%i1+5]
+	stb	%l6,[%i1+6]
+	srl	%o2,24,%l0
+	stb	%o1,[%i1+7]
+
+	srl	%o2,16,%l1
+	stb	%l0,[%i1+8]
+	srl	%o2,8,%l2
+	stb	%l1,[%i1+9]
+	stb	%l2,[%i1+10]
+	srl	%o3,24,%l4
+	stb	%o2,[%i1+11]
+
+	srl	%o3,16,%l5
+	stb	%l4,[%i1+12]
+	srl	%o3,8,%l6
+	stb	%l5,[%i1+13]
+	stb	%l6,[%i1+14]
+	stb	%o3,[%i1+15]
+
+	ret
+	restore
+.type	AES_decrypt,#function
+.size	AES_decrypt,(.-AES_decrypt)
+___
+
+# fmovs instructions substituting for FP nops were originally added
+# to meet specific instruction alignment requirements and so maximize ILP.
+# As UltraSPARC T1, a.k.a. Niagara, has a shared FPU, FP nops can have
+# an undesired effect, so just omit them and sacrifice a fraction of a
+# percent in performance...
+$code =~ s/fmovs.*$//gm;
+
+print $code;
+close STDOUT;	# ensure flush
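As a side note on the indexing trick used throughout the SPARC round loops above: byte extraction and table scaling are fused by shifting each state byte down to bit 3 and masking with 2040 (0xff<<3), which yields a ready-made offset into the 8-byte-per-entry Te/Td tables indexed by the ldx loads. A minimal Perl sketch of that index computation (te_offset is a hypothetical name, not something the patch defines):

	sub te_offset {
		my ($s, $n) = @_;	# $n selects byte 0 (most significant) .. 3
		my $shift = 21 - 8*$n;	# 21, 13, 5, -3, matching the srl/sll pairs above
		my $v = $shift >= 0 ? ($s >> $shift) : ($s << -$shift);
		return $v & 2040;	# 2040 == 0xff<<3, i.e. the byte value pre-scaled by 8
	}

Pre-scaling by 8 matches the 8-byte table entries, so a single and performs both the byte mask and the multiply.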
diff --git a/jni/openssl/crypto/aes/asm/aes-x86_64.pl b/jni/openssl/crypto/aes/asm/aes-x86_64.pl
new file mode 100755
index 0000000..48fa857
--- /dev/null
+++ b/jni/openssl/crypto/aes/asm/aes-x86_64.pl
@@ -0,0 +1,2818 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# Version 2.1.
+#
+# aes-*-cbc benchmarks are improved by >70% [compared to gcc 3.3.2 on
+# an Opteron 240 CPU], plus all the bells and whistles from the 32-bit
+# version [you'll notice a lot of resemblance], such as compressed
+# S-boxes in little-endian byte order, prefetch of these tables in CBC
+# mode, and avoiding L1 cache aliasing between the stack frame, the key
+# schedule and the already mentioned tables, compressed Td4...
+#
+# Performance in number of cycles per processed byte for 128-bit key:
+#
+#		ECB encrypt	ECB decrypt	CBC large chunk
+# AMD64		33		41		13.0
+# EM64T		38		59		18.6(*)
+# Core 2	30		43		14.5(*)
+#
+# (*) with hyper-threading off
+
+$flavour = shift;
+$output  = shift;
+if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
+
+$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
+die "can't locate x86_64-xlate.pl";
+
+open STDOUT,"| $^X $xlate $flavour $output";
+
+$verticalspin=1;	# unlike in the 32-bit version, $verticalspin
+			# performs ~15% better on both AMD and Intel cores
+$speed_limit=512;	# see aes-586.pl for details
+
+$code=".text\n";
+
+$s0="%eax";
+$s1="%ebx";
+$s2="%ecx";
+$s3="%edx";
+$acc0="%esi";	$mask80="%rsi";
+$acc1="%edi";	$maskfe="%rdi";
+$acc2="%ebp";	$mask1b="%rbp";
+$inp="%r8";
+$out="%r9";
+$t0="%r10d";
+$t1="%r11d";
+$t2="%r12d";
+$rnds="%r13d";
+$sbox="%r14";
+$key="%r15";
+
+sub hi() { my $r=shift;	$r =~ s/%[er]([a-d])x/%\1h/;	$r; }
+sub lo() { my $r=shift;	$r =~ s/%[er]([a-d])x/%\1l/;
+			$r =~ s/%[er]([sd]i)/%\1l/;
+			$r =~ s/%(r[0-9]+)[d]?/%\1b/;	$r; }
+sub LO() { my $r=shift; $r =~ s/%r([a-z]+)/%e\1/;
+			$r =~ s/%r([0-9]+)/%r\1d/;	$r; }
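The three helpers above rewrite register names purely textually. As a quick illustration, assuming the subs are in scope (this snippet is not part of the patch):

	print &lo("%eax"), " ", &hi("%ebx"), " ", &lo("%r10d"), " ", &LO("%rax"), "\n";
	# prints: %al %bh %r10b %eax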
+sub _data_word()
+{ my $i;
+    while(defined($i=shift)) { $code.=sprintf".long\t0x%08x,0x%08x\n",$i,$i; }
+}
+sub data_word()
+{ my $i;
+  my $last=pop(@_);
+    $code.=".long\t";
+    while(defined($i=shift)) { $code.=sprintf"0x%08x,",$i; }
+    $code.=sprintf"0x%08x\n",$last;
+}
+
+sub data_byte()
+{ my $i;
+  my $last=pop(@_);
+    $code.=".byte\t";
+    while(defined($i=shift)) { $code.=sprintf"0x%02x,",$i&0xff; }
+    $code.=sprintf"0x%02x\n",$last&0xff;
+}
+
+sub encvert()
+{ my $t3="%r8d";	# zaps $inp!
+
+$code.=<<___;
+	# favor 3-way issue Opteron pipeline...
+	movzb	`&lo("$s0")`,$acc0
+	movzb	`&lo("$s1")`,$acc1
+	movzb	`&lo("$s2")`,$acc2
+	mov	0($sbox,$acc0,8),$t0
+	mov	0($sbox,$acc1,8),$t1
+	mov	0($sbox,$acc2,8),$t2
+
+	movzb	`&hi("$s1")`,$acc0
+	movzb	`&hi("$s2")`,$acc1
+	movzb	`&lo("$s3")`,$acc2
+	xor	3($sbox,$acc0,8),$t0
+	xor	3($sbox,$acc1,8),$t1
+	mov	0($sbox,$acc2,8),$t3
+
+	movzb	`&hi("$s3")`,$acc0
+	shr	\$16,$s2
+	movzb	`&hi("$s0")`,$acc2
+	xor	3($sbox,$acc0,8),$t2
+	shr	\$16,$s3
+	xor	3($sbox,$acc2,8),$t3
+
+	shr	\$16,$s1
+	lea	16($key),$key
+	shr	\$16,$s0
+
+	movzb	`&lo("$s2")`,$acc0
+	movzb	`&lo("$s3")`,$acc1
+	movzb	`&lo("$s0")`,$acc2
+	xor	2($sbox,$acc0,8),$t0
+	xor	2($sbox,$acc1,8),$t1
+	xor	2($sbox,$acc2,8),$t2
+
+	movzb	`&hi("$s3")`,$acc0
+	movzb	`&hi("$s0")`,$acc1
+	movzb	`&lo("$s1")`,$acc2
+	xor	1($sbox,$acc0,8),$t0
+	xor	1($sbox,$acc1,8),$t1
+	xor	2($sbox,$acc2,8),$t3
+
+	mov	12($key),$s3
+	movzb	`&hi("$s1")`,$acc1
+	movzb	`&hi("$s2")`,$acc2
+	mov	0($key),$s0
+	xor	1($sbox,$acc1,8),$t2
+	xor	1($sbox,$acc2,8),$t3
+
+	mov	4($key),$s1
+	mov	8($key),$s2
+	xor	$t0,$s0
+	xor	$t1,$s1
+	xor	$t2,$s2
+	xor	$t3,$s3
+___
+}
+
+sub enclastvert()
+{ my $t3="%r8d";	# zaps $inp!
+
+$code.=<<___;
+	movzb	`&lo("$s0")`,$acc0
+	movzb	`&lo("$s1")`,$acc1
+	movzb	`&lo("$s2")`,$acc2
+	movzb	2($sbox,$acc0,8),$t0
+	movzb	2($sbox,$acc1,8),$t1
+	movzb	2($sbox,$acc2,8),$t2
+
+	movzb	`&lo("$s3")`,$acc0
+	movzb	`&hi("$s1")`,$acc1
+	movzb	`&hi("$s2")`,$acc2
+	movzb	2($sbox,$acc0,8),$t3
+	mov	0($sbox,$acc1,8),$acc1	#$t0
+	mov	0($sbox,$acc2,8),$acc2	#$t1
+
+	and	\$0x0000ff00,$acc1
+	and	\$0x0000ff00,$acc2
+
+	xor	$acc1,$t0
+	xor	$acc2,$t1
+	shr	\$16,$s2
+
+	movzb	`&hi("$s3")`,$acc0
+	movzb	`&hi("$s0")`,$acc1
+	shr	\$16,$s3
+	mov	0($sbox,$acc0,8),$acc0	#$t2
+	mov	0($sbox,$acc1,8),$acc1	#$t3
+
+	and	\$0x0000ff00,$acc0
+	and	\$0x0000ff00,$acc1
+	shr	\$16,$s1
+	xor	$acc0,$t2
+	xor	$acc1,$t3
+	shr	\$16,$s0
+
+	movzb	`&lo("$s2")`,$acc0
+	movzb	`&lo("$s3")`,$acc1
+	movzb	`&lo("$s0")`,$acc2
+	mov	0($sbox,$acc0,8),$acc0	#$t0
+	mov	0($sbox,$acc1,8),$acc1	#$t1
+	mov	0($sbox,$acc2,8),$acc2	#$t2
+
+	and	\$0x00ff0000,$acc0
+	and	\$0x00ff0000,$acc1
+	and	\$0x00ff0000,$acc2
+
+	xor	$acc0,$t0
+	xor	$acc1,$t1
+	xor	$acc2,$t2
+
+	movzb	`&lo("$s1")`,$acc0
+	movzb	`&hi("$s3")`,$acc1
+	movzb	`&hi("$s0")`,$acc2
+	mov	0($sbox,$acc0,8),$acc0	#$t3
+	mov	2($sbox,$acc1,8),$acc1	#$t0
+	mov	2($sbox,$acc2,8),$acc2	#$t1
+
+	and	\$0x00ff0000,$acc0
+	and	\$0xff000000,$acc1
+	and	\$0xff000000,$acc2
+
+	xor	$acc0,$t3
+	xor	$acc1,$t0
+	xor	$acc2,$t1
+
+	movzb	`&hi("$s1")`,$acc0
+	movzb	`&hi("$s2")`,$acc1
+	mov	16+12($key),$s3
+	mov	2($sbox,$acc0,8),$acc0	#$t2
+	mov	2($sbox,$acc1,8),$acc1	#$t3
+	mov	16+0($key),$s0
+
+	and	\$0xff000000,$acc0
+	and	\$0xff000000,$acc1
+
+	xor	$acc0,$t2
+	xor	$acc1,$t3
+
+	mov	16+4($key),$s1
+	mov	16+8($key),$s2
+	xor	$t0,$s0
+	xor	$t1,$s1
+	xor	$t2,$s2
+	xor	$t3,$s3
+___
+}
+
+sub encstep()
+{ my ($i,@s) = @_;
+  my $tmp0=$acc0;
+  my $tmp1=$acc1;
+  my $tmp2=$acc2;
+  my $out=($t0,$t1,$t2,$s[0])[$i];
+
+	if ($i==3) {
+		$tmp0=$s[1];
+		$tmp1=$s[2];
+		$tmp2=$s[3];
+	}
+	$code.="	movzb	".&lo($s[0]).",$out\n";
+	$code.="	mov	$s[2],$tmp1\n"		if ($i!=3);
+	$code.="	lea	16($key),$key\n"	if ($i==0);
+
+	$code.="	movzb	".&hi($s[1]).",$tmp0\n";
+	$code.="	mov	0($sbox,$out,8),$out\n";
+
+	$code.="	shr	\$16,$tmp1\n";
+	$code.="	mov	$s[3],$tmp2\n"		if ($i!=3);
+	$code.="	xor	3($sbox,$tmp0,8),$out\n";
+
+	$code.="	movzb	".&lo($tmp1).",$tmp1\n";
+	$code.="	shr	\$24,$tmp2\n";
+	$code.="	xor	4*$i($key),$out\n";
+
+	$code.="	xor	2($sbox,$tmp1,8),$out\n";
+	$code.="	xor	1($sbox,$tmp2,8),$out\n";
+
+	$code.="	mov	$t0,$s[1]\n"		if ($i==3);
+	$code.="	mov	$t1,$s[2]\n"		if ($i==3);
+	$code.="	mov	$t2,$s[3]\n"		if ($i==3);
+	$code.="\n";
+}
+
+sub enclast()
+{ my ($i,@s)=@_;
+  my $tmp0=$acc0;
+  my $tmp1=$acc1;
+  my $tmp2=$acc2;
+  my $out=($t0,$t1,$t2,$s[0])[$i];
+
+	if ($i==3) {
+		$tmp0=$s[1];
+		$tmp1=$s[2];
+		$tmp2=$s[3];
+	}
+	$code.="	movzb	".&lo($s[0]).",$out\n";
+	$code.="	mov	$s[2],$tmp1\n"		if ($i!=3);
+
+	$code.="	mov	2($sbox,$out,8),$out\n";
+	$code.="	shr	\$16,$tmp1\n";
+	$code.="	mov	$s[3],$tmp2\n"		if ($i!=3);
+
+	$code.="	and	\$0x000000ff,$out\n";
+	$code.="	movzb	".&hi($s[1]).",$tmp0\n";
+	$code.="	movzb	".&lo($tmp1).",$tmp1\n";
+	$code.="	shr	\$24,$tmp2\n";
+
+	$code.="	mov	0($sbox,$tmp0,8),$tmp0\n";
+	$code.="	mov	0($sbox,$tmp1,8),$tmp1\n";
+	$code.="	mov	2($sbox,$tmp2,8),$tmp2\n";
+
+	$code.="	and	\$0x0000ff00,$tmp0\n";
+	$code.="	and	\$0x00ff0000,$tmp1\n";
+	$code.="	and	\$0xff000000,$tmp2\n";
+
+	$code.="	xor	$tmp0,$out\n";
+	$code.="	mov	$t0,$s[1]\n"		if ($i==3);
+	$code.="	xor	$tmp1,$out\n";
+	$code.="	mov	$t1,$s[2]\n"		if ($i==3);
+	$code.="	xor	$tmp2,$out\n";
+	$code.="	mov	$t2,$s[3]\n"		if ($i==3);
+	$code.="\n";
+}
+
+$code.=<<___;
+.type	_x86_64_AES_encrypt,\@abi-omnipotent
+.align	16
+_x86_64_AES_encrypt:
+	xor	0($key),$s0			# xor with key
+	xor	4($key),$s1
+	xor	8($key),$s2
+	xor	12($key),$s3
+
+	mov	240($key),$rnds			# load key->rounds
+	sub	\$1,$rnds
+	jmp	.Lenc_loop
+.align	16
+.Lenc_loop:
+___
+	if ($verticalspin) { &encvert(); }
+	else {	&encstep(0,$s0,$s1,$s2,$s3);
+		&encstep(1,$s1,$s2,$s3,$s0);
+		&encstep(2,$s2,$s3,$s0,$s1);
+		&encstep(3,$s3,$s0,$s1,$s2);
+	}
+$code.=<<___;
+	sub	\$1,$rnds
+	jnz	.Lenc_loop
+___
+	if ($verticalspin) { &enclastvert(); }
+	else {	&enclast(0,$s0,$s1,$s2,$s3);
+		&enclast(1,$s1,$s2,$s3,$s0);
+		&enclast(2,$s2,$s3,$s0,$s1);
+		&enclast(3,$s3,$s0,$s1,$s2);
+		$code.=<<___;
+		xor	16+0($key),$s0		# xor with key
+		xor	16+4($key),$s1
+		xor	16+8($key),$s2
+		xor	16+12($key),$s3
+___
+	}
+$code.=<<___;
+	.byte	0xf3,0xc3			# rep ret
+.size	_x86_64_AES_encrypt,.-_x86_64_AES_encrypt
+___
+
+# it's possible to implement this by shifting tN by 8, filling the least
+# significant byte with a byte load and finally bswap-ing at the end,
+# but such a partial register load kills Core 2...
+sub enccompactvert()
+{ my ($t3,$t4,$t5)=("%r8d","%r9d","%r13d");
+
+$code.=<<___;
+	movzb	`&lo("$s0")`,$t0
+	movzb	`&lo("$s1")`,$t1
+	movzb	`&lo("$s2")`,$t2
+	movzb	($sbox,$t0,1),$t0
+	movzb	($sbox,$t1,1),$t1
+	movzb	($sbox,$t2,1),$t2
+
+	movzb	`&lo("$s3")`,$t3
+	movzb	`&hi("$s1")`,$acc0
+	movzb	`&hi("$s2")`,$acc1
+	movzb	($sbox,$t3,1),$t3
+	movzb	($sbox,$acc0,1),$t4	#$t0
+	movzb	($sbox,$acc1,1),$t5	#$t1
+
+	movzb	`&hi("$s3")`,$acc2
+	movzb	`&hi("$s0")`,$acc0
+	shr	\$16,$s2
+	movzb	($sbox,$acc2,1),$acc2	#$t2
+	movzb	($sbox,$acc0,1),$acc0	#$t3
+	shr	\$16,$s3
+
+	movzb	`&lo("$s2")`,$acc1
+	shl	\$8,$t4
+	shl	\$8,$t5
+	movzb	($sbox,$acc1,1),$acc1	#$t0
+	xor	$t4,$t0
+	xor	$t5,$t1
+
+	movzb	`&lo("$s3")`,$t4
+	shr	\$16,$s0
+	shr	\$16,$s1
+	movzb	`&lo("$s0")`,$t5
+	shl	\$8,$acc2
+	shl	\$8,$acc0
+	movzb	($sbox,$t4,1),$t4	#$t1
+	movzb	($sbox,$t5,1),$t5	#$t2
+	xor	$acc2,$t2
+	xor	$acc0,$t3
+
+	movzb	`&lo("$s1")`,$acc2
+	movzb	`&hi("$s3")`,$acc0
+	shl	\$16,$acc1
+	movzb	($sbox,$acc2,1),$acc2	#$t3
+	movzb	($sbox,$acc0,1),$acc0	#$t0
+	xor	$acc1,$t0
+
+	movzb	`&hi("$s0")`,$acc1
+	shr	\$8,$s2
+	shr	\$8,$s1
+	movzb	($sbox,$acc1,1),$acc1	#$t1
+	movzb	($sbox,$s2,1),$s3	#$t3
+	movzb	($sbox,$s1,1),$s2	#$t2
+	shl	\$16,$t4
+	shl	\$16,$t5
+	shl	\$16,$acc2
+	xor	$t4,$t1
+	xor	$t5,$t2
+	xor	$acc2,$t3
+
+	shl	\$24,$acc0
+	shl	\$24,$acc1
+	shl	\$24,$s3
+	xor	$acc0,$t0
+	shl	\$24,$s2
+	xor	$acc1,$t1
+	mov	$t0,$s0
+	mov	$t1,$s1
+	xor	$t2,$s2
+	xor	$t3,$s3
+___
+}
+
+sub enctransform_ref()
+{ my $sn = shift;
+  my ($acc,$r2,$tmp)=("%r8d","%r9d","%r13d");
+
+$code.=<<___;
+	mov	$sn,$acc
+	and	\$0x80808080,$acc
+	mov	$acc,$tmp
+	shr	\$7,$tmp
+	lea	($sn,$sn),$r2
+	sub	$tmp,$acc
+	and	\$0xfefefefe,$r2
+	and	\$0x1b1b1b1b,$acc
+	mov	$sn,$tmp
+	xor	$acc,$r2
+
+	xor	$r2,$sn
+	rol	\$24,$sn
+	xor	$r2,$sn
+	ror	\$16,$tmp
+	xor	$tmp,$sn
+	ror	\$8,$tmp
+	xor	$tmp,$sn
+___
+}
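enctransform_ref above is the scalar reference for the packed GF(2^8) doubling performed with the 0x80808080/0xfefefefe/0x1b1b1b1b masks. A minimal Perl sketch of that step (xtime_packed is a hypothetical name used for illustration only):

	sub xtime_packed {
		my ($w) = @_;				# four AES bytes packed in one 32-bit word
		my $hi  = $w & 0x80808080;		# high bit of every byte
		$hi    -= $hi >> 7;			# 0x80 -> 0x7f per byte, no inter-byte borrow
		my $dbl = ($w << 1) & 0xfefefefe;	# per-byte left shift, carries dropped
		return $dbl ^ ($hi & 0x1b1b1b1b);	# xor the AES polynomial 0x1b where a byte overflowed
	}

enctransform then mixes the doubled word with byte rotations of the original state word to complete MixColumns.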
+
+# unlike the decrypt case, it does not pay off to parallelize enctransform
+sub enctransform()
+{ my ($t3,$r20,$r21)=($acc2,"%r8d","%r9d");
+
+$code.=<<___;
+	mov	$s0,$acc0
+	mov	$s1,$acc1
+	and	\$0x80808080,$acc0
+	and	\$0x80808080,$acc1
+	mov	$acc0,$t0
+	mov	$acc1,$t1
+	shr	\$7,$t0
+	lea	($s0,$s0),$r20
+	shr	\$7,$t1
+	lea	($s1,$s1),$r21
+	sub	$t0,$acc0
+	sub	$t1,$acc1
+	and	\$0xfefefefe,$r20
+	and	\$0xfefefefe,$r21
+	and	\$0x1b1b1b1b,$acc0
+	and	\$0x1b1b1b1b,$acc1
+	mov	$s0,$t0
+	mov	$s1,$t1
+	xor	$acc0,$r20
+	xor	$acc1,$r21
+
+	xor	$r20,$s0
+	xor	$r21,$s1
+	 mov	$s2,$acc0
+	 mov	$s3,$acc1
+	rol	\$24,$s0
+	rol	\$24,$s1
+	 and	\$0x80808080,$acc0
+	 and	\$0x80808080,$acc1
+	xor	$r20,$s0
+	xor	$r21,$s1
+	 mov	$acc0,$t2
+	 mov	$acc1,$t3
+	ror	\$16,$t0
+	ror	\$16,$t1
+	 shr	\$7,$t2
+	 lea	($s2,$s2),$r20
+	xor	$t0,$s0
+	xor	$t1,$s1
+	 shr	\$7,$t3
+	 lea	($s3,$s3),$r21
+	ror	\$8,$t0
+	ror	\$8,$t1
+	 sub	$t2,$acc0
+	 sub	$t3,$acc1
+	xor	$t0,$s0
+	xor	$t1,$s1
+
+	and	\$0xfefefefe,$r20
+	and	\$0xfefefefe,$r21
+	and	\$0x1b1b1b1b,$acc0
+	and	\$0x1b1b1b1b,$acc1
+	mov	$s2,$t2
+	mov	$s3,$t3
+	xor	$acc0,$r20
+	xor	$acc1,$r21
+
+	xor	$r20,$s2
+	xor	$r21,$s3
+	rol	\$24,$s2
+	rol	\$24,$s3
+	xor	$r20,$s2
+	xor	$r21,$s3
+	mov	0($sbox),$acc0			# prefetch Te4
+	ror	\$16,$t2
+	ror	\$16,$t3
+	mov	64($sbox),$acc1
+	xor	$t2,$s2
+	xor	$t3,$s3
+	mov	128($sbox),$r20
+	ror	\$8,$t2
+	ror	\$8,$t3
+	mov	192($sbox),$r21
+	xor	$t2,$s2
+	xor	$t3,$s3
+___
+}
+
+$code.=<<___;
+.type	_x86_64_AES_encrypt_compact,\@abi-omnipotent
+.align	16
+_x86_64_AES_encrypt_compact:
+	lea	128($sbox),$inp			# size optimization
+	mov	0-128($inp),$acc1		# prefetch Te4
+	mov	32-128($inp),$acc2
+	mov	64-128($inp),$t0
+	mov	96-128($inp),$t1
+	mov	128-128($inp),$acc1
+	mov	160-128($inp),$acc2
+	mov	192-128($inp),$t0
+	mov	224-128($inp),$t1
+	jmp	.Lenc_loop_compact
+.align	16
+.Lenc_loop_compact:
+		xor	0($key),$s0		# xor with key
+		xor	4($key),$s1
+		xor	8($key),$s2
+		xor	12($key),$s3
+		lea	16($key),$key
+___
+		&enccompactvert();
+$code.=<<___;
+		cmp	16(%rsp),$key
+		je	.Lenc_compact_done
+___
+		&enctransform();
+$code.=<<___;
+	jmp	.Lenc_loop_compact
+.align	16
+.Lenc_compact_done:
+	xor	0($key),$s0
+	xor	4($key),$s1
+	xor	8($key),$s2
+	xor	12($key),$s3
+	.byte	0xf3,0xc3			# rep ret
+.size	_x86_64_AES_encrypt_compact,.-_x86_64_AES_encrypt_compact
+___
+
+# void AES_encrypt (const void *inp,void *out,const AES_KEY *key);
+$code.=<<___;
+.globl	AES_encrypt
+.type	AES_encrypt,\@function,3
+.align	16
+.globl	asm_AES_encrypt
+.hidden	asm_AES_encrypt
+asm_AES_encrypt:
+AES_encrypt:
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+
+	# allocate frame "above" key schedule
+	mov	%rsp,%r10
+	lea	-63(%rdx),%rcx	# %rdx is key argument
+	and	\$-64,%rsp
+	sub	%rsp,%rcx
+	neg	%rcx
+	and	\$0x3c0,%rcx
+	sub	%rcx,%rsp
+	sub	\$32,%rsp
+
+	mov	%rsi,16(%rsp)	# save out
+	mov	%r10,24(%rsp)	# save real stack pointer
+.Lenc_prologue:
+
+	mov	%rdx,$key
+	mov	240($key),$rnds	# load rounds
+
+	mov	0(%rdi),$s0	# load input vector
+	mov	4(%rdi),$s1
+	mov	8(%rdi),$s2
+	mov	12(%rdi),$s3
+
+	shl	\$4,$rnds
+	lea	($key,$rnds),%rbp
+	mov	$key,(%rsp)	# key schedule
+	mov	%rbp,8(%rsp)	# end of key schedule
+
+	# pick Te4 copy which can't "overlap" with stack frame or key schedule
+	lea	.LAES_Te+2048(%rip),$sbox
+	lea	768(%rsp),%rbp
+	sub	$sbox,%rbp
+	and	\$0x300,%rbp
+	lea	($sbox,%rbp),$sbox
+
+	call	_x86_64_AES_encrypt_compact
+
+	mov	16(%rsp),$out	# restore out
+	mov	24(%rsp),%rsi	# restore saved stack pointer
+	mov	$s0,0($out)	# write output vector
+	mov	$s1,4($out)
+	mov	$s2,8($out)
+	mov	$s3,12($out)
+
+	mov	(%rsi),%r15
+	mov	8(%rsi),%r14
+	mov	16(%rsi),%r13
+	mov	24(%rsi),%r12
+	mov	32(%rsi),%rbp
+	mov	40(%rsi),%rbx
+	lea	48(%rsi),%rsp
+.Lenc_epilogue:
+	ret
+.size	AES_encrypt,.-AES_encrypt
+___
+
+#------------------------------------------------------------------#
+
+sub decvert()
+{ my $t3="%r8d";	# zaps $inp!
+
+$code.=<<___;
+	# favor 3-way issue Opteron pipeline...
+	movzb	`&lo("$s0")`,$acc0
+	movzb	`&lo("$s1")`,$acc1
+	movzb	`&lo("$s2")`,$acc2
+	mov	0($sbox,$acc0,8),$t0
+	mov	0($sbox,$acc1,8),$t1
+	mov	0($sbox,$acc2,8),$t2
+
+	movzb	`&hi("$s3")`,$acc0
+	movzb	`&hi("$s0")`,$acc1
+	movzb	`&lo("$s3")`,$acc2
+	xor	3($sbox,$acc0,8),$t0
+	xor	3($sbox,$acc1,8),$t1
+	mov	0($sbox,$acc2,8),$t3
+
+	movzb	`&hi("$s1")`,$acc0
+	shr	\$16,$s0
+	movzb	`&hi("$s2")`,$acc2
+	xor	3($sbox,$acc0,8),$t2
+	shr	\$16,$s3
+	xor	3($sbox,$acc2,8),$t3
+
+	shr	\$16,$s1
+	lea	16($key),$key
+	shr	\$16,$s2
+
+	movzb	`&lo("$s2")`,$acc0
+	movzb	`&lo("$s3")`,$acc1
+	movzb	`&lo("$s0")`,$acc2
+	xor	2($sbox,$acc0,8),$t0
+	xor	2($sbox,$acc1,8),$t1
+	xor	2($sbox,$acc2,8),$t2
+
+	movzb	`&hi("$s1")`,$acc0
+	movzb	`&hi("$s2")`,$acc1
+	movzb	`&lo("$s1")`,$acc2
+	xor	1($sbox,$acc0,8),$t0
+	xor	1($sbox,$acc1,8),$t1
+	xor	2($sbox,$acc2,8),$t3
+
+	movzb	`&hi("$s3")`,$acc0
+	mov	12($key),$s3
+	movzb	`&hi("$s0")`,$acc2
+	xor	1($sbox,$acc0,8),$t2
+	mov	0($key),$s0
+	xor	1($sbox,$acc2,8),$t3
+
+	xor	$t0,$s0
+	mov	4($key),$s1
+	mov	8($key),$s2
+	xor	$t2,$s2
+	xor	$t1,$s1
+	xor	$t3,$s3
+___
+}
+
+sub declastvert()
+{ my $t3="%r8d";	# zaps $inp!
+
+$code.=<<___;
+	lea	2048($sbox),$sbox	# size optimization
+	movzb	`&lo("$s0")`,$acc0
+	movzb	`&lo("$s1")`,$acc1
+	movzb	`&lo("$s2")`,$acc2
+	movzb	($sbox,$acc0,1),$t0
+	movzb	($sbox,$acc1,1),$t1
+	movzb	($sbox,$acc2,1),$t2
+
+	movzb	`&lo("$s3")`,$acc0
+	movzb	`&hi("$s3")`,$acc1
+	movzb	`&hi("$s0")`,$acc2
+	movzb	($sbox,$acc0,1),$t3
+	movzb	($sbox,$acc1,1),$acc1	#$t0
+	movzb	($sbox,$acc2,1),$acc2	#$t1
+
+	shl	\$8,$acc1
+	shl	\$8,$acc2
+
+	xor	$acc1,$t0
+	xor	$acc2,$t1
+	shr	\$16,$s3
+
+	movzb	`&hi("$s1")`,$acc0
+	movzb	`&hi("$s2")`,$acc1
+	shr	\$16,$s0
+	movzb	($sbox,$acc0,1),$acc0	#$t2
+	movzb	($sbox,$acc1,1),$acc1	#$t3
+
+	shl	\$8,$acc0
+	shl	\$8,$acc1
+	shr	\$16,$s1
+	xor	$acc0,$t2
+	xor	$acc1,$t3
+	shr	\$16,$s2
+
+	movzb	`&lo("$s2")`,$acc0
+	movzb	`&lo("$s3")`,$acc1
+	movzb	`&lo("$s0")`,$acc2
+	movzb	($sbox,$acc0,1),$acc0	#$t0
+	movzb	($sbox,$acc1,1),$acc1	#$t1
+	movzb	($sbox,$acc2,1),$acc2	#$t2
+
+	shl	\$16,$acc0
+	shl	\$16,$acc1
+	shl	\$16,$acc2
+
+	xor	$acc0,$t0
+	xor	$acc1,$t1
+	xor	$acc2,$t2
+
+	movzb	`&lo("$s1")`,$acc0
+	movzb	`&hi("$s1")`,$acc1
+	movzb	`&hi("$s2")`,$acc2
+	movzb	($sbox,$acc0,1),$acc0	#$t3
+	movzb	($sbox,$acc1,1),$acc1	#$t0
+	movzb	($sbox,$acc2,1),$acc2	#$t1
+
+	shl	\$16,$acc0
+	shl	\$24,$acc1
+	shl	\$24,$acc2
+
+	xor	$acc0,$t3
+	xor	$acc1,$t0
+	xor	$acc2,$t1
+
+	movzb	`&hi("$s3")`,$acc0
+	movzb	`&hi("$s0")`,$acc1
+	mov	16+12($key),$s3
+	movzb	($sbox,$acc0,1),$acc0	#$t2
+	movzb	($sbox,$acc1,1),$acc1	#$t3
+	mov	16+0($key),$s0
+
+	shl	\$24,$acc0
+	shl	\$24,$acc1
+
+	xor	$acc0,$t2
+	xor	$acc1,$t3
+
+	mov	16+4($key),$s1
+	mov	16+8($key),$s2
+	lea	-2048($sbox),$sbox
+	xor	$t0,$s0
+	xor	$t1,$s1
+	xor	$t2,$s2
+	xor	$t3,$s3
+___
+}
+
+sub decstep()
+{ my ($i,@s) = @_;
+  my $tmp0=$acc0;
+  my $tmp1=$acc1;
+  my $tmp2=$acc2;
+  my $out=($t0,$t1,$t2,$s[0])[$i];
+
+	$code.="	mov	$s[0],$out\n"		if ($i!=3);
+			$tmp1=$s[2]			if ($i==3);
+	$code.="	mov	$s[2],$tmp1\n"		if ($i!=3);
+	$code.="	and	\$0xFF,$out\n";
+
+	$code.="	mov	0($sbox,$out,8),$out\n";
+	$code.="	shr	\$16,$tmp1\n";
+			$tmp2=$s[3]			if ($i==3);
+	$code.="	mov	$s[3],$tmp2\n"		if ($i!=3);
+
+			$tmp0=$s[1]			if ($i==3);
+	$code.="	movzb	".&hi($s[1]).",$tmp0\n";
+	$code.="	and	\$0xFF,$tmp1\n";
+	$code.="	shr	\$24,$tmp2\n";
+
+	$code.="	xor	3($sbox,$tmp0,8),$out\n";
+	$code.="	xor	2($sbox,$tmp1,8),$out\n";
+	$code.="	xor	1($sbox,$tmp2,8),$out\n";
+
+	$code.="	mov	$t2,$s[1]\n"		if ($i==3);
+	$code.="	mov	$t1,$s[2]\n"		if ($i==3);
+	$code.="	mov	$t0,$s[3]\n"		if ($i==3);
+	$code.="\n";
+}
+
+sub declast()
+{ my ($i,@s)=@_;
+  my $tmp0=$acc0;
+  my $tmp1=$acc1;
+  my $tmp2=$acc2;
+  my $out=($t0,$t1,$t2,$s[0])[$i];
+
+	$code.="	mov	$s[0],$out\n"		if ($i!=3);
+			$tmp1=$s[2]			if ($i==3);
+	$code.="	mov	$s[2],$tmp1\n"		if ($i!=3);
+	$code.="	and	\$0xFF,$out\n";
+
+	$code.="	movzb	2048($sbox,$out,1),$out\n";
+	$code.="	shr	\$16,$tmp1\n";
+			$tmp2=$s[3]			if ($i==3);
+	$code.="	mov	$s[3],$tmp2\n"		if ($i!=3);
+
+			$tmp0=$s[1]			if ($i==3);
+	$code.="	movzb	".&hi($s[1]).",$tmp0\n";
+	$code.="	and	\$0xFF,$tmp1\n";
+	$code.="	shr	\$24,$tmp2\n";
+
+	$code.="	movzb	2048($sbox,$tmp0,1),$tmp0\n";
+	$code.="	movzb	2048($sbox,$tmp1,1),$tmp1\n";
+	$code.="	movzb	2048($sbox,$tmp2,1),$tmp2\n";
+
+	$code.="	shl	\$8,$tmp0\n";
+	$code.="	shl	\$16,$tmp1\n";
+	$code.="	shl	\$24,$tmp2\n";
+
+	$code.="	xor	$tmp0,$out\n";
+	$code.="	mov	$t2,$s[1]\n"		if ($i==3);
+	$code.="	xor	$tmp1,$out\n";
+	$code.="	mov	$t1,$s[2]\n"		if ($i==3);
+	$code.="	xor	$tmp2,$out\n";
+	$code.="	mov	$t0,$s[3]\n"		if ($i==3);
+	$code.="\n";
+}
+
+$code.=<<___;
+.type	_x86_64_AES_decrypt,\@abi-omnipotent
+.align	16
+_x86_64_AES_decrypt:
+	xor	0($key),$s0			# xor with key
+	xor	4($key),$s1
+	xor	8($key),$s2
+	xor	12($key),$s3
+
+	mov	240($key),$rnds			# load key->rounds
+	sub	\$1,$rnds
+	jmp	.Ldec_loop
+.align	16
+.Ldec_loop:
+___
+	if ($verticalspin) { &decvert(); }
+	else {	&decstep(0,$s0,$s3,$s2,$s1);
+		&decstep(1,$s1,$s0,$s3,$s2);
+		&decstep(2,$s2,$s1,$s0,$s3);
+		&decstep(3,$s3,$s2,$s1,$s0);
+		$code.=<<___;
+		lea	16($key),$key
+		xor	0($key),$s0			# xor with key
+		xor	4($key),$s1
+		xor	8($key),$s2
+		xor	12($key),$s3
+___
+	}
+$code.=<<___;
+	sub	\$1,$rnds
+	jnz	.Ldec_loop
+___
+	if ($verticalspin) { &declastvert(); }
+	else {	&declast(0,$s0,$s3,$s2,$s1);
+		&declast(1,$s1,$s0,$s3,$s2);
+		&declast(2,$s2,$s1,$s0,$s3);
+		&declast(3,$s3,$s2,$s1,$s0);
+		$code.=<<___;
+		xor	16+0($key),$s0			# xor with key
+		xor	16+4($key),$s1
+		xor	16+8($key),$s2
+		xor	16+12($key),$s3
+___
+	}
+$code.=<<___;
+	.byte	0xf3,0xc3			# rep ret
+.size	_x86_64_AES_decrypt,.-_x86_64_AES_decrypt
+___
+
+sub deccompactvert()
+{ my ($t3,$t4,$t5)=("%r8d","%r9d","%r13d");
+
+$code.=<<___;
+	movzb	`&lo("$s0")`,$t0
+	movzb	`&lo("$s1")`,$t1
+	movzb	`&lo("$s2")`,$t2
+	movzb	($sbox,$t0,1),$t0
+	movzb	($sbox,$t1,1),$t1
+	movzb	($sbox,$t2,1),$t2
+
+	movzb	`&lo("$s3")`,$t3
+	movzb	`&hi("$s3")`,$acc0
+	movzb	`&hi("$s0")`,$acc1
+	movzb	($sbox,$t3,1),$t3
+	movzb	($sbox,$acc0,1),$t4	#$t0
+	movzb	($sbox,$acc1,1),$t5	#$t1
+
+	movzb	`&hi("$s1")`,$acc2
+	movzb	`&hi("$s2")`,$acc0
+	shr	\$16,$s2
+	movzb	($sbox,$acc2,1),$acc2	#$t2
+	movzb	($sbox,$acc0,1),$acc0	#$t3
+	shr	\$16,$s3
+
+	movzb	`&lo("$s2")`,$acc1
+	shl	\$8,$t4
+	shl	\$8,$t5
+	movzb	($sbox,$acc1,1),$acc1	#$t0
+	xor	$t4,$t0
+	xor	$t5,$t1
+
+	movzb	`&lo("$s3")`,$t4
+	shr	\$16,$s0
+	shr	\$16,$s1
+	movzb	`&lo("$s0")`,$t5
+	shl	\$8,$acc2
+	shl	\$8,$acc0
+	movzb	($sbox,$t4,1),$t4	#$t1
+	movzb	($sbox,$t5,1),$t5	#$t2
+	xor	$acc2,$t2
+	xor	$acc0,$t3
+
+	movzb	`&lo("$s1")`,$acc2
+	movzb	`&hi("$s1")`,$acc0
+	shl	\$16,$acc1
+	movzb	($sbox,$acc2,1),$acc2	#$t3
+	movzb	($sbox,$acc0,1),$acc0	#$t0
+	xor	$acc1,$t0
+
+	movzb	`&hi("$s2")`,$acc1
+	shl	\$16,$t4
+	shl	\$16,$t5
+	movzb	($sbox,$acc1,1),$s1	#$t1
+	xor	$t4,$t1
+	xor	$t5,$t2
+
+	movzb	`&hi("$s3")`,$acc1
+	shr	\$8,$s0
+	shl	\$16,$acc2
+	movzb	($sbox,$acc1,1),$s2	#$t2
+	movzb	($sbox,$s0,1),$s3	#$t3
+	xor	$acc2,$t3
+
+	shl	\$24,$acc0
+	shl	\$24,$s1
+	shl	\$24,$s2
+	xor	$acc0,$t0
+	shl	\$24,$s3
+	xor	$t1,$s1
+	mov	$t0,$s0
+	xor	$t2,$s2
+	xor	$t3,$s3
+___
+}
+
+# parallelized version! input is a pair of 64-bit values: %rax=s1.s0
+# and %rcx=s3.s2; output is four 32-bit values in %eax=s0, %ebx=s1,
+# %ecx=s2 and %edx=s3.
+sub dectransform()
+{ my ($tp10,$tp20,$tp40,$tp80,$acc0)=("%rax","%r8", "%r9", "%r10","%rbx");
+  my ($tp18,$tp28,$tp48,$tp88,$acc8)=("%rcx","%r11","%r12","%r13","%rdx");
+  my $prefetch = shift;
+
+$code.=<<___;
+	mov	$tp10,$acc0
+	mov	$tp18,$acc8
+	and	$mask80,$acc0
+	and	$mask80,$acc8
+	mov	$acc0,$tp40
+	mov	$acc8,$tp48
+	shr	\$7,$tp40
+	lea	($tp10,$tp10),$tp20
+	shr	\$7,$tp48
+	lea	($tp18,$tp18),$tp28
+	sub	$tp40,$acc0
+	sub	$tp48,$acc8
+	and	$maskfe,$tp20
+	and	$maskfe,$tp28
+	and	$mask1b,$acc0
+	and	$mask1b,$acc8
+	xor	$tp20,$acc0
+	xor	$tp28,$acc8
+	mov	$acc0,$tp20
+	mov	$acc8,$tp28
+
+	and	$mask80,$acc0
+	and	$mask80,$acc8
+	mov	$acc0,$tp80
+	mov	$acc8,$tp88
+	shr	\$7,$tp80
+	lea	($tp20,$tp20),$tp40
+	shr	\$7,$tp88
+	lea	($tp28,$tp28),$tp48
+	sub	$tp80,$acc0
+	sub	$tp88,$acc8
+	and	$maskfe,$tp40
+	and	$maskfe,$tp48
+	and	$mask1b,$acc0
+	and	$mask1b,$acc8
+	xor	$tp40,$acc0
+	xor	$tp48,$acc8
+	mov	$acc0,$tp40
+	mov	$acc8,$tp48
+
+	and	$mask80,$acc0
+	and	$mask80,$acc8
+	mov	$acc0,$tp80
+	mov	$acc8,$tp88
+	shr	\$7,$tp80
+	 xor	$tp10,$tp20		# tp2^=tp1
+	shr	\$7,$tp88
+	 xor	$tp18,$tp28		# tp2^=tp1
+	sub	$tp80,$acc0
+	sub	$tp88,$acc8
+	lea	($tp40,$tp40),$tp80
+	lea	($tp48,$tp48),$tp88
+	 xor	$tp10,$tp40		# tp4^=tp1
+	 xor	$tp18,$tp48		# tp4^=tp1
+	and	$maskfe,$tp80
+	and	$maskfe,$tp88
+	and	$mask1b,$acc0
+	and	$mask1b,$acc8
+	xor	$acc0,$tp80
+	xor	$acc8,$tp88
+
+	xor	$tp80,$tp10		# tp1^=tp8
+	xor	$tp88,$tp18		# tp1^=tp8
+	xor	$tp80,$tp20		# tp2^tp1^=tp8
+	xor	$tp88,$tp28		# tp2^tp1^=tp8
+	mov	$tp10,$acc0
+	mov	$tp18,$acc8
+	xor	$tp80,$tp40		# tp4^tp1^=tp8
+	xor	$tp88,$tp48		# tp4^tp1^=tp8
+	shr	\$32,$acc0
+	shr	\$32,$acc8
+	xor	$tp20,$tp80		# tp8^=tp8^tp2^tp1=tp2^tp1
+	xor	$tp28,$tp88		# tp8^=tp8^tp2^tp1=tp2^tp1
+	rol	\$8,`&LO("$tp10")`	# ROTATE(tp1^tp8,8)
+	rol	\$8,`&LO("$tp18")`	# ROTATE(tp1^tp8,8)
+	xor	$tp40,$tp80		# tp2^tp1^=tp8^tp4^tp1=tp8^tp4^tp2
+	xor	$tp48,$tp88		# tp2^tp1^=tp8^tp4^tp1=tp8^tp4^tp2
+
+	rol	\$8,`&LO("$acc0")`	# ROTATE(tp1^tp8,8)
+	rol	\$8,`&LO("$acc8")`	# ROTATE(tp1^tp8,8)
+	xor	`&LO("$tp80")`,`&LO("$tp10")`
+	xor	`&LO("$tp88")`,`&LO("$tp18")`
+	shr	\$32,$tp80
+	shr	\$32,$tp88
+	xor	`&LO("$tp80")`,`&LO("$acc0")`
+	xor	`&LO("$tp88")`,`&LO("$acc8")`
+
+	mov	$tp20,$tp80
+	mov	$tp28,$tp88
+	shr	\$32,$tp80
+	shr	\$32,$tp88
+	rol	\$24,`&LO("$tp20")`	# ROTATE(tp2^tp1^tp8,24)
+	rol	\$24,`&LO("$tp28")`	# ROTATE(tp2^tp1^tp8,24)
+	rol	\$24,`&LO("$tp80")`	# ROTATE(tp2^tp1^tp8,24)
+	rol	\$24,`&LO("$tp88")`	# ROTATE(tp2^tp1^tp8,24)
+	xor	`&LO("$tp20")`,`&LO("$tp10")`
+	xor	`&LO("$tp28")`,`&LO("$tp18")`
+	mov	$tp40,$tp20
+	mov	$tp48,$tp28
+	xor	`&LO("$tp80")`,`&LO("$acc0")`
+	xor	`&LO("$tp88")`,`&LO("$acc8")`
+
+	`"mov	0($sbox),$mask80"	if ($prefetch)`
+	shr	\$32,$tp20
+	shr	\$32,$tp28
+	`"mov	64($sbox),$maskfe"	if ($prefetch)`
+	rol	\$16,`&LO("$tp40")`	# ROTATE(tp4^tp1^tp8,16)
+	rol	\$16,`&LO("$tp48")`	# ROTATE(tp4^tp1^tp8,16)
+	`"mov	128($sbox),$mask1b"	if ($prefetch)`
+	rol	\$16,`&LO("$tp20")`	# ROTATE(tp4^tp1^tp8,16)
+	rol	\$16,`&LO("$tp28")`	# ROTATE(tp4^tp1^tp8,16)
+	`"mov	192($sbox),$tp80"	if ($prefetch)`
+	xor	`&LO("$tp40")`,`&LO("$tp10")`
+	xor	`&LO("$tp48")`,`&LO("$tp18")`
+	`"mov	256($sbox),$tp88"	if ($prefetch)`
+	xor	`&LO("$tp20")`,`&LO("$acc0")`
+	xor	`&LO("$tp28")`,`&LO("$acc8")`
+___
+}
+
+$code.=<<___;
+.type	_x86_64_AES_decrypt_compact,\@abi-omnipotent
+.align	16
+_x86_64_AES_decrypt_compact:
+	lea	128($sbox),$inp			# size optimization
+	mov	0-128($inp),$acc1		# prefetch Td4
+	mov	32-128($inp),$acc2
+	mov	64-128($inp),$t0
+	mov	96-128($inp),$t1
+	mov	128-128($inp),$acc1
+	mov	160-128($inp),$acc2
+	mov	192-128($inp),$t0
+	mov	224-128($inp),$t1
+	jmp	.Ldec_loop_compact
+
+.align	16
+.Ldec_loop_compact:
+		xor	0($key),$s0		# xor with key
+		xor	4($key),$s1
+		xor	8($key),$s2
+		xor	12($key),$s3
+		lea	16($key),$key
+___
+		&deccompactvert();
+$code.=<<___;
+		cmp	16(%rsp),$key
+		je	.Ldec_compact_done
+
+		mov	256+0($sbox),$mask80
+		shl	\$32,%rbx
+		shl	\$32,%rdx
+		mov	256+8($sbox),$maskfe
+		or	%rbx,%rax
+		or	%rdx,%rcx
+		mov	256+16($sbox),$mask1b
+___
+		&dectransform(1);
+$code.=<<___;
+	jmp	.Ldec_loop_compact
+.align	16
+.Ldec_compact_done:
+	xor	0($key),$s0
+	xor	4($key),$s1
+	xor	8($key),$s2
+	xor	12($key),$s3
+	.byte	0xf3,0xc3			# rep ret
+.size	_x86_64_AES_decrypt_compact,.-_x86_64_AES_decrypt_compact
+___
+
+# void AES_decrypt (const void *inp,void *out,const AES_KEY *key);
+$code.=<<___;
+.globl	AES_decrypt
+.type	AES_decrypt,\@function,3
+.align	16
+.globl	asm_AES_decrypt
+.hidden	asm_AES_decrypt
+asm_AES_decrypt:
+AES_decrypt:
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+
+	# allocate frame "above" key schedule
+	mov	%rsp,%r10
+	lea	-63(%rdx),%rcx	# %rdx is key argument
+	and	\$-64,%rsp
+	sub	%rsp,%rcx
+	neg	%rcx
+	and	\$0x3c0,%rcx
+	sub	%rcx,%rsp
+	sub	\$32,%rsp
+
+	mov	%rsi,16(%rsp)	# save out
+	mov	%r10,24(%rsp)	# save real stack pointer
+.Ldec_prologue:
+
+	mov	%rdx,$key
+	mov	240($key),$rnds	# load rounds
+
+	mov	0(%rdi),$s0	# load input vector
+	mov	4(%rdi),$s1
+	mov	8(%rdi),$s2
+	mov	12(%rdi),$s3
+
+	shl	\$4,$rnds
+	lea	($key,$rnds),%rbp
+	mov	$key,(%rsp)	# key schedule
+	mov	%rbp,8(%rsp)	# end of key schedule
+
+	# pick Td4 copy which can't "overlap" with stack frame or key schedule
+	lea	.LAES_Td+2048(%rip),$sbox
+	lea	768(%rsp),%rbp
+	sub	$sbox,%rbp
+	and	\$0x300,%rbp
+	lea	($sbox,%rbp),$sbox
+	shr	\$3,%rbp	# recall "magic" constants!
+	add	%rbp,$sbox
+
+	call	_x86_64_AES_decrypt_compact
+
+	mov	16(%rsp),$out	# restore out
+	mov	24(%rsp),%rsi	# restore saved stack pointer
+	mov	$s0,0($out)	# write output vector
+	mov	$s1,4($out)
+	mov	$s2,8($out)
+	mov	$s3,12($out)
+
+	mov	(%rsi),%r15
+	mov	8(%rsi),%r14
+	mov	16(%rsi),%r13
+	mov	24(%rsi),%r12
+	mov	32(%rsi),%rbp
+	mov	40(%rsi),%rbx
+	lea	48(%rsi),%rsp
+.Ldec_epilogue:
+	ret
+.size	AES_decrypt,.-AES_decrypt
+___
+#------------------------------------------------------------------#
+
+sub enckey()
+{
+$code.=<<___;
+	movz	%dl,%esi		# rk[i]>>0
+	movzb	-128(%rbp,%rsi),%ebx
+	movz	%dh,%esi		# rk[i]>>8
+	shl	\$24,%ebx
+	xor	%ebx,%eax
+
+	movzb	-128(%rbp,%rsi),%ebx
+	shr	\$16,%edx
+	movz	%dl,%esi		# rk[i]>>16
+	xor	%ebx,%eax
+
+	movzb	-128(%rbp,%rsi),%ebx
+	movz	%dh,%esi		# rk[i]>>24
+	shl	\$8,%ebx
+	xor	%ebx,%eax
+
+	movzb	-128(%rbp,%rsi),%ebx
+	shl	\$16,%ebx
+	xor	%ebx,%eax
+
+	xor	1024-128(%rbp,%rcx,4),%eax		# rcon
+___
+}
+
+# int private_AES_set_encrypt_key(const unsigned char *userKey, const int bits,
+#                        AES_KEY *key)
+$code.=<<___;
+.globl	private_AES_set_encrypt_key
+.type	private_AES_set_encrypt_key,\@function,3
+.align	16
+private_AES_set_encrypt_key:
+	push	%rbx
+	push	%rbp
+	push	%r12			# redundant, but allows sharing the
+	push	%r13			# exception handler...
+	push	%r14
+	push	%r15
+	sub	\$8,%rsp
+.Lenc_key_prologue:
+
+	call	_x86_64_AES_set_encrypt_key
+
+	mov	8(%rsp),%r15
+	mov	16(%rsp),%r14
+	mov	24(%rsp),%r13
+	mov	32(%rsp),%r12
+	mov	40(%rsp),%rbp
+	mov	48(%rsp),%rbx
+	add	\$56,%rsp
+.Lenc_key_epilogue:
+	ret
+.size	private_AES_set_encrypt_key,.-private_AES_set_encrypt_key
+
+.type	_x86_64_AES_set_encrypt_key,\@abi-omnipotent
+.align	16
+_x86_64_AES_set_encrypt_key:
+	mov	%esi,%ecx			# %ecx=bits
+	mov	%rdi,%rsi			# %rsi=userKey
+	mov	%rdx,%rdi			# %rdi=key
+
+	test	\$-1,%rsi
+	jz	.Lbadpointer
+	test	\$-1,%rdi
+	jz	.Lbadpointer
+
+	lea	.LAES_Te(%rip),%rbp
+	lea	2048+128(%rbp),%rbp
+
+	# prefetch Te4
+	mov	0-128(%rbp),%eax
+	mov	32-128(%rbp),%ebx
+	mov	64-128(%rbp),%r8d
+	mov	96-128(%rbp),%edx
+	mov	128-128(%rbp),%eax
+	mov	160-128(%rbp),%ebx
+	mov	192-128(%rbp),%r8d
+	mov	224-128(%rbp),%edx
+
+	cmp	\$128,%ecx
+	je	.L10rounds
+	cmp	\$192,%ecx
+	je	.L12rounds
+	cmp	\$256,%ecx
+	je	.L14rounds
+	mov	\$-2,%rax			# invalid number of bits
+	jmp	.Lexit
+
+.L10rounds:
+	mov	0(%rsi),%rax			# copy first 4 dwords
+	mov	8(%rsi),%rdx
+	mov	%rax,0(%rdi)
+	mov	%rdx,8(%rdi)
+
+	shr	\$32,%rdx
+	xor	%ecx,%ecx
+	jmp	.L10shortcut
+.align	4
+.L10loop:
+		mov	0(%rdi),%eax			# rk[0]
+		mov	12(%rdi),%edx			# rk[3]
+.L10shortcut:
+___
+		&enckey	();
+$code.=<<___;
+		mov	%eax,16(%rdi)			# rk[4]
+		xor	4(%rdi),%eax
+		mov	%eax,20(%rdi)			# rk[5]
+		xor	8(%rdi),%eax
+		mov	%eax,24(%rdi)			# rk[6]
+		xor	12(%rdi),%eax
+		mov	%eax,28(%rdi)			# rk[7]
+		add	\$1,%ecx
+		lea	16(%rdi),%rdi
+		cmp	\$10,%ecx
+	jl	.L10loop
+
+	movl	\$10,80(%rdi)			# setup number of rounds
+	xor	%rax,%rax
+	jmp	.Lexit
+
+.L12rounds:
+	mov	0(%rsi),%rax			# copy first 6 dwords
+	mov	8(%rsi),%rbx
+	mov	16(%rsi),%rdx
+	mov	%rax,0(%rdi)
+	mov	%rbx,8(%rdi)
+	mov	%rdx,16(%rdi)
+
+	shr	\$32,%rdx
+	xor	%ecx,%ecx
+	jmp	.L12shortcut
+.align	4
+.L12loop:
+		mov	0(%rdi),%eax			# rk[0]
+		mov	20(%rdi),%edx			# rk[5]
+.L12shortcut:
+___
+		&enckey	();
+$code.=<<___;
+		mov	%eax,24(%rdi)			# rk[6]
+		xor	4(%rdi),%eax
+		mov	%eax,28(%rdi)			# rk[7]
+		xor	8(%rdi),%eax
+		mov	%eax,32(%rdi)			# rk[8]
+		xor	12(%rdi),%eax
+		mov	%eax,36(%rdi)			# rk[9]
+
+		cmp	\$7,%ecx
+		je	.L12break
+		add	\$1,%ecx
+
+		xor	16(%rdi),%eax
+		mov	%eax,40(%rdi)			# rk[10]
+		xor	20(%rdi),%eax
+		mov	%eax,44(%rdi)			# rk[11]
+
+		lea	24(%rdi),%rdi
+	jmp	.L12loop
+.L12break:
+	movl	\$12,72(%rdi)		# setup number of rounds
+	xor	%rax,%rax
+	jmp	.Lexit
+
+.L14rounds:		
+	mov	0(%rsi),%rax			# copy first 8 dwords
+	mov	8(%rsi),%rbx
+	mov	16(%rsi),%rcx
+	mov	24(%rsi),%rdx
+	mov	%rax,0(%rdi)
+	mov	%rbx,8(%rdi)
+	mov	%rcx,16(%rdi)
+	mov	%rdx,24(%rdi)
+
+	shr	\$32,%rdx
+	xor	%ecx,%ecx
+	jmp	.L14shortcut
+.align	4
+.L14loop:
+		mov	0(%rdi),%eax			# rk[0]
+		mov	28(%rdi),%edx			# rk[4]
+.L14shortcut:
+___
+		&enckey	();
+$code.=<<___;
+		mov	%eax,32(%rdi)			# rk[8]
+		xor	4(%rdi),%eax
+		mov	%eax,36(%rdi)			# rk[9]
+		xor	8(%rdi),%eax
+		mov	%eax,40(%rdi)			# rk[10]
+		xor	12(%rdi),%eax
+		mov	%eax,44(%rdi)			# rk[11]
+
+		cmp	\$6,%ecx
+		je	.L14break
+		add	\$1,%ecx
+
+		mov	%eax,%edx
+		mov	16(%rdi),%eax			# rk[4]
+		movz	%dl,%esi			# rk[11]>>0
+		movzb	-128(%rbp,%rsi),%ebx
+		movz	%dh,%esi			# rk[11]>>8
+		xor	%ebx,%eax
+
+		movzb	-128(%rbp,%rsi),%ebx
+		shr	\$16,%edx
+		shl	\$8,%ebx
+		movz	%dl,%esi			# rk[11]>>16
+		xor	%ebx,%eax
+
+		movzb	-128(%rbp,%rsi),%ebx
+		movz	%dh,%esi			# rk[11]>>24
+		shl	\$16,%ebx
+		xor	%ebx,%eax
+
+		movzb	-128(%rbp,%rsi),%ebx
+		shl	\$24,%ebx
+		xor	%ebx,%eax
+
+		mov	%eax,48(%rdi)			# rk[12]
+		xor	20(%rdi),%eax
+		mov	%eax,52(%rdi)			# rk[13]
+		xor	24(%rdi),%eax
+		mov	%eax,56(%rdi)			# rk[14]
+		xor	28(%rdi),%eax
+		mov	%eax,60(%rdi)			# rk[15]
+
+		lea	32(%rdi),%rdi
+	jmp	.L14loop
+.L14break:
+	movl	\$14,48(%rdi)		# setup number of rounds
+	xor	%rax,%rax
+	jmp	.Lexit
+
+.Lbadpointer:
+	mov	\$-1,%rax
+.Lexit:
+	.byte	0xf3,0xc3			# rep ret
+.size	_x86_64_AES_set_encrypt_key,.-_x86_64_AES_set_encrypt_key
+___
+
+sub deckey_ref()
+{ my ($i,$ptr,$te,$td) = @_;
+  my ($tp1,$tp2,$tp4,$tp8,$acc)=("%eax","%ebx","%edi","%edx","%r8d");
+$code.=<<___;
+	mov	$i($ptr),$tp1
+	mov	$tp1,$acc
+	and	\$0x80808080,$acc
+	mov	$acc,$tp4
+	shr	\$7,$tp4
+	lea	0($tp1,$tp1),$tp2
+	sub	$tp4,$acc
+	and	\$0xfefefefe,$tp2
+	and	\$0x1b1b1b1b,$acc
+	xor	$tp2,$acc
+	mov	$acc,$tp2
+
+	and	\$0x80808080,$acc
+	mov	$acc,$tp8
+	shr	\$7,$tp8
+	lea	0($tp2,$tp2),$tp4
+	sub	$tp8,$acc
+	and	\$0xfefefefe,$tp4
+	and	\$0x1b1b1b1b,$acc
+	 xor	$tp1,$tp2		# tp2^tp1
+	xor	$tp4,$acc
+	mov	$acc,$tp4
+
+	and	\$0x80808080,$acc
+	mov	$acc,$tp8
+	shr	\$7,$tp8
+	sub	$tp8,$acc
+	lea	0($tp4,$tp4),$tp8
+	 xor	$tp1,$tp4		# tp4^tp1
+	and	\$0xfefefefe,$tp8
+	and	\$0x1b1b1b1b,$acc
+	xor	$acc,$tp8
+
+	xor	$tp8,$tp1		# tp1^tp8
+	rol	\$8,$tp1		# ROTATE(tp1^tp8,8)
+	xor	$tp8,$tp2		# tp2^tp1^tp8
+	xor	$tp8,$tp4		# tp4^tp1^tp8
+	xor	$tp2,$tp8
+	xor	$tp4,$tp8		# tp8^(tp8^tp4^tp1)^(tp8^tp2^tp1)=tp8^tp4^tp2
+
+	xor	$tp8,$tp1
+	rol	\$24,$tp2		# ROTATE(tp2^tp1^tp8,24)
+	xor	$tp2,$tp1
+	rol	\$16,$tp4		# ROTATE(tp4^tp1^tp8,16)
+	xor	$tp4,$tp1
+
+	mov	$tp1,$i($ptr)
+___
+}
+
+# int private_AES_set_decrypt_key(const unsigned char *userKey, const int bits,
+#                        AES_KEY *key)
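+#
+# The decrypt schedule is derived from the encrypt schedule: the round keys
+# are reversed in 16-byte chunks (.Linvert below), and every round key except
+# the first and the last is then run through the inverse MixColumns transform
+# (&dectransform), as required by the equivalent-inverse-cipher Td tables.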
+$code.=<<___;
+.globl	private_AES_set_decrypt_key
+.type	private_AES_set_decrypt_key,\@function,3
+.align	16
+private_AES_set_decrypt_key:
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	push	%rdx			# save key schedule
+.Ldec_key_prologue:
+
+	call	_x86_64_AES_set_encrypt_key
+	mov	(%rsp),%r8		# restore key schedule
+	cmp	\$0,%eax
+	jne	.Labort
+
+	mov	240(%r8),%r14d		# pull number of rounds
+	xor	%rdi,%rdi
+	lea	(%rdi,%r14d,4),%rcx
+	mov	%r8,%rsi
+	lea	(%r8,%rcx,4),%rdi	# pointer to last chunk
+.align	4
+.Linvert:
+		mov	0(%rsi),%rax
+		mov	8(%rsi),%rbx
+		mov	0(%rdi),%rcx
+		mov	8(%rdi),%rdx
+		mov	%rax,0(%rdi)
+		mov	%rbx,8(%rdi)
+		mov	%rcx,0(%rsi)
+		mov	%rdx,8(%rsi)
+		lea	16(%rsi),%rsi
+		lea	-16(%rdi),%rdi
+		cmp	%rsi,%rdi
+	jne	.Linvert
+
+	lea	.LAES_Te+2048+1024(%rip),%rax	# rcon
+
+	mov	40(%rax),$mask80
+	mov	48(%rax),$maskfe
+	mov	56(%rax),$mask1b
+
+	mov	%r8,$key
+	sub	\$1,%r14d
+.align	4
+.Lpermute:
+		lea	16($key),$key
+		mov	0($key),%rax
+		mov	8($key),%rcx
+___
+		&dectransform ();
+$code.=<<___;
+		mov	%eax,0($key)
+		mov	%ebx,4($key)
+		mov	%ecx,8($key)
+		mov	%edx,12($key)
+		sub	\$1,%r14d
+	jnz	.Lpermute
+
+	xor	%rax,%rax
+.Labort:
+	mov	8(%rsp),%r15
+	mov	16(%rsp),%r14
+	mov	24(%rsp),%r13
+	mov	32(%rsp),%r12
+	mov	40(%rsp),%rbp
+	mov	48(%rsp),%rbx
+	add	\$56,%rsp
+.Ldec_key_epilogue:
+	ret
+.size	private_AES_set_decrypt_key,.-private_AES_set_decrypt_key
+___
+
+# void AES_cbc_encrypt (const unsigned char *inp, unsigned char *out,
+#			size_t length, const AES_KEY *key,
+#			unsigned char *ivp,const int enc);
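+#
+# For orientation, a typical caller looks roughly like this (sketch only;
+# error handling and buffer sizing are omitted and the variable names are
+# illustrative):
+#
+#	AES_KEY ks;
+#	unsigned char iv[AES_BLOCK_SIZE];
+#	AES_set_encrypt_key(user_key, 128, &ks);	/* or 192/256 */
+#	memcpy(iv, iv_in, AES_BLOCK_SIZE);		/* iv is updated in place */
+#	AES_cbc_encrypt(plaintext, ciphertext, len, &ks, iv, AES_ENCRYPT);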
+{
+# stack frame layout
+# -8(%rsp)		return address
+my $keyp="0(%rsp)";		# one to pass as $key
+my $keyend="8(%rsp)";		# &(keyp->rd_key[4*keyp->rounds])
+my $_rsp="16(%rsp)";		# saved %rsp
+my $_inp="24(%rsp)";		# copy of 1st parameter, inp
+my $_out="32(%rsp)";		# copy of 2nd parameter, out
+my $_len="40(%rsp)";		# copy of 3rd parameter, length
+my $_key="48(%rsp)";		# copy of 4th parameter, key
+my $_ivp="56(%rsp)";		# copy of 5th parameter, ivp
+my $ivec="64(%rsp)";		# ivec[16]
+my $aes_key="80(%rsp)";		# copy of aes_key
+my $mark="80+240(%rsp)";	# copy of aes_key->rounds
+
+$code.=<<___;
+.globl	AES_cbc_encrypt
+.type	AES_cbc_encrypt,\@function,6
+.align	16
+.extern	OPENSSL_ia32cap_P
+.globl	asm_AES_cbc_encrypt
+.hidden	asm_AES_cbc_encrypt
+asm_AES_cbc_encrypt:
+AES_cbc_encrypt:
+	cmp	\$0,%rdx	# check length
+	je	.Lcbc_epilogue
+	pushfq
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+.Lcbc_prologue:
+
+	cld
+	mov	%r9d,%r9d	# clear upper half of enc
+
+	lea	.LAES_Te(%rip),$sbox
+	cmp	\$0,%r9
+	jne	.Lcbc_picked_te
+	lea	.LAES_Td(%rip),$sbox
+.Lcbc_picked_te:
+
+	mov	OPENSSL_ia32cap_P(%rip),%r10d
+	cmp	\$$speed_limit,%rdx
+	jb	.Lcbc_slow_prologue
+	test	\$15,%rdx
+	jnz	.Lcbc_slow_prologue
+	bt	\$28,%r10d
+	jc	.Lcbc_slow_prologue
+
+	# allocate aligned stack frame...
+	lea	-88-248(%rsp),$key
+	and	\$-64,$key
+
+	# ... and make sure it doesn't alias with AES_T[ed] modulo 4096
+	mov	$sbox,%r10
+	lea	2304($sbox),%r11
+	mov	$key,%r12
+	and	\$0xFFF,%r10	# s = $sbox&0xfff
+	and	\$0xFFF,%r11	# e = ($sbox+2048)&0xfff
+	and	\$0xFFF,%r12	# p = %rsp&0xfff
+
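+	# i.e. shift the frame down until [p,p+framesz) and [s,e) do not
+	# overlap modulo 4096, so that stack accesses do not compete with
+	# the prefetched AES_T[ed] lines for the same cache sets
+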
+	cmp	%r11,%r12	# if (p>=e) %rsp -= (p-e);
+	jb	.Lcbc_te_break_out
+	sub	%r11,%r12
+	sub	%r12,$key
+	jmp	.Lcbc_te_ok
+.Lcbc_te_break_out:		# else %rsp -= (p-s)&0xfff + framesz
+	sub	%r10,%r12
+	and	\$0xFFF,%r12
+	add	\$320,%r12
+	sub	%r12,$key
+.align	4
+.Lcbc_te_ok:
+
+	xchg	%rsp,$key
+	#add	\$8,%rsp	# reserve for return address!
+	mov	$key,$_rsp	# save %rsp
+.Lcbc_fast_body:
+	mov	%rdi,$_inp	# save copy of inp
+	mov	%rsi,$_out	# save copy of out
+	mov	%rdx,$_len	# save copy of len
+	mov	%rcx,$_key	# save copy of key
+	mov	%r8,$_ivp	# save copy of ivp
+	movl	\$0,$mark	# copy of aes_key->rounds = 0;
+	mov	%r8,%rbp	# rearrange input arguments
+	mov	%r9,%rbx
+	mov	%rsi,$out
+	mov	%rdi,$inp
+	mov	%rcx,$key
+
+	mov	240($key),%eax		# key->rounds
+	# do we copy key schedule to stack?
+	mov	$key,%r10
+	sub	$sbox,%r10
+	and	\$0xfff,%r10
+	cmp	\$2304,%r10
+	jb	.Lcbc_do_ecopy
+	cmp	\$4096-248,%r10
+	jb	.Lcbc_skip_ecopy
+.align	4
+.Lcbc_do_ecopy:
+		mov	$key,%rsi
+		lea	$aes_key,%rdi
+		lea	$aes_key,$key
+		mov	\$240/8,%ecx
+		.long	0x90A548F3	# rep movsq
+		mov	%eax,(%rdi)	# copy aes_key->rounds
+.Lcbc_skip_ecopy:
+	mov	$key,$keyp	# save key pointer
+
+	mov	\$18,%ecx
+.align	4
+.Lcbc_prefetch_te:
+		mov	0($sbox),%r10
+		mov	32($sbox),%r11
+		mov	64($sbox),%r12
+		mov	96($sbox),%r13
+		lea	128($sbox),$sbox
+		sub	\$1,%ecx
+	jnz	.Lcbc_prefetch_te
+	lea	-2304($sbox),$sbox
+
+	cmp	\$0,%rbx
+	je	.LFAST_DECRYPT
+
+#----------------------------- ENCRYPT -----------------------------#
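+	# C[i] = E_k(P[i] ^ C[i-1]), C[-1] = IV; the chaining value is kept
+	# in $s0..$s3 across iterations, so only the plaintext is reloaded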
+	mov	0(%rbp),$s0		# load iv
+	mov	4(%rbp),$s1
+	mov	8(%rbp),$s2
+	mov	12(%rbp),$s3
+
+.align	4
+.Lcbc_fast_enc_loop:
+		xor	0($inp),$s0
+		xor	4($inp),$s1
+		xor	8($inp),$s2
+		xor	12($inp),$s3
+		mov	$keyp,$key	# restore key
+		mov	$inp,$_inp	# if ($verticalspin) save inp
+
+		call	_x86_64_AES_encrypt
+
+		mov	$_inp,$inp	# if ($verticalspin) restore inp
+		mov	$_len,%r10
+		mov	$s0,0($out)
+		mov	$s1,4($out)
+		mov	$s2,8($out)
+		mov	$s3,12($out)
+
+		lea	16($inp),$inp
+		lea	16($out),$out
+		sub	\$16,%r10
+		test	\$-16,%r10
+		mov	%r10,$_len
+	jnz	.Lcbc_fast_enc_loop
+	mov	$_ivp,%rbp	# restore ivp
+	mov	$s0,0(%rbp)	# save ivec
+	mov	$s1,4(%rbp)
+	mov	$s2,8(%rbp)
+	mov	$s3,12(%rbp)
+
+	jmp	.Lcbc_fast_cleanup
+
+#----------------------------- DECRYPT -----------------------------#
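+	# P[i] = D_k(C[i]) ^ C[i-1]; out-of-place decryption simply remembers
+	# the previous input block as the next IV, while the in-place variant
+	# buffers each ciphertext block in $ivec before the output overwrites it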
+.align	16
+.LFAST_DECRYPT:
+	cmp	$inp,$out
+	je	.Lcbc_fast_dec_in_place
+
+	mov	%rbp,$ivec
+.align	4
+.Lcbc_fast_dec_loop:
+		mov	0($inp),$s0	# read input
+		mov	4($inp),$s1
+		mov	8($inp),$s2
+		mov	12($inp),$s3
+		mov	$keyp,$key	# restore key
+		mov	$inp,$_inp	# if ($verticalspin) save inp
+
+		call	_x86_64_AES_decrypt
+
+		mov	$ivec,%rbp	# load ivp
+		mov	$_inp,$inp	# if ($verticalspin) restore inp
+		mov	$_len,%r10	# load len
+		xor	0(%rbp),$s0	# xor iv
+		xor	4(%rbp),$s1
+		xor	8(%rbp),$s2
+		xor	12(%rbp),$s3
+		mov	$inp,%rbp	# current input, next iv
+
+		sub	\$16,%r10
+		mov	%r10,$_len	# update len
+		mov	%rbp,$ivec	# update ivp
+
+		mov	$s0,0($out)	# write output
+		mov	$s1,4($out)
+		mov	$s2,8($out)
+		mov	$s3,12($out)
+
+		lea	16($inp),$inp
+		lea	16($out),$out
+	jnz	.Lcbc_fast_dec_loop
+	mov	$_ivp,%r12		# load user ivp
+	mov	0(%rbp),%r10		# load iv
+	mov	8(%rbp),%r11
+	mov	%r10,0(%r12)		# copy back to user
+	mov	%r11,8(%r12)
+	jmp	.Lcbc_fast_cleanup
+
+.align	16
+.Lcbc_fast_dec_in_place:
+	mov	0(%rbp),%r10		# copy iv to stack
+	mov	8(%rbp),%r11
+	mov	%r10,0+$ivec
+	mov	%r11,8+$ivec
+.align	4
+.Lcbc_fast_dec_in_place_loop:
+		mov	0($inp),$s0	# load input
+		mov	4($inp),$s1
+		mov	8($inp),$s2
+		mov	12($inp),$s3
+		mov	$keyp,$key	# restore key
+		mov	$inp,$_inp	# if ($verticalspin) save inp
+
+		call	_x86_64_AES_decrypt
+
+		mov	$_inp,$inp	# if ($verticalspin) restore inp
+		mov	$_len,%r10
+		xor	0+$ivec,$s0
+		xor	4+$ivec,$s1
+		xor	8+$ivec,$s2
+		xor	12+$ivec,$s3
+
+		mov	0($inp),%r11	# load input
+		mov	8($inp),%r12
+		sub	\$16,%r10
+		jz	.Lcbc_fast_dec_in_place_done
+
+		mov	%r11,0+$ivec	# copy input to iv
+		mov	%r12,8+$ivec
+
+		mov	$s0,0($out)	# save output [zaps input]
+		mov	$s1,4($out)
+		mov	$s2,8($out)
+		mov	$s3,12($out)
+
+		lea	16($inp),$inp
+		lea	16($out),$out
+		mov	%r10,$_len
+	jmp	.Lcbc_fast_dec_in_place_loop
+.Lcbc_fast_dec_in_place_done:
+	mov	$_ivp,%rdi
+	mov	%r11,0(%rdi)	# copy iv back to user
+	mov	%r12,8(%rdi)
+
+	mov	$s0,0($out)	# save output [zaps input]
+	mov	$s1,4($out)
+	mov	$s2,8($out)
+	mov	$s3,12($out)
+
+.align	4
+.Lcbc_fast_cleanup:
+	cmpl	\$0,$mark	# was the key schedule copied?
+	lea	$aes_key,%rdi
+	je	.Lcbc_exit
+		mov	\$240/8,%ecx
+		xor	%rax,%rax
+		.long	0x90AB48F3	# rep stosq
+
+	jmp	.Lcbc_exit
+
+#--------------------------- SLOW ROUTINE ---------------------------#
+.align	16
+.Lcbc_slow_prologue:
+	# allocate aligned stack frame...
+	lea	-88(%rsp),%rbp
+	and	\$-64,%rbp
+	# ... just "above" key schedule
+	lea	-88-63(%rcx),%r10
+	sub	%rbp,%r10
+	neg	%r10
+	and	\$0x3c0,%r10
+	sub	%r10,%rbp
+
+	xchg	%rsp,%rbp
+	#add	\$8,%rsp	# reserve for return address!
+	mov	%rbp,$_rsp	# save %rsp
+.Lcbc_slow_body:
+	#mov	%rdi,$_inp	# save copy of inp
+	#mov	%rsi,$_out	# save copy of out
+	#mov	%rdx,$_len	# save copy of len
+	#mov	%rcx,$_key	# save copy of key
+	mov	%r8,$_ivp	# save copy of ivp
+	mov	%r8,%rbp	# rearrange input arguments
+	mov	%r9,%rbx
+	mov	%rsi,$out
+	mov	%rdi,$inp
+	mov	%rcx,$key
+	mov	%rdx,%r10
+
+	mov	240($key),%eax
+	mov	$key,$keyp	# save key pointer
+	shl	\$4,%eax
+	lea	($key,%rax),%rax
+	mov	%rax,$keyend
+
+	# pick Te4 copy which can't "overlap" with stack frame or key schedule
+	lea	2048($sbox),$sbox
+	lea	768-8(%rsp),%rax
+	sub	$sbox,%rax
+	and	\$0x300,%rax
+	lea	($sbox,%rax),$sbox
+
+	cmp	\$0,%rbx
+	je	.LSLOW_DECRYPT
+
+#--------------------------- SLOW ENCRYPT ---------------------------#
+	test	\$-16,%r10		# check upon length
+	mov	0(%rbp),$s0		# load iv
+	mov	4(%rbp),$s1
+	mov	8(%rbp),$s2
+	mov	12(%rbp),$s3
+	jz	.Lcbc_slow_enc_tail	# short input...
+
+.align	4
+.Lcbc_slow_enc_loop:
+		xor	0($inp),$s0
+		xor	4($inp),$s1
+		xor	8($inp),$s2
+		xor	12($inp),$s3
+		mov	$keyp,$key	# restore key
+		mov	$inp,$_inp	# save inp
+		mov	$out,$_out	# save out
+		mov	%r10,$_len	# save len
+
+		call	_x86_64_AES_encrypt_compact
+
+		mov	$_inp,$inp	# restore inp
+		mov	$_out,$out	# restore out
+		mov	$_len,%r10	# restore len
+		mov	$s0,0($out)
+		mov	$s1,4($out)
+		mov	$s2,8($out)
+		mov	$s3,12($out)
+
+		lea	16($inp),$inp
+		lea	16($out),$out
+		sub	\$16,%r10
+		test	\$-16,%r10
+	jnz	.Lcbc_slow_enc_loop
+	test	\$15,%r10
+	jnz	.Lcbc_slow_enc_tail
+	mov	$_ivp,%rbp	# restore ivp
+	mov	$s0,0(%rbp)	# save ivec
+	mov	$s1,4(%rbp)
+	mov	$s2,8(%rbp)
+	mov	$s3,12(%rbp)
+
+	jmp	.Lcbc_exit
+
+.align	4
+.Lcbc_slow_enc_tail:
+	mov	%rax,%r11
+	mov	%rcx,%r12
+	mov	%r10,%rcx
+	mov	$inp,%rsi
+	mov	$out,%rdi
+	.long	0x9066A4F3		# rep movsb
+	mov	\$16,%rcx		# zero tail
+	sub	%r10,%rcx
+	xor	%rax,%rax
+	.long	0x9066AAF3		# rep stosb
+	mov	$out,$inp		# this is not a mistake!
+	mov	\$16,%r10		# len=16
+	mov	%r11,%rax
+	mov	%r12,%rcx
+	jmp	.Lcbc_slow_enc_loop	# one more spin...
+#--------------------------- SLOW DECRYPT ---------------------------#
+.align	16
+.LSLOW_DECRYPT:
+	shr	\$3,%rax
+	add	%rax,$sbox		# recall "magic" constants!
+
+	mov	0(%rbp),%r11		# copy iv to stack
+	mov	8(%rbp),%r12
+	mov	%r11,0+$ivec
+	mov	%r12,8+$ivec
+
+.align	4
+.Lcbc_slow_dec_loop:
+		mov	0($inp),$s0	# load input
+		mov	4($inp),$s1
+		mov	8($inp),$s2
+		mov	12($inp),$s3
+		mov	$keyp,$key	# restore key
+		mov	$inp,$_inp	# save inp
+		mov	$out,$_out	# save out
+		mov	%r10,$_len	# save len
+
+		call	_x86_64_AES_decrypt_compact
+
+		mov	$_inp,$inp	# restore inp
+		mov	$_out,$out	# restore out
+		mov	$_len,%r10
+		xor	0+$ivec,$s0
+		xor	4+$ivec,$s1
+		xor	8+$ivec,$s2
+		xor	12+$ivec,$s3
+
+		mov	0($inp),%r11	# load input
+		mov	8($inp),%r12
+		sub	\$16,%r10
+		jc	.Lcbc_slow_dec_partial
+		jz	.Lcbc_slow_dec_done
+
+		mov	%r11,0+$ivec	# copy input to iv
+		mov	%r12,8+$ivec
+
+		mov	$s0,0($out)	# save output [can zap input]
+		mov	$s1,4($out)
+		mov	$s2,8($out)
+		mov	$s3,12($out)
+
+		lea	16($inp),$inp
+		lea	16($out),$out
+	jmp	.Lcbc_slow_dec_loop
+.Lcbc_slow_dec_done:
+	mov	$_ivp,%rdi
+	mov	%r11,0(%rdi)		# copy iv back to user
+	mov	%r12,8(%rdi)
+
+	mov	$s0,0($out)		# save output [can zap input]
+	mov	$s1,4($out)
+	mov	$s2,8($out)
+	mov	$s3,12($out)
+
+	jmp	.Lcbc_exit
+
+.align	4
+.Lcbc_slow_dec_partial:
+	mov	$_ivp,%rdi
+	mov	%r11,0(%rdi)		# copy iv back to user
+	mov	%r12,8(%rdi)
+
+	mov	$s0,0+$ivec		# save output to stack
+	mov	$s1,4+$ivec
+	mov	$s2,8+$ivec
+	mov	$s3,12+$ivec
+
+	mov	$out,%rdi
+	lea	$ivec,%rsi
+	lea	16(%r10),%rcx
+	.long	0x9066A4F3	# rep movsb
+	jmp	.Lcbc_exit
+
+.align	16
+.Lcbc_exit:
+	mov	$_rsp,%rsi
+	mov	(%rsi),%r15
+	mov	8(%rsi),%r14
+	mov	16(%rsi),%r13
+	mov	24(%rsi),%r12
+	mov	32(%rsi),%rbp
+	mov	40(%rsi),%rbx
+	lea	48(%rsi),%rsp
+.Lcbc_popfq:
+	popfq
+.Lcbc_epilogue:
+	ret
+.size	AES_cbc_encrypt,.-AES_cbc_encrypt
+___
+}
+
+$code.=<<___;
+.align	64
+.LAES_Te:
+___
+	&_data_word(0xa56363c6, 0x847c7cf8, 0x997777ee, 0x8d7b7bf6);
+	&_data_word(0x0df2f2ff, 0xbd6b6bd6, 0xb16f6fde, 0x54c5c591);
+	&_data_word(0x50303060, 0x03010102, 0xa96767ce, 0x7d2b2b56);
+	&_data_word(0x19fefee7, 0x62d7d7b5, 0xe6abab4d, 0x9a7676ec);
+	&_data_word(0x45caca8f, 0x9d82821f, 0x40c9c989, 0x877d7dfa);
+	&_data_word(0x15fafaef, 0xeb5959b2, 0xc947478e, 0x0bf0f0fb);
+	&_data_word(0xecadad41, 0x67d4d4b3, 0xfda2a25f, 0xeaafaf45);
+	&_data_word(0xbf9c9c23, 0xf7a4a453, 0x967272e4, 0x5bc0c09b);
+	&_data_word(0xc2b7b775, 0x1cfdfde1, 0xae93933d, 0x6a26264c);
+	&_data_word(0x5a36366c, 0x413f3f7e, 0x02f7f7f5, 0x4fcccc83);
+	&_data_word(0x5c343468, 0xf4a5a551, 0x34e5e5d1, 0x08f1f1f9);
+	&_data_word(0x937171e2, 0x73d8d8ab, 0x53313162, 0x3f15152a);
+	&_data_word(0x0c040408, 0x52c7c795, 0x65232346, 0x5ec3c39d);
+	&_data_word(0x28181830, 0xa1969637, 0x0f05050a, 0xb59a9a2f);
+	&_data_word(0x0907070e, 0x36121224, 0x9b80801b, 0x3de2e2df);
+	&_data_word(0x26ebebcd, 0x6927274e, 0xcdb2b27f, 0x9f7575ea);
+	&_data_word(0x1b090912, 0x9e83831d, 0x742c2c58, 0x2e1a1a34);
+	&_data_word(0x2d1b1b36, 0xb26e6edc, 0xee5a5ab4, 0xfba0a05b);
+	&_data_word(0xf65252a4, 0x4d3b3b76, 0x61d6d6b7, 0xceb3b37d);
+	&_data_word(0x7b292952, 0x3ee3e3dd, 0x712f2f5e, 0x97848413);
+	&_data_word(0xf55353a6, 0x68d1d1b9, 0x00000000, 0x2cededc1);
+	&_data_word(0x60202040, 0x1ffcfce3, 0xc8b1b179, 0xed5b5bb6);
+	&_data_word(0xbe6a6ad4, 0x46cbcb8d, 0xd9bebe67, 0x4b393972);
+	&_data_word(0xde4a4a94, 0xd44c4c98, 0xe85858b0, 0x4acfcf85);
+	&_data_word(0x6bd0d0bb, 0x2aefefc5, 0xe5aaaa4f, 0x16fbfbed);
+	&_data_word(0xc5434386, 0xd74d4d9a, 0x55333366, 0x94858511);
+	&_data_word(0xcf45458a, 0x10f9f9e9, 0x06020204, 0x817f7ffe);
+	&_data_word(0xf05050a0, 0x443c3c78, 0xba9f9f25, 0xe3a8a84b);
+	&_data_word(0xf35151a2, 0xfea3a35d, 0xc0404080, 0x8a8f8f05);
+	&_data_word(0xad92923f, 0xbc9d9d21, 0x48383870, 0x04f5f5f1);
+	&_data_word(0xdfbcbc63, 0xc1b6b677, 0x75dadaaf, 0x63212142);
+	&_data_word(0x30101020, 0x1affffe5, 0x0ef3f3fd, 0x6dd2d2bf);
+	&_data_word(0x4ccdcd81, 0x140c0c18, 0x35131326, 0x2fececc3);
+	&_data_word(0xe15f5fbe, 0xa2979735, 0xcc444488, 0x3917172e);
+	&_data_word(0x57c4c493, 0xf2a7a755, 0x827e7efc, 0x473d3d7a);
+	&_data_word(0xac6464c8, 0xe75d5dba, 0x2b191932, 0x957373e6);
+	&_data_word(0xa06060c0, 0x98818119, 0xd14f4f9e, 0x7fdcdca3);
+	&_data_word(0x66222244, 0x7e2a2a54, 0xab90903b, 0x8388880b);
+	&_data_word(0xca46468c, 0x29eeeec7, 0xd3b8b86b, 0x3c141428);
+	&_data_word(0x79dedea7, 0xe25e5ebc, 0x1d0b0b16, 0x76dbdbad);
+	&_data_word(0x3be0e0db, 0x56323264, 0x4e3a3a74, 0x1e0a0a14);
+	&_data_word(0xdb494992, 0x0a06060c, 0x6c242448, 0xe45c5cb8);
+	&_data_word(0x5dc2c29f, 0x6ed3d3bd, 0xefacac43, 0xa66262c4);
+	&_data_word(0xa8919139, 0xa4959531, 0x37e4e4d3, 0x8b7979f2);
+	&_data_word(0x32e7e7d5, 0x43c8c88b, 0x5937376e, 0xb76d6dda);
+	&_data_word(0x8c8d8d01, 0x64d5d5b1, 0xd24e4e9c, 0xe0a9a949);
+	&_data_word(0xb46c6cd8, 0xfa5656ac, 0x07f4f4f3, 0x25eaeacf);
+	&_data_word(0xaf6565ca, 0x8e7a7af4, 0xe9aeae47, 0x18080810);
+	&_data_word(0xd5baba6f, 0x887878f0, 0x6f25254a, 0x722e2e5c);
+	&_data_word(0x241c1c38, 0xf1a6a657, 0xc7b4b473, 0x51c6c697);
+	&_data_word(0x23e8e8cb, 0x7cdddda1, 0x9c7474e8, 0x211f1f3e);
+	&_data_word(0xdd4b4b96, 0xdcbdbd61, 0x868b8b0d, 0x858a8a0f);
+	&_data_word(0x907070e0, 0x423e3e7c, 0xc4b5b571, 0xaa6666cc);
+	&_data_word(0xd8484890, 0x05030306, 0x01f6f6f7, 0x120e0e1c);
+	&_data_word(0xa36161c2, 0x5f35356a, 0xf95757ae, 0xd0b9b969);
+	&_data_word(0x91868617, 0x58c1c199, 0x271d1d3a, 0xb99e9e27);
+	&_data_word(0x38e1e1d9, 0x13f8f8eb, 0xb398982b, 0x33111122);
+	&_data_word(0xbb6969d2, 0x70d9d9a9, 0x898e8e07, 0xa7949433);
+	&_data_word(0xb69b9b2d, 0x221e1e3c, 0x92878715, 0x20e9e9c9);
+	&_data_word(0x49cece87, 0xff5555aa, 0x78282850, 0x7adfdfa5);
+	&_data_word(0x8f8c8c03, 0xf8a1a159, 0x80898909, 0x170d0d1a);
+	&_data_word(0xdabfbf65, 0x31e6e6d7, 0xc6424284, 0xb86868d0);
+	&_data_word(0xc3414182, 0xb0999929, 0x772d2d5a, 0x110f0f1e);
+	&_data_word(0xcbb0b07b, 0xfc5454a8, 0xd6bbbb6d, 0x3a16162c);
+
+#Te4	# four copies of Te4 to choose from to avoid L1 aliasing
+	&data_byte(0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5);
+	&data_byte(0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76);
+	&data_byte(0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0);
+	&data_byte(0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0);
+	&data_byte(0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc);
+	&data_byte(0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15);
+	&data_byte(0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a);
+	&data_byte(0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75);
+	&data_byte(0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0);
+	&data_byte(0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84);
+	&data_byte(0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b);
+	&data_byte(0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf);
+	&data_byte(0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85);
+	&data_byte(0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8);
+	&data_byte(0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5);
+	&data_byte(0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2);
+	&data_byte(0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17);
+	&data_byte(0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73);
+	&data_byte(0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88);
+	&data_byte(0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb);
+	&data_byte(0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c);
+	&data_byte(0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79);
+	&data_byte(0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9);
+	&data_byte(0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08);
+	&data_byte(0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6);
+	&data_byte(0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a);
+	&data_byte(0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e);
+	&data_byte(0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e);
+	&data_byte(0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94);
+	&data_byte(0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf);
+	&data_byte(0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68);
+	&data_byte(0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16);
+
+	&data_byte(0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5);
+	&data_byte(0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76);
+	&data_byte(0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0);
+	&data_byte(0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0);
+	&data_byte(0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc);
+	&data_byte(0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15);
+	&data_byte(0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a);
+	&data_byte(0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75);
+	&data_byte(0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0);
+	&data_byte(0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84);
+	&data_byte(0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b);
+	&data_byte(0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf);
+	&data_byte(0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85);
+	&data_byte(0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8);
+	&data_byte(0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5);
+	&data_byte(0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2);
+	&data_byte(0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17);
+	&data_byte(0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73);
+	&data_byte(0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88);
+	&data_byte(0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb);
+	&data_byte(0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c);
+	&data_byte(0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79);
+	&data_byte(0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9);
+	&data_byte(0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08);
+	&data_byte(0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6);
+	&data_byte(0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a);
+	&data_byte(0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e);
+	&data_byte(0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e);
+	&data_byte(0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94);
+	&data_byte(0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf);
+	&data_byte(0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68);
+	&data_byte(0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16);
+
+	&data_byte(0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5);
+	&data_byte(0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76);
+	&data_byte(0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0);
+	&data_byte(0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0);
+	&data_byte(0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc);
+	&data_byte(0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15);
+	&data_byte(0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a);
+	&data_byte(0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75);
+	&data_byte(0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0);
+	&data_byte(0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84);
+	&data_byte(0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b);
+	&data_byte(0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf);
+	&data_byte(0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85);
+	&data_byte(0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8);
+	&data_byte(0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5);
+	&data_byte(0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2);
+	&data_byte(0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17);
+	&data_byte(0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73);
+	&data_byte(0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88);
+	&data_byte(0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb);
+	&data_byte(0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c);
+	&data_byte(0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79);
+	&data_byte(0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9);
+	&data_byte(0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08);
+	&data_byte(0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6);
+	&data_byte(0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a);
+	&data_byte(0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e);
+	&data_byte(0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e);
+	&data_byte(0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94);
+	&data_byte(0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf);
+	&data_byte(0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68);
+	&data_byte(0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16);
+
+	&data_byte(0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5);
+	&data_byte(0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76);
+	&data_byte(0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0);
+	&data_byte(0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0);
+	&data_byte(0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc);
+	&data_byte(0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15);
+	&data_byte(0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a);
+	&data_byte(0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75);
+	&data_byte(0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0);
+	&data_byte(0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84);
+	&data_byte(0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b);
+	&data_byte(0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf);
+	&data_byte(0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85);
+	&data_byte(0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8);
+	&data_byte(0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5);
+	&data_byte(0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2);
+	&data_byte(0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17);
+	&data_byte(0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73);
+	&data_byte(0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88);
+	&data_byte(0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb);
+	&data_byte(0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c);
+	&data_byte(0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79);
+	&data_byte(0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9);
+	&data_byte(0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08);
+	&data_byte(0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6);
+	&data_byte(0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a);
+	&data_byte(0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e);
+	&data_byte(0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e);
+	&data_byte(0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94);
+	&data_byte(0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf);
+	&data_byte(0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68);
+	&data_byte(0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16);
+#rcon:
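+# The first ten words below are the key-schedule round constants (rcon); the
+# remaining 0x80808080/0xfefefefe/0x1b1b1b1b pairs are the byte masks that the
+# decrypt key setup loads from .LAES_Te+2048+1024+40/48/56 above.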
+$code.=<<___;
+	.long	0x00000001, 0x00000002, 0x00000004, 0x00000008
+	.long	0x00000010, 0x00000020, 0x00000040, 0x00000080
+	.long	0x0000001b, 0x00000036, 0x80808080, 0x80808080
+	.long	0xfefefefe, 0xfefefefe, 0x1b1b1b1b, 0x1b1b1b1b
+___
+$code.=<<___;
+.align	64
+.LAES_Td:
+___
+	&_data_word(0x50a7f451, 0x5365417e, 0xc3a4171a, 0x965e273a);
+	&_data_word(0xcb6bab3b, 0xf1459d1f, 0xab58faac, 0x9303e34b);
+	&_data_word(0x55fa3020, 0xf66d76ad, 0x9176cc88, 0x254c02f5);
+	&_data_word(0xfcd7e54f, 0xd7cb2ac5, 0x80443526, 0x8fa362b5);
+	&_data_word(0x495ab1de, 0x671bba25, 0x980eea45, 0xe1c0fe5d);
+	&_data_word(0x02752fc3, 0x12f04c81, 0xa397468d, 0xc6f9d36b);
+	&_data_word(0xe75f8f03, 0x959c9215, 0xeb7a6dbf, 0xda595295);
+	&_data_word(0x2d83bed4, 0xd3217458, 0x2969e049, 0x44c8c98e);
+	&_data_word(0x6a89c275, 0x78798ef4, 0x6b3e5899, 0xdd71b927);
+	&_data_word(0xb64fe1be, 0x17ad88f0, 0x66ac20c9, 0xb43ace7d);
+	&_data_word(0x184adf63, 0x82311ae5, 0x60335197, 0x457f5362);
+	&_data_word(0xe07764b1, 0x84ae6bbb, 0x1ca081fe, 0x942b08f9);
+	&_data_word(0x58684870, 0x19fd458f, 0x876cde94, 0xb7f87b52);
+	&_data_word(0x23d373ab, 0xe2024b72, 0x578f1fe3, 0x2aab5566);
+	&_data_word(0x0728ebb2, 0x03c2b52f, 0x9a7bc586, 0xa50837d3);
+	&_data_word(0xf2872830, 0xb2a5bf23, 0xba6a0302, 0x5c8216ed);
+	&_data_word(0x2b1ccf8a, 0x92b479a7, 0xf0f207f3, 0xa1e2694e);
+	&_data_word(0xcdf4da65, 0xd5be0506, 0x1f6234d1, 0x8afea6c4);
+	&_data_word(0x9d532e34, 0xa055f3a2, 0x32e18a05, 0x75ebf6a4);
+	&_data_word(0x39ec830b, 0xaaef6040, 0x069f715e, 0x51106ebd);
+	&_data_word(0xf98a213e, 0x3d06dd96, 0xae053edd, 0x46bde64d);
+	&_data_word(0xb58d5491, 0x055dc471, 0x6fd40604, 0xff155060);
+	&_data_word(0x24fb9819, 0x97e9bdd6, 0xcc434089, 0x779ed967);
+	&_data_word(0xbd42e8b0, 0x888b8907, 0x385b19e7, 0xdbeec879);
+	&_data_word(0x470a7ca1, 0xe90f427c, 0xc91e84f8, 0x00000000);
+	&_data_word(0x83868009, 0x48ed2b32, 0xac70111e, 0x4e725a6c);
+	&_data_word(0xfbff0efd, 0x5638850f, 0x1ed5ae3d, 0x27392d36);
+	&_data_word(0x64d90f0a, 0x21a65c68, 0xd1545b9b, 0x3a2e3624);
+	&_data_word(0xb1670a0c, 0x0fe75793, 0xd296eeb4, 0x9e919b1b);
+	&_data_word(0x4fc5c080, 0xa220dc61, 0x694b775a, 0x161a121c);
+	&_data_word(0x0aba93e2, 0xe52aa0c0, 0x43e0223c, 0x1d171b12);
+	&_data_word(0x0b0d090e, 0xadc78bf2, 0xb9a8b62d, 0xc8a91e14);
+	&_data_word(0x8519f157, 0x4c0775af, 0xbbdd99ee, 0xfd607fa3);
+	&_data_word(0x9f2601f7, 0xbcf5725c, 0xc53b6644, 0x347efb5b);
+	&_data_word(0x7629438b, 0xdcc623cb, 0x68fcedb6, 0x63f1e4b8);
+	&_data_word(0xcadc31d7, 0x10856342, 0x40229713, 0x2011c684);
+	&_data_word(0x7d244a85, 0xf83dbbd2, 0x1132f9ae, 0x6da129c7);
+	&_data_word(0x4b2f9e1d, 0xf330b2dc, 0xec52860d, 0xd0e3c177);
+	&_data_word(0x6c16b32b, 0x99b970a9, 0xfa489411, 0x2264e947);
+	&_data_word(0xc48cfca8, 0x1a3ff0a0, 0xd82c7d56, 0xef903322);
+	&_data_word(0xc74e4987, 0xc1d138d9, 0xfea2ca8c, 0x360bd498);
+	&_data_word(0xcf81f5a6, 0x28de7aa5, 0x268eb7da, 0xa4bfad3f);
+	&_data_word(0xe49d3a2c, 0x0d927850, 0x9bcc5f6a, 0x62467e54);
+	&_data_word(0xc2138df6, 0xe8b8d890, 0x5ef7392e, 0xf5afc382);
+	&_data_word(0xbe805d9f, 0x7c93d069, 0xa92dd56f, 0xb31225cf);
+	&_data_word(0x3b99acc8, 0xa77d1810, 0x6e639ce8, 0x7bbb3bdb);
+	&_data_word(0x097826cd, 0xf418596e, 0x01b79aec, 0xa89a4f83);
+	&_data_word(0x656e95e6, 0x7ee6ffaa, 0x08cfbc21, 0xe6e815ef);
+	&_data_word(0xd99be7ba, 0xce366f4a, 0xd4099fea, 0xd67cb029);
+	&_data_word(0xafb2a431, 0x31233f2a, 0x3094a5c6, 0xc066a235);
+	&_data_word(0x37bc4e74, 0xa6ca82fc, 0xb0d090e0, 0x15d8a733);
+	&_data_word(0x4a9804f1, 0xf7daec41, 0x0e50cd7f, 0x2ff69117);
+	&_data_word(0x8dd64d76, 0x4db0ef43, 0x544daacc, 0xdf0496e4);
+	&_data_word(0xe3b5d19e, 0x1b886a4c, 0xb81f2cc1, 0x7f516546);
+	&_data_word(0x04ea5e9d, 0x5d358c01, 0x737487fa, 0x2e410bfb);
+	&_data_word(0x5a1d67b3, 0x52d2db92, 0x335610e9, 0x1347d66d);
+	&_data_word(0x8c61d79a, 0x7a0ca137, 0x8e14f859, 0x893c13eb);
+	&_data_word(0xee27a9ce, 0x35c961b7, 0xede51ce1, 0x3cb1477a);
+	&_data_word(0x59dfd29c, 0x3f73f255, 0x79ce1418, 0xbf37c773);
+	&_data_word(0xeacdf753, 0x5baafd5f, 0x146f3ddf, 0x86db4478);
+	&_data_word(0x81f3afca, 0x3ec468b9, 0x2c342438, 0x5f40a3c2);
+	&_data_word(0x72c31d16, 0x0c25e2bc, 0x8b493c28, 0x41950dff);
+	&_data_word(0x7101a839, 0xdeb30c08, 0x9ce4b4d8, 0x90c15664);
+	&_data_word(0x6184cb7b, 0x70b632d5, 0x745c6c48, 0x4257b8d0);
+
+#Td4:	# four copies of Td4 to choose from to avoid L1 aliasing
+	&data_byte(0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38);
+	&data_byte(0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb);
+	&data_byte(0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87);
+	&data_byte(0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb);
+	&data_byte(0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d);
+	&data_byte(0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e);
+	&data_byte(0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2);
+	&data_byte(0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25);
+	&data_byte(0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16);
+	&data_byte(0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92);
+	&data_byte(0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda);
+	&data_byte(0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84);
+	&data_byte(0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a);
+	&data_byte(0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06);
+	&data_byte(0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02);
+	&data_byte(0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b);
+	&data_byte(0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea);
+	&data_byte(0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73);
+	&data_byte(0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85);
+	&data_byte(0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e);
+	&data_byte(0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89);
+	&data_byte(0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b);
+	&data_byte(0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20);
+	&data_byte(0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4);
+	&data_byte(0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31);
+	&data_byte(0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f);
+	&data_byte(0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d);
+	&data_byte(0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef);
+	&data_byte(0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0);
+	&data_byte(0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61);
+	&data_byte(0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26);
+	&data_byte(0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d);
+$code.=<<___;
+	.long	0x80808080, 0x80808080, 0xfefefefe, 0xfefefefe
+	.long	0x1b1b1b1b, 0x1b1b1b1b, 0, 0
+___
+	&data_byte(0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38);
+	&data_byte(0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb);
+	&data_byte(0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87);
+	&data_byte(0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb);
+	&data_byte(0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d);
+	&data_byte(0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e);
+	&data_byte(0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2);
+	&data_byte(0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25);
+	&data_byte(0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16);
+	&data_byte(0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92);
+	&data_byte(0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda);
+	&data_byte(0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84);
+	&data_byte(0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a);
+	&data_byte(0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06);
+	&data_byte(0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02);
+	&data_byte(0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b);
+	&data_byte(0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea);
+	&data_byte(0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73);
+	&data_byte(0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85);
+	&data_byte(0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e);
+	&data_byte(0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89);
+	&data_byte(0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b);
+	&data_byte(0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20);
+	&data_byte(0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4);
+	&data_byte(0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31);
+	&data_byte(0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f);
+	&data_byte(0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d);
+	&data_byte(0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef);
+	&data_byte(0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0);
+	&data_byte(0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61);
+	&data_byte(0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26);
+	&data_byte(0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d);
+$code.=<<___;
+	.long	0x80808080, 0x80808080, 0xfefefefe, 0xfefefefe
+	.long	0x1b1b1b1b, 0x1b1b1b1b, 0, 0
+___
+	&data_byte(0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38);
+	&data_byte(0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb);
+	&data_byte(0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87);
+	&data_byte(0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb);
+	&data_byte(0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d);
+	&data_byte(0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e);
+	&data_byte(0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2);
+	&data_byte(0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25);
+	&data_byte(0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16);
+	&data_byte(0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92);
+	&data_byte(0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda);
+	&data_byte(0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84);
+	&data_byte(0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a);
+	&data_byte(0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06);
+	&data_byte(0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02);
+	&data_byte(0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b);
+	&data_byte(0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea);
+	&data_byte(0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73);
+	&data_byte(0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85);
+	&data_byte(0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e);
+	&data_byte(0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89);
+	&data_byte(0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b);
+	&data_byte(0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20);
+	&data_byte(0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4);
+	&data_byte(0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31);
+	&data_byte(0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f);
+	&data_byte(0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d);
+	&data_byte(0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef);
+	&data_byte(0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0);
+	&data_byte(0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61);
+	&data_byte(0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26);
+	&data_byte(0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d);
+$code.=<<___;
+	.long	0x80808080, 0x80808080, 0xfefefefe, 0xfefefefe
+	.long	0x1b1b1b1b, 0x1b1b1b1b, 0, 0
+___
+	&data_byte(0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38);
+	&data_byte(0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb);
+	&data_byte(0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87);
+	&data_byte(0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb);
+	&data_byte(0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d);
+	&data_byte(0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e);
+	&data_byte(0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2);
+	&data_byte(0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25);
+	&data_byte(0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16);
+	&data_byte(0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92);
+	&data_byte(0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda);
+	&data_byte(0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84);
+	&data_byte(0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a);
+	&data_byte(0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06);
+	&data_byte(0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02);
+	&data_byte(0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b);
+	&data_byte(0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea);
+	&data_byte(0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73);
+	&data_byte(0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85);
+	&data_byte(0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e);
+	&data_byte(0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89);
+	&data_byte(0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b);
+	&data_byte(0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20);
+	&data_byte(0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4);
+	&data_byte(0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31);
+	&data_byte(0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f);
+	&data_byte(0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d);
+	&data_byte(0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef);
+	&data_byte(0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0);
+	&data_byte(0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61);
+	&data_byte(0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26);
+	&data_byte(0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d);
+$code.=<<___;
+	.long	0x80808080, 0x80808080, 0xfefefefe, 0xfefefefe
+	.long	0x1b1b1b1b, 0x1b1b1b1b, 0, 0
+.asciz  "AES for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
+.align	64
+___
+
+# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
+#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
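+#
+# If the faulting RIP lies between a function's recorded prologue and epilogue
+# labels, the handlers below reload the saved non-volatile registers from its
+# stack frame into the CONTEXT record and then let RtlVirtualUnwind continue
+# the unwind.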
+if ($win64) {
+$rec="%rcx";
+$frame="%rdx";
+$context="%r8";
+$disp="%r9";
+
+$code.=<<___;
+.extern	__imp_RtlVirtualUnwind
+.type	block_se_handler,\@abi-omnipotent
+.align	16
+block_se_handler:
+	push	%rsi
+	push	%rdi
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	pushfq
+	sub	\$64,%rsp
+
+	mov	120($context),%rax	# pull context->Rax
+	mov	248($context),%rbx	# pull context->Rip
+
+	mov	8($disp),%rsi		# disp->ImageBase
+	mov	56($disp),%r11		# disp->HandlerData
+
+	mov	0(%r11),%r10d		# HandlerData[0]
+	lea	(%rsi,%r10),%r10	# prologue label
+	cmp	%r10,%rbx		# context->Rip<prologue label
+	jb	.Lin_block_prologue
+
+	mov	152($context),%rax	# pull context->Rsp
+
+	mov	4(%r11),%r10d		# HandlerData[1]
+	lea	(%rsi,%r10),%r10	# epilogue label
+	cmp	%r10,%rbx		# context->Rip>=epilogue label
+	jae	.Lin_block_prologue
+
+	mov	24(%rax),%rax		# pull saved real stack pointer
+	lea	48(%rax),%rax		# adjust...
+
+	mov	-8(%rax),%rbx
+	mov	-16(%rax),%rbp
+	mov	-24(%rax),%r12
+	mov	-32(%rax),%r13
+	mov	-40(%rax),%r14
+	mov	-48(%rax),%r15
+	mov	%rbx,144($context)	# restore context->Rbx
+	mov	%rbp,160($context)	# restore context->Rbp
+	mov	%r12,216($context)	# restore context->R12
+	mov	%r13,224($context)	# restore context->R13
+	mov	%r14,232($context)	# restore context->R14
+	mov	%r15,240($context)	# restore context->R15
+
+.Lin_block_prologue:
+	mov	8(%rax),%rdi
+	mov	16(%rax),%rsi
+	mov	%rax,152($context)	# restore context->Rsp
+	mov	%rsi,168($context)	# restore context->Rsi
+	mov	%rdi,176($context)	# restore context->Rdi
+
+	jmp	.Lcommon_seh_exit
+.size	block_se_handler,.-block_se_handler
+
+.type	key_se_handler,\@abi-omnipotent
+.align	16
+key_se_handler:
+	push	%rsi
+	push	%rdi
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	pushfq
+	sub	\$64,%rsp
+
+	mov	120($context),%rax	# pull context->Rax
+	mov	248($context),%rbx	# pull context->Rip
+
+	mov	8($disp),%rsi		# disp->ImageBase
+	mov	56($disp),%r11		# disp->HandlerData
+
+	mov	0(%r11),%r10d		# HandlerData[0]
+	lea	(%rsi,%r10),%r10	# prologue label
+	cmp	%r10,%rbx		# context->Rip<prologue label
+	jb	.Lin_key_prologue
+
+	mov	152($context),%rax	# pull context->Rsp
+
+	mov	4(%r11),%r10d		# HandlerData[1]
+	lea	(%rsi,%r10),%r10	# epilogue label
+	cmp	%r10,%rbx		# context->Rip>=epilogue label
+	jae	.Lin_key_prologue
+
+	lea	56(%rax),%rax
+
+	mov	-8(%rax),%rbx
+	mov	-16(%rax),%rbp
+	mov	-24(%rax),%r12
+	mov	-32(%rax),%r13
+	mov	-40(%rax),%r14
+	mov	-48(%rax),%r15
+	mov	%rbx,144($context)	# restore context->Rbx
+	mov	%rbp,160($context)	# restore context->Rbp
+	mov	%r12,216($context)	# restore context->R12
+	mov	%r13,224($context)	# restore context->R13
+	mov	%r14,232($context)	# restore context->R14
+	mov	%r15,240($context)	# restore context->R15
+
+.Lin_key_prologue:
+	mov	8(%rax),%rdi
+	mov	16(%rax),%rsi
+	mov	%rax,152($context)	# restore context->Rsp
+	mov	%rsi,168($context)	# restore context->Rsi
+	mov	%rdi,176($context)	# restore context->Rdi
+
+	jmp	.Lcommon_seh_exit
+.size	key_se_handler,.-key_se_handler
+
+.type	cbc_se_handler,\@abi-omnipotent
+.align	16
+cbc_se_handler:
+	push	%rsi
+	push	%rdi
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	pushfq
+	sub	\$64,%rsp
+
+	mov	120($context),%rax	# pull context->Rax
+	mov	248($context),%rbx	# pull context->Rip
+
+	lea	.Lcbc_prologue(%rip),%r10
+	cmp	%r10,%rbx		# context->Rip<.Lcbc_prologue
+	jb	.Lin_cbc_prologue
+
+	lea	.Lcbc_fast_body(%rip),%r10
+	cmp	%r10,%rbx		# context->Rip<.Lcbc_fast_body
+	jb	.Lin_cbc_frame_setup
+
+	lea	.Lcbc_slow_prologue(%rip),%r10
+	cmp	%r10,%rbx		# context->Rip<.Lcbc_slow_prologue
+	jb	.Lin_cbc_body
+
+	lea	.Lcbc_slow_body(%rip),%r10
+	cmp	%r10,%rbx		# context->Rip<.Lcbc_slow_body
+	jb	.Lin_cbc_frame_setup
+
+.Lin_cbc_body:
+	mov	152($context),%rax	# pull context->Rsp
+
+	lea	.Lcbc_epilogue(%rip),%r10
+	cmp	%r10,%rbx		# context->Rip>=.Lcbc_epilogue
+	jae	.Lin_cbc_prologue
+
+	lea	8(%rax),%rax
+
+	lea	.Lcbc_popfq(%rip),%r10
+	cmp	%r10,%rbx		# context->Rip>=.Lcbc_popfq
+	jae	.Lin_cbc_prologue
+
+	mov	`16-8`(%rax),%rax	# biased $_rsp
+	lea	56(%rax),%rax
+
+.Lin_cbc_frame_setup:
+	mov	-16(%rax),%rbx
+	mov	-24(%rax),%rbp
+	mov	-32(%rax),%r12
+	mov	-40(%rax),%r13
+	mov	-48(%rax),%r14
+	mov	-56(%rax),%r15
+	mov	%rbx,144($context)	# restore context->Rbx
+	mov	%rbp,160($context)	# restore context->Rbp
+	mov	%r12,216($context)	# restore context->R12
+	mov	%r13,224($context)	# restore context->R13
+	mov	%r14,232($context)	# restore context->R14
+	mov	%r15,240($context)	# restore context->R15
+
+.Lin_cbc_prologue:
+	mov	8(%rax),%rdi
+	mov	16(%rax),%rsi
+	mov	%rax,152($context)	# restore context->Rsp
+	mov	%rsi,168($context)	# restore context->Rsi
+	mov	%rdi,176($context)	# restore context->Rdi
+
+.Lcommon_seh_exit:
+
+	mov	40($disp),%rdi		# disp->ContextRecord
+	mov	$context,%rsi		# context
+	mov	\$`1232/8`,%ecx		# sizeof(CONTEXT)
+	.long	0xa548f3fc		# cld; rep movsq
+
+	mov	$disp,%rsi
+	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
+	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
+	mov	0(%rsi),%r8		# arg3, disp->ControlPc
+	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
+	mov	40(%rsi),%r10		# disp->ContextRecord
+	lea	56(%rsi),%r11		# &disp->HandlerData
+	lea	24(%rsi),%r12		# &disp->EstablisherFrame
+	mov	%r10,32(%rsp)		# arg5
+	mov	%r11,40(%rsp)		# arg6
+	mov	%r12,48(%rsp)		# arg7
+	mov	%rcx,56(%rsp)		# arg8, (NULL)
+	call	*__imp_RtlVirtualUnwind(%rip)
+
+	mov	\$1,%eax		# ExceptionContinueSearch
+	add	\$64,%rsp
+	popfq
+	pop	%r15
+	pop	%r14
+	pop	%r13
+	pop	%r12
+	pop	%rbp
+	pop	%rbx
+	pop	%rdi
+	pop	%rsi
+	ret
+.size	cbc_se_handler,.-cbc_se_handler
+
+.section	.pdata
+.align	4
+	.rva	.LSEH_begin_AES_encrypt
+	.rva	.LSEH_end_AES_encrypt
+	.rva	.LSEH_info_AES_encrypt
+
+	.rva	.LSEH_begin_AES_decrypt
+	.rva	.LSEH_end_AES_decrypt
+	.rva	.LSEH_info_AES_decrypt
+
+	.rva	.LSEH_begin_private_AES_set_encrypt_key
+	.rva	.LSEH_end_private_AES_set_encrypt_key
+	.rva	.LSEH_info_private_AES_set_encrypt_key
+
+	.rva	.LSEH_begin_private_AES_set_decrypt_key
+	.rva	.LSEH_end_private_AES_set_decrypt_key
+	.rva	.LSEH_info_private_AES_set_decrypt_key
+
+	.rva	.LSEH_begin_AES_cbc_encrypt
+	.rva	.LSEH_end_AES_cbc_encrypt
+	.rva	.LSEH_info_AES_cbc_encrypt
+
+.section	.xdata
+.align	8
+.LSEH_info_AES_encrypt:
+	.byte	9,0,0,0
+	.rva	block_se_handler
+	.rva	.Lenc_prologue,.Lenc_epilogue	# HandlerData[]
+.LSEH_info_AES_decrypt:
+	.byte	9,0,0,0
+	.rva	block_se_handler
+	.rva	.Ldec_prologue,.Ldec_epilogue	# HandlerData[]
+.LSEH_info_private_AES_set_encrypt_key:
+	.byte	9,0,0,0
+	.rva	key_se_handler
+	.rva	.Lenc_key_prologue,.Lenc_key_epilogue	# HandlerData[]
+.LSEH_info_private_AES_set_decrypt_key:
+	.byte	9,0,0,0
+	.rva	key_se_handler
+	.rva	.Ldec_key_prologue,.Ldec_key_epilogue	# HandlerData[]
+.LSEH_info_AES_cbc_encrypt:
+	.byte	9,0,0,0
+	.rva	cbc_se_handler
+___
+}
+
+$code =~ s/\`([^\`]*)\`/eval($1)/gem;
+
+print $code;
+
+close STDOUT;
diff --git a/jni/openssl/crypto/aes/asm/aesni-sha1-x86_64.pl b/jni/openssl/crypto/aes/asm/aesni-sha1-x86_64.pl
new file mode 100644
index 0000000..c6f6b33
--- /dev/null
+++ b/jni/openssl/crypto/aes/asm/aesni-sha1-x86_64.pl
@@ -0,0 +1,1249 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# June 2011
+#
+# This is an AESNI-CBC+SHA1 "stitch" implementation. The idea, as spelled
+# out in http://download.intel.com/design/intarch/papers/323686.pdf, is
+# that since AESNI-CBC encryption exhibits *very* low instruction-level
+# parallelism, interleaving it with another algorithm makes it possible to
+# utilize processor resources better and achieve higher performance.
+# SHA1 instruction sequences(*) are taken from sha1-x86_64.pl and the
+# AESNI code is woven into them. Below are performance numbers in
+# cycles per processed byte, less is better, for standalone AESNI-CBC
+# encrypt, sum of the latter and standalone SHA1, and "stitched"
+# subroutine:
+#
+#		AES-128-CBC	+SHA1		stitch      gain
+# Westmere	3.77[+5.6]	9.37		6.65	    +41%
+# Sandy Bridge	5.05[+5.2(6.3)]	10.25(11.35)	6.16(7.08)  +67%(+60%)
+#
+#		AES-192-CBC
+# Westmere	4.51		10.11		6.97	    +45%
+# Sandy Bridge	6.05		11.25(12.35)	6.34(7.27)  +77%(+70%)
+#
+#		AES-256-CBC
+# Westmere	5.25		10.85		7.25	    +50%
+# Sandy Bridge	7.05		12.25(13.35)	7.06(7.70)  +74%(+73%)
+#
+# (*)	There are two code paths: SSSE3 and AVX. See sha1-x86_64.pl for
+#	background information. The numbers in parentheses above are SSSE3
+#	results collected on an AVX-capable CPU, i.e. they apply on OSes
+#	that don't support AVX.
+#
+# Needless to say, it makes no sense to implement a "stitched" *decrypt*
+# subroutine: *both* AESNI-CBC decrypt and SHA1 already utilize the
+# available parallelism fully, so stitching would not give any gain
+# anyway. Well, there might be some, e.g. because of better cache
+# locality... For reference, here are performance results for
+# standalone AESNI-CBC decrypt:
+#
+#		AES-128-CBC	AES-192-CBC	AES-256-CBC
+# Westmere	1.31		1.55		1.80
+# Sandy Bridge	0.93		1.06		1.22
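+#
+# The interleaving itself happens at generation time: the SHA1 round bodies
+# below (body_00_19/body_20_39/body_40_59) are lists of instruction snippets,
+# and one aesenc step of the CBC loop is spliced in roughly every other SHA1
+# round (12 aesencs per 20 rounds in the 00-19/40-59 sections, 8 per 20 in
+# the 20-39 sections), so the four 16-byte AES blocks corresponding to one
+# 64-byte SHA1 block are produced while that block is being hashed.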
+
+$flavour = shift;
+$output  = shift;
+if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
+
+$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
+die "can't locate x86_64-xlate.pl";
+
+$avx=1 if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
+		=~ /GNU assembler version ([2-9]\.[0-9]+)/ &&
+	   $1>=2.19);
+$avx=1 if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
+	   `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/ &&
+	   $1>=2.09);
+$avx=1 if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
+	   `ml64 2>&1` =~ /Version ([0-9]+)\./ &&
+	   $1>=10);
+
+open STDOUT,"| $^X $xlate $flavour $output";
+
+# void aesni_cbc_sha1_enc(const void *inp,
+#			void *out,
+#			size_t length,
+#			const AES_KEY *key,
+#			unsigned char *iv,
+#			SHA_CTX *ctx,
+#			const void *in0);
+
+$code.=<<___;
+.text
+.extern	OPENSSL_ia32cap_P
+
+.globl	aesni_cbc_sha1_enc
+.type	aesni_cbc_sha1_enc,\@abi-omnipotent
+.align	16
+aesni_cbc_sha1_enc:
+	# caller should check for SSSE3 and AES-NI bits
+	mov	OPENSSL_ia32cap_P+0(%rip),%r10d
+	mov	OPENSSL_ia32cap_P+4(%rip),%r11d
+___
+$code.=<<___ if ($avx);
+	and	\$`1<<28`,%r11d		# mask AVX bit
+	and	\$`1<<30`,%r10d		# mask "Intel CPU" bit
+	or	%r11d,%r10d
+	cmp	\$`1<<28|1<<30`,%r10d
+	je	aesni_cbc_sha1_enc_avx
+___
+$code.=<<___;
+	jmp	aesni_cbc_sha1_enc_ssse3
+	ret
+.size	aesni_cbc_sha1_enc,.-aesni_cbc_sha1_enc
+___
+
+my ($in0,$out,$len,$key,$ivp,$ctx,$inp)=("%rdi","%rsi","%rdx","%rcx","%r8","%r9","%r10");
+
+my $Xi=4;
+my @X=map("%xmm$_",(4..7,0..3));
+my @Tx=map("%xmm$_",(8..10));
+my @V=($A,$B,$C,$D,$E)=("%eax","%ebx","%ecx","%edx","%ebp");	# size optimization
+my @T=("%esi","%edi");
+my $j=0; my $jj=0; my $r=0; my $sn=0;
+my $K_XX_XX="%r11";
+my ($iv,$in,$rndkey0)=map("%xmm$_",(11..13));
+my @rndkey=("%xmm14","%xmm15");
+
+sub AUTOLOAD()		# thunk [simplified] 32-bit style perlasm
+{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://;
+  my $arg = pop;
+    $arg = "\$$arg" if ($arg*1 eq $arg);
+    $code .= "\t$opcode\t".join(',',$arg,reverse @_)."\n";
+}
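+
+# Any call to an otherwise undefined sub, e.g. &pxor(@X[0],@Tx[1]), lands in
+# AUTOLOAD above and is emitted as text with the operands in reverse (AT&T)
+# order; a purely numeric last argument becomes an immediate, so for example
+# &pslld(@Tx[1],2) comes out roughly as "pslld $2,%xmm9".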
+
+my $_rol=sub { &rol(@_) };
+my $_ror=sub { &ror(@_) };
+
+$code.=<<___;
+.type	aesni_cbc_sha1_enc_ssse3,\@function,6
+.align	16
+aesni_cbc_sha1_enc_ssse3:
+	mov	`($win64?56:8)`(%rsp),$inp	# load 7th argument
+	#shr	\$6,$len			# debugging artefact
+	#jz	.Lepilogue_ssse3		# debugging artefact
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	lea	`-104-($win64?10*16:0)`(%rsp),%rsp
+	#mov	$in0,$inp			# debugging artefact
+	#lea	64(%rsp),$ctx			# debugging artefact
+___
+$code.=<<___ if ($win64);
+	movaps	%xmm6,96+0(%rsp)
+	movaps	%xmm7,96+16(%rsp)
+	movaps	%xmm8,96+32(%rsp)
+	movaps	%xmm9,96+48(%rsp)
+	movaps	%xmm10,96+64(%rsp)
+	movaps	%xmm11,96+80(%rsp)
+	movaps	%xmm12,96+96(%rsp)
+	movaps	%xmm13,96+112(%rsp)
+	movaps	%xmm14,96+128(%rsp)
+	movaps	%xmm15,96+144(%rsp)
+.Lprologue_ssse3:
+___
+$code.=<<___;
+	mov	$in0,%r12			# reassign arguments
+	mov	$out,%r13
+	mov	$len,%r14
+	mov	$key,%r15
+	movdqu	($ivp),$iv			# load IV
+	mov	$ivp,88(%rsp)			# save $ivp
+___
+my ($in0,$out,$len,$key)=map("%r$_",(12..15));	# reassign arguments
+my $rounds="${ivp}d";
+$code.=<<___;
+	shl	\$6,$len
+	sub	$in0,$out
+	mov	240($key),$rounds
+	add	$inp,$len		# end of input
+
+	lea	K_XX_XX(%rip),$K_XX_XX
+	mov	0($ctx),$A		# load context
+	mov	4($ctx),$B
+	mov	8($ctx),$C
+	mov	12($ctx),$D
+	mov	$B,@T[0]		# magic seed
+	mov	16($ctx),$E
+
+	movdqa	64($K_XX_XX),@X[2]	# pbswap mask
+	movdqa	0($K_XX_XX),@Tx[1]	# K_00_19
+	movdqu	0($inp),@X[-4&7]	# load input to %xmm[0-3]
+	movdqu	16($inp),@X[-3&7]
+	movdqu	32($inp),@X[-2&7]
+	movdqu	48($inp),@X[-1&7]
+	pshufb	@X[2],@X[-4&7]		# byte swap
+	add	\$64,$inp
+	pshufb	@X[2],@X[-3&7]
+	pshufb	@X[2],@X[-2&7]
+	pshufb	@X[2],@X[-1&7]
+	paddd	@Tx[1],@X[-4&7]		# add K_00_19
+	paddd	@Tx[1],@X[-3&7]
+	paddd	@Tx[1],@X[-2&7]
+	movdqa	@X[-4&7],0(%rsp)	# X[]+K xfer to IALU
+	psubd	@Tx[1],@X[-4&7]		# restore X[]
+	movdqa	@X[-3&7],16(%rsp)
+	psubd	@Tx[1],@X[-3&7]
+	movdqa	@X[-2&7],32(%rsp)
+	psubd	@Tx[1],@X[-2&7]
+	movups	($key),$rndkey0		# $key[0]
+	movups	16($key),$rndkey[0]	# forward reference
+	jmp	.Loop_ssse3
+___
+
+my $aesenc=sub {
+  use integer;
+  my ($n,$k)=($r/10,$r%10);
+    if ($k==0) {
+      $code.=<<___;
+	movups		`16*$n`($in0),$in		# load input
+	xorps		$rndkey0,$in
+___
+      $code.=<<___ if ($n);
+	movups		$iv,`16*($n-1)`($out,$in0)	# write output
+___
+      $code.=<<___;
+	xorps		$in,$iv
+	aesenc		$rndkey[0],$iv
+	movups		`32+16*$k`($key),$rndkey[1]
+___
+    } elsif ($k==9) {
+      $sn++;
+      $code.=<<___;
+	cmp		\$11,$rounds
+	jb		.Laesenclast$sn
+	movups		`32+16*($k+0)`($key),$rndkey[1]
+	aesenc		$rndkey[0],$iv
+	movups		`32+16*($k+1)`($key),$rndkey[0]
+	aesenc		$rndkey[1],$iv
+	je		.Laesenclast$sn
+	movups		`32+16*($k+2)`($key),$rndkey[1]
+	aesenc		$rndkey[0],$iv
+	movups		`32+16*($k+3)`($key),$rndkey[0]
+	aesenc		$rndkey[1],$iv
+.Laesenclast$sn:
+	aesenclast	$rndkey[0],$iv
+	movups		16($key),$rndkey[1]		# forward reference
+___
+    } else {
+      $code.=<<___;
+	aesenc		$rndkey[0],$iv
+	movups		`32+16*$k`($key),$rndkey[1]
+___
+    }
+    $r++;	unshift(@rndkey,pop(@rndkey));
+};
+
+sub Xupdate_ssse3_16_31()		# recall that $Xi starts with 4
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);	# 40 instructions
+  my ($a,$b,$c,$d,$e);
+
+	&movdqa	(@X[0],@X[-3&7]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&movdqa	(@Tx[0],@X[-1&7]);
+	&palignr(@X[0],@X[-4&7],8);	# compose "X[-14]" in "X[0]"
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	  &paddd	(@Tx[1],@X[-1&7]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&psrldq	(@Tx[0],4);		# "X[-3]", 3 dwords
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&pxor	(@X[0],@X[-4&7]);	# "X[0]"^="X[-16]"
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&pxor	(@Tx[0],@X[-2&7]);	# "X[-3]"^"X[-8]"
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&pxor	(@X[0],@Tx[0]);		# "X[0]"^="X[-3]"^"X[-8]"
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &movdqa	(eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]);	# X[]+K xfer to IALU
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&movdqa	(@Tx[2],@X[0]);
+	&movdqa	(@Tx[0],@X[0]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&pslldq	(@Tx[2],12);		# "X[0]"<<96, extract one dword
+	&paddd	(@X[0],@X[0]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&psrld	(@Tx[0],31);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&movdqa	(@Tx[1],@Tx[2]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&psrld	(@Tx[2],30);
+	&por	(@X[0],@Tx[0]);		# "X[0]"<<<=1
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&pslld	(@Tx[1],2);
+	&pxor	(@X[0],@Tx[2]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &movdqa	(@Tx[2],eval(16*(($Xi)/5))."($K_XX_XX)");	# K_XX_XX
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&pxor	(@X[0],@Tx[1]);		# "X[0]"^=("X[0]">>96)<<<2
+
+	 foreach (@insns) { eval; }	# remaining instructions [if any]
+
+  $Xi++;	push(@X,shift(@X));	# "rotate" X[]
+		push(@Tx,shift(@Tx));
+}
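+
+# Xupdate_ssse3_16_31 above produces four message-schedule words per call,
+# W[i] = rol(W[i-3]^W[i-8]^W[i-14]^W[i-16],1), fixing up the in-vector
+# W[i-3] dependency with the extra ("X[0]">>96)<<<2 term, while the scalar
+# SHA1 round instructions supplied via $body are interleaved between the
+# SIMD steps.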
+
+sub Xupdate_ssse3_32_79()
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);	# 32 to 48 instructions
+  my ($a,$b,$c,$d,$e);
+
+	&movdqa	(@Tx[0],@X[-1&7])	if ($Xi==8);
+	 eval(shift(@insns));		# body_20_39
+	&pxor	(@X[0],@X[-4&7]);	# "X[0]"="X[-32]"^"X[-16]"
+	&palignr(@Tx[0],@X[-2&7],8);	# compose "X[-6]"
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# rol
+
+	&pxor	(@X[0],@X[-7&7]);	# "X[0]"^="X[-28]"
+	 eval(shift(@insns));
+	 eval(shift(@insns))	if (@insns[0] !~ /&ro[rl]/);
+	if ($Xi%5) {
+	  &movdqa	(@Tx[2],@Tx[1]);# "perpetuate" K_XX_XX...
+	} else {			# ... or load next one
+	  &movdqa	(@Tx[2],eval(16*($Xi/5))."($K_XX_XX)");
+	}
+	  &paddd	(@Tx[1],@X[-1&7]);
+	 eval(shift(@insns));		# ror
+	 eval(shift(@insns));
+
+	&pxor	(@X[0],@Tx[0]);		# "X[0]"^="X[-6]"
+	 eval(shift(@insns));		# body_20_39
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# rol
+
+	&movdqa	(@Tx[0],@X[0]);
+	  &movdqa	(eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]);	# X[]+K xfer to IALU
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# ror
+	 eval(shift(@insns));
+
+	&pslld	(@X[0],2);
+	 eval(shift(@insns));		# body_20_39
+	 eval(shift(@insns));
+	&psrld	(@Tx[0],30);
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# rol
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# ror
+	 eval(shift(@insns));
+
+	&por	(@X[0],@Tx[0]);		# "X[0]"<<<=2
+	 eval(shift(@insns));		# body_20_39
+	 eval(shift(@insns));
+	  &movdqa	(@Tx[1],@X[0])	if ($Xi<19);
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# rol
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# rol
+	 eval(shift(@insns));
+
+	 foreach (@insns) { eval; }	# remaining instructions
+
+  $Xi++;	push(@X,shift(@X));	# "rotate" X[]
+		push(@Tx,shift(@Tx));
+}
+
+sub Xuplast_ssse3_80()
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
+  my ($a,$b,$c,$d,$e);
+
+	 eval(shift(@insns));
+	  &paddd	(@Tx[1],@X[-1&7]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	  &movdqa	(eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]);	# X[]+K xfer IALU
+
+	 foreach (@insns) { eval; }		# remaining instructions
+
+	&cmp	($inp,$len);
+	&je	(".Ldone_ssse3");
+
+	unshift(@Tx,pop(@Tx));
+
+	&movdqa	(@X[2],"64($K_XX_XX)");		# pbswap mask
+	&movdqa	(@Tx[1],"0($K_XX_XX)");		# K_00_19
+	&movdqu	(@X[-4&7],"0($inp)");		# load input
+	&movdqu	(@X[-3&7],"16($inp)");
+	&movdqu	(@X[-2&7],"32($inp)");
+	&movdqu	(@X[-1&7],"48($inp)");
+	&pshufb	(@X[-4&7],@X[2]);		# byte swap
+	&add	($inp,64);
+
+  $Xi=0;
+}
+
+sub Xloop_ssse3()
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
+  my ($a,$b,$c,$d,$e);
+
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&pshufb	(@X[($Xi-3)&7],@X[2]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&paddd	(@X[($Xi-4)&7],@Tx[1]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&movdqa	(eval(16*$Xi)."(%rsp)",@X[($Xi-4)&7]);	# X[]+K xfer to IALU
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&psubd	(@X[($Xi-4)&7],@Tx[1]);
+
+	foreach (@insns) { eval; }
+  $Xi++;
+}
+
+sub Xtail_ssse3()
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
+  my ($a,$b,$c,$d,$e);
+
+	foreach (@insns) { eval; }
+}
+
+sub body_00_19 () {
+  use integer;
+  my ($k,$n);
+  my @r=(
+	'($a,$b,$c,$d,$e)=@V;'.
+	'&add	($e,eval(4*($j&15))."(%rsp)");',	# X[]+K xfer
+	'&xor	($c,$d);',
+	'&mov	(@T[1],$a);',	# $b in next round
+	'&$_rol	($a,5);',
+	'&and	(@T[0],$c);',	# ($b&($c^$d))
+	'&xor	($c,$d);',	# restore $c
+	'&xor	(@T[0],$d);',
+	'&add	($e,$a);',
+	'&$_ror	($b,$j?7:2);',	# $b>>>2
+	'&add	($e,@T[0]);'	.'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
+	);
+	$n = scalar(@r);
+	$k = (($jj+1)*12/20)*20*$n/12;	# 12 aesencs per these 20 rounds
+	@r[$k%$n].='&$aesenc();'	if ($jj==$k/$n);
+	$jj++;
+    return @r;
+}
+
+sub body_20_39 () {
+  use integer;
+  my ($k,$n);
+  my @r=(
+	'($a,$b,$c,$d,$e)=@V;'.
+	'&add	($e,eval(4*($j++&15))."(%rsp)");',	# X[]+K xfer
+	'&xor	(@T[0],$d);',	# ($b^$d)
+	'&mov	(@T[1],$a);',	# $b in next round
+	'&$_rol	($a,5);',
+	'&xor	(@T[0],$c);',	# ($b^$d^$c)
+	'&add	($e,$a);',
+	'&$_ror	($b,7);',	# $b>>>2
+	'&add	($e,@T[0]);'	.'unshift(@V,pop(@V)); unshift(@T,pop(@T));'
+	);
+	$n = scalar(@r);
+	$k = (($jj+1)*8/20)*20*$n/8;	# 8 aesencs per these 20 rounds
+	@r[$k%$n].='&$aesenc();'	if ($jj==$k/$n);
+	$jj++;
+    return @r;
+}
+
+sub body_40_59 () {
+  use integer;
+  my ($k,$n);
+  my @r=(
+	'($a,$b,$c,$d,$e)=@V;'.
+	'&mov	(@T[1],$c);',
+	'&xor	($c,$d);',
+	'&add	($e,eval(4*($j++&15))."(%rsp)");',	# X[]+K xfer
+	'&and	(@T[1],$d);',
+	'&and	(@T[0],$c);',	# ($b&($c^$d))
+	'&$_ror	($b,7);',	# $b>>>2
+	'&add	($e,@T[1]);',
+	'&mov	(@T[1],$a);',	# $b in next round
+	'&$_rol	($a,5);',
+	'&add	($e,@T[0]);',
+	'&xor	($c,$d);',	# restore $c
+	'&add	($e,$a);'	.'unshift(@V,pop(@V)); unshift(@T,pop(@T));'
+	);
+	$n = scalar(@r);
+	$k=(($jj+1)*12/20)*20*$n/12;	# 12 aesencs per these 20 rounds
+	@r[$k%$n].='&$aesenc();'	if ($jj==$k/$n);
+	$jj++;
+    return @r;
+}
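+
+# Hedged side note on the scheduling arithmetic above (a sketch only;
+# the helper below is hypothetical and never called): the $k/$jj
+# bookkeeping splices one &$aesenc() call per round at evenly spaced
+# positions, and the 12+8+12+8 aesencs quoted in the body_* comments
+# add up to 40 invocations of the closure per 64-byte chunk, i.e.
+# 4 CBC blocks at 10 invocations each.
+sub _aesenc_budget_sketch
+{ use integer;
+  my @per_stage = (12,8,12,8);	# aesencs per 20-round SHA-1 stage
+  my $total = 0; $total += $_ for (@per_stage);
+    return $total;		# 40 = 4 blocks x 10 closure calls
+}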
+$code.=<<___;
+.align	16
+.Loop_ssse3:
+___
+	&Xupdate_ssse3_16_31(\&body_00_19);
+	&Xupdate_ssse3_16_31(\&body_00_19);
+	&Xupdate_ssse3_16_31(\&body_00_19);
+	&Xupdate_ssse3_16_31(\&body_00_19);
+	&Xupdate_ssse3_32_79(\&body_00_19);
+	&Xupdate_ssse3_32_79(\&body_20_39);
+	&Xupdate_ssse3_32_79(\&body_20_39);
+	&Xupdate_ssse3_32_79(\&body_20_39);
+	&Xupdate_ssse3_32_79(\&body_20_39);
+	&Xupdate_ssse3_32_79(\&body_20_39);
+	&Xupdate_ssse3_32_79(\&body_40_59);
+	&Xupdate_ssse3_32_79(\&body_40_59);
+	&Xupdate_ssse3_32_79(\&body_40_59);
+	&Xupdate_ssse3_32_79(\&body_40_59);
+	&Xupdate_ssse3_32_79(\&body_40_59);
+	&Xupdate_ssse3_32_79(\&body_20_39);
+	&Xuplast_ssse3_80(\&body_20_39);	# can jump to "done"
+
+				$saved_j=$j; @saved_V=@V;
+				$saved_r=$r; @saved_rndkey=@rndkey;
+
+	&Xloop_ssse3(\&body_20_39);
+	&Xloop_ssse3(\&body_20_39);
+	&Xloop_ssse3(\&body_20_39);
+
+$code.=<<___;
+	movups	$iv,48($out,$in0)		# write output
+	lea	64($in0),$in0
+
+	add	0($ctx),$A			# update context
+	add	4($ctx),@T[0]
+	add	8($ctx),$C
+	add	12($ctx),$D
+	mov	$A,0($ctx)
+	add	16($ctx),$E
+	mov	@T[0],4($ctx)
+	mov	@T[0],$B			# magic seed
+	mov	$C,8($ctx)
+	mov	$D,12($ctx)
+	mov	$E,16($ctx)
+	jmp	.Loop_ssse3
+
+.align	16
+.Ldone_ssse3:
+___
+				$jj=$j=$saved_j; @V=@saved_V;
+				$r=$saved_r;     @rndkey=@saved_rndkey;
+
+	&Xtail_ssse3(\&body_20_39);
+	&Xtail_ssse3(\&body_20_39);
+	&Xtail_ssse3(\&body_20_39);
+
+$code.=<<___;
+	movups	$iv,48($out,$in0)		# write output
+	mov	88(%rsp),$ivp			# restore $ivp
+
+	add	0($ctx),$A			# update context
+	add	4($ctx),@T[0]
+	add	8($ctx),$C
+	mov	$A,0($ctx)
+	add	12($ctx),$D
+	mov	@T[0],4($ctx)
+	add	16($ctx),$E
+	mov	$C,8($ctx)
+	mov	$D,12($ctx)
+	mov	$E,16($ctx)
+	movups	$iv,($ivp)			# write IV
+___
+$code.=<<___ if ($win64);
+	movaps	96+0(%rsp),%xmm6
+	movaps	96+16(%rsp),%xmm7
+	movaps	96+32(%rsp),%xmm8
+	movaps	96+48(%rsp),%xmm9
+	movaps	96+64(%rsp),%xmm10
+	movaps	96+80(%rsp),%xmm11
+	movaps	96+96(%rsp),%xmm12
+	movaps	96+112(%rsp),%xmm13
+	movaps	96+128(%rsp),%xmm14
+	movaps	96+144(%rsp),%xmm15
+___
+$code.=<<___;
+	lea	`104+($win64?10*16:0)`(%rsp),%rsi
+	mov	0(%rsi),%r15
+	mov	8(%rsi),%r14
+	mov	16(%rsi),%r13
+	mov	24(%rsi),%r12
+	mov	32(%rsi),%rbp
+	mov	40(%rsi),%rbx
+	lea	48(%rsi),%rsp
+.Lepilogue_ssse3:
+	ret
+.size	aesni_cbc_sha1_enc_ssse3,.-aesni_cbc_sha1_enc_ssse3
+___
+
+$j=$jj=$r=$sn=0;
+
+if ($avx) {
+my ($in0,$out,$len,$key,$ivp,$ctx,$inp)=("%rdi","%rsi","%rdx","%rcx","%r8","%r9","%r10");
+
+my $Xi=4;
+my @X=map("%xmm$_",(4..7,0..3));
+my @Tx=map("%xmm$_",(8..10));
+my @V=($A,$B,$C,$D,$E)=("%eax","%ebx","%ecx","%edx","%ebp");	# size optimization
+my @T=("%esi","%edi");
+
+my $_rol=sub { &shld(@_[0],@_) };
+my $_ror=sub { &shrd(@_[0],@_) };
+
+$code.=<<___;
+.type	aesni_cbc_sha1_enc_avx,\@function,6
+.align	16
+aesni_cbc_sha1_enc_avx:
+	mov	`($win64?56:8)`(%rsp),$inp	# load 7th argument
+	#shr	\$6,$len			# debugging artefact
+	#jz	.Lepilogue_avx			# debugging artefact
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	lea	`-104-($win64?10*16:0)`(%rsp),%rsp
+	#mov	$in0,$inp			# debugging artefact
+	#lea	64(%rsp),$ctx			# debugging artefact
+___
+$code.=<<___ if ($win64);
+	movaps	%xmm6,96+0(%rsp)
+	movaps	%xmm7,96+16(%rsp)
+	movaps	%xmm8,96+32(%rsp)
+	movaps	%xmm9,96+48(%rsp)
+	movaps	%xmm10,96+64(%rsp)
+	movaps	%xmm11,96+80(%rsp)
+	movaps	%xmm12,96+96(%rsp)
+	movaps	%xmm13,96+112(%rsp)
+	movaps	%xmm14,96+128(%rsp)
+	movaps	%xmm15,96+144(%rsp)
+.Lprologue_avx:
+___
+$code.=<<___;
+	vzeroall
+	mov	$in0,%r12			# reassign arguments
+	mov	$out,%r13
+	mov	$len,%r14
+	mov	$key,%r15
+	vmovdqu	($ivp),$iv			# load IV
+	mov	$ivp,88(%rsp)			# save $ivp
+___
+my ($in0,$out,$len,$key)=map("%r$_",(12..15));	# reassign arguments
+my $rounds="${ivp}d";
+$code.=<<___;
+	shl	\$6,$len
+	sub	$in0,$out
+	mov	240($key),$rounds
+	add	\$112,$key		# size optimization
+	add	$inp,$len		# end of input
+
+	lea	K_XX_XX(%rip),$K_XX_XX
+	mov	0($ctx),$A		# load context
+	mov	4($ctx),$B
+	mov	8($ctx),$C
+	mov	12($ctx),$D
+	mov	$B,@T[0]		# magic seed
+	mov	16($ctx),$E
+
+	vmovdqa	64($K_XX_XX),@X[2]	# pbswap mask
+	vmovdqa	0($K_XX_XX),@Tx[1]	# K_00_19
+	vmovdqu	0($inp),@X[-4&7]	# load input to %xmm[0-3]
+	vmovdqu	16($inp),@X[-3&7]
+	vmovdqu	32($inp),@X[-2&7]
+	vmovdqu	48($inp),@X[-1&7]
+	vpshufb	@X[2],@X[-4&7],@X[-4&7]	# byte swap
+	add	\$64,$inp
+	vpshufb	@X[2],@X[-3&7],@X[-3&7]
+	vpshufb	@X[2],@X[-2&7],@X[-2&7]
+	vpshufb	@X[2],@X[-1&7],@X[-1&7]
+	vpaddd	@Tx[1],@X[-4&7],@X[0]	# add K_00_19
+	vpaddd	@Tx[1],@X[-3&7],@X[1]
+	vpaddd	@Tx[1],@X[-2&7],@X[2]
+	vmovdqa	@X[0],0(%rsp)		# X[]+K xfer to IALU
+	vmovdqa	@X[1],16(%rsp)
+	vmovdqa	@X[2],32(%rsp)
+	vmovups	-112($key),$rndkey0	# $key[0]
+	vmovups	16-112($key),$rndkey[0]	# forward reference
+	jmp	.Loop_avx
+___
+
+my $aesenc=sub {
+  use integer;
+  my ($n,$k)=($r/10,$r%10);
+    if ($k==0) {
+      $code.=<<___;
+	vmovups		`16*$n`($in0),$in		# load input
+	vxorps		$rndkey0,$in,$in
+___
+      $code.=<<___ if ($n);
+	vmovups		$iv,`16*($n-1)`($out,$in0)	# write output
+___
+      $code.=<<___;
+	vxorps		$in,$iv,$iv
+	vaesenc		$rndkey[0],$iv,$iv
+	vmovups		`32+16*$k-112`($key),$rndkey[1]
+___
+    } elsif ($k==9) {
+      $sn++;
+      $code.=<<___;
+	cmp		\$11,$rounds
+	jb		.Lvaesenclast$sn
+	vaesenc		$rndkey[0],$iv,$iv
+	vmovups		`32+16*($k+0)-112`($key),$rndkey[1]
+	vaesenc		$rndkey[1],$iv,$iv
+	vmovups		`32+16*($k+1)-112`($key),$rndkey[0]
+	je		.Lvaesenclast$sn
+	vaesenc		$rndkey[0],$iv,$iv
+	vmovups		`32+16*($k+2)-112`($key),$rndkey[1]
+	vaesenc		$rndkey[1],$iv,$iv
+	vmovups		`32+16*($k+3)-112`($key),$rndkey[0]
+.Lvaesenclast$sn:
+	vaesenclast	$rndkey[0],$iv,$iv
+	vmovups		16-112($key),$rndkey[1]		# forward reference
+___
+    } else {
+      $code.=<<___;
+	vaesenc		$rndkey[0],$iv,$iv
+	vmovups		`32+16*$k-112`($key),$rndkey[1]
+___
+    }
+    $r++;	unshift(@rndkey,pop(@rndkey));
+};
+
+sub Xupdate_avx_16_31()		# recall that $Xi starts with 4
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);	# 40 instructions
+  my ($a,$b,$c,$d,$e);
+
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vpalignr(@X[0],@X[-3&7],@X[-4&7],8);	# compose "X[-14]" in "X[0]"
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	  &vpaddd	(@Tx[1],@Tx[1],@X[-1&7]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vpsrldq(@Tx[0],@X[-1&7],4);	# "X[-3]", 3 dwords
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vpxor	(@X[0],@X[0],@X[-4&7]);		# "X[0]"^="X[-16]"
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpxor	(@Tx[0],@Tx[0],@X[-2&7]);	# "X[-3]"^"X[-8]"
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpxor	(@X[0],@X[0],@Tx[0]);		# "X[0]"^="X[-3]"^"X[-8]"
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &vmovdqa	(eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]);	# X[]+K xfer to IALU
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpsrld	(@Tx[0],@X[0],31);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpslldq(@Tx[2],@X[0],12);		# "X[0]"<<96, extract one dword
+	&vpaddd	(@X[0],@X[0],@X[0]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpsrld	(@Tx[1],@Tx[2],30);
+	&vpor	(@X[0],@X[0],@Tx[0]);		# "X[0]"<<<=1
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpslld	(@Tx[2],@Tx[2],2);
+	&vpxor	(@X[0],@X[0],@Tx[1]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpxor	(@X[0],@X[0],@Tx[2]);		# "X[0]"^=("X[0]">>96)<<<2
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &vmovdqa	(@Tx[2],eval(16*(($Xi)/5))."($K_XX_XX)");	# K_XX_XX
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+
+	 foreach (@insns) { eval; }	# remaining instructions [if any]
+
+  $Xi++;	push(@X,shift(@X));	# "rotate" X[]
+		push(@Tx,shift(@Tx));
+}
+
+sub Xupdate_avx_32_79()
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);	# 32 to 48 instructions
+  my ($a,$b,$c,$d,$e);
+
+	&vpalignr(@Tx[0],@X[-1&7],@X[-2&7],8);	# compose "X[-6]"
+	&vpxor	(@X[0],@X[0],@X[-4&7]);		# "X[0]"="X[-32]"^"X[-16]"
+	 eval(shift(@insns));		# body_20_39
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# rol
+
+	&vpxor	(@X[0],@X[0],@X[-7&7]);		# "X[0]"^="X[-28]"
+	 eval(shift(@insns));
+	 eval(shift(@insns))	if (@insns[0] !~ /&ro[rl]/);
+	if ($Xi%5) {
+	  &vmovdqa	(@Tx[2],@Tx[1]);# "perpetuate" K_XX_XX...
+	} else {			# ... or load next one
+	  &vmovdqa	(@Tx[2],eval(16*($Xi/5))."($K_XX_XX)");
+	}
+	  &vpaddd	(@Tx[1],@Tx[1],@X[-1&7]);
+	 eval(shift(@insns));		# ror
+	 eval(shift(@insns));
+
+	&vpxor	(@X[0],@X[0],@Tx[0]);		# "X[0]"^="X[-6]"
+	 eval(shift(@insns));		# body_20_39
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# rol
+
+	&vpsrld	(@Tx[0],@X[0],30);
+	  &vmovdqa	(eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]);	# X[]+K xfer to IALU
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# ror
+	 eval(shift(@insns));
+
+	&vpslld	(@X[0],@X[0],2);
+	 eval(shift(@insns));		# body_20_39
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# rol
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# ror
+	 eval(shift(@insns));
+
+	&vpor	(@X[0],@X[0],@Tx[0]);		# "X[0]"<<<=2
+	 eval(shift(@insns));		# body_20_39
+	 eval(shift(@insns));
+	  &vmovdqa	(@Tx[1],@X[0])	if ($Xi<19);
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# rol
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# rol
+	 eval(shift(@insns));
+
+	 foreach (@insns) { eval; }	# remaining instructions
+
+  $Xi++;	push(@X,shift(@X));	# "rotate" X[]
+		push(@Tx,shift(@Tx));
+}
+
+sub Xuplast_avx_80()
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
+  my ($a,$b,$c,$d,$e);
+
+	 eval(shift(@insns));
+	  &vpaddd	(@Tx[1],@Tx[1],@X[-1&7]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	  &movdqa	(eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]);	# X[]+K xfer IALU
+
+	 foreach (@insns) { eval; }		# remaining instructions
+
+	&cmp	($inp,$len);
+	&je	(".Ldone_avx");
+
+	unshift(@Tx,pop(@Tx));
+
+	&vmovdqa(@X[2],"64($K_XX_XX)");		# pbswap mask
+	&vmovdqa(@Tx[1],"0($K_XX_XX)");		# K_00_19
+	&vmovdqu(@X[-4&7],"0($inp)");		# load input
+	&vmovdqu(@X[-3&7],"16($inp)");
+	&vmovdqu(@X[-2&7],"32($inp)");
+	&vmovdqu(@X[-1&7],"48($inp)");
+	&vpshufb(@X[-4&7],@X[-4&7],@X[2]);	# byte swap
+	&add	($inp,64);
+
+  $Xi=0;
+}
+
+sub Xloop_avx()
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
+  my ($a,$b,$c,$d,$e);
+
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vpshufb(@X[($Xi-3)&7],@X[($Xi-3)&7],@X[2]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vpaddd	(@X[$Xi&7],@X[($Xi-4)&7],@Tx[1]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vmovdqa(eval(16*$Xi)."(%rsp)",@X[$Xi&7]);	# X[]+K xfer to IALU
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	foreach (@insns) { eval; }
+  $Xi++;
+}
+
+sub Xtail_avx()
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
+  my ($a,$b,$c,$d,$e);
+
+	foreach (@insns) { eval; }
+}
+
+$code.=<<___;
+.align	16
+.Loop_avx:
+___
+	&Xupdate_avx_16_31(\&body_00_19);
+	&Xupdate_avx_16_31(\&body_00_19);
+	&Xupdate_avx_16_31(\&body_00_19);
+	&Xupdate_avx_16_31(\&body_00_19);
+	&Xupdate_avx_32_79(\&body_00_19);
+	&Xupdate_avx_32_79(\&body_20_39);
+	&Xupdate_avx_32_79(\&body_20_39);
+	&Xupdate_avx_32_79(\&body_20_39);
+	&Xupdate_avx_32_79(\&body_20_39);
+	&Xupdate_avx_32_79(\&body_20_39);
+	&Xupdate_avx_32_79(\&body_40_59);
+	&Xupdate_avx_32_79(\&body_40_59);
+	&Xupdate_avx_32_79(\&body_40_59);
+	&Xupdate_avx_32_79(\&body_40_59);
+	&Xupdate_avx_32_79(\&body_40_59);
+	&Xupdate_avx_32_79(\&body_20_39);
+	&Xuplast_avx_80(\&body_20_39);	# can jump to "done"
+
+				$saved_j=$j; @saved_V=@V;
+				$saved_r=$r; @saved_rndkey=@rndkey;
+
+	&Xloop_avx(\&body_20_39);
+	&Xloop_avx(\&body_20_39);
+	&Xloop_avx(\&body_20_39);
+
+$code.=<<___;
+	vmovups	$iv,48($out,$in0)		# write output
+	lea	64($in0),$in0
+
+	add	0($ctx),$A			# update context
+	add	4($ctx),@T[0]
+	add	8($ctx),$C
+	add	12($ctx),$D
+	mov	$A,0($ctx)
+	add	16($ctx),$E
+	mov	@T[0],4($ctx)
+	mov	@T[0],$B			# magic seed
+	mov	$C,8($ctx)
+	mov	$D,12($ctx)
+	mov	$E,16($ctx)
+	jmp	.Loop_avx
+
+.align	16
+.Ldone_avx:
+___
+				$jj=$j=$saved_j; @V=@saved_V;
+				$r=$saved_r;     @rndkey=@saved_rndkey;
+
+	&Xtail_avx(\&body_20_39);
+	&Xtail_avx(\&body_20_39);
+	&Xtail_avx(\&body_20_39);
+
+$code.=<<___;
+	vmovups	$iv,48($out,$in0)		# write output
+	mov	88(%rsp),$ivp			# restore $ivp
+
+	add	0($ctx),$A			# update context
+	add	4($ctx),@T[0]
+	add	8($ctx),$C
+	mov	$A,0($ctx)
+	add	12($ctx),$D
+	mov	@T[0],4($ctx)
+	add	16($ctx),$E
+	mov	$C,8($ctx)
+	mov	$D,12($ctx)
+	mov	$E,16($ctx)
+	vmovups	$iv,($ivp)			# write IV
+	vzeroall
+___
+$code.=<<___ if ($win64);
+	movaps	96+0(%rsp),%xmm6
+	movaps	96+16(%rsp),%xmm7
+	movaps	96+32(%rsp),%xmm8
+	movaps	96+48(%rsp),%xmm9
+	movaps	96+64(%rsp),%xmm10
+	movaps	96+80(%rsp),%xmm11
+	movaps	96+96(%rsp),%xmm12
+	movaps	96+112(%rsp),%xmm13
+	movaps	96+128(%rsp),%xmm14
+	movaps	96+144(%rsp),%xmm15
+___
+$code.=<<___;
+	lea	`104+($win64?10*16:0)`(%rsp),%rsi
+	mov	0(%rsi),%r15
+	mov	8(%rsi),%r14
+	mov	16(%rsi),%r13
+	mov	24(%rsi),%r12
+	mov	32(%rsi),%rbp
+	mov	40(%rsi),%rbx
+	lea	48(%rsi),%rsp
+.Lepilogue_avx:
+	ret
+.size	aesni_cbc_sha1_enc_avx,.-aesni_cbc_sha1_enc_avx
+___
+}
+$code.=<<___;
+.align	64
+K_XX_XX:
+.long	0x5a827999,0x5a827999,0x5a827999,0x5a827999	# K_00_19
+.long	0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1	# K_20_39
+.long	0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc	# K_40_59
+.long	0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6	# K_60_79
+.long	0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f	# pbswap mask
+
+.asciz	"AESNI-CBC+SHA1 stitch for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
+.align	64
+___
+
+# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
+#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
+if ($win64) {
+$rec="%rcx";
+$frame="%rdx";
+$context="%r8";
+$disp="%r9";
+
+$code.=<<___;
+.extern	__imp_RtlVirtualUnwind
+.type	ssse3_handler,\@abi-omnipotent
+.align	16
+ssse3_handler:
+	push	%rsi
+	push	%rdi
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	pushfq
+	sub	\$64,%rsp
+
+	mov	120($context),%rax	# pull context->Rax
+	mov	248($context),%rbx	# pull context->Rip
+
+	mov	8($disp),%rsi		# disp->ImageBase
+	mov	56($disp),%r11		# disp->HandlerData
+
+	mov	0(%r11),%r10d		# HandlerData[0]
+	lea	(%rsi,%r10),%r10	# prologue label
+	cmp	%r10,%rbx		# context->Rip<prologue label
+	jb	.Lcommon_seh_tail
+
+	mov	152($context),%rax	# pull context->Rsp
+
+	mov	4(%r11),%r10d		# HandlerData[1]
+	lea	(%rsi,%r10),%r10	# epilogue label
+	cmp	%r10,%rbx		# context->Rip>=epilogue label
+	jae	.Lcommon_seh_tail
+
+	lea	96(%rax),%rsi
+	lea	512($context),%rdi	# &context.Xmm6
+	mov	\$20,%ecx
+	.long	0xa548f3fc		# cld; rep movsq
+	lea	`104+10*16`(%rax),%rax	# adjust stack pointer
+
+	mov	0(%rax),%r15
+	mov	8(%rax),%r14
+	mov	16(%rax),%r13
+	mov	24(%rax),%r12
+	mov	32(%rax),%rbp
+	mov	40(%rax),%rbx
+	lea	48(%rax),%rax
+	mov	%rbx,144($context)	# restore context->Rbx
+	mov	%rbp,160($context)	# restore context->Rbp
+	mov	%r12,216($context)	# restore context->R12
+	mov	%r13,224($context)	# restore context->R13
+	mov	%r14,232($context)	# restore context->R14
+	mov	%r15,240($context)	# restore context->R15
+
+.Lcommon_seh_tail:
+	mov	8(%rax),%rdi
+	mov	16(%rax),%rsi
+	mov	%rax,152($context)	# restore context->Rsp
+	mov	%rsi,168($context)	# restore context->Rsi
+	mov	%rdi,176($context)	# restore context->Rdi
+
+	mov	40($disp),%rdi		# disp->ContextRecord
+	mov	$context,%rsi		# context
+	mov	\$154,%ecx		# sizeof(CONTEXT)
+	.long	0xa548f3fc		# cld; rep movsq
+
+	mov	$disp,%rsi
+	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
+	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
+	mov	0(%rsi),%r8		# arg3, disp->ControlPc
+	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
+	mov	40(%rsi),%r10		# disp->ContextRecord
+	lea	56(%rsi),%r11		# &disp->HandlerData
+	lea	24(%rsi),%r12		# &disp->EstablisherFrame
+	mov	%r10,32(%rsp)		# arg5
+	mov	%r11,40(%rsp)		# arg6
+	mov	%r12,48(%rsp)		# arg7
+	mov	%rcx,56(%rsp)		# arg8, (NULL)
+	call	*__imp_RtlVirtualUnwind(%rip)
+
+	mov	\$1,%eax		# ExceptionContinueSearch
+	add	\$64,%rsp
+	popfq
+	pop	%r15
+	pop	%r14
+	pop	%r13
+	pop	%r12
+	pop	%rbp
+	pop	%rbx
+	pop	%rdi
+	pop	%rsi
+	ret
+.size	ssse3_handler,.-ssse3_handler
+
+.section	.pdata
+.align	4
+	.rva	.LSEH_begin_aesni_cbc_sha1_enc_ssse3
+	.rva	.LSEH_end_aesni_cbc_sha1_enc_ssse3
+	.rva	.LSEH_info_aesni_cbc_sha1_enc_ssse3
+___
+$code.=<<___ if ($avx);
+	.rva	.LSEH_begin_aesni_cbc_sha1_enc_avx
+	.rva	.LSEH_end_aesni_cbc_sha1_enc_avx
+	.rva	.LSEH_info_aesni_cbc_sha1_enc_avx
+___
+$code.=<<___;
+.section	.xdata
+.align	8
+.LSEH_info_aesni_cbc_sha1_enc_ssse3:
+	.byte	9,0,0,0
+	.rva	ssse3_handler
+	.rva	.Lprologue_ssse3,.Lepilogue_ssse3	# HandlerData[]
+___
+$code.=<<___ if ($avx);
+.LSEH_info_aesni_cbc_sha1_enc_avx:
+	.byte	9,0,0,0
+	.rva	ssse3_handler
+	.rva	.Lprologue_avx,.Lepilogue_avx		# HandlerData[]
+___
+}
+
+####################################################################
+sub rex {
+  local *opcode=shift;
+  my ($dst,$src)=@_;
+  my $rex=0;
+
+    $rex|=0x04			if($dst>=8);
+    $rex|=0x01			if($src>=8);
+    push @opcode,$rex|0x40	if($rex);
+}
+
+sub aesni {
+  my $line=shift;
+  my @opcode=(0x66);
+
+    if ($line=~/(aes[a-z]+)\s+%xmm([0-9]+),\s*%xmm([0-9]+)/) {
+	my %opcodelet = (
+		"aesenc" => 0xdc,	"aesenclast" => 0xdd
+	);
+	return undef if (!defined($opcodelet{$1}));
+	rex(\@opcode,$3,$2);
+	push @opcode,0x0f,0x38,$opcodelet{$1};
+	push @opcode,0xc0|($2&7)|(($3&7)<<3);	# ModR/M
+	return ".byte\t".join(',',@opcode);
+    }
+    return $line;
+}
+
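+# Hedged usage sketch (illustrative only, kept in comments so nothing
+# extra is executed): the second substitution below routes every AES-NI
+# mnemonic through aesni(), which emits the raw instruction encoding
+# for assemblers that do not know these instructions yet, e.g.
+#
+#	aesni("aesenc\t%xmm1,%xmm2")
+#	# returns ".byte\t102,15,56,220,209", i.e. 0x66,0x0f,0x38,0xdc,0xd1
+#
+# while mnemonics it does not recognize are passed through unchanged.
+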
+$code =~ s/\`([^\`]*)\`/eval($1)/gem;
+$code =~ s/\b(aes.*%xmm[0-9]+).*$/aesni($1)/gem;
+
+print $code;
+close STDOUT;
diff --git a/jni/openssl/crypto/aes/asm/aesni-x86.pl b/jni/openssl/crypto/aes/asm/aesni-x86.pl
new file mode 100644
index 0000000..3dc345b
--- /dev/null
+++ b/jni/openssl/crypto/aes/asm/aesni-x86.pl
@@ -0,0 +1,2189 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# This module implements support for the Intel AES-NI extension. In
+# the OpenSSL context it's used with the Intel engine, but it can also
+# be used as a drop-in replacement for crypto/aes/asm/aes-586.pl [see
+# below for details].
+#
+# Performance.
+#
+# To start with, see the corresponding paragraph in aesni-x86_64.pl...
+# Instead of filling a table similar to the one found there, I've
+# chosen to summarize *comparison* results for raw ECB, CTR and CBC
+# benchmarks. The simplified table below represents 32-bit performance
+# relative to the 64-bit one at every given point. Ratios vary across
+# encryption modes, hence the interval values.
+#
+#	16-byte     64-byte     256-byte    1-KB        8-KB
+#	53-67%      67-84%      91-94%      95-98%      97-99.5%
+#
+# Lower ratios for smaller block sizes are perfectly understandable,
+# because function call overhead is higher in 32-bit mode. For the
+# largest 8-KB blocks performance is virtually the same: 32-bit code is
+# less than 1% slower for ECB, CBC and CCM, and ~3% slower otherwise.
+
+# January 2011
+#
+# See aesni-x86_64.pl for details. Unlike the x86_64 version, this
+# module interleaves at most 6 aes[enc|dec] instructions, because there
+# are not enough registers for an 8x interleave [which should be
+# optimal for Sandy Bridge]. Actually, the performance results for the
+# 6x interleave factor presented in aesni-x86_64.pl (except for CTR)
+# are for this module.
+
+# April 2011
+#
+# Add aesni_xts_[en|de]crypt. Westmere spends 1.50 cycles processing
+# one byte out of 8KB with 128-bit key, Sandy Bridge - 1.09.
+
+$PREFIX="aesni";	# if $PREFIX is set to "AES", the script
+			# generates drop-in replacement for
+			# crypto/aes/asm/aes-586.pl:-)
+$inline=1;		# inline _aesni_[en|de]crypt
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+push(@INC,"${dir}","${dir}../../perlasm");
+require "x86asm.pl";
+
+&asm_init($ARGV[0],$0);
+
+if ($PREFIX eq "aesni")	{ $movekey=*movups; }
+else			{ $movekey=*movups; }
+
+$len="eax";
+$rounds="ecx";
+$key="edx";
+$inp="esi";
+$out="edi";
+$rounds_="ebx";	# backup copy for $rounds
+$key_="ebp";	# backup copy for $key
+
+$rndkey0="xmm0";
+$rndkey1="xmm1";
+$inout0="xmm2";
+$inout1="xmm3";
+$inout2="xmm4";
+$inout3="xmm5";	$in1="xmm5";
+$inout4="xmm6";	$in0="xmm6";
+$inout5="xmm7";	$ivec="xmm7";
+
+# AESNI extension
+sub aeskeygenassist
+{ my($dst,$src,$imm)=@_;
+    if ("$dst:$src" =~ /xmm([0-7]):xmm([0-7])/)
+    {	&data_byte(0x66,0x0f,0x3a,0xdf,0xc0|($1<<3)|$2,$imm);	}
+}
+sub aescommon
+{ my($opcodelet,$dst,$src)=@_;
+    if ("$dst:$src" =~ /xmm([0-7]):xmm([0-7])/)
+    {	&data_byte(0x66,0x0f,0x38,$opcodelet,0xc0|($1<<3)|$2);}
+}
+sub aesimc	{ aescommon(0xdb,@_); }
+sub aesenc	{ aescommon(0xdc,@_); }
+sub aesenclast	{ aescommon(0xdd,@_); }
+sub aesdec	{ aescommon(0xde,@_); }
+sub aesdeclast	{ aescommon(0xdf,@_); }
+
+# Inline version of internal aesni_[en|de]crypt1
+{ my $sn;
+sub aesni_inline_generate1
+{ my ($p,$inout,$ivec)=@_; $inout=$inout0 if (!defined($inout));
+  $sn++;
+
+    &$movekey		($rndkey0,&QWP(0,$key));
+    &$movekey		($rndkey1,&QWP(16,$key));
+    &xorps		($ivec,$rndkey0)	if (defined($ivec));
+    &lea		($key,&DWP(32,$key));
+    &xorps		($inout,$ivec)		if (defined($ivec));
+    &xorps		($inout,$rndkey0)	if (!defined($ivec));
+    &set_label("${p}1_loop_$sn");
+	eval"&aes${p}	($inout,$rndkey1)";
+	&dec		($rounds);
+	&$movekey	($rndkey1,&QWP(0,$key));
+	&lea		($key,&DWP(16,$key));
+    &jnz		(&label("${p}1_loop_$sn"));
+    eval"&aes${p}last	($inout,$rndkey1)";
+}}
+
+sub aesni_generate1	# fully unrolled loop
+{ my ($p,$inout)=@_; $inout=$inout0 if (!defined($inout));
+
+    &function_begin_B("_aesni_${p}rypt1");
+	&movups		($rndkey0,&QWP(0,$key));
+	&$movekey	($rndkey1,&QWP(0x10,$key));
+	&xorps		($inout,$rndkey0);
+	&$movekey	($rndkey0,&QWP(0x20,$key));
+	&lea		($key,&DWP(0x30,$key));
+	&cmp		($rounds,11);
+	&jb		(&label("${p}128"));
+	&lea		($key,&DWP(0x20,$key));
+	&je		(&label("${p}192"));
+	&lea		($key,&DWP(0x20,$key));
+	eval"&aes${p}	($inout,$rndkey1)";
+	&$movekey	($rndkey1,&QWP(-0x40,$key));
+	eval"&aes${p}	($inout,$rndkey0)";
+	&$movekey	($rndkey0,&QWP(-0x30,$key));
+    &set_label("${p}192");
+	eval"&aes${p}	($inout,$rndkey1)";
+	&$movekey	($rndkey1,&QWP(-0x20,$key));
+	eval"&aes${p}	($inout,$rndkey0)";
+	&$movekey	($rndkey0,&QWP(-0x10,$key));
+    &set_label("${p}128");
+	eval"&aes${p}	($inout,$rndkey1)";
+	&$movekey	($rndkey1,&QWP(0,$key));
+	eval"&aes${p}	($inout,$rndkey0)";
+	&$movekey	($rndkey0,&QWP(0x10,$key));
+	eval"&aes${p}	($inout,$rndkey1)";
+	&$movekey	($rndkey1,&QWP(0x20,$key));
+	eval"&aes${p}	($inout,$rndkey0)";
+	&$movekey	($rndkey0,&QWP(0x30,$key));
+	eval"&aes${p}	($inout,$rndkey1)";
+	&$movekey	($rndkey1,&QWP(0x40,$key));
+	eval"&aes${p}	($inout,$rndkey0)";
+	&$movekey	($rndkey0,&QWP(0x50,$key));
+	eval"&aes${p}	($inout,$rndkey1)";
+	&$movekey	($rndkey1,&QWP(0x60,$key));
+	eval"&aes${p}	($inout,$rndkey0)";
+	&$movekey	($rndkey0,&QWP(0x70,$key));
+	eval"&aes${p}	($inout,$rndkey1)";
+    eval"&aes${p}last	($inout,$rndkey0)";
+    &ret();
+    &function_end_B("_aesni_${p}rypt1");
+}
+
+# void $PREFIX_encrypt (const void *inp,void *out,const AES_KEY *key);
+&aesni_generate1("enc") if (!$inline);
+&function_begin_B("${PREFIX}_encrypt");
+	&mov	("eax",&wparam(0));
+	&mov	($key,&wparam(2));
+	&movups	($inout0,&QWP(0,"eax"));
+	&mov	($rounds,&DWP(240,$key));
+	&mov	("eax",&wparam(1));
+	if ($inline)
+	{   &aesni_inline_generate1("enc");	}
+	else
+	{   &call	("_aesni_encrypt1");	}
+	&movups	(&QWP(0,"eax"),$inout0);
+	&ret	();
+&function_end_B("${PREFIX}_encrypt");
+
+# void $PREFIX_decrypt (const void *inp,void *out,const AES_KEY *key);
+&aesni_generate1("dec") if(!$inline);
+&function_begin_B("${PREFIX}_decrypt");
+	&mov	("eax",&wparam(0));
+	&mov	($key,&wparam(2));
+	&movups	($inout0,&QWP(0,"eax"));
+	&mov	($rounds,&DWP(240,$key));
+	&mov	("eax",&wparam(1));
+	if ($inline)
+	{   &aesni_inline_generate1("dec");	}
+	else
+	{   &call	("_aesni_decrypt1");	}
+	&movups	(&QWP(0,"eax"),$inout0);
+	&ret	();
+&function_end_B("${PREFIX}_decrypt");
+
+# _aesni_[en|de]cryptN are private interfaces, N denotes interleave
+# factor. Why were 3x subroutines originally used in loops? Even though
+# aes[enc|dec] latency was originally 6, it could be scheduled only
+# every *2nd* cycle. Thus 3x interleave was the one providing optimal
+# utilization, i.e. when the subroutine's throughput is virtually the
+# same as that of the non-interleaved subroutine [for up to 3 input
+# blocks]. This is why it makes no sense to implement a 2x subroutine.
+# aes[enc|dec] latency in the next processor generation is 8, but the
+# instructions can be scheduled every cycle. The optimal interleave for
+# the new processor is therefore 8x, but it's unfeasible to accommodate
+# it in the XMM registers addressable in 32-bit mode, and therefore 6x
+# is used instead...
+
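+# A minimal illustrative sketch of the arithmetic above (the helper is
+# hypothetical and never called by this module): the optimal interleave
+# factor is roughly instruction latency divided by the issue interval,
+# i.e. 6/2=3 on the original parts and 8/1=8 on the newer ones, while
+# the register budget in 32-bit mode caps it at 6 data registers plus
+# 2 round-key registers out of the 8 addressable XMM registers.
+sub _optimal_interleave_sketch
+{ use integer;
+  my ($latency,$issue_interval)=@_;	# both in cycles
+    return $latency/$issue_interval;
+}
+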
+sub aesni_generate3
+{ my $p=shift;
+
+    &function_begin_B("_aesni_${p}rypt3");
+	&$movekey	($rndkey0,&QWP(0,$key));
+	&shr		($rounds,1);
+	&$movekey	($rndkey1,&QWP(16,$key));
+	&lea		($key,&DWP(32,$key));
+	&xorps		($inout0,$rndkey0);
+	&pxor		($inout1,$rndkey0);
+	&pxor		($inout2,$rndkey0);
+	&$movekey	($rndkey0,&QWP(0,$key));
+
+    &set_label("${p}3_loop");
+	eval"&aes${p}	($inout0,$rndkey1)";
+	eval"&aes${p}	($inout1,$rndkey1)";
+	&dec		($rounds);
+	eval"&aes${p}	($inout2,$rndkey1)";
+	&$movekey	($rndkey1,&QWP(16,$key));
+	eval"&aes${p}	($inout0,$rndkey0)";
+	eval"&aes${p}	($inout1,$rndkey0)";
+	&lea		($key,&DWP(32,$key));
+	eval"&aes${p}	($inout2,$rndkey0)";
+	&$movekey	($rndkey0,&QWP(0,$key));
+	&jnz		(&label("${p}3_loop"));
+    eval"&aes${p}	($inout0,$rndkey1)";
+    eval"&aes${p}	($inout1,$rndkey1)";
+    eval"&aes${p}	($inout2,$rndkey1)";
+    eval"&aes${p}last	($inout0,$rndkey0)";
+    eval"&aes${p}last	($inout1,$rndkey0)";
+    eval"&aes${p}last	($inout2,$rndkey0)";
+    &ret();
+    &function_end_B("_aesni_${p}rypt3");
+}
+
+# 4x interleave is implemented to improve small block performance,
+# most notably [and naturally] 4-block performance by ~30%. One can
+# argue that 5x should have been implemented as well, but the
+# improvement would be <20%, so it's not worth it...
+sub aesni_generate4
+{ my $p=shift;
+
+    &function_begin_B("_aesni_${p}rypt4");
+	&$movekey	($rndkey0,&QWP(0,$key));
+	&$movekey	($rndkey1,&QWP(16,$key));
+	&shr		($rounds,1);
+	&lea		($key,&DWP(32,$key));
+	&xorps		($inout0,$rndkey0);
+	&pxor		($inout1,$rndkey0);
+	&pxor		($inout2,$rndkey0);
+	&pxor		($inout3,$rndkey0);
+	&$movekey	($rndkey0,&QWP(0,$key));
+
+    &set_label("${p}4_loop");
+	eval"&aes${p}	($inout0,$rndkey1)";
+	eval"&aes${p}	($inout1,$rndkey1)";
+	&dec		($rounds);
+	eval"&aes${p}	($inout2,$rndkey1)";
+	eval"&aes${p}	($inout3,$rndkey1)";
+	&$movekey	($rndkey1,&QWP(16,$key));
+	eval"&aes${p}	($inout0,$rndkey0)";
+	eval"&aes${p}	($inout1,$rndkey0)";
+	&lea		($key,&DWP(32,$key));
+	eval"&aes${p}	($inout2,$rndkey0)";
+	eval"&aes${p}	($inout3,$rndkey0)";
+	&$movekey	($rndkey0,&QWP(0,$key));
+    &jnz		(&label("${p}4_loop"));
+
+    eval"&aes${p}	($inout0,$rndkey1)";
+    eval"&aes${p}	($inout1,$rndkey1)";
+    eval"&aes${p}	($inout2,$rndkey1)";
+    eval"&aes${p}	($inout3,$rndkey1)";
+    eval"&aes${p}last	($inout0,$rndkey0)";
+    eval"&aes${p}last	($inout1,$rndkey0)";
+    eval"&aes${p}last	($inout2,$rndkey0)";
+    eval"&aes${p}last	($inout3,$rndkey0)";
+    &ret();
+    &function_end_B("_aesni_${p}rypt4");
+}
+
+sub aesni_generate6
+{ my $p=shift;
+
+    &function_begin_B("_aesni_${p}rypt6");
+    &static_label("_aesni_${p}rypt6_enter");
+	&$movekey	($rndkey0,&QWP(0,$key));
+	&shr		($rounds,1);
+	&$movekey	($rndkey1,&QWP(16,$key));
+	&lea		($key,&DWP(32,$key));
+	&xorps		($inout0,$rndkey0);
+	&pxor		($inout1,$rndkey0);	# pxor does better here
+	eval"&aes${p}	($inout0,$rndkey1)";
+	&pxor		($inout2,$rndkey0);
+	eval"&aes${p}	($inout1,$rndkey1)";
+	&pxor		($inout3,$rndkey0);
+	&dec		($rounds);
+	eval"&aes${p}	($inout2,$rndkey1)";
+	&pxor		($inout4,$rndkey0);
+	eval"&aes${p}	($inout3,$rndkey1)";
+	&pxor		($inout5,$rndkey0);
+	eval"&aes${p}	($inout4,$rndkey1)";
+	&$movekey	($rndkey0,&QWP(0,$key));
+	eval"&aes${p}	($inout5,$rndkey1)";
+	&jmp		(&label("_aesni_${p}rypt6_enter"));
+
+    &set_label("${p}6_loop",16);
+	eval"&aes${p}	($inout0,$rndkey1)";
+	eval"&aes${p}	($inout1,$rndkey1)";
+	&dec		($rounds);
+	eval"&aes${p}	($inout2,$rndkey1)";
+	eval"&aes${p}	($inout3,$rndkey1)";
+	eval"&aes${p}	($inout4,$rndkey1)";
+	eval"&aes${p}	($inout5,$rndkey1)";
+    &set_label("_aesni_${p}rypt6_enter",16);
+	&$movekey	($rndkey1,&QWP(16,$key));
+	eval"&aes${p}	($inout0,$rndkey0)";
+	eval"&aes${p}	($inout1,$rndkey0)";
+	&lea		($key,&DWP(32,$key));
+	eval"&aes${p}	($inout2,$rndkey0)";
+	eval"&aes${p}	($inout3,$rndkey0)";
+	eval"&aes${p}	($inout4,$rndkey0)";
+	eval"&aes${p}	($inout5,$rndkey0)";
+	&$movekey	($rndkey0,&QWP(0,$key));
+    &jnz		(&label("${p}6_loop"));
+
+    eval"&aes${p}	($inout0,$rndkey1)";
+    eval"&aes${p}	($inout1,$rndkey1)";
+    eval"&aes${p}	($inout2,$rndkey1)";
+    eval"&aes${p}	($inout3,$rndkey1)";
+    eval"&aes${p}	($inout4,$rndkey1)";
+    eval"&aes${p}	($inout5,$rndkey1)";
+    eval"&aes${p}last	($inout0,$rndkey0)";
+    eval"&aes${p}last	($inout1,$rndkey0)";
+    eval"&aes${p}last	($inout2,$rndkey0)";
+    eval"&aes${p}last	($inout3,$rndkey0)";
+    eval"&aes${p}last	($inout4,$rndkey0)";
+    eval"&aes${p}last	($inout5,$rndkey0)";
+    &ret();
+    &function_end_B("_aesni_${p}rypt6");
+}
+&aesni_generate3("enc") if ($PREFIX eq "aesni");
+&aesni_generate3("dec");
+&aesni_generate4("enc") if ($PREFIX eq "aesni");
+&aesni_generate4("dec");
+&aesni_generate6("enc") if ($PREFIX eq "aesni");
+&aesni_generate6("dec");
+
+if ($PREFIX eq "aesni") {
+######################################################################
+# void aesni_ecb_encrypt (const void *in, void *out,
+#                         size_t length, const AES_KEY *key,
+#                         int enc);
+&function_begin("aesni_ecb_encrypt");
+	&mov	($inp,&wparam(0));
+	&mov	($out,&wparam(1));
+	&mov	($len,&wparam(2));
+	&mov	($key,&wparam(3));
+	&mov	($rounds_,&wparam(4));
+	&and	($len,-16);
+	&jz	(&label("ecb_ret"));
+	&mov	($rounds,&DWP(240,$key));
+	&test	($rounds_,$rounds_);
+	&jz	(&label("ecb_decrypt"));
+
+	&mov	($key_,$key);		# backup $key
+	&mov	($rounds_,$rounds);	# backup $rounds
+	&cmp	($len,0x60);
+	&jb	(&label("ecb_enc_tail"));
+
+	&movdqu	($inout0,&QWP(0,$inp));
+	&movdqu	($inout1,&QWP(0x10,$inp));
+	&movdqu	($inout2,&QWP(0x20,$inp));
+	&movdqu	($inout3,&QWP(0x30,$inp));
+	&movdqu	($inout4,&QWP(0x40,$inp));
+	&movdqu	($inout5,&QWP(0x50,$inp));
+	&lea	($inp,&DWP(0x60,$inp));
+	&sub	($len,0x60);
+	&jmp	(&label("ecb_enc_loop6_enter"));
+
+&set_label("ecb_enc_loop6",16);
+	&movups	(&QWP(0,$out),$inout0);
+	&movdqu	($inout0,&QWP(0,$inp));
+	&movups	(&QWP(0x10,$out),$inout1);
+	&movdqu	($inout1,&QWP(0x10,$inp));
+	&movups	(&QWP(0x20,$out),$inout2);
+	&movdqu	($inout2,&QWP(0x20,$inp));
+	&movups	(&QWP(0x30,$out),$inout3);
+	&movdqu	($inout3,&QWP(0x30,$inp));
+	&movups	(&QWP(0x40,$out),$inout4);
+	&movdqu	($inout4,&QWP(0x40,$inp));
+	&movups	(&QWP(0x50,$out),$inout5);
+	&lea	($out,&DWP(0x60,$out));
+	&movdqu	($inout5,&QWP(0x50,$inp));
+	&lea	($inp,&DWP(0x60,$inp));
+&set_label("ecb_enc_loop6_enter");
+
+	&call	("_aesni_encrypt6");
+
+	&mov	($key,$key_);		# restore $key
+	&mov	($rounds,$rounds_);	# restore $rounds
+	&sub	($len,0x60);
+	&jnc	(&label("ecb_enc_loop6"));
+
+	&movups	(&QWP(0,$out),$inout0);
+	&movups	(&QWP(0x10,$out),$inout1);
+	&movups	(&QWP(0x20,$out),$inout2);
+	&movups	(&QWP(0x30,$out),$inout3);
+	&movups	(&QWP(0x40,$out),$inout4);
+	&movups	(&QWP(0x50,$out),$inout5);
+	&lea	($out,&DWP(0x60,$out));
+	&add	($len,0x60);
+	&jz	(&label("ecb_ret"));
+
+&set_label("ecb_enc_tail");
+	&movups	($inout0,&QWP(0,$inp));
+	&cmp	($len,0x20);
+	&jb	(&label("ecb_enc_one"));
+	&movups	($inout1,&QWP(0x10,$inp));
+	&je	(&label("ecb_enc_two"));
+	&movups	($inout2,&QWP(0x20,$inp));
+	&cmp	($len,0x40);
+	&jb	(&label("ecb_enc_three"));
+	&movups	($inout3,&QWP(0x30,$inp));
+	&je	(&label("ecb_enc_four"));
+	&movups	($inout4,&QWP(0x40,$inp));
+	&xorps	($inout5,$inout5);
+	&call	("_aesni_encrypt6");
+	&movups	(&QWP(0,$out),$inout0);
+	&movups	(&QWP(0x10,$out),$inout1);
+	&movups	(&QWP(0x20,$out),$inout2);
+	&movups	(&QWP(0x30,$out),$inout3);
+	&movups	(&QWP(0x40,$out),$inout4);
+	&jmp	(&label("ecb_ret"));
+
+&set_label("ecb_enc_one",16);
+	if ($inline)
+	{   &aesni_inline_generate1("enc");	}
+	else
+	{   &call	("_aesni_encrypt1");	}
+	&movups	(&QWP(0,$out),$inout0);
+	&jmp	(&label("ecb_ret"));
+
+&set_label("ecb_enc_two",16);
+	&xorps	($inout2,$inout2);
+	&call	("_aesni_encrypt3");
+	&movups	(&QWP(0,$out),$inout0);
+	&movups	(&QWP(0x10,$out),$inout1);
+	&jmp	(&label("ecb_ret"));
+
+&set_label("ecb_enc_three",16);
+	&call	("_aesni_encrypt3");
+	&movups	(&QWP(0,$out),$inout0);
+	&movups	(&QWP(0x10,$out),$inout1);
+	&movups	(&QWP(0x20,$out),$inout2);
+	&jmp	(&label("ecb_ret"));
+
+&set_label("ecb_enc_four",16);
+	&call	("_aesni_encrypt4");
+	&movups	(&QWP(0,$out),$inout0);
+	&movups	(&QWP(0x10,$out),$inout1);
+	&movups	(&QWP(0x20,$out),$inout2);
+	&movups	(&QWP(0x30,$out),$inout3);
+	&jmp	(&label("ecb_ret"));
+######################################################################
+&set_label("ecb_decrypt",16);
+	&mov	($key_,$key);		# backup $key
+	&mov	($rounds_,$rounds);	# backup $rounds
+	&cmp	($len,0x60);
+	&jb	(&label("ecb_dec_tail"));
+
+	&movdqu	($inout0,&QWP(0,$inp));
+	&movdqu	($inout1,&QWP(0x10,$inp));
+	&movdqu	($inout2,&QWP(0x20,$inp));
+	&movdqu	($inout3,&QWP(0x30,$inp));
+	&movdqu	($inout4,&QWP(0x40,$inp));
+	&movdqu	($inout5,&QWP(0x50,$inp));
+	&lea	($inp,&DWP(0x60,$inp));
+	&sub	($len,0x60);
+	&jmp	(&label("ecb_dec_loop6_enter"));
+
+&set_label("ecb_dec_loop6",16);
+	&movups	(&QWP(0,$out),$inout0);
+	&movdqu	($inout0,&QWP(0,$inp));
+	&movups	(&QWP(0x10,$out),$inout1);
+	&movdqu	($inout1,&QWP(0x10,$inp));
+	&movups	(&QWP(0x20,$out),$inout2);
+	&movdqu	($inout2,&QWP(0x20,$inp));
+	&movups	(&QWP(0x30,$out),$inout3);
+	&movdqu	($inout3,&QWP(0x30,$inp));
+	&movups	(&QWP(0x40,$out),$inout4);
+	&movdqu	($inout4,&QWP(0x40,$inp));
+	&movups	(&QWP(0x50,$out),$inout5);
+	&lea	($out,&DWP(0x60,$out));
+	&movdqu	($inout5,&QWP(0x50,$inp));
+	&lea	($inp,&DWP(0x60,$inp));
+&set_label("ecb_dec_loop6_enter");
+
+	&call	("_aesni_decrypt6");
+
+	&mov	($key,$key_);		# restore $key
+	&mov	($rounds,$rounds_);	# restore $rounds
+	&sub	($len,0x60);
+	&jnc	(&label("ecb_dec_loop6"));
+
+	&movups	(&QWP(0,$out),$inout0);
+	&movups	(&QWP(0x10,$out),$inout1);
+	&movups	(&QWP(0x20,$out),$inout2);
+	&movups	(&QWP(0x30,$out),$inout3);
+	&movups	(&QWP(0x40,$out),$inout4);
+	&movups	(&QWP(0x50,$out),$inout5);
+	&lea	($out,&DWP(0x60,$out));
+	&add	($len,0x60);
+	&jz	(&label("ecb_ret"));
+
+&set_label("ecb_dec_tail");
+	&movups	($inout0,&QWP(0,$inp));
+	&cmp	($len,0x20);
+	&jb	(&label("ecb_dec_one"));
+	&movups	($inout1,&QWP(0x10,$inp));
+	&je	(&label("ecb_dec_two"));
+	&movups	($inout2,&QWP(0x20,$inp));
+	&cmp	($len,0x40);
+	&jb	(&label("ecb_dec_three"));
+	&movups	($inout3,&QWP(0x30,$inp));
+	&je	(&label("ecb_dec_four"));
+	&movups	($inout4,&QWP(0x40,$inp));
+	&xorps	($inout5,$inout5);
+	&call	("_aesni_decrypt6");
+	&movups	(&QWP(0,$out),$inout0);
+	&movups	(&QWP(0x10,$out),$inout1);
+	&movups	(&QWP(0x20,$out),$inout2);
+	&movups	(&QWP(0x30,$out),$inout3);
+	&movups	(&QWP(0x40,$out),$inout4);
+	&jmp	(&label("ecb_ret"));
+
+&set_label("ecb_dec_one",16);
+	if ($inline)
+	{   &aesni_inline_generate1("dec");	}
+	else
+	{   &call	("_aesni_decrypt1");	}
+	&movups	(&QWP(0,$out),$inout0);
+	&jmp	(&label("ecb_ret"));
+
+&set_label("ecb_dec_two",16);
+	&xorps	($inout2,$inout2);
+	&call	("_aesni_decrypt3");
+	&movups	(&QWP(0,$out),$inout0);
+	&movups	(&QWP(0x10,$out),$inout1);
+	&jmp	(&label("ecb_ret"));
+
+&set_label("ecb_dec_three",16);
+	&call	("_aesni_decrypt3");
+	&movups	(&QWP(0,$out),$inout0);
+	&movups	(&QWP(0x10,$out),$inout1);
+	&movups	(&QWP(0x20,$out),$inout2);
+	&jmp	(&label("ecb_ret"));
+
+&set_label("ecb_dec_four",16);
+	&call	("_aesni_decrypt4");
+	&movups	(&QWP(0,$out),$inout0);
+	&movups	(&QWP(0x10,$out),$inout1);
+	&movups	(&QWP(0x20,$out),$inout2);
+	&movups	(&QWP(0x30,$out),$inout3);
+
+&set_label("ecb_ret");
+&function_end("aesni_ecb_encrypt");
+
+######################################################################
+# void aesni_ccm64_[en|de]crypt_blocks (const void *in, void *out,
+#                         size_t blocks, const AES_KEY *key,
+#                         const char *ivec,char *cmac);
+#
+# Handles only complete blocks, operates on 64-bit counter and
+# does not update *ivec! Nor does it finalize CMAC value
+# (see engine/eng_aesni.c for details)
+#
+{ my $cmac=$inout1;
+&function_begin("aesni_ccm64_encrypt_blocks");
+	&mov	($inp,&wparam(0));
+	&mov	($out,&wparam(1));
+	&mov	($len,&wparam(2));
+	&mov	($key,&wparam(3));
+	&mov	($rounds_,&wparam(4));
+	&mov	($rounds,&wparam(5));
+	&mov	($key_,"esp");
+	&sub	("esp",60);
+	&and	("esp",-16);			# align stack
+	&mov	(&DWP(48,"esp"),$key_);
+
+	&movdqu	($ivec,&QWP(0,$rounds_));	# load ivec
+	&movdqu	($cmac,&QWP(0,$rounds));	# load cmac
+	&mov	($rounds,&DWP(240,$key));
+
+	# compose byte-swap control mask for pshufb on stack
+	&mov	(&DWP(0,"esp"),0x0c0d0e0f);
+	&mov	(&DWP(4,"esp"),0x08090a0b);
+	&mov	(&DWP(8,"esp"),0x04050607);
+	&mov	(&DWP(12,"esp"),0x00010203);
+
+	# compose counter increment vector on stack
+	&mov	($rounds_,1);
+	&xor	($key_,$key_);
+	&mov	(&DWP(16,"esp"),$rounds_);
+	&mov	(&DWP(20,"esp"),$key_);
+	&mov	(&DWP(24,"esp"),$key_);
+	&mov	(&DWP(28,"esp"),$key_);
+
+	&shr	($rounds,1);
+	&lea	($key_,&DWP(0,$key));
+	&movdqa	($inout3,&QWP(0,"esp"));
+	&movdqa	($inout0,$ivec);
+	&mov	($rounds_,$rounds);
+	&pshufb	($ivec,$inout3);
+
+&set_label("ccm64_enc_outer");
+	&$movekey	($rndkey0,&QWP(0,$key_));
+	&mov		($rounds,$rounds_);
+	&movups		($in0,&QWP(0,$inp));
+
+	&xorps		($inout0,$rndkey0);
+	&$movekey	($rndkey1,&QWP(16,$key_));
+	&xorps		($rndkey0,$in0);
+	&lea		($key,&DWP(32,$key_));
+	&xorps		($cmac,$rndkey0);		# cmac^=inp
+	&$movekey	($rndkey0,&QWP(0,$key));
+
+&set_label("ccm64_enc2_loop");
+	&aesenc		($inout0,$rndkey1);
+	&dec		($rounds);
+	&aesenc		($cmac,$rndkey1);
+	&$movekey	($rndkey1,&QWP(16,$key));
+	&aesenc		($inout0,$rndkey0);
+	&lea		($key,&DWP(32,$key));
+	&aesenc		($cmac,$rndkey0);
+	&$movekey	($rndkey0,&QWP(0,$key));
+	&jnz		(&label("ccm64_enc2_loop"));
+	&aesenc		($inout0,$rndkey1);
+	&aesenc		($cmac,$rndkey1);
+	&paddq		($ivec,&QWP(16,"esp"));
+	&aesenclast	($inout0,$rndkey0);
+	&aesenclast	($cmac,$rndkey0);
+
+	&dec	($len);
+	&lea	($inp,&DWP(16,$inp));
+	&xorps	($in0,$inout0);			# inp^=E(ivec)
+	&movdqa	($inout0,$ivec);
+	&movups	(&QWP(0,$out),$in0);		# save output
+	&lea	($out,&DWP(16,$out));
+	&pshufb	($inout0,$inout3);
+	&jnz	(&label("ccm64_enc_outer"));
+
+	&mov	("esp",&DWP(48,"esp"));
+	&mov	($out,&wparam(5));
+	&movups	(&QWP(0,$out),$cmac);
+&function_end("aesni_ccm64_encrypt_blocks");
+
+&function_begin("aesni_ccm64_decrypt_blocks");
+	&mov	($inp,&wparam(0));
+	&mov	($out,&wparam(1));
+	&mov	($len,&wparam(2));
+	&mov	($key,&wparam(3));
+	&mov	($rounds_,&wparam(4));
+	&mov	($rounds,&wparam(5));
+	&mov	($key_,"esp");
+	&sub	("esp",60);
+	&and	("esp",-16);			# align stack
+	&mov	(&DWP(48,"esp"),$key_);
+
+	&movdqu	($ivec,&QWP(0,$rounds_));	# load ivec
+	&movdqu	($cmac,&QWP(0,$rounds));	# load cmac
+	&mov	($rounds,&DWP(240,$key));
+
+	# compose byte-swap control mask for pshufb on stack
+	&mov	(&DWP(0,"esp"),0x0c0d0e0f);
+	&mov	(&DWP(4,"esp"),0x08090a0b);
+	&mov	(&DWP(8,"esp"),0x04050607);
+	&mov	(&DWP(12,"esp"),0x00010203);
+
+	# compose counter increment vector on stack
+	&mov	($rounds_,1);
+	&xor	($key_,$key_);
+	&mov	(&DWP(16,"esp"),$rounds_);
+	&mov	(&DWP(20,"esp"),$key_);
+	&mov	(&DWP(24,"esp"),$key_);
+	&mov	(&DWP(28,"esp"),$key_);
+
+	&movdqa	($inout3,&QWP(0,"esp"));	# bswap mask
+	&movdqa	($inout0,$ivec);
+
+	&mov	($key_,$key);
+	&mov	($rounds_,$rounds);
+
+	&pshufb	($ivec,$inout3);
+	if ($inline)
+	{   &aesni_inline_generate1("enc");	}
+	else
+	{   &call	("_aesni_encrypt1");	}
+	&movups	($in0,&QWP(0,$inp));		# load inp
+	&paddq	($ivec,&QWP(16,"esp"));
+	&lea	($inp,&QWP(16,$inp));
+	&jmp	(&label("ccm64_dec_outer"));
+
+&set_label("ccm64_dec_outer",16);
+	&xorps	($in0,$inout0);			# inp ^= E(ivec)
+	&movdqa	($inout0,$ivec);
+	&mov	($rounds,$rounds_);
+	&movups	(&QWP(0,$out),$in0);		# save output
+	&lea	($out,&DWP(16,$out));
+	&pshufb	($inout0,$inout3);
+
+	&sub	($len,1);
+	&jz	(&label("ccm64_dec_break"));
+
+	&$movekey	($rndkey0,&QWP(0,$key_));
+	&shr		($rounds,1);
+	&$movekey	($rndkey1,&QWP(16,$key_));
+	&xorps		($in0,$rndkey0);
+	&lea		($key,&DWP(32,$key_));
+	&xorps		($inout0,$rndkey0);
+	&xorps		($cmac,$in0);		# cmac^=out
+	&$movekey	($rndkey0,&QWP(0,$key));
+
+&set_label("ccm64_dec2_loop");
+	&aesenc		($inout0,$rndkey1);
+	&dec		($rounds);
+	&aesenc		($cmac,$rndkey1);
+	&$movekey	($rndkey1,&QWP(16,$key));
+	&aesenc		($inout0,$rndkey0);
+	&lea		($key,&DWP(32,$key));
+	&aesenc		($cmac,$rndkey0);
+	&$movekey	($rndkey0,&QWP(0,$key));
+	&jnz		(&label("ccm64_dec2_loop"));
+	&movups		($in0,&QWP(0,$inp));	# load inp
+	&paddq		($ivec,&QWP(16,"esp"));
+	&aesenc		($inout0,$rndkey1);
+	&aesenc		($cmac,$rndkey1);
+	&lea		($inp,&QWP(16,$inp));
+	&aesenclast	($inout0,$rndkey0);
+	&aesenclast	($cmac,$rndkey0);
+	&jmp	(&label("ccm64_dec_outer"));
+
+&set_label("ccm64_dec_break",16);
+	&mov	($key,$key_);
+	if ($inline)
+	{   &aesni_inline_generate1("enc",$cmac,$in0);	}
+	else
+	{   &call	("_aesni_encrypt1",$cmac);	}
+
+	&mov	("esp",&DWP(48,"esp"));
+	&mov	($out,&wparam(5));
+	&movups	(&QWP(0,$out),$cmac);
+&function_end("aesni_ccm64_decrypt_blocks");
+}
+
+######################################################################
+# void aesni_ctr32_encrypt_blocks (const void *in, void *out,
+#                         size_t blocks, const AES_KEY *key,
+#                         const char *ivec);
+#
+# Handles only complete blocks, operates on 32-bit counter and
+# does not update *ivec! (see engine/eng_aesni.c for details)
+#
+# stack layout:
+#	0	pshufb mask
+#	16	vector addend: 0,6,6,6
+# 	32	counter-less ivec
+#	48	1st triplet of counter vector
+#	64	2nd triplet of counter vector
+#	80	saved %esp
+
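+# Hedged side note (a sketch; the helper below is hypothetical and
+# never called): aesni_ctr32_encrypt_blocks below composes two triplets
+# of 32-bit counters from the initial counter value pulled out of the
+# ivec, then advances all six lanes by 6 with the vector addend on
+# every trip through the 6x loop.
+sub _ctr32_triplets_sketch
+{ use integer;
+  my $c = shift;			# initial 32-bit counter, host order
+    return ([$c,$c+1,$c+2],[$c+3,$c+4,$c+5]);
+}
+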
+&function_begin("aesni_ctr32_encrypt_blocks");
+	&mov	($inp,&wparam(0));
+	&mov	($out,&wparam(1));
+	&mov	($len,&wparam(2));
+	&mov	($key,&wparam(3));
+	&mov	($rounds_,&wparam(4));
+	&mov	($key_,"esp");
+	&sub	("esp",88);
+	&and	("esp",-16);			# align stack
+	&mov	(&DWP(80,"esp"),$key_);
+
+	&cmp	($len,1);
+	&je	(&label("ctr32_one_shortcut"));
+
+	&movdqu	($inout5,&QWP(0,$rounds_));	# load ivec
+
+	# compose byte-swap control mask for pshufb on stack
+	&mov	(&DWP(0,"esp"),0x0c0d0e0f);
+	&mov	(&DWP(4,"esp"),0x08090a0b);
+	&mov	(&DWP(8,"esp"),0x04050607);
+	&mov	(&DWP(12,"esp"),0x00010203);
+
+	# compose counter increment vector on stack
+	&mov	($rounds,6);
+	&xor	($key_,$key_);
+	&mov	(&DWP(16,"esp"),$rounds);
+	&mov	(&DWP(20,"esp"),$rounds);
+	&mov	(&DWP(24,"esp"),$rounds);
+	&mov	(&DWP(28,"esp"),$key_);
+
+	&pextrd	($rounds_,$inout5,3);		# pull 32-bit counter
+	&pinsrd	($inout5,$key_,3);		# wipe 32-bit counter
+
+	&mov	($rounds,&DWP(240,$key));	# key->rounds
+
+	# compose 2 vectors of 3x32-bit counters
+	&bswap	($rounds_);
+	&pxor	($rndkey1,$rndkey1);
+	&pxor	($rndkey0,$rndkey0);
+	&movdqa	($inout0,&QWP(0,"esp"));	# load byte-swap mask
+	&pinsrd	($rndkey1,$rounds_,0);
+	&lea	($key_,&DWP(3,$rounds_));
+	&pinsrd	($rndkey0,$key_,0);
+	&inc	($rounds_);
+	&pinsrd	($rndkey1,$rounds_,1);
+	&inc	($key_);
+	&pinsrd	($rndkey0,$key_,1);
+	&inc	($rounds_);
+	&pinsrd	($rndkey1,$rounds_,2);
+	&inc	($key_);
+	&pinsrd	($rndkey0,$key_,2);
+	&movdqa	(&QWP(48,"esp"),$rndkey1);	# save 1st triplet
+	&pshufb	($rndkey1,$inout0);		# byte swap
+	&movdqa	(&QWP(64,"esp"),$rndkey0);	# save 2nd triplet
+	&pshufb	($rndkey0,$inout0);		# byte swap
+
+	&pshufd	($inout0,$rndkey1,3<<6);	# place counter to upper dword
+	&pshufd	($inout1,$rndkey1,2<<6);
+	&cmp	($len,6);
+	&jb	(&label("ctr32_tail"));
+	&movdqa	(&QWP(32,"esp"),$inout5);	# save counter-less ivec
+	&shr	($rounds,1);
+	&mov	($key_,$key);			# backup $key
+	&mov	($rounds_,$rounds);		# backup $rounds
+	&sub	($len,6);
+	&jmp	(&label("ctr32_loop6"));
+
+&set_label("ctr32_loop6",16);
+	&pshufd	($inout2,$rndkey1,1<<6);
+	&movdqa	($rndkey1,&QWP(32,"esp"));	# pull counter-less ivec
+	&pshufd	($inout3,$rndkey0,3<<6);
+	&por	($inout0,$rndkey1);		# merge counter-less ivec
+	&pshufd	($inout4,$rndkey0,2<<6);
+	&por	($inout1,$rndkey1);
+	&pshufd	($inout5,$rndkey0,1<<6);
+	&por	($inout2,$rndkey1);
+	&por	($inout3,$rndkey1);
+	&por	($inout4,$rndkey1);
+	&por	($inout5,$rndkey1);
+
+	# inlining _aesni_encrypt6's prologue gives ~4% improvement...
+	&$movekey	($rndkey0,&QWP(0,$key_));
+	&$movekey	($rndkey1,&QWP(16,$key_));
+	&lea		($key,&DWP(32,$key_));
+	&dec		($rounds);
+	&pxor		($inout0,$rndkey0);
+	&pxor		($inout1,$rndkey0);
+	&aesenc		($inout0,$rndkey1);
+	&pxor		($inout2,$rndkey0);
+	&aesenc		($inout1,$rndkey1);
+	&pxor		($inout3,$rndkey0);
+	&aesenc		($inout2,$rndkey1);
+	&pxor		($inout4,$rndkey0);
+	&aesenc		($inout3,$rndkey1);
+	&pxor		($inout5,$rndkey0);
+	&aesenc		($inout4,$rndkey1);
+	&$movekey	($rndkey0,&QWP(0,$key));
+	&aesenc		($inout5,$rndkey1);
+
+	&call		(&label("_aesni_encrypt6_enter"));
+
+	&movups	($rndkey1,&QWP(0,$inp));
+	&movups	($rndkey0,&QWP(0x10,$inp));
+	&xorps	($inout0,$rndkey1);
+	&movups	($rndkey1,&QWP(0x20,$inp));
+	&xorps	($inout1,$rndkey0);
+	&movups	(&QWP(0,$out),$inout0);
+	&movdqa	($rndkey0,&QWP(16,"esp"));	# load increment
+	&xorps	($inout2,$rndkey1);
+	&movdqa	($rndkey1,&QWP(48,"esp"));	# load 1st triplet
+	&movups	(&QWP(0x10,$out),$inout1);
+	&movups	(&QWP(0x20,$out),$inout2);
+
+	&paddd	($rndkey1,$rndkey0);		# 1st triplet increment
+	&paddd	($rndkey0,&QWP(64,"esp"));	# 2nd triplet increment
+	&movdqa	($inout0,&QWP(0,"esp"));	# load byte swap mask
+
+	&movups	($inout1,&QWP(0x30,$inp));
+	&movups	($inout2,&QWP(0x40,$inp));
+	&xorps	($inout3,$inout1);
+	&movups	($inout1,&QWP(0x50,$inp));
+	&lea	($inp,&DWP(0x60,$inp));
+	&movdqa	(&QWP(48,"esp"),$rndkey1);	# save 1st triplet
+	&pshufb	($rndkey1,$inout0);		# byte swap
+	&xorps	($inout4,$inout2);
+	&movups	(&QWP(0x30,$out),$inout3);
+	&xorps	($inout5,$inout1);
+	&movdqa	(&QWP(64,"esp"),$rndkey0);	# save 2nd triplet
+	&pshufb	($rndkey0,$inout0);		# byte swap
+	&movups	(&QWP(0x40,$out),$inout4);
+	&pshufd	($inout0,$rndkey1,3<<6);
+	&movups	(&QWP(0x50,$out),$inout5);
+	&lea	($out,&DWP(0x60,$out));
+
+	&mov	($rounds,$rounds_);
+	&pshufd	($inout1,$rndkey1,2<<6);
+	&sub	($len,6);
+	&jnc	(&label("ctr32_loop6"));
+
+	&add	($len,6);
+	&jz	(&label("ctr32_ret"));
+	&mov	($key,$key_);
+	&lea	($rounds,&DWP(1,"",$rounds,2));	# restore $rounds
+	&movdqa	($inout5,&QWP(32,"esp"));	# pull count-less ivec
+
+&set_label("ctr32_tail");
+	&por	($inout0,$inout5);
+	&cmp	($len,2);
+	&jb	(&label("ctr32_one"));
+
+	&pshufd	($inout2,$rndkey1,1<<6);
+	&por	($inout1,$inout5);
+	&je	(&label("ctr32_two"));
+
+	&pshufd	($inout3,$rndkey0,3<<6);
+	&por	($inout2,$inout5);
+	&cmp	($len,4);
+	&jb	(&label("ctr32_three"));
+
+	&pshufd	($inout4,$rndkey0,2<<6);
+	&por	($inout3,$inout5);
+	&je	(&label("ctr32_four"));
+
+	&por	($inout4,$inout5);
+	&call	("_aesni_encrypt6");
+	&movups	($rndkey1,&QWP(0,$inp));
+	&movups	($rndkey0,&QWP(0x10,$inp));
+	&xorps	($inout0,$rndkey1);
+	&movups	($rndkey1,&QWP(0x20,$inp));
+	&xorps	($inout1,$rndkey0);
+	&movups	($rndkey0,&QWP(0x30,$inp));
+	&xorps	($inout2,$rndkey1);
+	&movups	($rndkey1,&QWP(0x40,$inp));
+	&xorps	($inout3,$rndkey0);
+	&movups	(&QWP(0,$out),$inout0);
+	&xorps	($inout4,$rndkey1);
+	&movups	(&QWP(0x10,$out),$inout1);
+	&movups	(&QWP(0x20,$out),$inout2);
+	&movups	(&QWP(0x30,$out),$inout3);
+	&movups	(&QWP(0x40,$out),$inout4);
+	&jmp	(&label("ctr32_ret"));
+
+&set_label("ctr32_one_shortcut",16);
+	&movups	($inout0,&QWP(0,$rounds_));	# load ivec
+	&mov	($rounds,&DWP(240,$key));
+	
+&set_label("ctr32_one");
+	if ($inline)
+	{   &aesni_inline_generate1("enc");	}
+	else
+	{   &call	("_aesni_encrypt1");	}
+	&movups	($in0,&QWP(0,$inp));
+	&xorps	($in0,$inout0);
+	&movups	(&QWP(0,$out),$in0);
+	&jmp	(&label("ctr32_ret"));
+
+&set_label("ctr32_two",16);
+	&call	("_aesni_encrypt3");
+	&movups	($inout3,&QWP(0,$inp));
+	&movups	($inout4,&QWP(0x10,$inp));
+	&xorps	($inout0,$inout3);
+	&xorps	($inout1,$inout4);
+	&movups	(&QWP(0,$out),$inout0);
+	&movups	(&QWP(0x10,$out),$inout1);
+	&jmp	(&label("ctr32_ret"));
+
+&set_label("ctr32_three",16);
+	&call	("_aesni_encrypt3");
+	&movups	($inout3,&QWP(0,$inp));
+	&movups	($inout4,&QWP(0x10,$inp));
+	&xorps	($inout0,$inout3);
+	&movups	($inout5,&QWP(0x20,$inp));
+	&xorps	($inout1,$inout4);
+	&movups	(&QWP(0,$out),$inout0);
+	&xorps	($inout2,$inout5);
+	&movups	(&QWP(0x10,$out),$inout1);
+	&movups	(&QWP(0x20,$out),$inout2);
+	&jmp	(&label("ctr32_ret"));
+
+&set_label("ctr32_four",16);
+	&call	("_aesni_encrypt4");
+	&movups	($inout4,&QWP(0,$inp));
+	&movups	($inout5,&QWP(0x10,$inp));
+	&movups	($rndkey1,&QWP(0x20,$inp));
+	&xorps	($inout0,$inout4);
+	&movups	($rndkey0,&QWP(0x30,$inp));
+	&xorps	($inout1,$inout5);
+	&movups	(&QWP(0,$out),$inout0);
+	&xorps	($inout2,$rndkey1);
+	&movups	(&QWP(0x10,$out),$inout1);
+	&xorps	($inout3,$rndkey0);
+	&movups	(&QWP(0x20,$out),$inout2);
+	&movups	(&QWP(0x30,$out),$inout3);
+
+&set_label("ctr32_ret");
+	&mov	("esp",&DWP(80,"esp"));
+&function_end("aesni_ctr32_encrypt_blocks");
+
+######################################################################
+# void aesni_xts_[en|de]crypt(const char *inp,char *out,size_t len,
+#	const AES_KEY *key1, const AES_KEY *key2
+#	const unsigned char iv[16]);
+#
+{ my ($tweak,$twtmp,$twres,$twmask)=($rndkey1,$rndkey0,$inout0,$inout1);
+
+&function_begin("aesni_xts_encrypt");
+	&mov	($key,&wparam(4));		# key2
+	&mov	($inp,&wparam(5));		# clear-text tweak
+
+	&mov	($rounds,&DWP(240,$key));	# key2->rounds
+	&movups	($inout0,&QWP(0,$inp));
+	if ($inline)
+	{   &aesni_inline_generate1("enc");	}
+	else
+	{   &call	("_aesni_encrypt1");	}
+
+	&mov	($inp,&wparam(0));
+	&mov	($out,&wparam(1));
+	&mov	($len,&wparam(2));
+	&mov	($key,&wparam(3));		# key1
+
+	&mov	($key_,"esp");
+	&sub	("esp",16*7+8);
+	&mov	($rounds,&DWP(240,$key));	# key1->rounds
+	&and	("esp",-16);			# align stack
+
+	&mov	(&DWP(16*6+0,"esp"),0x87);	# compose the magic constant
+	&mov	(&DWP(16*6+4,"esp"),0);
+	&mov	(&DWP(16*6+8,"esp"),1);
+	&mov	(&DWP(16*6+12,"esp"),0);
+	&mov	(&DWP(16*7+0,"esp"),$len);	# save original $len
+	&mov	(&DWP(16*7+4,"esp"),$key_);	# save original %esp
+
+	&movdqa	($tweak,$inout0);
+	&pxor	($twtmp,$twtmp);
+	&movdqa	($twmask,&QWP(6*16,"esp"));	# 0x0...010...87
+	&pcmpgtd($twtmp,$tweak);		# broadcast upper bits
+
+	&and	($len,-16);
+	&mov	($key_,$key);			# backup $key
+	&mov	($rounds_,$rounds);		# backup $rounds
+	&sub	($len,16*6);
+	&jc	(&label("xts_enc_short"));
+
+	&shr	($rounds,1);
+	&mov	($rounds_,$rounds);
+	&jmp	(&label("xts_enc_loop6"));
+
+&set_label("xts_enc_loop6",16);
+	for ($i=0;$i<4;$i++) {
+	    &pshufd	($twres,$twtmp,0x13);
+	    &pxor	($twtmp,$twtmp);
+	    &movdqa	(&QWP(16*$i,"esp"),$tweak);
+	    &paddq	($tweak,$tweak);	# &psllq($tweak,1);
+	    &pand	($twres,$twmask);	# isolate carry and residue
+	    &pcmpgtd	($twtmp,$tweak);	# broadcast upper bits
+	    &pxor	($tweak,$twres);
+	}
+	&pshufd	($inout5,$twtmp,0x13);
+	&movdqa	(&QWP(16*$i++,"esp"),$tweak);
+	&paddq	($tweak,$tweak);		# &psllq($tweak,1);
+	 &$movekey	($rndkey0,&QWP(0,$key_));
+	&pand	($inout5,$twmask);		# isolate carry and residue
+	 &movups	($inout0,&QWP(0,$inp));	# load input
+	&pxor	($inout5,$tweak);
+
+	# inline _aesni_encrypt6 prologue and flip xor with tweak and key[0]
+	&movdqu	($inout1,&QWP(16*1,$inp));
+	 &xorps		($inout0,$rndkey0);	# input^=rndkey[0]
+	&movdqu	($inout2,&QWP(16*2,$inp));
+	 &pxor		($inout1,$rndkey0);
+	&movdqu	($inout3,&QWP(16*3,$inp));
+	 &pxor		($inout2,$rndkey0);
+	&movdqu	($inout4,&QWP(16*4,$inp));
+	 &pxor		($inout3,$rndkey0);
+	&movdqu	($rndkey1,&QWP(16*5,$inp));
+	 &pxor		($inout4,$rndkey0);
+	&lea	($inp,&DWP(16*6,$inp));
+	&pxor	($inout0,&QWP(16*0,"esp"));	# input^=tweak
+	&movdqa	(&QWP(16*$i,"esp"),$inout5);	# save last tweak
+	&pxor	($inout5,$rndkey1);
+
+	 &$movekey	($rndkey1,&QWP(16,$key_));
+	 &lea		($key,&DWP(32,$key_));
+	&pxor	($inout1,&QWP(16*1,"esp"));
+	 &aesenc	($inout0,$rndkey1);
+	&pxor	($inout2,&QWP(16*2,"esp"));
+	 &aesenc	($inout1,$rndkey1);
+	&pxor	($inout3,&QWP(16*3,"esp"));
+	 &dec		($rounds);
+	 &aesenc	($inout2,$rndkey1);
+	&pxor	($inout4,&QWP(16*4,"esp"));
+	 &aesenc	($inout3,$rndkey1);
+	&pxor		($inout5,$rndkey0);
+	 &aesenc	($inout4,$rndkey1);
+	 &$movekey	($rndkey0,&QWP(0,$key));
+	 &aesenc	($inout5,$rndkey1);
+	&call		(&label("_aesni_encrypt6_enter"));
+
+	&movdqa	($tweak,&QWP(16*5,"esp"));	# last tweak
+       &pxor	($twtmp,$twtmp);
+	&xorps	($inout0,&QWP(16*0,"esp"));	# output^=tweak
+       &pcmpgtd	($twtmp,$tweak);		# broadcast upper bits
+	&xorps	($inout1,&QWP(16*1,"esp"));
+	&movups	(&QWP(16*0,$out),$inout0);	# write output
+	&xorps	($inout2,&QWP(16*2,"esp"));
+	&movups	(&QWP(16*1,$out),$inout1);
+	&xorps	($inout3,&QWP(16*3,"esp"));
+	&movups	(&QWP(16*2,$out),$inout2);
+	&xorps	($inout4,&QWP(16*4,"esp"));
+	&movups	(&QWP(16*3,$out),$inout3);
+	&xorps	($inout5,$tweak);
+	&movups	(&QWP(16*4,$out),$inout4);
+       &pshufd	($twres,$twtmp,0x13);
+	&movups	(&QWP(16*5,$out),$inout5);
+	&lea	($out,&DWP(16*6,$out));
+       &movdqa	($twmask,&QWP(16*6,"esp"));	# 0x0...010...87
+
+	&pxor	($twtmp,$twtmp);
+	&paddq	($tweak,$tweak);		# &psllq($tweak,1);
+	&pand	($twres,$twmask);		# isolate carry and residue
+	&pcmpgtd($twtmp,$tweak);		# broadcast upper bits
+	&mov	($rounds,$rounds_);		# restore $rounds
+	&pxor	($tweak,$twres);
+
+	&sub	($len,16*6);
+	&jnc	(&label("xts_enc_loop6"));
+
+	&lea	($rounds,&DWP(1,"",$rounds,2));	# restore $rounds
+	&mov	($key,$key_);			# restore $key
+	&mov	($rounds_,$rounds);
+
+&set_label("xts_enc_short");
+	&add	($len,16*6);
+	&jz	(&label("xts_enc_done6x"));
+
+	&movdqa	($inout3,$tweak);		# put aside previous tweak
+	&cmp	($len,0x20);
+	&jb	(&label("xts_enc_one"));
+
+	&pshufd	($twres,$twtmp,0x13);
+	&pxor	($twtmp,$twtmp);
+	&paddq	($tweak,$tweak);		# &psllq($tweak,1);
+	&pand	($twres,$twmask);		# isolate carry and residue
+	&pcmpgtd($twtmp,$tweak);		# broadcast upper bits
+	&pxor	($tweak,$twres);
+	&je	(&label("xts_enc_two"));
+
+	&pshufd	($twres,$twtmp,0x13);
+	&pxor	($twtmp,$twtmp);
+	&movdqa	($inout4,$tweak);		# put aside previous tweak
+	&paddq	($tweak,$tweak);		# &psllq($tweak,1);
+	&pand	($twres,$twmask);		# isolate carry and residue
+	&pcmpgtd($twtmp,$tweak);		# broadcast upper bits
+	&pxor	($tweak,$twres);
+	&cmp	($len,0x40);
+	&jb	(&label("xts_enc_three"));
+
+	&pshufd	($twres,$twtmp,0x13);
+	&pxor	($twtmp,$twtmp);
+	&movdqa	($inout5,$tweak);		# put aside previous tweak
+	&paddq	($tweak,$tweak);		# &psllq($tweak,1);
+	&pand	($twres,$twmask);		# isolate carry and residue
+	&pcmpgtd($twtmp,$tweak);		# broadcast upper bits
+	&pxor	($tweak,$twres);
+	&movdqa	(&QWP(16*0,"esp"),$inout3);
+	&movdqa	(&QWP(16*1,"esp"),$inout4);
+	&je	(&label("xts_enc_four"));
+
+	&movdqa	(&QWP(16*2,"esp"),$inout5);
+	&pshufd	($inout5,$twtmp,0x13);
+	&movdqa	(&QWP(16*3,"esp"),$tweak);
+	&paddq	($tweak,$tweak);		# &psllq($tweak,1);
+	&pand	($inout5,$twmask);		# isolate carry and residue
+	&pxor	($inout5,$tweak);
+
+	&movdqu	($inout0,&QWP(16*0,$inp));	# load input
+	&movdqu	($inout1,&QWP(16*1,$inp));
+	&movdqu	($inout2,&QWP(16*2,$inp));
+	&pxor	($inout0,&QWP(16*0,"esp"));	# input^=tweak
+	&movdqu	($inout3,&QWP(16*3,$inp));
+	&pxor	($inout1,&QWP(16*1,"esp"));
+	&movdqu	($inout4,&QWP(16*4,$inp));
+	&pxor	($inout2,&QWP(16*2,"esp"));
+	&lea	($inp,&DWP(16*5,$inp));
+	&pxor	($inout3,&QWP(16*3,"esp"));
+	&movdqa	(&QWP(16*4,"esp"),$inout5);	# save last tweak
+	&pxor	($inout4,$inout5);
+
+	&call	("_aesni_encrypt6");
+
+	&movaps	($tweak,&QWP(16*4,"esp"));	# last tweak
+	&xorps	($inout0,&QWP(16*0,"esp"));	# output^=tweak
+	&xorps	($inout1,&QWP(16*1,"esp"));
+	&xorps	($inout2,&QWP(16*2,"esp"));
+	&movups	(&QWP(16*0,$out),$inout0);	# write output
+	&xorps	($inout3,&QWP(16*3,"esp"));
+	&movups	(&QWP(16*1,$out),$inout1);
+	&xorps	($inout4,$tweak);
+	&movups	(&QWP(16*2,$out),$inout2);
+	&movups	(&QWP(16*3,$out),$inout3);
+	&movups	(&QWP(16*4,$out),$inout4);
+	&lea	($out,&DWP(16*5,$out));
+	&jmp	(&label("xts_enc_done"));
+
+&set_label("xts_enc_one",16);
+	&movups	($inout0,&QWP(16*0,$inp));	# load input
+	&lea	($inp,&DWP(16*1,$inp));
+	&xorps	($inout0,$inout3);		# input^=tweak
+	if ($inline)
+	{   &aesni_inline_generate1("enc");	}
+	else
+	{   &call	("_aesni_encrypt1");	}
+	&xorps	($inout0,$inout3);		# output^=tweak
+	&movups	(&QWP(16*0,$out),$inout0);	# write output
+	&lea	($out,&DWP(16*1,$out));
+
+	&movdqa	($tweak,$inout3);		# last tweak
+	&jmp	(&label("xts_enc_done"));
+
+&set_label("xts_enc_two",16);
+	&movaps	($inout4,$tweak);		# put aside last tweak
+
+	&movups	($inout0,&QWP(16*0,$inp));	# load input
+	&movups	($inout1,&QWP(16*1,$inp));
+	&lea	($inp,&DWP(16*2,$inp));
+	&xorps	($inout0,$inout3);		# input^=tweak
+	&xorps	($inout1,$inout4);
+	&xorps	($inout2,$inout2);
+
+	&call	("_aesni_encrypt3");
+
+	&xorps	($inout0,$inout3);		# output^=tweak
+	&xorps	($inout1,$inout4);
+	&movups	(&QWP(16*0,$out),$inout0);	# write output
+	&movups	(&QWP(16*1,$out),$inout1);
+	&lea	($out,&DWP(16*2,$out));
+
+	&movdqa	($tweak,$inout4);		# last tweak
+	&jmp	(&label("xts_enc_done"));
+
+&set_label("xts_enc_three",16);
+	&movaps	($inout5,$tweak);		# put aside last tweak
+	&movups	($inout0,&QWP(16*0,$inp));	# load input
+	&movups	($inout1,&QWP(16*1,$inp));
+	&movups	($inout2,&QWP(16*2,$inp));
+	&lea	($inp,&DWP(16*3,$inp));
+	&xorps	($inout0,$inout3);		# input^=tweak
+	&xorps	($inout1,$inout4);
+	&xorps	($inout2,$inout5);
+
+	&call	("_aesni_encrypt3");
+
+	&xorps	($inout0,$inout3);		# output^=tweak
+	&xorps	($inout1,$inout4);
+	&xorps	($inout2,$inout5);
+	&movups	(&QWP(16*0,$out),$inout0);	# write output
+	&movups	(&QWP(16*1,$out),$inout1);
+	&movups	(&QWP(16*2,$out),$inout2);
+	&lea	($out,&DWP(16*3,$out));
+
+	&movdqa	($tweak,$inout5);		# last tweak
+	&jmp	(&label("xts_enc_done"));
+
+&set_label("xts_enc_four",16);
+	&movaps	($inout4,$tweak);		# put aside last tweak
+
+	&movups	($inout0,&QWP(16*0,$inp));	# load input
+	&movups	($inout1,&QWP(16*1,$inp));
+	&movups	($inout2,&QWP(16*2,$inp));
+	&xorps	($inout0,&QWP(16*0,"esp"));	# input^=tweak
+	&movups	($inout3,&QWP(16*3,$inp));
+	&lea	($inp,&DWP(16*4,$inp));
+	&xorps	($inout1,&QWP(16*1,"esp"));
+	&xorps	($inout2,$inout5);
+	&xorps	($inout3,$inout4);
+
+	&call	("_aesni_encrypt4");
+
+	&xorps	($inout0,&QWP(16*0,"esp"));	# output^=tweak
+	&xorps	($inout1,&QWP(16*1,"esp"));
+	&xorps	($inout2,$inout5);
+	&movups	(&QWP(16*0,$out),$inout0);	# write output
+	&xorps	($inout3,$inout4);
+	&movups	(&QWP(16*1,$out),$inout1);
+	&movups	(&QWP(16*2,$out),$inout2);
+	&movups	(&QWP(16*3,$out),$inout3);
+	&lea	($out,&DWP(16*4,$out));
+
+	&movdqa	($tweak,$inout4);		# last tweak
+	&jmp	(&label("xts_enc_done"));
+
+&set_label("xts_enc_done6x",16);		# $tweak is pre-calculated
+	&mov	($len,&DWP(16*7+0,"esp"));	# restore original $len
+	&and	($len,15);
+	&jz	(&label("xts_enc_ret"));
+	&movdqa	($inout3,$tweak);
+	&mov	(&DWP(16*7+0,"esp"),$len);	# save $len%16
+	&jmp	(&label("xts_enc_steal"));
+
+&set_label("xts_enc_done",16);
+	&mov	($len,&DWP(16*7+0,"esp"));	# restore original $len
+	&pxor	($twtmp,$twtmp);
+	&and	($len,15);
+	&jz	(&label("xts_enc_ret"));
+
+	&pcmpgtd($twtmp,$tweak);		# broadcast upper bits
+	&mov	(&DWP(16*7+0,"esp"),$len);	# save $len%16
+	&pshufd	($inout3,$twtmp,0x13);
+	&paddq	($tweak,$tweak);		# &psllq($tweak,1);
+	&pand	($inout3,&QWP(16*6,"esp"));	# isolate carry and residue
+	&pxor	($inout3,$tweak);
+
+&set_label("xts_enc_steal");
+	&movz	($rounds,&BP(0,$inp));
+	&movz	($key,&BP(-16,$out));
+	&lea	($inp,&DWP(1,$inp));
+	&mov	(&BP(-16,$out),&LB($rounds));
+	&mov	(&BP(0,$out),&LB($key));
+	&lea	($out,&DWP(1,$out));
+	&sub	($len,1);
+	&jnz	(&label("xts_enc_steal"));
+
+	&sub	($out,&DWP(16*7+0,"esp"));	# rewind $out
+	&mov	($key,$key_);			# restore $key
+	&mov	($rounds,$rounds_);		# restore $rounds
+
+	&movups	($inout0,&QWP(-16,$out));	# load input
+	&xorps	($inout0,$inout3);		# input^=tweak
+	if ($inline)
+	{   &aesni_inline_generate1("enc");	}
+	else
+	{   &call	("_aesni_encrypt1");	}
+	&xorps	($inout0,$inout3);		# output^=tweak
+	&movups	(&QWP(-16,$out),$inout0);	# write output
+
+&set_label("xts_enc_ret");
+	&mov	("esp",&DWP(16*7+4,"esp"));	# restore %esp
+&function_end("aesni_xts_encrypt");
+
+&function_begin("aesni_xts_decrypt");
+	&mov	($key,&wparam(4));		# key2
+	&mov	($inp,&wparam(5));		# clear-text tweak
+
+	&mov	($rounds,&DWP(240,$key));	# key2->rounds
+	&movups	($inout0,&QWP(0,$inp));
+	if ($inline)
+	{   &aesni_inline_generate1("enc");	}
+	else
+	{   &call	("_aesni_encrypt1");	}
+
+	&mov	($inp,&wparam(0));
+	&mov	($out,&wparam(1));
+	&mov	($len,&wparam(2));
+	&mov	($key,&wparam(3));		# key1
+
+	&mov	($key_,"esp");
+	&sub	("esp",16*7+8);
+	&and	("esp",-16);			# align stack
+
+	&xor	($rounds_,$rounds_);		# if(len%16) len-=16;
+	&test	($len,15);
+	&setnz	(&LB($rounds_));
+	&shl	($rounds_,4);
+	&sub	($len,$rounds_);
+
+	&mov	(&DWP(16*6+0,"esp"),0x87);	# compose the magic constant
+	&mov	(&DWP(16*6+4,"esp"),0);
+	&mov	(&DWP(16*6+8,"esp"),1);
+	&mov	(&DWP(16*6+12,"esp"),0);
+	&mov	(&DWP(16*7+0,"esp"),$len);	# save original $len
+	&mov	(&DWP(16*7+4,"esp"),$key_);	# save original %esp
+
+	&mov	($rounds,&DWP(240,$key));	# key1->rounds
+	&mov	($key_,$key);			# backup $key
+	&mov	($rounds_,$rounds);		# backup $rounds
+
+	&movdqa	($tweak,$inout0);
+	&pxor	($twtmp,$twtmp);
+	&movdqa	($twmask,&QWP(6*16,"esp"));	# 0x0...010...87
+	&pcmpgtd($twtmp,$tweak);		# broadcast upper bits
+
+	&and	($len,-16);
+	&sub	($len,16*6);
+	&jc	(&label("xts_dec_short"));
+
+	&shr	($rounds,1);
+	&mov	($rounds_,$rounds);
+	&jmp	(&label("xts_dec_loop6"));
+
+&set_label("xts_dec_loop6",16);
+	for ($i=0;$i<4;$i++) {
+	    &pshufd	($twres,$twtmp,0x13);
+	    &pxor	($twtmp,$twtmp);
+	    &movdqa	(&QWP(16*$i,"esp"),$tweak);
+	    &paddq	($tweak,$tweak);	# &psllq($tweak,1);
+	    &pand	($twres,$twmask);	# isolate carry and residue
+	    &pcmpgtd	($twtmp,$tweak);	# broadcast upper bits
+	    &pxor	($tweak,$twres);
+	}
+	&pshufd	($inout5,$twtmp,0x13);
+	&movdqa	(&QWP(16*$i++,"esp"),$tweak);
+	&paddq	($tweak,$tweak);		# &psllq($tweak,1);
+	 &$movekey	($rndkey0,&QWP(0,$key_));
+	&pand	($inout5,$twmask);		# isolate carry and residue
+	 &movups	($inout0,&QWP(0,$inp));	# load input
+	&pxor	($inout5,$tweak);
+
+	# inline _aesni_decrypt6 prologue and flip xor with tweak and key[0]
+	&movdqu	($inout1,&QWP(16*1,$inp));
+	 &xorps		($inout0,$rndkey0);	# input^=rndkey[0]
+	&movdqu	($inout2,&QWP(16*2,$inp));
+	 &pxor		($inout1,$rndkey0);
+	&movdqu	($inout3,&QWP(16*3,$inp));
+	 &pxor		($inout2,$rndkey0);
+	&movdqu	($inout4,&QWP(16*4,$inp));
+	 &pxor		($inout3,$rndkey0);
+	&movdqu	($rndkey1,&QWP(16*5,$inp));
+	 &pxor		($inout4,$rndkey0);
+	&lea	($inp,&DWP(16*6,$inp));
+	&pxor	($inout0,&QWP(16*0,"esp"));	# input^=tweak
+	&movdqa	(&QWP(16*$i,"esp"),$inout5);	# save last tweak
+	&pxor	($inout5,$rndkey1);
+
+	 &$movekey	($rndkey1,&QWP(16,$key_));
+	 &lea		($key,&DWP(32,$key_));
+	&pxor	($inout1,&QWP(16*1,"esp"));
+	 &aesdec	($inout0,$rndkey1);
+	&pxor	($inout2,&QWP(16*2,"esp"));
+	 &aesdec	($inout1,$rndkey1);
+	&pxor	($inout3,&QWP(16*3,"esp"));
+	 &dec		($rounds);
+	 &aesdec	($inout2,$rndkey1);
+	&pxor	($inout4,&QWP(16*4,"esp"));
+	 &aesdec	($inout3,$rndkey1);
+	&pxor		($inout5,$rndkey0);
+	 &aesdec	($inout4,$rndkey1);
+	 &$movekey	($rndkey0,&QWP(0,$key));
+	 &aesdec	($inout5,$rndkey1);
+	&call		(&label("_aesni_decrypt6_enter"));
+
+	&movdqa	($tweak,&QWP(16*5,"esp"));	# last tweak
+       &pxor	($twtmp,$twtmp);
+	&xorps	($inout0,&QWP(16*0,"esp"));	# output^=tweak
+       &pcmpgtd	($twtmp,$tweak);		# broadcast upper bits
+	&xorps	($inout1,&QWP(16*1,"esp"));
+	&movups	(&QWP(16*0,$out),$inout0);	# write output
+	&xorps	($inout2,&QWP(16*2,"esp"));
+	&movups	(&QWP(16*1,$out),$inout1);
+	&xorps	($inout3,&QWP(16*3,"esp"));
+	&movups	(&QWP(16*2,$out),$inout2);
+	&xorps	($inout4,&QWP(16*4,"esp"));
+	&movups	(&QWP(16*3,$out),$inout3);
+	&xorps	($inout5,$tweak);
+	&movups	(&QWP(16*4,$out),$inout4);
+       &pshufd	($twres,$twtmp,0x13);
+	&movups	(&QWP(16*5,$out),$inout5);
+	&lea	($out,&DWP(16*6,$out));
+       &movdqa	($twmask,&QWP(16*6,"esp"));	# 0x0...010...87
+
+	&pxor	($twtmp,$twtmp);
+	&paddq	($tweak,$tweak);		# &psllq($tweak,1);
+	&pand	($twres,$twmask);		# isolate carry and residue
+	&pcmpgtd($twtmp,$tweak);		# broadcast upper bits
+	&mov	($rounds,$rounds_);		# restore $rounds
+	&pxor	($tweak,$twres);
+
+	&sub	($len,16*6);
+	&jnc	(&label("xts_dec_loop6"));
+
+	&lea	($rounds,&DWP(1,"",$rounds,2));	# restore $rounds
+	&mov	($key,$key_);			# restore $key
+	&mov	($rounds_,$rounds);
+
+&set_label("xts_dec_short");
+	&add	($len,16*6);
+	&jz	(&label("xts_dec_done6x"));
+
+	&movdqa	($inout3,$tweak);		# put aside previous tweak
+	&cmp	($len,0x20);
+	&jb	(&label("xts_dec_one"));
+
+	&pshufd	($twres,$twtmp,0x13);
+	&pxor	($twtmp,$twtmp);
+	&paddq	($tweak,$tweak);		# &psllq($tweak,1);
+	&pand	($twres,$twmask);		# isolate carry and residue
+	&pcmpgtd($twtmp,$tweak);		# broadcast upper bits
+	&pxor	($tweak,$twres);
+	&je	(&label("xts_dec_two"));
+
+	&pshufd	($twres,$twtmp,0x13);
+	&pxor	($twtmp,$twtmp);
+	&movdqa	($inout4,$tweak);		# put aside previous tweak
+	&paddq	($tweak,$tweak);		# &psllq($tweak,1);
+	&pand	($twres,$twmask);		# isolate carry and residue
+	&pcmpgtd($twtmp,$tweak);		# broadcast upper bits
+	&pxor	($tweak,$twres);
+	&cmp	($len,0x40);
+	&jb	(&label("xts_dec_three"));
+
+	&pshufd	($twres,$twtmp,0x13);
+	&pxor	($twtmp,$twtmp);
+	&movdqa	($inout5,$tweak);		# put aside previous tweak
+	&paddq	($tweak,$tweak);		# &psllq($tweak,1);
+	&pand	($twres,$twmask);		# isolate carry and residue
+	&pcmpgtd($twtmp,$tweak);		# broadcast upper bits
+	&pxor	($tweak,$twres);
+	&movdqa	(&QWP(16*0,"esp"),$inout3);
+	&movdqa	(&QWP(16*1,"esp"),$inout4);
+	&je	(&label("xts_dec_four"));
+
+	&movdqa	(&QWP(16*2,"esp"),$inout5);
+	&pshufd	($inout5,$twtmp,0x13);
+	&movdqa	(&QWP(16*3,"esp"),$tweak);
+	&paddq	($tweak,$tweak);		# &psllq($tweak,1);
+	&pand	($inout5,$twmask);		# isolate carry and residue
+	&pxor	($inout5,$tweak);
+
+	&movdqu	($inout0,&QWP(16*0,$inp));	# load input
+	&movdqu	($inout1,&QWP(16*1,$inp));
+	&movdqu	($inout2,&QWP(16*2,$inp));
+	&pxor	($inout0,&QWP(16*0,"esp"));	# input^=tweak
+	&movdqu	($inout3,&QWP(16*3,$inp));
+	&pxor	($inout1,&QWP(16*1,"esp"));
+	&movdqu	($inout4,&QWP(16*4,$inp));
+	&pxor	($inout2,&QWP(16*2,"esp"));
+	&lea	($inp,&DWP(16*5,$inp));
+	&pxor	($inout3,&QWP(16*3,"esp"));
+	&movdqa	(&QWP(16*4,"esp"),$inout5);	# save last tweak
+	&pxor	($inout4,$inout5);
+
+	&call	("_aesni_decrypt6");
+
+	&movaps	($tweak,&QWP(16*4,"esp"));	# last tweak
+	&xorps	($inout0,&QWP(16*0,"esp"));	# output^=tweak
+	&xorps	($inout1,&QWP(16*1,"esp"));
+	&xorps	($inout2,&QWP(16*2,"esp"));
+	&movups	(&QWP(16*0,$out),$inout0);	# write output
+	&xorps	($inout3,&QWP(16*3,"esp"));
+	&movups	(&QWP(16*1,$out),$inout1);
+	&xorps	($inout4,$tweak);
+	&movups	(&QWP(16*2,$out),$inout2);
+	&movups	(&QWP(16*3,$out),$inout3);
+	&movups	(&QWP(16*4,$out),$inout4);
+	&lea	($out,&DWP(16*5,$out));
+	&jmp	(&label("xts_dec_done"));
+
+&set_label("xts_dec_one",16);
+	&movups	($inout0,&QWP(16*0,$inp));	# load input
+	&lea	($inp,&DWP(16*1,$inp));
+	&xorps	($inout0,$inout3);		# input^=tweak
+	if ($inline)
+	{   &aesni_inline_generate1("dec");	}
+	else
+	{   &call	("_aesni_decrypt1");	}
+	&xorps	($inout0,$inout3);		# output^=tweak
+	&movups	(&QWP(16*0,$out),$inout0);	# write output
+	&lea	($out,&DWP(16*1,$out));
+
+	&movdqa	($tweak,$inout3);		# last tweak
+	&jmp	(&label("xts_dec_done"));
+
+&set_label("xts_dec_two",16);
+	&movaps	($inout4,$tweak);		# put aside last tweak
+
+	&movups	($inout0,&QWP(16*0,$inp));	# load input
+	&movups	($inout1,&QWP(16*1,$inp));
+	&lea	($inp,&DWP(16*2,$inp));
+	&xorps	($inout0,$inout3);		# input^=tweak
+	&xorps	($inout1,$inout4);
+
+	&call	("_aesni_decrypt3");
+
+	&xorps	($inout0,$inout3);		# output^=tweak
+	&xorps	($inout1,$inout4);
+	&movups	(&QWP(16*0,$out),$inout0);	# write output
+	&movups	(&QWP(16*1,$out),$inout1);
+	&lea	($out,&DWP(16*2,$out));
+
+	&movdqa	($tweak,$inout4);		# last tweak
+	&jmp	(&label("xts_dec_done"));
+
+&set_label("xts_dec_three",16);
+	&movaps	($inout5,$tweak);		# put aside last tweak
+	&movups	($inout0,&QWP(16*0,$inp));	# load input
+	&movups	($inout1,&QWP(16*1,$inp));
+	&movups	($inout2,&QWP(16*2,$inp));
+	&lea	($inp,&DWP(16*3,$inp));
+	&xorps	($inout0,$inout3);		# input^=tweak
+	&xorps	($inout1,$inout4);
+	&xorps	($inout2,$inout5);
+
+	&call	("_aesni_decrypt3");
+
+	&xorps	($inout0,$inout3);		# output^=tweak
+	&xorps	($inout1,$inout4);
+	&xorps	($inout2,$inout5);
+	&movups	(&QWP(16*0,$out),$inout0);	# write output
+	&movups	(&QWP(16*1,$out),$inout1);
+	&movups	(&QWP(16*2,$out),$inout2);
+	&lea	($out,&DWP(16*3,$out));
+
+	&movdqa	($tweak,$inout5);		# last tweak
+	&jmp	(&label("xts_dec_done"));
+
+&set_label("xts_dec_four",16);
+	&movaps	($inout4,$tweak);		# put aside last tweak
+
+	&movups	($inout0,&QWP(16*0,$inp));	# load input
+	&movups	($inout1,&QWP(16*1,$inp));
+	&movups	($inout2,&QWP(16*2,$inp));
+	&xorps	($inout0,&QWP(16*0,"esp"));	# input^=tweak
+	&movups	($inout3,&QWP(16*3,$inp));
+	&lea	($inp,&DWP(16*4,$inp));
+	&xorps	($inout1,&QWP(16*1,"esp"));
+	&xorps	($inout2,$inout5);
+	&xorps	($inout3,$inout4);
+
+	&call	("_aesni_decrypt4");
+
+	&xorps	($inout0,&QWP(16*0,"esp"));	# output^=tweak
+	&xorps	($inout1,&QWP(16*1,"esp"));
+	&xorps	($inout2,$inout5);
+	&movups	(&QWP(16*0,$out),$inout0);	# write output
+	&xorps	($inout3,$inout4);
+	&movups	(&QWP(16*1,$out),$inout1);
+	&movups	(&QWP(16*2,$out),$inout2);
+	&movups	(&QWP(16*3,$out),$inout3);
+	&lea	($out,&DWP(16*4,$out));
+
+	&movdqa	($tweak,$inout4);		# last tweak
+	&jmp	(&label("xts_dec_done"));
+
+&set_label("xts_dec_done6x",16);		# $tweak is pre-calculated
+	&mov	($len,&DWP(16*7+0,"esp"));	# restore original $len
+	&and	($len,15);
+	&jz	(&label("xts_dec_ret"));
+	&mov	(&DWP(16*7+0,"esp"),$len);	# save $len%16
+	&jmp	(&label("xts_dec_only_one_more"));
+
+&set_label("xts_dec_done",16);
+	&mov	($len,&DWP(16*7+0,"esp"));	# restore original $len
+	&pxor	($twtmp,$twtmp);
+	&and	($len,15);
+	&jz	(&label("xts_dec_ret"));
+
+	&pcmpgtd($twtmp,$tweak);		# broadcast upper bits
+	&mov	(&DWP(16*7+0,"esp"),$len);	# save $len%16
+	&pshufd	($twres,$twtmp,0x13);
+	&pxor	($twtmp,$twtmp);
+	&movdqa	($twmask,&QWP(16*6,"esp"));
+	&paddq	($tweak,$tweak);		# &psllq($tweak,1);
+	&pand	($twres,$twmask);		# isolate carry and residue
+	&pcmpgtd($twtmp,$tweak);		# broadcast upper bits
+	&pxor	($tweak,$twres);
+
+&set_label("xts_dec_only_one_more");
+	&pshufd	($inout3,$twtmp,0x13);
+	&movdqa	($inout4,$tweak);		# put aside previous tweak
+	&paddq	($tweak,$tweak);		# &psllq($tweak,1);
+	&pand	($inout3,$twmask);		# isolate carry and residue
+	&pxor	($inout3,$tweak);
+
+	&mov	($key,$key_);			# restore $key
+	&mov	($rounds,$rounds_);		# restore $rounds
+
+	&movups	($inout0,&QWP(0,$inp));		# load input
+	&xorps	($inout0,$inout3);		# input^=tweak
+	if ($inline)
+	{   &aesni_inline_generate1("dec");	}
+	else
+	{   &call	("_aesni_decrypt1");	}
+	&xorps	($inout0,$inout3);		# output^=tweak
+	&movups	(&QWP(0,$out),$inout0);		# write output
+
+&set_label("xts_dec_steal");
+	&movz	($rounds,&BP(16,$inp));
+	&movz	($key,&BP(0,$out));
+	&lea	($inp,&DWP(1,$inp));
+	&mov	(&BP(0,$out),&LB($rounds));
+	&mov	(&BP(16,$out),&LB($key));
+	&lea	($out,&DWP(1,$out));
+	&sub	($len,1);
+	&jnz	(&label("xts_dec_steal"));
+
+	&sub	($out,&DWP(16*7+0,"esp"));	# rewind $out
+	&mov	($key,$key_);			# restore $key
+	&mov	($rounds,$rounds_);		# restore $rounds
+
+	&movups	($inout0,&QWP(0,$out));		# load input
+	&xorps	($inout0,$inout4);		# input^=tweak
+	if ($inline)
+	{   &aesni_inline_generate1("dec");	}
+	else
+	{   &call	("_aesni_decrypt1");	}
+	&xorps	($inout0,$inout4);		# output^=tweak
+	&movups	(&QWP(0,$out),$inout0);		# write output
+
+&set_label("xts_dec_ret");
+	&mov	("esp",&DWP(16*7+4,"esp"));	# restore %esp
+&function_end("aesni_xts_decrypt");
+}
+}
+
+######################################################################
+# void $PREFIX_cbc_encrypt (const void *inp, void *out,
+#                           size_t length, const AES_KEY *key,
+#                           unsigned char *ivp,const int enc);
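+#
+# enc != 0 selects encryption, enc == 0 decryption.  On encryption a
+# final partial block, if any, is zero-padded to a full block before
+# being encrypted (see the cbc_enc_tail path below).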
+&function_begin("${PREFIX}_cbc_encrypt");
+	&mov	($inp,&wparam(0));
+	&mov	($rounds_,"esp");
+	&mov	($out,&wparam(1));
+	&sub	($rounds_,24);
+	&mov	($len,&wparam(2));
+	&and	($rounds_,-16);
+	&mov	($key,&wparam(3));
+	&mov	($key_,&wparam(4));
+	&test	($len,$len);
+	&jz	(&label("cbc_abort"));
+
+	&cmp	(&wparam(5),0);
+	&xchg	($rounds_,"esp");		# alloca
+	&movups	($ivec,&QWP(0,$key_));		# load IV
+	&mov	($rounds,&DWP(240,$key));
+	&mov	($key_,$key);			# backup $key
+	&mov	(&DWP(16,"esp"),$rounds_);	# save original %esp
+	&mov	($rounds_,$rounds);		# backup $rounds
+	&je	(&label("cbc_decrypt"));
+
+	&movaps	($inout0,$ivec);
+	&cmp	($len,16);
+	&jb	(&label("cbc_enc_tail"));
+	&sub	($len,16);
+	&jmp	(&label("cbc_enc_loop"));
+
+&set_label("cbc_enc_loop",16);
+	&movups	($ivec,&QWP(0,$inp));		# input actually
+	&lea	($inp,&DWP(16,$inp));
+	if ($inline)
+	{   &aesni_inline_generate1("enc",$inout0,$ivec);	}
+	else
+	{   &xorps($inout0,$ivec); &call("_aesni_encrypt1");	}
+	&mov	($rounds,$rounds_);	# restore $rounds
+	&mov	($key,$key_);		# restore $key
+	&movups	(&QWP(0,$out),$inout0);	# store output
+	&lea	($out,&DWP(16,$out));
+	&sub	($len,16);
+	&jnc	(&label("cbc_enc_loop"));
+	&add	($len,16);
+	&jnz	(&label("cbc_enc_tail"));
+	&movaps	($ivec,$inout0);
+	&jmp	(&label("cbc_ret"));
+
+&set_label("cbc_enc_tail");
+	&mov	("ecx",$len);		# zaps $rounds
+	&data_word(0xA4F3F689);		# rep movsb
+	&mov	("ecx",16);		# zero tail
+	&sub	("ecx",$len);
+	&xor	("eax","eax");		# zaps $len
+	&data_word(0xAAF3F689);		# rep stosb
+	&lea	($out,&DWP(-16,$out));	# rewind $out by 1 block
+	&mov	($rounds,$rounds_);	# restore $rounds
+	&mov	($inp,$out);		# $inp and $out are the same
+	&mov	($key,$key_);		# restore $key
+	&jmp	(&label("cbc_enc_loop"));
+######################################################################
+&set_label("cbc_decrypt",16);
+	&cmp	($len,0x50);
+	&jbe	(&label("cbc_dec_tail"));
+	&movaps	(&QWP(0,"esp"),$ivec);		# save IV
+	&sub	($len,0x50);
+	&jmp	(&label("cbc_dec_loop6_enter"));
+
+&set_label("cbc_dec_loop6",16);
+	&movaps	(&QWP(0,"esp"),$rndkey0);	# save IV
+	&movups	(&QWP(0,$out),$inout5);
+	&lea	($out,&DWP(0x10,$out));
+&set_label("cbc_dec_loop6_enter");
+	&movdqu	($inout0,&QWP(0,$inp));
+	&movdqu	($inout1,&QWP(0x10,$inp));
+	&movdqu	($inout2,&QWP(0x20,$inp));
+	&movdqu	($inout3,&QWP(0x30,$inp));
+	&movdqu	($inout4,&QWP(0x40,$inp));
+	&movdqu	($inout5,&QWP(0x50,$inp));
+
+	&call	("_aesni_decrypt6");
+
+	&movups	($rndkey1,&QWP(0,$inp));
+	&movups	($rndkey0,&QWP(0x10,$inp));
+	&xorps	($inout0,&QWP(0,"esp"));	# ^=IV
+	&xorps	($inout1,$rndkey1);
+	&movups	($rndkey1,&QWP(0x20,$inp));
+	&xorps	($inout2,$rndkey0);
+	&movups	($rndkey0,&QWP(0x30,$inp));
+	&xorps	($inout3,$rndkey1);
+	&movups	($rndkey1,&QWP(0x40,$inp));
+	&xorps	($inout4,$rndkey0);
+	&movups	($rndkey0,&QWP(0x50,$inp));	# IV
+	&xorps	($inout5,$rndkey1);
+	&movups	(&QWP(0,$out),$inout0);
+	&movups	(&QWP(0x10,$out),$inout1);
+	&lea	($inp,&DWP(0x60,$inp));
+	&movups	(&QWP(0x20,$out),$inout2);
+	&mov	($rounds,$rounds_);		# restore $rounds
+	&movups	(&QWP(0x30,$out),$inout3);
+	&mov	($key,$key_);			# restore $key
+	&movups	(&QWP(0x40,$out),$inout4);
+	&lea	($out,&DWP(0x50,$out));
+	&sub	($len,0x60);
+	&ja	(&label("cbc_dec_loop6"));
+
+	&movaps	($inout0,$inout5);
+	&movaps	($ivec,$rndkey0);
+	&add	($len,0x50);
+	&jle	(&label("cbc_dec_tail_collected"));
+	&movups	(&QWP(0,$out),$inout0);
+	&lea	($out,&DWP(0x10,$out));
+&set_label("cbc_dec_tail");
+	&movups	($inout0,&QWP(0,$inp));
+	&movaps	($in0,$inout0);
+	&cmp	($len,0x10);
+	&jbe	(&label("cbc_dec_one"));
+
+	&movups	($inout1,&QWP(0x10,$inp));
+	&movaps	($in1,$inout1);
+	&cmp	($len,0x20);
+	&jbe	(&label("cbc_dec_two"));
+
+	&movups	($inout2,&QWP(0x20,$inp));
+	&cmp	($len,0x30);
+	&jbe	(&label("cbc_dec_three"));
+
+	&movups	($inout3,&QWP(0x30,$inp));
+	&cmp	($len,0x40);
+	&jbe	(&label("cbc_dec_four"));
+
+	&movups	($inout4,&QWP(0x40,$inp));
+	&movaps	(&QWP(0,"esp"),$ivec);		# save IV
+	&movups	($inout0,&QWP(0,$inp));
+	&xorps	($inout5,$inout5);
+	&call	("_aesni_decrypt6");
+	&movups	($rndkey1,&QWP(0,$inp));
+	&movups	($rndkey0,&QWP(0x10,$inp));
+	&xorps	($inout0,&QWP(0,"esp"));	# ^= IV
+	&xorps	($inout1,$rndkey1);
+	&movups	($rndkey1,&QWP(0x20,$inp));
+	&xorps	($inout2,$rndkey0);
+	&movups	($rndkey0,&QWP(0x30,$inp));
+	&xorps	($inout3,$rndkey1);
+	&movups	($ivec,&QWP(0x40,$inp));	# IV
+	&xorps	($inout4,$rndkey0);
+	&movups	(&QWP(0,$out),$inout0);
+	&movups	(&QWP(0x10,$out),$inout1);
+	&movups	(&QWP(0x20,$out),$inout2);
+	&movups	(&QWP(0x30,$out),$inout3);
+	&lea	($out,&DWP(0x40,$out));
+	&movaps	($inout0,$inout4);
+	&sub	($len,0x50);
+	&jmp	(&label("cbc_dec_tail_collected"));
+
+&set_label("cbc_dec_one",16);
+	if ($inline)
+	{   &aesni_inline_generate1("dec");	}
+	else
+	{   &call	("_aesni_decrypt1");	}
+	&xorps	($inout0,$ivec);
+	&movaps	($ivec,$in0);
+	&sub	($len,0x10);
+	&jmp	(&label("cbc_dec_tail_collected"));
+
+&set_label("cbc_dec_two",16);
+	&xorps	($inout2,$inout2);
+	&call	("_aesni_decrypt3");
+	&xorps	($inout0,$ivec);
+	&xorps	($inout1,$in0);
+	&movups	(&QWP(0,$out),$inout0);
+	&movaps	($inout0,$inout1);
+	&lea	($out,&DWP(0x10,$out));
+	&movaps	($ivec,$in1);
+	&sub	($len,0x20);
+	&jmp	(&label("cbc_dec_tail_collected"));
+
+&set_label("cbc_dec_three",16);
+	&call	("_aesni_decrypt3");
+	&xorps	($inout0,$ivec);
+	&xorps	($inout1,$in0);
+	&xorps	($inout2,$in1);
+	&movups	(&QWP(0,$out),$inout0);
+	&movaps	($inout0,$inout2);
+	&movups	(&QWP(0x10,$out),$inout1);
+	&lea	($out,&DWP(0x20,$out));
+	&movups	($ivec,&QWP(0x20,$inp));
+	&sub	($len,0x30);
+	&jmp	(&label("cbc_dec_tail_collected"));
+
+&set_label("cbc_dec_four",16);
+	&call	("_aesni_decrypt4");
+	&movups	($rndkey1,&QWP(0x10,$inp));
+	&movups	($rndkey0,&QWP(0x20,$inp));
+	&xorps	($inout0,$ivec);
+	&movups	($ivec,&QWP(0x30,$inp));
+	&xorps	($inout1,$in0);
+	&movups	(&QWP(0,$out),$inout0);
+	&xorps	($inout2,$rndkey1);
+	&movups	(&QWP(0x10,$out),$inout1);
+	&xorps	($inout3,$rndkey0);
+	&movups	(&QWP(0x20,$out),$inout2);
+	&lea	($out,&DWP(0x30,$out));
+	&movaps	($inout0,$inout3);
+	&sub	($len,0x40);
+
+&set_label("cbc_dec_tail_collected");
+	&and	($len,15);
+	&jnz	(&label("cbc_dec_tail_partial"));
+	&movups	(&QWP(0,$out),$inout0);
+	&jmp	(&label("cbc_ret"));
+
+&set_label("cbc_dec_tail_partial",16);
+	&movaps	(&QWP(0,"esp"),$inout0);
+	&mov	("ecx",16);
+	&mov	($inp,"esp");
+	&sub	("ecx",$len);
+	&data_word(0xA4F3F689);		# rep movsb
+
+&set_label("cbc_ret");
+	&mov	("esp",&DWP(16,"esp"));	# pull original %esp
+	&mov	($key_,&wparam(4));
+	&movups	(&QWP(0,$key_),$ivec);	# output IV
+&set_label("cbc_abort");
+&function_end("${PREFIX}_cbc_encrypt");
+
+######################################################################
+# Mechanical port from aesni-x86_64.pl.
+#
+# _aesni_set_encrypt_key is private interface,
+# input:
+#	"eax"	const unsigned char *userKey
+#	$rounds	int bits
+#	$key	AES_KEY *key
+# output:
+#	"eax"	return code
+#	$rounds	rounds
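+#
+# The public $PREFIX_set_encrypt_key wrapper below simply loads these
+# three stack arguments into "eax", $rounds and $key and calls this
+# routine.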
+
+&function_begin_B("_aesni_set_encrypt_key");
+	&test	("eax","eax");
+	&jz	(&label("bad_pointer"));
+	&test	($key,$key);
+	&jz	(&label("bad_pointer"));
+
+	&movups	("xmm0",&QWP(0,"eax"));	# pull first 128 bits of *userKey
+	&xorps	("xmm4","xmm4");	# low dword of xmm4 is assumed 0
+	&lea	($key,&DWP(16,$key));
+	&cmp	($rounds,256);
+	&je	(&label("14rounds"));
+	&cmp	($rounds,192);
+	&je	(&label("12rounds"));
+	&cmp	($rounds,128);
+	&jne	(&label("bad_keybits"));
+
+&set_label("10rounds",16);
+	&mov		($rounds,9);
+	&$movekey	(&QWP(-16,$key),"xmm0");	# round 0
+	&aeskeygenassist("xmm1","xmm0",0x01);		# round 1
+	&call		(&label("key_128_cold"));
+	&aeskeygenassist("xmm1","xmm0",0x2);		# round 2
+	&call		(&label("key_128"));
+	&aeskeygenassist("xmm1","xmm0",0x04);		# round 3
+	&call		(&label("key_128"));
+	&aeskeygenassist("xmm1","xmm0",0x08);		# round 4
+	&call		(&label("key_128"));
+	&aeskeygenassist("xmm1","xmm0",0x10);		# round 5
+	&call		(&label("key_128"));
+	&aeskeygenassist("xmm1","xmm0",0x20);		# round 6
+	&call		(&label("key_128"));
+	&aeskeygenassist("xmm1","xmm0",0x40);		# round 7
+	&call		(&label("key_128"));
+	&aeskeygenassist("xmm1","xmm0",0x80);		# round 8
+	&call		(&label("key_128"));
+	&aeskeygenassist("xmm1","xmm0",0x1b);		# round 9
+	&call		(&label("key_128"));
+	&aeskeygenassist("xmm1","xmm0",0x36);		# round 10
+	&call		(&label("key_128"));
+	&$movekey	(&QWP(0,$key),"xmm0");
+	&mov		(&DWP(80,$key),$rounds);
+	&xor		("eax","eax");
+	&ret();
+
+&set_label("key_128",16);
+	&$movekey	(&QWP(0,$key),"xmm0");
+	&lea		($key,&DWP(16,$key));
+&set_label("key_128_cold");
+	&shufps		("xmm4","xmm0",0b00010000);
+	&xorps		("xmm0","xmm4");
+	&shufps		("xmm4","xmm0",0b10001100);
+	&xorps		("xmm0","xmm4");
+	&shufps		("xmm1","xmm1",0b11111111);	# critical path
+	&xorps		("xmm0","xmm1");
+	&ret();
+
+&set_label("12rounds",16);
+	&movq		("xmm2",&QWP(16,"eax"));	# remaining 1/3 of *userKey
+	&mov		($rounds,11);
+	&$movekey	(&QWP(-16,$key),"xmm0");	# round 0
+	&aeskeygenassist("xmm1","xmm2",0x01);		# round 1,2
+	&call		(&label("key_192a_cold"));
+	&aeskeygenassist("xmm1","xmm2",0x02);		# round 2,3
+	&call		(&label("key_192b"));
+	&aeskeygenassist("xmm1","xmm2",0x04);		# round 4,5
+	&call		(&label("key_192a"));
+	&aeskeygenassist("xmm1","xmm2",0x08);		# round 5,6
+	&call		(&label("key_192b"));
+	&aeskeygenassist("xmm1","xmm2",0x10);		# round 7,8
+	&call		(&label("key_192a"));
+	&aeskeygenassist("xmm1","xmm2",0x20);		# round 8,9
+	&call		(&label("key_192b"));
+	&aeskeygenassist("xmm1","xmm2",0x40);		# round 10,11
+	&call		(&label("key_192a"));
+	&aeskeygenassist("xmm1","xmm2",0x80);		# round 11,12
+	&call		(&label("key_192b"));
+	&$movekey	(&QWP(0,$key),"xmm0");
+	&mov		(&DWP(48,$key),$rounds);
+	&xor		("eax","eax");
+	&ret();
+
+&set_label("key_192a",16);
+	&$movekey	(&QWP(0,$key),"xmm0");
+	&lea		($key,&DWP(16,$key));
+&set_label("key_192a_cold",16);
+	&movaps		("xmm5","xmm2");
+&set_label("key_192b_warm");
+	&shufps		("xmm4","xmm0",0b00010000);
+	&movdqa		("xmm3","xmm2");
+	&xorps		("xmm0","xmm4");
+	&shufps		("xmm4","xmm0",0b10001100);
+	&pslldq		("xmm3",4);
+	&xorps		("xmm0","xmm4");
+	&pshufd		("xmm1","xmm1",0b01010101);	# critical path
+	&pxor		("xmm2","xmm3");
+	&pxor		("xmm0","xmm1");
+	&pshufd		("xmm3","xmm0",0b11111111);
+	&pxor		("xmm2","xmm3");
+	&ret();
+
+&set_label("key_192b",16);
+	&movaps		("xmm3","xmm0");
+	&shufps		("xmm5","xmm0",0b01000100);
+	&$movekey	(&QWP(0,$key),"xmm5");
+	&shufps		("xmm3","xmm2",0b01001110);
+	&$movekey	(&QWP(16,$key),"xmm3");
+	&lea		($key,&DWP(32,$key));
+	&jmp		(&label("key_192b_warm"));
+
+&set_label("14rounds",16);
+	&movups		("xmm2",&QWP(16,"eax"));	# remaining half of *userKey
+	&mov		($rounds,13);
+	&lea		($key,&DWP(16,$key));
+	&$movekey	(&QWP(-32,$key),"xmm0");	# round 0
+	&$movekey	(&QWP(-16,$key),"xmm2");	# round 1
+	&aeskeygenassist("xmm1","xmm2",0x01);		# round 2
+	&call		(&label("key_256a_cold"));
+	&aeskeygenassist("xmm1","xmm0",0x01);		# round 3
+	&call		(&label("key_256b"));
+	&aeskeygenassist("xmm1","xmm2",0x02);		# round 4
+	&call		(&label("key_256a"));
+	&aeskeygenassist("xmm1","xmm0",0x02);		# round 5
+	&call		(&label("key_256b"));
+	&aeskeygenassist("xmm1","xmm2",0x04);		# round 6
+	&call		(&label("key_256a"));
+	&aeskeygenassist("xmm1","xmm0",0x04);		# round 7
+	&call		(&label("key_256b"));
+	&aeskeygenassist("xmm1","xmm2",0x08);		# round 8
+	&call		(&label("key_256a"));
+	&aeskeygenassist("xmm1","xmm0",0x08);		# round 9
+	&call		(&label("key_256b"));
+	&aeskeygenassist("xmm1","xmm2",0x10);		# round 10
+	&call		(&label("key_256a"));
+	&aeskeygenassist("xmm1","xmm0",0x10);		# round 11
+	&call		(&label("key_256b"));
+	&aeskeygenassist("xmm1","xmm2",0x20);		# round 12
+	&call		(&label("key_256a"));
+	&aeskeygenassist("xmm1","xmm0",0x20);		# round 13
+	&call		(&label("key_256b"));
+	&aeskeygenassist("xmm1","xmm2",0x40);		# round 14
+	&call		(&label("key_256a"));
+	&$movekey	(&QWP(0,$key),"xmm0");
+	&mov		(&DWP(16,$key),$rounds);
+	&xor		("eax","eax");
+	&ret();
+
+&set_label("key_256a",16);
+	&$movekey	(&QWP(0,$key),"xmm2");
+	&lea		($key,&DWP(16,$key));
+&set_label("key_256a_cold");
+	&shufps		("xmm4","xmm0",0b00010000);
+	&xorps		("xmm0","xmm4");
+	&shufps		("xmm4","xmm0",0b10001100);
+	&xorps		("xmm0","xmm4");
+	&shufps		("xmm1","xmm1",0b11111111);	# critical path
+	&xorps		("xmm0","xmm1");
+	&ret();
+
+&set_label("key_256b",16);
+	&$movekey	(&QWP(0,$key),"xmm0");
+	&lea		($key,&DWP(16,$key));
+
+	&shufps		("xmm4","xmm2",0b00010000);
+	&xorps		("xmm2","xmm4");
+	&shufps		("xmm4","xmm2",0b10001100);
+	&xorps		("xmm2","xmm4");
+	&shufps		("xmm1","xmm1",0b10101010);	# critical path
+	&xorps		("xmm2","xmm1");
+	&ret();
+
+&set_label("bad_pointer",4);
+	&mov	("eax",-1);
+	&ret	();
+&set_label("bad_keybits",4);
+	&mov	("eax",-2);
+	&ret	();
+&function_end_B("_aesni_set_encrypt_key");
+
+# int $PREFIX_set_encrypt_key (const unsigned char *userKey, int bits,
+#                              AES_KEY *key)
+&function_begin_B("${PREFIX}_set_encrypt_key");
+	&mov	("eax",&wparam(0));
+	&mov	($rounds,&wparam(1));
+	&mov	($key,&wparam(2));
+	&call	("_aesni_set_encrypt_key");
+	&ret	();
+&function_end_B("${PREFIX}_set_encrypt_key");
+
+# int $PREFIX_set_decrypt_key (const unsigned char *userKey, int bits,
+#                              AES_KEY *key)
+&function_begin_B("${PREFIX}_set_decrypt_key");
+	&mov	("eax",&wparam(0));
+	&mov	($rounds,&wparam(1));
+	&mov	($key,&wparam(2));
+	&call	("_aesni_set_encrypt_key");
+	&mov	($key,&wparam(2));
+	&shl	($rounds,4);	# rounds-1 after _aesni_set_encrypt_key
+	&test	("eax","eax");
+	&jnz	(&label("dec_key_ret"));
+	&lea	("eax",&DWP(16,$key,$rounds));	# end of key schedule
+
+	&$movekey	("xmm0",&QWP(0,$key));	# just swap
+	&$movekey	("xmm1",&QWP(0,"eax"));
+	&$movekey	(&QWP(0,"eax"),"xmm0");
+	&$movekey	(&QWP(0,$key),"xmm1");
+	&lea		($key,&DWP(16,$key));
+	&lea		("eax",&DWP(-16,"eax"));
+
+&set_label("dec_key_inverse");
+	&$movekey	("xmm0",&QWP(0,$key));	# swap and inverse
+	&$movekey	("xmm1",&QWP(0,"eax"));
+	&aesimc		("xmm0","xmm0");
+	&aesimc		("xmm1","xmm1");
+	&lea		($key,&DWP(16,$key));
+	&lea		("eax",&DWP(-16,"eax"));
+	&$movekey	(&QWP(16,"eax"),"xmm0");
+	&$movekey	(&QWP(-16,$key),"xmm1");
+	&cmp		("eax",$key);
+	&ja		(&label("dec_key_inverse"));
+
+	&$movekey	("xmm0",&QWP(0,$key));	# inverse middle
+	&aesimc		("xmm0","xmm0");
+	&$movekey	(&QWP(0,$key),"xmm0");
+
+	&xor		("eax","eax");		# return success
+&set_label("dec_key_ret");
+	&ret	();
+&function_end_B("${PREFIX}_set_decrypt_key");
+&asciz("AES for Intel AES-NI, CRYPTOGAMS by <appro\@openssl.org>");
+
+&asm_finish();
diff --git a/jni/openssl/crypto/aes/asm/aesni-x86_64.pl b/jni/openssl/crypto/aes/asm/aesni-x86_64.pl
new file mode 100644
index 0000000..499f3b3
--- /dev/null
+++ b/jni/openssl/crypto/aes/asm/aesni-x86_64.pl
@@ -0,0 +1,3068 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# This module implements support for Intel AES-NI extension. In
+# OpenSSL context it's used with Intel engine, but can also be used as
+# drop-in replacement for crypto/aes/asm/aes-x86_64.pl [see below for
+# details].
+#
+# Performance.
+#
+# Given aes(enc|dec) instructions' latency, asymptotic performance for
+# non-parallelizable modes such as CBC encrypt is 3.75 cycles per byte
+# processed with a 128-bit key. And given their throughput, asymptotic
+# performance for parallelizable modes is 1.25 cycles per byte. Being an
+# asymptotic limit, it's not something you commonly achieve in reality,
+# but how close does one get? Below are results collected for
+# different modes and block sizes. Pairs of numbers are for en-/
+# decryption.
+#
+#	16-byte     64-byte     256-byte    1-KB        8-KB
+# ECB	4.25/4.25   1.38/1.38   1.28/1.28   1.26/1.26	1.26/1.26
+# CTR	5.42/5.42   1.92/1.92   1.44/1.44   1.28/1.28   1.26/1.26
+# CBC	4.38/4.43   4.15/1.43   4.07/1.32   4.07/1.29   4.06/1.28
+# CCM	5.66/9.42   4.42/5.41   4.16/4.40   4.09/4.15   4.06/4.07   
+# OFB	5.42/5.42   4.64/4.64   4.44/4.44   4.39/4.39   4.38/4.38
+# CFB	5.73/5.85   5.56/5.62   5.48/5.56   5.47/5.55   5.47/5.55
+#
+# ECB, CTR, CBC and CCM results are free from EVP overhead. This means
+# that otherwise used 'openssl speed -evp aes-128-??? -engine aesni
+# [-decrypt]' will exhibit 10-15% worse results for smaller blocks.
+# The results were collected with specially crafted speed.c benchmark
+# in order to compare them with results reported in "Intel Advanced
+# Encryption Standard (AES) New Instruction Set" White Paper Revision
+# 3.0 dated May 2010. All above results are consistently better. This
+# module also provides better performance for block sizes smaller than
+# 128 bytes in points *not* represented in the above table.
+#
+# Looking at the results for 8-KB buffer.
+#
+# CFB and OFB results are far from the limit, because implementation
+# uses "generic" CRYPTO_[c|o]fb128_encrypt interfaces relying on
+# single-block aesni_encrypt, which is not the most optimal way to go.
+# CBC encrypt result is unexpectedly high and there is no documented
+# explanation for it. Seemingly there is a small penalty for feeding
+# the result back to AES unit the way it's done in CBC mode. There is
+# nothing one can do and the result appears optimal. CCM result is
+# identical to CBC, because CBC-MAC is essentially CBC encrypt without
+# saving output. CCM CTR "stays invisible," because it's neatly
+# interleaved with CBC-MAC. This provides ~30% improvement over a
+# "straightforward" CCM implementation with CTR and CBC-MAC performed
+# disjointly. Parallelizable modes practically achieve the theoretical
+# limit.
+#
+# Looking at how results vary with buffer size.
+#
+# Curves are practically saturated at 1-KB buffer size. In most cases
+# "256-byte" performance is >95%, and "64-byte" is ~90% of "8-KB" one.
+# The CTR curve doesn't follow this pattern and is the slowest-changing
+# one, with the "256-byte" result being 87% of "8-KB." This is because
+# the overhead in CTR mode is the most computationally intensive.
+# Small-block CCM
+# decrypt is slower than encrypt, because first CTR and last CBC-MAC
+# iterations can't be interleaved.
+#
+# Results for 192- and 256-bit keys.
+#
+# EVP-free results were observed to scale perfectly with number of
+# rounds for larger block sizes, i.e. 192-bit result being 10/12 times
+# lower and 256-bit one - 10/14. Well, in CBC encrypt case differences
+# are a tad smaller, because the above mentioned penalty biases all
+# results by same constant value. In similar way function call
+# overhead affects small-block performance, as well as OFB and CFB
+# results. Differences are not large, most common coefficients are
+# 10/11.7 and 10/13.4 (as opposed to 10/12.0 and 10/14.0), but one
+# observes even 10/11.2 and 10/12.4 (CTR, OFB, CFB)...
+
+# January 2011
+#
+# While Westmere processor features 6 cycles latency for aes[enc|dec]
+# instructions, which can be scheduled every second cycle, Sandy
+# Bridge spends 8 cycles per instruction, but it can schedule them
+# every cycle. This means that code targeting Westmere would perform
+# suboptimally on Sandy Bridge. Therefore this update.
+#
+# In addition, non-parallelizable CBC encrypt (as well as CCM) is
+# optimized. Relative improvement might appear modest, 8% on Westmere,
+# but in absolute terms it's 3.77 cycles per byte encrypted with
+# 128-bit key on Westmere, and 5.07 - on Sandy Bridge. These numbers
+# should be compared to asymptotic limits of 3.75 for Westmere and
+# 5.00 for Sandy Bridge. Actually, the fact that they get this close
+# to asymptotic limits is quite amazing. Indeed, the limit is
+# calculated as latency times number of rounds, 10 for 128-bit key,
+# and divided by 16, the number of bytes in block, or in other words
+# it accounts *solely* for aesenc instructions. But there are extra
+# instructions, and numbers so close to the asymptotic limits mean
+# that it's as if it takes as little as *one* additional cycle to
+# execute all of them. How is it possible? It is possible thanks to
+# out-of-order execution logic, which manages to overlap post-
+# processing of previous block, things like saving the output, with
+# actual encryption of current block, as well as pre-processing of
+# current block, things like fetching input and xor-ing it with
+# 0-round element of the key schedule, with actual encryption of
+# previous block. Keep this in mind...
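+# (To spell the arithmetic out: 6 cycles x 10 rounds / 16 bytes = 3.75
+# cycles per byte on Westmere, and 8 x 10 / 16 = 5.00 on Sandy Bridge,
+# which are exactly the limits quoted above.)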
+#
+# For parallelizable modes, such as ECB, CBC decrypt, CTR, higher
+# performance is achieved by interleaving instructions working on
+# independent blocks. In which case asymptotic limit for such modes
+# can be obtained by dividing above mentioned numbers by AES
+# instructions' interleave factor. Westmere can execute at most 3 
+# instructions at a time, meaning that optimal interleave factor is 3,
+# and that's where the "magic" number of 1.25 comes from. "Optimal
+# interleave factor" means that increasing the interleave factor does
+# not improve performance. The formula has proven to reflect reality
+# pretty well on Westmere... Sandy Bridge on the other hand can
+# execute up to 8 AES instructions at a time, so how does varying
+# interleave factor affect the performance? Here is table for ECB
+# (numbers are cycles per byte processed with 128-bit key):
+#
+# instruction interleave factor		3x	6x	8x
+# theoretical asymptotic limit		1.67	0.83	0.625
+# measured performance for 8KB block	1.05	0.86	0.84
+#
+# "as if" interleave factor		4.7x	5.8x	6.0x
+#
+# Further data for other parallelizable modes:
+#
+# CBC decrypt				1.16	0.93	0.93
+# CTR					1.14	0.91	n/a
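+#
+# (The "theoretical asymptotic limit" row above is simply the 5.00
+# cycles-per-byte limit divided by the interleave factor: 5.00/3 = 1.67,
+# 5.00/6 = 0.83, 5.00/8 = 0.625; likewise the earlier Westmere figure
+# of 1.25 is 3.75/3.)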
+#
+# Well, given 3x column it's probably inappropriate to call the limit
+# asymptotic, if it can be surpassed, isn't it? What happens there?
+# Rewind to CBC paragraph for the answer. Yes, out-of-order execution
+# magic is responsible for this. The processor overlaps not only the
+# additional instructions with AES ones, but even AES instructions
+# processing adjacent triplets of independent blocks. In the 6x case
+# the additional instructions still claim a disproportionately small
+# amount of additional cycles, but in the 8x case the number of
+# instructions must be a tad too high for the out-of-order logic to
+# cope with, and the AES unit remains underutilized... As you can see,
+# 8x interleave is hardly justifiable, so there is no need to feel bad
+# that 32-bit aesni-x86.pl utilizes 6x interleave because of limited
+# register bank capacity.
+#
+# Higher interleave factors do have negative impact on Westmere
+# performance. While for ECB mode it's a negligible ~1.5%, other
+# parallelizable modes perform ~5% worse, which is outweighed by ~25%
+# improvement on Sandy Bridge. To balance regression on Westmere
+# CTR mode was implemented with 6x aesenc interleave factor.
+
+# April 2011
+#
+# Add aesni_xts_[en|de]crypt. Westmere spends 1.33 cycles processing
+# one byte out of 8KB with 128-bit key, Sandy Bridge - 0.97. Just like
+# in CTR mode AES instruction interleave factor was chosen to be 6x.
+
+$PREFIX="aesni";	# if $PREFIX is set to "AES", the script
+			# generates drop-in replacement for
+			# crypto/aes/asm/aes-x86_64.pl:-)
+
+$flavour = shift;
+$output  = shift;
+if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
+
+$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
+die "can't locate x86_64-xlate.pl";
+
+open STDOUT,"| $^X $xlate $flavour $output";
+
+$movkey = $PREFIX eq "aesni" ? "movups" : "movups";
+@_4args=$win64?	("%rcx","%rdx","%r8", "%r9") :	# Win64 order
+		("%rdi","%rsi","%rdx","%rcx");	# Unix order
+
+$code=".text\n";
+
+$rounds="%eax";	# input to and changed by aesni_[en|de]cryptN !!!
+# this is natural Unix argument order for public $PREFIX_[ecb|cbc]_encrypt ...
+$inp="%rdi";
+$out="%rsi";
+$len="%rdx";
+$key="%rcx";	# input to and changed by aesni_[en|de]cryptN !!!
+$ivp="%r8";	# cbc, ctr, ...
+
+$rnds_="%r10d";	# backup copy for $rounds
+$key_="%r11";	# backup copy for $key
+
+# %xmm register layout
+$rndkey0="%xmm0";	$rndkey1="%xmm1";
+$inout0="%xmm2";	$inout1="%xmm3";
+$inout2="%xmm4";	$inout3="%xmm5";
+$inout4="%xmm6";	$inout5="%xmm7";
+$inout6="%xmm8";	$inout7="%xmm9";
+
+$in2="%xmm6";		$in1="%xmm7";	# used in CBC decrypt, CTR, ...
+$in0="%xmm8";		$iv="%xmm9";
+
+# Inline version of internal aesni_[en|de]crypt1.
+#
+# Why folded loop? Because aes[enc|dec] is slow enough to accommodate
+# cycles which take care of loop variables...
+{ my $sn;
+sub aesni_generate1 {
+my ($p,$key,$rounds,$inout,$ivec)=@_;	$inout=$inout0 if (!defined($inout));
+++$sn;
+$code.=<<___;
+	$movkey	($key),$rndkey0
+	$movkey	16($key),$rndkey1
+___
+$code.=<<___ if (defined($ivec));
+	xorps	$rndkey0,$ivec
+	lea	32($key),$key
+	xorps	$ivec,$inout
+___
+$code.=<<___ if (!defined($ivec));
+	lea	32($key),$key
+	xorps	$rndkey0,$inout
+___
+$code.=<<___;
+.Loop_${p}1_$sn:
+	aes${p}	$rndkey1,$inout
+	dec	$rounds
+	$movkey	($key),$rndkey1
+	lea	16($key),$key
+	jnz	.Loop_${p}1_$sn	# loop body is 16 bytes
+	aes${p}last	$rndkey1,$inout
+___
+}}
+# void $PREFIX_[en|de]crypt (const void *inp,void *out,const AES_KEY *key);
+#
+{ my ($inp,$out,$key) = @_4args;
+
+$code.=<<___;
+.globl	${PREFIX}_encrypt
+.type	${PREFIX}_encrypt,\@abi-omnipotent
+.align	16
+${PREFIX}_encrypt:
+	movups	($inp),$inout0		# load input
+	mov	240($key),$rounds	# key->rounds
+___
+	&aesni_generate1("enc",$key,$rounds);
+$code.=<<___;
+	movups	$inout0,($out)		# output
+	ret
+.size	${PREFIX}_encrypt,.-${PREFIX}_encrypt
+
+.globl	${PREFIX}_decrypt
+.type	${PREFIX}_decrypt,\@abi-omnipotent
+.align	16
+${PREFIX}_decrypt:
+	movups	($inp),$inout0		# load input
+	mov	240($key),$rounds	# key->rounds
+___
+	&aesni_generate1("dec",$key,$rounds);
+$code.=<<___;
+	movups	$inout0,($out)		# output
+	ret
+.size	${PREFIX}_decrypt, .-${PREFIX}_decrypt
+___
+}
+
+# _aesni_[en|de]cryptN are private interfaces, N denotes interleave
+# factor. Why were 3x subroutines originally used in loops? Even though
+# aes[enc|dec] latency was originally 6, it could be scheduled only
+# every *2nd* cycle. Thus 3x interleave was the one providing optimal
+# utilization, i.e. when the subroutine's throughput is virtually the
+# same as that of the non-interleaved subroutine [for up to 3 input
+# blocks].
+# This is why it makes no sense to implement 2x subroutine.
+# aes[enc|dec] latency in next processor generation is 8, but the
+# instructions can be scheduled every cycle. Optimal interleave for
+# new processor is therefore 8x...
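+# (Equivalently, the number of independent blocks needed to keep the AES
+# unit busy is latency divided by issue interval: 6/2 = 3 on Westmere
+# and 8/1 = 8 on the newer generation, hence the 3x and 8x figures.)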
+sub aesni_generate3 {
+my $dir=shift;
+# As already mentioned it takes in $key and $rounds, which are *not*
+# preserved. $inout[0-2] is cipher/clear text...
+$code.=<<___;
+.type	_aesni_${dir}rypt3,\@abi-omnipotent
+.align	16
+_aesni_${dir}rypt3:
+	$movkey	($key),$rndkey0
+	shr	\$1,$rounds
+	$movkey	16($key),$rndkey1
+	lea	32($key),$key
+	xorps	$rndkey0,$inout0
+	xorps	$rndkey0,$inout1
+	xorps	$rndkey0,$inout2
+	$movkey		($key),$rndkey0
+
+.L${dir}_loop3:
+	aes${dir}	$rndkey1,$inout0
+	aes${dir}	$rndkey1,$inout1
+	dec		$rounds
+	aes${dir}	$rndkey1,$inout2
+	$movkey		16($key),$rndkey1
+	aes${dir}	$rndkey0,$inout0
+	aes${dir}	$rndkey0,$inout1
+	lea		32($key),$key
+	aes${dir}	$rndkey0,$inout2
+	$movkey		($key),$rndkey0
+	jnz		.L${dir}_loop3
+
+	aes${dir}	$rndkey1,$inout0
+	aes${dir}	$rndkey1,$inout1
+	aes${dir}	$rndkey1,$inout2
+	aes${dir}last	$rndkey0,$inout0
+	aes${dir}last	$rndkey0,$inout1
+	aes${dir}last	$rndkey0,$inout2
+	ret
+.size	_aesni_${dir}rypt3,.-_aesni_${dir}rypt3
+___
+}
+# 4x interleave is implemented to improve small block performance,
+# most notably [and naturally] 4-block by ~30%. One can argue that one
+# should have implemented 5x as well, but improvement would be <20%,
+# so it's not worth it...
+sub aesni_generate4 {
+my $dir=shift;
+# As already mentioned it takes in $key and $rounds, which are *not*
+# preserved. $inout[0-3] is cipher/clear text...
+$code.=<<___;
+.type	_aesni_${dir}rypt4,\@abi-omnipotent
+.align	16
+_aesni_${dir}rypt4:
+	$movkey	($key),$rndkey0
+	shr	\$1,$rounds
+	$movkey	16($key),$rndkey1
+	lea	32($key),$key
+	xorps	$rndkey0,$inout0
+	xorps	$rndkey0,$inout1
+	xorps	$rndkey0,$inout2
+	xorps	$rndkey0,$inout3
+	$movkey	($key),$rndkey0
+
+.L${dir}_loop4:
+	aes${dir}	$rndkey1,$inout0
+	aes${dir}	$rndkey1,$inout1
+	dec		$rounds
+	aes${dir}	$rndkey1,$inout2
+	aes${dir}	$rndkey1,$inout3
+	$movkey		16($key),$rndkey1
+	aes${dir}	$rndkey0,$inout0
+	aes${dir}	$rndkey0,$inout1
+	lea		32($key),$key
+	aes${dir}	$rndkey0,$inout2
+	aes${dir}	$rndkey0,$inout3
+	$movkey		($key),$rndkey0
+	jnz		.L${dir}_loop4
+
+	aes${dir}	$rndkey1,$inout0
+	aes${dir}	$rndkey1,$inout1
+	aes${dir}	$rndkey1,$inout2
+	aes${dir}	$rndkey1,$inout3
+	aes${dir}last	$rndkey0,$inout0
+	aes${dir}last	$rndkey0,$inout1
+	aes${dir}last	$rndkey0,$inout2
+	aes${dir}last	$rndkey0,$inout3
+	ret
+.size	_aesni_${dir}rypt4,.-_aesni_${dir}rypt4
+___
+}
+sub aesni_generate6 {
+my $dir=shift;
+# As already mentioned it takes in $key and $rounds, which are *not*
+# preserved. $inout[0-5] is cipher/clear text...
+$code.=<<___;
+.type	_aesni_${dir}rypt6,\@abi-omnipotent
+.align	16
+_aesni_${dir}rypt6:
+	$movkey		($key),$rndkey0
+	shr		\$1,$rounds
+	$movkey		16($key),$rndkey1
+	lea		32($key),$key
+	xorps		$rndkey0,$inout0
+	pxor		$rndkey0,$inout1
+	aes${dir}	$rndkey1,$inout0
+	pxor		$rndkey0,$inout2
+	aes${dir}	$rndkey1,$inout1
+	pxor		$rndkey0,$inout3
+	aes${dir}	$rndkey1,$inout2
+	pxor		$rndkey0,$inout4
+	aes${dir}	$rndkey1,$inout3
+	pxor		$rndkey0,$inout5
+	dec		$rounds
+	aes${dir}	$rndkey1,$inout4
+	$movkey		($key),$rndkey0
+	aes${dir}	$rndkey1,$inout5
+	jmp		.L${dir}_loop6_enter
+.align	16
+.L${dir}_loop6:
+	aes${dir}	$rndkey1,$inout0
+	aes${dir}	$rndkey1,$inout1
+	dec		$rounds
+	aes${dir}	$rndkey1,$inout2
+	aes${dir}	$rndkey1,$inout3
+	aes${dir}	$rndkey1,$inout4
+	aes${dir}	$rndkey1,$inout5
+.L${dir}_loop6_enter:				# happens to be 16-byte aligned
+	$movkey		16($key),$rndkey1
+	aes${dir}	$rndkey0,$inout0
+	aes${dir}	$rndkey0,$inout1
+	lea		32($key),$key
+	aes${dir}	$rndkey0,$inout2
+	aes${dir}	$rndkey0,$inout3
+	aes${dir}	$rndkey0,$inout4
+	aes${dir}	$rndkey0,$inout5
+	$movkey		($key),$rndkey0
+	jnz		.L${dir}_loop6
+
+	aes${dir}	$rndkey1,$inout0
+	aes${dir}	$rndkey1,$inout1
+	aes${dir}	$rndkey1,$inout2
+	aes${dir}	$rndkey1,$inout3
+	aes${dir}	$rndkey1,$inout4
+	aes${dir}	$rndkey1,$inout5
+	aes${dir}last	$rndkey0,$inout0
+	aes${dir}last	$rndkey0,$inout1
+	aes${dir}last	$rndkey0,$inout2
+	aes${dir}last	$rndkey0,$inout3
+	aes${dir}last	$rndkey0,$inout4
+	aes${dir}last	$rndkey0,$inout5
+	ret
+.size	_aesni_${dir}rypt6,.-_aesni_${dir}rypt6
+___
+}
+sub aesni_generate8 {
+my $dir=shift;
+# As already mentioned it takes in $key and $rounds, which are *not*
+# preserved. $inout[0-7] is cipher/clear text...
+$code.=<<___;
+.type	_aesni_${dir}rypt8,\@abi-omnipotent
+.align	16
+_aesni_${dir}rypt8:
+	$movkey		($key),$rndkey0
+	shr		\$1,$rounds
+	$movkey		16($key),$rndkey1
+	lea		32($key),$key
+	xorps		$rndkey0,$inout0
+	xorps		$rndkey0,$inout1
+	aes${dir}	$rndkey1,$inout0
+	pxor		$rndkey0,$inout2
+	aes${dir}	$rndkey1,$inout1
+	pxor		$rndkey0,$inout3
+	aes${dir}	$rndkey1,$inout2
+	pxor		$rndkey0,$inout4
+	aes${dir}	$rndkey1,$inout3
+	pxor		$rndkey0,$inout5
+	dec		$rounds
+	aes${dir}	$rndkey1,$inout4
+	pxor		$rndkey0,$inout6
+	aes${dir}	$rndkey1,$inout5
+	pxor		$rndkey0,$inout7
+	$movkey		($key),$rndkey0
+	aes${dir}	$rndkey1,$inout6
+	aes${dir}	$rndkey1,$inout7
+	$movkey		16($key),$rndkey1
+	jmp		.L${dir}_loop8_enter
+.align	16
+.L${dir}_loop8:
+	aes${dir}	$rndkey1,$inout0
+	aes${dir}	$rndkey1,$inout1
+	dec		$rounds
+	aes${dir}	$rndkey1,$inout2
+	aes${dir}	$rndkey1,$inout3
+	aes${dir}	$rndkey1,$inout4
+	aes${dir}	$rndkey1,$inout5
+	aes${dir}	$rndkey1,$inout6
+	aes${dir}	$rndkey1,$inout7
+	$movkey		16($key),$rndkey1
+.L${dir}_loop8_enter:				# happens to be 16-byte aligned
+	aes${dir}	$rndkey0,$inout0
+	aes${dir}	$rndkey0,$inout1
+	lea		32($key),$key
+	aes${dir}	$rndkey0,$inout2
+	aes${dir}	$rndkey0,$inout3
+	aes${dir}	$rndkey0,$inout4
+	aes${dir}	$rndkey0,$inout5
+	aes${dir}	$rndkey0,$inout6
+	aes${dir}	$rndkey0,$inout7
+	$movkey		($key),$rndkey0
+	jnz		.L${dir}_loop8
+
+	aes${dir}	$rndkey1,$inout0
+	aes${dir}	$rndkey1,$inout1
+	aes${dir}	$rndkey1,$inout2
+	aes${dir}	$rndkey1,$inout3
+	aes${dir}	$rndkey1,$inout4
+	aes${dir}	$rndkey1,$inout5
+	aes${dir}	$rndkey1,$inout6
+	aes${dir}	$rndkey1,$inout7
+	aes${dir}last	$rndkey0,$inout0
+	aes${dir}last	$rndkey0,$inout1
+	aes${dir}last	$rndkey0,$inout2
+	aes${dir}last	$rndkey0,$inout3
+	aes${dir}last	$rndkey0,$inout4
+	aes${dir}last	$rndkey0,$inout5
+	aes${dir}last	$rndkey0,$inout6
+	aes${dir}last	$rndkey0,$inout7
+	ret
+.size	_aesni_${dir}rypt8,.-_aesni_${dir}rypt8
+___
+}
+&aesni_generate3("enc") if ($PREFIX eq "aesni");
+&aesni_generate3("dec");
+&aesni_generate4("enc") if ($PREFIX eq "aesni");
+&aesni_generate4("dec");
+&aesni_generate6("enc") if ($PREFIX eq "aesni");
+&aesni_generate6("dec");
+&aesni_generate8("enc") if ($PREFIX eq "aesni");
+&aesni_generate8("dec");
+
+if ($PREFIX eq "aesni") {
+########################################################################
+# void aesni_ecb_encrypt (const void *in, void *out,
+#			  size_t length, const AES_KEY *key,
+#			  int enc);
+$code.=<<___;
+.globl	aesni_ecb_encrypt
+.type	aesni_ecb_encrypt,\@function,5
+.align	16
+aesni_ecb_encrypt:
+	and	\$-16,$len
+	jz	.Lecb_ret
+
+	mov	240($key),$rounds	# key->rounds
+	$movkey	($key),$rndkey0
+	mov	$key,$key_		# backup $key
+	mov	$rounds,$rnds_		# backup $rounds
+	test	%r8d,%r8d		# 5th argument
+	jz	.Lecb_decrypt
+#--------------------------- ECB ENCRYPT ------------------------------#
+	cmp	\$0x80,$len
+	jb	.Lecb_enc_tail
+
+	movdqu	($inp),$inout0
+	movdqu	0x10($inp),$inout1
+	movdqu	0x20($inp),$inout2
+	movdqu	0x30($inp),$inout3
+	movdqu	0x40($inp),$inout4
+	movdqu	0x50($inp),$inout5
+	movdqu	0x60($inp),$inout6
+	movdqu	0x70($inp),$inout7
+	lea	0x80($inp),$inp
+	sub	\$0x80,$len
+	jmp	.Lecb_enc_loop8_enter
+.align 16
+.Lecb_enc_loop8:
+	movups	$inout0,($out)
+	mov	$key_,$key		# restore $key
+	movdqu	($inp),$inout0
+	mov	$rnds_,$rounds		# restore $rounds
+	movups	$inout1,0x10($out)
+	movdqu	0x10($inp),$inout1
+	movups	$inout2,0x20($out)
+	movdqu	0x20($inp),$inout2
+	movups	$inout3,0x30($out)
+	movdqu	0x30($inp),$inout3
+	movups	$inout4,0x40($out)
+	movdqu	0x40($inp),$inout4
+	movups	$inout5,0x50($out)
+	movdqu	0x50($inp),$inout5
+	movups	$inout6,0x60($out)
+	movdqu	0x60($inp),$inout6
+	movups	$inout7,0x70($out)
+	lea	0x80($out),$out
+	movdqu	0x70($inp),$inout7
+	lea	0x80($inp),$inp
+.Lecb_enc_loop8_enter:
+
+	call	_aesni_encrypt8
+
+	sub	\$0x80,$len
+	jnc	.Lecb_enc_loop8
+
+	movups	$inout0,($out)
+	mov	$key_,$key		# restore $key
+	movups	$inout1,0x10($out)
+	mov	$rnds_,$rounds		# restore $rounds
+	movups	$inout2,0x20($out)
+	movups	$inout3,0x30($out)
+	movups	$inout4,0x40($out)
+	movups	$inout5,0x50($out)
+	movups	$inout6,0x60($out)
+	movups	$inout7,0x70($out)
+	lea	0x80($out),$out
+	add	\$0x80,$len
+	jz	.Lecb_ret
+
+.Lecb_enc_tail:
+	movups	($inp),$inout0
+	cmp	\$0x20,$len
+	jb	.Lecb_enc_one
+	movups	0x10($inp),$inout1
+	je	.Lecb_enc_two
+	movups	0x20($inp),$inout2
+	cmp	\$0x40,$len
+	jb	.Lecb_enc_three
+	movups	0x30($inp),$inout3
+	je	.Lecb_enc_four
+	movups	0x40($inp),$inout4
+	cmp	\$0x60,$len
+	jb	.Lecb_enc_five
+	movups	0x50($inp),$inout5
+	je	.Lecb_enc_six
+	movdqu	0x60($inp),$inout6
+	call	_aesni_encrypt8
+	movups	$inout0,($out)
+	movups	$inout1,0x10($out)
+	movups	$inout2,0x20($out)
+	movups	$inout3,0x30($out)
+	movups	$inout4,0x40($out)
+	movups	$inout5,0x50($out)
+	movups	$inout6,0x60($out)
+	jmp	.Lecb_ret
+.align	16
+.Lecb_enc_one:
+___
+	&aesni_generate1("enc",$key,$rounds);
+$code.=<<___;
+	movups	$inout0,($out)
+	jmp	.Lecb_ret
+.align	16
+.Lecb_enc_two:
+	xorps	$inout2,$inout2
+	call	_aesni_encrypt3
+	movups	$inout0,($out)
+	movups	$inout1,0x10($out)
+	jmp	.Lecb_ret
+.align	16
+.Lecb_enc_three:
+	call	_aesni_encrypt3
+	movups	$inout0,($out)
+	movups	$inout1,0x10($out)
+	movups	$inout2,0x20($out)
+	jmp	.Lecb_ret
+.align	16
+.Lecb_enc_four:
+	call	_aesni_encrypt4
+	movups	$inout0,($out)
+	movups	$inout1,0x10($out)
+	movups	$inout2,0x20($out)
+	movups	$inout3,0x30($out)
+	jmp	.Lecb_ret
+.align	16
+.Lecb_enc_five:
+	xorps	$inout5,$inout5
+	call	_aesni_encrypt6
+	movups	$inout0,($out)
+	movups	$inout1,0x10($out)
+	movups	$inout2,0x20($out)
+	movups	$inout3,0x30($out)
+	movups	$inout4,0x40($out)
+	jmp	.Lecb_ret
+.align	16
+.Lecb_enc_six:
+	call	_aesni_encrypt6
+	movups	$inout0,($out)
+	movups	$inout1,0x10($out)
+	movups	$inout2,0x20($out)
+	movups	$inout3,0x30($out)
+	movups	$inout4,0x40($out)
+	movups	$inout5,0x50($out)
+	jmp	.Lecb_ret
+#--------------------------- ECB DECRYPT ------------------------------#
+.align	16
+.Lecb_decrypt:
+	cmp	\$0x80,$len
+	jb	.Lecb_dec_tail
+
+	movdqu	($inp),$inout0
+	movdqu	0x10($inp),$inout1
+	movdqu	0x20($inp),$inout2
+	movdqu	0x30($inp),$inout3
+	movdqu	0x40($inp),$inout4
+	movdqu	0x50($inp),$inout5
+	movdqu	0x60($inp),$inout6
+	movdqu	0x70($inp),$inout7
+	lea	0x80($inp),$inp
+	sub	\$0x80,$len
+	jmp	.Lecb_dec_loop8_enter
+.align 16
+.Lecb_dec_loop8:
+	movups	$inout0,($out)
+	mov	$key_,$key		# restore $key
+	movdqu	($inp),$inout0
+	mov	$rnds_,$rounds		# restore $rounds
+	movups	$inout1,0x10($out)
+	movdqu	0x10($inp),$inout1
+	movups	$inout2,0x20($out)
+	movdqu	0x20($inp),$inout2
+	movups	$inout3,0x30($out)
+	movdqu	0x30($inp),$inout3
+	movups	$inout4,0x40($out)
+	movdqu	0x40($inp),$inout4
+	movups	$inout5,0x50($out)
+	movdqu	0x50($inp),$inout5
+	movups	$inout6,0x60($out)
+	movdqu	0x60($inp),$inout6
+	movups	$inout7,0x70($out)
+	lea	0x80($out),$out
+	movdqu	0x70($inp),$inout7
+	lea	0x80($inp),$inp
+.Lecb_dec_loop8_enter:
+
+	call	_aesni_decrypt8
+
+	$movkey	($key_),$rndkey0
+	sub	\$0x80,$len
+	jnc	.Lecb_dec_loop8
+
+	movups	$inout0,($out)
+	mov	$key_,$key		# restore $key
+	movups	$inout1,0x10($out)
+	mov	$rnds_,$rounds		# restore $rounds
+	movups	$inout2,0x20($out)
+	movups	$inout3,0x30($out)
+	movups	$inout4,0x40($out)
+	movups	$inout5,0x50($out)
+	movups	$inout6,0x60($out)
+	movups	$inout7,0x70($out)
+	lea	0x80($out),$out
+	add	\$0x80,$len
+	jz	.Lecb_ret
+
+.Lecb_dec_tail:
+	movups	($inp),$inout0
+	cmp	\$0x20,$len
+	jb	.Lecb_dec_one
+	movups	0x10($inp),$inout1
+	je	.Lecb_dec_two
+	movups	0x20($inp),$inout2
+	cmp	\$0x40,$len
+	jb	.Lecb_dec_three
+	movups	0x30($inp),$inout3
+	je	.Lecb_dec_four
+	movups	0x40($inp),$inout4
+	cmp	\$0x60,$len
+	jb	.Lecb_dec_five
+	movups	0x50($inp),$inout5
+	je	.Lecb_dec_six
+	movups	0x60($inp),$inout6
+	$movkey	($key),$rndkey0
+	call	_aesni_decrypt8
+	movups	$inout0,($out)
+	movups	$inout1,0x10($out)
+	movups	$inout2,0x20($out)
+	movups	$inout3,0x30($out)
+	movups	$inout4,0x40($out)
+	movups	$inout5,0x50($out)
+	movups	$inout6,0x60($out)
+	jmp	.Lecb_ret
+.align	16
+.Lecb_dec_one:
+___
+	&aesni_generate1("dec",$key,$rounds);
+$code.=<<___;
+	movups	$inout0,($out)
+	jmp	.Lecb_ret
+.align	16
+.Lecb_dec_two:
+	xorps	$inout2,$inout2
+	call	_aesni_decrypt3
+	movups	$inout0,($out)
+	movups	$inout1,0x10($out)
+	jmp	.Lecb_ret
+.align	16
+.Lecb_dec_three:
+	call	_aesni_decrypt3
+	movups	$inout0,($out)
+	movups	$inout1,0x10($out)
+	movups	$inout2,0x20($out)
+	jmp	.Lecb_ret
+.align	16
+.Lecb_dec_four:
+	call	_aesni_decrypt4
+	movups	$inout0,($out)
+	movups	$inout1,0x10($out)
+	movups	$inout2,0x20($out)
+	movups	$inout3,0x30($out)
+	jmp	.Lecb_ret
+.align	16
+.Lecb_dec_five:
+	xorps	$inout5,$inout5
+	call	_aesni_decrypt6
+	movups	$inout0,($out)
+	movups	$inout1,0x10($out)
+	movups	$inout2,0x20($out)
+	movups	$inout3,0x30($out)
+	movups	$inout4,0x40($out)
+	jmp	.Lecb_ret
+.align	16
+.Lecb_dec_six:
+	call	_aesni_decrypt6
+	movups	$inout0,($out)
+	movups	$inout1,0x10($out)
+	movups	$inout2,0x20($out)
+	movups	$inout3,0x30($out)
+	movups	$inout4,0x40($out)
+	movups	$inout5,0x50($out)
+
+.Lecb_ret:
+	ret
+.size	aesni_ecb_encrypt,.-aesni_ecb_encrypt
+___
+
+{
+######################################################################
+# void aesni_ccm64_[en|de]crypt_blocks (const void *in, void *out,
+#                         size_t blocks, const AES_KEY *key,
+#                         const char *ivec,char *cmac);
+#
+# Handles only complete blocks, operates on 64-bit counter and
+# does not update *ivec! Nor does it finalize CMAC value
+# (see engine/eng_aesni.c for details)
+#
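+# A minimal caller sketch (C, illustrative only; the buffer names and the
+# surrounding key/IV/CMAC setup are hypothetical and not part of this file,
+# see engine/eng_aesni.c for the real usage):
+#
+#	AES_KEY ks;                /* encryption schedule, set up by the caller */
+#	char ivec[16], cmac[16];   /* counter block and running CMAC            */
+#	aesni_ccm64_encrypt_blocks(in, out, blocks, &ks, ivec, cmac);
+#	/* ivec is not advanced and cmac is not finalized by this routine,
+#	   so the caller has to do both itself */
+#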
+{
+my $cmac="%r9";	# 6th argument
+
+my $increment="%xmm6";
+my $bswap_mask="%xmm7";
+
+$code.=<<___;
+.globl	aesni_ccm64_encrypt_blocks
+.type	aesni_ccm64_encrypt_blocks,\@function,6
+.align	16
+aesni_ccm64_encrypt_blocks:
+___
+$code.=<<___ if ($win64);
+	lea	-0x58(%rsp),%rsp
+	movaps	%xmm6,(%rsp)
+	movaps	%xmm7,0x10(%rsp)
+	movaps	%xmm8,0x20(%rsp)
+	movaps	%xmm9,0x30(%rsp)
+.Lccm64_enc_body:
+___
+$code.=<<___;
+	mov	240($key),$rounds		# key->rounds
+	movdqu	($ivp),$iv
+	movdqa	.Lincrement64(%rip),$increment
+	movdqa	.Lbswap_mask(%rip),$bswap_mask
+
+	shr	\$1,$rounds
+	lea	0($key),$key_
+	movdqu	($cmac),$inout1
+	movdqa	$iv,$inout0
+	mov	$rounds,$rnds_
+	pshufb	$bswap_mask,$iv
+	jmp	.Lccm64_enc_outer
+.align	16
+.Lccm64_enc_outer:
+	$movkey	($key_),$rndkey0
+	mov	$rnds_,$rounds
+	movups	($inp),$in0			# load inp
+
+	xorps	$rndkey0,$inout0		# counter
+	$movkey	16($key_),$rndkey1
+	xorps	$in0,$rndkey0
+	lea	32($key_),$key
+	xorps	$rndkey0,$inout1		# cmac^=inp
+	$movkey	($key),$rndkey0
+
+.Lccm64_enc2_loop:
+	aesenc	$rndkey1,$inout0
+	dec	$rounds
+	aesenc	$rndkey1,$inout1
+	$movkey	16($key),$rndkey1
+	aesenc	$rndkey0,$inout0
+	lea	32($key),$key
+	aesenc	$rndkey0,$inout1
+	$movkey	0($key),$rndkey0
+	jnz	.Lccm64_enc2_loop
+	aesenc	$rndkey1,$inout0
+	aesenc	$rndkey1,$inout1
+	paddq	$increment,$iv
+	aesenclast	$rndkey0,$inout0
+	aesenclast	$rndkey0,$inout1
+
+	dec	$len
+	lea	16($inp),$inp
+	xorps	$inout0,$in0			# inp ^= E(iv)
+	movdqa	$iv,$inout0
+	movups	$in0,($out)			# save output
+	lea	16($out),$out
+	pshufb	$bswap_mask,$inout0
+	jnz	.Lccm64_enc_outer
+
+	movups	$inout1,($cmac)
+___
+$code.=<<___ if ($win64);
+	movaps	(%rsp),%xmm6
+	movaps	0x10(%rsp),%xmm7
+	movaps	0x20(%rsp),%xmm8
+	movaps	0x30(%rsp),%xmm9
+	lea	0x58(%rsp),%rsp
+.Lccm64_enc_ret:
+___
+$code.=<<___;
+	ret
+.size	aesni_ccm64_encrypt_blocks,.-aesni_ccm64_encrypt_blocks
+___
+######################################################################
+$code.=<<___;
+.globl	aesni_ccm64_decrypt_blocks
+.type	aesni_ccm64_decrypt_blocks,\@function,6
+.align	16
+aesni_ccm64_decrypt_blocks:
+___
+$code.=<<___ if ($win64);
+	lea	-0x58(%rsp),%rsp
+	movaps	%xmm6,(%rsp)
+	movaps	%xmm7,0x10(%rsp)
+	movaps	%xmm8,0x20(%rsp)
+	movaps	%xmm9,0x30(%rsp)
+.Lccm64_dec_body:
+___
+$code.=<<___;
+	mov	240($key),$rounds		# key->rounds
+	movups	($ivp),$iv
+	movdqu	($cmac),$inout1
+	movdqa	.Lincrement64(%rip),$increment
+	movdqa	.Lbswap_mask(%rip),$bswap_mask
+
+	movaps	$iv,$inout0
+	mov	$rounds,$rnds_
+	mov	$key,$key_
+	pshufb	$bswap_mask,$iv
+___
+	&aesni_generate1("enc",$key,$rounds);
+$code.=<<___;
+	movups	($inp),$in0			# load inp
+	paddq	$increment,$iv
+	lea	16($inp),$inp
+	jmp	.Lccm64_dec_outer
+.align	16
+.Lccm64_dec_outer:
+	xorps	$inout0,$in0			# inp ^= E(iv)
+	movdqa	$iv,$inout0
+	mov	$rnds_,$rounds
+	movups	$in0,($out)			# save output
+	lea	16($out),$out
+	pshufb	$bswap_mask,$inout0
+
+	sub	\$1,$len
+	jz	.Lccm64_dec_break
+
+	$movkey	($key_),$rndkey0
+	shr	\$1,$rounds
+	$movkey	16($key_),$rndkey1
+	xorps	$rndkey0,$in0
+	lea	32($key_),$key
+	xorps	$rndkey0,$inout0
+	xorps	$in0,$inout1			# cmac^=out
+	$movkey	($key),$rndkey0
+
+.Lccm64_dec2_loop:
+	aesenc	$rndkey1,$inout0
+	dec	$rounds
+	aesenc	$rndkey1,$inout1
+	$movkey	16($key),$rndkey1
+	aesenc	$rndkey0,$inout0
+	lea	32($key),$key
+	aesenc	$rndkey0,$inout1
+	$movkey	0($key),$rndkey0
+	jnz	.Lccm64_dec2_loop
+	movups	($inp),$in0			# load inp
+	paddq	$increment,$iv
+	aesenc	$rndkey1,$inout0
+	aesenc	$rndkey1,$inout1
+	lea	16($inp),$inp
+	aesenclast	$rndkey0,$inout0
+	aesenclast	$rndkey0,$inout1
+	jmp	.Lccm64_dec_outer
+
+.align	16
+.Lccm64_dec_break:
+	#xorps	$in0,$inout1			# cmac^=out
+___
+	&aesni_generate1("enc",$key_,$rounds,$inout1,$in0);
+$code.=<<___;
+	movups	$inout1,($cmac)
+___
+$code.=<<___ if ($win64);
+	movaps	(%rsp),%xmm6
+	movaps	0x10(%rsp),%xmm7
+	movaps	0x20(%rsp),%xmm8
+	movaps	0x30(%rsp),%xmm9
+	lea	0x58(%rsp),%rsp
+.Lccm64_dec_ret:
+___
+$code.=<<___;
+	ret
+.size	aesni_ccm64_decrypt_blocks,.-aesni_ccm64_decrypt_blocks
+___
+}
+######################################################################
+# void aesni_ctr32_encrypt_blocks (const void *in, void *out,
+#                         size_t blocks, const AES_KEY *key,
+#                         const char *ivec);
+#
+# Handles only complete blocks, operates on 32-bit counter and
+# does not update *ivec! (see engine/eng_aesni.c for details)
+#
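+# A minimal caller sketch (C, illustrative only; variable names are
+# hypothetical and counter management is the caller's responsibility):
+#
+#	AES_KEY ks;        /* encryption schedule, set up by the caller      */
+#	char ivec[16];     /* IV whose last 32-bit word is the block counter */
+#	aesni_ctr32_encrypt_blocks(in, out, blocks, &ks, ivec);
+#	/* only whole blocks are handled and ivec is not updated, so the
+#	   caller must bump the counter itself before the next call */
+#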
+{
+my $reserved = $win64?0:-0x28;
+my ($in0,$in1,$in2,$in3)=map("%xmm$_",(8..11));
+my ($iv0,$iv1,$ivec)=("%xmm12","%xmm13","%xmm14");
+my $bswap_mask="%xmm15";
+
+$code.=<<___;
+.globl	aesni_ctr32_encrypt_blocks
+.type	aesni_ctr32_encrypt_blocks,\@function,5
+.align	16
+aesni_ctr32_encrypt_blocks:
+___
+$code.=<<___ if ($win64);
+	lea	-0xc8(%rsp),%rsp
+	movaps	%xmm6,0x20(%rsp)
+	movaps	%xmm7,0x30(%rsp)
+	movaps	%xmm8,0x40(%rsp)
+	movaps	%xmm9,0x50(%rsp)
+	movaps	%xmm10,0x60(%rsp)
+	movaps	%xmm11,0x70(%rsp)
+	movaps	%xmm12,0x80(%rsp)
+	movaps	%xmm13,0x90(%rsp)
+	movaps	%xmm14,0xa0(%rsp)
+	movaps	%xmm15,0xb0(%rsp)
+.Lctr32_body:
+___
+$code.=<<___;
+	cmp	\$1,$len
+	je	.Lctr32_one_shortcut
+
+	movdqu	($ivp),$ivec
+	movdqa	.Lbswap_mask(%rip),$bswap_mask
+	xor	$rounds,$rounds
+	pextrd	\$3,$ivec,$rnds_		# pull 32-bit counter
+	pinsrd	\$3,$rounds,$ivec		# wipe 32-bit counter
+
+	mov	240($key),$rounds		# key->rounds
+	bswap	$rnds_
+	pxor	$iv0,$iv0			# vector of 3 32-bit counters
+	pxor	$iv1,$iv1			# vector of 3 32-bit counters
+	pinsrd	\$0,$rnds_,$iv0
+	lea	3($rnds_),$key_
+	pinsrd	\$0,$key_,$iv1
+	inc	$rnds_
+	pinsrd	\$1,$rnds_,$iv0
+	inc	$key_
+	pinsrd	\$1,$key_,$iv1
+	inc	$rnds_
+	pinsrd	\$2,$rnds_,$iv0
+	inc	$key_
+	pinsrd	\$2,$key_,$iv1
+	movdqa	$iv0,$reserved(%rsp)
+	pshufb	$bswap_mask,$iv0
+	movdqa	$iv1,`$reserved+0x10`(%rsp)
+	pshufb	$bswap_mask,$iv1
+
+	pshufd	\$`3<<6`,$iv0,$inout0		# place counter to upper dword
+	pshufd	\$`2<<6`,$iv0,$inout1
+	pshufd	\$`1<<6`,$iv0,$inout2
+	cmp	\$6,$len
+	jb	.Lctr32_tail
+	shr	\$1,$rounds
+	mov	$key,$key_			# backup $key
+	mov	$rounds,$rnds_			# backup $rounds
+	sub	\$6,$len
+	jmp	.Lctr32_loop6
+
+.align	16
+.Lctr32_loop6:
+	pshufd	\$`3<<6`,$iv1,$inout3
+	por	$ivec,$inout0			# merge counter-less ivec
+	 $movkey	($key_),$rndkey0
+	pshufd	\$`2<<6`,$iv1,$inout4
+	por	$ivec,$inout1
+	 $movkey	16($key_),$rndkey1
+	pshufd	\$`1<<6`,$iv1,$inout5
+	por	$ivec,$inout2
+	por	$ivec,$inout3
+	 xorps		$rndkey0,$inout0
+	por	$ivec,$inout4
+	por	$ivec,$inout5
+
+	# inline _aesni_encrypt6 and interleave last rounds
+	# with own code...
+
+	pxor		$rndkey0,$inout1
+	aesenc		$rndkey1,$inout0
+	lea		32($key_),$key
+	pxor		$rndkey0,$inout2
+	aesenc		$rndkey1,$inout1
+	 movdqa		.Lincrement32(%rip),$iv1
+	pxor		$rndkey0,$inout3
+	aesenc		$rndkey1,$inout2
+	 movdqa		$reserved(%rsp),$iv0
+	pxor		$rndkey0,$inout4
+	aesenc		$rndkey1,$inout3
+	pxor		$rndkey0,$inout5
+	$movkey		($key),$rndkey0
+	dec		$rounds
+	aesenc		$rndkey1,$inout4
+	aesenc		$rndkey1,$inout5
+	jmp		.Lctr32_enc_loop6_enter
+.align	16
+.Lctr32_enc_loop6:
+	aesenc		$rndkey1,$inout0
+	aesenc		$rndkey1,$inout1
+	dec		$rounds
+	aesenc		$rndkey1,$inout2
+	aesenc		$rndkey1,$inout3
+	aesenc		$rndkey1,$inout4
+	aesenc		$rndkey1,$inout5
+.Lctr32_enc_loop6_enter:
+	$movkey		16($key),$rndkey1
+	aesenc		$rndkey0,$inout0
+	aesenc		$rndkey0,$inout1
+	lea		32($key),$key
+	aesenc		$rndkey0,$inout2
+	aesenc		$rndkey0,$inout3
+	aesenc		$rndkey0,$inout4
+	aesenc		$rndkey0,$inout5
+	$movkey		($key),$rndkey0
+	jnz		.Lctr32_enc_loop6
+
+	aesenc		$rndkey1,$inout0
+	 paddd		$iv1,$iv0		# increment counter vector
+	aesenc		$rndkey1,$inout1
+	 paddd		`$reserved+0x10`(%rsp),$iv1
+	aesenc		$rndkey1,$inout2
+	 movdqa		$iv0,$reserved(%rsp)	# save counter vector
+	aesenc		$rndkey1,$inout3
+	 movdqa		$iv1,`$reserved+0x10`(%rsp)
+	aesenc		$rndkey1,$inout4
+	 pshufb		$bswap_mask,$iv0	# byte swap
+	aesenc		$rndkey1,$inout5
+	 pshufb		$bswap_mask,$iv1
+
+	aesenclast	$rndkey0,$inout0
+	 movups		($inp),$in0		# load input
+	aesenclast	$rndkey0,$inout1
+	 movups		0x10($inp),$in1
+	aesenclast	$rndkey0,$inout2
+	 movups		0x20($inp),$in2
+	aesenclast	$rndkey0,$inout3
+	 movups		0x30($inp),$in3
+	aesenclast	$rndkey0,$inout4
+	 movups		0x40($inp),$rndkey1
+	aesenclast	$rndkey0,$inout5
+	 movups		0x50($inp),$rndkey0
+	 lea	0x60($inp),$inp
+
+	xorps	$inout0,$in0			# xor
+	 pshufd	\$`3<<6`,$iv0,$inout0
+	xorps	$inout1,$in1
+	 pshufd	\$`2<<6`,$iv0,$inout1
+	movups	$in0,($out)			# store output
+	xorps	$inout2,$in2
+	 pshufd	\$`1<<6`,$iv0,$inout2
+	movups	$in1,0x10($out)
+	xorps	$inout3,$in3
+	movups	$in2,0x20($out)
+	xorps	$inout4,$rndkey1
+	movups	$in3,0x30($out)
+	xorps	$inout5,$rndkey0
+	movups	$rndkey1,0x40($out)
+	movups	$rndkey0,0x50($out)
+	lea	0x60($out),$out
+	mov	$rnds_,$rounds
+	sub	\$6,$len
+	jnc	.Lctr32_loop6
+
+	add	\$6,$len
+	jz	.Lctr32_done
+	mov	$key_,$key			# restore $key
+	lea	1($rounds,$rounds),$rounds	# restore original value
+
+.Lctr32_tail:
+	por	$ivec,$inout0
+	movups	($inp),$in0
+	cmp	\$2,$len
+	jb	.Lctr32_one
+
+	por	$ivec,$inout1
+	movups	0x10($inp),$in1
+	je	.Lctr32_two
+
+	pshufd	\$`3<<6`,$iv1,$inout3
+	por	$ivec,$inout2
+	movups	0x20($inp),$in2
+	cmp	\$4,$len
+	jb	.Lctr32_three
+
+	pshufd	\$`2<<6`,$iv1,$inout4
+	por	$ivec,$inout3
+	movups	0x30($inp),$in3
+	je	.Lctr32_four
+
+	por	$ivec,$inout4
+	xorps	$inout5,$inout5
+
+	call	_aesni_encrypt6
+
+	movups	0x40($inp),$rndkey1
+	xorps	$inout0,$in0
+	xorps	$inout1,$in1
+	movups	$in0,($out)
+	xorps	$inout2,$in2
+	movups	$in1,0x10($out)
+	xorps	$inout3,$in3
+	movups	$in2,0x20($out)
+	xorps	$inout4,$rndkey1
+	movups	$in3,0x30($out)
+	movups	$rndkey1,0x40($out)
+	jmp	.Lctr32_done
+
+.align	16
+.Lctr32_one_shortcut:
+	movups	($ivp),$inout0
+	movups	($inp),$in0
+	mov	240($key),$rounds		# key->rounds
+.Lctr32_one:
+___
+	&aesni_generate1("enc",$key,$rounds);
+$code.=<<___;
+	xorps	$inout0,$in0
+	movups	$in0,($out)
+	jmp	.Lctr32_done
+
+.align	16
+.Lctr32_two:
+	xorps	$inout2,$inout2
+	call	_aesni_encrypt3
+	xorps	$inout0,$in0
+	xorps	$inout1,$in1
+	movups	$in0,($out)
+	movups	$in1,0x10($out)
+	jmp	.Lctr32_done
+
+.align	16
+.Lctr32_three:
+	call	_aesni_encrypt3
+	xorps	$inout0,$in0
+	xorps	$inout1,$in1
+	movups	$in0,($out)
+	xorps	$inout2,$in2
+	movups	$in1,0x10($out)
+	movups	$in2,0x20($out)
+	jmp	.Lctr32_done
+
+.align	16
+.Lctr32_four:
+	call	_aesni_encrypt4
+	xorps	$inout0,$in0
+	xorps	$inout1,$in1
+	movups	$in0,($out)
+	xorps	$inout2,$in2
+	movups	$in1,0x10($out)
+	xorps	$inout3,$in3
+	movups	$in2,0x20($out)
+	movups	$in3,0x30($out)
+
+.Lctr32_done:
+___
+$code.=<<___ if ($win64);
+	movaps	0x20(%rsp),%xmm6
+	movaps	0x30(%rsp),%xmm7
+	movaps	0x40(%rsp),%xmm8
+	movaps	0x50(%rsp),%xmm9
+	movaps	0x60(%rsp),%xmm10
+	movaps	0x70(%rsp),%xmm11
+	movaps	0x80(%rsp),%xmm12
+	movaps	0x90(%rsp),%xmm13
+	movaps	0xa0(%rsp),%xmm14
+	movaps	0xb0(%rsp),%xmm15
+	lea	0xc8(%rsp),%rsp
+.Lctr32_ret:
+___
+$code.=<<___;
+	ret
+.size	aesni_ctr32_encrypt_blocks,.-aesni_ctr32_encrypt_blocks
+___
+}
+
+######################################################################
+# void aesni_xts_[en|de]crypt(const char *inp,char *out,size_t len,
+#	const AES_KEY *key1, const AES_KEY *key2,
+#	const unsigned char iv[16]);
+#
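+# A minimal caller sketch (C, illustrative only; names are hypothetical).
+# key2 is only used to encrypt the clear-text tweak (in both directions),
+# so it must be an encryption schedule even when decrypting:
+#
+#	AES_KEY ks1, ks2;      /* ks1 processes the data, ks2 generates the tweak */
+#	unsigned char iv[16];  /* clear-text tweak, e.g. the sector number        */
+#	aesni_xts_encrypt(in, out, len, &ks1, &ks2, iv);
+#	/* for aesni_xts_decrypt, ks1 would be a decryption schedule instead */
+#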
+{
+my @tweak=map("%xmm$_",(10..15));
+my ($twmask,$twres,$twtmp)=("%xmm8","%xmm9",@tweak[4]);
+my ($key2,$ivp,$len_)=("%r8","%r9","%r9");
+my $frame_size = 0x68 + ($win64?160:0);
+
+$code.=<<___;
+.globl	aesni_xts_encrypt
+.type	aesni_xts_encrypt,\@function,6
+.align	16
+aesni_xts_encrypt:
+	lea	-$frame_size(%rsp),%rsp
+___
+$code.=<<___ if ($win64);
+	movaps	%xmm6,0x60(%rsp)
+	movaps	%xmm7,0x70(%rsp)
+	movaps	%xmm8,0x80(%rsp)
+	movaps	%xmm9,0x90(%rsp)
+	movaps	%xmm10,0xa0(%rsp)
+	movaps	%xmm11,0xb0(%rsp)
+	movaps	%xmm12,0xc0(%rsp)
+	movaps	%xmm13,0xd0(%rsp)
+	movaps	%xmm14,0xe0(%rsp)
+	movaps	%xmm15,0xf0(%rsp)
+.Lxts_enc_body:
+___
+$code.=<<___;
+	movups	($ivp),@tweak[5]		# load clear-text tweak
+	mov	240(%r8),$rounds		# key2->rounds
+	mov	240($key),$rnds_		# key1->rounds
+___
+	# generate the tweak
+	&aesni_generate1("enc",$key2,$rounds,@tweak[5]);
+$code.=<<___;
+	mov	$key,$key_			# backup $key
+	mov	$rnds_,$rounds			# backup $rounds
+	mov	$len,$len_			# backup $len
+	and	\$-16,$len
+
+	movdqa	.Lxts_magic(%rip),$twmask
+	pxor	$twtmp,$twtmp
+	pcmpgtd	@tweak[5],$twtmp		# broadcast upper bits
+___
+    for ($i=0;$i<4;$i++) {
+    $code.=<<___;
+	pshufd	\$0x13,$twtmp,$twres
+	pxor	$twtmp,$twtmp
+	movdqa	@tweak[5],@tweak[$i]
+	paddq	@tweak[5],@tweak[5]		# psllq	1,$tweak
+	pand	$twmask,$twres			# isolate carry and residue
+	pcmpgtd	@tweak[5],$twtmp		# broadcast upper bits
+	pxor	$twres,@tweak[5]
+___
+    }
+$code.=<<___;
+	sub	\$16*6,$len
+	jc	.Lxts_enc_short
+
+	shr	\$1,$rounds
+	sub	\$1,$rounds
+	mov	$rounds,$rnds_
+	jmp	.Lxts_enc_grandloop
+
+.align	16
+.Lxts_enc_grandloop:
+	pshufd	\$0x13,$twtmp,$twres
+	movdqa	@tweak[5],@tweak[4]
+	paddq	@tweak[5],@tweak[5]		# psllq 1,$tweak
+	movdqu	`16*0`($inp),$inout0		# load input
+	pand	$twmask,$twres			# isolate carry and residue
+	movdqu	`16*1`($inp),$inout1
+	pxor	$twres,@tweak[5]
+
+	movdqu	`16*2`($inp),$inout2
+	pxor	@tweak[0],$inout0		# input^=tweak
+	movdqu	`16*3`($inp),$inout3
+	pxor	@tweak[1],$inout1
+	movdqu	`16*4`($inp),$inout4
+	pxor	@tweak[2],$inout2
+	movdqu	`16*5`($inp),$inout5
+	lea	`16*6`($inp),$inp
+	pxor	@tweak[3],$inout3
+	$movkey		($key_),$rndkey0
+	pxor	@tweak[4],$inout4
+	pxor	@tweak[5],$inout5
+
+	# inline _aesni_encrypt6 and interleave first and last rounds
+	# with own code...
+	$movkey		16($key_),$rndkey1
+	pxor		$rndkey0,$inout0
+	pxor		$rndkey0,$inout1
+	 movdqa	@tweak[0],`16*0`(%rsp)		# put aside tweaks
+	aesenc		$rndkey1,$inout0
+	lea		32($key_),$key
+	pxor		$rndkey0,$inout2
+	 movdqa	@tweak[1],`16*1`(%rsp)
+	aesenc		$rndkey1,$inout1
+	pxor		$rndkey0,$inout3
+	 movdqa	@tweak[2],`16*2`(%rsp)
+	aesenc		$rndkey1,$inout2
+	pxor		$rndkey0,$inout4
+	 movdqa	@tweak[3],`16*3`(%rsp)
+	aesenc		$rndkey1,$inout3
+	pxor		$rndkey0,$inout5
+	$movkey		($key),$rndkey0
+	dec		$rounds
+	 movdqa	@tweak[4],`16*4`(%rsp)
+	aesenc		$rndkey1,$inout4
+	 movdqa	@tweak[5],`16*5`(%rsp)
+	aesenc		$rndkey1,$inout5
+	pxor	$twtmp,$twtmp
+	pcmpgtd	@tweak[5],$twtmp
+	jmp		.Lxts_enc_loop6_enter
+
+.align	16
+.Lxts_enc_loop6:
+	aesenc		$rndkey1,$inout0
+	aesenc		$rndkey1,$inout1
+	dec		$rounds
+	aesenc		$rndkey1,$inout2
+	aesenc		$rndkey1,$inout3
+	aesenc		$rndkey1,$inout4
+	aesenc		$rndkey1,$inout5
+.Lxts_enc_loop6_enter:
+	$movkey		16($key),$rndkey1
+	aesenc		$rndkey0,$inout0
+	aesenc		$rndkey0,$inout1
+	lea		32($key),$key
+	aesenc		$rndkey0,$inout2
+	aesenc		$rndkey0,$inout3
+	aesenc		$rndkey0,$inout4
+	aesenc		$rndkey0,$inout5
+	$movkey		($key),$rndkey0
+	jnz		.Lxts_enc_loop6
+
+	pshufd	\$0x13,$twtmp,$twres
+	pxor	$twtmp,$twtmp
+	paddq	@tweak[5],@tweak[5]		# psllq	1,$tweak
+	 aesenc		$rndkey1,$inout0
+	pand	$twmask,$twres			# isolate carry and residue
+	 aesenc		$rndkey1,$inout1
+	pcmpgtd	@tweak[5],$twtmp		# broadcast upper bits
+	 aesenc		$rndkey1,$inout2
+	pxor	$twres,@tweak[5]
+	 aesenc		$rndkey1,$inout3
+	 aesenc		$rndkey1,$inout4
+	 aesenc		$rndkey1,$inout5
+	 $movkey	16($key),$rndkey1
+
+	pshufd	\$0x13,$twtmp,$twres
+	pxor	$twtmp,$twtmp
+	movdqa	@tweak[5],@tweak[0]
+	paddq	@tweak[5],@tweak[5]		# psllq	1,$tweak
+	 aesenc		$rndkey0,$inout0
+	pand	$twmask,$twres			# isolate carry and residue
+	 aesenc		$rndkey0,$inout1
+	pcmpgtd	@tweak[5],$twtmp		# broadcast upper bits
+	 aesenc		$rndkey0,$inout2
+	pxor	$twres,@tweak[5]
+	 aesenc		$rndkey0,$inout3
+	 aesenc		$rndkey0,$inout4
+	 aesenc		$rndkey0,$inout5
+	 $movkey	32($key),$rndkey0
+
+	pshufd	\$0x13,$twtmp,$twres
+	pxor	$twtmp,$twtmp
+	movdqa	@tweak[5],@tweak[1]
+	paddq	@tweak[5],@tweak[5]		# psllq	1,$tweak
+	 aesenc		$rndkey1,$inout0
+	pand	$twmask,$twres			# isolate carry and residue
+	 aesenc		$rndkey1,$inout1
+	pcmpgtd	@tweak[5],$twtmp		# broadcast upper bits
+	 aesenc		$rndkey1,$inout2
+	pxor	$twres,@tweak[5]
+	 aesenc		$rndkey1,$inout3
+	 aesenc		$rndkey1,$inout4
+	 aesenc		$rndkey1,$inout5
+
+	pshufd	\$0x13,$twtmp,$twres
+	pxor	$twtmp,$twtmp
+	movdqa	@tweak[5],@tweak[2]
+	paddq	@tweak[5],@tweak[5]		# psllq	1,$tweak
+	 aesenclast	$rndkey0,$inout0
+	pand	$twmask,$twres			# isolate carry and residue
+	 aesenclast	$rndkey0,$inout1
+	pcmpgtd	@tweak[5],$twtmp		# broadcast upper bits
+	 aesenclast	$rndkey0,$inout2
+	pxor	$twres,@tweak[5]
+	 aesenclast	$rndkey0,$inout3
+	 aesenclast	$rndkey0,$inout4
+	 aesenclast	$rndkey0,$inout5
+
+	pshufd	\$0x13,$twtmp,$twres
+	pxor	$twtmp,$twtmp
+	movdqa	@tweak[5],@tweak[3]
+	paddq	@tweak[5],@tweak[5]		# psllq	1,$tweak
+	 xorps	`16*0`(%rsp),$inout0		# output^=tweak
+	pand	$twmask,$twres			# isolate carry and residue
+	 xorps	`16*1`(%rsp),$inout1
+	pcmpgtd	@tweak[5],$twtmp		# broadcast upper bits
+	pxor	$twres,@tweak[5]
+
+	xorps	`16*2`(%rsp),$inout2
+	movups	$inout0,`16*0`($out)		# write output
+	xorps	`16*3`(%rsp),$inout3
+	movups	$inout1,`16*1`($out)
+	xorps	`16*4`(%rsp),$inout4
+	movups	$inout2,`16*2`($out)
+	xorps	`16*5`(%rsp),$inout5
+	movups	$inout3,`16*3`($out)
+	mov	$rnds_,$rounds			# restore $rounds
+	movups	$inout4,`16*4`($out)
+	movups	$inout5,`16*5`($out)
+	lea	`16*6`($out),$out
+	sub	\$16*6,$len
+	jnc	.Lxts_enc_grandloop
+
+	lea	3($rounds,$rounds),$rounds	# restore original value
+	mov	$key_,$key			# restore $key
+	mov	$rounds,$rnds_			# backup $rounds
+
+.Lxts_enc_short:
+	add	\$16*6,$len
+	jz	.Lxts_enc_done
+
+	cmp	\$0x20,$len
+	jb	.Lxts_enc_one
+	je	.Lxts_enc_two
+
+	cmp	\$0x40,$len
+	jb	.Lxts_enc_three
+	je	.Lxts_enc_four
+
+	pshufd	\$0x13,$twtmp,$twres
+	movdqa	@tweak[5],@tweak[4]
+	paddq	@tweak[5],@tweak[5]		# psllq 1,$tweak
+	 movdqu	($inp),$inout0
+	pand	$twmask,$twres			# isolate carry and residue
+	 movdqu	16*1($inp),$inout1
+	pxor	$twres,@tweak[5]
+
+	movdqu	16*2($inp),$inout2
+	pxor	@tweak[0],$inout0
+	movdqu	16*3($inp),$inout3
+	pxor	@tweak[1],$inout1
+	movdqu	16*4($inp),$inout4
+	lea	16*5($inp),$inp
+	pxor	@tweak[2],$inout2
+	pxor	@tweak[3],$inout3
+	pxor	@tweak[4],$inout4
+
+	call	_aesni_encrypt6
+
+	xorps	@tweak[0],$inout0
+	movdqa	@tweak[5],@tweak[0]
+	xorps	@tweak[1],$inout1
+	xorps	@tweak[2],$inout2
+	movdqu	$inout0,($out)
+	xorps	@tweak[3],$inout3
+	movdqu	$inout1,16*1($out)
+	xorps	@tweak[4],$inout4
+	movdqu	$inout2,16*2($out)
+	movdqu	$inout3,16*3($out)
+	movdqu	$inout4,16*4($out)
+	lea	16*5($out),$out
+	jmp	.Lxts_enc_done
+
+.align	16
+.Lxts_enc_one:
+	movups	($inp),$inout0
+	lea	16*1($inp),$inp
+	xorps	@tweak[0],$inout0
+___
+	&aesni_generate1("enc",$key,$rounds);
+$code.=<<___;
+	xorps	@tweak[0],$inout0
+	movdqa	@tweak[1],@tweak[0]
+	movups	$inout0,($out)
+	lea	16*1($out),$out
+	jmp	.Lxts_enc_done
+
+.align	16
+.Lxts_enc_two:
+	movups	($inp),$inout0
+	movups	16($inp),$inout1
+	lea	32($inp),$inp
+	xorps	@tweak[0],$inout0
+	xorps	@tweak[1],$inout1
+
+	call	_aesni_encrypt3
+
+	xorps	@tweak[0],$inout0
+	movdqa	@tweak[2],@tweak[0]
+	xorps	@tweak[1],$inout1
+	movups	$inout0,($out)
+	movups	$inout1,16*1($out)
+	lea	16*2($out),$out
+	jmp	.Lxts_enc_done
+
+.align	16
+.Lxts_enc_three:
+	movups	($inp),$inout0
+	movups	16*1($inp),$inout1
+	movups	16*2($inp),$inout2
+	lea	16*3($inp),$inp
+	xorps	@tweak[0],$inout0
+	xorps	@tweak[1],$inout1
+	xorps	@tweak[2],$inout2
+
+	call	_aesni_encrypt3
+
+	xorps	@tweak[0],$inout0
+	movdqa	@tweak[3],@tweak[0]
+	xorps	@tweak[1],$inout1
+	xorps	@tweak[2],$inout2
+	movups	$inout0,($out)
+	movups	$inout1,16*1($out)
+	movups	$inout2,16*2($out)
+	lea	16*3($out),$out
+	jmp	.Lxts_enc_done
+
+.align	16
+.Lxts_enc_four:
+	movups	($inp),$inout0
+	movups	16*1($inp),$inout1
+	movups	16*2($inp),$inout2
+	xorps	@tweak[0],$inout0
+	movups	16*3($inp),$inout3
+	lea	16*4($inp),$inp
+	xorps	@tweak[1],$inout1
+	xorps	@tweak[2],$inout2
+	xorps	@tweak[3],$inout3
+
+	call	_aesni_encrypt4
+
+	xorps	@tweak[0],$inout0
+	movdqa	@tweak[5],@tweak[0]
+	xorps	@tweak[1],$inout1
+	xorps	@tweak[2],$inout2
+	movups	$inout0,($out)
+	xorps	@tweak[3],$inout3
+	movups	$inout1,16*1($out)
+	movups	$inout2,16*2($out)
+	movups	$inout3,16*3($out)
+	lea	16*4($out),$out
+	jmp	.Lxts_enc_done
+
+.align	16
+.Lxts_enc_done:
+	and	\$15,$len_
+	jz	.Lxts_enc_ret
+	mov	$len_,$len
+
+.Lxts_enc_steal:
+	movzb	($inp),%eax			# borrow $rounds ...
+	movzb	-16($out),%ecx			# ... and $key
+	lea	1($inp),$inp
+	mov	%al,-16($out)
+	mov	%cl,0($out)
+	lea	1($out),$out
+	sub	\$1,$len
+	jnz	.Lxts_enc_steal
+
+	sub	$len_,$out			# rewind $out
+	mov	$key_,$key			# restore $key
+	mov	$rnds_,$rounds			# restore $rounds
+
+	movups	-16($out),$inout0
+	xorps	@tweak[0],$inout0
+___
+	&aesni_generate1("enc",$key,$rounds);
+$code.=<<___;
+	xorps	@tweak[0],$inout0
+	movups	$inout0,-16($out)
+
+.Lxts_enc_ret:
+___
+$code.=<<___ if ($win64);
+	movaps	0x60(%rsp),%xmm6
+	movaps	0x70(%rsp),%xmm7
+	movaps	0x80(%rsp),%xmm8
+	movaps	0x90(%rsp),%xmm9
+	movaps	0xa0(%rsp),%xmm10
+	movaps	0xb0(%rsp),%xmm11
+	movaps	0xc0(%rsp),%xmm12
+	movaps	0xd0(%rsp),%xmm13
+	movaps	0xe0(%rsp),%xmm14
+	movaps	0xf0(%rsp),%xmm15
+___
+$code.=<<___;
+	lea	$frame_size(%rsp),%rsp
+.Lxts_enc_epilogue:
+	ret
+.size	aesni_xts_encrypt,.-aesni_xts_encrypt
+___
+
+$code.=<<___;
+.globl	aesni_xts_decrypt
+.type	aesni_xts_decrypt,\@function,6
+.align	16
+aesni_xts_decrypt:
+	lea	-$frame_size(%rsp),%rsp
+___
+$code.=<<___ if ($win64);
+	movaps	%xmm6,0x60(%rsp)
+	movaps	%xmm7,0x70(%rsp)
+	movaps	%xmm8,0x80(%rsp)
+	movaps	%xmm9,0x90(%rsp)
+	movaps	%xmm10,0xa0(%rsp)
+	movaps	%xmm11,0xb0(%rsp)
+	movaps	%xmm12,0xc0(%rsp)
+	movaps	%xmm13,0xd0(%rsp)
+	movaps	%xmm14,0xe0(%rsp)
+	movaps	%xmm15,0xf0(%rsp)
+.Lxts_dec_body:
+___
+$code.=<<___;
+	movups	($ivp),@tweak[5]		# load clear-text tweak
+	mov	240($key2),$rounds		# key2->rounds
+	mov	240($key),$rnds_		# key1->rounds
+___
+	# generate the tweak
+	&aesni_generate1("enc",$key2,$rounds,@tweak[5]);
+$code.=<<___;
+	xor	%eax,%eax			# if ($len%16) len-=16;
+	test	\$15,$len
+	setnz	%al
+	shl	\$4,%rax
+	sub	%rax,$len
+
+	mov	$key,$key_			# backup $key
+	mov	$rnds_,$rounds			# backup $rounds
+	mov	$len,$len_			# backup $len
+	and	\$-16,$len
+
+	movdqa	.Lxts_magic(%rip),$twmask
+	pxor	$twtmp,$twtmp
+	pcmpgtd	@tweak[5],$twtmp		# broadcast upper bits
+___
+    for ($i=0;$i<4;$i++) {
+    $code.=<<___;
+	pshufd	\$0x13,$twtmp,$twres
+	pxor	$twtmp,$twtmp
+	movdqa	@tweak[5],@tweak[$i]
+	paddq	@tweak[5],@tweak[5]		# psllq	1,$tweak
+	pand	$twmask,$twres			# isolate carry and residue
+	pcmpgtd	@tweak[5],$twtmp		# broadcast upper bits
+	pxor	$twres,@tweak[5]
+___
+    }
+$code.=<<___;
+	sub	\$16*6,$len
+	jc	.Lxts_dec_short
+
+	shr	\$1,$rounds
+	sub	\$1,$rounds
+	mov	$rounds,$rnds_
+	jmp	.Lxts_dec_grandloop
+
+.align	16
+.Lxts_dec_grandloop:
+	pshufd	\$0x13,$twtmp,$twres
+	movdqa	@tweak[5],@tweak[4]
+	paddq	@tweak[5],@tweak[5]		# psllq 1,$tweak
+	movdqu	`16*0`($inp),$inout0		# load input
+	pand	$twmask,$twres			# isolate carry and residue
+	movdqu	`16*1`($inp),$inout1
+	pxor	$twres,@tweak[5]
+
+	movdqu	`16*2`($inp),$inout2
+	pxor	@tweak[0],$inout0		# input^=tweak
+	movdqu	`16*3`($inp),$inout3
+	pxor	@tweak[1],$inout1
+	movdqu	`16*4`($inp),$inout4
+	pxor	@tweak[2],$inout2
+	movdqu	`16*5`($inp),$inout5
+	lea	`16*6`($inp),$inp
+	pxor	@tweak[3],$inout3
+	$movkey		($key_),$rndkey0
+	pxor	@tweak[4],$inout4
+	pxor	@tweak[5],$inout5
+
+	# inline _aesni_decrypt6 and interleave first and last rounds
+	# with own code...
+	$movkey		16($key_),$rndkey1
+	pxor		$rndkey0,$inout0
+	pxor		$rndkey0,$inout1
+	 movdqa	@tweak[0],`16*0`(%rsp)		# put aside tweaks
+	aesdec		$rndkey1,$inout0
+	lea		32($key_),$key
+	pxor		$rndkey0,$inout2
+	 movdqa	@tweak[1],`16*1`(%rsp)
+	aesdec		$rndkey1,$inout1
+	pxor		$rndkey0,$inout3
+	 movdqa	@tweak[2],`16*2`(%rsp)
+	aesdec		$rndkey1,$inout2
+	pxor		$rndkey0,$inout4
+	 movdqa	@tweak[3],`16*3`(%rsp)
+	aesdec		$rndkey1,$inout3
+	pxor		$rndkey0,$inout5
+	$movkey		($key),$rndkey0
+	dec		$rounds
+	 movdqa	@tweak[4],`16*4`(%rsp)
+	aesdec		$rndkey1,$inout4
+	 movdqa	@tweak[5],`16*5`(%rsp)
+	aesdec		$rndkey1,$inout5
+	pxor	$twtmp,$twtmp
+	pcmpgtd	@tweak[5],$twtmp
+	jmp		.Lxts_dec_loop6_enter
+
+.align	16
+.Lxts_dec_loop6:
+	aesdec		$rndkey1,$inout0
+	aesdec		$rndkey1,$inout1
+	dec		$rounds
+	aesdec		$rndkey1,$inout2
+	aesdec		$rndkey1,$inout3
+	aesdec		$rndkey1,$inout4
+	aesdec		$rndkey1,$inout5
+.Lxts_dec_loop6_enter:
+	$movkey		16($key),$rndkey1
+	aesdec		$rndkey0,$inout0
+	aesdec		$rndkey0,$inout1
+	lea		32($key),$key
+	aesdec		$rndkey0,$inout2
+	aesdec		$rndkey0,$inout3
+	aesdec		$rndkey0,$inout4
+	aesdec		$rndkey0,$inout5
+	$movkey		($key),$rndkey0
+	jnz		.Lxts_dec_loop6
+
+	pshufd	\$0x13,$twtmp,$twres
+	pxor	$twtmp,$twtmp
+	paddq	@tweak[5],@tweak[5]		# psllq	1,$tweak
+	 aesdec		$rndkey1,$inout0
+	pand	$twmask,$twres			# isolate carry and residue
+	 aesdec		$rndkey1,$inout1
+	pcmpgtd	@tweak[5],$twtmp		# broadcast upper bits
+	 aesdec		$rndkey1,$inout2
+	pxor	$twres,@tweak[5]
+	 aesdec		$rndkey1,$inout3
+	 aesdec		$rndkey1,$inout4
+	 aesdec		$rndkey1,$inout5
+	 $movkey	16($key),$rndkey1
+
+	pshufd	\$0x13,$twtmp,$twres
+	pxor	$twtmp,$twtmp
+	movdqa	@tweak[5],@tweak[0]
+	paddq	@tweak[5],@tweak[5]		# psllq	1,$tweak
+	 aesdec		$rndkey0,$inout0
+	pand	$twmask,$twres			# isolate carry and residue
+	 aesdec		$rndkey0,$inout1
+	pcmpgtd	@tweak[5],$twtmp		# broadcast upper bits
+	 aesdec		$rndkey0,$inout2
+	pxor	$twres,@tweak[5]
+	 aesdec		$rndkey0,$inout3
+	 aesdec		$rndkey0,$inout4
+	 aesdec		$rndkey0,$inout5
+	 $movkey	32($key),$rndkey0
+
+	pshufd	\$0x13,$twtmp,$twres
+	pxor	$twtmp,$twtmp
+	movdqa	@tweak[5],@tweak[1]
+	paddq	@tweak[5],@tweak[5]		# psllq	1,$tweak
+	 aesdec		$rndkey1,$inout0
+	pand	$twmask,$twres			# isolate carry and residue
+	 aesdec		$rndkey1,$inout1
+	pcmpgtd	@tweak[5],$twtmp		# broadcast upper bits
+	 aesdec		$rndkey1,$inout2
+	pxor	$twres,@tweak[5]
+	 aesdec		$rndkey1,$inout3
+	 aesdec		$rndkey1,$inout4
+	 aesdec		$rndkey1,$inout5
+
+	pshufd	\$0x13,$twtmp,$twres
+	pxor	$twtmp,$twtmp
+	movdqa	@tweak[5],@tweak[2]
+	paddq	@tweak[5],@tweak[5]		# psllq	1,$tweak
+	 aesdeclast	$rndkey0,$inout0
+	pand	$twmask,$twres			# isolate carry and residue
+	 aesdeclast	$rndkey0,$inout1
+	pcmpgtd	@tweak[5],$twtmp		# broadcast upper bits
+	 aesdeclast	$rndkey0,$inout2
+	pxor	$twres,@tweak[5]
+	 aesdeclast	$rndkey0,$inout3
+	 aesdeclast	$rndkey0,$inout4
+	 aesdeclast	$rndkey0,$inout5
+
+	pshufd	\$0x13,$twtmp,$twres
+	pxor	$twtmp,$twtmp
+	movdqa	@tweak[5],@tweak[3]
+	paddq	@tweak[5],@tweak[5]		# psllq	1,$tweak
+	 xorps	`16*0`(%rsp),$inout0		# output^=tweak
+	pand	$twmask,$twres			# isolate carry and residue
+	 xorps	`16*1`(%rsp),$inout1
+	pcmpgtd	@tweak[5],$twtmp		# broadcast upper bits
+	pxor	$twres,@tweak[5]
+
+	xorps	`16*2`(%rsp),$inout2
+	movups	$inout0,`16*0`($out)		# write output
+	xorps	`16*3`(%rsp),$inout3
+	movups	$inout1,`16*1`($out)
+	xorps	`16*4`(%rsp),$inout4
+	movups	$inout2,`16*2`($out)
+	xorps	`16*5`(%rsp),$inout5
+	movups	$inout3,`16*3`($out)
+	mov	$rnds_,$rounds			# restore $rounds
+	movups	$inout4,`16*4`($out)
+	movups	$inout5,`16*5`($out)
+	lea	`16*6`($out),$out
+	sub	\$16*6,$len
+	jnc	.Lxts_dec_grandloop
+
+	lea	3($rounds,$rounds),$rounds	# restore original value
+	mov	$key_,$key			# restore $key
+	mov	$rounds,$rnds_			# backup $rounds
+
+.Lxts_dec_short:
+	add	\$16*6,$len
+	jz	.Lxts_dec_done
+
+	cmp	\$0x20,$len
+	jb	.Lxts_dec_one
+	je	.Lxts_dec_two
+
+	cmp	\$0x40,$len
+	jb	.Lxts_dec_three
+	je	.Lxts_dec_four
+
+	pshufd	\$0x13,$twtmp,$twres
+	movdqa	@tweak[5],@tweak[4]
+	paddq	@tweak[5],@tweak[5]		# psllq 1,$tweak
+	 movdqu	($inp),$inout0
+	pand	$twmask,$twres			# isolate carry and residue
+	 movdqu	16*1($inp),$inout1
+	pxor	$twres,@tweak[5]
+
+	movdqu	16*2($inp),$inout2
+	pxor	@tweak[0],$inout0
+	movdqu	16*3($inp),$inout3
+	pxor	@tweak[1],$inout1
+	movdqu	16*4($inp),$inout4
+	lea	16*5($inp),$inp
+	pxor	@tweak[2],$inout2
+	pxor	@tweak[3],$inout3
+	pxor	@tweak[4],$inout4
+
+	call	_aesni_decrypt6
+
+	xorps	@tweak[0],$inout0
+	xorps	@tweak[1],$inout1
+	xorps	@tweak[2],$inout2
+	movdqu	$inout0,($out)
+	xorps	@tweak[3],$inout3
+	movdqu	$inout1,16*1($out)
+	xorps	@tweak[4],$inout4
+	movdqu	$inout2,16*2($out)
+	 pxor		$twtmp,$twtmp
+	movdqu	$inout3,16*3($out)
+	 pcmpgtd	@tweak[5],$twtmp
+	movdqu	$inout4,16*4($out)
+	lea	16*5($out),$out
+	 pshufd		\$0x13,$twtmp,@tweak[1]	# $twres
+	and	\$15,$len_
+	jz	.Lxts_dec_ret
+
+	movdqa	@tweak[5],@tweak[0]
+	paddq	@tweak[5],@tweak[5]		# psllq 1,$tweak
+	pand	$twmask,@tweak[1]		# isolate carry and residue
+	pxor	@tweak[5],@tweak[1]
+	jmp	.Lxts_dec_done2
+
+.align	16
+.Lxts_dec_one:
+	movups	($inp),$inout0
+	lea	16*1($inp),$inp
+	xorps	@tweak[0],$inout0
+___
+	&aesni_generate1("dec",$key,$rounds);
+$code.=<<___;
+	xorps	@tweak[0],$inout0
+	movdqa	@tweak[1],@tweak[0]
+	movups	$inout0,($out)
+	movdqa	@tweak[2],@tweak[1]
+	lea	16*1($out),$out
+	jmp	.Lxts_dec_done
+
+.align	16
+.Lxts_dec_two:
+	movups	($inp),$inout0
+	movups	16($inp),$inout1
+	lea	32($inp),$inp
+	xorps	@tweak[0],$inout0
+	xorps	@tweak[1],$inout1
+
+	call	_aesni_decrypt3
+
+	xorps	@tweak[0],$inout0
+	movdqa	@tweak[2],@tweak[0]
+	xorps	@tweak[1],$inout1
+	movdqa	@tweak[3],@tweak[1]
+	movups	$inout0,($out)
+	movups	$inout1,16*1($out)
+	lea	16*2($out),$out
+	jmp	.Lxts_dec_done
+
+.align	16
+.Lxts_dec_three:
+	movups	($inp),$inout0
+	movups	16*1($inp),$inout1
+	movups	16*2($inp),$inout2
+	lea	16*3($inp),$inp
+	xorps	@tweak[0],$inout0
+	xorps	@tweak[1],$inout1
+	xorps	@tweak[2],$inout2
+
+	call	_aesni_decrypt3
+
+	xorps	@tweak[0],$inout0
+	movdqa	@tweak[3],@tweak[0]
+	xorps	@tweak[1],$inout1
+	movdqa	@tweak[5],@tweak[1]
+	xorps	@tweak[2],$inout2
+	movups	$inout0,($out)
+	movups	$inout1,16*1($out)
+	movups	$inout2,16*2($out)
+	lea	16*3($out),$out
+	jmp	.Lxts_dec_done
+
+.align	16
+.Lxts_dec_four:
+	pshufd	\$0x13,$twtmp,$twres
+	movdqa	@tweak[5],@tweak[4]
+	paddq	@tweak[5],@tweak[5]		# psllq 1,$tweak
+	 movups	($inp),$inout0
+	pand	$twmask,$twres			# isolate carry and residue
+	 movups	16*1($inp),$inout1
+	pxor	$twres,@tweak[5]
+
+	movups	16*2($inp),$inout2
+	xorps	@tweak[0],$inout0
+	movups	16*3($inp),$inout3
+	lea	16*4($inp),$inp
+	xorps	@tweak[1],$inout1
+	xorps	@tweak[2],$inout2
+	xorps	@tweak[3],$inout3
+
+	call	_aesni_decrypt4
+
+	xorps	@tweak[0],$inout0
+	movdqa	@tweak[4],@tweak[0]
+	xorps	@tweak[1],$inout1
+	movdqa	@tweak[5],@tweak[1]
+	xorps	@tweak[2],$inout2
+	movups	$inout0,($out)
+	xorps	@tweak[3],$inout3
+	movups	$inout1,16*1($out)
+	movups	$inout2,16*2($out)
+	movups	$inout3,16*3($out)
+	lea	16*4($out),$out
+	jmp	.Lxts_dec_done
+
+.align	16
+.Lxts_dec_done:
+	and	\$15,$len_
+	jz	.Lxts_dec_ret
+.Lxts_dec_done2:
+	mov	$len_,$len
+	mov	$key_,$key			# restore $key
+	mov	$rnds_,$rounds			# restore $rounds
+
+	movups	($inp),$inout0
+	xorps	@tweak[1],$inout0
+___
+	&aesni_generate1("dec",$key,$rounds);
+$code.=<<___;
+	xorps	@tweak[1],$inout0
+	movups	$inout0,($out)
+
+.Lxts_dec_steal:
+	movzb	16($inp),%eax			# borrow $rounds ...
+	movzb	($out),%ecx			# ... and $key
+	lea	1($inp),$inp
+	mov	%al,($out)
+	mov	%cl,16($out)
+	lea	1($out),$out
+	sub	\$1,$len
+	jnz	.Lxts_dec_steal
+
+	sub	$len_,$out			# rewind $out
+	mov	$key_,$key			# restore $key
+	mov	$rnds_,$rounds			# restore $rounds
+
+	movups	($out),$inout0
+	xorps	@tweak[0],$inout0
+___
+	&aesni_generate1("dec",$key,$rounds);
+$code.=<<___;
+	xorps	@tweak[0],$inout0
+	movups	$inout0,($out)
+
+.Lxts_dec_ret:
+___
+$code.=<<___ if ($win64);
+	movaps	0x60(%rsp),%xmm6
+	movaps	0x70(%rsp),%xmm7
+	movaps	0x80(%rsp),%xmm8
+	movaps	0x90(%rsp),%xmm9
+	movaps	0xa0(%rsp),%xmm10
+	movaps	0xb0(%rsp),%xmm11
+	movaps	0xc0(%rsp),%xmm12
+	movaps	0xd0(%rsp),%xmm13
+	movaps	0xe0(%rsp),%xmm14
+	movaps	0xf0(%rsp),%xmm15
+___
+$code.=<<___;
+	lea	$frame_size(%rsp),%rsp
+.Lxts_dec_epilogue:
+	ret
+.size	aesni_xts_decrypt,.-aesni_xts_decrypt
+___
+} }}
+
+########################################################################
+# void $PREFIX_cbc_encrypt (const void *inp, void *out,
+#			    size_t length, const AES_KEY *key,
+#			    unsigned char *ivp,const int enc);
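+# A minimal caller sketch (C, illustrative only, shown with the "aesni"
+# prefix and hypothetical buffer names; the last argument selects the
+# direction, non-zero for encryption and zero for decryption):
+#
+#	AES_KEY ks;            /* encryption schedule when enc!=0,
+#	                          decryption schedule when enc==0  */
+#	unsigned char ivp[16]; /* IV, written back by the routine  */
+#	aesni_cbc_encrypt(in, out, length, &ks, ivp, 1);
+#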
+{
+my $reserved = $win64?0x40:-0x18;	# used in decrypt
+$code.=<<___;
+.globl	${PREFIX}_cbc_encrypt
+.type	${PREFIX}_cbc_encrypt,\@function,6
+.align	16
+${PREFIX}_cbc_encrypt:
+	test	$len,$len		# check length
+	jz	.Lcbc_ret
+
+	mov	240($key),$rnds_	# key->rounds
+	mov	$key,$key_		# backup $key
+	test	%r9d,%r9d		# 6th argument
+	jz	.Lcbc_decrypt
+#--------------------------- CBC ENCRYPT ------------------------------#
+	movups	($ivp),$inout0		# load iv as initial state
+	mov	$rnds_,$rounds
+	cmp	\$16,$len
+	jb	.Lcbc_enc_tail
+	sub	\$16,$len
+	jmp	.Lcbc_enc_loop
+.align	16
+.Lcbc_enc_loop:
+	movups	($inp),$inout1		# load input
+	lea	16($inp),$inp
+	#xorps	$inout1,$inout0
+___
+	&aesni_generate1("enc",$key,$rounds,$inout0,$inout1);
+$code.=<<___;
+	mov	$rnds_,$rounds		# restore $rounds
+	mov	$key_,$key		# restore $key
+	movups	$inout0,0($out)		# store output
+	lea	16($out),$out
+	sub	\$16,$len
+	jnc	.Lcbc_enc_loop
+	add	\$16,$len
+	jnz	.Lcbc_enc_tail
+	movups	$inout0,($ivp)
+	jmp	.Lcbc_ret
+
+.Lcbc_enc_tail:
+	mov	$len,%rcx	# zaps $key
+	xchg	$inp,$out	# $inp is %rsi and $out is %rdi now
+	.long	0x9066A4F3	# rep movsb
+	mov	\$16,%ecx	# zero tail
+	sub	$len,%rcx
+	xor	%eax,%eax
+	.long	0x9066AAF3	# rep stosb
+	lea	-16(%rdi),%rdi	# rewind $out by 1 block
+	mov	$rnds_,$rounds	# restore $rounds
+	mov	%rdi,%rsi	# $inp and $out are the same
+	mov	$key_,$key	# restore $key
+	xor	$len,$len	# len=16
+	jmp	.Lcbc_enc_loop	# one more spin
+#--------------------------- CBC DECRYPT ------------------------------#
+.align	16
+.Lcbc_decrypt:
+___
+$code.=<<___ if ($win64);
+	lea	-0x58(%rsp),%rsp
+	movaps	%xmm6,(%rsp)
+	movaps	%xmm7,0x10(%rsp)
+	movaps	%xmm8,0x20(%rsp)
+	movaps	%xmm9,0x30(%rsp)
+.Lcbc_decrypt_body:
+___
+$code.=<<___;
+	movups	($ivp),$iv
+	mov	$rnds_,$rounds
+	cmp	\$0x70,$len
+	jbe	.Lcbc_dec_tail
+	shr	\$1,$rnds_
+	sub	\$0x70,$len
+	mov	$rnds_,$rounds
+	movaps	$iv,$reserved(%rsp)
+	jmp	.Lcbc_dec_loop8_enter
+.align	16
+.Lcbc_dec_loop8:
+	movaps	$rndkey0,$reserved(%rsp)	# save IV
+	movups	$inout7,($out)
+	lea	0x10($out),$out
+.Lcbc_dec_loop8_enter:
+	$movkey		($key),$rndkey0
+	movups	($inp),$inout0			# load input
+	movups	0x10($inp),$inout1
+	$movkey		16($key),$rndkey1
+
+	lea		32($key),$key
+	movdqu	0x20($inp),$inout2
+	xorps		$rndkey0,$inout0
+	movdqu	0x30($inp),$inout3
+	xorps		$rndkey0,$inout1
+	movdqu	0x40($inp),$inout4
+	aesdec		$rndkey1,$inout0
+	pxor		$rndkey0,$inout2
+	movdqu	0x50($inp),$inout5
+	aesdec		$rndkey1,$inout1
+	pxor		$rndkey0,$inout3
+	movdqu	0x60($inp),$inout6
+	aesdec		$rndkey1,$inout2
+	pxor		$rndkey0,$inout4
+	movdqu	0x70($inp),$inout7
+	aesdec		$rndkey1,$inout3
+	pxor		$rndkey0,$inout5
+	dec		$rounds
+	aesdec		$rndkey1,$inout4
+	pxor		$rndkey0,$inout6
+	aesdec		$rndkey1,$inout5
+	pxor		$rndkey0,$inout7
+	$movkey		($key),$rndkey0
+	aesdec		$rndkey1,$inout6
+	aesdec		$rndkey1,$inout7
+	$movkey		16($key),$rndkey1
+
+	call		.Ldec_loop8_enter
+
+	movups	($inp),$rndkey1		# re-load input
+	movups	0x10($inp),$rndkey0
+	xorps	$reserved(%rsp),$inout0	# ^= IV
+	xorps	$rndkey1,$inout1
+	movups	0x20($inp),$rndkey1
+	xorps	$rndkey0,$inout2
+	movups	0x30($inp),$rndkey0
+	xorps	$rndkey1,$inout3
+	movups	0x40($inp),$rndkey1
+	xorps	$rndkey0,$inout4
+	movups	0x50($inp),$rndkey0
+	xorps	$rndkey1,$inout5
+	movups	0x60($inp),$rndkey1
+	xorps	$rndkey0,$inout6
+	movups	0x70($inp),$rndkey0	# IV
+	xorps	$rndkey1,$inout7
+	movups	$inout0,($out)
+	movups	$inout1,0x10($out)
+	movups	$inout2,0x20($out)
+	movups	$inout3,0x30($out)
+	mov	$rnds_,$rounds		# restore $rounds
+	movups	$inout4,0x40($out)
+	mov	$key_,$key		# restore $key
+	movups	$inout5,0x50($out)
+	lea	0x80($inp),$inp
+	movups	$inout6,0x60($out)
+	lea	0x70($out),$out
+	sub	\$0x80,$len
+	ja	.Lcbc_dec_loop8
+
+	movaps	$inout7,$inout0
+	movaps	$rndkey0,$iv
+	add	\$0x70,$len
+	jle	.Lcbc_dec_tail_collected
+	movups	$inout0,($out)
+	lea	1($rnds_,$rnds_),$rounds
+	lea	0x10($out),$out
+.Lcbc_dec_tail:
+	movups	($inp),$inout0
+	movaps	$inout0,$in0
+	cmp	\$0x10,$len
+	jbe	.Lcbc_dec_one
+
+	movups	0x10($inp),$inout1
+	movaps	$inout1,$in1
+	cmp	\$0x20,$len
+	jbe	.Lcbc_dec_two
+
+	movups	0x20($inp),$inout2
+	movaps	$inout2,$in2
+	cmp	\$0x30,$len
+	jbe	.Lcbc_dec_three
+
+	movups	0x30($inp),$inout3
+	cmp	\$0x40,$len
+	jbe	.Lcbc_dec_four
+
+	movups	0x40($inp),$inout4
+	cmp	\$0x50,$len
+	jbe	.Lcbc_dec_five
+
+	movups	0x50($inp),$inout5
+	cmp	\$0x60,$len
+	jbe	.Lcbc_dec_six
+
+	movups	0x60($inp),$inout6
+	movaps	$iv,$reserved(%rsp)	# save IV
+	call	_aesni_decrypt8
+	movups	($inp),$rndkey1
+	movups	0x10($inp),$rndkey0
+	xorps	$reserved(%rsp),$inout0	# ^= IV
+	xorps	$rndkey1,$inout1
+	movups	0x20($inp),$rndkey1
+	xorps	$rndkey0,$inout2
+	movups	0x30($inp),$rndkey0
+	xorps	$rndkey1,$inout3
+	movups	0x40($inp),$rndkey1
+	xorps	$rndkey0,$inout4
+	movups	0x50($inp),$rndkey0
+	xorps	$rndkey1,$inout5
+	movups	0x60($inp),$iv		# IV
+	xorps	$rndkey0,$inout6
+	movups	$inout0,($out)
+	movups	$inout1,0x10($out)
+	movups	$inout2,0x20($out)
+	movups	$inout3,0x30($out)
+	movups	$inout4,0x40($out)
+	movups	$inout5,0x50($out)
+	lea	0x60($out),$out
+	movaps	$inout6,$inout0
+	sub	\$0x70,$len
+	jmp	.Lcbc_dec_tail_collected
+.align	16
+.Lcbc_dec_one:
+___
+	&aesni_generate1("dec",$key,$rounds);
+$code.=<<___;
+	xorps	$iv,$inout0
+	movaps	$in0,$iv
+	sub	\$0x10,$len
+	jmp	.Lcbc_dec_tail_collected
+.align	16
+.Lcbc_dec_two:
+	xorps	$inout2,$inout2
+	call	_aesni_decrypt3
+	xorps	$iv,$inout0
+	xorps	$in0,$inout1
+	movups	$inout0,($out)
+	movaps	$in1,$iv
+	movaps	$inout1,$inout0
+	lea	0x10($out),$out
+	sub	\$0x20,$len
+	jmp	.Lcbc_dec_tail_collected
+.align	16
+.Lcbc_dec_three:
+	call	_aesni_decrypt3
+	xorps	$iv,$inout0
+	xorps	$in0,$inout1
+	movups	$inout0,($out)
+	xorps	$in1,$inout2
+	movups	$inout1,0x10($out)
+	movaps	$in2,$iv
+	movaps	$inout2,$inout0
+	lea	0x20($out),$out
+	sub	\$0x30,$len
+	jmp	.Lcbc_dec_tail_collected
+.align	16
+.Lcbc_dec_four:
+	call	_aesni_decrypt4
+	xorps	$iv,$inout0
+	movups	0x30($inp),$iv
+	xorps	$in0,$inout1
+	movups	$inout0,($out)
+	xorps	$in1,$inout2
+	movups	$inout1,0x10($out)
+	xorps	$in2,$inout3
+	movups	$inout2,0x20($out)
+	movaps	$inout3,$inout0
+	lea	0x30($out),$out
+	sub	\$0x40,$len
+	jmp	.Lcbc_dec_tail_collected
+.align	16
+.Lcbc_dec_five:
+	xorps	$inout5,$inout5
+	call	_aesni_decrypt6
+	movups	0x10($inp),$rndkey1
+	movups	0x20($inp),$rndkey0
+	xorps	$iv,$inout0
+	xorps	$in0,$inout1
+	xorps	$rndkey1,$inout2
+	movups	0x30($inp),$rndkey1
+	xorps	$rndkey0,$inout3
+	movups	0x40($inp),$iv
+	xorps	$rndkey1,$inout4
+	movups	$inout0,($out)
+	movups	$inout1,0x10($out)
+	movups	$inout2,0x20($out)
+	movups	$inout3,0x30($out)
+	lea	0x40($out),$out
+	movaps	$inout4,$inout0
+	sub	\$0x50,$len
+	jmp	.Lcbc_dec_tail_collected
+.align	16
+.Lcbc_dec_six:
+	call	_aesni_decrypt6
+	movups	0x10($inp),$rndkey1
+	movups	0x20($inp),$rndkey0
+	xorps	$iv,$inout0
+	xorps	$in0,$inout1
+	xorps	$rndkey1,$inout2
+	movups	0x30($inp),$rndkey1
+	xorps	$rndkey0,$inout3
+	movups	0x40($inp),$rndkey0
+	xorps	$rndkey1,$inout4
+	movups	0x50($inp),$iv
+	xorps	$rndkey0,$inout5
+	movups	$inout0,($out)
+	movups	$inout1,0x10($out)
+	movups	$inout2,0x20($out)
+	movups	$inout3,0x30($out)
+	movups	$inout4,0x40($out)
+	lea	0x50($out),$out
+	movaps	$inout5,$inout0
+	sub	\$0x60,$len
+	jmp	.Lcbc_dec_tail_collected
+.align	16
+.Lcbc_dec_tail_collected:
+	and	\$15,$len
+	movups	$iv,($ivp)
+	jnz	.Lcbc_dec_tail_partial
+	movups	$inout0,($out)
+	jmp	.Lcbc_dec_ret
+.align	16
+.Lcbc_dec_tail_partial:
+	movaps	$inout0,$reserved(%rsp)
+	mov	\$16,%rcx
+	mov	$out,%rdi
+	sub	$len,%rcx
+	lea	$reserved(%rsp),%rsi
+	.long	0x9066A4F3	# rep movsb
+
+.Lcbc_dec_ret:
+___
+$code.=<<___ if ($win64);
+	movaps	(%rsp),%xmm6
+	movaps	0x10(%rsp),%xmm7
+	movaps	0x20(%rsp),%xmm8
+	movaps	0x30(%rsp),%xmm9
+	lea	0x58(%rsp),%rsp
+___
+$code.=<<___;
+.Lcbc_ret:
+	ret
+.size	${PREFIX}_cbc_encrypt,.-${PREFIX}_cbc_encrypt
+___
+} 
+# int $PREFIX_set_[en|de]crypt_key (const unsigned char *userKey,
+#				int bits, AES_KEY *key)
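+# A minimal caller sketch (C, illustrative only). The return value follows
+# the code below: 0 on success, -1 for a NULL userKey or key pointer, and
+# -2 for key sizes other than 128, 192 or 256 bits:
+#
+#	AES_KEY ks;
+#	if (aesni_set_encrypt_key(userKey, 128, &ks) != 0)
+#		/* handle the error */;
+#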
+{ my ($inp,$bits,$key) = @_4args;
+  $bits =~ s/%r/%e/;
+
+$code.=<<___;
+.globl	${PREFIX}_set_decrypt_key
+.type	${PREFIX}_set_decrypt_key,\@abi-omnipotent
+.align	16
+${PREFIX}_set_decrypt_key:
+	.byte	0x48,0x83,0xEC,0x08	# sub rsp,8
+	call	__aesni_set_encrypt_key
+	shl	\$4,$bits		# rounds-1 after _aesni_set_encrypt_key
+	test	%eax,%eax
+	jnz	.Ldec_key_ret
+	lea	16($key,$bits),$inp	# points at the end of key schedule
+
+	$movkey	($key),%xmm0		# just swap
+	$movkey	($inp),%xmm1
+	$movkey	%xmm0,($inp)
+	$movkey	%xmm1,($key)
+	lea	16($key),$key
+	lea	-16($inp),$inp
+
+.Ldec_key_inverse:
+	$movkey	($key),%xmm0		# swap and inverse
+	$movkey	($inp),%xmm1
+	aesimc	%xmm0,%xmm0
+	aesimc	%xmm1,%xmm1
+	lea	16($key),$key
+	lea	-16($inp),$inp
+	$movkey	%xmm0,16($inp)
+	$movkey	%xmm1,-16($key)
+	cmp	$key,$inp
+	ja	.Ldec_key_inverse
+
+	$movkey	($key),%xmm0		# inverse middle
+	aesimc	%xmm0,%xmm0
+	$movkey	%xmm0,($inp)
+.Ldec_key_ret:
+	add	\$8,%rsp
+	ret
+.LSEH_end_set_decrypt_key:
+.size	${PREFIX}_set_decrypt_key,.-${PREFIX}_set_decrypt_key
+___
+
+# This is based on a submission by
+#
+#	Huang Ying <ying.huang@intel.com>
+#	Vinodh Gopal <vinodh.gopal@intel.com>
+#	Kahraman Akdemir
+#
+# Aggressively optimized with respect to aeskeygenassist's critical path;
+# the working set is contained in %xmm0-5 to meet the Win64 ABI requirement.
+#
+$code.=<<___;
+.globl	${PREFIX}_set_encrypt_key
+.type	${PREFIX}_set_encrypt_key,\@abi-omnipotent
+.align	16
+${PREFIX}_set_encrypt_key:
+__aesni_set_encrypt_key:
+	.byte	0x48,0x83,0xEC,0x08	# sub rsp,8
+	mov	\$-1,%rax
+	test	$inp,$inp
+	jz	.Lenc_key_ret
+	test	$key,$key
+	jz	.Lenc_key_ret
+
+	movups	($inp),%xmm0		# pull first 128 bits of *userKey
+	xorps	%xmm4,%xmm4		# low dword of xmm4 is assumed 0
+	lea	16($key),%rax
+	cmp	\$256,$bits
+	je	.L14rounds
+	cmp	\$192,$bits
+	je	.L12rounds
+	cmp	\$128,$bits
+	jne	.Lbad_keybits
+
+.L10rounds:
+	mov	\$9,$bits			# 10 rounds for 128-bit key
+	$movkey	%xmm0,($key)			# round 0
+	aeskeygenassist	\$0x1,%xmm0,%xmm1	# round 1
+	call		.Lkey_expansion_128_cold
+	aeskeygenassist	\$0x2,%xmm0,%xmm1	# round 2
+	call		.Lkey_expansion_128
+	aeskeygenassist	\$0x4,%xmm0,%xmm1	# round 3
+	call		.Lkey_expansion_128
+	aeskeygenassist	\$0x8,%xmm0,%xmm1	# round 4
+	call		.Lkey_expansion_128
+	aeskeygenassist	\$0x10,%xmm0,%xmm1	# round 5
+	call		.Lkey_expansion_128
+	aeskeygenassist	\$0x20,%xmm0,%xmm1	# round 6
+	call		.Lkey_expansion_128
+	aeskeygenassist	\$0x40,%xmm0,%xmm1	# round 7
+	call		.Lkey_expansion_128
+	aeskeygenassist	\$0x80,%xmm0,%xmm1	# round 8
+	call		.Lkey_expansion_128
+	aeskeygenassist	\$0x1b,%xmm0,%xmm1	# round 9
+	call		.Lkey_expansion_128
+	aeskeygenassist	\$0x36,%xmm0,%xmm1	# round 10
+	call		.Lkey_expansion_128
+	$movkey	%xmm0,(%rax)
+	mov	$bits,80(%rax)	# 240(%rdx)
+	xor	%eax,%eax
+	jmp	.Lenc_key_ret
+
+.align	16
+.L12rounds:
+	movq	16($inp),%xmm2			# remaining 1/3 of *userKey
+	mov	\$11,$bits			# 12 rounds for 192
+	$movkey	%xmm0,($key)			# round 0
+	aeskeygenassist	\$0x1,%xmm2,%xmm1	# round 1,2
+	call		.Lkey_expansion_192a_cold
+	aeskeygenassist	\$0x2,%xmm2,%xmm1	# round 2,3
+	call		.Lkey_expansion_192b
+	aeskeygenassist	\$0x4,%xmm2,%xmm1	# round 4,5
+	call		.Lkey_expansion_192a
+	aeskeygenassist	\$0x8,%xmm2,%xmm1	# round 5,6
+	call		.Lkey_expansion_192b
+	aeskeygenassist	\$0x10,%xmm2,%xmm1	# round 7,8
+	call		.Lkey_expansion_192a
+	aeskeygenassist	\$0x20,%xmm2,%xmm1	# round 8,9
+	call		.Lkey_expansion_192b
+	aeskeygenassist	\$0x40,%xmm2,%xmm1	# round 10,11
+	call		.Lkey_expansion_192a
+	aeskeygenassist	\$0x80,%xmm2,%xmm1	# round 11,12
+	call		.Lkey_expansion_192b
+	$movkey	%xmm0,(%rax)
+	mov	$bits,48(%rax)	# 240(%rdx)
+	xor	%rax, %rax
+	jmp	.Lenc_key_ret
+
+.align	16
+.L14rounds:
+	movups	16($inp),%xmm2			# remaining half of *userKey
+	mov	\$13,$bits			# 14 rounds for 256
+	lea	16(%rax),%rax
+	$movkey	%xmm0,($key)			# round 0
+	$movkey	%xmm2,16($key)			# round 1
+	aeskeygenassist	\$0x1,%xmm2,%xmm1	# round 2
+	call		.Lkey_expansion_256a_cold
+	aeskeygenassist	\$0x1,%xmm0,%xmm1	# round 3
+	call		.Lkey_expansion_256b
+	aeskeygenassist	\$0x2,%xmm2,%xmm1	# round 4
+	call		.Lkey_expansion_256a
+	aeskeygenassist	\$0x2,%xmm0,%xmm1	# round 5
+	call		.Lkey_expansion_256b
+	aeskeygenassist	\$0x4,%xmm2,%xmm1	# round 6
+	call		.Lkey_expansion_256a
+	aeskeygenassist	\$0x4,%xmm0,%xmm1	# round 7
+	call		.Lkey_expansion_256b
+	aeskeygenassist	\$0x8,%xmm2,%xmm1	# round 8
+	call		.Lkey_expansion_256a
+	aeskeygenassist	\$0x8,%xmm0,%xmm1	# round 9
+	call		.Lkey_expansion_256b
+	aeskeygenassist	\$0x10,%xmm2,%xmm1	# round 10
+	call		.Lkey_expansion_256a
+	aeskeygenassist	\$0x10,%xmm0,%xmm1	# round 11
+	call		.Lkey_expansion_256b
+	aeskeygenassist	\$0x20,%xmm2,%xmm1	# round 12
+	call		.Lkey_expansion_256a
+	aeskeygenassist	\$0x20,%xmm0,%xmm1	# round 13
+	call		.Lkey_expansion_256b
+	aeskeygenassist	\$0x40,%xmm2,%xmm1	# round 14
+	call		.Lkey_expansion_256a
+	$movkey	%xmm0,(%rax)
+	mov	$bits,16(%rax)	# 240(%rdx)
+	xor	%rax,%rax
+	jmp	.Lenc_key_ret
+
+.align	16
+.Lbad_keybits:
+	mov	\$-2,%rax
+.Lenc_key_ret:
+	add	\$8,%rsp
+	ret
+.LSEH_end_set_encrypt_key:
+
+.align	16
+.Lkey_expansion_128:
+	$movkey	%xmm0,(%rax)
+	lea	16(%rax),%rax
+.Lkey_expansion_128_cold:
+	shufps	\$0b00010000,%xmm0,%xmm4
+	xorps	%xmm4, %xmm0
+	shufps	\$0b10001100,%xmm0,%xmm4
+	xorps	%xmm4, %xmm0
+	shufps	\$0b11111111,%xmm1,%xmm1	# critical path
+	xorps	%xmm1,%xmm0
+	ret
+
+.align 16
+.Lkey_expansion_192a:
+	$movkey	%xmm0,(%rax)
+	lea	16(%rax),%rax
+.Lkey_expansion_192a_cold:
+	movaps	%xmm2, %xmm5
+.Lkey_expansion_192b_warm:
+	shufps	\$0b00010000,%xmm0,%xmm4
+	movdqa	%xmm2,%xmm3
+	xorps	%xmm4,%xmm0
+	shufps	\$0b10001100,%xmm0,%xmm4
+	pslldq	\$4,%xmm3
+	xorps	%xmm4,%xmm0
+	pshufd	\$0b01010101,%xmm1,%xmm1	# critical path
+	pxor	%xmm3,%xmm2
+	pxor	%xmm1,%xmm0
+	pshufd	\$0b11111111,%xmm0,%xmm3
+	pxor	%xmm3,%xmm2
+	ret
+
+.align 16
+.Lkey_expansion_192b:
+	movaps	%xmm0,%xmm3
+	shufps	\$0b01000100,%xmm0,%xmm5
+	$movkey	%xmm5,(%rax)
+	shufps	\$0b01001110,%xmm2,%xmm3
+	$movkey	%xmm3,16(%rax)
+	lea	32(%rax),%rax
+	jmp	.Lkey_expansion_192b_warm
+
+.align	16
+.Lkey_expansion_256a:
+	$movkey	%xmm2,(%rax)
+	lea	16(%rax),%rax
+.Lkey_expansion_256a_cold:
+	shufps	\$0b00010000,%xmm0,%xmm4
+	xorps	%xmm4,%xmm0
+	shufps	\$0b10001100,%xmm0,%xmm4
+	xorps	%xmm4,%xmm0
+	shufps	\$0b11111111,%xmm1,%xmm1	# critical path
+	xorps	%xmm1,%xmm0
+	ret
+
+.align 16
+.Lkey_expansion_256b:
+	$movkey	%xmm0,(%rax)
+	lea	16(%rax),%rax
+
+	shufps	\$0b00010000,%xmm2,%xmm4
+	xorps	%xmm4,%xmm2
+	shufps	\$0b10001100,%xmm2,%xmm4
+	xorps	%xmm4,%xmm2
+	shufps	\$0b10101010,%xmm1,%xmm1	# critical path
+	xorps	%xmm1,%xmm2
+	ret
+.size	${PREFIX}_set_encrypt_key,.-${PREFIX}_set_encrypt_key
+.size	__aesni_set_encrypt_key,.-__aesni_set_encrypt_key
+___
+}
+
+$code.=<<___;
+.align	64
+.Lbswap_mask:
+	.byte	15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
+.Lincrement32:
+	.long	6,6,6,0
+.Lincrement64:
+	.long	1,0,0,0
+.Lxts_magic:
+	.long	0x87,0,1,0
+
+.asciz  "AES for Intel AES-NI, CRYPTOGAMS by <appro\@openssl.org>"
+.align	64
+___
+
+# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
+#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
+if ($win64) {
+$rec="%rcx";
+$frame="%rdx";
+$context="%r8";
+$disp="%r9";
+
+$code.=<<___;
+.extern	__imp_RtlVirtualUnwind
+___
+$code.=<<___ if ($PREFIX eq "aesni");
+.type	ecb_se_handler,\@abi-omnipotent
+.align	16
+ecb_se_handler:
+	push	%rsi
+	push	%rdi
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	pushfq
+	sub	\$64,%rsp
+
+	mov	152($context),%rax	# pull context->Rsp
+
+	jmp	.Lcommon_seh_tail
+.size	ecb_se_handler,.-ecb_se_handler
+
+.type	ccm64_se_handler,\@abi-omnipotent
+.align	16
+ccm64_se_handler:
+	push	%rsi
+	push	%rdi
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	pushfq
+	sub	\$64,%rsp
+
+	mov	120($context),%rax	# pull context->Rax
+	mov	248($context),%rbx	# pull context->Rip
+
+	mov	8($disp),%rsi		# disp->ImageBase
+	mov	56($disp),%r11		# disp->HandlerData
+
+	mov	0(%r11),%r10d		# HandlerData[0]
+	lea	(%rsi,%r10),%r10	# prologue label
+	cmp	%r10,%rbx		# context->Rip<prologue label
+	jb	.Lcommon_seh_tail
+
+	mov	152($context),%rax	# pull context->Rsp
+
+	mov	4(%r11),%r10d		# HandlerData[1]
+	lea	(%rsi,%r10),%r10	# epilogue label
+	cmp	%r10,%rbx		# context->Rip>=epilogue label
+	jae	.Lcommon_seh_tail
+
+	lea	0(%rax),%rsi		# %xmm save area
+	lea	512($context),%rdi	# &context.Xmm6
+	mov	\$8,%ecx		# 4*sizeof(%xmm0)/sizeof(%rax)
+	.long	0xa548f3fc		# cld; rep movsq
+	lea	0x58(%rax),%rax		# adjust stack pointer
+
+	jmp	.Lcommon_seh_tail
+.size	ccm64_se_handler,.-ccm64_se_handler
+
+.type	ctr32_se_handler,\@abi-omnipotent
+.align	16
+ctr32_se_handler:
+	push	%rsi
+	push	%rdi
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	pushfq
+	sub	\$64,%rsp
+
+	mov	120($context),%rax	# pull context->Rax
+	mov	248($context),%rbx	# pull context->Rip
+
+	lea	.Lctr32_body(%rip),%r10
+	cmp	%r10,%rbx		# context->Rip<"prologue" label
+	jb	.Lcommon_seh_tail
+
+	mov	152($context),%rax	# pull context->Rsp
+
+	lea	.Lctr32_ret(%rip),%r10
+	cmp	%r10,%rbx
+	jae	.Lcommon_seh_tail
+
+	lea	0x20(%rax),%rsi		# %xmm save area
+	lea	512($context),%rdi	# &context.Xmm6
+	mov	\$20,%ecx		# 10*sizeof(%xmm0)/sizeof(%rax)
+	.long	0xa548f3fc		# cld; rep movsq
+	lea	0xc8(%rax),%rax		# adjust stack pointer
+
+	jmp	.Lcommon_seh_tail
+.size	ctr32_se_handler,.-ctr32_se_handler
+
+.type	xts_se_handler,\@abi-omnipotent
+.align	16
+xts_se_handler:
+	push	%rsi
+	push	%rdi
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	pushfq
+	sub	\$64,%rsp
+
+	mov	120($context),%rax	# pull context->Rax
+	mov	248($context),%rbx	# pull context->Rip
+
+	mov	8($disp),%rsi		# disp->ImageBase
+	mov	56($disp),%r11		# disp->HandlerData
+
+	mov	0(%r11),%r10d		# HandlerData[0]
+	lea	(%rsi,%r10),%r10	# prologue label
+	cmp	%r10,%rbx		# context->Rip<prologue label
+	jb	.Lcommon_seh_tail
+
+	mov	152($context),%rax	# pull context->Rsp
+
+	mov	4(%r11),%r10d		# HandlerData[1]
+	lea	(%rsi,%r10),%r10	# epilogue label
+	cmp	%r10,%rbx		# context->Rip>=epilogue label
+	jae	.Lcommon_seh_tail
+
+	lea	0x60(%rax),%rsi		# %xmm save area
+	lea	512($context),%rdi	# & context.Xmm6
+	mov	\$20,%ecx		# 10*sizeof(%xmm0)/sizeof(%rax)
+	.long	0xa548f3fc		# cld; rep movsq
+	lea	0x68+160(%rax),%rax	# adjust stack pointer
+
+	jmp	.Lcommon_seh_tail
+.size	xts_se_handler,.-xts_se_handler
+___
+$code.=<<___;
+.type	cbc_se_handler,\@abi-omnipotent
+.align	16
+cbc_se_handler:
+	push	%rsi
+	push	%rdi
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	pushfq
+	sub	\$64,%rsp
+
+	mov	152($context),%rax	# pull context->Rsp
+	mov	248($context),%rbx	# pull context->Rip
+
+	lea	.Lcbc_decrypt(%rip),%r10
+	cmp	%r10,%rbx		# context->Rip<"prologue" label
+	jb	.Lcommon_seh_tail
+
+	lea	.Lcbc_decrypt_body(%rip),%r10
+	cmp	%r10,%rbx		# context->Rip<cbc_decrypt_body
+	jb	.Lrestore_cbc_rax
+
+	lea	.Lcbc_ret(%rip),%r10
+	cmp	%r10,%rbx		# context->Rip>="epilogue" label
+	jae	.Lcommon_seh_tail
+
+	lea	0(%rax),%rsi		# top of stack
+	lea	512($context),%rdi	# &context.Xmm6
+	mov	\$8,%ecx		# 4*sizeof(%xmm0)/sizeof(%rax)
+	.long	0xa548f3fc		# cld; rep movsq
+	lea	0x58(%rax),%rax		# adjust stack pointer
+	jmp	.Lcommon_seh_tail
+
+.Lrestore_cbc_rax:
+	mov	120($context),%rax
+
+.Lcommon_seh_tail:
+	mov	8(%rax),%rdi
+	mov	16(%rax),%rsi
+	mov	%rax,152($context)	# restore context->Rsp
+	mov	%rsi,168($context)	# restore context->Rsi
+	mov	%rdi,176($context)	# restore context->Rdi
+
+	mov	40($disp),%rdi		# disp->ContextRecord
+	mov	$context,%rsi		# context
+	mov	\$154,%ecx		# sizeof(CONTEXT)
+	.long	0xa548f3fc		# cld; rep movsq
+
+	mov	$disp,%rsi
+	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
+	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
+	mov	0(%rsi),%r8		# arg3, disp->ControlPc
+	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
+	mov	40(%rsi),%r10		# disp->ContextRecord
+	lea	56(%rsi),%r11		# &disp->HandlerData
+	lea	24(%rsi),%r12		# &disp->EstablisherFrame
+	mov	%r10,32(%rsp)		# arg5
+	mov	%r11,40(%rsp)		# arg6
+	mov	%r12,48(%rsp)		# arg7
+	mov	%rcx,56(%rsp)		# arg8, (NULL)
+	call	*__imp_RtlVirtualUnwind(%rip)
+
+	mov	\$1,%eax		# ExceptionContinueSearch
+	add	\$64,%rsp
+	popfq
+	pop	%r15
+	pop	%r14
+	pop	%r13
+	pop	%r12
+	pop	%rbp
+	pop	%rbx
+	pop	%rdi
+	pop	%rsi
+	ret
+.size	cbc_se_handler,.-cbc_se_handler
+
+.section	.pdata
+.align	4
+___
+$code.=<<___ if ($PREFIX eq "aesni");
+	.rva	.LSEH_begin_aesni_ecb_encrypt
+	.rva	.LSEH_end_aesni_ecb_encrypt
+	.rva	.LSEH_info_ecb
+
+	.rva	.LSEH_begin_aesni_ccm64_encrypt_blocks
+	.rva	.LSEH_end_aesni_ccm64_encrypt_blocks
+	.rva	.LSEH_info_ccm64_enc
+
+	.rva	.LSEH_begin_aesni_ccm64_decrypt_blocks
+	.rva	.LSEH_end_aesni_ccm64_decrypt_blocks
+	.rva	.LSEH_info_ccm64_dec
+
+	.rva	.LSEH_begin_aesni_ctr32_encrypt_blocks
+	.rva	.LSEH_end_aesni_ctr32_encrypt_blocks
+	.rva	.LSEH_info_ctr32
+
+	.rva	.LSEH_begin_aesni_xts_encrypt
+	.rva	.LSEH_end_aesni_xts_encrypt
+	.rva	.LSEH_info_xts_enc
+
+	.rva	.LSEH_begin_aesni_xts_decrypt
+	.rva	.LSEH_end_aesni_xts_decrypt
+	.rva	.LSEH_info_xts_dec
+___
+$code.=<<___;
+	.rva	.LSEH_begin_${PREFIX}_cbc_encrypt
+	.rva	.LSEH_end_${PREFIX}_cbc_encrypt
+	.rva	.LSEH_info_cbc
+
+	.rva	${PREFIX}_set_decrypt_key
+	.rva	.LSEH_end_set_decrypt_key
+	.rva	.LSEH_info_key
+
+	.rva	${PREFIX}_set_encrypt_key
+	.rva	.LSEH_end_set_encrypt_key
+	.rva	.LSEH_info_key
+.section	.xdata
+.align	8
+___
+$code.=<<___ if ($PREFIX eq "aesni");
+.LSEH_info_ecb:
+	.byte	9,0,0,0
+	.rva	ecb_se_handler
+.LSEH_info_ccm64_enc:
+	.byte	9,0,0,0
+	.rva	ccm64_se_handler
+	.rva	.Lccm64_enc_body,.Lccm64_enc_ret	# HandlerData[]
+.LSEH_info_ccm64_dec:
+	.byte	9,0,0,0
+	.rva	ccm64_se_handler
+	.rva	.Lccm64_dec_body,.Lccm64_dec_ret	# HandlerData[]
+.LSEH_info_ctr32:
+	.byte	9,0,0,0
+	.rva	ctr32_se_handler
+.LSEH_info_xts_enc:
+	.byte	9,0,0,0
+	.rva	xts_se_handler
+	.rva	.Lxts_enc_body,.Lxts_enc_epilogue	# HandlerData[]
+.LSEH_info_xts_dec:
+	.byte	9,0,0,0
+	.rva	xts_se_handler
+	.rva	.Lxts_dec_body,.Lxts_dec_epilogue	# HandlerData[]
+___
+$code.=<<___;
+.LSEH_info_cbc:
+	.byte	9,0,0,0
+	.rva	cbc_se_handler
+.LSEH_info_key:
+	.byte	0x01,0x04,0x01,0x00
+	.byte	0x04,0x02,0x00,0x00	# sub rsp,8
+___
+}
+
+sub rex {
+  local *opcode=shift;
+  my ($dst,$src)=@_;
+  my $rex=0;
+
+    $rex|=0x04			if($dst>=8);
+    $rex|=0x01			if($src>=8);
+    push @opcode,$rex|0x40	if($rex);
+}
+
+sub aesni {
+  my $line=shift;
+  my @opcode=(0x66);
+
+    if ($line=~/(aeskeygenassist)\s+\$([x0-9a-f]+),\s*%xmm([0-9]+),\s*%xmm([0-9]+)/) {
+	rex(\@opcode,$4,$3);
+	push @opcode,0x0f,0x3a,0xdf;
+	push @opcode,0xc0|($3&7)|(($4&7)<<3);	# ModR/M
+	my $c=$2;
+	push @opcode,$c=~/^0/?oct($c):$c;
+	return ".byte\t".join(',',@opcode);
+    }
+    elsif ($line=~/(aes[a-z]+)\s+%xmm([0-9]+),\s*%xmm([0-9]+)/) {
+	my %opcodelet = (
+		"aesimc" => 0xdb,
+		"aesenc" => 0xdc,	"aesenclast" => 0xdd,
+		"aesdec" => 0xde,	"aesdeclast" => 0xdf
+	);
+	return undef if (!defined($opcodelet{$1}));
+	rex(\@opcode,$3,$2);
+	push @opcode,0x0f,0x38,$opcodelet{$1};
+	push @opcode,0xc0|($2&7)|(($3&7)<<3);	# ModR/M
+	return ".byte\t".join(',',@opcode);
+    }
+    return $line;
+}
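+
+# Illustration (not part of the generated output): for assemblers without
+# AES-NI support the aesni() filter above rewrites, for instance,
+#
+#	aesenc	%xmm1,%xmm0
+#
+# into its raw encoding
+#
+#	.byte	0x66,0x0f,0x38,0xdc,0xc1
+#
+# where the last byte is the ModR/M byte 0xc0|($src&7)|(($dst&7)<<3)
+# (here reg=xmm0, r/m=xmm1) and rex() prepends a REX prefix whenever
+# either register index is 8 or above.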
+
+$code =~ s/\`([^\`]*)\`/eval($1)/gem;
+$code =~ s/\b(aes.*%xmm[0-9]+).*$/aesni($1)/gem;
+
+print $code;
+
+close STDOUT;
diff --git a/jni/openssl/crypto/aes/asm/bsaes-x86_64.pl b/jni/openssl/crypto/aes/asm/bsaes-x86_64.pl
new file mode 100644
index 0000000..c9c6312
--- /dev/null
+++ b/jni/openssl/crypto/aes/asm/bsaes-x86_64.pl
@@ -0,0 +1,3044 @@
+#!/usr/bin/env perl
+
+###################################################################
+### AES-128 [originally in CTR mode]				###
+### bitsliced implementation for Intel Core 2 processors	###
+### requires support of SSE extensions up to SSSE3		###
+### Author: Emilia Käsper and Peter Schwabe			###
+### Date: 2009-03-19						###
+### Public domain						###
+###								###
+### See http://homes.esat.kuleuven.be/~ekasper/#software for	###
+### further information.					###
+###################################################################
+#
+# September 2011.
+#
+# Having started as a transliteration of the original code to "perlasm",
+# this module has undergone the following changes:
+#
+# - code was made position-independent;
+# - rounds were folded into a loop, resulting in a >5x size reduction
+#   from 12.5KB to 2.2KB;
+# - the above was possible thanks to a mixcolumns() modification that
+#   allows its output to be fed back to aesenc[last]; this was
+#   achieved at the cost of two additional inter-register moves;
+# - some instruction reordering and interleaving;
+# - this module doesn't implement a key setup subroutine; instead it
+#   relies on conversion of the "conventional" key schedule as returned
+#   by AES_set_encrypt_key (see discussion below);
+# - first and last round keys are treated differently, which makes it
+#   possible to skip one shiftrows(), reduce the bit-sliced key schedule
+#   and speed up conversion by 22%;
+# - support for 192- and 256-bit keys was added;
+#
+# Resulting performance in CPU cycles spent to encrypt one byte out
+# of 4096-byte buffer with 128-bit key is:
+#
+#		Emilia's	this(*)		difference
+#
+# Core 2    	9.30		8.69		+7%
+# Nehalem(**) 	7.63		6.98		+9%
+# Atom	    	17.1		17.4		-2%(***)
+#
+# (*)	Comparison is not completely fair, because "this" is ECB,
+#	i.e. no extra processing such as counter value calculation
+#	and xor-ing of the input, as in Emilia's CTR implementation,
+#	is performed. However, the CTR calculations account for no
+#	more than 1% of total time, so the comparison is *rather* fair.
+#
+# (**)	Results were collected on Westmere, which is considered to
+#	be equivalent to Nehalem for this code.
+#
+# (***)	Slowdown on Atom is rather strange per se, because the original
+#	implementation has a number of 9+-byte instructions, which
+#	are bad for the Atom front-end, and which I eliminated completely.
+#	In an attempt to address the deterioration, sbox() was tested in
+#	the FP SIMD "domain" (movaps instead of movdqa, xorps instead of
+#	pxor, etc.). While this gave a nominal 4% improvement on Atom,
+#	it hurt Westmere by more than a 2x factor.
+#
+# As for the key schedule conversion subroutine: the OpenSSL interface
+# relies on per-invocation on-the-fly conversion. This naturally
+# has an impact on performance, especially for short inputs. Conversion
+# time in CPU cycles and its ratio to the CPU cycles spent in the 8x
+# block function is:
+#
+# 		conversion	conversion/8x block
+# Core 2	240		0.22
+# Nehalem	180		0.20
+# Atom		430		0.19
+#
+# The ratio values mean that 128-byte blocks will be processed
+# 16-18% slower, 256-byte blocks 9-10% slower, 384-byte blocks 6-7%,
+# etc. Also keep in mind that input sizes not divisible by 128 are
+# *effectively* slower still, especially the shortest ones: e.g.
+# consecutive 144-byte blocks are processed 44% slower than one would
+# expect, 272-byte ones 29% slower, 400-byte ones 22%, etc. Yet,
+# despite all these "shortcomings", it's still faster than the
+# ["hyper-threading-safe" code path in] aes-x86_64.pl on all lengths
+# above 64 bytes...
+#
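+# (A rough sanity check of the figures above, under the reading that the
+#  percentages are conversion time as a share of the total: with a
+#  conversion/8x-block ratio r of ~0.19-0.22, a single 128-byte batch
+#  spends r/(1+r), i.e. ~16-18%, of its time converting; two batches
+#  amortize the same conversion over twice the work, giving ~9-10%,
+#  three batches ~6-7%, and so on.)
+#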
+# October 2011.
+#
+# Add decryption procedure. Performance in CPU cycles spent to decrypt
+# one byte out of 4096-byte buffer with 128-bit key is:
+#
+# Core 2	11.0
+# Nehalem	9.16
+# Atom		20.9
+#
+# November 2011.
+#
+# Add bsaes_xts_[en|de]crypt. Performance for blocks shorter than 80
+# bytes is suboptimal, but XTS is meant to be used with larger blocks...
+#
+#						<appro@openssl.org>
+
+$flavour = shift;
+$output  = shift;
+if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
+
+$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
+die "can't locate x86_64-xlate.pl";
+
+open STDOUT,"| $^X $xlate $flavour $output";
+
+my ($inp,$out,$len,$key,$ivp)=("%rdi","%rsi","%rdx","%rcx");
+my @XMM=map("%xmm$_",(15,0..14));	# best on Atom, +10% over (0..15)
+my $ecb=0;	# suppress unreferenced ECB subroutines, spare some space...
+
+{
+my ($key,$rounds,$const)=("%rax","%r10d","%r11");
+
+sub Sbox {
+# input in  lsb > [b0, b1, b2, b3, b4, b5, b6, b7] < msb
+# output in lsb > [b0, b1, b4, b6, b3, b7, b2, b5] < msb
+my @b=@_[0..7];
+my @t=@_[8..11];
+my @s=@_[12..15];
+	&InBasisChange	(@b);
+	&Inv_GF256	(@b[6,5,0,3,7,1,4,2],@t,@s);
+	&OutBasisChange	(@b[7,1,4,2,6,5,0,3]);
+}
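+
+# (Informally: the AES S-box is "affine map after inversion in GF(2^8)".
+#  InBasisChange/OutBasisChange convert between the AES polynomial basis
+#  and the tower-field basis used by Inv_GF256, with the S-box's linear
+#  affine layer merged into those basis changes -- the 0x63 constant is
+#  handled via the key schedule conversion -- while Inv_GF256 performs
+#  the actual inversion on the bit-sliced state.)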
+
+sub InBasisChange {
+# input in  lsb > [b0, b1, b2, b3, b4, b5, b6, b7] < msb
+# output in lsb > [b6, b5, b0, b3, b7, b1, b4, b2] < msb 
+my @b=@_[0..7];
+$code.=<<___;
+	pxor	@b[6], @b[5]
+	pxor	@b[1], @b[2]
+	pxor	@b[0], @b[3]
+	pxor	@b[2], @b[6]
+	pxor 	@b[0], @b[5]
+
+	pxor	@b[3], @b[6]
+	pxor	@b[7], @b[3]
+	pxor	@b[5], @b[7]
+	pxor	@b[4], @b[3]
+	pxor	@b[5], @b[4]
+	pxor	@b[1], @b[3]
+
+	pxor	@b[7], @b[2]
+	pxor	@b[5], @b[1]
+___
+}
+
+sub OutBasisChange {
+# input in  lsb > [b0, b1, b2, b3, b4, b5, b6, b7] < msb
+# output in lsb > [b6, b1, b2, b4, b7, b0, b3, b5] < msb
+my @b=@_[0..7];
+$code.=<<___;
+	pxor	@b[6], @b[0]
+	pxor	@b[4], @b[1]
+	pxor	@b[0], @b[2]
+	pxor	@b[6], @b[4]
+	pxor	@b[1], @b[6]
+
+	pxor	@b[5], @b[1]
+	pxor	@b[3], @b[5]
+	pxor	@b[7], @b[3]
+	pxor	@b[5], @b[7]
+	pxor	@b[5], @b[2]
+
+	pxor	@b[7], @b[4]
+___
+}
+
+sub InvSbox {
+# input in lsb 	> [b0, b1, b2, b3, b4, b5, b6, b7] < msb
+# output in lsb	> [b0, b1, b6, b4, b2, b7, b3, b5] < msb
+my @b=@_[0..7];
+my @t=@_[8..11];
+my @s=@_[12..15];
+	&InvInBasisChange	(@b);
+	&Inv_GF256		(@b[5,1,2,6,3,7,0,4],@t,@s);
+	&InvOutBasisChange	(@b[3,7,0,4,5,1,2,6]);
+}
+
+sub InvInBasisChange {		# OutBasisChange in reverse
+my @b=@_[5,1,2,6,3,7,0,4];
+$code.=<<___
+	pxor	@b[7], @b[4]
+
+	pxor	@b[5], @b[7]
+	pxor	@b[5], @b[2]
+	pxor	@b[7], @b[3]
+	pxor	@b[3], @b[5]
+	pxor	@b[5], @b[1]
+
+	pxor	@b[1], @b[6]
+	pxor	@b[0], @b[2]
+	pxor	@b[6], @b[4]
+	pxor	@b[6], @b[0]
+	pxor	@b[4], @b[1]
+___
+}
+
+sub InvOutBasisChange {		# InBasisChange in reverse
+my @b=@_[2,5,7,3,6,1,0,4];
+$code.=<<___;
+	pxor	@b[5], @b[1]
+	pxor	@b[7], @b[2]
+
+	pxor	@b[1], @b[3]
+	pxor	@b[5], @b[4]
+	pxor	@b[5], @b[7]
+	pxor	@b[4], @b[3]
+	 pxor 	@b[0], @b[5]
+	pxor	@b[7], @b[3]
+	 pxor	@b[2], @b[6]
+	 pxor	@b[1], @b[2]
+	pxor	@b[3], @b[6]
+
+	pxor	@b[0], @b[3]
+	pxor	@b[6], @b[5]
+___
+}
+
+sub Mul_GF4 {
+#;*************************************************************
+#;* Mul_GF4: Input x0-x1,y0-y1 Output x0-x1 Temp t0 (8) *
+#;*************************************************************
+my ($x0,$x1,$y0,$y1,$t0)=@_;
+$code.=<<___;
+	movdqa	$y0, $t0
+	pxor 	$y1, $t0
+	pand	$x0, $t0
+	pxor	$x1, $x0
+	pand	$y0, $x1
+	pand	$y1, $x0
+	pxor	$x1, $x0
+	pxor	$t0, $x1
+___
+}
+
+sub Mul_GF4_N {				# not used, see next subroutine
+# multiply and scale by N
+my ($x0,$x1,$y0,$y1,$t0)=@_;
+$code.=<<___;
+	movdqa	$y0, $t0
+	pxor	$y1, $t0
+	pand	$x0, $t0
+	pxor	$x1, $x0
+	pand	$y0, $x1
+	pand	$y1, $x0
+	pxor	$x0, $x1
+	pxor	$t0, $x0
+___
+}
+
+sub Mul_GF4_N_GF4 {
+# interleaved Mul_GF4_N and Mul_GF4
+my ($x0,$x1,$y0,$y1,$t0,
+    $x2,$x3,$y2,$y3,$t1)=@_;
+$code.=<<___;
+	movdqa	$y0, $t0
+	 movdqa	$y2, $t1
+	pxor	$y1, $t0
+	 pxor 	$y3, $t1
+	pand	$x0, $t0
+	 pand	$x2, $t1
+	pxor	$x1, $x0
+	 pxor	$x3, $x2
+	pand	$y0, $x1
+	 pand	$y2, $x3
+	pand	$y1, $x0
+	 pand	$y3, $x2
+	pxor	$x0, $x1
+	 pxor	$x3, $x2
+	pxor	$t0, $x0
+	 pxor	$t1, $x3
+___
+}
+sub Mul_GF16_2 {
+my @x=@_[0..7];
+my @y=@_[8..11];
+my @t=@_[12..15];
+$code.=<<___;
+	movdqa	@x[0], @t[0]
+	movdqa	@x[1], @t[1]
+___
+	&Mul_GF4  	(@x[0], @x[1], @y[0], @y[1], @t[2]);
+$code.=<<___;
+	pxor	@x[2], @t[0]
+	pxor	@x[3], @t[1]
+	pxor	@y[2], @y[0]
+	pxor	@y[3], @y[1]
+___
+	Mul_GF4_N_GF4	(@t[0], @t[1], @y[0], @y[1], @t[3],
+			 @x[2], @x[3], @y[2], @y[3], @t[2]);
+$code.=<<___;
+	pxor	@t[0], @x[0]
+	pxor	@t[0], @x[2]
+	pxor	@t[1], @x[1]
+	pxor	@t[1], @x[3]
+
+	movdqa	@x[4], @t[0]
+	movdqa	@x[5], @t[1]
+	pxor	@x[6], @t[0]
+	pxor	@x[7], @t[1]
+___
+	&Mul_GF4_N_GF4	(@t[0], @t[1], @y[0], @y[1], @t[3],
+			 @x[6], @x[7], @y[2], @y[3], @t[2]);
+$code.=<<___;
+	pxor	@y[2], @y[0]
+	pxor	@y[3], @y[1]
+___
+	&Mul_GF4  	(@x[4], @x[5], @y[0], @y[1], @t[3]);
+$code.=<<___;
+	pxor	@t[0], @x[4]
+	pxor	@t[0], @x[6]
+	pxor	@t[1], @x[5]
+	pxor	@t[1], @x[7]
+___
+}
+sub Inv_GF256 {
+#;********************************************************************
+#;* Inv_GF256: Input x0-x7 Output x0-x7 Temp t0-t3,s0-s3 (144)       *
+#;********************************************************************
+my @x=@_[0..7];
+my @t=@_[8..11];
+my @s=@_[12..15];
+# direct optimizations from hardware
+$code.=<<___;
+	movdqa	@x[4], @t[3]
+	movdqa	@x[5], @t[2]
+	movdqa	@x[1], @t[1]
+	movdqa	@x[7], @s[1]
+	movdqa	@x[0], @s[0]
+
+	pxor	@x[6], @t[3]
+	pxor	@x[7], @t[2]
+	pxor	@x[3], @t[1]
+	 movdqa	@t[3], @s[2]
+	pxor	@x[6], @s[1]
+	 movdqa	@t[2], @t[0]
+	pxor	@x[2], @s[0]
+	 movdqa	@t[3], @s[3]
+
+	por	@t[1], @t[2]
+	por	@s[0], @t[3]
+	pxor	@t[0], @s[3]
+	pand	@s[0], @s[2]
+	pxor	@t[1], @s[0]
+	pand	@t[1], @t[0]
+	pand	@s[0], @s[3]
+	movdqa	@x[3], @s[0]
+	pxor	@x[2], @s[0]
+	pand	@s[0], @s[1]
+	pxor	@s[1], @t[3]
+	pxor	@s[1], @t[2]
+	movdqa	@x[4], @s[1]
+	movdqa	@x[1], @s[0]
+	pxor	@x[5], @s[1]
+	pxor	@x[0], @s[0]
+	movdqa	@s[1], @t[1]
+	pand	@s[0], @s[1]
+	por	@s[0], @t[1]
+	pxor	@s[1], @t[0]
+	pxor	@s[3], @t[3]
+	pxor	@s[2], @t[2]
+	pxor	@s[3], @t[1]
+	movdqa	@x[7], @s[0]
+	pxor	@s[2], @t[0]
+	movdqa	@x[6], @s[1]
+	pxor	@s[2], @t[1]
+	movdqa	@x[5], @s[2]
+	pand	@x[3], @s[0]
+	movdqa	@x[4], @s[3]
+	pand	@x[2], @s[1]
+	pand	@x[1], @s[2]
+	por	@x[0], @s[3]
+	pxor	@s[0], @t[3]
+	pxor	@s[1], @t[2]
+	pxor	@s[2], @t[1]
+	pxor	@s[3], @t[0] 
+
+	#Inv_GF16 \t0, \t1, \t2, \t3, \s0, \s1, \s2, \s3
+
+	# new smaller inversion
+
+	movdqa	@t[3], @s[0]
+	pand	@t[1], @t[3]
+	pxor	@t[2], @s[0]
+
+	movdqa	@t[0], @s[2]
+	movdqa	@s[0], @s[3]
+	pxor	@t[3], @s[2]
+	pand	@s[2], @s[3]
+
+	movdqa	@t[1], @s[1]
+	pxor	@t[2], @s[3]
+	pxor	@t[0], @s[1]
+
+	pxor	@t[2], @t[3]
+
+	pand	@t[3], @s[1]
+
+	movdqa	@s[2], @t[2]
+	pxor	@t[0], @s[1]
+
+	pxor	@s[1], @t[2]
+	pxor	@s[1], @t[1]
+
+	pand	@t[0], @t[2]
+
+	pxor	@t[2], @s[2]
+	pxor	@t[2], @t[1]
+
+	pand	@s[3], @s[2]
+
+	pxor	@s[0], @s[2]
+___
+# output in s3, s2, s1, t1
+
+# Mul_GF16_2 \x0, \x1, \x2, \x3, \x4, \x5, \x6, \x7, \t2, \t3, \t0, \t1, \s0, \s1, \s2, \s3
+
+# Mul_GF16_2 \x0, \x1, \x2, \x3, \x4, \x5, \x6, \x7, \s3, \s2, \s1, \t1, \s0, \t0, \t2, \t3
+	&Mul_GF16_2(@x,@s[3,2,1],@t[1],@s[0],@t[0,2,3]);
+
+### output msb > [x3,x2,x1,x0,x7,x6,x5,x4] < lsb
+}
+
+# AES linear components
+
+sub ShiftRows {
+my @x=@_[0..7];
+my $mask=pop;
+$code.=<<___;
+	pxor	0x00($key),@x[0]
+	pxor	0x10($key),@x[1]
+	pshufb	$mask,@x[0]
+	pxor	0x20($key),@x[2]
+	pshufb	$mask,@x[1]
+	pxor	0x30($key),@x[3]
+	pshufb	$mask,@x[2]
+	pxor	0x40($key),@x[4]
+	pshufb	$mask,@x[3]
+	pxor	0x50($key),@x[5]
+	pshufb	$mask,@x[4]
+	pxor	0x60($key),@x[6]
+	pshufb	$mask,@x[5]
+	pxor	0x70($key),@x[7]
+	pshufb	$mask,@x[6]
+	lea	0x80($key),$key
+	pshufb	$mask,@x[7]
+___
+}
+
+sub MixColumns {
+# modified to emit output in order suitable for feeding back to aesenc[last]
+my @x=@_[0..7];
+my @t=@_[8..15];
+$code.=<<___;
+	pshufd	\$0x93, @x[0], @t[0]	# x0 <<< 32
+	pshufd	\$0x93, @x[1], @t[1]
+	 pxor	@t[0], @x[0]		# x0 ^ (x0 <<< 32)
+	pshufd	\$0x93, @x[2], @t[2]
+	 pxor	@t[1], @x[1]
+	pshufd	\$0x93, @x[3], @t[3]
+	 pxor	@t[2], @x[2]
+	pshufd	\$0x93, @x[4], @t[4]
+	 pxor	@t[3], @x[3]
+	pshufd	\$0x93, @x[5], @t[5]
+	 pxor	@t[4], @x[4]
+	pshufd	\$0x93, @x[6], @t[6]
+	 pxor	@t[5], @x[5]
+	pshufd	\$0x93, @x[7], @t[7]
+	 pxor	@t[6], @x[6]
+	 pxor	@t[7], @x[7]
+
+	pxor	@x[0], @t[1]
+	pxor	@x[7], @t[0]
+	pxor	@x[7], @t[1]
+	 pshufd	\$0x4E, @x[0], @x[0] 	# (x0 ^ (x0 <<< 32)) <<< 64)
+	pxor	@x[1], @t[2]
+	 pshufd	\$0x4E, @x[1], @x[1]
+	pxor	@x[4], @t[5]
+	 pxor	@t[0], @x[0]
+	pxor	@x[5], @t[6]
+	 pxor	@t[1], @x[1]
+	pxor	@x[3], @t[4]
+	 pshufd	\$0x4E, @x[4], @t[0]
+	pxor	@x[6], @t[7]
+	 pshufd	\$0x4E, @x[5], @t[1]
+	pxor	@x[2], @t[3]
+	 pshufd	\$0x4E, @x[3], @x[4]
+	pxor	@x[7], @t[3]
+	 pshufd	\$0x4E, @x[7], @x[5]
+	pxor	@x[7], @t[4]
+	 pshufd	\$0x4E, @x[6], @x[3]
+	pxor	@t[4], @t[0]
+	 pshufd	\$0x4E, @x[2], @x[6]
+	pxor	@t[5], @t[1]
+
+	pxor	@t[3], @x[4]
+	pxor	@t[7], @x[5]
+	pxor	@t[6], @x[3]
+	 movdqa	@t[0], @x[2]
+	pxor	@t[2], @x[6]
+	 movdqa	@t[1], @x[7]
+___
+}
+
+sub InvMixColumns {
+my @x=@_[0..7];
+my @t=@_[8..15];
+
+$code.=<<___;
+	# multiplication by 0x0e
+	pshufd	\$0x93, @x[7], @t[7]
+	movdqa	@x[2], @t[2]
+	pxor	@x[5], @x[7]		# 7 5
+	pxor	@x[5], @x[2]		# 2 5
+	pshufd	\$0x93, @x[0], @t[0]
+	movdqa	@x[5], @t[5]
+	pxor	@x[0], @x[5]		# 5 0		[1]
+	pxor	@x[1], @x[0]		# 0 1
+	pshufd	\$0x93, @x[1], @t[1]
+	pxor	@x[2], @x[1]		# 1 25
+	pxor	@x[6], @x[0]		# 01 6		[2]
+	pxor	@x[3], @x[1]		# 125 3		[4]
+	pshufd	\$0x93, @x[3], @t[3]
+	pxor	@x[0], @x[2]		# 25 016	[3]
+	pxor	@x[7], @x[3]		# 3 75
+	pxor	@x[6], @x[7]		# 75 6		[0]
+	pshufd	\$0x93, @x[6], @t[6]
+	movdqa	@x[4], @t[4]
+	pxor	@x[4], @x[6]		# 6 4
+	pxor	@x[3], @x[4]		# 4 375		[6]
+	pxor	@x[7], @x[3]		# 375 756=36
+	pxor	@t[5], @x[6]		# 64 5		[7]
+	pxor	@t[2], @x[3]		# 36 2
+	pxor	@t[4], @x[3]		# 362 4		[5]
+	pshufd	\$0x93, @t[5], @t[5]
+___
+					my @y = @x[7,5,0,2,1,3,4,6];
+$code.=<<___;
+	# multiplication by 0x0b
+	pxor	@y[0], @y[1]
+	pxor	@t[0], @y[0]
+	pxor	@t[1], @y[1]
+	pshufd	\$0x93, @t[2], @t[2]
+	pxor	@t[5], @y[0]
+	pxor	@t[6], @y[1]
+	pxor	@t[7], @y[0]
+	pshufd	\$0x93, @t[4], @t[4]
+	pxor	@t[6], @t[7]		# clobber t[7]
+	pxor	@y[0], @y[1]
+
+	pxor	@t[0], @y[3]
+	pshufd	\$0x93, @t[0], @t[0]
+	pxor	@t[1], @y[2]
+	pxor	@t[1], @y[4]
+	pxor	@t[2], @y[2]
+	pshufd	\$0x93, @t[1], @t[1]
+	pxor	@t[2], @y[3]
+	pxor	@t[2], @y[5]
+	pxor	@t[7], @y[2]
+	pshufd	\$0x93, @t[2], @t[2]
+	pxor	@t[3], @y[3]
+	pxor	@t[3], @y[6]
+	pxor	@t[3], @y[4]
+	pshufd	\$0x93, @t[3], @t[3]
+	pxor	@t[4], @y[7]
+	pxor	@t[4], @y[5]
+	pxor	@t[7], @y[7]
+	pxor	@t[5], @y[3]
+	pxor	@t[4], @y[4]
+	pxor	@t[5], @t[7]		# clobber t[7] even more
+
+	pxor	@t[7], @y[5]
+	pshufd	\$0x93, @t[4], @t[4]
+	pxor	@t[7], @y[6]
+	pxor	@t[7], @y[4]
+
+	pxor	@t[5], @t[7]
+	pshufd	\$0x93, @t[5], @t[5]
+	pxor	@t[6], @t[7]		# restore t[7]
+
+	# multiplication by 0x0d
+	pxor	@y[7], @y[4]
+	pxor	@t[4], @y[7]
+	pshufd	\$0x93, @t[6], @t[6]
+	pxor	@t[0], @y[2]
+	pxor	@t[5], @y[7]
+	pxor	@t[2], @y[2]
+	pshufd	\$0x93, @t[7], @t[7]
+
+	pxor	@y[1], @y[3]
+	pxor	@t[1], @y[1]
+	pxor	@t[0], @y[0]
+	pxor	@t[0], @y[3]
+	pxor	@t[5], @y[1]
+	pxor	@t[5], @y[0]
+	pxor	@t[7], @y[1]
+	pshufd	\$0x93, @t[0], @t[0]
+	pxor	@t[6], @y[0]
+	pxor	@y[1], @y[3]
+	pxor	@t[1], @y[4]
+	pshufd	\$0x93, @t[1], @t[1]
+
+	pxor	@t[7], @y[7]
+	pxor	@t[2], @y[4]
+	pxor	@t[2], @y[5]
+	pshufd	\$0x93, @t[2], @t[2]
+	pxor	@t[6], @y[2]
+	pxor	@t[3], @t[6]		# clobber t[6]
+	pxor	@y[7], @y[4]
+	pxor	@t[6], @y[3]
+
+	pxor	@t[6], @y[6]
+	pxor	@t[5], @y[5]
+	pxor	@t[4], @y[6]
+	pshufd	\$0x93, @t[4], @t[4]
+	pxor	@t[6], @y[5]
+	pxor	@t[7], @y[6]
+	pxor	@t[3], @t[6]		# restore t[6]
+
+	pshufd	\$0x93, @t[5], @t[5]
+	pshufd	\$0x93, @t[6], @t[6]
+	pshufd	\$0x93, @t[7], @t[7]
+	pshufd	\$0x93, @t[3], @t[3]
+
+	# multiplication by 0x09
+	pxor	@y[1], @y[4]
+	pxor	@y[1], @t[1]		# t[1]=y[1]
+	pxor	@t[5], @t[0]		# clobber t[0]
+	pxor	@t[5], @t[1]
+	pxor	@t[0], @y[3]
+	pxor	@y[0], @t[0]		# t[0]=y[0]
+	pxor	@t[6], @t[1]
+	pxor	@t[7], @t[6]		# clobber t[6]
+	pxor	@t[1], @y[4]
+	pxor	@t[4], @y[7]
+	pxor	@y[4], @t[4]		# t[4]=y[4]
+	pxor	@t[3], @y[6]
+	pxor	@y[3], @t[3]		# t[3]=y[3]
+	pxor	@t[2], @y[5]
+	pxor	@y[2], @t[2]		# t[2]=y[2]
+	pxor	@t[7], @t[3]
+	pxor	@y[5], @t[5]		# t[5]=y[5]
+	pxor	@t[6], @t[2]
+	pxor	@t[6], @t[5]
+	pxor	@y[6], @t[6]		# t[6]=y[6]
+	pxor	@y[7], @t[7]		# t[7]=y[7]
+
+	movdqa	@t[0],@XMM[0]
+	movdqa	@t[1],@XMM[1]
+	movdqa	@t[2],@XMM[2]
+	movdqa	@t[3],@XMM[3]
+	movdqa	@t[4],@XMM[4]
+	movdqa	@t[5],@XMM[5]
+	movdqa	@t[6],@XMM[6]
+	movdqa	@t[7],@XMM[7]
+___
+}
+
+sub aesenc {				# not used
+my @b=@_[0..7];
+my @t=@_[8..15];
+$code.=<<___;
+	movdqa	0x30($const),@t[0]	# .LSR
+___
+	&ShiftRows	(@b,@t[0]);
+	&Sbox		(@b,@t);
+	&MixColumns	(@b[0,1,4,6,3,7,2,5],@t);
+}
+
+sub aesenclast {			# not used
+my @b=@_[0..7];
+my @t=@_[8..15];
+$code.=<<___;
+	movdqa	0x40($const),@t[0]	# .LSRM0
+___
+	&ShiftRows	(@b,@t[0]);
+	&Sbox		(@b,@t);
+$code.=<<___
+	pxor	0x00($key),@b[0]
+	pxor	0x10($key),@b[1]
+	pxor	0x20($key),@b[4]
+	pxor	0x30($key),@b[6]
+	pxor	0x40($key),@b[3]
+	pxor	0x50($key),@b[7]
+	pxor	0x60($key),@b[2]
+	pxor	0x70($key),@b[5]
+___
+}
+
+sub swapmove {
+my ($a,$b,$n,$mask,$t)=@_;
+$code.=<<___;
+	movdqa	$b,$t
+	psrlq	\$$n,$b
+	pxor  	$a,$b
+	pand	$mask,$b
+	pxor	$b,$a
+	psllq	\$$n,$b
+	pxor	$t,$b
+___
+}
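+
+# (Scalar sketch of the swapmove step above, applied per 64-bit lane:
+#	$t = ($a ^ ($b >> $n)) & $mask;
+#	$a ^= $t;
+#	$b ^= $t << $n;
+#  i.e. the bit positions of $a selected by $mask are exchanged with the
+#  corresponding positions of $b shifted left by $n.)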
+sub swapmove2x {
+my ($a0,$b0,$a1,$b1,$n,$mask,$t0,$t1)=@_;
+$code.=<<___;
+	movdqa	$b0,$t0
+	psrlq	\$$n,$b0
+	 movdqa	$b1,$t1
+	 psrlq	\$$n,$b1
+	pxor  	$a0,$b0
+	 pxor  	$a1,$b1
+	pand	$mask,$b0
+	 pand	$mask,$b1
+	pxor	$b0,$a0
+	psllq	\$$n,$b0
+	 pxor	$b1,$a1
+	 psllq	\$$n,$b1
+	pxor	$t0,$b0
+	 pxor	$t1,$b1
+___
+}
+
+sub bitslice {
+my @x=reverse(@_[0..7]);
+my ($t0,$t1,$t2,$t3)=@_[8..11];
+$code.=<<___;
+	movdqa	0x00($const),$t0	# .LBS0
+	movdqa	0x10($const),$t1	# .LBS1
+___
+	&swapmove2x(@x[0,1,2,3],1,$t0,$t2,$t3);
+	&swapmove2x(@x[4,5,6,7],1,$t0,$t2,$t3);
+$code.=<<___;
+	movdqa	0x20($const),$t0	# .LBS2
+___
+	&swapmove2x(@x[0,2,1,3],2,$t1,$t2,$t3);
+	&swapmove2x(@x[4,6,5,7],2,$t1,$t2,$t3);
+
+	&swapmove2x(@x[0,4,1,5],4,$t0,$t2,$t3);
+	&swapmove2x(@x[2,6,3,7],4,$t0,$t2,$t3);
+}
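+
+# (The three swapmove2x passes above amount to transposing each 8x8 bit
+#  matrix of the state: strides 1, 2 and 4 with the .LBS0/.LBS1/.LBS2
+#  masks rearrange the data so that each register ends up holding one
+#  bit position of every byte, which is the layout the bit-sliced
+#  Sbox/InvSbox expect.)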
+
+$code.=<<___;
+.text
+
+.extern	asm_AES_encrypt
+.extern	asm_AES_decrypt
+
+.type	_bsaes_encrypt8,\@abi-omnipotent
+.align	64
+_bsaes_encrypt8:
+	lea	.LBS0(%rip), $const	# constants table
+
+	movdqa	($key), @XMM[9]		# round 0 key
+	lea	0x10($key), $key
+	movdqa	0x50($const), @XMM[8]	# .LM0SR
+	pxor	@XMM[9], @XMM[0]	# xor with round0 key
+	pxor	@XMM[9], @XMM[1]
+	 pshufb	@XMM[8], @XMM[0]
+	pxor	@XMM[9], @XMM[2]
+	 pshufb	@XMM[8], @XMM[1]
+	pxor	@XMM[9], @XMM[3]
+	 pshufb	@XMM[8], @XMM[2]
+	pxor	@XMM[9], @XMM[4]
+	 pshufb	@XMM[8], @XMM[3]
+	pxor	@XMM[9], @XMM[5]
+	 pshufb	@XMM[8], @XMM[4]
+	pxor	@XMM[9], @XMM[6]
+	 pshufb	@XMM[8], @XMM[5]
+	pxor	@XMM[9], @XMM[7]
+	 pshufb	@XMM[8], @XMM[6]
+	 pshufb	@XMM[8], @XMM[7]
+_bsaes_encrypt8_bitslice:
+___
+	&bitslice	(@XMM[0..7, 8..11]);
+$code.=<<___;
+	dec	$rounds
+	jmp	.Lenc_sbox
+.align	16
+.Lenc_loop:
+___
+	&ShiftRows	(@XMM[0..7, 8]);
+$code.=".Lenc_sbox:\n";
+	&Sbox		(@XMM[0..7, 8..15]);
+$code.=<<___;
+	dec	$rounds
+	jl	.Lenc_done
+___
+	&MixColumns	(@XMM[0,1,4,6,3,7,2,5, 8..15]);
+$code.=<<___;
+	movdqa	0x30($const), @XMM[8]	# .LSR
+	jnz	.Lenc_loop
+	movdqa	0x40($const), @XMM[8]	# .LSRM0
+	jmp	.Lenc_loop
+.align	16
+.Lenc_done:
+___
+	# output in lsb > [t0, t1, t4, t6, t3, t7, t2, t5] < msb
+	&bitslice	(@XMM[0,1,4,6,3,7,2,5, 8..11]);
+$code.=<<___;
+	movdqa	($key), @XMM[8]		# last round key
+	pxor	@XMM[8], @XMM[4]
+	pxor	@XMM[8], @XMM[6]
+	pxor	@XMM[8], @XMM[3]
+	pxor	@XMM[8], @XMM[7]
+	pxor	@XMM[8], @XMM[2]
+	pxor	@XMM[8], @XMM[5]
+	pxor	@XMM[8], @XMM[0]
+	pxor	@XMM[8], @XMM[1]
+	ret
+.size	_bsaes_encrypt8,.-_bsaes_encrypt8
+
+.type	_bsaes_decrypt8,\@abi-omnipotent
+.align	64
+_bsaes_decrypt8:
+	lea	.LBS0(%rip), $const	# constants table
+
+	movdqa	($key), @XMM[9]		# round 0 key
+	lea	0x10($key), $key
+	movdqa	-0x30($const), @XMM[8]	# .LM0ISR
+	pxor	@XMM[9], @XMM[0]	# xor with round0 key
+	pxor	@XMM[9], @XMM[1]
+	 pshufb	@XMM[8], @XMM[0]
+	pxor	@XMM[9], @XMM[2]
+	 pshufb	@XMM[8], @XMM[1]
+	pxor	@XMM[9], @XMM[3]
+	 pshufb	@XMM[8], @XMM[2]
+	pxor	@XMM[9], @XMM[4]
+	 pshufb	@XMM[8], @XMM[3]
+	pxor	@XMM[9], @XMM[5]
+	 pshufb	@XMM[8], @XMM[4]
+	pxor	@XMM[9], @XMM[6]
+	 pshufb	@XMM[8], @XMM[5]
+	pxor	@XMM[9], @XMM[7]
+	 pshufb	@XMM[8], @XMM[6]
+	 pshufb	@XMM[8], @XMM[7]
+___
+	&bitslice	(@XMM[0..7, 8..11]);
+$code.=<<___;
+	dec	$rounds
+	jmp	.Ldec_sbox
+.align	16
+.Ldec_loop:
+___
+	&ShiftRows	(@XMM[0..7, 8]);
+$code.=".Ldec_sbox:\n";
+	&InvSbox	(@XMM[0..7, 8..15]);
+$code.=<<___;
+	dec	$rounds
+	jl	.Ldec_done
+___
+	&InvMixColumns	(@XMM[0,1,6,4,2,7,3,5, 8..15]);
+$code.=<<___;
+	movdqa	-0x10($const), @XMM[8]	# .LISR
+	jnz	.Ldec_loop
+	movdqa	-0x20($const), @XMM[8]	# .LISRM0
+	jmp	.Ldec_loop
+.align	16
+.Ldec_done:
+___
+	&bitslice	(@XMM[0,1,6,4,2,7,3,5, 8..11]);
+$code.=<<___;
+	movdqa	($key), @XMM[8]		# last round key
+	pxor	@XMM[8], @XMM[6]
+	pxor	@XMM[8], @XMM[4]
+	pxor	@XMM[8], @XMM[2]
+	pxor	@XMM[8], @XMM[7]
+	pxor	@XMM[8], @XMM[3]
+	pxor	@XMM[8], @XMM[5]
+	pxor	@XMM[8], @XMM[0]
+	pxor	@XMM[8], @XMM[1]
+	ret
+.size	_bsaes_decrypt8,.-_bsaes_decrypt8
+___
+}
+{
+my ($out,$inp,$rounds,$const)=("%rax","%rcx","%r10d","%r11");
+
+sub bitslice_key {
+my @x=reverse(@_[0..7]);
+my ($bs0,$bs1,$bs2,$t2,$t3)=@_[8..12];
+
+	&swapmove	(@x[0,1],1,$bs0,$t2,$t3);
+$code.=<<___;
+	#&swapmove(@x[2,3],1,$t0,$t2,$t3);
+	movdqa	@x[0], @x[2]
+	movdqa	@x[1], @x[3]
+___
+	#&swapmove2x(@x[4,5,6,7],1,$t0,$t2,$t3);
+
+	&swapmove2x	(@x[0,2,1,3],2,$bs1,$t2,$t3);
+$code.=<<___;
+	#&swapmove2x(@x[4,6,5,7],2,$t1,$t2,$t3);
+	movdqa	@x[0], @x[4]
+	movdqa	@x[2], @x[6]
+	movdqa	@x[1], @x[5]
+	movdqa	@x[3], @x[7]
+___
+	&swapmove2x	(@x[0,4,1,5],4,$bs2,$t2,$t3);
+	&swapmove2x	(@x[2,6,3,7],4,$bs2,$t2,$t3);
+}
+
+$code.=<<___;
+.type	_bsaes_key_convert,\@abi-omnipotent
+.align	16
+_bsaes_key_convert:
+	lea	.Lmasks(%rip), $const
+	movdqu	($inp), %xmm7		# load round 0 key
+	lea	0x10($inp), $inp
+	movdqa	0x00($const), %xmm0	# 0x01...
+	movdqa	0x10($const), %xmm1	# 0x02...
+	movdqa	0x20($const), %xmm2	# 0x04...
+	movdqa	0x30($const), %xmm3	# 0x08...
+	movdqa	0x40($const), %xmm4	# .LM0
+	pcmpeqd	%xmm5, %xmm5		# .LNOT
+
+	movdqu	($inp), %xmm6		# load round 1 key
+	movdqa	%xmm7, ($out)		# save round 0 key
+	lea	0x10($out), $out
+	dec	$rounds
+	jmp	.Lkey_loop
+.align	16
+.Lkey_loop:
+	pshufb	%xmm4, %xmm6		# .LM0
+
+	movdqa	%xmm0,	%xmm8
+	movdqa	%xmm1,	%xmm9
+
+	pand	%xmm6,	%xmm8
+	pand	%xmm6,	%xmm9
+	movdqa	%xmm2,	%xmm10
+	pcmpeqb	%xmm0,	%xmm8
+	psllq	\$4,	%xmm0		# 0x10...
+	movdqa	%xmm3,	%xmm11
+	pcmpeqb	%xmm1,	%xmm9
+	psllq	\$4,	%xmm1		# 0x20...
+
+	pand	%xmm6,	%xmm10
+	pand	%xmm6,	%xmm11
+	movdqa	%xmm0,	%xmm12
+	pcmpeqb	%xmm2,	%xmm10
+	psllq	\$4,	%xmm2		# 0x40...
+	movdqa	%xmm1,	%xmm13
+	pcmpeqb	%xmm3,	%xmm11
+	psllq	\$4,	%xmm3		# 0x80...
+
+	movdqa	%xmm2,	%xmm14
+	movdqa	%xmm3,	%xmm15
+	 pxor	%xmm5,	%xmm8		# "pnot"
+	 pxor	%xmm5,	%xmm9
+
+	pand	%xmm6,	%xmm12
+	pand	%xmm6,	%xmm13
+	 movdqa	%xmm8, 0x00($out)	# write bit-sliced round key
+	pcmpeqb	%xmm0,	%xmm12
+	psrlq	\$4,	%xmm0		# 0x01...
+	 movdqa	%xmm9, 0x10($out)
+	pcmpeqb	%xmm1,	%xmm13
+	psrlq	\$4,	%xmm1		# 0x02...
+	 lea	0x10($inp), $inp
+
+	pand	%xmm6,	%xmm14
+	pand	%xmm6,	%xmm15
+	 movdqa	%xmm10, 0x20($out)
+	pcmpeqb	%xmm2,	%xmm14
+	psrlq	\$4,	%xmm2		# 0x04...
+	 movdqa	%xmm11, 0x30($out)
+	pcmpeqb	%xmm3,	%xmm15
+	psrlq	\$4,	%xmm3		# 0x08...
+	 movdqu	($inp), %xmm6		# load next round key
+
+	pxor	%xmm5, %xmm13		# "pnot"
+	pxor	%xmm5, %xmm14
+	movdqa	%xmm12, 0x40($out)
+	movdqa	%xmm13, 0x50($out)
+	movdqa	%xmm14, 0x60($out)
+	movdqa	%xmm15, 0x70($out)
+	lea	0x80($out),$out
+	dec	$rounds
+	jnz	.Lkey_loop
+
+	movdqa	0x50($const), %xmm7	# .L63
+	#movdqa	%xmm6, ($out)		# don't save last round key
+	ret
+.size	_bsaes_key_convert,.-_bsaes_key_convert
+___
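+
+# (What _bsaes_key_convert emits, informally: for each round key it
+#  broadcasts every one of the 8 bit positions of the byte-permuted key
+#  into a full 0x00/0xff byte mask -- pand against 0x01/0x02/.../0x80,
+#  pcmpeqb, plus a "pnot" fix-up on selected planes -- so one round key
+#  occupies eight xmm-sized planes (128 bytes) and the bit-sliced rounds
+#  can apply it with plain pxor.)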
+}
+
+if (0 && !$win64) {	# the following four functions are an unsupported
+			# interface used for benchmarking...
+$code.=<<___;
+.globl	bsaes_enc_key_convert
+.type	bsaes_enc_key_convert,\@function,2
+.align	16
+bsaes_enc_key_convert:
+	mov	240($inp),%r10d		# pass rounds
+	mov	$inp,%rcx		# pass key
+	mov	$out,%rax		# pass key schedule
+	call	_bsaes_key_convert
+	pxor	%xmm6,%xmm7		# fix up last round key
+	movdqa	%xmm7,(%rax)		# save last round key
+	ret
+.size	bsaes_enc_key_convert,.-bsaes_enc_key_convert
+
+.globl	bsaes_encrypt_128
+.type	bsaes_encrypt_128,\@function,4
+.align	16
+bsaes_encrypt_128:
+.Lenc128_loop:
+	movdqu	0x00($inp), @XMM[0]	# load input
+	movdqu	0x10($inp), @XMM[1]
+	movdqu	0x20($inp), @XMM[2]
+	movdqu	0x30($inp), @XMM[3]
+	movdqu	0x40($inp), @XMM[4]
+	movdqu	0x50($inp), @XMM[5]
+	movdqu	0x60($inp), @XMM[6]
+	movdqu	0x70($inp), @XMM[7]
+	mov	$key, %rax		# pass the $key
+	lea	0x80($inp), $inp
+	mov	\$10,%r10d
+
+	call	_bsaes_encrypt8
+
+	movdqu	@XMM[0], 0x00($out)	# write output
+	movdqu	@XMM[1], 0x10($out)
+	movdqu	@XMM[4], 0x20($out)
+	movdqu	@XMM[6], 0x30($out)
+	movdqu	@XMM[3], 0x40($out)
+	movdqu	@XMM[7], 0x50($out)
+	movdqu	@XMM[2], 0x60($out)
+	movdqu	@XMM[5], 0x70($out)
+	lea	0x80($out), $out
+	sub	\$0x80,$len
+	ja	.Lenc128_loop
+	ret
+.size	bsaes_encrypt_128,.-bsaes_encrypt_128
+
+.globl	bsaes_dec_key_convert
+.type	bsaes_dec_key_convert,\@function,2
+.align	16
+bsaes_dec_key_convert:
+	mov	240($inp),%r10d		# pass rounds
+	mov	$inp,%rcx		# pass key
+	mov	$out,%rax		# pass key schedule
+	call	_bsaes_key_convert
+	pxor	($out),%xmm7		# fix up round 0 key
+	movdqa	%xmm6,(%rax)		# save last round key
+	movdqa	%xmm7,($out)
+	ret
+.size	bsaes_dec_key_convert,.-bsaes_dec_key_convert
+
+.globl	bsaes_decrypt_128
+.type	bsaes_decrypt_128,\@function,4
+.align	16
+bsaes_decrypt_128:
+.Ldec128_loop:
+	movdqu	0x00($inp), @XMM[0]	# load input
+	movdqu	0x10($inp), @XMM[1]
+	movdqu	0x20($inp), @XMM[2]
+	movdqu	0x30($inp), @XMM[3]
+	movdqu	0x40($inp), @XMM[4]
+	movdqu	0x50($inp), @XMM[5]
+	movdqu	0x60($inp), @XMM[6]
+	movdqu	0x70($inp), @XMM[7]
+	mov	$key, %rax		# pass the $key
+	lea	0x80($inp), $inp
+	mov	\$10,%r10d
+
+	call	_bsaes_decrypt8
+
+	movdqu	@XMM[0], 0x00($out)	# write output
+	movdqu	@XMM[1], 0x10($out)
+	movdqu	@XMM[6], 0x20($out)
+	movdqu	@XMM[4], 0x30($out)
+	movdqu	@XMM[2], 0x40($out)
+	movdqu	@XMM[7], 0x50($out)
+	movdqu	@XMM[3], 0x60($out)
+	movdqu	@XMM[5], 0x70($out)
+	lea	0x80($out), $out
+	sub	\$0x80,$len
+	ja	.Ldec128_loop
+	ret
+.size	bsaes_decrypt_128,.-bsaes_decrypt_128
+___
+}
+{
+######################################################################
+#
+# OpenSSL interface
+#
+my ($arg1,$arg2,$arg3,$arg4,$arg5,$arg6)=$win64	? ("%rcx","%rdx","%r8","%r9","%r10","%r11d")
+						: ("%rdi","%rsi","%rdx","%rcx","%r8","%r9d");
+my ($inp,$out,$len,$key)=("%r12","%r13","%r14","%r15");
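+
+# (Win64 passes the first four integer arguments in rcx/rdx/r8/r9 and the
+#  remainder on the stack, whereas the SysV ABI uses rdi/rsi/rdx/rcx/r8/r9;
+#  this is why the Win64-only paths below fetch $arg5/$arg6 from the
+#  caller's stack frame.)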
+
+if ($ecb) {
+$code.=<<___;
+.globl	bsaes_ecb_encrypt_blocks
+.type	bsaes_ecb_encrypt_blocks,\@abi-omnipotent
+.align	16
+bsaes_ecb_encrypt_blocks:
+	mov	%rsp, %rax
+.Lecb_enc_prologue:
+	push	%rbp
+	push	%rbx
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	lea	-0x48(%rsp),%rsp
+___
+$code.=<<___ if ($win64);
+	lea	-0xa0(%rsp), %rsp
+	movaps	%xmm6, 0x40(%rsp)
+	movaps	%xmm7, 0x50(%rsp)
+	movaps	%xmm8, 0x60(%rsp)
+	movaps	%xmm9, 0x70(%rsp)
+	movaps	%xmm10, 0x80(%rsp)
+	movaps	%xmm11, 0x90(%rsp)
+	movaps	%xmm12, 0xa0(%rsp)
+	movaps	%xmm13, 0xb0(%rsp)
+	movaps	%xmm14, 0xc0(%rsp)
+	movaps	%xmm15, 0xd0(%rsp)
+.Lecb_enc_body:
+___
+$code.=<<___;
+	mov	%rsp,%rbp		# backup %rsp
+	mov	240($arg4),%eax		# rounds
+	mov	$arg1,$inp		# backup arguments
+	mov	$arg2,$out
+	mov	$arg3,$len
+	mov	$arg4,$key
+	cmp	\$8,$arg3
+	jb	.Lecb_enc_short
+
+	mov	%eax,%ebx		# backup rounds
+	shl	\$7,%rax		# 128 bytes per inner round key
+	sub	\$`128-32`,%rax		# size of bit-sliced key schedule
+	sub	%rax,%rsp
+	mov	%rsp,%rax		# pass key schedule
+	mov	$key,%rcx		# pass key
+	mov	%ebx,%r10d		# pass rounds
+	call	_bsaes_key_convert
+	pxor	%xmm6,%xmm7		# fix up last round key
+	movdqa	%xmm7,(%rax)		# save last round key
+
+	sub	\$8,$len
+.Lecb_enc_loop:
+	movdqu	0x00($inp), @XMM[0]	# load input
+	movdqu	0x10($inp), @XMM[1]
+	movdqu	0x20($inp), @XMM[2]
+	movdqu	0x30($inp), @XMM[3]
+	movdqu	0x40($inp), @XMM[4]
+	movdqu	0x50($inp), @XMM[5]
+	mov	%rsp, %rax		# pass key schedule
+	movdqu	0x60($inp), @XMM[6]
+	mov	%ebx,%r10d		# pass rounds
+	movdqu	0x70($inp), @XMM[7]
+	lea	0x80($inp), $inp
+
+	call	_bsaes_encrypt8
+
+	movdqu	@XMM[0], 0x00($out)	# write output
+	movdqu	@XMM[1], 0x10($out)
+	movdqu	@XMM[4], 0x20($out)
+	movdqu	@XMM[6], 0x30($out)
+	movdqu	@XMM[3], 0x40($out)
+	movdqu	@XMM[7], 0x50($out)
+	movdqu	@XMM[2], 0x60($out)
+	movdqu	@XMM[5], 0x70($out)
+	lea	0x80($out), $out
+	sub	\$8,$len
+	jnc	.Lecb_enc_loop
+
+	add	\$8,$len
+	jz	.Lecb_enc_done
+
+	movdqu	0x00($inp), @XMM[0]	# load input
+	mov	%rsp, %rax		# pass key schedule
+	mov	%ebx,%r10d		# pass rounds
+	cmp	\$2,$len
+	jb	.Lecb_enc_one
+	movdqu	0x10($inp), @XMM[1]
+	je	.Lecb_enc_two
+	movdqu	0x20($inp), @XMM[2]
+	cmp	\$4,$len
+	jb	.Lecb_enc_three
+	movdqu	0x30($inp), @XMM[3]
+	je	.Lecb_enc_four
+	movdqu	0x40($inp), @XMM[4]
+	cmp	\$6,$len
+	jb	.Lecb_enc_five
+	movdqu	0x50($inp), @XMM[5]
+	je	.Lecb_enc_six
+	movdqu	0x60($inp), @XMM[6]
+	call	_bsaes_encrypt8
+	movdqu	@XMM[0], 0x00($out)	# write output
+	movdqu	@XMM[1], 0x10($out)
+	movdqu	@XMM[4], 0x20($out)
+	movdqu	@XMM[6], 0x30($out)
+	movdqu	@XMM[3], 0x40($out)
+	movdqu	@XMM[7], 0x50($out)
+	movdqu	@XMM[2], 0x60($out)
+	jmp	.Lecb_enc_done
+.align	16
+.Lecb_enc_six:
+	call	_bsaes_encrypt8
+	movdqu	@XMM[0], 0x00($out)	# write output
+	movdqu	@XMM[1], 0x10($out)
+	movdqu	@XMM[4], 0x20($out)
+	movdqu	@XMM[6], 0x30($out)
+	movdqu	@XMM[3], 0x40($out)
+	movdqu	@XMM[7], 0x50($out)
+	jmp	.Lecb_enc_done
+.align	16
+.Lecb_enc_five:
+	call	_bsaes_encrypt8
+	movdqu	@XMM[0], 0x00($out)	# write output
+	movdqu	@XMM[1], 0x10($out)
+	movdqu	@XMM[4], 0x20($out)
+	movdqu	@XMM[6], 0x30($out)
+	movdqu	@XMM[3], 0x40($out)
+	jmp	.Lecb_enc_done
+.align	16
+.Lecb_enc_four:
+	call	_bsaes_encrypt8
+	movdqu	@XMM[0], 0x00($out)	# write output
+	movdqu	@XMM[1], 0x10($out)
+	movdqu	@XMM[4], 0x20($out)
+	movdqu	@XMM[6], 0x30($out)
+	jmp	.Lecb_enc_done
+.align	16
+.Lecb_enc_three:
+	call	_bsaes_encrypt8
+	movdqu	@XMM[0], 0x00($out)	# write output
+	movdqu	@XMM[1], 0x10($out)
+	movdqu	@XMM[4], 0x20($out)
+	jmp	.Lecb_enc_done
+.align	16
+.Lecb_enc_two:
+	call	_bsaes_encrypt8
+	movdqu	@XMM[0], 0x00($out)	# write output
+	movdqu	@XMM[1], 0x10($out)
+	jmp	.Lecb_enc_done
+.align	16
+.Lecb_enc_one:
+	call	_bsaes_encrypt8
+	movdqu	@XMM[0], 0x00($out)	# write output
+	jmp	.Lecb_enc_done
+.align	16
+.Lecb_enc_short:
+	lea	($inp), $arg1
+	lea	($out), $arg2
+	lea	($key), $arg3
+	call	asm_AES_encrypt
+	lea	16($inp), $inp
+	lea	16($out), $out
+	dec	$len
+	jnz	.Lecb_enc_short
+
+.Lecb_enc_done:
+	lea	(%rsp),%rax
+	pxor	%xmm0, %xmm0
+.Lecb_enc_bzero:			# wipe key schedule [if any]
+	movdqa	%xmm0, 0x00(%rax)
+	movdqa	%xmm0, 0x10(%rax)
+	lea	0x20(%rax), %rax
+	cmp	%rax, %rbp
+	jb	.Lecb_enc_bzero
+
+	lea	(%rbp),%rsp		# restore %rsp
+___
+$code.=<<___ if ($win64);
+	movaps	0x40(%rbp), %xmm6
+	movaps	0x50(%rbp), %xmm7
+	movaps	0x60(%rbp), %xmm8
+	movaps	0x70(%rbp), %xmm9
+	movaps	0x80(%rbp), %xmm10
+	movaps	0x90(%rbp), %xmm11
+	movaps	0xa0(%rbp), %xmm12
+	movaps	0xb0(%rbp), %xmm13
+	movaps	0xc0(%rbp), %xmm14
+	movaps	0xd0(%rbp), %xmm15
+	lea	0xa0(%rbp), %rsp
+___
+$code.=<<___;
+	mov	0x48(%rsp), %r15
+	mov	0x50(%rsp), %r14
+	mov	0x58(%rsp), %r13
+	mov	0x60(%rsp), %r12
+	mov	0x68(%rsp), %rbx
+	mov	0x70(%rsp), %rax
+	lea	0x78(%rsp), %rsp
+	mov	%rax, %rbp
+.Lecb_enc_epilogue:
+	ret
+.size	bsaes_ecb_encrypt_blocks,.-bsaes_ecb_encrypt_blocks
+
+.globl	bsaes_ecb_decrypt_blocks
+.type	bsaes_ecb_decrypt_blocks,\@abi-omnipotent
+.align	16
+bsaes_ecb_decrypt_blocks:
+	mov	%rsp, %rax
+.Lecb_dec_prologue:
+	push	%rbp
+	push	%rbx
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	lea	-0x48(%rsp),%rsp
+___
+$code.=<<___ if ($win64);
+	lea	-0xa0(%rsp), %rsp
+	movaps	%xmm6, 0x40(%rsp)
+	movaps	%xmm7, 0x50(%rsp)
+	movaps	%xmm8, 0x60(%rsp)
+	movaps	%xmm9, 0x70(%rsp)
+	movaps	%xmm10, 0x80(%rsp)
+	movaps	%xmm11, 0x90(%rsp)
+	movaps	%xmm12, 0xa0(%rsp)
+	movaps	%xmm13, 0xb0(%rsp)
+	movaps	%xmm14, 0xc0(%rsp)
+	movaps	%xmm15, 0xd0(%rsp)
+.Lecb_dec_body:
+___
+$code.=<<___;
+	mov	%rsp,%rbp		# backup %rsp
+	mov	240($arg4),%eax		# rounds
+	mov	$arg1,$inp		# backup arguments
+	mov	$arg2,$out
+	mov	$arg3,$len
+	mov	$arg4,$key
+	cmp	\$8,$arg3
+	jb	.Lecb_dec_short
+
+	mov	%eax,%ebx		# backup rounds
+	shl	\$7,%rax		# 128 bytes per inner round key
+	sub	\$`128-32`,%rax		# size of bit-sliced key schedule
+	sub	%rax,%rsp
+	mov	%rsp,%rax		# pass key schedule
+	mov	$key,%rcx		# pass key
+	mov	%ebx,%r10d		# pass rounds
+	call	_bsaes_key_convert
+	pxor	(%rsp),%xmm7		# fix up round 0 key
+	movdqa	%xmm6,(%rax)		# save last round key
+	movdqa	%xmm7,(%rsp)
+
+	sub	\$8,$len
+.Lecb_dec_loop:
+	movdqu	0x00($inp), @XMM[0]	# load input
+	movdqu	0x10($inp), @XMM[1]
+	movdqu	0x20($inp), @XMM[2]
+	movdqu	0x30($inp), @XMM[3]
+	movdqu	0x40($inp), @XMM[4]
+	movdqu	0x50($inp), @XMM[5]
+	mov	%rsp, %rax		# pass key schedule
+	movdqu	0x60($inp), @XMM[6]
+	mov	%ebx,%r10d		# pass rounds
+	movdqu	0x70($inp), @XMM[7]
+	lea	0x80($inp), $inp
+
+	call	_bsaes_decrypt8
+
+	movdqu	@XMM[0], 0x00($out)	# write output
+	movdqu	@XMM[1], 0x10($out)
+	movdqu	@XMM[6], 0x20($out)
+	movdqu	@XMM[4], 0x30($out)
+	movdqu	@XMM[2], 0x40($out)
+	movdqu	@XMM[7], 0x50($out)
+	movdqu	@XMM[3], 0x60($out)
+	movdqu	@XMM[5], 0x70($out)
+	lea	0x80($out), $out
+	sub	\$8,$len
+	jnc	.Lecb_dec_loop
+
+	add	\$8,$len
+	jz	.Lecb_dec_done
+
+	movdqu	0x00($inp), @XMM[0]	# load input
+	mov	%rsp, %rax		# pass key schedule
+	mov	%ebx,%r10d		# pass rounds
+	cmp	\$2,$len
+	jb	.Lecb_dec_one
+	movdqu	0x10($inp), @XMM[1]
+	je	.Lecb_dec_two
+	movdqu	0x20($inp), @XMM[2]
+	cmp	\$4,$len
+	jb	.Lecb_dec_three
+	movdqu	0x30($inp), @XMM[3]
+	je	.Lecb_dec_four
+	movdqu	0x40($inp), @XMM[4]
+	cmp	\$6,$len
+	jb	.Lecb_dec_five
+	movdqu	0x50($inp), @XMM[5]
+	je	.Lecb_dec_six
+	movdqu	0x60($inp), @XMM[6]
+	call	_bsaes_decrypt8
+	movdqu	@XMM[0], 0x00($out)	# write output
+	movdqu	@XMM[1], 0x10($out)
+	movdqu	@XMM[6], 0x20($out)
+	movdqu	@XMM[4], 0x30($out)
+	movdqu	@XMM[2], 0x40($out)
+	movdqu	@XMM[7], 0x50($out)
+	movdqu	@XMM[3], 0x60($out)
+	jmp	.Lecb_dec_done
+.align	16
+.Lecb_dec_six:
+	call	_bsaes_decrypt8
+	movdqu	@XMM[0], 0x00($out)	# write output
+	movdqu	@XMM[1], 0x10($out)
+	movdqu	@XMM[6], 0x20($out)
+	movdqu	@XMM[4], 0x30($out)
+	movdqu	@XMM[2], 0x40($out)
+	movdqu	@XMM[7], 0x50($out)
+	jmp	.Lecb_dec_done
+.align	16
+.Lecb_dec_five:
+	call	_bsaes_decrypt8
+	movdqu	@XMM[0], 0x00($out)	# write output
+	movdqu	@XMM[1], 0x10($out)
+	movdqu	@XMM[6], 0x20($out)
+	movdqu	@XMM[4], 0x30($out)
+	movdqu	@XMM[2], 0x40($out)
+	jmp	.Lecb_dec_done
+.align	16
+.Lecb_dec_four:
+	call	_bsaes_decrypt8
+	movdqu	@XMM[0], 0x00($out)	# write output
+	movdqu	@XMM[1], 0x10($out)
+	movdqu	@XMM[6], 0x20($out)
+	movdqu	@XMM[4], 0x30($out)
+	jmp	.Lecb_dec_done
+.align	16
+.Lecb_dec_three:
+	call	_bsaes_decrypt8
+	movdqu	@XMM[0], 0x00($out)	# write output
+	movdqu	@XMM[1], 0x10($out)
+	movdqu	@XMM[6], 0x20($out)
+	jmp	.Lecb_dec_done
+.align	16
+.Lecb_dec_two:
+	call	_bsaes_decrypt8
+	movdqu	@XMM[0], 0x00($out)	# write output
+	movdqu	@XMM[1], 0x10($out)
+	jmp	.Lecb_dec_done
+.align	16
+.Lecb_dec_one:
+	call	_bsaes_decrypt8
+	movdqu	@XMM[0], 0x00($out)	# write output
+	jmp	.Lecb_dec_done
+.align	16
+.Lecb_dec_short:
+	lea	($inp), $arg1
+	lea	($out), $arg2
+	lea	($key), $arg3
+	call	asm_AES_decrypt
+	lea	16($inp), $inp
+	lea	16($out), $out
+	dec	$len
+	jnz	.Lecb_dec_short
+
+.Lecb_dec_done:
+	lea	(%rsp),%rax
+	pxor	%xmm0, %xmm0
+.Lecb_dec_bzero:			# wipe key schedule [if any]
+	movdqa	%xmm0, 0x00(%rax)
+	movdqa	%xmm0, 0x10(%rax)
+	lea	0x20(%rax), %rax
+	cmp	%rax, %rbp
+	jb	.Lecb_dec_bzero
+
+	lea	(%rbp),%rsp		# restore %rsp
+___
+$code.=<<___ if ($win64);
+	movaps	0x40(%rbp), %xmm6
+	movaps	0x50(%rbp), %xmm7
+	movaps	0x60(%rbp), %xmm8
+	movaps	0x70(%rbp), %xmm9
+	movaps	0x80(%rbp), %xmm10
+	movaps	0x90(%rbp), %xmm11
+	movaps	0xa0(%rbp), %xmm12
+	movaps	0xb0(%rbp), %xmm13
+	movaps	0xc0(%rbp), %xmm14
+	movaps	0xd0(%rbp), %xmm15
+	lea	0xa0(%rbp), %rsp
+___
+$code.=<<___;
+	mov	0x48(%rsp), %r15
+	mov	0x50(%rsp), %r14
+	mov	0x58(%rsp), %r13
+	mov	0x60(%rsp), %r12
+	mov	0x68(%rsp), %rbx
+	mov	0x70(%rsp), %rax
+	lea	0x78(%rsp), %rsp
+	mov	%rax, %rbp
+.Lecb_dec_epilogue:
+	ret
+.size	bsaes_ecb_decrypt_blocks,.-bsaes_ecb_decrypt_blocks
+___
+}
+$code.=<<___;
+.extern	asm_AES_cbc_encrypt
+.globl	bsaes_cbc_encrypt
+.type	bsaes_cbc_encrypt,\@abi-omnipotent
+.align	16
+bsaes_cbc_encrypt:
+___
+$code.=<<___ if ($win64);
+	mov	48(%rsp),$arg6		# pull direction flag
+___
+$code.=<<___;
+	cmp	\$0,$arg6
+	jne	asm_AES_cbc_encrypt
+	cmp	\$128,$arg3
+	jb	asm_AES_cbc_encrypt
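+	# CBC encryption is inherently serial and buffers shorter than
+	# 128 bytes cannot fill the 8-block pipeline, so both cases are
+	# handed off to the conventional asm_AES_cbc_encrypt; only bulk
+	# CBC decryption takes the bit-sliced path below.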
+
+	mov	%rsp, %rax
+.Lcbc_dec_prologue:
+	push	%rbp
+	push	%rbx
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	lea	-0x48(%rsp), %rsp
+___
+$code.=<<___ if ($win64);
+	mov	0xa0(%rsp),$arg5	# pull ivp
+	lea	-0xa0(%rsp), %rsp
+	movaps	%xmm6, 0x40(%rsp)
+	movaps	%xmm7, 0x50(%rsp)
+	movaps	%xmm8, 0x60(%rsp)
+	movaps	%xmm9, 0x70(%rsp)
+	movaps	%xmm10, 0x80(%rsp)
+	movaps	%xmm11, 0x90(%rsp)
+	movaps	%xmm12, 0xa0(%rsp)
+	movaps	%xmm13, 0xb0(%rsp)
+	movaps	%xmm14, 0xc0(%rsp)
+	movaps	%xmm15, 0xd0(%rsp)
+.Lcbc_dec_body:
+___
+$code.=<<___;
+	mov	%rsp, %rbp		# backup %rsp
+	mov	240($arg4), %eax	# rounds
+	mov	$arg1, $inp		# backup arguments
+	mov	$arg2, $out
+	mov	$arg3, $len
+	mov	$arg4, $key
+	mov	$arg5, %rbx
+	shr	\$4, $len		# bytes to blocks
+
+	mov	%eax, %edx		# rounds
+	shl	\$7, %rax		# 128 bytes per inner round key
+	sub	\$`128-32`, %rax	# size of bit-sliced key schedule
+	sub	%rax, %rsp
+
+	mov	%rsp, %rax		# pass key schedule
+	mov	$key, %rcx		# pass key
+	mov	%edx, %r10d		# pass rounds
+	call	_bsaes_key_convert
+	pxor	(%rsp),%xmm7		# fix up round 0 key
+	movdqa	%xmm6,(%rax)		# save last round key
+	movdqa	%xmm7,(%rsp)
+
+	movdqu	(%rbx), @XMM[15]	# load IV
+	sub	\$8,$len
+.Lcbc_dec_loop:
+	movdqu	0x00($inp), @XMM[0]	# load input
+	movdqu	0x10($inp), @XMM[1]
+	movdqu	0x20($inp), @XMM[2]
+	movdqu	0x30($inp), @XMM[3]
+	movdqu	0x40($inp), @XMM[4]
+	movdqu	0x50($inp), @XMM[5]
+	mov	%rsp, %rax		# pass key schedule
+	movdqu	0x60($inp), @XMM[6]
+	mov	%edx,%r10d		# pass rounds
+	movdqu	0x70($inp), @XMM[7]
+	movdqa	@XMM[15], 0x20(%rbp)	# put aside IV
+
+	call	_bsaes_decrypt8
+
+	pxor	0x20(%rbp), @XMM[0]	# ^= IV
+	movdqu	0x00($inp), @XMM[8]	# re-load input
+	movdqu	0x10($inp), @XMM[9]
+	pxor	@XMM[8], @XMM[1]
+	movdqu	0x20($inp), @XMM[10]
+	pxor	@XMM[9], @XMM[6]
+	movdqu	0x30($inp), @XMM[11]
+	pxor	@XMM[10], @XMM[4]
+	movdqu	0x40($inp), @XMM[12]
+	pxor	@XMM[11], @XMM[2]
+	movdqu	0x50($inp), @XMM[13]
+	pxor	@XMM[12], @XMM[7]
+	movdqu	0x60($inp), @XMM[14]
+	pxor	@XMM[13], @XMM[3]
+	movdqu	0x70($inp), @XMM[15]	# IV
+	pxor	@XMM[14], @XMM[5]
+	movdqu	@XMM[0], 0x00($out)	# write output
+	lea	0x80($inp), $inp
+	movdqu	@XMM[1], 0x10($out)
+	movdqu	@XMM[6], 0x20($out)
+	movdqu	@XMM[4], 0x30($out)
+	movdqu	@XMM[2], 0x40($out)
+	movdqu	@XMM[7], 0x50($out)
+	movdqu	@XMM[3], 0x60($out)
+	movdqu	@XMM[5], 0x70($out)
+	lea	0x80($out), $out
+	sub	\$8,$len
+	jnc	.Lcbc_dec_loop
+
+	add	\$8,$len
+	jz	.Lcbc_dec_done
+
+	movdqu	0x00($inp), @XMM[0]	# load input
+	mov	%rsp, %rax		# pass key schedule
+	mov	%edx, %r10d		# pass rounds
+	cmp	\$2,$len
+	jb	.Lcbc_dec_one
+	movdqu	0x10($inp), @XMM[1]
+	je	.Lcbc_dec_two
+	movdqu	0x20($inp), @XMM[2]
+	cmp	\$4,$len
+	jb	.Lcbc_dec_three
+	movdqu	0x30($inp), @XMM[3]
+	je	.Lcbc_dec_four
+	movdqu	0x40($inp), @XMM[4]
+	cmp	\$6,$len
+	jb	.Lcbc_dec_five
+	movdqu	0x50($inp), @XMM[5]
+	je	.Lcbc_dec_six
+	movdqu	0x60($inp), @XMM[6]
+	movdqa	@XMM[15], 0x20(%rbp)	# put aside IV
+	call	_bsaes_decrypt8
+	pxor	0x20(%rbp), @XMM[0]	# ^= IV
+	movdqu	0x00($inp), @XMM[8]	# re-load input
+	movdqu	0x10($inp), @XMM[9]
+	pxor	@XMM[8], @XMM[1]
+	movdqu	0x20($inp), @XMM[10]
+	pxor	@XMM[9], @XMM[6]
+	movdqu	0x30($inp), @XMM[11]
+	pxor	@XMM[10], @XMM[4]
+	movdqu	0x40($inp), @XMM[12]
+	pxor	@XMM[11], @XMM[2]
+	movdqu	0x50($inp), @XMM[13]
+	pxor	@XMM[12], @XMM[7]
+	movdqu	0x60($inp), @XMM[15]	# IV
+	pxor	@XMM[13], @XMM[3]
+	movdqu	@XMM[0], 0x00($out)	# write output
+	movdqu	@XMM[1], 0x10($out)
+	movdqu	@XMM[6], 0x20($out)
+	movdqu	@XMM[4], 0x30($out)
+	movdqu	@XMM[2], 0x40($out)
+	movdqu	@XMM[7], 0x50($out)
+	movdqu	@XMM[3], 0x60($out)
+	jmp	.Lcbc_dec_done
+.align	16
+.Lcbc_dec_six:
+	movdqa	@XMM[15], 0x20(%rbp)	# put aside IV
+	call	_bsaes_decrypt8
+	pxor	0x20(%rbp), @XMM[0]	# ^= IV
+	movdqu	0x00($inp), @XMM[8]	# re-load input
+	movdqu	0x10($inp), @XMM[9]
+	pxor	@XMM[8], @XMM[1]
+	movdqu	0x20($inp), @XMM[10]
+	pxor	@XMM[9], @XMM[6]
+	movdqu	0x30($inp), @XMM[11]
+	pxor	@XMM[10], @XMM[4]
+	movdqu	0x40($inp), @XMM[12]
+	pxor	@XMM[11], @XMM[2]
+	movdqu	0x50($inp), @XMM[15]	# IV
+	pxor	@XMM[12], @XMM[7]
+	movdqu	@XMM[0], 0x00($out)	# write output
+	movdqu	@XMM[1], 0x10($out)
+	movdqu	@XMM[6], 0x20($out)
+	movdqu	@XMM[4], 0x30($out)
+	movdqu	@XMM[2], 0x40($out)
+	movdqu	@XMM[7], 0x50($out)
+	jmp	.Lcbc_dec_done
+.align	16
+.Lcbc_dec_five:
+	movdqa	@XMM[15], 0x20(%rbp)	# put aside IV
+	call	_bsaes_decrypt8
+	pxor	0x20(%rbp), @XMM[0]	# ^= IV
+	movdqu	0x00($inp), @XMM[8]	# re-load input
+	movdqu	0x10($inp), @XMM[9]
+	pxor	@XMM[8], @XMM[1]
+	movdqu	0x20($inp), @XMM[10]
+	pxor	@XMM[9], @XMM[6]
+	movdqu	0x30($inp), @XMM[11]
+	pxor	@XMM[10], @XMM[4]
+	movdqu	0x40($inp), @XMM[15]	# IV
+	pxor	@XMM[11], @XMM[2]
+	movdqu	@XMM[0], 0x00($out)	# write output
+	movdqu	@XMM[1], 0x10($out)
+	movdqu	@XMM[6], 0x20($out)
+	movdqu	@XMM[4], 0x30($out)
+	movdqu	@XMM[2], 0x40($out)
+	jmp	.Lcbc_dec_done
+.align	16
+.Lcbc_dec_four:
+	movdqa	@XMM[15], 0x20(%rbp)	# put aside IV
+	call	_bsaes_decrypt8
+	pxor	0x20(%rbp), @XMM[0]	# ^= IV
+	movdqu	0x00($inp), @XMM[8]	# re-load input
+	movdqu	0x10($inp), @XMM[9]
+	pxor	@XMM[8], @XMM[1]
+	movdqu	0x20($inp), @XMM[10]
+	pxor	@XMM[9], @XMM[6]
+	movdqu	0x30($inp), @XMM[15]	# IV
+	pxor	@XMM[10], @XMM[4]
+	movdqu	@XMM[0], 0x00($out)	# write output
+	movdqu	@XMM[1], 0x10($out)
+	movdqu	@XMM[6], 0x20($out)
+	movdqu	@XMM[4], 0x30($out)
+	jmp	.Lcbc_dec_done
+.align	16
+.Lcbc_dec_three:
+	movdqa	@XMM[15], 0x20(%rbp)	# put aside IV
+	call	_bsaes_decrypt8
+	pxor	0x20(%rbp), @XMM[0]	# ^= IV
+	movdqu	0x00($inp), @XMM[8]	# re-load input
+	movdqu	0x10($inp), @XMM[9]
+	pxor	@XMM[8], @XMM[1]
+	movdqu	0x20($inp), @XMM[15]	# IV
+	pxor	@XMM[9], @XMM[6]
+	movdqu	@XMM[0], 0x00($out)	# write output
+	movdqu	@XMM[1], 0x10($out)
+	movdqu	@XMM[6], 0x20($out)
+	jmp	.Lcbc_dec_done
+.align	16
+.Lcbc_dec_two:
+	movdqa	@XMM[15], 0x20(%rbp)	# put aside IV
+	call	_bsaes_decrypt8
+	pxor	0x20(%rbp), @XMM[0]	# ^= IV
+	movdqu	0x00($inp), @XMM[8]	# re-load input
+	movdqu	0x10($inp), @XMM[15]	# IV
+	pxor	@XMM[8], @XMM[1]
+	movdqu	@XMM[0], 0x00($out)	# write output
+	movdqu	@XMM[1], 0x10($out)
+	jmp	.Lcbc_dec_done
+.align	16
+.Lcbc_dec_one:
+	lea	($inp), $arg1
+	lea	0x20(%rbp), $arg2	# buffer output
+	lea	($key), $arg3
+	call	asm_AES_decrypt		# doesn't touch %xmm
+	pxor	0x20(%rbp), @XMM[15]	# ^= IV
+	movdqu	@XMM[15], ($out)	# write output
+	movdqa	@XMM[0], @XMM[15]	# IV
+
+.Lcbc_dec_done:
+	movdqu	@XMM[15], (%rbx)	# return IV
+	lea	(%rsp), %rax
+	pxor	%xmm0, %xmm0
+.Lcbc_dec_bzero:			# wipe key schedule [if any]
+	movdqa	%xmm0, 0x00(%rax)
+	movdqa	%xmm0, 0x10(%rax)
+	lea	0x20(%rax), %rax
+	cmp	%rax, %rbp
+	ja	.Lcbc_dec_bzero
+
+	lea	(%rbp),%rsp		# restore %rsp
+___
+$code.=<<___ if ($win64);
+	movaps	0x40(%rbp), %xmm6
+	movaps	0x50(%rbp), %xmm7
+	movaps	0x60(%rbp), %xmm8
+	movaps	0x70(%rbp), %xmm9
+	movaps	0x80(%rbp), %xmm10
+	movaps	0x90(%rbp), %xmm11
+	movaps	0xa0(%rbp), %xmm12
+	movaps	0xb0(%rbp), %xmm13
+	movaps	0xc0(%rbp), %xmm14
+	movaps	0xd0(%rbp), %xmm15
+	lea	0xa0(%rbp), %rsp
+___
+$code.=<<___;
+	mov	0x48(%rsp), %r15
+	mov	0x50(%rsp), %r14
+	mov	0x58(%rsp), %r13
+	mov	0x60(%rsp), %r12
+	mov	0x68(%rsp), %rbx
+	mov	0x70(%rsp), %rax
+	lea	0x78(%rsp), %rsp
+	mov	%rax, %rbp
+.Lcbc_dec_epilogue:
+	ret
+.size	bsaes_cbc_encrypt,.-bsaes_cbc_encrypt
+
+.globl	bsaes_ctr32_encrypt_blocks
+.type	bsaes_ctr32_encrypt_blocks,\@abi-omnipotent
+.align	16
+bsaes_ctr32_encrypt_blocks:
+	mov	%rsp, %rax
+.Lctr_enc_prologue:
+	push	%rbp
+	push	%rbx
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	lea	-0x48(%rsp), %rsp
+___
+$code.=<<___ if ($win64);
+	mov	0xa0(%rsp),$arg5	# pull ivp
+	lea	-0xa0(%rsp), %rsp
+	movaps	%xmm6, 0x40(%rsp)
+	movaps	%xmm7, 0x50(%rsp)
+	movaps	%xmm8, 0x60(%rsp)
+	movaps	%xmm9, 0x70(%rsp)
+	movaps	%xmm10, 0x80(%rsp)
+	movaps	%xmm11, 0x90(%rsp)
+	movaps	%xmm12, 0xa0(%rsp)
+	movaps	%xmm13, 0xb0(%rsp)
+	movaps	%xmm14, 0xc0(%rsp)
+	movaps	%xmm15, 0xd0(%rsp)
+.Lctr_enc_body:
+___
+$code.=<<___;
+	mov	%rsp, %rbp		# backup %rsp
+	movdqu	($arg5), %xmm0		# load counter
+	mov	240($arg4), %eax	# rounds
+	mov	$arg1, $inp		# backup arguments
+	mov	$arg2, $out
+	mov	$arg3, $len
+	mov	$arg4, $key
+	movdqa	%xmm0, 0x20(%rbp)	# copy counter
+	cmp	\$8, $arg3
+	jb	.Lctr_enc_short
+
+	mov	%eax, %ebx		# rounds
+	shl	\$7, %rax		# 128 bytes per inner round key
+	sub	\$`128-32`, %rax	# size of bit-sliced key schedule
+	sub	%rax, %rsp
+
+	mov	%rsp, %rax		# pass key schedule
+	mov	$key, %rcx		# pass key
+	mov	%ebx, %r10d		# pass rounds
+	call	_bsaes_key_convert
+	pxor	%xmm6,%xmm7		# fix up last round key
+	movdqa	%xmm7,(%rax)		# save last round key
+
+	movdqa	(%rsp), @XMM[9]		# load round0 key
+	lea	.LADD1(%rip), %r11
+	movdqa	0x20(%rbp), @XMM[0]	# counter copy
+	movdqa	-0x20(%r11), @XMM[8]	# .LSWPUP
+	pshufb	@XMM[8], @XMM[9]	# byte swap upper part
+	pshufb	@XMM[8], @XMM[0]
+	movdqa	@XMM[9], (%rsp)		# save adjusted round0 key
+	jmp	.Lctr_enc_loop
+.align	16
+.Lctr_enc_loop:
+	movdqa	@XMM[0], 0x20(%rbp)	# save counter
+	movdqa	@XMM[0], @XMM[1]	# prepare 8 counter values
+	movdqa	@XMM[0], @XMM[2]
+	paddd	0x00(%r11), @XMM[1]	# .LADD1
+	movdqa	@XMM[0], @XMM[3]
+	paddd	0x10(%r11), @XMM[2]	# .LADD2
+	movdqa	@XMM[0], @XMM[4]
+	paddd	0x20(%r11), @XMM[3]	# .LADD3
+	movdqa	@XMM[0], @XMM[5]
+	paddd	0x30(%r11), @XMM[4]	# .LADD4
+	movdqa	@XMM[0], @XMM[6]
+	paddd	0x40(%r11), @XMM[5]	# .LADD5
+	movdqa	@XMM[0], @XMM[7]
+	paddd	0x50(%r11), @XMM[6]	# .LADD6
+	paddd	0x60(%r11), @XMM[7]	# .LADD7
+
+	# Borrow the prologue from _bsaes_encrypt8 to take the opportunity
+	# to flip the byte order of the 32-bit counter
+	movdqa	(%rsp), @XMM[9]		# round 0 key
+	lea	0x10(%rsp), %rax	# pass key schedule
+	movdqa	-0x10(%r11), @XMM[8]	# .LSWPUPM0SR
+	pxor	@XMM[9], @XMM[0]	# xor with round0 key
+	pxor	@XMM[9], @XMM[1]
+	 pshufb	@XMM[8], @XMM[0]
+	pxor	@XMM[9], @XMM[2]
+	 pshufb	@XMM[8], @XMM[1]
+	pxor	@XMM[9], @XMM[3]
+	 pshufb	@XMM[8], @XMM[2]
+	pxor	@XMM[9], @XMM[4]
+	 pshufb	@XMM[8], @XMM[3]
+	pxor	@XMM[9], @XMM[5]
+	 pshufb	@XMM[8], @XMM[4]
+	pxor	@XMM[9], @XMM[6]
+	 pshufb	@XMM[8], @XMM[5]
+	pxor	@XMM[9], @XMM[7]
+	 pshufb	@XMM[8], @XMM[6]
+	lea	.LBS0(%rip), %r11	# constants table
+	 pshufb	@XMM[8], @XMM[7]
+	mov	%ebx,%r10d		# pass rounds
+
+	call	_bsaes_encrypt8_bitslice
+
+	sub	\$8,$len
+	jc	.Lctr_enc_loop_done
+
+	movdqu	0x00($inp), @XMM[8]	# load input
+	movdqu	0x10($inp), @XMM[9]
+	movdqu	0x20($inp), @XMM[10]
+	movdqu	0x30($inp), @XMM[11]
+	movdqu	0x40($inp), @XMM[12]
+	movdqu	0x50($inp), @XMM[13]
+	movdqu	0x60($inp), @XMM[14]
+	movdqu	0x70($inp), @XMM[15]
+	lea	0x80($inp),$inp
+	pxor	@XMM[0], @XMM[8]
+	movdqa	0x20(%rbp), @XMM[0]	# load counter
+	pxor	@XMM[9], @XMM[1]
+	movdqu	@XMM[8], 0x00($out)	# write output
+	pxor	@XMM[10], @XMM[4]
+	movdqu	@XMM[1], 0x10($out)
+	pxor	@XMM[11], @XMM[6]
+	movdqu	@XMM[4], 0x20($out)
+	pxor	@XMM[12], @XMM[3]
+	movdqu	@XMM[6], 0x30($out)
+	pxor	@XMM[13], @XMM[7]
+	movdqu	@XMM[3], 0x40($out)
+	pxor	@XMM[14], @XMM[2]
+	movdqu	@XMM[7], 0x50($out)
+	pxor	@XMM[15], @XMM[5]
+	movdqu	@XMM[2], 0x60($out)
+	lea	.LADD1(%rip), %r11
+	movdqu	@XMM[5], 0x70($out)
+	lea	0x80($out), $out
+	paddd	0x70(%r11), @XMM[0]	# .LADD8
+	jnz	.Lctr_enc_loop
+
+	jmp	.Lctr_enc_done
+.align	16
+.Lctr_enc_loop_done:
+	add	\$8, $len
+	movdqu	0x00($inp), @XMM[8]	# load input
+	pxor	@XMM[8], @XMM[0]
+	movdqu	@XMM[0], 0x00($out)	# write output
+	cmp	\$2,$len
+	jb	.Lctr_enc_done
+	movdqu	0x10($inp), @XMM[9]
+	pxor	@XMM[9], @XMM[1]
+	movdqu	@XMM[1], 0x10($out)
+	je	.Lctr_enc_done
+	movdqu	0x20($inp), @XMM[10]
+	pxor	@XMM[10], @XMM[4]
+	movdqu	@XMM[4], 0x20($out)
+	cmp	\$4,$len
+	jb	.Lctr_enc_done
+	movdqu	0x30($inp), @XMM[11]
+	pxor	@XMM[11], @XMM[6]
+	movdqu	@XMM[6], 0x30($out)
+	je	.Lctr_enc_done
+	movdqu	0x40($inp), @XMM[12]
+	pxor	@XMM[12], @XMM[3]
+	movdqu	@XMM[3], 0x40($out)
+	cmp	\$6,$len
+	jb	.Lctr_enc_done
+	movdqu	0x50($inp), @XMM[13]
+	pxor	@XMM[13], @XMM[7]
+	movdqu	@XMM[7], 0x50($out)
+	je	.Lctr_enc_done
+	movdqu	0x60($inp), @XMM[14]
+	pxor	@XMM[14], @XMM[2]
+	movdqu	@XMM[2], 0x60($out)
+	jmp	.Lctr_enc_done
+
+.align	16
+.Lctr_enc_short:
+	lea	0x20(%rbp), $arg1
+	lea	0x30(%rbp), $arg2
+	lea	($key), $arg3
+	call	asm_AES_encrypt
+	movdqu	($inp), @XMM[1]
+	lea	16($inp), $inp
+	mov	0x2c(%rbp), %eax	# load 32-bit counter
+	bswap	%eax
+	pxor	0x30(%rbp), @XMM[1]
+	inc	%eax			# increment
+	movdqu	@XMM[1], ($out)
+	bswap	%eax
+	lea	16($out), $out
+	mov	%eax, 0x2c(%rsp)	# save 32-bit counter
+	dec	$len
+	jnz	.Lctr_enc_short
+
+.Lctr_enc_done:
+	lea	(%rsp), %rax
+	pxor	%xmm0, %xmm0
+.Lctr_enc_bzero:			# wipe key schedule [if any]
+	movdqa	%xmm0, 0x00(%rax)
+	movdqa	%xmm0, 0x10(%rax)
+	lea	0x20(%rax), %rax
+	cmp	%rax, %rbp
+	ja	.Lctr_enc_bzero
+
+	lea	(%rbp),%rsp		# restore %rsp
+___
+$code.=<<___ if ($win64);
+	movaps	0x40(%rbp), %xmm6
+	movaps	0x50(%rbp), %xmm7
+	movaps	0x60(%rbp), %xmm8
+	movaps	0x70(%rbp), %xmm9
+	movaps	0x80(%rbp), %xmm10
+	movaps	0x90(%rbp), %xmm11
+	movaps	0xa0(%rbp), %xmm12
+	movaps	0xb0(%rbp), %xmm13
+	movaps	0xc0(%rbp), %xmm14
+	movaps	0xd0(%rbp), %xmm15
+	lea	0xa0(%rbp), %rsp
+___
+$code.=<<___;
+	mov	0x48(%rsp), %r15
+	mov	0x50(%rsp), %r14
+	mov	0x58(%rsp), %r13
+	mov	0x60(%rsp), %r12
+	mov	0x68(%rsp), %rbx
+	mov	0x70(%rsp), %rax
+	lea	0x78(%rsp), %rsp
+	mov	%rax, %rbp
+.Lctr_enc_epilogue:
+	ret
+.size	bsaes_ctr32_encrypt_blocks,.-bsaes_ctr32_encrypt_blocks
+___
+######################################################################
+# void bsaes_xts_[en|de]crypt(const char *inp,char *out,size_t len,
+#	const AES_KEY *key1, const AES_KEY *key2,
+#	const unsigned char iv[16]);
+#
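+# Between consecutive blocks the 128-bit tweak is multiplied by x in
+# GF(2^128); informally, t <- (t << 1) ^ (msb(t) ? 0x87 : 0). The
+# pcmpgtd/pshufd/pand sequence with .Lxts_magic in the code below is a
+# vectorized form of exactly that carry propagation.
+#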
+my ($twmask,$twres,$twtmp)=@XMM[13..15];
+$code.=<<___;
+.globl	bsaes_xts_encrypt
+.type	bsaes_xts_encrypt,\@abi-omnipotent
+.align	16
+bsaes_xts_encrypt:
+	mov	%rsp, %rax
+.Lxts_enc_prologue:
+	push	%rbp
+	push	%rbx
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	lea	-0x48(%rsp), %rsp
+___
+$code.=<<___ if ($win64);
+	mov	0xa0(%rsp),$arg5	# pull key2
+	mov	0xa8(%rsp),$arg6	# pull ivp
+	lea	-0xa0(%rsp), %rsp
+	movaps	%xmm6, 0x40(%rsp)
+	movaps	%xmm7, 0x50(%rsp)
+	movaps	%xmm8, 0x60(%rsp)
+	movaps	%xmm9, 0x70(%rsp)
+	movaps	%xmm10, 0x80(%rsp)
+	movaps	%xmm11, 0x90(%rsp)
+	movaps	%xmm12, 0xa0(%rsp)
+	movaps	%xmm13, 0xb0(%rsp)
+	movaps	%xmm14, 0xc0(%rsp)
+	movaps	%xmm15, 0xd0(%rsp)
+.Lxts_enc_body:
+___
+$code.=<<___;
+	mov	%rsp, %rbp		# backup %rsp
+	mov	$arg1, $inp		# backup arguments
+	mov	$arg2, $out
+	mov	$arg3, $len
+	mov	$arg4, $key
+
+	lea	($arg6), $arg1
+	lea	0x20(%rbp), $arg2
+	lea	($arg5), $arg3
+	call	asm_AES_encrypt		# generate initial tweak
+
+	mov	240($key), %eax		# rounds
+	mov	$len, %rbx		# backup $len
+
+	mov	%eax, %edx		# rounds
+	shl	\$7, %rax		# 128 bytes per inner round key
+	sub	\$`128-32`, %rax	# size of bit-sliced key schedule
+	sub	%rax, %rsp
+
+	mov	%rsp, %rax		# pass key schedule
+	mov	$key, %rcx		# pass key
+	mov	%edx, %r10d		# pass rounds
+	call	_bsaes_key_convert
+	pxor	%xmm6, %xmm7		# fix up last round key
+	movdqa	%xmm7, (%rax)		# save last round key
+
+	and	\$-16, $len
+	sub	\$0x80, %rsp		# place for tweak[8]
+	movdqa	0x20(%rbp), @XMM[7]	# initial tweak
+
+	pxor	$twtmp, $twtmp
+	movdqa	.Lxts_magic(%rip), $twmask
+	pcmpgtd	@XMM[7], $twtmp		# broadcast upper bits
+
+	sub	\$0x80, $len
+	jc	.Lxts_enc_short
+	jmp	.Lxts_enc_loop
+
+.align	16
+.Lxts_enc_loop:
+___
+    for ($i=0;$i<7;$i++) {
+    $code.=<<___;
+	pshufd	\$0x13, $twtmp, $twres
+	pxor	$twtmp, $twtmp
+	movdqa	@XMM[7], @XMM[$i]
+	movdqa	@XMM[7], `0x10*$i`(%rsp)# save tweak[$i]
+	paddq	@XMM[7], @XMM[7]	# psllq	1,$tweak
+	pand	$twmask, $twres		# isolate carry and residue
+	pcmpgtd	@XMM[7], $twtmp		# broadcast upper bits
+	pxor	$twres, @XMM[7]
+___
+    $code.=<<___ if ($i>=1);
+	movdqu	`0x10*($i-1)`($inp), @XMM[8+$i-1]
+___
+    $code.=<<___ if ($i>=2);
+	pxor	@XMM[8+$i-2], @XMM[$i-2]# input[] ^ tweak[]
+___
+    }
+$code.=<<___;
+	movdqu	0x60($inp), @XMM[8+6]
+	pxor	@XMM[8+5], @XMM[5]
+	movdqu	0x70($inp), @XMM[8+7]
+	lea	0x80($inp), $inp
+	movdqa	@XMM[7], 0x70(%rsp)
+	pxor	@XMM[8+6], @XMM[6]
+	lea	0x80(%rsp), %rax	# pass key schedule
+	pxor	@XMM[8+7], @XMM[7]
+	mov	%edx, %r10d		# pass rounds
+
+	call	_bsaes_encrypt8
+
+	pxor	0x00(%rsp), @XMM[0]	# ^= tweak[]
+	pxor	0x10(%rsp), @XMM[1]
+	movdqu	@XMM[0], 0x00($out)	# write output
+	pxor	0x20(%rsp), @XMM[4]
+	movdqu	@XMM[1], 0x10($out)
+	pxor	0x30(%rsp), @XMM[6]
+	movdqu	@XMM[4], 0x20($out)
+	pxor	0x40(%rsp), @XMM[3]
+	movdqu	@XMM[6], 0x30($out)
+	pxor	0x50(%rsp), @XMM[7]
+	movdqu	@XMM[3], 0x40($out)
+	pxor	0x60(%rsp), @XMM[2]
+	movdqu	@XMM[7], 0x50($out)
+	pxor	0x70(%rsp), @XMM[5]
+	movdqu	@XMM[2], 0x60($out)
+	movdqu	@XMM[5], 0x70($out)
+	lea	0x80($out), $out
+
+	movdqa	0x70(%rsp), @XMM[7]	# prepare next iteration tweak
+	pxor	$twtmp, $twtmp
+	movdqa	.Lxts_magic(%rip), $twmask
+	pcmpgtd	@XMM[7], $twtmp
+	pshufd	\$0x13, $twtmp, $twres
+	pxor	$twtmp, $twtmp
+	paddq	@XMM[7], @XMM[7]	# psllq	1,$tweak
+	pand	$twmask, $twres		# isolate carry and residue
+	pcmpgtd	@XMM[7], $twtmp		# broadcast upper bits
+	pxor	$twres, @XMM[7]
+
+	sub	\$0x80,$len
+	jnc	.Lxts_enc_loop
+
+.Lxts_enc_short:
+	add	\$0x80, $len
+	jz	.Lxts_enc_done
+___
+    for ($i=0;$i<7;$i++) {
+    $code.=<<___;
+	pshufd	\$0x13, $twtmp, $twres
+	pxor	$twtmp, $twtmp
+	movdqa	@XMM[7], @XMM[$i]
+	movdqa	@XMM[7], `0x10*$i`(%rsp)# save tweak[$i]
+	paddq	@XMM[7], @XMM[7]	# psllq	1,$tweak
+	pand	$twmask, $twres		# isolate carry and residue
+	pcmpgtd	@XMM[7], $twtmp		# broadcast upper bits
+	pxor	$twres, @XMM[7]
+___
+    $code.=<<___ if ($i>=1);
+	movdqu	`0x10*($i-1)`($inp), @XMM[8+$i-1]
+	cmp	\$`0x10*$i`,$len
+	je	.Lxts_enc_$i
+___
+    $code.=<<___ if ($i>=2);
+	pxor	@XMM[8+$i-2], @XMM[$i-2]# input[] ^ tweak[]
+___
+    }
+$code.=<<___;
+	movdqu	0x60($inp), @XMM[8+6]
+	pxor	@XMM[8+5], @XMM[5]
+	movdqa	@XMM[7], 0x70(%rsp)
+	lea	0x70($inp), $inp
+	pxor	@XMM[8+6], @XMM[6]
+	lea	0x80(%rsp), %rax	# pass key schedule
+	mov	%edx, %r10d		# pass rounds
+
+	call	_bsaes_encrypt8
+
+	pxor	0x00(%rsp), @XMM[0]	# ^= tweak[]
+	pxor	0x10(%rsp), @XMM[1]
+	movdqu	@XMM[0], 0x00($out)	# write output
+	pxor	0x20(%rsp), @XMM[4]
+	movdqu	@XMM[1], 0x10($out)
+	pxor	0x30(%rsp), @XMM[6]
+	movdqu	@XMM[4], 0x20($out)
+	pxor	0x40(%rsp), @XMM[3]
+	movdqu	@XMM[6], 0x30($out)
+	pxor	0x50(%rsp), @XMM[7]
+	movdqu	@XMM[3], 0x40($out)
+	pxor	0x60(%rsp), @XMM[2]
+	movdqu	@XMM[7], 0x50($out)
+	movdqu	@XMM[2], 0x60($out)
+	lea	0x70($out), $out
+
+	movdqa	0x70(%rsp), @XMM[7]	# next iteration tweak
+	jmp	.Lxts_enc_done
+.align	16
+.Lxts_enc_6:
+	pxor	@XMM[8+4], @XMM[4]
+	lea	0x60($inp), $inp
+	pxor	@XMM[8+5], @XMM[5]
+	lea	0x80(%rsp), %rax	# pass key schedule
+	mov	%edx, %r10d		# pass rounds
+
+	call	_bsaes_encrypt8
+
+	pxor	0x00(%rsp), @XMM[0]	# ^= tweak[]
+	pxor	0x10(%rsp), @XMM[1]
+	movdqu	@XMM[0], 0x00($out)	# write output
+	pxor	0x20(%rsp), @XMM[4]
+	movdqu	@XMM[1], 0x10($out)
+	pxor	0x30(%rsp), @XMM[6]
+	movdqu	@XMM[4], 0x20($out)
+	pxor	0x40(%rsp), @XMM[3]
+	movdqu	@XMM[6], 0x30($out)
+	pxor	0x50(%rsp), @XMM[7]
+	movdqu	@XMM[3], 0x40($out)
+	movdqu	@XMM[7], 0x50($out)
+	lea	0x60($out), $out
+
+	movdqa	0x60(%rsp), @XMM[7]	# next iteration tweak
+	jmp	.Lxts_enc_done
+.align	16
+.Lxts_enc_5:
+	pxor	@XMM[8+3], @XMM[3]
+	lea	0x50($inp), $inp
+	pxor	@XMM[8+4], @XMM[4]
+	lea	0x80(%rsp), %rax	# pass key schedule
+	mov	%edx, %r10d		# pass rounds
+
+	call	_bsaes_encrypt8
+
+	pxor	0x00(%rsp), @XMM[0]	# ^= tweak[]
+	pxor	0x10(%rsp), @XMM[1]
+	movdqu	@XMM[0], 0x00($out)	# write output
+	pxor	0x20(%rsp), @XMM[4]
+	movdqu	@XMM[1], 0x10($out)
+	pxor	0x30(%rsp), @XMM[6]
+	movdqu	@XMM[4], 0x20($out)
+	pxor	0x40(%rsp), @XMM[3]
+	movdqu	@XMM[6], 0x30($out)
+	movdqu	@XMM[3], 0x40($out)
+	lea	0x50($out), $out
+
+	movdqa	0x50(%rsp), @XMM[7]	# next iteration tweak
+	jmp	.Lxts_enc_done
+.align	16
+.Lxts_enc_4:
+	pxor	@XMM[8+2], @XMM[2]
+	lea	0x40($inp), $inp
+	pxor	@XMM[8+3], @XMM[3]
+	lea	0x80(%rsp), %rax	# pass key schedule
+	mov	%edx, %r10d		# pass rounds
+
+	call	_bsaes_encrypt8
+
+	pxor	0x00(%rsp), @XMM[0]	# ^= tweak[]
+	pxor	0x10(%rsp), @XMM[1]
+	movdqu	@XMM[0], 0x00($out)	# write output
+	pxor	0x20(%rsp), @XMM[4]
+	movdqu	@XMM[1], 0x10($out)
+	pxor	0x30(%rsp), @XMM[6]
+	movdqu	@XMM[4], 0x20($out)
+	movdqu	@XMM[6], 0x30($out)
+	lea	0x40($out), $out
+
+	movdqa	0x40(%rsp), @XMM[7]	# next iteration tweak
+	jmp	.Lxts_enc_done
+.align	16
+.Lxts_enc_3:
+	pxor	@XMM[8+1], @XMM[1]
+	lea	0x30($inp), $inp
+	pxor	@XMM[8+2], @XMM[2]
+	lea	0x80(%rsp), %rax	# pass key schedule
+	mov	%edx, %r10d		# pass rounds
+
+	call	_bsaes_encrypt8
+
+	pxor	0x00(%rsp), @XMM[0]	# ^= tweak[]
+	pxor	0x10(%rsp), @XMM[1]
+	movdqu	@XMM[0], 0x00($out)	# write output
+	pxor	0x20(%rsp), @XMM[4]
+	movdqu	@XMM[1], 0x10($out)
+	movdqu	@XMM[4], 0x20($out)
+	lea	0x30($out), $out
+
+	movdqa	0x30(%rsp), @XMM[7]	# next iteration tweak
+	jmp	.Lxts_enc_done
+.align	16
+.Lxts_enc_2:
+	pxor	@XMM[8+0], @XMM[0]
+	lea	0x20($inp), $inp
+	pxor	@XMM[8+1], @XMM[1]
+	lea	0x80(%rsp), %rax	# pass key schedule
+	mov	%edx, %r10d		# pass rounds
+
+	call	_bsaes_encrypt8
+
+	pxor	0x00(%rsp), @XMM[0]	# ^= tweak[]
+	pxor	0x10(%rsp), @XMM[1]
+	movdqu	@XMM[0], 0x00($out)	# write output
+	movdqu	@XMM[1], 0x10($out)
+	lea	0x20($out), $out
+
+	movdqa	0x20(%rsp), @XMM[7]	# next iteration tweak
+	jmp	.Lxts_enc_done
+.align	16
+.Lxts_enc_1:
+	pxor	@XMM[0], @XMM[8]
+	lea	0x10($inp), $inp
+	movdqa	@XMM[8], 0x20(%rbp)
+	lea	0x20(%rbp), $arg1
+	lea	0x20(%rbp), $arg2
+	lea	($key), $arg3
+	call	asm_AES_encrypt		# doesn't touch %xmm
+	pxor	0x20(%rbp), @XMM[0]	# ^= tweak[]
+	#pxor	@XMM[8], @XMM[0]
+	#lea	0x80(%rsp), %rax	# pass key schedule
+	#mov	%edx, %r10d		# pass rounds
+	#call	_bsaes_encrypt8
+	#pxor	0x00(%rsp), @XMM[0]	# ^= tweak[]
+	movdqu	@XMM[0], 0x00($out)	# write output
+	lea	0x10($out), $out
+
+	movdqa	0x10(%rsp), @XMM[7]	# next iteration tweak
+
+.Lxts_enc_done:
+	and	\$15, %ebx
+	jz	.Lxts_enc_ret
+	mov	$out, %rdx
+
+.Lxts_enc_steal:
+	movzb	($inp), %eax
+	movzb	-16(%rdx), %ecx
+	lea	1($inp), $inp
+	mov	%al, -16(%rdx)
+	mov	%cl, 0(%rdx)
+	lea	1(%rdx), %rdx
+	sub	\$1,%ebx
+	jnz	.Lxts_enc_steal
+
+	movdqu	-16($out), @XMM[0]
+	lea	0x20(%rbp), $arg1
+	pxor	@XMM[7], @XMM[0]
+	lea	0x20(%rbp), $arg2
+	movdqa	@XMM[0], 0x20(%rbp)
+	lea	($key), $arg3
+	call	asm_AES_encrypt		# doesn't touch %xmm
+	pxor	0x20(%rbp), @XMM[7]
+	movdqu	@XMM[7], -16($out)
+
+.Lxts_enc_ret:
+	lea	(%rsp), %rax
+	pxor	%xmm0, %xmm0
+.Lxts_enc_bzero:			# wipe key schedule [if any]
+	movdqa	%xmm0, 0x00(%rax)
+	movdqa	%xmm0, 0x10(%rax)
+	lea	0x20(%rax), %rax
+	cmp	%rax, %rbp
+	ja	.Lxts_enc_bzero
+
+	lea	(%rbp),%rsp		# restore %rsp
+___
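+
+# For a trailing partial block .Lxts_enc_steal above performs standard XTS
+# ciphertext stealing: the tail plaintext bytes are swapped into the last
+# full ciphertext block, which is then re-encrypted under the final tweak,
+# while the displaced ciphertext bytes become the short last output block.
+# A string-level sketch (illustrative only, not part of upstream OpenSSL,
+# never called; $encrypt stands in for a 16-byte encryption under the final
+# tweak):
+sub _xts_enc_steal_ref {
+	my ($prev_ct, $tail, $encrypt) = @_;		# $tail is 1..15 bytes
+	my $r = length($tail);
+	my $short_ct = substr($prev_ct, 0, $r);		# stolen ciphertext bytes
+	my $block    = $tail . substr($prev_ct, $r);	# tail padded with old ct
+	return ($encrypt->($block), $short_ct);		# full block, short tail
+}
+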
+$code.=<<___ if ($win64);
+	movaps	0x40(%rbp), %xmm6
+	movaps	0x50(%rbp), %xmm7
+	movaps	0x60(%rbp), %xmm8
+	movaps	0x70(%rbp), %xmm9
+	movaps	0x80(%rbp), %xmm10
+	movaps	0x90(%rbp), %xmm11
+	movaps	0xa0(%rbp), %xmm12
+	movaps	0xb0(%rbp), %xmm13
+	movaps	0xc0(%rbp), %xmm14
+	movaps	0xd0(%rbp), %xmm15
+	lea	0xa0(%rbp), %rsp
+___
+$code.=<<___;
+	mov	0x48(%rsp), %r15
+	mov	0x50(%rsp), %r14
+	mov	0x58(%rsp), %r13
+	mov	0x60(%rsp), %r12
+	mov	0x68(%rsp), %rbx
+	mov	0x70(%rsp), %rax
+	lea	0x78(%rsp), %rsp
+	mov	%rax, %rbp
+.Lxts_enc_epilogue:
+	ret
+.size	bsaes_xts_encrypt,.-bsaes_xts_encrypt
+
+.globl	bsaes_xts_decrypt
+.type	bsaes_xts_decrypt,\@abi-omnipotent
+.align	16
+bsaes_xts_decrypt:
+	mov	%rsp, %rax
+.Lxts_dec_prologue:
+	push	%rbp
+	push	%rbx
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	lea	-0x48(%rsp), %rsp
+___
+$code.=<<___ if ($win64);
+	mov	0xa0(%rsp),$arg5	# pull key2
+	mov	0xa8(%rsp),$arg6	# pull ivp
+	lea	-0xa0(%rsp), %rsp
+	movaps	%xmm6, 0x40(%rsp)
+	movaps	%xmm7, 0x50(%rsp)
+	movaps	%xmm8, 0x60(%rsp)
+	movaps	%xmm9, 0x70(%rsp)
+	movaps	%xmm10, 0x80(%rsp)
+	movaps	%xmm11, 0x90(%rsp)
+	movaps	%xmm12, 0xa0(%rsp)
+	movaps	%xmm13, 0xb0(%rsp)
+	movaps	%xmm14, 0xc0(%rsp)
+	movaps	%xmm15, 0xd0(%rsp)
+.Lxts_dec_body:
+___
+$code.=<<___;
+	mov	%rsp, %rbp		# backup %rsp
+	mov	$arg1, $inp		# backup arguments
+	mov	$arg2, $out
+	mov	$arg3, $len
+	mov	$arg4, $key
+
+	lea	($arg6), $arg1
+	lea	0x20(%rbp), $arg2
+	lea	($arg5), $arg3
+	call	asm_AES_encrypt		# generate initial tweak
+
+	mov	240($key), %eax		# rounds
+	mov	$len, %rbx		# backup $len
+
+	mov	%eax, %edx		# rounds
+	shl	\$7, %rax		# 128 bytes per inner round key
+	sub	\$`128-32`, %rax	# size of bit-sliced key schedule
+	sub	%rax, %rsp
+
+	mov	%rsp, %rax		# pass key schedule
+	mov	$key, %rcx		# pass key
+	mov	%edx, %r10d		# pass rounds
+	call	_bsaes_key_convert
+	pxor	(%rsp), %xmm7		# fix up round 0 key
+	movdqa	%xmm6, (%rax)		# save last round key
+	movdqa	%xmm7, (%rsp)
+
+	xor	%eax, %eax		# if ($len%16) len-=16;
+	and	\$-16, $len
+	test	\$15, %ebx
+	setnz	%al
+	shl	\$4, %rax
+	sub	%rax, $len
+
+	sub	\$0x80, %rsp		# place for tweak[8]
+	movdqa	0x20(%rbp), @XMM[7]	# initial tweak
+
+	pxor	$twtmp, $twtmp
+	movdqa	.Lxts_magic(%rip), $twmask
+	pcmpgtd	@XMM[7], $twtmp		# broadcast upper bits
+
+	sub	\$0x80, $len
+	jc	.Lxts_dec_short
+	jmp	.Lxts_dec_loop
+
+.align	16
+.Lxts_dec_loop:
+___
+    for ($i=0;$i<7;$i++) {
+    $code.=<<___;
+	pshufd	\$0x13, $twtmp, $twres
+	pxor	$twtmp, $twtmp
+	movdqa	@XMM[7], @XMM[$i]
+	movdqa	@XMM[7], `0x10*$i`(%rsp)# save tweak[$i]
+	paddq	@XMM[7], @XMM[7]	# psllq	1,$tweak
+	pand	$twmask, $twres		# isolate carry and residue
+	pcmpgtd	@XMM[7], $twtmp		# broadcast upper bits
+	pxor	$twres, @XMM[7]
+___
+    $code.=<<___ if ($i>=1);
+	movdqu	`0x10*($i-1)`($inp), @XMM[8+$i-1]
+___
+    $code.=<<___ if ($i>=2);
+	pxor	@XMM[8+$i-2], @XMM[$i-2]# input[] ^ tweak[]
+___
+    }
+$code.=<<___;
+	movdqu	0x60($inp), @XMM[8+6]
+	pxor	@XMM[8+5], @XMM[5]
+	movdqu	0x70($inp), @XMM[8+7]
+	lea	0x80($inp), $inp
+	movdqa	@XMM[7], 0x70(%rsp)
+	pxor	@XMM[8+6], @XMM[6]
+	lea	0x80(%rsp), %rax	# pass key schedule
+	pxor	@XMM[8+7], @XMM[7]
+	mov	%edx, %r10d		# pass rounds
+
+	call	_bsaes_decrypt8
+
+	pxor	0x00(%rsp), @XMM[0]	# ^= tweak[]
+	pxor	0x10(%rsp), @XMM[1]
+	movdqu	@XMM[0], 0x00($out)	# write output
+	pxor	0x20(%rsp), @XMM[6]
+	movdqu	@XMM[1], 0x10($out)
+	pxor	0x30(%rsp), @XMM[4]
+	movdqu	@XMM[6], 0x20($out)
+	pxor	0x40(%rsp), @XMM[2]
+	movdqu	@XMM[4], 0x30($out)
+	pxor	0x50(%rsp), @XMM[7]
+	movdqu	@XMM[2], 0x40($out)
+	pxor	0x60(%rsp), @XMM[3]
+	movdqu	@XMM[7], 0x50($out)
+	pxor	0x70(%rsp), @XMM[5]
+	movdqu	@XMM[3], 0x60($out)
+	movdqu	@XMM[5], 0x70($out)
+	lea	0x80($out), $out
+
+	movdqa	0x70(%rsp), @XMM[7]	# prepare next iteration tweak
+	pxor	$twtmp, $twtmp
+	movdqa	.Lxts_magic(%rip), $twmask
+	pcmpgtd	@XMM[7], $twtmp
+	pshufd	\$0x13, $twtmp, $twres
+	pxor	$twtmp, $twtmp
+	paddq	@XMM[7], @XMM[7]	# psllq	1,$tweak
+	pand	$twmask, $twres		# isolate carry and residue
+	pcmpgtd	@XMM[7], $twtmp		# broadcast upper bits
+	pxor	$twres, @XMM[7]
+
+	sub	\$0x80,$len
+	jnc	.Lxts_dec_loop
+
+.Lxts_dec_short:
+	add	\$0x80, $len
+	jz	.Lxts_dec_done
+___
+    for ($i=0;$i<7;$i++) {
+    $code.=<<___;
+	pshufd	\$0x13, $twtmp, $twres
+	pxor	$twtmp, $twtmp
+	movdqa	@XMM[7], @XMM[$i]
+	movdqa	@XMM[7], `0x10*$i`(%rsp)# save tweak[$i]
+	paddq	@XMM[7], @XMM[7]	# psllq	1,$tweak
+	pand	$twmask, $twres		# isolate carry and residue
+	pcmpgtd	@XMM[7], $twtmp		# broadcast upper bits
+	pxor	$twres, @XMM[7]
+___
+    $code.=<<___ if ($i>=1);
+	movdqu	`0x10*($i-1)`($inp), @XMM[8+$i-1]
+	cmp	\$`0x10*$i`,$len
+	je	.Lxts_dec_$i
+___
+    $code.=<<___ if ($i>=2);
+	pxor	@XMM[8+$i-2], @XMM[$i-2]# input[] ^ tweak[]
+___
+    }
+$code.=<<___;
+	movdqu	0x60($inp), @XMM[8+6]
+	pxor	@XMM[8+5], @XMM[5]
+	movdqa	@XMM[7], 0x70(%rsp)
+	lea	0x70($inp), $inp
+	pxor	@XMM[8+6], @XMM[6]
+	lea	0x80(%rsp), %rax	# pass key schedule
+	mov	%edx, %r10d		# pass rounds
+
+	call	_bsaes_decrypt8
+
+	pxor	0x00(%rsp), @XMM[0]	# ^= tweak[]
+	pxor	0x10(%rsp), @XMM[1]
+	movdqu	@XMM[0], 0x00($out)	# write output
+	pxor	0x20(%rsp), @XMM[6]
+	movdqu	@XMM[1], 0x10($out)
+	pxor	0x30(%rsp), @XMM[4]
+	movdqu	@XMM[6], 0x20($out)
+	pxor	0x40(%rsp), @XMM[2]
+	movdqu	@XMM[4], 0x30($out)
+	pxor	0x50(%rsp), @XMM[7]
+	movdqu	@XMM[2], 0x40($out)
+	pxor	0x60(%rsp), @XMM[3]
+	movdqu	@XMM[7], 0x50($out)
+	movdqu	@XMM[3], 0x60($out)
+	lea	0x70($out), $out
+
+	movdqa	0x70(%rsp), @XMM[7]	# next iteration tweak
+	jmp	.Lxts_dec_done
+.align	16
+.Lxts_dec_6:
+	pxor	@XMM[8+4], @XMM[4]
+	lea	0x60($inp), $inp
+	pxor	@XMM[8+5], @XMM[5]
+	lea	0x80(%rsp), %rax	# pass key schedule
+	mov	%edx, %r10d		# pass rounds
+
+	call	_bsaes_decrypt8
+
+	pxor	0x00(%rsp), @XMM[0]	# ^= tweak[]
+	pxor	0x10(%rsp), @XMM[1]
+	movdqu	@XMM[0], 0x00($out)	# write output
+	pxor	0x20(%rsp), @XMM[6]
+	movdqu	@XMM[1], 0x10($out)
+	pxor	0x30(%rsp), @XMM[4]
+	movdqu	@XMM[6], 0x20($out)
+	pxor	0x40(%rsp), @XMM[2]
+	movdqu	@XMM[4], 0x30($out)
+	pxor	0x50(%rsp), @XMM[7]
+	movdqu	@XMM[2], 0x40($out)
+	movdqu	@XMM[7], 0x50($out)
+	lea	0x60($out), $out
+
+	movdqa	0x60(%rsp), @XMM[7]	# next iteration tweak
+	jmp	.Lxts_dec_done
+.align	16
+.Lxts_dec_5:
+	pxor	@XMM[8+3], @XMM[3]
+	lea	0x50($inp), $inp
+	pxor	@XMM[8+4], @XMM[4]
+	lea	0x80(%rsp), %rax	# pass key schedule
+	mov	%edx, %r10d		# pass rounds
+
+	call	_bsaes_decrypt8
+
+	pxor	0x00(%rsp), @XMM[0]	# ^= tweak[]
+	pxor	0x10(%rsp), @XMM[1]
+	movdqu	@XMM[0], 0x00($out)	# write output
+	pxor	0x20(%rsp), @XMM[6]
+	movdqu	@XMM[1], 0x10($out)
+	pxor	0x30(%rsp), @XMM[4]
+	movdqu	@XMM[6], 0x20($out)
+	pxor	0x40(%rsp), @XMM[2]
+	movdqu	@XMM[4], 0x30($out)
+	movdqu	@XMM[2], 0x40($out)
+	lea	0x50($out), $out
+
+	movdqa	0x50(%rsp), @XMM[7]	# next iteration tweak
+	jmp	.Lxts_dec_done
+.align	16
+.Lxts_dec_4:
+	pxor	@XMM[8+2], @XMM[2]
+	lea	0x40($inp), $inp
+	pxor	@XMM[8+3], @XMM[3]
+	lea	0x80(%rsp), %rax	# pass key schedule
+	mov	%edx, %r10d		# pass rounds
+
+	call	_bsaes_decrypt8
+
+	pxor	0x00(%rsp), @XMM[0]	# ^= tweak[]
+	pxor	0x10(%rsp), @XMM[1]
+	movdqu	@XMM[0], 0x00($out)	# write output
+	pxor	0x20(%rsp), @XMM[6]
+	movdqu	@XMM[1], 0x10($out)
+	pxor	0x30(%rsp), @XMM[4]
+	movdqu	@XMM[6], 0x20($out)
+	movdqu	@XMM[4], 0x30($out)
+	lea	0x40($out), $out
+
+	movdqa	0x40(%rsp), @XMM[7]	# next iteration tweak
+	jmp	.Lxts_dec_done
+.align	16
+.Lxts_dec_3:
+	pxor	@XMM[8+1], @XMM[1]
+	lea	0x30($inp), $inp
+	pxor	@XMM[8+2], @XMM[2]
+	lea	0x80(%rsp), %rax	# pass key schedule
+	mov	%edx, %r10d		# pass rounds
+
+	call	_bsaes_decrypt8
+
+	pxor	0x00(%rsp), @XMM[0]	# ^= tweak[]
+	pxor	0x10(%rsp), @XMM[1]
+	movdqu	@XMM[0], 0x00($out)	# write output
+	pxor	0x20(%rsp), @XMM[6]
+	movdqu	@XMM[1], 0x10($out)
+	movdqu	@XMM[6], 0x20($out)
+	lea	0x30($out), $out
+
+	movdqa	0x30(%rsp), @XMM[7]	# next iteration tweak
+	jmp	.Lxts_dec_done
+.align	16
+.Lxts_dec_2:
+	pxor	@XMM[8+0], @XMM[0]
+	lea	0x20($inp), $inp
+	pxor	@XMM[8+1], @XMM[1]
+	lea	0x80(%rsp), %rax	# pass key schedule
+	mov	%edx, %r10d		# pass rounds
+
+	call	_bsaes_decrypt8
+
+	pxor	0x00(%rsp), @XMM[0]	# ^= tweak[]
+	pxor	0x10(%rsp), @XMM[1]
+	movdqu	@XMM[0], 0x00($out)	# write output
+	movdqu	@XMM[1], 0x10($out)
+	lea	0x20($out), $out
+
+	movdqa	0x20(%rsp), @XMM[7]	# next iteration tweak
+	jmp	.Lxts_dec_done
+.align	16
+.Lxts_dec_1:
+	pxor	@XMM[0], @XMM[8]
+	lea	0x10($inp), $inp
+	movdqa	@XMM[8], 0x20(%rbp)
+	lea	0x20(%rbp), $arg1
+	lea	0x20(%rbp), $arg2
+	lea	($key), $arg3
+	call	asm_AES_decrypt		# doesn't touch %xmm
+	pxor	0x20(%rbp), @XMM[0]	# ^= tweak[]
+	#pxor	@XMM[8], @XMM[0]
+	#lea	0x80(%rsp), %rax	# pass key schedule
+	#mov	%edx, %r10d		# pass rounds
+	#call	_bsaes_decrypt8
+	#pxor	0x00(%rsp), @XMM[0]	# ^= tweak[]
+	movdqu	@XMM[0], 0x00($out)	# write output
+	lea	0x10($out), $out
+
+	movdqa	0x10(%rsp), @XMM[7]	# next iteration tweak
+
+.Lxts_dec_done:
+	and	\$15, %ebx
+	jz	.Lxts_dec_ret
+
+	pxor	$twtmp, $twtmp
+	movdqa	.Lxts_magic(%rip), $twmask
+	pcmpgtd	@XMM[7], $twtmp
+	pshufd	\$0x13, $twtmp, $twres
+	movdqa	@XMM[7], @XMM[6]
+	paddq	@XMM[7], @XMM[7]	# psllq 1,$tweak
+	pand	$twmask, $twres		# isolate carry and residue
+	movdqu	($inp), @XMM[0]
+	pxor	$twres, @XMM[7]
+
+	lea	0x20(%rbp), $arg1
+	pxor	@XMM[7], @XMM[0]
+	lea	0x20(%rbp), $arg2
+	movdqa	@XMM[0], 0x20(%rbp)
+	lea	($key), $arg3
+	call	asm_AES_decrypt		# doesn't touch %xmm
+	pxor	0x20(%rbp), @XMM[7]
+	mov	$out, %rdx
+	movdqu	@XMM[7], ($out)
+
+.Lxts_dec_steal:
+	movzb	16($inp), %eax
+	movzb	(%rdx), %ecx
+	lea	1($inp), $inp
+	mov	%al, (%rdx)
+	mov	%cl, 16(%rdx)
+	lea	1(%rdx), %rdx
+	sub	\$1,%ebx
+	jnz	.Lxts_dec_steal
+
+	movdqu	($out), @XMM[0]
+	lea	0x20(%rbp), $arg1
+	pxor	@XMM[6], @XMM[0]
+	lea	0x20(%rbp), $arg2
+	movdqa	@XMM[0], 0x20(%rbp)
+	lea	($key), $arg3
+	call	asm_AES_decrypt		# doesn't touch %xmm
+	pxor	0x20(%rbp), @XMM[6]
+	movdqu	@XMM[6], ($out)
+
+.Lxts_dec_ret:
+	lea	(%rsp), %rax
+	pxor	%xmm0, %xmm0
+.Lxts_dec_bzero:			# wipe key schedule [if any]
+	movdqa	%xmm0, 0x00(%rax)
+	movdqa	%xmm0, 0x10(%rax)
+	lea	0x20(%rax), %rax
+	cmp	%rax, %rbp
+	ja	.Lxts_dec_bzero
+
+	lea	(%rbp),%rsp		# restore %rsp
+___
+$code.=<<___ if ($win64);
+	movaps	0x40(%rbp), %xmm6
+	movaps	0x50(%rbp), %xmm7
+	movaps	0x60(%rbp), %xmm8
+	movaps	0x70(%rbp), %xmm9
+	movaps	0x80(%rbp), %xmm10
+	movaps	0x90(%rbp), %xmm11
+	movaps	0xa0(%rbp), %xmm12
+	movaps	0xb0(%rbp), %xmm13
+	movaps	0xc0(%rbp), %xmm14
+	movaps	0xd0(%rbp), %xmm15
+	lea	0xa0(%rbp), %rsp
+___
+$code.=<<___;
+	mov	0x48(%rsp), %r15
+	mov	0x50(%rsp), %r14
+	mov	0x58(%rsp), %r13
+	mov	0x60(%rsp), %r12
+	mov	0x68(%rsp), %rbx
+	mov	0x70(%rsp), %rax
+	lea	0x78(%rsp), %rsp
+	mov	%rax, %rbp
+.Lxts_dec_epilogue:
+	ret
+.size	bsaes_xts_decrypt,.-bsaes_xts_decrypt
+___
+}
+$code.=<<___;
+.type	_bsaes_const,\@object
+.align	64
+_bsaes_const:
+.LM0ISR:	# InvShiftRows constants
+	.quad	0x0a0e0206070b0f03, 0x0004080c0d010509
+.LISRM0:
+	.quad	0x01040b0e0205080f, 0x0306090c00070a0d
+.LISR:
+	.quad	0x0504070602010003, 0x0f0e0d0c080b0a09
+.LBS0:		# bit-slice constants
+	.quad	0x5555555555555555, 0x5555555555555555
+.LBS1:
+	.quad	0x3333333333333333, 0x3333333333333333
+.LBS2:
+	.quad	0x0f0f0f0f0f0f0f0f, 0x0f0f0f0f0f0f0f0f
+.LSR:		# shiftrows constants
+	.quad	0x0504070600030201, 0x0f0e0d0c0a09080b
+.LSRM0:
+	.quad	0x0304090e00050a0f, 0x01060b0c0207080d
+.LM0SR:
+	.quad	0x0a0e02060f03070b, 0x0004080c05090d01
+.LSWPUP:	# byte-swap upper dword
+	.quad	0x0706050403020100, 0x0c0d0e0f0b0a0908
+.LSWPUPM0SR:
+	.quad	0x0a0d02060c03070b, 0x0004080f05090e01
+.LADD1:		# counter increment constants
+	.quad	0x0000000000000000, 0x0000000100000000
+.LADD2:
+	.quad	0x0000000000000000, 0x0000000200000000
+.LADD3:
+	.quad	0x0000000000000000, 0x0000000300000000
+.LADD4:
+	.quad	0x0000000000000000, 0x0000000400000000
+.LADD5:
+	.quad	0x0000000000000000, 0x0000000500000000
+.LADD6:
+	.quad	0x0000000000000000, 0x0000000600000000
+.LADD7:
+	.quad	0x0000000000000000, 0x0000000700000000
+.LADD8:
+	.quad	0x0000000000000000, 0x0000000800000000
+.Lxts_magic:
+	.long	0x87,0,1,0
+.Lmasks:
+	.quad	0x0101010101010101, 0x0101010101010101
+	.quad	0x0202020202020202, 0x0202020202020202
+	.quad	0x0404040404040404, 0x0404040404040404
+	.quad	0x0808080808080808, 0x0808080808080808
+.LM0:
+	.quad	0x02060a0e03070b0f, 0x0004080c0105090d
+.L63:
+	.quad	0x6363636363636363, 0x6363636363636363
+.asciz	"Bit-sliced AES for x86_64/SSSE3, Emilia Käsper, Peter Schwabe, Andy Polyakov"
+.align	64
+.size	_bsaes_const,.-_bsaes_const
+___
+
+# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
+#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
+if ($win64) {
+$rec="%rcx";
+$frame="%rdx";
+$context="%r8";
+$disp="%r9";
+
+$code.=<<___;
+.extern	__imp_RtlVirtualUnwind
+.type	se_handler,\@abi-omnipotent
+.align	16
+se_handler:
+	push	%rsi
+	push	%rdi
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	pushfq
+	sub	\$64,%rsp
+
+	mov	120($context),%rax	# pull context->Rax
+	mov	248($context),%rbx	# pull context->Rip
+
+	mov	8($disp),%rsi		# disp->ImageBase
+	mov	56($disp),%r11		# disp->HandlerData
+
+	mov	0(%r11),%r10d		# HandlerData[0]
+	lea	(%rsi,%r10),%r10	# prologue label
+	cmp	%r10,%rbx		# context->Rip<prologue label
+	jb	.Lin_prologue
+
+	mov	152($context),%rax	# pull context->Rsp
+
+	mov	4(%r11),%r10d		# HandlerData[1]
+	lea	(%rsi,%r10),%r10	# epilogue label
+	cmp	%r10,%rbx		# context->Rip>=epilogue label
+	jae	.Lin_prologue
+
+	mov	160($context),%rax	# pull context->Rbp
+
+	lea	0x40(%rax),%rsi		# %xmm save area
+	lea	512($context),%rdi	# &context.Xmm6
+	mov	\$20,%ecx		# 10*sizeof(%xmm0)/sizeof(%rax)
+	.long	0xa548f3fc		# cld; rep movsq
+	lea	0xa0(%rax),%rax		# adjust stack pointer
+
+	mov	0x70(%rax),%rbp
+	mov	0x68(%rax),%rbx
+	mov	0x60(%rax),%r12
+	mov	0x58(%rax),%r13
+	mov	0x50(%rax),%r14
+	mov	0x48(%rax),%r15
+	lea	0x78(%rax),%rax		# adjust stack pointer
+	mov	%rbx,144($context)	# restore context->Rbx
+	mov	%rbp,160($context)	# restore context->Rbp
+	mov	%r12,216($context)	# restore context->R12
+	mov	%r13,224($context)	# restore context->R13
+	mov	%r14,232($context)	# restore context->R14
+	mov	%r15,240($context)	# restore context->R15
+
+.Lin_prologue:
+	mov	%rax,152($context)	# restore context->Rsp
+
+	mov	40($disp),%rdi		# disp->ContextRecord
+	mov	$context,%rsi		# context
+	mov	\$`1232/8`,%ecx		# sizeof(CONTEXT)
+	.long	0xa548f3fc		# cld; rep movsq
+
+	mov	$disp,%rsi
+	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
+	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
+	mov	0(%rsi),%r8		# arg3, disp->ControlPc
+	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
+	mov	40(%rsi),%r10		# disp->ContextRecord
+	lea	56(%rsi),%r11		# &disp->HandlerData
+	lea	24(%rsi),%r12		# &disp->EstablisherFrame
+	mov	%r10,32(%rsp)		# arg5
+	mov	%r11,40(%rsp)		# arg6
+	mov	%r12,48(%rsp)		# arg7
+	mov	%rcx,56(%rsp)		# arg8, (NULL)
+	call	*__imp_RtlVirtualUnwind(%rip)
+
+	mov	\$1,%eax		# ExceptionContinueSearch
+	add	\$64,%rsp
+	popfq
+	pop	%r15
+	pop	%r14
+	pop	%r13
+	pop	%r12
+	pop	%rbp
+	pop	%rbx
+	pop	%rdi
+	pop	%rsi
+	ret
+.size	se_handler,.-se_handler
+
+.section	.pdata
+.align	4
+___
+$code.=<<___ if ($ecb);
+	.rva	.Lecb_enc_prologue
+	.rva	.Lecb_enc_epilogue
+	.rva	.Lecb_enc_info
+
+	.rva	.Lecb_dec_prologue
+	.rva	.Lecb_dec_epilogue
+	.rva	.Lecb_dec_info
+___
+$code.=<<___;
+	.rva	.Lcbc_dec_prologue
+	.rva	.Lcbc_dec_epilogue
+	.rva	.Lcbc_dec_info
+
+	.rva	.Lctr_enc_prologue
+	.rva	.Lctr_enc_epilogue
+	.rva	.Lctr_enc_info
+
+	.rva	.Lxts_enc_prologue
+	.rva	.Lxts_enc_epilogue
+	.rva	.Lxts_enc_info
+
+	.rva	.Lxts_dec_prologue
+	.rva	.Lxts_dec_epilogue
+	.rva	.Lxts_dec_info
+
+.section	.xdata
+.align	8
+___
+$code.=<<___ if ($ecb);
+.Lecb_enc_info:
+	.byte	9,0,0,0
+	.rva	se_handler
+	.rva	.Lecb_enc_body,.Lecb_enc_epilogue	# HandlerData[]
+.Lecb_dec_info:
+	.byte	9,0,0,0
+	.rva	se_handler
+	.rva	.Lecb_dec_body,.Lecb_dec_epilogue	# HandlerData[]
+___
+$code.=<<___;
+.Lcbc_dec_info:
+	.byte	9,0,0,0
+	.rva	se_handler
+	.rva	.Lcbc_dec_body,.Lcbc_dec_epilogue	# HandlerData[]
+.Lctr_enc_info:
+	.byte	9,0,0,0
+	.rva	se_handler
+	.rva	.Lctr_enc_body,.Lctr_enc_epilogue	# HandlerData[]
+.Lxts_enc_info:
+	.byte	9,0,0,0
+	.rva	se_handler
+	.rva	.Lxts_enc_body,.Lxts_enc_epilogue	# HandlerData[]
+.Lxts_dec_info:
+	.byte	9,0,0,0
+	.rva	se_handler
+	.rva	.Lxts_dec_body,.Lxts_dec_epilogue	# HandlerData[]
+___
+}
+
+$code =~ s/\`([^\`]*)\`/eval($1)/gem;
+
+print $code;
+
+close STDOUT;
diff --git a/jni/openssl/crypto/aes/asm/vpaes-x86.pl b/jni/openssl/crypto/aes/asm/vpaes-x86.pl
new file mode 100644
index 0000000..1533e2c
--- /dev/null
+++ b/jni/openssl/crypto/aes/asm/vpaes-x86.pl
@@ -0,0 +1,903 @@
+#!/usr/bin/env perl
+
+######################################################################
+## Constant-time SSSE3 AES core implementation.
+## version 0.1
+##
+## By Mike Hamburg (Stanford University), 2009
+## Public domain.
+##
+## For details see http://shiftleft.org/papers/vector_aes/ and
+## http://crypto.stanford.edu/vpaes/.
+
+######################################################################
+# September 2011.
+#
+# Port vpaes-x86_64.pl as 32-bit "almost" drop-in replacement for
+# aes-586.pl. "Almost" refers to the fact that AES_cbc_encrypt
+# doesn't handle partial vectors (doesn't have to if called from
+# EVP only). "Drop-in" implies that this module doesn't share key
+# schedule structure with the original nor does it make assumption
+# about its alignment...
+#
+# Performance summary. aes-586.pl column lists large-block CBC
+# encrypt/decrypt/with-hyper-threading-off(*) results in cycles per
+# byte processed with 128-bit key, and vpaes-x86.pl column - [also
+# large-block CBC] encrypt/decrypt.
+#
+#		aes-586.pl		vpaes-x86.pl
+#
+# Core 2(**)	29.1/42.3/18.3		22.0/25.6(***)
+# Nehalem	27.9/40.4/18.1		10.3/12.0
+# Atom		102./119./60.1		64.5/85.3(***)
+#
+# (*)	"Hyper-threading" in this context refers to cache shared among
+#	multiple cores rather than to Intel HTT specifically. As the vast
+#	majority of contemporary cores share cache, the slower code path
+#	is commonplace. In other words, "with-hyper-threading-off"
+#	results are presented mostly for reference purposes.
+#
+# (**)	"Core 2" refers to initial 65nm design, a.k.a. Conroe.
+#
+# (***)	Less impressive improvement on Core 2 and Atom is due to slow
+#	pshufb, yet it's a respectable +32%/65% improvement on Core 2
+#	and +58%/40% on Atom (as implied, over "hyper-threading-safe"
+#	code path).
+#
+#						<appro@openssl.org>
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+push(@INC,"${dir}","${dir}../../perlasm");
+require "x86asm.pl";
+
+&asm_init($ARGV[0],"vpaes-x86.pl",$x86only = $ARGV[$#ARGV] eq "386");
+
+$PREFIX="vpaes";
+
+my  ($round, $base, $magic, $key, $const, $inp, $out)=
+    ("eax",  "ebx", "ecx",  "edx","ebp",  "esi","edi");
+
+&static_label("_vpaes_consts");
+&static_label("_vpaes_schedule_low_round");
+
+&set_label("_vpaes_consts",64);
+$k_inv=-0x30;		# inv, inva
+	&data_word(0x0D080180,0x0E05060F,0x0A0B0C02,0x04070309);
+	&data_word(0x0F0B0780,0x01040A06,0x02050809,0x030D0E0C);
+
+$k_s0F=-0x10;		# s0F
+	&data_word(0x0F0F0F0F,0x0F0F0F0F,0x0F0F0F0F,0x0F0F0F0F);
+
+$k_ipt=0x00;		# input transform (lo, hi)
+	&data_word(0x5A2A7000,0xC2B2E898,0x52227808,0xCABAE090);
+	&data_word(0x317C4D00,0x4C01307D,0xB0FDCC81,0xCD80B1FC);
+
+$k_sb1=0x20;		# sb1u, sb1t
+	&data_word(0xCB503E00,0xB19BE18F,0x142AF544,0xA5DF7A6E);
+	&data_word(0xFAE22300,0x3618D415,0x0D2ED9EF,0x3BF7CCC1);
+$k_sb2=0x40;		# sb2u, sb2t
+	&data_word(0x0B712400,0xE27A93C6,0xBC982FCD,0x5EB7E955);
+	&data_word(0x0AE12900,0x69EB8840,0xAB82234A,0xC2A163C8);
+$k_sbo=0x60;		# sbou, sbot
+	&data_word(0x6FBDC700,0xD0D26D17,0xC502A878,0x15AABF7A);
+	&data_word(0x5FBB6A00,0xCFE474A5,0x412B35FA,0x8E1E90D1);
+
+$k_mc_forward=0x80;	# mc_forward
+	&data_word(0x00030201,0x04070605,0x080B0A09,0x0C0F0E0D);
+	&data_word(0x04070605,0x080B0A09,0x0C0F0E0D,0x00030201);
+	&data_word(0x080B0A09,0x0C0F0E0D,0x00030201,0x04070605);
+	&data_word(0x0C0F0E0D,0x00030201,0x04070605,0x080B0A09);
+
+$k_mc_backward=0xc0;	# mc_backward
+	&data_word(0x02010003,0x06050407,0x0A09080B,0x0E0D0C0F);
+	&data_word(0x0E0D0C0F,0x02010003,0x06050407,0x0A09080B);
+	&data_word(0x0A09080B,0x0E0D0C0F,0x02010003,0x06050407);
+	&data_word(0x06050407,0x0A09080B,0x0E0D0C0F,0x02010003);
+
+$k_sr=0x100;		# sr
+	&data_word(0x03020100,0x07060504,0x0B0A0908,0x0F0E0D0C);
+	&data_word(0x0F0A0500,0x030E0904,0x07020D08,0x0B06010C);
+	&data_word(0x0B020900,0x0F060D04,0x030A0108,0x070E050C);
+	&data_word(0x070A0D00,0x0B0E0104,0x0F020508,0x0306090C);
+
+$k_rcon=0x140;		# rcon
+	&data_word(0xAF9DEEB6,0x1F8391B9,0x4D7C7D81,0x702A9808);
+
+$k_s63=0x150;		# s63: all equal to 0x63 transformed
+	&data_word(0x5B5B5B5B,0x5B5B5B5B,0x5B5B5B5B,0x5B5B5B5B);
+
+$k_opt=0x160;		# output transform
+	&data_word(0xD6B66000,0xFF9F4929,0xDEBE6808,0xF7974121);
+	&data_word(0x50BCEC00,0x01EDBD51,0xB05C0CE0,0xE10D5DB1);
+
+$k_deskew=0x180;	# deskew tables: inverts the sbox's "skew"
+	&data_word(0x47A4E300,0x07E4A340,0x5DBEF91A,0x1DFEB95A);
+	&data_word(0x83EA6900,0x5F36B5DC,0xF49D1E77,0x2841C2AB);
+##
+##  Decryption stuff
+##  Key schedule constants
+##
+$k_dksd=0x1a0;		# decryption key schedule: invskew x*D
+	&data_word(0xA3E44700,0xFEB91A5D,0x5A1DBEF9,0x0740E3A4);
+	&data_word(0xB5368300,0x41C277F4,0xAB289D1E,0x5FDC69EA);
+$k_dksb=0x1c0;		# decryption key schedule: invskew x*B
+	&data_word(0x8550D500,0x9A4FCA1F,0x1CC94C99,0x03D65386);
+	&data_word(0xB6FC4A00,0x115BEDA7,0x7E3482C8,0xD993256F);
+$k_dkse=0x1e0;		# decryption key schedule: invskew x*E + 0x63
+	&data_word(0x1FC9D600,0xD5031CCA,0x994F5086,0x53859A4C);
+	&data_word(0x4FDC7BE8,0xA2319605,0x20B31487,0xCD5EF96A);
+$k_dks9=0x200;		# decryption key schedule: invskew x*9
+	&data_word(0x7ED9A700,0xB6116FC8,0x82255BFC,0x4AED9334);
+	&data_word(0x27143300,0x45765162,0xE9DAFDCE,0x8BB89FAC);
+
+##
+##  Decryption stuff
+##  Round function constants
+##
+$k_dipt=0x220;		# decryption input transform
+	&data_word(0x0B545F00,0x0F505B04,0x114E451A,0x154A411E);
+	&data_word(0x60056500,0x86E383E6,0xF491F194,0x12771772);
+
+$k_dsb9=0x240;		# decryption sbox output *9*u, *9*t
+	&data_word(0x9A86D600,0x851C0353,0x4F994CC9,0xCAD51F50);
+	&data_word(0xECD74900,0xC03B1789,0xB2FBA565,0x725E2C9E);
+$k_dsbd=0x260;		# decryption sbox output *D*u, *D*t
+	&data_word(0xE6B1A200,0x7D57CCDF,0x882A4439,0xF56E9B13);
+	&data_word(0x24C6CB00,0x3CE2FAF7,0x15DEEFD3,0x2931180D);
+$k_dsbb=0x280;		# decryption sbox output *B*u, *B*t
+	&data_word(0x96B44200,0xD0226492,0xB0F2D404,0x602646F6);
+	&data_word(0xCD596700,0xC19498A6,0x3255AA6B,0xF3FF0C3E);
+$k_dsbe=0x2a0;		# decryption sbox output *E*u, *E*t
+	&data_word(0x26D4D000,0x46F29296,0x64B4F6B0,0x22426004);
+	&data_word(0xFFAAC100,0x0C55A6CD,0x98593E32,0x9467F36B);
+$k_dsbo=0x2c0;		# decryption sbox final output
+	&data_word(0x7EF94000,0x1387EA53,0xD4943E2D,0xC7AA6DB9);
+	&data_word(0x93441D00,0x12D7560F,0xD8C58E9C,0xCA4B8159);
+&asciz	("Vector Permutation AES for x86/SSSE3, Mike Hamburg (Stanford University)");
+&align	(64);
+
+&function_begin_B("_vpaes_preheat");
+	&add	($const,&DWP(0,"esp"));
+	&movdqa	("xmm7",&QWP($k_inv,$const));
+	&movdqa	("xmm6",&QWP($k_s0F,$const));
+	&ret	();
+&function_end_B("_vpaes_preheat");
+
+##
+##  _aes_encrypt_core
+##
+##  AES-encrypt %xmm0.
+##
+##  Inputs:
+##     %xmm0 = input
+##     %xmm6-%xmm7 as in _vpaes_preheat
+##    (%edx) = scheduled keys
+##
+##  Output in %xmm0
+##  Clobbers  %xmm1-%xmm5, %eax, %ebx, %ecx, %edx
+##
+##
+&function_begin_B("_vpaes_encrypt_core");
+	&mov	($magic,16);
+	&mov	($round,&DWP(240,$key));
+	&movdqa	("xmm1","xmm6");
+	&movdqa	("xmm2",&QWP($k_ipt,$const));
+	&pandn	("xmm1","xmm0");
+	&movdqu	("xmm5",&QWP(0,$key));
+	&psrld	("xmm1",4);
+	&pand	("xmm0","xmm6");
+	&pshufb	("xmm2","xmm0");
+	&movdqa	("xmm0",&QWP($k_ipt+16,$const));
+	&pshufb	("xmm0","xmm1");
+	&pxor	("xmm2","xmm5");
+	&pxor	("xmm0","xmm2");
+	&add	($key,16);
+	&lea	($base,&DWP($k_mc_backward,$const));
+	&jmp	(&label("enc_entry"));
+
+
+&set_label("enc_loop",16);
+	# middle of middle round
+	&movdqa	("xmm4",&QWP($k_sb1,$const));	# 4 : sb1u
+	&pshufb	("xmm4","xmm2");		# 4 = sb1u
+	&pxor	("xmm4","xmm5");		# 4 = sb1u + k
+	&movdqa	("xmm0",&QWP($k_sb1+16,$const));# 0 : sb1t
+	&pshufb	("xmm0","xmm3");		# 0 = sb1t
+	&pxor	("xmm0","xmm4");		# 0 = A
+	&movdqa	("xmm5",&QWP($k_sb2,$const));	# 4 : sb2u
+	&pshufb	("xmm5","xmm2");		# 4 = sb2u
+	&movdqa	("xmm1",&QWP(-0x40,$base,$magic));# .Lk_mc_forward[]
+	&movdqa	("xmm2",&QWP($k_sb2+16,$const));# 2 : sb2t
+	&pshufb	("xmm2","xmm3");		# 2 = sb2t
+	&pxor	("xmm2","xmm5");		# 2 = 2A
+	&movdqa	("xmm4",&QWP(0,$base,$magic));	# .Lk_mc_backward[]
+	&movdqa	("xmm3","xmm0");		# 3 = A
+	&pshufb	("xmm0","xmm1");		# 0 = B
+	&add	($key,16);			# next key
+	&pxor	("xmm0","xmm2");		# 0 = 2A+B
+	&pshufb	("xmm3","xmm4");		# 3 = D
+	&add	($magic,16);			# next mc
+	&pxor	("xmm3","xmm0");		# 3 = 2A+B+D
+	&pshufb	("xmm0","xmm1");		# 0 = 2B+C
+	&and	($magic,0x30);			# ... mod 4
+	&pxor	("xmm0","xmm3");		# 0 = 2A+3B+C+D
+	&sub	($round,1);			# nr--
+
+&set_label("enc_entry");
+	# top of round
+	&movdqa	("xmm1","xmm6");		# 1 : i
+	&pandn	("xmm1","xmm0");		# 1 = i<<4
+	&psrld	("xmm1",4);			# 1 = i
+	&pand	("xmm0","xmm6");		# 0 = k
+	&movdqa	("xmm5",&QWP($k_inv+16,$const));# 2 : a/k
+	&pshufb	("xmm5","xmm0");		# 2 = a/k
+	&pxor	("xmm0","xmm1");		# 0 = j
+	&movdqa	("xmm3","xmm7");		# 3 : 1/i
+	&pshufb	("xmm3","xmm1");		# 3 = 1/i
+	&pxor	("xmm3","xmm5");		# 3 = iak = 1/i + a/k
+	&movdqa	("xmm4","xmm7");		# 4 : 1/j
+	&pshufb	("xmm4","xmm0");		# 4 = 1/j
+	&pxor	("xmm4","xmm5");		# 4 = jak = 1/j + a/k
+	&movdqa	("xmm2","xmm7");		# 2 : 1/iak
+	&pshufb	("xmm2","xmm3");		# 2 = 1/iak
+	&pxor	("xmm2","xmm0");		# 2 = io
+	&movdqa	("xmm3","xmm7");		# 3 : 1/jak
+	&movdqu	("xmm5",&QWP(0,$key));
+	&pshufb	("xmm3","xmm4");		# 3 = 1/jak
+	&pxor	("xmm3","xmm1");		# 3 = jo
+	&jnz	(&label("enc_loop"));
+
+	# middle of last round
+	&movdqa	("xmm4",&QWP($k_sbo,$const));	# 3 : sbou      .Lk_sbo
+	&movdqa	("xmm0",&QWP($k_sbo+16,$const));# 3 : sbot      .Lk_sbo+16
+	&pshufb	("xmm4","xmm2");		# 4 = sbou
+	&pxor	("xmm4","xmm5");		# 4 = sb1u + k
+	&pshufb	("xmm0","xmm3");		# 0 = sb1t
+	&movdqa	("xmm1",&QWP(0x40,$base,$magic));# .Lk_sr[]
+	&pxor	("xmm0","xmm4");		# 0 = A
+	&pshufb	("xmm0","xmm1");
+	&ret	();
+&function_end_B("_vpaes_encrypt_core");
+
+##
+##  Decryption core
+##
+##  Same API as encryption core.
+##
+&function_begin_B("_vpaes_decrypt_core");
+	&mov	($round,&DWP(240,$key));
+	&lea	($base,&DWP($k_dsbd,$const));
+	&movdqa	("xmm1","xmm6");
+	&movdqa	("xmm2",&QWP($k_dipt-$k_dsbd,$base));
+	&pandn	("xmm1","xmm0");
+	&mov	($magic,$round);
+	&psrld	("xmm1",4);
+	&movdqu	("xmm5",&QWP(0,$key));
+	&shl	($magic,4);
+	&pand	("xmm0","xmm6");
+	&pshufb	("xmm2","xmm0");
+	&movdqa	("xmm0",&QWP($k_dipt-$k_dsbd+16,$base));
+	&xor	($magic,0x30);
+	&pshufb	("xmm0","xmm1");
+	&and	($magic,0x30);
+	&pxor	("xmm2","xmm5");
+	&movdqa	("xmm5",&QWP($k_mc_forward+48,$const));
+	&pxor	("xmm0","xmm2");
+	&add	($key,16);
+	&lea	($magic,&DWP($k_sr-$k_dsbd,$base,$magic));
+	&jmp	(&label("dec_entry"));
+
+&set_label("dec_loop",16);
+##
+##  Inverse mix columns
+##
+	&movdqa	("xmm4",&QWP(-0x20,$base));	# 4 : sb9u
+	&pshufb	("xmm4","xmm2");		# 4 = sb9u
+	&pxor	("xmm4","xmm0");
+	&movdqa	("xmm0",&QWP(-0x10,$base));	# 0 : sb9t
+	&pshufb	("xmm0","xmm3");		# 0 = sb9t
+	&pxor	("xmm0","xmm4");		# 0 = ch
+	&add	($key,16);			# next round key
+
+	&pshufb	("xmm0","xmm5");		# MC ch
+	&movdqa	("xmm4",&QWP(0,$base));		# 4 : sbdu
+	&pshufb	("xmm4","xmm2");		# 4 = sbdu
+	&pxor	("xmm4","xmm0");		# 4 = ch
+	&movdqa	("xmm0",&QWP(0x10,$base));	# 0 : sbdt
+	&pshufb	("xmm0","xmm3");		# 0 = sbdt
+	&pxor	("xmm0","xmm4");		# 0 = ch
+	&sub	($round,1);			# nr--
+
+	&pshufb	("xmm0","xmm5");		# MC ch
+	&movdqa	("xmm4",&QWP(0x20,$base));	# 4 : sbbu
+	&pshufb	("xmm4","xmm2");		# 4 = sbbu
+	&pxor	("xmm4","xmm0");		# 4 = ch
+	&movdqa	("xmm0",&QWP(0x30,$base));	# 0 : sbbt
+	&pshufb	("xmm0","xmm3");		# 0 = sbbt
+	&pxor	("xmm0","xmm4");		# 0 = ch
+
+	&pshufb	("xmm0","xmm5");		# MC ch
+	&movdqa	("xmm4",&QWP(0x40,$base));	# 4 : sbeu
+	&pshufb	("xmm4","xmm2");		# 4 = sbeu
+	&pxor	("xmm4","xmm0");		# 4 = ch
+	&movdqa	("xmm0",&QWP(0x50,$base));	# 0 : sbet
+	&pshufb	("xmm0","xmm3");		# 0 = sbet
+	&pxor	("xmm0","xmm4");		# 0 = ch
+
+	&palignr("xmm5","xmm5",12);
+
+&set_label("dec_entry");
+	# top of round
+	&movdqa	("xmm1","xmm6");		# 1 : i
+	&pandn	("xmm1","xmm0");		# 1 = i<<4
+	&psrld	("xmm1",4);			# 1 = i
+	&pand	("xmm0","xmm6");		# 0 = k
+	&movdqa	("xmm2",&QWP($k_inv+16,$const));# 2 : a/k
+	&pshufb	("xmm2","xmm0");		# 2 = a/k
+	&pxor	("xmm0","xmm1");		# 0 = j
+	&movdqa	("xmm3","xmm7");		# 3 : 1/i
+	&pshufb	("xmm3","xmm1");		# 3 = 1/i
+	&pxor	("xmm3","xmm2");		# 3 = iak = 1/i + a/k
+	&movdqa	("xmm4","xmm7");		# 4 : 1/j
+	&pshufb	("xmm4","xmm0");		# 4 = 1/j
+	&pxor	("xmm4","xmm2");		# 4 = jak = 1/j + a/k
+	&movdqa	("xmm2","xmm7");		# 2 : 1/iak
+	&pshufb	("xmm2","xmm3");		# 2 = 1/iak
+	&pxor	("xmm2","xmm0");		# 2 = io
+	&movdqa	("xmm3","xmm7");		# 3 : 1/jak
+	&pshufb	("xmm3","xmm4");		# 3 = 1/jak
+	&pxor	("xmm3","xmm1");		# 3 = jo
+	&movdqu	("xmm0",&QWP(0,$key));
+	&jnz	(&label("dec_loop"));
+
+	# middle of last round
+	&movdqa	("xmm4",&QWP(0x60,$base));	# 3 : sbou
+	&pshufb	("xmm4","xmm2");		# 4 = sbou
+	&pxor	("xmm4","xmm0");		# 4 = sb1u + k
+	&movdqa	("xmm0",&QWP(0x70,$base));	# 0 : sbot
+	&movdqa	("xmm2",&QWP(0,$magic));
+	&pshufb	("xmm0","xmm3");		# 0 = sb1t
+	&pxor	("xmm0","xmm4");		# 0 = A
+	&pshufb	("xmm0","xmm2");
+	&ret	();
+&function_end_B("_vpaes_decrypt_core");
+
+########################################################
+##                                                    ##
+##                  AES key schedule                  ##
+##                                                    ##
+########################################################
+&function_begin_B("_vpaes_schedule_core");
+	&add	($const,&DWP(0,"esp"));
+	&movdqu	("xmm0",&QWP(0,$inp));		# load key (unaligned)
+	&movdqa	("xmm2",&QWP($k_rcon,$const));	# load rcon
+
+	# input transform
+	&movdqa	("xmm3","xmm0");
+	&lea	($base,&DWP($k_ipt,$const));
+	&movdqa	(&QWP(4,"esp"),"xmm2");		# xmm8
+	&call	("_vpaes_schedule_transform");
+	&movdqa	("xmm7","xmm0");
+
+	&test	($out,$out);
+	&jnz	(&label("schedule_am_decrypting"));
+
+	# encrypting, output zeroth round key after transform
+	&movdqu	(&QWP(0,$key),"xmm0");
+	&jmp	(&label("schedule_go"));
+
+&set_label("schedule_am_decrypting");
+	# decrypting, output zeroth round key after shiftrows
+	&movdqa	("xmm1",&QWP($k_sr,$const,$magic));
+	&pshufb	("xmm3","xmm1");
+	&movdqu	(&QWP(0,$key),"xmm3");
+	&xor	($magic,0x30);
+
+&set_label("schedule_go");
+	&cmp	($round,192);
+	&ja	(&label("schedule_256"));
+	&je	(&label("schedule_192"));
+	# 128: fall through
+
+##
+##  .schedule_128
+##
+##  128-bit specific part of key schedule.
+##
+##  This schedule is really simple, because all its parts
+##  are accomplished by the subroutines.
+##
+&set_label("schedule_128");
+	&mov	($round,10);
+
+&set_label("loop_schedule_128");
+	&call	("_vpaes_schedule_round");
+	&dec	($round);
+	&jz	(&label("schedule_mangle_last"));
+	&call	("_vpaes_schedule_mangle");	# write output
+	&jmp	(&label("loop_schedule_128"));
+
+##
+##  .aes_schedule_192
+##
+##  192-bit specific part of key schedule.
+##
+##  The main body of this schedule is the same as the 128-bit
+##  schedule, but with more smearing.  The long, high side is
+##  stored in %xmm7 as before, and the short, low side is in
+##  the high bits of %xmm6.
+##
+##  This schedule is somewhat nastier, however, because each
+##  round produces 192 bits of key material, or 1.5 round keys.
+##  Therefore, on each cycle we do 2 rounds and produce 3 round
+##  keys.
+##
+&set_label("schedule_192",16);
+	&movdqu	("xmm0",&QWP(8,$inp));		# load key part 2 (very unaligned)
+	&call	("_vpaes_schedule_transform");	# input transform	
+	&movdqa	("xmm6","xmm0");		# save short part
+	&pxor	("xmm4","xmm4");		# clear 4
+	&movhlps("xmm6","xmm4");		# clobber low side with zeros
+	&mov	($round,4);
+
+&set_label("loop_schedule_192");
+	&call	("_vpaes_schedule_round");
+	&palignr("xmm0","xmm6",8);
+	&call	("_vpaes_schedule_mangle");	# save key n
+	&call	("_vpaes_schedule_192_smear");
+	&call	("_vpaes_schedule_mangle");	# save key n+1
+	&call	("_vpaes_schedule_round");
+	&dec	($round);
+	&jz	(&label("schedule_mangle_last"));
+	&call	("_vpaes_schedule_mangle");	# save key n+2
+	&call	("_vpaes_schedule_192_smear");
+	&jmp	(&label("loop_schedule_192"));
+
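+# Bookkeeping note (illustrative, not upstream text): with $round set to 4,
+# the loop above emits three mangled keys per full pass and two on the final
+# pass before jumping to schedule_mangle_last, so together with the zeroth
+# key written earlier and the last key it produces
+#	1 + (3+3+3+2) + 1 == 13
+# round keys, exactly what AES-192's 12 rounds require.
+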
+##
+##  .aes_schedule_256
+##
+##  256-bit specific part of key schedule.
+##
+##  The structure here is very similar to the 128-bit
+##  schedule, but with an additional "low side" in
+##  %xmm6.  The low side's rounds are the same as the
+##  high side's, except no rcon and no rotation.
+##
+&set_label("schedule_256",16);
+	&movdqu	("xmm0",&QWP(16,$inp));		# load key part 2 (unaligned)
+	&call	("_vpaes_schedule_transform");	# input transform	
+	&mov	($round,7);
+
+&set_label("loop_schedule_256");
+	&call	("_vpaes_schedule_mangle");	# output low result
+	&movdqa	("xmm6","xmm0");		# save cur_lo in xmm6
+
+	# high round
+	&call	("_vpaes_schedule_round");
+	&dec	($round);
+	&jz	(&label("schedule_mangle_last"));
+	&call	("_vpaes_schedule_mangle");	
+
+	# low round. swap xmm7 and xmm6
+	&pshufd	("xmm0","xmm0",0xFF);
+	&movdqa	(&QWP(20,"esp"),"xmm7");
+	&movdqa	("xmm7","xmm6");
+	&call	("_vpaes_schedule_low_round");
+	&movdqa	("xmm7",&QWP(20,"esp"));
+
+	&jmp	(&label("loop_schedule_256"));
+
+##
+##  .aes_schedule_mangle_last
+##
+##  Mangler for last round of key schedule
+##  Mangles %xmm0
+##    when encrypting, outputs out(%xmm0) ^ 63
+##    when decrypting, outputs unskew(%xmm0)
+##
+##  Always called right before return... jumps to cleanup and exits
+##
+&set_label("schedule_mangle_last",16);
+	# schedule last round key from xmm0
+	&lea	($base,&DWP($k_deskew,$const));
+	&test	($out,$out);
+	&jnz	(&label("schedule_mangle_last_dec"));
+
+	# encrypting
+	&movdqa	("xmm1",&QWP($k_sr,$const,$magic));
+	&pshufb	("xmm0","xmm1");		# output permute
+	&lea	($base,&DWP($k_opt,$const));	# prepare to output transform
+	&add	($key,32);
+
+&set_label("schedule_mangle_last_dec");
+	&add	($key,-16);
+	&pxor	("xmm0",&QWP($k_s63,$const));
+	&call	("_vpaes_schedule_transform");	# output transform
+	&movdqu	(&QWP(0,$key),"xmm0");		# save last key
+
+	# cleanup
+	&pxor	("xmm0","xmm0");
+	&pxor	("xmm1","xmm1");
+	&pxor	("xmm2","xmm2");
+	&pxor	("xmm3","xmm3");
+	&pxor	("xmm4","xmm4");
+	&pxor	("xmm5","xmm5");
+	&pxor	("xmm6","xmm6");
+	&pxor	("xmm7","xmm7");
+	&ret	();
+&function_end_B("_vpaes_schedule_core");
+
+##
+##  .aes_schedule_192_smear
+##
+##  Smear the short, low side in the 192-bit key schedule.
+##
+##  Inputs:
+##    %xmm7: high side, b  a  x  y
+##    %xmm6:  low side, d  c  0  0
+##    %xmm13: 0
+##
+##  Outputs:
+##    %xmm6: b+c+d  b+c  0  0
+##    %xmm0: b+c+d  b+c  b  a
+##
+&function_begin_B("_vpaes_schedule_192_smear");
+	&pshufd	("xmm0","xmm6",0x80);		# d c 0 0 -> c 0 0 0
+	&pxor	("xmm6","xmm0");		# -> c+d c 0 0
+	&pshufd	("xmm0","xmm7",0xFE);		# b a _ _ -> b b b a
+	&pxor	("xmm6","xmm0");		# -> b+c+d b+c b a
+	&movdqa	("xmm0","xmm6");
+	&pxor	("xmm1","xmm1");
+	&movhlps("xmm6","xmm1");		# clobber low side with zeros
+	&ret	();
+&function_end_B("_vpaes_schedule_192_smear");
+
+##
+##  .aes_schedule_round
+##
+##  Runs one main round of the key schedule on %xmm0, %xmm7
+##
+##  Specifically, runs subbytes on the high dword of %xmm0
+##  then rotates it by one byte and xors into the low dword of
+##  %xmm7.
+##
+##  Adds rcon from low byte of %xmm8, then rotates %xmm8 for
+##  next rcon.
+##
+##  Smears the dwords of %xmm7 by xoring the low into the
+##  second low, result into third, result into highest.
+##
+##  Returns results in %xmm7 = %xmm0.
+##  Clobbers %xmm1-%xmm5.
+##
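+# In FIPS-197 terms (illustrative note, not upstream text): the rotate +
+# subbytes + rcon path computes SubWord(RotWord(w[i-1])) ^ Rcon[i/Nk], and
+# the pslldq/pxor "smear" of %xmm7 builds the running xors w[i-4],
+# w[i-4]^w[i-3], ...; xoring the broadcast sbox term into that prefix gives
+# the next four expanded key words of the standard schedule (whose first
+# word is w[i] = w[i-Nk] ^ SubWord(RotWord(w[i-1])) ^ Rcon[i/Nk]) in one
+# shot, modulo vpaes' transformed basis and its use of k_s63 in place of
+# the raw 0x63 constant.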
+&function_begin_B("_vpaes_schedule_round");
+	# extract rcon from xmm8
+	&movdqa	("xmm2",&QWP(8,"esp"));		# xmm8
+	&pxor	("xmm1","xmm1");
+	&palignr("xmm1","xmm2",15);
+	&palignr("xmm2","xmm2",15);
+	&pxor	("xmm7","xmm1");
+
+	# rotate
+	&pshufd	("xmm0","xmm0",0xFF);
+	&palignr("xmm0","xmm0",1);
+
+	# fall through...
+	&movdqa	(&QWP(8,"esp"),"xmm2");		# xmm8
+
+	# low round: same as high round, but no rotation and no rcon.
+&set_label("_vpaes_schedule_low_round");
+	# smear xmm7
+	&movdqa	("xmm1","xmm7");
+	&pslldq	("xmm7",4);
+	&pxor	("xmm7","xmm1");
+	&movdqa	("xmm1","xmm7");
+	&pslldq	("xmm7",8);
+	&pxor	("xmm7","xmm1");
+	&pxor	("xmm7",&QWP($k_s63,$const));
+
+	# subbyte
+	&movdqa	("xmm4",&QWP($k_s0F,$const));
+	&movdqa	("xmm5",&QWP($k_inv,$const));	# 4 : 1/j
+	&movdqa	("xmm1","xmm4");	
+	&pandn	("xmm1","xmm0");
+	&psrld	("xmm1",4);			# 1 = i
+	&pand	("xmm0","xmm4");		# 0 = k
+	&movdqa	("xmm2",&QWP($k_inv+16,$const));# 2 : a/k
+	&pshufb	("xmm2","xmm0");		# 2 = a/k
+	&pxor	("xmm0","xmm1");		# 0 = j
+	&movdqa	("xmm3","xmm5");		# 3 : 1/i
+	&pshufb	("xmm3","xmm1");		# 3 = 1/i
+	&pxor	("xmm3","xmm2");		# 3 = iak = 1/i + a/k
+	&movdqa	("xmm4","xmm5");		# 4 : 1/j
+	&pshufb	("xmm4","xmm0");		# 4 = 1/j
+	&pxor	("xmm4","xmm2");		# 4 = jak = 1/j + a/k
+	&movdqa	("xmm2","xmm5");		# 2 : 1/iak
+	&pshufb	("xmm2","xmm3");		# 2 = 1/iak
+	&pxor	("xmm2","xmm0");		# 2 = io
+	&movdqa	("xmm3","xmm5");		# 3 : 1/jak
+	&pshufb	("xmm3","xmm4");		# 3 = 1/jak
+	&pxor	("xmm3","xmm1");		# 3 = jo
+	&movdqa	("xmm4",&QWP($k_sb1,$const));	# 4 : sbou
+	&pshufb	("xmm4","xmm2");		# 4 = sbou
+	&movdqa	("xmm0",&QWP($k_sb1+16,$const));# 0 : sbot
+	&pshufb	("xmm0","xmm3");		# 0 = sb1t
+	&pxor	("xmm0","xmm4");		# 0 = sbox output
+
+	# add in smeared stuff
+	&pxor	("xmm0","xmm7");
+	&movdqa	("xmm7","xmm0");
+	&ret	();
+&function_end_B("_vpaes_schedule_round");
+
+##
+##  .aes_schedule_transform
+##
+##  Linear-transform %xmm0 according to tables at (%ebx)
+##
+##  Output in %xmm0
+##  Clobbers %xmm1, %xmm2
+##
+&function_begin_B("_vpaes_schedule_transform");
+	&movdqa	("xmm2",&QWP($k_s0F,$const));
+	&movdqa	("xmm1","xmm2");
+	&pandn	("xmm1","xmm0");
+	&psrld	("xmm1",4);
+	&pand	("xmm0","xmm2");
+	&movdqa	("xmm2",&QWP(0,$base));
+	&pshufb	("xmm2","xmm0");
+	&movdqa	("xmm0",&QWP(16,$base));
+	&pshufb	("xmm0","xmm1");
+	&pxor	("xmm0","xmm2");
+	&ret	();
+&function_end_B("_vpaes_schedule_transform");
+
+##
+##  .aes_schedule_mangle
+##
+##  Mangle xmm0 from (basis-transformed) standard version
+##  to our version.
+##
+##  On encrypt,
+##    xor with 0x63
+##    multiply by circulant 0,1,1,1
+##    apply shiftrows transform
+##
+##  On decrypt,
+##    xor with 0x63
+##    multiply by "inverse mixcolumns" circulant E,B,D,9
+##    deskew
+##    apply shiftrows transform
+##
+##
+##  Writes out to (%edx), and increments or decrements it
+##  Keeps track of round number mod 4 in %ecx
+##  Preserves xmm0
+##  Clobbers xmm1-xmm5
+##
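+# Note (illustrative, not upstream text): on the encrypt path below the
+# "multiply by circulant 0,1,1,1" is realized by xoring together three
+# successively rotated copies of the key (the repeated pshufb by
+# k_mc_forward followed by pxor), while the decrypt path accumulates the
+# k_dksd/k_dksb/k_dkse/k_dks9 table lookups to apply the E,B,D,9
+# inverse-mixcolumns circulant instead.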
+&function_begin_B("_vpaes_schedule_mangle");
+	&movdqa	("xmm4","xmm0");	# save xmm0 for later
+	&movdqa	("xmm5",&QWP($k_mc_forward,$const));
+	&test	($out,$out);
+	&jnz	(&label("schedule_mangle_dec"));
+
+	# encrypting
+	&add	($key,16);
+	&pxor	("xmm4",&QWP($k_s63,$const));
+	&pshufb	("xmm4","xmm5");
+	&movdqa	("xmm3","xmm4");
+	&pshufb	("xmm4","xmm5");
+	&pxor	("xmm3","xmm4");
+	&pshufb	("xmm4","xmm5");
+	&pxor	("xmm3","xmm4");
+
+	&jmp	(&label("schedule_mangle_both"));
+
+&set_label("schedule_mangle_dec",16);
+	# inverse mix columns
+	&movdqa	("xmm2",&QWP($k_s0F,$const));
+	&lea	($inp,&DWP($k_dksd,$const));
+	&movdqa	("xmm1","xmm2");
+	&pandn	("xmm1","xmm4");
+	&psrld	("xmm1",4);			# 1 = hi
+	&pand	("xmm4","xmm2");		# 4 = lo
+
+	&movdqa	("xmm2",&QWP(0,$inp));
+	&pshufb	("xmm2","xmm4");
+	&movdqa	("xmm3",&QWP(0x10,$inp));
+	&pshufb	("xmm3","xmm1");
+	&pxor	("xmm3","xmm2");
+	&pshufb	("xmm3","xmm5");
+
+	&movdqa	("xmm2",&QWP(0x20,$inp));
+	&pshufb	("xmm2","xmm4");
+	&pxor	("xmm2","xmm3");
+	&movdqa	("xmm3",&QWP(0x30,$inp));
+	&pshufb	("xmm3","xmm1");
+	&pxor	("xmm3","xmm2");
+	&pshufb	("xmm3","xmm5");
+
+	&movdqa	("xmm2",&QWP(0x40,$inp));
+	&pshufb	("xmm2","xmm4");
+	&pxor	("xmm2","xmm3");
+	&movdqa	("xmm3",&QWP(0x50,$inp));
+	&pshufb	("xmm3","xmm1");
+	&pxor	("xmm3","xmm2");
+	&pshufb	("xmm3","xmm5");
+
+	&movdqa	("xmm2",&QWP(0x60,$inp));
+	&pshufb	("xmm2","xmm4");
+	&pxor	("xmm2","xmm3");
+	&movdqa	("xmm3",&QWP(0x70,$inp));
+	&pshufb	("xmm3","xmm1");
+	&pxor	("xmm3","xmm2");
+
+	&add	($key,-16);
+
+&set_label("schedule_mangle_both");
+	&movdqa	("xmm1",&QWP($k_sr,$const,$magic));
+	&pshufb	("xmm3","xmm1");
+	&add	($magic,-16);
+	&and	($magic,0x30);
+	&movdqu	(&QWP(0,$key),"xmm3");
+	&ret	();
+&function_end_B("_vpaes_schedule_mangle");
+
+#
+# Interface to OpenSSL
+#
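+# At the C level the entry points below are used as drop-in replacements for
+# the corresponding AES_* routines; an illustrative view of the assumed
+# prototypes (not spelled out in this file):
+#   int  vpaes_set_encrypt_key(const unsigned char *userKey, int bits, AES_KEY *key);
+#   int  vpaes_set_decrypt_key(const unsigned char *userKey, int bits, AES_KEY *key);
+#   void vpaes_encrypt(const unsigned char *in, unsigned char *out, const AES_KEY *key);
+#   void vpaes_decrypt(const unsigned char *in, unsigned char *out, const AES_KEY *key);
+#   void vpaes_cbc_encrypt(const unsigned char *in, unsigned char *out, size_t length,
+#                          const AES_KEY *key, unsigned char *ivec, int enc);
+#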
+&function_begin("${PREFIX}_set_encrypt_key");
+	&mov	($inp,&wparam(0));		# inp
+	&lea	($base,&DWP(-56,"esp"));
+	&mov	($round,&wparam(1));		# bits
+	&and	($base,-16);
+	&mov	($key,&wparam(2));		# key
+	&xchg	($base,"esp");			# alloca
+	&mov	(&DWP(48,"esp"),$base);
+
+	&mov	($base,$round);
+	&shr	($base,5);
+	&add	($base,5);
+	&mov	(&DWP(240,$key),$base);		# AES_KEY->rounds = nbits/32+5;
+	&mov	($magic,0x30);
+	&mov	($out,0);
+
+	&lea	($const,&DWP(&label("_vpaes_consts")."+0x30-".&label("pic_point")));
+	&call	("_vpaes_schedule_core");
+&set_label("pic_point");
+
+	&mov	("esp",&DWP(48,"esp"));
+	&xor	("eax","eax");
+&function_end("${PREFIX}_set_encrypt_key");
+
+&function_begin("${PREFIX}_set_decrypt_key");
+	&mov	($inp,&wparam(0));		# inp
+	&lea	($base,&DWP(-56,"esp"));
+	&mov	($round,&wparam(1));		# bits
+	&and	($base,-16);
+	&mov	($key,&wparam(2));		# key
+	&xchg	($base,"esp");			# alloca
+	&mov	(&DWP(48,"esp"),$base);
+
+	&mov	($base,$round);
+	&shr	($base,5);
+	&add	($base,5);
+	&mov	(&DWP(240,$key),$base);	# AES_KEY->rounds = nbits/32+5;
+	&shl	($base,4);
+	&lea	($key,&DWP(16,$key,$base));
+
+	&mov	($out,1);
+	&mov	($magic,$round);
+	&shr	($magic,1);
+	&and	($magic,32);
+	&xor	($magic,32);			# nbits==192?0:32;
+
+	&lea	($const,&DWP(&label("_vpaes_consts")."+0x30-".&label("pic_point")));
+	&call	("_vpaes_schedule_core");
+&set_label("pic_point");
+
+	&mov	("esp",&DWP(48,"esp"));
+	&xor	("eax","eax");
+&function_end("${PREFIX}_set_decrypt_key");
+
+&function_begin("${PREFIX}_encrypt");
+	&lea	($const,&DWP(&label("_vpaes_consts")."+0x30-".&label("pic_point")));
+	&call	("_vpaes_preheat");
+&set_label("pic_point");
+	&mov	($inp,&wparam(0));		# inp
+	&lea	($base,&DWP(-56,"esp"));
+	&mov	($out,&wparam(1));		# out
+	&and	($base,-16);
+	&mov	($key,&wparam(2));		# key
+	&xchg	($base,"esp");			# alloca
+	&mov	(&DWP(48,"esp"),$base);
+
+	&movdqu	("xmm0",&QWP(0,$inp));
+	&call	("_vpaes_encrypt_core");
+	&movdqu	(&QWP(0,$out),"xmm0");
+
+	&mov	("esp",&DWP(48,"esp"));
+&function_end("${PREFIX}_encrypt");
+
+&function_begin("${PREFIX}_decrypt");
+	&lea	($const,&DWP(&label("_vpaes_consts")."+0x30-".&label("pic_point")));
+	&call	("_vpaes_preheat");
+&set_label("pic_point");
+	&mov	($inp,&wparam(0));		# inp
+	&lea	($base,&DWP(-56,"esp"));
+	&mov	($out,&wparam(1));		# out
+	&and	($base,-16);
+	&mov	($key,&wparam(2));		# key
+	&xchg	($base,"esp");			# alloca
+	&mov	(&DWP(48,"esp"),$base);
+
+	&movdqu	("xmm0",&QWP(0,$inp));
+	&call	("_vpaes_decrypt_core");
+	&movdqu	(&QWP(0,$out),"xmm0");
+
+	&mov	("esp",&DWP(48,"esp"));
+&function_end("${PREFIX}_decrypt");
+
+&function_begin("${PREFIX}_cbc_encrypt");
+	&mov	($inp,&wparam(0));		# inp
+	&mov	($out,&wparam(1));		# out
+	&mov	($round,&wparam(2));		# len
+	&mov	($key,&wparam(3));		# key
+	&sub	($round,16);
+	&jc	(&label("cbc_abort"));
+	&lea	($base,&DWP(-56,"esp"));
+	&mov	($const,&wparam(4));		# ivp
+	&and	($base,-16);
+	&mov	($magic,&wparam(5));		# enc
+	&xchg	($base,"esp");			# alloca
+	&movdqu	("xmm1",&QWP(0,$const));	# load IV
+	&sub	($out,$inp);
+	&mov	(&DWP(48,"esp"),$base);
+
+	&mov	(&DWP(0,"esp"),$out);		# save out
+	&mov	(&DWP(4,"esp"),$key);		# save key
+	&mov	(&DWP(8,"esp"),$const);		# save ivp
+	&mov	($out,$round);			# $out works as $len
+
+	&lea	($const,&DWP(&label("_vpaes_consts")."+0x30-".&label("pic_point")));
+	&call	("_vpaes_preheat");
+&set_label("pic_point");
+	&cmp	($magic,0);
+	&je	(&label("cbc_dec_loop"));
+	&jmp	(&label("cbc_enc_loop"));
+
+&set_label("cbc_enc_loop",16);
+	&movdqu	("xmm0",&QWP(0,$inp));		# load input
+	&pxor	("xmm0","xmm1");		# inp^=iv
+	&call	("_vpaes_encrypt_core");
+	&mov	($base,&DWP(0,"esp"));		# restore out
+	&mov	($key,&DWP(4,"esp"));		# restore key
+	&movdqa	("xmm1","xmm0");
+	&movdqu	(&QWP(0,$base,$inp),"xmm0");	# write output
+	&lea	($inp,&DWP(16,$inp));
+	&sub	($out,16);
+	&jnc	(&label("cbc_enc_loop"));
+	&jmp	(&label("cbc_done"));
+
+&set_label("cbc_dec_loop",16);
+	&movdqu	("xmm0",&QWP(0,$inp));		# load input
+	&movdqa	(&QWP(16,"esp"),"xmm1");	# save IV
+	&movdqa	(&QWP(32,"esp"),"xmm0");	# save future IV
+	&call	("_vpaes_decrypt_core");
+	&mov	($base,&DWP(0,"esp"));		# restore out
+	&mov	($key,&DWP(4,"esp"));		# restore key
+	&pxor	("xmm0",&QWP(16,"esp"));	# out^=iv
+	&movdqa	("xmm1",&QWP(32,"esp"));	# load next IV
+	&movdqu	(&QWP(0,$base,$inp),"xmm0");	# write output
+	&lea	($inp,&DWP(16,$inp));
+	&sub	($out,16);
+	&jnc	(&label("cbc_dec_loop"));
+
+&set_label("cbc_done");
+	&mov	($base,&DWP(8,"esp"));		# restore ivp
+	&mov	("esp",&DWP(48,"esp"));
+	&movdqu	(&QWP(0,$base),"xmm1");		# write IV
+&set_label("cbc_abort");
+&function_end("${PREFIX}_cbc_encrypt");
+
+&asm_finish();
diff --git a/jni/openssl/crypto/aes/asm/vpaes-x86_64.pl b/jni/openssl/crypto/aes/asm/vpaes-x86_64.pl
new file mode 100644
index 0000000..37998db
--- /dev/null
+++ b/jni/openssl/crypto/aes/asm/vpaes-x86_64.pl
@@ -0,0 +1,1206 @@
+#!/usr/bin/env perl
+
+######################################################################
+## Constant-time SSSE3 AES core implementation.
+## version 0.1
+##
+## By Mike Hamburg (Stanford University), 2009
+## Public domain.
+##
+## For details see http://shiftleft.org/papers/vector_aes/ and
+## http://crypto.stanford.edu/vpaes/.
+
+######################################################################
+# September 2011.
+#
+# Interface to OpenSSL as "almost" drop-in replacement for
+# aes-x86_64.pl. "Almost" refers to the fact that AES_cbc_encrypt
+# doesn't handle partial vectors (doesn't have to if called from
+# EVP only). "Drop-in" implies that this module doesn't share key
+# schedule structure with the original nor does it make assumptions
+# about its alignment...
+#
+# Performance summary. aes-x86_64.pl column lists large-block CBC
+# encrypt/decrypt/with-hyper-threading-off(*) results in cycles per
+# byte processed with 128-bit key, and vpaes-x86_64.pl column -
+# [also large-block CBC] encrypt/decrypt.
+#
+#		aes-x86_64.pl		vpaes-x86_64.pl
+#
+# Core 2(**)	30.5/43.7/14.3		21.8/25.7(***)
+# Nehalem	30.5/42.2/14.6		 9.8/11.8
+# Atom		63.9/79.0/32.1		64.0/84.8(***)
+#
+# (*)	"Hyper-threading" in this context refers to cache shared among
+#	multiple cores rather than to Intel HTT specifically. As the vast
+#	majority of contemporary cores share cache, the slower code path
+#	is commonplace. In other words, "with-hyper-threading-off"
+#	results are presented mostly for reference purposes.
+#
+# (**)	"Core 2" refers to initial 65nm design, a.k.a. Conroe.
+#
+# (***)	Less impressive improvement on Core 2 and Atom is due to slow
+#	pshufb, yet it's a respectable +40%/78% improvement on Core 2
+#	(as implied, over "hyper-threading-safe" code path).
+#
+#						<appro@openssl.org>
+
+$flavour = shift;
+$output  = shift;
+if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
+
+$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
+die "can't locate x86_64-xlate.pl";
+
+open STDOUT,"| $^X $xlate $flavour $output";
+
+$PREFIX="vpaes";
+
+$code.=<<___;
+.text
+
+##
+##  _aes_encrypt_core
+##
+##  AES-encrypt %xmm0.
+##
+##  Inputs:
+##     %xmm0 = input
+##     %xmm9-%xmm15 as in _vpaes_preheat
+##    (%rdx) = scheduled keys
+##
+##  Output in %xmm0
+##  Clobbers  %xmm1-%xmm5, %r9, %r10, %r11, %rax
+##  Preserves %xmm6 - %xmm8 so you get some local vectors
+##
+##
+.type	_vpaes_encrypt_core,\@abi-omnipotent
+.align 16
+_vpaes_encrypt_core:
+	mov	%rdx,	%r9
+	mov	\$16,	%r11
+	mov	240(%rdx),%eax
+	movdqa	%xmm9,	%xmm1
+	movdqa	.Lk_ipt(%rip), %xmm2	# iptlo
+	pandn	%xmm0,	%xmm1
+	movdqu	(%r9),	%xmm5		# round0 key
+	psrld	\$4,	%xmm1
+	pand	%xmm9,	%xmm0
+	pshufb	%xmm0,	%xmm2
+	movdqa	.Lk_ipt+16(%rip), %xmm0	# ipthi
+	pshufb	%xmm1,	%xmm0
+	pxor	%xmm5,	%xmm2
+	pxor	%xmm2,	%xmm0
+	add	\$16,	%r9
+	lea	.Lk_mc_backward(%rip),%r10
+	jmp	.Lenc_entry
+
+.align 16
+.Lenc_loop:
+	# middle of middle round
+	movdqa  %xmm13,	%xmm4	# 4 : sb1u
+	pshufb  %xmm2,	%xmm4	# 4 = sb1u
+	pxor	%xmm5,	%xmm4	# 4 = sb1u + k
+	movdqa  %xmm12,	%xmm0	# 0 : sb1t
+	pshufb  %xmm3,	%xmm0	# 0 = sb1t
+	pxor	%xmm4,	%xmm0	# 0 = A
+	movdqa  %xmm15,	%xmm5	# 4 : sb2u
+	pshufb	%xmm2,	%xmm5	# 4 = sb2u
+	movdqa	-0x40(%r11,%r10), %xmm1		# .Lk_mc_forward[]
+	movdqa	%xmm14, %xmm2	# 2 : sb2t
+	pshufb	%xmm3,  %xmm2	# 2 = sb2t
+	pxor	%xmm5,	%xmm2	# 2 = 2A
+	movdqa	(%r11,%r10), %xmm4		# .Lk_mc_backward[]
+	movdqa	%xmm0,  %xmm3	# 3 = A
+	pshufb  %xmm1,  %xmm0	# 0 = B
+	add	\$16,	%r9	# next key
+	pxor	%xmm2,  %xmm0	# 0 = 2A+B
+	pshufb	%xmm4,	%xmm3	# 3 = D
+	add	\$16,	%r11	# next mc
+	pxor	%xmm0,	%xmm3	# 3 = 2A+B+D
+	pshufb  %xmm1,	%xmm0	# 0 = 2B+C
+	and	\$0x30,	%r11	# ... mod 4
+	pxor	%xmm3,	%xmm0	# 0 = 2A+3B+C+D
+	sub	\$1,%rax	# nr--
+
+.Lenc_entry:
+	# top of round
+	movdqa  %xmm9, 	%xmm1	# 1 : i
+	pandn	%xmm0, 	%xmm1	# 1 = i<<4
+	psrld	\$4,   	%xmm1   # 1 = i
+	pand	%xmm9, 	%xmm0   # 0 = k
+	movdqa	%xmm11, %xmm5	# 2 : a/k
+	pshufb  %xmm0,  %xmm5	# 2 = a/k
+	pxor	%xmm1,	%xmm0	# 0 = j
+	movdqa	%xmm10,	%xmm3  	# 3 : 1/i
+	pshufb  %xmm1, 	%xmm3  	# 3 = 1/i
+	pxor	%xmm5, 	%xmm3  	# 3 = iak = 1/i + a/k
+	movdqa	%xmm10,	%xmm4  	# 4 : 1/j
+	pshufb	%xmm0, 	%xmm4  	# 4 = 1/j
+	pxor	%xmm5, 	%xmm4  	# 4 = jak = 1/j + a/k
+	movdqa	%xmm10,	%xmm2  	# 2 : 1/iak
+	pshufb  %xmm3,	%xmm2  	# 2 = 1/iak
+	pxor	%xmm0, 	%xmm2  	# 2 = io
+	movdqa	%xmm10, %xmm3   # 3 : 1/jak
+	movdqu	(%r9),	%xmm5
+	pshufb  %xmm4,  %xmm3   # 3 = 1/jak
+	pxor	%xmm1,  %xmm3   # 3 = jo
+	jnz	.Lenc_loop
+
+	# middle of last round
+	movdqa	-0x60(%r10), %xmm4	# 3 : sbou	.Lk_sbo
+	movdqa	-0x50(%r10), %xmm0	# 0 : sbot	.Lk_sbo+16
+	pshufb  %xmm2,  %xmm4	# 4 = sbou
+	pxor	%xmm5,  %xmm4	# 4 = sb1u + k
+	pshufb  %xmm3,	%xmm0	# 0 = sb1t
+	movdqa	0x40(%r11,%r10), %xmm1		# .Lk_sr[]
+	pxor	%xmm4,	%xmm0	# 0 = A
+	pshufb	%xmm1,	%xmm0
+	ret
+.size	_vpaes_encrypt_core,.-_vpaes_encrypt_core
+	
+##
+##  Decryption core
+##
+##  Same API as encryption core.
+##
+.type	_vpaes_decrypt_core,\@abi-omnipotent
+.align	16
+_vpaes_decrypt_core:
+	mov	%rdx,	%r9		# load key
+	mov	240(%rdx),%eax
+	movdqa	%xmm9,	%xmm1
+	movdqa	.Lk_dipt(%rip), %xmm2	# iptlo
+	pandn	%xmm0,	%xmm1
+	mov	%rax,	%r11
+	psrld	\$4,	%xmm1
+	movdqu	(%r9),	%xmm5		# round0 key
+	shl	\$4,	%r11
+	pand	%xmm9,	%xmm0
+	pshufb	%xmm0,	%xmm2
+	movdqa	.Lk_dipt+16(%rip), %xmm0 # ipthi
+	xor	\$0x30,	%r11
+	lea	.Lk_dsbd(%rip),%r10
+	pshufb	%xmm1,	%xmm0
+	and	\$0x30,	%r11
+	pxor	%xmm5,	%xmm2
+	movdqa	.Lk_mc_forward+48(%rip), %xmm5
+	pxor	%xmm2,	%xmm0
+	add	\$16,	%r9
+	add	%r10,	%r11
+	jmp	.Ldec_entry
+
+.align 16
+.Ldec_loop:
+##
+##  Inverse mix columns
+##
+	movdqa  -0x20(%r10),%xmm4	# 4 : sb9u
+	pshufb	%xmm2,	%xmm4		# 4 = sb9u
+	pxor	%xmm0,	%xmm4
+	movdqa  -0x10(%r10),%xmm0	# 0 : sb9t
+	pshufb	%xmm3,	%xmm0		# 0 = sb9t
+	pxor	%xmm4,	%xmm0		# 0 = ch
+	add	\$16, %r9		# next round key
+
+	pshufb	%xmm5,	%xmm0		# MC ch
+	movdqa  0x00(%r10),%xmm4	# 4 : sbdu
+	pshufb	%xmm2,	%xmm4		# 4 = sbdu
+	pxor	%xmm0,	%xmm4		# 4 = ch
+	movdqa  0x10(%r10),%xmm0	# 0 : sbdt
+	pshufb	%xmm3,	%xmm0		# 0 = sbdt
+	pxor	%xmm4,	%xmm0		# 0 = ch
+	sub	\$1,%rax		# nr--
+	
+	pshufb	%xmm5,	%xmm0		# MC ch
+	movdqa  0x20(%r10),%xmm4	# 4 : sbbu
+	pshufb	%xmm2,	%xmm4		# 4 = sbbu
+	pxor	%xmm0,	%xmm4		# 4 = ch
+	movdqa  0x30(%r10),%xmm0	# 0 : sbbt
+	pshufb	%xmm3,	%xmm0		# 0 = sbbt
+	pxor	%xmm4,	%xmm0		# 0 = ch
+	
+	pshufb	%xmm5,	%xmm0		# MC ch
+	movdqa  0x40(%r10),%xmm4	# 4 : sbeu
+	pshufb	%xmm2,	%xmm4		# 4 = sbeu
+	pxor	%xmm0,	%xmm4		# 4 = ch
+	movdqa  0x50(%r10),%xmm0	# 0 : sbet
+	pshufb	%xmm3,	%xmm0		# 0 = sbet
+	pxor	%xmm4,	%xmm0		# 0 = ch
+
+	palignr	\$12,	%xmm5,	%xmm5
+	
+.Ldec_entry:
+	# top of round
+	movdqa  %xmm9, 	%xmm1	# 1 : i
+	pandn	%xmm0, 	%xmm1	# 1 = i<<4
+	psrld	\$4,    %xmm1	# 1 = i
+	pand	%xmm9, 	%xmm0	# 0 = k
+	movdqa	%xmm11, %xmm2	# 2 : a/k
+	pshufb  %xmm0,  %xmm2	# 2 = a/k
+	pxor	%xmm1,	%xmm0	# 0 = j
+	movdqa	%xmm10,	%xmm3	# 3 : 1/i
+	pshufb  %xmm1, 	%xmm3	# 3 = 1/i
+	pxor	%xmm2, 	%xmm3	# 3 = iak = 1/i + a/k
+	movdqa	%xmm10,	%xmm4	# 4 : 1/j
+	pshufb	%xmm0, 	%xmm4	# 4 = 1/j
+	pxor	%xmm2, 	%xmm4	# 4 = jak = 1/j + a/k
+	movdqa	%xmm10,	%xmm2	# 2 : 1/iak
+	pshufb  %xmm3,	%xmm2	# 2 = 1/iak
+	pxor	%xmm0, 	%xmm2	# 2 = io
+	movdqa	%xmm10, %xmm3	# 3 : 1/jak
+	pshufb  %xmm4,  %xmm3	# 3 = 1/jak
+	pxor	%xmm1,  %xmm3	# 3 = jo
+	movdqu	(%r9),	%xmm0
+	jnz	.Ldec_loop
+
+	# middle of last round
+	movdqa	0x60(%r10), %xmm4	# 3 : sbou
+	pshufb  %xmm2,  %xmm4	# 4 = sbou
+	pxor	%xmm0,  %xmm4	# 4 = sb1u + k
+	movdqa	0x70(%r10), %xmm0	# 0 : sbot
+	movdqa	-0x160(%r11), %xmm2	# .Lk_sr-.Lk_dsbd=-0x160
+	pshufb  %xmm3,	%xmm0	# 0 = sb1t
+	pxor	%xmm4,	%xmm0	# 0 = A
+	pshufb	%xmm2,	%xmm0
+	ret
+.size	_vpaes_decrypt_core,.-_vpaes_decrypt_core
+
+########################################################
+##                                                    ##
+##                  AES key schedule                  ##
+##                                                    ##
+########################################################
+.type	_vpaes_schedule_core,\@abi-omnipotent
+.align	16
+_vpaes_schedule_core:
+	# rdi = key
+	# rsi = size in bits
+	# rdx = buffer
+	# rcx = direction.  0=encrypt, 1=decrypt
+
+	call	_vpaes_preheat		# load the tables
+	movdqa	.Lk_rcon(%rip), %xmm8	# load rcon
+	movdqu	(%rdi),	%xmm0		# load key (unaligned)
+
+	# input transform
+	movdqa	%xmm0,	%xmm3
+	lea	.Lk_ipt(%rip), %r11
+	call	_vpaes_schedule_transform
+	movdqa	%xmm0,	%xmm7
+
+	lea	.Lk_sr(%rip),%r10
+	test	%rcx,	%rcx
+	jnz	.Lschedule_am_decrypting
+
+	# encrypting, output zeroth round key after transform
+	movdqu	%xmm0,	(%rdx)
+	jmp	.Lschedule_go
+
+.Lschedule_am_decrypting:
+	# decrypting, output zeroth round key after shiftrows
+	movdqa	(%r8,%r10),%xmm1
+	pshufb  %xmm1,	%xmm3
+	movdqu	%xmm3,	(%rdx)
+	xor	\$0x30, %r8
+
+.Lschedule_go:
+	cmp	\$192,	%esi
+	ja	.Lschedule_256
+	je	.Lschedule_192
+	# 128: fall through
+
+##
+##  .schedule_128
+##
+##  128-bit specific part of key schedule.
+##
+##  This schedule is really simple, because all its parts
+##  are accomplished by the subroutines.
+##
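+##  For reference, this mirrors the textbook AES-128 expansion
+##  (informal sketch, not part of the generated code):
+##      W[i] = W[i-4] ^ (i % 4 ? W[i-1]
+##                             : SubWord(RotWord(W[i-1])) ^ Rcon[i/4])
+##  Round key 0 was written above; the 10 loop iterations below produce
+##  round keys 1..10, the last of them via .Lschedule_mangle_last.
+##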
+.Lschedule_128:
+	mov	\$10, %esi
+	
+.Loop_schedule_128:
+	call 	_vpaes_schedule_round
+	dec	%rsi
+	jz 	.Lschedule_mangle_last
+	call	_vpaes_schedule_mangle	# write output
+	jmp 	.Loop_schedule_128
+
+##
+##  .aes_schedule_192
+##
+##  192-bit specific part of key schedule.
+##
+##  The main body of this schedule is the same as the 128-bit
+##  schedule, but with more smearing.  The long, high side is
+##  stored in %xmm7 as before, and the short, low side is in
+##  the high bits of %xmm6.
+##
+##  This schedule is somewhat nastier, however, because each
+##  round produces 192 bits of key material, or 1.5 round keys.
+##  Therefore, on each cycle we do 2 rounds and produce 3 round
+##  keys.
+##
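+##  Sanity check (informal): AES-192 needs 13 round keys.  Key 0 is
+##  written above; iterations 1-3 of the loop below write three keys
+##  each, and iteration 4 writes two more before falling into
+##  .Lschedule_mangle_last for the thirteenth.
+##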
+.align	16
+.Lschedule_192:
+	movdqu	8(%rdi),%xmm0		# load key part 2 (very unaligned)
+	call	_vpaes_schedule_transform	# input transform
+	movdqa	%xmm0,	%xmm6		# save short part
+	pxor	%xmm4,	%xmm4		# clear 4
+	movhlps	%xmm4,	%xmm6		# clobber low side with zeros
+	mov	\$4,	%esi
+
+.Loop_schedule_192:
+	call	_vpaes_schedule_round
+	palignr	\$8,%xmm6,%xmm0	
+	call	_vpaes_schedule_mangle	# save key n
+	call	_vpaes_schedule_192_smear
+	call	_vpaes_schedule_mangle	# save key n+1
+	call	_vpaes_schedule_round
+	dec	%rsi
+	jz 	.Lschedule_mangle_last
+	call	_vpaes_schedule_mangle	# save key n+2
+	call	_vpaes_schedule_192_smear
+	jmp	.Loop_schedule_192
+
+##
+##  .aes_schedule_256
+##
+##  256-bit specific part of key schedule.
+##
+##  The structure here is very similar to the 128-bit
+##  schedule, but with an additional "low side" in
+##  %xmm6.  The low side's rounds are the same as the
+##  high side's, except no rcon and no rotation.
+##
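+##  Sanity check (informal): AES-256 needs 15 round keys.  Key 0 is
+##  written above; each of the first six iterations below writes two
+##  keys, and the seventh writes one more before jumping to
+##  .Lschedule_mangle_last for the fifteenth.
+##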
+.align	16
+.Lschedule_256:
+	movdqu	16(%rdi),%xmm0		# load key part 2 (unaligned)
+	call	_vpaes_schedule_transform	# input transform
+	mov	\$7, %esi
+	
+.Loop_schedule_256:
+	call	_vpaes_schedule_mangle	# output low result
+	movdqa	%xmm0,	%xmm6		# save cur_lo in xmm6
+
+	# high round
+	call	_vpaes_schedule_round
+	dec	%rsi
+	jz 	.Lschedule_mangle_last
+	call	_vpaes_schedule_mangle	
+
+	# low round. swap xmm7 and xmm6
+	pshufd	\$0xFF,	%xmm0,	%xmm0
+	movdqa	%xmm7,	%xmm5
+	movdqa	%xmm6,	%xmm7
+	call	_vpaes_schedule_low_round
+	movdqa	%xmm5,	%xmm7
+	
+	jmp	.Loop_schedule_256
+
+	
+##
+##  .aes_schedule_mangle_last
+##
+##  Mangler for last round of key schedule
+##  Mangles %xmm0
+##    when encrypting, outputs out(%xmm0) ^ 0x63
+##    when decrypting, outputs unskew(%xmm0)
+##
+##  Always called right before return... jumps to cleanup and exits
+##
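+##  (The "^ 0x63" is the AES S-box affine constant; in the transformed
+##  basis used here it shows up as the 0x5B bytes of .Lk_s63.)
+##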
+.align	16
+.Lschedule_mangle_last:
+	# schedule last round key from xmm0
+	lea	.Lk_deskew(%rip),%r11	# prepare to deskew
+	test	%rcx, 	%rcx
+	jnz	.Lschedule_mangle_last_dec
+
+	# encrypting
+	movdqa	(%r8,%r10),%xmm1
+	pshufb	%xmm1,	%xmm0		# output permute
+	lea	.Lk_opt(%rip),	%r11	# prepare to output transform
+	add	\$32,	%rdx
+
+.Lschedule_mangle_last_dec:
+	add	\$-16,	%rdx
+	pxor	.Lk_s63(%rip),	%xmm0
+	call	_vpaes_schedule_transform # output transform
+	movdqu	%xmm0,	(%rdx)		# save last key
+
+	# cleanup
+	pxor	%xmm0,  %xmm0
+	pxor	%xmm1,  %xmm1
+	pxor	%xmm2,  %xmm2
+	pxor	%xmm3,  %xmm3
+	pxor	%xmm4,  %xmm4
+	pxor	%xmm5,  %xmm5
+	pxor	%xmm6,  %xmm6
+	pxor	%xmm7,  %xmm7
+	ret
+.size	_vpaes_schedule_core,.-_vpaes_schedule_core
+
+##
+##  .aes_schedule_192_smear
+##
+##  Smear the short, low side in the 192-bit key schedule.
+##
+##  Inputs:
+##    %xmm7: high side, b  a  x  y
+##    %xmm6:  low side, d  c  0  0
+##    (%xmm1: clobbered as scratch; zeroed by the code itself)
+##
+##  Outputs:
+##    %xmm6: b+c+d  b+c  0  0
+##    %xmm0: b+c+d  b+c  b  a
+##
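+##  Informal derivation: pshufd 0x80 turns (d c 0 0) into (c 0 0 0),
+##  giving %xmm6 = (c+d, c, 0, 0); pshufd 0xFE turns (b a _ _) into
+##  (b b b a), giving %xmm6 = (b+c+d, b+c, b, a), which is copied to
+##  %xmm0 before the low qword of %xmm6 is cleared again.
+##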
+.type	_vpaes_schedule_192_smear,\@abi-omnipotent
+.align	16
+_vpaes_schedule_192_smear:
+	pshufd	\$0x80,	%xmm6,	%xmm0	# d c 0 0 -> c 0 0 0
+	pxor	%xmm0,	%xmm6		# -> c+d c 0 0
+	pshufd	\$0xFE,	%xmm7,	%xmm0	# b a _ _ -> b b b a
+	pxor	%xmm0,	%xmm6		# -> b+c+d b+c b a
+	movdqa	%xmm6,	%xmm0
+	pxor	%xmm1,	%xmm1
+	movhlps	%xmm1,	%xmm6		# clobber low side with zeros
+	ret
+.size	_vpaes_schedule_192_smear,.-_vpaes_schedule_192_smear
+
+##
+##  .aes_schedule_round
+##
+##  Runs one main round of the key schedule on %xmm0, %xmm7
+##
+##  Specifically, runs subbytes on the high dword of %xmm0
+##  then rotates it by one byte and xors into the low dword of
+##  %xmm7.
+##
+##  Adds rcon from low byte of %xmm8, then rotates %xmm8 for
+##  next rcon.
+##
+##  Smears the dwords of %xmm7 by xoring the low into the
+##  second low, result into third, result into highest.
+##
+##  Returns results in %xmm7 = %xmm0.
+##  Clobbers %xmm1-%xmm4, %r11.
+##
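+##  Loosely, this is the textbook expansion step: the rotate, subbytes
+##  and rcon applied to %xmm0 play the role of RotWord/SubWord/Rcon,
+##  and the "smear" of %xmm7 is the running XOR over the previous four
+##  words of the schedule.
+##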
+.type	_vpaes_schedule_round,\@abi-omnipotent
+.align	16
+_vpaes_schedule_round:
+	# extract rcon from xmm8
+	pxor	%xmm1,	%xmm1
+	palignr	\$15,	%xmm8,	%xmm1
+	palignr	\$15,	%xmm8,	%xmm8
+	pxor	%xmm1,	%xmm7
+
+	# rotate
+	pshufd	\$0xFF,	%xmm0,	%xmm0
+	palignr	\$1,	%xmm0,	%xmm0
+	
+	# fall through...
+	
+	# low round: same as high round, but no rotation and no rcon.
+_vpaes_schedule_low_round:
+	# smear xmm7
+	movdqa	%xmm7,	%xmm1
+	pslldq	\$4,	%xmm7
+	pxor	%xmm1,	%xmm7
+	movdqa	%xmm7,	%xmm1
+	pslldq	\$8,	%xmm7
+	pxor	%xmm1,	%xmm7
+	pxor	.Lk_s63(%rip), %xmm7
+
+	# subbytes
+	movdqa  %xmm9, 	%xmm1
+	pandn	%xmm0, 	%xmm1
+	psrld	\$4,    %xmm1		# 1 = i
+	pand	%xmm9, 	%xmm0		# 0 = k
+	movdqa	%xmm11, %xmm2		# 2 : a/k
+	pshufb  %xmm0,  %xmm2		# 2 = a/k
+	pxor	%xmm1,	%xmm0		# 0 = j
+	movdqa	%xmm10,	%xmm3		# 3 : 1/i
+	pshufb  %xmm1, 	%xmm3		# 3 = 1/i
+	pxor	%xmm2, 	%xmm3		# 3 = iak = 1/i + a/k
+	movdqa	%xmm10,	%xmm4		# 4 : 1/j
+	pshufb	%xmm0, 	%xmm4		# 4 = 1/j
+	pxor	%xmm2, 	%xmm4		# 4 = jak = 1/j + a/k
+	movdqa	%xmm10,	%xmm2		# 2 : 1/iak
+	pshufb  %xmm3,	%xmm2		# 2 = 1/iak
+	pxor	%xmm0, 	%xmm2		# 2 = io
+	movdqa	%xmm10, %xmm3		# 3 : 1/jak
+	pshufb  %xmm4,  %xmm3		# 3 = 1/jak
+	pxor	%xmm1,  %xmm3		# 3 = jo
+	movdqa	%xmm13, %xmm4		# 4 : sbou
+	pshufb  %xmm2,  %xmm4		# 4 = sbou
+	movdqa	%xmm12, %xmm0		# 0 : sbot
+	pshufb  %xmm3,	%xmm0		# 0 = sb1t
+	pxor	%xmm4, 	%xmm0		# 0 = sbox output
+
+	# add in smeared stuff
+	pxor	%xmm7,	%xmm0	
+	movdqa	%xmm0,	%xmm7
+	ret
+.size	_vpaes_schedule_round,.-_vpaes_schedule_round
+
+##
+##  .aes_schedule_transform
+##
+##  Linear-transform %xmm0 according to tables at (%r11)
+##
+##  Requires that %xmm9 = 0x0F0F... as in preheat
+##  Output in %xmm0
+##  Clobbers %xmm1, %xmm2
+##
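+##  Per byte this is the usual vpaes nibble split (informal sketch,
+##  assuming the tables at (%r11) are 16-entry pshufb lookups):
+##      out = hi_tbl[x >> 4] ^ lo_tbl[x & 0x0F]
+##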
+.type	_vpaes_schedule_transform,\@abi-omnipotent
+.align	16
+_vpaes_schedule_transform:
+	movdqa	%xmm9,	%xmm1
+	pandn	%xmm0,	%xmm1
+	psrld	\$4,	%xmm1
+	pand	%xmm9,	%xmm0
+	movdqa	(%r11), %xmm2 	# lo
+	pshufb	%xmm0,	%xmm2
+	movdqa	16(%r11), %xmm0 # hi
+	pshufb	%xmm1,	%xmm0
+	pxor	%xmm2,	%xmm0
+	ret
+.size	_vpaes_schedule_transform,.-_vpaes_schedule_transform
+
+##
+##  .aes_schedule_mangle
+##
+##  Mangle xmm0 from (basis-transformed) standard version
+##  to our version.
+##
+##  On encrypt,
+##    xor with 0x63
+##    multiply by circulant 0,1,1,1
+##    apply shiftrows transform
+##
+##  On decrypt,
+##    xor with 0x63
+##    multiply by "inverse mixcolumns" circulant E,B,D,9
+##    deskew
+##    apply shiftrows transform
+##
+##
+##  Writes out to (%rdx), and increments or decrements it
+##  Keeps track of round number mod 4 in %r8
+##  Preserves xmm0
+##  Clobbers xmm1-xmm5
+##
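+##  (In the encrypt path below, the "circulant 0,1,1,1" step appears as
+##  three successive .Lk_mc_forward rotations of (%xmm0 ^ .Lk_s63),
+##  XORed together into %xmm3.)
+##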
+.type	_vpaes_schedule_mangle,\@abi-omnipotent
+.align	16
+_vpaes_schedule_mangle:
+	movdqa	%xmm0,	%xmm4	# save xmm0 for later
+	movdqa	.Lk_mc_forward(%rip),%xmm5
+	test	%rcx, 	%rcx
+	jnz	.Lschedule_mangle_dec
+
+	# encrypting
+	add	\$16,	%rdx
+	pxor	.Lk_s63(%rip),%xmm4
+	pshufb	%xmm5,	%xmm4
+	movdqa	%xmm4,	%xmm3
+	pshufb	%xmm5,	%xmm4
+	pxor	%xmm4,	%xmm3
+	pshufb	%xmm5,	%xmm4
+	pxor	%xmm4,	%xmm3
+
+	jmp	.Lschedule_mangle_both
+.align	16
+.Lschedule_mangle_dec:
+	# inverse mix columns
+	lea	.Lk_dksd(%rip),%r11
+	movdqa	%xmm9,	%xmm1
+	pandn	%xmm4,	%xmm1
+	psrld	\$4,	%xmm1	# 1 = hi
+	pand	%xmm9,	%xmm4	# 4 = lo
+
+	movdqa	0x00(%r11), %xmm2
+	pshufb	%xmm4,	%xmm2
+	movdqa	0x10(%r11), %xmm3
+	pshufb	%xmm1,	%xmm3
+	pxor	%xmm2,	%xmm3
+	pshufb	%xmm5,	%xmm3
+
+	movdqa	0x20(%r11), %xmm2
+	pshufb	%xmm4,	%xmm2
+	pxor	%xmm3,	%xmm2
+	movdqa	0x30(%r11), %xmm3
+	pshufb	%xmm1,	%xmm3
+	pxor	%xmm2,	%xmm3
+	pshufb	%xmm5,	%xmm3
+
+	movdqa	0x40(%r11), %xmm2
+	pshufb	%xmm4,	%xmm2
+	pxor	%xmm3,	%xmm2
+	movdqa	0x50(%r11), %xmm3
+	pshufb	%xmm1,	%xmm3
+	pxor	%xmm2,	%xmm3
+	pshufb	%xmm5,	%xmm3
+
+	movdqa	0x60(%r11), %xmm2
+	pshufb	%xmm4,	%xmm2
+	pxor	%xmm3,	%xmm2
+	movdqa	0x70(%r11), %xmm3
+	pshufb	%xmm1,	%xmm3
+	pxor	%xmm2,	%xmm3
+
+	add	\$-16,	%rdx
+
+.Lschedule_mangle_both:
+	movdqa	(%r8,%r10),%xmm1
+	pshufb	%xmm1,%xmm3
+	add	\$-16,	%r8
+	and	\$0x30,	%r8
+	movdqu	%xmm3,	(%rdx)
+	ret
+.size	_vpaes_schedule_mangle,.-_vpaes_schedule_mangle
+
+#
+# Interface to OpenSSL
+#
+.globl	${PREFIX}_set_encrypt_key
+.type	${PREFIX}_set_encrypt_key,\@function,3
+.align	16
+${PREFIX}_set_encrypt_key:
+___
+$code.=<<___ if ($win64);
+	lea	-0xb8(%rsp),%rsp
+	movaps	%xmm6,0x10(%rsp)
+	movaps	%xmm7,0x20(%rsp)
+	movaps	%xmm8,0x30(%rsp)
+	movaps	%xmm9,0x40(%rsp)
+	movaps	%xmm10,0x50(%rsp)
+	movaps	%xmm11,0x60(%rsp)
+	movaps	%xmm12,0x70(%rsp)
+	movaps	%xmm13,0x80(%rsp)
+	movaps	%xmm14,0x90(%rsp)
+	movaps	%xmm15,0xa0(%rsp)
+.Lenc_key_body:
+___
+$code.=<<___;
+	mov	%esi,%eax
+	shr	\$5,%eax
+	add	\$5,%eax
+	mov	%eax,240(%rdx)	# AES_KEY->rounds = nbits/32+5;
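+				# e.g. 128 -> 9, 192 -> 11, 256 -> 13,
+				# i.e. one less than the textbook
+				# 10/12/14 round counts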
+
+	mov	\$0,%ecx
+	mov	\$0x30,%r8d
+	call	_vpaes_schedule_core
+___
+$code.=<<___ if ($win64);
+	movaps	0x10(%rsp),%xmm6
+	movaps	0x20(%rsp),%xmm7
+	movaps	0x30(%rsp),%xmm8
+	movaps	0x40(%rsp),%xmm9
+	movaps	0x50(%rsp),%xmm10
+	movaps	0x60(%rsp),%xmm11
+	movaps	0x70(%rsp),%xmm12
+	movaps	0x80(%rsp),%xmm13
+	movaps	0x90(%rsp),%xmm14
+	movaps	0xa0(%rsp),%xmm15
+	lea	0xb8(%rsp),%rsp
+.Lenc_key_epilogue:
+___
+$code.=<<___;
+	xor	%eax,%eax
+	ret
+.size	${PREFIX}_set_encrypt_key,.-${PREFIX}_set_encrypt_key
+
+.globl	${PREFIX}_set_decrypt_key
+.type	${PREFIX}_set_decrypt_key,\@function,3
+.align	16
+${PREFIX}_set_decrypt_key:
+___
+$code.=<<___ if ($win64);
+	lea	-0xb8(%rsp),%rsp
+	movaps	%xmm6,0x10(%rsp)
+	movaps	%xmm7,0x20(%rsp)
+	movaps	%xmm8,0x30(%rsp)
+	movaps	%xmm9,0x40(%rsp)
+	movaps	%xmm10,0x50(%rsp)
+	movaps	%xmm11,0x60(%rsp)
+	movaps	%xmm12,0x70(%rsp)
+	movaps	%xmm13,0x80(%rsp)
+	movaps	%xmm14,0x90(%rsp)
+	movaps	%xmm15,0xa0(%rsp)
+.Ldec_key_body:
+___
+$code.=<<___;
+	mov	%esi,%eax
+	shr	\$5,%eax
+	add	\$5,%eax
+	mov	%eax,240(%rdx)	# AES_KEY->rounds = nbits/32+5;
+	shl	\$4,%eax
+	lea	16(%rdx,%rax),%rdx
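+				# rdx now points just past the last round
+				# key; for decryption the schedule is
+				# written back to front (see the negative
+				# rdx adjustments in _vpaes_schedule_mangle)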
+
+	mov	\$1,%ecx
+	mov	%esi,%r8d
+	shr	\$1,%r8d
+	and	\$32,%r8d
+	xor	\$32,%r8d	# nbits==192?0:32
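+				# informally: 128>>1&32=0, 192>>1&32=32,
+				# 256>>1&32=0, so after the xor r8d is
+				# 0x20 for 128/256-bit keys and 0 for 192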
+	call	_vpaes_schedule_core
+___
+$code.=<<___ if ($win64);
+	movaps	0x10(%rsp),%xmm6
+	movaps	0x20(%rsp),%xmm7
+	movaps	0x30(%rsp),%xmm8
+	movaps	0x40(%rsp),%xmm9
+	movaps	0x50(%rsp),%xmm10
+	movaps	0x60(%rsp),%xmm11
+	movaps	0x70(%rsp),%xmm12
+	movaps	0x80(%rsp),%xmm13
+	movaps	0x90(%rsp),%xmm14
+	movaps	0xa0(%rsp),%xmm15
+	lea	0xb8(%rsp),%rsp
+.Ldec_key_epilogue:
+___
+$code.=<<___;
+	xor	%eax,%eax
+	ret
+.size	${PREFIX}_set_decrypt_key,.-${PREFIX}_set_decrypt_key
+
+.globl	${PREFIX}_encrypt
+.type	${PREFIX}_encrypt,\@function,3
+.align	16
+${PREFIX}_encrypt:
+___
+$code.=<<___ if ($win64);
+	lea	-0xb8(%rsp),%rsp
+	movaps	%xmm6,0x10(%rsp)
+	movaps	%xmm7,0x20(%rsp)
+	movaps	%xmm8,0x30(%rsp)
+	movaps	%xmm9,0x40(%rsp)
+	movaps	%xmm10,0x50(%rsp)
+	movaps	%xmm11,0x60(%rsp)
+	movaps	%xmm12,0x70(%rsp)
+	movaps	%xmm13,0x80(%rsp)
+	movaps	%xmm14,0x90(%rsp)
+	movaps	%xmm15,0xa0(%rsp)
+.Lenc_body:
+___
+$code.=<<___;
+	movdqu	(%rdi),%xmm0
+	call	_vpaes_preheat
+	call	_vpaes_encrypt_core
+	movdqu	%xmm0,(%rsi)
+___
+$code.=<<___ if ($win64);
+	movaps	0x10(%rsp),%xmm6
+	movaps	0x20(%rsp),%xmm7
+	movaps	0x30(%rsp),%xmm8
+	movaps	0x40(%rsp),%xmm9
+	movaps	0x50(%rsp),%xmm10
+	movaps	0x60(%rsp),%xmm11
+	movaps	0x70(%rsp),%xmm12
+	movaps	0x80(%rsp),%xmm13
+	movaps	0x90(%rsp),%xmm14
+	movaps	0xa0(%rsp),%xmm15
+	lea	0xb8(%rsp),%rsp
+.Lenc_epilogue:
+___
+$code.=<<___;
+	ret
+.size	${PREFIX}_encrypt,.-${PREFIX}_encrypt
+
+.globl	${PREFIX}_decrypt
+.type	${PREFIX}_decrypt,\@function,3
+.align	16
+${PREFIX}_decrypt:
+___
+$code.=<<___ if ($win64);
+	lea	-0xb8(%rsp),%rsp
+	movaps	%xmm6,0x10(%rsp)
+	movaps	%xmm7,0x20(%rsp)
+	movaps	%xmm8,0x30(%rsp)
+	movaps	%xmm9,0x40(%rsp)
+	movaps	%xmm10,0x50(%rsp)
+	movaps	%xmm11,0x60(%rsp)
+	movaps	%xmm12,0x70(%rsp)
+	movaps	%xmm13,0x80(%rsp)
+	movaps	%xmm14,0x90(%rsp)
+	movaps	%xmm15,0xa0(%rsp)
+.Ldec_body:
+___
+$code.=<<___;
+	movdqu	(%rdi),%xmm0
+	call	_vpaes_preheat
+	call	_vpaes_decrypt_core
+	movdqu	%xmm0,(%rsi)
+___
+$code.=<<___ if ($win64);
+	movaps	0x10(%rsp),%xmm6
+	movaps	0x20(%rsp),%xmm7
+	movaps	0x30(%rsp),%xmm8
+	movaps	0x40(%rsp),%xmm9
+	movaps	0x50(%rsp),%xmm10
+	movaps	0x60(%rsp),%xmm11
+	movaps	0x70(%rsp),%xmm12
+	movaps	0x80(%rsp),%xmm13
+	movaps	0x90(%rsp),%xmm14
+	movaps	0xa0(%rsp),%xmm15
+	lea	0xb8(%rsp),%rsp
+.Ldec_epilogue:
+___
+$code.=<<___;
+	ret
+.size	${PREFIX}_decrypt,.-${PREFIX}_decrypt
+___
+{
+my ($inp,$out,$len,$key,$ivp,$enc)=("%rdi","%rsi","%rdx","%rcx","%r8","%r9");
+# void AES_cbc_encrypt (const unsigned char *inp, unsigned char *out,
+#                       size_t length, const AES_KEY *key,
+#                       unsigned char *ivp,const int enc);
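+#
+# The loops below keep the chaining value in %xmm6 and implement the
+# usual CBC recurrences: C[i] = E_K(P[i] ^ C[i-1]) on encrypt and
+# P[i] = D_K(C[i]) ^ C[i-1] on decrypt (informal reading of the code).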
+$code.=<<___;
+.globl	${PREFIX}_cbc_encrypt
+.type	${PREFIX}_cbc_encrypt,\@function,6
+.align	16
+${PREFIX}_cbc_encrypt:
+	xchg	$key,$len
+___
+($len,$key)=($key,$len);
+$code.=<<___;
+	sub	\$16,$len
+	jc	.Lcbc_abort
+___
+$code.=<<___ if ($win64);
+	lea	-0xb8(%rsp),%rsp
+	movaps	%xmm6,0x10(%rsp)
+	movaps	%xmm7,0x20(%rsp)
+	movaps	%xmm8,0x30(%rsp)
+	movaps	%xmm9,0x40(%rsp)
+	movaps	%xmm10,0x50(%rsp)
+	movaps	%xmm11,0x60(%rsp)
+	movaps	%xmm12,0x70(%rsp)
+	movaps	%xmm13,0x80(%rsp)
+	movaps	%xmm14,0x90(%rsp)
+	movaps	%xmm15,0xa0(%rsp)
+.Lcbc_body:
+___
+$code.=<<___;
+	movdqu	($ivp),%xmm6		# load IV
+	sub	$inp,$out
+	call	_vpaes_preheat
+	cmp	\$0,${enc}d
+	je	.Lcbc_dec_loop
+	jmp	.Lcbc_enc_loop
+.align	16
+.Lcbc_enc_loop:
+	movdqu	($inp),%xmm0
+	pxor	%xmm6,%xmm0
+	call	_vpaes_encrypt_core
+	movdqa	%xmm0,%xmm6
+	movdqu	%xmm0,($out,$inp)
+	lea	16($inp),$inp
+	sub	\$16,$len
+	jnc	.Lcbc_enc_loop
+	jmp	.Lcbc_done
+.align	16
+.Lcbc_dec_loop:
+	movdqu	($inp),%xmm0
+	movdqa	%xmm0,%xmm7
+	call	_vpaes_decrypt_core
+	pxor	%xmm6,%xmm0
+	movdqa	%xmm7,%xmm6
+	movdqu	%xmm0,($out,$inp)
+	lea	16($inp),$inp
+	sub	\$16,$len
+	jnc	.Lcbc_dec_loop
+.Lcbc_done:
+	movdqu	%xmm6,($ivp)		# save IV
+___
+$code.=<<___ if ($win64);
+	movaps	0x10(%rsp),%xmm6
+	movaps	0x20(%rsp),%xmm7
+	movaps	0x30(%rsp),%xmm8
+	movaps	0x40(%rsp),%xmm9
+	movaps	0x50(%rsp),%xmm10
+	movaps	0x60(%rsp),%xmm11
+	movaps	0x70(%rsp),%xmm12
+	movaps	0x80(%rsp),%xmm13
+	movaps	0x90(%rsp),%xmm14
+	movaps	0xa0(%rsp),%xmm15
+	lea	0xb8(%rsp),%rsp
+.Lcbc_epilogue:
+___
+$code.=<<___;
+.Lcbc_abort:
+	ret
+.size	${PREFIX}_cbc_encrypt,.-${PREFIX}_cbc_encrypt
+___
+}
+$code.=<<___;
+##
+##  _aes_preheat
+##
+##  Fills register %r10 -> .aes_consts (so you can -fPIC)
+##  and %xmm9-%xmm15 as specified below.
+##
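+##  (The encrypt/decrypt entry points call this before the cores, which
+##  assume %xmm9-%xmm15 already hold these constants.)
+##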
+.type	_vpaes_preheat,\@abi-omnipotent
+.align	16
+_vpaes_preheat:
+	lea	.Lk_s0F(%rip), %r10
+	movdqa	-0x20(%r10), %xmm10	# .Lk_inv
+	movdqa	-0x10(%r10), %xmm11	# .Lk_inv+16
+	movdqa	0x00(%r10), %xmm9	# .Lk_s0F
+	movdqa	0x30(%r10), %xmm13	# .Lk_sb1
+	movdqa	0x40(%r10), %xmm12	# .Lk_sb1+16
+	movdqa	0x50(%r10), %xmm15	# .Lk_sb2
+	movdqa	0x60(%r10), %xmm14	# .Lk_sb2+16
+	ret
+.size	_vpaes_preheat,.-_vpaes_preheat
+########################################################
+##                                                    ##
+##                     Constants                      ##
+##                                                    ##
+########################################################
+.type	_vpaes_consts,\@object
+.align	64
+_vpaes_consts:
+.Lk_inv:	# inv, inva
+	.quad	0x0E05060F0D080180, 0x040703090A0B0C02
+	.quad	0x01040A060F0B0780, 0x030D0E0C02050809
+
+.Lk_s0F:	# s0F
+	.quad	0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F0F0F0F0F
+
+.Lk_ipt:	# input transform (lo, hi)
+	.quad	0xC2B2E8985A2A7000, 0xCABAE09052227808
+	.quad	0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81
+
+.Lk_sb1:	# sb1u, sb1t
+	.quad	0xB19BE18FCB503E00, 0xA5DF7A6E142AF544
+	.quad	0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF
+.Lk_sb2:	# sb2u, sb2t
+	.quad	0xE27A93C60B712400, 0x5EB7E955BC982FCD
+	.quad	0x69EB88400AE12900, 0xC2A163C8AB82234A
+.Lk_sbo:	# sbou, sbot
+	.quad	0xD0D26D176FBDC700, 0x15AABF7AC502A878
+	.quad	0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA
+
+.Lk_mc_forward:	# mc_forward
+	.quad	0x0407060500030201, 0x0C0F0E0D080B0A09
+	.quad	0x080B0A0904070605, 0x000302010C0F0E0D
+	.quad	0x0C0F0E0D080B0A09, 0x0407060500030201
+	.quad	0x000302010C0F0E0D, 0x080B0A0904070605
+
+.Lk_mc_backward:# mc_backward
+	.quad	0x0605040702010003, 0x0E0D0C0F0A09080B
+	.quad	0x020100030E0D0C0F, 0x0A09080B06050407
+	.quad	0x0E0D0C0F0A09080B, 0x0605040702010003
+	.quad	0x0A09080B06050407, 0x020100030E0D0C0F
+
+.Lk_sr:		# sr
+	.quad	0x0706050403020100, 0x0F0E0D0C0B0A0908
+	.quad	0x030E09040F0A0500, 0x0B06010C07020D08
+	.quad	0x0F060D040B020900, 0x070E050C030A0108
+	.quad	0x0B0E0104070A0D00, 0x0306090C0F020508
+
+.Lk_rcon:	# rcon
+	.quad	0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81
+
+.Lk_s63:	# s63: all equal to 0x63 transformed
+	.quad	0x5B5B5B5B5B5B5B5B, 0x5B5B5B5B5B5B5B5B
+
+.Lk_opt:	# output transform
+	.quad	0xFF9F4929D6B66000, 0xF7974121DEBE6808
+	.quad	0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0
+
+.Lk_deskew:	# deskew tables: inverts the sbox's "skew"
+	.quad	0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A
+	.quad	0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77
+
+##
+##  Decryption stuff
+##  Key schedule constants
+##
+.Lk_dksd:	# decryption key schedule: invskew x*D
+	.quad	0xFEB91A5DA3E44700, 0x0740E3A45A1DBEF9
+	.quad	0x41C277F4B5368300, 0x5FDC69EAAB289D1E
+.Lk_dksb:	# decryption key schedule: invskew x*B
+	.quad	0x9A4FCA1F8550D500, 0x03D653861CC94C99
+	.quad	0x115BEDA7B6FC4A00, 0xD993256F7E3482C8
+.Lk_dkse:	# decryption key schedule: invskew x*E + 0x63
+	.quad	0xD5031CCA1FC9D600, 0x53859A4C994F5086
+	.quad	0xA23196054FDC7BE8, 0xCD5EF96A20B31487
+.Lk_dks9:	# decryption key schedule: invskew x*9
+	.quad	0xB6116FC87ED9A700, 0x4AED933482255BFC
+	.quad	0x4576516227143300, 0x8BB89FACE9DAFDCE
+
+##
+##  Decryption stuff
+##  Round function constants
+##
+.Lk_dipt:	# decryption input transform
+	.quad	0x0F505B040B545F00, 0x154A411E114E451A
+	.quad	0x86E383E660056500, 0x12771772F491F194
+
+.Lk_dsb9:	# decryption sbox output *9*u, *9*t
+	.quad	0x851C03539A86D600, 0xCAD51F504F994CC9
+	.quad	0xC03B1789ECD74900, 0x725E2C9EB2FBA565
+.Lk_dsbd:	# decryption sbox output *D*u, *D*t
+	.quad	0x7D57CCDFE6B1A200, 0xF56E9B13882A4439
+	.quad	0x3CE2FAF724C6CB00, 0x2931180D15DEEFD3
+.Lk_dsbb:	# decryption sbox output *B*u, *B*t
+	.quad	0xD022649296B44200, 0x602646F6B0F2D404
+	.quad	0xC19498A6CD596700, 0xF3FF0C3E3255AA6B
+.Lk_dsbe:	# decryption sbox output *E*u, *E*t
+	.quad	0x46F2929626D4D000, 0x2242600464B4F6B0
+	.quad	0x0C55A6CDFFAAC100, 0x9467F36B98593E32
+.Lk_dsbo:	# decryption sbox final output
+	.quad	0x1387EA537EF94000, 0xC7AA6DB9D4943E2D
+	.quad	0x12D7560F93441D00, 0xCA4B8159D8C58E9C
+.asciz	"Vector Permutation AES for x86_64/SSSE3, Mike Hamburg (Stanford University)"
+.align	64
+.size	_vpaes_consts,.-_vpaes_consts
+___
+
+if ($win64) {
+# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
+#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
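+#
+# Roughly: the handler below checks whether the faulting RIP lies
+# between the two recorded labels (body and epilogue); if so it copies
+# the saved %xmm6-%xmm15 back into the CONTEXT and adjusts the stack
+# pointer, then lets RtlVirtualUnwind continue the unwind.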
+$rec="%rcx";
+$frame="%rdx";
+$context="%r8";
+$disp="%r9";
+
+$code.=<<___;
+.extern	__imp_RtlVirtualUnwind
+.type	se_handler,\@abi-omnipotent
+.align	16
+se_handler:
+	push	%rsi
+	push	%rdi
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	pushfq
+	sub	\$64,%rsp
+
+	mov	120($context),%rax	# pull context->Rax
+	mov	248($context),%rbx	# pull context->Rip
+
+	mov	8($disp),%rsi		# disp->ImageBase
+	mov	56($disp),%r11		# disp->HandlerData
+
+	mov	0(%r11),%r10d		# HandlerData[0]
+	lea	(%rsi,%r10),%r10	# prologue label
+	cmp	%r10,%rbx		# context->Rip<prologue label
+	jb	.Lin_prologue
+
+	mov	152($context),%rax	# pull context->Rsp
+
+	mov	4(%r11),%r10d		# HandlerData[1]
+	lea	(%rsi,%r10),%r10	# epilogue label
+	cmp	%r10,%rbx		# context->Rip>=epilogue label
+	jae	.Lin_prologue
+
+	lea	16(%rax),%rsi		# %xmm save area
+	lea	512($context),%rdi	# &context.Xmm6
+	mov	\$20,%ecx		# 10*sizeof(%xmm0)/sizeof(%rax)
+	.long	0xa548f3fc		# cld; rep movsq
+	lea	0xb8(%rax),%rax		# adjust stack pointer
+
+.Lin_prologue:
+	mov	8(%rax),%rdi
+	mov	16(%rax),%rsi
+	mov	%rax,152($context)	# restore context->Rsp
+	mov	%rsi,168($context)	# restore context->Rsi
+	mov	%rdi,176($context)	# restore context->Rdi
+
+	mov	40($disp),%rdi		# disp->ContextRecord
+	mov	$context,%rsi		# context
+	mov	\$`1232/8`,%ecx		# sizeof(CONTEXT)
+	.long	0xa548f3fc		# cld; rep movsq
+
+	mov	$disp,%rsi
+	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
+	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
+	mov	0(%rsi),%r8		# arg3, disp->ControlPc
+	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
+	mov	40(%rsi),%r10		# disp->ContextRecord
+	lea	56(%rsi),%r11		# &disp->HandlerData
+	lea	24(%rsi),%r12		# &disp->EstablisherFrame
+	mov	%r10,32(%rsp)		# arg5
+	mov	%r11,40(%rsp)		# arg6
+	mov	%r12,48(%rsp)		# arg7
+	mov	%rcx,56(%rsp)		# arg8, (NULL)
+	call	*__imp_RtlVirtualUnwind(%rip)
+
+	mov	\$1,%eax		# ExceptionContinueSearch
+	add	\$64,%rsp
+	popfq
+	pop	%r15
+	pop	%r14
+	pop	%r13
+	pop	%r12
+	pop	%rbp
+	pop	%rbx
+	pop	%rdi
+	pop	%rsi
+	ret
+.size	se_handler,.-se_handler
+
+.section	.pdata
+.align	4
+	.rva	.LSEH_begin_${PREFIX}_set_encrypt_key
+	.rva	.LSEH_end_${PREFIX}_set_encrypt_key
+	.rva	.LSEH_info_${PREFIX}_set_encrypt_key
+
+	.rva	.LSEH_begin_${PREFIX}_set_decrypt_key
+	.rva	.LSEH_end_${PREFIX}_set_decrypt_key
+	.rva	.LSEH_info_${PREFIX}_set_decrypt_key
+
+	.rva	.LSEH_begin_${PREFIX}_encrypt
+	.rva	.LSEH_end_${PREFIX}_encrypt
+	.rva	.LSEH_info_${PREFIX}_encrypt
+
+	.rva	.LSEH_begin_${PREFIX}_decrypt
+	.rva	.LSEH_end_${PREFIX}_decrypt
+	.rva	.LSEH_info_${PREFIX}_decrypt
+
+	.rva	.LSEH_begin_${PREFIX}_cbc_encrypt
+	.rva	.LSEH_end_${PREFIX}_cbc_encrypt
+	.rva	.LSEH_info_${PREFIX}_cbc_encrypt
+
+.section	.xdata
+.align	8
+.LSEH_info_${PREFIX}_set_encrypt_key:
+	.byte	9,0,0,0
+	.rva	se_handler
+	.rva	.Lenc_key_body,.Lenc_key_epilogue	# HandlerData[]
+.LSEH_info_${PREFIX}_set_decrypt_key:
+	.byte	9,0,0,0
+	.rva	se_handler
+	.rva	.Ldec_key_body,.Ldec_key_epilogue	# HandlerData[]
+.LSEH_info_${PREFIX}_encrypt:
+	.byte	9,0,0,0
+	.rva	se_handler
+	.rva	.Lenc_body,.Lenc_epilogue		# HandlerData[]
+.LSEH_info_${PREFIX}_decrypt:
+	.byte	9,0,0,0
+	.rva	se_handler
+	.rva	.Ldec_body,.Ldec_epilogue		# HandlerData[]
+.LSEH_info_${PREFIX}_cbc_encrypt:
+	.byte	9,0,0,0
+	.rva	se_handler
+	.rva	.Lcbc_body,.Lcbc_epilogue		# HandlerData[]
+___
+}
+
+$code =~ s/\`([^\`]*)\`/eval($1)/gem;
+
+print $code;
+
+close STDOUT;