#14465: Update openssl to tag android-4.0.4_r2.1
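
This updates jni/openssl/crypto/ec/ec_cvt.c to match the android-4.0.4_r2.1 tag: the OPENSSL_BN_ASM_MONT branch that selected EC_GFp_mont_method() for prime-field curves is dropped, so EC_GROUP_new_curve_GFp() now starts from EC_GFp_nist_method() unconditionally, and the OPENSSL_NO_EC2M guard around EC_GROUP_new_curve_GF2m() is removed, so the binary-field constructor is always compiled.
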
diff --git a/jni/openssl/crypto/ec/ec_cvt.c b/jni/openssl/crypto/ec/ec_cvt.c
index bfcbab3..d45640b 100644
--- a/jni/openssl/crypto/ec/ec_cvt.c
+++ b/jni/openssl/crypto/ec/ec_cvt.c
@@ -78,32 +78,7 @@
const EC_METHOD *meth;
EC_GROUP *ret;

-#if defined(OPENSSL_BN_ASM_MONT)
- /*
- * This might appear controversial, but the fact is that generic
- * prime method was observed to deliver better performance even
- * for NIST primes on a range of platforms, e.g.: 60%-15%
- * improvement on IA-64, ~25% on ARM, 30%-90% on P4, 20%-25%
- * in 32-bit build and 35%-160% in 64-bit build on Core2...
- * Coefficients are relative to optimized bn_nist.c for most
- * intensive ECDSA verify and ECDH operations for 192- and 521-
- * bit keys respectively. Choice of these boundary values is
- * arguable, because the dependency of improvement coefficient
- * from key length is not a "monotone" curve. For example while
- * 571-bit result is 23% on ARM, 384-bit one is -1%. But it's
- * generally faster, sometimes "respectfully" faster, sometimes
- * "tolerably" slower... What effectively happens is that loop
- * with bn_mul_add_words is put against bn_mul_mont, and the
- * latter "wins" on short vectors. Correct solution should be
- * implementing dedicated NxN multiplication subroutines for
- * small N. But till it materializes, let's stick to generic
- * prime method...
- * <appro>
- */
- meth = EC_GFp_mont_method();
-#else
meth = EC_GFp_nist_method();
-#endif

ret = EC_GROUP_new(meth);
if (ret == NULL)
@@ -147,7 +122,7 @@
return ret;
}

-#ifndef OPENSSL_NO_EC2M
+
EC_GROUP *EC_GROUP_new_curve_GF2m(const BIGNUM *p, const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx)
{
const EC_METHOD *meth;
@@ -167,4 +142,3 @@

return ret;
}
-#endif
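
For reference, here is a minimal sketch of calling the constructor this patch touches. It is illustrative only: the hex strings are the standard NIST P-256 domain parameters, used purely as example input, and are not part of this change.

    /*
     * Sketch: build a prime-field EC_GROUP through
     * EC_GROUP_new_curve_GFp(), the function modified above.
     */
    #include <stdio.h>
    #include <openssl/bn.h>
    #include <openssl/ec.h>

    int main(void)
    {
        BIGNUM *p = NULL, *a = NULL, *b = NULL;
        EC_GROUP *group = NULL;

        /* NIST P-256 domain parameters (example values only). */
        if (!BN_hex2bn(&p, "FFFFFFFF00000001000000000000000000000000"
                           "FFFFFFFFFFFFFFFFFFFFFFFF") ||
            !BN_hex2bn(&a, "FFFFFFFF00000001000000000000000000000000"
                           "FFFFFFFFFFFFFFFFFFFFFFFC") ||
            !BN_hex2bn(&b, "5AC635D8AA3A93E7B3EBBD55769886BC651D06B0"
                           "CC53B0F63BCE3C3E27D2604B"))
            goto done;

        /* After this patch the method starts as EC_GFp_nist_method()
         * regardless of OPENSSL_BN_ASM_MONT. */
        group = EC_GROUP_new_curve_GFp(p, a, b, NULL);
        if (group != NULL)
            printf("EC_GROUP created, degree = %d bits\n",
                   EC_GROUP_get_degree(group));

    done:
        EC_GROUP_free(group);
        BN_free(p);
        BN_free(a);
        BN_free(b);
        return group != NULL ? 0 : 1;
    }

Note that for primes that are not NIST primes, the 1.0.0-series ec_cvt.c that this tag appears to track falls back to EC_GFp_mont_method() further down in the function; that fallback is outside the hunks shown here.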