/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

/* This file implements modular exponentiation using Montgomery's
 * method for modular reduction.  This file implements the method
 * described as "Improvement 2" in the paper "A Cryptographic Library for
 * the Motorola DSP56000" by Stephen R. Dusse' and Burton S. Kaliski Jr.
 * published in "Advances in Cryptology: Proceedings of EUROCRYPT '90"
 * "Lecture Notes in Computer Science" volume 473, 1991, pg 230-244,
 * published by Springer Verlag.
 */

#define MP_USING_CACHE_SAFE_MOD_EXP 1
#include <string.h>  /* memset */
#include "mpi-priv.h"
#include "mplogic.h"
#include "mpprime.h"
#ifdef MP_USING_MONT_MULF
#include "montmulf.h"
#endif
#include <stddef.h>  /* ptrdiff_t */

/* if MP_CHAR_STORE_SLOW is defined, we
 * need to know the endianness of this platform. */
#ifdef MP_CHAR_STORE_SLOW
#if !defined(MP_IS_BIG_ENDIAN) && !defined(MP_IS_LITTLE_ENDIAN)
#error "You must define MP_IS_BIG_ENDIAN or MP_IS_LITTLE_ENDIAN\n" \
       "  if you define MP_CHAR_STORE_SLOW."
#endif
#endif

#define STATIC

#define MAX_ODD_INTS 32 /* 2 ** (WINDOW_BITS - 1) */

/*! computes T = REDC(T), 2^b == R
    \param T < RN
*/
mp_err s_mp_redc(mp_int *T, mp_mont_modulus *mmm)
{
  mp_err res;
  mp_size i;

  i = (MP_USED(&mmm->N) << 1) + 1;
  MP_CHECKOK( s_mp_pad(T, i) );
  for (i = 0; i < MP_USED(&mmm->N); ++i ) {
    mp_digit m_i = MP_DIGIT(T, i) * mmm->n0prime;
    /* T += N * m_i * (MP_RADIX ** i); */
    MP_CHECKOK( s_mp_mul_d_add_offset(&mmm->N, m_i, T, i) );
  }
  s_mp_clamp(T);

  /* T /= R */
  s_mp_rshd( T, MP_USED(&mmm->N) );

  if ((res = s_mp_cmp(T, &mmm->N)) >= 0) {
    /* T = T - N */
    MP_CHECKOK( s_mp_sub(T, &mmm->N) );
#ifdef DEBUG
    if ((res = mp_cmp(T, &mmm->N)) >= 0) {
      res = MP_UNDEF;
      goto CLEANUP;
    }
#endif
  }
  res = MP_OKAY;
CLEANUP:
  return res;
}
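/* With R = MP_RADIX ** MP_USED(N), s_mp_redc computes T * R**-1 mod N.
 * Keeping values in Montgomery form (x * R mod N) lets the routines below
 * multiply and reduce without ever dividing by N, because
 *     REDC( (a*R) * (b*R) ) == (a*b) * R  mod N.
 * A final call to s_mp_redc converts an accumulator back out of
 * Montgomery form.
 */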
"reduced" andre@0: \param mmm modulus N and n0' of N andre@0: */ andre@0: mp_err s_mp_mul_mont(const mp_int *a, const mp_int *b, mp_int *c, andre@0: mp_mont_modulus *mmm) andre@0: { andre@0: mp_digit *pb; andre@0: mp_digit m_i; andre@0: mp_err res; andre@0: mp_size ib; /* "index b": index of current digit of B */ andre@0: mp_size useda, usedb; andre@0: andre@0: ARGCHK(a != NULL && b != NULL && c != NULL, MP_BADARG); andre@0: andre@0: if (MP_USED(a) < MP_USED(b)) { andre@0: const mp_int *xch = b; /* switch a and b, to do fewer outer loops */ andre@0: b = a; andre@0: a = xch; andre@0: } andre@0: andre@0: MP_USED(c) = 1; MP_DIGIT(c, 0) = 0; andre@0: ib = (MP_USED(&mmm->N) << 1) + 1; andre@0: if((res = s_mp_pad(c, ib)) != MP_OKAY) andre@0: goto CLEANUP; andre@0: andre@0: useda = MP_USED(a); andre@0: pb = MP_DIGITS(b); andre@0: s_mpv_mul_d(MP_DIGITS(a), useda, *pb++, MP_DIGITS(c)); andre@0: s_mp_setz(MP_DIGITS(c) + useda + 1, ib - (useda + 1)); andre@0: m_i = MP_DIGIT(c, 0) * mmm->n0prime; andre@0: s_mp_mul_d_add_offset(&mmm->N, m_i, c, 0); andre@0: andre@0: /* Outer loop: Digits of b */ andre@0: usedb = MP_USED(b); andre@0: for (ib = 1; ib < usedb; ib++) { andre@0: mp_digit b_i = *pb++; andre@0: andre@0: /* Inner product: Digits of a */ andre@0: if (b_i) andre@0: s_mpv_mul_d_add_prop(MP_DIGITS(a), useda, b_i, MP_DIGITS(c) + ib); andre@0: m_i = MP_DIGIT(c, ib) * mmm->n0prime; andre@0: s_mp_mul_d_add_offset(&mmm->N, m_i, c, ib); andre@0: } andre@0: if (usedb < MP_USED(&mmm->N)) { andre@0: for (usedb = MP_USED(&mmm->N); ib < usedb; ++ib ) { andre@0: m_i = MP_DIGIT(c, ib) * mmm->n0prime; andre@0: s_mp_mul_d_add_offset(&mmm->N, m_i, c, ib); andre@0: } andre@0: } andre@0: s_mp_clamp(c); andre@0: s_mp_rshd( c, MP_USED(&mmm->N) ); /* c /= R */ andre@0: if (s_mp_cmp(c, &mmm->N) >= 0) { andre@0: MP_CHECKOK( s_mp_sub(c, &mmm->N) ); andre@0: } andre@0: res = MP_OKAY; andre@0: andre@0: CLEANUP: andre@0: return res; andre@0: } andre@0: #endif andre@0: andre@0: STATIC andre@0: mp_err s_mp_to_mont(const mp_int *x, mp_mont_modulus *mmm, mp_int *xMont) andre@0: { andre@0: mp_err res; andre@0: andre@0: /* xMont = x * R mod N where N is modulus */ andre@0: MP_CHECKOK( mp_copy( x, xMont ) ); andre@0: MP_CHECKOK( s_mp_lshd( xMont, MP_USED(&mmm->N) ) ); /* xMont = x << b */ andre@0: MP_CHECKOK( mp_div(xMont, &mmm->N, 0, xMont) ); /* mod N */ andre@0: CLEANUP: andre@0: return res; andre@0: } andre@0: andre@0: #ifdef MP_USING_MONT_MULF andre@0: andre@0: /* the floating point multiply is already cache safe, andre@0: * don't turn on cache safe unless we specifically andre@0: * force it */ andre@0: #ifndef MP_FORCE_CACHE_SAFE andre@0: #undef MP_USING_CACHE_SAFE_MOD_EXP andre@0: #endif andre@0: andre@0: unsigned int mp_using_mont_mulf = 1; andre@0: andre@0: /* computes montgomery square of the integer in mResult */ andre@0: #define SQR \ andre@0: conv_i32_to_d32_and_d16(dm1, d16Tmp, mResult, nLen); \ andre@0: mont_mulf_noconv(mResult, dm1, d16Tmp, \ andre@0: dTmp, dn, MP_DIGITS(modulus), nLen, dn0) andre@0: andre@0: /* computes montgomery product of x and the integer in mResult */ andre@0: #define MUL(x) \ andre@0: conv_i32_to_d32(dm1, mResult, nLen); \ andre@0: mont_mulf_noconv(mResult, dm1, oddPowers[x], \ andre@0: dTmp, dn, MP_DIGITS(modulus), nLen, dn0) andre@0: andre@0: /* Do modular exponentiation using floating point multiply code. 
/* Do modular exponentiation using floating point multiply code. */
mp_err mp_exptmod_f(const mp_int    *montBase,
                    const mp_int    *exponent,
                    const mp_int    *modulus,
                    mp_int          *result,
                    mp_mont_modulus *mmm,
                    int              nLen,
                    mp_size          bits_in_exponent,
                    mp_size          window_bits,
                    mp_size          odd_ints)
{
  mp_digit *mResult;
  double   *dBuf = 0, *dm1, *dn, *dSqr, *d16Tmp, *dTmp;
  double    dn0;
  mp_size   i;
  mp_err    res;
  int       expOff;
  int       dSize = 0, oddPowSize, dTmpSize;
  mp_int    accum1;
  double   *oddPowers[MAX_ODD_INTS];

  /* function for computing n0prime only works if n0 is odd */

  MP_DIGITS(&accum1) = 0;

  for (i = 0; i < MAX_ODD_INTS; ++i)
    oddPowers[i] = 0;

  MP_CHECKOK( mp_init_size(&accum1, 3 * nLen + 2) );

  mp_set(&accum1, 1);
  MP_CHECKOK( s_mp_to_mont(&accum1, mmm, &accum1) );
  MP_CHECKOK( s_mp_pad(&accum1, nLen) );

  oddPowSize = 2 * nLen + 1;
  dTmpSize   = 2 * oddPowSize;
  dSize = sizeof(double) * (nLen * 4 + 1 +
                            ((odd_ints + 1) * oddPowSize) + dTmpSize);
  dBuf = (double *)malloc(dSize);
  if (!dBuf) {          /* check the allocation before laying out the buffer */
    res = MP_MEM;
    goto CLEANUP;
  }
  dm1    = dBuf;               /* array of d32 */
  dn     = dBuf + nLen;        /* array of d32 */
  dSqr   = dn + nLen;          /* array of d32 */
  d16Tmp = dSqr + nLen;        /* array of d16 */
  dTmp   = d16Tmp + oddPowSize;

  for (i = 0; i < odd_ints; ++i) {
    oddPowers[i] = dTmp;
    dTmp += oddPowSize;
  }
  mResult = (mp_digit *)(dTmp + dTmpSize); /* size is nLen + 1 */

  /* Make dn and dn0 */
  conv_i32_to_d32(dn, MP_DIGITS(modulus), nLen);
  dn0 = (double)(mmm->n0prime & 0xffff);

  /* Make dSqr */
  conv_i32_to_d32_and_d16(dm1, oddPowers[0], MP_DIGITS(montBase), nLen);
  mont_mulf_noconv(mResult, dm1, oddPowers[0],
                   dTmp, dn, MP_DIGITS(modulus), nLen, dn0);
  conv_i32_to_d32(dSqr, mResult, nLen);

  for (i = 1; i < odd_ints; ++i) {
    mont_mulf_noconv(mResult, dSqr, oddPowers[i - 1],
                     dTmp, dn, MP_DIGITS(modulus), nLen, dn0);
    conv_i32_to_d16(oddPowers[i], mResult, nLen);
  }

  s_mp_copy(MP_DIGITS(&accum1), mResult, nLen); /* from, to, len */

  for (expOff = bits_in_exponent - window_bits; expOff >= 0; expOff -= window_bits) {
    mp_size smallExp;
    MP_CHECKOK( mpl_get_bits(exponent, expOff, window_bits) );
    smallExp = (mp_size)res;

    if (window_bits == 1) {
      if (!smallExp) {
        SQR;
      } else if (smallExp & 1) {
        SQR; MUL(0);
      } else {
        abort();
      }
    } else if (window_bits == 4) {
      if (!smallExp) {
        SQR; SQR; SQR; SQR;
      } else if (smallExp & 1) {
        SQR; SQR; SQR; SQR; MUL(smallExp/2);
      } else if (smallExp & 2) {
        SQR; SQR; SQR; MUL(smallExp/4); SQR;
      } else if (smallExp & 4) {
        SQR; SQR; MUL(smallExp/8); SQR; SQR;
      } else if (smallExp & 8) {
        SQR; MUL(smallExp/16); SQR; SQR; SQR;
      } else {
        abort();
      }
    } else if (window_bits == 5) {
      if (!smallExp) {
        SQR; SQR; SQR; SQR; SQR;
      } else if (smallExp & 1) {
        SQR; SQR; SQR; SQR; SQR; MUL(smallExp/2);
      } else if (smallExp & 2) {
        SQR; SQR; SQR; SQR; MUL(smallExp/4); SQR;
      } else if (smallExp & 4) {
        SQR; SQR; SQR; MUL(smallExp/8); SQR; SQR;
      } else if (smallExp & 8) {
        SQR; SQR; MUL(smallExp/16); SQR; SQR; SQR;
      } else if (smallExp & 0x10) {
        SQR; MUL(smallExp/32); SQR; SQR; SQR; SQR;
      } else {
        abort();
      }
    } else if (window_bits == 6) {
      if (!smallExp) {
        SQR; SQR; SQR; SQR; SQR; SQR;
      } else if (smallExp & 1) {
        SQR; SQR; SQR; SQR; SQR; SQR; MUL(smallExp/2);
      } else if (smallExp & 2) {
        SQR; SQR; SQR; SQR; SQR; MUL(smallExp/4); SQR;
      } else if (smallExp & 4) {
        SQR; SQR; SQR; SQR; MUL(smallExp/8); SQR; SQR;
      } else if (smallExp & 8) {
        SQR; SQR; SQR; MUL(smallExp/16); SQR; SQR; SQR;
      } else if (smallExp & 0x10) {
        SQR; SQR; MUL(smallExp/32); SQR; SQR; SQR; SQR;
      } else if (smallExp & 0x20) {
        SQR; MUL(smallExp/64); SQR; SQR; SQR; SQR; SQR;
      } else {
        abort();
      }
    } else {
      abort();
    }
  }

  s_mp_copy(mResult, MP_DIGITS(&accum1), nLen); /* from, to, len */

  res = s_mp_redc(&accum1, mmm);
  mp_exch(&accum1, result);

CLEANUP:
  mp_clear(&accum1);
  if (dBuf) {
    if (dSize)
      memset(dBuf, 0, dSize);
    free(dBuf);
  }

  return res;
}
#undef SQR
#undef MUL
#endif

#define SQR(a,b) \
  MP_CHECKOK( mp_sqr(a, b) );\
  MP_CHECKOK( s_mp_redc(b, mmm) )

#if defined(MP_MONT_USE_MP_MUL)
#define MUL(x,a,b) \
  MP_CHECKOK( mp_mul(a, oddPowers + (x), b) ); \
  MP_CHECKOK( s_mp_redc(b, mmm) )
#else
#define MUL(x,a,b) \
  MP_CHECKOK( s_mp_mul_mont(a, oddPowers + (x), b, mmm) )
#endif

#define SWAPPA ptmp = pa1; pa1 = pa2; pa2 = ptmp
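/* SQR(a,b) and MUL(x,a,b) leave their result in b, so mp_exptmod_i below
 * ping-pongs between the two accumulators pa1 and pa2.  SWAPPA is issued
 * whenever a window ends with the live value in pa2, so that pa1 always
 * names the current accumulator at the top of the loop.
 */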
/* Do modular exponentiation using integer multiply code. */
mp_err mp_exptmod_i(const mp_int    *montBase,
                    const mp_int    *exponent,
                    const mp_int    *modulus,
                    mp_int          *result,
                    mp_mont_modulus *mmm,
                    int              nLen,
                    mp_size          bits_in_exponent,
                    mp_size          window_bits,
                    mp_size          odd_ints)
{
  mp_int *pa1, *pa2, *ptmp;
  mp_size i;
  mp_err  res;
  int     expOff;
  mp_int  accum1, accum2, power2, oddPowers[MAX_ODD_INTS];

  /* power2       = base ** 2          */
  /* oddPowers[i] = base ** (2*i + 1)  */

  MP_DIGITS(&accum1) = 0;
  MP_DIGITS(&accum2) = 0;
  MP_DIGITS(&power2) = 0;
  for (i = 0; i < MAX_ODD_INTS; ++i) {
    MP_DIGITS(oddPowers + i) = 0;
  }

  MP_CHECKOK( mp_init_size(&accum1, 3 * nLen + 2) );
  MP_CHECKOK( mp_init_size(&accum2, 3 * nLen + 2) );

  MP_CHECKOK( mp_init_copy(&oddPowers[0], montBase) );

  mp_init_size(&power2, nLen + 2 * MP_USED(montBase) + 2);
  MP_CHECKOK( mp_sqr(montBase, &power2) ); /* power2 = montBase ** 2 */
  MP_CHECKOK( s_mp_redc(&power2, mmm) );

  for (i = 1; i < odd_ints; ++i) {
    mp_init_size(oddPowers + i, nLen + 2 * MP_USED(&power2) + 2);
    MP_CHECKOK( mp_mul(oddPowers + (i - 1), &power2, oddPowers + i) );
    MP_CHECKOK( s_mp_redc(oddPowers + i, mmm) );
  }

  /* set accumulator to montgomery residue of 1 */
  mp_set(&accum1, 1);
  MP_CHECKOK( s_mp_to_mont(&accum1, mmm, &accum1) );
  pa1 = &accum1;
  pa2 = &accum2;

  for (expOff = bits_in_exponent - window_bits; expOff >= 0; expOff -= window_bits) {
    mp_size smallExp;
    MP_CHECKOK( mpl_get_bits(exponent, expOff, window_bits) );
    smallExp = (mp_size)res;

    if (window_bits == 1) {
      if (!smallExp) {
        SQR(pa1,pa2); SWAPPA;
      } else if (smallExp & 1) {
        SQR(pa1,pa2); MUL(0,pa2,pa1);
      } else {
        abort();
      }
    } else if (window_bits == 4) {
      if (!smallExp) {
        SQR(pa1,pa2); SQR(pa2,pa1); SQR(pa1,pa2); SQR(pa2,pa1);
      } else if (smallExp & 1) {
        SQR(pa1,pa2); SQR(pa2,pa1); SQR(pa1,pa2); SQR(pa2,pa1);
        MUL(smallExp/2, pa1,pa2); SWAPPA;
      } else if (smallExp & 2) {
        SQR(pa1,pa2); SQR(pa2,pa1); SQR(pa1,pa2);
        MUL(smallExp/4,pa2,pa1); SQR(pa1,pa2); SWAPPA;
      } else if (smallExp & 4) {
        SQR(pa1,pa2); SQR(pa2,pa1); MUL(smallExp/8,pa1,pa2);
        SQR(pa2,pa1); SQR(pa1,pa2); SWAPPA;
      } else if (smallExp & 8) {
        SQR(pa1,pa2); MUL(smallExp/16,pa2,pa1); SQR(pa1,pa2);
        SQR(pa2,pa1); SQR(pa1,pa2); SWAPPA;
      } else {
        abort();
      }
    } else if (window_bits == 5) {
      if (!smallExp) {
        SQR(pa1,pa2); SQR(pa2,pa1); SQR(pa1,pa2); SQR(pa2,pa1);
        SQR(pa1,pa2); SWAPPA;
      } else if (smallExp & 1) {
        SQR(pa1,pa2); SQR(pa2,pa1); SQR(pa1,pa2); SQR(pa2,pa1);
        SQR(pa1,pa2); MUL(smallExp/2,pa2,pa1);
      } else if (smallExp & 2) {
        SQR(pa1,pa2); SQR(pa2,pa1); SQR(pa1,pa2); SQR(pa2,pa1);
        MUL(smallExp/4,pa1,pa2); SQR(pa2,pa1);
      } else if (smallExp & 4) {
        SQR(pa1,pa2); SQR(pa2,pa1); SQR(pa1,pa2);
        MUL(smallExp/8,pa2,pa1); SQR(pa1,pa2); SQR(pa2,pa1);
      } else if (smallExp & 8) {
        SQR(pa1,pa2); SQR(pa2,pa1);
        MUL(smallExp/16,pa1,pa2);
        SQR(pa2,pa1); SQR(pa1,pa2); SQR(pa2,pa1);
      } else if (smallExp & 0x10) {
        SQR(pa1,pa2); MUL(smallExp/32,pa2,pa1); SQR(pa1,pa2);
        SQR(pa2,pa1); SQR(pa1,pa2); SQR(pa2,pa1);
      } else {
        abort();
      }
    } else if (window_bits == 6) {
      if (!smallExp) {
        SQR(pa1,pa2); SQR(pa2,pa1); SQR(pa1,pa2); SQR(pa2,pa1);
        SQR(pa1,pa2); SQR(pa2,pa1);
      } else if (smallExp & 1) {
        SQR(pa1,pa2); SQR(pa2,pa1); SQR(pa1,pa2); SQR(pa2,pa1);
        SQR(pa1,pa2); SQR(pa2,pa1); MUL(smallExp/2,pa1,pa2); SWAPPA;
      } else if (smallExp & 2) {
        SQR(pa1,pa2); SQR(pa2,pa1); SQR(pa1,pa2); SQR(pa2,pa1);
        SQR(pa1,pa2); MUL(smallExp/4,pa2,pa1); SQR(pa1,pa2); SWAPPA;
      } else if (smallExp & 4) {
        SQR(pa1,pa2); SQR(pa2,pa1); SQR(pa1,pa2); SQR(pa2,pa1);
        MUL(smallExp/8,pa1,pa2); SQR(pa2,pa1); SQR(pa1,pa2); SWAPPA;
      } else if (smallExp & 8) {
        SQR(pa1,pa2); SQR(pa2,pa1); SQR(pa1,pa2);
        MUL(smallExp/16,pa2,pa1); SQR(pa1,pa2); SQR(pa2,pa1);
        SQR(pa1,pa2); SWAPPA;
      } else if (smallExp & 0x10) {
        SQR(pa1,pa2); SQR(pa2,pa1); MUL(smallExp/32,pa1,pa2);
        SQR(pa2,pa1); SQR(pa1,pa2); SQR(pa2,pa1); SQR(pa1,pa2); SWAPPA;
      } else if (smallExp & 0x20) {
        SQR(pa1,pa2); MUL(smallExp/64,pa2,pa1); SQR(pa1,pa2);
        SQR(pa2,pa1); SQR(pa1,pa2); SQR(pa2,pa1); SQR(pa1,pa2); SWAPPA;
      } else {
        abort();
      }
    } else {
      abort();
    }
  }

  res = s_mp_redc(pa1, mmm);
  mp_exch(pa1, result);

CLEANUP:
  mp_clear(&accum1);
  mp_clear(&accum2);
  mp_clear(&power2);
  for (i = 0; i < odd_ints; ++i) {
    mp_clear(oddPowers + i);
  }
  return res;
}
#undef SQR
#undef MUL

#ifdef MP_USING_CACHE_SAFE_MOD_EXP
unsigned int mp_using_cache_safe_exp = 1;
#endif

mp_err mp_set_safe_modexp(int value)
{
#ifdef MP_USING_CACHE_SAFE_MOD_EXP
  mp_using_cache_safe_exp = value;
  return MP_OKAY;
#else
  if (value == 0) {
    return MP_OKAY;
  }
  return MP_BADARG;
#endif
}

#ifdef MP_USING_CACHE_SAFE_MOD_EXP
#define WEAVE_WORD_SIZE 4

#ifndef MP_CHAR_STORE_SLOW
/*
 * mpi_to_weave takes an array of bignums, a matrix in which each bignum
 * occupies all the columns of a row, and transposes it into a matrix in
 * which each bignum occupies a column of every row.  The first row of the
 * input matrix becomes the first column of the output matrix.  The n'th
 * row of input becomes the n'th column of output.  The input data is said
 * to be "interleaved" or "woven" into the output matrix.
 *
 * The array of bignums is left in this woven form.  Each time a single
 * bignum value is needed, it is recreated by fetching the n'th column,
 * forming a single row which is the new bignum.
 *
 * The purpose of this interleaving is to make it impossible to determine
 * which of the bignums is being used in any one operation by examining the
 * pattern of cache misses.
 *
 * The weaving function does not transpose the entire input matrix in one call.
 * It transposes 4 rows of mp_ints into their respective columns of output.
 *
 * There are two different implementations of the weaving and unweaving code
 * in this file.  One uses byte loads and stores.  The second uses loads and
 * stores of mp_weave_word size values.  The weaved forms of these two
 * implementations differ.  Consequently, each one has its own explanation.
 *
 * Here is the explanation for the byte-at-a-time implementation.
 *
 * This implementation treats each mp_int bignum as an array of bytes,
 * rather than as an array of mp_digits.  It stores those bytes as a
 * column of bytes in the output matrix.  It doesn't care if the machine
 * uses big-endian or little-endian byte ordering within mp_digits.
 * The first byte of the mp_digit array becomes the first byte in the output
 * column, regardless of whether that byte is the MSB or LSB of the mp_digit.
 *
 * "bignums" is an array of mp_ints.
 * It points to four rows, four mp_ints, a subset of a larger array of mp_ints.
 *
 * "weaved" is the weaved output matrix.
 * The first byte of bignums[0] is stored in weaved[0].
 *
 * "nBignums" is the total number of bignums in the array of which "bignums"
 * is a part.
 *
 * "nDigits" is the size in mp_digits of each mp_int in the "bignums" array.
 * mp_ints that use fewer than nDigits digits are logically padded with zeros
 * while being stored in the weaved array.
 */
mp_err mpi_to_weave(const mp_int  *bignums,
                    unsigned char *weaved,
                    mp_size nDigits,  /* in each mp_int of input */
                    mp_size nBignums) /* in the entire source array */
{
  mp_size i;
  unsigned char *endDest = weaved + (nDigits * nBignums * sizeof(mp_digit));

  for (i = 0; i < WEAVE_WORD_SIZE; i++) {
    mp_size used = MP_USED(&bignums[i]);
    unsigned char *pSrc   = (unsigned char *)MP_DIGITS(&bignums[i]);
    unsigned char *endSrc = pSrc + (used * sizeof(mp_digit));
    unsigned char *pDest  = weaved + i;

    ARGCHK(MP_SIGN(&bignums[i]) == MP_ZPOS, MP_BADARG);
    ARGCHK(used <= nDigits, MP_BADARG);

    for (; pSrc < endSrc; pSrc++) {
      *pDest = *pSrc;
      pDest += nBignums;
    }
    while (pDest < endDest) {
      *pDest = 0;
      pDest += nBignums;
    }
  }

  return MP_OKAY;
}
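/* The resulting layout, taken over the whole table, is
 *
 *     weaved[j * nBignums + i] == byte j of bignum i
 *
 * so reconstructing any one bignum reads the table at a fixed stride of
 * nBignums bytes; only the starting offset differs.  As long as nBignums is
 * no larger than a cache line (mp_exptmod clamps the window size below to
 * keep it that way), every cache line of the table is touched no matter
 * which bignum is selected.
 */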
/* Reverse the operation above for one mp_int.
 * Reconstruct one mp_int from its column in the weaved array.
 * "pSrc" points to the offset into the weave array of the bignum we
 * are going to reconstruct.
 */
mp_err weave_to_mpi(mp_int *a,                 /* output, result */
                    const unsigned char *pSrc, /* input, byte matrix */
                    mp_size nDigits,           /* per mp_int output */
                    mp_size nBignums)          /* bignums in weaved matrix */
{
  unsigned char *pDest   = (unsigned char *)MP_DIGITS(a);
  unsigned char *endDest = pDest + (nDigits * sizeof(mp_digit));

  MP_SIGN(a) = MP_ZPOS;
  MP_USED(a) = nDigits;

  for (; pDest < endDest; pSrc += nBignums, pDest++) {
    *pDest = *pSrc;
  }
  s_mp_clamp(a);
  return MP_OKAY;
}

#else

/* We need a primitive type that we know is 32 bits long...
 * this is true on all modern processors we know of today. */
typedef unsigned int mp_weave_word;

/*
 * On some platforms, character stores into memory are very expensive, since
 * they generate a read/modify/write operation on the bus.  On those platforms
 * we need to do integer writes to the bus.  Because of some unrolled code,
 * in this current code the size of mp_weave_word must be four.  The code that
 * makes this assumption is explicitly called out.  (On some platforms a write
 * of 4 bytes still requires a single read-modify-write operation.)
 *
 * This function takes the same parameters as the function above, but it lays
 * out the final array differently.  Where the previous function treats the
 * mp_int as a byte array, this function treats it as an array of mp_digits
 * where each digit is stored in big endian order.
 *
 * Since we need to interleave on a byte by byte basis, we need to collect
 * several mpi structures together into a single PRUint32 before we write.  We
 * also need to make sure the PRUint32 is arranged so that the first value of
 * the first array winds up in b[0].  This means the construction of that
 * PRUint32 is endian specific (even though the layout of the mp_digits in the
 * array is always big endian).
 *
 * The final data is stored as follows:
 *
 * Using the same logical array p, let m be sizeof(mp_digit), let N still be
 * count, and let n now be b_size.  If we define p[i].digit[j]0 as the most
 * significant byte of the word p[i].digit[j], p[i].digit[j]1 as the next most
 * significant byte of p[i].digit[j], ... and p[i].digit[j]m-1 as the least
 * significant byte, then the array looks like:
 *
 *   p[0].digit[0]0     p[1].digit[0]0     ...  p[N-2].digit[0]0     p[N-1].digit[0]0
 *   p[0].digit[0]1     p[1].digit[0]1     ...  p[N-2].digit[0]1     p[N-1].digit[0]1
 *   .                                                               .
 *   p[0].digit[0]m-1   p[1].digit[0]m-1   ...  p[N-2].digit[0]m-1   p[N-1].digit[0]m-1
 *   p[0].digit[1]0     p[1].digit[1]0     ...  p[N-2].digit[1]0     p[N-1].digit[1]0
 *   .                                                               .
 *   .                                                               .
 *   p[0].digit[n-1]m-2 p[1].digit[n-1]m-2 ...  p[N-2].digit[n-1]m-2 p[N-1].digit[n-1]m-2
 *   p[0].digit[n-1]m-1 p[1].digit[n-1]m-1 ...  p[N-2].digit[n-1]m-1 p[N-1].digit[n-1]m-1
 *
 */
mp_err mpi_to_weave(const mp_int *a, unsigned char *b,
                    mp_size b_size, mp_size count)
{
  mp_size i;
  mp_digit *digitsa0;
  mp_digit *digitsa1;
  mp_digit *digitsa2;
  mp_digit *digitsa3;
  mp_size   useda0;
  mp_size   useda1;
  mp_size   useda2;
  mp_size   useda3;
  mp_weave_word *weaved = (mp_weave_word *)b;

  count = count / sizeof(mp_weave_word);

  /* this code pretty much depends on this! */
#if MP_ARGCHK == 2
  assert(WEAVE_WORD_SIZE == 4);
  assert(sizeof(mp_weave_word) == 4);
#endif

  digitsa0 = MP_DIGITS(&a[0]);
  digitsa1 = MP_DIGITS(&a[1]);
  digitsa2 = MP_DIGITS(&a[2]);
  digitsa3 = MP_DIGITS(&a[3]);
  useda0 = MP_USED(&a[0]);
  useda1 = MP_USED(&a[1]);
  useda2 = MP_USED(&a[2]);
  useda3 = MP_USED(&a[3]);

  ARGCHK(MP_SIGN(&a[0]) == MP_ZPOS, MP_BADARG);
  ARGCHK(MP_SIGN(&a[1]) == MP_ZPOS, MP_BADARG);
  ARGCHK(MP_SIGN(&a[2]) == MP_ZPOS, MP_BADARG);
  ARGCHK(MP_SIGN(&a[3]) == MP_ZPOS, MP_BADARG);
  ARGCHK(useda0 <= b_size, MP_BADARG);
  ARGCHK(useda1 <= b_size, MP_BADARG);
  ARGCHK(useda2 <= b_size, MP_BADARG);
  ARGCHK(useda3 <= b_size, MP_BADARG);

#define SAFE_FETCH(digit, used, word) ((word) < (used) ? (digit[word]) : 0)

  for (i = 0; i < b_size; i++) {
    mp_digit d0 = SAFE_FETCH(digitsa0,useda0,i);
    mp_digit d1 = SAFE_FETCH(digitsa1,useda1,i);
    mp_digit d2 = SAFE_FETCH(digitsa2,useda2,i);
    mp_digit d3 = SAFE_FETCH(digitsa3,useda3,i);
    register mp_weave_word acc;

/*
 * ONE_STEP takes the MSB of each of our current digits and places that
 * byte in the appropriate position for writing to the weaved array.
 *  On little endian:
 *   b3 b2 b1 b0
 *  On big endian:
 *   b0 b1 b2 b3
 *  When the data is written it would always wind up:
 *   b[0] = b0
 *   b[1] = b1
 *   b[2] = b2
 *   b[3] = b3
 *
 * Once we've written the MSB, we shift the whole digit left one byte,
 * putting the Next Most Significant Byte in the MSB position, so that when
 * we repeat the next one step that byte will be written.
 * NOTE: This code assumes sizeof(mp_weave_word) and WEAVE_WORD_SIZE
 * are 4.
 */
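/*
 * For example, with MP_DIGIT_BIT == 32 on a little endian machine, the
 * first step computes
 *     acc = (d0 >> 24) | ((d1 >> 16) & 0xff00) |
 *           ((d2 >> 8) & 0xff0000)  | (d3 & 0xff000000);
 * so acc's lowest byte is the MSB of d0 and its highest byte is the MSB of
 * d3, and the single 32-bit store writes them to b[0]..b[3] in that order.
 */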
#ifdef MP_IS_LITTLE_ENDIAN
#define MPI_WEAVE_ONE_STEP \
    acc  = (d0 >> (MP_DIGIT_BIT-8))  & 0x000000ff; d0 <<= 8; /*b0*/ \
    acc |= (d1 >> (MP_DIGIT_BIT-16)) & 0x0000ff00; d1 <<= 8; /*b1*/ \
    acc |= (d2 >> (MP_DIGIT_BIT-24)) & 0x00ff0000; d2 <<= 8; /*b2*/ \
    acc |= (d3 >> (MP_DIGIT_BIT-32)) & 0xff000000; d3 <<= 8; /*b3*/ \
    *weaved = acc; weaved += count;
#else
#define MPI_WEAVE_ONE_STEP \
    acc  = (d0 >> (MP_DIGIT_BIT-32)) & 0xff000000; d0 <<= 8; /*b0*/ \
    acc |= (d1 >> (MP_DIGIT_BIT-24)) & 0x00ff0000; d1 <<= 8; /*b1*/ \
    acc |= (d2 >> (MP_DIGIT_BIT-16)) & 0x0000ff00; d2 <<= 8; /*b2*/ \
    acc |= (d3 >> (MP_DIGIT_BIT-8))  & 0x000000ff; d3 <<= 8; /*b3*/ \
    *weaved = acc; weaved += count;
#endif
    switch (sizeof(mp_digit)) {
    case 32:
        MPI_WEAVE_ONE_STEP
        MPI_WEAVE_ONE_STEP
        MPI_WEAVE_ONE_STEP
        MPI_WEAVE_ONE_STEP
        MPI_WEAVE_ONE_STEP
        MPI_WEAVE_ONE_STEP
        MPI_WEAVE_ONE_STEP
        MPI_WEAVE_ONE_STEP
        MPI_WEAVE_ONE_STEP
        MPI_WEAVE_ONE_STEP
        MPI_WEAVE_ONE_STEP
        MPI_WEAVE_ONE_STEP
        MPI_WEAVE_ONE_STEP
        MPI_WEAVE_ONE_STEP
        MPI_WEAVE_ONE_STEP
        MPI_WEAVE_ONE_STEP
    case 16:
        MPI_WEAVE_ONE_STEP
        MPI_WEAVE_ONE_STEP
        MPI_WEAVE_ONE_STEP
        MPI_WEAVE_ONE_STEP
        MPI_WEAVE_ONE_STEP
        MPI_WEAVE_ONE_STEP
        MPI_WEAVE_ONE_STEP
        MPI_WEAVE_ONE_STEP
    case 8:
        MPI_WEAVE_ONE_STEP
        MPI_WEAVE_ONE_STEP
        MPI_WEAVE_ONE_STEP
        MPI_WEAVE_ONE_STEP
    case 4:
        MPI_WEAVE_ONE_STEP
        MPI_WEAVE_ONE_STEP
    case 2:
        MPI_WEAVE_ONE_STEP
    case 1:
        MPI_WEAVE_ONE_STEP
        break;
    }
  }

  return MP_OKAY;
}

/* reverse the operation above for one entry.
 * b points to the offset into the weave array of the power we are
 * calculating */
mp_err weave_to_mpi(mp_int *a, const unsigned char *b,
                    mp_size b_size, mp_size count)
{
  mp_digit *pb  = MP_DIGITS(a);
  mp_digit *end = &pb[b_size];

  MP_SIGN(a) = MP_ZPOS;
  MP_USED(a) = b_size;

  for (; pb < end; pb++) {
    register mp_digit digit;

    digit = *b << 8; b += count;
#define MPI_UNWEAVE_ONE_STEP  digit |= *b; b += count; digit = digit << 8;
    switch (sizeof(mp_digit)) {
    case 32:
        MPI_UNWEAVE_ONE_STEP
        MPI_UNWEAVE_ONE_STEP
        MPI_UNWEAVE_ONE_STEP
        MPI_UNWEAVE_ONE_STEP
        MPI_UNWEAVE_ONE_STEP
        MPI_UNWEAVE_ONE_STEP
        MPI_UNWEAVE_ONE_STEP
        MPI_UNWEAVE_ONE_STEP
        MPI_UNWEAVE_ONE_STEP
        MPI_UNWEAVE_ONE_STEP
        MPI_UNWEAVE_ONE_STEP
        MPI_UNWEAVE_ONE_STEP
        MPI_UNWEAVE_ONE_STEP
        MPI_UNWEAVE_ONE_STEP
        MPI_UNWEAVE_ONE_STEP
        MPI_UNWEAVE_ONE_STEP
    case 16:
        MPI_UNWEAVE_ONE_STEP
        MPI_UNWEAVE_ONE_STEP
        MPI_UNWEAVE_ONE_STEP
        MPI_UNWEAVE_ONE_STEP
        MPI_UNWEAVE_ONE_STEP
        MPI_UNWEAVE_ONE_STEP
        MPI_UNWEAVE_ONE_STEP
        MPI_UNWEAVE_ONE_STEP
    case 8:
        MPI_UNWEAVE_ONE_STEP
        MPI_UNWEAVE_ONE_STEP
        MPI_UNWEAVE_ONE_STEP
        MPI_UNWEAVE_ONE_STEP
    case 4:
        MPI_UNWEAVE_ONE_STEP
        MPI_UNWEAVE_ONE_STEP
    case 2:
        break;
    }
    digit |= *b; b += count;

    *pb = digit;
  }
  s_mp_clamp(a);
  return MP_OKAY;
}
#endif


#define SQR(a,b) \
  MP_CHECKOK( mp_sqr(a, b) );\
  MP_CHECKOK( s_mp_redc(b, mmm) )

#if defined(MP_MONT_USE_MP_MUL)
#define MUL_NOWEAVE(x,a,b) \
  MP_CHECKOK( mp_mul(a, x, b) ); \
  MP_CHECKOK( s_mp_redc(b, mmm) )
#else
#define MUL_NOWEAVE(x,a,b) \
  MP_CHECKOK( s_mp_mul_mont(a, x, b, mmm) )
#endif

#define MUL(x,a,b) \
  MP_CHECKOK( weave_to_mpi(&tmp, powers + (x), nLen, num_powers) ); \
  MUL_NOWEAVE(&tmp,a,b)

#define SWAPPA ptmp = pa1; pa1 = pa2; pa2 = ptmp
#define MP_ALIGN(x,y) ((((ptrdiff_t)(x))+((y)-1))&(((ptrdiff_t)0)-(y)))
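/* In the cache safe code below, MUL() does not keep the precomputed powers
 * around as individual mp_ints.  Every multiply first rebuilds the selected
 * power from the interleaved "powers" table with weave_to_mpi, so the table
 * is always read with the same stride and the lookup's memory access pattern
 * does not depend on which exponent bits are being processed.
 */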
/* Do modular exponentiation using integer multiply code, in a cache safe
 * way. */
mp_err mp_exptmod_safe_i(const mp_int    *montBase,
                         const mp_int    *exponent,
                         const mp_int    *modulus,
                         mp_int          *result,
                         mp_mont_modulus *mmm,
                         int              nLen,
                         mp_size          bits_in_exponent,
                         mp_size          window_bits,
                         mp_size          num_powers)
{
  mp_int *pa1, *pa2, *ptmp;
  mp_size i;
  mp_size first_window;
  mp_err  res;
  int     expOff;
  mp_int  accum1, accum2, accum[WEAVE_WORD_SIZE];
  mp_int  tmp;
  unsigned char *powersArray;
  unsigned char *powers;

  MP_DIGITS(&accum1) = 0;
  MP_DIGITS(&accum2) = 0;
  MP_DIGITS(&accum[0]) = 0;
  MP_DIGITS(&accum[1]) = 0;
  MP_DIGITS(&accum[2]) = 0;
  MP_DIGITS(&accum[3]) = 0;
  MP_DIGITS(&tmp) = 0;

  powersArray = (unsigned char *)malloc(num_powers*(nLen*sizeof(mp_digit)+1));
  if (powersArray == NULL) {
    res = MP_MEM;
    goto CLEANUP;
  }

  /* powers[i] = base ** (i); */
  powers = (unsigned char *)MP_ALIGN(powersArray,num_powers);

  /* grab the first window value.  This allows us to preload accumulator1
   * and save a conversion, some squares and a multiply. */
  MP_CHECKOK( mpl_get_bits(exponent,
                           bits_in_exponent-window_bits, window_bits) );
  first_window = (mp_size)res;

  MP_CHECKOK( mp_init_size(&accum1, 3 * nLen + 2) );
  MP_CHECKOK( mp_init_size(&accum2, 3 * nLen + 2) );
  MP_CHECKOK( mp_init_size(&tmp, 3 * nLen + 2) );

  /* build the first WEAVE_WORD powers inline */
  /* if WEAVE_WORD_SIZE is not 4, this code will have to change */
  if (num_powers > 2) {
    MP_CHECKOK( mp_init_size(&accum[0], 3 * nLen + 2) );
    MP_CHECKOK( mp_init_size(&accum[1], 3 * nLen + 2) );
    MP_CHECKOK( mp_init_size(&accum[2], 3 * nLen + 2) );
    MP_CHECKOK( mp_init_size(&accum[3], 3 * nLen + 2) );
    mp_set(&accum[0], 1);
    MP_CHECKOK( s_mp_to_mont(&accum[0], mmm, &accum[0]) );
    MP_CHECKOK( mp_copy(montBase, &accum[1]) );
    SQR(montBase, &accum[2]);
    MUL_NOWEAVE(montBase, &accum[2], &accum[3]);
    MP_CHECKOK( mpi_to_weave(accum, powers, nLen, num_powers) );
    if (first_window < 4) {
      MP_CHECKOK( mp_copy(&accum[first_window], &accum1) );
      first_window = num_powers;
    }
  } else {
    if (first_window == 0) {
      mp_set(&accum1, 1);
      MP_CHECKOK( s_mp_to_mont(&accum1, mmm, &accum1) );
    } else {
      /* assert first_window == 1? */
      MP_CHECKOK( mp_copy(montBase, &accum1) );
    }
  }

  /*
   * calculate all the powers in the powers array.
   * this adds 2**(k-1)-2 square operations over just calculating the
   * odd powers, where k is the window size, in the two other mp_exptmod
   * implementations in this file.
   * We will get some of that back by not needing the first 'k' squares
   * and one multiply for the first window. */
  for (i = WEAVE_WORD_SIZE; i < num_powers; i++) {
    int acc_index = i & (WEAVE_WORD_SIZE-1); /* i % WEAVE_WORD_SIZE */
    if ( i & 1 ) {
      MUL_NOWEAVE(montBase, &accum[acc_index-1], &accum[acc_index]);
      /* we've filled the array, do our 'per array' processing */
      if (acc_index == (WEAVE_WORD_SIZE-1)) {
        MP_CHECKOK( mpi_to_weave(accum, powers + i - (WEAVE_WORD_SIZE-1),
                                 nLen, num_powers) );

        if (first_window <= i) {
          MP_CHECKOK( mp_copy(&accum[first_window & (WEAVE_WORD_SIZE-1)],
                              &accum1) );
          first_window = num_powers;
        }
      }
    } else {
      /* up to 8 we can find 2^i-1 in the accum array, but at 8 our source
       * and target are the same, so we need to copy.  After that, the
       * value is overwritten, so we need to fetch it from the stored
       * weave array. */
      if (i > 2 * WEAVE_WORD_SIZE) {
        MP_CHECKOK(weave_to_mpi(&accum2, powers+i/2, nLen, num_powers));
        SQR(&accum2, &accum[acc_index]);
      } else {
        int half_power_index = (i/2) & (WEAVE_WORD_SIZE-1);
        if (half_power_index == acc_index) {
          /* copy is cheaper than weave_to_mpi */
          MP_CHECKOK(mp_copy(&accum[half_power_index], &accum2));
          SQR(&accum2,&accum[acc_index]);
        } else {
          SQR(&accum[half_power_index],&accum[acc_index]);
        }
      }
    }
  }
  /* If accum1 isn't set, then there is something wrong with our logic
   * above; it is an internal programming error.
   */
#if MP_ARGCHK == 2
  assert(MP_USED(&accum1) != 0);
#endif

  /* accum1 was preloaded with the first window's power above */
  pa1 = &accum1;
  pa2 = &accum2;

  for (expOff = bits_in_exponent - window_bits*2; expOff >= 0; expOff -= window_bits) {
    mp_size smallExp;
    MP_CHECKOK( mpl_get_bits(exponent, expOff, window_bits) );
    smallExp = (mp_size)res;

    /* hand-unroll the loops */
    switch (window_bits) {
    case 1:
      if (!smallExp) {
        SQR(pa1,pa2); SWAPPA;
      } else if (smallExp & 1) {
        SQR(pa1,pa2); MUL_NOWEAVE(montBase,pa2,pa1);
      } else {
        abort();
      }
      break;
    case 6:
      SQR(pa1,pa2); SQR(pa2,pa1);
      /* fall through */
    case 4:
      SQR(pa1,pa2); SQR(pa2,pa1); SQR(pa1,pa2); SQR(pa2,pa1);
      MUL(smallExp, pa1,pa2); SWAPPA;
      break;
    case 5:
      SQR(pa1,pa2); SQR(pa2,pa1); SQR(pa1,pa2); SQR(pa2,pa1);
      SQR(pa1,pa2); MUL(smallExp,pa2,pa1);
      break;
    default:
      abort(); /* could do a loop? */
    }
  }

  res = s_mp_redc(pa1, mmm);
  mp_exch(pa1, result);

CLEANUP:
  mp_clear(&accum1);
  mp_clear(&accum2);
  mp_clear(&accum[0]);
  mp_clear(&accum[1]);
  mp_clear(&accum[2]);
  mp_clear(&accum[3]);
  mp_clear(&tmp);
  /* PORT_Memset(powers,0,num_powers*nLen*sizeof(mp_digit)); */
  free(powersArray);
  return res;
}
#undef SQR
#undef MUL
#endif

mp_err mp_exptmod(const mp_int *inBase, const mp_int *exponent,
                  const mp_int *modulus, mp_int *result)
{
  const mp_int *base;
  mp_size bits_in_exponent, i, window_bits, odd_ints;
  mp_err  res;
  int     nLen;
  mp_int  montBase, goodBase;
  mp_mont_modulus mmm;
#ifdef MP_USING_CACHE_SAFE_MOD_EXP
  static unsigned int max_window_bits;
#endif

  /* function for computing n0prime only works if n0 is odd */
  if (!mp_isodd(modulus))
    return s_mp_exptmod(inBase, exponent, modulus, result);

  MP_DIGITS(&montBase) = 0;
  MP_DIGITS(&goodBase) = 0;

  if (mp_cmp(inBase, modulus) < 0) {
    base = inBase;
  } else {
    MP_CHECKOK( mp_init(&goodBase) );
    base = &goodBase;
    MP_CHECKOK( mp_mod(inBase, modulus, &goodBase) );
  }

  nLen = MP_USED(modulus);
  MP_CHECKOK( mp_init_size(&montBase, 2 * nLen + 2) );

  mmm.N = *modulus;  /* a copy of the mp_int struct */

  /* compute n0', given n0, n0' = -(n0 ** -1) mod MP_RADIX
  **              where n0 = least significant mp_digit of N, the modulus.
  */
  mmm.n0prime = 0 - s_mp_invmod_radix( MP_DIGIT(modulus, 0) );

  MP_CHECKOK( s_mp_to_mont(base, &mmm, &montBase) );

  bits_in_exponent = mpl_significant_bits(exponent);
#ifdef MP_USING_CACHE_SAFE_MOD_EXP
  if (mp_using_cache_safe_exp) {
    if (bits_in_exponent > 780)
      window_bits = 6;
    else if (bits_in_exponent > 256)
      window_bits = 5;
    else if (bits_in_exponent > 20)
      window_bits = 4;
    /* RSA public key exponents are typically under 20 bits (common values
     * are: 3, 17, 65537) and a 4-bit window is inefficient
     */
    else
      window_bits = 1;
  } else
#endif
  if (bits_in_exponent > 480)
    window_bits = 6;
  else if (bits_in_exponent > 160)
    window_bits = 5;
  else if (bits_in_exponent > 20)
    window_bits = 4;
  /* RSA public key exponents are typically under 20 bits (common values
   * are: 3, 17, 65537) and a 4-bit window is inefficient
   */
  else
    window_bits = 1;

#ifdef MP_USING_CACHE_SAFE_MOD_EXP
  /*
   * clamp the window size based on
   * the cache line size.
   */
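  /* The weave table interleaves num_powers == 2**window_bits entries byte
   * by byte, so each group of 2**window_bits consecutive bytes holds one
   * byte of every entry.  Keeping 2**window_bits at or below the cache line
   * size (64 -> 6 bits, 32 -> 5, 16 -> 4) is what makes a table lookup
   * touch the same cache lines no matter which power is fetched.
   */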
  if (!max_window_bits) {
    unsigned long cache_size = s_mpi_getProcessorLineSize();
    /* processor has no cache, use 'fast' code always */
    if (cache_size == 0) {
      mp_using_cache_safe_exp = 0;
    }
    if ((cache_size == 0) || (cache_size >= 64)) {
      max_window_bits = 6;
    } else if (cache_size >= 32) {
      max_window_bits = 5;
    } else if (cache_size >= 16) {
      max_window_bits = 4;
    } else max_window_bits = 1; /* should this be an assert? */
  }

  /* clamp the window size down before we round bits_in_exponent up to a
   * multiple of it */
  if (mp_using_cache_safe_exp) {
    if (window_bits > max_window_bits) {
      window_bits = max_window_bits;
    }
  }
#endif

  odd_ints = 1 << (window_bits - 1);
  i = bits_in_exponent % window_bits;
  if (i != 0) {
    bits_in_exponent += window_bits - i;
  }

#ifdef MP_USING_MONT_MULF
  if (mp_using_mont_mulf) {
    MP_CHECKOK( s_mp_pad(&montBase, nLen) );
    res = mp_exptmod_f(&montBase, exponent, modulus, result, &mmm, nLen,
                       bits_in_exponent, window_bits, odd_ints);
  } else
#endif
#ifdef MP_USING_CACHE_SAFE_MOD_EXP
  if (mp_using_cache_safe_exp) {
    res = mp_exptmod_safe_i(&montBase, exponent, modulus, result, &mmm, nLen,
                            bits_in_exponent, window_bits, 1 << window_bits);
  } else
#endif
  res = mp_exptmod_i(&montBase, exponent, modulus, result, &mmm, nLen,
                     bits_in_exponent, window_bits, odd_ints);

CLEANUP:
  mp_clear(&montBase);
  mp_clear(&goodBase);
  /* Don't mp_clear mmm.N because it is merely a copy of modulus.
  ** Just zap it.
  */
  memset(&mmm, 0, sizeof mmm);
  return res;
}
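/*
 * Usage sketch (illustrative only and not compiled here; it assumes just
 * the public mpi.h entry points mp_init, mp_read_radix, mp_exptmod and
 * mp_clear, and omits error checking for brevity):
 *
 *   #include "mpi.h"
 *
 *   mp_err modexp_example(void)
 *   {
 *     mp_int base, expo, mod, result;
 *     mp_err res;
 *
 *     mp_init(&base); mp_init(&expo); mp_init(&mod); mp_init(&result);
 *     mp_read_radix(&base, "4",   10);
 *     mp_read_radix(&expo, "13",  10);
 *     mp_read_radix(&mod,  "497", 10);   // an odd modulus
 *     res = mp_exptmod(&base, &expo, &mod, &result);
 *     // on success, result == 445 == 4**13 mod 497
 *     mp_clear(&base); mp_clear(&expo); mp_clear(&mod); mp_clear(&result);
 *     return res;
 *   }
 */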