clightning-dumpkeys

dump clightning output descriptors
git clone git://jb55.com/clightning-dumpkeys

sha256.c (11197B)



/* MIT (BSD) license - see LICENSE file for details */
/* SHA256 core code translated from the Bitcoin project's C++:
 *
 * src/crypto/sha256.cpp commit 417532c8acb93c36c2b6fd052b7c11b6a2906aa2
 * Copyright (c) 2014 The Bitcoin Core developers
 * Distributed under the MIT software license, see the accompanying
 * file COPYING or http://www.opensource.org/licenses/mit-license.php.
 */
#include "sha256.h"
#include "endian.h"
#include "compiler.h"
#include <stdbool.h>
#include <assert.h>
#include <string.h>

static void invalidate_sha256(struct sha256_ctx *ctx)
{
#ifdef CCAN_CRYPTO_SHA256_USE_OPENSSL
	ctx->c.md_len = 0;
#else
	ctx->bytes = (size_t)-1;
#endif
}

static void check_sha256(struct sha256_ctx *ctx)
{
#ifdef CCAN_CRYPTO_SHA256_USE_OPENSSL
	assert(ctx->c.md_len != 0);
#else
	assert(ctx->bytes != (size_t)-1);
#endif
}

#ifdef CCAN_CRYPTO_SHA256_USE_OPENSSL
void sha256_init(struct sha256_ctx *ctx)
{
	SHA256_Init(&ctx->c);
}

void sha256_update(struct sha256_ctx *ctx, const void *p, size_t size)
{
	check_sha256(ctx);
	SHA256_Update(&ctx->c, p, size);
}

void sha256_done(struct sha256_ctx *ctx, struct sha256 *res)
{
	SHA256_Final(res->u.u8, &ctx->c);
	invalidate_sha256(ctx);
}
#else
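/* SHA-256 logical functions from FIPS 180-4 (Ch, Maj, Sigma0/1, sigma0/1);
 * the 32-bit rotates are written as pairs of shifts. */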
static uint32_t Ch(uint32_t x, uint32_t y, uint32_t z)
{
	return z ^ (x & (y ^ z));
}
static uint32_t Maj(uint32_t x, uint32_t y, uint32_t z)
{
	return (x & y) | (z & (x | y));
}
static uint32_t Sigma0(uint32_t x)
{
	return (x >> 2 | x << 30) ^ (x >> 13 | x << 19) ^ (x >> 22 | x << 10);
}
static uint32_t Sigma1(uint32_t x)
{
	return (x >> 6 | x << 26) ^ (x >> 11 | x << 21) ^ (x >> 25 | x << 7);
}
static uint32_t sigma0(uint32_t x)
{
	return (x >> 7 | x << 25) ^ (x >> 18 | x << 14) ^ (x >> 3);
}
static uint32_t sigma1(uint32_t x)
{
	return (x >> 17 | x << 15) ^ (x >> 19 | x << 13) ^ (x >> 10);
}

/** One round of SHA-256. */
static void Round(uint32_t a, uint32_t b, uint32_t c, uint32_t *d, uint32_t e, uint32_t f, uint32_t g, uint32_t *h, uint32_t k, uint32_t w)
{
	uint32_t t1 = *h + Sigma1(e) + Ch(e, f, g) + k + w;
	uint32_t t2 = Sigma0(a) + Maj(a, b, c);
	*d += t1;
	*h = t1 + t2;
}

/** Perform one SHA-256 transformation, processing a 64-byte chunk. */
static void Transform(uint32_t *s, const uint32_t *chunk)
{
	uint32_t a = s[0], b = s[1], c = s[2], d = s[3], e = s[4], f = s[5], g = s[6], h = s[7];
	uint32_t w0, w1, w2, w3, w4, w5, w6, w7, w8, w9, w10, w11, w12, w13, w14, w15;

	/* Rounds 0-15: read the chunk as sixteen big-endian message words. */
	Round(a, b, c, &d, e, f, g, &h, 0x428a2f98, w0 = be32_to_cpu(chunk[0]));
	Round(h, a, b, &c, d, e, f, &g, 0x71374491, w1 = be32_to_cpu(chunk[1]));
	Round(g, h, a, &b, c, d, e, &f, 0xb5c0fbcf, w2 = be32_to_cpu(chunk[2]));
	Round(f, g, h, &a, b, c, d, &e, 0xe9b5dba5, w3 = be32_to_cpu(chunk[3]));
	Round(e, f, g, &h, a, b, c, &d, 0x3956c25b, w4 = be32_to_cpu(chunk[4]));
	Round(d, e, f, &g, h, a, b, &c, 0x59f111f1, w5 = be32_to_cpu(chunk[5]));
	Round(c, d, e, &f, g, h, a, &b, 0x923f82a4, w6 = be32_to_cpu(chunk[6]));
	Round(b, c, d, &e, f, g, h, &a, 0xab1c5ed5, w7 = be32_to_cpu(chunk[7]));
	Round(a, b, c, &d, e, f, g, &h, 0xd807aa98, w8 = be32_to_cpu(chunk[8]));
	Round(h, a, b, &c, d, e, f, &g, 0x12835b01, w9 = be32_to_cpu(chunk[9]));
	Round(g, h, a, &b, c, d, e, &f, 0x243185be, w10 = be32_to_cpu(chunk[10]));
	Round(f, g, h, &a, b, c, d, &e, 0x550c7dc3, w11 = be32_to_cpu(chunk[11]));
	Round(e, f, g, &h, a, b, c, &d, 0x72be5d74, w12 = be32_to_cpu(chunk[12]));
	Round(d, e, f, &g, h, a, b, &c, 0x80deb1fe, w13 = be32_to_cpu(chunk[13]));
	Round(c, d, e, &f, g, h, a, &b, 0x9bdc06a7, w14 = be32_to_cpu(chunk[14]));
	Round(b, c, d, &e, f, g, h, &a, 0xc19bf174, w15 = be32_to_cpu(chunk[15]));

	/* Rounds 16-63: the message schedule is extended in place as it is consumed. */
	Round(a, b, c, &d, e, f, g, &h, 0xe49b69c1, w0 += sigma1(w14) + w9 + sigma0(w1));
	Round(h, a, b, &c, d, e, f, &g, 0xefbe4786, w1 += sigma1(w15) + w10 + sigma0(w2));
	Round(g, h, a, &b, c, d, e, &f, 0x0fc19dc6, w2 += sigma1(w0) + w11 + sigma0(w3));
	Round(f, g, h, &a, b, c, d, &e, 0x240ca1cc, w3 += sigma1(w1) + w12 + sigma0(w4));
	Round(e, f, g, &h, a, b, c, &d, 0x2de92c6f, w4 += sigma1(w2) + w13 + sigma0(w5));
	Round(d, e, f, &g, h, a, b, &c, 0x4a7484aa, w5 += sigma1(w3) + w14 + sigma0(w6));
	Round(c, d, e, &f, g, h, a, &b, 0x5cb0a9dc, w6 += sigma1(w4) + w15 + sigma0(w7));
	Round(b, c, d, &e, f, g, h, &a, 0x76f988da, w7 += sigma1(w5) + w0 + sigma0(w8));
	Round(a, b, c, &d, e, f, g, &h, 0x983e5152, w8 += sigma1(w6) + w1 + sigma0(w9));
	Round(h, a, b, &c, d, e, f, &g, 0xa831c66d, w9 += sigma1(w7) + w2 + sigma0(w10));
	Round(g, h, a, &b, c, d, e, &f, 0xb00327c8, w10 += sigma1(w8) + w3 + sigma0(w11));
	Round(f, g, h, &a, b, c, d, &e, 0xbf597fc7, w11 += sigma1(w9) + w4 + sigma0(w12));
	Round(e, f, g, &h, a, b, c, &d, 0xc6e00bf3, w12 += sigma1(w10) + w5 + sigma0(w13));
	Round(d, e, f, &g, h, a, b, &c, 0xd5a79147, w13 += sigma1(w11) + w6 + sigma0(w14));
	Round(c, d, e, &f, g, h, a, &b, 0x06ca6351, w14 += sigma1(w12) + w7 + sigma0(w15));
	Round(b, c, d, &e, f, g, h, &a, 0x14292967, w15 += sigma1(w13) + w8 + sigma0(w0));

	Round(a, b, c, &d, e, f, g, &h, 0x27b70a85, w0 += sigma1(w14) + w9 + sigma0(w1));
	Round(h, a, b, &c, d, e, f, &g, 0x2e1b2138, w1 += sigma1(w15) + w10 + sigma0(w2));
	Round(g, h, a, &b, c, d, e, &f, 0x4d2c6dfc, w2 += sigma1(w0) + w11 + sigma0(w3));
	Round(f, g, h, &a, b, c, d, &e, 0x53380d13, w3 += sigma1(w1) + w12 + sigma0(w4));
	Round(e, f, g, &h, a, b, c, &d, 0x650a7354, w4 += sigma1(w2) + w13 + sigma0(w5));
	Round(d, e, f, &g, h, a, b, &c, 0x766a0abb, w5 += sigma1(w3) + w14 + sigma0(w6));
	Round(c, d, e, &f, g, h, a, &b, 0x81c2c92e, w6 += sigma1(w4) + w15 + sigma0(w7));
	Round(b, c, d, &e, f, g, h, &a, 0x92722c85, w7 += sigma1(w5) + w0 + sigma0(w8));
	Round(a, b, c, &d, e, f, g, &h, 0xa2bfe8a1, w8 += sigma1(w6) + w1 + sigma0(w9));
	Round(h, a, b, &c, d, e, f, &g, 0xa81a664b, w9 += sigma1(w7) + w2 + sigma0(w10));
	Round(g, h, a, &b, c, d, e, &f, 0xc24b8b70, w10 += sigma1(w8) + w3 + sigma0(w11));
	Round(f, g, h, &a, b, c, d, &e, 0xc76c51a3, w11 += sigma1(w9) + w4 + sigma0(w12));
	Round(e, f, g, &h, a, b, c, &d, 0xd192e819, w12 += sigma1(w10) + w5 + sigma0(w13));
	Round(d, e, f, &g, h, a, b, &c, 0xd6990624, w13 += sigma1(w11) + w6 + sigma0(w14));
	Round(c, d, e, &f, g, h, a, &b, 0xf40e3585, w14 += sigma1(w12) + w7 + sigma0(w15));
	Round(b, c, d, &e, f, g, h, &a, 0x106aa070, w15 += sigma1(w13) + w8 + sigma0(w0));

	Round(a, b, c, &d, e, f, g, &h, 0x19a4c116, w0 += sigma1(w14) + w9 + sigma0(w1));
	Round(h, a, b, &c, d, e, f, &g, 0x1e376c08, w1 += sigma1(w15) + w10 + sigma0(w2));
	Round(g, h, a, &b, c, d, e, &f, 0x2748774c, w2 += sigma1(w0) + w11 + sigma0(w3));
	Round(f, g, h, &a, b, c, d, &e, 0x34b0bcb5, w3 += sigma1(w1) + w12 + sigma0(w4));
	Round(e, f, g, &h, a, b, c, &d, 0x391c0cb3, w4 += sigma1(w2) + w13 + sigma0(w5));
	Round(d, e, f, &g, h, a, b, &c, 0x4ed8aa4a, w5 += sigma1(w3) + w14 + sigma0(w6));
	Round(c, d, e, &f, g, h, a, &b, 0x5b9cca4f, w6 += sigma1(w4) + w15 + sigma0(w7));
	Round(b, c, d, &e, f, g, h, &a, 0x682e6ff3, w7 += sigma1(w5) + w0 + sigma0(w8));
	Round(a, b, c, &d, e, f, g, &h, 0x748f82ee, w8 += sigma1(w6) + w1 + sigma0(w9));
	Round(h, a, b, &c, d, e, f, &g, 0x78a5636f, w9 += sigma1(w7) + w2 + sigma0(w10));
	Round(g, h, a, &b, c, d, e, &f, 0x84c87814, w10 += sigma1(w8) + w3 + sigma0(w11));
	Round(f, g, h, &a, b, c, d, &e, 0x8cc70208, w11 += sigma1(w9) + w4 + sigma0(w12));
	Round(e, f, g, &h, a, b, c, &d, 0x90befffa, w12 += sigma1(w10) + w5 + sigma0(w13));
	Round(d, e, f, &g, h, a, b, &c, 0xa4506ceb, w13 += sigma1(w11) + w6 + sigma0(w14));
	/* Final two rounds: the extended words are not stored back (not needed again). */
	Round(c, d, e, &f, g, h, a, &b, 0xbef9a3f7, w14 + sigma1(w12) + w7 + sigma0(w15));
	Round(b, c, d, &e, f, g, h, &a, 0xc67178f2, w15 + sigma1(w13) + w8 + sigma0(w0));

	s[0] += a;
	s[1] += b;
	s[2] += c;
	s[3] += d;
	s[4] += e;
	s[5] += f;
	s[6] += g;
	s[7] += h;
}


static void add(struct sha256_ctx *ctx, const void *p, size_t len)
{
	const unsigned char *data = p;
	size_t bufsize = ctx->bytes % 64;

	if (bufsize + len >= 64) {
		/* Fill the buffer, and process it. */
		memcpy(ctx->buf.u8 + bufsize, data, 64 - bufsize);
		ctx->bytes += 64 - bufsize;
		data += 64 - bufsize;
		len -= 64 - bufsize;
		Transform(ctx->s, ctx->buf.u32);
		bufsize = 0;
	}

	while (len >= 64) {
		/* Process full chunks directly from the source. */
		if (alignment_ok(data, sizeof(uint32_t)))
			Transform(ctx->s, (const uint32_t *)data);
		else {
			memcpy(ctx->buf.u8, data, sizeof(ctx->buf));
			Transform(ctx->s, ctx->buf.u32);
		}
		ctx->bytes += 64;
		data += 64;
		len -= 64;
	}

	if (len) {
		/* Fill the buffer with what remains. */
		memcpy(ctx->buf.u8 + bufsize, data, len);
		ctx->bytes += len;
	}
}

void sha256_init(struct sha256_ctx *ctx)
{
	struct sha256_ctx init = SHA256_INIT;
	*ctx = init;
}

void sha256_update(struct sha256_ctx *ctx, const void *p, size_t size)
{
	check_sha256(ctx);
	add(ctx, p, size);
}

void sha256_done(struct sha256_ctx *ctx, struct sha256 *res)
{
	static const unsigned char pad[64] = {0x80};
	uint64_t sizedesc;
	size_t i;

	sizedesc = cpu_to_be64((uint64_t)ctx->bytes << 3);
	/* Add '1' bit to terminate, then all 0 bits, up to next block - 8. */
	add(ctx, pad, 1 + ((128 - 8 - (ctx->bytes % 64) - 1) % 64));
	/* Add number of bits of data (big endian) */
	add(ctx, &sizedesc, 8);
	for (i = 0; i < sizeof(ctx->s) / sizeof(ctx->s[0]); i++)
		res->u.u32[i] = cpu_to_be32(ctx->s[i]);
	invalidate_sha256(ctx);
}
#endif

void sha256(struct sha256 *sha, const void *p, size_t size)
{
	struct sha256_ctx ctx;

	sha256_init(&ctx);
	sha256_update(&ctx, p, size);
	sha256_done(&ctx, sha);
}

/* Add fixed-width integers in the host's native byte order */
void sha256_u8(struct sha256_ctx *ctx, uint8_t v)
{
	sha256_update(ctx, &v, sizeof(v));
}

void sha256_u16(struct sha256_ctx *ctx, uint16_t v)
{
	sha256_update(ctx, &v, sizeof(v));
}

void sha256_u32(struct sha256_ctx *ctx, uint32_t v)
{
	sha256_update(ctx, &v, sizeof(v));
}

void sha256_u64(struct sha256_ctx *ctx, uint64_t v)
{
	sha256_update(ctx, &v, sizeof(v));
}

/* Add as little-endian */
void sha256_le16(struct sha256_ctx *ctx, uint16_t v)
{
	leint16_t lev = cpu_to_le16(v);
	sha256_update(ctx, &lev, sizeof(lev));
}

void sha256_le32(struct sha256_ctx *ctx, uint32_t v)
{
	leint32_t lev = cpu_to_le32(v);
	sha256_update(ctx, &lev, sizeof(lev));
}

void sha256_le64(struct sha256_ctx *ctx, uint64_t v)
{
	leint64_t lev = cpu_to_le64(v);
	sha256_update(ctx, &lev, sizeof(lev));
}

/* Add as big-endian */
void sha256_be16(struct sha256_ctx *ctx, uint16_t v)
{
	beint16_t bev = cpu_to_be16(v);
	sha256_update(ctx, &bev, sizeof(bev));
}

void sha256_be32(struct sha256_ctx *ctx, uint32_t v)
{
	beint32_t bev = cpu_to_be32(v);
	sha256_update(ctx, &bev, sizeof(bev));
}

void sha256_be64(struct sha256_ctx *ctx, uint64_t v)
{
	beint64_t bev = cpu_to_be64(v);
	sha256_update(ctx, &bev, sizeof(bev));
}


/* Double SHA-256 (Bitcoin-style): bytes_out = SHA256(SHA256(bytes)).
 * len must equal SHA256_LEN (the 32-byte digest size). */
int sha256d(const unsigned char *bytes, size_t bytes_len,
                  unsigned char *bytes_out, size_t len)
{
	struct sha256 sha_1, sha_2;
	bool aligned = alignment_ok(bytes_out, sizeof(sha_1.u.u32));

	if (!bytes || !bytes_out || len != SHA256_LEN)
		return 0;

	sha256(&sha_1, bytes, bytes_len);
	sha256(aligned ? (struct sha256 *)bytes_out : &sha_2, &sha_1, sizeof(sha_1));
	if (!aligned) {
		memcpy(bytes_out, &sha_2, sizeof(sha_2));
		wally_clear(&sha_2, sizeof(sha_2));
	}
	wally_clear(&sha_1, sizeof(sha_1));
	return 1;
}
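
For reference, a minimal caller sketch against the API above. This is not part of the file; it assumes sha256.h declares struct sha256, struct sha256_ctx, and these functions exactly as they are used here.

#include "sha256.h"
#include <stdio.h>

int main(void)
{
	struct sha256_ctx ctx;
	struct sha256 digest;
	static const char msg[] = "hello";

	/* Streaming API: init, update (any number of times), done. */
	sha256_init(&ctx);
	sha256_update(&ctx, msg, sizeof(msg) - 1);
	sha256_done(&ctx, &digest);

	/* Equivalent one-shot helper:
	 *   sha256(&digest, msg, sizeof(msg) - 1);
	 */

	/* A SHA-256 digest is 32 bytes; print it as hex. */
	for (int i = 0; i < 32; i++)
		printf("%02x", digest.u.u8[i]);
	printf("\n");
	return 0;
}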