chibipub

experimental activitypub node in C
git clone git://jb55.com/chibipub

blake3.c (27003B)


#include <assert.h>
#include <stdbool.h>
#include <string.h>

#include "blake3.h"
#include "blake3_impl.h"

const char * blake3_version(void) {
  return BLAKE3_VERSION_STRING;
}

INLINE void chunk_state_init(blake3_chunk_state *self, const uint32_t key[8],
                             uint8_t flags) {
  memcpy(self->cv, key, BLAKE3_KEY_LEN);
  self->chunk_counter = 0;
  memset(self->buf, 0, BLAKE3_BLOCK_LEN);
  self->buf_len = 0;
  self->blocks_compressed = 0;
  self->flags = flags;
}

INLINE void chunk_state_reset(blake3_chunk_state *self, const uint32_t key[8],
                              uint64_t chunk_counter) {
  memcpy(self->cv, key, BLAKE3_KEY_LEN);
  self->chunk_counter = chunk_counter;
  self->blocks_compressed = 0;
  memset(self->buf, 0, BLAKE3_BLOCK_LEN);
  self->buf_len = 0;
}

INLINE size_t chunk_state_len(const blake3_chunk_state *self) {
  return (BLAKE3_BLOCK_LEN * (size_t)self->blocks_compressed) +
         ((size_t)self->buf_len);
}

INLINE size_t chunk_state_fill_buf(blake3_chunk_state *self,
                                   const uint8_t *input, size_t input_len) {
  size_t take = BLAKE3_BLOCK_LEN - ((size_t)self->buf_len);
  if (take > input_len) {
    take = input_len;
  }
  uint8_t *dest = self->buf + ((size_t)self->buf_len);
  memcpy(dest, input, take);
  self->buf_len += (uint8_t)take;
  return take;
}

INLINE uint8_t chunk_state_maybe_start_flag(const blake3_chunk_state *self) {
  if (self->blocks_compressed == 0) {
    return CHUNK_START;
  } else {
    return 0;
  }
}

typedef struct {
  uint32_t input_cv[8];
  uint64_t counter;
  uint8_t block[BLAKE3_BLOCK_LEN];
  uint8_t block_len;
  uint8_t flags;
} output_t;

INLINE output_t make_output(const uint32_t input_cv[8],
                            const uint8_t block[BLAKE3_BLOCK_LEN],
                            uint8_t block_len, uint64_t counter,
                            uint8_t flags) {
  output_t ret;
  memcpy(ret.input_cv, input_cv, 32);
  memcpy(ret.block, block, BLAKE3_BLOCK_LEN);
  ret.block_len = block_len;
  ret.counter = counter;
  ret.flags = flags;
  return ret;
}

// Chaining values within a given chunk (specifically the compress_in_place
// interface) are represented as words. This avoids unnecessary bytes<->words
// conversion overhead in the portable implementation. However, the hash_many
// interface handles both user input and parent node blocks, so it accepts
// bytes. For that reason, chaining values in the CV stack are represented as
// bytes.
INLINE void output_chaining_value(const output_t *self, uint8_t cv[32]) {
  uint32_t cv_words[8];
  memcpy(cv_words, self->input_cv, 32);
  blake3_compress_in_place(cv_words, self->block, self->block_len,
                           self->counter, self->flags);
  store_cv_words(cv, cv_words);
}

INLINE void output_root_bytes(const output_t *self, uint64_t seek, uint8_t *out,
                              size_t out_len) {
  uint64_t output_block_counter = seek / 64;
  size_t offset_within_block = seek % 64;
  uint8_t wide_buf[64];
  while (out_len > 0) {
    blake3_compress_xof(self->input_cv, self->block, self->block_len,
                        output_block_counter, self->flags | ROOT, wide_buf);
    size_t available_bytes = 64 - offset_within_block;
    size_t memcpy_len;
    if (out_len > available_bytes) {
      memcpy_len = available_bytes;
    } else {
      memcpy_len = out_len;
    }
    memcpy(out, wide_buf + offset_within_block, memcpy_len);
    out += memcpy_len;
    out_len -= memcpy_len;
    output_block_counter += 1;
    offset_within_block = 0;
  }
}
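
// A worked example of the seek arithmetic above: with seek == 100, the first
// compression runs with output_block_counter == 1 (bytes 64..127 of the XOF
// stream) and offset_within_block == 36, so the first memcpy copies stream
// bytes 100..127. Every later block is then copied starting from offset 0.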

INLINE void chunk_state_update(blake3_chunk_state *self, const uint8_t *input,
                               size_t input_len) {
  if (self->buf_len > 0) {
    size_t take = chunk_state_fill_buf(self, input, input_len);
    input += take;
    input_len -= take;
    if (input_len > 0) {
      blake3_compress_in_place(
          self->cv, self->buf, BLAKE3_BLOCK_LEN, self->chunk_counter,
          self->flags | chunk_state_maybe_start_flag(self));
      self->blocks_compressed += 1;
      self->buf_len = 0;
      memset(self->buf, 0, BLAKE3_BLOCK_LEN);
    }
  }

  while (input_len > BLAKE3_BLOCK_LEN) {
    blake3_compress_in_place(self->cv, input, BLAKE3_BLOCK_LEN,
                             self->chunk_counter,
                             self->flags | chunk_state_maybe_start_flag(self));
    self->blocks_compressed += 1;
    input += BLAKE3_BLOCK_LEN;
    input_len -= BLAKE3_BLOCK_LEN;
  }

  size_t take = chunk_state_fill_buf(self, input, input_len);
  input += take;
  input_len -= take;
}

INLINE output_t chunk_state_output(const blake3_chunk_state *self) {
  uint8_t block_flags =
      self->flags | chunk_state_maybe_start_flag(self) | CHUNK_END;
  return make_output(self->cv, self->buf, self->buf_len, self->chunk_counter,
                     block_flags);
}

INLINE output_t parent_output(const uint8_t block[BLAKE3_BLOCK_LEN],
                              const uint32_t key[8], uint8_t flags) {
  return make_output(key, block, BLAKE3_BLOCK_LEN, 0, flags | PARENT);
}

// Given some input larger than one chunk, return the number of bytes that
// should go in the left subtree. This is the largest power-of-2 number of
// chunks that leaves at least 1 byte for the right subtree.
INLINE size_t left_len(size_t content_len) {
  // Subtract 1 to reserve at least one byte for the right side. content_len
  // should always be greater than BLAKE3_CHUNK_LEN.
  size_t full_chunks = (content_len - 1) / BLAKE3_CHUNK_LEN;
  return round_down_to_power_of_2(full_chunks) * BLAKE3_CHUNK_LEN;
}
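
// For example, if content_len is 3 * BLAKE3_CHUNK_LEN + 1, then full_chunks
// is 3, which rounds down to 2, so the left subtree gets exactly 2 chunks and
// the right subtree gets the remaining 1 chunk plus 1 byte.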

// Use SIMD parallelism to hash up to MAX_SIMD_DEGREE chunks at the same time
// on a single thread. Write out the chunk chaining values and return the
// number of chunks hashed. These chunks are never the root and never empty;
// those cases use a different codepath.
INLINE size_t compress_chunks_parallel(const uint8_t *input, size_t input_len,
                                       const uint32_t key[8],
                                       uint64_t chunk_counter, uint8_t flags,
                                       uint8_t *out) {
#if defined(BLAKE3_TESTING)
  assert(0 < input_len);
  assert(input_len <= MAX_SIMD_DEGREE * BLAKE3_CHUNK_LEN);
#endif

  const uint8_t *chunks_array[MAX_SIMD_DEGREE];
  size_t input_position = 0;
  size_t chunks_array_len = 0;
  while (input_len - input_position >= BLAKE3_CHUNK_LEN) {
    chunks_array[chunks_array_len] = &input[input_position];
    input_position += BLAKE3_CHUNK_LEN;
    chunks_array_len += 1;
  }

  blake3_hash_many(chunks_array, chunks_array_len,
                   BLAKE3_CHUNK_LEN / BLAKE3_BLOCK_LEN, key, chunk_counter,
                   true, flags, CHUNK_START, CHUNK_END, out);

  // Hash the remaining partial chunk, if there is one. Note that the empty
  // chunk (meaning the empty message) is a different codepath.
  if (input_len > input_position) {
    uint64_t counter = chunk_counter + (uint64_t)chunks_array_len;
    blake3_chunk_state chunk_state;
    chunk_state_init(&chunk_state, key, flags);
    chunk_state.chunk_counter = counter;
    chunk_state_update(&chunk_state, &input[input_position],
                       input_len - input_position);
    output_t output = chunk_state_output(&chunk_state);
    output_chaining_value(&output, &out[chunks_array_len * BLAKE3_OUT_LEN]);
    return chunks_array_len + 1;
  } else {
    return chunks_array_len;
  }
}

// Use SIMD parallelism to hash up to MAX_SIMD_DEGREE parents at the same time
// on a single thread. Write out the parent chaining values and return the
// number of parents hashed. (If there's an odd input chaining value left over,
// return it as an additional output.) These parents are never the root and
// never empty; those cases use a different codepath.
INLINE size_t compress_parents_parallel(const uint8_t *child_chaining_values,
                                        size_t num_chaining_values,
                                        const uint32_t key[8], uint8_t flags,
                                        uint8_t *out) {
#if defined(BLAKE3_TESTING)
  assert(2 <= num_chaining_values);
  assert(num_chaining_values <= 2 * MAX_SIMD_DEGREE_OR_2);
#endif

  const uint8_t *parents_array[MAX_SIMD_DEGREE_OR_2];
  size_t parents_array_len = 0;
  while (num_chaining_values - (2 * parents_array_len) >= 2) {
    parents_array[parents_array_len] =
        &child_chaining_values[2 * parents_array_len * BLAKE3_OUT_LEN];
    parents_array_len += 1;
  }

  blake3_hash_many(parents_array, parents_array_len, 1, key,
                   0, // Parents always use counter 0.
                   false, flags | PARENT,
                   0, // Parents have no start flags.
                   0, // Parents have no end flags.
                   out);

  // If there's an odd child left over, it becomes an output.
  if (num_chaining_values > 2 * parents_array_len) {
    memcpy(&out[parents_array_len * BLAKE3_OUT_LEN],
           &child_chaining_values[2 * parents_array_len * BLAKE3_OUT_LEN],
           BLAKE3_OUT_LEN);
    return parents_array_len + 1;
  } else {
    return parents_array_len;
  }
}
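
// For example, with num_chaining_values == 5, the loop above pairs up
// children 0..3 into two parent blocks, the fifth CV is copied through
// unchanged as an extra output, and the function returns 3.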

// The wide helper function writes out an array of chaining values and returns
// the length of that array. The number of chaining values returned is the
// dynamically detected SIMD degree, at most MAX_SIMD_DEGREE, or fewer if the
// input is shorter than that many chunks. The reason for maintaining a wide
// array of chaining values going back up the tree is to allow the
// implementation to hash as many parents in parallel as possible.
//
// As a special case when the SIMD degree is 1, this function will still return
// at least 2 outputs. This guarantees that this function doesn't perform the
// root compression. (If it did, it would use the wrong flags, and also we
// wouldn't be able to implement extendable output.) Note that this function is
// not used when the whole input is only 1 chunk long; that's a different
// codepath.
//
// Why not just have the caller split the input on the first update(), instead
// of implementing this special rule? Because we don't want to limit SIMD or
// multi-threading parallelism for that update().
static size_t blake3_compress_subtree_wide(const uint8_t *input,
                                           size_t input_len,
                                           const uint32_t key[8],
                                           uint64_t chunk_counter,
                                           uint8_t flags, uint8_t *out) {
  // Note that the single chunk case does *not* bump the SIMD degree up to 2
  // when it is 1. If this implementation adds multi-threading in the future,
  // this gives us the option of multi-threading even the 2-chunk case, which
  // can help performance on smaller platforms.
  if (input_len <= blake3_simd_degree() * BLAKE3_CHUNK_LEN) {
    return compress_chunks_parallel(input, input_len, key, chunk_counter, flags,
                                    out);
  }

  // With more than simd_degree chunks, we need to recurse. Start by dividing
  // the input into left and right subtrees. (Note that this is only optimal
  // as long as the SIMD degree is a power of 2. If we ever get a SIMD degree
  // of 3 or something, we'll need a more complicated strategy.)
  size_t left_input_len = left_len(input_len);
  size_t right_input_len = input_len - left_input_len;
  const uint8_t *right_input = &input[left_input_len];
  uint64_t right_chunk_counter =
      chunk_counter + (uint64_t)(left_input_len / BLAKE3_CHUNK_LEN);

  // Make space for the child outputs. Here we use MAX_SIMD_DEGREE_OR_2 to
  // account for the special case of returning 2 outputs when the SIMD degree
  // is 1.
  uint8_t cv_array[2 * MAX_SIMD_DEGREE_OR_2 * BLAKE3_OUT_LEN];
  size_t degree = blake3_simd_degree();
  if (left_input_len > BLAKE3_CHUNK_LEN && degree == 1) {
    // The special case: We always use a degree of at least two, to make
    // sure there are two outputs. Except, as noted above, at the chunk
    // level, where we allow degree=1. (Note that the 1-chunk-input case is
    // a different codepath.)
    degree = 2;
  }
  uint8_t *right_cvs = &cv_array[degree * BLAKE3_OUT_LEN];

  // Recurse! If this implementation adds multi-threading support in the
  // future, this is where it will go.
  size_t left_n = blake3_compress_subtree_wide(input, left_input_len, key,
                                               chunk_counter, flags, cv_array);
  size_t right_n = blake3_compress_subtree_wide(
      right_input, right_input_len, key, right_chunk_counter, flags, right_cvs);

  // The special case again. If simd_degree=1, then we'll have left_n=1 and
  // right_n=1. Rather than compressing them into a single output, return
  // them directly, to make sure we always have at least two outputs.
  if (left_n == 1) {
    memcpy(out, cv_array, 2 * BLAKE3_OUT_LEN);
    return 2;
  }

  // Otherwise, do one layer of parent node compression.
  size_t num_chaining_values = left_n + right_n;
  return compress_parents_parallel(cv_array, num_chaining_values, key, flags,
                                   out);
}
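
// To make the recursion above concrete: with a SIMD degree of 4 and a
// 10-chunk input, left_len() assigns 8 chunks to the left subtree and 2 to
// the right. The left call recurses again (8 chunks > 4), while the right
// call hashes its 2 chunks directly in compress_chunks_parallel().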

// Hash a subtree with compress_subtree_wide(), and then condense the resulting
// list of chaining values down to a single parent node. Don't compress that
// last parent node, however. Instead, return its message bytes (the
// concatenated chaining values of its children). This is necessary when the
// first call to update() supplies a complete subtree, because the topmost
// parent node of that subtree could end up being the root. It's also necessary
// for extended output in the general case.
//
// As with compress_subtree_wide(), this function is not used on inputs of 1
// chunk or less. That's a different codepath.
INLINE void compress_subtree_to_parent_node(
    const uint8_t *input, size_t input_len, const uint32_t key[8],
    uint64_t chunk_counter, uint8_t flags, uint8_t out[2 * BLAKE3_OUT_LEN]) {
#if defined(BLAKE3_TESTING)
  assert(input_len > BLAKE3_CHUNK_LEN);
#endif

  uint8_t cv_array[MAX_SIMD_DEGREE_OR_2 * BLAKE3_OUT_LEN];
  size_t num_cvs = blake3_compress_subtree_wide(input, input_len, key,
                                                chunk_counter, flags, cv_array);

  // If MAX_SIMD_DEGREE is greater than 2 and there's enough input,
  // compress_subtree_wide() returns more than 2 chaining values. Condense
  // them into 2 by forming parent nodes repeatedly.
  uint8_t out_array[MAX_SIMD_DEGREE_OR_2 * BLAKE3_OUT_LEN / 2];
  while (num_cvs > 2) {
    num_cvs =
        compress_parents_parallel(cv_array, num_cvs, key, flags, out_array);
    memcpy(cv_array, out_array, num_cvs * BLAKE3_OUT_LEN);
  }
  memcpy(out, cv_array, 2 * BLAKE3_OUT_LEN);
}

INLINE void hasher_init_base(blake3_hasher *self, const uint32_t key[8],
                             uint8_t flags) {
  memcpy(self->key, key, BLAKE3_KEY_LEN);
  chunk_state_init(&self->chunk, key, flags);
  self->cv_stack_len = 0;
}

void blake3_hasher_init(blake3_hasher *self) { hasher_init_base(self, IV, 0); }

void blake3_hasher_init_keyed(blake3_hasher *self,
                              const uint8_t key[BLAKE3_KEY_LEN]) {
  uint32_t key_words[8];
  load_key_words(key, key_words);
  hasher_init_base(self, key_words, KEYED_HASH);
}

void blake3_hasher_init_derive_key_raw(blake3_hasher *self, const void *context,
                                       size_t context_len) {
  blake3_hasher context_hasher;
  hasher_init_base(&context_hasher, IV, DERIVE_KEY_CONTEXT);
  blake3_hasher_update(&context_hasher, context, context_len);
  uint8_t context_key[BLAKE3_KEY_LEN];
  blake3_hasher_finalize(&context_hasher, context_key, BLAKE3_KEY_LEN);
  uint32_t context_key_words[8];
  load_key_words(context_key, context_key_words);
  hasher_init_base(self, context_key_words, DERIVE_KEY_MATERIAL);
}

void blake3_hasher_init_derive_key(blake3_hasher *self, const char *context) {
  blake3_hasher_init_derive_key_raw(self, context, strlen(context));
}
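
/* A minimal key-derivation sketch. `key_material` and `key_material_len` are
 * hypothetical placeholders, and the context string is only an example of
 * the recommended hardcoded, application-specific style:
 *
 *   blake3_hasher kdf;
 *   uint8_t subkey[BLAKE3_KEY_LEN];
 *   blake3_hasher_init_derive_key(&kdf, "myapp 2021-05-05 session key");
 *   blake3_hasher_update(&kdf, key_material, key_material_len);
 *   blake3_hasher_finalize(&kdf, subkey, BLAKE3_KEY_LEN);
 */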

// As described in hasher_push_cv() below, we do "lazy merging", delaying
// merges until right before the next CV is about to be added. This is
// different from the reference implementation. Another difference is that we
// aren't always merging 1 chunk at a time. Instead, each CV might represent
// any power-of-two number of chunks, as long as the smaller-above-larger stack
// order is maintained. Instead of the "count the trailing 0-bits" algorithm
// described in the spec, we use a "count the total number of 1-bits" variant
// that doesn't require us to retain the subtree size of the CV on top of the
// stack. The principle is the same: each CV that should remain in the stack is
// represented by a 1-bit in the total number of chunks (or bytes) so far.
INLINE void hasher_merge_cv_stack(blake3_hasher *self, uint64_t total_len) {
  size_t post_merge_stack_len = (size_t)popcnt(total_len);
  while (self->cv_stack_len > post_merge_stack_len) {
    uint8_t *parent_node =
        &self->cv_stack[(self->cv_stack_len - 2) * BLAKE3_OUT_LEN];
    output_t output = parent_output(parent_node, self->key, self->chunk.flags);
    output_chaining_value(&output, parent_node);
    self->cv_stack_len -= 1;
  }
}
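
// For example, if 6 chunks (binary 110) have been hashed so far, the stack
// should hold exactly two CVs: one covering the first 4 chunks and one
// covering the next 2, matching the two 1-bits in the total.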

// In reference_impl.rs, we merge the new CV with existing CVs from the stack
// before pushing it. We can do that because we know more input is coming, so
// we know none of the merges are root.
//
// This setting is different. We want to feed as much input as possible to
// compress_subtree_wide(), without setting aside anything for the chunk_state.
// If the user gives us 64 KiB, we want to parallelize over all 64 KiB at once
// as a single subtree, if at all possible.
//
// This leads to two problems:
// 1) This 64 KiB input might be the only call that ever gets made to update.
//    In this case, the root node of the 64 KiB subtree would be the root node
//    of the whole tree, and it would need to be ROOT finalized. We can't
//    compress it until we know.
// 2) This 64 KiB input might complete a larger tree, whose root node is
//    similarly going to be the root of the whole tree. For example, maybe
//    we have 192 KiB (that is, 128 + 64) hashed so far. We can't compress the
//    node at the root of the 256 KiB subtree until we know how to finalize it.
//
// The second problem is solved with "lazy merging". That is, when we're about
// to add a CV to the stack, we don't merge it with anything first, as the
// reference impl does. Instead we do merges using the *previous* CV that was
// added, which is sitting on top of the stack, and we put the new CV
// (unmerged) on top of the stack afterwards. This guarantees that we never
// merge the root node until finalize().
//
// Solving the first problem requires an additional tool,
// compress_subtree_to_parent_node(). That function always returns the top
// *two* chaining values of the subtree it's compressing. We then do lazy
// merging with each of them separately, so that the second CV will always
// remain unmerged. (That also helps us support extendable output when we're
// hashing an input all-at-once.)
INLINE void hasher_push_cv(blake3_hasher *self, uint8_t new_cv[BLAKE3_OUT_LEN],
                           uint64_t chunk_counter) {
  hasher_merge_cv_stack(self, chunk_counter);
  memcpy(&self->cv_stack[self->cv_stack_len * BLAKE3_OUT_LEN], new_cv,
         BLAKE3_OUT_LEN);
  self->cv_stack_len += 1;
}

void blake3_hasher_update(blake3_hasher *self, const void *input,
                          size_t input_len) {
  // Explicitly checking for zero avoids causing UB by passing a null pointer
  // to memcpy. This comes up in practice with things like:
  //   std::vector<uint8_t> v;
  //   blake3_hasher_update(&hasher, v.data(), v.size());
  if (input_len == 0) {
    return;
  }

  const uint8_t *input_bytes = (const uint8_t *)input;

  // If we have some partial chunk bytes in the internal chunk_state, we need
  // to finish that chunk first.
  if (chunk_state_len(&self->chunk) > 0) {
    size_t take = BLAKE3_CHUNK_LEN - chunk_state_len(&self->chunk);
    if (take > input_len) {
      take = input_len;
    }
    chunk_state_update(&self->chunk, input_bytes, take);
    input_bytes += take;
    input_len -= take;
    // If we've filled the current chunk and there's more coming, finalize this
    // chunk and proceed. In this case we know it's not the root.
    if (input_len > 0) {
      output_t output = chunk_state_output(&self->chunk);
      uint8_t chunk_cv[32];
      output_chaining_value(&output, chunk_cv);
      hasher_push_cv(self, chunk_cv, self->chunk.chunk_counter);
      chunk_state_reset(&self->chunk, self->key, self->chunk.chunk_counter + 1);
    } else {
      return;
    }
  }

  // Now the chunk_state is clear, and we have more input. If there's more than
  // a single chunk (so, definitely not the root chunk), hash the largest whole
  // subtree we can, with the full benefits of SIMD (and maybe in the future,
  // multi-threading) parallelism. Two restrictions:
  // - The subtree has to be a power-of-2 number of chunks. Only subtrees along
  //   the right edge can be incomplete, and we don't know where the right edge
  //   is going to be until we get to finalize().
  // - The subtree must evenly divide the total number of chunks up until this
  //   point (if total is not 0). If the current incomplete subtree is only
  //   waiting for 1 more chunk, we can't hash a subtree of 4 chunks. We have
  //   to complete the current subtree first.
  // Because we might need to break up the input to form powers of 2, or to
  // evenly divide what we already have, this part runs in a loop.
  while (input_len > BLAKE3_CHUNK_LEN) {
    size_t subtree_len = round_down_to_power_of_2(input_len);
    uint64_t count_so_far = self->chunk.chunk_counter * BLAKE3_CHUNK_LEN;
    // Shrink the subtree_len until it evenly divides the count so far. We know
    // that subtree_len itself is a power of 2, so we can use a bitmasking
    // trick instead of an actual remainder operation. (Note that if the caller
    // consistently passes power-of-2 inputs of the same size, as is hopefully
    // typical, this loop condition will always fail, and subtree_len will
    // always be the full length of the input.)
    //
    // An aside: We don't have to shrink subtree_len quite this much. For
    // example, if count_so_far is 1, we could pass 2 chunks to
    // compress_subtree_to_parent_node. Since we'll get 2 CVs back, we'll still
    // get the right answer in the end, and we might get to use 2-way SIMD
    // parallelism. The problem with this optimization is that it gets us
    // stuck always hashing 2 chunks. The total number of chunks will remain
    // odd, and we'll never graduate to higher degrees of parallelism. See
    // https://github.com/BLAKE3-team/BLAKE3/issues/69.
    while ((((uint64_t)(subtree_len - 1)) & count_so_far) != 0) {
      subtree_len /= 2;
    }
    // The shrunken subtree_len might now be 1 chunk long. If so, hash that one
    // chunk by itself. Otherwise, compress the subtree into a pair of CVs.
    uint64_t subtree_chunks = subtree_len / BLAKE3_CHUNK_LEN;
    if (subtree_len <= BLAKE3_CHUNK_LEN) {
      blake3_chunk_state chunk_state;
      chunk_state_init(&chunk_state, self->key, self->chunk.flags);
      chunk_state.chunk_counter = self->chunk.chunk_counter;
      chunk_state_update(&chunk_state, input_bytes, subtree_len);
      output_t output = chunk_state_output(&chunk_state);
      uint8_t cv[BLAKE3_OUT_LEN];
      output_chaining_value(&output, cv);
      hasher_push_cv(self, cv, chunk_state.chunk_counter);
    } else {
      // This is the high-performance happy path, though getting here depends
      // on the caller giving us a long enough input.
      uint8_t cv_pair[2 * BLAKE3_OUT_LEN];
      compress_subtree_to_parent_node(input_bytes, subtree_len, self->key,
                                      self->chunk.chunk_counter,
                                      self->chunk.flags, cv_pair);
      hasher_push_cv(self, cv_pair, self->chunk.chunk_counter);
      hasher_push_cv(self, &cv_pair[BLAKE3_OUT_LEN],
                     self->chunk.chunk_counter + (subtree_chunks / 2));
    }
    self->chunk.chunk_counter += subtree_chunks;
    input_bytes += subtree_len;
    input_len -= subtree_len;
  }

  // If there's any remaining input less than a full chunk, add it to the chunk
  // state. In that case, also do a final merge loop to make sure the subtree
  // stack doesn't contain any unmerged pairs. The remaining input means we
  // know these merges are non-root. This merge loop isn't strictly necessary
  // here, because hasher_push_cv already does its own merge loop, but it
  // simplifies blake3_hasher_finalize below.
  if (input_len > 0) {
    chunk_state_update(&self->chunk, input_bytes, input_len);
    hasher_merge_cv_stack(self, self->chunk.chunk_counter);
  }
}
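
// A worked example of the subtree loop above: suppose 1 chunk has already
// been hashed and update() receives 8 more chunks. round_down_to_power_of_2
// starts subtree_len at 8 chunks, but the bitmask check shrinks it to 1,
// since that's all the odd count so far evenly divides. Successive passes
// then hash subtrees of 1, 2, and 4 chunks (growing as the total becomes
// evenly divisible), and the final chunk goes into the internal chunk state.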

void blake3_hasher_finalize(const blake3_hasher *self, uint8_t *out,
                            size_t out_len) {
  blake3_hasher_finalize_seek(self, 0, out, out_len);
}

void blake3_hasher_finalize_seek(const blake3_hasher *self, uint64_t seek,
                                 uint8_t *out, size_t out_len) {
  // Explicitly checking for zero avoids causing UB by passing a null pointer
  // to memcpy. This comes up in practice with things like:
  //   std::vector<uint8_t> v;
  //   blake3_hasher_finalize(&hasher, v.data(), v.size());
  if (out_len == 0) {
    return;
  }

  // If the subtree stack is empty, then the current chunk is the root.
  if (self->cv_stack_len == 0) {
    output_t output = chunk_state_output(&self->chunk);
    output_root_bytes(&output, seek, out, out_len);
    return;
  }
  // If there are any bytes in the chunk state, finalize that chunk and do a
  // roll-up merge between that chunk hash and every subtree in the stack. In
  // this case, the extra merge loop at the end of blake3_hasher_update
  // guarantees that none of the subtrees in the stack need to be merged with
  // each other first. Otherwise, if there are no bytes in the chunk state,
  // then the top of the stack is a chunk hash, and we start the merge from
  // that.
  output_t output;
  size_t cvs_remaining;
  if (chunk_state_len(&self->chunk) > 0) {
    cvs_remaining = self->cv_stack_len;
    output = chunk_state_output(&self->chunk);
  } else {
    // There are always at least 2 CVs in the stack in this case.
    cvs_remaining = self->cv_stack_len - 2;
    output = parent_output(&self->cv_stack[cvs_remaining * 32], self->key,
                           self->chunk.flags);
  }
  while (cvs_remaining > 0) {
    cvs_remaining -= 1;
    uint8_t parent_block[BLAKE3_BLOCK_LEN];
    memcpy(parent_block, &self->cv_stack[cvs_remaining * 32], 32);
    output_chaining_value(&output, &parent_block[32]);
    output = parent_output(parent_block, self->key, self->chunk.flags);
  }
  output_root_bytes(&output, seek, out, out_len);
}
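
/* A minimal end-to-end usage sketch, assuming this file is linked with the
 * rest of the BLAKE3 sources in this repo:
 *
 *   #include <stdio.h>
 *   #include "blake3.h"
 *
 *   int main(void) {
 *     blake3_hasher hasher;
 *     uint8_t out[BLAKE3_OUT_LEN];
 *     blake3_hasher_init(&hasher);
 *     blake3_hasher_update(&hasher, "hello", 5);
 *     blake3_hasher_finalize(&hasher, out, BLAKE3_OUT_LEN);
 *     for (size_t i = 0; i < BLAKE3_OUT_LEN; i++)
 *       printf("%02x", out[i]);
 *     printf("\n");
 *     return 0;
 *   }
 */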