nostrdb

an unfairly fast embedded nostr database backed by lmdb
git clone git://jb55.com/nostrdb
Log | Files | Refs | Submodules | README | LICENSE

builder.c (65247B)


      1 /*
      2  * Codegenerator for C, building FlatBuffers.
      3  *
      4  * There are several approaches, some light, some requiring a library,
      5  * some with vectored I/O etc.
      6  *
      7  * Here we focus on a reasonable balance of light code and efficiency.
      8  *
      9  * Builder code is generated to a separate file that includes the
     10  * generated read-only code.
     11  *
     12  * Mutable buffers are not supported in this version.
     13  *
     14  */
     15 
     16 #include <stdlib.h>
     17 #include <string.h>
     18 
     19 #include "flatcc/flatcc_builder.h"
     20 #include "flatcc/flatcc_emitter.h"
     21 
/*
 * `check` is designed to handle incorrect use errors that can be
 * ignored in production of a tested product.
 *
 * `check_error` fails if condition is false and is designed to return an
 * error code in production.
 */

#if FLATCC_BUILDER_ASSERT_ON_ERROR
#define check(cond, reason) FLATCC_BUILDER_ASSERT(cond, reason)
#else
/* Compiled out entirely unless asserts are enabled. */
#define check(cond, reason) ((void)0)
#endif

#if FLATCC_BUILDER_SKIP_CHECKS
#define check_error(cond, err, reason) ((void)0)
#else
/* NOTE: returns `err` from the *enclosing* function on failure. */
#define check_error(cond, err, reason) if (!(cond)) { check(cond, reason); return err; }
#endif
     41 
     42 /* `strnlen` not widely supported. */
     43 static inline size_t pstrnlen(const char *s, size_t max_len)
     44 {
     45     const char *end = memchr(s, 0, max_len);
     46     return end ? (size_t)(end - s) : max_len;
     47 }
     48 #undef strnlen
     49 #define strnlen pstrnlen
     50 
/* Padding can be up to 255 zeroes, and 1 zero string termination byte.
 * When two paddings are combined at nested buffers, we need twice that,
 * hence 512 bytes. Visible to emitter so it can test for zero padding
 * in iov. */
const uint8_t flatcc_builder_padding_base[512] = { 0 };
#define _pad flatcc_builder_padding_base
     56 
/* Short aliases for the FlatBuffers wire types. */
#define uoffset_t flatbuffers_uoffset_t
#define soffset_t flatbuffers_soffset_t
#define voffset_t flatbuffers_voffset_t
#define utype_t flatbuffers_utype_t

/* Endian-aware writers converting to protocol-endian (pe) encoding. */
#define write_uoffset __flatbuffers_uoffset_write_to_pe
#define write_voffset  __flatbuffers_voffset_write_to_pe
#define write_identifier __flatbuffers_uoffset_write_to_pe
#define write_utype __flatbuffers_utype_write_to_pe

/* Sizes and element-count limits derived from the wire types. */
#define field_size sizeof(uoffset_t)
#define max_offset_count FLATBUFFERS_COUNT_MAX(field_size)
#define union_size sizeof(flatcc_builder_union_ref_t)
#define max_union_count FLATBUFFERS_COUNT_MAX(union_size)
#define utype_size sizeof(utype_t)
#define max_utype_count FLATBUFFERS_COUNT_MAX(utype_size)

#define max_string_len FLATBUFFERS_COUNT_MAX(1)
#define identifier_size FLATBUFFERS_IDENTIFIER_SIZE


#define iovec_t flatcc_iovec_t
#define frame_size sizeof(__flatcc_builder_frame_t)
/* NOTE: `frame(x)` expands against a builder `B` visible in the
 * enclosing scope - it reads the top entry of the frame stack. */
#define frame(x) (B->frame[0].x)
     81 
     82 
     83 /* `align` must be a power of 2. */
     84 static inline uoffset_t alignup_uoffset(uoffset_t x, size_t align)
     85 {
     86     return (x + (uoffset_t)align - 1u) & ~((uoffset_t)align - 1u);
     87 }
     88 
     89 static inline size_t alignup_size(size_t x, size_t align)
     90 {
     91     return (x + align - 1u) & ~(align - 1u);
     92 }
     93 
     94 
/* Describes one cached vtable so identical vtables can be shared. */
typedef struct vtable_descriptor vtable_descriptor_t;
struct vtable_descriptor {
    /* Where the vtable is emitted. */
    flatcc_builder_ref_t vt_ref;
    /* Which buffer it was emitted to. */
    uoffset_t nest_id;
    /* Where the vtable is cached. */
    uoffset_t vb_start;
    /* Hash table collision chain. */
    uoffset_t next;
};

/* Scatter/gather list handed to the emitter as one logical write. */
typedef struct flatcc_iov_state flatcc_iov_state_t;
struct flatcc_iov_state {
    /* Total byte length over all entries in `iov`. */
    size_t len;
    /* Number of entries currently used in `iov`. */
    int count;
    flatcc_iovec_t iov[FLATCC_IOV_COUNT_MAX];
};
    113 
    114 #define iov_state_t flatcc_iov_state_t
    115 
    116 /* This assumes `iov_state_t iov;` has been declared in scope */
    117 #define push_iov_cond(base, size, cond) if ((size) > 0 && (cond)) { iov.len += size;\
    118         iov.iov[iov.count].iov_base = (void *)(base); iov.iov[iov.count].iov_len = (size); ++iov.count; }
    119 #define push_iov(base, size) push_iov_cond(base, size, 1)
    120 #define init_iov() { iov.len = 0; iov.count = 0; }
    121 
    122 
    123 int flatcc_builder_default_alloc(void *alloc_context, iovec_t *b, size_t request, int zero_fill, int hint)
    124 {
    125     void *p;
    126     size_t n;
    127 
    128     (void)alloc_context;
    129 
    130     if (request == 0) {
    131         if (b->iov_base) {
    132             FLATCC_BUILDER_FREE(b->iov_base);
    133             b->iov_base = 0;
    134             b->iov_len = 0;
    135         }
    136         return 0;
    137     }
    138     switch (hint) {
    139     case flatcc_builder_alloc_ds:
    140         n = 256;
    141         break;
    142     case flatcc_builder_alloc_ht:
    143         /* Should be exact size, or space size is just wasted. */
    144         n = request;
    145         break;
    146     case flatcc_builder_alloc_fs:
    147         n = sizeof(__flatcc_builder_frame_t) * 8;
    148         break;
    149     case flatcc_builder_alloc_us:
    150         n = 64;
    151         break;
    152     default:
    153         /*
    154          * We have many small structures - vs stack for tables with few
    155          * elements, and few offset fields in patch log. No need to
    156          * overallocate in case of busy small messages.
    157          */
    158         n = 32;
    159         break;
    160     }
    161     while (n < request) {
    162         n *= 2;
    163     }
    164     if (request <= b->iov_len && b->iov_len / 2 >= n) {
    165         /* Add hysteresis to shrink. */
    166         return 0;
    167     }
    168     if (!(p = FLATCC_BUILDER_REALLOC(b->iov_base, n))) {
    169         return -1;
    170     }
    171     /* Realloc might also shrink. */
    172     if (zero_fill && b->iov_len < n) {
    173         memset((uint8_t *)p + b->iov_len, 0, n - b->iov_len);
    174     }
    175     b->iov_base = p;
    176     b->iov_len = n;
    177     return 0;
    178 }
    179 
/* Byte-offset pointer arithmetic into the builder's internal buffers. */
#define T_ptr(base, pos) ((void *)((size_t)(base) + (size_t)(pos)))
#define ds_ptr(pos) (T_ptr(B->buffers[flatcc_builder_alloc_ds].iov_base, (pos)))
#define vs_ptr(pos) (T_ptr(B->buffers[flatcc_builder_alloc_vs].iov_base, (pos)))
#define pl_ptr(pos) (T_ptr(B->buffers[flatcc_builder_alloc_pl].iov_base, (pos)))
#define us_ptr(pos) (T_ptr(B->buffers[flatcc_builder_alloc_us].iov_base, (pos)))
#define vd_ptr(pos) (T_ptr(B->buffers[flatcc_builder_alloc_vd].iov_base, (pos)))
#define vb_ptr(pos) (T_ptr(B->buffers[flatcc_builder_alloc_vb].iov_base, (pos)))
/* Inverse: byte offset of a pointer within the named builder buffer. */
#define vs_offset(ptr) ((uoffset_t)((size_t)(ptr) - (size_t)B->buffers[flatcc_builder_alloc_vs].iov_base))
#define pl_offset(ptr) ((uoffset_t)((size_t)(ptr) - (size_t)B->buffers[flatcc_builder_alloc_pl].iov_base))
#define us_offset(ptr) ((uoffset_t)((size_t)(ptr) - (size_t)B->buffers[flatcc_builder_alloc_us].iov_base))

/* Largest payloads that still leave room for one leading size/offset field. */
#define table_limit (FLATBUFFERS_VOFFSET_MAX - field_size + 1)
#define data_limit (FLATBUFFERS_UOFFSET_MAX - field_size + 1)

/* A null id stores zeroes (borrowed from the shared padding block). */
#define set_identifier(id) memcpy(&B->identifier, (id) ? (void *)(id) : (void *)_pad, identifier_size)

/* Must also return true when no buffer has been started. */
#define is_top_buffer(B) (B->nest_id == 0)
    198 
/*
 * Tables use a stack represention better suited for quickly adding
 * fields to tables, but it must occasionally be refreshed following
 * reallocation or reentry from child frame.
 */
static inline void refresh_ds(flatcc_builder_t *B, uoffset_t type_limit)
{
    iovec_t *buf = B->buffers + flatcc_builder_alloc_ds;

    /* Recompute the cached data-stack pointer and remaining capacity. */
    B->ds = ds_ptr(B->ds_first);
    B->ds_limit = (uoffset_t)buf->iov_len - B->ds_first;
    /*
     * So we don't allocate outside tables representation size, nor our
     * current buffer size.
     */
    if (B->ds_limit > type_limit) {
        B->ds_limit = type_limit;
    }
    /* So exit frame can refresh fast. */
    frame(type_limit) = type_limit;
}
    220 
    221 static int reserve_ds(flatcc_builder_t *B, size_t need, uoffset_t limit)
    222 {
    223     iovec_t *buf = B->buffers + flatcc_builder_alloc_ds;
    224 
    225     if (B->alloc(B->alloc_context, buf, B->ds_first + need, 1, flatcc_builder_alloc_ds)) {
    226         return -1;
    227     }
    228     refresh_ds(B, limit);
    229     return 0;
    230 }
    231 
/*
 * Make sure there is always an extra zero termination on stack
 * even if it isn't emitted such that string updates may count
 * on zero termination being present always.
 *
 * Returns a pointer to `size` fresh bytes on the data stack, or 0 on
 * allocation failure.
 */
static inline void *push_ds(flatcc_builder_t *B, uoffset_t size)
{
    size_t offset;

    offset = B->ds_offset;
    /* `>=` (not `>`) keeps one spare byte for the zero terminator. */
    if ((B->ds_offset += size) >= B->ds_limit) {
        if (reserve_ds(B, B->ds_offset + 1, data_limit)) {
            return 0;
        }
    }
    return B->ds + offset;
}
    249 
    250 static inline void unpush_ds(flatcc_builder_t *B, uoffset_t size)
    251 {
    252     B->ds_offset -= size;
    253     memset(B->ds + B->ds_offset, 0, size);
    254 }
    255 
    256 static inline void *push_ds_copy(flatcc_builder_t *B, const void *data, uoffset_t size)
    257 {
    258     void *p;
    259 
    260     if (!(p = push_ds(B, size))) {
    261         return 0;
    262     }
    263     memcpy(p, data, size);
    264     return p;
    265 }
    266 
/* Reserve aligned space for an inline table field and record its
 * vtable entry; returns the field's stack location or 0 on failure. */
static inline void *push_ds_field(flatcc_builder_t *B, uoffset_t size, uint16_t align, voffset_t id)
{
    uoffset_t offset;

    /*
     * We calculate table field alignment relative to first entry, not
     * header field with vtable offset.
     *
     * Note: >= comparison handles special case where B->ds is not
     * allocated yet and size is 0 so the return value would be mistaken
     * for an error.
     */
    offset = alignup_uoffset(B->ds_offset, align);
    if ((B->ds_offset = offset + size) >= B->ds_limit) {
        if (reserve_ds(B, B->ds_offset + 1, table_limit)) {
            return 0;
        }
    }
    /* vtable entries are offsets from the table header field. */
    B->vs[id] = (voffset_t)(offset + field_size);
    if (id >= B->id_end) {
        B->id_end = id + 1u;
    }
    return B->ds + offset;
}
    291 
/* Like push_ds_field for offset-typed fields: additionally records the
 * field position in the patch log (pl) so the stored reference can be
 * translated to a relative offset when the table ends. */
static inline void *push_ds_offset_field(flatcc_builder_t *B, voffset_t id)
{
    uoffset_t offset;

    offset = alignup_uoffset(B->ds_offset, field_size);
    if ((B->ds_offset = offset + field_size) > B->ds_limit) {
        if (reserve_ds(B, B->ds_offset, table_limit)) {
            return 0;
        }
    }
    B->vs[id] = (voffset_t)(offset + field_size);
    if (id >= B->id_end) {
        B->id_end = id + 1u;
    }
    /* Remember where the offset lives so it can be patched on exit. */
    *B->pl++ = (flatbuffers_voffset_t)offset;
    return B->ds + offset;
}
    309 
    310 static inline void *reserve_buffer(flatcc_builder_t *B, int alloc_type, size_t used, size_t need, int zero_init)
    311 {
    312     iovec_t *buf = B->buffers + alloc_type;
    313 
    314     if (used + need > buf->iov_len) {
    315         if (B->alloc(B->alloc_context, buf, used + need, zero_init, alloc_type)) {
    316             check(0, "memory allocation failed");
    317             return 0;
    318         }
    319     }
    320     return (void *)((size_t)buf->iov_base + used);
    321 }
    322 
/* Reserve vtable-entry (vs) and patch-log (pl) space for a table with
 * up to `count` fields; returns 0 on success, -1 on allocation failure. */
static inline int reserve_fields(flatcc_builder_t *B, int count)
{
    size_t used, need;

    /* Provide faster stack operations for common table operations. */
    used = frame(container.table.vs_end) + frame(container.table.id_end) * sizeof(voffset_t);
    /* Two extra entries leave room for the vtable header fields. */
    need = (size_t)(count + 2) * sizeof(voffset_t);
    if (!(B->vs = reserve_buffer(B, flatcc_builder_alloc_vs, used, need, 1))) {
        return -1;
    }
    /* Move past header for convenience. */
    B->vs += 2;
    used = frame(container.table.pl_end);
    /* Add one to handle special case of first table being empty. */
    need = (size_t)count * sizeof(*(B->pl)) + 1;
    if (!(B->pl = reserve_buffer(B, flatcc_builder_alloc_pl, used, need, 0))) {
        return -1;
    }
    return 0;
}
    343 
/* Allocate the vtable-dedup hash table and compute its width in bits.
 * Also reserves a null vtable descriptor so descriptor index 0 can be
 * used as an error/empty marker. */
static int alloc_ht(flatcc_builder_t *B)
{
    iovec_t *buf = B->buffers + flatcc_builder_alloc_ht;

    size_t size, k;
    /* Allocate null entry so we can check for return errors. */
    FLATCC_ASSERT(B->vd_end == 0);
    if (!reserve_buffer(B, flatcc_builder_alloc_vd, B->vd_end, sizeof(vtable_descriptor_t), 0)) {
        return -1;
    }
    B->vd_end = sizeof(vtable_descriptor_t);
    size = field_size * FLATCC_BUILDER_MIN_HASH_COUNT;
    if (B->alloc(B->alloc_context, buf, size, 1, flatcc_builder_alloc_ht)) {
        return -1;
    }
    /* The allocator may round up; use the largest power of two that fits. */
    while (size * 2 <= buf->iov_len) {
        size *= 2;
    }
    size /= field_size;
    /* ht_width ends up as log2 of the bucket count. */
    for (k = 0; (((size_t)1) << k) < size; ++k) {
    }
    B->ht_width = k;
    return 0;
}
    368 
    369 static inline uoffset_t *lookup_ht(flatcc_builder_t *B, uint32_t hash)
    370 {
    371     uoffset_t *T;
    372 
    373     if (B->ht_width == 0) {
    374         if (alloc_ht(B)) {
    375             return 0;
    376         }
    377     }
    378     T = B->buffers[flatcc_builder_alloc_ht].iov_base;
    379 
    380     return &T[FLATCC_BUILDER_BUCKET_VT_HASH(hash, B->ht_width)];
    381 }
    382 
    383 void flatcc_builder_flush_vtable_cache(flatcc_builder_t *B)
    384 {
    385     iovec_t *buf = B->buffers + flatcc_builder_alloc_ht;
    386 
    387     if (B->ht_width == 0) {
    388         return;
    389     }
    390     memset(buf->iov_base, 0, buf->iov_len);
    391     /* Reserve the null entry. */
    392     B->vd_end = sizeof(vtable_descriptor_t);
    393     B->vb_end = 0;
    394 }
    395 
    396 int flatcc_builder_custom_init(flatcc_builder_t *B,
    397         flatcc_builder_emit_fun *emit, void *emit_context,
    398         flatcc_builder_alloc_fun *alloc, void *alloc_context)
    399 {
    400     /*
    401      * Do not allocate anything here. Only the required buffers will be
    402      * allocated. For simple struct buffers, no allocation is required
    403      * at all.
    404      */
    405     memset(B, 0, sizeof(*B));
    406 
    407     if (emit == 0) {
    408         B->is_default_emitter = 1;
    409         emit = flatcc_emitter;
    410         emit_context = &B->default_emit_context;
    411     }
    412     if (alloc == 0) {
    413         alloc = flatcc_builder_default_alloc;
    414     }
    415     B->alloc_context = alloc_context;
    416     B->alloc = alloc;
    417     B->emit_context = emit_context;
    418     B->emit = emit;
    419     return 0;
    420 }
    421 
    422 int flatcc_builder_init(flatcc_builder_t *B)
    423 {
    424     return flatcc_builder_custom_init(B, 0, 0, 0, 0);
    425 }
    426 
/* Reset the builder for reuse, keeping (or optionally shrinking) its
 * internal buffers. Returns 0 on success, -1 on allocation failure. */
int flatcc_builder_custom_reset(flatcc_builder_t *B, int set_defaults, int reduce_buffers)
{
    iovec_t *buf;
    int i;

    for (i = 0; i < FLATCC_BUILDER_ALLOC_BUFFER_COUNT; ++i) {
        buf = B->buffers + i;
        if (buf->iov_base) {
            /* Don't try to reduce the hash table. */
            if (i != flatcc_builder_alloc_ht &&
                reduce_buffers && B->alloc(B->alloc_context, buf, 1, 1, i)) {
                return -1;
            }
            /* Keep the allocation but clear its content for reuse. */
            memset(buf->iov_base, 0, buf->iov_len);
        } else {
            FLATCC_ASSERT(buf->iov_len == 0);
        }
    }
    B->vb_end = 0;
    if (B->vd_end > 0) {
        /* Reset past null entry. */
        B->vd_end = sizeof(vtable_descriptor_t);
    }
    B->min_align = 0;
    B->emit_start = 0;
    B->emit_end = 0;
    B->level = 0;
    B->limit_level = 0;
    B->ds_offset = 0;
    B->ds_limit = 0;
    B->nest_count = 0;
    B->nest_id = 0;
    /* Needed for correct offset calculation. */
    B->ds = B->buffers[flatcc_builder_alloc_ds].iov_base;
    B->pl = B->buffers[flatcc_builder_alloc_pl].iov_base;
    B->vs = B->buffers[flatcc_builder_alloc_vs].iov_base;
    B->frame = 0;
    if (set_defaults) {
        B->vb_flush_limit = 0;
        B->max_level = 0;
        B->disable_vt_clustering = 0;
    }
    if (B->is_default_emitter) {
        flatcc_emitter_reset(&B->default_emit_context);
    }
    if (B->refmap) {
        flatcc_refmap_reset(B->refmap);
    }
    return 0;
}
    477 
    478 int flatcc_builder_reset(flatcc_builder_t *B)
    479 {
    480     return flatcc_builder_custom_reset(B, 0, 0);
    481 }
    482 
    483 void flatcc_builder_clear(flatcc_builder_t *B)
    484 {
    485     iovec_t *buf;
    486     int i;
    487 
    488     for (i = 0; i < FLATCC_BUILDER_ALLOC_BUFFER_COUNT; ++i) {
    489         buf = B->buffers + i;
    490         B->alloc(B->alloc_context, buf, 0, 0, i);
    491     }
    492     if (B->is_default_emitter) {
    493         flatcc_emitter_clear(&B->default_emit_context);
    494     }
    495     if (B->refmap) {
    496         flatcc_refmap_clear(B->refmap);
    497     }
    498     memset(B, 0, sizeof(*B));
    499 }
    500 
    501 static inline void set_min_align(flatcc_builder_t *B, uint16_t align)
    502 {
    503     if (B->min_align < align) {
    504         B->min_align = align;
    505     }
    506 }
    507 
    508 /*
    509  * It's a max, but the minimum viable alignment is the largest observed
    510  * alignment requirement, but no larger.
    511  */
    512 static inline void get_min_align(uint16_t *align, uint16_t b)
    513 {
    514     if (*align < b) {
    515         *align = b;
    516     }
    517 }
    518 
/* Push a zeroed user frame of `size` bytes and return a pointer to it,
 * or 0 on allocation failure. A hidden header word just below the
 * returned pointer stores the previous frame offset so frames pop in
 * LIFO order. */
void *flatcc_builder_enter_user_frame_ptr(flatcc_builder_t *B, size_t size)
{
    size_t *frame;

    /* Round up and add one word for the hidden back-link header. */
    size = alignup_size(size, sizeof(size_t)) + sizeof(size_t);

    if (!(frame = reserve_buffer(B, flatcc_builder_alloc_us, B->user_frame_end, size, 0))) {
        return 0;
    }
    memset(frame, 0, size);
    *frame++ = B->user_frame_offset;
    B->user_frame_offset = B->user_frame_end + sizeof(size_t);
    B->user_frame_end += size;
    return frame;
}
    534 
    535 size_t flatcc_builder_enter_user_frame(flatcc_builder_t *B, size_t size)
    536 {
    537     size_t *frame;
    538 
    539     size = alignup_size(size, sizeof(size_t)) + sizeof(size_t);
    540 
    541     if (!(frame = reserve_buffer(B, flatcc_builder_alloc_us, B->user_frame_end, size, 0))) {
    542         return 0;
    543     }
    544     memset(frame, 0, size);
    545     *frame++ = B->user_frame_offset;
    546     B->user_frame_offset = B->user_frame_end + sizeof(size_t);
    547     B->user_frame_end += size;
    548     return B->user_frame_offset;
    549 }
    550 
    551 
/* Pop the most recent user frame and return the handle of the frame
 * below it (0 when no frames remain). */
size_t flatcc_builder_exit_user_frame(flatcc_builder_t *B)
{
    size_t *hdr;

    FLATCC_ASSERT(B->user_frame_offset > 0);

    /* hdr[-1] is the hidden back-link written by enter_user_frame. */
    hdr = us_ptr(B->user_frame_offset);
    B->user_frame_end = B->user_frame_offset - sizeof(size_t);
    return B->user_frame_offset = hdr[-1];
}
    562 
/* Unwind the user stack to the frame identified by `handle`, then pop
 * that frame, discarding any frames entered after it. */
size_t flatcc_builder_exit_user_frame_at(flatcc_builder_t *B, size_t handle)
{
    FLATCC_ASSERT(B->user_frame_offset >= handle);

    B->user_frame_offset = handle;
    return flatcc_builder_exit_user_frame(B);
}
    570 
/* Handle of the innermost active user frame, or 0 if none is active. */
size_t flatcc_builder_get_current_user_frame(flatcc_builder_t *B)
{
    return B->user_frame_offset;
}
    575 
/* Translate a user frame handle into a pointer into the user stack.
 * NOTE(review): the pointer appears to be invalidated if the user stack
 * buffer reallocates on a later frame enter - confirm with callers. */
void *flatcc_builder_get_user_frame_ptr(flatcc_builder_t *B, size_t handle)
{
    return us_ptr(handle);
}
    580 
/* Push a new build frame, growing the frame stack as needed and saving
 * the current ds window and alignment so exit_frame can restore them.
 * Returns 0 on success, -1 on level limit or allocation failure. */
static int enter_frame(flatcc_builder_t *B, uint16_t align)
{
    if (++B->level > B->limit_level) {
        if (B->max_level > 0 && B->level > B->max_level) {
            return -1;
        }
        /* The frame stack may reallocate; recompute the frame pointer. */
        if (!(B->frame = reserve_buffer(B, flatcc_builder_alloc_fs,
                        (size_t)(B->level - 1) * frame_size, frame_size, 0))) {
            return -1;
        }
        B->limit_level = (int)(B->buffers[flatcc_builder_alloc_fs].iov_len / frame_size);
        if (B->max_level > 0 && B->max_level < B->limit_level) {
            B->limit_level = B->max_level;
        }
    } else {
        ++B->frame;
    }
    frame(ds_offset) = B->ds_offset;
    frame(align) = B->align;
    B->align = align;
    /* Note: do not assume padding before first has been allocated! */
    frame(ds_first) = B->ds_first;
    frame(type_limit) = data_limit;
    /* Child frame gets a fresh, 8-byte aligned ds window. */
    B->ds_first = alignup_uoffset(B->ds_first + B->ds_offset, 8);
    B->ds_offset = 0;
    return 0;
}
    608 
/* Pop the current build frame, restoring the parent's ds window and
 * alignment while folding this frame's alignment into min_align. */
static inline void exit_frame(flatcc_builder_t *B)
{
    /* Re-zero the space this frame used so the stack stays zeroed. */
    memset(B->ds, 0, B->ds_offset);
    B->ds_offset = frame(ds_offset);
    B->ds_first = frame(ds_first);
    refresh_ds(B, frame(type_limit));

    /*
     * Restore local alignment: e.g. a table should not change alignment
     * because a child table was just created elsewhere in the buffer,
     * but the overall alignment (min align), should be aware of it.
     * Each buffer has its own min align that then migrates up without
     * being affected by sibling or child buffers.
     */
    set_min_align(B, B->align);
    B->align = frame(align);

    --B->frame;
    --B->level;
}
    629 
/* Padding bytes needed before a front-emitted object of `size` so it
 * lands on an `align` boundary; the front grows downward from
 * emit_start. `align` must be a power of 2. */
static inline uoffset_t front_pad(flatcc_builder_t *B, uoffset_t size, uint16_t align)
{
    return (uoffset_t)(B->emit_start - (flatcc_builder_ref_t)size) & (align - 1u);
}
    634 
/* Padding bytes needed after emit_end to reach the next `align`
 * boundary. `align` must be a power of 2. */
static inline uoffset_t back_pad(flatcc_builder_t *B, uint16_t align)
{
    return (uoffset_t)(B->emit_end) & (align - 1u);
}
    639 
/* Emit an iov in front of all previously emitted front data and return
 * its reference (the new emit_start), or 0 on overflow or emitter
 * failure. */
static inline flatcc_builder_ref_t emit_front(flatcc_builder_t *B, iov_state_t *iov)
{
    flatcc_builder_ref_t ref;

    /*
     * We might have overflow when including headers, but without
     * headers we should have checks to prevent overflow in the
     * uoffset_t range, hence we subtract 16 to be safe. With that
     * guarantee we can also make a safe check on the soffset_t range.
     *
     * We only allow buffers half the theoritical size of
     * FLATBUFFERS_UOFFSET_MAX so we can safely use signed references.
     *
     * NOTE: vtables vt_offset field is signed, and the check in create
     * table only ensures the signed limit. The check would fail if the
     * total buffer size could grow beyond UOFFSET_MAX, and we prevent
     * that by limiting the lower end to SOFFSET_MIN, and the upper end
     * at emit_back to SOFFSET_MAX.
     */
    ref = B->emit_start - (flatcc_builder_ref_t)iov->len;
    if ((iov->len > 16 && iov->len - 16 > FLATBUFFERS_UOFFSET_MAX) || ref >= B->emit_start) {
        check(0, "buffer too large to represent");
        return 0;
    }
    if (B->emit(B->emit_context, iov->iov, iov->count, ref, iov->len)) {
        check(0, "emitter rejected buffer content");
        return 0;
    }
    return B->emit_start = ref;
}
    670 
/* Emit an iov after all previously emitted back data (only vtables and
 * end padding go to the back) and return ref + 1, or 0 on failure. */
static inline flatcc_builder_ref_t emit_back(flatcc_builder_t *B, iov_state_t *iov)
{
    flatcc_builder_ref_t ref;

    ref = B->emit_end;
    B->emit_end = ref + (flatcc_builder_ref_t)iov->len;
    /*
     * Similar to emit_front check, but since we only emit vtables and
     * padding at the back, we are not concerned with iov->len overflow,
     * only total buffer overflow.
     *
     * With this check, vtable soffset references at table header can
     * still overflow in extreme cases, so this must be checked
     * separately.
     */
    if (B->emit_end < ref) {
        check(0, "buffer too large to represent");
        return 0;
    }
    if (B->emit(B->emit_context, iov->iov, iov->count, ref, iov->len)) {
        check(0, "emitter rejected buffer content");
        return 0;
    }
    /*
     * Back references always return ref + 1 because ref == 0 is valid and
     * should not be mistaken for error. vtables understand this.
     */
    return ref + 1;
}
    700 
    701 /* If nested we cannot pad the end of the buffer without moving the entire buffer, so we don't. */
    702 static int align_buffer_end(flatcc_builder_t *B, uint16_t *align, uint16_t block_align, int is_nested)
    703 {
    704     size_t end_pad;
    705     iov_state_t iov;
    706 
    707     block_align = block_align ? block_align : B->block_align ? B->block_align : 1;
    708     get_min_align(align, field_size);
    709     get_min_align(align, block_align);
    710     /* Pad end of buffer to multiple. */
    711     if (!is_nested) {
    712         end_pad = back_pad(B, *align);
    713         if (end_pad) {
    714             init_iov();
    715             push_iov(_pad, end_pad);
    716             if (0 == emit_back(B, &iov)) {
    717                 check(0, "emitter rejected buffer content");
    718                 return -1;
    719             }
    720         }
    721     }
    722     return 0;
    723 }
    724 
/* Copy an already finished flatbuffer (`data`, `size`) into the buffer
 * under construction; when nested, it is wrapped as a ubyte vector with
 * a leading size field. Returns a reference or 0 on failure. */
flatcc_builder_ref_t flatcc_builder_embed_buffer(flatcc_builder_t *B,
        uint16_t block_align,
        const void *data, size_t size, uint16_t align, flatcc_builder_buffer_flags_t flags)
{
    uoffset_t size_field, pad;
    iov_state_t iov;
    int with_size = (flags & flatcc_builder_with_size) != 0;

    if (align_buffer_end(B, &align, block_align, !is_top_buffer(B))) {
        return 0;
    }
    pad = front_pad(B, (uoffset_t)(size + (with_size ? field_size : 0)), align);
    /* The vector size covers the payload plus its front padding. */
    write_uoffset(&size_field, (uoffset_t)size + pad);
    init_iov();
    /* Add ubyte vector size header if nested buffer. */
    push_iov_cond(&size_field, field_size, !is_top_buffer(B));
    push_iov(data, size);
    push_iov(_pad, pad);
    return emit_front(B, &iov);
}
    745 
/* Emit the buffer header in front of `object_ref`: optional size field
 * (nested or with_size), root offset, optional identifier, and padding.
 * Returns the buffer reference or 0 on failure. */
flatcc_builder_ref_t flatcc_builder_create_buffer(flatcc_builder_t *B,
        const char identifier[identifier_size], uint16_t block_align,
        flatcc_builder_ref_t object_ref, uint16_t align, flatcc_builder_buffer_flags_t flags)
{
    flatcc_builder_ref_t buffer_ref;
    uoffset_t header_pad, id_size = 0;
    uoffset_t object_offset, buffer_size, buffer_base;
    iov_state_t iov;
    flatcc_builder_identifier_t id_out = 0;
    int is_nested = (flags & flatcc_builder_is_nested) != 0;
    int with_size = (flags & flatcc_builder_with_size) != 0;

    if (align_buffer_end(B, &align, block_align, is_nested)) {
        return 0;
    }
    set_min_align(B, align);
    if (identifier) {
        FLATCC_ASSERT(sizeof(flatcc_builder_identifier_t) == identifier_size);
        FLATCC_ASSERT(sizeof(flatcc_builder_identifier_t) == field_size);
        memcpy(&id_out, identifier, identifier_size);
        id_out = __flatbuffers_thash_read_from_le(&id_out);
        write_identifier(&id_out, id_out);
    }
    id_size = id_out ? identifier_size : 0;
    header_pad = front_pad(B, field_size + id_size + (uoffset_t)(with_size ? field_size : 0), align);
    init_iov();
    /* ubyte vectors size field wrapping nested buffer. */
    push_iov_cond(&buffer_size, field_size, is_nested || with_size);
    push_iov(&object_offset, field_size);
    /* Identifiers are not always present in buffer. */
    push_iov(&id_out, id_size);
    push_iov(_pad, header_pad);
    /* Base is where buffer content starts, just after any size field. */
    buffer_base = (uoffset_t)B->emit_start - (uoffset_t)iov.len + (uoffset_t)((is_nested || with_size) ? field_size : 0);
    if (is_nested) {
        write_uoffset(&buffer_size, (uoffset_t)B->buffer_mark - buffer_base);
    } else {
        /* Also include clustered vtables. */
        write_uoffset(&buffer_size, (uoffset_t)B->emit_end - buffer_base);
    }
    write_uoffset(&object_offset, (uoffset_t)object_ref - buffer_base);
    if (0 == (buffer_ref = emit_front(B, &iov))) {
        check(0, "emitter rejected buffer content");
        return 0;
    }
    return buffer_ref;
}
    792 
/* Emit a raw struct (or any blob) aligned at the front of the buffer
 * and return its reference, or 0 on failure. */
flatcc_builder_ref_t flatcc_builder_create_struct(flatcc_builder_t *B, const void *data, size_t size, uint16_t align)
{
    size_t pad;
    iov_state_t iov;

    check(align >= 1, "align cannot be 0");
    set_min_align(B, align);
    pad = front_pad(B, (uoffset_t)size, align);
    init_iov();
    push_iov(data, size);
    /*
     * Normally structs will already be a multiple of their alignment,
     * so this padding will not likely be emitted.
     */
    push_iov(_pad, pad);
    return emit_front(B, &iov);
}
    810 
/*
 * Opens a (possibly nested) buffer frame. The parent buffer's state
 * (block_align, flags, buffer_mark, nest_id, identifier) is saved in
 * the frame so `flatcc_builder_end_buffer` can restore it.
 * Returns 0 on success, -1 on error.
 */
int flatcc_builder_start_buffer(flatcc_builder_t *B,
        const char identifier[identifier_size], uint16_t block_align, flatcc_builder_buffer_flags_t flags)
{
    /*
     * This saves the parent `min_align` in the align field since we
     * shouldn't use that for the current buffer. `exit_frame`
     * automatically aggregates align up, so it is updated when the
     * buffer frame exits.
     */
    if (enter_frame(B, B->min_align)) {
        return -1;
    }
    /* B->align now has parent min_align, and child frames will save it. */
    /* Since we allow objects to be created before the buffer at top level,
       we need to respect min_align in that case. */
    if (!is_top_buffer(B) || B->min_align == 0) {
        B->min_align = 1;
    }
    /* Save the parent block align, and set proper defaults for this buffer. */
    frame(container.buffer.block_align) = B->block_align;
    B->block_align = block_align;
    /* NOTE(review): the assignment sits inside the macro argument here;
     * presumably `frame(x)` expands to a member access so this stores to
     * the frame exactly like `frame(...) = B->buffer_flags` would —
     * verify against the frame() macro definition. */
    frame(container.buffer.flags = B->buffer_flags);
    B->buffer_flags = (uint16_t)flags;
    frame(container.buffer.mark) = B->buffer_mark;
    frame(container.buffer.nest_id) = B->nest_id;
    /*
     * End of buffer when nested. Not defined for top-level because we
     * here (and only here) permit strings etc. to be created before buffer start and
     * because top-level buffer vtables can be clustered.
     */
    B->buffer_mark = B->emit_start;
    /* Must be 0 before and after entering top-level buffer, and unique otherwise. */
    B->nest_id = B->nest_count++;
    frame(container.buffer.identifier) = B->identifier;
    set_identifier(identifier);
    frame(type) = flatcc_builder_buffer;
    return 0;
}
    849 
/*
 * Closes the buffer frame opened by `start_buffer`: emits the buffer
 * header via `flatcc_builder_create_buffer` and restores the parent
 * buffer's saved state. Returns the buffer reference, or 0 on error.
 */
flatcc_builder_ref_t flatcc_builder_end_buffer(flatcc_builder_t *B, flatcc_builder_ref_t root)
{
    flatcc_builder_ref_t buffer_ref;
    flatcc_builder_buffer_flags_t flags;

    /* Only the with_size flag carries over; nesting is derived here. */
    flags = (flatcc_builder_buffer_flags_t)B->buffer_flags & flatcc_builder_with_size;
    flags |= is_top_buffer(B) ? 0 : flatcc_builder_is_nested;
    check(frame(type) == flatcc_builder_buffer, "expected buffer frame");
    /* The buffer as a whole must honor its block alignment. */
    set_min_align(B, B->block_align);
    if (0 == (buffer_ref = flatcc_builder_create_buffer(B, (void *)&B->identifier,
            B->block_align, root, B->min_align, flags))) {
        return 0;
    }
    /* Restore the parent buffer state saved by `start_buffer`. */
    B->buffer_mark = frame(container.buffer.mark);
    B->nest_id = frame(container.buffer.nest_id);
    B->identifier = frame(container.buffer.identifier);
    B->buffer_flags = frame(container.buffer.flags);
    B->block_align = frame(container.buffer.block_align);

    exit_frame(B);
    return buffer_ref;
}
    872 
/*
 * Begins an in-place struct of `size` bytes on the ds stack. The
 * returned pointer is writable until `end_struct`; it may be
 * re-obtained with `flatcc_builder_struct_edit` after stack growth.
 * Returns 0 on error.
 */
void *flatcc_builder_start_struct(flatcc_builder_t *B, size_t size, uint16_t align)
{
    /* Allocate space for the struct on the ds stack. */
    if (enter_frame(B, align)) {
        return 0;
    }
    frame(type) = flatcc_builder_struct;
    refresh_ds(B, data_limit);
    return push_ds(B, (uoffset_t)size);
}
    883 
    884 void *flatcc_builder_struct_edit(flatcc_builder_t *B)
    885 {
    886     return B->ds;
    887 }
    888 
    889 flatcc_builder_ref_t flatcc_builder_end_struct(flatcc_builder_t *B)
    890 {
    891     flatcc_builder_ref_t object_ref;
    892 
    893     check(frame(type) == flatcc_builder_struct, "expected struct frame");
    894     if (0 == (object_ref = flatcc_builder_create_struct(B, B->ds, B->ds_offset, B->align))) {
    895         return 0;
    896     }
    897     exit_frame(B);
    898     return object_ref;
    899 }
    900 
/*
 * Adds `count` elements to the open vector frame's element count,
 * failing if the sum would wrap uoffset_t or exceed `max_count`.
 * Returns 0 on success, -1 on error (count unchanged on error).
 */
static inline int vector_count_add(flatcc_builder_t *B, uoffset_t count, uoffset_t max_count)
{
    uoffset_t n, n1;
    n = frame(container.vector.count);
    n1 = n + count;
    /*
     * This prevents elem_size * count from overflowing iff max_count
     * has been set sensibly. Without this check we might allocate too
     * little on the ds stack and return a buffer the user thinks is
     * much larger, which of course is bad even though the buffer
     * eventually would fail anyway.
     */
    check_error(n <= n1 && n1 <= max_count, -1, "vector too large to represent");
    frame(container.vector.count) = n1;
    return 0;
}
    917 
    918 void *flatcc_builder_extend_vector(flatcc_builder_t *B, size_t count)
    919 {
    920     if (vector_count_add(B, (uoffset_t)count, frame(container.vector.max_count))) {
    921         return 0;
    922     }
    923     return push_ds(B, frame(container.vector.elem_size) * (uoffset_t)count);
    924 }
    925 
    926 void *flatcc_builder_vector_push(flatcc_builder_t *B, const void *data)
    927 {
    928     check(frame(type) == flatcc_builder_vector, "expected vector frame");
    929     check_error(frame(container.vector.count) <= frame(container.vector.max_count), 0, "vector max count exceeded");
    930     frame(container.vector.count) += 1;
    931     return push_ds_copy(B, data, frame(container.vector.elem_size));
    932 }
    933 
/*
 * Appends `count` elements copied from `data` to the open vector.
 * Returns a pointer to the first appended element on the ds stack,
 * or 0 on error.
 */
void *flatcc_builder_append_vector(flatcc_builder_t *B, const void *data, size_t count)
{
    check(frame(type) == flatcc_builder_vector, "expected vector frame");
    if (vector_count_add(B, (uoffset_t)count, frame(container.vector.max_count))) {
        return 0;
    }
    return push_ds_copy(B, data, frame(container.vector.elem_size) * (uoffset_t)count);
}
    942 
    943 flatcc_builder_ref_t *flatcc_builder_extend_offset_vector(flatcc_builder_t *B, size_t count)
    944 {
    945     if (vector_count_add(B, (uoffset_t)count, max_offset_count)) {
    946         return 0;
    947     }
    948     return push_ds(B, (uoffset_t)(field_size * count));
    949 }
    950 
    951 flatcc_builder_ref_t *flatcc_builder_offset_vector_push(flatcc_builder_t *B, flatcc_builder_ref_t ref)
    952 {
    953     flatcc_builder_ref_t *p;
    954 
    955     check(frame(type) == flatcc_builder_offset_vector, "expected offset vector frame");
    956     if (frame(container.vector.count) == max_offset_count) {
    957         return 0;
    958     }
    959     frame(container.vector.count) += 1;
    960     if (0 == (p = push_ds(B, field_size))) {
    961         return 0;
    962     }
    963     *p = ref;
    964     return p;
    965 }
    966 
    967 flatcc_builder_ref_t *flatcc_builder_append_offset_vector(flatcc_builder_t *B, const flatcc_builder_ref_t *refs, size_t count)
    968 {
    969     check(frame(type) == flatcc_builder_offset_vector, "expected offset vector frame");
    970     if (vector_count_add(B, (uoffset_t)count, max_offset_count)) {
    971         return 0;
    972     }
    973     return push_ds_copy(B, refs, (uoffset_t)(field_size * count));
    974 }
    975 
/*
 * Grows the open string by `len` bytes and returns a writable pointer
 * to the first new byte, or 0 on error. The zero terminator is added
 * when the string frame ends, not here.
 */
char *flatcc_builder_extend_string(flatcc_builder_t *B, size_t len)
{
    check(frame(type) == flatcc_builder_string, "expected string frame");
    if (vector_count_add(B, (uoffset_t)len, max_string_len)) {
        return 0;
    }
    return push_ds(B, (uoffset_t)len);
}
    984 
/*
 * Appends `len` bytes copied from `s` to the open string. `s` need not
 * be zero-terminated. Returns a pointer to the appended bytes, or 0 on
 * error.
 */
char *flatcc_builder_append_string(flatcc_builder_t *B, const char *s, size_t len)
{
    check(frame(type) == flatcc_builder_string, "expected string frame");
    if (vector_count_add(B, (uoffset_t)len, max_string_len)) {
        return 0;
    }
    return push_ds_copy(B, s, (uoffset_t)len);
}
    993 
    994 char *flatcc_builder_append_string_str(flatcc_builder_t *B, const char *s)
    995 {
    996     return flatcc_builder_append_string(B, s, strlen(s));
    997 }
    998 
    999 char *flatcc_builder_append_string_strn(flatcc_builder_t *B, const char *s, size_t max_len)
   1000 {
   1001     return flatcc_builder_append_string(B, s, strnlen(s, max_len));
   1002 }
   1003 
/*
 * Removes the last `count` elements from the open vector, releasing
 * their ds stack space. Returns 0 on success, -1 on error.
 */
int flatcc_builder_truncate_vector(flatcc_builder_t *B, size_t count)
{
    check(frame(type) == flatcc_builder_vector, "expected vector frame");
    check_error(frame(container.vector.count) >= count, -1, "cannot truncate vector past empty");
    frame(container.vector.count) -= (uoffset_t)count;
    unpush_ds(B, frame(container.vector.elem_size) * (uoffset_t)count);
    return 0;
}
   1012 
/*
 * Removes the last `count` references from the open offset vector.
 * elem_size is field_size here (set by `start_offset_vector`).
 * Returns 0 on success, -1 on error.
 */
int flatcc_builder_truncate_offset_vector(flatcc_builder_t *B, size_t count)
{
    check(frame(type) == flatcc_builder_offset_vector, "expected offset vector frame");
    check_error(frame(container.vector.count) >= (uoffset_t)count, -1, "cannot truncate vector past empty");
    frame(container.vector.count) -= (uoffset_t)count;
    unpush_ds(B, frame(container.vector.elem_size) * (uoffset_t)count);
    return 0;
}
   1021 
/*
 * Removes the last `len` bytes from the open string.
 * Returns 0 on success, -1 on error.
 */
int flatcc_builder_truncate_string(flatcc_builder_t *B, size_t len)
{
    check(frame(type) == flatcc_builder_string, "expected string frame");
    check_error(frame(container.vector.count) >= len, -1, "cannot truncate string past empty");
    frame(container.vector.count) -= (uoffset_t)len;
    unpush_ds(B, (uoffset_t)len);
    return 0;
}
   1030 
/*
 * Opens a vector frame for elements of `elem_size` bytes; elements are
 * accumulated on the ds stack until `end_vector`. `max_count` bounds
 * the element count (see `vector_count_add`). Returns 0 on success,
 * -1 on error.
 */
int flatcc_builder_start_vector(flatcc_builder_t *B, size_t elem_size, uint16_t align, size_t max_count)
{
    /* The length prefix requires at least uoffset field alignment. */
    get_min_align(&align, field_size);
    if (enter_frame(B, align)) {
        return -1;
    }
    frame(container.vector.elem_size) = (uoffset_t)elem_size;
    frame(container.vector.count) = 0;
    frame(container.vector.max_count) = (uoffset_t)max_count;
    frame(type) = flatcc_builder_vector;
    refresh_ds(B, data_limit);
    return 0;
}
   1044 
/*
 * Opens an offset vector frame; elements are uoffset-sized references
 * accumulated on the ds stack until `end_offset_vector`.
 * Returns 0 on success, -1 on error.
 */
int flatcc_builder_start_offset_vector(flatcc_builder_t *B)
{
    if (enter_frame(B, field_size)) {
        return -1;
    }
    frame(container.vector.elem_size) = field_size;
    frame(container.vector.count) = 0;
    frame(type) = flatcc_builder_offset_vector;
    refresh_ds(B, data_limit);
    return 0;
}
   1056 
   1057 flatcc_builder_ref_t flatcc_builder_create_offset_vector(flatcc_builder_t *B,
   1058         const flatcc_builder_ref_t *vec, size_t count)
   1059 {
   1060     flatcc_builder_ref_t *_vec;
   1061 
   1062     if (flatcc_builder_start_offset_vector(B)) {
   1063         return 0;
   1064     }
   1065     if (!(_vec = flatcc_builder_extend_offset_vector(B, count))) {
   1066         return 0;
   1067     }
   1068     memcpy(_vec, vec, count * field_size);
   1069     return flatcc_builder_end_offset_vector(B);
   1070 }
   1071 
/*
 * Opens a string frame; bytes are accumulated on the ds stack via the
 * extend/append calls until the string frame ends.
 * Returns 0 on success, -1 on error.
 */
int flatcc_builder_start_string(flatcc_builder_t *B)
{
    if (enter_frame(B, 1)) {
        return -1;
    }
    frame(container.vector.elem_size) = 1;
    frame(container.vector.count) = 0;
    frame(type) = flatcc_builder_string;
    refresh_ds(B, data_limit);
    return 0;
}
   1083 
/*
 * Reserves space for `count` table field slots via `reserve_fields`.
 * Returns 0 on success, -1 on error.
 */
int flatcc_builder_reserve_table(flatcc_builder_t *B, int count)
{
    check(count >= 0, "cannot reserve negative count");
    return reserve_fields(B, count);
}
   1089 
   1090 int flatcc_builder_start_table(flatcc_builder_t *B, int count)
   1091 {
   1092     if (enter_frame(B, field_size)) {
   1093         return -1;
   1094     }
   1095     frame(container.table.vs_end) = vs_offset(B->vs);
   1096     frame(container.table.pl_end) = pl_offset(B->pl);
   1097     frame(container.table.vt_hash) = B->vt_hash;
   1098     frame(container.table.id_end) = B->id_end;
   1099     B->vt_hash = 0;
   1100     FLATCC_BUILDER_INIT_VT_HASH(B->vt_hash);
   1101     B->id_end = 0;
   1102     frame(type) = flatcc_builder_table;
   1103     if (reserve_fields(B, count)) {
   1104         return -1;
   1105     }
   1106     refresh_ds(B, table_limit);
   1107     return 0;
   1108 }
   1109 
/*
 * Emits a vtable (converted to protocol endianness when needed) and
 * returns its reference encoded as emit address + 1, or 0 on error.
 */
flatcc_builder_vt_ref_t flatcc_builder_create_vtable(flatcc_builder_t *B,
        const voffset_t *vt, voffset_t vt_size)
{
    flatcc_builder_vt_ref_t vt_ref;
    iov_state_t iov;
    voffset_t *vt_;
    size_t i;

    /*
     * Only top-level buffer can cluster vtables because only it can
     * extend beyond the end.
     *
     * We write the vtable after the referencing table to maintain
     * the construction invariant that any offset reference has
     * valid emitted data at a higher address, and also that any
     * issued negative emit address represents an offset reference
     * to some flatbuffer object or vector (or possibly a root
     * struct).
     *
     * The vt_ref is stored as the reference + 1 to avoid having 0 as a
     * valid reference (which usually means error). It also identifies
     * vtable references as the only uneven references, and the only
     * references that can be used multiple times in the same buffer.
     *
     * We do the vtable conversion here so cached vtables can be built
     * hashed and compared more efficiently, and so end users with
     * direct vtable construction don't have to worry about endianness.
     * This also ensures the hash function works the same wrt.
     * collision frequency.
     */

    if (!flatbuffers_is_native_pe()) {
        /* Make space in vtable cache for temporary endian conversion. */
        if (!(vt_ = reserve_buffer(B, flatcc_builder_alloc_vb, B->vb_end, vt_size, 0))) {
            return 0;
        }
        for (i = 0; i < vt_size / sizeof(voffset_t); ++i) {
            write_voffset(&vt_[i], vt[i]);
        }
        vt = vt_;
        /* We don't need to free the reservation since we don't advance any base pointer. */
    }

    init_iov();
    push_iov(vt, vt_size);
    if (is_top_buffer(B) && !B->disable_vt_clustering) {
        /* Note that `emit_back` already returns ref + 1 as we require for vtables. */
        if (0 == (vt_ref = emit_back(B, &iov))) {
            return 0;
        }
    } else {
        if (0 == (vt_ref = emit_front(B, &iov))) {
            return 0;
        }
        /*
         * We don't have a valid 0 ref here, but to be consistent with
         * clustered vtables we offset by one. This cannot be zero
         * either.
         */
        vt_ref += 1;
    }
    return vt_ref;
}
   1173 
   1174 flatcc_builder_vt_ref_t flatcc_builder_create_cached_vtable(flatcc_builder_t *B,
   1175         const voffset_t *vt, voffset_t vt_size, uint32_t vt_hash)
   1176 {
   1177     vtable_descriptor_t *vd, *vd2;
   1178     uoffset_t *pvd, *pvd_head;
   1179     uoffset_t next;
   1180     voffset_t *vt_;
   1181 
   1182     /* This just gets the hash table slot, we still have to inspect it. */
   1183     if (!(pvd_head = lookup_ht(B, vt_hash))) {
   1184         return 0;
   1185     }
   1186     pvd = pvd_head;
   1187     next = *pvd;
   1188     /* Tracks if there already is a cached copy. */
   1189     vd2 = 0;
   1190     while (next) {
   1191         vd = vd_ptr(next);
   1192         vt_ = vb_ptr(vd->vb_start);
   1193         if (vt_[0] != vt_size || 0 != memcmp(vt, vt_, vt_size)) {
   1194             pvd = &vd->next;
   1195             next = vd->next;
   1196             continue;
   1197         }
   1198         /* Can't share emitted vtables between buffers, */
   1199         if (vd->nest_id != B->nest_id) {
   1200             /* but we don't have to resubmit to cache. */
   1201             vd2 = vd;
   1202             /* See if there is a better match. */
   1203             pvd = &vd->next;
   1204             next = vd->next;
   1205             continue;
   1206         }
   1207         /* Move to front hash strategy. */
   1208         if (pvd != pvd_head) {
   1209             *pvd = vd->next;
   1210             vd->next = *pvd_head;
   1211             *pvd_head = next;
   1212         }
   1213         /* vtable exists and has been emitted within current buffer. */
   1214         return vd->vt_ref;
   1215     }
   1216     /* Allocate new descriptor. */
   1217     if (!(vd = reserve_buffer(B, flatcc_builder_alloc_vd, B->vd_end, sizeof(vtable_descriptor_t), 0))) {
   1218         return 0;
   1219     }
   1220     next = B->vd_end;
   1221     B->vd_end += (uoffset_t)sizeof(vtable_descriptor_t);
   1222 
   1223     /* Identify the buffer this vtable descriptor belongs to. */
   1224     vd->nest_id = B->nest_id;
   1225 
   1226     /* Move to front hash strategy. */
   1227     vd->next = *pvd_head;
   1228     *pvd_head = next;
   1229     if (0 == (vd->vt_ref = flatcc_builder_create_vtable(B, vt, vt_size))) {
   1230         return 0;
   1231     }
   1232     if (vd2) {
   1233         /* Reuse cached copy. */
   1234         vd->vb_start = vd2->vb_start;
   1235     } else {
   1236         if (B->vb_flush_limit && B->vb_flush_limit < B->vb_end + vt_size) {
   1237             flatcc_builder_flush_vtable_cache(B);
   1238         } else {
   1239             /* Make space in vtable cache. */
   1240             if (!(vt_ = reserve_buffer(B, flatcc_builder_alloc_vb, B->vb_end, vt_size, 0))) {
   1241                 return -1;
   1242             }
   1243             vd->vb_start = B->vb_end;
   1244             B->vb_end += vt_size;
   1245             memcpy(vt_, vt, vt_size);
   1246         }
   1247     }
   1248     return vd->vt_ref;
   1249 }
   1250 
   1251 flatcc_builder_ref_t flatcc_builder_create_table(flatcc_builder_t *B, const void *data, size_t size, uint16_t align,
   1252         flatbuffers_voffset_t *offsets, int offset_count, flatcc_builder_vt_ref_t vt_ref)
   1253 {
   1254     int i;
   1255     uoffset_t pad, vt_offset, vt_offset_field, vt_base, base, offset, *offset_field;
   1256     iov_state_t iov;
   1257 
   1258     check(offset_count >= 0, "expected non-negative offset_count");
   1259     /*
   1260      * vtable references are offset by 1 to avoid confusion with
   1261      * 0 as an error reference. It also uniquely identifies them
   1262      * as vtables being the only uneven reference type.
   1263      */
   1264     check(vt_ref & 1, "invalid vtable referenc");
   1265     get_min_align(&align, field_size);
   1266     set_min_align(B, align);
   1267     /* Alignment is calculated for the first element, not the header. */
   1268     pad = front_pad(B, (uoffset_t)size, align);
   1269     base = (uoffset_t)B->emit_start - (uoffset_t)(pad + size + field_size);
   1270     /* Adjust by 1 to get unencoded vtable reference. */
   1271     vt_base = (uoffset_t)(vt_ref - 1);
   1272     vt_offset = base - vt_base;
   1273     /* Avoid overflow. */
   1274     if (base - vt_offset != vt_base) {
   1275         return -1;
   1276     }
   1277     /* Protocol endian encoding. */
   1278     write_uoffset(&vt_offset_field, vt_offset);
   1279     for (i = 0; i < offset_count; ++i) {
   1280         offset_field = (uoffset_t *)((size_t)data + offsets[i]);
   1281         offset = *offset_field - base - offsets[i] - (uoffset_t)field_size;
   1282         write_uoffset(offset_field, offset);
   1283     }
   1284     init_iov();
   1285     push_iov(&vt_offset_field, field_size);
   1286     push_iov(data, size);
   1287     push_iov(_pad, pad);
   1288     return emit_front(B, &iov);
   1289 }
   1290 
   1291 int flatcc_builder_check_required_field(flatcc_builder_t *B, flatbuffers_voffset_t id)
   1292 {
   1293     check(frame(type) == flatcc_builder_table, "expected table frame");
   1294 
   1295     return id < B->id_end && B->vs[id] != 0;
   1296 }
   1297 
/*
 * Verifies that a union's type field (id - 1) and value field (id) on
 * the open table are consistent: the value must be present iff the
 * stored type is non-NONE. Returns 1 when consistent, 0 otherwise.
 */
int flatcc_builder_check_union_field(flatcc_builder_t *B, flatbuffers_voffset_t id)
{
    check(frame(type) == flatcc_builder_table, "expected table frame");

    /* The value field cannot be at id 0 because the type precedes it. */
    if (id == 0 || id >= B->id_end) {
        return 0;
    }
    /* Type field absent: value must be absent too. */
    if (B->vs[id - 1] == 0) {
        return B->vs[id] == 0;
    }
    /* Type field set and non-NONE: value must be present. */
    if (*(uint8_t *)(B->ds + B->vs[id - 1])) {
        return B->vs[id] != 0;
    }
    /* Type is NONE: value must be absent. */
    return B->vs[id] == 0;
}
   1313 
   1314 int flatcc_builder_check_required(flatcc_builder_t *B, const flatbuffers_voffset_t *required, int count)
   1315 {
   1316     int i;
   1317 
   1318     check(frame(type) == flatcc_builder_table, "expected table frame");
   1319 
   1320     if (B->id_end < count) {
   1321         return 0;
   1322     }
   1323     for (i = 0; i < count; ++i) {
   1324         if (B->vs[required[i]] == 0) {
   1325             return 0;
   1326         }
   1327     }
   1328     return 1;
   1329 }
   1330 
/*
 * Closes the table frame: finalizes the vtable header on the vs stack,
 * emits (or reuses) the vtable via the cache, emits the table body,
 * and restores the parent table state. Returns the table reference,
 * or 0 on error.
 */
flatcc_builder_ref_t flatcc_builder_end_table(flatcc_builder_t *B)
{
    voffset_t *vt, vt_size;
    flatcc_builder_ref_t table_ref, vt_ref;
    int pl_count;
    voffset_t *pl;
    size_t tsize;

    check(frame(type) == flatcc_builder_table, "expected table frame");

    /* We have `ds_limit`, so we should not have to check for overflow here. */

    /* The two header slots precede the per-field entries on the vs stack. */
    vt = B->vs - 2;
    vt_size = (voffset_t)(sizeof(voffset_t) * (B->id_end + 2u));
    /* Update vtable header fields, first vtable size, then object table size. */
    vt[0] = vt_size;
    /*
     * The `ds` buffer is always at least `field_size` aligned but excludes the
     * initial vtable offset field. Therefore `field_size` is added here
     * to the total table size in the vtable.
     */
    tsize = (size_t)(B->ds_offset + field_size);
    /*
     * Tables are limited to 64K in standard FlatBuffers format due to the voffset
     * 16 bit size, but we must also be able to store the table size, so the
     * table payload has to be slightly less than that.
     */
    check(tsize <= FLATBUFFERS_VOFFSET_MAX, "table too large");
    vt[1] = (voffset_t)tsize;
    FLATCC_BUILDER_UPDATE_VT_HASH(B->vt_hash, (uint32_t)vt[0], (uint32_t)vt[1]);
    /* Find already emitted vtable, or emit a new one. */
    if (!(vt_ref = flatcc_builder_create_cached_vtable(B, vt, vt_size, B->vt_hash))) {
        return 0;
    }
    /* Clear vs stack so it is ready for the next vtable (ds stack is cleared by exit frame). */
    memset(vt, 0, vt_size);

    /* pl holds the ds-relative positions of offset fields needing relocation. */
    pl = pl_ptr(frame(container.table.pl_end));
    pl_count = (int)(B->pl - pl);
    if (0 == (table_ref = flatcc_builder_create_table(B, B->ds, B->ds_offset, B->align, pl, pl_count, vt_ref))) {
        return 0;
    }
    /* Restore parent table construction state saved by `start_table`. */
    B->vt_hash = frame(container.table.vt_hash);
    B->id_end = frame(container.table.id_end);
    B->vs = vs_ptr(frame(container.table.vs_end));
    B->pl = pl_ptr(frame(container.table.pl_end));
    exit_frame(B);
    return table_ref;
}
   1380 
/*
 * Emits a vector of `count` elements copied from `data` with its
 * uoffset length prefix. Returns the vector reference, or 0 on error.
 */
flatcc_builder_ref_t flatcc_builder_create_vector(flatcc_builder_t *B,
        const void *data, size_t count, size_t elem_size, uint16_t align, size_t max_count)
{
    /*
     * Note: it is important that vec_size is uoffset not size_t
     * in case sizeof(uoffset_t) > sizeof(size_t) because max_count is
     * defined in terms of uoffset_t representation size, and also
     * because we risk accepting too large a vector even if max_count is
     * not violated.
     */
    uoffset_t vec_size, vec_pad, length_prefix;
    iov_state_t iov;

    check_error(count <= max_count, 0, "vector max_count violated");
    get_min_align(&align, field_size);
    set_min_align(B, align);
    vec_size = (uoffset_t)count * (uoffset_t)elem_size;
    /*
     * That can happen on 32 bit systems when uoffset_t is defined as 64-bit.
     * `emit_front/back` captures overflow, but not if our size type wraps first.
     */
#if FLATBUFFERS_UOFFSET_MAX > SIZE_MAX
    check_error(vec_size < SIZE_MAX, 0, "vector larger than address space");
#endif
    write_uoffset(&length_prefix, (uoffset_t)count);
    /* Alignment is calculated for the first element, not the header. */
    vec_pad = front_pad(B, vec_size, align);
    init_iov();
    push_iov(&length_prefix, field_size);
    push_iov(data, vec_size);
    push_iov(_pad, vec_pad);
    return emit_front(B, &iov);
}
   1414 
   1415 /*
   1416  * Note: FlatBuffers official documentation states that the size field of a
   1417  * vector is a 32-bit element count. It is not quite clear if the
   1418  * intention is to have the size field be of type uoffset_t since tables
   1419  * also have a uoffset_t sized header, or if the vector size should
   1420  * remain unchanged if uoffset is changed to 16- or 64-bits
   1421  * respectively. Since it makes most sense to have a vector compatible
   1422  * with the addressable space, we choose to use uoffset_t as size field,
   1423  * which remains compatible with the default 32-bit version of uoffset_t.
   1424  */
   1425 flatcc_builder_ref_t flatcc_builder_end_vector(flatcc_builder_t *B)
   1426 {
   1427     flatcc_builder_ref_t vector_ref;
   1428 
   1429     check(frame(type) == flatcc_builder_vector, "expected vector frame");
   1430 
   1431     if (0 == (vector_ref = flatcc_builder_create_vector(B, B->ds,
   1432             frame(container.vector.count), frame(container.vector.elem_size),
   1433             B->align, frame(container.vector.max_count)))) {
   1434         return 0;
   1435     }
   1436     exit_frame(B);
   1437     return vector_ref;
   1438 }
   1439 
   1440 size_t flatcc_builder_vector_count(flatcc_builder_t *B)
   1441 {
   1442     return frame(container.vector.count);
   1443 }
   1444 
   1445 void *flatcc_builder_vector_edit(flatcc_builder_t *B)
   1446 {
   1447     return B->ds;
   1448 }
   1449 
/* This function destroys the source content but avoids stack allocation. */
/*
 * Emits an offset (or union value) vector, rewriting the absolute
 * references in `vec` IN PLACE to protocol-endian relative offsets.
 * When `types` is non-null the vector is a union value vector and null
 * entries are permitted iff the matching type is NONE.
 * Returns the vector reference, or 0 on error.
 */
static flatcc_builder_ref_t _create_offset_vector_direct(flatcc_builder_t *B,
        flatcc_builder_ref_t *vec, size_t count, const utype_t *types)
{
    uoffset_t vec_size, vec_pad;
    uoffset_t length_prefix, offset;
    uoffset_t i;
    soffset_t base;
    iov_state_t iov;

    if ((uoffset_t)count > max_offset_count) {
        return 0;
    }
    set_min_align(B, field_size);
    vec_size = (uoffset_t)(count * field_size);
    write_uoffset(&length_prefix, (uoffset_t)count);
    /* Alignment is calculated for the first element, not the header. */
    vec_pad = front_pad(B, vec_size, field_size);
    init_iov();
    push_iov(&length_prefix, field_size);
    push_iov(vec, vec_size);
    push_iov(_pad, vec_pad);
    /* Emit address the vector header will land on. */
    base = B->emit_start - (soffset_t)iov.len;
    for (i = 0; i < (uoffset_t)count; ++i) {
        /*
         * 0 is either end of buffer, start of vtables, or start of
         * buffer depending on the direction in which the buffer is
         * built. None of these can create a valid 0 reference but it
         * is easy to create by mistake when manually building offset
         * vectors.
         *
         * Unions do permit nulls, but only when the type is NONE.
         */
        if (vec[i] != 0) {
            /* Offset relative to the end of the element's own slot. */
            offset = (uoffset_t)
                (vec[i] - base - (soffset_t)(i * field_size) - (soffset_t)field_size);
            write_uoffset(&vec[i], offset);
            if (types) {
                check(types[i] != 0, "union vector cannot have non-null element with type NONE");
            }
        } else {
            if (types) {
                check(types[i] == 0, "union vector cannot have null element without type NONE");
            } else {
                check(0, "offset vector cannot have null element");
            }
        }
    }
    return emit_front(B, &iov);
}
   1500 
/*
 * Emits an offset vector from `count` references in `vec`, rewriting
 * `vec` in place (destructive). Null type vector marks this as a plain
 * offset vector rather than a union value vector.
 */
flatcc_builder_ref_t flatcc_builder_create_offset_vector_direct(flatcc_builder_t *B,
        flatcc_builder_ref_t *vec, size_t count)
{
    return _create_offset_vector_direct(B, vec, count, 0);
}
   1506 
   1507 flatcc_builder_ref_t flatcc_builder_end_offset_vector(flatcc_builder_t *B)
   1508 {
   1509     flatcc_builder_ref_t vector_ref;
   1510 
   1511     check(frame(type) == flatcc_builder_offset_vector, "expected offset vector frame");
   1512     if (0 == (vector_ref = flatcc_builder_create_offset_vector_direct(B,
   1513             (flatcc_builder_ref_t *)B->ds, frame(container.vector.count)))) {
   1514         return 0;
   1515     }
   1516     exit_frame(B);
   1517     return vector_ref;
   1518 }
   1519 
   1520 flatcc_builder_ref_t flatcc_builder_end_offset_vector_for_unions(flatcc_builder_t *B, const utype_t *types)
   1521 {
   1522     flatcc_builder_ref_t vector_ref;
   1523 
   1524     check(frame(type) == flatcc_builder_offset_vector, "expected offset vector frame");
   1525     if (0 == (vector_ref = _create_offset_vector_direct(B,
   1526             (flatcc_builder_ref_t *)B->ds, frame(container.vector.count), types))) {
   1527         return 0;
   1528     }
   1529     exit_frame(B);
   1530     return vector_ref;
   1531 }
   1532 
   1533 void *flatcc_builder_offset_vector_edit(flatcc_builder_t *B)
   1534 {
   1535     return B->ds;
   1536 }
   1537 
   1538 size_t flatcc_builder_offset_vector_count(flatcc_builder_t *B)
   1539 {
   1540     return frame(container.vector.count);
   1541 }
   1542 
/*
 * Adds a union to the open table: the value reference goes in field
 * `id` and the type byte in field `id - 1`. A NONE union (type 0)
 * must have a null value; the type field is written regardless.
 * Returns 0 on success, -1 on error.
 */
int flatcc_builder_table_add_union(flatcc_builder_t *B, int id,
    flatcc_builder_union_ref_t uref)
{
    flatcc_builder_ref_t *pref;
    flatcc_builder_utype_t *putype;

    check(frame(type) == flatcc_builder_table, "expected table frame");
    check_error(uref.type != 0 || uref.value == 0, -1, "expected null value for type NONE");
    if (uref.value != 0) {
        pref = flatcc_builder_table_add_offset(B, id);
        check_error(pref != 0, -1, "unable to add union value");
        *pref = uref.value;
    }
    putype = flatcc_builder_table_add(B, id - 1, utype_size, utype_size);
    check_error(putype != 0, -1, "unable to add union type");
    write_utype(putype, uref.type);
    return 0;
}
   1561 
/*
 * Adds a union vector to the open table: the value vector reference
 * goes in field `id` and the type vector reference in field `id - 1`.
 * Both must be present or both absent; nothing is written when absent.
 * Returns 0 on success, -1 on error.
 */
int flatcc_builder_table_add_union_vector(flatcc_builder_t *B, int id,
        flatcc_builder_union_vec_ref_t uvref)
{
    flatcc_builder_ref_t *pref;

    check(frame(type) == flatcc_builder_table, "expected table frame");
    check_error((uvref.type == 0) == (uvref.value == 0), -1, "expected both type and value vector, or neither");
    if (uvref.type != 0) {
        pref = flatcc_builder_table_add_offset(B, id - 1);
        check_error(pref != 0, -1, "unable to add union member");
        *pref = uvref.type;

        pref = flatcc_builder_table_add_offset(B, id);
        check_error(pref != 0, -1, "unable to add union member");
        *pref = uvref.value;
    }
    return 0;
}
   1580 
/* Creates a union vector (paired type and value vectors) from an array of
 * interleaved union refs. The refs are split into parallel type and value
 * arrays staged on the data stack before the vectors are emitted.
 * Returns { 0, 0 } on failure. */
flatcc_builder_union_vec_ref_t flatcc_builder_create_union_vector(flatcc_builder_t *B,
        const flatcc_builder_union_ref_t *urefs, size_t count)
{
    flatcc_builder_union_vec_ref_t uvref = { 0, 0 };
    flatcc_builder_utype_t *types;
    flatcc_builder_ref_t *refs;
    size_t i;

    if (flatcc_builder_start_offset_vector(B)) {
        return uvref;
    }
    /* Reserve stack space for `count` value references. */
    if (0 == flatcc_builder_extend_offset_vector(B, count)) {
        return uvref;
    }
    /* Reserve a temporary type array immediately after the value refs. */
    if (0 == (types = push_ds(B, (uoffset_t)(utype_size * count)))) {
        return uvref;
    }

    /* Safe even if push_ds caused stack reallocation. */
    refs = flatcc_builder_offset_vector_edit(B);

    /* Split the interleaved union refs into the two parallel arrays. */
    for (i = 0; i < count; ++i) {
        types[i] = urefs[i].type;
        refs[i] = urefs[i].value;
    }
    uvref = flatcc_builder_create_union_vector_direct(B,
            types, refs, count);
    /* No need to clean up after our temporary types vector. */
    exit_frame(B);
    return uvref;
}
   1612 
   1613 flatcc_builder_union_vec_ref_t flatcc_builder_create_union_vector_direct(flatcc_builder_t *B,
   1614         const flatcc_builder_utype_t *types, flatcc_builder_ref_t *data, size_t count)
   1615 {
   1616     flatcc_builder_union_vec_ref_t uvref = { 0, 0 };
   1617 
   1618     if (0 == (uvref.value = _create_offset_vector_direct(B, data, count, types))) {
   1619         return uvref;
   1620     }
   1621     if (0 == (uvref.type = flatcc_builder_create_type_vector(B, types, count))) {
   1622         return uvref;
   1623     }
   1624     return uvref;
   1625 }
   1626 
   1627 flatcc_builder_ref_t flatcc_builder_create_type_vector(flatcc_builder_t *B,
   1628         const flatcc_builder_utype_t *types, size_t count)
   1629 {
   1630     return flatcc_builder_create_vector(B, types, count,
   1631                     utype_size, utype_size, max_utype_count);
   1632 }
   1633 
   1634 int flatcc_builder_start_union_vector(flatcc_builder_t *B)
   1635 {
   1636     if (enter_frame(B, field_size)) {
   1637         return -1;
   1638     }
   1639     frame(container.vector.elem_size) = union_size;
   1640     frame(container.vector.count) = 0;
   1641     frame(type) = flatcc_builder_union_vector;
   1642     refresh_ds(B, data_limit);
   1643     return 0;
   1644 }
   1645 
/* Ends the current union vector frame, producing the paired type and
 * value vector references. Returns { 0, 0 } on failure. */
flatcc_builder_union_vec_ref_t flatcc_builder_end_union_vector(flatcc_builder_t *B)
{
    flatcc_builder_union_vec_ref_t uvref = { 0, 0 };
    flatcc_builder_utype_t *types;
    flatcc_builder_union_ref_t *urefs;
    flatcc_builder_ref_t *refs;
    size_t i, count;

    check(frame(type) == flatcc_builder_union_vector, "expected union vector frame");

    /*
     * We could split the union vector in-place, but then we would have
     * to deal with strict pointer aliasing rules which is not worthwhile
     * so we create a new offset and type vector on the stack.
     *
     * We assume the stack is sufficiently aligned as is.
     */
    count = flatcc_builder_union_vector_count(B);
    /* One contiguous push: value refs first, then the type array. */
    if (0 == (refs = push_ds(B, (uoffset_t)(count * (utype_size + field_size))))) {
        return uvref;
    }
    types = (flatcc_builder_utype_t *)(refs + count);

    /* Safe even if push_ds caused stack reallocation. */
    urefs = flatcc_builder_union_vector_edit(B);

    /* Split the staged union refs into the two parallel arrays. */
    for (i = 0; i < count; ++i) {
        types[i] = urefs[i].type;
        refs[i] = urefs[i].value;
    }
    uvref = flatcc_builder_create_union_vector_direct(B, types, refs, count);
    /* No need to clean up after our temporary types vector. */
    exit_frame(B);
    return uvref;
}
   1681 
   1682 void *flatcc_builder_union_vector_edit(flatcc_builder_t *B)
   1683 {
   1684     return B->ds;
   1685 }
   1686 
   1687 size_t flatcc_builder_union_vector_count(flatcc_builder_t *B)
   1688 {
   1689     return frame(container.vector.count);
   1690 }
   1691 
   1692 flatcc_builder_union_ref_t *flatcc_builder_extend_union_vector(flatcc_builder_t *B, size_t count)
   1693 {
   1694     if (vector_count_add(B, (uoffset_t)count, max_union_count)) {
   1695         return 0;
   1696     }
   1697     return push_ds(B, (uoffset_t)(union_size * count));
   1698 }
   1699 
   1700 int flatcc_builder_truncate_union_vector(flatcc_builder_t *B, size_t count)
   1701 {
   1702     check(frame(type) == flatcc_builder_union_vector, "expected union vector frame");
   1703     check_error(frame(container.vector.count) >= (uoffset_t)count, -1, "cannot truncate vector past empty");
   1704     frame(container.vector.count) -= (uoffset_t)count;
   1705     unpush_ds(B, frame(container.vector.elem_size) * (uoffset_t)count);
   1706     return 0;
   1707 }
   1708 
   1709 flatcc_builder_union_ref_t *flatcc_builder_union_vector_push(flatcc_builder_t *B,
   1710         flatcc_builder_union_ref_t uref)
   1711 {
   1712     flatcc_builder_union_ref_t *p;
   1713 
   1714     check(frame(type) == flatcc_builder_union_vector, "expected union vector frame");
   1715     if (frame(container.vector.count) == max_union_count) {
   1716         return 0;
   1717     }
   1718     frame(container.vector.count) += 1;
   1719     if (0 == (p = push_ds(B, union_size))) {
   1720         return 0;
   1721     }
   1722     *p = uref;
   1723     return p;
   1724 }
   1725 
   1726 flatcc_builder_union_ref_t *flatcc_builder_append_union_vector(flatcc_builder_t *B,
   1727         const flatcc_builder_union_ref_t *urefs, size_t count)
   1728 {
   1729     check(frame(type) == flatcc_builder_union_vector, "expected union vector frame");
   1730     if (vector_count_add(B, (uoffset_t)count, max_union_count)) {
   1731         return 0;
   1732     }
   1733     return push_ds_copy(B, urefs, (uoffset_t)(union_size * count));
   1734 }
   1735 
/* Emits a string: a uoffset length prefix, the `len` bytes of `s`, a zero
 * terminator, and zero padding to field alignment.
 * Returns the string reference, or 0 if `len` exceeds the maximum. */
flatcc_builder_ref_t flatcc_builder_create_string(flatcc_builder_t *B, const char *s, size_t len)
{
    uoffset_t s_pad;
    uoffset_t length_prefix;
    iov_state_t iov;

    if (len > max_string_len) {
        return 0;
    }
    write_uoffset(&length_prefix, (uoffset_t)len);
    /* Add 1 for zero termination. */
    s_pad = front_pad(B, (uoffset_t)len + 1, field_size) + 1;
    init_iov();
    push_iov(&length_prefix, field_size);
    push_iov(s, len);
    /* Padding bytes are zero, so they double as the string terminator. */
    push_iov(_pad, s_pad);
    return emit_front(B, &iov);
}
   1754 
   1755 flatcc_builder_ref_t flatcc_builder_create_string_str(flatcc_builder_t *B, const char *s)
   1756 {
   1757     return flatcc_builder_create_string(B, s, strlen(s));
   1758 }
   1759 
   1760 flatcc_builder_ref_t flatcc_builder_create_string_strn(flatcc_builder_t *B, const char *s, size_t max_len)
   1761 {
   1762     return flatcc_builder_create_string(B, s, strnlen(s, max_len));
   1763 }
   1764 
   1765 flatcc_builder_ref_t flatcc_builder_end_string(flatcc_builder_t *B)
   1766 {
   1767     flatcc_builder_ref_t string_ref;
   1768 
   1769     check(frame(type) == flatcc_builder_string, "expected string frame");
   1770     FLATCC_ASSERT(frame(container.vector.count) == B->ds_offset);
   1771     if (0 == (string_ref = flatcc_builder_create_string(B,
   1772             (const char *)B->ds, B->ds_offset))) {
   1773         return 0;
   1774     }
   1775     exit_frame(B);
   1776     return string_ref;
   1777 }
   1778 
   1779 char *flatcc_builder_string_edit(flatcc_builder_t *B)
   1780 {
   1781     return (char *)B->ds;
   1782 }
   1783 
   1784 size_t flatcc_builder_string_len(flatcc_builder_t *B)
   1785 {
   1786     return frame(container.vector.count);
   1787 }
   1788 
/* Reserves `size` bytes for scalar/struct field `id` in the current table
 * and returns a pointer for the caller to write the value into, or 0 on
 * error (or on repeat add unless FLATCC_BUILDER_ALLOW_REPEAT_TABLE_ADD). */
void *flatcc_builder_table_add(flatcc_builder_t *B, int id, size_t size, uint16_t align)
{
    /*
     * We align the offset relative to the first table field, excluding
     * the header holding the vtable reference. On the stack, `ds_first`
     * is aligned to 8 bytes thanks to the `enter_frame` logic, and this
     * provides a safe way to update the fields on the stack, but here
     * we are concerned with the target buffer alignment.
     *
     * We could also have aligned relative to the end of the table which
     * would allow us to emit each field immediately, but it would be a
     * confusing user experience wrt. field ordering, and it would add
     * more variability to vtable layouts, thus reducing reuse, and
     * frequent emissions to external emitter interface would be
     * sub-optimal. Also, with that approach, the vtable offsets would
     * have to be adjusted at table end.
     *
     * As we have it, each emit occurs at table end, vector end, string
     * end, or buffer end, which might be helpful to various backend
     * processors.
     */
    check(frame(type) == flatcc_builder_table, "expected table frame");
    check(id >= 0 && id <= (int)FLATBUFFERS_ID_MAX, "table id out of range");
    /* Track the largest field alignment seen for the whole table. */
    if (align > B->align) {
        B->align = align;
    }
#if FLATCC_BUILDER_ALLOW_REPEAT_TABLE_ADD
    /* Field already set: return the existing slot for overwrite. */
    if (B->vs[id] != 0) {
        return B->ds + B->vs[id] - field_size;
    }
#else
    if (B->vs[id] != 0) {
        check(0, "table field already set");
        return 0;
    }
#endif
    FLATCC_BUILDER_UPDATE_VT_HASH(B->vt_hash, (uint32_t)id, (uint32_t)size);
    return push_ds_field(B, (uoffset_t)size, align, (voffset_t)id);
}
   1828 
   1829 void *flatcc_builder_table_edit(flatcc_builder_t *B, size_t size)
   1830 {
   1831     check(frame(type) == flatcc_builder_table, "expected table frame");
   1832 
   1833     return B->ds + B->ds_offset - size;
   1834 }
   1835 
   1836 void *flatcc_builder_table_add_copy(flatcc_builder_t *B, int id, const void *data, size_t size, uint16_t align)
   1837 {
   1838     void *p;
   1839 
   1840     if ((p = flatcc_builder_table_add(B, id, size, align))) {
   1841         memcpy(p, data, size);
   1842     }
   1843     return p;
   1844 }
   1845 
   1846 flatcc_builder_ref_t *flatcc_builder_table_add_offset(flatcc_builder_t *B, int id)
   1847 {
   1848     check(frame(type) == flatcc_builder_table, "expected table frame");
   1849     check(id >= 0 && id <= (int)FLATBUFFERS_ID_MAX, "table id out of range");
   1850 #if FLATCC_BUILDER_ALLOW_REPEAT_TABLE_ADD
   1851     if (B->vs[id] != 0) {
   1852         return B->ds + B->vs[id] - field_size;
   1853     }
   1854 #else
   1855     if (B->vs[id] != 0) {
   1856         check(0, "table field already set");
   1857         return 0;
   1858     }
   1859 #endif
   1860     FLATCC_BUILDER_UPDATE_VT_HASH(B->vt_hash, (uint32_t)id, (uint32_t)field_size);
   1861     return push_ds_offset_field(B, (voffset_t)id);
   1862 }
   1863 
   1864 uint16_t flatcc_builder_push_buffer_alignment(flatcc_builder_t *B)
   1865 {
   1866     uint16_t old_min_align = B->min_align;
   1867 
   1868     B->min_align = field_size;
   1869     return old_min_align;
   1870 }
   1871 
   1872 void flatcc_builder_pop_buffer_alignment(flatcc_builder_t *B, uint16_t pushed_align)
   1873 {
   1874     set_min_align(B, pushed_align);
   1875 }
   1876 
   1877 uint16_t flatcc_builder_get_buffer_alignment(flatcc_builder_t *B)
   1878 {
   1879     return B->min_align;
   1880 }
   1881 
   1882 void flatcc_builder_set_vtable_clustering(flatcc_builder_t *B, int enable)
   1883 {
   1884     /* Inverted because we zero all memory in B on init. */
   1885     B->disable_vt_clustering = !enable;
   1886 }
   1887 
   1888 void flatcc_builder_set_block_align(flatcc_builder_t *B, uint16_t align)
   1889 {
   1890     B->block_align = align;
   1891 }
   1892 
   1893 int flatcc_builder_get_level(flatcc_builder_t *B)
   1894 {
   1895     return B->level;
   1896 }
   1897 
   1898 void flatcc_builder_set_max_level(flatcc_builder_t *B, int max_level)
   1899 {
   1900     B->max_level = max_level;
   1901     if (B->limit_level < B->max_level) {
   1902         B->limit_level = B->max_level;
   1903     }
   1904 }
   1905 
   1906 size_t flatcc_builder_get_buffer_size(flatcc_builder_t *B)
   1907 {
   1908     return (size_t)(B->emit_end - B->emit_start);
   1909 }
   1910 
   1911 flatcc_builder_ref_t flatcc_builder_get_buffer_start(flatcc_builder_t *B)
   1912 {
   1913     return B->emit_start;
   1914 }
   1915 
   1916 flatcc_builder_ref_t flatcc_builder_get_buffer_end(flatcc_builder_t *B)
   1917 {
   1918     return B->emit_end;
   1919 }
   1920 
   1921 void flatcc_builder_set_vtable_cache_limit(flatcc_builder_t *B, size_t size)
   1922 {
   1923     B->vb_flush_limit = size;
   1924 }
   1925 
/* Stores the buffer file identifier on the builder.
 * `set_identifier` is an internal macro that operates on `B` implicitly;
 * presumably it copies `identifier_size` bytes — confirm in the macro
 * definition earlier in this file. */
void flatcc_builder_set_identifier(flatcc_builder_t *B, const char identifier[identifier_size])
{
    set_identifier(identifier);
}
   1930 
   1931 enum flatcc_builder_type flatcc_builder_get_type(flatcc_builder_t *B)
   1932 {
   1933     return B->frame ? frame(type) : flatcc_builder_empty;
   1934 }
   1935 
   1936 enum flatcc_builder_type flatcc_builder_get_type_at(flatcc_builder_t *B, int level)
   1937 {
   1938     if (level < 1 || level > B->level) {
   1939         return flatcc_builder_empty;
   1940     }
   1941     return B->frame[level - B->level].type;
   1942 }
   1943 
   1944 void *flatcc_builder_get_direct_buffer(flatcc_builder_t *B, size_t *size_out)
   1945 {
   1946     if (B->is_default_emitter) {
   1947         return flatcc_emitter_get_direct_buffer(&B->default_emit_context, size_out);
   1948     } else {
   1949         if (size_out) {
   1950             *size_out = 0;
   1951         }
   1952     }
   1953     return 0;
   1954 }
   1955 
   1956 void *flatcc_builder_copy_buffer(flatcc_builder_t *B, void *buffer, size_t size)
   1957 {
   1958     /* User is allowed to call tentatively to see if there is support. */
   1959     if (!B->is_default_emitter) {
   1960         return 0;
   1961     }
   1962     buffer = flatcc_emitter_copy_buffer(&B->default_emit_context, buffer, size);
   1963     check(buffer, "default emitter declined to copy buffer");
   1964     return buffer;
   1965 }
   1966 
   1967 void *flatcc_builder_finalize_buffer(flatcc_builder_t *B, size_t *size_out)
   1968 {
   1969     void * buffer;
   1970     size_t size;
   1971 
   1972     size = flatcc_builder_get_buffer_size(B);
   1973 
   1974     if (size_out) {
   1975         *size_out = size;
   1976     }
   1977 
   1978     buffer = FLATCC_BUILDER_ALLOC(size);
   1979 
   1980     if (!buffer) {
   1981         check(0, "failed to allocated memory for finalized buffer");
   1982         goto done;
   1983     }
   1984     if (!flatcc_builder_copy_buffer(B, buffer, size)) {
   1985         check(0, "default emitter declined to copy buffer");
   1986         FLATCC_BUILDER_FREE(buffer);
   1987         buffer = 0;
   1988     }
   1989 done:
   1990     if (!buffer && size_out) {
   1991         *size_out = 0;
   1992     }
   1993     return buffer;
   1994 }
   1995 
/* Allocates alignment-respecting memory and copies the finished buffer
 * into it, rounding the allocation up to a multiple of the buffer's
 * minimum alignment. Caller frees with flatcc_builder_aligned_free.
 * On failure returns 0 and zeroes *size_out. */
void *flatcc_builder_finalize_aligned_buffer(flatcc_builder_t *B, size_t *size_out)
{
    void * buffer;
    size_t align;
    size_t size;

    size = flatcc_builder_get_buffer_size(B);

    if (size_out) {
        *size_out = size;
    }
    align = flatcc_builder_get_buffer_alignment(B);

    /* Round up to alignment; this expression requires `align` to be a
     * nonzero power of two — NOTE(review): presumably guaranteed by the
     * builder's alignment tracking, confirm min_align is never 0 here. */
    size = (size + align - 1) & ~(align - 1);
    buffer = FLATCC_BUILDER_ALIGNED_ALLOC(align, size);

    if (!buffer) {
        goto done;
    }
    if (!flatcc_builder_copy_buffer(B, buffer, size)) {
        FLATCC_BUILDER_ALIGNED_FREE(buffer);
        buffer = 0;
        goto done;
    }
done:
    /* Report zero size on any failure path. */
    if (!buffer && size_out) {
        *size_out = 0;
    }
    return buffer;
}
   2026 
   2027 void *flatcc_builder_aligned_alloc(size_t alignment, size_t size)
   2028 {
   2029     return FLATCC_BUILDER_ALIGNED_ALLOC(alignment, size);
   2030 }
   2031 
   2032 void flatcc_builder_aligned_free(void *p)
   2033 {
   2034     FLATCC_BUILDER_ALIGNED_FREE(p);
   2035 }
   2036 
   2037 void *flatcc_builder_alloc(size_t size)
   2038 {
   2039     return FLATCC_BUILDER_ALLOC(size);
   2040 }
   2041 
   2042 void flatcc_builder_free(void *p)
   2043 {
   2044     FLATCC_BUILDER_FREE(p);
   2045 }
   2046 
   2047 void *flatcc_builder_get_emit_context(flatcc_builder_t *B)
   2048 {
   2049     return B->emit_context;
   2050 }