commit 1f5f1e28a4fff3fa8dd69593b17149b5ef4c7911
parent f30f93f65cb78a508731a682100485dc62908f97
Author: William Casarin <jb55@jb55.com>
Date: Fri, 25 Aug 2023 12:32:30 -0700
nostrdb: pull latest, adding flatcc and lmdb
Diffstat:
104 files changed, 36269 insertions(+), 28 deletions(-)
diff --git a/damus-c/damus-Bridging-Header.h b/damus-c/damus-Bridging-Header.h
@@ -9,4 +9,5 @@
#include "wasm.h"
#include "nostrscript.h"
#include "nostrdb.h"
+#include "lmdb.h"
diff --git a/damus.xcodeproj/project.pbxproj b/damus.xcodeproj/project.pbxproj
@@ -150,6 +150,12 @@
4C3EA67D28FFBBA300C48A62 /* InvoicesView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 4C3EA67C28FFBBA200C48A62 /* InvoicesView.swift */; };
4C3EA67F28FFC01D00C48A62 /* InvoiceView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 4C3EA67E28FFC01D00C48A62 /* InvoiceView.swift */; };
4C42812C298C848200DBF26F /* TranslateView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 4C42812B298C848200DBF26F /* TranslateView.swift */; };
+ 4C4793012A993CDA00489948 /* mdb.c in Sources */ = {isa = PBXBuildFile; fileRef = 4C4793002A993B9A00489948 /* mdb.c */; };
+ 4C4793042A993DC000489948 /* midl.c in Sources */ = {isa = PBXBuildFile; fileRef = 4C4793032A993DB900489948 /* midl.c */; };
+ 4C4793052A993E3200489948 /* builder.c in Sources */ = {isa = PBXBuildFile; fileRef = 4C4792942A9939BD00489948 /* builder.c */; };
+ 4C4793062A993E5300489948 /* json_parser.c in Sources */ = {isa = PBXBuildFile; fileRef = 4C4792C82A9939BD00489948 /* json_parser.c */; };
+ 4C4793072A993E6200489948 /* emitter.c in Sources */ = {isa = PBXBuildFile; fileRef = 4C4792CF2A9939BD00489948 /* emitter.c */; };
+ 4C4793082A993E8900489948 /* refmap.c in Sources */ = {isa = PBXBuildFile; fileRef = 4C4792D12A9939BD00489948 /* refmap.c */; };
4C4DD3DB2A6CA7E8005B4E85 /* ContentParsing.swift in Sources */ = {isa = PBXBuildFile; fileRef = 4C4DD3DA2A6CA7E8005B4E85 /* ContentParsing.swift */; };
4C4E137B2A76D5FB00BDD832 /* MuteThreadNotify.swift in Sources */ = {isa = PBXBuildFile; fileRef = 4C4E137A2A76D5FB00BDD832 /* MuteThreadNotify.swift */; };
4C4E137D2A76D63600BDD832 /* UnmuteThreadNotify.swift in Sources */ = {isa = PBXBuildFile; fileRef = 4C4E137C2A76D63600BDD832 /* UnmuteThreadNotify.swift */; };
@@ -686,6 +692,101 @@
4C3EA67C28FFBBA200C48A62 /* InvoicesView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = InvoicesView.swift; sourceTree = "<group>"; };
4C3EA67E28FFC01D00C48A62 /* InvoiceView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = InvoiceView.swift; sourceTree = "<group>"; };
4C42812B298C848200DBF26F /* TranslateView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = TranslateView.swift; sourceTree = "<group>"; };
+ 4C478E242A9932C100489948 /* Ndb.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = Ndb.swift; sourceTree = "<group>"; };
+ 4C478E262A99353500489948 /* threadpool.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = threadpool.h; sourceTree = "<group>"; };
+ 4C478E272A99354E00489948 /* protected_queue.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = protected_queue.h; sourceTree = "<group>"; };
+ 4C478E282A99357400489948 /* memchr.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = memchr.h; sourceTree = "<group>"; };
+ 4C478E292A99359900489948 /* util.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = util.h; sourceTree = "<group>"; };
+ 4C478E2C2A9935D300489948 /* NdbProfile.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = NdbProfile.swift; sourceTree = "<group>"; };
+ 4C478E2E2A9935D300489948 /* profile_json_parser.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = profile_json_parser.h; sourceTree = "<group>"; };
+ 4C478E2F2A9935D300489948 /* profile_reader.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = profile_reader.h; sourceTree = "<group>"; };
+ 4C478E302A9935D300489948 /* meta_json_parser.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = meta_json_parser.h; sourceTree = "<group>"; };
+ 4C478E312A9935D300489948 /* profile_builder.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = profile_builder.h; sourceTree = "<group>"; };
+ 4C478E322A9935D300489948 /* meta_builder.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = meta_builder.h; sourceTree = "<group>"; };
+ 4C478E332A9935D300489948 /* profile_verifier.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = profile_verifier.h; sourceTree = "<group>"; };
+ 4C478E352A9935D300489948 /* meta_reader.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = meta_reader.h; sourceTree = "<group>"; };
+ 4C478E362A9935D300489948 /* flatbuffers_common_reader.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = flatbuffers_common_reader.h; sourceTree = "<group>"; };
+ 4C478E372A9935D300489948 /* meta_verifier.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = meta_verifier.h; sourceTree = "<group>"; };
+ 4C478E382A9935D300489948 /* flatbuffers_common_builder.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = flatbuffers_common_builder.h; sourceTree = "<group>"; };
+ 4C47928E2A9939BD00489948 /* flatcc.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = flatcc.h; sourceTree = "<group>"; };
+ 4C47928F2A9939BD00489948 /* flatcc_version.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = flatcc_version.h; sourceTree = "<group>"; };
+ 4C4792902A9939BD00489948 /* flatcc_emitter.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = flatcc_emitter.h; sourceTree = "<group>"; };
+ 4C4792912A9939BD00489948 /* flatcc_alloc.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = flatcc_alloc.h; sourceTree = "<group>"; };
+ 4C4792922A9939BD00489948 /* flatcc_json_printer.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = flatcc_json_printer.h; sourceTree = "<group>"; };
+ 4C4792932A9939BD00489948 /* CMakeLists.txt */ = {isa = PBXFileReference; lastKnownFileType = text; path = CMakeLists.txt; sourceTree = "<group>"; };
+ 4C4792942A9939BD00489948 /* builder.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = builder.c; sourceTree = "<group>"; };
+ 4C4792952A9939BD00489948 /* flatcc_verifier.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = flatcc_verifier.h; sourceTree = "<group>"; };
+ 4C4792962A9939BD00489948 /* flatcc_refmap.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = flatcc_refmap.h; sourceTree = "<group>"; };
+ 4C4792972A9939BD00489948 /* flatcc_unaligned.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = flatcc_unaligned.h; sourceTree = "<group>"; };
+ 4C4792992A9939BD00489948 /* grisu3_print.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = grisu3_print.h; sourceTree = "<group>"; };
+ 4C47929A2A9939BD00489948 /* pprintfp.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = pprintfp.h; sourceTree = "<group>"; };
+ 4C47929B2A9939BD00489948 /* pbase64.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = pbase64.h; sourceTree = "<group>"; };
+ 4C47929C2A9939BD00489948 /* punaligned.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = punaligned.h; sourceTree = "<group>"; };
+ 4C47929D2A9939BD00489948 /* LICENSE */ = {isa = PBXFileReference; lastKnownFileType = text; path = LICENSE; sourceTree = "<group>"; };
+ 4C47929E2A9939BD00489948 /* pdiagnostic.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = pdiagnostic.h; sourceTree = "<group>"; };
+ 4C47929F2A9939BD00489948 /* pinttypes.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = pinttypes.h; sourceTree = "<group>"; };
+ 4C4792A02A9939BD00489948 /* pinline.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = pinline.h; sourceTree = "<group>"; };
+ 4C4792A12A9939BD00489948 /* pprintint.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = pprintint.h; sourceTree = "<group>"; };
+ 4C4792A22A9939BD00489948 /* pdiagnostic_pop.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = pdiagnostic_pop.h; sourceTree = "<group>"; };
+ 4C4792A52A9939BD00489948 /* stdalign.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = stdalign.h; sourceTree = "<group>"; };
+ 4C4792A62A9939BD00489948 /* inttypes.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = inttypes.h; sourceTree = "<group>"; };
+ 4C4792A72A9939BD00489948 /* stdbool.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = stdbool.h; sourceTree = "<group>"; };
+ 4C4792A82A9939BD00489948 /* stdint.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = stdint.h; sourceTree = "<group>"; };
+ 4C4792A92A9939BD00489948 /* README */ = {isa = PBXFileReference; lastKnownFileType = text; path = README; sourceTree = "<group>"; };
+ 4C4792AB2A9939BD00489948 /* endian.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = endian.h; sourceTree = "<group>"; };
+ 4C4792AC2A9939BD00489948 /* pversion.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = pversion.h; sourceTree = "<group>"; };
+ 4C4792AD2A9939BD00489948 /* pstdalign.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = pstdalign.h; sourceTree = "<group>"; };
+ 4C4792AE2A9939BD00489948 /* pdiagnostic_push.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = pdiagnostic_push.h; sourceTree = "<group>"; };
+ 4C4792AF2A9939BD00489948 /* pendian_detect.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = pendian_detect.h; sourceTree = "<group>"; };
+ 4C4792B02A9939BD00489948 /* paligned_alloc.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = paligned_alloc.h; sourceTree = "<group>"; };
+ 4C4792B12A9939BD00489948 /* pendian.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = pendian.h; sourceTree = "<group>"; };
+ 4C4792B22A9939BD00489948 /* pstatic_assert.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = pstatic_assert.h; sourceTree = "<group>"; };
+ 4C4792B32A9939BD00489948 /* pwarnings.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = pwarnings.h; sourceTree = "<group>"; };
+ 4C4792B42A9939BD00489948 /* pparsefp.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = pparsefp.h; sourceTree = "<group>"; };
+ 4C4792B52A9939BD00489948 /* README.md */ = {isa = PBXFileReference; lastKnownFileType = net.daringfireball.markdown; path = README.md; sourceTree = "<group>"; };
+ 4C4792B62A9939BD00489948 /* portable_basic.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = portable_basic.h; sourceTree = "<group>"; };
+ 4C4792B72A9939BD00489948 /* portable.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = portable.h; sourceTree = "<group>"; };
+ 4C4792B82A9939BD00489948 /* grisu3_math.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = grisu3_math.h; sourceTree = "<group>"; };
+ 4C4792B92A9939BD00489948 /* pattributes.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = pattributes.h; sourceTree = "<group>"; };
+ 4C4792BA2A9939BD00489948 /* pstdint.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = pstdint.h; sourceTree = "<group>"; };
+ 4C4792BB2A9939BD00489948 /* pstdbool.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = pstdbool.h; sourceTree = "<group>"; };
+ 4C4792BC2A9939BD00489948 /* pcrt.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = pcrt.h; sourceTree = "<group>"; };
+ 4C4792BD2A9939BD00489948 /* pstatic_assert_scope.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = pstatic_assert_scope.h; sourceTree = "<group>"; };
+ 4C4792BE2A9939BD00489948 /* grisu3_parse.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = grisu3_parse.h; sourceTree = "<group>"; };
+ 4C4792BF2A9939BD00489948 /* pparseint.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = pparseint.h; sourceTree = "<group>"; };
+ 4C4792C02A9939BD00489948 /* flatcc_endian.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = flatcc_endian.h; sourceTree = "<group>"; };
+ 4C4792C12A9939BD00489948 /* flatcc_iov.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = flatcc_iov.h; sourceTree = "<group>"; };
+ 4C4792C22A9939BD00489948 /* flatcc_rtconfig.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = flatcc_rtconfig.h; sourceTree = "<group>"; };
+ 4C4792C32A9939BD00489948 /* flatcc_accessors.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = flatcc_accessors.h; sourceTree = "<group>"; };
+ 4C4792C42A9939BD00489948 /* flatcc_epilogue.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = flatcc_epilogue.h; sourceTree = "<group>"; };
+ 4C4792C52A9939BD00489948 /* flatcc_identifier.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = flatcc_identifier.h; sourceTree = "<group>"; };
+ 4C4792C62A9939BD00489948 /* flatcc_prologue.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = flatcc_prologue.h; sourceTree = "<group>"; };
+ 4C4792C72A9939BD00489948 /* flatcc_builder.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = flatcc_builder.h; sourceTree = "<group>"; };
+ 4C4792C82A9939BD00489948 /* json_parser.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = json_parser.c; sourceTree = "<group>"; };
+ 4C4792CA2A9939BD00489948 /* README */ = {isa = PBXFileReference; lastKnownFileType = text; path = README; sourceTree = "<group>"; };
+ 4C4792CB2A9939BD00489948 /* readfile.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = readfile.h; sourceTree = "<group>"; };
+ 4C4792CC2A9939BD00489948 /* cdump.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = cdump.h; sourceTree = "<group>"; };
+ 4C4792CD2A9939BD00489948 /* elapsed.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = elapsed.h; sourceTree = "<group>"; };
+ 4C4792CE2A9939BD00489948 /* hexdump.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = hexdump.h; sourceTree = "<group>"; };
+ 4C4792CF2A9939BD00489948 /* emitter.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = emitter.c; sourceTree = "<group>"; };
+ 4C4792D02A9939BD00489948 /* flatcc_json_parser.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = flatcc_json_parser.h; sourceTree = "<group>"; };
+ 4C4792D12A9939BD00489948 /* refmap.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = refmap.c; sourceTree = "<group>"; };
+ 4C4792D22A9939BD00489948 /* flatcc_flatbuffers.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = flatcc_flatbuffers.h; sourceTree = "<group>"; };
+ 4C4792D32A9939BD00489948 /* flatcc_portable.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = flatcc_portable.h; sourceTree = "<group>"; };
+ 4C4792D42A9939BD00489948 /* verifier.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = verifier.c; sourceTree = "<group>"; };
+ 4C4792D52A9939BD00489948 /* flatcc_types.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = flatcc_types.h; sourceTree = "<group>"; };
+ 4C4792D62A9939BD00489948 /* json_printer.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = json_printer.c; sourceTree = "<group>"; };
+ 4C4792D72A9939BD00489948 /* flatcc_assert.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = flatcc_assert.h; sourceTree = "<group>"; };
+ 4C4792D92A9939BD00489948 /* README */ = {isa = PBXFileReference; lastKnownFileType = text; path = README; sourceTree = "<group>"; };
+ 4C4792DA2A9939BD00489948 /* reflection_reader.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = reflection_reader.h; sourceTree = "<group>"; };
+ 4C4792DB2A9939BD00489948 /* flatbuffers_common_reader.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = flatbuffers_common_reader.h; sourceTree = "<group>"; };
+ 4C4792DC2A9939BD00489948 /* reflection_builder.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = reflection_builder.h; sourceTree = "<group>"; };
+ 4C4792DD2A9939BD00489948 /* reflection_verifier.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = reflection_verifier.h; sourceTree = "<group>"; };
+ 4C4792DE2A9939BD00489948 /* flatbuffers_common_builder.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = flatbuffers_common_builder.h; sourceTree = "<group>"; };
+ 4C4792FF2A993B9A00489948 /* lmdb.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = lmdb.h; sourceTree = "<group>"; };
+ 4C4793002A993B9A00489948 /* mdb.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = mdb.c; sourceTree = "<group>"; };
+ 4C4793022A993D9300489948 /* midl.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = midl.h; sourceTree = "<group>"; };
+ 4C4793032A993DB900489948 /* midl.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = midl.c; sourceTree = "<group>"; };
4C4A3A5A288A1B2200453788 /* damus.entitlements */ = {isa = PBXFileReference; lastKnownFileType = text.plist.entitlements; path = damus.entitlements; sourceTree = "<group>"; };
4C4DD3DA2A6CA7E8005B4E85 /* ContentParsing.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ContentParsing.swift; sourceTree = "<group>"; };
4C4E137A2A76D5FB00BDD832 /* MuteThreadNotify.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = MuteThreadNotify.swift; sourceTree = "<group>"; };
@@ -1227,6 +1328,170 @@
path = Notifications;
sourceTree = "<group>";
};
+ 4C478E2A2A9935D300489948 /* bindings */ = {
+ isa = PBXGroup;
+ children = (
+ 4C478E2B2A9935D300489948 /* swift */,
+ 4C478E2D2A9935D300489948 /* c */,
+ );
+ path = bindings;
+ sourceTree = "<group>";
+ };
+ 4C478E2B2A9935D300489948 /* swift */ = {
+ isa = PBXGroup;
+ children = (
+ 4C478E2C2A9935D300489948 /* NdbProfile.swift */,
+ );
+ path = swift;
+ sourceTree = "<group>";
+ };
+ 4C478E2D2A9935D300489948 /* c */ = {
+ isa = PBXGroup;
+ children = (
+ 4C478E2E2A9935D300489948 /* profile_json_parser.h */,
+ 4C478E2F2A9935D300489948 /* profile_reader.h */,
+ 4C478E302A9935D300489948 /* meta_json_parser.h */,
+ 4C478E312A9935D300489948 /* profile_builder.h */,
+ 4C478E322A9935D300489948 /* meta_builder.h */,
+ 4C478E332A9935D300489948 /* profile_verifier.h */,
+ 4C478E352A9935D300489948 /* meta_reader.h */,
+ 4C478E362A9935D300489948 /* flatbuffers_common_reader.h */,
+ 4C478E372A9935D300489948 /* meta_verifier.h */,
+ 4C478E382A9935D300489948 /* flatbuffers_common_builder.h */,
+ );
+ path = c;
+ sourceTree = "<group>";
+ };
+ 4C47928D2A9939BD00489948 /* flatcc */ = {
+ isa = PBXGroup;
+ children = (
+ 4C47928E2A9939BD00489948 /* flatcc.h */,
+ 4C47928F2A9939BD00489948 /* flatcc_version.h */,
+ 4C4792902A9939BD00489948 /* flatcc_emitter.h */,
+ 4C4792912A9939BD00489948 /* flatcc_alloc.h */,
+ 4C4792922A9939BD00489948 /* flatcc_json_printer.h */,
+ 4C4792932A9939BD00489948 /* CMakeLists.txt */,
+ 4C4792942A9939BD00489948 /* builder.c */,
+ 4C4792952A9939BD00489948 /* flatcc_verifier.h */,
+ 4C4792962A9939BD00489948 /* flatcc_refmap.h */,
+ 4C4792972A9939BD00489948 /* flatcc_unaligned.h */,
+ 4C4792982A9939BD00489948 /* portable */,
+ 4C4792C02A9939BD00489948 /* flatcc_endian.h */,
+ 4C4792C12A9939BD00489948 /* flatcc_iov.h */,
+ 4C4792C22A9939BD00489948 /* flatcc_rtconfig.h */,
+ 4C4792C32A9939BD00489948 /* flatcc_accessors.h */,
+ 4C4792C42A9939BD00489948 /* flatcc_epilogue.h */,
+ 4C4792C52A9939BD00489948 /* flatcc_identifier.h */,
+ 4C4792C62A9939BD00489948 /* flatcc_prologue.h */,
+ 4C4792C72A9939BD00489948 /* flatcc_builder.h */,
+ 4C4792C82A9939BD00489948 /* json_parser.c */,
+ 4C4792C92A9939BD00489948 /* support */,
+ 4C4792CF2A9939BD00489948 /* emitter.c */,
+ 4C4792D02A9939BD00489948 /* flatcc_json_parser.h */,
+ 4C4792D12A9939BD00489948 /* refmap.c */,
+ 4C4792D22A9939BD00489948 /* flatcc_flatbuffers.h */,
+ 4C4792D32A9939BD00489948 /* flatcc_portable.h */,
+ 4C4792D42A9939BD00489948 /* verifier.c */,
+ 4C4792D52A9939BD00489948 /* flatcc_types.h */,
+ 4C4792D62A9939BD00489948 /* json_printer.c */,
+ 4C4792D72A9939BD00489948 /* flatcc_assert.h */,
+ 4C4792D82A9939BD00489948 /* reflection */,
+ );
+ path = flatcc;
+ sourceTree = "<group>";
+ };
+ 4C4792982A9939BD00489948 /* portable */ = {
+ isa = PBXGroup;
+ children = (
+ 4C4792992A9939BD00489948 /* grisu3_print.h */,
+ 4C47929A2A9939BD00489948 /* pprintfp.h */,
+ 4C47929B2A9939BD00489948 /* pbase64.h */,
+ 4C47929C2A9939BD00489948 /* punaligned.h */,
+ 4C47929D2A9939BD00489948 /* LICENSE */,
+ 4C47929E2A9939BD00489948 /* pdiagnostic.h */,
+ 4C47929F2A9939BD00489948 /* pinttypes.h */,
+ 4C4792A02A9939BD00489948 /* pinline.h */,
+ 4C4792A12A9939BD00489948 /* pprintint.h */,
+ 4C4792A22A9939BD00489948 /* pdiagnostic_pop.h */,
+ 4C4792A32A9939BD00489948 /* include */,
+ 4C4792AC2A9939BD00489948 /* pversion.h */,
+ 4C4792AD2A9939BD00489948 /* pstdalign.h */,
+ 4C4792AE2A9939BD00489948 /* pdiagnostic_push.h */,
+ 4C4792AF2A9939BD00489948 /* pendian_detect.h */,
+ 4C4792B02A9939BD00489948 /* paligned_alloc.h */,
+ 4C4792B12A9939BD00489948 /* pendian.h */,
+ 4C4792B22A9939BD00489948 /* pstatic_assert.h */,
+ 4C4792B32A9939BD00489948 /* pwarnings.h */,
+ 4C4792B42A9939BD00489948 /* pparsefp.h */,
+ 4C4792B52A9939BD00489948 /* README.md */,
+ 4C4792B62A9939BD00489948 /* portable_basic.h */,
+ 4C4792B72A9939BD00489948 /* portable.h */,
+ 4C4792B82A9939BD00489948 /* grisu3_math.h */,
+ 4C4792B92A9939BD00489948 /* pattributes.h */,
+ 4C4792BA2A9939BD00489948 /* pstdint.h */,
+ 4C4792BB2A9939BD00489948 /* pstdbool.h */,
+ 4C4792BC2A9939BD00489948 /* pcrt.h */,
+ 4C4792BD2A9939BD00489948 /* pstatic_assert_scope.h */,
+ 4C4792BE2A9939BD00489948 /* grisu3_parse.h */,
+ 4C4792BF2A9939BD00489948 /* pparseint.h */,
+ );
+ path = portable;
+ sourceTree = "<group>";
+ };
+ 4C4792A32A9939BD00489948 /* include */ = {
+ isa = PBXGroup;
+ children = (
+ 4C4792A42A9939BD00489948 /* std */,
+ 4C4792A92A9939BD00489948 /* README */,
+ 4C4792AA2A9939BD00489948 /* linux */,
+ );
+ path = include;
+ sourceTree = "<group>";
+ };
+ 4C4792A42A9939BD00489948 /* std */ = {
+ isa = PBXGroup;
+ children = (
+ 4C4792A52A9939BD00489948 /* stdalign.h */,
+ 4C4792A62A9939BD00489948 /* inttypes.h */,
+ 4C4792A72A9939BD00489948 /* stdbool.h */,
+ 4C4792A82A9939BD00489948 /* stdint.h */,
+ );
+ path = std;
+ sourceTree = "<group>";
+ };
+ 4C4792AA2A9939BD00489948 /* linux */ = {
+ isa = PBXGroup;
+ children = (
+ 4C4792AB2A9939BD00489948 /* endian.h */,
+ );
+ path = linux;
+ sourceTree = "<group>";
+ };
+ 4C4792C92A9939BD00489948 /* support */ = {
+ isa = PBXGroup;
+ children = (
+ 4C4792CA2A9939BD00489948 /* README */,
+ 4C4792CB2A9939BD00489948 /* readfile.h */,
+ 4C4792CC2A9939BD00489948 /* cdump.h */,
+ 4C4792CD2A9939BD00489948 /* elapsed.h */,
+ 4C4792CE2A9939BD00489948 /* hexdump.h */,
+ );
+ path = support;
+ sourceTree = "<group>";
+ };
+ 4C4792D82A9939BD00489948 /* reflection */ = {
+ isa = PBXGroup;
+ children = (
+ 4C4792D92A9939BD00489948 /* README */,
+ 4C4792DA2A9939BD00489948 /* reflection_reader.h */,
+ 4C4792DB2A9939BD00489948 /* flatbuffers_common_reader.h */,
+ 4C4792DC2A9939BD00489948 /* reflection_builder.h */,
+ 4C4792DD2A9939BD00489948 /* reflection_verifier.h */,
+ 4C4792DE2A9939BD00489948 /* flatbuffers_common_builder.h */,
+ );
+ path = reflection;
+ sourceTree = "<group>";
+ };
4C54AA0829A55416003E4487 /* Notifications */ = {
isa = PBXGroup;
children = (
@@ -1460,6 +1725,8 @@
4C9054862A6AEB4500811EEC /* nostrdb */ = {
isa = PBXGroup;
children = (
+ 4C47928D2A9939BD00489948 /* flatcc */,
+ 4C478E2A2A9935D300489948 /* bindings */,
4CE9FBBB2A6B3D9C007E485C /* Test */,
4C9054882A6AED4700811EEC /* NdbTagIterator.swift */,
4C90548A2A6AEDEE00811EEC /* NdbNote.swift */,
@@ -1467,13 +1734,22 @@
4CDD1ADF2A6B305F001CD4DF /* NdbTagElem.swift */,
4CDD1AE12A6B3074001CD4DF /* NdbTagsIterator.swift */,
4CE9FBB82A6B3B26007E485C /* nostrdb.c */,
+ 4C4793032A993DB900489948 /* midl.c */,
+ 4C4793002A993B9A00489948 /* mdb.c */,
+ 4C4793022A993D9300489948 /* midl.h */,
+ 4C4792FF2A993B9A00489948 /* lmdb.h */,
4CE9FBB92A6B3B26007E485C /* nostrdb.h */,
4C78EFD62A7078C5007E8197 /* random.h */,
4CDD1AE72A6B3611001CD4DF /* jsmn.h */,
+ 4C478E292A99359900489948 /* util.h */,
+ 4C478E282A99357400489948 /* memchr.h */,
+ 4C478E272A99354E00489948 /* protected_queue.h */,
+ 4C478E262A99353500489948 /* threadpool.h */,
4C78EFD82A707C4D007E8197 /* secp256k1_ecdh.h */,
4C78EFD72A707C4D007E8197 /* secp256k1_schnorrsig.h */,
4C78EFDA2A707C67007E8197 /* secp256k1_extrakeys.h */,
4C78EFD92A707C4D007E8197 /* secp256k1.h */,
+ 4C478E242A9932C100489948 /* Ndb.swift */,
);
path = nostrdb;
sourceTree = "<group>";
@@ -2127,6 +2403,12 @@
isa = PBXSourcesBuildPhase;
buildActionMask = 2147483647;
files = (
+ 4C4793082A993E8900489948 /* refmap.c in Sources */,
+ 4C4793072A993E6200489948 /* emitter.c in Sources */,
+ 4C4793062A993E5300489948 /* json_parser.c in Sources */,
+ 4C4793052A993E3200489948 /* builder.c in Sources */,
+ 4C4793042A993DC000489948 /* midl.c in Sources */,
+ 4C4793012A993CDA00489948 /* mdb.c in Sources */,
4CE9FBBA2A6B3C63007E485C /* nostrdb.c in Sources */,
4C3AC79D2833036D00E1F516 /* FollowingView.swift in Sources */,
5CF72FC229B9142F00124A13 /* ShareAction.swift in Sources */,
diff --git a/damus/Nostr/NostrResponse.swift b/damus/Nostr/NostrResponse.swift
@@ -51,7 +51,7 @@ enum NostrResponse {
//json_cs
var tce = ndb_tce()
- let len = ndb_ws_event_from_json(cstr, Int32(json.utf8.count), &tce, data, Int32(bufsize))
+ let len = ndb_ws_event_from_json(cstr, Int32(json.utf8.count), &tce, data, Int32(bufsize), nil)
if len <= 0 {
free(data)
return nil
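The updated ndb_ws_event_from_json signature takes one extra trailing pointer
argument, and this caller opts out by passing nil. A hedged sketch of the call
pattern (the buffer setup and size are assumptions, not shown in this hunk;
only the final nil argument mirrors the actual change):

    func tryParseWSEvent(_ json: String) -> Bool {
        var tce = ndb_tce()
        let bufsize = 2 << 18  // assumed buffer size
        let buf = UnsafeMutablePointer<UInt8>.allocate(capacity: bufsize)
        defer { buf.deallocate() }
        let len = json.withCString { cstr in
            // The trailing nil is the new sixth argument added upstream.
            ndb_ws_event_from_json(cstr, Int32(json.utf8.count), &tce,
                                   buf, Int32(bufsize), nil)
        }
        return len > 0
    }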
diff --git a/nostrdb/NdbNote.swift b/nostrdb/NdbNote.swift
@@ -156,7 +156,7 @@ class NdbNote: Encodable, Equatable, Hashable {
ndb_builder_set_pubkey(&builder, &pk_raw)
ndb_builder_set_kind(&builder, UInt32(kind))
- ndb_builder_set_created_at(&builder, createdAt)
+ ndb_builder_set_created_at(&builder, UInt64(createdAt))
var ok = true
for tag in tags {
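nostrdb's created_at field widened to 64 bits, hence the explicit cast at the
call site. A minimal illustration (assuming createdAt is a 32-bit unix
timestamp, as the cast suggests; builder setup via ndb_builder_init is
elided):

    func setCreatedAt(_ builder: inout ndb_builder, _ createdAt: UInt32) {
        // UInt32 -> UInt64 widening is lossless, so existing 32-bit
        // timestamps keep their values under the new API.
        ndb_builder_set_created_at(&builder, UInt64(createdAt))
    }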
diff --git a/nostrdb/bindings/c/.dir b/nostrdb/bindings/c/.dir
diff --git a/nostrdb/bindings/c/flatbuffers_common_builder.h b/nostrdb/bindings/c/flatbuffers_common_builder.h
@@ -0,0 +1,685 @@
+#ifndef FLATBUFFERS_COMMON_BUILDER_H
+#define FLATBUFFERS_COMMON_BUILDER_H
+
+/* Generated by flatcc 0.6.1 FlatBuffers schema compiler for C by dvide.com */
+
+/* Common FlatBuffers build functionality for C. */
+
+#include "flatcc/flatcc_prologue.h"
+#ifndef FLATBUILDER_H
+#include "flatcc/flatcc_builder.h"
+#endif
+typedef flatcc_builder_t flatbuffers_builder_t;
+typedef flatcc_builder_ref_t flatbuffers_ref_t;
+typedef flatcc_builder_ref_t flatbuffers_vec_ref_t;
+typedef flatcc_builder_union_ref_t flatbuffers_union_ref_t;
+typedef flatcc_builder_union_vec_ref_t flatbuffers_union_vec_ref_t;
+/* integer return code (ref and ptr always fail on 0) */
+#define flatbuffers_failed(x) ((x) < 0)
+typedef flatbuffers_ref_t flatbuffers_root_t;
+#define flatbuffers_root(ref) ((flatbuffers_root_t)(ref))
+
+#define __flatbuffers_memoize_begin(B, src)\
+do { flatcc_builder_ref_t _ref; if ((_ref = flatcc_builder_refmap_find((B), (src)))) return _ref; } while (0)
+#define __flatbuffers_memoize_end(B, src, op) do { return flatcc_builder_refmap_insert((B), (src), (op)); } while (0)
+#define __flatbuffers_memoize(B, src, op) do { __flatbuffers_memoize_begin(B, src); __flatbuffers_memoize_end(B, src, op); } while (0)
+
+#define __flatbuffers_build_buffer(NS)\
+typedef NS ## ref_t NS ## buffer_ref_t;\
+static inline int NS ## buffer_start(NS ## builder_t *B, const NS ##fid_t fid)\
+{ return flatcc_builder_start_buffer(B, fid, 0, 0); }\
+static inline int NS ## buffer_start_with_size(NS ## builder_t *B, const NS ##fid_t fid)\
+{ return flatcc_builder_start_buffer(B, fid, 0, flatcc_builder_with_size); }\
+static inline int NS ## buffer_start_aligned(NS ## builder_t *B, NS ##fid_t fid, uint16_t block_align)\
+{ return flatcc_builder_start_buffer(B, fid, block_align, 0); }\
+static inline int NS ## buffer_start_aligned_with_size(NS ## builder_t *B, NS ##fid_t fid, uint16_t block_align)\
+{ return flatcc_builder_start_buffer(B, fid, block_align, flatcc_builder_with_size); }\
+static inline NS ## buffer_ref_t NS ## buffer_end(NS ## builder_t *B, NS ## ref_t root)\
+{ return flatcc_builder_end_buffer(B, root); }
+
+#define __flatbuffers_build_table_root(NS, N, FID, TFID)\
+static inline int N ## _start_as_root(NS ## builder_t *B)\
+{ return NS ## buffer_start(B, FID) ? -1 : N ## _start(B); }\
+static inline int N ## _start_as_root_with_size(NS ## builder_t *B)\
+{ return NS ## buffer_start_with_size(B, FID) ? -1 : N ## _start(B); }\
+static inline int N ## _start_as_typed_root(NS ## builder_t *B)\
+{ return NS ## buffer_start(B, TFID) ? -1 : N ## _start(B); }\
+static inline int N ## _start_as_typed_root_with_size(NS ## builder_t *B)\
+{ return NS ## buffer_start_with_size(B, TFID) ? -1 : N ## _start(B); }\
+static inline NS ## buffer_ref_t N ## _end_as_root(NS ## builder_t *B)\
+{ return NS ## buffer_end(B, N ## _end(B)); }\
+static inline NS ## buffer_ref_t N ## _end_as_typed_root(NS ## builder_t *B)\
+{ return NS ## buffer_end(B, N ## _end(B)); }\
+static inline NS ## buffer_ref_t N ## _create_as_root(NS ## builder_t *B __ ## N ## _formal_args)\
+{ if (NS ## buffer_start(B, FID)) return 0; return NS ## buffer_end(B, N ## _create(B __ ## N ## _call_args)); }\
+static inline NS ## buffer_ref_t N ## _create_as_root_with_size(NS ## builder_t *B __ ## N ## _formal_args)\
+{ if (NS ## buffer_start_with_size(B, FID)) return 0; return NS ## buffer_end(B, N ## _create(B __ ## N ## _call_args)); }\
+static inline NS ## buffer_ref_t N ## _create_as_typed_root(NS ## builder_t *B __ ## N ## _formal_args)\
+{ if (NS ## buffer_start(B, TFID)) return 0; return NS ## buffer_end(B, N ## _create(B __ ## N ## _call_args)); }\
+static inline NS ## buffer_ref_t N ## _create_as_typed_root_with_size(NS ## builder_t *B __ ## N ## _formal_args)\
+{ if (NS ## buffer_start_with_size(B, TFID)) return 0; return NS ## buffer_end(B, N ## _create(B __ ## N ## _call_args)); }\
+static inline NS ## buffer_ref_t N ## _clone_as_root(NS ## builder_t *B, N ## _table_t t)\
+{ if (NS ## buffer_start(B, FID)) return 0; return NS ## buffer_end(B, N ## _clone(B, t)); }\
+static inline NS ## buffer_ref_t N ## _clone_as_root_with_size(NS ## builder_t *B, N ## _table_t t)\
+{ if (NS ## buffer_start_with_size(B, FID)) return 0; return NS ## buffer_end(B, N ## _clone(B, t)); }\
+static inline NS ## buffer_ref_t N ## _clone_as_typed_root(NS ## builder_t *B, N ## _table_t t)\
+{ if (NS ## buffer_start(B, TFID)) return 0;return NS ## buffer_end(B, N ## _clone(B, t)); }\
+static inline NS ## buffer_ref_t N ## _clone_as_typed_root_with_size(NS ## builder_t *B, N ## _table_t t)\
+{ if (NS ## buffer_start_with_size(B, TFID)) return 0; return NS ## buffer_end(B, N ## _clone(B, t)); }
+
+#define __flatbuffers_build_table_prolog(NS, N, FID, TFID)\
+__flatbuffers_build_table_vector_ops(NS, N ## _vec, N)\
+__flatbuffers_build_table_root(NS, N, FID, TFID)
+
+#define __flatbuffers_build_struct_root(NS, N, A, FID, TFID)\
+static inline N ## _t *N ## _start_as_root(NS ## builder_t *B)\
+{ return NS ## buffer_start(B, FID) ? 0 : N ## _start(B); }\
+static inline N ## _t *N ## _start_as_root_with_size(NS ## builder_t *B)\
+{ return NS ## buffer_start_with_size(B, FID) ? 0 : N ## _start(B); }\
+static inline N ## _t *N ## _start_as_typed_root(NS ## builder_t *B)\
+{ return NS ## buffer_start(B, TFID) ? 0 : N ## _start(B); }\
+static inline N ## _t *N ## _start_as_typed_root_with_size(NS ## builder_t *B)\
+{ return NS ## buffer_start_with_size(B, TFID) ? 0 : N ## _start(B); }\
+static inline NS ## buffer_ref_t N ## _end_as_root(NS ## builder_t *B)\
+{ return NS ## buffer_end(B, N ## _end(B)); }\
+static inline NS ## buffer_ref_t N ## _end_as_typed_root(NS ## builder_t *B)\
+{ return NS ## buffer_end(B, N ## _end(B)); }\
+static inline NS ## buffer_ref_t N ## _end_pe_as_root(NS ## builder_t *B)\
+{ return NS ## buffer_end(B, N ## _end_pe(B)); }\
+static inline NS ## buffer_ref_t N ## _end_pe_as_typed_root(NS ## builder_t *B)\
+{ return NS ## buffer_end(B, N ## _end_pe(B)); }\
+static inline NS ## buffer_ref_t N ## _create_as_root(NS ## builder_t *B __ ## N ## _formal_args)\
+{ return flatcc_builder_create_buffer(B, FID, 0,\
+ N ## _create(B __ ## N ## _call_args), A, 0); }\
+static inline NS ## buffer_ref_t N ## _create_as_root_with_size(NS ## builder_t *B __ ## N ## _formal_args)\
+{ return flatcc_builder_create_buffer(B, FID, 0,\
+ N ## _create(B __ ## N ## _call_args), A, flatcc_builder_with_size); }\
+static inline NS ## buffer_ref_t N ## _create_as_typed_root(NS ## builder_t *B __ ## N ## _formal_args)\
+{ return flatcc_builder_create_buffer(B, TFID, 0,\
+ N ## _create(B __ ## N ## _call_args), A, 0); }\
+static inline NS ## buffer_ref_t N ## _create_as_typed_root_with_size(NS ## builder_t *B __ ## N ## _formal_args)\
+{ return flatcc_builder_create_buffer(B, TFID, 0,\
+ N ## _create(B __ ## N ## _call_args), A, flatcc_builder_with_size); }\
+static inline NS ## buffer_ref_t N ## _clone_as_root(NS ## builder_t *B, N ## _struct_t p)\
+{ return flatcc_builder_create_buffer(B, FID, 0, N ## _clone(B, p), A, 0); }\
+static inline NS ## buffer_ref_t N ## _clone_as_root_with_size(NS ## builder_t *B, N ## _struct_t p)\
+{ return flatcc_builder_create_buffer(B, FID, 0, N ## _clone(B, p), A, flatcc_builder_with_size); }\
+static inline NS ## buffer_ref_t N ## _clone_as_typed_root(NS ## builder_t *B, N ## _struct_t p)\
+{ return flatcc_builder_create_buffer(B, TFID, 0, N ## _clone(B, p), A, 0); }\
+static inline NS ## buffer_ref_t N ## _clone_as_typed_root_with_size(NS ## builder_t *B, N ## _struct_t p)\
+{ return flatcc_builder_create_buffer(B, TFID, 0, N ## _clone(B, p), A, flatcc_builder_with_size); }
+
+#define __flatbuffers_build_nested_table_root(NS, N, TN, FID, TFID)\
+static inline int N ## _start_as_root(NS ## builder_t *B)\
+{ return NS ## buffer_start(B, FID) ? -1 : TN ## _start(B); }\
+static inline int N ## _start_as_typed_root(NS ## builder_t *B)\
+{ return NS ## buffer_start(B, TFID) ? -1 : TN ## _start(B); }\
+static inline int N ## _end_as_root(NS ## builder_t *B)\
+{ return N ## _add(B, NS ## buffer_end(B, TN ## _end(B))); }\
+static inline int N ## _end_as_typed_root(NS ## builder_t *B)\
+{ return N ## _add(B, NS ## buffer_end(B, TN ## _end(B))); }\
+static inline int N ## _nest(NS ## builder_t *B, void *data, size_t size, uint16_t align)\
+{ return N ## _add(B, flatcc_builder_create_vector(B, data, size, 1,\
+ align ? align : 8, FLATBUFFERS_COUNT_MAX(1))); }\
+static inline int N ## _typed_nest(NS ## builder_t *B, void *data, size_t size, uint16_t align)\
+{ return N ## _add(B, flatcc_builder_create_vector(B, data, size, 1,\
+ align ? align : 8, FLATBUFFERS_COUNT_MAX(1))); }\
+static inline int N ## _clone_as_root(NS ## builder_t *B, TN ## _table_t t)\
+{ return N ## _add(B, TN ## _clone_as_root(B, t)); }\
+static inline int N ## _clone_as_typed_root(NS ## builder_t *B, TN ## _table_t t)\
+{ return N ## _add(B, TN ## _clone_as_typed_root(B, t)); }
+
+#define __flatbuffers_build_nested_struct_root(NS, N, TN, A, FID, TFID)\
+static inline TN ## _t *N ## _start_as_root(NS ## builder_t *B)\
+{ return NS ## buffer_start(B, FID) ? 0 : TN ## _start(B); }\
+static inline TN ## _t *N ## _start_as_typed_root(NS ## builder_t *B)\
+{ return NS ## buffer_start(B, FID) ? 0 : TN ## _start(B); }\
+static inline int N ## _end_as_root(NS ## builder_t *B)\
+{ return N ## _add(B, NS ## buffer_end(B, TN ## _end(B))); }\
+static inline int N ## _end_as_typed_root(NS ## builder_t *B)\
+{ return N ## _add(B, NS ## buffer_end(B, TN ## _end(B))); }\
+static inline int N ## _end_pe_as_root(NS ## builder_t *B)\
+{ return N ## _add(B, NS ## buffer_end(B, TN ## _end_pe(B))); }\
+static inline int N ## _create_as_root(NS ## builder_t *B __ ## TN ## _formal_args)\
+{ return N ## _add(B, flatcc_builder_create_buffer(B, FID, 0,\
+ TN ## _create(B __ ## TN ## _call_args), A, flatcc_builder_is_nested)); }\
+static inline int N ## _create_as_typed_root(NS ## builder_t *B __ ## TN ## _formal_args)\
+{ return N ## _add(B, flatcc_builder_create_buffer(B, TFID, 0,\
+ TN ## _create(B __ ## TN ## _call_args), A, flatcc_builder_is_nested)); }\
+static inline int N ## _nest(NS ## builder_t *B, void *data, size_t size, uint16_t align)\
+{ return N ## _add(B, flatcc_builder_create_vector(B, data, size, 1,\
+ align < A ? A : align, FLATBUFFERS_COUNT_MAX(1))); }\
+static inline int N ## _typed_nest(NS ## builder_t *B, void *data, size_t size, uint16_t align)\
+{ return N ## _add(B, flatcc_builder_create_vector(B, data, size, 1,\
+ align < A ? A : align, FLATBUFFERS_COUNT_MAX(1))); }\
+static inline int N ## _clone_as_root(NS ## builder_t *B, TN ## _struct_t p)\
+{ return N ## _add(B, TN ## _clone_as_root(B, p)); }\
+static inline int N ## _clone_as_typed_root(NS ## builder_t *B, TN ## _struct_t p)\
+{ return N ## _add(B, TN ## _clone_as_typed_root(B, p)); }
+
+#define __flatbuffers_build_vector_ops(NS, V, N, TN, T)\
+static inline T *V ## _extend(NS ## builder_t *B, size_t len)\
+{ return (T *)flatcc_builder_extend_vector(B, len); }\
+static inline T *V ## _append(NS ## builder_t *B, const T *data, size_t len)\
+{ return (T *)flatcc_builder_append_vector(B, data, len); }\
+static inline int V ## _truncate(NS ## builder_t *B, size_t len)\
+{ return flatcc_builder_truncate_vector(B, len); }\
+static inline T *V ## _edit(NS ## builder_t *B)\
+{ return (T *)flatcc_builder_vector_edit(B); }\
+static inline size_t V ## _reserved_len(NS ## builder_t *B)\
+{ return flatcc_builder_vector_count(B); }\
+static inline T *V ## _push(NS ## builder_t *B, const T *p)\
+{ T *_p; return (_p = (T *)flatcc_builder_extend_vector(B, 1)) ? (memcpy(_p, p, TN ## __size()), _p) : 0; }\
+static inline T *V ## _push_copy(NS ## builder_t *B, const T *p)\
+{ T *_p; return (_p = (T *)flatcc_builder_extend_vector(B, 1)) ? TN ## _copy(_p, p) : 0; }\
+static inline T *V ## _push_clone(NS ## builder_t *B, const T *p)\
+{ T *_p; return (_p = (T *)flatcc_builder_extend_vector(B, 1)) ? TN ## _copy(_p, p) : 0; }\
+static inline T *V ## _push_create(NS ## builder_t *B __ ## TN ## _formal_args)\
+{ T *_p; return (_p = (T *)flatcc_builder_extend_vector(B, 1)) ? TN ## _assign(_p __ ## TN ## _call_args) : 0; }
+
+#define __flatbuffers_build_vector(NS, N, T, S, A)\
+typedef NS ## ref_t N ## _vec_ref_t;\
+static inline int N ## _vec_start(NS ## builder_t *B)\
+{ return flatcc_builder_start_vector(B, S, A, FLATBUFFERS_COUNT_MAX(S)); }\
+static inline N ## _vec_ref_t N ## _vec_end_pe(NS ## builder_t *B)\
+{ return flatcc_builder_end_vector(B); }\
+static inline N ## _vec_ref_t N ## _vec_end(NS ## builder_t *B)\
+{ if (!NS ## is_native_pe()) { size_t i, n; T *p = (T *)flatcc_builder_vector_edit(B);\
+ for (i = 0, n = flatcc_builder_vector_count(B); i < n; ++i)\
+ { N ## _to_pe(N ## __ptr_add(p, i)); }} return flatcc_builder_end_vector(B); }\
+static inline N ## _vec_ref_t N ## _vec_create_pe(NS ## builder_t *B, const T *data, size_t len)\
+{ return flatcc_builder_create_vector(B, data, len, S, A, FLATBUFFERS_COUNT_MAX(S)); }\
+static inline N ## _vec_ref_t N ## _vec_create(NS ## builder_t *B, const T *data, size_t len)\
+{ if (!NS ## is_native_pe()) { size_t i; T *p; int ret = flatcc_builder_start_vector(B, S, A, FLATBUFFERS_COUNT_MAX(S)); if (ret) { return ret; }\
+ p = (T *)flatcc_builder_extend_vector(B, len); if (!p) return 0;\
+ for (i = 0; i < len; ++i) { N ## _copy_to_pe(N ## __ptr_add(p, i), N ## __const_ptr_add(data, i)); }\
+ return flatcc_builder_end_vector(B); } else return flatcc_builder_create_vector(B, data, len, S, A, FLATBUFFERS_COUNT_MAX(S)); }\
+static inline N ## _vec_ref_t N ## _vec_clone(NS ## builder_t *B, N ##_vec_t vec)\
+{ __flatbuffers_memoize(B, vec, flatcc_builder_create_vector(B, vec, N ## _vec_len(vec), S, A, FLATBUFFERS_COUNT_MAX(S))); }\
+static inline N ## _vec_ref_t N ## _vec_slice(NS ## builder_t *B, N ##_vec_t vec, size_t index, size_t len)\
+{ size_t n = N ## _vec_len(vec); if (index >= n) index = n; n -= index; if (len > n) len = n;\
+ return flatcc_builder_create_vector(B, N ## __const_ptr_add(vec, index), len, S, A, FLATBUFFERS_COUNT_MAX(S)); }\
+__flatbuffers_build_vector_ops(NS, N ## _vec, N, N, T)
+
+#define __flatbuffers_build_union_vector_ops(NS, V, N, TN)\
+static inline TN ## _union_ref_t *V ## _extend(NS ## builder_t *B, size_t len)\
+{ return flatcc_builder_extend_union_vector(B, len); }\
+static inline TN ## _union_ref_t *V ## _append(NS ## builder_t *B, const TN ## _union_ref_t *data, size_t len)\
+{ return flatcc_builder_append_union_vector(B, data, len); }\
+static inline int V ## _truncate(NS ## builder_t *B, size_t len)\
+{ return flatcc_builder_truncate_union_vector(B, len); }\
+static inline TN ## _union_ref_t *V ## _edit(NS ## builder_t *B)\
+{ return (TN ## _union_ref_t *) flatcc_builder_union_vector_edit(B); }\
+static inline size_t V ## _reserved_len(NS ## builder_t *B)\
+{ return flatcc_builder_union_vector_count(B); }\
+static inline TN ## _union_ref_t *V ## _push(NS ## builder_t *B, const TN ## _union_ref_t ref)\
+{ return flatcc_builder_union_vector_push(B, ref); }\
+static inline TN ## _union_ref_t *V ## _push_clone(NS ## builder_t *B, TN ## _union_t u)\
+{ return TN ## _vec_push(B, TN ## _clone(B, u)); }
+
+#define __flatbuffers_build_union_vector(NS, N)\
+static inline int N ## _vec_start(NS ## builder_t *B)\
+{ return flatcc_builder_start_union_vector(B); }\
+static inline N ## _union_vec_ref_t N ## _vec_end(NS ## builder_t *B)\
+{ return flatcc_builder_end_union_vector(B); }\
+static inline N ## _union_vec_ref_t N ## _vec_create(NS ## builder_t *B, const N ## _union_ref_t *data, size_t len)\
+{ return flatcc_builder_create_union_vector(B, data, len); }\
+__flatbuffers_build_union_vector_ops(NS, N ## _vec, N, N)\
+/* Preserves DAG structure separately for type and value vector, so a type vector could be shared for many value vectors. */\
+static inline N ## _union_vec_ref_t N ## _vec_clone(NS ## builder_t *B, N ##_union_vec_t vec)\
+{ N ## _union_vec_ref_t _uvref, _ret = { 0, 0 }; NS ## union_ref_t _uref; size_t _i, _len;\
+ if (vec.type == 0) return _ret;\
+ _uvref.type = flatcc_builder_refmap_find(B, vec.type); _uvref.value = flatcc_builder_refmap_find(B, vec.value);\
+ _len = N ## _union_vec_len(vec); if (_uvref.type == 0) {\
+ _uvref.type = flatcc_builder_refmap_insert(B, vec.type, (flatcc_builder_create_type_vector(B, vec.type, _len))); }\
+ if (_uvref.type == 0) return _ret; if (_uvref.value == 0) {\
+ if (flatcc_builder_start_offset_vector(B)) return _ret;\
+ for (_i = 0; _i < _len; ++_i) { _uref = N ## _clone(B, N ## _union_vec_at(vec, _i));\
+ if (!_uref.value || !(flatcc_builder_offset_vector_push(B, _uref.value))) return _ret; }\
+ _uvref.value = flatcc_builder_refmap_insert(B, vec.value, flatcc_builder_end_offset_vector(B));\
+ if (_uvref.value == 0) return _ret; } return _uvref; }
+
+#define __flatbuffers_build_string_vector_ops(NS, N)\
+static inline int N ## _push_start(NS ## builder_t *B)\
+{ return NS ## string_start(B); }\
+static inline NS ## string_ref_t *N ## _push_end(NS ## builder_t *B)\
+{ return NS ## string_vec_push(B, NS ## string_end(B)); }\
+static inline NS ## string_ref_t *N ## _push_create(NS ## builder_t *B, const char *s, size_t len)\
+{ return NS ## string_vec_push(B, NS ## string_create(B, s, len)); }\
+static inline NS ## string_ref_t *N ## _push_create_str(NS ## builder_t *B, const char *s)\
+{ return NS ## string_vec_push(B, NS ## string_create_str(B, s)); }\
+static inline NS ## string_ref_t *N ## _push_create_strn(NS ## builder_t *B, const char *s, size_t max_len)\
+{ return NS ## string_vec_push(B, NS ## string_create_strn(B, s, max_len)); }\
+static inline NS ## string_ref_t *N ## _push_clone(NS ## builder_t *B, NS ## string_t string)\
+{ return NS ## string_vec_push(B, NS ## string_clone(B, string)); }\
+static inline NS ## string_ref_t *N ## _push_slice(NS ## builder_t *B, NS ## string_t string, size_t index, size_t len)\
+{ return NS ## string_vec_push(B, NS ## string_slice(B, string, index, len)); }
+
+#define __flatbuffers_build_table_vector_ops(NS, N, TN)\
+static inline int N ## _push_start(NS ## builder_t *B)\
+{ return TN ## _start(B); }\
+static inline TN ## _ref_t *N ## _push_end(NS ## builder_t *B)\
+{ return N ## _push(B, TN ## _end(B)); }\
+static inline TN ## _ref_t *N ## _push_create(NS ## builder_t *B __ ## TN ##_formal_args)\
+{ return N ## _push(B, TN ## _create(B __ ## TN ## _call_args)); }
+
+#define __flatbuffers_build_offset_vector_ops(NS, V, N, TN)\
+static inline TN ## _ref_t *V ## _extend(NS ## builder_t *B, size_t len)\
+{ return flatcc_builder_extend_offset_vector(B, len); }\
+static inline TN ## _ref_t *V ## _append(NS ## builder_t *B, const TN ## _ref_t *data, size_t len)\
+{ return flatcc_builder_append_offset_vector(B, data, len); }\
+static inline int V ## _truncate(NS ## builder_t *B, size_t len)\
+{ return flatcc_builder_truncate_offset_vector(B, len); }\
+static inline TN ## _ref_t *V ## _edit(NS ## builder_t *B)\
+{ return (TN ## _ref_t *)flatcc_builder_offset_vector_edit(B); }\
+static inline size_t V ## _reserved_len(NS ## builder_t *B)\
+{ return flatcc_builder_offset_vector_count(B); }\
+static inline TN ## _ref_t *V ## _push(NS ## builder_t *B, const TN ## _ref_t ref)\
+{ return ref ? flatcc_builder_offset_vector_push(B, ref) : 0; }
+
+#define __flatbuffers_build_offset_vector(NS, N)\
+typedef NS ## ref_t N ## _vec_ref_t;\
+static inline int N ## _vec_start(NS ## builder_t *B)\
+{ return flatcc_builder_start_offset_vector(B); }\
+static inline N ## _vec_ref_t N ## _vec_end(NS ## builder_t *B)\
+{ return flatcc_builder_end_offset_vector(B); }\
+static inline N ## _vec_ref_t N ## _vec_create(NS ## builder_t *B, const N ## _ref_t *data, size_t len)\
+{ return flatcc_builder_create_offset_vector(B, data, len); }\
+__flatbuffers_build_offset_vector_ops(NS, N ## _vec, N, N)\
+static inline N ## _vec_ref_t N ## _vec_clone(NS ## builder_t *B, N ##_vec_t vec)\
+{ int _ret; N ## _ref_t _e; size_t _i, _len; __flatbuffers_memoize_begin(B, vec);\
+ _len = N ## _vec_len(vec); if (flatcc_builder_start_offset_vector(B)) return 0;\
+ for (_i = 0; _i < _len; ++_i) { if (!(_e = N ## _clone(B, N ## _vec_at(vec, _i)))) return 0;\
+ if (!flatcc_builder_offset_vector_push(B, _e)) return 0; }\
+ __flatbuffers_memoize_end(B, vec, flatcc_builder_end_offset_vector(B)); }\
+
+#define __flatbuffers_build_string_ops(NS, N)\
+static inline char *N ## _append(NS ## builder_t *B, const char *s, size_t len)\
+{ return flatcc_builder_append_string(B, s, len); }\
+static inline char *N ## _append_str(NS ## builder_t *B, const char *s)\
+{ return flatcc_builder_append_string_str(B, s); }\
+static inline char *N ## _append_strn(NS ## builder_t *B, const char *s, size_t len)\
+{ return flatcc_builder_append_string_strn(B, s, len); }\
+static inline size_t N ## _reserved_len(NS ## builder_t *B)\
+{ return flatcc_builder_string_len(B); }\
+static inline char *N ## _extend(NS ## builder_t *B, size_t len)\
+{ return flatcc_builder_extend_string(B, len); }\
+static inline char *N ## _edit(NS ## builder_t *B)\
+{ return flatcc_builder_string_edit(B); }\
+static inline int N ## _truncate(NS ## builder_t *B, size_t len)\
+{ return flatcc_builder_truncate_string(B, len); }
+
+#define __flatbuffers_build_string(NS)\
+typedef NS ## ref_t NS ## string_ref_t;\
+static inline int NS ## string_start(NS ## builder_t *B)\
+{ return flatcc_builder_start_string(B); }\
+static inline NS ## string_ref_t NS ## string_end(NS ## builder_t *B)\
+{ return flatcc_builder_end_string(B); }\
+static inline NS ## ref_t NS ## string_create(NS ## builder_t *B, const char *s, size_t len)\
+{ return flatcc_builder_create_string(B, s, len); }\
+static inline NS ## ref_t NS ## string_create_str(NS ## builder_t *B, const char *s)\
+{ return flatcc_builder_create_string_str(B, s); }\
+static inline NS ## ref_t NS ## string_create_strn(NS ## builder_t *B, const char *s, size_t len)\
+{ return flatcc_builder_create_string_strn(B, s, len); }\
+static inline NS ## string_ref_t NS ## string_clone(NS ## builder_t *B, NS ## string_t string)\
+{ __flatbuffers_memoize(B, string, flatcc_builder_create_string(B, string, NS ## string_len(string))); }\
+static inline NS ## string_ref_t NS ## string_slice(NS ## builder_t *B, NS ## string_t string, size_t index, size_t len)\
+{ size_t n = NS ## string_len(string); if (index >= n) index = n; n -= index; if (len > n) len = n;\
+ return flatcc_builder_create_string(B, string + index, len); }\
+__flatbuffers_build_string_ops(NS, NS ## string)\
+__flatbuffers_build_offset_vector(NS, NS ## string)
+
+#define __flatbuffers_copy_from_pe(P, P2, N) (*(P) = N ## _read_from_pe(P2), (P))
+#define __flatbuffers_from_pe(P, N) (*(P) = N ## _read_from_pe(P), (P))
+#define __flatbuffers_copy_to_pe(P, P2, N) (N ## _write_to_pe((P), *(P2)), (P))
+#define __flatbuffers_to_pe(P, N) (N ## _write_to_pe((P), *(P)), (P))
+#define __flatbuffers_define_fixed_array_primitives(NS, N, T)\
+static inline T *N ## _array_copy(T *p, const T *p2, size_t n)\
+{ memcpy(p, p2, n * sizeof(T)); return p; }\
+static inline T *N ## _array_copy_from_pe(T *p, const T *p2, size_t n)\
+{ size_t i; if (NS ## is_native_pe()) memcpy(p, p2, n * sizeof(T)); else\
+ for (i = 0; i < n; ++i) N ## _copy_from_pe(&p[i], &p2[i]); return p; }\
+static inline T *N ## _array_copy_to_pe(T *p, const T *p2, size_t n)\
+{ size_t i; if (NS ## is_native_pe()) memcpy(p, p2, n * sizeof(T)); else\
+ for (i = 0; i < n; ++i) N ## _copy_to_pe(&p[i], &p2[i]); return p; }
+#define __flatbuffers_define_scalar_primitives(NS, N, T)\
+static inline T *N ## _from_pe(T *p) { return __ ## NS ## from_pe(p, N); }\
+static inline T *N ## _to_pe(T *p) { return __ ## NS ## to_pe(p, N); }\
+static inline T *N ## _copy(T *p, const T *p2) { *p = *p2; return p; }\
+static inline T *N ## _copy_from_pe(T *p, const T *p2)\
+{ return __ ## NS ## copy_from_pe(p, p2, N); }\
+static inline T *N ## _copy_to_pe(T *p, const T *p2) \
+{ return __ ## NS ## copy_to_pe(p, p2, N); }\
+static inline T *N ## _assign(T *p, const T v0) { *p = v0; return p; }\
+static inline T *N ## _assign_from_pe(T *p, T v0)\
+{ *p = N ## _read_from_pe(&v0); return p; }\
+static inline T *N ## _assign_to_pe(T *p, T v0)\
+{ N ## _write_to_pe(p, v0); return p; }
+#define __flatbuffers_build_scalar(NS, N, T)\
+__ ## NS ## define_scalar_primitives(NS, N, T)\
+__ ## NS ## define_fixed_array_primitives(NS, N, T)\
+__ ## NS ## build_vector(NS, N, T, sizeof(T), sizeof(T))
+/* Depends on generated copy_to/from_pe functions, and the type. */
+#define __flatbuffers_define_struct_primitives(NS, N)\
+static inline N ## _t *N ##_to_pe(N ## _t *p)\
+{ if (!NS ## is_native_pe()) { N ## _copy_to_pe(p, p); }; return p; }\
+static inline N ## _t *N ##_from_pe(N ## _t *p)\
+{ if (!NS ## is_native_pe()) { N ## _copy_from_pe(p, p); }; return p; }\
+static inline N ## _t *N ## _clear(N ## _t *p) { return (N ## _t *)memset(p, 0, N ## __size()); }
+
+/* Depends on generated copy/assign_to/from_pe functions, and the type. */
+#define __flatbuffers_build_struct(NS, N, S, A, FID, TFID)\
+__ ## NS ## define_struct_primitives(NS, N)\
+typedef NS ## ref_t N ## _ref_t;\
+static inline N ## _t *N ## _start(NS ## builder_t *B)\
+{ return (N ## _t *)flatcc_builder_start_struct(B, S, A); }\
+static inline N ## _ref_t N ## _end(NS ## builder_t *B)\
+{ if (!NS ## is_native_pe()) { N ## _to_pe((N ## _t *)flatcc_builder_struct_edit(B)); }\
+ return flatcc_builder_end_struct(B); }\
+static inline N ## _ref_t N ## _end_pe(NS ## builder_t *B)\
+{ return flatcc_builder_end_struct(B); }\
+static inline N ## _ref_t N ## _create(NS ## builder_t *B __ ## N ## _formal_args)\
+{ N ## _t *_p = N ## _start(B); if (!_p) return 0; N ##_assign_to_pe(_p __ ## N ## _call_args);\
+ return N ## _end_pe(B); }\
+static inline N ## _ref_t N ## _clone(NS ## builder_t *B, N ## _struct_t p)\
+{ N ## _t *_p; __flatbuffers_memoize_begin(B, p); _p = N ## _start(B); if (!_p) return 0;\
+ N ## _copy(_p, p); __flatbuffers_memoize_end(B, p, N ##_end_pe(B)); }\
+__flatbuffers_build_vector(NS, N, N ## _t, S, A)\
+__flatbuffers_build_struct_root(NS, N, A, FID, TFID)\
+
+#define __flatbuffers_struct_clear_field(p) memset((p), 0, sizeof(*(p)))
+#define __flatbuffers_build_table(NS, N, K)\
+static inline int N ## _start(NS ## builder_t *B)\
+{ return flatcc_builder_start_table(B, K); }\
+static inline N ## _ref_t N ## _end(NS ## builder_t *B)\
+{ FLATCC_ASSERT(flatcc_builder_check_required(B, __ ## N ## _required,\
+ sizeof(__ ## N ## _required) / sizeof(__ ## N ## _required[0]) - 1));\
+ return flatcc_builder_end_table(B); }\
+__flatbuffers_build_offset_vector(NS, N)
+
+#define __flatbuffers_build_table_field(ID, NS, N, TN, TT)\
+static inline int N ## _add(NS ## builder_t *B, TN ## _ref_t ref)\
+{ TN ## _ref_t *_p; return (ref && (_p = flatcc_builder_table_add_offset(B, ID))) ?\
+ ((*_p = ref), 0) : -1; }\
+static inline int N ## _start(NS ## builder_t *B)\
+{ return TN ## _start(B); }\
+static inline int N ## _end(NS ## builder_t *B)\
+{ return N ## _add(B, TN ## _end(B)); }\
+static inline TN ## _ref_t N ## _create(NS ## builder_t *B __ ## TN ##_formal_args)\
+{ return N ## _add(B, TN ## _create(B __ ## TN ## _call_args)); }\
+static inline int N ## _clone(NS ## builder_t *B, TN ## _table_t p)\
+{ return N ## _add(B, TN ## _clone(B, p)); }\
+static inline int N ## _pick(NS ## builder_t *B, TT ## _table_t t)\
+{ TN ## _table_t _p = N ## _get(t); return _p ? N ## _clone(B, _p) : 0; }
+
+#define __flatbuffers_build_union_field(ID, NS, N, TN, TT)\
+static inline int N ## _add(NS ## builder_t *B, TN ## _union_ref_t uref)\
+{ NS ## ref_t *_p; TN ## _union_type_t *_pt; if (uref.type == TN ## _NONE) return 0; if (uref.value == 0) return -1;\
+ if (!(_pt = (TN ## _union_type_t *)flatcc_builder_table_add(B, ID - 1, sizeof(*_pt), sizeof(*_pt)))) return -1;\
+ *_pt = uref.type; if (!(_p = flatcc_builder_table_add_offset(B, ID))) return -1; *_p = uref.value; return 0; }\
+static inline int N ## _add_type(NS ## builder_t *B, TN ## _union_type_t type)\
+{ TN ## _union_type_t *_pt; if (type == TN ## _NONE) return 0; return (_pt = (TN ## _union_type_t *)flatcc_builder_table_add(B, ID - 1,\
+ sizeof(*_pt), sizeof(*_pt))) ? ((*_pt = type), 0) : -1; }\
+static inline int N ## _add_value(NS ## builder_t *B, TN ## _union_ref_t uref)\
+{ NS ## ref_t *p; if (uref.type == TN ## _NONE) return 0; return (p = flatcc_builder_table_add_offset(B, ID)) ?\
+ ((*p = uref.value), 0) : -1; }\
+static inline int N ## _clone(NS ## builder_t *B, TN ## _union_t p)\
+{ return N ## _add(B, TN ## _clone(B, p)); }\
+static inline int N ## _pick(NS ## builder_t *B, TT ## _table_t t)\
+{ TN ## _union_t _p = N ## _union(t); return _p.type ? N ## _clone(B, _p) : 0; }
+
+/* M is the union value name and T is its type, i.e. the qualified name. */
+#define __flatbuffers_build_union_table_value_field(NS, N, NU, M, T)\
+static inline int N ## _ ## M ## _add(NS ## builder_t *B, T ## _ref_t ref)\
+{ return N ## _add(B, NU ## _as_ ## M (ref)); }\
+static inline int N ## _ ## M ## _start(NS ## builder_t *B)\
+{ return T ## _start(B); }\
+static inline int N ## _ ## M ## _end(NS ## builder_t *B)\
+{ T ## _ref_t ref = T ## _end(B);\
+ return ref ? N ## _ ## M ## _add(B, ref) : -1; }\
+static inline int N ## _ ## M ## _create(NS ## builder_t *B __ ## T ##_formal_args)\
+{ T ## _ref_t ref = T ## _create(B __ ## T ## _call_args);\
+ return ref ? N ## _add(B, NU ## _as_ ## M(ref)) : -1; }\
+static inline int N ## _ ## M ## _clone(NS ## builder_t *B, T ## _table_t t)\
+{ T ## _ref_t ref = T ## _clone(B, t);\
+ return ref ? N ## _add(B, NU ## _as_ ## M(ref)) : -1; }
+
+/* M is the union value name and T is its type, i.e. the qualified name. */
+#define __flatbuffers_build_union_struct_value_field(NS, N, NU, M, T)\
+static inline int N ## _ ## M ## _add(NS ## builder_t *B, T ## _ref_t ref)\
+{ return N ## _add(B, NU ## _as_ ## M (ref)); }\
+static inline T ## _t *N ## _ ## M ## _start(NS ## builder_t *B)\
+{ return T ## _start(B); }\
+static inline int N ## _ ## M ## _end(NS ## builder_t *B)\
+{ T ## _ref_t ref = T ## _end(B);\
+ return ref ? N ## _ ## M ## _add(B, ref) : -1; }\
+static inline int N ## _ ## M ## _create(NS ## builder_t *B __ ## T ##_formal_args)\
+{ T ## _ref_t ref = T ## _create(B __ ## T ## _call_args);\
+ return ref ? N ## _add(B, NU ## _as_ ## M(ref)) : -1; }\
+static inline int N ## _ ## M ## _end_pe(NS ## builder_t *B)\
+{ T ## _ref_t ref = T ## _end_pe(B);\
+ return ref ? N ## _add(B, NU ## _as_ ## M(ref)) : -1; }\
+static inline int N ## _ ## M ## _clone(NS ## builder_t *B, T ## _struct_t p)\
+{ T ## _ref_t ref = T ## _clone(B, p);\
+ return ref ? N ## _add(B, NU ## _as_ ## M(ref)) : -1; }
+#define __flatbuffers_build_union_string_value_field(NS, N, NU, M)\
+static inline int N ## _ ## M ## _add(NS ## builder_t *B, NS ## string_ref_t ref)\
+{ return N ## _add(B, NU ## _as_ ## M (ref)); }\
+__flatbuffers_build_string_field_ops(NS, N ## _ ## M)
+
+/* NS: common namespace, ID: table field id (not offset), TN: name of type T, TT: name of table type
+ * S: sizeof of scalar type, A: alignment of type T, default value V of type T. */
+#define __flatbuffers_build_scalar_field(ID, NS, N, TN, T, S, A, V, TT)\
+static inline int N ## _add(NS ## builder_t *B, const T v)\
+{ T *_p; if (v == V) return 0; if (!(_p = (T *)flatcc_builder_table_add(B, ID, S, A))) return -1;\
+ TN ## _assign_to_pe(_p, v); return 0; }\
+static inline int N ## _force_add(NS ## builder_t *B, const T v)\
+{ T *_p; if (!(_p = (T *)flatcc_builder_table_add(B, ID, S, A))) return -1;\
+ TN ## _assign_to_pe(_p, v); return 0; }\
+/* Clone does not skip default values and expects pe endian content. */\
+static inline int N ## _clone(NS ## builder_t *B, const T *p)\
+{ return 0 == flatcc_builder_table_add_copy(B, ID, p, S, A) ? -1 : 0; }\
+/* Transferring a missing field is a nop success with 0 as result. */\
+static inline int N ## _pick(NS ## builder_t *B, TT ## _table_t t)\
+{ const T *_p = N ## _get_ptr(t); return _p ? N ## _clone(B, _p) : 0; }
+
+/* NS: common namespace, ID: table field id (not offset), TN: name of type T, TT: name of table type
+ * S: sizeof of scalar type, A: alignment of type T. */
+#define __flatbuffers_build_scalar_optional_field(ID, NS, N, TN, T, S, A, TT)\
+static inline int N ## _add(NS ## builder_t *B, const T v)\
+{ T *_p; if (!(_p = (T *)flatcc_builder_table_add(B, ID, S, A))) return -1;\
+ TN ## _assign_to_pe(_p, v); return 0; }\
+/* Clone does not skip default values and expects pe endian content. */\
+static inline int N ## _clone(NS ## builder_t *B, const T *p)\
+{ return 0 == flatcc_builder_table_add_copy(B, ID, p, S, A) ? -1 : 0; }\
+/* Transferring a missing field is a nop success with 0 as result. */\
+static inline int N ## _pick(NS ## builder_t *B, TT ## _table_t t)\
+{ const T *_p = N ## _get_ptr(t); return _p ? N ## _clone(B, _p) : 0; }
+
+#define __flatbuffers_build_struct_field(ID, NS, N, TN, S, A, TT)\
+static inline TN ## _t *N ## _start(NS ## builder_t *B)\
+{ return (TN ## _t *)flatcc_builder_table_add(B, ID, S, A); }\
+static inline int N ## _end(NS ## builder_t *B)\
+{ if (!NS ## is_native_pe()) { TN ## _to_pe((TN ## _t *)flatcc_builder_table_edit(B, S)); } return 0; }\
+static inline int N ## _end_pe(NS ## builder_t *B) { return 0; }\
+static inline int N ## _create(NS ## builder_t *B __ ## TN ## _formal_args)\
+{ TN ## _t *_p = N ## _start(B); if (!_p) return -1; TN ##_assign_to_pe(_p __ ## TN ## _call_args);\
+ return 0; }\
+static inline int N ## _add(NS ## builder_t *B, const TN ## _t *p)\
+{ TN ## _t *_p = N ## _start(B); if (!_p) return -1; TN ##_copy_to_pe(_p, p); return 0; }\
+static inline int N ## _clone(NS ## builder_t *B, TN ## _struct_t p)\
+{ return 0 == flatcc_builder_table_add_copy(B, ID, p, S, A) ? -1 : 0; }\
+static inline int N ## _pick(NS ## builder_t *B, TT ## _table_t t)\
+{ TN ## _struct_t _p = N ## _get(t); return _p ? N ## _clone(B, _p) : 0; }
+
+#define __flatbuffers_build_vector_field(ID, NS, N, TN, T, TT)\
+static inline int N ## _add(NS ## builder_t *B, TN ## _vec_ref_t ref)\
+{ TN ## _vec_ref_t *_p; return (ref && (_p = flatcc_builder_table_add_offset(B, ID))) ? ((*_p = ref), 0) : -1; }\
+static inline int N ## _start(NS ## builder_t *B)\
+{ return TN ## _vec_start(B); }\
+static inline int N ## _end_pe(NS ## builder_t *B)\
+{ return N ## _add(B, TN ## _vec_end_pe(B)); }\
+static inline int N ## _end(NS ## builder_t *B)\
+{ return N ## _add(B, TN ## _vec_end(B)); }\
+static inline int N ## _create_pe(NS ## builder_t *B, const T *data, size_t len)\
+{ return N ## _add(B, TN ## _vec_create_pe(B, data, len)); }\
+static inline int N ## _create(NS ## builder_t *B, const T *data, size_t len)\
+{ return N ## _add(B, TN ## _vec_create(B, data, len)); }\
+static inline int N ## _slice(NS ## builder_t *B, TN ## _vec_t vec, size_t index, size_t len)\
+{ return N ## _add(B, TN ## _vec_slice(B, vec, index, len)); }\
+static inline int N ## _clone(NS ## builder_t *B, TN ## _vec_t vec)\
+{ return N ## _add(B, TN ## _vec_clone(B, vec)); }\
+static inline int N ## _pick(NS ## builder_t *B, TT ## _table_t t)\
+{ TN ## _vec_t _p = N ## _get(t); return _p ? N ## _clone(B, _p) : 0; }\
+__flatbuffers_build_vector_ops(NS, N, N, TN, T)\
+
+#define __flatbuffers_build_offset_vector_field(ID, NS, N, TN, TT)\
+static inline int N ## _add(NS ## builder_t *B, TN ## _vec_ref_t ref)\
+{ TN ## _vec_ref_t *_p; return (ref && (_p = flatcc_builder_table_add_offset(B, ID))) ? ((*_p = ref), 0) : -1; }\
+static inline int N ## _start(NS ## builder_t *B)\
+{ return flatcc_builder_start_offset_vector(B); }\
+static inline int N ## _end(NS ## builder_t *B)\
+{ return N ## _add(B, flatcc_builder_end_offset_vector(B)); }\
+static inline int N ## _create(NS ## builder_t *B, const TN ## _ref_t *data, size_t len)\
+{ return N ## _add(B, flatcc_builder_create_offset_vector(B, data, len)); }\
+__flatbuffers_build_offset_vector_ops(NS, N, N, TN)\
+static inline int N ## _clone(NS ## builder_t *B, TN ## _vec_t vec)\
+{ return N ## _add(B, TN ## _vec_clone(B, vec)); }\
+static inline int N ## _pick(NS ## builder_t *B, TT ## _table_t t)\
+{ TN ## _vec_t _p = N ## _get(t); return _p ? N ## _clone(B, _p) : 0; }
+
+/* depends on N ## _add which differs for union member fields and ordinary fields */\
+#define __flatbuffers_build_string_field_ops(NS, N)\
+static inline int N ## _start(NS ## builder_t *B)\
+{ return flatcc_builder_start_string(B); }\
+static inline int N ## _end(NS ## builder_t *B)\
+{ return N ## _add(B, flatcc_builder_end_string(B)); }\
+static inline int N ## _create(NS ## builder_t *B, const char *s, size_t len)\
+{ return N ## _add(B, flatcc_builder_create_string(B, s, len)); }\
+static inline int N ## _create_str(NS ## builder_t *B, const char *s)\
+{ return N ## _add(B, flatcc_builder_create_string_str(B, s)); }\
+static inline int N ## _create_strn(NS ## builder_t *B, const char *s, size_t max_len)\
+{ return N ## _add(B, flatcc_builder_create_string_strn(B, s, max_len)); }\
+static inline int N ## _clone(NS ## builder_t *B, NS ## string_t string)\
+{ return N ## _add(B, NS ## string_clone(B, string)); }\
+static inline int N ## _slice(NS ## builder_t *B, NS ## string_t string, size_t index, size_t len)\
+{ return N ## _add(B, NS ## string_slice(B, string, index, len)); }\
+__flatbuffers_build_string_ops(NS, N)
+
+#define __flatbuffers_build_string_field(ID, NS, N, TT)\
+static inline int N ## _add(NS ## builder_t *B, NS ## string_ref_t ref)\
+{ NS ## string_ref_t *_p; return (ref && (_p = flatcc_builder_table_add_offset(B, ID))) ? ((*_p = ref), 0) : -1; }\
+__flatbuffers_build_string_field_ops(NS, N)\
+static inline int N ## _pick(NS ## builder_t *B, TT ## _table_t t)\
+{ NS ## string_t _p = N ## _get(t); return _p ? N ## _clone(B, _p) : 0; }
+
+#define __flatbuffers_build_table_vector_field(ID, NS, N, TN, TT)\
+__flatbuffers_build_offset_vector_field(ID, NS, N, TN, TT)\
+__flatbuffers_build_table_vector_ops(NS, N, TN)
+
+#define __flatbuffers_build_union_vector_field(ID, NS, N, TN, TT)\
+static inline int N ## _add(NS ## builder_t *B, TN ## _union_vec_ref_t uvref)\
+{ NS ## vec_ref_t *_p; if (!uvref.type || !uvref.value) return uvref.type == uvref.value ? 0 : -1;\
+ if (!(_p = flatcc_builder_table_add_offset(B, ID - 1))) return -1; *_p = uvref.type;\
+ if (!(_p = flatcc_builder_table_add_offset(B, ID))) return -1; *_p = uvref.value; return 0; }\
+static inline int N ## _start(NS ## builder_t *B)\
+{ return flatcc_builder_start_union_vector(B); }\
+static inline int N ## _end(NS ## builder_t *B)\
+{ return N ## _add(B, flatcc_builder_end_union_vector(B)); }\
+static inline int N ## _create(NS ## builder_t *B, const TN ## _union_ref_t *data, size_t len)\
+{ return N ## _add(B, flatcc_builder_create_union_vector(B, data, len)); }\
+__flatbuffers_build_union_vector_ops(NS, N, N, TN)\
+static inline int N ## _clone(NS ## builder_t *B, TN ## _union_vec_t vec)\
+{ return N ## _add(B, TN ## _vec_clone(B, vec)); }\
+static inline int N ## _pick(NS ## builder_t *B, TT ## _table_t t)\
+{ TN ## _union_vec_t _p = N ## _union(t); return _p.type ? N ## _clone(B, _p) : 0; }
+
+#define __flatbuffers_build_union_table_vector_value_field(NS, N, NU, M, T)\
+static inline int N ## _ ## M ## _push_start(NS ## builder_t *B)\
+{ return T ## _start(B); }\
+static inline NU ## _union_ref_t *N ## _ ## M ## _push_end(NS ## builder_t *B)\
+{ return NU ## _vec_push(B, NU ## _as_ ## M (T ## _end(B))); }\
+static inline NU ## _union_ref_t *N ## _ ## M ## _push(NS ## builder_t *B, T ## _ref_t ref)\
+{ return NU ## _vec_push(B, NU ## _as_ ## M (ref)); }\
+static inline NU ## _union_ref_t *N ## _ ## M ## _push_create(NS ## builder_t *B __ ## T ##_formal_args)\
+{ return NU ## _vec_push(B, NU ## _as_ ## M(T ## _create(B __ ## T ## _call_args))); }\
+static inline NU ## _union_ref_t *N ## _ ## M ## _push_clone(NS ## builder_t *B, T ## _table_t t)\
+{ return NU ## _vec_push(B, NU ## _as_ ## M(T ## _clone(B, t))); }
+
+#define __flatbuffers_build_union_struct_vector_value_field(NS, N, NU, M, T)\
+static inline T ## _t *N ## _ ## M ## _push_start(NS ## builder_t *B)\
+{ return T ## _start(B); }\
+static inline NU ## _union_ref_t *N ## _ ## M ## _push_end(NS ## builder_t *B)\
+{ return NU ## _vec_push(B, NU ## _as_ ## M (T ## _end(B))); }\
+static inline NU ## _union_ref_t *N ## _ ## M ## _push(NS ## builder_t *B, T ## _ref_t ref)\
+{ return NU ## _vec_push(B, NU ## _as_ ## M (ref)); }\
+static inline NU ## _union_ref_t *N ## _ ## M ## _push_create(NS ## builder_t *B __ ## T ##_formal_args)\
+{ return NU ## _vec_push(B, NU ## _as_ ## M(T ## _create(B __ ## T ## _call_args))); }\
+static inline NU ## _union_ref_t *N ## _ ## M ## _push_clone(NS ## builder_t *B, T ## _struct_t p)\
+{ return NU ## _vec_push(B, NU ## _as_ ## M(T ## _clone(B, p))); }
+
+#define __flatbuffers_build_union_string_vector_value_field(NS, N, NU, M)\
+static inline NU ## _union_ref_t *N ## _ ## M ## _push(NS ## builder_t *B, NS ## string_ref_t ref)\
+{ return NU ## _vec_push(B, NU ## _as_ ## M (ref)); }\
+static inline int N ## _ ## M ## _push_start(NS ## builder_t *B)\
+{ return NS ## string_start(B); }\
+static inline NU ## _union_ref_t *N ## _ ## M ## _push_end(NS ## builder_t *B)\
+{ return NU ## _vec_push(B, NU ## _as_ ## M(NS ## string_end(B))); }\
+static inline NU ## _union_ref_t *N ## _ ## M ## _push_create(NS ## builder_t *B, const char *s, size_t len)\
+{ return NU ## _vec_push(B, NU ## _as_ ## M(NS ## string_create(B, s, len))); }\
+static inline NU ## _union_ref_t *N ## _ ## M ## _push_create_str(NS ## builder_t *B, const char *s)\
+{ return NU ## _vec_push(B, NU ## _as_ ## M(NS ## string_create_str(B, s))); }\
+static inline NU ## _union_ref_t *N ## _ ## M ## _push_create_strn(NS ## builder_t *B, const char *s, size_t max_len)\
+{ return NU ## _vec_push(B, NU ## _as_ ## M(NS ## string_create_strn(B, s, max_len))); }\
+static inline NU ## _union_ref_t *N ## _ ## M ## _push_clone(NS ## builder_t *B, NS ## string_t string)\
+{ return NU ## _vec_push(B, NU ## _as_ ## M(NS ## string_clone(B, string))); }\
+static inline NU ## _union_ref_t *N ## _ ## M ## _push_slice(NS ## builder_t *B, NS ## string_t string, size_t index, size_t len)\
+{ return NU ## _vec_push(B, NU ## _as_ ## M(NS ## string_slice(B, string, index, len))); }
+
+#define __flatbuffers_build_string_vector_field(ID, NS, N, TT)\
+__flatbuffers_build_offset_vector_field(ID, NS, N, NS ## string, TT)\
+__flatbuffers_build_string_vector_ops(NS, N)
+
+#define __flatbuffers_char_formal_args , char v0
+#define __flatbuffers_char_call_args , v0
+#define __flatbuffers_uint8_formal_args , uint8_t v0
+#define __flatbuffers_uint8_call_args , v0
+#define __flatbuffers_int8_formal_args , int8_t v0
+#define __flatbuffers_int8_call_args , v0
+#define __flatbuffers_bool_formal_args , flatbuffers_bool_t v0
+#define __flatbuffers_bool_call_args , v0
+#define __flatbuffers_uint16_formal_args , uint16_t v0
+#define __flatbuffers_uint16_call_args , v0
+#define __flatbuffers_uint32_formal_args , uint32_t v0
+#define __flatbuffers_uint32_call_args , v0
+#define __flatbuffers_uint64_formal_args , uint64_t v0
+#define __flatbuffers_uint64_call_args , v0
+#define __flatbuffers_int16_formal_args , int16_t v0
+#define __flatbuffers_int16_call_args , v0
+#define __flatbuffers_int32_formal_args , int32_t v0
+#define __flatbuffers_int32_call_args , v0
+#define __flatbuffers_int64_formal_args , int64_t v0
+#define __flatbuffers_int64_call_args , v0
+#define __flatbuffers_float_formal_args , float v0
+#define __flatbuffers_float_call_args , v0
+#define __flatbuffers_double_formal_args , double v0
+#define __flatbuffers_double_call_args , v0
+
+__flatbuffers_build_scalar(flatbuffers_, flatbuffers_char, char)
+__flatbuffers_build_scalar(flatbuffers_, flatbuffers_uint8, uint8_t)
+__flatbuffers_build_scalar(flatbuffers_, flatbuffers_int8, int8_t)
+__flatbuffers_build_scalar(flatbuffers_, flatbuffers_bool, flatbuffers_bool_t)
+__flatbuffers_build_scalar(flatbuffers_, flatbuffers_uint16, uint16_t)
+__flatbuffers_build_scalar(flatbuffers_, flatbuffers_uint32, uint32_t)
+__flatbuffers_build_scalar(flatbuffers_, flatbuffers_uint64, uint64_t)
+__flatbuffers_build_scalar(flatbuffers_, flatbuffers_int16, int16_t)
+__flatbuffers_build_scalar(flatbuffers_, flatbuffers_int32, int32_t)
+__flatbuffers_build_scalar(flatbuffers_, flatbuffers_int64, int64_t)
+__flatbuffers_build_scalar(flatbuffers_, flatbuffers_float, float)
+__flatbuffers_build_scalar(flatbuffers_, flatbuffers_double, double)
+
+__flatbuffers_build_string(flatbuffers_)
+
+__flatbuffers_build_buffer(flatbuffers_)
+#include "flatcc/flatcc_epilogue.h"
+#endif /* FLATBUFFERS_COMMON_BUILDER_H */
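
The macros above are only templates; they become concrete functions once a
generated schema header instantiates them (meta_builder.h later in this
patch does so for NdbEventMeta). A rough usage sketch of the resulting
builder API, assuming the stock flatcc runtime and the NdbEventMeta
bindings added below; the value 123 is illustrative:

    #include <stdlib.h>
    #include "flatcc/flatcc_builder.h"
    #include "meta_builder.h"

    static void *build_example(size_t *size_out)
    {
        flatcc_builder_t builder, *B = &builder;
        flatcc_builder_init(B);

        /* start_as_root/end_as_root come from the table prolog macro;
         * received_at_add comes from __flatbuffers_build_scalar_field
         * and is a no-op for the default value 0 (use _force_add to
         * write it unconditionally). */
        NdbEventMeta_start_as_root(B);
        NdbEventMeta_received_at_add(B, 123);
        NdbEventMeta_end_as_root(B);

        /* Caller frees the returned buffer, then clears the builder. */
        void *buf = flatcc_builder_finalize_buffer(B, size_out);
        flatcc_builder_clear(B);
        return buf;
    }
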
diff --git a/nostrdb/bindings/c/flatbuffers_common_reader.h b/nostrdb/bindings/c/flatbuffers_common_reader.h
@@ -0,0 +1,578 @@
+#ifndef FLATBUFFERS_COMMON_READER_H
+#define FLATBUFFERS_COMMON_READER_H
+
+/* Generated by flatcc 0.6.1 FlatBuffers schema compiler for C by dvide.com */
+
+/* Common FlatBuffers read functionality for C. */
+
+#include "flatcc_prologue.h"
+#include "flatcc_flatbuffers.h"
+
+
+#define __flatbuffers_read_scalar_at_byteoffset(N, p, o) N ## _read_from_pe((uint8_t *)(p) + (o))
+#define __flatbuffers_read_scalar(N, p) N ## _read_from_pe(p)
+#define __flatbuffers_read_vt(ID, offset, t)\
+flatbuffers_voffset_t offset = 0;\
+{ flatbuffers_voffset_t id__tmp, *vt__tmp;\
+ FLATCC_ASSERT(t != 0 && "null pointer table access");\
+ id__tmp = ID;\
+ vt__tmp = (flatbuffers_voffset_t *)((uint8_t *)(t) -\
+ __flatbuffers_soffset_read_from_pe(t));\
+ if (__flatbuffers_voffset_read_from_pe(vt__tmp) >= sizeof(vt__tmp[0]) * (id__tmp + 3u)) {\
+ offset = __flatbuffers_voffset_read_from_pe(vt__tmp + id__tmp + 2);\
+ }\
+}
+#define __flatbuffers_field_present(ID, t) { __flatbuffers_read_vt(ID, offset__tmp, t) return offset__tmp != 0; }
+#define __flatbuffers_scalar_field(T, ID, t)\
+{\
+ __flatbuffers_read_vt(ID, offset__tmp, t)\
+ if (offset__tmp) {\
+ return (const T *)((uint8_t *)(t) + offset__tmp);\
+ }\
+ return 0;\
+}
+#define __flatbuffers_define_scalar_field(ID, N, NK, TK, T, V)\
+static inline T N ## _ ## NK ## _get(N ## _table_t t__tmp)\
+{ __flatbuffers_read_vt(ID, offset__tmp, t__tmp)\
+ return offset__tmp ? __flatbuffers_read_scalar_at_byteoffset(TK, t__tmp, offset__tmp) : V;\
+}\
+static inline T N ## _ ## NK(N ## _table_t t__tmp)\
+{ __flatbuffers_read_vt(ID, offset__tmp, t__tmp)\
+ return offset__tmp ? __flatbuffers_read_scalar_at_byteoffset(TK, t__tmp, offset__tmp) : V;\
+}\
+static inline const T *N ## _ ## NK ## _get_ptr(N ## _table_t t__tmp)\
+__flatbuffers_scalar_field(T, ID, t__tmp)\
+static inline int N ## _ ## NK ## _is_present(N ## _table_t t__tmp)\
+__flatbuffers_field_present(ID, t__tmp)\
+__flatbuffers_define_scan_by_scalar_field(N, NK, T)
+#define __flatbuffers_define_scalar_optional_field(ID, N, NK, TK, T, V)\
+__flatbuffers_define_scalar_field(ID, N, NK, TK, T, V)\
+static inline TK ## _option_t N ## _ ## NK ## _option(N ## _table_t t__tmp)\
+{ TK ## _option_t ret; __flatbuffers_read_vt(ID, offset__tmp, t__tmp)\
+ ret.is_null = offset__tmp == 0; ret.value = offset__tmp ?\
+ __flatbuffers_read_scalar_at_byteoffset(TK, t__tmp, offset__tmp) : V;\
+ return ret; }
+#define __flatbuffers_struct_field(T, ID, t, r)\
+{\
+ __flatbuffers_read_vt(ID, offset__tmp, t)\
+ if (offset__tmp) {\
+ return (T)((uint8_t *)(t) + offset__tmp);\
+ }\
+ FLATCC_ASSERT(!(r) && "required field missing");\
+ return 0;\
+}
+#define __flatbuffers_offset_field(T, ID, t, r, adjust)\
+{\
+ flatbuffers_uoffset_t *elem__tmp;\
+ __flatbuffers_read_vt(ID, offset__tmp, t)\
+ if (offset__tmp) {\
+ elem__tmp = (flatbuffers_uoffset_t *)((uint8_t *)(t) + offset__tmp);\
+ /* Add sizeof so C api can have raw access past header field. */\
+ return (T)((uint8_t *)(elem__tmp) + adjust +\
+ __flatbuffers_uoffset_read_from_pe(elem__tmp));\
+ }\
+ FLATCC_ASSERT(!(r) && "required field missing");\
+ return 0;\
+}
+#define __flatbuffers_vector_field(T, ID, t, r) __flatbuffers_offset_field(T, ID, t, r, sizeof(flatbuffers_uoffset_t))
+#define __flatbuffers_table_field(T, ID, t, r) __flatbuffers_offset_field(T, ID, t, r, 0)
+#define __flatbuffers_define_struct_field(ID, N, NK, T, r)\
+static inline T N ## _ ## NK ## _get(N ## _table_t t__tmp)\
+__flatbuffers_struct_field(T, ID, t__tmp, r)\
+static inline T N ## _ ## NK(N ## _table_t t__tmp)\
+__flatbuffers_struct_field(T, ID, t__tmp, r)\
+static inline int N ## _ ## NK ## _is_present(N ## _table_t t__tmp)\
+__flatbuffers_field_present(ID, t__tmp)
+#define __flatbuffers_define_vector_field(ID, N, NK, T, r)\
+static inline T N ## _ ## NK ## _get(N ## _table_t t__tmp)\
+__flatbuffers_vector_field(T, ID, t__tmp, r)\
+static inline T N ## _ ## NK(N ## _table_t t__tmp)\
+__flatbuffers_vector_field(T, ID, t__tmp, r)\
+static inline int N ## _ ## NK ## _is_present(N ## _table_t t__tmp)\
+__flatbuffers_field_present(ID, t__tmp)
+#define __flatbuffers_define_table_field(ID, N, NK, T, r)\
+static inline T N ## _ ## NK ## _get(N ## _table_t t__tmp)\
+__flatbuffers_table_field(T, ID, t__tmp, r)\
+static inline T N ## _ ## NK(N ## _table_t t__tmp)\
+__flatbuffers_table_field(T, ID, t__tmp, r)\
+static inline int N ## _ ## NK ## _is_present(N ## _table_t t__tmp)\
+__flatbuffers_field_present(ID, t__tmp)
+#define __flatbuffers_define_string_field(ID, N, NK, r)\
+static inline flatbuffers_string_t N ## _ ## NK ## _get(N ## _table_t t__tmp)\
+__flatbuffers_vector_field(flatbuffers_string_t, ID, t__tmp, r)\
+static inline flatbuffers_string_t N ## _ ## NK(N ## _table_t t__tmp)\
+__flatbuffers_vector_field(flatbuffers_string_t, ID, t__tmp, r)\
+static inline int N ## _ ## NK ## _is_present(N ## _table_t t__tmp)\
+__flatbuffers_field_present(ID, t__tmp)\
+__flatbuffers_define_scan_by_string_field(N, NK)
+#define __flatbuffers_vec_len(vec)\
+{ return (vec) ? (size_t)__flatbuffers_uoffset_read_from_pe((flatbuffers_uoffset_t *)vec - 1) : 0; }
+#define __flatbuffers_string_len(s) __flatbuffers_vec_len(s)
+static inline size_t flatbuffers_vec_len(const void *vec)
+__flatbuffers_vec_len(vec)
+#define __flatbuffers_scalar_vec_at(N, vec, i)\
+{ FLATCC_ASSERT(flatbuffers_vec_len(vec) > (i) && "index out of range");\
+ return __flatbuffers_read_scalar(N, &(vec)[i]); }
+#define __flatbuffers_struct_vec_at(vec, i)\
+{ FLATCC_ASSERT(flatbuffers_vec_len(vec) > (i) && "index out of range"); return (vec) + (i); }
+/* `adjust` skips past the header for string vectors. */
+#define __flatbuffers_offset_vec_at(T, vec, i, adjust)\
+{ const flatbuffers_uoffset_t *elem__tmp = (vec) + (i);\
+ FLATCC_ASSERT(flatbuffers_vec_len(vec) > (i) && "index out of range");\
+ return (T)((uint8_t *)(elem__tmp) + (size_t)__flatbuffers_uoffset_read_from_pe(elem__tmp) + (adjust)); }
+#define __flatbuffers_define_scalar_vec_len(N)\
+static inline size_t N ## _vec_len(N ##_vec_t vec__tmp)\
+{ return flatbuffers_vec_len(vec__tmp); }
+#define __flatbuffers_define_scalar_vec_at(N, T) \
+static inline T N ## _vec_at(N ## _vec_t vec__tmp, size_t i__tmp)\
+__flatbuffers_scalar_vec_at(N, vec__tmp, i__tmp)
+typedef const char *flatbuffers_string_t;
+static inline size_t flatbuffers_string_len(flatbuffers_string_t s)
+__flatbuffers_string_len(s)
+typedef const flatbuffers_uoffset_t *flatbuffers_string_vec_t;
+typedef flatbuffers_uoffset_t *flatbuffers_string_mutable_vec_t;
+static inline size_t flatbuffers_string_vec_len(flatbuffers_string_vec_t vec)
+__flatbuffers_vec_len(vec)
+static inline flatbuffers_string_t flatbuffers_string_vec_at(flatbuffers_string_vec_t vec, size_t i)
+__flatbuffers_offset_vec_at(flatbuffers_string_t, vec, i, sizeof(vec[0]))
+typedef const void *flatbuffers_generic_t;
+typedef void *flatbuffers_mutable_generic_t;
+static inline flatbuffers_string_t flatbuffers_string_cast_from_generic(const flatbuffers_generic_t p)
+{ return p ? ((const char *)p) + __flatbuffers_uoffset__size() : 0; }
+typedef const flatbuffers_uoffset_t *flatbuffers_generic_vec_t;
+typedef flatbuffers_uoffset_t *flatbuffers_generic_table_mutable_vec_t;
+static inline size_t flatbuffers_generic_vec_len(flatbuffers_generic_vec_t vec)
+__flatbuffers_vec_len(vec)
+static inline flatbuffers_generic_t flatbuffers_generic_vec_at(flatbuffers_generic_vec_t vec, size_t i)
+__flatbuffers_offset_vec_at(flatbuffers_generic_t, vec, i, 0)
+static inline flatbuffers_generic_t flatbuffers_generic_vec_at_as_string(flatbuffers_generic_vec_t vec, size_t i)
+__flatbuffers_offset_vec_at(flatbuffers_generic_t, vec, i, sizeof(vec[0]))
+typedef struct flatbuffers_union {
+ flatbuffers_union_type_t type;
+ flatbuffers_generic_t value;
+} flatbuffers_union_t;
+typedef struct flatbuffers_union_vec {
+ const flatbuffers_union_type_t *type;
+ const flatbuffers_uoffset_t *value;
+} flatbuffers_union_vec_t;
+typedef struct flatbuffers_mutable_union {
+ flatbuffers_union_type_t type;
+ flatbuffers_mutable_generic_t value;
+} flatbuffers_mutable_union_t;
+typedef struct flatbuffers_mutable_union_vec {
+ flatbuffers_union_type_t *type;
+ flatbuffers_uoffset_t *value;
+} flatbuffers_mutable_union_vec_t;
+static inline flatbuffers_mutable_union_t flatbuffers_mutable_union_cast(flatbuffers_union_t u__tmp)\
+{ flatbuffers_mutable_union_t mu = { u__tmp.type, (flatbuffers_mutable_generic_t)u__tmp.value };\
+ return mu; }
+static inline flatbuffers_mutable_union_vec_t flatbuffers_mutable_union_vec_cast(flatbuffers_union_vec_t uv__tmp)\
+{ flatbuffers_mutable_union_vec_t muv =\
+ { (flatbuffers_union_type_t *)uv__tmp.type, (flatbuffers_uoffset_t *)uv__tmp.value }; return muv; }
+#define __flatbuffers_union_type_field(ID, t)\
+{\
+ __flatbuffers_read_vt(ID, offset__tmp, t)\
+ return offset__tmp ? __flatbuffers_read_scalar_at_byteoffset(__flatbuffers_utype, t, offset__tmp) : 0;\
+}
+static inline flatbuffers_string_t flatbuffers_string_cast_from_union(const flatbuffers_union_t u__tmp)\
+{ return flatbuffers_string_cast_from_generic(u__tmp.value); }
+#define __flatbuffers_define_union_field(NS, ID, N, NK, T, r)\
+static inline T ## _union_type_t N ## _ ## NK ## _type_get(N ## _table_t t__tmp)\
+__## NS ## union_type_field(((ID) - 1), t__tmp)\
+static inline NS ## generic_t N ## _ ## NK ## _get(N ## _table_t t__tmp)\
+__## NS ## table_field(NS ## generic_t, ID, t__tmp, r)\
+static inline T ## _union_type_t N ## _ ## NK ## _type(N ## _table_t t__tmp)\
+__## NS ## union_type_field(((ID) - 1), t__tmp)\
+static inline NS ## generic_t N ## _ ## NK(N ## _table_t t__tmp)\
+__## NS ## table_field(NS ## generic_t, ID, t__tmp, r)\
+static inline int N ## _ ## NK ## _is_present(N ## _table_t t__tmp)\
+__## NS ## field_present(ID, t__tmp)\
+static inline T ## _union_t N ## _ ## NK ## _union(N ## _table_t t__tmp)\
+{ T ## _union_t u__tmp = { 0, 0 }; u__tmp.type = N ## _ ## NK ## _type_get(t__tmp);\
+ if (u__tmp.type == 0) return u__tmp; u__tmp.value = N ## _ ## NK ## _get(t__tmp); return u__tmp; }\
+static inline NS ## string_t N ## _ ## NK ## _as_string(N ## _table_t t__tmp)\
+{ return NS ## string_cast_from_generic(N ## _ ## NK ## _get(t__tmp)); }\
+
+#define __flatbuffers_define_union_vector_ops(NS, T)\
+static inline size_t T ## _union_vec_len(T ## _union_vec_t uv__tmp)\
+{ return NS ## vec_len(uv__tmp.type); }\
+static inline T ## _union_t T ## _union_vec_at(T ## _union_vec_t uv__tmp, size_t i__tmp)\
+{ T ## _union_t u__tmp = { 0, 0 }; size_t n__tmp = NS ## vec_len(uv__tmp.type);\
+ FLATCC_ASSERT(n__tmp > (i__tmp) && "index out of range"); u__tmp.type = uv__tmp.type[i__tmp];\
+ /* Unknown type is treated as NONE for schema evolution. */\
+ if (u__tmp.type == 0) return u__tmp;\
+ u__tmp.value = NS ## generic_vec_at(uv__tmp.value, i__tmp); return u__tmp; }\
+static inline NS ## string_t T ## _union_vec_at_as_string(T ## _union_vec_t uv__tmp, size_t i__tmp)\
+{ return (NS ## string_t) NS ## generic_vec_at_as_string(uv__tmp.value, i__tmp); }\
+
+#define __flatbuffers_define_union_vector(NS, T)\
+typedef NS ## union_vec_t T ## _union_vec_t;\
+typedef NS ## mutable_union_vec_t T ## _mutable_union_vec_t;\
+static inline T ## _mutable_union_vec_t T ## _mutable_union_vec_cast(T ## _union_vec_t u__tmp)\
+{ return NS ## mutable_union_vec_cast(u__tmp); }\
+__## NS ## define_union_vector_ops(NS, T)
+#define __flatbuffers_define_union(NS, T)\
+typedef NS ## union_t T ## _union_t;\
+typedef NS ## mutable_union_t T ## _mutable_union_t;\
+static inline T ## _mutable_union_t T ## _mutable_union_cast(T ## _union_t u__tmp)\
+{ return NS ## mutable_union_cast(u__tmp); }\
+__## NS ## define_union_vector(NS, T)
+#define __flatbuffers_define_union_vector_field(NS, ID, N, NK, T, r)\
+__## NS ## define_vector_field(ID - 1, N, NK ## _type, T ## _vec_t, r)\
+__## NS ## define_vector_field(ID, N, NK, flatbuffers_generic_vec_t, r)\
+static inline T ## _union_vec_t N ## _ ## NK ## _union(N ## _table_t t__tmp)\
+{ T ## _union_vec_t uv__tmp; uv__tmp.type = N ## _ ## NK ## _type_get(t__tmp);\
+ uv__tmp.value = N ## _ ## NK(t__tmp);\
+ FLATCC_ASSERT(NS ## vec_len(uv__tmp.type) == NS ## vec_len(uv__tmp.value)\
+ && "union vector type length mismatch"); return uv__tmp; }
+#include <string.h>
+static const size_t flatbuffers_not_found = (size_t)-1;
+static const size_t flatbuffers_end = (size_t)-1;
+#define __flatbuffers_identity(n) (n)
+#define __flatbuffers_min(a, b) ((a) < (b) ? (a) : (b))
+/* Subtraction doesn't work for unsigned types. */
+#define __flatbuffers_scalar_cmp(x, y, n) ((x) < (y) ? -1 : (x) > (y))
+static inline int __flatbuffers_string_n_cmp(flatbuffers_string_t v, const char *s, size_t n)
+{ size_t nv = flatbuffers_string_len(v); int x = strncmp(v, s, nv < n ? nv : n);
+ return x != 0 ? x : nv < n ? -1 : nv > n; }
+/* `n` arg unused, but needed by string find macro expansion. */
+static inline int __flatbuffers_string_cmp(flatbuffers_string_t v, const char *s, size_t n) { (void)n; return strcmp(v, s); }
+/* A = identity if searching scalar vectors rather than key fields. */
+/* Returns lowest matching index or not_found. */
+#define __flatbuffers_find_by_field(A, V, E, L, K, Kn, T, D)\
+{ T v__tmp; size_t a__tmp = 0, b__tmp, m__tmp; if (!(b__tmp = L(V))) { return flatbuffers_not_found; }\
+ --b__tmp;\
+ while (a__tmp < b__tmp) {\
+ m__tmp = a__tmp + ((b__tmp - a__tmp) >> 1);\
+ v__tmp = A(E(V, m__tmp));\
+ if ((D(v__tmp, (K), (Kn))) < 0) {\
+ a__tmp = m__tmp + 1;\
+ } else {\
+ b__tmp = m__tmp;\
+ }\
+ }\
+ if (a__tmp == b__tmp) {\
+ v__tmp = A(E(V, a__tmp));\
+ if (D(v__tmp, (K), (Kn)) == 0) {\
+ return a__tmp;\
+ }\
+ }\
+ return flatbuffers_not_found;\
+}
+#define __flatbuffers_find_by_scalar_field(A, V, E, L, K, T)\
+__flatbuffers_find_by_field(A, V, E, L, K, 0, T, __flatbuffers_scalar_cmp)
+#define __flatbuffers_find_by_string_field(A, V, E, L, K)\
+__flatbuffers_find_by_field(A, V, E, L, K, 0, flatbuffers_string_t, __flatbuffers_string_cmp)
+#define __flatbuffers_find_by_string_n_field(A, V, E, L, K, Kn)\
+__flatbuffers_find_by_field(A, V, E, L, K, Kn, flatbuffers_string_t, __flatbuffers_string_n_cmp)
+#define __flatbuffers_define_find_by_scalar_field(N, NK, TK)\
+static inline size_t N ## _vec_find_by_ ## NK(N ## _vec_t vec__tmp, TK key__tmp)\
+__flatbuffers_find_by_scalar_field(N ## _ ## NK, vec__tmp, N ## _vec_at, N ## _vec_len, key__tmp, TK)
+#define __flatbuffers_define_scalar_find(N, T)\
+static inline size_t N ## _vec_find(N ## _vec_t vec__tmp, T key__tmp)\
+__flatbuffers_find_by_scalar_field(__flatbuffers_identity, vec__tmp, N ## _vec_at, N ## _vec_len, key__tmp, T)
+#define __flatbuffers_define_find_by_string_field(N, NK) \
+/* Note: find only works on vectors sorted by this field. */\
+static inline size_t N ## _vec_find_by_ ## NK(N ## _vec_t vec__tmp, const char *s__tmp)\
+__flatbuffers_find_by_string_field(N ## _ ## NK, vec__tmp, N ## _vec_at, N ## _vec_len, s__tmp)\
+static inline size_t N ## _vec_find_n_by_ ## NK(N ## _vec_t vec__tmp, const char *s__tmp, size_t n__tmp)\
+__flatbuffers_find_by_string_n_field(N ## _ ## NK, vec__tmp, N ## _vec_at, N ## _vec_len, s__tmp, n__tmp)
+#define __flatbuffers_define_default_find_by_scalar_field(N, NK, TK)\
+static inline size_t N ## _vec_find(N ## _vec_t vec__tmp, TK key__tmp)\
+{ return N ## _vec_find_by_ ## NK(vec__tmp, key__tmp); }
+#define __flatbuffers_define_default_find_by_string_field(N, NK) \
+static inline size_t N ## _vec_find(N ## _vec_t vec__tmp, const char *s__tmp)\
+{ return N ## _vec_find_by_ ## NK(vec__tmp, s__tmp); }\
+static inline size_t N ## _vec_find_n(N ## _vec_t vec__tmp, const char *s__tmp, size_t n__tmp)\
+{ return N ## _vec_find_n_by_ ## NK(vec__tmp, s__tmp, n__tmp); }
+/* A = identity if searching scalar vectors rather than key fields. */
+/* Returns lowest matching index or not_found. */
+#define __flatbuffers_scan_by_field(b, e, A, V, E, L, K, Kn, T, D)\
+{ T v__tmp; size_t i__tmp;\
+ for (i__tmp = b; i__tmp < e; ++i__tmp) {\
+ v__tmp = A(E(V, i__tmp));\
+ if (D(v__tmp, (K), (Kn)) == 0) {\
+ return i__tmp;\
+ }\
+ }\
+ return flatbuffers_not_found;\
+}
+#define __flatbuffers_rscan_by_field(b, e, A, V, E, L, K, Kn, T, D)\
+{ T v__tmp; size_t i__tmp = e;\
+ while (i__tmp-- > b) {\
+ v__tmp = A(E(V, i__tmp));\
+ if (D(v__tmp, (K), (Kn)) == 0) {\
+ return i__tmp;\
+ }\
+ }\
+ return flatbuffers_not_found;\
+}
+#define __flatbuffers_scan_by_scalar_field(b, e, A, V, E, L, K, T)\
+__flatbuffers_scan_by_field(b, e, A, V, E, L, K, 0, T, __flatbuffers_scalar_cmp)
+#define __flatbuffers_scan_by_string_field(b, e, A, V, E, L, K)\
+__flatbuffers_scan_by_field(b, e, A, V, E, L, K, 0, flatbuffers_string_t, __flatbuffers_string_cmp)
+#define __flatbuffers_scan_by_string_n_field(b, e, A, V, E, L, K, Kn)\
+__flatbuffers_scan_by_field(b, e, A, V, E, L, K, Kn, flatbuffers_string_t, __flatbuffers_string_n_cmp)
+#define __flatbuffers_rscan_by_scalar_field(b, e, A, V, E, L, K, T)\
+__flatbuffers_rscan_by_field(b, e, A, V, E, L, K, 0, T, __flatbuffers_scalar_cmp)
+#define __flatbuffers_rscan_by_string_field(b, e, A, V, E, L, K)\
+__flatbuffers_rscan_by_field(b, e, A, V, E, L, K, 0, flatbuffers_string_t, __flatbuffers_string_cmp)
+#define __flatbuffers_rscan_by_string_n_field(b, e, A, V, E, L, K, Kn)\
+__flatbuffers_rscan_by_field(b, e, A, V, E, L, K, Kn, flatbuffers_string_t, __flatbuffers_string_n_cmp)
+#define __flatbuffers_define_scan_by_scalar_field(N, NK, T)\
+static inline size_t N ## _vec_scan_by_ ## NK(N ## _vec_t vec__tmp, T key__tmp)\
+__flatbuffers_scan_by_scalar_field(0, N ## _vec_len(vec__tmp), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, key__tmp, T)\
+static inline size_t N ## _vec_scan_ex_by_ ## NK(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, T key__tmp)\
+__flatbuffers_scan_by_scalar_field(begin__tmp, __flatbuffers_min(end__tmp, N ## _vec_len(vec__tmp)), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, key__tmp, T)\
+static inline size_t N ## _vec_rscan_by_ ## NK(N ## _vec_t vec__tmp, T key__tmp)\
+__flatbuffers_rscan_by_scalar_field(0, N ## _vec_len(vec__tmp), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, key__tmp, T)\
+static inline size_t N ## _vec_rscan_ex_by_ ## NK(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, T key__tmp)\
+__flatbuffers_rscan_by_scalar_field(begin__tmp, __flatbuffers_min(end__tmp, N ## _vec_len(vec__tmp)), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, key__tmp, T)
+#define __flatbuffers_define_scalar_scan(N, T)\
+static inline size_t N ## _vec_scan(N ## _vec_t vec__tmp, T key__tmp)\
+__flatbuffers_scan_by_scalar_field(0, N ## _vec_len(vec__tmp), __flatbuffers_identity, vec__tmp, N ## _vec_at, N ## _vec_len, key__tmp, T)\
+static inline size_t N ## _vec_scan_ex(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, T key__tmp)\
+__flatbuffers_scan_by_scalar_field(begin__tmp, __flatbuffers_min(end__tmp, N ## _vec_len(vec__tmp)), __flatbuffers_identity, vec__tmp, N ## _vec_at, N ## _vec_len, key__tmp, T)\
+static inline size_t N ## _vec_rscan(N ## _vec_t vec__tmp, T key__tmp)\
+__flatbuffers_rscan_by_scalar_field(0, N ## _vec_len(vec__tmp), __flatbuffers_identity, vec__tmp, N ## _vec_at, N ## _vec_len, key__tmp, T)\
+static inline size_t N ## _vec_rscan_ex(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, T key__tmp)\
+__flatbuffers_rscan_by_scalar_field(begin__tmp, __flatbuffers_min(end__tmp, N ## _vec_len(vec__tmp)), __flatbuffers_identity, vec__tmp, N ## _vec_at, N ## _vec_len, key__tmp, T)
+#define __flatbuffers_define_scan_by_string_field(N, NK) \
+static inline size_t N ## _vec_scan_by_ ## NK(N ## _vec_t vec__tmp, const char *s__tmp)\
+__flatbuffers_scan_by_string_field(0, N ## _vec_len(vec__tmp), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, s__tmp)\
+static inline size_t N ## _vec_scan_n_by_ ## NK(N ## _vec_t vec__tmp, const char *s__tmp, size_t n__tmp)\
+__flatbuffers_scan_by_string_n_field(0, N ## _vec_len(vec__tmp), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, s__tmp, n__tmp)\
+static inline size_t N ## _vec_scan_ex_by_ ## NK(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, const char *s__tmp)\
+__flatbuffers_scan_by_string_field(begin__tmp, __flatbuffers_min(end__tmp, N ## _vec_len(vec__tmp)), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, s__tmp)\
+static inline size_t N ## _vec_scan_ex_n_by_ ## NK(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, const char *s__tmp, size_t n__tmp)\
+__flatbuffers_scan_by_string_n_field(begin__tmp, __flatbuffers_min( end__tmp, N ## _vec_len(vec__tmp)), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, s__tmp, n__tmp)\
+static inline size_t N ## _vec_rscan_by_ ## NK(N ## _vec_t vec__tmp, const char *s__tmp)\
+__flatbuffers_rscan_by_string_field(0, N ## _vec_len(vec__tmp), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, s__tmp)\
+static inline size_t N ## _vec_rscan_n_by_ ## NK(N ## _vec_t vec__tmp, const char *s__tmp, size_t n__tmp)\
+__flatbuffers_rscan_by_string_n_field(0, N ## _vec_len(vec__tmp), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, s__tmp, n__tmp)\
+static inline size_t N ## _vec_rscan_ex_by_ ## NK(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, const char *s__tmp)\
+__flatbuffers_rscan_by_string_field(begin__tmp, __flatbuffers_min(end__tmp, N ## _vec_len(vec__tmp)), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, s__tmp)\
+static inline size_t N ## _vec_rscan_ex_n_by_ ## NK(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, const char *s__tmp, size_t n__tmp)\
+__flatbuffers_rscan_by_string_n_field(begin__tmp, __flatbuffers_min( end__tmp, N ## _vec_len(vec__tmp)), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, s__tmp, n__tmp)
+#define __flatbuffers_define_default_scan_by_scalar_field(N, NK, TK)\
+static inline size_t N ## _vec_scan(N ## _vec_t vec__tmp, TK key__tmp)\
+{ return N ## _vec_scan_by_ ## NK(vec__tmp, key__tmp); }\
+static inline size_t N ## _vec_scan_ex(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, TK key__tmp)\
+{ return N ## _vec_scan_ex_by_ ## NK(vec__tmp, begin__tmp, end__tmp, key__tmp); }\
+static inline size_t N ## _vec_rscan(N ## _vec_t vec__tmp, TK key__tmp)\
+{ return N ## _vec_rscan_by_ ## NK(vec__tmp, key__tmp); }\
+static inline size_t N ## _vec_rscan_ex(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, TK key__tmp)\
+{ return N ## _vec_rscan_ex_by_ ## NK(vec__tmp, begin__tmp, end__tmp, key__tmp); }
+#define __flatbuffers_define_default_scan_by_string_field(N, NK) \
+static inline size_t N ## _vec_scan(N ## _vec_t vec__tmp, const char *s__tmp)\
+{ return N ## _vec_scan_by_ ## NK(vec__tmp, s__tmp); }\
+static inline size_t N ## _vec_scan_n(N ## _vec_t vec__tmp, const char *s__tmp, size_t n__tmp)\
+{ return N ## _vec_scan_n_by_ ## NK(vec__tmp, s__tmp, n__tmp); }\
+static inline size_t N ## _vec_scan_ex(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, const char *s__tmp)\
+{ return N ## _vec_scan_ex_by_ ## NK(vec__tmp, begin__tmp, end__tmp, s__tmp); }\
+static inline size_t N ## _vec_scan_ex_n(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, const char *s__tmp, size_t n__tmp)\
+{ return N ## _vec_scan_ex_n_by_ ## NK(vec__tmp, begin__tmp, end__tmp, s__tmp, n__tmp); }\
+static inline size_t N ## _vec_rscan(N ## _vec_t vec__tmp, const char *s__tmp)\
+{ return N ## _vec_rscan_by_ ## NK(vec__tmp, s__tmp); }\
+static inline size_t N ## _vec_rscan_n(N ## _vec_t vec__tmp, const char *s__tmp, size_t n__tmp)\
+{ return N ## _vec_rscan_n_by_ ## NK(vec__tmp, s__tmp, n__tmp); }\
+static inline size_t N ## _vec_rscan_ex(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, const char *s__tmp)\
+{ return N ## _vec_rscan_ex_by_ ## NK(vec__tmp, begin__tmp, end__tmp, s__tmp); }\
+static inline size_t N ## _vec_rscan_ex_n(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, const char *s__tmp, size_t n__tmp)\
+{ return N ## _vec_rscan_ex_n_by_ ## NK(vec__tmp, begin__tmp, end__tmp, s__tmp, n__tmp); }
+#define __flatbuffers_heap_sort(N, X, A, E, L, TK, TE, D, S)\
+static inline void __ ## N ## X ## __heap_sift_down(\
+ N ## _mutable_vec_t vec__tmp, size_t start__tmp, size_t end__tmp)\
+{ size_t child__tmp, root__tmp; TK v1__tmp, v2__tmp, vroot__tmp;\
+ root__tmp = start__tmp;\
+ while ((root__tmp << 1) <= end__tmp) {\
+ child__tmp = root__tmp << 1;\
+ if (child__tmp < end__tmp) {\
+ v1__tmp = A(E(vec__tmp, child__tmp));\
+ v2__tmp = A(E(vec__tmp, child__tmp + 1));\
+ if (D(v1__tmp, v2__tmp) < 0) {\
+ child__tmp++;\
+ }\
+ }\
+ vroot__tmp = A(E(vec__tmp, root__tmp));\
+ v1__tmp = A(E(vec__tmp, child__tmp));\
+ if (D(vroot__tmp, v1__tmp) < 0) {\
+ S(vec__tmp, root__tmp, child__tmp, TE);\
+ root__tmp = child__tmp;\
+ } else {\
+ return;\
+ }\
+ }\
+}\
+static inline void __ ## N ## X ## __heap_sort(N ## _mutable_vec_t vec__tmp)\
+{ size_t start__tmp, end__tmp, size__tmp;\
+ size__tmp = L(vec__tmp); if (size__tmp == 0) return; end__tmp = size__tmp - 1; start__tmp = size__tmp >> 1;\
+ do { __ ## N ## X ## __heap_sift_down(vec__tmp, start__tmp, end__tmp); } while (start__tmp--);\
+ while (end__tmp > 0) { \
+ S(vec__tmp, 0, end__tmp, TE);\
+ __ ## N ## X ## __heap_sift_down(vec__tmp, 0, --end__tmp); } }
+#define __flatbuffers_define_sort_by_field(N, NK, TK, TE, D, S)\
+ __flatbuffers_heap_sort(N, _sort_by_ ## NK, N ## _ ## NK ## _get, N ## _vec_at, N ## _vec_len, TK, TE, D, S)\
+static inline void N ## _vec_sort_by_ ## NK(N ## _mutable_vec_t vec__tmp)\
+{ __ ## N ## _sort_by_ ## NK ## __heap_sort(vec__tmp); }
+#define __flatbuffers_define_sort(N, TK, TE, D, S)\
+__flatbuffers_heap_sort(N, , __flatbuffers_identity, N ## _vec_at, N ## _vec_len, TK, TE, D, S)\
+static inline void N ## _vec_sort(N ## _mutable_vec_t vec__tmp) { __ ## N ## __heap_sort(vec__tmp); }
+#define __flatbuffers_scalar_diff(x, y) ((x) < (y) ? -1 : (x) > (y))
+#define __flatbuffers_string_diff(x, y) __flatbuffers_string_n_cmp((x), (const char *)(y), flatbuffers_string_len(y))
+#define __flatbuffers_value_swap(vec, a, b, TE) { TE x__tmp = vec[b]; vec[b] = vec[a]; vec[a] = x__tmp; }
+#define __flatbuffers_uoffset_swap(vec, a, b, TE)\
+{ TE ta__tmp, tb__tmp, d__tmp;\
+ d__tmp = (TE)((a - b) * sizeof(vec[0]));\
+ ta__tmp = __flatbuffers_uoffset_read_from_pe(vec + b) - d__tmp;\
+ tb__tmp = __flatbuffers_uoffset_read_from_pe(vec + a) + d__tmp;\
+ __flatbuffers_uoffset_write_to_pe(vec + a, ta__tmp);\
+ __flatbuffers_uoffset_write_to_pe(vec + b, tb__tmp); }
+#define __flatbuffers_scalar_swap(vec, a, b, TE) __flatbuffers_value_swap(vec, a, b, TE)
+#define __flatbuffers_string_swap(vec, a, b, TE) __flatbuffers_uoffset_swap(vec, a, b, TE)
+#define __flatbuffers_struct_swap(vec, a, b, TE) __flatbuffers_value_swap(vec, a, b, TE)
+#define __flatbuffers_table_swap(vec, a, b, TE) __flatbuffers_uoffset_swap(vec, a, b, TE)
+#define __flatbuffers_define_struct_sort_by_scalar_field(N, NK, TK, TE)\
+ __flatbuffers_define_sort_by_field(N, NK, TK, TE, __flatbuffers_scalar_diff, __flatbuffers_struct_swap)
+#define __flatbuffers_define_table_sort_by_scalar_field(N, NK, TK)\
+ __flatbuffers_define_sort_by_field(N, NK, TK, flatbuffers_uoffset_t, __flatbuffers_scalar_diff, __flatbuffers_table_swap)
+#define __flatbuffers_define_table_sort_by_string_field(N, NK)\
+ __flatbuffers_define_sort_by_field(N, NK, flatbuffers_string_t, flatbuffers_uoffset_t, __flatbuffers_string_diff, __flatbuffers_table_swap)
+#define __flatbuffers_define_scalar_sort(N, T) __flatbuffers_define_sort(N, T, T, __flatbuffers_scalar_diff, __flatbuffers_scalar_swap)
+#define __flatbuffers_define_string_sort() __flatbuffers_define_sort(flatbuffers_string, flatbuffers_string_t, flatbuffers_uoffset_t, __flatbuffers_string_diff, __flatbuffers_string_swap)
+#define __flatbuffers_sort_vector_field(N, NK, T, t)\
+{ T ## _mutable_vec_t v__tmp = (T ## _mutable_vec_t) N ## _ ## NK ## _get(t);\
+ if (v__tmp) T ## _vec_sort(v__tmp); }
+#define __flatbuffers_sort_table_field(N, NK, T, t)\
+{ T ## _sort((T ## _mutable_table_t)N ## _ ## NK ## _get(t)); }
+#define __flatbuffers_sort_union_field(N, NK, T, t)\
+{ T ## _sort(T ## _mutable_union_cast(N ## _ ## NK ## _union(t))); }
+#define __flatbuffers_sort_table_vector_field_elements(N, NK, T, t)\
+{ T ## _vec_t v__tmp = N ## _ ## NK ## _get(t); size_t i__tmp, n__tmp;\
+ n__tmp = T ## _vec_len(v__tmp); for (i__tmp = 0; i__tmp < n__tmp; ++i__tmp) {\
+ T ## _sort((T ## _mutable_table_t)T ## _vec_at(v__tmp, i__tmp)); }}
+#define __flatbuffers_sort_union_vector_field_elements(N, NK, T, t)\
+{ T ## _union_vec_t v__tmp = N ## _ ## NK ## _union(t); size_t i__tmp, n__tmp;\
+ n__tmp = T ## _union_vec_len(v__tmp); for (i__tmp = 0; i__tmp < n__tmp; ++i__tmp) {\
+ T ## _sort(T ## _mutable_union_cast(T ## _union_vec_at(v__tmp, i__tmp))); }}
+#define __flatbuffers_define_scalar_vector(N, T)\
+typedef const T *N ## _vec_t;\
+typedef T *N ## _mutable_vec_t;\
+__flatbuffers_define_scalar_vec_len(N)\
+__flatbuffers_define_scalar_vec_at(N, T)\
+__flatbuffers_define_scalar_find(N, T)\
+__flatbuffers_define_scalar_scan(N, T)\
+__flatbuffers_define_scalar_sort(N, T)
+
+#define __flatbuffers_define_integer_type(N, T, W)\
+__flatcc_define_integer_accessors(N, T, W, flatbuffers_endian)\
+__flatbuffers_define_scalar_vector(N, T)
+__flatbuffers_define_scalar_vector(flatbuffers_bool, flatbuffers_bool_t)
+__flatbuffers_define_scalar_vector(flatbuffers_char, char)
+__flatbuffers_define_scalar_vector(flatbuffers_uint8, uint8_t)
+__flatbuffers_define_scalar_vector(flatbuffers_int8, int8_t)
+__flatbuffers_define_scalar_vector(flatbuffers_uint16, uint16_t)
+__flatbuffers_define_scalar_vector(flatbuffers_int16, int16_t)
+__flatbuffers_define_scalar_vector(flatbuffers_uint32, uint32_t)
+__flatbuffers_define_scalar_vector(flatbuffers_int32, int32_t)
+__flatbuffers_define_scalar_vector(flatbuffers_uint64, uint64_t)
+__flatbuffers_define_scalar_vector(flatbuffers_int64, int64_t)
+__flatbuffers_define_scalar_vector(flatbuffers_float, float)
+__flatbuffers_define_scalar_vector(flatbuffers_double, double)
+__flatbuffers_define_scalar_vector(flatbuffers_union_type, flatbuffers_union_type_t)
+static inline size_t flatbuffers_string_vec_find(flatbuffers_string_vec_t vec, const char *s)
+__flatbuffers_find_by_string_field(__flatbuffers_identity, vec, flatbuffers_string_vec_at, flatbuffers_string_vec_len, s)
+static inline size_t flatbuffers_string_vec_find_n(flatbuffers_string_vec_t vec, const char *s, size_t n)
+__flatbuffers_find_by_string_n_field(__flatbuffers_identity, vec, flatbuffers_string_vec_at, flatbuffers_string_vec_len, s, n)
+static inline size_t flatbuffers_string_vec_scan(flatbuffers_string_vec_t vec, const char *s)
+__flatbuffers_scan_by_string_field(0, flatbuffers_string_vec_len(vec), __flatbuffers_identity, vec, flatbuffers_string_vec_at, flatbuffers_string_vec_len, s)
+static inline size_t flatbuffers_string_vec_scan_n(flatbuffers_string_vec_t vec, const char *s, size_t n)
+__flatbuffers_scan_by_string_n_field(0, flatbuffers_string_vec_len(vec), __flatbuffers_identity, vec, flatbuffers_string_vec_at, flatbuffers_string_vec_len, s, n)
+static inline size_t flatbuffers_string_vec_scan_ex(flatbuffers_string_vec_t vec, size_t begin, size_t end, const char *s)
+__flatbuffers_scan_by_string_field(begin, __flatbuffers_min(end, flatbuffers_string_vec_len(vec)), __flatbuffers_identity, vec, flatbuffers_string_vec_at, flatbuffers_string_vec_len, s)
+static inline size_t flatbuffers_string_vec_scan_ex_n(flatbuffers_string_vec_t vec, size_t begin, size_t end, const char *s, size_t n)
+__flatbuffers_scan_by_string_n_field(begin, __flatbuffers_min(end, flatbuffers_string_vec_len(vec)), __flatbuffers_identity, vec, flatbuffers_string_vec_at, flatbuffers_string_vec_len, s, n)
+static inline size_t flatbuffers_string_vec_rscan(flatbuffers_string_vec_t vec, const char *s)
+__flatbuffers_rscan_by_string_field(0, flatbuffers_string_vec_len(vec), __flatbuffers_identity, vec, flatbuffers_string_vec_at, flatbuffers_string_vec_len, s)
+static inline size_t flatbuffers_string_vec_rscan_n(flatbuffers_string_vec_t vec, const char *s, size_t n)
+__flatbuffers_rscan_by_string_n_field(0, flatbuffers_string_vec_len(vec), __flatbuffers_identity, vec, flatbuffers_string_vec_at, flatbuffers_string_vec_len, s, n)
+static inline size_t flatbuffers_string_vec_rscan_ex(flatbuffers_string_vec_t vec, size_t begin, size_t end, const char *s)
+__flatbuffers_rscan_by_string_field(begin, __flatbuffers_min(end, flatbuffers_string_vec_len(vec)), __flatbuffers_identity, vec, flatbuffers_string_vec_at, flatbuffers_string_vec_len, s)
+static inline size_t flatbuffers_string_vec_rscan_ex_n(flatbuffers_string_vec_t vec, size_t begin, size_t end, const char *s, size_t n)
+__flatbuffers_rscan_by_string_n_field(begin, __flatbuffers_min(end, flatbuffers_string_vec_len(vec)), __flatbuffers_identity, vec, flatbuffers_string_vec_at, flatbuffers_string_vec_len, s, n)
+__flatbuffers_define_string_sort()
+#define __flatbuffers_define_struct_scalar_fixed_array_field(N, NK, TK, T, L)\
+static inline T N ## _ ## NK ## _get(N ## _struct_t t__tmp, size_t i__tmp)\
+{ if (!t__tmp || i__tmp >= L) return 0;\
+ return __flatbuffers_read_scalar(TK, &(t__tmp->NK[i__tmp])); }\
+static inline const T *N ## _ ## NK ## _get_ptr(N ## _struct_t t__tmp)\
+{ return t__tmp ? t__tmp->NK : 0; }\
+static inline size_t N ## _ ## NK ## _get_len(void) { return L; }\
+static inline T N ## _ ## NK (N ## _struct_t t__tmp, size_t i__tmp)\
+{ return N ## _ ## NK ## _get(t__tmp, i__tmp); }
+#define __flatbuffers_define_struct_struct_fixed_array_field(N, NK, T, L)\
+static inline T N ## _ ## NK ## _get(N ## _struct_t t__tmp, size_t i__tmp)\
+{ if (!t__tmp || i__tmp >= L) return 0; return t__tmp->NK + i__tmp; }\
+static inline T N ## _ ## NK ## _get_ptr(N ## _struct_t t__tmp)\
+{ return t__tmp ? t__tmp->NK : 0; }\
+static inline size_t N ## _ ## NK ## _get_len(void) { return L; }\
+static inline T N ## _ ## NK(N ## _struct_t t__tmp, size_t i__tmp)\
+{ if (!t__tmp || i__tmp >= L) return 0; return t__tmp->NK + i__tmp; }
+#define __flatbuffers_define_struct_scalar_field(N, NK, TK, T)\
+static inline T N ## _ ## NK ## _get(N ## _struct_t t__tmp)\
+{ return t__tmp ? __flatbuffers_read_scalar(TK, &(t__tmp->NK)) : 0; }\
+static inline const T *N ## _ ## NK ## _get_ptr(N ## _struct_t t__tmp)\
+{ return t__tmp ? &(t__tmp->NK) : 0; }\
+static inline T N ## _ ## NK (N ## _struct_t t__tmp)\
+{ return t__tmp ? __flatbuffers_read_scalar(TK, &(t__tmp->NK)) : 0; }\
+__flatbuffers_define_scan_by_scalar_field(N, NK, T)
+#define __flatbuffers_define_struct_struct_field(N, NK, T)\
+static inline T N ## _ ## NK ## _get(N ## _struct_t t__tmp) { return t__tmp ? &(t__tmp->NK) : 0; }\
+static inline T N ## _ ## NK (N ## _struct_t t__tmp) { return t__tmp ? &(t__tmp->NK) : 0; }
+/* If fid is null, the function returns true without testing, as the buffer is not expected to have any id. */
+static inline int flatbuffers_has_identifier(const void *buffer, const char *fid)
+{ flatbuffers_thash_t id, id2 = 0; if (fid == 0) { return 1; };
+ id2 = flatbuffers_type_hash_from_string(fid);
+ id = __flatbuffers_thash_read_from_pe(((flatbuffers_uoffset_t *)buffer) + 1);
+ return id2 == 0 || id == id2; }
+static inline int flatbuffers_has_type_hash(const void *buffer, flatbuffers_thash_t thash)
+{ return thash == 0 || (__flatbuffers_thash_read_from_pe((flatbuffers_uoffset_t *)buffer + 1) == thash); }
+
+static inline flatbuffers_thash_t flatbuffers_get_type_hash(const void *buffer)
+{ return __flatbuffers_thash_read_from_pe((flatbuffers_uoffset_t *)buffer + 1); }
+
+#define flatbuffers_verify_endian() flatbuffers_has_identifier("\x00\x00\x00\x00" "1234", "1234")
+static inline void *flatbuffers_read_size_prefix(void *b, size_t *size_out)
+{ if (size_out) { *size_out = (size_t)__flatbuffers_uoffset_read_from_pe(b); }
+ return (uint8_t *)b + sizeof(flatbuffers_uoffset_t); }
+/* Null file identifier accepts anything, otherwise fid should be 4 characters. */
+#define __flatbuffers_read_root(T, K, buffer, fid)\
+ ((!buffer || !flatbuffers_has_identifier(buffer, fid)) ? 0 :\
+ ((T ## _ ## K ## t)(((uint8_t *)buffer) +\
+ __flatbuffers_uoffset_read_from_pe(buffer))))
+#define __flatbuffers_read_typed_root(T, K, buffer, thash)\
+ ((!buffer || !flatbuffers_has_type_hash(buffer, thash)) ? 0 :\
+ ((T ## _ ## K ## t)(((uint8_t *)buffer) +\
+ __flatbuffers_uoffset_read_from_pe(buffer))))
+#define __flatbuffers_nested_buffer_as_root(C, N, T, K)\
+static inline T ## _ ## K ## t C ## _ ## N ## _as_root_with_identifier(C ## _ ## table_t t__tmp, const char *fid__tmp)\
+{ const uint8_t *buffer__tmp = C ## _ ## N(t__tmp); return __flatbuffers_read_root(T, K, buffer__tmp, fid__tmp); }\
+static inline T ## _ ## K ## t C ## _ ## N ## _as_typed_root(C ## _ ## table_t t__tmp)\
+{ const uint8_t *buffer__tmp = C ## _ ## N(t__tmp); return __flatbuffers_read_root(T, K, buffer__tmp, C ## _ ## type_identifier); }\
+static inline T ## _ ## K ## t C ## _ ## N ## _as_root(C ## _ ## table_t t__tmp)\
+{ const char *fid__tmp = T ## _file_identifier;\
+ const uint8_t *buffer__tmp = C ## _ ## N(t__tmp); return __flatbuffers_read_root(T, K, buffer__tmp, fid__tmp); }
+#define __flatbuffers_buffer_as_root(N, K)\
+static inline N ## _ ## K ## t N ## _as_root_with_identifier(const void *buffer__tmp, const char *fid__tmp)\
+{ return __flatbuffers_read_root(N, K, buffer__tmp, fid__tmp); }\
+static inline N ## _ ## K ## t N ## _as_root_with_type_hash(const void *buffer__tmp, flatbuffers_thash_t thash__tmp)\
+{ return __flatbuffers_read_typed_root(N, K, buffer__tmp, thash__tmp); }\
+static inline N ## _ ## K ## t N ## _as_root(const void *buffer__tmp)\
+{ const char *fid__tmp = N ## _file_identifier;\
+ return __flatbuffers_read_root(N, K, buffer__tmp, fid__tmp); }\
+static inline N ## _ ## K ## t N ## _as_typed_root(const void *buffer__tmp)\
+{ return __flatbuffers_read_typed_root(N, K, buffer__tmp, N ## _type_hash); }
+#define __flatbuffers_struct_as_root(N) __flatbuffers_buffer_as_root(N, struct_)
+#define __flatbuffers_table_as_root(N) __flatbuffers_buffer_as_root(N, table_)
+
+#include "flatcc_epilogue.h"
+#endif /* FLATBUFFERS_COMMON_READER_H */
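
On the read side, the accessor macros above expand to typed getters. A
minimal sketch of reading a finished buffer back, assuming meta_reader.h
instantiates __flatbuffers_table_as_root(NdbEventMeta) as flatcc
normally does for root tables:

    #include "meta_reader.h"

    static int32_t read_example(const void *buf)
    {
        /* as_root checks the (null) file identifier and resolves the
         * root table offset; it yields 0 for a null buffer. */
        NdbEventMeta_table_t meta = NdbEventMeta_as_root(buf);
        if (!meta)
            return 0;
        /* Falls back to the schema default (0) when the field was
         * skipped by _add on the builder side. */
        return NdbEventMeta_received_at(meta);
    }
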
diff --git a/nostrdb/bindings/c/meta_builder.h b/nostrdb/bindings/c/meta_builder.h
@@ -0,0 +1,52 @@
+#ifndef META_BUILDER_H
+#define META_BUILDER_H
+
+/* Generated by flatcc 0.6.1 FlatBuffers schema compiler for C by dvide.com */
+
+#ifndef META_READER_H
+#include "meta_reader.h"
+#endif
+#ifndef FLATBUFFERS_COMMON_BUILDER_H
+#include "flatbuffers_common_builder.h"
+#endif
+#include "flatcc/flatcc_prologue.h"
+#ifndef flatbuffers_identifier
+#define flatbuffers_identifier 0
+#endif
+#ifndef flatbuffers_extension
+#define flatbuffers_extension "bin"
+#endif
+
+static const flatbuffers_voffset_t __NdbEventMeta_required[] = { 0 };
+typedef flatbuffers_ref_t NdbEventMeta_ref_t;
+static NdbEventMeta_ref_t NdbEventMeta_clone(flatbuffers_builder_t *B, NdbEventMeta_table_t t);
+__flatbuffers_build_table(flatbuffers_, NdbEventMeta, 1)
+
+#define __NdbEventMeta_formal_args , int32_t v0
+#define __NdbEventMeta_call_args , v0
+static inline NdbEventMeta_ref_t NdbEventMeta_create(flatbuffers_builder_t *B __NdbEventMeta_formal_args);
+__flatbuffers_build_table_prolog(flatbuffers_, NdbEventMeta, NdbEventMeta_file_identifier, NdbEventMeta_type_identifier)
+
+__flatbuffers_build_scalar_field(0, flatbuffers_, NdbEventMeta_received_at, flatbuffers_int32, int32_t, 4, 4, INT32_C(0), NdbEventMeta)
+
+static inline NdbEventMeta_ref_t NdbEventMeta_create(flatbuffers_builder_t *B __NdbEventMeta_formal_args)
+{
+ if (NdbEventMeta_start(B)
+ || NdbEventMeta_received_at_add(B, v0)) {
+ return 0;
+ }
+ return NdbEventMeta_end(B);
+}
+
+static NdbEventMeta_ref_t NdbEventMeta_clone(flatbuffers_builder_t *B, NdbEventMeta_table_t t)
+{
+ __flatbuffers_memoize_begin(B, t);
+ if (NdbEventMeta_start(B)
+ || NdbEventMeta_received_at_pick(B, t)) {
+ return 0;
+ }
+ __flatbuffers_memoize_end(B, t, NdbEventMeta_end(B));
+}
+
+#include "flatcc/flatcc_epilogue.h"
+#endif /* META_BUILDER_H */
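
Besides the start/add/end path, the prolog above also generates one-call
convenience constructors. A minimal sketch, assuming an already
initialized builder:

    #include "flatcc/flatcc_builder.h"
    #include "meta_builder.h"

    static NdbEventMeta_ref_t create_meta(flatcc_builder_t *B, int32_t received_at)
    {
        /* Expands __NdbEventMeta_formal_args, i.e. a single int32_t;
         * returns 0 on failure, like the hand-rolled path. */
        return NdbEventMeta_create(B, received_at);
    }
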
diff --git a/nostrdb/bindings/c/meta_json_parser.h b/nostrdb/bindings/c/meta_json_parser.h
@@ -0,0 +1,116 @@
+#ifndef META_JSON_PARSER_H
+#define META_JSON_PARSER_H
+
+/* Generated by flatcc 0.6.1 FlatBuffers schema compiler for C by dvide.com */
+
+#include "flatcc/flatcc_json_parser.h"
+#include "flatcc/flatcc_prologue.h"
+
+/*
+ * Parses the default root table or struct of the schema and constructs a FlatBuffer.
+ *
+ * Builder `B` must be initialized. `ctx` can be null but will hold
+ * detailed error info on return when available.
+ * Returns 0 on success, or error code.
+ * `flags` : 0 by default, `flatcc_json_parser_f_skip_unknown` silently
+ * ignores unknown table and struct fields, and union types.
+ */
+static int meta_parse_json(flatcc_builder_t *B, flatcc_json_parser_t *ctx,
+ const char *buf, size_t bufsiz, int flags);
+
+static const char *NdbEventMeta_parse_json_table(flatcc_json_parser_t *ctx, const char *buf, const char *end, flatcc_builder_ref_t *result);
+static const char *meta_local_json_parser_enum(flatcc_json_parser_t *ctx, const char *buf, const char *end,
+int *value_type, uint64_t *value, int *aggregate);
+static const char *meta_global_json_parser_enum(flatcc_json_parser_t *ctx, const char *buf, const char *end,
+ int *value_type, uint64_t *value, int *aggregate);
+
+static const char *NdbEventMeta_parse_json_table(flatcc_json_parser_t *ctx, const char *buf, const char *end, flatcc_builder_ref_t *result)
+{
+ int more;
+ void *pval;
+ flatcc_builder_ref_t ref, *pref;
+ const char *mark;
+ uint64_t w;
+
+ *result = 0;
+ if (flatcc_builder_start_table(ctx->ctx, 1)) goto failed;
+ buf = flatcc_json_parser_object_start(ctx, buf, end, &more);
+ while (more) {
+ buf = flatcc_json_parser_symbol_start(ctx, buf, end);
+ w = flatcc_json_parser_symbol_part(buf, end);
+ if (w == 0x7265636569766564) { /* descend "received" */
+ buf += 8;
+ w = flatcc_json_parser_symbol_part(buf, end);
+ if ((w & 0xffffff0000000000) == 0x5f61740000000000) { /* "_at" */
+ buf = flatcc_json_parser_match_symbol(ctx, (mark = buf), end, 3);
+ if (mark != buf) {
+ int32_t val = 0;
+ static flatcc_json_parser_integral_symbol_f *symbolic_parsers[] = {
+ meta_local_json_parser_enum,
+ meta_global_json_parser_enum, 0 };
+ buf = flatcc_json_parser_int32(ctx, (mark = buf), end, &val);
+ if (mark == buf) {
+ buf = flatcc_json_parser_symbolic_int32(ctx, (mark = buf), end, symbolic_parsers, &val);
+ if (buf == mark || buf == end) goto failed;
+ }
+ if (val != INT32_C(0) || (ctx->flags & flatcc_json_parser_f_force_add)) {
+ if (!(pval = flatcc_builder_table_add(ctx->ctx, 0, 4, 4))) goto failed;
+ flatbuffers_int32_write_to_pe(pval, val);
+ }
+ } else {
+ buf = flatcc_json_parser_unmatched_symbol(ctx, buf, end);
+ }
+ } else { /* "_at" */
+ buf = flatcc_json_parser_unmatched_symbol(ctx, buf, end);
+ } /* "_at" */
+ } else { /* descend "received" */
+ buf = flatcc_json_parser_unmatched_symbol(ctx, buf, end);
+ } /* descend "received" */
+ buf = flatcc_json_parser_object_end(ctx, buf, end, &more);
+ }
+ if (ctx->error) goto failed;
+ if (!(*result = flatcc_builder_end_table(ctx->ctx))) goto failed;
+ return buf;
+failed:
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_runtime);
+}
+
+static inline int NdbEventMeta_parse_json_as_root(flatcc_builder_t *B, flatcc_json_parser_t *ctx, const char *buf, size_t bufsiz, int flags, const char *fid)
+{
+ return flatcc_json_parser_table_as_root(B, ctx, buf, bufsiz, flags, fid, NdbEventMeta_parse_json_table);
+}
+
+static const char *meta_local_json_parser_enum(flatcc_json_parser_t *ctx, const char *buf, const char *end,
+ int *value_type, uint64_t *value, int *aggregate)
+{
+ /* Scope has no enum / union types to look up. */
+ return buf; /* unmatched; */
+}
+
+static const char *meta_global_json_parser_enum(flatcc_json_parser_t *ctx, const char *buf, const char *end,
+ int *value_type, uint64_t *value, int *aggregate)
+{
+ /* Global scope has no enum / union types to look up. */
+ return buf; /* unmatched; */
+}
+
+static int meta_parse_json(flatcc_builder_t *B, flatcc_json_parser_t *ctx,
+ const char *buf, size_t bufsiz, int flags)
+{
+ flatcc_json_parser_t parser;
+ flatcc_builder_ref_t root;
+
+ ctx = ctx ? ctx : &parser;
+ flatcc_json_parser_init(ctx, B, buf, buf + bufsiz, flags);
+ if (flatcc_builder_start_buffer(B, 0, 0, 0)) return -1;
+ NdbEventMeta_parse_json_table(ctx, buf, buf + bufsiz, &root);
+ if (ctx->error) {
+ return ctx->error;
+ }
+ if (!flatcc_builder_end_buffer(B, root)) return -1;
+ ctx->end_loc = buf;
+ return 0;
+}
+
+#include "flatcc/flatcc_epilogue.h"
+#endif /* META_JSON_PARSER_H */
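
End to end, the parser turns JSON into a finished flatbuffer: initialize a builder, call meta_parse_json (passing NULL for `ctx` is allowed, as documented above; flags of 0 make unknown keys an error), then finalize. A sketch, with the helper name and JSON literal as illustrative assumptions:

    #include <string.h>
    #include "meta_json_parser.h"

    static void *meta_from_json(const char *json, size_t *size_out)
    {
        flatcc_builder_t builder;
        void *buf = NULL;

        flatcc_builder_init(&builder);
        if (meta_parse_json(&builder, NULL, json, strlen(json), 0) == 0)
            buf = flatcc_builder_finalize_buffer(&builder, size_out);
        flatcc_builder_clear(&builder);
        return buf;
    }

    /* e.g. meta_from_json("{\"received_at\": 1693000000}", &size) */
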
diff --git a/nostrdb/bindings/c/meta_reader.h b/nostrdb/bindings/c/meta_reader.h
@@ -0,0 +1,53 @@
+#ifndef META_READER_H
+#define META_READER_H
+
+/* Generated by flatcc 0.6.1 FlatBuffers schema compiler for C by dvide.com */
+
+#ifndef FLATBUFFERS_COMMON_READER_H
+#include "flatbuffers_common_reader.h"
+#endif
+#include "flatcc/flatcc_flatbuffers.h"
+#ifndef __alignas_is_defined
+#include <stdalign.h>
+#endif
+#include "flatcc/flatcc_prologue.h"
+#ifndef flatbuffers_identifier
+#define flatbuffers_identifier 0
+#endif
+#ifndef flatbuffers_extension
+#define flatbuffers_extension "bin"
+#endif
+
+
+typedef const struct NdbEventMeta_table *NdbEventMeta_table_t;
+typedef struct NdbEventMeta_table *NdbEventMeta_mutable_table_t;
+typedef const flatbuffers_uoffset_t *NdbEventMeta_vec_t;
+typedef flatbuffers_uoffset_t *NdbEventMeta_mutable_vec_t;
+#ifndef NdbEventMeta_file_identifier
+#define NdbEventMeta_file_identifier 0
+#endif
+/* deprecated, use NdbEventMeta_file_identifier */
+#ifndef NdbEventMeta_identifier
+#define NdbEventMeta_identifier 0
+#endif
+#define NdbEventMeta_type_hash ((flatbuffers_thash_t)0xa8c23be8)
+#define NdbEventMeta_type_identifier "\xe8\x3b\xc2\xa8"
+#ifndef NdbEventMeta_file_extension
+#define NdbEventMeta_file_extension "bin"
+#endif
+
+
+
+struct NdbEventMeta_table { uint8_t unused__; };
+
+static inline size_t NdbEventMeta_vec_len(NdbEventMeta_vec_t vec)
+__flatbuffers_vec_len(vec)
+static inline NdbEventMeta_table_t NdbEventMeta_vec_at(NdbEventMeta_vec_t vec, size_t i)
+__flatbuffers_offset_vec_at(NdbEventMeta_table_t, vec, i, 0)
+__flatbuffers_table_as_root(NdbEventMeta)
+
+__flatbuffers_define_scalar_field(0, NdbEventMeta, received_at, flatbuffers_int32, int32_t, INT32_C(0))
+
+
+#include "flatcc/flatcc_epilogue.h"
+#endif /* META_READER_H */
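
Reads are zero-copy: NdbEventMeta_as_root positions a table handle inside the buffer, and the field accessor resolves it through the vtable, falling back to the schema default (0) when the field is absent. A sketch, assuming `buf` was produced by the builder or JSON parser above:

    #include "meta_reader.h"

    static int32_t read_received_at(const void *buf)
    {
        NdbEventMeta_table_t meta = NdbEventMeta_as_root(buf);

        return meta ? NdbEventMeta_received_at(meta) : 0;
    }
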
diff --git a/nostrdb/bindings/c/meta_verifier.h b/nostrdb/bindings/c/meta_verifier.h
@@ -0,0 +1,42 @@
+#ifndef META_VERIFIER_H
+#define META_VERIFIER_H
+
+/* Generated by flatcc 0.6.1 FlatBuffers schema compiler for C by dvide.com */
+
+#ifndef META_READER_H
+#include "meta_reader.h"
+#endif
+#include "flatcc/flatcc_verifier.h"
+#include "flatcc/flatcc_prologue.h"
+
+static int NdbEventMeta_verify_table(flatcc_table_verifier_descriptor_t *td);
+
+static int NdbEventMeta_verify_table(flatcc_table_verifier_descriptor_t *td)
+{
+ int ret;
+ if ((ret = flatcc_verify_field(td, 0, 4, 4) /* received_at */)) return ret;
+ return flatcc_verify_ok;
+}
+
+static inline int NdbEventMeta_verify_as_root(const void *buf, size_t bufsiz)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, NdbEventMeta_identifier, &NdbEventMeta_verify_table);
+}
+
+static inline int NdbEventMeta_verify_as_typed_root(const void *buf, size_t bufsiz)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, NdbEventMeta_type_identifier, &NdbEventMeta_verify_table);
+}
+
+static inline int NdbEventMeta_verify_as_root_with_identifier(const void *buf, size_t bufsiz, const char *fid)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, fid, &NdbEventMeta_verify_table);
+}
+
+static inline int NdbEventMeta_verify_as_root_with_type_hash(const void *buf, size_t bufsiz, flatbuffers_thash_t thash)
+{
+ return flatcc_verify_table_as_typed_root(buf, bufsiz, thash, &NdbEventMeta_verify_table);
+}
+
+#include "flatcc/flatcc_epilogue.h"
+#endif /* META_VERIFIER_H */
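
For untrusted bytes, the verifier should run before any reader access; it returns flatcc_verify_ok (0) when the buffer is structurally sound. A sketch combining verify and read (the helper name is illustrative):

    #include "meta_reader.h"
    #include "meta_verifier.h"

    static int read_checked(const void *buf, size_t bufsiz, int32_t *received_at)
    {
        if (NdbEventMeta_verify_as_root(buf, bufsiz) != flatcc_verify_ok)
            return -1;
        *received_at = NdbEventMeta_received_at(NdbEventMeta_as_root(buf));
        return 0;
    }
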
diff --git a/nostrdb/bindings/c/profile_builder.h b/nostrdb/bindings/c/profile_builder.h
@@ -0,0 +1,88 @@
+#ifndef PROFILE_BUILDER_H
+#define PROFILE_BUILDER_H
+
+/* Generated by flatcc 0.6.1 FlatBuffers schema compiler for C by dvide.com */
+
+#ifndef PROFILE_READER_H
+#include "profile_reader.h"
+#endif
+#ifndef FLATBUFFERS_COMMON_BUILDER_H
+#include "flatbuffers_common_builder.h"
+#endif
+#include "flatcc/flatcc_prologue.h"
+#ifndef flatbuffers_identifier
+#define flatbuffers_identifier 0
+#endif
+#ifndef flatbuffers_extension
+#define flatbuffers_extension "bin"
+#endif
+
+static const flatbuffers_voffset_t __NdbProfile_required[] = { 0 };
+typedef flatbuffers_ref_t NdbProfile_ref_t;
+static NdbProfile_ref_t NdbProfile_clone(flatbuffers_builder_t *B, NdbProfile_table_t t);
+__flatbuffers_build_table(flatbuffers_, NdbProfile, 11)
+
+#define __NdbProfile_formal_args ,\
+ flatbuffers_string_ref_t v0, flatbuffers_string_ref_t v1, flatbuffers_string_ref_t v2, flatbuffers_string_ref_t v3,\
+ flatbuffers_string_ref_t v4, flatbuffers_string_ref_t v5, flatbuffers_bool_t v6, flatbuffers_string_ref_t v7,\
+ flatbuffers_string_ref_t v8, int32_t v9, int32_t v10
+#define __NdbProfile_call_args ,\
+ v0, v1, v2, v3,\
+ v4, v5, v6, v7,\
+ v8, v9, v10
+static inline NdbProfile_ref_t NdbProfile_create(flatbuffers_builder_t *B __NdbProfile_formal_args);
+__flatbuffers_build_table_prolog(flatbuffers_, NdbProfile, NdbProfile_file_identifier, NdbProfile_type_identifier)
+
+__flatbuffers_build_string_field(0, flatbuffers_, NdbProfile_name, NdbProfile)
+__flatbuffers_build_string_field(1, flatbuffers_, NdbProfile_website, NdbProfile)
+__flatbuffers_build_string_field(2, flatbuffers_, NdbProfile_about, NdbProfile)
+__flatbuffers_build_string_field(3, flatbuffers_, NdbProfile_lud16, NdbProfile)
+__flatbuffers_build_string_field(4, flatbuffers_, NdbProfile_banner, NdbProfile)
+__flatbuffers_build_string_field(5, flatbuffers_, NdbProfile_display_name, NdbProfile)
+__flatbuffers_build_scalar_field(6, flatbuffers_, NdbProfile_reactions, flatbuffers_bool, flatbuffers_bool_t, 1, 1, UINT8_C(1), NdbProfile)
+__flatbuffers_build_string_field(7, flatbuffers_, NdbProfile_picture, NdbProfile)
+__flatbuffers_build_string_field(8, flatbuffers_, NdbProfile_nip05, NdbProfile)
+__flatbuffers_build_scalar_field(9, flatbuffers_, NdbProfile_damus_donation, flatbuffers_int32, int32_t, 4, 4, INT32_C(0), NdbProfile)
+__flatbuffers_build_scalar_field(10, flatbuffers_, NdbProfile_damus_donation_v2, flatbuffers_int32, int32_t, 4, 4, INT32_C(0), NdbProfile)
+
+static inline NdbProfile_ref_t NdbProfile_create(flatbuffers_builder_t *B __NdbProfile_formal_args)
+{
+ if (NdbProfile_start(B)
+ || NdbProfile_name_add(B, v0)
+ || NdbProfile_website_add(B, v1)
+ || NdbProfile_about_add(B, v2)
+ || NdbProfile_lud16_add(B, v3)
+ || NdbProfile_banner_add(B, v4)
+ || NdbProfile_display_name_add(B, v5)
+ || NdbProfile_picture_add(B, v7)
+ || NdbProfile_nip05_add(B, v8)
+ || NdbProfile_damus_donation_add(B, v9)
+ || NdbProfile_damus_donation_v2_add(B, v10)
+ || NdbProfile_reactions_add(B, v6)) {
+ return 0;
+ }
+ return NdbProfile_end(B);
+}
+
+static NdbProfile_ref_t NdbProfile_clone(flatbuffers_builder_t *B, NdbProfile_table_t t)
+{
+ __flatbuffers_memoize_begin(B, t);
+ if (NdbProfile_start(B)
+ || NdbProfile_name_pick(B, t)
+ || NdbProfile_website_pick(B, t)
+ || NdbProfile_about_pick(B, t)
+ || NdbProfile_lud16_pick(B, t)
+ || NdbProfile_banner_pick(B, t)
+ || NdbProfile_display_name_pick(B, t)
+ || NdbProfile_picture_pick(B, t)
+ || NdbProfile_nip05_pick(B, t)
+ || NdbProfile_damus_donation_pick(B, t)
+ || NdbProfile_damus_donation_v2_pick(B, t)
+ || NdbProfile_reactions_pick(B, t)) {
+ return 0;
+ }
+ __flatbuffers_memoize_end(B, t, NdbProfile_end(B));
+}
+
+#include "flatcc/flatcc_epilogue.h"
+#endif /* PROFILE_BUILDER_H */
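
Besides the all-arguments NdbProfile_create, the generated _start/_add/_end calls let a caller set only the fields it has; string refs are created first, then added by reference. A sketch under those assumptions (field values are illustrative):

    #include "profile_builder.h"

    static NdbProfile_ref_t build_profile(flatcc_builder_t *B)
    {
        flatbuffers_string_ref_t name;

        if (NdbProfile_start(B))
            return 0;
        name = flatcc_builder_create_string_str(B, "alice");
        if (!name || NdbProfile_name_add(B, name))
            return 0;
        /* reactions defaults to true (UINT8_C(1) above): only false is stored */
        if (NdbProfile_reactions_add(B, 0))
            return 0;
        return NdbProfile_end(B);
    }
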
diff --git a/nostrdb/bindings/c/profile_json_parser.h b/nostrdb/bindings/c/profile_json_parser.h
@@ -0,0 +1,284 @@
+#ifndef PROFILE_JSON_PARSER_H
+#define PROFILE_JSON_PARSER_H
+
+/* Generated by flatcc 0.6.1 FlatBuffers schema compiler for C by dvide.com */
+
+#include "flatcc_json_parser.h"
+#include "flatcc_prologue.h"
+
+/*
+ * Parses the default root table or struct of the schema and constructs a FlatBuffer.
+ *
+ * Builder `B` must be initialized. `ctx` can be null, but when given it
+ * holds detailed error info on return when available.
+ * Returns 0 on success, or an error code.
+ * `flags` is 0 by default; `flatcc_json_parser_f_skip_unknown` silently
+ * ignores unknown table and struct fields, and union types.
+ */
+static int profile_parse_json(flatcc_builder_t *B, flatcc_json_parser_t *ctx,
+ const char *buf, size_t bufsiz, int flags);
+
+static const char *NdbProfile_parse_json_table(flatcc_json_parser_t *ctx, const char *buf, const char *end, flatcc_builder_ref_t *result);
+static const char *profile_local_json_parser_enum(flatcc_json_parser_t *ctx, const char *buf, const char *end,
+ int *value_type, uint64_t *value, int *aggregate);
+static const char *profile_global_json_parser_enum(flatcc_json_parser_t *ctx, const char *buf, const char *end,
+ int *value_type, uint64_t *value, int *aggregate);
+
+static const char *NdbProfile_parse_json_table(flatcc_json_parser_t *ctx, const char *buf, const char *end, flatcc_builder_ref_t *result)
+{
+ int more;
+ void *pval;
+ flatcc_builder_ref_t ref, *pref;
+ const char *mark;
+ uint64_t w;
+
+ *result = 0;
+ if (flatcc_builder_start_table(ctx->ctx, 11)) goto failed;
+ buf = flatcc_json_parser_object_start(ctx, buf, end, &more);
+ while (more) {
+ buf = flatcc_json_parser_symbol_start(ctx, buf, end);
+ w = flatcc_json_parser_symbol_part(buf, end);
+ if (w < 0x6c75643136000000) { /* branch "lud16" */
+ if (w < 0x64616d75735f646f) { /* branch "damus_do" */
+ if ((w & 0xffffffffffff0000) == 0x62616e6e65720000) { /* "banner" */
+ buf = flatcc_json_parser_match_symbol(ctx, (mark = buf), end, 6);
+ if (mark != buf) {
+ buf = flatcc_json_parser_build_string(ctx, buf, end, &ref);
+ if (!ref || !(pref = flatcc_builder_table_add_offset(ctx->ctx, 4))) goto failed;
+ *pref = ref;
+ } else {
+ buf = flatcc_json_parser_unmatched_symbol(ctx, buf, end);
+ }
+ } else { /* "banner" */
+ if ((w & 0xffffffffff000000) == 0x61626f7574000000) { /* "about" */
+ buf = flatcc_json_parser_match_symbol(ctx, (mark = buf), end, 5);
+ if (mark != buf) {
+ buf = flatcc_json_parser_build_string(ctx, buf, end, &ref);
+ if (!ref || !(pref = flatcc_builder_table_add_offset(ctx->ctx, 2))) goto failed;
+ *pref = ref;
+ } else {
+ buf = flatcc_json_parser_unmatched_symbol(ctx, buf, end);
+ }
+ } else { /* "about" */
+ buf = flatcc_json_parser_unmatched_symbol(ctx, buf, end);
+ } /* "about" */
+ } /* "banner" */
+ } else { /* branch "damus_do" */
+ if (w == 0x64616d75735f646f) { /* descend "damus_do" */
+ buf += 8;
+ w = flatcc_json_parser_symbol_part(buf, end);
+ if (w == 0x6e6174696f6e5f76) { /* descend "nation_v" */
+ buf += 8;
+ w = flatcc_json_parser_symbol_part(buf, end);
+ if ((w & 0xff00000000000000) == 0x3200000000000000) { /* "2" */
+ buf = flatcc_json_parser_match_symbol(ctx, (mark = buf), end, 1);
+ if (mark != buf) {
+ int32_t val = 0;
+ static flatcc_json_parser_integral_symbol_f *symbolic_parsers[] = {
+ profile_local_json_parser_enum,
+ profile_global_json_parser_enum, 0 };
+ buf = flatcc_json_parser_int32(ctx, (mark = buf), end, &val);
+ if (mark == buf) {
+ buf = flatcc_json_parser_symbolic_int32(ctx, (mark = buf), end, symbolic_parsers, &val);
+ if (buf == mark || buf == end) goto failed;
+ }
+ if (val != INT32_C(0) || (ctx->flags & flatcc_json_parser_f_force_add)) {
+ if (!(pval = flatcc_builder_table_add(ctx->ctx, 10, 4, 4))) goto failed;
+ flatbuffers_int32_write_to_pe(pval, val);
+ }
+ } else {
+ buf = flatcc_json_parser_unmatched_symbol(ctx, buf, end);
+ }
+ } else { /* "2" */
+ buf = flatcc_json_parser_unmatched_symbol(ctx, buf, end);
+ } /* "2" */
+ } else { /* descend "nation_v" */
+ if ((w & 0xffffffffffff0000) == 0x6e6174696f6e0000) { /* "nation" */
+ buf = flatcc_json_parser_match_symbol(ctx, (mark = buf), end, 6);
+ if (mark != buf) {
+ int32_t val = 0;
+ static flatcc_json_parser_integral_symbol_f *symbolic_parsers[] = {
+ profile_local_json_parser_enum,
+ profile_global_json_parser_enum, 0 };
+ buf = flatcc_json_parser_int32(ctx, (mark = buf), end, &val);
+ if (mark == buf) {
+ buf = flatcc_json_parser_symbolic_int32(ctx, (mark = buf), end, symbolic_parsers, &val);
+ if (buf == mark || buf == end) goto failed;
+ }
+ if (val != INT32_C(0) || (ctx->flags & flatcc_json_parser_f_force_add)) {
+ if (!(pval = flatcc_builder_table_add(ctx->ctx, 9, 4, 4))) goto failed;
+ flatbuffers_int32_write_to_pe(pval, val);
+ }
+ } else {
+ buf = flatcc_json_parser_unmatched_symbol(ctx, buf, end);
+ }
+ } else { /* "nation" */
+ buf = flatcc_json_parser_unmatched_symbol(ctx, buf, end);
+ } /* "nation" */
+ } /* descend "nation_v" */
+ } else { /* descend "damus_do" */
+ if (w == 0x646973706c61795f) { /* descend "display_" */
+ buf += 8;
+ w = flatcc_json_parser_symbol_part(buf, end);
+ if ((w & 0xffffffff00000000) == 0x6e616d6500000000) { /* "name" */
+ buf = flatcc_json_parser_match_symbol(ctx, (mark = buf), end, 4);
+ if (mark != buf) {
+ buf = flatcc_json_parser_build_string(ctx, buf, end, &ref);
+ if (!ref || !(pref = flatcc_builder_table_add_offset(ctx->ctx, 5))) goto failed;
+ *pref = ref;
+ } else {
+ buf = flatcc_json_parser_unmatched_symbol(ctx, buf, end);
+ }
+ } else { /* "name" */
+ buf = flatcc_json_parser_unmatched_symbol(ctx, buf, end);
+ } /* "name" */
+ } else { /* descend "display_" */
+ buf = flatcc_json_parser_unmatched_symbol(ctx, buf, end);
+ } /* descend "display_" */
+ } /* descend "damus_do" */
+ } /* branch "damus_do" */
+ } else { /* branch "lud16" */
+ if (w < 0x6e69703035000000) { /* branch "nip05" */
+ if ((w & 0xffffffff00000000) == 0x6e616d6500000000) { /* "name" */
+ buf = flatcc_json_parser_match_symbol(ctx, (mark = buf), end, 4);
+ if (mark != buf) {
+ buf = flatcc_json_parser_build_string(ctx, buf, end, &ref);
+ if (!ref || !(pref = flatcc_builder_table_add_offset(ctx->ctx, 0))) goto failed;
+ *pref = ref;
+ } else {
+ buf = flatcc_json_parser_unmatched_symbol(ctx, buf, end);
+ }
+ } else { /* "name" */
+ if ((w & 0xffffffffff000000) == 0x6c75643136000000) { /* "lud16" */
+ buf = flatcc_json_parser_match_symbol(ctx, (mark = buf), end, 5);
+ if (mark != buf) {
+ buf = flatcc_json_parser_build_string(ctx, buf, end, &ref);
+ if (!ref || !(pref = flatcc_builder_table_add_offset(ctx->ctx, 3))) goto failed;
+ *pref = ref;
+ } else {
+ buf = flatcc_json_parser_unmatched_symbol(ctx, buf, end);
+ }
+ } else { /* "lud16" */
+ buf = flatcc_json_parser_unmatched_symbol(ctx, buf, end);
+ } /* "lud16" */
+ } /* "name" */
+ } else { /* branch "nip05" */
+ if (w < 0x7069637475726500) { /* branch "picture" */
+ if ((w & 0xffffffffff000000) == 0x6e69703035000000) { /* "nip05" */
+ buf = flatcc_json_parser_match_symbol(ctx, (mark = buf), end, 5);
+ if (mark != buf) {
+ buf = flatcc_json_parser_build_string(ctx, buf, end, &ref);
+ if (!ref || !(pref = flatcc_builder_table_add_offset(ctx->ctx, 8))) goto failed;
+ *pref = ref;
+ } else {
+ buf = flatcc_json_parser_unmatched_symbol(ctx, buf, end);
+ }
+ } else { /* "nip05" */
+ buf = flatcc_json_parser_unmatched_symbol(ctx, buf, end);
+ } /* "nip05" */
+ } else { /* branch "picture" */
+ if (w < 0x7265616374696f6e) { /* branch "reaction" */
+ if ((w & 0xffffffffffffff00) == 0x7069637475726500) { /* "picture" */
+ buf = flatcc_json_parser_match_symbol(ctx, (mark = buf), end, 7);
+ if (mark != buf) {
+ buf = flatcc_json_parser_build_string(ctx, buf, end, &ref);
+ if (!ref || !(pref = flatcc_builder_table_add_offset(ctx->ctx, 7))) goto failed;
+ *pref = ref;
+ } else {
+ buf = flatcc_json_parser_unmatched_symbol(ctx, buf, end);
+ }
+ } else { /* "picture" */
+ buf = flatcc_json_parser_unmatched_symbol(ctx, buf, end);
+ } /* "picture" */
+ } else { /* branch "reaction" */
+ if (w == 0x7265616374696f6e) { /* descend "reaction" */
+ buf += 8;
+ w = flatcc_json_parser_symbol_part(buf, end);
+ if ((w & 0xff00000000000000) == 0x7300000000000000) { /* "s" */
+ buf = flatcc_json_parser_match_symbol(ctx, (mark = buf), end, 1);
+ if (mark != buf) {
+ uint8_t val = 0;
+ static flatcc_json_parser_integral_symbol_f *symbolic_parsers[] = {
+ profile_local_json_parser_enum,
+ profile_global_json_parser_enum, 0 };
+ buf = flatcc_json_parser_bool(ctx, (mark = buf), end, &val);
+ if (mark == buf) {
+ buf = flatcc_json_parser_symbolic_bool(ctx, (mark = buf), end, symbolic_parsers, &val);
+ if (buf == mark || buf == end) goto failed;
+ }
+ if (val != UINT8_C(1) || (ctx->flags & flatcc_json_parser_f_force_add)) {
+ if (!(pval = flatcc_builder_table_add(ctx->ctx, 6, 1, 1))) goto failed;
+ flatbuffers_bool_write_to_pe(pval, val);
+ }
+ } else {
+ buf = flatcc_json_parser_unmatched_symbol(ctx, buf, end);
+ }
+ } else { /* "s" */
+ buf = flatcc_json_parser_unmatched_symbol(ctx, buf, end);
+ } /* "s" */
+ } else { /* descend "reaction" */
+ if ((w & 0xffffffffffffff00) == 0x7765627369746500) { /* "website" */
+ buf = flatcc_json_parser_match_symbol(ctx, (mark = buf), end, 7);
+ if (mark != buf) {
+ buf = flatcc_json_parser_build_string(ctx, buf, end, &ref);
+ if (!ref || !(pref = flatcc_builder_table_add_offset(ctx->ctx, 1))) goto failed;
+ *pref = ref;
+ } else {
+ buf = flatcc_json_parser_unmatched_symbol(ctx, buf, end);
+ }
+ } else { /* "website" */
+ buf = flatcc_json_parser_unmatched_symbol(ctx, buf, end);
+ } /* "website" */
+ } /* descend "reaction" */
+ } /* branch "reaction" */
+ } /* branch "picture" */
+ } /* branch "nip05" */
+ } /* branch "lud16" */
+ buf = flatcc_json_parser_object_end(ctx, buf, end, &more);
+ }
+ if (ctx->error) goto failed;
+ if (!(*result = flatcc_builder_end_table(ctx->ctx))) goto failed;
+ return buf;
+failed:
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_runtime);
+}
+
+static inline int NdbProfile_parse_json_as_root(flatcc_builder_t *B, flatcc_json_parser_t *ctx, const char *buf, size_t bufsiz, int flags, const char *fid)
+{
+ return flatcc_json_parser_table_as_root(B, ctx, buf, bufsiz, flags, fid, NdbProfile_parse_json_table);
+}
+
+static const char *profile_local_json_parser_enum(flatcc_json_parser_t *ctx, const char *buf, const char *end,
+ int *value_type, uint64_t *value, int *aggregate)
+{
+ /* Scope has no enum / union types to look up. */
+ return buf; /* unmatched; */
+}
+
+static const char *profile_global_json_parser_enum(flatcc_json_parser_t *ctx, const char *buf, const char *end,
+ int *value_type, uint64_t *value, int *aggregate)
+{
+ /* Global scope has no enum / union types to look up. */
+ return buf; /* unmatched; */
+}
+
+static int profile_parse_json(flatcc_builder_t *B, flatcc_json_parser_t *ctx,
+ const char *buf, size_t bufsiz, int flags)
+{
+ flatcc_json_parser_t parser;
+ flatcc_builder_ref_t root;
+
+ ctx = ctx ? ctx : &parser;
+ flatcc_json_parser_init(ctx, B, buf, buf + bufsiz, flags);
+ if (flatcc_builder_start_buffer(B, 0, 0, 0)) return -1;
+ NdbProfile_parse_json_table(ctx, buf, buf + bufsiz, &root);
+ if (ctx->error) {
+ return ctx->error;
+ }
+ if (!flatcc_builder_end_buffer(B, root)) return -1;
+ ctx->end_loc = buf;
+ return 0;
+}
+
+#include "flatcc_epilogue.h"
+#endif /* PROFILE_JSON_PARSER_H */
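
The hex constants above are big-endian ASCII: flatcc_json_parser_symbol_part loads up to eight key bytes into a uint64_t, NUL-padded, and the generated `<`/`==` chain binary-searches the sorted key set eight bytes at a time, descending into a second word for longer keys such as "damus_donation_v2". A standalone illustration, not part of this commit:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const uint64_t w = UINT64_C(0x6c75643136000000); /* "lud16" branch key */
        int i;

        for (i = 7; i >= 0; --i) {
            unsigned char c = (unsigned char)(w >> (i * 8));
            if (c)
                putchar(c); /* prints: lud16 */
        }
        putchar('\n');
        return 0;
    }
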
diff --git a/nostrdb/bindings/c/profile_reader.h b/nostrdb/bindings/c/profile_reader.h
@@ -0,0 +1,63 @@
+#ifndef PROFILE_READER_H
+#define PROFILE_READER_H
+
+/* Generated by flatcc 0.6.1 FlatBuffers schema compiler for C by dvide.com */
+
+#ifndef FLATBUFFERS_COMMON_READER_H
+#include "flatbuffers_common_reader.h"
+#endif
+#include "flatcc/flatcc_flatbuffers.h"
+#ifndef __alignas_is_defined
+#include <stdalign.h>
+#endif
+#include "flatcc/flatcc_prologue.h"
+#ifndef flatbuffers_identifier
+#define flatbuffers_identifier 0
+#endif
+#ifndef flatbuffers_extension
+#define flatbuffers_extension "bin"
+#endif
+
+
+typedef const struct NdbProfile_table *NdbProfile_table_t;
+typedef struct NdbProfile_table *NdbProfile_mutable_table_t;
+typedef const flatbuffers_uoffset_t *NdbProfile_vec_t;
+typedef flatbuffers_uoffset_t *NdbProfile_mutable_vec_t;
+#ifndef NdbProfile_file_identifier
+#define NdbProfile_file_identifier 0
+#endif
+/* deprecated, use NdbProfile_file_identifier */
+#ifndef NdbProfile_identifier
+#define NdbProfile_identifier 0
+#endif
+#define NdbProfile_type_hash ((flatbuffers_thash_t)0xba639e28)
+#define NdbProfile_type_identifier "\x28\x9e\x63\xba"
+#ifndef NdbProfile_file_extension
+#define NdbProfile_file_extension "bin"
+#endif
+
+
+
+struct NdbProfile_table { uint8_t unused__; };
+
+static inline size_t NdbProfile_vec_len(NdbProfile_vec_t vec)
+__flatbuffers_vec_len(vec)
+static inline NdbProfile_table_t NdbProfile_vec_at(NdbProfile_vec_t vec, size_t i)
+__flatbuffers_offset_vec_at(NdbProfile_table_t, vec, i, 0)
+__flatbuffers_table_as_root(NdbProfile)
+
+__flatbuffers_define_string_field(0, NdbProfile, name, 0)
+__flatbuffers_define_string_field(1, NdbProfile, website, 0)
+__flatbuffers_define_string_field(2, NdbProfile, about, 0)
+__flatbuffers_define_string_field(3, NdbProfile, lud16, 0)
+__flatbuffers_define_string_field(4, NdbProfile, banner, 0)
+__flatbuffers_define_string_field(5, NdbProfile, display_name, 0)
+__flatbuffers_define_scalar_field(6, NdbProfile, reactions, flatbuffers_bool, flatbuffers_bool_t, UINT8_C(1))
+__flatbuffers_define_string_field(7, NdbProfile, picture, 0)
+__flatbuffers_define_string_field(8, NdbProfile, nip05, 0)
+__flatbuffers_define_scalar_field(9, NdbProfile, damus_donation, flatbuffers_int32, int32_t, INT32_C(0))
+__flatbuffers_define_scalar_field(10, NdbProfile, damus_donation_v2, flatbuffers_int32, int32_t, INT32_C(0))
+
+
+#include "flatcc/flatcc_epilogue.h"
+#endif /* PROFILE_READER_H */
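
String accessors return flatbuffers_string_t, a const char * into the buffer, or NULL when the field is absent, so reads need a NULL check; scalars instead fall back to their schema default. A sketch (the helper name is illustrative):

    #include <stdio.h>
    #include "profile_reader.h"

    static void print_profile(const void *buf)
    {
        NdbProfile_table_t profile = NdbProfile_as_root(buf);
        flatbuffers_string_t name;

        if (!profile)
            return;
        if ((name = NdbProfile_name(profile)))
            printf("name: %s\n", name);
        /* reactions defaults to true when unset */
        printf("reactions: %d\n", (int)NdbProfile_reactions(profile));
    }
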
diff --git a/nostrdb/bindings/c/profile_verifier.h b/nostrdb/bindings/c/profile_verifier.h
@@ -0,0 +1,52 @@
+#ifndef PROFILE_VERIFIER_H
+#define PROFILE_VERIFIER_H
+
+/* Generated by flatcc 0.6.1 FlatBuffers schema compiler for C by dvide.com */
+
+#ifndef PROFILE_READER_H
+#include "profile_reader.h"
+#endif
+#include "flatcc/flatcc_verifier.h"
+#include "flatcc/flatcc_prologue.h"
+
+static int NdbProfile_verify_table(flatcc_table_verifier_descriptor_t *td);
+
+static int NdbProfile_verify_table(flatcc_table_verifier_descriptor_t *td)
+{
+ int ret;
+ if ((ret = flatcc_verify_string_field(td, 0, 0) /* name */)) return ret;
+ if ((ret = flatcc_verify_string_field(td, 1, 0) /* website */)) return ret;
+ if ((ret = flatcc_verify_string_field(td, 2, 0) /* about */)) return ret;
+ if ((ret = flatcc_verify_string_field(td, 3, 0) /* lud16 */)) return ret;
+ if ((ret = flatcc_verify_string_field(td, 4, 0) /* banner */)) return ret;
+ if ((ret = flatcc_verify_string_field(td, 5, 0) /* display_name */)) return ret;
+ if ((ret = flatcc_verify_field(td, 6, 1, 1) /* reactions */)) return ret;
+ if ((ret = flatcc_verify_string_field(td, 7, 0) /* picture */)) return ret;
+ if ((ret = flatcc_verify_string_field(td, 8, 0) /* nip05 */)) return ret;
+ if ((ret = flatcc_verify_field(td, 9, 4, 4) /* damus_donation */)) return ret;
+ if ((ret = flatcc_verify_field(td, 10, 4, 4) /* damus_donation_v2 */)) return ret;
+ return flatcc_verify_ok;
+}
+
+static inline int NdbProfile_verify_as_root(const void *buf, size_t bufsiz)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, NdbProfile_identifier, &NdbProfile_verify_table);
+}
+
+static inline int NdbProfile_verify_as_typed_root(const void *buf, size_t bufsiz)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, NdbProfile_type_identifier, &NdbProfile_verify_table);
+}
+
+static inline int NdbProfile_verify_as_root_with_identifier(const void *buf, size_t bufsiz, const char *fid)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, fid, &NdbProfile_verify_table);
+}
+
+static inline int NdbProfile_verify_as_root_with_type_hash(const void *buf, size_t bufsiz, flatbuffers_thash_t thash)
+{
+ return flatcc_verify_table_as_typed_root(buf, bufsiz, thash, &NdbProfile_verify_table);
+}
+
+#include "flatcc/flatcc_epilogue.h"
+#endif /* PROFILE_VERIFIER_H */
diff --git a/nostrdb/bindings/swift/NdbProfile.swift b/nostrdb/bindings/swift/NdbProfile.swift
@@ -0,0 +1,110 @@
+// automatically generated by the FlatBuffers compiler, do not modify
+// swiftlint:disable all
+// swiftformat:disable all
+
+import FlatBuffers
+
+public struct NdbProfile: FlatBufferObject, Verifiable {
+
+ static func validateVersion() { FlatBuffersVersion_23_5_26() }
+ public var __buffer: ByteBuffer! { return _accessor.bb }
+ private var _accessor: Table
+
+ private init(_ t: Table) { _accessor = t }
+ public init(_ bb: ByteBuffer, o: Int32) { _accessor = Table(bb: bb, position: o) }
+
+ private enum VTOFFSET: VOffset {
+ case name = 4
+ case website = 6
+ case about = 8
+ case lud16 = 10
+ case banner = 12
+ case displayName = 14
+ case reactions = 16
+ case picture = 18
+ case nip05 = 20
+ case damusDonation = 22
+ case damusDonationV2 = 24
+ var v: Int32 { Int32(self.rawValue) }
+ var p: VOffset { self.rawValue }
+ }
+
+ public var name: String? { let o = _accessor.offset(VTOFFSET.name.v); return o == 0 ? nil : _accessor.string(at: o) }
+ public var nameSegmentArray: [UInt8]? { return _accessor.getVector(at: VTOFFSET.name.v) }
+ public var website: String? { let o = _accessor.offset(VTOFFSET.website.v); return o == 0 ? nil : _accessor.string(at: o) }
+ public var websiteSegmentArray: [UInt8]? { return _accessor.getVector(at: VTOFFSET.website.v) }
+ public var about: String? { let o = _accessor.offset(VTOFFSET.about.v); return o == 0 ? nil : _accessor.string(at: o) }
+ public var aboutSegmentArray: [UInt8]? { return _accessor.getVector(at: VTOFFSET.about.v) }
+ public var lud16: String? { let o = _accessor.offset(VTOFFSET.lud16.v); return o == 0 ? nil : _accessor.string(at: o) }
+ public var lud16SegmentArray: [UInt8]? { return _accessor.getVector(at: VTOFFSET.lud16.v) }
+ public var banner: String? { let o = _accessor.offset(VTOFFSET.banner.v); return o == 0 ? nil : _accessor.string(at: o) }
+ public var bannerSegmentArray: [UInt8]? { return _accessor.getVector(at: VTOFFSET.banner.v) }
+ public var displayName: String? { let o = _accessor.offset(VTOFFSET.displayName.v); return o == 0 ? nil : _accessor.string(at: o) }
+ public var displayNameSegmentArray: [UInt8]? { return _accessor.getVector(at: VTOFFSET.displayName.v) }
+ public var reactions: Bool { let o = _accessor.offset(VTOFFSET.reactions.v); return o == 0 ? true : _accessor.readBuffer(of: Bool.self, at: o) }
+ public var picture: String? { let o = _accessor.offset(VTOFFSET.picture.v); return o == 0 ? nil : _accessor.string(at: o) }
+ public var pictureSegmentArray: [UInt8]? { return _accessor.getVector(at: VTOFFSET.picture.v) }
+ public var nip05: String? { let o = _accessor.offset(VTOFFSET.nip05.v); return o == 0 ? nil : _accessor.string(at: o) }
+ public var nip05SegmentArray: [UInt8]? { return _accessor.getVector(at: VTOFFSET.nip05.v) }
+ public var damusDonation: Int32 { let o = _accessor.offset(VTOFFSET.damusDonation.v); return o == 0 ? 0 : _accessor.readBuffer(of: Int32.self, at: o) }
+ public var damusDonationV2: Int32 { let o = _accessor.offset(VTOFFSET.damusDonationV2.v); return o == 0 ? 0 : _accessor.readBuffer(of: Int32.self, at: o) }
+ public static func startNdbProfile(_ fbb: inout FlatBufferBuilder) -> UOffset { fbb.startTable(with: 11) }
+ public static func add(name: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: name, at: VTOFFSET.name.p) }
+ public static func add(website: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: website, at: VTOFFSET.website.p) }
+ public static func add(about: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: about, at: VTOFFSET.about.p) }
+ public static func add(lud16: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: lud16, at: VTOFFSET.lud16.p) }
+ public static func add(banner: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: banner, at: VTOFFSET.banner.p) }
+ public static func add(displayName: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: displayName, at: VTOFFSET.displayName.p) }
+ public static func add(reactions: Bool, _ fbb: inout FlatBufferBuilder) { fbb.add(element: reactions, def: true, at: VTOFFSET.reactions.p) }
+ public static func add(picture: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: picture, at: VTOFFSET.picture.p) }
+ public static func add(nip05: Offset, _ fbb: inout FlatBufferBuilder) { fbb.add(offset: nip05, at: VTOFFSET.nip05.p) }
+ public static func add(damusDonation: Int32, _ fbb: inout FlatBufferBuilder) { fbb.add(element: damusDonation, def: 0, at: VTOFFSET.damusDonation.p) }
+ public static func add(damusDonationV2: Int32, _ fbb: inout FlatBufferBuilder) { fbb.add(element: damusDonationV2, def: 0, at: VTOFFSET.damusDonationV2.p) }
+ public static func endNdbProfile(_ fbb: inout FlatBufferBuilder, start: UOffset) -> Offset { let end = Offset(offset: fbb.endTable(at: start)); return end }
+ public static func createNdbProfile(
+ _ fbb: inout FlatBufferBuilder,
+ nameOffset name: Offset = Offset(),
+ websiteOffset website: Offset = Offset(),
+ aboutOffset about: Offset = Offset(),
+ lud16Offset lud16: Offset = Offset(),
+ bannerOffset banner: Offset = Offset(),
+ displayNameOffset displayName: Offset = Offset(),
+ reactions: Bool = true,
+ pictureOffset picture: Offset = Offset(),
+ nip05Offset nip05: Offset = Offset(),
+ damusDonation: Int32 = 0,
+ damusDonationV2: Int32 = 0
+ ) -> Offset {
+ let __start = NdbProfile.startNdbProfile(&fbb)
+ NdbProfile.add(name: name, &fbb)
+ NdbProfile.add(website: website, &fbb)
+ NdbProfile.add(about: about, &fbb)
+ NdbProfile.add(lud16: lud16, &fbb)
+ NdbProfile.add(banner: banner, &fbb)
+ NdbProfile.add(displayName: displayName, &fbb)
+ NdbProfile.add(reactions: reactions, &fbb)
+ NdbProfile.add(picture: picture, &fbb)
+ NdbProfile.add(nip05: nip05, &fbb)
+ NdbProfile.add(damusDonation: damusDonation, &fbb)
+ NdbProfile.add(damusDonationV2: damusDonationV2, &fbb)
+ return NdbProfile.endNdbProfile(&fbb, start: __start)
+ }
+
+ public static func verify<T>(_ verifier: inout Verifier, at position: Int, of type: T.Type) throws where T: Verifiable {
+ var _v = try verifier.visitTable(at: position)
+ try _v.visit(field: VTOFFSET.name.p, fieldName: "name", required: false, type: ForwardOffset<String>.self)
+ try _v.visit(field: VTOFFSET.website.p, fieldName: "website", required: false, type: ForwardOffset<String>.self)
+ try _v.visit(field: VTOFFSET.about.p, fieldName: "about", required: false, type: ForwardOffset<String>.self)
+ try _v.visit(field: VTOFFSET.lud16.p, fieldName: "lud16", required: false, type: ForwardOffset<String>.self)
+ try _v.visit(field: VTOFFSET.banner.p, fieldName: "banner", required: false, type: ForwardOffset<String>.self)
+ try _v.visit(field: VTOFFSET.displayName.p, fieldName: "displayName", required: false, type: ForwardOffset<String>.self)
+ try _v.visit(field: VTOFFSET.reactions.p, fieldName: "reactions", required: false, type: Bool.self)
+ try _v.visit(field: VTOFFSET.picture.p, fieldName: "picture", required: false, type: ForwardOffset<String>.self)
+ try _v.visit(field: VTOFFSET.nip05.p, fieldName: "nip05", required: false, type: ForwardOffset<String>.self)
+ try _v.visit(field: VTOFFSET.damusDonation.p, fieldName: "damusDonation", required: false, type: Int32.self)
+ try _v.visit(field: VTOFFSET.damusDonationV2.p, fieldName: "damusDonationV2", required: false, type: Int32.self)
+ _v.finish()
+ }
+}
+
diff --git a/nostrdb/copy-ndb b/nostrdb/copy-ndb
@@ -1,4 +1,13 @@
#!/usr/bin/env bash
+rm -rf flatcc bindings
+mkdir -p flatcc
cp ~/src/c/nostrdb/nostrdb.{c,h} .
-cp ~/src/c/nostrdb/jsmn.h .
+cp ~/src/c/nostrdb/{jsmn,threadpool,protected_queue,memchr,util}.h .
+cp ~/src/c/nostrdb/deps/lmdb/{lmdb,midl}.h .
+cp ~/src/c/nostrdb/deps/lmdb/mdb.c .
+cp ~/src/c/nostrdb/deps/lmdb/midl.c .
+cp -r ~/src/c/nostrdb/deps/flatcc/include/flatcc/* flatcc
+cp ~/src/c/nostrdb/deps/flatcc/src/runtime/* flatcc
+cp -r ~/src/c/nostrdb/bindings .
+patch -p2 < flatcc.patch
diff --git a/nostrdb/flatcc.patch b/nostrdb/flatcc.patch
@@ -0,0 +1,289 @@
+diff --git b/nostrdb/bindings/c/flatbuffers_common_reader.h a/nostrdb/bindings/c/flatbuffers_common_reader.h
+index c575308689b9..49e479e29980 100644
+--- b/nostrdb/bindings/c/flatbuffers_common_reader.h
++++ a/nostrdb/bindings/c/flatbuffers_common_reader.h
+@@ -5,8 +5,8 @@
+
+ /* Common FlatBuffers read functionality for C. */
+
+-#include "flatcc/flatcc_prologue.h"
+-#include "flatcc/flatcc_flatbuffers.h"
++#include "flatcc_prologue.h"
++#include "flatcc_flatbuffers.h"
+
+
+ #define __flatbuffers_read_scalar_at_byteoffset(N, p, o) N ## _read_from_pe((uint8_t *)(p) + (o))
+@@ -574,5 +574,5 @@ static inline N ## _ ## K ## t N ## _as_typed_root(const void *buffer__tmp)\
+ #define __flatbuffers_struct_as_root(N) __flatbuffers_buffer_as_root(N, struct_)
+ #define __flatbuffers_table_as_root(N) __flatbuffers_buffer_as_root(N, table_)
+
+-#include "flatcc/flatcc_epilogue.h"
++#include "flatcc_epilogue.h"
+ #endif /* FLATBUFFERS_COMMON_H */
+diff --git b/nostrdb/bindings/c/profile_json_parser.h a/nostrdb/bindings/c/profile_json_parser.h
+index a7caaaec6d37..f404b6679fe0 100644
+--- b/nostrdb/bindings/c/profile_json_parser.h
++++ a/nostrdb/bindings/c/profile_json_parser.h
+@@ -3,8 +3,8 @@
+
+ /* Generated by flatcc 0.6.1 FlatBuffers schema compiler for C by dvide.com */
+
+-#include "flatcc/flatcc_json_parser.h"
+-#include "flatcc/flatcc_prologue.h"
++#include "flatcc_json_parser.h"
++#include "flatcc_prologue.h"
+
+ /*
+ * Parses the default root table or struct of the schema and constructs a FlatBuffer.
+@@ -280,5 +280,5 @@ static int profile_parse_json(flatcc_builder_t *B, flatcc_json_parser_t *ctx,
+ return 0;
+ }
+
+-#include "flatcc/flatcc_epilogue.h"
++#include "flatcc_epilogue.h"
+ #endif /* PROFILE_JSON_PARSER_H */
+diff --git b/nostrdb/flatcc/builder.c a/nostrdb/flatcc/builder.c
+index 9f54d884ff53..c5155a85e407 100644
+--- b/nostrdb/flatcc/builder.c
++++ a/nostrdb/flatcc/builder.c
+@@ -16,8 +16,8 @@
+ #include <stdlib.h>
+ #include <string.h>
+
+-#include "flatcc/flatcc_builder.h"
+-#include "flatcc/flatcc_emitter.h"
++#include "flatcc_builder.h"
++#include "flatcc_emitter.h"
+
+ /*
+ * `check` is designed to handle incorrect use errors that can be
+diff --git b/nostrdb/flatcc/emitter.c a/nostrdb/flatcc/emitter.c
+index 089ea00b2060..dbeffacd97ed 100644
+--- b/nostrdb/flatcc/emitter.c
++++ a/nostrdb/flatcc/emitter.c
+@@ -1,7 +1,7 @@
+ #include <stdlib.h>
+
+-#include "flatcc/flatcc_rtconfig.h"
+-#include "flatcc/flatcc_emitter.h"
++#include "flatcc_rtconfig.h"
++#include "flatcc_emitter.h"
+
+ static int advance_front(flatcc_emitter_t *E)
+ {
+diff --git b/nostrdb/flatcc/flatcc_alloc.h a/nostrdb/flatcc/flatcc_alloc.h
+index 155364c1e2ba..c07462d57754 100644
+--- b/nostrdb/flatcc/flatcc_alloc.h
++++ a/nostrdb/flatcc/flatcc_alloc.h
+@@ -69,7 +69,7 @@ extern "C" {
+ #ifndef FLATCC_USE_GENERIC_ALIGNED_ALLOC
+
+ #ifndef FLATCC_NO_PALIGNED_ALLOC
+-#include "flatcc/portable/paligned_alloc.h"
++#include "paligned_alloc.h"
+ #else
+ #if !defined(__aligned_free_is_defined) || !__aligned_free_is_defined
+ #define aligned_free free
+diff --git b/nostrdb/flatcc/flatcc_emitter.h a/nostrdb/flatcc/flatcc_emitter.h
+index b8c83b94d3de..11756f23f4d0 100644
+--- b/nostrdb/flatcc/flatcc_emitter.h
++++ a/nostrdb/flatcc/flatcc_emitter.h
+@@ -16,9 +16,9 @@ extern "C" {
+ #include <stdlib.h>
+ #include <string.h>
+
+-#include "flatcc/flatcc_types.h"
+-#include "flatcc/flatcc_iov.h"
+-#include "flatcc/flatcc_alloc.h"
++#include "flatcc_types.h"
++#include "flatcc_iov.h"
++#include "flatcc_alloc.h"
+
+ /*
+ * The buffer steadily grows during emission but the design allows for
+diff --git b/nostrdb/flatcc/flatcc_endian.h a/nostrdb/flatcc/flatcc_endian.h
+index 0592f3132380..d16f72c89a11 100644
+--- b/nostrdb/flatcc/flatcc_endian.h
++++ a/nostrdb/flatcc/flatcc_endian.h
+@@ -66,7 +66,7 @@ extern "C" {
+ #define htobe8(n) (n)
+ #endif
+
+-#include "flatcc/flatcc_accessors.h"
++#include "flatcc_accessors.h"
+
+ /* This is the binary encoding endianness, usually LE for flatbuffers. */
+ #if FLATBUFFERS_PROTOCOL_IS_LE
+diff --git b/nostrdb/flatcc/flatcc_epilogue.h a/nostrdb/flatcc/flatcc_epilogue.h
+index 496857ba1251..dc724f6c98ed 100644
+--- b/nostrdb/flatcc/flatcc_epilogue.h
++++ a/nostrdb/flatcc/flatcc_epilogue.h
+@@ -4,5 +4,5 @@
+ }
+ #endif
+
+-#include "flatcc/portable/pdiagnostic_pop.h"
++#include "pdiagnostic_pop.h"
+
+diff --git b/nostrdb/flatcc/flatcc_flatbuffers.h a/nostrdb/flatcc/flatcc_flatbuffers.h
+index 4bfc7435251a..210c9f2a420d 100644
+--- b/nostrdb/flatcc/flatcc_flatbuffers.h
++++ a/nostrdb/flatcc/flatcc_flatbuffers.h
+@@ -4,7 +4,7 @@
+ *
+ * Outside include guard to handle scope counter.
+ */
+-#include "flatcc/portable/pstatic_assert.h"
++#include "pstatic_assert.h"
+
+ #ifndef FLATCC_FLATBUFFERS_H
+ #define FLATCC_FLATBUFFERS_H
+@@ -19,15 +19,15 @@ extern "C" {
+ #ifdef FLATCC_PORTABLE
+ #include "flatcc/flatcc_portable.h"
+ #endif
+-#include "flatcc/portable/pwarnings.h"
++#include "pwarnings.h"
+ /* Needed by C99 compilers without FLATCC_PORTABLE. */
+-#include "flatcc/portable/pstdalign.h"
++#include "pstdalign.h"
+
+ /* Handle fallthrough attribute in switch statements. */
+-#include "flatcc/portable/pattributes.h"
++#include "pattributes.h"
+
+-#include "flatcc/flatcc_alloc.h"
+-#include "flatcc/flatcc_assert.h"
++#include "flatcc_alloc.h"
++#include "flatcc_assert.h"
+
+ #define __FLATBUFFERS_PASTE2(a, b) a ## b
+ #define __FLATBUFFERS_PASTE3(a, b, c) a ## b ## c
+@@ -37,10 +37,10 @@ extern "C" {
+ * "flatcc_endian.h" requires the preceeding include files,
+ * or compatible definitions.
+ */
+-#include "flatcc/portable/pendian.h"
+-#include "flatcc/flatcc_types.h"
+-#include "flatcc/flatcc_endian.h"
+-#include "flatcc/flatcc_identifier.h"
++#include "pendian.h"
++#include "flatcc_types.h"
++#include "flatcc_endian.h"
++#include "flatcc_identifier.h"
+
+ #ifndef FLATBUFFERS_WRAP_NAMESPACE
+ #define FLATBUFFERS_WRAP_NAMESPACE(ns, x) ns ## _ ## x
+diff --git b/nostrdb/flatcc/flatcc_json_parser.h a/nostrdb/flatcc/flatcc_json_parser.h
+index 1907fc7fc635..ed7151c2fd6b 100644
+--- b/nostrdb/flatcc/flatcc_json_parser.h
++++ a/nostrdb/flatcc/flatcc_json_parser.h
+@@ -15,12 +15,12 @@ extern "C" {
+ #include <stdlib.h>
+ #include <string.h>
+
+-#include "flatcc/flatcc_rtconfig.h"
+-#include "flatcc/flatcc_builder.h"
+-#include "flatcc/flatcc_unaligned.h"
++#include "flatcc_rtconfig.h"
++#include "flatcc_builder.h"
++#include "flatcc_unaligned.h"
+
+ #define PDIAGNOSTIC_IGNORE_UNUSED
+-#include "flatcc/portable/pdiagnostic_push.h"
++#include "pdiagnostic_push.h"
+
+ enum flatcc_json_parser_flags {
+ flatcc_json_parser_f_skip_unknown = 1,
+@@ -886,7 +886,7 @@ int flatcc_json_parser_struct_as_root(flatcc_builder_t *B, flatcc_json_parser_t
+ const char *buf, size_t bufsiz, int flags, const char *fid,
+ flatcc_json_parser_struct_f *parser);
+
+-#include "flatcc/portable/pdiagnostic_pop.h"
++#include "pdiagnostic_pop.h"
+
+ #ifdef __cplusplus
+ }
+diff --git b/nostrdb/flatcc/flatcc_prologue.h a/nostrdb/flatcc/flatcc_prologue.h
+index 3a74ed6040db..36344c4c071f 100644
+--- b/nostrdb/flatcc/flatcc_prologue.h
++++ a/nostrdb/flatcc/flatcc_prologue.h
+@@ -1,7 +1,7 @@
+ /* Include guard intentionally left out. */
+
+ #define PDIAGNOSTIC_IGNORE_UNUSED
+-#include "flatcc/portable/pdiagnostic_push.h"
++#include "pdiagnostic_push.h"
+
+ #ifdef __cplusplus
+ extern "C" {
+diff --git b/nostrdb/flatcc/flatcc_refmap.h a/nostrdb/flatcc/flatcc_refmap.h
+index 062d94f5d35d..beafa301d042 100644
+--- b/nostrdb/flatcc/flatcc_refmap.h
++++ a/nostrdb/flatcc/flatcc_refmap.h
+@@ -50,7 +50,7 @@
+ extern "C" {
+ #endif
+
+-#include "flatcc/flatcc_types.h"
++#include "flatcc_types.h"
+
+ #ifndef FLATCC_REFMAP_MIN_BUCKETS
+ /* 8 buckets gives us 5 useful initial entries with a load factor of 0.7 */
+diff --git b/nostrdb/flatcc/flatcc_unaligned.h a/nostrdb/flatcc/flatcc_unaligned.h
+index a7dc546111cd..5ea26cede6ee 100644
+--- b/nostrdb/flatcc/flatcc_unaligned.h
++++ a/nostrdb/flatcc/flatcc_unaligned.h
+@@ -5,7 +5,7 @@
+ extern "C" {
+ #endif
+
+-#include "flatcc/portable/punaligned.h"
++#include "punaligned.h"
+
+ #define FLATCC_ALLOW_UNALIGNED_ACCESS PORTABLE_UNALIGNED_ACCESS
+
+diff --git b/nostrdb/flatcc/json_parser.c a/nostrdb/flatcc/json_parser.c
+index 0e3aeea9834c..06f778da33f3 100644
+--- b/nostrdb/flatcc/json_parser.c
++++ a/nostrdb/flatcc/json_parser.c
+@@ -1,6 +1,6 @@
+-#include "flatcc/flatcc_rtconfig.h"
+-#include "flatcc/flatcc_json_parser.h"
+-#include "flatcc/flatcc_assert.h"
++#include "flatcc_rtconfig.h"
++#include "flatcc_json_parser.h"
++#include "flatcc_assert.h"
+
+ #define uoffset_t flatbuffers_uoffset_t
+ #define soffset_t flatbuffers_soffset_t
+@@ -16,8 +16,8 @@
+ #if FLATCC_USE_GRISU3 && !defined(PORTABLE_USE_GRISU3)
+ #define PORTABLE_USE_GRISU3 1
+ #endif
+-#include "flatcc/portable/pparsefp.h"
+-#include "flatcc/portable/pbase64.h"
++#include "portable/pparsefp.h"
++#include "portable/pbase64.h"
+
+ #if FLATCC_USE_SSE4_2
+ #ifdef __SSE4_2__
+diff --git b/nostrdb/flatcc/refmap.c a/nostrdb/flatcc/refmap.c
+index a2497f02247b..d8c6034fbb12 100644
+--- b/nostrdb/flatcc/refmap.c
++++ a/nostrdb/flatcc/refmap.c
+@@ -13,10 +13,10 @@
+ #include <stdlib.h>
+ #include <string.h>
+
+-#include "flatcc/flatcc_rtconfig.h"
+-#include "flatcc/flatcc_refmap.h"
+-#include "flatcc/flatcc_alloc.h"
+-#include "flatcc/flatcc_assert.h"
++#include "flatcc_rtconfig.h"
++#include "flatcc_refmap.h"
++#include "flatcc_alloc.h"
++#include "flatcc_assert.h"
+
+ #define _flatcc_refmap_calloc FLATCC_CALLOC
+ #define _flatcc_refmap_free FLATCC_FREE
diff --git a/nostrdb/flatcc/CMakeLists.txt b/nostrdb/flatcc/CMakeLists.txt
@@ -0,0 +1,16 @@
+include_directories (
+ "${PROJECT_SOURCE_DIR}/include"
+)
+
+add_library(flatccrt
+ builder.c
+ emitter.c
+ refmap.c
+ verifier.c
+ json_parser.c
+ json_printer.c
+)
+
+if (FLATCC_INSTALL)
+ install(TARGETS flatccrt DESTINATION ${lib_dir})
+endif()
diff --git a/nostrdb/flatcc/builder.c b/nostrdb/flatcc/builder.c
@@ -0,0 +1,2035 @@
+/*
+ * Code generator for C, building FlatBuffers.
+ *
+ * There are several possible approaches: some lightweight, some
+ * requiring a library, some with vectored I/O, etc.
+ *
+ * Here we focus on a reasonable balance of light code and efficiency.
+ *
+ * Builder code is generated to a separate file that includes the
+ * generated read-only code.
+ *
+ * Mutable buffers are not supported in this version.
+ *
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "flatcc_builder.h"
+#include "flatcc_emitter.h"
+
+/*
+ * `check` is designed to catch incorrect-use errors that can be
+ * ignored in a production build of a tested product.
+ *
+ * `check_error` fails if condition is false and is designed to return an
+ * error code in production.
+ */
+
+#if FLATCC_BUILDER_ASSERT_ON_ERROR
+#define check(cond, reason) FLATCC_BUILDER_ASSERT(cond, reason)
+#else
+#define check(cond, reason) ((void)0)
+#endif
+
+#if FLATCC_BUILDER_SKIP_CHECKS
+#define check_error(cond, err, reason) ((void)0)
+#else
+#define check_error(cond, err, reason) if (!(cond)) { check(cond, reason); return err; }
+#endif
+
+/* `strnlen` not widely supported. */
+static inline size_t pstrnlen(const char *s, size_t max_len)
+{
+ const char *end = memchr(s, 0, max_len);
+ return end ? (size_t)(end - s) : max_len;
+}
+#undef strnlen
+#define strnlen pstrnlen
+
+/* Padding can be up to 255 zeroes, and 1 zero string termination byte.
+ * When two paddings are combined at nested buffers, we need twice that.
+ * Visible to emitter so it can test for zero padding in iov. */
+const uint8_t flatcc_builder_padding_base[512] = { 0 };
+#define _pad flatcc_builder_padding_base
+
+#define uoffset_t flatbuffers_uoffset_t
+#define soffset_t flatbuffers_soffset_t
+#define voffset_t flatbuffers_voffset_t
+#define utype_t flatbuffers_utype_t
+
+#define write_uoffset __flatbuffers_uoffset_write_to_pe
+#define write_voffset __flatbuffers_voffset_write_to_pe
+#define write_identifier __flatbuffers_uoffset_write_to_pe
+#define write_utype __flatbuffers_utype_write_to_pe
+
+#define field_size sizeof(uoffset_t)
+#define max_offset_count FLATBUFFERS_COUNT_MAX(field_size)
+#define union_size sizeof(flatcc_builder_union_ref_t)
+#define max_union_count FLATBUFFERS_COUNT_MAX(union_size)
+#define utype_size sizeof(utype_t)
+#define max_utype_count FLATBUFFERS_COUNT_MAX(utype_size)
+
+#define max_string_len FLATBUFFERS_COUNT_MAX(1)
+#define identifier_size FLATBUFFERS_IDENTIFIER_SIZE
+
+
+#define iovec_t flatcc_iovec_t
+#define frame_size sizeof(__flatcc_builder_frame_t)
+#define frame(x) (B->frame[0].x)
+
+
+/* `align` must be a power of 2. */
+static inline uoffset_t alignup_uoffset(uoffset_t x, size_t align)
+{
+ return (x + (uoffset_t)align - 1u) & ~((uoffset_t)align - 1u);
+}
+
+static inline size_t alignup_size(size_t x, size_t align)
+{
+ return (x + align - 1u) & ~(align - 1u);
+}
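+
+/*
+ * E.g. alignup_uoffset(13, 8) = (13 + 7) & ~7 = 20 & ~7 = 16, the next
+ * multiple of 8 at or above 13; valid only for power-of-2 `align`.
+ */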
+
+
+typedef struct vtable_descriptor vtable_descriptor_t;
+struct vtable_descriptor {
+ /* Where the vtable is emitted. */
+ flatcc_builder_ref_t vt_ref;
+ /* Which buffer it was emitted to. */
+ uoffset_t nest_id;
+ /* Where the vtable is cached. */
+ uoffset_t vb_start;
+ /* Hash table collision chain. */
+ uoffset_t next;
+};
+
+typedef struct flatcc_iov_state flatcc_iov_state_t;
+struct flatcc_iov_state {
+ size_t len;
+ int count;
+ flatcc_iovec_t iov[FLATCC_IOV_COUNT_MAX];
+};
+
+#define iov_state_t flatcc_iov_state_t
+
+/* This assumes `iov_state_t iov;` has been declared in scope */
+#define push_iov_cond(base, size, cond) if ((size) > 0 && (cond)) { iov.len += size;\
+ iov.iov[iov.count].iov_base = (void *)(base); iov.iov[iov.count].iov_len = (size); ++iov.count; }
+#define push_iov(base, size) push_iov_cond(base, size, 1)
+#define init_iov() { iov.len = 0; iov.count = 0; }
+
+
+int flatcc_builder_default_alloc(void *alloc_context, iovec_t *b, size_t request, int zero_fill, int hint)
+{
+ void *p;
+ size_t n;
+
+ (void)alloc_context;
+
+ if (request == 0) {
+ if (b->iov_base) {
+ FLATCC_BUILDER_FREE(b->iov_base);
+ b->iov_base = 0;
+ b->iov_len = 0;
+ }
+ return 0;
+ }
+ switch (hint) {
+ case flatcc_builder_alloc_ds:
+ n = 256;
+ break;
+ case flatcc_builder_alloc_ht:
+ /* Should be the exact size, or the extra space is just wasted. */
+ n = request;
+ break;
+ case flatcc_builder_alloc_fs:
+ n = sizeof(__flatcc_builder_frame_t) * 8;
+ break;
+ case flatcc_builder_alloc_us:
+ n = 64;
+ break;
+ default:
+ /*
+ * We have many small structures: the vs stack for tables with few
+ * elements, and few offset fields in the patch log. No need to
+ * overallocate for busy small messages.
+ */
+ n = 32;
+ break;
+ }
+ while (n < request) {
+ n *= 2;
+ }
+ if (request <= b->iov_len && b->iov_len / 2 >= n) {
+ /* Add hysteresis to shrink. */
+ return 0;
+ }
+ if (!(p = FLATCC_BUILDER_REALLOC(b->iov_base, n))) {
+ return -1;
+ }
+ /* Realloc might also shrink. */
+ if (zero_fill && b->iov_len < n) {
+ memset((uint8_t *)p + b->iov_len, 0, n - b->iov_len);
+ }
+ b->iov_base = p;
+ b->iov_len = n;
+ return 0;
+}
+
+#define T_ptr(base, pos) ((void *)((uint8_t *)(base) + (uoffset_t)(pos)))
+#define ds_ptr(pos) (T_ptr(B->buffers[flatcc_builder_alloc_ds].iov_base, (pos)))
+#define vs_ptr(pos) (T_ptr(B->buffers[flatcc_builder_alloc_vs].iov_base, (pos)))
+#define pl_ptr(pos) (T_ptr(B->buffers[flatcc_builder_alloc_pl].iov_base, (pos)))
+#define us_ptr(pos) (T_ptr(B->buffers[flatcc_builder_alloc_us].iov_base, (pos)))
+#define vd_ptr(pos) (T_ptr(B->buffers[flatcc_builder_alloc_vd].iov_base, (pos)))
+#define vb_ptr(pos) (T_ptr(B->buffers[flatcc_builder_alloc_vb].iov_base, (pos)))
+#define vs_offset(ptr) ((uoffset_t)((size_t)(ptr) - (size_t)B->buffers[flatcc_builder_alloc_vs].iov_base))
+#define pl_offset(ptr) ((uoffset_t)((size_t)(ptr) - (size_t)B->buffers[flatcc_builder_alloc_pl].iov_base))
+#define us_offset(ptr) ((uoffset_t)((size_t)(ptr) - (size_t)B->buffers[flatcc_builder_alloc_us].iov_base))
+
+#define table_limit (FLATBUFFERS_VOFFSET_MAX - field_size + 1)
+#define data_limit (FLATBUFFERS_UOFFSET_MAX - field_size + 1)
+
+#define set_identifier(id) memcpy(&B->identifier, (id) ? (void *)(id) : (void *)_pad, identifier_size)
+
+/* Must also return true when no buffer has been started. */
+#define is_top_buffer(B) (B->nest_id == 0)
+
+/*
+ * Tables use a stack representation better suited for quickly adding
+ * fields to tables, but it must occasionally be refreshed following
+ * reallocation or reentry from a child frame.
+ */
+static inline void refresh_ds(flatcc_builder_t *B, uoffset_t type_limit)
+{
+ iovec_t *buf = B->buffers + flatcc_builder_alloc_ds;
+
+ B->ds = ds_ptr(B->ds_first);
+ B->ds_limit = (uoffset_t)buf->iov_len - B->ds_first;
+ /*
+ * So we don't allocate beyond the table representation size, nor our
+ * current buffer size.
+ */
+ if (B->ds_limit > type_limit) {
+ B->ds_limit = type_limit;
+ }
+ /* So exit frame can refresh fast. */
+ frame(type_limit) = type_limit;
+}
+
+static int reserve_ds(flatcc_builder_t *B, size_t need, uoffset_t limit)
+{
+ iovec_t *buf = B->buffers + flatcc_builder_alloc_ds;
+
+ if (B->alloc(B->alloc_context, buf, B->ds_first + need, 1, flatcc_builder_alloc_ds)) {
+ return -1;
+ }
+ refresh_ds(B, limit);
+ return 0;
+}
+
+/*
+ * Make sure there is always an extra zero termination on the stack,
+ * even if it isn't emitted, so that string updates can always count
+ * on zero termination being present.
+ */
+static inline void *push_ds(flatcc_builder_t *B, uoffset_t size)
+{
+ size_t offset;
+
+ offset = B->ds_offset;
+ if ((B->ds_offset += size) >= B->ds_limit) {
+ if (reserve_ds(B, B->ds_offset + 1, data_limit)) {
+ return 0;
+ }
+ }
+ return B->ds + offset;
+}
+
+static inline void unpush_ds(flatcc_builder_t *B, uoffset_t size)
+{
+ B->ds_offset -= size;
+ memset(B->ds + B->ds_offset, 0, size);
+}
+
+static inline void *push_ds_copy(flatcc_builder_t *B, const void *data, uoffset_t size)
+{
+ void *p;
+
+ if (!(p = push_ds(B, size))) {
+ return 0;
+ }
+ memcpy(p, data, size);
+ return p;
+}
+
+static inline void *push_ds_field(flatcc_builder_t *B, uoffset_t size, uint16_t align, voffset_t id)
+{
+ uoffset_t offset;
+
+ /*
+ * We calculate table field alignment relative to the first entry, not
+ * the header field with the vtable offset.
+ *
+ * Note: the >= comparison handles the special case where B->ds is not
+ * allocated yet and size is 0, so the return value would otherwise be
+ * mistaken for an error.
+ */
+ offset = alignup_uoffset(B->ds_offset, align);
+ if ((B->ds_offset = offset + size) >= B->ds_limit) {
+ if (reserve_ds(B, B->ds_offset + 1, table_limit)) {
+ return 0;
+ }
+ }
+ B->vs[id] = (voffset_t)(offset + field_size);
+ if (id >= B->id_end) {
+ B->id_end = id + 1u;
+ }
+ return B->ds + offset;
+}
+
+static inline void *push_ds_offset_field(flatcc_builder_t *B, voffset_t id)
+{
+ uoffset_t offset;
+
+ offset = alignup_uoffset(B->ds_offset, field_size);
+ if ((B->ds_offset = offset + field_size) > B->ds_limit) {
+ if (reserve_ds(B, B->ds_offset, table_limit)) {
+ return 0;
+ }
+ }
+ B->vs[id] = (voffset_t)(offset + field_size);
+ if (id >= B->id_end) {
+ B->id_end = id + 1u;
+ }
+ *B->pl++ = (flatbuffers_voffset_t)offset;
+ return B->ds + offset;
+}
+
+static inline void *reserve_buffer(flatcc_builder_t *B, int alloc_type, size_t used, size_t need, int zero_init)
+{
+ iovec_t *buf = B->buffers + alloc_type;
+
+ if (used + need > buf->iov_len) {
+ if (B->alloc(B->alloc_context, buf, used + need, zero_init, alloc_type)) {
+ check(0, "memory allocation failed");
+ return 0;
+ }
+ }
+ return (void *)((size_t)buf->iov_base + used);
+}
+
+static inline int reserve_fields(flatcc_builder_t *B, int count)
+{
+ size_t used, need;
+
+ /* Provide faster stack operations for common table operations. */
+ used = frame(container.table.vs_end) + frame(container.table.id_end) * sizeof(voffset_t);
+ need = (size_t)(count + 2) * sizeof(voffset_t);
+ if (!(B->vs = reserve_buffer(B, flatcc_builder_alloc_vs, used, need, 1))) {
+ return -1;
+ }
+ /* Move past header for convenience. */
+ B->vs += 2;
+ used = frame(container.table.pl_end);
+ /* Add one to handle special case of first table being empty. */
+ need = (size_t)count * sizeof(*(B->pl)) + 1;
+ if (!(B->pl = reserve_buffer(B, flatcc_builder_alloc_pl, used, need, 0))) {
+ return -1;
+ }
+ return 0;
+}
+
+static int alloc_ht(flatcc_builder_t *B)
+{
+ iovec_t *buf = B->buffers + flatcc_builder_alloc_ht;
+
+ size_t size, k;
+ /* Allocate null entry so we can check for return errors. */
+ FLATCC_ASSERT(B->vd_end == 0);
+ if (!reserve_buffer(B, flatcc_builder_alloc_vd, B->vd_end, sizeof(vtable_descriptor_t), 0)) {
+ return -1;
+ }
+ B->vd_end = sizeof(vtable_descriptor_t);
+ size = field_size * FLATCC_BUILDER_MIN_HASH_COUNT;
+ if (B->alloc(B->alloc_context, buf, size, 1, flatcc_builder_alloc_ht)) {
+ return -1;
+ }
+ while (size * 2 <= buf->iov_len) {
+ size *= 2;
+ }
+ size /= field_size;
+ for (k = 0; (((size_t)1) << k) < size; ++k) {
+ }
+ B->ht_width = k;
+ return 0;
+}
+
+static inline uoffset_t *lookup_ht(flatcc_builder_t *B, uint32_t hash)
+{
+ uoffset_t *T;
+
+ if (B->ht_width == 0) {
+ if (alloc_ht(B)) {
+ return 0;
+ }
+ }
+ T = B->buffers[flatcc_builder_alloc_ht].iov_base;
+
+ return &T[FLATCC_BUILDER_BUCKET_VT_HASH(hash, B->ht_width)];
+}
+
+void flatcc_builder_flush_vtable_cache(flatcc_builder_t *B)
+{
+ iovec_t *buf = B->buffers + flatcc_builder_alloc_ht;
+
+ if (B->ht_width == 0) {
+ return;
+ }
+ memset(buf->iov_base, 0, buf->iov_len);
+ /* Reserve the null entry. */
+ B->vd_end = sizeof(vtable_descriptor_t);
+ B->vb_end = 0;
+}
+
+int flatcc_builder_custom_init(flatcc_builder_t *B,
+ flatcc_builder_emit_fun *emit, void *emit_context,
+ flatcc_builder_alloc_fun *alloc, void *alloc_context)
+{
+ /*
+ * Do not allocate anything here. Only the required buffers will be
+ * allocated. For simple struct buffers, no allocation is required
+ * at all.
+ */
+ memset(B, 0, sizeof(*B));
+
+ if (emit == 0) {
+ B->is_default_emitter = 1;
+ emit = flatcc_emitter;
+ emit_context = &B->default_emit_context;
+ }
+ if (alloc == 0) {
+ alloc = flatcc_builder_default_alloc;
+ }
+ B->alloc_context = alloc_context;
+ B->alloc = alloc;
+ B->emit_context = emit_context;
+ B->emit = emit;
+ return 0;
+}
+
+int flatcc_builder_init(flatcc_builder_t *B)
+{
+ return flatcc_builder_custom_init(B, 0, 0, 0, 0);
+}
+
+int flatcc_builder_custom_reset(flatcc_builder_t *B, int set_defaults, int reduce_buffers)
+{
+ iovec_t *buf;
+ int i;
+
+ for (i = 0; i < FLATCC_BUILDER_ALLOC_BUFFER_COUNT; ++i) {
+ buf = B->buffers + i;
+ if (buf->iov_base) {
+ /* Don't try to reduce the hash table. */
+ if (i != flatcc_builder_alloc_ht &&
+ reduce_buffers && B->alloc(B->alloc_context, buf, 1, 1, i)) {
+ return -1;
+ }
+ memset(buf->iov_base, 0, buf->iov_len);
+ } else {
+ FLATCC_ASSERT(buf->iov_len == 0);
+ }
+ }
+ B->vb_end = 0;
+ if (B->vd_end > 0) {
+ /* Reset past null entry. */
+ B->vd_end = sizeof(vtable_descriptor_t);
+ }
+ B->min_align = 0;
+ B->emit_start = 0;
+ B->emit_end = 0;
+ B->level = 0;
+ B->limit_level = 0;
+ B->ds_offset = 0;
+ B->ds_limit = 0;
+ B->nest_count = 0;
+ B->nest_id = 0;
+ /* Needed for correct offset calculation. */
+ B->ds = B->buffers[flatcc_builder_alloc_ds].iov_base;
+ B->pl = B->buffers[flatcc_builder_alloc_pl].iov_base;
+ B->vs = B->buffers[flatcc_builder_alloc_vs].iov_base;
+ B->frame = 0;
+ if (set_defaults) {
+ B->vb_flush_limit = 0;
+ B->max_level = 0;
+ B->disable_vt_clustering = 0;
+ }
+ if (B->is_default_emitter) {
+ flatcc_emitter_reset(&B->default_emit_context);
+ }
+ if (B->refmap) {
+ flatcc_refmap_reset(B->refmap);
+ }
+ return 0;
+}
+
+int flatcc_builder_reset(flatcc_builder_t *B)
+{
+ return flatcc_builder_custom_reset(B, 0, 0);
+}
+
+void flatcc_builder_clear(flatcc_builder_t *B)
+{
+ iovec_t *buf;
+ int i;
+
+ for (i = 0; i < FLATCC_BUILDER_ALLOC_BUFFER_COUNT; ++i) {
+ buf = B->buffers + i;
+ B->alloc(B->alloc_context, buf, 0, 0, i);
+ }
+ if (B->is_default_emitter) {
+ flatcc_emitter_clear(&B->default_emit_context);
+ }
+ if (B->refmap) {
+ flatcc_refmap_clear(B->refmap);
+ }
+ memset(B, 0, sizeof(*B));
+}
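+
+/*
+ * A minimal lifecycle sketch for the API above: `init` allocates
+ * nothing, `reset` recycles internal buffers between builds, and
+ * `clear` releases everything.
+ *
+ *     flatcc_builder_t builder;
+ *
+ *     flatcc_builder_init(&builder);    // no allocation happens here
+ *     // ... build a first buffer ...
+ *     flatcc_builder_reset(&builder);   // keep allocations for reuse
+ *     // ... build a second buffer ...
+ *     flatcc_builder_clear(&builder);   // deallocate everything
+ */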
+
+static inline void set_min_align(flatcc_builder_t *B, uint16_t align)
+{
+ if (B->min_align < align) {
+ B->min_align = align;
+ }
+}
+
+/*
+ * Implemented as a max operation: the minimum viable alignment is the
+ * largest observed alignment requirement, but no larger.
+ */
+static inline void get_min_align(uint16_t *align, uint16_t b)
+{
+ if (*align < b) {
+ *align = b;
+ }
+}
+
+void *flatcc_builder_enter_user_frame_ptr(flatcc_builder_t *B, size_t size)
+{
+ size_t *frame;
+
+ size = alignup_size(size, sizeof(size_t)) + sizeof(size_t);
+
+ if (!(frame = reserve_buffer(B, flatcc_builder_alloc_us, B->user_frame_end, size, 0))) {
+ return 0;
+ }
+ memset(frame, 0, size);
+ *frame++ = B->user_frame_offset;
+ B->user_frame_offset = B->user_frame_end + sizeof(size_t);
+ B->user_frame_end += size;
+ return frame;
+}
+
+size_t flatcc_builder_enter_user_frame(flatcc_builder_t *B, size_t size)
+{
+ size_t *frame;
+
+ size = alignup_size(size, sizeof(size_t)) + sizeof(size_t);
+
+ if (!(frame = reserve_buffer(B, flatcc_builder_alloc_us, B->user_frame_end, size, 0))) {
+ return 0;
+ }
+ memset(frame, 0, size);
+ *frame++ = B->user_frame_offset;
+ B->user_frame_offset = B->user_frame_end + sizeof(size_t);
+ B->user_frame_end += size;
+ return B->user_frame_offset;
+}
+
+
+size_t flatcc_builder_exit_user_frame(flatcc_builder_t *B)
+{
+ size_t *hdr;
+
+ FLATCC_ASSERT(B->user_frame_offset > 0);
+
+ hdr = us_ptr(B->user_frame_offset);
+ B->user_frame_end = B->user_frame_offset - sizeof(size_t);
+ return B->user_frame_offset = hdr[-1];
+}
+
+size_t flatcc_builder_exit_user_frame_at(flatcc_builder_t *B, size_t handle)
+{
+ FLATCC_ASSERT(B->user_frame_offset >= handle);
+
+ B->user_frame_offset = handle;
+ return flatcc_builder_exit_user_frame(B);
+}
+
+size_t flatcc_builder_get_current_user_frame(flatcc_builder_t *B)
+{
+ return B->user_frame_offset;
+}
+
+void *flatcc_builder_get_user_frame_ptr(flatcc_builder_t *B, size_t handle)
+{
+ return us_ptr(handle);
+}
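+
+/*
+ * User frame sketch: the handle survives reallocation of the
+ * underlying stack, so pointers should be rederived from the handle
+ * after further builder calls. `my_state_t` is a hypothetical
+ * user-defined struct.
+ *
+ *     size_t h = flatcc_builder_enter_user_frame(B, sizeof(my_state_t));
+ *     my_state_t *st = flatcc_builder_get_user_frame_ptr(B, h);
+ *     // ... builder calls may move the stack; rederive the pointer:
+ *     st = flatcc_builder_get_user_frame_ptr(B, h);
+ *     flatcc_builder_exit_user_frame_at(B, h);
+ */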
+
+static int enter_frame(flatcc_builder_t *B, uint16_t align)
+{
+ if (++B->level > B->limit_level) {
+ if (B->max_level > 0 && B->level > B->max_level) {
+ return -1;
+ }
+ if (!(B->frame = reserve_buffer(B, flatcc_builder_alloc_fs,
+ (size_t)(B->level - 1) * frame_size, frame_size, 0))) {
+ return -1;
+ }
+ B->limit_level = (int)(B->buffers[flatcc_builder_alloc_fs].iov_len / frame_size);
+ if (B->max_level > 0 && B->max_level < B->limit_level) {
+ B->limit_level = B->max_level;
+ }
+ } else {
+ ++B->frame;
+ }
+ frame(ds_offset) = B->ds_offset;
+ frame(align) = B->align;
+ B->align = align;
+ /* Note: do not assume padding before first has been allocated! */
+ frame(ds_first) = B->ds_first;
+ frame(type_limit) = data_limit;
+ B->ds_first = alignup_uoffset(B->ds_first + B->ds_offset, 8);
+ B->ds_offset = 0;
+ return 0;
+}
+
+static inline void exit_frame(flatcc_builder_t *B)
+{
+ memset(B->ds, 0, B->ds_offset);
+ B->ds_offset = frame(ds_offset);
+ B->ds_first = frame(ds_first);
+ refresh_ds(B, frame(type_limit));
+
+ /*
+ * Restore local alignment: e.g. a table should not change alignment
+ * because a child table was just created elsewhere in the buffer,
+ * but the overall alignment (min align), should be aware of it.
+ * Each buffer has its own min align that then migrates up without
+ * being affected by sibling or child buffers.
+ */
+ set_min_align(B, B->align);
+ B->align = frame(align);
+
+ --B->frame;
+ --B->level;
+}
+
+static inline uoffset_t front_pad(flatcc_builder_t *B, uoffset_t size, uint16_t align)
+{
+ return (uoffset_t)(B->emit_start - (flatcc_builder_ref_t)size) & (align - 1u);
+}
+
+static inline uoffset_t back_pad(flatcc_builder_t *B, uint16_t align)
+{
+ return (uoffset_t)(B->emit_end) & (align - 1u);
+}
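+
+/*
+ * Worked example: the buffer grows downwards from emit_start, so an
+ * object of `size` bytes starts at emit_start - size, and front_pad
+ * returns the bytes needed to make that start aligned. With
+ * emit_start = 0, size = 6 and align = 4: (0 - 6) & 3 == 2, so 2
+ * padding bytes are emitted after the object and it starts 4-aligned
+ * at emit offset -8.
+ */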
+
+static inline flatcc_builder_ref_t emit_front(flatcc_builder_t *B, iov_state_t *iov)
+{
+ flatcc_builder_ref_t ref;
+
+ /*
+ * We might have overflow when including headers, but without
+ * headers we should have checks to prevent overflow in the
+ * uoffset_t range, hence we subtract 16 to be safe. With that
+ * guarantee we can also make a safe check on the soffset_t range.
+ *
+     * We only allow buffers half the theoretical size of
+ * FLATBUFFERS_UOFFSET_MAX so we can safely use signed references.
+ *
+ * NOTE: vtables vt_offset field is signed, and the check in create
+ * table only ensures the signed limit. The check would fail if the
+ * total buffer size could grow beyond UOFFSET_MAX, and we prevent
+ * that by limiting the lower end to SOFFSET_MIN, and the upper end
+ * at emit_back to SOFFSET_MAX.
+ */
+ ref = B->emit_start - (flatcc_builder_ref_t)iov->len;
+ if ((iov->len > 16 && iov->len - 16 > FLATBUFFERS_UOFFSET_MAX) || ref >= B->emit_start) {
+ check(0, "buffer too large to represent");
+ return 0;
+ }
+ if (B->emit(B->emit_context, iov->iov, iov->count, ref, iov->len)) {
+ check(0, "emitter rejected buffer content");
+ return 0;
+ }
+ return B->emit_start = ref;
+}
+
+static inline flatcc_builder_ref_t emit_back(flatcc_builder_t *B, iov_state_t *iov)
+{
+ flatcc_builder_ref_t ref;
+
+ ref = B->emit_end;
+ B->emit_end = ref + (flatcc_builder_ref_t)iov->len;
+ /*
+ * Similar to emit_front check, but since we only emit vtables and
+ * padding at the back, we are not concerned with iov->len overflow,
+ * only total buffer overflow.
+ *
+ * With this check, vtable soffset references at table header can
+ * still overflow in extreme cases, so this must be checked
+ * separately.
+ */
+ if (B->emit_end < ref) {
+ check(0, "buffer too large to represent");
+ return 0;
+ }
+ if (B->emit(B->emit_context, iov->iov, iov->count, ref, iov->len)) {
+ check(0, "emitter rejected buffer content");
+ return 0;
+ }
+ /*
+ * Back references always return ref + 1 because ref == 0 is valid and
+ * should not be mistaken for error. vtables understand this.
+ */
+ return ref + 1;
+}
+
+static int align_to_block(flatcc_builder_t *B, uint16_t *align, uint16_t block_align, int is_nested)
+{
+ size_t end_pad;
+ iov_state_t iov;
+
+ block_align = block_align ? block_align : B->block_align ? B->block_align : 1;
+ get_min_align(align, field_size);
+ get_min_align(align, block_align);
+ /* Pad end of buffer to multiple. */
+ if (!is_nested) {
+ end_pad = back_pad(B, block_align);
+ if (end_pad) {
+ init_iov();
+ push_iov(_pad, end_pad);
+ if (0 == emit_back(B, &iov)) {
+ check(0, "emitter rejected buffer content");
+ return -1;
+ }
+ }
+ }
+ return 0;
+}
+
+flatcc_builder_ref_t flatcc_builder_embed_buffer(flatcc_builder_t *B,
+ uint16_t block_align,
+ const void *data, size_t size, uint16_t align, int flags)
+{
+ uoffset_t size_field, pad;
+ iov_state_t iov;
+ int with_size = flags & flatcc_builder_with_size;
+
+ if (align_to_block(B, &align, block_align, !is_top_buffer(B))) {
+ return 0;
+ }
+ pad = front_pad(B, (uoffset_t)(size + (with_size ? field_size : 0)), align);
+ write_uoffset(&size_field, (uoffset_t)size + pad);
+ init_iov();
+ /* Add ubyte vector size header if nested buffer. */
+ push_iov_cond(&size_field, field_size, !is_top_buffer(B));
+ push_iov(data, size);
+ push_iov(_pad, pad);
+ return emit_front(B, &iov);
+}
+
+flatcc_builder_ref_t flatcc_builder_create_buffer(flatcc_builder_t *B,
+ const char identifier[identifier_size], uint16_t block_align,
+ flatcc_builder_ref_t object_ref, uint16_t align, int flags)
+{
+ flatcc_builder_ref_t buffer_ref;
+ uoffset_t header_pad, id_size = 0;
+ uoffset_t object_offset, buffer_size, buffer_base;
+ iov_state_t iov;
+ flatcc_builder_identifier_t id_out = 0;
+ int is_nested = (flags & flatcc_builder_is_nested) != 0;
+ int with_size = (flags & flatcc_builder_with_size) != 0;
+
+ if (align_to_block(B, &align, block_align, is_nested)) {
+ return 0;
+ }
+ set_min_align(B, align);
+ if (identifier) {
+ FLATCC_ASSERT(sizeof(flatcc_builder_identifier_t) == identifier_size);
+ FLATCC_ASSERT(sizeof(flatcc_builder_identifier_t) == field_size);
+ memcpy(&id_out, identifier, identifier_size);
+ id_out = __flatbuffers_thash_read_from_le(&id_out);
+ write_identifier(&id_out, id_out);
+ }
+ id_size = id_out ? identifier_size : 0;
+ header_pad = front_pad(B, field_size + id_size + (uoffset_t)(with_size ? field_size : 0), align);
+ init_iov();
+    /* ubyte vector size field wrapping a nested buffer. */
+ push_iov_cond(&buffer_size, field_size, is_nested || with_size);
+ push_iov(&object_offset, field_size);
+ /* Identifiers are not always present in buffer. */
+ push_iov(&id_out, id_size);
+ push_iov(_pad, header_pad);
+ buffer_base = (uoffset_t)B->emit_start - (uoffset_t)iov.len + (uoffset_t)((is_nested || with_size) ? field_size : 0);
+ if (is_nested) {
+ write_uoffset(&buffer_size, (uoffset_t)B->buffer_mark - buffer_base);
+ } else {
+ /* Also include clustered vtables. */
+ write_uoffset(&buffer_size, (uoffset_t)B->emit_end - buffer_base);
+ }
+ write_uoffset(&object_offset, (uoffset_t)object_ref - buffer_base);
+ if (0 == (buffer_ref = emit_front(B, &iov))) {
+ check(0, "emitter rejected buffer content");
+ return 0;
+ }
+ return buffer_ref;
+}
+
+flatcc_builder_ref_t flatcc_builder_create_struct(flatcc_builder_t *B, const void *data, size_t size, uint16_t align)
+{
+ size_t pad;
+ iov_state_t iov;
+
+ check(align >= 1, "align cannot be 0");
+ set_min_align(B, align);
+ pad = front_pad(B, (uoffset_t)size, align);
+ init_iov();
+ push_iov(data, size);
+ /*
+ * Normally structs will already be a multiple of their alignment,
+ * so this padding will not likely be emitted.
+ */
+ push_iov(_pad, pad);
+ return emit_front(B, &iov);
+}
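+
+/*
+ * Sketch: a struct-rooted buffer needs no internal allocation at all,
+ * which is why init defers all allocation. `my_struct_t` and its
+ * alignment `my_align` are hypothetical placeholders.
+ *
+ *     my_struct_t s = { 0 };
+ *     flatcc_builder_ref_t root =
+ *         flatcc_builder_create_struct(B, &s, sizeof(s), my_align);
+ *     flatcc_builder_create_buffer(B, 0, 0, root, my_align, 0);
+ */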
+
+int flatcc_builder_start_buffer(flatcc_builder_t *B,
+ const char identifier[identifier_size], uint16_t block_align, int flags)
+{
+ /*
+ * This saves the parent `min_align` in the align field since we
+ * shouldn't use that for the current buffer. `exit_frame`
+ * automatically aggregates align up, so it is updated when the
+ * buffer frame exits.
+ */
+ if (enter_frame(B, B->min_align)) {
+ return -1;
+ }
+ /* B->align now has parent min_align, and child frames will save it. */
+ B->min_align = 1;
+ /* Save the parent block align, and set proper defaults for this buffer. */
+ frame(container.buffer.block_align) = B->block_align;
+ B->block_align = block_align;
+    frame(container.buffer.flags) = B->buffer_flags;
+ B->buffer_flags = (uint16_t)flags;
+ frame(container.buffer.mark) = B->buffer_mark;
+ frame(container.buffer.nest_id) = B->nest_id;
+ /*
+     * End of buffer when nested. Not defined for top-level buffers
+     * because here (and only here) we permit strings etc. to be created
+     * before buffer start, and because top-level buffer vtables can be
+     * clustered.
+ */
+ B->buffer_mark = B->emit_start;
+ /* Must be 0 before and after entering top-level buffer, and unique otherwise. */
+ B->nest_id = B->nest_count++;
+ frame(container.buffer.identifier) = B->identifier;
+ set_identifier(identifier);
+ frame(type) = flatcc_builder_buffer;
+ return 0;
+}
+
+flatcc_builder_ref_t flatcc_builder_end_buffer(flatcc_builder_t *B, flatcc_builder_ref_t root)
+{
+ flatcc_builder_ref_t buffer_ref;
+ int flags;
+
+ flags = B->buffer_flags & flatcc_builder_with_size;
+ flags |= is_top_buffer(B) ? 0 : flatcc_builder_is_nested;
+ check(frame(type) == flatcc_builder_buffer, "expected buffer frame");
+ set_min_align(B, B->block_align);
+ if (0 == (buffer_ref = flatcc_builder_create_buffer(B, (void *)&B->identifier,
+ B->block_align, root, B->min_align, flags))) {
+ return 0;
+ }
+ B->buffer_mark = frame(container.buffer.mark);
+ B->nest_id = frame(container.buffer.nest_id);
+ B->identifier = frame(container.buffer.identifier);
+ B->buffer_flags = frame(container.buffer.flags);
+ exit_frame(B);
+ return buffer_ref;
+}
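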
+
+void *flatcc_builder_start_struct(flatcc_builder_t *B, size_t size, uint16_t align)
+{
+ /* Allocate space for the struct on the ds stack. */
+ if (enter_frame(B, align)) {
+ return 0;
+ }
+ frame(type) = flatcc_builder_struct;
+ refresh_ds(B, data_limit);
+ return push_ds(B, (uoffset_t)size);
+}
+
+void *flatcc_builder_struct_edit(flatcc_builder_t *B)
+{
+ return B->ds;
+}
+
+flatcc_builder_ref_t flatcc_builder_end_struct(flatcc_builder_t *B)
+{
+ flatcc_builder_ref_t object_ref;
+
+ check(frame(type) == flatcc_builder_struct, "expected struct frame");
+ if (0 == (object_ref = flatcc_builder_create_struct(B, B->ds, B->ds_offset, B->align))) {
+ return 0;
+ }
+ exit_frame(B);
+ return object_ref;
+}
+
+static inline int vector_count_add(flatcc_builder_t *B, uoffset_t count, uoffset_t max_count)
+{
+ uoffset_t n, n1;
+ n = frame(container.vector.count);
+ n1 = n + count;
+ /*
+     * This prevents elem_size * count from overflowing iff max_count
+     * has been set sensibly. Without this check we might allocate too
+     * little on the ds stack and return a buffer the user thinks is
+     * much larger, which of course is bad even though the buffer would
+     * eventually fail anyway.
+ */
+ check_error(n <= n1 && n1 <= max_count, -1, "vector too large to represent");
+ frame(container.vector.count) = n1;
+ return 0;
+}
+
+void *flatcc_builder_extend_vector(flatcc_builder_t *B, size_t count)
+{
+ if (vector_count_add(B, (uoffset_t)count, frame(container.vector.max_count))) {
+ return 0;
+ }
+ return push_ds(B, frame(container.vector.elem_size) * (uoffset_t)count);
+}
+
+void *flatcc_builder_vector_push(flatcc_builder_t *B, const void *data)
+{
+ check(frame(type) == flatcc_builder_vector, "expected vector frame");
+ check_error(frame(container.vector.count) <= frame(container.vector.max_count), 0, "vector max count exceeded");
+ frame(container.vector.count) += 1;
+ return push_ds_copy(B, data, frame(container.vector.elem_size));
+}
+
+void *flatcc_builder_append_vector(flatcc_builder_t *B, const void *data, size_t count)
+{
+ check(frame(type) == flatcc_builder_vector, "expected vector frame");
+ if (vector_count_add(B, (uoffset_t)count, frame(container.vector.max_count))) {
+ return 0;
+ }
+ return push_ds_copy(B, data, frame(container.vector.elem_size) * (uoffset_t)count);
+}
+
+flatcc_builder_ref_t *flatcc_builder_extend_offset_vector(flatcc_builder_t *B, size_t count)
+{
+ if (vector_count_add(B, (uoffset_t)count, max_offset_count)) {
+ return 0;
+ }
+ return push_ds(B, (uoffset_t)(field_size * count));
+}
+
+flatcc_builder_ref_t *flatcc_builder_offset_vector_push(flatcc_builder_t *B, flatcc_builder_ref_t ref)
+{
+ flatcc_builder_ref_t *p;
+
+ check(frame(type) == flatcc_builder_offset_vector, "expected offset vector frame");
+ if (frame(container.vector.count) == max_offset_count) {
+ return 0;
+ }
+ frame(container.vector.count) += 1;
+ if (0 == (p = push_ds(B, field_size))) {
+ return 0;
+ }
+ *p = ref;
+ return p;
+}
+
+flatcc_builder_ref_t *flatcc_builder_append_offset_vector(flatcc_builder_t *B, const flatcc_builder_ref_t *refs, size_t count)
+{
+ check(frame(type) == flatcc_builder_offset_vector, "expected offset vector frame");
+ if (vector_count_add(B, (uoffset_t)count, max_offset_count)) {
+ return 0;
+ }
+ return push_ds_copy(B, refs, (uoffset_t)(field_size * count));
+}
+
+char *flatcc_builder_extend_string(flatcc_builder_t *B, size_t len)
+{
+ check(frame(type) == flatcc_builder_string, "expected string frame");
+ if (vector_count_add(B, (uoffset_t)len, max_string_len)) {
+ return 0;
+ }
+ return push_ds(B, (uoffset_t)len);
+}
+
+char *flatcc_builder_append_string(flatcc_builder_t *B, const char *s, size_t len)
+{
+ check(frame(type) == flatcc_builder_string, "expected string frame");
+ if (vector_count_add(B, (uoffset_t)len, max_string_len)) {
+ return 0;
+ }
+ return push_ds_copy(B, s, (uoffset_t)len);
+}
+
+char *flatcc_builder_append_string_str(flatcc_builder_t *B, const char *s)
+{
+ return flatcc_builder_append_string(B, s, strlen(s));
+}
+
+char *flatcc_builder_append_string_strn(flatcc_builder_t *B, const char *s, size_t max_len)
+{
+ return flatcc_builder_append_string(B, s, strnlen(s, max_len));
+}
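+
+/*
+ * Sketch: strings can be built incrementally on the stack, or created
+ * in one shot when the content is known up front.
+ *
+ *     flatcc_builder_start_string(B);
+ *     flatcc_builder_append_string_str(B, "hello, ");
+ *     flatcc_builder_append_string_str(B, "world");
+ *     flatcc_builder_ref_t s1 = flatcc_builder_end_string(B);
+ *
+ *     flatcc_builder_ref_t s2 = flatcc_builder_create_string_str(B, "hi");
+ */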
+
+int flatcc_builder_truncate_vector(flatcc_builder_t *B, size_t count)
+{
+ check(frame(type) == flatcc_builder_vector, "expected vector frame");
+ check_error(frame(container.vector.count) >= count, -1, "cannot truncate vector past empty");
+ frame(container.vector.count) -= (uoffset_t)count;
+ unpush_ds(B, frame(container.vector.elem_size) * (uoffset_t)count);
+ return 0;
+}
+
+int flatcc_builder_truncate_offset_vector(flatcc_builder_t *B, size_t count)
+{
+ check(frame(type) == flatcc_builder_offset_vector, "expected offset vector frame");
+ check_error(frame(container.vector.count) >= (uoffset_t)count, -1, "cannot truncate vector past empty");
+ frame(container.vector.count) -= (uoffset_t)count;
+ unpush_ds(B, frame(container.vector.elem_size) * (uoffset_t)count);
+ return 0;
+}
+
+int flatcc_builder_truncate_string(flatcc_builder_t *B, size_t len)
+{
+ check(frame(type) == flatcc_builder_string, "expected string frame");
+ check_error(frame(container.vector.count) >= len, -1, "cannot truncate string past empty");
+ frame(container.vector.count) -= (uoffset_t)len;
+ unpush_ds(B, (uoffset_t)len);
+ return 0;
+}
+
+int flatcc_builder_start_vector(flatcc_builder_t *B, size_t elem_size, uint16_t align, size_t max_count)
+{
+ get_min_align(&align, field_size);
+ if (enter_frame(B, align)) {
+ return -1;
+ }
+ frame(container.vector.elem_size) = (uoffset_t)elem_size;
+ frame(container.vector.count) = 0;
+ frame(container.vector.max_count) = (uoffset_t)max_count;
+ frame(type) = flatcc_builder_vector;
+ refresh_ds(B, data_limit);
+ return 0;
+}
+
+int flatcc_builder_start_offset_vector(flatcc_builder_t *B)
+{
+ if (enter_frame(B, field_size)) {
+ return -1;
+ }
+ frame(container.vector.elem_size) = field_size;
+ frame(container.vector.count) = 0;
+ frame(type) = flatcc_builder_offset_vector;
+ refresh_ds(B, data_limit);
+ return 0;
+}
+
+flatcc_builder_ref_t flatcc_builder_create_offset_vector(flatcc_builder_t *B,
+ const flatcc_builder_ref_t *vec, size_t count)
+{
+ flatcc_builder_ref_t *_vec;
+
+ if (flatcc_builder_start_offset_vector(B)) {
+ return 0;
+ }
+ if (!(_vec = flatcc_builder_extend_offset_vector(B, count))) {
+ return 0;
+ }
+ memcpy(_vec, vec, count * field_size);
+ return flatcc_builder_end_offset_vector(B);
+}
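+
+/*
+ * Sketch: an offset vector collects previously created references,
+ * e.g. a vector of strings.
+ *
+ *     flatcc_builder_ref_t strs[2];
+ *     strs[0] = flatcc_builder_create_string_str(B, "a");
+ *     strs[1] = flatcc_builder_create_string_str(B, "b");
+ *     flatcc_builder_ref_t vec =
+ *         flatcc_builder_create_offset_vector(B, strs, 2);
+ */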
+
+int flatcc_builder_start_string(flatcc_builder_t *B)
+{
+ if (enter_frame(B, 1)) {
+ return -1;
+ }
+ frame(container.vector.elem_size) = 1;
+ frame(container.vector.count) = 0;
+ frame(type) = flatcc_builder_string;
+ refresh_ds(B, data_limit);
+ return 0;
+}
+
+int flatcc_builder_reserve_table(flatcc_builder_t *B, int count)
+{
+ check(count >= 0, "cannot reserve negative count");
+ return reserve_fields(B, count);
+}
+
+int flatcc_builder_start_table(flatcc_builder_t *B, int count)
+{
+ if (enter_frame(B, field_size)) {
+ return -1;
+ }
+ frame(container.table.vs_end) = vs_offset(B->vs);
+ frame(container.table.pl_end) = pl_offset(B->pl);
+ frame(container.table.vt_hash) = B->vt_hash;
+ frame(container.table.id_end) = B->id_end;
+ B->vt_hash = 0;
+ FLATCC_BUILDER_INIT_VT_HASH(B->vt_hash);
+ B->id_end = 0;
+ frame(type) = flatcc_builder_table;
+ if (reserve_fields(B, count)) {
+ return -1;
+ }
+ refresh_ds(B, table_limit);
+ return 0;
+}
+
+flatcc_builder_vt_ref_t flatcc_builder_create_vtable(flatcc_builder_t *B,
+ const voffset_t *vt, voffset_t vt_size)
+{
+ flatcc_builder_vt_ref_t vt_ref;
+ iov_state_t iov;
+ voffset_t *vt_;
+ size_t i;
+
+ /*
+ * Only top-level buffer can cluster vtables because only it can
+ * extend beyond the end.
+ *
+ * We write the vtable after the referencing table to maintain
+ * the construction invariant that any offset reference has
+ * valid emitted data at a higher address, and also that any
+ * issued negative emit address represents an offset reference
+ * to some flatbuffer object or vector (or possibly a root
+ * struct).
+ *
+ * The vt_ref is stored as the reference + 1 to avoid having 0 as a
+     * valid reference (which usually means error). It also identifies
+ * vtable references as the only uneven references, and the only
+ * references that can be used multiple times in the same buffer.
+ *
+ * We do the vtable conversion here so cached vtables can be built
+ * hashed and compared more efficiently, and so end users with
+ * direct vtable construction don't have to worry about endianness.
+ * This also ensures the hash function works the same wrt.
+ * collision frequency.
+ */
+
+ if (!flatbuffers_is_native_pe()) {
+ /* Make space in vtable cache for temporary endian conversion. */
+ if (!(vt_ = reserve_buffer(B, flatcc_builder_alloc_vb, B->vb_end, vt_size, 0))) {
+ return 0;
+ }
+ for (i = 0; i < vt_size / sizeof(voffset_t); ++i) {
+ write_voffset(&vt_[i], vt[i]);
+ }
+ vt = vt_;
+ /* We don't need to free the reservation since we don't advance any base pointer. */
+ }
+
+ init_iov();
+ push_iov(vt, vt_size);
+ if (is_top_buffer(B) && !B->disable_vt_clustering) {
+ /* Note that `emit_back` already returns ref + 1 as we require for vtables. */
+ if (0 == (vt_ref = emit_back(B, &iov))) {
+ return 0;
+ }
+ } else {
+ if (0 == (vt_ref = emit_front(B, &iov))) {
+ return 0;
+ }
+ /*
+ * We don't have a valid 0 ref here, but to be consistent with
+ * clustered vtables we offset by one. This cannot be zero
+ * either.
+ */
+ vt_ref += 1;
+ }
+ return vt_ref;
+}
+
+flatcc_builder_vt_ref_t flatcc_builder_create_cached_vtable(flatcc_builder_t *B,
+ const voffset_t *vt, voffset_t vt_size, uint32_t vt_hash)
+{
+ vtable_descriptor_t *vd, *vd2;
+ uoffset_t *pvd, *pvd_head;
+ uoffset_t next;
+ voffset_t *vt_;
+
+ /* This just gets the hash table slot, we still have to inspect it. */
+ if (!(pvd_head = lookup_ht(B, vt_hash))) {
+ return 0;
+ }
+ pvd = pvd_head;
+ next = *pvd;
+ /* Tracks if there already is a cached copy. */
+ vd2 = 0;
+ while (next) {
+ vd = vd_ptr(next);
+ vt_ = vb_ptr(vd->vb_start);
+ if (vt_[0] != vt_size || 0 != memcmp(vt, vt_, vt_size)) {
+ pvd = &vd->next;
+ next = vd->next;
+ continue;
+ }
+ /* Can't share emitted vtables between buffers, */
+ if (vd->nest_id != B->nest_id) {
+ /* but we don't have to resubmit to cache. */
+ vd2 = vd;
+ /* See if there is a better match. */
+ pvd = &vd->next;
+ next = vd->next;
+ continue;
+ }
+ /* Move to front hash strategy. */
+ if (pvd != pvd_head) {
+ *pvd = vd->next;
+ vd->next = *pvd_head;
+ *pvd_head = next;
+ }
+ /* vtable exists and has been emitted within current buffer. */
+ return vd->vt_ref;
+ }
+ /* Allocate new descriptor. */
+ if (!(vd = reserve_buffer(B, flatcc_builder_alloc_vd, B->vd_end, sizeof(vtable_descriptor_t), 0))) {
+ return 0;
+ }
+ next = B->vd_end;
+ B->vd_end += (uoffset_t)sizeof(vtable_descriptor_t);
+
+ /* Identify the buffer this vtable descriptor belongs to. */
+ vd->nest_id = B->nest_id;
+
+ /* Move to front hash strategy. */
+ vd->next = *pvd_head;
+ *pvd_head = next;
+ if (0 == (vd->vt_ref = flatcc_builder_create_vtable(B, vt, vt_size))) {
+ return 0;
+ }
+ if (vd2) {
+ /* Reuse cached copy. */
+ vd->vb_start = vd2->vb_start;
+ } else {
+ if (B->vb_flush_limit && B->vb_flush_limit < B->vb_end + vt_size) {
+ flatcc_builder_flush_vtable_cache(B);
+ } else {
+ /* Make space in vtable cache. */
+ if (!(vt_ = reserve_buffer(B, flatcc_builder_alloc_vb, B->vb_end, vt_size, 0))) {
+                /* Error convention for vt refs is 0, not -1. */
+                return 0;
+ }
+ vd->vb_start = B->vb_end;
+ B->vb_end += vt_size;
+ memcpy(vt_, vt, vt_size);
+ }
+ }
+ return vd->vt_ref;
+}
+
+flatcc_builder_ref_t flatcc_builder_create_table(flatcc_builder_t *B, const void *data, size_t size, uint16_t align,
+ flatbuffers_voffset_t *offsets, int offset_count, flatcc_builder_vt_ref_t vt_ref)
+{
+ int i;
+ uoffset_t pad, vt_offset, vt_offset_field, vt_base, base, offset, *offset_field;
+ iov_state_t iov;
+
+ check(offset_count >= 0, "expected non-negative offset_count");
+ /*
+ * vtable references are offset by 1 to avoid confusion with
+ * 0 as an error reference. It also uniquely identifies them
+ * as vtables being the only uneven reference type.
+ */
+ check(vt_ref & 1, "invalid vtable referenc");
+ get_min_align(&align, field_size);
+ set_min_align(B, align);
+ /* Alignment is calculated for the first element, not the header. */
+ pad = front_pad(B, (uoffset_t)size, align);
+ base = (uoffset_t)B->emit_start - (uoffset_t)(pad + size + field_size);
+ /* Adjust by 1 to get unencoded vtable reference. */
+ vt_base = (uoffset_t)(vt_ref - 1);
+ vt_offset = base - vt_base;
+ /* Avoid overflow. */
+ if (base - vt_offset != vt_base) {
+ return -1;
+ }
+ /* Protocol endian encoding. */
+ write_uoffset(&vt_offset_field, vt_offset);
+ for (i = 0; i < offset_count; ++i) {
+ offset_field = (uoffset_t *)((size_t)data + offsets[i]);
+ offset = *offset_field - base - offsets[i] - (uoffset_t)field_size;
+ write_uoffset(offset_field, offset);
+ }
+ init_iov();
+ push_iov(&vt_offset_field, field_size);
+ push_iov(data, size);
+ push_iov(_pad, pad);
+ return emit_front(B, &iov);
+}
+
+int flatcc_builder_check_required_field(flatcc_builder_t *B, flatbuffers_voffset_t id)
+{
+ check(frame(type) == flatcc_builder_table, "expected table frame");
+
+ return id < B->id_end && B->vs[id] != 0;
+}
+
+int flatcc_builder_check_union_field(flatcc_builder_t *B, flatbuffers_voffset_t id)
+{
+ check(frame(type) == flatcc_builder_table, "expected table frame");
+
+ if (id == 0 || id >= B->id_end) {
+ return 0;
+ }
+ if (B->vs[id - 1] == 0) {
+ return B->vs[id] == 0;
+ }
+ if (*(uint8_t *)(B->ds + B->vs[id - 1])) {
+ return B->vs[id] != 0;
+ }
+ return B->vs[id] == 0;
+}
+
+int flatcc_builder_check_required(flatcc_builder_t *B, const flatbuffers_voffset_t *required, int count)
+{
+ int i;
+
+ check(frame(type) == flatcc_builder_table, "expected table frame");
+
+ if (B->id_end < count) {
+ return 0;
+ }
+ for (i = 0; i < count; ++i) {
+ if (B->vs[required[i]] == 0) {
+ return 0;
+ }
+ }
+ return 1;
+}
+
+flatcc_builder_ref_t flatcc_builder_end_table(flatcc_builder_t *B)
+{
+ voffset_t *vt, vt_size;
+ flatcc_builder_ref_t table_ref, vt_ref;
+ int pl_count;
+ voffset_t *pl;
+
+ check(frame(type) == flatcc_builder_table, "expected table frame");
+
+ /* We have `ds_limit`, so we should not have to check for overflow here. */
+
+ vt = B->vs - 2;
+ vt_size = (voffset_t)(sizeof(voffset_t) * (B->id_end + 2u));
+ /* Update vtable header fields, first vtable size, then object table size. */
+ vt[0] = vt_size;
+ /*
+ * The `ds` buffer is always at least `field_size` aligned but excludes the
+ * initial vtable offset field. Therefore `field_size` is added here
+ * to the total table size in the vtable.
+ */
+ vt[1] = (voffset_t)(B->ds_offset + field_size);
+ FLATCC_BUILDER_UPDATE_VT_HASH(B->vt_hash, (uint32_t)vt[0], (uint32_t)vt[1]);
+ /* Find already emitted vtable, or emit a new one. */
+ if (!(vt_ref = flatcc_builder_create_cached_vtable(B, vt, vt_size, B->vt_hash))) {
+ return 0;
+ }
+ /* Clear vs stack so it is ready for the next vtable (ds stack is cleared by exit frame). */
+ memset(vt, 0, vt_size);
+
+ pl = pl_ptr(frame(container.table.pl_end));
+ pl_count = (int)(B->pl - pl);
+ if (0 == (table_ref = flatcc_builder_create_table(B, B->ds, B->ds_offset, B->align, pl, pl_count, vt_ref))) {
+ return 0;
+ }
+ B->vt_hash = frame(container.table.vt_hash);
+ B->id_end = frame(container.table.id_end);
+ B->vs = vs_ptr(frame(container.table.vs_end));
+ B->pl = pl_ptr(frame(container.table.pl_end));
+ exit_frame(B);
+ return table_ref;
+}
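+
+/*
+ * Sketch of the low-level table protocol implemented above; generated
+ * code normally wraps this. Field ids are schema-dependent, and error
+ * checking and endian conversion are omitted for brevity.
+ *
+ *     flatcc_builder_start_buffer(B, 0, 0, 0);
+ *     flatcc_builder_ref_t name = flatcc_builder_create_string_str(B, "x");
+ *     flatcc_builder_start_table(B, 2);                 // 2 fields
+ *     uint32_t *hp = flatcc_builder_table_add(B, 0, 4, 4);
+ *     *hp = 42;                                         // scalar, id 0
+ *     *flatcc_builder_table_add_offset(B, 1) = name;    // offset, id 1
+ *     flatcc_builder_ref_t root = flatcc_builder_end_table(B);
+ *     flatcc_builder_end_buffer(B, root);
+ */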
+
+flatcc_builder_ref_t flatcc_builder_create_vector(flatcc_builder_t *B,
+ const void *data, size_t count, size_t elem_size, uint16_t align, size_t max_count)
+{
+ /*
+     * Note: it is important that vec_size is uoffset_t, not size_t,
+ * in case sizeof(uoffset_t) > sizeof(size_t) because max_count is
+ * defined in terms of uoffset_t representation size, and also
+ * because we risk accepting too large a vector even if max_count is
+ * not violated.
+ */
+ uoffset_t vec_size, vec_pad, length_prefix;
+ iov_state_t iov;
+
+ check_error(count <= max_count, 0, "vector max_count violated");
+ get_min_align(&align, field_size);
+ set_min_align(B, align);
+ vec_size = (uoffset_t)count * (uoffset_t)elem_size;
+ /*
+     * vec_size can overflow on 32-bit systems when uoffset_t is defined as 64-bit.
+     * `emit_front/back` catches overflow, but not if our size type wraps first.
+ */
+#if FLATBUFFERS_UOFFSET_MAX > SIZE_MAX
+ check_error(vec_size < SIZE_MAX, 0, "vector larger than address space");
+#endif
+ write_uoffset(&length_prefix, (uoffset_t)count);
+ /* Alignment is calculated for the first element, not the header. */
+ vec_pad = front_pad(B, vec_size, align);
+ init_iov();
+ push_iov(&length_prefix, field_size);
+ push_iov(data, vec_size);
+ push_iov(_pad, vec_pad);
+ return emit_front(B, &iov);
+}
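+
+/*
+ * Sketch: creating a scalar vector directly from user memory (native
+ * endian assumed for brevity). `max_count` stands in for the type
+ * specific limit that generated code normally supplies.
+ *
+ *     int32_t data[3] = { 1, 2, 3 };
+ *     flatcc_builder_ref_t vec = flatcc_builder_create_vector(B,
+ *         data, 3, sizeof(int32_t), sizeof(int32_t), max_count);
+ */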
+
+/*
+ * Note: FlatBuffers official documentation states that the size field of a
+ * vector is a 32-bit element count. It is not quite clear if the
+ * intention is to have the size field be of type uoffset_t since tables
+ * also have a uoffset_t sized header, or if the vector size should
+ * remain unchanged if uoffset is changed to 16- or 64-bits
+ * respectively. Since it makes most sense to have a vector compatible
+ * with the addressable space, we choose to use uoffset_t as size field,
+ * which remains compatible with the default 32-bit version of uoffset_t.
+ */
+flatcc_builder_ref_t flatcc_builder_end_vector(flatcc_builder_t *B)
+{
+ flatcc_builder_ref_t vector_ref;
+
+ check(frame(type) == flatcc_builder_vector, "expected vector frame");
+
+ if (0 == (vector_ref = flatcc_builder_create_vector(B, B->ds,
+ frame(container.vector.count), frame(container.vector.elem_size),
+ B->align, frame(container.vector.max_count)))) {
+ return 0;
+ }
+ exit_frame(B);
+ return vector_ref;
+}
+
+size_t flatcc_builder_vector_count(flatcc_builder_t *B)
+{
+ return frame(container.vector.count);
+}
+
+void *flatcc_builder_vector_edit(flatcc_builder_t *B)
+{
+ return B->ds;
+}
+
+/* This function destroys the source content but avoids stack allocation. */
+static flatcc_builder_ref_t _create_offset_vector_direct(flatcc_builder_t *B,
+ flatcc_builder_ref_t *vec, size_t count, const utype_t *types)
+{
+ uoffset_t vec_size, vec_pad;
+ uoffset_t length_prefix, offset;
+ uoffset_t i;
+ soffset_t base;
+ iov_state_t iov;
+
+ if ((uoffset_t)count > max_offset_count) {
+ return 0;
+ }
+ set_min_align(B, field_size);
+ vec_size = (uoffset_t)(count * field_size);
+ write_uoffset(&length_prefix, (uoffset_t)count);
+ /* Alignment is calculated for the first element, not the header. */
+ vec_pad = front_pad(B, vec_size, field_size);
+ init_iov();
+ push_iov(&length_prefix, field_size);
+ push_iov(vec, vec_size);
+ push_iov(_pad, vec_pad);
+ base = B->emit_start - (soffset_t)iov.len;
+ for (i = 0; i < (uoffset_t)count; ++i) {
+ /*
+ * 0 is either end of buffer, start of vtables, or start of
+ * buffer depending on the direction in which the buffer is
+ * built. None of these can create a valid 0 reference but it
+ * is easy to create by mistake when manually building offset
+ * vectors.
+ *
+ * Unions do permit nulls, but only when the type is NONE.
+ */
+ if (vec[i] != 0) {
+ offset = (uoffset_t)
+ (vec[i] - base - (soffset_t)(i * field_size) - (soffset_t)field_size);
+ write_uoffset(&vec[i], offset);
+ if (types) {
+ check(types[i] != 0, "union vector cannot have non-null element with type NONE");
+ }
+ } else {
+ if (types) {
+ check(types[i] == 0, "union vector cannot have null element without type NONE");
+ } else {
+ check(0, "offset vector cannot have null element");
+ }
+ }
+ }
+ return emit_front(B, &iov);
+}
+
+flatcc_builder_ref_t flatcc_builder_create_offset_vector_direct(flatcc_builder_t *B,
+ flatcc_builder_ref_t *vec, size_t count)
+{
+ return _create_offset_vector_direct(B, vec, count, 0);
+}
+
+flatcc_builder_ref_t flatcc_builder_end_offset_vector(flatcc_builder_t *B)
+{
+ flatcc_builder_ref_t vector_ref;
+
+ check(frame(type) == flatcc_builder_offset_vector, "expected offset vector frame");
+ if (0 == (vector_ref = flatcc_builder_create_offset_vector_direct(B,
+ (flatcc_builder_ref_t *)B->ds, frame(container.vector.count)))) {
+ return 0;
+ }
+ exit_frame(B);
+ return vector_ref;
+}
+
+flatcc_builder_ref_t flatcc_builder_end_offset_vector_for_unions(flatcc_builder_t *B, const utype_t *types)
+{
+ flatcc_builder_ref_t vector_ref;
+
+ check(frame(type) == flatcc_builder_offset_vector, "expected offset vector frame");
+ if (0 == (vector_ref = _create_offset_vector_direct(B,
+ (flatcc_builder_ref_t *)B->ds, frame(container.vector.count), types))) {
+ return 0;
+ }
+ exit_frame(B);
+ return vector_ref;
+}
+
+void *flatcc_builder_offset_vector_edit(flatcc_builder_t *B)
+{
+ return B->ds;
+}
+
+size_t flatcc_builder_offset_vector_count(flatcc_builder_t *B)
+{
+ return frame(container.vector.count);
+}
+
+int flatcc_builder_table_add_union(flatcc_builder_t *B, int id,
+ flatcc_builder_union_ref_t uref)
+{
+ flatcc_builder_ref_t *pref;
+ flatcc_builder_utype_t *putype;
+
+ check(frame(type) == flatcc_builder_table, "expected table frame");
+ check_error(uref.type != 0 || uref.value == 0, -1, "expected null value for type NONE");
+ if (uref.value != 0) {
+ pref = flatcc_builder_table_add_offset(B, id);
+ check_error(pref != 0, -1, "unable to add union value");
+ *pref = uref.value;
+ }
+ putype = flatcc_builder_table_add(B, id - 1, utype_size, utype_size);
+ check_error(putype != 0, -1, "unable to add union type");
+ write_utype(putype, uref.type);
+ return 0;
+}
+
+int flatcc_builder_table_add_union_vector(flatcc_builder_t *B, int id,
+ flatcc_builder_union_vec_ref_t uvref)
+{
+ flatcc_builder_ref_t *pref;
+
+ check(frame(type) == flatcc_builder_table, "expected table frame");
+ check_error((uvref.type == 0) == (uvref.value == 0), -1, "expected both type and value vector, or neither");
+ if (uvref.type != 0) {
+ pref = flatcc_builder_table_add_offset(B, id - 1);
+ check_error(pref != 0, -1, "unable to add union member");
+ *pref = uvref.type;
+
+ pref = flatcc_builder_table_add_offset(B, id);
+ check_error(pref != 0, -1, "unable to add union member");
+ *pref = uvref.value;
+ }
+ return 0;
+}
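+
+/*
+ * Sketch: adding a union field. By convention `id` refers to the value
+ * field and the type is stored at id - 1, as enforced above. The type
+ * code and field id below are hypothetical schema values.
+ *
+ *     flatcc_builder_union_ref_t uref;
+ *     uref.type = 1;                   // schema-specific member type
+ *     uref.value = member_ref;         // previously created table ref
+ *     flatcc_builder_table_add_union(B, 2, uref);
+ */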
+
+flatcc_builder_union_vec_ref_t flatcc_builder_create_union_vector(flatcc_builder_t *B,
+ const flatcc_builder_union_ref_t *urefs, size_t count)
+{
+ flatcc_builder_union_vec_ref_t uvref = { 0, 0 };
+ flatcc_builder_utype_t *types;
+ flatcc_builder_ref_t *refs;
+ size_t i;
+
+ if (flatcc_builder_start_offset_vector(B)) {
+ return uvref;
+ }
+ if (0 == flatcc_builder_extend_offset_vector(B, count)) {
+ return uvref;
+ }
+ if (0 == (types = push_ds(B, (uoffset_t)(utype_size * count)))) {
+ return uvref;
+ }
+
+ /* Safe even if push_ds caused stack reallocation. */
+ refs = flatcc_builder_offset_vector_edit(B);
+
+ for (i = 0; i < count; ++i) {
+ types[i] = urefs[i].type;
+ refs[i] = urefs[i].value;
+ }
+ uvref = flatcc_builder_create_union_vector_direct(B,
+ types, refs, count);
+    /* No need to clean up after our temporary types vector. */
+ exit_frame(B);
+ return uvref;
+}
+
+flatcc_builder_union_vec_ref_t flatcc_builder_create_union_vector_direct(flatcc_builder_t *B,
+ const flatcc_builder_utype_t *types, flatcc_builder_ref_t *data, size_t count)
+{
+ flatcc_builder_union_vec_ref_t uvref = { 0, 0 };
+
+ if (0 == (uvref.value = _create_offset_vector_direct(B, data, count, types))) {
+ return uvref;
+ }
+ if (0 == (uvref.type = flatcc_builder_create_type_vector(B, types, count))) {
+ return uvref;
+ }
+ return uvref;
+}
+
+flatcc_builder_ref_t flatcc_builder_create_type_vector(flatcc_builder_t *B,
+ const flatcc_builder_utype_t *types, size_t count)
+{
+ return flatcc_builder_create_vector(B, types, count,
+ utype_size, utype_size, max_utype_count);
+}
+
+int flatcc_builder_start_union_vector(flatcc_builder_t *B)
+{
+ if (enter_frame(B, field_size)) {
+ return -1;
+ }
+ frame(container.vector.elem_size) = union_size;
+ frame(container.vector.count) = 0;
+ frame(type) = flatcc_builder_union_vector;
+ refresh_ds(B, data_limit);
+ return 0;
+}
+
+flatcc_builder_union_vec_ref_t flatcc_builder_end_union_vector(flatcc_builder_t *B)
+{
+ flatcc_builder_union_vec_ref_t uvref = { 0, 0 };
+ flatcc_builder_utype_t *types;
+ flatcc_builder_union_ref_t *urefs;
+ flatcc_builder_ref_t *refs;
+ size_t i, count;
+
+ check(frame(type) == flatcc_builder_union_vector, "expected union vector frame");
+
+ /*
+ * We could split the union vector in-place, but then we would have
+ * to deal with strict pointer aliasing rules which is not worthwhile
+ * so we create a new offset and type vector on the stack.
+ *
+ * We assume the stack is sufficiently aligned as is.
+ */
+ count = flatcc_builder_union_vector_count(B);
+ if (0 == (refs = push_ds(B, (uoffset_t)(count * (utype_size + field_size))))) {
+ return uvref;
+ }
+ types = (flatcc_builder_utype_t *)(refs + count);
+
+ /* Safe even if push_ds caused stack reallocation. */
+ urefs = flatcc_builder_union_vector_edit(B);
+
+ for (i = 0; i < count; ++i) {
+ types[i] = urefs[i].type;
+ refs[i] = urefs[i].value;
+ }
+ uvref = flatcc_builder_create_union_vector_direct(B, types, refs, count);
+    /* No need to clean up after our temporary types vector. */
+ exit_frame(B);
+ return uvref;
+}
+
+void *flatcc_builder_union_vector_edit(flatcc_builder_t *B)
+{
+ return B->ds;
+}
+
+size_t flatcc_builder_union_vector_count(flatcc_builder_t *B)
+{
+ return frame(container.vector.count);
+}
+
+flatcc_builder_union_ref_t *flatcc_builder_extend_union_vector(flatcc_builder_t *B, size_t count)
+{
+ if (vector_count_add(B, (uoffset_t)count, max_union_count)) {
+ return 0;
+ }
+ return push_ds(B, (uoffset_t)(union_size * count));
+}
+
+int flatcc_builder_truncate_union_vector(flatcc_builder_t *B, size_t count)
+{
+ check(frame(type) == flatcc_builder_union_vector, "expected union vector frame");
+ check_error(frame(container.vector.count) >= (uoffset_t)count, -1, "cannot truncate vector past empty");
+ frame(container.vector.count) -= (uoffset_t)count;
+ unpush_ds(B, frame(container.vector.elem_size) * (uoffset_t)count);
+ return 0;
+}
+
+flatcc_builder_union_ref_t *flatcc_builder_union_vector_push(flatcc_builder_t *B,
+ flatcc_builder_union_ref_t uref)
+{
+ flatcc_builder_union_ref_t *p;
+
+ check(frame(type) == flatcc_builder_union_vector, "expected union vector frame");
+ if (frame(container.vector.count) == max_union_count) {
+ return 0;
+ }
+ frame(container.vector.count) += 1;
+ if (0 == (p = push_ds(B, union_size))) {
+ return 0;
+ }
+ *p = uref;
+ return p;
+}
+
+flatcc_builder_union_ref_t *flatcc_builder_append_union_vector(flatcc_builder_t *B,
+ const flatcc_builder_union_ref_t *urefs, size_t count)
+{
+ check(frame(type) == flatcc_builder_union_vector, "expected union vector frame");
+ if (vector_count_add(B, (uoffset_t)count, max_union_count)) {
+ return 0;
+ }
+ return push_ds_copy(B, urefs, (uoffset_t)(union_size * count));
+}
+
+flatcc_builder_ref_t flatcc_builder_create_string(flatcc_builder_t *B, const char *s, size_t len)
+{
+ uoffset_t s_pad;
+ uoffset_t length_prefix;
+ iov_state_t iov;
+
+ if (len > max_string_len) {
+ return 0;
+ }
+ write_uoffset(&length_prefix, (uoffset_t)len);
+ /* Add 1 for zero termination. */
+ s_pad = front_pad(B, (uoffset_t)len + 1, field_size) + 1;
+ init_iov();
+ push_iov(&length_prefix, field_size);
+ push_iov(s, len);
+ push_iov(_pad, s_pad);
+ return emit_front(B, &iov);
+}
+
+flatcc_builder_ref_t flatcc_builder_create_string_str(flatcc_builder_t *B, const char *s)
+{
+ return flatcc_builder_create_string(B, s, strlen(s));
+}
+
+flatcc_builder_ref_t flatcc_builder_create_string_strn(flatcc_builder_t *B, const char *s, size_t max_len)
+{
+ return flatcc_builder_create_string(B, s, strnlen(s, max_len));
+}
+
+flatcc_builder_ref_t flatcc_builder_end_string(flatcc_builder_t *B)
+{
+ flatcc_builder_ref_t string_ref;
+
+ check(frame(type) == flatcc_builder_string, "expected string frame");
+ FLATCC_ASSERT(frame(container.vector.count) == B->ds_offset);
+ if (0 == (string_ref = flatcc_builder_create_string(B,
+ (const char *)B->ds, B->ds_offset))) {
+ return 0;
+ }
+ exit_frame(B);
+ return string_ref;
+}
+
+char *flatcc_builder_string_edit(flatcc_builder_t *B)
+{
+ return (char *)B->ds;
+}
+
+size_t flatcc_builder_string_len(flatcc_builder_t *B)
+{
+ return frame(container.vector.count);
+}
+
+void *flatcc_builder_table_add(flatcc_builder_t *B, int id, size_t size, uint16_t align)
+{
+ /*
+ * We align the offset relative to the first table field, excluding
+ * the header holding the vtable reference. On the stack, `ds_first`
+ * is aligned to 8 bytes thanks to the `enter_frame` logic, and this
+ * provides a safe way to update the fields on the stack, but here
+ * we are concerned with the target buffer alignment.
+ *
+ * We could also have aligned relative to the end of the table which
+ * would allow us to emit each field immediately, but it would be a
+ * confusing user experience wrt. field ordering, and it would add
+ * more variability to vtable layouts, thus reducing reuse, and
+     * frequent emissions to the external emitter interface would be
+     * sub-optimal. Also, with that approach, the vtable offsets would
+     * have to be adjusted at table end.
+     *
+     * As we have it, each emit occurs at table end, vector end, string
+ * end, or buffer end, which might be helpful to various backend
+ * processors.
+ */
+ check(frame(type) == flatcc_builder_table, "expected table frame");
+ check(id >= 0 && id <= (int)FLATBUFFERS_ID_MAX, "table id out of range");
+ if (align > B->align) {
+ B->align = align;
+ }
+#if FLATCC_BUILDER_ALLOW_REPEAT_TABLE_ADD
+ if (B->vs[id] != 0) {
+ return B->ds + B->vs[id] - field_size;
+ }
+#else
+ if (B->vs[id] != 0) {
+ check(0, "table field already set");
+ return 0;
+ }
+#endif
+ FLATCC_BUILDER_UPDATE_VT_HASH(B->vt_hash, (uint32_t)id, (uint32_t)size);
+ return push_ds_field(B, (uoffset_t)size, align, (voffset_t)id);
+}
+
+void *flatcc_builder_table_edit(flatcc_builder_t *B, size_t size)
+{
+ check(frame(type) == flatcc_builder_table, "expected table frame");
+
+ return B->ds + B->ds_offset - size;
+}
+
+void *flatcc_builder_table_add_copy(flatcc_builder_t *B, int id, const void *data, size_t size, uint16_t align)
+{
+ void *p;
+
+ if ((p = flatcc_builder_table_add(B, id, size, align))) {
+ memcpy(p, data, size);
+ }
+ return p;
+}
+
+flatcc_builder_ref_t *flatcc_builder_table_add_offset(flatcc_builder_t *B, int id)
+{
+ check(frame(type) == flatcc_builder_table, "expected table frame");
+ check(id >= 0 && id <= (int)FLATBUFFERS_ID_MAX, "table id out of range");
+#if FLATCC_BUILDER_ALLOW_REPEAT_TABLE_ADD
+ if (B->vs[id] != 0) {
+ return B->ds + B->vs[id] - field_size;
+ }
+#else
+ if (B->vs[id] != 0) {
+ check(0, "table field already set");
+ return 0;
+ }
+#endif
+ FLATCC_BUILDER_UPDATE_VT_HASH(B->vt_hash, (uint32_t)id, (uint32_t)field_size);
+ return push_ds_offset_field(B, (voffset_t)id);
+}
+
+uint16_t flatcc_builder_push_buffer_alignment(flatcc_builder_t *B)
+{
+ uint16_t old_min_align = B->min_align;
+
+ B->min_align = field_size;
+ return old_min_align;
+}
+
+void flatcc_builder_pop_buffer_alignment(flatcc_builder_t *B, uint16_t pushed_align)
+{
+ set_min_align(B, pushed_align);
+}
+
+uint16_t flatcc_builder_get_buffer_alignment(flatcc_builder_t *B)
+{
+ return B->min_align;
+}
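+
+/*
+ * Sketch: push saves the current minimum alignment while a nested
+ * buffer is built; pop merges it back with a max operation.
+ *
+ *     uint16_t saved = flatcc_builder_push_buffer_alignment(B);
+ *     // ... build a nested buffer ...
+ *     flatcc_builder_pop_buffer_alignment(B, saved);
+ */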
+
+void flatcc_builder_set_vtable_clustering(flatcc_builder_t *B, int enable)
+{
+ /* Inverted because we zero all memory in B on init. */
+ B->disable_vt_clustering = !enable;
+}
+
+void flatcc_builder_set_block_align(flatcc_builder_t *B, uint16_t align)
+{
+ B->block_align = align;
+}
+
+int flatcc_builder_get_level(flatcc_builder_t *B)
+{
+ return B->level;
+}
+
+void flatcc_builder_set_max_level(flatcc_builder_t *B, int max_level)
+{
+ B->max_level = max_level;
+ if (B->limit_level < B->max_level) {
+ B->limit_level = B->max_level;
+ }
+}
+
+size_t flatcc_builder_get_buffer_size(flatcc_builder_t *B)
+{
+ return (size_t)(B->emit_end - B->emit_start);
+}
+
+flatcc_builder_ref_t flatcc_builder_get_buffer_start(flatcc_builder_t *B)
+{
+ return B->emit_start;
+}
+
+flatcc_builder_ref_t flatcc_builder_get_buffer_end(flatcc_builder_t *B)
+{
+ return B->emit_end;
+}
+
+void flatcc_builder_set_vtable_cache_limit(flatcc_builder_t *B, size_t size)
+{
+ B->vb_flush_limit = size;
+}
+
+void flatcc_builder_set_identifier(flatcc_builder_t *B, const char identifier[identifier_size])
+{
+ set_identifier(identifier);
+}
+
+enum flatcc_builder_type flatcc_builder_get_type(flatcc_builder_t *B)
+{
+ return B->frame ? frame(type) : flatcc_builder_empty;
+}
+
+enum flatcc_builder_type flatcc_builder_get_type_at(flatcc_builder_t *B, int level)
+{
+ if (level < 1 || level > B->level) {
+ return flatcc_builder_empty;
+ }
+ return B->frame[level - B->level].type;
+}
+
+void *flatcc_builder_get_direct_buffer(flatcc_builder_t *B, size_t *size_out)
+{
+ if (B->is_default_emitter) {
+ return flatcc_emitter_get_direct_buffer(&B->default_emit_context, size_out);
+ } else {
+ if (size_out) {
+ *size_out = 0;
+ }
+ }
+ return 0;
+}
+
+void *flatcc_builder_copy_buffer(flatcc_builder_t *B, void *buffer, size_t size)
+{
+ /* User is allowed to call tentatively to see if there is support. */
+ if (!B->is_default_emitter) {
+ return 0;
+ }
+ buffer = flatcc_emitter_copy_buffer(&B->default_emit_context, buffer, size);
+ check(buffer, "default emitter declined to copy buffer");
+ return buffer;
+}
+
+void *flatcc_builder_finalize_buffer(flatcc_builder_t *B, size_t *size_out)
+{
+ void * buffer;
+ size_t size;
+
+ size = flatcc_builder_get_buffer_size(B);
+
+ if (size_out) {
+ *size_out = size;
+ }
+
+ buffer = FLATCC_BUILDER_ALLOC(size);
+
+ if (!buffer) {
+ check(0, "failed to allocated memory for finalized buffer");
+ goto done;
+ }
+ if (!flatcc_builder_copy_buffer(B, buffer, size)) {
+ check(0, "default emitter declined to copy buffer");
+ FLATCC_BUILDER_FREE(buffer);
+ buffer = 0;
+ }
+done:
+ if (!buffer && size_out) {
+ *size_out = 0;
+ }
+ return buffer;
+}
+
+void *flatcc_builder_finalize_aligned_buffer(flatcc_builder_t *B, size_t *size_out)
+{
+ void * buffer;
+ size_t align;
+ size_t size;
+
+ size = flatcc_builder_get_buffer_size(B);
+
+ if (size_out) {
+ *size_out = size;
+ }
+ align = flatcc_builder_get_buffer_alignment(B);
+
+ size = (size + align - 1) & ~(align - 1);
+ buffer = FLATCC_BUILDER_ALIGNED_ALLOC(align, size);
+
+ if (!buffer) {
+ goto done;
+ }
+ if (!flatcc_builder_copy_buffer(B, buffer, size)) {
+ FLATCC_BUILDER_ALIGNED_FREE(buffer);
+ buffer = 0;
+ goto done;
+ }
+done:
+ if (!buffer && size_out) {
+ *size_out = 0;
+ }
+ return buffer;
+}
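+
+/*
+ * Sketch: finalizing with the default emitter and releasing the result
+ * with the matching deallocator.
+ *
+ *     size_t size;
+ *     void *buf = flatcc_builder_finalize_aligned_buffer(B, &size);
+ *     if (buf) {
+ *         // ... use `size` bytes at buf ...
+ *         flatcc_builder_aligned_free(buf);
+ *     }
+ */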
+
+void *flatcc_builder_aligned_alloc(size_t alignment, size_t size)
+{
+ return FLATCC_BUILDER_ALIGNED_ALLOC(alignment, size);
+}
+
+void flatcc_builder_aligned_free(void *p)
+{
+ FLATCC_BUILDER_ALIGNED_FREE(p);
+}
+
+void *flatcc_builder_alloc(size_t size)
+{
+ return FLATCC_BUILDER_ALLOC(size);
+}
+
+void flatcc_builder_free(void *p)
+{
+ FLATCC_BUILDER_FREE(p);
+}
+
+void *flatcc_builder_get_emit_context(flatcc_builder_t *B)
+{
+ return B->emit_context;
+}
diff --git a/nostrdb/flatcc/emitter.c b/nostrdb/flatcc/emitter.c
@@ -0,0 +1,269 @@
+#include <stdlib.h>
+
+#include "flatcc_rtconfig.h"
+#include "flatcc_emitter.h"
+
+static int advance_front(flatcc_emitter_t *E)
+{
+ flatcc_emitter_page_t *p = 0;
+
+ if (E->front && E->front->prev != E->back) {
+ E->front->prev->page_offset = E->front->page_offset - FLATCC_EMITTER_PAGE_SIZE;
+ E->front = E->front->prev;
+ goto done;
+ }
+ if (!(p = FLATCC_EMITTER_ALLOC(sizeof(flatcc_emitter_page_t)))) {
+ return -1;
+ }
+ E->capacity += FLATCC_EMITTER_PAGE_SIZE;
+ if (E->front) {
+ p->prev = E->back;
+ p->next = E->front;
+ E->front->prev = p;
+ E->back->next = p;
+ E->front = p;
+ goto done;
+ }
+ /*
+     * The first page is shared between front and back to avoid an
+     * unnecessary extra allocation.
+ */
+ E->front = p;
+ E->back = p;
+ p->next = p;
+ p->prev = p;
+ E->front_cursor = E->front->page + FLATCC_EMITTER_PAGE_SIZE / 2;
+ E->back_cursor = E->front_cursor;
+ E->front_left = FLATCC_EMITTER_PAGE_SIZE / 2;
+ E->back_left = FLATCC_EMITTER_PAGE_SIZE - E->front_left;
+ p->page_offset = -(flatbuffers_soffset_t)E->front_left;
+ return 0;
+done:
+ E->front_cursor = E->front->page + FLATCC_EMITTER_PAGE_SIZE;
+ E->front_left = FLATCC_EMITTER_PAGE_SIZE;
+ E->front->page_offset = E->front->next->page_offset - FLATCC_EMITTER_PAGE_SIZE;
+ return 0;
+}
+
+static int advance_back(flatcc_emitter_t *E)
+{
+ flatcc_emitter_page_t *p = 0;
+
+ if (E->back && E->back->next != E->front) {
+ E->back = E->back->next;
+ goto done;
+ }
+ if (!(p = FLATCC_EMITTER_ALLOC(sizeof(flatcc_emitter_page_t)))) {
+ return -1;
+ }
+ E->capacity += FLATCC_EMITTER_PAGE_SIZE;
+ if (E->back) {
+ p->prev = E->back;
+ p->next = E->front;
+ E->front->prev = p;
+ E->back->next = p;
+ E->back = p;
+ goto done;
+ }
+ /*
+     * The first page is shared between front and back to avoid an
+     * unnecessary extra allocation.
+ */
+ E->front = p;
+ E->back = p;
+ p->next = p;
+ p->prev = p;
+ E->front_cursor = E->front->page + FLATCC_EMITTER_PAGE_SIZE / 2;
+ E->back_cursor = E->front_cursor;
+ E->front_left = FLATCC_EMITTER_PAGE_SIZE / 2;
+ E->back_left = FLATCC_EMITTER_PAGE_SIZE - E->front_left;
+ p->page_offset = -(flatbuffers_soffset_t)E->front_left;
+ return 0;
+done:
+ E->back_cursor = E->back->page;
+ E->back_left = FLATCC_EMITTER_PAGE_SIZE;
+ E->back->page_offset = E->back->prev->page_offset + FLATCC_EMITTER_PAGE_SIZE;
+ return 0;
+}
+
+static int copy_front(flatcc_emitter_t *E, uint8_t *data, size_t size)
+{
+ size_t k;
+
+ data += size;
+ while (size) {
+ k = size;
+ if (k > E->front_left) {
+ k = E->front_left;
+ if (k == 0) {
+ if (advance_front(E)) {
+ return -1;
+ }
+ continue;
+ }
+ }
+ E->front_cursor -= k;
+ E->front_left -= k;
+ data -= k;
+ size -= k;
+ memcpy(E->front_cursor, data, k);
+    }
+ return 0;
+}
+
+static int copy_back(flatcc_emitter_t *E, uint8_t *data, size_t size)
+{
+ size_t k;
+
+ while (size) {
+ k = size;
+ if (k > E->back_left) {
+ k = E->back_left;
+ if (k == 0) {
+ if (advance_back(E)) {
+ return -1;
+ }
+ continue;
+ }
+ }
+ memcpy(E->back_cursor, data, k);
+ size -= k;
+ data += k;
+ E->back_cursor += k;
+ E->back_left -= k;
+ }
+ return 0;
+}
+
+int flatcc_emitter_recycle_page(flatcc_emitter_t *E, flatcc_emitter_page_t *p)
+{
+ if (p == E->front || p == E->back) {
+ return -1;
+ }
+ p->next->prev = p->prev;
+ p->prev->next = p->next;
+ p->prev = E->front->prev;
+ p->next = E->front;
+ p->prev->next = p;
+ p->next->prev = p;
+ return 0;
+}
+
+void flatcc_emitter_reset(flatcc_emitter_t *E)
+{
+ flatcc_emitter_page_t *p = E->front;
+
+ if (!E->front) {
+ return;
+ }
+ E->back = E->front;
+ E->front_cursor = E->front->page + FLATCC_EMITTER_PAGE_SIZE / 2;
+ E->back_cursor = E->front_cursor;
+ E->front_left = FLATCC_EMITTER_PAGE_SIZE / 2;
+ E->back_left = FLATCC_EMITTER_PAGE_SIZE - FLATCC_EMITTER_PAGE_SIZE / 2;
+ E->front->page_offset = -(flatbuffers_soffset_t)E->front_left;
+ /* Heuristic to reduce peak allocation over time. */
+ if (E->used_average == 0) {
+ E->used_average = E->used;
+ }
+ E->used_average = E->used_average * 3 / 4 + E->used / 4;
+ E->used = 0;
+ while (E->used_average * 2 < E->capacity && E->back->next != E->front) {
+ /* We deallocate the page after back since it is less likely to be hot in cache. */
+ p = E->back->next;
+ E->back->next = p->next;
+ p->next->prev = E->back;
+ FLATCC_EMITTER_FREE(p);
+ E->capacity -= FLATCC_EMITTER_PAGE_SIZE;
+ }
+}
+
+void flatcc_emitter_clear(flatcc_emitter_t *E)
+{
+ flatcc_emitter_page_t *p = E->front;
+
+ if (!p) {
+ return;
+ }
+ p->prev->next = 0;
+ while (p->next) {
+ p = p->next;
+ FLATCC_EMITTER_FREE(p->prev);
+ }
+ FLATCC_EMITTER_FREE(p);
+ memset(E, 0, sizeof(*E));
+}
+
+int flatcc_emitter(void *emit_context,
+ const flatcc_iovec_t *iov, int iov_count,
+ flatbuffers_soffset_t offset, size_t len)
+{
+ flatcc_emitter_t *E = emit_context;
+ uint8_t *p;
+
+ E->used += len;
+ if (offset < 0) {
+ if (len <= E->front_left) {
+ E->front_cursor -= len;
+ E->front_left -= len;
+ p = E->front_cursor;
+ goto copy;
+ }
+ iov += iov_count;
+ while (iov_count--) {
+ --iov;
+ if (copy_front(E, iov->iov_base, iov->iov_len)) {
+ return -1;
+ }
+ }
+ } else {
+ if (len <= E->back_left) {
+ p = E->back_cursor;
+ E->back_cursor += len;
+ E->back_left -= len;
+ goto copy;
+ }
+ while (iov_count--) {
+ if (copy_back(E, iov->iov_base, iov->iov_len)) {
+ return -1;
+ }
+ ++iov;
+ }
+ }
+ return 0;
+copy:
+ while (iov_count--) {
+ memcpy(p, iov->iov_base, iov->iov_len);
+ p += iov->iov_len;
+ ++iov;
+ }
+ return 0;
+}
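+
+/*
+ * Sketch of a custom emit function with the same signature, assuming a
+ * hypothetical random-access sink (`my_sink_t`, `my_sink_write_at`);
+ * offsets are negative for front emits, so a real implementation must
+ * support writing on both sides of offset 0 like the code above.
+ *
+ *     int my_emit(void *ctx, const flatcc_iovec_t *iov, int iov_count,
+ *             flatbuffers_soffset_t offset, size_t len)
+ *     {
+ *         my_sink_t *sink = ctx;
+ *         while (iov_count--) {
+ *             my_sink_write_at(sink, offset, iov->iov_base, iov->iov_len);
+ *             offset += (flatbuffers_soffset_t)iov->iov_len;
+ *             ++iov;
+ *         }
+ *         return 0;
+ *     }
+ */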
+
+void *flatcc_emitter_copy_buffer(flatcc_emitter_t *E, void *buf, size_t size)
+{
+ flatcc_emitter_page_t *p;
+ size_t len;
+
+ if (size < E->used) {
+ return 0;
+ }
+ if (!E->front) {
+ return 0;
+ }
+ if (E->front == E->back) {
+ memcpy(buf, E->front_cursor, E->used);
+ return buf;
+ }
+ len = FLATCC_EMITTER_PAGE_SIZE - E->front_left;
+ memcpy(buf, E->front_cursor, len);
+ buf = (uint8_t *)buf + len;
+ p = E->front->next;
+ while (p != E->back) {
+ memcpy(buf, p->page, FLATCC_EMITTER_PAGE_SIZE);
+ buf = (uint8_t *)buf + FLATCC_EMITTER_PAGE_SIZE;
+ p = p->next;
+ }
+ memcpy(buf, p->page, FLATCC_EMITTER_PAGE_SIZE - E->back_left);
+ return buf;
+}
diff --git a/nostrdb/flatcc/flatcc.h b/nostrdb/flatcc/flatcc.h
@@ -0,0 +1,268 @@
+#ifndef FLATCC_H
+#define FLATCC_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * This is the primary `flatcc` interface when compiling `flatcc` as a
+ * library. Functions and types in this interface will be kept
+ * stable to the extent possible or reasonable, but do not rely on other
+ * interfaces except "config.h", which is used to set default options for this
+ * interface.
+ *
+ * This interface is unrelated to the standalone flatbuilder library
+ * which has a life of its own.
+ */
+
+#include <stddef.h>
+
+#ifndef UINT8_MAX
+#include <stdint.h>
+#endif
+
+#ifdef _MSC_VER
+#pragma warning(push)
+#pragma warning(disable: 4820) /* x bytes padding added in struct */
+#endif
+
+typedef struct flatcc_options flatcc_options_t;
+typedef void (*flatcc_error_fun) (void *err_ctx, const char *buf, size_t len);
+
+struct flatcc_options {
+ size_t max_schema_size;
+ int max_include_depth;
+ int max_include_count;
+ int disable_includes;
+ int allow_boolean_conversion;
+ int allow_enum_key;
+ int allow_enum_struct_field;
+ int allow_multiple_key_fields;
+ int allow_primary_key;
+ int allow_scan_for_all_fields;
+ int allow_string_key;
+ int allow_struct_field_deprecate;
+ int allow_struct_field_key;
+ int allow_struct_root;
+ int ascending_enum;
+ int hide_later_enum;
+ int hide_later_struct;
+ int offset_size;
+ int voffset_size;
+ int utype_size;
+ int bool_size;
+ int require_root_type;
+ int strict_enum_init;
+ uint64_t vt_max_count;
+
+ const char *default_schema_ext;
+ const char *default_bin_schema_ext;
+ const char *default_bin_ext;
+
+ /* Code Generator specific options. */
+ int gen_stdout;
+ int gen_dep;
+
+ const char *gen_depfile;
+ const char *gen_deptarget;
+ const char *gen_outfile;
+
+ int gen_append;
+
+ int cgen_pad;
+ int cgen_sort;
+ int cgen_pragmas;
+
+ int cgen_common_reader;
+ int cgen_common_builder;
+ int cgen_reader;
+ int cgen_builder;
+ int cgen_verifier;
+ int cgen_json_parser;
+ int cgen_json_printer;
+ int cgen_recursive;
+ int cgen_spacing;
+ int cgen_no_conflicts;
+
+
+ int bgen_bfbs;
+ int bgen_qualify_names;
+ int bgen_length_prefix;
+
+ /* Namespace args - these can override defaults so are null by default. */
+ const char *ns;
+ const char *nsc;
+
+ const char **inpaths;
+ const char **srcpaths;
+ int inpath_count;
+ int srcpath_count;
+ const char *outpath;
+};
+
+/* Runtime configurable options. */
+void flatcc_init_options(flatcc_options_t *opts);
+
+typedef void *flatcc_context_t;
+
+/*
+ * Call functions below in order listed one at a time.
+ * Each parse requires a new context.
+ *
+ * A reader file is named after the source base name, e.g.
+ * `monster.fbs` becomes `monster.h`. Builders are optional and created
+ * as `monster_builder.h`. A reader requires a common header
+ * `flatbuffers_common_reader.h` and a builder requires
+ * `flatbuffers_common_builder.h` in addition to the reader files. A
+ * reader needs no other source, but builders must link with the
+ * `flatbuilder` library and include files in `include/flatbuffers`.
+ *
+ * All the files may also be concatenated into one single file, in which
+ * case no attempt is made to include files externally. This can be used
+ * with stdout output. The common builder can follow the common
+ * reader immediately, or at any later point before the first builder.
+ * The common files should only be included once, but no harm is done
+ * if duplication occurs.
+ *
+ * The outpath is prefixed to every output filename. The containing
+ * directory must exist, but the prefix may have text following
+ * the directory, for example the namespace. If outpath = "stdout",
+ * files are generated to stdout.
+ *
+ * Note that const char * options must remain valid for the lifetime
+ * of the context since they are not copied. The options object itself
+ * is not used after initialization and may be reused.
+ */
+
+/*
+ * `name` is the name of the schema file or buffer. If it is path, the
+ * basename is extracted (leading path stripped), and the default schema
+ * extension is stripped if present. The resulting name is used
+ * internally when generating output files. Typically the `name`
+ * argument will be the same as a schema file path given to
+ * `flatcc_parse_file`, but it does not have to be.
+ *
+ * `name` may be null if only common files are generated.
+ *
+ * `error_out` is an optional error handler. If null output is truncated
+ * to a reasonable size and sent to stderr. `error_ctx` is provided as
+ * first argument to `error_out` if `error_out` is non-zero, otherwise
+ * it is ignored.
+ *
+ * Returns context or null on error.
+ */
+flatcc_context_t flatcc_create_context(flatcc_options_t *options, const char *name,
+ flatcc_error_fun error_out, void *error_ctx);
+
+/* Like `flatcc_create_context`, but with length argument for name. */
+/*
+ * Parse is optional - not needed for common files. If the buffer
+ * version is used, the buffer must be zero terminated, otherwise an
+ * input path can be specified. The output path can be null.
+ *
+ * Only one parse can be called per context.
+ *
+ * The buffer size is limited to the max_schema_size option unless it is
+ * 0. The default is a reasonable size such as 64K, depending on config flags.
+ *
+ * The buffer must remain valid for the duration of the context.
+ *
+ * The schema cannot contain include statements when parsed as a buffer.
+ *
+ * Returns 0 on success.
+ */
+int flatcc_parse_buffer(flatcc_context_t ctx, const char *buf, size_t buflen);
+
+/*
+ * If options contain a non-zero `inpath` option, the resulting filename is
+ * prefixed with that path unless the filename is an absolute path.
+ *
+ * Errors are sent to the error handler given during initialization,
+ * or to stderr.
+ *
+ * The file size is limited to the max_schema_size option unless it is
+ * 0. The default is a reasonable size such as 64K, depending on config flags.
+ *
+ * Returns 0 on success.
+ */
+int flatcc_parse_file(flatcc_context_t ctx, const char *filename);
+
+/*
+ * Generate output files. The basename derived when the context was
+ * created is used to name the output files with their respective
+ * extensions. If the outpath option is not null it is prefixed to the
+ * output files. The `cgen_common_reader, cgen_common_builder,
+ * cgen_reader, and cgen_builder` must be set or reset depending on what
+ * is to be generated. The common files do not require a parse, and the
+ * non-common files require a successful parse or the result is
+ * undefined.
+ *
+ * Unlike the parser, the code generator always produces errors to
+ * stderr. These errors are rare, such as overly long namespace
+ * names.
+ *
+ * If the `gen_stdout` option is set, all files are generated to stdout.
+ * In this case it is unwise to mix C and binary schema output options.
+ *
+ * If `bgen_bfbs` is set, a binary schema is generated to a file with
+ * the `.bfbs` extension. See also `flatcc_generate_binary_schema` for
+ * further details. Only `flatcc_generate_files` is called via the
+ * `flatcc` cli command.
+ *
+ * The `bgen_length_prefix` option will cause a length prefix to be
+ * written to each output binary schema. This option is only
+ * understood when writing to files.
+ *
+ * Returns 0 on success.
+ */
+int flatcc_generate_files(flatcc_context_t ctx);
+
+/*
+ * Returns a buffer with a binary schema for a previous parse.
+ * The user is responsible for calling `free` on the returned buffer
+ * unless 0 is returned on error.
+ *
+ * Can be called instead of generate files, before, or after, but a
+ * schema must be parsed first.
+ *
+ * Returns a binary schema in `reflection.fbs` format. Any included
+ * files will be contained in the schema and there are no separate
+ * schema files for included schema.
+ *
+ * All type names are scoped, meaning that they are prefixed with their
+ * namespace using `.` as the namespace separator, for example:
+ * "MyGame.Example.Monster". Note that this differs from the current
+ * `flatc` compiler which does not prefix names. Enum names are not
+ * scoped, but the scope is implied by the containing enum type.
+ * The option `bgen_qualify_names=0` changes this behavior.
+ *
+ * If the default option `ascending_enum` is disabled, the `flatcc` will
+ * accept duplicate values and overlapping ranges like the C programming
+ * language. In this case enum values in the binary schema will not be
+ * searchable. At any rate enum names are not searchable in the current
+ * schema format.
+ *
+ */
+void *flatcc_generate_binary_schema(flatcc_context_t ctx, size_t *size);
+
+/*
+ * Similar to `flatcc_generate_binary_schema` but copies the binary
+ * schema into a user supplied buffer. If the buffer is too small
+ * the return value will be negative and the buffer content undefined.
+ */
+int flatcc_generate_binary_schema_to_buffer(flatcc_context_t ctx, void *buf, size_t bufsiz);
+
+/* Must be called to deallocate resources eventually - it is valid but
+ * has no effect to call with a null context. */
+void flatcc_destroy_context(flatcc_context_t ctx);
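+
+/*
+ * A minimal usage sketch of the interface above - illustrative only,
+ * not part of this header. The schema name "monster.fbs" and the
+ * chosen options are assumptions.
+ *
+ *   #include "flatcc.h"
+ *
+ *   int generate(void)
+ *   {
+ *       flatcc_options_t opts;
+ *       flatcc_context_t ctx;
+ *       int ret = -1;
+ *
+ *       flatcc_init_options(&opts);
+ *       opts.cgen_reader = 1;        // generate monster.h
+ *       opts.cgen_common_reader = 1; // generate flatbuffers_common_reader.h
+ *       ctx = flatcc_create_context(&opts, "monster.fbs", 0, 0);
+ *       if (!ctx) return -1;
+ *       if (flatcc_parse_file(ctx, "monster.fbs") == 0) {
+ *           ret = flatcc_generate_files(ctx);
+ *       }
+ *       flatcc_destroy_context(ctx);
+ *       return ret;
+ *   }
+ */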
+
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* FLATCC_H */
diff --git a/nostrdb/flatcc/flatcc_accessors.h b/nostrdb/flatcc/flatcc_accessors.h
@@ -0,0 +1,101 @@
+#ifndef FLATCC_ACCESSORS
+#define FLATCC_ACCESSORS
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef UINT8_MAX
+#include <stdint.h>
+#endif
+
+#define __flatcc_basic_scalar_accessors_impl(N, T, W, E) \
+static inline size_t N ## __size(void) \
+{ return sizeof(T); } \
+static inline T *N ## __ptr_add(T *p, size_t i) \
+{ return p + i; } \
+static inline const T *N ## __const_ptr_add(const T *p, size_t i) \
+{ return p + i; } \
+static inline T N ## _read_from_pe(const void *p) \
+{ return N ## _cast_from_pe(*(T *)p); } \
+static inline T N ## _read_to_pe(const void *p) \
+{ return N ## _cast_to_pe(*(T *)p); } \
+static inline T N ## _read(const void *p) \
+{ return *(T *)p; } \
+static inline void N ## _write_from_pe(void *p, T v) \
+{ *(T *)p = N ## _cast_from_pe(v); } \
+static inline void N ## _write_to_pe(void *p, T v) \
+{ *(T *)p = N ## _cast_to_pe(v); } \
+static inline void N ## _write(void *p, T v) \
+{ *(T *)p = v; } \
+static inline T N ## _read_from_le(const void *p) \
+{ return N ## _cast_from_le(*(T *)p); } \
+typedef struct { int is_null; T value; } N ## _option_t;
+
+#define __flatcc_define_integer_accessors_impl(N, T, W, E) \
+static inline T N ## _cast_from_pe(T v) \
+{ return (T) E ## W ## toh((uint ## W ## _t)v); } \
+static inline T N ## _cast_to_pe(T v) \
+{ return (T) hto ## E ## W((uint ## W ## _t)v); } \
+static inline T N ## _cast_from_le(T v) \
+{ return (T) le ## W ## toh((uint ## W ## _t)v); } \
+static inline T N ## _cast_to_le(T v) \
+{ return (T) htole ## W((uint ## W ## _t)v); } \
+static inline T N ## _cast_from_be(T v) \
+{ return (T) be ## W ## toh((uint ## W ## _t)v); } \
+static inline T N ## _cast_to_be(T v) \
+{ return (T) htobe ## W((uint ## W ## _t)v); } \
+__flatcc_basic_scalar_accessors_impl(N, T, W, E)
+
+#define __flatcc_define_real_accessors_impl(N, T, W, E) \
+union __ ## N ## _cast { T v; uint ## W ## _t u; }; \
+static inline T N ## _cast_from_pe(T v) \
+{ union __ ## N ## _cast x; \
+ x.v = v; x.u = E ## W ## toh(x.u); return x.v; } \
+static inline T N ## _cast_to_pe(T v) \
+{ union __ ## N ## _cast x; \
+ x.v = v; x.u = hto ## E ## W(x.u); return x.v; } \
+static inline T N ## _cast_from_le(T v) \
+{ union __ ## N ## _cast x; \
+ x.v = v; x.u = le ## W ## toh(x.u); return x.v; } \
+static inline T N ## _cast_to_le(T v) \
+{ union __ ## N ## _cast x; \
+ x.v = v; x.u = htole ## W(x.u); return x.v; } \
+static inline T N ## _cast_from_be(T v) \
+{ union __ ## N ## _cast x; \
+ x.v = v; x.u = be ## W ## toh(x.u); return x.v; } \
+static inline T N ## _cast_to_be(T v) \
+{ union __ ## N ## _cast x; \
+ x.v = v; x.u = htobe ## W(x.u); return x.v; } \
+__flatcc_basic_scalar_accessors_impl(N, T, W, E)
+
+#define __flatcc_define_integer_accessors(N, T, W, E) \
+__flatcc_define_integer_accessors_impl(N, T, W, E)
+
+#define __flatcc_define_real_accessors(N, T, W, E) \
+__flatcc_define_real_accessors_impl(N, T, W, E)
+
+#define __flatcc_define_basic_integer_accessors(NS, TN, T, W, E) \
+__flatcc_define_integer_accessors(NS ## TN, T, W, E)
+
+#define __flatcc_define_basic_real_accessors(NS, TN, T, W, E) \
+__flatcc_define_real_accessors(NS ## TN, T, W, E)
+
+#define __flatcc_define_basic_scalar_accessors(NS, E) \
+__flatcc_define_basic_integer_accessors(NS, char, char, 8, E) \
+__flatcc_define_basic_integer_accessors(NS, uint8, uint8_t, 8, E) \
+__flatcc_define_basic_integer_accessors(NS, uint16, uint16_t, 16, E) \
+__flatcc_define_basic_integer_accessors(NS, uint32, uint32_t, 32, E) \
+__flatcc_define_basic_integer_accessors(NS, uint64, uint64_t, 64, E) \
+__flatcc_define_basic_integer_accessors(NS, int8, int8_t, 8, E) \
+__flatcc_define_basic_integer_accessors(NS, int16, int16_t, 16, E) \
+__flatcc_define_basic_integer_accessors(NS, int32, int32_t, 32, E) \
+__flatcc_define_basic_integer_accessors(NS, int64, int64_t, 64, E) \
+__flatcc_define_basic_real_accessors(NS, float, float, 32, E) \
+__flatcc_define_basic_real_accessors(NS, double, double, 64, E)
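+
+/*
+ * Illustrative sketch, not part of this header: assuming endian
+ * conversion macros such as le32toh/htole32 are in scope, the
+ * instantiation
+ *
+ *   __flatcc_define_integer_accessors(myns_uint32, uint32_t, 32, le)
+ *
+ * expands to inline functions such as
+ *
+ *   static inline uint32_t myns_uint32_read_from_pe(const void *p);
+ *   static inline void myns_uint32_write_to_pe(void *p, uint32_t v);
+ *
+ * where protocol endian (`pe`) resolves to little endian because `E`
+ * is `le`. The `myns_` prefix is a hypothetical namespace; a full set
+ * per namespace is instantiated with
+ * __flatcc_define_basic_scalar_accessors(NS, E).
+ */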
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* FLATCC_ACCESSORS */
diff --git a/nostrdb/flatcc/flatcc_alloc.h b/nostrdb/flatcc/flatcc_alloc.h
@@ -0,0 +1,127 @@
+#ifndef FLATCC_ALLOC_H
+#define FLATCC_ALLOC_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * These allocation abstractions are __only__ for runtime libraries.
+ *
+ * The flatcc compiler uses Posix allocation routines regardless
+ * of how this file is configured.
+ *
+ * This header makes it possible to use systems where malloc is not
+ * valid to use. In this case the portable library will not help
+ * because it implements Posix / C11 abstractions.
+ *
+ * Systems like FreeRTOS do not work with Posix memory calls and here it
+ * can be helpful to override runtime allocation primitives.
+ *
+ * In general, it is better to customize the allocator and emitter via
+ * flatcc_builder_custom_init and to avoid using the default emitter
+ * specific high level calls that copy out a buffer which must later be
+ * deallocated. This provides full control of allocation without the
+ * need for this file.
+ *
+ *
+ * IMPORTANT
+ *
+ * If you override malloc, free, etc., make sure your applications
+ * use the same allocation methods. For example, samples/monster.c
+ * and several test cases are no longer guaranteed to work out of the
+ * box.
+ *
+ * The changes must only affect target runtime compilation including
+ * the runtime library libflatccrt.
+ *
+ * The host system flatcc compiler and the compiler library libflatcc
+ * should NOT be compiled with non-Posix allocation since the compiler
+ * has a dependency on the runtime library and the wrong free operation
+ * might be called. The safest way to avoid this problem is to
+ * compile flatcc with the CMake script and the runtime files with a
+ * dedicated build system for the target system.
+ */
+
+#include <stdlib.h>
+
+#ifndef FLATCC_ALLOC
+#define FLATCC_ALLOC(n) malloc(n)
+#endif
+
+#ifndef FLATCC_FREE
+#define FLATCC_FREE(p) free(p)
+#endif
+
+#ifndef FLATCC_REALLOC
+#define FLATCC_REALLOC(p, n) realloc(p, n)
+#endif
+
+#ifndef FLATCC_CALLOC
+#define FLATCC_CALLOC(nm, n) calloc(nm, n)
+#endif
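+
+/*
+ * A possible override sketch, not part of this file: define the macros
+ * before this header is included, e.g. via compiler flags, to route
+ * runtime allocation to a custom pool. The `my_pool_*` functions are
+ * hypothetical and must follow the usual malloc/free/realloc/calloc
+ * contracts.
+ *
+ *   #define FLATCC_ALLOC(n) my_pool_alloc(n)
+ *   #define FLATCC_FREE(p) my_pool_free(p)
+ *   #define FLATCC_REALLOC(p, n) my_pool_realloc(p, n)
+ *   #define FLATCC_CALLOC(nm, n) my_pool_calloc(nm, n)
+ */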
+
+/*
+ * Implements `aligned_alloc` and `aligned_free`.
+ * Even with C11, this implements non-standard aligned_free needed for portable
+ * aligned_alloc implementations.
+ */
+#ifndef FLATCC_USE_GENERIC_ALIGNED_ALLOC
+
+#ifndef FLATCC_NO_PALIGNED_ALLOC
+#include "paligned_alloc.h"
+#else
+#if !defined(__aligned_free_is_defined) || !__aligned_free_is_defined
+#define aligned_free free
+#endif
+#endif
+
+#else /* FLATCC_USE_GENERIC_ALIGNED_ALLOC */
+
+#ifndef FLATCC_ALIGNED_ALLOC
+static inline void *__flatcc_aligned_alloc(size_t alignment, size_t size)
+{
+ char *raw;
+ void *buf;
+ size_t total_size = (size + alignment - 1 + sizeof(void *));
+
+ if (alignment < sizeof(void *)) {
+ alignment = sizeof(void *);
+ }
+ raw = (char *)FLATCC_ALLOC(total_size);
+ if (!raw) {
+ return 0;
+ }
+ /* Align past a slot storing the raw pointer for use by aligned_free. */
+ buf = raw + alignment - 1 + sizeof(void *);
+ buf = (void *)(((size_t)buf) & ~(alignment - 1));
+ ((void **)buf)[-1] = raw;
+ return buf;
+}
+#define FLATCC_ALIGNED_ALLOC(alignment, size) __flatcc_aligned_alloc(alignment, size)
+#endif /* FLATCC_ALIGNED_ALLOC */
+
+#ifndef FLATCC_ALIGNED_FREE
+static inline void __flatcc_aligned_free(void *p)
+{
+ char *raw;
+
+ if (!p) return;
+ raw = ((void **)p)[-1];
+
+ FLATCC_FREE(raw);
+}
+#define FLATCC_ALIGNED_FREE(p) __flatcc_aligned_free(p)
+#endif
+
+#endif /* FLATCC_USE_GENERIC_ALIGNED_ALLOC */
+
+#ifndef FLATCC_ALIGNED_ALLOC
+#define FLATCC_ALIGNED_ALLOC(a, n) aligned_alloc(a, n)
+#endif
+
+#ifndef FLATCC_ALIGNED_FREE
+#define FLATCC_ALIGNED_FREE(p) aligned_free(p)
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* FLATCC_ALLOC_H */
diff --git a/nostrdb/flatcc/flatcc_assert.h b/nostrdb/flatcc/flatcc_assert.h
@@ -0,0 +1,45 @@
+#ifndef FLATCC_ASSERT_H
+#define FLATCC_ASSERT_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+* This assert abstraction is only used for the flatcc runtime library.
+* The flatcc compiler uses Posix assert routines regardless of how this
+* file is configured.
+*
+* This header makes it possible to use systems where assert is not
+* valid to use. Note that `<assert.h>` may remain a dependency for static
+* assertions.
+*
+* `FLATCC_ASSERT` is designed to handle errors which cannot be ignored
+* and could lead to a crash. The portable library may use assertions that
+* are not affected by this macro.
+*
+* `FLATCC_ASSERT` defaults to Posix assert but can be overridden by a
+* preprocessor definition.
+*
+* Runtime assertions can be entirely disabled by defining
+* `FLATCC_NO_ASSERT`.
+*/
+
+#ifdef FLATCC_NO_ASSERT
+/* NOTE: This will not affect inclusion of <assert.h> for static assertions. */
+#undef FLATCC_ASSERT
+#define FLATCC_ASSERT(x) ((void)0)
+/* Grisu3 is used for floating point conversion in JSON processing. */
+#define GRISU3_NO_ASSERT
+#endif
+
+#ifndef FLATCC_ASSERT
+#include <assert.h>
+#define FLATCC_ASSERT assert
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* FLATCC_ASSERT_H */
diff --git a/nostrdb/flatcc/flatcc_builder.h b/nostrdb/flatcc/flatcc_builder.h
@@ -0,0 +1,1908 @@
+#ifndef FLATCC_BUILDER_H
+#define FLATCC_BUILDER_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Library for building untyped FlatBuffers. Intended as a support
+ * library for generated C code to produce typed builders, but might
+ * also be useful in runtime environments and as support for scripting
+ * languages.
+ *
+ * The builder has two API layers: a stack based `start/end` approach,
+ * and a direct `create`, and they may be mixed freely. The direct
+ * approach may be used as part of more specialized optimizations such
+ * as rewriting buffers while the stack approach is convenient for state
+ * machine driven parsers without a stack, or with a very simple stack
+ * without extra allocations.
+ *
+ * The builder emits partial buffer sequences to a user provided emitter
+ * function and does not require a full buffer representation in memory.
+ * For this reason it also does not support sorting or other operations
+ * that require representing the buffer, but post-processors can easily
+ * do this, and the generated schema specific code can provide functions
+ * to handle this.
+ *
+ * A custom allocator with a default realloc implementation can place
+ * restraints on resource consumption and provide initial allocation
+ * sizes for various buffers and stacks in use.
+ *
+ * A buffer under construction uses a virtual address space for the
+ * completed part of the buffer, starting at 0 and growing in both
+ * directions, or just down depending on whether vtables should be
+ * clustered at the end or not. Clustering may help caching and
+ * preshipping that part of the buffer.
+ *
+ * Because an offset cannot be known before its reference location is
+ * defined, every completed table, vector, etc. returns a reference into
+ * the virtual address range. If the final buffer keeps the 0 offset,
+ * these references remain stable and may be used for external references
+ * into the buffer.
+ *
+ * The maximum buffer that can be constructed is in practice limited to
+ * half the UOFFSET_MAX size, typically 2^31 bytes, not counting
+ * clustered vtables that may consume an additional 2^31 bytes
+ * (positive address range), but in practice cannot because vtable
+ * references are signed and thus limited to 2^31 bytes (or equivalent
+ * depending on the flatbuffer types chosen).
+ *
+ * CORRECTION: in various places rules are mentioned about nesting and using
+ * a reference at most once. In fact, DAG's are also valid flatbuffers.
+ * This means a reference may be reused as long as each individual use
+ * obeys the rules and, for example, circular references are not
+ * constructed (circular types are ok, but object graphs with cycles
+ * are not permitted). Be especially aware of the offset vector create
+ * call which translates the references into offsets - this can be
+ * reverted by noting the references in the vector and calculating the
+ * base used for the offsets to restore the original references after the
+ * vector has been emitted.
+ */
+
+#include <stdlib.h>
+#ifndef UINT8_MAX
+#include <stdint.h>
+#endif
+
+#include "flatcc_flatbuffers.h"
+#include "flatcc_emitter.h"
+#include "flatcc_refmap.h"
+
+/* It is possible to enable logging here. */
+#ifndef FLATCC_BUILDER_ASSERT
+#define FLATCC_BUILDER_ASSERT(cond, reason) FLATCC_ASSERT(cond)
+#endif
+
+/*
+ * Error handling is not convenient and correct use should not cause
+ * errors beyond possibly memory allocation, but assertions are a
+ * good way to trace problems.
+ *
+ * Note: some internal assertions will remain even if disabled.
+ */
+#ifndef FLATCC_BUILDER_ASSERT_ON_ERROR
+#define FLATCC_BUILDER_ASSERT_ON_ERROR 1
+#endif
+
+/*
+ * If set, checks user input against state and returns an error,
+ * otherwise errors are ignored (assuming they won't happen).
+ * Errors will be asserted if enabled and checks are not skipped.
+ */
+#ifndef FLATCC_BUILDER_SKIP_CHECKS
+#define FLATCC_BUILDER_SKIP_CHECKS 0
+#endif
+
+
+/*
+ * When adding the same field to a table twice this is either an error
+ * or the existing field is returned, potentially introducing garbage
+ * if the type is a vector, table, or string. When implementing parsers
+ * it may be convenient to not treat this as an error.
+ */
+#ifndef FLATCC_BUILDER_ALLOW_REPEAT_TABLE_ADD
+#define FLATCC_BUILDER_ALLOW_REPEAT_TABLE_ADD 0
+#endif
+
+/**
+ * This type must have same size as `flatbuffers_uoffset_t`
+ * and must be a signed type.
+ */
+typedef flatbuffers_soffset_t flatcc_builder_ref_t;
+typedef flatbuffers_utype_t flatcc_builder_utype_t;
+
+/**
+ * This type must be compatible with code generation that
+ * creates union specific ref types.
+ */
+typedef struct flatcc_builder_union_ref {
+ flatcc_builder_utype_t type;
+ flatcc_builder_ref_t value;
+} flatcc_builder_union_ref_t;
+
+typedef struct flatcc_builder_union_vec_ref {
+ flatcc_builder_ref_t type;
+ flatcc_builder_ref_t value;
+} flatcc_builder_union_vec_ref_t;
+
+/**
+ * Virtual tables are off by one to avoid being mistaken for error at
+ * position 0, and it makes them detectable as such because no other
+ * reference is odd. Vtables are emitted at their actual location
+ * which is one less than the reference value.
+ */
+typedef flatbuffers_soffset_t flatcc_builder_vt_ref_t;
+
+typedef flatbuffers_uoffset_t flatcc_builder_identifier_t;
+
+/**
+ * Hints to custom allocators so they can provide initial alloc sizes
+ * etc. There will be at most one buffer for each allocation type per
+ * flatcc_builder instance. Buffers containing only structs may avoid
+ * allocation altogether using a `create` call. The vs stack must hold
+ * vtable entries for all open tables up to their requested max id, but
+ * unused max id overlap on the stack. The final vtables only store the
+ * largest id actually added. The fs stack must hold stack frames for
+ * the nesting levels expected in the buffer, each about 50-100 bytes.
+ * The ds stack holds open vectors, table data, and nested buffer state.
+ * `create` calls bypass the `ds` and `fs` stack and are thus faster.
+ * The vb buffer holds a copy of all vtables seen and emitted since last
+ * vtable flush. The patch log holds a uoffset for every table field
+ * added to currently open tables. The hash table holds a uoffset entry
+ * for each hash slot where the allocator decides how many to provide
+ * above a certain minimum. The vd buffer allocates vtable descriptors
+ * which is a reference to an emitted vtable, an offset to a cached
+ * vtable, and a link to next descriptor with same hash. Calling `reset`
+ * after build can either keep the allocation levels for the next
+ * buffer, or reduce the buffers already allocated by requesting 1 byte
+ * allocations (meaning provide a default).
+ *
+ * The user stack is not automatically allocated, but when entered
+ * explicitly, the boundary is remembered in the current live
+ * frame.
+ */
+enum flatcc_builder_alloc_type {
+ /* The stack where vtables are built. */
+ flatcc_builder_alloc_vs,
+ /* The stack where data structures are built. */
+ flatcc_builder_alloc_ds,
+ /* The virtual table buffer cache, holds a copy of each vt seen. */
+ flatcc_builder_alloc_vb,
+ /* The patch log, remembers table fields with outstanding offset refs. */
+ flatcc_builder_alloc_pl,
+ /* The stack of frames for nested types. */
+ flatcc_builder_alloc_fs,
+ /* The hash table part of the virtual table cache. */
+ flatcc_builder_alloc_ht,
+ /* The vtable descriptor buffer, i.e. list elements for emitted vtables. */
+ flatcc_builder_alloc_vd,
+ /* User stack frame for custom data. */
+ flatcc_builder_alloc_us,
+
+ /* Number of allocation buffers. */
+ flatcc_builder_alloc_buffer_count
+};
+
+/** Must reflect the `flatcc_builder_alloc_type` enum. */
+#define FLATCC_BUILDER_ALLOC_BUFFER_COUNT flatcc_builder_alloc_buffer_count
+
+#ifndef FLATCC_BUILDER_ALLOC
+#define FLATCC_BUILDER_ALLOC(n) FLATCC_ALLOC(n)
+#endif
+
+#ifndef FLATCC_BUILDER_FREE
+#define FLATCC_BUILDER_FREE(p) FLATCC_FREE(p)
+#endif
+
+#ifndef FLATCC_BUILDER_REALLOC
+#define FLATCC_BUILDER_REALLOC(p, n) FLATCC_REALLOC(p, n)
+#endif
+
+#ifndef FLATCC_BUILDER_ALIGNED_ALLOC
+#define FLATCC_BUILDER_ALIGNED_ALLOC(a, n) FLATCC_ALIGNED_ALLOC(a, n)
+#endif
+
+#ifndef FLATCC_BUILDER_ALIGNED_FREE
+#define FLATCC_BUILDER_ALIGNED_FREE(p) FLATCC_ALIGNED_FREE(p)
+#endif
+
+/**
+ * Emits data to a conceptual deque by appending to either front or
+ * back, starting from offset 0.
+ *
+ * Each emit call appends a strictly later or earlier sequence than the
+ * last emit with same offset sign. Thus a buffer is gradually grown at
+ * both ends. `len` is the combined length of all iov entries such that
+ * `offset + len` yields the former offset for negative offsets and
+ * `offset + len` yields the next offset for non-negative offsets.
+ * The bulk of the data will be in the negative range, possibly all of
+ * it. The first emitted range will either start or end at
+ * offset 0. If offset 0 is emitted, it indicates the start of clustered
+ * vtables. The last positive (non-zero) offset may be zero padding to
+ * place the buffer in a full multiple of `block_align`, if set.
+ *
+ * No iov entry is empty, 0 < iov_count <= FLATCC_IOV_COUNT_MAX.
+ *
+ * The source data are in general ephemeral and should be consumed
+ * immediately, as opposed to caching iov.
+ *
+ * For high performance applications:
+ *
+ * The `create` calls may reference longer living data, but header
+ * fields etc. will still be short lived. If an emitter wants to
+ * reference data in another buffer rather than copying, it should
+ * inspect the memory range. The length of an iov entry may also be used
+ * since headers are never very long (anything starting at 16 bytes can
+ * safely be assumed to be user provided, or static zero padding). It is
+ * guaranteed that data pointers in `create` calls receive a unique slot
+ * separate from temporary headers, in the iov table which may be used
+ * for range checking or hashing (`create_table` is the only call that
+ * mutates the data buffer). It is also guaranteed (with the exception
+ * of `create_table` and `create_cached_vtable`) that data provided to
+ * create calls are not referenced at all by the builder, and these data
+ * may therefore de-facto be handles rather than direct pointers when
+ * the emitter and data provider can agree on such a protocol. This does
+ * NOT apply to any start/end/add/etc. calls which do copy to stack.
+ * `flatcc_builder_padding_base` may be used to test if an iov entry is
+ * zero padding which always begins at that address.
+ *
+ * Future: the emit interface could be extended with a type code
+ * and return an existing object instead of the emitted one if, for
+ * example, they are identical. Outside this api level, generated
+ * code could provide a table comparison function to help such
+ * deduplication. It would be optional because two equal objects
+ * are not necessarily identical. The emitter already receives
+ * one object at a time.
+ *
+ * Returns 0 on success and otherwise causes the flatcc_builder
+ * to fail.
+ */
+typedef int flatcc_builder_emit_fun(void *emit_context,
+ const flatcc_iovec_t *iov, int iov_count, flatbuffers_soffset_t offset, size_t len);
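+
+/*
+ * A minimal custom emitter sketch, not part of this header: it only
+ * counts emitted bytes, e.g. to measure the final buffer size without
+ * storing content. `my_count_emitter` is a hypothetical name.
+ *
+ *   static int my_count_emitter(void *emit_context,
+ *           const flatcc_iovec_t *iov, int iov_count,
+ *           flatbuffers_soffset_t offset, size_t len)
+ *   {
+ *       size_t *total = (size_t *)emit_context;
+ *       (void)iov; (void)iov_count; (void)offset;
+ *       *total += len;
+ *       return 0;
+ *   }
+ *
+ * Install with flatcc_builder_custom_init(B, my_count_emitter, &total,
+ * 0, 0) to keep the default allocator.
+ */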
+
+/*
+ * Returns a pointer to static padding used in emitter calls. May
+ * sometimes also be used for empty defaults such as identifier.
+ */
+extern const uint8_t flatcc_builder_padding_base[];
+
+/**
+ * `request` is a minimum size to be returned, but allocation is
+ * expected to grow exponentially or in reasonable chunks. Notably,
+ * `alloc_type = flatcc_builder_alloc_ht` will only use highest available
+ * power of 2. The allocator may shrink if `request` is well below
+ * current size but should avoid repeated resizing on small changes in
+ * request sizes. If `zero_fill` is non-zero, allocated data beyond
+ * the current size must be zeroed. The buffer `b` may be null with 0
+ * length initially. `alloc_context` is completely implementation
+ * dependent, and not needed when just relying on realloc. The
+ * resulting buffer may be the same or different with moved data, like
+ * realloc. Returns -1 with unmodified buffer on failure or 0 on
+ * success. The `alloc_type` identifies the buffer type. This may be
+ * used to cache buffers between instances of builders, or to decide a
+ * default allocation size larger than requested. If `request` is zero
+ * the buffer should be deallocated if non-empty, and success (0)
+ * returned regardless.
+ */
+typedef int flatcc_builder_alloc_fun(void *alloc_context,
+ flatcc_iovec_t *b, size_t request, int zero_fill, int alloc_type);
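+
+/*
+ * A minimal custom allocator sketch, not part of this header: defers
+ * to realloc and ignores per-type tuning (see
+ * flatcc_builder_default_alloc below for the actual default; note the
+ * ht buffer only uses the highest power of 2 provided). Assumes
+ * <stdlib.h> and <string.h> are in scope.
+ *
+ *   static int my_alloc(void *alloc_context, flatcc_iovec_t *b,
+ *           size_t request, int zero_fill, int alloc_type)
+ *   {
+ *       void *p;
+ *       (void)alloc_context; (void)alloc_type;
+ *       if (request == 0) {
+ *           free(b->iov_base);
+ *           b->iov_base = 0;
+ *           b->iov_len = 0;
+ *           return 0;
+ *       }
+ *       p = realloc(b->iov_base, request);
+ *       if (!p) return -1;
+ *       if (zero_fill && request > b->iov_len) {
+ *           memset((uint8_t *)p + b->iov_len, 0, request - b->iov_len);
+ *       }
+ *       b->iov_base = p;
+ *       b->iov_len = request;
+ *       return 0;
+ *   }
+ */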
+
+/*
+ * The number of hash slots there will be allocated space for. The
+ * allocator may provide more. The size returned should be
+ * `sizeof(flatbuffers_uoffset_t) * count`, where the size is a power of
+ * 2 (or the rest is wasted). The hash table can store many more entries
+ * than slots using linear search. The table does not resize.
+ */
+#ifndef FLATCC_BUILDER_MIN_HASH_COUNT
+#define FLATCC_BUILDER_MIN_HASH_COUNT 64
+#endif
+
+typedef struct __flatcc_builder_buffer_frame __flatcc_builder_buffer_frame_t;
+struct __flatcc_builder_buffer_frame {
+ flatcc_builder_identifier_t identifier;
+ flatcc_builder_ref_t mark;
+ flatbuffers_uoffset_t vs_end;
+ flatbuffers_uoffset_t nest_id;
+ uint16_t flags;
+ uint16_t block_align;
+};
+
+typedef struct __flatcc_builder_vector_frame __flatcc_builder_vector_frame_t;
+struct __flatcc_builder_vector_frame {
+ flatbuffers_uoffset_t elem_size;
+ flatbuffers_uoffset_t count;
+ flatbuffers_uoffset_t max_count;
+};
+
+typedef struct __flatcc_builder_table_frame __flatcc_builder_table_frame_t;
+struct __flatcc_builder_table_frame {
+ flatbuffers_uoffset_t vs_end;
+ flatbuffers_uoffset_t pl_end;
+ uint32_t vt_hash;
+ flatbuffers_voffset_t id_end;
+};
+
+/*
+ * Store state for nested structures such as buffers, tables and vectors.
+ *
+ * For less busy data and data where access to a previous state is
+ * irrelevant, the frame may store the current state directly. Otherwise
+ * the current state is maintained in the flatcc_builder_t structure in a
+ * possibly derived form (e.g. ds pointer instead of ds_end offset) and
+ * the frame is used to store the previous state when the frame is
+ * entered.
+ *
+ * Most operations have a start/update/end cycle that decides the
+ * lifetime of a frame, but these generally also have a direct form
+ * (create) that does not use a frame at all. These still do some
+ * state updates notably passing min_align to parent which may also be
+ * an operation without a frame following the child level operation
+ * (e.g. create struct, create buffer). Ending a frame results in the
+ * same kind of updates.
+ */
+typedef struct __flatcc_builder_frame __flatcc_builder_frame_t;
+struct __flatcc_builder_frame {
+ flatbuffers_uoffset_t ds_first;
+ flatbuffers_uoffset_t type_limit;
+ flatbuffers_uoffset_t ds_offset;
+ uint16_t align;
+ uint16_t type;
+ union {
+ __flatcc_builder_table_frame_t table;
+ __flatcc_builder_vector_frame_t vector;
+ __flatcc_builder_buffer_frame_t buffer;
+ } container;
+};
+
+/**
+ * The main flatcc_builder structure. Can be stack allocated and must
+ * be initialized with `flatcc_builder_init` and cleared with
+ * `flatcc_builder_clear` to reclaim memory. Between buffer builds,
+ * `flatcc_builder_reset` may be used.
+ */
+typedef struct flatcc_builder flatcc_builder_t;
+
+struct flatcc_builder {
+ /* Next entry on reserved stack in `alloc_pl` buffer. */
+ flatbuffers_voffset_t *pl;
+ /* Next entry on reserved stack in `alloc_vs` buffer. */
+ flatbuffers_voffset_t *vs;
+ /* One above the highest entry in vs, used to track vt_size. */
+ flatbuffers_voffset_t id_end;
+ /* The evolving vtable hash updated with every new field. */
+ uint32_t vt_hash;
+
+ /* Pointer to ds_first. */
+ uint8_t *ds;
+ /* Offset from `ds` on current frame. */
+ flatbuffers_uoffset_t ds_offset;
+ /* ds buffer size relative to ds_first, clamped to max size of current type. */
+ flatbuffers_uoffset_t ds_limit;
+
+ /* ds_first, ds_first + ds_offset is current ds stack range. */
+ flatbuffers_uoffset_t ds_first;
+ /* Points to currently open frame in `alloc_fs` buffer. */
+ __flatcc_builder_frame_t *frame;
+
+ /* Only significant to emitter function, if at all. */
+ void *emit_context;
+ /* Only significant to allocator function, if at all. */
+ void *alloc_context;
+ /* Customizable write function that both appends and prepends data. */
+ flatcc_builder_emit_fun *emit;
+ /* Customizable allocator that also deallocates. */
+ flatcc_builder_alloc_fun *alloc;
+ /* Buffers indexed by `alloc_type` */
+ flatcc_iovec_t buffers[FLATCC_BUILDER_ALLOC_BUFFER_COUNT];
+ /* Number of slots in ht given as 1 << ht_width. */
+ size_t ht_width;
+
+ /* The location in vb to add next cached vtable. */
+ flatbuffers_uoffset_t vb_end;
+ /* Where to allocate next vtable descriptor for hash table. */
+ flatbuffers_uoffset_t vd_end;
+ /* Ensure final buffer is aligned to at least this. Nested buffers get their own `min_align`. */
+ uint16_t min_align;
+ /* The currently active object's alignment, isolated from nested activity. */
+ uint16_t align;
+ /* The current buffer's block alignment, used when emitting the buffer. */
+ uint16_t block_align;
+ /* Signed virtual address range used for `flatcc_builder_ref_t` and emitter. */
+ flatcc_builder_ref_t emit_start;
+ flatcc_builder_ref_t emit_end;
+ /* 0 for top level, and end of buffer ref for nested buffers (can also be 0). */
+ flatcc_builder_ref_t buffer_mark;
+ /* Next nest_id. */
+ flatbuffers_uoffset_t nest_count;
+ /* Unique id to prevent sharing of vtables across buffers. */
+ flatbuffers_uoffset_t nest_id;
+ /* Current nesting level. Helpful to state-machines with explicit stack and to check `max_level`. */
+ int level;
+ /* Aggregate check for allocated frame and max_level. */
+ int limit_level;
+ /* Track size prefixed buffer. */
+ uint16_t buffer_flags;
+
+ /* Settings that may happen with no frame allocated. */
+
+ flatcc_builder_identifier_t identifier;
+
+ /* Settings that survive reset (emitter, alloc, and contexts also survive): */
+
+ /* If non-zero, vtable cache gets flushed periodically. */
+ size_t vb_flush_limit;
+ /* If non-zero, fails on deep nesting to help drivers with a stack, such as recursive parsers etc. */
+ int max_level;
+ /* If non-zero, do not cluster vtables at end, only emit negative offsets (0 by default). */
+ int disable_vt_clustering;
+
+ /* Set if the default emitter is being used. */
+ int is_default_emitter;
+ /* Only used with default emitter. */
+ flatcc_emitter_t default_emit_context;
+
+ /* Offset to the last entered user frame on the user frame stack, after frame header, or 0. */
+ size_t user_frame_offset;
+
+ /* The offset to the end of the most recent user frame. */
+ size_t user_frame_end;
+
+ /* The optional user supplied refmap for cloning DAG's - not shared with nested buffers. */
+ flatcc_refmap_t *refmap;
+};
+
+/**
+ * Call this before any other API call.
+ *
+ * The emitter handles the completed chunks of the buffer that will no
+ * longer be required by the builder. It is largely a `write` function
+ * that can append to both positive and negative offsets.
+ *
+ * No memory is allocated during init. Buffers will be allocated as
+ * needed. The `emit_context` is only used by the emitter, if at all.
+ *
+ * `flatcc_builder_reset/clear` calls are automatically forwarded to the
+ * default emitter.
+ *
+ * Returns -1 on failure, 0 on success.
+ */
+int flatcc_builder_init(flatcc_builder_t *B);
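+
+/*
+ * A minimal lifecycle sketch, not part of this header;
+ * `build_something` stands in for schema generated or low-level calls
+ * that start and end a buffer.
+ *
+ *   flatcc_builder_t builder, *B = &builder;
+ *
+ *   if (flatcc_builder_init(B)) return -1;
+ *   build_something(B);      // start_buffer ... end_buffer
+ *   // copy out the finished buffer, e.g. via the default emitter,
+ *   // then reuse or tear down:
+ *   flatcc_builder_reset(B); // prepare for the next buffer
+ *   build_something(B);
+ *   flatcc_builder_clear(B); // release all memory
+ */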
+
+/**
+ * Use instead of `flatcc_builder_init` when providing a custom allocator
+ * or emitter. Leave emitter or allocator null to use default.
+ * Cleanup of emit and alloc context must be handled manually after
+ * the builder is cleared or reset, except if emitter is null the
+ * default will be automatically cleared and reset.
+ *
+ * Returns -1 on failure, 0 on success.
+ */
+int flatcc_builder_custom_init(flatcc_builder_t *B,
+ flatcc_builder_emit_fun *emit, void *emit_context,
+ flatcc_builder_alloc_fun *alloc, void *alloc_context);
+
+/*
+ * Returns (flatcc_emitter_t *) if the default context is used.
+ * Other emitters might have null contexts.
+ */
+void *flatcc_builder_get_emit_context(flatcc_builder_t *B);
+
+/**
+ * Prepares builder for a new build. The emitter is not told when a
+ * buffer is finished or when a new one begins, and must be told so
+ * separately. Allocated buffers will be zeroed, but may optionally be
+ * reduced to their defaults (signalled by reallocating each non-empty
+ * buffer to a single byte). General settings are cleared optionally,
+ * such as cache flushing. Buffer specific settings such as buffer
+ * identifier are always cleared.
+ *
+ * Returns -1 if allocator complains during buffer reduction, 0 on
+ * success.
+ */
+int flatcc_builder_custom_reset(flatcc_builder_t *B,
+ int reduce_buffers, int set_defaults);
+
+/*
+ * Same as `flatcc_builder_custom_reset` with default arguments
+ * where buffers are not reduced and default settings are not reset.
+ */
+int flatcc_builder_reset(flatcc_builder_t *B);
+
+/**
+ * Deallocates all memory by calling allocate with a zero size request
+ * on each buffer, then zeroing the builder structure itself.
+ */
+void flatcc_builder_clear(flatcc_builder_t *B);
+
+/**
+ * Allocates to next higher power of 2 using system realloc and ignores
+ * `alloc_context`. Only reduces size if a small subsequent increase in
+ * size would not trigger a reallocation. `alloc_type` is used to
+ * set minimum sizes. Hash tables are allocated to the exact requested
+ * size. See also `alloc_fun`.
+ */
+int flatcc_builder_default_alloc(void *alloc_context,
+ flatcc_iovec_t *b, size_t request, int zero_fill, int alloc_type);
+
+/**
+ * If non-zero, the vtable cache will get flushed whenever it reaches
+ * the given limit at a point in time where more space is needed. The
+ * limit is not exact as it is only tested when reallocation is
+ * required.
+ */
+void flatcc_builder_set_vtable_cache_limit(flatcc_builder_t *B, size_t size);
+
+/**
+ * Manual flushing of vtable for long running tasks. Mostly used
+ * internally to deal with nested buffers.
+ */
+void flatcc_builder_flush_vtable_cache(flatcc_builder_t *B);
+
+/**
+ * Low-level support function to aid in constructing nested buffers without
+ * allocation. Not for regular use.
+ *
+ * Call where `start_buffer` would have been placed when using
+ * `create_buffer` in a nested context. Save the return value on a stack
+ * as argument to `pop_buffer_alignment`.
+ *
+ * The call resets the current derived buffer alignment so the nested
+ * buffer will not be aligned to more than required.
+ *
+ * Often it will not be necessary to be so careful with alignment since
+ * the alignment cannot be invalid by failing to use push and pop, but
+ * for code generation it will ensure the correct result every time.
+ */
+uint16_t flatcc_builder_push_buffer_alignment(flatcc_builder_t *B);
+
+/**
+ * Low-level call.
+ *
+ * Call with the return value from push_buffer_alignment after a nested
+ * `create_buffer` call. The alignments merge back up in the buffer
+ * hierarchy so the top level buffer gets the largest of all alignments.
+ */
+void flatcc_builder_pop_buffer_alignment(flatcc_builder_t *B, uint16_t buffer_align);
+
+/**
+ * This value may be of interest when the buffer has been ended, for
+ * example when subsequently allocating memory for the buffer to ensure
+ * that memory is properly aligned.
+ */
+uint16_t flatcc_builder_get_buffer_alignment(flatcc_builder_t *B);
+
+/**
+ * Level 0 means no buffer is started, otherwise it increments with
+ * start calls and decrements with end calls (approximately for
+ * optimized operations such as table vectors).
+ *
+ * If `max_level` has been set, `get_level` always returns a value <=
+ * `max_level` provided no start call has failed.
+ *
+ * Level continues to increment inside nested buffers.
+ */
+int flatcc_builder_get_level(flatcc_builder_t *B);
+
+/**
+ * Setting the max level triggers a failure on start of new nestings
+ * when the level is reached. May be used to protect recursive descent
+ * parsers etc. or later buffer readers.
+ *
+ * The builder itself is not sensitive to depth, and the allocator is a
+ * better way to protect resource abuse.
+ *
+ * `max_level` is not reset inside nested buffers.
+ */
+void flatcc_builder_set_max_level(flatcc_builder_t *B, int level);
+
+/**
+ * By default ordinary data such as tables are placed in front of
+ * earlier produced content and vtables are placed at the very end thus
+ * clustering vtables together. This can be disabled so all content is
+ * placed in front. Nested buffers ignore this setting because they can
+ * only place content in front, as they cannot blend with the
+ * containing buffer's content. Clustering could be more cache friendly
+ * and also enables pre-shipping of the vtables during transmission.
+ */
+void flatcc_builder_set_vtable_clustering(flatcc_builder_t *B, int enable);
+
+/**
+ * Sets a new user supplied refmap which maps source pointers to
+ * references and returns the old refmap, or null. It is also
+ * possible to disable an existing refmap by setting a null
+ * refmap.
+ *
+ * A clone or pick operation may use this map when present,
+ * depending on the data type. If a hit is found, the stored
+ * reference will be used instead of performing a new clone or
+ * pick operation. It is also possible to manually populate the
+ * refmap. Note that the builder does not have a concept of
+ * clone or pick - these are higher level recursive operations
+ * to add data from one buffer to another - but such code may
+ * rely on the builder to provide the current refmap during
+ * recursive operations. For this reason, the builder makes no
+ * calls to the refmap interface on its own - it just stores the
+ * current refmap such that recursive operations can find it.
+ *
+ * Refmaps MUST be reset, replaced or disabled if a source
+ * pointer may be reused for different purposes - for example if
+ * repeatedly reading FlatBuffers into the same memory buffer
+ * and performing a clone into a buffer under construction.
+ * Refmaps may also be replaced if the same object is to be
+ * cloned several times keeping the internal DAG structure
+ * intact with every new clone being an independent object.
+ *
+ * Refmaps must also be replaced or disabled prior to starting a
+ * nested buffer and after stopping it, or when cloning an object
+ * as a nested root. THIS IS VERY EASY TO GET WRONG! The
+ * builder does a lot of bookkeeping for nested buffers but not
+ * in this case. Shared references may happen and they WILL fail
+ * verification and they WILL break when copying out a nested
+ * buffer to somewhere else. The user_frame stack may be used
+ * for pushing refmaps, but often user codes recursive stack
+ * will work just as well.
+ *
+ * It is entirely optional to use refmaps when cloning - they
+ * preserve DAG structure and may speed up operations or slow
+ * them down, depending on the source material.
+ *
+ * Refmaps may consume a lot of space when large offset vectors
+ * are cloned when these do not have significant shared
+ * references. They may also be very cheap to use without any
+ * dynamic allocation when objects are small and have at most a
+ * few references.
+ *
+ * Refmaps only support init, insert, find, reset, clear but not
+ * delete. There is a standard implementation in the runtime
+ * source tree but it can easily be replaced compile time and it
+ * may also be left out if unused. The builder wraps reset, insert,
+ * and find so the user does not have to check if a refmap is
+ * present but other operations must be done directly on the
+ * refmap.
+ *
+ * The builder wrapped refmap operations are valid on a null
+ * refmap which will find nothing and insert nothing.
+ *
+ * The builder will reset the refmap during a builder reset and
+ * clear the refmap during a builder clear operation. If the
+ * refmap goes out of scope before that happens it is important
+ * to call set_refmap with null and manually clear the refmap.
+ */
+static inline flatcc_refmap_t *flatcc_builder_set_refmap(flatcc_builder_t *B, flatcc_refmap_t *refmap)
+{
+ flatcc_refmap_t *refmap_old;
+
+ refmap_old = B->refmap;
+ B->refmap = refmap;
+ return refmap_old;
+}
+
+/* Retrieves the current refmap, or null. */
+static inline flatcc_refmap_t *flatcc_builder_get_refmap(flatcc_builder_t *B)
+{
+ return B->refmap;
+}
+
+/* Finds a reference, or a null reference if no refmap is active. */
+static inline flatcc_builder_ref_t flatcc_builder_refmap_find(flatcc_builder_t *B, const void *src)
+{
+ return B->refmap ? flatcc_refmap_find(B->refmap, src) : flatcc_refmap_not_found;
+}
+
+/*
+ * Inserts into the current refmap and returns the inserted ref
+ * upon success, or not_found on failure (default 0), or just
+ * returns ref if refmap is absent.
+ *
+ * Note that if an existing item exists, the ref is replaced
+ * and the new, not the old, ref is returned.
+ */
+static inline flatcc_builder_ref_t flatcc_builder_refmap_insert(flatcc_builder_t *B, const void *src, flatcc_builder_ref_t ref)
+{
+ return B->refmap ? flatcc_refmap_insert(B->refmap, src, ref) : ref;
+}
+
+static inline void flatcc_builder_refmap_reset(flatcc_builder_t *B)
+{
+ if (B->refmap) flatcc_refmap_reset(B->refmap);
+}
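+
+/*
+ * A minimal refmap usage sketch, not part of this header;
+ * `clone_monster` stands in for a schema generated clone operation
+ * that consults the refmap via the wrappers above, and `src` is a
+ * hypothetical source object.
+ *
+ *   flatcc_refmap_t refmap;
+ *
+ *   flatcc_refmap_init(&refmap);
+ *   flatcc_builder_set_refmap(B, &refmap);
+ *   clone_monster(B, src);           // shared subobjects cloned once
+ *   flatcc_builder_set_refmap(B, 0); // detach before refmap dies
+ *   flatcc_refmap_clear(&refmap);
+ */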
+
+
+enum flatcc_builder_buffer_flags {
+ flatcc_builder_is_nested = 1,
+ flatcc_builder_with_size = 2,
+};
+
+/**
+ * An alternative to start buffer, start struct/table ... end buffer.
+ *
+ * This call is mostly of interest as a means to quickly create a zero
+ * allocation top-level buffer header following a call to create_struct,
+ * or to create_vtable/create_table. For that, it is quite simple to
+ * use. For general buffer construction without allocation, more care is
+ * needed, as discussed below.
+ *
+ * If the content is created with `start/end_table` calls, or similar,
+ * it is better to use `start/end_buffer` since stack allocation is used
+ * anyway.
+ *
+ * The buffer alignment must be provided manually as it is not derived
+ * from constructed content, unlike `start/end_buffer`. Typically
+ * `align` would be same argument as provided to `create_struct`.
+ * `get_buffer_alignment` may also be used (note: `get_buffer_alignment`
+ * may return a different value after the call because it will be updated with
+ * the `block_align` argument to `create_buffer` but that is ok).
+ *
+ * The buffer may be constructed as a nested buffer with the `is_nested
+ * = 1` flag. As a nested buffer a ubyte vector header is placed before
+ * the aligned buffer header. A top-level buffer will normally have
+ * flags set to 0.
+ *
+ * A top-level buffer may also be constructed with the `with_size = 2`
+ * flag for top level buffers. It adds a size prefix similar to
+ * `is_nested` but the size is part of the aligned buffer. A size
+ * prefixed top level buffer must be accessed with a size prefix aware
+ * reader, or the buffer given to a standard reader must point to after
+ * the size field while keeping the buffer aligned to the size field
+ * (this will depend on the readers API which may be an arbitrary other
+ * language).
+ *
+ * If the `with_size` is used with the `is_nested` flag, the size is
+ * added as usual and all fields remain aligned as before, but padding
+ * is adjusted to ensure the buffer is aligned to the size field so
+ * that, for example, the nested buffer with size can safely be copied
+ * to a new memory buffer for consumption.
+ *
+ * Generally, references may only be used within the same buffer
+ * context. With `create_buffer` this becomes less precise. The rule
+ * here is that anything that would be valid with start/end_buffer
+ * nestings is also valid when removing the `start_buffer` call and
+ * replacing `end_buffer` with `create_buffer`.
+ *
+ * Note the additional burden of tracking buffer alignment manually -
+ * to help with this use `push_buffer_alignment` where `start_buffer`
+ * would have been placed, and `pop_buffer_alignment` after the
+ * `create_buffer` call, and use `get_buffer_alignment` as described
+ * above.
+ *
+ * `create_buffer` is not suitable as a container for buffers created
+ * with `start/end_buffer` as these make assumptions about context that
+ * create buffer does not provide. Also, there is no point in doing so,
+ * since the idea of `create_buffer` is to avoid allocation in the first
+ * place.
+ */
+flatcc_builder_ref_t flatcc_builder_create_buffer(flatcc_builder_t *B,
+ const char identifier[FLATBUFFERS_IDENTIFIER_SIZE],
+ uint16_t block_align,
+ flatcc_builder_ref_t ref, uint16_t align, int flags);
+
+/**
+ * Creates a struct within the current buffer without using any
+ * allocation.
+ *
+ * The struct should be used as a root in the `end_buffer` call or as a
+ * union value as there are no other ways to use struct while conforming
+ * to the FlatBuffer format - noting that tables embed structs in their
+ * own data area except in union fields.
+ *
+ * The struct should be in little endian format and follow the usual
+ * FlatBuffers alignment rules, although this API won't care about what
+ * is being stored.
+ *
+ * May also be used to simply emit a struct through the emitter
+ * interface without being in a buffer and without being a valid
+ * FlatBuffer.
+ */
+flatcc_builder_ref_t flatcc_builder_create_struct(flatcc_builder_t *B,
+ const void *data, size_t size, uint16_t align);
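+
+/*
+ * A minimal zero-allocation sketch, not part of this header: emit a
+ * struct root wrapped in a plain top-level buffer header. The struct
+ * layout (16 bytes, align 8) is illustrative.
+ *
+ *   struct { uint64_t a, b; } my_struct = { 1, 2 };
+ *   flatcc_builder_ref_t root;
+ *   uint16_t prev_align = flatcc_builder_push_buffer_alignment(B);
+ *
+ *   root = flatcc_builder_create_struct(B, &my_struct, 16, 8);
+ *   root = flatcc_builder_create_buffer(B, 0, 0, root,
+ *           flatcc_builder_get_buffer_alignment(B), 0);
+ *   flatcc_builder_pop_buffer_alignment(B, prev_align);
+ */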
+
+/**
+ * Starts a struct and returns a pointer that should be used immediately
+ * to fill in the struct in protocol endian format, and when done,
+ * `end_struct` should be called. The returned reference should be used
+ * as argument to `end_buffer` or as a union value. See also
+ * `create_struct`.
+ */
+void *flatcc_builder_start_struct(flatcc_builder_t *B,
+ size_t size, uint16_t align);
+
+/**
+ * Return a pointer also returned at start struct, e.g. for endian
+ * conversion.
+ */
+void *flatcc_builder_struct_edit(flatcc_builder_t *B);
+
+/**
+ * Emits the struct started by `start_struct` and returns a reference to
+ * be used as root in an enclosing `end_buffer` call or as a union
+ * value. As mentioned in `create_struct`, these can also be used more
+ * freely, but not while being conformant FlatBuffers.
+ */
+flatcc_builder_ref_t flatcc_builder_end_struct(flatcc_builder_t *B);
+
+/**
+ * The buffer always aligns to at least the offset size (typically 4)
+ * and the internal alignment requirements of the buffer content which
+ * is derived as content is added.
+ *
+ * In addition, block_align can be specified. This ensures the resulting
+ * buffer is at least aligned to the block size and that the total size
+ * is zero padded to fill a block multiple if necessary. Because the
+ * emitter operates on a virtual address range before the full buffer is
+ * aligned, it may have to make assumptions based on that: For example,
+ * it may be processing encryption blocks on the fly, and the resulting
+ * buffer should be aligned to the encryption block size, even if the
+ * content is just a byte aligned struct. Block align helps ensure this.
+ * If the block align is 1 there will be no attempt to zero pad at the
+ * end, but the content may still warrant padding after the header. End
+ * padding is only needed with clustered vtables (which is the default).
+ *
+ * `block_align` is allowed to be 0 meaning it will inherit from parent if
+ * present, and otherwise it defaults to 1.
+ *
+ * The identifier may be null, and it may optionally be set later with
+ * `set_identifier` before the `end_buffer` call.
+ *
+ * General note:
+ *
+ * Only references returned with this buffer as current (i.e. last
+ * unended buffer) can be stored in other objects (tables, offset
+ * vectors) also belonging to this buffer, or used as the root argument
+ * to `end_buffer`. A reference may be stored at most once, and unused
+ * references will result in buffer garbage. All calls must be balanced
+ * around the respective start / end operations, but may otherwise nest
+ * freely, including nested buffers. Nested buffers are supposed to be
+ * stored in a table offset field to comply with FlatBuffers, but the
+ * API does not place any restrictions on where references are stored,
+ * as long as they are indicated as offset fields.
+ *
+ * All alignment in all API calls must be between 1 and 256 and must be a
+ * power of 2. This is not checked. Only if explicitly documented can it
+ * also be 0 for a default value.
+ *
+ * `flags` can be `with_size` but `is_nested` is derived from context;
+ * see also `create_buffer`.
+ */
+int flatcc_builder_start_buffer(flatcc_builder_t *B,
+ const char identifier[FLATBUFFERS_IDENTIFIER_SIZE],
+ uint16_t block_align, int flags);
+
+/**
+ * The root object should be a struct or a table to conform to the
+ * FlatBuffers format, but technically it can also be a vector or a
+ * string, or even a child buffer (which is also a vector as seen by the
+ * buffer). The object must be created within the current buffer
+ * context, that is, while the current buffer is the deepest nested
+ * buffer on the stack.
+ */
+flatcc_builder_ref_t flatcc_builder_end_buffer(flatcc_builder_t *B, flatcc_builder_ref_t root);
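+
+/*
+ * A minimal start/end sketch, not part of this header;
+ * `build_root_table` stands in for schema generated table calls and
+ * "MONS" is an illustrative 4-byte identifier.
+ *
+ *   flatcc_builder_ref_t root, buffer;
+ *
+ *   if (flatcc_builder_start_buffer(B, "MONS", 0, 0)) return -1;
+ *   root = build_root_table(B); // e.g. start_table ... end_table
+ *   buffer = flatcc_builder_end_buffer(B, root);
+ *   if (!buffer) return -1;
+ */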
+
+/**
+ * The embed buffer is mostly intended to add an existing buffer as a
+ * nested buffer. The buffer will be wrapped in a ubyte vector such that
+ * the buffer is aligned at vector start, after the size field.
+ *
+ * If `align` is 0 it will default to 8 so that all FlatBuffer numeric
+ * types will be readable. NOTE: generally do not count on align 0 being
+ * valid or even checked by the API, but in this case it may be
+ * difficult to know the internal buffer alignment, and 1 would be the wrong
+ * choice.
+ *
+ * If `block_align` is set (non-zero), the buffer is placed in an isolated
+ * block multiple. This may cost up to almost 2 block sizes in padding.
+ * If the `block_align` argument is 0, it inherits from the parent
+ * buffer block_size, or defaults to 1.
+ *
+ * The `align` argument must be set to respect the buffers internal
+ * alignment requirements, but if the buffer is smaller it will not be
+ * padded to isolate the buffer. For example a buffer with
+ * `align = 64` and `size = 65` may share its last 64 byte block with
+ * other content, but not if `block_align = 64`.
+ *
+ * Because the ubyte size field is not, by default, part of the aligned
+ * buffer, significant space can be wasted if multiple blocks are added
+ * in sequence with a large block size.
+ *
+ * In most cases the distinction between the two alignments is not
+ * important, but it allows separate configuration of block internal
+ * alignment and block size, which can be important for auto-generated
+ * code that may know the alignment of the buffer, but not the users
+ * operational requirements.
+ *
+ * If the buffer is embedded without a parent buffer, it will simply
+ * emit the buffer through the emit interface, but may also add padding
+ * up to block alignment. At top-level there will be no size field
+ * header.
+ *
+ * If `with_size` flag is set, the buffer is aligned to size field and
+ * the above note about padding space no longer applies. The size field
+ * is added regardless. The `is_nested` flag has no effect since it is
+ * implied.
+ */
+flatcc_builder_ref_t flatcc_builder_embed_buffer(flatcc_builder_t *B,
+ uint16_t block_align,
+ const void *data, size_t size, uint16_t align, int flags);
+
+/**
+ * Applies to the innermost open buffer. The identifier may be null or
+ * contain all zero. Overrides any identifier given to the start buffer
+ * call.
+ */
+void flatcc_builder_set_identifier(flatcc_builder_t *B,
+ const char identifier[FLATBUFFERS_IDENTIFIER_SIZE]);
+
+enum flatcc_builder_type {
+ flatcc_builder_empty = 0,
+ flatcc_builder_buffer,
+ flatcc_builder_struct,
+ flatcc_builder_table,
+ flatcc_builder_vector,
+ flatcc_builder_offset_vector,
+ flatcc_builder_string,
+ flatcc_builder_union_vector
+};
+
+/**
+ * Returns the object type currently on the stack, for example if
+ * needing to decide how to close a buffer. Because a table is
+ * automatically added when starting a table buffer,
+ * `flatcc_builder_table_buffer` should not normally be seen and the level
+ * should be 2 just before closing a top-level table buffer, and 0
+ * after. A `flatcc_builder_struct_buffer` will be visible at level 1.
+ *
+ */
+enum flatcc_builder_type flatcc_builder_get_type(flatcc_builder_t *B);
+
+/**
+ * Similar to `get_type` but for a specific level. `get_type_at(B, 1)`
+ * will return `flatcc_builder_table_buffer` if this is the root buffer
+ * type. get_type_at(B, 0) is always `flatcc_builder_empty` and so are any
+ * level above `get_level`.
+ */
+enum flatcc_builder_type flatcc_builder_get_type_at(flatcc_builder_t *B, int level);
+
+/**
+ * The user stack is available for custom data. It may be used as
+ * a simple stack by extending or reducing the inner-most frame.
+ *
+ * A frame has a size and a location on the user stack. Entering
+ * a frame ensures the start is aligned to sizeof(size_t) and
+ * ensures the requested space is available without reallocation.
+ * When exiting a frame, the previous frame is restored.
+ *
+ * A user frame works completely independently of the builder's
+ * frame stack for tracking tables, vectors etc. and does not have
+ * to be fully unwound at exit, but obviously it is not valid to
+ * exit more often than entered.
+ *
+ * The frame is zeroed when entered.
+ *
+ * Returns a non-zero handle to the user frame upon success or
+ * 0 on allocation failure.
+ */
+size_t flatcc_builder_enter_user_frame(flatcc_builder_t *B, size_t size);
+
+/**
+ * Makes the parent user frame current, if any. It is not valid to call
+ * if there isn't any current frame. Returns handle to parent frame if
+ * any, or 0.
+ */
+size_t flatcc_builder_exit_user_frame(flatcc_builder_t *B);
+
+/**
+ * Exits the frame represented by the given handle. All more
+ * recently entered frames will also be exited. Returns the parent
+ * frame handle if any, or 0.
+ */
+size_t flatcc_builder_exit_user_frame_at(flatcc_builder_t *B, size_t handle);
+
+/**
+ * Returns a non-zero handle to the current inner-most user frame if
+ * any, or 0.
+ */
+size_t flatcc_builder_get_current_user_frame(flatcc_builder_t *B);
+
+/*
+ * Returns a pointer to the user frame at the given handle. Any active
+ * frame can be accessed in this manner but the pointer is invalidated
+ * by user frame enter and exit operations.
+ */
+void *flatcc_builder_get_user_frame_ptr(flatcc_builder_t *B, size_t handle);
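+
+/*
+ * A minimal usage sketch of the user frame calls above (illustration
+ * only; assumes an initialized builder `B`). The returned scratch
+ * memory is zeroed and the pointer is invalidated by later user frame
+ * enter/exit calls:
+ *
+ *     size_t frame = flatcc_builder_enter_user_frame(B, 16 * sizeof(uint32_t));
+ *     if (frame) {
+ *         uint32_t *scratch = flatcc_builder_get_user_frame_ptr(B, frame);
+ *         scratch[0] = 42;
+ *         flatcc_builder_exit_user_frame(B);
+ *     }
+ */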
+
+/**
+ * Returns the size of the buffer and the logical start and end address
+ * with respect to the emitter's address range. `end` - `start` also
+ * yields the size. During construction `size` is the emitted number of
+ * bytes and after buffer close it is the actual buffer size - by then
+ * the start is also the return value of close buffer. End marks the end
+ * of the virtual table cluster block.
+ *
+ * NOTE: there is no guarantee that all vtables end up in the cluster
+ * block if there is placed a limit on the vtable size, or if nested
+ * buffers are being used. On the other hand, if these conditions are
+ * met, it is guaranteed that all vtables are present if the vtable
+ * block is available (this depends on external transmission - the
+ * vtables are always emitted before tables using them). In all cases
+ * the vtables will behave as valid vtables in a flatbuffer.
+ */
+size_t flatcc_builder_get_buffer_size(flatcc_builder_t *B);
+
+/**
+ * Returns the reference to the start of the emitter buffer so far, or
+ * in total after buffer end, in the virtual address range used
+ * by the emitter. Start is also returned by buffer end.
+ */
+flatcc_builder_ref_t flatcc_builder_get_buffer_start(flatcc_builder_t *B);
+
+/**
+ * Returns the reference to the end of buffer emitted so far. When
+ * clustering vtables, this is the end of tables, or after buffer end,
+ * also zero padding if block aligned. If clustering is disabled, this
+ * method will return 0 as the buffer only grows down then.
+ */
+flatcc_builder_ref_t flatcc_builder_get_buffer_mark(flatcc_builder_t *B);
+
+/**
+ * Creates the vtable in the current buffer context, somewhat similar to
+ * how create_vector operates. Each call results in a new vtable even if
+ * an identical one has already been emitted.
+ *
+ * Also consider `create_cached_vtable` which will reuse existing
+ * vtables.
+ *
+ * This is a low-level function intended to support
+ * `create_cached_vtable` or equivalent, and `create_table`, both of
+ * which are normally used indirectly via `start_table`, `table_add`,
+ * `table_add_offset`..., `end_table`.
+ *
+ * Creates a vtable as a verbatim copy. This means the vtable must
+ * include the header fields containing the vtable size and the table
+ * size in little endian voffset_t encoding followed by the vtable
+ * entries in same encoding.
+ *
+ * The function may be used to copy vtables from other buffers
+ * since they are directly transferable.
+ *
+ * The returned reference is actually the emitted location + 1. This
+ * ensures the vtable is not mistaken for error because 0 is a valid
+ * vtable reference. `create_table` is aware of this and subtracts one
+ * before computing the final offset relative to the table. This also
+ * means vtable references are uniquely identifiable by having the
+ * lowest bit set.
+ *
+ * vtable references may be reused within the same buffer, but not in
+ * any parent or other related buffer (technically this is possible
+ * as long as it is within the same builder context, but it will not
+ * construct valid FlatBuffers because the buffer cannot be extracted in
+ * isolation).
+ */
+flatcc_builder_vt_ref_t flatcc_builder_create_vtable(flatcc_builder_t *B,
+ const flatbuffers_voffset_t *vt,
+ flatbuffers_voffset_t vt_size);
+
+/**
+ * Support function to `create_vtable`. See also the uncached version
+ * `create_vtable`.
+ *
+ * Looks up the constructed vtable on the vs stack to see if it matches
+ * a cached entry. If not, it emits a new vtable either at the end if
+ * top-level and clustering is enabled, or at the front (always for
+ * nested buffers).
+ *
+ * If the same vtable was already emitted in a different buffer, but not
+ * in the current buffer, the cache entry will be reused, but a new
+ * vtable will be emitted the first time it is used in the current buffer.
+ *
+ * The returned reference is + 1 relative to the emitted address range
+ * to identify it as a vtable and to avoid mistaking the valid 0
+ * reference for an error (clustered vtables tend to start at the end,
+ * at virtual address 0 and up).
+ *
+ * The hash function can be chosen arbitrarily but may result in
+ * duplicate emitted vtables if different hash functions are being used
+ * concurrently, such as mixing the default used by `start/end table`
+ * with a custom function (this is not incorrect, it only increases the
+ * buffer size and cache pressure).
+ *
+ * If a vtable has a unique ID by other means than hashing the content,
+ * such as an integer id, an offset into another buffer, or a pointer,
+ * a good hash may be multiplication by a 32-bit prime number. The hash
+ * table is not very sensitive to collisions as it uses externally
+ * chained hashing with move-to-front semantics.
+ */
+flatcc_builder_vt_ref_t flatcc_builder_create_cached_vtable(flatcc_builder_t *B,
+ const flatbuffers_voffset_t *vt,
+ flatbuffers_voffset_t vt_size, uint32_t vt_hash);
+
+/*
+ * Based on Knuth's prime multiplier.
+ *
+ * This is an incremental hash that is called with id and size of each
+ * non-empty field, and finally with the two vtable header fields
+ * when vtables are constructed via `table_add/table_add_offset`.
+ */
+#ifndef FLATCC_SLOW_MUL
+#ifndef FLATCC_BUILDER_INIT_VT_HASH
+#define FLATCC_BUILDER_INIT_VT_HASH(hash) { (hash) = (uint32_t)0x2f693b52UL; }
+#endif
+#ifndef FLATCC_BUILDER_UPDATE_VT_HASH
+#define FLATCC_BUILDER_UPDATE_VT_HASH(hash, id, offset) \
+ { (hash) = (((((uint32_t)id ^ (hash)) * (uint32_t)2654435761UL)\
+ ^ (uint32_t)(offset)) * (uint32_t)2654435761UL); }
+#endif
+#ifndef FLATCC_BUILDER_BUCKET_VT_HASH
+#define FLATCC_BUILDER_BUCKET_VT_HASH(hash, width) (((uint32_t)(hash)) >> (32 - (width)))
+#endif
+#endif
+
+/*
+ * By default we use Bernstein's hash as a fallback if multiplication is slow.
+ *
+ * This just has to be simple, fast, and work on devices without fast
+ * multiplication. We are not too sensitive to collisions. Feel free to
+ * experiment and replace.
+ */
+#ifndef FLATCC_BUILDER_INIT_VT_HASH
+#define FLATCC_BUILDER_INIT_VT_HASH(hash) { (hash) = 5381; }
+#endif
+#ifndef FLATCC_BUILDER_UPDATE_VT_HASH
+#define FLATCC_BUILDER_UPDATE_VT_HASH(hash, id, offset) \
+ { (hash) = ((((hash) << 5) ^ (id)) << 5) ^ (offset); }
+#endif
+#ifndef FLATCC_BUILDER_BUCKET_VT_HASH
+#define FLATCC_BUILDER_BUCKET_VT_HASH(hash, width) (((1 << (width)) - 1) & (hash))
+#endif
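+
+/*
+ * Sketch of how these macros combine when hashing a vtable manually
+ * (illustration only; `create_cached_vtable` normally does this
+ * internally, and the field ids and offsets here are made up):
+ *
+ *     uint32_t hash, bucket;
+ *
+ *     FLATCC_BUILDER_INIT_VT_HASH(hash);
+ *     FLATCC_BUILDER_UPDATE_VT_HASH(hash, 0, 8);
+ *     FLATCC_BUILDER_UPDATE_VT_HASH(hash, 1, 12);
+ *     bucket = FLATCC_BUILDER_BUCKET_VT_HASH(hash, 8);
+ */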
+
+/**
+ * Normally use `start_table` instead of this call.
+ *
+ * This is a low-level call only intended for high-performance
+ * applications that repeatedly churn out similar tables of known
+ * layout, or as a support layer for other builders that maintain their
+ * own allocation rather than using the stack of this builder.
+ *
+ * Creates a table from an already emitted vtable, actual data that is
+ * properly aligned relative to data start and in little endian
+ * encoding. Unlike structs, tables can have offset fields. These must
+ * be stored as flatcc_builder_ref_t types (which have uoffset_t size) as
+ * returned by the api in native encoding. The `offsets` table contains
+ * voffsets relative to `data` start (this is different from how vtables
+ * store offsets because they are relative to a table header). The
+ * `offsets` table is only used temporarily to translate the stored
+ * references and is not part of final buffer content. `offsets` may be
+ * null if `offset_count` is 0. `align` should be the highest aligned
+ * field in the table, but `size` need not be a multiple of `align`.
+ * Aside from endian encoding, the vtable must record a table size equal
+ * to `size + sizeof(flatbuffers_uoffset_t)` because it includes the
+ * table header field size. The vtable is not accessed by this call (nor
+ * is it available). Unlike other references, the vtable reference may
+ * be shared between tables in the same buffer (not with any related
+ * buffer such as a parent buffer).
+ *
+ * The operation will not use any allocation, but will update the
+ * alignment of the containing buffer if any.
+ *
+ * Note: unlike other create calls, except `create_offset_vector`,
+ * the source data is modified in order to translate references into
+ * offsets before emitting the table.
+ */
+flatcc_builder_ref_t flatcc_builder_create_table(flatcc_builder_t *B,
+ const void *data, size_t size, uint16_t align,
+ flatbuffers_voffset_t *offsets, int offset_count,
+ flatcc_builder_vt_ref_t vt_ref);
+
+/**
+ * Starts a table, typically following a start_buffer call as an
+ * alternative to starting a struct, or to create table fields to be
+ * stored in a parent table, or in an offset vector.
+ * A number of `table_add` and `table_add_offset` calls may be placed
+ * before the `end_table` call. Struct fields should NOT use `struct`
+ * related calls (because table structs are in-place), rather they should
+ * use the `table_add` call with the appropriate size and alignment.
+ *
+ * A table, like other reference returning calls, may also be started
+ * outside a buffer if the buffer header and alignment are of no
+ * interest to the application, for example as part of an externally
+ * built buffer.
+ *
+ * `count` must be larger than the largest id used for this table
+ * instance. Normally it is set to the number of fields defined in the
+ * schema, but it may be less if memory is constrained and only a few
+ * fields with low-valued ids are in use. The count can be extended later
+ * with `reserve_table` if necessary. `count` may also be set to a
+ * large enough value such as FLATBUFFERS_ID_MAX + 1 if memory is not a
+ * concern (reserves about twice the maximum vtable size to track the
+ * current vtable and voffsets where references must be translated to
+ * offsets at table end). `count` may be zero if for example
+ * `reserve_table` is being used.
+ *
+ * Returns -1 on error, 0 on success.
+ */
+int flatcc_builder_start_table(flatcc_builder_t *B, int count);
+
+/**
+ * Call before adding a field with an id that is not below the count set
+ * at table start. Not needed in most cases. For performance reasons
+ * the builder does not check all bounds all the time, but the user
+ * can do so if memory constraints prevent start_table from using a
+ * conservative value. See also `start_table`.
+ *
+ * Note: this call has absolutely no effect on the table layout, it just
+ * prevents internal buffer overruns.
+ *
+ * Returns -1 on error, 0 on success.
+ */
+int flatcc_builder_reserve_table(flatcc_builder_t *B, int count);
+
+/**
+ * Completes the table constructed on the internal stack including
+ * emitting a vtable, or finding a matching vtable that has already been
+ * emitted to the same buffer. (Vtables cannot be shared between
+ * buffers, but they can be shared between tables of the same buffer).
+ *
+ * Note: there is a considerable, but necessary, amount of bookkeeping
+ * involved in constructing tables. The `create_table` call is much
+ * faster, but it also expects a lot of work to be done already.
+ *
+ * Tables can be created with no fields added. This will result in an
+ * empty vtable and a table with just a vtable reference. If a table is
+ * used as a sub-table, such a table might also not be stored at all,
+ * but we do not return a special reference for that, nor do we provide
+ * an option to not create the table in this case. This may be
+ * interpreted as the difference between a null table (not stored in
+ * parent), and an empty table with a unique offset (and thus identity)
+ * different from other empty tables.
+ */
+flatcc_builder_ref_t flatcc_builder_end_table(flatcc_builder_t *B);
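+
+/*
+ * A minimal sketch of the start/add/end table flow (illustration only;
+ * `flatcc_builder_init` and `flatcc_builder_start_buffer` are declared
+ * earlier in this header, the `start_buffer` argument order of
+ * identifier, block_align, flags is assumed here, error checks are
+ * omitted, and a little-endian host is assumed for the memcpy):
+ *
+ *     flatcc_builder_t builder, *B = &builder;
+ *     flatcc_builder_ref_t root;
+ *     uint32_t hp = 80;
+ *
+ *     flatcc_builder_init(B);
+ *     flatcc_builder_start_buffer(B, 0, 0, 0);
+ *     flatcc_builder_start_table(B, 2);
+ *     memcpy(flatcc_builder_table_add(B, 0, 4, 4), &hp, 4);
+ *     root = flatcc_builder_end_table(B);
+ *     flatcc_builder_end_buffer(B, root);
+ */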
+
+/**
+ * Optionally this method can be called just before `flatcc_builder_end_table`
+ * to verify that all required fields have been set.
+ * Each entry is a table field id.
+ *
+ * Union fields should use the type field when checking for presence and
+ * may also want to check the soundness of the union field overall using
+ * `check_union_field` with the id one higher than the type field id.
+ *
+ * This function is typically called by an assertion in generated builder
+ * interfaces while release builds may want to avoid this performance
+ * overhead.
+ *
+ * Returns 1 if all fields are matched, 0 otherwise.
+ */
+int flatcc_builder_check_required(flatcc_builder_t *B, const flatbuffers_voffset_t *required, int count);
+
+/**
+ * Same as `check_required` when called with a single element.
+ *
+ * Typically used when direct calls are more convenient than building an
+ * array first. Useful when dealing with untrusted input such as parsed
+ * text from an external source.
+ */
+int flatcc_builder_check_required_field(flatcc_builder_t *B, flatbuffers_voffset_t id);
+
+/**
+ * Checks that a union field is valid.
+ *
+ * The criterion is:
+ *
+ * If the type field is not present (at id - 1), or it holds a zero value,
+ * then the table field (at id) must also be absent. If the type field is
+ * present and non-zero, the table field (at id) must be present.
+ *
+ * Generated builder code may be able to enforce valid unions without
+ * this check by setting both type and table together, but e.g. parsers
+ * may receive the type and the table independently and then it makes
+ * sense to validate the union fields before table completion.
+ *
+ * Note that an absent union field is perfectly valid. If a union is
+ * required, the type field (id - 1), should be checked separately
+ * while the table field should only be checked here because it can
+ * (and must) be absent when the type is NONE (= 0).
+ */
+int flatcc_builder_check_union_field(flatcc_builder_t *B, flatbuffers_voffset_t id);
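+
+/*
+ * Sketch of a presence check just before ending a table (illustration
+ * only; the field ids are hypothetical and `assert` requires
+ * <assert.h>):
+ *
+ *     static const flatbuffers_voffset_t required[] = { 0, 2 };
+ *     flatcc_builder_ref_t ref;
+ *
+ *     assert(flatcc_builder_check_required(B, required, 2));
+ *     assert(flatcc_builder_check_union_field(B, 4));
+ *     ref = flatcc_builder_end_table(B);
+ */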
+
+/**
+ * A struct, enum or scalar added should be stored in little endian in
+ * the return pointer location. The pointer is short lived and will
+ * not necessarily survive other builder calls.
+ *
+ * A union type field can also be set using this call. In fact, this is
+ * the only way to deal with unions via this API. Consequently, it is
+ * the user's responsibility to ensure the appropriate type is added
+ * at the next higher id.
+ *
+ * Null and default values:
+ *
+ * FlatBuffers does not officially provide an option for null values
+ * because it does not distinguish between default values and values
+ * that are not present. At this api level, we do not deal with defaults
+ * at all. The caller should test the stored value against the default
+ * value and only add the field if it does not match the default. This
+ * only applies to scalar and enum values. Structs cannot have defaults
+ * so their absence means null. Strings, vectors and subtables have
+ * natural null values distinct from the empty object, and empty objects
+ * with distinct identities are also possible.
+ *
+ * To handle Null for scalars, the following approach is recommended:
+ *
+ * Provide a schema-specific `add` operation that only calls this
+ * low-level add method if the default does not match, and also provide
+ * another `set` operation that always stores the value, regardless of
+ * default. For most readers this will be transparent, except for extra
+ * space used, but for Null aware readers, these can support operations
+ * to test for Null/default/other value while still supporting the
+ * normal read operation that returns default when a value is absent
+ * (i.e. Null).
+ *
+ * It is valid to call with a size of 0 - the effect being adding the
+ * vtable entry. The call may also be dropped in this case to reduce
+ * the vtable size - the difference will be in null detection.
+ */
+void *flatcc_builder_table_add(flatcc_builder_t *B, int id, size_t size, uint16_t align);
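+
+/*
+ * Sketch of the recommended schema-specific `add`/`set` split for a
+ * scalar field (illustration only; the name, field id and default are
+ * hypothetical and a little-endian host is assumed):
+ *
+ *     static inline void Monster_hp_add(flatcc_builder_t *B, uint16_t hp)
+ *     {
+ *         void *p;
+ *
+ *         if (hp == 100) return;
+ *         p = flatcc_builder_table_add(B, 2, sizeof(hp), 2);
+ *         if (p) memcpy(p, &hp, sizeof(hp));
+ *     }
+ *
+ * A matching `set` operation would store unconditionally so Null-aware
+ * readers can distinguish an absent field from a default-valued one.
+ */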
+
+/**
+ * Returns a pointer to the buffer holding the last field added. The
+ * size argument must match the field size added. May, for example, be
+ * used to perform endian conversion after initially updating field
+ * as a native struct. Must be called before the table is ended.
+ */
+void *flatcc_builder_table_edit(flatcc_builder_t *B, size_t size);
+
+/**
+ * Similar to `table_add` but copies source data into the buffer before
+ * it is returned. Useful when adding a larger struct already encoded in
+ * little endian.
+ */
+void *flatcc_builder_table_add_copy(flatcc_builder_t *B, int id, const void *data, size_t size, uint16_t align);
+
+/**
+ * Add a string, vector, or sub-table depending on the type of the
+ * field identifier. The offset ref obtained when the field object was
+ * closed should be stored as is in the given pointer. The pointer
+ * is only valid short term, so create the object before calling
+ * add to table, but the owner table can be started earlier. Never mix
+ * refs from nested buffers with parent buffers.
+ *
+ * Also use this method to add nested buffers. A nested buffer is
+ * simply a buffer created while another buffer is open. The buffer
+ * close operation provides the necessary reference.
+ *
+ * When the table closes, all references get converted into offsets.
+ * Before that point, it is not required that the offset is written
+ * to.
+ */
+flatcc_builder_ref_t *flatcc_builder_table_add_offset(flatcc_builder_t *B, int id);
+
+/*
+ * Adds a union type and reference in a single operation and returns 0
+ * on success. Stores the type field at `id - 1` and the value at
+ * `id`. The `value` is a reference to a table, to a string, or to a
+ * standalone `struct` outside the table.
+ *
+ * If the type is 0, the value field must also be 0.
+ *
+ * Unions can also be added as separate calls to the type and the offset
+ * separately, which can lead to better packing when the type is placed
+ * together with other small fields.
+ */
+int flatcc_builder_table_add_union(flatcc_builder_t *B, int id,
+ flatcc_builder_union_ref_t uref);
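+
+/*
+ * Sketch of adding a union field in one call (illustration only; the
+ * `type` and `value` member names are an assumption about the layout
+ * of `flatcc_builder_union_ref_t`, and the ids are made up):
+ *
+ *     flatcc_builder_union_ref_t uref;
+ *
+ *     uref.type = 1;
+ *     uref.value = member_ref;
+ *     flatcc_builder_table_add_union(B, 5, uref);
+ *
+ * Here `member_ref` would be a reference returned by `end_table` and
+ * the type field is implicitly stored at id 4.
+ */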
+
+/*
+ * Adds a union type vector and value vector in a single operation
+ * and returns 0 on success.
+ *
+ * If both the type and value vectors are null, nothing is added.
+ * Otherwise both must be present and have the same length.
+ *
+ * Any 0 entry in the type vector must also have a 0 entry in
+ * the value vector.
+ */
+int flatcc_builder_table_add_union_vector(flatcc_builder_t *B, int id,
+ flatcc_builder_union_vec_ref_t uvref);
+
+/**
+ * Creates a vector in a single operation using an externally supplied
+ * buffer. This completely bypasses the stack, but the size must be
+ * known and the content must be little endian. Do not use for strings
+ * and offset vectors. Other flatbuffer vectors could be used as a
+ * source, but the length prefix is not required.
+ *
+ * Set `max_count` to `FLATBUFFERS_COUNT_MAX(elem_size)` before a call
+ * to any string or vector operation to get the maximum safe vector
+ * size, or use (size_t)-1 if overflow is not a concern.
+ *
+ * The max count property is a global property that remains until
+ * explicitly changed.
+ *
+ * `max_count` is to prevent malicious or accidental overflow which is
+ * difficult to detect by multiplication alone, depending on the type
+ * sizes being used and having `max_count` thus avoids a division for
+ * every vector created. `max_count` does not guarantee a vector will
+ * fit in an empty buffer, it just ensures the internal size checks do
+ * not overflow. A safe, sane limit would be max_count / 4 because that
+ * is half the maximum buffer size that can realistically be
+ * constructed, corresponding to a vector size of `UOFFSET_MAX / 4`
+ * which can always hold the vector in 1GB excluding the size field when
+ * sizeof(uoffset_t) = 4.
+ */
+flatcc_builder_ref_t flatcc_builder_create_vector(flatcc_builder_t *B,
+ const void *data, size_t count, size_t elem_size, uint16_t align, size_t max_count);
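+
+/*
+ * Sketch (illustration only; assumes a little-endian host so the
+ * source data is already in wire format):
+ *
+ *     uint32_t data[3] = { 1, 2, 3 };
+ *     flatcc_builder_ref_t vec = flatcc_builder_create_vector(B,
+ *             data, 3, sizeof(uint32_t), sizeof(uint32_t),
+ *             FLATBUFFERS_COUNT_MAX(sizeof(uint32_t)));
+ */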
+
+/**
+ * Starts a vector on the stack.
+ *
+ * Do not use these calls for string or offset vectors, but do store
+ * scalars, enums and structs, always in little endian encoding.
+ *
+ * Use `extend_vector` subsequently to add zero, one or more elements
+ * at a time.
+ *
+ * See `create_vector` for `max_count` argument (strings and offset
+ * vectors have a fixed element size and do not need this argument).
+ *
+ * Returns 0 on success.
+ */
+int flatcc_builder_start_vector(flatcc_builder_t *B, size_t elem_size,
+ uint16_t align, size_t max_count);
+
+/**
+ * Emits the vector constructed on the stack by start_vector.
+ *
+ * The vector may be accessed in the emitted stream using the returned
+ * reference, even if the containing buffer is still under construction.
+ * This may be useful for sorting. This api does not support sorting
+ * because offset vectors cannot read their references after emission,
+ * and while plain vectors could be sorted, it has been chosen that this
+ * task is better left as a separate processing step. Generated code can
+ * provide sorting functions that work on final in-memory buffers.
+ */
+flatcc_builder_ref_t flatcc_builder_end_vector(flatcc_builder_t *B);
+
+/** Returns the number of elements currently on the stack. */
+size_t flatcc_builder_vector_count(flatcc_builder_t *B);
+
+/**
+ * Returns a pointer to the first vector element on the stack,
+ * accessible up to the number of elements currently on the stack.
+ */
+void *flatcc_builder_vector_edit(flatcc_builder_t *B);
+
+/**
+ * Returns a zero initialized buffer to a new region of the vector which
+ * is extended at the end. The buffer must be consumed before other api
+ * calls that may affect the stack, including `extend_vector`.
+ *
+ * Do not use for strings, offset or union vectors. May be used for nested
+ * buffers, but these have dedicated calls to provide better alignment.
+ */
+void *flatcc_builder_extend_vector(flatcc_builder_t *B, size_t count);
+
+/**
+ * A specialized `extend_vector` that pushes a single element.
+ *
+ * Returns the buffer holding a modifiable copy of the added content,
+ * or null on error. Note: for structs, care must be taken to ensure
+ * the source has been zero padded. For this reason it may be better to
+ * use extend(B, 1) and assign specific fields instead.
+ */
+void *flatcc_builder_vector_push(flatcc_builder_t *B, const void *data);
+
+/**
+ * Pushes multiple elements at a time.
+ *
+ * Returns the buffer holding a modifiable copy of the added content,
+ * or null on error.
+ */
+void *flatcc_builder_append_vector(flatcc_builder_t *B, const void *data, size_t count);
+
+/**
+ * Removes elements already added to a vector that has not been ended.
+ * For example, a parser building a vector from a list may remove the
+ * trailing comma,
+ * or the vector may simply overallocate to get some temporary working
+ * space. The total vector size must never become negative.
+ *
+ * Returns -1 if the count is larger than the current count, or 0 on success.
+ */
+int flatcc_builder_truncate_vector(flatcc_builder_t *B, size_t count);
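+
+/*
+ * Sketch of the over-allocate-then-trim pattern described above
+ * (illustration only; `parse_elements` is a hypothetical parser that
+ * returns how many of the 8 reserved elements it filled in):
+ *
+ *     uint32_t *p;
+ *     size_t n;
+ *     flatcc_builder_ref_t vec;
+ *
+ *     flatcc_builder_start_vector(B, sizeof(uint32_t), sizeof(uint32_t),
+ *             FLATBUFFERS_COUNT_MAX(sizeof(uint32_t)));
+ *     p = flatcc_builder_extend_vector(B, 8);
+ *     n = parse_elements(p, 8);
+ *     flatcc_builder_truncate_vector(B, 8 - n);
+ *     vec = flatcc_builder_end_vector(B);
+ */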
+
+/*
+ * Similar to `create_vector` but with references that get translated
+ * into offsets. The references must, as usual, belong to the current
+ * buffer. Strings, scalar and struct vectors can be emitted directly
+ * without stack allocation, but offset vectors must translate the
+ * offsets and therefore need the temporary space. Thus, this function
+ * is roughly equivalent to start, append, end offset vector.
+ *
+ * See also `flatcc_builder_create_offset_vector_direct`.
+ */
+flatcc_builder_ref_t flatcc_builder_create_offset_vector(flatcc_builder_t *B,
+ const flatcc_builder_ref_t *data, size_t count);
+
+/*
+ * NOTE: this call takes non-const source array of references
+ * and destroys the content.
+ *
+ * This is a faster version of `create_offset_vector` where the
+ * source references are destroyed. In return the vector can be
+ * emitted directly without passing over the stack.
+ */
+flatcc_builder_ref_t flatcc_builder_create_offset_vector_direct(flatcc_builder_t *B,
+ flatcc_builder_ref_t *data, size_t count);
+
+
+/**
+ * Starts a vector holding offsets to tables or strings. Before
+ * completion it will hold `flatcc_builder_ref_t` references because the
+ * offset is not known until the vector start location is known, which
+ * depends to the final size, which for parsers is generally unknown.
+ */
+int flatcc_builder_start_offset_vector(flatcc_builder_t *B);
+
+/**
+ * Similar to `end_vector` but updates all stored references so they
+ * become offsets to the vector start.
+ */
+flatcc_builder_ref_t flatcc_builder_end_offset_vector(flatcc_builder_t *B);
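+
+/*
+ * Sketch (illustration only): building a vector of strings from
+ * references, using the push call declared further below:
+ *
+ *     flatcc_builder_ref_t vec;
+ *
+ *     flatcc_builder_start_offset_vector(B);
+ *     flatcc_builder_offset_vector_push(B,
+ *             flatcc_builder_create_string_str(B, "hello"));
+ *     flatcc_builder_offset_vector_push(B,
+ *             flatcc_builder_create_string_str(B, "world"));
+ *     vec = flatcc_builder_end_offset_vector(B);
+ */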
+
+/**
+ * Same as `flatcc_builder_end_offset_vector` except null references are
+ * permitted when the corresponding `type` entry is 0 (the 'NONE' type).
+ * This makes it possible to build union vectors with less overhead when
+ * the `type` vector is already known. Use standard offset vector calls
+ * prior to this call.
+ */
+flatcc_builder_ref_t flatcc_builder_end_offset_vector_for_unions(flatcc_builder_t *B,
+ const flatcc_builder_utype_t *type);
+
+/** Returns the number of elements currently on the stack. */
+size_t flatcc_builder_offset_vector_count(flatcc_builder_t *B);
+
+/**
+ * Returns a pointer to the first vector element on the stack,
+ * accessible up to the number of elements currently on the stack.
+ */
+void *flatcc_builder_offset_vector_edit(flatcc_builder_t *B);
+
+/**
+ * Similar to `extend_vector` but returns a buffer indexable as
+ * `flatcc_builder_ref_t` array. All elements must be set to a valid
+ * unique non-null reference, but truncate and extend may be used to
+ * perform edits. Unused references will leave garbage in the buffer.
+ * References should not originate from any other buffer than the
+ * current, including parents and nested buffers. It is valid to reuse
+ * references in DAG form when contained in the same buffer, excluding
+ * any nested, sibling or parent buffers.
+ */
+flatcc_builder_ref_t *flatcc_builder_extend_offset_vector(flatcc_builder_t *B, size_t count);
+
+/** Similar to truncate_vector. */
+int flatcc_builder_truncate_offset_vector(flatcc_builder_t *B, size_t count);
+
+/**
+ * A specialized extend that pushes a single element.
+ *
+ * Returns the buffer holding a modifiable copy of the added content,
+ * or null on error.
+ */
+flatcc_builder_ref_t *flatcc_builder_offset_vector_push(flatcc_builder_t *B,
+ flatcc_builder_ref_t ref);
+
+/**
+ * Takes an array of refs as argument to do a multi push operation.
+ *
+ * Returns the buffer holding a modifiable copy of the added content,
+ * or null on error.
+ */
+flatcc_builder_ref_t *flatcc_builder_append_offset_vector(flatcc_builder_t *B,
+ const flatcc_builder_ref_t *refs, size_t count);
+
+/**
+ * All union vector operations are like offset vector operations,
+ * except they take a struct with a type and a reference rather than
+ * just a reference. The finished union vector is returned as a struct
+ * of two references, one for the type vector and one for the table offset
+ * vector. Each reference goes to a separate table field where the
+ * offset vector field id must be one larger than the type vector's.
+ */
+
+/**
+ * Creates a union vector which is in reality two vectors, a type vector
+ * and an offset vector. References to both vectors are returned.
+ */
+flatcc_builder_union_vec_ref_t flatcc_builder_create_union_vector(flatcc_builder_t *B,
+ const flatcc_builder_union_ref_t *urefs, size_t count);
+
+/*
+ * NOTE: this call takes non-const source array of references
+ * and destroys the content. The type array remains intact.
+ *
+ * This is a faster version of `create_union_vector` where the source
+ * references are destroyed and where the types are given in a separate
+ * array. In return the vector can be emitted directly without passing
+ * over the stack.
+ *
+ * Unlike `create_offset_vector` we do allow null references but only if
+ * the union type is NONE (0).
+ */
+flatcc_builder_union_vec_ref_t flatcc_builder_create_union_vector_direct(flatcc_builder_t *B,
+ const flatcc_builder_utype_t *types, flatcc_builder_ref_t *data, size_t count);
+
+/*
+ * Creates just the type vector part of a union vector. This is
+ * similar to a normal `create_vector` call except that the size
+ * and alignment are given implicitly. Can be used during
+ * cloning or similar operations where the types are all given
+ * but the values must be handled one by one as prescribed by
+ * the type. The values can be added separately as an offset vector.
+ */
+flatcc_builder_ref_t flatcc_builder_create_type_vector(flatcc_builder_t *B,
+ const flatcc_builder_utype_t *types, size_t count);
+
+/**
+ * Starts a vector holding types and offsets to tables or strings. Before
+ * completion it will hold `flatcc_builder_union_ref_t` references because the
+ * offset is not known until the vector start location is known, which
+ * depends on the final size, which for parsers is generally unknown,
+ * and also because the union type must be separated out into a separate
+ * vector. It would not be practical to push on two different vectors
+ * during construction.
+ */
+int flatcc_builder_start_union_vector(flatcc_builder_t *B);
+
+/**
+ * Similar to `end_vector` but updates all stored references so they
+ * become offsets to the vector start and splits the union references
+ * into a type vector and an offset vector.
+ */
+flatcc_builder_union_vec_ref_t flatcc_builder_end_union_vector(flatcc_builder_t *B);
+
+/** Returns the number of elements currently on the stack. */
+size_t flatcc_builder_union_vector_count(flatcc_builder_t *B);
+
+/**
+ * Returns a pointer to the first vector element on the stack,
+ * accessible up to the number of elements currently on the stack.
+ */
+void *flatcc_builder_union_vector_edit(flatcc_builder_t *B);
+
+/**
+ * Similar to `extend_offset_vector` but returns a buffer indexable as a
+ * `flatcc_builder_union_ref_t` array. All elements must be set to a valid
+ * unique non-null reference with a valid union type to match, or it
+ * must be null with a zero union type.
+ */
+flatcc_builder_union_ref_t *flatcc_builder_extend_union_vector(flatcc_builder_t *B, size_t count);
+
+/** Similar to truncate_vector. */
+int flatcc_builder_truncate_union_vector(flatcc_builder_t *B, size_t count);
+
+/**
+ * A specialized extend that pushes a single element.
+ *
+ * Returns the buffer holding a modifiable copy of the added content,
+ * or null on error.
+ */
+flatcc_builder_union_ref_t *flatcc_builder_union_vector_push(flatcc_builder_t *B,
+ flatcc_builder_union_ref_t uref);
+
+/**
+ * Takes an array of union_refs as argument to do a multi push operation.
+ *
+ * Returns the buffer holding a modifiable copy of the added content,
+ * or null on error.
+ */
+flatcc_builder_union_ref_t *flatcc_builder_append_union_vector(flatcc_builder_t *B,
+ const flatcc_builder_union_ref_t *urefs, size_t count);
+
+/**
+ * Faster string operation that avoids temporary stack storage. The
+ * string is not required to be zero-terminated, but is expected
+ * (unchecked) to be utf-8. Embedded zeroes would be allowed but
+ * ubyte vectors should be used for that. The resulting string will
+ * have a zero termination added, not included in length.
+ */
+flatcc_builder_ref_t flatcc_builder_create_string(flatcc_builder_t *B,
+ const char *s, size_t len);
+
+/** `create_string` up to zero termination of source. */
+flatcc_builder_ref_t flatcc_builder_create_string_str(flatcc_builder_t *B,
+ const char *s);
+
+/**
+ * `create_string` up to zero termination or at most max_len of source.
+ *
+ * Note that like `strncpy` it will include `max_len` characters if
+ * the source is longer than `max_len`, but unlike `strncpy` it will
+ * always add zero termination.
+ */
+flatcc_builder_ref_t flatcc_builder_create_string_strn(flatcc_builder_t *B, const char *s, size_t max_len);
+
+/**
+ * Starts an empty string that can be extended subsequently.
+ *
+ * While the string is being created, it is guaranteed that there is
+ * always a null character after the end of the current string length.
+ * This also holds after `extend` and `append` operations. It is not
+ * allowed to modify the null character.
+ *
+ * Returns 0 on success.
+ */
+int flatcc_builder_start_string(flatcc_builder_t *B);
+
+/**
+ * Similar to `extend_vector` except for the buffer return type and a
+ * slight speed advantage. Strings are expected to contain utf-8 content
+ * but this isn't verified, and null characters would be accepted. The
+ * length is given in bytes.
+ *
+ * Appending too much, then truncating can be used to trim string
+ * escapes during parsing, or convert between unicode formats etc.
+ */
+char *flatcc_builder_extend_string(flatcc_builder_t *B, size_t len);
+
+/**
+ * Concatenates `len` bytes of a string. If the string contains zeroes (which
+ * it formally shouldn't), they will be copied in.
+ *
+ * Returns the buffer holding a modifiable copy of the added content,
+ * or null on error.
+ */
+char *flatcc_builder_append_string(flatcc_builder_t *B, const char *s, size_t len);
+
+/** `append_string` up to zero termination of source. */
+char *flatcc_builder_append_string_str(flatcc_builder_t *B, const char *s);
+
+/** `append_string` up to zero termination or at most max_len of source. */
+char *flatcc_builder_append_string_strn(flatcc_builder_t *B, const char *s, size_t max_len);
+
+/**
+ * Similar to `truncate_vector` available for consistency and a slight
+ * speed advantage. Reduces string by `len` bytes - it does not set
+ * the length. The resulting length must not become negative. Zero
+ * termination is not counted.
+ *
+ * Returns -1 if the length becomes negative, 0 on success.
+ */
+int flatcc_builder_truncate_string(flatcc_builder_t *B, size_t len);
+
+/**
+ * Similar to `end_vector` but adds a trailing zero not included
+ * in the length. The trailing zero is added regardless of whatever
+ * zero content may exist in the provided string (although it
+ * formally should not contain any).
+ */
+flatcc_builder_ref_t flatcc_builder_end_string(flatcc_builder_t *B);
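+
+/*
+ * Sketch of the append-then-truncate pattern mentioned above, here
+ * replacing a two character escape sequence with the single character
+ * it denotes (illustration only):
+ *
+ *     flatcc_builder_ref_t s;
+ *
+ *     flatcc_builder_start_string(B);
+ *     flatcc_builder_append_string_str(B, "line\\n");
+ *     flatcc_builder_truncate_string(B, 2);
+ *     flatcc_builder_append_string(B, "\n", 1);
+ *     s = flatcc_builder_end_string(B);
+ */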
+
+/** Returns the length of string currently on the stack. */
+size_t flatcc_builder_string_len(flatcc_builder_t *B);
+
+/**
+ * Returns a pointer to the start of the string,
+ * accessible up to the length of the string currently on the stack.
+ */
+char *flatcc_builder_string_edit(flatcc_builder_t *B);
+
+
+/*
+ * Only for use with the default emitter.
+ *
+ * Fast access to small buffers from the default emitter.
+ *
+ * Only valid for default emitters before `flatcc_builder_clear`. The
+ * returned buffer is not valid after a call to `flatcc_builder_reset` or
+ * `flatcc_builder_clear`.
+ *
+ * Returns null if the buffer size is too large to have a linear
+ * memory representation or if the emitter is not the default. A valid
+ * size is between half and a full emitter page size depending on vtable
+ * content.
+ *
+ * Non-default emitters must be accessed by means specific to the
+ * particular emitter.
+ *
+ * If `size_out` is not null, it is set to the buffer size, or 0 if
+ * operation failed.
+ *
+ * The returned buffer should NOT be deallocated explicitly.
+ *
+ * The buffer size is the size reported by `flatcc_builder_get_buffer_size`.
+ */
+void *flatcc_builder_get_direct_buffer(flatcc_builder_t *B, size_t *size_out);
+
+/*
+ * Only for use with the default emitter.
+ *
+ * Default finalizer that allocates a buffer from the default emitter.
+ *
+ * Returns null if memory could not be allocated or if the emitter is
+ * not the default. This is just a convenience method - there are many
+ * other possible ways to extract the result of the emitter depending on
+ * use case.
+ *
+ * If `size_out` is not null, it is set to the buffer size, or 0 if
+ * operation failed.
+ *
+ * The allocated buffer is aligned according to malloc which may not be
+ * sufficient in advanced cases - for that purpose
+ * `flatcc_builder_finalize_aligned_buffer` may be used.
+ *
+ * It may be worth calling `flatcc_builder_get_direct_buffer` first to see
+ * if the buffer is small enough to avoid copying.
+ *
+ * The returned buffer must be deallocated using `free`.
+ */
+void *flatcc_builder_finalize_buffer(flatcc_builder_t *B, size_t *size_out);
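+
+/*
+ * Sketch (illustration only): preferring the zero-copy path and
+ * falling back to a finalized copy:
+ *
+ *     size_t size;
+ *     void *buf = flatcc_builder_get_direct_buffer(B, &size);
+ *
+ *     if (!buf) {
+ *         buf = flatcc_builder_finalize_buffer(B, &size);
+ *     }
+ *
+ * A direct buffer must not be freed and is invalidated by reset/clear,
+ * while a finalized buffer must be released with `flatcc_builder_free`.
+ */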
+
+/*
+ * Only for use with the default emitter.
+ *
+ * Similar to `flatcc_builder_finalize_buffer` but ensures the returned
+ * memory is aligned to the overall alignment required for the buffer.
+ * Often it is not necessary unless special operations rely on larger
+ * alignments than the stored scalars.
+ *
+ * If `size_out` is not null, it is set to the buffer size, or 0 if
+ * operation failed.
+ *
+ * The returned buffer must be deallocated using `aligned_free` which is
+ * implemented via `flatcc_flatbuffers.h`. `free` will usually work but
+ * is not portable to platforms without posix_memalign or C11
+ * aligned_alloc support.
+ *
+ * NOTE: if a library might be compiled with a version of aligned_free
+ * that differs from the application using it, use
+ * `flatcc_builder_aligned_free` to make sure the correct deallocation
+ * function is used.
+ */
+void *flatcc_builder_finalize_aligned_buffer(flatcc_builder_t *B, size_t *size_out);
+
+/*
+ * A stable implementation of `aligned_alloc` that is not sensitive
+ * to the application's compile time flags.
+ */
+void *flatcc_builder_aligned_alloc(size_t alignment, size_t size);
+
+/*
+ * A stable implementation of `aligned_free` that is not sensitive
+ * to the application's compile time flags.
+ */
+void flatcc_builder_aligned_free(void *p);
+
+/*
+ * Same allocation as `flatcc_builder_finalize_buffer` returns. Usually
+ * the same as `malloc` but can be redefined via macros.
+ */
+void *flatcc_builder_alloc(size_t size);
+
+/*
+ * A stable implementation of `free` when the default allocation
+ * methods have been redefined.
+ *
+ * Deallocates memory returned from `flatcc_builder_finalize_buffer`.
+ */
+void flatcc_builder_free(void *p);
+
+/*
+ * Only for use with the default emitter.
+ *
+ * Convenience method to copy buffer from default emitter. Forwards
+ * call to default emitter and returns input pointer, or null if
+ * the emitter is not default or if the given size is smaller than
+ * the buffer size.
+ *
+ * Note: the `size` argument is the target buffer's capacity, not the
+ * flatcc_builder's buffer size.
+ *
+ * Other emitters have custom interfaces for reaching their content.
+ */
+void *flatcc_builder_copy_buffer(flatcc_builder_t *B, void *buffer, size_t size);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* FLATCC_BUILDER_H */
diff --git a/nostrdb/flatcc/flatcc_emitter.h b/nostrdb/flatcc/flatcc_emitter.h
@@ -0,0 +1,215 @@
+#ifndef FLATCC_EMITTER_H
+#define FLATCC_EMITTER_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Default implementation of a flatbuilder emitter.
+ *
+ * This may be used as a starting point for more advanced emitters,
+ * for example writing completed pages to disk or network and
+ * then recycling those pages.
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "flatcc_types.h"
+#include "flatcc_iov.h"
+#include "flatcc_alloc.h"
+
+/*
+ * The buffer steadily grows during emission but the design allows for
+ * an extension where individual pages can be recycled before the buffer
+ * is complete, for example because they have been transmitted.
+ *
+ * When done, the buffer can be cleared to free all memory, or reset to
+ * maintain an adaptive page pool for next buffer construction.
+ *
+ * Unlike an exponentially growing buffer, each buffer page remains
+ * stable in memory until reset, clear or recycle is called.
+ *
+ * Design notes for possible extensions:
+ *
+ * The buffer is a ring buffer marked by a front and a back page. The
+ * front and back may be the same page and may initially be absent.
+ * Pages outside these are unallocated pages available for recycling.
+ * Any page between (but excluding) the front and back pages may be
+ * recycled by unlinking and relinking outside the front and back pages
+ * but then copy operations no longer make sense. Each page stores the
+ * logical offset within the buffer but isn't otherwise used by the
+ * implementation - it might be used for network transmission. The buffer
+ * is not explicitly designed for multithreaded access but any page
+ * strictly between front and back is not touched unless recycled and in
+ * this case aligned allocation is useful to prevent cache line sharing.
+ */
+
+/*
+ * Memory is allocated in fixed length page units - the first page is
+ * split between front and back so each gets half the page size. If the
+ * size is a multiple of 128 then each page offset will be a multiple of
+ * 64, which may be useful for sequencing etc.
+ */
+#ifndef FLATCC_EMITTER_PAGE_SIZE
+#define FLATCC_EMITTER_MAX_PAGE_SIZE 3000
+#define FLATCC_EMITTER_PAGE_MULTIPLE 64
+#define FLATCC_EMITTER_PAGE_SIZE ((FLATCC_EMITTER_MAX_PAGE_SIZE) &\
+ ~(2 * (FLATCC_EMITTER_PAGE_MULTIPLE) - 1))
+#endif
+
+#ifndef FLATCC_EMITTER_ALLOC
+#ifdef FLATCC_EMITTER_USE_ALIGNED_ALLOC
+/*
+ * <stdlib.h> does not always provide aligned_alloc, so include whatever
+ * is required when enabling this feature.
+ */
+#define FLATCC_EMITTER_ALLOC(n) aligned_alloc(FLATCC_EMITTER_PAGE_MULTIPLE,\
+ (((n) + FLATCC_EMITTER_PAGE_MULTIPLE - 1) & ~(FLATCC_EMITTER_PAGE_MULTIPLE - 1)))
+#ifndef FLATCC_EMITTER_FREE
+#define FLATCC_EMITTER_FREE(p) aligned_free(p)
+#endif
+#endif
+#endif
+
+#ifndef FLATCC_EMITTER_ALLOC
+#define FLATCC_EMITTER_ALLOC(n) FLATCC_ALLOC(n)
+#endif
+#ifndef FLATCC_EMITTER_FREE
+#define FLATCC_EMITTER_FREE(p) FLATCC_FREE(p)
+#endif
+
+typedef struct flatcc_emitter_page flatcc_emitter_page_t;
+typedef struct flatcc_emitter flatcc_emitter_t;
+
+struct flatcc_emitter_page {
+ uint8_t page[FLATCC_EMITTER_PAGE_SIZE];
+ flatcc_emitter_page_t *next;
+ flatcc_emitter_page_t *prev;
+ /*
+ * The offset is relative to page start, but not necessarily
+ * to any present content if part of front or back page,
+ * and undefined for unused pages.
+ */
+ flatbuffers_soffset_t page_offset;
+};
+
+/*
+ * Must be allocated and zeroed externally, e.g. on the stack
+ * then provided as emit_context to the flatbuilder along
+ * with the `flatcc_emitter` function.
+ */
+struct flatcc_emitter {
+ flatcc_emitter_page_t *front, *back;
+ uint8_t *front_cursor;
+ size_t front_left;
+ uint8_t *back_cursor;
+ size_t back_left;
+ size_t used;
+ size_t capacity;
+ size_t used_average;
+};
+
+/* Optional helper to ensure emitter is zeroed initially. */
+static inline void flatcc_emitter_init(flatcc_emitter_t *E)
+{
+ memset(E, 0, sizeof(*E));
+}
+
+/* Deallocates all buffer memory making the emitter ready for next use. */
+void flatcc_emitter_clear(flatcc_emitter_t *E);
+
+/*
+ * Similar to `flatcc_emitter_clear` but heuristically keeps some allocated
+ * memory between uses while gradually reducing peak allocations.
+ * For small buffers, a single page will remain available with no
+ * additional allocations or deallocations after first use.
+ */
+void flatcc_emitter_reset(flatcc_emitter_t *E);
+
+/*
+ * Helper function that allows a page between front and back to be
+ * recycled while the buffer is still being constructed - most likely as part
+ * of partial copy or transmission. Attempting to recycle front or back
+ * pages will result in an error. Recycling pages outside the
+ * front and back will be valid but pointless. After recycling, copy
+ * operations are no longer well-defined and should be replaced with
+ * whatever logic is recycling the pages. The reset operation
+ * automatically recycles all (remaining) pages when emission is
+ * complete. After recycling, the `flatcc_emitter_get_buffer_size` function
+ * will return as if recycle was not called, but will only represent the
+ * logical size, not the size of the active buffer. Because a recycled
+ * page is fully utilized, it is fairly easy to compensate for this if
+ * required.
+ *
+ * Returns 0 on success.
+ */
+int flatcc_emitter_recycle_page(flatcc_emitter_t *E, flatcc_emitter_page_t *p);
+
+/*
+ * The amount of data copied with `flatcc_emitter_copy_buffer` and related
+ * functions. Normally called at end of buffer construction but is
+ * always valid, as are the copy functions. The size is a direct
+ * function of the amount of emitted data so the flatbuilder itself can
+ * also provide this information.
+ */
+static inline size_t flatcc_emitter_get_buffer_size(flatcc_emitter_t *E)
+{
+ return E->used;
+}
+
+/*
+ * Returns buffer start iff the buffer fits on a single internal page.
+ * Only useful for fairly small buffers - about half the page size since
+ * one half of the first page goes to vtables that likely use little space.
+ * Returns null if request could not be served.
+ *
+ * If `size_out` is not null, it is set to the buffer size, or 0 if
+ * operation failed.
+ */
+static inline void *flatcc_emitter_get_direct_buffer(flatcc_emitter_t *E, size_t *size_out)
+{
+ if (E->front == E->back) {
+ if (size_out) {
+ *size_out = E->used;
+ }
+ return E->front_cursor;
+ }
+ if (size_out) {
+ *size_out = 0;
+ }
+ return 0;
+}
+
+/*
+ * Copies the internal flatcc_emitter representation to an externally
+ * provided linear buffer that must have the size given by
+ * `flatcc_emitter_get_buffer_size`.
+ *
+ * If pages have been recycled, only the remaining pages will be copied
+ * and thus less data than `flatcc_emitter_get_buffer_size` would suggest. It
+ * makes more sense to provide a customized copy operation when
+ * recycling pages.
+ *
+ * If the buffer is too small, nothing is copied, otherwise the
+ * full buffer is copied and the input buffer is returned.
+ */
+void *flatcc_emitter_copy_buffer(flatcc_emitter_t *E, void *buf, size_t size);
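+
+/*
+ * Sketch (illustration only): copying the emitted buffer into an
+ * allocated block, using the allocation macros from the included
+ * "flatcc_alloc.h":
+ *
+ *     size_t size = flatcc_emitter_get_buffer_size(E);
+ *     void *buf = FLATCC_ALLOC(size);
+ *
+ *     if (buf && !flatcc_emitter_copy_buffer(E, buf, size)) {
+ *         FLATCC_FREE(buf);
+ *         buf = 0;
+ *     }
+ */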
+
+/*
+ * The emitter interface function to the flatbuilder API.
+ * `emit_context` should be of type `flatcc_emitter_t` for this
+ * particular implementation.
+ *
+ * This function is compatible with the `flatcc_builder_emit_fun`
+ * type defined in "flatcc_builder.h".
+ */
+int flatcc_emitter(void *emit_context,
+ const flatcc_iovec_t *iov, int iov_count,
+ flatbuffers_soffset_t offset, size_t len);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* FLATCC_EMITTER_H */
diff --git a/nostrdb/flatcc/flatcc_endian.h b/nostrdb/flatcc/flatcc_endian.h
@@ -0,0 +1,125 @@
+#ifndef FLATCC_ENDIAN_H
+#define FLATCC_ENDIAN_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * This file provides helper macros to define type-specific macros and
+ * inline functions that convert between stored data and native data
+ * independently of both native (host) endianness and protocol endianness
+ * (i.e. the serialized endian format).
+ *
+ * To detect endianness correctly ensure one of the following is defined.
+ *
+ * __LITTLE_ENDIAN__
+ * __BIG_ENDIAN__
+ * FLATBUFFERS_LITTLEENDIAN=1
+ * FLATBUFFERS_LITTLEENDIAN=0
+ *
+ * Note: the Clang compiler likely already does this, but other
+ * compilers may have their own way, if at all.
+ *
+ * It is also necessary to include <endian.h> or a compatible
+ * implementation in order to provide:
+ *
+ * le16toh, le32toh, le64toh, be16toh, be32toh, be64toh,
+ * htole16, htole32, htole64, htobe16, htobe32, htobe64.
+ *
+ * A simple way to ensure all of the above for most platforms is
+ * to include the portable endian support file:
+ *
+ * #include "flatcc/portable/pendian.h"
+ *
+ * It is also necessary to include
+ *
+ * #include "flatcc/flatcc_types.h"
+ *
+ * or an equivalent file. This makes it possible to change the
+ * endianness of the serialized data and the sizes of flatbuffer
+ * specific types such as `uoffset_t`.
+ *
+ * Note: the mentioned include files are likely already included
+ * by the file including this file, at least for the default
+ * configuration.
+ */
+
+#ifndef UINT8_MAX
+#include <stdint.h>
+#endif
+
+/* These are needed to simplify accessor macros and are not found in <endian.h>. */
+#ifndef le8toh
+#define le8toh(n) (n)
+#endif
+
+#ifndef be8toh
+#define be8toh(n) (n)
+#endif
+
+#ifndef htole8
+#define htole8(n) (n)
+#endif
+
+#ifndef htobe8
+#define htobe8(n) (n)
+#endif
+
+#include "flatcc_accessors.h"
+
+/* This is the binary encoding endianness, usually LE for flatbuffers. */
+#if FLATBUFFERS_PROTOCOL_IS_LE
+#define flatbuffers_endian le
+#elif FLATBUFFERS_PROTOCOL_IS_BE
+#define flatbuffers_endian be
+#else
+#error "flatbuffers has no defined endiannesss"
+#endif
+
+ __flatcc_define_basic_scalar_accessors(flatbuffers_, flatbuffers_endian)
+
+ __flatcc_define_integer_accessors(flatbuffers_bool, flatbuffers_bool_t,
+ FLATBUFFERS_BOOL_WIDTH, flatbuffers_endian)
+ __flatcc_define_integer_accessors(flatbuffers_union_type, flatbuffers_union_type_t,
+ FLATBUFFERS_UTYPE_WIDTH, flatbuffers_endian)
+
+ __flatcc_define_integer_accessors(__flatbuffers_uoffset, flatbuffers_uoffset_t,
+ FLATBUFFERS_UOFFSET_WIDTH, flatbuffers_endian)
+ __flatcc_define_integer_accessors(__flatbuffers_soffset, flatbuffers_soffset_t,
+ FLATBUFFERS_SOFFSET_WIDTH, flatbuffers_endian)
+ __flatcc_define_integer_accessors(__flatbuffers_voffset, flatbuffers_voffset_t,
+ FLATBUFFERS_VOFFSET_WIDTH, flatbuffers_endian)
+ __flatcc_define_integer_accessors(__flatbuffers_utype, flatbuffers_utype_t,
+ FLATBUFFERS_UTYPE_WIDTH, flatbuffers_endian)
+ __flatcc_define_integer_accessors(__flatbuffers_thash, flatbuffers_thash_t,
+ FLATBUFFERS_THASH_WIDTH, flatbuffers_endian)
+
+/* flatcc/portable/pendian.h sets LITTLE/BIG flags if possible, and always defines le16toh. */
+#ifndef flatbuffers_is_native_pe
+#if defined(__LITTLE_ENDIAN__) || FLATBUFFERS_LITTLEENDIAN
+#undef FLATBUFFERS_LITTLEENDIAN
+#define FLATBUFFERS_LITTLEENDIAN 1
+#define flatbuffers_is_native_pe() (FLATBUFFERS_PROTOCOL_IS_LE)
+#elif defined(__BIG_ENDIAN__) || (defined(FLATBUFFERS_LITTLEENDIAN) && !FLATBUFFERS_LITTLEENDIAN)
+#undef FLATBUFFERS_LITTLEENDIAN
+#define FLATBUFFERS_LITTLEENDIAN 0
+#define flatbuffers_is_native_pe() (FLATBUFFERS_PROTOCOL_IS_BE)
+#else
+#define flatbuffers_is_native_pe() (__FLATBUFFERS_CONCAT(flatbuffers_endian, 16toh)(1) == 1)
+#endif
+#endif
+
+#ifndef flatbuffers_is_native_le
+#define flatbuffers_is_native_le() flatbuffers_is_native_pe()
+#endif
+
+#ifndef flatbuffers_is_native_be
+#define flatbuffers_is_native_be() (!flatbuffers_is_native_pe())
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* FLATCC_ENDIAN_H */
diff --git a/nostrdb/flatcc/flatcc_epilogue.h b/nostrdb/flatcc/flatcc_epilogue.h
@@ -0,0 +1,8 @@
+/* Include guard intentionally left out. */
+
+#ifdef __cplusplus
+}
+#endif
+
+#include "pdiagnostic_pop.h"
+
diff --git a/nostrdb/flatcc/flatcc_flatbuffers.h b/nostrdb/flatcc/flatcc_flatbuffers.h
@@ -0,0 +1,55 @@
+/*
+ * Even C11 compilers depend on clib support for `static_assert` which
+ * isn't always present, so we deal with this here for all compilers.
+ *
+ * Outside include guard to handle scope counter.
+ */
+#include "pstatic_assert.h"
+
+#ifndef FLATCC_FLATBUFFERS_H
+#define FLATCC_FLATBUFFERS_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef flatcc_flatbuffers_defined
+#define flatcc_flatbuffers_defined
+
+#ifdef FLATCC_PORTABLE
+#include "flatcc/flatcc_portable.h"
+#endif
+#include "pwarnings.h"
+/* Needed by C99 compilers without FLATCC_PORTABLE. */
+#include "pstdalign.h"
+
+/* Handle fallthrough attribute in switch statements. */
+#include "pattributes.h"
+
+#include "flatcc_alloc.h"
+#include "flatcc_assert.h"
+
+#define __FLATBUFFERS_PASTE2(a, b) a ## b
+#define __FLATBUFFERS_PASTE3(a, b, c) a ## b ## c
+#define __FLATBUFFERS_CONCAT(a, b) __FLATBUFFERS_PASTE2(a, b)
+
+/*
+ * "flatcc_endian.h" requires the preceeding include files,
+ * or compatible definitions.
+ */
+#include "pendian.h"
+#include "flatcc_types.h"
+#include "flatcc_endian.h"
+#include "flatcc_identifier.h"
+
+#ifndef FLATBUFFERS_WRAP_NAMESPACE
+#define FLATBUFFERS_WRAP_NAMESPACE(ns, x) ns ## _ ## x
+#endif
+
+#endif /* flatcc_flatbuffers_defined */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* FLATCC_FLATBUFFERS_H */
diff --git a/nostrdb/flatcc/flatcc_identifier.h b/nostrdb/flatcc/flatcc_identifier.h
@@ -0,0 +1,148 @@
+#ifndef FLATCC_IDENTIFIER_H
+#define FLATCC_IDENTIFIER_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef FLATCC_FLATBUFFERS_H
+#error "include via flatcc/flatcc_flatbuffers.h"
+#endif
+
+#ifndef UINT8_MAX
+#include <stdint.h>
+#endif
+
+/*
+ * FlatBuffers identifiers are normally specified by "file_identifier" in
+ * the schema, but a standard hash of the fully qualified type name can
+ * also be used. This file implements such a mapping, but the generated
+ * headers also contain the necessary information for known types.
+ */
+
+/*
+ * Returns the type hash of a given name in native endian format.
+ * Generated code already provides these, but if a name was changed
+ * in the schema it may be relevant to recompute the hash manually.
+ *
+ * The wire-format of this value should always be little endian.
+ *
+ * Note: this must be the fully qualified name, e.g. in the namespace
+ * "MyGame.Example":
+ *
+ * flatbuffers_type_hash_from_name("MyGame.Example.Monster");
+ *
+ * or, in the global namespace just:
+ *
+ * flatbuffers_type_hash_from_name("MyTable");
+ *
+ * This assumes a 32-bit hash type. For other sizes, other FNV-1a
+ * constants would be required.
+ *
+ * Note that we reserve the hash value 0 for missing or ignored values.
+ */
+static inline flatbuffers_thash_t flatbuffers_type_hash_from_name(const char *name)
+{
+ uint32_t hash = UINT32_C(2166136261);
+ while (*name) {
+ hash ^= (unsigned char)*name;
+ hash = hash * UINT32_C(16777619);
+ ++name;
+ }
+ if (hash == 0) {
+ hash = UINT32_C(2166136261);
+ }
+ return hash;
+}
+
+/*
+ * Type hash encoded as little endian file identifier string.
+ * Note: if the type hash is 0, the identifier should be null, which
+ * we cannot return in this interface.
+ */
+static inline void flatbuffers_identifier_from_type_hash(flatbuffers_thash_t type_hash, flatbuffers_fid_t out_identifier)
+{
+ out_identifier[0] = (char)(type_hash & 0xff);
+ type_hash >>= 8;
+ out_identifier[1] = (char)(type_hash & 0xff);
+ type_hash >>= 8;
+ out_identifier[2] = (char)(type_hash & 0xff);
+ type_hash >>= 8;
+ out_identifier[3] = (char)(type_hash & 0xff);
+}
+
+/* Native integer encoding of file identifier. */
+static inline flatbuffers_thash_t flatbuffers_type_hash_from_identifier(const flatbuffers_fid_t identifier)
+{
+ uint8_t *p = (uint8_t *)identifier;
+
+ return identifier ?
+ (uint32_t)p[0] + (((uint32_t)p[1]) << 8) + (((uint32_t)p[2]) << 16) + (((uint32_t)p[3]) << 24) : 0;
+}
+
+/*
+ * Convert a null terminated string identifier like "MONS" or "X" into a
+ * native type hash identifier, usually for comparison. This will not
+ * work with type hash strings because they can contain null bytes.
+ */
+static inline flatbuffers_thash_t flatbuffers_type_hash_from_string(const char *identifier)
+{
+ flatbuffers_thash_t h = 0;
+ const uint8_t *p = (const uint8_t *)identifier;
+
+ if (!p[0]) return h;
+ h += ((flatbuffers_thash_t)p[0]);
+ if (!p[1]) return h;
+ h += ((flatbuffers_thash_t)p[1]) << 8;
+ if (!p[2]) return h;
+ h += ((flatbuffers_thash_t)p[2]) << 16;
+ /* No need to test for termination here. */
+ h += ((flatbuffers_thash_t)p[3]) << 24;
+ return h;
+}
+
+/*
+ * Computes the little endian wire format of the type hash. It can be
+ * used as a file identifier argument to various flatcc buffer calls.
+ *
+ * `flatbuffers_fid_t` is just `char [4]` for the default flatbuffers
+ * type system defined in `flatcc/flatcc_types.h`.
+ */
+static inline void flatbuffers_identifier_from_name(const char *name, flatbuffers_fid_t out_identifier)
+{
+ flatbuffers_identifier_from_type_hash(flatbuffers_type_hash_from_name(name), out_identifier);
+}
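+
+/*
+ * A minimal sketch tying the above together, with an illustrative
+ * fully qualified type name:
+ *
+ *     flatbuffers_fid_t fid;
+ *     flatbuffers_identifier_from_name("MyGame.Example.Monster", fid);
+ *     // fid now holds the 4 little endian bytes of the type hash
+ */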
+
+/*
+ * This is a collision free hash (a permutation) of the type hash to
+ * provide better distribution for use in hash tables. It is likely not
+ * necessary in practice, and for uniqueness of identifiers it provides no
+ * advantage over just using the FNV-1a type hash, except when truncating
+ * the identifier to less than 32-bits.
+ *
+ * Note: the output should not be used in transmission. It provides no
+ * additional information and just complicates matters. Furthermore, the
+ * unmodified type hash has the benefit that it can seed a child namespace.
+ */
+static inline uint32_t flatbuffers_disperse_type_hash(flatbuffers_thash_t type_hash)
+{
+ /* http://stackoverflow.com/a/12996028 */
+ uint32_t x = type_hash;
+
+ x = ((x >> 16) ^ x) * UINT32_C(0x45d9f3b);
+ x = ((x >> 16) ^ x) * UINT32_C(0x45d9f3b);
+ x = ((x >> 16) ^ x);
+ return x;
+}
+
+
+/* We have hardcoded assumptions about identifier size. */
+static_assert(sizeof(flatbuffers_fid_t) == 4, "unexpected file identifier size");
+static_assert(sizeof(flatbuffers_thash_t) == 4, "unexpected type hash size");
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* FLATCC_IDENTIFIER_H */
diff --git a/nostrdb/flatcc/flatcc_iov.h b/nostrdb/flatcc/flatcc_iov.h
@@ -0,0 +1,31 @@
+#ifndef FLATCC_IOV_H
+#define FLATCC_IOV_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdlib.h>
+
+/*
+ * The emitter receives one or a few buffers at a time via
+ * this type: a <sys/uio.h> compatible iovec structure used for
+ * the allocation and emitter interfaces.
+ */
+typedef struct flatcc_iovec flatcc_iovec_t;
+struct flatcc_iovec {
+ void *iov_base;
+ size_t iov_len;
+};
+
+/*
+ * The largest iovec vector the builder will issue. It will
+ * always be a relatively small number.
+ */
+#define FLATCC_IOV_COUNT_MAX 8
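+
+/*
+ * A sketch of how an emitter might walk an iovec array it receives
+ * (the emitter interface itself is declared elsewhere):
+ *
+ *     size_t i, total = 0;
+ *     for (i = 0; i < iov_count; ++i) {
+ *         total += iov[i].iov_len;  // iov is a flatcc_iovec_t array
+ *     }
+ */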
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* FLATCC_IOV_H */
diff --git a/nostrdb/flatcc/flatcc_json_parser.h b/nostrdb/flatcc/flatcc_json_parser.h
@@ -0,0 +1,895 @@
+#ifndef FLATCC_JSON_PARSE_H
+#define FLATCC_JSON_PARSE_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * JSON RFC:
+ * http://www.ietf.org/rfc/rfc4627.txt?number=4627
+ *
+ * With several flatbuffers-specific extensions.
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "flatcc_rtconfig.h"
+#include "flatcc_builder.h"
+#include "flatcc_unaligned.h"
+
+#define PDIAGNOSTIC_IGNORE_UNUSED
+#include "pdiagnostic_push.h"
+
+enum flatcc_json_parser_flags {
+ flatcc_json_parser_f_skip_unknown = 1,
+ flatcc_json_parser_f_force_add = 2,
+ flatcc_json_parser_f_with_size = 4,
+ flatcc_json_parser_f_skip_array_overflow = 8,
+ flatcc_json_parser_f_reject_array_underflow = 16
+};
+
+#define FLATCC_JSON_PARSE_ERROR_MAP(XX) \
+ XX(ok, "ok") \
+ XX(eof, "eof") \
+ XX(deep_nesting, "deep nesting") \
+ XX(trailing_comma, "trailing comma") \
+ XX(expected_colon, "expected colon") \
+ XX(unexpected_character, "unexpected character") \
+ XX(invalid_numeric, "invalid numeric") \
+ XX(overflow, "overflow") \
+ XX(underflow, "underflow") \
+ XX(unbalanced_array, "unbalanced array") \
+ XX(unbalanced_object, "unbalanced object") \
+ XX(precision_loss, "precision loss") \
+ XX(float_unexpected, "float unexpected") \
+ XX(unknown_symbol, "unknown symbol") \
+ XX(unquoted_symbolic_list, "unquoted list of symbols") \
+ XX(unknown_union, "unknown union type") \
+ XX(expected_string, "expected string") \
+ XX(invalid_character, "invalid character") \
+ XX(invalid_escape, "invalid escape") \
+ XX(invalid_type, "invalid type") \
+ XX(unterminated_string, "unterminated string") \
+ XX(expected_object, "expected object") \
+ XX(expected_array, "expected array") \
+ XX(expected_scalar, "expected literal or symbolic scalar") \
+ XX(expected_union_type, "expected union type") \
+ XX(union_none_present, "union present with type NONE") \
+ XX(union_none_not_null, "union of type NONE is not null") \
+ XX(union_incomplete, "table has incomplete union") \
+ XX(duplicate, "table has duplicate field") \
+ XX(required, "required field missing") \
+ XX(union_vector_length, "union vector length mismatch") \
+ XX(base64, "invalid base64 content") \
+ XX(base64url, "invalid base64url content") \
+ XX(array_underflow, "fixed length array underflow") \
+ XX(array_overflow, "fixed length array overflow") \
+ XX(runtime, "runtime error") \
+ XX(not_supported, "not supported")
+
+enum flatcc_json_parser_error_no {
+#define XX(no, str) flatcc_json_parser_error_##no,
+ FLATCC_JSON_PARSE_ERROR_MAP(XX)
+#undef XX
+};
+
+const char *flatcc_json_parser_error_string(int err);
+
+#define flatcc_json_parser_ok flatcc_json_parser_error_ok
+#define flatcc_json_parser_eof flatcc_json_parser_error_eof
+
+/*
+ * The struct may be zero initialized in which case the line count will
+ * start at line zero, or the line may be set to 1 initially. The
+ * parser struct is only used for error reporting and for tracking
+ * non-standard unquoted symbol parsing.
+ *
+ * `ctx` may for example hold a flatcc_builder_t pointer.
+ */
+typedef struct flatcc_json_parser_ctx flatcc_json_parser_t;
+struct flatcc_json_parser_ctx {
+ flatcc_builder_t *ctx;
+ const char *line_start;
+ int flags;
+#if FLATCC_JSON_PARSE_ALLOW_UNQUOTED
+ int unquoted;
+#endif
+
+ int line, pos;
+ int error;
+ const char *start;
+ const char *end;
+ const char *error_loc;
+ /* Set at end of successful parse. */
+ const char *end_loc;
+};
+
+static inline int flatcc_json_parser_get_error(flatcc_json_parser_t *ctx)
+{
+ return ctx->error;
+}
+
+static inline void flatcc_json_parser_init(flatcc_json_parser_t *ctx, flatcc_builder_t *B, const char *buf, const char *end, int flags)
+{
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->ctx = B;
+ ctx->line_start = buf;
+ ctx->line = 1;
+ ctx->flags = flags;
+ /* These are not needed for parsing, but may be helpful in reporting etc. */
+ ctx->start = buf;
+ ctx->end = end;
+ ctx->error_loc = buf;
+}
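+
+/*
+ * A minimal setup sketch, assuming `B` is a flatcc builder initialized
+ * elsewhere:
+ *
+ *     flatcc_json_parser_t parser;
+ *     const char *json = "{}";
+ *     flatcc_json_parser_init(&parser, B, json, json + strlen(json), 0);
+ *     // ... parse ...
+ *     if (flatcc_json_parser_get_error(&parser)) {
+ *         puts(flatcc_json_parser_error_string(parser.error));
+ *     }
+ */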
+
+const char *flatcc_json_parser_set_error(flatcc_json_parser_t *ctx, const char *loc, const char *end, int reason);
+
+/*
+ * Wide space skipping is not necessarily beneficial in the typical
+ * case, but it also isn't expensive, so it may be added when there are
+ * applications that can benefit.
+ */
+const char *flatcc_json_parser_space_ext(flatcc_json_parser_t *ctx, const char *buf, const char *end);
+
+static inline const char *flatcc_json_parser_space(flatcc_json_parser_t *ctx, const char *buf, const char *end)
+{
+ if (end - buf > 1) {
+ if (buf[0] > 0x20) {
+ return buf;
+ }
+ if (buf[0] == 0x20 && buf[1] > 0x20) {
+ return buf + 1;
+ }
+ }
+ return flatcc_json_parser_space_ext(ctx, buf, end);
+}
+
+
+static inline const char *flatcc_json_parser_string_start(flatcc_json_parser_t *ctx, const char *buf, const char *end)
+{
+ if (buf == end || *buf != '\"') {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_expected_string);
+ }
+ return ++buf;
+}
+
+static inline const char *flatcc_json_parser_string_end(flatcc_json_parser_t *ctx, const char *buf, const char *end)
+{
+ if (buf == end || *buf != '\"') {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_unterminated_string);
+ }
+ return ++buf;
+}
+
+/*
+ * Parse a string into a fixed length char array `s` of length `n` and
+ * raise errors according to overflow/underflow runtime flags. Zero
+ * pad and truncate as needed. A trailing zero is not inserted if the
+ * input is at least the same length as the char array.
+ *
+ * Runtime flags: `skip_array_overflow`, `pad_array_underflow`.
+ */
+const char *flatcc_json_parser_char_array(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, char *s, size_t n);
+
+/*
+ * Creates a string. Returns *ref == 0 on unrecoverable error or
+ * sets *ref to a valid new string reference.
+ */
+const char *flatcc_json_parser_build_string(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, flatcc_builder_ref_t *ref);
+
+typedef char flatcc_json_parser_escape_buffer_t[5];
+/*
+ * If the buffer does not hold a valid escape sequence, an error is
+ * returned with code[0] = 0.
+ *
+ * Otherwise code[0] is the length (1-4) of the remaining
+ * characters in the code, transcoded from the escape sequence,
+ * where a length of 4 only happens with escaped surrogate pairs.
+ *
+ * The JSON extension `\xXX` is supported and may produce invalid UTF-8
+ * characters such as 0xff. The standard JSON escape `\uXXXX` is not
+ * checked for invalid code points and may produce invalid UTF-8.
+ *
+ * Regular characters are expected to be valid UTF-8, but they are not
+ * checked and may therefore produce invalid UTF-8.
+ *
+ * Control characters within a string are rejected except in the
+ * standard JSON escaped form for `\n \r \t \b \f`.
+ *
+ * Additional escape codes as per standard JSON: `\\ \/ \"`.
+ */
+const char *flatcc_json_parser_string_escape(flatcc_json_parser_t *ctx, const char *buf, const char *end, flatcc_json_parser_escape_buffer_t code);
+
+/*
+ * Parses the longest unescaped run of string content followed by either
+ * an escape encoding, string termination, or error.
+ */
+const char *flatcc_json_parser_string_part(flatcc_json_parser_t *ctx, const char *buf, const char *end);
+
+static inline const char *flatcc_json_parser_symbol_start(flatcc_json_parser_t *ctx, const char *buf, const char *end)
+{
+ if (buf == end) {
+ return buf;
+ }
+ if (*buf == '\"') {
+ ++buf;
+#if FLATCC_JSON_PARSE_ALLOW_UNQUOTED
+ ctx->unquoted = 0;
+#endif
+ } else {
+#if FLATCC_JSON_PARSE_ALLOW_UNQUOTED
+ if (*buf == '.') {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_unexpected_character);
+ }
+ ctx->unquoted = 1;
+#else
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_unexpected_character);
+#endif
+ }
+ return buf;
+}
+
+static inline uint64_t flatcc_json_parser_symbol_part_ext(const char *buf, const char *end)
+{
+ uint64_t w = 0;
+ size_t n = (size_t)(end - buf);
+
+ if (n > 8) {
+ n = 8;
+ }
+ /* This can bloat inlining for a rarely executed case. */
+#if 1
+ /* Fall through comments needed to silence gcc 7 warnings. */
+ switch (n) {
+ case 8: w |= ((uint64_t)buf[7]) << (0 * 8);
+ fallthrough;
+ case 7: w |= ((uint64_t)buf[6]) << (1 * 8);
+ fallthrough;
+ case 6: w |= ((uint64_t)buf[5]) << (2 * 8);
+ fallthrough;
+ case 5: w |= ((uint64_t)buf[4]) << (3 * 8);
+ fallthrough;
+ case 4: w |= ((uint64_t)buf[3]) << (4 * 8);
+ fallthrough;
+ case 3: w |= ((uint64_t)buf[2]) << (5 * 8);
+ fallthrough;
+ case 2: w |= ((uint64_t)buf[1]) << (6 * 8);
+ fallthrough;
+ case 1: w |= ((uint64_t)buf[0]) << (7 * 8);
+ fallthrough;
+ case 0:
+ break;
+ }
+#else
+    /* But this is hardly much of an improvement. */
+    {
+        size_t i;
+        for (i = 0; i < n; ++i) {
+            /* Place bytes from the high end like the unrolled case. */
+            w |= ((uint64_t)(uint8_t)buf[i]) << ((7 - i) * 8);
+        }
+    }
+#endif
+ return w;
+}
+
+/*
+ * Read out string as a big endian word. This allows for trie lookup,
+ * also when trailing characters extend beyond the keyword. This assumes the
+ * external words tested against are valid and therefore there need be
+ * no checks here. If a match is not made, the symbol_end function will
+ * consume and check any unmatched content - from _before_ this function
+ * was called - i.e. the returned buffer is tentative for use only if we
+ * accept the part returned here.
+ *
+ * Used for both symbols and symbolic constants.
+ */
+static inline uint64_t flatcc_json_parser_symbol_part(const char *buf, const char *end)
+{
+ size_t n = (size_t)(end - buf);
+
+#if FLATCC_ALLOW_UNALIGNED_ACCESS
+ if (n >= 8) {
+ return be64toh(*(uint64_t *)buf);
+ }
+#endif
+ return flatcc_json_parser_symbol_part_ext(buf, end);
+}
+
+/* Don't allow space in dot notation, whether inside or outside strings. */
+static inline const char *flatcc_json_parser_match_scope(flatcc_json_parser_t *ctx, const char *buf, const char *end, int pos)
+{
+ const char *mark = buf;
+
+ (void)ctx;
+
+ if (end - buf <= pos) {
+ return mark;
+ }
+ if (buf[pos] != '.') {
+ return mark;
+ }
+ return buf + pos + 1;
+}
+
+const char *flatcc_json_parser_match_constant(flatcc_json_parser_t *ctx, const char *buf, const char *end, int pos, int *more);
+
+/* We allow '.' in unquoted symbols, but not at the start or end. */
+static inline const char *flatcc_json_parser_symbol_end(flatcc_json_parser_t *ctx, const char *buf, const char *end)
+{
+ char c, clast = 0;
+
+
+#if FLATCC_JSON_PARSE_ALLOW_UNQUOTED
+ if (ctx->unquoted) {
+ while (buf != end && *buf > 0x20) {
+ clast = c = *buf;
+ if (c == '_' || c == '.' || (c & 0x80) || (c >= '0' && c <= '9')) {
+ ++buf;
+ continue;
+ }
+ /* Lower case. */
+ c |= 0x20;
+ if (c >= 'a' && c <= 'z') {
+ ++buf;
+ continue;
+ }
+ break;
+ }
+ if (clast == '.') {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_unexpected_character);
+ }
+ } else {
+#else
+ {
+#endif
+ while (buf != end && *buf != '\"') {
+ if (*buf == '\\') {
+ if (end - buf < 2) {
+ break;
+ }
+ ++buf;
+ }
+ ++buf;
+ }
+ if (buf == end || *buf != '\"') {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_unterminated_string);
+ }
+ ++buf;
+ }
+ return buf;
+}
+
+static inline const char *flatcc_json_parser_constant_start(flatcc_json_parser_t *ctx, const char *buf, const char *end)
+{
+ buf = flatcc_json_parser_symbol_start(ctx, buf, end);
+#if FLATCC_JSON_PARSE_ALLOW_UNQUOTED
+ if (!ctx->unquoted) {
+#else
+ {
+#endif
+ buf = flatcc_json_parser_space(ctx, buf, end);
+ }
+ return buf;
+}
+
+static inline const char *flatcc_json_parser_object_start(flatcc_json_parser_t *ctx, const char *buf, const char *end, int *more)
+{
+ if (buf == end || *buf != '{') {
+ *more = 0;
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_expected_object);
+ }
+ buf = flatcc_json_parser_space(ctx, buf + 1, end);
+ if (buf != end && *buf == '}') {
+ *more = 0;
+ return flatcc_json_parser_space(ctx, buf + 1, end);
+ }
+ *more = 1;
+ return buf;
+}
+
+static inline const char *flatcc_json_parser_object_end(flatcc_json_parser_t *ctx, const char *buf,
+ const char *end, int *more)
+{
+ buf = flatcc_json_parser_space(ctx, buf, end);
+ if (buf == end) {
+ *more = 0;
+ return buf;
+ }
+ if (*buf != ',') {
+ *more = 0;
+ if (*buf != '}') {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_unbalanced_object);
+ } else {
+ return flatcc_json_parser_space(ctx, buf + 1, end);
+ }
+ }
+ buf = flatcc_json_parser_space(ctx, buf + 1, end);
+ if (buf == end) {
+ *more = 0;
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_unbalanced_object);
+ }
+#if FLATCC_JSON_PARSE_ALLOW_TRAILING_COMMA
+ if (*buf == '}') {
+ *more = 0;
+ return flatcc_json_parser_space(ctx, buf + 1, end);
+ }
+#endif
+ *more = 1;
+ return buf;
+}
+
+static inline const char *flatcc_json_parser_array_start(flatcc_json_parser_t *ctx, const char *buf, const char *end, int *more)
+{
+ if (buf == end || *buf != '[') {
+ *more = 0;
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_expected_array);
+ }
+ buf = flatcc_json_parser_space(ctx, buf + 1, end);
+ if (buf != end && *buf == ']') {
+ *more = 0;
+ return flatcc_json_parser_space(ctx, buf + 1, end);
+ }
+ *more = 1;
+ return buf;
+}
+
+static inline const char *flatcc_json_parser_array_end(flatcc_json_parser_t *ctx, const char *buf,
+ const char *end, int *more)
+{
+ buf = flatcc_json_parser_space(ctx, buf, end);
+ if (buf == end) {
+ *more = 0;
+ return buf;
+ }
+ if (*buf != ',') {
+ *more = 0;
+ if (*buf != ']') {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_unbalanced_array);
+ } else {
+ return flatcc_json_parser_space(ctx, buf + 1, end);
+ }
+ }
+ buf = flatcc_json_parser_space(ctx, buf + 1, end);
+ if (buf == end) {
+ *more = 0;
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_unbalanced_array);
+ }
+#if FLATCC_JSON_PARSE_ALLOW_TRAILING_COMMA
+ if (*buf == ']') {
+ *more = 0;
+ return flatcc_json_parser_space(ctx, buf + 1, end);
+ }
+#endif
+ *more = 1;
+ return buf;
+}
+
+/*
+ * Detects if a symbol terminates at a given `pos` relative to the
+ * buffer pointer, or return fast.
+ *
+ * Failure to match is not an error but a recommendation to try
+ * alternative longer suffixes - only if such do not exist will
+ * there be an error. If a match was not eventually found,
+ * the `flatcc_json_parser_unmatched_symbol` should be called to consume
+ * the symbol and generate error messages.
+ *
+ * If a match was detected, ':' and surrounding space is consumed,
+ * or an error is generated.
+ */
+static inline const char *flatcc_json_parser_match_symbol(flatcc_json_parser_t *ctx, const char *buf,
+ const char *end, int pos)
+{
+ const char *mark = buf;
+
+ if (end - buf <= pos) {
+ return mark;
+ }
+#if FLATCC_JSON_PARSE_ALLOW_UNQUOTED
+ if (ctx->unquoted) {
+ if (buf[pos] > 0x20 && buf[pos] != ':') {
+ return mark;
+ }
+ buf += pos;
+ ctx->unquoted = 0;
+ } else {
+#else
+ {
+#endif
+ if (buf[pos] != '\"') {
+ return mark;
+ }
+ buf += pos + 1;
+ }
+ buf = flatcc_json_parser_space(ctx, buf, end);
+ if (buf != end && *buf == ':') {
+ ++buf;
+ return flatcc_json_parser_space(ctx, buf, end);
+ }
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_expected_colon);
+}
+
+static inline const char *flatcc_json_parser_match_type_suffix(flatcc_json_parser_t *ctx, const char *buf, const char *end, int pos)
+{
+ if (end - buf <= pos + 5) {
+ return buf;
+ }
+ if (memcmp(buf + pos, "_type", 5)) {
+ return buf;
+ }
+ return flatcc_json_parser_match_symbol(ctx, buf, end, pos + 5);
+}
+
+const char *flatcc_json_parser_unmatched_symbol(flatcc_json_parser_t *ctx, const char *buf, const char *end);
+
+static inline const char *flatcc_json_parser_coerce_uint64(
+ flatcc_json_parser_t *ctx, const char *buf,
+ const char *end, int value_sign, uint64_t value, uint64_t *v)
+{
+ if (value_sign) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_underflow);
+ }
+ *v = value;
+ return buf;
+}
+
+static inline const char *flatcc_json_parser_coerce_bool(flatcc_json_parser_t *ctx, const char *buf,
+ const char *end, int value_sign, uint64_t value, uint8_t *v)
+{
+ if (value_sign) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_underflow);
+ }
+ *v = (uint8_t)!!value;
+ return buf;
+}
+
+#define __flatcc_json_parser_define_coerce_unsigned(type, basetype, uctype) \
+static inline const char *flatcc_json_parser_coerce_ ## type( \
+ flatcc_json_parser_t *ctx, const char *buf, \
+ const char *end, int value_sign, uint64_t value, basetype *v) \
+{ \
+ if (value_sign) { \
+ return flatcc_json_parser_set_error(ctx, buf, end, \
+ flatcc_json_parser_error_underflow); \
+ } \
+ if (value > uctype ## _MAX) { \
+ return flatcc_json_parser_set_error(ctx, buf, end, \
+ flatcc_json_parser_error_overflow); \
+ } \
+ *v = (basetype)value; \
+ return buf; \
+}
+
+__flatcc_json_parser_define_coerce_unsigned(uint32, uint32_t, UINT32)
+__flatcc_json_parser_define_coerce_unsigned(uint16, uint16_t, UINT16)
+__flatcc_json_parser_define_coerce_unsigned(uint8, uint8_t, UINT8)
+
+#define __flatcc_json_parser_define_coerce_signed(type, basetype, uctype) \
+static inline const char *flatcc_json_parser_coerce_ ## type( \
+ flatcc_json_parser_t *ctx, const char *buf, \
+ const char *end, int value_sign, uint64_t value, basetype *v) \
+{ \
+ if (value_sign) { \
+ if (value > (uint64_t)(uctype ## _MAX) + 1) { \
+ return flatcc_json_parser_set_error(ctx, buf, end, \
+ flatcc_json_parser_error_underflow); \
+ } \
+ *v = (basetype)-(int64_t)value; \
+ } else { \
+ if (value > uctype ## _MAX) { \
+ return flatcc_json_parser_set_error(ctx, buf, end, \
+ flatcc_json_parser_error_overflow); \
+ } \
+ *v = (basetype)value; \
+ } \
+ return buf; \
+}
+
+__flatcc_json_parser_define_coerce_signed(int64, int64_t, INT64)
+__flatcc_json_parser_define_coerce_signed(int32, int32_t, INT32)
+__flatcc_json_parser_define_coerce_signed(int16, int16_t, INT16)
+__flatcc_json_parser_define_coerce_signed(int8, int8_t, INT8)
+
+static inline const char *flatcc_json_parser_coerce_float(
+ flatcc_json_parser_t *ctx, const char *buf,
+ const char *end, int value_sign, uint64_t value, float *v)
+{
+ (void)ctx;
+ (void)end;
+
+ *v = value_sign ? -(float)value : (float)value;
+ return buf;
+}
+
+static inline const char *flatcc_json_parser_coerce_double(
+ flatcc_json_parser_t *ctx, const char *buf,
+ const char *end, int value_sign, uint64_t value, double *v)
+{
+ (void)ctx;
+ (void)end;
+
+ *v = value_sign ? -(double)value : (double)value;
+ return buf;
+}
+
+const char *flatcc_json_parser_double(flatcc_json_parser_t *ctx, const char *buf, const char *end, double *v);
+
+const char *flatcc_json_parser_float(flatcc_json_parser_t *ctx, const char *buf, const char *end, float *v);
+
+/*
+ * If the buffer does not contain a valid start character for a numeric
+ * value, the function will return the input buffer without failure.
+ * This makes it possible to try a symbolic parse.
+ */
+const char *flatcc_json_parser_integer(flatcc_json_parser_t *ctx, const char *buf, const char *end,
+ int *value_sign, uint64_t *value);
+
+/* Returns unchanged buffer without error if `null` is not matched. */
+static inline const char *flatcc_json_parser_null(const char *buf, const char *end)
+{
+ if (end - buf >= 4 && memcmp(buf, "null", 4) == 0) {
+ return buf + 4;
+ }
+ return buf;
+}
+
+static inline const char *flatcc_json_parser_none(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end)
+{
+ if (end - buf >= 4 && memcmp(buf, "null", 4) == 0) {
+ return buf + 4;
+ }
+ return flatcc_json_parser_set_error(ctx, buf, end,
+ flatcc_json_parser_error_union_none_not_null);
+}
+
+/*
+ * `parsers` is a null terminated array of parsers with at least one
+ * valid parser. A numeric literal parser may also be included.
+ */
+#define __flatcc_json_parser_define_integral_parser(type, basetype) \
+static inline const char *flatcc_json_parser_ ## type( \
+ flatcc_json_parser_t *ctx, \
+ const char *buf, const char *end, basetype *v) \
+{ \
+ uint64_t value = 0; \
+ int value_sign = 0; \
+ const char *mark = buf; \
+ \
+ *v = 0; \
+ if (buf == end) { \
+ return buf; \
+ } \
+ buf = flatcc_json_parser_integer(ctx, buf, end, &value_sign, &value); \
+ if (buf != mark) { \
+ return flatcc_json_parser_coerce_ ## type(ctx, \
+ buf, end, value_sign, value, v); \
+ } \
+ return buf; \
+}
+
+__flatcc_json_parser_define_integral_parser(uint64, uint64_t)
+__flatcc_json_parser_define_integral_parser(uint32, uint32_t)
+__flatcc_json_parser_define_integral_parser(uint16, uint16_t)
+__flatcc_json_parser_define_integral_parser(uint8, uint8_t)
+__flatcc_json_parser_define_integral_parser(int64, int64_t)
+__flatcc_json_parser_define_integral_parser(int32, int32_t)
+__flatcc_json_parser_define_integral_parser(int16, int16_t)
+__flatcc_json_parser_define_integral_parser(int8, int8_t)
+
+static inline const char *flatcc_json_parser_bool(flatcc_json_parser_t *ctx, const char *buf, const char *end, uint8_t *v)
+{
+ const char *k;
+ uint8_t tmp;
+
+ k = buf;
+ if (end - buf >= 4 && memcmp(buf, "true", 4) == 0) {
+ *v = 1;
+ return k + 4;
+ } else if (end - buf >= 5 && memcmp(buf, "false", 5) == 0) {
+ *v = 0;
+ return k + 5;
+ }
+ buf = flatcc_json_parser_uint8(ctx, buf, end, &tmp);
+ *v = !!tmp;
+ return buf;
+}
+
+/*
+ * The `parsers` argument is a zero terminated array of parser
+ * functions with increasingly general scopes.
+ *
+ * Symbols can be or'ed together by listing multiple space separated
+ * flags in the source being parsed, like `{ x : "Red Blue" }`.
+ * Intended for flags, but generally available.
+ *
+ * `aggregate` means there are more symbols to follow.
+ *
+ * This function does not return the input `buf` value if the match was
+ * unsuccessful: it will either match or error.
+ */
+typedef const char *flatcc_json_parser_integral_symbol_f(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, int *value_sign, uint64_t *value, int *aggregate);
+
+/*
+ * Raise an error if a syntax like `color: Red Green` is seen unless
+ * explicitly permitted. `color: "Red Green"`, `"color": "Red Green"`,
+ * or `color: Red` is permitted if unquoted is permitted but not an
+ * unquoted list. Google's flatc JSON parser does not allow multiple
+ * symbolic values unless quoted, so this is the default.
+ */
+#if !FLATCC_JSON_PARSE_ALLOW_UNQUOTED || FLATCC_JSON_PARSE_ALLOW_UNQUOTED_LIST
+#define __flatcc_json_parser_init_check_unquoted_list()
+#define __flatcc_json_parser_check_unquoted_list()
+#else
+#define __flatcc_json_parser_init_check_unquoted_list() int list_count = 0;
+#define __flatcc_json_parser_check_unquoted_list() \
+ if (list_count++ && ctx->unquoted) { \
+ return flatcc_json_parser_set_error(ctx, buf, end, \
+ flatcc_json_parser_error_unquoted_symbolic_list); \
+ }
+#endif
+
+#define __flatcc_json_parser_define_symbolic_integral_parser(type, basetype)\
+static const char *flatcc_json_parser_symbolic_ ## type( \
+ flatcc_json_parser_t *ctx, \
+ const char *buf, const char *end, \
+ flatcc_json_parser_integral_symbol_f *parsers[], \
+ basetype *v) \
+{ \
+ flatcc_json_parser_integral_symbol_f **p; \
+ const char *mark; \
+ basetype tmp = 0; \
+ uint64_t value; \
+ int value_sign, aggregate; \
+ __flatcc_json_parser_init_check_unquoted_list() \
+ \
+ *v = 0; \
+ buf = flatcc_json_parser_constant_start(ctx, buf, end); \
+ if (buf == end) { \
+ return buf; \
+ } \
+ do { \
+ p = parsers; \
+ do { \
+ /* call parser function */ \
+ buf = (*p)(ctx, (mark = buf), end, \
+ &value_sign, &value, &aggregate); \
+ if (buf == end) { \
+ return buf; \
+ } \
+ } while (buf == mark && *++p); \
+ if (mark == buf) { \
+ return flatcc_json_parser_set_error(ctx, buf, end, \
+ flatcc_json_parser_error_expected_scalar); \
+ } \
+ __flatcc_json_parser_check_unquoted_list() \
+ if (end == flatcc_json_parser_coerce_ ## type(ctx, \
+ buf, end, value_sign, value, &tmp)) { \
+ return end; \
+ } \
+ /* \
+ * `+=`, not `|=` because we also coerce to float and double, \
+ * and because we need to handle signed values. This may give \
+ * unexpected results with duplicate flags. \
+ */ \
+ *v += tmp; \
+ } while (aggregate); \
+ return buf; \
+}
+
+__flatcc_json_parser_define_symbolic_integral_parser(uint64, uint64_t)
+__flatcc_json_parser_define_symbolic_integral_parser(uint32, uint32_t)
+__flatcc_json_parser_define_symbolic_integral_parser(uint16, uint16_t)
+__flatcc_json_parser_define_symbolic_integral_parser(uint8, uint8_t)
+__flatcc_json_parser_define_symbolic_integral_parser(int64, int64_t)
+__flatcc_json_parser_define_symbolic_integral_parser(int32, int32_t)
+__flatcc_json_parser_define_symbolic_integral_parser(int16, int16_t)
+__flatcc_json_parser_define_symbolic_integral_parser(int8, int8_t)
+
+__flatcc_json_parser_define_symbolic_integral_parser(bool, uint8_t)
+
+/* We still parse integral values, but coerce to float or double. */
+__flatcc_json_parser_define_symbolic_integral_parser(float, float)
+__flatcc_json_parser_define_symbolic_integral_parser(double, double)
+
+/* Parse vector as a base64 or base64url encoded string with no spaces permitted. */
+const char *flatcc_json_parser_build_uint8_vector_base64(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, flatcc_builder_ref_t *ref, int urlsafe);
+
+/*
+ * This doesn't do anything other than validate and advance past
+ * a JSON value which may use unquoted symbols.
+ *
+ * Upon call it is assumed that leading space has been stripped and that
+ * a JSON value is expected (i.e. root, or just after ':' in a
+ * container object, or less likely as an array member). Any trailing
+ * comma is assumed to belong to the parent context. Returns a parse
+ * location stripped from space so container should post call expect
+ * ',', '}', or ']', or EOF if the JSON is valid.
+ */
+const char *flatcc_json_parser_generic_json(flatcc_json_parser_t *ctx, const char *buf, const char *end);
+
+/* Parses a JSON table. */
+typedef const char *flatcc_json_parser_table_f(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, flatcc_builder_ref_t *pref);
+
+/* Parses a JSON struct. */
+typedef const char *flatcc_json_parser_struct_f(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, flatcc_builder_ref_t *pref);
+
+/* Constructs a table, struct, or string object unless the type is 0 or unknown. */
+typedef const char *flatcc_json_parser_union_f(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, uint8_t type, flatcc_builder_ref_t *pref);
+
+typedef int flatcc_json_parser_is_known_type_f(uint8_t type);
+
+/* Called at start by table parsers with at least 1 union. */
+const char *flatcc_json_parser_prepare_unions(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, size_t union_total, size_t *handle);
+
+const char *flatcc_json_parser_finalize_unions(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, size_t handle);
+
+const char *flatcc_json_parser_union(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, size_t union_index,
+ flatbuffers_voffset_t id, size_t handle,
+ flatcc_json_parser_union_f *union_parser);
+
+const char *flatcc_json_parser_union_type(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, size_t union_index,
+ flatbuffers_voffset_t id, size_t handle,
+ flatcc_json_parser_integral_symbol_f *type_parsers[],
+ flatcc_json_parser_union_f *union_parser);
+
+const char *flatcc_json_parser_union_vector(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, size_t union_index,
+ flatbuffers_voffset_t id, size_t handle,
+ flatcc_json_parser_union_f *union_parser);
+
+const char *flatcc_json_parser_union_type_vector(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, size_t union_index,
+ flatbuffers_voffset_t id, size_t handle,
+ flatcc_json_parser_integral_symbol_f *type_parsers[],
+ flatcc_json_parser_union_f *union_parser,
+ flatcc_json_parser_is_known_type_f accept_type);
+
+/*
+ * Parses a table as root.
+ *
+ * Use the flag `flatcc_json_parser_f_with_size` to create a buffer with
+ * size prefix.
+ *
+ * `ctx` may be null or an uninitialized json parser to receive parse results.
+ * `builder` must be a newly initialized or reset builder object.
+ * `buf`, `bufsiz` may be larger than the parsed json if trailing
+ * space or zeroes are expected, but they must represent a valid memory buffer.
+ * `fid` must be null, or a valid file identifier.
+ * `flags` default to 0. See also `flatcc_json_parser_flags`.
+ */
+int flatcc_json_parser_table_as_root(flatcc_builder_t *B, flatcc_json_parser_t *ctx,
+ const char *buf, size_t bufsiz, int flags, const char *fid,
+ flatcc_json_parser_table_f *parser);
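+
+/*
+ * A usage sketch, assuming `monster_parser` is a generated
+ * `flatcc_json_parser_table_f` for the root table (generated names
+ * depend on the schema):
+ *
+ *     flatcc_builder_t builder;
+ *     flatcc_json_parser_t parser;
+ *     const char *json = "{}";
+ *     flatcc_builder_init(&builder);
+ *     if (flatcc_json_parser_table_as_root(&builder, &parser,
+ *             json, strlen(json), 0, NULL, monster_parser)) {
+ *         // non-zero means a parse error; see parser.error/line/pos
+ *     }
+ */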
+
+/*
+ * Similar to `flatcc_json_parser_table_as_root` but parses a struct as
+ * root.
+ */
+int flatcc_json_parser_struct_as_root(flatcc_builder_t *B, flatcc_json_parser_t *ctx,
+ const char *buf, size_t bufsiz, int flags, const char *fid,
+ flatcc_json_parser_struct_f *parser);
+
+#include "pdiagnostic_pop.h"
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* FLATCC_JSON_PARSE_H */
diff --git a/nostrdb/flatcc/flatcc_json_printer.h b/nostrdb/flatcc/flatcc_json_printer.h
@@ -0,0 +1,789 @@
+#ifndef FLATCC_JSON_PRINTER_H
+#define FLATCC_JSON_PRINTER_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Definitions for the default implementation; do not assume these are
+ * always valid.
+ */
+#define FLATCC_JSON_PRINT_FLUSH_SIZE (1024 * 16)
+#define FLATCC_JSON_PRINT_RESERVE 64
+#define FLATCC_JSON_PRINT_BUFFER_SIZE (FLATCC_JSON_PRINT_FLUSH_SIZE + FLATCC_JSON_PRINT_RESERVE)
+
+#ifndef FLATCC_JSON_PRINTER_ALLOC
+#define FLATCC_JSON_PRINTER_ALLOC(n) FLATCC_ALLOC(n)
+#endif
+
+#ifndef FLATCC_JSON_PRINTER_FREE
+#define FLATCC_JSON_PRINTER_FREE(p) FLATCC_FREE(p)
+#endif
+
+#ifndef FLATCC_JSON_PRINTER_REALLOC
+#define FLATCC_JSON_PRINTER_REALLOC(p, n) FLATCC_REALLOC(p, n)
+#endif
+
+/* Initial size of a dynamic buffer that grows exponentially. */
+#define FLATCC_JSON_PRINT_DYN_BUFFER_SIZE 4096
+
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "flatcc/flatcc_rtconfig.h"
+#include "flatcc/flatcc_flatbuffers.h"
+
+/* -DFLATCC_PORTABLE may help if inttypes.h is missing. */
+#ifndef PRId64
+#include <inttypes.h>
+#endif
+
+#define FLATCC_JSON_PRINT_ERROR_MAP(XX) \
+ XX(ok, "ok") \
+ /* \
+ * When the flatbuffer is null, has too small a header, or has \
+ * mismatching identifier when a match was requested. \
+ */ \
+ XX(bad_input, "bad input") \
+ XX(deep_recursion, "deep recursion") \
+ /* \
+ * When the output was larger than the available fixed length buffer, \
+ * or dynamic allocation could not grow the buffer sufficiently. \
+ */ \
+ XX(overflow, "overflow")
+
+enum flatcc_json_printer_error_no {
+#define XX(no, str) flatcc_json_printer_error_##no,
+ FLATCC_JSON_PRINT_ERROR_MAP(XX)
+#undef XX
+};
+
+#define flatcc_json_printer_ok flatcc_json_printer_error_ok
+
+typedef struct flatcc_json_printer_ctx flatcc_json_printer_t;
+
+typedef void flatcc_json_printer_flush_f(flatcc_json_printer_t *ctx, int all);
+
+struct flatcc_json_printer_ctx {
+ char *buf;
+ size_t size;
+ size_t flush_size;
+ size_t total;
+ const char *pflush;
+ char *p;
+ uint8_t own_buffer;
+ uint8_t indent;
+ uint8_t unquote;
+ uint8_t noenum;
+ uint8_t skip_default;
+ uint8_t force_default;
+ int level;
+ int error;
+
+ void *fp;
+ flatcc_json_printer_flush_f *flush;
+};
+
+static inline void flatcc_json_printer_set_error(flatcc_json_printer_t *ctx, int err)
+{
+ if (!ctx->error) {
+ ctx->error = err;
+ }
+}
+
+const char *flatcc_json_printer_error_string(int err);
+
+static inline int flatcc_json_printer_get_error(flatcc_json_printer_t *ctx)
+{
+ return ctx->error;
+}
+
+/*
+ * Call to reuse the context between parses without
+ * returning the buffer. If a file pointer is being used,
+ * it will remain open.
+ *
+ * Reset does not affect the formatting settings (indentation and
+ * operational flags), but does zero the indentation level.
+ */
+static inline void flatcc_json_printer_reset(flatcc_json_printer_t *ctx)
+{
+ ctx->p = ctx->buf;
+ ctx->level = 0;
+ ctx->total = 0;
+ ctx->error = 0;
+}
+
+/*
+ * A custom init function with a custom flush function can be
+ * implemented. A few have been provided: init with an external fixed
+ * length buffer, and init with a dynamically growing buffer.
+ *
+ * Because there are a lot of small print functions, it is essentially
+ * always faster to print to local buffer than moving to io directly
+ * such as using fprintf or fwrite. The flush callback is used to
+ * move data when enough has been collected.
+ *
+ * `fp` should be of type `FILE *` but we do not enforce it here
+ * because it allows the header to be independent of <stdio.h>
+ * when not required. If `fp` is null, it defaults to stdout.
+ *
+ * Returns -1 on alloc error (no cleanup needed), or 0 on success.
+ * Eventually the clear method must be called to return memory.
+ *
+ * The file pointer may be stdout or a custom file. The file pointer
+ * is not affected by reset or clear and should be closed manually.
+ *
+ * `set_flags` and related may be called subsequently to modify
+ * behavior.
+ */
+int flatcc_json_printer_init(flatcc_json_printer_t *ctx, void *fp);
+
+/*
+ * Prints to external buffer and sets overflow error if buffer is too
+ * small. Earlier content is then overwritten. A custom version of this
+ * function could flush the content to elsewhere before allowing the
+ * buffer content to be overwritten. The `buffers_size` must be large
+ * enough to hold `FLATCC_JSON_PRINT_RESERVED_SIZE` which is small but
+ * large enough value to hold entire numbers and the like.
+ *
+ * It is not strictly necessary to call clear because the buffer is
+ * external, but still good form and case the context type is changed
+ * later.
+ *
+ * Returns -1 on buffer size error (no cleanup needed), or 0 on success.
+ *
+ * `set_flags` and related may be called subsequently to modify
+ * behavior.
+ */
+int flatcc_json_printer_init_buffer(flatcc_json_printer_t *ctx, char *buffer, size_t buffer_size);
+
+/*
+ * Returns the current buffer pointer and also the content size in
+ * `buffer_size` if it is not null. The operation is not very useful
+ * for file oriented printers (created with `init`) and will then only
+ * return the unflushed buffer content. For fixed length buffers
+ * (`init_buffer`), only the last content is available if the buffer
+ * overflowed. Works well with `init_dynamic_buffer` when the dynamic
+ * buffer is to be reused; otherwise `finalize_dynamic_buffer` could be
+ * more appropriate.
+ *
+ * The returned buffer is zero terminated.
+ *
+ * The returned pointer is only valid until next operation and should
+ * not deallocated manually.
+ */
+void *flatcc_json_printer_get_buffer(flatcc_json_printer_t *ctx, size_t *buffer_size);
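+
+/*
+ * A sketch of printing into a fixed external buffer and reading the
+ * result back (the actual print calls come from generated code):
+ *
+ *     char buf[FLATCC_JSON_PRINT_BUFFER_SIZE];
+ *     flatcc_json_printer_t ctx;
+ *     size_t n;
+ *     if (flatcc_json_printer_init_buffer(&ctx, buf, sizeof(buf)) == 0) {
+ *         // ... print ...
+ *         const char *json = flatcc_json_printer_get_buffer(&ctx, &n);
+ *     }
+ */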
+
+/*
+ * Set to non-zero if names and enum symbols can be unquoted thus
+ * diverging from standard JSON while remaining compatible with `flatc`
+ * JSON flavor.
+ */
+static inline void flatcc_json_printer_set_unquoted(flatcc_json_printer_t *ctx, int x)
+{
+ ctx->unquote = !!x;
+}
+
+/*
+ * Set to non-zero if enums should always be printed as numbers.
+ * Otherwise enums are printed as a symbol for member values, and as
+ * numbers for other values.
+ *
+ * NOTE: this setting will not affect code generated with enum mapping
+ * disabled - statically disabling enum mapping is significantly faster
+ * for enums, less so for union types.
+ */
+static inline void flatcc_json_printer_set_noenum(flatcc_json_printer_t *ctx, int x)
+{
+ ctx->noenum = !!x;
+}
+
+/*
+ * Skip printing an existing scalar field if it equals the default value.
+ * Note that this setting is not mutually exclusive to `set_force_default`.
+ */
+static inline void flatcc_json_printer_set_skip_default(flatcc_json_printer_t *ctx, int x)
+{
+ ctx->skip_default = !!x;
+}
+
+/*
+ * Override skipping absent scalar fields and print the default value.
+ * Note that this setting is not mutually exclusive to `set_skip_default`.
+ */
+static inline void flatcc_json_printer_set_force_default(flatcc_json_printer_t *ctx, int x)
+{
+ ctx->force_default = !!x;
+}
+
+
+/*
+ * Set pretty-print indentation in number of spaces. 0 (default) is
+ * compact with no spaces or linebreaks; anything above
+ * triggers pretty print.
+ */
+static inline void flatcc_json_printer_set_indent(flatcc_json_printer_t *ctx, uint8_t x)
+{
+ ctx->indent = x;
+}
+
+/*
+ * Override the default compact valid JSON format with a
+ * pretty printed non-strict version. Enums are translated
+ * to names, which is also the default.
+ */
+static inline void flatcc_json_printer_set_nonstrict(flatcc_json_printer_t *ctx)
+{
+ flatcc_json_printer_set_indent(ctx, 2);
+ flatcc_json_printer_set_unquoted(ctx, 1);
+ flatcc_json_printer_set_noenum(ctx, 0);
+}
+
+enum flatcc_json_printer_flags {
+ flatcc_json_printer_f_unquote = 1,
+ flatcc_json_printer_f_noenum = 2,
+ flatcc_json_printer_f_skip_default = 4,
+ flatcc_json_printer_f_force_default = 8,
+ flatcc_json_printer_f_pretty = 16,
+ flatcc_json_printer_f_nonstrict = 32,
+};
+
+/*
+ * May be called instead of setting operational modes individually.
+ * Formatting is strict quoted json without pretty printing by default.
+ *
+ * flags are:
+ *
+ * `unquote`,
+ * `noenum`,
+ * `skip_default`,
+ * `force_default`,
+ * `pretty`,
+ * `nonstrict`
+ *
+ * `pretty` flag sets indentation to 2.
+ * `nonstrict` implies: `noenum`, `unquote`, `pretty`.
+ */
+static inline void flatcc_json_printer_set_flags(flatcc_json_printer_t *ctx, int flags)
+{
+ ctx->unquote = !!(flags & flatcc_json_printer_f_unquote);
+ ctx->noenum = !!(flags & flatcc_json_printer_f_noenum);
+ ctx->skip_default = !!(flags & flatcc_json_printer_f_skip_default);
+ ctx->force_default = !!(flags & flatcc_json_printer_f_force_default);
+ if (flags & flatcc_json_printer_f_pretty) {
+ flatcc_json_printer_set_indent(ctx, 2);
+ }
+ if (flags & flatcc_json_printer_f_nonstrict) {
+ flatcc_json_printer_set_nonstrict(ctx);
+ }
+}
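+
+/*
+ * For example, pretty printed output that also skips default values:
+ *
+ *     flatcc_json_printer_set_flags(&ctx,
+ *             flatcc_json_printer_f_pretty | flatcc_json_printer_f_skip_default);
+ */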
+
+
+/*
+ * Detects if the context type uses dynamically allocated memory
+ * using malloc and realloc and frees any such memory.
+ *
+ * Not all context types need to be cleared.
+ */
+void flatcc_json_printer_clear(flatcc_json_printer_t *ctx);
+
+/*
+ * Ensures that there is always buffer capacity for printing the next
+ * primitive with delimiters.
+ *
+ * Only flushes complete flush units and is inexpensive to call.
+ * The content buffer has an extra reserve which ensures basic
+ * data types and delimiters can always be printed after a partial
+ * flush. At the end, a `flush` is required to flush the
+ * remaining incomplete buffer data.
+ *
+ * Numbers do not call partial flush but will always fit into the reserve
+ * capacity after a partial flush, also surrounded by delimiters.
+ *
+ * Variable length operations generally submit a partial flush so it is
+ * safe to print a number after a name without flushing, but vectors of
+ * numbers must (and do) issue a partial flush between elements. This is
+ * handled automatically but must be considered if using the primitives
+ * for special purposes. Because repeated partial flushes are very cheap
+ * this is only a concern for high performance applications.
+ *
+ * When indentation is enabled, a partial flush is also issued
+ * automatically.
+ */
+static inline void flatcc_json_printer_flush_partial(flatcc_json_printer_t *ctx)
+{
+ if (ctx->p >= ctx->pflush) {
+ ctx->flush(ctx, 0);
+ }
+}
+
+/* Returns the total printed size, both flushed and still in the buffer. */
+static inline size_t flatcc_json_printer_total(flatcc_json_printer_t *ctx)
+{
+ return ctx->total + (size_t)(ctx->p - ctx->buf);
+}
+
+/*
+ * Flush the remaining data not flushed by partial flush. It is valid to
+ * call at any point if it is acceptable to have unaligned flush units,
+ * but this is not desirable if, for example, compression or encryption
+ * is added to the flush pipeline.
+ *
+ * Not called automatically at the end of printing a flatbuffer object
+ * in case more data needs to be appended without submitting incomplete
+ * flush units prematurely - for example adding a newline at the end.
+ *
+ * The flush behavior depends on the underlying `ctx` object, for
+ * example dynamic buffers have no distinction between partial and full
+ * flushes - here it is merely ensured that the buffer always has a
+ * reserve capacity left.
+ *
+ * Returns the total printed size.
+ */
+static inline size_t flatcc_json_printer_flush(flatcc_json_printer_t *ctx)
+{
+ ctx->flush(ctx, 1);
+ return flatcc_json_printer_total(ctx);
+}
+
+/*
+ * Helper functions to print anything into the json buffer.
+ * Strings are escaped.
+ *
+ * When pretty printing (indent > 0), level 0 has special significance -
+ * so if wrapping printed json in a manually printed container json
+ * object, these functions can help manage this.
+ */
+
+/* Escaped and quoted string. */
+void flatcc_json_printer_string(flatcc_json_printer_t *ctx, const char *s, size_t n);
+/* Unescaped and unquoted string. */
+void flatcc_json_printer_write(flatcc_json_printer_t *ctx, const char *s, size_t n);
+/* Prints a newline and issues a partial flush. */
+void flatcc_json_printer_nl(flatcc_json_printer_t *ctx);
+/* Like numbers, a partial flush is not issued. */
+void flatcc_json_printer_char(flatcc_json_printer_t *ctx, char c);
+/* Indents and issues a partial flush. */
+void flatcc_json_printer_indent(flatcc_json_printer_t *ctx);
+/* Adjusts indentation level, usually +/-1. */
+void flatcc_json_printer_add_level(flatcc_json_printer_t *ctx, int n);
+/* Returns current indentation level (0 is top level). */
+int flatcc_json_printer_get_level(flatcc_json_printer_t *ctx);
+
+/*
+ * If called explicitly, be aware that repeated calls to numeric
+ * printers may cause buffer overflow without a flush in-between.
+ */
+void flatcc_json_printer_uint8(flatcc_json_printer_t *ctx, uint8_t v);
+void flatcc_json_printer_uint16(flatcc_json_printer_t *ctx, uint16_t v);
+void flatcc_json_printer_uint32(flatcc_json_printer_t *ctx, uint32_t v);
+void flatcc_json_printer_uint64(flatcc_json_printer_t *ctx, uint64_t v);
+void flatcc_json_printer_int8(flatcc_json_printer_t *ctx, int8_t v);
+void flatcc_json_printer_int16(flatcc_json_printer_t *ctx, int16_t v);
+void flatcc_json_printer_int32(flatcc_json_printer_t *ctx, int32_t v);
+void flatcc_json_printer_int64(flatcc_json_printer_t *ctx, int64_t v);
+void flatcc_json_printer_bool(flatcc_json_printer_t *ctx, int v);
+void flatcc_json_printer_float(flatcc_json_printer_t *ctx, float v);
+void flatcc_json_printer_double(flatcc_json_printer_t *ctx, double v);
+
+void flatcc_json_printer_enum(flatcc_json_printer_t *ctx,
+ const char *symbol, size_t len);
+
+/*
+ * Convenience function to add a trailing newline, flush the buffer,
+ * test for error and reset the context for reuse.
+ *
+ * Returns total size printed or < 0 on error.
+ *
+ * This function makes most sense for file oriented output.
+ * See also `finalize_dynamic_buffer`.
+ */
+static inline int flatcc_json_printer_finalize(flatcc_json_printer_t *ctx)
+{
+ int ret;
+ flatcc_json_printer_nl(ctx);
+ ret = (int)flatcc_json_printer_flush(ctx);
+ if (ctx->error) {
+ ret = -1;
+ }
+ flatcc_json_printer_reset(ctx);
+ return ret;
+}
+
+/*
+ * Allocates a small buffer and grows it dynamically.
+ * Buffer survives past reset. To reduce size between uses, call clear
+ * followed by init call. To reuse buffer just call reset between uses.
+ * If `buffer_size` is 0 a sensible default is used. The size is
+ * automatically rounded up to the reserved size if too small.
+ *
+ * Returns -1 on alloc error (no cleanup needed), or 0 on success.
+ * Eventually the clear method must be called to return memory.
+ *
+ * `set_flags` and related may be called subsequently to modify
+ * behavior.
+ */
+int flatcc_json_printer_init_dynamic_buffer(flatcc_json_printer_t *ctx, size_t buffer_size);
+
+/*
+ * Similar to calling `finalize`, but returns the buffer and does NOT
+ * reset; instead it clears the printer object, and the returned buffer
+ * must be deallocated with `free`.
+ *
+ * The returned buffer is zero terminated.
+ *
+ * NOTE: it is entirely optional to use this method. For repeated use
+ * of dynamic buffers, `newline` (or not) followed by `get_buffer`
+ * and `reset` is an alternative.
+ *
+ * Stores the printed buffer size in `buffer_size` if it is not null.
+ *
+ * See also `get_dynamic_buffer`.
+ */
+void *flatcc_json_printer_finalize_dynamic_buffer(flatcc_json_printer_t *ctx, size_t *buffer_size);
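+
+/*
+ * A dynamic buffer round trip sketch (print calls from generated code
+ * omitted):
+ *
+ *     flatcc_json_printer_t ctx;
+ *     size_t size;
+ *     if (flatcc_json_printer_init_dynamic_buffer(&ctx, 0) == 0) {
+ *         // ... print ...
+ *         char *json = flatcc_json_printer_finalize_dynamic_buffer(&ctx, &size);
+ *         // use json, then free(json)
+ *     }
+ */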
+
+
+/*************************************************************
+ * The following is normally only used by generated code.
+ *************************************************************/
+
+typedef struct flatcc_json_printer_table_descriptor flatcc_json_printer_table_descriptor_t;
+
+struct flatcc_json_printer_table_descriptor {
+ const void *table;
+ const void *vtable;
+ int vsize;
+ int ttl;
+ int count;
+};
+
+typedef struct flatcc_json_printer_union_descriptor flatcc_json_printer_union_descriptor_t;
+
+struct flatcc_json_printer_union_descriptor {
+ const void *member;
+ int ttl;
+ uint8_t type;
+};
+
+typedef void flatcc_json_printer_table_f(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td);
+
+typedef void flatcc_json_printer_struct_f(flatcc_json_printer_t *ctx,
+ const void *p);
+
+typedef void flatcc_json_printer_union_f(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_union_descriptor_t *ud);
+
+/* Generated value to name map callbacks. */
+typedef void flatcc_json_printer_union_type_f(flatcc_json_printer_t *ctx, flatbuffers_utype_t type);
+typedef void flatcc_json_printer_uint8_enum_f(flatcc_json_printer_t *ctx, uint8_t v);
+typedef void flatcc_json_printer_uint16_enum_f(flatcc_json_printer_t *ctx, uint16_t v);
+typedef void flatcc_json_printer_uint32_enum_f(flatcc_json_printer_t *ctx, uint32_t v);
+typedef void flatcc_json_printer_uint64_enum_f(flatcc_json_printer_t *ctx, uint64_t v);
+typedef void flatcc_json_printer_int8_enum_f(flatcc_json_printer_t *ctx, int8_t v);
+typedef void flatcc_json_printer_int16_enum_f(flatcc_json_printer_t *ctx, int16_t v);
+typedef void flatcc_json_printer_int32_enum_f(flatcc_json_printer_t *ctx, int32_t v);
+typedef void flatcc_json_printer_int64_enum_f(flatcc_json_printer_t *ctx, int64_t v);
+typedef void flatcc_json_printer_bool_enum_f(flatcc_json_printer_t *ctx, flatbuffers_bool_t v);
+
+#define __define_print_scalar_field_proto(TN, T) \
+void flatcc_json_printer_ ## TN ## _field(flatcc_json_printer_t *ctx, \
+ flatcc_json_printer_table_descriptor_t *td, \
+ int id, const char *name, size_t len, T v);
+
+#define __define_print_scalar_optional_field_proto(TN, T) \
+void flatcc_json_printer_ ## TN ## _optional_field( \
+ flatcc_json_printer_t *ctx, \
+ flatcc_json_printer_table_descriptor_t *td, \
+ int id, const char *name, size_t len);
+
+#define __define_print_scalar_struct_field_proto(TN, T) \
+void flatcc_json_printer_ ## TN ## _struct_field(flatcc_json_printer_t *ctx,\
+ int index, const void *p, size_t offset, \
+ const char *name, size_t len);
+
+#define __define_print_scalar_array_struct_field_proto(TN, T) \
+void flatcc_json_printer_ ## TN ## _array_struct_field( \
+ flatcc_json_printer_t *ctx, \
+ int index, const void *p, size_t offset, \
+ const char *name, size_t len, size_t count);
+
+#define __define_print_enum_array_struct_field_proto(TN, T) \
+void flatcc_json_printer_ ## TN ## _enum_array_struct_field( \
+ flatcc_json_printer_t *ctx, \
+ int index, const void *p, size_t offset, \
+ const char *name, size_t len, size_t count, \
+ flatcc_json_printer_ ## TN ##_enum_f *pf);
+
+#define __define_print_enum_struct_field_proto(TN, T) \
+void flatcc_json_printer_ ## TN ## _enum_struct_field( \
+ flatcc_json_printer_t *ctx, \
+ int index, const void *p, size_t offset, \
+ const char *name, size_t len, \
+ flatcc_json_printer_ ## TN ##_enum_f *pf);
+
+#define __define_print_enum_field_proto(TN, T) \
+void flatcc_json_printer_ ## TN ## _enum_field(flatcc_json_printer_t *ctx, \
+ flatcc_json_printer_table_descriptor_t *td, \
+ int id, const char *name, size_t len, T v, \
+ flatcc_json_printer_ ## TN ##_enum_f *pf);
+
+#define __define_print_enum_optional_field_proto(TN, T) \
+void flatcc_json_printer_ ## TN ## _enum_optional_field( \
+ flatcc_json_printer_t *ctx, \
+ flatcc_json_printer_table_descriptor_t *td, \
+ int id, const char *name, size_t len, \
+ flatcc_json_printer_ ## TN ##_enum_f *pf);
+
+#define __define_print_scalar_vector_field_proto(TN, T) \
+void flatcc_json_printer_ ## TN ## _vector_field(flatcc_json_printer_t *ctx,\
+ flatcc_json_printer_table_descriptor_t *td, \
+ int id, const char *name, size_t len);
+
+#define __define_print_enum_vector_field_proto(TN, T) \
+void flatcc_json_printer_ ## TN ## _enum_vector_field( \
+ flatcc_json_printer_t *ctx, \
+ flatcc_json_printer_table_descriptor_t *td, \
+ int id, const char *name, size_t len, \
+ flatcc_json_printer_ ## TN ##_enum_f *pf);
+
+__define_print_scalar_field_proto(uint8, uint8_t)
+__define_print_scalar_field_proto(uint16, uint16_t)
+__define_print_scalar_field_proto(uint32, uint32_t)
+__define_print_scalar_field_proto(uint64, uint64_t)
+__define_print_scalar_field_proto(int8, int8_t)
+__define_print_scalar_field_proto(int16, int16_t)
+__define_print_scalar_field_proto(int32, int32_t)
+__define_print_scalar_field_proto(int64, int64_t)
+__define_print_scalar_field_proto(bool, flatbuffers_bool_t)
+__define_print_scalar_field_proto(float, float)
+__define_print_scalar_field_proto(double, double)
+
+__define_print_enum_field_proto(uint8, uint8_t)
+__define_print_enum_field_proto(uint16, uint16_t)
+__define_print_enum_field_proto(uint32, uint32_t)
+__define_print_enum_field_proto(uint64, uint64_t)
+__define_print_enum_field_proto(int8, int8_t)
+__define_print_enum_field_proto(int16, int16_t)
+__define_print_enum_field_proto(int32, int32_t)
+__define_print_enum_field_proto(int64, int64_t)
+__define_print_enum_field_proto(bool, flatbuffers_bool_t)
+
+__define_print_scalar_optional_field_proto(uint8, uint8_t)
+__define_print_scalar_optional_field_proto(uint16, uint16_t)
+__define_print_scalar_optional_field_proto(uint32, uint32_t)
+__define_print_scalar_optional_field_proto(uint64, uint64_t)
+__define_print_scalar_optional_field_proto(int8, int8_t)
+__define_print_scalar_optional_field_proto(int16, int16_t)
+__define_print_scalar_optional_field_proto(int32, int32_t)
+__define_print_scalar_optional_field_proto(int64, int64_t)
+__define_print_scalar_optional_field_proto(bool, flatbuffers_bool_t)
+__define_print_scalar_optional_field_proto(float, float)
+__define_print_scalar_optional_field_proto(double, double)
+
+__define_print_enum_optional_field_proto(uint8, uint8_t)
+__define_print_enum_optional_field_proto(uint16, uint16_t)
+__define_print_enum_optional_field_proto(uint32, uint32_t)
+__define_print_enum_optional_field_proto(uint64, uint64_t)
+__define_print_enum_optional_field_proto(int8, int8_t)
+__define_print_enum_optional_field_proto(int16, int16_t)
+__define_print_enum_optional_field_proto(int32, int32_t)
+__define_print_enum_optional_field_proto(int64, int64_t)
+__define_print_enum_optional_field_proto(bool, flatbuffers_bool_t)
+
+__define_print_scalar_struct_field_proto(uint8, uint8_t)
+__define_print_scalar_struct_field_proto(uint16, uint16_t)
+__define_print_scalar_struct_field_proto(uint32, uint32_t)
+__define_print_scalar_struct_field_proto(uint64, uint64_t)
+__define_print_scalar_struct_field_proto(int8, int8_t)
+__define_print_scalar_struct_field_proto(int16, int16_t)
+__define_print_scalar_struct_field_proto(int32, int32_t)
+__define_print_scalar_struct_field_proto(int64, int64_t)
+__define_print_scalar_struct_field_proto(bool, flatbuffers_bool_t)
+__define_print_scalar_struct_field_proto(float, float)
+__define_print_scalar_struct_field_proto(double, double)
+
+/*
+ * char arrays are special as there are no char fields
+ * without arrays and because they are printed as strings.
+ */
+__define_print_scalar_array_struct_field_proto(char, char)
+
+__define_print_scalar_array_struct_field_proto(uint8, uint8_t)
+__define_print_scalar_array_struct_field_proto(uint16, uint16_t)
+__define_print_scalar_array_struct_field_proto(uint32, uint32_t)
+__define_print_scalar_array_struct_field_proto(uint64, uint64_t)
+__define_print_scalar_array_struct_field_proto(int8, int8_t)
+__define_print_scalar_array_struct_field_proto(int16, int16_t)
+__define_print_scalar_array_struct_field_proto(int32, int32_t)
+__define_print_scalar_array_struct_field_proto(int64, int64_t)
+__define_print_scalar_array_struct_field_proto(bool, flatbuffers_bool_t)
+__define_print_scalar_array_struct_field_proto(float, float)
+__define_print_scalar_array_struct_field_proto(double, double)
+
+__define_print_enum_array_struct_field_proto(uint8, uint8_t)
+__define_print_enum_array_struct_field_proto(uint16, uint16_t)
+__define_print_enum_array_struct_field_proto(uint32, uint32_t)
+__define_print_enum_array_struct_field_proto(uint64, uint64_t)
+__define_print_enum_array_struct_field_proto(int8, int8_t)
+__define_print_enum_array_struct_field_proto(int16, int16_t)
+__define_print_enum_array_struct_field_proto(int32, int32_t)
+__define_print_enum_array_struct_field_proto(int64, int64_t)
+__define_print_enum_array_struct_field_proto(bool, flatbuffers_bool_t)
+
+__define_print_enum_struct_field_proto(uint8, uint8_t)
+__define_print_enum_struct_field_proto(uint16, uint16_t)
+__define_print_enum_struct_field_proto(uint32, uint32_t)
+__define_print_enum_struct_field_proto(uint64, uint64_t)
+__define_print_enum_struct_field_proto(int8, int8_t)
+__define_print_enum_struct_field_proto(int16, int16_t)
+__define_print_enum_struct_field_proto(int32, int32_t)
+__define_print_enum_struct_field_proto(int64, int64_t)
+__define_print_enum_struct_field_proto(bool, flatbuffers_bool_t)
+
+__define_print_scalar_vector_field_proto(uint8, uint8_t)
+__define_print_scalar_vector_field_proto(uint16, uint16_t)
+__define_print_scalar_vector_field_proto(uint32, uint32_t)
+__define_print_scalar_vector_field_proto(uint64, uint64_t)
+__define_print_scalar_vector_field_proto(int8, int8_t)
+__define_print_scalar_vector_field_proto(int16, int16_t)
+__define_print_scalar_vector_field_proto(int32, int32_t)
+__define_print_scalar_vector_field_proto(int64, int64_t)
+__define_print_scalar_vector_field_proto(bool, flatbuffers_bool_t)
+__define_print_scalar_vector_field_proto(float, float)
+__define_print_scalar_vector_field_proto(double, double)
+
+__define_print_enum_vector_field_proto(uint8, uint8_t)
+__define_print_enum_vector_field_proto(uint16, uint16_t)
+__define_print_enum_vector_field_proto(uint32, uint32_t)
+__define_print_enum_vector_field_proto(uint64, uint64_t)
+__define_print_enum_vector_field_proto(int8, int8_t)
+__define_print_enum_vector_field_proto(int16, int16_t)
+__define_print_enum_vector_field_proto(int32, int32_t)
+__define_print_enum_vector_field_proto(int64, int64_t)
+__define_print_enum_vector_field_proto(bool, flatbuffers_bool_t)
+
+void flatcc_json_printer_uint8_vector_base64_field(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len, int urlsafe);
+
+/*
+ * If `fid` is null, the identifier is not checked and is allowed to be
+ * entirely absent.
+ *
+ * The buffer must at least be aligned to uoffset_t on systems that
+ * require aligned memory addresses (as always for flatbuffers).
+ */
+int flatcc_json_printer_table_as_root(flatcc_json_printer_t *ctx,
+ const void *buf, size_t bufsiz, const char *fid,
+ flatcc_json_printer_table_f *pf);
+
+int flatcc_json_printer_struct_as_root(flatcc_json_printer_t *ctx,
+ const void *buf, size_t bufsiz, const char *fid,
+ flatcc_json_printer_struct_f *pf);
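+
+/*
+ * A minimal usage sketch (not part of this header), assuming the
+ * printer init/flush/clear functions declared earlier in this header;
+ * `MyTable_print_json_table` stands in for a generated
+ * `<table>_print_json_table` function and "MYID" for a schema file
+ * identifier - both names are hypothetical:
+ *
+ *     flatcc_json_printer_t ctx;
+ *     flatcc_json_printer_init(&ctx, stdout);
+ *     if (flatcc_json_printer_table_as_root(&ctx, buf, bufsiz, "MYID",
+ *             MyTable_print_json_table) < 0) {
+ *         // handle print error
+ *     }
+ *     flatcc_json_printer_flush(&ctx);
+ *     flatcc_json_printer_clear(&ctx);
+ */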
+
+/*
+ * Call before and after enum flags to ensure proper quotation. Enum
+ * quotes may be configured at runtime, but regardless of this, multiple
+ * flags may be forced to be quoted depending on a compile time flag
+ * since not all parsers can handle unquoted space-separated values
+ * even if they handle non-strict unquoted JSON otherwise.
+ *
+ * Flag printing should only be used when the value is not empty (0)
+ * and when there are no unknown flags in the value. Otherwise print
+ * the numeric value. The auto generated code deals with this.
+ *
+ * This bit twiddling hack may be useful:
+ *
+ * `multiple = 0 != (v & (v - 1));`
+ */
+void flatcc_json_printer_delimit_enum_flags(flatcc_json_printer_t *ctx, int multiple);
+
+/* The index increments from 0 and is used to emit separating spaces. It is not the flag bit position. */
+void flatcc_json_printer_enum_flag(flatcc_json_printer_t *ctx, int index, const char *symbol, size_t len);
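+
+/*
+ * Illustration (hypothetical caller; generated code normally does
+ * this): quoting is only forced when more than one flag bit is set,
+ * which the bit twiddling hack above detects.
+ *
+ *     int multiple = 0 != (v & (v - 1));
+ *     flatcc_json_printer_delimit_enum_flags(ctx, multiple);
+ *     // one flatcc_json_printer_enum_flag() call per flag set in v
+ *     flatcc_json_printer_delimit_enum_flags(ctx, multiple);
+ */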
+
+/* A struct inside another struct, as opposed to inside a table or a root. */
+void flatcc_json_printer_embedded_struct_field(flatcc_json_printer_t *ctx,
+ int index, const void *p, size_t offset,
+ const char *name, size_t len,
+ flatcc_json_printer_struct_f pf);
+
+void flatcc_json_printer_embedded_struct_array_field(flatcc_json_printer_t *ctx,
+ int index, const void *p, size_t offset,
+ const char *name, size_t len,
+ size_t size, size_t count,
+ flatcc_json_printer_struct_f pf);
+
+void flatcc_json_printer_struct_field(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len,
+ flatcc_json_printer_struct_f *pf);
+
+void flatcc_json_printer_string_field(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len);
+
+void flatcc_json_printer_string_vector_field(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len);
+
+void flatcc_json_printer_table_field(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len,
+ flatcc_json_printer_table_f pf);
+
+void flatcc_json_printer_struct_vector_field(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len,
+ size_t size,
+ flatcc_json_printer_struct_f pf);
+
+void flatcc_json_printer_table_vector_field(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len,
+ flatcc_json_printer_table_f pf);
+
+void flatcc_json_printer_union_vector_field(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len,
+ flatcc_json_printer_union_type_f ptf,
+ flatcc_json_printer_union_f pf);
+
+void flatcc_json_printer_struct_as_nested_root(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len,
+ const char *fid,
+ flatcc_json_printer_struct_f *pf);
+
+void flatcc_json_printer_table_as_nested_root(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len,
+ const char *fid,
+ flatcc_json_printer_table_f pf);
+
+void flatcc_json_printer_union_field(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len,
+ flatcc_json_printer_union_type_f ptf,
+ flatcc_json_printer_union_f pf);
+
+void flatcc_json_printer_union_table(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_union_descriptor_t *ud,
+ flatcc_json_printer_table_f pf);
+
+void flatcc_json_printer_union_struct(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_union_descriptor_t *ud,
+ flatcc_json_printer_struct_f pf);
+
+void flatcc_json_printer_union_string(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_union_descriptor_t *ud);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* FLATCC_JSON_PRINTER_H */
diff --git a/nostrdb/flatcc/flatcc_portable.h b/nostrdb/flatcc/flatcc_portable.h
@@ -0,0 +1,14 @@
+#ifndef FLATCC_PORTABLE_H
+#define FLATCC_PORTABLE_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "flatcc/portable/portable_basic.h"
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* FLATCC_PORTABLE_H */
diff --git a/nostrdb/flatcc/flatcc_prologue.h b/nostrdb/flatcc/flatcc_prologue.h
@@ -0,0 +1,8 @@
+/* Include guard intentionally left out. */
+
+#define PDIAGNOSTIC_IGNORE_UNUSED
+#include "pdiagnostic_push.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
diff --git a/nostrdb/flatcc/flatcc_refmap.h b/nostrdb/flatcc/flatcc_refmap.h
@@ -0,0 +1,144 @@
+/*
+ * The flatcc builder supports storing a pointer to a refmap
+ * and wraps some operations to make them work as a dummy
+ * even if no refmap has been set. This makes optional
+ * DAG preservation possible during clone operations.
+ *
+ * A refmap maps a source address to a builder reference.
+ *
+ * This is just a map, but the semantics are important:
+ *
+ * The map preserves identity of the source. It is not a
+ * cache because cache eviction would fail to properly track
+ * identity.
+ *
+ * The map is used for memoization during object cloning and
+ * may also be used by user logic doing similar operations.
+ * This ensures that identity is preserved so a source object is
+ * not duplicated which could lead to either loss of semantic
+ * information, or an explosion in size, or both. In some, or
+ * even most, cases this concern may not be important, but when
+ * it is important, it is important.
+ *
+ * The source address must not be reused for different content
+ * for the lifetime of the map, although the content does not
+ * have to be valid or even exist at that location since the source
+ * address is just used as a key.
+ *
+ * The lifetime may be a single clone operation which then
+ * tracks child object references as well, or it may be the
+ * lifetime of the buffer builder.
+ *
+ * The map may be flushed explicitly when the source addresses
+ * are no longer unique, such as when reusing a memory buffer,
+ * and when identity preservation is no longer important.
+ * Flushing a map is essentially the same as ending a lifetime.
+ *
+ * Multiple maps may exist concurrently for example if cloning
+ * an object twice into two new objects that should have
+ * separate identities. This is especially true and necessary
+ * when creating a new nested buffer because the nested buffer
+ * cannot share references with the parent. Cloning an object
+ * that contains a nested buffer does not require multiple maps
+ * because the nested buffer is then opaque.
+ */
+
+#ifndef FLATCC_REFMAP_H
+#define FLATCC_REFMAP_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "flatcc_types.h"
+
+#ifndef FLATCC_REFMAP_MIN_BUCKETS
+/* 8 buckets gives us 5 useful initial entries with a load factor of 0.7 */
+#define FLATCC_REFMAP_MIN_BUCKETS 8
+#endif
+
+#define FLATCC_REFMAP_LOAD_FACTOR 0.7f
+
+typedef struct flatcc_refmap flatcc_refmap_t;
+typedef flatbuffers_soffset_t flatcc_refmap_ref_t;
+
+static const flatcc_refmap_ref_t flatcc_refmap_not_found = 0;
+
+struct flatcc_refmap_item {
+ const void *src;
+ flatcc_refmap_ref_t ref;
+};
+
+struct flatcc_refmap {
+ size_t count;
+ size_t buckets;
+ struct flatcc_refmap_item *table;
+ /* Use stack allocation for small maps. */
+ struct flatcc_refmap_item min_table[FLATCC_REFMAP_MIN_BUCKETS];
+};
+
+/*
+ * Fast zero initialization - does not allocate any memory.
+ * May be replaced by memset 0, but `init` avoids clearing the
+ * stack allocated initial hash table until it is needed.
+ */
+static inline int flatcc_refmap_init(flatcc_refmap_t *refmap)
+{
+ refmap->count = 0;
+ refmap->buckets = 0;
+ refmap->table = 0;
+ return 0;
+}
+
+/*
+ * Removes all items and deallocates memory.
+ * Not required unless `insert` or `resize` took place. The map can be
+ * reused subsequently without calling `init`.
+ */
+void flatcc_refmap_clear(flatcc_refmap_t *refmap);
+
+/*
+ * Keeps allocated memory as is, but removes all items. The map
+ * must be initialized first.
+ */
+void flatcc_refmap_reset(flatcc_refmap_t *refmap);
+
+/*
+ * Returns the inserted reference if the `src` pointer was found,
+ * without inspecting the content of the `src` pointer.
+ *
+ * Returns flatcc_refmap_not_found (default 0) if the `src` pointer was
+ * not found.
+ */
+flatcc_refmap_ref_t flatcc_refmap_find(flatcc_refmap_t *refmap, const void *src);
+
+/*
+ * Inserts a `src` source pointer and its associated `ref` reference
+ * into the refmap without inspecting the `src` pointer content. The
+ * `ref` value will be replaced if the `src` pointer already exists.
+ *
+ * Inserting null will just return the ref without updating the map.
+ *
+ * There is no delete operation which simplifies an open
+ * addressing hash table, and it isn't needed for this use case.
+ *
+ * Returns the input ref or not_found on allocation error.
+ */
+flatcc_refmap_ref_t flatcc_refmap_insert(flatcc_refmap_t *refmap, const void *src, flatcc_refmap_ref_t ref);
+
+/*
+ * Set the hash table to accommodate at least `count` items while staying
+ * within the predefined load factor.
+ *
+ * Resize is primarily an internal operation, but the user may resize
+ * ahead of a large anticipated load, or after a large load to shrink
+ * the table using 0 as the `count` argument. The table never shrinks
+ * on its own account.
+ */
+int flatcc_refmap_resize(flatcc_refmap_t *refmap, size_t count);
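+
+/*
+ * Usage sketch (assumed caller code, not part of this header):
+ * memoizing a clone so a repeated source pointer reuses the same
+ * builder reference. `clone_object` and the builder `B` are
+ * hypothetical; insert returns the input ref per the contract above.
+ *
+ *     flatcc_refmap_t refmap;
+ *     flatcc_refmap_ref_t ref;
+ *
+ *     flatcc_refmap_init(&refmap);
+ *     ref = flatcc_refmap_find(&refmap, src);
+ *     if (ref == flatcc_refmap_not_found) {
+ *         ref = flatcc_refmap_insert(&refmap, src, clone_object(B, src));
+ *     }
+ *     flatcc_refmap_clear(&refmap);
+ */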
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* FLATCC_REFMAP_H */
diff --git a/nostrdb/flatcc/flatcc_rtconfig.h b/nostrdb/flatcc/flatcc_rtconfig.h
@@ -0,0 +1,162 @@
+#ifndef FLATCC_RTCONFIG_H
+#define FLATCC_RTCONFIG_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/* Include portability layer here since all other files depend on it. */
+#ifdef FLATCC_PORTABLE
+#include "flatcc/portable/portable.h"
+#endif
+
+/*
+ * Fast printing and parsing of double.
+ *
+ * This requires the grisu3/grisu3_* files to be in the include path,
+ * otherwise strtod and sprintf will be used (these are needed anyway
+ * as a fallback for cases not supported by grisu3).
+ */
+#ifndef FLATCC_USE_GRISU3
+#define FLATCC_USE_GRISU3 1
+#endif
+
+/*
+ * This requires a compiler that has enabled march=native or similar so
+ * the __SSE4_2__ flag is defined. Otherwise it will have no effect.
+ *
+ * While SSE may be used for different purposes, it has (as of this
+ * writing) only been used to test the effect on JSON whitespace handling
+ * which improved, but not by a lot, assuming 64-bit unaligned access is
+ * otherwise available:
+ *
+ * With 8 space indentation, the JSON benchmark handles 308K parse ops/sec
+ * while SSE ups that to 333K parse ops/sec, or 336K if \r\n is also
+ * consumed by SSE. Disabling indentation leaves SSE space handling
+ * ineffective, and performance reaches 450K parse ops/sec and can
+ * improve further to 500+K parse ops/sec if inexact GRISU3 numbers are
+ * allowed (they are pretty accurate anyway, just not exact). That
+ * feature requires hacking a flag directly in the grisu3 double parsing
+ * lib and is only mentioned for comparison.
+ *
+ * In conclusion, SSE doesn't add much to JSON space handling, at least.
+ *
+ * Disabled by default, but can be overridden by the build system.
+ */
+#ifndef FLATCC_USE_SSE4_2
+#define FLATCC_USE_SSE4_2 0
+#endif
+
+/*
+ * The verifier only reports yes and no. The following setting
+ * enables assertions in debug builds. It must be compiled into
+ * the runtime library and is not normally the desired behavior.
+ *
+ * NOTE: enabling this can break test cases, so use it for builds, not tests.
+ */
+#if !defined(FLATCC_DEBUG_VERIFY) && !defined(NDEBUG)
+#define FLATCC_DEBUG_VERIFY 0
+#endif
+
+#if !defined(FLATCC_TRACE_VERIFY)
+#define FLATCC_TRACE_VERIFY 0
+#endif
+
+
+/*
+ * Limit recursion level for tables. Actual level may be deeper
+ * when structs are deeply nested - but these are limited by the
+ * schema compiler.
+ */
+#ifndef FLATCC_JSON_PRINT_MAX_LEVELS
+#define FLATCC_JSON_PRINT_MAX_LEVELS 100
+#endif
+
+/* Maximum length of names printed excluding the _type suffix. */
+#ifndef FLATCC_JSON_PRINT_NAME_LEN_MAX
+#define FLATCC_JSON_PRINT_NAME_LEN_MAX 100
+#endif
+
+/*
+ * Print float and double values with C99 hexadecimal floating point
+ * notation. This option is not valid JSON but it avoids precision
+ * loss, correctly handles NaN and +/-Infinity, and is significantly faster
+ * to parse and print. Some JSON parsers rely on strtod which does
+ * support hexadecimal floating points when C99 compliant.
+ */
+#ifndef FLATCC_JSON_PRINT_HEX_FLOAT
+#define FLATCC_JSON_PRINT_HEX_FLOAT 0
+#endif
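+
+/*
+ * For reference, C99 hex float notation as produced by printf("%a"):
+ * 1.5 prints as 0x1.8p+0 and 255.0 as 0x1.fep+7, i.e. a hexadecimal
+ * fraction scaled by a power of two.
+ */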
+
+/*
+ * Always print multiple enum flags like `color: "Red Green"`
+ * even when unquoted output is selected as an option for a single
+ * value like `color: Green`. Otherwise multiple values
+ * are printed as `color: Red Green`, but this could break
+ * some flatbuffer JSON parsers.
+ */
+#ifndef FLATCC_JSON_PRINT_ALWAYS_QUOTE_MULTIPLE_FLAGS
+#define FLATCC_JSON_PRINT_ALWAYS_QUOTE_MULTIPLE_FLAGS 1
+#endif
+
+/*
+ * The general nesting limit may be lower, but for skipping
+ * JSON we do not need to honor it - we can set this high as it only
+ * costs a single char per level in a stack array.
+ */
+#ifndef FLATCC_JSON_PARSE_GENERIC_MAX_NEST
+#define FLATCC_JSON_PARSE_GENERIC_MAX_NEST 512
+#endif
+
+/* Store value even if it is default. */
+#ifndef FLATCC_JSON_PARSE_FORCE_DEFAULTS
+#define FLATCC_JSON_PARSE_FORCE_DEFAULTS 0
+#endif
+
+#ifndef FLATCC_JSON_PARSE_ALLOW_UNQUOTED
+#define FLATCC_JSON_PARSE_ALLOW_UNQUOTED 1
+#endif
+
+/*
+ * Multiple enum values are by default not permitted unless
+ * quoted like `color: "Red Green"` as per Google's flatc JSON
+ * parser, while a single value like `color: Red` can be
+ * unquoted. Enabling this setting will allow `color: Red
+ * Green`, but only if FLATCC_JSON_PARSE_ALLOW_UNQUOTED is
+ * also enabled.
+ */
+#ifndef FLATCC_JSON_PARSE_ALLOW_UNQUOTED_LIST
+#define FLATCC_JSON_PARSE_ALLOW_UNQUOTED_LIST 0
+#endif
+
+#ifndef FLATCC_JSON_PARSE_ALLOW_UNKNOWN_FIELD
+#define FLATCC_JSON_PARSE_ALLOW_UNKNOWN_FIELD 1
+#endif
+
+#ifndef FLATCC_JSON_PARSE_ALLOW_TRAILING_COMMA
+#define FLATCC_JSON_PARSE_ALLOW_TRAILING_COMMA 1
+#endif
+
+/*
+ * Just parse to the closing brace '}' if set.
+ * Otherwise parse to end by consuming space and
+ * fail if anything but space follows.
+ */
+#ifndef FLATCC_PARSE_IGNORE_TRAILING_DATA
+#define FLATCC_PARSE_IGNORE_TRAILING_DATA 0
+#endif
+
+/*
+ * Optimize to parse a lot of white space, but
+ * in most cases it probably slows parsing down.
+ */
+#ifndef FLATCC_JSON_PARSE_WIDE_SPACE
+#define FLATCC_JSON_PARSE_WIDE_SPACE 0
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* FLATCC_RTCONFIG_H */
diff --git a/nostrdb/flatcc/flatcc_types.h b/nostrdb/flatcc/flatcc_types.h
@@ -0,0 +1,97 @@
+#ifndef FLATCC_TYPES_H
+#define FLATCC_TYPES_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdlib.h>
+
+#ifndef UINT8_MAX
+#include <stdint.h>
+#endif
+
+/*
+ * This should match generated type declarations in
+ * `flatbuffers_common_reader.h` (might have different name prefix).
+ * Read only generated code does not depend on library code,
+ * hence the duplication.
+ */
+#ifndef flatbuffers_types_defined
+#define flatbuffers_types_defined
+
+/*
+ * uoffset_t and soffset_t must be the same integer type, except for sign.
+ * They can be (u)int16_t, (u)int32_t, or (u)int64_t.
+ * The default is (u)int32_t.
+ *
+ * voffset_t is expected to be uint16_t, but can experimentally be
+ * compiled from uint8_t up to uint32_t.
+ *
+ * ID_MAX is the largest value that can index a vtable. The table size
+ * is given as a voffset value. Each id represents a voffset value index
+ * from 0 to max inclusive. Space is required for two header voffset
+ * fields and the unaddressable highest index (due to the table size
+ * representation). For 16-bit voffsets this yields a max of 2^15 - 4,
+ * or (2^16 - 1) / 2 - 3.
+ */
+
+#define flatbuffers_uoffset_t_defined
+#define flatbuffers_soffset_t_defined
+#define flatbuffers_voffset_t_defined
+#define flatbuffers_utype_t_defined
+#define flatbuffers_bool_t_defined
+#define flatbuffers_thash_t_defined
+#define flatbuffers_fid_t_defined
+
+/* uoffset_t is also used for vector and string headers. */
+#define FLATBUFFERS_UOFFSET_MAX UINT32_MAX
+#define FLATBUFFERS_SOFFSET_MAX INT32_MAX
+#define FLATBUFFERS_SOFFSET_MIN INT32_MIN
+#define FLATBUFFERS_VOFFSET_MAX UINT16_MAX
+#define FLATBUFFERS_UTYPE_MAX UINT8_MAX
+/* Well - the max of the underlying type. */
+#define FLATBUFFERS_BOOL_MAX UINT8_MAX
+#define FLATBUFFERS_THASH_MAX UINT32_MAX
+
+#define FLATBUFFERS_ID_MAX (FLATBUFFERS_VOFFSET_MAX / sizeof(flatbuffers_voffset_t) - 3)
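+/*
+ * Worked example for the default 16-bit voffset_t:
+ * 65535 / sizeof(uint16_t) - 3 = 32767 - 3 = 32764 = 2^15 - 4,
+ * matching the derivation in the comment above.
+ */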
+/* Vectors of empty structs can yield div by zero, so we must guard against this. */
+#define FLATBUFFERS_COUNT_MAX(elem_size) (FLATBUFFERS_UOFFSET_MAX/((elem_size) == 0 ? 1 : (elem_size)))
+
+#define FLATBUFFERS_UOFFSET_WIDTH 32
+#define FLATBUFFERS_COUNT_WIDTH 32
+#define FLATBUFFERS_SOFFSET_WIDTH 32
+#define FLATBUFFERS_VOFFSET_WIDTH 16
+#define FLATBUFFERS_UTYPE_WIDTH 8
+#define FLATBUFFERS_BOOL_WIDTH 8
+#define FLATBUFFERS_THASH_WIDTH 32
+
+#define FLATBUFFERS_TRUE 1
+#define FLATBUFFERS_FALSE 0
+
+#define FLATBUFFERS_PROTOCOL_IS_LE 1
+#define FLATBUFFERS_PROTOCOL_IS_BE 0
+
+typedef uint32_t flatbuffers_uoffset_t;
+typedef int32_t flatbuffers_soffset_t;
+typedef uint16_t flatbuffers_voffset_t;
+typedef uint8_t flatbuffers_utype_t;
+typedef uint8_t flatbuffers_bool_t;
+typedef uint32_t flatbuffers_thash_t;
+/* Public facing type operations. */
+typedef flatbuffers_utype_t flatbuffers_union_type_t;
+
+static const flatbuffers_bool_t flatbuffers_true = FLATBUFFERS_TRUE;
+static const flatbuffers_bool_t flatbuffers_false = FLATBUFFERS_FALSE;
+
+#define FLATBUFFERS_IDENTIFIER_SIZE (FLATBUFFERS_THASH_WIDTH / 8)
+
+typedef char flatbuffers_fid_t[FLATBUFFERS_IDENTIFIER_SIZE];
+
+#endif /* flatbuffers_types_defined */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* FLATCC_TYPES_H */
diff --git a/nostrdb/flatcc/flatcc_unaligned.h b/nostrdb/flatcc/flatcc_unaligned.h
@@ -0,0 +1,16 @@
+#ifndef FLATCC_UNALIGNED_H
+#define FLATCC_UNALIGNED_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "punaligned.h"
+
+#define FLATCC_ALLOW_UNALIGNED_ACCESS PORTABLE_UNALIGNED_ACCESS
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* FLATCC_UNALIGNED_H */
diff --git a/nostrdb/flatcc/flatcc_verifier.h b/nostrdb/flatcc/flatcc_verifier.h
@@ -0,0 +1,239 @@
+#ifndef FLATCC_VERIFIER_H
+#define FLATCC_VERIFIER_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Runtime support for verifying flatbuffers.
+ *
+ * Link with the verifier implementation file.
+ *
+ * Note:
+ *
+ * 1) nested buffers will NOT have their identifier verified.
+ * The user may do so subsequently. The reason is in part because
+ * the information is not readily available without generated reader code,
+ * in part because the buffer might use a different, but valid,
+ * identifier and the user has no chance of specifying this in the
+ * verifier code. The root verifier also doesn't assume a specific id
+ * but accepts a user supplied input which may be null.
+ *
+ * 2) All offsets in a buffer are verified for alignment relative to the
+ * buffer start, but the buffer itself is only assumed to be aligned to
+ * uoffset_t. A reader should therefore ensure buffer alignment separately
+ * before reading the buffer. Nested buffers are in fact checked for
+ * alignment, but still only relative to the root buffer.
+ *
+ * 3) The max nesting level includes nested buffer nestings, so the
+ * verifier might fail even if the individual buffers are otherwise ok.
+ * This is to prevent abuse with lots of nested buffers.
+ *
+ *
+ * IMPORTANT:
+ *
+ * Even if the verifier passes, the buffer may be invalid to access due to
+ * lack of alignment in memory, but the verifier is safe to call.
+ *
+ * NOTE: The buffer is not safe to modify after verification because an
+ * attacker may craft overlapping data structures such that modification
+ * of one field updates another in a way that violates the buffer
+ * constraints. This may also be caused by a clever compression scheme.
+ *
+ * It is likely faster to rewrite the table although this is also
+ * dangerous because an attacker (or even a normal user) can draft a DAG
+ * that explodes when expanded carelessly. A safer approach is to
+ * hash all object references written and reuse those that match. This
+ * will expand references into other objects while bounding expansion
+ * and it will be safe to update assuming shared objects are ok to
+ * update.
+ *
+ */
+
+#include "flatcc/flatcc_types.h"
+
+#define FLATCC_VERIFY_ERROR_MAP(XX)\
+ XX(ok, "ok")\
+ XX(buffer_header_too_small, "buffer header too small")\
+ XX(identifier_mismatch, "identifier mismatch")\
+ XX(max_nesting_level_reached, "max nesting level reached")\
+ XX(required_field_missing, "required field missing")\
+ XX(runtime_buffer_header_not_aligned, "runtime: buffer header not aligned")\
+ XX(runtime_buffer_size_too_large, "runtime: buffer size too large")\
+ XX(string_not_zero_terminated, "string not zero terminated")\
+ XX(string_out_of_range, "string out of range")\
+ XX(struct_out_of_range, "struct out of range")\
+ XX(struct_size_overflow, "struct size overflow")\
+ XX(struct_unaligned, "struct unaligned")\
+ XX(table_field_not_aligned, "table field not aligned")\
+ XX(table_field_out_of_range, "table field out of range")\
+ XX(table_field_size_overflow, "table field size overflow")\
+ XX(table_header_out_of_range_or_unaligned, "table header out of range or unaligned")\
+ XX(vector_header_out_of_range_or_unaligned, "vector header out of range or unaligned")\
+ XX(string_header_out_of_range_or_unaligned, "string header out of range or unaligned")\
+ XX(offset_out_of_range, "offset out of range")\
+ XX(table_offset_out_of_range_or_unaligned, "table offset out of range or unaligned")\
+ XX(table_size_out_of_range, "table size out of range")\
+ XX(type_field_absent_from_required_union_field, "type field absent from required union field")\
+ XX(type_field_absent_from_required_union_vector_field, "type field absent from required union vector field")\
+ XX(union_cannot_have_a_table_without_a_type, "union cannot have a table without a type")\
+ XX(union_type_NONE_cannot_have_a_value, "union value field present with type NONE")\
+ XX(vector_count_exceeds_representable_vector_size, "vector count exceeds representable vector size")\
+ XX(vector_out_of_range, "vector out of range")\
+ XX(vtable_header_out_of_range, "vtable header out of range")\
+ XX(vtable_header_too_small, "vtable header too small")\
+ XX(vtable_offset_out_of_range_or_unaligned, "vtable offset out of range or unaligned")\
+ XX(vtable_size_out_of_range_or_unaligned, "vtable size out of range or unaligned")\
+ XX(vtable_size_overflow, "vtable size overflow")\
+ XX(union_element_absent_without_type_NONE, "union element absent without type NONE")\
+ XX(union_element_present_with_type_NONE, "union element present with type NONE")\
+ XX(union_vector_length_mismatch, "union type and table vectors have different lengths")\
+ XX(union_vector_verification_not_supported, "union vector verification not supported")\
+ XX(not_supported, "not supported")
+
+
+enum flatcc_verify_error_no {
+#define XX(no, str) flatcc_verify_error_##no,
+ FLATCC_VERIFY_ERROR_MAP(XX)
+#undef XX
+};
+
+#define flatcc_verify_ok flatcc_verify_error_ok
+
+const char *flatcc_verify_error_string(int err);
+
+/*
+ * Type specific table verifier function that checks each known field
+ * for existence in the vtable and then calls the appropriate verifier
+ * function in this library.
+ *
+ * The table descriptor values have been verified for bounds, overflow,
+ * and alignment, but vtable entries after the header must be verified
+ * for all fields the table verifier function understands.
+ *
+ * Calls other type-specific verifier functions recursively whenever a
+ * table field, union or table vector is encountered.
+ */
+typedef struct flatcc_table_verifier_descriptor flatcc_table_verifier_descriptor_t;
+struct flatcc_table_verifier_descriptor {
+ /* Pointer to buffer. Not assumed to be aligned beyond uoffset_t. */
+ const void *buf;
+ /* Buffer size. */
+ flatbuffers_uoffset_t end;
+    /* Time to live: number of nesting levels left before failure. */
+ int ttl;
+ /* Vtable of current table. */
+ const void *vtable;
+    /* Table offset relative to buffer start. */
+ flatbuffers_uoffset_t table;
+ /* Table end relative to buffer start as per vtable[1] field. */
+ flatbuffers_voffset_t tsize;
+ /* Size of vtable in bytes. */
+ flatbuffers_voffset_t vsize;
+};
+
+typedef int flatcc_table_verifier_f(flatcc_table_verifier_descriptor_t *td);
+
+typedef struct flatcc_union_verifier_descriptor flatcc_union_verifier_descriptor_t;
+
+struct flatcc_union_verifier_descriptor {
+ /* Pointer to buffer. Not assumed to be aligned beyond uoffset_t. */
+ const void *buf;
+ /* Buffer size. */
+ flatbuffers_uoffset_t end;
+    /* Time to live: number of nesting levels left before failure. */
+ int ttl;
+    /* Type of union value to be verified. */
+ flatbuffers_utype_t type;
+ /* Offset relative to buffer start to where union value offset is stored. */
+ flatbuffers_uoffset_t base;
+ /* Offset of union value relative to base. */
+ flatbuffers_uoffset_t offset;
+};
+
+typedef int flatcc_union_verifier_f(flatcc_union_verifier_descriptor_t *ud);
+
+/*
+ * The `as_root` functions are normally the only functions called
+ * explicitly in this interface.
+ *
+ * If `fid` is null, the identifier is not checked and is allowed to be entirely absent.
+ *
+ * The buffer must at least be aligned to uoffset_t on systems that
+ * require aligned memory addresses. The buffer pointer's alignment is
+ * not significant to internal verification of the buffer.
+ */
+int flatcc_verify_struct_as_root(const void *buf, size_t bufsiz, const char *fid,
+ size_t size, uint16_t align);
+
+int flatcc_verify_struct_as_typed_root(const void *buf, size_t bufsiz, flatbuffers_thash_t thash,
+ size_t size, uint16_t align);
+
+int flatcc_verify_table_as_root(const void *buf, size_t bufsiz, const char *fid,
+ flatcc_table_verifier_f *root_tvf);
+
+int flatcc_verify_table_as_typed_root(const void *buf, size_t bufsiz, flatbuffers_thash_t thash,
+ flatcc_table_verifier_f *root_tvf);
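+
+/*
+ * Usage sketch (assumed caller code): verify before reading a buffer
+ * of unknown origin. `MyTable_verify_table` and "MYID" are
+ * hypothetical generated names used for illustration.
+ *
+ *     int err = flatcc_verify_table_as_root(buf, bufsiz, "MYID",
+ *             MyTable_verify_table);
+ *     if (err != flatcc_verify_ok) {
+ *         fprintf(stderr, "verify failed: %s\n",
+ *                 flatcc_verify_error_string(err));
+ *     }
+ */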
+/*
+ * The buffer header is verified by any of the `_as_root` verifiers, but
+ * this function may be used as a quick sanity check.
+ */
+int flatcc_verify_buffer_header(const void *buf, size_t bufsiz, const char *fid);
+
+int flatcc_verify_typed_buffer_header(const void *buf, size_t bufsiz, flatbuffers_thash_t type_hash);
+
+/*
+ * The following functions are typically called by a generated table
+ * verifier function.
+ */
+
+/* Scalar, enum or struct field. */
+int flatcc_verify_field(flatcc_table_verifier_descriptor_t *td,
+ flatbuffers_voffset_t id, size_t size, uint16_t align);
+/* Vector of scalars, enums or structs. */
+int flatcc_verify_vector_field(flatcc_table_verifier_descriptor_t *td,
+ flatbuffers_voffset_t id, int required, size_t elem_size, uint16_t align, size_t max_count);
+int flatcc_verify_string_field(flatcc_table_verifier_descriptor_t *td,
+ flatbuffers_voffset_t id, int required);
+int flatcc_verify_string_vector_field(flatcc_table_verifier_descriptor_t *td,
+ flatbuffers_voffset_t id, int required);
+int flatcc_verify_table_field(flatcc_table_verifier_descriptor_t *td,
+ flatbuffers_voffset_t id, int required, flatcc_table_verifier_f tvf);
+int flatcc_verify_table_vector_field(flatcc_table_verifier_descriptor_t *td,
+ flatbuffers_voffset_t id, int required, flatcc_table_verifier_f tvf);
+/* Table verifiers pass 0 as fid. */
+int flatcc_verify_struct_as_nested_root(flatcc_table_verifier_descriptor_t *td,
+ flatbuffers_voffset_t id, int required, const char *fid,
+ size_t size, uint16_t align);
+int flatcc_verify_table_as_nested_root(flatcc_table_verifier_descriptor_t *td,
+ flatbuffers_voffset_t id, int required, const char *fid,
+ uint16_t align, flatcc_table_verifier_f tvf);
+
+/*
+ * A NONE type will not accept a table being present, and a required
+ * union will not accept a type field being absent, and an absent type
+ * field will not accept a table field being present.
+ *
+ * If the above checks out and the type is not NONE, the uvf callback
+ * is executed. It must test each known table type and silently accept
+ * any unknown table type for forward compatibility. A union table
+ * value is verified without the required flag because an absent table
+ * encodes a typed NULL value while an absent type field encodes a
+ * missing union which fails if required.
+ */
+int flatcc_verify_union_field(flatcc_table_verifier_descriptor_t *td,
+ flatbuffers_voffset_t id, int required, flatcc_union_verifier_f uvf);
+
+int flatcc_verify_union_vector_field(flatcc_table_verifier_descriptor_t *td,
+ flatbuffers_voffset_t id, int required, flatcc_union_verifier_f uvf);
+
+int flatcc_verify_union_table(flatcc_union_verifier_descriptor_t *ud, flatcc_table_verifier_f *tvf);
+int flatcc_verify_union_struct(flatcc_union_verifier_descriptor_t *ud, size_t size, uint16_t align);
+int flatcc_verify_union_string(flatcc_union_verifier_descriptor_t *ud);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* FLATCC_VERIFIER_H */
diff --git a/nostrdb/flatcc/flatcc_version.h b/nostrdb/flatcc/flatcc_version.h
@@ -0,0 +1,14 @@
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define FLATCC_VERSION_TEXT "0.6.1"
+#define FLATCC_VERSION_MAJOR 0
+#define FLATCC_VERSION_MINOR 6
+#define FLATCC_VERSION_PATCH 1
+/* 1 or 0 */
+#define FLATCC_VERSION_RELEASED 1
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/nostrdb/flatcc/json_parser.c b/nostrdb/flatcc/json_parser.c
@@ -0,0 +1,1298 @@
+#include "flatcc_rtconfig.h"
+#include "flatcc_json_parser.h"
+#include "flatcc_assert.h"
+
+#define uoffset_t flatbuffers_uoffset_t
+#define soffset_t flatbuffers_soffset_t
+#define voffset_t flatbuffers_voffset_t
+#define utype_t flatbuffers_utype_t
+
+#define uoffset_size sizeof(uoffset_t)
+#define soffset_size sizeof(soffset_t)
+#define voffset_size sizeof(voffset_t)
+#define utype_size sizeof(utype_t)
+
+#define offset_size uoffset_size
+#if FLATCC_USE_GRISU3 && !defined(PORTABLE_USE_GRISU3)
+#define PORTABLE_USE_GRISU3 1
+#endif
+#include "portable/pparsefp.h"
+#include "portable/pbase64.h"
+
+#if FLATCC_USE_SSE4_2
+#ifdef __SSE4_2__
+#define USE_SSE4_2
+#endif
+#endif
+
+#ifdef USE_SSE4_2
+#include <nmmintrin.h>
+#define cmpistri(end, haystack, needle, flags) \
+ if (end - haystack >= 16) do { \
+ int i; \
+ __m128i a = _mm_loadu_si128((const __m128i *)(needle)); \
+ do { \
+ __m128i b = _mm_loadu_si128((const __m128i *)(haystack)); \
+ i = _mm_cmpistri(a, b, flags); \
+ haystack += i; \
+ } while (i == 16 && end - haystack >= 16); \
+ } while(0)
+#endif
+
+const char *flatcc_json_parser_error_string(int err)
+{
+ switch (err) {
+#define XX(no, str) \
+ case flatcc_json_parser_error_##no: \
+ return str;
+ FLATCC_JSON_PARSE_ERROR_MAP(XX)
+#undef XX
+ default:
+ return "unknown";
+ }
+}
+
+const char *flatcc_json_parser_set_error(flatcc_json_parser_t *ctx, const char *loc, const char *end, int err)
+{
+ if (!ctx->error) {
+ ctx->error = err;
+ ctx->pos = (int)(loc - ctx->line_start + 1);
+ ctx->error_loc = loc;
+ }
+ return end;
+}
+
+const char *flatcc_json_parser_string_part(flatcc_json_parser_t *ctx, const char *buf, const char *end)
+{
+/*
+ * Disabled because it doesn't catch all control characters, but is
+ * useful for performance testing.
+ */
+#if 0
+//#ifdef USE_SSE4_2
+ cmpistri(end, buf, "\"\\\0\r\n\t\v\f", _SIDD_POSITIVE_POLARITY);
+#else
+ /*
+ * Testing for signed char >= 0x20 would also capture UTF-8
+ * encodings that we could verify, and also invalid encodings like
+     * 0xff, but we do not want to enforce strict UTF-8.
+ */
+ while (buf != end && *buf != '\"' && ((unsigned char)*buf) >= 0x20 && *buf != '\\') {
+ ++buf;
+ }
+#endif
+ if (buf == end) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_unterminated_string);
+ }
+ if (*buf == '"') {
+ return buf;
+ }
+ if (*buf < 0x20) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_character);
+ }
+ return buf;
+}
+
+const char *flatcc_json_parser_space_ext(flatcc_json_parser_t *ctx, const char *buf, const char *end)
+{
+again:
+#ifdef USE_SSE4_2
+ /*
+ * We can include line break, but then error reporting suffers and
+ * it really makes no big difference.
+ */
+ //cmpistri(end, buf, "\x20\t\v\f\r\n", _SIDD_NEGATIVE_POLARITY);
+ cmpistri(end, buf, "\x20\t\v\f", _SIDD_NEGATIVE_POLARITY);
+#else
+#if FLATCC_ALLOW_UNALIGNED_ACCESS
+ while (end - buf >= 16) {
+ if (*buf > 0x20) {
+ return buf;
+ }
+#if FLATCC_JSON_PARSE_WIDE_SPACE
+ if (((uint64_t *)buf)[0] != 0x2020202020202020) {
+descend:
+ if (((uint32_t *)buf)[0] == 0x20202020) {
+ buf += 4;
+ }
+#endif
+ if (((uint16_t *)buf)[0] == 0x2020) {
+ buf += 2;
+ }
+ if (*buf == 0x20) {
+ ++buf;
+ }
+ if (*buf > 0x20) {
+ return buf;
+ }
+ break;
+#if FLATCC_JSON_PARSE_WIDE_SPACE
+ }
+ if (((uint64_t *)buf)[1] != 0x2020202020202020) {
+ buf += 8;
+ goto descend;
+ }
+ buf += 16;
+#endif
+ }
+#endif
+#endif
+ while (buf != end && *buf == 0x20) {
+ ++buf;
+ }
+ while (buf != end && *buf <= 0x20) {
+ /* Fall through comments needed to silence gcc 7 warnings. */
+ switch (*buf) {
+ case 0x0d: buf += (end - buf > 1 && buf[1] == 0x0a);
+            /* Consume following LF or treat CR as LF. */
+ fallthrough;
+ case 0x0a: ++ctx->line; ctx->line_start = ++buf; continue;
+ case 0x09: ++buf; continue;
+ case 0x20: goto again; /* Don't consume here, sync with power of 2 spaces. */
+ default: return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_unexpected_character);
+ }
+ }
+ return buf;
+}
+
+static int decode_hex4(const char *buf, uint32_t *result)
+{
+ uint32_t u, x;
+ char c;
+
+ u = 0;
+ c = buf[0];
+ if (c >= '0' && c <= '9') {
+ x = (uint32_t)(c - '0');
+ u = x << 12;
+ } else {
+ /* Lower case. */
+ c |= 0x20;
+ if (c >= 'a' && c <= 'f') {
+ x = (uint32_t)(c - 'a' + 10);
+ u |= x << 12;
+ } else {
+ return -1;
+ }
+ }
+ c = buf[1];
+ if (c >= '0' && c <= '9') {
+ x = (uint32_t)(c - '0');
+ u |= x << 8;
+ } else {
+ /* Lower case. */
+ c |= 0x20;
+ if (c >= 'a' && c <= 'f') {
+ x = (uint32_t)(c - 'a' + 10);
+ u |= x << 8;
+ } else {
+ return -1;
+ }
+ }
+ c = buf[2];
+ if (c >= '0' && c <= '9') {
+ x = (uint32_t)(c - '0');
+ u |= x << 4;
+ } else {
+ /* Lower case. */
+ c |= 0x20;
+ if (c >= 'a' && c <= 'f') {
+ x = (uint32_t)(c - 'a' + 10);
+ u |= x << 4;
+ } else {
+ return -1;
+ }
+ }
+ c = buf[3];
+ if (c >= '0' && c <= '9') {
+ x = (uint32_t)(c - '0');
+ u |= x;
+ } else {
+ /* Lower case. */
+ c |= 0x20;
+ if (c >= 'a' && c <= 'f') {
+ x = (uint32_t)(c - 'a' + 10);
+ u |= x;
+ } else {
+ return -1;
+ }
+ }
+ *result = u;
+ return 0;
+}
+
+static int decode_unicode_char(uint32_t u, char *code)
+{
+ if (u <= 0x7f) {
+ code[0] = 1;
+ code[1] = (char)u;
+ } else if (u <= 0x7ff) {
+ code[0] = 2;
+ code[1] = (char)(0xc0 | (u >> 6));
+ code[2] = (char)(0x80 | (u & 0x3f));
+ } else if (u <= 0xffff) {
+ code[0] = 3;
+ code[1] = (char)(0xe0 | (u >> 12));
+ code[2] = (char)(0x80 | ((u >> 6) & 0x3f));
+ code[3] = (char)(0x80 | (u & 0x3f));
+ } else if (u <= 0x10ffff) {
+ code[0] = 4;
+ code[1] = (char)(0xf0 | (u >> 18));
+ code[2] = (char)(0x80 | ((u >> 12) & 0x3f));
+ code[3] = (char)(0x80 | ((u >> 6) & 0x3f));
+ code[4] = (char)(0x80 | (u & 0x3f));
+ } else {
+ code[0] = 0;
+ return -1;
+ }
+ return 0;
+}
+
+static inline uint32_t combine_utf16_surrogate_pair(uint32_t high, uint32_t low)
+{
+ return (high - 0xd800) * 0x400 + (low - 0xdc00) + 0x10000;
+}
+
+static inline int decode_utf16_surrogate_pair(uint32_t high, uint32_t low, char *code)
+{
+ return decode_unicode_char(combine_utf16_surrogate_pair(high, low), code);
+}
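+
+/*
+ * Worked example: U+1F600 escapes in JSON as "\ud83d\ude00" and
+ * combine_utf16_surrogate_pair(0xd83d, 0xde00) recovers it:
+ * (0xd83d - 0xd800) * 0x400 + (0xde00 - 0xdc00) + 0x10000
+ * = 0xf400 + 0x200 + 0x10000 = 0x1f600.
+ */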
+
+
+/*
+ * UTF-8 code points can have up to 4 bytes but JSON can only
+ * encode up to 3 bytes via the \uXXXX syntax.
+ * To handle the range U+10000..U+10FFFF a UTF-16 surrogate
+ * pair (two \uXXXX escapes) must be used. If this is not detected, the
+ * pair survives in the output, which is not valid but often tolerated.
+ * Emojis generally require such a pair, unless encoded
+ * unescaped in UTF-8.
+ *
+ * If a high surrogate half is detected and a low surrogate half
+ * follows, the combined sequence is decoded as a 4 byte
+ * UTF-8 sequence. Unpaired surrogate halves are decoded as is
+ * despite being invalid UTF-8 values.
+ */
+
+const char *flatcc_json_parser_string_escape(flatcc_json_parser_t *ctx, const char *buf, const char *end, flatcc_json_parser_escape_buffer_t code)
+{
+ char c, v;
+ uint32_t u, u2;
+
+ if (end - buf < 2 || buf[0] != '\\') {
+ code[0] = 0;
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_escape);
+ }
+ switch (buf[1]) {
+ case 'x':
+ v = 0;
+ code[0] = 1;
+ if (end - buf < 4) {
+ code[0] = 0;
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_escape);
+ }
+ c = buf[2];
+ if (c >= '0' && c <= '9') {
+ v |= (c - '0') << 4;
+ } else {
+ /* Lower case. */
+ c |= 0x20;
+ if (c >= 'a' && c <= 'f') {
+ v |= (c - 'a' + 10) << 4;
+ } else {
+ code[0] = 0;
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_escape);
+ }
+ }
+ c = buf[3];
+ if (c >= '0' && c <= '9') {
+ v |= c - '0';
+ } else {
+ /* Lower case. */
+ c |= 0x20;
+ if (c >= 'a' && c <= 'f') {
+ v |= c - 'a' + 10;
+ } else {
+ code[0] = 0;
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_escape);
+ }
+ }
+ code[1] = v;
+ return buf + 4;
+ case 'u':
+ if (end - buf < 6) {
+ code[0] = 0;
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_escape);
+ }
+ if (decode_hex4(buf + 2, &u)) {
+ code[0] = 0;
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_escape);
+ };
+        /* If a high UTF-16 surrogate half was detected */
+ if (u >= 0xd800 && u <= 0xdbff &&
+ /* and there is space for a matching low half pair */
+ end - buf >= 12 &&
+ /* and there is a second escape following immediately */
+ buf[6] == '\\' && buf[7] == 'u' &&
+ /* and it is valid hex */
+ decode_hex4(buf + 8, &u2) == 0 &&
+                /* and it is a low UTF-16 surrogate half */
+ u2 >= 0xdc00 && u2 <= 0xdfff) {
+ /* then decode the pair into a single 4 byte utf-8 sequence. */
+ if (decode_utf16_surrogate_pair(u, u2, code)) {
+ code[0] = 0;
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_escape);
+ }
+ return buf + 12;
+ /*
+ * Otherwise decode unmatched surrogate pairs as is any
+ * other UTF-8. Some systems might depend on these surviving.
+ * Leave ignored errors for the next parse step.
+ */
+ }
+ decode_unicode_char(u, code);
+ return buf + 6;
+ case 't':
+ code[0] = 1;
+ code[1] = '\t';
+ return buf + 2;
+ case 'n':
+ code[0] = 1;
+ code[1] = '\n';
+ return buf + 2;
+ case 'r':
+ code[0] = 1;
+ code[1] = '\r';
+ return buf + 2;
+ case 'b':
+ code[0] = 1;
+ code[1] = '\b';
+ return buf + 2;
+ case 'f':
+ code[0] = 1;
+ code[1] = '\f';
+ return buf + 2;
+ case '\"':
+ code[0] = 1;
+ code[1] = '\"';
+ return buf + 2;
+ case '\\':
+ code[0] = 1;
+ code[1] = '\\';
+ return buf + 2;
+ case '/':
+ code[0] = 1;
+ code[1] = '/';
+ return buf + 2;
+ default:
+ code[0] = 0;
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_escape);
+ }
+}
+
+/* Only applies to unquoted constants during generic parsing, otherwise it is skipped as a string. */
+const char *flatcc_json_parser_skip_constant(flatcc_json_parser_t *ctx, const char *buf, const char *end)
+{
+ char c;
+ const char *k;
+
+ while (buf != end) {
+ c = *buf;
+ if ((c & 0x80) || (c == '_') || (c >= '0' && c <= '9') || c == '.') {
+ ++buf;
+ continue;
+ }
+ /* Upper case. */
+ c |= 0x20;
+ if (c >= 'a' && c <= 'z') {
+ ++buf;
+ continue;
+ }
+ buf = flatcc_json_parser_space(ctx, (k = buf), end);
+ if (buf == k) {
+ return buf;
+ }
+ }
+ return buf;
+}
+
+const char *flatcc_json_parser_match_constant(flatcc_json_parser_t *ctx, const char *buf, const char *end, int pos, int *more)
+{
+ const char *mark = buf, *k = buf + pos;
+
+ if (end - buf <= pos) {
+ *more = 0;
+ return buf;
+ }
+#if FLATCC_JSON_PARSE_ALLOW_UNQUOTED
+ if (ctx->unquoted) {
+ buf = flatcc_json_parser_space(ctx, k, end);
+ if (buf == end) {
+ /*
+ * We cannot make a decision on more.
+ * Just return end and let parser handle sync point in
+ * case it is able to resume parse later on.
+ * For the same reason we do not lower ctx->unquoted.
+ */
+ *more = 0;
+ return buf;
+ }
+ if (buf != k) {
+ char c = *buf;
+ /*
+ * Space was seen - and thus we have a valid match.
+ * If the next char is an identifier start symbol
+ * we raise the more flag to support syntax like:
+ *
+ * `flags: Hungry Sleepy Awake, ...`
+ */
+ if (c == '_' || (c & 0x80)) {
+ *more = 1;
+ return buf;
+ }
+ c |= 0x20;
+ if (c >= 'a' && c <= 'z') {
+ *more = 1;
+ return buf;
+ }
+ }
+ /*
+ * Space was not seen, so the match is only valid if followed
+ * by a JSON separator symbol, and there cannot be more values
+ * following so `more` is lowered.
+ */
+ *more = 0;
+ if (*buf == ',' || *buf == '}' || *buf == ']') {
+ return buf;
+ }
+ return mark;
+ }
+#endif
+ buf = k;
+ if (*buf == 0x20) {
+ ++buf;
+ while (buf != end && *buf == 0x20) {
+ ++buf;
+ }
+ if (buf == end) {
+ *more = 0;
+ return buf;
+ }
+ /* We accept untrimmed space like " Green Blue ". */
+ if (*buf != '\"') {
+ *more = 1;
+ return buf;
+ }
+ }
+ switch (*buf) {
+ case '\\':
+ *more = 0;
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_escape);
+ case '\"':
+ buf = flatcc_json_parser_space(ctx, buf + 1, end);
+ *more = 0;
+ return buf;
+ }
+ *more = 0;
+ return mark;
+}
+
+const char *flatcc_json_parser_unmatched_symbol(flatcc_json_parser_t *ctx, const char *buf, const char *end)
+{
+ if (ctx->flags & flatcc_json_parser_f_skip_unknown) {
+ buf = flatcc_json_parser_symbol_end(ctx, buf, end);
+ buf = flatcc_json_parser_space(ctx, buf, end);
+ if (buf != end && *buf == ':') {
+ ++buf;
+ buf = flatcc_json_parser_space(ctx, buf, end);
+ } else {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_expected_colon);
+ }
+ return flatcc_json_parser_generic_json(ctx, buf, end);
+ } else {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_unknown_symbol);
+ }
+}
+
+static const char *__flatcc_json_parser_number(flatcc_json_parser_t *ctx, const char *buf, const char *end)
+{
+ if (buf == end) {
+ return buf;
+ }
+ if (*buf == '-') {
+ ++buf;
+ if (buf == end) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_numeric);
+ }
+ }
+ if (*buf == '0') {
+ ++buf;
+ } else {
+ if (*buf < '1' || *buf > '9') {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_numeric);
+ }
+ ++buf;
+ while (buf != end && *buf >= '0' && *buf <= '9') {
+ ++buf;
+ }
+ }
+ if (buf != end) {
+ if (*buf == '.') {
+ ++buf;
+            if (buf == end || *buf < '0' || *buf > '9') {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_numeric);
+ }
+ ++buf;
+ while (buf != end && *buf >= '0' && *buf <= '9') {
+ ++buf;
+ }
+ }
+ }
+ if (buf != end && (*buf == 'e' || *buf == 'E')) {
+ ++buf;
+ if (buf == end) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_numeric);
+ }
+ if (*buf == '+' || *buf == '-') {
+ ++buf;
+ }
+ if (buf == end || *buf < '0' || *buf > '9') {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_numeric);
+ }
+ ++buf;
+ while (buf != end && *buf >= '0' && *buf <= '9') {
+ ++buf;
+ }
+ }
+
+ /*
+     * For strtod termination we must ensure the tail is not a valid
+     * continuation, including non-JSON exponent forms. The simplest
+     * approach is to accept any character that could be a valid JSON
+     * successor and to reject end of buffer since we expect a closing
+ * '}'.
+ *
+ * The ',' is actually not safe if strtod uses a non-POSIX locale.
+ */
+ if (buf != end) {
+ switch (*buf) {
+ case ',':
+ case ':':
+ case ']':
+ case '}':
+ case ' ':
+ case '\r':
+ case '\t':
+ case '\n':
+ case '\v':
+ return buf;
+ }
+ }
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_numeric);
+}
+
+const char *flatcc_json_parser_double(flatcc_json_parser_t *ctx, const char *buf, const char *end, double *v)
+{
+ const char *next, *k;
+
+ *v = 0.0;
+ if (buf == end) {
+ return buf;
+ }
+ k = buf;
+ if (*buf == '-') ++k;
+ if (end - k > 1 && (k[0] == '.' || (k[0] == '0' && k[1] == '0'))) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_numeric);
+ }
+ next = parse_double(buf, (size_t)(end - buf), v);
+ if (next == 0 || next == buf) {
+ if (parse_double_isinf(*v)) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_overflow);
+ }
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_numeric);
+ }
+ return next;
+}
+
+const char *flatcc_json_parser_float(flatcc_json_parser_t *ctx, const char *buf, const char *end, float *v)
+{
+ const char *next, *k;
+
+ *v = 0.0;
+ if (buf == end) {
+ return buf;
+ }
+ k = buf;
+ if (*buf == '-') ++k;
+ if (end - k > 1 && (k[0] == '.' || (k[0] == '0' && k[1] == '0'))) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_numeric);
+ }
+ next = parse_float(buf, (size_t)(end - buf), v);
+ if (next == 0 || next == buf) {
+ if (parse_float_isinf(*v)) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_overflow);
+ }
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_invalid_numeric);
+ }
+ return next;
+}
+
+const char *flatcc_json_parser_generic_json(flatcc_json_parser_t *ctx, const char *buf, const char *end)
+{
+ char stack[FLATCC_JSON_PARSE_GENERIC_MAX_NEST];
+ char *sp, *spend;
+ const char *k;
+ flatcc_json_parser_escape_buffer_t code;
+ int more = 0;
+
+ sp = stack;
+ spend = sp + FLATCC_JSON_PARSE_GENERIC_MAX_NEST;
+
+again:
+ if (buf == end) {
+ return buf;
+ }
+ if (sp != stack && sp[-1] == '}') {
+ /* Inside an object, about to read field name. */
+ buf = flatcc_json_parser_symbol_start(ctx, buf, end);
+ buf = flatcc_json_parser_symbol_end(ctx, buf, end);
+ buf = flatcc_json_parser_space(ctx, buf, end);
+ if (buf == end) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_unbalanced_object);
+ }
+ if (*buf != ':') {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_expected_colon);
+ }
+ buf = flatcc_json_parser_space(ctx, buf + 1, end);
+ }
+ switch (*buf) {
+ case '\"':
+ buf = flatcc_json_parser_string_start(ctx, buf, end);
+ while (buf != end && *buf != '\"') {
+ buf = flatcc_json_parser_string_part(ctx, buf, end);
+ if (buf != end && *buf == '\"') {
+ break;
+ }
+ buf = flatcc_json_parser_string_escape(ctx, buf, end, code);
+ }
+ buf = flatcc_json_parser_string_end(ctx, buf, end);
+ break;
+ case '-':
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ buf = __flatcc_json_parser_number(ctx, buf, end);
+ break;
+#if !FLATCC_JSON_PARSE_ALLOW_UNQUOTED
+ case 't': case 'f':
+ {
+ uint8_t v;
+ buf = flatcc_json_parser_bool(ctx, (k = buf), end, &v);
+ if (k == buf) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_unexpected_character);
+ }
+ }
+ break;
+ case 'n':
+ buf = flatcc_json_parser_null((k = buf), end);
+ if (k == buf) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_unexpected_character);
+ }
+ break;
+#endif
+ case '[':
+ if (sp == spend) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_deep_nesting);
+ }
+ *sp++ = ']';
+ buf = flatcc_json_parser_space(ctx, buf + 1, end);
+ if (buf != end && *buf == ']') {
+ break;
+ }
+ goto again;
+ case '{':
+ if (sp == spend) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_deep_nesting);
+ }
+ *sp++ = '}';
+ buf = flatcc_json_parser_space(ctx, buf + 1, end);
+ if (buf != end && *buf == '}') {
+ break;
+ }
+ goto again;
+
+ default:
+#if FLATCC_JSON_PARSE_ALLOW_UNQUOTED
+ buf = flatcc_json_parser_skip_constant(ctx, (k = buf), end);
+ if (k == buf) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_unexpected_character);
+ }
+ break;
+#else
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_unexpected_character);
+#endif
+ }
+ while (buf != end && sp != stack) {
+ --sp;
+ if (*sp == ']') {
+ buf = flatcc_json_parser_array_end(ctx, buf, end, &more);
+ } else {
+ buf = flatcc_json_parser_object_end(ctx, buf, end, &more);
+ }
+ if (more) {
+ ++sp;
+ goto again;
+ }
+ }
+ if (buf == end && sp != stack) {
+ return flatcc_json_parser_set_error(ctx, buf, end, sp[-1] == ']' ?
+ flatcc_json_parser_error_unbalanced_array :
+ flatcc_json_parser_error_unbalanced_object);
+ }
+ /* Any ',', ']', or '}' belongs to parent context. */
+ return buf;
+}
+
+const char *flatcc_json_parser_integer(flatcc_json_parser_t *ctx, const char *buf, const char *end,
+ int *value_sign, uint64_t *value)
+{
+ uint64_t x0, x = 0;
+ const char *k;
+
+ if (buf == end) {
+ return buf;
+ }
+ k = buf;
+ *value_sign = *buf == '-';
+ buf += *value_sign;
+ while (buf != end && *buf >= '0' && *buf <= '9') {
+ x0 = x;
+ x = x * 10 + (uint64_t)(*buf - '0');
+ if (x0 > x) {
+            return flatcc_json_parser_set_error(ctx, buf, end, *value_sign ?
+ flatcc_json_parser_error_underflow : flatcc_json_parser_error_overflow);
+ }
+ ++buf;
+ }
+ if (buf == k) {
+ /* Give up, but don't fail the parse just yet, it might be a valid symbol. */
+ return buf;
+ }
+ if (buf != end && (*buf == 'e' || *buf == 'E' || *buf == '.')) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_float_unexpected);
+ }
+ *value = x;
+ return buf;
+}
+
+/* Array Creation - depends on flatcc builder. */
+
+const char *flatcc_json_parser_build_uint8_vector_base64(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, flatcc_builder_ref_t *ref, int urlsafe)
+{
+ const char *mark;
+ uint8_t *pval;
+ size_t max_len;
+ size_t decoded_len, src_len;
+ int mode;
+ int ret;
+
+ mode = urlsafe ? base64_mode_url : base64_mode_rfc4648;
+ buf = flatcc_json_parser_string_start(ctx, buf, end);
+ buf = flatcc_json_parser_string_part(ctx, (mark = buf), end);
+ if (buf == end || *buf != '\"') {
+ goto base64_failed;
+ }
+ max_len = base64_decoded_size((size_t)(buf - mark));
+ if (flatcc_builder_start_vector(ctx->ctx, 1, 1, FLATBUFFERS_COUNT_MAX((utype_size)))) {
+ goto failed;
+ }
+ if (!(pval = flatcc_builder_extend_vector(ctx->ctx, max_len))) {
+ goto failed;
+ }
+ src_len = (size_t)(buf - mark);
+ decoded_len = max_len;
+ if ((ret = base64_decode(pval, (const uint8_t *)mark, &decoded_len, &src_len, mode))) {
+ buf = mark + src_len;
+ goto base64_failed;
+ }
+ if (src_len != (size_t)(buf - mark)) {
+ buf = mark + src_len;
+ goto base64_failed;
+ }
+ if (decoded_len < max_len) {
+ if (flatcc_builder_truncate_vector(ctx->ctx, max_len - decoded_len)) {
+ goto failed;
+ }
+ }
+ if (!(*ref = flatcc_builder_end_vector(ctx->ctx))) {
+ goto failed;
+ }
+ return flatcc_json_parser_string_end(ctx, buf, end);
+
+failed:
+ *ref = 0;
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_runtime);
+
+base64_failed:
+ *ref = 0;
+ return flatcc_json_parser_set_error(ctx, buf, end,
+ urlsafe ? flatcc_json_parser_error_base64url : flatcc_json_parser_error_base64);
+}
+
+const char *flatcc_json_parser_char_array(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, char *s, size_t n)
+{
+ flatcc_json_parser_escape_buffer_t code;
+ const char *mark;
+ size_t k = 0;
+
+ buf = flatcc_json_parser_string_start(ctx, buf, end);
+ if (buf != end)
+ while (*buf != '\"') {
+ buf = flatcc_json_parser_string_part(ctx, (mark = buf), end);
+ if (buf == end) return end;
+ k = (size_t)(buf - mark);
+ if (k > n) {
+ if (!(ctx->flags & flatcc_json_parser_f_skip_array_overflow)) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_array_overflow);
+ }
+ k = n; /* Might truncate UTF-8. */
+ }
+ memcpy(s, mark, k);
+ s += k;
+ n -= k;
+ if (*buf == '\"') break;
+ buf = flatcc_json_parser_string_escape(ctx, buf, end, code);
+ if (buf == end) return end;
+ k = (size_t)code[0];
+ mark = code + 1;
+ if (k > n) {
+ if (!(ctx->flags & flatcc_json_parser_f_skip_array_overflow)) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_array_overflow);
+ }
+ k = n; /* Might truncate UTF-8. */
+ }
+ memcpy(s, mark, k);
+ s += k;
+ n -= k;
+ }
+ if (n != 0) {
+ if (ctx->flags & flatcc_json_parser_f_reject_array_underflow) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_array_underflow);
+ }
+        memset(s, 0, n);
+ }
+ return flatcc_json_parser_string_end(ctx, buf, end);
+}
+
+
+/* String Creation - depends on flatcc builder. */
+
+const char *flatcc_json_parser_build_string(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, flatcc_builder_ref_t *ref)
+{
+ flatcc_json_parser_escape_buffer_t code;
+ const char *mark;
+
+ buf = flatcc_json_parser_string_start(ctx, buf, end);
+ buf = flatcc_json_parser_string_part(ctx, (mark = buf), end);
+ if (buf != end && *buf == '\"') {
+ *ref = flatcc_builder_create_string(ctx->ctx, mark, (size_t)(buf - mark));
+ } else {
+ if (flatcc_builder_start_string(ctx->ctx) ||
+ 0 == flatcc_builder_append_string(ctx->ctx, mark, (size_t)(buf - mark))) goto failed;
+ while (buf != end && *buf != '\"') {
+ buf = flatcc_json_parser_string_escape(ctx, buf, end, code);
+ if (0 == flatcc_builder_append_string(ctx->ctx, code + 1, (size_t)code[0])) goto failed;
+ if (end != (buf = flatcc_json_parser_string_part(ctx, (mark = buf), end))) {
+ if (0 == flatcc_builder_append_string(ctx->ctx, mark, (size_t)(buf - mark))) goto failed;
+ }
+ }
+ *ref = flatcc_builder_end_string(ctx->ctx);
+ }
+ return flatcc_json_parser_string_end(ctx, buf, end);
+
+failed:
+ *ref = 0;
+ return buf;
+}
+
+/* UNIONS */
+
+/*
+ * Unions are difficult to parse because the type field may appear after
+ * the union table and because having two fields opens up many more
+ * possible error scenarios. We must store each union of a table
+ * temporarily - this cannot be in the generated table parser function
+ * because there could be many unions (about 2^15 with default voffsets)
+ * although usually there will be only a few. We also cannot store the
+ * data encoded in the existing table buffer in the builder because we may
+ * have to remove it due to schema forwarding and removing it messes up
+ * the table layout. We also cannot naively allocate it dynamically for
+ * performance reasons. Instead we place the temporary union data in a
+ * separate frame from the table buffer, but on a similar stack. This is
+ * called the user stack and we manage one frame per table that is known
+ * to contain unions.
+ *
+ * Even with the temporary structures in place we still cannot parse a union
+ * before we know its type. Due to JSON typically sorting fields
+ * alphabetically in various pretty printers, we are likely to receive
+ * the type late, with `<union_name>_type` following `<union_name>`.
+ * To deal with this we store a backtracking pointer and parse the
+ * table generically in a first pass and reparse the table once the type
+ * is known. This can happen recursively with nested tables containing
+ * unions which is why we need to have a stack frame.
+ *
+ * If the type field is stored first we just store the type in the
+ * custom frame and immediately parse the table with the right type
+ * once we see it. The parse will be much faster and we can strongly
+ * recommend that flatbuffer serializers do this, but we cannot require
+ * it.
+ *
+ * The actual overhead of dealing with the custom stack frame is fairly
+ * cheap once we get past the first custom stack allocation.
+ *
+ * We cannot update the builder before both the table and table type
+ * has been parsed because the the type might have to be ingored due
+ * to schema forwarding. Therefore the union type must be cached or
+ * reread. This happens trivially be calling the union parser with the
+ * type as argument, but it is important to be aware of before
+ * refactoring the code.
+ *
+ * The user frame is created at table start and remains valid until
+ * table exit, but we cannot assume the pointers to the frame remain
+ * valid. Specifically we cannot use frame pointers after calling
+ * the union parser. This means the union type must be cached or reread
+ * so it can be added to the table. Because the type is passed to
+ * the union parser this caching happens automatically but it is still
+ * important to be aware that it is required.
+ *
+ * The frame reserves temporary information for all unions the table
+ * holds, enumerated 0 <= `union_index` < `union_total`
+ * where the `union_total` is fixed type specific number.
+ *
+ * The `type_present` is needed because union types range from 0..255
+ * and we need an extra bit do distinguish not present from union type
+ * `NONE = 0`.
+ */
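+
+/*
+ * Illustration (not from the upstream flatcc sources; the schema below
+ * is hypothetical): given `union Any { Monster }` and a table field
+ * `test: Any`, both orderings must be accepted:
+ *
+ *   { "test_type": "Monster", "test": { ... } }  - fast path
+ *   { "test": { ... }, "test_type": "Monster" }  - backtracking path
+ *
+ * In the second case the union value is first skipped as generic JSON
+ * while a backtrace pointer is recorded; once `test_type` arrives, the
+ * value is reparsed from that pointer with the now-known type.
+ */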
+
+typedef struct {
+ const char *backtrace;
+ const char *line_start;
+ int line;
+ uint8_t type_present;
+ uint8_t type;
+ /* Union vectors: */
+ uoffset_t count;
+ size_t h_types;
+} __flatcc_json_parser_union_entry_t;
+
+typedef struct {
+ size_t union_total;
+ size_t union_count;
+ __flatcc_json_parser_union_entry_t unions[1];
+} __flatcc_json_parser_union_frame_t;
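+
+/*
+ * Note: `unions[1]` is the pre-C99 flexible array member idiom. The
+ * frame is allocated with room for `union_total` entries, hence the
+ * `(union_total - 1)` in the sizeof arithmetic in prepare_unions below.
+ */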
+
+const char *flatcc_json_parser_prepare_unions(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, size_t union_total, size_t *handle)
+{
+ __flatcc_json_parser_union_frame_t *f;
+
+ if (!(*handle = flatcc_builder_enter_user_frame(ctx->ctx,
+ sizeof(__flatcc_json_parser_union_frame_t) + (union_total - 1) *
+ sizeof(__flatcc_json_parser_union_entry_t)))) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_runtime);
+ }
+ f = flatcc_builder_get_user_frame_ptr(ctx->ctx, *handle);
+ /* Frames have zeroed memory. */
+ f->union_total = union_total;
+ return buf;
+}
+
+const char *flatcc_json_parser_finalize_unions(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, size_t handle)
+{
+ __flatcc_json_parser_union_frame_t *f = flatcc_builder_get_user_frame_ptr(ctx->ctx, handle);
+
+ if (f->union_count) {
+ buf = flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_union_incomplete);
+ }
+ flatcc_builder_exit_user_frame_at(ctx->ctx, handle);
+ return buf;
+}
+
+const char *flatcc_json_parser_union(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, size_t union_index,
+ flatbuffers_voffset_t id, size_t handle, flatcc_json_parser_union_f *union_parser)
+{
+ __flatcc_json_parser_union_frame_t *f = flatcc_builder_get_user_frame_ptr(ctx->ctx, handle);
+ __flatcc_json_parser_union_entry_t *e = &f->unions[union_index];
+ flatcc_builder_union_ref_t uref;
+
+ if (e->backtrace) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_duplicate);
+ }
+ if (!e->type_present) {
+ /* If we supported table: null, we should not count it, but we don't. */
+ ++f->union_count;
+ e->line = ctx->line;
+ e->line_start = ctx->line_start;
+ buf = flatcc_json_parser_generic_json(ctx, (e->backtrace = buf), end);
+ } else {
+ uref.type = e->type;
+ if (e->type == 0) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_union_none_present);
+ }
+ --f->union_count;
+ buf = union_parser(ctx, buf, end, e->type, &uref.value);
+ if (buf != end) {
+ if (flatcc_builder_table_add_union(ctx->ctx, id, uref)) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_duplicate);
+ }
+ }
+ }
+ return buf;
+}
+
+const char *flatcc_json_parser_union_type(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, size_t union_index, flatbuffers_voffset_t id,
+ size_t handle,
+ flatcc_json_parser_integral_symbol_f *type_parsers[],
+ flatcc_json_parser_union_f *union_parser)
+{
+ __flatcc_json_parser_union_frame_t *f = flatcc_builder_get_user_frame_ptr(ctx->ctx, handle);
+ __flatcc_json_parser_union_entry_t *e = f->unions + union_index;
+
+ flatcc_builder_union_ref_t uref;
+ const char *mark;
+ int line;
+ const char *line_start;
+
+ if (e->type_present) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_duplicate);
+ }
+ e->type_present = 1;
+ buf = flatcc_json_parser_uint8(ctx, (mark = buf), end, &e->type);
+ if (mark == buf) {
+ buf = flatcc_json_parser_symbolic_uint8(ctx, buf, end, type_parsers, &e->type);
+ }
+ /* Only count the union if the type is not NONE. */
+ if (e->backtrace == 0) {
+ f->union_count += e->type != 0;
+ return buf;
+ }
+ FLATCC_ASSERT(f->union_count);
+ --f->union_count;
+ /*
+ * IMPORTANT: we cannot access any value through the frame or entry
+ * pointer after calling the union parser, because the call might
+ * cause the stack to reallocate. The frame pointer would have to be
+ * read again if needed - we don't need it here, but remember this
+ * when refactoring the code.
+ *
+ * IMPORTANT 2: Do not assign buf here. We are backtracking.
+ */
+ line = ctx->line;
+ line_start = ctx->line_start;
+ ctx->line = e->line;
+ ctx->line_start = e->line_start;
+ uref.type = e->type;
+ if (end == union_parser(ctx, e->backtrace, end, e->type, &uref.value)) {
+ return end;
+ }
+ if (flatcc_builder_table_add_union(ctx->ctx, id, uref)) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_duplicate);
+ }
+ ctx->line = line;
+ ctx->line_start = line_start;
+ return buf;
+}
+
+static const char *_parse_union_vector(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, size_t h_types, uoffset_t count,
+ flatbuffers_voffset_t id, flatcc_json_parser_union_f *union_parser)
+{
+ flatcc_builder_ref_t ref = 0, *pref;
+ utype_t *types;
+ int more;
+ size_t i;
+
+ if (flatcc_builder_start_offset_vector(ctx->ctx)) goto failed;
+ buf = flatcc_json_parser_array_start(ctx, buf, end, &more);
+ i = 0;
+ while (more) {
+ if (i == count) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_union_vector_length);
+ }
+ /* Frame must be restored between calls to table parser. */
+ types = flatcc_builder_get_user_frame_ptr(ctx->ctx, h_types);
+ buf = union_parser(ctx, buf, end, types[i], &ref);
+ if (buf == end) {
+ return buf;
+ }
+ if (!(pref = flatcc_builder_extend_offset_vector(ctx->ctx, 1))) goto failed;
+ *pref = ref;
+ buf = flatcc_json_parser_array_end(ctx, buf, end, &more);
+ ++i;
+ }
+ if (i != count) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_union_vector_length);
+ }
+ /* Frame must be restored between calls to table parser. */
+ types = flatcc_builder_get_user_frame_ptr(ctx->ctx, h_types);
+ if (!(ref = flatcc_builder_end_offset_vector_for_unions(ctx->ctx, types))) goto failed;
+ if (!(pref = flatcc_builder_table_add_offset(ctx->ctx, id))) goto failed;
+ *pref = ref;
+ return buf;
+failed:
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_runtime);
+}
+
+const char *flatcc_json_parser_union_vector(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, size_t union_index,
+ flatbuffers_voffset_t id, size_t handle, flatcc_json_parser_union_f *union_parser)
+{
+ __flatcc_json_parser_union_frame_t *f = flatcc_builder_get_user_frame_ptr(ctx->ctx, handle);
+ __flatcc_json_parser_union_entry_t *e = f->unions + union_index;
+
+ if (e->backtrace) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_duplicate);
+ }
+ if (!e->type_present) {
+ ++f->union_count;
+ e->line = ctx->line;
+ e->line_start = ctx->line_start;
+ buf = flatcc_json_parser_generic_json(ctx, (e->backtrace = buf), end);
+ } else {
+ --f->union_count;
+ buf = _parse_union_vector(ctx, buf, end, e->h_types, e->count, id, union_parser);
+ }
+ return buf;
+}
+
+const char *flatcc_json_parser_union_type_vector(flatcc_json_parser_t *ctx,
+ const char *buf, const char *end, size_t union_index, flatbuffers_voffset_t id,
+ size_t handle,
+ flatcc_json_parser_integral_symbol_f *type_parsers[],
+ flatcc_json_parser_union_f *union_parser,
+ flatcc_json_parser_is_known_type_f accept_type)
+{
+ __flatcc_json_parser_union_frame_t *f = flatcc_builder_get_user_frame_ptr(ctx->ctx, handle);
+ __flatcc_json_parser_union_entry_t *e = f->unions + union_index;
+
+ const char *mark;
+ int line;
+ const char *line_start;
+ int more;
+ utype_t val;
+ void *pval;
+ flatcc_builder_ref_t ref, *pref;
+ utype_t *types;
+ size_t size;
+ size_t h_types;
+ uoffset_t count;
+
+#if FLATBUFFERS_UTYPE_MAX != UINT8_MAX
+#error "Update union vector parser to support current union type definition."
+#endif
+
+ if (e->type_present) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_duplicate);
+ }
+ e->type_present = 1;
+ if (flatcc_builder_start_vector(ctx->ctx, 1, 1, FLATBUFFERS_COUNT_MAX((utype_size)))) goto failed;
+ buf = flatcc_json_parser_array_start(ctx, buf, end, &more);
+ while (more) {
+ if (!(pval = flatcc_builder_extend_vector(ctx->ctx, 1))) goto failed;
+ buf = flatcc_json_parser_uint8(ctx, (mark = buf), end, &val);
+ if (mark == buf) {
+ buf = flatcc_json_parser_symbolic_uint8(ctx, (mark = buf), end, type_parsers, &val);
+ if (buf == mark || buf == end) goto failed;
+ }
+ /* Parse unknown types as NONE */
+ if (!accept_type(val)) {
+ if (!(ctx->flags & flatcc_json_parser_f_skip_unknown)) {
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_unknown_union);
+ }
+ val = 0;
+ }
+ flatbuffers_uint8_write_to_pe(pval, val);
+ buf = flatcc_json_parser_array_end(ctx, buf, end, &more);
+ }
+ count = (uoffset_t)flatcc_builder_vector_count(ctx->ctx);
+ e->count = count;
+ size = count * utype_size;
+ /* Store type vector so it is accessible to the table vector parser. */
+ h_types = flatcc_builder_enter_user_frame(ctx->ctx, size);
+ types = flatcc_builder_get_user_frame_ptr(ctx->ctx, h_types);
+ memcpy(types, flatcc_builder_vector_edit(ctx->ctx), size);
+ if (!((ref = flatcc_builder_end_vector(ctx->ctx)))) goto failed;
+ if (!(pref = flatcc_builder_table_add_offset(ctx->ctx, id - 1))) goto failed;
+ *pref = ref;
+
+ /* Restore union frame after possible invalidation due to types frame allocation. */
+ f = flatcc_builder_get_user_frame_ptr(ctx->ctx, handle);
+ e = f->unions + union_index;
+
+ e->h_types = h_types;
+ if (e->backtrace == 0) {
+ ++f->union_count;
+ return buf;
+ }
+ FLATCC_ASSERT(f->union_count);
+ --f->union_count;
+ line = ctx->line;
+ line_start = ctx->line_start;
+ ctx->line = e->line;
+ ctx->line_start = e->line_start;
+ /* We must not assign buf here because we are backtracking. */
+ if (end == _parse_union_vector(ctx, e->backtrace, end, h_types, count, id, union_parser)) return end;
+ /*
+ * NOTE: We do not need the user frame anymore, but if we did, it
+ * would have to be restored from its handle due to the above parse.
+ */
+ ctx->line = line;
+ ctx->line_start = line_start;
+ return buf;
+failed:
+ return flatcc_json_parser_set_error(ctx, buf, end, flatcc_json_parser_error_runtime);
+}
+
+int flatcc_json_parser_table_as_root(flatcc_builder_t *B, flatcc_json_parser_t *ctx,
+ const char *buf, size_t bufsiz, int flags, const char *fid,
+ flatcc_json_parser_table_f *parser)
+{
+ flatcc_json_parser_t _ctx;
+ flatcc_builder_ref_t root;
+ int builder_flags = flags & flatcc_json_parser_f_with_size ? flatcc_builder_with_size : 0;
+
+ ctx = ctx ? ctx : &_ctx;
+ flatcc_json_parser_init(ctx, B, buf, buf + bufsiz, flags);
+ if (flatcc_builder_start_buffer(B, fid, 0, builder_flags)) return -1;
+ buf = parser(ctx, buf, buf + bufsiz, &root);
+ if (ctx->error) {
+ return ctx->error;
+ }
+ if (!flatcc_builder_end_buffer(B, root)) return -1;
+ ctx->end_loc = buf;
+ return 0;
+}
+
+int flatcc_json_parser_struct_as_root(flatcc_builder_t *B, flatcc_json_parser_t *ctx,
+ const char *buf, size_t bufsiz, int flags, const char *fid,
+ flatcc_json_parser_table_f *parser)
+{
+ flatcc_json_parser_t _ctx;
+ flatcc_builder_ref_t root;
+ int builder_flags = flags & flatcc_json_parser_f_with_size ? flatcc_builder_with_size : 0;
+
+ ctx = ctx ? ctx : &_ctx;
+ flatcc_json_parser_init(ctx, B, buf, buf + bufsiz, flags);
+ if (flatcc_builder_start_buffer(B, fid, 0, builder_flags)) return -1;
+ buf = parser(ctx, buf, buf + bufsiz, &root);
+ if (ctx->error) {
+ return ctx->error;
+ }
+ if (!flatcc_builder_end_buffer(B, root)) return -1;
+ ctx->end_loc = buf;
+ return 0;
+}
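+
+/*
+ * Usage sketch (illustrative only; the generated parser name below is
+ * hypothetical, as produced by `flatcc --json` from a schema):
+ *
+ *   flatcc_builder_t b;
+ *   size_t size;
+ *   void *buf;
+ *
+ *   flatcc_builder_init(&b);
+ *   if (0 == flatcc_json_parser_table_as_root(&b, 0, json, strlen(json),
+ *           flatcc_json_parser_f_skip_unknown, 0,
+ *           MyGame_Monster_parse_json_table)) {
+ *       buf = flatcc_builder_finalize_buffer(&b, &size);
+ *       ... use buf, then flatcc_builder_free(buf) ...
+ *   }
+ *   flatcc_builder_clear(&b);
+ */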
diff --git a/nostrdb/flatcc/json_printer.c b/nostrdb/flatcc/json_printer.c
@@ -0,0 +1,1486 @@
+/*
+ * Runtime support for printing flatbuffers to JSON.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+
+#include "flatcc/flatcc_rtconfig.h"
+#include "flatcc/flatcc_assert.h"
+
+/*
+ * Grisu significantly improves printing speed of floating point values
+ * and also the overall printing speed when floating point values are
+ * present in non-trivial amounts. (Also applies to parsing).
+ */
+#if FLATCC_USE_GRISU3 && !defined(PORTABLE_USE_GRISU3)
+#define PORTABLE_USE_GRISU3 1
+#endif
+
+#include "flatcc/flatcc_flatbuffers.h"
+#include "flatcc/flatcc_json_printer.h"
+#include "flatcc/flatcc_identifier.h"
+
+#include "flatcc/portable/pprintint.h"
+#include "flatcc/portable/pprintfp.h"
+#include "flatcc/portable/pbase64.h"
+
+
+#define RAISE_ERROR(err) flatcc_json_printer_set_error(ctx, flatcc_json_printer_error_##err)
+
+const char *flatcc_json_printer_error_string(int err)
+{
+ switch (err) {
+#define XX(no, str) \
+ case flatcc_json_printer_error_##no: \
+ return str;
+ FLATCC_JSON_PRINT_ERROR_MAP(XX)
+#undef XX
+ default:
+ return "unknown";
+ }
+}
+
+#define flatcc_json_printer_utype_enum_f flatcc_json_printer_union_type_f
+#define flatbuffers_utype_read_from_pe __flatbuffers_utype_read_from_pe
+
+#define uoffset_t flatbuffers_uoffset_t
+#define soffset_t flatbuffers_soffset_t
+#define voffset_t flatbuffers_voffset_t
+#define utype_t flatbuffers_utype_t
+
+#define uoffset_size sizeof(uoffset_t)
+#define soffset_size sizeof(soffset_t)
+#define voffset_size sizeof(voffset_t)
+#define utype_size sizeof(utype_t)
+
+#define offset_size uoffset_size
+
+#if FLATBUFFERS_UTYPE_MAX == UINT8_MAX
+#define print_utype print_uint8
+#else
+#ifdef FLATBUFFERS_UTYPE_MIN
+#define print_utype print_int64
+#else
+#define print_utype print_uint64
+#endif
+#endif
+
+static inline const void *read_uoffset_ptr(const void *p)
+{
+ return (uint8_t *)p + __flatbuffers_uoffset_read_from_pe(p);
+}
+
+static inline voffset_t read_voffset(const void *p, uoffset_t base)
+{
+ return __flatbuffers_voffset_read_from_pe((uint8_t *)p + base);
+}
+
+static inline const void *get_field_ptr(flatcc_json_printer_table_descriptor_t *td, int id)
+{
+ uoffset_t vo = (uoffset_t)(id + 2) * (uoffset_t)sizeof(voffset_t);
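+ /* The first two voffset slots of a vtable hold the vtable size and
+ * the table size, so field id 0 lives at slot 2 - hence (id + 2). */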
+
+ if (vo >= (uoffset_t)td->vsize) {
+ return 0;
+ }
+ vo = read_voffset(td->vtable, vo);
+ if (vo == 0) {
+ return 0;
+ }
+ return (uint8_t *)td->table + vo;
+}
+
+#define print_char(c) *ctx->p++ = (c)
+
+#define print_null() do { \
+ print_char('n'); \
+ print_char('u'); \
+ print_char('l'); \
+ print_char('l'); \
+} while (0)
+
+#define print_start(c) do { \
+ ++ctx->level; \
+ *ctx->p++ = c; \
+} while (0)
+
+#define print_end(c) do { \
+ if (ctx->indent) { \
+ *ctx->p++ = '\n'; \
+ --ctx->level; \
+ print_indent(ctx); \
+ } \
+ *ctx->p++ = c; \
+} while (0)
+
+#define print_space() do { \
+ *ctx->p = ' '; \
+ ctx->p += !!ctx->indent; \
+} while (0)
+
+#define print_nl() do { \
+ if (ctx->indent) { \
+ *ctx->p++ = '\n'; \
+ print_indent(ctx); \
+ } else { \
+ flatcc_json_printer_flush_partial(ctx); \
+ } \
+} while (0)
+
+/* Call at the end so print_end does not have to check for level. */
+#define print_last_nl() do { \
+ if (ctx->indent && ctx->level == 0) { \
+ *ctx->p++ = '\n'; \
+ } \
+ ctx->flush(ctx, 1); \
+} while (0)
+
+int flatcc_json_printer_fmt_float(char *buf, float n)
+{
+#if FLATCC_JSON_PRINT_HEX_FLOAT
+ return print_hex_float(buf, n);
+#else
+ return print_float(n, buf);
+#endif
+}
+
+int flatcc_json_printer_fmt_double(char *buf, double n)
+{
+#if FLATCC_JSON_PRINT_HEX_FLOAT
+ return print_hex_double(buf, n);
+#else
+ return print_double(n, buf);
+#endif
+}
+
+int flatcc_json_printer_fmt_bool(char *buf, int n)
+{
+ if (n) {
+ memcpy(buf, "true", 4);
+ return 4;
+ }
+ memcpy(buf, "false", 5);
+ return 5;
+}
+
+static void print_ex(flatcc_json_printer_t *ctx, const char *s, size_t n)
+{
+ size_t k;
+
+ if (ctx->p >= ctx->pflush) {
+ ctx->flush(ctx, 0);
+ }
+ k = (size_t)(ctx->pflush - ctx->p);
+ while (n > k) {
+ memcpy(ctx->p, s, k);
+ ctx->p += k;
+ s += k;
+ n -= k;
+ ctx->flush(ctx, 0);
+ k = (size_t)(ctx->pflush - ctx->p);
+ }
+ memcpy(ctx->p, s, n);
+ ctx->p += n;
+}
+
+static inline void print(flatcc_json_printer_t *ctx, const char *s, size_t n)
+{
+ if (ctx->p + n >= ctx->pflush) {
+ print_ex(ctx, s, n);
+ } else {
+ memcpy(ctx->p, s, n);
+ ctx->p += n;
+ }
+}
+
+static void print_escape(flatcc_json_printer_t *ctx, unsigned char c)
+{
+ unsigned char x;
+
+ print_char('\\');
+ switch (c) {
+ case '"': print_char('\"'); break;
+ case '\\': print_char('\\'); break;
+ case '\t' : print_char('t'); break;
+ case '\f' : print_char('f'); break;
+ case '\r' : print_char('r'); break;
+ case '\n' : print_char('n'); break;
+ case '\b' : print_char('b'); break;
+ default:
+ print_char('u');
+ print_char('0');
+ print_char('0');
+ x = c >> 4;
+ x += x < 10 ? '0' : 'a' - 10;
+ print_char((char)x);
+ x = c & 15;
+ x += x < 10 ? '0' : 'a' - 10;
+ print_char((char)x);
+ break;
+ }
+}
+
+/*
+ * Even though we know the string length, we need to scan for escape
+ * characters. There may be embedded zeroes. Because FlatBuffer strings
+ * are always zero terminated, we assume and optimize for this.
+ *
+ * We enforce \u00xx for control characters, but not for invalid
+ * characters like 0xff - this makes it possible to handle some other
+ * codepages transparently while formally not valid. (Formally JSON
+ * also supports UTF-16/32 little/big endian but flatbuffers only
+ * support UTF-8 and we expect this in JSON input/output too).
+ */
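+/*
+ * Example (illustrative): the 4-byte string {'a', '"', 0x01, 'b'}
+ * prints as "a\"\u0001b": quote and backslash get two-character
+ * escapes, while other control characters get the six-character
+ * \u00xx form with lowercase hex digits.
+ */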
+static void print_string(flatcc_json_printer_t *ctx, const char *s, size_t n)
+{
+ const char *p = s;
+ /* Unsigned is important. */
+ unsigned char c;
+ size_t k;
+
+ print_char('\"');
+ for (;;) {
+ c = (unsigned char)*p;
+ while (c >= 0x20 && c != '\"' && c != '\\') {
+ c = (unsigned char)*++p;
+ }
+ k = (size_t)(p - s);
+ /* Even if k == 0, print ensures buffer flush. */
+ print(ctx, s, k);
+ n -= k;
+ if (n == 0) break;
+ s += k;
+ print_escape(ctx, c);
+ ++p;
+ --n;
+ ++s;
+ }
+ print_char('\"');
+}
+
+/*
+ * Similar to print_string, but null termination is not guaranteed, and
+ * trailing nulls are stripped.
+ */
+static void print_char_array(flatcc_json_printer_t *ctx, const char *s, size_t n)
+{
+ const char *p = s;
+ /* Unsigned is important. */
+ unsigned char c = 0;
+ size_t k;
+
+ while (n > 0 && s[n - 1] == '\0') --n;
+
+ print_char('\"');
+ for (;;) {
+ while (n) {
+ c = (unsigned char)*p;
+ if (c < 0x20 || c == '\"' || c == '\\') break;
+ ++p;
+ --n;
+ }
+ k = (size_t)(p - s);
+ /* Even if k == 0, print ensures buffer flush. */
+ print(ctx, s, k);
+ if (n == 0) break;
+ s += k;
+ print_escape(ctx, c);
+ ++p;
+ --n;
+ ++s;
+ }
+ print_char('\"');
+}
+
+static void print_uint8_vector_base64_object(flatcc_json_printer_t *ctx, const void *p, int mode)
+{
+ const int unpadded_mode = mode & ~base64_enc_modifier_padding;
+ size_t k, n, len;
+ const uint8_t *data;
+ size_t data_len, src_len;
+
+ data_len = (size_t)__flatbuffers_uoffset_read_from_pe(p);
+ data = (const uint8_t *)p + uoffset_size;
+
+ print_char('\"');
+
+ len = base64_encoded_size(data_len, mode);
+ if (ctx->p + len >= ctx->pflush) {
+ ctx->flush(ctx, 0);
+ }
+ while (ctx->p + len > ctx->pflush) {
+ /* Each group of 4 output chars consumes exactly 3 input bytes before final padding. */
+ k = (size_t)(ctx->pflush - ctx->p) & ~(size_t)3;
+ n = k * 3 / 4;
+ FLATCC_ASSERT(n > 0);
+ src_len = k * 3 / 4;
+ base64_encode((uint8_t *)ctx->p, data, 0, &src_len, unpadded_mode);
+ ctx->p += k;
+ data += n;
+ data_len -= n;
+ ctx->flush(ctx, 0);
+ len = base64_encoded_size(data_len, mode);
+ }
+ base64_encode((uint8_t *)ctx->p, data, 0, &data_len, mode);
+ ctx->p += len;
+ print_char('\"');
+}
+
+static void print_indent_ex(flatcc_json_printer_t *ctx, size_t n)
+{
+ size_t k;
+
+ if (ctx->p >= ctx->pflush) {
+ ctx->flush(ctx, 0);
+ }
+ k = (size_t)(ctx->pflush - ctx->p);
+ while (n > k) {
+ memset(ctx->p, ' ', k);
+ ctx->p += k;
+ n -= k;
+ ctx->flush(ctx, 0);
+ k = (size_t)(ctx->pflush - ctx->p);
+ }
+ memset(ctx->p, ' ', n);
+ ctx->p += n;
+}
+
+static inline void print_indent(flatcc_json_printer_t *ctx)
+{
+ size_t n = (size_t)(ctx->level * ctx->indent);
+
+ if (ctx->p + n > ctx->pflush) {
+ print_indent_ex(ctx, n);
+ } else {
+ memset(ctx->p, ' ', n);
+ ctx->p += n;
+ }
+}
+
+/*
+ * Helpers for external use - they do not do automatic pretty printing,
+ * but they do escape strings.
+ */
+void flatcc_json_printer_string(flatcc_json_printer_t *ctx, const char *s, size_t n)
+{
+ print_string(ctx, s, n);
+}
+
+void flatcc_json_printer_write(flatcc_json_printer_t *ctx, const char *s, size_t n)
+{
+ print(ctx, s, n);
+}
+
+void flatcc_json_printer_nl(flatcc_json_printer_t *ctx)
+{
+ print_char('\n');
+ flatcc_json_printer_flush_partial(ctx);
+}
+
+void flatcc_json_printer_char(flatcc_json_printer_t *ctx, char c)
+{
+ print_char(c);
+}
+
+void flatcc_json_printer_indent(flatcc_json_printer_t *ctx)
+{
+ /*
+ * This is only needed when indent is 0 but helps external users
+ * to avoid flushing when indenting.
+ */
+ print_indent(ctx);
+}
+
+void flatcc_json_printer_add_level(flatcc_json_printer_t *ctx, int n)
+{
+ ctx->level += n;
+}
+
+int flatcc_json_printer_get_level(flatcc_json_printer_t *ctx)
+{
+ return ctx->level;
+}
+
+static inline void print_symbol(flatcc_json_printer_t *ctx, const char *name, size_t len)
+{
+ *ctx->p = '\"';
+ ctx->p += !ctx->unquote;
+ if (ctx->p + len < ctx->pflush) {
+ memcpy(ctx->p, name, len);
+ ctx->p += len;
+ } else {
+ print(ctx, name, len);
+ }
+ *ctx->p = '\"';
+ ctx->p += !ctx->unquote;
+}
+
+static inline void print_name(flatcc_json_printer_t *ctx, const char *name, size_t len)
+{
+ print_nl();
+ print_symbol(ctx, name, len);
+ print_char(':');
+ print_space();
+}
+
+#define __flatcc_define_json_printer_scalar(TN, T) \
+void flatcc_json_printer_ ## TN( \
+ flatcc_json_printer_t *ctx, T v) \
+{ \
+ ctx->p += print_ ## TN(v, ctx->p); \
+}
+
+__flatcc_define_json_printer_scalar(uint8, uint8_t)
+__flatcc_define_json_printer_scalar(uint16, uint16_t)
+__flatcc_define_json_printer_scalar(uint32, uint32_t)
+__flatcc_define_json_printer_scalar(uint64, uint64_t)
+__flatcc_define_json_printer_scalar(int8, int8_t)
+__flatcc_define_json_printer_scalar(int16, int16_t)
+__flatcc_define_json_printer_scalar(int32, int32_t)
+__flatcc_define_json_printer_scalar(int64, int64_t)
+__flatcc_define_json_printer_scalar(float, float)
+__flatcc_define_json_printer_scalar(double, double)
+
+void flatcc_json_printer_enum(flatcc_json_printer_t *ctx, const char *symbol, size_t len)
+{
+ print_symbol(ctx, symbol, len);
+}
+
+void flatcc_json_printer_delimit_enum_flags(flatcc_json_printer_t *ctx, int multiple)
+{
+#if FLATCC_JSON_PRINT_ALWAYS_QUOTE_MULTIPLE_FLAGS
+ int quote = !ctx->unquote || multiple;
+#else
+ int quote = !ctx->unquote;
+#endif
+ *ctx->p = '"';
+ ctx->p += quote;
+}
+
+void flatcc_json_printer_enum_flag(flatcc_json_printer_t *ctx, int count, const char *symbol, size_t len)
+{
+ *ctx->p = ' ';
+ ctx->p += count > 0;
+ print(ctx, symbol, len);
+}
+
+static inline void print_string_object(flatcc_json_printer_t *ctx, const void *p)
+{
+ size_t len;
+ const char *s;
+
+ len = (size_t)__flatbuffers_uoffset_read_from_pe(p);
+ s = (const char *)p + uoffset_size;
+ print_string(ctx, s, len);
+}
+
+#define __define_print_scalar_struct_field(TN, T) \
+void flatcc_json_printer_ ## TN ## _struct_field(flatcc_json_printer_t *ctx,\
+ int index, const void *p, size_t offset, \
+ const char *name, size_t len) \
+{ \
+ T x = flatbuffers_ ## TN ## _read_from_pe((uint8_t *)p + offset); \
+ \
+ if (index) { \
+ print_char(','); \
+ } \
+ print_name(ctx, name, len); \
+ ctx->p += print_ ## TN (x, ctx->p); \
+}
+
+void flatcc_json_printer_char_array_struct_field(
+ flatcc_json_printer_t *ctx,
+ int index, const void *p, size_t offset,
+ const char *name, size_t len, size_t count)
+{
+ p = (void *)((size_t)p + offset);
+ if (index) {
+ print_char(',');
+ }
+ print_name(ctx, name, len);
+ print_char_array(ctx, p, count);
+}
+
+#define __define_print_scalar_array_struct_field(TN, T) \
+void flatcc_json_printer_ ## TN ## _array_struct_field( \
+ flatcc_json_printer_t *ctx, \
+ int index, const void *p, size_t offset, \
+ const char *name, size_t len, size_t count) \
+{ \
+ p = (void *)((size_t)p + offset); \
+ if (index) { \
+ print_char(','); \
+ } \
+ print_name(ctx, name, len); \
+ print_start('['); \
+ if (count) { \
+ print_nl(); \
+ ctx->p += print_ ## TN ( \
+ flatbuffers_ ## TN ## _read_from_pe(p), \
+ ctx->p); \
+ p = (void *)((size_t)p + sizeof(T)); \
+ --count; \
+ } \
+ while (count--) { \
+ print_char(','); \
+ print_nl(); \
+ ctx->p += print_ ## TN ( \
+ flatbuffers_ ## TN ## _read_from_pe(p), \
+ ctx->p); \
+ p = (void *)((size_t)p + sizeof(T)); \
+ } \
+ print_end(']'); \
+}
+
+#define __define_print_enum_array_struct_field(TN, T) \
+void flatcc_json_printer_ ## TN ## _enum_array_struct_field( \
+ flatcc_json_printer_t *ctx, \
+ int index, const void *p, size_t offset, \
+ const char *name, size_t len, size_t count, \
+ flatcc_json_printer_ ## TN ##_enum_f *pf) \
+{ \
+ T x; \
+ \
+ p = (void *)((size_t)p + offset); \
+ if (index) { \
+ print_char(','); \
+ } \
+ print_name(ctx, name, len); \
+ print_start('['); \
+ if (count) { \
+ print_nl(); \
+ x = flatbuffers_ ## TN ## _read_from_pe(p); \
+ if (ctx->noenum) { \
+ ctx->p += print_ ## TN (x, ctx->p); \
+ } else { \
+ pf(ctx, x); \
+ } \
+ p = (void *)((size_t)p + sizeof(T)); \
+ --count; \
+ } \
+ while (count--) { \
+ print_char(','); \
+ print_nl(); \
+ x = flatbuffers_ ## TN ## _read_from_pe(p); \
+ if (ctx->noenum) { \
+ ctx->p += print_ ## TN (x, ctx->p); \
+ } else { \
+ pf(ctx, x); \
+ } \
+ p = (void *)((size_t)p + sizeof(T)); \
+ } \
+ print_end(']'); \
+}
+
+#define __define_print_enum_struct_field(TN, T) \
+void flatcc_json_printer_ ## TN ## _enum_struct_field( \
+ flatcc_json_printer_t *ctx, \
+ int index, const void *p, size_t offset, \
+ const char *name, size_t len, \
+ flatcc_json_printer_ ## TN ##_enum_f *pf) \
+{ \
+ T x = flatbuffers_ ## TN ## _read_from_pe((uint8_t *)p + offset); \
+ \
+ if (index) { \
+ print_char(','); \
+ } \
+ print_name(ctx, name, len); \
+ if (ctx->noenum) { \
+ ctx->p += print_ ## TN (x, ctx->p); \
+ } else { \
+ pf(ctx, x); \
+ } \
+}
+
+#define __define_print_scalar_field(TN, T) \
+void flatcc_json_printer_ ## TN ## _field(flatcc_json_printer_t *ctx, \
+ flatcc_json_printer_table_descriptor_t *td, \
+ int id, const char *name, size_t len, T v) \
+{ \
+ T x; \
+ const void *p = get_field_ptr(td, id); \
+ \
+ if (p) { \
+ x = flatbuffers_ ## TN ## _read_from_pe(p); \
+ if (x == v && ctx->skip_default) { \
+ return; \
+ } \
+ } else { \
+ if (!ctx->force_default) { \
+ return; \
+ } \
+ x = v; \
+ } \
+ if (td->count++) { \
+ print_char(','); \
+ } \
+ print_name(ctx, name, len); \
+ ctx->p += print_ ## TN (x, ctx->p); \
+}
+
+#define __define_print_scalar_optional_field(TN, T) \
+void flatcc_json_printer_ ## TN ## _optional_field( \
+ flatcc_json_printer_t *ctx, \
+ flatcc_json_printer_table_descriptor_t *td, \
+ int id, const char *name, size_t len) \
+{ \
+ T x; \
+ const void *p = get_field_ptr(td, id); \
+ \
+ if (!p) return; \
+ x = flatbuffers_ ## TN ## _read_from_pe(p); \
+ if (td->count++) { \
+ print_char(','); \
+ } \
+ print_name(ctx, name, len); \
+ ctx->p += print_ ## TN (x, ctx->p); \
+}
+
+
+#define __define_print_enum_field(TN, T) \
+void flatcc_json_printer_ ## TN ## _enum_field(flatcc_json_printer_t *ctx, \
+ flatcc_json_printer_table_descriptor_t *td, \
+ int id, const char *name, size_t len, T v, \
+ flatcc_json_printer_ ## TN ##_enum_f *pf) \
+{ \
+ T x; \
+ const void *p = get_field_ptr(td, id); \
+ \
+ if (p) { \
+ x = flatbuffers_ ## TN ## _read_from_pe(p); \
+ if (x == v && ctx->skip_default) { \
+ return; \
+ } \
+ } else { \
+ if (!ctx->force_default) { \
+ return; \
+ } \
+ x = v; \
+ } \
+ if (td->count++) { \
+ print_char(','); \
+ } \
+ print_name(ctx, name, len); \
+ if (ctx->noenum) { \
+ ctx->p += print_ ## TN (x, ctx->p); \
+ } else { \
+ pf(ctx, x); \
+ } \
+}
+
+#define __define_print_enum_optional_field(TN, T) \
+void flatcc_json_printer_ ## TN ## _enum_optional_field( \
+ flatcc_json_printer_t *ctx, \
+ flatcc_json_printer_table_descriptor_t *td, \
+ int id, const char *name, size_t len, \
+ flatcc_json_printer_ ## TN ##_enum_f *pf) \
+{ \
+ T x; \
+ const void *p = get_field_ptr(td, id); \
+ \
+ if (!p) return; \
+ x = flatbuffers_ ## TN ## _read_from_pe(p); \
+ if (td->count++) { \
+ print_char(','); \
+ } \
+ print_name(ctx, name, len); \
+ if (ctx->noenum) { \
+ ctx->p += print_ ## TN (x, ctx->p); \
+ } else { \
+ pf(ctx, x); \
+ } \
+}
+
+static inline void print_table_object(flatcc_json_printer_t *ctx,
+ const void *p, int ttl, flatcc_json_printer_table_f pf)
+{
+ flatcc_json_printer_table_descriptor_t td;
+
+ if (!--ttl) {
+ flatcc_json_printer_set_error(ctx, flatcc_json_printer_error_deep_recursion);
+ return;
+ }
+ print_start('{');
+ td.count = 0;
+ td.ttl = ttl;
+ td.table = p;
+ td.vtable = (uint8_t *)p - __flatbuffers_soffset_read_from_pe(p);
+ td.vsize = __flatbuffers_voffset_read_from_pe(td.vtable);
+ pf(ctx, &td);
+ print_end('}');
+}
+
+void flatcc_json_printer_string_field(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len)
+{
+ const void *p = get_field_ptr(td, id);
+
+ if (p) {
+ if (td->count++) {
+ print_char(',');
+ }
+ print_name(ctx, name, len);
+ print_string_object(ctx, read_uoffset_ptr(p));
+ }
+}
+
+void flatcc_json_printer_uint8_vector_base64_field(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len, int urlsafe)
+{
+ const void *p = get_field_ptr(td, id);
+ int mode;
+
+ mode = urlsafe ? base64_mode_url : base64_mode_rfc4648;
+ mode |= base64_enc_modifier_padding;
+
+ if (p) {
+ if (td->count++) {
+ print_char(',');
+ }
+ print_name(ctx, name, len);
+ print_uint8_vector_base64_object(ctx, read_uoffset_ptr(p), mode);
+ }
+}
+
+#define __define_print_scalar_vector_field(TN, T) \
+void flatcc_json_printer_ ## TN ## _vector_field( \
+ flatcc_json_printer_t *ctx, \
+ flatcc_json_printer_table_descriptor_t *td, \
+ int id, const char *name, size_t len) \
+{ \
+ const void *p = get_field_ptr(td, id); \
+ uoffset_t count; \
+ \
+ if (p) { \
+ if (td->count++) { \
+ print_char(','); \
+ } \
+ p = read_uoffset_ptr(p); \
+ count = __flatbuffers_uoffset_read_from_pe(p); \
+ p = (void *)((size_t)p + uoffset_size); \
+ print_name(ctx, name, len); \
+ print_start('['); \
+ if (count) { \
+ print_nl(); \
+ ctx->p += print_ ## TN ( \
+ flatbuffers_ ## TN ## _read_from_pe(p), \
+ ctx->p); \
+ p = (void *)((size_t)p + sizeof(T)); \
+ --count; \
+ } \
+ while (count--) { \
+ print_char(','); \
+ print_nl(); \
+ ctx->p += print_ ## TN ( \
+ flatbuffers_ ## TN ## _read_from_pe(p), \
+ ctx->p); \
+ p = (void *)((size_t)p + sizeof(T)); \
+ } \
+ print_end(']'); \
+ } \
+}
+
+#define __define_print_enum_vector_field(TN, T) \
+void flatcc_json_printer_ ## TN ## _enum_vector_field( \
+ flatcc_json_printer_t *ctx, \
+ flatcc_json_printer_table_descriptor_t *td, \
+ int id, const char *name, size_t len, \
+ flatcc_json_printer_ ## TN ##_enum_f *pf) \
+{ \
+ const void *p; \
+ uoffset_t count; \
+ \
+ if (ctx->noenum) { \
+ flatcc_json_printer_ ## TN ## _vector_field(ctx, td, id, name, len);\
+ return; \
+ } \
+ p = get_field_ptr(td, id); \
+ if (p) { \
+ if (td->count++) { \
+ print_char(','); \
+ } \
+ p = read_uoffset_ptr(p); \
+ count = __flatbuffers_uoffset_read_from_pe(p); \
+ p = (void *)((size_t)p + uoffset_size); \
+ print_name(ctx, name, len); \
+ print_start('['); \
+ if (count) { \
+ print_nl(); \
+ pf(ctx, flatbuffers_ ## TN ## _read_from_pe(p)); \
+ p = (void *)((size_t)p + sizeof(T)); \
+ --count; \
+ } \
+ while (count--) { \
+ print_char(','); \
+ print_nl(); \
+ pf(ctx, flatbuffers_ ## TN ## _read_from_pe(p)); \
+ p = (void *)((size_t)p + sizeof(T)); \
+ } \
+ print_end(']'); \
+ } \
+}
+
+__define_print_scalar_field(uint8, uint8_t)
+__define_print_scalar_field(uint16, uint16_t)
+__define_print_scalar_field(uint32, uint32_t)
+__define_print_scalar_field(uint64, uint64_t)
+__define_print_scalar_field(int8, int8_t)
+__define_print_scalar_field(int16, int16_t)
+__define_print_scalar_field(int32, int32_t)
+__define_print_scalar_field(int64, int64_t)
+__define_print_scalar_field(bool, flatbuffers_bool_t)
+__define_print_scalar_field(float, float)
+__define_print_scalar_field(double, double)
+
+__define_print_enum_field(uint8, uint8_t)
+__define_print_enum_field(uint16, uint16_t)
+__define_print_enum_field(uint32, uint32_t)
+__define_print_enum_field(uint64, uint64_t)
+__define_print_enum_field(int8, int8_t)
+__define_print_enum_field(int16, int16_t)
+__define_print_enum_field(int32, int32_t)
+__define_print_enum_field(int64, int64_t)
+__define_print_enum_field(bool, flatbuffers_bool_t)
+
+__define_print_scalar_optional_field(uint8, uint8_t)
+__define_print_scalar_optional_field(uint16, uint16_t)
+__define_print_scalar_optional_field(uint32, uint32_t)
+__define_print_scalar_optional_field(uint64, uint64_t)
+__define_print_scalar_optional_field(int8, int8_t)
+__define_print_scalar_optional_field(int16, int16_t)
+__define_print_scalar_optional_field(int32, int32_t)
+__define_print_scalar_optional_field(int64, int64_t)
+__define_print_scalar_optional_field(bool, flatbuffers_bool_t)
+__define_print_scalar_optional_field(float, float)
+__define_print_scalar_optional_field(double, double)
+
+__define_print_enum_optional_field(uint8, uint8_t)
+__define_print_enum_optional_field(uint16, uint16_t)
+__define_print_enum_optional_field(uint32, uint32_t)
+__define_print_enum_optional_field(uint64, uint64_t)
+__define_print_enum_optional_field(int8, int8_t)
+__define_print_enum_optional_field(int16, int16_t)
+__define_print_enum_optional_field(int32, int32_t)
+__define_print_enum_optional_field(int64, int64_t)
+__define_print_enum_optional_field(bool, flatbuffers_bool_t)
+
+__define_print_scalar_struct_field(uint8, uint8_t)
+__define_print_scalar_struct_field(uint16, uint16_t)
+__define_print_scalar_struct_field(uint32, uint32_t)
+__define_print_scalar_struct_field(uint64, uint64_t)
+__define_print_scalar_struct_field(int8, int8_t)
+__define_print_scalar_struct_field(int16, int16_t)
+__define_print_scalar_struct_field(int32, int32_t)
+__define_print_scalar_struct_field(int64, int64_t)
+__define_print_scalar_struct_field(bool, flatbuffers_bool_t)
+__define_print_scalar_struct_field(float, float)
+__define_print_scalar_struct_field(double, double)
+
+__define_print_scalar_array_struct_field(uint8, uint8_t)
+__define_print_scalar_array_struct_field(uint16, uint16_t)
+__define_print_scalar_array_struct_field(uint32, uint32_t)
+__define_print_scalar_array_struct_field(uint64, uint64_t)
+__define_print_scalar_array_struct_field(int8, int8_t)
+__define_print_scalar_array_struct_field(int16, int16_t)
+__define_print_scalar_array_struct_field(int32, int32_t)
+__define_print_scalar_array_struct_field(int64, int64_t)
+__define_print_scalar_array_struct_field(bool, flatbuffers_bool_t)
+__define_print_scalar_array_struct_field(float, float)
+__define_print_scalar_array_struct_field(double, double)
+
+__define_print_enum_array_struct_field(uint8, uint8_t)
+__define_print_enum_array_struct_field(uint16, uint16_t)
+__define_print_enum_array_struct_field(uint32, uint32_t)
+__define_print_enum_array_struct_field(uint64, uint64_t)
+__define_print_enum_array_struct_field(int8, int8_t)
+__define_print_enum_array_struct_field(int16, int16_t)
+__define_print_enum_array_struct_field(int32, int32_t)
+__define_print_enum_array_struct_field(int64, int64_t)
+__define_print_enum_array_struct_field(bool, flatbuffers_bool_t)
+
+__define_print_enum_struct_field(uint8, uint8_t)
+__define_print_enum_struct_field(uint16, uint16_t)
+__define_print_enum_struct_field(uint32, uint32_t)
+__define_print_enum_struct_field(uint64, uint64_t)
+__define_print_enum_struct_field(int8, int8_t)
+__define_print_enum_struct_field(int16, int16_t)
+__define_print_enum_struct_field(int32, int32_t)
+__define_print_enum_struct_field(int64, int64_t)
+__define_print_enum_struct_field(bool, flatbuffers_bool_t)
+
+__define_print_scalar_vector_field(utype, flatbuffers_utype_t)
+__define_print_scalar_vector_field(uint8, uint8_t)
+__define_print_scalar_vector_field(uint16, uint16_t)
+__define_print_scalar_vector_field(uint32, uint32_t)
+__define_print_scalar_vector_field(uint64, uint64_t)
+__define_print_scalar_vector_field(int8, int8_t)
+__define_print_scalar_vector_field(int16, int16_t)
+__define_print_scalar_vector_field(int32, int32_t)
+__define_print_scalar_vector_field(int64, int64_t)
+__define_print_scalar_vector_field(bool, flatbuffers_bool_t)
+__define_print_scalar_vector_field(float, float)
+__define_print_scalar_vector_field(double, double)
+
+__define_print_enum_vector_field(utype, flatbuffers_utype_t)
+__define_print_enum_vector_field(uint8, uint8_t)
+__define_print_enum_vector_field(uint16, uint16_t)
+__define_print_enum_vector_field(uint32, uint32_t)
+__define_print_enum_vector_field(uint64, uint64_t)
+__define_print_enum_vector_field(int8, int8_t)
+__define_print_enum_vector_field(int16, int16_t)
+__define_print_enum_vector_field(int32, int32_t)
+__define_print_enum_vector_field(int64, int64_t)
+__define_print_enum_vector_field(bool, flatbuffers_bool_t)
+
+void flatcc_json_printer_struct_vector_field(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len,
+ size_t size,
+ flatcc_json_printer_struct_f pf)
+{
+ const uint8_t *p = get_field_ptr(td, id);
+ uoffset_t count;
+
+ if (p) {
+ if (td->count++) {
+ print_char(',');
+ }
+ p = read_uoffset_ptr(p);
+ count = __flatbuffers_uoffset_read_from_pe(p);
+ p += uoffset_size;
+ print_name(ctx, name, len);
+ print_start('[');
+ if (count) {
+ print_nl();
+ print_start('{');
+ pf(ctx, p);
+ print_end('}');
+ --count;
+ }
+ while (count--) {
+ p += size;
+ print_char(',');
+ print_nl();
+ print_start('{');
+ pf(ctx, p);
+ print_end('}');
+ }
+ print_end(']');
+ }
+}
+
+void flatcc_json_printer_string_vector_field(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len)
+{
+ const uoffset_t *p = get_field_ptr(td, id);
+ uoffset_t count;
+
+ if (p) {
+ if (td->count++) {
+ print_char(',');
+ }
+ p = read_uoffset_ptr(p);
+ count = __flatbuffers_uoffset_read_from_pe(p);
+ ++p;
+ print_name(ctx, name, len);
+ print_start('[');
+ if (count) {
+ print_nl();
+ print_string_object(ctx, read_uoffset_ptr(p));
+ --count;
+ }
+ while (count--) {
+ ++p;
+ print_char(',');
+ print_nl();
+ print_string_object(ctx, read_uoffset_ptr(p));
+ }
+ print_end(']');
+ }
+}
+
+void flatcc_json_printer_table_vector_field(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len,
+ flatcc_json_printer_table_f pf)
+{
+ const uoffset_t *p = get_field_ptr(td, id);
+ uoffset_t count;
+
+ if (p) {
+ if (td->count++) {
+ print_char(',');
+ }
+ p = read_uoffset_ptr(p);
+ count = __flatbuffers_uoffset_read_from_pe(p);
+ ++p;
+ print_name(ctx, name, len);
+ print_start('[');
+ if (count) {
+ print_table_object(ctx, read_uoffset_ptr(p), td->ttl, pf);
+ --count;
+ }
+ while (count--) {
+ ++p;
+ print_char(',');
+ print_table_object(ctx, read_uoffset_ptr(p), td->ttl, pf);
+ }
+ print_end(']');
+ }
+}
+
+void flatcc_json_printer_union_vector_field(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len,
+ flatcc_json_printer_union_type_f ptf,
+ flatcc_json_printer_union_f pf)
+{
+ const uoffset_t *pt = get_field_ptr(td, id - 1);
+ const uoffset_t *p = get_field_ptr(td, id);
+ utype_t *types, type;
+ uoffset_t count;
+ char type_name[FLATCC_JSON_PRINT_NAME_LEN_MAX + 5];
+ flatcc_json_printer_union_descriptor_t ud;
+
+ ud.ttl = td->ttl;
+ if (len > FLATCC_JSON_PRINT_NAME_LEN_MAX) {
+ RAISE_ERROR(bad_input);
+ FLATCC_ASSERT(0 && "identifier too long");
+ return;
+ }
+ memcpy(type_name, name, len);
+ memcpy(type_name + len, "_type", 5);
+ if (p && pt) {
+ flatcc_json_printer_utype_enum_vector_field(ctx, td, id - 1,
+ type_name, len + 5, ptf);
+ if (td->count++) {
+ print_char(',');
+ }
+ p = read_uoffset_ptr(p);
+ pt = read_uoffset_ptr(pt);
+ count = __flatbuffers_uoffset_read_from_pe(p);
+ ++p;
+ ++pt;
+ types = (utype_t *)pt;
+ print_name(ctx, name, len);
+ print_start('[');
+
+ if (count) {
+ type = __flatbuffers_utype_read_from_pe(types);
+ if (type != 0) {
+ ud.type = type;
+ ud.member = p;
+ pf(ctx, &ud);
+ } else {
+ print_null();
+ }
+ --count;
+ }
+ while (count--) {
+ ++p;
+ ++types;
+ type = __flatbuffers_utype_read_from_pe(types);
+ print_char(',');
+ if (type != 0) {
+ ud.type = type;
+ ud.member = p;
+ pf(ctx, &ud);
+ } else {
+ print_null();
+ }
+ }
+ print_end(']');
+ }
+}
+
+void flatcc_json_printer_table_field(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len,
+ flatcc_json_printer_table_f pf)
+{
+ const void *p = get_field_ptr(td, id);
+
+ if (p) {
+ if (td->count++) {
+ print_char(',');
+ }
+ print_name(ctx, name, len);
+ print_table_object(ctx, read_uoffset_ptr(p), td->ttl, pf);
+ }
+}
+
+void flatcc_json_printer_union_field(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len,
+ flatcc_json_printer_union_type_f ptf,
+ flatcc_json_printer_union_f pf)
+{
+ const void *pt = get_field_ptr(td, id - 1);
+ const void *p = get_field_ptr(td, id);
+ utype_t type;
+ flatcc_json_printer_union_descriptor_t ud;
+
+ if (!p || !pt) {
+ return;
+ }
+ type = __flatbuffers_utype_read_from_pe(pt);
+ if (td->count++) {
+ print_char(',');
+ }
+ print_nl();
+ *ctx->p = '\"';
+ ctx->p += !ctx->unquote;
+ if (ctx->p + len < ctx->pflush) {
+ memcpy(ctx->p, name, len);
+ ctx->p += len;
+ } else {
+ print(ctx, name, len);
+ }
+ print(ctx, "_type", 5);
+ *ctx->p = '\"';
+ ctx->p += !ctx->unquote;
+ print_char(':');
+ print_space();
+ if (ctx->noenum) {
+ ctx->p += print_utype(type, ctx->p);
+ } else {
+ ptf(ctx, type);
+ }
+ if (type != 0) {
+ print_char(',');
+ print_name(ctx, name, len);
+ ud.ttl = td->ttl;
+ ud.type = type;
+ ud.member = p;
+ pf(ctx, &ud);
+ }
+}
+
+void flatcc_json_printer_union_table(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_union_descriptor_t *ud,
+ flatcc_json_printer_table_f pf)
+{
+ print_table_object(ctx, read_uoffset_ptr(ud->member), ud->ttl, pf);
+}
+
+void flatcc_json_printer_union_struct(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_union_descriptor_t *ud,
+ flatcc_json_printer_struct_f pf)
+{
+ print_start('{');
+ pf(ctx, read_uoffset_ptr(ud->member));
+ print_end('}');
+}
+
+void flatcc_json_printer_union_string(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_union_descriptor_t *ud)
+{
+ print_string_object(ctx, read_uoffset_ptr(ud->member));
+}
+
+void flatcc_json_printer_embedded_struct_field(flatcc_json_printer_t *ctx,
+ int index, const void *p, size_t offset,
+ const char *name, size_t len,
+ flatcc_json_printer_struct_f pf)
+{
+ if (index) {
+ print_char(',');
+ }
+ print_name(ctx, name, len);
+ print_start('{');
+ pf(ctx, (uint8_t *)p + offset);
+ print_end('}');
+}
+
+void flatcc_json_printer_embedded_struct_array_field(flatcc_json_printer_t *ctx,
+ int index, const void *p, size_t offset,
+ const char *name, size_t len,
+ size_t size, size_t count,
+ flatcc_json_printer_struct_f pf)
+{
+ size_t i;
+ if (index) {
+ print_char(',');
+ }
+ print_name(ctx, name, len);
+ print_start('[');
+ for (i = 0; i < count; ++i) {
+ if (i > 0) {
+ print_char(',');
+ }
+ print_start('{');
+ pf(ctx, (uint8_t *)p + offset + i * size);
+ print_end('}');
+ }
+ print_end(']');
+}
+
+void flatcc_json_printer_struct_field(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len,
+ flatcc_json_printer_struct_f *pf)
+{
+ const void *p = get_field_ptr(td, id);
+
+ if (p) {
+ if (td->count++) {
+ print_char(',');
+ }
+ print_name(ctx, name, len);
+ print_start('{');
+ pf(ctx, p);
+ print_end('}');
+ }
+}
+
+/*
+ * Make sure the buffer identifier is valid before assuming the rest of
+ * the buffer is sane.
+ * NOTE: this won't work with type hashes because these can contain
+ * nulls in the fid string. In this case use null as fid to disable
+ * check.
+ */
+static int accept_header(flatcc_json_printer_t * ctx,
+ const void *buf, size_t bufsiz, const char *fid)
+{
+ flatbuffers_thash_t id, id2 = 0;
+
+ if (buf == 0 || bufsiz < offset_size + FLATBUFFERS_IDENTIFIER_SIZE) {
+ RAISE_ERROR(bad_input);
+ FLATCC_ASSERT(0 && "buffer header too small");
+ return 0;
+ }
+ if (fid != 0) {
+ id2 = flatbuffers_type_hash_from_string(fid);
+ id = __flatbuffers_thash_read_from_pe((uint8_t *)buf + offset_size);
+ if (!(id2 == 0 || id == id2)) {
+ RAISE_ERROR(bad_input);
+ FLATCC_ASSERT(0 && "identifier mismatch");
+ return 0;
+ }
+ }
+ return 1;
+}
+
+int flatcc_json_printer_struct_as_root(flatcc_json_printer_t *ctx,
+ const void *buf, size_t bufsiz, const char *fid,
+ flatcc_json_printer_struct_f *pf)
+{
+ if (!accept_header(ctx, buf, bufsiz, fid)) {
+ return -1;
+ }
+ print_start('{');
+ pf(ctx, read_uoffset_ptr(buf));
+ print_end('}');
+ print_last_nl();
+ return flatcc_json_printer_get_error(ctx) ? -1 : (int)ctx->total + (int)(ctx->p - ctx->buf);
+}
+
+int flatcc_json_printer_table_as_root(flatcc_json_printer_t *ctx,
+ const void *buf, size_t bufsiz, const char *fid, flatcc_json_printer_table_f *pf)
+{
+ if (!accept_header(ctx, buf, bufsiz, fid)) {
+ return -1;
+ }
+ print_table_object(ctx, read_uoffset_ptr(buf), FLATCC_JSON_PRINT_MAX_LEVELS, pf);
+ print_last_nl();
+ return flatcc_json_printer_get_error(ctx) ? -1 : (int)ctx->total + (int)(ctx->p - ctx->buf);
+}
+
+void flatcc_json_printer_struct_as_nested_root(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len,
+ const char *fid,
+ flatcc_json_printer_struct_f *pf)
+{
+ const uoffset_t *buf;
+ uoffset_t bufsiz;
+
+ if (0 == (buf = get_field_ptr(td, id))) {
+ return;
+ }
+ buf = (const uoffset_t *)((size_t)buf + __flatbuffers_uoffset_read_from_pe(buf));
+ bufsiz = __flatbuffers_uoffset_read_from_pe(buf);
+ if (!accept_header(ctx, buf, bufsiz, fid)) {
+ return;
+ }
+ if (td->count++) {
+ print_char(',');
+ }
+ print_name(ctx, name, len);
+ print_start('{');
+ pf(ctx, read_uoffset_ptr(buf));
+ print_end('}');
+}
+
+void flatcc_json_printer_table_as_nested_root(flatcc_json_printer_t *ctx,
+ flatcc_json_printer_table_descriptor_t *td,
+ int id, const char *name, size_t len,
+ const char *fid,
+ flatcc_json_printer_table_f pf)
+{
+ const uoffset_t *buf;
+ uoffset_t bufsiz;
+
+ if (0 == (buf = get_field_ptr(td, id))) {
+ return;
+ }
+ buf = (const uoffset_t *)((size_t)buf + __flatbuffers_uoffset_read_from_pe(buf));
+ bufsiz = __flatbuffers_uoffset_read_from_pe(buf);
+ ++buf;
+ if (!accept_header(ctx, buf, bufsiz, fid)) {
+ return;
+ }
+ if (td->count++) {
+ print_char(',');
+ }
+ print_name(ctx, name, len);
+ print_table_object(ctx, read_uoffset_ptr(buf), td->ttl, pf);
+}
+
+static void __flatcc_json_printer_flush(flatcc_json_printer_t *ctx, int all)
+{
+ if (!all && ctx->p >= ctx->pflush) {
+ size_t spill = (size_t)(ctx->p - ctx->pflush);
+
+ fwrite(ctx->buf, ctx->flush_size, 1, ctx->fp);
+ memcpy(ctx->buf, ctx->buf + ctx->flush_size, spill);
+ ctx->p = ctx->buf + spill;
+ ctx->total += ctx->flush_size;
+ } else {
+ size_t len = (size_t)(ctx->p - ctx->buf);
+
+ fwrite(ctx->buf, len, 1, ctx->fp);
+ ctx->p = ctx->buf;
+ ctx->total += len;
+ }
+ *ctx->p = '\0';
+}
+
+int flatcc_json_printer_init(flatcc_json_printer_t *ctx, void *fp)
+{
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->fp = fp ? fp : stdout;
+ ctx->flush = __flatcc_json_printer_flush;
+ if (!(ctx->buf = FLATCC_JSON_PRINTER_ALLOC(FLATCC_JSON_PRINT_BUFFER_SIZE))) {
+ return -1;
+ }
+ ctx->own_buffer = 1;
+ ctx->size = FLATCC_JSON_PRINT_BUFFER_SIZE;
+ ctx->flush_size = FLATCC_JSON_PRINT_FLUSH_SIZE;
+ ctx->p = ctx->buf;
+ ctx->pflush = ctx->buf + ctx->flush_size;
+ /*
+ * Make sure we have space for primitive operations such as printing numbers
+ * without having to flush.
+ */
+ FLATCC_ASSERT(ctx->flush_size + FLATCC_JSON_PRINT_RESERVE <= ctx->size);
+ return 0;
+}
+
+static void __flatcc_json_printer_flush_buffer(flatcc_json_printer_t *ctx, int all)
+{
+ (void)all;
+
+ if (ctx->p >= ctx->pflush) {
+ RAISE_ERROR(overflow);
+ ctx->total += (size_t)(ctx->p - ctx->buf);
+ ctx->p = ctx->buf;
+ }
+ *ctx->p = '\0';
+}
+
+int flatcc_json_printer_init_buffer(flatcc_json_printer_t *ctx, char *buffer, size_t buffer_size)
+{
+ FLATCC_ASSERT(buffer_size >= FLATCC_JSON_PRINT_RESERVE);
+ if (buffer_size < FLATCC_JSON_PRINT_RESERVE) {
+ return -1;
+ }
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->buf = buffer;
+ ctx->size = buffer_size;
+ ctx->flush_size = ctx->size - FLATCC_JSON_PRINT_RESERVE;
+ ctx->p = ctx->buf;
+ ctx->pflush = ctx->buf + ctx->flush_size;
+ ctx->flush = __flatcc_json_printer_flush_buffer;
+ return 0;
+}
+
+static void __flatcc_json_printer_flush_dynamic_buffer(flatcc_json_printer_t *ctx, int all)
+{
+ size_t len = (size_t)(ctx->p - ctx->buf);
+ char *p;
+
+ (void)all;
+
+ *ctx->p = '\0';
+ if (ctx->p < ctx->pflush) {
+ return;
+ }
+ p = FLATCC_JSON_PRINTER_REALLOC(ctx->buf, ctx->size * 2);
+ if (!p) {
+ RAISE_ERROR(overflow);
+ ctx->total += len;
+ ctx->p = ctx->buf;
+ } else {
+ ctx->size *= 2;
+ ctx->flush_size = ctx->size - FLATCC_JSON_PRINT_RESERVE;
+ ctx->buf = p;
+ ctx->p = p + len;
+ ctx->pflush = p + ctx->flush_size;
+ }
+ *ctx->p = '\0';
+}
+
+int flatcc_json_printer_init_dynamic_buffer(flatcc_json_printer_t *ctx, size_t buffer_size)
+{
+ if (buffer_size == 0) {
+ buffer_size = FLATCC_JSON_PRINT_DYN_BUFFER_SIZE;
+ }
+ if (buffer_size < FLATCC_JSON_PRINT_RESERVE) {
+ buffer_size = FLATCC_JSON_PRINT_RESERVE;
+ }
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->buf = FLATCC_JSON_PRINTER_ALLOC(buffer_size);
+ ctx->own_buffer = 1;
+ ctx->size = buffer_size;
+ ctx->flush_size = ctx->size - FLATCC_JSON_PRINT_RESERVE;
+ ctx->p = ctx->buf;
+ ctx->pflush = ctx->buf + ctx->flush_size;
+ ctx->flush = __flatcc_json_printer_flush_dynamic_buffer;
+ if (!ctx->buf) {
+ RAISE_ERROR(overflow);
+ return -1;
+ }
+ return 0;
+}
+
+void *flatcc_json_printer_get_buffer(flatcc_json_printer_t *ctx, size_t *buffer_size)
+{
+ ctx->flush(ctx, 0);
+ if (buffer_size) {
+ *buffer_size = (size_t)(ctx->p - ctx->buf);
+ }
+ return ctx->buf;
+}
+
+void *flatcc_json_printer_finalize_dynamic_buffer(flatcc_json_printer_t *ctx, size_t *buffer_size)
+{
+ void *buffer;
+
+ buffer = flatcc_json_printer_get_buffer(ctx, buffer_size);
+ memset(ctx, 0, sizeof(*ctx));
+ return buffer;
+}
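+
+/*
+ * Usage sketch (illustrative only; the generated printer name below is
+ * hypothetical, as produced by `flatcc --json` from a schema):
+ *
+ *   flatcc_json_printer_t printer;
+ *   size_t size;
+ *   char *json;
+ *
+ *   flatcc_json_printer_init_dynamic_buffer(&printer, 0);
+ *   flatcc_json_printer_table_as_root(&printer, buf, bufsiz, 0,
+ *           MyGame_Monster_print_json_table);
+ *   json = flatcc_json_printer_finalize_dynamic_buffer(&printer, &size);
+ *   ... use json, then FLATCC_JSON_PRINTER_FREE(json) ...
+ */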
+
+void flatcc_json_printer_clear(flatcc_json_printer_t *ctx)
+{
+ if (ctx->own_buffer && ctx->buf) {
+ FLATCC_JSON_PRINTER_FREE(ctx->buf);
+ }
+ memset(ctx, 0, sizeof(*ctx));
+}
diff --git a/nostrdb/flatcc/portable/LICENSE b/nostrdb/flatcc/portable/LICENSE
@@ -0,0 +1,14 @@
+Copyright (c) 2016 Mikkel F. Jørgensen, dvide.com
+Some files also Copyright author of MathGeoLib (https://github.com/juj)
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. http://www.apache.org/licenses/LICENSE-2.0
diff --git a/nostrdb/flatcc/portable/README.md b/nostrdb/flatcc/portable/README.md
@@ -0,0 +1,57 @@
+A small library for adding C11 compatibility to older C compilers,
+covering only a small but highly useful subset such as static
+assertions, inline functions, and alignment.
+
+C++ is not a primary target, but the library has been updated to be more
+C++ friendly based on user feedback.
+
+Many compilers already have the required functionality but with slightly
+different names and arguments.
+
+In addition, compatibility with the Linux `<endian.h>` system file is
+provided, and "punaligned.h" is provided for unaligned memory reads,
+which in part depend on endian support.
+
+The library also provides fast integer printing and floating point
+printing and parsing optionally using the grisu3 algorithm, but can fall
+back to strtod and related. The `pgrisu3` folder is header only and
+excludes test cases found in the main grisu3 project the files were
+extracted from. Base64 conversion is also provided.
+
+Integer conversion is not just an optimization. It is more difficult
+than it would appear to portably parse an integer of known size such as
+`uint64_t` up to at most n bytes which is needed for safe parsing. At
+the same time, the sometimes significant performance gains warrants
+custom implementations that might as well be done once and for all.
+
+Files can be included individually, or portable.h may be included to get
+all functionality. If the compiler is C11 compliant, portable.h will not
+include anything, with one exception: it provides a patch for static
+assertions, which clang does not fully support in all versions, even
+with C11 flagged.
+
+The grisu3 header files are the runtime files for the Grisu3 floating
+point conversion to/from text C port. Test coverage is provided separately.
+This library can be used indirectly via pparsefp.h and pprintfp.h.
+
+The `pstatic_assert.h` file is often needed on C11 systems because the
+compiler and standard library may support `_Static_assert` without
+`static_assert`. For compilers without `_Static_assert`, a unique
+identifier is needed for each assertion. This is done non-standard with
+the `__COUNTER__` macro, but has a fallback to `pstatic_assert_scope.h`
+for systems witout the `__COUNTER__` macro. Because of this fallback,
+`pstatic_assert.h` needs to be included in every file using
+`static_assert` in order to increment a scope counter, otherwise there
+is a risk of assert identifier conflicts when `static_assert` happen on
+the same line in different files.
+
+The `paligned_alloc.h` file implements the non-standard `aligned_free`
+to match the C11 standard `aligned_alloc` call. `aligned_free` is
+normally equivalent to `free`, but not on systems where `aligned_free`
+cannot be implemented using a system-provided `free` call. Use of
+`aligned_free` is thus optional on some systems, but using it increases
+general portability at the cost of pure C11 compatibility.
+
+IMPORTANT NOTE: this library has been used on various platforms and
+updated with user feedback, but it is impossible to systematically test
+all platforms, so please test for specific use cases and report
+any issues upstream.
diff --git a/nostrdb/flatcc/portable/grisu3_math.h b/nostrdb/flatcc/portable/grisu3_math.h
@@ -0,0 +1,329 @@
+/*
+ * Copyright (c) 2016 Mikkel F. Jørgensen, dvide.com
+ * Copyright author of MathGeoLib (https://github.com/juj)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License. http://www.apache.org/licenses/LICENSE-2.0
+ */
+
+/* 2016-02-02: Updated by mikkelfj
+ *
+ * Extracted from MathGeoLib grisu3.c, Apache 2.0 license, and extended.
+ *
+ * This file is usually included via grisu3_print.h or grisu3_parse.h.
+ *
+ * The original MathGeoLib dtoa_grisu3 implementation is largely
+ * unchanged except for the uint64 to double cast. The remaining changes
+ * are file structure, name changes, and new additions for parsing:
+ *
+ * - Split into header files only:
+ * grisu3_math.h, grisu3_print.h, (added grisu3_parse.h)
+ *
+ * - names prefixed with grisu3_, grisu3_diy_fp_, GRISU3_.
+ * - added static to all functions.
+ * - disabled clang unused function warnings.
+ * - guarded <stdint.h> to allow for alternative impl.
+ * - added extra numeric constants needed for parsing.
+ * - added dec_pow, cast_double_from_diy_fp.
+ * - changed some function names for consistency.
+ * - moved printing specific grisu3 functions to grisu3_print.h.
+ * - changed double to uint64 cast to avoid aliasing.
+ * - added new grisu3_parse.h for parsing doubles.
+ * - grisu3_print_double (dtoa_grisu3) format .1 as 0.1 needed for valid JSON output
+ * and grisu3_parse_double wouldn't consume it.
+ * - grisu3_print_double changed formatting to prefer 0.012 over 1.2e-2.
+ *
+ * These changes make it possible to include the files as headers only
+ * in other software libraries without risking name conflicts, and to
+ * extend the implementation with a port of Google's Double Conversion
+ * strtod functionality for parsing doubles.
+ *
+ * Extracted from: rev. 915501a / Dec 22, 2015
+ * <https://github.com/juj/MathGeoLib/blob/master/src/Math/grisu3.c>
+ * MathGeoLib License: http://www.apache.org/licenses/LICENSE-2.0.html
+ */
+
+#ifndef GRISU3_MATH_H
+#define GRISU3_MATH_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Guarded to allow inclusion of pstdint.h first, if stdint.h is not supported. */
+#ifndef UINT8_MAX
+#include <stdint.h> /* uint64_t etc. */
+#endif
+
+#ifdef GRISU3_NO_ASSERT
+#undef GRISU3_ASSERT
+#define GRISU3_ASSERT(x) ((void)0)
+#endif
+
+#ifndef GRISU3_ASSERT
+#include <assert.h> /* assert */
+#define GRISU3_ASSERT(x) assert(x)
+#endif
+
+#ifdef _MSC_VER
+#pragma warning(disable : 4204) /* nonstandard extension used : non-constant aggregate initializer */
+#endif
+
+#define GRISU3_D64_SIGN 0x8000000000000000ULL
+#define GRISU3_D64_EXP_MASK 0x7FF0000000000000ULL
+#define GRISU3_D64_FRACT_MASK 0x000FFFFFFFFFFFFFULL
+#define GRISU3_D64_IMPLICIT_ONE 0x0010000000000000ULL
+#define GRISU3_D64_EXP_POS 52
+#define GRISU3_D64_EXP_BIAS 1075
+#define GRISU3_D64_DENORM_EXP (-GRISU3_D64_EXP_BIAS + 1)
+#define GRISU3_DIY_FP_FRACT_SIZE 64
+#define GRISU3_D_1_LOG2_10 0.30102999566398114 /* 1 / lg(10) */
+#define GRISU3_MIN_TARGET_EXP -60
+#define GRISU3_MASK32 0xFFFFFFFFULL
+#define GRISU3_MIN_CACHED_EXP -348
+#define GRISU3_MAX_CACHED_EXP 340
+#define GRISU3_CACHED_EXP_STEP 8
+#define GRISU3_D64_MAX_DEC_EXP 309
+#define GRISU3_D64_MIN_DEC_EXP -324
+#define GRISU3_D64_INF GRISU3_D64_EXP_MASK
+
+#define GRISU3_MIN(x,y) ((x) <= (y) ? (x) : (y))
+#define GRISU3_MAX(x,y) ((x) >= (y) ? (x) : (y))
+
+
+typedef struct grisu3_diy_fp
+{
+ uint64_t f;
+ int e;
+} grisu3_diy_fp_t;
+
+typedef struct grisu3_diy_fp_power
+{
+ uint64_t fract;
+ int16_t b_exp, d_exp;
+} grisu3_diy_fp_power_t;
+
+typedef union {
+ uint64_t u64;
+ double d64;
+} grisu3_cast_double_t;
+
+static uint64_t grisu3_cast_uint64_from_double(double d)
+{
+ grisu3_cast_double_t cd;
+ cd.d64 = d;
+ return cd.u64;
+}
+
+static double grisu3_cast_double_from_uint64(uint64_t u)
+{
+ grisu3_cast_double_t cd;
+ cd.u64 = u;
+ return cd.d64;
+}
+
+#define grisu3_double_infinity grisu3_cast_double_from_uint64(GRISU3_D64_INF)
+#define grisu3_double_nan grisu3_cast_double_from_uint64(GRISU3_D64_INF + 1)
+
+static const grisu3_diy_fp_power_t grisu3_diy_fp_pow_cache[] =
+{
+ { 0xfa8fd5a0081c0288ULL, -1220, -348 },
+ { 0xbaaee17fa23ebf76ULL, -1193, -340 },
+ { 0x8b16fb203055ac76ULL, -1166, -332 },
+ { 0xcf42894a5dce35eaULL, -1140, -324 },
+ { 0x9a6bb0aa55653b2dULL, -1113, -316 },
+ { 0xe61acf033d1a45dfULL, -1087, -308 },
+ { 0xab70fe17c79ac6caULL, -1060, -300 },
+ { 0xff77b1fcbebcdc4fULL, -1034, -292 },
+ { 0xbe5691ef416bd60cULL, -1007, -284 },
+ { 0x8dd01fad907ffc3cULL, -980, -276 },
+ { 0xd3515c2831559a83ULL, -954, -268 },
+ { 0x9d71ac8fada6c9b5ULL, -927, -260 },
+ { 0xea9c227723ee8bcbULL, -901, -252 },
+ { 0xaecc49914078536dULL, -874, -244 },
+ { 0x823c12795db6ce57ULL, -847, -236 },
+ { 0xc21094364dfb5637ULL, -821, -228 },
+ { 0x9096ea6f3848984fULL, -794, -220 },
+ { 0xd77485cb25823ac7ULL, -768, -212 },
+ { 0xa086cfcd97bf97f4ULL, -741, -204 },
+ { 0xef340a98172aace5ULL, -715, -196 },
+ { 0xb23867fb2a35b28eULL, -688, -188 },
+ { 0x84c8d4dfd2c63f3bULL, -661, -180 },
+ { 0xc5dd44271ad3cdbaULL, -635, -172 },
+ { 0x936b9fcebb25c996ULL, -608, -164 },
+ { 0xdbac6c247d62a584ULL, -582, -156 },
+ { 0xa3ab66580d5fdaf6ULL, -555, -148 },
+ { 0xf3e2f893dec3f126ULL, -529, -140 },
+ { 0xb5b5ada8aaff80b8ULL, -502, -132 },
+ { 0x87625f056c7c4a8bULL, -475, -124 },
+ { 0xc9bcff6034c13053ULL, -449, -116 },
+ { 0x964e858c91ba2655ULL, -422, -108 },
+ { 0xdff9772470297ebdULL, -396, -100 },
+ { 0xa6dfbd9fb8e5b88fULL, -369, -92 },
+ { 0xf8a95fcf88747d94ULL, -343, -84 },
+ { 0xb94470938fa89bcfULL, -316, -76 },
+ { 0x8a08f0f8bf0f156bULL, -289, -68 },
+ { 0xcdb02555653131b6ULL, -263, -60 },
+ { 0x993fe2c6d07b7facULL, -236, -52 },
+ { 0xe45c10c42a2b3b06ULL, -210, -44 },
+ { 0xaa242499697392d3ULL, -183, -36 },
+ { 0xfd87b5f28300ca0eULL, -157, -28 },
+ { 0xbce5086492111aebULL, -130, -20 },
+ { 0x8cbccc096f5088ccULL, -103, -12 },
+ { 0xd1b71758e219652cULL, -77, -4 },
+ { 0x9c40000000000000ULL, -50, 4 },
+ { 0xe8d4a51000000000ULL, -24, 12 },
+ { 0xad78ebc5ac620000ULL, 3, 20 },
+ { 0x813f3978f8940984ULL, 30, 28 },
+ { 0xc097ce7bc90715b3ULL, 56, 36 },
+ { 0x8f7e32ce7bea5c70ULL, 83, 44 },
+ { 0xd5d238a4abe98068ULL, 109, 52 },
+ { 0x9f4f2726179a2245ULL, 136, 60 },
+ { 0xed63a231d4c4fb27ULL, 162, 68 },
+ { 0xb0de65388cc8ada8ULL, 189, 76 },
+ { 0x83c7088e1aab65dbULL, 216, 84 },
+ { 0xc45d1df942711d9aULL, 242, 92 },
+ { 0x924d692ca61be758ULL, 269, 100 },
+ { 0xda01ee641a708deaULL, 295, 108 },
+ { 0xa26da3999aef774aULL, 322, 116 },
+ { 0xf209787bb47d6b85ULL, 348, 124 },
+ { 0xb454e4a179dd1877ULL, 375, 132 },
+ { 0x865b86925b9bc5c2ULL, 402, 140 },
+ { 0xc83553c5c8965d3dULL, 428, 148 },
+ { 0x952ab45cfa97a0b3ULL, 455, 156 },
+ { 0xde469fbd99a05fe3ULL, 481, 164 },
+ { 0xa59bc234db398c25ULL, 508, 172 },
+ { 0xf6c69a72a3989f5cULL, 534, 180 },
+ { 0xb7dcbf5354e9beceULL, 561, 188 },
+ { 0x88fcf317f22241e2ULL, 588, 196 },
+ { 0xcc20ce9bd35c78a5ULL, 614, 204 },
+ { 0x98165af37b2153dfULL, 641, 212 },
+ { 0xe2a0b5dc971f303aULL, 667, 220 },
+ { 0xa8d9d1535ce3b396ULL, 694, 228 },
+ { 0xfb9b7cd9a4a7443cULL, 720, 236 },
+ { 0xbb764c4ca7a44410ULL, 747, 244 },
+ { 0x8bab8eefb6409c1aULL, 774, 252 },
+ { 0xd01fef10a657842cULL, 800, 260 },
+ { 0x9b10a4e5e9913129ULL, 827, 268 },
+ { 0xe7109bfba19c0c9dULL, 853, 276 },
+ { 0xac2820d9623bf429ULL, 880, 284 },
+ { 0x80444b5e7aa7cf85ULL, 907, 292 },
+ { 0xbf21e44003acdd2dULL, 933, 300 },
+ { 0x8e679c2f5e44ff8fULL, 960, 308 },
+ { 0xd433179d9c8cb841ULL, 986, 316 },
+ { 0x9e19db92b4e31ba9ULL, 1013, 324 },
+ { 0xeb96bf6ebadf77d9ULL, 1039, 332 },
+ { 0xaf87023b9bf0ee6bULL, 1066, 340 }
+};
+
+/* Avoid dependence on lib math to get (int)ceil(v) */
+static int grisu3_iceil(double v)
+{
+ int k = (int)v;
+ if (v < 0) return k;
+ return v - k == 0 ? k : k + 1;
+}
+
+static int grisu3_diy_fp_cached_pow(int exp, grisu3_diy_fp_t *p)
+{
+ int k = grisu3_iceil((exp+GRISU3_DIY_FP_FRACT_SIZE-1) * GRISU3_D_1_LOG2_10);
+ int i = (k-GRISU3_MIN_CACHED_EXP-1) / GRISU3_CACHED_EXP_STEP + 1;
+ p->f = grisu3_diy_fp_pow_cache[i].fract;
+ p->e = grisu3_diy_fp_pow_cache[i].b_exp;
+ return grisu3_diy_fp_pow_cache[i].d_exp;
+}
+
+static grisu3_diy_fp_t grisu3_diy_fp_minus(grisu3_diy_fp_t x, grisu3_diy_fp_t y)
+{
+ grisu3_diy_fp_t d; d.f = x.f - y.f; d.e = x.e;
+ GRISU3_ASSERT(x.e == y.e && x.f >= y.f);
+ return d;
+}
+
+static grisu3_diy_fp_t grisu3_diy_fp_multiply(grisu3_diy_fp_t x, grisu3_diy_fp_t y)
+{
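+    /* Computes the high 64 bits of the 128 bit product x.f * y.f using
+     * 32-bit halves, rounding away the discarded low half. */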
+ uint64_t a, b, c, d, ac, bc, ad, bd, tmp;
+ grisu3_diy_fp_t r;
+ a = x.f >> 32; b = x.f & GRISU3_MASK32;
+ c = y.f >> 32; d = y.f & GRISU3_MASK32;
+ ac = a*c; bc = b*c;
+ ad = a*d; bd = b*d;
+ tmp = (bd >> 32) + (ad & GRISU3_MASK32) + (bc & GRISU3_MASK32);
+ tmp += 1U << 31; /* round */
+ r.f = ac + (ad >> 32) + (bc >> 32) + (tmp >> 32);
+ r.e = x.e + y.e + 64;
+ return r;
+}
+
+static grisu3_diy_fp_t grisu3_diy_fp_normalize(grisu3_diy_fp_t n)
+{
+ GRISU3_ASSERT(n.f != 0);
+ while(!(n.f & 0xFFC0000000000000ULL)) { n.f <<= 10; n.e -= 10; }
+ while(!(n.f & GRISU3_D64_SIGN)) { n.f <<= 1; --n.e; }
+ return n;
+}
+
+static grisu3_diy_fp_t grisu3_cast_diy_fp_from_double(double d)
+{
+ grisu3_diy_fp_t fp;
+ uint64_t u64 = grisu3_cast_uint64_from_double(d);
+ if (!(u64 & GRISU3_D64_EXP_MASK)) { fp.f = u64 & GRISU3_D64_FRACT_MASK; fp.e = 1 - GRISU3_D64_EXP_BIAS; }
+ else { fp.f = (u64 & GRISU3_D64_FRACT_MASK) + GRISU3_D64_IMPLICIT_ONE; fp.e = (int)((u64 & GRISU3_D64_EXP_MASK) >> GRISU3_D64_EXP_POS) - GRISU3_D64_EXP_BIAS; }
+ return fp;
+}
+
+static double grisu3_cast_double_from_diy_fp(grisu3_diy_fp_t n)
+{
+ const uint64_t hidden_bit = GRISU3_D64_IMPLICIT_ONE;
+ const uint64_t frac_mask = GRISU3_D64_FRACT_MASK;
+ const int denorm_exp = GRISU3_D64_DENORM_EXP;
+ const int exp_bias = GRISU3_D64_EXP_BIAS;
+ const int exp_pos = GRISU3_D64_EXP_POS;
+
+ grisu3_diy_fp_t v = n;
+ uint64_t e_biased;
+
+ while (v.f > hidden_bit + frac_mask) {
+ v.f >>= 1;
+ ++v.e;
+ }
+ if (v.e < denorm_exp) {
+ return 0.0;
+ }
+ while (v.e > denorm_exp && (v.f & hidden_bit) == 0) {
+ v.f <<= 1;
+ --v.e;
+ }
+ if (v.e == denorm_exp && (v.f & hidden_bit) == 0) {
+ e_biased = 0;
+ } else {
+ e_biased = (uint64_t)(v.e + exp_bias);
+ }
+ return grisu3_cast_double_from_uint64((v.f & frac_mask) | (e_biased << exp_pos));
+}
+
+/* pow10_cache[i] = 10^(i-1) */
+static const unsigned int grisu3_pow10_cache[] = { 0, 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000 };
+
+static int grisu3_largest_pow10(uint32_t n, int n_bits, uint32_t *power)
+{
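+    /* 1233 / 4096 (i.e. * 1233 >> 12) approximates log10(2) ~ 0.30103,
+     * estimating the decimal digit count from the bit count. */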
+ int guess = ((n_bits + 1) * 1233 >> 12) + 1/*skip first entry*/;
+ if (n < grisu3_pow10_cache[guess]) --guess; /* We don't have any guarantees that 2^n_bits <= n. */
+ *power = grisu3_pow10_cache[guess];
+ return guess;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* GRISU3_MATH_H */
diff --git a/nostrdb/flatcc/portable/grisu3_parse.h b/nostrdb/flatcc/portable/grisu3_parse.h
@@ -0,0 +1,582 @@
+/*
+ * Copyright (c) 2016 Mikkel F. Jørgensen, dvide.com
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License. http://www.apache.org/licenses/LICENSE-2.0
+ */
+
+/*
+ * Port of parts of Google Double Conversion strtod functionality
+ * but with fallback to strtod instead of a bignum implementation.
+ *
+ * Based on grisu3 math from MathGeoLib.
+ *
+ * See also grisu3_math.h comments.
+ */
+
+#ifndef GRISU3_PARSE_H
+#define GRISU3_PARSE_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef UINT8_MAX
+#include <stdint.h>
+#endif
+
+#include <stdlib.h>
+#include <limits.h>
+
+#include "grisu3_math.h"
+
+
+/*
+ * The maximum number of characters a valid number may contain. The parse
+ * fails if the input length is longer but the character after max len
+ * was part of the number.
+ *
+ * The length should not be set too high because it protects against
+ * overflow in the exponent part derived from the input length.
+ */
+#define GRISU3_NUM_MAX_LEN 1000
+
+/*
+ * The lightweight "portable" C library recognizes grisu3 support if
+ * included first.
+ */
+#define grisu3_parse_double_is_defined 1
+
+/*
+ * Disable to compare performance and to test diy_fp algorithm in
+ * broader range.
+ */
+#define GRISU3_PARSE_FAST_CASE
+
+/* May result in a one ulp error; otherwise, when uncertain, we fall back to strtod. */
+//#define GRISU3_PARSE_ALLOW_ERROR
+
+
+/*
+ * The dec output exponent jumps in steps of 8, so the result is offset at most
+ * by 7 when the input is within range.
+ */
+static int grisu3_diy_fp_cached_dec_pow(int d_exp, grisu3_diy_fp_t *p)
+{
+ const int cached_offset = -GRISU3_MIN_CACHED_EXP;
+ const int d_exp_dist = GRISU3_CACHED_EXP_STEP;
+ int i, a_exp;
+
+ GRISU3_ASSERT(GRISU3_MIN_CACHED_EXP <= d_exp);
+ GRISU3_ASSERT(d_exp < GRISU3_MAX_CACHED_EXP + d_exp_dist);
+
+ i = (d_exp + cached_offset) / d_exp_dist;
+ a_exp = grisu3_diy_fp_pow_cache[i].d_exp;
+ p->f = grisu3_diy_fp_pow_cache[i].fract;
+ p->e = grisu3_diy_fp_pow_cache[i].b_exp;
+
+ GRISU3_ASSERT(a_exp <= d_exp);
+ GRISU3_ASSERT(d_exp < a_exp + d_exp_dist);
+
+ return a_exp;
+}
+
+/*
+ * Ported from Google Double Conversion strtod using
+ * MathGeoLib's diy_fp functions for grisu3 in C.
+ *
+ * ulp_half_error is set if non-zero trailing characters had to be
+ * truncated.
+ *
+ * The actual value we need to encode is:
+ *
+ * (sign ? -1 : 1) * fraction * 2 ^ (exponent - fraction_exp)
+ * where exponent is the base 10 exponent assuming the decimal point is
+ * after the first digit. fraction_exp is the base 10 magnitude of the
+ * fraction or number of significant digits - 1.
+ *
+ * If the exponent is between 0 and 22 and the fraction is encoded in
+ * the lower 53 bits (the largest bit is implicit in a double, but not
+ * in this fraction), then the value can be trivially converted to
+ * double without loss of precision. If the fraction was in fact
+ * multiplied by trailing zeroes that we didn't convert to exponent,
+ * there are larger values than 53 bits that can also be encoded
+ * trivially - but then it is better to handle this during parsing
+ * if it is worthwhile. We do not optimize for this here, because it
+ * can be done in a simple check before calling, and because it might
+ * not be worthwhile to do at all since it very likely will fail for
+ * numbers printed to be convertible back to double without loss.
+ *
+ * Returns 0 if conversion was not exact. In that case the value is
+ * either one smaller than the correct one, or the correct one.
+ *
+ * Exponents must be range protected before calling otherwise cached
+ * powers will blow up.
+ *
+ * Google Double Conversion seems to prefer the following notion:
+ *
+ * x >= 10^309 => +Inf
+ * x <= 10^-324 => 0,
+ *
+ * max double: HUGE_VAL = 1.7976931348623157 * 10^308
+ * min double: 4.9406564584124654 * 10^-324
+ *
+ * Values just below or above min/max representable number
+ * may round towards large/small non-Inf/non-neg values.
+ *
+ * but `strtod` seems to return +/-HUGE_VAL on overflow?
+ */
+static int grisu3_diy_fp_encode_double(uint64_t fraction, int exponent, int fraction_exp, int ulp_half_error, double *result)
+{
+ /*
+     * Error is measured in fractions of integers, so we scale up to get
+ * some resolution to represent error expressions.
+ */
+ const int log2_error_one = 3;
+ const int error_one = 1 << log2_error_one;
+ const int denorm_exp = GRISU3_D64_DENORM_EXP;
+ const uint64_t hidden_bit = GRISU3_D64_IMPLICIT_ONE;
+ const int diy_size = GRISU3_DIY_FP_FRACT_SIZE;
+ const int max_digits = 19;
+
+ int error = ulp_half_error ? error_one / 2 : 0;
+ int d_exp = (exponent - fraction_exp);
+ int a_exp;
+ int o_exp;
+ grisu3_diy_fp_t v = { fraction, 0 };
+ grisu3_diy_fp_t cp;
+ grisu3_diy_fp_t rounded;
+ int mag;
+ int prec;
+ int prec_bits;
+ int half_way;
+
+    /* Note: a denormal double stores its fraction without the implicit msb fraction bit. */
+
+ /* Shift fraction to msb. */
+ v = grisu3_diy_fp_normalize(v);
+ /* The half point error moves up while the exponent moves down. */
+ error <<= -v.e;
+
+ a_exp = grisu3_diy_fp_cached_dec_pow(d_exp, &cp);
+
+ /* Interpolate between cached powers at distance 8. */
+ if (a_exp != d_exp) {
+ int adj_exp = d_exp - a_exp - 1;
+ static grisu3_diy_fp_t cp_10_lut[] = {
+ { 0xa000000000000000ULL, -60 },
+ { 0xc800000000000000ULL, -57 },
+ { 0xfa00000000000000ULL, -54 },
+ { 0x9c40000000000000ULL, -50 },
+ { 0xc350000000000000ULL, -47 },
+ { 0xf424000000000000ULL, -44 },
+ { 0x9896800000000000ULL, -40 },
+ };
+ GRISU3_ASSERT(adj_exp >= 0 && adj_exp < 7);
+ v = grisu3_diy_fp_multiply(v, cp_10_lut[adj_exp]);
+
+ /* 20 decimal digits won't always fit in 64 bit.
+ * (`fraction_exp` is one less than significant decimal
+ * digits in fraction, e.g. 1 * 10e0).
+ * If we cannot fit, introduce 1/2 ulp error
+ * (says double conversion reference impl.) */
+ if (1 + fraction_exp + adj_exp > max_digits) {
+ error += error_one / 2;
+ }
+ }
+
+ v = grisu3_diy_fp_multiply(v, cp);
+ /*
+ * Google double conversion claims that:
+ *
+ * The error introduced by a multiplication of a*b equals
+ * error_a + error_b + error_a*error_b/2^64 + 0.5
+ * Substituting a with 'input' and b with 'cached_power' we have
+ * error_b = 0.5 (all cached powers have an error of less than 0.5 ulp),
+     * error_ab = 0 or 1 / error_one > error_a*error_b / 2^64
+ *
+ * which in our encoding becomes:
+ * error_a = error_one/2
+     * error_ab = 1 / error_one (rounds up to 1 if error != 0, or 0 otherwise)
+ * fixed_error = error_one/2
+ *
+ * error += error_a + fixed_error + (error ? 1 : 0)
+ *
+ * (this isn't entirely clear, but that is as close as we get).
+ */
+ error += error_one + (error ? 1 : 0);
+
+ o_exp = v.e;
+ v = grisu3_diy_fp_normalize(v);
+ /* Again, if we shift the significant bits, the error moves along. */
+ error <<= o_exp - v.e;
+
+ /*
+     * The value `v` is bounded by 2^mag, where mag = 64 + v.e, because we
+ * just normalized it by shifting towards msb.
+ */
+ mag = diy_size + v.e;
+
+ /* The effective magnitude of the IEEE double representation. */
+ mag = mag >= diy_size + denorm_exp ? diy_size : mag <= denorm_exp ? 0 : mag - denorm_exp;
+ prec = diy_size - mag;
+ if (prec + log2_error_one >= diy_size) {
+ int e_scale = prec + log2_error_one - diy_size - 1;
+ v.f >>= e_scale;
+ v.e += e_scale;
+ error = (error >> e_scale) + 1 + error_one;
+ prec -= e_scale;
+ }
+ rounded.f = v.f >> prec;
+ rounded.e = v.e + prec;
+ prec_bits = (int)(v.f & ((uint64_t)1 << (prec - 1))) * error_one;
+ half_way = (int)((uint64_t)1 << (prec - 1)) * error_one;
+    if (prec_bits >= half_way + error) {
+ rounded.f++;
+ /* Prevent overflow. */
+ if (rounded.f & (hidden_bit << 1)) {
+ rounded.f >>= 1;
+ rounded.e += 1;
+ }
+ }
+ *result = grisu3_cast_double_from_diy_fp(rounded);
+ return half_way - error >= prec_bits || prec_bits >= half_way + error;
+}
+
+/*
+ * `end` is unchanged if number is handled natively, or it is the result
+ * of strtod parsing in case of fallback.
+ */
+static const char *grisu3_encode_double(const char *buf, const char *end, int sign, uint64_t fraction, int exponent, int fraction_exp, int ulp_half_error, double *result)
+{
+ const int max_d_exp = GRISU3_D64_MAX_DEC_EXP;
+ const int min_d_exp = GRISU3_D64_MIN_DEC_EXP;
+
+ char *v_end;
+
+ /* Both for user experience, and to protect internal power table lookups. */
+ if (fraction == 0 || exponent < min_d_exp) {
+ *result = 0.0;
+ goto done;
+ }
+ if (exponent - 1 > max_d_exp) {
+ *result = grisu3_double_infinity;
+ goto done;
+ }
+
+ /*
+ * `exponent` is the normalized value, fraction_exp is the size of
+ * the representation in the `fraction value`, or one less than
+ * number of significant digits.
+ *
+ * If the final value can be kept in 53 bits and we can avoid
+ * division, then we can convert to double quite fast.
+ *
+     * ulp_half_error only happens when fraction is maxed out, so
+ * fraction_exp > 22 by definition.
+ *
+ * fraction_exp >= 0 always.
+ *
+ * http://www.exploringbinary.com/fast-path-decimal-to-floating-point-conversion/
+ */
+
+
+#ifdef GRISU3_PARSE_FAST_CASE
+ if (fraction < (1ULL << 53) && exponent >= 0 && exponent <= 22) {
+ double v = (double)fraction;
+ /* Multiplying by 1e-k instead of dividing by 1ek results in rounding error. */
+ switch (exponent - fraction_exp) {
+ case -22: v /= 1e22; break;
+ case -21: v /= 1e21; break;
+ case -20: v /= 1e20; break;
+ case -19: v /= 1e19; break;
+ case -18: v /= 1e18; break;
+ case -17: v /= 1e17; break;
+ case -16: v /= 1e16; break;
+ case -15: v /= 1e15; break;
+ case -14: v /= 1e14; break;
+ case -13: v /= 1e13; break;
+ case -12: v /= 1e12; break;
+ case -11: v /= 1e11; break;
+ case -10: v /= 1e10; break;
+ case -9: v /= 1e9; break;
+ case -8: v /= 1e8; break;
+ case -7: v /= 1e7; break;
+ case -6: v /= 1e6; break;
+ case -5: v /= 1e5; break;
+ case -4: v /= 1e4; break;
+ case -3: v /= 1e3; break;
+ case -2: v /= 1e2; break;
+ case -1: v /= 1e1; break;
+ case 0: break;
+ case 1: v *= 1e1; break;
+ case 2: v *= 1e2; break;
+ case 3: v *= 1e3; break;
+ case 4: v *= 1e4; break;
+ case 5: v *= 1e5; break;
+ case 6: v *= 1e6; break;
+ case 7: v *= 1e7; break;
+ case 8: v *= 1e8; break;
+ case 9: v *= 1e9; break;
+ case 10: v *= 1e10; break;
+ case 11: v *= 1e11; break;
+ case 12: v *= 1e12; break;
+ case 13: v *= 1e13; break;
+ case 14: v *= 1e14; break;
+ case 15: v *= 1e15; break;
+ case 16: v *= 1e16; break;
+ case 17: v *= 1e17; break;
+ case 18: v *= 1e18; break;
+ case 19: v *= 1e19; break;
+ case 20: v *= 1e20; break;
+ case 21: v *= 1e21; break;
+ case 22: v *= 1e22; break;
+ }
+ *result = v;
+ goto done;
+ }
+#endif
+
+ if (grisu3_diy_fp_encode_double(fraction, exponent, fraction_exp, ulp_half_error, result)) {
+ goto done;
+ }
+#ifdef GRISU3_PARSE_ALLOW_ERROR
+ goto done;
+#endif
+ *result = strtod(buf, &v_end);
+ if (v_end < end) {
+ return v_end;
+ }
+ return end;
+done:
+ if (sign) {
+ *result = -*result;
+ }
+ return end;
+}
+
+/*
+ * Returns buf if number wasn't matched, or null if number starts ok
+ * but contains invalid content.
+ */
+static const char *grisu3_parse_hex_fp(const char *buf, const char *end, int sign, double *result)
+{
+ (void)buf;
+ (void)end;
+ (void)sign;
+ *result = 0.0;
+ /* Not currently supported. */
+ return buf;
+}
+
+/*
+ * Returns end pointer on success, or null, or buf if start is not a number.
+ * Sets result to 0.0 on error.
+ * Reads up to len + 1 bytes from buffer where len + 1 must not be a
+ * valid part of a number, but all of buf, buf + len need not be a
+ * number. Leading whitespace is NOT valid.
+ * Very small numbers are truncated to +/-0.0 and numerically very large
+ * numbers are returned as +/-infinity.
+ *
+ * A value must not end or begin with '.' (like JSON), but can have
+ * leading zeroes (unlike JSON). A single leading zero followed by
+ * an encoding symbol may or may not be interpreted as a non-decimal
+ * encoding prefix, e.g. 0x, but a leading zero followed by a digit is
+ * NOT interpreted as octal.
+ * A single leading negative sign may appear before digits, but positive
+ * sign is not allowed and space after the sign is not allowed.
+ * At most the first 1000 characters of the input are considered.
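+ *
+ * An illustrative call pattern (a sketch, not from the original
+ * documentation):
+ *
+ *   double v;
+ *   const char *end = grisu3_parse_double(buf, len, &v);
+ *   if (end == 0)        { invalid number }
+ *   else if (end == buf) { input does not start with a number }
+ *   else                 { v holds the parsed value }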
+ */
+static const char *grisu3_parse_double(const char *buf, size_t len, double *result)
+{
+ const char *mark, *k, *end;
+ int sign = 0, esign = 0;
+ uint64_t fraction = 0;
+ int exponent = 0;
+ int ee = 0;
+ int fraction_exp = 0;
+ int ulp_half_error = 0;
+
+ *result = 0.0;
+
+ end = buf + len + 1;
+
+ /* Failsafe for exponent overflow. */
+ if (len > GRISU3_NUM_MAX_LEN) {
+ end = buf + GRISU3_NUM_MAX_LEN + 1;
+ }
+
+ if (buf == end) {
+ return buf;
+ }
+ mark = buf;
+ if (*buf == '-') {
+ ++buf;
+ sign = 1;
+ if (buf == end) {
+ return 0;
+ }
+ }
+ if (*buf == '0') {
+ ++buf;
+ /* | 0x20 is lower case ASCII. */
+ if (buf != end && (*buf | 0x20) == 'x') {
+ k = grisu3_parse_hex_fp(buf, end, sign, result);
+ if (k == buf) {
+ return mark;
+ }
+ return k;
+ }
+ /* Not worthwhile, except for getting the scale of integer part. */
+ while (buf != end && *buf == '0') {
+ ++buf;
+ }
+ } else {
+ if (*buf < '1' || *buf > '9') {
+ /*
+ * If we didn't see a sign, just don't recognize it as
+ * number, otherwise make it an error.
+ */
+ return sign ? 0 : mark;
+ }
+ fraction = (uint64_t)(*buf++ - '0');
+ }
+ k = buf;
+ /*
+ * We do not catch trailing zeroes when there is no decimal point.
+ * This misses an opportunity for moving the exponent down into the
+ * fast case. But it is unlikely to be worthwhile as it complicates
+ * parsing.
+ */
+ while (buf != end && *buf >= '0' && *buf <= '9') {
+ if (fraction >= UINT64_MAX / 10) {
+ fraction += *buf >= '5';
+ ulp_half_error = 1;
+ break;
+ }
+ fraction = fraction * 10 + (uint64_t)(*buf++ - '0');
+ }
+ fraction_exp = (int)(buf - k);
+ /* Skip surplus digits. Trailing zero does not introduce error. */
+ while (buf != end && *buf == '0') {
+ ++exponent;
+ ++buf;
+ }
+ if (buf != end && *buf >= '1' && *buf <= '9') {
+ ulp_half_error = 1;
+ ++exponent;
+ ++buf;
+ while (buf != end && *buf >= '0' && *buf <= '9') {
+ ++exponent;
+ ++buf;
+ }
+ }
+ if (buf != end && *buf == '.') {
+ ++buf;
+ k = buf;
+ if (*buf < '0' || *buf > '9') {
+ /* We don't accept numbers without leading or trailing digit. */
+ return 0;
+ }
+ while (buf != end && *buf >= '0' && *buf <= '9') {
+ if (fraction >= UINT64_MAX / 10) {
+ if (!ulp_half_error) {
+ fraction += *buf >= '5';
+ ulp_half_error = 1;
+ }
+ break;
+ }
+ fraction = fraction * 10 + (uint64_t)(*buf++ - '0');
+ --exponent;
+ }
+ fraction_exp += (int)(buf - k);
+ while (buf != end && *buf == '0') {
+ ++exponent;
+ ++buf;
+ }
+ if (buf != end && *buf >= '1' && *buf <= '9') {
+ ulp_half_error = 1;
+ ++buf;
+ while (buf != end && *buf >= '0' && *buf <= '9') {
+ ++buf;
+ }
+ }
+ }
+ /*
+ * Normalized exponent e.g: 1.23434e3 with fraction = 123434,
+ * fraction_exp = 5, exponent = 3.
+ * So value = fraction * 10^(exponent - fraction_exp)
+ */
+ exponent += fraction_exp;
+ if (buf != end && (*buf | 0x20) == 'e') {
+ if (end - buf < 2) {
+ return 0;
+ }
+ ++buf;
+ if (*buf == '+') {
+ ++buf;
+ if (buf == end) {
+ return 0;
+ }
+ } else if (*buf == '-') {
+ esign = 1;
+ ++buf;
+ if (buf == end) {
+ return 0;
+ }
+ }
+ if (*buf < '0' || *buf > '9') {
+ return 0;
+ }
+ ee = *buf++ - '0';
+ while (buf != end && *buf >= '0' && *buf <= '9') {
+ /*
+ * This test impacts performance and we do not need an
+ * exact value just one large enough to dominate the fraction_exp.
+ * Subsequent handling maps large absolute ee to 0 or infinity.
+ */
+ if (ee <= 0x7fff) {
+ ee = ee * 10 + *buf - '0';
+ }
+ ++buf;
+ }
+ }
+ exponent = exponent + (esign ? -ee : ee);
+
+ /*
+     * Exponent is now a base 10 normalized exponent so the absolute value
+     * is less than 10^(exponent + 1) for positive exponents. For
+     * denormalized doubles (using 11 bit exponent 0 with a fraction
+     * shifted down), extra small numbers can be achieved.
+ *
+ * https://en.wikipedia.org/wiki/Double-precision_floating-point_format
+ *
+ * 10^-324 holds the smallest normalized exponent (but not value) and
+ * 10^308 holds the largest exponent. Internally our lookup table is
+ * only safe to use within a range slightly larger than this.
+ * Externally, a slightly larger/smaller value represents NaNs which
+ * are technically also possible to store as a number.
+ *
+ */
+
+    /* This also protects strtod fallback parsing. */
+ if (buf == end) {
+ return 0;
+ }
+ return grisu3_encode_double(mark, buf, sign, fraction, exponent, fraction_exp, ulp_half_error, result);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* GRISU3_PARSE_H */
diff --git a/nostrdb/flatcc/portable/grisu3_print.h b/nostrdb/flatcc/portable/grisu3_print.h
@@ -0,0 +1,265 @@
+/*
+ * Copyright (c) 2016 Mikkel F. Jørgensen, dvide.com
+ * Copyright author of MathGeoLib (https://github.com/juj)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License. http://www.apache.org/licenses/LICENSE-2.0
+ */
+
+/*
+ * Extracted from MathGeoLib.
+ *
+ * mikkelfj:
+ * - Fixed final output when printing single digit negative exponent to
+ * have leading zero (important for JSON).
+ * - Changed formatting to prefer 0.012 over 1.2e-2.
+ *
+ * Large portions of the original grisu3.c file have been moved to
+ * grisu3_math.h; the rest is placed here.
+ *
+ * See also comments in grisu3_math.h.
+ *
+ * MathGeoLib grisu3.c comment:
+ *
+ * This file is part of an implementation of the "grisu3" double to string
+ * conversion algorithm described in the research paper
+ *
+ * "Printing Floating-Point Numbers Quickly And Accurately with Integers"
+ * by Florian Loitsch, available at
+ * http://www.cs.tufts.edu/~nr/cs257/archive/florian-loitsch/printf.pdf
+ */
+
+#ifndef GRISU3_PRINT_H
+#define GRISU3_PRINT_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdio.h> /* sprintf, only needed for fallback printing */
+#include <string.h> /* memmove */
+#include <assert.h> /* assert */
+
+#include "grisu3_math.h"
+
+/*
+ * The lightweight "portable" C library recognizes grisu3 support if
+ * included first.
+ */
+#define grisu3_print_double_is_defined 1
+
+/*
+ * We are not sure of an exact definition, but we get up to 23
+ * characters empirically. There is some math ensuring it does not go
+ * awol though, like 18 digits + exponent or so.
+ * This max should be a safe buffer size for printing, including the
+ * zero terminator.
+ */
+#define GRISU3_PRINT_MAX 30
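+
+/*
+ * Illustrative usage (a sketch, not from the original source):
+ *
+ *   char buf[GRISU3_PRINT_MAX];
+ *   int n = grisu3_print_double(0.1, buf);
+ *
+ * After the call, buf holds "0.1" and n == 3.
+ */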
+
+static int grisu3_round_weed(char *buffer, int len, uint64_t wp_W, uint64_t delta, uint64_t rest, uint64_t ten_kappa, uint64_t ulp)
+{
+ uint64_t wp_Wup = wp_W - ulp;
+ uint64_t wp_Wdown = wp_W + ulp;
+ while(rest < wp_Wup && delta - rest >= ten_kappa
+ && (rest + ten_kappa < wp_Wup || wp_Wup - rest >= rest + ten_kappa - wp_Wup))
+ {
+ --buffer[len-1];
+ rest += ten_kappa;
+ }
+ if (rest < wp_Wdown && delta - rest >= ten_kappa
+ && (rest + ten_kappa < wp_Wdown || wp_Wdown - rest > rest + ten_kappa - wp_Wdown))
+ return 0;
+
+ return 2*ulp <= rest && rest <= delta - 4*ulp;
+}
+
+static int grisu3_digit_gen(grisu3_diy_fp_t low, grisu3_diy_fp_t w, grisu3_diy_fp_t high, char *buffer, int *length, int *kappa)
+{
+ uint64_t unit = 1;
+ grisu3_diy_fp_t too_low = { low.f - unit, low.e };
+ grisu3_diy_fp_t too_high = { high.f + unit, high.e };
+ grisu3_diy_fp_t unsafe_interval = grisu3_diy_fp_minus(too_high, too_low);
+ grisu3_diy_fp_t one = { 1ULL << -w.e, w.e };
+ uint32_t p1 = (uint32_t)(too_high.f >> -one.e);
+ uint64_t p2 = too_high.f & (one.f - 1);
+ uint32_t div;
+ *kappa = grisu3_largest_pow10(p1, GRISU3_DIY_FP_FRACT_SIZE + one.e, &div);
+ *length = 0;
+
+ while(*kappa > 0)
+ {
+ uint64_t rest;
+ char digit = (char)(p1 / div);
+ buffer[*length] = '0' + digit;
+ ++*length;
+ p1 %= div;
+ --*kappa;
+ rest = ((uint64_t)p1 << -one.e) + p2;
+ if (rest < unsafe_interval.f) return grisu3_round_weed(buffer, *length, grisu3_diy_fp_minus(too_high, w).f, unsafe_interval.f, rest, (uint64_t)div << -one.e, unit);
+ div /= 10;
+ }
+
+ for(;;)
+ {
+ char digit;
+ p2 *= 10;
+ unit *= 10;
+ unsafe_interval.f *= 10;
+ /* Integer division by one. */
+ digit = (char)(p2 >> -one.e);
+ buffer[*length] = '0' + digit;
+ ++*length;
+ p2 &= one.f - 1; /* Modulo by one. */
+ --*kappa;
+ if (p2 < unsafe_interval.f) return grisu3_round_weed(buffer, *length, grisu3_diy_fp_minus(too_high, w).f * unit, unsafe_interval.f, p2, one.f, unit);
+ }
+}
+
+static int grisu3(double v, char *buffer, int *length, int *d_exp)
+{
+ int mk, kappa, success;
+ grisu3_diy_fp_t dfp = grisu3_cast_diy_fp_from_double(v);
+ grisu3_diy_fp_t w = grisu3_diy_fp_normalize(dfp);
+
+ /* normalize boundaries */
+ grisu3_diy_fp_t t = { (dfp.f << 1) + 1, dfp.e - 1 };
+ grisu3_diy_fp_t b_plus = grisu3_diy_fp_normalize(t);
+ grisu3_diy_fp_t b_minus;
+ grisu3_diy_fp_t c_mk; /* Cached power of ten: 10^-k */
+ uint64_t u64 = grisu3_cast_uint64_from_double(v);
+ assert(v > 0 && v <= 1.7976931348623157e308); /* Grisu only handles strictly positive finite numbers. */
+ if (!(u64 & GRISU3_D64_FRACT_MASK) && (u64 & GRISU3_D64_EXP_MASK) != 0) { b_minus.f = (dfp.f << 2) - 1; b_minus.e = dfp.e - 2;} /* lower boundary is closer? */
+ else { b_minus.f = (dfp.f << 1) - 1; b_minus.e = dfp.e - 1; }
+ b_minus.f = b_minus.f << (b_minus.e - b_plus.e);
+ b_minus.e = b_plus.e;
+
+ mk = grisu3_diy_fp_cached_pow(GRISU3_MIN_TARGET_EXP - GRISU3_DIY_FP_FRACT_SIZE - w.e, &c_mk);
+
+ w = grisu3_diy_fp_multiply(w, c_mk);
+ b_minus = grisu3_diy_fp_multiply(b_minus, c_mk);
+ b_plus = grisu3_diy_fp_multiply(b_plus, c_mk);
+
+ success = grisu3_digit_gen(b_minus, w, b_plus, buffer, length, &kappa);
+ *d_exp = kappa - mk;
+ return success;
+}
+
+static int grisu3_i_to_str(int val, char *str)
+{
+ int len, i;
+ char *s;
+ char *begin = str;
+ if (val < 0) { *str++ = '-'; val = -val; }
+ s = str;
+
+ for(;;)
+ {
+ int ni = val / 10;
+ int digit = val - ni*10;
+ *s++ = (char)('0' + digit);
+ if (ni == 0)
+ break;
+ val = ni;
+ }
+ *s = '\0';
+ len = (int)(s - str);
+ for(i = 0; i < len/2; ++i)
+ {
+ char ch = str[i];
+ str[i] = str[len-1-i];
+ str[len-1-i] = ch;
+ }
+
+ return (int)(s - begin);
+}
+
+static int grisu3_print_nan(uint64_t v, char *dst)
+{
+ static char hexdigits[16] = "0123456789ABCDEF";
+ int i = 0;
+
+ dst[0] = 'N';
+ dst[1] = 'a';
+ dst[2] = 'N';
+ dst[3] = '(';
+ dst[20] = ')';
+ dst[21] = '\0';
+ dst += 4;
+ for (i = 15; i >= 0; --i) {
+ dst[i] = hexdigits[v & 0x0F];
+ v >>= 4;
+ }
+ return 21;
+}
+
+static int grisu3_print_double(double v, char *dst)
+{
+ int d_exp, len, success, decimals, i;
+ uint64_t u64 = grisu3_cast_uint64_from_double(v);
+ char *s2 = dst;
+ assert(dst);
+
+ /* Prehandle NaNs */
+ if ((u64 << 1) > 0xFFE0000000000000ULL) return grisu3_print_nan(u64, dst);
+ /* Prehandle negative values. */
+ if ((u64 & GRISU3_D64_SIGN) != 0) { *s2++ = '-'; v = -v; u64 ^= GRISU3_D64_SIGN; }
+ /* Prehandle zero. */
+ if (!u64) { *s2++ = '0'; *s2 = '\0'; return (int)(s2 - dst); }
+ /* Prehandle infinity. */
+ if (u64 == GRISU3_D64_EXP_MASK) { *s2++ = 'i'; *s2++ = 'n'; *s2++ = 'f'; *s2 = '\0'; return (int)(s2 - dst); }
+
+ success = grisu3(v, s2, &len, &d_exp);
+ /* If grisu3 was not able to convert the number to a string, then use old sprintf (suboptimal). */
+ if (!success) return sprintf(s2, "%.17g", v) + (int)(s2 - dst);
+
+ /* We now have an integer string of form "151324135" and a base-10 exponent for that number. */
+ /* Next, decide the best presentation for that string by whether to use a decimal point, or the scientific exponent notation 'e'. */
+ /* We don't pick the absolute shortest representation, but pick a balance between readability and shortness, e.g. */
+ /* 1.545056189557677e-308 could be represented in a shorter form */
+ /* 1545056189557677e-323 but that would be somewhat unreadable. */
+ decimals = GRISU3_MIN(-d_exp, GRISU3_MAX(1, len-1));
+
+ /* mikkelfj:
+ * fix zero prefix .1 => 0.1, important for JSON export.
+ * prefer unscientific notation at same length:
+ * -1.2345e-4 over -1.00012345,
+ * -1.0012345 over -1.2345e-3
+ */
+ if (d_exp < 0 && (len + d_exp) > -3 && len <= -d_exp)
+ {
+ /* mikkelfj: fix zero prefix .1 => 0.1, and short exponents 1.3e-2 => 0.013. */
+ memmove(s2 + 2 - d_exp - len, s2, (size_t)len);
+ s2[0] = '0';
+ s2[1] = '.';
+ for (i = 2; i < 2-d_exp-len; ++i) s2[i] = '0';
+ len += i;
+ }
+ else if (d_exp < 0 && len > 1) /* Add decimal point? */
+ {
+ for(i = 0; i < decimals; ++i) s2[len-i] = s2[len-i-1];
+ s2[len++ - decimals] = '.';
+ d_exp += decimals;
+ /* Need scientific notation as well? */
+ if (d_exp != 0) { s2[len++] = 'e'; len += grisu3_i_to_str(d_exp, s2+len); }
+ }
+ /* Add scientific notation? */
+ else if (d_exp < 0 || d_exp > 2) { s2[len++] = 'e'; len += grisu3_i_to_str(d_exp, s2+len); }
+ /* Add zeroes instead of scientific notation? */
+ else if (d_exp > 0) { while(d_exp-- > 0) s2[len++] = '0'; }
+ s2[len] = '\0'; /* grisu3 doesn't null terminate, so ensure termination. */
+ return (int)(s2+len-dst);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* GRISU3_PRINT_H */
diff --git a/nostrdb/flatcc/portable/include/README b/nostrdb/flatcc/portable/include/README
@@ -0,0 +1,4 @@
+This directory holds subdirectories so it can be added to the include
+path such that standard and OS-specific header includes like
+<stdint.h>, <stdbool.h> and <endian.h> can succeed without explicitly
+including special headers.
diff --git a/nostrdb/flatcc/portable/include/linux/endian.h b/nostrdb/flatcc/portable/include/linux/endian.h
@@ -0,0 +1 @@
+#include "portable/pendian.h"
diff --git a/nostrdb/flatcc/portable/include/std/inttypes.h b/nostrdb/flatcc/portable/include/std/inttypes.h
@@ -0,0 +1 @@
+#include "portable/inttypes.h"
diff --git a/nostrdb/flatcc/portable/include/std/stdalign.h b/nostrdb/flatcc/portable/include/std/stdalign.h
@@ -0,0 +1 @@
+#include "portable/pstdalign.h"
diff --git a/nostrdb/flatcc/portable/include/std/stdbool.h b/nostrdb/flatcc/portable/include/std/stdbool.h
@@ -0,0 +1 @@
+#include "portable/pstdbool.h"
diff --git a/nostrdb/flatcc/portable/include/std/stdint.h b/nostrdb/flatcc/portable/include/std/stdint.h
@@ -0,0 +1 @@
+#include "portable/pstdint.h"
diff --git a/nostrdb/flatcc/portable/paligned_alloc.h b/nostrdb/flatcc/portable/paligned_alloc.h
@@ -0,0 +1,210 @@
+#ifndef PALIGNED_ALLOC_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * NOTE: MSVC in general has no aligned alloc function that is
+ * compatible with free and it is not trivial to implement a version
+ * which is. Therefore, to remain portable, end user code needs to
+ * use `aligned_free` which is not part of C11 but defined in this header.
+ *
+ * glibc only provides aligned_alloc when _ISOC11_SOURCE is defined, but
+ * MingW does not support aligned_alloc despite defining this; it uses
+ * _aligned_malloc like MSVC.
+ *
+ * The same issue is present on some Unix systems not providing
+ * posix_memalign.
+ *
+ * Note that clang and gcc with -std=c11 or -std=c99 will not define
+ * _POSIX_C_SOURCE and thus posix_memalign cannot be detected but
+ * aligned_alloc is not necessarily available either. We assume
+ * that clang always has posix_memalign although it is not strictly
+ * correct. For gcc, use -std=gnu99 or -std=gnu11 or don't use -std in
+ * order to enable posix_memalign, or live with the fallback until using
+ * a system where glibc has a version that supports aligned_alloc.
+ *
+ * For C11 compliant compilers and compilers with posix_memalign,
+ * it is valid to use free instead of aligned_free with the above
+ * caveats.
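+ *
+ * Illustrative use (subject to the caveats above):
+ *
+ *   void *p = aligned_alloc(64, 1024);
+ *   ...
+ *   aligned_free(p);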
+ */
+
+#include <stdlib.h>
+
+/*
+ * Define this to see which version is used so the fallback is not
+ * engaged unnecessarily:
+ *
+ * #define PORTABLE_DEBUG_ALIGNED_ALLOC
+ */
+
+#if 0
+#define PORTABLE_DEBUG_ALIGNED_ALLOC
+#endif
+
+#if !defined(PORTABLE_C11_ALIGNED_ALLOC)
+
+/*
+ * PORTABLE_C11_ALIGNED_ALLOC = 1
+ * indicates that the system has a builtin aligned_alloc.
+ * If it doesn't, the section after detection provides an implementation.
+ */
+#if defined (__MINGW32__)
+/* MingW does not provide aligned_alloc despite defining _ISOC11_SOURCE */
+#define PORTABLE_C11_ALIGNED_ALLOC 0
+#elif defined (_ISOC11_SOURCE)
+/* glibc aligned_alloc detection, but MingW is not truthful */
+#define PORTABLE_C11_ALIGNED_ALLOC 1
+#elif defined (__GLIBC__)
+/* aligned_alloc is not available in glibc just because __STDC_VERSION__ >= 201112L. */
+#define PORTABLE_C11_ALIGNED_ALLOC 0
+#elif defined (__clang__)
+#define PORTABLE_C11_ALIGNED_ALLOC 0
+#elif defined(__IBMC__)
+#define PORTABLE_C11_ALIGNED_ALLOC 0
+#elif (defined(__STDC__) && __STDC__ && defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L)
+#define PORTABLE_C11_ALIGNED_ALLOC 1
+#else
+#define PORTABLE_C11_ALIGNED_ALLOC 0
+#endif
+
+#endif /* PORTABLE_C11_ALIGNED_ALLOC */
+
+/* https://linux.die.net/man/3/posix_memalign */
+#if !defined(PORTABLE_POSIX_MEMALIGN) && defined(_GNU_SOURCE)
+#define PORTABLE_POSIX_MEMALIGN 1
+#endif
+
+/* https://forum.kde.org/viewtopic.php?p=66274 */
+#if !defined(PORTABLE_POSIX_MEMALIGN) && defined(_XOPEN_SOURCE)
+#if _XOPEN_SOURCE >= 600
+#define PORTABLE_POSIX_MEMALIGN 1
+#endif
+#endif
+
+#if !defined(PORTABLE_POSIX_MEMALIGN) && defined(_POSIX_C_SOURCE)
+#if _POSIX_C_SOURCE >= 200112L
+#define PORTABLE_POSIX_MEMALIGN 1
+#endif
+#endif
+
+#if !defined(PORTABLE_POSIX_MEMALIGN) && defined(__clang__)
+#define PORTABLE_POSIX_MEMALIGN 1
+#endif
+
+#if !defined(PORTABLE_POSIX_MEMALIGN)
+#define PORTABLE_POSIX_MEMALIGN 0
+#endif
+
+/* https://forum.kde.org/viewtopic.php?p=66274 */
+#if (defined(__STDC__) && __STDC__ && defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L)
+/* C11 or newer */
+#include <stdalign.h>
+#endif
+
+/* C11 or newer */
+#if !defined(aligned_alloc) && !defined(__aligned_alloc_is_defined)
+
+#if PORTABLE_C11_ALIGNED_ALLOC
+#ifdef PORTABLE_DEBUG_ALIGNED_ALLOC
+#error "DEBUG: C11_ALIGNED_ALLOC configured"
+#endif
+#elif defined(_MSC_VER) || defined(__MINGW32__)
+
+#ifdef PORTABLE_DEBUG_ALIGNED_ALLOC
+#error "DEBUG: Windows _aligned_malloc configured"
+#endif
+
+/* _aligned_malloc is not compatible with free. */
+#define aligned_alloc(alignment, size) _aligned_malloc(size, alignment)
+#define aligned_free(p) _aligned_free(p)
+#define __aligned_alloc_is_defined 1
+#define __aligned_free_is_defined 1
+
+#elif PORTABLE_POSIX_MEMALIGN
+
+#if defined(__GNUC__)
+#if !defined(__GNUCC__)
+extern int posix_memalign (void **, size_t, size_t);
+#elif __GNUCC__ < 5
+extern int posix_memalign (void **, size_t, size_t);
+#endif
+#endif
+
+static inline void *__portable_aligned_alloc(size_t alignment, size_t size)
+{
+ int err;
+ void *p = 0;
+
+ if (alignment < sizeof(void *)) {
+ alignment = sizeof(void *);
+ }
+ err = posix_memalign(&p, alignment, size);
+ if (err && p) {
+ free(p);
+ p = 0;
+ }
+ return p;
+}
+
+#ifdef PORTABLE_DEBUG_ALIGNED_ALLOC
+#error "DEBUG: POSIX_MEMALIGN configured"
+#endif
+
+#define aligned_alloc(alignment, size) __portable_aligned_alloc(alignment, size)
+#define aligned_free(p) free(p)
+#define __aligned_alloc_is_defined 1
+#define __aligned_free_is_defined 1
+
+#else
+
+static inline void *__portable_aligned_alloc(size_t alignment, size_t size)
+{
+ char *raw;
+ void *buf;
+ size_t total_size = (size + alignment - 1 + sizeof(void *));
+
+ if (alignment < sizeof(void *)) {
+ alignment = sizeof(void *);
+ }
+ raw = (char *)(size_t)malloc(total_size);
+ buf = raw + alignment - 1 + sizeof(void *);
+ buf = (void *)(((size_t)buf) & ~(alignment - 1));
+ ((void **)buf)[-1] = raw;
+ return buf;
+}
+
+static inline void __portable_aligned_free(void *p)
+{
+ char *raw;
+
+ if (p) {
+ raw = (char*)((void **)p)[-1];
+ free(raw);
+ }
+}
+
+#define aligned_alloc(alignment, size) __portable_aligned_alloc(alignment, size)
+#define aligned_free(p) __portable_aligned_free(p)
+#define __aligned_alloc_is_defined 1
+#define __aligned_free_is_defined 1
+
+#ifdef PORTABLE_DEBUG_ALIGNED_ALLOC
+#error "DEBUG: aligned_alloc malloc fallback configured"
+#endif
+
+#endif
+
+#endif /* aligned_alloc */
+
+#if !defined(aligned_free) && !defined(__aligned_free_is_defined)
+#define aligned_free(p) free(p)
+#define __aligned_free_is_defined 1
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* PALIGNED_ALLOC_H */
diff --git a/nostrdb/flatcc/portable/pattributes.h b/nostrdb/flatcc/portable/pattributes.h
@@ -0,0 +1,84 @@
+
+/*
+ * C23 introduces an attribute syntax `[[<attribute>]]`. Prior to that
+ * other non-standard syntaxes such as `__attribute__((<attribute>))`
+ * and `__declspec(<attribute>)` have been supported by some compiler
+ * versions.
+ *
+ * See also:
+ * https://en.cppreference.com/w/c/language/attributes
+ *
+ * There is no portable way to use C23 attributes in older C standards
+ * so in order to use these portably, some macro name needs to be
+ * defined for each attribute that either maps to the older supported
+ * syntax, or ignores the attribute as appropriate.
+ *
+ * The Linux kernel defines certain attributes as macros, such as
+ * `fallthrough`. When adding attributes it seems reasonable to follow
+ * the Linux conventions in the absence of any official standard. However, it
+ * is not the intention that this file should mirror the Linux
+ * attributes 1 to 1.
+ *
+ * See also:
+ * https://github.com/torvalds/linux/blob/master/include/linux/compiler_attributes.h
+ *
+ * There is a risk that exposed attribute names may lead to name
+ * conflicts. A conflicting name can be undefined and, if necessary,
+ * accessed using `pattribute(<attribute>)`. All attributes can be hidden by
+ * defining `PORTABLE_EXPOSE_ATTRIBUTES=0` in which case
+ * `pattribute(<attribute>)` can still be used and then if a specific
+ * attribute name still needs to be exposed, it can be defined manually
+ * like `#define fallthrough pattribute(fallthrough)`.
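+ *
+ * An illustrative use of the exposed macro (assuming the defaults):
+ *
+ *   switch (c) {
+ *   case 'a': x = 1; fallthrough;
+ *   default: x += 1;
+ *   }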
+ */
+
+
+#ifndef PATTRIBUTES_H
+#define PATTRIBUTES_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef PORTABLE_EXPOSE_ATTRIBUTES
+#define PORTABLE_EXPOSE_ATTRIBUTES 1
+#endif
+
+#ifdef __has_c_attribute
+# define PORTABLE_HAS_C_ATTRIBUTE(x) __has_c_attribute(x)
+#else
+# define PORTABLE_HAS_C_ATTRIBUTE(x) 0
+#endif
+
+#ifdef __has_attribute
+# define PORTABLE_HAS_ATTRIBUTE(x) __has_attribute(x)
+#else
+# define PORTABLE_HAS_ATTRIBUTE(x) 0
+#endif
+
+
+/* https://en.cppreference.com/w/c/language/attributes/fallthrough */
+#if PORTABLE_HAS_C_ATTRIBUTE(__fallthrough__)
+# define pattribute_fallthrough [[__fallthrough__]]
+#elif PORTABLE_HAS_ATTRIBUTE(__fallthrough__)
+# define pattribute_fallthrough __attribute__((__fallthrough__))
+#else
+# define pattribute_fallthrough ((void)0)
+#endif
+
+
+#define pattribute(x) pattribute_##x
+
+#if PORTABLE_EXPOSE_ATTRIBUTES
+
+#ifndef fallthrough
+# define fallthrough pattribute(fallthrough)
+#endif
+
+#endif
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* PATTRIBUTES_H */
diff --git a/nostrdb/flatcc/portable/pbase64.h b/nostrdb/flatcc/portable/pbase64.h
@@ -0,0 +1,448 @@
+#ifndef PBASE64_H
+#define PBASE64_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdlib.h>
+
+/* Guarded to allow inclusion of pstdint.h first, if stdint.h is not supported. */
+#ifndef UINT8_MAX
+#include <stdint.h>
+#endif
+
+#define BASE64_EOK 0
+/* 0 or more full blocks decoded; remaining content may be parsed with a fresh buffer. */
+#define BASE64_EMORE 1
+/* The `src_len` argument is required when encoding. */
+#define BASE64_EARGS 2
+/* Unsupported mode, or modifier not supported by mode when encoding. */
+#define BASE64_EMODE 3
+/* Decoding ends at invalid tail length - either by source length or by non-alphabet symbol. */
+#define BASE64_ETAIL 4
+/* Decoding ends at valid tail length but last byte has non-zero bits where it shouldn't have. */
+#define BASE64_EDIRTY 5
+
+static inline const char *base64_strerror(int err);
+
+/* Only the URL mode below is URL safe; see the per-mode comments. */
+enum {
+ /* Most common base64 codec, but not url friendly. */
+ base64_mode_rfc4648 = 0,
+
+ /* URL safe version, '+' -> '-', '/' -> '_'. */
+ base64_mode_url = 1,
+
+ /*
+ * Skip ' ', '\r', and '\n' - we do not allow tab because common
+ * uses of base64 such as PEM do not allow tab.
+ */
+ base64_dec_modifier_skipspace = 32,
+
+ /* Padding is excluded by default. Not allowed for zbase64. */
+ base64_enc_modifier_padding = 128,
+
+ /* For internal use or to decide codec of mode. */
+ base64_modifier_mask = 32 + 64 + 128,
+};
+
+/* Encoded size with or without padding. */
+static inline size_t base64_encoded_size(size_t len, int mode);
+
+/*
+ * Decoded size assuming no padding.
+ * If `len` does include padding, the actual size may be less
+ * when decoding, but never more.
+ */
+static inline size_t base64_decoded_size(size_t len);
+
+/*
+ * `dst` must hold ceil(len * 4 / 3) bytes.
+ * `src_len` points to the length of the source and is updated with the
+ * length of the parse on both success and failure. If `dst_len` is not
+ * null it is updated with the length of the encoded output on both
+ * success and failure.
+ * `mode` selects the encoding alphabet, defaulting to RFC 4648.
+ * Returns 0 on success.
+ *
+ * A terminal space can be added with `dst[dst_len++] = ' '` after the
+ * encode call. All non-alphabet can be used as terminators except the
+ * padding character '='. The following characters will work as
+ * terminator for all modes: { '\0', '\n', ' ', '\t' }. A terminator is
+ * optional when the source length is given to the decoder. Note that
+ * Crockford also reserves a few extra characters for a checksum, but the
+ * checksum must be separate from the main buffer and is not supported
+ * by this library.
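+ *
+ * A minimal round-trip sketch (illustrative, not part of the original
+ * documentation):
+ *
+ *   uint8_t enc[16], dec[16];
+ *   size_t n = 5, enc_len, dec_len = sizeof(dec);
+ *   base64_encode(enc, (const uint8_t *)"hello", &enc_len, &n, base64_mode_url);
+ *   n = enc_len;
+ *   base64_decode(dec, enc, &dec_len, &n, base64_mode_url);
+ *
+ * After the calls, dec holds "hello" and dec_len == 5.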
+ */
+static inline int base64_encode(uint8_t *dst, const uint8_t *src, size_t *dst_len, size_t *src_len, int mode);
+
+/*
+ * Decodes according to mode while ignoring encoding modifiers.
+ * `src_len` and `dst_len` are optional pointers. If `src_len` is set it
+ * must contain the length of the input, otherwise the input must be
+ * terminated with a non-alphabet character or valid padding (a single
+ * padding character is accepted) - if the src_len output is needed but
+ * not the input due to guaranteed termination, then set it to
+ * (size_t)-1. `dst_len` must contain length of output buffer if present
+ * and parse will fail with BASE64_EMORE after decoding a block multiple
+ * if dst_len is exhausted - the parse can thus be resumed after
+ * draining destination. `src_len` and `dst_len` are updated with parsed
+ * and decoded length, when present, on both success and failure.
+ * Returns 0 on success. Invalid characters are not considered errors -
+ * they simply terminate the parse; however, if the termination is not
+ * at a block multiple or a valid partial block length, BASE64_ETAIL is
+ * returned without output holding the last full block, if any.
+ * BASE64_EDIRTY is returned if a valid tail length holds non-zero
+ * unused tail bits.
+ */
+static inline int base64_decode(uint8_t *dst, const uint8_t *src, size_t *dst_len, size_t *src_len, int mode);
+
+static inline const char *base64_strerror(int err)
+{
+ switch (err) {
+ case BASE64_EOK: return "ok";
+ case BASE64_EARGS: return "invalid argument";
+ case BASE64_EMODE: return "invalid mode";
+ case BASE64_EMORE: return "destination full";
+ case BASE64_ETAIL: return "invalid tail length";
+ case BASE64_EDIRTY: return "invalid tail content";
+ default: return "unknown error";
+ }
+}
+
+static inline size_t base64_encoded_size(size_t len, int mode)
+{
+ size_t k = len % 3;
+ size_t n = (len * 4 / 3 + 3) & ~(size_t)3;
+ int pad = mode & base64_enc_modifier_padding;
+
+ if (!pad) {
+ switch (k) {
+ case 2:
+ n -= 1;
+ break;
+ case 1:
+ n -= 2;
+ break;
+ default:
+ break;
+ }
+ }
+ return n;
+}
+
+static inline size_t base64_decoded_size(size_t len)
+{
+ size_t k = len % 4;
+ size_t n = len / 4 * 3;
+
+ switch (k) {
+ case 3:
+ return n + 2;
+ case 2:
+ return n + 1;
+ case 1: /* Not valid without padding. */
+ case 0:
+ default:
+ return n;
+ }
+}
+
+static inline int base64_encode(uint8_t *dst, const uint8_t *src, size_t *dst_len, size_t *src_len, int mode)
+{
+ const uint8_t *rfc4648_alphabet = (const uint8_t *)
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
+ const uint8_t *url_alphabet = (const uint8_t *)
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_";
+
+ const uint8_t *T;
+ uint8_t *dst_base = dst;
+ int pad = mode & base64_enc_modifier_padding;
+ size_t len = 0;
+ int ret = BASE64_EMODE;
+
+ if (!src_len) {
+ ret = BASE64_EARGS;
+ goto done;
+ }
+ len = *src_len;
+ mode = mode & ~base64_modifier_mask;
+ switch (mode) {
+ case base64_mode_rfc4648:
+ T = rfc4648_alphabet;
+ break;
+ case base64_mode_url:
+ T = url_alphabet;
+ break;
+ default:
+ /* Invalid mode. */
+ goto done;
+ }
+
+ ret = BASE64_EOK;
+
+ /* Encodes 4 destination bytes from 3 source bytes. */
+ while (len >= 3) {
+ dst[0] = T[((src[0] >> 2))];
+ dst[1] = T[((src[0] << 4) & 0x30) | (src[1] >> 4)];
+ dst[2] = T[((src[1] << 2) & 0x3c) | (src[2] >> 6)];
+ dst[3] = T[((src[2] & 0x3f))];
+ len -= 3;
+ dst += 4;
+ src += 3;
+ }
+    /* Encodes the remaining 1 or 2 source bytes into 2 to 4 destination bytes, if any. */
+ switch(len) {
+ case 2:
+ dst[0] = T[((src[0] >> 2))];
+ dst[1] = T[((src[0] << 4) & 0x30) | (src[1] >> 4)];
+ dst[2] = T[((src[1] << 2) & 0x3c)];
+ dst += 3;
+ if (pad) {
+ *dst++ = '=';
+ }
+ break;
+ case 1:
+ dst[0] = T[((src[0] >> 2))];
+ dst[1] = T[((src[0] << 4) & 0x30)];
+ dst += 2;
+ if (pad) {
+ *dst++ = '=';
+ *dst++ = '=';
+ }
+ break;
+ default:
+ pad = 0;
+ break;
+ }
+ len = 0;
+done:
+ if (dst_len) {
+ *dst_len = (size_t)(dst - dst_base);
+ }
+ if (src_len) {
+ *src_len -= len;
+ }
+ return ret;
+}
+
+static inline int base64_decode(uint8_t *dst, const uint8_t *src, size_t *dst_len, size_t *src_len, int mode)
+{
+ static const uint8_t cinvalid = 64;
+ static const uint8_t cignore = 65;
+ static const uint8_t cpadding = 66;
+
+ /*
+ * 0..63: 6-bit encoded value.
+ * 64: flags non-alphabet symbols.
+ * 65: codes for ignored symbols.
+ * 66: codes for pad symbol '='.
+     * All codecs consider padding an optional terminator and, if present,
+     * consume as many pad bytes as possible up to block termination,
+     * but do not fail if a block is not full.
+     *
+     * The base tables do not currently have any ignored characters, but
+     * we might add spaces as per the MIME spec; assuming spaces only
+     * happen at block boundaries, this is probably better handled by
+     * repeated parsing.
+ */
+ static const uint8_t base64rfc4648_decode[256] = {
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 62, 64, 64, 64, 63,
+ 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 64, 64, 64, 66, 64, 64,
+ 64, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 64, 64, 64, 64, 64,
+ 64, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64
+ };
+
+ static const uint8_t base64url_decode[256] = {
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 62, 64, 64,
+ 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 64, 64, 64, 66, 64, 64,
+ 64, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 64, 64, 64, 64, 63,
+ 64, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64
+ };
+
+ static const uint8_t base64rfc4648_decode_skipspace[256] = {
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 65, 64, 64, 65, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 65, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 62, 64, 64, 64, 63,
+ 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 64, 64, 64, 66, 64, 64,
+ 64, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 64, 64, 64, 64, 64,
+ 64, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64
+ };
+
+ static const uint8_t base64url_decode_skipspace[256] = {
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 65, 64, 64, 65, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 65, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 62, 64, 64,
+ 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 64, 64, 64, 66, 64, 64,
+ 64, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 64, 64, 64, 64, 63,
+ 64, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64
+ };
+
+ int ret = BASE64_EOK;
+ size_t i, k;
+ uint8_t hold[4];
+ uint8_t *dst_base = dst;
+ size_t limit = (size_t)-1;
+ size_t len = (size_t)-1, mark;
+ const uint8_t *T = base64rfc4648_decode;
+ int skipspace = mode & base64_dec_modifier_skipspace;
+
+ if (src_len) {
+ len = *src_len;
+ }
+ mark = len;
+ mode = mode & ~base64_modifier_mask;
+ switch (mode) {
+ case base64_mode_rfc4648:
+ T = skipspace ? base64rfc4648_decode_skipspace : base64rfc4648_decode;
+ break;
+ case base64_mode_url:
+ T = skipspace ? base64url_decode_skipspace : base64url_decode;
+ break;
+ default:
+ ret = BASE64_EMODE;
+ goto done;
+ }
+
+ if (dst_len && *dst_len > 0) {
+ limit = *dst_len;
+ }
+ while(limit > 0) {
+ for (i = 0; i < 4; ++i) {
+ if (len == i) {
+ k = i;
+ len -= i;
+ goto tail;
+ }
+ if ((hold[i] = T[src[i]]) >= cinvalid) {
+ if (hold[i] == cignore) {
+ ++src;
+ --len;
+ --i;
+ continue;
+ }
+ k = i;
+ /* Strip padding and any ignored characters in the padding, if present. */
+ if (hold[i] == cpadding) {
+ ++i;
+ while (i < len && i < 8) {
+ if (T[src[i]] != cpadding && T[src[i]] != cignore) {
+ break;
+ }
+ ++i;
+ }
+ }
+ len -= i;
+ goto tail;
+ }
+ }
+ if (limit < 3) {
+ goto more;
+ }
+ dst[0] = (uint8_t)((hold[0] << 2) | (hold[1] >> 4));
+ dst[1] = (uint8_t)((hold[1] << 4) | (hold[2] >> 2));
+ dst[2] = (uint8_t)((hold[2] << 6) | (hold[3]));
+ dst += 3;
+ src += 4;
+ limit -= 3;
+ len -= 4;
+ mark = len;
+ }
+done:
+ if (dst_len) {
+ *dst_len = (size_t)(dst - dst_base);
+ }
+ if (src_len) {
+ *src_len -= mark;
+ }
+ return ret;
+
+tail:
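+ /*
+ * k is the number of 6-bit symbols seen in the final block: 0 is a
+ * clean block boundary, 2 yields one output byte, 3 yields two, and
+ * 1 (a lone symbol) cannot occur in valid base64 (BASE64_ETAIL).
+ */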
+ switch (k) {
+ case 0:
+ break;
+ case 2:
+ if ((hold[1] << 4) & 0xff) {
+ goto dirty;
+ }
+ if (limit < 1) {
+ goto more;
+ }
+ dst[0] = (uint8_t)((hold[0] << 2) | (hold[1] >> 4));
+ dst += 1;
+ break;
+ case 3:
+ if ((hold[2] << 6) & 0xff) {
+ goto dirty;
+ }
+ if (limit < 2) {
+ goto more;
+ }
+ dst[0] = (uint8_t)((hold[0] << 2) | (hold[1] >> 4));
+ dst[1] = (uint8_t)((hold[1] << 4) | (hold[2] >> 2));
+ dst += 2;
+ break;
+ default:
+ ret = BASE64_ETAIL;
+ goto done;
+ }
+ mark = len;
+ goto done;
+dirty:
+ ret = BASE64_EDIRTY;
+ goto done;
+more:
+ ret = BASE64_EMORE;
+ goto done;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* PBASE64_H */
diff --git a/nostrdb/flatcc/portable/pcrt.h b/nostrdb/flatcc/portable/pcrt.h
@@ -0,0 +1,48 @@
+#ifndef PCRT_H
+#define PCRT_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/*
+ * Assertions and pointer violations in debug mode may trigger a dialog
+ * on Windows. When running headless this is not helpful, but
+ * unfortunately it cannot be disabled with a compiler option so code
+ * must be injected into the runtime early in the main function.
+ * A call to the provided `init_headless_crt()` macro does this in
+ * a portable manner.
+ *
+ * See also:
+ * https://stackoverflow.com/questions/13943665/how-can-i-disable-the-debug-assertion-dialog-on-windows
+ */
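+
+/*
+ * Usage sketch: call this first thing in main so a failed assert in a
+ * headless test run prints to stderr and exits instead of blocking on
+ * a dialog:
+ *
+ *     int main(int argc, char *argv[])
+ *     {
+ *         init_headless_crt();
+ *         return run_tests(); // hypothetical test entry point
+ *     }
+ */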
+
+#if defined(_WIN32)
+
+#include <crtdbg.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+static int _portable_msvc_headless_report_hook(int reportType, char *message, int *returnValue)
+{
+ fprintf(stderr, "CRT[%d]: %s\n", reportType, message);
+ *returnValue = 1;
+ exit(1);
+ return 1;
+}
+
+#define init_headless_crt() _CrtSetReportHook(_portable_msvc_headless_report_hook)
+
+#else
+
+#define init_headless_crt() ((void)0)
+
+#endif
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* PCRT_H */
diff --git a/nostrdb/flatcc/portable/pdiagnostic.h b/nostrdb/flatcc/portable/pdiagnostic.h
@@ -0,0 +1,85 @@
+ /* There is intentionally no include guard in this file. */
+
+
+/*
+ * Usage: optionally disable any of these before including.
+ *
+ * #define PDIAGNOSTIC_IGNORE_UNUSED_FUNCTION
+ * #define PDIAGNOSTIC_IGNORE_UNUSED_VARIABLE
+ * #define PDIAGNOSTIC_IGNORE_UNUSED_PARAMETER
+ * #define PDIAGNOSTIC_IGNORE_UNUSED // all of the above
+ *
+ * #include "pdiagnostic.h"
+ *
+ * Alternatively use #include "pdiagnostic_push/pop.h"
+ */
+
+#ifdef _MSC_VER
+#pragma warning(disable: 4668) /* preprocessor name not defined */
+#endif
+
+#if !defined(PDIAGNOSTIC_AWARE_MSVC) && defined(_MSC_VER)
+#define PDIAGNOSTIC_AWARE_MSVC 1
+#elif !defined(PDIAGNOSTIC_AWARE_MSVC)
+#define PDIAGNOSTIC_AWARE_MSVC 0
+#endif
+
+#if !defined(PDIAGNOSTIC_AWARE_CLANG) && defined(__clang__)
+#define PDIAGNOSTIC_AWARE_CLANG 1
+#elif !defined(PDIAGNOSTIC_AWARE_CLANG)
+#define PDIAGNOSTIC_AWARE_CLANG 0
+#endif
+
+#if !defined(PDIAGNOSTIC_AWARE_GCC) && defined(__GNUC__) && !defined(__clang__)
+/* Can disable some warnings even if push is not available (gcc-4.2 vs gcc-4.7) */
+#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 2)
+#define PDIAGNOSTIC_AWARE_GCC 1
+#endif
+#endif
+
+#if !defined(PDIAGNOSTIC_AWARE_GCC)
+#define PDIAGNOSTIC_AWARE_GCC 0
+#endif
+
+#if defined(PDIAGNOSTIC_IGNORE_UNUSED_FUNCTION) || defined(PDIAGNOSTIC_IGNORE_UNUSED)
+#if PDIAGNOSTIC_AWARE_CLANG
+#pragma clang diagnostic ignored "-Wunused-function"
+#elif PDIAGNOSTIC_AWARE_GCC
+#pragma GCC diagnostic ignored "-Wunused-function"
+#endif
+#endif
+#undef PDIAGNOSTIC_IGNORE_UNUSED_FUNCTION
+
+#if defined(PDIAGNOSTIC_IGNORE_UNUSED_VARIABLE) || defined(PDIAGNOSTIC_IGNORE_UNUSED)
+#if PDIAGNOSTIC_AWARE_MSVC
+#pragma warning(disable: 4101) /* unused local variable */
+#elif PDIAGNOSTIC_AWARE_CLANG
+#pragma clang diagnostic ignored "-Wunused-variable"
+#elif PDIAGNOSTIC_AWARE_GCC
+#pragma GCC diagnostic ignored "-Wunused-variable"
+#endif
+#endif
+#undef PDIAGNOSTIC_IGNORE_UNUSED_VARIABLE
+
+#if defined(PDIAGNOSTIC_IGNORE_UNUSED_PARAMETER) || defined(PDIAGNOSTIC_IGNORE_UNUSED)
+#if PDIAGNOSTIC_AWARE_CLANG
+#pragma clang diagnostic ignored "-Wunused-parameter"
+#elif PDIAGNOSTIC_AWARE_GCC
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+#endif
+#undef PDIAGNOSTIC_IGNORE_UNUSED_PARAMETER
+
+#undef PDIAGNOSTIC_IGNORE_UNUSED
+
+#if defined (__cplusplus) && __cplusplus < 201103L
+#if PDIAGNOSTIC_AWARE_CLANG
+/* Needed for < C++11 clang C++ static_assert */
+#pragma clang diagnostic ignored "-Wc11-extensions"
+/* Needed for empty macro arguments. */
+#pragma clang diagnostic ignored "-Wc99-extensions"
+/* Needed for trailing commas. */
+#pragma clang diagnostic ignored "-Wc++11-extensions"
+#endif
+#endif
+
diff --git a/nostrdb/flatcc/portable/pdiagnostic_pop.h b/nostrdb/flatcc/portable/pdiagnostic_pop.h
@@ -0,0 +1,20 @@
+#if defined(PDIAGNOSTIC_PUSHED_MSVC)
+#if PDIAGNOSTIC_PUSHED_MSVC
+#pragma warning( pop )
+#endif // PDIAGNOSTIC_PUSHED_MSVC
+#undef PDIAGNOSTIC_PUSHED_MSVC
+#endif // defined(PDIAGNOSTIC_PUSHED_MSVC)
+
+#if defined(PDIAGNOSTIC_PUSHED_CLANG)
+#if PDIAGNOSTIC_PUSHED_CLANG
+#pragma clang diagnostic pop
+#endif // PDIAGNOSTIC_PUSHED_CLANG
+#undef PDIAGNOSTIC_PUSHED_CLANG
+#endif // defined(PDIAGNOSTIC_PUSHED_CLANG)
+
+#if defined(PDIAGNOSTIC_PUSHED_GCC)
+#if PDIAGNOSTIC_PUSHED_GCC
+#pragma GCC diagnostic pop
+#endif // PDIAGNOSTIC_PUSHED_GCC
+#undef PDIAGNOSTIC_PUSHED_GCC
+#endif // defined(PDIAGNOSTIC_PUSHED_GCC)
diff --git a/nostrdb/flatcc/portable/pdiagnostic_push.h b/nostrdb/flatcc/portable/pdiagnostic_push.h
@@ -0,0 +1,51 @@
+/*
+ * See also comment in "pdiagnostic.h"
+ *
+ * e.g.
+ * #define PDIAGNOSTIC_IGNORE_UNUSED_FUNCTION
+ * #define PDIAGNOSTIC_IGNORE_UNUSED_VARIABLE
+ * #include "pdiagnostic_push.h"
+ * ...
+ * #include "pdiagnostic_pop.h"
+ * <eof>
+ *
+ * or if push pop isn't desired:
+ * #define PDIAGNOSTIC_IGNORE_UNUSED_FUNCTION
+ * #define PDIAGNOSTIC_IGNORE_UNUSED_VARIABLE
+ * #include "pdiagnostic.h"
+ * ...
+ * <eof>
+ *
+ *
+ * Some of these warnings cannot be ignored
+ * at the #pragma level, but might be in the future.
+ * Use compiler switches like -Wno-unused-function
+ * to work around this.
+ */
+
+#if defined(_MSC_VER)
+#pragma warning( push )
+#define PDIAGNOSTIC_PUSHED_MSVC 1
+#else
+#define PDIAGNOSTIC_PUSHED_MSVC 0
+#endif
+
+#if defined(__clang__)
+#pragma clang diagnostic push
+#define PDIAGNOSTIC_PUSHED_CLANG 1
+#else
+#define PDIAGNOSTIC_PUSHED_CLANG 0
+#endif
+
+#if defined(__GNUC__) && !defined(__clang__)
+#if ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6))
+#pragma GCC diagnostic push
+#define PDIAGNOSTIC_PUSHED_GCC 1
+#else
+#define PDIAGNOSTIC_PUSHED_GCC 0
+#endif // GNUC >= 4.6
+#else
+#define PDIAGNOSTIC_PUSHED_GCC 0
+#endif // defined(__GNUC__) && !defined(__clang__)
+
+#include "pdiagnostic.h"
diff --git a/nostrdb/flatcc/portable/pendian.h b/nostrdb/flatcc/portable/pendian.h
@@ -0,0 +1,206 @@
+#ifndef PENDIAN_H
+#define PENDIAN_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Defines platform optimized versions (as per Linux <endian.h>) of:
+ *
+ * le16toh, le32toh, le64toh, be16toh, be32toh, be64toh
+ * htole16, htole32, htole64, htobe16, htobe32, htobe64
+ *
+ * Falls back to auto-detected endian conversion, which is also fast
+ * if a fast byteswap operation was detected.
+ *
+ * Also defines platform optimized:
+ *
+ * bswap16, bswap32, bswap64,
+ *
+ * with fall-back to a shift-or implementation.
+ *
+ * For convenience also defines:
+ *
+ * le8toh, be8toh, htole8, htobe8
+ * bswap8
+ *
+ * The convenience functions make it simpler to define conversion
+ * macros based on type size.
+ *
+ * NOTE: this implementation expects arguments with no side-effects and
+ * with appropriately sized unsigned arguments. These are expected to be
+ * used with typesafe wrappers.
+ */
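+
+/*
+ * Usage sketch: decoding a little-endian 32-bit field from a byte
+ * buffer, independent of host byte order (memcpy avoids unaligned
+ * access; see also punaligned.h):
+ *
+ *     uint32_t tmp;
+ *     memcpy(&tmp, buf, sizeof(tmp));
+ *     uint32_t n = le32toh(tmp);
+ */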
+
+#ifndef UINT8_MAX
+#include "pstdint.h"
+#endif
+
+#if defined(__linux__)
+#include <endian.h>
+#elif defined(__OpenBSD__) || defined(__FreeBSD__)
+#include <sys/endian.h>
+#endif
+
+#include "pendian_detect.h"
+
+#if defined(_MSC_VER)
+#if _MSC_VER >= 1300
+#include <stdlib.h>
+#define bswap16 _byteswap_ushort
+#define bswap32 _byteswap_ulong
+#define bswap64 _byteswap_uint64
+#endif
+#elif defined(__clang__)
+#if __has_builtin(__builtin_bswap16)
+#ifndef bswap16
+#define bswap16 __builtin_bswap16
+#endif
+#endif
+#if __has_builtin(__builtin_bswap32)
+#ifndef bswap32
+#define bswap32 __builtin_bswap32
+#endif
+#endif
+#if __has_builtin(__builtin_bswap64)
+#ifndef bswap64
+#define bswap64 __builtin_bswap64
+#endif
+#endif
+#elif defined(__OpenBSD__) || defined(__FreeBSD__)
+#ifndef bswap16
+#define bswap16 swap16
+#endif
+#ifndef bswap32
+#define bswap32 swap32
+#endif
+#ifndef bswap64
+#define bswap64 swap64
+#endif
+#elif defined(__GNUC__) /* Supported since at least GCC 4.4 */
+#ifndef bswap32
+#define bswap32 __builtin_bswap32
+#endif
+#ifndef bswap64
+#define bswap64 __builtin_bswap64
+#endif
+#endif
+
+#ifndef bswap16
+#define bswap16(v) \
+ (((uint16_t)(v) << 8) | ((uint16_t)(v) >> 8))
+#endif
+
+#ifndef bswap32
+#define bswap32(v) \
+ ((((uint32_t)(v) << 24)) \
+ | (((uint32_t)(v) << 8) & UINT32_C(0x00FF0000)) \
+ | (((uint32_t)(v) >> 8) & UINT32_C(0x0000FF00)) \
+ | (((uint32_t)(v) >> 24)))
+#endif
+
+#ifndef bswap64
+#define bswap64(v) \
+ ((((uint64_t)(v) << 56)) \
+ | (((uint64_t)(v) << 40) & UINT64_C(0x00FF000000000000)) \
+ | (((uint64_t)(v) << 24) & UINT64_C(0x0000FF0000000000)) \
+ | (((uint64_t)(v) << 8) & UINT64_C(0x000000FF00000000)) \
+ | (((uint64_t)(v) >> 8) & UINT64_C(0x00000000FF000000)) \
+ | (((uint64_t)(v) >> 24) & UINT64_C(0x0000000000FF0000)) \
+ | (((uint64_t)(v) >> 40) & UINT64_C(0x000000000000FF00)) \
+ | (((uint64_t)(v) >> 56)))
+#endif
+
+#ifndef bswap8
+#define bswap8(v) ((uint8_t)(v))
+#endif
+
+#if !defined(le16toh) && defined(letoh16)
+#define le16toh letoh16
+#define le32toh letoh32
+#define le64toh letoh64
+#endif
+
+#if !defined(be16toh) && defined(betoh16)
+#define be16toh betoh16
+#define be32toh betoh32
+#define be64toh betoh64
+#endif
+
+/* Assume that if le16toh is missing, the rest are missing too. */
+#if !defined(le16toh)
+
+#if defined(__LITTLE_ENDIAN__)
+
+#define le16toh(v) (v)
+#define le32toh(v) (v)
+#define le64toh(v) (v)
+
+#define htole16(v) (v)
+#define htole32(v) (v)
+#define htole64(v) (v)
+
+#define be16toh(v) bswap16(v)
+#define be32toh(v) bswap32(v)
+#define be64toh(v) bswap64(v)
+
+#define htobe16(v) bswap16(v)
+#define htobe32(v) bswap32(v)
+#define htobe64(v) bswap64(v)
+
+#elif defined(__BIG_ENDIAN__)
+
+#define le16toh(v) bswap16(v)
+#define le32toh(v) bswap32(v)
+#define le64toh(v) bswap64(v)
+
+#define htole16(v) bswap16(v)
+#define htole32(v) bswap32(v)
+#define htole64(v) bswap64(v)
+
+#define be16toh(v) (v)
+#define be32toh(v) (v)
+#define be64toh(v) (v)
+
+#define htobe16(v) (v)
+#define htobe32(v) (v)
+#define htobe64(v) (v)
+
+#else
+
+static const int __pendian_test = 1;
+
+#define le16toh(v) (*(char *)&__pendian_test ? (v) : bswap16(v))
+#define le32toh(v) (*(char *)&__pendian_test ? (v) : bswap32(v))
+#define le64toh(v) (*(char *)&__pendian_test ? (v) : bswap64(v))
+
+#define htole16(v) (*(char *)&__pendian_test ? (v) : bswap16(v))
+#define htole32(v) (*(char *)&__pendian_test ? (v) : bswap32(v))
+#define htole64(v) (*(char *)&__pendian_test ? (v) : bswap64(v))
+
+#define be16toh(v) (*(char *)&__pendian_test ? bswap16(v) : (v))
+#define be32toh(v) (*(char *)&__pendian_test ? bswap32(v) : (v))
+#define be64toh(v) (*(char *)&__pendian_test ? bswap64(v) : (v))
+
+#define htobe16(v) (*(char *)&__pendian_test ? bswap16(v) : (v))
+#define htobe32(v) (*(char *)&__pendian_test ? bswap32(v) : (v))
+#define htobe64(v) (*(char *)&__pendian_test ? bswap64(v) : (v))
+
+#endif
+
+#endif /* le16toh */
+
+/* Helpers not part of Linux <endian.h> */
+#if !defined(le8toh)
+#define le8toh(n) (n)
+#define htole8(n) (n)
+#define be8toh(n) (n)
+#define htobe8(n) (n)
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* PENDIAN_H */
diff --git a/nostrdb/flatcc/portable/pendian_detect.h b/nostrdb/flatcc/portable/pendian_detect.h
@@ -0,0 +1,118 @@
+/*
+ * Uses various known flags to decide endianness and defines:
+ *
+ * __LITTLE_ENDIAN__ or __BIG_ENDIAN__ if not already defined
+ *
+ * and also defines
+ *
+ * __BYTE_ORDER__ to either __ORDER_LITTLE_ENDIAN__ or
+ * __ORDER_BIG_ENDIAN__ if not already defined
+ *
+ * If none of these could be set, __UNKNOWN_ENDIAN__ is defined,
+ * which is not a known flag. If __BYTE_ORDER__ is defined but
+ * not big or little endian, __UNKNOWN_ENDIAN__ is also defined.
+ *
+ * Note: Some systems define __BYTE_ORDER without __ at the end
+ * - this will be mapped to __BYTE_ORDER__.
+ */
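+
+/*
+ * Usage sketch:
+ *
+ *     #include "pendian_detect.h"
+ *     #if defined(__LITTLE_ENDIAN__)
+ *     // no byte swap needed for little-endian wire formats
+ *     #elif defined(__BIG_ENDIAN__)
+ *     // swap on load/store
+ *     #else
+ *     // __UNKNOWN_ENDIAN__: decide at runtime
+ *     #endif
+ */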
+
+#ifndef PENDIAN_DETECT
+#define PENDIAN_DETECT
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef __ORDER_LITTLE_ENDIAN__
+#define __ORDER_LITTLE_ENDIAN__ 1234
+#endif
+
+#ifndef __ORDER_BIG_ENDIAN__
+#define __ORDER_BIG_ENDIAN__ 4321
+#endif
+
+#ifdef __BYTE_ORDER__
+
+#if defined(__LITTLE_ENDIAN__) && __BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__
+#error __LITTLE_ENDIAN__ inconsistent with __BYTE_ORDER__
+#endif
+
+#if defined(__BIG_ENDIAN__) && __BYTE_ORDER__ != __ORDER_BIG_ENDIAN__
+#error __BIG_ENDIAN__ inconsistent with __BYTE_ORDER__
+#endif
+
+#else /* __BYTE_ORDER__ */
+
+
+#if \
+ defined(__LITTLE_ENDIAN__) || \
+ (defined(__BYTE_ORDER) && __BYTE_ORDER == __ORDER_LITTLE_ENDIAN) || \
+ defined(__ARMEL__) || defined(__THUMBEL__) || \
+ defined(__AARCH64EL__) || \
+ (defined(_MSC_VER) && defined(_M_ARM)) || \
+ defined(_MIPSEL) || defined(__MIPSEL) || defined(__MIPSEL__) || \
+ defined(_M_X64) || defined(_M_IX86) || defined(_M_I86) || \
+ defined(__i386__) || defined(__alpha__) || \
+ defined(__ia64) || defined(__ia64__) || \
+ defined(_M_IA64) || defined(_M_ALPHA) || \
+ defined(__amd64) || defined(__amd64__) || defined(_M_AMD64) || \
+ defined(__x86_64) || defined(__x86_64__) || defined(_M_X64) || \
+ defined(__bfin__)
+
+#define __BYTE_ORDER__ __ORDER_LITTLE_ENDIAN__
+
+#endif
+
+#if \
+ defined (__BIG_ENDIAN__) || \
+ (defined(__BYTE_ORDER) && __BYTE_ORDER == __ORDER_BIG_ENDIAN) || \
+ defined(__ARMEB__) || defined(__THUMBEB__) || defined(__AARCH64EB__) || \
+ defined(_MIPSEB) || defined(__MIPSEB) || defined(__MIPSEB__) || \
+ defined(__sparc) || defined(__sparc__) || \
+ defined(_POWER) || defined(__powerpc__) || defined(__ppc__) || \
+ defined(__hpux) || defined(__hppa) || defined(__s390__)
+
+#define __BYTE_ORDER__ __ORDER_BIG_ENDIAN__
+
+#endif
+
+#endif /* __BYTE_ORDER__ */
+
+#ifdef __BYTE_ORDER__
+
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+
+#ifndef __LITTLE_ENDIAN__
+#define __LITTLE_ENDIAN__ 1
+#endif
+
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+
+#ifndef __BIG_ENDIAN__
+#define __BIG_ENDIAN__ 1
+#endif
+
+#else
+
+/*
+ * Custom extension - we only define __BYTE_ORDER__ if known big or little.
+ * User code that understands __BYTE_ORDER__ may also assume unknown if
+ * it is not defined by now - this allows endian formats other than
+ * big or little when supported by the compiler.
+ */
+#ifndef __UNKNOWN_ENDIAN__
+#define __UNKNOWN_ENDIAN__ 1
+#endif
+
+#endif
+#endif /* __BYTE_ORDER__ */
+
+#if defined(__LITTLE_ENDIAN__) && defined(__BIG_ENDIAN__)
+#error conflicting definitions of __LITTLE_ENDIAN__ and __BIG_ENDIAN__
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* PENDIAN_DETECT */
diff --git a/nostrdb/flatcc/portable/pinline.h b/nostrdb/flatcc/portable/pinline.h
@@ -0,0 +1,19 @@
+#ifndef PINLINE_H
+#define PINLINE_H
+
+#ifndef __cplusplus
+
+#if (defined(__STDC__) && __STDC__ && defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L)
+/* C99 or newer */
+#elif _MSC_VER >= 1500 /* MSVC 9 or newer */
+#undef inline
+#define inline __inline
+#elif __GNUC__ >= 3 /* GCC 3 or newer */
+#define inline __inline
+#else /* Unknown or ancient */
+#define inline
+#endif
+
+#endif /* __cplusplus */
+
+#endif /* PINLINE_H */
diff --git a/nostrdb/flatcc/portable/pinttypes.h b/nostrdb/flatcc/portable/pinttypes.h
@@ -0,0 +1,52 @@
+#ifndef PINTTYPES_H
+#define PINTTYPES_H
+
+#ifndef PRId16
+
+#if (defined(__STDC__) && __STDC__ && defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L)
+/* C99 or newer */
+#include <inttypes.h>
+#else
+
+/*
+ * This is not a complete implementation of <inttypes.h>, just the most
+ * useful printf modifiers.
+ */
+
+#include "pstdint.h"
+
+#ifndef PRINTF_INT64_MODIFIER
+#error "please define PRINTF_INT64_MODIFIER"
+#endif
+
+#ifndef PRId64
+#define PRId64 PRINTF_INT64_MODIFIER "d"
+#define PRIu64 PRINTF_INT64_MODIFIER "u"
+#define PRIx64 PRINTF_INT64_MODIFIER "x"
+#endif
+
+#ifndef PRINTF_INT32_MODIFIER
+#define PRINTF_INT32_MODIFIER "l"
+#endif
+
+#ifndef PRId32
+#define PRId32 PRINTF_INT32_MODIFIER "d"
+#define PRIu32 PRINTF_INT32_MODIFIER "u"
+#define PRIx32 PRINTF_INT32_MODIFIER "x"
+#endif
+
+#ifndef PRINTF_INT16_MODIFIER
+#define PRINTF_INT16_MODIFIER "h"
+#endif
+
+#ifndef PRId16
+#define PRId16 PRINTF_INT16_MODIFIER "d"
+#define PRIu16 PRINTF_INT16_MODIFIER "u"
+#define PRIx16 PRINTF_INT16_MODIFIER "x"
+#endif
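+
+/*
+ * Usage sketch:
+ *
+ *     printf("%" PRId64 " %" PRIu32 "\n", (int64_t)-42, (uint32_t)7);
+ */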
+
+#endif /* __STDC__ */
+
+#endif /* PRId16 */
+
+#endif /* PINTTYPES_H */
diff --git a/nostrdb/flatcc/portable/portable.h b/nostrdb/flatcc/portable/portable.h
@@ -0,0 +1,2 @@
+/* portable.h is widely used, so we redirect to a less conflicting name. */
+#include "portable_basic.h"
diff --git a/nostrdb/flatcc/portable/portable_basic.h b/nostrdb/flatcc/portable/portable_basic.h
@@ -0,0 +1,25 @@
+#ifndef PORTABLE_BASIC_H
+#define PORTABLE_BASIC_H
+
+/*
+ * Basic features needed to make compilers support the most common
+ * modern C features, with endian and unaligned read support as well.
+ *
+ * It is not assumed that this file is always included.
+ * Other include files are independent or include what they need.
+ */
+
+#include "pversion.h"
+#include "pwarnings.h"
+
+/* Features that ought to be supported by C11, but some aren't. */
+#include "pinttypes.h"
+#include "pstdalign.h"
+#include "pinline.h"
+#include "pstatic_assert.h"
+
+/* These are not supported by C11 and are general platform abstractions. */
+#include "pendian.h"
+#include "punaligned.h"
+
+#endif /* PORTABLE_BASIC_H */
diff --git a/nostrdb/flatcc/portable/pparsefp.h b/nostrdb/flatcc/portable/pparsefp.h
@@ -0,0 +1,140 @@
+#ifndef PPARSEFP_H
+#define PPARSEFP_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Parses a float or double number and returns the length parsed if
+ * successful. The length argument is of limited value due to the
+ * dependency on `strtod` - buf[len] must be accessible and must not be
+ * part of a valid number, including hex float numbers.
+ *
+ * Unlike strtod, whitespace is not parsed.
+ *
+ * May return:
+ * - null on error,
+ * - buffer start if the first character does not start a number,
+ * - or end of parse on success.
+ *
+ */
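+
+/*
+ * Usage sketch: buf[len] must be readable and must not extend the
+ * number (a '\0' or ',' terminator works):
+ *
+ *     double v;
+ *     const char *end = parse_double(buf, len, &v);
+ *     if (end == 0) { error }
+ *     else if (end == buf) { not a number }
+ *     else { v parsed; continue at end }
+ */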
+
+#define PDIAGNOSTIC_IGNORE_UNUSED_FUNCTION
+#include "pdiagnostic_push.h"
+
+/*
+ * isinf is needed in order to stay compatible with strtod's
+ * over/underflow handling but isinf has some portability issues.
+ *
+ * Use the parse_double/float_is_range_error instead of isinf directly.
+ * This ensures optimizations can be added when not using strtod.
+ *
+ * On gcc, clang and msvc we can use isinf or equivalent directly.
+ * Other compilers such as xlc may require linking with -lm, which may
+ * not be convenient, so a default isinf is provided. If isinf is
+ * available and there is a noticeable performance issue, define
+ * `PORTABLE_USE_ISINF`.
+ */
+#if defined(__GNUC__) || defined(__clang__) || defined(_MSC_VER) || defined(PORTABLE_USE_ISINF)
+#include <math.h>
+#if defined(_MSC_VER) && !defined(isinf)
+#include <float.h>
+#define isnan _isnan
+#define isinf(x) (!_finite(x))
+#endif
+/*
+ * clang-5 through clang-8 but not clang-9 issues incorrect precision
+ * loss warning with -Wconversion flag when cast is absent.
+ */
+#if defined(__clang__)
+#if __clang_major__ >= 5 && __clang_major__ <= 8
+#define parse_double_isinf(x) isinf((float)x)
+#endif
+#endif
+#if !defined(parse_double_isinf)
+#define parse_double_isinf isinf
+#endif
+#define parse_float_isinf isinf
+
+#else
+
+#ifndef UINT8_MAX
+#include <stdint.h>
+#endif
+
+/* Avoid linking with libmath but depends on float/double being IEEE754 */
+static inline int parse_double_isinf(double x)
+{
+ union { uint64_t u64; double f64; } v;
+ v.f64 = x;
+ return (v.u64 & 0x7fffffff00000000ULL) == 0x7ff0000000000000ULL;
+}
+
+static inline int parse_float_isinf(float x)
+{
+ union { uint32_t u32; float f32; } v;
+ v.f32 = x;
+ return (v.u32 & 0x7fffffff) == 0x7f800000;
+}
+#endif
+
+/* Returns 0 when in range, 1 on overflow, and -1 on underflow. */
+static inline int parse_double_is_range_error(double x)
+{
+ return parse_double_isinf(x) ? (x < 0.0 ? -1 : 1) : 0;
+}
+
+static inline int parse_float_is_range_error(float x)
+{
+ return parse_float_isinf(x) ? (x < 0.0f ? -1 : 1) : 0;
+}
+
+#ifndef PORTABLE_USE_GRISU3
+#define PORTABLE_USE_GRISU3 1
+#endif
+
+#if PORTABLE_USE_GRISU3
+#include "grisu3_parse.h"
+#endif
+
+#ifdef grisu3_parse_double_is_defined
+static inline const char *parse_double(const char *buf, size_t len, double *result)
+{
+ return grisu3_parse_double(buf, len, result);
+}
+#else
+#include <stdlib.h> /* strtod */
+static inline const char *parse_double(const char *buf, size_t len, double *result)
+{
+ char *end;
+
+ (void)len;
+ *result = strtod(buf, &end);
+ return end;
+}
+#endif
+
+static inline const char *parse_float(const char *buf, size_t len, float *result)
+{
+ const char *end;
+ double v;
+ union { uint32_t u32; float f32; } inf;
+ inf.u32 = 0x7f800000;
+
+ end = parse_double(buf, len, &v);
+ *result = (float)v;
+ if (parse_float_isinf(*result)) {
+ *result = v < 0 ? -inf.f32 : inf.f32;
+ return buf;
+ }
+ return end;
+}
+
+#include "pdiagnostic_pop.h"
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* PPARSEFP_H */
diff --git a/nostrdb/flatcc/portable/pparseint.h b/nostrdb/flatcc/portable/pparseint.h
@@ -0,0 +1,374 @@
+#ifndef PPARSEINT_H
+#define PPARSEINT_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Type specific integer parsers:
+ *
+ * const char *
+ * parse_<type-name>(const char *buf, size_t len, <type> *value, int *status);
+ *
+ * parse_uint64, parse_int64
+ * parse_uint32, parse_int32
+ * parse_uint16, parse_int16
+ * parse_uint8, parse_int8
+ * parse_ushort, parse_short
+ * parse_uint, parse_int
+ * parse_ulong, parse_long
+ *
+ * Leading space must be stripped in advance. Status argument can be
+ * null.
+ *
+ * Returns pointer to end of match and a non-negative status code
+ * on success (0 for unsigned, 1 for signed):
+ *
+ * PARSE_INTEGER_UNSIGNED
+ * PARSE_INTEGER_SIGNED
+ *
+ * Returns null with a negative status code and unmodified value on
+ * invalid integer formats:
+ *
+ * PARSE_INTEGER_OVERFLOW
+ * PARSE_INTEGER_UNDERFLOW
+ * PARSE_INTEGER_INVALID
+ *
+ * Returns input buffer with negative status code and unmodified value
+ * if first character does not start an integer (not a sign or a digit).
+ *
+ * PARSE_INTEGER_UNMATCHED
+ * PARSE_INTEGER_END
+ *
+ * The signed parsers only work on two's complement architectures.
+ *
+ * Note: the corresponding parse_float and parse_double parsers do not
+ * have a status argument because +/-Inf and NaN are conventionally used
+ * for this.
+ */
+
+#include "limits.h"
+#ifndef UINT8_MAX
+#include <stdint.h>
+#endif
+
+#define PARSE_INTEGER_UNSIGNED 0
+#define PARSE_INTEGER_SIGNED 1
+#define PARSE_INTEGER_OVERFLOW -1
+#define PARSE_INTEGER_UNDERFLOW -2
+#define PARSE_INTEGER_INVALID -3
+#define PARSE_INTEGER_UNMATCHED -4
+#define PARSE_INTEGER_END -5
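+
+/*
+ * Usage sketch (illustrative):
+ *
+ *     int32_t v;
+ *     int status;
+ *     const char *end = parse_int32(buf, len, &v, &status);
+ *     if (end == 0) { overflow/underflow/invalid per status }
+ *     else if (end == buf) { no digits (PARSE_INTEGER_UNMATCHED) }
+ *     else { v holds the parsed value; continue at end }
+ */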
+
+/*
+ * Generic integer parser that holds 64-bit unsigned values and stores
+ * sign separately. Leading space is not valid.
+ *
+ * Note: this function differs from the type specific parsers like
+ * parse_int64 by not negating the value when there is a sign. It
+ * differs from parse_uint64 by being able to return a negative
+ * UINT64_MAX successfully.
+ *
+ * This parser is used by all type specific integer parsers.
+ *
+ * Status argument can be null.
+ */
+static const char *parse_integer(const char *buf, size_t len, uint64_t *value, int *status)
+{
+ uint64_t x0, x = 0;
+ const char *k, *end = buf + len;
+ int sign, status_;
+
+ if (!status) {
+ status = &status_;
+ }
+ if (buf == end) {
+ *status = PARSE_INTEGER_END;
+ return buf;
+ }
+ k = buf;
+ sign = *buf == '-';
+ buf += sign;
+ while (buf != end && *buf >= '0' && *buf <= '9') {
+ x0 = x;
+ x = x * 10 + (uint64_t)(*buf - '0');
+ if (x0 > x) {
+ *status = sign ? PARSE_INTEGER_UNDERFLOW : PARSE_INTEGER_OVERFLOW;
+ return 0;
+ }
+ ++buf;
+ }
+ if (buf == k) {
+ /* No number was matched, but it isn't an invalid number either. */
+ *status = PARSE_INTEGER_UNMATCHED;
+ return buf;
+ }
+ if (buf == k + sign) {
+ *status = PARSE_INTEGER_INVALID;
+ return 0;
+ }
+ if (buf != end)
+ switch (*buf) {
+ case 'e': case 'E': case '.': case 'p': case 'P':
+ *status = PARSE_INTEGER_INVALID;
+ return 0;
+ }
+ *value = x;
+ *status = sign;
+ return buf;
+}
+
+/*
+ * Parse hex values like 0xff, -0xff, 0XdeAdBeaf42, cannot be trailed by '.', 'p', or 'P'.
+ * Overflows if string is more than 16 valid hex digits. Otherwise similar to parse_integer.
+ */
+static const char *parse_hex_integer(const char *buf, size_t len, uint64_t *value, int *status)
+{
+ uint64_t x = 0;
+ const char *k, *k2, *end = buf + len;
+ int sign, status_;
+ unsigned char c;
+
+ if (!status) {
+ status = &status_;
+ }
+ if (buf == end) {
+ *status = PARSE_INTEGER_END;
+ return buf;
+ }
+ sign = *buf == '-';
+ buf += sign;
+ if (end - buf < 2 || buf[0] != '0' || (buf[1] | 0x20) != 'x') {
+ *status = PARSE_INTEGER_UNMATCHED;
+ return buf - sign;
+ }
+ buf += 2;
+ k = buf;
+ k2 = end;
+ if (end - buf > 16) {
+ k2 = buf + 16;
+ }
+ while (buf != k2) {
+ c = (unsigned char)*buf;
+ if (c >= '0' && c <= '9') {
+ x = x * 16 + c - '0';
+ } else {
+ /* Lower case. */
+ c |= 0x20;
+ if (c >= 'a' && c <= 'f') {
+ x = x * 16 + c - 'a' + 10;
+ } else {
+ break;
+ }
+ }
+ ++buf;
+ }
+ if (buf == k) {
+ if (sign) {
+ *status = PARSE_INTEGER_INVALID;
+ return 0;
+ } else {
+ /* No number was matched, but it isn't an invalid number either. */
+ *status = PARSE_INTEGER_UNMATCHED;
+ return buf;
+ }
+ }
+ if (buf == end) {
+ goto done;
+ }
+ c = (unsigned char)*buf;
+ if (buf == k2) {
+ if (c >= '0' && c <= '9') {
+ *status = sign ? PARSE_INTEGER_UNDERFLOW : PARSE_INTEGER_OVERFLOW;
+ return 0;
+ }
+ c |= 0x20;
+ if (c >= 'a' && c <= 'f') {
+ *status = sign ? PARSE_INTEGER_UNDERFLOW : PARSE_INTEGER_OVERFLOW;
+ return 0;
+ }
+ }
+ switch (c) {
+ case '.': case 'p': case 'P':
+ *status = PARSE_INTEGER_INVALID;
+ return 0;
+ }
+done:
+ *value = x;
+ *status = sign;
+ return buf;
+}
+
+
+#define __portable_define_parse_unsigned(NAME, TYPE, LIMIT) \
+static inline const char *parse_ ## NAME \
+ (const char *buf, size_t len, TYPE *value, int *status) \
+{ \
+ int status_ = 0; \
+ uint64_t x; \
+ \
+ if (!status) { \
+ status = &status_; \
+ } \
+ buf = parse_integer(buf, len, &x, status); \
+ switch (*status) { \
+ case PARSE_INTEGER_UNSIGNED: \
+ if (x <= LIMIT) { \
+ *value = (TYPE)x; \
+ return buf; \
+ } \
+ *status = PARSE_INTEGER_OVERFLOW; \
+ return 0; \
+ case PARSE_INTEGER_SIGNED: \
+ *status = PARSE_INTEGER_UNDERFLOW; \
+ return 0; \
+ default: \
+ return buf; \
+ } \
+}
+
+#define __portable_define_parse_hex_unsigned(NAME, TYPE, LIMIT) \
+static inline const char *parse_hex_ ## NAME \
+ (const char *buf, size_t len, TYPE *value, int *status) \
+{ \
+ int status_ = 0; \
+ uint64_t x; \
+ \
+ if (!status) { \
+ status = &status_; \
+ } \
+ buf = parse_hex_integer(buf, len, &x, status); \
+ switch (*status) { \
+ case PARSE_INTEGER_UNSIGNED: \
+ if (x <= LIMIT) { \
+ *value = (TYPE)x; \
+ return buf; \
+ } \
+ *status = PARSE_INTEGER_OVERFLOW; \
+ return 0; \
+ case PARSE_INTEGER_SIGNED: \
+ *status = PARSE_INTEGER_UNDERFLOW; \
+ return 0; \
+ default: \
+ return buf; \
+ } \
+}
+
+/* This assumes two's complement. */
+#define __portable_define_parse_signed(NAME, TYPE, LIMIT) \
+static inline const char *parse_ ## NAME \
+ (const char *buf, size_t len, TYPE *value, int *status) \
+{ \
+ int status_ = 0; \
+ uint64_t x; \
+ \
+ if (!status) { \
+ status = &status_; \
+ } \
+ buf = parse_integer(buf, len, &x, status); \
+ switch (*status) { \
+ case PARSE_INTEGER_UNSIGNED: \
+ if (x <= LIMIT) { \
+ *value = (TYPE)x; \
+ return buf; \
+ } \
+ *status = PARSE_INTEGER_OVERFLOW; \
+ return 0; \
+ case PARSE_INTEGER_SIGNED: \
+ if (x <= (uint64_t)(LIMIT) + 1) { \
+ *value = (TYPE)-(int64_t)x; \
+ return buf; \
+ } \
+ *status = PARSE_INTEGER_UNDERFLOW; \
+ return 0; \
+ default: \
+ return buf; \
+ } \
+}
+
+/* This assumes two's complement. */
+#define __portable_define_parse_hex_signed(NAME, TYPE, LIMIT) \
+static inline const char *parse_hex_ ## NAME \
+ (const char *buf, size_t len, TYPE *value, int *status) \
+{ \
+ int status_ = 0; \
+ uint64_t x; \
+ \
+ if (!status) { \
+ status = &status_; \
+ } \
+ buf = parse_hex_integer(buf, len, &x, status); \
+ switch (*status) { \
+ case PARSE_INTEGER_UNSIGNED: \
+ if (x <= LIMIT) { \
+ *value = (TYPE)x; \
+ return buf; \
+ } \
+ *status = PARSE_INTEGER_OVERFLOW; \
+ return 0; \
+ case PARSE_INTEGER_SIGNED: \
+ if (x <= (uint64_t)(LIMIT) + 1) { \
+ *value = (TYPE)-(int64_t)x; \
+ return buf; \
+ } \
+ *status = PARSE_INTEGER_UNDERFLOW; \
+ return 0; \
+ default: \
+ return buf; \
+ } \
+}
+
+static inline const char *parse_uint64(const char *buf, size_t len, uint64_t *value, int *status)
+{
+ buf = parse_integer(buf, len, value, status);
+ if (*status == PARSE_INTEGER_SIGNED) {
+ *status = PARSE_INTEGER_UNDERFLOW;
+ return 0;
+ }
+ return buf;
+}
+
+static inline const char *parse_hex_uint64(const char *buf, size_t len, uint64_t *value, int *status)
+{
+ buf = parse_hex_integer(buf, len, value, status);
+ if (*status == PARSE_INTEGER_SIGNED) {
+ *status = PARSE_INTEGER_UNDERFLOW;
+ return 0;
+ }
+ return buf;
+}
+
+__portable_define_parse_signed(int64, int64_t, INT64_MAX)
+__portable_define_parse_signed(int32, int32_t, INT32_MAX)
+__portable_define_parse_unsigned(uint16, uint16_t, UINT16_MAX)
+__portable_define_parse_signed(int16, int16_t, INT16_MAX)
+__portable_define_parse_unsigned(uint8, uint8_t, UINT8_MAX)
+__portable_define_parse_signed(int8, int8_t, INT8_MAX)
+
+__portable_define_parse_hex_signed(int64, int64_t, INT64_MAX)
+__portable_define_parse_hex_signed(int32, int32_t, INT32_MAX)
+__portable_define_parse_hex_unsigned(uint16, uint16_t, UINT16_MAX)
+__portable_define_parse_hex_signed(int16, int16_t, INT16_MAX)
+__portable_define_parse_hex_unsigned(uint8, uint8_t, UINT8_MAX)
+__portable_define_parse_hex_signed(int8, int8_t, INT8_MAX)
+
+__portable_define_parse_unsigned(ushort, unsigned short, USHRT_MAX)
+__portable_define_parse_signed(short, short, SHRT_MAX)
+__portable_define_parse_unsigned(uint, unsigned int, UINT_MAX)
+__portable_define_parse_signed(int, int, INT_MAX)
+__portable_define_parse_unsigned(ulong, unsigned long, ULONG_MAX)
+__portable_define_parse_signed(long, long, LONG_MAX)
+
+__portable_define_parse_hex_unsigned(ushort, unsigned short, USHRT_MAX)
+__portable_define_parse_hex_signed(short, short, SHRT_MAX)
+__portable_define_parse_hex_unsigned(uint, unsigned int, UINT_MAX)
+__portable_define_parse_hex_signed(int, int, INT_MAX)
+__portable_define_parse_hex_unsigned(ulong, unsigned long, ULONG_MAX)
+__portable_define_parse_hex_signed(long, long, LONG_MAX)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* PPARSEINT_H */
diff --git a/nostrdb/flatcc/portable/pprintfp.h b/nostrdb/flatcc/portable/pprintfp.h
@@ -0,0 +1,39 @@
+#ifndef PPRINTFP_H
+#define PPRINTFP_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define PDIAGNOSTIC_IGNORE_UNUSED_FUNCTION
+#include "pdiagnostic_push.h"
+
+#ifndef PORTABLE_USE_GRISU3
+#define PORTABLE_USE_GRISU3 1
+#endif
+
+
+#if PORTABLE_USE_GRISU3
+#include "grisu3_print.h"
+#endif
+
+#ifdef grisu3_print_double_is_defined
+/* Currently there is no special support for floats. */
+#define print_float(n, p) grisu3_print_double((float)(n), (p))
+#define print_double(n, p) grisu3_print_double((double)(n), (p))
+#else
+#include <stdio.h>
+#define print_float(n, p) sprintf(p, "%.9g", (float)(n))
+#define print_double(n, p) sprintf(p, "%.17g", (double)(n))
+#endif
+
+#define print_hex_float(n, p) sprintf(p, "%a", (float)(n))
+#define print_hex_double(n, p) sprintf(p, "%a", (double)(n))
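+
+/*
+ * Usage sketch (buffer sizing is an assumption; the shortest grisu3
+ * output for a double fits well within 32 bytes):
+ *
+ *     char buf[32];
+ *     print_double(3.25, buf);
+ */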
+
+#include "pdiagnostic_pop.h"
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* PPRINTFP_H */
diff --git a/nostrdb/flatcc/portable/pprintint.h b/nostrdb/flatcc/portable/pprintint.h
@@ -0,0 +1,628 @@
+/*
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2016 Mikkel F. Jørgensen, dvide.com
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ *
+ * Fast printing of (u)int8/16/32/64_t, (u)int, (u)long.
+ *
+ * Functions take the form:
+ *
+ * int print_<type>(type value, char *buf);
+ *
+ * and return the number of characters printed, excluding the trailing
+ * '\0', which is also printed. Prints at most 21 characters including
+ * zero-termination.
+ *
+ * The function `print_bool` is a bit different - it simply prints "true\0" for
+ * non-zero integers, and "false\0" otherwise.
+ *
+ * The general algorithm is in-place formatting using a binary search
+ * for log10, followed by Duff's device style loop unrolling of
+ * div / 100 stages.
+ *
+ * The simpler post copy algorithm, also provided for print_(u)int and
+ * print_(u)long, uses a temp buffer, loops over div/100, and copies
+ * to the target buffer afterwards.
+ *
+ *
+ * Benchmarks on core-i7, 2.2GHz, 64-bit clang/OS-X -O2:
+ *
+ * print_int64: avg 15ns for values between INT64_MIN + (10^7/2 .. 10^7/2)
+ * print_int64: avg 11ns for values between 10^9 + (0..10,000,000).
+ * print_int32: avg 7ns for values cast from INT64_MIN + (10^7/2 .. 10^7/2)
+ * print_int32: avg 7ns for values between 10^9 + (0..10,000,000).
+ * print_int64: avg 13ns for values between 10^16 + (0..10,000,000).
+ * print_int64: avg 5ns for values between 0 and 10,000,000.
+ * print_int32: avg 5ns for values between 0 and 10,000,000.
+ * print_int16: avg 10ns for values cast from 0 and 10,000,000.
+ * print_int8: avg 4ns for values cast from 0 and 10,000,000.
+ *
+ * Post copy algorithm:
+ * print_int: avg 12ns for values between INT64_MIN + (10^7/2 .. 10^7/2)
+ * print_int: avg 14ns for values between 10^9 + (0..10,000,000).
+ * print_long: avg 29ns for values between INT64_MIN + (10^7/2 .. 10^7/2)
+ *
+ * The post copy algorithm is nearly half as fast as the in-place
+ * algorithm, but can also be faster occasionally - possibly because
+ * the optimizer is able to skip the copy step.
+ */
+
+#ifndef PPRINTINT_H
+#define PPRINTINT_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef UINT8_MAX
+#include <stdint.h>
+#endif
+
+#include <string.h> /* memcpy in print_bool */
+
+#include "pattributes.h" /* fallthrough */
+
+#define PDIAGNOSTIC_IGNORE_UNUSED_FUNCTION
+#include "pdiagnostic_push.h"
+
+static int print_bool(int n, char *p);
+
+static int print_uint8(uint8_t n, char *p);
+static int print_uint16(uint16_t n, char *p);
+static int print_uint32(uint32_t n, char *p);
+static int print_uint64(uint64_t n, char *p);
+static int print_int8(int8_t n, char *p);
+static int print_int16(int16_t n, char *p);
+static int print_int32(int32_t n, char *p);
+static int print_int64(int64_t n, char *p);
+
+/*
+ * Uses a slightly slower, but more compact algorithm
+ * that is not hardcoded to the implementation size.
+ * Other types may be defined using macros below.
+ */
+static int print_ulong(unsigned long n, char *p);
+static int print_uint(unsigned int n, char *p);
+static int print_int(int n, char *p);
+static int print_long(long n, char *p);
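+
+/*
+ * Usage sketch: the target buffer must hold up to 21 bytes for the
+ * 64-bit variants (sign, at most 20 digits, and the '\0'):
+ *
+ *     char buf[21];
+ *     int n = print_int64(-1234567890123456789LL, buf);
+ *     // buf now holds "-1234567890123456789" and n == 20
+ */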
+
+
+#if defined(__i386__) || defined(__x86_64__) || defined(_M_IX86) || defined(_M_X64)
+#define __print_unaligned_copy_16(p, q) (*(uint16_t*)(p) = *(uint16_t*)(q))
+#else
+#define __print_unaligned_copy_16(p, q) \
+ ((((uint8_t*)(p))[0] = ((uint8_t*)(q))[0]), \
+ (((uint8_t*)(p))[1] = ((uint8_t*)(q))[1]))
+#endif
+
+static const char __print_digit_pairs[] =
+ "0001020304050607080910111213141516171819"
+ "2021222324252627282930313233343536373839"
+ "4041424344454647484950515253545556575859"
+ "6061626364656667686970717273747576777879"
+ "8081828384858687888990919293949596979899";
+
+#define __print_stage() \
+ p -= 2; \
+ dp = __print_digit_pairs + (n % 100) * 2; \
+ n /= 100; \
+ __print_unaligned_copy_16(p, dp);
+
+#define __print_long_stage() \
+ __print_stage() \
+ __print_stage()
+
+#define __print_short_stage() \
+ *--p = (n % 10) + '0'; \
+ n /= 10;
+
+static int print_bool(int n, char *buf)
+{
+ if (n) {
+ memcpy(buf, "true\0", 5);
+ return 4;
+ } else {
+ memcpy(buf, "false\0", 6);
+ return 5;
+ }
+}
+
+static int print_uint8(uint8_t n, char *p)
+{
+ const char *dp;
+
+ if (n >= 100) {
+ p += 3;
+ *p = '\0';
+ __print_stage();
+ p[-1] = (char)n + '0';
+ return 3;
+ }
+ if (n >= 10) {
+ p += 2;
+ *p = '\0';
+ __print_stage();
+ return 2;
+ }
+ p[1] = '\0';
+ p[0] = (char)n + '0';
+ return 1;
+}
+
+static int print_uint16(uint16_t n, char *p)
+{
+ int k = 0;
+ const char *dp;
+
+ if (n >= 1000) {
+ if(n >= 10000) {
+ k = 5;
+ } else {
+ k = 4;
+ }
+ } else {
+ if(n >= 100) {
+ k = 3;
+ } else if(n >= 10) {
+ k = 2;
+ } else {
+ k = 1;
+ }
+ }
+ p += k;
+ *p = '\0';
+ if (k & 1) {
+ switch (k) {
+ case 5:
+ __print_stage();
+ pattribute(fallthrough);
+ case 3:
+ __print_stage();
+ pattribute(fallthrough);
+ case 1:
+ p[-1] = (char)n + '0';
+ }
+ } else {
+ switch (k) {
+ case 4:
+ __print_stage();
+ pattribute(fallthrough);
+ case 2:
+ __print_stage();
+ }
+ }
+ return k;
+}
+
+static int print_uint32(uint32_t n, char *p)
+{
+ int k = 0;
+ const char *dp;
+
+ if(n >= 10000UL) {
+ if(n >= 10000000UL) {
+ if(n >= 1000000000UL) {
+ k = 10;
+ } else if(n >= 100000000UL) {
+ k = 9;
+ } else {
+ k = 8;
+ }
+ } else {
+ if(n >= 1000000UL) {
+ k = 7;
+ } else if(n >= 100000UL) {
+ k = 6;
+ } else {
+ k = 5;
+ }
+ }
+ } else {
+ if(n >= 100UL) {
+ if(n >= 1000UL) {
+ k = 4;
+ } else {
+ k = 3;
+ }
+ } else {
+ if(n >= 10UL) {
+ k = 2;
+ } else {
+ k = 1;
+ }
+ }
+ }
+ p += k;
+ *p = '\0';
+ if (k & 1) {
+ switch (k) {
+ case 9:
+ __print_stage();
+ pattribute(fallthrough);
+ case 7:
+ __print_stage();
+ pattribute(fallthrough);
+ case 5:
+ __print_stage();
+ pattribute(fallthrough);
+ case 3:
+ __print_stage();
+ pattribute(fallthrough);
+ case 1:
+ p[-1] = (char)n + '0';
+ }
+ } else {
+ switch (k) {
+ case 10:
+ __print_stage();
+ pattribute(fallthrough);
+ case 8:
+ __print_stage();
+ pattribute(fallthrough);
+ case 6:
+ __print_stage();
+ pattribute(fallthrough);
+ case 4:
+ __print_stage();
+ pattribute(fallthrough);
+ case 2:
+ __print_stage();
+ }
+ }
+ return k;
+}
+
+static int print_uint64(uint64_t n, char *p)
+{
+ int k = 0;
+ const char *dp;
+ const uint64_t x = 1000000000ULL;
+
+ if (n < x) {
+ return print_uint32((uint32_t)n, p);
+ }
+ if(n >= 10000ULL * x) {
+ if(n >= 10000000ULL * x) {
+ if(n >= 1000000000ULL * x) {
+ if (n >= 10000000000ULL * x) {
+ k = 11 + 9;
+ } else {
+ k = 10 + 9;
+ }
+ } else if(n >= 100000000ULL * x) {
+ k = 9 + 9;
+ } else {
+ k = 8 + 9;
+ }
+ } else {
+ if(n >= 1000000ULL * x) {
+ k = 7 + 9;
+ } else if(n >= 100000ULL * x) {
+ k = 6 + 9;
+ } else {
+ k = 5 + 9;
+ }
+ }
+ } else {
+ if(n >= 100ULL * x) {
+ if(n >= 1000ULL * x) {
+ k = 4 + 9;
+ } else {
+ k = 3 + 9;
+ }
+ } else {
+ if(n >= 10ULL * x) {
+ k = 2 + 9;
+ } else {
+ k = 1 + 9;
+ }
+ }
+ }
+ p += k;
+ *p = '\0';
+ if (k & 1) {
+ switch (k) {
+ case 19:
+ __print_stage();
+ pattribute(fallthrough);
+ case 17:
+ __print_stage();
+ pattribute(fallthrough);
+ case 15:
+ __print_stage();
+ pattribute(fallthrough);
+ case 13:
+ __print_stage();
+ pattribute(fallthrough);
+ case 11:
+ __print_stage()
+ __print_short_stage();
+ }
+ } else {
+ switch (k) {
+ case 20:
+ __print_stage();
+ pattribute(fallthrough);
+ case 18:
+ __print_stage();
+ pattribute(fallthrough);
+ case 16:
+ __print_stage();
+ pattribute(fallthrough);
+ case 14:
+ __print_stage();
+ pattribute(fallthrough);
+ case 12:
+ __print_stage();
+ pattribute(fallthrough);
+ case 10:
+ __print_stage();
+ }
+ }
+ __print_long_stage()
+ __print_long_stage()
+ return k;
+}
+
+static int print_int8(int8_t n, char *p)
+{
+ int sign;
+
+ if ((sign = n < 0)) {
+ *p++ = '-';
+ n = -n;
+ }
+ return print_uint8((uint8_t)n, p) + sign;
+}
+
+static int print_int16(int16_t n, char *p)
+{
+ int sign;
+
+ if ((sign = n < 0)) {
+ *p++ = '-';
+ n = -n;
+ }
+ return print_uint16((uint16_t)n, p) + sign;
+}
+
+static int print_int32(int32_t n, char *p)
+{
+ int sign;
+
+ if ((sign = n < 0)) {
+ *p++ = '-';
+ n = -n;
+ }
+ return print_uint32((uint32_t)n, p) + sign;
+}
+
+static int print_int64(int64_t n, char *p)
+{
+ int sign;
+
+ if ((sign = n < 0)) {
+ *p++ = '-';
+ n = -n;
+ }
+ return print_uint64((uint64_t)n, p) + sign;
+}
+
+#define __define_print_int_simple(NAME, UNAME, T, UT) \
+static int UNAME(UT n, char *buf) \
+{ \
+ char tmp[20]; \
+ char* p = tmp + 20; \
+ char* q = p; \
+ unsigned int k, m; \
+ \
+ while (n >= 100) { \
+ p -= 2; \
+ m = (unsigned int)(n % 100) * 2; \
+ n /= 100; \
+ __print_unaligned_copy_16(p, __print_digit_pairs + m); \
+ } \
+ p -= 2; \
+ m = (unsigned int)n * 2; \
+ __print_unaligned_copy_16(p, __print_digit_pairs + m); \
+ if (n < 10) { \
+ ++p; \
+ } \
+ k = (unsigned int)(q - p); \
+ while (p != q) { \
+ *buf++ = *p++; \
+ } \
+ *buf = '\0'; \
+ return (int)k; \
+} \
+ \
+static int NAME(T n, char *buf) \
+{ \
+ int sign = n < 0; \
+ \
+ if (sign) { \
+ *buf++ = '-'; \
+ n = -n; \
+ } \
+ return UNAME((UT)n, buf) + sign; \
+}
+
+__define_print_int_simple(print_int, print_uint, int, unsigned int)
+__define_print_int_simple(print_long, print_ulong, long, unsigned long)
+
+#ifdef PPRINTINT_BENCH
+int main() {
+ int64_t count = 10000000; /* 10^7 */
+#if 0
+ int64_t base = 0;
+ int64_t base = 10000000000000000; /* 10^16 */
+ int64_t base = 1000000000; /* 10^9 */
+#endif
+ int64_t base = INT64_MIN - count/2;
+ char buf[100];
+ int i, k = 0, n = 0;
+ for (i = 0; i < count; i++) {
+ k = print_int64(i + base, buf);
+ n += buf[0] + buf[k - 1];
+ }
+ return n;
+}
+/* Run the executable under time(1); with 10^7 iterations, multiply the time in seconds by 100 to get ns per printed number. */
+#endif /* PPRINTINT_BENCH */
+
+#ifdef PPRINTINT_TEST
+
+#include <stdio.h>
+#include <string.h>
+
+int main()
+{
+ char buf[21];
+ int failed = 0;
+ int k;
+
+ k = print_uint64(UINT64_MAX, buf);
+ if (strlen(buf) != k) printf("length error\n");
+ if (strcmp("18446744073709551615", buf)) {
+ printf("UINT64_MAX didn't print correctly, got:\n'%s'\n", buf);
+ ++failed;
+ }
+ k = print_int64(INT64_MAX, buf);
+ if (strlen(buf) != k) printf("length error\n");
+ if (strcmp("9223372036854775807", buf)) {
+ printf("INT64_MAX didn't print correctly, got:\n'%s'\n", buf);
+ ++failed;
+ }
+ k = print_int64(INT64_MIN, buf);
+ if (strlen(buf) != k) printf("length error\n");
+ if (strcmp("-9223372036854775808", buf)) {
+ printf("INT64_MIN didn't print correctly, got:\n'%s'\n", buf);
+ ++failed;
+ }
+ k = print_uint32(UINT32_MAX, buf);
+ if (strlen(buf) != k) printf("length error\n");
+ if (strcmp("4294967295", buf)) {
+ printf("UINT32_MAX didn't print correctly, got:\n'%s'\n", buf);
+ ++failed;
+ }
+ k = print_int32(INT32_MAX, buf);
+ if (strlen(buf) != k) printf("length error\n");
+ if (strcmp("2147483647", buf)) {
+ printf("INT32_MAX didn't print correctly, got:\n'%s'\n", buf);
+ ++failed;
+ }
+ k = print_int32(INT32_MIN, buf);
+ if (strlen(buf) != k) printf("length error\n");
+ if (strcmp("-2147483648", buf)) {
+ printf("INT32_MIN didn't print correctly, got:\n'%s'\n", buf);
+ ++failed;
+ }
+ k = print_uint16(UINT16_MAX, buf);
+ if (strlen(buf) != k) printf("length error\n");
+ if (strcmp("65535", buf)) {
+ printf("UINT16_MAX didn't print correctly, got:\n'%s'\n", buf);
+ ++failed;
+ }
+ k = print_int16(INT16_MAX, buf);
+ if (strlen(buf) != k) printf("length error\n");
+ if (strcmp("32767", buf)) {
+ printf("INT16_MAX didn't print correctly, got:\n'%s'\n", buf);
+ ++failed;
+ }
+ k = print_int16(INT16_MIN, buf);
+ if (strlen(buf) != k) printf("length error\n");
+ if (strcmp("-32768", buf)) {
+ printf("INT16_MIN didn't print correctly, got:\n'%s'\n", buf);
+ ++failed;
+ }
+ k = print_uint8(UINT8_MAX, buf);
+ if (strlen(buf) != k) printf("length error\n");
+ if (strcmp("255", buf)) {
+ printf("INT8_MAX didn't print correctly, got:\n'%s'\n", buf);
+ ++failed;
+ }
+ k = print_int8(INT8_MAX, buf);
+ if (strlen(buf) != k) printf("length error\n");
+ if (strcmp("127", buf)) {
+ printf("INT8_MAX didn't print correctly, got:\n'%s'\n", buf);
+ ++failed;
+ }
+ k = print_int8(INT8_MIN, buf);
+ if (strlen(buf) != k) printf("length error\n");
+ if (strcmp("-128", buf)) {
+ printf("INT8_MIN didn't print correctly, got:\n'%s'\n", buf);
+ ++failed;
+ }
+ k = print_int(INT32_MAX, buf);
+ if (strlen(buf) != k) printf("length error\n");
+ if (strcmp("2147483647", buf)) {
+ printf("INT32_MAX didn't print correctly with k = print_int, got:\n'%s'\n", buf);
+ ++failed;
+ }
+ k = print_int(INT32_MIN, buf);
+ if (strlen(buf) != k) printf("length error\n");
+ if (strcmp("-2147483648", buf)) {
+ printf("INT32_MIN didn't print correctly k = print_int, got:\n'%s'\n", buf);
+ ++failed;
+ }
+ k = print_long(INT32_MAX, buf);
+ if (strlen(buf) != k) printf("length error\n");
+ if (strcmp("2147483647", buf)) {
+ printf("INT32_MAX didn't print correctly with fmt_long, got:\n'%s'\n", buf);
+ ++failed;
+ }
+ k = print_long(INT32_MIN, buf);
+ if (strlen(buf) != k) printf("length error\n");
+ if (strcmp("-2147483648", buf)) {
+ printf("INT32_MIN didn't print correctly fmt_long, got:\n'%s'\n", buf);
+ ++failed;
+ }
+ k = print_bool(1, buf);
+ if (strlen(buf) != k) printf("length error\n");
+ if (strcmp("true", buf)) {
+ printf("1 didn't print 'true' as expected, got:\n'%s'\n", buf);
+ ++failed;
+ }
+ k = print_bool(-1, buf);
+ if (strlen(buf) != k) printf("length error\n");
+ if (strcmp("true", buf)) {
+ printf("-1 didn't print 'true' as expected, got:\n'%s'\n", buf);
+ ++failed;
+ }
+ k = print_bool(0, buf);
+ if (strlen(buf) != k) printf("length error\n");
+ if (strcmp("false", buf)) {
+ printf("0 didn't print 'false' as expected, got:\n'%s'\n", buf);
+ ++failed;
+ }
+ if (failed) {
+ printf("FAILED\n");
+ return -1;
+ }
+ printf("SUCCESS\n");
+ return 0;
+}
+#endif /* PPRINTINT_TEST */
+
+#include "pdiagnostic_pop.h"
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* PPRINTINT_H */
diff --git a/nostrdb/flatcc/portable/pstatic_assert.h b/nostrdb/flatcc/portable/pstatic_assert.h
@@ -0,0 +1,67 @@
+#ifndef PSTATIC_ASSERT_H
+#define PSTATIC_ASSERT_H
+
+#include <assert.h>
+
+/* Handle clang */
+#ifndef __has_feature
+ #define __has_feature(x) 0
+#endif
+
+#if defined(static_assert)
+#ifndef __static_assert_is_defined
+#define __static_assert_is_defined 1
+#endif
+#endif
+
+/* Handle static_assert as a keyword in C++ and compiler specifics. */
+#if !defined(__static_assert_is_defined)
+
+#if defined(__cplusplus)
+
+#if __cplusplus >= 201103L
+#define __static_assert_is_defined 1
+#elif __has_feature(cxx_static_assert)
+#define __static_assert_is_defined 1
+#elif defined(_MSC_VER) && (_MSC_VER >= 1600)
+#define __static_assert_is_defined 1
+#endif
+
+#else
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1600)
+#define __static_assert_is_defined 1
+#elif __has_feature(c_static_assert)
+#define static_assert(pred, msg) _Static_assert(pred, msg)
+#define __static_assert_is_defined 1
+#elif defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)
+/* In case the clib headers are not compliant. */
+#define static_assert(pred, msg) _Static_assert(pred, msg)
+#define __static_assert_is_defined 1
+#endif
+
+#endif /* __cplusplus */
+#endif /* __static_assert_is_defined */
+
+
+#if !defined(__static_assert_is_defined)
+
+#define __PSTATIC_ASSERT_CONCAT_(a, b) static_assert_scope_##a##_line_##b
+#define __PSTATIC_ASSERT_CONCAT(a, b) __PSTATIC_ASSERT_CONCAT_(a, b)
+#ifdef __COUNTER__
+#define static_assert(e, msg) enum { __PSTATIC_ASSERT_CONCAT(__COUNTER__, __LINE__) = 1/(!!(e)) }
+#else
+#include "pstatic_assert_scope.h"
+#define static_assert(e, msg) enum { __PSTATIC_ASSERT_CONCAT(__PSTATIC_ASSERT_COUNTER, __LINE__) = 1/(int)(!!(e)) }
+#endif
+
+#define __static_assert_is_defined 1
+
+#endif /* __static_assert_is_defined */
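+
+/*
+ * Usage sketch - same form whether the compiler provides static_assert
+ * natively or the enum fallback above is used:
+ *
+ *     static_assert(sizeof(uint32_t) == 4, "uint32_t must be 4 bytes");
+ */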
+
+#endif /* PSTATIC_ASSERT_H */
+
+/* Update scope counter outside of include guard. */
+#ifdef __PSTATIC_ASSERT_COUNTER
+#include "pstatic_assert_scope.h"
+#endif
diff --git a/nostrdb/flatcc/portable/pstatic_assert_scope.h b/nostrdb/flatcc/portable/pstatic_assert_scope.h
@@ -0,0 +1,280 @@
+/*
+ * January 2017, ported to the portable library by mikkelfj.
+ * Based on dbgtools' static assert counter, but with renamed macros.
+ */
+
+/*
+ dbgtools - platform independent wrapping of "nice to have" debug functions.
+
+ version 0.1, october, 2013
+
+ https://github.com/wc-duck/dbgtools
+
+ Copyright (C) 2013- Fredrik Kihlander
+
+ This software is provided 'as-is', without any express or implied
+ warranty. In no event will the authors be held liable for any damages
+ arising from the use of this software.
+
+ Permission is granted to anyone to use this software for any purpose,
+ including commercial applications, and to alter it and redistribute it
+ freely, subject to the following restrictions:
+
+ 1. The origin of this software must not be misrepresented; you must not
+ claim that you wrote the original software. If you use this software
+ in a product, an acknowledgment in the product documentation would be
+ appreciated but is not required.
+ 2. Altered source versions must be plainly marked as such, and must not be
+ misrepresented as being the original software.
+ 3. This notice may not be removed or altered from any source distribution.
+
+ Fredrik Kihlander
+*/
+
+/**
+ * Auto-generated header implementing a counter that increases with each include of the file.
+ *
+ * This header will define the macro __PSTATIC_ASSERT_COUNTER to be increased for each inclusion of the file.
+ *
+ * It has been generated with 4 digits, so the counter wraps around
+ * after 10000 inclusions.
+ *
+ * Usage:
+ *
+ * #include "this_header.h"
+ * int a = __PSTATIC_ASSERT_COUNTER; // 0
+ * #include "this_header.h"
+ * int b = __PSTATIC_ASSERT_COUNTER; // 1
+ * #include "this_header.h"
+ * int c = __PSTATIC_ASSERT_COUNTER; // 2
+ * #include "this_header.h"
+ * int d = __PSTATIC_ASSERT_COUNTER; // 3
+ */
+
+#ifndef __PSTATIC_ASSERT_COUNTER
+# define __PSTATIC_ASSERT_COUNTER_0 0
+# define __PSTATIC_ASSERT_COUNTER_1
+# define __PSTATIC_ASSERT_COUNTER_2
+# define __PSTATIC_ASSERT_COUNTER_3
+# define __PSTATIC_ASSERT_COUNTER_D1_0
+# define __PSTATIC_ASSERT_COUNTER_D2_0
+# define __PSTATIC_ASSERT_COUNTER_D3_0
+#endif /* __PSTATIC_ASSERT_COUNTER */
+
+#if !defined( __PSTATIC_ASSERT_COUNTER_D0_0 )
+# define __PSTATIC_ASSERT_COUNTER_D0_0
+# undef __PSTATIC_ASSERT_COUNTER_0
+# define __PSTATIC_ASSERT_COUNTER_0 0
+#elif !defined( __PSTATIC_ASSERT_COUNTER_D0_1 )
+# define __PSTATIC_ASSERT_COUNTER_D0_1
+# undef __PSTATIC_ASSERT_COUNTER_0
+# define __PSTATIC_ASSERT_COUNTER_0 1
+#elif !defined( __PSTATIC_ASSERT_COUNTER_D0_2 )
+# define __PSTATIC_ASSERT_COUNTER_D0_2
+# undef __PSTATIC_ASSERT_COUNTER_0
+# define __PSTATIC_ASSERT_COUNTER_0 2
+#elif !defined( __PSTATIC_ASSERT_COUNTER_D0_3 )
+# define __PSTATIC_ASSERT_COUNTER_D0_3
+# undef __PSTATIC_ASSERT_COUNTER_0
+# define __PSTATIC_ASSERT_COUNTER_0 3
+#elif !defined( __PSTATIC_ASSERT_COUNTER_D0_4 )
+# define __PSTATIC_ASSERT_COUNTER_D0_4
+# undef __PSTATIC_ASSERT_COUNTER_0
+# define __PSTATIC_ASSERT_COUNTER_0 4
+#elif !defined( __PSTATIC_ASSERT_COUNTER_D0_5 )
+# define __PSTATIC_ASSERT_COUNTER_D0_5
+# undef __PSTATIC_ASSERT_COUNTER_0
+# define __PSTATIC_ASSERT_COUNTER_0 5
+#elif !defined( __PSTATIC_ASSERT_COUNTER_D0_6 )
+# define __PSTATIC_ASSERT_COUNTER_D0_6
+# undef __PSTATIC_ASSERT_COUNTER_0
+# define __PSTATIC_ASSERT_COUNTER_0 6
+#elif !defined( __PSTATIC_ASSERT_COUNTER_D0_7 )
+# define __PSTATIC_ASSERT_COUNTER_D0_7
+# undef __PSTATIC_ASSERT_COUNTER_0
+# define __PSTATIC_ASSERT_COUNTER_0 7
+#elif !defined( __PSTATIC_ASSERT_COUNTER_D0_8 )
+# define __PSTATIC_ASSERT_COUNTER_D0_8
+# undef __PSTATIC_ASSERT_COUNTER_0
+# define __PSTATIC_ASSERT_COUNTER_0 8
+#elif !defined( __PSTATIC_ASSERT_COUNTER_D0_9 )
+# define __PSTATIC_ASSERT_COUNTER_D0_9
+# undef __PSTATIC_ASSERT_COUNTER_0
+# define __PSTATIC_ASSERT_COUNTER_0 9
+#else
+# undef __PSTATIC_ASSERT_COUNTER_D0_1
+# undef __PSTATIC_ASSERT_COUNTER_D0_2
+# undef __PSTATIC_ASSERT_COUNTER_D0_3
+# undef __PSTATIC_ASSERT_COUNTER_D0_4
+# undef __PSTATIC_ASSERT_COUNTER_D0_5
+# undef __PSTATIC_ASSERT_COUNTER_D0_6
+# undef __PSTATIC_ASSERT_COUNTER_D0_7
+# undef __PSTATIC_ASSERT_COUNTER_D0_8
+# undef __PSTATIC_ASSERT_COUNTER_D0_9
+# undef __PSTATIC_ASSERT_COUNTER_0
+# define __PSTATIC_ASSERT_COUNTER_0 0
+# if !defined( __PSTATIC_ASSERT_COUNTER_D1_0 )
+# define __PSTATIC_ASSERT_COUNTER_D1_0
+# undef __PSTATIC_ASSERT_COUNTER_1
+# define __PSTATIC_ASSERT_COUNTER_1 0
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D1_1 )
+# define __PSTATIC_ASSERT_COUNTER_D1_1
+# undef __PSTATIC_ASSERT_COUNTER_1
+# define __PSTATIC_ASSERT_COUNTER_1 1
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D1_2 )
+# define __PSTATIC_ASSERT_COUNTER_D1_2
+# undef __PSTATIC_ASSERT_COUNTER_1
+# define __PSTATIC_ASSERT_COUNTER_1 2
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D1_3 )
+# define __PSTATIC_ASSERT_COUNTER_D1_3
+# undef __PSTATIC_ASSERT_COUNTER_1
+# define __PSTATIC_ASSERT_COUNTER_1 3
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D1_4 )
+# define __PSTATIC_ASSERT_COUNTER_D1_4
+# undef __PSTATIC_ASSERT_COUNTER_1
+# define __PSTATIC_ASSERT_COUNTER_1 4
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D1_5 )
+# define __PSTATIC_ASSERT_COUNTER_D1_5
+# undef __PSTATIC_ASSERT_COUNTER_1
+# define __PSTATIC_ASSERT_COUNTER_1 5
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D1_6 )
+# define __PSTATIC_ASSERT_COUNTER_D1_6
+# undef __PSTATIC_ASSERT_COUNTER_1
+# define __PSTATIC_ASSERT_COUNTER_1 6
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D1_7 )
+# define __PSTATIC_ASSERT_COUNTER_D1_7
+# undef __PSTATIC_ASSERT_COUNTER_1
+# define __PSTATIC_ASSERT_COUNTER_1 7
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D1_8 )
+# define __PSTATIC_ASSERT_COUNTER_D1_8
+# undef __PSTATIC_ASSERT_COUNTER_1
+# define __PSTATIC_ASSERT_COUNTER_1 8
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D1_9 )
+# define __PSTATIC_ASSERT_COUNTER_D1_9
+# undef __PSTATIC_ASSERT_COUNTER_1
+# define __PSTATIC_ASSERT_COUNTER_1 9
+# else
+# undef __PSTATIC_ASSERT_COUNTER_D1_1
+# undef __PSTATIC_ASSERT_COUNTER_D1_2
+# undef __PSTATIC_ASSERT_COUNTER_D1_3
+# undef __PSTATIC_ASSERT_COUNTER_D1_4
+# undef __PSTATIC_ASSERT_COUNTER_D1_5
+# undef __PSTATIC_ASSERT_COUNTER_D1_6
+# undef __PSTATIC_ASSERT_COUNTER_D1_7
+# undef __PSTATIC_ASSERT_COUNTER_D1_8
+# undef __PSTATIC_ASSERT_COUNTER_D1_9
+# undef __PSTATIC_ASSERT_COUNTER_1
+# define __PSTATIC_ASSERT_COUNTER_1 0
+# if !defined( __PSTATIC_ASSERT_COUNTER_D2_0 )
+# define __PSTATIC_ASSERT_COUNTER_D2_0
+# undef __PSTATIC_ASSERT_COUNTER_2
+# define __PSTATIC_ASSERT_COUNTER_2 0
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D2_1 )
+# define __PSTATIC_ASSERT_COUNTER_D2_1
+# undef __PSTATIC_ASSERT_COUNTER_2
+# define __PSTATIC_ASSERT_COUNTER_2 1
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D2_2 )
+# define __PSTATIC_ASSERT_COUNTER_D2_2
+# undef __PSTATIC_ASSERT_COUNTER_2
+# define __PSTATIC_ASSERT_COUNTER_2 2
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D2_3 )
+# define __PSTATIC_ASSERT_COUNTER_D2_3
+# undef __PSTATIC_ASSERT_COUNTER_2
+# define __PSTATIC_ASSERT_COUNTER_2 3
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D2_4 )
+# define __PSTATIC_ASSERT_COUNTER_D2_4
+# undef __PSTATIC_ASSERT_COUNTER_2
+# define __PSTATIC_ASSERT_COUNTER_2 4
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D2_5 )
+# define __PSTATIC_ASSERT_COUNTER_D2_5
+# undef __PSTATIC_ASSERT_COUNTER_2
+# define __PSTATIC_ASSERT_COUNTER_2 5
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D2_6 )
+# define __PSTATIC_ASSERT_COUNTER_D2_6
+# undef __PSTATIC_ASSERT_COUNTER_2
+# define __PSTATIC_ASSERT_COUNTER_2 6
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D2_7 )
+# define __PSTATIC_ASSERT_COUNTER_D2_7
+# undef __PSTATIC_ASSERT_COUNTER_2
+# define __PSTATIC_ASSERT_COUNTER_2 7
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D2_8 )
+# define __PSTATIC_ASSERT_COUNTER_D2_8
+# undef __PSTATIC_ASSERT_COUNTER_2
+# define __PSTATIC_ASSERT_COUNTER_2 8
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D2_9 )
+# define __PSTATIC_ASSERT_COUNTER_D2_9
+# undef __PSTATIC_ASSERT_COUNTER_2
+# define __PSTATIC_ASSERT_COUNTER_2 9
+# else
+# undef __PSTATIC_ASSERT_COUNTER_D2_1
+# undef __PSTATIC_ASSERT_COUNTER_D2_2
+# undef __PSTATIC_ASSERT_COUNTER_D2_3
+# undef __PSTATIC_ASSERT_COUNTER_D2_4
+# undef __PSTATIC_ASSERT_COUNTER_D2_5
+# undef __PSTATIC_ASSERT_COUNTER_D2_6
+# undef __PSTATIC_ASSERT_COUNTER_D2_7
+# undef __PSTATIC_ASSERT_COUNTER_D2_8
+# undef __PSTATIC_ASSERT_COUNTER_D2_9
+# undef __PSTATIC_ASSERT_COUNTER_2
+# define __PSTATIC_ASSERT_COUNTER_2 0
+# if !defined( __PSTATIC_ASSERT_COUNTER_D3_0 )
+# define __PSTATIC_ASSERT_COUNTER_D3_0
+# undef __PSTATIC_ASSERT_COUNTER_3
+# define __PSTATIC_ASSERT_COUNTER_3 0
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D3_1 )
+# define __PSTATIC_ASSERT_COUNTER_D3_1
+# undef __PSTATIC_ASSERT_COUNTER_3
+# define __PSTATIC_ASSERT_COUNTER_3 1
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D3_2 )
+# define __PSTATIC_ASSERT_COUNTER_D3_2
+# undef __PSTATIC_ASSERT_COUNTER_3
+# define __PSTATIC_ASSERT_COUNTER_3 2
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D3_3 )
+# define __PSTATIC_ASSERT_COUNTER_D3_3
+# undef __PSTATIC_ASSERT_COUNTER_3
+# define __PSTATIC_ASSERT_COUNTER_3 3
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D3_4 )
+# define __PSTATIC_ASSERT_COUNTER_D3_4
+# undef __PSTATIC_ASSERT_COUNTER_3
+# define __PSTATIC_ASSERT_COUNTER_3 4
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D3_5 )
+# define __PSTATIC_ASSERT_COUNTER_D3_5
+# undef __PSTATIC_ASSERT_COUNTER_3
+# define __PSTATIC_ASSERT_COUNTER_3 5
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D3_6 )
+# define __PSTATIC_ASSERT_COUNTER_D3_6
+# undef __PSTATIC_ASSERT_COUNTER_3
+# define __PSTATIC_ASSERT_COUNTER_3 6
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D3_7 )
+# define __PSTATIC_ASSERT_COUNTER_D3_7
+# undef __PSTATIC_ASSERT_COUNTER_3
+# define __PSTATIC_ASSERT_COUNTER_3 7
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D3_8 )
+# define __PSTATIC_ASSERT_COUNTER_D3_8
+# undef __PSTATIC_ASSERT_COUNTER_3
+# define __PSTATIC_ASSERT_COUNTER_3 8
+# elif !defined( __PSTATIC_ASSERT_COUNTER_D3_9 )
+# define __PSTATIC_ASSERT_COUNTER_D3_9
+# undef __PSTATIC_ASSERT_COUNTER_3
+# define __PSTATIC_ASSERT_COUNTER_3 9
+# else
+# undef __PSTATIC_ASSERT_COUNTER_D3_1
+# undef __PSTATIC_ASSERT_COUNTER_D3_2
+# undef __PSTATIC_ASSERT_COUNTER_D3_3
+# undef __PSTATIC_ASSERT_COUNTER_D3_4
+# undef __PSTATIC_ASSERT_COUNTER_D3_5
+# undef __PSTATIC_ASSERT_COUNTER_D3_6
+# undef __PSTATIC_ASSERT_COUNTER_D3_7
+# undef __PSTATIC_ASSERT_COUNTER_D3_8
+# undef __PSTATIC_ASSERT_COUNTER_D3_9
+# undef __PSTATIC_ASSERT_COUNTER_3
+# define __PSTATIC_ASSERT_COUNTER_3 0
+# endif
+# endif
+# endif
+#endif
+
+#define __PSTATIC_ASSERT_COUNTER_JOIN_DIGITS_MACRO_(digit0,digit1,digit2,digit3) digit0##digit1##digit2##digit3
+#define __PSTATIC_ASSERT_COUNTER_JOIN_DIGITS_MACRO(digit0,digit1,digit2,digit3) __PSTATIC_ASSERT_COUNTER_JOIN_DIGITS_MACRO_(digit0,digit1,digit2,digit3)
+#undef __PSTATIC_ASSERT_COUNTER
+#define __PSTATIC_ASSERT_COUNTER __PSTATIC_ASSERT_COUNTER_JOIN_DIGITS_MACRO(__PSTATIC_ASSERT_COUNTER_3,__PSTATIC_ASSERT_COUNTER_2,__PSTATIC_ASSERT_COUNTER_1,__PSTATIC_ASSERT_COUNTER_0)
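The macro machinery above is easiest to understand from the consumer side: each re-inclusion advances the digit trackers, and the JOIN macros paste the four digits back into a single token. Below is a minimal sketch of the classic pre-C11 use, pasting the counter into a typedef name so several compile-time asserts can coexist in one translation unit; the include path "pstatic_assert_scope.h" and the MY_* helpers are illustrative assumptions, not part of this diff.

    /* Illustrative consumer; header path and helper names are assumptions. */
    #define MY_JOIN_(a, b) a##b
    #define MY_JOIN(a, b)  MY_JOIN_(a, b)

    /* A negative array size forces a compile error when cond is false. */
    #define MY_STATIC_ASSERT(cond) \
        typedef char MY_JOIN(my_assert_, __PSTATIC_ASSERT_COUNTER)[(cond) ? 1 : -1]

    #include "pstatic_assert_scope.h"       /* counter -> 0000 */
    MY_STATIC_ASSERT(sizeof(int) >= 2);     /* typedef my_assert_0000 */
    #include "pstatic_assert_scope.h"       /* counter -> 0001 */
    MY_STATIC_ASSERT(sizeof(void *) >= 4);  /* typedef my_assert_0001 */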
diff --git a/nostrdb/flatcc/portable/pstdalign.h b/nostrdb/flatcc/portable/pstdalign.h
@@ -0,0 +1,162 @@
+#ifndef PSTDALIGN_H
+#define PSTDALIGN_H
+
+/*
+ * NOTE: aligned_alloc is defined via paligned_alloc.h
+ * and requires aligned_free to be fully portable although
+ * free also works on C11 and platforms with posix_memalign.
+ *
+ * NOTE: C++11 defines alignas as a keyword but then also defines
+ * __alignas_is_defined.
+ *
+ * C++14 does not always define __alignas_is_defined.
+ *
+ * GCC 8.3 reverts this and makes C++11 behave the same as C++14,
+ * preventing a simple __cplusplus version check from working.
+ *
+ * Clang C++ without std=c++11 or std=c++14 does define alignas,
+ * but does so incorrectly with respect to C11 and C++11 semantics
+ * because `alignas(4) float x;` is not recognized.
+ * To fix such issues, either move to a std version, or
+ * include a working stdalign.h for the given compiler before
+ * this file.
+ *
+ * newlib defines _Alignas and _Alignof in sys/cdefs but relies on
+ * the gcc version for <stdalign.h>, which can lead to conflicts if
+ * stdalign.h is not included.
+ *
+ * newlib's need for <stdalign.h> conflicts with broken C++ stdalign,
+ * but this can be fixed by using std=c++11 or newer.
+ *
+ * MSVC does not support <stdalign.h> at least up to MSVC 2015,
+ * but does appear to support alignas and alignof keywords in
+ * recent standard C++.
+ *
+ * TCC only supports alignas with a numeric argument like
+ * `alignas(4)`, but not `alignas(float)`.
+ *
+ * If stdalign.h is supported but heuristics in this file are
+ * insufficient to detect this, try including <stdalign.h> manually
+ * or define HAVE_STDALIGN_H.
+ */
+
+/* https://github.com/dvidelabs/flatcc/issues/130 */
+#ifndef __alignas_is_defined
+#if defined(__cplusplus)
+#if __cplusplus == 201103 && !defined(__clang__) && ((__GNUC__ > 8) || (__GNUC__ == 8 && __GNUC_MINOR__ >= 3))
+#define __alignas_is_defined 1
+#define __alignof_is_defined 1
+#include <stdalign.h>
+#endif
+#endif
+#endif
+
+/* Allow for alternative solution to be included first. */
+#ifndef __alignas_is_defined
+
+#ifdef __cplusplus
+#if defined(PORTABLE_PATCH_CPLUSPLUS_STDALIGN)
+#include <stdalign.h>
+#undef alignas
+#define alignas(t) __attribute__((__aligned__(t)))
+#endif
+#endif
+
+#if !defined(PORTABLE_HAS_INCLUDE_STDALIGN)
+#if defined(__has_include)
+#if __has_include(<stdalign.h>)
+#define PORTABLE_HAS_INCLUDE_STDALIGN 1
+#else
+#define PORTABLE_HAS_INCLUDE_STDALIGN 0
+#endif
+#endif
+#endif
+
+ /* https://lists.gnu.org/archive/html/bug-gnulib/2015-08/msg00003.html */
+#if defined(__cplusplus)
+#if !defined(_MSC_VER)
+#include <stdalign.h>
+#endif
+#if __cplusplus > 201103
+#define __alignas_is_defined 1
+#define __alignof_is_defined 1
+#endif
+#elif PORTABLE_HAS_INCLUDE_STDALIGN
+#include <stdalign.h>
+#elif !defined(__clang__) && ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7))
+#include <stdalign.h>
+#elif defined(HAVE_STDALIGN_H)
+#include <stdalign.h>
+#endif
+
+#endif /* __alignas_is_defined */
+
+#ifndef __alignas_is_defined
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if (!defined(__clang__) && defined(__GNUC__) && \
+ ((__GNUC__ < 4) || (__GNUC__ == 4 && __GNUC_MINOR__ < 7)))
+#undef PORTABLE_C11_STDALIGN_MISSING
+#define PORTABLE_C11_STDALIGN_MISSING
+#endif
+
+#if defined(__IBMC__)
+#undef PORTABLE_C11_STDALIGN_MISSING
+#define PORTABLE_C11_STDALIGN_MISSING
+#endif
+
+#if ((defined(__STDC__) && __STDC__ && defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L) && \
+ !defined(PORTABLE_C11_STDALIGN_MISSING))
+/* C11 or newer */
+#include <stdalign.h>
+#else
+#if defined(__GNUC__) || defined(__IBM_ALIGNOF__) || defined(__clang__)
+
+#ifndef _Alignas
+#define _Alignas(t) __attribute__((__aligned__(t)))
+#endif
+
+#ifndef _Alignof
+#define _Alignof(t) __alignof__(t)
+#endif
+
+#elif defined(_MSC_VER)
+
+#define _Alignas(t) __declspec (align(t))
+#define _Alignof(t) __alignof(t)
+
+#elif defined(__TINYC__)
+
+/* Supports `_Alignas(integer-expression)`, but not `_Alignas(type)`. */
+#define _Alignas(t) __attribute__(aligned(t))
+#define _Alignof(t) __alignof__(t)
+
+#else
+#error please update pstdalign.h with support for current compiler and library
+#endif
+
+#endif /* __STDC__ */
+
+#ifndef alignas
+#define alignas _Alignas
+#endif
+
+#ifndef alignof
+#define alignof _Alignof
+#endif
+
+#define __alignas_is_defined 1
+#define __alignof_is_defined 1
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __alignas_is_defined */
+
+#include "paligned_alloc.h"
+
+#endif /* PSTDALIGN_H */
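After this header is included, alignas and alignof should be usable even on pre-C11 toolchains that only expose vendor attributes. A minimal sketch, assuming the header is reachable as "pstdalign.h":

    #include <stddef.h>
    #include "pstdalign.h"  /* path is an assumption */

    /* Request 16-byte alignment, e.g. for SIMD-friendly data. */
    typedef struct {
        alignas(16) float lanes[4];
    } vec4;

    int vec4_is_aligned(const vec4 *v)
    {
        /* alignof reflects the raised alignment requirement */
        return alignof(vec4) == 16 && ((size_t)(const void *)v % 16) == 0;
    }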
diff --git a/nostrdb/flatcc/portable/pstdbool.h b/nostrdb/flatcc/portable/pstdbool.h
@@ -0,0 +1,37 @@
+#ifndef PSTDBOOL_H
+#define PSTDBOOL_H
+
+#if !defined(__cplusplus) && !__bool_true_false_are_defined && !defined(bool) && !defined(__STDBOOL_H)
+
+#ifdef HAVE_STDBOOL_H
+
+#include <stdbool.h>
+
+#elif (defined(__STDC__) && __STDC__ && defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L)
+/* C99 or newer */
+
+#define bool _Bool
+#define true 1
+#define false 0
+#define __bool_true_false_are_defined 1
+
+#elif defined(__GNUC__) && !defined(__STRICT_ANSI__)
+
+#define bool bool
+#define true true
+#define false false
+#define __bool_true_false_are_defined 1
+
+#else
+
+typedef unsigned char _Portable_bool;
+#define bool _Portable_bool
+#define true 1
+#define false 0
+#define __bool_true_false_are_defined 1
+
+#endif
+
+#endif
+
+#endif /* PSTDBOOL_H */
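Code written against this header compiles the same whether <stdbool.h>, the C99 _Bool branch, or the unsigned char fallback is selected, since every fallback keeps true == 1 and false == 0. A short sketch (the include path is an assumption):

    #include "pstdbool.h"  /* path is an assumption */

    static bool is_even(int n)
    {
        /* the comparison yields 0 or 1, which fits every fallback */
        return (n % 2) == 0;
    }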
diff --git a/nostrdb/flatcc/portable/pstdint.h b/nostrdb/flatcc/portable/pstdint.h
@@ -0,0 +1,898 @@
+/* A portable stdint.h
+ ****************************************************************************
+ * BSD License:
+ ****************************************************************************
+ *
+ * Copyright (c) 2005-2016 Paul Hsieh
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ ****************************************************************************
+ *
+ * Version 0.1.15.2
+ *
+ * The ANSI C standard committee, for the C99 standard, specified the
+ * inclusion of a new standard include file called stdint.h. This is
+ * a very useful and long desired include file which contains several
+ * very precise definitions for integer scalar types that are
+ * critically important for making portable several classes of
+ * applications including cryptography, hashing, variable length
+ * integer libraries and so on. But for most developers it's likely
+ * useful just for programming sanity.
+ *
+ * The problem is that some compiler vendors chose to ignore the C99
+ * standard and some older compilers have no opportunity to be updated.
+ * Because of this situation, simply including stdint.h in your code
+ * makes it unportable.
+ *
+ * So that's what this file is all about. It's an attempt to build a
+ * single universal include file that works on as many platforms as
+ * possible to deliver what stdint.h is supposed to. Even compilers
+ * that already come with stdint.h can use this file instead without
+ * any loss of functionality. A few things that should be noted about
+ * this file:
+ *
+ * 1) It is not guaranteed to be portable and/or present an identical
+ * interface on all platforms. The extreme variability of the
+ * ANSI C standard makes this an impossibility right from the
+ * very get go. It's really only meant to be useful for the vast
+ * majority of platforms that possess the capability of
+ * implementing usefully and precisely defined, standard sized
+ * integer scalars. Systems which are not intrinsically 2s
+ * complement may produce invalid constants.
+ *
+ * 2) There is an unavoidable use of non-reserved symbols.
+ *
+ * 3) Other standard include files are invoked.
+ *
+ * 4) This file may come in conflict with future platforms that do
+ * include stdint.h. The hope is that one or the other can be
+ * used with no real difference.
+ *
+ * 5) In the current version, if your platform can't represent
+ * int32_t, int16_t and int8_t, it just dumps out with a compiler
+ * error.
+ *
+ * 6) 64 bit integers may or may not be defined. Test for their
+ * presence with the test: #ifdef INT64_MAX or #ifdef UINT64_MAX.
+ * Note that this is different from the C99 specification which
+ * requires the existence of 64 bit support in the compiler. If
+ * this is not defined for your platform, yet it is capable of
+ * dealing with 64 bits then it is because this file has not yet
+ * been extended to cover all of your system's capabilities.
+ *
+ * 7) (u)intptr_t may or may not be defined. Test for its presence
+ * with the test: #ifdef PTRDIFF_MAX. If this is not defined
+ * for your platform, then it is because this file has not yet
+ * been extended to cover all of your system's capabilities, not
+ * because it's optional.
+ *
+ * 8) The following might not be defined even if your platform is
+ * capable of defining it:
+ *
+ * WCHAR_MIN
+ * WCHAR_MAX
+ * (u)int64_t
+ * PTRDIFF_MIN
+ * PTRDIFF_MAX
+ * (u)intptr_t
+ *
+ * 9) The following have not been defined:
+ *
+ * WINT_MIN
+ * WINT_MAX
+ *
+ * 10) The criteria for defining (u)int_least(*)_t isn't clear,
+ * except for systems which don't have types of precisely
+ * 8, 16, or 32 bits (which this include file does
+ * not support anyway). Default definitions have been given.
+ *
+ * 11) The criteria for defining (u)int_fast(*)_t isn't something I
+ * would trust to any particular compiler vendor or the ANSI C
+ * committee. It is well known that "compatible systems" are
+ * commonly created that have very different performance
+ * characteristics from the systems they are compatible with,
+ * especially those whose vendors make both the compiler and the
+ * system. Default definitions have been given, but it's strongly
+ * recommended that users never use these definitions for any
+ * reason (they do *NOT* deliver any serious guarantee of
+ * improved performance -- not in this file, nor any vendor's
+ * stdint.h).
+ *
+ * 12) The following macros:
+ *
+ * PRINTF_INTMAX_MODIFIER
+ * PRINTF_INT64_MODIFIER
+ * PRINTF_INT32_MODIFIER
+ * PRINTF_INT16_MODIFIER
+ * PRINTF_LEAST64_MODIFIER
+ * PRINTF_LEAST32_MODIFIER
+ * PRINTF_LEAST16_MODIFIER
+ * PRINTF_INTPTR_MODIFIER
+ *
+ * are strings which have been defined as the modifiers required
+ * for the "d", "u" and "x" printf formats to correctly output
+ * (u)intmax_t, (u)int64_t, (u)int32_t, (u)int16_t, (u)least64_t,
+ * (u)least32_t, (u)least16_t and (u)intptr_t types respectively.
+ * PRINTF_INTPTR_MODIFIER is not defined for some systems which
+ * provide their own stdint.h. PRINTF_INT64_MODIFIER is not
+ * defined if INT64_MAX is not defined. These are an extension
+ * beyond what C99 specifies must be in stdint.h.
+ *
+ * In addition, the following macros are defined:
+ *
+ * PRINTF_INTMAX_HEX_WIDTH
+ * PRINTF_INT64_HEX_WIDTH
+ * PRINTF_INT32_HEX_WIDTH
+ * PRINTF_INT16_HEX_WIDTH
+ * PRINTF_INT8_HEX_WIDTH
+ * PRINTF_INTMAX_DEC_WIDTH
+ * PRINTF_INT64_DEC_WIDTH
+ * PRINTF_INT32_DEC_WIDTH
+ * PRINTF_INT16_DEC_WIDTH
+ * PRINTF_UINT8_DEC_WIDTH
+ * PRINTF_UINTMAX_DEC_WIDTH
+ * PRINTF_UINT64_DEC_WIDTH
+ * PRINTF_UINT32_DEC_WIDTH
+ * PRINTF_UINT16_DEC_WIDTH
+ * PRINTF_UINT8_DEC_WIDTH
+ *
+ * Which specifies the maximum number of characters required to
+ * print the number of that type in either hexadecimal or decimal.
+ * These are an extension beyond what C99 specifies must be in
+ * stdint.h.
+ *
+ * Compilers tested (all with 0 warnings at their highest respective
+ * settings): Borland Turbo C 2.0, WATCOM C/C++ 11.0 (16 bits and 32
+ * bits), Microsoft Visual C++ 6.0 (32 bit), Microsoft Visual Studio
+ * .net (VC7), Intel C++ 4.0, GNU gcc v3.3.3
+ *
+ * This file should be considered a work in progress. Suggestions for
+ * improvements, especially those which increase coverage are strongly
+ * encouraged.
+ *
+ * Acknowledgements
+ *
+ * The following people have made significant contributions to the
+ * development and testing of this file:
+ *
+ * Chris Howie
+ * John Steele Scott
+ * Dave Thorup
+ * John Dill
+ * Florian Wobbe
+ * Christopher Sean Morrison
+ * Mikkel Fahnoe Jorgensen
+ *
+ */
+
+#include <stddef.h>
+#include <limits.h>
+#include <signal.h>
+
+/*
+ * For gcc with _STDINT_H, fill in the PRINTF_INT*_MODIFIER macros, and
+ * do nothing else. On the Mac OS X version of gcc this is _STDINT_H_.
+ */
+
+#if ((defined(_MSC_VER) && _MSC_VER >= 1600) || (defined(__STDC__) && __STDC__ && defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) || (defined (__WATCOMC__) && (defined (_STDINT_H_INCLUDED) || __WATCOMC__ >= 1250)) || (defined(__GNUC__) && (__GNUC__ > 3 || defined(_STDINT_H) || defined(_STDINT_H_) || defined (__UINT_FAST64_TYPE__)) )) && !defined (_PSTDINT_H_INCLUDED)
+#include <stdint.h>
+#define _PSTDINT_H_INCLUDED
+# if defined(__GNUC__) && (defined(__x86_64__) || defined(__ppc64__)) && !(defined(__APPLE__) && defined(__MACH__))
+# ifndef PRINTF_INT64_MODIFIER
+# define PRINTF_INT64_MODIFIER "l"
+# endif
+# ifndef PRINTF_INT32_MODIFIER
+# define PRINTF_INT32_MODIFIER ""
+# endif
+# else
+# ifndef PRINTF_INT64_MODIFIER
+# define PRINTF_INT64_MODIFIER "ll"
+# endif
+# ifndef PRINTF_INT32_MODIFIER
+# if (UINT_MAX == UINT32_MAX)
+# define PRINTF_INT32_MODIFIER ""
+# else
+# define PRINTF_INT32_MODIFIER "l"
+# endif
+# endif
+# endif
+# ifndef PRINTF_INT16_MODIFIER
+# define PRINTF_INT16_MODIFIER "h"
+# endif
+# ifndef PRINTF_INTMAX_MODIFIER
+# define PRINTF_INTMAX_MODIFIER PRINTF_INT64_MODIFIER
+# endif
+# ifndef PRINTF_INT64_HEX_WIDTH
+# define PRINTF_INT64_HEX_WIDTH "16"
+# endif
+# ifndef PRINTF_UINT64_HEX_WIDTH
+# define PRINTF_UINT64_HEX_WIDTH "16"
+# endif
+# ifndef PRINTF_INT32_HEX_WIDTH
+# define PRINTF_INT32_HEX_WIDTH "8"
+# endif
+# ifndef PRINTF_UINT32_HEX_WIDTH
+# define PRINTF_UINT32_HEX_WIDTH "8"
+# endif
+# ifndef PRINTF_INT16_HEX_WIDTH
+# define PRINTF_INT16_HEX_WIDTH "4"
+# endif
+# ifndef PRINTF_UINT16_HEX_WIDTH
+# define PRINTF_UINT16_HEX_WIDTH "4"
+# endif
+# ifndef PRINTF_INT8_HEX_WIDTH
+# define PRINTF_INT8_HEX_WIDTH "2"
+# endif
+# ifndef PRINTF_UINT8_HEX_WIDTH
+# define PRINTF_UINT8_HEX_WIDTH "2"
+# endif
+# ifndef PRINTF_INT64_DEC_WIDTH
+# define PRINTF_INT64_DEC_WIDTH "19"
+# endif
+# ifndef PRINTF_UINT64_DEC_WIDTH
+# define PRINTF_UINT64_DEC_WIDTH "20"
+# endif
+# ifndef PRINTF_INT32_DEC_WIDTH
+# define PRINTF_INT32_DEC_WIDTH "10"
+# endif
+# ifndef PRINTF_UINT32_DEC_WIDTH
+# define PRINTF_UINT32_DEC_WIDTH "10"
+# endif
+# ifndef PRINTF_INT16_DEC_WIDTH
+# define PRINTF_INT16_DEC_WIDTH "5"
+# endif
+# ifndef PRINTF_UINT16_DEC_WIDTH
+# define PRINTF_UINT16_DEC_WIDTH "5"
+# endif
+# ifndef PRINTF_INT8_DEC_WIDTH
+# define PRINTF_INT8_DEC_WIDTH "3"
+# endif
+# ifndef PRINTF_UINT8_DEC_WIDTH
+# define PRINTF_UINT8_DEC_WIDTH "3"
+# endif
+# ifndef PRINTF_INTMAX_HEX_WIDTH
+# define PRINTF_INTMAX_HEX_WIDTH PRINTF_UINT64_HEX_WIDTH
+# endif
+# ifndef PRINTF_UINTMAX_HEX_WIDTH
+# define PRINTF_UINTMAX_HEX_WIDTH PRINTF_UINT64_HEX_WIDTH
+# endif
+# ifndef PRINTF_INTMAX_DEC_WIDTH
+# define PRINTF_INTMAX_DEC_WIDTH PRINTF_UINT64_DEC_WIDTH
+# endif
+# ifndef PRINTF_UINTMAX_DEC_WIDTH
+# define PRINTF_UINTMAX_DEC_WIDTH PRINTF_UINT64_DEC_WIDTH
+# endif
+
+/*
+ * Something really weird is going on with Open Watcom. Just pull some of
+ * these duplicated definitions from Open Watcom's stdint.h file for now.
+ */
+
+# if defined (__WATCOMC__) && __WATCOMC__ >= 1250
+# if !defined (INT64_C)
+# define INT64_C(x) (x + (INT64_MAX - INT64_MAX))
+# endif
+# if !defined (UINT64_C)
+# define UINT64_C(x) (x + (UINT64_MAX - UINT64_MAX))
+# endif
+# if !defined (INT32_C)
+# define INT32_C(x) (x + (INT32_MAX - INT32_MAX))
+# endif
+# if !defined (UINT32_C)
+# define UINT32_C(x) (x + (UINT32_MAX - UINT32_MAX))
+# endif
+# if !defined (INT16_C)
+# define INT16_C(x) (x)
+# endif
+# if !defined (UINT16_C)
+# define UINT16_C(x) (x)
+# endif
+# if !defined (INT8_C)
+# define INT8_C(x) (x)
+# endif
+# if !defined (UINT8_C)
+# define UINT8_C(x) (x)
+# endif
+# if !defined (UINT64_MAX)
+# define UINT64_MAX 18446744073709551615ULL
+# endif
+# if !defined (INT64_MAX)
+# define INT64_MAX 9223372036854775807LL
+# endif
+# if !defined (UINT32_MAX)
+# define UINT32_MAX 4294967295UL
+# endif
+# if !defined (INT32_MAX)
+# define INT32_MAX 2147483647L
+# endif
+# if !defined (INTMAX_MAX)
+# define INTMAX_MAX INT64_MAX
+# endif
+# if !defined (INTMAX_MIN)
+# define INTMAX_MIN INT64_MIN
+# endif
+# endif
+#endif
+
+#ifndef _PSTDINT_H_INCLUDED
+#define _PSTDINT_H_INCLUDED
+
+#ifndef SIZE_MAX
+# define SIZE_MAX (~(size_t)0)
+#endif
+
+/*
+ * Deduce the type assignments from limits.h under the assumption that
+ * integer sizes in bits are powers of 2, and follow the ANSI
+ * definitions.
+ */
+
+#ifndef UINT8_MAX
+# define UINT8_MAX 0xff
+#endif
+#if !defined(uint8_t) && !defined(_UINT8_T)
+# if (UCHAR_MAX == UINT8_MAX) || defined (S_SPLINT_S)
+ typedef unsigned char uint8_t;
+# define UINT8_C(v) ((uint8_t) v)
+# else
+# error "Platform not supported"
+# endif
+#endif
+
+#ifndef INT8_MAX
+# define INT8_MAX 0x7f
+#endif
+#ifndef INT8_MIN
+# define INT8_MIN INT8_C(0x80)
+#endif
+#if !defined(int8_t) && !defined(_INT8_T)
+# if (SCHAR_MAX == INT8_MAX) || defined (S_SPLINT_S)
+ typedef signed char int8_t;
+# define INT8_C(v) ((int8_t) v)
+# else
+# error "Platform not supported"
+# endif
+#endif
+
+#ifndef UINT16_MAX
+# define UINT16_MAX 0xffff
+#endif
+#if !defined(uint16_t) && !defined(_UINT16_T)
+#if (UINT_MAX == UINT16_MAX) || defined (S_SPLINT_S)
+ typedef unsigned int uint16_t;
+# ifndef PRINTF_INT16_MODIFIER
+# define PRINTF_INT16_MODIFIER ""
+# endif
+# define UINT16_C(v) ((uint16_t) (v))
+#elif (USHRT_MAX == UINT16_MAX)
+ typedef unsigned short uint16_t;
+# define UINT16_C(v) ((uint16_t) (v))
+# ifndef PRINTF_INT16_MODIFIER
+# define PRINTF_INT16_MODIFIER "h"
+# endif
+#else
+#error "Platform not supported"
+#endif
+#endif
+
+#ifndef INT16_MAX
+# define INT16_MAX 0x7fff
+#endif
+#ifndef INT16_MIN
+# define INT16_MIN INT16_C(0x8000)
+#endif
+#if !defined(int16_t) && !defined(_INT16_T)
+#if (INT_MAX == INT16_MAX) || defined (S_SPLINT_S)
+ typedef signed int int16_t;
+# define INT16_C(v) ((int16_t) (v))
+# ifndef PRINTF_INT16_MODIFIER
+# define PRINTF_INT16_MODIFIER ""
+# endif
+#elif (SHRT_MAX == INT16_MAX)
+ typedef signed short int16_t;
+# define INT16_C(v) ((int16_t) (v))
+# ifndef PRINTF_INT16_MODIFIER
+# define PRINTF_INT16_MODIFIER "h"
+# endif
+#else
+#error "Platform not supported"
+#endif
+#endif
+
+#ifndef UINT32_MAX
+# define UINT32_MAX (0xffffffffUL)
+#endif
+#if !defined(uint32_t) && !defined(_UINT32_T)
+#if (ULONG_MAX == UINT32_MAX) || defined (S_SPLINT_S)
+ typedef unsigned long uint32_t;
+# define UINT32_C(v) v ## UL
+# ifndef PRINTF_INT32_MODIFIER
+# define PRINTF_INT32_MODIFIER "l"
+# endif
+#elif (UINT_MAX == UINT32_MAX)
+ typedef unsigned int uint32_t;
+# ifndef PRINTF_INT32_MODIFIER
+# define PRINTF_INT32_MODIFIER ""
+# endif
+# define UINT32_C(v) v ## U
+#elif (USHRT_MAX == UINT32_MAX)
+ typedef unsigned short uint32_t;
+# define UINT32_C(v) ((unsigned short) (v))
+# ifndef PRINTF_INT32_MODIFIER
+# define PRINTF_INT32_MODIFIER ""
+# endif
+#else
+#error "Platform not supported"
+#endif
+#endif
+
+#ifndef INT32_MAX
+# define INT32_MAX (0x7fffffffL)
+#endif
+#ifndef INT32_MIN
+# define INT32_MIN INT32_C(0x80000000)
+#endif
+#if !defined(int32_t) && !defined(_INT32_T)
+#if (LONG_MAX == INT32_MAX) || defined (S_SPLINT_S)
+ typedef signed long int32_t;
+# define INT32_C(v) v ## L
+# ifndef PRINTF_INT32_MODIFIER
+# define PRINTF_INT32_MODIFIER "l"
+# endif
+#elif (INT_MAX == INT32_MAX)
+ typedef signed int int32_t;
+# define INT32_C(v) v
+# ifndef PRINTF_INT32_MODIFIER
+# define PRINTF_INT32_MODIFIER ""
+# endif
+#elif (SHRT_MAX == INT32_MAX)
+ typedef signed short int32_t;
+# define INT32_C(v) ((short) (v))
+# ifndef PRINTF_INT32_MODIFIER
+# define PRINTF_INT32_MODIFIER ""
+# endif
+#else
+#error "Platform not supported"
+#endif
+#endif
+
+/*
+ * The macro stdint_int64_defined is temporarily used to record
+ * whether or not 64 integer support is available. It must be
+ * defined for any 64 integer extensions for new platforms that are
+ * added.
+ */
+
+#undef stdint_int64_defined
+#if (defined(__STDC__) && defined(__STDC_VERSION__)) || defined (S_SPLINT_S)
+# if (__STDC__ && __STDC_VERSION__ >= 199901L) || defined (S_SPLINT_S)
+# define stdint_int64_defined
+ typedef long long int64_t;
+ typedef unsigned long long uint64_t;
+# define UINT64_C(v) v ## ULL
+# define INT64_C(v) v ## LL
+# ifndef PRINTF_INT64_MODIFIER
+# define PRINTF_INT64_MODIFIER "ll"
+# endif
+# endif
+#endif
+
+#if !defined (stdint_int64_defined)
+# if defined(__GNUC__)
+# define stdint_int64_defined
+ __extension__ typedef long long int64_t;
+ __extension__ typedef unsigned long long uint64_t;
+# define UINT64_C(v) v ## ULL
+# define INT64_C(v) v ## LL
+# ifndef PRINTF_INT64_MODIFIER
+# define PRINTF_INT64_MODIFIER "ll"
+# endif
+# elif defined(__MWERKS__) || defined (__SUNPRO_C) || defined (__SUNPRO_CC) || defined (__APPLE_CC__) || defined (_LONG_LONG) || defined (_CRAYC) || defined (S_SPLINT_S)
+# define stdint_int64_defined
+ typedef long long int64_t;
+ typedef unsigned long long uint64_t;
+# define UINT64_C(v) v ## ULL
+# define INT64_C(v) v ## LL
+# ifndef PRINTF_INT64_MODIFIER
+# define PRINTF_INT64_MODIFIER "ll"
+# endif
+# elif (defined(__WATCOMC__) && defined(__WATCOM_INT64__)) || (defined(_MSC_VER) && _INTEGRAL_MAX_BITS >= 64) || (defined (__BORLANDC__) && __BORLANDC__ > 0x460) || defined (__alpha) || defined (__DECC)
+# define stdint_int64_defined
+ typedef __int64 int64_t;
+ typedef unsigned __int64 uint64_t;
+# define UINT64_C(v) v ## UI64
+# define INT64_C(v) v ## I64
+# ifndef PRINTF_INT64_MODIFIER
+# define PRINTF_INT64_MODIFIER "I64"
+# endif
+# endif
+#endif
+
+#if !defined (LONG_LONG_MAX) && defined (INT64_C)
+# define LONG_LONG_MAX INT64_C (9223372036854775807)
+#endif
+#if !defined (ULONG_LONG_MAX) && defined (UINT64_C)
+# define ULONG_LONG_MAX UINT64_C (18446744073709551615)
+#endif
+
+#if !defined (INT64_MAX) && defined (INT64_C)
+# define INT64_MAX INT64_C (9223372036854775807)
+#endif
+#if !defined (INT64_MIN) && defined (INT64_C)
+/* avoid the out-of-range literal -9223372036854775808 */
+# define INT64_MIN (-INT64_C (9223372036854775807) - 1)
+#endif
+#if !defined (UINT64_MAX) && defined (INT64_C)
+# define UINT64_MAX UINT64_C (18446744073709551615)
+#endif
+
+/*
+ * Width of hexadecimal for number field.
+ */
+
+#ifndef PRINTF_INT64_HEX_WIDTH
+# define PRINTF_INT64_HEX_WIDTH "16"
+#endif
+#ifndef PRINTF_INT32_HEX_WIDTH
+# define PRINTF_INT32_HEX_WIDTH "8"
+#endif
+#ifndef PRINTF_INT16_HEX_WIDTH
+# define PRINTF_INT16_HEX_WIDTH "4"
+#endif
+#ifndef PRINTF_INT8_HEX_WIDTH
+# define PRINTF_INT8_HEX_WIDTH "2"
+#endif
+#ifndef PRINTF_INT64_DEC_WIDTH
+# define PRINTF_INT64_DEC_WIDTH "19"
+#endif
+#ifndef PRINTF_INT32_DEC_WIDTH
+# define PRINTF_INT32_DEC_WIDTH "10"
+#endif
+#ifndef PRINTF_INT16_DEC_WIDTH
+# define PRINTF_INT16_DEC_WIDTH "5"
+#endif
+#ifndef PRINTF_INT8_DEC_WIDTH
+# define PRINTF_INT8_DEC_WIDTH "3"
+#endif
+#ifndef PRINTF_UINT64_DEC_WIDTH
+# define PRINTF_UINT64_DEC_WIDTH "20"
+#endif
+#ifndef PRINTF_UINT32_DEC_WIDTH
+# define PRINTF_UINT32_DEC_WIDTH "10"
+#endif
+#ifndef PRINTF_UINT16_DEC_WIDTH
+# define PRINTF_UINT16_DEC_WIDTH "5"
+#endif
+#ifndef PRINTF_UINT8_DEC_WIDTH
+# define PRINTF_UINT8_DEC_WIDTH "3"
+#endif
+
+/*
+ * OK, let's not worry about 128 bit integers for now. Moore's law says
+ * we don't need to worry about that until about 2040, at which point
+ * we'll have bigger things to worry about.
+ */
+
+#ifdef stdint_int64_defined
+ typedef int64_t intmax_t;
+ typedef uint64_t uintmax_t;
+# define INTMAX_MAX INT64_MAX
+# define INTMAX_MIN INT64_MIN
+# define UINTMAX_MAX UINT64_MAX
+# define UINTMAX_C(v) UINT64_C(v)
+# define INTMAX_C(v) INT64_C(v)
+# ifndef PRINTF_INTMAX_MODIFIER
+# define PRINTF_INTMAX_MODIFIER PRINTF_INT64_MODIFIER
+# endif
+# ifndef PRINTF_INTMAX_HEX_WIDTH
+# define PRINTF_INTMAX_HEX_WIDTH PRINTF_INT64_HEX_WIDTH
+# endif
+# ifndef PRINTF_INTMAX_DEC_WIDTH
+# define PRINTF_INTMAX_DEC_WIDTH PRINTF_INT64_DEC_WIDTH
+# endif
+#else
+ typedef int32_t intmax_t;
+ typedef uint32_t uintmax_t;
+# define INTMAX_MAX INT32_MAX
+# define UINTMAX_MAX UINT32_MAX
+# define UINTMAX_C(v) UINT32_C(v)
+# define INTMAX_C(v) INT32_C(v)
+# ifndef PRINTF_INTMAX_MODIFIER
+# define PRINTF_INTMAX_MODIFIER PRINTF_INT32_MODIFIER
+# endif
+# ifndef PRINTF_INTMAX_HEX_WIDTH
+# define PRINTF_INTMAX_HEX_WIDTH PRINTF_INT32_HEX_WIDTH
+# endif
+# ifndef PRINTF_INTMAX_DEC_WIDTH
+# define PRINTF_INTMAX_DEC_WIDTH PRINTF_INT32_DEC_WIDTH
+# endif
+#endif
+
+/*
+ * Because this file currently only supports platforms which have
+ * precise powers of 2 as bit sizes for the default integers, the
+ * least definitions are all trivial. It's possible that a future
+ * version of this file could have different definitions.
+ */
+
+#ifndef stdint_least_defined
+ typedef int8_t int_least8_t;
+ typedef uint8_t uint_least8_t;
+ typedef int16_t int_least16_t;
+ typedef uint16_t uint_least16_t;
+ typedef int32_t int_least32_t;
+ typedef uint32_t uint_least32_t;
+# define PRINTF_LEAST32_MODIFIER PRINTF_INT32_MODIFIER
+# define PRINTF_LEAST16_MODIFIER PRINTF_INT16_MODIFIER
+# define UINT_LEAST8_MAX UINT8_MAX
+# define INT_LEAST8_MAX INT8_MAX
+# define UINT_LEAST16_MAX UINT16_MAX
+# define INT_LEAST16_MAX INT16_MAX
+# define UINT_LEAST32_MAX UINT32_MAX
+# define INT_LEAST32_MAX INT32_MAX
+# define INT_LEAST8_MIN INT8_MIN
+# define INT_LEAST16_MIN INT16_MIN
+# define INT_LEAST32_MIN INT32_MIN
+# ifdef stdint_int64_defined
+ typedef int64_t int_least64_t;
+ typedef uint64_t uint_least64_t;
+# define PRINTF_LEAST64_MODIFIER PRINTF_INT64_MODIFIER
+# define UINT_LEAST64_MAX UINT64_MAX
+# define INT_LEAST64_MAX INT64_MAX
+# define INT_LEAST64_MIN INT64_MIN
+# endif
+#endif
+#undef stdint_least_defined
+
+/*
+ * The ANSI C committee pretending to know or specify anything about
+ * performance is the epitome of misguided arrogance. The mandate of
+ * this file is to *ONLY* ever support that absolute minimum
+ * definition of the fast integer types, for compatibility purposes.
+ * No extensions, and no attempt to suggest what may or may not be a
+ * faster integer type will ever be made in this file. Developers are
+ * warned to stay away from these types when using this or any other
+ * stdint.h.
+ */
+
+typedef int_least8_t int_fast8_t;
+typedef uint_least8_t uint_fast8_t;
+typedef int_least16_t int_fast16_t;
+typedef uint_least16_t uint_fast16_t;
+typedef int_least32_t int_fast32_t;
+typedef uint_least32_t uint_fast32_t;
+#define UINT_FAST8_MAX UINT_LEAST8_MAX
+#define INT_FAST8_MAX INT_LEAST8_MAX
+#define UINT_FAST16_MAX UINT_LEAST16_MAX
+#define INT_FAST16_MAX INT_LEAST16_MAX
+#define UINT_FAST32_MAX UINT_LEAST32_MAX
+#define INT_FAST32_MAX INT_LEAST32_MAX
+#define INT_FAST8_MIN INT_LEAST8_MIN
+#define INT_FAST16_MIN INT_LEAST16_MIN
+#define INT_FAST32_MIN INT_LEAST32_MIN
+#ifdef stdint_int64_defined
+ typedef int_least64_t int_fast64_t;
+ typedef uint_least64_t uint_fast64_t;
+# define UINT_FAST64_MAX UINT_LEAST64_MAX
+# define INT_FAST64_MAX INT_LEAST64_MAX
+# define INT_FAST64_MIN INT_LEAST64_MIN
+#endif
+
+#undef stdint_int64_defined
+
+/*
+ * Whatever piecemeal, per compiler thing we can do about the wchar_t
+ * type limits.
+ */
+
+#if defined(__WATCOMC__) || defined(_MSC_VER) || defined (__GNUC__)
+# include <wchar.h>
+# ifndef WCHAR_MIN
+# define WCHAR_MIN 0
+# endif
+# ifndef WCHAR_MAX
+# define WCHAR_MAX ((wchar_t)-1)
+# endif
+#endif
+
+/*
+ * Whatever piecemeal, per compiler/platform thing we can do about the
+ * (u)intptr_t types and limits.
+ */
+
+#if (defined (_MSC_VER) && defined (_UINTPTR_T_DEFINED)) || defined (_UINTPTR_T)
+# define STDINT_H_UINTPTR_T_DEFINED
+#endif
+
+#ifndef STDINT_H_UINTPTR_T_DEFINED
+# if defined (__alpha__) || defined (__ia64__) || defined (__x86_64__) || defined (_WIN64) || defined (__ppc64__)
+# define stdint_intptr_bits 64
+# elif defined (__WATCOMC__) || defined (__TURBOC__)
+# if defined(__TINY__) || defined(__SMALL__) || defined(__MEDIUM__)
+# define stdint_intptr_bits 16
+# else
+# define stdint_intptr_bits 32
+# endif
+# elif defined (__i386__) || defined (_WIN32) || defined (WIN32) || defined (__ppc64__)
+# define stdint_intptr_bits 32
+# elif defined (__INTEL_COMPILER)
+/* TODO -- what did Intel do about x86-64? */
+# else
+/* #error "This platform might not be supported yet" */
+# endif
+
+# ifdef stdint_intptr_bits
+# define stdint_intptr_glue3_i(a,b,c) a##b##c
+# define stdint_intptr_glue3(a,b,c) stdint_intptr_glue3_i(a,b,c)
+# ifndef PRINTF_INTPTR_MODIFIER
+# define PRINTF_INTPTR_MODIFIER stdint_intptr_glue3(PRINTF_INT,stdint_intptr_bits,_MODIFIER)
+# endif
+# ifndef PTRDIFF_MAX
+# define PTRDIFF_MAX stdint_intptr_glue3(INT,stdint_intptr_bits,_MAX)
+# endif
+# ifndef PTRDIFF_MIN
+# define PTRDIFF_MIN stdint_intptr_glue3(INT,stdint_intptr_bits,_MIN)
+# endif
+# ifndef UINTPTR_MAX
+# define UINTPTR_MAX stdint_intptr_glue3(UINT,stdint_intptr_bits,_MAX)
+# endif
+# ifndef INTPTR_MAX
+# define INTPTR_MAX stdint_intptr_glue3(INT,stdint_intptr_bits,_MAX)
+# endif
+# ifndef INTPTR_MIN
+# define INTPTR_MIN stdint_intptr_glue3(INT,stdint_intptr_bits,_MIN)
+# endif
+# ifndef INTPTR_C
+# define INTPTR_C(x) stdint_intptr_glue3(INT,stdint_intptr_bits,_C)(x)
+# endif
+# ifndef UINTPTR_C
+# define UINTPTR_C(x) stdint_intptr_glue3(UINT,stdint_intptr_bits,_C)(x)
+# endif
+ typedef stdint_intptr_glue3(uint,stdint_intptr_bits,_t) uintptr_t;
+ typedef stdint_intptr_glue3( int,stdint_intptr_bits,_t) intptr_t;
+# else
+/* TODO -- The following is likely wrong for some platforms, and does
+ nothing for the definition of uintptr_t. */
+ typedef ptrdiff_t intptr_t;
+# endif
+# define STDINT_H_UINTPTR_T_DEFINED
+#endif
+
+/*
+ * Assumes sig_atomic_t is signed and we have a 2s complement machine.
+ */
+
+#ifndef SIG_ATOMIC_MAX
+# define SIG_ATOMIC_MAX ((((sig_atomic_t) 1) << (sizeof (sig_atomic_t)*CHAR_BIT-1)) - 1)
+#endif
+
+#endif
+
+#if defined (__TEST_PSTDINT_FOR_CORRECTNESS)
+
+/*
+ * Please compile with the maximum warning settings to make sure macros are
+ * not defined more than once.
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+
+#define glue3_aux(x,y,z) x ## y ## z
+#define glue3(x,y,z) glue3_aux(x,y,z)
+
+#define DECLU(bits) glue3(uint,bits,_t) glue3(u,bits,) = glue3(UINT,bits,_C) (0);
+#define DECLI(bits) glue3(int,bits,_t) glue3(i,bits,) = glue3(INT,bits,_C) (0);
+
+#define DECL(us,bits) glue3(DECL,us,) (bits)
+
+#define TESTUMAX(bits) glue3(u,bits,) = ~glue3(u,bits,); if (glue3(UINT,bits,_MAX) != glue3(u,bits,)) printf ("Something wrong with UINT%d_MAX\n", bits)
+
+#define REPORTERROR(msg) { err_n++; if (err_first <= 0) err_first = __LINE__; printf msg; }
+
+int main () {
+ int err_n = 0;
+ int err_first = 0;
+ DECL(I,8)
+ DECL(U,8)
+ DECL(I,16)
+ DECL(U,16)
+ DECL(I,32)
+ DECL(U,32)
+#ifdef INT64_MAX
+ DECL(I,64)
+ DECL(U,64)
+#endif
+ intmax_t imax = INTMAX_C(0);
+ uintmax_t umax = UINTMAX_C(0);
+ char str0[256], str1[256];
+
+ sprintf (str0, "%" PRINTF_INT32_MODIFIER "d", INT32_C(2147483647));
+ if (0 != strcmp (str0, "2147483647")) REPORTERROR (("Something wrong with PRINTF_INT32_MODIFIER : %s\n", str0));
+ if (atoi(PRINTF_INT32_DEC_WIDTH) != (int) strlen(str0)) REPORTERROR (("Something wrong with PRINTF_INT32_DEC_WIDTH : %s\n", PRINTF_INT32_DEC_WIDTH));
+ sprintf (str0, "%" PRINTF_INT32_MODIFIER "u", UINT32_C(4294967295));
+ if (0 != strcmp (str0, "4294967295")) REPORTERROR (("Something wrong with PRINTF_INT32_MODIFIER : %s\n", str0));
+ if (atoi(PRINTF_UINT32_DEC_WIDTH) != (int) strlen(str0)) REPORTERROR (("Something wrong with PRINTF_UINT32_DEC_WIDTH : %s\n", PRINTF_UINT32_DEC_WIDTH));
+#ifdef INT64_MAX
+ sprintf (str1, "%" PRINTF_INT64_MODIFIER "d", INT64_C(9223372036854775807));
+ if (0 != strcmp (str1, "9223372036854775807")) REPORTERROR (("Something wrong with PRINTF_INT32_MODIFIER : %s\n", str1));
+ if (atoi(PRINTF_INT64_DEC_WIDTH) != (int) strlen(str1)) REPORTERROR (("Something wrong with PRINTF_INT64_DEC_WIDTH : %s, %d\n", PRINTF_INT64_DEC_WIDTH, (int) strlen(str1)));
+ sprintf (str1, "%" PRINTF_INT64_MODIFIER "u", UINT64_C(18446744073709550591));
+ if (0 != strcmp (str1, "18446744073709550591")) REPORTERROR (("Something wrong with PRINTF_INT32_MODIFIER : %s\n", str1));
+ if (atoi(PRINTF_UINT64_DEC_WIDTH) != (int) strlen(str1)) REPORTERROR (("Something wrong with PRINTF_UINT64_DEC_WIDTH : %s, %d\n", PRINTF_UINT64_DEC_WIDTH, (int) strlen(str1)));
+#endif
+
+ sprintf (str0, "%d %x\n", 0, ~0);
+
+ sprintf (str1, "%d %x\n", i8, ~0);
+ if (0 != strcmp (str0, str1)) REPORTERROR (("Something wrong with i8 : %s\n", str1));
+ sprintf (str1, "%u %x\n", u8, ~0);
+ if (0 != strcmp (str0, str1)) REPORTERROR (("Something wrong with u8 : %s\n", str1));
+ sprintf (str1, "%d %x\n", i16, ~0);
+ if (0 != strcmp (str0, str1)) REPORTERROR (("Something wrong with i16 : %s\n", str1));
+ sprintf (str1, "%u %x\n", u16, ~0);
+ if (0 != strcmp (str0, str1)) REPORTERROR (("Something wrong with u16 : %s\n", str1));
+ sprintf (str1, "%" PRINTF_INT32_MODIFIER "d %x\n", i32, ~0);
+ if (0 != strcmp (str0, str1)) REPORTERROR (("Something wrong with i32 : %s\n", str1));
+ sprintf (str1, "%" PRINTF_INT32_MODIFIER "u %x\n", u32, ~0);
+ if (0 != strcmp (str0, str1)) REPORTERROR (("Something wrong with u32 : %s\n", str1));
+#ifdef INT64_MAX
+ sprintf (str1, "%" PRINTF_INT64_MODIFIER "d %x\n", i64, ~0);
+ if (0 != strcmp (str0, str1)) REPORTERROR (("Something wrong with i64 : %s\n", str1));
+#endif
+ sprintf (str1, "%" PRINTF_INTMAX_MODIFIER "d %x\n", imax, ~0);
+ if (0 != strcmp (str0, str1)) REPORTERROR (("Something wrong with imax : %s\n", str1));
+ sprintf (str1, "%" PRINTF_INTMAX_MODIFIER "u %x\n", umax, ~0);
+ if (0 != strcmp (str0, str1)) REPORTERROR (("Something wrong with umax : %s\n", str1));
+
+ TESTUMAX(8);
+ TESTUMAX(16);
+ TESTUMAX(32);
+#ifdef INT64_MAX
+ TESTUMAX(64);
+#endif
+
+#define STR(v) #v
+#define Q(v) printf ("sizeof " STR(v) " = %u\n", (unsigned) sizeof (v));
+ if (err_n) {
+ printf ("pstdint.h is not correct. Please use sizes below to correct it:\n");
+ }
+
+ Q(int)
+ Q(unsigned)
+ Q(long int)
+ Q(short int)
+ Q(int8_t)
+ Q(int16_t)
+ Q(int32_t)
+#ifdef INT64_MAX
+ Q(int64_t)
+#endif
+
+ return EXIT_SUCCESS;
+}
+
+#endif
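The PRINTF_*_MODIFIER strings are meant to be spliced between '%' and the conversion character so the length modifier always matches whichever typedef this header picked. A minimal sketch, assuming INT64_MAX ended up defined and the header is reachable as "pstdint.h":

    #include <stdio.h>
    #include "pstdint.h"  /* path is an assumption */

    void dump(uint64_t id, int32_t delta)
    {
        /* the modifiers expand to "", "l", "ll" or "I64" as appropriate */
        printf("id=%" PRINTF_INT64_MODIFIER "u delta=%" PRINTF_INT32_MODIFIER "d\n",
               id, delta);
    }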
diff --git a/nostrdb/flatcc/portable/punaligned.h b/nostrdb/flatcc/portable/punaligned.h
@@ -0,0 +1,190 @@
+/*
+ * Copyright (c) 2016 Mikkel Fahnøe Jørgensen, dvide.com
+ *
+ * (MIT License)
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ * - The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ * - The Software is provided "as is", without warranty of any kind, express or
+ * implied, including but not limited to the warranties of merchantability,
+ * fitness for a particular purpose and noninfringement. In no event shall the
+ * authors or copyright holders be liable for any claim, damages or other
+ * liability, whether in an action of contract, tort or otherwise, arising from,
+ * out of or in connection with the Software or the use or other dealings in the
+ * Software.
+ */
+
+#ifndef PUNALIGNED_H
+#define PUNALIGNED_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef PORTABLE_UNALIGNED_ACCESS
+
+#if defined(__i386__) || defined(__x86_64__) || defined(_M_IX86) || defined(_M_X64)
+#define PORTABLE_UNALIGNED_ACCESS 1
+#else
+#define PORTABLE_UNALIGNED_ACCESS 0
+#endif
+
+#endif
+
+/* `unaligned_read_16` might not be defined if endianness was not determined. */
+#if !defined(unaligned_read_le16toh)
+
+#include "pendian.h"
+
+#ifndef UINT8_MAX
+#include <stdint.h>
+#endif
+
+#if PORTABLE_UNALIGNED_ACCESS
+
+#define unaligned_read_16(p) (*(uint16_t*)(p))
+#define unaligned_read_32(p) (*(uint32_t*)(p))
+#define unaligned_read_64(p) (*(uint64_t*)(p))
+
+#define unaligned_read_le16toh(p) le16toh(*(uint16_t*)(p))
+#define unaligned_read_le32toh(p) le32toh(*(uint32_t*)(p))
+#define unaligned_read_le64toh(p) le64toh(*(uint64_t*)(p))
+
+#define unaligned_read_be16toh(p) be16toh(*(uint16_t*)(p))
+#define unaligned_read_be32toh(p) be32toh(*(uint32_t*)(p))
+#define unaligned_read_be64toh(p) be64toh(*(uint64_t*)(p))
+
+#define unaligned_write_16(p, v) (*(uint16_t*)(p) = (uint16_t)(v))
+#define unaligned_write_32(p, v) (*(uint32_t*)(p) = (uint32_t)(v))
+#define unaligned_write_64(p, v) (*(uint64_t*)(p) = (uint64_t)(v))
+
+#define unaligned_write_htole16(p, v) (*(uint16_t*)(p) = htole16(v))
+#define unaligned_write_htole32(p, v) (*(uint32_t*)(p) = htole32(v))
+#define unaligned_write_htole64(p, v) (*(uint64_t*)(p) = htole64(v))
+
+#define unaligned_write_htobe16(p, v) (*(uint16_t*)(p) = htobe16(v))
+#define unaligned_write_htobe32(p, v) (*(uint32_t*)(p) = htobe32(v))
+#define unaligned_write_htobe64(p, v) (*(uint64_t*)(p) = htobe64(v))
+
+#else
+
+#define unaligned_read_le16toh(p) ( \
+ (((uint16_t)(((uint8_t *)(p))[0])) << 0) | \
+ (((uint16_t)(((uint8_t *)(p))[1])) << 8))
+
+#define unaligned_read_le32toh(p) ( \
+ (((uint32_t)(((uint8_t *)(p))[0])) << 0) | \
+ (((uint32_t)(((uint8_t *)(p))[1])) << 8) | \
+ (((uint32_t)(((uint8_t *)(p))[2])) << 16) | \
+ (((uint32_t)(((uint8_t *)(p))[3])) << 24))
+
+#define unaligned_read_le64toh(p) ( \
+ (((uint64_t)(((uint8_t *)(p))[0])) << 0) | \
+ (((uint64_t)(((uint8_t *)(p))[1])) << 8) | \
+ (((uint64_t)(((uint8_t *)(p))[2])) << 16) | \
+ (((uint64_t)(((uint8_t *)(p))[3])) << 24) | \
+ (((uint64_t)(((uint8_t *)(p))[4])) << 32) | \
+ (((uint64_t)(((uint8_t *)(p))[5])) << 40) | \
+ (((uint64_t)(((uint8_t *)(p))[6])) << 48) | \
+ (((uint64_t)(((uint8_t *)(p))[7])) << 56))
+
+#define unaligned_read_be16toh(p) ( \
+ (((uint16_t)(((uint8_t *)(p))[0])) << 8) | \
+ (((uint16_t)(((uint8_t *)(p))[1])) << 0))
+
+#define unaligned_read_be32toh(p) ( \
+ (((uint32_t)(((uint8_t *)(p))[0])) << 24) | \
+ (((uint32_t)(((uint8_t *)(p))[1])) << 16) | \
+ (((uint32_t)(((uint8_t *)(p))[2])) << 8) | \
+ (((uint32_t)(((uint8_t *)(p))[3])) << 0))
+
+#define unaligned_read_be64toh(p) ( \
+ (((uint64_t)(((uint8_t *)(p))[0])) << 56) | \
+ (((uint64_t)(((uint8_t *)(p))[1])) << 48) | \
+ (((uint64_t)(((uint8_t *)(p))[2])) << 40) | \
+ (((uint64_t)(((uint8_t *)(p))[3])) << 32) | \
+ (((uint64_t)(((uint8_t *)(p))[4])) << 24) | \
+ (((uint64_t)(((uint8_t *)(p))[5])) << 16) | \
+ (((uint64_t)(((uint8_t *)(p))[6])) << 8) | \
+ (((uint64_t)(((uint8_t *)(p))[7])) << 0))
+
+#define unaligned_write_htole16(p, v) do { \
+ ((uint8_t *)(p))[0] = (uint8_t)(((uint16_t)(v)) >> 0); \
+ ((uint8_t *)(p))[1] = (uint8_t)(((uint16_t)(v)) >> 8); \
+ } while (0)
+
+#define unaligned_write_htole32(p, v) do { \
+ ((uint8_t *)(p))[0] = (uint8_t)(((uint32_t)(v)) >> 0); \
+ ((uint8_t *)(p))[1] = (uint8_t)(((uint32_t)(v)) >> 8); \
+ ((uint8_t *)(p))[2] = (uint8_t)(((uint32_t)(v)) >> 16); \
+ ((uint8_t *)(p))[3] = (uint8_t)(((uint32_t)(v)) >> 24); \
+ } while (0)
+
+#define unaligned_write_htole64(p, v) do { \
+ ((uint8_t *)(p))[0] = (uint8_t)(((uint64_t)(v)) >> 0); \
+ ((uint8_t *)(p))[1] = (uint8_t)(((uint64_t)(v)) >> 8); \
+ ((uint8_t *)(p))[2] = (uint8_t)(((uint64_t)(v)) >> 16); \
+ ((uint8_t *)(p))[3] = (uint8_t)(((uint64_t)(v)) >> 24); \
+ ((uint8_t *)(p))[4] = (uint8_t)(((uint64_t)(v)) >> 32); \
+ ((uint8_t *)(p))[5] = (uint8_t)(((uint64_t)(v)) >> 40); \
+ ((uint8_t *)(p))[6] = (uint8_t)(((uint64_t)(v)) >> 48); \
+ ((uint8_t *)(p))[7] = (uint8_t)(((uint64_t)(v)) >> 56); \
+ } while (0)
+
+#define unaligned_write_htobe16(p, v) do { \
+ ((uint8_t *)(p))[0] = (uint8_t)(((uint16_t)(v)) >> 8); \
+ ((uint8_t *)(p))[1] = (uint8_t)(((uint16_t)(v)) >> 0); \
+ } while (0)
+
+#define unaligned_write_htobe32(p, v) do { \
+ ((uint8_t *)(p))[0] = (uint8_t)(((uint32_t)(v)) >> 24); \
+ ((uint8_t *)(p))[1] = (uint8_t)(((uint32_t)(v)) >> 16); \
+ ((uint8_t *)(p))[2] = (uint8_t)(((uint32_t)(v)) >> 8); \
+ ((uint8_t *)(p))[3] = (uint8_t)(((uint32_t)(v)) >> 0); \
+ } while (0)
+
+#define unaligned_write_htobe64(p, v) do { \
+ ((uint8_t *)(p))[0] = (uint8_t)(((uint64_t)(v)) >> 56); \
+ ((uint8_t *)(p))[1] = (uint8_t)(((uint64_t)(v)) >> 48); \
+ ((uint8_t *)(p))[2] = (uint8_t)(((uint64_t)(v)) >> 40); \
+ ((uint8_t *)(p))[3] = (uint8_t)(((uint64_t)(v)) >> 32); \
+ ((uint8_t *)(p))[4] = (uint8_t)(((uint64_t)(v)) >> 24); \
+ ((uint8_t *)(p))[5] = (uint8_t)(((uint64_t)(v)) >> 16); \
+ ((uint8_t *)(p))[6] = (uint8_t)(((uint64_t)(v)) >> 8); \
+ ((uint8_t *)(p))[7] = (uint8_t)(((uint64_t)(v)) >> 0); \
+ } while (0)
+
+#if __LITTLE_ENDIAN__
+#define unaligned_read_16(p) unaligned_read_le16toh(p)
+#define unaligned_read_32(p) unaligned_read_le32toh(p)
+#define unaligned_read_64(p) unaligned_read_le64toh(p)
+
+#define unaligned_write_16(p, v) unaligned_write_htole16(p, v)
+#define unaligned_write_32(p, v) unaligned_write_htole32(p, v)
+#define unaligned_write_64(p, v) unaligned_write_htole64(p, v)
+#endif
+
+#if __BIG_ENDIAN__
+#define unaligned_read_16(p) unaligned_read_be16toh(p)
+#define unaligned_read_32(p) unaligned_read_be32toh(p)
+#define unaligned_read_64(p) unaligned_read_be64toh(p)
+
+#define unaligned_write_16(p, v) unaligned_write_htobe16(p, v)
+#define unaligned_write_32(p, v) unaligned_write_htobe32(p, v)
+#define unaligned_write_64(p, v) unaligned_write_htobe64(p, v)
+#endif
+
+#endif
+
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* PUNALIGNED_H */
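The typical use case is reading and writing little-endian wire formats at byte offsets that may not be naturally aligned; the same source works on both the direct-access branch and the byte-by-byte fallback. A brief sketch, assuming the header is reachable as "punaligned.h":

    #include <stdint.h>
    #include "punaligned.h"  /* path is an assumption */

    /* Read a 32-bit little-endian length at offset 1, which is
       misaligned whenever buf itself is 4-byte aligned. */
    uint32_t read_len(const uint8_t *buf)
    {
        return unaligned_read_le32toh(buf + 1);
    }

    void write_len(uint8_t *buf, uint32_t len)
    {
        unaligned_write_htole32(buf + 1, len);
    }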
diff --git a/nostrdb/flatcc/portable/pversion.h b/nostrdb/flatcc/portable/pversion.h
@@ -0,0 +1,6 @@
+#define PORTABLE_VERSION_TEXT "0.2.6-pre"
+#define PORTABLE_VERSION_MAJOR 0
+#define PORTABLE_VERSION_MINOR 2
+#define PORTABLE_VERSION_PATCH 6
+/* 1 or 0 */
+#define PORTABLE_VERSION_RELEASED 0
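These constants let consumers reject an outdated snapshot of the portable library at compile time; a hedged sketch (the include path is an assumption):

    #include "pversion.h"  /* path is an assumption */

    #if (PORTABLE_VERSION_MAJOR == 0) && (PORTABLE_VERSION_MINOR < 2)
    #error "portable library 0.2 or newer required"
    #endif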
diff --git a/nostrdb/flatcc/portable/pwarnings.h b/nostrdb/flatcc/portable/pwarnings.h
@@ -0,0 +1,52 @@
+#ifndef PWARNINGS_H
+#define PWARNINGS_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * See also pdiagnostics.h headers for per file control of common
+ * warnings.
+ *
+ * This file is intended for global disabling of warnings that shouldn't
+ * be present in C11 or perhaps C99, or a generally just noise where
+ * recent clang / gcc compile cleanly with high warning levels.
+ */
+
+#if defined(_MSC_VER)
+/* Needed when flagging code in or out and more. */
+#pragma warning(disable: 4127) /* conditional expression is constant */
+/* happens also in MS's own headers. */
+#pragma warning(disable: 4668) /* preprocessor name not defined */
+/* MSVC does not respect double parenthesis for intent */
+#pragma warning(disable: 4706) /* assignment within conditional expression */
+/* `inline` only advisory anyway. */
+#pragma warning(disable: 4710) /* function not inlined */
+/* Well, we don't intend to add the padding manually. */
+#pragma warning(disable: 4820) /* x bytes padding added in struct */
+
+/*
+ * Don't warn that fopen etc. are unsafe
+ *
+ * Define a compiler flag like `-D_CRT_SECURE_NO_WARNINGS` in the build.
+ * For some reason it doesn't work when defined here.
+ *
+ * #define _CRT_SECURE_NO_WARNINGS
+ */
+
+/*
+ * Anonymous union in struct is valid in C11 and has been supported in
+ * GCC and Clang for a while, but it is not C99. MSVC also handles it,
+ * but warns. Truly portable code should perhaps not use this feature,
+ * but this is not the place to complain about it.
+ */
+#pragma warning(disable: 4201) /* nonstandard extension used: nameless struct/union */
+
+#endif /* _MSC_VER */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* PWARNINGS_H */
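Since the pragmas take effect from the point of inclusion onward, this header is presumably meant to be the first include in a translation unit so that even warnings triggered inside system headers are silenced. A minimal sketch (the include path is an assumption):

    /* first include, so the MSVC warning pragmas cover everything below */
    #include "pwarnings.h"  /* path is an assumption */
    #include <stdio.h>

    int main(void)
    {
        printf("builds warning-clean at high MSVC warning levels\n");
        return 0;
    }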
diff --git a/nostrdb/flatcc/reflection/README b/nostrdb/flatcc/reflection/README
@@ -0,0 +1,19 @@
+Generated by flatcc
+
+Keep checked in - needed by flatcc to generate binary schema.
+
+NOTE TO CONTRIBUTORS: DO NOT EDIT THESE FILES BY HAND
+
+If you need to change anything here, it is done in the code generator,
+possibly followed by running `reflection/generate_code.sh` from the
+project root. But please only do this for testing; do not include the
+generated files in a pull request unless agreed otherwise, and if so,
+do it in a separate commit.
+
+Normally new reflection code is generated during a release, which also
+updates the version number in comments, and there is no reason to update
+reflection on every commit unless it breaks something fundamentally.
+
+There is a build option `FLATCC_REFLECTION` to disable reflection, which
+is helpful while making changes that affect the content of these files
+in a way that would prevent the flatcc compiler from building.
diff --git a/nostrdb/flatcc/reflection/flatbuffers_common_builder.h b/nostrdb/flatcc/reflection/flatbuffers_common_builder.h
@@ -0,0 +1,685 @@
+#ifndef FLATBUFFERS_COMMON_BUILDER_H
+#define FLATBUFFERS_COMMON_BUILDER_H
+
+/* Generated by flatcc 0.6.1 FlatBuffers schema compiler for C by dvide.com */
+
+/* Common FlatBuffers build functionality for C. */
+
+#include "flatcc/flatcc_prologue.h"
+#ifndef FLATBUILDER_H
+#include "flatcc/flatcc_builder.h"
+#endif
+typedef flatcc_builder_t flatbuffers_builder_t;
+typedef flatcc_builder_ref_t flatbuffers_ref_t;
+typedef flatcc_builder_ref_t flatbuffers_vec_ref_t;
+typedef flatcc_builder_union_ref_t flatbuffers_union_ref_t;
+typedef flatcc_builder_union_vec_ref_t flatbuffers_union_vec_ref_t;
+/* integer return code (ref and ptr always fail on 0) */
+#define flatbuffers_failed(x) ((x) < 0)
+typedef flatbuffers_ref_t flatbuffers_root_t;
+#define flatbuffers_root(ref) ((flatbuffers_root_t)(ref))
+
+#define __flatbuffers_memoize_begin(B, src)\
+do { flatcc_builder_ref_t _ref; if ((_ref = flatcc_builder_refmap_find((B), (src)))) return _ref; } while (0)
+#define __flatbuffers_memoize_end(B, src, op) do { return flatcc_builder_refmap_insert((B), (src), (op)); } while (0)
+#define __flatbuffers_memoize(B, src, op) do { __flatbuffers_memoize_begin(B, src); __flatbuffers_memoize_end(B, src, op); } while (0)
+
+#define __flatbuffers_build_buffer(NS)\
+typedef NS ## ref_t NS ## buffer_ref_t;\
+static inline int NS ## buffer_start(NS ## builder_t *B, const NS ##fid_t fid)\
+{ return flatcc_builder_start_buffer(B, fid, 0, 0); }\
+static inline int NS ## buffer_start_with_size(NS ## builder_t *B, const NS ##fid_t fid)\
+{ return flatcc_builder_start_buffer(B, fid, 0, flatcc_builder_with_size); }\
+static inline int NS ## buffer_start_aligned(NS ## builder_t *B, NS ##fid_t fid, uint16_t block_align)\
+{ return flatcc_builder_start_buffer(B, fid, block_align, 0); }\
+static inline int NS ## buffer_start_aligned_with_size(NS ## builder_t *B, NS ##fid_t fid, uint16_t block_align)\
+{ return flatcc_builder_start_buffer(B, fid, block_align, flatcc_builder_with_size); }\
+static inline NS ## buffer_ref_t NS ## buffer_end(NS ## builder_t *B, NS ## ref_t root)\
+{ return flatcc_builder_end_buffer(B, root); }
+
+#define __flatbuffers_build_table_root(NS, N, FID, TFID)\
+static inline int N ## _start_as_root(NS ## builder_t *B)\
+{ return NS ## buffer_start(B, FID) ? -1 : N ## _start(B); }\
+static inline int N ## _start_as_root_with_size(NS ## builder_t *B)\
+{ return NS ## buffer_start_with_size(B, FID) ? -1 : N ## _start(B); }\
+static inline int N ## _start_as_typed_root(NS ## builder_t *B)\
+{ return NS ## buffer_start(B, TFID) ? -1 : N ## _start(B); }\
+static inline int N ## _start_as_typed_root_with_size(NS ## builder_t *B)\
+{ return NS ## buffer_start_with_size(B, TFID) ? -1 : N ## _start(B); }\
+static inline NS ## buffer_ref_t N ## _end_as_root(NS ## builder_t *B)\
+{ return NS ## buffer_end(B, N ## _end(B)); }\
+static inline NS ## buffer_ref_t N ## _end_as_typed_root(NS ## builder_t *B)\
+{ return NS ## buffer_end(B, N ## _end(B)); }\
+static inline NS ## buffer_ref_t N ## _create_as_root(NS ## builder_t *B __ ## N ## _formal_args)\
+{ if (NS ## buffer_start(B, FID)) return 0; return NS ## buffer_end(B, N ## _create(B __ ## N ## _call_args)); }\
+static inline NS ## buffer_ref_t N ## _create_as_root_with_size(NS ## builder_t *B __ ## N ## _formal_args)\
+{ if (NS ## buffer_start_with_size(B, FID)) return 0; return NS ## buffer_end(B, N ## _create(B __ ## N ## _call_args)); }\
+static inline NS ## buffer_ref_t N ## _create_as_typed_root(NS ## builder_t *B __ ## N ## _formal_args)\
+{ if (NS ## buffer_start(B, TFID)) return 0; return NS ## buffer_end(B, N ## _create(B __ ## N ## _call_args)); }\
+static inline NS ## buffer_ref_t N ## _create_as_typed_root_with_size(NS ## builder_t *B __ ## N ## _formal_args)\
+{ if (NS ## buffer_start_with_size(B, TFID)) return 0; return NS ## buffer_end(B, N ## _create(B __ ## N ## _call_args)); }\
+static inline NS ## buffer_ref_t N ## _clone_as_root(NS ## builder_t *B, N ## _table_t t)\
+{ if (NS ## buffer_start(B, FID)) return 0; return NS ## buffer_end(B, N ## _clone(B, t)); }\
+static inline NS ## buffer_ref_t N ## _clone_as_root_with_size(NS ## builder_t *B, N ## _table_t t)\
+{ if (NS ## buffer_start_with_size(B, FID)) return 0; return NS ## buffer_end(B, N ## _clone(B, t)); }\
+static inline NS ## buffer_ref_t N ## _clone_as_typed_root(NS ## builder_t *B, N ## _table_t t)\
+{ if (NS ## buffer_start(B, TFID)) return 0; return NS ## buffer_end(B, N ## _clone(B, t)); }\
+static inline NS ## buffer_ref_t N ## _clone_as_typed_root_with_size(NS ## builder_t *B, N ## _table_t t)\
+{ if (NS ## buffer_start_with_size(B, TFID)) return 0; return NS ## buffer_end(B, N ## _clone(B, t)); }
+
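+/*
+ * Illustrative sketch with a hypothetical schema-generated table `Monster`
+ * (every generated table receives these wrappers): _start_as_root opens
+ * the buffer and the table in one call, _end_as_root closes both.
+ *
+ *     Monster_start_as_root(B);
+ *     // ... Monster field setters ...
+ *     flatbuffers_buffer_ref_t buf = Monster_end_as_root(B);  // 0 on error
+ *
+ * The _with_size variants prefix the finished buffer with its own size;
+ * the _typed variants stamp the type-hash identifier TFID instead of FID.
+ */
+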
+#define __flatbuffers_build_table_prolog(NS, N, FID, TFID)\
+__flatbuffers_build_table_vector_ops(NS, N ## _vec, N)\
+__flatbuffers_build_table_root(NS, N, FID, TFID)
+
+#define __flatbuffers_build_struct_root(NS, N, A, FID, TFID)\
+static inline N ## _t *N ## _start_as_root(NS ## builder_t *B)\
+{ return NS ## buffer_start(B, FID) ? 0 : N ## _start(B); }\
+static inline N ## _t *N ## _start_as_root_with_size(NS ## builder_t *B)\
+{ return NS ## buffer_start_with_size(B, FID) ? 0 : N ## _start(B); }\
+static inline N ## _t *N ## _start_as_typed_root(NS ## builder_t *B)\
+{ return NS ## buffer_start(B, TFID) ? 0 : N ## _start(B); }\
+static inline N ## _t *N ## _start_as_typed_root_with_size(NS ## builder_t *B)\
+{ return NS ## buffer_start_with_size(B, TFID) ? 0 : N ## _start(B); }\
+static inline NS ## buffer_ref_t N ## _end_as_root(NS ## builder_t *B)\
+{ return NS ## buffer_end(B, N ## _end(B)); }\
+static inline NS ## buffer_ref_t N ## _end_as_typed_root(NS ## builder_t *B)\
+{ return NS ## buffer_end(B, N ## _end(B)); }\
+static inline NS ## buffer_ref_t N ## _end_pe_as_root(NS ## builder_t *B)\
+{ return NS ## buffer_end(B, N ## _end_pe(B)); }\
+static inline NS ## buffer_ref_t N ## _end_pe_as_typed_root(NS ## builder_t *B)\
+{ return NS ## buffer_end(B, N ## _end_pe(B)); }\
+static inline NS ## buffer_ref_t N ## _create_as_root(NS ## builder_t *B __ ## N ## _formal_args)\
+{ return flatcc_builder_create_buffer(B, FID, 0,\
+ N ## _create(B __ ## N ## _call_args), A, 0); }\
+static inline NS ## buffer_ref_t N ## _create_as_root_with_size(NS ## builder_t *B __ ## N ## _formal_args)\
+{ return flatcc_builder_create_buffer(B, FID, 0,\
+ N ## _create(B __ ## N ## _call_args), A, flatcc_builder_with_size); }\
+static inline NS ## buffer_ref_t N ## _create_as_typed_root(NS ## builder_t *B __ ## N ## _formal_args)\
+{ return flatcc_builder_create_buffer(B, TFID, 0,\
+ N ## _create(B __ ## N ## _call_args), A, 0); }\
+static inline NS ## buffer_ref_t N ## _create_as_typed_root_with_size(NS ## builder_t *B __ ## N ## _formal_args)\
+{ return flatcc_builder_create_buffer(B, TFID, 0,\
+ N ## _create(B __ ## N ## _call_args), A, flatcc_builder_with_size); }\
+static inline NS ## buffer_ref_t N ## _clone_as_root(NS ## builder_t *B, N ## _struct_t p)\
+{ return flatcc_builder_create_buffer(B, FID, 0, N ## _clone(B, p), A, 0); }\
+static inline NS ## buffer_ref_t N ## _clone_as_root_with_size(NS ## builder_t *B, N ## _struct_t p)\
+{ return flatcc_builder_create_buffer(B, FID, 0, N ## _clone(B, p), A, flatcc_builder_with_size); }\
+static inline NS ## buffer_ref_t N ## _clone_as_typed_root(NS ## builder_t *B, N ## _struct_t p)\
+{ return flatcc_builder_create_buffer(B, TFID, 0, N ## _clone(B, p), A, 0); }\
+static inline NS ## buffer_ref_t N ## _clone_as_typed_root_with_size(NS ## builder_t *B, N ## _struct_t p)\
+{ return flatcc_builder_create_buffer(B, TFID, 0, N ## _clone(B, p), A, flatcc_builder_with_size); }
+
+#define __flatbuffers_build_nested_table_root(NS, N, TN, FID, TFID)\
+static inline int N ## _start_as_root(NS ## builder_t *B)\
+{ return NS ## buffer_start(B, FID) ? -1 : TN ## _start(B); }\
+static inline int N ## _start_as_typed_root(NS ## builder_t *B)\
+{ return NS ## buffer_start(B, TFID) ? -1 : TN ## _start(B); }\
+static inline int N ## _end_as_root(NS ## builder_t *B)\
+{ return N ## _add(B, NS ## buffer_end(B, TN ## _end(B))); }\
+static inline int N ## _end_as_typed_root(NS ## builder_t *B)\
+{ return N ## _add(B, NS ## buffer_end(B, TN ## _end(B))); }\
+static inline int N ## _nest(NS ## builder_t *B, void *data, size_t size, uint16_t align)\
+{ return N ## _add(B, flatcc_builder_create_vector(B, data, size, 1,\
+ align ? align : 8, FLATBUFFERS_COUNT_MAX(1))); }\
+static inline int N ## _typed_nest(NS ## builder_t *B, void *data, size_t size, uint16_t align)\
+{ return N ## _add(B, flatcc_builder_create_vector(B, data, size, 1,\
+ align ? align : 8, FLATBUFFERS_COUNT_MAX(1))); }\
+static inline int N ## _clone_as_root(NS ## builder_t *B, TN ## _table_t t)\
+{ return N ## _add(B, TN ## _clone_as_root(B, t)); }\
+static inline int N ## _clone_as_typed_root(NS ## builder_t *B, TN ## _table_t t)\
+{ return N ## _add(B, TN ## _clone_as_typed_root(B, t)); }
+
+#define __flatbuffers_build_nested_struct_root(NS, N, TN, A, FID, TFID)\
+static inline TN ## _t *N ## _start_as_root(NS ## builder_t *B)\
+{ return NS ## buffer_start(B, FID) ? 0 : TN ## _start(B); }\
+static inline TN ## _t *N ## _start_as_typed_root(NS ## builder_t *B)\
+{ return NS ## buffer_start(B, TFID) ? 0 : TN ## _start(B); }\
+static inline int N ## _end_as_root(NS ## builder_t *B)\
+{ return N ## _add(B, NS ## buffer_end(B, TN ## _end(B))); }\
+static inline int N ## _end_as_typed_root(NS ## builder_t *B)\
+{ return N ## _add(B, NS ## buffer_end(B, TN ## _end(B))); }\
+static inline int N ## _end_pe_as_root(NS ## builder_t *B)\
+{ return N ## _add(B, NS ## buffer_end(B, TN ## _end_pe(B))); }\
+static inline int N ## _create_as_root(NS ## builder_t *B __ ## TN ## _formal_args)\
+{ return N ## _add(B, flatcc_builder_create_buffer(B, FID, 0,\
+ TN ## _create(B __ ## TN ## _call_args), A, flatcc_builder_is_nested)); }\
+static inline int N ## _create_as_typed_root(NS ## builder_t *B __ ## TN ## _formal_args)\
+{ return N ## _add(B, flatcc_builder_create_buffer(B, TFID, 0,\
+ TN ## _create(B __ ## TN ## _call_args), A, flatcc_builder_is_nested)); }\
+static inline int N ## _nest(NS ## builder_t *B, void *data, size_t size, uint16_t align)\
+{ return N ## _add(B, flatcc_builder_create_vector(B, data, size, 1,\
+ align < A ? A : align, FLATBUFFERS_COUNT_MAX(1))); }\
+static inline int N ## _typed_nest(NS ## builder_t *B, void *data, size_t size, uint16_t align)\
+{ return N ## _add(B, flatcc_builder_create_vector(B, data, size, 1,\
+ align < A ? A : align, FLATBUFFERS_COUNT_MAX(1))); }\
+static inline int N ## _clone_as_root(NS ## builder_t *B, TN ## _struct_t p)\
+{ return N ## _add(B, TN ## _clone_as_root(B, p)); }\
+static inline int N ## _clone_as_typed_root(NS ## builder_t *B, TN ## _struct_t p)\
+{ return N ## _add(B, TN ## _clone_as_typed_root(B, p)); }
+
+#define __flatbuffers_build_vector_ops(NS, V, N, TN, T)\
+static inline T *V ## _extend(NS ## builder_t *B, size_t len)\
+{ return (T *)flatcc_builder_extend_vector(B, len); }\
+static inline T *V ## _append(NS ## builder_t *B, const T *data, size_t len)\
+{ return (T *)flatcc_builder_append_vector(B, data, len); }\
+static inline int V ## _truncate(NS ## builder_t *B, size_t len)\
+{ return flatcc_builder_truncate_vector(B, len); }\
+static inline T *V ## _edit(NS ## builder_t *B)\
+{ return (T *)flatcc_builder_vector_edit(B); }\
+static inline size_t V ## _reserved_len(NS ## builder_t *B)\
+{ return flatcc_builder_vector_count(B); }\
+static inline T *V ## _push(NS ## builder_t *B, const T *p)\
+{ T *_p; return (_p = (T *)flatcc_builder_extend_vector(B, 1)) ? (memcpy(_p, p, TN ## __size()), _p) : 0; }\
+static inline T *V ## _push_copy(NS ## builder_t *B, const T *p)\
+{ T *_p; return (_p = (T *)flatcc_builder_extend_vector(B, 1)) ? TN ## _copy(_p, p) : 0; }\
+static inline T *V ## _push_clone(NS ## builder_t *B, const T *p)\
+{ T *_p; return (_p = (T *)flatcc_builder_extend_vector(B, 1)) ? TN ## _copy(_p, p) : 0; }\
+static inline T *V ## _push_create(NS ## builder_t *B __ ## TN ## _formal_args)\
+{ T *_p; return (_p = (T *)flatcc_builder_extend_vector(B, 1)) ? TN ## _assign(_p __ ## TN ## _call_args) : 0; }
+
+#define __flatbuffers_build_vector(NS, N, T, S, A)\
+typedef NS ## ref_t N ## _vec_ref_t;\
+static inline int N ## _vec_start(NS ## builder_t *B)\
+{ return flatcc_builder_start_vector(B, S, A, FLATBUFFERS_COUNT_MAX(S)); }\
+static inline N ## _vec_ref_t N ## _vec_end_pe(NS ## builder_t *B)\
+{ return flatcc_builder_end_vector(B); }\
+static inline N ## _vec_ref_t N ## _vec_end(NS ## builder_t *B)\
+{ if (!NS ## is_native_pe()) { size_t i, n; T *p = (T *)flatcc_builder_vector_edit(B);\
+ for (i = 0, n = flatcc_builder_vector_count(B); i < n; ++i)\
+ { N ## _to_pe(N ## __ptr_add(p, i)); }} return flatcc_builder_end_vector(B); }\
+static inline N ## _vec_ref_t N ## _vec_create_pe(NS ## builder_t *B, const T *data, size_t len)\
+{ return flatcc_builder_create_vector(B, data, len, S, A, FLATBUFFERS_COUNT_MAX(S)); }\
+static inline N ## _vec_ref_t N ## _vec_create(NS ## builder_t *B, const T *data, size_t len)\
+{ if (!NS ## is_native_pe()) { size_t i; T *p; if (flatcc_builder_start_vector(B, S, A, FLATBUFFERS_COUNT_MAX(S))) { return 0; }\
+ p = (T *)flatcc_builder_extend_vector(B, len); if (!p) return 0;\
+ for (i = 0; i < len; ++i) { N ## _copy_to_pe(N ## __ptr_add(p, i), N ## __const_ptr_add(data, i)); }\
+ return flatcc_builder_end_vector(B); } else return flatcc_builder_create_vector(B, data, len, S, A, FLATBUFFERS_COUNT_MAX(S)); }\
+static inline N ## _vec_ref_t N ## _vec_clone(NS ## builder_t *B, N ##_vec_t vec)\
+{ __flatbuffers_memoize(B, vec, flatcc_builder_create_vector(B, vec, N ## _vec_len(vec), S, A, FLATBUFFERS_COUNT_MAX(S))); }\
+static inline N ## _vec_ref_t N ## _vec_slice(NS ## builder_t *B, N ##_vec_t vec, size_t index, size_t len)\
+{ size_t n = N ## _vec_len(vec); if (index >= n) index = n; n -= index; if (len > n) len = n;\
+ return flatcc_builder_create_vector(B, N ## __const_ptr_add(vec, index), len, S, A, FLATBUFFERS_COUNT_MAX(S)); }\
+__flatbuffers_build_vector_ops(NS, N ## _vec, N, N, T)
+
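+/*
+ * Illustrative sketch using the flatbuffers_uint8 instantiation at the
+ * bottom of this header; every scalar type gets the same ops:
+ *
+ *     uint8_t data[3] = { 1, 2, 3 };
+ *     flatbuffers_uint8_vec_ref_t v;
+ *     v = flatbuffers_uint8_vec_create(B, data, 3);  // one-shot
+ *
+ *     flatbuffers_uint8_vec_start(B);                // incremental
+ *     uint8_t x = 4;
+ *     flatbuffers_uint8_vec_push(B, &x);
+ *     v = flatbuffers_uint8_vec_end(B);              // converts to pe if needed
+ */
+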
+#define __flatbuffers_build_union_vector_ops(NS, V, N, TN)\
+static inline TN ## _union_ref_t *V ## _extend(NS ## builder_t *B, size_t len)\
+{ return flatcc_builder_extend_union_vector(B, len); }\
+static inline TN ## _union_ref_t *V ## _append(NS ## builder_t *B, const TN ## _union_ref_t *data, size_t len)\
+{ return flatcc_builder_append_union_vector(B, data, len); }\
+static inline int V ## _truncate(NS ## builder_t *B, size_t len)\
+{ return flatcc_builder_truncate_union_vector(B, len); }\
+static inline TN ## _union_ref_t *V ## _edit(NS ## builder_t *B)\
+{ return (TN ## _union_ref_t *) flatcc_builder_union_vector_edit(B); }\
+static inline size_t V ## _reserved_len(NS ## builder_t *B)\
+{ return flatcc_builder_union_vector_count(B); }\
+static inline TN ## _union_ref_t *V ## _push(NS ## builder_t *B, const TN ## _union_ref_t ref)\
+{ return flatcc_builder_union_vector_push(B, ref); }\
+static inline TN ## _union_ref_t *V ## _push_clone(NS ## builder_t *B, TN ## _union_t u)\
+{ return TN ## _vec_push(B, TN ## _clone(B, u)); }
+
+#define __flatbuffers_build_union_vector(NS, N)\
+static inline int N ## _vec_start(NS ## builder_t *B)\
+{ return flatcc_builder_start_union_vector(B); }\
+static inline N ## _union_vec_ref_t N ## _vec_end(NS ## builder_t *B)\
+{ return flatcc_builder_end_union_vector(B); }\
+static inline N ## _union_vec_ref_t N ## _vec_create(NS ## builder_t *B, const N ## _union_ref_t *data, size_t len)\
+{ return flatcc_builder_create_union_vector(B, data, len); }\
+__flatbuffers_build_union_vector_ops(NS, N ## _vec, N, N)\
+/* Preserves DAG structure separately for type and value vector, so a type vector could be shared for many value vectors. */\
+static inline N ## _union_vec_ref_t N ## _vec_clone(NS ## builder_t *B, N ##_union_vec_t vec)\
+{ N ## _union_vec_ref_t _uvref, _ret = { 0, 0 }; NS ## union_ref_t _uref; size_t _i, _len;\
+ if (vec.type == 0) return _ret;\
+ _uvref.type = flatcc_builder_refmap_find(B, vec.type); _uvref.value = flatcc_builder_refmap_find(B, vec.value);\
+ _len = N ## _union_vec_len(vec); if (_uvref.type == 0) {\
+ _uvref.type = flatcc_builder_refmap_insert(B, vec.type, (flatcc_builder_create_type_vector(B, vec.type, _len))); }\
+ if (_uvref.type == 0) return _ret; if (_uvref.value == 0) {\
+ if (flatcc_builder_start_offset_vector(B)) return _ret;\
+ for (_i = 0; _i < _len; ++_i) { _uref = N ## _clone(B, N ## _union_vec_at(vec, _i));\
+ if (!_uref.value || !(flatcc_builder_offset_vector_push(B, _uref.value))) return _ret; }\
+ _uvref.value = flatcc_builder_refmap_insert(B, vec.value, flatcc_builder_end_offset_vector(B));\
+ if (_uvref.value == 0) return _ret; } return _uvref; }
+
+#define __flatbuffers_build_string_vector_ops(NS, N)\
+static inline int N ## _push_start(NS ## builder_t *B)\
+{ return NS ## string_start(B); }\
+static inline NS ## string_ref_t *N ## _push_end(NS ## builder_t *B)\
+{ return NS ## string_vec_push(B, NS ## string_end(B)); }\
+static inline NS ## string_ref_t *N ## _push_create(NS ## builder_t *B, const char *s, size_t len)\
+{ return NS ## string_vec_push(B, NS ## string_create(B, s, len)); }\
+static inline NS ## string_ref_t *N ## _push_create_str(NS ## builder_t *B, const char *s)\
+{ return NS ## string_vec_push(B, NS ## string_create_str(B, s)); }\
+static inline NS ## string_ref_t *N ## _push_create_strn(NS ## builder_t *B, const char *s, size_t max_len)\
+{ return NS ## string_vec_push(B, NS ## string_create_strn(B, s, max_len)); }\
+static inline NS ## string_ref_t *N ## _push_clone(NS ## builder_t *B, NS ## string_t string)\
+{ return NS ## string_vec_push(B, NS ## string_clone(B, string)); }\
+static inline NS ## string_ref_t *N ## _push_slice(NS ## builder_t *B, NS ## string_t string, size_t index, size_t len)\
+{ return NS ## string_vec_push(B, NS ## string_slice(B, string, index, len)); }
+
+#define __flatbuffers_build_table_vector_ops(NS, N, TN)\
+static inline int N ## _push_start(NS ## builder_t *B)\
+{ return TN ## _start(B); }\
+static inline TN ## _ref_t *N ## _push_end(NS ## builder_t *B)\
+{ return N ## _push(B, TN ## _end(B)); }\
+static inline TN ## _ref_t *N ## _push_create(NS ## builder_t *B __ ## TN ##_formal_args)\
+{ return N ## _push(B, TN ## _create(B __ ## TN ## _call_args)); }
+
+#define __flatbuffers_build_offset_vector_ops(NS, V, N, TN)\
+static inline TN ## _ref_t *V ## _extend(NS ## builder_t *B, size_t len)\
+{ return flatcc_builder_extend_offset_vector(B, len); }\
+static inline TN ## _ref_t *V ## _append(NS ## builder_t *B, const TN ## _ref_t *data, size_t len)\
+{ return flatcc_builder_append_offset_vector(B, data, len); }\
+static inline int V ## _truncate(NS ## builder_t *B, size_t len)\
+{ return flatcc_builder_truncate_offset_vector(B, len); }\
+static inline TN ## _ref_t *V ## _edit(NS ## builder_t *B)\
+{ return (TN ## _ref_t *)flatcc_builder_offset_vector_edit(B); }\
+static inline size_t V ## _reserved_len(NS ## builder_t *B)\
+{ return flatcc_builder_offset_vector_count(B); }\
+static inline TN ## _ref_t *V ## _push(NS ## builder_t *B, const TN ## _ref_t ref)\
+{ return ref ? flatcc_builder_offset_vector_push(B, ref) : 0; }
+
+#define __flatbuffers_build_offset_vector(NS, N)\
+typedef NS ## ref_t N ## _vec_ref_t;\
+static inline int N ## _vec_start(NS ## builder_t *B)\
+{ return flatcc_builder_start_offset_vector(B); }\
+static inline N ## _vec_ref_t N ## _vec_end(NS ## builder_t *B)\
+{ return flatcc_builder_end_offset_vector(B); }\
+static inline N ## _vec_ref_t N ## _vec_create(NS ## builder_t *B, const N ## _ref_t *data, size_t len)\
+{ return flatcc_builder_create_offset_vector(B, data, len); }\
+__flatbuffers_build_offset_vector_ops(NS, N ## _vec, N, N)\
+static inline N ## _vec_ref_t N ## _vec_clone(NS ## builder_t *B, N ##_vec_t vec)\
+{ N ## _ref_t _e; size_t _i, _len; __flatbuffers_memoize_begin(B, vec);\
+ _len = N ## _vec_len(vec); if (flatcc_builder_start_offset_vector(B)) return 0;\
+ for (_i = 0; _i < _len; ++_i) { if (!(_e = N ## _clone(B, N ## _vec_at(vec, _i)))) return 0;\
+ if (!flatcc_builder_offset_vector_push(B, _e)) return 0; }\
+ __flatbuffers_memoize_end(B, vec, flatcc_builder_end_offset_vector(B)); }
+
+#define __flatbuffers_build_string_ops(NS, N)\
+static inline char *N ## _append(NS ## builder_t *B, const char *s, size_t len)\
+{ return flatcc_builder_append_string(B, s, len); }\
+static inline char *N ## _append_str(NS ## builder_t *B, const char *s)\
+{ return flatcc_builder_append_string_str(B, s); }\
+static inline char *N ## _append_strn(NS ## builder_t *B, const char *s, size_t len)\
+{ return flatcc_builder_append_string_strn(B, s, len); }\
+static inline size_t N ## _reserved_len(NS ## builder_t *B)\
+{ return flatcc_builder_string_len(B); }\
+static inline char *N ## _extend(NS ## builder_t *B, size_t len)\
+{ return flatcc_builder_extend_string(B, len); }\
+static inline char *N ## _edit(NS ## builder_t *B)\
+{ return flatcc_builder_string_edit(B); }\
+static inline int N ## _truncate(NS ## builder_t *B, size_t len)\
+{ return flatcc_builder_truncate_string(B, len); }
+
+#define __flatbuffers_build_string(NS)\
+typedef NS ## ref_t NS ## string_ref_t;\
+static inline int NS ## string_start(NS ## builder_t *B)\
+{ return flatcc_builder_start_string(B); }\
+static inline NS ## string_ref_t NS ## string_end(NS ## builder_t *B)\
+{ return flatcc_builder_end_string(B); }\
+static inline NS ## ref_t NS ## string_create(NS ## builder_t *B, const char *s, size_t len)\
+{ return flatcc_builder_create_string(B, s, len); }\
+static inline NS ## ref_t NS ## string_create_str(NS ## builder_t *B, const char *s)\
+{ return flatcc_builder_create_string_str(B, s); }\
+static inline NS ## ref_t NS ## string_create_strn(NS ## builder_t *B, const char *s, size_t len)\
+{ return flatcc_builder_create_string_strn(B, s, len); }\
+static inline NS ## string_ref_t NS ## string_clone(NS ## builder_t *B, NS ## string_t string)\
+{ __flatbuffers_memoize(B, string, flatcc_builder_create_string(B, string, NS ## string_len(string))); }\
+static inline NS ## string_ref_t NS ## string_slice(NS ## builder_t *B, NS ## string_t string, size_t index, size_t len)\
+{ size_t n = NS ## string_len(string); if (index >= n) index = n; n -= index; if (len > n) len = n;\
+ return flatcc_builder_create_string(B, string + index, len); }\
+__flatbuffers_build_string_ops(NS, NS ## string)\
+__flatbuffers_build_offset_vector(NS, NS ## string)
+
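+/*
+ * Illustrative sketch of the flatbuffers_string_* API defined just above:
+ *
+ *     flatbuffers_string_ref_t s, t;
+ *     s = flatbuffers_string_create_str(B, "hello"); // zero-terminated input
+ *
+ *     flatbuffers_string_start(B);                   // incremental build
+ *     flatbuffers_string_append(B, "wor", 3);
+ *     flatbuffers_string_append_str(B, "ld");
+ *     t = flatbuffers_string_end(B);
+ */
+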
+#define __flatbuffers_copy_from_pe(P, P2, N) (*(P) = N ## _read_from_pe(P2), (P))
+#define __flatbuffers_from_pe(P, N) (*(P) = N ## _read_from_pe(P), (P))
+#define __flatbuffers_copy_to_pe(P, P2, N) (N ## _write_to_pe((P), *(P2)), (P))
+#define __flatbuffers_to_pe(P, N) (N ## _write_to_pe((P), *(P)), (P))
+#define __flatbuffers_define_fixed_array_primitives(NS, N, T)\
+static inline T *N ## _array_copy(T *p, const T *p2, size_t n)\
+{ memcpy(p, p2, n * sizeof(T)); return p; }\
+static inline T *N ## _array_copy_from_pe(T *p, const T *p2, size_t n)\
+{ size_t i; if (NS ## is_native_pe()) memcpy(p, p2, n * sizeof(T)); else\
+ for (i = 0; i < n; ++i) N ## _copy_from_pe(&p[i], &p2[i]); return p; }\
+static inline T *N ## _array_copy_to_pe(T *p, const T *p2, size_t n)\
+{ size_t i; if (NS ## is_native_pe()) memcpy(p, p2, n * sizeof(T)); else\
+ for (i = 0; i < n; ++i) N ## _copy_to_pe(&p[i], &p2[i]); return p; }
+#define __flatbuffers_define_scalar_primitives(NS, N, T)\
+static inline T *N ## _from_pe(T *p) { return __ ## NS ## from_pe(p, N); }\
+static inline T *N ## _to_pe(T *p) { return __ ## NS ## to_pe(p, N); }\
+static inline T *N ## _copy(T *p, const T *p2) { *p = *p2; return p; }\
+static inline T *N ## _copy_from_pe(T *p, const T *p2)\
+{ return __ ## NS ## copy_from_pe(p, p2, N); }\
+static inline T *N ## _copy_to_pe(T *p, const T *p2) \
+{ return __ ## NS ## copy_to_pe(p, p2, N); }\
+static inline T *N ## _assign(T *p, const T v0) { *p = v0; return p; }\
+static inline T *N ## _assign_from_pe(T *p, T v0)\
+{ *p = N ## _read_from_pe(&v0); return p; }\
+static inline T *N ## _assign_to_pe(T *p, T v0)\
+{ N ## _write_to_pe(p, v0); return p; }
+#define __flatbuffers_build_scalar(NS, N, T)\
+__ ## NS ## define_scalar_primitives(NS, N, T)\
+__ ## NS ## define_fixed_array_primitives(NS, N, T)\
+__ ## NS ## build_vector(NS, N, T, sizeof(T), sizeof(T))
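+
+/*
+ * "pe" throughout this header means protocol endian: the on-wire byte
+ * order of FlatBuffers, which is little endian. On little-endian hosts
+ * NS ## is_native_pe() is true and the _to_pe/_from_pe conversions are
+ * no-ops; on big-endian hosts they byteswap. Illustrative use, assuming
+ * the flatbuffers_uint32 instantiation at the bottom of this header:
+ *
+ *     uint32_t x = 0x11223344;
+ *     flatbuffers_uint32_to_pe(&x);   // unchanged on LE, swapped on BE
+ */
+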
+/* Depends on generated copy_to/from_pe functions, and the type. */
+#define __flatbuffers_define_struct_primitives(NS, N)\
+static inline N ## _t *N ##_to_pe(N ## _t *p)\
+{ if (!NS ## is_native_pe()) { N ## _copy_to_pe(p, p); }; return p; }\
+static inline N ## _t *N ##_from_pe(N ## _t *p)\
+{ if (!NS ## is_native_pe()) { N ## _copy_from_pe(p, p); }; return p; }\
+static inline N ## _t *N ## _clear(N ## _t *p) { return (N ## _t *)memset(p, 0, N ## __size()); }
+
+/* Depends on generated copy/assign_to/from_pe functions, and the type. */
+#define __flatbuffers_build_struct(NS, N, S, A, FID, TFID)\
+__ ## NS ## define_struct_primitives(NS, N)\
+typedef NS ## ref_t N ## _ref_t;\
+static inline N ## _t *N ## _start(NS ## builder_t *B)\
+{ return (N ## _t *)flatcc_builder_start_struct(B, S, A); }\
+static inline N ## _ref_t N ## _end(NS ## builder_t *B)\
+{ if (!NS ## is_native_pe()) { N ## _to_pe((N ## _t *)flatcc_builder_struct_edit(B)); }\
+ return flatcc_builder_end_struct(B); }\
+static inline N ## _ref_t N ## _end_pe(NS ## builder_t *B)\
+{ return flatcc_builder_end_struct(B); }\
+static inline N ## _ref_t N ## _create(NS ## builder_t *B __ ## N ## _formal_args)\
+{ N ## _t *_p = N ## _start(B); if (!_p) return 0; N ##_assign_to_pe(_p __ ## N ## _call_args);\
+ return N ## _end_pe(B); }\
+static inline N ## _ref_t N ## _clone(NS ## builder_t *B, N ## _struct_t p)\
+{ N ## _t *_p; __flatbuffers_memoize_begin(B, p); _p = N ## _start(B); if (!_p) return 0;\
+ N ## _copy(_p, p); __flatbuffers_memoize_end(B, p, N ##_end_pe(B)); }\
+__flatbuffers_build_vector(NS, N, N ## _t, S, A)\
+__flatbuffers_build_struct_root(NS, N, A, FID, TFID)
+
+#define __flatbuffers_struct_clear_field(p) memset((p), 0, sizeof(*(p)))
+#define __flatbuffers_build_table(NS, N, K)\
+static inline int N ## _start(NS ## builder_t *B)\
+{ return flatcc_builder_start_table(B, K); }\
+static inline N ## _ref_t N ## _end(NS ## builder_t *B)\
+{ FLATCC_ASSERT(flatcc_builder_check_required(B, __ ## N ## _required,\
+ sizeof(__ ## N ## _required) / sizeof(__ ## N ## _required[0]) - 1));\
+ return flatcc_builder_end_table(B); }\
+__flatbuffers_build_offset_vector(NS, N)
+
+#define __flatbuffers_build_table_field(ID, NS, N, TN, TT)\
+static inline int N ## _add(NS ## builder_t *B, TN ## _ref_t ref)\
+{ TN ## _ref_t *_p; return (ref && (_p = flatcc_builder_table_add_offset(B, ID))) ?\
+ ((*_p = ref), 0) : -1; }\
+static inline int N ## _start(NS ## builder_t *B)\
+{ return TN ## _start(B); }\
+static inline int N ## _end(NS ## builder_t *B)\
+{ return N ## _add(B, TN ## _end(B)); }\
+static inline TN ## _ref_t N ## _create(NS ## builder_t *B __ ## TN ##_formal_args)\
+{ return N ## _add(B, TN ## _create(B __ ## TN ## _call_args)); }\
+static inline int N ## _clone(NS ## builder_t *B, TN ## _table_t p)\
+{ return N ## _add(B, TN ## _clone(B, p)); }\
+static inline int N ## _pick(NS ## builder_t *B, TT ## _table_t t)\
+{ TN ## _table_t _p = N ## _get(t); return _p ? N ## _clone(B, _p) : 0; }
+
+#define __flatbuffers_build_union_field(ID, NS, N, TN, TT)\
+static inline int N ## _add(NS ## builder_t *B, TN ## _union_ref_t uref)\
+{ NS ## ref_t *_p; TN ## _union_type_t *_pt; if (uref.type == TN ## _NONE) return 0; if (uref.value == 0) return -1;\
+ if (!(_pt = (TN ## _union_type_t *)flatcc_builder_table_add(B, ID - 1, sizeof(*_pt), sizeof(*_pt)))) return -1;\
+ *_pt = uref.type; if (!(_p = flatcc_builder_table_add_offset(B, ID))) return -1; *_p = uref.value; return 0; }\
+static inline int N ## _add_type(NS ## builder_t *B, TN ## _union_type_t type)\
+{ TN ## _union_type_t *_pt; if (type == TN ## _NONE) return 0; return (_pt = (TN ## _union_type_t *)flatcc_builder_table_add(B, ID - 1,\
+ sizeof(*_pt), sizeof(*_pt))) ? ((*_pt = type), 0) : -1; }\
+static inline int N ## _add_value(NS ## builder_t *B, TN ## _union_ref_t uref)\
+{ NS ## ref_t *p; if (uref.type == TN ## _NONE) return 0; return (p = flatcc_builder_table_add_offset(B, ID)) ?\
+ ((*p = uref.value), 0) : -1; }\
+static inline int N ## _clone(NS ## builder_t *B, TN ## _union_t p)\
+{ return N ## _add(B, TN ## _clone(B, p)); }\
+static inline int N ## _pick(NS ## builder_t *B, TT ## _table_t t)\
+{ TN ## _union_t _p = N ## _union(t); return _p.type ? N ## _clone(B, _p) : 0; }
+
+/* M is the union value name and T is its type, i.e. the qualified name. */
+#define __flatbuffers_build_union_table_value_field(NS, N, NU, M, T)\
+static inline int N ## _ ## M ## _add(NS ## builder_t *B, T ## _ref_t ref)\
+{ return N ## _add(B, NU ## _as_ ## M (ref)); }\
+static inline int N ## _ ## M ## _start(NS ## builder_t *B)\
+{ return T ## _start(B); }\
+static inline int N ## _ ## M ## _end(NS ## builder_t *B)\
+{ T ## _ref_t ref = T ## _end(B);\
+ return ref ? N ## _ ## M ## _add(B, ref) : -1; }\
+static inline int N ## _ ## M ## _create(NS ## builder_t *B __ ## T ##_formal_args)\
+{ T ## _ref_t ref = T ## _create(B __ ## T ## _call_args);\
+ return ref ? N ## _add(B, NU ## _as_ ## M(ref)) : -1; }\
+static inline int N ## _ ## M ## _clone(NS ## builder_t *B, T ## _table_t t)\
+{ T ## _ref_t ref = T ## _clone(B, t);\
+ return ref ? N ## _add(B, NU ## _as_ ## M(ref)) : -1; }
+
+/* M is the union value name and T is its type, i.e. the qualified name. */
+#define __flatbuffers_build_union_struct_value_field(NS, N, NU, M, T)\
+static inline int N ## _ ## M ## _add(NS ## builder_t *B, T ## _ref_t ref)\
+{ return N ## _add(B, NU ## _as_ ## M (ref)); }\
+static inline T ## _t *N ## _ ## M ## _start(NS ## builder_t *B)\
+{ return T ## _start(B); }\
+static inline int N ## _ ## M ## _end(NS ## builder_t *B)\
+{ T ## _ref_t ref = T ## _end(B);\
+ return ref ? N ## _ ## M ## _add(B, ref) : -1; }\
+static inline int N ## _ ## M ## _create(NS ## builder_t *B __ ## T ##_formal_args)\
+{ T ## _ref_t ref = T ## _create(B __ ## T ## _call_args);\
+ return ref ? N ## _add(B, NU ## _as_ ## M(ref)) : -1; }\
+static inline int N ## _ ## M ## _end_pe(NS ## builder_t *B)\
+{ T ## _ref_t ref = T ## _end_pe(B);\
+ return ref ? N ## _add(B, NU ## _as_ ## M(ref)) : -1; }\
+static inline int N ## _ ## M ## _clone(NS ## builder_t *B, T ## _struct_t p)\
+{ T ## _ref_t ref = T ## _clone(B, p);\
+ return ref ? N ## _add(B, NU ## _as_ ## M(ref)) : -1; }
+#define __flatbuffers_build_union_string_value_field(NS, N, NU, M)\
+static inline int N ## _ ## M ## _add(NS ## builder_t *B, NS ## string_ref_t ref)\
+{ return N ## _add(B, NU ## _as_ ## M (ref)); }\
+__flatbuffers_build_string_field_ops(NS, N ## _ ## M)
+
+/* NS: common namespace, ID: table field id (not offset), TN: name of type T, TT: name of table type,
+ * S: size of the scalar type, A: alignment of type T, V: default value of type T. */
+#define __flatbuffers_build_scalar_field(ID, NS, N, TN, T, S, A, V, TT)\
+static inline int N ## _add(NS ## builder_t *B, const T v)\
+{ T *_p; if (v == V) return 0; if (!(_p = (T *)flatcc_builder_table_add(B, ID, S, A))) return -1;\
+ TN ## _assign_to_pe(_p, v); return 0; }\
+static inline int N ## _force_add(NS ## builder_t *B, const T v)\
+{ T *_p; if (!(_p = (T *)flatcc_builder_table_add(B, ID, S, A))) return -1;\
+ TN ## _assign_to_pe(_p, v); return 0; }\
+/* Clone does not skip default values and expects protocol endian (pe) content. */\
+static inline int N ## _clone(NS ## builder_t *B, const T *p)\
+{ return 0 == flatcc_builder_table_add_copy(B, ID, p, S, A) ? -1 : 0; }\
+/* Transferring a missing field is a no-op that succeeds, returning 0. */\
+static inline int N ## _pick(NS ## builder_t *B, TT ## _table_t t)\
+{ const T *_p = N ## _get_ptr(t); return _p ? N ## _clone(B, _p) : 0; }
+
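+/*
+ * Illustrative note with hypothetical generated names: for a schema field
+ * `hp: short = 100;` on a table `Monster`, Monster_hp_add(B, 100) writes
+ * nothing, because the value equals the default V and readers fall back
+ * to it, while Monster_hp_force_add(B, 100) stores the field anyway,
+ * which matters if the buffer is meant to be mutated in place later.
+ */
+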
+/* NS: common namespace, ID: table field id (not offset), TN: name of type T, TT: name of table type,
+ * S: size of the scalar type, A: alignment of type T. */
+#define __flatbuffers_build_scalar_optional_field(ID, NS, N, TN, T, S, A, TT)\
+static inline int N ## _add(NS ## builder_t *B, const T v)\
+{ T *_p; if (!(_p = (T *)flatcc_builder_table_add(B, ID, S, A))) return -1;\
+ TN ## _assign_to_pe(_p, v); return 0; }\
+/* Clone does not skip default values and expects protocol endian (pe) content. */\
+static inline int N ## _clone(NS ## builder_t *B, const T *p)\
+{ return 0 == flatcc_builder_table_add_copy(B, ID, p, S, A) ? -1 : 0; }\
+/* Transferring a missing field is a no-op that succeeds, returning 0. */\
+static inline int N ## _pick(NS ## builder_t *B, TT ## _table_t t)\
+{ const T *_p = N ## _get_ptr(t); return _p ? N ## _clone(B, _p) : 0; }
+
+#define __flatbuffers_build_struct_field(ID, NS, N, TN, S, A, TT)\
+static inline TN ## _t *N ## _start(NS ## builder_t *B)\
+{ return (TN ## _t *)flatcc_builder_table_add(B, ID, S, A); }\
+static inline int N ## _end(NS ## builder_t *B)\
+{ if (!NS ## is_native_pe()) { TN ## _to_pe((TN ## _t *)flatcc_builder_table_edit(B, S)); } return 0; }\
+static inline int N ## _end_pe(NS ## builder_t *B) { return 0; }\
+static inline int N ## _create(NS ## builder_t *B __ ## TN ## _formal_args)\
+{ TN ## _t *_p = N ## _start(B); if (!_p) return -1; TN ##_assign_to_pe(_p __ ## TN ## _call_args);\
+ return 0; }\
+static inline int N ## _add(NS ## builder_t *B, const TN ## _t *p)\
+{ TN ## _t *_p = N ## _start(B); if (!_p) return -1; TN ##_copy_to_pe(_p, p); return 0; }\
+static inline int N ## _clone(NS ## builder_t *B, TN ## _struct_t p)\
+{ return 0 == flatcc_builder_table_add_copy(B, ID, p, S, A) ? -1 : 0; }\
+static inline int N ## _pick(NS ## builder_t *B, TT ## _table_t t)\
+{ TN ## _struct_t _p = N ## _get(t); return _p ? N ## _clone(B, _p) : 0; }
+
+#define __flatbuffers_build_vector_field(ID, NS, N, TN, T, TT)\
+static inline int N ## _add(NS ## builder_t *B, TN ## _vec_ref_t ref)\
+{ TN ## _vec_ref_t *_p; return (ref && (_p = flatcc_builder_table_add_offset(B, ID))) ? ((*_p = ref), 0) : -1; }\
+static inline int N ## _start(NS ## builder_t *B)\
+{ return TN ## _vec_start(B); }\
+static inline int N ## _end_pe(NS ## builder_t *B)\
+{ return N ## _add(B, TN ## _vec_end_pe(B)); }\
+static inline int N ## _end(NS ## builder_t *B)\
+{ return N ## _add(B, TN ## _vec_end(B)); }\
+static inline int N ## _create_pe(NS ## builder_t *B, const T *data, size_t len)\
+{ return N ## _add(B, TN ## _vec_create_pe(B, data, len)); }\
+static inline int N ## _create(NS ## builder_t *B, const T *data, size_t len)\
+{ return N ## _add(B, TN ## _vec_create(B, data, len)); }\
+static inline int N ## _slice(NS ## builder_t *B, TN ## _vec_t vec, size_t index, size_t len)\
+{ return N ## _add(B, TN ## _vec_slice(B, vec, index, len)); }\
+static inline int N ## _clone(NS ## builder_t *B, TN ## _vec_t vec)\
+{ return N ## _add(B, TN ## _vec_clone(B, vec)); }\
+static inline int N ## _pick(NS ## builder_t *B, TT ## _table_t t)\
+{ TN ## _vec_t _p = N ## _get(t); return _p ? N ## _clone(B, _p) : 0; }\
+__flatbuffers_build_vector_ops(NS, N, N, TN, T)
+
+#define __flatbuffers_build_offset_vector_field(ID, NS, N, TN, TT)\
+static inline int N ## _add(NS ## builder_t *B, TN ## _vec_ref_t ref)\
+{ TN ## _vec_ref_t *_p; return (ref && (_p = flatcc_builder_table_add_offset(B, ID))) ? ((*_p = ref), 0) : -1; }\
+static inline int N ## _start(NS ## builder_t *B)\
+{ return flatcc_builder_start_offset_vector(B); }\
+static inline int N ## _end(NS ## builder_t *B)\
+{ return N ## _add(B, flatcc_builder_end_offset_vector(B)); }\
+static inline int N ## _create(NS ## builder_t *B, const TN ## _ref_t *data, size_t len)\
+{ return N ## _add(B, flatcc_builder_create_offset_vector(B, data, len)); }\
+__flatbuffers_build_offset_vector_ops(NS, N, N, TN)\
+static inline int N ## _clone(NS ## builder_t *B, TN ## _vec_t vec)\
+{ return N ## _add(B, TN ## _vec_clone(B, vec)); }\
+static inline int N ## _pick(NS ## builder_t *B, TT ## _table_t t)\
+{ TN ## _vec_t _p = N ## _get(t); return _p ? N ## _clone(B, _p) : 0; }
+
+/* depends on N ## _add which differs for union member fields and ordinary fields */
+#define __flatbuffers_build_string_field_ops(NS, N)\
+static inline int N ## _start(NS ## builder_t *B)\
+{ return flatcc_builder_start_string(B); }\
+static inline int N ## _end(NS ## builder_t *B)\
+{ return N ## _add(B, flatcc_builder_end_string(B)); }\
+static inline int N ## _create(NS ## builder_t *B, const char *s, size_t len)\
+{ return N ## _add(B, flatcc_builder_create_string(B, s, len)); }\
+static inline int N ## _create_str(NS ## builder_t *B, const char *s)\
+{ return N ## _add(B, flatcc_builder_create_string_str(B, s)); }\
+static inline int N ## _create_strn(NS ## builder_t *B, const char *s, size_t max_len)\
+{ return N ## _add(B, flatcc_builder_create_string_strn(B, s, max_len)); }\
+static inline int N ## _clone(NS ## builder_t *B, NS ## string_t string)\
+{ return N ## _add(B, NS ## string_clone(B, string)); }\
+static inline int N ## _slice(NS ## builder_t *B, NS ## string_t string, size_t index, size_t len)\
+{ return N ## _add(B, NS ## string_slice(B, string, index, len)); }\
+__flatbuffers_build_string_ops(NS, N)
+
+#define __flatbuffers_build_string_field(ID, NS, N, TT)\
+static inline int N ## _add(NS ## builder_t *B, NS ## string_ref_t ref)\
+{ NS ## string_ref_t *_p; return (ref && (_p = flatcc_builder_table_add_offset(B, ID))) ? ((*_p = ref), 0) : -1; }\
+__flatbuffers_build_string_field_ops(NS, N)\
+static inline int N ## _pick(NS ## builder_t *B, TT ## _table_t t)\
+{ NS ## string_t _p = N ## _get(t); return _p ? N ## _clone(B, _p) : 0; }
+
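+/*
+ * Illustrative sketch with a hypothetical generated field `Monster.name`:
+ *
+ *     Monster_name_create_str(B, "orc");  // create and add in one call
+ *
+ *     Monster_name_start(B);              // or build incrementally
+ *     Monster_name_append_str(B, "orc");
+ *     Monster_name_end(B);                // adds the field to the table
+ */
+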
+#define __flatbuffers_build_table_vector_field(ID, NS, N, TN, TT)\
+__flatbuffers_build_offset_vector_field(ID, NS, N, TN, TT)\
+__flatbuffers_build_table_vector_ops(NS, N, TN)
+
+#define __flatbuffers_build_union_vector_field(ID, NS, N, TN, TT)\
+static inline int N ## _add(NS ## builder_t *B, TN ## _union_vec_ref_t uvref)\
+{ NS ## vec_ref_t *_p; if (!uvref.type || !uvref.value) return uvref.type == uvref.value ? 0 : -1;\
+ if (!(_p = flatcc_builder_table_add_offset(B, ID - 1))) return -1; *_p = uvref.type;\
+ if (!(_p = flatcc_builder_table_add_offset(B, ID))) return -1; *_p = uvref.value; return 0; }\
+static inline int N ## _start(NS ## builder_t *B)\
+{ return flatcc_builder_start_union_vector(B); }\
+static inline int N ## _end(NS ## builder_t *B)\
+{ return N ## _add(B, flatcc_builder_end_union_vector(B)); }\
+static inline int N ## _create(NS ## builder_t *B, const TN ## _union_ref_t *data, size_t len)\
+{ return N ## _add(B, flatcc_builder_create_union_vector(B, data, len)); }\
+__flatbuffers_build_union_vector_ops(NS, N, N, TN)\
+static inline int N ## _clone(NS ## builder_t *B, TN ## _union_vec_t vec)\
+{ return N ## _add(B, TN ## _vec_clone(B, vec)); }\
+static inline int N ## _pick(NS ## builder_t *B, TT ## _table_t t)\
+{ TN ## _union_vec_t _p = N ## _union(t); return _p.type ? N ## _clone(B, _p) : 0; }
+
+#define __flatbuffers_build_union_table_vector_value_field(NS, N, NU, M, T)\
+static inline int N ## _ ## M ## _push_start(NS ## builder_t *B)\
+{ return T ## _start(B); }\
+static inline NU ## _union_ref_t *N ## _ ## M ## _push_end(NS ## builder_t *B)\
+{ return NU ## _vec_push(B, NU ## _as_ ## M (T ## _end(B))); }\
+static inline NU ## _union_ref_t *N ## _ ## M ## _push(NS ## builder_t *B, T ## _ref_t ref)\
+{ return NU ## _vec_push(B, NU ## _as_ ## M (ref)); }\
+static inline NU ## _union_ref_t *N ## _ ## M ## _push_create(NS ## builder_t *B __ ## T ##_formal_args)\
+{ return NU ## _vec_push(B, NU ## _as_ ## M(T ## _create(B __ ## T ## _call_args))); }\
+static inline NU ## _union_ref_t *N ## _ ## M ## _push_clone(NS ## builder_t *B, T ## _table_t t)\
+{ return NU ## _vec_push(B, NU ## _as_ ## M(T ## _clone(B, t))); }
+
+#define __flatbuffers_build_union_struct_vector_value_field(NS, N, NU, M, T)\
+static inline T ## _t *N ## _ ## M ## _push_start(NS ## builder_t *B)\
+{ return T ## _start(B); }\
+static inline NU ## _union_ref_t *N ## _ ## M ## _push_end(NS ## builder_t *B)\
+{ return NU ## _vec_push(B, NU ## _as_ ## M (T ## _end(B))); }\
+static inline NU ## _union_ref_t *N ## _ ## M ## _push(NS ## builder_t *B, T ## _ref_t ref)\
+{ return NU ## _vec_push(B, NU ## _as_ ## M (ref)); }\
+static inline NU ## _union_ref_t *N ## _ ## M ## _push_create(NS ## builder_t *B __ ## T ##_formal_args)\
+{ return NU ## _vec_push(B, NU ## _as_ ## M(T ## _create(B __ ## T ## _call_args))); }\
+static inline NU ## _union_ref_t *N ## _ ## M ## _push_clone(NS ## builder_t *B, T ## _struct_t p)\
+{ return NU ## _vec_push(B, NU ## _as_ ## M(T ## _clone(B, p))); }
+
+#define __flatbuffers_build_union_string_vector_value_field(NS, N, NU, M)\
+static inline NU ## _union_ref_t *N ## _ ## M ## _push(NS ## builder_t *B, NS ## string_ref_t ref)\
+{ return NU ## _vec_push(B, NU ## _as_ ## M (ref)); }\
+static inline int N ## _ ## M ## _push_start(NS ## builder_t *B)\
+{ return NS ## string_start(B); }\
+static inline NU ## _union_ref_t *N ## _ ## M ## _push_end(NS ## builder_t *B)\
+{ return NU ## _vec_push(B, NU ## _as_ ## M(NS ## string_end(B))); }\
+static inline NU ## _union_ref_t *N ## _ ## M ## _push_create(NS ## builder_t *B, const char *s, size_t len)\
+{ return NU ## _vec_push(B, NU ## _as_ ## M(NS ## string_create(B, s, len))); }\
+static inline NU ## _union_ref_t *N ## _ ## M ## _push_create_str(NS ## builder_t *B, const char *s)\
+{ return NU ## _vec_push(B, NU ## _as_ ## M(NS ## string_create_str(B, s))); }\
+static inline NU ## _union_ref_t *N ## _ ## M ## _push_create_strn(NS ## builder_t *B, const char *s, size_t max_len)\
+{ return NU ## _vec_push(B, NU ## _as_ ## M(NS ## string_create_strn(B, s, max_len))); }\
+static inline NU ## _union_ref_t *N ## _ ## M ## _push_clone(NS ## builder_t *B, NS ## string_t string)\
+{ return NU ## _vec_push(B, NU ## _as_ ## M(NS ## string_clone(B, string))); }\
+static inline NU ## _union_ref_t *N ## _ ## M ## _push_slice(NS ## builder_t *B, NS ## string_t string, size_t index, size_t len)\
+{ return NU ## _vec_push(B, NU ## _as_ ## M(NS ## string_slice(B, string, index, len))); }
+
+#define __flatbuffers_build_string_vector_field(ID, NS, N, TT)\
+__flatbuffers_build_offset_vector_field(ID, NS, N, NS ## string, TT)\
+__flatbuffers_build_string_vector_ops(NS, N)
+
+#define __flatbuffers_char_formal_args , char v0
+#define __flatbuffers_char_call_args , v0
+#define __flatbuffers_uint8_formal_args , uint8_t v0
+#define __flatbuffers_uint8_call_args , v0
+#define __flatbuffers_int8_formal_args , int8_t v0
+#define __flatbuffers_int8_call_args , v0
+#define __flatbuffers_bool_formal_args , flatbuffers_bool_t v0
+#define __flatbuffers_bool_call_args , v0
+#define __flatbuffers_uint16_formal_args , uint16_t v0
+#define __flatbuffers_uint16_call_args , v0
+#define __flatbuffers_uint32_formal_args , uint32_t v0
+#define __flatbuffers_uint32_call_args , v0
+#define __flatbuffers_uint64_formal_args , uint64_t v0
+#define __flatbuffers_uint64_call_args , v0
+#define __flatbuffers_int16_formal_args , int16_t v0
+#define __flatbuffers_int16_call_args , v0
+#define __flatbuffers_int32_formal_args , int32_t v0
+#define __flatbuffers_int32_call_args , v0
+#define __flatbuffers_int64_formal_args , int64_t v0
+#define __flatbuffers_int64_call_args , v0
+#define __flatbuffers_float_formal_args , float v0
+#define __flatbuffers_float_call_args , v0
+#define __flatbuffers_double_formal_args , double v0
+#define __flatbuffers_double_call_args , v0
+
+__flatbuffers_build_scalar(flatbuffers_, flatbuffers_char, char)
+__flatbuffers_build_scalar(flatbuffers_, flatbuffers_uint8, uint8_t)
+__flatbuffers_build_scalar(flatbuffers_, flatbuffers_int8, int8_t)
+__flatbuffers_build_scalar(flatbuffers_, flatbuffers_bool, flatbuffers_bool_t)
+__flatbuffers_build_scalar(flatbuffers_, flatbuffers_uint16, uint16_t)
+__flatbuffers_build_scalar(flatbuffers_, flatbuffers_uint32, uint32_t)
+__flatbuffers_build_scalar(flatbuffers_, flatbuffers_uint64, uint64_t)
+__flatbuffers_build_scalar(flatbuffers_, flatbuffers_int16, int16_t)
+__flatbuffers_build_scalar(flatbuffers_, flatbuffers_int32, int32_t)
+__flatbuffers_build_scalar(flatbuffers_, flatbuffers_int64, int64_t)
+__flatbuffers_build_scalar(flatbuffers_, flatbuffers_float, float)
+__flatbuffers_build_scalar(flatbuffers_, flatbuffers_double, double)
+
+__flatbuffers_build_string(flatbuffers_)
+
+__flatbuffers_build_buffer(flatbuffers_)
+#include "flatcc/flatcc_epilogue.h"
+#endif /* FLATBUFFERS_COMMON_BUILDER_H */
diff --git a/nostrdb/flatcc/reflection/flatbuffers_common_reader.h b/nostrdb/flatcc/reflection/flatbuffers_common_reader.h
@@ -0,0 +1,578 @@
+#ifndef FLATBUFFERS_COMMON_READER_H
+#define FLATBUFFERS_COMMON_READER_H
+
+/* Generated by flatcc 0.6.1 FlatBuffers schema compiler for C by dvide.com */
+
+/* Common FlatBuffers read functionality for C. */
+
+#include "flatcc/flatcc_prologue.h"
+#include "flatcc/flatcc_flatbuffers.h"
+
+
+#define __flatbuffers_read_scalar_at_byteoffset(N, p, o) N ## _read_from_pe((uint8_t *)(p) + (o))
+#define __flatbuffers_read_scalar(N, p) N ## _read_from_pe(p)
+#define __flatbuffers_read_vt(ID, offset, t)\
+flatbuffers_voffset_t offset = 0;\
+{ flatbuffers_voffset_t id__tmp, *vt__tmp;\
+ FLATCC_ASSERT(t != 0 && "null pointer table access");\
+ id__tmp = ID;\
+ vt__tmp = (flatbuffers_voffset_t *)((uint8_t *)(t) -\
+ __flatbuffers_soffset_read_from_pe(t));\
+ if (__flatbuffers_voffset_read_from_pe(vt__tmp) >= sizeof(vt__tmp[0]) * (id__tmp + 3u)) {\
+ offset = __flatbuffers_voffset_read_from_pe(vt__tmp + id__tmp + 2);\
+ }\
+}
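+/*
+ * Layout navigated by __flatbuffers_read_vt (standard FlatBuffers wire
+ * format): a table begins with a signed offset back to its vtable;
+ * vtable[0] holds the vtable size in bytes, vtable[1] the table size, and
+ * field id N occupies vtable slot N + 2. The size check against
+ * sizeof(voffset) * (id + 3) leaves `offset` at 0 for fields beyond a
+ * shorter (older) vtable, which is what makes schema evolution work.
+ */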
+#define __flatbuffers_field_present(ID, t) { __flatbuffers_read_vt(ID, offset__tmp, t) return offset__tmp != 0; }
+#define __flatbuffers_scalar_field(T, ID, t)\
+{\
+ __flatbuffers_read_vt(ID, offset__tmp, t)\
+ if (offset__tmp) {\
+ return (const T *)((uint8_t *)(t) + offset__tmp);\
+ }\
+ return 0;\
+}
+#define __flatbuffers_define_scalar_field(ID, N, NK, TK, T, V)\
+static inline T N ## _ ## NK ## _get(N ## _table_t t__tmp)\
+{ __flatbuffers_read_vt(ID, offset__tmp, t__tmp)\
+ return offset__tmp ? __flatbuffers_read_scalar_at_byteoffset(TK, t__tmp, offset__tmp) : V;\
+}\
+static inline T N ## _ ## NK(N ## _table_t t__tmp)\
+{ __flatbuffers_read_vt(ID, offset__tmp, t__tmp)\
+ return offset__tmp ? __flatbuffers_read_scalar_at_byteoffset(TK, t__tmp, offset__tmp) : V;\
+}\
+static inline const T *N ## _ ## NK ## _get_ptr(N ## _table_t t__tmp)\
+__flatbuffers_scalar_field(T, ID, t__tmp)\
+static inline int N ## _ ## NK ## _is_present(N ## _table_t t__tmp)\
+__flatbuffers_field_present(ID, t__tmp)\
+__flatbuffers_define_scan_by_scalar_field(N, NK, T)
+#define __flatbuffers_define_scalar_optional_field(ID, N, NK, TK, T, V)\
+__flatbuffers_define_scalar_field(ID, N, NK, TK, T, V)\
+static inline TK ## _option_t N ## _ ## NK ## _option(N ## _table_t t__tmp)\
+{ TK ## _option_t ret; __flatbuffers_read_vt(ID, offset__tmp, t__tmp)\
+ ret.is_null = offset__tmp == 0; ret.value = offset__tmp ?\
+ __flatbuffers_read_scalar_at_byteoffset(TK, t__tmp, offset__tmp) : V;\
+ return ret; }
+#define __flatbuffers_struct_field(T, ID, t, r)\
+{\
+ __flatbuffers_read_vt(ID, offset__tmp, t)\
+ if (offset__tmp) {\
+ return (T)((uint8_t *)(t) + offset__tmp);\
+ }\
+ FLATCC_ASSERT(!(r) && "required field missing");\
+ return 0;\
+}
+#define __flatbuffers_offset_field(T, ID, t, r, adjust)\
+{\
+ flatbuffers_uoffset_t *elem__tmp;\
+ __flatbuffers_read_vt(ID, offset__tmp, t)\
+ if (offset__tmp) {\
+ elem__tmp = (flatbuffers_uoffset_t *)((uint8_t *)(t) + offset__tmp);\
+ /* Add sizeof so the C API has raw access just past the header field. */\
+ return (T)((uint8_t *)(elem__tmp) + adjust +\
+ __flatbuffers_uoffset_read_from_pe(elem__tmp));\
+ }\
+ FLATCC_ASSERT(!(r) && "required field missing");\
+ return 0;\
+}
+#define __flatbuffers_vector_field(T, ID, t, r) __flatbuffers_offset_field(T, ID, t, r, sizeof(flatbuffers_uoffset_t))
+#define __flatbuffers_table_field(T, ID, t, r) __flatbuffers_offset_field(T, ID, t, r, 0)
+#define __flatbuffers_define_struct_field(ID, N, NK, T, r)\
+static inline T N ## _ ## NK ## _get(N ## _table_t t__tmp)\
+__flatbuffers_struct_field(T, ID, t__tmp, r)\
+static inline T N ## _ ## NK(N ## _table_t t__tmp)\
+__flatbuffers_struct_field(T, ID, t__tmp, r)\
+static inline int N ## _ ## NK ## _is_present(N ## _table_t t__tmp)\
+__flatbuffers_field_present(ID, t__tmp)
+#define __flatbuffers_define_vector_field(ID, N, NK, T, r)\
+static inline T N ## _ ## NK ## _get(N ## _table_t t__tmp)\
+__flatbuffers_vector_field(T, ID, t__tmp, r)\
+static inline T N ## _ ## NK(N ## _table_t t__tmp)\
+__flatbuffers_vector_field(T, ID, t__tmp, r)\
+static inline int N ## _ ## NK ## _is_present(N ## _table_t t__tmp)\
+__flatbuffers_field_present(ID, t__tmp)
+#define __flatbuffers_define_table_field(ID, N, NK, T, r)\
+static inline T N ## _ ## NK ## _get(N ## _table_t t__tmp)\
+__flatbuffers_table_field(T, ID, t__tmp, r)\
+static inline T N ## _ ## NK(N ## _table_t t__tmp)\
+__flatbuffers_table_field(T, ID, t__tmp, r)\
+static inline int N ## _ ## NK ## _is_present(N ## _table_t t__tmp)\
+__flatbuffers_field_present(ID, t__tmp)
+#define __flatbuffers_define_string_field(ID, N, NK, r)\
+static inline flatbuffers_string_t N ## _ ## NK ## _get(N ## _table_t t__tmp)\
+__flatbuffers_vector_field(flatbuffers_string_t, ID, t__tmp, r)\
+static inline flatbuffers_string_t N ## _ ## NK(N ## _table_t t__tmp)\
+__flatbuffers_vector_field(flatbuffers_string_t, ID, t__tmp, r)\
+static inline int N ## _ ## NK ## _is_present(N ## _table_t t__tmp)\
+__flatbuffers_field_present(ID, t__tmp)\
+__flatbuffers_define_scan_by_string_field(N, NK)
+#define __flatbuffers_vec_len(vec)\
+{ return (vec) ? (size_t)__flatbuffers_uoffset_read_from_pe((flatbuffers_uoffset_t *)vec - 1) : 0; }
+#define __flatbuffers_string_len(s) __flatbuffers_vec_len(s)
+static inline size_t flatbuffers_vec_len(const void *vec)
+__flatbuffers_vec_len(vec)
+#define __flatbuffers_scalar_vec_at(N, vec, i)\
+{ FLATCC_ASSERT(flatbuffers_vec_len(vec) > (i) && "index out of range");\
+ return __flatbuffers_read_scalar(N, &(vec)[i]); }
+#define __flatbuffers_struct_vec_at(vec, i)\
+{ FLATCC_ASSERT(flatbuffers_vec_len(vec) > (i) && "index out of range"); return (vec) + (i); }
+/* `adjust` skips past the header for string vectors. */
+#define __flatbuffers_offset_vec_at(T, vec, i, adjust)\
+{ const flatbuffers_uoffset_t *elem__tmp = (vec) + (i);\
+ FLATCC_ASSERT(flatbuffers_vec_len(vec) > (i) && "index out of range");\
+ return (T)((uint8_t *)(elem__tmp) + (size_t)__flatbuffers_uoffset_read_from_pe(elem__tmp) + (adjust)); }
+#define __flatbuffers_define_scalar_vec_len(N)\
+static inline size_t N ## _vec_len(N ##_vec_t vec__tmp)\
+{ return flatbuffers_vec_len(vec__tmp); }
+#define __flatbuffers_define_scalar_vec_at(N, T) \
+static inline T N ## _vec_at(N ## _vec_t vec__tmp, size_t i__tmp)\
+__flatbuffers_scalar_vec_at(N, vec__tmp, i__tmp)
+typedef const char *flatbuffers_string_t;
+static inline size_t flatbuffers_string_len(flatbuffers_string_t s)
+__flatbuffers_string_len(s)
+typedef const flatbuffers_uoffset_t *flatbuffers_string_vec_t;
+typedef flatbuffers_uoffset_t *flatbuffers_string_mutable_vec_t;
+static inline size_t flatbuffers_string_vec_len(flatbuffers_string_vec_t vec)
+__flatbuffers_vec_len(vec)
+static inline flatbuffers_string_t flatbuffers_string_vec_at(flatbuffers_string_vec_t vec, size_t i)
+__flatbuffers_offset_vec_at(flatbuffers_string_t, vec, i, sizeof(vec[0]))
+typedef const void *flatbuffers_generic_t;
+typedef void *flatbuffers_mutable_generic_t;
+static inline flatbuffers_string_t flatbuffers_string_cast_from_generic(const flatbuffers_generic_t p)
+{ return p ? ((const char *)p) + __flatbuffers_uoffset__size() : 0; }
+typedef const flatbuffers_uoffset_t *flatbuffers_generic_vec_t;
+typedef flatbuffers_uoffset_t *flatbuffers_generic_table_mutable_vec_t;
+static inline size_t flatbuffers_generic_vec_len(flatbuffers_generic_vec_t vec)
+__flatbuffers_vec_len(vec)
+static inline flatbuffers_generic_t flatbuffers_generic_vec_at(flatbuffers_generic_vec_t vec, size_t i)
+__flatbuffers_offset_vec_at(flatbuffers_generic_t, vec, i, 0)
+static inline flatbuffers_generic_t flatbuffers_generic_vec_at_as_string(flatbuffers_generic_vec_t vec, size_t i)
+__flatbuffers_offset_vec_at(flatbuffers_generic_t, vec, i, sizeof(vec[0]))
+typedef struct flatbuffers_union {
+ flatbuffers_union_type_t type;
+ flatbuffers_generic_t value;
+} flatbuffers_union_t;
+typedef struct flatbuffers_union_vec {
+ const flatbuffers_union_type_t *type;
+ const flatbuffers_uoffset_t *value;
+} flatbuffers_union_vec_t;
+typedef struct flatbuffers_mutable_union {
+ flatbuffers_union_type_t type;
+ flatbuffers_mutable_generic_t value;
+} flatbuffers_mutable_union_t;
+typedef struct flatbuffers_mutable_union_vec {
+ flatbuffers_union_type_t *type;
+ flatbuffers_uoffset_t *value;
+} flatbuffers_mutable_union_vec_t;
+static inline flatbuffers_mutable_union_t flatbuffers_mutable_union_cast(flatbuffers_union_t u__tmp)
+{ flatbuffers_mutable_union_t mu = { u__tmp.type, (flatbuffers_mutable_generic_t)u__tmp.value };
+ return mu; }
+static inline flatbuffers_mutable_union_vec_t flatbuffers_mutable_union_vec_cast(flatbuffers_union_vec_t uv__tmp)
+{ flatbuffers_mutable_union_vec_t muv =
+ { (flatbuffers_union_type_t *)uv__tmp.type, (flatbuffers_uoffset_t *)uv__tmp.value }; return muv; }
+#define __flatbuffers_union_type_field(ID, t)\
+{\
+ __flatbuffers_read_vt(ID, offset__tmp, t)\
+ return offset__tmp ? __flatbuffers_read_scalar_at_byteoffset(__flatbuffers_utype, t, offset__tmp) : 0;\
+}
+static inline flatbuffers_string_t flatbuffers_string_cast_from_union(const flatbuffers_union_t u__tmp)
+{ return flatbuffers_string_cast_from_generic(u__tmp.value); }
+#define __flatbuffers_define_union_field(NS, ID, N, NK, T, r)\
+static inline T ## _union_type_t N ## _ ## NK ## _type_get(N ## _table_t t__tmp)\
+__## NS ## union_type_field(((ID) - 1), t__tmp)\
+static inline NS ## generic_t N ## _ ## NK ## _get(N ## _table_t t__tmp)\
+__## NS ## table_field(NS ## generic_t, ID, t__tmp, r)\
+static inline T ## _union_type_t N ## _ ## NK ## _type(N ## _table_t t__tmp)\
+__## NS ## union_type_field(((ID) - 1), t__tmp)\
+static inline NS ## generic_t N ## _ ## NK(N ## _table_t t__tmp)\
+__## NS ## table_field(NS ## generic_t, ID, t__tmp, r)\
+static inline int N ## _ ## NK ## _is_present(N ## _table_t t__tmp)\
+__## NS ## field_present(ID, t__tmp)\
+static inline T ## _union_t N ## _ ## NK ## _union(N ## _table_t t__tmp)\
+{ T ## _union_t u__tmp = { 0, 0 }; u__tmp.type = N ## _ ## NK ## _type_get(t__tmp);\
+ if (u__tmp.type == 0) return u__tmp; u__tmp.value = N ## _ ## NK ## _get(t__tmp); return u__tmp; }\
+static inline NS ## string_t N ## _ ## NK ## _as_string(N ## _table_t t__tmp)\
+{ return NS ## string_cast_from_generic(N ## _ ## NK ## _get(t__tmp)); }
+
+#define __flatbuffers_define_union_vector_ops(NS, T)\
+static inline size_t T ## _union_vec_len(T ## _union_vec_t uv__tmp)\
+{ return NS ## vec_len(uv__tmp.type); }\
+static inline T ## _union_t T ## _union_vec_at(T ## _union_vec_t uv__tmp, size_t i__tmp)\
+{ T ## _union_t u__tmp = { 0, 0 }; size_t n__tmp = NS ## vec_len(uv__tmp.type);\
+ FLATCC_ASSERT(n__tmp > (i__tmp) && "index out of range"); u__tmp.type = uv__tmp.type[i__tmp];\
+ /* Unknown type is treated as NONE for schema evolution. */\
+ if (u__tmp.type == 0) return u__tmp;\
+ u__tmp.value = NS ## generic_vec_at(uv__tmp.value, i__tmp); return u__tmp; }\
+static inline NS ## string_t T ## _union_vec_at_as_string(T ## _union_vec_t uv__tmp, size_t i__tmp)\
+{ return (NS ## string_t) NS ## generic_vec_at_as_string(uv__tmp.value, i__tmp); }
+
+#define __flatbuffers_define_union_vector(NS, T)\
+typedef NS ## union_vec_t T ## _union_vec_t;\
+typedef NS ## mutable_union_vec_t T ## _mutable_union_vec_t;\
+static inline T ## _mutable_union_vec_t T ## _mutable_union_vec_cast(T ## _union_vec_t u__tmp)\
+{ return NS ## mutable_union_vec_cast(u__tmp); }\
+__## NS ## define_union_vector_ops(NS, T)
+#define __flatbuffers_define_union(NS, T)\
+typedef NS ## union_t T ## _union_t;\
+typedef NS ## mutable_union_t T ## _mutable_union_t;\
+static inline T ## _mutable_union_t T ## _mutable_union_cast(T ## _union_t u__tmp)\
+{ return NS ## mutable_union_cast(u__tmp); }\
+__## NS ## define_union_vector(NS, T)
+#define __flatbuffers_define_union_vector_field(NS, ID, N, NK, T, r)\
+__## NS ## define_vector_field(ID - 1, N, NK ## _type, T ## _vec_t, r)\
+__## NS ## define_vector_field(ID, N, NK, flatbuffers_generic_vec_t, r)\
+static inline T ## _union_vec_t N ## _ ## NK ## _union(N ## _table_t t__tmp)\
+{ T ## _union_vec_t uv__tmp; uv__tmp.type = N ## _ ## NK ## _type_get(t__tmp);\
+ uv__tmp.value = N ## _ ## NK(t__tmp);\
+ FLATCC_ASSERT(NS ## vec_len(uv__tmp.type) == NS ## vec_len(uv__tmp.value)\
+ && "union vector type length mismatch"); return uv__tmp; }
+#include <string.h>
+static const size_t flatbuffers_not_found = (size_t)-1;
+static const size_t flatbuffers_end = (size_t)-1;
+#define __flatbuffers_identity(n) (n)
+#define __flatbuffers_min(a, b) ((a) < (b) ? (a) : (b))
+/* Subtraction doesn't work for unsigned types. */
+#define __flatbuffers_scalar_cmp(x, y, n) ((x) < (y) ? -1 : (x) > (y))
+static inline int __flatbuffers_string_n_cmp(flatbuffers_string_t v, const char *s, size_t n)
+{ size_t nv = flatbuffers_string_len(v); int x = strncmp(v, s, nv < n ? nv : n);
+ return x != 0 ? x : nv < n ? -1 : nv > n; }
+/* `n` arg unused, but needed by string find macro expansion. */
+static inline int __flatbuffers_string_cmp(flatbuffers_string_t v, const char *s, size_t n) { (void)n; return strcmp(v, s); }
+/* A = identity if searching scalar vectors rather than key fields. */
+/* Returns lowest matching index or not_found. */
+#define __flatbuffers_find_by_field(A, V, E, L, K, Kn, T, D)\
+{ T v__tmp; size_t a__tmp = 0, b__tmp, m__tmp; if (!(b__tmp = L(V))) { return flatbuffers_not_found; }\
+ --b__tmp;\
+ while (a__tmp < b__tmp) {\
+ m__tmp = a__tmp + ((b__tmp - a__tmp) >> 1);\
+ v__tmp = A(E(V, m__tmp));\
+ if ((D(v__tmp, (K), (Kn))) < 0) {\
+ a__tmp = m__tmp + 1;\
+ } else {\
+ b__tmp = m__tmp;\
+ }\
+ }\
+ if (a__tmp == b__tmp) {\
+ v__tmp = A(E(V, a__tmp));\
+ if (D(v__tmp, (K), (Kn)) == 0) {\
+ return a__tmp;\
+ }\
+ }\
+ return flatbuffers_not_found;\
+}
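+/*
+ * __flatbuffers_find_by_field is a lower-bound binary search: it narrows
+ * [a, b] to the first element not less than the key, then tests it for
+ * equality, so vectors with duplicate keys yield the lowest matching
+ * index. Illustrative use, assuming the per-type instantiations later in
+ * this header:
+ *
+ *     size_t i = flatbuffers_uint32_vec_find(v, 42);
+ *     if (i != flatbuffers_not_found)
+ *         ;  // found; i is the lowest matching index
+ */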
+#define __flatbuffers_find_by_scalar_field(A, V, E, L, K, T)\
+__flatbuffers_find_by_field(A, V, E, L, K, 0, T, __flatbuffers_scalar_cmp)
+#define __flatbuffers_find_by_string_field(A, V, E, L, K)\
+__flatbuffers_find_by_field(A, V, E, L, K, 0, flatbuffers_string_t, __flatbuffers_string_cmp)
+#define __flatbuffers_find_by_string_n_field(A, V, E, L, K, Kn)\
+__flatbuffers_find_by_field(A, V, E, L, K, Kn, flatbuffers_string_t, __flatbuffers_string_n_cmp)
+#define __flatbuffers_define_find_by_scalar_field(N, NK, TK)\
+static inline size_t N ## _vec_find_by_ ## NK(N ## _vec_t vec__tmp, TK key__tmp)\
+__flatbuffers_find_by_scalar_field(N ## _ ## NK, vec__tmp, N ## _vec_at, N ## _vec_len, key__tmp, TK)
+#define __flatbuffers_define_scalar_find(N, T)\
+static inline size_t N ## _vec_find(N ## _vec_t vec__tmp, T key__tmp)\
+__flatbuffers_find_by_scalar_field(__flatbuffers_identity, vec__tmp, N ## _vec_at, N ## _vec_len, key__tmp, T)
+#define __flatbuffers_define_find_by_string_field(N, NK) \
+/* Note: find only works on vectors sorted by this field. */\
+static inline size_t N ## _vec_find_by_ ## NK(N ## _vec_t vec__tmp, const char *s__tmp)\
+__flatbuffers_find_by_string_field(N ## _ ## NK, vec__tmp, N ## _vec_at, N ## _vec_len, s__tmp)\
+static inline size_t N ## _vec_find_n_by_ ## NK(N ## _vec_t vec__tmp, const char *s__tmp, size_t n__tmp)\
+__flatbuffers_find_by_string_n_field(N ## _ ## NK, vec__tmp, N ## _vec_at, N ## _vec_len, s__tmp, n__tmp)
+#define __flatbuffers_define_default_find_by_scalar_field(N, NK, TK)\
+static inline size_t N ## _vec_find(N ## _vec_t vec__tmp, TK key__tmp)\
+{ return N ## _vec_find_by_ ## NK(vec__tmp, key__tmp); }
+#define __flatbuffers_define_default_find_by_string_field(N, NK) \
+static inline size_t N ## _vec_find(N ## _vec_t vec__tmp, const char *s__tmp)\
+{ return N ## _vec_find_by_ ## NK(vec__tmp, s__tmp); }\
+static inline size_t N ## _vec_find_n(N ## _vec_t vec__tmp, const char *s__tmp, size_t n__tmp)\
+{ return N ## _vec_find_n_by_ ## NK(vec__tmp, s__tmp, n__tmp); }
+/* A = identity if searching scalar vectors rather than key fields. */
+/* Scan returns the lowest matching index, rscan the highest, or not_found. */
+#define __flatbuffers_scan_by_field(b, e, A, V, E, L, K, Kn, T, D)\
+{ T v__tmp; size_t i__tmp;\
+ for (i__tmp = b; i__tmp < e; ++i__tmp) {\
+ v__tmp = A(E(V, i__tmp));\
+ if (D(v__tmp, (K), (Kn)) == 0) {\
+ return i__tmp;\
+ }\
+ }\
+ return flatbuffers_not_found;\
+}
+#define __flatbuffers_rscan_by_field(b, e, A, V, E, L, K, Kn, T, D)\
+{ T v__tmp; size_t i__tmp = e;\
+ while (i__tmp-- > b) {\
+ v__tmp = A(E(V, i__tmp));\
+ if (D(v__tmp, (K), (Kn)) == 0) {\
+ return i__tmp;\
+ }\
+ }\
+ return flatbuffers_not_found;\
+}
+#define __flatbuffers_scan_by_scalar_field(b, e, A, V, E, L, K, T)\
+__flatbuffers_scan_by_field(b, e, A, V, E, L, K, 0, T, __flatbuffers_scalar_cmp)
+#define __flatbuffers_scan_by_string_field(b, e, A, V, E, L, K)\
+__flatbuffers_scan_by_field(b, e, A, V, E, L, K, 0, flatbuffers_string_t, __flatbuffers_string_cmp)
+#define __flatbuffers_scan_by_string_n_field(b, e, A, V, E, L, K, Kn)\
+__flatbuffers_scan_by_field(b, e, A, V, E, L, K, Kn, flatbuffers_string_t, __flatbuffers_string_n_cmp)
+#define __flatbuffers_rscan_by_scalar_field(b, e, A, V, E, L, K, T)\
+__flatbuffers_rscan_by_field(b, e, A, V, E, L, K, 0, T, __flatbuffers_scalar_cmp)
+#define __flatbuffers_rscan_by_string_field(b, e, A, V, E, L, K)\
+__flatbuffers_rscan_by_field(b, e, A, V, E, L, K, 0, flatbuffers_string_t, __flatbuffers_string_cmp)
+#define __flatbuffers_rscan_by_string_n_field(b, e, A, V, E, L, K, Kn)\
+__flatbuffers_rscan_by_field(b, e, A, V, E, L, K, Kn, flatbuffers_string_t, __flatbuffers_string_n_cmp)
+#define __flatbuffers_define_scan_by_scalar_field(N, NK, T)\
+static inline size_t N ## _vec_scan_by_ ## NK(N ## _vec_t vec__tmp, T key__tmp)\
+__flatbuffers_scan_by_scalar_field(0, N ## _vec_len(vec__tmp), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, key__tmp, T)\
+static inline size_t N ## _vec_scan_ex_by_ ## NK(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, T key__tmp)\
+__flatbuffers_scan_by_scalar_field(begin__tmp, __flatbuffers_min(end__tmp, N ## _vec_len(vec__tmp)), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, key__tmp, T)\
+static inline size_t N ## _vec_rscan_by_ ## NK(N ## _vec_t vec__tmp, T key__tmp)\
+__flatbuffers_rscan_by_scalar_field(0, N ## _vec_len(vec__tmp), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, key__tmp, T)\
+static inline size_t N ## _vec_rscan_ex_by_ ## NK(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, T key__tmp)\
+__flatbuffers_rscan_by_scalar_field(begin__tmp, __flatbuffers_min(end__tmp, N ## _vec_len(vec__tmp)), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, key__tmp, T)
+#define __flatbuffers_define_scalar_scan(N, T)\
+static inline size_t N ## _vec_scan(N ## _vec_t vec__tmp, T key__tmp)\
+__flatbuffers_scan_by_scalar_field(0, N ## _vec_len(vec__tmp), __flatbuffers_identity, vec__tmp, N ## _vec_at, N ## _vec_len, key__tmp, T)\
+static inline size_t N ## _vec_scan_ex(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, T key__tmp)\
+__flatbuffers_scan_by_scalar_field(begin__tmp, __flatbuffers_min(end__tmp, N ## _vec_len(vec__tmp)), __flatbuffers_identity, vec__tmp, N ## _vec_at, N ## _vec_len, key__tmp, T)\
+static inline size_t N ## _vec_rscan(N ## _vec_t vec__tmp, T key__tmp)\
+__flatbuffers_rscan_by_scalar_field(0, N ## _vec_len(vec__tmp), __flatbuffers_identity, vec__tmp, N ## _vec_at, N ## _vec_len, key__tmp, T)\
+static inline size_t N ## _vec_rscan_ex(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, T key__tmp)\
+__flatbuffers_rscan_by_scalar_field(begin__tmp, __flatbuffers_min(end__tmp, N ## _vec_len(vec__tmp)), __flatbuffers_identity, vec__tmp, N ## _vec_at, N ## _vec_len, key__tmp, T)
+#define __flatbuffers_define_scan_by_string_field(N, NK) \
+static inline size_t N ## _vec_scan_by_ ## NK(N ## _vec_t vec__tmp, const char *s__tmp)\
+__flatbuffers_scan_by_string_field(0, N ## _vec_len(vec__tmp), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, s__tmp)\
+static inline size_t N ## _vec_scan_n_by_ ## NK(N ## _vec_t vec__tmp, const char *s__tmp, size_t n__tmp)\
+__flatbuffers_scan_by_string_n_field(0, N ## _vec_len(vec__tmp), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, s__tmp, n__tmp)\
+static inline size_t N ## _vec_scan_ex_by_ ## NK(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, const char *s__tmp)\
+__flatbuffers_scan_by_string_field(begin__tmp, __flatbuffers_min(end__tmp, N ## _vec_len(vec__tmp)), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, s__tmp)\
+static inline size_t N ## _vec_scan_ex_n_by_ ## NK(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, const char *s__tmp, size_t n__tmp)\
+__flatbuffers_scan_by_string_n_field(begin__tmp, __flatbuffers_min(end__tmp, N ## _vec_len(vec__tmp)), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, s__tmp, n__tmp)\
+static inline size_t N ## _vec_rscan_by_ ## NK(N ## _vec_t vec__tmp, const char *s__tmp)\
+__flatbuffers_rscan_by_string_field(0, N ## _vec_len(vec__tmp), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, s__tmp)\
+static inline size_t N ## _vec_rscan_n_by_ ## NK(N ## _vec_t vec__tmp, const char *s__tmp, size_t n__tmp)\
+__flatbuffers_rscan_by_string_n_field(0, N ## _vec_len(vec__tmp), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, s__tmp, n__tmp)\
+static inline size_t N ## _vec_rscan_ex_by_ ## NK(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, const char *s__tmp)\
+__flatbuffers_rscan_by_string_field(begin__tmp, __flatbuffers_min(end__tmp, N ## _vec_len(vec__tmp)), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, s__tmp)\
+static inline size_t N ## _vec_rscan_ex_n_by_ ## NK(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, const char *s__tmp, size_t n__tmp)\
+__flatbuffers_rscan_by_string_n_field(begin__tmp, __flatbuffers_min(end__tmp, N ## _vec_len(vec__tmp)), N ## _ ## NK ## _get, vec__tmp, N ## _vec_at, N ## _vec_len, s__tmp, n__tmp)
+#define __flatbuffers_define_default_scan_by_scalar_field(N, NK, TK)\
+static inline size_t N ## _vec_scan(N ## _vec_t vec__tmp, TK key__tmp)\
+{ return N ## _vec_scan_by_ ## NK(vec__tmp, key__tmp); }\
+static inline size_t N ## _vec_scan_ex(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, TK key__tmp)\
+{ return N ## _vec_scan_ex_by_ ## NK(vec__tmp, begin__tmp, end__tmp, key__tmp); }\
+static inline size_t N ## _vec_rscan(N ## _vec_t vec__tmp, TK key__tmp)\
+{ return N ## _vec_rscan_by_ ## NK(vec__tmp, key__tmp); }\
+static inline size_t N ## _vec_rscan_ex(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, TK key__tmp)\
+{ return N ## _vec_rscan_ex_by_ ## NK(vec__tmp, begin__tmp, end__tmp, key__tmp); }
+#define __flatbuffers_define_default_scan_by_string_field(N, NK) \
+static inline size_t N ## _vec_scan(N ## _vec_t vec__tmp, const char *s__tmp)\
+{ return N ## _vec_scan_by_ ## NK(vec__tmp, s__tmp); }\
+static inline size_t N ## _vec_scan_n(N ## _vec_t vec__tmp, const char *s__tmp, size_t n__tmp)\
+{ return N ## _vec_scan_n_by_ ## NK(vec__tmp, s__tmp, n__tmp); }\
+static inline size_t N ## _vec_scan_ex(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, const char *s__tmp)\
+{ return N ## _vec_scan_ex_by_ ## NK(vec__tmp, begin__tmp, end__tmp, s__tmp); }\
+static inline size_t N ## _vec_scan_ex_n(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, const char *s__tmp, size_t n__tmp)\
+{ return N ## _vec_scan_ex_n_by_ ## NK(vec__tmp, begin__tmp, end__tmp, s__tmp, n__tmp); }\
+static inline size_t N ## _vec_rscan(N ## _vec_t vec__tmp, const char *s__tmp)\
+{ return N ## _vec_rscan_by_ ## NK(vec__tmp, s__tmp); }\
+static inline size_t N ## _vec_rscan_n(N ## _vec_t vec__tmp, const char *s__tmp, size_t n__tmp)\
+{ return N ## _vec_rscan_n_by_ ## NK(vec__tmp, s__tmp, n__tmp); }\
+static inline size_t N ## _vec_rscan_ex(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, const char *s__tmp)\
+{ return N ## _vec_rscan_ex_by_ ## NK(vec__tmp, begin__tmp, end__tmp, s__tmp); }\
+static inline size_t N ## _vec_rscan_ex_n(N ## _vec_t vec__tmp, size_t begin__tmp, size_t end__tmp, const char *s__tmp, size_t n__tmp)\
+{ return N ## _vec_rscan_ex_n_by_ ## NK(vec__tmp, begin__tmp, end__tmp, s__tmp, n__tmp); }
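+/* Note: unlike find, scan and rscan are linear searches and do not require a
+ * sorted vector. The _ex variants search the half-open range [begin, end),
+ * clamped to the vector length, so a matched scan can be resumed one past the
+ * hit to enumerate duplicates (hypothetical `Monster` table again):
+ *
+ *     size_t i = Monster_vec_scan_by_name(monsters, "Fred");
+ *     size_t j = Monster_vec_scan_ex_by_name(monsters, i + 1,
+ *         Monster_vec_len(monsters), "Fred");
+ */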
+#define __flatbuffers_heap_sort(N, X, A, E, L, TK, TE, D, S)\
+static inline void __ ## N ## X ## __heap_sift_down(\
+ N ## _mutable_vec_t vec__tmp, size_t start__tmp, size_t end__tmp)\
+{ size_t child__tmp, root__tmp; TK v1__tmp, v2__tmp, vroot__tmp;\
+ root__tmp = start__tmp;\
+ while ((root__tmp << 1) <= end__tmp) {\
+ child__tmp = root__tmp << 1;\
+ if (child__tmp < end__tmp) {\
+ v1__tmp = A(E(vec__tmp, child__tmp));\
+ v2__tmp = A(E(vec__tmp, child__tmp + 1));\
+ if (D(v1__tmp, v2__tmp) < 0) {\
+ child__tmp++;\
+ }\
+ }\
+ vroot__tmp = A(E(vec__tmp, root__tmp));\
+ v1__tmp = A(E(vec__tmp, child__tmp));\
+ if (D(vroot__tmp, v1__tmp) < 0) {\
+ S(vec__tmp, root__tmp, child__tmp, TE);\
+ root__tmp = child__tmp;\
+ } else {\
+ return;\
+ }\
+ }\
+}\
+static inline void __ ## N ## X ## __heap_sort(N ## _mutable_vec_t vec__tmp)\
+{ size_t start__tmp, end__tmp, size__tmp;\
+ size__tmp = L(vec__tmp); if (size__tmp == 0) return; end__tmp = size__tmp - 1; start__tmp = size__tmp >> 1;\
+ do { __ ## N ## X ## __heap_sift_down(vec__tmp, start__tmp, end__tmp); } while (start__tmp--);\
+ while (end__tmp > 0) { \
+ S(vec__tmp, 0, end__tmp, TE);\
+ __ ## N ## X ## __heap_sift_down(vec__tmp, 0, --end__tmp); } }
+#define __flatbuffers_define_sort_by_field(N, NK, TK, TE, D, S)\
+ __flatbuffers_heap_sort(N, _sort_by_ ## NK, N ## _ ## NK ## _get, N ## _vec_at, N ## _vec_len, TK, TE, D, S)\
+static inline void N ## _vec_sort_by_ ## NK(N ## _mutable_vec_t vec__tmp)\
+{ __ ## N ## _sort_by_ ## NK ## __heap_sort(vec__tmp); }
+#define __flatbuffers_define_sort(N, TK, TE, D, S)\
+__flatbuffers_heap_sort(N, , __flatbuffers_identity, N ## _vec_at, N ## _vec_len, TK, TE, D, S)\
+static inline void N ## _vec_sort(N ## _mutable_vec_t vec__tmp) { __ ## N ## __heap_sort(vec__tmp); }
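+/* Usage sketch: sorting is an in-place, non-stable heapsort over a mutable
+ * view of the vector, so the underlying buffer must be writable:
+ *
+ *     Monster_vec_sort_by_name((Monster_mutable_vec_t)monsters);
+ *     size_t i = Monster_vec_find_by_name(monsters, "Fred");
+ *
+ * (`Monster` remains a hypothetical example type.) */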
+#define __flatbuffers_scalar_diff(x, y) ((x) < (y) ? -1 : (x) > (y))
+#define __flatbuffers_string_diff(x, y) __flatbuffers_string_n_cmp((x), (const char *)(y), flatbuffers_string_len(y))
+#define __flatbuffers_value_swap(vec, a, b, TE) { TE x__tmp = vec[b]; vec[b] = vec[a]; vec[a] = x__tmp; }
+#define __flatbuffers_uoffset_swap(vec, a, b, TE)\
+{ TE ta__tmp, tb__tmp, d__tmp;\
+ d__tmp = (TE)((a - b) * sizeof(vec[0]));\
+ ta__tmp = __flatbuffers_uoffset_read_from_pe(vec + b) - d__tmp;\
+ tb__tmp = __flatbuffers_uoffset_read_from_pe(vec + a) + d__tmp;\
+ __flatbuffers_uoffset_write_to_pe(vec + a, ta__tmp);\
+ __flatbuffers_uoffset_write_to_pe(vec + b, tb__tmp); }
+#define __flatbuffers_scalar_swap(vec, a, b, TE) __flatbuffers_value_swap(vec, a, b, TE)
+#define __flatbuffers_string_swap(vec, a, b, TE) __flatbuffers_uoffset_swap(vec, a, b, TE)
+#define __flatbuffers_struct_swap(vec, a, b, TE) __flatbuffers_value_swap(vec, a, b, TE)
+#define __flatbuffers_table_swap(vec, a, b, TE) __flatbuffers_uoffset_swap(vec, a, b, TE)
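+/* String and table vectors store uoffsets relative to each element's own
+ * position, so swapping entries a and b cannot copy the stored values
+ * verbatim: __flatbuffers_uoffset_swap adjusts each offset by the distance
+ * d = (a - b) * sizeof(element) so both entries still point at their original
+ * targets after the swap. Scalar and struct vectors store values inline and
+ * swap verbatim. */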
+#define __flatbuffers_define_struct_sort_by_scalar_field(N, NK, TK, TE)\
+ __flatbuffers_define_sort_by_field(N, NK, TK, TE, __flatbuffers_scalar_diff, __flatbuffers_struct_swap)
+#define __flatbuffers_define_table_sort_by_scalar_field(N, NK, TK)\
+ __flatbuffers_define_sort_by_field(N, NK, TK, flatbuffers_uoffset_t, __flatbuffers_scalar_diff, __flatbuffers_table_swap)
+#define __flatbuffers_define_table_sort_by_string_field(N, NK)\
+ __flatbuffers_define_sort_by_field(N, NK, flatbuffers_string_t, flatbuffers_uoffset_t, __flatbuffers_string_diff, __flatbuffers_table_swap)
+#define __flatbuffers_define_scalar_sort(N, T) __flatbuffers_define_sort(N, T, T, __flatbuffers_scalar_diff, __flatbuffers_scalar_swap)
+#define __flatbuffers_define_string_sort() __flatbuffers_define_sort(flatbuffers_string, flatbuffers_string_t, flatbuffers_uoffset_t, __flatbuffers_string_diff, __flatbuffers_string_swap)
+#define __flatbuffers_sort_vector_field(N, NK, T, t)\
+{ T ## _mutable_vec_t v__tmp = (T ## _mutable_vec_t) N ## _ ## NK ## _get(t);\
+ if (v__tmp) T ## _vec_sort(v__tmp); }
+#define __flatbuffers_sort_table_field(N, NK, T, t)\
+{ T ## _sort((T ## _mutable_table_t)N ## _ ## NK ## _get(t)); }
+#define __flatbuffers_sort_union_field(N, NK, T, t)\
+{ T ## _sort(T ## _mutable_union_cast(N ## _ ## NK ## _union(t))); }
+#define __flatbuffers_sort_table_vector_field_elements(N, NK, T, t)\
+{ T ## _vec_t v__tmp = N ## _ ## NK ## _get(t); size_t i__tmp, n__tmp;\
+ n__tmp = T ## _vec_len(v__tmp); for (i__tmp = 0; i__tmp < n__tmp; ++i__tmp) {\
+ T ## _sort((T ## _mutable_table_t)T ## _vec_at(v__tmp, i__tmp)); }}
+#define __flatbuffers_sort_union_vector_field_elements(N, NK, T, t)\
+{ T ## _union_vec_t v__tmp = N ## _ ## NK ## _union(t); size_t i__tmp, n__tmp;\
+ n__tmp = T ## _union_vec_len(v__tmp); for (i__tmp = 0; i__tmp < n__tmp; ++i__tmp) {\
+ T ## _sort(T ## _mutable_union_cast(T ## _union_vec_at(v__tmp, i__tmp))); }}
+#define __flatbuffers_define_scalar_vector(N, T)\
+typedef const T *N ## _vec_t;\
+typedef T *N ## _mutable_vec_t;\
+__flatbuffers_define_scalar_vec_len(N)\
+__flatbuffers_define_scalar_vec_at(N, T)\
+__flatbuffers_define_scalar_find(N, T)\
+__flatbuffers_define_scalar_scan(N, T)\
+__flatbuffers_define_scalar_sort(N, T)
+
+#define __flatbuffers_define_integer_type(N, T, W)\
+__flatcc_define_integer_accessors(N, T, W, flatbuffers_endian)\
+__flatbuffers_define_scalar_vector(N, T)
+__flatbuffers_define_scalar_vector(flatbuffers_bool, flatbuffers_bool_t)
+__flatbuffers_define_scalar_vector(flatbuffers_char, char)
+__flatbuffers_define_scalar_vector(flatbuffers_uint8, uint8_t)
+__flatbuffers_define_scalar_vector(flatbuffers_int8, int8_t)
+__flatbuffers_define_scalar_vector(flatbuffers_uint16, uint16_t)
+__flatbuffers_define_scalar_vector(flatbuffers_int16, int16_t)
+__flatbuffers_define_scalar_vector(flatbuffers_uint32, uint32_t)
+__flatbuffers_define_scalar_vector(flatbuffers_int32, int32_t)
+__flatbuffers_define_scalar_vector(flatbuffers_uint64, uint64_t)
+__flatbuffers_define_scalar_vector(flatbuffers_int64, int64_t)
+__flatbuffers_define_scalar_vector(flatbuffers_float, float)
+__flatbuffers_define_scalar_vector(flatbuffers_double, double)
+__flatbuffers_define_scalar_vector(flatbuffers_union_type, flatbuffers_union_type_t)
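+/* Usage sketch: each instantiation above yields a read-only vector API for
+ * one scalar type, e.g. for uint8 (values are illustrative):
+ *
+ *     size_t n = flatbuffers_uint8_vec_len(v);
+ *     uint8_t x = flatbuffers_uint8_vec_at(v, 0);
+ *     size_t i = flatbuffers_uint8_vec_find(v, 42);  (binary search; sorted input only)
+ */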
+static inline size_t flatbuffers_string_vec_find(flatbuffers_string_vec_t vec, const char *s)
+__flatbuffers_find_by_string_field(__flatbuffers_identity, vec, flatbuffers_string_vec_at, flatbuffers_string_vec_len, s)
+static inline size_t flatbuffers_string_vec_find_n(flatbuffers_string_vec_t vec, const char *s, size_t n)
+__flatbuffers_find_by_string_n_field(__flatbuffers_identity, vec, flatbuffers_string_vec_at, flatbuffers_string_vec_len, s, n)
+static inline size_t flatbuffers_string_vec_scan(flatbuffers_string_vec_t vec, const char *s)
+__flatbuffers_scan_by_string_field(0, flatbuffers_string_vec_len(vec), __flatbuffers_identity, vec, flatbuffers_string_vec_at, flatbuffers_string_vec_len, s)
+static inline size_t flatbuffers_string_vec_scan_n(flatbuffers_string_vec_t vec, const char *s, size_t n)
+__flatbuffers_scan_by_string_n_field(0, flatbuffers_string_vec_len(vec), __flatbuffers_identity, vec, flatbuffers_string_vec_at, flatbuffers_string_vec_len, s, n)
+static inline size_t flatbuffers_string_vec_scan_ex(flatbuffers_string_vec_t vec, size_t begin, size_t end, const char *s)
+__flatbuffers_scan_by_string_field(begin, __flatbuffers_min(end, flatbuffers_string_vec_len(vec)), __flatbuffers_identity, vec, flatbuffers_string_vec_at, flatbuffers_string_vec_len, s)
+static inline size_t flatbuffers_string_vec_scan_ex_n(flatbuffers_string_vec_t vec, size_t begin, size_t end, const char *s, size_t n)
+__flatbuffers_scan_by_string_n_field(begin, __flatbuffers_min(end, flatbuffers_string_vec_len(vec)), __flatbuffers_identity, vec, flatbuffers_string_vec_at, flatbuffers_string_vec_len, s, n)
+static inline size_t flatbuffers_string_vec_rscan(flatbuffers_string_vec_t vec, const char *s)
+__flatbuffers_rscan_by_string_field(0, flatbuffers_string_vec_len(vec), __flatbuffers_identity, vec, flatbuffers_string_vec_at, flatbuffers_string_vec_len, s)
+static inline size_t flatbuffers_string_vec_rscan_n(flatbuffers_string_vec_t vec, const char *s, size_t n)
+__flatbuffers_rscan_by_string_n_field(0, flatbuffers_string_vec_len(vec), __flatbuffers_identity, vec, flatbuffers_string_vec_at, flatbuffers_string_vec_len, s, n)
+static inline size_t flatbuffers_string_vec_rscan_ex(flatbuffers_string_vec_t vec, size_t begin, size_t end, const char *s)
+__flatbuffers_rscan_by_string_field(begin, __flatbuffers_min(end, flatbuffers_string_vec_len(vec)), __flatbuffers_identity, vec, flatbuffers_string_vec_at, flatbuffers_string_vec_len, s)
+static inline size_t flatbuffers_string_vec_rscan_ex_n(flatbuffers_string_vec_t vec, size_t begin, size_t end, const char *s, size_t n)
+__flatbuffers_rscan_by_string_n_field(begin, __flatbuffers_min(end, flatbuffers_string_vec_len(vec)), __flatbuffers_identity, vec, flatbuffers_string_vec_at, flatbuffers_string_vec_len, s, n)
+__flatbuffers_define_string_sort()
+#define __flatbuffers_define_struct_scalar_fixed_array_field(N, NK, TK, T, L)\
+static inline T N ## _ ## NK ## _get(N ## _struct_t t__tmp, size_t i__tmp)\
+{ if (!t__tmp || i__tmp >= L) return 0;\
+ return __flatbuffers_read_scalar(TK, &(t__tmp->NK[i__tmp])); }\
+static inline const T *N ## _ ## NK ## _get_ptr(N ## _struct_t t__tmp)\
+{ return t__tmp ? t__tmp->NK : 0; }\
+static inline size_t N ## _ ## NK ## _get_len(void) { return L; }\
+static inline T N ## _ ## NK (N ## _struct_t t__tmp, size_t i__tmp)\
+{ return N ## _ ## NK ## _get(t__tmp, i__tmp); }
+#define __flatbuffers_define_struct_struct_fixed_array_field(N, NK, T, L)\
+static inline T N ## _ ## NK ## _get(N ## _struct_t t__tmp, size_t i__tmp)\
+{ if (!t__tmp || i__tmp >= L) return 0; return t__tmp->NK + i__tmp; }\
+static inline T N ## _ ## NK ## _get_ptr(N ## _struct_t t__tmp)\
+{ return t__tmp ? t__tmp->NK : 0; }\
+static inline size_t N ## _ ## NK ## _get_len(void) { return L; }\
+static inline T N ## _ ## NK(N ## _struct_t t__tmp, size_t i__tmp)\
+{ if (!t__tmp || i__tmp >= L) return 0; return t__tmp->NK + i__tmp; }
+#define __flatbuffers_define_struct_scalar_field(N, NK, TK, T)\
+static inline T N ## _ ## NK ## _get(N ## _struct_t t__tmp)\
+{ return t__tmp ? __flatbuffers_read_scalar(TK, &(t__tmp->NK)) : 0; }\
+static inline const T *N ## _ ## NK ## _get_ptr(N ## _struct_t t__tmp)\
+{ return t__tmp ? &(t__tmp->NK) : 0; }\
+static inline T N ## _ ## NK (N ## _struct_t t__tmp)\
+{ return t__tmp ? __flatbuffers_read_scalar(TK, &(t__tmp->NK)) : 0; }\
+__flatbuffers_define_scan_by_scalar_field(N, NK, T)
+#define __flatbuffers_define_struct_struct_field(N, NK, T)\
+static inline T N ## _ ## NK ## _get(N ## _struct_t t__tmp) { return t__tmp ? &(t__tmp->NK) : 0; }\
+static inline T N ## _ ## NK (N ## _struct_t t__tmp) { return t__tmp ? &(t__tmp->NK) : 0; }
+/* If fid is null, the function returns true without testing, as the buffer is not expected to have any id. */
+static inline int flatbuffers_has_identifier(const void *buffer, const char *fid)
+{ flatbuffers_thash_t id, id2 = 0; if (fid == 0) { return 1; }
+ id2 = flatbuffers_type_hash_from_string(fid);
+ id = __flatbuffers_thash_read_from_pe(((flatbuffers_uoffset_t *)buffer) + 1);
+ return id2 == 0 || id == id2; }
+static inline int flatbuffers_has_type_hash(const void *buffer, flatbuffers_thash_t thash)
+{ return thash == 0 || (__flatbuffers_thash_read_from_pe((flatbuffers_uoffset_t *)buffer + 1) == thash); }
+
+static inline flatbuffers_thash_t flatbuffers_get_type_hash(const void *buffer)
+{ return __flatbuffers_thash_read_from_pe((flatbuffers_uoffset_t *)buffer + 1); }
+
+#define flatbuffers_verify_endian() flatbuffers_has_identifier("\x00\x00\x00\x00" "1234", "1234")
+static inline void *flatbuffers_read_size_prefix(void *b, size_t *size_out)
+{ if (size_out) { *size_out = (size_t)__flatbuffers_uoffset_read_from_pe(b); }
+ return (uint8_t *)b + sizeof(flatbuffers_uoffset_t); }
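+/* Usage sketch: a size-prefixed buffer is unwrapped before root access, and
+ * the identifier can then be checked explicitly ("MONS" stands in for a
+ * schema's 4-character file identifier):
+ *
+ *     size_t size;
+ *     void *buf = flatbuffers_read_size_prefix(raw, &size);
+ *     if (!flatbuffers_has_identifier(buf, "MONS")) return -1;
+ */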
+/* Null file identifier accepts anything, otherwise fid should be 4 characters. */
+#define __flatbuffers_read_root(T, K, buffer, fid)\
+ ((!buffer || !flatbuffers_has_identifier(buffer, fid)) ? 0 :\
+ ((T ## _ ## K ## t)(((uint8_t *)buffer) +\
+ __flatbuffers_uoffset_read_from_pe(buffer))))
+#define __flatbuffers_read_typed_root(T, K, buffer, thash)\
+ ((!buffer || !flatbuffers_has_type_hash(buffer, thash)) ? 0 :\
+ ((T ## _ ## K ## t)(((uint8_t *)buffer) +\
+ __flatbuffers_uoffset_read_from_pe(buffer))))
+#define __flatbuffers_nested_buffer_as_root(C, N, T, K)\
+static inline T ## _ ## K ## t C ## _ ## N ## _as_root_with_identifier(C ## _ ## table_t t__tmp, const char *fid__tmp)\
+{ const uint8_t *buffer__tmp = C ## _ ## N(t__tmp); return __flatbuffers_read_root(T, K, buffer__tmp, fid__tmp); }\
+static inline T ## _ ## K ## t C ## _ ## N ## _as_typed_root(C ## _ ## table_t t__tmp)\
+{ const uint8_t *buffer__tmp = C ## _ ## N(t__tmp); return __flatbuffers_read_root(T, K, buffer__tmp, C ## _ ## type_identifier); }\
+static inline T ## _ ## K ## t C ## _ ## N ## _as_root(C ## _ ## table_t t__tmp)\
+{ const char *fid__tmp = T ## _file_identifier;\
+ const uint8_t *buffer__tmp = C ## _ ## N(t__tmp); return __flatbuffers_read_root(T, K, buffer__tmp, fid__tmp); }
+#define __flatbuffers_buffer_as_root(N, K)\
+static inline N ## _ ## K ## t N ## _as_root_with_identifier(const void *buffer__tmp, const char *fid__tmp)\
+{ return __flatbuffers_read_root(N, K, buffer__tmp, fid__tmp); }\
+static inline N ## _ ## K ## t N ## _as_root_with_type_hash(const void *buffer__tmp, flatbuffers_thash_t thash__tmp)\
+{ return __flatbuffers_read_typed_root(N, K, buffer__tmp, thash__tmp); }\
+static inline N ## _ ## K ## t N ## _as_root(const void *buffer__tmp)\
+{ const char *fid__tmp = N ## _file_identifier;\
+ return __flatbuffers_read_root(N, K, buffer__tmp, fid__tmp); }\
+static inline N ## _ ## K ## t N ## _as_typed_root(const void *buffer__tmp)\
+{ return __flatbuffers_read_typed_root(N, K, buffer__tmp, N ## _type_hash); }
+#define __flatbuffers_struct_as_root(N) __flatbuffers_buffer_as_root(N, struct_)
+#define __flatbuffers_table_as_root(N) __flatbuffers_buffer_as_root(N, table_)
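+/* Usage sketch: for a hypothetical root table `Monster`, the macro above
+ * expands to accessors such as
+ *
+ *     Monster_table_t m = Monster_as_root(buf);
+ *     Monster_table_t m2 = Monster_as_root_with_identifier(buf, "MONS");
+ *
+ * each returning 0 when the buffer is null or its identifier check fails. */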
+
+#include "flatcc/flatcc_epilogue.h"
+#endif /* FLATBUFFERS_COMMON_H */
diff --git a/nostrdb/flatcc/reflection/reflection_builder.h b/nostrdb/flatcc/reflection/reflection_builder.h
@@ -0,0 +1,457 @@
+#ifndef REFLECTION_BUILDER_H
+#define REFLECTION_BUILDER_H
+
+/* Generated by flatcc 0.6.1 FlatBuffers schema compiler for C by dvide.com */
+
+#ifndef REFLECTION_READER_H
+#include "reflection_reader.h"
+#endif
+#ifndef FLATBUFFERS_COMMON_BUILDER_H
+#include "flatbuffers_common_builder.h"
+#endif
+#include "flatcc/flatcc_prologue.h"
+#undef flatbuffers_identifier
+#define flatbuffers_identifier "BFBS"
+#undef flatbuffers_extension
+#define flatbuffers_extension "bfbs"
+
+#define __reflection_BaseType_formal_args , reflection_BaseType_enum_t v0
+#define __reflection_BaseType_call_args , v0
+__flatbuffers_build_scalar(flatbuffers_, reflection_BaseType, reflection_BaseType_enum_t)
+
+static const flatbuffers_voffset_t __reflection_Type_required[] = { 0 };
+typedef flatbuffers_ref_t reflection_Type_ref_t;
+static reflection_Type_ref_t reflection_Type_clone(flatbuffers_builder_t *B, reflection_Type_table_t t);
+__flatbuffers_build_table(flatbuffers_, reflection_Type, 4)
+
+static const flatbuffers_voffset_t __reflection_KeyValue_required[] = { 0, 0 };
+typedef flatbuffers_ref_t reflection_KeyValue_ref_t;
+static reflection_KeyValue_ref_t reflection_KeyValue_clone(flatbuffers_builder_t *B, reflection_KeyValue_table_t t);
+__flatbuffers_build_table(flatbuffers_, reflection_KeyValue, 2)
+
+static const flatbuffers_voffset_t __reflection_EnumVal_required[] = { 0, 0 };
+typedef flatbuffers_ref_t reflection_EnumVal_ref_t;
+static reflection_EnumVal_ref_t reflection_EnumVal_clone(flatbuffers_builder_t *B, reflection_EnumVal_table_t t);
+__flatbuffers_build_table(flatbuffers_, reflection_EnumVal, 5)
+
+static const flatbuffers_voffset_t __reflection_Enum_required[] = { 0, 1, 3, 0 };
+typedef flatbuffers_ref_t reflection_Enum_ref_t;
+static reflection_Enum_ref_t reflection_Enum_clone(flatbuffers_builder_t *B, reflection_Enum_table_t t);
+__flatbuffers_build_table(flatbuffers_, reflection_Enum, 6)
+
+static const flatbuffers_voffset_t __reflection_Field_required[] = { 0, 1, 0 };
+typedef flatbuffers_ref_t reflection_Field_ref_t;
+static reflection_Field_ref_t reflection_Field_clone(flatbuffers_builder_t *B, reflection_Field_table_t t);
+__flatbuffers_build_table(flatbuffers_, reflection_Field, 12)
+
+static const flatbuffers_voffset_t __reflection_Object_required[] = { 0, 1, 0 };
+typedef flatbuffers_ref_t reflection_Object_ref_t;
+static reflection_Object_ref_t reflection_Object_clone(flatbuffers_builder_t *B, reflection_Object_table_t t);
+__flatbuffers_build_table(flatbuffers_, reflection_Object, 7)
+
+static const flatbuffers_voffset_t __reflection_RPCCall_required[] = { 0, 1, 2, 0 };
+typedef flatbuffers_ref_t reflection_RPCCall_ref_t;
+static reflection_RPCCall_ref_t reflection_RPCCall_clone(flatbuffers_builder_t *B, reflection_RPCCall_table_t t);
+__flatbuffers_build_table(flatbuffers_, reflection_RPCCall, 5)
+
+static const flatbuffers_voffset_t __reflection_Service_required[] = { 0, 0 };
+typedef flatbuffers_ref_t reflection_Service_ref_t;
+static reflection_Service_ref_t reflection_Service_clone(flatbuffers_builder_t *B, reflection_Service_table_t t);
+__flatbuffers_build_table(flatbuffers_, reflection_Service, 4)
+
+static const flatbuffers_voffset_t __reflection_Schema_required[] = { 0, 1, 0 };
+typedef flatbuffers_ref_t reflection_Schema_ref_t;
+static reflection_Schema_ref_t reflection_Schema_clone(flatbuffers_builder_t *B, reflection_Schema_table_t t);
+__flatbuffers_build_table(flatbuffers_, reflection_Schema, 6)
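+/* In the declarations above, each __reflection_*_required array appears to
+ * list the voffset ids of the table's required fields followed by a 0
+ * terminator (a lone 0 meaning none), and the trailing argument to
+ * __flatbuffers_build_table is the table's field count from the schema,
+ * e.g. 12 for reflection_Field. */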
+
+#define __reflection_Type_formal_args , reflection_BaseType_enum_t v0, reflection_BaseType_enum_t v1, int32_t v2, uint16_t v3
+#define __reflection_Type_call_args , v0, v1, v2, v3
+static inline reflection_Type_ref_t reflection_Type_create(flatbuffers_builder_t *B __reflection_Type_formal_args);
+__flatbuffers_build_table_prolog(flatbuffers_, reflection_Type, reflection_Type_file_identifier, reflection_Type_type_identifier)
+
+#define __reflection_KeyValue_formal_args , flatbuffers_string_ref_t v0, flatbuffers_string_ref_t v1
+#define __reflection_KeyValue_call_args , v0, v1
+static inline reflection_KeyValue_ref_t reflection_KeyValue_create(flatbuffers_builder_t *B __reflection_KeyValue_formal_args);
+__flatbuffers_build_table_prolog(flatbuffers_, reflection_KeyValue, reflection_KeyValue_file_identifier, reflection_KeyValue_type_identifier)
+
+#define __reflection_EnumVal_formal_args ,\
+ flatbuffers_string_ref_t v0, int64_t v1, reflection_Object_ref_t v2, reflection_Type_ref_t v3, flatbuffers_string_vec_ref_t v4
+#define __reflection_EnumVal_call_args ,\
+ v0, v1, v2, v3, v4
+static inline reflection_EnumVal_ref_t reflection_EnumVal_create(flatbuffers_builder_t *B __reflection_EnumVal_formal_args);
+__flatbuffers_build_table_prolog(flatbuffers_, reflection_EnumVal, reflection_EnumVal_file_identifier, reflection_EnumVal_type_identifier)
+
+#define __reflection_Enum_formal_args ,\
+ flatbuffers_string_ref_t v0, reflection_EnumVal_vec_ref_t v1, flatbuffers_bool_t v2, reflection_Type_ref_t v3, reflection_KeyValue_vec_ref_t v4, flatbuffers_string_vec_ref_t v5
+#define __reflection_Enum_call_args ,\
+ v0, v1, v2, v3, v4, v5
+static inline reflection_Enum_ref_t reflection_Enum_create(flatbuffers_builder_t *B __reflection_Enum_formal_args);
+__flatbuffers_build_table_prolog(flatbuffers_, reflection_Enum, reflection_Enum_file_identifier, reflection_Enum_type_identifier)
+
+#define __reflection_Field_formal_args ,\
+ flatbuffers_string_ref_t v0, reflection_Type_ref_t v1, uint16_t v2, uint16_t v3,\
+ int64_t v4, double v5, flatbuffers_bool_t v6, flatbuffers_bool_t v7,\
+ flatbuffers_bool_t v8, reflection_KeyValue_vec_ref_t v9, flatbuffers_string_vec_ref_t v10, flatbuffers_bool_t v11
+#define __reflection_Field_call_args ,\
+ v0, v1, v2, v3,\
+ v4, v5, v6, v7,\
+ v8, v9, v10, v11
+static inline reflection_Field_ref_t reflection_Field_create(flatbuffers_builder_t *B __reflection_Field_formal_args);
+__flatbuffers_build_table_prolog(flatbuffers_, reflection_Field, reflection_Field_file_identifier, reflection_Field_type_identifier)
+
+#define __reflection_Object_formal_args ,\
+ flatbuffers_string_ref_t v0, reflection_Field_vec_ref_t v1, flatbuffers_bool_t v2, int32_t v3,\
+ int32_t v4, reflection_KeyValue_vec_ref_t v5, flatbuffers_string_vec_ref_t v6
+#define __reflection_Object_call_args ,\
+ v0, v1, v2, v3,\
+ v4, v5, v6
+static inline reflection_Object_ref_t reflection_Object_create(flatbuffers_builder_t *B __reflection_Object_formal_args);
+__flatbuffers_build_table_prolog(flatbuffers_, reflection_Object, reflection_Object_file_identifier, reflection_Object_type_identifier)
+
+#define __reflection_RPCCall_formal_args ,\
+ flatbuffers_string_ref_t v0, reflection_Object_ref_t v1, reflection_Object_ref_t v2, reflection_KeyValue_vec_ref_t v3, flatbuffers_string_vec_ref_t v4
+#define __reflection_RPCCall_call_args ,\
+ v0, v1, v2, v3, v4
+static inline reflection_RPCCall_ref_t reflection_RPCCall_create(flatbuffers_builder_t *B __reflection_RPCCall_formal_args);
+__flatbuffers_build_table_prolog(flatbuffers_, reflection_RPCCall, reflection_RPCCall_file_identifier, reflection_RPCCall_type_identifier)
+
+#define __reflection_Service_formal_args , flatbuffers_string_ref_t v0, reflection_RPCCall_vec_ref_t v1, reflection_KeyValue_vec_ref_t v2, flatbuffers_string_vec_ref_t v3
+#define __reflection_Service_call_args , v0, v1, v2, v3
+static inline reflection_Service_ref_t reflection_Service_create(flatbuffers_builder_t *B __reflection_Service_formal_args);
+__flatbuffers_build_table_prolog(flatbuffers_, reflection_Service, reflection_Service_file_identifier, reflection_Service_type_identifier)
+
+#define __reflection_Schema_formal_args ,\
+ reflection_Object_vec_ref_t v0, reflection_Enum_vec_ref_t v1, flatbuffers_string_ref_t v2, flatbuffers_string_ref_t v3, reflection_Object_ref_t v4, reflection_Service_vec_ref_t v5
+#define __reflection_Schema_call_args ,\
+ v0, v1, v2, v3, v4, v5
+static inline reflection_Schema_ref_t reflection_Schema_create(flatbuffers_builder_t *B __reflection_Schema_formal_args);
+__flatbuffers_build_table_prolog(flatbuffers_, reflection_Schema, reflection_Schema_file_identifier, reflection_Schema_type_identifier)
+
+__flatbuffers_build_scalar_field(0, flatbuffers_, reflection_Type_base_type, reflection_BaseType, reflection_BaseType_enum_t, 1, 1, INT8_C(0), reflection_Type)
+__flatbuffers_build_scalar_field(1, flatbuffers_, reflection_Type_element, reflection_BaseType, reflection_BaseType_enum_t, 1, 1, INT8_C(0), reflection_Type)
+__flatbuffers_build_scalar_field(2, flatbuffers_, reflection_Type_index, flatbuffers_int32, int32_t, 4, 4, INT32_C(-1), reflection_Type)
+__flatbuffers_build_scalar_field(3, flatbuffers_, reflection_Type_fixed_length, flatbuffers_uint16, uint16_t, 2, 2, UINT16_C(0), reflection_Type)
+
+static inline reflection_Type_ref_t reflection_Type_create(flatbuffers_builder_t *B __reflection_Type_formal_args)
+{
+ if (reflection_Type_start(B)
+ || reflection_Type_index_add(B, v2)
+ || reflection_Type_fixed_length_add(B, v3)
+ || reflection_Type_base_type_add(B, v0)
+ || reflection_Type_element_add(B, v1)) {
+ return 0;
+ }
+ return reflection_Type_end(B);
+}
+
+static reflection_Type_ref_t reflection_Type_clone(flatbuffers_builder_t *B, reflection_Type_table_t t)
+{
+ __flatbuffers_memoize_begin(B, t);
+ if (reflection_Type_start(B)
+ || reflection_Type_index_pick(B, t)
+ || reflection_Type_fixed_length_pick(B, t)
+ || reflection_Type_base_type_pick(B, t)
+ || reflection_Type_element_pick(B, t)) {
+ return 0;
+ }
+ __flatbuffers_memoize_end(B, t, reflection_Type_end(B));
+}
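+/* Note on the generated pattern: create adds fields in decreasing size order
+ * (the int32 and uint16 fields before the two 1-byte enums), which tends to
+ * minimize alignment padding in the built table, while clone memoizes `t` so
+ * that cloning the same source table twice shares one copy. Illustrative call:
+ *
+ *     reflection_Type_ref_t r = reflection_Type_create(B,
+ *         reflection_BaseType_Int, reflection_BaseType_None, -1, 0);
+ */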
+
+__flatbuffers_build_string_field(0, flatbuffers_, reflection_KeyValue_key, reflection_KeyValue)
+__flatbuffers_build_string_field(1, flatbuffers_, reflection_KeyValue_value, reflection_KeyValue)
+
+static inline reflection_KeyValue_ref_t reflection_KeyValue_create(flatbuffers_builder_t *B __reflection_KeyValue_formal_args)
+{
+ if (reflection_KeyValue_start(B)
+ || reflection_KeyValue_key_add(B, v0)
+ || reflection_KeyValue_value_add(B, v1)) {
+ return 0;
+ }
+ return reflection_KeyValue_end(B);
+}
+
+static reflection_KeyValue_ref_t reflection_KeyValue_clone(flatbuffers_builder_t *B, reflection_KeyValue_table_t t)
+{
+ __flatbuffers_memoize_begin(B, t);
+ if (reflection_KeyValue_start(B)
+ || reflection_KeyValue_key_pick(B, t)
+ || reflection_KeyValue_value_pick(B, t)) {
+ return 0;
+ }
+ __flatbuffers_memoize_end(B, t, reflection_KeyValue_end(B));
+}
+
+__flatbuffers_build_string_field(0, flatbuffers_, reflection_EnumVal_name, reflection_EnumVal)
+__flatbuffers_build_scalar_field(1, flatbuffers_, reflection_EnumVal_value, flatbuffers_int64, int64_t, 8, 8, INT64_C(0), reflection_EnumVal)
+__flatbuffers_build_table_field(2, flatbuffers_, reflection_EnumVal_object, reflection_Object, reflection_EnumVal)
+__flatbuffers_build_table_field(3, flatbuffers_, reflection_EnumVal_union_type, reflection_Type, reflection_EnumVal)
+__flatbuffers_build_string_vector_field(4, flatbuffers_, reflection_EnumVal_documentation, reflection_EnumVal)
+
+static inline reflection_EnumVal_ref_t reflection_EnumVal_create(flatbuffers_builder_t *B __reflection_EnumVal_formal_args)
+{
+ if (reflection_EnumVal_start(B)
+ || reflection_EnumVal_value_add(B, v1)
+ || reflection_EnumVal_name_add(B, v0)
+ || reflection_EnumVal_object_add(B, v2)
+ || reflection_EnumVal_union_type_add(B, v3)
+ || reflection_EnumVal_documentation_add(B, v4)) {
+ return 0;
+ }
+ return reflection_EnumVal_end(B);
+}
+
+static reflection_EnumVal_ref_t reflection_EnumVal_clone(flatbuffers_builder_t *B, reflection_EnumVal_table_t t)
+{
+ __flatbuffers_memoize_begin(B, t);
+ if (reflection_EnumVal_start(B)
+ || reflection_EnumVal_value_pick(B, t)
+ || reflection_EnumVal_name_pick(B, t)
+ || reflection_EnumVal_object_pick(B, t)
+ || reflection_EnumVal_union_type_pick(B, t)
+ || reflection_EnumVal_documentation_pick(B, t)) {
+ return 0;
+ }
+ __flatbuffers_memoize_end(B, t, reflection_EnumVal_end(B));
+}
+
+__flatbuffers_build_string_field(0, flatbuffers_, reflection_Enum_name, reflection_Enum)
+/* vector has keyed elements */
+__flatbuffers_build_table_vector_field(1, flatbuffers_, reflection_Enum_values, reflection_EnumVal, reflection_Enum)
+__flatbuffers_build_scalar_field(2, flatbuffers_, reflection_Enum_is_union, flatbuffers_bool, flatbuffers_bool_t, 1, 1, UINT8_C(0), reflection_Enum)
+__flatbuffers_build_table_field(3, flatbuffers_, reflection_Enum_underlying_type, reflection_Type, reflection_Enum)
+/* vector has keyed elements */
+__flatbuffers_build_table_vector_field(4, flatbuffers_, reflection_Enum_attributes, reflection_KeyValue, reflection_Enum)
+__flatbuffers_build_string_vector_field(5, flatbuffers_, reflection_Enum_documentation, reflection_Enum)
+
+static inline reflection_Enum_ref_t reflection_Enum_create(flatbuffers_builder_t *B __reflection_Enum_formal_args)
+{
+ if (reflection_Enum_start(B)
+ || reflection_Enum_name_add(B, v0)
+ || reflection_Enum_values_add(B, v1)
+ || reflection_Enum_underlying_type_add(B, v3)
+ || reflection_Enum_attributes_add(B, v4)
+ || reflection_Enum_documentation_add(B, v5)
+ || reflection_Enum_is_union_add(B, v2)) {
+ return 0;
+ }
+ return reflection_Enum_end(B);
+}
+
+static reflection_Enum_ref_t reflection_Enum_clone(flatbuffers_builder_t *B, reflection_Enum_table_t t)
+{
+ __flatbuffers_memoize_begin(B, t);
+ if (reflection_Enum_start(B)
+ || reflection_Enum_name_pick(B, t)
+ || reflection_Enum_values_pick(B, t)
+ || reflection_Enum_underlying_type_pick(B, t)
+ || reflection_Enum_attributes_pick(B, t)
+ || reflection_Enum_documentation_pick(B, t)
+ || reflection_Enum_is_union_pick(B, t)) {
+ return 0;
+ }
+ __flatbuffers_memoize_end(B, t, reflection_Enum_end(B));
+}
+
+__flatbuffers_build_string_field(0, flatbuffers_, reflection_Field_name, reflection_Field)
+__flatbuffers_build_table_field(1, flatbuffers_, reflection_Field_type, reflection_Type, reflection_Field)
+__flatbuffers_build_scalar_field(2, flatbuffers_, reflection_Field_id, flatbuffers_uint16, uint16_t, 2, 2, UINT16_C(0), reflection_Field)
+__flatbuffers_build_scalar_field(3, flatbuffers_, reflection_Field_offset, flatbuffers_uint16, uint16_t, 2, 2, UINT16_C(0), reflection_Field)
+__flatbuffers_build_scalar_field(4, flatbuffers_, reflection_Field_default_integer, flatbuffers_int64, int64_t, 8, 8, INT64_C(0), reflection_Field)
+__flatbuffers_build_scalar_field(5, flatbuffers_, reflection_Field_default_real, flatbuffers_double, double, 8, 8, 0.0000000000000000, reflection_Field)
+__flatbuffers_build_scalar_field(6, flatbuffers_, reflection_Field_deprecated, flatbuffers_bool, flatbuffers_bool_t, 1, 1, UINT8_C(0), reflection_Field)
+__flatbuffers_build_scalar_field(7, flatbuffers_, reflection_Field_required, flatbuffers_bool, flatbuffers_bool_t, 1, 1, UINT8_C(0), reflection_Field)
+__flatbuffers_build_scalar_field(8, flatbuffers_, reflection_Field_key, flatbuffers_bool, flatbuffers_bool_t, 1, 1, UINT8_C(0), reflection_Field)
+/* vector has keyed elements */
+__flatbuffers_build_table_vector_field(9, flatbuffers_, reflection_Field_attributes, reflection_KeyValue, reflection_Field)
+__flatbuffers_build_string_vector_field(10, flatbuffers_, reflection_Field_documentation, reflection_Field)
+__flatbuffers_build_scalar_field(11, flatbuffers_, reflection_Field_optional, flatbuffers_bool, flatbuffers_bool_t, 1, 1, UINT8_C(0), reflection_Field)
+
+static inline reflection_Field_ref_t reflection_Field_create(flatbuffers_builder_t *B __reflection_Field_formal_args)
+{
+ if (reflection_Field_start(B)
+ || reflection_Field_default_integer_add(B, v4)
+ || reflection_Field_default_real_add(B, v5)
+ || reflection_Field_name_add(B, v0)
+ || reflection_Field_type_add(B, v1)
+ || reflection_Field_attributes_add(B, v9)
+ || reflection_Field_documentation_add(B, v10)
+ || reflection_Field_id_add(B, v2)
+ || reflection_Field_offset_add(B, v3)
+ || reflection_Field_deprecated_add(B, v6)
+ || reflection_Field_required_add(B, v7)
+ || reflection_Field_key_add(B, v8)
+ || reflection_Field_optional_add(B, v11)) {
+ return 0;
+ }
+ return reflection_Field_end(B);
+}
+
+static reflection_Field_ref_t reflection_Field_clone(flatbuffers_builder_t *B, reflection_Field_table_t t)
+{
+ __flatbuffers_memoize_begin(B, t);
+ if (reflection_Field_start(B)
+ || reflection_Field_default_integer_pick(B, t)
+ || reflection_Field_default_real_pick(B, t)
+ || reflection_Field_name_pick(B, t)
+ || reflection_Field_type_pick(B, t)
+ || reflection_Field_attributes_pick(B, t)
+ || reflection_Field_documentation_pick(B, t)
+ || reflection_Field_id_pick(B, t)
+ || reflection_Field_offset_pick(B, t)
+ || reflection_Field_deprecated_pick(B, t)
+ || reflection_Field_required_pick(B, t)
+ || reflection_Field_key_pick(B, t)
+ || reflection_Field_optional_pick(B, t)) {
+ return 0;
+ }
+ __flatbuffers_memoize_end(B, t, reflection_Field_end(B));
+}
+
+__flatbuffers_build_string_field(0, flatbuffers_, reflection_Object_name, reflection_Object)
+/* vector has keyed elements */
+__flatbuffers_build_table_vector_field(1, flatbuffers_, reflection_Object_fields, reflection_Field, reflection_Object)
+__flatbuffers_build_scalar_field(2, flatbuffers_, reflection_Object_is_struct, flatbuffers_bool, flatbuffers_bool_t, 1, 1, UINT8_C(0), reflection_Object)
+__flatbuffers_build_scalar_field(3, flatbuffers_, reflection_Object_minalign, flatbuffers_int32, int32_t, 4, 4, INT32_C(0), reflection_Object)
+__flatbuffers_build_scalar_field(4, flatbuffers_, reflection_Object_bytesize, flatbuffers_int32, int32_t, 4, 4, INT32_C(0), reflection_Object)
+/* vector has keyed elements */
+__flatbuffers_build_table_vector_field(5, flatbuffers_, reflection_Object_attributes, reflection_KeyValue, reflection_Object)
+__flatbuffers_build_string_vector_field(6, flatbuffers_, reflection_Object_documentation, reflection_Object)
+
+static inline reflection_Object_ref_t reflection_Object_create(flatbuffers_builder_t *B __reflection_Object_formal_args)
+{
+ if (reflection_Object_start(B)
+ || reflection_Object_name_add(B, v0)
+ || reflection_Object_fields_add(B, v1)
+ || reflection_Object_minalign_add(B, v3)
+ || reflection_Object_bytesize_add(B, v4)
+ || reflection_Object_attributes_add(B, v5)
+ || reflection_Object_documentation_add(B, v6)
+ || reflection_Object_is_struct_add(B, v2)) {
+ return 0;
+ }
+ return reflection_Object_end(B);
+}
+
+static reflection_Object_ref_t reflection_Object_clone(flatbuffers_builder_t *B, reflection_Object_table_t t)
+{
+ __flatbuffers_memoize_begin(B, t);
+ if (reflection_Object_start(B)
+ || reflection_Object_name_pick(B, t)
+ || reflection_Object_fields_pick(B, t)
+ || reflection_Object_minalign_pick(B, t)
+ || reflection_Object_bytesize_pick(B, t)
+ || reflection_Object_attributes_pick(B, t)
+ || reflection_Object_documentation_pick(B, t)
+ || reflection_Object_is_struct_pick(B, t)) {
+ return 0;
+ }
+ __flatbuffers_memoize_end(B, t, reflection_Object_end(B));
+}
+
+__flatbuffers_build_string_field(0, flatbuffers_, reflection_RPCCall_name, reflection_RPCCall)
+__flatbuffers_build_table_field(1, flatbuffers_, reflection_RPCCall_request, reflection_Object, reflection_RPCCall)
+__flatbuffers_build_table_field(2, flatbuffers_, reflection_RPCCall_response, reflection_Object, reflection_RPCCall)
+/* vector has keyed elements */
+__flatbuffers_build_table_vector_field(3, flatbuffers_, reflection_RPCCall_attributes, reflection_KeyValue, reflection_RPCCall)
+__flatbuffers_build_string_vector_field(4, flatbuffers_, reflection_RPCCall_documentation, reflection_RPCCall)
+
+static inline reflection_RPCCall_ref_t reflection_RPCCall_create(flatbuffers_builder_t *B __reflection_RPCCall_formal_args)
+{
+ if (reflection_RPCCall_start(B)
+ || reflection_RPCCall_name_add(B, v0)
+ || reflection_RPCCall_request_add(B, v1)
+ || reflection_RPCCall_response_add(B, v2)
+ || reflection_RPCCall_attributes_add(B, v3)
+ || reflection_RPCCall_documentation_add(B, v4)) {
+ return 0;
+ }
+ return reflection_RPCCall_end(B);
+}
+
+static reflection_RPCCall_ref_t reflection_RPCCall_clone(flatbuffers_builder_t *B, reflection_RPCCall_table_t t)
+{
+ __flatbuffers_memoize_begin(B, t);
+ if (reflection_RPCCall_start(B)
+ || reflection_RPCCall_name_pick(B, t)
+ || reflection_RPCCall_request_pick(B, t)
+ || reflection_RPCCall_response_pick(B, t)
+ || reflection_RPCCall_attributes_pick(B, t)
+ || reflection_RPCCall_documentation_pick(B, t)) {
+ return 0;
+ }
+ __flatbuffers_memoize_end(B, t, reflection_RPCCall_end(B));
+}
+
+__flatbuffers_build_string_field(0, flatbuffers_, reflection_Service_name, reflection_Service)
+/* vector has keyed elements */
+__flatbuffers_build_table_vector_field(1, flatbuffers_, reflection_Service_calls, reflection_RPCCall, reflection_Service)
+/* vector has keyed elements */
+__flatbuffers_build_table_vector_field(2, flatbuffers_, reflection_Service_attributes, reflection_KeyValue, reflection_Service)
+__flatbuffers_build_string_vector_field(3, flatbuffers_, reflection_Service_documentation, reflection_Service)
+
+static inline reflection_Service_ref_t reflection_Service_create(flatbuffers_builder_t *B __reflection_Service_formal_args)
+{
+ if (reflection_Service_start(B)
+ || reflection_Service_name_add(B, v0)
+ || reflection_Service_calls_add(B, v1)
+ || reflection_Service_attributes_add(B, v2)
+ || reflection_Service_documentation_add(B, v3)) {
+ return 0;
+ }
+ return reflection_Service_end(B);
+}
+
+static reflection_Service_ref_t reflection_Service_clone(flatbuffers_builder_t *B, reflection_Service_table_t t)
+{
+ __flatbuffers_memoize_begin(B, t);
+ if (reflection_Service_start(B)
+ || reflection_Service_name_pick(B, t)
+ || reflection_Service_calls_pick(B, t)
+ || reflection_Service_attributes_pick(B, t)
+ || reflection_Service_documentation_pick(B, t)) {
+ return 0;
+ }
+ __flatbuffers_memoize_end(B, t, reflection_Service_end(B));
+}
+
+/* vector has keyed elements */
+__flatbuffers_build_table_vector_field(0, flatbuffers_, reflection_Schema_objects, reflection_Object, reflection_Schema)
+/* vector has keyed elements */
+__flatbuffers_build_table_vector_field(1, flatbuffers_, reflection_Schema_enums, reflection_Enum, reflection_Schema)
+__flatbuffers_build_string_field(2, flatbuffers_, reflection_Schema_file_ident, reflection_Schema)
+__flatbuffers_build_string_field(3, flatbuffers_, reflection_Schema_file_ext, reflection_Schema)
+__flatbuffers_build_table_field(4, flatbuffers_, reflection_Schema_root_table, reflection_Object, reflection_Schema)
+/* vector has keyed elements */
+__flatbuffers_build_table_vector_field(5, flatbuffers_, reflection_Schema_services, reflection_Service, reflection_Schema)
+
+static inline reflection_Schema_ref_t reflection_Schema_create(flatbuffers_builder_t *B __reflection_Schema_formal_args)
+{
+ if (reflection_Schema_start(B)
+ || reflection_Schema_objects_add(B, v0)
+ || reflection_Schema_enums_add(B, v1)
+ || reflection_Schema_file_ident_add(B, v2)
+ || reflection_Schema_file_ext_add(B, v3)
+ || reflection_Schema_root_table_add(B, v4)
+ || reflection_Schema_services_add(B, v5)) {
+ return 0;
+ }
+ return reflection_Schema_end(B);
+}
+
+static reflection_Schema_ref_t reflection_Schema_clone(flatbuffers_builder_t *B, reflection_Schema_table_t t)
+{
+ __flatbuffers_memoize_begin(B, t);
+ if (reflection_Schema_start(B)
+ || reflection_Schema_objects_pick(B, t)
+ || reflection_Schema_enums_pick(B, t)
+ || reflection_Schema_file_ident_pick(B, t)
+ || reflection_Schema_file_ext_pick(B, t)
+ || reflection_Schema_root_table_pick(B, t)
+ || reflection_Schema_services_pick(B, t)) {
+ return 0;
+ }
+ __flatbuffers_memoize_end(B, t, reflection_Schema_end(B));
+}
+
+#include "flatcc/flatcc_epilogue.h"
+#endif /* REFLECTION_BUILDER_H */
diff --git a/nostrdb/flatcc/reflection/reflection_reader.h b/nostrdb/flatcc/reflection/reflection_reader.h
@@ -0,0 +1,411 @@
+#ifndef REFLECTION_READER_H
+#define REFLECTION_READER_H
+
+/* Generated by flatcc 0.6.1 FlatBuffers schema compiler for C by dvide.com */
+
+#ifndef FLATBUFFERS_COMMON_READER_H
+#include "flatbuffers_common_reader.h"
+#endif
+#include "flatcc/flatcc_flatbuffers.h"
+#ifndef __alignas_is_defined
+#include <stdalign.h>
+#endif
+#include "flatcc/flatcc_prologue.h"
+#undef flatbuffers_identifier
+#define flatbuffers_identifier "BFBS"
+#undef flatbuffers_extension
+#define flatbuffers_extension "bfbs"
+
+
+typedef const struct reflection_Type_table *reflection_Type_table_t;
+typedef struct reflection_Type_table *reflection_Type_mutable_table_t;
+typedef const flatbuffers_uoffset_t *reflection_Type_vec_t;
+typedef flatbuffers_uoffset_t *reflection_Type_mutable_vec_t;
+typedef const struct reflection_KeyValue_table *reflection_KeyValue_table_t;
+typedef struct reflection_KeyValue_table *reflection_KeyValue_mutable_table_t;
+typedef const flatbuffers_uoffset_t *reflection_KeyValue_vec_t;
+typedef flatbuffers_uoffset_t *reflection_KeyValue_mutable_vec_t;
+typedef const struct reflection_EnumVal_table *reflection_EnumVal_table_t;
+typedef struct reflection_EnumVal_table *reflection_EnumVal_mutable_table_t;
+typedef const flatbuffers_uoffset_t *reflection_EnumVal_vec_t;
+typedef flatbuffers_uoffset_t *reflection_EnumVal_mutable_vec_t;
+typedef const struct reflection_Enum_table *reflection_Enum_table_t;
+typedef struct reflection_Enum_table *reflection_Enum_mutable_table_t;
+typedef const flatbuffers_uoffset_t *reflection_Enum_vec_t;
+typedef flatbuffers_uoffset_t *reflection_Enum_mutable_vec_t;
+typedef const struct reflection_Field_table *reflection_Field_table_t;
+typedef struct reflection_Field_table *reflection_Field_mutable_table_t;
+typedef const flatbuffers_uoffset_t *reflection_Field_vec_t;
+typedef flatbuffers_uoffset_t *reflection_Field_mutable_vec_t;
+typedef const struct reflection_Object_table *reflection_Object_table_t;
+typedef struct reflection_Object_table *reflection_Object_mutable_table_t;
+typedef const flatbuffers_uoffset_t *reflection_Object_vec_t;
+typedef flatbuffers_uoffset_t *reflection_Object_mutable_vec_t;
+typedef const struct reflection_RPCCall_table *reflection_RPCCall_table_t;
+typedef struct reflection_RPCCall_table *reflection_RPCCall_mutable_table_t;
+typedef const flatbuffers_uoffset_t *reflection_RPCCall_vec_t;
+typedef flatbuffers_uoffset_t *reflection_RPCCall_mutable_vec_t;
+typedef const struct reflection_Service_table *reflection_Service_table_t;
+typedef struct reflection_Service_table *reflection_Service_mutable_table_t;
+typedef const flatbuffers_uoffset_t *reflection_Service_vec_t;
+typedef flatbuffers_uoffset_t *reflection_Service_mutable_vec_t;
+typedef const struct reflection_Schema_table *reflection_Schema_table_t;
+typedef struct reflection_Schema_table *reflection_Schema_mutable_table_t;
+typedef const flatbuffers_uoffset_t *reflection_Schema_vec_t;
+typedef flatbuffers_uoffset_t *reflection_Schema_mutable_vec_t;
+#ifndef reflection_Type_file_identifier
+#define reflection_Type_file_identifier "BFBS"
+#endif
+/* deprecated, use reflection_Type_file_identifier */
+#ifndef reflection_Type_identifier
+#define reflection_Type_identifier "BFBS"
+#endif
+#define reflection_Type_type_hash ((flatbuffers_thash_t)0x44c8fe5e)
+#define reflection_Type_type_identifier "\x5e\xfe\xc8\x44"
+#ifndef reflection_Type_file_extension
+#define reflection_Type_file_extension "bfbs"
+#endif
+#ifndef reflection_KeyValue_file_identifier
+#define reflection_KeyValue_file_identifier "BFBS"
+#endif
+/* deprecated, use reflection_KeyValue_file_identifier */
+#ifndef reflection_KeyValue_identifier
+#define reflection_KeyValue_identifier "BFBS"
+#endif
+#define reflection_KeyValue_type_hash ((flatbuffers_thash_t)0x8c761eaa)
+#define reflection_KeyValue_type_identifier "\xaa\x1e\x76\x8c"
+#ifndef reflection_KeyValue_file_extension
+#define reflection_KeyValue_file_extension "bfbs"
+#endif
+#ifndef reflection_EnumVal_file_identifier
+#define reflection_EnumVal_file_identifier "BFBS"
+#endif
+/* deprecated, use reflection_EnumVal_file_identifier */
+#ifndef reflection_EnumVal_identifier
+#define reflection_EnumVal_identifier "BFBS"
+#endif
+#define reflection_EnumVal_type_hash ((flatbuffers_thash_t)0x9531c946)
+#define reflection_EnumVal_type_identifier "\x46\xc9\x31\x95"
+#ifndef reflection_EnumVal_file_extension
+#define reflection_EnumVal_file_extension "bfbs"
+#endif
+#ifndef reflection_Enum_file_identifier
+#define reflection_Enum_file_identifier "BFBS"
+#endif
+/* deprecated, use reflection_Enum_file_identifier */
+#ifndef reflection_Enum_identifier
+#define reflection_Enum_identifier "BFBS"
+#endif
+#define reflection_Enum_type_hash ((flatbuffers_thash_t)0xacffa90f)
+#define reflection_Enum_type_identifier "\x0f\xa9\xff\xac"
+#ifndef reflection_Enum_file_extension
+#define reflection_Enum_file_extension "bfbs"
+#endif
+#ifndef reflection_Field_file_identifier
+#define reflection_Field_file_identifier "BFBS"
+#endif
+/* deprecated, use reflection_Field_file_identifier */
+#ifndef reflection_Field_identifier
+#define reflection_Field_identifier "BFBS"
+#endif
+#define reflection_Field_type_hash ((flatbuffers_thash_t)0x9f7e408a)
+#define reflection_Field_type_identifier "\x8a\x40\x7e\x9f"
+#ifndef reflection_Field_file_extension
+#define reflection_Field_file_extension "bfbs"
+#endif
+#ifndef reflection_Object_file_identifier
+#define reflection_Object_file_identifier "BFBS"
+#endif
+/* deprecated, use reflection_Object_file_identifier */
+#ifndef reflection_Object_identifier
+#define reflection_Object_identifier "BFBS"
+#endif
+#define reflection_Object_type_hash ((flatbuffers_thash_t)0xb09729bd)
+#define reflection_Object_type_identifier "\xbd\x29\x97\xb0"
+#ifndef reflection_Object_file_extension
+#define reflection_Object_file_extension "bfbs"
+#endif
+#ifndef reflection_RPCCall_file_identifier
+#define reflection_RPCCall_file_identifier "BFBS"
+#endif
+/* deprecated, use reflection_RPCCall_file_identifier */
+#ifndef reflection_RPCCall_identifier
+#define reflection_RPCCall_identifier "BFBS"
+#endif
+#define reflection_RPCCall_type_hash ((flatbuffers_thash_t)0xe2d586f1)
+#define reflection_RPCCall_type_identifier "\xf1\x86\xd5\xe2"
+#ifndef reflection_RPCCall_file_extension
+#define reflection_RPCCall_file_extension "bfbs"
+#endif
+#ifndef reflection_Service_file_identifier
+#define reflection_Service_file_identifier "BFBS"
+#endif
+/* deprecated, use reflection_Service_file_identifier */
+#ifndef reflection_Service_identifier
+#define reflection_Service_identifier "BFBS"
+#endif
+#define reflection_Service_type_hash ((flatbuffers_thash_t)0xf31a13b5)
+#define reflection_Service_type_identifier "\xb5\x13\x1a\xf3"
+#ifndef reflection_Service_file_extension
+#define reflection_Service_file_extension "bfbs"
+#endif
+#ifndef reflection_Schema_file_identifier
+#define reflection_Schema_file_identifier "BFBS"
+#endif
+/* deprecated, use reflection_Schema_file_identifier */
+#ifndef reflection_Schema_identifier
+#define reflection_Schema_identifier "BFBS"
+#endif
+#define reflection_Schema_type_hash ((flatbuffers_thash_t)0xfaf93779)
+#define reflection_Schema_type_identifier "\x79\x37\xf9\xfa"
+#ifndef reflection_Schema_file_extension
+#define reflection_Schema_file_extension "bfbs"
+#endif
+
+typedef int8_t reflection_BaseType_enum_t;
+__flatbuffers_define_integer_type(reflection_BaseType, reflection_BaseType_enum_t, 8)
+#define reflection_BaseType_None ((reflection_BaseType_enum_t)INT8_C(0))
+#define reflection_BaseType_UType ((reflection_BaseType_enum_t)INT8_C(1))
+#define reflection_BaseType_Bool ((reflection_BaseType_enum_t)INT8_C(2))
+#define reflection_BaseType_Byte ((reflection_BaseType_enum_t)INT8_C(3))
+#define reflection_BaseType_UByte ((reflection_BaseType_enum_t)INT8_C(4))
+#define reflection_BaseType_Short ((reflection_BaseType_enum_t)INT8_C(5))
+#define reflection_BaseType_UShort ((reflection_BaseType_enum_t)INT8_C(6))
+#define reflection_BaseType_Int ((reflection_BaseType_enum_t)INT8_C(7))
+#define reflection_BaseType_UInt ((reflection_BaseType_enum_t)INT8_C(8))
+#define reflection_BaseType_Long ((reflection_BaseType_enum_t)INT8_C(9))
+#define reflection_BaseType_ULong ((reflection_BaseType_enum_t)INT8_C(10))
+#define reflection_BaseType_Float ((reflection_BaseType_enum_t)INT8_C(11))
+#define reflection_BaseType_Double ((reflection_BaseType_enum_t)INT8_C(12))
+#define reflection_BaseType_String ((reflection_BaseType_enum_t)INT8_C(13))
+#define reflection_BaseType_Vector ((reflection_BaseType_enum_t)INT8_C(14))
+#define reflection_BaseType_Obj ((reflection_BaseType_enum_t)INT8_C(15))
+#define reflection_BaseType_Union ((reflection_BaseType_enum_t)INT8_C(16))
+#define reflection_BaseType_Array ((reflection_BaseType_enum_t)INT8_C(17))
+#define reflection_BaseType_MaxBaseType ((reflection_BaseType_enum_t)INT8_C(18))
+
+static inline const char *reflection_BaseType_name(reflection_BaseType_enum_t value)
+{
+ switch (value) {
+ case reflection_BaseType_None: return "None";
+ case reflection_BaseType_UType: return "UType";
+ case reflection_BaseType_Bool: return "Bool";
+ case reflection_BaseType_Byte: return "Byte";
+ case reflection_BaseType_UByte: return "UByte";
+ case reflection_BaseType_Short: return "Short";
+ case reflection_BaseType_UShort: return "UShort";
+ case reflection_BaseType_Int: return "Int";
+ case reflection_BaseType_UInt: return "UInt";
+ case reflection_BaseType_Long: return "Long";
+ case reflection_BaseType_ULong: return "ULong";
+ case reflection_BaseType_Float: return "Float";
+ case reflection_BaseType_Double: return "Double";
+ case reflection_BaseType_String: return "String";
+ case reflection_BaseType_Vector: return "Vector";
+ case reflection_BaseType_Obj: return "Obj";
+ case reflection_BaseType_Union: return "Union";
+ case reflection_BaseType_Array: return "Array";
+ case reflection_BaseType_MaxBaseType: return "MaxBaseType";
+ default: return "";
+ }
+}
+
+static inline int reflection_BaseType_is_known_value(reflection_BaseType_enum_t value)
+{
+ switch (value) {
+ case reflection_BaseType_None: return 1;
+ case reflection_BaseType_UType: return 1;
+ case reflection_BaseType_Bool: return 1;
+ case reflection_BaseType_Byte: return 1;
+ case reflection_BaseType_UByte: return 1;
+ case reflection_BaseType_Short: return 1;
+ case reflection_BaseType_UShort: return 1;
+ case reflection_BaseType_Int: return 1;
+ case reflection_BaseType_UInt: return 1;
+ case reflection_BaseType_Long: return 1;
+ case reflection_BaseType_ULong: return 1;
+ case reflection_BaseType_Float: return 1;
+ case reflection_BaseType_Double: return 1;
+ case reflection_BaseType_String: return 1;
+ case reflection_BaseType_Vector: return 1;
+ case reflection_BaseType_Obj: return 1;
+ case reflection_BaseType_Union: return 1;
+ case reflection_BaseType_Array: return 1;
+ case reflection_BaseType_MaxBaseType: return 1;
+ default: return 0;
+ }
+}
+
+
+
+struct reflection_Type_table { uint8_t unused__; };
+
+static inline size_t reflection_Type_vec_len(reflection_Type_vec_t vec)
+__flatbuffers_vec_len(vec)
+static inline reflection_Type_table_t reflection_Type_vec_at(reflection_Type_vec_t vec, size_t i)
+__flatbuffers_offset_vec_at(reflection_Type_table_t, vec, i, 0)
+__flatbuffers_table_as_root(reflection_Type)
+
+__flatbuffers_define_scalar_field(0, reflection_Type, base_type, reflection_BaseType, reflection_BaseType_enum_t, INT8_C(0))
+__flatbuffers_define_scalar_field(1, reflection_Type, element, reflection_BaseType, reflection_BaseType_enum_t, INT8_C(0))
+__flatbuffers_define_scalar_field(2, reflection_Type, index, flatbuffers_int32, int32_t, INT32_C(-1))
+__flatbuffers_define_scalar_field(3, reflection_Type, fixed_length, flatbuffers_uint16, uint16_t, UINT16_C(0))
+
+struct reflection_KeyValue_table { uint8_t unused__; };
+
+static inline size_t reflection_KeyValue_vec_len(reflection_KeyValue_vec_t vec)
+__flatbuffers_vec_len(vec)
+static inline reflection_KeyValue_table_t reflection_KeyValue_vec_at(reflection_KeyValue_vec_t vec, size_t i)
+__flatbuffers_offset_vec_at(reflection_KeyValue_table_t, vec, i, 0)
+__flatbuffers_table_as_root(reflection_KeyValue)
+
+__flatbuffers_define_string_field(0, reflection_KeyValue, key, 1)
+__flatbuffers_define_find_by_string_field(reflection_KeyValue, key)
+__flatbuffers_define_table_sort_by_string_field(reflection_KeyValue, key)
+__flatbuffers_define_default_find_by_string_field(reflection_KeyValue, key)
+__flatbuffers_define_default_scan_by_string_field(reflection_KeyValue, key)
+#define reflection_KeyValue_vec_sort reflection_KeyValue_vec_sort_by_key
+__flatbuffers_define_string_field(1, reflection_KeyValue, value, 0)
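+
+/*
+ * Sketch, assuming flatcc's generated default find/sort helpers: after
+ * sorting a mutable vector with reflection_KeyValue_vec_sort(vec), an
+ * attribute can be looked up by key with
+ * reflection_KeyValue_vec_find(vec, "some_key").
+ */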
+
+struct reflection_EnumVal_table { uint8_t unused__; };
+
+static inline size_t reflection_EnumVal_vec_len(reflection_EnumVal_vec_t vec)
+__flatbuffers_vec_len(vec)
+static inline reflection_EnumVal_table_t reflection_EnumVal_vec_at(reflection_EnumVal_vec_t vec, size_t i)
+__flatbuffers_offset_vec_at(reflection_EnumVal_table_t, vec, i, 0)
+__flatbuffers_table_as_root(reflection_EnumVal)
+
+__flatbuffers_define_string_field(0, reflection_EnumVal, name, 1)
+__flatbuffers_define_scalar_field(1, reflection_EnumVal, value, flatbuffers_int64, int64_t, INT64_C(0))
+/* Note: find only works on vectors sorted by this field. */
+__flatbuffers_define_find_by_scalar_field(reflection_EnumVal, value, int64_t)
+__flatbuffers_define_table_sort_by_scalar_field(reflection_EnumVal, value, int64_t)
+__flatbuffers_define_default_find_by_scalar_field(reflection_EnumVal, value, int64_t)
+__flatbuffers_define_default_scan_by_scalar_field(reflection_EnumVal, value, int64_t)
+#define reflection_EnumVal_vec_sort reflection_EnumVal_vec_sort_by_value
+__flatbuffers_define_table_field(2, reflection_EnumVal, object, reflection_Object_table_t, 0)
+__flatbuffers_define_table_field(3, reflection_EnumVal, union_type, reflection_Type_table_t, 0)
+__flatbuffers_define_vector_field(4, reflection_EnumVal, documentation, flatbuffers_string_vec_t, 0)
+
+struct reflection_Enum_table { uint8_t unused__; };
+
+static inline size_t reflection_Enum_vec_len(reflection_Enum_vec_t vec)
+__flatbuffers_vec_len(vec)
+static inline reflection_Enum_table_t reflection_Enum_vec_at(reflection_Enum_vec_t vec, size_t i)
+__flatbuffers_offset_vec_at(reflection_Enum_table_t, vec, i, 0)
+__flatbuffers_table_as_root(reflection_Enum)
+
+__flatbuffers_define_string_field(0, reflection_Enum, name, 1)
+__flatbuffers_define_find_by_string_field(reflection_Enum, name)
+__flatbuffers_define_table_sort_by_string_field(reflection_Enum, name)
+__flatbuffers_define_default_find_by_string_field(reflection_Enum, name)
+__flatbuffers_define_default_scan_by_string_field(reflection_Enum, name)
+#define reflection_Enum_vec_sort reflection_Enum_vec_sort_by_name
+__flatbuffers_define_vector_field(1, reflection_Enum, values, reflection_EnumVal_vec_t, 1)
+__flatbuffers_define_scalar_field(2, reflection_Enum, is_union, flatbuffers_bool, flatbuffers_bool_t, UINT8_C(0))
+__flatbuffers_define_table_field(3, reflection_Enum, underlying_type, reflection_Type_table_t, 1)
+__flatbuffers_define_vector_field(4, reflection_Enum, attributes, reflection_KeyValue_vec_t, 0)
+__flatbuffers_define_vector_field(5, reflection_Enum, documentation, flatbuffers_string_vec_t, 0)
+
+struct reflection_Field_table { uint8_t unused__; };
+
+static inline size_t reflection_Field_vec_len(reflection_Field_vec_t vec)
+__flatbuffers_vec_len(vec)
+static inline reflection_Field_table_t reflection_Field_vec_at(reflection_Field_vec_t vec, size_t i)
+__flatbuffers_offset_vec_at(reflection_Field_table_t, vec, i, 0)
+__flatbuffers_table_as_root(reflection_Field)
+
+__flatbuffers_define_string_field(0, reflection_Field, name, 1)
+__flatbuffers_define_find_by_string_field(reflection_Field, name)
+__flatbuffers_define_table_sort_by_string_field(reflection_Field, name)
+__flatbuffers_define_default_find_by_string_field(reflection_Field, name)
+__flatbuffers_define_default_scan_by_string_field(reflection_Field, name)
+#define reflection_Field_vec_sort reflection_Field_vec_sort_by_name
+__flatbuffers_define_table_field(1, reflection_Field, type, reflection_Type_table_t, 1)
+__flatbuffers_define_scalar_field(2, reflection_Field, id, flatbuffers_uint16, uint16_t, UINT16_C(0))
+__flatbuffers_define_scalar_field(3, reflection_Field, offset, flatbuffers_uint16, uint16_t, UINT16_C(0))
+__flatbuffers_define_scalar_field(4, reflection_Field, default_integer, flatbuffers_int64, int64_t, INT64_C(0))
+__flatbuffers_define_scalar_field(5, reflection_Field, default_real, flatbuffers_double, double, 0.0000000000000000)
+__flatbuffers_define_scalar_field(6, reflection_Field, deprecated, flatbuffers_bool, flatbuffers_bool_t, UINT8_C(0))
+__flatbuffers_define_scalar_field(7, reflection_Field, required, flatbuffers_bool, flatbuffers_bool_t, UINT8_C(0))
+__flatbuffers_define_scalar_field(8, reflection_Field, key, flatbuffers_bool, flatbuffers_bool_t, UINT8_C(0))
+__flatbuffers_define_vector_field(9, reflection_Field, attributes, reflection_KeyValue_vec_t, 0)
+__flatbuffers_define_vector_field(10, reflection_Field, documentation, flatbuffers_string_vec_t, 0)
+__flatbuffers_define_scalar_field(11, reflection_Field, optional, flatbuffers_bool, flatbuffers_bool_t, UINT8_C(0))
+
+struct reflection_Object_table { uint8_t unused__; };
+
+static inline size_t reflection_Object_vec_len(reflection_Object_vec_t vec)
+__flatbuffers_vec_len(vec)
+static inline reflection_Object_table_t reflection_Object_vec_at(reflection_Object_vec_t vec, size_t i)
+__flatbuffers_offset_vec_at(reflection_Object_table_t, vec, i, 0)
+__flatbuffers_table_as_root(reflection_Object)
+
+__flatbuffers_define_string_field(0, reflection_Object, name, 1)
+__flatbuffers_define_find_by_string_field(reflection_Object, name)
+__flatbuffers_define_table_sort_by_string_field(reflection_Object, name)
+__flatbuffers_define_default_find_by_string_field(reflection_Object, name)
+__flatbuffers_define_default_scan_by_string_field(reflection_Object, name)
+#define reflection_Object_vec_sort reflection_Object_vec_sort_by_name
+__flatbuffers_define_vector_field(1, reflection_Object, fields, reflection_Field_vec_t, 1)
+__flatbuffers_define_scalar_field(2, reflection_Object, is_struct, flatbuffers_bool, flatbuffers_bool_t, UINT8_C(0))
+__flatbuffers_define_scalar_field(3, reflection_Object, minalign, flatbuffers_int32, int32_t, INT32_C(0))
+__flatbuffers_define_scalar_field(4, reflection_Object, bytesize, flatbuffers_int32, int32_t, INT32_C(0))
+__flatbuffers_define_vector_field(5, reflection_Object, attributes, reflection_KeyValue_vec_t, 0)
+__flatbuffers_define_vector_field(6, reflection_Object, documentation, flatbuffers_string_vec_t, 0)
+
+struct reflection_RPCCall_table { uint8_t unused__; };
+
+static inline size_t reflection_RPCCall_vec_len(reflection_RPCCall_vec_t vec)
+__flatbuffers_vec_len(vec)
+static inline reflection_RPCCall_table_t reflection_RPCCall_vec_at(reflection_RPCCall_vec_t vec, size_t i)
+__flatbuffers_offset_vec_at(reflection_RPCCall_table_t, vec, i, 0)
+__flatbuffers_table_as_root(reflection_RPCCall)
+
+__flatbuffers_define_string_field(0, reflection_RPCCall, name, 1)
+__flatbuffers_define_find_by_string_field(reflection_RPCCall, name)
+__flatbuffers_define_table_sort_by_string_field(reflection_RPCCall, name)
+__flatbuffers_define_default_find_by_string_field(reflection_RPCCall, name)
+__flatbuffers_define_default_scan_by_string_field(reflection_RPCCall, name)
+#define reflection_RPCCall_vec_sort reflection_RPCCall_vec_sort_by_name
+__flatbuffers_define_table_field(1, reflection_RPCCall, request, reflection_Object_table_t, 1)
+__flatbuffers_define_table_field(2, reflection_RPCCall, response, reflection_Object_table_t, 1)
+__flatbuffers_define_vector_field(3, reflection_RPCCall, attributes, reflection_KeyValue_vec_t, 0)
+__flatbuffers_define_vector_field(4, reflection_RPCCall, documentation, flatbuffers_string_vec_t, 0)
+
+struct reflection_Service_table { uint8_t unused__; };
+
+static inline size_t reflection_Service_vec_len(reflection_Service_vec_t vec)
+__flatbuffers_vec_len(vec)
+static inline reflection_Service_table_t reflection_Service_vec_at(reflection_Service_vec_t vec, size_t i)
+__flatbuffers_offset_vec_at(reflection_Service_table_t, vec, i, 0)
+__flatbuffers_table_as_root(reflection_Service)
+
+__flatbuffers_define_string_field(0, reflection_Service, name, 1)
+__flatbuffers_define_find_by_string_field(reflection_Service, name)
+__flatbuffers_define_table_sort_by_string_field(reflection_Service, name)
+__flatbuffers_define_default_find_by_string_field(reflection_Service, name)
+__flatbuffers_define_default_scan_by_string_field(reflection_Service, name)
+#define reflection_Service_vec_sort reflection_Service_vec_sort_by_name
+__flatbuffers_define_vector_field(1, reflection_Service, calls, reflection_RPCCall_vec_t, 0)
+__flatbuffers_define_vector_field(2, reflection_Service, attributes, reflection_KeyValue_vec_t, 0)
+__flatbuffers_define_vector_field(3, reflection_Service, documentation, flatbuffers_string_vec_t, 0)
+
+struct reflection_Schema_table { uint8_t unused__; };
+
+static inline size_t reflection_Schema_vec_len(reflection_Schema_vec_t vec)
+__flatbuffers_vec_len(vec)
+static inline reflection_Schema_table_t reflection_Schema_vec_at(reflection_Schema_vec_t vec, size_t i)
+__flatbuffers_offset_vec_at(reflection_Schema_table_t, vec, i, 0)
+__flatbuffers_table_as_root(reflection_Schema)
+
+__flatbuffers_define_vector_field(0, reflection_Schema, objects, reflection_Object_vec_t, 1)
+__flatbuffers_define_vector_field(1, reflection_Schema, enums, reflection_Enum_vec_t, 1)
+__flatbuffers_define_string_field(2, reflection_Schema, file_ident, 0)
+__flatbuffers_define_string_field(3, reflection_Schema, file_ext, 0)
+__flatbuffers_define_table_field(4, reflection_Schema, root_table, reflection_Object_table_t, 0)
+__flatbuffers_define_vector_field(5, reflection_Schema, services, reflection_Service_vec_t, 0)
+
+
+#include "flatcc/flatcc_epilogue.h"
+#endif /* REFLECTION_READER_H */
diff --git a/nostrdb/flatcc/reflection/reflection_verifier.h b/nostrdb/flatcc/reflection/reflection_verifier.h
@@ -0,0 +1,308 @@
+#ifndef REFLECTION_VERIFIER_H
+#define REFLECTION_VERIFIER_H
+
+/* Generated by flatcc 0.6.1 FlatBuffers schema compiler for C by dvide.com */
+
+#ifndef REFLECTION_READER_H
+#include "reflection_reader.h"
+#endif
+#include "flatcc/flatcc_verifier.h"
+#include "flatcc/flatcc_prologue.h"
+
+static int reflection_Type_verify_table(flatcc_table_verifier_descriptor_t *td);
+static int reflection_KeyValue_verify_table(flatcc_table_verifier_descriptor_t *td);
+static int reflection_EnumVal_verify_table(flatcc_table_verifier_descriptor_t *td);
+static int reflection_Enum_verify_table(flatcc_table_verifier_descriptor_t *td);
+static int reflection_Field_verify_table(flatcc_table_verifier_descriptor_t *td);
+static int reflection_Object_verify_table(flatcc_table_verifier_descriptor_t *td);
+static int reflection_RPCCall_verify_table(flatcc_table_verifier_descriptor_t *td);
+static int reflection_Service_verify_table(flatcc_table_verifier_descriptor_t *td);
+static int reflection_Schema_verify_table(flatcc_table_verifier_descriptor_t *td);
+
+static int reflection_Type_verify_table(flatcc_table_verifier_descriptor_t *td)
+{
+ int ret;
+ if ((ret = flatcc_verify_field(td, 0, 1, 1) /* base_type */)) return ret;
+ if ((ret = flatcc_verify_field(td, 1, 1, 1) /* element */)) return ret;
+ if ((ret = flatcc_verify_field(td, 2, 4, 4) /* index */)) return ret;
+ if ((ret = flatcc_verify_field(td, 3, 2, 2) /* fixed_length */)) return ret;
+ return flatcc_verify_ok;
+}
+
+static inline int reflection_Type_verify_as_root(const void *buf, size_t bufsiz)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, reflection_Type_identifier, &reflection_Type_verify_table);
+}
+
+static inline int reflection_Type_verify_as_typed_root(const void *buf, size_t bufsiz)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, reflection_Type_type_identifier, &reflection_Type_verify_table);
+}
+
+static inline int reflection_Type_verify_as_root_with_identifier(const void *buf, size_t bufsiz, const char *fid)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, fid, &reflection_Type_verify_table);
+}
+
+static inline int reflection_Type_verify_as_root_with_type_hash(const void *buf, size_t bufsiz, flatbuffers_thash_t thash)
+{
+ return flatcc_verify_table_as_typed_root(buf, bufsiz, thash, &reflection_Type_verify_table);
+}
+
+static int reflection_KeyValue_verify_table(flatcc_table_verifier_descriptor_t *td)
+{
+ int ret;
+ if ((ret = flatcc_verify_string_field(td, 0, 1) /* key */)) return ret;
+ if ((ret = flatcc_verify_string_field(td, 1, 0) /* value */)) return ret;
+ return flatcc_verify_ok;
+}
+
+static inline int reflection_KeyValue_verify_as_root(const void *buf, size_t bufsiz)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, reflection_KeyValue_identifier, &reflection_KeyValue_verify_table);
+}
+
+static inline int reflection_KeyValue_verify_as_typed_root(const void *buf, size_t bufsiz)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, reflection_KeyValue_type_identifier, &reflection_KeyValue_verify_table);
+}
+
+static inline int reflection_KeyValue_verify_as_root_with_identifier(const void *buf, size_t bufsiz, const char *fid)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, fid, &reflection_KeyValue_verify_table);
+}
+
+static inline int reflection_KeyValue_verify_as_root_with_type_hash(const void *buf, size_t bufsiz, flatbuffers_thash_t thash)
+{
+ return flatcc_verify_table_as_typed_root(buf, bufsiz, thash, &reflection_KeyValue_verify_table);
+}
+
+static int reflection_EnumVal_verify_table(flatcc_table_verifier_descriptor_t *td)
+{
+ int ret;
+ if ((ret = flatcc_verify_string_field(td, 0, 1) /* name */)) return ret;
+ if ((ret = flatcc_verify_field(td, 1, 8, 8) /* value */)) return ret;
+ if ((ret = flatcc_verify_table_field(td, 2, 0, &reflection_Object_verify_table) /* object */)) return ret;
+ if ((ret = flatcc_verify_table_field(td, 3, 0, &reflection_Type_verify_table) /* union_type */)) return ret;
+ if ((ret = flatcc_verify_string_vector_field(td, 4, 0) /* documentation */)) return ret;
+ return flatcc_verify_ok;
+}
+
+static inline int reflection_EnumVal_verify_as_root(const void *buf, size_t bufsiz)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, reflection_EnumVal_identifier, &reflection_EnumVal_verify_table);
+}
+
+static inline int reflection_EnumVal_verify_as_typed_root(const void *buf, size_t bufsiz)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, reflection_EnumVal_type_identifier, &reflection_EnumVal_verify_table);
+}
+
+static inline int reflection_EnumVal_verify_as_root_with_identifier(const void *buf, size_t bufsiz, const char *fid)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, fid, &reflection_EnumVal_verify_table);
+}
+
+static inline int reflection_EnumVal_verify_as_root_with_type_hash(const void *buf, size_t bufsiz, flatbuffers_thash_t thash)
+{
+ return flatcc_verify_table_as_typed_root(buf, bufsiz, thash, &reflection_EnumVal_verify_table);
+}
+
+static int reflection_Enum_verify_table(flatcc_table_verifier_descriptor_t *td)
+{
+ int ret;
+ if ((ret = flatcc_verify_string_field(td, 0, 1) /* name */)) return ret;
+ if ((ret = flatcc_verify_table_vector_field(td, 1, 1, &reflection_EnumVal_verify_table) /* values */)) return ret;
+ if ((ret = flatcc_verify_field(td, 2, 1, 1) /* is_union */)) return ret;
+ if ((ret = flatcc_verify_table_field(td, 3, 1, &reflection_Type_verify_table) /* underlying_type */)) return ret;
+ if ((ret = flatcc_verify_table_vector_field(td, 4, 0, &reflection_KeyValue_verify_table) /* attributes */)) return ret;
+ if ((ret = flatcc_verify_string_vector_field(td, 5, 0) /* documentation */)) return ret;
+ return flatcc_verify_ok;
+}
+
+static inline int reflection_Enum_verify_as_root(const void *buf, size_t bufsiz)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, reflection_Enum_identifier, &reflection_Enum_verify_table);
+}
+
+static inline int reflection_Enum_verify_as_typed_root(const void *buf, size_t bufsiz)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, reflection_Enum_type_identifier, &reflection_Enum_verify_table);
+}
+
+static inline int reflection_Enum_verify_as_root_with_identifier(const void *buf, size_t bufsiz, const char *fid)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, fid, &reflection_Enum_verify_table);
+}
+
+static inline int reflection_Enum_verify_as_root_with_type_hash(const void *buf, size_t bufsiz, flatbuffers_thash_t thash)
+{
+ return flatcc_verify_table_as_typed_root(buf, bufsiz, thash, &reflection_Enum_verify_table);
+}
+
+static int reflection_Field_verify_table(flatcc_table_verifier_descriptor_t *td)
+{
+ int ret;
+ if ((ret = flatcc_verify_string_field(td, 0, 1) /* name */)) return ret;
+ if ((ret = flatcc_verify_table_field(td, 1, 1, &reflection_Type_verify_table) /* type */)) return ret;
+ if ((ret = flatcc_verify_field(td, 2, 2, 2) /* id */)) return ret;
+ if ((ret = flatcc_verify_field(td, 3, 2, 2) /* offset */)) return ret;
+ if ((ret = flatcc_verify_field(td, 4, 8, 8) /* default_integer */)) return ret;
+ if ((ret = flatcc_verify_field(td, 5, 8, 8) /* default_real */)) return ret;
+ if ((ret = flatcc_verify_field(td, 6, 1, 1) /* deprecated */)) return ret;
+ if ((ret = flatcc_verify_field(td, 7, 1, 1) /* required */)) return ret;
+ if ((ret = flatcc_verify_field(td, 8, 1, 1) /* key */)) return ret;
+ if ((ret = flatcc_verify_table_vector_field(td, 9, 0, &reflection_KeyValue_verify_table) /* attributes */)) return ret;
+ if ((ret = flatcc_verify_string_vector_field(td, 10, 0) /* documentation */)) return ret;
+ if ((ret = flatcc_verify_field(td, 11, 1, 1) /* optional */)) return ret;
+ return flatcc_verify_ok;
+}
+
+static inline int reflection_Field_verify_as_root(const void *buf, size_t bufsiz)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, reflection_Field_identifier, &reflection_Field_verify_table);
+}
+
+static inline int reflection_Field_verify_as_typed_root(const void *buf, size_t bufsiz)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, reflection_Field_type_identifier, &reflection_Field_verify_table);
+}
+
+static inline int reflection_Field_verify_as_root_with_identifier(const void *buf, size_t bufsiz, const char *fid)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, fid, &reflection_Field_verify_table);
+}
+
+static inline int reflection_Field_verify_as_root_with_type_hash(const void *buf, size_t bufsiz, flatbuffers_thash_t thash)
+{
+ return flatcc_verify_table_as_typed_root(buf, bufsiz, thash, &reflection_Field_verify_table);
+}
+
+static int reflection_Object_verify_table(flatcc_table_verifier_descriptor_t *td)
+{
+ int ret;
+ if ((ret = flatcc_verify_string_field(td, 0, 1) /* name */)) return ret;
+ if ((ret = flatcc_verify_table_vector_field(td, 1, 1, &reflection_Field_verify_table) /* fields */)) return ret;
+ if ((ret = flatcc_verify_field(td, 2, 1, 1) /* is_struct */)) return ret;
+ if ((ret = flatcc_verify_field(td, 3, 4, 4) /* minalign */)) return ret;
+ if ((ret = flatcc_verify_field(td, 4, 4, 4) /* bytesize */)) return ret;
+ if ((ret = flatcc_verify_table_vector_field(td, 5, 0, &reflection_KeyValue_verify_table) /* attributes */)) return ret;
+ if ((ret = flatcc_verify_string_vector_field(td, 6, 0) /* documentation */)) return ret;
+ return flatcc_verify_ok;
+}
+
+static inline int reflection_Object_verify_as_root(const void *buf, size_t bufsiz)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, reflection_Object_identifier, &reflection_Object_verify_table);
+}
+
+static inline int reflection_Object_verify_as_typed_root(const void *buf, size_t bufsiz)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, reflection_Object_type_identifier, &reflection_Object_verify_table);
+}
+
+static inline int reflection_Object_verify_as_root_with_identifier(const void *buf, size_t bufsiz, const char *fid)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, fid, &reflection_Object_verify_table);
+}
+
+static inline int reflection_Object_verify_as_root_with_type_hash(const void *buf, size_t bufsiz, flatbuffers_thash_t thash)
+{
+ return flatcc_verify_table_as_typed_root(buf, bufsiz, thash, &reflection_Object_verify_table);
+}
+
+static int reflection_RPCCall_verify_table(flatcc_table_verifier_descriptor_t *td)
+{
+ int ret;
+ if ((ret = flatcc_verify_string_field(td, 0, 1) /* name */)) return ret;
+ if ((ret = flatcc_verify_table_field(td, 1, 1, &reflection_Object_verify_table) /* request */)) return ret;
+ if ((ret = flatcc_verify_table_field(td, 2, 1, &reflection_Object_verify_table) /* response */)) return ret;
+ if ((ret = flatcc_verify_table_vector_field(td, 3, 0, &reflection_KeyValue_verify_table) /* attributes */)) return ret;
+ if ((ret = flatcc_verify_string_vector_field(td, 4, 0) /* documentation */)) return ret;
+ return flatcc_verify_ok;
+}
+
+static inline int reflection_RPCCall_verify_as_root(const void *buf, size_t bufsiz)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, reflection_RPCCall_identifier, &reflection_RPCCall_verify_table);
+}
+
+static inline int reflection_RPCCall_verify_as_typed_root(const void *buf, size_t bufsiz)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, reflection_RPCCall_type_identifier, &reflection_RPCCall_verify_table);
+}
+
+static inline int reflection_RPCCall_verify_as_root_with_identifier(const void *buf, size_t bufsiz, const char *fid)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, fid, &reflection_RPCCall_verify_table);
+}
+
+static inline int reflection_RPCCall_verify_as_root_with_type_hash(const void *buf, size_t bufsiz, flatbuffers_thash_t thash)
+{
+ return flatcc_verify_table_as_typed_root(buf, bufsiz, thash, &reflection_RPCCall_verify_table);
+}
+
+static int reflection_Service_verify_table(flatcc_table_verifier_descriptor_t *td)
+{
+ int ret;
+ if ((ret = flatcc_verify_string_field(td, 0, 1) /* name */)) return ret;
+ if ((ret = flatcc_verify_table_vector_field(td, 1, 0, &reflection_RPCCall_verify_table) /* calls */)) return ret;
+ if ((ret = flatcc_verify_table_vector_field(td, 2, 0, &reflection_KeyValue_verify_table) /* attributes */)) return ret;
+ if ((ret = flatcc_verify_string_vector_field(td, 3, 0) /* documentation */)) return ret;
+ return flatcc_verify_ok;
+}
+
+static inline int reflection_Service_verify_as_root(const void *buf, size_t bufsiz)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, reflection_Service_identifier, &reflection_Service_verify_table);
+}
+
+static inline int reflection_Service_verify_as_typed_root(const void *buf, size_t bufsiz)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, reflection_Service_type_identifier, &reflection_Service_verify_table);
+}
+
+static inline int reflection_Service_verify_as_root_with_identifier(const void *buf, size_t bufsiz, const char *fid)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, fid, &reflection_Service_verify_table);
+}
+
+static inline int reflection_Service_verify_as_root_with_type_hash(const void *buf, size_t bufsiz, flatbuffers_thash_t thash)
+{
+ return flatcc_verify_table_as_typed_root(buf, bufsiz, thash, &reflection_Service_verify_table);
+}
+
+static int reflection_Schema_verify_table(flatcc_table_verifier_descriptor_t *td)
+{
+ int ret;
+ if ((ret = flatcc_verify_table_vector_field(td, 0, 1, &reflection_Object_verify_table) /* objects */)) return ret;
+ if ((ret = flatcc_verify_table_vector_field(td, 1, 1, &reflection_Enum_verify_table) /* enums */)) return ret;
+ if ((ret = flatcc_verify_string_field(td, 2, 0) /* file_ident */)) return ret;
+ if ((ret = flatcc_verify_string_field(td, 3, 0) /* file_ext */)) return ret;
+ if ((ret = flatcc_verify_table_field(td, 4, 0, &reflection_Object_verify_table) /* root_table */)) return ret;
+ if ((ret = flatcc_verify_table_vector_field(td, 5, 0, &reflection_Service_verify_table) /* services */)) return ret;
+ return flatcc_verify_ok;
+}
+
+static inline int reflection_Schema_verify_as_root(const void *buf, size_t bufsiz)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, reflection_Schema_identifier, &reflection_Schema_verify_table);
+}
+
+static inline int reflection_Schema_verify_as_typed_root(const void *buf, size_t bufsiz)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, reflection_Schema_type_identifier, &reflection_Schema_verify_table);
+}
+
+static inline int reflection_Schema_verify_as_root_with_identifier(const void *buf, size_t bufsiz, const char *fid)
+{
+ return flatcc_verify_table_as_root(buf, bufsiz, fid, &reflection_Schema_verify_table);
+}
+
+static inline int reflection_Schema_verify_as_root_with_type_hash(const void *buf, size_t bufsiz, flatbuffers_thash_t thash)
+{
+ return flatcc_verify_table_as_typed_root(buf, bufsiz, thash, &reflection_Schema_verify_table);
+}
+
+#include "flatcc/flatcc_epilogue.h"
+#endif /* REFLECTION_VERIFIER_H */
diff --git a/nostrdb/flatcc/refmap.c b/nostrdb/flatcc/refmap.c
@@ -0,0 +1,248 @@
+/*
+ * Optional file that can be included in the runtime library to support
+ * DAG cloning with the builder; it may also be used standalone for
+ * custom purposes. See also comments in `flatcc/flatcc_builder.h`.
+ *
+ * Note that dynamic construction takes place and that large offset
+ * vectors might consume significant space if there are not many shared
+ * references. In the basic use case no allocation takes place because a
+ * few references can be held using only a small stack-allocated hash
+ * table.
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "flatcc_rtconfig.h"
+#include "flatcc_refmap.h"
+#include "flatcc_alloc.h"
+#include "flatcc_assert.h"
+
+#define _flatcc_refmap_calloc FLATCC_CALLOC
+#define _flatcc_refmap_free FLATCC_FREE
+
+/* Can be used as a primitive defense against collision attacks. */
+#ifdef FLATCC_HASH_SEED
+#define _flatcc_refmap_seed FLATCC_HASH_SEED
+#else
+#define _flatcc_refmap_seed 0x2f693b52
+#endif
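+
+/*
+ * For example, build with -DFLATCC_HASH_SEED=0x8a5c3e21 (hypothetical
+ * value) to make probe sequences harder to predict for untrusted input.
+ */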
+
+static inline size_t _flatcc_refmap_above_load_factor(size_t count, size_t buckets)
+{
+ static const size_t d = 256;
+ static const size_t n = (size_t)((FLATCC_REFMAP_LOAD_FACTOR) * 256.0f);
+
+ return count >= buckets * n / d;
+}
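+
+/*
+ * Worked example, assuming the default load factor of 0.7: n is then
+ * (size_t)(0.7 * 256) = 179, so with 64 buckets the table grows once
+ * count reaches 64 * 179 / 256 = 44.
+ */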
+
+#define _flatcc_refmap_probe(k, i, N) ((k + i) & N)
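+
+/*
+ * Linear probing: N is buckets - 1, and bucket counts stay powers of
+ * two (the table only ever doubles), so (k + i) & N wraps the i'th
+ * probe around the table.
+ */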
+
+void flatcc_refmap_clear(flatcc_refmap_t *refmap)
+{
+ if (refmap->table && refmap->table != refmap->min_table) {
+ _flatcc_refmap_free(refmap->table);
+ }
+ flatcc_refmap_init(refmap);
+}
+
+static inline size_t _flatcc_refmap_hash(const void *src)
+{
+ /* MurmurHash3 64-bit finalizer */
+ uint64_t x;
+
+ x = (uint64_t)((size_t)src) ^ _flatcc_refmap_seed;
+
+ x ^= x >> 33;
+ x *= 0xff51afd7ed558ccdULL;
+ x ^= x >> 33;
+ x *= 0xc4ceb9fe1a85ec53ULL;
+ x ^= x >> 33;
+ return (size_t)x;
+}
+
+void flatcc_refmap_reset(flatcc_refmap_t *refmap)
+{
+ if (refmap->count) {
+ memset(refmap->table, 0, sizeof(refmap->table[0]) * refmap->buckets);
+ }
+ refmap->count = 0;
+}
+
+/*
+ * Technically resize also supports shrinking, which may be useful for
+ * adaptations, but the current hash table never deletes individual items.
+ */
+int flatcc_refmap_resize(flatcc_refmap_t *refmap, size_t count)
+{
+ const size_t min_buckets = sizeof(refmap->min_table) / sizeof(refmap->min_table[0]);
+
+ size_t i;
+ size_t buckets;
+ size_t buckets_old;
+ struct flatcc_refmap_item *T_old;
+
+ if (count < refmap->count) {
+ count = refmap->count;
+ }
+ buckets = min_buckets;
+
+ while (_flatcc_refmap_above_load_factor(count, buckets)) {
+ buckets *= 2;
+ }
+ if (refmap->buckets == buckets) {
+ return 0;
+ }
+ T_old = refmap->table;
+ buckets_old = refmap->buckets;
+ if (buckets == min_buckets) {
+ memset(refmap->min_table, 0, sizeof(refmap->min_table));
+ refmap->table = refmap->min_table;
+ } else {
+ refmap->table = _flatcc_refmap_calloc(buckets, sizeof(refmap->table[0]));
+ if (refmap->table == 0) {
+ refmap->table = T_old;
+ FLATCC_ASSERT(0); /* out of memory */
+ return -1;
+ }
+ }
+ refmap->buckets = buckets;
+ refmap->count = 0;
+ for (i = 0; i < buckets_old; ++i) {
+ if (T_old[i].src) {
+ flatcc_refmap_insert(refmap, T_old[i].src, T_old[i].ref);
+ }
+ }
+ if (T_old && T_old != refmap->min_table) {
+ _flatcc_refmap_free(T_old);
+ }
+ return 0;
+}
+
+flatcc_refmap_ref_t flatcc_refmap_insert(flatcc_refmap_t *refmap, const void *src, flatcc_refmap_ref_t ref)
+{
+ struct flatcc_refmap_item *T;
+ size_t N, i, j, k;
+
+ if (src == 0) return ref;
+ if (_flatcc_refmap_above_load_factor(refmap->count, refmap->buckets)) {
+ if (flatcc_refmap_resize(refmap, refmap->count * 2)) {
+ return flatcc_refmap_not_found; /* alloc failed */
+ }
+ }
+ T = refmap->table;
+ N = refmap->buckets - 1;
+ k = _flatcc_refmap_hash(src);
+ i = 0;
+ j = _flatcc_refmap_probe(k, i, N);
+ while (T[j].src) {
+ if (T[j].src == src) {
+ return T[j].ref = ref;
+ }
+ ++i;
+ j = _flatcc_refmap_probe(k, i, N);
+ }
+ ++refmap->count;
+ T[j].src = src;
+ return T[j].ref = ref;
+}
+
+flatcc_refmap_ref_t flatcc_refmap_find(flatcc_refmap_t *refmap, const void *src)
+{
+ struct flatcc_refmap_item *T;
+ size_t N, i, j, k;
+
+ if (refmap->count == 0) {
+ return flatcc_refmap_not_found;
+ }
+ T = refmap->table;
+ N = refmap->buckets - 1;
+ k = _flatcc_refmap_hash(src);
+ i = 0;
+ j = _flatcc_refmap_probe(k, i, N);
+ while (T[j].src) {
+ if (T[j].src == src) return T[j].ref;
+ ++i;
+ j = _flatcc_refmap_probe(k, i, N);
+ }
+ return flatcc_refmap_not_found;
+}
+
+/*
+ * To run the test from the project root:
+ *
+ * cc -D FLATCC_REFMAP_TEST -I include src/runtime/refmap.c -o test_refmap && ./test_refmap
+ *
+ */
+#ifdef FLATCC_REFMAP_TEST
+
+#include <stdio.h>
+
+#ifndef FLATCC_REFMAP_H
+#include "flatcc/flatcc_refmap.h"
+#endif
+
+#define test(x) do { if (!(x)) { fprintf(stderr, "%02d: refmap test failed\n", __LINE__); exit(-1); } } while (0)
+#define test_start() fprintf(stderr, "starting refmap test ...\n")
+#define test_ok() fprintf(stderr, "refmap test succeeded\n")
+
+int main()
+{
+ int i;
+ int data[1000];
+ int a = 1;
+ int b = 2;
+ int c = 3;
+ flatcc_refmap_t refmap;
+
+ flatcc_refmap_init(&refmap);
+
+ test(flatcc_refmap_find(&refmap, &a) == flatcc_refmap_not_found);
+ test(flatcc_refmap_find(&refmap, &b) == flatcc_refmap_not_found);
+ test(flatcc_refmap_find(&refmap, &c) == flatcc_refmap_not_found);
+ test(flatcc_refmap_find(&refmap, 0) == flatcc_refmap_not_found);
+ test(flatcc_refmap_find(&refmap, &a) == 0);
+
+ test(flatcc_refmap_insert(&refmap, &a, 42) == 42);
+ test(flatcc_refmap_find(&refmap, &a) == 42);
+ test(flatcc_refmap_find(&refmap, &b) == flatcc_refmap_not_found);
+ test(flatcc_refmap_find(&refmap, &c) == flatcc_refmap_not_found);
+ test(flatcc_refmap_insert(&refmap, &a, 42) == 42);
+ test(flatcc_refmap_find(&refmap, &a) == 42);
+ test(refmap.count == 1);
+ test(flatcc_refmap_insert(&refmap, &a, 43) == 43);
+ test(flatcc_refmap_find(&refmap, &a) == 43);
+ test(refmap.count == 1);
+ test(flatcc_refmap_insert(&refmap, &b, -10) == -10);
+ test(flatcc_refmap_insert(&refmap, &c, 100) == 100);
+ test(refmap.count == 3);
+ test(flatcc_refmap_find(&refmap, &a) == 43);
+ test(flatcc_refmap_find(&refmap, &b) == -10);
+ test(flatcc_refmap_find(&refmap, &c) == 100);
+
+ test(flatcc_refmap_insert(&refmap, 0, 1000) == 1000);
+ test(flatcc_refmap_find(&refmap, 0) == 0);
+ test(refmap.count == 3);
+
+ test(flatcc_refmap_insert(&refmap, &b, 0) == 0);
+ test(flatcc_refmap_find(&refmap, &b) == 0);
+ test(refmap.count == 3);
+
+ flatcc_refmap_reset(&refmap);
+ test(refmap.count == 0);
+ test(refmap.buckets > 0);
+ for (i = 0; i < 1000; ++i) {
+ test(flatcc_refmap_insert(&refmap, data + i, i + 42) == i + 42);
+ }
+ test(refmap.count == 1000);
+ for (i = 0; i < 1000; ++i) {
+ test(flatcc_refmap_find(&refmap, data + i) == i + 42);
+ }
+ flatcc_refmap_clear(&refmap);
+ test(refmap.count == 0);
+ test(refmap.buckets == 0);
+ test_ok();
+ return 0;
+}
+
+#endif /* FLATCC_REFMAP_TEST */
diff --git a/nostrdb/flatcc/support/README b/nostrdb/flatcc/support/README
@@ -0,0 +1 @@
+Support files mainly used for testing.
diff --git a/nostrdb/flatcc/support/cdump.h b/nostrdb/flatcc/support/cdump.h
@@ -0,0 +1,38 @@
+#ifndef CDUMP_H
+#define CDUMP_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdio.h>
+
+/* Generates a constant C byte array. */
+static void cdump(const char *name, void *addr, size_t len, FILE *fp) {
+ unsigned int i;
+ unsigned char *pc = (unsigned char*)addr;
+
+ // Output description if given.
+ name = name ? name : "dump";
+ fprintf(fp, "const unsigned char %s[] = {", name);
+
+ // Process every byte in the data.
+ for (i = 0; i < (unsigned int)len; i++) {
+ // Multiple of 16 means new line (with line offset).
+
+ if ((i % 16) == 0) {
+ fprintf(fp, "\n ");
+ } else if ((i % 8) == 0) {
+ fprintf(fp, " ");
+ }
+
+ fprintf(fp, " 0x%02x,", pc[i]);
+ }
+ fprintf(fp, "\n};\n");
+}
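+
+/*
+ * For example, cdump("myarr", buf, 4, stdout) prints something like:
+ *
+ * const unsigned char myarr[] = {
+ *      0x01, 0x02, 0x03, 0x04,
+ * };
+ */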
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* CDUMP_H */
diff --git a/nostrdb/flatcc/support/elapsed.h b/nostrdb/flatcc/support/elapsed.h
@@ -0,0 +1,73 @@
+#ifndef ELAPSED_H
+#define ELAPSED_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdio.h>
+#include <stdlib.h> /* exit() used by FatalError in the Windows branch */
+
+/* Based on http://stackoverflow.com/a/8583395 */
+#if !defined(_WIN32)
+#include <sys/time.h>
+static double elapsed_realtime(void) { // returns 0 seconds first time called
+ static struct timeval t0;
+ struct timeval tv;
+ gettimeofday(&tv, 0);
+ if (!t0.tv_sec)
+ t0 = tv;
+ return (double)(tv.tv_sec - t0.tv_sec) + (double)(tv.tv_usec - t0.tv_usec) / 1e6;
+}
+#else
+#include <windows.h>
+#ifndef FatalError
+#define FatalError(s) do { perror(s); exit(-1); } while(0)
+#endif
+static double elapsed_realtime(void) { // granularity about 50 microsecs on my machine
+ static LARGE_INTEGER freq, start;
+ LARGE_INTEGER count;
+ if (!QueryPerformanceCounter(&count))
+ FatalError("QueryPerformanceCounter");
+ if (!freq.QuadPart) { // one time initialization
+ if (!QueryPerformanceFrequency(&freq))
+ FatalError("QueryPerformanceFrequency");
+ start = count;
+ }
+ return (double)(count.QuadPart - start.QuadPart) / freq.QuadPart;
+}
+#endif
+
+/* end Based on stackoverflow */
+
+static int show_benchmark(const char *descr, double t1, double t2, size_t size, int rep, const char *reptext)
+{
+ double tdiff = t2 - t1;
+ double nstime;
+
+ printf("operation: %s\n", descr);
+ printf("elapsed time: %.3f (s)\n", tdiff);
+ printf("iterations: %d\n", rep);
+ printf("size: %lu (bytes)\n", (unsigned long)size);
+ printf("bandwidth: %.3f (MB/s)\n", (double)rep * (double)size / 1e6 / tdiff);
+ printf("throughput in ops per sec: %.3f\n", rep / tdiff);
+ if (reptext && rep != 1) {
+ printf("throughput in %s ops per sec: %.3f\n", reptext, 1 / tdiff);
+ }
+ nstime = tdiff * 1e9 / rep;
+ if (nstime < 1000) {
+ printf("time per op: %.3f (ns)\n", nstime);
+ } else if (nstime < 1e6) {
+ printf("time per op: %.3f (us)\n", nstime / 1000);
+ } else if (nstime < 1e9) {
+ printf("time per op: %.3f (ms)\n", nstime / 1e6);
+ } else {
+ printf("time per op: %.3f (s)\n", nstime / 1e9);
+ }
+ return 0;
+}
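+
+/*
+ * Usage sketch (run_op is a hypothetical operation under test):
+ *
+ *     double t1 = elapsed_realtime();
+ *     for (i = 0; i < rep; ++i) run_op();
+ *     double t2 = elapsed_realtime();
+ *     show_benchmark("encode", t1, t2, size, rep, 0);
+ */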
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* ELAPSED_H */
diff --git a/nostrdb/flatcc/support/hexdump.h b/nostrdb/flatcc/support/hexdump.h
@@ -0,0 +1,47 @@
+#ifndef HEXDUMP_H
+#define HEXDUMP_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdio.h>
+
+/* Based on: http://stackoverflow.com/a/7776146 */
+static void hexdump(const char *desc, const void *addr, size_t len, FILE *fp) {
+ unsigned int i;
+ unsigned char buf[17];
+ const unsigned char *pc = (const unsigned char*)addr;
+
+ /* Output description if given. */
+ if (desc != NULL) fprintf(fp, "%s:\n", desc);
+
+ for (i = 0; i < (unsigned int)len; i++) {
+
+ if ((i % 16) == 0) {
+ if (i != 0) fprintf(fp, " |%s|\n", buf);
+ fprintf(fp, "%08x ", i);
+ } else if ((i % 8) == 0) {
+ fprintf(fp, " ");
+ }
+ fprintf(fp, " %02x", pc[i]);
+ if ((pc[i] < 0x20) || (pc[i] > 0x7e)) {
+ buf[i % 16] = '.';
+ } else {
+ buf[i % 16] = pc[i];
+ }
+ buf[(i % 16) + 1] = '\0';
+ }
+ if (i % 16 <= 8 && i % 16 != 0) fprintf(fp, " ");
+ while ((i % 16) != 0) {
+ fprintf(fp, " ");
+ i++;
+ }
+ fprintf(fp, " |%s|\n", buf);
+}
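+
+/*
+ * Example output line: 16 bytes per row, hex split into two groups of
+ * eight, printable ASCII in the right-hand gutter, roughly:
+ *
+ * 00000000  01 02 03 04 05 06 07 08  61 62 63 64 65 66 67 68  |........abcdefgh|
+ */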
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* HEXDUMP_H */
diff --git a/nostrdb/flatcc/support/readfile.h b/nostrdb/flatcc/support/readfile.h
@@ -0,0 +1,66 @@
+#ifndef READFILE_H
+#define READFILE_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+
+static char *readfile(const char *filename, size_t max_size, size_t *size_out)
+{
+ FILE *fp;
+ long k;
+ size_t size, pos, n, _out;
+ char *buf;
+
+ size_out = size_out ? size_out : &_out;
+
+ fp = fopen(filename, "rb");
+ size = 0;
+ buf = 0;
+
+ if (!fp) {
+ goto fail;
+ }
+ fseek(fp, 0L, SEEK_END);
+ k = ftell(fp);
+ if (k < 0) goto fail;
+ size = (size_t)k;
+ *size_out = size;
+ if (max_size > 0 && size > max_size) {
+ goto fail;
+ }
+ rewind(fp);
+ buf = (char *)malloc(size ? size : 1);
+ if (!buf) {
+ goto fail;
+ }
+ pos = 0;
+ while ((n = fread(buf + pos, 1, size - pos, fp))) {
+ pos += n;
+ }
+ if (pos != size) {
+ goto fail;
+ }
+ fclose(fp);
+ *size_out = size;
+ return buf;
+
+fail:
+ if (fp) {
+ fclose(fp);
+ }
+ if (buf) {
+ free(buf);
+ }
+ *size_out = size;
+ return 0;
+}
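+
+/*
+ * Usage sketch (file and helper names are hypothetical): read at most
+ * 1 MiB and release the buffer with free():
+ *
+ *     size_t size;
+ *     char *data = readfile("data.bin", 1024 * 1024, &size);
+ *     if (data) { use(data, size); free(data); }
+ */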
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* READFILE_H */
diff --git a/nostrdb/flatcc/verifier.c b/nostrdb/flatcc/verifier.c
@@ -0,0 +1,617 @@
+/*
+ * Runtime support for verifying flatbuffers.
+ *
+ * Depends mutually on generated verifier functions for table types that
+ * call into this library.
+ */
+#include <string.h>
+
+#include "flatcc/flatcc_rtconfig.h"
+#include "flatcc/flatcc_flatbuffers.h"
+#include "flatcc/flatcc_verifier.h"
+#include "flatcc/flatcc_identifier.h"
+
+/* Customization for testing. */
+#if FLATCC_DEBUG_VERIFY
+#define FLATCC_VERIFIER_ASSERT_ON_ERROR 1
+#include <stdio.h>
+#define FLATCC_VERIFIER_ASSERT(cond, reason) \
+ if (!(cond)) { fprintf(stderr, "verifier assert: %s\n", \
+ flatcc_verify_error_string(reason)); FLATCC_ASSERT(0); return reason; }
+#endif
+
+#if FLATCC_TRACE_VERIFY
+#include <stdio.h>
+#define trace_verify(s, p) \
+ fprintf(stderr, "trace verify: %s: 0x%02x\n", (s), (unsigned)(size_t)(p));
+#else
+#define trace_verify(s, p) ((void)0)
+#endif
+
+/* The runtime library does not use the global config file. */
+
+/* This is a guideline, not an exact measure. */
+#ifndef FLATCC_VERIFIER_MAX_LEVELS
+#define FLATCC_VERIFIER_MAX_LEVELS 100
+#endif
+
+#ifndef FLATCC_VERIFIER_ASSERT_ON_ERROR
+#define FLATCC_VERIFIER_ASSERT_ON_ERROR 0
+#endif
+
+/*
+ * Generally a check should report whether a buffer is valid or not so
+ * that the runtime can take appropriate action rather than crash, even
+ * in debug builds; still, assertions are helpful when debugging a problem.
+ *
+ * This must be compiled into the debug runtime library to take effect.
+ */
+#ifndef FLATCC_VERIFIER_ASSERT_ON_ERROR
+#define FLATCC_VERIFIER_ASSERT_ON_ERROR 1
+#endif
+
+/* May be redefined for logging purposes. */
+#ifndef FLATCC_VERIFIER_ASSERT
+#define FLATCC_VERIFIER_ASSERT(cond, reason) FLATCC_ASSERT(cond)
+#endif
+
+#if FLATCC_VERIFIER_ASSERT_ON_ERROR
+#define flatcc_verify(cond, reason) if (!(cond)) { FLATCC_VERIFIER_ASSERT(cond, reason); return reason; }
+#else
+#define flatcc_verify(cond, reason) if (!(cond)) { return reason; }
+#endif
+
+
+#define uoffset_t flatbuffers_uoffset_t
+#define soffset_t flatbuffers_soffset_t
+#define voffset_t flatbuffers_voffset_t
+#define utype_t flatbuffers_utype_t
+#define thash_t flatbuffers_thash_t
+
+#define uoffset_size sizeof(uoffset_t)
+#define soffset_size sizeof(soffset_t)
+#define voffset_size sizeof(voffset_t)
+#define utype_size sizeof(utype_t)
+#define thash_size sizeof(thash_t)
+#define offset_size uoffset_size
+
+const char *flatcc_verify_error_string(int err)
+{
+ switch (err) {
+#define XX(no, str) \
+ case flatcc_verify_error_##no: \
+ return str;
+ FLATCC_VERIFY_ERROR_MAP(XX)
+#undef XX
+ default:
+ return "unknown";
+ }
+}
+
+/* `cond` may have side effects. */
+#define verify(cond, reason) do { int c = (cond); flatcc_verify(c, reason); } while(0)
+
+/*
+ * Identifies checks related to runtime conditions (buffer size and
+ * alignment) as separate from those related to buffer content.
+ */
+#define verify_runtime(cond, reason) verify(cond, reason)
+
+#define check_result(x) if (x) { return (x); }
+
+#define check_field(td, id, required, base) do { \
+ int ret = get_offset_field(td, id, required, &base); \
+ if (ret || !base) { return ret; }} while (0)
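+
+/*
+ * On success `base` holds the buffer-relative position of the field's
+ * offset; for a missing optional field, check_field makes the enclosing
+ * function return flatcc_verify_ok early with base == 0.
+ */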
+
+static inline uoffset_t read_uoffset(const void *p, uoffset_t base)
+{
+ return __flatbuffers_uoffset_read_from_pe((uint8_t *)p + base);
+}
+
+static inline thash_t read_thash_identifier(const char *identifier)
+{
+ return flatbuffers_type_hash_from_string(identifier);
+}
+
+static inline thash_t read_thash(const void *p, uoffset_t base)
+{
+ return __flatbuffers_thash_read_from_pe((uint8_t *)p + base);
+}
+
+static inline voffset_t read_voffset(const void *p, uoffset_t base)
+{
+ return __flatbuffers_voffset_read_from_pe((uint8_t *)p + base);
+}
+
+static inline int check_header(uoffset_t end, uoffset_t base, uoffset_t offset)
+{
+ uoffset_t k = base + offset;
+
+ if (uoffset_size <= voffset_size && k + offset_size < k) {
+ return 0;
+ }
+
+ /* The `k > base` rather than `k >= base` is to avoid null offsets. */
+ return k > base && k + offset_size <= end && !(k & (offset_size - 1));
+}
+
+static inline int check_aligned_header(uoffset_t end, uoffset_t base, uoffset_t offset, uint16_t align)
+{
+ uoffset_t k = base + offset;
+
+ if (uoffset_size <= voffset_size && k + offset_size < k) {
+ return 0;
+ }
+ /* Alignment refers to element 0, and the header must also be aligned. */
+ align = align < uoffset_size ? uoffset_size : align;
+
+ /* Note to self: the builder can also use the mask OR trick to propagate `min_align`. */
+ return k > base && k + offset_size <= end && !((k + offset_size) & ((offset_size - 1) | (align - 1u)));
+}
+
+static inline int verify_struct(uoffset_t end, uoffset_t base, uoffset_t offset, uoffset_t size, uint16_t align)
+{
+ /* Structs can have zero size so `end` is a valid value. */
+ if (offset == 0 || base + offset > end) {
+ return flatcc_verify_error_offset_out_of_range;
+ }
+ base += offset;
+ verify(base + size >= base, flatcc_verify_error_struct_size_overflow);
+ verify(base + size <= end, flatcc_verify_error_struct_out_of_range);
+ verify(!(base & (align - 1u)), flatcc_verify_error_struct_unaligned);
+ return flatcc_verify_ok;
+}
+
+static inline voffset_t read_vt_entry(flatcc_table_verifier_descriptor_t *td, voffset_t id)
+{
+ voffset_t vo = (id + 2u) * sizeof(voffset_t);
+
+ /* Assumes tsize has been verified for alignment. */
+ if (vo >= td->vsize) {
+ return 0;
+ }
+ return read_voffset(td->vtable, vo);
+}
+
+static inline const void *get_field_ptr(flatcc_table_verifier_descriptor_t *td, voffset_t id)
+{
+ voffset_t vte = read_vt_entry(td, id);
+ return vte ? (const uint8_t *)td->buf + td->table + vte : 0;
+}
+
+static int verify_field(flatcc_table_verifier_descriptor_t *td,
+ voffset_t id, int required, uoffset_t size, uint16_t align)
+{
+ uoffset_t k, k2;
+ voffset_t vte;
+ uoffset_t base = (uoffset_t)(size_t)td->buf;
+
+
+ /*
+ * Otherwise range-check assumptions break, and normal access code likely breaks as well.
+ * We don't require voffset_size < uoffset_size, but some checks are faster if true.
+ */
+ FLATCC_ASSERT(uoffset_size >= voffset_size);
+ FLATCC_ASSERT(soffset_size == uoffset_size);
+
+ vte = read_vt_entry(td, id);
+ if (!vte) {
+ verify(!required, flatcc_verify_error_required_field_missing);
+ return flatcc_verify_ok;
+ }
+ trace_verify("table buffer", td->buf);
+ trace_verify("table", td->table);
+ trace_verify("id", id);
+ trace_verify("vte", vte);
+
+ /*
+ * Note that we don't add td.table to k, and we test against the table
+ * size, not the table end or buffer end. Otherwise it would not be safe
+ * to optimize out the k <= k2 check for normal uoffset and voffset
+ * configurations.
+ */
+ k = vte;
+ k2 = k + size;
+ verify(k2 <= td->tsize, flatcc_verify_error_table_field_out_of_range);
+ /* This normally optimizes to nop. */
+ verify(uoffset_size > voffset_size || k <= k2, flatcc_verify_error_table_field_size_overflow);
+ trace_verify("table + vte", vte + td->table);
+ k += td->table + base;
+ trace_verify("entry: buf + table + vte", k);
+ trace_verify("align", align);
+ trace_verify("align masked entry", k & (align - 1u));
+ verify(!(k & (align - 1u)), flatcc_verify_error_table_field_not_aligned);
+ /* We assume the table size has already been verified. */
+ return flatcc_verify_ok;
+}
+
+static int get_offset_field(flatcc_table_verifier_descriptor_t *td, voffset_t id, int required, uoffset_t *out)
+{
+ uoffset_t k, k2;
+ voffset_t vte;
+
+ vte = read_vt_entry(td, id);
+ if (!vte) {
+ *out = 0;
+ if (required) {
+ return flatcc_verify_error_required_field_missing;
+ }
+ /* Missing, but not invalid. */
+ return flatcc_verify_ok;
+ }
+ /*
+ * Note that we don't add td.table to k, and we test against the table
+ * size, not the table end or buffer end. Otherwise it would not be safe
+ * to optimize out the k <= k2 check for normal uoffset and voffset
+ * configurations.
+ */
+ k = vte;
+ k2 = k + offset_size;
+ verify(k2 <= td->tsize, flatcc_verify_error_table_field_out_of_range);
+ /* This normally optimizes to nop. */
+ verify(uoffset_size > voffset_size || k <= k2, flatcc_verify_error_table_field_size_overflow);
+ k += td->table;
+ verify(!(k & (offset_size - 1u)), flatcc_verify_error_table_field_not_aligned);
+ /* We assume the table size has already been verified. */
+ *out = k;
+ return flatcc_verify_ok;
+}
+
+static inline int verify_string(const void *buf, uoffset_t end, uoffset_t base, uoffset_t offset)
+{
+ uoffset_t n;
+
+ verify(check_header(end, base, offset), flatcc_verify_error_string_header_out_of_range_or_unaligned);
+ base += offset;
+ n = read_uoffset(buf, base);
+ base += offset_size;
+ verify(end - base > n, flatcc_verify_error_string_out_of_range);
+ verify(((uint8_t *)buf + base)[n] == 0, flatcc_verify_error_string_not_zero_terminated);
+ return flatcc_verify_ok;
+}
+
+/*
+ * Keep the interface somewhat similar to flatcc_builder_start_vector.
+ * `max_count` is a precomputed division used to guard the overflow check on the vector length.
+ */
+static inline int verify_vector(const void *buf, uoffset_t end, uoffset_t base, uoffset_t offset, uoffset_t elem_size, uint16_t align, uoffset_t max_count)
+{
+ uoffset_t n;
+
+ verify(check_aligned_header(end, base, offset, align), flatcc_verify_error_vector_header_out_of_range_or_unaligned);
+ base += offset;
+ n = read_uoffset(buf, base);
+ base += offset_size;
+ /* `n * elem_size` can overflow uncontrollably otherwise. */
+ verify(n <= max_count, flatcc_verify_error_vector_count_exceeds_representable_vector_size);
+ verify(end - base >= n * elem_size, flatcc_verify_error_vector_out_of_range);
+ return flatcc_verify_ok;
+}
+
+static inline int verify_string_vector(const void *buf, uoffset_t end, uoffset_t base, uoffset_t offset)
+{
+ uoffset_t i, n;
+
+ check_result(verify_vector(buf, end, base, offset, offset_size, offset_size, FLATBUFFERS_COUNT_MAX(offset_size)));
+ base += offset;
+ n = read_uoffset(buf, base);
+ base += offset_size;
+ for (i = 0; i < n; ++i, base += offset_size) {
+ check_result(verify_string(buf, end, base, read_uoffset(buf, base)));
+ }
+ return flatcc_verify_ok;
+}
+
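+/*
+ * Wire-format recap for the checks below: a table starts with a signed
+ * offset back to its vtable; vtable entry 0 is the vtable size in bytes,
+ * entry 1 is the table size, and later entries hold per-field offsets
+ * into the table, with 0 meaning the field is absent.
+ */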
+static inline int verify_table(const void *buf, uoffset_t end, uoffset_t base, uoffset_t offset,
+ int ttl, flatcc_table_verifier_f tvf)
+{
+ uoffset_t vbase, vend;
+ flatcc_table_verifier_descriptor_t td;
+
+ verify((td.ttl = ttl - 1), flatcc_verify_error_max_nesting_level_reached);
+ verify(check_header(end, base, offset), flatcc_verify_error_table_header_out_of_range_or_unaligned);
+ td.table = base + offset;
+ /* Read vtable offset - it is signed, but we want it unsigned, assuming 2's complement works. */
+ vbase = td.table - read_uoffset(buf, td.table);
+ verify((soffset_t)vbase >= 0 && !(vbase & (voffset_size - 1)), flatcc_verify_error_vtable_offset_out_of_range_or_unaligned);
+ verify(vbase + voffset_size <= end, flatcc_verify_error_vtable_header_out_of_range);
+ /* Read vtable size. */
+ td.vsize = read_voffset(buf, vbase);
+ vend = vbase + td.vsize;
+ verify(vend <= end && !(td.vsize & (voffset_size - 1)), flatcc_verify_error_vtable_size_out_of_range_or_unaligned);
+ /* Optimizes away overflow check if uoffset_t is large enough. */
+ verify(uoffset_size > voffset_size || vend >= vbase, flatcc_verify_error_vtable_size_overflow);
+
+ verify(td.vsize >= 2 * voffset_size, flatcc_verify_error_vtable_header_too_small);
+ /* Read table size. */
+ td.tsize = read_voffset(buf, vbase + voffset_size);
+ verify(end - td.table >= td.tsize, flatcc_verify_error_table_size_out_of_range);
+ td.vtable = (uint8_t *)buf + vbase;
+ td.buf = buf;
+ td.end = end;
+ return tvf(&td);
+}
+
+static inline int verify_table_vector(const void *buf, uoffset_t end, uoffset_t base, uoffset_t offset, int ttl, flatcc_table_verifier_f tvf)
+{
+ uoffset_t i, n;
+
+ verify(ttl-- > 0, flatcc_verify_error_max_nesting_level_reached);
+ check_result(verify_vector(buf, end, base, offset, offset_size, offset_size, FLATBUFFERS_COUNT_MAX(offset_size)));
+ base += offset;
+ n = read_uoffset(buf, base);
+ base += offset_size;
+ for (i = 0; i < n; ++i, base += offset_size) {
+ check_result(verify_table(buf, end, base, read_uoffset(buf, base), ttl, tvf));
+ }
+ return flatcc_verify_ok;
+}
+
+static inline int verify_union_vector(const void *buf, uoffset_t end, uoffset_t base, uoffset_t offset,
+ uoffset_t count, const utype_t *types, int ttl, flatcc_union_verifier_f uvf)
+{
+ uoffset_t i, n, elem;
+ flatcc_union_verifier_descriptor_t ud;
+
+ verify(ttl-- > 0, flatcc_verify_error_max_nesting_level_reached);
+ check_result(verify_vector(buf, end, base, offset, offset_size, offset_size, FLATBUFFERS_COUNT_MAX(offset_size)));
+ base += offset;
+ n = read_uoffset(buf, base);
+ verify(n == count, flatcc_verify_error_union_vector_length_mismatch);
+ base += offset_size;
+
+ ud.buf = buf;
+ ud.end = end;
+ ud.ttl = ttl;
+
+ for (i = 0; i < n; ++i, base += offset_size) {
+ /* Table vectors can never be null, but unions can when the type is NONE. */
+ elem = read_uoffset(buf, base);
+ if (elem == 0) {
+ verify(types[i] == 0, flatcc_verify_error_union_element_absent_without_type_NONE);
+ } else {
+ verify(types[i] != 0, flatcc_verify_error_union_element_present_with_type_NONE);
+ ud.type = types[i];
+ ud.base = base;
+ ud.offset = elem;
+ check_result(uvf(&ud));
+ }
+ }
+ return flatcc_verify_ok;
+}
+
+int flatcc_verify_field(flatcc_table_verifier_descriptor_t *td,
+ voffset_t id, size_t size, uint16_t align)
+{
+ check_result(verify_field(td, id, 0, (uoffset_t)size, align));
+ return flatcc_verify_ok;
+}
+
+int flatcc_verify_string_field(flatcc_table_verifier_descriptor_t *td,
+ voffset_t id, int required)
+{
+ uoffset_t base;
+
+ check_field(td, id, required, base);
+ return verify_string(td->buf, td->end, base, read_uoffset(td->buf, base));
+}
+
+int flatcc_verify_vector_field(flatcc_table_verifier_descriptor_t *td,
+ voffset_t id, int required, size_t elem_size, uint16_t align, size_t max_count)
+{
+ uoffset_t base;
+
+ check_field(td, id, required, base);
+ return verify_vector(td->buf, td->end, base, read_uoffset(td->buf, base),
+ (uoffset_t)elem_size, align, (uoffset_t)max_count);
+}
+
+int flatcc_verify_string_vector_field(flatcc_table_verifier_descriptor_t *td,
+ voffset_t id, int required)
+{
+ uoffset_t base;
+
+ check_field(td, id, required, base);
+ return verify_string_vector(td->buf, td->end, base, read_uoffset(td->buf, base));
+}
+
+int flatcc_verify_table_field(flatcc_table_verifier_descriptor_t *td,
+ voffset_t id, int required, flatcc_table_verifier_f tvf)
+{
+ uoffset_t base;
+
+ check_field(td, id, required, base);
+ return verify_table(td->buf, td->end, base, read_uoffset(td->buf, base), td->ttl, tvf);
+}
+
+int flatcc_verify_table_vector_field(flatcc_table_verifier_descriptor_t *td,
+ voffset_t id, int required, flatcc_table_verifier_f tvf)
+{
+ uoffset_t base;
+
+ check_field(td, id, required, base);
+ return verify_table_vector(td->buf, td->end, base, read_uoffset(td->buf, base), td->ttl, tvf);
+}
+
+int flatcc_verify_union_table(flatcc_union_verifier_descriptor_t *ud, flatcc_table_verifier_f *tvf)
+{
+ return verify_table(ud->buf, ud->end, ud->base, ud->offset, ud->ttl, tvf);
+}
+
+int flatcc_verify_union_struct(flatcc_union_verifier_descriptor_t *ud, size_t size, uint16_t align)
+{
+ return verify_struct(ud->end, ud->base, ud->offset, (uoffset_t)size, align);
+}
+
+int flatcc_verify_union_string(flatcc_union_verifier_descriptor_t *ud)
+{
+ return verify_string(ud->buf, ud->end, ud->base, ud->offset);
+}
+
+int flatcc_verify_buffer_header(const void *buf, size_t bufsiz, const char *fid)
+{
+ thash_t id, id2;
+
+ verify_runtime(!(((size_t)buf) & (offset_size - 1)), flatcc_verify_error_runtime_buffer_header_not_aligned);
+ /* -8 ensures no scalar or offset field size can overflow. */
+ verify_runtime(bufsiz <= FLATBUFFERS_UOFFSET_MAX - 8, flatcc_verify_error_runtime_buffer_size_too_large);
+ /*
+ * Even if we specify no fid, the user might later. Therefore
+ * require space for it. Not all buffer generators will take this
+ * into account, so it is possible to fail an otherwise valid buffer
+ * - but such buffers aren't safe.
+ */
+ verify(bufsiz >= offset_size + FLATBUFFERS_IDENTIFIER_SIZE, flatcc_verify_error_buffer_header_too_small);
+ if (fid != 0) {
+ id2 = read_thash_identifier(fid);
+ id = read_thash(buf, offset_size);
+ verify(id2 == 0 || id == id2, flatcc_verify_error_identifier_mismatch);
+ }
+ return flatcc_verify_ok;
+}
+
+int flatcc_verify_typed_buffer_header(const void *buf, size_t bufsiz, flatbuffers_thash_t thash)
+{
+ thash_t id, id2;
+
+ verify_runtime(!(((size_t)buf) & (offset_size - 1)), flatcc_verify_error_runtime_buffer_header_not_aligned);
+ /* -8 ensures no scalar or offset field size can overflow. */
+ verify_runtime(bufsiz <= FLATBUFFERS_UOFFSET_MAX - 8, flatcc_verify_error_runtime_buffer_size_too_large);
+ /*
+ * Even if we specify no fid, the user might later. Therefore
+ * require space for it. Not all buffer generators will take this
+ * into account, so it is possible to fail an otherwise valid buffer
+ * - but such buffers aren't safe.
+ */
+ verify(bufsiz >= offset_size + FLATBUFFERS_IDENTIFIER_SIZE, flatcc_verify_error_buffer_header_too_small);
+ if (thash != 0) {
+ id2 = thash;
+ id = read_thash(buf, offset_size);
+ verify(id2 == 0 || id == id2, flatcc_verify_error_identifier_mismatch);
+ }
+ return flatcc_verify_ok;
+}
+
+int flatcc_verify_struct_as_root(const void *buf, size_t bufsiz, const char *fid, size_t size, uint16_t align)
+{
+ check_result(flatcc_verify_buffer_header(buf, bufsiz, fid));
+ return verify_struct((uoffset_t)bufsiz, 0, read_uoffset(buf, 0), (uoffset_t)size, align);
+}
+
+int flatcc_verify_struct_as_typed_root(const void *buf, size_t bufsiz, flatbuffers_thash_t thash, size_t size, uint16_t align)
+{
+ check_result(flatcc_verify_typed_buffer_header(buf, bufsiz, thash));
+ return verify_struct((uoffset_t)bufsiz, 0, read_uoffset(buf, 0), (uoffset_t)size, align);
+}
+
+int flatcc_verify_table_as_root(const void *buf, size_t bufsiz, const char *fid, flatcc_table_verifier_f *tvf)
+{
+ check_result(flatcc_verify_buffer_header(buf, (uoffset_t)bufsiz, fid));
+ return verify_table(buf, (uoffset_t)bufsiz, 0, read_uoffset(buf, 0), FLATCC_VERIFIER_MAX_LEVELS, tvf);
+}
+
+int flatcc_verify_table_as_typed_root(const void *buf, size_t bufsiz, flatbuffers_thash_t thash, flatcc_table_verifier_f *tvf)
+{
+ check_result(flatcc_verify_typed_buffer_header(buf, (uoffset_t)bufsiz, thash));
+ return verify_table(buf, (uoffset_t)bufsiz, 0, read_uoffset(buf, 0), FLATCC_VERIFIER_MAX_LEVELS, tvf);
+}
+
+int flatcc_verify_struct_as_nested_root(flatcc_table_verifier_descriptor_t *td,
+ voffset_t id, int required, const char *fid, size_t size, uint16_t align)
+{
+ const uoffset_t *buf;
+ uoffset_t bufsiz;
+
+ check_result(flatcc_verify_vector_field(td, id, required, align, 1, FLATBUFFERS_COUNT_MAX(1)));
+ if (0 == (buf = get_field_ptr(td, id))) {
+ return flatcc_verify_ok;
+ }
+ buf = (const uoffset_t *)((size_t)buf + read_uoffset(buf, 0));
+ bufsiz = read_uoffset(buf, 0);
+ ++buf;
+ return flatcc_verify_struct_as_root(buf, bufsiz, fid, size, align);
+}
+
+int flatcc_verify_table_as_nested_root(flatcc_table_verifier_descriptor_t *td,
+ voffset_t id, int required, const char *fid,
+ uint16_t align, flatcc_table_verifier_f tvf)
+{
+ const uoffset_t *buf;
+ uoffset_t bufsiz;
+
+ check_result(flatcc_verify_vector_field(td, id, required, align, 1, FLATBUFFERS_COUNT_MAX(1)));
+ if (0 == (buf = get_field_ptr(td, id))) {
+ return flatcc_verify_ok;
+ }
+ buf = (const uoffset_t *)((size_t)buf + read_uoffset(buf, 0));
+ bufsiz = read_uoffset(buf, 0);
+ ++buf;
+ /*
+ * Don't verify the nested buffer's identifier here - the information is
+ * difficult to obtain and might not be what is desired anyway. The user
+ * can verify it later.
+ */
+ check_result(flatcc_verify_buffer_header(buf, bufsiz, fid));
+ return verify_table(buf, bufsiz, 0, read_uoffset(buf, 0), td->ttl, tvf);
+}
+
+int flatcc_verify_union_field(flatcc_table_verifier_descriptor_t *td,
+ voffset_t id, int required, flatcc_union_verifier_f uvf)
+{
+ voffset_t vte_type, vte_table;
+ const uint8_t *type;
+ uoffset_t base;
+ flatcc_union_verifier_descriptor_t ud;
+
+ if (0 == (vte_type = read_vt_entry(td, id - 1))) {
+ vte_table = read_vt_entry(td, id);
+ verify(vte_table == 0, flatcc_verify_error_union_cannot_have_a_table_without_a_type);
+ verify(!required, flatcc_verify_error_type_field_absent_from_required_union_field);
+ return flatcc_verify_ok;
+ }
+ /* No need to check required here. */
+ check_result(verify_field(td, id - 1, 0, 1, 1));
+ /* Only now is it safe to read the type. */
+ vte_table = read_vt_entry(td, id);
+ type = (const uint8_t *)td->buf + td->table + vte_type;
+ verify(*type || vte_table == 0, flatcc_verify_error_union_type_NONE_cannot_have_a_value);
+
+ if (*type == 0) {
+ return flatcc_verify_ok;
+ }
+ check_field(td, id, required, base);
+ ud.buf = td->buf;
+ ud.end = td->end;
+ ud.ttl = td->ttl;
+ ud.base = base;
+ ud.offset = read_uoffset(td->buf, base);
+ ud.type = *type;
+ return uvf(&ud);
+}
+
+int flatcc_verify_union_vector_field(flatcc_table_verifier_descriptor_t *td,
+ flatbuffers_voffset_t id, int required, flatcc_union_verifier_f uvf)
+{
+ voffset_t vte_type, vte_table;
+ const uoffset_t *buf;
+ const utype_t *types;
+ uoffset_t count, base;
+
+ if (0 == (vte_type = read_vt_entry(td, id - 1))) {
+ if (0 == (vte_table = read_vt_entry(td, id))) {
+ verify(!required, flatcc_verify_error_type_field_absent_from_required_union_vector_field);
+ }
+ }
+ check_result(flatcc_verify_vector_field(td, id - 1, required,
+ utype_size, utype_size, FLATBUFFERS_COUNT_MAX(utype_size)));
+ if (0 == (buf = get_field_ptr(td, id - 1))) {
+ return flatcc_verify_ok;
+ }
+ buf = (const uoffset_t *)((size_t)buf + read_uoffset(buf, 0));
+ count = read_uoffset(buf, 0);
+ ++buf;
+ types = (utype_t *)buf;
+
+ check_field(td, id, required, base);
+ return verify_union_vector(td->buf, td->end, base, read_uoffset(td->buf, base),
+ count, types, td->ttl, uvf);
+}
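+
+/*
+ * Usage sketch (illustrative, not part of this file): applications
+ * normally reach these entry points through schema-generated wrappers.
+ * For a root table `Foo` in namespace `ns`, flatcc emits a wrapper
+ * along the lines of:
+ *
+ *     if (ns_Foo_verify_as_root(buf, bufsiz) == flatcc_verify_ok) {
+ *         // buffer is structurally safe to read
+ *     }
+ *
+ * which forwards to flatcc_verify_table_as_root() with the generated
+ * flatcc_table_verifier_f callback for `Foo`.
+ */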
diff --git a/nostrdb/jsmn.h b/nostrdb/jsmn.h
@@ -100,7 +100,7 @@ JSMN_API void jsmn_init(jsmn_parser *parser);
* a single JSON object.
*/
JSMN_API int jsmn_parse(jsmn_parser *parser, const char *js, const size_t len,
- jsmntok_t *tokens, const unsigned int num_tokens);
+ jsmntok_t *tokens, const unsigned int num_tokens, int stop_at_id);
#ifndef JSMN_HEADER
/**
@@ -269,12 +269,13 @@ static int jsmn_parse_string(jsmn_parser *parser, const char *js,
* Parse JSON string and fill tokens.
*/
JSMN_API int jsmn_parse(jsmn_parser *parser, const char *js, const size_t len,
- jsmntok_t *tokens, const unsigned int num_tokens) {
- int r;
- int i;
+ jsmntok_t *tokens, const unsigned int num_tokens, int stop_at_id) {
+ int r, i, idkey;
jsmntok_t *token;
int count = parser->toknext;
+ idkey = 0;
+
for (; parser->pos < len && js[parser->pos] != '\0'; parser->pos++) {
char c;
jsmntype_t type;
@@ -370,6 +371,22 @@ JSMN_API int jsmn_parse(jsmn_parser *parser, const char *js, const size_t len,
if (parser->toksuper != -1 && tokens != NULL) {
tokens[parser->toksuper].size++;
}
+
+ // Big hack: resumable parsing. Once the "id" key's 64-char hex
+ // value has been tokenized, bail out with -42 so the caller can
+ // act on the id before deciding whether to resume.
+ if (stop_at_id) {
+ token = &tokens[parser->toknext-1];
+ if (idkey == 1 && (token->end - token->start) == 64) {
+ //printf("jsmn: found id '%.*s'\n", token->end - token->start, js + token->start);
+ parser->pos++;
+ return -42;
+ } else if (idkey == 0 && (token->end - token->start) == 2 &&
+ (js + token->start)[0] == 'i' &&
+ (js + token->start)[1] == 'd') {
+ //printf("jsmn: found id key\n");
+ idkey = 1;
+ }
+ }
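+ /*
+ * Caller-side sketch (assumed usage, not part of jsmn): pass
+ * stop_at_id = 1 and treat -42 as "id found, parsing suspended":
+ *
+ *     int r = jsmn_parse(&parser, js, len, toks, ntoks, 1);
+ *     if (r == -42) {
+ *         // toks[parser.toknext - 1] spans the 64-char hex id;
+ *         // call jsmn_parse again with stop_at_id = 0 to resume
+ *         // from parser.pos if the rest of the event is needed.
+ *     }
+ */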
+
break;
case '\t':
case '\r':
diff --git a/nostrdb/lmdb.h b/nostrdb/lmdb.h
@@ -0,0 +1,1608 @@
+/** @file lmdb.h
+ * @brief Lightning memory-mapped database library
+ *
+ * @mainpage Lightning Memory-Mapped Database Manager (LMDB)
+ *
+ * @section intro_sec Introduction
+ * LMDB is a Btree-based database management library modeled loosely on the
+ * BerkeleyDB API, but much simplified. The entire database is exposed
+ * in a memory map, and all data fetches return data directly
+ * from the mapped memory, so no malloc's or memcpy's occur during
+ * data fetches. As such, the library is extremely simple because it
+ * requires no page caching layer of its own, and it is extremely high
+ * performance and memory-efficient. It is also fully transactional with
+ * full ACID semantics, and when the memory map is read-only, the
+ * database integrity cannot be corrupted by stray pointer writes from
+ * application code.
+ *
+ * The library is fully thread-aware and supports concurrent read/write
+ * access from multiple processes and threads. Data pages use a copy-on-
+ * write strategy so no active data pages are ever overwritten, which
+ * also provides resistance to corruption and eliminates the need of any
+ * special recovery procedures after a system crash. Writes are fully
+ * serialized; only one write transaction may be active at a time, which
+ * guarantees that writers can never deadlock. The database structure is
+ * multi-versioned so readers run with no locks; writers cannot block
+ * readers, and readers don't block writers.
+ *
+ * Unlike other well-known database mechanisms which use either write-ahead
+ * transaction logs or append-only data writes, LMDB requires no maintenance
+ * during operation. Both write-ahead loggers and append-only databases
+ * require periodic checkpointing and/or compaction of their log or database
+ * files; otherwise they grow without bound. LMDB tracks free pages within
+ * the database and re-uses them for new write operations, so the database
+ * size does not grow without bound in normal use.
+ *
+ * The memory map can be used as a read-only or read-write map. It is
+ * read-only by default as this provides total immunity to corruption.
+ * Using read-write mode offers much higher write performance, but adds
+ * the possibility for stray application writes through pointers to silently
+ * corrupt the database. Of course if your application code is known to
+ * be bug-free (...) then this is not an issue.
+ *
+ * If this is your first time using a transactional embedded key/value
+ * store, you may find the \ref starting page to be helpful.
+ *
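+ * A minimal usage sketch (illustrative only; error checks omitted and
+ * "./db" is an assumed, pre-existing directory):
+ *
+ * @code
+ *	MDB_env *env; MDB_txn *txn; MDB_dbi dbi;
+ *	MDB_val key, data;
+ *
+ *	mdb_env_create(&env);
+ *	mdb_env_open(env, "./db", 0, 0664);
+ *	mdb_txn_begin(env, NULL, 0, &txn);
+ *	mdb_dbi_open(txn, NULL, 0, &dbi);
+ *	key.mv_size = 3;  key.mv_data  = (void *)"foo";
+ *	data.mv_size = 3; data.mv_data = (void *)"bar";
+ *	mdb_put(txn, dbi, &key, &data, 0);
+ *	mdb_txn_commit(txn);
+ *	mdb_env_close(env);
+ * @endcode
+ *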
+ * @section caveats_sec Caveats
+ * Troubleshooting the lock file, plus semaphores on BSD systems:
+ *
+ * - A broken lockfile can cause sync issues.
+ * Stale reader transactions left behind by an aborted program
+ * cause further writes to grow the database quickly, and
+ * stale locks can block further operation.
+ *
+ * Fix: Check for stale readers periodically, using the
+ * #mdb_reader_check function or the \ref mdb_stat_1 "mdb_stat" tool.
+ * Stale writers will be cleared automatically on some systems:
+ * - Windows - automatic
+ * - Linux, systems using POSIX mutexes with Robust option - automatic
+ * - not on BSD, systems using POSIX semaphores.
+ * Otherwise just make all programs using the database close it;
+ * the lockfile is always reset on first open of the environment.
+ *
+ * - On BSD systems or others configured with MDB_USE_POSIX_SEM,
+ * startup can fail due to semaphores owned by another userid.
+ *
+ * Fix: Open and close the database as the user which owns the
+ * semaphores (likely last user) or as root, while no other
+ * process is using the database.
+ *
+ * Restrictions/caveats (in addition to those listed for some functions):
+ *
+ * - Only the database owner should normally use the database on
+ * BSD systems or when otherwise configured with MDB_USE_POSIX_SEM.
+ * Multiple users can cause startup to fail later, as noted above.
+ *
+ * - There is normally no pure read-only mode, since readers need write
+ * access to locks and lock file. Exceptions: On read-only filesystems
+ * or with the #MDB_NOLOCK flag described under #mdb_env_open().
+ *
+ * - An LMDB configuration will often reserve considerable \b unused
+ * memory address space and maybe file size for future growth.
+ * This does not use actual memory or disk space, but users may need
+ * to understand the difference so they won't be scared off.
+ *
+ * - By default, in versions before 0.9.10, unused portions of the data
+ * file might receive garbage data from memory freed by other code.
+ * (This does not happen when using the #MDB_WRITEMAP flag.) As of
+ * 0.9.10 the default behavior is to initialize such memory before
+ * writing to the data file. Since there may be a slight performance
+ * cost due to this initialization, applications may disable it using
+ * the #MDB_NOMEMINIT flag. Applications handling sensitive data
+ * which must not be written should not use this flag. This flag is
+ * irrelevant when using #MDB_WRITEMAP.
+ *
+ * - A thread can only use one transaction at a time, plus any child
+ * transactions. Each transaction belongs to one thread. See below.
+ * The #MDB_NOTLS flag changes this for read-only transactions.
+ *
+ * - Use an MDB_env* in the process which opened it, not after fork().
+ *
+ * - Do not have the same LMDB database open twice in the same process
+ * at the same time. Not even from a plain open() call - close()ing it
+ * breaks fcntl() advisory locking. (It is OK to reopen it after
+ * fork() - exec*(), since the lockfile has FD_CLOEXEC set.)
+ *
+ * - Avoid long-lived transactions. Read transactions prevent
+ * reuse of pages freed by newer write transactions, thus the
+ * database can grow quickly. Write transactions prevent
+ * other write transactions, since writes are serialized.
+ *
+ * - Avoid suspending a process with active transactions. These
+ * would then be "long-lived" as above. Also read transactions
+ * suspended when writers commit could sometimes see wrong data.
+ *
+ * ...when several processes can use a database concurrently:
+ *
+ * - Avoid aborting a process with an active transaction.
+ * The transaction becomes "long-lived" as above until a check
+ * for stale readers is performed or the lockfile is reset,
+ * since the process may not remove it from the lockfile.
+ *
+ * This does not apply to write transactions if the system clears
+ * stale writers, see above.
+ *
+ * - If you do that anyway, do a periodic check for stale readers. Or
+ * close the environment once in a while, so the lockfile can get reset.
+ *
+ * - Do not use LMDB databases on remote filesystems, even between
+ * processes on the same host. This breaks flock() on some OSes,
+ * possibly memory map sync, and certainly sync between programs
+ * on different hosts.
+ *
+ * - Opening a database can fail if another process is opening or
+ * closing it at exactly the same time.
+ *
+ * @author Howard Chu, Symas Corporation.
+ *
+ * @copyright Copyright 2011-2021 Howard Chu, Symas Corp. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted only as authorized by the OpenLDAP
+ * Public License.
+ *
+ * A copy of this license is available in the file LICENSE in the
+ * top-level directory of the distribution or, alternatively, at
+ * <http://www.OpenLDAP.org/license.html>.
+ *
+ * @par Derived From:
+ * This code is derived from btree.c written by Martin Hedenfalk.
+ *
+ * Copyright (c) 2009, 2010 Martin Hedenfalk <martin@bzero.se>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef _LMDB_H_
+#define _LMDB_H_
+
+#include <sys/types.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** Unix permissions for creating files, or dummy definition for Windows */
+#ifdef _MSC_VER
+typedef int mdb_mode_t;
+#else
+typedef mode_t mdb_mode_t;
+#endif
+
+/** An abstraction for a file handle.
+ * On POSIX systems file handles are small integers. On Windows
+ * they're opaque pointers.
+ */
+#ifdef _WIN32
+typedef void *mdb_filehandle_t;
+#else
+typedef int mdb_filehandle_t;
+#endif
+
+/** @defgroup mdb LMDB API
+ * @{
+ * @brief OpenLDAP Lightning Memory-Mapped Database Manager
+ */
+/** @defgroup Version Version Macros
+ * @{
+ */
+/** Library major version */
+#define MDB_VERSION_MAJOR 0
+/** Library minor version */
+#define MDB_VERSION_MINOR 9
+/** Library patch version */
+#define MDB_VERSION_PATCH 31
+
+/** Combine args a,b,c into a single integer for easy version comparisons */
+#define MDB_VERINT(a,b,c) (((a) << 24) | ((b) << 16) | (c))
+
+/** The full library version as a single integer */
+#define MDB_VERSION_FULL \
+ MDB_VERINT(MDB_VERSION_MAJOR,MDB_VERSION_MINOR,MDB_VERSION_PATCH)
+
+/** The release date of this library version */
+#define MDB_VERSION_DATE "July 10, 2023"
+
+/** A stringifier for the version info */
+#define MDB_VERSTR(a,b,c,d) "LMDB " #a "." #b "." #c ": (" d ")"
+
+/** A helper for the stringifier macro */
+#define MDB_VERFOO(a,b,c,d) MDB_VERSTR(a,b,c,d)
+
+/** The full library version as a C string */
+#define MDB_VERSION_STRING \
+ MDB_VERFOO(MDB_VERSION_MAJOR,MDB_VERSION_MINOR,MDB_VERSION_PATCH,MDB_VERSION_DATE)
+/** @} */
+
+/** @brief Opaque structure for a database environment.
+ *
+ * A DB environment supports multiple databases, all residing in the same
+ * shared-memory map.
+ */
+typedef struct MDB_env MDB_env;
+
+/** @brief Opaque structure for a transaction handle.
+ *
+ * All database operations require a transaction handle. Transactions may be
+ * read-only or read-write.
+ */
+typedef struct MDB_txn MDB_txn;
+
+/** @brief A handle for an individual database in the DB environment. */
+typedef unsigned int MDB_dbi;
+
+/** @brief Opaque structure for navigating through a database */
+typedef struct MDB_cursor MDB_cursor;
+
+/** @brief Generic structure used for passing keys and data in and out
+ * of the database.
+ *
+ * Values returned from the database are valid only until a subsequent
+ * update operation, or the end of the transaction. Do not modify or
+ * free them, they commonly point into the database itself.
+ *
+ * Key sizes must be between 1 and #mdb_env_get_maxkeysize() inclusive.
+ * The same applies to data sizes in databases with the #MDB_DUPSORT flag.
+ * Other data items can in theory be from 0 to 0xffffffff bytes long.
+ */
+typedef struct MDB_val {
+ size_t mv_size; /**< size of the data item */
+ void *mv_data; /**< address of the data item */
+} MDB_val;
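+
+/* Lifetime sketch (illustrative): data returned by mdb_get() points into
+ * the memory map, so copy it out if it must outlive the transaction:
+ *
+ *	MDB_val k, v;
+ *	if (mdb_get(txn, dbi, &k, &v) == MDB_SUCCESS) {
+ *		char *copy = malloc(v.mv_size);
+ *		memcpy(copy, v.mv_data, v.mv_size); // survives txn end
+ *	}
+ */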
+
+/** @brief A callback function used to compare two keys in a database */
+typedef int (MDB_cmp_func)(const MDB_val *a, const MDB_val *b);
+
+/** @brief A callback function used to relocate a position-dependent data item
+ * in a fixed-address database.
+ *
+ * The \b newptr gives the item's desired address in
+ * the memory map, and \b oldptr gives its previous address. The item's actual
+ * data resides at the address in \b item. This callback is expected to walk
+ * through the fields of the record in \b item and modify any
+ * values based at the \b oldptr address to be relative to the \b newptr address.
+ * @param[in,out] item The item that is to be relocated.
+ * @param[in] oldptr The previous address.
+ * @param[in] newptr The new address to relocate to.
+ * @param[in] relctx An application-provided context, set by #mdb_set_relctx().
+ * @todo This feature is currently unimplemented.
+ */
+typedef void (MDB_rel_func)(MDB_val *item, void *oldptr, void *newptr, void *relctx);
+
+/** @defgroup mdb_env Environment Flags
+ * @{
+ */
+ /** mmap at a fixed address (experimental) */
+#define MDB_FIXEDMAP 0x01
+ /** no environment directory */
+#define MDB_NOSUBDIR 0x4000
+ /** don't fsync after commit */
+#define MDB_NOSYNC 0x10000
+ /** read only */
+#define MDB_RDONLY 0x20000
+ /** don't fsync metapage after commit */
+#define MDB_NOMETASYNC 0x40000
+ /** use writable mmap */
+#define MDB_WRITEMAP 0x80000
+ /** use asynchronous msync when #MDB_WRITEMAP is used */
+#define MDB_MAPASYNC 0x100000
+ /** tie reader locktable slots to #MDB_txn objects instead of to threads */
+#define MDB_NOTLS 0x200000
+ /** don't do any locking, caller must manage their own locks */
+#define MDB_NOLOCK 0x400000
+ /** don't do readahead (no effect on Windows) */
+#define MDB_NORDAHEAD 0x800000
+ /** don't initialize malloc'd memory before writing to datafile */
+#define MDB_NOMEMINIT 0x1000000
+/** @} */
+
+/** @defgroup mdb_dbi_open Database Flags
+ * @{
+ */
+ /** use reverse string keys */
+#define MDB_REVERSEKEY 0x02
+ /** use sorted duplicates */
+#define MDB_DUPSORT 0x04
+ /** numeric keys in native byte order: either unsigned int or size_t.
+ * The keys must all be of the same size. */
+#define MDB_INTEGERKEY 0x08
+ /** with #MDB_DUPSORT, sorted dup items have fixed size */
+#define MDB_DUPFIXED 0x10
+ /** with #MDB_DUPSORT, dups are #MDB_INTEGERKEY-style integers */
+#define MDB_INTEGERDUP 0x20
+ /** with #MDB_DUPSORT, use reverse string dups */
+#define MDB_REVERSEDUP 0x40
+ /** create DB if not already existing */
+#define MDB_CREATE 0x40000
+/** @} */
+
+/** @defgroup mdb_put Write Flags
+ * @{
+ */
+/** For put: Don't write if the key already exists. */
+#define MDB_NOOVERWRITE 0x10
+/** Only for #MDB_DUPSORT<br>
+ * For put: don't write if the key and data pair already exist.<br>
+ * For mdb_cursor_del: remove all duplicate data items.
+ */
+#define MDB_NODUPDATA 0x20
+/** For mdb_cursor_put: overwrite the current key/data pair */
+#define MDB_CURRENT 0x40
+/** For put: Just reserve space for data, don't copy it. Return a
+ * pointer to the reserved space.
+ */
+#define MDB_RESERVE 0x10000
+/** Data is being appended, don't split full pages. */
+#define MDB_APPEND 0x20000
+/** Duplicate data is being appended, don't split full pages. */
+#define MDB_APPENDDUP 0x40000
+/** Store multiple data items in one call. Only for #MDB_DUPFIXED. */
+#define MDB_MULTIPLE 0x80000
+/* @} */
+
+/** @defgroup mdb_copy Copy Flags
+ * @{
+ */
+/** Compacting copy: Omit free space from copy, and renumber all
+ * pages sequentially.
+ */
+#define MDB_CP_COMPACT 0x01
+/* @} */
+
+/** @brief Cursor Get operations.
+ *
+ * This is the set of all operations for retrieving data
+ * using a cursor.
+ */
+typedef enum MDB_cursor_op {
+ MDB_FIRST, /**< Position at first key/data item */
+ MDB_FIRST_DUP, /**< Position at first data item of current key.
+ Only for #MDB_DUPSORT */
+ MDB_GET_BOTH, /**< Position at key/data pair. Only for #MDB_DUPSORT */
+ MDB_GET_BOTH_RANGE, /**< position at key, nearest data. Only for #MDB_DUPSORT */
+ MDB_GET_CURRENT, /**< Return key/data at current cursor position */
+ MDB_GET_MULTIPLE, /**< Return up to a page of duplicate data items
+ from current cursor position. Move cursor to prepare
+ for #MDB_NEXT_MULTIPLE. Only for #MDB_DUPFIXED */
+ MDB_LAST, /**< Position at last key/data item */
+ MDB_LAST_DUP, /**< Position at last data item of current key.
+ Only for #MDB_DUPSORT */
+ MDB_NEXT, /**< Position at next data item */
+ MDB_NEXT_DUP, /**< Position at next data item of current key.
+ Only for #MDB_DUPSORT */
+ MDB_NEXT_MULTIPLE, /**< Return up to a page of duplicate data items
+ from next cursor position. Move cursor to prepare
+ for #MDB_NEXT_MULTIPLE. Only for #MDB_DUPFIXED */
+ MDB_NEXT_NODUP, /**< Position at first data item of next key */
+ MDB_PREV, /**< Position at previous data item */
+ MDB_PREV_DUP, /**< Position at previous data item of current key.
+ Only for #MDB_DUPSORT */
+ MDB_PREV_NODUP, /**< Position at last data item of previous key */
+ MDB_SET, /**< Position at specified key */
+ MDB_SET_KEY, /**< Position at specified key, return key + data */
+ MDB_SET_RANGE, /**< Position at first key greater than or equal to specified key. */
+ MDB_PREV_MULTIPLE /**< Position at previous page and return up to
+ a page of duplicate data items. Only for #MDB_DUPFIXED */
+} MDB_cursor_op;
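+
+/* Iteration sketch (illustrative): a full scan typically opens a cursor
+ * and loops on #MDB_NEXT until it stops returning MDB_SUCCESS:
+ *
+ *	MDB_cursor *cur;
+ *	MDB_val k, v;
+ *	mdb_cursor_open(txn, dbi, &cur);
+ *	while (mdb_cursor_get(cur, &k, &v, MDB_NEXT) == MDB_SUCCESS) {
+ *		// k/v point into the map; valid only while txn lives
+ *	}
+ *	mdb_cursor_close(cur);
+ */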
+
+/** @defgroup errors Return Codes
+ *
+ * BerkeleyDB uses -30800 to -30999, we'll go under them
+ * @{
+ */
+ /** Successful result */
+#define MDB_SUCCESS 0
+ /** key/data pair already exists */
+#define MDB_KEYEXIST (-30799)
+ /** key/data pair not found (EOF) */
+#define MDB_NOTFOUND (-30798)
+ /** Requested page not found - this usually indicates corruption */
+#define MDB_PAGE_NOTFOUND (-30797)
+ /** Located page was wrong type */
+#define MDB_CORRUPTED (-30796)
+ /** Update of meta page failed or environment had fatal error */
+#define MDB_PANIC (-30795)
+ /** Environment version mismatch */
+#define MDB_VERSION_MISMATCH (-30794)
+ /** File is not a valid LMDB file */
+#define MDB_INVALID (-30793)
+ /** Environment mapsize reached */
+#define MDB_MAP_FULL (-30792)
+ /** Environment maxdbs reached */
+#define MDB_DBS_FULL (-30791)
+ /** Environment maxreaders reached */
+#define MDB_READERS_FULL (-30790)
+ /** Too many TLS keys in use - Windows only */
+#define MDB_TLS_FULL (-30789)
+ /** Txn has too many dirty pages */
+#define MDB_TXN_FULL (-30788)
+ /** Cursor stack too deep - internal error */
+#define MDB_CURSOR_FULL (-30787)
+ /** Page has not enough space - internal error */
+#define MDB_PAGE_FULL (-30786)
+ /** Database contents grew beyond environment mapsize */
+#define MDB_MAP_RESIZED (-30785)
+ /** Operation and DB incompatible, or DB type changed. This can mean:
+ * <ul>
+ * <li>The operation expects an #MDB_DUPSORT / #MDB_DUPFIXED database.
+ * <li>Opening a named DB when the unnamed DB has #MDB_DUPSORT / #MDB_INTEGERKEY.
+ * <li>Accessing a data record as a database, or vice versa.
+ * <li>The database was dropped and recreated with different flags.
+ * </ul>
+ */
+#define MDB_INCOMPATIBLE (-30784)
+ /** Invalid reuse of reader locktable slot */
+#define MDB_BAD_RSLOT (-30783)
+ /** Transaction must abort, has a child, or is invalid */
+#define MDB_BAD_TXN (-30782)
+ /** Unsupported size of key/DB name/data, or wrong DUPFIXED size */
+#define MDB_BAD_VALSIZE (-30781)
+ /** The specified DBI was changed unexpectedly */
+#define MDB_BAD_DBI (-30780)
+ /** The last defined error code */
+#define MDB_LAST_ERRCODE MDB_BAD_DBI
+/** @} */
+
+/** @brief Statistics for a database in the environment */
+typedef struct MDB_stat {
+ unsigned int ms_psize; /**< Size of a database page.
+ This is currently the same for all databases. */
+ unsigned int ms_depth; /**< Depth (height) of the B-tree */
+ size_t ms_branch_pages; /**< Number of internal (non-leaf) pages */
+ size_t ms_leaf_pages; /**< Number of leaf pages */
+ size_t ms_overflow_pages; /**< Number of overflow pages */
+ size_t ms_entries; /**< Number of data items */
+} MDB_stat;
+
+/** @brief Information about the environment */
+typedef struct MDB_envinfo {
+ void *me_mapaddr; /**< Address of map, if fixed */
+ size_t me_mapsize; /**< Size of the data memory map */
+ size_t me_last_pgno; /**< ID of the last used page */
+ size_t me_last_txnid; /**< ID of the last committed transaction */
+ unsigned int me_maxreaders; /**< max reader slots in the environment */
+ unsigned int me_numreaders; /**< max reader slots used in the environment */
+} MDB_envinfo;
+
+ /** @brief Return the LMDB library version information.
+ *
+ * @param[out] major if non-NULL, the library major version number is copied here
+ * @param[out] minor if non-NULL, the library minor version number is copied here
+ * @param[out] patch if non-NULL, the library patch version number is copied here
+ * @retval "version string" The library version as a string
+ */
+char *mdb_version(int *major, int *minor, int *patch);
+
+ /** @brief Return a string describing a given error code.
+ *
+ * This function is a superset of the ANSI C X3.159-1989 (ANSI C) strerror(3)
+ * function. If the error code is greater than or equal to 0, then the string
+ * returned by the system function strerror(3) is returned. If the error code
+ * is less than 0, an error string corresponding to the LMDB library error is
+ * returned. See @ref errors for a list of LMDB-specific error codes.
+ * @param[in] err The error code
+ * @retval "error message" The description of the error
+ */
+char *mdb_strerror(int err);
+
+ /** @brief Create an LMDB environment handle.
+ *
+ * This function allocates memory for a #MDB_env structure. To release
+ * the allocated memory and discard the handle, call #mdb_env_close().
+ * Before the handle may be used, it must be opened using #mdb_env_open().
+ * Various other options may also need to be set before opening the handle,
+ * e.g. #mdb_env_set_mapsize(), #mdb_env_set_maxreaders(), #mdb_env_set_maxdbs(),
+ * depending on usage requirements.
+ * @param[out] env The address where the new handle will be stored
+ * @return A non-zero error value on failure and 0 on success.
+ */
+int mdb_env_create(MDB_env **env);
+
+ /** @brief Open an environment handle.
+ *
+ * If this function fails, #mdb_env_close() must be called to discard the #MDB_env handle.
+ * @param[in] env An environment handle returned by #mdb_env_create()
+ * @param[in] path The directory in which the database files reside. This
+ * directory must already exist and be writable.
+ * @param[in] flags Special options for this environment. This parameter
+ * must be set to 0 or by bitwise OR'ing together one or more of the
+ * values described here.
+ * Flags set by mdb_env_set_flags() are also used.
+ * <ul>
+ * <li>#MDB_FIXEDMAP
+ * use a fixed address for the mmap region. This flag must be specified
+ * when creating the environment, and is stored persistently in the environment.
+ * If successful, the memory map will always reside at the same virtual address
+ * and pointers used to reference data items in the database will be constant
+ * across multiple invocations. This option may not always work, depending on
+ * how the operating system has allocated memory to shared libraries and other uses.
+ * The feature is highly experimental.
+ * <li>#MDB_NOSUBDIR
+ * By default, LMDB creates its environment in a directory whose
+ * pathname is given in \b path, and creates its data and lock files
+ * under that directory. With this option, \b path is used as-is for
+ * the database main data file. The database lock file is the \b path
+ * with "-lock" appended.
+ * <li>#MDB_RDONLY
+ * Open the environment in read-only mode. No write operations will be
+ * allowed. LMDB will still modify the lock file - except on read-only
+ * filesystems, where LMDB does not use locks.
+ * <li>#MDB_WRITEMAP
+ * Use a writeable memory map unless MDB_RDONLY is set. This uses
+ * fewer mallocs but loses protection from application bugs
+ * like wild pointer writes and other bad updates into the database.
+ * This may be slightly faster for DBs that fit entirely in RAM, but
+ * is slower for DBs larger than RAM.
+ * Incompatible with nested transactions.
+ * Do not mix processes with and without MDB_WRITEMAP on the same
+ * environment. This can defeat durability (#mdb_env_sync etc).
+ * <li>#MDB_NOMETASYNC
+ * Flush system buffers to disk only once per transaction, omit the
+ * metadata flush. Defer that until the system flushes files to disk,
+ * or next non-MDB_RDONLY commit or #mdb_env_sync(). This optimization
+ * maintains database integrity, but a system crash may undo the last
+ * committed transaction. I.e. it preserves the ACI (atomicity,
+ * consistency, isolation) but not D (durability) database property.
+ * This flag may be changed at any time using #mdb_env_set_flags().
+ * <li>#MDB_NOSYNC
+ * Don't flush system buffers to disk when committing a transaction.
+ * This optimization means a system crash can corrupt the database or
+ * lose the last transactions if buffers are not yet flushed to disk.
+ * The risk is governed by how often the system flushes dirty buffers
+ * to disk and how often #mdb_env_sync() is called. However, if the
+ * filesystem preserves write order and the #MDB_WRITEMAP flag is not
+ * used, transactions exhibit ACI (atomicity, consistency, isolation)
+ * properties and only lose D (durability). I.e. database integrity
+ * is maintained, but a system crash may undo the final transactions.
+ * Note that (#MDB_NOSYNC | #MDB_WRITEMAP) leaves the system with no
+ * hint for when to write transactions to disk, unless #mdb_env_sync()
+ * is called. (#MDB_MAPASYNC | #MDB_WRITEMAP) may be preferable.
+ * This flag may be changed at any time using #mdb_env_set_flags().
+ * <li>#MDB_MAPASYNC
+ * When using #MDB_WRITEMAP, use asynchronous flushes to disk.
+ * As with #MDB_NOSYNC, a system crash can then corrupt the
+ * database or lose the last transactions. Calling #mdb_env_sync()
+ * ensures on-disk database integrity until next commit.
+ * This flag may be changed at any time using #mdb_env_set_flags().
+ * <li>#MDB_NOTLS
+ * Don't use Thread-Local Storage. Tie reader locktable slots to
+ * #MDB_txn objects instead of to threads. I.e. #mdb_txn_reset() keeps
+ * the slot reserved for the #MDB_txn object. A thread may use parallel
+ * read-only transactions. A read-only transaction may span threads if
+ * the user synchronizes its use. Applications that multiplex many
+ * user threads over individual OS threads need this option. Such an
+ * application must also serialize the write transactions in an OS
+ * thread, since LMDB's write locking is unaware of the user threads.
+ * <li>#MDB_NOLOCK
+ * Don't do any locking. If concurrent access is anticipated, the
+ * caller must manage all concurrency itself. For proper operation
+ * the caller must enforce single-writer semantics, and must ensure
+ * that no readers are using old transactions while a writer is
+ * active. The simplest approach is to use an exclusive lock so that
+ * no readers may be active at all when a writer begins.
+ * <li>#MDB_NORDAHEAD
+ * Turn off readahead. Most operating systems perform readahead on
+ * read requests by default. This option turns it off if the OS
+ * supports it. Turning it off may help random read performance
+ * when the DB is larger than RAM and system RAM is full.
+ * The option is not implemented on Windows.
+ * <li>#MDB_NOMEMINIT
+ * Don't initialize malloc'd memory before writing to unused spaces
+ * in the data file. By default, memory for pages written to the data
+ * file is obtained using malloc. While these pages may be reused in
+ * subsequent transactions, freshly malloc'd pages will be initialized
+ * to zeroes before use. This avoids persisting leftover data from other
+ * code (that used the heap and subsequently freed the memory) into the
+ * data file. Note that many other system libraries may allocate
+ * and free memory from the heap for arbitrary uses. E.g., stdio may
+ * use the heap for file I/O buffers. This initialization step has a
+ * modest performance cost so some applications may want to disable
+ * it using this flag. This option can be a problem for applications
+ * which handle sensitive data like passwords, and it makes memory
+ * checkers like Valgrind noisy. This flag is not needed with #MDB_WRITEMAP,
+ * which writes directly to the mmap instead of using malloc for pages. The
+ * initialization is also skipped if #MDB_RESERVE is used; the
+ * caller is expected to overwrite all of the memory that was
+ * reserved in that case.
+ * This flag may be changed at any time using #mdb_env_set_flags().
+ * </ul>
+ * @param[in] mode The UNIX permissions to set on created files and semaphores.
+ * This parameter is ignored on Windows.
+ * @return A non-zero error value on failure and 0 on success. Some possible
+ * errors are:
+ * <ul>
+ * <li>#MDB_VERSION_MISMATCH - the version of the LMDB library doesn't match the
+ * version that created the database environment.
+ * <li>#MDB_INVALID - the environment file headers are corrupted.
+ * <li>ENOENT - the directory specified by the path parameter doesn't exist.
+ * <li>EACCES - the user didn't have permission to access the environment files.
+ * <li>EAGAIN - the environment was locked by another process.
+ * </ul>
+ */
+int mdb_env_open(MDB_env *env, const char *path, unsigned int flags, mdb_mode_t mode);
+
+ /** @brief Copy an LMDB environment to the specified path.
+ *
+ * This function may be used to make a backup of an existing environment.
+ * No lockfile is created, since it gets recreated at need.
+ * @note This call can trigger significant file size growth if run in
+ * parallel with write transactions, because it employs a read-only
+ * transaction. See long-lived transactions under @ref caveats_sec.
+ * @param[in] env An environment handle returned by #mdb_env_create(). It
+ * must have already been opened successfully.
+ * @param[in] path The directory in which the copy will reside. This
+ * directory must already exist and be writable but must otherwise be
+ * empty.
+ * @return A non-zero error value on failure and 0 on success.
+ */
+int mdb_env_copy(MDB_env *env, const char *path);
+
+ /** @brief Copy an LMDB environment to the specified file descriptor.
+ *
+ * This function may be used to make a backup of an existing environment.
+ * No lockfile is created, since it gets recreated at need.
+ * @note This call can trigger significant file size growth if run in
+ * parallel with write transactions, because it employs a read-only
+ * transaction. See long-lived transactions under @ref caveats_sec.
+ * @param[in] env An environment handle returned by #mdb_env_create(). It
+ * must have already been opened successfully.
+ * @param[in] fd The filedescriptor to write the copy to. It must
+ * have already been opened for Write access.
+ * @return A non-zero error value on failure and 0 on success.
+ */
+int mdb_env_copyfd(MDB_env *env, mdb_filehandle_t fd);
+
+ /** @brief Copy an LMDB environment to the specified path, with options.
+ *
+ * This function may be used to make a backup of an existing environment.
+ * No lockfile is created, since it gets recreated at need.
+ * @note This call can trigger significant file size growth if run in
+ * parallel with write transactions, because it employs a read-only
+ * transaction. See long-lived transactions under @ref caveats_sec.
+ * @param[in] env An environment handle returned by #mdb_env_create(). It
+ * must have already been opened successfully.
+ * @param[in] path The directory in which the copy will reside. This
+ * directory must already exist and be writable but must otherwise be
+ * empty.
+ * @param[in] flags Special options for this operation. This parameter
+ * must be set to 0 or by bitwise OR'ing together one or more of the
+ * values described here.
+ * <ul>
+ * <li>#MDB_CP_COMPACT - Perform compaction while copying: omit free
+ * pages and sequentially renumber all pages in output. This option
+ * consumes more CPU and runs more slowly than the default.
+ * Currently it fails if the environment has suffered a page leak.
+ * </ul>
+ * @return A non-zero error value on failure and 0 on success.
+ */
+int mdb_env_copy2(MDB_env *env, const char *path, unsigned int flags);
+
+ /** @brief Copy an LMDB environment to the specified file descriptor,
+ * with options.
+ *
+ * This function may be used to make a backup of an existing environment.
+ * No lockfile is created, since it gets recreated at need. See
+ * #mdb_env_copy2() for further details.
+ * @note This call can trigger significant file size growth if run in
+ * parallel with write transactions, because it employs a read-only
+ * transaction. See long-lived transactions under @ref caveats_sec.
+ * @param[in] env An environment handle returned by #mdb_env_create(). It
+ * must have already been opened successfully.
+ * @param[in] fd The filedescriptor to write the copy to. It must
+ * have already been opened for Write access.
+ * @param[in] flags Special options for this operation.
+ * See #mdb_env_copy2() for options.
+ * @return A non-zero error value on failure and 0 on success.
+ */
+int mdb_env_copyfd2(MDB_env *env, mdb_filehandle_t fd, unsigned int flags);
+
+ /** @brief Return statistics about the LMDB environment.
+ *
+ * @param[in] env An environment handle returned by #mdb_env_create()
+ * @param[out] stat The address of an #MDB_stat structure
+ * where the statistics will be copied
+ */
+int mdb_env_stat(MDB_env *env, MDB_stat *stat);
+
+ /** @brief Return information about the LMDB environment.
+ *
+ * @param[in] env An environment handle returned by #mdb_env_create()
+ * @param[out] stat The address of an #MDB_envinfo structure
+ * where the information will be copied
+ */
+int mdb_env_info(MDB_env *env, MDB_envinfo *stat);
+
+ /** @brief Flush the data buffers to disk.
+ *
+ * Data is always written to disk when #mdb_txn_commit() is called,
+ * but the operating system may keep it buffered. LMDB always flushes
+ * the OS buffers upon commit as well, unless the environment was
+ * opened with #MDB_NOSYNC or in part #MDB_NOMETASYNC. This call is
+ * not valid if the environment was opened with #MDB_RDONLY.
+ * @param[in] env An environment handle returned by #mdb_env_create()
+ * @param[in] force If non-zero, force a synchronous flush. Otherwise
+ * if the environment has the #MDB_NOSYNC flag set the flushes
+ * will be omitted, and with #MDB_MAPASYNC they will be asynchronous.
+ * @return A non-zero error value on failure and 0 on success. Some possible
+ * errors are:
+ * <ul>
+ * <li>EACCES - the environment is read-only.
+ * <li>EINVAL - an invalid parameter was specified.
+ * <li>EIO - an error occurred during synchronization.
+ * </ul>
+ */
+int mdb_env_sync(MDB_env *env, int force);
+
+ /** @brief Close the environment and release the memory map.
+ *
+ * Only a single thread may call this function. All transactions, databases,
+ * and cursors must already be closed before calling this function. Attempts to
+ * use any such handles after calling this function will cause a SIGSEGV.
+ * The environment handle will be freed and must not be used again after this call.
+ * @param[in] env An environment handle returned by #mdb_env_create()
+ */
+void mdb_env_close(MDB_env *env);
+
+ /** @brief Set environment flags.
+ *
+ * This may be used to set some flags in addition to those from
+ * #mdb_env_open(), or to unset these flags. If several threads
+ * change the flags at the same time, the result is undefined.
+ * @param[in] env An environment handle returned by #mdb_env_create()
+ * @param[in] flags The flags to change, bitwise OR'ed together
+ * @param[in] onoff A non-zero value sets the flags, zero clears them.
+ * @return A non-zero error value on failure and 0 on success. Some possible
+ * errors are:
+ * <ul>
+ * <li>EINVAL - an invalid parameter was specified.
+ * </ul>
+ */
+int mdb_env_set_flags(MDB_env *env, unsigned int flags, int onoff);
+
+ /** @brief Get environment flags.
+ *
+ * @param[in] env An environment handle returned by #mdb_env_create()
+ * @param[out] flags The address of an integer to store the flags
+ * @return A non-zero error value on failure and 0 on success. Some possible
+ * errors are:
+ * <ul>
+ * <li>EINVAL - an invalid parameter was specified.
+ * </ul>
+ */
+int mdb_env_get_flags(MDB_env *env, unsigned int *flags);
+
+ /** @brief Return the path that was used in #mdb_env_open().
+ *
+ * @param[in] env An environment handle returned by #mdb_env_create()
+ * @param[out] path Address of a string pointer to contain the path. This
+ * is the actual string in the environment, not a copy. It should not be
+ * altered in any way.
+ * @return A non-zero error value on failure and 0 on success. Some possible
+ * errors are:
+ * <ul>
+ * <li>EINVAL - an invalid parameter was specified.
+ * </ul>
+ */
+int mdb_env_get_path(MDB_env *env, const char **path);
+
+ /** @brief Return the filedescriptor for the given environment.
+ *
+ * This function may be called after fork(), so the descriptor can be
+ * closed before exec*(). Other LMDB file descriptors have FD_CLOEXEC.
+ * (Until LMDB 0.9.18, only the lockfile had that.)
+ *
+ * @param[in] env An environment handle returned by #mdb_env_create()
+ * @param[out] fd Address of a mdb_filehandle_t to contain the descriptor.
+ * @return A non-zero error value on failure and 0 on success. Some possible
+ * errors are:
+ * <ul>
+ * <li>EINVAL - an invalid parameter was specified.
+ * </ul>
+ */
+int mdb_env_get_fd(MDB_env *env, mdb_filehandle_t *fd);
+
+ /** @brief Set the size of the memory map to use for this environment.
+ *
+ * The size should be a multiple of the OS page size. The default is
+ * 10485760 bytes. The size of the memory map is also the maximum size
+ * of the database. The value should be chosen as large as possible,
+ * to accommodate future growth of the database.
+ * This function should be called after #mdb_env_create() and before #mdb_env_open().
+ * It may be called at later times if no transactions are active in
+ * this process. Note that the library does not check for this condition,
+ * the caller must ensure it explicitly.
+ *
+ * The new size takes effect immediately for the current process but
+ * will not be persisted to any others until a write transaction has been
+ * committed by the current process. Also, only mapsize increases are
+ * persisted into the environment.
+ *
+ * If the mapsize is increased by another process, and data has grown
+ * beyond the range of the current mapsize, #mdb_txn_begin() will
+ * return #MDB_MAP_RESIZED. This function may be called with a size
+ * of zero to adopt the new size.
+ *
+ * Any attempt to set a size smaller than the space already consumed
+ * by the environment will be silently changed to the current size of the used space.
+ * @param[in] env An environment handle returned by #mdb_env_create()
+ * @param[in] size The size in bytes
+ * @return A non-zero error value on failure and 0 on success. Some possible
+ * errors are:
+ * <ul>
+ * <li>EINVAL - an invalid parameter was specified, or the environment has
+ * an active write transaction.
+ * </ul>
+ */
+int mdb_env_set_mapsize(MDB_env *env, size_t size);
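+
+/* Resize sketch (illustrative): a process that sees #MDB_MAP_RESIZED can
+ * adopt the size set by another process by passing zero:
+ *
+ *	int rc = mdb_txn_begin(env, NULL, 0, &txn);
+ *	if (rc == MDB_MAP_RESIZED) {
+ *		mdb_env_set_mapsize(env, 0); // adopt current on-disk size
+ *		rc = mdb_txn_begin(env, NULL, 0, &txn);
+ *	}
+ */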
+
+ /** @brief Set the maximum number of threads/reader slots for the environment.
+ *
+ * This defines the number of slots in the lock table that is used to
+ * track readers in the environment. The default is 126.
+ * Starting a read-only transaction normally ties a lock table slot to the
+ * current thread until the environment closes or the thread exits. If
+ * MDB_NOTLS is in use, #mdb_txn_begin() instead ties the slot to the
+ * MDB_txn object until it or the #MDB_env object is destroyed.
+ * This function may only be called after #mdb_env_create() and before #mdb_env_open().
+ * @param[in] env An environment handle returned by #mdb_env_create()
+ * @param[in] readers The maximum number of reader lock table slots
+ * @return A non-zero error value on failure and 0 on success. Some possible
+ * errors are:
+ * <ul>
+ * <li>EINVAL - an invalid parameter was specified, or the environment is already open.
+ * </ul>
+ */
+int mdb_env_set_maxreaders(MDB_env *env, unsigned int readers);
+
+ /** @brief Get the maximum number of threads/reader slots for the environment.
+ *
+ * @param[in] env An environment handle returned by #mdb_env_create()
+ * @param[out] readers Address of an integer to store the number of readers
+ * @return A non-zero error value on failure and 0 on success. Some possible
+ * errors are:
+ * <ul>
+ * <li>EINVAL - an invalid parameter was specified.
+ * </ul>
+ */
+int mdb_env_get_maxreaders(MDB_env *env, unsigned int *readers);
+
+ /** @brief Set the maximum number of named databases for the environment.
+ *
+ * This function is only needed if multiple databases will be used in the
+ * environment. Simpler applications that use the environment as a single
+ * unnamed database can ignore this option.
+ * This function may only be called after #mdb_env_create() and before #mdb_env_open().
+ *
+ * Currently a moderate number of slots are cheap but a huge number gets
+ * expensive: 7-120 words per transaction, and every #mdb_dbi_open()
+ * does a linear search of the opened slots.
+ * @param[in] env An environment handle returned by #mdb_env_create()
+ * @param[in] dbs The maximum number of databases
+ * @return A non-zero error value on failure and 0 on success. Some possible
+ * errors are:
+ * <ul>
+ * <li>EINVAL - an invalid parameter was specified, or the environment is already open.
+ * </ul>
+ */
+int mdb_env_set_maxdbs(MDB_env *env, MDB_dbi dbs);
+
+ /** @brief Get the maximum size of keys and #MDB_DUPSORT data we can write.
+ *
+ * Depends on the compile-time constant #MDB_MAXKEYSIZE. Default 511.
+ * See @ref MDB_val.
+ * @param[in] env An environment handle returned by #mdb_env_create()
+ * @return The maximum size of a key we can write
+ */
+int mdb_env_get_maxkeysize(MDB_env *env);
+
+ /** @brief Set application information associated with the #MDB_env.
+ *
+ * @param[in] env An environment handle returned by #mdb_env_create()
+ * @param[in] ctx An arbitrary pointer for whatever the application needs.
+ * @return A non-zero error value on failure and 0 on success.
+ */
+int mdb_env_set_userctx(MDB_env *env, void *ctx);
+
+ /** @brief Get the application information associated with the #MDB_env.
+ *
+ * @param[in] env An environment handle returned by #mdb_env_create()
+ * @return The pointer set by #mdb_env_set_userctx().
+ */
+void *mdb_env_get_userctx(MDB_env *env);
+
+ /** @brief A callback function for most LMDB assert() failures,
+ * called before printing the message and aborting.
+ *
+ * @param[in] env An environment handle returned by #mdb_env_create().
+ * @param[in] msg The assertion message, not including newline.
+ */
+typedef void MDB_assert_func(MDB_env *env, const char *msg);
+
+ /** Set or reset the assert() callback of the environment.
+ * Disabled if liblmdb is built with NDEBUG.
+ * @note This hack should become obsolete as lmdb's error handling matures.
+ * @param[in] env An environment handle returned by #mdb_env_create().
+ * @param[in] func An #MDB_assert_func function, or 0.
+ * @return A non-zero error value on failure and 0 on success.
+ */
+int mdb_env_set_assert(MDB_env *env, MDB_assert_func *func);
+
+ /** @brief Create a transaction for use with the environment.
+ *
+ * The transaction handle may be discarded using #mdb_txn_abort() or #mdb_txn_commit().
+ * @note A transaction and its cursors must only be used by a single
+ * thread, and a thread may only have a single transaction at a time.
+ * If #MDB_NOTLS is in use, this does not apply to read-only transactions.
+ * @note Cursors may not span transactions.
+ * @param[in] env An environment handle returned by #mdb_env_create()
+ * @param[in] parent If this parameter is non-NULL, the new transaction
+ * will be a nested transaction, with the transaction indicated by \b parent
+ * as its parent. Transactions may be nested to any level. A parent
+ * transaction and its cursors may not issue any other operations than
+ * mdb_txn_commit and mdb_txn_abort while it has active child transactions.
+ * @param[in] flags Special options for this transaction. This parameter
+ * must be set to 0 or by bitwise OR'ing together one or more of the
+ * values described here.
+ * <ul>
+ * <li>#MDB_RDONLY
+ * This transaction will not perform any write operations.
+ * </ul>
+ * @param[out] txn Address where the new #MDB_txn handle will be stored
+ * @return A non-zero error value on failure and 0 on success. Some possible
+ * errors are:
+ * <ul>
+ * <li>#MDB_PANIC - a fatal error occurred earlier and the environment
+ * must be shut down.
+ * <li>#MDB_MAP_RESIZED - another process wrote data beyond this MDB_env's
+ * mapsize and this environment's map must be resized as well.
+ * See #mdb_env_set_mapsize().
+ * <li>#MDB_READERS_FULL - a read-only transaction was requested and
+ * the reader lock table is full. See #mdb_env_set_maxreaders().
+ * <li>ENOMEM - out of memory.
+ * </ul>
+ */
+int mdb_txn_begin(MDB_env *env, MDB_txn *parent, unsigned int flags, MDB_txn **txn);
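+
+/* Nesting sketch (illustrative): a child write transaction can be
+ * discarded without losing the parent's pending work:
+ *
+ *	MDB_txn *parent, *child;
+ *	mdb_txn_begin(env, NULL, 0, &parent);
+ *	mdb_txn_begin(env, parent, 0, &child);
+ *	// ... speculative writes through child ...
+ *	mdb_txn_abort(child);   // parent's changes remain pending
+ *	mdb_txn_commit(parent);
+ */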
+
+ /** @brief Returns the transaction's #MDB_env
+ *
+ * @param[in] txn A transaction handle returned by #mdb_txn_begin()
+ */
+MDB_env *mdb_txn_env(MDB_txn *txn);
+
+ /** @brief Return the transaction's ID.
+ *
+ * This returns the identifier associated with this transaction. For a
+ * read-only transaction, this corresponds to the snapshot being read;
+ * concurrent readers will frequently have the same transaction ID.
+ *
+ * @param[in] txn A transaction handle returned by #mdb_txn_begin()
+ * @return A transaction ID, valid if input is an active transaction.
+ */
+size_t mdb_txn_id(MDB_txn *txn);
+
+ /** @brief Commit all the operations of a transaction into the database.
+ *
+ * The transaction handle is freed. It and its cursors must not be used
+ * again after this call, except with #mdb_cursor_renew().
+ * @note Earlier documentation incorrectly said all cursors would be freed.
+ * Only write-transactions free cursors.
+ * @param[in] txn A transaction handle returned by #mdb_txn_begin()
+ * @return A non-zero error value on failure and 0 on success. Some possible
+ * errors are:
+ * <ul>
+ * <li>EINVAL - an invalid parameter was specified.
+ * <li>ENOSPC - no more disk space.
+ * <li>EIO - a low-level I/O error occurred while writing.
+ * <li>ENOMEM - out of memory.
+ * </ul>
+ */
+int mdb_txn_commit(MDB_txn *txn);
+
+ /** @brief Abandon all the operations of the transaction instead of saving them.
+ *
+ * The transaction handle is freed. It and its cursors must not be used
+ * again after this call, except with #mdb_cursor_renew().
+ * @note Earlier documentation incorrectly said all cursors would be freed.
+ * Only write-transactions free cursors.
+ * @param[in] txn A transaction handle returned by #mdb_txn_begin()
+ */
+void mdb_txn_abort(MDB_txn *txn);
+
+ /** @brief Reset a read-only transaction.
+ *
+ * Abort the transaction like #mdb_txn_abort(), but keep the transaction
+ * handle. #mdb_txn_renew() may reuse the handle. This saves allocation
+ * overhead if the process will start a new read-only transaction soon,
+ * and also locking overhead if #MDB_NOTLS is in use. The reader table
+ * lock is released, but the table slot stays tied to its thread or
+ * #MDB_txn. Use mdb_txn_abort() to discard a reset handle, and to free
+ * its lock table slot if MDB_NOTLS is in use.
+ * Cursors opened within the transaction must not be used
+ * again after this call, except with #mdb_cursor_renew().
+ * Reader locks generally don't interfere with writers, but they keep old
+ * versions of database pages allocated. Thus they prevent the old pages
+ * from being reused when writers commit new data, and so under heavy load
+ * the database size may grow much more rapidly than otherwise.
+ * @param[in] txn A transaction handle returned by #mdb_txn_begin()
+ */
+void mdb_txn_reset(MDB_txn *txn);
+
+ /** @brief Renew a read-only transaction.
+ *
+ * This acquires a new reader lock for a transaction handle that had been
+ * released by #mdb_txn_reset(). It must be called before a reset transaction
+ * may be used again.
+ * @param[in] txn A transaction handle returned by #mdb_txn_begin()
+ * @return A non-zero error value on failure and 0 on success. Some possible
+ * errors are:
+ * <ul>
+ * <li>#MDB_PANIC - a fatal error occurred earlier and the environment
+ * must be shut down.
+ * <li>EINVAL - an invalid parameter was specified.
+ * </ul>
+ */
+int mdb_txn_renew(MDB_txn *txn);
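+
+/* Reuse sketch (illustrative): a long-lived reader can cycle one handle
+ * instead of allocating a new transaction each time:
+ *
+ *	mdb_txn_begin(env, NULL, MDB_RDONLY, &txn);
+ *	// ... read ...
+ *	mdb_txn_reset(txn); // drop the snapshot, keep the handle
+ *	// later:
+ *	mdb_txn_renew(txn); // acquire a fresh snapshot
+ *	// ... read ...
+ *	mdb_txn_abort(txn); // finally discard the handle
+ */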
+
+/** Compat with version <= 0.9.4, avoid clash with libmdb from MDB Tools project */
+#define mdb_open(txn,name,flags,dbi) mdb_dbi_open(txn,name,flags,dbi)
+/** Compat with version <= 0.9.4, avoid clash with libmdb from MDB Tools project */
+#define mdb_close(env,dbi) mdb_dbi_close(env,dbi)
+
+ /** @brief Open a database in the environment.
+ *
+ * A database handle denotes the name and parameters of a database,
+ * independently of whether such a database exists.
+ * The database handle may be discarded by calling #mdb_dbi_close().
+ * The old database handle is returned if the database was already open.
+ * The handle may only be closed once.
+ *
+ * The database handle will be private to the current transaction until
+ * the transaction is successfully committed. If the transaction is
+ * aborted the handle will be closed automatically.
+ * After a successful commit the handle will reside in the shared
+ * environment, and may be used by other transactions.
+ *
+ * This function must not be called from multiple concurrent
+ * transactions in the same process. A transaction that uses
+ * this function must finish (either commit or abort) before
+ * any other transaction in the process may use this function.
+ *
+ * To use named databases (with name != NULL), #mdb_env_set_maxdbs()
+ * must be called before opening the environment. Database names are
+ * keys in the unnamed database, and may be read but not written.
+ *
+ * @param[in] txn A transaction handle returned by #mdb_txn_begin()
+ * @param[in] name The name of the database to open. If only a single
+ * database is needed in the environment, this value may be NULL.
+ * @param[in] flags Special options for this database. This parameter
+ * must be set to 0 or by bitwise OR'ing together one or more of the
+ * values described here.
+ * <ul>
+ * <li>#MDB_REVERSEKEY
+ * Keys are strings to be compared in reverse order, from the end
+ * of the strings to the beginning. By default, Keys are treated as strings and
+ * compared from beginning to end.
+ * <li>#MDB_DUPSORT
+ * Duplicate keys may be used in the database. (Or, from another perspective,
+ * keys may have multiple data items, stored in sorted order.) By default
+ * keys must be unique and may have only a single data item.
+ * <li>#MDB_INTEGERKEY
+ * Keys are binary integers in native byte order, either unsigned int
+ * or size_t, and will be sorted as such.
+ * The keys must all be of the same size.
+ * <li>#MDB_DUPFIXED
+ * This flag may only be used in combination with #MDB_DUPSORT. This option
+ * tells the library that the data items for this database are all the same
+ * size, which allows further optimizations in storage and retrieval. When
+ * all data items are the same size, the #MDB_GET_MULTIPLE, #MDB_NEXT_MULTIPLE
+ * and #MDB_PREV_MULTIPLE cursor operations may be used to retrieve multiple
+ * items at once.
+ * <li>#MDB_INTEGERDUP
+ * This option specifies that duplicate data items are binary integers,
+ * similar to #MDB_INTEGERKEY keys.
+ * <li>#MDB_REVERSEDUP
+ * This option specifies that duplicate data items should be compared as
+ * strings in reverse order.
+ * <li>#MDB_CREATE
+ * Create the named database if it doesn't exist. This option is not
+ * allowed in a read-only transaction or a read-only environment.
+ * </ul>
+ * @param[out] dbi Address where the new #MDB_dbi handle will be stored
+ * @return A non-zero error value on failure and 0 on success. Some possible
+ * errors are:
+ * <ul>
+ * <li>#MDB_NOTFOUND - the specified database doesn't exist in the environment
+ * and #MDB_CREATE was not specified.
+ * <li>#MDB_DBS_FULL - too many databases have been opened. See #mdb_env_set_maxdbs().
+ * </ul>
+ */
+int mdb_dbi_open(MDB_txn *txn, const char *name, unsigned int flags, MDB_dbi *dbi);
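+
+/* Named-database sketch (illustrative; "events" is an assumed name).
+ * #mdb_env_set_maxdbs() must have been called before #mdb_env_open():
+ *
+ *	mdb_txn_begin(env, NULL, 0, &txn);
+ *	mdb_dbi_open(txn, "events", MDB_CREATE, &dbi);
+ *	mdb_txn_commit(txn); // handle becomes visible env-wide
+ */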
+
+ /** @brief Retrieve statistics for a database.
+ *
+ * @param[in] txn A transaction handle returned by #mdb_txn_begin()
+ * @param[in] dbi A database handle returned by #mdb_dbi_open()
+ * @param[out] stat The address of an #MDB_stat structure
+ * where the statistics will be copied
+ * @return A non-zero error value on failure and 0 on success. Some possible
+ * errors are:
+ * <ul>
+ * <li>EINVAL - an invalid parameter was specified.
+ * </ul>
+ */
+int mdb_stat(MDB_txn *txn, MDB_dbi dbi, MDB_stat *stat);
+
+ /** @brief Retrieve the DB flags for a database handle.
+ *
+ * @param[in] txn A transaction handle returned by #mdb_txn_begin()
+ * @param[in] dbi A database handle returned by #mdb_dbi_open()
+ * @param[out] flags Address where the flags will be returned.
+ * @return A non-zero error value on failure and 0 on success.
+ */
+int mdb_dbi_flags(MDB_txn *txn, MDB_dbi dbi, unsigned int *flags);
+
+ /** @brief Close a database handle. Normally unnecessary. Use with care:
+ *
+ * This call is not mutex protected. Handles should only be closed by
+ * a single thread, and only if no other threads are going to reference
+ * the database handle or one of its cursors any further. Do not close
+ * a handle if an existing transaction has modified its database.
+ * Doing so can cause misbehavior from database corruption to errors
+ * like MDB_BAD_VALSIZE (since the DB name is gone).
+ *
+ * Closing a database handle is not necessary, but lets #mdb_dbi_open()
+ * reuse the handle value. Usually it's better to set a bigger
+ * #mdb_env_set_maxdbs() limit instead, unless that limit would have to
+ * be very large.
+ *
+ * @param[in] env An environment handle returned by #mdb_env_create()
+ * @param[in] dbi A database handle returned by #mdb_dbi_open()
+ */
+void mdb_dbi_close(MDB_env *env, MDB_dbi dbi);
+
+ /** @brief Empty or delete+close a database.
+ *
+ * See #mdb_dbi_close() for restrictions about closing the DB handle.
+ * @param[in] txn A transaction handle returned by #mdb_txn_begin()
+ * @param[in] dbi A database handle returned by #mdb_dbi_open()
+ * @param[in] del 0 to empty the DB, 1 to delete it from the
+ * environment and close the DB handle.
+ * @return A non-zero error value on failure and 0 on success.
+ */
+int mdb_drop(MDB_txn *txn, MDB_dbi dbi, int del);
+
+ /** @brief Set a custom key comparison function for a database.
+ *
+ * The comparison function is called whenever it is necessary to compare a
+ * key specified by the application with a key currently stored in the database.
+ * If no comparison function is specified, and no special key flags were specified
+ * with #mdb_dbi_open(), the keys are compared lexically, with shorter keys collating
+ * before longer keys.
+ * @warning This function must be called before any data access functions are used,
+ * otherwise data corruption may occur. The same comparison function must be used by every
+ * program accessing the database, every time the database is used.
+ * @param[in] txn A transaction handle returned by #mdb_txn_begin()
+ * @param[in] dbi A database handle returned by #mdb_dbi_open()
+ * @param[in] cmp A #MDB_cmp_func function
+ * @return A non-zero error value on failure and 0 on success. Some possible
+ * errors are:
+ * <ul>
+ * <li>EINVAL - an invalid parameter was specified.
+ * </ul>
+ */
+int mdb_set_compare(MDB_txn *txn, MDB_dbi dbi, MDB_cmp_func *cmp);
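+
+ /* Example (a minimal, illustrative sketch): a comparator ordering keys
+ * as native unsigned ints, installed right after #mdb_dbi_open() and
+ * before any data access. Assumes every key is exactly
+ * sizeof(unsigned int) bytes. (#MDB_INTEGERKEY covers this common case
+ * without a callback.)
+ *
+ *     static int uint_cmp(const MDB_val *a, const MDB_val *b)
+ *     {
+ *         unsigned int ua, ub;
+ *         memcpy(&ua, a->mv_data, sizeof(ua)); // avoid unaligned access
+ *         memcpy(&ub, b->mv_data, sizeof(ub));
+ *         return (ua > ub) - (ua < ub);
+ *     }
+ *
+ *     rc = mdb_set_compare(txn, dbi, uint_cmp);
+ */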
+
+ /** @brief Set a custom data comparison function for a #MDB_DUPSORT database.
+ *
+ * This comparison function is called whenever it is necessary to compare a data
+ * item specified by the application with a data item currently stored in the database.
+ * This function only takes effect if the database was opened with the #MDB_DUPSORT
+ * flag.
+ * If no comparison function is specified, and no special key flags were specified
+ * with #mdb_dbi_open(), the data items are compared lexically, with shorter items collating
+ * before longer items.
+ * @warning This function must be called before any data access functions are used,
+ * otherwise data corruption may occur. The same comparison function must be used by every
+ * program accessing the database, every time the database is used.
+ * @param[in] txn A transaction handle returned by #mdb_txn_begin()
+ * @param[in] dbi A database handle returned by #mdb_dbi_open()
+ * @param[in] cmp A #MDB_cmp_func function
+ * @return A non-zero error value on failure and 0 on success. Some possible
+ * errors are:
+ * <ul>
+ * <li>EINVAL - an invalid parameter was specified.
+ * </ul>
+ */
+int mdb_set_dupsort(MDB_txn *txn, MDB_dbi dbi, MDB_cmp_func *cmp);
+
+ /** @brief Set a relocation function for a #MDB_FIXEDMAP database.
+ *
+ * @todo The relocation function is called whenever it is necessary to move the data
+ * of an item to a different position in the database (e.g. through tree
+ * balancing operations, shifts as a result of adds or deletes, etc.). It is
+ * intended to allow address/position-dependent data items to be stored in
+ * a database in an environment opened with the #MDB_FIXEDMAP option.
+ * Currently the relocation feature is unimplemented and setting
+ * this function has no effect.
+ * @param[in] txn A transaction handle returned by #mdb_txn_begin()
+ * @param[in] dbi A database handle returned by #mdb_dbi_open()
+ * @param[in] rel A #MDB_rel_func function
+ * @return A non-zero error value on failure and 0 on success. Some possible
+ * errors are:
+ * <ul>
+ * <li>EINVAL - an invalid parameter was specified.
+ * </ul>
+ */
+int mdb_set_relfunc(MDB_txn *txn, MDB_dbi dbi, MDB_rel_func *rel);
+
+ /** @brief Set a context pointer for a #MDB_FIXEDMAP database's relocation function.
+ *
+ * See #mdb_set_relfunc and #MDB_rel_func for more details.
+ * @param[in] txn A transaction handle returned by #mdb_txn_begin()
+ * @param[in] dbi A database handle returned by #mdb_dbi_open()
+ * @param[in] ctx An arbitrary pointer for whatever the application needs.
+ * It will be passed to the callback function set by #mdb_set_relfunc
+ * as its \b relctx parameter whenever the callback is invoked.
+ * @return A non-zero error value on failure and 0 on success. Some possible
+ * errors are:
+ * <ul>
+ * <li>EINVAL - an invalid parameter was specified.
+ * </ul>
+ */
+int mdb_set_relctx(MDB_txn *txn, MDB_dbi dbi, void *ctx);
+
+ /** @brief Get items from a database.
+ *
+ * This function retrieves key/data pairs from the database. The address
+ * and length of the data associated with the specified \b key are returned
+ * in the structure to which \b data refers.
+ * If the database supports duplicate keys (#MDB_DUPSORT) then the
+ * first data item for the key will be returned. Retrieval of other
+ * items requires the use of #mdb_cursor_get().
+ *
+ * @note The memory pointed to by the returned values is owned by the
+ * database. The caller need not dispose of the memory, and may not
+ * modify it in any way. For values returned in a read-only transaction
+ * any modification attempts will cause a SIGSEGV.
+ * @note Values returned from the database are valid only until a
+ * subsequent update operation, or the end of the transaction.
+ * @param[in] txn A transaction handle returned by #mdb_txn_begin()
+ * @param[in] dbi A database handle returned by #mdb_dbi_open()
+ * @param[in] key The key to search for in the database
+ * @param[out] data The data corresponding to the key
+ * @return A non-zero error value on failure and 0 on success. Some possible
+ * errors are:
+ * <ul>
+ * <li>#MDB_NOTFOUND - the key was not in the database.
+ * <li>EINVAL - an invalid parameter was specified.
+ * </ul>
+ */
+int mdb_get(MDB_txn *txn, MDB_dbi dbi, MDB_val *key, MDB_val *data);
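+
+ /* Example (a minimal, illustrative sketch): looking up one value in a
+ * read-only transaction. Assumes an open #MDB_env *env and handle dbi.
+ *
+ *     MDB_txn *txn;
+ *     MDB_val key, data;
+ *     key.mv_size = 5;
+ *     key.mv_data = "hello";
+ *     if (mdb_txn_begin(env, NULL, MDB_RDONLY, &txn) == 0) {
+ *         int rc = mdb_get(txn, dbi, &key, &data);
+ *         if (rc == 0)
+ *             printf("%.*s\n", (int)data.mv_size, (char *)data.mv_data);
+ *         else if (rc == MDB_NOTFOUND)
+ *             printf("no such key\n");
+ *         mdb_txn_abort(txn); // data must not be used past this point
+ *     }
+ */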
+
+ /** @brief Store items into a database.
+ *
+ * This function stores key/data pairs in the database. The default behavior
+ * is to enter the new key/data pair, replacing any previously existing key
+ * if duplicates are disallowed, or adding a duplicate data item if
+ * duplicates are allowed (#MDB_DUPSORT).
+ * @param[in] txn A transaction handle returned by #mdb_txn_begin()
+ * @param[in] dbi A database handle returned by #mdb_dbi_open()
+ * @param[in] key The key to store in the database
+ * @param[in,out] data The data to store
+ * @param[in] flags Special options for this operation. This parameter
+ * must be set to 0 or by bitwise OR'ing together one or more of the
+ * values described here.
+ * <ul>
+ * <li>#MDB_NODUPDATA - enter the new key/data pair only if it does not
+ * already appear in the database. This flag may only be specified
+ * if the database was opened with #MDB_DUPSORT. The function will
+ * return #MDB_KEYEXIST if the key/data pair already appears in the
+ * database.
+ * <li>#MDB_NOOVERWRITE - enter the new key/data pair only if the key
+ * does not already appear in the database. The function will return
+ * #MDB_KEYEXIST if the key already appears in the database, even if
+ * the database supports duplicates (#MDB_DUPSORT). The \b data
+ * parameter will be set to point to the existing item.
+ * <li>#MDB_RESERVE - reserve space for data of the given size, but
+ * don't copy the given data. Instead, return a pointer to the
+ * reserved space, which the caller can fill in later - before
+ * the next update operation or the transaction ends. This saves
+ * an extra memcpy if the data is being generated later.
+ * LMDB does nothing else with this memory; the caller is expected
+ * to modify all of the space requested. This flag must not be
+ * specified if the database was opened with #MDB_DUPSORT.
+ * <li>#MDB_APPEND - append the given key/data pair to the end of the
+ * database. This option allows fast bulk loading when keys are
+ * already known to be in the correct order. Loading unsorted keys
+ * with this flag will cause a #MDB_KEYEXIST error.
+ * <li>#MDB_APPENDDUP - as above, but for sorted dup data.
+ * </ul>
+ * @return A non-zero error value on failure and 0 on success. Some possible
+ * errors are:
+ * <ul>
+ * <li>#MDB_MAP_FULL - the database is full, see #mdb_env_set_mapsize().
+ * <li>#MDB_TXN_FULL - the transaction has too many dirty pages.
+ * <li>EACCES - an attempt was made to write in a read-only transaction.
+ * <li>EINVAL - an invalid parameter was specified.
+ * </ul>
+ */
+int mdb_put(MDB_txn *txn, MDB_dbi dbi, MDB_val *key, MDB_val *data,
+ unsigned int flags);
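+
+ /* Example (a minimal, illustrative sketch): storing a pair and detecting
+ * a duplicate key with #MDB_NOOVERWRITE. Assumes an open #MDB_env *env
+ * and handle dbi.
+ *
+ *     MDB_txn *txn;
+ *     MDB_val key, data;
+ *     key.mv_size = 5;  key.mv_data = "hello";
+ *     data.mv_size = 5; data.mv_data = "world";
+ *     if (mdb_txn_begin(env, NULL, 0, &txn) == 0) {
+ *         int rc = mdb_put(txn, dbi, &key, &data, MDB_NOOVERWRITE);
+ *         if (rc == MDB_KEYEXIST) {
+ *             // data now points at the already-stored item
+ *         }
+ *         if (rc == 0 || rc == MDB_KEYEXIST)
+ *             rc = mdb_txn_commit(txn);
+ *         else
+ *             mdb_txn_abort(txn);
+ *     }
+ */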
+
+ /** @brief Delete items from a database.
+ *
+ * This function removes key/data pairs from the database.
+ * If the database does not support sorted duplicate data items
+ * (#MDB_DUPSORT) the data parameter is ignored.
+ * If the database supports sorted duplicates and the data parameter
+ * is NULL, all of the duplicate data items for the key will be
+ * deleted. Otherwise, if the data parameter is non-NULL
+ * only the matching data item will be deleted.
+ * This function will return #MDB_NOTFOUND if the specified key/data
+ * pair is not in the database.
+ * @param[in] txn A transaction handle returned by #mdb_txn_begin()
+ * @param[in] dbi A database handle returned by #mdb_dbi_open()
+ * @param[in] key The key to delete from the database
+ * @param[in] data The data to delete
+ * @return A non-zero error value on failure and 0 on success. Some possible
+ * errors are:
+ * <ul>
+ * <li>EACCES - an attempt was made to write in a read-only transaction.
+ * <li>EINVAL - an invalid parameter was specified.
+ * </ul>
+ */
+int mdb_del(MDB_txn *txn, MDB_dbi dbi, MDB_val *key, MDB_val *data);
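+
+ /* Example (a minimal, illustrative sketch): deleting a key together with
+ * all of its duplicate data items by passing a NULL data parameter.
+ * Assumes an existing write transaction txn, handle dbi and populated key.
+ *
+ *     int rc = mdb_del(txn, dbi, &key, NULL);
+ *     if (rc == MDB_NOTFOUND) {
+ *         // nothing to delete
+ *     }
+ */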
+
+ /** @brief Create a cursor handle.
+ *
+ * A cursor is associated with a specific transaction and database.
+ * A cursor cannot be used after its database handle has been closed,
+ * nor after its transaction has ended, except with #mdb_cursor_renew().
+ * It can be discarded with #mdb_cursor_close().
+ * A cursor in a write-transaction can be closed before its transaction
+ * ends, and will otherwise be closed when its transaction ends.
+ * A cursor in a read-only transaction must be closed explicitly, before
+ * or after its transaction ends. It can be reused with
+ * #mdb_cursor_renew() before finally closing it.
+ * @note Earlier documentation said that cursors in every transaction
+ * were closed when the transaction committed or aborted.
+ * @param[in] txn A transaction handle returned by #mdb_txn_begin()
+ * @param[in] dbi A database handle returned by #mdb_dbi_open()
+ * @param[out] cursor Address where the new #MDB_cursor handle will be stored
+ * @return A non-zero error value on failure and 0 on success. Some possible
+ * errors are:
+ * <ul>
+ * <li>EINVAL - an invalid parameter was specified.
+ * </ul>
+ */
+int mdb_cursor_open(MDB_txn *txn, MDB_dbi dbi, MDB_cursor **cursor);
+
+ /** @brief Close a cursor handle.
+ *
+ * The cursor handle will be freed and must not be used again after this call.
+ * Its transaction must still be live if it is a write-transaction.
+ * @param[in] cursor A cursor handle returned by #mdb_cursor_open()
+ */
+void mdb_cursor_close(MDB_cursor *cursor);
+
+ /** @brief Renew a cursor handle.
+ *
+ * A cursor is associated with a specific transaction and database.
+ * Cursors that are only used in read-only
+ * transactions may be re-used, to avoid unnecessary malloc/free overhead.
+ * The cursor may be associated with a new read-only transaction that
+ * references the same database handle the cursor was created with.
+ * This may be done whether the previous transaction is live or dead.
+ * @param[in] txn A transaction handle returned by #mdb_txn_begin()
+ * @param[in] cursor A cursor handle returned by #mdb_cursor_open()
+ * @return A non-zero error value on failure and 0 on success. Some possible
+ * errors are:
+ * <ul>
+ * <li>EINVAL - an invalid parameter was specified.
+ * </ul>
+ */
+int mdb_cursor_renew(MDB_txn *txn, MDB_cursor *cursor);
+
+ /** @brief Return the cursor's transaction handle.
+ *
+ * @param[in] cursor A cursor handle returned by #mdb_cursor_open()
+ */
+MDB_txn *mdb_cursor_txn(MDB_cursor *cursor);
+
+ /** @brief Return the cursor's database handle.
+ *
+ * @param[in] cursor A cursor handle returned by #mdb_cursor_open()
+ */
+MDB_dbi mdb_cursor_dbi(MDB_cursor *cursor);
+
+ /** @brief Retrieve by cursor.
+ *
+ * This function retrieves key/data pairs from the database. The address and length
+ * of the key are returned in the object to which \b key refers (except for the
+ * case of the #MDB_SET option, in which the \b key object is unchanged), and
+ * the address and length of the data are returned in the object to which \b data
+ * refers.
+ * See #mdb_get() for restrictions on using the output values.
+ * @param[in] cursor A cursor handle returned by #mdb_cursor_open()
+ * @param[in,out] key The key for a retrieved item
+ * @param[in,out] data The data of a retrieved item
+ * @param[in] op A cursor operation #MDB_cursor_op
+ * @return A non-zero error value on failure and 0 on success. Some possible
+ * errors are:
+ * <ul>
+ * <li>#MDB_NOTFOUND - no matching key found.
+ * <li>EINVAL - an invalid parameter was specified.
+ * </ul>
+ */
+int mdb_cursor_get(MDB_cursor *cursor, MDB_val *key, MDB_val *data,
+ MDB_cursor_op op);
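+
+ /* Example (a minimal, illustrative sketch): scanning the whole database
+ * with #MDB_NEXT; on a freshly opened cursor this starts at the first
+ * item. Assumes an existing transaction txn and handle dbi.
+ *
+ *     MDB_cursor *cur;
+ *     MDB_val key, data;
+ *     int rc = mdb_cursor_open(txn, dbi, &cur);
+ *     if (rc == 0) {
+ *         while ((rc = mdb_cursor_get(cur, &key, &data, MDB_NEXT)) == 0) {
+ *             // key/data are valid until the next update or end of txn
+ *         }
+ *         // rc == MDB_NOTFOUND here means the scan reached the end
+ *         mdb_cursor_close(cur);
+ *     }
+ */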
+
+ /** @brief Store by cursor.
+ *
+ * This function stores key/data pairs into the database.
+ * The cursor is positioned at the new item, or on failure usually near it.
+ * @note Earlier documentation incorrectly said errors would leave the
+ * state of the cursor unchanged.
+ * @param[in] cursor A cursor handle returned by #mdb_cursor_open()
+ * @param[in] key The key operated on.
+ * @param[in] data The data operated on.
+ * @param[in] flags Options for this operation. This parameter
+ * must be set to 0 or one of the values described here.
+ * <ul>
+ * <li>#MDB_CURRENT - replace the item at the current cursor position.
+ * The \b key parameter must still be provided, and must match the
+ * key of the item at the current cursor position.
+ * If using sorted duplicates (#MDB_DUPSORT) the data item must still
+ * sort into the same place. This is intended to be used when the
+ * new data is the same size as the old. Otherwise it will simply
+ * perform a delete of the old record followed by an insert.
+ * <li>#MDB_NODUPDATA - enter the new key/data pair only if it does not
+ * already appear in the database. This flag may only be specified
+ * if the database was opened with #MDB_DUPSORT. The function will
+ * return #MDB_KEYEXIST if the key/data pair already appears in the
+ * database.
+ * <li>#MDB_NOOVERWRITE - enter the new key/data pair only if the key
+ * does not already appear in the database. The function will return
+ * #MDB_KEYEXIST if the key already appears in the database, even if
+ * the database supports duplicates (#MDB_DUPSORT).
+ * <li>#MDB_RESERVE - reserve space for data of the given size, but
+ * don't copy the given data. Instead, return a pointer to the
+ * reserved space, which the caller can fill in later - before
+ * the next update operation or the transaction ends. This saves
+ * an extra memcpy if the data is being generated later. This flag
+ * must not be specified if the database was opened with #MDB_DUPSORT.
+ * <li>#MDB_APPEND - append the given key/data pair to the end of the
+ * database. No key comparisons are performed. This option allows
+ * fast bulk loading when keys are already known to be in the
+ * correct order. Loading unsorted keys with this flag will cause
+ * a #MDB_KEYEXIST error.
+ * <li>#MDB_APPENDDUP - as above, but for sorted dup data.
+ * <li>#MDB_MULTIPLE - store multiple contiguous data elements in a
+ * single request. This flag may only be specified if the database
+ * was opened with #MDB_DUPFIXED. The \b data argument must be an
+ * array of two MDB_vals. The mv_size of the first MDB_val must be
+ * the size of a single data element. The mv_data of the first MDB_val
+ * must point to the beginning of the array of contiguous data elements.
+ * The mv_size of the second MDB_val must be the count of the number
+ * of data elements to store. On return this field will be set to
+ * the count of the number of elements actually written. The mv_data
+ * of the second MDB_val is unused.
+ * </ul>
+ * @return A non-zero error value on failure and 0 on success. Some possible
+ * errors are:
+ * <ul>
+ * <li>#MDB_MAP_FULL - the database is full, see #mdb_env_set_mapsize().
+ * <li>#MDB_TXN_FULL - the transaction has too many dirty pages.
+ * <li>EACCES - an attempt was made to write in a read-only transaction.
+ * <li>EINVAL - an invalid parameter was specified.
+ * </ul>
+ */
+int mdb_cursor_put(MDB_cursor *cursor, MDB_val *key, MDB_val *data,
+ unsigned int flags);
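+
+ /* Example (a minimal, illustrative sketch): bulk loading with
+ * #MDB_APPEND, which is only valid because the generated keys are
+ * already in sorted order. Assumes an existing write transaction txn
+ * and handle dbi.
+ *
+ *     MDB_cursor *cur;
+ *     MDB_val key, data;
+ *     char kbuf[8];
+ *     int i, rc = mdb_cursor_open(txn, dbi, &cur);
+ *     if (rc == 0) {
+ *         for (i = 0; rc == 0 && i < 1000; i++) {
+ *             sprintf(kbuf, "k%06d", i); // sorts lexicographically
+ *             key.mv_size = 7;          key.mv_data = kbuf;
+ *             data.mv_size = sizeof(i); data.mv_data = &i;
+ *             rc = mdb_cursor_put(cur, &key, &data, MDB_APPEND);
+ *         }
+ *         mdb_cursor_close(cur);
+ *     }
+ */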
+
+ /** @brief Delete current key/data pair
+ *
+ * This function deletes the key/data pair to which the cursor refers.
+ * This does not invalidate the cursor, so operations such as MDB_NEXT
+ * can still be used on it.
+ * Both MDB_NEXT and MDB_GET_CURRENT will return the same record after
+ * this operation.
+ * @param[in] cursor A cursor handle returned by #mdb_cursor_open()
+ * @param[in] flags Options for this operation. This parameter
+ * must be set to 0 or one of the values described here.
+ * <ul>
+ * <li>#MDB_NODUPDATA - delete all of the data items for the current key.
+ * This flag may only be specified if the database was opened with #MDB_DUPSORT.
+ * </ul>
+ * @return A non-zero error value on failure and 0 on success. Some possible
+ * errors are:
+ * <ul>
+ * <li>EACCES - an attempt was made to write in a read-only transaction.
+ * <li>EINVAL - an invalid parameter was specified.
+ * </ul>
+ */
+int mdb_cursor_del(MDB_cursor *cursor, unsigned int flags);
+
+ /** @brief Return count of duplicates for current key.
+ *
+ * This call is only valid on databases that support sorted duplicate
+ * data items (#MDB_DUPSORT).
+ * @param[in] cursor A cursor handle returned by #mdb_cursor_open()
+ * @param[out] countp Address where the count will be stored
+ * @return A non-zero error value on failure and 0 on success. Some possible
+ * errors are:
+ * <ul>
+ * <li>EINVAL - cursor is not initialized, or an invalid parameter was specified.
+ * </ul>
+ */
+int mdb_cursor_count(MDB_cursor *cursor, size_t *countp);
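+
+ /* Example (a minimal, illustrative sketch): positioning on a key with
+ * #MDB_SET, then counting its duplicates. Assumes a #MDB_DUPSORT
+ * database and existing txn, dbi and key.
+ *
+ *     MDB_cursor *cur;
+ *     MDB_val data;
+ *     size_t count;
+ *     if (mdb_cursor_open(txn, dbi, &cur) == 0) {
+ *         if (mdb_cursor_get(cur, &key, &data, MDB_SET) == 0 &&
+ *             mdb_cursor_count(cur, &count) == 0)
+ *             printf("%zu duplicate(s)\n", count);
+ *         mdb_cursor_close(cur);
+ *     }
+ */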
+
+ /** @brief Compare two data items according to a particular database.
+ *
+ * This returns a comparison as if the two data items were keys in the
+ * specified database.
+ * @param[in] txn A transaction handle returned by #mdb_txn_begin()
+ * @param[in] dbi A database handle returned by #mdb_dbi_open()
+ * @param[in] a The first item to compare
+ * @param[in] b The second item to compare
+ * @return < 0 if a < b, 0 if a == b, > 0 if a > b
+ */
+int mdb_cmp(MDB_txn *txn, MDB_dbi dbi, const MDB_val *a, const MDB_val *b);
+
+ /** @brief Compare two data items according to a particular database.
+ *
+ * This returns a comparison as if the two items were data items of
+ * the specified database. The database must have the #MDB_DUPSORT flag.
+ * @param[in] txn A transaction handle returned by #mdb_txn_begin()
+ * @param[in] dbi A database handle returned by #mdb_dbi_open()
+ * @param[in] a The first item to compare
+ * @param[in] b The second item to compare
+ * @return < 0 if a < b, 0 if a == b, > 0 if a > b
+ */
+int mdb_dcmp(MDB_txn *txn, MDB_dbi dbi, const MDB_val *a, const MDB_val *b);
+
+ /** @brief A callback function used to print a message from the library.
+ *
+ * @param[in] msg The string to be printed.
+ * @param[in] ctx An arbitrary context pointer for the callback.
+ * @return < 0 on failure, >= 0 on success.
+ */
+typedef int (MDB_msg_func)(const char *msg, void *ctx);
+
+ /** @brief Dump the entries in the reader lock table.
+ *
+ * @param[in] env An environment handle returned by #mdb_env_create()
+ * @param[in] func A #MDB_msg_func function
+ * @param[in] ctx Anything the message function needs
+ * @return < 0 on failure, >= 0 on success.
+ */
+int mdb_reader_list(MDB_env *env, MDB_msg_func *func, void *ctx);
+
+ /** @brief Check for stale entries in the reader lock table.
+ *
+ * @param[in] env An environment handle returned by #mdb_env_create()
+ * @param[out] dead Number of stale slots that were cleared
+ * @return 0 on success, non-zero on failure.
+ */
+int mdb_reader_check(MDB_env *env, int *dead);
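+
+ /* Example (a minimal, illustrative sketch): reclaiming reader slots left
+ * behind by crashed processes, e.g. once at application startup after
+ * #mdb_env_open(). Assumes an open #MDB_env *env.
+ *
+ *     int dead = 0;
+ *     if (mdb_reader_check(env, &dead) == 0 && dead > 0)
+ *         fprintf(stderr, "cleared %d stale reader slot(s)\n", dead);
+ */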
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+/** @page tools LMDB Command Line Tools
+ The following describes the command line tools that are available for LMDB.
+ \li \ref mdb_copy_1
+ \li \ref mdb_dump_1
+ \li \ref mdb_load_1
+ \li \ref mdb_stat_1
+*/
+
+#endif /* _LMDB_H_ */
diff --git a/nostrdb/mdb.c b/nostrdb/mdb.c
@@ -0,0 +1,10354 @@
+/** @file mdb.c
+ * @brief Lightning memory-mapped database library
+ *
+ * A Btree-based database management library modeled loosely on the
+ * BerkeleyDB API, but much simplified.
+ */
+/*
+ * Copyright 2011-2021 Howard Chu, Symas Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted only as authorized by the OpenLDAP
+ * Public License.
+ *
+ * A copy of this license is available in the file LICENSE in the
+ * top-level directory of the distribution or, alternatively, at
+ * <http://www.OpenLDAP.org/license.html>.
+ *
+ * This code is derived from btree.c written by Martin Hedenfalk.
+ *
+ * Copyright (c) 2009, 2010 Martin Hedenfalk <martin@bzero.se>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE 1
+#endif
+#if defined(__WIN64__)
+#define _FILE_OFFSET_BITS 64
+#endif
+#ifdef _WIN32
+#include <malloc.h>
+#include <windows.h>
+#include <wchar.h> /* get wcscpy() */
+
+/** getpid() returns int; MinGW defines pid_t but MinGW64 typedefs it
+ * as int64 which is wrong. MSVC doesn't define it at all, so just
+ * don't use it.
+ */
+#define MDB_PID_T int
+#define MDB_THR_T DWORD
+#include <sys/types.h>
+#include <sys/stat.h>
+#ifdef __GNUC__
+# include <sys/param.h>
+#else
+# define LITTLE_ENDIAN 1234
+# define BIG_ENDIAN 4321
+# define BYTE_ORDER LITTLE_ENDIAN
+# ifndef SSIZE_MAX
+# define SSIZE_MAX INT_MAX
+# endif
+#endif
+#else
+#include <sys/types.h>
+#include <sys/stat.h>
+#define MDB_PID_T pid_t
+#define MDB_THR_T pthread_t
+#include <sys/param.h>
+#include <sys/uio.h>
+#include <sys/mman.h>
+#ifdef HAVE_SYS_FILE_H
+#include <sys/file.h>
+#endif
+#include <fcntl.h>
+#endif
+
+#if defined(__mips) && defined(__linux)
+/* MIPS has cache coherency issues, requires explicit cache control */
+#include <sys/cachectl.h>
+#define CACHEFLUSH(addr, bytes, cache) cacheflush(addr, bytes, cache)
+#else
+#define CACHEFLUSH(addr, bytes, cache)
+#endif
+
+#if defined(__linux) && !defined(MDB_FDATASYNC_WORKS)
+/** fdatasync is broken on ext3/ext4fs on older kernels, see
+ * description in #mdb_env_open2 comments. You can safely
+ * define MDB_FDATASYNC_WORKS if this code will only be run
+ * on kernels 3.6 and newer.
+ */
+#define BROKEN_FDATASYNC
+#endif
+
+#include <errno.h>
+#include <limits.h>
+#include <stddef.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+
+#ifdef _MSC_VER
+#include <io.h>
+typedef SSIZE_T ssize_t;
+#else
+#include <unistd.h>
+#endif
+
+#if defined(__sun) || defined(ANDROID)
+/* Most platforms have posix_memalign; older ones may only have memalign */
+#define HAVE_MEMALIGN 1
+#include <malloc.h>
+/* On Solaris, we need the POSIX sigwait function */
+#if defined (__sun)
+# define _POSIX_PTHREAD_SEMANTICS 1
+#endif
+#endif
+
+#if !(defined(BYTE_ORDER) || defined(__BYTE_ORDER))
+#include <netinet/in.h>
+#include <resolv.h> /* defines BYTE_ORDER on HPUX and Solaris */
+#endif
+
+#if defined(__FreeBSD__) && defined(__FreeBSD_version) && __FreeBSD_version >= 1100110
+# define MDB_USE_POSIX_MUTEX 1
+# define MDB_USE_ROBUST 1
+#elif defined(__APPLE__) || defined (BSD) || defined(__FreeBSD_kernel__)
+# define MDB_USE_POSIX_SEM 1
+# define MDB_FDATASYNC fsync
+#elif defined(ANDROID)
+# define MDB_FDATASYNC fsync
+#endif
+
+#ifndef _WIN32
+#include <pthread.h>
+#include <signal.h>
+#ifdef MDB_USE_POSIX_SEM
+# define MDB_USE_HASH 1
+#include <semaphore.h>
+#else
+#define MDB_USE_POSIX_MUTEX 1
+#endif
+#endif
+
+#if defined(_WIN32) + defined(MDB_USE_POSIX_SEM) \
+ + defined(MDB_USE_POSIX_MUTEX) != 1
+# error "Ambiguous shared-lock implementation"
+#endif
+
+#ifdef USE_VALGRIND
+#include <valgrind/memcheck.h>
+#define VGMEMP_CREATE(h,r,z) VALGRIND_CREATE_MEMPOOL(h,r,z)
+#define VGMEMP_ALLOC(h,a,s) VALGRIND_MEMPOOL_ALLOC(h,a,s)
+#define VGMEMP_FREE(h,a) VALGRIND_MEMPOOL_FREE(h,a)
+#define VGMEMP_DESTROY(h) VALGRIND_DESTROY_MEMPOOL(h)
+#define VGMEMP_DEFINED(a,s) VALGRIND_MAKE_MEM_DEFINED(a,s)
+#else
+#define VGMEMP_CREATE(h,r,z)
+#define VGMEMP_ALLOC(h,a,s)
+#define VGMEMP_FREE(h,a)
+#define VGMEMP_DESTROY(h)
+#define VGMEMP_DEFINED(a,s)
+#endif
+
+#ifndef BYTE_ORDER
+# if (defined(_LITTLE_ENDIAN) || defined(_BIG_ENDIAN)) && !(defined(_LITTLE_ENDIAN) && defined(_BIG_ENDIAN))
+/* Solaris just defines one or the other */
+# define LITTLE_ENDIAN 1234
+# define BIG_ENDIAN 4321
+# ifdef _LITTLE_ENDIAN
+# define BYTE_ORDER LITTLE_ENDIAN
+# else
+# define BYTE_ORDER BIG_ENDIAN
+# endif
+# else
+# define BYTE_ORDER __BYTE_ORDER
+# endif
+#endif
+
+#ifndef LITTLE_ENDIAN
+#define LITTLE_ENDIAN __LITTLE_ENDIAN
+#endif
+#ifndef BIG_ENDIAN
+#define BIG_ENDIAN __BIG_ENDIAN
+#endif
+
+#if defined(__i386) || defined(__x86_64) || defined(_M_IX86)
+#define MISALIGNED_OK 1
+#endif
+
+#include "lmdb.h"
+#include "midl.h"
+
+#if (BYTE_ORDER == LITTLE_ENDIAN) == (BYTE_ORDER == BIG_ENDIAN)
+# error "Unknown or unsupported endianness (BYTE_ORDER)"
+#elif (-6 & 5) || CHAR_BIT != 8 || UINT_MAX < 0xffffffff || ULONG_MAX % 0xFFFF
+# error "Two's complement, reasonably sized integer types, please"
+#endif
+
+#if (((__clang_major__ << 8) | __clang_minor__) >= 0x0302) || (((__GNUC__ << 8) | __GNUC_MINOR__) >= 0x0403)
+/** Mark infrequently used env functions as cold. This puts them in a separate
+ * section, and optimizes them for size */
+#define ESECT __attribute__ ((cold))
+#else
+/* On older compilers, use a separate section */
+# ifdef __GNUC__
+# ifdef __APPLE__
+# define ESECT __attribute__ ((section("__TEXT,text_env")))
+# else
+# define ESECT __attribute__ ((section("text_env")))
+# endif
+# else
+# define ESECT
+# endif
+#endif
+
+#ifdef _WIN32
+#define CALL_CONV WINAPI
+#else
+#define CALL_CONV
+#endif
+
+/** @defgroup internal LMDB Internals
+ * @{
+ */
+/** @defgroup compat Compatibility Macros
+ * A bunch of macros to minimize the amount of platform-specific ifdefs
+ * needed throughout the rest of the code. When the features this library
+ * needs are similar enough to POSIX to be hidden in a one-or-two line
+ * replacement, this macro approach is used.
+ * @{
+ */
+
+ /** Features under development */
+#ifndef MDB_DEVEL
+#define MDB_DEVEL 0
+#endif
+
+ /** Wrapper around __func__, which is a C99 feature */
+#if __STDC_VERSION__ >= 199901L
+# define mdb_func_ __func__
+#elif __GNUC__ >= 2 || _MSC_VER >= 1300
+# define mdb_func_ __FUNCTION__
+#else
+/* If a debug message says <mdb_unknown>(), update the #if statements above */
+# define mdb_func_ "<mdb_unknown>"
+#endif
+
+/* Internal error codes, not exposed outside liblmdb */
+#define MDB_NO_ROOT (MDB_LAST_ERRCODE + 10)
+#ifdef _WIN32
+#define MDB_OWNERDEAD ((int) WAIT_ABANDONED)
+#elif defined(MDB_USE_POSIX_MUTEX) && defined(EOWNERDEAD)
+#define MDB_OWNERDEAD EOWNERDEAD /**< #LOCK_MUTEX0() result if dead owner */
+#endif
+
+#ifdef __GLIBC__
+#define GLIBC_VER	((__GLIBC__ << 16) | __GLIBC_MINOR__)
+#endif
+/** Some platforms define the EOWNERDEAD error code
+ * even though they don't support Robust Mutexes.
+ * Compile with -DMDB_USE_ROBUST=0, or use some other
+ * mechanism like -DMDB_USE_POSIX_SEM instead of
+ * -DMDB_USE_POSIX_MUTEX.
+ * (Posix semaphores are not robust.)
+ */
+#ifndef MDB_USE_ROBUST
+/* Android currently lacks Robust Mutex support. So does glibc < 2.4. */
+# if defined(MDB_USE_POSIX_MUTEX) && (defined(ANDROID) || \
+ (defined(__GLIBC__) && GLIBC_VER < 0x020004))
+# define MDB_USE_ROBUST 0
+# else
+# define MDB_USE_ROBUST 1
+# endif
+#endif /* !MDB_USE_ROBUST */
+
+#if defined(MDB_USE_POSIX_MUTEX) && (MDB_USE_ROBUST)
+/* glibc < 2.12 only provided _np API */
+# if (defined(__GLIBC__) && GLIBC_VER < 0x02000c) || \
+ (defined(PTHREAD_MUTEX_ROBUST_NP) && !defined(PTHREAD_MUTEX_ROBUST))
+# define PTHREAD_MUTEX_ROBUST PTHREAD_MUTEX_ROBUST_NP
+# define pthread_mutexattr_setrobust(attr, flag) pthread_mutexattr_setrobust_np(attr, flag)
+# define pthread_mutex_consistent(mutex) pthread_mutex_consistent_np(mutex)
+# endif
+#endif /* MDB_USE_POSIX_MUTEX && MDB_USE_ROBUST */
+
+#if defined(MDB_OWNERDEAD) && (MDB_USE_ROBUST)
+#define MDB_ROBUST_SUPPORTED 1
+#endif
+
+#ifdef _WIN32
+#define MDB_USE_HASH 1
+#define MDB_PIDLOCK 0
+#define THREAD_RET DWORD
+#define pthread_t HANDLE
+#define pthread_mutex_t HANDLE
+#define pthread_cond_t HANDLE
+typedef HANDLE mdb_mutex_t, mdb_mutexref_t;
+#define pthread_key_t DWORD
+#define pthread_self() GetCurrentThreadId()
+#define pthread_key_create(x,y) \
+ ((*(x) = TlsAlloc()) == TLS_OUT_OF_INDEXES ? ErrCode() : 0)
+#define pthread_key_delete(x) TlsFree(x)
+#define pthread_getspecific(x) TlsGetValue(x)
+#define pthread_setspecific(x,y) (TlsSetValue(x,y) ? 0 : ErrCode())
+#define pthread_mutex_unlock(x) ReleaseMutex(*x)
+#define pthread_mutex_lock(x) WaitForSingleObject(*x, INFINITE)
+#define pthread_cond_signal(x) SetEvent(*x)
+#define pthread_cond_wait(cond,mutex) do{SignalObjectAndWait(*mutex, *cond, INFINITE, FALSE); WaitForSingleObject(*mutex, INFINITE);}while(0)
+#define THREAD_CREATE(thr,start,arg) \
+ (((thr) = CreateThread(NULL, 0, start, arg, 0, NULL)) ? 0 : ErrCode())
+#define THREAD_FINISH(thr) \
+ (WaitForSingleObject(thr, INFINITE) ? ErrCode() : 0)
+#define LOCK_MUTEX0(mutex) WaitForSingleObject(mutex, INFINITE)
+#define UNLOCK_MUTEX(mutex) ReleaseMutex(mutex)
+#define mdb_mutex_consistent(mutex) 0
+#define getpid() GetCurrentProcessId()
+#define MDB_FDATASYNC(fd) (!FlushFileBuffers(fd))
+#define MDB_MSYNC(addr,len,flags) (!FlushViewOfFile(addr,len))
+#define ErrCode() GetLastError()
+#define GET_PAGESIZE(x) {SYSTEM_INFO si; GetSystemInfo(&si); (x) = si.dwPageSize;}
+#define close(fd) (CloseHandle(fd) ? 0 : -1)
+#define munmap(ptr,len) UnmapViewOfFile(ptr)
+#ifdef PROCESS_QUERY_LIMITED_INFORMATION
+#define MDB_PROCESS_QUERY_LIMITED_INFORMATION PROCESS_QUERY_LIMITED_INFORMATION
+#else
+#define MDB_PROCESS_QUERY_LIMITED_INFORMATION 0x1000
+#endif
+#define Z "I"
+#else
+#define THREAD_RET void *
+#define THREAD_CREATE(thr,start,arg) pthread_create(&thr,NULL,start,arg)
+#define THREAD_FINISH(thr) pthread_join(thr,NULL)
+#define Z "z" /**< printf format modifier for size_t */
+
+ /** For MDB_LOCK_FORMAT: True if readers take a pid lock in the lockfile */
+#define MDB_PIDLOCK 1
+
+#ifdef MDB_USE_POSIX_SEM
+
+typedef sem_t *mdb_mutex_t, *mdb_mutexref_t;
+#define LOCK_MUTEX0(mutex) mdb_sem_wait(mutex)
+#define UNLOCK_MUTEX(mutex) sem_post(mutex)
+
+static int
+mdb_sem_wait(sem_t *sem)
+{
+ int rc;
+ while ((rc = sem_wait(sem)) && (rc = errno) == EINTR) ;
+ return rc;
+}
+
+#else /* MDB_USE_POSIX_MUTEX: */
+	/** Shared mutex/semaphore, as the original is stored in shared memory.
+	 *
+	 *	Not valid for copies; instead it can be assigned to an #mdb_mutexref_t.
+ * When mdb_mutexref_t is a pointer and mdb_mutex_t is not, then it
+ * is array[size 1] so it can be assigned to the pointer.
+ */
+typedef pthread_mutex_t mdb_mutex_t[1];
+ /** Reference to an #mdb_mutex_t */
+typedef pthread_mutex_t *mdb_mutexref_t;
+ /** Lock the reader or writer mutex.
+ * Returns 0 or a code to give #mdb_mutex_failed(), as in #LOCK_MUTEX().
+ */
+#define LOCK_MUTEX0(mutex) pthread_mutex_lock(mutex)
+ /** Unlock the reader or writer mutex.
+ */
+#define UNLOCK_MUTEX(mutex) pthread_mutex_unlock(mutex)
+ /** Mark mutex-protected data as repaired, after death of previous owner.
+ */
+#define mdb_mutex_consistent(mutex) pthread_mutex_consistent(mutex)
+#endif /* MDB_USE_POSIX_SEM */
+
+ /** Get the error code for the last failed system function.
+ */
+#define ErrCode() errno
+
+ /** An abstraction for a file handle.
+ * On POSIX systems file handles are small integers. On Windows
+ * they're opaque pointers.
+ */
+#define HANDLE int
+
+ /** A value for an invalid file handle.
+ * Mainly used to initialize file variables and signify that they are
+ * unused.
+ */
+#define INVALID_HANDLE_VALUE (-1)
+
+ /** Get the size of a memory page for the system.
+ * This is the basic size that the platform's memory manager uses, and is
+ * fundamental to the use of memory-mapped files.
+ */
+#define GET_PAGESIZE(x) ((x) = sysconf(_SC_PAGE_SIZE))
+#endif
+
+#if defined(_WIN32) || defined(MDB_USE_POSIX_SEM)
+#define MNAME_LEN 32
+#else
+#define MNAME_LEN (sizeof(pthread_mutex_t))
+#endif
+
+/** @} */
+
+#ifdef MDB_ROBUST_SUPPORTED
+ /** Lock mutex, handle any error, set rc = result.
+ * Return 0 on success, nonzero (not rc) on error.
+ */
+#define LOCK_MUTEX(rc, env, mutex) \
+ (((rc) = LOCK_MUTEX0(mutex)) && \
+ ((rc) = mdb_mutex_failed(env, mutex, rc)))
+static int mdb_mutex_failed(MDB_env *env, mdb_mutexref_t mutex, int rc);
+#else
+#define LOCK_MUTEX(rc, env, mutex) ((rc) = LOCK_MUTEX0(mutex))
+#define mdb_mutex_failed(env, mutex, rc) (rc)
+#endif
+
+#ifndef _WIN32
+/** A flag for opening a file and requesting synchronous data writes.
+ * This is only used when writing a meta page. It's not strictly needed;
+ * we could just do a normal write and then immediately perform a flush.
+ * But if this flag is available it saves us an extra system call.
+ *
+ * @note If O_DSYNC is undefined but exists in /usr/include,
+ * preferably set some compiler flag to get the definition.
+ */
+#ifndef MDB_DSYNC
+# ifdef O_DSYNC
+# define MDB_DSYNC O_DSYNC
+# else
+# define MDB_DSYNC O_SYNC
+# endif
+#endif
+#endif
+
+/** Function for flushing the data of a file. Define this to fsync
+ * if fdatasync() is not supported.
+ */
+#ifndef MDB_FDATASYNC
+# define MDB_FDATASYNC fdatasync
+#endif
+
+#ifndef MDB_MSYNC
+# define MDB_MSYNC(addr,len,flags) msync(addr,len,flags)
+#endif
+
+#ifndef MS_SYNC
+#define MS_SYNC 1
+#endif
+
+#ifndef MS_ASYNC
+#define MS_ASYNC 0
+#endif
+
+ /** A page number in the database.
+ * Note that 64 bit page numbers are overkill, since pages themselves
+ * already represent 12-13 bits of addressable memory, and the OS will
+ * always limit applications to a maximum of 63 bits of address space.
+ *
+ * @note In the #MDB_node structure, we only store 48 bits of this value,
+ * which thus limits us to only 60 bits of addressable data.
+ */
+typedef MDB_ID pgno_t;
+
+ /** A transaction ID.
+ * See struct MDB_txn.mt_txnid for details.
+ */
+typedef MDB_ID txnid_t;
+
+/** @defgroup debug Debug Macros
+ * @{
+ */
+#ifndef MDB_DEBUG
+ /** Enable debug output. Needs variable argument macros (a C99 feature).
+ * Set this to 1 for copious tracing. Set to 2 to add dumps of all IDLs
+ * read from and written to the database (used for free space management).
+ */
+#define MDB_DEBUG 0
+#endif
+
+#if MDB_DEBUG
+static int mdb_debug;
+static txnid_t mdb_debug_start;
+
+ /** Print a debug message with printf formatting.
+ * Requires double parenthesis around 2 or more args.
+ */
+# define DPRINTF(args) ((void) ((mdb_debug) && DPRINTF0 args))
+# define DPRINTF0(fmt, ...) \
+ fprintf(stderr, "%s:%d " fmt "\n", mdb_func_, __LINE__, __VA_ARGS__)
+#else
+# define DPRINTF(args) ((void) 0)
+#endif
+ /** Print a debug string.
+ * The string is printed literally, with no format processing.
+ */
+#define DPUTS(arg) DPRINTF(("%s", arg))
+ /** Debugging output value of a cursor DBI: Negative in a sub-cursor. */
+#define DDBI(mc) \
+ (((mc)->mc_flags & C_SUB) ? -(int)(mc)->mc_dbi : (int)(mc)->mc_dbi)
+/** @} */
+
+ /** @brief The maximum size of a database page.
+ *
+ * It is 32k or 64k, since value-PAGEBASE must fit in
+ * #MDB_page.%mp_upper.
+ *
+ * LMDB will use database pages < OS pages if needed.
+ * That causes more I/O in write transactions: The OS must
+ * know (read) the whole page before writing a partial page.
+ *
+ * Note that we don't currently support Huge pages. On Linux,
+ * regular data files cannot use Huge pages, and in general
+ * Huge pages aren't actually pageable. We rely on the OS
+ * demand-pager to read our data and page it out when memory
+ * pressure from other processes is high. So until OSs have
+ * actual paging support for Huge pages, they're not viable.
+ */
+#define MAX_PAGESIZE (PAGEBASE ? 0x10000 : 0x8000)
+
+ /** The minimum number of keys required in a database page.
+ * Setting this to a larger value will place a smaller bound on the
+ * maximum size of a data item. Data items larger than this size will
+ * be pushed into overflow pages instead of being stored directly in
+ * the B-tree node. This value used to default to 4. With a page size
+ * of 4096 bytes that meant that any item larger than 1024 bytes would
+ * go into an overflow page. That also meant that on average 2-3KB of
+ * each overflow page was wasted space. The value cannot be lower than
+ * 2 because then there would no longer be a tree structure. With this
+ * value, items larger than 2KB will go into overflow pages, and on
+ * average only 1KB will be wasted.
+ */
+#define MDB_MINKEYS 2
+
+ /** A stamp that identifies a file as an LMDB file.
+ * There's nothing special about this value other than that it is easily
+ * recognizable, and it will reflect any byte order mismatches.
+ */
+#define MDB_MAGIC 0xBEEFC0DE
+
+ /** The version number for a database's datafile format. */
+#define MDB_DATA_VERSION ((MDB_DEVEL) ? 999 : 1)
+ /** The version number for a database's lockfile format. */
+#define MDB_LOCK_VERSION 1
+
+ /** @brief The max size of a key we can write, or 0 for computed max.
+ *
+ * This macro should normally be left alone or set to 0.
+ * Note that a database with big keys or dupsort data cannot be
+ * reliably modified by a liblmdb which uses a smaller max.
+ * The default is 511 for backwards compat, or 0 when #MDB_DEVEL.
+ *
+ * Other values are allowed, for backwards compat. However:
+ * A value bigger than the computed max can break if you do not
+ * know what you are doing, and liblmdb <= 0.9.10 can break when
+ * modifying a DB with keys/dupsort data bigger than its max.
+ *
+ * Data items in an #MDB_DUPSORT database are also limited to
+ * this size, since they're actually keys of a sub-DB. Keys and
+ * #MDB_DUPSORT data items must fit on a node in a regular page.
+ */
+#ifndef MDB_MAXKEYSIZE
+#define MDB_MAXKEYSIZE ((MDB_DEVEL) ? 0 : 511)
+#endif
+
+ /** The maximum size of a key we can write to the environment. */
+#if MDB_MAXKEYSIZE
+#define ENV_MAXKEY(env) (MDB_MAXKEYSIZE)
+#else
+#define ENV_MAXKEY(env) ((env)->me_maxkey)
+#endif
+
+ /** @brief The maximum size of a data item.
+ *
+ * We only store a 32 bit value for node sizes.
+ */
+#define MAXDATASIZE 0xffffffffUL
+
+#if MDB_DEBUG
+ /** Key size which fits in a #DKBUF.
+ * @ingroup debug
+ */
+#define DKBUF_MAXKEYSIZE ((MDB_MAXKEYSIZE) > 0 ? (MDB_MAXKEYSIZE) : 511)
+ /** A key buffer.
+ * @ingroup debug
+ * This is used for printing a hex dump of a key's contents.
+ */
+#define DKBUF char kbuf[DKBUF_MAXKEYSIZE*2+1]
+ /** Display a key in hex.
+ * @ingroup debug
+ * Invoke a function to display a key in hex.
+ */
+#define DKEY(x) mdb_dkey(x, kbuf)
+#else
+#define DKBUF
+#define DKEY(x) 0
+#endif
+
+ /** An invalid page number.
+ * Mainly used to denote an empty tree.
+ */
+#define P_INVALID (~(pgno_t)0)
+
+ /** Test if the flags \b f are set in a flag word \b w. */
+#define F_ISSET(w, f) (((w) & (f)) == (f))
+
+ /** Round \b n up to an even number. */
+#define EVEN(n) (((n) + 1U) & -2) /* sign-extending -2 to match n+1U */
+
+ /** Used for offsets within a single page.
+ * Since memory pages are typically 4 or 8KB in size, 12-13 bits,
+ * this is plenty.
+ */
+typedef uint16_t indx_t;
+
+ /** Default size of memory map.
+ * This is certainly too small for any actual applications. Apps should always set
+ * the size explicitly using #mdb_env_set_mapsize().
+ */
+#define DEFAULT_MAPSIZE 1048576
+
+/** @defgroup readers Reader Lock Table
+ * Readers don't acquire any locks for their data access. Instead, they
+ * simply record their transaction ID in the reader table. The reader
+ * mutex is needed just to find an empty slot in the reader table. The
+ * slot's address is saved in thread-specific data so that subsequent read
+ * transactions started by the same thread need no further locking to proceed.
+ *
+ * If #MDB_NOTLS is set, the slot address is not saved in thread-specific data.
+ *
+ * No reader table is used if the database is on a read-only filesystem, or
+ * if #MDB_NOLOCK is set.
+ *
+ * Since the database uses multi-version concurrency control, readers don't
+ * actually need any locking. This table is used to keep track of which
+ * readers are using data from which old transactions, so that we'll know
+ * when a particular old transaction is no longer in use. Old transactions
+ * that have discarded any data pages can then have those pages reclaimed
+ * for use by a later write transaction.
+ *
+ * The lock table is constructed such that reader slots are aligned with the
+ * processor's cache line size. Any slot is only ever used by one thread.
+ * This alignment guarantees that there will be no contention or cache
+ * thrashing as threads update their own slot info, and also eliminates
+ * any need for locking when accessing a slot.
+ *
+ * A writer thread will scan every slot in the table to determine the oldest
+ * outstanding reader transaction. Any freed pages older than this will be
+ * reclaimed by the writer. The writer doesn't use any locks when scanning
+ * this table. This means that there's no guarantee that the writer will
+ * see the most up-to-date reader info, but that's not required for correct
+ * operation - all we need to know is the upper bound on the oldest reader;
+ * we don't care at all about the newest reader. So the only consequence of
+ * reading stale information here is that old pages might hang around a
+ * while longer before being reclaimed. That's actually good anyway, because
+ * the longer we delay reclaiming old pages, the more likely it is that a
+ * string of contiguous pages can be found after coalescing old pages from
+ * many old transactions together.
+ * @{
+ */
+ /** Number of slots in the reader table.
+ * This value was chosen somewhat arbitrarily. 126 readers plus a
+ * couple mutexes fit exactly into 8KB on my development machine.
+ * Applications should set the table size using #mdb_env_set_maxreaders().
+ */
+#define DEFAULT_READERS 126
+
+ /** The size of a CPU cache line in bytes. We want our lock structures
+ * aligned to this size to avoid false cache line sharing in the
+ * lock table.
+ * This value works for most CPUs. For Itanium this should be 128.
+ */
+#ifndef CACHELINE
+#define CACHELINE 64
+#endif
+
+ /** The information we store in a single slot of the reader table.
+ * In addition to a transaction ID, we also record the process and
+ * thread ID that owns a slot, so that we can detect stale information,
+ * e.g. threads or processes that went away without cleaning up.
+ * @note We currently don't check for stale records. We simply re-init
+ * the table when we know that we're the only process opening the
+ * lock file.
+ */
+typedef struct MDB_rxbody {
+ /** Current Transaction ID when this transaction began, or (txnid_t)-1.
+ * Multiple readers that start at the same time will probably have the
+ * same ID here. Again, it's not important to exclude them from
+ * anything; all we need to know is which version of the DB they
+ * started from so we can avoid overwriting any data used in that
+ * particular version.
+ */
+ volatile txnid_t mrb_txnid;
+ /** The process ID of the process owning this reader txn. */
+ volatile MDB_PID_T mrb_pid;
+ /** The thread ID of the thread owning this txn. */
+ volatile MDB_THR_T mrb_tid;
+} MDB_rxbody;
+
+ /** The actual reader record, with cacheline padding. */
+typedef struct MDB_reader {
+ union {
+ MDB_rxbody mrx;
+ /** shorthand for mrb_txnid */
+#define mr_txnid mru.mrx.mrb_txnid
+#define mr_pid mru.mrx.mrb_pid
+#define mr_tid mru.mrx.mrb_tid
+ /** cache line alignment */
+ char pad[(sizeof(MDB_rxbody)+CACHELINE-1) & ~(CACHELINE-1)];
+ } mru;
+} MDB_reader;
+
+ /** The header for the reader table.
+ * The table resides in a memory-mapped file. (This is a different file
+ * than is used for the main database.)
+ *
+ * For POSIX the actual mutexes reside in the shared memory of this
+ * mapped file. On Windows, mutexes are named objects allocated by the
+ * kernel; we store the mutex names in this mapped file so that other
+ * processes can grab them. This same approach is also used on
+ * MacOSX/Darwin (using named semaphores) since MacOSX doesn't support
+ * process-shared POSIX mutexes. For these cases where a named object
+ * is used, the object name is derived from a 64 bit FNV hash of the
+ * environment pathname. As such, naming collisions are extremely
+ * unlikely. If a collision occurs, the results are unpredictable.
+ */
+typedef struct MDB_txbody {
+ /** Stamp identifying this as an LMDB file. It must be set
+ * to #MDB_MAGIC. */
+ uint32_t mtb_magic;
+ /** Format of this lock file. Must be set to #MDB_LOCK_FORMAT. */
+ uint32_t mtb_format;
+#if defined(_WIN32) || defined(MDB_USE_POSIX_SEM)
+ char mtb_rmname[MNAME_LEN];
+#else
+ /** Mutex protecting access to this table.
+ * This is the reader table lock used with LOCK_MUTEX().
+ */
+ mdb_mutex_t mtb_rmutex;
+#endif
+ /** The ID of the last transaction committed to the database.
+ * This is recorded here only for convenience; the value can always
+ * be determined by reading the main database meta pages.
+ */
+ volatile txnid_t mtb_txnid;
+ /** The number of slots that have been used in the reader table.
+ * This always records the maximum count, it is not decremented
+ * when readers release their slots.
+ */
+ volatile unsigned mtb_numreaders;
+} MDB_txbody;
+
+ /** The actual reader table definition. */
+typedef struct MDB_txninfo {
+ union {
+ MDB_txbody mtb;
+#define mti_magic mt1.mtb.mtb_magic
+#define mti_format mt1.mtb.mtb_format
+#define mti_rmutex mt1.mtb.mtb_rmutex
+#define mti_rmname mt1.mtb.mtb_rmname
+#define mti_txnid mt1.mtb.mtb_txnid
+#define mti_numreaders mt1.mtb.mtb_numreaders
+ char pad[(sizeof(MDB_txbody)+CACHELINE-1) & ~(CACHELINE-1)];
+ } mt1;
+ union {
+#if defined(_WIN32) || defined(MDB_USE_POSIX_SEM)
+ char mt2_wmname[MNAME_LEN];
+#define mti_wmname mt2.mt2_wmname
+#else
+ mdb_mutex_t mt2_wmutex;
+#define mti_wmutex mt2.mt2_wmutex
+#endif
+ char pad[(MNAME_LEN+CACHELINE-1) & ~(CACHELINE-1)];
+ } mt2;
+ MDB_reader mti_readers[1];
+} MDB_txninfo;
+
+ /** Lockfile format signature: version, features and field layout */
+#define MDB_LOCK_FORMAT \
+ ((uint32_t) \
+ ((MDB_LOCK_VERSION) \
+ /* Flags which describe functionality */ \
+ + (((MDB_PIDLOCK) != 0) << 16)))
+/** @} */
+
+/** Common header for all page types. The page type depends on #mp_flags.
+ *
+ * #P_BRANCH and #P_LEAF pages have unsorted '#MDB_node's at the end, with
+ * sorted #mp_ptrs[] entries referring to them. Exception: #P_LEAF2 pages
+ * omit mp_ptrs and pack sorted #MDB_DUPFIXED values after the page header.
+ *
+ * #P_OVERFLOW records occupy one or more contiguous pages where only the
+ * first has a page header. They hold the real data of #F_BIGDATA nodes.
+ *
+ * #P_SUBP sub-pages are small leaf "pages" with duplicate data.
+ * A node with flag #F_DUPDATA but not #F_SUBDATA contains a sub-page.
+ * (Duplicate data can also go in sub-databases, which use normal pages.)
+ *
+ * #P_META pages contain #MDB_meta, the start point of an LMDB snapshot.
+ *
+ * Each non-metapage up to #MDB_meta.%mm_last_pg is reachable exactly once
+ * in the snapshot: Either used by a database or listed in a freeDB record.
+ */
+typedef struct MDB_page {
+#define mp_pgno mp_p.p_pgno
+#define mp_next mp_p.p_next
+ union {
+ pgno_t p_pgno; /**< page number */
+ struct MDB_page *p_next; /**< for in-memory list of freed pages */
+ } mp_p;
+ uint16_t mp_pad; /**< key size if this is a LEAF2 page */
+/** @defgroup mdb_page Page Flags
+ * @ingroup internal
+ * Flags for the page headers.
+ * @{
+ */
+#define P_BRANCH 0x01 /**< branch page */
+#define P_LEAF 0x02 /**< leaf page */
+#define P_OVERFLOW 0x04 /**< overflow page */
+#define P_META 0x08 /**< meta page */
+#define P_DIRTY 0x10 /**< dirty page, also set for #P_SUBP pages */
+#define P_LEAF2 0x20 /**< for #MDB_DUPFIXED records */
+#define P_SUBP 0x40 /**< for #MDB_DUPSORT sub-pages */
+#define P_LOOSE 0x4000 /**< page was dirtied then freed, can be reused */
+#define P_KEEP 0x8000 /**< leave this page alone during spill */
+/** @} */
+ uint16_t mp_flags; /**< @ref mdb_page */
+#define mp_lower mp_pb.pb.pb_lower
+#define mp_upper mp_pb.pb.pb_upper
+#define mp_pages mp_pb.pb_pages
+ union {
+ struct {
+ indx_t pb_lower; /**< lower bound of free space */
+ indx_t pb_upper; /**< upper bound of free space */
+ } pb;
+ uint32_t pb_pages; /**< number of overflow pages */
+ } mp_pb;
+ indx_t mp_ptrs[0]; /**< dynamic size */
+} MDB_page;
+
+/** Alternate page header, for 2-byte aligned access */
+typedef struct MDB_page2 {
+ uint16_t mp2_p[sizeof(pgno_t)/2];
+ uint16_t mp2_pad;
+ uint16_t mp2_flags;
+ indx_t mp2_lower;
+ indx_t mp2_upper;
+ indx_t mp2_ptrs[0];
+} MDB_page2;
+
+#define MP_PGNO(p) (((MDB_page2 *)(void *)(p))->mp2_p)
+#define MP_PAD(p) (((MDB_page2 *)(void *)(p))->mp2_pad)
+#define MP_FLAGS(p) (((MDB_page2 *)(void *)(p))->mp2_flags)
+#define MP_LOWER(p) (((MDB_page2 *)(void *)(p))->mp2_lower)
+#define MP_UPPER(p) (((MDB_page2 *)(void *)(p))->mp2_upper)
+#define MP_PTRS(p) (((MDB_page2 *)(void *)(p))->mp2_ptrs)
+
+ /** Size of the page header, excluding dynamic data at the end */
+#define PAGEHDRSZ ((unsigned) offsetof(MDB_page, mp_ptrs))
+
+ /** Address of first usable data byte in a page, after the header */
+#define METADATA(p) ((void *)((char *)(p) + PAGEHDRSZ))
+
+ /** ITS#7713, change PAGEBASE to handle 65536 byte pages */
+#define PAGEBASE ((MDB_DEVEL) ? PAGEHDRSZ : 0)
+
+ /** Number of nodes on a page */
+#define NUMKEYS(p) ((MP_LOWER(p) - (PAGEHDRSZ-PAGEBASE)) >> 1)
+
+ /** The amount of space remaining in the page */
+#define SIZELEFT(p) (indx_t)(MP_UPPER(p) - MP_LOWER(p))
+
+ /** The percentage of space used in the page, in tenths of a percent. */
+#define PAGEFILL(env, p) (1000L * ((env)->me_psize - PAGEHDRSZ - SIZELEFT(p)) / \
+ ((env)->me_psize - PAGEHDRSZ))
+ /** The minimum page fill factor, in tenths of a percent.
+ * Pages emptier than this are candidates for merging.
+ */
+#define FILL_THRESHOLD 250
+
+ /** Test if a page is a leaf page */
+#define IS_LEAF(p) F_ISSET(MP_FLAGS(p), P_LEAF)
+ /** Test if a page is a LEAF2 page */
+#define IS_LEAF2(p) F_ISSET(MP_FLAGS(p), P_LEAF2)
+ /** Test if a page is a branch page */
+#define IS_BRANCH(p) F_ISSET(MP_FLAGS(p), P_BRANCH)
+ /** Test if a page is an overflow page */
+#define IS_OVERFLOW(p) F_ISSET(MP_FLAGS(p), P_OVERFLOW)
+ /** Test if a page is a sub page */
+#define IS_SUBP(p) F_ISSET(MP_FLAGS(p), P_SUBP)
+
+ /** The number of overflow pages needed to store the given size. */
+#define OVPAGES(size, psize) ((PAGEHDRSZ-1 + (size)) / (psize) + 1)
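+
+	/* Worked example (illustrative): with 4096-byte pages and a PAGEHDRSZ
+	 * of 16 (typical on 64-bit builds), a 10000-byte item needs
+	 * OVPAGES(10000, 4096) = (16-1 + 10000) / 4096 + 1 = 2 + 1 = 3 pages:
+	 * 10000 bytes plus one page header spans three 4096-byte pages.
+	 */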
+
+ /** Link in #MDB_txn.%mt_loose_pgs list.
+ * Kept outside the page header, which is needed when reusing the page.
+ */
+#define NEXT_LOOSE_PAGE(p) (*(MDB_page **)((p) + 2))
+
+ /** Header for a single key/data pair within a page.
+ * Used in pages of type #P_BRANCH and #P_LEAF without #P_LEAF2.
+ * We guarantee 2-byte alignment for 'MDB_node's.
+ *
+ * #mn_lo and #mn_hi are used for data size on leaf nodes, and for child
+ * pgno on branch nodes. On 64 bit platforms, #mn_flags is also used
+ * for pgno. (Branch nodes have no flags). Lo and hi are in host byte
+ * order in case some accesses can be optimized to 32-bit word access.
+ *
+ * Leaf node flags describe node contents. #F_BIGDATA says the node's
+ * data part is the page number of an overflow page with actual data.
+ * #F_DUPDATA and #F_SUBDATA can be combined giving duplicate data in
+ * a sub-page/sub-database, and named databases (just #F_SUBDATA).
+ */
+typedef struct MDB_node {
+ /** part of data size or pgno
+ * @{ */
+#if BYTE_ORDER == LITTLE_ENDIAN
+ unsigned short mn_lo, mn_hi;
+#else
+ unsigned short mn_hi, mn_lo;
+#endif
+ /** @} */
+/** @defgroup mdb_node Node Flags
+ * @ingroup internal
+ * Flags for node headers.
+ * @{
+ */
+#define F_BIGDATA 0x01 /**< data put on overflow page */
+#define F_SUBDATA 0x02 /**< data is a sub-database */
+#define F_DUPDATA 0x04 /**< data has duplicates */
+
+/** valid flags for #mdb_node_add() */
+#define NODE_ADD_FLAGS (F_DUPDATA|F_SUBDATA|MDB_RESERVE|MDB_APPEND)
+
+/** @} */
+ unsigned short mn_flags; /**< @ref mdb_node */
+ unsigned short mn_ksize; /**< key size */
+ char mn_data[1]; /**< key and data are appended here */
+} MDB_node;
+
+ /** Size of the node header, excluding dynamic data at the end */
+#define NODESIZE offsetof(MDB_node, mn_data)
+
+ /** Bit position of top word in page number, for shifting mn_flags */
+#define PGNO_TOPWORD ((pgno_t)-1 > 0xffffffffu ? 32 : 0)
+
+ /** Size of a node in a branch page with a given key.
+ * This is just the node header plus the key, there is no data.
+ */
+#define INDXSIZE(k) (NODESIZE + ((k) == NULL ? 0 : (k)->mv_size))
+
+ /** Size of a node in a leaf page with a given key and data.
+ * This is node header plus key plus data size.
+ */
+#define LEAFSIZE(k, d) (NODESIZE + (k)->mv_size + (d)->mv_size)
+
+ /** Address of node \b i in page \b p */
+#define NODEPTR(p, i) ((MDB_node *)((char *)(p) + MP_PTRS(p)[i] + PAGEBASE))
+
+ /** Address of the key for the node */
+#define NODEKEY(node) (void *)((node)->mn_data)
+
+ /** Address of the data for a node */
+#define NODEDATA(node) (void *)((char *)(node)->mn_data + (node)->mn_ksize)
+
+ /** Get the page number pointed to by a branch node */
+#define NODEPGNO(node) \
+ ((node)->mn_lo | ((pgno_t) (node)->mn_hi << 16) | \
+ (PGNO_TOPWORD ? ((pgno_t) (node)->mn_flags << PGNO_TOPWORD) : 0))
+ /** Set the page number in a branch node */
+#define SETPGNO(node,pgno) do { \
+ (node)->mn_lo = (pgno) & 0xffff; (node)->mn_hi = (pgno) >> 16; \
+ if (PGNO_TOPWORD) (node)->mn_flags = (pgno) >> PGNO_TOPWORD; } while(0)
+
+ /** Get the size of the data in a leaf node */
+#define NODEDSZ(node) ((node)->mn_lo | ((unsigned)(node)->mn_hi << 16))
+ /** Set the size of the data for a leaf node */
+#define SETDSZ(node,size) do { \
+ (node)->mn_lo = (size) & 0xffff; (node)->mn_hi = (size) >> 16;} while(0)
+ /** The size of a key in a node */
+#define NODEKSZ(node) ((node)->mn_ksize)
+
+ /** Copy a page number from src to dst */
+#ifdef MISALIGNED_OK
+#define COPY_PGNO(dst,src) dst = src
+#undef MP_PGNO
+#define MP_PGNO(p) ((p)->mp_pgno)
+#else
+#if SIZE_MAX > 4294967295UL
+#define COPY_PGNO(dst,src) do { \
+ unsigned short *s, *d; \
+ s = (unsigned short *)&(src); \
+ d = (unsigned short *)&(dst); \
+ *d++ = *s++; \
+ *d++ = *s++; \
+ *d++ = *s++; \
+ *d = *s; \
+} while (0)
+#else
+#define COPY_PGNO(dst,src) do { \
+ unsigned short *s, *d; \
+ s = (unsigned short *)&(src); \
+ d = (unsigned short *)&(dst); \
+ *d++ = *s++; \
+ *d = *s; \
+} while (0)
+#endif
+#endif
+ /** The address of a key in a LEAF2 page.
+ * LEAF2 pages are used for #MDB_DUPFIXED sorted-duplicate sub-DBs.
+ * There are no node headers, keys are stored contiguously.
+ */
+#define LEAF2KEY(p, i, ks) ((char *)(p) + PAGEHDRSZ + ((i)*(ks)))
+
+ /** Set the \b node's key into \b keyptr, if requested. */
+#define MDB_GET_KEY(node, keyptr) { if ((keyptr) != NULL) { \
+ (keyptr)->mv_size = NODEKSZ(node); (keyptr)->mv_data = NODEKEY(node); } }
+
+ /** Set the \b node's key into \b key. */
+#define MDB_GET_KEY2(node, key) { key.mv_size = NODEKSZ(node); key.mv_data = NODEKEY(node); }
+
+ /** Information about a single database in the environment. */
+typedef struct MDB_db {
+ uint32_t md_pad; /**< also ksize for LEAF2 pages */
+ uint16_t md_flags; /**< @ref mdb_dbi_open */
+ uint16_t md_depth; /**< depth of this tree */
+ pgno_t md_branch_pages; /**< number of internal pages */
+ pgno_t md_leaf_pages; /**< number of leaf pages */
+ pgno_t md_overflow_pages; /**< number of overflow pages */
+ size_t md_entries; /**< number of data items */
+ pgno_t md_root; /**< the root page of this tree */
+} MDB_db;
+
+#define MDB_VALID 0x8000 /**< DB handle is valid, for me_dbflags */
+#define PERSISTENT_FLAGS (0xffff & ~(MDB_VALID))
+ /** #mdb_dbi_open() flags */
+#define VALID_FLAGS (MDB_REVERSEKEY|MDB_DUPSORT|MDB_INTEGERKEY|MDB_DUPFIXED|\
+ MDB_INTEGERDUP|MDB_REVERSEDUP|MDB_CREATE)
+
+ /** Handle for the DB used to track free pages. */
+#define FREE_DBI 0
+ /** Handle for the default DB. */
+#define MAIN_DBI 1
+ /** Number of DBs in metapage (free and main) - also hardcoded elsewhere */
+#define CORE_DBS 2
+
+ /** Number of meta pages - also hardcoded elsewhere */
+#define NUM_METAS 2
+
+ /** Meta page content.
+ * A meta page is the start point for accessing a database snapshot.
+ * Pages 0-1 are meta pages. Transaction N writes meta page #(N % 2).
+ */
+typedef struct MDB_meta {
+ /** Stamp identifying this as an LMDB file. It must be set
+ * to #MDB_MAGIC. */
+ uint32_t mm_magic;
+ /** Version number of this file. Must be set to #MDB_DATA_VERSION. */
+ uint32_t mm_version;
+ void *mm_address; /**< address for fixed mapping */
+ size_t mm_mapsize; /**< size of mmap region */
+ MDB_db mm_dbs[CORE_DBS]; /**< first is free space, 2nd is main db */
+ /** The size of pages used in this DB */
+#define mm_psize mm_dbs[FREE_DBI].md_pad
+ /** Any persistent environment flags. @ref mdb_env */
+#define mm_flags mm_dbs[FREE_DBI].md_flags
+ /** Last used page in the datafile.
+ * Actually the file may be shorter if the freeDB lists the final pages.
+ */
+ pgno_t mm_last_pg;
+ volatile txnid_t mm_txnid; /**< txnid that committed this page */
+} MDB_meta;
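+
+/* An illustrative commit sequence: with txn 6 as the newest commit,
+ * meta page 0 holds txnid 6 (6 % 2 == 0) and meta page 1 holds txnid 5.
+ * Committing txn 7 overwrites meta page 1, leaving meta page 0 intact
+ * as the previous snapshot; a crash mid-write loses only txn 7.
+ */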
+
+ /** Buffer for a stack-allocated meta page.
+ * The members define size and alignment, and silence type
+ * aliasing warnings. They are not used directly, since doing so
+ * could mean incorrectly using several union members in parallel.
+ */
+typedef union MDB_metabuf {
+ MDB_page mb_page;
+ struct {
+ char mm_pad[PAGEHDRSZ];
+ MDB_meta mm_meta;
+ } mb_metabuf;
+} MDB_metabuf;
+
+ /** Auxiliary DB info.
+ * The information here is mostly static/read-only. There is
+ * only a single copy of this record in the environment.
+ */
+typedef struct MDB_dbx {
+ MDB_val md_name; /**< name of the database */
+ MDB_cmp_func *md_cmp; /**< function for comparing keys */
+ MDB_cmp_func *md_dcmp; /**< function for comparing data items */
+ MDB_rel_func *md_rel; /**< user relocate function */
+ void *md_relctx; /**< user-provided context for md_rel */
+} MDB_dbx;
+
+ /** A database transaction.
+ * Every operation requires a transaction handle.
+ */
+struct MDB_txn {
+ MDB_txn *mt_parent; /**< parent of a nested txn */
+ /** Nested txn under this txn, set together with flag #MDB_TXN_HAS_CHILD */
+ MDB_txn *mt_child;
+ pgno_t mt_next_pgno; /**< next unallocated page */
+ /** The ID of this transaction. IDs are integers incrementing from 1.
+ * Only committed write transactions increment the ID. If a transaction
+ * aborts, the ID may be re-used by the next writer.
+ */
+ txnid_t mt_txnid;
+ MDB_env *mt_env; /**< the DB environment */
+ /** The list of pages that became unused during this transaction.
+ */
+ MDB_IDL mt_free_pgs;
+ /** The list of loose pages that became unused and may be reused
+ * in this transaction, linked through #NEXT_LOOSE_PAGE(page).
+ */
+ MDB_page *mt_loose_pgs;
+ /** Number of loose pages (#mt_loose_pgs) */
+ int mt_loose_count;
+ /** The sorted list of dirty pages we temporarily wrote to disk
+	 * because the dirty list was full. Page numbers in here are
+	 * shifted left by 1; deleted slots have the LSB set.
+ */
+ MDB_IDL mt_spill_pgs;
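+	/* Example of this encoding: a spilled page 5 is stored as 5<<1 == 10;
+	 * if the page is later unspilled (dirtied again), the slot becomes
+	 * 10|1 == 11 and is purged on the next spill pass.
+	 */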
+ union {
+ /** For write txns: Modified pages. Sorted when not MDB_WRITEMAP. */
+ MDB_ID2L dirty_list;
+ /** For read txns: This thread/txn's reader table slot, or NULL. */
+ MDB_reader *reader;
+ } mt_u;
+ /** Array of records for each DB known in the environment. */
+ MDB_dbx *mt_dbxs;
+ /** Array of MDB_db records for each known DB */
+ MDB_db *mt_dbs;
+ /** Array of sequence numbers for each DB handle */
+ unsigned int *mt_dbiseqs;
+/** @defgroup mt_dbflag Transaction DB Flags
+ * @ingroup internal
+ * @{
+ */
+#define DB_DIRTY 0x01 /**< DB was written in this txn */
+#define DB_STALE 0x02 /**< Named-DB record is older than txnID */
+#define DB_NEW 0x04 /**< Named-DB handle opened in this txn */
+#define DB_VALID 0x08 /**< DB handle is valid, see also #MDB_VALID */
+#define DB_USRVALID 0x10 /**< As #DB_VALID, but not set for #FREE_DBI */
+#define DB_DUPDATA 0x20 /**< DB is #MDB_DUPSORT data */
+/** @} */
+ /** In write txns, array of cursors for each DB */
+ MDB_cursor **mt_cursors;
+ /** Array of flags for each DB */
+ unsigned char *mt_dbflags;
+ /** Number of DB records in use, or 0 when the txn is finished.
+ * This number only ever increments until the txn finishes; we
+ * don't decrement it when individual DB handles are closed.
+ */
+ MDB_dbi mt_numdbs;
+
+/** @defgroup mdb_txn Transaction Flags
+ * @ingroup internal
+ * @{
+ */
+ /** #mdb_txn_begin() flags */
+#define MDB_TXN_BEGIN_FLAGS MDB_RDONLY
+#define MDB_TXN_RDONLY MDB_RDONLY /**< read-only transaction */
+ /* internal txn flags */
+#define MDB_TXN_WRITEMAP MDB_WRITEMAP /**< copy of #MDB_env flag in writers */
+#define MDB_TXN_FINISHED 0x01 /**< txn is finished or never began */
+#define MDB_TXN_ERROR 0x02 /**< txn is unusable after an error */
+#define MDB_TXN_DIRTY 0x04 /**< must write, even if dirty list is empty */
+#define MDB_TXN_SPILLS 0x08 /**< txn or a parent has spilled pages */
+#define MDB_TXN_HAS_CHILD 0x10 /**< txn has an #MDB_txn.%mt_child */
+ /** most operations on the txn are currently illegal */
+#define MDB_TXN_BLOCKED (MDB_TXN_FINISHED|MDB_TXN_ERROR|MDB_TXN_HAS_CHILD)
+/** @} */
+ unsigned int mt_flags; /**< @ref mdb_txn */
+ /** #dirty_list room: Array size - \#dirty pages visible to this txn.
+ * Includes ancestor txns' dirty pages not hidden by other txns'
+ * dirty/spilled pages. Thus commit(nested txn) has room to merge
+ * dirty_list into mt_parent after freeing hidden mt_parent pages.
+ */
+ unsigned int mt_dirty_room;
+};
+
+/** Enough space for 2^32 nodes with minimum of 2 keys per node. I.e., plenty.
+ * At 4 keys per node, enough for 2^64 nodes, so there's probably no need to
+ * raise this on a 64 bit machine.
+ */
+#define CURSOR_STACK 32
+
+struct MDB_xcursor;
+
+ /** Cursors are used for all DB operations.
+ * A cursor holds a path of (page pointer, key index) from the DB
+ * root to a position in the DB, plus other state. #MDB_DUPSORT
+ * cursors include an xcursor to the current data item. Write txns
+ * track their cursors and keep them up to date when data moves.
+ * Exception: An xcursor's pointer to a #P_SUBP page can be stale.
+ * (A node with #F_DUPDATA but no #F_SUBDATA contains a subpage).
+ */
+struct MDB_cursor {
+ /** Next cursor on this DB in this txn */
+ MDB_cursor *mc_next;
+ /** Backup of the original cursor if this cursor is a shadow */
+ MDB_cursor *mc_backup;
+ /** Context used for databases with #MDB_DUPSORT, otherwise NULL */
+ struct MDB_xcursor *mc_xcursor;
+ /** The transaction that owns this cursor */
+ MDB_txn *mc_txn;
+ /** The database handle this cursor operates on */
+ MDB_dbi mc_dbi;
+ /** The database record for this cursor */
+ MDB_db *mc_db;
+ /** The database auxiliary record for this cursor */
+ MDB_dbx *mc_dbx;
+ /** The @ref mt_dbflag for this database */
+ unsigned char *mc_dbflag;
+ unsigned short mc_snum; /**< number of pushed pages */
+ unsigned short mc_top; /**< index of top page, normally mc_snum-1 */
+/** @defgroup mdb_cursor Cursor Flags
+ * @ingroup internal
+ * Cursor state flags.
+ * @{
+ */
+#define C_INITIALIZED 0x01 /**< cursor has been initialized and is valid */
+#define C_EOF 0x02 /**< No more data */
+#define C_SUB 0x04 /**< Cursor is a sub-cursor */
+#define C_DEL 0x08 /**< last op was a cursor_del */
+#define C_UNTRACK 0x40 /**< Un-track cursor when closing */
+/** @} */
+ unsigned int mc_flags; /**< @ref mdb_cursor */
+ MDB_page *mc_pg[CURSOR_STACK]; /**< stack of pushed pages */
+ indx_t mc_ki[CURSOR_STACK]; /**< stack of page indices */
+};
+
+ /** Context for sorted-dup records.
+ * We could have gone to a fully recursive design, with arbitrarily
+ * deep nesting of sub-databases. But for now we only handle these
+ * levels - main DB, optional sub-DB, sorted-duplicate DB.
+ */
+typedef struct MDB_xcursor {
+ /** A sub-cursor for traversing the Dup DB */
+ MDB_cursor mx_cursor;
+ /** The database record for this Dup DB */
+ MDB_db mx_db;
+ /** The auxiliary DB record for this Dup DB */
+ MDB_dbx mx_dbx;
+ /** The @ref mt_dbflag for this Dup DB */
+ unsigned char mx_dbflag;
+} MDB_xcursor;
+
+ /** Check if there is an inited xcursor */
+#define XCURSOR_INITED(mc) \
+ ((mc)->mc_xcursor && ((mc)->mc_xcursor->mx_cursor.mc_flags & C_INITIALIZED))
+
+ /** Update the xcursor's sub-page pointer, if any, in \b mc. Needed
+ * when the node which contains the sub-page may have moved. Called
+ * with leaf page \b mp = mc->mc_pg[\b top].
+ */
+#define XCURSOR_REFRESH(mc, top, mp) do { \
+ MDB_page *xr_pg = (mp); \
+ MDB_node *xr_node; \
+ if (!XCURSOR_INITED(mc) || (mc)->mc_ki[top] >= NUMKEYS(xr_pg)) break; \
+ xr_node = NODEPTR(xr_pg, (mc)->mc_ki[top]); \
+ if ((xr_node->mn_flags & (F_DUPDATA|F_SUBDATA)) == F_DUPDATA) \
+ (mc)->mc_xcursor->mx_cursor.mc_pg[0] = NODEDATA(xr_node); \
+} while (0)
+
+ /** State of FreeDB old pages, stored in the MDB_env */
+typedef struct MDB_pgstate {
+ pgno_t *mf_pghead; /**< Reclaimed freeDB pages, or NULL before use */
+ txnid_t mf_pglast; /**< ID of last used record, or 0 if !mf_pghead */
+} MDB_pgstate;
+
+ /** The database environment. */
+struct MDB_env {
+ HANDLE me_fd; /**< The main data file */
+ HANDLE me_lfd; /**< The lock file */
+ HANDLE me_mfd; /**< For writing and syncing the meta pages */
+ /** Failed to update the meta page. Probably an I/O error. */
+#define MDB_FATAL_ERROR 0x80000000U
+ /** Some fields are initialized. */
+#define MDB_ENV_ACTIVE 0x20000000U
+ /** me_txkey is set */
+#define MDB_ENV_TXKEY 0x10000000U
+ /** fdatasync is unreliable */
+#define MDB_FSYNCONLY 0x08000000U
+ uint32_t me_flags; /**< @ref mdb_env */
+ unsigned int me_psize; /**< DB page size, inited from me_os_psize */
+ unsigned int me_os_psize; /**< OS page size, from #GET_PAGESIZE */
+ unsigned int me_maxreaders; /**< size of the reader table */
+ /** Max #MDB_txninfo.%mti_numreaders of interest to #mdb_env_close() */
+ volatile int me_close_readers;
+ MDB_dbi me_numdbs; /**< number of DBs opened */
+ MDB_dbi me_maxdbs; /**< size of the DB table */
+ MDB_PID_T me_pid; /**< process ID of this env */
+ char *me_path; /**< path to the DB files */
+ char *me_map; /**< the memory map of the data file */
+ MDB_txninfo *me_txns; /**< the memory map of the lock file or NULL */
+ MDB_meta *me_metas[NUM_METAS]; /**< pointers to the two meta pages */
+ void *me_pbuf; /**< scratch area for DUPSORT put() */
+ MDB_txn *me_txn; /**< current write transaction */
+ MDB_txn *me_txn0; /**< prealloc'd write transaction */
+ size_t me_mapsize; /**< size of the data memory map */
+ off_t me_size; /**< current file size */
+ pgno_t me_maxpg; /**< me_mapsize / me_psize */
+ MDB_dbx *me_dbxs; /**< array of static DB info */
+ uint16_t *me_dbflags; /**< array of flags from MDB_db.md_flags */
+ unsigned int *me_dbiseqs; /**< array of dbi sequence numbers */
+ pthread_key_t me_txkey; /**< thread-key for readers */
+ txnid_t me_pgoldest; /**< ID of oldest reader last time we looked */
+ MDB_pgstate me_pgstate; /**< state of old pages from freeDB */
+# define me_pglast me_pgstate.mf_pglast
+# define me_pghead me_pgstate.mf_pghead
+ MDB_page *me_dpages; /**< list of malloc'd blocks for re-use */
+ /** IDL of pages that became unused in a write txn */
+ MDB_IDL me_free_pgs;
+ /** ID2L of pages written during a write txn. Length MDB_IDL_UM_SIZE. */
+ MDB_ID2L me_dirty_list;
+ /** Max number of freelist items that can fit in a single overflow page */
+ int me_maxfree_1pg;
+ /** Max size of a node on a page */
+ unsigned int me_nodemax;
+#if !(MDB_MAXKEYSIZE)
+ unsigned int me_maxkey; /**< max size of a key */
+#endif
+ int me_live_reader; /**< have liveness lock in reader table */
+#ifdef _WIN32
+ int me_pidquery; /**< Used in OpenProcess */
+#endif
+#ifdef MDB_USE_POSIX_MUTEX /* Posix mutexes reside in shared mem */
+# define me_rmutex me_txns->mti_rmutex /**< Shared reader lock */
+# define me_wmutex me_txns->mti_wmutex /**< Shared writer lock */
+#else
+ mdb_mutex_t me_rmutex;
+ mdb_mutex_t me_wmutex;
+#endif
+ void *me_userctx; /**< User-settable context */
+ MDB_assert_func *me_assert_func; /**< Callback for assertion failures */
+};
+
+ /** Nested transaction */
+typedef struct MDB_ntxn {
+ MDB_txn mnt_txn; /**< the transaction */
+ MDB_pgstate mnt_pgstate; /**< parent transaction's saved freestate */
+} MDB_ntxn;
+
+ /** max number of pages to commit in one writev() call */
+#define MDB_COMMIT_PAGES 64
+#if defined(IOV_MAX) && IOV_MAX < MDB_COMMIT_PAGES
+#undef MDB_COMMIT_PAGES
+#define MDB_COMMIT_PAGES IOV_MAX
+#endif
+
+ /** max bytes to write in one call */
+#define MAX_WRITE (0x40000000U >> (sizeof(ssize_t) == 4))
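+/* i.e. 1GB (0x40000000) where ssize_t is 8 bytes; on 32-bit builds the
+ * comparison yields 1, halving the limit to 512MB (0x20000000).
+ */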
+
+ /** Check \b txn and \b dbi arguments to a function */
+#define TXN_DBI_EXIST(txn, dbi, validity) \
+ ((txn) && (dbi)<(txn)->mt_numdbs && ((txn)->mt_dbflags[dbi] & (validity)))
+
+ /** Check for misused \b dbi handles */
+#define TXN_DBI_CHANGED(txn, dbi) \
+ ((txn)->mt_dbiseqs[dbi] != (txn)->mt_env->me_dbiseqs[dbi])
+
+static int mdb_page_alloc(MDB_cursor *mc, int num, MDB_page **mp);
+static int mdb_page_new(MDB_cursor *mc, uint32_t flags, int num, MDB_page **mp);
+static int mdb_page_touch(MDB_cursor *mc);
+
+#define MDB_END_NAMES {"committed", "empty-commit", "abort", "reset", \
+ "reset-tmp", "fail-begin", "fail-beginchild"}
+enum {
+ /* mdb_txn_end operation number, for logging */
+ MDB_END_COMMITTED, MDB_END_EMPTY_COMMIT, MDB_END_ABORT, MDB_END_RESET,
+ MDB_END_RESET_TMP, MDB_END_FAIL_BEGIN, MDB_END_FAIL_BEGINCHILD
+};
+#define MDB_END_OPMASK 0x0F /**< mask for #mdb_txn_end() operation number */
+#define MDB_END_UPDATE 0x10 /**< update env state (DBIs) */
+#define MDB_END_FREE 0x20 /**< free txn unless it is #MDB_env.%me_txn0 */
+#define MDB_END_SLOT MDB_NOTLS /**< release any reader slot if #MDB_NOTLS */
+static void mdb_txn_end(MDB_txn *txn, unsigned mode);
+
+static int mdb_page_get(MDB_cursor *mc, pgno_t pgno, MDB_page **mp, int *lvl);
+static int mdb_page_search_root(MDB_cursor *mc,
+ MDB_val *key, int modify);
+#define MDB_PS_MODIFY 1
+#define MDB_PS_ROOTONLY 2
+#define MDB_PS_FIRST 4
+#define MDB_PS_LAST 8
+static int mdb_page_search(MDB_cursor *mc,
+ MDB_val *key, int flags);
+static int mdb_page_merge(MDB_cursor *csrc, MDB_cursor *cdst);
+
+#define MDB_SPLIT_REPLACE MDB_APPENDDUP /**< newkey is not new */
+static int mdb_page_split(MDB_cursor *mc, MDB_val *newkey, MDB_val *newdata,
+ pgno_t newpgno, unsigned int nflags);
+
+static int mdb_env_read_header(MDB_env *env, MDB_meta *meta);
+static MDB_meta *mdb_env_pick_meta(const MDB_env *env);
+static int mdb_env_write_meta(MDB_txn *txn);
+#if defined(MDB_USE_POSIX_MUTEX) && !defined(MDB_ROBUST_SUPPORTED) /* Drop unused excl arg */
+# define mdb_env_close0(env, excl) mdb_env_close1(env)
+#endif
+static void mdb_env_close0(MDB_env *env, int excl);
+
+static MDB_node *mdb_node_search(MDB_cursor *mc, MDB_val *key, int *exactp);
+static int mdb_node_add(MDB_cursor *mc, indx_t indx,
+ MDB_val *key, MDB_val *data, pgno_t pgno, unsigned int flags);
+static void mdb_node_del(MDB_cursor *mc, int ksize);
+static void mdb_node_shrink(MDB_page *mp, indx_t indx);
+static int mdb_node_move(MDB_cursor *csrc, MDB_cursor *cdst, int fromleft);
+static int mdb_node_read(MDB_cursor *mc, MDB_node *leaf, MDB_val *data);
+static size_t mdb_leaf_size(MDB_env *env, MDB_val *key, MDB_val *data);
+static size_t mdb_branch_size(MDB_env *env, MDB_val *key);
+
+static int mdb_rebalance(MDB_cursor *mc);
+static int mdb_update_key(MDB_cursor *mc, MDB_val *key);
+
+static void mdb_cursor_pop(MDB_cursor *mc);
+static int mdb_cursor_push(MDB_cursor *mc, MDB_page *mp);
+
+static int mdb_cursor_del0(MDB_cursor *mc);
+static int mdb_del0(MDB_txn *txn, MDB_dbi dbi, MDB_val *key, MDB_val *data, unsigned flags);
+static int mdb_cursor_sibling(MDB_cursor *mc, int move_right);
+static int mdb_cursor_next(MDB_cursor *mc, MDB_val *key, MDB_val *data, MDB_cursor_op op);
+static int mdb_cursor_prev(MDB_cursor *mc, MDB_val *key, MDB_val *data, MDB_cursor_op op);
+static int mdb_cursor_set(MDB_cursor *mc, MDB_val *key, MDB_val *data, MDB_cursor_op op,
+ int *exactp);
+static int mdb_cursor_first(MDB_cursor *mc, MDB_val *key, MDB_val *data);
+static int mdb_cursor_last(MDB_cursor *mc, MDB_val *key, MDB_val *data);
+
+static void mdb_cursor_init(MDB_cursor *mc, MDB_txn *txn, MDB_dbi dbi, MDB_xcursor *mx);
+static void mdb_xcursor_init0(MDB_cursor *mc);
+static void mdb_xcursor_init1(MDB_cursor *mc, MDB_node *node);
+static void mdb_xcursor_init2(MDB_cursor *mc, MDB_xcursor *src_mx, int force);
+
+static int mdb_drop0(MDB_cursor *mc, int subs);
+static void mdb_default_cmp(MDB_txn *txn, MDB_dbi dbi);
+static int mdb_reader_check0(MDB_env *env, int rlocked, int *dead);
+
+/** @cond */
+static MDB_cmp_func mdb_cmp_memn, mdb_cmp_memnr, mdb_cmp_int, mdb_cmp_cint, mdb_cmp_long;
+/** @endcond */
+
+/** Compare two items pointing at size_t's of unknown alignment. */
+#ifdef MISALIGNED_OK
+# define mdb_cmp_clong mdb_cmp_long
+#else
+# define mdb_cmp_clong mdb_cmp_cint
+#endif
+
+#ifdef _WIN32
+static SECURITY_DESCRIPTOR mdb_null_sd;
+static SECURITY_ATTRIBUTES mdb_all_sa;
+static int mdb_sec_inited;
+
+struct MDB_name;
+static int utf8_to_utf16(const char *src, struct MDB_name *dst, int xtra);
+#endif
+
+/** Return the library version info. */
+char * ESECT
+mdb_version(int *major, int *minor, int *patch)
+{
+ if (major) *major = MDB_VERSION_MAJOR;
+ if (minor) *minor = MDB_VERSION_MINOR;
+ if (patch) *patch = MDB_VERSION_PATCH;
+ return MDB_VERSION_STRING;
+}
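+
+/* A caller-side usage sketch (not part of this file); any out-pointer
+ * may be NULL when only the version string is wanted:
+ *
+ *	int major, minor, patch;
+ *	char *v = mdb_version(&major, &minor, &patch);
+ *	printf("%s (%d.%d.%d)\n", v, major, minor, patch);
+ */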
+
+/** Table of descriptions for LMDB @ref errors */
+static char *const mdb_errstr[] = {
+ "MDB_KEYEXIST: Key/data pair already exists",
+ "MDB_NOTFOUND: No matching key/data pair found",
+ "MDB_PAGE_NOTFOUND: Requested page not found",
+ "MDB_CORRUPTED: Located page was wrong type",
+ "MDB_PANIC: Update of meta page failed or environment had fatal error",
+ "MDB_VERSION_MISMATCH: Database environment version mismatch",
+ "MDB_INVALID: File is not an LMDB file",
+ "MDB_MAP_FULL: Environment mapsize limit reached",
+ "MDB_DBS_FULL: Environment maxdbs limit reached",
+ "MDB_READERS_FULL: Environment maxreaders limit reached",
+ "MDB_TLS_FULL: Thread-local storage keys full - too many environments open",
+ "MDB_TXN_FULL: Transaction has too many dirty pages - transaction too big",
+ "MDB_CURSOR_FULL: Internal error - cursor stack limit reached",
+ "MDB_PAGE_FULL: Internal error - page has no more space",
+ "MDB_MAP_RESIZED: Database contents grew beyond environment mapsize",
+ "MDB_INCOMPATIBLE: Operation and DB incompatible, or DB flags changed",
+ "MDB_BAD_RSLOT: Invalid reuse of reader locktable slot",
+ "MDB_BAD_TXN: Transaction must abort, has a child, or is invalid",
+ "MDB_BAD_VALSIZE: Unsupported size of key/DB name/data, or wrong DUPFIXED size",
+ "MDB_BAD_DBI: The specified DBI handle was closed/changed unexpectedly",
+};
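+
+/* The table is indexed by distance from the first error code; e.g. with
+ * the lmdb.h values MDB_KEYEXIST == -30799 and MDB_NOTFOUND == -30798,
+ * mdb_strerror(MDB_NOTFOUND) computes i == (-30798) - (-30799) == 1 and
+ * returns mdb_errstr[1].
+ */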
+
+char *
+mdb_strerror(int err)
+{
+#ifdef _WIN32
+ /** HACK: pad 4KB on stack over the buf. Return system msgs in buf.
+ * This works as long as no function between the call to mdb_strerror
+ * and the actual use of the message uses more than 4K of stack.
+ */
+#define MSGSIZE 1024
+#define PADSIZE 4096
+ char buf[MSGSIZE+PADSIZE], *ptr = buf;
+#endif
+ int i;
+ if (!err)
+ return ("Successful return: 0");
+
+ if (err >= MDB_KEYEXIST && err <= MDB_LAST_ERRCODE) {
+ i = err - MDB_KEYEXIST;
+ return mdb_errstr[i];
+ }
+
+#ifdef _WIN32
+ /* These are the C-runtime error codes we use. The comment indicates
+ * their numeric value, and the Win32 error they would correspond to
+ * if the error actually came from a Win32 API. A major mess, we should
+ * have used LMDB-specific error codes for everything.
+ */
+ switch(err) {
+ case ENOENT: /* 2, FILE_NOT_FOUND */
+ case EIO: /* 5, ACCESS_DENIED */
+ case ENOMEM: /* 12, INVALID_ACCESS */
+ case EACCES: /* 13, INVALID_DATA */
+ case EBUSY: /* 16, CURRENT_DIRECTORY */
+ case EINVAL: /* 22, BAD_COMMAND */
+ case ENOSPC: /* 28, OUT_OF_PAPER */
+ return strerror(err);
+ default:
+ ;
+ }
+ buf[0] = 0;
+ FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM |
+ FORMAT_MESSAGE_IGNORE_INSERTS,
+ NULL, err, 0, ptr, MSGSIZE, (va_list *)buf+MSGSIZE);
+ return ptr;
+#else
+ if (err < 0)
+ return "Invalid error code";
+ return strerror(err);
+#endif
+}
+
+/** assert(3) variant in cursor context */
+#define mdb_cassert(mc, expr) mdb_assert0((mc)->mc_txn->mt_env, expr, #expr)
+/** assert(3) variant in transaction context */
+#define mdb_tassert(txn, expr) mdb_assert0((txn)->mt_env, expr, #expr)
+/** assert(3) variant in environment context */
+#define mdb_eassert(env, expr) mdb_assert0(env, expr, #expr)
+
+#ifndef NDEBUG
+# define mdb_assert0(env, expr, expr_txt) ((expr) ? (void)0 : \
+ mdb_assert_fail(env, expr_txt, mdb_func_, __FILE__, __LINE__))
+
+static void ESECT
+mdb_assert_fail(MDB_env *env, const char *expr_txt,
+ const char *func, const char *file, int line)
+{
+ char buf[400];
+ sprintf(buf, "%.100s:%d: Assertion '%.200s' failed in %.40s()",
+ file, line, expr_txt, func);
+ if (env->me_assert_func)
+ env->me_assert_func(env, buf);
+ fprintf(stderr, "%s\n", buf);
+ abort();
+}
+#else
+# define mdb_assert0(env, expr, expr_txt) ((void) 0)
+#endif /* NDEBUG */
+
+#if MDB_DEBUG
+/** Return the page number of \b mp which may be sub-page, for debug output */
+static pgno_t
+mdb_dbg_pgno(MDB_page *mp)
+{
+ pgno_t ret;
+ COPY_PGNO(ret, MP_PGNO(mp));
+ return ret;
+}
+
+/** Display a key in hexadecimal and return the address of the result.
+ * @param[in] key the key to display
+ * @param[in] buf the buffer to write into. Should always be #DKBUF.
+ * @return The key in hexadecimal form.
+ */
+char *
+mdb_dkey(MDB_val *key, char *buf)
+{
+	char *ptr = buf;
+	unsigned char *c;
+	unsigned int i;
+
+	if (!key)
+		return "";
+	c = key->mv_data;	/* only dereference key after the NULL check */
+
+ if (key->mv_size > DKBUF_MAXKEYSIZE)
+ return "MDB_MAXKEYSIZE";
+ /* may want to make this a dynamic check: if the key is mostly
+ * printable characters, print it as-is instead of converting to hex.
+ */
+#if 1
+ buf[0] = '\0';
+ for (i=0; i<key->mv_size; i++)
+ ptr += sprintf(ptr, "%02x", *c++);
+#else
+ sprintf(buf, "%.*s", key->mv_size, key->mv_data);
+#endif
+ return buf;
+}
+
+static const char *
+mdb_leafnode_type(MDB_node *n)
+{
+ static char *const tp[2][2] = {{"", ": DB"}, {": sub-page", ": sub-DB"}};
+ return F_ISSET(n->mn_flags, F_BIGDATA) ? ": overflow page" :
+ tp[F_ISSET(n->mn_flags, F_DUPDATA)][F_ISSET(n->mn_flags, F_SUBDATA)];
+}
+
+/** Display all the keys in the page. */
+void
+mdb_page_list(MDB_page *mp)
+{
+ pgno_t pgno = mdb_dbg_pgno(mp);
+ const char *type, *state = (MP_FLAGS(mp) & P_DIRTY) ? ", dirty" : "";
+ MDB_node *node;
+ unsigned int i, nkeys, nsize, total = 0;
+ MDB_val key;
+ DKBUF;
+
+ switch (MP_FLAGS(mp) & (P_BRANCH|P_LEAF|P_LEAF2|P_META|P_OVERFLOW|P_SUBP)) {
+ case P_BRANCH: type = "Branch page"; break;
+ case P_LEAF: type = "Leaf page"; break;
+ case P_LEAF|P_SUBP: type = "Sub-page"; break;
+ case P_LEAF|P_LEAF2: type = "LEAF2 page"; break;
+ case P_LEAF|P_LEAF2|P_SUBP: type = "LEAF2 sub-page"; break;
+ case P_OVERFLOW:
+ fprintf(stderr, "Overflow page %"Z"u pages %u%s\n",
+ pgno, mp->mp_pages, state);
+ return;
+ case P_META:
+ fprintf(stderr, "Meta-page %"Z"u txnid %"Z"u\n",
+ pgno, ((MDB_meta *)METADATA(mp))->mm_txnid);
+ return;
+ default:
+ fprintf(stderr, "Bad page %"Z"u flags 0x%X\n", pgno, MP_FLAGS(mp));
+ return;
+ }
+
+ nkeys = NUMKEYS(mp);
+ fprintf(stderr, "%s %"Z"u numkeys %d%s\n", type, pgno, nkeys, state);
+
+ for (i=0; i<nkeys; i++) {
+ if (IS_LEAF2(mp)) { /* LEAF2 pages have no mp_ptrs[] or node headers */
+ key.mv_size = nsize = mp->mp_pad;
+ key.mv_data = LEAF2KEY(mp, i, nsize);
+ total += nsize;
+ fprintf(stderr, "key %d: nsize %d, %s\n", i, nsize, DKEY(&key));
+ continue;
+ }
+ node = NODEPTR(mp, i);
+ key.mv_size = node->mn_ksize;
+ key.mv_data = node->mn_data;
+ nsize = NODESIZE + key.mv_size;
+ if (IS_BRANCH(mp)) {
+ fprintf(stderr, "key %d: page %"Z"u, %s\n", i, NODEPGNO(node),
+ DKEY(&key));
+ total += nsize;
+ } else {
+ if (F_ISSET(node->mn_flags, F_BIGDATA))
+ nsize += sizeof(pgno_t);
+ else
+ nsize += NODEDSZ(node);
+ total += nsize;
+ nsize += sizeof(indx_t);
+ fprintf(stderr, "key %d: nsize %d, %s%s\n",
+ i, nsize, DKEY(&key), mdb_leafnode_type(node));
+ }
+ total = EVEN(total);
+ }
+ fprintf(stderr, "Total: header %d + contents %d + unused %d\n",
+ IS_LEAF2(mp) ? PAGEHDRSZ : PAGEBASE + MP_LOWER(mp), total, SIZELEFT(mp));
+}
+
+void
+mdb_cursor_chk(MDB_cursor *mc)
+{
+ unsigned int i;
+ MDB_node *node;
+ MDB_page *mp;
+
+ if (!mc->mc_snum || !(mc->mc_flags & C_INITIALIZED)) return;
+ for (i=0; i<mc->mc_top; i++) {
+ mp = mc->mc_pg[i];
+ node = NODEPTR(mp, mc->mc_ki[i]);
+ if (NODEPGNO(node) != mc->mc_pg[i+1]->mp_pgno)
+ printf("oops!\n");
+ }
+ if (mc->mc_ki[i] >= NUMKEYS(mc->mc_pg[i]))
+ printf("ack!\n");
+ if (XCURSOR_INITED(mc)) {
+ node = NODEPTR(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top]);
+ if (((node->mn_flags & (F_DUPDATA|F_SUBDATA)) == F_DUPDATA) &&
+ mc->mc_xcursor->mx_cursor.mc_pg[0] != NODEDATA(node)) {
+ printf("blah!\n");
+ }
+ }
+}
+#endif
+
+#if (MDB_DEBUG) > 2
+/** Count all the pages in each DB and in the freelist
+ * and make sure it matches the actual number of pages
+ * being used.
+ * All named DBs must be open for a correct count.
+ */
+static void mdb_audit(MDB_txn *txn)
+{
+ MDB_cursor mc;
+ MDB_val key, data;
+ MDB_ID freecount, count;
+ MDB_dbi i;
+ int rc;
+
+ freecount = 0;
+ mdb_cursor_init(&mc, txn, FREE_DBI, NULL);
+ while ((rc = mdb_cursor_get(&mc, &key, &data, MDB_NEXT)) == 0)
+ freecount += *(MDB_ID *)data.mv_data;
+ mdb_tassert(txn, rc == MDB_NOTFOUND);
+
+ count = 0;
+ for (i = 0; i<txn->mt_numdbs; i++) {
+ MDB_xcursor mx;
+ if (!(txn->mt_dbflags[i] & DB_VALID))
+ continue;
+ mdb_cursor_init(&mc, txn, i, &mx);
+ if (txn->mt_dbs[i].md_root == P_INVALID)
+ continue;
+ count += txn->mt_dbs[i].md_branch_pages +
+ txn->mt_dbs[i].md_leaf_pages +
+ txn->mt_dbs[i].md_overflow_pages;
+ if (txn->mt_dbs[i].md_flags & MDB_DUPSORT) {
+ rc = mdb_page_search(&mc, NULL, MDB_PS_FIRST);
+ for (; rc == MDB_SUCCESS; rc = mdb_cursor_sibling(&mc, 1)) {
+ unsigned j;
+ MDB_page *mp;
+ mp = mc.mc_pg[mc.mc_top];
+ for (j=0; j<NUMKEYS(mp); j++) {
+ MDB_node *leaf = NODEPTR(mp, j);
+ if (leaf->mn_flags & F_SUBDATA) {
+ MDB_db db;
+ memcpy(&db, NODEDATA(leaf), sizeof(db));
+ count += db.md_branch_pages + db.md_leaf_pages +
+ db.md_overflow_pages;
+ }
+ }
+ }
+ mdb_tassert(txn, rc == MDB_NOTFOUND);
+ }
+ }
+ if (freecount + count + NUM_METAS != txn->mt_next_pgno) {
+ fprintf(stderr, "audit: %"Z"u freecount: %"Z"u count: %"Z"u total: %"Z"u next_pgno: %"Z"u\n",
+ txn->mt_txnid, freecount, count+NUM_METAS,
+ freecount+count+NUM_METAS, txn->mt_next_pgno);
+ }
+}
+#endif
+
+int
+mdb_cmp(MDB_txn *txn, MDB_dbi dbi, const MDB_val *a, const MDB_val *b)
+{
+ return txn->mt_dbxs[dbi].md_cmp(a, b);
+}
+
+int
+mdb_dcmp(MDB_txn *txn, MDB_dbi dbi, const MDB_val *a, const MDB_val *b)
+{
+ MDB_cmp_func *dcmp = txn->mt_dbxs[dbi].md_dcmp;
+#if UINT_MAX < SIZE_MAX
+ if (dcmp == mdb_cmp_int && a->mv_size == sizeof(size_t))
+ dcmp = mdb_cmp_clong;
+#endif
+ return dcmp(a, b);
+}
+
+/** Allocate memory for a page.
+ * Re-use old malloc'd pages first for singletons, otherwise just malloc.
+ * Set #MDB_TXN_ERROR on failure.
+ */
+static MDB_page *
+mdb_page_malloc(MDB_txn *txn, unsigned num)
+{
+ MDB_env *env = txn->mt_env;
+ MDB_page *ret = env->me_dpages;
+ size_t psize = env->me_psize, sz = psize, off;
+ /* For ! #MDB_NOMEMINIT, psize counts how much to init.
+ * For a single page alloc, we init everything after the page header.
+ * For multi-page, we init the final page; if the caller needed that
+ * many pages they will be filling in at least up to the last page.
+ */
+ if (num == 1) {
+ if (ret) {
+ VGMEMP_ALLOC(env, ret, sz);
+ VGMEMP_DEFINED(ret, sizeof(ret->mp_next));
+ env->me_dpages = ret->mp_next;
+ return ret;
+ }
+ psize -= off = PAGEHDRSZ;
+ } else {
+ sz *= num;
+ off = sz - psize;
+ }
+ if ((ret = malloc(sz)) != NULL) {
+ VGMEMP_ALLOC(env, ret, sz);
+ if (!(env->me_flags & MDB_NOMEMINIT)) {
+ memset((char *)ret + off, 0, psize);
+ ret->mp_pad = 0;
+ }
+ } else {
+ txn->mt_flags |= MDB_TXN_ERROR;
+ }
+ return ret;
+}
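+
+/* Numeric sketch of the init sizing above, assuming me_psize == 4096,
+ * PAGEHDRSZ == 16, and MDB_NOMEMINIT unset:
+ *	num == 1: off == 16, zeroing the 4080 bytes after the page header;
+ *	num == 3: sz == 12288, off == 8192, zeroing only the final page,
+ *		since the caller fills everything before it anyway.
+ */
+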
+/** Free a single page.
+ * Saves single pages to a list, for future reuse.
+ * (This is not used for multi-page overflow pages.)
+ */
+static void
+mdb_page_free(MDB_env *env, MDB_page *mp)
+{
+ mp->mp_next = env->me_dpages;
+ VGMEMP_FREE(env, mp);
+ env->me_dpages = mp;
+}
+
+/** Free a dirty page */
+static void
+mdb_dpage_free(MDB_env *env, MDB_page *dp)
+{
+ if (!IS_OVERFLOW(dp) || dp->mp_pages == 1) {
+ mdb_page_free(env, dp);
+ } else {
+ /* large pages just get freed directly */
+ VGMEMP_FREE(env, dp);
+ free(dp);
+ }
+}
+
+/** Return all dirty pages to dpage list */
+static void
+mdb_dlist_free(MDB_txn *txn)
+{
+ MDB_env *env = txn->mt_env;
+ MDB_ID2L dl = txn->mt_u.dirty_list;
+ unsigned i, n = dl[0].mid;
+
+ for (i = 1; i <= n; i++) {
+ mdb_dpage_free(env, dl[i].mptr);
+ }
+ dl[0].mid = 0;
+}
+
+/** Loosen or free a single page.
+ * Saves single pages to a list for future reuse
+ * in this same txn. It has been pulled from the freeDB
+ * and already resides on the dirty list, but has been
+ * deleted. Use these pages first before pulling again
+ * from the freeDB.
+ *
+ * If the page wasn't dirtied in this txn, just add it
+ * to this txn's free list.
+ */
+static int
+mdb_page_loose(MDB_cursor *mc, MDB_page *mp)
+{
+ int loose = 0;
+ pgno_t pgno = mp->mp_pgno;
+ MDB_txn *txn = mc->mc_txn;
+
+ if ((mp->mp_flags & P_DIRTY) && mc->mc_dbi != FREE_DBI) {
+ if (txn->mt_parent) {
+ MDB_ID2 *dl = txn->mt_u.dirty_list;
+ /* If txn has a parent, make sure the page is in our
+ * dirty list.
+ */
+ if (dl[0].mid) {
+ unsigned x = mdb_mid2l_search(dl, pgno);
+ if (x <= dl[0].mid && dl[x].mid == pgno) {
+ if (mp != dl[x].mptr) { /* bad cursor? */
+ mc->mc_flags &= ~(C_INITIALIZED|C_EOF);
+ txn->mt_flags |= MDB_TXN_ERROR;
+ return MDB_CORRUPTED;
+ }
+ /* ok, it's ours */
+ loose = 1;
+ }
+ }
+ } else {
+ /* no parent txn, so it's just ours */
+ loose = 1;
+ }
+ }
+ if (loose) {
+ DPRINTF(("loosen db %d page %"Z"u", DDBI(mc),
+ mp->mp_pgno));
+ NEXT_LOOSE_PAGE(mp) = txn->mt_loose_pgs;
+ txn->mt_loose_pgs = mp;
+ txn->mt_loose_count++;
+ mp->mp_flags |= P_LOOSE;
+ } else {
+ int rc = mdb_midl_append(&txn->mt_free_pgs, pgno);
+ if (rc)
+ return rc;
+ }
+
+ return MDB_SUCCESS;
+}
+
+/** Set or clear P_KEEP in dirty, non-overflow, non-sub pages watched by txn.
+ * @param[in] mc A cursor handle for the current operation.
+ * @param[in] pflags Flags of the pages to update:
+ * P_DIRTY to set P_KEEP, P_DIRTY|P_KEEP to clear it.
+ * @param[in] all No shortcuts. Needed except after a full #mdb_page_flush().
+ * @return 0 on success, non-zero on failure.
+ */
+static int
+mdb_pages_xkeep(MDB_cursor *mc, unsigned pflags, int all)
+{
+ enum { Mask = P_SUBP|P_DIRTY|P_LOOSE|P_KEEP };
+ MDB_txn *txn = mc->mc_txn;
+ MDB_cursor *m3, *m0 = mc;
+ MDB_xcursor *mx;
+ MDB_page *dp, *mp;
+ MDB_node *leaf;
+ unsigned i, j;
+ int rc = MDB_SUCCESS, level;
+
+ /* Mark pages seen by cursors */
+ if (mc->mc_flags & C_UNTRACK)
+ mc = NULL; /* will find mc in mt_cursors */
+ for (i = txn->mt_numdbs;; mc = txn->mt_cursors[--i]) {
+ for (; mc; mc=mc->mc_next) {
+ if (!(mc->mc_flags & C_INITIALIZED))
+ continue;
+ for (m3 = mc;; m3 = &mx->mx_cursor) {
+ mp = NULL;
+ for (j=0; j<m3->mc_snum; j++) {
+ mp = m3->mc_pg[j];
+ if ((mp->mp_flags & Mask) == pflags)
+ mp->mp_flags ^= P_KEEP;
+ }
+ mx = m3->mc_xcursor;
+ /* Proceed to mx if it is at a sub-database */
+ if (! (mx && (mx->mx_cursor.mc_flags & C_INITIALIZED)))
+ break;
+ if (! (mp && (mp->mp_flags & P_LEAF)))
+ break;
+ leaf = NODEPTR(mp, m3->mc_ki[j-1]);
+ if (!(leaf->mn_flags & F_SUBDATA))
+ break;
+ }
+ }
+ if (i == 0)
+ break;
+ }
+
+ if (all) {
+ /* Mark dirty root pages */
+ for (i=0; i<txn->mt_numdbs; i++) {
+ if (txn->mt_dbflags[i] & DB_DIRTY) {
+ pgno_t pgno = txn->mt_dbs[i].md_root;
+ if (pgno == P_INVALID)
+ continue;
+ if ((rc = mdb_page_get(m0, pgno, &dp, &level)) != MDB_SUCCESS)
+ break;
+ if ((dp->mp_flags & Mask) == pflags && level <= 1)
+ dp->mp_flags ^= P_KEEP;
+ }
+ }
+ }
+
+ return rc;
+}
+
+static int mdb_page_flush(MDB_txn *txn, int keep);
+
+/** Spill pages from the dirty list back to disk.
+ * This is intended to prevent running into #MDB_TXN_FULL situations,
+ * but note that they may still occur in a few cases:
+ * 1) our estimate of the txn size could be too small. Currently this
+ * seems unlikely, except with a large number of #MDB_MULTIPLE items.
+ * 2) child txns may run out of space if their parents dirtied a
+ * lot of pages and never spilled them. TODO: we probably should do
+ * a preemptive spill during #mdb_txn_begin() of a child txn, if
+ * the parent's dirty_room is below a given threshold.
+ *
+ * Otherwise, if not using nested txns, it is expected that apps will
+ * not run into #MDB_TXN_FULL any more. The pages are flushed to disk
+ * the same way as for a txn commit, i.e. their P_DIRTY flag is cleared.
+ * If the txn never references them again, they can be left alone.
+ * If the txn only reads them, they can be used without any fuss.
+ * If the txn writes them again, they can be dirtied immediately without
+ * going thru all of the work of #mdb_page_touch(). Such references are
+ * handled by #mdb_page_unspill().
+ *
+ * Also note, we never spill DB root pages, nor pages of active cursors,
+ * because we'll need these back again soon anyway. And in nested txns,
+ * we can't spill a page in a child txn if it was already spilled in a
+ * parent txn. That would alter the parent txn's data even though
+ * the child hasn't committed yet, and we'd have no way to undo it if
+ * the child aborted.
+ *
+ * @param[in] m0 A cursor handle identifying the transaction and
+ * database for which we are checking space.
+ * @param[in] key For a put operation, the key being stored.
+ * @param[in] data For a put operation, the data being stored.
+ * @return 0 on success, non-zero on failure.
+ */
+static int
+mdb_page_spill(MDB_cursor *m0, MDB_val *key, MDB_val *data)
+{
+ MDB_txn *txn = m0->mc_txn;
+ MDB_page *dp;
+ MDB_ID2L dl = txn->mt_u.dirty_list;
+ unsigned int i, j, need;
+ int rc;
+
+ if (m0->mc_flags & C_SUB)
+ return MDB_SUCCESS;
+
+ /* Estimate how much space this op will take */
+ i = m0->mc_db->md_depth;
+ /* Named DBs also dirty the main DB */
+ if (m0->mc_dbi >= CORE_DBS)
+ i += txn->mt_dbs[MAIN_DBI].md_depth;
+ /* For puts, roughly factor in the key+data size */
+ if (key)
+ i += (LEAFSIZE(key, data) + txn->mt_env->me_psize) / txn->mt_env->me_psize;
+ i += i; /* double it for good measure */
+ need = i;
+
+ if (txn->mt_dirty_room > i)
+ return MDB_SUCCESS;
+
+ if (!txn->mt_spill_pgs) {
+ txn->mt_spill_pgs = mdb_midl_alloc(MDB_IDL_UM_MAX);
+ if (!txn->mt_spill_pgs)
+ return ENOMEM;
+ } else {
+ /* purge deleted slots */
+ MDB_IDL sl = txn->mt_spill_pgs;
+ unsigned int num = sl[0];
+ j=0;
+ for (i=1; i<=num; i++) {
+ if (!(sl[i] & 1))
+ sl[++j] = sl[i];
+ }
+ sl[0] = j;
+ }
+
+ /* Preserve pages which may soon be dirtied again */
+ if ((rc = mdb_pages_xkeep(m0, P_DIRTY, 1)) != MDB_SUCCESS)
+ goto done;
+
+ /* Less aggressive spill - we originally spilled the entire dirty list,
+ * with a few exceptions for cursor pages and DB root pages. But this
+ * turns out to be a lot of wasted effort because in a large txn many
+ * of those pages will need to be used again. So now we spill only 1/8th
+ * of the dirty pages. Testing revealed this to be a good tradeoff,
+ * better than 1/2, 1/4, or 1/10.
+ */
+ if (need < MDB_IDL_UM_MAX / 8)
+ need = MDB_IDL_UM_MAX / 8;
+
+ /* Save the page IDs of all the pages we're flushing */
+ /* flush from the tail forward, this saves a lot of shifting later on. */
+ for (i=dl[0].mid; i && need; i--) {
+ MDB_ID pn = dl[i].mid << 1;
+ dp = dl[i].mptr;
+ if (dp->mp_flags & (P_LOOSE|P_KEEP))
+ continue;
+ /* Can't spill twice, make sure it's not already in a parent's
+ * spill list.
+ */
+ if (txn->mt_parent) {
+ MDB_txn *tx2;
+ for (tx2 = txn->mt_parent; tx2; tx2 = tx2->mt_parent) {
+ if (tx2->mt_spill_pgs) {
+ j = mdb_midl_search(tx2->mt_spill_pgs, pn);
+ if (j <= tx2->mt_spill_pgs[0] && tx2->mt_spill_pgs[j] == pn) {
+ dp->mp_flags |= P_KEEP;
+ break;
+ }
+ }
+ }
+ if (tx2)
+ continue;
+ }
+ if ((rc = mdb_midl_append(&txn->mt_spill_pgs, pn)))
+ goto done;
+ need--;
+ }
+ mdb_midl_sort(txn->mt_spill_pgs);
+
+ /* Flush the spilled part of dirty list */
+ if ((rc = mdb_page_flush(txn, i)) != MDB_SUCCESS)
+ goto done;
+
+ /* Reset any dirty pages we kept that page_flush didn't see */
+ rc = mdb_pages_xkeep(m0, P_DIRTY|P_KEEP, i);
+
+done:
+ txn->mt_flags |= rc ? MDB_TXN_ERROR : MDB_TXN_SPILLS;
+ return rc;
+}
+
+/** Find oldest txnid still referenced. Expects txn->mt_txnid > 0. */
+static txnid_t
+mdb_find_oldest(MDB_txn *txn)
+{
+ int i;
+ txnid_t mr, oldest = txn->mt_txnid - 1;
+ if (txn->mt_env->me_txns) {
+ MDB_reader *r = txn->mt_env->me_txns->mti_readers;
+ for (i = txn->mt_env->me_txns->mti_numreaders; --i >= 0; ) {
+ if (r[i].mr_pid) {
+ mr = r[i].mr_txnid;
+ if (oldest > mr)
+ oldest = mr;
+ }
+ }
+ }
+ return oldest;
+}
+
+/** Add a page to the txn's dirty list */
+static void
+mdb_page_dirty(MDB_txn *txn, MDB_page *mp)
+{
+ MDB_ID2 mid;
+ int rc, (*insert)(MDB_ID2L, MDB_ID2 *);
+
+ if (txn->mt_flags & MDB_TXN_WRITEMAP) {
+ insert = mdb_mid2l_append;
+ } else {
+ insert = mdb_mid2l_insert;
+ }
+ mid.mid = mp->mp_pgno;
+ mid.mptr = mp;
+ rc = insert(txn->mt_u.dirty_list, &mid);
+ mdb_tassert(txn, rc == 0);
+ txn->mt_dirty_room--;
+}
+
+/** Allocate page numbers and memory for writing. Maintain me_pglast,
+ * me_pghead and mt_next_pgno. Set #MDB_TXN_ERROR on failure.
+ *
+ * If there are free pages available from older transactions, they
+ * are re-used first. Otherwise allocate a new page at mt_next_pgno.
+ * Do not modify the freeDB, just merge freeDB records into me_pghead[]
+ * and move me_pglast to say which records were consumed. Only this
+ * function can create me_pghead and move me_pglast/mt_next_pgno.
+ * @param[in] mc A cursor handle identifying the transaction and
+ * database for which we are allocating.
+ * @param[in] num the number of pages to allocate.
+ * @param[out] mp Address of the allocated page(s). Requests for multiple pages
+ * will always be satisfied by a single contiguous chunk of memory.
+ * @return 0 on success, non-zero on failure.
+ */
+static int
+mdb_page_alloc(MDB_cursor *mc, int num, MDB_page **mp)
+{
+#ifdef MDB_PARANOID /* Seems like we can ignore this now */
+ /* Get at most <Max_retries> more freeDB records once me_pghead
+ * has enough pages. If not enough, use new pages from the map.
+ * If <Paranoid> and mc is updating the freeDB, only get new
+ * records if me_pghead is empty. Then the freelist cannot play
+ * catch-up with itself by growing while trying to save it.
+ */
+ enum { Paranoid = 1, Max_retries = 500 };
+#else
+ enum { Paranoid = 0, Max_retries = INT_MAX /*infinite*/ };
+#endif
+ int rc, retry = num * 60;
+ MDB_txn *txn = mc->mc_txn;
+ MDB_env *env = txn->mt_env;
+ pgno_t pgno, *mop = env->me_pghead;
+ unsigned i, j, mop_len = mop ? mop[0] : 0, n2 = num-1;
+ MDB_page *np;
+ txnid_t oldest = 0, last;
+ MDB_cursor_op op;
+ MDB_cursor m2;
+ int found_old = 0;
+
+ /* If there are any loose pages, just use them */
+ if (num == 1 && txn->mt_loose_pgs) {
+ np = txn->mt_loose_pgs;
+ txn->mt_loose_pgs = NEXT_LOOSE_PAGE(np);
+ txn->mt_loose_count--;
+ DPRINTF(("db %d use loose page %"Z"u", DDBI(mc),
+ np->mp_pgno));
+ *mp = np;
+ return MDB_SUCCESS;
+ }
+
+ *mp = NULL;
+
+ /* If our dirty list is already full, we can't do anything */
+ if (txn->mt_dirty_room == 0) {
+ rc = MDB_TXN_FULL;
+ goto fail;
+ }
+
+ for (op = MDB_FIRST;; op = MDB_NEXT) {
+ MDB_val key, data;
+ MDB_node *leaf;
+ pgno_t *idl;
+
+ /* Seek a big enough contiguous page range. Prefer
+ * pages at the tail, just truncating the list.
+ */
+ if (mop_len > n2) {
+ i = mop_len;
+ do {
+ pgno = mop[i];
+ if (mop[i-n2] == pgno+n2)
+ goto search_done;
+ } while (--i > n2);
+ if (--retry < 0)
+ break;
+ }
+
+ if (op == MDB_FIRST) { /* 1st iteration */
+ /* Prepare to fetch more and coalesce */
+ last = env->me_pglast;
+ oldest = env->me_pgoldest;
+ mdb_cursor_init(&m2, txn, FREE_DBI, NULL);
+ if (last) {
+ op = MDB_SET_RANGE;
+ key.mv_data = &last; /* will look up last+1 */
+ key.mv_size = sizeof(last);
+ }
+ if (Paranoid && mc->mc_dbi == FREE_DBI)
+ retry = -1;
+ }
+ if (Paranoid && retry < 0 && mop_len)
+ break;
+
+ last++;
+ /* Do not fetch more if the record will be too recent */
+ if (oldest <= last) {
+ if (!found_old) {
+ oldest = mdb_find_oldest(txn);
+ env->me_pgoldest = oldest;
+ found_old = 1;
+ }
+ if (oldest <= last)
+ break;
+ }
+ rc = mdb_cursor_get(&m2, &key, NULL, op);
+ if (rc) {
+ if (rc == MDB_NOTFOUND)
+ break;
+ goto fail;
+ }
+ last = *(txnid_t*)key.mv_data;
+ if (oldest <= last) {
+ if (!found_old) {
+ oldest = mdb_find_oldest(txn);
+ env->me_pgoldest = oldest;
+ found_old = 1;
+ }
+ if (oldest <= last)
+ break;
+ }
+ np = m2.mc_pg[m2.mc_top];
+ leaf = NODEPTR(np, m2.mc_ki[m2.mc_top]);
+ if ((rc = mdb_node_read(&m2, leaf, &data)) != MDB_SUCCESS)
+ goto fail;
+
+ idl = (MDB_ID *) data.mv_data;
+ i = idl[0];
+ if (!mop) {
+ if (!(env->me_pghead = mop = mdb_midl_alloc(i))) {
+ rc = ENOMEM;
+ goto fail;
+ }
+ } else {
+ if ((rc = mdb_midl_need(&env->me_pghead, i)) != 0)
+ goto fail;
+ mop = env->me_pghead;
+ }
+ env->me_pglast = last;
+#if (MDB_DEBUG) > 1
+ DPRINTF(("IDL read txn %"Z"u root %"Z"u num %u",
+ last, txn->mt_dbs[FREE_DBI].md_root, i));
+ for (j = i; j; j--)
+ DPRINTF(("IDL %"Z"u", idl[j]));
+#endif
+ /* Merge in descending sorted order */
+ mdb_midl_xmerge(mop, idl);
+ mop_len = mop[0];
+ }
+
+ /* Use new pages from the map when nothing suitable in the freeDB */
+ i = 0;
+ pgno = txn->mt_next_pgno;
+ if (pgno + num >= env->me_maxpg) {
+ DPUTS("DB size maxed out");
+ rc = MDB_MAP_FULL;
+ goto fail;
+ }
+
+search_done:
+ if (env->me_flags & MDB_WRITEMAP) {
+ np = (MDB_page *)(env->me_map + env->me_psize * pgno);
+ } else {
+ if (!(np = mdb_page_malloc(txn, num))) {
+ rc = ENOMEM;
+ goto fail;
+ }
+ }
+ if (i) {
+ mop[0] = mop_len -= num;
+ /* Move any stragglers down */
+ for (j = i-num; j < mop_len; )
+ mop[++j] = mop[++i];
+ } else {
+ txn->mt_next_pgno = pgno + num;
+ }
+ np->mp_pgno = pgno;
+ mdb_page_dirty(txn, np);
+ *mp = np;
+
+ return MDB_SUCCESS;
+
+fail:
+ txn->mt_flags |= MDB_TXN_ERROR;
+ return rc;
+}
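+
+/* An illustration of the freeDB search above, with hypothetical IDL
+ * contents. me_pghead holds its length at index 0, then page numbers
+ * sorted in descending order. For num == 3 (n2 == 2) and
+ * mop == { 5, 9, 7, 6, 5, 3 }:
+ *	i == 5: pgno == 3, mop[3] == 6 != 3+2, keep scanning;
+ *	i == 4: pgno == 5, mop[2] == 7 == 5+2: pages 5,6,7 are contiguous.
+ * After search_done the straggler loop leaves mop == { 2, 9, 3 }.
+ */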
+
+/** Copy the used portions of a non-overflow page.
+ * @param[in] dst page to copy into
+ * @param[in] src page to copy from
+ * @param[in] psize size of a page
+ */
+static void
+mdb_page_copy(MDB_page *dst, MDB_page *src, unsigned int psize)
+{
+ enum { Align = sizeof(pgno_t) };
+ indx_t upper = src->mp_upper, lower = src->mp_lower, unused = upper-lower;
+
+ /* If page isn't full, just copy the used portion. Adjust
+ * alignment so memcpy may copy words instead of bytes.
+ */
+ if ((unused &= -Align) && !IS_LEAF2(src)) {
+ upper = (upper + PAGEBASE) & -Align;
+ memcpy(dst, src, (lower + PAGEBASE + (Align-1)) & -Align);
+ memcpy((pgno_t *)((char *)dst+upper), (pgno_t *)((char *)src+upper),
+ psize - upper);
+ } else {
+ memcpy(dst, src, psize - unused);
+ }
+}
+
+/** Pull a page off the txn's spill list, if present.
+ * If a page being referenced was spilled to disk in this txn, bring
+ * it back and make it dirty/writable again.
+ * @param[in] txn the transaction handle.
+ * @param[in] mp the page being referenced. It must not be dirty.
+ * @param[out] ret the writable page, if any. ret is unchanged if
+ * mp wasn't spilled.
+ */
+static int
+mdb_page_unspill(MDB_txn *txn, MDB_page *mp, MDB_page **ret)
+{
+ MDB_env *env = txn->mt_env;
+ const MDB_txn *tx2;
+ unsigned x;
+ pgno_t pgno = mp->mp_pgno, pn = pgno << 1;
+
+ for (tx2 = txn; tx2; tx2=tx2->mt_parent) {
+ if (!tx2->mt_spill_pgs)
+ continue;
+ x = mdb_midl_search(tx2->mt_spill_pgs, pn);
+ if (x <= tx2->mt_spill_pgs[0] && tx2->mt_spill_pgs[x] == pn) {
+ MDB_page *np;
+ int num;
+ if (txn->mt_dirty_room == 0)
+ return MDB_TXN_FULL;
+ if (IS_OVERFLOW(mp))
+ num = mp->mp_pages;
+ else
+ num = 1;
+ if (env->me_flags & MDB_WRITEMAP) {
+ np = mp;
+ } else {
+ np = mdb_page_malloc(txn, num);
+ if (!np)
+ return ENOMEM;
+ if (num > 1)
+ memcpy(np, mp, num * env->me_psize);
+ else
+ mdb_page_copy(np, mp, env->me_psize);
+ }
+ if (tx2 == txn) {
+ /* If in current txn, this page is no longer spilled.
+ * If it happens to be the last page, truncate the spill list.
+ * Otherwise mark it as deleted by setting the LSB.
+ */
+ if (x == txn->mt_spill_pgs[0])
+ txn->mt_spill_pgs[0]--;
+ else
+ txn->mt_spill_pgs[x] |= 1;
+ } /* otherwise, if belonging to a parent txn, the
+ * page remains spilled until child commits
+ */
+
+ mdb_page_dirty(txn, np);
+ np->mp_flags |= P_DIRTY;
+ *ret = np;
+ break;
+ }
+ }
+ return MDB_SUCCESS;
+}
+
+/** Touch a page: make it dirty and re-insert into tree with updated pgno.
+ * Set #MDB_TXN_ERROR on failure.
+ * @param[in] mc cursor pointing to the page to be touched
+ * @return 0 on success, non-zero on failure.
+ */
+static int
+mdb_page_touch(MDB_cursor *mc)
+{
+ MDB_page *mp = mc->mc_pg[mc->mc_top], *np;
+ MDB_txn *txn = mc->mc_txn;
+ MDB_cursor *m2, *m3;
+ pgno_t pgno;
+ int rc;
+
+ if (!F_ISSET(MP_FLAGS(mp), P_DIRTY)) {
+ if (txn->mt_flags & MDB_TXN_SPILLS) {
+ np = NULL;
+ rc = mdb_page_unspill(txn, mp, &np);
+ if (rc)
+ goto fail;
+ if (np)
+ goto done;
+ }
+ if ((rc = mdb_midl_need(&txn->mt_free_pgs, 1)) ||
+ (rc = mdb_page_alloc(mc, 1, &np)))
+ goto fail;
+ pgno = np->mp_pgno;
+ DPRINTF(("touched db %d page %"Z"u -> %"Z"u", DDBI(mc),
+ mp->mp_pgno, pgno));
+ mdb_cassert(mc, mp->mp_pgno != pgno);
+ mdb_midl_xappend(txn->mt_free_pgs, mp->mp_pgno);
+ /* Update the parent page, if any, to point to the new page */
+ if (mc->mc_top) {
+ MDB_page *parent = mc->mc_pg[mc->mc_top-1];
+ MDB_node *node = NODEPTR(parent, mc->mc_ki[mc->mc_top-1]);
+ SETPGNO(node, pgno);
+ } else {
+ mc->mc_db->md_root = pgno;
+ }
+ } else if (txn->mt_parent && !IS_SUBP(mp)) {
+ MDB_ID2 mid, *dl = txn->mt_u.dirty_list;
+ pgno = mp->mp_pgno;
+ /* If txn has a parent, make sure the page is in our
+ * dirty list.
+ */
+ if (dl[0].mid) {
+ unsigned x = mdb_mid2l_search(dl, pgno);
+ if (x <= dl[0].mid && dl[x].mid == pgno) {
+ if (mp != dl[x].mptr) { /* bad cursor? */
+ mc->mc_flags &= ~(C_INITIALIZED|C_EOF);
+ txn->mt_flags |= MDB_TXN_ERROR;
+ return MDB_CORRUPTED;
+ }
+ return 0;
+ }
+ }
+ mdb_cassert(mc, dl[0].mid < MDB_IDL_UM_MAX);
+ /* No - copy it */
+ np = mdb_page_malloc(txn, 1);
+ if (!np)
+ return ENOMEM;
+ mid.mid = pgno;
+ mid.mptr = np;
+ rc = mdb_mid2l_insert(dl, &mid);
+ mdb_cassert(mc, rc == 0);
+ } else {
+ return 0;
+ }
+
+ mdb_page_copy(np, mp, txn->mt_env->me_psize);
+ np->mp_pgno = pgno;
+ np->mp_flags |= P_DIRTY;
+
+done:
+ /* Adjust cursors pointing to mp */
+ mc->mc_pg[mc->mc_top] = np;
+ m2 = txn->mt_cursors[mc->mc_dbi];
+ if (mc->mc_flags & C_SUB) {
+ for (; m2; m2=m2->mc_next) {
+ m3 = &m2->mc_xcursor->mx_cursor;
+ if (m3->mc_snum < mc->mc_snum) continue;
+ if (m3->mc_pg[mc->mc_top] == mp)
+ m3->mc_pg[mc->mc_top] = np;
+ }
+ } else {
+ for (; m2; m2=m2->mc_next) {
+ if (m2->mc_snum < mc->mc_snum) continue;
+ if (m2 == mc) continue;
+ if (m2->mc_pg[mc->mc_top] == mp) {
+ m2->mc_pg[mc->mc_top] = np;
+ if (IS_LEAF(np))
+ XCURSOR_REFRESH(m2, mc->mc_top, np);
+ }
+ }
+ }
+ return 0;
+
+fail:
+ txn->mt_flags |= MDB_TXN_ERROR;
+ return rc;
+}
+
+int
+mdb_env_sync(MDB_env *env, int force)
+{
+ int rc = 0;
+ if (env->me_flags & MDB_RDONLY)
+ return EACCES;
+ if (force || !F_ISSET(env->me_flags, MDB_NOSYNC)) {
+ if (env->me_flags & MDB_WRITEMAP) {
+ int flags = ((env->me_flags & MDB_MAPASYNC) && !force)
+ ? MS_ASYNC : MS_SYNC;
+ if (MDB_MSYNC(env->me_map, env->me_mapsize, flags))
+ rc = ErrCode();
+#ifdef _WIN32
+ else if (flags == MS_SYNC && MDB_FDATASYNC(env->me_fd))
+ rc = ErrCode();
+#endif
+ } else {
+#ifdef BROKEN_FDATASYNC
+ if (env->me_flags & MDB_FSYNCONLY) {
+ if (fsync(env->me_fd))
+ rc = ErrCode();
+ } else
+#endif
+ if (MDB_FDATASYNC(env->me_fd))
+ rc = ErrCode();
+ }
+ }
+ return rc;
+}
+
+/** Back up parent txn's cursors, then grab the originals for tracking */
+static int
+mdb_cursor_shadow(MDB_txn *src, MDB_txn *dst)
+{
+ MDB_cursor *mc, *bk;
+ MDB_xcursor *mx;
+ size_t size;
+ int i;
+
+ for (i = src->mt_numdbs; --i >= 0; ) {
+ if ((mc = src->mt_cursors[i]) != NULL) {
+ size = sizeof(MDB_cursor);
+ if (mc->mc_xcursor)
+ size += sizeof(MDB_xcursor);
+ for (; mc; mc = bk->mc_next) {
+ bk = malloc(size);
+ if (!bk)
+ return ENOMEM;
+ *bk = *mc;
+ mc->mc_backup = bk;
+ mc->mc_db = &dst->mt_dbs[i];
+ /* Kill pointers into src to reduce abuse: The
+ * user may not use mc until dst ends. But we need a valid
+ * txn pointer here for cursor fixups to keep working.
+ */
+ mc->mc_txn = dst;
+ mc->mc_dbflag = &dst->mt_dbflags[i];
+ if ((mx = mc->mc_xcursor) != NULL) {
+ *(MDB_xcursor *)(bk+1) = *mx;
+ mx->mx_cursor.mc_txn = dst;
+ }
+ mc->mc_next = dst->mt_cursors[i];
+ dst->mt_cursors[i] = mc;
+ }
+ }
+ }
+ return MDB_SUCCESS;
+}
+
+/** Close this write txn's cursors, give parent txn's cursors back to parent.
+ * @param[in] txn the transaction handle.
+ * @param[in] merge true to keep changes to parent cursors, false to revert.
+ */
+static void
+mdb_cursors_close(MDB_txn *txn, unsigned merge)
+{
+ MDB_cursor **cursors = txn->mt_cursors, *mc, *next, *bk;
+ MDB_xcursor *mx;
+ int i;
+
+ for (i = txn->mt_numdbs; --i >= 0; ) {
+ for (mc = cursors[i]; mc; mc = next) {
+ next = mc->mc_next;
+ if ((bk = mc->mc_backup) != NULL) {
+ if (merge) {
+ /* Commit changes to parent txn */
+ mc->mc_next = bk->mc_next;
+ mc->mc_backup = bk->mc_backup;
+ mc->mc_txn = bk->mc_txn;
+ mc->mc_db = bk->mc_db;
+ mc->mc_dbflag = bk->mc_dbflag;
+ if ((mx = mc->mc_xcursor) != NULL)
+ mx->mx_cursor.mc_txn = bk->mc_txn;
+ } else {
+ /* Abort nested txn */
+ *mc = *bk;
+ if ((mx = mc->mc_xcursor) != NULL)
+ *mx = *(MDB_xcursor *)(bk+1);
+ }
+ mc = bk;
+ }
+ /* Only malloced cursors are permanently tracked. */
+ free(mc);
+ }
+ cursors[i] = NULL;
+ }
+}
+
+#if !(MDB_PIDLOCK) /* Currently the same as defined(_WIN32) */
+enum Pidlock_op {
+ Pidset, Pidcheck
+};
+#else
+enum Pidlock_op {
+ Pidset = F_SETLK, Pidcheck = F_GETLK
+};
+#endif
+
+/** Set or check a pid lock. Set returns 0 on success.
+ * Check returns 0 if the process is certainly dead, nonzero if it may
+ * be alive (the lock exists or an error happened so we do not know).
+ *
+ * On Windows Pidset is a no-op; we merely check for the existence
+ * of the process with the given pid. On POSIX we use a single byte
+ * lock on the lockfile, set at an offset equal to the pid.
+ */
+static int
+mdb_reader_pid(MDB_env *env, enum Pidlock_op op, MDB_PID_T pid)
+{
+#if !(MDB_PIDLOCK) /* Currently the same as defined(_WIN32) */
+ int ret = 0;
+ HANDLE h;
+ if (op == Pidcheck) {
+ h = OpenProcess(env->me_pidquery, FALSE, pid);
+		/* No documented "no such process" code, but other programs use this: */
+ if (!h)
+ return ErrCode() != ERROR_INVALID_PARAMETER;
+ /* A process exists until all handles to it close. Has it exited? */
+ ret = WaitForSingleObject(h, 0) != 0;
+ CloseHandle(h);
+ }
+ return ret;
+#else
+ for (;;) {
+ int rc;
+ struct flock lock_info;
+ memset(&lock_info, 0, sizeof(lock_info));
+ lock_info.l_type = F_WRLCK;
+ lock_info.l_whence = SEEK_SET;
+ lock_info.l_start = pid;
+ lock_info.l_len = 1;
+ if ((rc = fcntl(env->me_lfd, op, &lock_info)) == 0) {
+ if (op == F_GETLK && lock_info.l_type != F_UNLCK)
+ rc = -1;
+ } else if ((rc = ErrCode()) == EINTR) {
+ continue;
+ }
+ return rc;
+ }
+#endif
+}
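+
+/* A concrete POSIX run (hypothetical pid): mdb_reader_pid(env, Pidset, 1234)
+ * takes an F_WRLCK on the byte at offset 1234 of the lockfile. A later
+ * mdb_reader_pid(env, Pidcheck, 1234) from another process issues F_GETLK;
+ * while the owner lives, l_type comes back as F_WRLCK and the call returns
+ * nonzero. If the owner died, the kernel released its locks, so F_GETLK
+ * reports F_UNLCK and the call returns 0 (certainly dead).
+ */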
+
+/** Common code for #mdb_txn_begin() and #mdb_txn_renew().
+ * @param[in] txn the transaction handle to initialize
+ * @return 0 on success, non-zero on failure.
+ */
+static int
+mdb_txn_renew0(MDB_txn *txn)
+{
+ MDB_env *env = txn->mt_env;
+ MDB_txninfo *ti = env->me_txns;
+ MDB_meta *meta;
+ unsigned int i, nr, flags = txn->mt_flags;
+ uint16_t x;
+ int rc, new_notls = 0;
+
+ if ((flags &= MDB_TXN_RDONLY) != 0) {
+ if (!ti) {
+ meta = mdb_env_pick_meta(env);
+ txn->mt_txnid = meta->mm_txnid;
+ txn->mt_u.reader = NULL;
+ } else {
+ MDB_reader *r = (env->me_flags & MDB_NOTLS) ? txn->mt_u.reader :
+ pthread_getspecific(env->me_txkey);
+ if (r) {
+ if (r->mr_pid != env->me_pid || r->mr_txnid != (txnid_t)-1)
+ return MDB_BAD_RSLOT;
+ } else {
+ MDB_PID_T pid = env->me_pid;
+ MDB_THR_T tid = pthread_self();
+ mdb_mutexref_t rmutex = env->me_rmutex;
+
+ if (!env->me_live_reader) {
+ rc = mdb_reader_pid(env, Pidset, pid);
+ if (rc)
+ return rc;
+ env->me_live_reader = 1;
+ }
+
+ if (LOCK_MUTEX(rc, env, rmutex))
+ return rc;
+ nr = ti->mti_numreaders;
+ for (i=0; i<nr; i++)
+ if (ti->mti_readers[i].mr_pid == 0)
+ break;
+ if (i == env->me_maxreaders) {
+ UNLOCK_MUTEX(rmutex);
+ return MDB_READERS_FULL;
+ }
+ r = &ti->mti_readers[i];
+ /* Claim the reader slot, carefully since other code
+ * uses the reader table un-mutexed: First reset the
+ * slot, next publish it in mti_numreaders. After
+ * that, it is safe for mdb_env_close() to touch it.
+			 * Only then do we finally claim it by setting mr_pid.
+ */
+ r->mr_pid = 0;
+ r->mr_txnid = (txnid_t)-1;
+ r->mr_tid = tid;
+ if (i == nr)
+ ti->mti_numreaders = ++nr;
+ env->me_close_readers = nr;
+ r->mr_pid = pid;
+ UNLOCK_MUTEX(rmutex);
+
+ new_notls = (env->me_flags & MDB_NOTLS);
+ if (!new_notls && (rc=pthread_setspecific(env->me_txkey, r))) {
+ r->mr_pid = 0;
+ return rc;
+ }
+ }
+ do /* LY: Retry on a race, ITS#7970. */
+ r->mr_txnid = ti->mti_txnid;
+ while(r->mr_txnid != ti->mti_txnid);
+ txn->mt_txnid = r->mr_txnid;
+ txn->mt_u.reader = r;
+ meta = env->me_metas[txn->mt_txnid & 1];
+ }
+
+ } else {
+ /* Not yet touching txn == env->me_txn0, it may be active */
+ if (ti) {
+ if (LOCK_MUTEX(rc, env, env->me_wmutex))
+ return rc;
+ txn->mt_txnid = ti->mti_txnid;
+ meta = env->me_metas[txn->mt_txnid & 1];
+ } else {
+ meta = mdb_env_pick_meta(env);
+ txn->mt_txnid = meta->mm_txnid;
+ }
+ txn->mt_txnid++;
+#if MDB_DEBUG
+ if (txn->mt_txnid == mdb_debug_start)
+ mdb_debug = 1;
+#endif
+ txn->mt_child = NULL;
+ txn->mt_loose_pgs = NULL;
+ txn->mt_loose_count = 0;
+ txn->mt_dirty_room = MDB_IDL_UM_MAX;
+ txn->mt_u.dirty_list = env->me_dirty_list;
+ txn->mt_u.dirty_list[0].mid = 0;
+ txn->mt_free_pgs = env->me_free_pgs;
+ txn->mt_free_pgs[0] = 0;
+ txn->mt_spill_pgs = NULL;
+ env->me_txn = txn;
+ memcpy(txn->mt_dbiseqs, env->me_dbiseqs, env->me_maxdbs * sizeof(unsigned int));
+ }
+
+ /* Copy the DB info and flags */
+ memcpy(txn->mt_dbs, meta->mm_dbs, CORE_DBS * sizeof(MDB_db));
+
+ /* Moved to here to avoid a data race in read TXNs */
+ txn->mt_next_pgno = meta->mm_last_pg+1;
+
+ txn->mt_flags = flags;
+
+ /* Setup db info */
+ txn->mt_numdbs = env->me_numdbs;
+ for (i=CORE_DBS; i<txn->mt_numdbs; i++) {
+ x = env->me_dbflags[i];
+ txn->mt_dbs[i].md_flags = x & PERSISTENT_FLAGS;
+ txn->mt_dbflags[i] = (x & MDB_VALID) ? DB_VALID|DB_USRVALID|DB_STALE : 0;
+ }
+ txn->mt_dbflags[MAIN_DBI] = DB_VALID|DB_USRVALID;
+ txn->mt_dbflags[FREE_DBI] = DB_VALID;
+
+ if (env->me_flags & MDB_FATAL_ERROR) {
+ DPUTS("environment had fatal error, must shutdown!");
+ rc = MDB_PANIC;
+ } else if (env->me_maxpg < txn->mt_next_pgno) {
+ rc = MDB_MAP_RESIZED;
+ } else {
+ return MDB_SUCCESS;
+ }
+ mdb_txn_end(txn, new_notls /*0 or MDB_END_SLOT*/ | MDB_END_FAIL_BEGIN);
+ return rc;
+}
+
+int
+mdb_txn_renew(MDB_txn *txn)
+{
+ int rc;
+
+ if (!txn || !F_ISSET(txn->mt_flags, MDB_TXN_RDONLY|MDB_TXN_FINISHED))
+ return EINVAL;
+
+ rc = mdb_txn_renew0(txn);
+ if (rc == MDB_SUCCESS) {
+ DPRINTF(("renew txn %"Z"u%c %p on mdbenv %p, root page %"Z"u",
+ txn->mt_txnid, (txn->mt_flags & MDB_TXN_RDONLY) ? 'r' : 'w',
+ (void *)txn, (void *)txn->mt_env, txn->mt_dbs[MAIN_DBI].md_root));
+ }
+ return rc;
+}
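+
+/* Illustrative sketch (not part of the source): a read-only txn can
+ * be parked with mdb_txn_reset() and revived with mdb_txn_renew(),
+ * which reuses the MDB_txn allocation and reader slot instead of
+ * tearing them down. Error handling omitted:
+ *
+ *	MDB_txn *txn;
+ *	mdb_txn_begin(env, NULL, MDB_RDONLY, &txn);
+ *	// ... read under this snapshot ...
+ *	mdb_txn_reset(txn);	// drop the snapshot, keep the handle
+ *	// ... later ...
+ *	mdb_txn_renew(txn);	// grab a fresh snapshot
+ *	// ... read again ...
+ *	mdb_txn_abort(txn);	// finally release slot and handle
+ */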
+
+int
+mdb_txn_begin(MDB_env *env, MDB_txn *parent, unsigned int flags, MDB_txn **ret)
+{
+ MDB_txn *txn;
+ MDB_ntxn *ntxn;
+ int rc, size, tsize;
+
+ flags &= MDB_TXN_BEGIN_FLAGS;
+ flags |= env->me_flags & MDB_WRITEMAP;
+
+ if (env->me_flags & MDB_RDONLY & ~flags) /* write txn in RDONLY env */
+ return EACCES;
+
+ if (parent) {
+ /* Nested transactions: Max 1 child, write txns only, no writemap */
+ flags |= parent->mt_flags;
+ if (flags & (MDB_RDONLY|MDB_WRITEMAP|MDB_TXN_BLOCKED)) {
+ return (parent->mt_flags & MDB_TXN_RDONLY) ? EINVAL : MDB_BAD_TXN;
+ }
+ /* Child txns save MDB_pgstate and use own copy of cursors */
+ size = env->me_maxdbs * (sizeof(MDB_db)+sizeof(MDB_cursor *)+1);
+ size += tsize = sizeof(MDB_ntxn);
+ } else if (flags & MDB_RDONLY) {
+ size = env->me_maxdbs * (sizeof(MDB_db)+1);
+ size += tsize = sizeof(MDB_txn);
+ } else {
+ /* Reuse preallocated write txn. However, do not touch it until
+ * mdb_txn_renew0() succeeds, since it currently may be active.
+ */
+ txn = env->me_txn0;
+ goto renew;
+ }
+ if ((txn = calloc(1, size)) == NULL) {
+ DPRINTF(("calloc: %s", strerror(errno)));
+ return ENOMEM;
+ }
+ txn->mt_dbxs = env->me_dbxs; /* static */
+ txn->mt_dbs = (MDB_db *) ((char *)txn + tsize);
+ txn->mt_dbflags = (unsigned char *)txn + size - env->me_maxdbs;
+ txn->mt_flags = flags;
+ txn->mt_env = env;
+
+ if (parent) {
+ unsigned int i;
+ txn->mt_cursors = (MDB_cursor **)(txn->mt_dbs + env->me_maxdbs);
+ txn->mt_dbiseqs = parent->mt_dbiseqs;
+ txn->mt_u.dirty_list = malloc(sizeof(MDB_ID2)*MDB_IDL_UM_SIZE);
+ if (!txn->mt_u.dirty_list ||
+ !(txn->mt_free_pgs = mdb_midl_alloc(MDB_IDL_UM_MAX)))
+ {
+ free(txn->mt_u.dirty_list);
+ free(txn);
+ return ENOMEM;
+ }
+ txn->mt_txnid = parent->mt_txnid;
+ txn->mt_dirty_room = parent->mt_dirty_room;
+ txn->mt_u.dirty_list[0].mid = 0;
+ txn->mt_spill_pgs = NULL;
+ txn->mt_next_pgno = parent->mt_next_pgno;
+ parent->mt_flags |= MDB_TXN_HAS_CHILD;
+ parent->mt_child = txn;
+ txn->mt_parent = parent;
+ txn->mt_numdbs = parent->mt_numdbs;
+ memcpy(txn->mt_dbs, parent->mt_dbs, txn->mt_numdbs * sizeof(MDB_db));
+ /* Copy parent's mt_dbflags, but clear DB_NEW */
+ for (i=0; i<txn->mt_numdbs; i++)
+ txn->mt_dbflags[i] = parent->mt_dbflags[i] & ~DB_NEW;
+ rc = 0;
+ ntxn = (MDB_ntxn *)txn;
+ ntxn->mnt_pgstate = env->me_pgstate; /* save parent me_pghead & co */
+ if (env->me_pghead) {
+ size = MDB_IDL_SIZEOF(env->me_pghead);
+ env->me_pghead = mdb_midl_alloc(env->me_pghead[0]);
+ if (env->me_pghead)
+ memcpy(env->me_pghead, ntxn->mnt_pgstate.mf_pghead, size);
+ else
+ rc = ENOMEM;
+ }
+ if (!rc)
+ rc = mdb_cursor_shadow(parent, txn);
+ if (rc)
+ mdb_txn_end(txn, MDB_END_FAIL_BEGINCHILD);
+ } else { /* MDB_RDONLY */
+ txn->mt_dbiseqs = env->me_dbiseqs;
+renew:
+ rc = mdb_txn_renew0(txn);
+ }
+ if (rc) {
+ if (txn != env->me_txn0)
+ free(txn);
+ } else {
+ txn->mt_flags |= flags; /* could not change txn=me_txn0 earlier */
+ *ret = txn;
+ DPRINTF(("begin txn %"Z"u%c %p on mdbenv %p, root page %"Z"u",
+ txn->mt_txnid, (flags & MDB_RDONLY) ? 'r' : 'w',
+ (void *) txn, (void *) env, txn->mt_dbs[MAIN_DBI].md_root));
+ }
+
+ return rc;
+}
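+
+/* Illustrative sketch (not part of the source): a write txn with a
+ * nested child. Per the checks above, a child requires a write parent
+ * and is unavailable with MDB_WRITEMAP:
+ *
+ *	MDB_txn *txn, *child;
+ *	if (mdb_txn_begin(env, NULL, 0, &txn) != MDB_SUCCESS)
+ *		return;
+ *	if (mdb_txn_begin(env, txn, 0, &child) == MDB_SUCCESS) {
+ *		// ... changes visible only inside the child ...
+ *		mdb_txn_commit(child);	// merge into parent, or abort to discard
+ *	}
+ *	mdb_txn_commit(txn);	// nothing is durable until this succeeds
+ */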
+
+MDB_env *
+mdb_txn_env(MDB_txn *txn)
+{
+ if(!txn) return NULL;
+ return txn->mt_env;
+}
+
+size_t
+mdb_txn_id(MDB_txn *txn)
+{
+ if(!txn) return 0;
+ return txn->mt_txnid;
+}
+
+/** Export or close DBI handles opened in this txn. */
+static void
+mdb_dbis_update(MDB_txn *txn, int keep)
+{
+ int i;
+ MDB_dbi n = txn->mt_numdbs;
+ MDB_env *env = txn->mt_env;
+ unsigned char *tdbflags = txn->mt_dbflags;
+
+ for (i = n; --i >= CORE_DBS;) {
+ if (tdbflags[i] & DB_NEW) {
+ if (keep) {
+ env->me_dbflags[i] = txn->mt_dbs[i].md_flags | MDB_VALID;
+ } else {
+ char *ptr = env->me_dbxs[i].md_name.mv_data;
+ if (ptr) {
+ env->me_dbxs[i].md_name.mv_data = NULL;
+ env->me_dbxs[i].md_name.mv_size = 0;
+ env->me_dbflags[i] = 0;
+ env->me_dbiseqs[i]++;
+ free(ptr);
+ }
+ }
+ }
+ }
+ if (keep && env->me_numdbs < n)
+ env->me_numdbs = n;
+}
+
+/** End a transaction, except successful commit of a nested transaction.
+ * May be called twice for readonly txns: First reset it, then abort.
+ * @param[in] txn the transaction handle to end
+ * @param[in] mode why and how to end the transaction
+ */
+static void
+mdb_txn_end(MDB_txn *txn, unsigned mode)
+{
+ MDB_env *env = txn->mt_env;
+#if MDB_DEBUG
+ static const char *const names[] = MDB_END_NAMES;
+#endif
+
+ /* Export or close DBI handles opened in this txn */
+ mdb_dbis_update(txn, mode & MDB_END_UPDATE);
+
+ DPRINTF(("%s txn %"Z"u%c %p on mdbenv %p, root page %"Z"u",
+ names[mode & MDB_END_OPMASK],
+ txn->mt_txnid, (txn->mt_flags & MDB_TXN_RDONLY) ? 'r' : 'w',
+ (void *) txn, (void *)env, txn->mt_dbs[MAIN_DBI].md_root));
+
+ if (F_ISSET(txn->mt_flags, MDB_TXN_RDONLY)) {
+ if (txn->mt_u.reader) {
+ txn->mt_u.reader->mr_txnid = (txnid_t)-1;
+ if (!(env->me_flags & MDB_NOTLS)) {
+ txn->mt_u.reader = NULL; /* txn does not own reader */
+ } else if (mode & MDB_END_SLOT) {
+ txn->mt_u.reader->mr_pid = 0;
+ txn->mt_u.reader = NULL;
+ } /* else txn owns the slot until it does MDB_END_SLOT */
+ }
+ txn->mt_numdbs = 0; /* prevent further DBI activity */
+ txn->mt_flags |= MDB_TXN_FINISHED;
+
+ } else if (!F_ISSET(txn->mt_flags, MDB_TXN_FINISHED)) {
+ pgno_t *pghead = env->me_pghead;
+
+ if (!(mode & MDB_END_UPDATE)) /* !(already closed cursors) */
+ mdb_cursors_close(txn, 0);
+ if (!(env->me_flags & MDB_WRITEMAP)) {
+ mdb_dlist_free(txn);
+ }
+
+ txn->mt_numdbs = 0;
+ txn->mt_flags = MDB_TXN_FINISHED;
+
+ if (!txn->mt_parent) {
+ mdb_midl_shrink(&txn->mt_free_pgs);
+ env->me_free_pgs = txn->mt_free_pgs;
+ /* me_pgstate: */
+ env->me_pghead = NULL;
+ env->me_pglast = 0;
+
+ env->me_txn = NULL;
+ mode = 0; /* txn == env->me_txn0, do not free() it */
+
+ /* The writer mutex was locked in mdb_txn_begin. */
+ if (env->me_txns)
+ UNLOCK_MUTEX(env->me_wmutex);
+ } else {
+ txn->mt_parent->mt_child = NULL;
+ txn->mt_parent->mt_flags &= ~MDB_TXN_HAS_CHILD;
+ env->me_pgstate = ((MDB_ntxn *)txn)->mnt_pgstate;
+ mdb_midl_free(txn->mt_free_pgs);
+ free(txn->mt_u.dirty_list);
+ }
+ mdb_midl_free(txn->mt_spill_pgs);
+
+ mdb_midl_free(pghead);
+ }
+
+ if (mode & MDB_END_FREE)
+ free(txn);
+}
+
+void
+mdb_txn_reset(MDB_txn *txn)
+{
+ if (txn == NULL)
+ return;
+
+ /* This call is only valid for read-only txns */
+ if (!(txn->mt_flags & MDB_TXN_RDONLY))
+ return;
+
+ mdb_txn_end(txn, MDB_END_RESET);
+}
+
+void
+mdb_txn_abort(MDB_txn *txn)
+{
+ if (txn == NULL)
+ return;
+
+ if (txn->mt_child)
+ mdb_txn_abort(txn->mt_child);
+
+ mdb_txn_end(txn, MDB_END_ABORT|MDB_END_SLOT|MDB_END_FREE);
+}
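+
+/* Note: for a read-only txn, mdb_txn_reset() and mdb_txn_abort()
+ * both funnel into mdb_txn_end() and differ only in that abort adds
+ * MDB_END_SLOT|MDB_END_FREE, releasing the reader slot and freeing
+ * the handle; reset keeps both for a later mdb_txn_renew(). Write
+ * txns must end with abort or commit.
+ */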
+
+/** Save the freelist as of this transaction to the freeDB.
+ * This changes the freelist. Keep trying until it stabilizes.
+ */
+static int
+mdb_freelist_save(MDB_txn *txn)
+{
+ /* env->me_pghead[] can grow and shrink during this call.
+ * env->me_pglast and txn->mt_free_pgs[] can only grow.
+ * Page numbers cannot disappear from txn->mt_free_pgs[].
+ */
+ MDB_cursor mc;
+ MDB_env *env = txn->mt_env;
+ int rc, maxfree_1pg = env->me_maxfree_1pg, more = 1;
+ txnid_t pglast = 0, head_id = 0;
+ pgno_t freecnt = 0, *free_pgs, *mop;
+ ssize_t head_room = 0, total_room = 0, mop_len, clean_limit;
+
+ mdb_cursor_init(&mc, txn, FREE_DBI, NULL);
+
+ if (env->me_pghead) {
+ /* Make sure first page of freeDB is touched and on freelist */
+ rc = mdb_page_search(&mc, NULL, MDB_PS_FIRST|MDB_PS_MODIFY);
+ if (rc && rc != MDB_NOTFOUND)
+ return rc;
+ }
+
+ if (!env->me_pghead && txn->mt_loose_pgs) {
+ /* Put loose page numbers in mt_free_pgs, since
+ * we may be unable to return them to me_pghead.
+ */
+ MDB_page *mp = txn->mt_loose_pgs;
+ MDB_ID2 *dl = txn->mt_u.dirty_list;
+ unsigned x;
+ if ((rc = mdb_midl_need(&txn->mt_free_pgs, txn->mt_loose_count)) != 0)
+ return rc;
+ for (; mp; mp = NEXT_LOOSE_PAGE(mp)) {
+ mdb_midl_xappend(txn->mt_free_pgs, mp->mp_pgno);
+ /* must also remove from dirty list */
+ if (txn->mt_flags & MDB_TXN_WRITEMAP) {
+ for (x=1; x<=dl[0].mid; x++)
+ if (dl[x].mid == mp->mp_pgno)
+ break;
+ mdb_tassert(txn, x <= dl[0].mid);
+ } else {
+ x = mdb_mid2l_search(dl, mp->mp_pgno);
+ mdb_tassert(txn, dl[x].mid == mp->mp_pgno);
+ mdb_dpage_free(env, mp);
+ }
+ dl[x].mptr = NULL;
+ }
+ {
+ /* squash freed slots out of the dirty list */
+ unsigned y;
+ for (y=1; dl[y].mptr && y <= dl[0].mid; y++);
+ if (y <= dl[0].mid) {
+ for(x=y, y++;;) {
+ while (!dl[y].mptr && y <= dl[0].mid) y++;
+ if (y > dl[0].mid) break;
+ dl[x++] = dl[y++];
+ }
+ dl[0].mid = x-1;
+ } else {
+ /* all slots freed */
+ dl[0].mid = 0;
+ }
+ }
+ txn->mt_loose_pgs = NULL;
+ txn->mt_loose_count = 0;
+ }
+
+ /* MDB_RESERVE cancels meminit in ovpage malloc (when no WRITEMAP) */
+ clean_limit = (env->me_flags & (MDB_NOMEMINIT|MDB_WRITEMAP))
+ ? SSIZE_MAX : maxfree_1pg;
+
+ for (;;) {
+ /* Come back here after each Put() in case freelist changed */
+ MDB_val key, data;
+ pgno_t *pgs;
+ ssize_t j;
+
+ /* If using records from freeDB which we have not yet
+ * deleted, delete them and any we reserved for me_pghead.
+ */
+ while (pglast < env->me_pglast) {
+ rc = mdb_cursor_first(&mc, &key, NULL);
+ if (rc)
+ return rc;
+ pglast = head_id = *(txnid_t *)key.mv_data;
+ total_room = head_room = 0;
+ mdb_tassert(txn, pglast <= env->me_pglast);
+ rc = mdb_cursor_del(&mc, 0);
+ if (rc)
+ return rc;
+ }
+
+ /* Save the IDL of pages freed by this txn, to a single record */
+ if (freecnt < txn->mt_free_pgs[0]) {
+ if (!freecnt) {
+ /* Make sure last page of freeDB is touched and on freelist */
+ rc = mdb_page_search(&mc, NULL, MDB_PS_LAST|MDB_PS_MODIFY);
+ if (rc && rc != MDB_NOTFOUND)
+ return rc;
+ }
+ free_pgs = txn->mt_free_pgs;
+ /* Write to last page of freeDB */
+ key.mv_size = sizeof(txn->mt_txnid);
+ key.mv_data = &txn->mt_txnid;
+ do {
+ freecnt = free_pgs[0];
+ data.mv_size = MDB_IDL_SIZEOF(free_pgs);
+ rc = mdb_cursor_put(&mc, &key, &data, MDB_RESERVE);
+ if (rc)
+ return rc;
+ /* Retry if mt_free_pgs[] grew during the Put() */
+ free_pgs = txn->mt_free_pgs;
+ } while (freecnt < free_pgs[0]);
+ mdb_midl_sort(free_pgs);
+ memcpy(data.mv_data, free_pgs, data.mv_size);
+#if (MDB_DEBUG) > 1
+ {
+ unsigned int i = free_pgs[0];
+ DPRINTF(("IDL write txn %"Z"u root %"Z"u num %u",
+ txn->mt_txnid, txn->mt_dbs[FREE_DBI].md_root, i));
+ for (; i; i--)
+ DPRINTF(("IDL %"Z"u", free_pgs[i]));
+ }
+#endif
+ continue;
+ }
+
+ mop = env->me_pghead;
+ mop_len = (mop ? mop[0] : 0) + txn->mt_loose_count;
+
+ /* Reserve records for me_pghead[]. Split it if multi-page,
+ * to avoid searching freeDB for a page range. Use keys in
+ * range [1,me_pglast]: Smaller than txnid of oldest reader.
+ */
+ if (total_room >= mop_len) {
+ if (total_room == mop_len || --more < 0)
+ break;
+ } else if (head_room >= maxfree_1pg && head_id > 1) {
+ /* Keep current record (overflow page), add a new one */
+ head_id--;
+ head_room = 0;
+ }
+ /* (Re)write {key = head_id, IDL length = head_room} */
+ total_room -= head_room;
+ head_room = mop_len - total_room;
+ if (head_room > maxfree_1pg && head_id > 1) {
+ /* Overflow multi-page for part of me_pghead */
+ head_room /= head_id; /* amortize page sizes */
+ head_room += maxfree_1pg - head_room % (maxfree_1pg + 1);
+ } else if (head_room < 0) {
+ /* Rare case, not bothering to delete this record */
+ head_room = 0;
+ }
+ key.mv_size = sizeof(head_id);
+ key.mv_data = &head_id;
+ data.mv_size = (head_room + 1) * sizeof(pgno_t);
+ rc = mdb_cursor_put(&mc, &key, &data, MDB_RESERVE);
+ if (rc)
+ return rc;
+ /* IDL is initially empty, zero out at least the length */
+ pgs = (pgno_t *)data.mv_data;
+ j = head_room > clean_limit ? head_room : 0;
+ do {
+ pgs[j] = 0;
+ } while (--j >= 0);
+ total_room += head_room;
+ }
+
+ /* Return loose page numbers to me_pghead, though usually none are
+ * left at this point. The pages themselves remain in dirty_list.
+ */
+ if (txn->mt_loose_pgs) {
+ MDB_page *mp = txn->mt_loose_pgs;
+ unsigned count = txn->mt_loose_count;
+ MDB_IDL loose;
+ /* Room for loose pages + temp IDL with same */
+ if ((rc = mdb_midl_need(&env->me_pghead, 2*count+1)) != 0)
+ return rc;
+ mop = env->me_pghead;
+ loose = mop + MDB_IDL_ALLOCLEN(mop) - count;
+ for (count = 0; mp; mp = NEXT_LOOSE_PAGE(mp))
+ loose[ ++count ] = mp->mp_pgno;
+ loose[0] = count;
+ mdb_midl_sort(loose);
+ mdb_midl_xmerge(mop, loose);
+ txn->mt_loose_pgs = NULL;
+ txn->mt_loose_count = 0;
+ mop_len = mop[0];
+ }
+
+ /* Fill in the reserved me_pghead records */
+ rc = MDB_SUCCESS;
+ if (mop_len) {
+ MDB_val key, data;
+
+ mop += mop_len;
+ rc = mdb_cursor_first(&mc, &key, &data);
+ for (; !rc; rc = mdb_cursor_next(&mc, &key, &data, MDB_NEXT)) {
+ txnid_t id = *(txnid_t *)key.mv_data;
+ ssize_t len = (ssize_t)(data.mv_size / sizeof(MDB_ID)) - 1;
+ MDB_ID save;
+
+ mdb_tassert(txn, len >= 0 && id <= env->me_pglast);
+ key.mv_data = &id;
+ if (len > mop_len) {
+ len = mop_len;
+ data.mv_size = (len + 1) * sizeof(MDB_ID);
+ }
+ data.mv_data = mop -= len;
+ save = mop[0];
+ mop[0] = len;
+ rc = mdb_cursor_put(&mc, &key, &data, MDB_CURRENT);
+ mop[0] = save;
+ if (rc || !(mop_len -= len))
+ break;
+ }
+ }
+ return rc;
+}
+
+/** Flush (some) dirty pages to the map, after clearing their dirty flag.
+ * @param[in] txn the transaction that's being committed
+ * @param[in] keep number of initial pages in dirty_list to keep dirty.
+ * @return 0 on success, non-zero on failure.
+ */
+static int
+mdb_page_flush(MDB_txn *txn, int keep)
+{
+ MDB_env *env = txn->mt_env;
+ MDB_ID2L dl = txn->mt_u.dirty_list;
+ unsigned psize = env->me_psize, j;
+ int i, pagecount = dl[0].mid, rc;
+ size_t size = 0, pos = 0;
+ pgno_t pgno = 0;
+ MDB_page *dp = NULL;
+#ifdef _WIN32
+ OVERLAPPED ov;
+#else
+ struct iovec iov[MDB_COMMIT_PAGES];
+ ssize_t wpos = 0, wsize = 0, wres;
+ size_t next_pos = 1; /* impossible pos, so pos != next_pos */
+ int n = 0;
+#endif
+
+ j = i = keep;
+
+ if (env->me_flags & MDB_WRITEMAP) {
+ /* Clear dirty flags */
+ while (++i <= pagecount) {
+ dp = dl[i].mptr;
+ /* Don't flush this page yet */
+ if (dp->mp_flags & (P_LOOSE|P_KEEP)) {
+ dp->mp_flags &= ~P_KEEP;
+ dl[++j] = dl[i];
+ continue;
+ }
+ dp->mp_flags &= ~P_DIRTY;
+ }
+ goto done;
+ }
+
+ /* Write the pages */
+ for (;;) {
+ if (++i <= pagecount) {
+ dp = dl[i].mptr;
+ /* Don't flush this page yet */
+ if (dp->mp_flags & (P_LOOSE|P_KEEP)) {
+ dp->mp_flags &= ~P_KEEP;
+ dl[i].mid = 0;
+ continue;
+ }
+ pgno = dl[i].mid;
+ /* clear dirty flag */
+ dp->mp_flags &= ~P_DIRTY;
+ pos = pgno * psize;
+ size = psize;
+ if (IS_OVERFLOW(dp)) size *= dp->mp_pages;
+ }
+#ifdef _WIN32
+ else break;
+
+ /* Windows actually supports scatter/gather I/O, but only on
+ * unbuffered file handles. Since we're relying on the OS page
+ * cache for all our data, that's self-defeating. So we just
+ * write pages one at a time. We use the ov structure to set
+ * the write offset, to at least save the overhead of a Seek
+ * system call.
+ */
+ DPRINTF(("committing page %"Z"u", pgno));
+ memset(&ov, 0, sizeof(ov));
+ ov.Offset = pos & 0xffffffff;
+ ov.OffsetHigh = pos >> 16 >> 16;
+ if (!WriteFile(env->me_fd, dp, size, NULL, &ov)) {
+ rc = ErrCode();
+ DPRINTF(("WriteFile: %d", rc));
+ return rc;
+ }
+#else
+ /* Write up to MDB_COMMIT_PAGES dirty pages at a time. */
+ if (pos!=next_pos || n==MDB_COMMIT_PAGES || wsize+size>MAX_WRITE) {
+ if (n) {
+retry_write:
+ /* Write previous page(s) */
+#ifdef MDB_USE_PWRITEV
+ wres = pwritev(env->me_fd, iov, n, wpos);
+#else
+ if (n == 1) {
+ wres = pwrite(env->me_fd, iov[0].iov_base, wsize, wpos);
+ } else {
+retry_seek:
+ if (lseek(env->me_fd, wpos, SEEK_SET) == -1) {
+ rc = ErrCode();
+ if (rc == EINTR)
+ goto retry_seek;
+ DPRINTF(("lseek: %s", strerror(rc)));
+ return rc;
+ }
+ wres = writev(env->me_fd, iov, n);
+ }
+#endif
+ if (wres != wsize) {
+ if (wres < 0) {
+ rc = ErrCode();
+ if (rc == EINTR)
+ goto retry_write;
+ DPRINTF(("Write error: %s", strerror(rc)));
+ } else {
+ rc = EIO; /* TODO: Use which error code? */
+ DPUTS("short write, filesystem full?");
+ }
+ return rc;
+ }
+ n = 0;
+ }
+ if (i > pagecount)
+ break;
+ wpos = pos;
+ wsize = 0;
+ }
+ DPRINTF(("committing page %"Z"u", pgno));
+ next_pos = pos + size;
+ iov[n].iov_len = size;
+ iov[n].iov_base = (char *)dp;
+ wsize += size;
+ n++;
+#endif /* _WIN32 */
+ }
+
+ /* MIPS has cache coherency issues, this is a no-op everywhere else
+ * Note: for any size >= on-chip cache size, entire on-chip cache is
+ * flushed.
+ */
+ CACHEFLUSH(env->me_map, txn->mt_next_pgno * env->me_psize, DCACHE);
+
+ for (i = keep; ++i <= pagecount; ) {
+ dp = dl[i].mptr;
+ /* This is a page we skipped above */
+ if (!dl[i].mid) {
+ dl[++j] = dl[i];
+ dl[j].mid = dp->mp_pgno;
+ continue;
+ }
+ mdb_dpage_free(env, dp);
+ }
+
+done:
+ i--;
+ txn->mt_dirty_room += i - j;
+ dl[0].mid = j;
+ return MDB_SUCCESS;
+}
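+
+/* The POSIX branch above coalesces runs of contiguous dirty pages
+ * into a single writev()/pwritev() call, flushing a batch whenever
+ * the file offset jumps (pos != next_pos), MDB_COMMIT_PAGES iovecs
+ * are queued, or the batch would exceed MAX_WRITE bytes.
+ */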
+
+int
+mdb_txn_commit(MDB_txn *txn)
+{
+ int rc;
+ unsigned int i, end_mode;
+ MDB_env *env;
+
+ if (txn == NULL)
+ return EINVAL;
+
+ /* mdb_txn_end() mode for a commit which writes nothing */
+ end_mode = MDB_END_EMPTY_COMMIT|MDB_END_UPDATE|MDB_END_SLOT|MDB_END_FREE;
+
+ if (txn->mt_child) {
+ rc = mdb_txn_commit(txn->mt_child);
+ if (rc)
+ goto fail;
+ }
+
+ env = txn->mt_env;
+
+ if (F_ISSET(txn->mt_flags, MDB_TXN_RDONLY)) {
+ goto done;
+ }
+
+ if (txn->mt_flags & (MDB_TXN_FINISHED|MDB_TXN_ERROR)) {
+ DPUTS("txn has failed/finished, can't commit");
+ if (txn->mt_parent)
+ txn->mt_parent->mt_flags |= MDB_TXN_ERROR;
+ rc = MDB_BAD_TXN;
+ goto fail;
+ }
+
+ if (txn->mt_parent) {
+ MDB_txn *parent = txn->mt_parent;
+ MDB_page **lp;
+ MDB_ID2L dst, src;
+ MDB_IDL pspill;
+ unsigned x, y, len, ps_len;
+
+ /* Append our free list to parent's */
+ rc = mdb_midl_append_list(&parent->mt_free_pgs, txn->mt_free_pgs);
+ if (rc)
+ goto fail;
+ mdb_midl_free(txn->mt_free_pgs);
+ /* Failures after this must either undo the changes
+ * to the parent or set MDB_TXN_ERROR in the parent.
+ */
+
+ parent->mt_next_pgno = txn->mt_next_pgno;
+ parent->mt_flags = txn->mt_flags;
+
+ /* Merge our cursors into parent's and close them */
+ mdb_cursors_close(txn, 1);
+
+ /* Update parent's DB table. */
+ memcpy(parent->mt_dbs, txn->mt_dbs, txn->mt_numdbs * sizeof(MDB_db));
+ parent->mt_numdbs = txn->mt_numdbs;
+ parent->mt_dbflags[FREE_DBI] = txn->mt_dbflags[FREE_DBI];
+ parent->mt_dbflags[MAIN_DBI] = txn->mt_dbflags[MAIN_DBI];
+ for (i=CORE_DBS; i<txn->mt_numdbs; i++) {
+ /* preserve parent's DB_NEW status */
+ x = parent->mt_dbflags[i] & DB_NEW;
+ parent->mt_dbflags[i] = txn->mt_dbflags[i] | x;
+ }
+
+ dst = parent->mt_u.dirty_list;
+ src = txn->mt_u.dirty_list;
+ /* Remove anything in our dirty list from parent's spill list */
+ if ((pspill = parent->mt_spill_pgs) && (ps_len = pspill[0])) {
+ x = y = ps_len;
+ pspill[0] = (pgno_t)-1;
+ /* Mark our dirty pages as deleted in parent spill list */
+ for (i=0, len=src[0].mid; ++i <= len; ) {
+ MDB_ID pn = src[i].mid << 1;
+ while (pn > pspill[x])
+ x--;
+ if (pn == pspill[x]) {
+ pspill[x] = 1;
+ y = --x;
+ }
+ }
+ /* Squash deleted pagenums if we deleted any */
+ for (x=y; ++x <= ps_len; )
+ if (!(pspill[x] & 1))
+ pspill[++y] = pspill[x];
+ pspill[0] = y;
+ }
+
+ /* Remove anything in our spill list from parent's dirty list */
+ if (txn->mt_spill_pgs && txn->mt_spill_pgs[0]) {
+ for (i=1; i<=txn->mt_spill_pgs[0]; i++) {
+ MDB_ID pn = txn->mt_spill_pgs[i];
+ if (pn & 1)
+ continue; /* deleted spillpg */
+ pn >>= 1;
+ y = mdb_mid2l_search(dst, pn);
+ if (y <= dst[0].mid && dst[y].mid == pn) {
+ free(dst[y].mptr);
+ while (y < dst[0].mid) {
+ dst[y] = dst[y+1];
+ y++;
+ }
+ dst[0].mid--;
+ }
+ }
+ }
+
+ /* Find len = length of merging our dirty list with parent's */
+ x = dst[0].mid;
+ dst[0].mid = 0; /* simplify loops */
+ if (parent->mt_parent) {
+ len = x + src[0].mid;
+ y = mdb_mid2l_search(src, dst[x].mid + 1) - 1;
+ for (i = x; y && i; y--) {
+ pgno_t yp = src[y].mid;
+ while (yp < dst[i].mid)
+ i--;
+ if (yp == dst[i].mid) {
+ i--;
+ len--;
+ }
+ }
+ } else { /* Simplify the above for single-ancestor case */
+ len = MDB_IDL_UM_MAX - txn->mt_dirty_room;
+ }
+ /* Merge our dirty list with parent's */
+ y = src[0].mid;
+ for (i = len; y; dst[i--] = src[y--]) {
+ pgno_t yp = src[y].mid;
+ while (yp < dst[x].mid)
+ dst[i--] = dst[x--];
+ if (yp == dst[x].mid)
+ free(dst[x--].mptr);
+ }
+ mdb_tassert(txn, i == x);
+ dst[0].mid = len;
+ free(txn->mt_u.dirty_list);
+ parent->mt_dirty_room = txn->mt_dirty_room;
+ if (txn->mt_spill_pgs) {
+ if (parent->mt_spill_pgs) {
+ /* TODO: Prevent failure here, so parent does not fail */
+ rc = mdb_midl_append_list(&parent->mt_spill_pgs, txn->mt_spill_pgs);
+ if (rc)
+ parent->mt_flags |= MDB_TXN_ERROR;
+ mdb_midl_free(txn->mt_spill_pgs);
+ mdb_midl_sort(parent->mt_spill_pgs);
+ } else {
+ parent->mt_spill_pgs = txn->mt_spill_pgs;
+ }
+ }
+
+ /* Append our loose page list to parent's */
+ for (lp = &parent->mt_loose_pgs; *lp; lp = &NEXT_LOOSE_PAGE(*lp))
+ ;
+ *lp = txn->mt_loose_pgs;
+ parent->mt_loose_count += txn->mt_loose_count;
+
+ parent->mt_child = NULL;
+ mdb_midl_free(((MDB_ntxn *)txn)->mnt_pgstate.mf_pghead);
+ free(txn);
+ return rc;
+ }
+
+ if (txn != env->me_txn) {
+ DPUTS("attempt to commit unknown transaction");
+ rc = EINVAL;
+ goto fail;
+ }
+
+ mdb_cursors_close(txn, 0);
+
+ if (!txn->mt_u.dirty_list[0].mid &&
+ !(txn->mt_flags & (MDB_TXN_DIRTY|MDB_TXN_SPILLS)))
+ goto done;
+
+ DPRINTF(("committing txn %"Z"u %p on mdbenv %p, root page %"Z"u",
+ txn->mt_txnid, (void*)txn, (void*)env, txn->mt_dbs[MAIN_DBI].md_root));
+
+ /* Update DB root pointers */
+ if (txn->mt_numdbs > CORE_DBS) {
+ MDB_cursor mc;
+ MDB_dbi i;
+ MDB_val data;
+ data.mv_size = sizeof(MDB_db);
+
+ mdb_cursor_init(&mc, txn, MAIN_DBI, NULL);
+ for (i = CORE_DBS; i < txn->mt_numdbs; i++) {
+ if (txn->mt_dbflags[i] & DB_DIRTY) {
+ if (TXN_DBI_CHANGED(txn, i)) {
+ rc = MDB_BAD_DBI;
+ goto fail;
+ }
+ data.mv_data = &txn->mt_dbs[i];
+ rc = mdb_cursor_put(&mc, &txn->mt_dbxs[i].md_name, &data,
+ F_SUBDATA);
+ if (rc)
+ goto fail;
+ }
+ }
+ }
+
+ rc = mdb_freelist_save(txn);
+ if (rc)
+ goto fail;
+
+ mdb_midl_free(env->me_pghead);
+ env->me_pghead = NULL;
+ mdb_midl_shrink(&txn->mt_free_pgs);
+
+#if (MDB_DEBUG) > 2
+ mdb_audit(txn);
+#endif
+
+ if ((rc = mdb_page_flush(txn, 0)) ||
+ (rc = mdb_env_sync(env, 0)) ||
+ (rc = mdb_env_write_meta(txn)))
+ goto fail;
+ end_mode = MDB_END_COMMITTED|MDB_END_UPDATE;
+
+done:
+ mdb_txn_end(txn, end_mode);
+ return MDB_SUCCESS;
+
+fail:
+ mdb_txn_abort(txn);
+ return rc;
+}
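+
+/* Commit pipeline for a top-level write txn, as implemented above:
+ * close cursors, write updated sub-DB roots into MAIN_DBI, save the
+ * freelist into FREE_DBI, flush dirty pages, sync the data file,
+ * then publish the new meta page. Crash safety follows from the meta
+ * write landing last: until it completes, the previous meta (and so
+ * the previous consistent snapshot) remains in effect.
+ */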
+
+/** Read the environment parameters of a DB environment before
+ * mapping it into memory.
+ * @param[in] env the environment handle
+ * @param[out] meta address of where to store the meta information
+ * @return 0 on success, non-zero on failure.
+ */
+static int ESECT
+mdb_env_read_header(MDB_env *env, MDB_meta *meta)
+{
+ MDB_metabuf pbuf;
+ MDB_page *p;
+ MDB_meta *m;
+ int i, rc, off;
+ enum { Size = sizeof(pbuf) };
+
+ /* We don't know the page size yet, so use a minimum value.
+ * Read both meta pages so we can use the latest one.
+ */
+
+ for (i=off=0; i<NUM_METAS; i++, off += meta->mm_psize) {
+#ifdef _WIN32
+ DWORD len;
+ OVERLAPPED ov;
+ memset(&ov, 0, sizeof(ov));
+ ov.Offset = off;
+ rc = ReadFile(env->me_fd, &pbuf, Size, &len, &ov) ? (int)len : -1;
+ if (rc == -1 && ErrCode() == ERROR_HANDLE_EOF)
+ rc = 0;
+#else
+ rc = pread(env->me_fd, &pbuf, Size, off);
+#endif
+ if (rc != Size) {
+ if (rc == 0 && off == 0)
+ return ENOENT;
+ rc = rc < 0 ? (int) ErrCode() : MDB_INVALID;
+ DPRINTF(("read: %s", mdb_strerror(rc)));
+ return rc;
+ }
+
+ p = (MDB_page *)&pbuf;
+
+ if (!F_ISSET(p->mp_flags, P_META)) {
+ DPRINTF(("page %"Z"u not a meta page", p->mp_pgno));
+ return MDB_INVALID;
+ }
+
+ m = METADATA(p);
+ if (m->mm_magic != MDB_MAGIC) {
+ DPUTS("meta has invalid magic");
+ return MDB_INVALID;
+ }
+
+ if (m->mm_version != MDB_DATA_VERSION) {
+ DPRINTF(("database is version %u, expected version %u",
+ m->mm_version, MDB_DATA_VERSION));
+ return MDB_VERSION_MISMATCH;
+ }
+
+ if (off == 0 || m->mm_txnid > meta->mm_txnid)
+ *meta = *m;
+ }
+ return 0;
+}
+
+/** Fill in most of the zeroed #MDB_meta for an empty database environment */
+static void ESECT
+mdb_env_init_meta0(MDB_env *env, MDB_meta *meta)
+{
+ meta->mm_magic = MDB_MAGIC;
+ meta->mm_version = MDB_DATA_VERSION;
+ meta->mm_mapsize = env->me_mapsize;
+ meta->mm_psize = env->me_psize;
+ meta->mm_last_pg = NUM_METAS-1;
+ meta->mm_flags = env->me_flags & 0xffff;
+ meta->mm_flags |= MDB_INTEGERKEY; /* this is mm_dbs[FREE_DBI].md_flags */
+ meta->mm_dbs[FREE_DBI].md_root = P_INVALID;
+ meta->mm_dbs[MAIN_DBI].md_root = P_INVALID;
+}
+
+/** Write the environment parameters of a freshly created DB environment.
+ * @param[in] env the environment handle
+ * @param[in] meta the #MDB_meta to write
+ * @return 0 on success, non-zero on failure.
+ */
+static int ESECT
+mdb_env_init_meta(MDB_env *env, MDB_meta *meta)
+{
+ MDB_page *p, *q;
+ int rc;
+ unsigned int psize;
+#ifdef _WIN32
+ DWORD len;
+ OVERLAPPED ov;
+ memset(&ov, 0, sizeof(ov));
+#define DO_PWRITE(rc, fd, ptr, size, len, pos) do { \
+ ov.Offset = pos; \
+ rc = WriteFile(fd, ptr, size, &len, &ov); } while(0)
+#else
+ int len;
+#define DO_PWRITE(rc, fd, ptr, size, len, pos) do { \
+ len = pwrite(fd, ptr, size, pos); \
+ if (len == -1 && ErrCode() == EINTR) continue; \
+ rc = (len >= 0); break; } while(1)
+#endif
+
+ DPUTS("writing new meta page");
+
+ psize = env->me_psize;
+
+ p = calloc(NUM_METAS, psize);
+ if (!p)
+ return ENOMEM;
+
+ p->mp_pgno = 0;
+ p->mp_flags = P_META;
+ *(MDB_meta *)METADATA(p) = *meta;
+
+ q = (MDB_page *)((char *)p + psize);
+ q->mp_pgno = 1;
+ q->mp_flags = P_META;
+ *(MDB_meta *)METADATA(q) = *meta;
+
+ DO_PWRITE(rc, env->me_fd, p, psize * NUM_METAS, len, 0);
+ if (!rc)
+ rc = ErrCode();
+ else if ((unsigned) len == psize * NUM_METAS)
+ rc = MDB_SUCCESS;
+ else
+ rc = ENOSPC;
+ free(p);
+ return rc;
+}
+
+/** Update the environment info to commit a transaction.
+ * @param[in] txn the transaction that's being committed
+ * @return 0 on success, non-zero on failure.
+ */
+static int
+mdb_env_write_meta(MDB_txn *txn)
+{
+ MDB_env *env;
+ MDB_meta meta, metab, *mp;
+ unsigned flags;
+ size_t mapsize;
+ off_t off;
+ int rc, len, toggle;
+ char *ptr;
+ HANDLE mfd;
+#ifdef _WIN32
+ OVERLAPPED ov;
+#else
+ int r2;
+#endif
+
+ toggle = txn->mt_txnid & 1;
+ DPRINTF(("writing meta page %d for root page %"Z"u",
+ toggle, txn->mt_dbs[MAIN_DBI].md_root));
+
+ env = txn->mt_env;
+ flags = env->me_flags;
+ mp = env->me_metas[toggle];
+ mapsize = env->me_metas[toggle ^ 1]->mm_mapsize;
+ /* Persist any increases of mapsize config */
+ if (mapsize < env->me_mapsize)
+ mapsize = env->me_mapsize;
+
+ if (flags & MDB_WRITEMAP) {
+ mp->mm_mapsize = mapsize;
+ mp->mm_dbs[FREE_DBI] = txn->mt_dbs[FREE_DBI];
+ mp->mm_dbs[MAIN_DBI] = txn->mt_dbs[MAIN_DBI];
+ mp->mm_last_pg = txn->mt_next_pgno - 1;
+#if (__GNUC__ * 100 + __GNUC_MINOR__ >= 404) && /* TODO: portability */ \
+ !(defined(__i386__) || defined(__x86_64__))
+ /* LY: issue a memory barrier, if not x86. ITS#7969 */
+ __sync_synchronize();
+#endif
+ mp->mm_txnid = txn->mt_txnid;
+ if (!(flags & (MDB_NOMETASYNC|MDB_NOSYNC))) {
+ unsigned meta_size = env->me_psize;
+ rc = (env->me_flags & MDB_MAPASYNC) ? MS_ASYNC : MS_SYNC;
+ ptr = (char *)mp - PAGEHDRSZ;
+#ifndef _WIN32 /* POSIX msync() requires ptr = start of OS page */
+ r2 = (ptr - env->me_map) & (env->me_os_psize - 1);
+ ptr -= r2;
+ meta_size += r2;
+#endif
+ if (MDB_MSYNC(ptr, meta_size, rc)) {
+ rc = ErrCode();
+ goto fail;
+ }
+ }
+ goto done;
+ }
+ metab.mm_txnid = mp->mm_txnid;
+ metab.mm_last_pg = mp->mm_last_pg;
+
+ meta.mm_mapsize = mapsize;
+ meta.mm_dbs[FREE_DBI] = txn->mt_dbs[FREE_DBI];
+ meta.mm_dbs[MAIN_DBI] = txn->mt_dbs[MAIN_DBI];
+ meta.mm_last_pg = txn->mt_next_pgno - 1;
+ meta.mm_txnid = txn->mt_txnid;
+
+ off = offsetof(MDB_meta, mm_mapsize);
+ ptr = (char *)&meta + off;
+ len = sizeof(MDB_meta) - off;
+ off += (char *)mp - env->me_map;
+
+ /* Write to the SYNC fd unless MDB_NOSYNC/MDB_NOMETASYNC.
+ * (me_mfd goes to the same file as me_fd, but writing to it
+ * also syncs to disk. Avoids a separate fdatasync() call.)
+ */
+ mfd = (flags & (MDB_NOSYNC|MDB_NOMETASYNC)) ? env->me_fd : env->me_mfd;
+#ifdef _WIN32
+ {
+ memset(&ov, 0, sizeof(ov));
+ ov.Offset = off;
+ if (!WriteFile(mfd, ptr, len, (DWORD *)&rc, &ov))
+ rc = -1;
+ }
+#else
+retry_write:
+ rc = pwrite(mfd, ptr, len, off);
+#endif
+ if (rc != len) {
+ rc = rc < 0 ? ErrCode() : EIO;
+#ifndef _WIN32
+ if (rc == EINTR)
+ goto retry_write;
+#endif
+ DPUTS("write failed, disk error?");
+ /* On a failure, the pagecache still contains the new data.
+ * Write some old data back, to prevent it from being used.
+ * Use the non-SYNC fd; we know it will fail anyway.
+ */
+ meta.mm_last_pg = metab.mm_last_pg;
+ meta.mm_txnid = metab.mm_txnid;
+#ifdef _WIN32
+ memset(&ov, 0, sizeof(ov));
+ ov.Offset = off;
+ WriteFile(env->me_fd, ptr, len, NULL, &ov);
+#else
+ r2 = pwrite(env->me_fd, ptr, len, off);
+ (void)r2; /* Silence warnings. We don't care about pwrite's return value */
+#endif
+fail:
+ env->me_flags |= MDB_FATAL_ERROR;
+ return rc;
+ }
+ /* MIPS has cache coherency issues, this is a no-op everywhere else */
+ CACHEFLUSH(env->me_map + off, len, DCACHE);
+done:
+ /* Memory ordering issues are irrelevant; since the entire writer
+ * is wrapped by wmutex, all of these changes will become visible
+ * after the wmutex is unlocked. Since the DB is multi-version,
+ * readers will get consistent data regardless of how fresh or
+ * how stale their view of these values is.
+ */
+ if (env->me_txns)
+ env->me_txns->mti_txnid = txn->mt_txnid;
+
+ return MDB_SUCCESS;
+}
+
+/** Check both meta pages to see which one is newer.
+ * @param[in] env the environment handle
+ * @return newest #MDB_meta.
+ */
+static MDB_meta *
+mdb_env_pick_meta(const MDB_env *env)
+{
+ MDB_meta *const *metas = env->me_metas;
+ return metas[ metas[0]->mm_txnid < metas[1]->mm_txnid ];
+}
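+
+/* The two meta pages double-buffer by txnid parity: txn N writes
+ * meta[N & 1], so e.g. txn 6 overwrites the meta left by txn 4 while
+ * txn 5's page survives as the fallback. The newer environment state
+ * is therefore simply the page with the larger stored txnid.
+ */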
+
+int ESECT
+mdb_env_create(MDB_env **env)
+{
+ MDB_env *e;
+
+ e = calloc(1, sizeof(MDB_env));
+ if (!e)
+ return ENOMEM;
+
+ e->me_maxreaders = DEFAULT_READERS;
+ e->me_maxdbs = e->me_numdbs = CORE_DBS;
+ e->me_fd = INVALID_HANDLE_VALUE;
+ e->me_lfd = INVALID_HANDLE_VALUE;
+ e->me_mfd = INVALID_HANDLE_VALUE;
+#ifdef MDB_USE_POSIX_SEM
+ e->me_rmutex = SEM_FAILED;
+ e->me_wmutex = SEM_FAILED;
+#endif
+ e->me_pid = getpid();
+ GET_PAGESIZE(e->me_os_psize);
+ VGMEMP_CREATE(e,0,0);
+ *env = e;
+ return MDB_SUCCESS;
+}
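+
+/* Illustrative sketch (not part of the source): the usual environment
+ * setup sequence. Tuning calls go between mdb_env_create() and
+ * mdb_env_open(); the path, sizes and mode below are placeholders:
+ *
+ *	MDB_env *env;
+ *	mdb_env_create(&env);
+ *	mdb_env_set_mapsize(env, (size_t)1 << 30);	// 1 GiB map
+ *	mdb_env_set_maxdbs(env, 4);			// named sub-DBs
+ *	mdb_env_set_maxreaders(env, 126);
+ *	if (mdb_env_open(env, "./db", 0, 0664) != MDB_SUCCESS)
+ *		mdb_env_close(env);	// must close even when open fails
+ */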
+
+static int ESECT
+mdb_env_map(MDB_env *env, void *addr)
+{
+ MDB_page *p;
+ unsigned int flags = env->me_flags;
+#ifdef _WIN32
+ int rc;
+ HANDLE mh;
+ LONG sizelo, sizehi;
+ size_t msize;
+
+ if (flags & MDB_RDONLY) {
+ /* Don't set explicit map size, use whatever exists */
+ msize = 0;
+ sizelo = 0;
+ sizehi = 0;
+ } else {
+ msize = env->me_mapsize;
+ sizelo = msize & 0xffffffff;
+ sizehi = msize >> 16 >> 16; /* only needed on Win64 */
+
+ /* Windows won't create mappings for zero-length files,
+ * and won't map more than the file size.
+ * Just set the maxsize right now.
+ */
+ if (!(flags & MDB_WRITEMAP) && (SetFilePointer(env->me_fd, sizelo, &sizehi, 0) != (DWORD)sizelo
+ || !SetEndOfFile(env->me_fd)
+ || SetFilePointer(env->me_fd, 0, NULL, 0) != 0))
+ return ErrCode();
+ }
+
+ mh = CreateFileMapping(env->me_fd, NULL, flags & MDB_WRITEMAP ?
+ PAGE_READWRITE : PAGE_READONLY,
+ sizehi, sizelo, NULL);
+ if (!mh)
+ return ErrCode();
+ env->me_map = MapViewOfFileEx(mh, flags & MDB_WRITEMAP ?
+ FILE_MAP_WRITE : FILE_MAP_READ,
+ 0, 0, msize, addr);
+ rc = env->me_map ? 0 : ErrCode();
+ CloseHandle(mh);
+ if (rc)
+ return rc;
+#else
+ int mmap_flags = MAP_SHARED;
+ int prot = PROT_READ;
+#ifdef MAP_NOSYNC /* Used on FreeBSD */
+ if (flags & MDB_NOSYNC)
+ mmap_flags |= MAP_NOSYNC;
+#endif
+ if (flags & MDB_WRITEMAP) {
+ prot |= PROT_WRITE;
+ if (ftruncate(env->me_fd, env->me_mapsize) < 0)
+ return ErrCode();
+ }
+ env->me_map = mmap(addr, env->me_mapsize, prot, mmap_flags,
+ env->me_fd, 0);
+ if (env->me_map == MAP_FAILED) {
+ env->me_map = NULL;
+ return ErrCode();
+ }
+
+ if (flags & MDB_NORDAHEAD) {
+ /* Turn off readahead. It's harmful when the DB is larger than RAM. */
+#ifdef MADV_RANDOM
+ madvise(env->me_map, env->me_mapsize, MADV_RANDOM);
+#else
+#ifdef POSIX_MADV_RANDOM
+ posix_madvise(env->me_map, env->me_mapsize, POSIX_MADV_RANDOM);
+#endif /* POSIX_MADV_RANDOM */
+#endif /* MADV_RANDOM */
+ }
+#endif /* _WIN32 */
+
+ /* Can happen because the address argument to mmap() is just a
+ * hint. mmap() can pick another, e.g. if the range is in use.
+ * The MAP_FIXED flag would prevent that, but then mmap could
+ * instead unmap existing pages to make room for the new map.
+ */
+ if (addr && env->me_map != addr)
+ return EBUSY; /* TODO: Make a new MDB_* error code? */
+
+ p = (MDB_page *)env->me_map;
+ env->me_metas[0] = METADATA(p);
+ env->me_metas[1] = (MDB_meta *)((char *)env->me_metas[0] + env->me_psize);
+
+ return MDB_SUCCESS;
+}
+
+int ESECT
+mdb_env_set_mapsize(MDB_env *env, size_t size)
+{
+ /* If env is already open, caller is responsible for making
+ * sure there are no active txns.
+ */
+ if (env->me_map) {
+ int rc;
+ MDB_meta *meta;
+ void *old;
+ if (env->me_txn)
+ return EINVAL;
+ meta = mdb_env_pick_meta(env);
+ if (!size)
+ size = meta->mm_mapsize;
+ {
+ /* Silently round up to minimum if the size is too small */
+ size_t minsize = (meta->mm_last_pg + 1) * env->me_psize;
+ if (size < minsize)
+ size = minsize;
+ }
+ munmap(env->me_map, env->me_mapsize);
+ env->me_mapsize = size;
+ old = (env->me_flags & MDB_FIXEDMAP) ? env->me_map : NULL;
+ rc = mdb_env_map(env, old);
+ if (rc)
+ return rc;
+ }
+ env->me_mapsize = size;
+ if (env->me_psize)
+ env->me_maxpg = env->me_mapsize / env->me_psize;
+ return MDB_SUCCESS;
+}
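+
+/* Illustrative sketch (not part of the source): when another process
+ * has grown the map, mdb_txn_begin() reports MDB_MAP_RESIZED; calling
+ * mdb_env_set_mapsize() with size 0 adopts the size recorded in the
+ * meta page, per the code above:
+ *
+ *	rc = mdb_txn_begin(env, NULL, 0, &txn);
+ *	if (rc == MDB_MAP_RESIZED) {
+ *		mdb_env_set_mapsize(env, 0);	// pick up the new size
+ *		rc = mdb_txn_begin(env, NULL, 0, &txn);
+ *	}
+ */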
+
+int ESECT
+mdb_env_set_maxdbs(MDB_env *env, MDB_dbi dbs)
+{
+ if (env->me_map)
+ return EINVAL;
+ env->me_maxdbs = dbs + CORE_DBS;
+ return MDB_SUCCESS;
+}
+
+int ESECT
+mdb_env_set_maxreaders(MDB_env *env, unsigned int readers)
+{
+ if (env->me_map || readers < 1)
+ return EINVAL;
+ env->me_maxreaders = readers;
+ return MDB_SUCCESS;
+}
+
+int ESECT
+mdb_env_get_maxreaders(MDB_env *env, unsigned int *readers)
+{
+ if (!env || !readers)
+ return EINVAL;
+ *readers = env->me_maxreaders;
+ return MDB_SUCCESS;
+}
+
+static int ESECT
+mdb_fsize(HANDLE fd, size_t *size)
+{
+#ifdef _WIN32
+ LARGE_INTEGER fsize;
+
+ if (!GetFileSizeEx(fd, &fsize))
+ return ErrCode();
+
+ *size = fsize.QuadPart;
+#else
+ struct stat st;
+
+ if (fstat(fd, &st))
+ return ErrCode();
+
+ *size = st.st_size;
+#endif
+ return MDB_SUCCESS;
+}
+
+
+#ifdef _WIN32
+typedef wchar_t mdb_nchar_t;
+# define MDB_NAME(str) L##str
+# define mdb_name_cpy wcscpy
+#else
+/** Character type for file names: char on Unix, wchar_t on Windows */
+typedef char mdb_nchar_t;
+# define MDB_NAME(str) str /**< #mdb_nchar_t[] string literal */
+# define mdb_name_cpy strcpy /**< Copy name (#mdb_nchar_t string) */
+#endif
+
+/** Filename - string of #mdb_nchar_t[] */
+typedef struct MDB_name {
+ int mn_len; /**< Length */
+ int mn_alloced; /**< True if #mn_val was malloced */
+ mdb_nchar_t *mn_val; /**< Contents */
+} MDB_name;
+
+/** Filename suffixes [datafile,lockfile][without,with MDB_NOSUBDIR] */
+static const mdb_nchar_t *const mdb_suffixes[2][2] = {
+ { MDB_NAME("/data.mdb"), MDB_NAME("") },
+ { MDB_NAME("/lock.mdb"), MDB_NAME("-lock") }
+};
+
+#define MDB_SUFFLEN 9 /**< Max string length in #mdb_suffixes[] */
+
+/** Set up filename + scratch area for filename suffix, for opening files.
+ * It should be freed with #mdb_fname_destroy().
+ * On Windows, paths are converted from char *UTF-8 to wchar_t *UTF-16.
+ *
+ * @param[in] path Pathname for #mdb_env_open().
+ * @param[in] envflags Whether a subdir and/or lockfile will be used.
+ * @param[out] fname Resulting filename, with room for a suffix if necessary.
+ */
+static int ESECT
+mdb_fname_init(const char *path, unsigned envflags, MDB_name *fname)
+{
+ int no_suffix = F_ISSET(envflags, MDB_NOSUBDIR|MDB_NOLOCK);
+ fname->mn_alloced = 0;
+#ifdef _WIN32
+ return utf8_to_utf16(path, fname, no_suffix ? 0 : MDB_SUFFLEN);
+#else
+ fname->mn_len = strlen(path);
+ if (no_suffix)
+ fname->mn_val = (char *) path;
+ else if ((fname->mn_val = malloc(fname->mn_len + MDB_SUFFLEN+1)) != NULL) {
+ fname->mn_alloced = 1;
+ strcpy(fname->mn_val, path);
+ }
+ else
+ return ENOMEM;
+ return MDB_SUCCESS;
+#endif
+}
+
+/** Destroy \b fname from #mdb_fname_init() */
+#define mdb_fname_destroy(fname) \
+ do { if ((fname).mn_alloced) free((fname).mn_val); } while (0)
+
+#ifdef O_CLOEXEC /* POSIX.1-2008: Set FD_CLOEXEC atomically at open() */
+# define MDB_CLOEXEC O_CLOEXEC
+#else
+# define MDB_CLOEXEC 0
+#endif
+
+/** File type, access mode etc. for #mdb_fopen() */
+enum mdb_fopen_type {
+#ifdef _WIN32
+ MDB_O_RDONLY, MDB_O_RDWR, MDB_O_META, MDB_O_COPY, MDB_O_LOCKS
+#else
+ /* A comment in mdb_fopen() explains some O_* flag choices. */
+ MDB_O_RDONLY= O_RDONLY, /**< for RDONLY me_fd */
+ MDB_O_RDWR = O_RDWR |O_CREAT, /**< for me_fd */
+ MDB_O_META = O_WRONLY|MDB_DSYNC |MDB_CLOEXEC, /**< for me_mfd */
+ MDB_O_COPY = O_WRONLY|O_CREAT|O_EXCL|MDB_CLOEXEC, /**< for #mdb_env_copy() */
+ /** Bitmask for open() flags in enum #mdb_fopen_type. The other bits
+ * distinguish otherwise-equal MDB_O_* constants from each other.
+ */
+ MDB_O_MASK = MDB_O_RDWR|MDB_CLOEXEC | MDB_O_RDONLY|MDB_O_META|MDB_O_COPY,
+ MDB_O_LOCKS = MDB_O_RDWR|MDB_CLOEXEC | ((MDB_O_MASK+1) & ~MDB_O_MASK) /**< for me_lfd */
+#endif
+};
+
+/** Open an LMDB file.
+ * @param[in] env The LMDB environment.
+ * @param[in,out] fname Path from #mdb_fname_init(). A suffix is
+ * appended if necessary to create the filename, without changing mn_len.
+ * @param[in] which Determines file type, access mode, etc.
+ * @param[in] mode The Unix permissions for the file, if we create it.
+ * @param[out] res Resulting file handle.
+ * @return 0 on success, non-zero on failure.
+ */
+static int ESECT
+mdb_fopen(const MDB_env *env, MDB_name *fname,
+ enum mdb_fopen_type which, mdb_mode_t mode,
+ HANDLE *res)
+{
+ int rc = MDB_SUCCESS;
+ HANDLE fd;
+#ifdef _WIN32
+ DWORD acc, share, disp, attrs;
+#else
+ int flags;
+#endif
+
+ if (fname->mn_alloced) /* modifiable copy */
+ mdb_name_cpy(fname->mn_val + fname->mn_len,
+ mdb_suffixes[which==MDB_O_LOCKS][F_ISSET(env->me_flags, MDB_NOSUBDIR)]);
+
+ /* The directory must already exist. Usually the file need not.
+ * MDB_O_META requires the file because we already created it using
+ * MDB_O_RDWR. MDB_O_COPY must not overwrite an existing file.
+ *
+ * With MDB_O_COPY we do not want the OS to cache the writes, since
+ * the source data is already in the OS cache.
+ *
+ * The lockfile needs FD_CLOEXEC (close file descriptor on exec*())
+ * to avoid the flock() issues noted under Caveats in lmdb.h.
+ * Also set it for the other filehandles, which the user cannot get
+ * at to close and which would otherwise leak into child processes
+ * across fork()+exec(). I.e. all but me_fd, which programs do use
+ * via mdb_env_get_fd().
+ */
+
+#ifdef _WIN32
+ acc = GENERIC_READ|GENERIC_WRITE;
+ share = FILE_SHARE_READ|FILE_SHARE_WRITE;
+ disp = OPEN_ALWAYS;
+ attrs = FILE_ATTRIBUTE_NORMAL;
+ switch (which) {
+ case MDB_O_RDONLY: /* read-only datafile */
+ acc = GENERIC_READ;
+ disp = OPEN_EXISTING;
+ break;
+ case MDB_O_META: /* for writing metapages */
+ acc = GENERIC_WRITE;
+ disp = OPEN_EXISTING;
+ attrs = FILE_ATTRIBUTE_NORMAL|FILE_FLAG_WRITE_THROUGH;
+ break;
+ case MDB_O_COPY: /* mdb_env_copy() & co */
+ acc = GENERIC_WRITE;
+ share = 0;
+ disp = CREATE_NEW;
+ attrs = FILE_FLAG_NO_BUFFERING|FILE_FLAG_WRITE_THROUGH;
+ break;
+ default: break; /* silence gcc -Wswitch (not all enum values handled) */
+ }
+ fd = CreateFileW(fname->mn_val, acc, share, NULL, disp, attrs, NULL);
+#else
+ fd = open(fname->mn_val, which & MDB_O_MASK, mode);
+#endif
+
+ if (fd == INVALID_HANDLE_VALUE)
+ rc = ErrCode();
+#ifndef _WIN32
+ else {
+ if (which != MDB_O_RDONLY && which != MDB_O_RDWR) {
+ /* Set CLOEXEC if we could not pass it to open() */
+ if (!MDB_CLOEXEC && (flags = fcntl(fd, F_GETFD)) != -1)
+ (void) fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
+ }
+ if (which == MDB_O_COPY && env->me_psize >= env->me_os_psize) {
+ /* This may require buffer alignment. There is no portable
+ * way to ask how much, so we require OS pagesize alignment.
+ */
+# ifdef F_NOCACHE /* __APPLE__ */
+ (void) fcntl(fd, F_NOCACHE, 1);
+# elif defined O_DIRECT
+ /* open(...O_DIRECT...) would break on filesystems without
+ * O_DIRECT support (ITS#7682). Try to set it here instead.
+ */
+ if ((flags = fcntl(fd, F_GETFL)) != -1)
+ (void) fcntl(fd, F_SETFL, flags | O_DIRECT);
+# endif
+ }
+ }
+#endif /* !_WIN32 */
+
+ *res = fd;
+ return rc;
+}
+
+
+#ifdef BROKEN_FDATASYNC
+#include <sys/utsname.h>
+#include <sys/vfs.h>
+#endif
+
+/** Further setup required for opening an LMDB environment
+ */
+static int ESECT
+mdb_env_open2(MDB_env *env)
+{
+ unsigned int flags = env->me_flags;
+ int i, newenv = 0, rc;
+ MDB_meta meta;
+
+#ifdef _WIN32
+ /* See if we should use QueryLimited */
+ rc = GetVersion();
+ if ((rc & 0xff) > 5)
+ env->me_pidquery = MDB_PROCESS_QUERY_LIMITED_INFORMATION;
+ else
+ env->me_pidquery = PROCESS_QUERY_INFORMATION;
+#endif /* _WIN32 */
+
+#ifdef BROKEN_FDATASYNC
+ /* ext3/ext4 fdatasync is broken on some older Linux kernels.
+ * https://lkml.org/lkml/2012/9/3/83
+ * Kernels after 3.6-rc6 are known good.
+ * https://lkml.org/lkml/2012/9/10/556
+ * See if the DB is on ext3/ext4, then check for new enough kernel
+ * Kernels 2.6.32.60, 2.6.34.15, 3.2.30, and 3.5.4 are also known
+ * to be patched.
+ */
+ {
+ struct statfs st;
+ fstatfs(env->me_fd, &st);
+ while (st.f_type == 0xEF53) {
+ struct utsname uts;
+ int i;
+ uname(&uts);
+ if (uts.release[0] < '3') {
+ if (!strncmp(uts.release, "2.6.32.", 7)) {
+ i = atoi(uts.release+7);
+ if (i >= 60)
+ break; /* 2.6.32.60 and newer is OK */
+ } else if (!strncmp(uts.release, "2.6.34.", 7)) {
+ i = atoi(uts.release+7);
+ if (i >= 15)
+ break; /* 2.6.34.15 and newer is OK */
+ }
+ } else if (uts.release[0] == '3') {
+ i = atoi(uts.release+2);
+ if (i > 5)
+ break; /* 3.6 and newer is OK */
+ if (i == 5) {
+ i = atoi(uts.release+4);
+ if (i >= 4)
+ break; /* 3.5.4 and newer is OK */
+ } else if (i == 2) {
+ i = atoi(uts.release+4);
+ if (i >= 30)
+ break; /* 3.2.30 and newer is OK */
+ }
+ } else { /* 4.x and newer is OK */
+ break;
+ }
+ env->me_flags |= MDB_FSYNCONLY;
+ break;
+ }
+ }
+#endif
+
+ if ((i = mdb_env_read_header(env, &meta)) != 0) {
+ if (i != ENOENT)
+ return i;
+ DPUTS("new mdbenv");
+ newenv = 1;
+ env->me_psize = env->me_os_psize;
+ if (env->me_psize > MAX_PAGESIZE)
+ env->me_psize = MAX_PAGESIZE;
+ memset(&meta, 0, sizeof(meta));
+ mdb_env_init_meta0(env, &meta);
+ meta.mm_mapsize = DEFAULT_MAPSIZE;
+ } else {
+ env->me_psize = meta.mm_psize;
+ }
+
+ /* Was a mapsize configured? */
+ if (!env->me_mapsize) {
+ env->me_mapsize = meta.mm_mapsize;
+ }
+ {
+ /* Make sure mapsize >= committed data size. Even when using
+ * mm_mapsize, which could be broken in old files (ITS#7789).
+ */
+ size_t minsize = (meta.mm_last_pg + 1) * meta.mm_psize;
+ if (env->me_mapsize < minsize)
+ env->me_mapsize = minsize;
+ }
+ meta.mm_mapsize = env->me_mapsize;
+
+ if (newenv && !(flags & MDB_FIXEDMAP)) {
+ /* mdb_env_map() may grow the datafile. Write the metapages
+ * first, so the file will be valid if initialization fails.
+ * Except with FIXEDMAP, since we do not yet know mm_address.
+ * We could fill in mm_address later, but then a different
+ * program might end up doing that - one with a memory layout
+ * and map address which does not suit the main program.
+ */
+ rc = mdb_env_init_meta(env, &meta);
+ if (rc)
+ return rc;
+ newenv = 0;
+ }
+
+ rc = mdb_env_map(env, (flags & MDB_FIXEDMAP) ? meta.mm_address : NULL);
+ if (rc)
+ return rc;
+
+ if (newenv) {
+ if (flags & MDB_FIXEDMAP)
+ meta.mm_address = env->me_map;
+ i = mdb_env_init_meta(env, &meta);
+ if (i != MDB_SUCCESS) {
+ return i;
+ }
+ }
+
+ env->me_maxfree_1pg = (env->me_psize - PAGEHDRSZ) / sizeof(pgno_t) - 1;
+ env->me_nodemax = (((env->me_psize - PAGEHDRSZ) / MDB_MINKEYS) & -2)
+ - sizeof(indx_t);
+#if !(MDB_MAXKEYSIZE)
+ env->me_maxkey = env->me_nodemax - (NODESIZE + sizeof(MDB_db));
+#endif
+ env->me_maxpg = env->me_mapsize / env->me_psize;
+
+#if MDB_DEBUG
+ {
+ MDB_meta *meta = mdb_env_pick_meta(env);
+ MDB_db *db = &meta->mm_dbs[MAIN_DBI];
+
+ DPRINTF(("opened database version %u, pagesize %u",
+ meta->mm_version, env->me_psize));
+ DPRINTF(("using meta page %d", (int) (meta->mm_txnid & 1)));
+ DPRINTF(("depth: %u", db->md_depth));
+ DPRINTF(("entries: %"Z"u", db->md_entries));
+ DPRINTF(("branch pages: %"Z"u", db->md_branch_pages));
+ DPRINTF(("leaf pages: %"Z"u", db->md_leaf_pages));
+ DPRINTF(("overflow pages: %"Z"u", db->md_overflow_pages));
+ DPRINTF(("root: %"Z"u", db->md_root));
+ }
+#endif
+
+ return MDB_SUCCESS;
+}
+
+
+/** Release a reader thread's slot in the reader lock table.
+ * This function is called automatically when a thread exits.
+ * @param[in] ptr This points to the slot in the reader lock table.
+ */
+static void
+mdb_env_reader_dest(void *ptr)
+{
+ MDB_reader *reader = ptr;
+
+#ifndef _WIN32
+ if (reader->mr_pid == getpid()) /* catch pthread_exit() in child process */
+#endif
+ /* We omit the mutex, so do this atomically (i.e. skip mr_txnid) */
+ reader->mr_pid = 0;
+}
+
+#ifdef _WIN32
+/** Junk for arranging thread-specific callbacks on Windows. This is
+ * necessarily platform and compiler-specific. Windows supports up
+ * to 1088 keys. Let's assume nobody opens more than 64 environments
+ * in a single process, for now. They can override this if needed.
+ */
+#ifndef MAX_TLS_KEYS
+#define MAX_TLS_KEYS 64
+#endif
+static pthread_key_t mdb_tls_keys[MAX_TLS_KEYS];
+static int mdb_tls_nkeys;
+
+static void NTAPI mdb_tls_callback(PVOID module, DWORD reason, PVOID ptr)
+{
+ int i;
+ switch(reason) {
+ case DLL_PROCESS_ATTACH: break;
+ case DLL_THREAD_ATTACH: break;
+ case DLL_THREAD_DETACH:
+ for (i=0; i<mdb_tls_nkeys; i++) {
+ MDB_reader *r = pthread_getspecific(mdb_tls_keys[i]);
+ if (r) {
+ mdb_env_reader_dest(r);
+ }
+ }
+ break;
+ case DLL_PROCESS_DETACH: break;
+ }
+}
+#ifdef __GNUC__
+#ifdef _WIN64
+const PIMAGE_TLS_CALLBACK mdb_tls_cbp __attribute__((section (".CRT$XLB"))) = mdb_tls_callback;
+#else
+PIMAGE_TLS_CALLBACK mdb_tls_cbp __attribute__((section (".CRT$XLB"))) = mdb_tls_callback;
+#endif
+#else
+#ifdef _WIN64
+/* Force some symbol references.
+ * _tls_used forces the linker to create the TLS directory if not already done
+ * mdb_tls_cbp prevents whole-program-optimizer from dropping the symbol.
+ */
+#pragma comment(linker, "/INCLUDE:_tls_used")
+#pragma comment(linker, "/INCLUDE:mdb_tls_cbp")
+#pragma const_seg(".CRT$XLB")
+extern const PIMAGE_TLS_CALLBACK mdb_tls_cbp;
+const PIMAGE_TLS_CALLBACK mdb_tls_cbp = mdb_tls_callback;
+#pragma const_seg()
+#else /* _WIN32 */
+#pragma comment(linker, "/INCLUDE:__tls_used")
+#pragma comment(linker, "/INCLUDE:_mdb_tls_cbp")
+#pragma data_seg(".CRT$XLB")
+PIMAGE_TLS_CALLBACK mdb_tls_cbp = mdb_tls_callback;
+#pragma data_seg()
+#endif /* WIN 32/64 */
+#endif /* !__GNUC__ */
+#endif
+
+/** Downgrade the exclusive lock on the region back to shared */
+static int ESECT
+mdb_env_share_locks(MDB_env *env, int *excl)
+{
+ int rc = 0;
+ MDB_meta *meta = mdb_env_pick_meta(env);
+
+ env->me_txns->mti_txnid = meta->mm_txnid;
+
+#ifdef _WIN32
+ {
+ OVERLAPPED ov;
+ /* First acquire a shared lock. The Unlock will
+ * then release the existing exclusive lock.
+ */
+ memset(&ov, 0, sizeof(ov));
+ if (!LockFileEx(env->me_lfd, 0, 0, 1, 0, &ov)) {
+ rc = ErrCode();
+ } else {
+ UnlockFile(env->me_lfd, 0, 0, 1, 0);
+ *excl = 0;
+ }
+ }
+#else
+ {
+ struct flock lock_info;
+ /* The shared lock replaces the existing lock */
+ memset((void *)&lock_info, 0, sizeof(lock_info));
+ lock_info.l_type = F_RDLCK;
+ lock_info.l_whence = SEEK_SET;
+ lock_info.l_start = 0;
+ lock_info.l_len = 1;
+ while ((rc = fcntl(env->me_lfd, F_SETLK, &lock_info)) &&
+ (rc = ErrCode()) == EINTR) ;
+ *excl = rc ? -1 : 0; /* error may mean we lost the lock */
+ }
+#endif
+
+ return rc;
+}
+
+/** Try to get exclusive lock, otherwise shared.
+ * Maintain *excl = -1: no/unknown lock, 0: shared, 1: exclusive.
+ */
+static int ESECT
+mdb_env_excl_lock(MDB_env *env, int *excl)
+{
+ int rc = 0;
+#ifdef _WIN32
+ if (LockFile(env->me_lfd, 0, 0, 1, 0)) {
+ *excl = 1;
+ } else {
+ OVERLAPPED ov;
+ memset(&ov, 0, sizeof(ov));
+ if (LockFileEx(env->me_lfd, 0, 0, 1, 0, &ov)) {
+ *excl = 0;
+ } else {
+ rc = ErrCode();
+ }
+ }
+#else
+ struct flock lock_info;
+ memset((void *)&lock_info, 0, sizeof(lock_info));
+ lock_info.l_type = F_WRLCK;
+ lock_info.l_whence = SEEK_SET;
+ lock_info.l_start = 0;
+ lock_info.l_len = 1;
+ while ((rc = fcntl(env->me_lfd, F_SETLK, &lock_info)) &&
+ (rc = ErrCode()) == EINTR) ;
+ if (!rc) {
+ *excl = 1;
+ } else
+# ifndef MDB_USE_POSIX_MUTEX
+ if (*excl < 0) /* always true when MDB_USE_POSIX_MUTEX */
+# endif
+ {
+ lock_info.l_type = F_RDLCK;
+ while ((rc = fcntl(env->me_lfd, F_SETLKW, &lock_info)) &&
+ (rc = ErrCode()) == EINTR) ;
+ if (rc == 0)
+ *excl = 0;
+ }
+#endif
+ return rc;
+}
+
+#ifdef MDB_USE_HASH
+/*
+ * hash_64 - 64 bit Fowler/Noll/Vo-0 FNV-1a hash code
+ *
+ * @(#) $Revision: 5.1 $
+ * @(#) $Id: hash_64a.c,v 5.1 2009/06/30 09:01:38 chongo Exp $
+ * @(#) $Source: /usr/local/src/cmd/fnv/RCS/hash_64a.c,v $
+ *
+ * http://www.isthe.com/chongo/tech/comp/fnv/index.html
+ *
+ ***
+ *
+ * Please do not copyright this code. This code is in the public domain.
+ *
+ * LANDON CURT NOLL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO
+ * EVENT SHALL LANDON CURT NOLL BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
+ * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+ * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ *
+ * By:
+ * chongo <Landon Curt Noll> /\oo/\
+ * http://www.isthe.com/chongo/
+ *
+ * Share and Enjoy! :-)
+ */
+
+typedef unsigned long long mdb_hash_t;
+#define MDB_HASH_INIT ((mdb_hash_t)0xcbf29ce484222325ULL)
+
+/** perform a 64 bit Fowler/Noll/Vo FNV-1a hash on a buffer
+ * @param[in] val value to hash
+ * @param[in] hval initial value for hash
+ * @return 64 bit hash
+ *
+ * NOTE: To use the recommended 64 bit FNV-1a hash, use MDB_HASH_INIT as the
+ * hval arg on the first call.
+ */
+static mdb_hash_t
+mdb_hash_val(MDB_val *val, mdb_hash_t hval)
+{
+ unsigned char *s = (unsigned char *)val->mv_data; /* unsigned string */
+ unsigned char *end = s + val->mv_size;
+ /*
+ * FNV-1a hash each octet of the string
+ */
+ while (s < end) {
+ /* xor the bottom with the current octet */
+ hval ^= (mdb_hash_t)*s++;
+
+ /* multiply by the 64 bit FNV magic prime mod 2^64 */
+ hval += (hval << 1) + (hval << 4) + (hval << 5) +
+ (hval << 7) + (hval << 8) + (hval << 40);
+ }
+ /* return our new hash value */
+ return hval;
+}
+
+/** Hash the string and output the encoded hash.
+ * This uses modified RFC1924 Ascii85 encoding to accommodate systems with
+ * very short name limits. We don't care about the encoding being reversible,
+ * we just want to preserve as many bits of the input as possible in a
+ * small printable string.
+ * @param[in] str string to hash
+ * @param[out] encbuf an array of 11 chars to hold the hash
+ */
+static const char mdb_a85[]= "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz!#$%&()*+-;<=>?@^_`{|}~";
+
+static void ESECT
+mdb_pack85(unsigned long l, char *out)
+{
+ int i;
+
+ for (i=0; i<5; i++) {
+ *out++ = mdb_a85[l % 85];
+ l /= 85;
+ }
+}
+
+static void ESECT
+mdb_hash_enc(MDB_val *val, char *encbuf)
+{
+ mdb_hash_t h = mdb_hash_val(val, MDB_HASH_INIT);
+
+ mdb_pack85(h, encbuf);
+ mdb_pack85(h>>32, encbuf+5);
+ encbuf[10] = '\0';
+}
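+
+/* Worked example of the FNV-1a round above: hashing the single byte
+ * 0x61 ('a') from MDB_HASH_INIT yields
+ * (0xcbf29ce484222325 ^ 0x61) * 0x100000001b3 mod 2^64. The shift-add
+ * chain is exactly that multiplication by the 64-bit FNV prime
+ * 0x100000001b3 (= 2^40 + 0x1b3), spelled out without a multiply.
+ */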
+#endif
+
+/** Open and/or initialize the lock region for the environment.
+ * @param[in] env The LMDB environment.
+ * @param[in] fname Filename + scratch area, from #mdb_fname_init().
+ * @param[in] mode The Unix permissions for the file, if we create it.
+ * @param[in,out] excl In: -1. Out: lock type (-1 none, 0 shared, 1 exclusive)
+ * @return 0 on success, non-zero on failure.
+ */
+static int ESECT
+mdb_env_setup_locks(MDB_env *env, MDB_name *fname, int mode, int *excl)
+{
+#ifdef _WIN32
+# define MDB_ERRCODE_ROFS ERROR_WRITE_PROTECT
+#else
+# define MDB_ERRCODE_ROFS EROFS
+#endif
+ int rc;
+ off_t size, rsize;
+
+ rc = mdb_fopen(env, fname, MDB_O_LOCKS, mode, &env->me_lfd);
+ if (rc) {
+ /* Omit lockfile if read-only env on read-only filesystem */
+ if (rc == MDB_ERRCODE_ROFS && (env->me_flags & MDB_RDONLY)) {
+ return MDB_SUCCESS;
+ }
+ goto fail;
+ }
+
+ if (!(env->me_flags & MDB_NOTLS)) {
+ rc = pthread_key_create(&env->me_txkey, mdb_env_reader_dest);
+ if (rc)
+ goto fail;
+ env->me_flags |= MDB_ENV_TXKEY;
+#ifdef _WIN32
+ /* Windows TLS callbacks need help finding their TLS info. */
+ if (mdb_tls_nkeys >= MAX_TLS_KEYS) {
+ rc = MDB_TLS_FULL;
+ goto fail;
+ }
+ mdb_tls_keys[mdb_tls_nkeys++] = env->me_txkey;
+#endif
+ }
+
+ /* Try to get exclusive lock. If we succeed, then
+ * nobody is using the lock region and we should initialize it.
+ */
+ if ((rc = mdb_env_excl_lock(env, excl))) goto fail;
+
+#ifdef _WIN32
+ size = GetFileSize(env->me_lfd, NULL);
+#else
+ size = lseek(env->me_lfd, 0, SEEK_END);
+ if (size == -1) goto fail_errno;
+#endif
+ rsize = (env->me_maxreaders-1) * sizeof(MDB_reader) + sizeof(MDB_txninfo);
+ if (size < rsize && *excl > 0) {
+#ifdef _WIN32
+ if (SetFilePointer(env->me_lfd, rsize, NULL, FILE_BEGIN) != (DWORD)rsize
+ || !SetEndOfFile(env->me_lfd))
+ goto fail_errno;
+#else
+ if (ftruncate(env->me_lfd, rsize) != 0) goto fail_errno;
+#endif
+ } else {
+ rsize = size;
+ size = rsize - sizeof(MDB_txninfo);
+ env->me_maxreaders = size/sizeof(MDB_reader) + 1;
+ }
+ {
+#ifdef _WIN32
+ HANDLE mh;
+ mh = CreateFileMapping(env->me_lfd, NULL, PAGE_READWRITE,
+ 0, 0, NULL);
+ if (!mh) goto fail_errno;
+ env->me_txns = MapViewOfFileEx(mh, FILE_MAP_WRITE, 0, 0, rsize, NULL);
+ CloseHandle(mh);
+ if (!env->me_txns) goto fail_errno;
+#else
+ void *m = mmap(NULL, rsize, PROT_READ|PROT_WRITE, MAP_SHARED,
+ env->me_lfd, 0);
+ if (m == MAP_FAILED) goto fail_errno;
+ env->me_txns = m;
+#endif
+ }
+ if (*excl > 0) {
+#ifdef _WIN32
+ BY_HANDLE_FILE_INFORMATION stbuf;
+ struct {
+ DWORD volume;
+ DWORD nhigh;
+ DWORD nlow;
+ } idbuf;
+ MDB_val val;
+ char encbuf[11];
+
+ if (!mdb_sec_inited) {
+ InitializeSecurityDescriptor(&mdb_null_sd,
+ SECURITY_DESCRIPTOR_REVISION);
+ SetSecurityDescriptorDacl(&mdb_null_sd, TRUE, 0, FALSE);
+ mdb_all_sa.nLength = sizeof(SECURITY_ATTRIBUTES);
+ mdb_all_sa.bInheritHandle = FALSE;
+ mdb_all_sa.lpSecurityDescriptor = &mdb_null_sd;
+ mdb_sec_inited = 1;
+ }
+ if (!GetFileInformationByHandle(env->me_lfd, &stbuf)) goto fail_errno;
+ idbuf.volume = stbuf.dwVolumeSerialNumber;
+ idbuf.nhigh = stbuf.nFileIndexHigh;
+ idbuf.nlow = stbuf.nFileIndexLow;
+ val.mv_data = &idbuf;
+ val.mv_size = sizeof(idbuf);
+ mdb_hash_enc(&val, encbuf);
+ sprintf(env->me_txns->mti_rmname, "Global\\MDBr%s", encbuf);
+ sprintf(env->me_txns->mti_wmname, "Global\\MDBw%s", encbuf);
+ env->me_rmutex = CreateMutexA(&mdb_all_sa, FALSE, env->me_txns->mti_rmname);
+ if (!env->me_rmutex) goto fail_errno;
+ env->me_wmutex = CreateMutexA(&mdb_all_sa, FALSE, env->me_txns->mti_wmname);
+ if (!env->me_wmutex) goto fail_errno;
+#elif defined(MDB_USE_POSIX_SEM)
+ struct stat stbuf;
+ struct {
+ dev_t dev;
+ ino_t ino;
+ } idbuf;
+ MDB_val val;
+ char encbuf[11];
+
+#if defined(__NetBSD__)
+#define MDB_SHORT_SEMNAMES 1 /* limited to 14 chars */
+#endif
+ if (fstat(env->me_lfd, &stbuf)) goto fail_errno;
+ idbuf.dev = stbuf.st_dev;
+ idbuf.ino = stbuf.st_ino;
+ val.mv_data = &idbuf;
+ val.mv_size = sizeof(idbuf);
+ mdb_hash_enc(&val, encbuf);
+#ifdef MDB_SHORT_SEMNAMES
+ encbuf[9] = '\0'; /* drop name from 15 chars to 14 chars */
+#endif
+ sprintf(env->me_txns->mti_rmname, "/MDBr%s", encbuf);
+ sprintf(env->me_txns->mti_wmname, "/MDBw%s", encbuf);
+ /* Clean up after a previous run, if needed: Try to
+ * remove both semaphores before doing anything else.
+ */
+ sem_unlink(env->me_txns->mti_rmname);
+ sem_unlink(env->me_txns->mti_wmname);
+ env->me_rmutex = sem_open(env->me_txns->mti_rmname,
+ O_CREAT|O_EXCL, mode, 1);
+ if (env->me_rmutex == SEM_FAILED) goto fail_errno;
+ env->me_wmutex = sem_open(env->me_txns->mti_wmname,
+ O_CREAT|O_EXCL, mode, 1);
+ if (env->me_wmutex == SEM_FAILED) goto fail_errno;
+#else /* MDB_USE_POSIX_MUTEX: */
+ pthread_mutexattr_t mattr;
+
+ /* Solaris needs this before initing a robust mutex. Otherwise
+ * it may skip the init and return EBUSY "seems someone already
+ * inited" or EINVAL "it was inited differently".
+ */
+ memset(env->me_txns->mti_rmutex, 0, sizeof(*env->me_txns->mti_rmutex));
+ memset(env->me_txns->mti_wmutex, 0, sizeof(*env->me_txns->mti_wmutex));
+
+ if ((rc = pthread_mutexattr_init(&mattr)))
+ goto fail;
+
+ rc = pthread_mutexattr_setpshared(&mattr, PTHREAD_PROCESS_SHARED);
+#ifdef MDB_ROBUST_SUPPORTED
+ if (!rc) rc = pthread_mutexattr_setrobust(&mattr, PTHREAD_MUTEX_ROBUST);
+#endif
+ if (!rc) rc = pthread_mutex_init(env->me_txns->mti_rmutex, &mattr);
+ if (!rc) rc = pthread_mutex_init(env->me_txns->mti_wmutex, &mattr);
+ pthread_mutexattr_destroy(&mattr);
+ if (rc)
+ goto fail;
+#endif /* _WIN32 || MDB_USE_POSIX_SEM */
+
+ env->me_txns->mti_magic = MDB_MAGIC;
+ env->me_txns->mti_format = MDB_LOCK_FORMAT;
+ env->me_txns->mti_txnid = 0;
+ env->me_txns->mti_numreaders = 0;
+
+ } else {
+ if (env->me_txns->mti_magic != MDB_MAGIC) {
+ DPUTS("lock region has invalid magic");
+ rc = MDB_INVALID;
+ goto fail;
+ }
+ if (env->me_txns->mti_format != MDB_LOCK_FORMAT) {
+ DPRINTF(("lock region has format+version 0x%x, expected 0x%x",
+ env->me_txns->mti_format, MDB_LOCK_FORMAT));
+ rc = MDB_VERSION_MISMATCH;
+ goto fail;
+ }
+ rc = ErrCode();
+ if (rc && rc != EACCES && rc != EAGAIN) {
+ goto fail;
+ }
+#ifdef _WIN32
+ env->me_rmutex = OpenMutexA(SYNCHRONIZE, FALSE, env->me_txns->mti_rmname);
+ if (!env->me_rmutex) goto fail_errno;
+ env->me_wmutex = OpenMutexA(SYNCHRONIZE, FALSE, env->me_txns->mti_wmname);
+ if (!env->me_wmutex) goto fail_errno;
+#elif defined(MDB_USE_POSIX_SEM)
+ env->me_rmutex = sem_open(env->me_txns->mti_rmname, 0);
+ if (env->me_rmutex == SEM_FAILED) goto fail_errno;
+ env->me_wmutex = sem_open(env->me_txns->mti_wmname, 0);
+ if (env->me_wmutex == SEM_FAILED) goto fail_errno;
+#endif
+ }
+ return MDB_SUCCESS;
+
+fail_errno:
+ rc = ErrCode();
+fail:
+ return rc;
+}
+
+ /** Only a subset of the @ref mdb_env flags can be changed
+ * at runtime. Changing other flags requires closing the
+ * environment and re-opening it with the new flags.
+ */
+#define CHANGEABLE (MDB_NOSYNC|MDB_NOMETASYNC|MDB_MAPASYNC|MDB_NOMEMINIT)
+#define CHANGELESS (MDB_FIXEDMAP|MDB_NOSUBDIR|MDB_RDONLY| \
+ MDB_WRITEMAP|MDB_NOTLS|MDB_NOLOCK|MDB_NORDAHEAD)
+
+#if VALID_FLAGS & PERSISTENT_FLAGS & (CHANGEABLE|CHANGELESS)
+# error "Persistent DB flags & env flags overlap, but both go in mm_flags"
+#endif
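+
+/* Illustrative sketch (not part of the LMDB sources): only CHANGEABLE
+ * flags may be toggled on a live environment, e.g. with the public
+ * mdb_env_set_flags(); CHANGELESS flags are rejected there:
+ *
+ *	rc = mdb_env_set_flags(env, MDB_NOSYNC, 1);	accepted at runtime
+ *	rc = mdb_env_set_flags(env, MDB_WRITEMAP, 1);	returns EINVAL
+ */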
+
+int ESECT
+mdb_env_open(MDB_env *env, const char *path, unsigned int flags, mdb_mode_t mode)
+{
+ int rc, excl = -1;
+ MDB_name fname;
+
+ if (env->me_fd!=INVALID_HANDLE_VALUE || (flags & ~(CHANGEABLE|CHANGELESS)))
+ return EINVAL;
+
+ flags |= env->me_flags;
+
+ rc = mdb_fname_init(path, flags, &fname);
+ if (rc)
+ return rc;
+
+ if (flags & MDB_RDONLY) {
+ /* silently ignore WRITEMAP when we're only getting read access */
+ flags &= ~MDB_WRITEMAP;
+ } else {
+ if (!((env->me_free_pgs = mdb_midl_alloc(MDB_IDL_UM_MAX)) &&
+ (env->me_dirty_list = calloc(MDB_IDL_UM_SIZE, sizeof(MDB_ID2)))))
+ rc = ENOMEM;
+ }
+ env->me_flags = flags |= MDB_ENV_ACTIVE;
+ if (rc)
+ goto leave;
+
+ env->me_path = strdup(path);
+ env->me_dbxs = calloc(env->me_maxdbs, sizeof(MDB_dbx));
+ env->me_dbflags = calloc(env->me_maxdbs, sizeof(uint16_t));
+ env->me_dbiseqs = calloc(env->me_maxdbs, sizeof(unsigned int));
+ if (!(env->me_dbxs && env->me_path && env->me_dbflags && env->me_dbiseqs)) {
+ rc = ENOMEM;
+ goto leave;
+ }
+ env->me_dbxs[FREE_DBI].md_cmp = mdb_cmp_long; /* aligned MDB_INTEGERKEY */
+
+ /* For RDONLY, get lockfile after we know datafile exists */
+ if (!(flags & (MDB_RDONLY|MDB_NOLOCK))) {
+ rc = mdb_env_setup_locks(env, &fname, mode, &excl);
+ if (rc)
+ goto leave;
+ }
+
+ rc = mdb_fopen(env, &fname,
+ (flags & MDB_RDONLY) ? MDB_O_RDONLY : MDB_O_RDWR,
+ mode, &env->me_fd);
+ if (rc)
+ goto leave;
+
+ if ((flags & (MDB_RDONLY|MDB_NOLOCK)) == MDB_RDONLY) {
+ rc = mdb_env_setup_locks(env, &fname, mode, &excl);
+ if (rc)
+ goto leave;
+ }
+
+ if ((rc = mdb_env_open2(env)) == MDB_SUCCESS) {
+ if (!(flags & (MDB_RDONLY|MDB_WRITEMAP))) {
+ /* Synchronous fd for meta writes. Needed even with
+ * MDB_NOSYNC/MDB_NOMETASYNC, in case these get reset.
+ */
+ rc = mdb_fopen(env, &fname, MDB_O_META, mode, &env->me_mfd);
+ if (rc)
+ goto leave;
+ }
+ DPRINTF(("opened dbenv %p", (void *) env));
+ if (excl > 0) {
+ rc = mdb_env_share_locks(env, &excl);
+ if (rc)
+ goto leave;
+ }
+ if (!(flags & MDB_RDONLY)) {
+ MDB_txn *txn;
+ int tsize = sizeof(MDB_txn), size = tsize + env->me_maxdbs *
+ (sizeof(MDB_db)+sizeof(MDB_cursor *)+sizeof(unsigned int)+1);
+ if ((env->me_pbuf = calloc(1, env->me_psize)) &&
+ (txn = calloc(1, size)))
+ {
+ txn->mt_dbs = (MDB_db *)((char *)txn + tsize);
+ txn->mt_cursors = (MDB_cursor **)(txn->mt_dbs + env->me_maxdbs);
+ txn->mt_dbiseqs = (unsigned int *)(txn->mt_cursors + env->me_maxdbs);
+ txn->mt_dbflags = (unsigned char *)(txn->mt_dbiseqs + env->me_maxdbs);
+ txn->mt_env = env;
+ txn->mt_dbxs = env->me_dbxs;
+ txn->mt_flags = MDB_TXN_FINISHED;
+ env->me_txn0 = txn;
+ } else {
+ rc = ENOMEM;
+ }
+ }
+ }
+
+leave:
+ if (rc) {
+ mdb_env_close0(env, excl);
+ }
+ mdb_fname_destroy(fname);
+ return rc;
+}
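+
+/* Usage sketch (illustrative only, not part of the library): a minimal
+ * open/close sequence for this API; the path and mode are arbitrary
+ * example values.
+ *
+ *	MDB_env *env;
+ *	int rc = mdb_env_create(&env);
+ *	if (rc == MDB_SUCCESS) {
+ *		rc = mdb_env_open(env, "./testdb", 0, 0664);
+ *		... use the environment if rc == MDB_SUCCESS ...
+ *		mdb_env_close(env);	required even if the open failed
+ *	}
+ */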
+
+/** Destroy resources from mdb_env_open(), clear our readers & DBIs */
+static void ESECT
+mdb_env_close0(MDB_env *env, int excl)
+{
+ int i;
+
+ if (!(env->me_flags & MDB_ENV_ACTIVE))
+ return;
+
+ /* Doing this here since me_dbxs may not exist during mdb_env_close */
+ if (env->me_dbxs) {
+ for (i = env->me_maxdbs; --i >= CORE_DBS; )
+ free(env->me_dbxs[i].md_name.mv_data);
+ free(env->me_dbxs);
+ }
+
+ free(env->me_pbuf);
+ free(env->me_dbiseqs);
+ free(env->me_dbflags);
+ free(env->me_path);
+ free(env->me_dirty_list);
+ free(env->me_txn0);
+ mdb_midl_free(env->me_free_pgs);
+
+ if (env->me_flags & MDB_ENV_TXKEY) {
+ pthread_key_delete(env->me_txkey);
+#ifdef _WIN32
+ /* Delete our key from the global list */
+ for (i=0; i<mdb_tls_nkeys; i++)
+ if (mdb_tls_keys[i] == env->me_txkey) {
+ mdb_tls_keys[i] = mdb_tls_keys[mdb_tls_nkeys-1];
+ mdb_tls_nkeys--;
+ break;
+ }
+#endif
+ }
+
+ if (env->me_map) {
+ munmap(env->me_map, env->me_mapsize);
+ }
+ if (env->me_mfd != INVALID_HANDLE_VALUE)
+ (void) close(env->me_mfd);
+ if (env->me_fd != INVALID_HANDLE_VALUE)
+ (void) close(env->me_fd);
+ if (env->me_txns) {
+ MDB_PID_T pid = getpid();
+ /* Clearing readers is done in this function because
+ * me_txkey with its destructor must be disabled first.
+ *
+		 * We skip the reader mutex, so we touch only
+ * data owned by this process (me_close_readers and
+ * our readers), and clear each reader atomically.
+ */
+ for (i = env->me_close_readers; --i >= 0; )
+ if (env->me_txns->mti_readers[i].mr_pid == pid)
+ env->me_txns->mti_readers[i].mr_pid = 0;
+#ifdef _WIN32
+ if (env->me_rmutex) {
+ CloseHandle(env->me_rmutex);
+ if (env->me_wmutex) CloseHandle(env->me_wmutex);
+ }
+ /* Windows automatically destroys the mutexes when
+ * the last handle closes.
+ */
+#elif defined(MDB_USE_POSIX_SEM)
+ if (env->me_rmutex != SEM_FAILED) {
+ sem_close(env->me_rmutex);
+ if (env->me_wmutex != SEM_FAILED)
+ sem_close(env->me_wmutex);
+ /* If we have the filelock: If we are the
+ * only remaining user, clean up semaphores.
+ */
+ if (excl == 0)
+ mdb_env_excl_lock(env, &excl);
+ if (excl > 0) {
+ sem_unlink(env->me_txns->mti_rmname);
+ sem_unlink(env->me_txns->mti_wmname);
+ }
+ }
+#elif defined(MDB_ROBUST_SUPPORTED)
+ /* If we have the filelock: If we are the
+ * only remaining user, clean up robust
+ * mutexes.
+ */
+ if (excl == 0)
+ mdb_env_excl_lock(env, &excl);
+ if (excl > 0) {
+ pthread_mutex_destroy(env->me_txns->mti_rmutex);
+ pthread_mutex_destroy(env->me_txns->mti_wmutex);
+ }
+#endif
+ munmap((void *)env->me_txns, (env->me_maxreaders-1)*sizeof(MDB_reader)+sizeof(MDB_txninfo));
+ }
+ if (env->me_lfd != INVALID_HANDLE_VALUE) {
+#ifdef _WIN32
+ if (excl >= 0) {
+ /* Unlock the lockfile. Windows would have unlocked it
+ * after closing anyway, but not necessarily at once.
+ */
+ UnlockFile(env->me_lfd, 0, 0, 1, 0);
+ }
+#endif
+ (void) close(env->me_lfd);
+ }
+
+ env->me_flags &= ~(MDB_ENV_ACTIVE|MDB_ENV_TXKEY);
+}
+
+void ESECT
+mdb_env_close(MDB_env *env)
+{
+ MDB_page *dp;
+
+ if (env == NULL)
+ return;
+
+ VGMEMP_DESTROY(env);
+ while ((dp = env->me_dpages) != NULL) {
+ VGMEMP_DEFINED(&dp->mp_next, sizeof(dp->mp_next));
+ env->me_dpages = dp->mp_next;
+ free(dp);
+ }
+
+ mdb_env_close0(env, 0);
+ free(env);
+}
+
+/** Compare two items pointing at aligned size_t's */
+static int
+mdb_cmp_long(const MDB_val *a, const MDB_val *b)
+{
+ return (*(size_t *)a->mv_data < *(size_t *)b->mv_data) ? -1 :
+ *(size_t *)a->mv_data > *(size_t *)b->mv_data;
+}
+
+/** Compare two items pointing at aligned unsigned int's.
+ *
+ * This is also set as #MDB_INTEGERDUP|#MDB_DUPFIXED's #MDB_dbx.%md_dcmp,
+ * but #mdb_cmp_clong() is called instead if the data type is size_t.
+ */
+static int
+mdb_cmp_int(const MDB_val *a, const MDB_val *b)
+{
+ return (*(unsigned int *)a->mv_data < *(unsigned int *)b->mv_data) ? -1 :
+ *(unsigned int *)a->mv_data > *(unsigned int *)b->mv_data;
+}
+
+/** Compare two items pointing at unsigned ints of unknown alignment.
+ * Nodes and keys are guaranteed to be 2-byte aligned.
+ */
+static int
+mdb_cmp_cint(const MDB_val *a, const MDB_val *b)
+{
+#if BYTE_ORDER == LITTLE_ENDIAN
+ unsigned short *u, *c;
+ int x;
+
+ u = (unsigned short *) ((char *) a->mv_data + a->mv_size);
+ c = (unsigned short *) ((char *) b->mv_data + a->mv_size);
+ do {
+ x = *--u - *--c;
+ } while(!x && u > (unsigned short *)a->mv_data);
+ return x;
+#else
+ unsigned short *u, *c, *end;
+ int x;
+
+ end = (unsigned short *) ((char *) a->mv_data + a->mv_size);
+ u = (unsigned short *)a->mv_data;
+ c = (unsigned short *)b->mv_data;
+ do {
+ x = *u++ - *c++;
+ } while(!x && u < end);
+ return x;
+#endif
+}
+
+/** Compare two items lexically */
+static int
+mdb_cmp_memn(const MDB_val *a, const MDB_val *b)
+{
+ int diff;
+ ssize_t len_diff;
+ unsigned int len;
+
+ len = a->mv_size;
+ len_diff = (ssize_t) a->mv_size - (ssize_t) b->mv_size;
+ if (len_diff > 0) {
+ len = b->mv_size;
+ len_diff = 1;
+ }
+
+ diff = memcmp(a->mv_data, b->mv_data, len);
+ return diff ? diff : len_diff<0 ? -1 : len_diff;
+}
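+
+/* Worked example (illustrative): for a = "abc" (3 bytes) and
+ * b = "abcd" (4 bytes), len_diff = -1 so len stays 3; memcmp over the
+ * common 3 bytes returns 0, and the len_diff < 0 fallback yields -1,
+ * i.e. the shorter key sorts first.
+ */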
+
+/** Compare two items in reverse byte order */
+static int
+mdb_cmp_memnr(const MDB_val *a, const MDB_val *b)
+{
+ const unsigned char *p1, *p2, *p1_lim;
+ ssize_t len_diff;
+ int diff;
+
+ p1_lim = (const unsigned char *)a->mv_data;
+ p1 = (const unsigned char *)a->mv_data + a->mv_size;
+ p2 = (const unsigned char *)b->mv_data + b->mv_size;
+
+ len_diff = (ssize_t) a->mv_size - (ssize_t) b->mv_size;
+ if (len_diff > 0) {
+ p1_lim += len_diff;
+ len_diff = 1;
+ }
+
+ while (p1 > p1_lim) {
+ diff = *--p1 - *--p2;
+ if (diff)
+ return diff;
+ }
+ return len_diff<0 ? -1 : len_diff;
+}
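+
+/* Worked example (illustrative): comparing a = "ab" with b = "bb"
+ * starts at the last byte: 'b' vs 'b' is equal, then 'a' vs 'b' is
+ * negative, so "ab" sorts before "bb" here just as it does lexically;
+ * "ba" vs "ab" instead compares 'a' vs 'b' first and sorts "ba" first.
+ */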
+
+/** Search for key within a page, using binary search.
+ * Returns the smallest entry larger than or equal to the key.
+ * If exactp is non-null, stores whether the found entry was an exact match
+ * in *exactp (1 or 0).
+ * Updates the cursor index with the index of the found entry.
+ * If no entry larger than or equal to the key is found, returns NULL.
+ */
+static MDB_node *
+mdb_node_search(MDB_cursor *mc, MDB_val *key, int *exactp)
+{
+ unsigned int i = 0, nkeys;
+ int low, high;
+ int rc = 0;
+ MDB_page *mp = mc->mc_pg[mc->mc_top];
+ MDB_node *node = NULL;
+ MDB_val nodekey;
+ MDB_cmp_func *cmp;
+ DKBUF;
+
+ nkeys = NUMKEYS(mp);
+
+ DPRINTF(("searching %u keys in %s %spage %"Z"u",
+ nkeys, IS_LEAF(mp) ? "leaf" : "branch", IS_SUBP(mp) ? "sub-" : "",
+ mdb_dbg_pgno(mp)));
+
+ low = IS_LEAF(mp) ? 0 : 1;
+ high = nkeys - 1;
+ cmp = mc->mc_dbx->md_cmp;
+
+ /* Branch pages have no data, so if using integer keys,
+ * alignment is guaranteed. Use faster mdb_cmp_int.
+ */
+ if (cmp == mdb_cmp_cint && IS_BRANCH(mp)) {
+ if (NODEPTR(mp, 1)->mn_ksize == sizeof(size_t))
+ cmp = mdb_cmp_long;
+ else
+ cmp = mdb_cmp_int;
+ }
+
+ if (IS_LEAF2(mp)) {
+ nodekey.mv_size = mc->mc_db->md_pad;
+ node = NODEPTR(mp, 0); /* fake */
+ while (low <= high) {
+ i = (low + high) >> 1;
+ nodekey.mv_data = LEAF2KEY(mp, i, nodekey.mv_size);
+ rc = cmp(key, &nodekey);
+ DPRINTF(("found leaf index %u [%s], rc = %i",
+ i, DKEY(&nodekey), rc));
+ if (rc == 0)
+ break;
+ if (rc > 0)
+ low = i + 1;
+ else
+ high = i - 1;
+ }
+ } else {
+ while (low <= high) {
+ i = (low + high) >> 1;
+
+ node = NODEPTR(mp, i);
+ nodekey.mv_size = NODEKSZ(node);
+ nodekey.mv_data = NODEKEY(node);
+
+ rc = cmp(key, &nodekey);
+#if MDB_DEBUG
+ if (IS_LEAF(mp))
+ DPRINTF(("found leaf index %u [%s], rc = %i",
+ i, DKEY(&nodekey), rc));
+ else
+ DPRINTF(("found branch index %u [%s -> %"Z"u], rc = %i",
+ i, DKEY(&nodekey), NODEPGNO(node), rc));
+#endif
+ if (rc == 0)
+ break;
+ if (rc > 0)
+ low = i + 1;
+ else
+ high = i - 1;
+ }
+ }
+
+ if (rc > 0) { /* Found entry is less than the key. */
+ i++; /* Skip to get the smallest entry larger than key. */
+ if (!IS_LEAF2(mp))
+ node = NODEPTR(mp, i);
+ }
+ if (exactp)
+ *exactp = (rc == 0 && nkeys > 0);
+ /* store the key index */
+ mc->mc_ki[mc->mc_top] = i;
+ if (i >= nkeys)
+		/* There is no entry larger than or equal to the key. */
+ return NULL;
+
+ /* nodeptr is fake for LEAF2 */
+ return node;
+}
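+
+/* Example (illustrative): on a leaf with keys {2, 5, 9}, searching for
+ * 5 sets mc_ki to 1 with *exactp = 1; searching for 6 sets mc_ki to 2
+ * (the smallest entry >= 6 is 9) with *exactp = 0; searching for 10
+ * leaves mc_ki == nkeys and returns NULL.
+ */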
+
+#if 0
+static void
+mdb_cursor_adjust(MDB_cursor *mc, func)
+{
+ MDB_cursor *m2;
+
+ for (m2 = mc->mc_txn->mt_cursors[mc->mc_dbi]; m2; m2=m2->mc_next) {
+ if (m2->mc_pg[m2->mc_top] == mc->mc_pg[mc->mc_top]) {
+ func(mc, m2);
+ }
+ }
+}
+#endif
+
+/** Pop a page off the top of the cursor's stack. */
+static void
+mdb_cursor_pop(MDB_cursor *mc)
+{
+ if (mc->mc_snum) {
+ DPRINTF(("popping page %"Z"u off db %d cursor %p",
+ mc->mc_pg[mc->mc_top]->mp_pgno, DDBI(mc), (void *) mc));
+
+ mc->mc_snum--;
+ if (mc->mc_snum) {
+ mc->mc_top--;
+ } else {
+ mc->mc_flags &= ~C_INITIALIZED;
+ }
+ }
+}
+
+/** Push a page onto the top of the cursor's stack.
+ * Set #MDB_TXN_ERROR on failure.
+ */
+static int
+mdb_cursor_push(MDB_cursor *mc, MDB_page *mp)
+{
+ DPRINTF(("pushing page %"Z"u on db %d cursor %p", mp->mp_pgno,
+ DDBI(mc), (void *) mc));
+
+ if (mc->mc_snum >= CURSOR_STACK) {
+ mc->mc_txn->mt_flags |= MDB_TXN_ERROR;
+ return MDB_CURSOR_FULL;
+ }
+
+ mc->mc_top = mc->mc_snum++;
+ mc->mc_pg[mc->mc_top] = mp;
+ mc->mc_ki[mc->mc_top] = 0;
+
+ return MDB_SUCCESS;
+}
+
+/** Find the address of the page corresponding to a given page number.
+ * Set #MDB_TXN_ERROR on failure.
+ * @param[in] mc the cursor accessing the page.
+ * @param[in] pgno the page number for the page to retrieve.
+ * @param[out] ret address of a pointer where the page's address will be stored.
+ * @param[out] lvl dirty_list inheritance level of found page. 1=current txn, 0=mapped page.
+ * @return 0 on success, non-zero on failure.
+ */
+static int
+mdb_page_get(MDB_cursor *mc, pgno_t pgno, MDB_page **ret, int *lvl)
+{
+ MDB_txn *txn = mc->mc_txn;
+ MDB_env *env = txn->mt_env;
+ MDB_page *p = NULL;
+ int level;
+
+ if (! (txn->mt_flags & (MDB_TXN_RDONLY|MDB_TXN_WRITEMAP))) {
+ MDB_txn *tx2 = txn;
+ level = 1;
+ do {
+ MDB_ID2L dl = tx2->mt_u.dirty_list;
+ unsigned x;
+ /* Spilled pages were dirtied in this txn and flushed
+ * because the dirty list got full. Bring this page
+ * back in from the map (but don't unspill it here,
+ * leave that unless page_touch happens again).
+ */
+ if (tx2->mt_spill_pgs) {
+ MDB_ID pn = pgno << 1;
+ x = mdb_midl_search(tx2->mt_spill_pgs, pn);
+ if (x <= tx2->mt_spill_pgs[0] && tx2->mt_spill_pgs[x] == pn) {
+ p = (MDB_page *)(env->me_map + env->me_psize * pgno);
+ goto done;
+ }
+ }
+ if (dl[0].mid) {
+ unsigned x = mdb_mid2l_search(dl, pgno);
+ if (x <= dl[0].mid && dl[x].mid == pgno) {
+ p = dl[x].mptr;
+ goto done;
+ }
+ }
+ level++;
+ } while ((tx2 = tx2->mt_parent) != NULL);
+ }
+
+ if (pgno < txn->mt_next_pgno) {
+ level = 0;
+ p = (MDB_page *)(env->me_map + env->me_psize * pgno);
+ } else {
+ DPRINTF(("page %"Z"u not found", pgno));
+ txn->mt_flags |= MDB_TXN_ERROR;
+ return MDB_PAGE_NOTFOUND;
+ }
+
+done:
+ *ret = p;
+ if (lvl)
+ *lvl = level;
+ return MDB_SUCCESS;
+}
+
+/** Finish #mdb_page_search() / #mdb_page_search_lowest().
+ * The cursor is at the root page, set up the rest of it.
+ */
+static int
+mdb_page_search_root(MDB_cursor *mc, MDB_val *key, int flags)
+{
+ MDB_page *mp = mc->mc_pg[mc->mc_top];
+ int rc;
+ DKBUF;
+
+ while (IS_BRANCH(mp)) {
+ MDB_node *node;
+ indx_t i;
+
+ DPRINTF(("branch page %"Z"u has %u keys", mp->mp_pgno, NUMKEYS(mp)));
+ /* Don't assert on branch pages in the FreeDB. We can get here
+ * while in the process of rebalancing a FreeDB branch page; we must
+ * let that proceed. ITS#8336
+ */
+ mdb_cassert(mc, !mc->mc_dbi || NUMKEYS(mp) > 1);
+ DPRINTF(("found index 0 to page %"Z"u", NODEPGNO(NODEPTR(mp, 0))));
+
+ if (flags & (MDB_PS_FIRST|MDB_PS_LAST)) {
+ i = 0;
+ if (flags & MDB_PS_LAST) {
+ i = NUMKEYS(mp) - 1;
+ /* if already init'd, see if we're already in right place */
+ if (mc->mc_flags & C_INITIALIZED) {
+ if (mc->mc_ki[mc->mc_top] == i) {
+ mc->mc_top = mc->mc_snum++;
+ mp = mc->mc_pg[mc->mc_top];
+ goto ready;
+ }
+ }
+ }
+ } else {
+ int exact;
+ node = mdb_node_search(mc, key, &exact);
+ if (node == NULL)
+ i = NUMKEYS(mp) - 1;
+ else {
+ i = mc->mc_ki[mc->mc_top];
+ if (!exact) {
+ mdb_cassert(mc, i > 0);
+ i--;
+ }
+ }
+ DPRINTF(("following index %u for key [%s]", i, DKEY(key)));
+ }
+
+ mdb_cassert(mc, i < NUMKEYS(mp));
+ node = NODEPTR(mp, i);
+
+ if ((rc = mdb_page_get(mc, NODEPGNO(node), &mp, NULL)) != 0)
+ return rc;
+
+ mc->mc_ki[mc->mc_top] = i;
+ if ((rc = mdb_cursor_push(mc, mp)))
+ return rc;
+
+ready:
+ if (flags & MDB_PS_MODIFY) {
+ if ((rc = mdb_page_touch(mc)) != 0)
+ return rc;
+ mp = mc->mc_pg[mc->mc_top];
+ }
+ }
+
+ if (!IS_LEAF(mp)) {
+ DPRINTF(("internal error, index points to a %02X page!?",
+ mp->mp_flags));
+ mc->mc_txn->mt_flags |= MDB_TXN_ERROR;
+ return MDB_CORRUPTED;
+ }
+
+ DPRINTF(("found leaf page %"Z"u for key [%s]", mp->mp_pgno,
+ key ? DKEY(key) : "null"));
+ mc->mc_flags |= C_INITIALIZED;
+ mc->mc_flags &= ~C_EOF;
+
+ return MDB_SUCCESS;
+}
+
+/** Search for the lowest key under the current branch page.
+ * This just bypasses a NUMKEYS check in the current page
+ * before calling mdb_page_search_root(), because the callers
+ * are all in situations where the current page is known to
+ * be underfilled.
+ */
+static int
+mdb_page_search_lowest(MDB_cursor *mc)
+{
+ MDB_page *mp = mc->mc_pg[mc->mc_top];
+ MDB_node *node = NODEPTR(mp, 0);
+ int rc;
+
+ if ((rc = mdb_page_get(mc, NODEPGNO(node), &mp, NULL)) != 0)
+ return rc;
+
+ mc->mc_ki[mc->mc_top] = 0;
+ if ((rc = mdb_cursor_push(mc, mp)))
+ return rc;
+ return mdb_page_search_root(mc, NULL, MDB_PS_FIRST);
+}
+
+/** Search for the page a given key should be in.
+ * Push it and its parent pages on the cursor stack.
+ * @param[in,out] mc the cursor for this operation.
+ * @param[in] key the key to search for, or NULL for first/last page.
+ * @param[in] flags If MDB_PS_MODIFY is set, visited pages in the DB
+ * are touched (updated with new page numbers).
+ * If MDB_PS_FIRST or MDB_PS_LAST is set, find first or last leaf.
+ * This is used by #mdb_cursor_first() and #mdb_cursor_last().
+ * If MDB_PS_ROOTONLY set, just fetch root node, no further lookups.
+ * @return 0 on success, non-zero on failure.
+ */
+static int
+mdb_page_search(MDB_cursor *mc, MDB_val *key, int flags)
+{
+ int rc;
+ pgno_t root;
+
+ /* Make sure the txn is still viable, then find the root from
+ * the txn's db table and set it as the root of the cursor's stack.
+ */
+ if (mc->mc_txn->mt_flags & MDB_TXN_BLOCKED) {
+ DPUTS("transaction may not be used now");
+ return MDB_BAD_TXN;
+ } else {
+ /* Make sure we're using an up-to-date root */
+ if (*mc->mc_dbflag & DB_STALE) {
+ MDB_cursor mc2;
+ if (TXN_DBI_CHANGED(mc->mc_txn, mc->mc_dbi))
+ return MDB_BAD_DBI;
+ mdb_cursor_init(&mc2, mc->mc_txn, MAIN_DBI, NULL);
+ rc = mdb_page_search(&mc2, &mc->mc_dbx->md_name, 0);
+ if (rc)
+ return rc;
+ {
+ MDB_val data;
+ int exact = 0;
+ uint16_t flags;
+ MDB_node *leaf = mdb_node_search(&mc2,
+ &mc->mc_dbx->md_name, &exact);
+ if (!exact)
+ return MDB_NOTFOUND;
+ if ((leaf->mn_flags & (F_DUPDATA|F_SUBDATA)) != F_SUBDATA)
+ return MDB_INCOMPATIBLE; /* not a named DB */
+ rc = mdb_node_read(&mc2, leaf, &data);
+ if (rc)
+ return rc;
+ memcpy(&flags, ((char *) data.mv_data + offsetof(MDB_db, md_flags)),
+ sizeof(uint16_t));
+ /* The txn may not know this DBI, or another process may
+ * have dropped and recreated the DB with other flags.
+ */
+ if ((mc->mc_db->md_flags & PERSISTENT_FLAGS) != flags)
+ return MDB_INCOMPATIBLE;
+ memcpy(mc->mc_db, data.mv_data, sizeof(MDB_db));
+ }
+ *mc->mc_dbflag &= ~DB_STALE;
+ }
+ root = mc->mc_db->md_root;
+
+ if (root == P_INVALID) { /* Tree is empty. */
+ DPUTS("tree is empty");
+ return MDB_NOTFOUND;
+ }
+ }
+
+ mdb_cassert(mc, root > 1);
+ if (!mc->mc_pg[0] || mc->mc_pg[0]->mp_pgno != root)
+ if ((rc = mdb_page_get(mc, root, &mc->mc_pg[0], NULL)) != 0)
+ return rc;
+
+ mc->mc_snum = 1;
+ mc->mc_top = 0;
+
+ DPRINTF(("db %d root page %"Z"u has flags 0x%X",
+ DDBI(mc), root, mc->mc_pg[0]->mp_flags));
+
+ if (flags & MDB_PS_MODIFY) {
+ if ((rc = mdb_page_touch(mc)))
+ return rc;
+ }
+
+ if (flags & MDB_PS_ROOTONLY)
+ return MDB_SUCCESS;
+
+ return mdb_page_search_root(mc, key, flags);
+}
+
+static int
+mdb_ovpage_free(MDB_cursor *mc, MDB_page *mp)
+{
+ MDB_txn *txn = mc->mc_txn;
+ pgno_t pg = mp->mp_pgno;
+ unsigned x = 0, ovpages = mp->mp_pages;
+ MDB_env *env = txn->mt_env;
+ MDB_IDL sl = txn->mt_spill_pgs;
+ MDB_ID pn = pg << 1;
+ int rc;
+
+ DPRINTF(("free ov page %"Z"u (%d)", pg, ovpages));
+ /* If the page is dirty or on the spill list we just acquired it,
+ * so we should give it back to our current free list, if any.
+ * Otherwise put it onto the list of pages we freed in this txn.
+ *
+ * Won't create me_pghead: me_pglast must be inited along with it.
+ * Unsupported in nested txns: They would need to hide the page
+ * range in ancestor txns' dirty and spilled lists.
+ */
+ if (env->me_pghead &&
+ !txn->mt_parent &&
+ ((mp->mp_flags & P_DIRTY) ||
+ (sl && (x = mdb_midl_search(sl, pn)) <= sl[0] && sl[x] == pn)))
+ {
+ unsigned i, j;
+ pgno_t *mop;
+ MDB_ID2 *dl, ix, iy;
+ rc = mdb_midl_need(&env->me_pghead, ovpages);
+ if (rc)
+ return rc;
+ if (!(mp->mp_flags & P_DIRTY)) {
+ /* This page is no longer spilled */
+ if (x == sl[0])
+ sl[0]--;
+ else
+ sl[x] |= 1;
+ goto release;
+ }
+ /* Remove from dirty list */
+ dl = txn->mt_u.dirty_list;
+ x = dl[0].mid--;
+ for (ix = dl[x]; ix.mptr != mp; ix = iy) {
+ if (x > 1) {
+ x--;
+ iy = dl[x];
+ dl[x] = ix;
+ } else {
+ mdb_cassert(mc, x > 1);
+ j = ++(dl[0].mid);
+ dl[j] = ix; /* Unsorted. OK when MDB_TXN_ERROR. */
+ txn->mt_flags |= MDB_TXN_ERROR;
+ return MDB_CORRUPTED;
+ }
+ }
+ txn->mt_dirty_room++;
+ if (!(env->me_flags & MDB_WRITEMAP))
+ mdb_dpage_free(env, mp);
+release:
+ /* Insert in me_pghead */
+ mop = env->me_pghead;
+ j = mop[0] + ovpages;
+ for (i = mop[0]; i && mop[i] < pg; i--)
+ mop[j--] = mop[i];
+ while (j>i)
+ mop[j--] = pg++;
+ mop[0] += ovpages;
+ } else {
+ rc = mdb_midl_append_range(&txn->mt_free_pgs, pg, ovpages);
+ if (rc)
+ return rc;
+ }
+ mc->mc_db->md_overflow_pages -= ovpages;
+ return 0;
+}
+
+/** Return the data associated with a given node.
+ * @param[in] mc The cursor for this operation.
+ * @param[in] leaf The node being read.
+ * @param[out] data Updated to point to the node's data.
+ * @return 0 on success, non-zero on failure.
+ */
+static int
+mdb_node_read(MDB_cursor *mc, MDB_node *leaf, MDB_val *data)
+{
+ MDB_page *omp; /* overflow page */
+ pgno_t pgno;
+ int rc;
+
+ if (!F_ISSET(leaf->mn_flags, F_BIGDATA)) {
+ data->mv_size = NODEDSZ(leaf);
+ data->mv_data = NODEDATA(leaf);
+ return MDB_SUCCESS;
+ }
+
+ /* Read overflow data.
+ */
+ data->mv_size = NODEDSZ(leaf);
+ memcpy(&pgno, NODEDATA(leaf), sizeof(pgno));
+ if ((rc = mdb_page_get(mc, pgno, &omp, NULL)) != 0) {
+ DPRINTF(("read overflow page %"Z"u failed", pgno));
+ return rc;
+ }
+ data->mv_data = METADATA(omp);
+
+ return MDB_SUCCESS;
+}
+
+int
+mdb_get(MDB_txn *txn, MDB_dbi dbi,
+ MDB_val *key, MDB_val *data)
+{
+ MDB_cursor mc;
+ MDB_xcursor mx;
+ int exact = 0;
+ DKBUF;
+
+ DPRINTF(("===> get db %u key [%s]", dbi, DKEY(key)));
+
+ if (!key || !data || !TXN_DBI_EXIST(txn, dbi, DB_USRVALID))
+ return EINVAL;
+
+ if (txn->mt_flags & MDB_TXN_BLOCKED)
+ return MDB_BAD_TXN;
+
+ mdb_cursor_init(&mc, txn, dbi, &mx);
+ return mdb_cursor_set(&mc, key, data, MDB_SET, &exact);
+}
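+
+/* Usage sketch (illustrative only): dbi is assumed to come from a
+ * prior mdb_dbi_open(). The returned data points into the map and is
+ * valid only for the lifetime of the transaction.
+ *
+ *	MDB_txn *txn;
+ *	MDB_val key, data;
+ *	key.mv_size = 3;
+ *	key.mv_data = "foo";
+ *	rc = mdb_txn_begin(env, NULL, MDB_RDONLY, &txn);
+ *	if (rc == MDB_SUCCESS) {
+ *		rc = mdb_get(txn, dbi, &key, &data);
+ *		mdb_txn_abort(txn);
+ *	}
+ */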
+
+/** Find a sibling for a page.
+ * Replaces the page at the top of the cursor's stack with the
+ * specified sibling, if one exists.
+ * @param[in] mc The cursor for this operation.
+ * @param[in] move_right Non-zero if the right sibling is requested,
+ * otherwise the left sibling.
+ * @return 0 on success, non-zero on failure.
+ */
+static int
+mdb_cursor_sibling(MDB_cursor *mc, int move_right)
+{
+ int rc;
+ MDB_node *indx;
+ MDB_page *mp;
+
+ if (mc->mc_snum < 2) {
+ return MDB_NOTFOUND; /* root has no siblings */
+ }
+
+ mdb_cursor_pop(mc);
+ DPRINTF(("parent page is page %"Z"u, index %u",
+ mc->mc_pg[mc->mc_top]->mp_pgno, mc->mc_ki[mc->mc_top]));
+
+ if (move_right ? (mc->mc_ki[mc->mc_top] + 1u >= NUMKEYS(mc->mc_pg[mc->mc_top]))
+ : (mc->mc_ki[mc->mc_top] == 0)) {
+ DPRINTF(("no more keys left, moving to %s sibling",
+ move_right ? "right" : "left"));
+ if ((rc = mdb_cursor_sibling(mc, move_right)) != MDB_SUCCESS) {
+ /* undo cursor_pop before returning */
+ mc->mc_top++;
+ mc->mc_snum++;
+ return rc;
+ }
+ } else {
+ if (move_right)
+ mc->mc_ki[mc->mc_top]++;
+ else
+ mc->mc_ki[mc->mc_top]--;
+ DPRINTF(("just moving to %s index key %u",
+ move_right ? "right" : "left", mc->mc_ki[mc->mc_top]));
+ }
+ mdb_cassert(mc, IS_BRANCH(mc->mc_pg[mc->mc_top]));
+
+ indx = NODEPTR(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top]);
+ if ((rc = mdb_page_get(mc, NODEPGNO(indx), &mp, NULL)) != 0) {
+ /* mc will be inconsistent if caller does mc_snum++ as above */
+ mc->mc_flags &= ~(C_INITIALIZED|C_EOF);
+ return rc;
+ }
+
+ mdb_cursor_push(mc, mp);
+ if (!move_right)
+ mc->mc_ki[mc->mc_top] = NUMKEYS(mp)-1;
+
+ return MDB_SUCCESS;
+}
+
+/** Move the cursor to the next data item. */
+static int
+mdb_cursor_next(MDB_cursor *mc, MDB_val *key, MDB_val *data, MDB_cursor_op op)
+{
+ MDB_page *mp;
+ MDB_node *leaf;
+ int rc;
+
+ if ((mc->mc_flags & C_DEL && op == MDB_NEXT_DUP))
+ return MDB_NOTFOUND;
+
+ if (!(mc->mc_flags & C_INITIALIZED))
+ return mdb_cursor_first(mc, key, data);
+
+ mp = mc->mc_pg[mc->mc_top];
+
+ if (mc->mc_flags & C_EOF) {
+ if (mc->mc_ki[mc->mc_top] >= NUMKEYS(mp)-1)
+ return MDB_NOTFOUND;
+ mc->mc_flags ^= C_EOF;
+ }
+
+ if (mc->mc_db->md_flags & MDB_DUPSORT) {
+ leaf = NODEPTR(mp, mc->mc_ki[mc->mc_top]);
+ if (F_ISSET(leaf->mn_flags, F_DUPDATA)) {
+ if (op == MDB_NEXT || op == MDB_NEXT_DUP) {
+ rc = mdb_cursor_next(&mc->mc_xcursor->mx_cursor, data, NULL, MDB_NEXT);
+ if (op != MDB_NEXT || rc != MDB_NOTFOUND) {
+ if (rc == MDB_SUCCESS)
+ MDB_GET_KEY(leaf, key);
+ return rc;
+ }
+ }
+ } else {
+ mc->mc_xcursor->mx_cursor.mc_flags &= ~(C_INITIALIZED|C_EOF);
+ if (op == MDB_NEXT_DUP)
+ return MDB_NOTFOUND;
+ }
+ }
+
+ DPRINTF(("cursor_next: top page is %"Z"u in cursor %p",
+ mdb_dbg_pgno(mp), (void *) mc));
+ if (mc->mc_flags & C_DEL) {
+ mc->mc_flags ^= C_DEL;
+ goto skip;
+ }
+
+ if (mc->mc_ki[mc->mc_top] + 1u >= NUMKEYS(mp)) {
+ DPUTS("=====> move to next sibling page");
+ if ((rc = mdb_cursor_sibling(mc, 1)) != MDB_SUCCESS) {
+ mc->mc_flags |= C_EOF;
+ return rc;
+ }
+ mp = mc->mc_pg[mc->mc_top];
+ DPRINTF(("next page is %"Z"u, key index %u", mp->mp_pgno, mc->mc_ki[mc->mc_top]));
+ } else
+ mc->mc_ki[mc->mc_top]++;
+
+skip:
+ DPRINTF(("==> cursor points to page %"Z"u with %u keys, key index %u",
+ mdb_dbg_pgno(mp), NUMKEYS(mp), mc->mc_ki[mc->mc_top]));
+
+ if (IS_LEAF2(mp)) {
+ key->mv_size = mc->mc_db->md_pad;
+ key->mv_data = LEAF2KEY(mp, mc->mc_ki[mc->mc_top], key->mv_size);
+ return MDB_SUCCESS;
+ }
+
+ mdb_cassert(mc, IS_LEAF(mp));
+ leaf = NODEPTR(mp, mc->mc_ki[mc->mc_top]);
+
+ if (F_ISSET(leaf->mn_flags, F_DUPDATA)) {
+ mdb_xcursor_init1(mc, leaf);
+ rc = mdb_cursor_first(&mc->mc_xcursor->mx_cursor, data, NULL);
+ if (rc != MDB_SUCCESS)
+ return rc;
+ } else if (data) {
+ if ((rc = mdb_node_read(mc, leaf, data)) != MDB_SUCCESS)
+ return rc;
+ }
+
+ MDB_GET_KEY(leaf, key);
+ return MDB_SUCCESS;
+}
+
+/** Move the cursor to the previous data item. */
+static int
+mdb_cursor_prev(MDB_cursor *mc, MDB_val *key, MDB_val *data, MDB_cursor_op op)
+{
+ MDB_page *mp;
+ MDB_node *leaf;
+ int rc;
+
+ if (!(mc->mc_flags & C_INITIALIZED)) {
+ rc = mdb_cursor_last(mc, key, data);
+ if (rc)
+ return rc;
+ mc->mc_ki[mc->mc_top]++;
+ }
+
+ mp = mc->mc_pg[mc->mc_top];
+
+ if ((mc->mc_db->md_flags & MDB_DUPSORT) &&
+ mc->mc_ki[mc->mc_top] < NUMKEYS(mp)) {
+ leaf = NODEPTR(mp, mc->mc_ki[mc->mc_top]);
+ if (F_ISSET(leaf->mn_flags, F_DUPDATA)) {
+ if (op == MDB_PREV || op == MDB_PREV_DUP) {
+ rc = mdb_cursor_prev(&mc->mc_xcursor->mx_cursor, data, NULL, MDB_PREV);
+ if (op != MDB_PREV || rc != MDB_NOTFOUND) {
+ if (rc == MDB_SUCCESS) {
+ MDB_GET_KEY(leaf, key);
+ mc->mc_flags &= ~C_EOF;
+ }
+ return rc;
+ }
+ }
+ } else {
+ mc->mc_xcursor->mx_cursor.mc_flags &= ~(C_INITIALIZED|C_EOF);
+ if (op == MDB_PREV_DUP)
+ return MDB_NOTFOUND;
+ }
+ }
+
+ DPRINTF(("cursor_prev: top page is %"Z"u in cursor %p",
+ mdb_dbg_pgno(mp), (void *) mc));
+
+ mc->mc_flags &= ~(C_EOF|C_DEL);
+
+ if (mc->mc_ki[mc->mc_top] == 0) {
+ DPUTS("=====> move to prev sibling page");
+ if ((rc = mdb_cursor_sibling(mc, 0)) != MDB_SUCCESS) {
+ return rc;
+ }
+ mp = mc->mc_pg[mc->mc_top];
+ mc->mc_ki[mc->mc_top] = NUMKEYS(mp) - 1;
+ DPRINTF(("prev page is %"Z"u, key index %u", mp->mp_pgno, mc->mc_ki[mc->mc_top]));
+ } else
+ mc->mc_ki[mc->mc_top]--;
+
+ DPRINTF(("==> cursor points to page %"Z"u with %u keys, key index %u",
+ mdb_dbg_pgno(mp), NUMKEYS(mp), mc->mc_ki[mc->mc_top]));
+
+ if (!IS_LEAF(mp))
+ return MDB_CORRUPTED;
+
+ if (IS_LEAF2(mp)) {
+ key->mv_size = mc->mc_db->md_pad;
+ key->mv_data = LEAF2KEY(mp, mc->mc_ki[mc->mc_top], key->mv_size);
+ return MDB_SUCCESS;
+ }
+
+ leaf = NODEPTR(mp, mc->mc_ki[mc->mc_top]);
+
+ if (F_ISSET(leaf->mn_flags, F_DUPDATA)) {
+ mdb_xcursor_init1(mc, leaf);
+ rc = mdb_cursor_last(&mc->mc_xcursor->mx_cursor, data, NULL);
+ if (rc != MDB_SUCCESS)
+ return rc;
+ } else if (data) {
+ if ((rc = mdb_node_read(mc, leaf, data)) != MDB_SUCCESS)
+ return rc;
+ }
+
+ MDB_GET_KEY(leaf, key);
+ return MDB_SUCCESS;
+}
+
+/** Set the cursor on a specific data item. */
+static int
+mdb_cursor_set(MDB_cursor *mc, MDB_val *key, MDB_val *data,
+ MDB_cursor_op op, int *exactp)
+{
+ int rc;
+ MDB_page *mp;
+ MDB_node *leaf = NULL;
+ DKBUF;
+
+ if (key->mv_size == 0)
+ return MDB_BAD_VALSIZE;
+
+ if (mc->mc_xcursor)
+ mc->mc_xcursor->mx_cursor.mc_flags &= ~(C_INITIALIZED|C_EOF);
+
+ /* See if we're already on the right page */
+ if (mc->mc_flags & C_INITIALIZED) {
+ MDB_val nodekey;
+
+ mp = mc->mc_pg[mc->mc_top];
+ if (!NUMKEYS(mp)) {
+ mc->mc_ki[mc->mc_top] = 0;
+ return MDB_NOTFOUND;
+ }
+ if (MP_FLAGS(mp) & P_LEAF2) {
+ nodekey.mv_size = mc->mc_db->md_pad;
+ nodekey.mv_data = LEAF2KEY(mp, 0, nodekey.mv_size);
+ } else {
+ leaf = NODEPTR(mp, 0);
+ MDB_GET_KEY2(leaf, nodekey);
+ }
+ rc = mc->mc_dbx->md_cmp(key, &nodekey);
+ if (rc == 0) {
+ /* Probably happens rarely, but first node on the page
+ * was the one we wanted.
+ */
+ mc->mc_ki[mc->mc_top] = 0;
+ if (exactp)
+ *exactp = 1;
+ goto set1;
+ }
+ if (rc > 0) {
+ unsigned int i;
+ unsigned int nkeys = NUMKEYS(mp);
+ if (nkeys > 1) {
+ if (MP_FLAGS(mp) & P_LEAF2) {
+ nodekey.mv_data = LEAF2KEY(mp,
+ nkeys-1, nodekey.mv_size);
+ } else {
+ leaf = NODEPTR(mp, nkeys-1);
+ MDB_GET_KEY2(leaf, nodekey);
+ }
+ rc = mc->mc_dbx->md_cmp(key, &nodekey);
+ if (rc == 0) {
+ /* last node was the one we wanted */
+ mc->mc_ki[mc->mc_top] = nkeys-1;
+ if (exactp)
+ *exactp = 1;
+ goto set1;
+ }
+ if (rc < 0) {
+ if (mc->mc_ki[mc->mc_top] < NUMKEYS(mp)) {
+ /* This is definitely the right page, skip search_page */
+ if (MP_FLAGS(mp) & P_LEAF2) {
+ nodekey.mv_data = LEAF2KEY(mp,
+ mc->mc_ki[mc->mc_top], nodekey.mv_size);
+ } else {
+ leaf = NODEPTR(mp, mc->mc_ki[mc->mc_top]);
+ MDB_GET_KEY2(leaf, nodekey);
+ }
+ rc = mc->mc_dbx->md_cmp(key, &nodekey);
+ if (rc == 0) {
+ /* current node was the one we wanted */
+ if (exactp)
+ *exactp = 1;
+ goto set1;
+ }
+ }
+ rc = 0;
+ mc->mc_flags &= ~C_EOF;
+ goto set2;
+ }
+ }
+ /* If any parents have right-sibs, search.
+ * Otherwise, there's nothing further.
+ */
+ for (i=0; i<mc->mc_top; i++)
+ if (mc->mc_ki[i] <
+ NUMKEYS(mc->mc_pg[i])-1)
+ break;
+ if (i == mc->mc_top) {
+ /* There are no other pages */
+ mc->mc_ki[mc->mc_top] = nkeys;
+ return MDB_NOTFOUND;
+ }
+ }
+ if (!mc->mc_top) {
+ /* There are no other pages */
+ mc->mc_ki[mc->mc_top] = 0;
+ if (op == MDB_SET_RANGE && !exactp) {
+ rc = 0;
+ goto set1;
+ } else
+ return MDB_NOTFOUND;
+ }
+ } else {
+ mc->mc_pg[0] = 0;
+ }
+
+ rc = mdb_page_search(mc, key, 0);
+ if (rc != MDB_SUCCESS)
+ return rc;
+
+ mp = mc->mc_pg[mc->mc_top];
+ mdb_cassert(mc, IS_LEAF(mp));
+
+set2:
+ leaf = mdb_node_search(mc, key, exactp);
+ if (exactp != NULL && !*exactp) {
+ /* MDB_SET specified and not an exact match. */
+ return MDB_NOTFOUND;
+ }
+
+ if (leaf == NULL) {
+ DPUTS("===> inexact leaf not found, goto sibling");
+ if ((rc = mdb_cursor_sibling(mc, 1)) != MDB_SUCCESS) {
+ mc->mc_flags |= C_EOF;
+ return rc; /* no entries matched */
+ }
+ mp = mc->mc_pg[mc->mc_top];
+ mdb_cassert(mc, IS_LEAF(mp));
+ leaf = NODEPTR(mp, 0);
+ }
+
+set1:
+ mc->mc_flags |= C_INITIALIZED;
+ mc->mc_flags &= ~C_EOF;
+
+ if (IS_LEAF2(mp)) {
+ if (op == MDB_SET_RANGE || op == MDB_SET_KEY) {
+ key->mv_size = mc->mc_db->md_pad;
+ key->mv_data = LEAF2KEY(mp, mc->mc_ki[mc->mc_top], key->mv_size);
+ }
+ return MDB_SUCCESS;
+ }
+
+ if (F_ISSET(leaf->mn_flags, F_DUPDATA)) {
+ mdb_xcursor_init1(mc, leaf);
+ if (op == MDB_SET || op == MDB_SET_KEY || op == MDB_SET_RANGE) {
+ rc = mdb_cursor_first(&mc->mc_xcursor->mx_cursor, data, NULL);
+ } else {
+ int ex2, *ex2p;
+ if (op == MDB_GET_BOTH) {
+ ex2p = &ex2;
+ ex2 = 0;
+ } else {
+ ex2p = NULL;
+ }
+ rc = mdb_cursor_set(&mc->mc_xcursor->mx_cursor, data, NULL, MDB_SET_RANGE, ex2p);
+ if (rc != MDB_SUCCESS)
+ return rc;
+ }
+ } else if (data) {
+ if (op == MDB_GET_BOTH || op == MDB_GET_BOTH_RANGE) {
+ MDB_val olddata;
+ MDB_cmp_func *dcmp;
+ if ((rc = mdb_node_read(mc, leaf, &olddata)) != MDB_SUCCESS)
+ return rc;
+ dcmp = mc->mc_dbx->md_dcmp;
+#if UINT_MAX < SIZE_MAX
+ if (dcmp == mdb_cmp_int && olddata.mv_size == sizeof(size_t))
+ dcmp = mdb_cmp_clong;
+#endif
+ rc = dcmp(data, &olddata);
+ if (rc) {
+ if (op == MDB_GET_BOTH || rc > 0)
+ return MDB_NOTFOUND;
+ rc = 0;
+ }
+ *data = olddata;
+
+ } else {
+ if (mc->mc_xcursor)
+ mc->mc_xcursor->mx_cursor.mc_flags &= ~(C_INITIALIZED|C_EOF);
+ if ((rc = mdb_node_read(mc, leaf, data)) != MDB_SUCCESS)
+ return rc;
+ }
+ }
+
+ /* The key already matches in all other cases */
+ if (op == MDB_SET_RANGE || op == MDB_SET_KEY)
+ MDB_GET_KEY(leaf, key);
+ DPRINTF(("==> cursor placed on key [%s]", DKEY(key)));
+
+ return rc;
+}
+
+/** Move the cursor to the first item in the database. */
+static int
+mdb_cursor_first(MDB_cursor *mc, MDB_val *key, MDB_val *data)
+{
+ int rc;
+ MDB_node *leaf;
+
+ if (mc->mc_xcursor)
+ mc->mc_xcursor->mx_cursor.mc_flags &= ~(C_INITIALIZED|C_EOF);
+
+ if (!(mc->mc_flags & C_INITIALIZED) || mc->mc_top) {
+ rc = mdb_page_search(mc, NULL, MDB_PS_FIRST);
+ if (rc != MDB_SUCCESS)
+ return rc;
+ }
+ mdb_cassert(mc, IS_LEAF(mc->mc_pg[mc->mc_top]));
+
+ leaf = NODEPTR(mc->mc_pg[mc->mc_top], 0);
+ mc->mc_flags |= C_INITIALIZED;
+ mc->mc_flags &= ~C_EOF;
+
+ mc->mc_ki[mc->mc_top] = 0;
+
+ if (IS_LEAF2(mc->mc_pg[mc->mc_top])) {
+ if ( key ) {
+ key->mv_size = mc->mc_db->md_pad;
+ key->mv_data = LEAF2KEY(mc->mc_pg[mc->mc_top], 0, key->mv_size);
+ }
+ return MDB_SUCCESS;
+ }
+
+ if (F_ISSET(leaf->mn_flags, F_DUPDATA)) {
+ mdb_xcursor_init1(mc, leaf);
+ rc = mdb_cursor_first(&mc->mc_xcursor->mx_cursor, data, NULL);
+ if (rc)
+ return rc;
+ } else if (data) {
+ if ((rc = mdb_node_read(mc, leaf, data)) != MDB_SUCCESS)
+ return rc;
+ }
+
+ MDB_GET_KEY(leaf, key);
+ return MDB_SUCCESS;
+}
+
+/** Move the cursor to the last item in the database. */
+static int
+mdb_cursor_last(MDB_cursor *mc, MDB_val *key, MDB_val *data)
+{
+ int rc;
+ MDB_node *leaf;
+
+ if (mc->mc_xcursor)
+ mc->mc_xcursor->mx_cursor.mc_flags &= ~(C_INITIALIZED|C_EOF);
+
+ if (!(mc->mc_flags & C_INITIALIZED) || mc->mc_top) {
+ rc = mdb_page_search(mc, NULL, MDB_PS_LAST);
+ if (rc != MDB_SUCCESS)
+ return rc;
+ }
+ mdb_cassert(mc, IS_LEAF(mc->mc_pg[mc->mc_top]));
+
+ mc->mc_ki[mc->mc_top] = NUMKEYS(mc->mc_pg[mc->mc_top]) - 1;
+ mc->mc_flags |= C_INITIALIZED|C_EOF;
+ leaf = NODEPTR(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top]);
+
+ if (IS_LEAF2(mc->mc_pg[mc->mc_top])) {
+ if (key) {
+ key->mv_size = mc->mc_db->md_pad;
+ key->mv_data = LEAF2KEY(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top], key->mv_size);
+ }
+ return MDB_SUCCESS;
+ }
+
+ if (F_ISSET(leaf->mn_flags, F_DUPDATA)) {
+ mdb_xcursor_init1(mc, leaf);
+ rc = mdb_cursor_last(&mc->mc_xcursor->mx_cursor, data, NULL);
+ if (rc)
+ return rc;
+ } else if (data) {
+ if ((rc = mdb_node_read(mc, leaf, data)) != MDB_SUCCESS)
+ return rc;
+ }
+
+ MDB_GET_KEY(leaf, key);
+ return MDB_SUCCESS;
+}
+
+int
+mdb_cursor_get(MDB_cursor *mc, MDB_val *key, MDB_val *data,
+ MDB_cursor_op op)
+{
+ int rc;
+ int exact = 0;
+ int (*mfunc)(MDB_cursor *mc, MDB_val *key, MDB_val *data);
+
+ if (mc == NULL)
+ return EINVAL;
+
+ if (mc->mc_txn->mt_flags & MDB_TXN_BLOCKED)
+ return MDB_BAD_TXN;
+
+ switch (op) {
+ case MDB_GET_CURRENT:
+ if (!(mc->mc_flags & C_INITIALIZED)) {
+ rc = EINVAL;
+ } else {
+ MDB_page *mp = mc->mc_pg[mc->mc_top];
+ int nkeys = NUMKEYS(mp);
+ if (!nkeys || mc->mc_ki[mc->mc_top] >= nkeys) {
+ mc->mc_ki[mc->mc_top] = nkeys;
+ rc = MDB_NOTFOUND;
+ break;
+ }
+ rc = MDB_SUCCESS;
+ if (IS_LEAF2(mp)) {
+ key->mv_size = mc->mc_db->md_pad;
+ key->mv_data = LEAF2KEY(mp, mc->mc_ki[mc->mc_top], key->mv_size);
+ } else {
+ MDB_node *leaf = NODEPTR(mp, mc->mc_ki[mc->mc_top]);
+ MDB_GET_KEY(leaf, key);
+ if (data) {
+ if (F_ISSET(leaf->mn_flags, F_DUPDATA)) {
+ rc = mdb_cursor_get(&mc->mc_xcursor->mx_cursor, data, NULL, MDB_GET_CURRENT);
+ } else {
+ rc = mdb_node_read(mc, leaf, data);
+ }
+ }
+ }
+ }
+ break;
+ case MDB_GET_BOTH:
+ case MDB_GET_BOTH_RANGE:
+ if (data == NULL) {
+ rc = EINVAL;
+ break;
+ }
+ if (mc->mc_xcursor == NULL) {
+ rc = MDB_INCOMPATIBLE;
+ break;
+ }
+ /* FALLTHRU */
+ case MDB_SET:
+ case MDB_SET_KEY:
+ case MDB_SET_RANGE:
+ if (key == NULL) {
+ rc = EINVAL;
+ } else {
+ rc = mdb_cursor_set(mc, key, data, op,
+ op == MDB_SET_RANGE ? NULL : &exact);
+ }
+ break;
+ case MDB_GET_MULTIPLE:
+ if (data == NULL || !(mc->mc_flags & C_INITIALIZED)) {
+ rc = EINVAL;
+ break;
+ }
+ if (!(mc->mc_db->md_flags & MDB_DUPFIXED)) {
+ rc = MDB_INCOMPATIBLE;
+ break;
+ }
+ rc = MDB_SUCCESS;
+ if (!(mc->mc_xcursor->mx_cursor.mc_flags & C_INITIALIZED) ||
+ (mc->mc_xcursor->mx_cursor.mc_flags & C_EOF))
+ break;
+ goto fetchm;
+ case MDB_NEXT_MULTIPLE:
+ if (data == NULL) {
+ rc = EINVAL;
+ break;
+ }
+ if (!(mc->mc_db->md_flags & MDB_DUPFIXED)) {
+ rc = MDB_INCOMPATIBLE;
+ break;
+ }
+ rc = mdb_cursor_next(mc, key, data, MDB_NEXT_DUP);
+ if (rc == MDB_SUCCESS) {
+ if (mc->mc_xcursor->mx_cursor.mc_flags & C_INITIALIZED) {
+ MDB_cursor *mx;
+fetchm:
+ mx = &mc->mc_xcursor->mx_cursor;
+ data->mv_size = NUMKEYS(mx->mc_pg[mx->mc_top]) *
+ mx->mc_db->md_pad;
+ data->mv_data = METADATA(mx->mc_pg[mx->mc_top]);
+ mx->mc_ki[mx->mc_top] = NUMKEYS(mx->mc_pg[mx->mc_top])-1;
+ } else {
+ rc = MDB_NOTFOUND;
+ }
+ }
+ break;
+ case MDB_PREV_MULTIPLE:
+ if (data == NULL) {
+ rc = EINVAL;
+ break;
+ }
+ if (!(mc->mc_db->md_flags & MDB_DUPFIXED)) {
+ rc = MDB_INCOMPATIBLE;
+ break;
+ }
+ if (!(mc->mc_flags & C_INITIALIZED))
+ rc = mdb_cursor_last(mc, key, data);
+ else
+ rc = MDB_SUCCESS;
+ if (rc == MDB_SUCCESS) {
+ MDB_cursor *mx = &mc->mc_xcursor->mx_cursor;
+ if (mx->mc_flags & C_INITIALIZED) {
+ rc = mdb_cursor_sibling(mx, 0);
+ if (rc == MDB_SUCCESS)
+ goto fetchm;
+ } else {
+ rc = MDB_NOTFOUND;
+ }
+ }
+ break;
+ case MDB_NEXT:
+ case MDB_NEXT_DUP:
+ case MDB_NEXT_NODUP:
+ rc = mdb_cursor_next(mc, key, data, op);
+ break;
+ case MDB_PREV:
+ case MDB_PREV_DUP:
+ case MDB_PREV_NODUP:
+ rc = mdb_cursor_prev(mc, key, data, op);
+ break;
+ case MDB_FIRST:
+ rc = mdb_cursor_first(mc, key, data);
+ break;
+ case MDB_FIRST_DUP:
+ mfunc = mdb_cursor_first;
+ mmove:
+ if (data == NULL || !(mc->mc_flags & C_INITIALIZED)) {
+ rc = EINVAL;
+ break;
+ }
+ if (mc->mc_xcursor == NULL) {
+ rc = MDB_INCOMPATIBLE;
+ break;
+ }
+ if (mc->mc_ki[mc->mc_top] >= NUMKEYS(mc->mc_pg[mc->mc_top])) {
+ mc->mc_ki[mc->mc_top] = NUMKEYS(mc->mc_pg[mc->mc_top]);
+ rc = MDB_NOTFOUND;
+ break;
+ }
+ mc->mc_flags &= ~C_EOF;
+ {
+ MDB_node *leaf = NODEPTR(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top]);
+ if (!F_ISSET(leaf->mn_flags, F_DUPDATA)) {
+ MDB_GET_KEY(leaf, key);
+ rc = mdb_node_read(mc, leaf, data);
+ break;
+ }
+ }
+ if (!(mc->mc_xcursor->mx_cursor.mc_flags & C_INITIALIZED)) {
+ rc = EINVAL;
+ break;
+ }
+ rc = mfunc(&mc->mc_xcursor->mx_cursor, data, NULL);
+ break;
+ case MDB_LAST:
+ rc = mdb_cursor_last(mc, key, data);
+ break;
+ case MDB_LAST_DUP:
+ mfunc = mdb_cursor_last;
+ goto mmove;
+ default:
+ DPRINTF(("unhandled/unimplemented cursor operation %u", op));
+ rc = EINVAL;
+ break;
+ }
+
+ if (mc->mc_flags & C_DEL)
+ mc->mc_flags ^= C_DEL;
+
+ return rc;
+}
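+
+/* Usage sketch (illustrative only): a full forward scan. MDB_NEXT on a
+ * fresh cursor behaves like MDB_FIRST, and MDB_NOTFOUND marks the end
+ * of the data rather than an error.
+ *
+ *	MDB_cursor *cur;
+ *	MDB_val key, data;
+ *	rc = mdb_cursor_open(txn, dbi, &cur);
+ *	if (rc == MDB_SUCCESS) {
+ *		while ((rc = mdb_cursor_get(cur, &key, &data, MDB_NEXT)) == MDB_SUCCESS) {
+ *			... process key and data ...
+ *		}
+ *		if (rc == MDB_NOTFOUND)
+ *			rc = MDB_SUCCESS;
+ *		mdb_cursor_close(cur);
+ *	}
+ */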
+
+/** Touch all the pages in the cursor stack. Set mc_top.
+ * Makes sure all the pages are writable before attempting a write operation.
+ * @param[in] mc The cursor to operate on.
+ */
+static int
+mdb_cursor_touch(MDB_cursor *mc)
+{
+ int rc = MDB_SUCCESS;
+
+ if (mc->mc_dbi >= CORE_DBS && !(*mc->mc_dbflag & (DB_DIRTY|DB_DUPDATA))) {
+ /* Touch DB record of named DB */
+ MDB_cursor mc2;
+ MDB_xcursor mcx;
+ if (TXN_DBI_CHANGED(mc->mc_txn, mc->mc_dbi))
+ return MDB_BAD_DBI;
+ mdb_cursor_init(&mc2, mc->mc_txn, MAIN_DBI, &mcx);
+ rc = mdb_page_search(&mc2, &mc->mc_dbx->md_name, MDB_PS_MODIFY);
+ if (rc)
+ return rc;
+ *mc->mc_dbflag |= DB_DIRTY;
+ }
+ mc->mc_top = 0;
+ if (mc->mc_snum) {
+ do {
+ rc = mdb_page_touch(mc);
+ } while (!rc && ++(mc->mc_top) < mc->mc_snum);
+ mc->mc_top = mc->mc_snum-1;
+ }
+ return rc;
+}
+
+/** Do not spill pages to disk if the txn is getting full; may fail instead */
+#define MDB_NOSPILL 0x8000
+
+int
+mdb_cursor_put(MDB_cursor *mc, MDB_val *key, MDB_val *data,
+ unsigned int flags)
+{
+ MDB_env *env;
+ MDB_node *leaf = NULL;
+ MDB_page *fp, *mp, *sub_root = NULL;
+ uint16_t fp_flags;
+ MDB_val xdata, *rdata, dkey, olddata;
+ MDB_db dummy;
+ int do_sub = 0, insert_key, insert_data;
+ unsigned int mcount = 0, dcount = 0, nospill;
+ size_t nsize;
+ int rc, rc2;
+ unsigned int nflags;
+ DKBUF;
+
+ if (mc == NULL || key == NULL)
+ return EINVAL;
+
+ env = mc->mc_txn->mt_env;
+
+ /* Check this first so counter will always be zero on any
+ * early failures.
+ */
+ if (flags & MDB_MULTIPLE) {
+ dcount = data[1].mv_size;
+ data[1].mv_size = 0;
+ if (!F_ISSET(mc->mc_db->md_flags, MDB_DUPFIXED))
+ return MDB_INCOMPATIBLE;
+ }
+
+ nospill = flags & MDB_NOSPILL;
+ flags &= ~MDB_NOSPILL;
+
+ if (mc->mc_txn->mt_flags & (MDB_TXN_RDONLY|MDB_TXN_BLOCKED))
+ return (mc->mc_txn->mt_flags & MDB_TXN_RDONLY) ? EACCES : MDB_BAD_TXN;
+
+ if (key->mv_size-1 >= ENV_MAXKEY(env))
+ return MDB_BAD_VALSIZE;
+
+#if SIZE_MAX > MAXDATASIZE
+ if (data->mv_size > ((mc->mc_db->md_flags & MDB_DUPSORT) ? ENV_MAXKEY(env) : MAXDATASIZE))
+ return MDB_BAD_VALSIZE;
+#else
+ if ((mc->mc_db->md_flags & MDB_DUPSORT) && data->mv_size > ENV_MAXKEY(env))
+ return MDB_BAD_VALSIZE;
+#endif
+
+ DPRINTF(("==> put db %d key [%s], size %"Z"u, data size %"Z"u",
+ DDBI(mc), DKEY(key), key ? key->mv_size : 0, data->mv_size));
+
+ dkey.mv_size = 0;
+
+ if (flags & MDB_CURRENT) {
+ if (!(mc->mc_flags & C_INITIALIZED))
+ return EINVAL;
+ rc = MDB_SUCCESS;
+ } else if (mc->mc_db->md_root == P_INVALID) {
+ /* new database, cursor has nothing to point to */
+ mc->mc_snum = 0;
+ mc->mc_top = 0;
+ mc->mc_flags &= ~C_INITIALIZED;
+ rc = MDB_NO_ROOT;
+ } else {
+ int exact = 0;
+ MDB_val d2;
+ if (flags & MDB_APPEND) {
+ MDB_val k2;
+ rc = mdb_cursor_last(mc, &k2, &d2);
+ if (rc == 0) {
+ rc = mc->mc_dbx->md_cmp(key, &k2);
+ if (rc > 0) {
+ rc = MDB_NOTFOUND;
+ mc->mc_ki[mc->mc_top]++;
+ } else {
+ /* new key is <= last key */
+ rc = MDB_KEYEXIST;
+ }
+ }
+ } else {
+ rc = mdb_cursor_set(mc, key, &d2, MDB_SET, &exact);
+ }
+ if ((flags & MDB_NOOVERWRITE) && rc == 0) {
+ DPRINTF(("duplicate key [%s]", DKEY(key)));
+ *data = d2;
+ return MDB_KEYEXIST;
+ }
+ if (rc && rc != MDB_NOTFOUND)
+ return rc;
+ }
+
+ if (mc->mc_flags & C_DEL)
+ mc->mc_flags ^= C_DEL;
+
+ /* Cursor is positioned, check for room in the dirty list */
+ if (!nospill) {
+ if (flags & MDB_MULTIPLE) {
+ rdata = &xdata;
+ xdata.mv_size = data->mv_size * dcount;
+ } else {
+ rdata = data;
+ }
+ if ((rc2 = mdb_page_spill(mc, key, rdata)))
+ return rc2;
+ }
+
+ if (rc == MDB_NO_ROOT) {
+ MDB_page *np;
+ /* new database, write a root leaf page */
+ DPUTS("allocating new root leaf page");
+ if ((rc2 = mdb_page_new(mc, P_LEAF, 1, &np))) {
+ return rc2;
+ }
+ mdb_cursor_push(mc, np);
+ mc->mc_db->md_root = np->mp_pgno;
+ mc->mc_db->md_depth++;
+ *mc->mc_dbflag |= DB_DIRTY;
+ if ((mc->mc_db->md_flags & (MDB_DUPSORT|MDB_DUPFIXED))
+ == MDB_DUPFIXED)
+ MP_FLAGS(np) |= P_LEAF2;
+ mc->mc_flags |= C_INITIALIZED;
+ } else {
+ /* make sure all cursor pages are writable */
+ rc2 = mdb_cursor_touch(mc);
+ if (rc2)
+ return rc2;
+ }
+
+ insert_key = insert_data = rc;
+ if (insert_key) {
+ /* The key does not exist */
+ DPRINTF(("inserting key at index %i", mc->mc_ki[mc->mc_top]));
+ if ((mc->mc_db->md_flags & MDB_DUPSORT) &&
+ LEAFSIZE(key, data) > env->me_nodemax)
+ {
+ /* Too big for a node, insert in sub-DB. Set up an empty
+ * "old sub-page" for prep_subDB to expand to a full page.
+ */
+ fp_flags = P_LEAF|P_DIRTY;
+ fp = env->me_pbuf;
+ fp->mp_pad = data->mv_size; /* used if MDB_DUPFIXED */
+ MP_LOWER(fp) = MP_UPPER(fp) = (PAGEHDRSZ-PAGEBASE);
+ olddata.mv_size = PAGEHDRSZ;
+ goto prep_subDB;
+ }
+ } else {
+ /* there's only a key anyway, so this is a no-op */
+ if (IS_LEAF2(mc->mc_pg[mc->mc_top])) {
+ char *ptr;
+ unsigned int ksize = mc->mc_db->md_pad;
+ if (key->mv_size != ksize)
+ return MDB_BAD_VALSIZE;
+ ptr = LEAF2KEY(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top], ksize);
+ memcpy(ptr, key->mv_data, ksize);
+fix_parent:
+ /* if overwriting slot 0 of leaf, need to
+ * update branch key if there is a parent page
+ */
+ if (mc->mc_top && !mc->mc_ki[mc->mc_top]) {
+ unsigned short dtop = 1;
+ mc->mc_top--;
+ /* slot 0 is always an empty key, find real slot */
+ while (mc->mc_top && !mc->mc_ki[mc->mc_top]) {
+ mc->mc_top--;
+ dtop++;
+ }
+ if (mc->mc_ki[mc->mc_top])
+ rc2 = mdb_update_key(mc, key);
+ else
+ rc2 = MDB_SUCCESS;
+ mc->mc_top += dtop;
+ if (rc2)
+ return rc2;
+ }
+ return MDB_SUCCESS;
+ }
+
+more:
+ leaf = NODEPTR(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top]);
+ olddata.mv_size = NODEDSZ(leaf);
+ olddata.mv_data = NODEDATA(leaf);
+
+ /* DB has dups? */
+ if (F_ISSET(mc->mc_db->md_flags, MDB_DUPSORT)) {
+ /* Prepare (sub-)page/sub-DB to accept the new item,
+ * if needed. fp: old sub-page or a header faking
+ * it. mp: new (sub-)page. offset: growth in page
+ * size. xdata: node data with new page or DB.
+ */
+ unsigned i, offset = 0;
+ mp = fp = xdata.mv_data = env->me_pbuf;
+ mp->mp_pgno = mc->mc_pg[mc->mc_top]->mp_pgno;
+
+ /* Was a single item before, must convert now */
+ if (!F_ISSET(leaf->mn_flags, F_DUPDATA)) {
+ MDB_cmp_func *dcmp;
+ /* Just overwrite the current item */
+ if (flags == MDB_CURRENT)
+ goto current;
+ dcmp = mc->mc_dbx->md_dcmp;
+#if UINT_MAX < SIZE_MAX
+ if (dcmp == mdb_cmp_int && olddata.mv_size == sizeof(size_t))
+ dcmp = mdb_cmp_clong;
+#endif
+ /* does data match? */
+ if (!dcmp(data, &olddata)) {
+ if (flags & (MDB_NODUPDATA|MDB_APPENDDUP))
+ return MDB_KEYEXIST;
+ /* overwrite it */
+ goto current;
+ }
+
+ /* Back up original data item */
+ dkey.mv_size = olddata.mv_size;
+ dkey.mv_data = memcpy(fp+1, olddata.mv_data, olddata.mv_size);
+
+ /* Make sub-page header for the dup items, with dummy body */
+ MP_FLAGS(fp) = P_LEAF|P_DIRTY|P_SUBP;
+ MP_LOWER(fp) = (PAGEHDRSZ-PAGEBASE);
+ xdata.mv_size = PAGEHDRSZ + dkey.mv_size + data->mv_size;
+ if (mc->mc_db->md_flags & MDB_DUPFIXED) {
+ MP_FLAGS(fp) |= P_LEAF2;
+ fp->mp_pad = data->mv_size;
+ xdata.mv_size += 2 * data->mv_size; /* leave space for 2 more */
+ } else {
+ xdata.mv_size += 2 * (sizeof(indx_t) + NODESIZE) +
+ (dkey.mv_size & 1) + (data->mv_size & 1);
+ }
+ MP_UPPER(fp) = xdata.mv_size - PAGEBASE;
+ olddata.mv_size = xdata.mv_size; /* pretend olddata is fp */
+ } else if (leaf->mn_flags & F_SUBDATA) {
+ /* Data is on sub-DB, just store it */
+ flags |= F_DUPDATA|F_SUBDATA;
+ goto put_sub;
+ } else {
+ /* Data is on sub-page */
+ fp = olddata.mv_data;
+ switch (flags) {
+ default:
+ if (!(mc->mc_db->md_flags & MDB_DUPFIXED)) {
+ offset = EVEN(NODESIZE + sizeof(indx_t) +
+ data->mv_size);
+ break;
+ }
+ offset = fp->mp_pad;
+ if (SIZELEFT(fp) < offset) {
+ offset *= 4; /* space for 4 more */
+ break;
+ }
+ /* FALLTHRU */ /* Big enough MDB_DUPFIXED sub-page */
+ case MDB_CURRENT:
+ MP_FLAGS(fp) |= P_DIRTY;
+ COPY_PGNO(MP_PGNO(fp), MP_PGNO(mp));
+ mc->mc_xcursor->mx_cursor.mc_pg[0] = fp;
+ flags |= F_DUPDATA;
+ goto put_sub;
+ }
+ xdata.mv_size = olddata.mv_size + offset;
+ }
+
+ fp_flags = MP_FLAGS(fp);
+ if (NODESIZE + NODEKSZ(leaf) + xdata.mv_size > env->me_nodemax) {
+ /* Too big for a sub-page, convert to sub-DB */
+ fp_flags &= ~P_SUBP;
+prep_subDB:
+ if (mc->mc_db->md_flags & MDB_DUPFIXED) {
+ fp_flags |= P_LEAF2;
+ dummy.md_pad = fp->mp_pad;
+ dummy.md_flags = MDB_DUPFIXED;
+ if (mc->mc_db->md_flags & MDB_INTEGERDUP)
+ dummy.md_flags |= MDB_INTEGERKEY;
+ } else {
+ dummy.md_pad = 0;
+ dummy.md_flags = 0;
+ }
+ dummy.md_depth = 1;
+ dummy.md_branch_pages = 0;
+ dummy.md_leaf_pages = 1;
+ dummy.md_overflow_pages = 0;
+ dummy.md_entries = NUMKEYS(fp);
+ xdata.mv_size = sizeof(MDB_db);
+ xdata.mv_data = &dummy;
+ if ((rc = mdb_page_alloc(mc, 1, &mp)))
+ return rc;
+ offset = env->me_psize - olddata.mv_size;
+ flags |= F_DUPDATA|F_SUBDATA;
+ dummy.md_root = mp->mp_pgno;
+ sub_root = mp;
+ }
+ if (mp != fp) {
+ MP_FLAGS(mp) = fp_flags | P_DIRTY;
+ MP_PAD(mp) = MP_PAD(fp);
+ MP_LOWER(mp) = MP_LOWER(fp);
+ MP_UPPER(mp) = MP_UPPER(fp) + offset;
+ if (fp_flags & P_LEAF2) {
+ memcpy(METADATA(mp), METADATA(fp), NUMKEYS(fp) * fp->mp_pad);
+ } else {
+ memcpy((char *)mp + MP_UPPER(mp) + PAGEBASE, (char *)fp + MP_UPPER(fp) + PAGEBASE,
+ olddata.mv_size - MP_UPPER(fp) - PAGEBASE);
+ memcpy((char *)MP_PTRS(mp), (char *)MP_PTRS(fp), NUMKEYS(fp) * sizeof(mp->mp_ptrs[0]));
+ for (i=0; i<NUMKEYS(fp); i++)
+ mp->mp_ptrs[i] += offset;
+ }
+ }
+
+ rdata = &xdata;
+ flags |= F_DUPDATA;
+ do_sub = 1;
+ if (!insert_key)
+ mdb_node_del(mc, 0);
+ goto new_sub;
+ }
+current:
+ /* LMDB passes F_SUBDATA in 'flags' to write a DB record */
+ if ((leaf->mn_flags ^ flags) & F_SUBDATA)
+ return MDB_INCOMPATIBLE;
+ /* overflow page overwrites need special handling */
+ if (F_ISSET(leaf->mn_flags, F_BIGDATA)) {
+ MDB_page *omp;
+ pgno_t pg;
+ int level, ovpages, dpages = OVPAGES(data->mv_size, env->me_psize);
+
+ memcpy(&pg, olddata.mv_data, sizeof(pg));
+ if ((rc2 = mdb_page_get(mc, pg, &omp, &level)) != 0)
+ return rc2;
+ ovpages = omp->mp_pages;
+
+ /* Is the ov page large enough? */
+ if (ovpages >= dpages) {
+ if (!(omp->mp_flags & P_DIRTY) &&
+ (level || (env->me_flags & MDB_WRITEMAP)))
+ {
+ rc = mdb_page_unspill(mc->mc_txn, omp, &omp);
+ if (rc)
+ return rc;
+ level = 0; /* dirty in this txn or clean */
+ }
+ /* Is it dirty? */
+ if (omp->mp_flags & P_DIRTY) {
+ /* yes, overwrite it. Note in this case we don't
+ * bother to try shrinking the page if the new data
+ * is smaller than the overflow threshold.
+ */
+ if (level > 1) {
+ /* It is writable only in a parent txn */
+ size_t sz = (size_t) env->me_psize * ovpages, off;
+ MDB_page *np = mdb_page_malloc(mc->mc_txn, ovpages);
+ MDB_ID2 id2;
+ if (!np)
+ return ENOMEM;
+ id2.mid = pg;
+ id2.mptr = np;
+ /* Note - this page is already counted in parent's dirty_room */
+ rc2 = mdb_mid2l_insert(mc->mc_txn->mt_u.dirty_list, &id2);
+ mdb_cassert(mc, rc2 == 0);
+					 * Currently we make the page look as it would after a
+					 * put() in the parent txn, in case the user peeks at
+					 * MDB_RESERVEd or unused parts. Some users treat
+					 * ovpages specially.
+ */
+ if (!(flags & MDB_RESERVE)) {
+ /* Skip the part where LMDB will put *data.
+ * Copy end of page, adjusting alignment so
+ * compiler may copy words instead of bytes.
+ */
+ off = (PAGEHDRSZ + data->mv_size) & -(int)sizeof(size_t);
+ memcpy((size_t *)((char *)np + off),
+ (size_t *)((char *)omp + off), sz - off);
+ sz = PAGEHDRSZ;
+ }
+ memcpy(np, omp, sz); /* Copy beginning of page */
+ omp = np;
+ }
+ SETDSZ(leaf, data->mv_size);
+ if (F_ISSET(flags, MDB_RESERVE))
+ data->mv_data = METADATA(omp);
+ else
+ memcpy(METADATA(omp), data->mv_data, data->mv_size);
+ return MDB_SUCCESS;
+ }
+ }
+ if ((rc2 = mdb_ovpage_free(mc, omp)) != MDB_SUCCESS)
+ return rc2;
+ } else if (data->mv_size == olddata.mv_size) {
+ /* same size, just replace it. Note that we could
+ * also reuse this node if the new data is smaller,
+ * but instead we opt to shrink the node in that case.
+ */
+ if (F_ISSET(flags, MDB_RESERVE))
+ data->mv_data = olddata.mv_data;
+ else if (!(mc->mc_flags & C_SUB))
+ memcpy(olddata.mv_data, data->mv_data, data->mv_size);
+ else {
+ if (key->mv_size != NODEKSZ(leaf))
+ goto new_ksize;
+ memcpy(NODEKEY(leaf), key->mv_data, key->mv_size);
+ goto fix_parent;
+ }
+ return MDB_SUCCESS;
+ }
+new_ksize:
+ mdb_node_del(mc, 0);
+ }
+
+ rdata = data;
+
+new_sub:
+ nflags = flags & NODE_ADD_FLAGS;
+ nsize = IS_LEAF2(mc->mc_pg[mc->mc_top]) ? key->mv_size : mdb_leaf_size(env, key, rdata);
+ if (SIZELEFT(mc->mc_pg[mc->mc_top]) < nsize) {
+ if (( flags & (F_DUPDATA|F_SUBDATA)) == F_DUPDATA )
+ nflags &= ~MDB_APPEND; /* sub-page may need room to grow */
+ if (!insert_key)
+ nflags |= MDB_SPLIT_REPLACE;
+ rc = mdb_page_split(mc, key, rdata, P_INVALID, nflags);
+ } else {
+ /* There is room already in this leaf page. */
+ rc = mdb_node_add(mc, mc->mc_ki[mc->mc_top], key, rdata, 0, nflags);
+ if (rc == 0) {
+ /* Adjust other cursors pointing to mp */
+ MDB_cursor *m2, *m3;
+ MDB_dbi dbi = mc->mc_dbi;
+ unsigned i = mc->mc_top;
+ MDB_page *mp = mc->mc_pg[i];
+
+ for (m2 = mc->mc_txn->mt_cursors[dbi]; m2; m2=m2->mc_next) {
+ if (mc->mc_flags & C_SUB)
+ m3 = &m2->mc_xcursor->mx_cursor;
+ else
+ m3 = m2;
+ if (m3 == mc || m3->mc_snum < mc->mc_snum || m3->mc_pg[i] != mp) continue;
+ if (m3->mc_ki[i] >= mc->mc_ki[i] && insert_key) {
+ m3->mc_ki[i]++;
+ }
+ XCURSOR_REFRESH(m3, i, mp);
+ }
+ }
+ }
+
+ if (rc == MDB_SUCCESS) {
+ /* Now store the actual data in the child DB. Note that we're
+ * storing the user data in the keys field, so there are strict
+ * size limits on dupdata. The actual data fields of the child
+ * DB are all zero size.
+ */
+ if (do_sub) {
+ int xflags, new_dupdata;
+ size_t ecount;
+put_sub:
+ xdata.mv_size = 0;
+ xdata.mv_data = "";
+ leaf = NODEPTR(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top]);
+ if ((flags & (MDB_CURRENT|MDB_APPENDDUP)) == MDB_CURRENT) {
+ xflags = MDB_CURRENT|MDB_NOSPILL;
+ } else {
+ mdb_xcursor_init1(mc, leaf);
+ xflags = (flags & MDB_NODUPDATA) ?
+ MDB_NOOVERWRITE|MDB_NOSPILL : MDB_NOSPILL;
+ }
+ if (sub_root)
+ mc->mc_xcursor->mx_cursor.mc_pg[0] = sub_root;
+ new_dupdata = (int)dkey.mv_size;
+ /* converted, write the original data first */
+ if (dkey.mv_size) {
+ rc = mdb_cursor_put(&mc->mc_xcursor->mx_cursor, &dkey, &xdata, xflags);
+ if (rc)
+ goto bad_sub;
+ /* we've done our job */
+ dkey.mv_size = 0;
+ }
+ if (!(leaf->mn_flags & F_SUBDATA) || sub_root) {
+ /* Adjust other cursors pointing to mp */
+ MDB_cursor *m2;
+ MDB_xcursor *mx = mc->mc_xcursor;
+ unsigned i = mc->mc_top;
+ MDB_page *mp = mc->mc_pg[i];
+
+ for (m2 = mc->mc_txn->mt_cursors[mc->mc_dbi]; m2; m2=m2->mc_next) {
+ if (m2 == mc || m2->mc_snum < mc->mc_snum) continue;
+ if (!(m2->mc_flags & C_INITIALIZED)) continue;
+ if (m2->mc_pg[i] == mp) {
+ if (m2->mc_ki[i] == mc->mc_ki[i]) {
+ mdb_xcursor_init2(m2, mx, new_dupdata);
+ } else if (!insert_key) {
+ XCURSOR_REFRESH(m2, i, mp);
+ }
+ }
+ }
+ }
+ ecount = mc->mc_xcursor->mx_db.md_entries;
+ if (flags & MDB_APPENDDUP)
+ xflags |= MDB_APPEND;
+ rc = mdb_cursor_put(&mc->mc_xcursor->mx_cursor, data, &xdata, xflags);
+ if (flags & F_SUBDATA) {
+ void *db = NODEDATA(leaf);
+ memcpy(db, &mc->mc_xcursor->mx_db, sizeof(MDB_db));
+ }
+ insert_data = mc->mc_xcursor->mx_db.md_entries - ecount;
+ }
+ /* Increment count unless we just replaced an existing item. */
+ if (insert_data)
+ mc->mc_db->md_entries++;
+ if (insert_key) {
+ /* Invalidate txn if we created an empty sub-DB */
+ if (rc)
+ goto bad_sub;
+ /* If we succeeded and the key didn't exist before,
+ * make sure the cursor is marked valid.
+ */
+ mc->mc_flags |= C_INITIALIZED;
+ }
+ if (flags & MDB_MULTIPLE) {
+ if (!rc) {
+ mcount++;
+ /* let caller know how many succeeded, if any */
+ data[1].mv_size = mcount;
+ if (mcount < dcount) {
+ data[0].mv_data = (char *)data[0].mv_data + data[0].mv_size;
+ insert_key = insert_data = 0;
+ goto more;
+ }
+ }
+ }
+ return rc;
+bad_sub:
+ if (rc == MDB_KEYEXIST) /* should not happen, we deleted that item */
+ rc = MDB_CORRUPTED;
+ }
+ mc->mc_txn->mt_flags |= MDB_TXN_ERROR;
+ return rc;
+}
+
+int
+mdb_cursor_del(MDB_cursor *mc, unsigned int flags)
+{
+ MDB_node *leaf;
+ MDB_page *mp;
+ int rc;
+
+ if (mc->mc_txn->mt_flags & (MDB_TXN_RDONLY|MDB_TXN_BLOCKED))
+ return (mc->mc_txn->mt_flags & MDB_TXN_RDONLY) ? EACCES : MDB_BAD_TXN;
+
+ if (!(mc->mc_flags & C_INITIALIZED))
+ return EINVAL;
+
+ if (mc->mc_ki[mc->mc_top] >= NUMKEYS(mc->mc_pg[mc->mc_top]))
+ return MDB_NOTFOUND;
+
+ if (!(flags & MDB_NOSPILL) && (rc = mdb_page_spill(mc, NULL, NULL)))
+ return rc;
+
+ rc = mdb_cursor_touch(mc);
+ if (rc)
+ return rc;
+
+ mp = mc->mc_pg[mc->mc_top];
+ if (!IS_LEAF(mp))
+ return MDB_CORRUPTED;
+ if (IS_LEAF2(mp))
+ goto del_key;
+ leaf = NODEPTR(mp, mc->mc_ki[mc->mc_top]);
+
+ if (F_ISSET(leaf->mn_flags, F_DUPDATA)) {
+ if (flags & MDB_NODUPDATA) {
+ /* mdb_cursor_del0() will subtract the final entry */
+ mc->mc_db->md_entries -= mc->mc_xcursor->mx_db.md_entries - 1;
+ mc->mc_xcursor->mx_cursor.mc_flags &= ~C_INITIALIZED;
+ } else {
+ if (!F_ISSET(leaf->mn_flags, F_SUBDATA)) {
+ mc->mc_xcursor->mx_cursor.mc_pg[0] = NODEDATA(leaf);
+ }
+ rc = mdb_cursor_del(&mc->mc_xcursor->mx_cursor, MDB_NOSPILL);
+ if (rc)
+ return rc;
+ /* If sub-DB still has entries, we're done */
+ if (mc->mc_xcursor->mx_db.md_entries) {
+ if (leaf->mn_flags & F_SUBDATA) {
+ /* update subDB info */
+ void *db = NODEDATA(leaf);
+ memcpy(db, &mc->mc_xcursor->mx_db, sizeof(MDB_db));
+ } else {
+ MDB_cursor *m2;
+ /* shrink fake page */
+ mdb_node_shrink(mp, mc->mc_ki[mc->mc_top]);
+ leaf = NODEPTR(mp, mc->mc_ki[mc->mc_top]);
+ mc->mc_xcursor->mx_cursor.mc_pg[0] = NODEDATA(leaf);
+ /* fix other sub-DB cursors pointed at fake pages on this page */
+ for (m2 = mc->mc_txn->mt_cursors[mc->mc_dbi]; m2; m2=m2->mc_next) {
+ if (m2 == mc || m2->mc_snum < mc->mc_snum) continue;
+ if (!(m2->mc_flags & C_INITIALIZED)) continue;
+ if (m2->mc_pg[mc->mc_top] == mp) {
+ XCURSOR_REFRESH(m2, mc->mc_top, mp);
+ }
+ }
+ }
+ mc->mc_db->md_entries--;
+ return rc;
+ } else {
+ mc->mc_xcursor->mx_cursor.mc_flags &= ~C_INITIALIZED;
+ }
+ /* otherwise fall thru and delete the sub-DB */
+ }
+
+ if (leaf->mn_flags & F_SUBDATA) {
+ /* add all the child DB's pages to the free list */
+ rc = mdb_drop0(&mc->mc_xcursor->mx_cursor, 0);
+ if (rc)
+ goto fail;
+ }
+ }
+ /* LMDB passes F_SUBDATA in 'flags' to delete a DB record */
+ else if ((leaf->mn_flags ^ flags) & F_SUBDATA) {
+ rc = MDB_INCOMPATIBLE;
+ goto fail;
+ }
+
+ /* add overflow pages to free list */
+ if (F_ISSET(leaf->mn_flags, F_BIGDATA)) {
+ MDB_page *omp;
+ pgno_t pg;
+
+ memcpy(&pg, NODEDATA(leaf), sizeof(pg));
+ if ((rc = mdb_page_get(mc, pg, &omp, NULL)) ||
+ (rc = mdb_ovpage_free(mc, omp)))
+ goto fail;
+ }
+
+del_key:
+ return mdb_cursor_del0(mc);
+
+fail:
+ mc->mc_txn->mt_flags |= MDB_TXN_ERROR;
+ return rc;
+}
+
+/** Allocate and initialize new pages for a database.
+ * Set #MDB_TXN_ERROR on failure.
+ * @param[in] mc a cursor on the database being added to.
+ * @param[in] flags flags defining what type of page is being allocated.
+ * @param[in] num the number of pages to allocate. This is usually 1,
+ * unless allocating overflow pages for a large record.
+ * @param[out] mp Address of a page, or NULL on failure.
+ * @return 0 on success, non-zero on failure.
+ */
+static int
+mdb_page_new(MDB_cursor *mc, uint32_t flags, int num, MDB_page **mp)
+{
+ MDB_page *np;
+ int rc;
+
+ if ((rc = mdb_page_alloc(mc, num, &np)))
+ return rc;
+ DPRINTF(("allocated new mpage %"Z"u, page size %u",
+ np->mp_pgno, mc->mc_txn->mt_env->me_psize));
+ np->mp_flags = flags | P_DIRTY;
+ np->mp_lower = (PAGEHDRSZ-PAGEBASE);
+ np->mp_upper = mc->mc_txn->mt_env->me_psize - PAGEBASE;
+
+ if (IS_BRANCH(np))
+ mc->mc_db->md_branch_pages++;
+ else if (IS_LEAF(np))
+ mc->mc_db->md_leaf_pages++;
+ else if (IS_OVERFLOW(np)) {
+ mc->mc_db->md_overflow_pages += num;
+ np->mp_pages = num;
+ }
+ *mp = np;
+
+ return 0;
+}
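+
+/* Illustrative note (not part of upstream LMDB): callers below request a
+ * single branch or leaf page with num == 1 (e.g. mdb_page_new(mc, P_BRANCH,
+ * 1, &pp) in mdb_page_split), or a run of overflow pages sized by OVPAGES()
+ * to hold one large data item (see mdb_node_add). The flags word selects
+ * the page type, which in turn picks the md_*_pages counter bumped above.
+ */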
+
+/** Calculate the size of a leaf node.
+ * The size depends on the environment's page size; if a data item
+ * is too large it will be put onto an overflow page and the node
+ * size will only include the key and not the data. Sizes are always
+ * rounded up to an even number of bytes, to guarantee 2-byte alignment
+ * of the #MDB_node headers.
+ * @param[in] env The environment handle.
+ * @param[in] key The key for the node.
+ * @param[in] data The data for the node.
+ * @return The number of bytes needed to store the node.
+ */
+static size_t
+mdb_leaf_size(MDB_env *env, MDB_val *key, MDB_val *data)
+{
+ size_t sz;
+
+ sz = LEAFSIZE(key, data);
+ if (sz > env->me_nodemax) {
+ /* put on overflow page */
+ sz -= data->mv_size - sizeof(pgno_t);
+ }
+
+ return EVEN(sz + sizeof(indx_t));
+}
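+
+/* Worked example (illustrative; assumes the common 8-byte MDB_node header,
+ * i.e. NODESIZE == 8, and a 64-bit build where pgno_t is 8 bytes): a
+ * 10-byte key with 20 bytes of data gives LEAFSIZE = 8 + 10 + 20 = 38,
+ * so EVEN(38 + sizeof(indx_t)) = 40 bytes of page space. Had 38 exceeded
+ * me_nodemax, the data portion would be replaced by a pgno_t referencing
+ * an overflow page: 38 - 20 + 8 = 26, EVEN(26 + 2) = 28 bytes on the
+ * leaf page itself.
+ */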
+
+/** Calculate the size of a branch node.
+ * The size should depend on the environment's page size but since
+ * we currently don't support spilling large keys onto overflow
+ * pages, it's simply the size of the #MDB_node header plus the
+ * size of the key. Sizes are always rounded up to an even number
+ * of bytes, to guarantee 2-byte alignment of the #MDB_node headers.
+ * @param[in] env The environment handle.
+ * @param[in] key The key for the node.
+ * @return The number of bytes needed to store the node.
+ */
+static size_t
+mdb_branch_size(MDB_env *env, MDB_val *key)
+{
+ size_t sz;
+
+ sz = INDXSIZE(key);
+ if (sz > env->me_nodemax) {
+ /* put on overflow page */
+ /* not implemented */
+ /* sz -= key->size - sizeof(pgno_t); */
+ }
+
+ return sz + sizeof(indx_t);
+}
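+
+/* Worked example (illustrative, same 8-byte header assumption): a 10-byte
+ * key costs INDXSIZE = 8 + 10 = 18, plus the 2-byte indx_t slot = 20
+ * bytes. Unlike leaf nodes, an oversized branch key is not spilled to an
+ * overflow page; the branch in the code above is a placeholder.
+ */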
+
+/** Add a node to the page pointed to by the cursor.
+ * Set #MDB_TXN_ERROR on failure.
+ * @param[in] mc The cursor for this operation.
+ * @param[in] indx The index on the page where the new node should be added.
+ * @param[in] key The key for the new node.
+ * @param[in] data The data for the new node, if any.
+ * @param[in] pgno The page number, if adding a branch node.
+ * @param[in] flags Flags for the node.
+ * @return 0 on success, non-zero on failure. Possible errors are:
+ * <ul>
+ * <li>ENOMEM - failed to allocate overflow pages for the node.
+ * <li>MDB_PAGE_FULL - there is insufficient room in the page. This error
+ * should never happen since all callers already calculate the
+ * page's free space before calling this function.
+ * </ul>
+ */
+static int
+mdb_node_add(MDB_cursor *mc, indx_t indx,
+ MDB_val *key, MDB_val *data, pgno_t pgno, unsigned int flags)
+{
+ unsigned int i;
+ size_t node_size = NODESIZE;
+ ssize_t room;
+ indx_t ofs;
+ MDB_node *node;
+ MDB_page *mp = mc->mc_pg[mc->mc_top];
+ MDB_page *ofp = NULL; /* overflow page */
+ void *ndata;
+ DKBUF;
+
+ mdb_cassert(mc, MP_UPPER(mp) >= MP_LOWER(mp));
+
+ DPRINTF(("add to %s %spage %"Z"u index %i, data size %"Z"u key size %"Z"u [%s]",
+ IS_LEAF(mp) ? "leaf" : "branch",
+ IS_SUBP(mp) ? "sub-" : "",
+ mdb_dbg_pgno(mp), indx, data ? data->mv_size : 0,
+ key ? key->mv_size : 0, key ? DKEY(key) : "null"));
+
+ if (IS_LEAF2(mp)) {
+ /* Move higher keys up one slot. */
+ int ksize = mc->mc_db->md_pad, dif;
+ char *ptr = LEAF2KEY(mp, indx, ksize);
+ dif = NUMKEYS(mp) - indx;
+ if (dif > 0)
+ memmove(ptr+ksize, ptr, dif*ksize);
+ /* insert new key */
+ memcpy(ptr, key->mv_data, ksize);
+
+ /* Just using these for counting */
+ MP_LOWER(mp) += sizeof(indx_t);
+ MP_UPPER(mp) -= ksize - sizeof(indx_t);
+ return MDB_SUCCESS;
+ }
+
+ room = (ssize_t)SIZELEFT(mp) - (ssize_t)sizeof(indx_t);
+ if (key != NULL)
+ node_size += key->mv_size;
+ if (IS_LEAF(mp)) {
+ mdb_cassert(mc, key && data);
+ if (F_ISSET(flags, F_BIGDATA)) {
+ /* Data already on overflow page. */
+ node_size += sizeof(pgno_t);
+ } else if (node_size + data->mv_size > mc->mc_txn->mt_env->me_nodemax) {
+ int ovpages = OVPAGES(data->mv_size, mc->mc_txn->mt_env->me_psize);
+ int rc;
+ /* Put data on overflow page. */
+ DPRINTF(("data size is %"Z"u, node would be %"Z"u, put data on overflow page",
+ data->mv_size, node_size+data->mv_size));
+ node_size = EVEN(node_size + sizeof(pgno_t));
+ if ((ssize_t)node_size > room)
+ goto full;
+ if ((rc = mdb_page_new(mc, P_OVERFLOW, ovpages, &ofp)))
+ return rc;
+ DPRINTF(("allocated overflow page %"Z"u", ofp->mp_pgno));
+ flags |= F_BIGDATA;
+ goto update;
+ } else {
+ node_size += data->mv_size;
+ }
+ }
+ node_size = EVEN(node_size);
+ if ((ssize_t)node_size > room)
+ goto full;
+
+update:
+ /* Move higher pointers up one slot. */
+ for (i = NUMKEYS(mp); i > indx; i--)
+ MP_PTRS(mp)[i] = MP_PTRS(mp)[i - 1];
+
+ /* Adjust free space offsets. */
+ ofs = MP_UPPER(mp) - node_size;
+ mdb_cassert(mc, ofs >= MP_LOWER(mp) + sizeof(indx_t));
+ MP_PTRS(mp)[indx] = ofs;
+ MP_UPPER(mp) = ofs;
+ MP_LOWER(mp) += sizeof(indx_t);
+
+ /* Write the node data. */
+ node = NODEPTR(mp, indx);
+ node->mn_ksize = (key == NULL) ? 0 : key->mv_size;
+ node->mn_flags = flags;
+ if (IS_LEAF(mp))
+ SETDSZ(node,data->mv_size);
+ else
+ SETPGNO(node,pgno);
+
+ if (key)
+ memcpy(NODEKEY(node), key->mv_data, key->mv_size);
+
+ if (IS_LEAF(mp)) {
+ ndata = NODEDATA(node);
+ if (ofp == NULL) {
+ if (F_ISSET(flags, F_BIGDATA))
+ memcpy(ndata, data->mv_data, sizeof(pgno_t));
+ else if (F_ISSET(flags, MDB_RESERVE))
+ data->mv_data = ndata;
+ else
+ memcpy(ndata, data->mv_data, data->mv_size);
+ } else {
+ memcpy(ndata, &ofp->mp_pgno, sizeof(pgno_t));
+ ndata = METADATA(ofp);
+ if (F_ISSET(flags, MDB_RESERVE))
+ data->mv_data = ndata;
+ else
+ memcpy(ndata, data->mv_data, data->mv_size);
+ }
+ }
+
+ return MDB_SUCCESS;
+
+full:
+ DPRINTF(("not enough room in page %"Z"u, got %u ptrs",
+ mdb_dbg_pgno(mp), NUMKEYS(mp)));
+ DPRINTF(("upper-lower = %u - %u = %"Z"d", MP_UPPER(mp),MP_LOWER(mp),room));
+ DPRINTF(("node size = %"Z"u", node_size));
+ mc->mc_txn->mt_flags |= MDB_TXN_ERROR;
+ return MDB_PAGE_FULL;
+}
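+
+/* Page layout assumed above (illustrative sketch):
+ *
+ *	+--------+--------------+ . . free . . +-----------------+
+ *	| header | mp_ptrs[] -> |    space     |  <- node heap   |
+ *	+--------+--------------+--------------+-----------------+
+ *	                        ^ mp_lower     ^ mp_upper
+ *
+ * Each insertion consumes one indx_t slot at the front (mp_lower grows)
+ * and node_size bytes at the back (mp_upper shrinks); "room" above is
+ * the remaining gap, SIZELEFT(mp), minus the new slot's indx_t.
+ */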
+
+/** Delete the specified node from a page.
+ * @param[in] mc Cursor pointing to the node to delete.
+ * @param[in] ksize The size of a key. Only used if the page is
+ * part of a #MDB_DUPFIXED database.
+ */
+static void
+mdb_node_del(MDB_cursor *mc, int ksize)
+{
+ MDB_page *mp = mc->mc_pg[mc->mc_top];
+ indx_t indx = mc->mc_ki[mc->mc_top];
+ unsigned int sz;
+ indx_t i, j, numkeys, ptr;
+ MDB_node *node;
+ char *base;
+
+ DPRINTF(("delete node %u on %s page %"Z"u", indx,
+ IS_LEAF(mp) ? "leaf" : "branch", mdb_dbg_pgno(mp)));
+ numkeys = NUMKEYS(mp);
+ mdb_cassert(mc, indx < numkeys);
+
+ if (IS_LEAF2(mp)) {
+ int x = numkeys - 1 - indx;
+ base = LEAF2KEY(mp, indx, ksize);
+ if (x)
+ memmove(base, base + ksize, x * ksize);
+ MP_LOWER(mp) -= sizeof(indx_t);
+ MP_UPPER(mp) += ksize - sizeof(indx_t);
+ return;
+ }
+
+ node = NODEPTR(mp, indx);
+ sz = NODESIZE + node->mn_ksize;
+ if (IS_LEAF(mp)) {
+ if (F_ISSET(node->mn_flags, F_BIGDATA))
+ sz += sizeof(pgno_t);
+ else
+ sz += NODEDSZ(node);
+ }
+ sz = EVEN(sz);
+
+ ptr = MP_PTRS(mp)[indx];
+ for (i = j = 0; i < numkeys; i++) {
+ if (i != indx) {
+ MP_PTRS(mp)[j] = MP_PTRS(mp)[i];
+ if (MP_PTRS(mp)[i] < ptr)
+ MP_PTRS(mp)[j] += sz;
+ j++;
+ }
+ }
+
+ base = (char *)mp + MP_UPPER(mp) + PAGEBASE;
+ memmove(base + sz, base, ptr - MP_UPPER(mp));
+
+ MP_LOWER(mp) -= sizeof(indx_t);
+ MP_UPPER(mp) += sz;
+}
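+
+/* Illustrative note: deleting a node of total size sz leaves a hole in
+ * the node heap. The memmove above slides every node stored at a lower
+ * offset than the deleted one up by sz bytes, and each surviving
+ * mp_ptrs[] entry that pointed below the hole is rebased by +sz, so the
+ * page stays densely packed with all free space in one gap.
+ */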
+
+/** Compact the main page after deleting a node on a subpage.
+ * @param[in] mp The main page to operate on.
+ * @param[in] indx The index of the subpage on the main page.
+ */
+static void
+mdb_node_shrink(MDB_page *mp, indx_t indx)
+{
+ MDB_node *node;
+ MDB_page *sp, *xp;
+ char *base;
+ indx_t delta, nsize, len, ptr;
+ int i;
+
+ node = NODEPTR(mp, indx);
+ sp = (MDB_page *)NODEDATA(node);
+ delta = SIZELEFT(sp);
+ nsize = NODEDSZ(node) - delta;
+
+ /* Prepare to shift upward, set len = length(subpage part to shift) */
+ if (IS_LEAF2(sp)) {
+ len = nsize;
+ if (nsize & 1)
+ return; /* do not make the node uneven-sized */
+ } else {
+ xp = (MDB_page *)((char *)sp + delta); /* destination subpage */
+ for (i = NUMKEYS(sp); --i >= 0; )
+ MP_PTRS(xp)[i] = MP_PTRS(sp)[i] - delta;
+ len = PAGEHDRSZ;
+ }
+ MP_UPPER(sp) = MP_LOWER(sp);
+ COPY_PGNO(MP_PGNO(sp), mp->mp_pgno);
+ SETDSZ(node, nsize);
+
+ /* Shift <lower nodes...initial part of subpage> upward */
+ base = (char *)mp + mp->mp_upper + PAGEBASE;
+ memmove(base + delta, base, (char *)sp + len - base);
+
+ ptr = mp->mp_ptrs[indx];
+ for (i = NUMKEYS(mp); --i >= 0; ) {
+ if (mp->mp_ptrs[i] <= ptr)
+ mp->mp_ptrs[i] += delta;
+ }
+ mp->mp_upper += delta;
+}
+
+/** Initial setup of a sorted-dups cursor.
+ * Sorted duplicates are implemented as a sub-database for the given key.
+ * The duplicate data items are actually keys of the sub-database.
+ * Operations on the duplicate data items are performed using a sub-cursor
+ * initialized when the sub-database is first accessed. This function does
+ * the preliminary setup of the sub-cursor, filling in the fields that
+ * depend only on the parent DB.
+ * @param[in] mc The main cursor whose sorted-dups cursor is to be initialized.
+ */
+static void
+mdb_xcursor_init0(MDB_cursor *mc)
+{
+ MDB_xcursor *mx = mc->mc_xcursor;
+
+ mx->mx_cursor.mc_xcursor = NULL;
+ mx->mx_cursor.mc_txn = mc->mc_txn;
+ mx->mx_cursor.mc_db = &mx->mx_db;
+ mx->mx_cursor.mc_dbx = &mx->mx_dbx;
+ mx->mx_cursor.mc_dbi = mc->mc_dbi;
+ mx->mx_cursor.mc_dbflag = &mx->mx_dbflag;
+ mx->mx_cursor.mc_snum = 0;
+ mx->mx_cursor.mc_top = 0;
+ mx->mx_cursor.mc_flags = C_SUB;
+ mx->mx_dbx.md_name.mv_size = 0;
+ mx->mx_dbx.md_name.mv_data = NULL;
+ mx->mx_dbx.md_cmp = mc->mc_dbx->md_dcmp;
+ mx->mx_dbx.md_dcmp = NULL;
+ mx->mx_dbx.md_rel = mc->mc_dbx->md_rel;
+}
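+
+/* Illustrative sketch (not part of upstream LMDB): the sub-database
+ * machinery is invisible to applications. Duplicates in an MDB_DUPSORT
+ * database are reached through the ordinary cursor API ("cur" below is
+ * a placeholder cursor), and the xcursor does the rest:
+ *
+ *	MDB_val key = { 5, "fruit" }, data = { 5, "apple" };
+ *	rc = mdb_cursor_get(cur, &key, &data, MDB_GET_BOTH);
+ *	rc = mdb_cursor_get(cur, &key, &data, MDB_NEXT_DUP);
+ */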
+
+/** Final setup of a sorted-dups cursor.
+ * Sets up the fields that depend on the data from the main cursor.
+ * @param[in] mc The main cursor whose sorted-dups cursor is to be initialized.
+ * @param[in] node The data containing the #MDB_db record for the
+ * sorted-dup database.
+ */
+static void
+mdb_xcursor_init1(MDB_cursor *mc, MDB_node *node)
+{
+ MDB_xcursor *mx = mc->mc_xcursor;
+
+ if (node->mn_flags & F_SUBDATA) {
+ memcpy(&mx->mx_db, NODEDATA(node), sizeof(MDB_db));
+ mx->mx_cursor.mc_pg[0] = 0;
+ mx->mx_cursor.mc_snum = 0;
+ mx->mx_cursor.mc_top = 0;
+ mx->mx_cursor.mc_flags = C_SUB;
+ } else {
+ MDB_page *fp = NODEDATA(node);
+ mx->mx_db.md_pad = 0;
+ mx->mx_db.md_flags = 0;
+ mx->mx_db.md_depth = 1;
+ mx->mx_db.md_branch_pages = 0;
+ mx->mx_db.md_leaf_pages = 1;
+ mx->mx_db.md_overflow_pages = 0;
+ mx->mx_db.md_entries = NUMKEYS(fp);
+ COPY_PGNO(mx->mx_db.md_root, MP_PGNO(fp));
+ mx->mx_cursor.mc_snum = 1;
+ mx->mx_cursor.mc_top = 0;
+ mx->mx_cursor.mc_flags = C_INITIALIZED|C_SUB;
+ mx->mx_cursor.mc_pg[0] = fp;
+ mx->mx_cursor.mc_ki[0] = 0;
+ if (mc->mc_db->md_flags & MDB_DUPFIXED) {
+ mx->mx_db.md_flags = MDB_DUPFIXED;
+ mx->mx_db.md_pad = fp->mp_pad;
+ if (mc->mc_db->md_flags & MDB_INTEGERDUP)
+ mx->mx_db.md_flags |= MDB_INTEGERKEY;
+ }
+ }
+ DPRINTF(("Sub-db -%u root page %"Z"u", mx->mx_cursor.mc_dbi,
+ mx->mx_db.md_root));
+ mx->mx_dbflag = DB_VALID|DB_USRVALID|DB_DUPDATA;
+#if UINT_MAX < SIZE_MAX
+ if (mx->mx_dbx.md_cmp == mdb_cmp_int && mx->mx_db.md_pad == sizeof(size_t))
+ mx->mx_dbx.md_cmp = mdb_cmp_clong;
+#endif
+}
+
+
+/** Fixup a sorted-dups cursor due to underlying update.
+ * Sets up some fields that depend on the data from the main cursor.
+ * Almost the same as init1, but skips initialization steps if the
+ * xcursor had already been used.
+ * @param[in] mc The main cursor whose sorted-dups cursor is to be fixed up.
+ * @param[in] src_mx The xcursor of an up-to-date cursor.
+ * @param[in] new_dupdata True if converting from a non-#F_DUPDATA item.
+ */
+static void
+mdb_xcursor_init2(MDB_cursor *mc, MDB_xcursor *src_mx, int new_dupdata)
+{
+ MDB_xcursor *mx = mc->mc_xcursor;
+
+ if (new_dupdata) {
+ mx->mx_cursor.mc_snum = 1;
+ mx->mx_cursor.mc_top = 0;
+ mx->mx_cursor.mc_flags |= C_INITIALIZED;
+ mx->mx_cursor.mc_ki[0] = 0;
+ mx->mx_dbflag = DB_VALID|DB_USRVALID|DB_DUPDATA;
+#if UINT_MAX < SIZE_MAX
+ mx->mx_dbx.md_cmp = src_mx->mx_dbx.md_cmp;
+#endif
+ } else if (!(mx->mx_cursor.mc_flags & C_INITIALIZED)) {
+ return;
+ }
+ mx->mx_db = src_mx->mx_db;
+ mx->mx_cursor.mc_pg[0] = src_mx->mx_cursor.mc_pg[0];
+ DPRINTF(("Sub-db -%u root page %"Z"u", mx->mx_cursor.mc_dbi,
+ mx->mx_db.md_root));
+}
+
+/** Initialize a cursor for a given transaction and database. */
+static void
+mdb_cursor_init(MDB_cursor *mc, MDB_txn *txn, MDB_dbi dbi, MDB_xcursor *mx)
+{
+ mc->mc_next = NULL;
+ mc->mc_backup = NULL;
+ mc->mc_dbi = dbi;
+ mc->mc_txn = txn;
+ mc->mc_db = &txn->mt_dbs[dbi];
+ mc->mc_dbx = &txn->mt_dbxs[dbi];
+ mc->mc_dbflag = &txn->mt_dbflags[dbi];
+ mc->mc_snum = 0;
+ mc->mc_top = 0;
+ mc->mc_pg[0] = 0;
+ mc->mc_ki[0] = 0;
+ mc->mc_flags = 0;
+ if (txn->mt_dbs[dbi].md_flags & MDB_DUPSORT) {
+ mdb_tassert(txn, mx != NULL);
+ mc->mc_xcursor = mx;
+ mdb_xcursor_init0(mc);
+ } else {
+ mc->mc_xcursor = NULL;
+ }
+ if (*mc->mc_dbflag & DB_STALE) {
+ mdb_page_search(mc, NULL, MDB_PS_ROOTONLY);
+ }
+}
+
+int
+mdb_cursor_open(MDB_txn *txn, MDB_dbi dbi, MDB_cursor **ret)
+{
+ MDB_cursor *mc;
+ size_t size = sizeof(MDB_cursor);
+
+ if (!ret || !TXN_DBI_EXIST(txn, dbi, DB_VALID))
+ return EINVAL;
+
+ if (txn->mt_flags & MDB_TXN_BLOCKED)
+ return MDB_BAD_TXN;
+
+ if (dbi == FREE_DBI && !F_ISSET(txn->mt_flags, MDB_TXN_RDONLY))
+ return EINVAL;
+
+ if (txn->mt_dbs[dbi].md_flags & MDB_DUPSORT)
+ size += sizeof(MDB_xcursor);
+
+ if ((mc = malloc(size)) != NULL) {
+ mdb_cursor_init(mc, txn, dbi, (MDB_xcursor *)(mc + 1));
+ if (txn->mt_cursors) {
+ mc->mc_next = txn->mt_cursors[dbi];
+ txn->mt_cursors[dbi] = mc;
+ mc->mc_flags |= C_UNTRACK;
+ }
+ } else {
+ return ENOMEM;
+ }
+
+ *ret = mc;
+
+ return MDB_SUCCESS;
+}
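+
+/* Usage sketch (illustrative; "env" and "dbi" are placeholders for an
+ * open environment and database handle). A typical read-only scan with
+ * the public cursor API, error handling elided:
+ *
+ *	MDB_txn *txn;
+ *	MDB_cursor *cur;
+ *	MDB_val key, data;
+ *	mdb_txn_begin(env, NULL, MDB_RDONLY, &txn);
+ *	mdb_cursor_open(txn, dbi, &cur);
+ *	while (mdb_cursor_get(cur, &key, &data, MDB_NEXT) == MDB_SUCCESS)
+ *		;
+ *	mdb_cursor_close(cur);
+ *	mdb_txn_abort(txn);
+ *
+ * key.mv_data and data.mv_data point directly into the read-only map.
+ */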
+
+int
+mdb_cursor_renew(MDB_txn *txn, MDB_cursor *mc)
+{
+ if (!mc || !TXN_DBI_EXIST(txn, mc->mc_dbi, DB_VALID))
+ return EINVAL;
+
+ if ((mc->mc_flags & C_UNTRACK) || txn->mt_cursors)
+ return EINVAL;
+
+ if (txn->mt_flags & MDB_TXN_BLOCKED)
+ return MDB_BAD_TXN;
+
+ mdb_cursor_init(mc, txn, mc->mc_dbi, mc->mc_xcursor);
+ return MDB_SUCCESS;
+}
+
+/* Return the count of duplicate data items for the current key */
+int
+mdb_cursor_count(MDB_cursor *mc, size_t *countp)
+{
+ MDB_node *leaf;
+
+ if (mc == NULL || countp == NULL)
+ return EINVAL;
+
+ if (mc->mc_xcursor == NULL)
+ return MDB_INCOMPATIBLE;
+
+ if (mc->mc_txn->mt_flags & MDB_TXN_BLOCKED)
+ return MDB_BAD_TXN;
+
+ if (!(mc->mc_flags & C_INITIALIZED))
+ return EINVAL;
+
+ if (!mc->mc_snum)
+ return MDB_NOTFOUND;
+
+ if (mc->mc_flags & C_EOF) {
+ if (mc->mc_ki[mc->mc_top] >= NUMKEYS(mc->mc_pg[mc->mc_top]))
+ return MDB_NOTFOUND;
+ mc->mc_flags ^= C_EOF;
+ }
+
+ leaf = NODEPTR(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top]);
+ if (!F_ISSET(leaf->mn_flags, F_DUPDATA)) {
+ *countp = 1;
+ } else {
+ if (!(mc->mc_xcursor->mx_cursor.mc_flags & C_INITIALIZED))
+ return EINVAL;
+
+ *countp = mc->mc_xcursor->mx_db.md_entries;
+ }
+ return MDB_SUCCESS;
+}
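+
+/* Usage sketch (illustrative): position the cursor on a key first, then
+ * ask how many duplicates that key holds (MDB_DUPSORT databases only):
+ *
+ *	size_t n;
+ *	if (mdb_cursor_get(cur, &key, &data, MDB_SET) == MDB_SUCCESS &&
+ *	    mdb_cursor_count(cur, &n) == MDB_SUCCESS)
+ *		printf("%zu duplicates\n", n);
+ */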
+
+void
+mdb_cursor_close(MDB_cursor *mc)
+{
+ if (mc && !mc->mc_backup) {
+ /* remove from txn, if tracked */
+ if ((mc->mc_flags & C_UNTRACK) && mc->mc_txn->mt_cursors) {
+ MDB_cursor **prev = &mc->mc_txn->mt_cursors[mc->mc_dbi];
+ while (*prev && *prev != mc) prev = &(*prev)->mc_next;
+ if (*prev == mc)
+ *prev = mc->mc_next;
+ }
+ free(mc);
+ }
+}
+
+MDB_txn *
+mdb_cursor_txn(MDB_cursor *mc)
+{
+ if (!mc) return NULL;
+ return mc->mc_txn;
+}
+
+MDB_dbi
+mdb_cursor_dbi(MDB_cursor *mc)
+{
+ return mc->mc_dbi;
+}
+
+/** Replace the key for a branch node with a new key.
+ * Set #MDB_TXN_ERROR on failure.
+ * @param[in] mc Cursor pointing to the node to operate on.
+ * @param[in] key The new key to use.
+ * @return 0 on success, non-zero on failure.
+ */
+static int
+mdb_update_key(MDB_cursor *mc, MDB_val *key)
+{
+ MDB_page *mp;
+ MDB_node *node;
+ char *base;
+ size_t len;
+ int delta, ksize, oksize;
+ indx_t ptr, i, numkeys, indx;
+ DKBUF;
+
+ indx = mc->mc_ki[mc->mc_top];
+ mp = mc->mc_pg[mc->mc_top];
+ node = NODEPTR(mp, indx);
+ ptr = mp->mp_ptrs[indx];
+#if MDB_DEBUG
+ {
+ MDB_val k2;
+ char kbuf2[DKBUF_MAXKEYSIZE*2+1];
+ k2.mv_data = NODEKEY(node);
+ k2.mv_size = node->mn_ksize;
+ DPRINTF(("update key %u (ofs %u) [%s] to [%s] on page %"Z"u",
+ indx, ptr,
+ mdb_dkey(&k2, kbuf2),
+ DKEY(key),
+ mp->mp_pgno));
+ }
+#endif
+
+ /* Sizes must be 2-byte aligned. */
+ ksize = EVEN(key->mv_size);
+ oksize = EVEN(node->mn_ksize);
+ delta = ksize - oksize;
+
+ /* Shift node contents if EVEN(key length) changed. */
+ if (delta) {
+ if (delta > 0 && SIZELEFT(mp) < delta) {
+ pgno_t pgno;
+ /* not enough space left, do a delete and split */
+ DPRINTF(("Not enough room, delta = %d, splitting...", delta));
+ pgno = NODEPGNO(node);
+ mdb_node_del(mc, 0);
+ return mdb_page_split(mc, key, NULL, pgno, MDB_SPLIT_REPLACE);
+ }
+
+ numkeys = NUMKEYS(mp);
+ for (i = 0; i < numkeys; i++) {
+ if (mp->mp_ptrs[i] <= ptr)
+ mp->mp_ptrs[i] -= delta;
+ }
+
+ base = (char *)mp + mp->mp_upper + PAGEBASE;
+ len = ptr - mp->mp_upper + NODESIZE;
+ memmove(base - delta, base, len);
+ mp->mp_upper -= delta;
+
+ node = NODEPTR(mp, indx);
+ }
+
+ /* But even if no shift was needed, update ksize */
+ if (node->mn_ksize != key->mv_size)
+ node->mn_ksize = key->mv_size;
+
+ if (key->mv_size)
+ memcpy(NODEKEY(node), key->mv_data, key->mv_size);
+
+ return MDB_SUCCESS;
+}
+
+static void
+mdb_cursor_copy(const MDB_cursor *csrc, MDB_cursor *cdst);
+
+/** Perform \b act while tracking temporary cursor \b mn */
+#define WITH_CURSOR_TRACKING(mn, act) do { \
+ MDB_cursor dummy, *tracked, **tp = &(mn).mc_txn->mt_cursors[mn.mc_dbi]; \
+ if ((mn).mc_flags & C_SUB) { \
+ dummy.mc_flags = C_INITIALIZED; \
+ dummy.mc_xcursor = (MDB_xcursor *)&(mn); \
+ tracked = &dummy; \
+ } else { \
+ tracked = &(mn); \
+ } \
+ tracked->mc_next = *tp; \
+ *tp = tracked; \
+ { act; } \
+ *tp = tracked->mc_next; \
+} while (0)
+
+/** Move a node from csrc to cdst.
+ */
+static int
+mdb_node_move(MDB_cursor *csrc, MDB_cursor *cdst, int fromleft)
+{
+ MDB_node *srcnode;
+ MDB_val key, data;
+ pgno_t srcpg;
+ MDB_cursor mn;
+ int rc;
+ unsigned short flags;
+
+ DKBUF;
+
+ /* Mark src and dst as dirty. */
+ if ((rc = mdb_page_touch(csrc)) ||
+ (rc = mdb_page_touch(cdst)))
+ return rc;
+
+ if (IS_LEAF2(csrc->mc_pg[csrc->mc_top])) {
+ key.mv_size = csrc->mc_db->md_pad;
+ key.mv_data = LEAF2KEY(csrc->mc_pg[csrc->mc_top], csrc->mc_ki[csrc->mc_top], key.mv_size);
+ data.mv_size = 0;
+ data.mv_data = NULL;
+ srcpg = 0;
+ flags = 0;
+ } else {
+ srcnode = NODEPTR(csrc->mc_pg[csrc->mc_top], csrc->mc_ki[csrc->mc_top]);
+ mdb_cassert(csrc, !((size_t)srcnode & 1));
+ srcpg = NODEPGNO(srcnode);
+ flags = srcnode->mn_flags;
+ if (csrc->mc_ki[csrc->mc_top] == 0 && IS_BRANCH(csrc->mc_pg[csrc->mc_top])) {
+ unsigned int snum = csrc->mc_snum;
+ MDB_node *s2;
+ /* must find the lowest key below src */
+ rc = mdb_page_search_lowest(csrc);
+ if (rc)
+ return rc;
+ if (IS_LEAF2(csrc->mc_pg[csrc->mc_top])) {
+ key.mv_size = csrc->mc_db->md_pad;
+ key.mv_data = LEAF2KEY(csrc->mc_pg[csrc->mc_top], 0, key.mv_size);
+ } else {
+ s2 = NODEPTR(csrc->mc_pg[csrc->mc_top], 0);
+ key.mv_size = NODEKSZ(s2);
+ key.mv_data = NODEKEY(s2);
+ }
+ csrc->mc_snum = snum--;
+ csrc->mc_top = snum;
+ } else {
+ key.mv_size = NODEKSZ(srcnode);
+ key.mv_data = NODEKEY(srcnode);
+ }
+ data.mv_size = NODEDSZ(srcnode);
+ data.mv_data = NODEDATA(srcnode);
+ }
+ mn.mc_xcursor = NULL;
+ if (IS_BRANCH(cdst->mc_pg[cdst->mc_top]) && cdst->mc_ki[cdst->mc_top] == 0) {
+ unsigned int snum = cdst->mc_snum;
+ MDB_node *s2;
+ MDB_val bkey;
+ /* must find the lowest key below dst */
+ mdb_cursor_copy(cdst, &mn);
+ rc = mdb_page_search_lowest(&mn);
+ if (rc)
+ return rc;
+ if (IS_LEAF2(mn.mc_pg[mn.mc_top])) {
+ bkey.mv_size = mn.mc_db->md_pad;
+ bkey.mv_data = LEAF2KEY(mn.mc_pg[mn.mc_top], 0, bkey.mv_size);
+ } else {
+ s2 = NODEPTR(mn.mc_pg[mn.mc_top], 0);
+ bkey.mv_size = NODEKSZ(s2);
+ bkey.mv_data = NODEKEY(s2);
+ }
+ mn.mc_snum = snum--;
+ mn.mc_top = snum;
+ mn.mc_ki[snum] = 0;
+ rc = mdb_update_key(&mn, &bkey);
+ if (rc)
+ return rc;
+ }
+
+ DPRINTF(("moving %s node %u [%s] on page %"Z"u to node %u on page %"Z"u",
+ IS_LEAF(csrc->mc_pg[csrc->mc_top]) ? "leaf" : "branch",
+ csrc->mc_ki[csrc->mc_top],
+ DKEY(&key),
+ csrc->mc_pg[csrc->mc_top]->mp_pgno,
+ cdst->mc_ki[cdst->mc_top], cdst->mc_pg[cdst->mc_top]->mp_pgno));
+
+ /* Add the node to the destination page.
+ */
+ rc = mdb_node_add(cdst, cdst->mc_ki[cdst->mc_top], &key, &data, srcpg, flags);
+ if (rc != MDB_SUCCESS)
+ return rc;
+
+ /* Delete the node from the source page.
+ */
+ mdb_node_del(csrc, key.mv_size);
+
+ {
+ /* Adjust other cursors pointing to mp */
+ MDB_cursor *m2, *m3;
+ MDB_dbi dbi = csrc->mc_dbi;
+ MDB_page *mpd, *mps;
+
+ mps = csrc->mc_pg[csrc->mc_top];
+ /* If we're adding on the left, bump others up */
+ if (fromleft) {
+ mpd = cdst->mc_pg[csrc->mc_top];
+ for (m2 = csrc->mc_txn->mt_cursors[dbi]; m2; m2=m2->mc_next) {
+ if (csrc->mc_flags & C_SUB)
+ m3 = &m2->mc_xcursor->mx_cursor;
+ else
+ m3 = m2;
+ if (!(m3->mc_flags & C_INITIALIZED) || m3->mc_top < csrc->mc_top)
+ continue;
+ if (m3 != cdst &&
+ m3->mc_pg[csrc->mc_top] == mpd &&
+ m3->mc_ki[csrc->mc_top] >= cdst->mc_ki[csrc->mc_top]) {
+ m3->mc_ki[csrc->mc_top]++;
+ }
+				if (m3 != csrc &&
+ m3->mc_pg[csrc->mc_top] == mps &&
+ m3->mc_ki[csrc->mc_top] == csrc->mc_ki[csrc->mc_top]) {
+ m3->mc_pg[csrc->mc_top] = cdst->mc_pg[cdst->mc_top];
+ m3->mc_ki[csrc->mc_top] = cdst->mc_ki[cdst->mc_top];
+ m3->mc_ki[csrc->mc_top-1]++;
+ }
+ if (IS_LEAF(mps))
+ XCURSOR_REFRESH(m3, csrc->mc_top, m3->mc_pg[csrc->mc_top]);
+ }
+ } else
+ /* Adding on the right, bump others down */
+ {
+ for (m2 = csrc->mc_txn->mt_cursors[dbi]; m2; m2=m2->mc_next) {
+ if (csrc->mc_flags & C_SUB)
+ m3 = &m2->mc_xcursor->mx_cursor;
+ else
+ m3 = m2;
+ if (m3 == csrc) continue;
+ if (!(m3->mc_flags & C_INITIALIZED) || m3->mc_top < csrc->mc_top)
+ continue;
+ if (m3->mc_pg[csrc->mc_top] == mps) {
+ if (!m3->mc_ki[csrc->mc_top]) {
+ m3->mc_pg[csrc->mc_top] = cdst->mc_pg[cdst->mc_top];
+ m3->mc_ki[csrc->mc_top] = cdst->mc_ki[cdst->mc_top];
+ m3->mc_ki[csrc->mc_top-1]--;
+ } else {
+ m3->mc_ki[csrc->mc_top]--;
+ }
+ if (IS_LEAF(mps))
+ XCURSOR_REFRESH(m3, csrc->mc_top, m3->mc_pg[csrc->mc_top]);
+ }
+ }
+ }
+ }
+
+ /* Update the parent separators.
+ */
+ if (csrc->mc_ki[csrc->mc_top] == 0) {
+ if (csrc->mc_ki[csrc->mc_top-1] != 0) {
+ if (IS_LEAF2(csrc->mc_pg[csrc->mc_top])) {
+ key.mv_data = LEAF2KEY(csrc->mc_pg[csrc->mc_top], 0, key.mv_size);
+ } else {
+ srcnode = NODEPTR(csrc->mc_pg[csrc->mc_top], 0);
+ key.mv_size = NODEKSZ(srcnode);
+ key.mv_data = NODEKEY(srcnode);
+ }
+ DPRINTF(("update separator for source page %"Z"u to [%s]",
+ csrc->mc_pg[csrc->mc_top]->mp_pgno, DKEY(&key)));
+ mdb_cursor_copy(csrc, &mn);
+ mn.mc_snum--;
+ mn.mc_top--;
+ /* We want mdb_rebalance to find mn when doing fixups */
+ WITH_CURSOR_TRACKING(mn,
+ rc = mdb_update_key(&mn, &key));
+ if (rc)
+ return rc;
+ }
+ if (IS_BRANCH(csrc->mc_pg[csrc->mc_top])) {
+ MDB_val nullkey;
+ indx_t ix = csrc->mc_ki[csrc->mc_top];
+ nullkey.mv_size = 0;
+ csrc->mc_ki[csrc->mc_top] = 0;
+ rc = mdb_update_key(csrc, &nullkey);
+ csrc->mc_ki[csrc->mc_top] = ix;
+ mdb_cassert(csrc, rc == MDB_SUCCESS);
+ }
+ }
+
+ if (cdst->mc_ki[cdst->mc_top] == 0) {
+ if (cdst->mc_ki[cdst->mc_top-1] != 0) {
+ if (IS_LEAF2(csrc->mc_pg[csrc->mc_top])) {
+ key.mv_data = LEAF2KEY(cdst->mc_pg[cdst->mc_top], 0, key.mv_size);
+ } else {
+ srcnode = NODEPTR(cdst->mc_pg[cdst->mc_top], 0);
+ key.mv_size = NODEKSZ(srcnode);
+ key.mv_data = NODEKEY(srcnode);
+ }
+ DPRINTF(("update separator for destination page %"Z"u to [%s]",
+ cdst->mc_pg[cdst->mc_top]->mp_pgno, DKEY(&key)));
+ mdb_cursor_copy(cdst, &mn);
+ mn.mc_snum--;
+ mn.mc_top--;
+ /* We want mdb_rebalance to find mn when doing fixups */
+ WITH_CURSOR_TRACKING(mn,
+ rc = mdb_update_key(&mn, &key));
+ if (rc)
+ return rc;
+ }
+ if (IS_BRANCH(cdst->mc_pg[cdst->mc_top])) {
+ MDB_val nullkey;
+ indx_t ix = cdst->mc_ki[cdst->mc_top];
+ nullkey.mv_size = 0;
+ cdst->mc_ki[cdst->mc_top] = 0;
+ rc = mdb_update_key(cdst, &nullkey);
+ cdst->mc_ki[cdst->mc_top] = ix;
+ mdb_cassert(cdst, rc == MDB_SUCCESS);
+ }
+ }
+
+ return MDB_SUCCESS;
+}
+
+/** Merge one page into another.
+ * The nodes from the page pointed to by \b csrc will
+ * be copied to the page pointed to by \b cdst and then
+ * the \b csrc page will be freed.
+ * @param[in] csrc Cursor pointing to the source page.
+ * @param[in] cdst Cursor pointing to the destination page.
+ * @return 0 on success, non-zero on failure.
+ */
+static int
+mdb_page_merge(MDB_cursor *csrc, MDB_cursor *cdst)
+{
+ MDB_page *psrc, *pdst;
+ MDB_node *srcnode;
+ MDB_val key, data;
+ unsigned nkeys;
+ int rc;
+ indx_t i, j;
+
+ psrc = csrc->mc_pg[csrc->mc_top];
+ pdst = cdst->mc_pg[cdst->mc_top];
+
+ DPRINTF(("merging page %"Z"u into %"Z"u", psrc->mp_pgno, pdst->mp_pgno));
+
+ mdb_cassert(csrc, csrc->mc_snum > 1); /* can't merge root page */
+ mdb_cassert(csrc, cdst->mc_snum > 1);
+
+ /* Mark dst as dirty. */
+ if ((rc = mdb_page_touch(cdst)))
+ return rc;
+
+ /* get dst page again now that we've touched it. */
+ pdst = cdst->mc_pg[cdst->mc_top];
+
+ /* Move all nodes from src to dst.
+ */
+ j = nkeys = NUMKEYS(pdst);
+ if (IS_LEAF2(psrc)) {
+ key.mv_size = csrc->mc_db->md_pad;
+ key.mv_data = METADATA(psrc);
+ for (i = 0; i < NUMKEYS(psrc); i++, j++) {
+ rc = mdb_node_add(cdst, j, &key, NULL, 0, 0);
+ if (rc != MDB_SUCCESS)
+ return rc;
+ key.mv_data = (char *)key.mv_data + key.mv_size;
+ }
+ } else {
+ for (i = 0; i < NUMKEYS(psrc); i++, j++) {
+ srcnode = NODEPTR(psrc, i);
+ if (i == 0 && IS_BRANCH(psrc)) {
+ MDB_cursor mn;
+ MDB_node *s2;
+ mdb_cursor_copy(csrc, &mn);
+ mn.mc_xcursor = NULL;
+ /* must find the lowest key below src */
+ rc = mdb_page_search_lowest(&mn);
+ if (rc)
+ return rc;
+ if (IS_LEAF2(mn.mc_pg[mn.mc_top])) {
+ key.mv_size = mn.mc_db->md_pad;
+ key.mv_data = LEAF2KEY(mn.mc_pg[mn.mc_top], 0, key.mv_size);
+ } else {
+ s2 = NODEPTR(mn.mc_pg[mn.mc_top], 0);
+ key.mv_size = NODEKSZ(s2);
+ key.mv_data = NODEKEY(s2);
+ }
+ } else {
+ key.mv_size = srcnode->mn_ksize;
+ key.mv_data = NODEKEY(srcnode);
+ }
+
+ data.mv_size = NODEDSZ(srcnode);
+ data.mv_data = NODEDATA(srcnode);
+ rc = mdb_node_add(cdst, j, &key, &data, NODEPGNO(srcnode), srcnode->mn_flags);
+ if (rc != MDB_SUCCESS)
+ return rc;
+ }
+ }
+
+ DPRINTF(("dst page %"Z"u now has %u keys (%.1f%% filled)",
+ pdst->mp_pgno, NUMKEYS(pdst),
+ (float)PAGEFILL(cdst->mc_txn->mt_env, pdst) / 10));
+
+ /* Unlink the src page from parent and add to free list.
+ */
+ csrc->mc_top--;
+ mdb_node_del(csrc, 0);
+ if (csrc->mc_ki[csrc->mc_top] == 0) {
+ key.mv_size = 0;
+ rc = mdb_update_key(csrc, &key);
+ if (rc) {
+ csrc->mc_top++;
+ return rc;
+ }
+ }
+ csrc->mc_top++;
+
+ psrc = csrc->mc_pg[csrc->mc_top];
+ /* If not operating on FreeDB, allow this page to be reused
+ * in this txn. Otherwise just add to free list.
+ */
+ rc = mdb_page_loose(csrc, psrc);
+ if (rc)
+ return rc;
+ if (IS_LEAF(psrc))
+ csrc->mc_db->md_leaf_pages--;
+ else
+ csrc->mc_db->md_branch_pages--;
+ {
+ /* Adjust other cursors pointing to mp */
+ MDB_cursor *m2, *m3;
+ MDB_dbi dbi = csrc->mc_dbi;
+ unsigned int top = csrc->mc_top;
+
+ for (m2 = csrc->mc_txn->mt_cursors[dbi]; m2; m2=m2->mc_next) {
+ if (csrc->mc_flags & C_SUB)
+ m3 = &m2->mc_xcursor->mx_cursor;
+ else
+ m3 = m2;
+ if (m3 == csrc) continue;
+ if (m3->mc_snum < csrc->mc_snum) continue;
+ if (m3->mc_pg[top] == psrc) {
+ m3->mc_pg[top] = pdst;
+ m3->mc_ki[top] += nkeys;
+ m3->mc_ki[top-1] = cdst->mc_ki[top-1];
+ } else if (m3->mc_pg[top-1] == csrc->mc_pg[top-1] &&
+ m3->mc_ki[top-1] > csrc->mc_ki[top-1]) {
+ m3->mc_ki[top-1]--;
+ }
+ if (IS_LEAF(psrc))
+ XCURSOR_REFRESH(m3, top, m3->mc_pg[top]);
+ }
+ }
+ {
+ unsigned int snum = cdst->mc_snum;
+ uint16_t depth = cdst->mc_db->md_depth;
+ mdb_cursor_pop(cdst);
+ rc = mdb_rebalance(cdst);
+ /* Did the tree height change? */
+ if (depth != cdst->mc_db->md_depth)
+ snum += cdst->mc_db->md_depth - depth;
+ cdst->mc_snum = snum;
+ cdst->mc_top = snum-1;
+ }
+ return rc;
+}
+
+/** Copy the contents of a cursor.
+ * @param[in] csrc The cursor to copy from.
+ * @param[out] cdst The cursor to copy to.
+ */
+static void
+mdb_cursor_copy(const MDB_cursor *csrc, MDB_cursor *cdst)
+{
+ unsigned int i;
+
+ cdst->mc_txn = csrc->mc_txn;
+ cdst->mc_dbi = csrc->mc_dbi;
+ cdst->mc_db = csrc->mc_db;
+ cdst->mc_dbx = csrc->mc_dbx;
+ cdst->mc_snum = csrc->mc_snum;
+ cdst->mc_top = csrc->mc_top;
+ cdst->mc_flags = csrc->mc_flags;
+
+ for (i=0; i<csrc->mc_snum; i++) {
+ cdst->mc_pg[i] = csrc->mc_pg[i];
+ cdst->mc_ki[i] = csrc->mc_ki[i];
+ }
+}
+
+/** Rebalance the tree after a delete operation.
+ * @param[in] mc Cursor pointing to the page where rebalancing
+ * should begin.
+ * @return 0 on success, non-zero on failure.
+ */
+static int
+mdb_rebalance(MDB_cursor *mc)
+{
+ MDB_node *node;
+ int rc, fromleft;
+ unsigned int ptop, minkeys, thresh;
+ MDB_cursor mn;
+ indx_t oldki;
+
+ if (IS_BRANCH(mc->mc_pg[mc->mc_top])) {
+ minkeys = 2;
+ thresh = 1;
+ } else {
+ minkeys = 1;
+ thresh = FILL_THRESHOLD;
+ }
+ DPRINTF(("rebalancing %s page %"Z"u (has %u keys, %.1f%% full)",
+ IS_LEAF(mc->mc_pg[mc->mc_top]) ? "leaf" : "branch",
+ mdb_dbg_pgno(mc->mc_pg[mc->mc_top]), NUMKEYS(mc->mc_pg[mc->mc_top]),
+ (float)PAGEFILL(mc->mc_txn->mt_env, mc->mc_pg[mc->mc_top]) / 10));
+
+ if (PAGEFILL(mc->mc_txn->mt_env, mc->mc_pg[mc->mc_top]) >= thresh &&
+ NUMKEYS(mc->mc_pg[mc->mc_top]) >= minkeys) {
+ DPRINTF(("no need to rebalance page %"Z"u, above fill threshold",
+ mdb_dbg_pgno(mc->mc_pg[mc->mc_top])));
+ return MDB_SUCCESS;
+ }
+
+ if (mc->mc_snum < 2) {
+ MDB_page *mp = mc->mc_pg[0];
+ if (IS_SUBP(mp)) {
+ DPUTS("Can't rebalance a subpage, ignoring");
+ return MDB_SUCCESS;
+ }
+ if (NUMKEYS(mp) == 0) {
+ DPUTS("tree is completely empty");
+ mc->mc_db->md_root = P_INVALID;
+ mc->mc_db->md_depth = 0;
+ mc->mc_db->md_leaf_pages = 0;
+ rc = mdb_midl_append(&mc->mc_txn->mt_free_pgs, mp->mp_pgno);
+ if (rc)
+ return rc;
+ /* Adjust cursors pointing to mp */
+ mc->mc_snum = 0;
+ mc->mc_top = 0;
+ mc->mc_flags &= ~C_INITIALIZED;
+ {
+ MDB_cursor *m2, *m3;
+ MDB_dbi dbi = mc->mc_dbi;
+
+ for (m2 = mc->mc_txn->mt_cursors[dbi]; m2; m2=m2->mc_next) {
+ if (mc->mc_flags & C_SUB)
+ m3 = &m2->mc_xcursor->mx_cursor;
+ else
+ m3 = m2;
+ if (!(m3->mc_flags & C_INITIALIZED) || (m3->mc_snum < mc->mc_snum))
+ continue;
+ if (m3->mc_pg[0] == mp) {
+ m3->mc_snum = 0;
+ m3->mc_top = 0;
+ m3->mc_flags &= ~C_INITIALIZED;
+ }
+ }
+ }
+ } else if (IS_BRANCH(mp) && NUMKEYS(mp) == 1) {
+ int i;
+ DPUTS("collapsing root page!");
+ rc = mdb_midl_append(&mc->mc_txn->mt_free_pgs, mp->mp_pgno);
+ if (rc)
+ return rc;
+ mc->mc_db->md_root = NODEPGNO(NODEPTR(mp, 0));
+ rc = mdb_page_get(mc, mc->mc_db->md_root, &mc->mc_pg[0], NULL);
+ if (rc)
+ return rc;
+ mc->mc_db->md_depth--;
+ mc->mc_db->md_branch_pages--;
+ mc->mc_ki[0] = mc->mc_ki[1];
+ for (i = 1; i<mc->mc_db->md_depth; i++) {
+ mc->mc_pg[i] = mc->mc_pg[i+1];
+ mc->mc_ki[i] = mc->mc_ki[i+1];
+ }
+ {
+ /* Adjust other cursors pointing to mp */
+ MDB_cursor *m2, *m3;
+ MDB_dbi dbi = mc->mc_dbi;
+
+ for (m2 = mc->mc_txn->mt_cursors[dbi]; m2; m2=m2->mc_next) {
+ if (mc->mc_flags & C_SUB)
+ m3 = &m2->mc_xcursor->mx_cursor;
+ else
+ m3 = m2;
+ if (m3 == mc) continue;
+ if (!(m3->mc_flags & C_INITIALIZED))
+ continue;
+ if (m3->mc_pg[0] == mp) {
+ for (i=0; i<mc->mc_db->md_depth; i++) {
+ m3->mc_pg[i] = m3->mc_pg[i+1];
+ m3->mc_ki[i] = m3->mc_ki[i+1];
+ }
+ m3->mc_snum--;
+ m3->mc_top--;
+ }
+ }
+ }
+ } else
+ DPUTS("root page doesn't need rebalancing");
+ return MDB_SUCCESS;
+ }
+
+ /* The parent (branch page) must have at least 2 pointers,
+ * otherwise the tree is invalid.
+ */
+ ptop = mc->mc_top-1;
+ mdb_cassert(mc, NUMKEYS(mc->mc_pg[ptop]) > 1);
+
+ /* Leaf page fill factor is below the threshold.
+ * Try to move keys from left or right neighbor, or
+ * merge with a neighbor page.
+ */
+
+ /* Find neighbors.
+ */
+ mdb_cursor_copy(mc, &mn);
+ mn.mc_xcursor = NULL;
+
+ oldki = mc->mc_ki[mc->mc_top];
+ if (mc->mc_ki[ptop] == 0) {
+ /* We're the leftmost leaf in our parent.
+ */
+ DPUTS("reading right neighbor");
+ mn.mc_ki[ptop]++;
+ node = NODEPTR(mc->mc_pg[ptop], mn.mc_ki[ptop]);
+ rc = mdb_page_get(mc, NODEPGNO(node), &mn.mc_pg[mn.mc_top], NULL);
+ if (rc)
+ return rc;
+ mn.mc_ki[mn.mc_top] = 0;
+ mc->mc_ki[mc->mc_top] = NUMKEYS(mc->mc_pg[mc->mc_top]);
+ fromleft = 0;
+ } else {
+ /* There is at least one neighbor to the left.
+ */
+ DPUTS("reading left neighbor");
+ mn.mc_ki[ptop]--;
+ node = NODEPTR(mc->mc_pg[ptop], mn.mc_ki[ptop]);
+ rc = mdb_page_get(mc, NODEPGNO(node), &mn.mc_pg[mn.mc_top], NULL);
+ if (rc)
+ return rc;
+ mn.mc_ki[mn.mc_top] = NUMKEYS(mn.mc_pg[mn.mc_top]) - 1;
+ mc->mc_ki[mc->mc_top] = 0;
+ fromleft = 1;
+ }
+
+ DPRINTF(("found neighbor page %"Z"u (%u keys, %.1f%% full)",
+ mn.mc_pg[mn.mc_top]->mp_pgno, NUMKEYS(mn.mc_pg[mn.mc_top]),
+ (float)PAGEFILL(mc->mc_txn->mt_env, mn.mc_pg[mn.mc_top]) / 10));
+
+ /* If the neighbor page is above threshold and has enough keys,
+ * move one key from it. Otherwise we should try to merge them.
+ * (A branch page must never have less than 2 keys.)
+ */
+ if (PAGEFILL(mc->mc_txn->mt_env, mn.mc_pg[mn.mc_top]) >= thresh && NUMKEYS(mn.mc_pg[mn.mc_top]) > minkeys) {
+ rc = mdb_node_move(&mn, mc, fromleft);
+ if (fromleft) {
+ /* if we inserted on left, bump position up */
+ oldki++;
+ }
+ } else {
+ if (!fromleft) {
+ rc = mdb_page_merge(&mn, mc);
+ } else {
+ oldki += NUMKEYS(mn.mc_pg[mn.mc_top]);
+ mn.mc_ki[mn.mc_top] += mc->mc_ki[mn.mc_top] + 1;
+ /* We want mdb_rebalance to find mn when doing fixups */
+ WITH_CURSOR_TRACKING(mn,
+ rc = mdb_page_merge(mc, &mn));
+ mdb_cursor_copy(&mn, mc);
+ }
+ mc->mc_flags &= ~C_EOF;
+ }
+ mc->mc_ki[mc->mc_top] = oldki;
+ return rc;
+}
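+
+/* Illustrative summary of the policy above: a leaf is left alone while
+ * its fill factor is at least FILL_THRESHOLD and it has at least one
+ * key; a branch needs at least two keys. Below that, one node is stolen
+ * from a neighbor that can spare it (mdb_node_move); otherwise the page
+ * is merged into its neighbor (mdb_page_merge), which pops the cursor
+ * and recursively rebalances the parent.
+ */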
+
+/** Complete a delete operation started by #mdb_cursor_del(). */
+static int
+mdb_cursor_del0(MDB_cursor *mc)
+{
+ int rc;
+ MDB_page *mp;
+ indx_t ki;
+ unsigned int nkeys;
+ MDB_cursor *m2, *m3;
+ MDB_dbi dbi = mc->mc_dbi;
+
+ ki = mc->mc_ki[mc->mc_top];
+ mp = mc->mc_pg[mc->mc_top];
+ mdb_node_del(mc, mc->mc_db->md_pad);
+ mc->mc_db->md_entries--;
+ {
+ /* Adjust other cursors pointing to mp */
+ for (m2 = mc->mc_txn->mt_cursors[dbi]; m2; m2=m2->mc_next) {
+ m3 = (mc->mc_flags & C_SUB) ? &m2->mc_xcursor->mx_cursor : m2;
+ if (! (m2->mc_flags & m3->mc_flags & C_INITIALIZED))
+ continue;
+ if (m3 == mc || m3->mc_snum < mc->mc_snum)
+ continue;
+ if (m3->mc_pg[mc->mc_top] == mp) {
+ if (m3->mc_ki[mc->mc_top] == ki) {
+ m3->mc_flags |= C_DEL;
+ if (mc->mc_db->md_flags & MDB_DUPSORT) {
+ /* Sub-cursor referred into dataset which is gone */
+ m3->mc_xcursor->mx_cursor.mc_flags &= ~(C_INITIALIZED|C_EOF);
+ }
+ continue;
+ } else if (m3->mc_ki[mc->mc_top] > ki) {
+ m3->mc_ki[mc->mc_top]--;
+ }
+ XCURSOR_REFRESH(m3, mc->mc_top, mp);
+ }
+ }
+ }
+ rc = mdb_rebalance(mc);
+ if (rc)
+ goto fail;
+
+ /* DB is totally empty now, just bail out.
+	 * Other cursor adjustments were already done
+	 * by mdb_rebalance and aren't needed here.
+ */
+ if (!mc->mc_snum) {
+ mc->mc_flags |= C_EOF;
+ return rc;
+ }
+
+ mp = mc->mc_pg[mc->mc_top];
+ nkeys = NUMKEYS(mp);
+
+ /* Adjust other cursors pointing to mp */
+ for (m2 = mc->mc_txn->mt_cursors[dbi]; !rc && m2; m2=m2->mc_next) {
+ m3 = (mc->mc_flags & C_SUB) ? &m2->mc_xcursor->mx_cursor : m2;
+ if (!(m2->mc_flags & m3->mc_flags & C_INITIALIZED))
+ continue;
+ if (m3->mc_snum < mc->mc_snum)
+ continue;
+ if (m3->mc_pg[mc->mc_top] == mp) {
+ if (m3->mc_ki[mc->mc_top] >= mc->mc_ki[mc->mc_top]) {
+ /* if m3 points past last node in page, find next sibling */
+ if (m3->mc_ki[mc->mc_top] >= nkeys) {
+ rc = mdb_cursor_sibling(m3, 1);
+ if (rc == MDB_NOTFOUND) {
+ m3->mc_flags |= C_EOF;
+ rc = MDB_SUCCESS;
+ continue;
+ }
+ if (rc)
+ goto fail;
+ }
+ if (m3->mc_xcursor && !(m3->mc_flags & C_EOF)) {
+ MDB_node *node = NODEPTR(m3->mc_pg[m3->mc_top], m3->mc_ki[m3->mc_top]);
+ /* If this node has dupdata, it may need to be reinited
+ * because its data has moved.
+					 * If the xcursor was not initialized, it must be reinitialized.
+					 * Else if the node points to a sub-DB, nothing is needed.
+					 * Else (initialized, not a sub-DB), mc_pg[0] needs to be reset.
+ */
+ if (node->mn_flags & F_DUPDATA) {
+ if (m3->mc_xcursor->mx_cursor.mc_flags & C_INITIALIZED) {
+ if (!(node->mn_flags & F_SUBDATA))
+ m3->mc_xcursor->mx_cursor.mc_pg[0] = NODEDATA(node);
+ } else {
+ mdb_xcursor_init1(m3, node);
+ rc = mdb_cursor_first(&m3->mc_xcursor->mx_cursor, NULL, NULL);
+ if (rc)
+ goto fail;
+ }
+ }
+ m3->mc_xcursor->mx_cursor.mc_flags |= C_DEL;
+ }
+ }
+ }
+ }
+ mc->mc_flags |= C_DEL;
+
+fail:
+ if (rc)
+ mc->mc_txn->mt_flags |= MDB_TXN_ERROR;
+ return rc;
+}
+
+int
+mdb_del(MDB_txn *txn, MDB_dbi dbi,
+ MDB_val *key, MDB_val *data)
+{
+ if (!key || !TXN_DBI_EXIST(txn, dbi, DB_USRVALID))
+ return EINVAL;
+
+ if (txn->mt_flags & (MDB_TXN_RDONLY|MDB_TXN_BLOCKED))
+ return (txn->mt_flags & MDB_TXN_RDONLY) ? EACCES : MDB_BAD_TXN;
+
+ if (!F_ISSET(txn->mt_dbs[dbi].md_flags, MDB_DUPSORT)) {
+ /* must ignore any data */
+ data = NULL;
+ }
+
+ return mdb_del0(txn, dbi, key, data, 0);
+}
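+
+/* Usage sketch (illustrative): for a plain database the data argument
+ * is ignored and the key is removed outright. For MDB_DUPSORT, passing
+ * data deletes just that duplicate; data == NULL deletes them all:
+ *
+ *	rc = mdb_del(txn, dbi, &key, NULL);
+ *	rc = mdb_del(txn, dbi, &key, &data);
+ */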
+
+static int
+mdb_del0(MDB_txn *txn, MDB_dbi dbi,
+ MDB_val *key, MDB_val *data, unsigned flags)
+{
+ MDB_cursor mc;
+ MDB_xcursor mx;
+ MDB_cursor_op op;
+ MDB_val rdata, *xdata;
+ int rc, exact = 0;
+ DKBUF;
+
+ DPRINTF(("====> delete db %u key [%s]", dbi, DKEY(key)));
+
+ mdb_cursor_init(&mc, txn, dbi, &mx);
+
+ if (data) {
+ op = MDB_GET_BOTH;
+ rdata = *data;
+ xdata = &rdata;
+ } else {
+ op = MDB_SET;
+ xdata = NULL;
+ flags |= MDB_NODUPDATA;
+ }
+ rc = mdb_cursor_set(&mc, key, xdata, op, &exact);
+ if (rc == 0) {
+ /* let mdb_page_split know about this cursor if needed:
+ * delete will trigger a rebalance; if it needs to move
+ * a node from one page to another, it will have to
+ * update the parent's separator key(s). If the new sepkey
+ * is larger than the current one, the parent page may
+ * run out of space, triggering a split. We need this
+ * cursor to be consistent until the end of the rebalance.
+ */
+ mc.mc_flags |= C_UNTRACK;
+ mc.mc_next = txn->mt_cursors[dbi];
+ txn->mt_cursors[dbi] = &mc;
+ rc = mdb_cursor_del(&mc, flags);
+ txn->mt_cursors[dbi] = mc.mc_next;
+ }
+ return rc;
+}
+
+/** Split a page and insert a new node.
+ * Set #MDB_TXN_ERROR on failure.
+ * @param[in,out] mc Cursor pointing to the page and desired insertion index.
+ * The cursor will be updated to point to the actual page and index where
+ * the node got inserted after the split.
+ * @param[in] newkey The key for the newly inserted node.
+ * @param[in] newdata The data for the newly inserted node.
+ * @param[in] newpgno The page number, if the new node is a branch node.
+ * @param[in] nflags The #NODE_ADD_FLAGS for the new node.
+ * @return 0 on success, non-zero on failure.
+ */
+static int
+mdb_page_split(MDB_cursor *mc, MDB_val *newkey, MDB_val *newdata, pgno_t newpgno,
+ unsigned int nflags)
+{
+ unsigned int flags;
+ int rc = MDB_SUCCESS, new_root = 0, did_split = 0;
+ indx_t newindx;
+ pgno_t pgno = 0;
+ int i, j, split_indx, nkeys, pmax;
+ MDB_env *env = mc->mc_txn->mt_env;
+ MDB_node *node;
+ MDB_val sepkey, rkey, xdata, *rdata = &xdata;
+ MDB_page *copy = NULL;
+ MDB_page *mp, *rp, *pp;
+ int ptop;
+ MDB_cursor mn;
+ DKBUF;
+
+ mp = mc->mc_pg[mc->mc_top];
+ newindx = mc->mc_ki[mc->mc_top];
+ nkeys = NUMKEYS(mp);
+
+ DPRINTF(("-----> splitting %s page %"Z"u and adding [%s] at index %i/%i",
+ IS_LEAF(mp) ? "leaf" : "branch", mp->mp_pgno,
+ DKEY(newkey), mc->mc_ki[mc->mc_top], nkeys));
+
+ /* Create a right sibling. */
+ if ((rc = mdb_page_new(mc, mp->mp_flags, 1, &rp)))
+ return rc;
+ rp->mp_pad = mp->mp_pad;
+ DPRINTF(("new right sibling: page %"Z"u", rp->mp_pgno));
+
+ /* Usually when splitting the root page, the cursor
+ * height is 1. But when called from mdb_update_key,
+ * the cursor height may be greater because it walks
+ * up the stack while finding the branch slot to update.
+ */
+ if (mc->mc_top < 1) {
+ if ((rc = mdb_page_new(mc, P_BRANCH, 1, &pp)))
+ goto done;
+ /* shift current top to make room for new parent */
+ for (i=mc->mc_snum; i>0; i--) {
+ mc->mc_pg[i] = mc->mc_pg[i-1];
+ mc->mc_ki[i] = mc->mc_ki[i-1];
+ }
+ mc->mc_pg[0] = pp;
+ mc->mc_ki[0] = 0;
+ mc->mc_db->md_root = pp->mp_pgno;
+ DPRINTF(("root split! new root = %"Z"u", pp->mp_pgno));
+ new_root = mc->mc_db->md_depth++;
+
+ /* Add left (implicit) pointer. */
+ if ((rc = mdb_node_add(mc, 0, NULL, NULL, mp->mp_pgno, 0)) != MDB_SUCCESS) {
+ /* undo the pre-push */
+ mc->mc_pg[0] = mc->mc_pg[1];
+ mc->mc_ki[0] = mc->mc_ki[1];
+ mc->mc_db->md_root = mp->mp_pgno;
+ mc->mc_db->md_depth--;
+ goto done;
+ }
+ mc->mc_snum++;
+ mc->mc_top++;
+ ptop = 0;
+ } else {
+ ptop = mc->mc_top-1;
+ DPRINTF(("parent branch page is %"Z"u", mc->mc_pg[ptop]->mp_pgno));
+ }
+
+ mdb_cursor_copy(mc, &mn);
+ mn.mc_xcursor = NULL;
+ mn.mc_pg[mn.mc_top] = rp;
+ mn.mc_ki[ptop] = mc->mc_ki[ptop]+1;
+
+ if (nflags & MDB_APPEND) {
+ mn.mc_ki[mn.mc_top] = 0;
+ sepkey = *newkey;
+ split_indx = newindx;
+ nkeys = 0;
+ } else {
+
+ split_indx = (nkeys+1) / 2;
+
+ if (IS_LEAF2(rp)) {
+ char *split, *ins;
+ int x;
+ unsigned int lsize, rsize, ksize;
+ /* Move half of the keys to the right sibling */
+ x = mc->mc_ki[mc->mc_top] - split_indx;
+ ksize = mc->mc_db->md_pad;
+ split = LEAF2KEY(mp, split_indx, ksize);
+ rsize = (nkeys - split_indx) * ksize;
+ lsize = (nkeys - split_indx) * sizeof(indx_t);
+ mp->mp_lower -= lsize;
+ rp->mp_lower += lsize;
+ mp->mp_upper += rsize - lsize;
+ rp->mp_upper -= rsize - lsize;
+ sepkey.mv_size = ksize;
+ if (newindx == split_indx) {
+ sepkey.mv_data = newkey->mv_data;
+ } else {
+ sepkey.mv_data = split;
+ }
+ if (x<0) {
+ ins = LEAF2KEY(mp, mc->mc_ki[mc->mc_top], ksize);
+ memcpy(rp->mp_ptrs, split, rsize);
+ sepkey.mv_data = rp->mp_ptrs;
+ memmove(ins+ksize, ins, (split_indx - mc->mc_ki[mc->mc_top]) * ksize);
+ memcpy(ins, newkey->mv_data, ksize);
+ mp->mp_lower += sizeof(indx_t);
+ mp->mp_upper -= ksize - sizeof(indx_t);
+ } else {
+ if (x)
+ memcpy(rp->mp_ptrs, split, x * ksize);
+ ins = LEAF2KEY(rp, x, ksize);
+ memcpy(ins, newkey->mv_data, ksize);
+ memcpy(ins+ksize, split + x * ksize, rsize - x * ksize);
+ rp->mp_lower += sizeof(indx_t);
+ rp->mp_upper -= ksize - sizeof(indx_t);
+ mc->mc_ki[mc->mc_top] = x;
+ }
+ } else {
+ int psize, nsize, k, keythresh;
+
+ /* Maximum free space in an empty page */
+ pmax = env->me_psize - PAGEHDRSZ;
+ /* Threshold number of keys considered "small" */
+ keythresh = env->me_psize >> 7;
+
+ if (IS_LEAF(mp))
+ nsize = mdb_leaf_size(env, newkey, newdata);
+ else
+ nsize = mdb_branch_size(env, newkey);
+ nsize = EVEN(nsize);
+
+ /* grab a page to hold a temporary copy */
+ copy = mdb_page_malloc(mc->mc_txn, 1);
+ if (copy == NULL) {
+ rc = ENOMEM;
+ goto done;
+ }
+ copy->mp_pgno = mp->mp_pgno;
+ copy->mp_flags = mp->mp_flags;
+ copy->mp_lower = (PAGEHDRSZ-PAGEBASE);
+ copy->mp_upper = env->me_psize - PAGEBASE;
+
+ /* prepare to insert */
+ for (i=0, j=0; i<nkeys; i++) {
+ if (i == newindx) {
+ copy->mp_ptrs[j++] = 0;
+ }
+ copy->mp_ptrs[j++] = mp->mp_ptrs[i];
+ }
+
+ /* When items are relatively large the split point needs
+ * to be checked, because being off-by-one will make the
+ * difference between success or failure in mdb_node_add.
+ *
+ * It's also relevant if a page happens to be laid out
+ * such that one half of its nodes are all "small" and
+ * the other half of its nodes are "large." If the new
+ * item is also "large" and falls on the half with
+ * "large" nodes, it also may not fit.
+ *
+ * As a final tweak, if the new item goes on the last
+ * spot on the page (and thus, onto the new page), bias
+ * the split so the new page is emptier than the old page.
+ * This yields better packing during sequential inserts.
+ */
+ if (nkeys < keythresh || nsize > pmax/16 || newindx >= nkeys) {
+ /* Find split point */
+ psize = 0;
+ if (newindx <= split_indx || newindx >= nkeys) {
+ i = 0; j = 1;
+ k = newindx >= nkeys ? nkeys : split_indx+1+IS_LEAF(mp);
+ } else {
+ i = nkeys; j = -1;
+ k = split_indx-1;
+ }
+ for (; i!=k; i+=j) {
+ if (i == newindx) {
+ psize += nsize;
+ node = NULL;
+ } else {
+ node = (MDB_node *)((char *)mp + copy->mp_ptrs[i] + PAGEBASE);
+ psize += NODESIZE + NODEKSZ(node) + sizeof(indx_t);
+ if (IS_LEAF(mp)) {
+ if (F_ISSET(node->mn_flags, F_BIGDATA))
+ psize += sizeof(pgno_t);
+ else
+ psize += NODEDSZ(node);
+ }
+ psize = EVEN(psize);
+ }
+ if (psize > pmax || i == k-j) {
+ split_indx = i + (j<0);
+ break;
+ }
+ }
+ }
+ if (split_indx == newindx) {
+ sepkey.mv_size = newkey->mv_size;
+ sepkey.mv_data = newkey->mv_data;
+ } else {
+ node = (MDB_node *)((char *)mp + copy->mp_ptrs[split_indx] + PAGEBASE);
+ sepkey.mv_size = node->mn_ksize;
+ sepkey.mv_data = NODEKEY(node);
+ }
+ }
+ }
+
+ DPRINTF(("separator is %d [%s]", split_indx, DKEY(&sepkey)));
+
+ /* Copy separator key to the parent.
+ */
+ if (SIZELEFT(mn.mc_pg[ptop]) < mdb_branch_size(env, &sepkey)) {
+ int snum = mc->mc_snum;
+ mn.mc_snum--;
+ mn.mc_top--;
+ did_split = 1;
+ /* We want other splits to find mn when doing fixups */
+ WITH_CURSOR_TRACKING(mn,
+ rc = mdb_page_split(&mn, &sepkey, NULL, rp->mp_pgno, 0));
+ if (rc)
+ goto done;
+
+ /* root split? */
+ if (mc->mc_snum > snum) {
+ ptop++;
+ }
+ /* Right page might now have changed parent.
+ * Check if left page also changed parent.
+ */
+ if (mn.mc_pg[ptop] != mc->mc_pg[ptop] &&
+ mc->mc_ki[ptop] >= NUMKEYS(mc->mc_pg[ptop])) {
+ for (i=0; i<ptop; i++) {
+ mc->mc_pg[i] = mn.mc_pg[i];
+ mc->mc_ki[i] = mn.mc_ki[i];
+ }
+ mc->mc_pg[ptop] = mn.mc_pg[ptop];
+ if (mn.mc_ki[ptop]) {
+ mc->mc_ki[ptop] = mn.mc_ki[ptop] - 1;
+ } else {
+ /* find right page's left sibling */
+ mc->mc_ki[ptop] = mn.mc_ki[ptop];
+ mdb_cursor_sibling(mc, 0);
+ }
+ }
+ } else {
+ mn.mc_top--;
+ rc = mdb_node_add(&mn, mn.mc_ki[ptop], &sepkey, NULL, rp->mp_pgno, 0);
+ mn.mc_top++;
+ }
+ if (rc != MDB_SUCCESS) {
+ goto done;
+ }
+ if (nflags & MDB_APPEND) {
+ mc->mc_pg[mc->mc_top] = rp;
+ mc->mc_ki[mc->mc_top] = 0;
+ rc = mdb_node_add(mc, 0, newkey, newdata, newpgno, nflags);
+ if (rc)
+ goto done;
+ for (i=0; i<mc->mc_top; i++)
+ mc->mc_ki[i] = mn.mc_ki[i];
+ } else if (!IS_LEAF2(mp)) {
+ /* Move nodes */
+ mc->mc_pg[mc->mc_top] = rp;
+ i = split_indx;
+ j = 0;
+ do {
+ if (i == newindx) {
+ rkey.mv_data = newkey->mv_data;
+ rkey.mv_size = newkey->mv_size;
+ if (IS_LEAF(mp)) {
+ rdata = newdata;
+ } else
+ pgno = newpgno;
+ flags = nflags;
+ /* Update index for the new key. */
+ mc->mc_ki[mc->mc_top] = j;
+ } else {
+ node = (MDB_node *)((char *)mp + copy->mp_ptrs[i] + PAGEBASE);
+ rkey.mv_data = NODEKEY(node);
+ rkey.mv_size = node->mn_ksize;
+ if (IS_LEAF(mp)) {
+ xdata.mv_data = NODEDATA(node);
+ xdata.mv_size = NODEDSZ(node);
+ rdata = &xdata;
+ } else
+ pgno = NODEPGNO(node);
+ flags = node->mn_flags;
+ }
+
+ if (!IS_LEAF(mp) && j == 0) {
+ /* First branch index doesn't need key data. */
+ rkey.mv_size = 0;
+ }
+
+ rc = mdb_node_add(mc, j, &rkey, rdata, pgno, flags);
+ if (rc)
+ goto done;
+ if (i == nkeys) {
+ i = 0;
+ j = 0;
+ mc->mc_pg[mc->mc_top] = copy;
+ } else {
+ i++;
+ j++;
+ }
+ } while (i != split_indx);
+
+ nkeys = NUMKEYS(copy);
+ for (i=0; i<nkeys; i++)
+ mp->mp_ptrs[i] = copy->mp_ptrs[i];
+ mp->mp_lower = copy->mp_lower;
+ mp->mp_upper = copy->mp_upper;
+ memcpy(NODEPTR(mp, nkeys-1), NODEPTR(copy, nkeys-1),
+ env->me_psize - copy->mp_upper - PAGEBASE);
+
+ /* reset back to original page */
+ if (newindx < split_indx) {
+ mc->mc_pg[mc->mc_top] = mp;
+ } else {
+ mc->mc_pg[mc->mc_top] = rp;
+ mc->mc_ki[ptop]++;
+ /* Make sure mc_ki is still valid.
+ */
+ if (mn.mc_pg[ptop] != mc->mc_pg[ptop] &&
+ mc->mc_ki[ptop] >= NUMKEYS(mc->mc_pg[ptop])) {
+ for (i=0; i<=ptop; i++) {
+ mc->mc_pg[i] = mn.mc_pg[i];
+ mc->mc_ki[i] = mn.mc_ki[i];
+ }
+ }
+ }
+ if (nflags & MDB_RESERVE) {
+ node = NODEPTR(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top]);
+ if (!(node->mn_flags & F_BIGDATA))
+ newdata->mv_data = NODEDATA(node);
+ }
+ } else {
+ if (newindx >= split_indx) {
+ mc->mc_pg[mc->mc_top] = rp;
+ mc->mc_ki[ptop]++;
+ /* Make sure mc_ki is still valid.
+ */
+ if (mn.mc_pg[ptop] != mc->mc_pg[ptop] &&
+ mc->mc_ki[ptop] >= NUMKEYS(mc->mc_pg[ptop])) {
+ for (i=0; i<=ptop; i++) {
+ mc->mc_pg[i] = mn.mc_pg[i];
+ mc->mc_ki[i] = mn.mc_ki[i];
+ }
+ }
+ }
+ }
+
+ {
+ /* Adjust other cursors pointing to mp */
+ MDB_cursor *m2, *m3;
+ MDB_dbi dbi = mc->mc_dbi;
+ nkeys = NUMKEYS(mp);
+
+ for (m2 = mc->mc_txn->mt_cursors[dbi]; m2; m2=m2->mc_next) {
+ if (mc->mc_flags & C_SUB)
+ m3 = &m2->mc_xcursor->mx_cursor;
+ else
+ m3 = m2;
+ if (m3 == mc)
+ continue;
+ if (!(m2->mc_flags & m3->mc_flags & C_INITIALIZED))
+ continue;
+ if (new_root) {
+ int k;
+ /* sub cursors may be on different DB */
+ if (m3->mc_pg[0] != mp)
+ continue;
+ /* root split */
+ for (k=new_root; k>=0; k--) {
+ m3->mc_ki[k+1] = m3->mc_ki[k];
+ m3->mc_pg[k+1] = m3->mc_pg[k];
+ }
+ if (m3->mc_ki[0] >= nkeys) {
+ m3->mc_ki[0] = 1;
+ } else {
+ m3->mc_ki[0] = 0;
+ }
+ m3->mc_pg[0] = mc->mc_pg[0];
+ m3->mc_snum++;
+ m3->mc_top++;
+ }
+ if (m3->mc_top >= mc->mc_top && m3->mc_pg[mc->mc_top] == mp) {
+ if (m3->mc_ki[mc->mc_top] >= newindx && !(nflags & MDB_SPLIT_REPLACE))
+ m3->mc_ki[mc->mc_top]++;
+ if (m3->mc_ki[mc->mc_top] >= nkeys) {
+ m3->mc_pg[mc->mc_top] = rp;
+ m3->mc_ki[mc->mc_top] -= nkeys;
+ for (i=0; i<mc->mc_top; i++) {
+ m3->mc_ki[i] = mn.mc_ki[i];
+ m3->mc_pg[i] = mn.mc_pg[i];
+ }
+ }
+ } else if (!did_split && m3->mc_top >= ptop && m3->mc_pg[ptop] == mc->mc_pg[ptop] &&
+ m3->mc_ki[ptop] >= mc->mc_ki[ptop]) {
+ m3->mc_ki[ptop]++;
+ }
+ if (IS_LEAF(mp))
+ XCURSOR_REFRESH(m3, mc->mc_top, m3->mc_pg[mc->mc_top]);
+ }
+ }
+ DPRINTF(("mp left: %d, rp left: %d", SIZELEFT(mp), SIZELEFT(rp)));
+
+done:
+ if (copy) /* tmp page */
+ mdb_page_free(env, copy);
+ if (rc)
+ mc->mc_txn->mt_flags |= MDB_TXN_ERROR;
+ return rc;
+}
+
+int
+mdb_put(MDB_txn *txn, MDB_dbi dbi,
+ MDB_val *key, MDB_val *data, unsigned int flags)
+{
+ MDB_cursor mc;
+ MDB_xcursor mx;
+ int rc;
+
+ if (!key || !data || !TXN_DBI_EXIST(txn, dbi, DB_USRVALID))
+ return EINVAL;
+
+ if (flags & ~(MDB_NOOVERWRITE|MDB_NODUPDATA|MDB_RESERVE|MDB_APPEND|MDB_APPENDDUP))
+ return EINVAL;
+
+ if (txn->mt_flags & (MDB_TXN_RDONLY|MDB_TXN_BLOCKED))
+ return (txn->mt_flags & MDB_TXN_RDONLY) ? EACCES : MDB_BAD_TXN;
+
+ mdb_cursor_init(&mc, txn, dbi, &mx);
+ mc.mc_next = txn->mt_cursors[dbi];
+ txn->mt_cursors[dbi] = &mc;
+ rc = mdb_cursor_put(&mc, key, data, flags);
+ txn->mt_cursors[dbi] = mc.mc_next;
+ return rc;
+}
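+
+/* Usage sketch (illustrative; "src" and "len" are placeholders). The
+ * temporary cursor above is linked into mt_cursors so a page split
+ * during the put keeps it consistent. Typical calls:
+ *
+ *	rc = mdb_put(txn, dbi, &key, &data, 0);
+ *	rc = mdb_put(txn, dbi, &key, &data, MDB_NOOVERWRITE);
+ *
+ * With MDB_RESERVE, LMDB allocates the space and returns a pointer for
+ * the caller to fill in before the transaction commits:
+ *
+ *	data.mv_size = len;
+ *	rc = mdb_put(txn, dbi, &key, &data, MDB_RESERVE);
+ *	if (rc == MDB_SUCCESS)
+ *		memcpy(data.mv_data, src, len);
+ */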
+
+#ifndef MDB_WBUF
+#define MDB_WBUF (1024*1024)
+#endif
+#define MDB_EOF 0x10 /**< #mdb_env_copyfd1() is done reading */
+
+ /** State needed for a double-buffering compacting copy. */
+typedef struct mdb_copy {
+ MDB_env *mc_env;
+ MDB_txn *mc_txn;
+ pthread_mutex_t mc_mutex;
+ pthread_cond_t mc_cond; /**< Condition variable for #mc_new */
+ char *mc_wbuf[2];
+ char *mc_over[2];
+ int mc_wlen[2];
+ int mc_olen[2];
+ pgno_t mc_next_pgno;
+ HANDLE mc_fd;
+ int mc_toggle; /**< Buffer number in provider */
+ int mc_new; /**< (0-2 buffers to write) | (#MDB_EOF at end) */
+ /** Error code. Never cleared if set. Both threads can set nonzero
+ * to fail the copy. Not mutex-protected, LMDB expects atomic int.
+ */
+ volatile int mc_error;
+} mdb_copy;
+
+ /** Dedicated writer thread for compacting copy. */
+static THREAD_RET ESECT CALL_CONV
+mdb_env_copythr(void *arg)
+{
+ mdb_copy *my = arg;
+ char *ptr;
+ int toggle = 0, wsize, rc;
+#ifdef _WIN32
+ DWORD len;
+#define DO_WRITE(rc, fd, ptr, w2, len) rc = WriteFile(fd, ptr, w2, &len, NULL)
+#else
+ int len;
+#define DO_WRITE(rc, fd, ptr, w2, len) len = write(fd, ptr, w2); rc = (len >= 0)
+#ifdef SIGPIPE
+ sigset_t set;
+ sigemptyset(&set);
+ sigaddset(&set, SIGPIPE);
+ if ((rc = pthread_sigmask(SIG_BLOCK, &set, NULL)) != 0)
+ my->mc_error = rc;
+#endif
+#endif
+
+ pthread_mutex_lock(&my->mc_mutex);
+ for(;;) {
+ while (!my->mc_new)
+ pthread_cond_wait(&my->mc_cond, &my->mc_mutex);
+ if (my->mc_new == 0 + MDB_EOF) /* 0 buffers, just EOF */
+ break;
+ wsize = my->mc_wlen[toggle];
+ ptr = my->mc_wbuf[toggle];
+again:
+ rc = MDB_SUCCESS;
+ while (wsize > 0 && !my->mc_error) {
+ DO_WRITE(rc, my->mc_fd, ptr, wsize, len);
+ if (!rc) {
+ rc = ErrCode();
+#if defined(SIGPIPE) && !defined(_WIN32)
+ if (rc == EPIPE) {
+ /* Collect the pending SIGPIPE, otherwise at least OS X
+ * gives it to the process on thread-exit (ITS#8504).
+ */
+ int tmp;
+ sigwait(&set, &tmp);
+ }
+#endif
+ break;
+ } else if (len > 0) {
+ rc = MDB_SUCCESS;
+ ptr += len;
+ wsize -= len;
+ continue;
+ } else {
+ rc = EIO;
+ break;
+ }
+ }
+ if (rc) {
+ my->mc_error = rc;
+ }
+ /* If there's an overflow page tail, write it too */
+ if (my->mc_olen[toggle]) {
+ wsize = my->mc_olen[toggle];
+ ptr = my->mc_over[toggle];
+ my->mc_olen[toggle] = 0;
+ goto again;
+ }
+ my->mc_wlen[toggle] = 0;
+ toggle ^= 1;
+ /* Return the empty buffer to provider */
+ my->mc_new--;
+ pthread_cond_signal(&my->mc_cond);
+ }
+ pthread_mutex_unlock(&my->mc_mutex);
+ return (THREAD_RET)0;
+#undef DO_WRITE
+}
+
+ /** Give buffer and/or #MDB_EOF to writer thread, await unused buffer.
+ *
+ * @param[in] my control structure.
+ * @param[in] adjust (1 to hand off 1 buffer) | (MDB_EOF when ending).
+ */
+static int ESECT
+mdb_env_cthr_toggle(mdb_copy *my, int adjust)
+{
+ pthread_mutex_lock(&my->mc_mutex);
+ my->mc_new += adjust;
+ pthread_cond_signal(&my->mc_cond);
+ while (my->mc_new & 2) /* both buffers in use */
+ pthread_cond_wait(&my->mc_cond, &my->mc_mutex);
+ pthread_mutex_unlock(&my->mc_mutex);
+
+ my->mc_toggle ^= (adjust & 1);
+ /* Both threads reset mc_wlen, to be safe from threading errors */
+ my->mc_wlen[my->mc_toggle] = 0;
+ return my->mc_error;
+}
+
+ /** Depth-first tree traversal for compacting copy.
+ * @param[in] my control structure.
+ * @param[in,out] pg database root.
+ * @param[in] flags includes #F_DUPDATA if it is a sorted-duplicate sub-DB.
+ */
+static int ESECT
+mdb_env_cwalk(mdb_copy *my, pgno_t *pg, int flags)
+{
+ MDB_cursor mc = {0};
+ MDB_node *ni;
+ MDB_page *mo, *mp, *leaf;
+ char *buf, *ptr;
+ int rc, toggle;
+ unsigned int i;
+
+ /* Empty DB, nothing to do */
+ if (*pg == P_INVALID)
+ return MDB_SUCCESS;
+
+ mc.mc_snum = 1;
+ mc.mc_txn = my->mc_txn;
+
+ rc = mdb_page_get(&mc, *pg, &mc.mc_pg[0], NULL);
+ if (rc)
+ return rc;
+ rc = mdb_page_search_root(&mc, NULL, MDB_PS_FIRST);
+ if (rc)
+ return rc;
+
+ /* Make cursor pages writable */
+ buf = ptr = malloc(my->mc_env->me_psize * mc.mc_snum);
+ if (buf == NULL)
+ return ENOMEM;
+
+ for (i=0; i<mc.mc_top; i++) {
+ mdb_page_copy((MDB_page *)ptr, mc.mc_pg[i], my->mc_env->me_psize);
+ mc.mc_pg[i] = (MDB_page *)ptr;
+ ptr += my->mc_env->me_psize;
+ }
+
+ /* This is writable space for a leaf page. Usually not needed. */
+ leaf = (MDB_page *)ptr;
+
+ toggle = my->mc_toggle;
+ while (mc.mc_snum > 0) {
+ unsigned n;
+ mp = mc.mc_pg[mc.mc_top];
+ n = NUMKEYS(mp);
+
+ if (IS_LEAF(mp)) {
+ if (!IS_LEAF2(mp) && !(flags & F_DUPDATA)) {
+ for (i=0; i<n; i++) {
+ ni = NODEPTR(mp, i);
+ if (ni->mn_flags & F_BIGDATA) {
+ MDB_page *omp;
+ pgno_t pg;
+
+ /* Need writable leaf */
+ if (mp != leaf) {
+ mc.mc_pg[mc.mc_top] = leaf;
+ mdb_page_copy(leaf, mp, my->mc_env->me_psize);
+ mp = leaf;
+ ni = NODEPTR(mp, i);
+ }
+
+ memcpy(&pg, NODEDATA(ni), sizeof(pg));
+ memcpy(NODEDATA(ni), &my->mc_next_pgno, sizeof(pgno_t));
+ rc = mdb_page_get(&mc, pg, &omp, NULL);
+ if (rc)
+ goto done;
+ if (my->mc_wlen[toggle] >= MDB_WBUF) {
+ rc = mdb_env_cthr_toggle(my, 1);
+ if (rc)
+ goto done;
+ toggle = my->mc_toggle;
+ }
+ mo = (MDB_page *)(my->mc_wbuf[toggle] + my->mc_wlen[toggle]);
+ memcpy(mo, omp, my->mc_env->me_psize);
+ mo->mp_pgno = my->mc_next_pgno;
+ my->mc_next_pgno += omp->mp_pages;
+ my->mc_wlen[toggle] += my->mc_env->me_psize;
+ if (omp->mp_pages > 1) {
+ my->mc_olen[toggle] = my->mc_env->me_psize * (omp->mp_pages - 1);
+ my->mc_over[toggle] = (char *)omp + my->mc_env->me_psize;
+ rc = mdb_env_cthr_toggle(my, 1);
+ if (rc)
+ goto done;
+ toggle = my->mc_toggle;
+ }
+ } else if (ni->mn_flags & F_SUBDATA) {
+ MDB_db db;
+
+ /* Need writable leaf */
+ if (mp != leaf) {
+ mc.mc_pg[mc.mc_top] = leaf;
+ mdb_page_copy(leaf, mp, my->mc_env->me_psize);
+ mp = leaf;
+ ni = NODEPTR(mp, i);
+ }
+
+ memcpy(&db, NODEDATA(ni), sizeof(db));
+ my->mc_toggle = toggle;
+ rc = mdb_env_cwalk(my, &db.md_root, ni->mn_flags & F_DUPDATA);
+ if (rc)
+ goto done;
+ toggle = my->mc_toggle;
+ memcpy(NODEDATA(ni), &db, sizeof(db));
+ }
+ }
+ }
+ } else {
+ mc.mc_ki[mc.mc_top]++;
+ if (mc.mc_ki[mc.mc_top] < n) {
+ pgno_t pg;
+again:
+ ni = NODEPTR(mp, mc.mc_ki[mc.mc_top]);
+ pg = NODEPGNO(ni);
+ rc = mdb_page_get(&mc, pg, &mp, NULL);
+ if (rc)
+ goto done;
+ mc.mc_top++;
+ mc.mc_snum++;
+ mc.mc_ki[mc.mc_top] = 0;
+ if (IS_BRANCH(mp)) {
+ /* Whenever we advance to a sibling branch page,
+ * we must proceed all the way down to its first leaf.
+ */
+ mdb_page_copy(mc.mc_pg[mc.mc_top], mp, my->mc_env->me_psize);
+ goto again;
+ } else
+ mc.mc_pg[mc.mc_top] = mp;
+ continue;
+ }
+ }
+ if (my->mc_wlen[toggle] >= MDB_WBUF) {
+ rc = mdb_env_cthr_toggle(my, 1);
+ if (rc)
+ goto done;
+ toggle = my->mc_toggle;
+ }
+ mo = (MDB_page *)(my->mc_wbuf[toggle] + my->mc_wlen[toggle]);
+ mdb_page_copy(mo, mp, my->mc_env->me_psize);
+ mo->mp_pgno = my->mc_next_pgno++;
+ my->mc_wlen[toggle] += my->mc_env->me_psize;
+ if (mc.mc_top) {
+ /* Update parent if there is one */
+ ni = NODEPTR(mc.mc_pg[mc.mc_top-1], mc.mc_ki[mc.mc_top-1]);
+ SETPGNO(ni, mo->mp_pgno);
+ mdb_cursor_pop(&mc);
+ } else {
+ /* Otherwise we're done */
+ *pg = mo->mp_pgno;
+ break;
+ }
+ }
+done:
+ free(buf);
+ return rc;
+}
+
+ /** Copy environment with compaction. */
+static int ESECT
+mdb_env_copyfd1(MDB_env *env, HANDLE fd)
+{
+ MDB_meta *mm;
+ MDB_page *mp;
+ mdb_copy my = {0};
+ MDB_txn *txn = NULL;
+ pthread_t thr;
+ pgno_t root, new_root;
+ int rc = MDB_SUCCESS;
+
+#ifdef _WIN32
+ if (!(my.mc_mutex = CreateMutex(NULL, FALSE, NULL)) ||
+ !(my.mc_cond = CreateEvent(NULL, FALSE, FALSE, NULL))) {
+ rc = ErrCode();
+ goto done;
+ }
+ my.mc_wbuf[0] = _aligned_malloc(MDB_WBUF*2, env->me_os_psize);
+ if (my.mc_wbuf[0] == NULL) {
+ /* _aligned_malloc() sets errno, but we use Windows error codes */
+ rc = ERROR_NOT_ENOUGH_MEMORY;
+ goto done;
+ }
+#else
+ if ((rc = pthread_mutex_init(&my.mc_mutex, NULL)) != 0)
+ return rc;
+ if ((rc = pthread_cond_init(&my.mc_cond, NULL)) != 0)
+ goto done2;
+#ifdef HAVE_MEMALIGN
+ my.mc_wbuf[0] = memalign(env->me_os_psize, MDB_WBUF*2);
+ if (my.mc_wbuf[0] == NULL) {
+ rc = errno;
+ goto done;
+ }
+#else
+ {
+ void *p;
+ if ((rc = posix_memalign(&p, env->me_os_psize, MDB_WBUF*2)) != 0)
+ goto done;
+ my.mc_wbuf[0] = p;
+ }
+#endif
+#endif
+ memset(my.mc_wbuf[0], 0, MDB_WBUF*2);
+ my.mc_wbuf[1] = my.mc_wbuf[0] + MDB_WBUF;
+ my.mc_next_pgno = NUM_METAS;
+ my.mc_env = env;
+ my.mc_fd = fd;
+ rc = THREAD_CREATE(thr, mdb_env_copythr, &my);
+ if (rc)
+ goto done;
+
+ rc = mdb_txn_begin(env, NULL, MDB_RDONLY, &txn);
+ if (rc)
+ goto finish;
+
+ mp = (MDB_page *)my.mc_wbuf[0];
+ memset(mp, 0, NUM_METAS * env->me_psize);
+ mp->mp_pgno = 0;
+ mp->mp_flags = P_META;
+ mm = (MDB_meta *)METADATA(mp);
+ mdb_env_init_meta0(env, mm);
+ mm->mm_address = env->me_metas[0]->mm_address;
+
+ mp = (MDB_page *)(my.mc_wbuf[0] + env->me_psize);
+ mp->mp_pgno = 1;
+ mp->mp_flags = P_META;
+ *(MDB_meta *)METADATA(mp) = *mm;
+ mm = (MDB_meta *)METADATA(mp);
+
+ /* Set metapage 1 with current main DB */
+ root = new_root = txn->mt_dbs[MAIN_DBI].md_root;
+ if (root != P_INVALID) {
+ /* Count free pages + freeDB pages. Subtract from last_pg
+ * to find the new last_pg, which also becomes the new root.
+ */
+ MDB_ID freecount = 0;
+ MDB_cursor mc;
+ MDB_val key, data;
+ mdb_cursor_init(&mc, txn, FREE_DBI, NULL);
+ while ((rc = mdb_cursor_get(&mc, &key, &data, MDB_NEXT)) == 0)
+ freecount += *(MDB_ID *)data.mv_data;
+ if (rc != MDB_NOTFOUND)
+ goto finish;
+ freecount += txn->mt_dbs[FREE_DBI].md_branch_pages +
+ txn->mt_dbs[FREE_DBI].md_leaf_pages +
+ txn->mt_dbs[FREE_DBI].md_overflow_pages;
+
+ new_root = txn->mt_next_pgno - 1 - freecount;
+ mm->mm_last_pg = new_root;
+ mm->mm_dbs[MAIN_DBI] = txn->mt_dbs[MAIN_DBI];
+ mm->mm_dbs[MAIN_DBI].md_root = new_root;
+ } else {
+ /* When the DB is empty, handle it specially to
+ * fix any breakage like page leaks from ITS#8174.
+ */
+ mm->mm_dbs[MAIN_DBI].md_flags = txn->mt_dbs[MAIN_DBI].md_flags;
+ }
+ if (root != P_INVALID || mm->mm_dbs[MAIN_DBI].md_flags) {
+ mm->mm_txnid = 1; /* use metapage 1 */
+ }
+
+ my.mc_wlen[0] = env->me_psize * NUM_METAS;
+ my.mc_txn = txn;
+ rc = mdb_env_cwalk(&my, &root, 0);
+ if (rc == MDB_SUCCESS && root != new_root) {
+ rc = MDB_INCOMPATIBLE; /* page leak or corrupt DB */
+ }
+
+finish:
+ if (rc)
+ my.mc_error = rc;
+ mdb_env_cthr_toggle(&my, 1 | MDB_EOF);
+ rc = THREAD_FINISH(thr);
+ mdb_txn_abort(txn);
+
+done:
+#ifdef _WIN32
+ if (my.mc_wbuf[0]) _aligned_free(my.mc_wbuf[0]);
+ if (my.mc_cond) CloseHandle(my.mc_cond);
+ if (my.mc_mutex) CloseHandle(my.mc_mutex);
+#else
+ free(my.mc_wbuf[0]);
+ pthread_cond_destroy(&my.mc_cond);
+done2:
+ pthread_mutex_destroy(&my.mc_mutex);
+#endif
+ return rc ? rc : my.mc_error;
+}
+
+ /** Copy environment as-is. */
+static int ESECT
+mdb_env_copyfd0(MDB_env *env, HANDLE fd)
+{
+ MDB_txn *txn = NULL;
+ mdb_mutexref_t wmutex = NULL;
+ int rc;
+ size_t wsize, w3;
+ char *ptr;
+#ifdef _WIN32
+ DWORD len, w2;
+#define DO_WRITE(rc, fd, ptr, w2, len) rc = WriteFile(fd, ptr, w2, &len, NULL)
+#else
+ ssize_t len;
+ size_t w2;
+#define DO_WRITE(rc, fd, ptr, w2, len) len = write(fd, ptr, w2); rc = (len >= 0)
+#endif
+
+ /* Do the lock/unlock of the reader mutex before starting the
+ * write txn. Otherwise other read txns could block writers.
+ */
+ rc = mdb_txn_begin(env, NULL, MDB_RDONLY, &txn);
+ if (rc)
+ return rc;
+
+ if (env->me_txns) {
+ /* We must start the actual read txn after blocking writers */
+ mdb_txn_end(txn, MDB_END_RESET_TMP);
+
+ /* Temporarily block writers until we snapshot the meta pages */
+ wmutex = env->me_wmutex;
+ if (LOCK_MUTEX(rc, env, wmutex))
+ goto leave;
+
+ rc = mdb_txn_renew0(txn);
+ if (rc) {
+ UNLOCK_MUTEX(wmutex);
+ goto leave;
+ }
+ }
+
+ wsize = env->me_psize * NUM_METAS;
+ ptr = env->me_map;
+ w2 = wsize;
+ while (w2 > 0) {
+ DO_WRITE(rc, fd, ptr, w2, len);
+ if (!rc) {
+ rc = ErrCode();
+ break;
+ } else if (len > 0) {
+ rc = MDB_SUCCESS;
+ ptr += len;
+ w2 -= len;
+ continue;
+ } else {
+ /* Non-blocking or async handles are not supported */
+ rc = EIO;
+ break;
+ }
+ }
+ if (wmutex)
+ UNLOCK_MUTEX(wmutex);
+
+ if (rc)
+ goto leave;
+
+ w3 = txn->mt_next_pgno * env->me_psize;
+ {
+ size_t fsize = 0;
+ if ((rc = mdb_fsize(env->me_fd, &fsize)))
+ goto leave;
+ if (w3 > fsize)
+ w3 = fsize;
+ }
+ wsize = w3 - wsize;
+ while (wsize > 0) {
+ if (wsize > MAX_WRITE)
+ w2 = MAX_WRITE;
+ else
+ w2 = wsize;
+ DO_WRITE(rc, fd, ptr, w2, len);
+ if (!rc) {
+ rc = ErrCode();
+ break;
+ } else if (len > 0) {
+ rc = MDB_SUCCESS;
+ ptr += len;
+ wsize -= len;
+ continue;
+ } else {
+ rc = EIO;
+ break;
+ }
+ }
+
+leave:
+ mdb_txn_abort(txn);
+ return rc;
+}
+
+int ESECT
+mdb_env_copyfd2(MDB_env *env, HANDLE fd, unsigned int flags)
+{
+ if (flags & MDB_CP_COMPACT)
+ return mdb_env_copyfd1(env, fd);
+ else
+ return mdb_env_copyfd0(env, fd);
+}
+
+int ESECT
+mdb_env_copyfd(MDB_env *env, HANDLE fd)
+{
+ return mdb_env_copyfd2(env, fd, 0);
+}
+
+int ESECT
+mdb_env_copy2(MDB_env *env, const char *path, unsigned int flags)
+{
+ int rc;
+ MDB_name fname;
+ HANDLE newfd = INVALID_HANDLE_VALUE;
+
+ rc = mdb_fname_init(path, env->me_flags | MDB_NOLOCK, &fname);
+ if (rc == MDB_SUCCESS) {
+ rc = mdb_fopen(env, &fname, MDB_O_COPY, 0666, &newfd);
+ mdb_fname_destroy(fname);
+ }
+ if (rc == MDB_SUCCESS) {
+ rc = mdb_env_copyfd2(env, newfd, flags);
+ if (close(newfd) < 0 && rc == MDB_SUCCESS)
+ rc = ErrCode();
+ }
+ return rc;
+}
+
+int ESECT
+mdb_env_copy(MDB_env *env, const char *path)
+{
+ return mdb_env_copy2(env, path, 0);
+}
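+
+/* Usage sketch: back up a live environment. #MDB_CP_COMPACT selects
+ * the compacting mdb_env_copyfd1() path above, which renumbers pages
+ * and omits the freelist; flags==0 takes the page-for-page
+ * mdb_env_copyfd0() path. The path here is a placeholder and must be
+ * an existing directory (or a file, with #MDB_NOSUBDIR).
+ *
+ *	int rc = mdb_env_copy2(env, "/tmp/backup", MDB_CP_COMPACT);
+ *	if (rc != MDB_SUCCESS)
+ *		fprintf(stderr, "backup failed: %s\n", mdb_strerror(rc));
+ */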
+
+int ESECT
+mdb_env_set_flags(MDB_env *env, unsigned int flag, int onoff)
+{
+ if (flag & ~CHANGEABLE)
+ return EINVAL;
+ if (onoff)
+ env->me_flags |= flag;
+ else
+ env->me_flags &= ~flag;
+ return MDB_SUCCESS;
+}
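+
+/* Usage sketch: only flags in the CHANGEABLE set (e.g. #MDB_NOSYNC,
+ * #MDB_NOMETASYNC) may be toggled after open; anything else fails
+ * with EINVAL.
+ *
+ *	mdb_env_set_flags(env, MDB_NOSYNC, 1);	// trade durability for speed
+ *	mdb_env_set_flags(env, MDB_NOSYNC, 0);	// restore synchronous flushes
+ */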
+
+int ESECT
+mdb_env_get_flags(MDB_env *env, unsigned int *arg)
+{
+ if (!env || !arg)
+ return EINVAL;
+
+ *arg = env->me_flags & (CHANGEABLE|CHANGELESS);
+ return MDB_SUCCESS;
+}
+
+int ESECT
+mdb_env_set_userctx(MDB_env *env, void *ctx)
+{
+ if (!env)
+ return EINVAL;
+ env->me_userctx = ctx;
+ return MDB_SUCCESS;
+}
+
+void * ESECT
+mdb_env_get_userctx(MDB_env *env)
+{
+ return env ? env->me_userctx : NULL;
+}
+
+int ESECT
+mdb_env_set_assert(MDB_env *env, MDB_assert_func *func)
+{
+ if (!env)
+ return EINVAL;
+#ifndef NDEBUG
+ env->me_assert_func = func;
+#endif
+ return MDB_SUCCESS;
+}
+
+int ESECT
+mdb_env_get_path(MDB_env *env, const char **arg)
+{
+ if (!env || !arg)
+ return EINVAL;
+
+ *arg = env->me_path;
+ return MDB_SUCCESS;
+}
+
+int ESECT
+mdb_env_get_fd(MDB_env *env, mdb_filehandle_t *arg)
+{
+ if (!env || !arg)
+ return EINVAL;
+
+ *arg = env->me_fd;
+ return MDB_SUCCESS;
+}
+
+/** Common code for #mdb_stat() and #mdb_env_stat().
+ * @param[in] env the environment to operate in.
+ * @param[in] db the #MDB_db record containing the stats to return.
+ * @param[out] arg the address of an #MDB_stat structure to receive the stats.
+ * @return 0, this function always succeeds.
+ */
+static int ESECT
+mdb_stat0(MDB_env *env, MDB_db *db, MDB_stat *arg)
+{
+ arg->ms_psize = env->me_psize;
+ arg->ms_depth = db->md_depth;
+ arg->ms_branch_pages = db->md_branch_pages;
+ arg->ms_leaf_pages = db->md_leaf_pages;
+ arg->ms_overflow_pages = db->md_overflow_pages;
+ arg->ms_entries = db->md_entries;
+
+ return MDB_SUCCESS;
+}
+
+int ESECT
+mdb_env_stat(MDB_env *env, MDB_stat *arg)
+{
+ MDB_meta *meta;
+
+ if (env == NULL || arg == NULL)
+ return EINVAL;
+
+ meta = mdb_env_pick_meta(env);
+
+ return mdb_stat0(env, &meta->mm_dbs[MAIN_DBI], arg);
+}
+
+int ESECT
+mdb_env_info(MDB_env *env, MDB_envinfo *arg)
+{
+ MDB_meta *meta;
+
+ if (env == NULL || arg == NULL)
+ return EINVAL;
+
+ meta = mdb_env_pick_meta(env);
+ arg->me_mapaddr = meta->mm_address;
+ arg->me_last_pgno = meta->mm_last_pg;
+ arg->me_last_txnid = meta->mm_txnid;
+
+ arg->me_mapsize = env->me_mapsize;
+ arg->me_maxreaders = env->me_maxreaders;
+ arg->me_numreaders = env->me_txns ? env->me_txns->mti_numreaders : 0;
+ return MDB_SUCCESS;
+}
+
+/** Set the default comparison functions for a database.
+ * Called immediately after a database is opened to set the defaults.
+ * The user can then override them with #mdb_set_compare() or
+ * #mdb_set_dupsort().
+ * @param[in] txn A transaction handle returned by #mdb_txn_begin()
+ * @param[in] dbi A database handle returned by #mdb_dbi_open()
+ */
+static void
+mdb_default_cmp(MDB_txn *txn, MDB_dbi dbi)
+{
+ uint16_t f = txn->mt_dbs[dbi].md_flags;
+
+ txn->mt_dbxs[dbi].md_cmp =
+ (f & MDB_REVERSEKEY) ? mdb_cmp_memnr :
+ (f & MDB_INTEGERKEY) ? mdb_cmp_cint : mdb_cmp_memn;
+
+ txn->mt_dbxs[dbi].md_dcmp =
+ !(f & MDB_DUPSORT) ? 0 :
+ ((f & MDB_INTEGERDUP)
+ ? ((f & MDB_DUPFIXED) ? mdb_cmp_int : mdb_cmp_cint)
+ : ((f & MDB_REVERSEDUP) ? mdb_cmp_memnr : mdb_cmp_memn));
+}
+
+int mdb_dbi_open(MDB_txn *txn, const char *name, unsigned int flags, MDB_dbi *dbi)
+{
+ MDB_val key, data;
+ MDB_dbi i;
+ MDB_cursor mc;
+ MDB_db dummy;
+ int rc, dbflag, exact;
+ unsigned int unused = 0, seq;
+ char *namedup;
+ size_t len;
+
+ if (flags & ~VALID_FLAGS)
+ return EINVAL;
+ if (txn->mt_flags & MDB_TXN_BLOCKED)
+ return MDB_BAD_TXN;
+
+ /* main DB? */
+ if (!name) {
+ *dbi = MAIN_DBI;
+ if (flags & PERSISTENT_FLAGS) {
+ uint16_t f2 = flags & PERSISTENT_FLAGS;
+ /* make sure flag changes get committed */
+ if ((txn->mt_dbs[MAIN_DBI].md_flags | f2) != txn->mt_dbs[MAIN_DBI].md_flags) {
+ txn->mt_dbs[MAIN_DBI].md_flags |= f2;
+ txn->mt_flags |= MDB_TXN_DIRTY;
+ }
+ }
+ mdb_default_cmp(txn, MAIN_DBI);
+ return MDB_SUCCESS;
+ }
+
+ if (txn->mt_dbxs[MAIN_DBI].md_cmp == NULL) {
+ mdb_default_cmp(txn, MAIN_DBI);
+ }
+
+ /* Is the DB already open? */
+ len = strlen(name);
+ for (i=CORE_DBS; i<txn->mt_numdbs; i++) {
+ if (!txn->mt_dbxs[i].md_name.mv_size) {
+ /* Remember this free slot */
+ if (!unused) unused = i;
+ continue;
+ }
+ if (len == txn->mt_dbxs[i].md_name.mv_size &&
+ !strncmp(name, txn->mt_dbxs[i].md_name.mv_data, len)) {
+ *dbi = i;
+ return MDB_SUCCESS;
+ }
+ }
+
+ /* If no free slot and max hit, fail */
+ if (!unused && txn->mt_numdbs >= txn->mt_env->me_maxdbs)
+ return MDB_DBS_FULL;
+
+ /* Cannot mix named databases with some mainDB flags */
+ if (txn->mt_dbs[MAIN_DBI].md_flags & (MDB_DUPSORT|MDB_INTEGERKEY))
+ return (flags & MDB_CREATE) ? MDB_INCOMPATIBLE : MDB_NOTFOUND;
+
+ /* Find the DB info */
+ dbflag = DB_NEW|DB_VALID|DB_USRVALID;
+ exact = 0;
+ key.mv_size = len;
+ key.mv_data = (void *)name;
+ mdb_cursor_init(&mc, txn, MAIN_DBI, NULL);
+ rc = mdb_cursor_set(&mc, &key, &data, MDB_SET, &exact);
+ if (rc == MDB_SUCCESS) {
+ /* make sure this is actually a DB */
+ MDB_node *node = NODEPTR(mc.mc_pg[mc.mc_top], mc.mc_ki[mc.mc_top]);
+ if ((node->mn_flags & (F_DUPDATA|F_SUBDATA)) != F_SUBDATA)
+ return MDB_INCOMPATIBLE;
+ } else {
+ if (rc != MDB_NOTFOUND || !(flags & MDB_CREATE))
+ return rc;
+ if (F_ISSET(txn->mt_flags, MDB_TXN_RDONLY))
+ return EACCES;
+ }
+
+ /* Done here so we cannot fail after creating a new DB */
+ if ((namedup = strdup(name)) == NULL)
+ return ENOMEM;
+
+ if (rc) {
+ /* MDB_NOTFOUND and MDB_CREATE: Create new DB */
+ data.mv_size = sizeof(MDB_db);
+ data.mv_data = &dummy;
+ memset(&dummy, 0, sizeof(dummy));
+ dummy.md_root = P_INVALID;
+ dummy.md_flags = flags & PERSISTENT_FLAGS;
+ WITH_CURSOR_TRACKING(mc,
+ rc = mdb_cursor_put(&mc, &key, &data, F_SUBDATA));
+ dbflag |= DB_DIRTY;
+ }
+
+ if (rc) {
+ free(namedup);
+ } else {
+ /* Got info, register DBI in this txn */
+ unsigned int slot = unused ? unused : txn->mt_numdbs;
+ txn->mt_dbxs[slot].md_name.mv_data = namedup;
+ txn->mt_dbxs[slot].md_name.mv_size = len;
+ txn->mt_dbxs[slot].md_rel = NULL;
+ txn->mt_dbflags[slot] = dbflag;
+ /* txn-> and env-> are the same in read txns, use
+ * tmp variable to avoid undefined assignment
+ */
+ seq = ++txn->mt_env->me_dbiseqs[slot];
+ txn->mt_dbiseqs[slot] = seq;
+
+ memcpy(&txn->mt_dbs[slot], data.mv_data, sizeof(MDB_db));
+ *dbi = slot;
+ mdb_default_cmp(txn, slot);
+ if (!unused) {
+ txn->mt_numdbs++;
+ }
+ }
+
+ return rc;
+}
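+
+/* Usage sketch: open (or create) a named database inside a write
+ * transaction. Assumes mdb_env_set_maxdbs() was called before
+ * mdb_env_open(); the handle survives the transaction, so open it
+ * once and cache it. "notes" is a placeholder name.
+ *
+ *	MDB_txn *txn;
+ *	MDB_dbi dbi;
+ *	if (mdb_txn_begin(env, NULL, 0, &txn) == MDB_SUCCESS) {
+ *		if (mdb_dbi_open(txn, "notes", MDB_CREATE, &dbi) == MDB_SUCCESS)
+ *			mdb_txn_commit(txn);
+ *		else
+ *			mdb_txn_abort(txn);
+ *	}
+ */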
+
+int ESECT
+mdb_stat(MDB_txn *txn, MDB_dbi dbi, MDB_stat *arg)
+{
+ if (!arg || !TXN_DBI_EXIST(txn, dbi, DB_VALID))
+ return EINVAL;
+
+ if (txn->mt_flags & MDB_TXN_BLOCKED)
+ return MDB_BAD_TXN;
+
+ if (txn->mt_dbflags[dbi] & DB_STALE) {
+ MDB_cursor mc;
+ MDB_xcursor mx;
+ /* Stale, must read the DB's root. cursor_init does it for us. */
+ mdb_cursor_init(&mc, txn, dbi, &mx);
+ }
+ return mdb_stat0(txn->mt_env, &txn->mt_dbs[dbi], arg);
+}
+
+void mdb_dbi_close(MDB_env *env, MDB_dbi dbi)
+{
+ char *ptr;
+ if (dbi < CORE_DBS || dbi >= env->me_maxdbs)
+ return;
+ ptr = env->me_dbxs[dbi].md_name.mv_data;
+ /* If there was no name, this was already closed */
+ if (ptr) {
+ env->me_dbxs[dbi].md_name.mv_data = NULL;
+ env->me_dbxs[dbi].md_name.mv_size = 0;
+ env->me_dbflags[dbi] = 0;
+ env->me_dbiseqs[dbi]++;
+ free(ptr);
+ }
+}
+
+int mdb_dbi_flags(MDB_txn *txn, MDB_dbi dbi, unsigned int *flags)
+{
+ /* We could return the flags for the FREE_DBI too but what's the point? */
+ if (!TXN_DBI_EXIST(txn, dbi, DB_USRVALID))
+ return EINVAL;
+ *flags = txn->mt_dbs[dbi].md_flags & PERSISTENT_FLAGS;
+ return MDB_SUCCESS;
+}
+
+/** Add all the DB's pages to the free list.
+ * @param[in] mc Cursor on the DB to free.
+ * @param[in] subs non-zero to check for sub-DBs in this DB.
+ * @return 0 on success, non-zero on failure.
+ */
+static int
+mdb_drop0(MDB_cursor *mc, int subs)
+{
+ int rc;
+
+ rc = mdb_page_search(mc, NULL, MDB_PS_FIRST);
+ if (rc == MDB_SUCCESS) {
+ MDB_txn *txn = mc->mc_txn;
+ MDB_node *ni;
+ MDB_cursor mx;
+ unsigned int i;
+
+ /* DUPSORT sub-DBs have no ovpages/DBs. Omit scanning leaves.
+ * This also avoids any P_LEAF2 pages, which have no nodes.
+ * Also if the DB doesn't have sub-DBs and has no overflow
+ * pages, omit scanning leaves.
+ */
+ if ((mc->mc_flags & C_SUB) ||
+ (!subs && !mc->mc_db->md_overflow_pages))
+ mdb_cursor_pop(mc);
+
+ mdb_cursor_copy(mc, &mx);
+ while (mc->mc_snum > 0) {
+ MDB_page *mp = mc->mc_pg[mc->mc_top];
+ unsigned n = NUMKEYS(mp);
+ if (IS_LEAF(mp)) {
+ for (i=0; i<n; i++) {
+ ni = NODEPTR(mp, i);
+ if (ni->mn_flags & F_BIGDATA) {
+ MDB_page *omp;
+ pgno_t pg;
+ memcpy(&pg, NODEDATA(ni), sizeof(pg));
+ rc = mdb_page_get(mc, pg, &omp, NULL);
+ if (rc != 0)
+ goto done;
+ mdb_cassert(mc, IS_OVERFLOW(omp));
+ rc = mdb_midl_append_range(&txn->mt_free_pgs,
+ pg, omp->mp_pages);
+ if (rc)
+ goto done;
+ mc->mc_db->md_overflow_pages -= omp->mp_pages;
+ if (!mc->mc_db->md_overflow_pages && !subs)
+ break;
+ } else if (subs && (ni->mn_flags & F_SUBDATA)) {
+ mdb_xcursor_init1(mc, ni);
+ rc = mdb_drop0(&mc->mc_xcursor->mx_cursor, 0);
+ if (rc)
+ goto done;
+ }
+ }
+ if (!subs && !mc->mc_db->md_overflow_pages)
+ goto pop;
+ } else {
+ if ((rc = mdb_midl_need(&txn->mt_free_pgs, n)) != 0)
+ goto done;
+ for (i=0; i<n; i++) {
+ pgno_t pg;
+ ni = NODEPTR(mp, i);
+ pg = NODEPGNO(ni);
+ /* free it */
+ mdb_midl_xappend(txn->mt_free_pgs, pg);
+ }
+ }
+ if (!mc->mc_top)
+ break;
+ mc->mc_ki[mc->mc_top] = i;
+ rc = mdb_cursor_sibling(mc, 1);
+ if (rc) {
+ if (rc != MDB_NOTFOUND)
+ goto done;
+ /* no more siblings, go back to beginning
+ * of previous level.
+ */
+pop:
+ mdb_cursor_pop(mc);
+ mc->mc_ki[0] = 0;
+ for (i=1; i<mc->mc_snum; i++) {
+ mc->mc_ki[i] = 0;
+ mc->mc_pg[i] = mx.mc_pg[i];
+ }
+ }
+ }
+ /* free it */
+ rc = mdb_midl_append(&txn->mt_free_pgs, mc->mc_db->md_root);
+done:
+ if (rc)
+ txn->mt_flags |= MDB_TXN_ERROR;
+ } else if (rc == MDB_NOTFOUND) {
+ rc = MDB_SUCCESS;
+ }
+ mc->mc_flags &= ~C_INITIALIZED;
+ return rc;
+}
+
+int mdb_drop(MDB_txn *txn, MDB_dbi dbi, int del)
+{
+ MDB_cursor *mc, *m2;
+ int rc;
+
+ if ((unsigned)del > 1 || !TXN_DBI_EXIST(txn, dbi, DB_USRVALID))
+ return EINVAL;
+
+ if (F_ISSET(txn->mt_flags, MDB_TXN_RDONLY))
+ return EACCES;
+
+ if (TXN_DBI_CHANGED(txn, dbi))
+ return MDB_BAD_DBI;
+
+ rc = mdb_cursor_open(txn, dbi, &mc);
+ if (rc)
+ return rc;
+
+ rc = mdb_drop0(mc, mc->mc_db->md_flags & MDB_DUPSORT);
+ /* Invalidate the dropped DB's cursors */
+ for (m2 = txn->mt_cursors[dbi]; m2; m2 = m2->mc_next)
+ m2->mc_flags &= ~(C_INITIALIZED|C_EOF);
+ if (rc)
+ goto leave;
+
+ /* Can't delete the main DB */
+ if (del && dbi >= CORE_DBS) {
+ rc = mdb_del0(txn, MAIN_DBI, &mc->mc_dbx->md_name, NULL, F_SUBDATA);
+ if (!rc) {
+ txn->mt_dbflags[dbi] = DB_STALE;
+ mdb_dbi_close(txn->mt_env, dbi);
+ } else {
+ txn->mt_flags |= MDB_TXN_ERROR;
+ }
+ } else {
+ /* reset the DB record, mark it dirty */
+ txn->mt_dbflags[dbi] |= DB_DIRTY;
+ txn->mt_dbs[dbi].md_depth = 0;
+ txn->mt_dbs[dbi].md_branch_pages = 0;
+ txn->mt_dbs[dbi].md_leaf_pages = 0;
+ txn->mt_dbs[dbi].md_overflow_pages = 0;
+ txn->mt_dbs[dbi].md_entries = 0;
+ txn->mt_dbs[dbi].md_root = P_INVALID;
+
+ txn->mt_flags |= MDB_TXN_DIRTY;
+ }
+leave:
+ mdb_cursor_close(mc);
+ return rc;
+}
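+
+/* Usage sketch: del selects between truncating and deleting.
+ *
+ *	mdb_drop(txn, dbi, 0);	// empty the DB, keep the handle usable
+ *	mdb_drop(txn, dbi, 1);	// empty it, delete it, close the handle
+ */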
+
+int mdb_set_compare(MDB_txn *txn, MDB_dbi dbi, MDB_cmp_func *cmp)
+{
+ if (!TXN_DBI_EXIST(txn, dbi, DB_USRVALID))
+ return EINVAL;
+
+ txn->mt_dbxs[dbi].md_cmp = cmp;
+ return MDB_SUCCESS;
+}
+
+int mdb_set_dupsort(MDB_txn *txn, MDB_dbi dbi, MDB_cmp_func *cmp)
+{
+ if (!TXN_DBI_EXIST(txn, dbi, DB_USRVALID))
+ return EINVAL;
+
+ txn->mt_dbxs[dbi].md_dcmp = cmp;
+ return MDB_SUCCESS;
+}
+
+int mdb_set_relfunc(MDB_txn *txn, MDB_dbi dbi, MDB_rel_func *rel)
+{
+ if (!TXN_DBI_EXIST(txn, dbi, DB_USRVALID))
+ return EINVAL;
+
+ txn->mt_dbxs[dbi].md_rel = rel;
+ return MDB_SUCCESS;
+}
+
+int mdb_set_relctx(MDB_txn *txn, MDB_dbi dbi, void *ctx)
+{
+ if (!TXN_DBI_EXIST(txn, dbi, DB_USRVALID))
+ return EINVAL;
+
+ txn->mt_dbxs[dbi].md_relctx = ctx;
+ return MDB_SUCCESS;
+}
+
+int ESECT
+mdb_env_get_maxkeysize(MDB_env *env)
+{
+ return ENV_MAXKEY(env);
+}
+
+int ESECT
+mdb_reader_list(MDB_env *env, MDB_msg_func *func, void *ctx)
+{
+ unsigned int i, rdrs;
+ MDB_reader *mr;
+ char buf[64];
+ int rc = 0, first = 1;
+
+ if (!env || !func)
+ return -1;
+ if (!env->me_txns) {
+ return func("(no reader locks)\n", ctx);
+ }
+ rdrs = env->me_txns->mti_numreaders;
+ mr = env->me_txns->mti_readers;
+ for (i=0; i<rdrs; i++) {
+ if (mr[i].mr_pid) {
+ txnid_t txnid = mr[i].mr_txnid;
+ sprintf(buf, txnid == (txnid_t)-1 ?
+ "%10d %"Z"x -\n" : "%10d %"Z"x %"Z"u\n",
+ (int)mr[i].mr_pid, (size_t)mr[i].mr_tid, txnid);
+ if (first) {
+ first = 0;
+ rc = func(" pid thread txnid\n", ctx);
+ if (rc < 0)
+ break;
+ }
+ rc = func(buf, ctx);
+ if (rc < 0)
+ break;
+ }
+ }
+ if (first) {
+ rc = func("(no active readers)\n", ctx);
+ }
+ return rc;
+}
+
+/** Insert pid into list if not already present.
+ * return -1 if already present.
+ */
+static int ESECT
+mdb_pid_insert(MDB_PID_T *ids, MDB_PID_T pid)
+{
+ /* binary search of pid in list */
+ unsigned base = 0;
+ unsigned cursor = 1;
+ int val = 0;
+ unsigned n = ids[0];
+
+ while( 0 < n ) {
+ unsigned pivot = n >> 1;
+ cursor = base + pivot + 1;
+ val = pid - ids[cursor];
+
+ if( val < 0 ) {
+ n = pivot;
+
+ } else if ( val > 0 ) {
+ base = cursor;
+ n -= pivot + 1;
+
+ } else {
+ /* found, so it's a duplicate */
+ return -1;
+ }
+ }
+
+ if( val > 0 ) {
+ ++cursor;
+ }
+ ids[0]++;
+ for (n = ids[0]; n > cursor; n--)
+ ids[n] = ids[n-1];
+ ids[n] = pid;
+ return 0;
+}
+
+int ESECT
+mdb_reader_check(MDB_env *env, int *dead)
+{
+ if (!env)
+ return EINVAL;
+ if (dead)
+ *dead = 0;
+ return env->me_txns ? mdb_reader_check0(env, 0, dead) : MDB_SUCCESS;
+}
+
+/** As #mdb_reader_check(). \b rlocked is set if caller locked #me_rmutex. */
+static int ESECT
+mdb_reader_check0(MDB_env *env, int rlocked, int *dead)
+{
+ mdb_mutexref_t rmutex = rlocked ? NULL : env->me_rmutex;
+ unsigned int i, j, rdrs;
+ MDB_reader *mr;
+ MDB_PID_T *pids, pid;
+ int rc = MDB_SUCCESS, count = 0;
+
+ rdrs = env->me_txns->mti_numreaders;
+ pids = malloc((rdrs+1) * sizeof(MDB_PID_T));
+ if (!pids)
+ return ENOMEM;
+ pids[0] = 0;
+ mr = env->me_txns->mti_readers;
+ for (i=0; i<rdrs; i++) {
+ pid = mr[i].mr_pid;
+ if (pid && pid != env->me_pid) {
+ if (mdb_pid_insert(pids, pid) == 0) {
+ if (!mdb_reader_pid(env, Pidcheck, pid)) {
+ /* Stale reader found */
+ j = i;
+ if (rmutex) {
+ if ((rc = LOCK_MUTEX0(rmutex)) != 0) {
+ if ((rc = mdb_mutex_failed(env, rmutex, rc)))
+ break;
+ rdrs = 0; /* the above checked all readers */
+ } else {
+ /* Recheck, a new process may have reused pid */
+ if (mdb_reader_pid(env, Pidcheck, pid))
+ j = rdrs;
+ }
+ }
+ for (; j<rdrs; j++)
+ if (mr[j].mr_pid == pid) {
+ DPRINTF(("clear stale reader pid %u txn %"Z"d",
+ (unsigned) pid, mr[j].mr_txnid));
+ mr[j].mr_pid = 0;
+ count++;
+ }
+ if (rmutex)
+ UNLOCK_MUTEX(rmutex);
+ }
+ }
+ }
+ }
+ free(pids);
+ if (dead)
+ *dead = count;
+ return rc;
+}
+
+#ifdef MDB_ROBUST_SUPPORTED
+/** Handle #LOCK_MUTEX0() failure.
+ * Try to repair the lock file if the mutex owner died.
+ * @param[in] env the environment handle
+ * @param[in] mutex LOCK_MUTEX0() mutex
+ * @param[in] rc LOCK_MUTEX0() error (nonzero)
+ * @return 0 on success with the mutex locked, or an error code on failure.
+ */
+static int ESECT
+mdb_mutex_failed(MDB_env *env, mdb_mutexref_t mutex, int rc)
+{
+ int rlocked, rc2;
+ MDB_meta *meta;
+
+ if (rc == MDB_OWNERDEAD) {
+ /* We own the mutex. Clean up after dead previous owner. */
+ rc = MDB_SUCCESS;
+ rlocked = (mutex == env->me_rmutex);
+ if (!rlocked) {
+ /* Keep mti_txnid updated, otherwise next writer can
+ * overwrite data which latest meta page refers to.
+ */
+ meta = mdb_env_pick_meta(env);
+ env->me_txns->mti_txnid = meta->mm_txnid;
+ /* env is hosed if the dead thread was ours */
+ if (env->me_txn) {
+ env->me_flags |= MDB_FATAL_ERROR;
+ env->me_txn = NULL;
+ rc = MDB_PANIC;
+ }
+ }
+ DPRINTF(("%cmutex owner died, %s", (rlocked ? 'r' : 'w'),
+ (rc ? "this process' env is hosed" : "recovering")));
+ rc2 = mdb_reader_check0(env, rlocked, NULL);
+ if (rc2 == 0)
+ rc2 = mdb_mutex_consistent(mutex);
+ if (rc || (rc = rc2)) {
+ DPRINTF(("LOCK_MUTEX recovery failed, %s", mdb_strerror(rc)));
+ UNLOCK_MUTEX(mutex);
+ }
+ } else {
+#ifdef _WIN32
+ rc = ErrCode();
+#endif
+ DPRINTF(("LOCK_MUTEX failed, %s", mdb_strerror(rc)));
+ }
+
+ return rc;
+}
+#endif /* MDB_ROBUST_SUPPORTED */
+
+#if defined(_WIN32)
+/** Convert \b src to new wchar_t[] string with room for \b xtra extra chars */
+static int ESECT
+utf8_to_utf16(const char *src, MDB_name *dst, int xtra)
+{
+ int rc, need = 0;
+ wchar_t *result = NULL;
+ for (;;) { /* malloc result, then fill it in */
+ need = MultiByteToWideChar(CP_UTF8, 0, src, -1, result, need);
+ if (!need) {
+ rc = ErrCode();
+ free(result);
+ return rc;
+ }
+ if (!result) {
+ result = malloc(sizeof(wchar_t) * (need + xtra));
+ if (!result)
+ return ENOMEM;
+ continue;
+ }
+ dst->mn_alloced = 1;
+ dst->mn_len = need - 1;
+ dst->mn_val = result;
+ return MDB_SUCCESS;
+ }
+}
+#endif /* defined(_WIN32) */
+/** @} */
diff --git a/nostrdb/memchr.h b/nostrdb/memchr.h
@@ -0,0 +1,72 @@
+
+
+#ifndef FAST_MEMCHR_H
+#define FAST_MEMCHR_H
+
+#include <string.h>
+
+#ifdef __ARM_NEON
+#define vector_strchr neon_strchr
+#else
+#define vector_strchr native_memchr
+#endif
+
+#ifdef __ARM_NEON
+#include <arm_neon.h>
+static const char *neon_strchr(const char *str, char c, size_t length) {
+ const char* end = str + length;
+
+ // Alignment handling
+ while (str < end && ((size_t)str & 0xF)) {
+ if (*str == c)
+ return str;
+ ++str;
+ }
+
+ uint8x16_t searchChar = vdupq_n_u8(c);
+
+ while (str + 16 <= end) {
+ uint8x16_t chunk = vld1q_u8((const uint8_t*)str);
+ uint8x16_t comparison = vceqq_u8(chunk, searchChar);
+
+ // Check first 64 bits
+ uint64_t result0 =
+ vgetq_lane_u64(vreinterpretq_u64_u8(comparison), 0);
+
+ if (result0)
+ return str + __builtin_ctzll(result0)/8;
+
+ // Check second 64 bits
+ uint64_t result1 = vgetq_lane_u64(vreinterpretq_u64_u8(comparison), 1);
+ if (result1)
+ return str + 8 + __builtin_ctzll(result1)/8;
+
+ str += 16;
+ }
+
+	// Handle the remaining tail (fewer than 16 bytes)
+ for (; str < end; ++str) {
+ if (*str == c)
+ return str;
+ }
+
+ return NULL;
+}
+#endif
+
+static inline const char *native_memchr(const char *str, char c, size_t length) {
+ const void *result = memchr(str, c, length);
+ return (const char *) result;
+}
+
+static inline const char *fast_strchr(const char *str, char c, size_t length)
+{
+ if (length >= 16) {
+ return vector_strchr(str, c, length);
+ }
+
+ return native_memchr(str, c, length);
+}
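+
+/* Usage sketch: fast_strchr() is a drop-in for memchr() on char data;
+ * haystacks shorter than one 16-byte vector fall through to the libc
+ * path.
+ *
+ *	const char *json = "{\"content\":\"hi\"}";
+ *	const char *quote = fast_strchr(json, '"', strlen(json));
+ *	// quote points at the first '"', or is NULL if none exists
+ */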
+
+
+#endif // FAST_MEMCHR_H
diff --git a/nostrdb/midl.c b/nostrdb/midl.c
@@ -0,0 +1,359 @@
+/** @file midl.c
+ * @brief ldap bdb back-end ID List functions */
+/* $OpenLDAP$ */
+/* This work is part of OpenLDAP Software <http://www.openldap.org/>.
+ *
+ * Copyright 2000-2021 The OpenLDAP Foundation.
+ * Portions Copyright 2001-2021 Howard Chu, Symas Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted only as authorized by the OpenLDAP
+ * Public License.
+ *
+ * A copy of this license is available in the file LICENSE in the
+ * top-level directory of the distribution or, alternatively, at
+ * <http://www.OpenLDAP.org/license.html>.
+ */
+
+#include <limits.h>
+#include <string.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <sys/types.h>
+#include "midl.h"
+
+/** @defgroup internal LMDB Internals
+ * @{
+ */
+/** @defgroup idls ID List Management
+ * @{
+ */
+#define CMP(x,y) ( (x) < (y) ? -1 : (x) > (y) )
+
+unsigned mdb_midl_search( MDB_IDL ids, MDB_ID id )
+{
+ /*
+ * binary search of id in ids
+ * if found, returns position of id
+ * if not found, returns first position greater than id
+ */
+ unsigned base = 0;
+ unsigned cursor = 1;
+ int val = 0;
+ unsigned n = ids[0];
+
+ while( 0 < n ) {
+ unsigned pivot = n >> 1;
+ cursor = base + pivot + 1;
+ val = CMP( ids[cursor], id );
+
+ if( val < 0 ) {
+ n = pivot;
+
+ } else if ( val > 0 ) {
+ base = cursor;
+ n -= pivot + 1;
+
+ } else {
+ return cursor;
+ }
+ }
+
+ if( val > 0 ) {
+ ++cursor;
+ }
+ return cursor;
+}
+
+#if 0 /* superseded by append/sort */
+int mdb_midl_insert( MDB_IDL ids, MDB_ID id )
+{
+ unsigned x, i;
+
+ x = mdb_midl_search( ids, id );
+ assert( x > 0 );
+
+ if( x < 1 ) {
+ /* internal error */
+ return -2;
+ }
+
+ if ( x <= ids[0] && ids[x] == id ) {
+ /* duplicate */
+ assert(0);
+ return -1;
+ }
+
+ if ( ++ids[0] >= MDB_IDL_DB_MAX ) {
+ /* no room */
+ --ids[0];
+ return -2;
+
+ } else {
+ /* insert id */
+ for (i=ids[0]; i>x; i--)
+ ids[i] = ids[i-1];
+ ids[x] = id;
+ }
+
+ return 0;
+}
+#endif
+
+MDB_IDL mdb_midl_alloc(int num)
+{
+ MDB_IDL ids = malloc((num+2) * sizeof(MDB_ID));
+ if (ids) {
+ *ids++ = num;
+ *ids = 0;
+ }
+ return ids;
+}
+
+void mdb_midl_free(MDB_IDL ids)
+{
+ if (ids)
+ free(ids-1);
+}
+
+void mdb_midl_shrink( MDB_IDL *idp )
+{
+ MDB_IDL ids = *idp;
+ if (*(--ids) > MDB_IDL_UM_MAX &&
+ (ids = realloc(ids, (MDB_IDL_UM_MAX+2) * sizeof(MDB_ID))))
+ {
+ *ids++ = MDB_IDL_UM_MAX;
+ *idp = ids;
+ }
+}
+
+static int mdb_midl_grow( MDB_IDL *idp, int num )
+{
+ MDB_IDL idn = *idp-1;
+ /* grow it */
+ idn = realloc(idn, (*idn + num + 2) * sizeof(MDB_ID));
+ if (!idn)
+ return ENOMEM;
+ *idn++ += num;
+ *idp = idn;
+ return 0;
+}
+
+int mdb_midl_need( MDB_IDL *idp, unsigned num )
+{
+ MDB_IDL ids = *idp;
+ num += ids[0];
+ if (num > ids[-1]) {
+ num = (num + num/4 + (256 + 2)) & -256;
+ if (!(ids = realloc(ids-1, num * sizeof(MDB_ID))))
+ return ENOMEM;
+ *ids++ = num - 2;
+ *idp = ids;
+ }
+ return 0;
+}
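+
+/* Worked example of the rounding above: with ids[0] == 1000 live IDs
+ * and num == 300 more requested, num becomes 1300, and
+ * (1300 + 1300/4 + 258) & -256 == 1883 & ~255 == 1792, so 1792 slots
+ * are reallocated and ids[-1] records 1790 usable entries.
+ */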
+
+int mdb_midl_append( MDB_IDL *idp, MDB_ID id )
+{
+ MDB_IDL ids = *idp;
+ /* Too big? */
+ if (ids[0] >= ids[-1]) {
+ if (mdb_midl_grow(idp, MDB_IDL_UM_MAX))
+ return ENOMEM;
+ ids = *idp;
+ }
+ ids[0]++;
+ ids[ids[0]] = id;
+ return 0;
+}
+
+int mdb_midl_append_list( MDB_IDL *idp, MDB_IDL app )
+{
+ MDB_IDL ids = *idp;
+ /* Too big? */
+ if (ids[0] + app[0] >= ids[-1]) {
+ if (mdb_midl_grow(idp, app[0]))
+ return ENOMEM;
+ ids = *idp;
+ }
+ memcpy(&ids[ids[0]+1], &app[1], app[0] * sizeof(MDB_ID));
+ ids[0] += app[0];
+ return 0;
+}
+
+int mdb_midl_append_range( MDB_IDL *idp, MDB_ID id, unsigned n )
+{
+ MDB_ID *ids = *idp, len = ids[0];
+ /* Too big? */
+ if (len + n > ids[-1]) {
+ if (mdb_midl_grow(idp, n | MDB_IDL_UM_MAX))
+ return ENOMEM;
+ ids = *idp;
+ }
+ ids[0] = len + n;
+ ids += len;
+ while (n)
+ ids[n--] = id++;
+ return 0;
+}
+
+void mdb_midl_xmerge( MDB_IDL idl, MDB_IDL merge )
+{
+ MDB_ID old_id, merge_id, i = merge[0], j = idl[0], k = i+j, total = k;
+ idl[0] = (MDB_ID)-1; /* delimiter for idl scan below */
+ old_id = idl[j];
+ while (i) {
+ merge_id = merge[i--];
+ for (; old_id < merge_id; old_id = idl[--j])
+ idl[k--] = old_id;
+ idl[k--] = merge_id;
+ }
+ idl[0] = total;
+}
+
+/* Quicksort + Insertion sort for small arrays */
+
+#define SMALL 8
+#define MIDL_SWAP(a,b) { itmp=(a); (a)=(b); (b)=itmp; }
+
+void
+mdb_midl_sort( MDB_IDL ids )
+{
+ /* Max possible depth of int-indexed tree * 2 items/level */
+ int istack[sizeof(int)*CHAR_BIT * 2];
+ int i,j,k,l,ir,jstack;
+ MDB_ID a, itmp;
+
+ ir = (int)ids[0];
+ l = 1;
+ jstack = 0;
+ for(;;) {
+ if (ir - l < SMALL) { /* Insertion sort */
+ for (j=l+1;j<=ir;j++) {
+ a = ids[j];
+ for (i=j-1;i>=1;i--) {
+ if (ids[i] >= a) break;
+ ids[i+1] = ids[i];
+ }
+ ids[i+1] = a;
+ }
+ if (jstack == 0) break;
+ ir = istack[jstack--];
+ l = istack[jstack--];
+ } else {
+ k = (l + ir) >> 1; /* Choose median of left, center, right */
+ MIDL_SWAP(ids[k], ids[l+1]);
+ if (ids[l] < ids[ir]) {
+ MIDL_SWAP(ids[l], ids[ir]);
+ }
+ if (ids[l+1] < ids[ir]) {
+ MIDL_SWAP(ids[l+1], ids[ir]);
+ }
+ if (ids[l] < ids[l+1]) {
+ MIDL_SWAP(ids[l], ids[l+1]);
+ }
+ i = l+1;
+ j = ir;
+ a = ids[l+1];
+ for(;;) {
+ do i++; while(ids[i] > a);
+ do j--; while(ids[j] < a);
+ if (j < i) break;
+ MIDL_SWAP(ids[i],ids[j]);
+ }
+ ids[l+1] = ids[j];
+ ids[j] = a;
+ jstack += 2;
+ if (ir-i+1 >= j-l) {
+ istack[jstack] = ir;
+ istack[jstack-1] = i;
+ ir = j-1;
+ } else {
+ istack[jstack] = j-1;
+ istack[jstack-1] = l;
+ l = i;
+ }
+ }
+ }
+}
+
+unsigned mdb_mid2l_search( MDB_ID2L ids, MDB_ID id )
+{
+ /*
+ * binary search of id in ids
+ * if found, returns position of id
+ * if not found, returns first position greater than id
+ */
+ unsigned base = 0;
+ unsigned cursor = 1;
+ int val = 0;
+ unsigned n = (unsigned)ids[0].mid;
+
+ while( 0 < n ) {
+ unsigned pivot = n >> 1;
+ cursor = base + pivot + 1;
+ val = CMP( id, ids[cursor].mid );
+
+ if( val < 0 ) {
+ n = pivot;
+
+ } else if ( val > 0 ) {
+ base = cursor;
+ n -= pivot + 1;
+
+ } else {
+ return cursor;
+ }
+ }
+
+ if( val > 0 ) {
+ ++cursor;
+ }
+ return cursor;
+}
+
+int mdb_mid2l_insert( MDB_ID2L ids, MDB_ID2 *id )
+{
+ unsigned x, i;
+
+ x = mdb_mid2l_search( ids, id->mid );
+
+ if( x < 1 ) {
+ /* internal error */
+ return -2;
+ }
+
+ if ( x <= ids[0].mid && ids[x].mid == id->mid ) {
+ /* duplicate */
+ return -1;
+ }
+
+ if ( ids[0].mid >= MDB_IDL_UM_MAX ) {
+ /* too big */
+ return -2;
+
+ } else {
+ /* insert id */
+ ids[0].mid++;
+ for (i=(unsigned)ids[0].mid; i>x; i--)
+ ids[i] = ids[i-1];
+ ids[x] = *id;
+ }
+
+ return 0;
+}
+
+int mdb_mid2l_append( MDB_ID2L ids, MDB_ID2 *id )
+{
+ /* Too big? */
+ if (ids[0].mid >= MDB_IDL_UM_MAX) {
+ return -2;
+ }
+ ids[0].mid++;
+ ids[ids[0].mid] = *id;
+ return 0;
+}
+
+/** @} */
+/** @} */
diff --git a/nostrdb/midl.h b/nostrdb/midl.h
@@ -0,0 +1,186 @@
+/** @file midl.h
+ * @brief LMDB ID List header file.
+ *
+ * This file was originally part of back-bdb but has been
+ * modified for use in libmdb. Most of the macros defined
+ * in this file are unused, just left over from the original.
+ *
+ * This file is only used internally in libmdb and its definitions
+ * are not exposed publicly.
+ */
+/* $OpenLDAP$ */
+/* This work is part of OpenLDAP Software <http://www.openldap.org/>.
+ *
+ * Copyright 2000-2021 The OpenLDAP Foundation.
+ * Portions Copyright 2001-2021 Howard Chu, Symas Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted only as authorized by the OpenLDAP
+ * Public License.
+ *
+ * A copy of this license is available in the file LICENSE in the
+ * top-level directory of the distribution or, alternatively, at
+ * <http://www.OpenLDAP.org/license.html>.
+ */
+
+#ifndef _MDB_MIDL_H_
+#define _MDB_MIDL_H_
+
+#include <stddef.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** @defgroup internal LMDB Internals
+ * @{
+ */
+
+/** @defgroup idls ID List Management
+ * @{
+ */
+ /** A generic unsigned ID number. These were entryIDs in back-bdb.
+ * Preferably it should have the same size as a pointer.
+ */
+typedef size_t MDB_ID;
+
+ /** An IDL is an ID List, a sorted array of IDs. The first
+ * element of the array is a counter for how many actual
+ * IDs are in the list. In the original back-bdb code, IDLs are
+ * sorted in ascending order. For libmdb IDLs are sorted in
+ * descending order.
+ */
+typedef MDB_ID *MDB_IDL;
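+
+ /* Layout sketch: an IDL holding the IDs {9,7,2} occupies four
+  * visible slots plus one hidden capacity word:
+  *
+  *	ids[-1]   = alloclen  (see #MDB_IDL_ALLOCLEN)
+  *	ids[0]    = 3         (count)
+  *	ids[1..3] = 9, 7, 2   (descending)
+  *
+  * The MDB_IDL pointer returned by #mdb_midl_alloc() points one slot
+  * past the capacity word.
+  */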
+
+/* IDL sizes - likely should be even bigger
+ * limiting factors: sizeof(ID), thread stack size
+ */
+#define MDB_IDL_LOGN 16 /* DB_SIZE is 2^16, UM_SIZE is 2^17 */
+#define MDB_IDL_DB_SIZE (1<<MDB_IDL_LOGN)
+#define MDB_IDL_UM_SIZE (1<<(MDB_IDL_LOGN+1))
+
+#define MDB_IDL_DB_MAX (MDB_IDL_DB_SIZE-1)
+#define MDB_IDL_UM_MAX (MDB_IDL_UM_SIZE-1)
+
+#define MDB_IDL_SIZEOF(ids) (((ids)[0]+1) * sizeof(MDB_ID))
+#define MDB_IDL_IS_ZERO(ids) ( (ids)[0] == 0 )
+#define MDB_IDL_CPY( dst, src ) (memcpy( dst, src, MDB_IDL_SIZEOF( src ) ))
+#define MDB_IDL_FIRST( ids ) ( (ids)[1] )
+#define MDB_IDL_LAST( ids ) ( (ids)[(ids)[0]] )
+
+ /** Current max length of an #mdb_midl_alloc()ed IDL */
+#define MDB_IDL_ALLOCLEN( ids ) ( (ids)[-1] )
+
+ /** Append ID to IDL. The IDL must be big enough. */
+#define mdb_midl_xappend(idl, id) do { \
+ MDB_ID *xidl = (idl), xlen = ++(xidl[0]); \
+ xidl[xlen] = (id); \
+ } while (0)
+
+ /** Search for an ID in an IDL.
+ * @param[in] ids The IDL to search.
+ * @param[in] id The ID to search for.
+ * @return The index of the first ID greater than or equal to \b id.
+ */
+unsigned mdb_midl_search( MDB_IDL ids, MDB_ID id );
+
+ /** Allocate an IDL.
+ * Allocates memory for an IDL of the given size.
+ * @return IDL on success, NULL on failure.
+ */
+MDB_IDL mdb_midl_alloc(int num);
+
+ /** Free an IDL.
+ * @param[in] ids The IDL to free.
+ */
+void mdb_midl_free(MDB_IDL ids);
+
+ /** Shrink an IDL.
+ * Return the IDL to the default size if it has grown larger.
+ * @param[in,out] idp Address of the IDL to shrink.
+ */
+void mdb_midl_shrink(MDB_IDL *idp);
+
+ /** Make room for num additional elements in an IDL.
+ * @param[in,out] idp Address of the IDL.
+ * @param[in] num Number of elements to make room for.
+ * @return 0 on success, ENOMEM on failure.
+ */
+int mdb_midl_need(MDB_IDL *idp, unsigned num);
+
+ /** Append an ID onto an IDL.
+ * @param[in,out] idp Address of the IDL to append to.
+ * @param[in] id The ID to append.
+ * @return 0 on success, ENOMEM if the IDL is too large.
+ */
+int mdb_midl_append( MDB_IDL *idp, MDB_ID id );
+
+ /** Append an IDL onto an IDL.
+ * @param[in,out] idp Address of the IDL to append to.
+ * @param[in] app The IDL to append.
+ * @return 0 on success, ENOMEM if the IDL is too large.
+ */
+int mdb_midl_append_list( MDB_IDL *idp, MDB_IDL app );
+
+ /** Append an ID range onto an IDL.
+ * @param[in,out] idp Address of the IDL to append to.
+ * @param[in] id The lowest ID to append.
+ * @param[in] n Number of IDs to append.
+ * @return 0 on success, ENOMEM if the IDL is too large.
+ */
+int mdb_midl_append_range( MDB_IDL *idp, MDB_ID id, unsigned n );
+
+ /** Merge an IDL onto an IDL. The destination IDL must be big enough.
+ * @param[in] idl The IDL to merge into.
+ * @param[in] merge The IDL to merge.
+ */
+void mdb_midl_xmerge( MDB_IDL idl, MDB_IDL merge );
+
+ /** Sort an IDL.
+ * @param[in,out] ids The IDL to sort.
+ */
+void mdb_midl_sort( MDB_IDL ids );
+
+ /** An ID2 is an ID/pointer pair.
+ */
+typedef struct MDB_ID2 {
+ MDB_ID mid; /**< The ID */
+ void *mptr; /**< The pointer */
+} MDB_ID2;
+
+ /** An ID2L is an ID2 List, a sorted array of ID2s.
+ * The first element's \b mid member is a count of how many actual
+ * elements are in the array. The \b mptr member of the first element is unused.
+ * The array is sorted in ascending order by \b mid.
+ */
+typedef MDB_ID2 *MDB_ID2L;
+
+ /** Search for an ID in an ID2L.
+ * @param[in] ids The ID2L to search.
+ * @param[in] id The ID to search for.
+ * @return The index of the first ID2 whose \b mid member is greater than or equal to \b id.
+ */
+unsigned mdb_mid2l_search( MDB_ID2L ids, MDB_ID id );
+
+
+ /** Insert an ID2 into a ID2L.
+ * @param[in,out] ids The ID2L to insert into.
+ * @param[in] id The ID2 to insert.
+ * @return 0 on success, -1 if the ID was already present in the ID2L.
+ */
+int mdb_mid2l_insert( MDB_ID2L ids, MDB_ID2 *id );
+
+ /** Append an ID2 into a ID2L.
+ * @param[in,out] ids The ID2L to append into.
+ * @param[in] id The ID2 to append.
+ * @return 0 on success, -2 if the ID2L is too big.
+ */
+int mdb_mid2l_append( MDB_ID2L ids, MDB_ID2 *id );
+
+/** @} */
+/** @} */
+#ifdef __cplusplus
+}
+#endif
+#endif /* _MDB_MIDL_H_ */
diff --git a/nostrdb/nostrdb.c b/nostrdb/nostrdb.c
@@ -5,13 +5,59 @@
#include "cursor.h"
#include "random.h"
#include "sha256.h"
+#include "lmdb.h"
+#include "util.h"
+#include "threadpool.h"
+#include "protected_queue.h"
+#include "memchr.h"
#include <stdlib.h>
#include <limits.h>
+#include <assert.h>
+#include "bindings/c/profile_json_parser.h"
#include "secp256k1.h"
#include "secp256k1_ecdh.h"
#include "secp256k1_schnorrsig.h"
+// the maximum number of queue messages a thread pops or pushes in one batch
+static const int THREAD_QUEUE_BATCH = 4096;
+
+// the maximum size of inbox queues
+static const int DEFAULT_QUEUE_SIZE = 1000000;
+
+
+#define NDB_PARSED_ID (1 << 0)
+#define NDB_PARSED_PUBKEY (1 << 1)
+#define NDB_PARSED_SIG (1 << 2)
+#define NDB_PARSED_CREATED_AT (1 << 3)
+#define NDB_PARSED_KIND (1 << 4)
+#define NDB_PARSED_CONTENT (1 << 5)
+#define NDB_PARSED_TAGS (1 << 6)
+#define NDB_PARSED_ALL (NDB_PARSED_ID|NDB_PARSED_PUBKEY|NDB_PARSED_SIG|NDB_PARSED_CREATED_AT|NDB_PARSED_KIND|NDB_PARSED_CONTENT|NDB_PARSED_TAGS)
+
+
+// controls whether to continue or stop the json parser
+enum ndb_idres {
+ NDB_IDRES_CONT,
+ NDB_IDRES_STOP,
+};
+
+// closure data for the id-detecting ingest controller
+struct ndb_ingest_controller
+{
+ MDB_txn *read_txn;
+ struct ndb_lmdb *lmdb;
+};
+
+enum ndb_dbs {
+ NDB_DB_NOTE,
+ NDB_DB_META,
+ NDB_DB_PROFILE,
+ NDB_DB_NOTE_ID,
+ NDB_DB_PROFILE_PK,
+ NDB_DBS,
+};
+
struct ndb_json_parser {
const char *json;
int json_len;
@@ -22,6 +68,895 @@ struct ndb_json_parser {
int num_tokens;
};
+// useful to pass to threads on its own
+struct ndb_lmdb {
+ MDB_env *env;
+ MDB_dbi dbs[NDB_DBS];
+};
+
+struct ndb_writer {
+ struct ndb_lmdb *lmdb;
+
+ void *queue_buf;
+ int queue_buflen;
+ pthread_t thread_id;
+
+ struct prot_queue inbox;
+};
+
+struct ndb_ingester {
+ struct threadpool tp;
+ struct ndb_writer *writer;
+};
+
+
+struct ndb {
+ struct ndb_lmdb lmdb;
+ struct ndb_ingester ingester;
+ struct ndb_writer writer;
+	// lmdb environment handles, etc
+};
+
+// A clustered key with an id and a timestamp
+struct ndb_tsid {
+ unsigned char id[32];
+ uint64_t timestamp;
+};
+
+/** From LMDB: Compare two items lexically */
+static int mdb_cmp_memn(const MDB_val *a, const MDB_val *b) {
+ int diff;
+ ssize_t len_diff;
+ unsigned int len;
+
+ len = a->mv_size;
+ len_diff = (ssize_t) a->mv_size - (ssize_t) b->mv_size;
+ if (len_diff > 0) {
+ len = b->mv_size;
+ len_diff = 1;
+ }
+
+ diff = memcmp(a->mv_data, b->mv_data, len);
+ return diff ? diff : len_diff<0 ? -1 : len_diff;
+}
+
+static int ndb_tsid_compare(const MDB_val *a, const MDB_val *b)
+{
+ struct ndb_tsid *tsa, *tsb;
+ MDB_val a2 = *a, b2 = *b;
+ a2.mv_size = sizeof(tsa->id);
+ b2.mv_size = sizeof(tsb->id);
+
+ int cmp = mdb_cmp_memn(&a2, &b2);
+ if (cmp) return cmp;
+
+ tsa = a->mv_data;
+ tsb = b->mv_data;
+
+ if (tsa->timestamp < tsb->timestamp)
+ return -1;
+ else if (tsa->timestamp > tsb->timestamp)
+ return 1;
+ return 0;
+}
+
+static inline void ndb_tsid_low(struct ndb_tsid *key, unsigned char *id)
+{
+ memcpy(key->id, id, 32);
+ key->timestamp = 0;
+}
+
+static inline void ndb_tsid_init(struct ndb_tsid *key, unsigned char *id,
+ uint64_t timestamp)
+{
+ memcpy(key->id, id, 32);
+	key->timestamp = timestamp;
+}
+
+// useful for range-searching for the latest key with a clustered created_at time
+static inline void ndb_tsid_high(struct ndb_tsid *key, const unsigned char *id)
+{
+ memcpy(key->id, id, 32);
+ key->timestamp = UINT64_MAX;
+}
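+
+// usage sketch: bracket every entry for a given id regardless of time
+//
+//	struct ndb_tsid low, high;
+//	ndb_tsid_low(&low, id);    // (id, 0)
+//	ndb_tsid_high(&high, id);  // (id, UINT64_MAX)
+//
+// with ndb_tsid_compare as the comparator, MDB_SET_RANGE on high lands
+// just past the newest entry for this id (see ndb_get_tsid below)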
+
+enum ndb_ingester_msgtype {
+ NDB_INGEST_EVENT, // write json to the ingester queue for processing
+ NDB_INGEST_QUIT, // kill ingester thread immediately
+};
+
+enum ndb_writer_msgtype {
+ NDB_WRITER_QUIT, // kill thread immediately
+ NDB_WRITER_NOTE, // write a note to the db
+ NDB_WRITER_PROFILE, // write a profile to the db
+};
+
+struct ndb_ingester_event {
+ char *json;
+ int len;
+};
+
+struct ndb_writer_note {
+ struct ndb_note *note;
+ size_t note_len;
+};
+
+struct ndb_writer_profile {
+ struct ndb_writer_note note;
+ void *profile_flatbuf;
+ size_t profile_len;
+};
+
+struct ndb_ingester_msg {
+ enum ndb_ingester_msgtype type;
+ union {
+ struct ndb_ingester_event event;
+ };
+};
+
+struct ndb_writer_msg {
+ enum ndb_writer_msgtype type;
+ union {
+ struct ndb_writer_note note;
+ struct ndb_writer_profile profile;
+ };
+};
+
+int ndb_note_verify(void *ctx, unsigned char pubkey[32], unsigned char id[32],
+ unsigned char sig[64])
+{
+ secp256k1_xonly_pubkey xonly_pubkey;
+ int ok;
+
+ ok = secp256k1_xonly_pubkey_parse((secp256k1_context*)ctx, &xonly_pubkey,
+ pubkey) != 0;
+ if (!ok) return 0;
+
+ ok = secp256k1_schnorrsig_verify((secp256k1_context*)ctx, sig, id, 32,
+ &xonly_pubkey) > 0;
+ if (!ok) return 0;
+
+ return 1;
+}
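+
+// usage sketch: verification needs a secp256k1 context created with
+// SECP256K1_CONTEXT_VERIFY, as the ingester threads below do
+//
+//	secp256k1_context *ctx =
+//		secp256k1_context_create(SECP256K1_CONTEXT_VERIFY);
+//	if (!ndb_note_verify(ctx, note->pubkey, note->id, note->sig))
+//		ndb_debug("dropping note with a bad signature\n");
+//	secp256k1_context_destroy(ctx);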
+
+static inline int ndb_writer_queue_msgs(struct ndb_writer *writer,
+ struct ndb_writer_msg *msgs,
+ int num_msgs)
+{
+ return prot_queue_push_all(&writer->inbox, msgs, num_msgs);
+}
+
+static int ndb_writer_queue_note(struct ndb_writer *writer,
+ struct ndb_note *note, size_t note_len)
+{
+ struct ndb_writer_msg msg;
+ msg.type = NDB_WRITER_NOTE;
+
+ msg.note.note = note;
+ msg.note.note_len = note_len;
+
+ return prot_queue_push(&writer->inbox, &msg);
+}
+
+// get some value based on a clustered id key
+int ndb_get_tsid(MDB_txn *txn, struct ndb_lmdb *lmdb, enum ndb_dbs db,
+ const unsigned char *id, MDB_val *val)
+{
+ MDB_val k, v;
+ MDB_cursor *cur;
+ struct ndb_tsid tsid;
+ int success = 0;
+
+ ndb_tsid_high(&tsid, id);
+ k.mv_data = &tsid;
+ k.mv_size = sizeof(tsid);
+
+ mdb_cursor_open(txn, lmdb->dbs[db], &cur);
+
+ // Position cursor at the next key greater than or equal to the specified key
+ if (mdb_cursor_get(cur, &k, &v, MDB_SET_RANGE)) {
+ // Failed :(. It could be the last element?
+ if (mdb_cursor_get(cur, &k, &v, MDB_LAST))
+ goto cleanup;
+ } else {
+ // if set range worked and our key exists, it should be
+ // the one right before this one
+ if (mdb_cursor_get(cur, &k, &v, MDB_PREV))
+ goto cleanup;
+ }
+
+ if (memcmp(k.mv_data, id, 32) == 0) {
+ *val = v;
+ success = 1;
+ }
+
+cleanup:
+ mdb_cursor_close(cur);
+ return success;
+}
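+
+// worked example: with keys (A,5), (A,9), (B,3) in the db, looking up
+// id A probes with (A,UINT64_MAX). MDB_SET_RANGE lands on (B,3),
+// MDB_PREV steps back to (A,9), and the 32-byte memcmp confirms the
+// id matches -- so the newest entry for A is returned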
+
+struct ndb_note *ndb_get_note_by_id(struct ndb *ndb, const unsigned char *id)
+{
+ MDB_val k, v;
+ MDB_txn *txn;
+
+ if (mdb_txn_begin(ndb->lmdb.env, 0, 0, &txn)) {
+ ndb_debug("ndb_get_note_by_id: mdb_txn_begin failed\n");
+ return NULL;
+ }
+
+	if (!ndb_get_tsid(txn, &ndb->lmdb, NDB_DB_NOTE_ID, id, &k)) {
+		ndb_debug("ndb_get_note_by_id: ndb_get_tsid failed\n");
+		mdb_txn_abort(txn);
+		return NULL;
+	}
+
+	if (mdb_get(txn, ndb->lmdb.dbs[NDB_DB_NOTE], &k, &v)) {
+		ndb_debug("ndb_get_note_by_id: mdb_get note failed\n");
+		mdb_txn_abort(txn);
+		return NULL;
+	}
+
+ mdb_txn_abort(txn);
+
+ return (struct ndb_note *)v.mv_data;
+}
+
+static int ndb_has_note(MDB_txn *txn, struct ndb_lmdb *lmdb, const unsigned char *id)
+{
+ MDB_val val;
+
+ if (!ndb_get_tsid(txn, lmdb, NDB_DB_NOTE_ID, id, &val))
+ return 0;
+
+ return 1;
+}
+
+static enum ndb_idres ndb_ingester_json_controller(void *data, const char *hexid)
+{
+ unsigned char id[32];
+ struct ndb_ingest_controller *c = data;
+
+ hex_decode(hexid, 64, id, sizeof(id));
+
+ // let's see if we already have it
+
+ if (!ndb_has_note(c->read_txn, c->lmdb, id))
+ return NDB_IDRES_CONT;
+
+ return NDB_IDRES_STOP;
+}
+
+
+static int ndb_process_profile_note(struct ndb_note *note, void **profile,
+ size_t *profile_len)
+{
+ int res;
+
+ flatcc_builder_t builder;
+ flatcc_json_parser_t json_parser;
+
+ flatcc_builder_init(&builder);
+
+ //printf("parsing profile '%.*s'\n", note->content_length, ndb_note_content(note));
+ res = profile_parse_json(&builder, &json_parser,
+ ndb_note_content(note),
+ note->content_length,
+ flatcc_json_parser_f_skip_unknown);
+
+	if (res != 0) {
+		ndb_debug("profile_parse_json failed %d '%.*s'\n", res,
+			  note->content_length, ndb_note_content(note));
+		flatcc_builder_clear(&builder);
+		return 0;
+	}
+
+	*profile = flatcc_builder_finalize_aligned_buffer(&builder, profile_len);
+	flatcc_builder_clear(&builder);
+	return 1;
+}
+
+
+static int ndb_ingester_process_event(secp256k1_context *ctx,
+ struct ndb_ingester *ingester,
+ struct ndb_ingester_event *ev,
+ struct ndb_writer_msg *out,
+ MDB_txn *read_txn
+ )
+{
+ struct ndb_tce tce;
+ struct ndb_note *note;
+ struct ndb_ingest_controller controller;
+ struct ndb_id_cb cb;
+ void *buf, *flatbuf;
+ size_t bufsize, note_size, profile_len;
+
+ // we will use this to check if we already have it in the DB during
+ // ID parsing
+ controller.read_txn = read_txn;
+ controller.lmdb = ingester->writer->lmdb;
+ cb.fn = ndb_ingester_json_controller;
+ cb.data = &controller;
+
+ // since we're going to be passing this allocated note to a different
+ // thread, we can't use thread-local buffers. just allocate a block
+ bufsize = max(ev->len * 8.0, 4096);
+ buf = malloc(bufsize);
+ if (!buf) {
+ ndb_debug("couldn't malloc buf\n");
+ return 0;
+ }
+
+ note_size =
+ ndb_ws_event_from_json(ev->json, ev->len, &tce, buf, bufsize, &cb);
+
+ if (note_size == -42) {
+ // we already have this!
+ //ndb_debug("already have id??\n");
+ goto cleanup;
+ } else if (note_size == 0) {
+ ndb_debug("failed to parse '%.*s'\n", ev->len, ev->json);
+ goto cleanup;
+ }
+
+ //ndb_debug("parsed evtype:%d '%.*s'\n", tce.evtype, ev->len, ev->json);
+
+ switch (tce.evtype) {
+ case NDB_TCE_NOTICE: goto cleanup;
+ case NDB_TCE_EOSE: goto cleanup;
+ case NDB_TCE_OK: goto cleanup;
+ case NDB_TCE_EVENT:
+ note = tce.event.note;
+ if (note != buf) {
+ ndb_debug("note buffer not equal to malloc'd buffer\n");
+ goto cleanup;
+ }
+
+ // Verify! If it's an invalid note we don't need to
+		// bother writing it to the database
+ if (!ndb_note_verify(ctx, note->pubkey, note->id, note->sig)) {
+ ndb_debug("signature verification failed\n");
+ goto cleanup;
+ }
+
+ // we didn't find anything. let's send it
+ // to the writer thread
+		if (!(note = realloc(note, note_size))) {
+			ndb_debug("note realloc failed\n");
+			goto cleanup;
+		}
+
+ if (note->kind == 0 &&
+ ndb_process_profile_note(note, &flatbuf, &profile_len)) {
+ out->type = NDB_WRITER_PROFILE;
+ out->profile.note.note = note;
+ out->profile.note.note_len = note_size;
+ out->profile.profile_flatbuf = flatbuf;
+ out->profile.profile_len = profile_len;
+ } else {
+ out->type = NDB_WRITER_NOTE;
+ out->note.note = note;
+ out->note.note_len = note_size;
+ }
+
+ // there's nothing left to do with the original json, so free it
+ free(ev->json);
+ return 1;
+ }
+
+cleanup:
+ free(ev->json);
+ free(buf);
+
+ return 0;
+}
+
+static uint64_t ndb_get_last_key(MDB_txn *txn, MDB_dbi db)
+{
+ MDB_cursor *mc;
+ MDB_val key, val;
+
+ if (mdb_cursor_open(txn, db, &mc))
+ return 0;
+
+ if (mdb_cursor_get(mc, &key, &val, MDB_LAST)) {
+ mdb_cursor_close(mc);
+ return 0;
+ }
+
+ mdb_cursor_close(mc);
+
+ assert(key.mv_size == 8);
+ return *((uint64_t*)key.mv_data);
+}
+
+static int ndb_write_profile(struct ndb_lmdb *lmdb, MDB_txn *txn,
+ struct ndb_writer_profile *profile)
+{
+ uint64_t profile_key;
+ struct ndb_tsid tsid;
+ struct ndb_note *note;
+ int rc;
+
+ MDB_val key, val;
+ MDB_dbi profile_db, pk_db;
+
+ note = profile->note.note;
+
+ // get dbs
+ profile_db = lmdb->dbs[NDB_DB_PROFILE];
+ pk_db = lmdb->dbs[NDB_DB_PROFILE_PK];
+
+ // get new key
+ profile_key = ndb_get_last_key(txn, profile_db) + 1;
+
+ // write profile to profile store
+ key.mv_data = &profile_key;
+ key.mv_size = sizeof(profile_key);
+ val.mv_data = profile->profile_flatbuf;
+ val.mv_size = profile->profile_len;
+ //ndb_debug("profile_len %ld\n", profile->profile_len);
+
+ if ((rc = mdb_put(txn, profile_db, &key, &val, 0))) {
+ ndb_debug("write profile to db failed: %s\n", mdb_strerror(rc));
+ return 0;
+ }
+
+ // write profile_pk + created_at index
+ ndb_tsid_init(&tsid, note->pubkey, note->created_at);
+
+ key.mv_data = &tsid;
+ key.mv_size = sizeof(tsid);
+ val.mv_data = &profile_key;
+ val.mv_size = sizeof(profile_key);
+
+ if ((rc = mdb_put(txn, pk_db, &key, &val, 0))) {
+ ndb_debug("write profile_pk(%" PRIu64 ") to db failed: %s\n",
+ profile_key, mdb_strerror(rc));
+ return 0;
+ }
+
+ return 1;
+}
+
+static uint64_t ndb_write_note(struct ndb_lmdb *lmdb, MDB_txn *txn,
+ struct ndb_writer_note *note)
+{
+ int rc;
+ uint64_t note_key;
+ struct ndb_tsid tsid;
+ MDB_dbi note_db, id_db;
+ MDB_val key, val;
+
+ // get dbs
+ note_db = lmdb->dbs[NDB_DB_NOTE];
+ id_db = lmdb->dbs[NDB_DB_NOTE_ID];
+
+ // get new key
+ note_key = ndb_get_last_key(txn, note_db) + 1;
+
+ // write note to event store
+	key.mv_data = &note_key;
+ key.mv_size = sizeof(note_key);
+ val.mv_data = note->note;
+ val.mv_size = note->note_len;
+
+ if ((rc = mdb_put(txn, note_db, &key, &val, 0))) {
+ ndb_debug("write note to db failed: %s\n", mdb_strerror(rc));
+ return 0;
+ }
+
+ // write id index key clustered with created_at
+ ndb_tsid_init(&tsid, note->note->id, note->note->created_at);
+
+ key.mv_data = &tsid;
+ key.mv_size = sizeof(tsid);
+	val.mv_data = &note_key;
+ val.mv_size = sizeof(note_key);
+
+ if ((rc = mdb_put(txn, id_db, &key, &val, 0))) {
+ ndb_debug("write note id index to db failed: %s\n",
+ mdb_strerror(rc));
+ return 0;
+ }
+
+ return note_key;
+}
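+
+/*
+ * Lookup sketch (an assumption, not part of this commit): if
+ * ndb_tsid_compare orders entries newest-first within an id, the
+ * latest note key for a given id could be recovered with a range
+ * cursor over the id index:
+ *
+ *   struct ndb_tsid tsid;
+ *   MDB_val k, v;
+ *   ndb_tsid_init(&tsid, id, UINT64_MAX);
+ *   k.mv_data = &tsid;
+ *   k.mv_size = sizeof(tsid);
+ *   if (mdb_cursor_get(cur, &k, &v, MDB_SET_RANGE) == 0)
+ *           note_key = *(uint64_t*)v.mv_data; // caller checks id match
+ */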
+
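+// Writer thread: drains its inbox in batches and commits each batch of
+// notes and profiles in a single LMDB write transaction.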
+static void *ndb_writer_thread(void *data)
+{
+ struct ndb_writer *writer = data;
+ struct ndb_writer_msg msgs[THREAD_QUEUE_BATCH], *msg;
+ int i, popped, done, any_note;
+ MDB_txn *txn;
+
+ done = 0;
+ while (!done) {
+ txn = NULL;
+ popped = prot_queue_pop_all(&writer->inbox, msgs, THREAD_QUEUE_BATCH);
+ ndb_debug("writer popped %d items\n", popped);
+
+ any_note = 0;
+ for (i = 0 ; i < popped; i++) {
+ msg = &msgs[i];
+ switch (msg->type) {
+ case NDB_WRITER_NOTE: any_note = 1; break;
+ case NDB_WRITER_PROFILE: any_note = 1; break;
+ case NDB_WRITER_QUIT: break;
+ }
+ }
+
+ if (any_note && mdb_txn_begin(writer->lmdb->env, NULL, 0, &txn))
+ {
+ fprintf(stderr, "writer thread txn_begin failed");
+ // should definitely not happen unless DB is full
+ // or something ?
+ assert(false);
+ }
+
+ for (i = 0; i < popped; i++) {
+ msg = &msgs[i];
+
+ switch (msg->type) {
+			case NDB_WRITER_QUIT:
+				// mark done; the rest of this batch is still
+				// written out before the thread exits
+				done = 1;
+				continue;
+			case NDB_WRITER_PROFILE:
+				// write the profile's underlying note first
+				ndb_write_note(writer->lmdb, txn, &msg->profile.note);
+				// TODO: save note_key with profile
+				ndb_write_profile(writer->lmdb, txn, &msg->profile);
+ break;
+ case NDB_WRITER_NOTE:
+ ndb_write_note(writer->lmdb, txn, &msg->note);
+ break;
+ }
+ }
+
+ // commit writes
+ if (any_note && mdb_txn_commit(txn)) {
+ fprintf(stderr, "writer thread txn commit failed");
+ assert(false);
+ }
+
+
+ // free notes
+ for (i = 0; i < popped; i++) {
+ msg = &msgs[i];
+ if (msg->type == NDB_WRITER_NOTE)
+ free(msg->note.note);
+ else if (msg->type == NDB_WRITER_PROFILE) {
+ free(msg->profile.profile_flatbuf);
+ free(msg->profile.note.note);
+ }
+ }
+ }
+
+ ndb_debug("quitting writer thread\n");
+ return NULL;
+}
+
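+// Ingester thread: pulls event json off its inbox, parses and verifies
+// it (holding a read-only LMDB txn so duplicate ids can be skipped),
+// then forwards accepted notes to the writer queue.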
+static void *ndb_ingester_thread(void *data)
+{
+ secp256k1_context *ctx;
+ struct thread *thread = data;
+ struct ndb_ingester *ingester = (struct ndb_ingester *)thread->ctx;
+ struct ndb_lmdb *lmdb = ingester->writer->lmdb;
+ struct ndb_ingester_msg msgs[THREAD_QUEUE_BATCH], *msg;
+ struct ndb_writer_msg outs[THREAD_QUEUE_BATCH], *out;
+ int i, to_write, popped, done, any_event;
+ MDB_txn *read_txn = NULL;
+
+ ctx = secp256k1_context_create(SECP256K1_CONTEXT_VERIFY);
+ ndb_debug("started ingester thread\n");
+
+ done = 0;
+ while (!done) {
+ to_write = 0;
+ any_event = 0;
+
+ popped = prot_queue_pop_all(&thread->inbox, msgs, THREAD_QUEUE_BATCH);
+ ndb_debug("ingester popped %d items\n", popped);
+
+ for (i = 0; i < popped; i++) {
+ msg = &msgs[i];
+ if (msg->type == NDB_INGEST_EVENT) {
+ any_event = 1;
+ break;
+ }
+ }
+
+ if (any_event)
+ mdb_txn_begin(lmdb->env, NULL, MDB_RDONLY, &read_txn);
+
+ for (i = 0; i < popped; i++) {
+ msg = &msgs[i];
+ switch (msg->type) {
+ case NDB_INGEST_QUIT:
+ done = 1;
+ break;
+
+ case NDB_INGEST_EVENT:
+ out = &outs[to_write];
+ if (ndb_ingester_process_event(ctx, ingester,
+ &msg->event, out,
+ read_txn)) {
+ to_write++;
+				}
+				break;
+ }
+ }
+
+ if (any_event)
+ mdb_txn_abort(read_txn);
+
+ if (to_write > 0) {
+ //ndb_debug("pushing %d events to write queue\n", to_write);
+ if (!ndb_writer_queue_msgs(ingester->writer, outs, to_write)) {
+ ndb_debug("failed pushing %d events to write queue\n", to_write);
+ }
+ }
+ }
+
+ ndb_debug("quitting ingester thread\n");
+ secp256k1_context_destroy(ctx);
+ return NULL;
+}
+
+static int ndb_writer_init(struct ndb_writer *writer, struct ndb_lmdb *lmdb)
+{
+ writer->lmdb = lmdb;
+ writer->queue_buflen = sizeof(struct ndb_writer_msg) * DEFAULT_QUEUE_SIZE;
+ writer->queue_buf = malloc(writer->queue_buflen);
+ if (writer->queue_buf == NULL) {
+ fprintf(stderr, "ndb: failed to allocate space for writer queue");
+ return 0;
+ }
+
+ // init the writer queue.
+ prot_queue_init(&writer->inbox, writer->queue_buf,
+ writer->queue_buflen, sizeof(struct ndb_writer_msg));
+
+ // spin up the writer thread
+ if (pthread_create(&writer->thread_id, NULL, ndb_writer_thread, writer))
+ {
+ fprintf(stderr, "ndb writer thread failed to create\n");
+ return 0;
+ }
+
+ return 1;
+}
+
+// initialize the ingester queue and then spawn the thread
+static int ndb_ingester_init(struct ndb_ingester *ingester,
+ struct ndb_writer *writer, int num_threads)
+{
+ int elem_size, num_elems;
+ static struct ndb_ingester_msg quit_msg = { .type = NDB_INGEST_QUIT };
+
+ // TODO: configurable queue sizes
+ elem_size = sizeof(struct ndb_ingester_msg);
+ num_elems = DEFAULT_QUEUE_SIZE;
+
+ ingester->writer = writer;
+
+ if (!threadpool_init(&ingester->tp, num_threads, elem_size, num_elems,
+ &quit_msg, ingester, ndb_ingester_thread))
+ {
+ fprintf(stderr, "ndb ingester threadpool failed to init\n");
+ return 0;
+ }
+
+ return 1;
+}
+
+static int ndb_writer_destroy(struct ndb_writer *writer)
+{
+ struct ndb_writer_msg msg;
+
+ // kill thread
+ msg.type = NDB_WRITER_QUIT;
+	if (!prot_queue_push(&writer->inbox, &msg)) {
+		// queue is too full to push the quit message; cancel the
+		// thread instead (pthread_exit would exit *this* thread)
+		pthread_cancel(writer->thread_id);
+	}
+	pthread_join(writer->thread_id, NULL);
+
+ // cleanup
+ prot_queue_destroy(&writer->inbox);
+
+ free(writer->queue_buf);
+
+ return 1;
+}
+
+static int ndb_ingester_destroy(struct ndb_ingester *ingester)
+{
+ threadpool_destroy(&ingester->tp);
+ return 1;
+}
+
+static int ndb_ingester_queue_event(struct ndb_ingester *ingester,
+ char *json, int len)
+{
+ struct ndb_ingester_msg msg;
+ msg.type = NDB_INGEST_EVENT;
+
+ msg.event.json = json;
+ msg.event.len = len;
+
+ return threadpool_dispatch(&ingester->tp, &msg);
+}
+
+static int ndb_init_lmdb(const char *filename, struct ndb_lmdb *lmdb, size_t mapsize)
+{
+ int rc;
+ MDB_txn *txn;
+
+ if ((rc = mdb_env_create(&lmdb->env))) {
+ fprintf(stderr, "mdb_env_create failed, error %d\n", rc);
+ return 0;
+ }
+
+ if ((rc = mdb_env_set_mapsize(lmdb->env, mapsize))) {
+ fprintf(stderr, "mdb_env_set_mapsize failed, error %d\n", rc);
+ return 0;
+ }
+
+ if ((rc = mdb_env_set_maxdbs(lmdb->env, NDB_DBS))) {
+ fprintf(stderr, "mdb_env_set_mapsize failed, error %d\n", rc);
+ return 0;
+ }
+
+ if ((rc = mdb_env_open(lmdb->env, filename, 0, 0664))) {
+ fprintf(stderr, "mdb_env_open failed, error %d\n", rc);
+ return 0;
+ }
+
+ // Initialize DBs
+ if ((rc = mdb_txn_begin(lmdb->env, NULL, 0, &txn))) {
+ fprintf(stderr, "mdb_txn_begin failed, error %d\n", rc);
+ return 0;
+ }
+
+ // note flatbuffer db
+ if ((rc = mdb_dbi_open(txn, "note", MDB_CREATE | MDB_INTEGERKEY, &lmdb->dbs[NDB_DB_NOTE]))) {
+ fprintf(stderr, "mdb_dbi_open event failed, error %d\n", rc);
+ return 0;
+ }
+
+ // note metadata db
+ if ((rc = mdb_dbi_open(txn, "meta", MDB_CREATE | MDB_INTEGERKEY, &lmdb->dbs[NDB_DB_META]))) {
+ fprintf(stderr, "mdb_dbi_open meta failed, error %d\n", rc);
+ return 0;
+ }
+
+ // profile flatbuffer db
+ if ((rc = mdb_dbi_open(txn, "profile", MDB_CREATE | MDB_INTEGERKEY, &lmdb->dbs[NDB_DB_PROFILE]))) {
+ fprintf(stderr, "mdb_dbi_open profile failed, error %d\n", rc);
+ return 0;
+ }
+
+ // id+ts index flags
+ unsigned int tsid_flags = MDB_CREATE | MDB_DUPSORT | MDB_DUPFIXED;
+
+ // index dbs
+ if ((rc = mdb_dbi_open(txn, "note_id", tsid_flags, &lmdb->dbs[NDB_DB_NOTE_ID]))) {
+ fprintf(stderr, "mdb_dbi_open id failed, error %d\n", rc);
+ return 0;
+ }
+ mdb_set_compare(txn, lmdb->dbs[NDB_DB_NOTE_ID], ndb_tsid_compare);
+
+ if ((rc = mdb_dbi_open(txn, "profile_pk", tsid_flags, &lmdb->dbs[NDB_DB_PROFILE_PK]))) {
+ fprintf(stderr, "mdb_dbi_open id failed, error %d\n", rc);
+ return 0;
+ }
+ mdb_set_compare(txn, lmdb->dbs[NDB_DB_PROFILE_PK], ndb_tsid_compare);
+
+ // Commit the transaction
+ if ((rc = mdb_txn_commit(txn))) {
+ fprintf(stderr, "mdb_txn_commit failed, error %d\n", rc);
+ return 0;
+ }
+
+ return 1;
+}
+
+int ndb_init(struct ndb **pndb, const char *filename, size_t mapsize, int ingester_threads)
+{
+ struct ndb *ndb;
+ //MDB_dbi ind_id; // TODO: ind_pk, etc
+
+ ndb = *pndb = calloc(1, sizeof(struct ndb));
+ if (ndb == NULL) {
+ fprintf(stderr, "ndb_init: malloc failed\n");
+ return 0;
+ }
+
+ if (!ndb_init_lmdb(filename, &ndb->lmdb, mapsize))
+ return 0;
+
+ if (!ndb_writer_init(&ndb->writer, &ndb->lmdb)) {
+ fprintf(stderr, "ndb_writer_init failed");
+ return 0;
+ }
+
+ if (!ndb_ingester_init(&ndb->ingester, &ndb->writer, ingester_threads)) {
+ fprintf(stderr, "failed to initialize %d ingester thread(s)",
+ ingester_threads);
+ return 0;
+ }
+
+	// success: lmdb env is initialized, writer and ingester threads are running
+ return 1;
+}
+
+void ndb_destroy(struct ndb *ndb)
+{
+ if (ndb == NULL)
+ return;
+
+ // ingester depends on writer and must be destroyed first
+ ndb_ingester_destroy(&ndb->ingester);
+ ndb_writer_destroy(&ndb->writer);
+
+ mdb_env_close(ndb->lmdb.env);
+
+ free(ndb);
+}
+
+// Process a nostr event, i.e.: ["EVENT", "subid", {"content":"..."}...]
+//
+// This function returns as soon as possible: it copies the passed
+// json and queues it up for processing. Ingester threads then take
+// the json and process it.
+//
+// Processing:
+//
+// 1. The event is parsed into an ndb_note. While parsing, a quick
+//    lookup is made on the note id; if we already have the note we
+//    stop, skipping the rest of json parsing and validation.
+// 2. The note's signature is verified.
+// 3. Once validated, the note is passed to the writer queue for
+//    writing to LMDB.
+//
+int ndb_process_event(struct ndb *ndb, const char *json, int json_len)
+{
+ // Since we need to return as soon as possible, and we're not
+ // making any assumptions about the lifetime of the string, we
+ // definitely need to copy the json here. In the future once we
+ // have our thread that manages a websocket connection, we can
+ // avoid the copy and just use the buffer we get from that
+ // thread.
+ char *json_copy = strdupn(json, json_len);
+ if (json_copy == NULL)
+ return 0;
+
+ return ndb_ingester_queue_event(&ndb->ingester, json_copy, json_len);
+}
+
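+// Usage sketch (illustrative: the db path, mapsize, and thread count
+// are arbitrary values, not defaults from this commit):
+//
+//   struct ndb *ndb;
+//   if (ndb_init(&ndb, "./db", 1024UL * 1024UL * 100UL, 4)) {
+//           ndb_process_event(ndb, json, json_len);
+//           ndb_destroy(ndb); // joins the ingester and writer threads
+//   }
+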
+int ndb_process_events(struct ndb *ndb, const char *ldjson, size_t json_len)
+{
+	const char *start, *end, *very_end;
+	start = ldjson;
+	very_end = ldjson + json_len;
+#if DEBUG
+ int processed = 0;
+#endif
+
+ while ((end = fast_strchr(start, '\n', very_end - start))) {
+ //printf("processing '%.*s'\n", (int)(end-start), start);
+ if (!ndb_process_event(ndb, start, end - start)) {
+ ndb_debug("ndb_process_event failed\n");
+ return 0;
+ }
+ start = end + 1;
+#if DEBUG
+ processed++;
+#endif
+ }
+
+ ndb_debug("ndb_process_events: processed %d events\n", processed);
+
+ return 1;
+}
+
static inline int cursor_push_tag(struct cursor *cur, struct ndb_tag *tag)
{
return cursor_push_u16(cur, tag->count);
@@ -97,15 +1032,34 @@ static inline int ndb_json_parser_init(struct ndb_json_parser *p,
return 1;
}
-static inline int ndb_json_parser_parse(struct ndb_json_parser *p)
+static inline int ndb_json_parser_parse(struct ndb_json_parser *p,
+ struct ndb_id_cb *cb)
{
+ jsmntok_t *tok;
int cap = ((unsigned char *)p->toks_end - (unsigned char*)p->toks)/sizeof(*p->toks);
- p->num_tokens =
- jsmn_parse(&p->json_parser, p->json, p->json_len, p->toks, cap);
+ int res =
+ jsmn_parse(&p->json_parser, p->json, p->json_len, p->toks, cap, cb != NULL);
+
+	// the id callback fired mid-parse: our patched jsmn_parse signals
+	// this with the -42 sentinel
+	if (res == -42) {
+ tok = &p->toks[p->json_parser.toknext-1];
+ switch (cb->fn(cb->data, p->json + tok->start)) {
+ case NDB_IDRES_CONT:
+ res = jsmn_parse(&p->json_parser, p->json, p->json_len,
+ p->toks, cap, 0);
+ break;
+ case NDB_IDRES_STOP:
+ return -42;
+ }
+ } else if (res == 0) {
+ return 0;
+ }
+
+ p->num_tokens = res;
p->i = 0;
- return p->num_tokens;
+ return 1;
}
static int cursor_push_unescaped_char(struct cursor *cur, char c1, char c2)
@@ -117,7 +1071,6 @@ static int cursor_push_unescaped_char(struct cursor *cur, char c1, char c2)
case 'b': return cursor_push_byte(cur, '\b');
case 'f': return cursor_push_byte(cur, '\f');
case '\\': return cursor_push_byte(cur, '\\');
- case '/': return cursor_push_byte(cur, '/');
case '"': return cursor_push_byte(cur, '"');
case 'u':
// these aren't handled yet
@@ -251,7 +1204,8 @@ static int ndb_event_commitment(struct ndb_note *ev, unsigned char *buf, int buf
make_cursor(buf, buf + buflen, &cur);
- snprintf(timebuf, sizeof(timebuf), "%d", ev->created_at);
+ // TODO: update in 2106 ...
+ snprintf(timebuf, sizeof(timebuf), "%d", (uint32_t)ev->created_at);
snprintf(kindbuf, sizeof(kindbuf), "%d", ev->kind);
ok =
@@ -355,7 +1309,7 @@ int ndb_builder_finalize(struct ndb_builder *builder, struct ndb_note **note,
unsigned char *end = builder->mem.end;
unsigned char *start = (unsigned char*)(*note) + total_size;
- ndb_builder_set_pubkey(builder, keypair->pubkey);
+ ndb_builder_set_pubkey(builder, keypair->pubkey);
if (!ndb_calculate_id(builder->note, start, end - start))
return 0;
@@ -654,7 +1608,8 @@ static int parse_unsigned_int(const char *start, int len, unsigned int *num)
}
int ndb_ws_event_from_json(const char *json, int len, struct ndb_tce *tce,
- unsigned char *buf, int bufsize)
+ unsigned char *buf, int bufsize,
+ struct ndb_id_cb *cb)
{
jsmntok_t *tok = NULL;
int tok_len, res;
@@ -664,11 +1619,22 @@ int ndb_ws_event_from_json(const char *json, int len, struct ndb_tce *tce,
tce->subid = "";
ndb_json_parser_init(&parser, json, len, buf, bufsize);
- if ((res = ndb_json_parser_parse(&parser)) < 0)
+ if ((res = ndb_json_parser_parse(&parser, cb)) < 0)
return res;
- if (parser.num_tokens < 3 || parser.toks[0].type != JSMN_ARRAY)
+ if (parser.num_tokens < 3 || parser.toks[0].type != JSMN_ARRAY) {
+ /*
+ tok = &parser.toks[parser.json_parser.toknext-1];
+ ndb_debug("failing at not enough takens (%d) or != JSMN_ARRAY @ '%.*s', '%.*s'\n",
+ parser.num_tokens, 10, json + parser.json_parser.pos,
+ toksize(tok), json + tok->start);
+ tok = &parser.toks[parser.json_parser.toknext-2];
+ ndb_debug("failing at not enough takens (%d) or != JSMN_ARRAY @ '%.*s', '%.*s'\n",
+ parser.num_tokens, 10, json + parser.json_parser.pos,
+ toksize(tok), json + tok->start);
+ */
return 0;
+ }
parser.i = 1;
tok = &parser.toks[parser.i++];
@@ -738,7 +1704,9 @@ int ndb_parse_json_note(struct ndb_json_parser *parser, struct ndb_note **note)
unsigned char hexbuf[64];
const char *json = parser->json;
const char *start;
- int i, tok_len;
+ int i, tok_len, parsed;
+
+ parsed = 0;
if (parser->toks[parser->i].type != JSMN_OBJECT)
return 0;
@@ -758,17 +1726,19 @@ int ndb_parse_json_note(struct ndb_json_parser *parser, struct ndb_note **note)
// pubkey
tok = &parser->toks[i+1];
hex_decode(json + tok->start, toksize(tok), hexbuf, sizeof(hexbuf));
+ parsed |= NDB_PARSED_PUBKEY;
ndb_builder_set_pubkey(&parser->builder, hexbuf);
} else if (tok_len == 2 && start[0] == 'i' && start[1] == 'd') {
// id
tok = &parser->toks[i+1];
hex_decode(json + tok->start, toksize(tok), hexbuf, sizeof(hexbuf));
- // TODO: validate id
+ parsed |= NDB_PARSED_ID;
ndb_builder_set_id(&parser->builder, hexbuf);
} else if (tok_len == 3 && start[0] == 's' && start[1] == 'i' && start[2] == 'g') {
// sig
tok = &parser->toks[i+1];
hex_decode(json + tok->start, toksize(tok), hexbuf, sizeof(hexbuf));
+ parsed |= NDB_PARSED_SIG;
ndb_builder_set_sig(&parser->builder, hexbuf);
} else if (start[0] == 'k' && jsoneq(json, tok, tok_len, "kind")) {
// kind
@@ -779,6 +1749,7 @@ int ndb_parse_json_note(struct ndb_json_parser *parser, struct ndb_note **note)
if (!parse_unsigned_int(start, toksize(tok),
&parser->builder.note->kind))
return 0;
+ parsed |= NDB_PARSED_KIND;
} else if (start[0] == 'c') {
if (jsoneq(json, tok, tok_len, "created_at")) {
// created_at
@@ -786,9 +1757,12 @@ int ndb_parse_json_note(struct ndb_json_parser *parser, struct ndb_note **note)
start = json + tok->start;
if (tok->type != JSMN_PRIMITIVE || tok_len <= 0)
return 0;
- if (!parse_unsigned_int(start, toksize(tok),
- &parser->builder.note->created_at))
+ // TODO: update to int64 in 2106 ... xD
+ unsigned int bigi;
+ if (!parse_unsigned_int(start, toksize(tok), &bigi))
return 0;
+ parser->builder.note->created_at = bigi;
+ parsed |= NDB_PARSED_CREATED_AT;
} else if (jsoneq(json, tok, tok_len, "content")) {
// content
tok = &parser->toks[i+1];
@@ -799,18 +1773,25 @@ int ndb_parse_json_note(struct ndb_json_parser *parser, struct ndb_note **note)
json + tok->start,
tok_len, &pstr,
&written, pack_ids)) {
+ ndb_debug("ndb_builder_make_json_str failed\n");
return 0;
}
parser->builder.note->content_length = written;
parser->builder.note->content = pstr;
+ parsed |= NDB_PARSED_CONTENT;
}
} else if (start[0] == 't' && jsoneq(json, tok, tok_len, "tags")) {
tok = &parser->toks[i+1];
ndb_builder_process_json_tags(parser, tok);
i += tok->size;
+ parsed |= NDB_PARSED_TAGS;
}
}
+ //ndb_debug("parsed %d = %d, &->%d", parsed, NDB_PARSED_ALL, parsed & NDB_PARSED_ALL);
+ if (parsed != NDB_PARSED_ALL)
+ return 0;
+
return ndb_builder_finalize(&parser->builder, note, NULL);
}
@@ -821,7 +1802,7 @@ int ndb_note_from_json(const char *json, int len, struct ndb_note **note,
int res;
ndb_json_parser_init(&parser, json, len, buf, bufsize);
- if ((res = ndb_json_parser_parse(&parser)) < 0)
+ if ((res = ndb_json_parser_parse(&parser, NULL)) < 0)
return res;
if (parser.num_tokens < 1)
@@ -850,7 +1831,7 @@ void ndb_builder_set_kind(struct ndb_builder *builder, uint32_t kind)
builder->note->kind = kind;
}
-void ndb_builder_set_created_at(struct ndb_builder *builder, uint32_t created_at)
+void ndb_builder_set_created_at(struct ndb_builder *builder, uint64_t created_at)
{
builder->note->created_at = created_at;
}
diff --git a/nostrdb/nostrdb.h b/nostrdb/nostrdb.h
@@ -7,7 +7,20 @@
#define NDB_PACKED_STR 0x1
#define NDB_PACKED_ID 0x2
+//#define DEBUG 1
+
+#ifdef DEBUG
+#define ndb_debug(...) printf(__VA_ARGS__)
+#else
+#define ndb_debug(...) (void)0
+#endif
+
struct ndb_json_parser;
+struct ndb;
+
+struct ndb_t {
+ struct ndb *ndb;
+};
// To-client event types
enum tce_type {
@@ -17,6 +30,15 @@ enum tce_type {
NDB_TCE_EOSE = 0x4,
};
+// function pointer for controlling what to do after we parse an id
+typedef enum ndb_idres (*ndb_id_fn)(void *, const char *);
+
+// id callback + closure data
+struct ndb_id_cb {
+ ndb_id_fn fn;
+ void *data;
+};
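+
+// Example (sketch): a callback that stops parsing once it sees a note
+// id we already have. have_note_id() is a hypothetical lookup helper,
+// not part of this API:
+//
+//   static enum ndb_idres skip_known(void *data, const char *id)
+//   {
+//           MDB_txn *txn = data;
+//           return have_note_id(txn, id) ? NDB_IDRES_STOP
+//                                        : NDB_IDRES_CONT;
+//   }
+//
+//   struct ndb_id_cb cb = { .fn = skip_known, .data = txn };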
+
struct ndb_str {
unsigned char flag;
union {
@@ -80,6 +102,7 @@ struct ndb_tag {
};
struct ndb_tags {
+ uint16_t padding;
uint16_t count;
struct ndb_tag tag[0];
};
@@ -92,12 +115,12 @@ struct ndb_note {
unsigned char pubkey[32];
unsigned char sig[64];
- uint32_t created_at;
+ uint64_t created_at;
uint32_t kind;
uint32_t content_length;
union ndb_packed_str content;
uint32_t strings;
-
+ uint32_t reserved[4]; // expansion slots
// nothing can come after tags since it contains variadic data
struct ndb_tags tags;
};
@@ -126,16 +149,24 @@ int ndb_calculate_id(struct ndb_note *note, unsigned char *buf, int buflen);
int ndb_sign_id(struct ndb_keypair *keypair, unsigned char id[32], unsigned char sig[64]);
int ndb_create_keypair(struct ndb_keypair *key);
int ndb_decode_key(const char *secstr, struct ndb_keypair *keypair);
+int ndb_note_verify(void *secp_ctx, unsigned char pubkey[32], unsigned char id[32], unsigned char signature[64]);
-// BUILDER
+// NDB
+int ndb_init(struct ndb **ndb, const char *dbdir, size_t mapsize, int ingester_threads);
+int ndb_process_event(struct ndb *, const char *json, int len);
+int ndb_process_events(struct ndb *, const char *ldjson, size_t len);
+int ndb_get_profile(struct ndb *, unsigned char pubkey[32], void **out);
+struct ndb_note *ndb_get_note_by_id(struct ndb *, const unsigned char *id);
+void ndb_destroy(struct ndb *);
+// BUILDER
int ndb_parse_json_note(struct ndb_json_parser *, struct ndb_note **);
-int ndb_ws_event_from_json(const char *json, int len, struct ndb_tce *tce, unsigned char *buf, int bufsize);
+int ndb_ws_event_from_json(const char *json, int len, struct ndb_tce *tce, unsigned char *buf, int bufsize, struct ndb_id_cb *);
int ndb_note_from_json(const char *json, int len, struct ndb_note **, unsigned char *buf, int buflen);
int ndb_builder_init(struct ndb_builder *builder, unsigned char *buf, int bufsize);
int ndb_builder_finalize(struct ndb_builder *builder, struct ndb_note **note, struct ndb_keypair *privkey);
int ndb_builder_set_content(struct ndb_builder *builder, const char *content, int len);
-void ndb_builder_set_created_at(struct ndb_builder *builder, uint32_t created_at);
+void ndb_builder_set_created_at(struct ndb_builder *builder, uint64_t created_at);
void ndb_builder_set_sig(struct ndb_builder *builder, unsigned char *sig);
void ndb_builder_set_pubkey(struct ndb_builder *builder, unsigned char *pubkey);
void ndb_builder_set_id(struct ndb_builder *builder, unsigned char *id);
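+
+// Builder usage sketch (the buffer size is arbitrary; a NULL keypair
+// finalizes without signing, as ndb_parse_json_note does):
+//
+//   unsigned char buf[1024];
+//   struct ndb_builder builder;
+//   struct ndb_note *note;
+//   if (ndb_builder_init(&builder, buf, sizeof(buf))) {
+//           ndb_builder_set_content(&builder, "hello", 5);
+//           ndb_builder_set_created_at(&builder, 1692980000);
+//           ndb_builder_finalize(&builder, &note, NULL);
+//   }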
diff --git a/nostrdb/protected_queue.h b/nostrdb/protected_queue.h
@@ -0,0 +1,235 @@
+/*
+ * This header file provides a thread-safe queue implementation for generic
+ * data elements. It uses POSIX threads (pthreads) to ensure thread safety.
+ * The queue allows for pushing and popping elements, with the ability to
+ * block or non-block on pop operations. Users are responsible for providing
+ * memory for the queue buffer and ensuring its correct lifespan.
+ *
+ * Author: William Casarin
+ * Inspired-by: https://github.com/hoytech/hoytech-cpp/blob/master/hoytech/protected_queue.h
+ */
+
+#ifndef PROT_QUEUE_H
+#define PROT_QUEUE_H
+
+#include <pthread.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <string.h>
+#include "cursor.h"
+#include "util.h"
+
+#define BUFFER_SIZE 100
+
+/*
+ * The prot_queue structure represents a thread-safe queue that can hold
+ * generic data elements.
+ */
+struct prot_queue {
+ unsigned char *buf;
+ size_t buflen;
+
+ int head;
+ int tail;
+ int count;
+ int elem_size;
+
+ pthread_mutex_t mutex;
+ pthread_cond_t cond;
+};
+
+
+/*
+ * Initialize the queue.
+ * Params:
+ * q - Pointer to the queue.
+ * buf - Buffer for holding data elements.
+ * buflen - Length of the buffer.
+ * elem_size - Size of each data element.
+ * Returns 1 if successful, 0 otherwise.
+ */
+static inline int prot_queue_init(struct prot_queue* q, void* buf,
+ size_t buflen, int elem_size)
+{
+ // buffer elements must fit nicely in the buffer
+ if (buflen == 0 || buflen % elem_size != 0)
+ return 0;
+
+ q->head = 0;
+ q->tail = 0;
+ q->count = 0;
+ q->buf = buf;
+ q->buflen = buflen;
+ q->elem_size = elem_size;
+
+ pthread_mutex_init(&q->mutex, NULL);
+ pthread_cond_init(&q->cond, NULL);
+
+ return 1;
+}
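+
+/*
+ * Usage sketch: the caller owns the buffer and must keep it alive for
+ * the queue's lifetime.
+ *
+ *   static int qbuf[64];
+ *   struct prot_queue q;
+ *   int v = 42, out;
+ *   prot_queue_init(&q, qbuf, sizeof(qbuf), sizeof(int));
+ *   prot_queue_push(&q, &v);
+ *   prot_queue_pop(&q, &out); // out == 42
+ */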
+
+/*
+ * Return the capacity of the queue.
+ * q - Pointer to the queue.
+ */
+static inline size_t prot_queue_capacity(struct prot_queue *q) {
+ return q->buflen / q->elem_size;
+}
+
+/*
+ * Push an element onto the queue.
+ * Params:
+ * q - Pointer to the queue.
+ * data - Pointer to the data element to be pushed.
+ *
+ * Returns 1 if successful, 0 if the queue is full.
+ */
+static int prot_queue_push(struct prot_queue* q, void *data)
+{
+ int cap;
+
+ pthread_mutex_lock(&q->mutex);
+
+ cap = prot_queue_capacity(q);
+ if (q->count == cap) {
+		// queue is full; nothing was pushed, so don't signal
+ pthread_mutex_unlock(&q->mutex);
+ return 0;
+ }
+
+ memcpy(&q->buf[q->tail * q->elem_size], data, q->elem_size);
+ q->tail = (q->tail + 1) % cap;
+ q->count++;
+
+ pthread_cond_signal(&q->cond);
+ pthread_mutex_unlock(&q->mutex);
+
+ return 1;
+}
+
+/*
+ * Push multiple elements onto the queue.
+ * Params:
+ * q - Pointer to the queue.
+ * data - Pointer to the data elements to be pushed.
+ * count - Number of elements to push.
+ *
+ * Returns count if all elements were pushed, or 0 if the queue does
+ * not have enough free space for them (wrap-around is handled).
+ */
+static int prot_queue_push_all(struct prot_queue* q, void *data, int count)
+{
+ int cap;
+ int first_copy_count, second_copy_count;
+
+ pthread_mutex_lock(&q->mutex);
+
+ cap = prot_queue_capacity(q);
+ if (q->count + count > cap) {
+ pthread_mutex_unlock(&q->mutex);
+ return 0; // Return failure if the queue is full
+ }
+
+ first_copy_count = min(count, cap - q->tail); // Elements until the end of the buffer
+ second_copy_count = count - first_copy_count; // Remaining elements if wrap around
+
+ memcpy(&q->buf[q->tail * q->elem_size], data, first_copy_count * q->elem_size);
+ q->tail = (q->tail + first_copy_count) % cap;
+
+ if (second_copy_count > 0) {
+ // If there is a wrap around, copy the remaining elements
+ memcpy(&q->buf[q->tail * q->elem_size], (char *)data + first_copy_count * q->elem_size, second_copy_count * q->elem_size);
+ q->tail = (q->tail + second_copy_count) % cap;
+ }
+
+ q->count += count;
+
+ pthread_cond_signal(&q->cond); // Signal a waiting thread
+ pthread_mutex_unlock(&q->mutex);
+
+ return count;
+}
+
+/*
+ * Try to pop an element from the queue without blocking.
+ * Params:
+ * q - Pointer to the queue.
+ * data - Pointer to where the popped data will be stored.
+ * Returns 1 if successful, 0 if the queue is empty.
+ */
+static inline int prot_queue_try_pop(struct prot_queue *q, void *data) {
+ pthread_mutex_lock(&q->mutex);
+
+ if (q->count == 0) {
+ pthread_mutex_unlock(&q->mutex);
+ return 0;
+ }
+
+ memcpy(data, &q->buf[q->head * q->elem_size], q->elem_size);
+ q->head = (q->head + 1) % prot_queue_capacity(q);
+ q->count--;
+
+ pthread_mutex_unlock(&q->mutex);
+ return 1;
+}
+
+/*
+ * Wait until we have elements, and then pop multiple elements from the queue
+ * up to the specified maximum.
+ *
+ * Params:
+ * q - Pointer to the queue.
+ * dest - Pointer to the buffer where popped data will be stored.
+ * max_items - Maximum number of items to pop from the queue.
+ * Returns the number of items popped; this may be fewer than queued,
+ * since only the contiguous span up to the buffer's end is copied.
+ */
+static int prot_queue_pop_all(struct prot_queue *q, void *dest, int max_items) {
+ pthread_mutex_lock(&q->mutex);
+
+ // Wait until there's at least one item to pop
+ while (q->count == 0) {
+ pthread_cond_wait(&q->cond, &q->mutex);
+ }
+
+ int items_until_end = (q->buflen - q->head * q->elem_size) / q->elem_size;
+ int items_to_pop = min(q->count, max_items);
+ items_to_pop = min(items_to_pop, items_until_end);
+
+ memcpy(dest, &q->buf[q->head * q->elem_size], items_to_pop * q->elem_size);
+ q->head = (q->head + items_to_pop) % prot_queue_capacity(q);
+ q->count -= items_to_pop;
+
+ pthread_mutex_unlock(&q->mutex);
+
+ return items_to_pop;
+}
+
+/*
+ * Pop an element from the queue. Blocks if the queue is empty.
+ * Params:
+ * q - Pointer to the queue.
+ * data - Pointer to where the popped data will be stored.
+ */
+static inline void prot_queue_pop(struct prot_queue *q, void *data) {
+ pthread_mutex_lock(&q->mutex);
+
+ while (q->count == 0)
+ pthread_cond_wait(&q->cond, &q->mutex);
+
+ memcpy(data, &q->buf[q->head * q->elem_size], q->elem_size);
+ q->head = (q->head + 1) % prot_queue_capacity(q);
+ q->count--;
+
+ pthread_mutex_unlock(&q->mutex);
+}
+
+/*
+ * Destroy the queue. Releases resources associated with the queue.
+ * Params:
+ * q - Pointer to the queue.
+ */
+static inline void prot_queue_destroy(struct prot_queue* q) {
+ pthread_mutex_destroy(&q->mutex);
+ pthread_cond_destroy(&q->cond);
+}
+
+#endif // PROT_QUEUE_H
diff --git a/nostrdb/threadpool.h b/nostrdb/threadpool.h
@@ -0,0 +1,103 @@
+
+#ifndef THREADPOOL_H
+#define THREADPOOL_H
+
+#include "protected_queue.h"
+
+struct thread
+{
+ pthread_t thread_id;
+ struct prot_queue inbox;
+ void *qmem;
+ void *ctx;
+};
+
+struct threadpool
+{
+ int num_threads;
+ struct thread *pool;
+ int next_thread;
+ void *quit_msg;
+};
+
+static int threadpool_init(struct threadpool *tp, int num_threads,
+ int q_elem_size, int q_num_elems,
+ void *quit_msg, void *ctx, void* (*thread_fn)(void*))
+{
+ int i;
+ struct thread *t;
+
+ if (num_threads <= 0)
+ return 0;
+
+ tp->num_threads = num_threads;
+ tp->pool = malloc(sizeof(*tp->pool) * num_threads);
+ tp->quit_msg = quit_msg;
+ tp->next_thread = -1;
+
+ if (tp->pool == NULL) {
+ fprintf(stderr, "threadpool_init: couldn't allocate memory for pool");
+ return 0;
+ }
+
+ for (i = 0; i < num_threads; i++) {
+ t = &tp->pool[i];
+ t->qmem = malloc(q_elem_size * q_num_elems);
+ t->ctx = ctx;
+
+ if (t->qmem == NULL) {
+ fprintf(stderr, "threadpool_init: couldn't allocate memory for queue");
+ return 0;
+ }
+
+ if (!prot_queue_init(&t->inbox, t->qmem, q_elem_size * q_num_elems, q_elem_size)) {
+ fprintf(stderr, "threadpool_init: couldn't init queue. buffer alignment is wrong.");
+ return 0;
+ }
+
+ if (pthread_create(&t->thread_id, NULL, thread_fn, t) != 0) {
+ fprintf(stderr, "threadpool_init: failed to create thread\n");
+ return 0;
+ }
+ }
+
+ return 1;
+}
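+
+/*
+ * Worker sketch: each thread owns one inbox and exits when it pops the
+ * quit message (the int messages here are illustrative):
+ *
+ *   static void *worker(void *data)
+ *   {
+ *           struct thread *t = data;
+ *           int msg;
+ *           for (;;) {
+ *                   prot_queue_pop(&t->inbox, &msg);
+ *                   if (msg == -1) // our quit_msg
+ *                           return NULL;
+ *           }
+ *   }
+ */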
+
+static inline struct thread *threadpool_next_thread(struct threadpool *tp)
+{
+ tp->next_thread = (tp->next_thread + 1) % tp->num_threads;
+ return &tp->pool[tp->next_thread];
+}
+
+static inline int threadpool_dispatch(struct threadpool *tp, void *msg)
+{
+ struct thread *t = threadpool_next_thread(tp);
+ return prot_queue_push(&t->inbox, msg);
+}
+
+static inline int threadpool_dispatch_all(struct threadpool *tp, void *msgs,
+ int num_msgs)
+{
+ struct thread *t = threadpool_next_thread(tp);
+ return prot_queue_push_all(&t->inbox, msgs, num_msgs);
+}
+
+static inline void threadpool_destroy(struct threadpool *tp)
+{
+	struct thread *t;
+	int i;
+
+	for (i = 0; i < tp->num_threads; i++) {
+		t = &tp->pool[i];
+		if (!prot_queue_push(&t->inbox, tp->quit_msg)) {
+			// inbox is full; cancel rather than waiting on a
+			// quit message that can't be delivered
+			pthread_cancel(t->thread_id);
+		}
+		pthread_join(t->thread_id, NULL);
+		prot_queue_destroy(&t->inbox);
+		free(t->qmem);
+	}
+	free(tp->pool);
+}
+
+#endif // THREADPOOL_H
diff --git a/nostrdb/util.h b/nostrdb/util.h
@@ -0,0 +1,33 @@
+
+#ifndef NDB_UTIL_H
+#define NDB_UTIL_H
+
+#include <stdlib.h>
+#include <string.h>
+
+static inline int min(int a, int b) {
+ return a < b ? a : b;
+}
+
+static inline int max(int a, int b) {
+ return a > b ? a : b;
+}
+
+static inline void* memdup(const void* src, size_t size) {
+ void* dest = malloc(size);
+ if (dest == NULL) {
+ return NULL; // Memory allocation failed
+ }
+ memcpy(dest, src, size);
+ return dest;
+}
+
+static inline char *strdupn(const char *src, size_t size) {
+ char* dest = malloc(size+1);
+ if (dest == NULL) {
+ return NULL; // Memory allocation failed
+ }
+ memcpy(dest, src, size);
+ dest[size] = '\0';
+ return dest;
+}
+
+#endif // NDB_UTIL_H
+