commit 1c3bec5a2402c6365c2423dc06fd910c4ae2a9f4
parent 529add5669b1576530c16ae91abe3f891cfe68fe
Author: William Casarin <jb55@jb55.com>
Date: Thu, 26 Feb 2026 19:54:03 -0800
Merge: Outbox & Account-scoped subscriptions (#1303)
Migrate from ad-hoc RelayPool subscription management to a centralized
outbox + scoped-subscription model. Add DM relay list (NIP-17 kind
10050) support to the Messages app.
New enostr outbox transport stack:
- OutboxPool, OutboxSession, RelayCoordinator
- Transparent vs compaction relay modes
- Reconnect backoff + jitter, CLOSED handling
- BroadcastRelay for publishing
Host-owned scoped subscription runtime:
- ScopedSubRuntime with owner-slot tracking
- RemoteApi as sole app-facing transport facade
- Account-aware oneshot/publish/relay targeting
App migrations:
- Timeline, thread, onboarding, nostrverse, accounts all use scoped subs
- Note publishing (posts/reposts/reactions/zaps/mutes) via outbox APIs
- Legacy RelayPool isolated to dave paths
- Messages: DM relay list ensure + participant relay prefetch
Extensive test coverage for outbox lifecycle, multi-relay EOSE,
scoped-sub ownership, and NIP-17 relay list helpers.
Diffstat:
96 files changed, 12111 insertions(+), 2517 deletions(-)
diff --git a/Cargo.lock b/Cargo.lock
@@ -493,30 +493,60 @@ dependencies = [
[[package]]
name = "async-utility"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a349201d80b4aa18d17a34a182bdd7f8ddf845e9e57d2ea130a12e10ef1e3a47"
+dependencies = [
+ "futures-util",
+ "gloo-timers 0.2.6",
+ "tokio",
+ "wasm-bindgen-futures",
+]
+
+[[package]]
+name = "async-utility"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a34a3b57207a7a1007832416c3e4862378c8451b4e8e093e436f48c2d3d2c151"
dependencies = [
"futures-util",
- "gloo-timers",
+ "gloo-timers 0.3.0",
"tokio",
"wasm-bindgen-futures",
]
[[package]]
name = "async-wsocket"
+version = "0.10.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8d50cb541e6d09e119e717c64c46ed33f49be7fa592fa805d56c11d6a7ff093c"
+dependencies = [
+ "async-utility 0.2.0",
+ "futures",
+ "futures-util",
+ "js-sys",
+ "tokio",
+ "tokio-rustls",
+ "tokio-tungstenite 0.24.0",
+ "url",
+ "wasm-bindgen",
+ "web-sys",
+]
+
+[[package]]
+name = "async-wsocket"
version = "0.13.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9a7d8c7d34a225ba919dd9ba44d4b9106d20142da545e086be8ae21d1897e043"
dependencies = [
- "async-utility",
+ "async-utility 0.3.1",
"futures",
"futures-util",
"js-sys",
"tokio",
"tokio-rustls",
"tokio-socks",
- "tokio-tungstenite",
+ "tokio-tungstenite 0.26.2",
"url",
"wasm-bindgen",
"web-sys",
@@ -524,6 +554,15 @@ dependencies = [
[[package]]
name = "atomic-destructor"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7d919cb60ba95c87ba42777e9e246c4e8d658057299b437b7512531ce0a09a23"
+dependencies = [
+ "tracing",
+]
+
+[[package]]
+name = "atomic-destructor"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ef49f5882e4b6afaac09ad239a4f8c70a24b8f2b0897edb1f706008efd109cf4"
@@ -2120,15 +2159,20 @@ dependencies = [
"mio",
"negentropy 0.5.0",
"nostr 0.37.0",
+ "nostr-database 0.37.0",
+ "nostr-relay-builder",
"nostrdb",
+ "profiling",
"serde",
"serde_derive",
"serde_json",
"sha2 0.10.9",
+ "tempfile",
"thiserror 2.0.18",
"tokenator",
"tokio",
"tracing",
+ "tracing-subscriber",
"url",
"uuid",
]
@@ -2835,6 +2879,18 @@ checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2"
[[package]]
name = "gloo-timers"
+version = "0.2.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9b995a66bb87bebce9a0f4a95aed01daca4872c050bfcb21653361c03bc35e5c"
+dependencies = [
+ "futures-channel",
+ "futures-core",
+ "js-sys",
+ "wasm-bindgen",
+]
+
+[[package]]
+name = "gloo-timers"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bbb143cf96099802033e0d4f4963b19fd2e0b728bcf076cd9cf7f6634f092994"
@@ -4014,6 +4070,15 @@ dependencies = [
]
[[package]]
+name = "lru"
+version = "0.12.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38"
+dependencies = [
+ "hashbrown 0.15.4",
+]
+
+[[package]]
name = "lru-slab"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -4422,6 +4487,20 @@ dependencies = [
[[package]]
name = "nostr-database"
+version = "0.37.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "23696338d51e45cd44e061823847f4b0d1d362eca80d5033facf9c184149f72f"
+dependencies = [
+ "async-trait",
+ "lru",
+ "nostr 0.37.0",
+ "thiserror 1.0.69",
+ "tokio",
+ "tracing",
+]
+
+[[package]]
+name = "nostr-database"
version = "0.39.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ce07b47c77b8e5a856727885fe0ae47b9aa53d8d853a2190dd479b5a0d6e4f52"
@@ -4431,18 +4510,34 @@ dependencies = [
]
[[package]]
+name = "nostr-relay-builder"
+version = "0.37.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "68687a6f8b58acdd180bfc71e8f9ce9e2640ca2b187488de1cb1a96734897911"
+dependencies = [
+ "async-utility 0.2.0",
+ "async-wsocket 0.10.1",
+ "atomic-destructor 0.2.0",
+ "nostr 0.37.0",
+ "nostr-database 0.37.0",
+ "thiserror 1.0.69",
+ "tokio",
+ "tracing",
+]
+
+[[package]]
name = "nostr-relay-pool"
version = "0.39.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "211ac5bbdda1a8eec0c21814a838da832038767a5d354fe2fcc1ca438cae56fd"
dependencies = [
- "async-utility",
- "async-wsocket",
- "atomic-destructor",
+ "async-utility 0.3.1",
+ "async-wsocket 0.13.1",
+ "atomic-destructor 0.3.0",
"negentropy 0.3.1",
"negentropy 0.5.0",
"nostr 0.39.0",
- "nostr-database",
+ "nostr-database 0.39.0",
"tokio",
"tracing",
]
@@ -4885,7 +4980,7 @@ version = "0.39.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7b4cf4f37803a8c4f5c6e2964947d9346227eea1ed57ce1d1c7fe9c33ff8ed59"
dependencies = [
- "async-utility",
+ "async-utility 0.3.1",
"nostr 0.39.0",
"nostr-relay-pool",
]
@@ -7487,6 +7582,22 @@ dependencies = [
[[package]]
name = "tokio-tungstenite"
+version = "0.24.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "edc5f74e248dc973e0dbb7b74c7e0d6fcc301c694ff50049504004ef4d0cdcd9"
+dependencies = [
+ "futures-util",
+ "log",
+ "rustls",
+ "rustls-pki-types",
+ "tokio",
+ "tokio-rustls",
+ "tungstenite 0.24.0",
+ "webpki-roots 0.26.11",
+]
+
+[[package]]
+name = "tokio-tungstenite"
version = "0.26.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7a9daff607c6d2bf6c16fd681ccb7eecc83e4e2cdc1ca067ffaadfca5de7f084"
diff --git a/crates/enostr/Cargo.toml b/crates/enostr/Cargo.toml
@@ -25,4 +25,12 @@ hkdf = { workspace = true }
sha2 = { workspace = true }
base64 = { workspace = true }
negentropy = { workspace = true }
-uuid = { workspace = true }
-\ No newline at end of file
+uuid = { workspace = true }
+profiling = { workspace = true }
+
+[dev-dependencies]
+nostr-database = "0.37"
+nostr-relay-builder = "0.37"
+tokio = { version = "1", features = ["rt-multi-thread", "macros", "time"] }
+tracing-subscriber = { version = "0.3", features = ["env-filter"] }
+tempfile = { workspace = true }
diff --git a/crates/enostr/src/client/message.rs b/crates/enostr/src/client/message.rs
@@ -13,6 +13,22 @@ impl EventClientMessage {
}
}
+impl<'a> TryFrom<&'a Note<'a>> for EventClientMessage {
+ type Error = Error;
+
+ fn try_from(value: &'a Note<'a>) -> Result<Self, Self::Error> {
+ Ok(Self {
+ note_json: value.json()?,
+ })
+ }
+}
+
+impl From<EventClientMessage> for ClientMessage {
+ fn from(value: EventClientMessage) -> Self {
+ ClientMessage::Event(value)
+ }
+}
+
/// Messages sent by clients, received by relays
#[derive(Debug, Clone)]
pub enum ClientMessage {
diff --git a/crates/enostr/src/lib.rs b/crates/enostr/src/lib.rs
@@ -21,6 +21,15 @@ pub use pubkey::{Pubkey, PubkeyRef};
pub use relay::message::{RelayEvent, RelayMessage};
pub use relay::pool::{PoolEvent, PoolEventBuf, PoolRelay, RelayPool};
pub use relay::subs_debug::{OwnedRelayEvent, RelayLogEvent, SubsDebug, TransferStats};
-pub use relay::{Relay, RelayStatus};
+pub use relay::{
+ NormRelayUrl, OutboxPool, OutboxSession, OutboxSessionHandler, OutboxSubId,
+ RelayCoordinatorLimits, RelayId, RelayImplType, RelayLimitations, RelayReqId, RelayReqStatus,
+ RelayStatus, RelayType, RelayUrlPkgs, SubPass, SubPassGuardian, SubPassRevocation,
+ WebsocketConn,
+};
pub type Result<T> = std::result::Result<T, error::Error>;
+
+pub trait Wakeup: Send + Sync + Clone + 'static {
+ fn wake(&self);
+}
diff --git a/crates/enostr/src/relay/broadcast.rs b/crates/enostr/src/relay/broadcast.rs
@@ -0,0 +1,94 @@
+use crate::{
+ relay::{MulticastRelay, UnownedRelay, WebsocketRelay},
+ ClientMessage, EventClientMessage, RelayStatus,
+};
+
+/// BroadcastCache stores queued events for relays that are temporarily disconnected.
+#[derive(Default)]
+pub struct BroadcastCache {
+ to_send: Vec<EventClientMessage>,
+}
+
+/// BroadcastRelay sends events to either a websocket relay or the multicast relay
+/// while handling retries via the shared cache.
+pub struct BroadcastRelay<'a> {
+ relay: Option<UnownedRelay<'a>>,
+ cache: &'a mut BroadcastCache,
+}
+
+impl<'a> BroadcastRelay<'a> {
+ pub fn websocket(
+ websocket: Option<&'a mut WebsocketRelay>,
+ cache: &'a mut BroadcastCache,
+ ) -> Self {
+ Self {
+ relay: websocket.map(UnownedRelay::Websocket),
+ cache,
+ }
+ }
+
+ pub fn multicast(
+ multicast: Option<&'a mut MulticastRelay>,
+ cache: &'a mut BroadcastCache,
+ ) -> Self {
+ Self {
+ relay: multicast.map(UnownedRelay::Multicast),
+ cache,
+ }
+ }
+
+ pub fn broadcast(&mut self, msg: EventClientMessage) {
+ let Some(relay) = &mut self.relay else {
+ self.cache.to_send.push(msg);
+ return;
+ };
+
+ match relay {
+ UnownedRelay::Websocket(websocket_relay) => {
+ if !websocket_relay.is_connected() {
+ self.cache.to_send.push(msg);
+ return;
+ }
+
+ websocket_relay.conn.send(&ClientMessage::Event(msg));
+ }
+ UnownedRelay::Multicast(multicast) => {
+ // Always queue if we're not connected.
+ if multicast.status() != RelayStatus::Connected {
+ self.cache.to_send.push(msg.clone());
+ return;
+ }
+
+ if multicast.send(&msg).is_err() {
+ self.cache.to_send.push(msg.clone());
+ }
+ }
+ }
+ }
+
+ #[profiling::function]
+ pub fn try_flush_queue(&mut self) {
+ let Some(relay) = &mut self.relay else {
+ return;
+ };
+
+ match relay {
+ UnownedRelay::Websocket(websocket) => {
+ if !websocket.is_connected() || self.cache.to_send.is_empty() {
+ return;
+ }
+
+ for item in self.cache.to_send.drain(..) {
+ websocket.conn.send(&ClientMessage::Event(item));
+ }
+ }
+ UnownedRelay::Multicast(multicast) => {
+ if multicast.status() != RelayStatus::Connected || self.cache.to_send.is_empty() {
+ return;
+ }
+
+ self.cache.to_send.retain(|m| multicast.send(m).is_err());
+ }
+ }
+ }
+}
diff --git a/crates/enostr/src/relay/compaction.rs b/crates/enostr/src/relay/compaction.rs
@@ -0,0 +1,1040 @@
+use std::collections::HashMap;
+
+use hashbrown::HashSet;
+use nostrdb::Filter;
+
+use crate::{
+ relay::{
+ websocket::WebsocketRelay, OutboxSubId, OutboxSubscriptions, QueuedTasks, RelayReqId,
+ RelayReqStatus, RelayTask, SubPass, SubPassGuardian, SubPassRevocation,
+ },
+ ClientMessage,
+};
+
+/// CompactionData tracks every compaction REQ on a relay along with the
+/// Outbox sub ids routed into it.
+#[derive(Default)]
+pub struct CompactionData {
+ request_to_sid: HashMap<OutboxSubId, RelayReqId>, // we never split outbox subs over multiple REQs
+ relay_subs: HashMap<RelayReqId, RelaySubData>, // UUID
+ queue: QueuedTasks,
+}
+
+impl CompactionData {
+ #[allow(dead_code)]
+ pub fn num_subs(&self) -> usize {
+ self.relay_subs.len()
+ }
+
+ pub fn set_req_status(&mut self, sid: &str, status: RelayReqStatus) {
+ let Some(data) = self.relay_subs.get_mut(sid) else {
+ return;
+ };
+
+ data.status = status;
+ }
+
+ pub fn req_status(&self, id: &OutboxSubId) -> Option<RelayReqStatus> {
+ let sid = self.request_to_sid.get(id)?;
+ let data = self.relay_subs.get(sid)?;
+ Some(data.status)
+ }
+
+ #[allow(dead_code)]
+ pub fn has_eose(&self, id: &OutboxSubId) -> bool {
+ self.req_status(id) == Some(RelayReqStatus::Eose)
+ }
+
+ /// Returns the OutboxSubIds associated with the given relay subscription ID.
+ pub fn ids(&self, sid: &RelayReqId) -> Option<&HashSet<OutboxSubId>> {
+ self.relay_subs.get(sid).map(|d| &d.requests.requests)
+ }
+}
+
+/// Ensures `max_subs` REQ to the websocket relay by "compacting" subscriptions (combining multiple requests into one)
+pub struct CompactionRelay<'a> {
+ ctx: CompactionCtx<'a>,
+ sub_guardian: &'a mut SubPassGuardian,
+ json_limit: usize,
+}
+
+/// CompactionRelay ensures multiple Outbox subscriptions are packed into as few
+/// REQs as possible, respecting per-relay limits.
+impl<'a> CompactionRelay<'a> {
+ pub fn new(
+ relay: Option<&'a mut WebsocketRelay>,
+ data: &'a mut CompactionData,
+ json_limit: usize,
+ sub_guardian: &'a mut SubPassGuardian,
+ subs: &'a OutboxSubscriptions,
+ ) -> Self {
+ let ctx = match relay {
+ Some(relay) => CompactionCtx::Active(CompactionHandler::new(relay, data, subs)),
+ None => CompactionCtx::Inactive {
+ data,
+ session: CompactionSubSession::default(),
+ subs,
+ },
+ };
+ Self {
+ ctx,
+ sub_guardian,
+ json_limit,
+ }
+ }
+
+ #[profiling::function]
+ pub fn ingest_session(mut self, session: CompactionSession) {
+ let request_free = session.request_free;
+ let mut reserved: Vec<SubPass> = Vec::new();
+
+ // Reserve passes - take from guardian or compact to free them
+ while reserved.len() < request_free {
+ if let Some(pass) = self.sub_guardian.take_pass() {
+ reserved.push(pass);
+ } else if let Some(ejected_pass) = self.compact() {
+ reserved.push(ejected_pass);
+ } else {
+ break;
+ }
+ }
+
+ // Process session (can't touch reserved passes)
+ self.ingest_session_internal(session);
+
+ // Drain queue
+ {
+ profiling::scope!("drain queue");
+ loop {
+ let Some(id) = self.ctx.data().queue.pop() else {
+ break;
+ };
+ if self.subscribe(id) == PlaceResult::Queued {
+ break;
+ }
+ }
+ }
+
+ // Return reserved passes
+ for pass in reserved {
+ self.sub_guardian.return_pass(pass);
+ }
+ }
+
+ #[profiling::function]
+ fn ingest_session_internal(&mut self, session: CompactionSession) {
+ for (id, task) in session.tasks {
+ match task {
+ RelayTask::Unsubscribe => {
+ self.unsubscribe(id);
+ }
+ RelayTask::Subscribe => {
+ self.subscribe(id);
+ }
+ }
+ }
+ }
+
+ #[profiling::function]
+ pub fn handle_relay_open(&mut self) {
+ let CompactionCtx::Active(handler) = &mut self.ctx else {
+ return;
+ };
+
+ if !handler.relay.is_connected() {
+ return;
+ }
+
+ for (sid, sub_data) in &handler.data.relay_subs {
+ let filters = handler.subs.filters_all(&sub_data.requests.requests);
+ if are_filters_empty(&filters) {
+ continue;
+ }
+
+ handler
+ .relay
+ .conn
+ .send(&ClientMessage::req(sid.to_string(), filters));
+ }
+ }
+
+ #[allow(dead_code)]
+ pub fn revocate(&mut self, mut revocation: SubPassRevocation) {
+ let Some(pass) = self.compact() else {
+ // this shouldn't be possible
+ return;
+ };
+
+ revocation.revocate(pass);
+ }
+
+ #[allow(dead_code)]
+ pub fn revocate_all(&mut self, revocations: Vec<SubPassRevocation>) {
+ for revocation in revocations {
+ self.revocate(revocation);
+ }
+ }
+
+ #[profiling::function]
+ fn compact(&mut self) -> Option<SubPass> {
+ let SharedCtx {
+ data,
+ session,
+ subs,
+ } = self.ctx.shared();
+
+ let (id, smallest) = take_smallest_sub_reqs(subs, &mut data.relay_subs)?;
+
+ session.tasks.insert(id, SubSessionTask::Removed);
+ for id in smallest.requests.requests {
+ self.ctx.data().request_to_sid.remove(&id);
+ self.place(id);
+ }
+
+ Some(smallest.sub_pass)
+ }
+
+ #[profiling::function]
+ fn new_sub(&mut self, id: OutboxSubId) -> PlaceResult {
+ let Some(new_pass) = self.sub_guardian.take_pass() else {
+ // pass not available, try to place on an existing sub
+ return self.place(id);
+ };
+
+ let relay_id = RelayReqId::default();
+ let mut requests = SubRequests::default();
+ requests.add(id);
+
+ let SharedCtx {
+ data,
+ session,
+ subs: _,
+ } = self.ctx.shared();
+ data.relay_subs.insert(
+ relay_id.clone(),
+ RelaySubData {
+ requests,
+ status: RelayReqStatus::InitialQuery,
+ sub_pass: new_pass,
+ },
+ );
+ data.request_to_sid.insert(id, relay_id.clone());
+ session.tasks.insert(relay_id, SubSessionTask::New);
+ PlaceResult::Placed
+ }
+
+ #[profiling::function]
+ pub fn subscribe(&mut self, id: OutboxSubId) -> PlaceResult {
+ let SharedCtx {
+ data,
+ session,
+ subs: _,
+ } = self.ctx.shared();
+ let Some(relay_id) = data.request_to_sid.get(&id) else {
+ return self.new_sub(id);
+ };
+
+ let Some(sub_data) = data.relay_subs.get_mut(relay_id) else {
+ return self.new_sub(id);
+ };
+
+ // modifying a filter
+ sub_data.requests.add(id);
+
+ sub_data.status = RelayReqStatus::InitialQuery;
+
+ session
+ .tasks
+ .insert(relay_id.clone(), SubSessionTask::Touched);
+ tracing::debug!("Placed {id:?} on an existing subscription: {relay_id:?}");
+ PlaceResult::Placed
+ }
+
+ #[profiling::function]
+ pub fn unsubscribe(&mut self, id: OutboxSubId) {
+ let SharedCtx {
+ data: compaction_data,
+ session,
+ subs: _,
+ } = self.ctx.shared();
+ let Some(relay_id) = compaction_data.request_to_sid.remove(&id) else {
+ compaction_data.queue.add(id, RelayTask::Unsubscribe);
+ return;
+ };
+
+ let Some(data) = compaction_data.relay_subs.get_mut(&relay_id) else {
+ compaction_data.queue.add(id, RelayTask::Unsubscribe);
+ return;
+ };
+
+ data.status = RelayReqStatus::InitialQuery;
+
+ if !data.requests.remove(&id) {
+ return;
+ }
+
+ if !data.requests.is_empty() {
+ session
+ .tasks
+ .insert(relay_id.clone(), SubSessionTask::Touched);
+ return;
+ }
+
+ let Some(data) = compaction_data.relay_subs.remove(&relay_id) else {
+ return;
+ };
+
+ self.sub_guardian.return_pass(data.sub_pass);
+ tracing::debug!("Unsubed from last internal id in REQ, returning pass");
+ session
+ .tasks
+ .insert(relay_id.clone(), SubSessionTask::Removed);
+ }
+
+ #[profiling::function]
+ fn place(&mut self, id: OutboxSubId) -> PlaceResult {
+ let SharedCtx {
+ data,
+ session,
+ subs,
+ } = self.ctx.shared();
+ let placed_on = 'place: {
+ for (relay_id, relay_data) in &mut data.relay_subs {
+ if !relay_data.requests.can_fit(subs, &id, self.json_limit) {
+ continue;
+ }
+
+ session
+ .tasks
+ .insert(relay_id.clone(), SubSessionTask::Touched);
+ relay_data.requests.add(id);
+ break 'place Some(relay_id.clone());
+ }
+
+ None
+ };
+
+ if let Some(relay_id) = placed_on {
+ data.request_to_sid.insert(id, relay_id);
+ return PlaceResult::Placed;
+ }
+
+ data.queue.add(id, RelayTask::Subscribe);
+ PlaceResult::Queued
+ }
+}
+
+#[derive(Debug, PartialEq, Eq)]
+pub enum PlaceResult {
+ Placed,
+ Queued,
+}
+
+fn take_smallest_sub_reqs(
+ subs: &OutboxSubscriptions,
+ data: &mut HashMap<RelayReqId, RelaySubData>,
+) -> Option<(RelayReqId, RelaySubData)> {
+ let mut smallest = usize::MAX;
+ let mut res = None;
+
+ for (id, d) in data.iter() {
+ let cur_size = subs.json_size_sum(&d.requests.requests);
+ if cur_size < smallest {
+ smallest = cur_size;
+ res = Some(id.clone());
+ }
+ }
+
+ let id = res?;
+
+ data.remove(&id).map(|r| (id, r))
+}
+
+#[derive(Default)]
+struct CompactionSubSession {
+ tasks: HashMap<RelayReqId, SubSessionTask>,
+}
+
+enum SubSessionTask {
+ New,
+ Touched,
+ Removed,
+}
+
+enum CompactionCtx<'a> {
+ Active(CompactionHandler<'a>),
+ Inactive {
+ data: &'a mut CompactionData,
+ session: CompactionSubSession,
+ subs: &'a OutboxSubscriptions,
+ },
+}
+
+impl<'a> CompactionCtx<'a> {
+ #[profiling::function]
+ pub fn shared(&mut self) -> SharedCtx<'_> {
+ match self {
+ CompactionCtx::Active(compaction_handler) => SharedCtx {
+ data: compaction_handler.data,
+ session: &mut compaction_handler.session,
+ subs: compaction_handler.subs,
+ },
+ CompactionCtx::Inactive {
+ data,
+ session,
+ subs,
+ } => SharedCtx {
+ data,
+ session,
+ subs,
+ },
+ }
+ }
+
+ pub fn data(&mut self) -> &mut CompactionData {
+ match self {
+ CompactionCtx::Active(compaction_handler) => compaction_handler.data,
+ CompactionCtx::Inactive {
+ data,
+ session: _,
+ subs: _,
+ } => data,
+ }
+ }
+}
+struct SharedCtx<'a> {
+ data: &'a mut CompactionData,
+ session: &'a mut CompactionSubSession,
+ subs: &'a OutboxSubscriptions,
+}
+
+struct CompactionHandler<'a> {
+ relay: &'a mut WebsocketRelay,
+ data: &'a mut CompactionData,
+ subs: &'a OutboxSubscriptions,
+ pub session: CompactionSubSession,
+}
+
+impl<'a> Drop for CompactionHandler<'a> {
+ #[profiling::function]
+ fn drop(&mut self) {
+ for (id, task) in &self.session.tasks {
+ match task {
+ SubSessionTask::Touched => {
+ let Some(data) = self.data.relay_subs.get_mut(id) else {
+ continue;
+ };
+
+ let filters = self.subs.filters_all(&data.requests.requests);
+
+ if filters.is_empty() {
+ self.relay.conn.send(&ClientMessage::close(id.0.clone()));
+ } else {
+ self.relay
+ .conn
+ .send(&ClientMessage::req(id.0.clone(), filters));
+ }
+ }
+ SubSessionTask::Removed => {
+ self.relay.conn.send(&ClientMessage::close(id.0.clone()));
+ }
+ SubSessionTask::New => {
+ let Some(data) = self.data.relay_subs.get(id) else {
+ continue;
+ };
+
+ let filters = self.subs.filters_all(&data.requests.requests);
+ self.relay
+ .conn
+ .send(&ClientMessage::req(id.0.clone(), filters));
+ }
+ }
+ }
+ }
+}
+
+fn are_filters_empty(filters: &Vec<Filter>) -> bool {
+ if filters.is_empty() {
+ return true;
+ }
+
+ for filter in filters {
+ if filter.num_elements() != 0 {
+ return false;
+ }
+ }
+
+ true
+}
+
+impl<'a> CompactionHandler<'a> {
+ pub fn new(
+ relay: &'a mut WebsocketRelay,
+ data: &'a mut CompactionData,
+ subs: &'a OutboxSubscriptions,
+ ) -> Self {
+ Self {
+ relay,
+ data,
+ session: CompactionSubSession::default(),
+ subs,
+ }
+ }
+}
+
+/// Represents a singular REQ to a relay
+struct RelaySubData {
+ requests: SubRequests,
+ status: RelayReqStatus,
+ sub_pass: SubPass,
+}
+
+#[derive(Default)]
+struct SubRequests {
+ pub requests: HashSet<OutboxSubId>,
+}
+
+impl SubRequests {
+ #[profiling::function]
+ pub fn add(&mut self, id: OutboxSubId) {
+ self.requests.insert(id);
+ }
+
+ pub fn remove(&mut self, id: &OutboxSubId) -> bool {
+ self.requests.remove(id)
+ }
+
+ pub fn is_empty(&self) -> bool {
+ self.requests.is_empty()
+ }
+
+ pub fn can_fit(
+ &self,
+ subs: &OutboxSubscriptions,
+ new: &OutboxSubId,
+ json_limit: usize,
+ ) -> bool {
+ let Some(new_size) = subs.json_size(new) else {
+ return true;
+ };
+
+ let cur_json_size = subs.json_size_sum(&self.requests);
+
+ // `["REQ","abc...123"]`;
+ // 12345678 ... 90 -> 10 characters excluding the UUID
+ cur_json_size + new_size + 10 + RelayReqId::byte_len() <= json_limit
+ }
+}
+
+#[derive(Default)]
+pub struct CompactionSession {
+ // Number of subs which should be free after ingestion. Subs will compact enough to free up that number of subs
+ // OR as much as possible without dropping any existing subs
+ request_free: usize,
+ tasks: HashMap<OutboxSubId, RelayTask>,
+}
+
+impl CompactionSession {
+ pub fn request_free_subs(&mut self, num_free: usize) {
+ self.request_free = num_free;
+ }
+
+ pub fn unsub(&mut self, unsub: OutboxSubId) {
+ self.tasks.insert(unsub, RelayTask::Unsubscribe);
+ }
+
+ pub fn sub(&mut self, id: OutboxSubId) {
+ self.tasks.insert(id, RelayTask::Subscribe);
+ }
+
+ pub fn is_empty(&self) -> bool {
+ self.tasks.is_empty() && self.request_free == 0
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::relay::{RelayUrlPkgs, SubscribeTask};
+ use hashbrown::HashSet;
+
+ // ==================== CompactionData tests ====================
+
+ #[test]
+ fn compaction_data_default_empty() {
+ let data = CompactionData::default();
+ assert_eq!(data.num_subs(), 0);
+ }
+
+ #[test]
+ fn compaction_data_req_status_none_for_unknown() {
+ let data = CompactionData::default();
+ assert!(data.req_status(&OutboxSubId(999)).is_none());
+ }
+
+ #[test]
+ fn compaction_data_has_eose_false_for_unknown() {
+ let data = CompactionData::default();
+ assert!(!data.has_eose(&OutboxSubId(999)));
+ }
+
+ #[test]
+ fn compaction_data_set_req_status_ignores_unknown_sid() {
+ let mut data = CompactionData::default();
+ // Should not panic or error when setting status for unknown sid
+ data.set_req_status("unknown-sid", RelayReqStatus::Eose);
+ }
+
+ #[test]
+ fn compaction_data_ids_returns_sub_ids() {
+ let mut data = CompactionData::default();
+ let mut guardian = SubPassGuardian::new(1);
+ let pass = guardian.take_pass().unwrap();
+
+ let id = OutboxSubId(7);
+ let relay_id = RelayReqId::from("req-123");
+ let mut requests = SubRequests::default();
+ requests.add(id);
+ data.relay_subs.insert(
+ relay_id.clone(),
+ RelaySubData {
+ requests,
+ status: RelayReqStatus::InitialQuery,
+ sub_pass: pass,
+ },
+ );
+
+ let ids = data.ids(&relay_id);
+ assert!(ids.is_some());
+ assert!(ids.unwrap().contains(&id));
+ }
+
+ #[test]
+ fn compaction_data_set_req_status_updates_status() {
+ let mut data = CompactionData::default();
+
+ // Manually set up a relay subscription
+ let relay_id = RelayReqId::from("test-sid");
+ let mut guardian = SubPassGuardian::new(1);
+ let pass = guardian.take_pass().unwrap();
+
+ data.relay_subs.insert(
+ relay_id.clone(),
+ RelaySubData {
+ requests: SubRequests::default(),
+ status: RelayReqStatus::InitialQuery,
+ sub_pass: pass,
+ },
+ );
+
+ // Set EOSE should update status
+ data.set_req_status("test-sid", RelayReqStatus::Eose);
+
+ // Verify status was set
+ let sub_data = data.relay_subs.get(&relay_id).unwrap();
+ assert_eq!(sub_data.status, RelayReqStatus::Eose);
+ }
+
+ // ==================== SubRequests tests ====================
+
+ /// can_fit returns true when combined JSON size is under the limit.
+ #[test]
+ fn sub_requests_can_fit() {
+ use crate::relay::{RelayUrlPkgs, SubscribeTask};
+ use hashbrown::HashSet;
+
+ let mut subs = OutboxSubscriptions::default();
+ subs.new_subscription(
+ OutboxSubId(0),
+ SubscribeTask {
+ filters: vec![Filter::new().kinds(vec![1]).build()],
+ relays: RelayUrlPkgs::new(HashSet::new()),
+ },
+ false,
+ );
+
+ let requests = SubRequests::default();
+
+ assert!(requests.can_fit(&subs, &OutboxSubId(0), 1_000_000));
+ assert!(!requests.can_fit(&subs, &OutboxSubId(0), 5));
+ }
+
+ // ==================== CompactionSession tests ====================
+
+ #[test]
+ fn compaction_session_default() {
+ let session = CompactionSession::default();
+ assert_eq!(session.request_free, 0);
+ assert!(session.tasks.is_empty());
+ }
+
+ #[test]
+ fn compaction_session_unsub() {
+ let mut session = CompactionSession::default();
+ session.unsub(OutboxSubId(42));
+
+ assert!(session.tasks.contains_key(&OutboxSubId(42)));
+ match session.tasks.get(&OutboxSubId(42)) {
+ Some(RelayTask::Unsubscribe) => (),
+ _ => panic!("Expected Unsubscribe task"),
+ }
+ }
+
+ #[test]
+ fn compaction_session_sub() {
+ let mut session = CompactionSession::default();
+ session.sub(OutboxSubId(1));
+
+ assert!(session.tasks.contains_key(&OutboxSubId(1)));
+ assert!(matches!(
+ session.tasks.get(&OutboxSubId(1)),
+ Some(RelayTask::Subscribe)
+ ));
+ }
+
+ // ==================== take_smallest_sub_reqs tests ====================
+
+ #[test]
+ fn take_smallest_returns_none_for_empty() {
+ let subs = OutboxSubscriptions::default();
+ let mut data: HashMap<RelayReqId, RelaySubData> = HashMap::new();
+ assert!(take_smallest_sub_reqs(&subs, &mut data).is_none());
+ }
+
+ /// Returns the relay sub with the smallest combined JSON size.
+ #[test]
+ fn take_smallest_returns_smallest_by_json_size() {
+ use crate::relay::{RelayUrlPkgs, SubscribeTask};
+ use hashbrown::HashSet;
+
+ // Register subscriptions with different JSON sizes
+ let mut subs = OutboxSubscriptions::default();
+ subs.new_subscription(
+ OutboxSubId(0),
+ SubscribeTask {
+ filters: vec![Filter::new().kinds(vec![1]).build()],
+ relays: RelayUrlPkgs::new(HashSet::new()),
+ },
+ false,
+ );
+ subs.new_subscription(
+ OutboxSubId(1),
+ SubscribeTask {
+ filters: vec![Filter::new()
+ .kinds(vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
+ .build()],
+ relays: RelayUrlPkgs::new(HashSet::new()),
+ },
+ false,
+ );
+
+ let mut guardian = SubPassGuardian::new(2);
+
+ // Small relay sub contains id 0
+ let mut small_requests = SubRequests::default();
+ small_requests.add(OutboxSubId(0));
+
+ // Large relay sub contains id 1
+ let mut large_requests = SubRequests::default();
+ large_requests.add(OutboxSubId(1));
+
+ let mut data: HashMap<RelayReqId, RelaySubData> = HashMap::new();
+ data.insert(
+ RelayReqId::from("small"),
+ RelaySubData {
+ requests: small_requests,
+ status: RelayReqStatus::InitialQuery,
+ sub_pass: guardian.take_pass().unwrap(),
+ },
+ );
+ data.insert(
+ RelayReqId::from("large"),
+ RelaySubData {
+ requests: large_requests,
+ status: RelayReqStatus::InitialQuery,
+ sub_pass: guardian.take_pass().unwrap(),
+ },
+ );
+
+ let (id, _) = take_smallest_sub_reqs(&subs, &mut data).unwrap();
+ assert_eq!(id.0, "small");
+ assert_eq!(data.len(), 1);
+ }
+
+ #[test]
+ fn take_smallest_removes_from_map() {
+ let subs = OutboxSubscriptions::default();
+ let mut data: HashMap<RelayReqId, RelaySubData> = HashMap::new();
+ let mut guardian = SubPassGuardian::new(1);
+
+ data.insert(
+ RelayReqId::from("only"),
+ RelaySubData {
+ requests: SubRequests::default(),
+ status: RelayReqStatus::InitialQuery,
+ sub_pass: guardian.take_pass().unwrap(),
+ },
+ );
+
+ let result = take_smallest_sub_reqs(&subs, &mut data);
+ assert!(result.is_some());
+ assert!(data.is_empty());
+ }
+
+ // ==================== CompactionRelay tests ====================
+
+ /// Requesting free subs when there's nothing to compact has no effect.
+ #[test]
+ fn compact_returns_none_when_no_subs() {
+ let subs = OutboxSubscriptions::default();
+ let mut data = CompactionData::default();
+ let mut guardian = SubPassGuardian::new(5);
+ let json_limit = 100000;
+
+ let initial_passes = guardian.available_passes();
+
+ let relay = CompactionRelay::new(None, &mut data, json_limit, &mut guardian, &subs);
+ let mut session = CompactionSession::default();
+ session.request_free_subs(1);
+ relay.ingest_session(session);
+
+ assert_eq!(guardian.available_passes(), initial_passes);
+ }
+
+    /// Compacting frees a pass and redistributes requests to remaining subs.
+    #[test]
+    fn compact_frees_pass_and_redistributes() {
+        use crate::relay::{RelayUrlPkgs, SubscribeTask};
+        use hashbrown::HashSet;
+
+        let mut subs = OutboxSubscriptions::default();
+        subs.new_subscription(
+            OutboxSubId(0),
+            SubscribeTask {
+                filters: vec![Filter::new().kinds(vec![1]).build()],
+                relays: RelayUrlPkgs::new(HashSet::new()),
+            },
+            false,
+        );
+        subs.new_subscription(
+            OutboxSubId(1),
+            SubscribeTask {
+                filters: vec![Filter::new()
+                    .kinds(vec![2, 3, 4, 5, 6, 7, 8, 9, 10])
+                    .build()],
+                relays: RelayUrlPkgs::new(HashSet::new()),
+            },
+            false,
+        );
+
+        let mut data = CompactionData::default();
+        let mut guardian = SubPassGuardian::new(5);
+        // Generous limit so compaction is never blocked by REQ size here.
+        let json_limit = 100000;
+
+        // Create 2 relay subs
+        let relay = CompactionRelay::new(None, &mut data, json_limit, &mut guardian, &subs);
+        let mut session = CompactionSession::default();
+        session.sub(OutboxSubId(0));
+        session.sub(OutboxSubId(1));
+        relay.ingest_session(session);
+
+        assert_eq!(data.relay_subs.len(), 2);
+        assert_eq!(guardian.available_passes(), 3); // 5 - 2
+
+        // Request 4 free passes - must compact 1
+        // (only 3 are available, so one relay sub has to be merged away)
+        let relay = CompactionRelay::new(None, &mut data, json_limit, &mut guardian, &subs);
+        let mut session = CompactionSession::default();
+        session.request_free_subs(4);
+        relay.ingest_session(session);
+
+        assert_eq!(data.relay_subs.len(), 1);
+        assert_eq!(guardian.available_passes(), 4);
+
+        // Both original requests should now live on the surviving relay sub.
+        let remaining = data.relay_subs.values().next().unwrap();
+        assert_eq!(remaining.requests.requests.len(), 2);
+    }
+
+    /// When compaction redistributes a request but the remaining sub
+    /// doesn't have room, the request goes to the queue.
+    #[test]
+    fn place_queues_when_no_room() {
+        use crate::relay::{RelayUrlPkgs, SubscribeTask};
+        use hashbrown::HashSet;
+
+        let mut subs = OutboxSubscriptions::default();
+        subs.new_subscription(
+            OutboxSubId(0),
+            SubscribeTask {
+                filters: vec![Filter::new().kinds(vec![1]).build()],
+                relays: RelayUrlPkgs::new(HashSet::new()),
+            },
+            false,
+        );
+        subs.new_subscription(
+            OutboxSubId(1),
+            SubscribeTask {
+                filters: vec![Filter::new().kinds(vec![2]).build()],
+                relays: RelayUrlPkgs::new(HashSet::new()),
+            },
+            false,
+        );
+
+        // Set limit so combined filters won't fit in one REQ
+        // (one byte less than the two sizes combined)
+        let size0 = subs.json_size(&OutboxSubId(0)).unwrap();
+        let size1 = subs.json_size(&OutboxSubId(1)).unwrap();
+        let json_limit = size0 + size1 - 1;
+
+        let mut data = CompactionData::default();
+        let mut guardian = SubPassGuardian::new(2);
+
+        // Create 2 relay subs at capacity
+        let relay = CompactionRelay::new(None, &mut data, json_limit, &mut guardian, &subs);
+        let mut session = CompactionSession::default();
+        session.sub(OutboxSubId(0));
+        session.sub(OutboxSubId(1));
+        relay.ingest_session(session);
+
+        assert_eq!(data.relay_subs.len(), 2);
+        assert!(data.queue.is_empty());
+
+        // Compact 1 - redistributed request won't fit
+        let relay = CompactionRelay::new(None, &mut data, json_limit, &mut guardian, &subs);
+        let mut session = CompactionSession::default();
+        session.request_free_subs(1);
+        relay.ingest_session(session);
+
+        // The displaced request ends up queued rather than dropped.
+        assert_eq!(data.relay_subs.len(), 1);
+        assert!(!data.queue.is_empty());
+    }
+
+    /// When no passes are available, requests are placed on existing relay subs.
+    #[test]
+    fn new_sub_places_on_existing_when_no_passes() {
+        use crate::relay::{RelayUrlPkgs, SubscribeTask};
+        use hashbrown::HashSet;
+
+        let mut subs = OutboxSubscriptions::default();
+        subs.new_subscription(
+            OutboxSubId(0),
+            SubscribeTask {
+                filters: vec![Filter::new().kinds(vec![1]).build()],
+                relays: RelayUrlPkgs::new(HashSet::new()),
+            },
+            false,
+        );
+        subs.new_subscription(
+            OutboxSubId(1),
+            SubscribeTask {
+                filters: vec![Filter::new().kinds(vec![2]).build()],
+                relays: RelayUrlPkgs::new(HashSet::new()),
+            },
+            false,
+        );
+
+        let mut data = CompactionData::default();
+        let mut guardian = SubPassGuardian::new(1); // Only 1 pass
+        let json_limit = 100000;
+
+        // Add 2 requests with only 1 pass - second must go on existing
+        let relay = CompactionRelay::new(None, &mut data, json_limit, &mut guardian, &subs);
+        let mut session = CompactionSession::default();
+        session.sub(OutboxSubId(0));
+        session.sub(OutboxSubId(1));
+        relay.ingest_session(session);
+
+        // One REQ on the relay, carrying both subscription requests.
+        assert_eq!(data.relay_subs.len(), 1);
+        let sub = data.relay_subs.values().next().unwrap();
+        assert_eq!(sub.requests.requests.len(), 2);
+    }
+
+    /// Subscriptions placed onto an existing compacted REQ must register
+    /// request-to-relay mapping so a later unsubscribe updates the correct REQ.
+    #[test]
+    fn unsubscribe_after_place_on_existing_removes_request() {
+        use crate::relay::{RelayUrlPkgs, SubscribeTask};
+        use hashbrown::HashSet;
+
+        let mut subs = OutboxSubscriptions::default();
+        subs.new_subscription(
+            OutboxSubId(0),
+            SubscribeTask {
+                filters: vec![Filter::new().kinds(vec![1]).build()],
+                relays: RelayUrlPkgs::new(HashSet::new()),
+            },
+            false,
+        );
+        subs.new_subscription(
+            OutboxSubId(1),
+            SubscribeTask {
+                filters: vec![Filter::new().kinds(vec![2]).build()],
+                relays: RelayUrlPkgs::new(HashSet::new()),
+            },
+            false,
+        );
+
+        let mut data = CompactionData::default();
+        let mut guardian = SubPassGuardian::new(1); // Force second sub onto existing REQ
+        let json_limit = 100000;
+
+        let relay = CompactionRelay::new(None, &mut data, json_limit, &mut guardian, &subs);
+        let mut session = CompactionSession::default();
+        session.sub(OutboxSubId(0));
+        session.sub(OutboxSubId(1));
+        relay.ingest_session(session);
+
+        // Both requests map to the single shared relay-side REQ id.
+        assert_eq!(data.relay_subs.len(), 1);
+        let relay_id = data.relay_subs.keys().next().cloned().unwrap();
+        assert_eq!(data.request_to_sid.get(&OutboxSubId(0)), Some(&relay_id));
+        assert_eq!(data.request_to_sid.get(&OutboxSubId(1)), Some(&relay_id));
+
+        let relay = CompactionRelay::new(None, &mut data, json_limit, &mut guardian, &subs);
+        let mut session = CompactionSession::default();
+        session.unsub(OutboxSubId(1));
+        relay.ingest_session(session);
+
+        // Only sub 1 is removed; sub 0 and its mapping stay intact.
+        assert!(data.queue.is_empty());
+        assert_eq!(data.relay_subs.len(), 1);
+        let sub = data.relay_subs.get(&relay_id).unwrap();
+        assert_eq!(sub.requests.requests.len(), 1);
+        assert!(sub.requests.requests.contains(&OutboxSubId(0)));
+        assert!(!sub.requests.requests.contains(&OutboxSubId(1)));
+        assert_eq!(data.request_to_sid.get(&OutboxSubId(0)), Some(&relay_id));
+        assert!(!data.request_to_sid.contains_key(&OutboxSubId(1)));
+    }
+
+    /// When requesting multiple free passes, multiple subs are compacted
+    /// and all requests are consolidated into fewer relay subs.
+    #[test]
+    fn compact_multiple_subs() {
+        let mut data = CompactionData::default();
+        let mut guardian = SubPassGuardian::new(3);
+        let json_limit = 100000;
+        let mut subs = OutboxSubscriptions::default();
+        for i in 0..3 {
+            subs.new_subscription(
+                OutboxSubId(i),
+                SubscribeTask {
+                    // `i` is already inferred as u64 from `OutboxSubId(i)`,
+                    // so no cast is needed here.
+                    filters: vec![Filter::new().kinds(vec![i + 1]).build()],
+                    relays: RelayUrlPkgs::new(HashSet::new()),
+                },
+                false,
+            );
+        }
+
+        // Create 3 subs and request 2 free in same session
+        let relay = CompactionRelay::new(None, &mut data, json_limit, &mut guardian, &subs);
+        let mut session = CompactionSession::default();
+        for i in 0..3 {
+            session.sub(OutboxSubId(i));
+        }
+        session.request_free_subs(2);
+        relay.ingest_session(session);
+
+        // Should compact down to 1 sub with all 3 requests
+        assert_eq!(data.relay_subs.len(), 1);
+        assert_eq!(guardian.available_passes(), 2);
+
+        let sub = data.relay_subs.values().next().unwrap();
+        assert_eq!(sub.requests.requests.len(), 3);
+    }
+}
diff --git a/crates/enostr/src/relay/coordinator.rs b/crates/enostr/src/relay/coordinator.rs
@@ -0,0 +1,570 @@
+use ewebsock::{WsEvent, WsMessage};
+use hashbrown::{HashMap, HashSet};
+
+use crate::{
+ relay::{
+ compaction::{CompactionData, CompactionRelay, CompactionSession},
+ transparent::{revocate_transparent_subs, TransparentData, TransparentRelay},
+ BroadcastCache, BroadcastRelay, NormRelayUrl, OutboxSubId, OutboxSubscriptions,
+ RawEventData, RelayCoordinatorLimits, RelayImplType, RelayLimitations, RelayReqId,
+ RelayReqStatus, RelayType, SubPassGuardian, SubPassRevocation, WebsocketRelay,
+ },
+ EventClientMessage, RelayMessage, RelayStatus, Wakeup, WebsocketConn,
+};
+
+/// RelayCoordinator routes each Outbox subscription to either the compaction or
+/// transparent relay engine and tracks their status.
+pub struct CoordinationData {
+    /// Per-relay limits: the subscription-pass guardian and max JSON bytes.
+    limits: RelayCoordinatorLimits,
+    /// Underlying websocket; `None` when opening the socket failed.
+    pub(crate) websocket: Option<WebsocketRelay>,
+    /// Which engine currently owns each outbox subscription.
+    coordination: HashMap<OutboxSubId, RelayType>,
+    compaction_data: CompactionData,
+    transparent_data: TransparentData, // for outbox subs that prefer to be transparent
+    /// Pending broadcast events, flushed when the socket (re)opens.
+    broadcast_cache: BroadcastCache,
+    /// REQ ids that received EOSE; drained on the next `ingest_session`.
+    eose_queue: Vec<RelayReqId>,
+}
+
+impl CoordinationData {
+    /// Open a websocket to `norm_url` and set up empty engine state.
+    ///
+    /// A failed socket open is logged and leaves `websocket` as `None`;
+    /// all other operations then degrade to no-ops on the wire.
+    pub fn new<W>(limits: RelayLimitations, norm_url: NormRelayUrl, wakeup: W) -> Self
+    where
+        W: Wakeup,
+    {
+        let websocket = match WebsocketConn::from_wakeup(norm_url.clone().into(), wakeup) {
+            Ok(w) => Some(WebsocketRelay::new(w)),
+            Err(e) => {
+                tracing::error!("could not open websocket to {norm_url:?}: {e}");
+                None
+            }
+        };
+        let limits = RelayCoordinatorLimits::new(limits);
+        let compaction_data = CompactionData::default();
+        Self {
+            limits,
+            websocket,
+            compaction_data,
+            transparent_data: TransparentData::default(),
+            coordination: Default::default(),
+            broadcast_cache: Default::default(),
+            eose_queue: Vec::new(),
+        }
+    }
+
+    /// Change if we found a new NIP-11 `max_subscriptions`
+    #[allow(dead_code)]
+    pub fn set_max_size(&mut self, subs: &OutboxSubscriptions, max_size: usize) {
+        let Some(revocations) = self.limits.new_total(max_size) else {
+            return;
+        };
+
+        // Split the pending revocations between the two engines, preferring
+        // to take from whichever currently holds more subs.
+        let mut trans_left = self.transparent_data.num_subs();
+        let mut compact_left = self.compaction_data.num_subs();
+
+        let (trans_revocations, compacts_revocations): (
+            Vec<SubPassRevocation>,
+            Vec<SubPassRevocation>,
+        ) = revocations.into_iter().partition(|_| {
+            let take_trans = (trans_left > compact_left && trans_left > 0) || (compact_left == 0);
+
+            if take_trans {
+                trans_left -= 1;
+            } else {
+                compact_left -= 1;
+            }
+            take_trans
+        });
+
+        if !trans_revocations.is_empty() {
+            revocate_transparent_subs(
+                self.websocket.as_mut(),
+                &mut self.transparent_data,
+                trans_revocations,
+            );
+        }
+
+        if !compacts_revocations.is_empty() {
+            CompactionRelay::new(
+                self.websocket.as_mut(),
+                &mut self.compaction_data,
+                self.limits.max_json_bytes,
+                &mut self.limits.sub_guardian,
+                subs,
+            )
+            .revocate_all(compacts_revocations);
+        }
+    }
+
+    /// Apply a batch of [`CoordinationSession`] tasks: route each sub to its
+    /// chosen engine, migrate subs that switched engines, and drain queued
+    /// EOSEs. Oneshot subs that reached EOSE are unsubscribed here and
+    /// reported in the returned [`EoseIds`].
+    #[profiling::function]
+    pub fn ingest_session(
+        &mut self,
+        subs: &OutboxSubscriptions,
+        session: CoordinationSession,
+    ) -> EoseIds {
+        let mut trans_unsubs: HashSet<OutboxSubId> = HashSet::new();
+        let mut trans = HashSet::new();
+        let mut compaction_session = CompactionSession::default();
+        let mut eose_ids = EoseIds::default();
+
+        for (id, task) in session.tasks {
+            match task {
+                CoordinationTask::TransparentSub => {
+                    // Migrating from compaction: remove from the shared REQ first.
+                    if let Some(RelayType::Compaction) = self.coordination.get(&id) {
+                        compaction_session.unsub(id);
+                    }
+                    self.coordination.insert(id, RelayType::Transparent);
+                    trans.insert(id);
+                }
+                CoordinationTask::CompactionSub => {
+                    // Migrating from transparent: drop its dedicated REQ below.
+                    if let Some(RelayType::Transparent) = self.coordination.get(&id) {
+                        trans_unsubs.insert(id);
+                    }
+                    self.coordination.insert(id, RelayType::Compaction);
+                    compaction_session.sub(id);
+                }
+                CoordinationTask::Unsubscribe => {
+                    let Some(rtype) = self.coordination.remove(&id) else {
+                        continue;
+                    };
+
+                    match rtype {
+                        RelayType::Compaction => {
+                            compaction_session.unsub(id);
+                        }
+                        RelayType::Transparent => {
+                            trans_unsubs.insert(id);
+                        }
+                    }
+                }
+            }
+        }
+
+        // Drain EOSE queue and collect IDs
+        for sid in self.eose_queue.drain(..) {
+            // Try compaction first
+            let Some(compaction_ids) = self.compaction_data.ids(&sid) else {
+                let Some(transparent_id) = self.transparent_data.id(&sid) else {
+                    continue;
+                };
+
+                if subs.is_oneshot(&transparent_id) {
+                    trans_unsubs.insert(transparent_id);
+                    eose_ids.oneshots.insert(transparent_id);
+                } else {
+                    eose_ids.normal.insert(transparent_id);
+                }
+                continue;
+            };
+
+            let oneshots = subs.subset_oneshot(compaction_ids);
+
+            for id in compaction_ids {
+                if oneshots.contains(id) {
+                    compaction_session.unsub(*id);
+                    eose_ids.oneshots.insert(*id);
+                } else {
+                    eose_ids.normal.insert(*id);
+                }
+            }
+        }
+
+        if !trans_unsubs.is_empty() {
+            let mut transparent = TransparentRelay::new(
+                self.websocket.as_mut(),
+                &mut self.transparent_data,
+                &mut self.limits.sub_guardian,
+            );
+            for unsub in trans_unsubs {
+                transparent.unsubscribe(unsub);
+            }
+        }
+
+        // Each new transparent sub needs a pass of its own, so ask the
+        // compaction engine to free that many before subscribing below.
+        if !trans.is_empty() {
+            compaction_session.request_free_subs(trans.len());
+        }
+
+        if !compaction_session.is_empty() {
+            CompactionRelay::new(
+                self.websocket.as_mut(),
+                &mut self.compaction_data,
+                self.limits.max_json_bytes,
+                &mut self.limits.sub_guardian,
+                subs,
+            )
+            .ingest_session(compaction_session);
+        }
+
+        let mut transparent = TransparentRelay::new(
+            self.websocket.as_mut(),
+            &mut self.transparent_data,
+            &mut self.limits.sub_guardian,
+        );
+        for id in trans {
+            let Some(view) = subs.view(&id) else {
+                continue;
+            };
+            transparent.subscribe(view);
+        }
+
+        transparent.try_flush_queue(subs);
+        tracing::trace!(
+            "Using {} of {} subs",
+            self.limits.sub_guardian.total_passes() - self.limits.sub_guardian.available_passes(),
+            self.limits.sub_guardian.total_passes()
+        );
+
+        eose_ids
+    }
+
+    /// Publish an event to this relay via the broadcast engine.
+    pub fn send_event(&mut self, msg: EventClientMessage) {
+        BroadcastRelay::websocket(self.websocket.as_mut(), &mut self.broadcast_cache)
+            .broadcast(msg);
+    }
+
+    #[allow(dead_code)]
+    pub fn set_req_status(&mut self, sid: &str, status: RelayReqStatus) {
+        // the compaction & transparent data only act on sids that they already know, so whichever
+        // this sid belongs to, it'll make it to its rightful home
+        self.compaction_data.set_req_status(sid, status);
+        self.transparent_data.set_req_status(sid, status);
+    }
+
+    /// Current REQ status of `id` on this relay, if the sub is known here.
+    pub fn req_status(&self, id: &OutboxSubId) -> Option<RelayReqStatus> {
+        match self.coordination.get(id)? {
+            RelayType::Compaction => self.compaction_data.req_status(id),
+            RelayType::Transparent => self.transparent_data.req_status(id),
+        }
+    }
+
+    #[allow(dead_code)]
+    pub fn has_req_status(&self, id: &OutboxSubId, status: RelayReqStatus) -> bool {
+        self.req_status(id) == Some(status)
+    }
+
+    /// URL of the underlying websocket, or "" when there is no socket.
+    fn url(&self) -> &str {
+        let Some(websocket) = &self.websocket else {
+            return "";
+        };
+        websocket.conn.url.as_str()
+    }
+
+    /// Poll a single event from the websocket, if any. `act` is called with
+    /// the raw payload of each nostr EVENT message; connection and REQ status
+    /// changes (open/close/EOSE/CLOSED) are applied to the engine state.
+    #[profiling::function]
+    pub(crate) fn try_recv<F>(&mut self, subs: &OutboxSubscriptions, act: &mut F) -> RecvResponse
+    where
+        for<'a> F: FnMut(RawEventData<'a>),
+    {
+        let Some(websocket) = self.websocket.as_mut() else {
+            return RecvResponse::default();
+        };
+
+        let event = {
+            profiling::scope!("websocket try_recv");
+
+            let Some(event) = websocket.conn.receiver.try_recv() else {
+                return RecvResponse::default();
+            };
+            event
+        };
+
+        let msg = match &event {
+            WsEvent::Opened => {
+                websocket.conn.set_status(RelayStatus::Connected);
+                // A successful connect resets the reconnect backoff.
+                websocket.reconnect_attempt = 0;
+                websocket.retry_connect_after = WebsocketRelay::initial_reconnect_duration();
+                handle_relay_open(
+                    websocket,
+                    &mut self.broadcast_cache,
+                    &mut self.compaction_data,
+                    &mut self.transparent_data,
+                    self.limits.max_json_bytes,
+                    &mut self.limits.sub_guardian,
+                    subs,
+                );
+                None
+            }
+            WsEvent::Closed => {
+                websocket.conn.set_status(RelayStatus::Disconnected);
+                None
+            }
+            WsEvent::Error(err) => {
+                tracing::error!("relay {} error: {:?}", websocket.conn.url, err);
+                websocket.conn.set_status(RelayStatus::Disconnected);
+                None
+            }
+            WsEvent::Message(ws_message) => match ws_message {
+                #[cfg(not(target_arch = "wasm32"))]
+                WsMessage::Ping(bs) => {
+                    websocket.conn.sender.send(WsMessage::Pong(bs.clone()));
+                    None
+                }
+                WsMessage::Text(text) => {
+                    tracing::trace!("relay {} received text: {}", websocket.conn.url, text);
+                    match RelayMessage::from_json(text) {
+                        Ok(msg) => Some(msg),
+                        Err(err) => {
+                            tracing::error!(
+                                "relay {} message decode error: {:?}",
+                                websocket.conn.url,
+                                err
+                            );
+                            None
+                        }
+                    }
+                }
+                _ => None,
+            },
+        };
+
+        let mut resp = RecvResponse::received();
+        let Some(msg) = msg else {
+            return resp;
+        };
+
+        match msg {
+            RelayMessage::OK(cr) => tracing::info!("OK {:?}", cr),
+            RelayMessage::Eose(sid) => {
+                tracing::debug!("Relay {} received EOSE for subscription: {sid}", self.url());
+                self.compaction_data
+                    .set_req_status(sid, RelayReqStatus::Eose);
+                self.transparent_data
+                    .set_req_status(sid, RelayReqStatus::Eose);
+                // Queued so the next ingest_session can resolve oneshots.
+                self.eose_queue.push(RelayReqId(sid.to_string()));
+            }
+            RelayMessage::Event(_, ev) => {
+                profiling::scope!("ingest event");
+                resp.event_was_nostr_note = true;
+                act(RawEventData {
+                    url: websocket.conn.url.as_str(),
+                    event_json: ev,
+                    relay_type: RelayImplType::Websocket,
+                });
+            }
+            RelayMessage::Notice(msg) => {
+                tracing::warn!("Notice from {}: {}", self.url(), msg)
+            }
+            RelayMessage::Closed(sid, _) => {
+                tracing::trace!("Relay {} received CLOSED: {sid}", self.url());
+                self.compaction_data
+                    .set_req_status(sid, RelayReqStatus::Closed);
+                self.transparent_data
+                    .set_req_status(sid, RelayReqStatus::Closed);
+            }
+        }
+
+        resp
+    }
+}
+
+/// Result of a single `try_recv` poll on a relay websocket.
+#[derive(Default)]
+pub struct RecvResponse {
+    /// True when any websocket event was pulled off the receiver.
+    pub received_event: bool,
+    /// True when the received message carried a nostr EVENT payload.
+    pub event_was_nostr_note: bool,
+}
+
+impl RecvResponse {
+    /// A response marking that a websocket event was received, without
+    /// (yet) claiming the event was a nostr note.
+    pub fn received() -> Self {
+        Self {
+            received_event: true,
+            ..Self::default()
+        }
+    }
+}
+
+/// Outbox subscriptions that reached EOSE, split by whether they were
+/// oneshot (finished at EOSE) or normal (stay subscribed).
+#[derive(Default)]
+pub struct EoseIds {
+    pub oneshots: HashSet<OutboxSubId>,
+    pub normal: HashSet<OutboxSubId>,
+}
+
+impl EoseIds {
+    /// Merges IDs from `other` into `self`, preserving set uniqueness.
+    pub fn absorb(&mut self, other: EoseIds) {
+        let EoseIds { oneshots, normal } = other;
+        self.oneshots.extend(oneshots);
+        self.normal.extend(normal);
+    }
+}
+
+/// Restore relay state after the websocket (re)opens: flush any queued
+/// broadcast events first, then let the transparent and compaction
+/// engines run their reconnect handling for the active subscriptions.
+fn handle_relay_open(
+    websocket: &mut WebsocketRelay,
+    broadcast_cache: &mut BroadcastCache,
+    compaction: &mut CompactionData,
+    transparent: &mut TransparentData,
+    max_json: usize,
+    guardian: &mut SubPassGuardian,
+    subs: &OutboxSubscriptions,
+) {
+    BroadcastRelay::websocket(Some(websocket), broadcast_cache).try_flush_queue();
+    let mut transparent = TransparentRelay::new(Some(websocket), transparent, guardian);
+    transparent.handle_relay_open(subs);
+    let mut compaction =
+        CompactionRelay::new(Some(websocket), compaction, max_json, guardian, subs);
+    compaction.handle_relay_open();
+}
+
+/// A batch of per-subscription tasks handed to the coordinator's
+/// `ingest_session`. Later tasks for the same id replace earlier ones.
+#[derive(Default)]
+pub struct CoordinationSession {
+    pub tasks: HashMap<OutboxSubId, CoordinationTask>,
+}
+
+/// What should happen to a single outbox subscription in this session.
+pub enum CoordinationTask {
+    /// Route the subscription to the transparent engine.
+    TransparentSub,
+    /// Route the subscription to the compaction engine, which may merge
+    /// it into a shared REQ.
+    CompactionSub,
+    /// Remove the subscription from whichever engine currently owns it.
+    Unsubscribe,
+}
+
+impl CoordinationSession {
+    /// Record a subscribe task for `id`, routed to the transparent engine
+    /// when `use_transparent` is set, otherwise to the compaction engine.
+    /// Replaces any task previously recorded for `id`.
+    pub fn subscribe(&mut self, id: OutboxSubId, use_transparent: bool) {
+        let task = match use_transparent {
+            true => CoordinationTask::TransparentSub,
+            false => CoordinationTask::CompactionSub,
+        };
+        self.tasks.insert(id, task);
+    }
+
+    /// Record an unsubscribe task for `id`, replacing any pending task.
+    pub fn unsubscribe(&mut self, id: OutboxSubId) {
+        self.tasks.insert(id, CoordinationTask::Unsubscribe);
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    /// Returns the task held for `id`, panicking when no matching task exists.
+    // The explicit `'a` lifetime was redundant here; elision ties the output
+    // lifetime to `session` on its own (clippy::needless_lifetimes).
+    #[track_caller]
+    fn expect_task(session: &CoordinationSession, id: OutboxSubId) -> &CoordinationTask {
+        session
+            .tasks
+            .get(&id)
+            .unwrap_or_else(|| panic!("Expected task for {:?}", id))
+    }
+
+    // ==================== CoordinationSession tests ====================
+
+    /// Newly created coordination sessions hold no tasks.
+    #[test]
+    fn coordination_session_default_empty() {
+        let session = CoordinationSession::default();
+        assert!(session.tasks.is_empty());
+    }
+
+    /// Transparent subscriptions should be recorded as TransparentSub tasks.
+    #[test]
+    fn coordination_session_subscribe_transparent() {
+        let mut session = CoordinationSession::default();
+
+        session.subscribe(OutboxSubId(0), true); // use_transparent = true
+
+        assert!(matches!(
+            expect_task(&session, OutboxSubId(0)),
+            CoordinationTask::TransparentSub
+        ));
+    }
+
+    /// Compaction mode subscriptions should be recorded as CompactionSub tasks.
+    #[test]
+    fn coordination_session_subscribe_compaction() {
+        let mut session = CoordinationSession::default();
+
+        session.subscribe(OutboxSubId(0), false); // use_transparent = false means compaction
+
+        assert!(matches!(
+            expect_task(&session, OutboxSubId(0)),
+            CoordinationTask::CompactionSub
+        ));
+    }
+
+    /// Unsubscribe should record an Unsubscribe task.
+    #[test]
+    fn coordination_session_unsubscribe() {
+        let mut session = CoordinationSession::default();
+
+        session.unsubscribe(OutboxSubId(42));
+
+        assert!(matches!(
+            expect_task(&session, OutboxSubId(42)),
+            CoordinationTask::Unsubscribe
+        ));
+    }
+
+    /// Subsequent subscribe calls should overwrite previous modes.
+    #[test]
+    fn coordination_session_subscribe_overwrites_previous() {
+        let mut session = CoordinationSession::default();
+
+        // First subscribe as transparent
+        session.subscribe(OutboxSubId(0), true);
+
+        assert!(matches!(
+            expect_task(&session, OutboxSubId(0)),
+            CoordinationTask::TransparentSub
+        ));
+
+        // Then as compaction
+        session.subscribe(OutboxSubId(0), false);
+
+        // Should be compaction now
+        assert!(matches!(
+            expect_task(&session, OutboxSubId(0)),
+            CoordinationTask::CompactionSub
+        ));
+    }
+
+    /// Unsubscribe should override any prior subscribe entries.
+    #[test]
+    fn coordination_session_unsubscribe_overwrites_subscribe() {
+        let mut session = CoordinationSession::default();
+
+        session.subscribe(OutboxSubId(0), true);
+        assert!(matches!(
+            expect_task(&session, OutboxSubId(0)),
+            CoordinationTask::TransparentSub
+        ));
+        session.unsubscribe(OutboxSubId(0));
+
+        assert!(matches!(
+            expect_task(&session, OutboxSubId(0)),
+            CoordinationTask::Unsubscribe
+        ));
+    }
+
+    /// Multiple tasks can be recorded in a single session.
+    #[test]
+    fn coordination_session_multiple_tasks() {
+        let mut session = CoordinationSession::default();
+
+        session.subscribe(OutboxSubId(0), true);
+        session.subscribe(OutboxSubId(1), false);
+        session.unsubscribe(OutboxSubId(2));
+
+        assert_eq!(session.tasks.len(), 3);
+    }
+
+    // ==================== EoseIds tests ====================
+
+    #[test]
+    fn eose_ids_default_empty() {
+        let eose_ids = EoseIds::default();
+        assert!(eose_ids.oneshots.is_empty());
+        assert!(eose_ids.normal.is_empty());
+    }
+
+    /// absorb merges oneshot and normal ID sets into the target accumulator.
+    #[test]
+    fn eose_ids_absorb_merges_both_sets() {
+        let mut acc = EoseIds::default();
+        let mut incoming = EoseIds::default();
+
+        acc.oneshots.insert(OutboxSubId(1));
+        incoming.oneshots.insert(OutboxSubId(2));
+        incoming.normal.insert(OutboxSubId(3));
+
+        acc.absorb(incoming);
+
+        assert!(acc.oneshots.contains(&OutboxSubId(1)));
+        assert!(acc.oneshots.contains(&OutboxSubId(2)));
+        assert!(acc.normal.contains(&OutboxSubId(3)));
+    }
+}
diff --git a/crates/enostr/src/relay/identity.rs b/crates/enostr/src/relay/identity.rs
@@ -0,0 +1,253 @@
+use std::{
+ borrow::Borrow,
+ fmt::{self, Display},
+};
+
+use hashbrown::HashSet;
+use nostr::types::RelayUrl;
+use url::Url;
+use uuid::Uuid;
+
+use crate::Error;
+
+/// Identifies a relay connection: a normalized websocket URL, or the
+/// local multicast relay.
+#[derive(Eq, PartialEq, Hash, Clone, Debug)]
+pub enum RelayId {
+    Websocket(NormRelayUrl),
+    Multicast,
+}
+
+/// Identifier for an outbox subscription request.
+#[derive(Clone, Copy, Hash, PartialEq, Eq, Debug, PartialOrd, Ord)]
+pub struct OutboxSubId(pub u64);
+
+/// Lifecycle state of a REQ on a particular relay.
+#[derive(Clone, Copy, PartialEq, Eq, Debug)]
+pub enum RelayReqStatus {
+    /// Initial query in flight; no EOSE seen yet.
+    InitialQuery,
+    /// The relay sent EOSE for this REQ.
+    Eose,
+    /// The relay sent CLOSED for this REQ.
+    Closed,
+}
+
+/// Wire-level subscription id used in REQ/EOSE/CLOSED relay messages.
+#[derive(Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)]
+pub struct RelayReqId(pub String);
+
+impl RelayReqId {
+    /// Byte length of a default request id (a hyphenated UUID).
+    pub fn byte_len() -> usize {
+        uuid::fmt::Hyphenated::LENGTH
+    }
+}
+
+impl Default for RelayReqId {
+    /// Generates a fresh random id (hyphenated UUID v4).
+    fn default() -> Self {
+        Self(Uuid::new_v4().to_string())
+    }
+}
+
+impl From<String> for RelayReqId {
+    fn from(s: String) -> Self {
+        Self(s)
+    }
+}
+
+impl From<RelayReqId> for String {
+    fn from(value: RelayReqId) -> Self {
+        value.0
+    }
+}
+
+impl From<&str> for RelayReqId {
+    fn from(s: &str) -> Self {
+        Self(s.to_owned())
+    }
+}
+
+impl From<Uuid> for RelayReqId {
+    fn from(value: Uuid) -> Self {
+        RelayReqId(value.to_string())
+    }
+}
+
+impl std::fmt::Display for RelayReqId {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        self.0.fmt(f)
+    }
+}
+
+impl Borrow<str> for RelayReqId {
+    // Lets hash-map lookups keyed by RelayReqId accept a plain &str
+    // without allocating.
+    fn borrow(&self) -> &str {
+        &self.0
+    }
+}
+
+/// A relay URL normalized through URL canonicalization (e.g. trailing
+/// slashes), so equivalent relays compare and hash equal.
+#[derive(Eq, PartialEq, Hash, Clone, Debug, PartialOrd, Ord)]
+pub struct NormRelayUrl {
+    url: RelayUrl,
+}
+
+impl NormRelayUrl {
+    /// Parse and canonicalize `url`.
+    ///
+    /// # Errors
+    /// Returns [`Error::InvalidRelayUrl`] when the input is not a valid
+    /// relay URL (e.g. not a ws:// or wss:// URL).
+    pub fn new(url: &str) -> Result<Self, Error> {
+        Ok(Self {
+            url: nostr::RelayUrl::parse(canonicalize_url(url.to_owned()))
+                .map_err(|_| Error::InvalidRelayUrl)?,
+        })
+    }
+}
+
+impl Display for NormRelayUrl {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{}", self.url)
+    }
+}
+
+impl From<NormRelayUrl> for RelayUrl {
+    fn from(value: NormRelayUrl) -> Self {
+        value.url
+    }
+}
+
+impl From<RelayUrl> for NormRelayUrl {
+    // NOTE(review): assumes an already-parsed RelayUrl is canonical enough;
+    // no re-canonicalization happens here.
+    fn from(url: RelayUrl) -> Self {
+        Self { url }
+    }
+}
+
+/// Which subscription engine handles a sub on a given relay.
+#[derive(Hash, PartialEq, Eq, Clone, Copy, Debug)]
+pub enum RelayType {
+    /// Subs may be merged into shared REQs.
+    Compaction,
+    /// Subs are handled individually by the transparent engine.
+    Transparent,
+}
+
+/// The set of relay URLs for a subscription, plus its engine preference.
+#[derive(Default, Clone, Debug)]
+pub struct RelayUrlPkgs {
+    pub urls: HashSet<NormRelayUrl>,
+    /// Prefer the transparent engine over compaction (defaults to false).
+    pub use_transparent: bool,
+}
+
+impl RelayUrlPkgs {
+    /// Iterate over the relay URLs.
+    pub fn iter(&self) -> impl Iterator<Item = &NormRelayUrl> {
+        self.urls.iter()
+    }
+
+    /// Build from `urls`, defaulting to the compaction engine.
+    pub fn new(urls: HashSet<NormRelayUrl>) -> Self {
+        Self {
+            urls,
+            use_transparent: false,
+        }
+    }
+}
+
+/// Standardize the URL format (ie, trailing slashes). Input that fails
+/// to parse is returned unchanged.
+fn canonicalize_url(url: String) -> String {
+    Url::parse(&url)
+        .map(|parsed| parsed.to_string())
+        .unwrap_or(url)
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    // ==================== NormRelayUrl tests ====================
+
+    #[test]
+    fn norm_relay_url_creates_valid_url() {
+        let url = NormRelayUrl::new("wss://relay.example.com");
+        assert!(url.is_ok());
+    }
+
+    #[test]
+    fn norm_relay_url_handles_trailing_slash() {
+        let url1 = NormRelayUrl::new("wss://relay.example.com/").unwrap();
+        let url2 = NormRelayUrl::new("wss://relay.example.com").unwrap();
+        // Both should canonicalize to the same thing
+        assert_eq!(url1.to_string(), url2.to_string());
+    }
+
+    #[test]
+    fn norm_relay_url_rejects_invalid() {
+        assert!(NormRelayUrl::new("not-a-url").is_err());
+    }
+
+    #[test]
+    fn norm_relay_url_rejects_http() {
+        // nostr relay URLs must be ws:// or wss://
+        assert!(NormRelayUrl::new("http://relay.example.com").is_err());
+    }
+
+    #[test]
+    fn norm_relay_url_equality() {
+        let url1 = NormRelayUrl::new("wss://relay.example.com").unwrap();
+        let url2 = NormRelayUrl::new("wss://relay.example.com").unwrap();
+        assert_eq!(url1, url2);
+    }
+
+    #[test]
+    fn norm_relay_url_hash_consistency() {
+        use std::collections::HashSet;
+
+        let url1 = NormRelayUrl::new("wss://relay.example.com").unwrap();
+        let url2 = NormRelayUrl::new("wss://relay.example.com").unwrap();
+
+        let mut set = HashSet::new();
+        set.insert(url1);
+        assert!(set.contains(&url2));
+    }
+
+    // ==================== RelayUrlPkgs tests ====================
+
+    #[test]
+    fn relay_url_pkgs_default_not_transparent() {
+        let pkgs = RelayUrlPkgs::default();
+        assert!(!pkgs.use_transparent);
+        assert!(pkgs.urls.is_empty());
+    }
+
+    #[test]
+    fn relay_url_pkgs_new_sets_urls() {
+        let mut urls = HashSet::new();
+        urls.insert(NormRelayUrl::new("wss://relay1.example.com").unwrap());
+        urls.insert(NormRelayUrl::new("wss://relay2.example.com").unwrap());
+
+        let pkgs = RelayUrlPkgs::new(urls);
+        assert_eq!(pkgs.urls.len(), 2);
+        assert!(!pkgs.use_transparent);
+    }
+
+    #[test]
+    fn relay_url_pkgs_iter() {
+        let mut urls = HashSet::new();
+        urls.insert(NormRelayUrl::new("wss://relay1.example.com").unwrap());
+
+        let pkgs = RelayUrlPkgs::new(urls);
+        assert_eq!(pkgs.iter().count(), 1);
+    }
+
+    // ==================== RelayReqId tests ====================
+
+    #[test]
+    fn relay_req_id_default_generates_uuid() {
+        let id1 = RelayReqId::default();
+        let id2 = RelayReqId::default();
+        // Each default should generate a unique UUID
+        assert_ne!(id1, id2);
+    }
+
+    // ==================== OutboxSubId tests ====================
+
+    #[test]
+    fn sub_request_id_equality() {
+        let id1 = OutboxSubId(42);
+        let id2 = OutboxSubId(42);
+        let id3 = OutboxSubId(43);
+
+        assert_eq!(id1, id2);
+        assert_ne!(id1, id3);
+    }
+
+    #[test]
+    fn sub_request_id_ordering() {
+        let id1 = OutboxSubId(1);
+        let id2 = OutboxSubId(2);
+
+        assert!(id1 < id2);
+    }
+}
diff --git a/crates/enostr/src/relay/limits.rs b/crates/enostr/src/relay/limits.rs
@@ -0,0 +1,296 @@
+/// Limitations imposed by the relay
+pub struct RelayLimitations {
+    /// Corresponds to NIP-11 `max_subscriptions`
+    pub maximum_subs: usize,
+
+    /// Corresponds to NIP-11 `max_message_length`
+    pub max_json_bytes: usize,
+}
+
+impl Default for RelayLimitations {
+    /// Conservative defaults used until/unless NIP-11 limits are known.
+    fn default() -> Self {
+        Self {
+            maximum_subs: 10,
+            max_json_bytes: 400_000,
+        }
+    }
+}
+
+/// Relay limits in the form the coordinator consumes: a pass guardian
+/// enforcing the subscription budget, plus the JSON message size cap.
+pub struct RelayCoordinatorLimits {
+    pub sub_guardian: SubPassGuardian,
+    pub max_json_bytes: usize,
+}
+
+impl RelayCoordinatorLimits {
+    pub fn new(limits: RelayLimitations) -> Self {
+        Self {
+            max_json_bytes: limits.max_json_bytes,
+            sub_guardian: SubPassGuardian::new(limits.maximum_subs),
+        }
+    }
+
+    /// Adjust the total number of subscription passes to `new_max`.
+    ///
+    /// Returns `None` when nothing further is required (unchanged, grown,
+    /// or shrunk entirely from currently-available passes). When shrinking
+    /// by more than the available passes, returns one [`SubPassRevocation`]
+    /// per outstanding pass; each MUST be fulfilled via
+    /// [`SubPassRevocation::revocate`], or it will panic on drop.
+    pub fn new_total(&mut self, new_max: usize) -> Option<Vec<SubPassRevocation>> {
+        let old = self.sub_guardian.total_passes;
+
+        if new_max == old {
+            return None;
+        }
+
+        if new_max > old {
+            let add = new_max - old;
+            self.sub_guardian.spawn_passes(add);
+            self.sub_guardian.total_passes = new_max;
+            return None;
+        }
+
+        // new_max < old
+        let remove = old - new_max;
+        self.sub_guardian.total_passes = new_max;
+
+        let mut pending = Vec::new();
+
+        for _ in 0..remove {
+            let mut revocation = SubPassRevocation::new();
+            if let Some(pass) = self.sub_guardian.available_passes.pop() {
+                // can revoke immediately -> do NOT return a revocation object for it
+                revocation.revocate(pass);
+            } else {
+                // can't revoke now -> return a revocation object to be fulfilled later
+                pending.push(revocation);
+            }
+        }
+
+        if pending.is_empty() {
+            None
+        } else {
+            Some(pending)
+        }
+    }
+}
+
+/// Hands out [`SubPass`] tokens up to the relay's subscription budget.
+pub struct SubPassGuardian {
+    /// Budget size (available + outstanding passes).
+    total_passes: usize,
+    /// Passes not currently held by a live subscription.
+    available_passes: Vec<SubPass>,
+}
+
+impl SubPassGuardian {
+    /// Create a guardian with `max_subs` passes, all available.
+    pub(crate) fn new(max_subs: usize) -> Self {
+        Self {
+            available_passes: (0..max_subs)
+                .map(|_| SubPass { _private: () })
+                .collect::<Vec<_>>(),
+            total_passes: max_subs,
+        }
+    }
+
+    /// Take a pass, or `None` when the budget is exhausted.
+    pub fn take_pass(&mut self) -> Option<SubPass> {
+        self.available_passes.pop()
+    }
+
+    /// Number of passes currently available.
+    pub fn available_passes(&self) -> usize {
+        self.available_passes.len()
+    }
+
+    /// Total budget, including passes currently in use.
+    pub fn total_passes(&self) -> usize {
+        self.total_passes
+    }
+
+    /// Return a previously taken pass to the pool.
+    pub fn return_pass(&mut self, pass: SubPass) {
+        self.available_passes.push(pass);
+        tracing::debug!(
+            "Returned pass. Using {} of {} passes",
+            self.total_passes - self.available_passes(),
+            self.total_passes
+        );
+    }
+
+    /// Mint `new_passes` additional passes (used when the budget grows).
+    pub(crate) fn spawn_passes(&mut self, new_passes: usize) {
+        for _ in 0..new_passes {
+            self.available_passes.push(SubPass { _private: () });
+        }
+    }
+}
+
+/// Annihilates an existing `SubPass`. These should only be generated from the `RelayCoordinatorLimits`
+/// when there is a new total subs which is less than the existing amount
+pub struct SubPassRevocation {
+    /// Set once a pass has been surrendered via [`Self::revocate`].
+    revoked: bool,
+}
+
+impl SubPassRevocation {
+    /// Consume `pass` to satisfy this revocation. The pass is dropped
+    /// rather than returned to the guardian, shrinking the budget.
+    pub fn revocate(&mut self, _: SubPass) {
+        self.revoked = true;
+    }
+
+    pub(crate) fn new() -> Self {
+        Self { revoked: false }
+    }
+}
+
+/// It completely breaks subscription management if we don't have strict accounting, so we crash if we fail to revocate
+impl Drop for SubPassRevocation {
+    fn drop(&mut self) {
+        if !self.revoked {
+            panic!("The subscription pass revocator did not revoke the SubPass");
+        }
+    }
+}
+
+/// A token representing the right to hold one open subscription on a
+/// relay; obtained from and returned to [`SubPassGuardian`].
+pub struct SubPass {
+    _private: (),
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    // ==================== SubPassGuardian tests ====================
+
+    #[test]
+    fn guardian_starts_with_correct_passes() {
+        let guardian = SubPassGuardian::new(10);
+        assert_eq!(guardian.available_passes(), 10);
+    }
+
+    #[test]
+    fn guardian_take_pass_decrements() {
+        let mut guardian = SubPassGuardian::new(5);
+        let pass = guardian.take_pass();
+        assert!(pass.is_some());
+        assert_eq!(guardian.available_passes(), 4);
+    }
+
+    #[test]
+    fn guardian_take_pass_returns_none_when_empty() {
+        let mut guardian = SubPassGuardian::new(1);
+        let _pass = guardian.take_pass();
+        assert!(guardian.take_pass().is_none());
+        assert_eq!(guardian.available_passes(), 0);
+    }
+
+    #[test]
+    fn guardian_return_pass_increments() {
+        let mut guardian = SubPassGuardian::new(1);
+        let pass = guardian.take_pass().unwrap();
+        assert_eq!(guardian.available_passes(), 0);
+        guardian.return_pass(pass);
+        assert_eq!(guardian.available_passes(), 1);
+    }
+
+    #[test]
+    fn guardian_spawn_passes_adds_new_passes() {
+        let mut guardian = SubPassGuardian::new(2);
+        assert_eq!(guardian.available_passes(), 2);
+        guardian.spawn_passes(3);
+        assert_eq!(guardian.available_passes(), 5);
+    }
+
+    #[test]
+    fn guardian_multiple_take_and_return() {
+        let mut guardian = SubPassGuardian::new(3);
+
+        let pass1 = guardian.take_pass().unwrap();
+        let pass2 = guardian.take_pass().unwrap();
+        assert_eq!(guardian.available_passes(), 1);
+
+        guardian.return_pass(pass1);
+        assert_eq!(guardian.available_passes(), 2);
+
+        let _pass3 = guardian.take_pass().unwrap();
+        assert_eq!(guardian.available_passes(), 1);
+
+        guardian.return_pass(pass2);
+        assert_eq!(guardian.available_passes(), 2);
+    }
+
+    // ==================== SubPassRevocation tests ====================
+
+    #[test]
+    #[should_panic(expected = "did not revoke")]
+    fn revocation_panics_if_not_revoked() {
+        let _revocation = SubPassRevocation::new();
+        // drop triggers panic
+    }
+
+    #[test]
+    fn revocation_does_not_panic_when_revoked() {
+        let mut guardian = SubPassGuardian::new(1);
+        let pass = guardian.take_pass().unwrap();
+        let mut revocation = SubPassRevocation::new();
+        revocation.revocate(pass);
+        // drop should not panic since revoked is true
+    }
+
+    #[test]
+    fn revocation_marks_as_revoked_after_revocate() {
+        let mut guardian = SubPassGuardian::new(1);
+        let pass = guardian.take_pass().unwrap();
+        let mut revocation = SubPassRevocation::new();
+
+        assert!(!revocation.revoked);
+        revocation.revocate(pass);
+        assert!(revocation.revoked);
+    }
+
+    // ==================== RelayCoordinatorLimits tests ====================
+
+    #[test]
+    fn new_total_returns_none_when_same() {
+        let mut limits = RelayCoordinatorLimits::new(RelayLimitations {
+            maximum_subs: 5,
+            max_json_bytes: 400_000,
+        });
+
+        let revocations = limits.new_total(5);
+        assert!(revocations.is_none());
+        assert_eq!(limits.sub_guardian.available_passes(), 5);
+    }
+
+    #[test]
+    fn new_total_spawns_passes_when_increasing() {
+        let mut limits = RelayCoordinatorLimits::new(RelayLimitations {
+            maximum_subs: 5,
+            max_json_bytes: 400_000,
+        });
+
+        let revocations = limits.new_total(10);
+        assert!(revocations.is_none());
+        assert_eq!(limits.sub_guardian.available_passes(), 10);
+    }
+
+    /// When shrinking while enough passes are idle, the guardian revokes
+    /// them immediately and no deferred revocations are returned.
+    // Renamed from `new_total_returns_revocations_when_decreasing`, which
+    // contradicted the `is_none()` assertion below.
+    #[test]
+    fn new_total_revokes_immediately_when_passes_available() {
+        let mut limits = RelayCoordinatorLimits::new(RelayLimitations {
+            maximum_subs: 10,
+            max_json_bytes: 400_000,
+        });
+
+        let revocations = limits.new_total(5);
+        assert!(revocations.is_none());
+    }
+
+    #[test]
+    fn new_total_partial_revocations_when_passes_in_use() {
+        let mut limits = RelayCoordinatorLimits::new(RelayLimitations {
+            maximum_subs: 5,
+            max_json_bytes: 400_000,
+        });
+
+        // Take 3 passes (simulate them being in use)
+        let pass = limits.sub_guardian.take_pass().unwrap();
+        limits.sub_guardian.take_pass();
+        limits.sub_guardian.take_pass();
+        assert_eq!(limits.sub_guardian.available_passes(), 2);
+
+        // Now reduce to 2 total (need to remove 3)
+        let revocations = limits.new_total(2);
+
+        assert!(revocations.is_some());
+
+        let mut revs = revocations.unwrap();
+        // since there were two available passes, the guardian used those, but there is still one pass unaccounted for
+        assert_eq!(revs.len(), 1);
+
+        revs.pop().unwrap().revocate(pass);
+    }
+}
diff --git a/crates/enostr/src/relay/message.rs b/crates/enostr/src/relay/message.rs
@@ -18,6 +18,7 @@ pub enum RelayMessage<'a> {
Eose(&'a str),
Event(&'a str, &'a str),
Notice(&'a str),
+ Closed(&'a str, &'a str),
}
#[derive(Debug)]
@@ -79,6 +80,11 @@ impl<'a> RelayMessage<'a> {
RelayMessage::Event(sub_id, ev)
}
+ /// Construct a relay `CLOSED` message with its subscription id and reason.
+ pub fn closed(sub_id: &'a str, message: &'a str) -> Self {
+ RelayMessage::Closed(sub_id, message)
+ }
+
pub fn from_json(msg: &'a str) -> Result<RelayMessage<'a>> {
if msg.is_empty() {
return Err(Error::Empty);
@@ -141,6 +147,18 @@ impl<'a> RelayMessage<'a> {
));
}
+ // CLOSED (NIP-01)
+ // Relay response format: ["CLOSED", <subscription_id>, <message>]
+ if msg.starts_with("[\"CLOSED\"") {
+ let parts: Vec<&'a str> =
+ serde_json::from_str(msg).map_err(|err| Error::DecodeFailed(err.to_string()))?;
+ if parts.len() != 3 || parts[0] != "CLOSED" {
+ return Err(Error::DecodeFailed("Invalid CLOSED format".into()));
+ }
+
+ return Ok(Self::closed(parts[1], parts[2]));
+ }
+
// OK (NIP-20)
// Relay response format: ["OK",<event_id>, <true|false>, <message>]
if &msg[0..=5] == "[\"OK\"," && msg.len() >= 78 {
@@ -206,6 +224,13 @@ mod tests {
Ok(RelayMessage::eose("random-subscription-id")),
),
(
+ r#"["CLOSED","sub1","error: shutting down idle subscription"]"#,
+ Ok(RelayMessage::closed(
+ "sub1",
+ "error: shutting down idle subscription",
+ )),
+ ),
+ (
r#"["OK","b1a649ebe8b435ec71d3784793f3bbf4b93e64e17568a741aecd4c7ddeafce30",true,"pow: difficulty 25>=24"]"#,
Ok(RelayMessage::ok(
"b1a649ebe8b435ec71d3784793f3bbf4b93e64e17568a741aecd4c7ddeafce30",
@@ -246,6 +271,10 @@ mod tests {
r#"["OK","b1a649ebe8b435ec71d3784793f3bbf4b93e64e17568a741aecd4c7ddeafce30",hello,404]"#,
Err(Error::DecodeFailed("bad boolean value".into())),
),
+ (
+ r#"["CLOSED","sub1"]"#,
+ Err(Error::DecodeFailed("Invalid CLOSED format".into())),
+ ),
];
for (input, expected) in tests {
diff --git a/crates/enostr/src/relay/mod.rs b/crates/enostr/src/relay/mod.rs
@@ -1,225 +1,239 @@
-use ewebsock::{Options, WsEvent, WsMessage, WsReceiver, WsSender};
-use mio::net::UdpSocket;
-use std::io;
-use std::net::IpAddr;
-use std::net::{SocketAddr, SocketAddrV4};
-use std::time::{Duration, Instant};
-
-use crate::{ClientMessage, EventClientMessage, Result};
-use std::fmt;
-use std::hash::{Hash, Hasher};
-use std::net::Ipv4Addr;
-use tracing::{debug, error};
-
+mod broadcast;
+mod compaction;
+mod coordinator;
+mod identity;
+mod limits;
pub mod message;
+mod multicast;
+mod outbox;
pub mod pool;
+mod queue;
pub mod subs_debug;
-
-#[derive(Debug, Copy, Clone)]
+mod subscription;
+mod transparent;
+mod websocket;
+
+pub use broadcast::{BroadcastCache, BroadcastRelay};
+pub use identity::{
+ NormRelayUrl, OutboxSubId, RelayId, RelayReqId, RelayReqStatus, RelayType, RelayUrlPkgs,
+};
+pub use limits::{
+ RelayCoordinatorLimits, RelayLimitations, SubPass, SubPassGuardian, SubPassRevocation,
+};
+pub use multicast::{MulticastRelay, MulticastRelayCache};
+use nostrdb::Filter;
+pub use outbox::{OutboxPool, OutboxSession, OutboxSessionHandler};
+pub use queue::QueuedTasks;
+pub use subscription::{
+ FullModificationTask, ModifyFiltersTask, ModifyRelaysTask, ModifyTask, OutboxSubscriptions,
+ OutboxTask, SubscribeTask,
+};
+pub use websocket::{WebsocketConn, WebsocketRelay};
+
+#[cfg(test)]
+pub mod test_utils;
+
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum RelayStatus {
Connected,
Connecting,
Disconnected,
}
-pub struct MulticastRelay {
- last_join: Instant,
- status: RelayStatus,
- address: SocketAddrV4,
- socket: UdpSocket,
- interface: Ipv4Addr,
+enum UnownedRelay<'a> {
+ Websocket(&'a mut WebsocketRelay),
+ Multicast(&'a mut MulticastRelay),
}
-impl MulticastRelay {
- pub fn new(address: SocketAddrV4, socket: UdpSocket, interface: Ipv4Addr) -> Self {
- let last_join = Instant::now();
- let status = RelayStatus::Connected;
- MulticastRelay {
- status,
- address,
- socket,
- interface,
- last_join,
- }
- }
+/// RawEventData is the event raw data from a relay
+pub struct RawEventData<'a> {
+ pub url: &'a str,
+ pub event_json: &'a str,
+ pub relay_type: RelayImplType,
+}
- /// Multicast seems to fail every 260 seconds. We force a rejoin every 200 seconds or
- /// so to ensure we are always in the group
- pub fn rejoin(&mut self) -> Result<()> {
- self.last_join = Instant::now();
- self.status = RelayStatus::Disconnected;
- self.socket
- .leave_multicast_v4(self.address.ip(), &self.interface)?;
- self.socket
- .join_multicast_v4(self.address.ip(), &self.interface)?;
- self.status = RelayStatus::Connected;
- Ok(())
- }
+/// RelayImplType identifies whether an event came from a websocket or multicast relay.
+pub enum RelayImplType {
+ Websocket,
+ Multicast,
+}
- pub fn should_rejoin(&self) -> bool {
- (Instant::now() - self.last_join) >= Duration::from_secs(200)
- }
+pub enum RelayTask {
+ Unsubscribe,
+ Subscribe,
+}
- pub fn try_recv(&self) -> Option<WsEvent> {
- let mut buffer = [0u8; 65535];
- // Read the size header
- match self.socket.recv_from(&mut buffer) {
- Ok((size, src)) => {
- let parsed_size = u32::from_be_bytes(buffer[0..4].try_into().ok()?) as usize;
- debug!("multicast: read size {} from start of header", size - 4);
-
- if size != parsed_size + 4 {
- error!(
- "multicast: partial data received: expected {}, got {}",
- parsed_size, size
- );
- return None;
- }
-
- let text = String::from_utf8_lossy(&buffer[4..size]);
- debug!("multicast: received {} bytes from {}: {}", size, src, &text);
- Some(WsEvent::Message(WsMessage::Text(text.to_string())))
- }
- Err(e) if e.kind() == io::ErrorKind::WouldBlock => {
- // No data available, continue
- None
- }
- Err(e) => {
- error!("multicast: error receiving data: {}", e);
- None
- }
- }
+pub struct FilterMetadata {
+ filter_json_size: usize,
+ last_seen: Option<u64>,
+}
+
+pub struct MetadataFilters {
+ filters: Vec<Filter>,
+ meta: Vec<FilterMetadata>,
+}
+
+impl MetadataFilters {
+ pub fn new(filters: Vec<Filter>) -> Self {
+ let meta = filters
+ .iter()
+ .map(|f| FilterMetadata {
+ filter_json_size: f.json().ok().map(|j| j.len()).unwrap_or(0),
+ last_seen: None,
+ })
+ .collect();
+ Self { filters, meta }
}
- pub fn send(&self, msg: &EventClientMessage) -> Result<()> {
- let json = msg.to_json();
- let len = json.len();
+ pub fn json_size_sum(&self) -> usize {
+ self.meta.iter().map(|f| f.filter_json_size).sum()
+ }
- debug!("writing to multicast relay");
- let mut buf: Vec<u8> = Vec::with_capacity(4 + len);
+ pub fn since_optimize(&mut self) {
+ for (filter, meta) in self.filters.iter_mut().zip(self.meta.iter()) {
+ let Some(last_seen) = meta.last_seen else {
+ continue;
+ };
- // Write the length of the message as 4 bytes (big-endian)
- buf.extend_from_slice(&(len as u32).to_be_bytes());
+ *filter = filter.clone().since_mut(last_seen);
+ }
+ }
- // Append the JSON message bytes
- buf.extend_from_slice(json.as_bytes());
+ pub fn get_filters(&self) -> &Vec<Filter> {
+ &self.filters
+ }
- self.socket.send_to(&buf, SocketAddr::V4(self.address))?;
- Ok(())
+ #[allow(dead_code)]
+ pub fn iter(&self) -> MetadataFiltersIter<'_> {
+ MetadataFiltersIter {
+ filters: self.filters.iter(),
+ meta: self.meta.iter(),
+ }
}
-}
-pub fn setup_multicast_relay(
- wakeup: impl Fn() + Send + Sync + Clone + 'static,
-) -> Result<MulticastRelay> {
- use mio::{Events, Interest, Poll, Token};
-
- let port = 9797;
- let address = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), port);
- let multicast_ip = Ipv4Addr::new(239, 19, 88, 1);
-
- let mut socket = UdpSocket::bind(address)?;
- let interface = Ipv4Addr::UNSPECIFIED;
- let multicast_address = SocketAddrV4::new(multicast_ip, port);
-
- socket.join_multicast_v4(&multicast_ip, &interface)?;
-
- let mut poll = Poll::new()?;
- poll.registry().register(
- &mut socket,
- Token(0),
- Interest::READABLE | Interest::WRITABLE,
- )?;
-
- // wakeup our render thread when we have new stuff on the socket
- std::thread::spawn(move || {
- let mut events = Events::with_capacity(1);
- loop {
- if let Err(err) = poll.poll(&mut events, None) {
- error!("multicast socket poll error: {err}. ending multicast poller.");
- return;
- }
- wakeup();
-
- std::thread::yield_now();
+ pub fn iter_mut(&mut self) -> MetadataFiltersIterMut<'_> {
+ MetadataFiltersIterMut {
+ filters: self.filters.iter_mut(),
+ meta: self.meta.iter_mut(),
}
- });
+ }
- Ok(MulticastRelay::new(multicast_address, socket, interface))
+ #[allow(dead_code)]
+ pub fn is_empty(&self) -> bool {
+ self.filters.iter().all(|f| f.num_elements() == 0)
+ }
}
-pub struct Relay {
- pub url: nostr::RelayUrl,
- pub status: RelayStatus,
- pub sender: WsSender,
- pub receiver: WsReceiver,
+#[allow(dead_code)]
+pub struct MetadataFiltersIter<'a> {
+ filters: std::slice::Iter<'a, Filter>,
+ meta: std::slice::Iter<'a, FilterMetadata>,
}
-impl fmt::Debug for Relay {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_struct("Relay")
- .field("url", &self.url)
- .field("status", &self.status)
- .finish()
+impl<'a> Iterator for MetadataFiltersIter<'a> {
+ type Item = (&'a Filter, &'a FilterMetadata);
+
+ fn next(&mut self) -> Option<Self::Item> {
+ Some((self.filters.next()?, self.meta.next()?))
}
}
-impl Hash for Relay {
- fn hash<H: Hasher>(&self, state: &mut H) {
- // Hashes the Relay by hashing the URL
- self.url.hash(state);
- }
+pub struct MetadataFiltersIterMut<'a> {
+ filters: std::slice::IterMut<'a, Filter>,
+ meta: std::slice::IterMut<'a, FilterMetadata>,
}
-impl PartialEq for Relay {
- fn eq(&self, other: &Self) -> bool {
- self.url == other.url
+impl<'a> Iterator for MetadataFiltersIterMut<'a> {
+ type Item = (&'a mut Filter, &'a mut FilterMetadata);
+
+ fn next(&mut self) -> Option<Self::Item> {
+ Some((self.filters.next()?, self.meta.next()?))
}
}
-impl Eq for Relay {}
-
-impl Relay {
- pub fn new(url: nostr::RelayUrl, wakeup: impl Fn() + Send + Sync + 'static) -> Result<Self> {
- let status = RelayStatus::Connecting;
- let (sender, receiver) =
- ewebsock::connect_with_wakeup(url.as_str(), Options::default(), wakeup)?;
+#[cfg(test)]
+mod tests {
+ use super::*;
- Ok(Self {
- url,
- sender,
- receiver,
- status,
- })
+ fn filter_has_since(filter: &Filter, expected: u64) -> bool {
+ let json = filter.json().expect("filter json");
+ json.contains(&format!("\"since\":{}", expected))
}
- pub fn send(&mut self, msg: &ClientMessage) {
- let json = match msg.to_json() {
- Ok(json) => {
- debug!("sending {} to {}", json, self.url);
- json
- }
- Err(e) => {
- error!("error serializing json for filter: {e}");
- return;
- }
- };
-
- let txt = WsMessage::Text(json);
- self.sender.send(txt);
+ #[test]
+ fn since_optimize_applies_last_seen_to_filter() {
+ let filter = Filter::new().kinds(vec![1]).build();
+ let mut metadata_filters = MetadataFilters::new(vec![filter]);
+
+ // Initially no since
+ let json_before = metadata_filters.get_filters()[0]
+ .json()
+ .expect("filter json");
+ assert!(
+ !json_before.contains("\"since\""),
+ "filter should not have since initially"
+ );
+
+ // Set last_seen on metadata
+ metadata_filters.meta[0].last_seen = Some(12345);
+
+ // Call since_optimize
+ metadata_filters.since_optimize();
+
+ // Now filter should have since
+ assert!(
+ filter_has_since(&metadata_filters.get_filters()[0], 12345),
+ "filter should have since:12345 after optimization"
+ );
}
- pub fn connect(&mut self, wakeup: impl Fn() + Send + Sync + 'static) -> Result<()> {
- let (sender, receiver) =
- ewebsock::connect_with_wakeup(self.url.as_str(), Options::default(), wakeup)?;
- self.status = RelayStatus::Connecting;
- self.sender = sender;
- self.receiver = receiver;
- Ok(())
+ #[test]
+ fn since_optimize_skips_filters_without_last_seen() {
+ let filter1 = Filter::new().kinds(vec![1]).build();
+ let filter2 = Filter::new().kinds(vec![2]).build();
+ let mut metadata_filters = MetadataFilters::new(vec![filter1, filter2]);
+
+ // Only set last_seen on first filter
+ metadata_filters.meta[0].last_seen = Some(99999);
+
+ metadata_filters.since_optimize();
+
+ // First filter should have since
+ assert!(
+ filter_has_since(&metadata_filters.get_filters()[0], 99999),
+ "first filter should have since"
+ );
+
+ // Second filter should NOT have since
+ let json_second = metadata_filters.get_filters()[1]
+ .json()
+ .expect("filter json");
+ assert!(
+ !json_second.contains("\"since\""),
+ "second filter should not have since"
+ );
}
- pub fn ping(&mut self) {
- let msg = WsMessage::Ping(vec![]);
- self.sender.send(msg);
+ #[test]
+ fn since_optimize_overwrites_existing_since() {
+ // Create filter with initial since value
+ let filter = Filter::new().kinds(vec![1]).since(100).build();
+ let mut metadata_filters = MetadataFilters::new(vec![filter]);
+
+ // Verify initial since
+ assert!(
+ filter_has_since(&metadata_filters.get_filters()[0], 100),
+ "filter should have initial since:100"
+ );
+
+ // Set different last_seen
+ metadata_filters.meta[0].last_seen = Some(200);
+ metadata_filters.since_optimize();
+
+ // Since should be updated to new value
+ assert!(
+ filter_has_since(&metadata_filters.get_filters()[0], 200),
+ "filter should have updated since:200"
+ );
}
}
diff --git a/crates/enostr/src/relay/multicast.rs b/crates/enostr/src/relay/multicast.rs
@@ -0,0 +1,198 @@
+use ewebsock::{WsEvent, WsMessage};
+use mio::net::UdpSocket;
+use std::io;
+use std::net::IpAddr;
+use std::net::{SocketAddr, SocketAddrV4};
+use std::time::{Duration, Instant};
+
+use crate::relay::{BroadcastCache, BroadcastRelay, RawEventData, RelayImplType};
+use crate::{EventClientMessage, RelayStatus, Result, Wakeup};
+use std::net::Ipv4Addr;
+use tracing::{debug, error};
+
+pub struct MulticastRelay {
+ last_join: Instant,
+ status: RelayStatus,
+ address: SocketAddrV4,
+ socket: UdpSocket,
+ interface: Ipv4Addr,
+}
+
+impl MulticastRelay {
+ pub fn new(address: SocketAddrV4, socket: UdpSocket, interface: Ipv4Addr) -> Self {
+ let last_join = Instant::now();
+ let status = RelayStatus::Connected;
+ MulticastRelay {
+ status,
+ address,
+ socket,
+ interface,
+ last_join,
+ }
+ }
+
+ /// Multicast seems to fail every 260 seconds. We force a rejoin every 200 seconds or
+ /// so to ensure we are always in the group
+ pub fn rejoin(&mut self) -> Result<()> {
+ self.last_join = Instant::now();
+ self.status = RelayStatus::Disconnected;
+ self.socket
+ .leave_multicast_v4(self.address.ip(), &self.interface)?;
+ self.socket
+ .join_multicast_v4(self.address.ip(), &self.interface)?;
+ self.status = RelayStatus::Connected;
+ Ok(())
+ }
+
+ pub fn should_rejoin(&self) -> bool {
+ (Instant::now() - self.last_join) >= Duration::from_secs(200)
+ }
+
+ pub fn try_recv(&self) -> Option<WsEvent> {
+ let mut buffer = [0u8; 65535];
+ // Read the size header
+ match self.socket.recv_from(&mut buffer) {
+ Ok((size, src)) => {
+ let parsed_size = u32::from_be_bytes(buffer[0..4].try_into().ok()?) as usize;
+ debug!("multicast: read size {} from start of header", size - 4);
+
+ if size != parsed_size + 4 {
+ error!(
+ "multicast: partial data received: expected {}, got {}",
+ parsed_size, size
+ );
+ return None;
+ }
+
+ let text = String::from_utf8_lossy(&buffer[4..size]);
+ debug!("multicast: received {} bytes from {}: {}", size, src, &text);
+ Some(WsEvent::Message(WsMessage::Text(text.to_string())))
+ }
+ Err(e) if e.kind() == io::ErrorKind::WouldBlock => {
+ // No data available, continue
+ None
+ }
+ Err(e) => {
+ error!("multicast: error receiving data: {}", e);
+ None
+ }
+ }
+ }
+
+ pub fn send(&self, msg: &EventClientMessage) -> Result<()> {
+ let json = msg.to_json();
+ let len = json.len();
+
+ debug!("writing to multicast relay");
+ let mut buf: Vec<u8> = Vec::with_capacity(4 + len);
+
+ // Write the length of the message as 4 bytes (big-endian)
+ buf.extend_from_slice(&(len as u32).to_be_bytes());
+
+ // Append the JSON message bytes
+ buf.extend_from_slice(json.as_bytes());
+
+ self.socket.send_to(&buf, SocketAddr::V4(self.address))?;
+ Ok(())
+ }
+
+ pub fn status(&self) -> RelayStatus {
+ self.status
+ }
+}
+
+pub fn setup_multicast_relay(
+ wakeup: impl Fn() + Send + Sync + Clone + 'static,
+) -> Result<MulticastRelay> {
+ use mio::{Events, Interest, Poll, Token};
+
+ let port = 9797;
+ let address = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), port);
+ let multicast_ip = Ipv4Addr::new(239, 19, 88, 1);
+
+ let mut socket = UdpSocket::bind(address)?;
+ let interface = Ipv4Addr::UNSPECIFIED;
+ let multicast_address = SocketAddrV4::new(multicast_ip, port);
+
+ socket.join_multicast_v4(&multicast_ip, &interface)?;
+
+ let mut poll = Poll::new()?;
+ poll.registry().register(
+ &mut socket,
+ Token(0),
+ Interest::READABLE | Interest::WRITABLE,
+ )?;
+
+ // wakeup our render thread when we have new stuff on the socket
+ std::thread::spawn(move || {
+ let mut events = Events::with_capacity(1);
+ loop {
+ if let Err(err) = poll.poll(&mut events, None) {
+ error!("multicast socket poll error: {err}. ending multicast poller.");
+ return;
+ }
+ wakeup();
+
+ std::thread::yield_now();
+ }
+ });
+
+ Ok(MulticastRelay::new(multicast_address, socket, interface))
+}
+/// MulticastRelayCache lazily initializes the multicast connection and buffers
+/// outbound events until a connection is available.
+#[derive(Default)]
+pub struct MulticastRelayCache {
+ multicast: Option<MulticastRelay>,
+ cache: BroadcastCache,
+}
+
+impl MulticastRelayCache {
+ pub fn is_setup(&self) -> bool {
+ self.multicast.is_some()
+ }
+
+ pub fn try_setup<W>(&mut self, wakeup: &W)
+ where
+ W: Wakeup,
+ {
+ let wake = wakeup.clone();
+ let Ok(multicast) = setup_multicast_relay(move || wake.wake()) else {
+ return;
+ };
+
+ self.multicast = Some(multicast);
+ }
+
+ pub fn broadcast(&mut self, msg: EventClientMessage) {
+ BroadcastRelay::multicast(self.multicast.as_mut(), &mut self.cache).broadcast(msg);
+ }
+
+ #[profiling::function]
+ pub fn try_recv<F>(&mut self, mut process: F)
+ where
+ for<'a> F: FnMut(RawEventData<'a>),
+ {
+ let Some(multicast) = &mut self.multicast else {
+ return;
+ };
+
+ if multicast.should_rejoin() {
+ if let Err(e) = multicast.rejoin() {
+ tracing::error!("multicast: rejoin error: {e}");
+ }
+ }
+
+ BroadcastRelay::multicast(Some(multicast), &mut self.cache).try_flush_queue();
+
+ let Some(WsEvent::Message(WsMessage::Text(text))) = multicast.try_recv() else {
+ return;
+ };
+
+ process(RawEventData {
+ url: "multicast",
+ event_json: &text,
+ relay_type: RelayImplType::Multicast,
+ });
+ }
+}
diff --git a/crates/enostr/src/relay/outbox/handler.rs b/crates/enostr/src/relay/outbox/handler.rs
@@ -0,0 +1,83 @@
+use hashbrown::HashSet;
+use nostrdb::{Filter, Note};
+
+use crate::relay::outbox::OutboxPool;
+use crate::relay::{NormRelayUrl, OutboxSubId, RelayId, RelayUrlPkgs};
+use crate::{relay::outbox::OutboxSession, Wakeup};
+
+/// OutboxSessionHandler is the RAII wrapper apps use to stage subscription
+/// updates; dropping it flushes the recorded operations into the OutboxPool.
+pub struct OutboxSessionHandler<'a, W>
+where
+ W: Wakeup,
+{
+ pub outbox: &'a mut OutboxPool,
+ pub(crate) session: OutboxSession,
+ pub(crate) wakeup: W,
+}
+
+impl<'a, W> Drop for OutboxSessionHandler<'a, W>
+where
+ W: Wakeup,
+{
+ fn drop(&mut self) {
+ let session = std::mem::take(&mut self.session);
+ self.outbox.ingest_session(session, &self.wakeup);
+ }
+}
+
+impl<'a, W> OutboxSessionHandler<'a, W>
+where
+ W: Wakeup,
+{
+ pub fn new(outbox: &'a mut OutboxPool, wakeup: W) -> Self {
+ Self {
+ outbox,
+ session: OutboxSession::default(),
+ wakeup,
+ }
+ }
+
+ pub fn subscribe(&mut self, filters: Vec<Filter>, urls: RelayUrlPkgs) -> OutboxSubId {
+ let new_id = self.outbox.registry.next();
+ self.session.subscribe(new_id, filters, urls);
+ new_id
+ }
+
+ pub fn oneshot(&mut self, filters: Vec<Filter>, urls: RelayUrlPkgs) {
+ let new_id = self.outbox.registry.next();
+ self.session.oneshot(new_id, filters, urls);
+ }
+
+ pub fn modify_filters(&mut self, id: OutboxSubId, filters: Vec<Filter>) {
+ self.session.new_filters(id, filters);
+ }
+
+ pub fn modify_relays(&mut self, id: OutboxSubId, relays: HashSet<NormRelayUrl>) {
+ self.session.new_relays(id, relays);
+ }
+
+ pub fn unsubscribe(&mut self, id: OutboxSubId) {
+ self.session.unsubscribe(id);
+ }
+
+ pub fn broadcast_note(&mut self, note: &Note, relays: Vec<RelayId>) {
+ self.outbox.broadcast_note(note, relays, &self.wakeup);
+ }
+
+ /// Eject the session from the handler.
+ /// This is only necessary between initialization of the app and the first frame
+ pub fn export(mut self) -> OutboxSession {
+ let session = std::mem::take(&mut self.session);
+ drop(self);
+ session
+ }
+
+ pub fn import(outbox: &'a mut OutboxPool, session: OutboxSession, wakeup: W) -> Self {
+ Self {
+ outbox,
+ session,
+ wakeup,
+ }
+ }
+}
diff --git a/crates/enostr/src/relay/outbox/mod.rs b/crates/enostr/src/relay/outbox/mod.rs
@@ -0,0 +1,960 @@
+use hashbrown::{hash_map::RawEntryMut, HashMap, HashSet};
+use nostrdb::{Filter, Note};
+use std::{
+ collections::{hash_map::DefaultHasher, BTreeMap},
+ hash::{Hash, Hasher},
+ time::{Duration, Instant, SystemTime, UNIX_EPOCH},
+};
+
+use crate::{
+ relay::{
+ coordinator::{CoordinationData, CoordinationSession, EoseIds},
+ websocket::WebsocketRelay,
+ ModifyTask, MulticastRelayCache, NormRelayUrl, OutboxSubId, OutboxSubscriptions,
+ OutboxTask, RawEventData, RelayId, RelayLimitations, RelayReqStatus, RelayStatus,
+ RelayType,
+ },
+ EventClientMessage, Wakeup, WebsocketConn,
+};
+
+mod handler;
+mod session;
+
+pub use handler::OutboxSessionHandler;
+pub use session::OutboxSession;
+
+const KEEPALIVE_PING_RATE: Duration = Duration::from_secs(45);
+const MAX_RECONNECT_DELAY: Duration = Duration::from_secs(30 * 60); // 30 minutes
+
+/// Computes the deterministic base delay for a given attempt number.
+/// Formula: `5s * 2^attempt`, capped at [`MAX_RECONNECT_DELAY`].
+fn base_reconnect_delay(attempt: u32) -> Duration {
+ let secs = 5u64.checked_shl(attempt).unwrap_or(u64::MAX);
+ Duration::from_secs(secs).min(MAX_RECONNECT_DELAY)
+}
+
+fn reconnect_jitter_seed(relay_url: &nostr::RelayUrl, attempt: u32) -> u64 {
+ let now_nanos = SystemTime::now()
+ .duration_since(UNIX_EPOCH)
+ .unwrap_or_default()
+ .as_nanos() as u64;
+ let mut hasher = DefaultHasher::new();
+ relay_url.hash(&mut hasher);
+ attempt.hash(&mut hasher);
+ now_nanos.hash(&mut hasher);
+ hasher.finish()
+}
+
+/// Returns the reconnect delay for the given attempt count.
+///
+/// Uses the exponential base delay as the primary component and adds up to 25%
+/// additive jitter (via relay/time mixed seed) to spread out simultaneous
+/// reconnects without undermining the exponential delay itself.
+fn next_reconnect_duration(attempt: u32, jitter_seed: u64) -> Duration {
+ let base = base_reconnect_delay(attempt);
+ let jitter_ceiling = base / 4;
+ let jitter = if jitter_ceiling.is_zero() {
+ Duration::ZERO
+ } else {
+ let jitter_ceiling_nanos = jitter_ceiling.as_nanos() as u64;
+ Duration::from_nanos(jitter_seed % jitter_ceiling_nanos)
+ };
+ (base + jitter).min(MAX_RECONNECT_DELAY)
+}
+
+/// OutboxPool owns the active relay coordinators and applies staged subscription
+/// mutations to them each frame.
+pub struct OutboxPool {
+ registry: SubRegistry,
+ relays: HashMap<NormRelayUrl, CoordinationData>,
+ subs: OutboxSubscriptions,
+ multicast: MulticastRelayCache,
+}
+
+impl Default for OutboxPool {
+ fn default() -> Self {
+ Self {
+ registry: SubRegistry { next_request_id: 0 },
+ relays: HashMap::new(),
+ multicast: Default::default(),
+ subs: Default::default(),
+ }
+ }
+}
+
+impl OutboxPool {
+ fn remove_completed_oneshots(&mut self, ids: HashSet<OutboxSubId>) {
+ for id in ids {
+ if self.all_have_eose(&id) {
+ self.subs.remove(&id);
+ }
+ }
+ }
+
+ #[profiling::function]
+ fn ingest_session<W>(&mut self, session: OutboxSession, wakeup: &W)
+ where
+ W: Wakeup,
+ {
+ let sessions = self.collect_sessions(session);
+ let mut pending_eose_ids = EoseIds::default();
+
+ // Process relays with sessions
+ let sessions_keys = if sessions.is_empty() {
+ HashSet::new()
+ } else {
+ let sessions_keys: HashSet<NormRelayUrl> = sessions.keys().cloned().collect();
+ let session_eose_ids = self.process_sessions(sessions, wakeup);
+ pending_eose_ids.absorb(session_eose_ids);
+ sessions_keys
+ };
+
+ // Also process EOSE for relays that have pending EOSE but no session
+ // tasks. We only remove oneshots after all relay legs have reached EOSE.
+ let mut eose_state = EoseState {
+ relays: &mut self.relays,
+ subs: &mut self.subs,
+ };
+ let extra_eose_ids =
+ process_pending_eose_for_non_session_relays(&mut eose_state, &sessions_keys);
+ pending_eose_ids.absorb(extra_eose_ids);
+
+ optimize_since_for_fully_eosed_subs(&mut eose_state, pending_eose_ids.normal);
+ self.remove_completed_oneshots(pending_eose_ids.oneshots);
+ }
+
+ /// Translates a session's queued tasks into per-relay coordination sessions.
+ #[profiling::function]
+ fn collect_sessions(
+ &mut self,
+ session: OutboxSession,
+ ) -> HashMap<NormRelayUrl, CoordinationSession> {
+ if session.tasks.is_empty() {
+ return HashMap::new();
+ }
+
+ let mut sessions: HashMap<NormRelayUrl, CoordinationSession> = HashMap::new();
+ 'a: for (id, task) in session.tasks {
+ match task {
+ OutboxTask::Modify(modify) => 's: {
+ let Some(sub) = self.subs.get_mut(&id) else {
+ continue 'a;
+ };
+
+ match &modify {
+ ModifyTask::Filters(_) => {
+ for relay in &sub.relays {
+ get_session(&mut sessions, relay)
+ .subscribe(id, sub.relay_type == RelayType::Transparent);
+ }
+ }
+ ModifyTask::Relays(modify_relays_task) => {
+ let relays_to_remove = sub.relays.difference(&modify_relays_task.0);
+ for relay in relays_to_remove {
+ get_session(&mut sessions, relay).unsubscribe(id);
+ }
+
+ let relays_to_add = modify_relays_task.0.difference(&sub.relays);
+ for relay in relays_to_add {
+ get_session(&mut sessions, relay)
+ .subscribe(id, sub.relay_type == RelayType::Transparent);
+ }
+ }
+ ModifyTask::Full(full_modification_task) => {
+ let prev_relays = &sub.relays;
+ let new_relays = &full_modification_task.relays;
+ let relays_to_remove = prev_relays.difference(new_relays);
+ for relay in relays_to_remove {
+ get_session(&mut sessions, relay).unsubscribe(id);
+ }
+
+ if new_relays.is_empty() {
+ self.subs.remove(&id);
+ break 's;
+ }
+
+ for relay in new_relays {
+ get_session(&mut sessions, relay)
+ .subscribe(id, sub.relay_type == RelayType::Transparent);
+ }
+ }
+ }
+
+ sub.ingest_task(modify);
+ }
+ OutboxTask::Unsubscribe => {
+ let Some(sub) = self.subs.get_mut(&id) else {
+ continue 'a;
+ };
+
+ for relay_id in &sub.relays {
+ get_session(&mut sessions, relay_id).unsubscribe(id);
+ }
+
+ self.subs.remove(&id);
+ }
+ OutboxTask::Oneshot(subscribe) => {
+ for relay in &subscribe.relays.urls {
+ get_session(&mut sessions, relay)
+ .subscribe(id, subscribe.relays.use_transparent);
+ }
+ self.subs.new_subscription(id, subscribe, true);
+ }
+ OutboxTask::Subscribe(subscribe) => {
+ for relay in &subscribe.relays.urls {
+ get_session(&mut sessions, relay)
+ .subscribe(id, subscribe.relays.use_transparent);
+ }
+
+ self.subs.new_subscription(id, subscribe, false);
+ }
+ }
+ }
+
+ sessions
+ }
+
+ /// Ensures relay coordinators exist and feed them the coordination sessions.
+ #[profiling::function]
+ fn process_sessions<W>(
+ &mut self,
+ sessions: HashMap<NormRelayUrl, CoordinationSession>,
+ wakeup: &W,
+ ) -> EoseIds
+ where
+ W: Wakeup,
+ {
+ let mut pending_eoses = EoseIds::default();
+ for (relay_id, session) in sessions {
+ let relay = match self.relays.raw_entry_mut().from_key(&relay_id) {
+ RawEntryMut::Occupied(e) => 's: {
+ let res = e.into_mut();
+
+ if res.websocket.is_some() {
+ break 's res;
+ }
+
+ let Ok(websocket) = WebsocketConn::from_wakeup(relay_id.into(), wakeup.clone())
+ else {
+ // still can't generate websocket
+ break 's res;
+ };
+
+ res.websocket = Some(WebsocketRelay::new(websocket));
+
+ res
+ }
+ RawEntryMut::Vacant(e) => {
+ let coordinator = build_relay(relay_id.clone(), wakeup.clone());
+ let (_, res) = e.insert(relay_id, coordinator);
+ res
+ }
+ };
+ let eose_ids = relay.ingest_session(&self.subs, session);
+
+ pending_eoses.absorb(eose_ids);
+ }
+
+ pending_eoses
+ }
+
+ pub fn start_session<'a, W>(&'a mut self, wakeup: W) -> OutboxSessionHandler<'a, W>
+ where
+ W: Wakeup,
+ {
+ OutboxSessionHandler {
+ outbox: self,
+ session: OutboxSession::default(),
+ wakeup,
+ }
+ }
+
+ pub fn broadcast_note<W>(&mut self, note: &Note, relays: Vec<RelayId>, wakeup: &W)
+ where
+ W: Wakeup,
+ {
+ for relay_id in relays {
+ let Ok(msg) = EventClientMessage::try_from(note) else {
+ continue;
+ };
+ match relay_id {
+ RelayId::Websocket(norm_relay_url) => {
+ let rel = self.ensure_relay(&norm_relay_url, wakeup);
+ rel.send_event(msg);
+ }
+ RelayId::Multicast => {
+ if !self.multicast.is_setup() {
+ self.multicast.try_setup(wakeup);
+ };
+
+ self.multicast.broadcast(msg);
+ }
+ }
+ }
+ }
+
+ #[profiling::function]
+ pub fn keepalive_ping(&mut self, wakeup: impl Fn() + Send + Sync + Clone + 'static) {
+ for relay in self.relays.values_mut() {
+ let now = Instant::now();
+
+ let Some(websocket) = &mut relay.websocket else {
+ continue;
+ };
+
+ match websocket.conn.status {
+ RelayStatus::Disconnected => {
+ let reconnect_at =
+ websocket.last_connect_attempt + websocket.retry_connect_after;
+ if now > reconnect_at {
+ websocket.last_connect_attempt = now;
+ websocket.reconnect_attempt = websocket.reconnect_attempt.saturating_add(1);
+ let jitter_seed =
+ reconnect_jitter_seed(&websocket.conn.url, websocket.reconnect_attempt);
+ let next_duration =
+ next_reconnect_duration(websocket.reconnect_attempt, jitter_seed);
+ tracing::debug!(
+ "reconnect attempt {}, backing off for {:?}",
+ websocket.reconnect_attempt,
+ next_duration,
+ );
+ websocket.retry_connect_after = next_duration;
+ if let Err(err) = websocket.conn.connect(wakeup.clone()) {
+ tracing::error!("error connecting to relay: {}", err);
+ }
+ }
+ }
+ RelayStatus::Connected => {
+ websocket.reconnect_attempt = 0;
+ websocket.retry_connect_after = WebsocketRelay::initial_reconnect_duration();
+
+ let should_ping = now - websocket.last_ping > KEEPALIVE_PING_RATE;
+ if should_ping {
+ tracing::trace!("pinging {}", websocket.conn.url);
+ websocket.conn.ping();
+ websocket.last_ping = Instant::now();
+ }
+ }
+ RelayStatus::Connecting => {}
+ }
+ }
+ }
+
+ fn ensure_relay<W>(&mut self, relay_id: &NormRelayUrl, wakeup: &W) -> &mut CoordinationData
+ where
+ W: Wakeup,
+ {
+ match self.relays.raw_entry_mut().from_key(relay_id) {
+ RawEntryMut::Occupied(entry) => entry.into_mut(),
+ RawEntryMut::Vacant(entry) => {
+ let (_, res) = entry.insert(
+ relay_id.clone(),
+ build_relay(relay_id.clone(), wakeup.clone()),
+ );
+ res
+ }
+ }
+ }
+
+ pub fn status(&self, id: &OutboxSubId) -> HashMap<&NormRelayUrl, RelayReqStatus> {
+ let mut status = HashMap::new();
+ for (url, relay) in &self.relays {
+ let Some(res) = relay.req_status(id) else {
+ continue;
+ };
+ status.insert(url, res);
+ }
+
+ status
+ }
+
+ pub fn websocket_statuses(&self) -> BTreeMap<&NormRelayUrl, RelayStatus> {
+ let mut status = BTreeMap::new();
+
+ for (url, relay) in &self.relays {
+ let relay_status = if let Some(websocket) = &relay.websocket {
+ websocket.conn.status
+ } else {
+ RelayStatus::Disconnected
+ };
+ status.insert(url, relay_status);
+ }
+
+ status
+ }
+
+ pub fn has_eose(&self, id: &OutboxSubId) -> bool {
+ for relay in self.relays.values() {
+ if relay.req_status(id) == Some(RelayReqStatus::Eose) {
+ return true;
+ }
+ }
+
+ false
+ }
+
+ pub fn all_have_eose(&self, id: &OutboxSubId) -> bool {
+ for relay in self.relays.values() {
+ let Some(status) = relay.req_status(id) else {
+ continue;
+ };
+ if status != RelayReqStatus::Eose {
+ return false;
+ }
+ }
+
+ true
+ }
+
+ /// Returns a reference to the filters for the given subscription ID, if the subscription exists.
+ pub fn filters(&self, id: &OutboxSubId) -> Option<&Vec<Filter>> {
+ self.subs.view(id).map(|v| v.filters.get_filters())
+ }
+
+ #[profiling::function]
+ pub fn try_recv<F>(&mut self, mut max_notes: usize, mut process: F)
+ where
+ for<'a> F: FnMut(RawEventData<'a>),
+ {
+ 's: while max_notes > 0 {
+ let mut received_any = false;
+
+ for relay in self.relays.values_mut() {
+ let resp = relay.try_recv(&self.subs, &mut process);
+
+ if !resp.received_event {
+ continue;
+ }
+
+ received_any = true;
+
+ if resp.event_was_nostr_note {
+ max_notes = max_notes.saturating_sub(1);
+ if max_notes == 0 {
+ break 's;
+ }
+ }
+ }
+
+ if !received_any {
+ break;
+ }
+ }
+
+ self.multicast.try_recv(process);
+ }
+}
+
+struct EoseState<'a> {
+ relays: &'a mut HashMap<NormRelayUrl, CoordinationData>,
+ subs: &'a mut OutboxSubscriptions,
+}
+
+fn unix_now_secs() -> Option<u64> {
+ SystemTime::now()
+ .duration_since(UNIX_EPOCH)
+ .ok()
+ .map(|d| d.as_secs())
+}
+
+fn sub_all_relays_have_eose(state: &EoseState<'_>, id: &OutboxSubId) -> bool {
+ let Some(sub) = state.subs.get(id) else {
+ return false;
+ };
+ if sub.relays.is_empty() {
+ return false;
+ }
+
+ for relay_id in &sub.relays {
+ let Some(relay) = state.relays.get(relay_id) else {
+ return false;
+ };
+ if relay.req_status(id) != Some(RelayReqStatus::Eose) {
+ return false;
+ }
+ }
+
+ true
+}
+
+fn optimize_since_for_fully_eosed_subs(state: &mut EoseState<'_>, ids: HashSet<OutboxSubId>) {
+ let Some(now) = unix_now_secs() else {
+ return;
+ };
+
+ for id in ids {
+ // Since optimization is only safe after every relay leg for this
+ // subscription has reached EOSE at least once.
+ if !sub_all_relays_have_eose(state, &id) {
+ continue;
+ }
+
+ if let Some(sub) = state.subs.get_mut(&id) {
+ sub.see_all(now);
+ sub.filters.since_optimize();
+ }
+ }
+}
+
+fn process_pending_eose_for_non_session_relays(
+ state: &mut EoseState<'_>,
+ sessions_keys: &HashSet<NormRelayUrl>,
+) -> EoseIds {
+ let mut pending_eoses = EoseIds::default();
+
+ for (relay_id, relay) in state.relays.iter_mut() {
+ if sessions_keys.contains(relay_id) {
+ continue;
+ }
+
+ let eose_ids = relay.ingest_session(state.subs, CoordinationSession::default());
+ pending_eoses.absorb(eose_ids);
+ }
+
+ pending_eoses
+}
+
+struct SubRegistry {
+ next_request_id: u64,
+}
+
+impl SubRegistry {
+ pub fn next(&mut self) -> OutboxSubId {
+ let i = self.next_request_id;
+ self.next_request_id += 1;
+ OutboxSubId(i)
+ }
+}
+
+pub fn get_session<'a>(
+ map: &'a mut HashMap<NormRelayUrl, CoordinationSession>,
+ id: &NormRelayUrl,
+) -> &'a mut CoordinationSession {
+ match map.raw_entry_mut().from_key(id) {
+ RawEntryMut::Occupied(e) => e.into_mut(),
+ RawEntryMut::Vacant(e) => {
+ let session = CoordinationSession::default();
+ let (_, res) = e.insert(id.clone(), session);
+ res
+ }
+ }
+}
+
+fn build_relay<W>(relay_id: NormRelayUrl, wakeup: W) -> CoordinationData
+where
+ W: Wakeup,
+{
+ CoordinationData::new(
+ RelayLimitations::default(), // TODO(kernelkind): add actual limitations
+ relay_id,
+ wakeup,
+ )
+}
+
+#[cfg(test)]
+mod tests {
+ use hashbrown::HashSet;
+ use nostrdb::Filter;
+
+ use super::*;
+ use crate::relay::{
+ coordinator::CoordinationTask,
+ test_utils::{filters_json, trivial_filter, MockWakeup},
+ RelayUrlPkgs,
+ };
+
+ /// Ensures the subscription registry always yields unique IDs.
+ #[test]
+ fn registry_generates_unique_ids() {
+ let mut registry = SubRegistry { next_request_id: 0 };
+
+ let id1 = registry.next();
+ let id2 = registry.next();
+ let id3 = registry.next();
+
+ assert_ne!(id1, id2);
+ assert_ne!(id2, id3);
+ assert_ne!(id1, id3);
+ }
+
+ // ==================== OutboxPool tests ====================
+
+ /// Default pool has no relays or subscriptions.
+ #[test]
+ fn outbox_pool_default_empty() {
+ let pool = OutboxPool::default();
+ assert!(pool.relays.is_empty());
+ // Verify no subscriptions by checking that a lookup returns empty status
+ assert!(pool.status(&OutboxSubId(0)).is_empty());
+ }
+
+ /// has_eose returns false when no relays are tracking the request.
+ #[test]
+ fn outbox_pool_has_eose_false_when_empty() {
+ let pool = OutboxPool::default();
+ assert!(!pool.has_eose(&OutboxSubId(0)));
+ }
+
+ /// status() returns empty map for unknown request IDs.
+ #[test]
+ fn outbox_pool_status_empty_for_unknown() {
+ let pool = OutboxPool::default();
+ let status = pool.status(&OutboxSubId(999));
+ assert!(status.is_empty());
+ }
+
+ /// websocket_statuses() is empty before any relays connect.
+ #[test]
+ fn outbox_pool_websocket_statuses_empty_initially() {
+ let pool = OutboxPool::default();
+ let statuses = pool.websocket_statuses();
+ assert!(statuses.is_empty());
+ }
+
+ /// Full modifications should unsubscribe old relays and resubscribe new ones using the updated filters.
+ #[test]
+ fn full_modification_updates_sessions_with_new_filters() {
+ let mut pool = OutboxPool::default();
+ let wakeup = MockWakeup::default();
+ let relay_a = NormRelayUrl::new("wss://relay-a.example.com").unwrap();
+ let relay_b = NormRelayUrl::new("wss://relay-b.example.com").unwrap();
+
+ let mut urls = HashSet::new();
+ urls.insert(relay_a.clone());
+ let new_sub_id = {
+ let mut handler = pool.start_session(wakeup.clone());
+ handler.subscribe(trivial_filter(), RelayUrlPkgs::new(urls))
+ };
+
+ {
+ let sub = pool
+ .subs
+ .get_mut(&new_sub_id)
+ .expect("subscription should be registered");
+ assert_eq!(sub.relays.len(), 1);
+ assert!(sub.relays.contains(&relay_a));
+ assert!(!sub.is_oneshot);
+ assert_eq!(sub.relay_type, RelayType::Compaction);
+ }
+
+ let sessions = {
+ let mut updated_relays = HashSet::new();
+ updated_relays.insert(relay_b.clone());
+
+ let mut handler = pool.start_session(wakeup);
+ handler.modify_filters(
+ new_sub_id,
+ vec![Filter::new().kinds(vec![3]).limit(1).build()],
+ );
+ handler.modify_relays(new_sub_id, updated_relays);
+ let session = handler.export();
+ pool.collect_sessions(session)
+ };
+
+ let old_task = sessions
+ .get(&relay_a)
+ .and_then(|session| session.tasks.get(&new_sub_id))
+ .expect("expected a task for relay relay_a");
+ assert!(matches!(old_task, CoordinationTask::Unsubscribe));
+
+ let new_task = sessions
+ .get(&relay_b)
+ .and_then(|session| session.tasks.get(&new_sub_id))
+ .expect("expected a task for relay relay_b");
+ assert!(matches!(new_task, CoordinationTask::CompactionSub));
+ }
+
+ /// Base delay doubles on each attempt until it reaches the configured cap.
+ #[test]
+ fn reconnect_base_delay_doubles_with_cap() {
+ assert_eq!(base_reconnect_delay(0), Duration::from_secs(5));
+ assert_eq!(base_reconnect_delay(1), Duration::from_secs(10));
+ assert_eq!(base_reconnect_delay(2), Duration::from_secs(20));
+ assert_eq!(base_reconnect_delay(3), Duration::from_secs(40));
+ assert_eq!(base_reconnect_delay(4), Duration::from_secs(80));
+ assert_eq!(base_reconnect_delay(5), Duration::from_secs(160));
+ assert_eq!(base_reconnect_delay(6), Duration::from_secs(320));
+ assert_eq!(base_reconnect_delay(7), Duration::from_secs(640));
+ assert_eq!(base_reconnect_delay(8), Duration::from_secs(1280));
+ assert_eq!(base_reconnect_delay(9), MAX_RECONNECT_DELAY);
+ // Saturates at cap for any large attempt count.
+ assert_eq!(base_reconnect_delay(100), MAX_RECONNECT_DELAY);
+ }
+
+ /// Jittered delay is always >= the base and never exceeds base * 1.25 or the cap.
+ #[test]
+ fn reconnect_jitter_within_bounds() {
+ for attempt in [0u32, 1, 3, 8, 9, 50, 100] {
+ let base = base_reconnect_delay(attempt);
+ let max_with_jitter = (base + (base / 4)).min(MAX_RECONNECT_DELAY);
+ for sample in 0u64..20 {
+ let jittered = next_reconnect_duration(attempt, 0xBAD5EED ^ sample);
+ assert!(
+ jittered >= base,
+ "jittered {jittered:?} < base {base:?} at attempt {attempt}"
+ );
+ assert!(
+ jittered <= max_with_jitter,
+ "jittered {jittered:?} exceeds max-with-jitter {max_with_jitter:?} at attempt {attempt}"
+ );
+ }
+ }
+ }
+
+ /// Oneshot requests route to compaction mode by default.
+ #[test]
+ fn oneshot_routes_to_compaction() {
+ let mut pool = OutboxPool::default();
+ let relay = NormRelayUrl::new("wss://relay-oneshot.example.com").unwrap();
+ let mut relays = HashSet::new();
+ relays.insert(relay.clone());
+ let filters = vec![Filter::new().kinds(vec![1]).limit(2).build()];
+ let id = OutboxSubId(42);
+
+ let mut session = OutboxSession::default();
+ session.oneshot(id, filters.clone(), RelayUrlPkgs::new(relays));
+
+ let sessions = pool.collect_sessions(session);
+
+ let relay_task = sessions
+ .get(&relay)
+ .and_then(|session| session.tasks.get(&id))
+ .expect("expected task for oneshot relay");
+ assert!(matches!(relay_task, CoordinationTask::CompactionSub));
+ }
+
+ /// Unsubscribing from a multi-relay subscription emits unsubscribe tasks for each relay.
+ #[test]
+ fn unsubscribe_targets_all_relays() {
+ let mut pool = OutboxPool::default();
+ let relay_a = NormRelayUrl::new("wss://relay-a.example.com").unwrap();
+ let relay_b = NormRelayUrl::new("wss://relay-b.example.com").unwrap();
+ let id = OutboxSubId(42);
+
+ // Subscribe to both relays
+ let mut urls = HashSet::new();
+ urls.insert(relay_a.clone());
+ urls.insert(relay_b.clone());
+
+ let mut session = OutboxSession::default();
+ session.subscribe(id, trivial_filter(), RelayUrlPkgs::new(urls));
+ pool.collect_sessions(session);
+
+ // Unsubscribe
+ let mut session = OutboxSession::default();
+ session.unsubscribe(id);
+ let sessions = pool.collect_sessions(session);
+
+ // Both relays should receive unsubscribe tasks
+ let task_a = sessions.get(&relay_a).and_then(|s| s.tasks.get(&id));
+ let task_b = sessions.get(&relay_b).and_then(|s| s.tasks.get(&id));
+
+ assert!(matches!(task_a, Some(CoordinationTask::Unsubscribe)));
+ assert!(matches!(task_b, Some(CoordinationTask::Unsubscribe)));
+ }
+
+ /// Subscriptions with use_transparent=true route to transparent mode.
+ #[test]
+ fn subscribe_transparent_mode() {
+ let mut pool = OutboxPool::default();
+ let relay = NormRelayUrl::new("wss://relay-transparent.example.com").unwrap();
+ let id = OutboxSubId(5);
+
+ let mut urls = HashSet::new();
+ urls.insert(relay.clone());
+ let mut pkgs = RelayUrlPkgs::new(urls);
+ pkgs.use_transparent = true;
+
+ let mut session = OutboxSession::default();
+ session.subscribe(id, trivial_filter(), pkgs);
+ let sessions = pool.collect_sessions(session);
+
+ let task = sessions.get(&relay).and_then(|s| s.tasks.get(&id));
+ assert!(matches!(task, Some(CoordinationTask::TransparentSub)));
+ }
+
+ /// Modifying filters should re-subscribe the routed relays with the new filters.
+ #[test]
+ fn modify_filters_reissues_subscribe_for_existing_relays() {
+ let mut pool = OutboxPool::default();
+ let wakeup = MockWakeup::default();
+ let relay = NormRelayUrl::new("wss://relay-modify.example.com").unwrap();
+
+ let mut urls = HashSet::new();
+ urls.insert(relay.clone());
+ let sub_id = {
+ let mut handler = pool.start_session(wakeup.clone());
+ handler.subscribe(trivial_filter(), RelayUrlPkgs::new(urls))
+ };
+
+ let (sessions, expected_json) = {
+ let mut handler = pool.start_session(wakeup);
+ let updated_filters = vec![Filter::new().kinds(vec![7]).limit(2).build()];
+ let expected_json = filters_json(&updated_filters);
+ handler.modify_filters(sub_id, updated_filters);
+ let session = handler.export();
+ (pool.collect_sessions(session), expected_json)
+ };
+
+ let view = pool.subs.view(&sub_id).expect("updated subscription view");
+ let stored_json = filters_json(view.filters.get_filters());
+ assert_eq!(stored_json, expected_json);
+
+ let task = sessions
+ .get(&relay)
+ .and_then(|session| session.tasks.get(&sub_id))
+ .expect("expected coordination task");
+ assert!(matches!(task, CoordinationTask::CompactionSub));
+ }
+
+ /// Modifying relays should unsubscribe removed relays and subscribe new ones.
+ #[test]
+ fn modify_relays_differs_routing_sets() {
+ let mut pool = OutboxPool::default();
+ let wakeup = MockWakeup::default();
+ let relay_a = NormRelayUrl::new("wss://relay-diff-a.example.com").unwrap();
+ let relay_b = NormRelayUrl::new("wss://relay-diff-b.example.com").unwrap();
+
+ let mut urls = HashSet::new();
+ urls.insert(relay_a.clone());
+ let sub_id = {
+ let mut handler = pool.start_session(wakeup.clone());
+ handler.subscribe(trivial_filter(), RelayUrlPkgs::new(urls))
+ };
+
+ let sessions = {
+ let mut handler = pool.start_session(wakeup);
+ let mut new_urls = HashSet::new();
+ new_urls.insert(relay_b.clone());
+ handler.modify_relays(sub_id, new_urls);
+ let session = handler.export();
+ pool.collect_sessions(session)
+ };
+
+ let unsub_task = sessions
+ .get(&relay_a)
+ .and_then(|session| session.tasks.get(&sub_id))
+ .expect("missing relay_a task");
+ assert!(matches!(unsub_task, CoordinationTask::Unsubscribe));
+
+ let sub_task = sessions
+ .get(&relay_b)
+ .and_then(|session| session.tasks.get(&sub_id))
+ .expect("missing relay_b task");
+ assert!(matches!(sub_task, CoordinationTask::CompactionSub));
+ }
+
+ /// Full modifications that end up with no relays should drop the subscription entirely.
+ #[test]
+ fn modify_full_with_empty_relays_removes_subscription() {
+ let mut pool = OutboxPool::default();
+ let wakeup = MockWakeup::default();
+ let relay = NormRelayUrl::new("wss://relay-empty.example.com").unwrap();
+
+ let mut urls = HashSet::new();
+ urls.insert(relay.clone());
+ let sub_id = {
+ let mut handler = pool.start_session(wakeup.clone());
+ handler.subscribe(trivial_filter(), RelayUrlPkgs::new(urls))
+ };
+
+ let sessions = {
+ let mut handler = pool.start_session(wakeup);
+ handler.modify_filters(sub_id, vec![Filter::new().kinds(vec![9]).limit(1).build()]);
+ handler.modify_relays(sub_id, HashSet::new());
+ let session = handler.export();
+ pool.collect_sessions(session)
+ };
+
+ let task = sessions
+ .get(&relay)
+ .and_then(|session| session.tasks.get(&sub_id))
+ .expect("expected unsubscribe for relay");
+ assert!(matches!(task, CoordinationTask::Unsubscribe));
+ assert!(
+ pool.subs.get_mut(&sub_id).is_none(),
+ "subscription metadata should be removed"
+ );
+ }
+
+ // ==================== OutboxSessionHandler tests ====================
+
+ /// The first subscribe issued via handler should return OutboxSubId(0).
+ #[test]
+ fn outbox_session_handler_subscribe_returns_id() {
+ let mut pool = OutboxPool::default();
+ let wakeup = MockWakeup::default();
+
+ let id = {
+ let mut handler = pool.start_session(wakeup);
+ handler.subscribe(trivial_filter(), RelayUrlPkgs::new(HashSet::new()))
+ };
+
+ assert_eq!(id, OutboxSubId(0));
+ }
+
+ /// Separate sessions should continue incrementing subscription IDs globally.
+ #[test]
+ fn outbox_session_handler_multiple_subscribes_unique_ids() {
+ let mut pool = OutboxPool::default();
+ let wakeup = MockWakeup::default();
+
+ let id1 = {
+ let mut handler = pool.start_session(wakeup.clone());
+ handler.subscribe(trivial_filter(), RelayUrlPkgs::new(HashSet::new()))
+ };
+
+ let id2 = {
+ let mut handler = pool.start_session(wakeup);
+ handler.subscribe(trivial_filter(), RelayUrlPkgs::new(HashSet::new()))
+ };
+
+ assert_ne!(id1, id2);
+ assert_eq!(id1, OutboxSubId(0));
+ assert_eq!(id2, OutboxSubId(1));
+ }
+
+ /// Exporting/importing a session should carry over any pending tasks intact.
+ #[test]
+ fn outbox_session_handler_export_and_import() {
+ let mut pool = OutboxPool::default();
+ let wakeup = MockWakeup::default();
+
+ // Create a handler and export its session
+ let handler = pool.start_session(wakeup.clone());
+ let session = handler.export();
+
+ // Should be empty since we didn't do anything
+ assert!(session.tasks.is_empty());
+
+ // Import the session back
+ let _handler = OutboxSessionHandler::import(&mut pool, session, wakeup);
+ }
+
+ // ==================== get_session tests ====================
+
+ /// get_session should create a new coordination entry when missing.
+ #[test]
+ fn get_session_creates_new_if_missing() {
+ let mut map: HashMap<NormRelayUrl, CoordinationSession> = HashMap::new();
+ let url = NormRelayUrl::new("wss://relay.example.com").unwrap();
+
+ let _session = get_session(&mut map, &url);
+
+ // Should have created a new session
+ assert!(map.contains_key(&url));
+ }
+
+ /// get_session returns the pre-existing coordination session.
+ #[test]
+ fn get_session_returns_existing() {
+ let mut map: HashMap<NormRelayUrl, CoordinationSession> = HashMap::new();
+ let url = NormRelayUrl::new("wss://relay.example.com").unwrap();
+
+ let session = get_session(&mut map, &url);
+ session.subscribe(OutboxSubId(0), false);
+
+ // Map should still have exactly one entry
+ assert_eq!(map.len(), 1);
+ }
+}
diff --git a/crates/enostr/src/relay/outbox/session.rs b/crates/enostr/src/relay/outbox/session.rs
@@ -0,0 +1,496 @@
+use hashbrown::{hash_map::Entry, HashMap, HashSet};
+use nostrdb::Filter;
+
+use crate::relay::{
+ FullModificationTask, ModifyFiltersTask, ModifyRelaysTask, ModifyTask, NormRelayUrl,
+ OutboxSubId, OutboxTask, RelayUrlPkgs, SubscribeTask,
+};
+
+/// OutboxSession records subscription mutations for the current frame before they
+/// are applied to the relay coordinators.
+#[derive(Default)]
+pub struct OutboxSession {
+ pub tasks: HashMap<OutboxSubId, OutboxTask>,
+}
+
+impl OutboxSession {
+ #[profiling::function]
+ pub fn new_filters(&mut self, id: OutboxSubId, mut new_filters: Vec<Filter>) {
+ filters_prune_empty(&mut new_filters);
+ if new_filters.is_empty() {
+ self.unsubscribe(id);
+ return;
+ }
+
+ let entry = self.tasks.entry(id);
+
+ let mut entry = match entry {
+ Entry::Occupied(occupied_entry) => {
+ if matches!(occupied_entry.get(), OutboxTask::Oneshot(_)) {
+ // we don't modify oneshots
+ return;
+ }
+ occupied_entry
+ }
+ Entry::Vacant(vacant_entry) => {
+ vacant_entry.insert(OutboxTask::Modify(ModifyTask::Filters(ModifyFiltersTask(
+ new_filters,
+ ))));
+ return;
+ }
+ };
+
+ match entry.get_mut() {
+ OutboxTask::Modify(modify) => match modify {
+ ModifyTask::Filters(_) => {
+ self.tasks.insert(
+ id,
+ OutboxTask::Modify(ModifyTask::Filters(ModifyFiltersTask(new_filters))),
+ );
+ }
+ ModifyTask::Relays(modify_relays_task) => {
+ let relays = std::mem::take(&mut modify_relays_task.0);
+ *entry.get_mut() = OutboxTask::Modify(ModifyTask::Full(FullModificationTask {
+ filters: new_filters,
+ relays,
+ }));
+ }
+ ModifyTask::Full(full) => {
+ full.filters = new_filters;
+ }
+ },
+ OutboxTask::Unsubscribe => {
+ self.tasks.insert(
+ id,
+ OutboxTask::Modify(ModifyTask::Filters(ModifyFiltersTask(new_filters))),
+ );
+ }
+ OutboxTask::Oneshot(oneshot) => {
+ oneshot.filters = new_filters;
+ }
+ OutboxTask::Subscribe(subscribe_task) => {
+ subscribe_task.filters = new_filters;
+ }
+ }
+ }
+ #[profiling::function]
+ pub fn new_relays(&mut self, id: OutboxSubId, new_urls: HashSet<NormRelayUrl>) {
+ let entry = self.tasks.entry(id);
+
+ let mut entry = match entry {
+ Entry::Occupied(occupied_entry) => {
+ let task = occupied_entry.get();
+
+ if matches!(task, OutboxTask::Oneshot(_)) {
+ // we don't modify oneshots
+ return;
+ }
+
+ occupied_entry
+ }
+ Entry::Vacant(vacant_entry) => {
+ vacant_entry.insert(OutboxTask::Modify(ModifyTask::Relays(ModifyRelaysTask(
+ new_urls,
+ ))));
+ return;
+ }
+ };
+
+ match entry.get_mut() {
+ OutboxTask::Modify(modify) => {
+ match modify {
+ ModifyTask::Filters(modify_filters_task) => {
+ let filters = std::mem::take(&mut modify_filters_task.0); // moves out, leaves empty/default
+ *entry.get_mut() =
+ OutboxTask::Modify(ModifyTask::Full(FullModificationTask {
+ filters,
+ relays: new_urls,
+ }));
+ }
+ ModifyTask::Relays(_) => {
+ self.tasks.insert(
+ id,
+ OutboxTask::Modify(ModifyTask::Relays(ModifyRelaysTask(new_urls))),
+ );
+ }
+ ModifyTask::Full(full_modification_task) => {
+ full_modification_task.relays = new_urls;
+ }
+ }
+ }
+ OutboxTask::Unsubscribe => {
+ self.tasks.insert(
+ id,
+ OutboxTask::Modify(ModifyTask::Relays(ModifyRelaysTask(new_urls))),
+ );
+ }
+ OutboxTask::Oneshot(oneshot) => {
+ oneshot.relays.urls = new_urls;
+ }
+ OutboxTask::Subscribe(subscribe_task) => {
+ subscribe_task.relays.urls = new_urls;
+ }
+ }
+ }
+
+ pub fn subscribe(&mut self, id: OutboxSubId, mut filters: Vec<Filter>, urls: RelayUrlPkgs) {
+ filters_prune_empty(&mut filters);
+ if filters.is_empty() {
+ return;
+ }
+
+ self.tasks.insert(
+ id,
+ OutboxTask::Subscribe(SubscribeTask {
+ filters,
+ relays: urls,
+ }),
+ );
+ }
+
+ pub fn oneshot(&mut self, id: OutboxSubId, mut filters: Vec<Filter>, urls: RelayUrlPkgs) {
+ filters_prune_empty(&mut filters);
+ if filters.is_empty() {
+ return;
+ }
+
+ self.tasks.insert(
+ id,
+ OutboxTask::Oneshot(SubscribeTask {
+ filters,
+ relays: urls,
+ }),
+ );
+ }
+
+ pub fn unsubscribe(&mut self, id: OutboxSubId) {
+ self.tasks.insert(id, OutboxTask::Unsubscribe);
+ }
+}
+
+fn filters_prune_empty(filters: &mut Vec<Filter>) {
+ filters.retain(|f| f.num_elements() != 0);
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::relay::test_utils::{expect_task, trivial_filter};
+
+ use super::*;
+
+ // ==================== OutboxSession tests ====================
+
+ /// Verifies a freshly created session has no pending tasks.
+ #[test]
+ fn outbox_session_default_empty() {
+ let session = OutboxSession::default();
+ assert!(session.tasks.is_empty());
+ }
+
+ /// Drops subscribe/oneshot requests whose filters are empty or prune down to nothing.
+ #[test]
+ fn outbox_session_subscribe_empty() {
+ let mut session = OutboxSession::default();
+ let urls = RelayUrlPkgs::new(HashSet::new());
+
+ session.subscribe(OutboxSubId(0), vec![Filter::new().build()], urls.clone());
+ assert!(session.tasks.is_empty());
+
+ session.subscribe(OutboxSubId(0), vec![], urls.clone());
+ assert!(session.tasks.is_empty());
+
+ session.oneshot(OutboxSubId(0), vec![Filter::new().build()], urls.clone());
+ assert!(session.tasks.is_empty());
+
+ session.oneshot(OutboxSubId(0), vec![], urls);
+ assert!(session.tasks.is_empty());
+ }
+
+ /// Stores subscribe tasks when filters and relays are provided.
+ #[test]
+ fn outbox_session_subscribe() {
+ let mut session = OutboxSession::default();
+ let urls = RelayUrlPkgs::new(HashSet::new());
+
+ session.subscribe(OutboxSubId(0), trivial_filter(), urls);
+
+ assert!(matches!(
+ expect_task(&session, OutboxSubId(0)),
+ OutboxTask::Subscribe(_)
+ ));
+ }
+
+ /// Stores oneshot tasks when filters and relays are provided.
+ #[test]
+ fn outbox_session_oneshot() {
+ let mut session = OutboxSession::default();
+ let urls = RelayUrlPkgs::new(HashSet::new());
+
+ session.oneshot(OutboxSubId(0), trivial_filter(), urls);
+
+ assert!(matches!(
+ expect_task(&session, OutboxSubId(0)),
+ OutboxTask::Oneshot(_)
+ ));
+ }
+
+ /// Records unsubscribe operations on demand.
+ #[test]
+ fn outbox_session_unsubscribe() {
+ let mut session = OutboxSession::default();
+
+ session.unsubscribe(OutboxSubId(42));
+
+ assert!(matches!(
+ expect_task(&session, OutboxSubId(42)),
+ OutboxTask::Unsubscribe
+ ));
+ }
+
+ /// Pushing filters first results in a Modify(Filters) task.
+ #[test]
+ fn outbox_session_new_filters_creates_modify_filters() {
+ let mut session = OutboxSession::default();
+
+ session.new_filters(OutboxSubId(0), trivial_filter());
+
+ assert!(matches!(
+ expect_task(&session, OutboxSubId(0)),
+ OutboxTask::Modify(ModifyTask::Filters(_))
+ ));
+ }
+
+ /// Pushing relays first results in a Modify(Relays) task.
+ #[test]
+ fn outbox_session_new_relays_creates_modify_relays() {
+ let mut session = OutboxSession::default();
+
+ session.new_relays(OutboxSubId(0), HashSet::new());
+
+ assert!(matches!(
+ expect_task(&session, OutboxSubId(0)),
+ OutboxTask::Modify(ModifyTask::Relays(_))
+ ));
+ }
+
+ /// Mixing filters then relays converges to a Modify(Full) task.
+ #[test]
+ fn outbox_session_merges_filters_and_relays_to_full_modification() {
+ let mut session = OutboxSession::default();
+
+ // First add filters
+ session.new_filters(OutboxSubId(0), trivial_filter());
+
+ // Then add relays - should merge to Full modification
+ session.new_relays(OutboxSubId(0), HashSet::new());
+
+ assert!(matches!(
+ expect_task(&session, OutboxSubId(0)),
+ OutboxTask::Modify(ModifyTask::Full(_))
+ ));
+ }
+
+ /// Mixing relays then filters also converges to a Modify(Full) task.
+ #[test]
+ fn outbox_session_merges_relays_and_filters_to_full_modification() {
+ let mut session = OutboxSession::default();
+
+ // First add relays
+ session.new_relays(OutboxSubId(0), HashSet::new());
+
+ // Then add filters - should merge to Full modification
+ session.new_filters(OutboxSubId(0), trivial_filter());
+
+ assert!(matches!(
+ expect_task(&session, OutboxSubId(0)),
+ OutboxTask::Modify(ModifyTask::Full(_))
+ ));
+ }
+
+ // this should never happen in practice though
+ /// Subscribe commands override previously staged filter changes.
+ #[test]
+ fn outbox_session_subscribe_overwrites_modify_filters() {
+ let mut session = OutboxSession::default();
+ let urls = RelayUrlPkgs::new(HashSet::new());
+
+ session.new_filters(OutboxSubId(0), trivial_filter());
+ session.subscribe(
+ OutboxSubId(0),
+ vec![Filter::new().kinds(vec![3]).build()],
+ urls,
+ );
+
+ assert!(matches!(
+ expect_task(&session, OutboxSubId(0)),
+ OutboxTask::Subscribe(_)
+ ));
+ }
+
+ /// Unsubscribe issued after subscribe should take precedence.
+ #[test]
+ fn outbox_session_unsubscribe_after_subscribe() {
+ let mut session = OutboxSession::default();
+ let urls = RelayUrlPkgs::new(HashSet::new());
+
+ session.subscribe(OutboxSubId(0), trivial_filter(), urls);
+ session.unsubscribe(OutboxSubId(0));
+
+ assert!(matches!(
+ expect_task(&session, OutboxSubId(0)),
+ OutboxTask::Unsubscribe
+ ));
+ }
+
+ /// Adding filters after an unsubscribe restarts the task as Modify(Filters).
+ #[test]
+ fn outbox_session_new_filters_after_unsubscribe() {
+ let mut session = OutboxSession::default();
+
+ session.unsubscribe(OutboxSubId(0));
+ session.new_filters(OutboxSubId(0), trivial_filter());
+
+ // Filters should overwrite unsubscribe
+ assert!(matches!(
+ expect_task(&session, OutboxSubId(0)),
+ OutboxTask::Modify(ModifyTask::Filters(_))
+ ));
+ }
+
+ /// Updating filters of a Full modification replaces its filter list.
+ #[test]
+ fn outbox_session_update_full_modification_filters() {
+ let mut session = OutboxSession::default();
+
+ // Create full modification
+ session.new_filters(OutboxSubId(0), trivial_filter());
+ session.new_relays(OutboxSubId(0), HashSet::new());
+
+ // Update filters on the full modification
+ session.new_filters(
+ OutboxSubId(0),
+ vec![
+ Filter::new().kinds(vec![3]).build(),
+ Filter::new().kinds(vec![1]).build(),
+ ],
+ );
+
+ match expect_task(&session, OutboxSubId(0)) {
+ OutboxTask::Modify(ModifyTask::Full(fm)) => {
+ assert_eq!(fm.filters.len(), 2);
+ }
+ _ => panic!("Expected Modify(Full)"),
+ }
+ }
+
+ /// Updating relays of a Full modification replaces its relay set.
+ #[test]
+ fn outbox_session_update_full_modification_relays() {
+ let mut session = OutboxSession::default();
+
+ // Create full modification
+ session.new_filters(OutboxSubId(0), trivial_filter());
+ session.new_relays(OutboxSubId(0), HashSet::new());
+
+ // Update relays on the full modification
+ let mut new_urls = HashSet::new();
+ new_urls.insert(NormRelayUrl::new("wss://relay.example.com").unwrap());
+ session.new_relays(OutboxSubId(0), new_urls);
+
+ match expect_task(&session, OutboxSubId(0)) {
+ OutboxTask::Modify(ModifyTask::Full(fm)) => {
+ assert!(!fm.relays.is_empty());
+ }
+ _ => panic!("Expected Modify(Full)"),
+ }
+ }
+
+ /// Attempting to modify oneshot filters leaves them unchanged.
+ #[test]
+ fn outbox_session_update_oneshot_filters() {
+ let mut session = OutboxSession::default();
+ let urls = RelayUrlPkgs::new(HashSet::new());
+
+ session.oneshot(OutboxSubId(0), trivial_filter(), urls);
+ session.new_filters(
+ OutboxSubId(0),
+ vec![
+ Filter::new().kinds([1]).build(),
+ Filter::new().kinds([3]).build(),
+ ],
+ );
+
+ match expect_task(&session, OutboxSubId(0)) {
+ OutboxTask::Oneshot(task) => {
+ assert_eq!(task.filters.len(), 1);
+ }
+ _ => panic!("Expected Oneshot task"),
+ }
+ }
+
+ /// Updating filters on a Subscribe task replaces the stored filters.
+ #[test]
+ fn outbox_session_update_subscribe_filters() {
+ let mut session = OutboxSession::default();
+ let urls = RelayUrlPkgs::new(HashSet::new());
+
+ session.subscribe(OutboxSubId(0), trivial_filter(), urls);
+ session.new_filters(
+ OutboxSubId(0),
+ vec![
+ Filter::new().kinds([1]).build(),
+ Filter::new().kinds([3]).build(),
+ ],
+ );
+
+ match expect_task(&session, OutboxSubId(0)) {
+ OutboxTask::Subscribe(task) => {
+ assert_eq!(task.filters.len(), 2);
+ }
+ _ => panic!("Expected Subscribe task"),
+ }
+ }
+
+ /// Updating relays on a Subscribe task replaces the stored relays.
+ #[test]
+ fn outbox_session_update_subscribe_relays() {
+ let mut session = OutboxSession::default();
+ let urls = RelayUrlPkgs::new(HashSet::new());
+
+ session.subscribe(OutboxSubId(0), trivial_filter(), urls);
+
+ let mut new_urls = HashSet::new();
+ new_urls.insert(NormRelayUrl::new("wss://relay.example.com").unwrap());
+ session.new_relays(OutboxSubId(0), new_urls);
+
+ match expect_task(&session, OutboxSubId(0)) {
+ OutboxTask::Subscribe(task) => {
+ assert!(!task.relays.urls.is_empty());
+ }
+ _ => panic!("Expected Subscribe task"),
+ }
+ }
+
+ /// Attempting to modify oneshot relays leaves them unchanged.
+ #[test]
+ fn outbox_session_update_oneshot_relays() {
+ let mut session = OutboxSession::default();
+ let urls = RelayUrlPkgs::new(HashSet::new());
+
+ session.oneshot(OutboxSubId(0), trivial_filter(), urls);
+
+ let mut new_urls = HashSet::new();
+ new_urls.insert(NormRelayUrl::new("wss://relay.example.com").unwrap());
+ session.new_relays(OutboxSubId(0), new_urls);
+
+ match expect_task(&session, OutboxSubId(0)) {
+ OutboxTask::Oneshot(task) => {
+ assert!(
+ task.relays.urls.is_empty(),
+ "cannot make modifications on oneshot"
+ );
+ }
+ _ => panic!("Expected Oneshot task"),
+ }
+ }
+}
diff --git a/crates/enostr/src/relay/pool.rs b/crates/enostr/src/relay/pool.rs
@@ -1,4 +1,5 @@
-use crate::relay::{setup_multicast_relay, MulticastRelay, Relay, RelayStatus};
+use crate::relay::multicast::{setup_multicast_relay, MulticastRelay};
+use crate::relay::{RelayStatus, WebsocketConn, WebsocketRelay};
use crate::{ClientMessage, Error, Result};
use nostrdb::Filter;
@@ -37,17 +38,10 @@ pub enum PoolRelay {
Multicast(MulticastRelay),
}
-pub struct WebsocketRelay {
- pub relay: Relay,
- pub last_ping: Instant,
- pub last_connect_attempt: Instant,
- pub retry_connect_after: Duration,
-}
-
impl PoolRelay {
pub fn url(&self) -> &str {
match self {
- Self::Websocket(wsr) => wsr.relay.url.as_str(),
+ Self::Websocket(wsr) => wsr.conn.url.as_str(),
Self::Multicast(_wsr) => "multicast",
}
}
@@ -55,7 +49,7 @@ impl PoolRelay {
pub fn set_status(&mut self, status: RelayStatus) {
match self {
Self::Websocket(wsr) => {
- wsr.relay.status = status;
+ wsr.conn.status = status;
}
Self::Multicast(_mcr) => {}
}
@@ -63,22 +57,22 @@ impl PoolRelay {
pub fn try_recv(&self) -> Option<WsEvent> {
match self {
- Self::Websocket(recvr) => recvr.relay.receiver.try_recv(),
+ Self::Websocket(recvr) => recvr.conn.receiver.try_recv(),
Self::Multicast(recvr) => recvr.try_recv(),
}
}
pub fn status(&self) -> RelayStatus {
match self {
- Self::Websocket(wsr) => wsr.relay.status,
- Self::Multicast(mcr) => mcr.status,
+ Self::Websocket(wsr) => wsr.conn.status,
+ Self::Multicast(mcr) => mcr.status(),
}
}
pub fn send(&mut self, msg: &ClientMessage) -> Result<()> {
match self {
Self::Websocket(wsr) => {
- wsr.relay.send(msg);
+ wsr.conn.send(msg);
Ok(())
}
@@ -96,7 +90,7 @@ impl PoolRelay {
self.send(&ClientMessage::req(subid, filter))
}
- pub fn websocket(relay: Relay) -> Self {
+ pub fn websocket(relay: WebsocketConn) -> Self {
Self::Websocket(WebsocketRelay::new(relay))
}
@@ -105,21 +99,6 @@ impl PoolRelay {
}
}
-impl WebsocketRelay {
- pub fn new(relay: Relay) -> Self {
- Self {
- relay,
- last_ping: Instant::now(),
- last_connect_attempt: Instant::now(),
- retry_connect_after: Self::initial_reconnect_duration(),
- }
- }
-
- pub fn initial_reconnect_duration() -> Duration {
- Duration::from_secs(5)
- }
-}
-
pub struct RelayPool {
pub relays: Vec<PoolRelay>,
pub ping_rate: Duration,
@@ -228,7 +207,7 @@ impl RelayPool {
match relay {
PoolRelay::Multicast(_) => {}
PoolRelay::Websocket(relay) => {
- match relay.relay.status {
+ match relay.conn.status {
RelayStatus::Disconnected => {
let reconnect_at =
relay.last_connect_attempt + relay.retry_connect_after;
@@ -240,7 +219,7 @@ impl RelayPool {
relay.retry_connect_after, next_duration
);
relay.retry_connect_after = next_duration;
- if let Err(err) = relay.relay.connect(wakeup.clone()) {
+ if let Err(err) = relay.conn.connect(wakeup.clone()) {
error!("error connecting to relay: {}", err);
}
} else {
@@ -254,8 +233,8 @@ impl RelayPool {
let should_ping = now - relay.last_ping > self.ping_rate;
if should_ping {
- trace!("pinging {}", relay.relay.url);
- relay.relay.ping();
+ trace!("pinging {}", relay.conn.url);
+ relay.conn.ping();
relay.last_ping = Instant::now();
}
}
@@ -312,7 +291,7 @@ impl RelayPool {
if self.has(&url) {
return Ok(());
}
- let relay = Relay::new(
+ let relay = WebsocketConn::new(
nostr::RelayUrl::parse(url).map_err(|_| Error::InvalidRelayUrl)?,
wakeup,
)?;
@@ -382,7 +361,7 @@ impl RelayPool {
trace!("pong {}", relay.url());
match relay {
PoolRelay::Websocket(wsr) => {
- wsr.relay.sender.send(WsMessage::Pong(bs.to_owned()));
+ wsr.conn.sender.send(WsMessage::Pong(bs.to_owned()));
}
PoolRelay::Multicast(_mcr) => {}
}
diff --git a/crates/enostr/src/relay/queue.rs b/crates/enostr/src/relay/queue.rs
@@ -0,0 +1,48 @@
+use std::collections::{btree_set, BTreeSet};
+
+use crate::relay::{OutboxSubId, RelayTask};
+
+/// QueuedTasks stores subscription work that could not be scheduled immediately.
+#[derive(Default)]
+pub struct QueuedTasks {
+ tasks: BTreeSet<OutboxSubId>,
+}
+
+impl QueuedTasks {
+ pub fn add(&mut self, id: OutboxSubId, task: RelayTask) {
+ match task {
+ RelayTask::Unsubscribe => {
+            // BTreeSet keeps ids sorted (not insertion order), which is fine here; an Unsubscribe simply cancels a still-queued Subscribe.
+ if !self.tasks.contains(&id) {
+ return;
+ }
+ self.tasks.remove(&id);
+ }
+ RelayTask::Subscribe => {
+ self.tasks.insert(id);
+ }
+ }
+ }
+
+ pub fn pop(&mut self) -> Option<OutboxSubId> {
+ self.tasks.pop_last()
+ }
+
+ pub fn is_empty(&self) -> bool {
+ self.tasks.is_empty()
+ }
+
+ #[allow(dead_code)]
+ pub fn len(&self) -> usize {
+ self.tasks.len()
+ }
+}
+
+impl IntoIterator for QueuedTasks {
+ type Item = OutboxSubId;
+ type IntoIter = btree_set::IntoIter<OutboxSubId>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ self.tasks.into_iter()
+ }
+}
diff --git a/crates/enostr/src/relay/subs_debug.rs b/crates/enostr/src/relay/subs_debug.rs
@@ -60,6 +60,9 @@ impl From<RelayEvent<'_>> for OwnedRelayEvent {
RelayMessage::Eose(s) => format!("EOSE:{s}"),
RelayMessage::Event(_, s) => format!("EVENT:{s}"),
RelayMessage::Notice(s) => format!("NOTICE:{s}"),
+ RelayMessage::Closed(sub_id, message) => {
+ format!("CLOSED:{sub_id}:{message}")
+ }
};
OwnedRelayEvent::Message(relay_msg)
}
@@ -249,6 +252,9 @@ fn calculate_relay_message_size(message: &RelayMessage) -> usize {
RelayMessage::Eose(str_ref)
| RelayMessage::Event(str_ref, _)
| RelayMessage::Notice(str_ref) => mem::size_of_val(message) + str_ref.len(),
+ RelayMessage::Closed(sub_id, reason) => {
+ mem::size_of_val(message) + sub_id.len() + reason.len()
+ }
}
}
diff --git a/crates/enostr/src/relay/subscription.rs b/crates/enostr/src/relay/subscription.rs
@@ -0,0 +1,445 @@
+use hashbrown::{HashMap, HashSet};
+use nostrdb::Filter;
+
+use crate::relay::{MetadataFilters, NormRelayUrl, OutboxSubId, RelayType, RelayUrlPkgs};
+
+pub struct OutboxSubscription {
+ pub relays: HashSet<NormRelayUrl>,
+ pub filters: MetadataFilters,
+ json_size: usize,
+ pub is_oneshot: bool,
+ pub relay_type: RelayType,
+}
+
+impl OutboxSubscription {
+ pub fn see_all(&mut self, at: u64) {
+ for (_, meta) in self.filters.iter_mut() {
+ meta.last_seen = Some(at);
+ }
+ }
+
+ pub fn ingest_task(&mut self, task: ModifyTask) {
+ match task {
+ ModifyTask::Filters(modify_filters_task) => {
+ self.filters = MetadataFilters::new(modify_filters_task.0);
+ self.json_size = self.filters.json_size_sum();
+ }
+ ModifyTask::Relays(modify_relays_task) => {
+ self.relays = modify_relays_task.0;
+ }
+ ModifyTask::Full(full_modification_task) => {
+ self.filters = MetadataFilters::new(full_modification_task.filters);
+ self.json_size = self.filters.json_size_sum();
+ self.relays = full_modification_task.relays;
+ }
+ }
+ }
+}
+
+#[derive(Default)]
+pub struct OutboxSubscriptions {
+ subs: HashMap<OutboxSubId, OutboxSubscription>,
+}
+
+impl OutboxSubscriptions {
+ pub fn view(&self, id: &OutboxSubId) -> Option<SubscriptionView<'_>> {
+ let sub = self.subs.get(id)?;
+
+ Some(SubscriptionView {
+ id: *id,
+ filters: &sub.filters,
+ json_size: sub.json_size,
+ is_oneshot: sub.is_oneshot,
+ })
+ }
+
+ pub fn json_size(&self, id: &OutboxSubId) -> Option<usize> {
+ self.subs.get(id).map(|s| s.json_size)
+ }
+
+ pub fn subset_oneshot(&self, ids: &HashSet<OutboxSubId>) -> HashSet<OutboxSubId> {
+ ids.iter()
+ .filter(|id| self.subs.get(*id).is_some_and(|s| s.is_oneshot))
+ .copied()
+ .collect()
+ }
+
+ pub fn is_oneshot(&self, id: &OutboxSubId) -> bool {
+ self.subs.get(id).is_some_and(|s| s.is_oneshot)
+ }
+
+ pub fn json_size_sum(&self, ids: &HashSet<OutboxSubId>) -> usize {
+ ids.iter()
+ .map(|id| self.subs.get(id).map_or(0, |s| s.json_size))
+ .sum()
+ }
+
+ pub fn filters_all(&self, ids: &HashSet<OutboxSubId>) -> Vec<Filter> {
+ ids.iter()
+ .filter_map(|id| self.subs.get(id))
+ .flat_map(|sub| sub.filters.filters.iter().cloned())
+ .collect()
+ }
+
+ pub fn get_mut(&mut self, id: &OutboxSubId) -> Option<&mut OutboxSubscription> {
+ self.subs.get_mut(id)
+ }
+
+ pub fn get(&self, id: &OutboxSubId) -> Option<&OutboxSubscription> {
+ self.subs.get(id)
+ }
+
+ pub fn remove(&mut self, id: &OutboxSubId) {
+ self.subs.remove(id);
+ }
+
+ pub fn new_subscription(&mut self, id: OutboxSubId, task: SubscribeTask, is_oneshot: bool) {
+ let filters = MetadataFilters::new(task.filters);
+ let json_size = filters.json_size_sum();
+ self.subs.insert(
+ id,
+ OutboxSubscription {
+ relays: task.relays.urls,
+ filters,
+ json_size,
+ is_oneshot,
+ relay_type: if task.relays.use_transparent {
+ RelayType::Transparent
+ } else {
+ RelayType::Compaction
+ },
+ },
+ );
+ }
+}
+
+pub struct SubscriptionView<'a> {
+ pub id: OutboxSubId,
+ pub filters: &'a MetadataFilters,
+ #[allow(dead_code)]
+ pub json_size: usize,
+ #[allow(dead_code)]
+ pub is_oneshot: bool,
+}
+
+pub enum OutboxTask {
+ Modify(ModifyTask),
+ Subscribe(SubscribeTask),
+ Unsubscribe,
+ Oneshot(SubscribeTask),
+}
+
+pub enum ModifyTask {
+ Filters(ModifyFiltersTask),
+ Relays(ModifyRelaysTask),
+ Full(FullModificationTask),
+}
+
+#[derive(Default)]
+pub struct ModifyFiltersTask(pub Vec<Filter>);
+
+pub struct ModifyRelaysTask(pub HashSet<NormRelayUrl>);
+
+pub struct FullModificationTask {
+ pub filters: Vec<Filter>,
+ pub relays: HashSet<NormRelayUrl>,
+}
+
+pub struct SubscribeTask {
+ pub filters: Vec<Filter>,
+ pub relays: RelayUrlPkgs,
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::relay::RelayUrlPkgs;
+ use crate::relay::{FullModificationTask, ModifyFiltersTask};
+
+ fn subscribe_task(filters: Vec<Filter>, urls: RelayUrlPkgs) -> SubscribeTask {
+ SubscribeTask {
+ filters,
+ relays: urls,
+ }
+ }
+
+ fn relay_urls(url: &str) -> HashSet<NormRelayUrl> {
+ let mut urls = HashSet::new();
+ let relay = NormRelayUrl::new(url).unwrap();
+ urls.insert(relay);
+ urls
+ }
+
+ /// new_subscription should persist relay metadata and expose it via view().
+ #[test]
+ fn new_subscription_records_metadata() {
+ let mut subs = OutboxSubscriptions::default();
+ let mut pkgs = RelayUrlPkgs::new(relay_urls("wss://relay-meta.example.com"));
+ pkgs.use_transparent = true;
+ let filters = vec![Filter::new().kinds(vec![1]).limit(4).build()];
+ let id = OutboxSubId(7);
+
+ subs.new_subscription(id, subscribe_task(filters.clone(), pkgs), true);
+
+ let view = subs.view(&id).expect("subscription view");
+ assert_eq!(view.id, id);
+ assert!(view.is_oneshot);
+ assert_eq!(view.filters.get_filters().len(), filters.len());
+ assert!(view.json_size > 0);
+
+ let sub = subs.get_mut(&id).expect("subscription metadata");
+ assert_eq!(sub.relays.len(), 1);
+ assert_eq!(sub.relay_type, RelayType::Transparent);
+ }
+
+ /// subset_oneshot should only return IDs corresponding to oneshot subscriptions.
+ #[test]
+ fn subset_oneshot_filters_ids() {
+ let mut subs = OutboxSubscriptions::default();
+ let filters = vec![Filter::new().kinds(vec![1]).build()];
+ let id_a = OutboxSubId(1);
+ let id_b = OutboxSubId(2);
+ subs.new_subscription(
+ id_a,
+ subscribe_task(
+ filters.clone(),
+ RelayUrlPkgs::new(relay_urls("wss://relay-a.example")),
+ ),
+ false,
+ );
+ subs.new_subscription(
+ id_b,
+ subscribe_task(
+ filters,
+ RelayUrlPkgs::new(relay_urls("wss://relay-b.example")),
+ ),
+ true,
+ );
+
+ let mut ids = HashSet::new();
+ ids.insert(id_a);
+ ids.insert(id_b);
+
+ let oneshots = subs.subset_oneshot(&ids);
+ let expected = {
+ let mut s = HashSet::new();
+ s.insert(id_b);
+ s
+ };
+ assert_eq!(oneshots, expected);
+ }
+
+ /// json_size_sum aggregates the JSON payload size for the requested subscriptions.
+ #[test]
+ fn json_size_sum_accumulates_sizes() {
+ let mut subs = OutboxSubscriptions::default();
+ let filters = vec![Filter::new().kinds(vec![1]).build()];
+ let id_a = OutboxSubId(1);
+ let id_b = OutboxSubId(2);
+ subs.new_subscription(
+ id_a,
+ subscribe_task(
+ filters.clone(),
+ RelayUrlPkgs::new(relay_urls("wss://relay-json-a.example")),
+ ),
+ false,
+ );
+ subs.new_subscription(
+ id_b,
+ subscribe_task(
+ filters,
+ RelayUrlPkgs::new(relay_urls("wss://relay-json-b.example")),
+ ),
+ false,
+ );
+
+ let mut ids = HashSet::new();
+ ids.insert(id_a);
+ ids.insert(id_b);
+
+ let sum = subs.json_size_sum(&ids);
+ let expected = subs.json_size(&id_a).unwrap() + subs.json_size(&id_b).unwrap();
+ assert_eq!(sum, expected);
+ }
+
+ /// see_all should mark every filter as seen at the provided timestamp.
+ #[test]
+ fn see_all_marks_filters() {
+ let mut subs = OutboxSubscriptions::default();
+ let id = OutboxSubId(8);
+ subs.new_subscription(
+ id,
+ subscribe_task(
+ vec![
+ Filter::new().kinds(vec![1]).limit(2).build(),
+ Filter::new().kinds(vec![4]).limit(1).build(),
+ ],
+ RelayUrlPkgs::new(relay_urls("wss://relay-see.example")),
+ ),
+ false,
+ );
+
+ let timestamp = 12345;
+ let sub = subs.get_mut(&id).expect("subscription metadata");
+ sub.see_all(timestamp);
+
+ assert!(sub
+ .filters
+ .iter()
+ .all(|(_, meta)| meta.last_seen == Some(timestamp)));
+ }
+
+ /// ingest_task should update json_size when filters are modified.
+ #[test]
+ fn ingest_task_updates_json_size_on_filter_change() {
+ let mut subs = OutboxSubscriptions::default();
+ let id = OutboxSubId(9);
+ let small_filters = vec![Filter::new().kinds(vec![1]).build()];
+ subs.new_subscription(
+ id,
+ subscribe_task(
+ small_filters,
+ RelayUrlPkgs::new(relay_urls("wss://relay-ingest.example")),
+ ),
+ false,
+ );
+
+ let original_size = subs.json_size(&id).unwrap();
+
+ // Modify with larger filters
+ let large_filters = vec![
+ Filter::new().kinds(vec![1, 2, 3, 4, 5]).limit(100).build(),
+ Filter::new().kinds(vec![6, 7, 8]).limit(50).build(),
+ ];
+ let sub = subs.get_mut(&id).unwrap();
+ sub.ingest_task(ModifyTask::Filters(ModifyFiltersTask(large_filters)));
+
+ let new_size = subs.json_size(&id).unwrap();
+ assert_ne!(
+ original_size, new_size,
+ "json_size should change after filter modification"
+ );
+ assert!(
+ new_size > original_size,
+ "larger filters should have larger json_size"
+ );
+ }
+
+ /// ingest_task with Full modification should update json_size.
+ #[test]
+ fn ingest_task_updates_json_size_on_full_change() {
+ let mut subs = OutboxSubscriptions::default();
+ let id = OutboxSubId(10);
+ let small_filters = vec![Filter::new().kinds(vec![1]).build()];
+ subs.new_subscription(
+ id,
+ subscribe_task(
+ small_filters,
+ RelayUrlPkgs::new(relay_urls("wss://relay-full.example")),
+ ),
+ false,
+ );
+
+ let original_size = subs.json_size(&id).unwrap();
+
+ // Full modification with larger filters
+ let large_filters = vec![
+ Filter::new().kinds(vec![1, 2, 3, 4, 5]).limit(100).build(),
+ Filter::new().kinds(vec![6, 7, 8]).limit(50).build(),
+ ];
+ let sub = subs.get_mut(&id).unwrap();
+ sub.ingest_task(ModifyTask::Full(FullModificationTask {
+ filters: large_filters,
+ relays: relay_urls("wss://new-relay.example"),
+ }));
+
+ let new_size = subs.json_size(&id).unwrap();
+ assert_ne!(
+ original_size, new_size,
+ "json_size should change after full modification"
+ );
+ assert!(
+ new_size > original_size,
+ "larger filters should have larger json_size"
+ );
+ }
+
+ fn filter_has_since(filter: &Filter, expected: u64) -> bool {
+ let json = filter.json().expect("filter json");
+ json.contains(&format!("\"since\":{}", expected))
+ }
+
+ /// Full flow: see_all sets last_seen, then since_optimize applies it to filters.
+ #[test]
+ fn see_all_then_since_optimize_applies_since_to_filters() {
+ let mut subs = OutboxSubscriptions::default();
+ let id = OutboxSubId(11);
+ let filters = vec![
+ Filter::new().kinds(vec![1]).build(),
+ Filter::new().kinds(vec![2]).build(),
+ ];
+ subs.new_subscription(
+ id,
+ subscribe_task(
+ filters,
+ RelayUrlPkgs::new(relay_urls("wss://relay-since.example")),
+ ),
+ false,
+ );
+
+ // Verify filters don't have since initially
+ let view = subs.view(&id).unwrap();
+ for filter in view.filters.get_filters() {
+ let json = filter.json().expect("filter json");
+ assert!(
+ !json.contains("\"since\""),
+ "filter should not have since initially"
+ );
+ }
+
+ let timestamp = 1700000000u64;
+ let sub = subs.get_mut(&id).unwrap();
+ sub.see_all(timestamp);
+ sub.filters.since_optimize();
+
+ // Verify filters now have since
+ let view = subs.view(&id).unwrap();
+ for filter in view.filters.get_filters() {
+ assert!(
+ filter_has_since(filter, timestamp),
+ "filter should have since after see_all + since_optimize"
+ );
+ }
+ }
+
+ /// Filters accessed via view() should have since after optimization.
+ #[test]
+ fn view_returns_optimized_filters() {
+ let mut subs = OutboxSubscriptions::default();
+ let id = OutboxSubId(12);
+ let filters = vec![Filter::new().kinds(vec![1]).build()];
+ subs.new_subscription(
+ id,
+ subscribe_task(
+ filters,
+ RelayUrlPkgs::new(relay_urls("wss://relay-view.example")),
+ ),
+ false,
+ );
+
+ let timestamp = 1234567890u64;
+ {
+ let sub = subs.get_mut(&id).unwrap();
+ sub.see_all(timestamp);
+ sub.filters.since_optimize();
+ }
+
+ // Access via view - should see the optimized filters
+ let view = subs.view(&id).unwrap();
+ let filter = &view.filters.get_filters()[0];
+ assert!(
+ filter_has_since(filter, timestamp),
+ "view should return filters with since applied"
+ );
+ }
+}
diff --git a/crates/enostr/src/relay/test_utils.rs b/crates/enostr/src/relay/test_utils.rs
@@ -0,0 +1,55 @@
+#![cfg(test)]
+//! Test utilities for relay testing
+//!
+//! This module provides mock implementations and helpers for unit and integration tests.
+
+use nostrdb::Filter;
+
+use crate::relay::{OutboxSession, OutboxSubId, OutboxTask};
+use crate::Wakeup;
+
+/// A no-op mock `Wakeup` implementation.
+///
+/// Useful for unit tests that need a `Wakeup` without a real UI/event loop;
+/// `wake()` intentionally does nothing.
+#[derive(Clone)]
+pub struct MockWakeup {}
+
+impl MockWakeup {
+    /// Create a new no-op MockWakeup.
+ pub fn new() -> Self {
+ Self {}
+ }
+}
+
+impl Default for MockWakeup {
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
+impl Wakeup for MockWakeup {
+ fn wake(&self) {}
+}
+
+/// Returns a task for `id`, panicking when the task is missing.
+#[track_caller]
+pub fn expect_task<'a>(session: &'a OutboxSession, id: OutboxSubId) -> &'a OutboxTask {
+ session
+ .tasks
+ .get(&id)
+ .unwrap_or_else(|| panic!("Expected task for {:?}", id))
+}
+
+// ==================== SubRegistry tests ====================
+
+pub fn trivial_filter() -> Vec<Filter> {
+ vec![Filter::new().kinds(vec![1]).build()]
+}
+
+pub fn filters_json(filters: &[Filter]) -> Vec<String> {
+ filters
+ .iter()
+ .map(|f| f.json().expect("serialize filter to json"))
+ .collect()
+}
diff --git a/crates/enostr/src/relay/transparent.rs b/crates/enostr/src/relay/transparent.rs
@@ -0,0 +1,576 @@
+use hashbrown::HashMap;
+use uuid::Uuid;
+
+use crate::{
+ relay::{
+ subscription::SubscriptionView, MetadataFilters, OutboxSubId, OutboxSubscriptions,
+ QueuedTasks, RelayReqId, RelayReqStatus, RelayTask, SubPass, SubPassGuardian,
+ SubPassRevocation, WebsocketRelay,
+ },
+ ClientMessage,
+};
+
+/// TransparentData tracks the outstanding transparent REQs and their metadata.
+#[derive(Default)]
+pub struct TransparentData {
+ request_to_sid: HashMap<OutboxSubId, RelayReqId>,
+ sid_status: HashMap<RelayReqId, SubData>,
+ queue: QueuedTasks,
+}
+
+impl TransparentData {
+ #[allow(dead_code)]
+ pub fn num_subs(&self) -> usize {
+ self.sid_status.len()
+ }
+
+ #[allow(dead_code)]
+ pub fn contains(&self, id: &OutboxSubId) -> bool {
+ self.request_to_sid.contains_key(id)
+ }
+
+ pub fn set_req_status(&mut self, sid: &str, status: RelayReqStatus) {
+ let Some(entry) = self.sid_status.get_mut(sid) else {
+ return;
+ };
+ entry.status = status;
+ }
+
+ pub fn req_status(&self, req_id: &OutboxSubId) -> Option<RelayReqStatus> {
+ let sid = self.request_to_sid.get(req_id)?;
+ Some(self.sid_status.get(sid)?.status)
+ }
+
+ /// Returns the OutboxSubId associated with the given relay subscription ID.
+ pub fn id(&self, sid: &RelayReqId) -> Option<OutboxSubId> {
+ self.sid_status.get(sid).map(|d| d.sub_req_id)
+ }
+}
+
+pub struct TransparentRelay<'a> {
+ relay: Option<&'a mut WebsocketRelay>,
+ data: &'a mut TransparentData,
+ sub_guardian: &'a mut SubPassGuardian,
+}
+
+/// TransparentRelay manages one relay-side REQ per outbox subscription, for
+/// subscriptions that need a prompt per-relay EOSE (or similar per-REQ state).
+impl<'a> TransparentRelay<'a> {
+ pub fn new(
+ relay: Option<&'a mut WebsocketRelay>,
+ data: &'a mut TransparentData,
+ sub_guardian: &'a mut SubPassGuardian,
+ ) -> Self {
+ Self {
+ relay,
+ data,
+ sub_guardian,
+ }
+ }
+
+ pub fn try_flush_queue(&mut self, subs: &OutboxSubscriptions) {
+ while self.sub_guardian.available_passes() > 0 && !self.data.queue.is_empty() {
+ let Some(next) = self.data.queue.pop() else {
+ return;
+ };
+
+ let Some(view) = subs.view(&next) else {
+ continue;
+ };
+
+ self.subscribe(view);
+ }
+ }
+
+ pub fn subscribe(&mut self, view: SubscriptionView) {
+ let req_id = view.id;
+ let Some(existing_sid) = self.data.request_to_sid.get(&req_id) else {
+ let Some(new_pass) = self.sub_guardian.take_pass() else {
+ self.data.queue.add(req_id, RelayTask::Subscribe);
+ return;
+ };
+ tracing::debug!("Transparent took pass for {req_id:?}");
+ let sid: RelayReqId = Uuid::new_v4().into();
+ self.data.request_to_sid.insert(req_id, sid.clone());
+ send_req(&mut self.relay, &sid, view.filters);
+ self.data.sid_status.insert(
+ sid,
+ SubData {
+ status: RelayReqStatus::InitialQuery,
+ sub_pass: new_pass,
+ sub_req_id: req_id,
+ },
+ );
+ return;
+ };
+
+ let Some(sub_data) = self.data.sid_status.get_mut(existing_sid) else {
+ return;
+ };
+
+ // we're replacing the existing sub with new filters
+ sub_data.status = RelayReqStatus::InitialQuery;
+
+ send_req(&mut self.relay, existing_sid, view.filters);
+ }
+
+ pub fn unsubscribe(&mut self, req_id: OutboxSubId) {
+ let Some(sid) = self.data.request_to_sid.remove(&req_id) else {
+ self.data.queue.add(req_id, RelayTask::Unsubscribe);
+ return;
+ };
+
+ let Some(removed) = self.data.sid_status.remove(&sid) else {
+ return;
+ };
+
+ self.sub_guardian.return_pass(removed.sub_pass);
+
+ let Some(relay) = &mut self.relay else {
+ return;
+ };
+
+ if relay.is_connected() {
+ relay.conn.send(&ClientMessage::close(sid.to_string()));
+ }
+ }
+
+ #[profiling::function]
+ pub fn handle_relay_open(&mut self, subs: &OutboxSubscriptions) {
+ let Some(relay) = &mut self.relay else {
+ return;
+ };
+
+ if !relay.is_connected() {
+ return;
+ }
+
+ for (sid, data) in &self.data.sid_status {
+ let Some(view) = subs.view(&data.sub_req_id) else {
+ continue;
+ };
+
+ relay.conn.send(&ClientMessage::req(
+ sid.to_string(),
+ view.filters.get_filters().clone(),
+ ));
+ }
+ }
+}
+
+fn send_req(relay: &mut Option<&mut WebsocketRelay>, sid: &RelayReqId, filters: &MetadataFilters) {
+ let Some(relay) = relay.as_mut() else {
+ return;
+ };
+
+ if !relay.is_connected() {
+ return;
+ }
+
+ relay.conn.send(&ClientMessage::req(
+ sid.to_string(),
+ filters.get_filters().clone(),
+ ));
+}
+
+#[allow(dead_code)]
+pub fn revocate_transparent_subs(
+ mut relay: Option<&mut WebsocketRelay>,
+ data: &mut TransparentData,
+ revocations: Vec<SubPassRevocation>,
+) {
+ // Snapshot the pairs we intend to process (can't mutate while iterating).
+ let pairs: Vec<(OutboxSubId, RelayReqId)> = data
+ .request_to_sid
+ .iter()
+ .take(revocations.len())
+ .map(|(id, sid)| (*id, sid.clone()))
+ .collect();
+
+ for (mut revocation, (id, sid)) in revocations.into_iter().zip(pairs) {
+ // If we fail to remove the mapping, skip without consuming other state.
+ if data.request_to_sid.remove(&id).is_none() {
+ continue;
+ }
+
+ let Some(status) = data.sid_status.remove(&sid) else {
+ continue;
+ };
+
+ revocation.revocate(status.sub_pass);
+ data.queue.add(id, RelayTask::Subscribe);
+
+ let Some(relay) = &mut relay else {
+ continue;
+ };
+
+ if relay.is_connected() {
+ relay.conn.send(&ClientMessage::close(sid.to_string()));
+ }
+ }
+}
+
+struct SubData {
+ pub status: RelayReqStatus,
+ pub sub_pass: SubPass,
+ pub sub_req_id: OutboxSubId,
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::relay::{RelayUrlPkgs, SubscribeTask};
+ use hashbrown::HashSet;
+ use nostrdb::Filter;
+
+ // ==================== TransparentData tests ====================
+
+ fn trivial_filter() -> Vec<Filter> {
+ vec![Filter::new().kinds([0]).build()]
+ }
+
+ fn create_subs_with_filter(id: OutboxSubId, filters: Vec<Filter>) -> OutboxSubscriptions {
+ let mut subs = OutboxSubscriptions::default();
+ insert_sub(&mut subs, id, filters, false);
+ subs
+ }
+
+ fn insert_sub(
+ subs: &mut OutboxSubscriptions,
+ id: OutboxSubId,
+ filters: Vec<Filter>,
+ is_oneshot: bool,
+ ) {
+ subs.new_subscription(
+ id,
+ SubscribeTask {
+ filters,
+ relays: RelayUrlPkgs::new(HashSet::new()),
+ },
+ is_oneshot,
+ );
+ }
+
+ #[test]
+ fn transparent_data_manual_insert_and_query() {
+ let mut data = TransparentData::default();
+ let mut guardian = SubPassGuardian::new(1);
+ let pass = guardian.take_pass().unwrap();
+
+ let req_id = OutboxSubId(42);
+ let sid = RelayReqId::default();
+
+ data.request_to_sid.insert(req_id, sid.clone());
+ data.sid_status.insert(
+ sid.clone(),
+ SubData {
+ status: RelayReqStatus::InitialQuery,
+ sub_pass: pass,
+ sub_req_id: req_id,
+ },
+ );
+
+ assert!(data.contains(&req_id));
+ assert_eq!(data.num_subs(), 1);
+ assert_eq!(data.req_status(&req_id), Some(RelayReqStatus::InitialQuery));
+
+ // Update status
+ data.set_req_status(&sid.to_string(), RelayReqStatus::Eose);
+ assert_eq!(data.req_status(&req_id), Some(RelayReqStatus::Eose));
+ }
+
+ // ==================== TransparentRelay tests ====================
+
+ #[test]
+ fn transparent_relay_subscribe_creates_mapping() {
+ let mut data = TransparentData::default();
+ let mut guardian = SubPassGuardian::new(5);
+ let subs = create_subs_with_filter(OutboxSubId(0), trivial_filter());
+
+ {
+ let mut relay = TransparentRelay::new(None, &mut data, &mut guardian);
+ relay.subscribe(subs.view(&OutboxSubId(0)).unwrap());
+ }
+
+ assert!(data.contains(&OutboxSubId(0)));
+ assert_eq!(data.num_subs(), 1);
+ assert_eq!(guardian.available_passes(), 4); // One pass consumed
+ }
+
+ #[test]
+ fn transparent_relay_subscribe_queues_when_no_passes() {
+ let mut data = TransparentData::default();
+ let mut guardian = SubPassGuardian::new(0); // No passes available
+ let subs = create_subs_with_filter(OutboxSubId(0), trivial_filter());
+
+ {
+ let mut relay = TransparentRelay::new(None, &mut data, &mut guardian);
+ relay.subscribe(subs.view(&OutboxSubId(0)).unwrap());
+ }
+
+ // Should be queued, not active
+ assert!(!data.contains(&OutboxSubId(0)));
+ assert_eq!(data.num_subs(), 0);
+ assert_eq!(data.queue.len(), 1);
+ }
+
+ #[test]
+ fn transparent_relay_unsubscribe_returns_pass() {
+ let mut data = TransparentData::default();
+ let mut guardian = SubPassGuardian::new(1);
+ let subs = create_subs_with_filter(OutboxSubId(0), trivial_filter());
+
+ {
+ let mut relay = TransparentRelay::new(None, &mut data, &mut guardian);
+ relay.subscribe(subs.view(&OutboxSubId(0)).unwrap());
+ }
+
+ assert_eq!(guardian.available_passes(), 0);
+ assert!(data.queue.is_empty());
+
+ {
+ let mut relay = TransparentRelay::new(None, &mut data, &mut guardian);
+ relay.unsubscribe(OutboxSubId(0));
+ }
+
+ assert_eq!(guardian.available_passes(), 1);
+ assert!(!data.contains(&OutboxSubId(0)));
+ assert_eq!(data.num_subs(), 0);
+ assert!(data.queue.is_empty());
+ }
+
+ #[test]
+ fn transparent_relay_sub_unsub_no_passes() {
+ let mut data = TransparentData::default();
+
+ // no passes available
+ let mut guardian = SubPassGuardian::new(0);
+ let subs = create_subs_with_filter(OutboxSubId(0), trivial_filter());
+
+ {
+ let mut relay = TransparentRelay::new(None, &mut data, &mut guardian);
+ relay.subscribe(subs.view(&OutboxSubId(0)).unwrap());
+ }
+
+ assert!(!data.queue.is_empty());
+
+ {
+ let mut relay = TransparentRelay::new(None, &mut data, &mut guardian);
+ relay.unsubscribe(OutboxSubId(0));
+ }
+
+ assert!(data.queue.is_empty());
+ }
+
+ #[test]
+ fn transparent_relay_unsubscribe_unknown_no_op() {
+ let mut data = TransparentData::default();
+ let mut guardian = SubPassGuardian::new(5);
+
+ {
+ let mut relay = TransparentRelay::new(None, &mut data, &mut guardian);
+ relay.unsubscribe(OutboxSubId(999)); // Unknown ID
+ }
+
+ // Should not panic, passes unchanged
+ assert_eq!(guardian.available_passes(), 5);
+ }
+
+ #[test]
+ fn transparent_relay_subscribe_replaces_existing() {
+ let mut data = TransparentData::default();
+ let mut guardian = SubPassGuardian::new(5);
+
+ let filters1 = vec![Filter::new().kinds(vec![1]).build()];
+ let filters2 = vec![Filter::new().kinds(vec![4]).build()];
+
+ let subs1 = create_subs_with_filter(OutboxSubId(0), filters1);
+
+ {
+ let mut relay = TransparentRelay::new(None, &mut data, &mut guardian);
+ relay.subscribe(subs1.view(&OutboxSubId(0)).unwrap());
+ }
+
+ assert_eq!(guardian.available_passes(), 4);
+
+ let subs2 = create_subs_with_filter(OutboxSubId(0), filters2);
+
+ {
+ let mut relay = TransparentRelay::new(None, &mut data, &mut guardian);
+ relay.subscribe(subs2.view(&OutboxSubId(0)).unwrap());
+ }
+
+ // Should still have same number of passes (replaced, not added)
+ assert_eq!(guardian.available_passes(), 4);
+ assert_eq!(data.num_subs(), 1);
+
+ // Verify replacement happened - status should be reset to InitialQuery
+ let sid = data.request_to_sid.get(&OutboxSubId(0)).unwrap();
+ let sub_data = data.sid_status.get(sid).unwrap();
+ assert_eq!(sub_data.status, RelayReqStatus::InitialQuery);
+ }
+
+ #[test]
+ fn transparent_relay_try_flush_queue_processes_when_passes_available() {
+ let mut data = TransparentData::default();
+ let mut guardian = SubPassGuardian::new(0); // Start with no passes
+ let subs = create_subs_with_filter(OutboxSubId(0), trivial_filter());
+
+ // Queue a subscription
+ {
+ let mut relay = TransparentRelay::new(None, &mut data, &mut guardian);
+ relay.subscribe(subs.view(&OutboxSubId(0)).unwrap());
+ }
+
+ assert_eq!(data.queue.len(), 1);
+ assert!(!data.contains(&OutboxSubId(0)));
+
+ // Return a pass
+ guardian.spawn_passes(1);
+
+ // Flush queue
+ {
+ let mut relay = TransparentRelay::new(None, &mut data, &mut guardian);
+ relay.try_flush_queue(&subs);
+ }
+
+ // Should now be active
+ assert!(data.queue.is_empty());
+ assert!(data.contains(&OutboxSubId(0)));
+ }
+
+ #[test]
+ fn transparent_relay_multiple_subscriptions() {
+ let mut data = TransparentData::default();
+ let mut guardian = SubPassGuardian::new(3);
+ let mut subs = OutboxSubscriptions::default();
+ insert_sub(&mut subs, OutboxSubId(0), trivial_filter(), false);
+ insert_sub(&mut subs, OutboxSubId(1), trivial_filter(), false);
+ insert_sub(&mut subs, OutboxSubId(2), trivial_filter(), false);
+
+ {
+ let mut relay = TransparentRelay::new(None, &mut data, &mut guardian);
+ relay.subscribe(subs.view(&OutboxSubId(0)).unwrap());
+ relay.subscribe(subs.view(&OutboxSubId(1)).unwrap());
+ relay.subscribe(subs.view(&OutboxSubId(2)).unwrap());
+ }
+
+ assert_eq!(data.num_subs(), 3);
+ assert_eq!(guardian.available_passes(), 0);
+
+ // All should be tracked
+ assert!(data.contains(&OutboxSubId(0)));
+ assert!(data.contains(&OutboxSubId(1)));
+ assert!(data.contains(&OutboxSubId(2)));
+ }
+
+ #[test]
+ fn transparent_data_id_returns_outbox_sub_id() {
+ let mut data = TransparentData::default();
+ let mut guardian = SubPassGuardian::new(2);
+ let mut subs = OutboxSubscriptions::default();
+ insert_sub(&mut subs, OutboxSubId(0), trivial_filter(), true);
+ insert_sub(&mut subs, OutboxSubId(1), trivial_filter(), false);
+
+ {
+ let mut relay = TransparentRelay::new(None, &mut data, &mut guardian);
+ relay.subscribe(subs.view(&OutboxSubId(0)).unwrap());
+ relay.subscribe(subs.view(&OutboxSubId(1)).unwrap());
+ }
+
+ let sid = data.request_to_sid.get(&OutboxSubId(0)).unwrap().clone();
+
+ // id() should return the OutboxSubId for the relay subscription
+ let outbox_id = data.id(&sid);
+ assert_eq!(outbox_id, Some(OutboxSubId(0)));
+
+ // Unknown sid should return None
+ let unknown_sid = RelayReqId::from("unknown");
+ assert!(data.id(&unknown_sid).is_none());
+ }
+
+ // ==================== revocate_transparent_subs tests ====================
+
+ #[test]
+ fn revocate_transparent_subs_removes_subscriptions() {
+ let mut data = TransparentData::default();
+ let mut guardian = SubPassGuardian::new(3);
+ let mut subs = OutboxSubscriptions::default();
+ insert_sub(&mut subs, OutboxSubId(0), trivial_filter(), false);
+ insert_sub(&mut subs, OutboxSubId(1), trivial_filter(), false);
+ insert_sub(&mut subs, OutboxSubId(2), trivial_filter(), false);
+
+ // Set up some subscriptions
+ {
+ let mut relay = TransparentRelay::new(None, &mut data, &mut guardian);
+ relay.subscribe(subs.view(&OutboxSubId(0)).unwrap());
+ relay.subscribe(subs.view(&OutboxSubId(1)).unwrap());
+ relay.subscribe(subs.view(&OutboxSubId(2)).unwrap());
+ }
+
+ assert_eq!(data.num_subs(), 3);
+
+ // Create revocations for 2 subs
+ let revocations = vec![SubPassRevocation::new(), SubPassRevocation::new()];
+
+ revocate_transparent_subs(None, &mut data, revocations);
+
+ // Should have removed 2 subscriptions
+ assert_eq!(data.num_subs(), 1);
+ assert_eq!(data.queue.len(), 2);
+ }
+
+ #[test]
+ fn revocate_transparent_subs_empty_revocations() {
+ let mut data = TransparentData::default();
+ let mut guardian = SubPassGuardian::new(2);
+ let subs = create_subs_with_filter(OutboxSubId(0), trivial_filter());
+
+ {
+ let mut relay = TransparentRelay::new(None, &mut data, &mut guardian);
+ relay.subscribe(subs.view(&OutboxSubId(0)).unwrap());
+ }
+
+ // No revocations
+ let revocations: Vec<SubPassRevocation> = vec![];
+ revocate_transparent_subs(None, &mut data, revocations);
+
+ // Nothing should change
+ assert_eq!(data.num_subs(), 1);
+ }
+
+ #[test]
+ fn revocate_transparent_subs_exactly_matching() {
+ // Test with exactly matching number of revocations and subscriptions
+ let mut data = TransparentData::default();
+ let mut guardian = SubPassGuardian::new(3);
+ let mut subs = OutboxSubscriptions::default();
+ insert_sub(&mut subs, OutboxSubId(0), trivial_filter(), false);
+ insert_sub(&mut subs, OutboxSubId(1), trivial_filter(), false);
+ insert_sub(&mut subs, OutboxSubId(2), trivial_filter(), false);
+
+ // Create 3 subscriptions
+ {
+ let mut relay = TransparentRelay::new(None, &mut data, &mut guardian);
+ relay.subscribe(subs.view(&OutboxSubId(0)).unwrap());
+ relay.subscribe(subs.view(&OutboxSubId(1)).unwrap());
+ relay.subscribe(subs.view(&OutboxSubId(2)).unwrap());
+ }
+
+ assert_eq!(data.num_subs(), 3);
+ assert_eq!(guardian.available_passes(), 0);
+
+ // Create exactly 3 revocations
+ let revocations = vec![
+ SubPassRevocation::new(),
+ SubPassRevocation::new(),
+ SubPassRevocation::new(),
+ ];
+
+ // This should revoke all subscriptions
+ revocate_transparent_subs(None, &mut data, revocations);
+
+ assert_eq!(data.num_subs(), 0);
+ assert_eq!(data.queue.len(), 3);
+ }
+}
diff --git a/crates/enostr/src/relay/websocket.rs b/crates/enostr/src/relay/websocket.rs
@@ -0,0 +1,145 @@
+use crate::{relay::RelayStatus, ClientMessage, Result, Wakeup};
+
+use std::{
+ fmt,
+ hash::{Hash, Hasher},
+ time::{Duration, Instant},
+};
+
+use ewebsock::{Options, WsMessage, WsReceiver, WsSender};
+use tracing::{debug, error};
+
+/// WebsocketConn owns an outbound websocket connection to a relay.
+pub struct WebsocketConn {
+    /// Relay endpoint; also the connection's identity for `Hash`/`Eq` below.
+    pub url: nostr::RelayUrl,
+    /// Last known connection state (Connecting/Connected/...).
+    pub status: RelayStatus,
+    /// Outbound half of the ewebsock channel.
+    pub sender: WsSender,
+    /// Inbound half of the ewebsock channel.
+    pub receiver: WsReceiver,
+}
+
+impl fmt::Debug for WebsocketConn {
+    /// Debug-print only the url and status; the ewebsock sender/receiver
+    /// halves have no useful Debug representation.
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        // Use the actual type name: the old "Relay" label was misleading
+        // in log output after the rename to WebsocketConn.
+        f.debug_struct("WebsocketConn")
+            .field("url", &self.url)
+            .field("status", &self.status)
+            .finish()
+    }
+}
+
+impl Hash for WebsocketConn {
+    fn hash<H: Hasher>(&self, state: &mut H) {
+        // Hashes the Relay by hashing the URL
+        self.url.hash(state);
+    }
+}
+
+// Identity is the URL alone: two conns to the same relay compare equal
+// regardless of connection state. Consistent with the Hash impl above.
+impl PartialEq for WebsocketConn {
+    fn eq(&self, other: &Self) -> bool {
+        self.url == other.url
+    }
+}
+
+impl Eq for WebsocketConn {}
+
+impl WebsocketConn {
+    /// Connect to `url`, adapting a plain closure into the crate's `Wakeup`
+    /// trait. The wakeup is invoked by ewebsock when new data arrives.
+    pub fn new(
+        url: nostr::RelayUrl,
+        wakeup: impl Fn() + Send + Sync + Clone + 'static,
+    ) -> Result<Self> {
+        // Local adapter so a bare closure can satisfy `Wakeup`.
+        #[derive(Clone)]
+        struct TmpWakeup<W>(W);
+
+        impl<W> Wakeup for TmpWakeup<W>
+        where
+            W: Fn() + Send + Sync + Clone + 'static,
+        {
+            fn wake(&self) {
+                (self.0)()
+            }
+        }
+
+        WebsocketConn::from_wakeup(url, TmpWakeup(wakeup))
+    }
+
+    /// Open the websocket with a `Wakeup` implementation. The connection
+    /// starts in `RelayStatus::Connecting`.
+    pub fn from_wakeup<W>(url: nostr::RelayUrl, wakeup: W) -> Result<Self>
+    where
+        W: Wakeup,
+    {
+        let status = RelayStatus::Connecting;
+        let wake = wakeup;
+        let (sender, receiver) =
+            ewebsock::connect_with_wakeup(url.as_str(), Options::default(), move || wake.wake())?;
+
+        Ok(Self {
+            url,
+            sender,
+            receiver,
+            status,
+        })
+    }
+
+    /// Serialize `msg` to JSON and send it as a text frame. Serialization
+    /// failures are logged and the message is dropped (no error returned).
+    #[profiling::function]
+    pub fn send(&mut self, msg: &ClientMessage) {
+        let json = match msg.to_json() {
+            Ok(json) => {
+                debug!("sending {} to {}", json, self.url);
+                json
+            }
+            Err(e) => {
+                error!("error serializing json for filter: {e}");
+                return;
+            }
+        };
+
+        let txt = WsMessage::Text(json);
+        self.sender.send(txt);
+    }
+
+    /// Re-dial the same URL, replacing the sender/receiver pair and
+    /// resetting status to `Connecting`. Used for reconnect attempts.
+    pub fn connect(&mut self, wakeup: impl Fn() + Send + Sync + 'static) -> Result<()> {
+        let (sender, receiver) =
+            ewebsock::connect_with_wakeup(self.url.as_str(), Options::default(), wakeup)?;
+        self.status = RelayStatus::Connecting;
+        self.sender = sender;
+        self.receiver = receiver;
+        Ok(())
+    }
+
+    /// Send an empty websocket ping frame (keepalive).
+    pub fn ping(&mut self) {
+        let msg = WsMessage::Ping(vec![]);
+        self.sender.send(msg);
+    }
+
+    /// Record a status change observed by the caller.
+    pub fn set_status(&mut self, status: RelayStatus) {
+        self.status = status;
+    }
+}
+
+/// WebsocketRelay wraps WebsocketConn with reconnect/keepalive metadata.
+pub struct WebsocketRelay {
+    /// The underlying websocket connection.
+    pub conn: WebsocketConn,
+    /// When the last keepalive ping was sent.
+    pub last_ping: Instant,
+    /// When we last tried to (re)connect.
+    pub last_connect_attempt: Instant,
+    /// Current backoff delay before the next reconnect attempt.
+    pub retry_connect_after: Duration,
+    /// Number of consecutive failed reconnect attempts. Reset to 0 on successful connection.
+    pub reconnect_attempt: u32,
+}
+
+impl WebsocketRelay {
+    /// Wrap an existing connection with fresh keepalive/reconnect state.
+    pub fn new(conn: WebsocketConn) -> Self {
+        Self {
+            conn,
+            last_ping: Instant::now(),
+            last_connect_attempt: Instant::now(),
+            retry_connect_after: Self::initial_reconnect_duration(),
+            reconnect_attempt: 0,
+        }
+    }
+
+    /// Base delay used before the first reconnect attempt.
+    pub fn initial_reconnect_duration() -> Duration {
+        Duration::from_secs(5)
+    }
+
+    /// True once the underlying websocket reports a live connection.
+    pub fn is_connected(&self) -> bool {
+        matches!(self.conn.status, RelayStatus::Connected)
+    }
+}
diff --git a/crates/enostr/tests/outbox_integration.rs b/crates/enostr/tests/outbox_integration.rs
@@ -0,0 +1,700 @@
+//! Integration tests for the Outbox relay system
+//!
+//! These tests use `nostr-relay-builder::LocalRelay` to run a real relay on localhost
+//! and test the full subscription lifecycle, EOSE propagation, and multi-relay coordination.
+
+use enostr::{
+ NormRelayUrl, OutboxPool, OutboxSessionHandler, OutboxSubId, RelayReqStatus, RelayStatus,
+ RelayUrlPkgs, Wakeup,
+};
+use hashbrown::HashSet;
+use nostr_relay_builder::{LocalRelay, RelayBuilder};
+use nostrdb::Filter;
+use std::sync::Once;
+use std::time::Duration;
+
+static TRACING_INIT: Once = Once::new();
+
+/// Initialize tracing for tests (only runs once even if called multiple times)
+fn init_tracing() {
+    // `Once` guards against double-initialization when several #[tokio::test]
+    // functions in this file call init_tracing().
+    TRACING_INIT.call_once(|| {
+        tracing_subscriber::fmt()
+            .with_env_filter(
+                // Respect RUST_LOG, but always enable debug output for enostr.
+                tracing_subscriber::EnvFilter::from_default_env()
+                    .add_directive("enostr=debug".parse().unwrap()),
+            )
+            .with_test_writer()
+            .init();
+    });
+}
+
+/// A mock Wakeup implementation for integration tests
+#[derive(Clone, Default)]
+pub struct MockWakeup {}
+
+impl Wakeup for MockWakeup {
+    // Tests poll the pool directly, so wakeups can be ignored.
+    fn wake(&self) {}
+}
+
+/// Helper to create a LocalRelay with default settings for tests.
+/// Returns the relay handle (must be kept alive) and its normalized URL.
+async fn create_test_relay() -> (LocalRelay, NormRelayUrl) {
+    // The relay binds an ephemeral localhost port; dropping the handle
+    // shuts it down, so callers keep it in scope (`_relay`).
+    let relay = LocalRelay::run(RelayBuilder::default())
+        .await
+        .expect("failed to start relay");
+
+    let url_str = relay.url();
+    tracing::info!("LocalRelay listening at {}", url_str);
+
+    let url = NormRelayUrl::new(&url_str).expect("valid relay url");
+    (relay, url)
+}
+
+/// Polls the pool until the provided predicate returns true or the attempt limit is reached.
+/// Each attempt drains up to 10 queued relay messages, checks the predicate, then sleeps.
+/// Returns whether the predicate was ultimately satisfied.
+async fn pump_pool_until<F>(
+    pool: &mut OutboxPool,
+    max_attempts: usize,
+    sleep_duration: Duration,
+    mut predicate: F,
+) -> bool
+where
+    F: FnMut(&mut OutboxPool) -> bool,
+{
+    for attempt in 0..max_attempts {
+        pool.try_recv(10, |_| {});
+        if predicate(pool) {
+            tracing::trace!("pool pump satisfied after {} attempts", attempt + 1);
+            return true;
+        }
+        tokio::time::sleep(sleep_duration).await;
+    }
+
+    // Budget exhausted. (The old manual counter under-reported by one and
+    // logged "completed" even on failure.) Give the predicate one final
+    // look, since the last sleep may have let more work land.
+    tracing::trace!("pool pump exhausted after {max_attempts} attempts");
+
+    predicate(pool)
+}
+
+/// Pump with the standard budget: up to 100 attempts, 15 ms apart.
+async fn default_pool_pump<F>(pool: &mut OutboxPool, predicate: F) -> bool
+where
+    F: FnMut(&mut OutboxPool) -> bool,
+{
+    let tick = Duration::from_millis(15);
+    pump_pool_until(pool, 100, tick, predicate).await
+}
+
+// ==================== Full Subscription Lifecycle ====================
+
+/// Exercises the full sub lifecycle against a real local relay:
+/// subscribe -> EOSE -> unsubscribe -> cleanup.
+#[tokio::test]
+async fn full_subscription_lifecycle() {
+    init_tracing();
+
+    // Start local relay
+    let (_relay, url) = create_test_relay().await;
+
+    let mut pool = OutboxPool::default();
+    let wakeup = MockWakeup::default();
+
+    // 1. Subscribe to the local relay
+    let mut urls = HashSet::new();
+    urls.insert(url.clone());
+    let url_pkgs = RelayUrlPkgs::new(urls);
+
+    let id = {
+        let mut session = pool.start_session(wakeup.clone());
+        session.subscribe(trivial_filter(), url_pkgs)
+    }; // session dropped, REQ sent to relay
+
+    // 2. Pump until the relay reports EOSE for this sub
+    let has_eose = pump_pool_until(&mut pool, 50, Duration::from_millis(5), |pool| {
+        pool.has_eose(&id)
+    })
+    .await;
+
+    assert!(has_eose, "should have received EOSE from relay");
+
+    // 3. Unsubscribe
+    {
+        let mut session = pool.start_session(wakeup.clone());
+        session.unsubscribe(id);
+    }
+
+    // 4. Verify cleaned up
+    let status = pool.status(&id);
+    assert!(
+        status.is_empty(),
+        "status should be empty after unsubscribe"
+    );
+}
+
+// ==================== EOSE Flow End-to-End ====================
+
+/// A REQ against a live local relay should surface its EOSE in the pool.
+#[tokio::test]
+async fn eose_propagation_from_real_relay() {
+    let (_relay, url) = create_test_relay().await;
+
+    let mut pool = OutboxPool::default();
+
+    // Transparent mode forwards the REQ as-is, which yields a faster EOSE.
+    let mut relay_urls = HashSet::new();
+    relay_urls.insert(url.clone());
+    let mut url_pkgs = RelayUrlPkgs::new(relay_urls);
+    url_pkgs.use_transparent = true;
+
+    let filters = vec![Filter::new().kinds(vec![1]).limit(10).build()];
+    let id = {
+        let mut session = pool.start_session(MockWakeup::default());
+        session.subscribe(filters, url_pkgs)
+    };
+
+    let got_eose = default_pool_pump(&mut pool, |pool| pool.has_eose(&id)).await;
+
+    assert!(got_eose, "EOSE should propagate from relay to pool");
+}
+
+// ==================== Multi-Relay Coordination ====================
+
+/// A single subscription targeting two relays should track both and reach
+/// EOSE on each.
+#[tokio::test]
+async fn subscribe_to_multiple_relays() {
+    // Start two local relays
+    let (_relay1, url1) = create_test_relay().await;
+    let (_relay2, url2) = create_test_relay().await;
+
+    let mut pool = OutboxPool::default();
+    let wakeup = MockWakeup::default();
+
+    // Subscribe to both relays
+    let mut urls = HashSet::new();
+    urls.insert(url1.clone());
+    urls.insert(url2.clone());
+    let url_pkgs = RelayUrlPkgs::new(urls);
+
+    let id = {
+        let mut session = pool.start_session(wakeup.clone());
+        session.subscribe(vec![Filter::new().kinds(vec![1]).build()], url_pkgs)
+    };
+
+    // Pump until *every* relay for this sub has reported EOSE.
+    let got_eoses = pump_pool_until(&mut pool, 50, Duration::from_millis(5), |pool| {
+        pool.all_have_eose(&id)
+    })
+    .await;
+
+    // One status entry per relay, and both must have reached EOSE.
+    let status = pool.status(&id);
+    assert_eq!(status.len(), 2);
+    assert!(got_eoses, "should have eoses from both relays");
+}
+
+// ==================== Modify Relays Mid-Subscription ====================
+
+/// Swapping a subscription's relay set mid-flight should drop the old relay
+/// and start a fresh InitialQuery on the new one.
+#[tokio::test]
+async fn modify_relays_adds_and_removes() {
+    init_tracing();
+
+    let (_relay1, url1) = create_test_relay().await;
+    let (_relay2, url2) = create_test_relay().await;
+
+    let mut pool = OutboxPool::default();
+    let wakeup = MockWakeup::default();
+
+    // Start with relay1 only
+    let mut urls1 = HashSet::new();
+    urls1.insert(url1.clone());
+
+    let id = {
+        let mut session = pool.start_session(wakeup.clone());
+        session.subscribe(
+            vec![Filter::new().kinds(vec![1]).build()],
+            RelayUrlPkgs::new(urls1),
+        )
+    };
+
+    // Fresh sub: exactly one relay, still in its initial query phase.
+    {
+        let status = pool.status(&id);
+        assert_eq!(status.len(), 1);
+        let (url, res) = status.into_iter().next().unwrap();
+        assert_eq!(*url, url1);
+        assert_eq!(res, RelayReqStatus::InitialQuery);
+    }
+
+    let all_eose = default_pool_pump(&mut pool, |pool| pool.all_have_eose(&id)).await;
+    assert!(all_eose);
+
+    // After EOSE the sub should still be pinned to relay1 only.
+    {
+        let status = pool.status(&id);
+        assert_eq!(status.len(), 1);
+        let (url, _) = status.into_iter().next().unwrap();
+        assert_eq!(*url, url1.clone());
+    }
+
+    // Switch to relay2 only
+    let mut urls2 = HashSet::new();
+    urls2.insert(url2.clone());
+
+    {
+        let mut session = pool.start_session(wakeup.clone());
+        session.modify_relays(id, urls2);
+    }
+
+    // The swap is immediate: relay1 gone, relay2 back in InitialQuery.
+    {
+        let status = pool.status(&id);
+        assert_eq!(status.len(), 1);
+        let (url, res) = status.into_iter().next().unwrap();
+        assert_eq!(*url, url2);
+        assert_eq!(res, RelayReqStatus::InitialQuery);
+    }
+
+    let all_eose = default_pool_pump(&mut pool, |pool| pool.all_have_eose(&id)).await;
+    tracing::info!("pool status: {:?}", pool.status(&id));
+    assert!(all_eose);
+
+    let status = pool.status(&id);
+    assert_eq!(
+        status.len(),
+        1,
+        "we are replacing relay {:?} with {:?}",
+        url1,
+        url2
+    );
+    let (url, _) = status.into_iter().next().unwrap();
+    assert_eq!(
+        *url, url2,
+        "we are replacing relay {:?} with {:?}",
+        url1, url2
+    );
+}
+
+// ==================== Subscription with Filters ====================
+
+/// A REQ carrying several filters should still produce a single EOSE.
+#[tokio::test]
+async fn subscription_with_complex_filters() {
+    let (_relay, url) = create_test_relay().await;
+
+    let mut pool = OutboxPool::default();
+    let wakeup = MockWakeup::default();
+
+    let mut urls = HashSet::new();
+    urls.insert(url.clone());
+    let url_pkgs = RelayUrlPkgs::new(urls);
+
+    // Use a more complex filter
+    let filters = vec![
+        Filter::new().kinds(vec![1]).build(),
+        Filter::new().kinds(vec![0]).build(),
+        Filter::new().kinds(vec![3]).build(),
+        Filter::new().kinds(vec![4]).limit(100).build(),
+    ];
+
+    let id = {
+        let mut session = pool.start_session(wakeup.clone());
+        session.subscribe(filters, url_pkgs)
+    };
+
+    let got_eose = default_pool_pump(&mut pool, |pool| pool.has_eose(&id)).await;
+    assert!(got_eose, "should receive EOSE even with multiple filters");
+}
+
+// ==================== Multiple Concurrent Subscriptions ====================
+
+/// Five independent subscriptions in one session should each reach EOSE.
+#[tokio::test]
+async fn multiple_concurrent_subscriptions() {
+    let (_relay, url) = create_test_relay().await;
+
+    let mut pool = OutboxPool::default();
+    let wakeup = MockWakeup::default();
+
+    let mut urls = HashSet::new();
+    urls.insert(url.clone());
+
+    // Create multiple subscriptions (one per kind 0..5) in a single session.
+    let mut ids: Vec<OutboxSubId> = Vec::new();
+
+    {
+        let mut session = pool.start_session(wakeup.clone());
+
+        for kind in 0..5 {
+            let id = session.subscribe(
+                vec![Filter::new().kinds(vec![kind]).build()],
+                RelayUrlPkgs::new(urls.clone()),
+            );
+            ids.push(id);
+        }
+    }
+
+    assert_eq!(ids.len(), 5);
+
+    // `all` states the intent directly (the old `filter().count() == 5`
+    // was equivalent but obscured it).
+    let all_eose =
+        default_pool_pump(&mut pool, |pool| ids.iter().all(|id| pool.has_eose(id))).await;
+
+    // The predicate requires every sub to reach EOSE, so the message must
+    // say "all" — the previous "at least one" text contradicted the check.
+    assert!(all_eose, "all five subscriptions should have EOSE");
+}
+
+// ==================== Unsubscribe During Processing ====================
+
+/// Unsubscribing immediately after subscribing (before any EOSE) should
+/// still leave the sub fully cleaned up.
+#[tokio::test]
+async fn unsubscribe_during_processing() {
+    let (_relay, url) = create_test_relay().await;
+
+    let mut pool = OutboxPool::default();
+
+    let mut urls = HashSet::new();
+    urls.insert(url.clone());
+    let url_pkgs = RelayUrlPkgs::new(urls);
+
+    let id = {
+        let mut session = pool.start_session(MockWakeup::default());
+        session.subscribe(vec![Filter::new().kinds(vec![1]).build()], url_pkgs)
+    };
+
+    // Immediately unsubscribe
+    {
+        let mut session = pool.start_session(MockWakeup::default());
+        session.unsubscribe(id);
+    }
+
+    // Pump to let any in-flight relay traffic drain.
+    let empty = default_pool_pump(&mut pool, |pool| pool.status(&id).is_empty()).await;
+
+    // Status should be empty after unsubscribe
+    assert!(empty, "status should be empty after unsubscribe");
+}
+
+// ==================== Transparent vs Compaction Mode ====================
+
+/// Transparent mode (REQ forwarded as-is) should reach EOSE.
+#[tokio::test]
+async fn transparent_mode_subscription() {
+    let (_relay, url) = create_test_relay().await;
+
+    let mut pool = OutboxPool::default();
+
+    let mut urls = HashSet::new();
+    urls.insert(url.clone());
+    let mut url_pkgs = RelayUrlPkgs::new(urls);
+    url_pkgs.use_transparent = true; // Enable transparent mode
+
+    let id = {
+        let mut session = pool.start_session(MockWakeup::default());
+        session.subscribe(trivial_filter(), url_pkgs)
+    };
+
+    let got_eose = default_pool_pump(&mut pool, |pool| pool.has_eose(&id)).await;
+    assert!(got_eose, "transparent mode should receive EOSE");
+}
+
+/// Compaction mode (the default) should also reach EOSE.
+#[tokio::test]
+async fn compaction_mode_subscription() {
+    let (_relay, url) = create_test_relay().await;
+
+    let mut pool = OutboxPool::default();
+
+    let mut urls = HashSet::new();
+    urls.insert(url.clone());
+    let mut url_pkgs = RelayUrlPkgs::new(urls);
+    url_pkgs.use_transparent = false; // Compaction mode (default)
+
+    let id = {
+        let mut session = pool.start_session(MockWakeup::default());
+        session.subscribe(trivial_filter(), url_pkgs)
+    };
+
+    let got_eose = default_pool_pump(&mut pool, |pool| pool.has_eose(&id)).await;
+    assert!(got_eose, "compaction mode should receive EOSE");
+}
+
+// ==================== Modify Filters Mid-Subscription ====================
+
+/// Replacing a subscription's filters mid-flight should still yield EOSE.
+#[tokio::test]
+async fn modify_filters_mid_subscription() {
+    let (_relay, url) = create_test_relay().await;
+
+    let mut pool = OutboxPool::default();
+
+    let mut urls = HashSet::new();
+    urls.insert(url.clone());
+    let url_pkgs = RelayUrlPkgs::new(urls);
+
+    // Start with kind 1
+    let id = {
+        let mut session = pool.start_session(MockWakeup::default());
+        session.subscribe(trivial_filter(), url_pkgs)
+    };
+
+    // Modify to kind 4
+    {
+        let mut session = pool.start_session(MockWakeup::default());
+        session.modify_filters(id, vec![Filter::new().kinds(vec![4]).limit(9).build()]);
+    }
+
+    // NOTE(review): this only asserts that *an* EOSE arrives; it does not
+    // distinguish an EOSE for the original vs. the modified filter set.
+    let got_eose = default_pool_pump(&mut pool, |pool| pool.has_eose(&id)).await;
+    assert!(got_eose, "should receive EOSE");
+}
+
+// ==================== Connection Resilience ====================
+
+/// Minimal single-filter REQ: all kind-1 events.
+fn trivial_filter() -> Vec<Filter> {
+    let kind1 = Filter::new().kinds([1]).build();
+    vec![kind1]
+}
+
+/// Subscribing should register the relay in the pool's websocket status map.
+#[tokio::test]
+async fn websocket_status_tracking() {
+    let (_relay, url) = create_test_relay().await;
+
+    let mut pool = OutboxPool::default();
+    let wakeup = MockWakeup::default();
+
+    let mut urls = HashSet::new();
+    urls.insert(url.clone());
+    let url_pkgs = RelayUrlPkgs::new(urls);
+
+    {
+        let mut session = pool.start_session(wakeup.clone());
+        session.subscribe(trivial_filter(), url_pkgs);
+    }
+
+    // Check websocket statuses
+    let statuses = pool.websocket_statuses();
+    // Should have at least one relay tracked
+    assert!(!statuses.is_empty(), "should track websocket statuses");
+}
+
+// ==================== Failure Paths ====================
+
+/// Subscribing to an unreachable relay should remain disconnected and never report EOSE.
+#[tokio::test]
+async fn unreachable_relay_reports_disconnected_status() {
+    let mut pool = OutboxPool::default();
+    let wakeup = MockWakeup::default();
+    // Nothing listens on this port; the connection can never complete.
+    let unreachable =
+        NormRelayUrl::new("wss://127.0.0.1:6555").expect("valid unreachable relay url");
+
+    let mut urls = HashSet::new();
+    urls.insert(unreachable.clone());
+    let url_pkgs = RelayUrlPkgs::new(urls);
+
+    let id = {
+        let mut session = pool.start_session(wakeup);
+        session.subscribe(trivial_filter(), url_pkgs)
+    };
+
+    // Short pump budget: we expect this to time out without ever seeing EOSE.
+    let got_eose = pump_pool_until(&mut pool, 10, Duration::from_millis(10), |pool| {
+        pool.has_eose(&id)
+    })
+    .await;
+    assert!(
+        !got_eose,
+        "unreachable relay should never yield an EOSE signal"
+    );
+
+    // Should survive keepalive pings even when no websocket is available.
+    pool.keepalive_ping(|| {});
+
+    let statuses = pool.websocket_statuses();
+    let status = statuses
+        .into_iter()
+        .find(|(relay_url, _)| *relay_url == &unreachable)
+        .map(|(_, status)| status)
+        .expect("missing unreachable relay status");
+    assert_eq!(status, RelayStatus::Disconnected);
+}
+
+// ==================== Oneshot Subscription Removal After EOSE ====================
+
+/// Oneshot subscriptions should be removed from the pool after EOSE is received.
+#[tokio::test]
+async fn oneshot_subscription_removed_after_eose() {
+    let (_relay, url) = create_test_relay().await;
+
+    let mut pool = OutboxPool::default();
+
+    let mut urls = HashSet::new();
+    urls.insert(url.clone());
+    let url_pkgs = RelayUrlPkgs::new(urls);
+
+    // Create a oneshot subscription via the handler, then export to get the ID
+    // (oneshot() itself returns no id, so we pull it from the exported
+    // session's task map and re-import to keep the sub alive).
+    let id = {
+        let mut handler = pool.start_session(MockWakeup::default());
+        handler.oneshot(trivial_filter(), url_pkgs);
+        let session = handler.export();
+        // Get the ID from the session's tasks
+        let id = *session
+            .tasks
+            .keys()
+            .next()
+            .expect("oneshot should create a task");
+        OutboxSessionHandler::import(&mut pool, session, MockWakeup::default());
+        id
+    };
+
+    // Verify subscription exists
+    let filters_before = pool.filters(&id);
+    assert!(
+        filters_before.is_some(),
+        "oneshot subscription should exist before EOSE"
+    );
+
+    // Wait for EOSE
+    let got_eose = pump_pool_until(&mut pool, 50, Duration::from_millis(5), |pool| {
+        pool.has_eose(&id)
+    })
+    .await;
+    assert!(got_eose, "should receive EOSE for oneshot subscription");
+
+    // Trigger EOSE processing by starting an empty session
+    // (the pool ingests pending EOSE work when a session begins/ends).
+    {
+        let _ = pool.start_session(MockWakeup::default());
+    }
+
+    // Verify subscription was removed
+    let filters_after = pool.filters(&id);
+    assert!(
+        filters_after.is_none(),
+        "oneshot subscription should be removed after EOSE"
+    );
+}
+
+/// Oneshot subscriptions across multiple relays should fully clean up after all EOSEs.
+#[tokio::test]
+async fn oneshot_multi_relay_fully_removed_after_eose() {
+    let (_relay1, url1) = create_test_relay().await;
+    let (_relay2, url2) = create_test_relay().await;
+
+    let mut pool = OutboxPool::default();
+
+    let mut urls = HashSet::new();
+    urls.insert(url1.clone());
+    urls.insert(url2.clone());
+    let url_pkgs = RelayUrlPkgs::new(urls);
+
+    // Same export/import dance as the single-relay oneshot test: the id
+    // is only visible via the exported session's task map.
+    let id = {
+        let mut handler = pool.start_session(MockWakeup::default());
+        handler.oneshot(trivial_filter(), url_pkgs);
+        let session = handler.export();
+        let id = *session
+            .tasks
+            .keys()
+            .next()
+            .expect("oneshot should create a task");
+        OutboxSessionHandler::import(&mut pool, session, MockWakeup::default());
+        id
+    };
+
+    let got_all_eose = pump_pool_until(&mut pool, 100, Duration::from_millis(10), |pool| {
+        pool.all_have_eose(&id)
+    })
+    .await;
+    assert!(got_all_eose, "oneshot should receive EOSE from all relays");
+
+    // Empty session triggers the pool's EOSE queue processing.
+    {
+        let _ = pool.start_session(MockWakeup::default());
+    }
+
+    assert!(
+        pool.filters(&id).is_none(),
+        "oneshot metadata should be removed after EOSE processing"
+    );
+    assert!(
+        pool.status(&id).is_empty(),
+        "oneshot should be fully unsubscribed on all relays after EOSE processing"
+    );
+}
+
+// ==================== Since Optimization After EOSE ====================
+
+/// Whether the filter carries a `since` timestamp.
+fn filter_has_since(f: &Filter) -> bool {
+    f.since().is_some()
+}
+
+/// After EOSE is received, filters should have `since` applied for future re-subscriptions.
+#[tokio::test]
+async fn eose_applies_since_to_filters() {
+    let (_relay, url) = create_test_relay().await;
+
+    let mut pool = OutboxPool::default();
+
+    // Subscribe with transparent mode (faster EOSE)
+    let mut urls = HashSet::new();
+    urls.insert(url.clone());
+    let mut url_pkgs = RelayUrlPkgs::new(urls);
+    url_pkgs.use_transparent = true;
+
+    let id = {
+        let mut session = pool.start_session(MockWakeup::default());
+        session.subscribe(
+            vec![Filter::new().kinds(vec![1]).limit(10).build()],
+            url_pkgs,
+        )
+    };
+
+    // Verify filters don't have since initially
+    let initial_filters = pool.filters(&id).expect("subscription exists");
+    assert!(
+        !filter_has_since(&initial_filters[0]),
+        "filters should not have since before EOSE"
+    );
+
+    // Wait for EOSE
+    let got_eose = default_pool_pump(&mut pool, |pool| pool.has_eose(&id)).await;
+    assert!(got_eose, "should receive EOSE");
+
+    // Create an empty session to trigger EOSE queue processing
+    // (ingest_session is called when the handler is dropped)
+    {
+        let _ = pool.start_session(MockWakeup::default());
+    }
+
+    // After EOSE processing, filters should have since applied
+    let optimized_filters = pool.filters(&id).expect("subscription still exists");
+
+    assert!(
+        filter_has_since(&optimized_filters[0]),
+        "filters should have since after EOSE"
+    );
+}
+
+/// Since optimization should wait until every relay for the subscription reaches EOSE.
+#[tokio::test]
+async fn since_optimization_waits_for_all_relays_eose() {
+    let (_relay, live_url) = create_test_relay().await;
+    // Port 1 is effectively never listening, so this relay can't EOSE.
+    let dead_url = NormRelayUrl::new("wss://127.0.0.1:1").expect("valid dead relay url");
+
+    let mut pool = OutboxPool::default();
+
+    let mut urls = HashSet::new();
+    urls.insert(live_url);
+    urls.insert(dead_url);
+    let mut url_pkgs = RelayUrlPkgs::new(urls);
+    url_pkgs.use_transparent = true;
+
+    let id = {
+        let mut session = pool.start_session(MockWakeup::default());
+        session.subscribe(
+            vec![Filter::new().kinds(vec![1]).limit(10).build()],
+            url_pkgs,
+        )
+    };
+
+    let initial_filters = pool.filters(&id).expect("subscription exists");
+    assert!(
+        !filter_has_since(&initial_filters[0]),
+        "filters should not have since before any EOSE"
+    );
+
+    // The live relay EOSEs, but the dead one never can.
+    let got_any_eose = default_pool_pump(&mut pool, |pool| pool.has_eose(&id)).await;
+    assert!(got_any_eose, "live relay should produce EOSE");
+    assert!(
+        !pool.all_have_eose(&id),
+        "all relays should not have EOSE when one relay is unreachable"
+    );
+
+    // Trigger EOSE queue processing.
+    {
+        let _ = pool.start_session(MockWakeup::default());
+    }
+
+    let filters = pool.filters(&id).expect("subscription still exists");
+    assert!(
+        !filter_has_since(&filters[0]),
+        "since should not be optimized until every relay reaches EOSE"
+    );
+}
diff --git a/crates/notedeck/src/account/accounts.rs b/crates/notedeck/src/account/accounts.rs
@@ -1,21 +1,22 @@
-use uuid::Uuid;
-
use crate::account::cache::AccountCache;
use crate::account::contacts::Contacts;
use crate::account::mute::AccountMutedData;
use crate::account::relay::{
- modify_advertised_relays, update_relay_configuration, AccountRelayData, RelayAction,
+ calculate_relays, modify_advertised_relays, write_relays, AccountRelayData, RelayAction,
RelayDefaults,
};
+use crate::scoped_subs::{RelaySelection, ScopedSubIdentity, SubConfig, SubKey};
use crate::storage::AccountStorageWriter;
use crate::user_account::UserAccountSerializable;
use crate::{
- AccountStorage, MuteFun, SingleUnkIdAction, UnifiedSubscription, UnknownIds, UserAccount,
- ZapWallet,
+ AccountStorage, MuteFun, RemoteApi, ScopedSubApi, SingleUnkIdAction, SubOwnerKey, UnknownIds,
+ UserAccount, ZapWallet,
};
-use enostr::{ClientMessage, FilledKeypair, Keypair, Pubkey, RelayPool};
-use nostrdb::{Ndb, Note, Transaction};
+use enostr::{FilledKeypair, Keypair, NormRelayUrl, Pubkey, RelayId};
+use hashbrown::HashSet;
+use nostrdb::{Filter, Ndb, Note, Subscription, Transaction};
+use std::slice::from_ref;
// TODO: remove this
use std::sync::Arc;
@@ -25,7 +26,8 @@ pub struct Accounts {
pub cache: AccountCache,
storage_writer: Option<AccountStorageWriter>,
relay_defaults: RelayDefaults,
- subs: AccountSubs,
+ ndb_subs: AccountNdbSubs,
+ scoped_remote_initialized: bool,
}
impl Accounts {
@@ -36,8 +38,6 @@ impl Accounts {
fallback: Pubkey,
ndb: &mut Ndb,
txn: &Transaction,
- pool: &mut RelayPool,
- ctx: &egui::Context,
unknown_ids: &mut UnknownIds,
) -> Self {
let (mut cache, unknown_id) = AccountCache::new(UserAccount::new(
@@ -78,31 +78,31 @@ impl Accounts {
selected_data.query(ndb, txn);
- let subs = {
- AccountSubs::new(
- ndb,
- pool,
- &relay_defaults,
- &selected.key.pubkey,
- selected_data,
- create_wakeup(ctx),
- )
- };
+ let ndb_subs = AccountNdbSubs::new(ndb, selected_data);
Accounts {
cache,
storage_writer,
relay_defaults,
- subs,
+ ndb_subs,
+ scoped_remote_initialized: false,
}
}
- pub fn remove_account(
+ pub(crate) fn remove_account(
&mut self,
pk: &Pubkey,
ndb: &mut Ndb,
- pool: &mut RelayPool,
- ctx: &egui::Context,
+ remote: &mut RemoteApi<'_>,
+ ) -> bool {
+ self.remove_account_internal(pk, ndb, remote)
+ }
+
+ fn remove_account_internal(
+ &mut self,
+ pk: &Pubkey,
+ ndb: &mut Ndb,
+ remote: &mut RemoteApi<'_>,
) -> bool {
let Some(resp) = self.cache.remove(pk) else {
return false;
@@ -117,8 +117,14 @@ impl Accounts {
}
if let Some(swap_to) = resp.swap_to {
+ let old_pk = resp.deleted.pubkey;
let txn = Transaction::new(ndb).expect("txn");
- self.select_account_internal(&swap_to, ndb, &txn, pool, ctx);
+ self.select_account_internal(&swap_to, old_pk, ndb, &txn, remote);
+ }
+
+ {
+ let mut scoped_subs = remote.scoped_subs(&*self);
+ clear_account_remote_subs_for_account(&mut scoped_subs, resp.deleted.pubkey);
}
true
@@ -219,29 +225,40 @@ impl Accounts {
&self.cache.selected().data
}
- pub fn select_account(
+ pub(crate) fn select_account(
&mut self,
pk_to_select: &Pubkey,
ndb: &mut Ndb,
txn: &Transaction,
- pool: &mut RelayPool,
- ctx: &egui::Context,
+ remote: &mut RemoteApi<'_>,
) {
+ self.select_account_internal_entry(pk_to_select, ndb, txn, remote);
+ }
+
+ fn select_account_internal_entry(
+ &mut self,
+ pk_to_select: &Pubkey,
+ ndb: &mut Ndb,
+ txn: &Transaction,
+ remote: &mut RemoteApi<'_>,
+ ) {
+ let old_pk = *self.selected_account_pubkey();
+
if !self.cache.select(*pk_to_select) {
return;
}
- self.select_account_internal(pk_to_select, ndb, txn, pool, ctx);
+ self.select_account_internal(pk_to_select, old_pk, ndb, txn, remote);
}
/// Have already selected in `AccountCache`, updating other things
fn select_account_internal(
&mut self,
pk_to_select: &Pubkey,
+ old_pk: Pubkey,
ndb: &mut Ndb,
txn: &Transaction,
- pool: &mut RelayPool,
- ctx: &egui::Context,
+ remote: &mut RemoteApi<'_>,
) {
if let Some(key_store) = &self.storage_writer {
if let Err(e) = key_store.select_key(Some(*pk_to_select)) {
@@ -250,14 +267,11 @@ impl Accounts {
}
self.get_selected_account_mut().data.query(ndb, txn);
- self.subs.swap_to(
- ndb,
- pool,
- &self.relay_defaults,
- pk_to_select,
- &self.cache.selected().data,
- create_wakeup(ctx),
- );
+ self.ndb_subs.swap_to(ndb, &self.cache.selected().data);
+
+ remote.on_account_switched(old_pk, *pk_to_select, self);
+
+ self.ensure_selected_account_remote_subs(remote);
}
pub fn mutefun(&self) -> Box<MuteFun> {
@@ -278,95 +292,79 @@ impl Accounts {
}
}
- pub fn send_initial_filters(&mut self, pool: &mut RelayPool, relay_url: &str) {
- let data = &self.get_selected_account().data;
- // send the active account's relay list subscription
- pool.send_to(
- &ClientMessage::req(
- self.subs.relay.remote.clone(),
- vec![data.relay.filter.clone()],
- ),
- relay_url,
- );
- // send the active account's muted subscription
- pool.send_to(
- &ClientMessage::req(
- self.subs.mute.remote.clone(),
- vec![data.muted.filter.clone()],
- ),
- relay_url,
- );
- pool.send_to(
- &ClientMessage::req(
- self.subs.contacts.remote.clone(),
- vec![data.contacts.filter.clone()],
- ),
- relay_url,
- );
- if let Some(cur_pk) = self.selected_filled().map(|s| s.pubkey) {
- let giftwraps_filter = nostrdb::Filter::new()
- .kinds([1059])
- .pubkeys([cur_pk.bytes()])
- .build();
- pool.send_to(
- &ClientMessage::req(self.subs.giftwraps.remote.clone(), vec![giftwraps_filter]),
- relay_url,
- );
- }
- }
-
- pub fn update(&mut self, ndb: &mut Ndb, pool: &mut RelayPool, ctx: &egui::Context) {
+ #[profiling::function]
+ pub fn update(&mut self, ndb: &mut Ndb, remote: &mut RemoteApi<'_>) {
// IMPORTANT - This function is called in the UI update loop,
// make sure it is fast when idle
- let Some(update) = self
+ let relay_updated = self
.cache
.selected_mut()
.data
- .poll_for_updates(ndb, &self.subs)
- else {
+ .poll_for_updates(ndb, &self.ndb_subs);
+
+ if !self.scoped_remote_initialized {
+ self.ensure_selected_account_remote_subs(remote);
return;
- };
+ }
- match update {
- // If needed, update the relay configuration
- AccountDataUpdate::Relay => {
- let acc = self.cache.selected();
- update_relay_configuration(
- pool,
- &self.relay_defaults,
- &acc.key.pubkey,
- &acc.data.relay,
- create_wakeup(ctx),
- );
- }
+ if !relay_updated {
+ return;
}
+
+ self.retarget_selected_account_read_relays(remote);
}
pub fn get_full<'a>(&'a self, pubkey: &Pubkey) -> Option<FilledKeypair<'a>> {
self.cache.get(pubkey).and_then(|r| r.key.to_full())
}
- pub fn process_relay_action(
- &mut self,
- ctx: &egui::Context,
- pool: &mut RelayPool,
- action: RelayAction,
- ) {
+ pub(crate) fn process_relay_action(&mut self, remote: &mut RemoteApi<'_>, action: RelayAction) {
let acc = self.cache.selected_mut();
- modify_advertised_relays(&acc.key, action, pool, &self.relay_defaults, &mut acc.data);
-
- update_relay_configuration(
- pool,
+ modify_advertised_relays(
+ &acc.key,
+ action,
+ remote,
&self.relay_defaults,
- &acc.key.pubkey,
- &acc.data.relay,
- create_wakeup(ctx),
+ &mut acc.data,
);
+
+ self.retarget_selected_account_read_relays(remote);
+ }
+
+ pub fn selected_account_read_relays(&self) -> HashSet<NormRelayUrl> {
+ calculate_relays(
+ &self.relay_defaults,
+ &self.get_selected_account_data().relay,
+ true,
+ )
+ }
+
+ /// Return the selected account's advertised NIP-65 relays with marker metadata.
+ pub fn selected_account_advertised_relays(
+ &self,
+ ) -> &std::collections::BTreeSet<crate::RelaySpec> {
+ &self.get_selected_account_data().relay.advertised
+ }
+
+ pub fn selected_account_write_relays(&self) -> Vec<RelayId> {
+ write_relays(
+ &self.relay_defaults,
+ &self.get_selected_account_data().relay,
+ )
}
- pub fn get_subs(&self) -> &AccountSubs {
- &self.subs
+ fn ensure_selected_account_remote_subs(&mut self, remote: &mut RemoteApi<'_>) {
+ {
+ let mut scoped_subs = remote.scoped_subs(&*self);
+ ensure_selected_account_remote_subs_api(&mut scoped_subs, self);
+ }
+ self.scoped_remote_initialized = true;
+ }
+
+ fn retarget_selected_account_read_relays(&mut self, remote: &mut RemoteApi<'_>) {
+ remote.retarget_selected_account_read_relays(self);
+ self.scoped_remote_initialized = true;
}
}
@@ -384,13 +382,6 @@ impl<'a> AccType<'a> {
}
}
-fn create_wakeup(ctx: &egui::Context) -> impl Fn() + Send + Sync + Clone + 'static {
- let ctx = ctx.clone();
- move || {
- ctx.request_repaint();
- }
-}
-
fn add_account_from_storage(
cache: &mut AccountCache,
user_account_serializable: UserAccountSerializable,
@@ -443,22 +434,16 @@ impl AccountData {
}
}
- pub(super) fn poll_for_updates(
- &mut self,
- ndb: &Ndb,
- subs: &AccountSubs,
- ) -> Option<AccountDataUpdate> {
+ #[profiling::function]
+ pub(super) fn poll_for_updates(&mut self, ndb: &Ndb, ndb_subs: &AccountNdbSubs) -> bool {
let txn = Transaction::new(ndb).expect("txn");
- let mut resp = None;
- if self.relay.poll_for_updates(ndb, &txn, subs.relay.local) {
- resp = Some(AccountDataUpdate::Relay);
- }
+ let relay_updated = self.relay.poll_for_updates(ndb, &txn, ndb_subs.relay_ndb);
- self.muted.poll_for_updates(ndb, &txn, subs.mute.local);
+ self.muted.poll_for_updates(ndb, &txn, ndb_subs.mute_ndb);
self.contacts
- .poll_for_updates(ndb, &txn, subs.contacts.local);
+ .poll_for_updates(ndb, &txn, ndb_subs.contacts_ndb);
- resp
+ relay_updated
}
/// Note: query should be called as close to the subscription as possible
@@ -469,90 +454,125 @@ impl AccountData {
}
}
-pub(super) enum AccountDataUpdate {
- Relay,
-}
-
pub struct AddAccountResponse {
pub switch_to: Pubkey,
pub unk_id_action: SingleUnkIdAction,
}
-pub struct AccountSubs {
- relay: UnifiedSubscription,
- giftwraps: UnifiedSubscription,
- mute: UnifiedSubscription,
- pub contacts: UnifiedSubscription,
+fn giftwrap_filter(pk: &Pubkey) -> Filter {
+ // TODO: since optimize
+ nostrdb::Filter::new()
+ .kinds([1059])
+ .pubkeys([pk.bytes()])
+ .build()
}
-impl AccountSubs {
- pub(super) fn new(
- ndb: &mut Ndb,
- pool: &mut RelayPool,
- relay_defaults: &RelayDefaults,
- pk: &Pubkey,
- data: &AccountData,
- wakeup: impl Fn() + Send + Sync + Clone + 'static,
- ) -> Self {
- // TODO: since optimize
- let giftwraps_filter = nostrdb::Filter::new()
- .kinds([1059])
- .pubkeys([pk.bytes()])
- .build();
-
- update_relay_configuration(pool, relay_defaults, pk, &data.relay, wakeup);
+fn account_remote_owner_key() -> SubOwnerKey {
+ SubOwnerKey::new("core/accounts/remote-subs")
+}
- let relay = subscribe(ndb, pool, &data.relay.filter);
- let giftwraps = subscribe(ndb, pool, &giftwraps_filter);
- let mute = subscribe(ndb, pool, &data.muted.filter);
- let contacts = subscribe(ndb, pool, &data.contacts.filter);
+fn ensure_selected_account_remote_subs_api(
+ scoped_subs: &mut ScopedSubApi<'_, '_>,
+ accounts: &Accounts,
+) {
+ let owner = account_remote_owner_key();
+ for kind in account_remote_sub_kinds() {
+ let key = account_remote_sub_key(kind);
+ let identity = ScopedSubIdentity::account(owner, key);
+ let config = selected_account_remote_config(accounts, kind);
+ let _ = scoped_subs.ensure_sub(identity, config);
+ }
+}
- Self {
- relay,
- mute,
- contacts,
- giftwraps,
- }
+fn clear_account_remote_subs_for_account(
+ scoped_subs: &mut ScopedSubApi<'_, '_>,
+ account_pk: Pubkey,
+) {
+ let owner = account_remote_owner_key();
+ for kind in account_remote_sub_kinds() {
+ let key = account_remote_sub_key(kind);
+ let identity = ScopedSubIdentity::account(owner, key);
+ let _ = scoped_subs.clear_sub_for_account(account_pk, identity);
}
+}
- pub(super) fn swap_to(
- &mut self,
- ndb: &mut Ndb,
- pool: &mut RelayPool,
- relay_defaults: &RelayDefaults,
- pk: &Pubkey,
- new_selection_data: &AccountData,
- wakeup: impl Fn() + Send + Sync + Clone + 'static,
- ) {
- unsubscribe(ndb, pool, &self.relay);
- unsubscribe(ndb, pool, &self.mute);
- unsubscribe(ndb, pool, &self.contacts);
- unsubscribe(ndb, pool, &self.giftwraps);
+#[derive(Clone, Copy, Eq, Hash, PartialEq)]
+enum AccountRemoteSubKind {
+ RelayList,
+ MuteList,
+ ContactsList,
+ Giftwrap,
+}
- *self = AccountSubs::new(ndb, pool, relay_defaults, pk, new_selection_data, wakeup);
- }
+fn account_remote_sub_kinds() -> [AccountRemoteSubKind; 4] {
+ [
+ AccountRemoteSubKind::RelayList,
+ AccountRemoteSubKind::MuteList,
+ AccountRemoteSubKind::ContactsList,
+ AccountRemoteSubKind::Giftwrap,
+ ]
}
-fn subscribe(ndb: &Ndb, pool: &mut RelayPool, filter: &nostrdb::Filter) -> UnifiedSubscription {
- let filters = vec![filter.clone()];
- let sub = ndb
- .subscribe(&filters)
- .expect("ndb relay list subscription");
+fn account_remote_sub_key(kind: AccountRemoteSubKind) -> SubKey {
+ SubKey::new(kind)
+}
- // remote subscription
- let subid = Uuid::new_v4().to_string();
- pool.subscribe(subid.clone(), filters);
+fn make_account_remote_config(filters: Vec<Filter>, use_transparent: bool) -> SubConfig {
+ SubConfig {
+ relays: RelaySelection::AccountsRead,
+ filters,
+ use_transparent,
+ }
+}
- UnifiedSubscription {
- local: sub,
- remote: subid,
+fn selected_account_remote_config(accounts: &Accounts, kind: AccountRemoteSubKind) -> SubConfig {
+ let selected = accounts.get_selected_account_data();
+ match kind {
+ AccountRemoteSubKind::RelayList => {
+ make_account_remote_config(vec![selected.relay.filter.clone()], false)
+ }
+ AccountRemoteSubKind::MuteList => {
+ make_account_remote_config(vec![selected.muted.filter.clone()], false)
+ }
+ AccountRemoteSubKind::ContactsList => {
+ make_account_remote_config(vec![selected.contacts.filter.clone()], true)
+ }
+ AccountRemoteSubKind::Giftwrap => make_account_remote_config(
+ vec![giftwrap_filter(accounts.selected_account_pubkey())],
+ false,
+ ),
}
}
-fn unsubscribe(ndb: &mut Ndb, pool: &mut RelayPool, sub: &UnifiedSubscription) {
- pool.unsubscribe(sub.remote.clone());
+struct AccountNdbSubs {
+ relay_ndb: Subscription,
+ mute_ndb: Subscription,
+ contacts_ndb: Subscription,
+}
+
+impl AccountNdbSubs {
+ pub fn new(ndb: &mut Ndb, data: &AccountData) -> Self {
+ let relay_ndb = ndb
+ .subscribe(from_ref(&data.relay.filter))
+ .expect("ndb relay list subscription");
+ let mute_ndb = ndb
+ .subscribe(from_ref(&data.muted.filter))
+ .expect("ndb sub");
+ let contacts_ndb = ndb
+ .subscribe(from_ref(&data.contacts.filter))
+ .expect("ndb sub");
+ Self {
+ relay_ndb,
+ mute_ndb,
+ contacts_ndb,
+ }
+ }
- // local subscription
- ndb.unsubscribe(sub.local)
- .expect("ndb relay list unsubscribe");
+ pub fn swap_to(&mut self, ndb: &mut Ndb, new_selection_data: &AccountData) {
+ let _ = ndb.unsubscribe(self.relay_ndb);
+ let _ = ndb.unsubscribe(self.mute_ndb);
+ let _ = ndb.unsubscribe(self.contacts_ndb);
+
+ *self = AccountNdbSubs::new(ndb, new_selection_data);
+ }
}
diff --git a/crates/notedeck/src/account/contacts.rs b/crates/notedeck/src/account/contacts.rs
@@ -69,6 +69,7 @@ impl Contacts {
}
}
+ #[profiling::function]
pub(super) fn poll_for_updates(&mut self, ndb: &Ndb, txn: &Transaction, sub: Subscription) {
let nks = ndb.poll_for_notes(sub, 1);
@@ -104,6 +105,7 @@ impl Contacts {
}
}
+#[profiling::function]
fn update_state(state: &mut ContactState, note: &Note, key: NoteKey) {
match state {
ContactState::Unreceived => {
diff --git a/crates/notedeck/src/account/mute.rs b/crates/notedeck/src/account/mute.rs
@@ -94,6 +94,7 @@ impl AccountMutedData {
muted
}
+ #[profiling::function]
pub(super) fn poll_for_updates(&mut self, ndb: &Ndb, txn: &Transaction, sub: Subscription) {
let nks = ndb.poll_for_notes(sub, 1);
diff --git a/crates/notedeck/src/account/relay.rs b/crates/notedeck/src/account/relay.rs
@@ -1,10 +1,10 @@
use std::collections::BTreeSet;
-use crate::{AccountData, RelaySpec};
-use enostr::{Keypair, Pubkey, RelayPool};
-use nostrdb::{Filter, Ndb, NoteBuilder, NoteKey, Subscription, Transaction};
+use crate::{AccountData, RelaySpec, RemoteApi};
+use enostr::{Keypair, NormRelayUrl, RelayId};
+use hashbrown::HashSet;
+use nostrdb::{Filter, Ndb, Note, NoteBuilder, NoteKey, Subscription, Transaction};
use tracing::{debug, error, info};
-use url::Url;
#[derive(Clone)]
pub(crate) struct AccountRelayData {
@@ -47,14 +47,6 @@ impl AccountRelayData {
self.advertised = relays.into_iter().collect()
}
- // standardize the format (ie, trailing slashes) to avoid dups
- pub fn canonicalize_url(url: &str) -> String {
- match Url::parse(url) {
- Ok(parsed_url) => parsed_url.to_string(),
- Err(_) => url.to_owned(), // If parsing fails, return the original URL.
- }
- }
-
pub(crate) fn harvest_nip65_relays(
ndb: &Ndb,
txn: &Transaction,
@@ -73,8 +65,12 @@ impl AccountRelayData {
let has_write_marker = tag
.get(2)
.is_some_and(|m| m.variant().str() == Some("write"));
+
+ let Ok(norm_url) = NormRelayUrl::new(url) else {
+ continue;
+ };
relays.push(RelaySpec::new(
- Self::canonicalize_url(url),
+ norm_url,
has_read_marker,
has_write_marker,
));
@@ -96,20 +92,23 @@ impl AccountRelayData {
relays
}
- pub fn publish_nip65_relays(&self, seckey: &[u8; 32], pool: &mut RelayPool) {
+ pub fn new_nip65_relays_note(&'_ self, seckey: &[u8; 32]) -> Note<'_> {
let mut builder = NoteBuilder::new().kind(10002).content("");
for rs in &self.advertised {
- builder = builder.start_tag().tag_str("r").tag_str(&rs.url);
+ builder = builder
+ .start_tag()
+ .tag_str("r")
+ .tag_str(&rs.url.to_string());
if rs.has_read_marker {
builder = builder.tag_str("read");
} else if rs.has_write_marker {
builder = builder.tag_str("write");
}
}
- let note = builder.sign(seckey).build().expect("note build");
- pool.send(&enostr::ClientMessage::event(¬e).expect("note client message"));
+ builder.sign(seckey).build().expect("note build")
}
+ #[profiling::function]
pub fn poll_for_updates(&mut self, ndb: &Ndb, txn: &Transaction, sub: Subscription) -> bool {
let nks = ndb.poll_for_notes(sub, 1);
@@ -134,7 +133,7 @@ impl RelayDefaults {
pub(crate) fn new(forced_relays: Vec<String>) -> Self {
let forced_relays: BTreeSet<RelaySpec> = forced_relays
.into_iter()
- .map(|u| RelaySpec::new(AccountRelayData::canonicalize_url(&u), false, false))
+ .filter_map(|u| Some(RelaySpec::new(NormRelayUrl::new(&u).ok()?, false, false)))
.collect();
let bootstrap_relays = [
"wss://relay.damus.io",
@@ -145,7 +144,7 @@ impl RelayDefaults {
]
.iter()
.map(|&url| url.to_string())
- .map(|u| RelaySpec::new(AccountRelayData::canonicalize_url(&u), false, false))
+ .filter_map(|u| Some(RelaySpec::new(NormRelayUrl::new(&u).ok()?, false, false)))
.collect();
Self {
@@ -155,25 +154,40 @@ impl RelayDefaults {
}
}
-pub(super) fn update_relay_configuration(
- pool: &mut RelayPool,
+pub fn calculate_relays(
relay_defaults: &RelayDefaults,
- pk: &Pubkey,
data: &AccountRelayData,
- wakeup: impl Fn() + Send + Sync + Clone + 'static,
-) {
- debug!(
- "updating relay configuration for currently selected {:?}",
- pk.hex()
- );
-
+ readable: bool, // are we calculating the readable relays? or the writable?
+) -> HashSet<NormRelayUrl> {
// If forced relays are set use them only
let mut desired_relays = relay_defaults.forced_relays.clone();
// Compose the desired relay lists from the selected account
if desired_relays.is_empty() {
- desired_relays.extend(data.local.iter().cloned());
- desired_relays.extend(data.advertised.iter().cloned());
+ desired_relays.extend(
+ data.local
+ .iter()
+ .filter(|l| {
+ if readable {
+ l.is_readable()
+ } else {
+ l.is_writable()
+ }
+ })
+ .cloned(),
+ );
+ desired_relays.extend(
+ data.advertised
+ .iter()
+ .filter(|l| {
+ if readable {
+ l.is_readable()
+ } else {
+ l.is_writable()
+ }
+ })
+ .cloned(),
+ );
}
// If no relays are specified at this point use the bootstrap list
@@ -181,33 +195,12 @@ pub(super) fn update_relay_configuration(
desired_relays = relay_defaults.bootstrap_relays.clone();
}
- debug!("current relays: {:?}", pool.urls());
debug!("desired relays: {:?}", desired_relays);
- let pool_specs = pool
- .urls()
- .iter()
- .map(|url| RelaySpec::new(url.clone(), false, false))
- .collect();
- let add: BTreeSet<RelaySpec> = desired_relays.difference(&pool_specs).cloned().collect();
- let mut sub: BTreeSet<RelaySpec> = pool_specs.difference(&desired_relays).cloned().collect();
- if !add.is_empty() {
- debug!("configuring added relays: {:?}", add);
- let _ = pool.add_urls(add.iter().map(|r| r.url.clone()).collect(), wakeup);
- }
- if !sub.is_empty() {
- // certain relays are persistent like the multicast relay,
- // although we should probably have a way to explicitly
- // disable it
- sub.remove(&RelaySpec::new("multicast", false, false));
-
- debug!("removing unwanted relays: {:?}", sub);
- pool.remove_urls(&sub.iter().map(|r| r.url.clone()).collect());
- }
-
- debug!("current relays: {:?}", pool.urls());
+ desired_relays.into_iter().map(|r| r.url).collect()
}
+// TODO(kernelkind): these should have `NormRelayUrl` instead of `String`...
pub enum RelayAction {
Add(String),
Remove(String),
@@ -225,14 +218,18 @@ impl RelayAction {
pub(super) fn modify_advertised_relays(
kp: &Keypair,
action: RelayAction,
- pool: &mut RelayPool,
+ remote: &mut RemoteApi<'_>,
relay_defaults: &RelayDefaults,
account_data: &mut AccountData,
) {
- let relay_url = AccountRelayData::canonicalize_url(action.get_url());
+ let Ok(relay_url) = NormRelayUrl::new(action.get_url()) else {
+ return;
+ };
+
+ let relay_url_str = relay_url.to_string();
match action {
- RelayAction::Add(_) => info!("add advertised relay \"{}\"", relay_url),
- RelayAction::Remove(_) => info!("remove advertised relay \"{}\"", relay_url),
+ RelayAction::Add(_) => info!("add advertised relay \"{relay_url_str}\""),
+ RelayAction::Remove(_) => info!("remove advertised relay \"{relay_url_str}\""),
}
// let selected = self.cache.selected_mut();
@@ -254,8 +251,22 @@ pub(super) fn modify_advertised_relays(
// If we have the secret key publish the NIP-65 relay list
if let Some(secretkey) = &kp.secret_key {
- account_data
+ let note = account_data
.relay
- .publish_nip65_relays(&secretkey.to_secret_bytes(), pool);
+ .new_nip65_relays_note(&secretkey.to_secret_bytes());
+
+ let mut publisher = remote.publisher_explicit();
+ publisher.publish_note(¬e, write_relays(relay_defaults, &account_data.relay));
}
}
+
+pub fn write_relays(relay_defaults: &RelayDefaults, data: &AccountRelayData) -> Vec<RelayId> {
+ let mut relays: Vec<RelayId> = calculate_relays(relay_defaults, data, false)
+ .into_iter()
+ .map(RelayId::Websocket)
+ .collect();
+
+ relays.push(RelayId::Multicast);
+
+ relays
+}
diff --git a/crates/notedeck/src/app.rs b/crates/notedeck/src/app.rs
@@ -2,20 +2,21 @@ use crate::account::FALLBACK_PUBKEY;
use crate::i18n::Localization;
use crate::nip05::Nip05Cache;
use crate::persist::{AppSizeHandler, SettingsHandler};
+use crate::scoped_sub_state::ScopedSubsState;
use crate::unknowns::unknown_id_send;
use crate::wallet::GlobalWallet;
use crate::zaps::Zaps;
-use crate::NotedeckOptions;
use crate::{
frame_history::FrameHistory, AccountStorage, Accounts, AppContext, Args, DataPath,
- DataPathType, Directory, Images, NoteAction, NoteCache, RelayDebugView, UnknownIds,
+ DataPathType, Directory, Images, NoteAction, NoteCache, RemoteApi, UnknownIds,
};
+use crate::{EguiWakeup, NotedeckOptions};
use crate::{Error, JobCache};
use crate::{JobPool, MediaJobs};
use egui::Margin;
use egui::ThemePreference;
use egui_winit::clipboard::Clipboard;
-use enostr::{PoolEventBuf, PoolRelay, RelayEvent, RelayMessage, RelayPool};
+use enostr::{OutboxPool, OutboxSession, OutboxSessionHandler};
use nostrdb::{Config, Ndb, Transaction};
use std::cell::RefCell;
use std::collections::BTreeSet;
@@ -65,7 +66,8 @@ pub struct Notedeck {
ndb: Ndb,
img_cache: Images,
unknown_ids: UnknownIds,
- pool: RelayPool,
+ pool: OutboxPool,
+ scoped_sub_state: ScopedSubsState,
note_cache: NoteCache,
accounts: Accounts,
global_wallet: GlobalWallet,
@@ -96,15 +98,14 @@ fn main_panel(style: &egui::Style) -> egui::CentralPanel {
})
}
-fn render_notedeck(notedeck: &mut Notedeck, ctx: &egui::Context) {
+#[profiling::function]
+fn render_notedeck(
+ app: Rc<RefCell<dyn App + 'static>>,
+ app_ctx: &mut AppContext,
+ ctx: &egui::Context,
+) {
main_panel(&ctx.style()).show(ctx, |ui| {
- // render app
- let Some(app) = ¬edeck.app else {
- return;
- };
-
- let app = app.clone();
- app.borrow_mut().update(&mut notedeck.app_context(), ui);
+ app.borrow_mut().update(app_ctx, ui);
// Move the screen up when we have a virtual keyboard
// NOTE: actually, we only want to do this if the keyboard is covering the focused element?
@@ -121,27 +122,52 @@ fn render_notedeck(notedeck: &mut Notedeck, ctx: &egui::Context) {
}
impl eframe::App for Notedeck {
+ #[profiling::function]
fn update(&mut self, ctx: &egui::Context, frame: &mut eframe::Frame) {
profiling::finish_frame!();
self.frame_history
.on_new_frame(ctx.input(|i| i.time), frame.info().cpu_usage);
- self.media_jobs.run_received(&mut self.job_pool, |id| {
- crate::run_media_job_pre_action(id, &mut self.img_cache.textures);
- });
- self.media_jobs.deliver_all_completed(|completed| {
- crate::deliver_completed_media_job(completed, &mut self.img_cache.textures)
- });
+ {
+ profiling::scope!("media jobs");
+ self.media_jobs.run_received(&mut self.job_pool, |id| {
+ crate::run_media_job_pre_action(id, &mut self.img_cache.textures);
+ });
+ self.media_jobs.deliver_all_completed(|completed| {
+ crate::deliver_completed_media_job(completed, &mut self.img_cache.textures)
+ });
+ }
self.nip05_cache.poll();
+ let Some(app) = &self.app else {
+ return;
+ };
+ let app = app.clone();
+ let mut app_ctx = self.app_context(ctx);
// handle account updates
- self.accounts.update(&mut self.ndb, &mut self.pool, ctx);
+ app_ctx.accounts.update(app_ctx.ndb, &mut app_ctx.remote);
+
+ app_ctx
+ .zaps
+ .process(app_ctx.accounts, app_ctx.global_wallet, app_ctx.ndb);
+
+ app_ctx.remote.process_events(ctx, app_ctx.ndb);
- self.zaps
- .process(&mut self.accounts, &mut self.global_wallet, &self.ndb);
+ {
+ profiling::scope!("unknown id");
+ if app_ctx.unknown_ids.ready_to_send() {
+ let mut oneshot = app_ctx.remote.oneshot(app_ctx.accounts);
+ unknown_id_send(app_ctx.unknown_ids, &mut oneshot);
+ }
+ }
- render_notedeck(self, ctx);
+ render_notedeck(app, &mut app_ctx, ctx);
+
+ {
+ profiling::scope!("outbox ingestion");
+ drop(app_ctx);
+ }
self.settings.update_batch(|settings| {
settings.zoom_factor = ctx.zoom_factor();
@@ -154,16 +180,6 @@ impl eframe::App for Notedeck {
});
self.app_size.try_save_app_size(ctx);
- if self.args.options.contains(NotedeckOptions::RelayDebug) {
- if self.pool.debug.is_none() {
- self.pool.use_debug();
- }
-
- if let Some(debug) = &mut self.pool.debug {
- RelayDebugView::window(ctx, debug);
- }
- }
-
#[cfg(feature = "puffin")]
puffin_egui::profiler_window(ctx);
}
@@ -186,7 +202,7 @@ impl Notedeck {
self.android_app = Some(context);
}
- pub fn new<P: AsRef<Path>>(ctx: &egui::Context, data_path: P, args: &[String]) -> Self {
+ pub fn init<P: AsRef<Path>>(ctx: &egui::Context, data_path: P, args: &[String]) -> NotedeckCtx {
#[cfg(feature = "puffin")]
setup_puffin();
@@ -236,19 +252,13 @@ impl Notedeck {
None
};
- // AccountManager will setup the pool on first update
- let mut pool = RelayPool::new();
- {
- let ctx = ctx.clone();
- if let Err(err) = pool.add_multicast_relay(move || ctx.request_repaint()) {
- error!("error setting up multicast relay: {err}");
- }
- }
-
let mut unknown_ids = UnknownIds::default();
try_swap_compacted_db(&dbpath_str);
let mut ndb = Ndb::new(&dbpath_str, &config).expect("ndb");
let txn = Transaction::new(&ndb).expect("txn");
+ let mut scoped_sub_state = ScopedSubsState::default();
+ let mut pool = OutboxPool::default();
+ let outbox_session = OutboxSessionHandler::new(&mut pool, EguiWakeup::new(ctx.clone()));
let mut accounts = Accounts::new(
keystore,
@@ -256,8 +266,6 @@ impl Notedeck {
FALLBACK_PUBKEY(),
&mut ndb,
&txn,
- &mut pool,
- ctx,
&mut unknown_ids,
);
@@ -276,9 +284,13 @@ impl Notedeck {
}
}
- if let Some(first) = parsed_args.keys.first() {
- accounts.select_account(&first.pubkey, &mut ndb, &txn, &mut pool, ctx);
- }
+ let outbox_session = if let Some(first) = parsed_args.keys.first() {
+ let mut remote = RemoteApi::new(outbox_session, &mut scoped_sub_state);
+ accounts.select_account(&first.pubkey, &mut ndb, &txn, &mut remote);
+ remote.export_session()
+ } else {
+ outbox_session.export()
+ };
let img_cache = Images::new(img_cache_dir);
let note_cache = NoteCache::default();
@@ -315,11 +327,12 @@ impl Notedeck {
let (send_new_jobs, receive_new_jobs) = std::sync::mpsc::channel();
let media_job_cache = JobCache::new(receive_new_jobs, send_new_jobs);
- Self {
+ let notedeck = Self {
ndb,
img_cache,
unknown_ids,
pool,
+ scoped_sub_state,
note_cache,
accounts,
global_wallet,
@@ -338,6 +351,11 @@ impl Notedeck {
i18n,
#[cfg(target_os = "android")]
android_app: None,
+ };
+
+ NotedeckCtx {
+ notedeck,
+ outbox_session,
}
}
@@ -354,22 +372,6 @@ impl Notedeck {
);
}
- /// ensure we recognized all the arguments
- pub fn check_args(&self, other_app_args: &BTreeSet<String>) -> Result<(), Error> {
- let completely_unrecognized: Vec<String> = self
- .unrecognized_args()
- .intersection(other_app_args)
- .cloned()
- .collect();
- if !completely_unrecognized.is_empty() {
- let err = format!("Unrecognized arguments: {completely_unrecognized:?}");
- tracing::error!("{}", &err);
- return Err(Error::Generic(err));
- }
-
- Ok(())
- }
-
#[inline]
pub fn options(&self) -> NotedeckOptions {
self.args.options
@@ -384,27 +386,46 @@ impl Notedeck {
self
}
- pub fn app_context(&mut self) -> AppContext<'_> {
- AppContext {
- ndb: &mut self.ndb,
- img_cache: &mut self.img_cache,
- unknown_ids: &mut self.unknown_ids,
- pool: &mut self.pool,
- note_cache: &mut self.note_cache,
- accounts: &mut self.accounts,
- global_wallet: &mut self.global_wallet,
- path: &self.path,
- args: &self.args,
- settings: &mut self.settings,
- clipboard: &mut self.clipboard,
- zaps: &mut self.zaps,
- frame_history: &mut self.frame_history,
- job_pool: &mut self.job_pool,
- media_jobs: &mut self.media_jobs,
- nip05_cache: &mut self.nip05_cache,
- i18n: &mut self.i18n,
- #[cfg(target_os = "android")]
- android: self.android_app.as_ref().unwrap().clone(),
+ pub fn app_context(&mut self, ui_ctx: &egui::Context) -> AppContext<'_> {
+ self.notedeck_ref(ui_ctx, None).app_ctx
+ }
+
+ pub fn notedeck_ref<'a>(
+ &'a mut self,
+ ui_ctx: &egui::Context,
+ session: Option<OutboxSession>,
+ ) -> NotedeckRef<'a> {
+ let outbox = if let Some(session) = session {
+ OutboxSessionHandler::import(&mut self.pool, session, EguiWakeup::new(ui_ctx.clone()))
+ } else {
+ OutboxSessionHandler::new(&mut self.pool, EguiWakeup::new(ui_ctx.clone()))
+ };
+
+ NotedeckRef {
+ app_ctx: AppContext {
+ ndb: &mut self.ndb,
+ img_cache: &mut self.img_cache,
+ unknown_ids: &mut self.unknown_ids,
+ remote: RemoteApi::new(outbox, &mut self.scoped_sub_state),
+ note_cache: &mut self.note_cache,
+ accounts: &mut self.accounts,
+ global_wallet: &mut self.global_wallet,
+ path: &self.path,
+ args: &self.args,
+ settings: &mut self.settings,
+ clipboard: &mut self.clipboard,
+ zaps: &mut self.zaps,
+ frame_history: &mut self.frame_history,
+ job_pool: &mut self.job_pool,
+ media_jobs: &mut self.media_jobs,
+ nip05_cache: &mut self.nip05_cache,
+ i18n: &mut self.i18n,
+ #[cfg(target_os = "android")]
+ android: self.android_app.as_ref().unwrap().clone(),
+ },
+ internals: NotedeckInternals {
+ unrecognized_args: &self.unrecognized_args,
+ },
}
}
@@ -457,95 +478,30 @@ pub fn install_crypto() {
}
}
-#[profiling::function]
-pub fn try_process_events_core(
- app_ctx: &mut AppContext<'_>,
- ctx: &egui::Context,
- mut receive: impl FnMut(&mut AppContext, PoolEventBuf),
-) {
- let ctx2 = ctx.clone();
- let wakeup = move || {
- ctx2.request_repaint();
- };
-
- app_ctx.pool.keepalive_ping(wakeup);
-
- // NOTE: we don't use the while let loop due to borrow issues
- #[allow(clippy::while_let_loop)]
- loop {
- let ev = if let Some(ev) = app_ctx.pool.try_recv() {
- ev.into_owned()
- } else {
- break;
- };
-
- match (&ev.event).into() {
- RelayEvent::Opened => {
- tracing::trace!("Opened relay {}", ev.relay);
- app_ctx
- .accounts
- .send_initial_filters(app_ctx.pool, &ev.relay);
- }
- RelayEvent::Closed => tracing::warn!("{} connection closed", &ev.relay),
- RelayEvent::Other(msg) => {
- tracing::trace!("relay {} sent other event {:?}", ev.relay, &msg)
- }
- RelayEvent::Error(error) => error!("relay {} had error: {error:?}", &ev.relay),
- RelayEvent::Message(msg) => {
- process_message_core(app_ctx, &ev.relay, &msg);
- }
- }
-
- receive(app_ctx, ev);
- }
-
- if app_ctx.unknown_ids.ready_to_send() {
- unknown_id_send(app_ctx.unknown_ids, app_ctx.pool);
- }
+pub struct NotedeckRef<'a> {
+ pub app_ctx: AppContext<'a>,
+ pub internals: NotedeckInternals<'a>,
}
-#[profiling::function]
-fn process_message_core(ctx: &mut AppContext<'_>, relay: &str, msg: &RelayMessage) {
- match msg {
- RelayMessage::Event(_subid, ev) => {
- let relay = if let Some(relay) = ctx.pool.relays.iter().find(|r| r.url() == relay) {
- relay
- } else {
- error!("couldn't find relay {} for note processing!?", relay);
- return;
- };
+pub struct NotedeckInternals<'a> {
+ pub unrecognized_args: &'a BTreeSet<String>,
+}
- match relay {
- PoolRelay::Websocket(_) => {
- //info!("processing event {}", event);
- tracing::trace!("processing event {ev}");
- if let Err(err) = ctx.ndb.process_event_with(
- ev,
- nostrdb::IngestMetadata::new()
- .client(false)
- .relay(relay.url()),
- ) {
- error!("error processing event {ev}: {err}");
- }
- }
- PoolRelay::Multicast(_) => {
- // multicast events are client events
- if let Err(err) = ctx.ndb.process_event_with(
- ev,
- nostrdb::IngestMetadata::new()
- .client(true)
- .relay(relay.url()),
- ) {
- error!("error processing multicast event {ev}: {err}");
- }
- }
- }
- }
- RelayMessage::Notice(msg) => tracing::warn!("Notice from {}: {}", relay, msg),
- RelayMessage::OK(cr) => info!("OK {:?}", cr),
- RelayMessage::Eose(id) => {
- tracing::trace!("Relay {} received eose: {id}", relay)
+impl<'a> NotedeckInternals<'a> {
+ /// ensure we recognized all the arguments
+ pub fn check_args(&self, other_app_args: &BTreeSet<String>) -> Result<(), Error> {
+ let completely_unrecognized: Vec<String> = self
+ .unrecognized_args
+ .intersection(other_app_args)
+ .cloned()
+ .collect();
+ if !completely_unrecognized.is_empty() {
+ let err = format!("Unrecognized arguments: {completely_unrecognized:?}");
+ tracing::error!("{}", &err);
+ return Err(Error::Generic(err));
}
+
+ Ok(())
}
}
@@ -596,3 +552,8 @@ fn try_swap_compacted_db(dbpath: &str) {
let _ = std::fs::remove_dir_all(&compact_path);
info!("compact swap: success! {old_size} -> {compact_size} bytes");
}
+
+pub struct NotedeckCtx {
+ pub notedeck: Notedeck,
+ pub outbox_session: OutboxSession,
+}
diff --git a/crates/notedeck/src/context.rs b/crates/notedeck/src/context.rs
@@ -1,12 +1,12 @@
use crate::{
account::accounts::Accounts, frame_history::FrameHistory, i18n::Localization,
nip05::Nip05Cache, wallet::GlobalWallet, zaps::Zaps, Args, DataPath, Images, JobPool,
- MediaJobs, NoteCache, SettingsHandler, UnknownIds,
+ MediaJobs, NoteCache, RemoteApi, SettingsHandler, UnknownIds,
};
use egui_winit::clipboard::Clipboard;
+use enostr::Pubkey;
-use enostr::RelayPool;
-use nostrdb::Ndb;
+use nostrdb::{Ndb, Transaction};
#[cfg(target_os = "android")]
use android_activity::AndroidApp;
@@ -17,7 +17,8 @@ pub struct AppContext<'a> {
pub ndb: &'a mut Ndb,
pub img_cache: &'a mut Images,
pub unknown_ids: &'a mut UnknownIds,
- pub pool: &'a mut RelayPool,
+ /// Relay/outbox transport APIs (scoped subs, oneshot, publish, relay inspect).
+ pub remote: RemoteApi<'a>,
pub note_cache: &'a mut NoteCache,
pub accounts: &'a mut Accounts,
pub global_wallet: &'a mut GlobalWallet,
@@ -51,6 +52,21 @@ impl SoftKeyboardContext {
}
impl<'a> AppContext<'a> {
+ pub fn select_account(&mut self, pubkey: &Pubkey) {
+ let txn = Transaction::new(self.ndb).expect("txn");
+ self.accounts
+ .select_account(pubkey, self.ndb, &txn, &mut self.remote);
+ }
+
+ pub fn remove_account(&mut self, pubkey: &Pubkey) -> bool {
+ self.accounts
+ .remove_account(pubkey, self.ndb, &mut self.remote)
+ }
+
+ pub fn process_relay_action(&mut self, action: crate::RelayAction) {
+ self.accounts.process_relay_action(&mut self.remote, action);
+ }
+
pub fn soft_keyboard_rect(&self, screen_rect: Rect, ctx: SoftKeyboardContext) -> Option<Rect> {
match ctx {
SoftKeyboardContext::Virtual => {
diff --git a/crates/notedeck/src/filter.rs b/crates/notedeck/src/filter.rs
@@ -1,7 +1,7 @@
use crate::error::{Error, FilterError};
use crate::note::NoteRef;
+use enostr::OutboxSubId;
use nostrdb::{Filter, FilterBuilder, Note, Subscription};
-use std::collections::HashMap;
use tracing::{debug, warn};
/// A unified subscription has a local and remote component. The remote subid
@@ -9,93 +9,7 @@ use tracing::{debug, warn};
#[derive(Debug, Clone)]
pub struct UnifiedSubscription {
pub local: Subscription,
- pub remote: String,
-}
-
-/// Each relay can have a different filter state. For example, some
-/// relays may have the contact list, some may not. Let's capture all of
-/// these states so that some relays don't stop the states of other
-/// relays.
-#[derive(Debug)]
-pub struct FilterStates {
- pub initial_state: FilterState,
- pub states: HashMap<String, FilterState>,
-}
-
-impl FilterStates {
- pub fn get_mut(&mut self, relay: &str) -> &FilterState {
- // if our initial state is ready, then just use that
- if let FilterState::Ready(_) = self.initial_state {
- &self.initial_state
- } else {
- // otherwise we look at relay states
- if !self.states.contains_key(relay) {
- self.states
- .insert(relay.to_string(), self.initial_state.clone());
- }
- self.states.get(relay).unwrap()
- }
- }
-
- pub fn get_any_gotremote(&self) -> Option<GotRemoteResult> {
- for (k, v) in self.states.iter() {
- if let FilterState::GotRemote(item_type) = v {
- return match item_type {
- GotRemoteType::Normal(subscription) => Some(GotRemoteResult::Normal {
- relay_id: k.to_owned(),
- sub_id: *subscription,
- }),
- GotRemoteType::Contact => Some(GotRemoteResult::Contact {
- relay_id: k.to_owned(),
- }),
- GotRemoteType::PeopleList => Some(GotRemoteResult::PeopleList {
- relay_id: k.to_owned(),
- }),
- };
- }
- }
-
- None
- }
-
- pub fn get_any_ready(&self) -> Option<&HybridFilter> {
- if let FilterState::Ready(fs) = &self.initial_state {
- Some(fs)
- } else {
- for (_k, v) in self.states.iter() {
- if let FilterState::Ready(ref fs) = v {
- return Some(fs);
- }
- }
-
- None
- }
- }
-
- pub fn new(initial_state: FilterState) -> Self {
- Self {
- initial_state,
- states: HashMap::new(),
- }
- }
-
- pub fn set_relay_state(&mut self, relay: String, state: FilterState) {
- if self.states.contains_key(&relay) {
- let current_state = self.states.get(&relay).unwrap();
- debug!(
- "set_relay_state: {:?} -> {:?} on {}",
- current_state, state, &relay,
- );
- }
- self.states.insert(relay, state);
- }
-
- /// For contacts, since that sub is managed elsewhere
- pub fn set_all_states(&mut self, state: FilterState) {
- for cur_state in self.states.values_mut() {
- *cur_state = state.clone();
- }
- }
+ pub remote: OutboxSubId, // abstracted ID to a remote subscription
}
/// We may need to fetch some data from relays before our filter is ready.
@@ -103,39 +17,12 @@ impl FilterStates {
#[derive(Debug, Clone)]
pub enum FilterState {
NeedsRemote,
- FetchingRemote(FetchingRemoteType),
- GotRemote(GotRemoteType),
+ FetchingRemote,
+ GotRemote,
Ready(HybridFilter),
Broken(FilterError),
}
-pub enum GotRemoteResult {
- Normal {
- relay_id: String,
- sub_id: Subscription,
- },
- Contact {
- relay_id: String,
- },
- PeopleList {
- relay_id: String,
- },
-}
-
-#[derive(Debug, Clone)]
-pub enum FetchingRemoteType {
- Normal(UnifiedSubscription),
- Contact,
- PeopleList,
-}
-
-#[derive(Debug, Clone)]
-pub enum GotRemoteType {
- Normal(Subscription),
- Contact,
- PeopleList,
-}
-
impl FilterState {
/// We tried to fetch a filter but we wither got no data or the data
/// was corrupted, preventing us from getting to the Ready state.
@@ -162,22 +49,6 @@ impl FilterState {
pub fn needs_remote() -> Self {
Self::NeedsRemote
}
-
- /// We got the remote data. Local data should be available to build
- /// the filter for the [`FilterState::Ready`] state
- pub fn got_remote(local_sub: Subscription) -> Self {
- Self::GotRemote(GotRemoteType::Normal(local_sub))
- }
-
- /// We have sent off a remote subscription to get data needed for the
- /// filter. The string is the subscription id
- pub fn fetching_remote(sub_id: String, local_sub: Subscription) -> Self {
- let unified_sub = UnifiedSubscription {
- local: local_sub,
- remote: sub_id,
- };
- Self::FetchingRemote(FetchingRemoteType::Normal(unified_sub))
- }
}
pub fn should_since_optimize(limit: u64, num_notes: usize) -> bool {
diff --git a/crates/notedeck/src/jobs/cache.rs b/crates/notedeck/src/jobs/cache.rs
@@ -39,6 +39,7 @@ where
}
}
+ #[profiling::function]
pub fn run_received(&mut self, pool: &mut JobPool, mut pre_action: impl FnMut(&JobId<K>)) {
for pkg in self.receive_new_jobs.try_iter() {
let id = &pkg.id;
@@ -68,6 +69,7 @@ where
}
}
+ #[profiling::function]
pub fn deliver_all_completed(&mut self, mut deliver_complete: impl FnMut(JobComplete<K, T>)) {
while let Some(res) = self.completed.pop() {
tracing::trace!("Got completed: {:?}", res.job_id);
@@ -82,6 +84,7 @@ where
}
}
+#[profiling::function]
fn run_received_job<K, T>(
job_run: JobRun<T>,
pool: &mut JobPool,
@@ -102,6 +105,7 @@ fn run_received_job<K, T>(
}
}
+#[profiling::function]
fn run_sync<F, K, T>(
job_pool: &mut JobPool,
send_new_jobs: Sender<JobPackage<K, T>>,
@@ -114,38 +118,42 @@ fn run_sync<F, K, T>(
T: Send + 'static,
{
let id_c = id.clone();
- let wrapped: Box<dyn FnOnce() + Send + 'static> = Box::new(move || {
- let res = run_job();
- match res {
- JobOutput::Complete(complete_response) => {
- completion_queue.push(JobComplete {
- job_id: id.job_id.clone(),
- response: complete_response.response,
- });
- if let Some(run) = complete_response.run_no_output {
+ let wrapped: Box<dyn FnOnce() + Send + 'static> = {
+ profiling::scope!("box gen");
+ Box::new(move || {
+ let res = run_job();
+ match res {
+ JobOutput::Complete(complete_response) => {
+ completion_queue.push(JobComplete {
+ job_id: id.job_id.clone(),
+ response: complete_response.response,
+ });
+ if let Some(run) = complete_response.run_no_output {
+ if let Err(e) = send_new_jobs.send(JobPackage {
+ id: id.into_internal(),
+ run: RunType::NoOutput(run),
+ }) {
+ tracing::error!("{e}");
+ }
+ }
+ }
+ JobOutput::Next(job_run) => {
if let Err(e) = send_new_jobs.send(JobPackage {
id: id.into_internal(),
- run: RunType::NoOutput(run),
+ run: RunType::Output(job_run),
}) {
tracing::error!("{e}");
}
}
}
- JobOutput::Next(job_run) => {
- if let Err(e) = send_new_jobs.send(JobPackage {
- id: id.into_internal(),
- run: RunType::Output(job_run),
- }) {
- tracing::error!("{e}");
- }
- }
- }
- });
+ })
+ };
tracing::trace!("Spawning sync job: {id_c:?}");
job_pool.schedule_no_output(wrapped);
}
+#[profiling::function]
fn run_async<K, T>(
send_new_jobs: Sender<JobPackage<K, T>>,
completion_queue: CompletionQueue<K, T>,
@@ -187,6 +195,7 @@ fn run_async<K, T>(
});
}
+#[profiling::function]
fn no_output_run(pool: &mut JobPool, run: NoOutputRun) {
match run {
NoOutputRun::Sync(c) => {
diff --git a/crates/notedeck/src/jobs/media.rs b/crates/notedeck/src/jobs/media.rs
@@ -23,6 +23,7 @@ pub enum MediaJobResult {
Animation(Result<Animation, Error>),
}
+#[profiling::function]
pub fn deliver_completed_media_job(
completed: JobComplete<MediaJobKind, MediaJobResult>,
tex_cache: &mut TexturesCache,
@@ -56,6 +57,7 @@ pub fn deliver_completed_media_job(
tracing::trace!("Delivered job for {id_c}");
}
+#[profiling::function]
pub fn run_media_job_pre_action(job_id: &JobId<MediaJobKind>, tex_cache: &mut TexturesCache) {
let id = job_id.id.clone();
match job_id.job_kind {
diff --git a/crates/notedeck/src/lib.rs b/crates/notedeck/src/lib.rs
@@ -24,14 +24,21 @@ pub mod nip05;
mod nip51_set;
pub mod note;
mod notecache;
+mod oneshot_api;
mod options;
mod persist;
pub mod platform;
pub mod profile;
+mod publish;
pub mod relay_debug;
pub mod relayspec;
+mod remote_api;
mod result;
mod route;
+mod scoped_sub_api;
+mod scoped_sub_owners;
+mod scoped_sub_state;
+mod scoped_subs;
mod setup;
pub mod storage;
mod style;
@@ -46,16 +53,17 @@ mod user_account;
mod wallet;
mod zaps;
-pub use account::accounts::{AccountData, AccountSubs, Accounts};
+pub use account::accounts::{AccountData, Accounts};
pub use account::contacts::{ContactState, IsFollowing};
pub use account::relay::RelayAction;
pub use account::FALLBACK_PUBKEY;
-pub use app::{try_process_events_core, App, AppAction, AppResponse, Notedeck};
+pub use app::{App, AppAction, AppResponse, Notedeck};
pub use args::Args;
pub use async_loader::{worker_count, AsyncLoader};
pub use context::{AppContext, SoftKeyboardContext};
+use enostr::{OutboxSessionHandler, Wakeup};
pub use error::{show_one_error_message, Error, FilterError, ZapError};
-pub use filter::{FilterState, FilterStates, UnifiedSubscription};
+pub use filter::{FilterState, UnifiedSubscription};
pub use fonts::NamedFontFamily;
pub use i18n::{CacheStats, FluentArgs, FluentValue, LanguageIdentifier, Localization};
pub use imgcache::{
@@ -76,19 +84,30 @@ pub use nav::DragResponse;
pub use nip05::{Nip05Cache, Nip05Status};
pub use nip51_set::{create_nip51_set, Nip51Set, Nip51SetCache};
pub use note::{
- builder_from_note, get_p_tags, send_mute_event, send_note_builder, send_people_list_event,
- send_report_event, send_unmute_event, BroadcastContext, ContextSelection, NoteAction,
- NoteContext, NoteContextSelection, NoteRef, ReportTarget, ReportType, RootIdError, RootNoteId,
+ builder_from_note, get_p_tags, send_mute_event, send_people_list_event, send_report_event,
+ send_unmute_event, BroadcastContext, ContextSelection, NoteAction, NoteContext,
+ NoteContextSelection, NoteRef, ReportTarget, ReportType, RootIdError, RootNoteId,
RootNoteIdBuf, ScrollInfo, ZapAction,
};
pub use notecache::{CachedNote, NoteCache};
+pub use oneshot_api::OneshotApi;
pub use options::NotedeckOptions;
pub use persist::*;
pub use profile::*;
+pub use publish::{AccountsPublishApi, ExplicitPublishApi, PublishApi, RelayType};
pub use relay_debug::RelayDebugView;
pub use relayspec::RelaySpec;
+pub use remote_api::{RelayInspectApi, RelayInspectEntry, RemoteApi};
pub use result::Result;
pub use route::{DrawerRouter, ReplacementType, Router};
+pub use scoped_sub_api::ScopedSubApi;
+pub use scoped_sub_owners::SubOwnerKeyBuilder;
+pub use scoped_sub_state::ScopedSubsState;
+pub use scoped_subs::{
+ ClearSubResult, DropSlotResult, EnsureSubResult, RelaySelection, ScopedSubEoseStatus,
+ ScopedSubIdentity, ScopedSubLiveEoseStatus, SetSubResult, SubConfig, SubKey, SubKeyBuilder,
+ SubOwnerKey, SubScope,
+};
pub use storage::{AccountStorage, DataPath, DataPathType, Directory};
pub use style::NotedeckTextStyle;
pub use theme::ColorTheme;
@@ -96,7 +115,9 @@ pub use time::{
is_future_timestamp, time_ago_since, time_format, unix_time_secs, MAX_FUTURE_NOTE_SKEW_SECS,
};
pub use timecache::TimeCached;
-pub use unknowns::{get_unknown_note_ids, NoteRefsUnkIdAction, SingleUnkIdAction, UnknownIds};
+pub use unknowns::{
+ get_unknown_note_ids, unknown_id_send, NoteRefsUnkIdAction, SingleUnkIdAction, UnknownIds,
+};
pub use urls::{supported_mime_hosted_at_url, SupportedMimeType, UrlMimes};
pub use user_account::UserAccount;
pub use wallet::{
@@ -113,3 +134,20 @@ pub use enostr;
pub use nostrdb;
pub use zaps::Zaps;
+
+pub type Outbox<'a> = OutboxSessionHandler<'a, EguiWakeup>;
+
+#[derive(Clone)]
+pub struct EguiWakeup(egui::Context);
+
+impl EguiWakeup {
+ pub fn new(ctx: egui::Context) -> Self {
+ Self(ctx)
+ }
+}
+
+impl Wakeup for EguiWakeup {
+ fn wake(&self) {
+ self.0.request_repaint();
+ }
+}
diff --git a/crates/notedeck/src/nip51_set.rs b/crates/notedeck/src/nip51_set.rs
@@ -1,9 +1,8 @@
-use enostr::{Pubkey, RelayPool};
+use enostr::{OutboxSubId, Pubkey, RelayUrlPkgs};
use indexmap::IndexMap;
-use nostrdb::{Filter, Ndb, Note, Transaction};
-use uuid::Uuid;
+use nostrdb::{Filter, Ndb, Note, Subscription, Transaction};
-use crate::{UnifiedSubscription, UnknownIds};
+use crate::{Accounts, Outbox, UnifiedSubscription, UnknownIds};
/// Keeps track of most recent NIP-51 sets
#[derive(Debug)]
@@ -15,44 +14,50 @@ pub struct Nip51SetCache {
type PackId = String;
impl Nip51SetCache {
- pub fn new(
- pool: &mut RelayPool,
+ pub fn new_accounts_read(
+ pool: &mut Outbox<'_>,
+ accounts: &Accounts,
ndb: &Ndb,
txn: &Transaction,
unknown_ids: &mut UnknownIds,
nip51_set_filter: Vec<Filter>,
) -> Option<Self> {
- let subid = Uuid::new_v4().to_string();
- let mut cached_notes = IndexMap::default();
+ let (cached_notes, local) =
+ load_cached_notes_and_local_sub(ndb, txn, unknown_ids, &nip51_set_filter)?;
+ let remote = pool.subscribe(
+ nip51_set_filter.clone(),
+ RelayUrlPkgs::new(accounts.selected_account_read_relays()),
+ );
- let notes: Option<Vec<Note>> = if let Ok(results) = ndb.query(txn, &nip51_set_filter, 500) {
- Some(results.into_iter().map(|r| r.note).collect())
- } else {
- None
- };
+ Some(Self {
+ sub: UnifiedSubscription { local, remote },
+ cached_notes,
+ })
+ }
- if let Some(notes) = notes {
- add(notes, &mut cached_notes, ndb, txn, unknown_ids);
- }
+ pub fn new_local(
+ ndb: &Ndb,
+ txn: &Transaction,
+ unknown_ids: &mut UnknownIds,
+ nip51_set_filter: Vec<Filter>,
+ ) -> Option<Self> {
+ let (cached_notes, local) =
+ load_cached_notes_and_local_sub(ndb, txn, unknown_ids, &nip51_set_filter)?;
- let sub = match ndb.subscribe(&nip51_set_filter) {
- Ok(sub) => sub,
- Err(e) => {
- tracing::error!("Could not ndb subscribe: {e}");
- return None;
- }
- };
- pool.subscribe(subid.clone(), nip51_set_filter);
+ // Local-only constructor used when remote relay management is handled elsewhere.
+ let remote = OutboxSubId(0);
Some(Self {
- sub: UnifiedSubscription {
- local: sub,
- remote: subid,
- },
+ sub: UnifiedSubscription { local, remote },
cached_notes,
})
}
+ pub fn local_sub(&self) -> Subscription {
+ self.sub.local
+ }
+
+ #[profiling::function]
pub fn poll_for_notes(&mut self, ndb: &Ndb, unknown_ids: &mut UnknownIds) {
let new_notes = ndb.poll_for_notes(self.sub.local, 5);
@@ -86,6 +91,36 @@ impl Nip51SetCache {
}
}
+fn load_cached_notes_and_local_sub(
+ ndb: &Ndb,
+ txn: &Transaction,
+ unknown_ids: &mut UnknownIds,
+ nip51_set_filter: &[Filter],
+) -> Option<(IndexMap<PackId, Nip51Set>, Subscription)> {
+ let mut cached_notes = IndexMap::default();
+
+ let notes: Option<Vec<Note>> = if let Ok(results) = ndb.query(txn, nip51_set_filter, 500) {
+ Some(results.into_iter().map(|r| r.note).collect())
+ } else {
+ None
+ };
+
+ if let Some(notes) = notes {
+ add(notes, &mut cached_notes, ndb, txn, unknown_ids);
+ }
+
+ let local = match ndb.subscribe(nip51_set_filter) {
+ Ok(sub) => sub,
+ Err(e) => {
+ tracing::error!("Could not ndb subscribe: {e}");
+ return None;
+ }
+ };
+
+ Some((cached_notes, local))
+}
+
+#[profiling::function]
fn add(
notes: Vec<Note>,
cache: &mut IndexMap<PackId, Nip51Set>,
diff --git a/crates/notedeck/src/note/context.rs b/crates/notedeck/src/note/context.rs
@@ -1,8 +1,8 @@
-use enostr::{ClientMessage, NoteId, Pubkey, RelayPool};
+use enostr::{NoteId, Pubkey, RelayId};
use nostrdb::{Ndb, Note, NoteKey, Transaction};
use tracing::error;
-use crate::Accounts;
+use crate::{Accounts, RelayType, RemoteApi};
/// When broadcasting notes, this determines whether to broadcast
/// over the local network via multicast, or globally
@@ -53,22 +53,18 @@ impl NoteContextSelection {
ui: &mut egui::Ui,
note: &Note<'_>,
ndb: &Ndb,
- pool: &mut RelayPool,
+ remote: &mut RemoteApi,
txn: &Transaction,
accounts: &Accounts,
) {
match self {
NoteContextSelection::Broadcast(context) => {
tracing::info!("Broadcasting note {}", hex::encode(note.id()));
- match context {
- BroadcastContext::LocalNetwork => {
- pool.send_to(&ClientMessage::event(note).unwrap(), "multicast");
- }
-
- BroadcastContext::Everywhere => {
- pool.send(&ClientMessage::event(note).unwrap());
- }
- }
+ let relays = match context {
+ BroadcastContext::LocalNetwork => RelayType::Explicit(vec![RelayId::Multicast]),
+ BroadcastContext::Everywhere => RelayType::AccountsWrite,
+ };
+ remote.publisher(accounts).publish_note(note, relays);
}
NoteContextSelection::CopyText => {
ui.ctx().copy_text(note.content().to_string());
@@ -106,9 +102,23 @@ impl NoteContextSelection {
};
let muted = accounts.mute();
if muted.is_pk_muted(target.bytes()) {
- super::publish::send_unmute_event(ndb, txn, pool, kp, &muted, &target);
+ super::publish::send_unmute_event(
+ ndb,
+ txn,
+ &mut remote.publisher(accounts),
+ kp,
+ &muted,
+ &target,
+ );
} else {
- super::publish::send_mute_event(ndb, txn, pool, kp, &muted, &target);
+ super::publish::send_mute_event(
+ ndb,
+ txn,
+ &mut remote.publisher(accounts),
+ kp,
+ &muted,
+ &target,
+ );
}
}
NoteContextSelection::ReportUser => {}
diff --git a/crates/notedeck/src/note/mod.rs b/crates/notedeck/src/note/mod.rs
@@ -5,8 +5,8 @@ pub mod publish;
pub use action::{NoteAction, ReactAction, ScrollInfo, ZapAction, ZapTargetAmount};
pub use context::{BroadcastContext, ContextSelection, NoteContextSelection};
pub use publish::{
- builder_from_note, send_mute_event, send_note_builder, send_people_list_event,
- send_report_event, send_unmute_event, ReportTarget, ReportType,
+ builder_from_note, send_mute_event, send_people_list_event, send_report_event,
+ send_unmute_event, ReportTarget, ReportType,
};
use crate::jobs::MediaJobSender;
@@ -16,7 +16,7 @@ use crate::GlobalWallet;
use crate::Localization;
use crate::UnknownIds;
use crate::{notecache::NoteCache, zaps::Zaps, Images};
-use enostr::{NoteId, RelayPool};
+use enostr::NoteId;
use nostrdb::{Ndb, Note, NoteKey, QueryResult, Transaction};
use std::borrow::Borrow;
use std::cmp::Ordering;
@@ -32,7 +32,6 @@ pub struct NoteContext<'d> {
pub img_cache: &'d mut Images,
pub note_cache: &'d mut NoteCache,
pub zaps: &'d mut Zaps,
- pub pool: &'d mut RelayPool,
pub jobs: &'d MediaJobSender,
pub unknown_ids: &'d mut UnknownIds,
pub nip05_cache: &'d mut Nip05Cache,
diff --git a/crates/notedeck/src/note/publish.rs b/crates/notedeck/src/note/publish.rs
@@ -1,8 +1,8 @@
-use enostr::{FilledKeypair, NoteId, Pubkey, RelayPool};
+use enostr::{FilledKeypair, NoteId, Pubkey};
use nostrdb::{Filter, Ndb, Note, NoteBuildOptions, NoteBuilder, Transaction};
use tracing::info;
-use crate::Muted;
+use crate::{Muted, PublishApi, RelayType};
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub enum ReportType {
@@ -83,7 +83,12 @@ where
builder
}
-pub fn send_note_builder(builder: NoteBuilder, ndb: &Ndb, pool: &mut RelayPool, kp: FilledKeypair) {
+pub fn publish_note_builder(
+ builder: NoteBuilder,
+ ndb: &Ndb,
+ publisher: &mut PublishApi<'_, '_>,
+ kp: FilledKeypair,
+) {
let note = builder
.sign(&kp.secret_key.secret_bytes())
.build()
@@ -101,13 +106,13 @@ pub fn send_note_builder(builder: NoteBuilder, ndb: &Ndb, pool: &mut RelayPool,
let _ = ndb.process_event_with(&json, nostrdb::IngestMetadata::new().client(true));
info!("sending {}", &json);
- pool.send(event);
+ publisher.publish_note(¬e, RelayType::AccountsWrite);
}
pub fn send_unmute_event(
ndb: &Ndb,
txn: &Transaction,
- pool: &mut RelayPool,
+ publisher: &mut PublishApi<'_, '_>,
kp: FilledKeypair,
muted: &Muted,
target: &Pubkey,
@@ -152,13 +157,13 @@ pub fn send_unmute_event(
}),
);
- send_note_builder(builder, ndb, pool, kp);
+ publish_note_builder(builder, ndb, publisher, kp);
}
pub fn send_mute_event(
ndb: &Ndb,
txn: &Transaction,
- pool: &mut RelayPool,
+ publisher: &mut PublishApi<'_, '_>,
kp: FilledKeypair,
muted: &Muted,
target: &Pubkey,
@@ -200,12 +205,12 @@ pub fn send_mute_event(
.tag_str(&target.hex())
};
- send_note_builder(builder, ndb, pool, kp);
+ publish_note_builder(builder, ndb, publisher, kp);
}
pub fn send_people_list_event(
ndb: &Ndb,
- pool: &mut RelayPool,
+ publisher: &mut PublishApi<'_, '_>,
kp: FilledKeypair,
name: &str,
members: &[Pubkey],
@@ -225,12 +230,12 @@ pub fn send_people_list_event(
builder = builder.start_tag().tag_str("p").tag_str(&pk.hex());
}
- send_note_builder(builder, ndb, pool, kp);
+ publish_note_builder(builder, ndb, publisher, kp);
}
pub fn send_report_event(
ndb: &Ndb,
- pool: &mut RelayPool,
+ publisher: &mut PublishApi<'_, '_>,
kp: FilledKeypair,
target: &ReportTarget,
report_type: ReportType,
@@ -254,5 +259,5 @@ pub fn send_report_event(
.tag_str(report_str);
}
- send_note_builder(builder, ndb, pool, kp);
+ publish_note_builder(builder, ndb, publisher, kp);
}
diff --git a/crates/notedeck/src/oneshot_api.rs b/crates/notedeck/src/oneshot_api.rs
@@ -0,0 +1,86 @@
+use enostr::RelayUrlPkgs;
+use nostrdb::Filter;
+
+use crate::{Accounts, Outbox};
+
+/// App-facing one-shot relay API.
+///
+/// This keeps transient read requests (REQ/EOSE) separate from durable
+/// scoped subscriptions.
+pub struct OneshotApi<'o, 'a> {
+ pool: &'o mut Outbox<'a>,
+ accounts: &'o Accounts,
+}
+
+impl<'o, 'a> OneshotApi<'o, 'a> {
+ pub fn new(pool: &'o mut Outbox<'a>, accounts: &'o Accounts) -> Self {
+ Self { pool, accounts }
+ }
+
+ /// Send a one-shot request to the selected account's read relay set.
+ pub fn oneshot(&mut self, filters: Vec<Filter>) {
+ self.pool.oneshot(
+ filters,
+ RelayUrlPkgs::new(self.accounts.selected_account_read_relays()),
+ );
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::{EguiWakeup, UnknownIds, FALLBACK_PUBKEY};
+ use enostr::{OutboxPool, OutboxSessionHandler, OutboxSubId};
+ use nostrdb::{Config, Ndb, Transaction};
+ use tempfile::TempDir;
+
+ fn test_accounts_with_forced_relay(relay: &str) -> (TempDir, crate::Accounts) {
+ let tmp = TempDir::new().expect("tmp dir");
+ let mut ndb = Ndb::new(tmp.path().to_str().expect("path"), &Config::new()).expect("ndb");
+ let txn = Transaction::new(&ndb).expect("txn");
+ let mut unknown_ids = UnknownIds::default();
+
+ let accounts = crate::Accounts::new(
+ None,
+ vec![relay.to_owned()],
+ FALLBACK_PUBKEY(),
+ &mut ndb,
+ &txn,
+ &mut unknown_ids,
+ );
+
+ (tmp, accounts)
+ }
+
+ /// Verifies oneshot requests are routed to the selected account's read relays
+ /// and the expected filters are staged in outbox.
+ #[test]
+ fn oneshot_uses_selected_account_read_relays() {
+ let (_tmp, accounts) = test_accounts_with_forced_relay("wss://relay-read.example.com");
+ let expected_relays = accounts.selected_account_read_relays();
+ assert!(!expected_relays.is_empty());
+
+ let mut pool = OutboxPool::default();
+ let filter = Filter::new().kinds(vec![1]).limit(1).build();
+
+ {
+ let mut outbox =
+ OutboxSessionHandler::new(&mut pool, EguiWakeup::new(egui::Context::default()));
+ let mut oneshot = OneshotApi::new(&mut outbox, &accounts);
+ oneshot.oneshot(vec![filter.clone()]);
+ }
+
+ let request_id = OutboxSubId(0);
+ let status = pool.status(&request_id);
+ let status_relays: hashbrown::HashSet<enostr::NormRelayUrl> =
+ status.keys().map(|url| (*url).clone()).collect();
+ assert_eq!(status_relays, expected_relays);
+
+ let stored_filters = pool.filters(&request_id).expect("oneshot filters");
+ assert_eq!(stored_filters.len(), 1);
+ assert_eq!(
+ stored_filters[0].json().expect("filter json"),
+ filter.json().expect("filter json")
+ );
+ }
+}
diff --git a/crates/notedeck/src/persist/settings_handler.rs b/crates/notedeck/src/persist/settings_handler.rs
@@ -231,6 +231,7 @@ impl SettingsHandler {
self.try_save_settings();
}
+ #[profiling::function]
pub fn update_batch<F>(&mut self, update_fn: F)
where
F: FnOnce(&mut Settings),
diff --git a/crates/notedeck/src/publish.rs b/crates/notedeck/src/publish.rs
@@ -0,0 +1,174 @@
+use enostr::RelayId;
+use nostrdb::Note;
+
+use crate::{Accounts, Outbox};
+
+/// Relay target policy for publishing.
+#[derive(Clone, Debug, Eq, PartialEq)]
+pub enum RelayType {
+ /// Publish to the selected account's write relay set.
+ AccountsWrite,
+ /// Publish to an explicit relay target set.
+ Explicit(Vec<RelayId>),
+}
+
+/// Explicit-relay publishing API that does not depend on account state.
+pub struct ExplicitPublishApi<'o, 'a> {
+ pool: &'o mut Outbox<'a>,
+}
+
+impl<'o, 'a> ExplicitPublishApi<'o, 'a> {
+ pub fn new(pool: &'o mut Outbox<'a>) -> Self {
+ Self { pool }
+ }
+
+ /// Publish a note to an explicit relay target set.
+ pub fn publish_note(&mut self, note: &Note, relays: Vec<RelayId>) {
+ self.pool.broadcast_note(note, relays);
+ }
+}
+
+/// Selected-account write-relay publishing API.
+pub struct AccountsPublishApi<'o, 'a> {
+ pool: &'o mut Outbox<'a>,
+ accounts: &'o Accounts,
+}
+
+impl<'o, 'a> AccountsPublishApi<'o, 'a> {
+ pub fn new(pool: &'o mut Outbox<'a>, accounts: &'o Accounts) -> Self {
+ Self { pool, accounts }
+ }
+
+ /// Publish a note to the selected account's write relay set.
+ pub fn publish_note(&mut self, note: &Note) {
+ self.pool
+ .broadcast_note(note, self.accounts.selected_account_write_relays());
+ }
+}
+
+/// Compatibility wrapper over typed publishing APIs.
+pub struct PublishApi<'o, 'a> {
+ pool: &'o mut Outbox<'a>,
+ accounts: &'o Accounts,
+}
+
+impl<'o, 'a> PublishApi<'o, 'a> {
+ pub fn new(pool: &'o mut Outbox<'a>, accounts: &'o Accounts) -> Self {
+ Self { pool, accounts }
+ }
+
+ pub fn explicit(&mut self) -> ExplicitPublishApi<'_, 'a> {
+ ExplicitPublishApi::new(self.pool)
+ }
+
+ pub fn accounts_write(&mut self) -> AccountsPublishApi<'_, 'a> {
+ AccountsPublishApi::new(self.pool, self.accounts)
+ }
+
+ pub fn publish_note(&mut self, note: &Note, relays: RelayType) {
+ match relays {
+ RelayType::AccountsWrite => self.accounts_write().publish_note(note),
+ RelayType::Explicit(relays) => self.explicit().publish_note(note, relays),
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::{EguiWakeup, UnknownIds, FALLBACK_PUBKEY};
+ use enostr::{FullKeypair, NormRelayUrl, OutboxPool, OutboxSessionHandler};
+ use nostrdb::{Config, Ndb, Note, NoteBuilder, Transaction};
+ use tempfile::TempDir;
+
+ fn test_accounts_with_forced_relay(relay: &str) -> (TempDir, crate::Accounts) {
+ let tmp = TempDir::new().expect("tmp dir");
+ let mut ndb = Ndb::new(tmp.path().to_str().expect("path"), &Config::new()).expect("ndb");
+ let txn = Transaction::new(&ndb).expect("txn");
+ let mut unknown_ids = UnknownIds::default();
+
+ let accounts = crate::Accounts::new(
+ None,
+ vec![relay.to_owned()],
+ FALLBACK_PUBKEY(),
+ &mut ndb,
+ &txn,
+ &mut unknown_ids,
+ );
+
+ (tmp, accounts)
+ }
+
+ fn signed_note() -> Note<'static> {
+ let keypair = FullKeypair::generate();
+ let seckey = keypair.secret_key.to_secret_bytes();
+
+ NoteBuilder::new()
+ .kind(1)
+ .content("publish-test")
+ .sign(&seckey)
+ .build()
+ .expect("note")
+ }
+
+ /// Verifies explicit relay publishing targets only the provided relay set.
+ #[test]
+ fn publish_note_explicit_targets_requested_relay() {
+ let (_tmp, accounts) = test_accounts_with_forced_relay("wss://relay-write.example.com");
+ let note = signed_note();
+ let relay = NormRelayUrl::new("wss://relay-explicit.example.com").expect("relay");
+ let mut expected = hashbrown::HashSet::new();
+ expected.insert(relay.clone());
+
+ let mut pool = OutboxPool::default();
+ {
+ let mut outbox =
+ OutboxSessionHandler::new(&mut pool, EguiWakeup::new(egui::Context::default()));
+ let mut publish = PublishApi::new(&mut outbox, &accounts);
+
+ publish.publish_note(
+ ¬e,
+ RelayType::Explicit(vec![RelayId::Websocket(relay.clone())]),
+ );
+ }
+ let actual: hashbrown::HashSet<NormRelayUrl> = pool
+ .websocket_statuses()
+ .keys()
+ .map(|url| (*url).clone())
+ .collect();
+ assert_eq!(actual, expected);
+ }
+
+ /// Verifies account-write publishing targets the selected account's write relays.
+ #[test]
+ fn publish_note_accounts_write_targets_selected_account_relays() {
+ let (_tmp, accounts) =
+ test_accounts_with_forced_relay("wss://relay-accounts-write.example.com");
+ let note = signed_note();
+ let expected_relays: hashbrown::HashSet<NormRelayUrl> = accounts
+ .selected_account_write_relays()
+ .into_iter()
+ .filter_map(|relay| match relay {
+ RelayId::Websocket(url) => Some(url),
+ RelayId::Multicast => None,
+ })
+ .collect();
+ assert!(!expected_relays.is_empty());
+
+ let mut pool = OutboxPool::default();
+ {
+ let mut outbox =
+ OutboxSessionHandler::new(&mut pool, EguiWakeup::new(egui::Context::default()));
+ let mut publish = PublishApi::new(&mut outbox, &accounts);
+
+ publish.publish_note(¬e, RelayType::AccountsWrite);
+ }
+
+ let actual_relays: hashbrown::HashSet<NormRelayUrl> = pool
+ .websocket_statuses()
+ .keys()
+ .map(|url| (*url).clone())
+ .collect();
+ assert_eq!(actual_relays, expected_relays);
+ }
+}
diff --git a/crates/notedeck/src/relayspec.rs b/crates/notedeck/src/relayspec.rs
@@ -1,30 +1,28 @@
use std::cmp::Ordering;
use std::fmt;
+use enostr::NormRelayUrl;
+
// A Relay specification includes NIP-65 defined "markers" which
// indicate if the relay should be used for reading or writing (or
// both).
#[derive(Clone)]
pub struct RelaySpec {
- pub url: String,
+ pub url: NormRelayUrl,
pub has_read_marker: bool,
pub has_write_marker: bool,
}
impl RelaySpec {
- pub fn new(
- url: impl Into<String>,
- mut has_read_marker: bool,
- mut has_write_marker: bool,
- ) -> Self {
+ pub fn new(url: NormRelayUrl, mut has_read_marker: bool, mut has_write_marker: bool) -> Self {
// if both markers are set turn both off ...
if has_read_marker && has_write_marker {
has_read_marker = false;
has_write_marker = false;
}
RelaySpec {
- url: url.into(),
+ url,
has_read_marker,
has_write_marker,
}
@@ -80,12 +78,12 @@ impl Eq for RelaySpec {}
#[allow(clippy::non_canonical_partial_ord_impl)]
impl PartialOrd for RelaySpec {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
- Some(self.url.cmp(&other.url))
+ Some(self.url.to_string().cmp(&other.url.to_string()))
}
}
impl Ord for RelaySpec {
fn cmp(&self, other: &Self) -> Ordering {
- self.url.cmp(&other.url)
+ self.url.to_string().cmp(&other.url.to_string())
}
}
diff --git a/crates/notedeck/src/remote_api.rs b/crates/notedeck/src/remote_api.rs
@@ -0,0 +1,152 @@
+use egui::Context;
+use enostr::{NormRelayUrl, OutboxSession, Pubkey, RelayImplType, RelayStatus};
+use nostrdb::Ndb;
+
+use crate::{
+ Accounts, ExplicitPublishApi, OneshotApi, Outbox, PublishApi, ScopedSubApi, ScopedSubsState,
+};
+
+/// Read-only relay inspection row for relay UI surfaces.
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub struct RelayInspectEntry<'a> {
+ pub relay_url: &'a NormRelayUrl,
+ pub status: RelayStatus,
+}
+
+/// Read-only relay inspection facade.
+///
+/// This exposes only relay status inspection needed by UI code and intentionally
+/// does not provide subscription/publish/oneshot methods.
+pub struct RelayInspectApi<'r, 'a> {
+ pool: &'r Outbox<'a>,
+}
+
+impl<'r, 'a> RelayInspectApi<'r, 'a> {
+ pub(crate) fn new(pool: &'r Outbox<'a>) -> Self {
+ Self { pool }
+ }
+
+ /// Snapshot websocket relay statuses for display/debug UI.
+ pub fn relay_infos(&self) -> Vec<RelayInspectEntry<'_>> {
+ self.pool
+ .outbox
+ .websocket_statuses()
+ .into_iter()
+ .map(|(url, status)| RelayInspectEntry {
+ relay_url: url,
+ status,
+ })
+ .collect()
+ }
+}
+
+/// App-facing facade for relay/outbox transport operations.
+///
+/// This is the only app-visible entrypoint for scoped subscriptions, one-shot
+/// requests, publishing, relay event ingestion, and relay status inspection.
+/// Apps should not access raw `Outbox` directly.
+pub struct RemoteApi<'a> {
+ pool: Outbox<'a>,
+ scoped_sub_state: &'a mut ScopedSubsState,
+}
+
+impl<'a> RemoteApi<'a> {
+ pub(crate) fn new(pool: Outbox<'a>, scoped_sub_state: &'a mut ScopedSubsState) -> Self {
+ Self {
+ pool,
+ scoped_sub_state,
+ }
+ }
+
+ /// Export the staged outbox session without exposing the raw handler.
+ ///
+ /// This is only needed during host initialization before the first frame.
+ pub(crate) fn export_session(self) -> OutboxSession {
+ self.pool.export()
+ }
+
+ /// Access scoped subscription APIs bound to the selected account.
+ pub fn scoped_subs<'o>(&'o mut self, accounts: &'o Accounts) -> ScopedSubApi<'o, 'a> {
+ self.scoped_sub_state.api(&mut self.pool, accounts)
+ }
+
+ /// Access one-shot read APIs bound to the selected account.
+ pub fn oneshot<'o>(&'o mut self, accounts: &'o Accounts) -> OneshotApi<'o, 'a> {
+ OneshotApi::new(&mut self.pool, accounts)
+ }
+
+ /// Access publishing APIs bound to the selected account.
+ pub fn publisher<'o>(&'o mut self, accounts: &'o Accounts) -> PublishApi<'o, 'a> {
+ PublishApi::new(&mut self.pool, accounts)
+ }
+
+ /// Access explicit-relay publishing APIs (no account dependency).
+ pub fn publisher_explicit<'o>(&'o mut self) -> ExplicitPublishApi<'o, 'a> {
+ ExplicitPublishApi::new(&mut self.pool)
+ }
+
+ /// Host-only relay ingestion + keepalive maintenance.
+ pub(crate) fn process_events(&mut self, ctx: &Context, ndb: &Ndb) {
+ try_process_events(ctx, &mut self.pool, ndb);
+ }
+
+ /// Access read-only relay inspection data for UI rendering.
+ pub fn relay_inspect(&self) -> RelayInspectApi<'_, 'a> {
+ RelayInspectApi::new(&self.pool)
+ }
+
+ /// Host account-switch transition hook for scoped subscription teardown/restore.
+ pub(crate) fn on_account_switched(
+ &mut self,
+ old_account: Pubkey,
+ new_account: Pubkey,
+ accounts: &Accounts,
+ ) {
+ self.scoped_sub_state.runtime_mut().on_account_switched(
+ &mut self.pool,
+ old_account,
+ new_account,
+ accounts,
+ );
+ }
+
+ /// Host/account hook to retarget selected-account-read scoped subscriptions.
+ ///
+ /// This retargets all live scoped subscriptions that resolve relays from
+ /// [`crate::RelaySelection::AccountsRead`] without requiring callers to
+ /// individually `set_sub(...)` every declaration.
+ pub(crate) fn retarget_selected_account_read_relays(&mut self, accounts: &Accounts) {
+ self.scoped_sub_state
+ .runtime_mut()
+ .retarget_selected_account_read_relays(&mut self.pool, accounts);
+ }
+}
+
+#[profiling::function]
+pub fn try_process_events(ctx: &Context, pool: &mut Outbox, ndb: &Ndb) {
+ let ctx2 = ctx.clone();
+ let wakeup = move || {
+ ctx2.request_repaint();
+ };
+
+ pool.outbox.keepalive_ping(wakeup);
+
+ pool.outbox.try_recv(10, |ev| {
+ let from_client = match ev.relay_type {
+ RelayImplType::Websocket => false,
+ enostr::RelayImplType::Multicast => true,
+ };
+
+ {
+ profiling::scope!("ndb process event");
+ if let Err(err) = ndb.process_event_with(
+ ev.event_json,
+ nostrdb::IngestMetadata::new()
+ .client(from_client)
+ .relay(ev.url),
+ ) {
+ tracing::error!("error processing event {}: {err}", ev.event_json);
+ }
+ }
+ });
+}
diff --git a/crates/notedeck/src/scoped_sub_api.rs b/crates/notedeck/src/scoped_sub_api.rs
@@ -0,0 +1,160 @@
+use enostr::Pubkey;
+
+use crate::scoped_sub_owners::ScopedSubOwners;
+use crate::scoped_subs::ScopedSubRuntime;
+use crate::{
+ Accounts, ClearSubResult, EnsureSubResult, Outbox, ScopedSubEoseStatus, ScopedSubIdentity,
+ SetSubResult, SubConfig, SubOwnerKey,
+};
+
/// App-facing facade over scoped subscription owner/runtime operations.
///
/// This bundles host resources that are commonly passed together to avoid
/// argument plumbing through app-layer helper functions.
pub struct ScopedSubApi<'o, 'a> {
    /// Outbox transport used to open/modify/close live relay subscriptions.
    pool: &'o mut Outbox<'a>,
    /// Read-only account state used to resolve `SubScope::Account` scopes.
    accounts: &'o Accounts,
    /// Owner-key → runtime-slot registry.
    owners: &'o mut ScopedSubOwners,
    /// Desired/live scoped subscription state machine.
    runtime: &'o mut ScopedSubRuntime,
}
+
impl<'o, 'a> ScopedSubApi<'o, 'a> {
    pub(crate) fn new(
        pool: &'o mut Outbox<'a>,
        accounts: &'o Accounts,
        owners: &'o mut ScopedSubOwners,
        runtime: &'o mut ScopedSubRuntime,
    ) -> Self {
        Self {
            pool,
            accounts,
            owners,
            runtime,
        }
    }

    /// Pubkey of the currently selected account.
    ///
    /// This is the account the runtime uses to resolve `SubScope::Account`
    /// identities.
    pub fn selected_account_pubkey(&self) -> Pubkey {
        *self.accounts.selected_account_pubkey()
    }

    /// Create or update one scoped remote subscription declaration.
    ///
    /// Thread example (recommended mental model):
    /// - `identity.owner` = one thread view lifecycle (for example one open thread pane)
    /// - `identity.key` = `replies-by-root(root_id)`
    /// - `identity.scope` = `SubScope::Account`
    ///
    /// If two thread views open the same root on the same account, they should use:
    /// - different `owner`
    /// - same `key`
    /// - same `scope`
    ///
    /// The runtime shares one live outbox subscription for that resolved `(scope, key)`.
    ///
    /// `set_sub(...)` is an upsert for the resolved `(scope, key)`:
    /// - first call creates desired state
    /// - repeated calls update/replace desired state and may modify the live outbox sub
    ///
    /// Use this when the remote config can change (filters and/or relays).
    /// For thread reply subscriptions, prefer [`Self::ensure_sub`] unless the thread's
    /// remote filters actually change.
    ///
    /// Account-scoped behavior (`SubScope::Account`):
    /// - On switch away, the live outbox subscription is unsubscribed.
    /// - Desired state is retained while owners still exist.
    /// - On switch back, the live outbox subscription is restored from desired state.
    /// - If owners are dropped while away, nothing is restored.
    pub fn set_sub(&mut self, identity: ScopedSubIdentity, config: SubConfig) -> SetSubResult {
        self.owners
            .set_sub(self.runtime, self.pool, self.accounts, identity, config)
    }

    /// Create a scoped remote subscription declaration only if it is absent.
    ///
    /// Thread open path example:
    /// - build `identity = { owner: thread-view, key: replies-by-root(root_id), scope: Account }`
    /// - call `ensure_sub(identity, config)` when opening the thread
    ///
    /// Repeated calls with the same resolved `(scope, key)`:
    /// - keep ownership attached
    /// - do not modify desired state
    /// - do not modify the live outbox subscription
    ///
    /// This is the preferred API for stable thread reply subscriptions because it is
    /// idempotent and avoids unnecessary outbox subscription updates on repeats.
    ///
    /// Account-switch behavior matches [`Self::set_sub`].
    pub fn ensure_sub(
        &mut self,
        identity: ScopedSubIdentity,
        config: SubConfig,
    ) -> EnsureSubResult {
        self.owners
            .ensure_sub(self.runtime, self.pool, self.accounts, identity, config)
    }

    /// Clear one scoped subscription declaration while keeping the owner alive.
    ///
    /// Thread example:
    /// - This is less common than [`Self::drop_owner`].
    /// - Use this only if a thread owner remains alive but should stop declaring one
    ///   specific thread remote sub key.
    ///
    /// Outbox behavior:
    /// - If other owners still declare the same resolved `(scope, key)`, the shared live
    ///   outbox subscription remains active.
    /// - If this was the last owner for that `(scope, key)`, the live outbox subscription
    ///   is unsubscribed (if active) and desired state is removed.
    pub fn clear_sub(&mut self, identity: ScopedSubIdentity) -> ClearSubResult {
        self.owners
            .clear_sub(self.runtime, self.pool, self.accounts, identity)
    }

    /// Clear one account-scoped declaration for an explicit account (host-only).
    ///
    /// This exists for host cleanup paths (for example deleting a non-selected account's
    /// retained scoped declarations). App code should use [`Self::clear_sub`], which resolves
    /// account scope from the currently selected account.
    pub(crate) fn clear_sub_for_account(
        &mut self,
        account_pubkey: Pubkey,
        identity: ScopedSubIdentity,
    ) -> ClearSubResult {
        self.owners
            .clear_sub_for_account(self.runtime, self.pool, account_pubkey, identity)
    }

    /// Query aggregate EOSE state for one scoped subscription declaration.
    ///
    /// Thread example:
    /// - query the status of `{ owner: thread-view, key: replies-by-root(root_id), scope: Account }`
    /// - the lookup uses the current selected account to resolve `SubScope::Account`
    ///
    /// If the same thread root is open in multiple thread views, each owner can query the same
    /// shared outbox subscription status through its own identity.
    ///
    /// Account-switch behavior:
    /// - Switch away: status typically becomes [`ScopedSubEoseStatus::Inactive`] because the
    ///   live outbox subscription is removed while desired state is retained.
    /// - Switch back: status may return to `Live(...)` after restore.
    pub fn sub_eose_status(&self, identity: ScopedSubIdentity) -> ScopedSubEoseStatus {
        self.owners
            .sub_eose_status(self.runtime, self.pool, self.accounts, identity)
    }

    /// Drop one owner lifecycle and release all scoped subscriptions declared by it.
    ///
    /// Thread example:
    /// - `owner` is one thread view lifecycle token.
    /// - If two thread views opened the same `replies-by-root(root_id)` on the same account,
    ///   dropping one owner keeps the shared live outbox subscription active.
    /// - Dropping the last owner unsubscribes the live outbox subscription for that thread key
    ///   (if active) and removes the retained desired declaration.
    ///
    /// Account-scoped behavior:
    /// - If the thread owner is dropped while switched away, there may be no live outbox sub to
    ///   unsubscribe, but the retained declaration is removed so nothing is restored on switch-back.
    pub fn drop_owner(&mut self, owner: SubOwnerKey) -> bool {
        self.owners.drop_owner(self.runtime, self.pool, owner)
    }
}
diff --git a/crates/notedeck/src/scoped_sub_owners.rs b/crates/notedeck/src/scoped_sub_owners.rs
@@ -0,0 +1,192 @@
+use enostr::Pubkey;
+use hashbrown::HashMap;
+
+use crate::{
+ scoped_subs::{ScopedSubRuntime, SubOwnerKey, SubSlotId},
+ Accounts, ClearSubResult, EnsureSubResult, Outbox, ScopedSubEoseStatus, ScopedSubIdentity,
+ SetSubResult, SubConfig, SubKeyBuilder, SubScope,
+};
+
/// Incremental builder for stable owner keys.
pub type SubOwnerKeyBuilder = SubKeyBuilder;

/// Host-owned mapping from owner lifecycles to runtime slots.
///
/// This is intended to be held by host/container code (not app feature state)
/// so slot ids are never stored in app modules.
#[derive(Default)]
pub(crate) struct ScopedSubOwners {
    /// One runtime slot per owner key; populated lazily by `ensure_slot`.
    slots_by_owner: HashMap<SubOwnerKey, SubSlotId>,
}
+
+impl ScopedSubOwners {
+ /// Ensure one runtime slot exists for this owner key.
+ pub(crate) fn ensure_slot(
+ &mut self,
+ runtime: &mut ScopedSubRuntime,
+ owner: SubOwnerKey,
+ ) -> SubSlotId {
+ if let Some(slot) = self.slots_by_owner.get(&owner).copied() {
+ return slot;
+ }
+
+ let slot = runtime.create_slot();
+ self.slots_by_owner.insert(owner, slot);
+ slot
+ }
+
+ /// Forward an upsert scoped-sub request for an owner lifecycle.
+ pub fn set_sub(
+ &mut self,
+ runtime: &mut ScopedSubRuntime,
+ pool: &mut Outbox<'_>,
+ accounts: &Accounts,
+ identity: ScopedSubIdentity,
+ config: SubConfig,
+ ) -> SetSubResult {
+ let slot = self.ensure_slot(runtime, identity.owner);
+ runtime.set_sub(pool, accounts, slot, identity.scope, identity.key, config)
+ }
+
+ /// Forward a create-if-absent scoped-sub request for an owner lifecycle.
+ pub fn ensure_sub(
+ &mut self,
+ runtime: &mut ScopedSubRuntime,
+ pool: &mut Outbox<'_>,
+ accounts: &Accounts,
+ identity: ScopedSubIdentity,
+ config: SubConfig,
+ ) -> EnsureSubResult {
+ let slot = self.ensure_slot(runtime, identity.owner);
+ runtime.ensure_sub(pool, accounts, slot, identity.scope, identity.key, config)
+ }
+
+ /// Clear one scoped subscription binding from an owner lifecycle.
+ pub fn clear_sub(
+ &mut self,
+ runtime: &mut ScopedSubRuntime,
+ pool: &mut Outbox<'_>,
+ accounts: &Accounts,
+ identity: ScopedSubIdentity,
+ ) -> ClearSubResult {
+ let Some(slot) = self.slots_by_owner.get(&identity.owner).copied() else {
+ return ClearSubResult::NotFound;
+ };
+
+ runtime.clear_sub(pool, accounts, slot, identity.key, identity.scope)
+ }
+
+ /// Clear one account-scoped subscription binding from an owner lifecycle for an explicit account.
+ pub fn clear_sub_for_account(
+ &mut self,
+ runtime: &mut ScopedSubRuntime,
+ pool: &mut Outbox<'_>,
+ account_pubkey: Pubkey,
+ identity: ScopedSubIdentity,
+ ) -> ClearSubResult {
+ debug_assert!(matches!(identity.scope, SubScope::Account));
+ let Some(slot) = self.slots_by_owner.get(&identity.owner).copied() else {
+ return ClearSubResult::NotFound;
+ };
+
+ runtime.clear_sub_with_selected(pool, account_pubkey, slot, identity.key, identity.scope)
+ }
+
+ /// Query aggregate EOSE state for one scoped subscription binding owned by `owner`.
+ pub fn sub_eose_status(
+ &self,
+ runtime: &ScopedSubRuntime,
+ pool: &Outbox<'_>,
+ accounts: &Accounts,
+ identity: ScopedSubIdentity,
+ ) -> ScopedSubEoseStatus {
+ let Some(slot) = self.slots_by_owner.get(&identity.owner).copied() else {
+ return ScopedSubEoseStatus::Missing;
+ };
+
+ runtime.sub_eose_status(pool, accounts, slot, identity.key, identity.scope)
+ }
+
+ /// Drop one owner lifecycle and release all its scoped subscriptions.
+ pub fn drop_owner(
+ &mut self,
+ runtime: &mut ScopedSubRuntime,
+ pool: &mut Outbox<'_>,
+ owner: SubOwnerKey,
+ ) -> bool {
+ let Some(slot) = self.slots_by_owner.remove(&owner) else {
+ return false;
+ };
+
+ let _ = runtime.drop_slot(pool, slot);
+ true
+ }
+
+ /// Number of tracked owner lifecycles.
+ #[allow(dead_code)]
+ pub fn len(&self) -> usize {
+ self.slots_by_owner.len()
+ }
+
+ /// Returns true if no owner lifecycles are tracked.
+ #[allow(dead_code)]
+ pub fn is_empty(&self) -> bool {
+ self.slots_by_owner.is_empty()
+ }
+}
+
#[cfg(test)]
mod tests {
    use super::*;
    use crate::EguiWakeup;
    use enostr::OutboxPool;

    /// The same owner key must always resolve to one stable runtime slot.
    #[test]
    fn ensure_slot_is_stable_for_owner() {
        let mut registry = ScopedSubOwners::default();
        let mut runtime = ScopedSubRuntime::default();

        let owner = SubOwnerKey::builder("threads").with(42u64).finish();
        let first = registry.ensure_slot(&mut runtime, owner);
        let second = registry.ensure_slot(&mut runtime, owner);

        assert_eq!(first, second);
        assert_eq!(registry.len(), 1);
    }

    /// Distinct owner keys must allocate distinct runtime slots.
    #[test]
    fn ensure_slot_distinguishes_different_owners() {
        let mut registry = ScopedSubOwners::default();
        let mut runtime = ScopedSubRuntime::default();

        let first_owner = SubOwnerKey::builder("threads").with(1u64).finish();
        let second_owner = SubOwnerKey::builder("threads").with(2u64).finish();

        let first_slot = registry.ensure_slot(&mut runtime, first_owner);
        let second_slot = registry.ensure_slot(&mut runtime, second_owner);

        assert_ne!(first_slot, second_slot);
        assert_eq!(registry.len(), 2);
    }

    /// Dropping an owner removes its slot mapping; a second drop is a no-op.
    #[test]
    fn drop_owner_removes_mapping_and_runtime_slot() {
        let mut registry = ScopedSubOwners::default();
        let mut runtime = ScopedSubRuntime::default();
        let mut backing_pool = OutboxPool::default();
        let mut session = enostr::OutboxSessionHandler::new(
            &mut backing_pool,
            EguiWakeup::new(egui::Context::default()),
        );

        let owner = SubOwnerKey::builder("onboarding").finish();
        let _ = registry.ensure_slot(&mut runtime, owner);

        assert!(registry.drop_owner(&mut runtime, &mut session, owner));
        assert!(!registry.slots_by_owner.contains_key(&owner));
        assert!(registry.is_empty());

        // Idempotent: the owner is already gone.
        assert!(!registry.drop_owner(&mut runtime, &mut session, owner));
    }
}
diff --git a/crates/notedeck/src/scoped_sub_state.rs b/crates/notedeck/src/scoped_sub_state.rs
@@ -0,0 +1,36 @@
+use crate::scoped_sub_owners::ScopedSubOwners;
+use crate::scoped_subs::ScopedSubRuntime;
+use crate::{Accounts, Outbox, ScopedSubApi};
+
/// Host-owned scoped subscription state.
///
/// This keeps scoped owner slots and runtime state together so they are
/// managed as one unit by the host.
#[derive(Default)]
pub struct ScopedSubsState {
    /// Desired/live scoped subscription state machine.
    runtime: ScopedSubRuntime,
    /// Owner-key → runtime-slot registry.
    owners: ScopedSubOwners,
}
+
+impl ScopedSubsState {
+ /// Borrow owner/runtime internals for legacy callsites that still expect
+ /// both references separately.
+ pub(crate) fn split_mut(&mut self) -> (&mut ScopedSubOwners, &mut ScopedSubRuntime) {
+ (&mut self.owners, &mut self.runtime)
+ }
+
+ /// Build the app-facing scoped subscription API bound to host resources.
+ pub fn api<'o, 'a>(
+ &'o mut self,
+ pool: &'o mut Outbox<'a>,
+ accounts: &'o Accounts,
+ ) -> ScopedSubApi<'o, 'a> {
+ let (owners, runtime) = self.split_mut();
+ ScopedSubApi::new(pool, accounts, owners, runtime)
+ }
+
+ /// Mutable access to runtime internals for host account-switch integration.
+ pub(crate) fn runtime_mut(&mut self) -> &mut ScopedSubRuntime {
+ &mut self.runtime
+ }
+}
diff --git a/crates/notedeck/src/scoped_subs.rs b/crates/notedeck/src/scoped_subs.rs
@@ -0,0 +1,1934 @@
+use std::collections::hash_map::DefaultHasher;
+use std::hash::{Hash, Hasher};
+
+use crate::{Accounts, Outbox};
+use enostr::{NormRelayUrl, OutboxSubId, Pubkey, RelayReqStatus, RelayUrlPkgs};
+use hashbrown::{HashMap, HashSet};
+use nostrdb::Filter;
+
/// Stable key used by apps to identify a logical subscription.
///
/// This follows an `egui::Id` style API: callers provide any hashable value,
/// and we store the resulting hashed key.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct SubKey(u64); // 64-bit hash of the caller-provided value

/// Stable key for host-owned scoped subscription lifecycle owners.
///
/// This is a semantic alias over [`SubKey`] to keep the callsites explicit
/// about ownership identity vs. logical subscription identity.
pub type SubOwnerKey = SubKey;
+
+impl SubKey {
+ /// Build a key from any hashable value.
+ pub fn new(value: impl Hash) -> Self {
+ let mut hasher = DefaultHasher::new();
+ value.hash(&mut hasher);
+ Self(hasher.finish())
+ }
+
+ /// Access the raw hashed value.
+ pub fn as_u64(&self) -> u64 {
+ self.0
+ }
+
+ /// Start a typed key builder seeded with a stable namespace/root.
+ pub fn builder(seed: impl Hash) -> SubKeyBuilder {
+ SubKeyBuilder::new(seed)
+ }
+}
+
+/// Incremental builder for stable subscription keys.
+///
+/// This avoids ad-hoc string formatting and keeps key construction typed.
+pub struct SubKeyBuilder {
+ hasher: DefaultHasher,
+}
+
+impl SubKeyBuilder {
+ /// Create a new builder with a required seed/root.
+ pub fn new(seed: impl Hash) -> Self {
+ let mut hasher = DefaultHasher::new();
+ seed.hash(&mut hasher);
+ Self { hasher }
+ }
+
+ /// Append one typed part to the key path.
+ pub fn with(mut self, part: impl Hash) -> Self {
+ part.hash(&mut self.hasher);
+ self
+ }
+
+ /// Finalize into a stable `SubKey`.
+ pub fn finish(self) -> SubKey {
+ SubKey(self.hasher.finish())
+ }
+}
+
/// Opaque owner slot id.
///
/// Host/app containers create one slot per UI lifecycle owner (route/view instance)
/// and use it to attach scoped subscription intent to that owner.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub(crate) struct SubSlotId(u64); // issued by ScopedSubRuntime::allocate_slot; 0 is never handed out

/// Scope associated with a subscription.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub enum SubScope {
    /// Scoped to the current account; runtime resolves this to a concrete pubkey.
    Account,
    /// Cross-account scope.
    Global,
}
+
/// Full logical identity of one scoped subscription declaration.
///
/// Thread-centric mental model (recommended):
/// - `owner`: one thread view lifecycle token (for example one open thread pane)
/// - `key`: the shareable thread remote stream identity, e.g. `replies-by-root(root_id)`
/// - `scope`: whether that thread key is account-scoped or global (usually account-scoped)
///
/// If two thread views open the same root on the same account, they should use:
/// - different `owner`
/// - the same `key`
/// - the same `scope = SubScope::Account`
///
/// The runtime then shares one live outbox subscription for that resolved `(scope, key)`.
///
/// `SubScope::Account` already partitions by account, so do not encode the account pubkey
/// into the `key`.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct ScopedSubIdentity {
    /// Lifecycle owner (one per view instance).
    pub owner: SubOwnerKey,
    /// Shareable logical stream identity.
    pub key: SubKey,
    /// Account-scoped vs. global partitioning of `key`.
    pub scope: SubScope,
}
+
+impl ScopedSubIdentity {
+ pub fn new(owner: SubOwnerKey, key: SubKey, scope: SubScope) -> Self {
+ Self { owner, key, scope }
+ }
+
+ pub fn account(owner: SubOwnerKey, key: SubKey) -> Self {
+ Self::new(owner, key, SubScope::Account)
+ }
+
+ pub fn global(owner: SubOwnerKey, key: SubKey) -> Self {
+ Self::new(owner, key, SubScope::Global)
+ }
+}
+
/// Relay selection policy for a subscription.
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum RelaySelection {
    /// Resolve relay set from the currently selected account's read relays.
    AccountsRead,
    /// Use an explicit relay set.
    Explicit(HashSet<NormRelayUrl>),
}

/// Realization config for one scoped subscription identity.
///
/// This is configuration only (`relays`, `filters`, transport mode). Identity is carried by
/// [`ScopedSubIdentity`] (`owner + key + scope`).
#[derive(Clone, Debug)]
pub struct SubConfig {
    /// How the relay set for this subscription is resolved.
    pub relays: RelaySelection,
    /// Filters sent with the subscription; an empty list leaves the sub inactive.
    pub filters: Vec<Filter>,
    /// Whether the live sub uses the transparent relay mode (vs. compaction).
    pub use_transparent: bool,
}
+
/// Fully resolved subscription identity: logical key plus concrete scope.
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
struct ScopedSubKey {
    scope: ResolvedSubScope,
    key: SubKey,
}

/// [`SubScope`] with `Account` resolved to a concrete pubkey.
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
enum ResolvedSubScope {
    Account(Pubkey),
    Global,
}

/// Planned reconciliation action for the live outbox sub after a `set_sub` upsert.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
enum SetSubLiveOp {
    /// Create the live sub if it does not exist yet.
    EnsurePresent,
    /// Unsubscribe and resubscribe (e.g. transport mode flipped).
    ReplaceExisting,
    /// Update filters/relays of the existing live sub in place.
    ModifyExisting,
    /// Tear down the live sub (e.g. new filters are empty).
    RemoveExisting,
}
+
/// Result of setting a desired subscription entry.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum SetSubResult {
    /// No desired state existed yet for the resolved `(scope, key)`.
    Created,
    /// Existing desired state was overwritten.
    Updated,
}

/// Result of ensuring a desired subscription entry exists without mutating it.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum EnsureSubResult {
    /// A new desired entry was created (and a live sub started when possible).
    Created,
    /// Desired state already existed; nothing was changed.
    AlreadyExists,
}

/// Result of clearing one `(slot, key)` ownership link.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum ClearSubResult {
    /// Last owner released: desired state removed and any live sub unsubscribed.
    Cleared,
    /// Other owner slots still declare this `(scope, key)`; sub kept alive.
    StillInUse,
    /// No matching ownership link was found.
    NotFound,
}

/// Result of dropping a whole slot.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum DropSlotResult {
    /// The slot existed and all its ownership links were released.
    Dropped,
    /// The slot was unknown.
    NotFound,
}
+
/// Aggregate EOSE status for one live scoped subscription.
///
/// Aggregated from the per-relay [`RelayReqStatus`] values of the live
/// outbox subscription.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub struct ScopedSubLiveEoseStatus {
    /// Number of relay legs currently tracking this request.
    pub tracked_relays: usize,
    /// Whether any tracked relay has reached EOSE.
    pub any_eose: bool,
    /// Whether all tracked relays have reached EOSE.
    ///
    /// This is false when `tracked_relays == 0`.
    pub all_eosed: bool,
}

/// EOSE state for one owner-scoped logical subscription key.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum ScopedSubEoseStatus {
    /// No owned scoped subscription exists for the requested `(owner, key, scope)`.
    Missing,
    /// Owned desired state exists, but no live outbox subscription is active.
    ///
    /// This occurs for empty-filter specs and for account-scoped subs while switched away.
    Inactive,
    /// Live outbox subscription exists; aggregate EOSE state is available.
    Live(ScopedSubLiveEoseStatus),
}
+
/// Host-owned runtime for scoped subscription desired/live state and ownership.
///
/// The runtime never leaks outbox subscription ids to app code. Apps talk in
/// terms of identity + config and the runtime handles lifecycles, relay
/// mutations, and account-switch restore semantics.
pub(crate) struct ScopedSubRuntime {
    /// Declared config per resolved `(scope, key)`; retained even while no
    /// live sub exists (e.g. account switched away).
    desired: HashMap<ScopedSubKey, SubConfig>,
    /// Live outbox subscription ids for currently-active scoped subs.
    live: HashMap<ScopedSubKey, OutboxSubId>,
    /// Reverse index: which owner slots declare each scoped sub.
    owners_by_sub: HashMap<ScopedSubKey, HashSet<SubSlotId>>,
    /// Forward index: which scoped subs each owner slot declares.
    subs_by_slot: HashMap<SubSlotId, HashSet<ScopedSubKey>>,
    /// Next candidate slot id; 0 is never handed out (see `allocate_slot`).
    next_slot_id: u64,
}
+
impl Default for ScopedSubRuntime {
    fn default() -> Self {
        Self {
            desired: HashMap::default(),
            live: HashMap::default(),
            owners_by_sub: HashMap::default(),
            subs_by_slot: HashMap::default(),
            // Start at 1: allocate_slot treats 0 as never-valid and skips it.
            next_slot_id: 1,
        }
    }
}
+
impl ScopedSubRuntime {
    /// Combine a resolved scope with a logical key into one map key.
    fn scoped_key(scope: ResolvedSubScope, key: SubKey) -> ScopedSubKey {
        ScopedSubKey { scope, key }
    }

    /// Create one owner slot for a UI lifecycle owner.
    pub(crate) fn create_slot(&mut self) -> SubSlotId {
        let slot = self.allocate_slot();
        // Materialize an empty forward-index entry so the slot is tracked
        // even before it declares any subscription.
        self.subs_by_slot.entry(slot).or_default();
        slot
    }

    /// Internal upsert path using selected-account relay resolution.
    pub(crate) fn set_sub(
        &mut self,
        pool: &mut Outbox<'_>,
        accounts: &Accounts,
        slot: SubSlotId,
        scope: SubScope,
        key: SubKey,
        config: SubConfig,
    ) -> SetSubResult {
        let account_read_relays = accounts.selected_account_read_relays();
        let selected_account_pubkey = *accounts.selected_account_pubkey();
        self.set_sub_with_relays(
            pool,
            &account_read_relays,
            selected_account_pubkey,
            slot,
            scope,
            key,
            config,
        )
    }

    /// Internal create-if-absent path using selected-account relay resolution.
    pub(crate) fn ensure_sub(
        &mut self,
        pool: &mut Outbox<'_>,
        accounts: &Accounts,
        slot: SubSlotId,
        scope: SubScope,
        key: SubKey,
        config: SubConfig,
    ) -> EnsureSubResult {
        let account_read_relays = accounts.selected_account_read_relays();
        let selected_account_pubkey = *accounts.selected_account_pubkey();
        self.ensure_sub_with_relays(
            pool,
            &account_read_relays,
            selected_account_pubkey,
            slot,
            scope,
            key,
            config,
        )
    }

    /// Create desired state for one `(slot, key)` only if absent, with pre-resolved relays.
    #[allow(clippy::too_many_arguments)]
    pub(crate) fn ensure_sub_with_relays(
        &mut self,
        pool: &mut Outbox<'_>,
        account_read_relays: &HashSet<NormRelayUrl>,
        selected_account_pubkey: Pubkey,
        slot: SubSlotId,
        scope: SubScope,
        key: SubKey,
        mut config: SubConfig,
    ) -> EnsureSubResult {
        let resolved_scope = resolve_scope(&scope, selected_account_pubkey);
        let scoped = Self::scoped_key(resolved_scope, key);

        // Attach this slot as an owner even when the declaration already
        // exists, so repeated ensures share the same `(scope, key)`.
        self.register_ownership(slot, &scoped);
        if self.desired.contains_key(&scoped) {
            return EnsureSubResult::AlreadyExists;
        }

        config.filters = normalize_filters(config.filters);
        self.desired.insert(scoped.clone(), config.clone());
        self.ensure_live_sub(pool, account_read_relays, scoped, &config);
        EnsureSubResult::Created
    }

    /// Create-or-update desired state for one `(slot, key)` with pre-resolved relays.
    ///
    /// This is equivalent to [`Self::set_sub`] but avoids relay lookup from
    /// `Accounts` when the caller already has the selected relay set.
    #[allow(clippy::too_many_arguments)]
    pub(crate) fn set_sub_with_relays(
        &mut self,
        pool: &mut Outbox<'_>,
        account_read_relays: &HashSet<NormRelayUrl>,
        selected_account_pubkey: Pubkey,
        slot: SubSlotId,
        scope: SubScope,
        key: SubKey,
        mut config: SubConfig,
    ) -> SetSubResult {
        let resolved_scope = resolve_scope(&scope, selected_account_pubkey);
        let scoped = Self::scoped_key(resolved_scope, key);

        self.register_ownership(slot, &scoped);

        config.filters = normalize_filters(config.filters);
        let previous = self.desired.insert(scoped.clone(), config.clone());
        // Plan against the pre-insert desired state; the plan is only
        // consulted on updates (fresh entries take the early return below).
        let op = plan_set_sub_live_op(previous.as_ref(), &config, self.live.contains_key(&scoped));

        if previous.is_none() {
            self.ensure_live_sub(pool, account_read_relays, scoped, &config);
            return SetSubResult::Created;
        }

        match op {
            SetSubLiveOp::EnsurePresent => {
                self.ensure_live_sub(pool, account_read_relays, scoped, &config);
            }
            SetSubLiveOp::ReplaceExisting => {
                self.replace_live_sub(pool, account_read_relays, &scoped, &config);
            }
            SetSubLiveOp::ModifyExisting => {
                if let Some(id) = self.live.get(&scoped).copied() {
                    Self::modify_live_sub(pool, account_read_relays, id, &config);
                }
            }
            SetSubLiveOp::RemoveExisting => {
                self.remove_live_sub(pool, &scoped);
            }
        }

        SetSubResult::Updated
    }

    /// Clear one `(slot, key)` ownership link.
    pub(crate) fn clear_sub(
        &mut self,
        pool: &mut Outbox<'_>,
        accounts: &Accounts,
        slot: SubSlotId,
        key: SubKey,
        scope: SubScope,
    ) -> ClearSubResult {
        let selected_account_pubkey = *accounts.selected_account_pubkey();
        self.clear_sub_with_selected(pool, selected_account_pubkey, slot, key, scope)
    }

    /// Clear one `(slot, key)` with explicit selected account.
    pub(crate) fn clear_sub_with_selected(
        &mut self,
        pool: &mut Outbox<'_>,
        selected_account_pubkey: Pubkey,
        slot: SubSlotId,
        key: SubKey,
        scope: SubScope,
    ) -> ClearSubResult {
        let resolved_scope = resolve_scope(&scope, selected_account_pubkey);
        let scoped = Self::scoped_key(resolved_scope, key);

        let Some(slot_entries) = self.subs_by_slot.get_mut(&slot) else {
            return ClearSubResult::NotFound;
        };

        if !slot_entries.remove(&scoped) {
            return ClearSubResult::NotFound;
        }

        // Drop the forward-index entry entirely once the slot declares nothing.
        if slot_entries.is_empty() {
            self.subs_by_slot.remove(&slot);
        }

        self.release_slot_from_scoped_sub(pool, slot, &scoped)
    }

    /// Query aggregate EOSE state for one `(slot, key)` using the selected account scope.
    pub(crate) fn sub_eose_status(
        &self,
        pool: &Outbox<'_>,
        accounts: &Accounts,
        slot: SubSlotId,
        key: SubKey,
        scope: SubScope,
    ) -> ScopedSubEoseStatus {
        let selected_account_pubkey = *accounts.selected_account_pubkey();
        self.sub_eose_status_with_selected(pool, selected_account_pubkey, slot, key, scope)
    }

    /// Query aggregate EOSE state for one `(slot, key)` using an explicit selected account.
    pub(crate) fn sub_eose_status_with_selected(
        &self,
        pool: &Outbox<'_>,
        selected_account_pubkey: Pubkey,
        slot: SubSlotId,
        key: SubKey,
        scope: SubScope,
    ) -> ScopedSubEoseStatus {
        let resolved_scope = resolve_scope(&scope, selected_account_pubkey);
        let scoped = Self::scoped_key(resolved_scope, key);

        let Some(slot_entries) = self.subs_by_slot.get(&slot) else {
            return ScopedSubEoseStatus::Missing;
        };

        // Status is only reported for subs this slot actually declares.
        if !slot_entries.contains(&scoped) {
            return ScopedSubEoseStatus::Missing;
        }

        if let Some(live_id) = self.live.get(&scoped).copied() {
            let relay_statuses = pool.outbox.status(&live_id);
            return ScopedSubEoseStatus::Live(aggregate_eose_status(
                relay_statuses.values().copied(),
            ));
        }

        // Desired-but-not-live (e.g. switched away or empty filters).
        if self.desired.contains_key(&scoped) {
            ScopedSubEoseStatus::Inactive
        } else {
            ScopedSubEoseStatus::Missing
        }
    }

    /// Drop all ownership links attached to one slot.
    pub(crate) fn drop_slot(&mut self, pool: &mut Outbox<'_>, slot: SubSlotId) -> DropSlotResult {
        let Some(scoped_keys) = self.subs_by_slot.remove(&slot) else {
            return DropSlotResult::NotFound;
        };

        for scoped in scoped_keys {
            // Per-key result ignored: StillInUse/Cleared are both fine here.
            let _ = self.release_slot_from_scoped_sub(pool, slot, &scoped);
        }

        DropSlotResult::Dropped
    }

    /// Handle centralized account switching using host account relay resolution.
    pub fn on_account_switched(
        &mut self,
        pool: &mut Outbox<'_>,
        old_pk: Pubkey,
        new_pk: Pubkey,
        accounts: &Accounts,
    ) {
        let new_account_read_relays = accounts.selected_account_read_relays();
        self.on_account_switched_with_relays(pool, old_pk, new_pk, &new_account_read_relays);
    }

    /// Handle centralized account switching with pre-resolved new account relays.
    pub(crate) fn on_account_switched_with_relays(
        &mut self,
        pool: &mut Outbox<'_>,
        old_pk: Pubkey,
        new_pk: Pubkey,
        new_account_read_relays: &HashSet<NormRelayUrl>,
    ) {
        // Same account: nothing to tear down or restore.
        if old_pk == new_pk {
            return;
        }

        let old_scope = ResolvedSubScope::Account(old_pk);
        let new_scope = ResolvedSubScope::Account(new_pk);

        // Tear down live subs for the account we are leaving; their desired
        // state stays behind while owners still exist.
        self.unsubscribe_scope(pool, &old_scope);

        let new_desired_keys =
            owned_desired_keys_for_scope(&self.desired, &self.owners_by_sub, &new_scope);

        // Restore live subs for the new account's retained declarations.
        for scoped in new_desired_keys {
            if self.live.contains_key(&scoped) {
                continue;
            }

            let Some(spec) = self.desired.get(&scoped) else {
                continue;
            };

            if let Some(live_id) = subscribe_live(pool, new_account_read_relays, spec) {
                self.live.insert(scoped, live_id);
            }
        }
    }

    /// Retarget live subscriptions that depend on the selected account's read relay set.
    ///
    /// This updates all owned scoped subscriptions whose relay selection is
    /// [`RelaySelection::AccountsRead`] and whose resolved scope is either:
    /// - the currently selected account (`SubScope::Account` resolved), or
    /// - global (`SubScope::Global`)
    ///
    /// This is used when the selected account's kind `10002` relay list changes
    /// without switching accounts.
    pub fn retarget_selected_account_read_relays(
        &mut self,
        pool: &mut Outbox<'_>,
        accounts: &Accounts,
    ) {
        let selected_account_pubkey = *accounts.selected_account_pubkey();
        let account_read_relays = accounts.selected_account_read_relays();
        self.retarget_selected_account_read_relays_with_relays(
            pool,
            selected_account_pubkey,
            &account_read_relays,
        );
    }

    /// Retarget selected-account-dependent live subscriptions with pre-resolved read relays.
    pub(crate) fn retarget_selected_account_read_relays_with_relays(
        &mut self,
        pool: &mut Outbox<'_>,
        selected_account_pubkey: Pubkey,
        account_read_relays: &HashSet<NormRelayUrl>,
    ) {
        let account_scope = ResolvedSubScope::Account(selected_account_pubkey);
        // Collect first: the loop below mutates `live`/`desired`.
        let scoped_keys: Vec<_> = self
            .desired
            .keys()
            .filter(|scoped| {
                (scoped.scope == account_scope || scoped.scope == ResolvedSubScope::Global)
                    && has_owners(&self.owners_by_sub, scoped)
            })
            .cloned()
            .collect();

        for scoped in scoped_keys {
            let Some(spec) = self.desired.get(&scoped).cloned() else {
                continue;
            };

            // Explicit relay sets are unaffected by account relay changes.
            if !matches!(spec.relays, RelaySelection::AccountsRead) {
                continue;
            }

            let has_live = self.live.get(&scoped).copied();

            // Empty filters cannot stay live; drop any existing live sub.
            if spec.filters.is_empty() {
                if has_live.is_some() {
                    self.remove_live_sub(pool, &scoped);
                }
                continue;
            }

            if let Some(live_id) = has_live {
                pool.modify_relays(live_id, resolve_relays(account_read_relays, &spec.relays));
            } else {
                self.ensure_live_sub(pool, account_read_relays, scoped, &spec);
            }
        }
    }

    /// Number of desired (declared) scoped subscription entries.
    #[allow(dead_code)]
    pub fn desired_len(&self) -> usize {
        self.desired.len()
    }

    /// Number of live outbox subscriptions.
    #[allow(dead_code)]
    pub fn live_len(&self) -> usize {
        self.live.len()
    }

    /// Number of tracked owner slots.
    #[allow(dead_code)]
    pub fn slot_len(&self) -> usize {
        self.subs_by_slot.len()
    }

    /// Record the slot↔scoped-sub link in both the forward and reverse index.
    fn register_ownership(&mut self, slot: SubSlotId, scoped: &ScopedSubKey) {
        self.subs_by_slot
            .entry(slot)
            .or_default()
            .insert(scoped.clone());
        self.owners_by_sub
            .entry(scoped.clone())
            .or_default()
            .insert(slot);
    }

    /// Start a live outbox sub for `scoped` if the spec yields one.
    fn ensure_live_sub(
        &mut self,
        pool: &mut Outbox<'_>,
        account_read_relays: &HashSet<NormRelayUrl>,
        scoped: ScopedSubKey,
        spec: &SubConfig,
    ) {
        if let Some(id) = subscribe_live(pool, account_read_relays, spec) {
            self.live.insert(scoped, id);
        }
    }

    /// Tear down and recreate the live sub for `scoped` (e.g. transport mode changed).
    fn replace_live_sub(
        &mut self,
        pool: &mut Outbox<'_>,
        account_read_relays: &HashSet<NormRelayUrl>,
        scoped: &ScopedSubKey,
        spec: &SubConfig,
    ) {
        self.remove_live_sub(pool, scoped);
        self.ensure_live_sub(pool, account_read_relays, scoped.clone(), spec);
    }

    /// Push updated filters and relays onto an existing live sub in place.
    fn modify_live_sub(
        pool: &mut Outbox<'_>,
        account_read_relays: &HashSet<NormRelayUrl>,
        live_id: OutboxSubId,
        spec: &SubConfig,
    ) {
        pool.modify_filters(live_id, spec.filters.clone());
        pool.modify_relays(live_id, resolve_relays(account_read_relays, &spec.relays));
    }

    /// Unsubscribe and forget the live sub for `scoped`, if any.
    fn remove_live_sub(&mut self, pool: &mut Outbox<'_>, scoped: &ScopedSubKey) {
        if let Some(live_id) = self.live.remove(scoped) {
            pool.unsubscribe(live_id);
        }
    }

    /// Unsubscribe every live sub whose resolved scope matches `scope`.
    fn unsubscribe_scope(&mut self, pool: &mut Outbox<'_>, scope: &ResolvedSubScope) {
        // retain with a side effect: matching entries are unsubscribed and dropped.
        self.live.retain(|scoped, sub_id| {
            if scoped.scope == *scope {
                pool.unsubscribe(*sub_id);
                false
            } else {
                true
            }
        });
    }

    /// Detach `slot` from `scoped`; when the last owner goes away, remove
    /// desired state and close any live sub.
    fn release_slot_from_scoped_sub(
        &mut self,
        pool: &mut Outbox<'_>,
        slot: SubSlotId,
        scoped: &ScopedSubKey,
    ) -> ClearSubResult {
        let Some(owners) = self.owners_by_sub.get_mut(scoped) else {
            return ClearSubResult::NotFound;
        };

        if !owners.remove(&slot) {
            return ClearSubResult::NotFound;
        }

        // Other slots still declare this sub: keep it alive.
        if !owners.is_empty() {
            return ClearSubResult::StillInUse;
        }

        self.owners_by_sub.remove(scoped);
        self.desired.remove(scoped);
        if let Some(sub_id) = self.live.remove(scoped) {
            pool.unsubscribe(sub_id);
        }

        ClearSubResult::Cleared
    }

    /// Allocate the next unused slot id, skipping 0 and surviving wraparound.
    fn allocate_slot(&mut self) -> SubSlotId {
        loop {
            // 0 is reserved as a never-valid id; re-seed after wraparound.
            if self.next_slot_id == 0 {
                self.next_slot_id = 1;
            }
            let slot = SubSlotId(self.next_slot_id);
            self.next_slot_id = self.next_slot_id.wrapping_add(1);
            // Skip ids still held by long-lived slots.
            if !self.subs_by_slot.contains_key(&slot) {
                return slot;
            }
        }
    }
}
+
+fn plan_set_sub_live_op(
+ previous: Option<&SubConfig>,
+ next: &SubConfig,
+ has_live: bool,
+) -> SetSubLiveOp {
+ let Some(previous) = previous else {
+ return SetSubLiveOp::EnsurePresent;
+ };
+
+ if !has_live {
+ return SetSubLiveOp::EnsurePresent;
+ }
+
+ if previous.use_transparent != next.use_transparent {
+ return SetSubLiveOp::ReplaceExisting;
+ }
+
+ if next.filters.is_empty() {
+ SetSubLiveOp::RemoveExisting
+ } else {
+ SetSubLiveOp::ModifyExisting
+ }
+}
+
+fn owned_desired_keys_for_scope(
+ desired: &HashMap<ScopedSubKey, SubConfig>,
+ owners_by_sub: &HashMap<ScopedSubKey, HashSet<SubSlotId>>,
+ scope: &ResolvedSubScope,
+) -> Vec<ScopedSubKey> {
+ desired
+ .keys()
+ .filter(|key| key.scope == *scope && has_owners(owners_by_sub, key))
+ .cloned()
+ .collect()
+}
+
+fn has_owners(
+ owners_by_sub: &HashMap<ScopedSubKey, HashSet<SubSlotId>>,
+ scoped: &ScopedSubKey,
+) -> bool {
+ owners_by_sub
+ .get(scoped)
+ .is_some_and(|owners| !owners.is_empty())
+}
+
+fn normalize_filters(filters: Vec<Filter>) -> Vec<Filter> {
+ filters
+ .into_iter()
+ .filter(|filter| filter.num_elements() != 0)
+ .collect()
+}
+
+fn resolve_scope(scope: &SubScope, selected_account_pubkey: Pubkey) -> ResolvedSubScope {
+ match scope {
+ SubScope::Account => ResolvedSubScope::Account(selected_account_pubkey),
+ SubScope::Global => ResolvedSubScope::Global,
+ }
+}
+
+fn resolve_relays(
+ account_read_relays: &HashSet<NormRelayUrl>,
+ selection: &RelaySelection,
+) -> HashSet<NormRelayUrl> {
+ match selection {
+ RelaySelection::AccountsRead => account_read_relays.clone(),
+ RelaySelection::Explicit(relays) => relays.clone(),
+ }
+}
+
+fn subscribe_live(
+ pool: &mut Outbox<'_>,
+ account_read_relays: &HashSet<NormRelayUrl>,
+ spec: &SubConfig,
+) -> Option<OutboxSubId> {
+ if spec.filters.is_empty() {
+ return None;
+ }
+
+ let relays = resolve_relays(account_read_relays, &spec.relays);
+ let mut relay_pkgs = RelayUrlPkgs::new(relays);
+ relay_pkgs.use_transparent = spec.use_transparent;
+ Some(pool.subscribe(spec.filters.clone(), relay_pkgs))
+}
+
+fn aggregate_eose_status(
+ relay_statuses: impl IntoIterator<Item = RelayReqStatus>,
+) -> ScopedSubLiveEoseStatus {
+ let mut tracked_relays = 0usize;
+ let mut any_eose = false;
+ let mut all_eosed = true;
+
+ for status in relay_statuses {
+ tracked_relays += 1;
+ if status == RelayReqStatus::Eose {
+ any_eose = true;
+ } else {
+ all_eosed = false;
+ }
+ }
+
+ if tracked_relays == 0 {
+ all_eosed = false;
+ }
+
+ ScopedSubLiveEoseStatus {
+ tracked_relays,
+ any_eose,
+ all_eosed,
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::EguiWakeup;
+ use enostr::{OutboxPool, OutboxSessionHandler};
+ use std::hash::Hash;
+
    // Stand-in app identities used to disambiguate SubKeys across "apps".
    #[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
    enum FakeApp {
        Timelines,
        Threads,
        Messages,
    }

    // A spec with no filters: stored as desired state but never goes live.
    fn empty_config(_scope: SubScope) -> SubConfig {
        SubConfig {
            relays: RelaySelection::AccountsRead,
            filters: Vec::new(),
            use_transparent: false,
        }
    }

    // A spec with one kind-1 filter, so set_sub creates a live outbox sub.
    fn live_config(scope: SubScope) -> SubConfig {
        let mut config = empty_config(scope);
        config.filters = vec![Filter::new().kinds(vec![1]).limit(5).build()];
        config
    }

    // Single-relay set from a URL literal.
    fn relay_set(url: &str) -> HashSet<NormRelayUrl> {
        let mut relays = HashSet::new();
        relays.insert(NormRelayUrl::new(url).unwrap());
        relays
    }

    // Deterministic pubkey from a repeated byte tag.
    fn account_pk(tag: u8) -> Pubkey {
        Pubkey::new([tag; 32])
    }

    fn make_key(parts: impl Hash) -> SubKey {
        SubKey::new(parts)
    }

    // AccountsRead-targeted spec with one kind/limit filter.
    fn accountsread_spec(scope: SubScope, kind: u64, limit: u64) -> SubConfig {
        let mut spec = empty_config(scope);
        spec.filters = vec![Filter::new().kinds(vec![kind]).limit(limit).build()];
        spec.relays = RelaySelection::AccountsRead;
        spec
    }

    // Account-scoped spec pinned to a single explicit relay (not AccountsRead).
    fn explicit_account_spec() -> SubConfig {
        let explicit_relay = NormRelayUrl::new("wss://relay-explicit.example.com").unwrap();
        let mut spec = empty_config(SubScope::Account);
        spec.filters = vec![Filter::new().kinds(vec![10002]).limit(1).build()];
        spec.relays = RelaySelection::Explicit({
            let mut set = HashSet::new();
            set.insert(explicit_relay);
            set
        });
        spec
    }

    // Fresh Outbox session over the test pool with a default egui wakeup.
    fn outbox<'a>(pool: &'a mut OutboxPool) -> Outbox<'a> {
        OutboxSessionHandler::new(pool, EguiWakeup::new(egui::Context::default()))
    }

    // Convenience wrapper around sub_eose_status_with_selected.
    fn slot_status(
        runtime: &ScopedSubRuntime,
        pool: &mut OutboxPool,
        selected_account_pubkey: Pubkey,
        slot: SubSlotId,
        key: SubKey,
        scope: SubScope,
    ) -> ScopedSubEoseStatus {
        let outbox = outbox(pool);
        runtime.sub_eose_status_with_selected(&outbox, selected_account_pubkey, slot, key, scope)
    }
+
    /// Verifies repeated set_sub calls for the same key perform create-then-update semantics.
    #[test]
    fn set_sub_is_upsert_for_existing_key() {
        let mut runtime = ScopedSubRuntime::default();
        let mut pool = OutboxPool::default();
        let relays = relay_set("wss://relay-a.example.com");
        let key = SubKey::new(("messages", "dm-list", 7u8));
        let scope = SubScope::Global;
        let slot = runtime.create_slot();

        let first = runtime.set_sub_with_relays(
            &mut OutboxSessionHandler::new(&mut pool, EguiWakeup::new(egui::Context::default())),
            &relays,
            account_pk(0x01),
            slot,
            scope,
            key,
            empty_config(scope.clone()),
        );
        // Same slot + key again: must be reported as an update, not a second create.
        let second = runtime.set_sub_with_relays(
            &mut OutboxSessionHandler::new(&mut pool, EguiWakeup::new(egui::Context::default())),
            &relays,
            account_pk(0x01),
            slot,
            scope,
            key,
            empty_config(scope),
        );

        assert!(matches!(first, SetSubResult::Created));
        assert!(matches!(second, SetSubResult::Updated));
        // Empty filters: desired state stored, nothing live.
        assert_eq!(runtime.desired_len(), 1);
        assert_eq!(runtime.live_len(), 0);
        assert_eq!(runtime.slot_len(), 1);
    }

    /// Verifies repeated ensure_sub calls for the same key are create-then-noop.
    #[test]
    fn ensure_sub_is_create_or_ignore_for_existing_key() {
        let mut runtime = ScopedSubRuntime::default();
        let mut pool = OutboxPool::default();
        let relays = relay_set("wss://relay-a.example.com");
        let key = SubKey::new(("messages", "dm-list", 9u8));
        let slot = runtime.create_slot();

        let first = runtime.ensure_sub_with_relays(
            &mut OutboxSessionHandler::new(&mut pool, EguiWakeup::new(egui::Context::default())),
            &relays,
            account_pk(0x01),
            slot,
            SubScope::Global,
            key,
            empty_config(SubScope::Global),
        );

        let second = runtime.ensure_sub_with_relays(
            &mut OutboxSessionHandler::new(&mut pool, EguiWakeup::new(egui::Context::default())),
            &relays,
            account_pk(0x01),
            slot,
            SubScope::Global,
            key,
            empty_config(SubScope::Global),
        );

        assert!(matches!(first, EnsureSubResult::Created));
        assert!(matches!(second, EnsureSubResult::AlreadyExists));
        assert_eq!(runtime.desired_len(), 1);
        assert_eq!(runtime.live_len(), 0);
        assert_eq!(runtime.slot_len(), 1);
    }
+
    /// Verifies ensure_sub does not mutate existing live filter state.
    #[test]
    fn ensure_sub_does_not_modify_existing_live_sub() {
        let mut runtime = ScopedSubRuntime::default();
        let mut pool = OutboxPool::default();
        let relays = relay_set("wss://relay-a.example.com");
        let key = SubKey::new(("timeline", "home", 1u8));
        let slot = runtime.create_slot();

        let mut initial = empty_config(SubScope::Global);
        initial.filters = vec![Filter::new().kinds(vec![1]).limit(10).build()];

        let created = runtime.set_sub_with_relays(
            &mut OutboxSessionHandler::new(&mut pool, EguiWakeup::new(egui::Context::default())),
            &relays,
            account_pk(0x01),
            slot,
            SubScope::Global,
            key,
            initial,
        );
        assert!(matches!(created, SetSubResult::Created));

        // Snapshot the live filters (as JSON) before the ensure call.
        let scoped = ScopedSubRuntime::scoped_key(ResolvedSubScope::Global, key);
        let live_id = runtime.live.get(&scoped).copied().expect("live sub id");
        let before = pool
            .filters(&live_id)
            .expect("stored filters before ensure")
            .iter()
            .map(|f| f.json().expect("filter json"))
            .collect::<Vec<_>>();

        // ensure_sub with a different filter set must be a no-op for a live key.
        let mut replacement = empty_config(SubScope::Global);
        replacement.filters = vec![Filter::new().kinds(vec![3]).limit(1).build()];
        let ensured = runtime.ensure_sub_with_relays(
            &mut OutboxSessionHandler::new(&mut pool, EguiWakeup::new(egui::Context::default())),
            &relays,
            account_pk(0x01),
            slot,
            SubScope::Global,
            key,
            replacement,
        );
        assert!(matches!(ensured, EnsureSubResult::AlreadyExists));

        let after = pool
            .filters(&live_id)
            .expect("stored filters after ensure")
            .iter()
            .map(|f| f.json().expect("filter json"))
            .collect::<Vec<_>>();
        assert_eq!(before, after);
    }
+
    /// Verifies aggregate EOSE helper treats zero tracked relays as not fully EOSE'd.
    #[test]
    fn aggregate_eose_status_zero_tracked_relays_is_not_all_eosed() {
        // Guards against the vacuous-truth case: no relays must not read as "done".
        let status = aggregate_eose_status(std::iter::empty());
        assert_eq!(
            status,
            ScopedSubLiveEoseStatus {
                tracked_relays: 0,
                any_eose: false,
                all_eosed: false,
            }
        );
    }

    /// Verifies aggregate EOSE helper reports partial EOSE when relay legs are mixed.
    #[test]
    fn aggregate_eose_status_mixed_relays_reports_partial_eose() {
        let status = aggregate_eose_status([
            RelayReqStatus::InitialQuery,
            RelayReqStatus::Eose,
            RelayReqStatus::Closed,
        ]);
        assert_eq!(
            status,
            ScopedSubLiveEoseStatus {
                tracked_relays: 3,
                any_eose: true,
                all_eosed: false,
            }
        );
    }

    /// Verifies aggregate EOSE helper reports fully EOSE'd only when all tracked relays are EOSE.
    #[test]
    fn aggregate_eose_status_all_relays_eose_reports_all_eosed() {
        let status = aggregate_eose_status([RelayReqStatus::Eose, RelayReqStatus::Eose]);
        assert_eq!(
            status,
            ScopedSubLiveEoseStatus {
                tracked_relays: 2,
                any_eose: true,
                all_eosed: true,
            }
        );
    }
+
    /// Verifies EOSE status lookup returns Missing when the slot does not own the requested key.
    #[test]
    fn sub_eose_status_missing_when_slot_does_not_own_key() {
        let runtime = ScopedSubRuntime::default();
        let mut pool = OutboxPool::default();
        // SubSlotId(999) was never allocated, so no ownership can exist.
        let status = slot_status(
            &runtime,
            &mut pool,
            account_pk(0x01),
            SubSlotId(999),
            make_key(("missing", 1u8)),
            SubScope::Global,
        );
        assert_eq!(status, ScopedSubEoseStatus::Missing);
    }

    /// Verifies empty-filter desired state reports Inactive because no live outbox sub exists.
    #[test]
    fn sub_eose_status_inactive_for_desired_without_live_sub() {
        let mut runtime = ScopedSubRuntime::default();
        let mut pool = OutboxPool::default();
        let relays = relay_set("wss://relay-a.example.com");
        let slot = runtime.create_slot();
        let key = make_key(("inactive", 1u8));
        let selected = account_pk(0x01);

        // empty_config has no filters, so the sub is desired but never goes live.
        let _ = runtime.ensure_sub_with_relays(
            &mut outbox(&mut pool),
            &relays,
            selected,
            slot,
            SubScope::Global,
            key,
            empty_config(SubScope::Global),
        );

        let status = slot_status(&runtime, &mut pool, selected, slot, key, SubScope::Global);
        assert_eq!(status, ScopedSubEoseStatus::Inactive);
    }

    /// Verifies live subscriptions expose aggregate EOSE state without leaking outbox ids.
    #[test]
    fn sub_eose_status_live_reports_tracked_relays_and_eose_flags() {
        let mut runtime = ScopedSubRuntime::default();
        let mut pool = OutboxPool::default();
        let relays = relay_set("wss://relay-a.example.com");
        let slot = runtime.create_slot();
        let key = make_key(("live", 1u8));
        let selected = account_pk(0x01);

        let _ = runtime.set_sub_with_relays(
            &mut outbox(&mut pool),
            &relays,
            selected,
            slot,
            SubScope::Global,
            key,
            live_config(SubScope::Global),
        );

        let status = slot_status(&runtime, &mut pool, selected, slot, key, SubScope::Global);
        let ScopedSubEoseStatus::Live(live) = status else {
            panic!("expected live status, got {status:?}");
        };

        // One relay tracked; no EOSE has been observed yet on a fresh pool.
        assert_eq!(live.tracked_relays, 1);
        assert!(!live.any_eose);
        assert!(!live.all_eosed);
    }
+
    /// Verifies account switch makes old account-scoped subs inactive and restores them on switch-back.
    #[test]
    fn account_scoped_sub_eose_status_transitions_inactive_and_restores_on_switch_back() {
        let mut runtime = ScopedSubRuntime::default();
        let mut pool = OutboxPool::default();
        let relays_a = relay_set("wss://relay-a.example.com");
        let relays_b = relay_set("wss://relay-b.example.com");
        let account_a = account_pk(0x0A);
        let account_b = account_pk(0x0B);
        let slot = runtime.create_slot();
        let key = make_key(("account-scoped", 1u8));

        let _ = runtime.set_sub_with_relays(
            &mut outbox(&mut pool),
            &relays_a,
            account_a,
            slot,
            SubScope::Account,
            key,
            live_config(SubScope::Account),
        );

        let before = slot_status(&runtime, &mut pool, account_a, slot, key, SubScope::Account);
        assert!(matches!(before, ScopedSubEoseStatus::Live(_)));

        runtime.on_account_switched_with_relays(
            &mut outbox(&mut pool),
            account_a,
            account_b,
            &relays_b,
        );

        // A's desired state survives the switch, but its live sub is gone.
        let old_while_switched =
            slot_status(&runtime, &mut pool, account_a, slot, key, SubScope::Account);
        assert_eq!(old_while_switched, ScopedSubEoseStatus::Inactive);

        // B never registered this key, so from B's perspective it is Missing.
        let new_missing = slot_status(&runtime, &mut pool, account_b, slot, key, SubScope::Account);
        assert_eq!(new_missing, ScopedSubEoseStatus::Missing);

        runtime.on_account_switched_with_relays(
            &mut outbox(&mut pool),
            account_b,
            account_a,
            &relays_a,
        );

        let restored = slot_status(&runtime, &mut pool, account_a, slot, key, SubScope::Account);
        assert!(matches!(restored, ScopedSubEoseStatus::Live(_)));
    }
+
    /// Verifies upsert updates a live subscription in place, and replaces it when transport mode changes.
    #[test]
    fn set_sub_upsert_modifies_live_sub() {
        let mut runtime = ScopedSubRuntime::default();
        let mut pool = OutboxPool::default();
        let key = SubKey::new(("timeline", 1u64));
        let scope = SubScope::Global;
        let relays_a = relay_set("wss://relay-a.example.com");
        let relays_b = relay_set("wss://relay-b.example.com");
        let slot = runtime.create_slot();

        let mut spec = empty_config(scope.clone());
        spec.filters = vec![Filter::new().kinds(vec![1]).limit(2).build()];

        let first = runtime.set_sub_with_relays(
            &mut OutboxSessionHandler::new(&mut pool, EguiWakeup::new(egui::Context::default())),
            &relays_a,
            account_pk(0x01),
            slot,
            scope,
            key,
            spec.clone(),
        );
        assert!(matches!(first, SetSubResult::Created));

        let scoped = ScopedSubRuntime::scoped_key(ResolvedSubScope::Global, key);
        let live_id = runtime.live.get(&scoped).copied().expect("live sub id");
        assert_eq!(pool.filters(&live_id).expect("stored filters").len(), 1);

        // Filter-only change: same outbox sub id, filters modified in place.
        let mut updated = spec.clone();
        updated.filters = vec![Filter::new().kinds(vec![3]).limit(1).build()];

        let res = runtime.set_sub_with_relays(
            &mut OutboxSessionHandler::new(&mut pool, EguiWakeup::new(egui::Context::default())),
            &relays_b,
            account_pk(0x01),
            slot,
            scope,
            key,
            updated.clone(),
        );
        assert!(matches!(res, SetSubResult::Updated));

        assert_eq!(
            pool.filters(&live_id)
                .expect("updated filters should exist")
                .len(),
            1
        );

        // Transport-mode flip: the old sub must be torn down and a new one created.
        let mut transparent_update = updated;
        transparent_update.use_transparent = true;

        let res = runtime.set_sub_with_relays(
            &mut OutboxSessionHandler::new(&mut pool, EguiWakeup::new(egui::Context::default())),
            &relays_b,
            account_pk(0x01),
            slot,
            scope,
            key,
            transparent_update,
        );
        assert!(matches!(res, SetSubResult::Updated));

        let new_live_id = runtime.live.get(&scoped).copied().expect("replacement id");
        assert_ne!(live_id, new_live_id);
        assert!(pool.filters(&live_id).is_none());
    }
+
    /// Verifies clearing the last owner unsubscribes the live outbox subscription and removes desired state.
    #[test]
    fn clear_sub_unsubscribes_live_subscription() {
        let mut runtime = ScopedSubRuntime::default();
        let mut pool = OutboxPool::default();
        let key = SubKey::new(("timeline", 1u64));
        let relays = relay_set("wss://relay-a.example.com");
        let slot = runtime.create_slot();

        let mut spec = empty_config(SubScope::Global);
        spec.filters = vec![Filter::new().kinds(vec![1]).limit(2).build()];

        runtime.set_sub_with_relays(
            &mut OutboxSessionHandler::new(&mut pool, EguiWakeup::new(egui::Context::default())),
            &relays,
            account_pk(0x01),
            slot,
            SubScope::Global,
            key,
            spec,
        );

        let scoped = ScopedSubRuntime::scoped_key(ResolvedSubScope::Global, key);
        let live_id = runtime.live.get(&scoped).copied().expect("live sub id");

        assert!(matches!(
            runtime.clear_sub_with_selected(
                &mut OutboxSessionHandler::new(
                    &mut pool,
                    EguiWakeup::new(egui::Context::default())
                ),
                account_pk(0x01),
                slot,
                key,
                SubScope::Global
            ),
            ClearSubResult::Cleared
        ));

        // All runtime state is gone and the pool no longer tracks the sub.
        assert_eq!(runtime.desired_len(), 0);
        assert_eq!(runtime.live_len(), 0);
        assert_eq!(runtime.slot_len(), 0);
        assert!(pool.filters(&live_id).is_none());

        // Clearing again must report NotFound, not panic or double-unsubscribe.
        assert!(matches!(
            runtime.clear_sub_with_selected(
                &mut OutboxSessionHandler::new(
                    &mut pool,
                    EguiWakeup::new(egui::Context::default())
                ),
                account_pk(0x01),
                slot,
                key,
                SubScope::Global
            ),
            ClearSubResult::NotFound
        ));
    }
+
    /// Verifies multiple owners share one live sub and only the final clear unsubscribes it.
    #[test]
    fn multiple_slots_share_single_live_sub_until_last_clear() {
        let mut runtime = ScopedSubRuntime::default();
        let mut pool = OutboxPool::default();
        let relays = relay_set("wss://relay-a.example.com");
        let account = account_pk(0x33);
        let key = SubKey::new(("thread", [9u8; 32]));

        let mut spec = empty_config(SubScope::Account);
        spec.filters = vec![Filter::new().kinds(vec![1]).limit(25).build()];

        let slot_a = runtime.create_slot();
        let slot_b = runtime.create_slot();

        let a = runtime.set_sub_with_relays(
            &mut OutboxSessionHandler::new(&mut pool, EguiWakeup::new(egui::Context::default())),
            &relays,
            account,
            slot_a,
            SubScope::Account,
            key,
            spec.clone(),
        );
        let b = runtime.set_sub_with_relays(
            &mut OutboxSessionHandler::new(&mut pool, EguiWakeup::new(egui::Context::default())),
            &relays,
            account,
            slot_b,
            SubScope::Account,
            key,
            spec,
        );

        // Second slot registering the same key counts as an update of the shared sub.
        assert!(matches!(a, SetSubResult::Created));
        assert!(matches!(b, SetSubResult::Updated));

        let scoped = ScopedSubRuntime::scoped_key(ResolvedSubScope::Account(account), key);
        let live_id = runtime.live.get(&scoped).copied().expect("live sub id");
        assert_eq!(runtime.desired_len(), 1);
        assert_eq!(runtime.live_len(), 1);
        assert_eq!(runtime.slot_len(), 2);
        assert!(pool.filters(&live_id).is_some());

        // First clear releases one owner; the sub must stay live.
        assert!(matches!(
            runtime.clear_sub_with_selected(
                &mut OutboxSessionHandler::new(
                    &mut pool,
                    EguiWakeup::new(egui::Context::default())
                ),
                account,
                slot_a,
                key,
                SubScope::Account
            ),
            ClearSubResult::StillInUse
        ));

        assert_eq!(runtime.desired_len(), 1);
        assert_eq!(runtime.live_len(), 1);
        assert_eq!(runtime.slot_len(), 1);
        assert!(pool.filters(&live_id).is_some());

        // Final clear tears everything down.
        assert!(matches!(
            runtime.clear_sub_with_selected(
                &mut OutboxSessionHandler::new(
                    &mut pool,
                    EguiWakeup::new(egui::Context::default())
                ),
                account,
                slot_b,
                key,
                SubScope::Account
            ),
            ClearSubResult::Cleared
        ));

        assert_eq!(runtime.desired_len(), 0);
        assert_eq!(runtime.live_len(), 0);
        assert_eq!(runtime.slot_len(), 0);
        assert!(pool.filters(&live_id).is_none());
    }
+
    /// Verifies dropping a slot clears every scoped sub owned by that slot.
    #[test]
    fn drop_slot_clears_all_owned_subs() {
        let mut runtime = ScopedSubRuntime::default();
        let mut pool = OutboxPool::default();
        let account = account_pk(0x4A);
        let relays = relay_set("wss://relay-a.example.com");
        let slot = runtime.create_slot();

        // One account-scoped and one global sub, both owned by the same slot.
        let key_account = SubKey::new(("timeline", "home"));
        let key_global = SubKey::new(("global", "discovery"));

        let mut account_spec = empty_config(SubScope::Account);
        account_spec.filters = vec![Filter::new().kinds(vec![1]).limit(5).build()];

        let mut global_spec = empty_config(SubScope::Global);
        global_spec.filters = vec![Filter::new().kinds(vec![0]).limit(5).build()];

        let _ = runtime.set_sub_with_relays(
            &mut OutboxSessionHandler::new(&mut pool, EguiWakeup::new(egui::Context::default())),
            &relays,
            account,
            slot,
            SubScope::Account,
            key_account,
            account_spec,
        );
        let _ = runtime.set_sub_with_relays(
            &mut OutboxSessionHandler::new(&mut pool, EguiWakeup::new(egui::Context::default())),
            &relays,
            account,
            slot,
            SubScope::Global,
            key_global,
            global_spec,
        );

        assert_eq!(runtime.desired_len(), 2);
        assert_eq!(runtime.live_len(), 2);
        assert_eq!(runtime.slot_len(), 1);

        assert!(matches!(
            runtime.drop_slot(
                &mut OutboxSessionHandler::new(
                    &mut pool,
                    EguiWakeup::new(egui::Context::default())
                ),
                slot
            ),
            DropSlotResult::Dropped
        ));

        // Both scopes were owned solely by this slot, so everything is gone.
        assert_eq!(runtime.desired_len(), 0);
        assert_eq!(runtime.live_len(), 0);
        assert_eq!(runtime.slot_len(), 0);

        // Dropping an already-dropped slot is a NotFound, not an error.
        assert!(matches!(
            runtime.drop_slot(
                &mut OutboxSessionHandler::new(
                    &mut pool,
                    EguiWakeup::new(egui::Context::default())
                ),
                slot
            ),
            DropSlotResult::NotFound
        ));
    }
+
    /// Verifies account switch unsubscribes the old account scope and restores it when switching back.
    #[test]
    fn account_switch_unsubscribes_old_scope_and_restores_new_scope() {
        let mut runtime = ScopedSubRuntime::default();
        let mut pool = OutboxPool::default();
        let account_a = account_pk(0xAA);
        let account_b = account_pk(0xBB);
        let relays_a = relay_set("wss://relay-a.example.com");
        let relays_b = relay_set("wss://relay-b.example.com");
        let key = SubKey::new(("timeline", "account-scoped"));
        let slot = runtime.create_slot();

        let mut scoped_spec = empty_config(SubScope::Account);
        scoped_spec.filters = vec![Filter::new().kinds(vec![1]).limit(2).build()];

        let _ = runtime.set_sub_with_relays(
            &mut OutboxSessionHandler::new(&mut pool, EguiWakeup::new(egui::Context::default())),
            &relays_a,
            account_a,
            slot,
            SubScope::Account,
            key,
            scoped_spec,
        );

        let scoped_a = ScopedSubRuntime::scoped_key(ResolvedSubScope::Account(account_a), key);
        let initial_live_id = runtime.live.get(&scoped_a).copied().expect("live id for A");
        assert!(pool.filters(&initial_live_id).is_some());

        runtime.on_account_switched_with_relays(
            &mut OutboxSessionHandler::new(&mut pool, EguiWakeup::new(egui::Context::default())),
            account_a,
            account_b,
            &relays_b,
        );

        // A's live sub is torn down, but its desired entry is retained.
        assert!(runtime.live.get(&scoped_a).is_none());
        assert!(pool.filters(&initial_live_id).is_none());
        assert_eq!(runtime.desired_len(), 1);

        runtime.on_account_switched_with_relays(
            &mut OutboxSessionHandler::new(&mut pool, EguiWakeup::new(egui::Context::default())),
            account_b,
            account_a,
            &relays_a,
        );

        // Switch-back recreates the sub (under a fresh outbox id).
        let restored_live_id = runtime
            .live
            .get(&scoped_a)
            .copied()
            .expect("account A should be restored on switch back");
        assert!(pool.filters(&restored_live_id).is_some());
    }
+
    /// Verifies account-scoped and global subscriptions obey the account-switch contract across app domains.
    #[test]
    fn account_switch_contract_with_multiple_apps_and_mixed_scopes() {
        let mut runtime = ScopedSubRuntime::default();
        let mut pool = OutboxPool::default();
        let account_a = account_pk(0xA1);
        let account_b = account_pk(0xB2);
        let peer_pk = account_pk(0xCC);

        let relays_a = relay_set("wss://relay-a.example.com");
        let relays_b = relay_set("wss://relay-b.example.com");
        let explicit_relay = NormRelayUrl::new("wss://relay-explicit.example.com").expect("relay");

        // Three account-scoped keys from different "apps", plus one global key.
        let key_timeline_a = make_key((FakeApp::Timelines, "home", 1u64, account_a));
        let key_thread_a = make_key((FakeApp::Threads, "root", [7u8; 32], account_a));
        let key_messages_a = make_key((FakeApp::Messages, "dm-relay-list", peer_pk, account_a));
        let key_global = make_key((FakeApp::Timelines, "global-discovery", 99u64));

        let mut timeline_spec_a = empty_config(SubScope::Account);
        timeline_spec_a.filters = vec![Filter::new().kinds(vec![1]).limit(50).build()];
        timeline_spec_a.relays = RelaySelection::AccountsRead;

        // Thread spec exercises transparent relay mode alongside the others.
        let mut thread_spec_a = empty_config(SubScope::Account);
        thread_spec_a.filters = vec![Filter::new().kinds(vec![1]).limit(200).build()];
        thread_spec_a.relays = RelaySelection::AccountsRead;
        thread_spec_a.use_transparent = true;

        // Messages spec uses an explicit relay rather than the account read set.
        let mut messages_spec_a = empty_config(SubScope::Account);
        messages_spec_a.filters = vec![Filter::new().kinds(vec![10002]).limit(20).build()];
        messages_spec_a.relays = RelaySelection::Explicit({
            let mut set = HashSet::new();
            set.insert(explicit_relay.clone());
            set
        });

        let mut global_spec = empty_config(SubScope::Global);
        global_spec.filters = vec![Filter::new().kinds(vec![0]).limit(10).build()];
        global_spec.relays = RelaySelection::Explicit({
            let mut set = HashSet::new();
            set.insert(explicit_relay.clone());
            set
        });

        let slot_timeline = runtime.create_slot();
        let slot_thread = runtime.create_slot();
        let slot_messages = runtime.create_slot();
        let slot_global = runtime.create_slot();

        let _ = runtime.set_sub_with_relays(
            &mut OutboxSessionHandler::new(&mut pool, EguiWakeup::new(egui::Context::default())),
            &relays_a,
            account_a,
            slot_timeline,
            SubScope::Account,
            key_timeline_a,
            timeline_spec_a,
        );
        let _ = runtime.set_sub_with_relays(
            &mut OutboxSessionHandler::new(&mut pool, EguiWakeup::new(egui::Context::default())),
            &relays_a,
            account_a,
            slot_thread,
            SubScope::Account,
            key_thread_a,
            thread_spec_a,
        );
        let _ = runtime.set_sub_with_relays(
            &mut OutboxSessionHandler::new(&mut pool, EguiWakeup::new(egui::Context::default())),
            &relays_a,
            account_a,
            slot_messages,
            SubScope::Account,
            key_messages_a,
            messages_spec_a,
        );
        let _ = runtime.set_sub_with_relays(
            &mut OutboxSessionHandler::new(&mut pool, EguiWakeup::new(egui::Context::default())),
            &relays_a,
            account_a,
            slot_global,
            SubScope::Global,
            key_global,
            global_spec,
        );

        let scoped_timeline_a =
            ScopedSubRuntime::scoped_key(ResolvedSubScope::Account(account_a), key_timeline_a);
        let scoped_thread_a =
            ScopedSubRuntime::scoped_key(ResolvedSubScope::Account(account_a), key_thread_a);
        let scoped_messages_a =
            ScopedSubRuntime::scoped_key(ResolvedSubScope::Account(account_a), key_messages_a);
        let scoped_global = ScopedSubRuntime::scoped_key(ResolvedSubScope::Global, key_global);

        let timeline_id_a = runtime
            .live
            .get(&scoped_timeline_a)
            .copied()
            .expect("timeline A live");
        let thread_id_a = runtime
            .live
            .get(&scoped_thread_a)
            .copied()
            .expect("thread A live");
        let messages_id_a = runtime
            .live
            .get(&scoped_messages_a)
            .copied()
            .expect("messages A live");
        let global_id = runtime
            .live
            .get(&scoped_global)
            .copied()
            .expect("global live");

        assert!(pool.filters(&timeline_id_a).is_some());
        assert!(pool.filters(&thread_id_a).is_some());
        assert!(pool.filters(&messages_id_a).is_some());
        assert!(pool.filters(&global_id).is_some());

        runtime.on_account_switched_with_relays(
            &mut OutboxSessionHandler::new(&mut pool, EguiWakeup::new(egui::Context::default())),
            account_a,
            account_b,
            &relays_b,
        );

        // Every account-A scoped sub is torn down; the global sub survives.
        assert!(
            runtime.live.get(&scoped_timeline_a).is_none()
                && runtime.live.get(&scoped_thread_a).is_none()
                && runtime.live.get(&scoped_messages_a).is_none()
        );
        assert!(
            pool.filters(&timeline_id_a).is_none()
                && pool.filters(&thread_id_a).is_none()
                && pool.filters(&messages_id_a).is_none()
        );
        assert!(runtime.live.get(&scoped_global).is_some() && pool.filters(&global_id).is_some());
        assert_eq!(runtime.desired_len(), 4);

        runtime.on_account_switched_with_relays(
            &mut OutboxSessionHandler::new(&mut pool, EguiWakeup::new(egui::Context::default())),
            account_b,
            account_a,
            &relays_a,
        );

        // Switch-back restores all three account-A subs from desired state.
        let restored_timeline_id = runtime
            .live
            .get(&scoped_timeline_a)
            .copied()
            .expect("timeline A restored");
        let restored_thread_id = runtime
            .live
            .get(&scoped_thread_a)
            .copied()
            .expect("thread A restored");
        let restored_messages_id = runtime
            .live
            .get(&scoped_messages_a)
            .copied()
            .expect("messages A restored");

        assert!(pool.filters(&restored_timeline_id).is_some());
        assert!(pool.filters(&restored_thread_id).is_some());
        assert!(pool.filters(&restored_messages_id).is_some());
    }
+
    // Handle to a submitted sub: its scoped key plus the outbox id it got.
    #[derive(Clone)]
    struct SubmittedSub {
        scoped: ScopedSubKey,
        live_id: OutboxSubId,
    }

    // Scenario harness for selected-account read-relay retarget tests.
    // Keep this narrow; it is intentionally not a generic scoped-subs fixture.
    struct RetargetReadRelaysTest {
        runtime: ScopedSubRuntime,
        pool: OutboxPool,
        selected_account: Pubkey,
        other_account: Pubkey,
        relay_a: HashSet<NormRelayUrl>,
        relay_b: HashSet<NormRelayUrl>,
    }

    impl RetargetReadRelaysTest {
        fn new() -> Self {
            Self {
                runtime: ScopedSubRuntime::default(),
                pool: OutboxPool::default(),
                selected_account: account_pk(0xA1),
                other_account: account_pk(0xB2),
                relay_a: relay_set("wss://relay-a.example.com"),
                relay_b: relay_set("wss://relay-b.example.com"),
            }
        }

        // Account-scoped AccountsRead sub for the selected account.
        fn submit_accountsread_account_home(&mut self) -> SubmittedSub {
            self.submit_sub(
                SubScope::Account,
                make_key((FakeApp::Timelines, "home", 1u64)),
                accountsread_spec(SubScope::Account, 1, 50),
            )
        }

        // Global-scoped sub that still targets the account read relays.
        fn submit_accountsread_global_feed(&mut self) -> SubmittedSub {
            self.submit_sub(
                SubScope::Global,
                make_key((FakeApp::Timelines, "global-ish", 2u64)),
                accountsread_spec(SubScope::Global, 0, 10),
            )
        }

        // Account-scoped sub pinned to an explicit relay (not AccountsRead).
        fn submit_account_explicit_messages(&mut self) -> SubmittedSub {
            self.submit_sub(
                SubScope::Account,
                make_key((FakeApp::Messages, "explicit", 3u64)),
                explicit_account_spec(),
            )
        }

        // AccountsRead sub belonging to the *other* (non-selected) account.
        fn submit_accountsread_other_account_home(&mut self) -> SubmittedSub {
            self.submit_sub_for_account(
                self.other_account,
                SubScope::Account,
                make_key((FakeApp::Timelines, "home", 99u64)),
                accountsread_spec(SubScope::Account, 1, 25),
            )
        }

        fn submit_sub(&mut self, scope: SubScope, key: SubKey, spec: SubConfig) -> SubmittedSub {
            self.submit_sub_for_account(self.selected_account, scope, key, spec)
        }

        // Registers the sub on a fresh slot (always with relay_a as the
        // account read set) and captures the live outbox id it received.
        fn submit_sub_for_account(
            &mut self,
            account: Pubkey,
            scope: SubScope,
            key: SubKey,
            spec: SubConfig,
        ) -> SubmittedSub {
            let slot = self.runtime.create_slot();
            let _ = self.runtime.set_sub_with_relays(
                &mut outbox(&mut self.pool),
                &self.relay_a,
                account,
                slot,
                scope,
                key,
                spec,
            );

            let resolved_scope = match scope {
                SubScope::Account => ResolvedSubScope::Account(account),
                SubScope::Global => ResolvedSubScope::Global,
            };
            let scoped = ScopedSubRuntime::scoped_key(resolved_scope, key);
            let live_id = self.runtime.live.get(&scoped).copied().unwrap();

            SubmittedSub { scoped, live_id }
        }

        // Simulate a refresh of the selected account's read relay list.
        fn retarget_to_relay_b(&mut self) {
            self.runtime
                .retarget_selected_account_read_relays_with_relays(
                    &mut outbox(&mut self.pool),
                    self.selected_account,
                    &self.relay_b,
                );
        }

        fn assert_live_id_unchanged(&self, sub: &SubmittedSub) {
            assert_eq!(self.runtime.live.get(&sub.scoped), Some(&sub.live_id));
        }

        fn assert_still_live(&self, sub: &SubmittedSub) {
            assert!(self.pool.filters(&sub.live_id).is_some());
        }

        fn switch_selected_account_away(&mut self) {
            self.runtime.on_account_switched_with_relays(
                &mut outbox(&mut self.pool),
                self.selected_account,
                self.other_account,
                &self.relay_b,
            );
        }

        fn assert_not_live(&self, sub: &SubmittedSub) {
            assert!(self.runtime.live.get(&sub.scoped).is_none());
            assert!(self.pool.filters(&sub.live_id).is_none());
        }

        // The sub is live again, but under a different outbox id than before.
        fn assert_live_recreated(&self, sub: &SubmittedSub) {
            let recreated_live_id = self.runtime.live.get(&sub.scoped).copied().unwrap();
            assert_ne!(recreated_live_id, sub.live_id);
            assert!(self.pool.filters(&recreated_live_id).is_some());
            assert!(self.pool.filters(&sub.live_id).is_none());
        }
    }
+
+ /// Verifies selected-account relay list refresh retargets all AccountsRead subs in scope.
+ #[test]
+ fn selected_account_relay_refresh_updates_account_and_global_accountsread_subs() {
+ let mut t = RetargetReadRelaysTest::new();
+
+ let account_home = t.submit_accountsread_account_home();
+ let global_feed = t.submit_accountsread_global_feed();
+ let explicit_messages = t.submit_account_explicit_messages();
+
+ t.retarget_to_relay_b();
+
+ t.assert_live_id_unchanged(&account_home);
+ t.assert_live_id_unchanged(&global_feed);
+ t.assert_live_id_unchanged(&explicit_messages);
+
+ t.assert_still_live(&account_home);
+ t.assert_still_live(&global_feed);
+ t.assert_still_live(&explicit_messages);
+ }
+
+ /// Verifies retargeting recreates a missing live AccountsRead sub from desired state.
+ #[test]
+ fn selected_account_relay_retarget_recreates_missing_live_sub() {
+ let mut t = RetargetReadRelaysTest::new();
+
+ let account_home = t.submit_accountsread_account_home();
+ t.switch_selected_account_away();
+ t.assert_not_live(&account_home);
+
+ t.retarget_to_relay_b();
+
+ t.assert_live_recreated(&account_home);
+ }
+
+ /// Verifies retargeting the selected account does not touch another account's account-scoped sub.
+ #[test]
+ fn selected_account_relay_retarget_ignores_other_account_scoped_subs() {
+ let mut t = RetargetReadRelaysTest::new();
+
+ let selected_account_home = t.submit_accountsread_account_home();
+ let other_account_home = t.submit_accountsread_other_account_home();
+
+ t.retarget_to_relay_b();
+
+ t.assert_live_id_unchanged(&selected_account_home);
+ t.assert_live_id_unchanged(&other_account_home);
+ t.assert_still_live(&selected_account_home);
+ t.assert_still_live(&other_account_home);
+ }
+
+ /// Verifies typed SubKey builder output is stable for identical inputs.
+ #[test]
+ fn subkey_builder_is_stable_and_typed() {
+ let key_a = SubKey::builder(FakeApp::Messages)
+ .with("dm-relay-list")
+ .with(account_pk(0x11))
+ .with(42u64)
+ .finish();
+ let key_b = SubKey::builder(FakeApp::Messages)
+ .with("dm-relay-list")
+ .with(account_pk(0x11))
+ .with(42u64)
+ .finish();
+ let key_c = SubKey::builder(FakeApp::Messages)
+ .with("dm-relay-list")
+ .with(account_pk(0x11))
+ .with(43u64)
+ .finish();
+
+ assert_eq!(key_a, key_b);
+ assert_ne!(key_a, key_c);
+ }
+
+ /// Verifies that upserting an empty filter set removes the active live subscription
+ /// while preserving desired state for future restoration.
+ #[test]
+ fn set_sub_with_empty_filters_removes_live_but_keeps_desired() {
+ let mut runtime = ScopedSubRuntime::default();
+ let mut pool = OutboxPool::default();
+ let relays = relay_set("wss://relay-a.example.com");
+ let key = SubKey::new(("messages", "dm-relay-list", 1u8));
+ let slot = runtime.create_slot();
+
+ let mut initial = empty_config(SubScope::Global);
+ initial.filters = vec![Filter::new().kinds(vec![10002]).limit(10).build()];
+
+ let created = runtime.set_sub_with_relays(
+ &mut OutboxSessionHandler::new(&mut pool, EguiWakeup::new(egui::Context::default())),
+ &relays,
+ account_pk(0x01),
+ slot,
+ SubScope::Global,
+ key,
+ initial,
+ );
+ assert!(matches!(created, SetSubResult::Created));
+
+ let scoped = ScopedSubRuntime::scoped_key(ResolvedSubScope::Global, key);
+ let live_id = runtime.live.get(&scoped).copied().expect("live sub id");
+ assert!(pool.filters(&live_id).is_some());
+
+ let emptied = runtime.set_sub_with_relays(
+ &mut OutboxSessionHandler::new(&mut pool, EguiWakeup::new(egui::Context::default())),
+ &relays,
+ account_pk(0x01),
+ slot,
+ SubScope::Global,
+ key,
+ empty_config(SubScope::Global),
+ );
+ assert!(matches!(emptied, SetSubResult::Updated));
+ assert_eq!(runtime.desired_len(), 1);
+ assert_eq!(runtime.live_len(), 0);
+ assert!(pool.filters(&live_id).is_none());
+ }
+}
diff --git a/crates/notedeck/src/unknowns.rs b/crates/notedeck/src/unknowns.rs
@@ -1,7 +1,7 @@
use crate::{
note::NoteRef,
notecache::{CachedNote, NoteCache},
- Result,
+ OneshotApi, Result,
};
use enostr::{Filter, NoteId, Pubkey};
@@ -164,6 +164,7 @@ impl UnknownIds {
}
}
+ #[profiling::function]
pub fn update_from_note(
txn: &Transaction,
ndb: &Ndb,
@@ -384,14 +385,14 @@ fn get_unknown_ids_filter(ids: &[&UnknownId]) -> Option<Vec<Filter>> {
Some(filters)
}
-pub fn unknown_id_send(unknown_ids: &mut UnknownIds, pool: &mut enostr::RelayPool) {
+pub fn unknown_id_send(unknown_ids: &mut UnknownIds, oneshot: &mut OneshotApi<'_, '_>) {
tracing::debug!("unknown_id_send called on: {:?}", &unknown_ids);
let filter = unknown_ids.filter().expect("filter");
tracing::debug!(
"Getting {} unknown ids from relays",
unknown_ids.ids_iter().len()
);
- let msg = enostr::ClientMessage::req("unknownids".to_string(), filter);
+
+ oneshot.oneshot(filter);
unknown_ids.clear();
- pool.send(&msg);
}
diff --git a/crates/notedeck/src/zaps/cache.rs b/crates/notedeck/src/zaps/cache.rs
@@ -276,6 +276,7 @@ impl Zaps {
states.push(*id);
}
+ #[profiling::function]
pub fn process(
&mut self,
accounts: &mut Accounts,
diff --git a/crates/notedeck_chrome/src/android.rs b/crates/notedeck_chrome/src/android.rs
@@ -66,13 +66,18 @@ pub async fn android_main(android_app: AndroidApp) {
Box::new(move |cc| {
let ctx = &cc.egui_ctx;
- let mut notedeck = Notedeck::new(ctx, path, &app_args);
- notedeck.set_android_context(android_app);
- notedeck.setup(ctx);
- let chrome = Chrome::new_with_apps(cc, &app_args, &mut notedeck)?;
- notedeck.set_app(chrome);
-
- Ok(Box::new(notedeck))
+ let mut notedeck_ctx = Notedeck::init(ctx, path, &app_args);
+ notedeck_ctx.notedeck.set_android_context(android_app);
+ notedeck_ctx.notedeck.setup(ctx);
+ let chrome = Chrome::new_with_apps(
+ cc,
+ &app_args,
+ &mut notedeck_ctx.notedeck,
+ notedeck_ctx.outbox_session,
+ )?;
+ notedeck_ctx.notedeck.set_app(chrome);
+
+ Ok(Box::new(notedeck_ctx.notedeck))
}),
);
}
diff --git a/crates/notedeck_chrome/src/chrome.rs b/crates/notedeck_chrome/src/chrome.rs
@@ -13,6 +13,7 @@ use egui_extras::{Size, StripBuilder};
use egui_nav::RouteResponse;
use egui_nav::{NavAction, NavDrawer};
use nostrdb::{ProfileRecord, Transaction};
+use notedeck::enostr::OutboxSession;
use notedeck::fonts::get_font_size;
use notedeck::name::get_display_name;
use notedeck::ui::is_compiled_as_mobile;
@@ -151,15 +152,16 @@ impl Chrome {
cc: &CreationContext,
app_args: &[String],
notedeck: &mut Notedeck,
+ outbox_session: OutboxSession,
) -> Result<Self, Error> {
stop_debug_mode(notedeck.options());
- let context = &mut notedeck.app_context();
+ let notedeck_ref = &mut notedeck.notedeck_ref(&cc.egui_ctx, Some(outbox_session));
let dave = Dave::new(
cc.wgpu_render_state.as_ref(),
- context.ndb.clone(),
+ notedeck_ref.app_ctx.ndb.clone(),
cc.egui_ctx.clone(),
- context.path,
+ notedeck_ref.app_ctx.path,
);
#[cfg(feature = "wasm")]
let wasm_dir = context
@@ -169,8 +171,10 @@ impl Chrome {
let mut chrome = Chrome::default();
if !app_args.iter().any(|arg| arg == "--no-columns-app") {
- let columns = Damus::new(context, app_args);
- notedeck.check_args(columns.unrecognized_args())?;
+ let columns = Damus::new(&mut notedeck_ref.app_ctx, app_args);
+ notedeck_ref
+ .internals
+ .check_args(columns.unrecognized_args())?;
chrome.add_app(NotedeckApp::Columns(Box::new(columns)));
}
@@ -570,7 +574,7 @@ fn chrome_handle_app_action(
&mut columns.timeline_cache,
&mut columns.threads,
ctx.note_cache,
- ctx.pool,
+ &mut ctx.remote,
&txn,
ctx.unknown_ids,
ctx.accounts,
@@ -627,7 +631,7 @@ fn columns_route_to_profile(
&mut columns.timeline_cache,
&mut columns.threads,
ctx.note_cache,
- ctx.pool,
+ &mut ctx.remote,
&txn,
ctx.unknown_ids,
ctx.accounts,
diff --git a/crates/notedeck_chrome/src/notedeck.rs b/crates/notedeck_chrome/src/notedeck.rs
@@ -87,12 +87,17 @@ async fn main() {
let args: Vec<String> = std::env::args().collect();
let ctx = &cc.egui_ctx;
- let mut notedeck = Notedeck::new(ctx, base_path, &args);
- notedeck.setup(ctx);
- let chrome = Chrome::new_with_apps(cc, &args, &mut notedeck)?;
- notedeck.set_app(chrome);
-
- Ok(Box::new(notedeck))
+ let mut notedeck_ctx = Notedeck::init(ctx, base_path, &args);
+ notedeck_ctx.notedeck.setup(ctx);
+ let chrome = Chrome::new_with_apps(
+ cc,
+ &args,
+ &mut notedeck_ctx.notedeck,
+ notedeck_ctx.outbox_session,
+ )?;
+ notedeck_ctx.notedeck.set_app(chrome);
+
+ Ok(Box::new(notedeck_ctx.notedeck))
}),
);
}
@@ -155,7 +160,7 @@ mod tests {
.collect();
let ctx = egui::Context::default();
- let _app = Notedeck::new(&ctx, &datapath, &args);
+ let _app = Notedeck::init(&ctx, &datapath, &args);
assert!(Path::new(&dbpath.join("data.mdb")).exists());
assert!(Path::new(&dbpath.join("lock.mdb")).exists());
@@ -184,8 +189,8 @@ mod tests {
.collect();
let ctx = egui::Context::default();
- let mut notedeck = Notedeck::new(&ctx, &tmpdir, &args);
- let mut app_ctx = notedeck.app_context();
+ let mut notedeck_ctx = Notedeck::init(&ctx, &tmpdir, &args);
+ let mut app_ctx = notedeck_ctx.notedeck.app_context(&ctx);
let app = Damus::new(&mut app_ctx, &args);
assert_eq!(app.columns(app_ctx.accounts).columns().len(), 2);
@@ -233,12 +238,12 @@ mod tests {
.collect();
let ctx = egui::Context::default();
- let mut notedeck = Notedeck::new(&ctx, &tmpdir, &args);
- let mut app_ctx = notedeck.app_context();
- let app = Damus::new(&mut app_ctx, &args);
+ let mut notedeck_ctx = Notedeck::init(&ctx, &tmpdir, &args);
+ let app = Damus::new(&mut notedeck_ctx.notedeck.app_context(&ctx), &args);
// ensure we recognized all the arguments
- let completely_unrecognized: Vec<String> = notedeck
+ let completely_unrecognized: Vec<String> = notedeck_ctx
+ .notedeck
.unrecognized_args()
.intersection(app.unrecognized_args())
.cloned()
diff --git a/crates/notedeck_clndash/src/ui.rs b/crates/notedeck_clndash/src/ui.rs
@@ -48,7 +48,6 @@ pub fn note_hover_ui(
img_cache: ctx.img_cache,
note_cache: ctx.note_cache,
zaps: ctx.zaps,
- pool: ctx.pool,
jobs: ctx.media_jobs.sender(),
unknown_ids: ctx.unknown_ids,
nip05_cache: ctx.nip05_cache,
diff --git a/crates/notedeck_columns/src/accounts/mod.rs b/crates/notedeck_columns/src/accounts/mod.rs
@@ -7,9 +7,9 @@ use notedeck_ui::nip51_set::Nip51SetUiCache;
pub use crate::accounts::route::AccountsResponse;
use crate::app::get_active_columns_mut;
use crate::decks::DecksCache;
-use crate::onboarding::Onboarding;
+use crate::onboarding::{Onboarding, OnboardingEffect};
use crate::profile::{send_default_dms_relay_list, send_new_contact_list};
-use crate::subscriptions::Subscriptions;
+use crate::scoped_sub_owner_keys::onboarding_owner_key;
use crate::ui::onboarding::{FollowPackOnboardingView, FollowPacksResponse, OnboardingResponse};
use crate::{
login_manager::AcquireKeyState,
@@ -152,7 +152,6 @@ pub fn process_accounts_view_response(
pub fn process_login_view_response(
app_ctx: &mut AppContext,
decks: &mut DecksCache,
- subs: &mut Subscriptions,
onboarding: &mut Onboarding,
col: usize,
response: AccountLoginResponse,
@@ -168,14 +167,13 @@ pub fn process_login_view_response(
}
AccountLoginResponse::CreatingNew => {
cur_router.route_to(Route::Accounts(AccountsRoute::Onboarding));
-
- onboarding.process(app_ctx.pool, app_ctx.ndb, subs, app_ctx.unknown_ids);
+ process_onboarding_step(app_ctx, onboarding, col);
None
}
AccountLoginResponse::Onboarding(onboarding_response) => match onboarding_response {
FollowPacksResponse::NoFollowPacks => {
- onboarding.process(app_ctx.pool, app_ctx.ndb, subs, app_ctx.unknown_ids);
+ process_onboarding_step(app_ctx, onboarding, col);
None
}
FollowPacksResponse::UserSelectedPacks(nip51_sets_ui_state) => {
@@ -183,10 +181,20 @@ pub fn process_login_view_response(
let kp = FullKeypair::generate();
- send_new_contact_list(kp.to_filled(), app_ctx.ndb, app_ctx.pool, pks_to_follow);
- send_default_dms_relay_list(kp.to_filled(), app_ctx.ndb, app_ctx.pool);
+ {
+ let mut publisher = app_ctx.remote.publisher(app_ctx.accounts);
+ send_new_contact_list(
+ kp.to_filled(),
+ app_ctx.ndb,
+ &mut publisher,
+ pks_to_follow,
+ );
+ send_default_dms_relay_list(kp.to_filled(), app_ctx.ndb, &mut publisher);
+ }
cur_router.go_back();
- onboarding.end_onboarding(app_ctx.pool, app_ctx.ndb);
+ onboarding.end_onboarding(app_ctx.ndb);
+ let mut scoped_subs = app_ctx.remote.scoped_subs(app_ctx.accounts);
+ let _ = scoped_subs.drop_owner(onboarding_owner_key(col));
app_ctx.accounts.add_account(kp.to_keypair())
}
@@ -210,6 +218,19 @@ pub fn process_login_view_response(
}
}
+fn process_onboarding_step(app_ctx: &mut AppContext, onboarding: &mut Onboarding, col: usize) {
+ let owner = onboarding_owner_key(col);
+ let effect = {
+ let mut scoped_subs = app_ctx.remote.scoped_subs(app_ctx.accounts);
+ onboarding.process(&mut scoped_subs, owner, app_ctx.ndb, app_ctx.unknown_ids)
+ };
+
+ if let Some(OnboardingEffect::Oneshot(filters)) = effect {
+ let mut oneshot = app_ctx.remote.oneshot(app_ctx.accounts);
+ oneshot.oneshot(filters);
+ }
+}
+
impl AccountsRouteResponse {
pub fn process(
self,
@@ -235,7 +256,6 @@ impl AccountsRouteResponse {
let action = process_login_view_response(
app_ctx,
&mut app.decks_cache,
- &mut app.subscriptions,
&mut app.onboarding,
col,
response,
diff --git a/crates/notedeck_columns/src/actionbar.rs b/crates/notedeck_columns/src/actionbar.rs
@@ -12,13 +12,14 @@ use crate::{
};
use egui_nav::Percent;
-use enostr::{FilledKeypair, NoteId, Pubkey, RelayPool};
+use enostr::{FilledKeypair, NoteId, Pubkey};
use nostrdb::{IngestMetadata, Ndb, NoteBuilder, NoteKey, Transaction};
use notedeck::{
get_wallet_for, is_future_timestamp,
note::{reaction_sent_id, ReactAction, ZapTargetAmount},
unix_time_secs, Accounts, GlobalWallet, Images, MediaJobSender, NoteAction, NoteCache,
- NoteZapTargetOwned, UnknownIds, ZapAction, ZapTarget, ZappingError, Zaps,
+ NoteZapTargetOwned, PublishApi, RelayType, RemoteApi, UnknownIds, ZapAction, ZapTarget,
+ ZappingError, Zaps,
};
use notedeck_ui::media::MediaViewerFlags;
use tracing::error;
@@ -51,7 +52,7 @@ fn execute_note_action(
timeline_cache: &mut TimelineCache,
threads: &mut Threads,
note_cache: &mut NoteCache,
- pool: &mut RelayPool,
+ remote: &mut RemoteApi<'_>,
txn: &Transaction,
accounts: &mut Accounts,
global_wallet: &mut GlobalWallet,
@@ -100,7 +101,10 @@ fn execute_note_action(
}
NoteAction::React(react_action) => {
if let Some(filled) = accounts.selected_filled() {
- if let Err(err) = send_reaction_event(ndb, txn, pool, filled, &react_action) {
+ let mut publisher = remote.publisher(&*accounts);
+ if let Err(err) =
+ send_reaction_event(ndb, txn, &mut publisher, filled, &react_action)
+ {
tracing::error!("Failed to send reaction: {err}");
}
ui.ctx().data_mut(|d| {
@@ -116,8 +120,17 @@ fn execute_note_action(
NoteAction::Profile(pubkey) => {
let kind = TimelineKind::Profile(pubkey);
router_action = Some(RouterAction::route_to(Route::Timeline(kind.clone())));
+ let mut scoped_subs = remote.scoped_subs(accounts);
timeline_res = timeline_cache
- .open(ndb, note_cache, txn, pool, &kind, false)
+ .open(
+ ndb,
+ note_cache,
+ txn,
+ &mut scoped_subs,
+ &kind,
+ *accounts.selected_account_pubkey(),
+ false,
+ )
.map(NotesOpenResult::Timeline);
}
NoteAction::Note {
@@ -130,12 +143,12 @@ fn execute_note_action(
tracing::error!("No thread selection for {}?", hex::encode(note_id.bytes()));
break 'ex;
};
-
+ let mut scoped_subs = remote.scoped_subs(accounts);
timeline_res = threads
.open(
ndb,
txn,
- pool,
+ &mut scoped_subs,
&thread_selection,
preview,
col,
@@ -153,8 +166,17 @@ fn execute_note_action(
NoteAction::Hashtag(htag) => {
let kind = TimelineKind::Hashtag(vec![htag.clone()]);
router_action = Some(RouterAction::route_to(Route::Timeline(kind.clone())));
+ let mut scoped_subs = remote.scoped_subs(&*accounts);
timeline_res = timeline_cache
- .open(ndb, note_cache, txn, pool, &kind, false)
+ .open(
+ ndb,
+ note_cache,
+ txn,
+ &mut scoped_subs,
+ &kind,
+ *accounts.selected_account_pubkey(),
+ false,
+ )
.map(NotesOpenResult::Timeline);
}
NoteAction::Repost(note_id) => {
@@ -191,7 +213,7 @@ fn execute_note_action(
send_zap(
&sender,
zaps,
- pool,
+ accounts,
target,
wallet.default_zap.get_default_zap_msats(),
)
@@ -221,7 +243,7 @@ fn execute_note_action(
} else {
context
.action
- .process_selection(ui, ¬e, ndb, pool, txn, accounts);
+ .process_selection(ui, ¬e, ndb, remote, txn, accounts);
}
}
},
@@ -255,7 +277,7 @@ pub fn execute_and_process_note_action(
timeline_cache: &mut TimelineCache,
threads: &mut Threads,
note_cache: &mut NoteCache,
- pool: &mut RelayPool,
+ remote: &mut RemoteApi<'_>,
txn: &Transaction,
unknown_ids: &mut UnknownIds,
accounts: &mut Accounts,
@@ -282,7 +304,7 @@ pub fn execute_and_process_note_action(
timeline_cache,
threads,
note_cache,
- pool,
+ remote,
txn,
accounts,
global_wallet,
@@ -312,7 +334,7 @@ pub fn execute_and_process_note_action(
fn send_reaction_event(
ndb: &mut Ndb,
txn: &Transaction,
- pool: &mut RelayPool,
+ publisher: &mut PublishApi<'_, '_>,
kp: FilledKeypair<'_>,
reaction: &ReactAction,
) -> Result<(), String> {
@@ -374,7 +396,7 @@ fn send_reaction_event(
let _ = ndb.process_event_with(&json, IngestMetadata::new().client(true));
- pool.send(event);
+ publisher.publish_note(¬e, RelayType::AccountsWrite);
Ok(())
}
@@ -400,7 +422,7 @@ fn find_addressable_d_tag(note: &nostrdb::Note<'_>) -> Option<String> {
fn send_zap(
sender: &Pubkey,
zaps: &mut Zaps,
- pool: &RelayPool,
+ accounts: &Accounts,
target_amount: &ZapTargetAmount,
default_msats: u64,
) {
@@ -408,7 +430,14 @@ fn send_zap(
let msats = target_amount.specified_msats.unwrap_or(default_msats);
- let sender_relays: Vec<String> = pool.relays.iter().map(|r| r.url().to_string()).collect();
+ let sender_relays: Vec<String> = accounts
+ .selected_account_write_relays()
+ .into_iter()
+ .filter_map(|r| match r {
+ enostr::RelayId::Websocket(norm_relay_url) => Some(norm_relay_url.to_string()),
+ enostr::RelayId::Multicast => None,
+ })
+ .collect();
zaps.send_zap(sender.bytes(), sender_relays, zap_target, msats);
}
diff --git a/crates/notedeck_columns/src/app.rs b/crates/notedeck_columns/src/app.rs
@@ -8,7 +8,6 @@ use crate::{
options::AppOptions,
route::Route,
storage,
- subscriptions::{SubKind, Subscriptions},
support::Support,
timeline::{self, kind::ListKind, thread::Threads, TimelineCache, TimelineKind},
timeline_loader::{TimelineLoader, TimelineLoaderMsg},
@@ -18,12 +17,12 @@ use crate::{
Result,
};
use egui_extras::{Size, StripBuilder};
-use enostr::{ClientMessage, Pubkey, RelayEvent, RelayMessage};
+use enostr::Pubkey;
use nostrdb::Transaction;
use notedeck::{
- tr, try_process_events_core, ui::is_compiled_as_mobile, ui::is_narrow, Accounts, AppAction,
- AppContext, AppResponse, DataPath, DataPathType, FilterState, Images, Localization,
- MediaJobSender, NotedeckOptions, SettingsHandler,
+ tr, ui::is_compiled_as_mobile, ui::is_narrow, Accounts, AppAction, AppContext, AppResponse,
+ DataPath, DataPathType, FilterState, Images, Localization, MediaJobSender, NotedeckOptions,
+ SettingsHandler,
};
use notedeck_ui::{
media::{MediaViewer, MediaViewerFlags, MediaViewerState},
@@ -32,7 +31,6 @@ use notedeck_ui::{
use std::collections::{BTreeSet, HashMap, HashSet};
use std::path::Path;
use tracing::{error, info, warn};
-use uuid::Uuid;
/// Max timeline loader messages to process per frame to avoid UI stalls.
const MAX_TIMELINE_LOADER_MSGS_PER_FRAME: usize = 8;
@@ -51,7 +49,6 @@ pub struct Damus {
pub view_state: ViewState,
pub drafts: Drafts,
pub timeline_cache: TimelineCache,
- pub subscriptions: Subscriptions,
pub support: Support,
pub threads: Threads,
/// Background loader for initial timeline scans.
@@ -77,6 +74,7 @@ pub struct Damus {
hovered_column: Option<usize>,
}
+#[profiling::function]
fn handle_egui_events(
input: &egui::InputState,
columns: &mut Columns,
@@ -184,26 +182,29 @@ fn try_process_event(
)
});
- try_process_events_core(app_ctx, ctx, |app_ctx, ev| match (&ev.event).into() {
- RelayEvent::Opened => {
- timeline::send_initial_timeline_filters(
- damus.options.contains(AppOptions::SinceOptimize),
- &mut damus.timeline_cache,
- &mut damus.subscriptions,
- app_ctx.pool,
- &ev.relay,
- app_ctx.accounts,
- );
+ let selected_account_pk = *app_ctx.accounts.selected_account_pubkey();
+ for (kind, timeline) in &mut damus.timeline_cache {
+ if timeline.subscription.dependers(&selected_account_pk) == 0 {
+ continue;
}
- RelayEvent::Message(msg) => {
- process_message(damus, app_ctx, &ev.relay, &msg);
+
+ if let FilterState::Ready(filter) = &timeline.filter {
+ if timeline.kind.should_subscribe_locally()
+ && timeline
+ .subscription
+ .get_local(&selected_account_pk)
+ .is_none()
+ {
+ timeline
+ .subscription
+ .try_add_local(selected_account_pk, app_ctx.ndb, filter);
+ }
}
- _ => {}
- });
- for (kind, timeline) in &mut damus.timeline_cache {
- let is_ready =
- timeline::is_timeline_ready(app_ctx.ndb, app_ctx.pool, timeline, app_ctx.accounts);
+ let is_ready = {
+ let mut scoped_subs = app_ctx.remote.scoped_subs(app_ctx.accounts);
+ timeline::is_timeline_ready(app_ctx.ndb, &mut scoped_subs, timeline, app_ctx.accounts)
+ };
if is_ready {
schedule_timeline_load(
@@ -213,12 +214,14 @@ fn try_process_event(
app_ctx.ndb,
kind,
timeline,
+ app_ctx.accounts.selected_account_pubkey(),
);
let txn = Transaction::new(app_ctx.ndb).expect("txn");
// only thread timelines are reversed
let reversed = false;
if let Err(err) = timeline.poll_notes_into_view(
+ &selected_account_pk,
app_ctx.ndb,
&txn,
app_ctx.unknown_ids,
@@ -234,33 +237,22 @@ fn try_process_event(
| TimelineKind::Algo(timeline::kind::AlgoTimeline::LastPerPubkey(
ListKind::Contact(_),
)) => {
- timeline::fetch_contact_list(
- &mut damus.subscriptions,
- timeline,
- app_ctx.accounts,
- );
+ timeline::fetch_contact_list(timeline, app_ctx.accounts);
}
- TimelineKind::List(ListKind::PeopleList(plr))
+ TimelineKind::List(ListKind::PeopleList(_))
| TimelineKind::Algo(timeline::kind::AlgoTimeline::LastPerPubkey(
- ListKind::PeopleList(plr),
+ ListKind::PeopleList(_),
)) => {
- let plr = plr.clone();
- for relay in &mut app_ctx.pool.relays {
- timeline::fetch_people_list(
- &mut damus.subscriptions,
- relay,
- timeline,
- &plr,
- );
- }
+ let txn = Transaction::new(app_ctx.ndb).expect("txn");
+ timeline::fetch_people_list(app_ctx.ndb, &txn, timeline);
}
_ => {}
}
}
- }
- if let Some(follow_packs) = damus.onboarding.get_follow_packs_mut() {
- follow_packs.poll_for_notes(app_ctx.ndb, app_ctx.unknown_ids);
+ if let Some(follow_packs) = damus.onboarding.get_follow_packs_mut() {
+ follow_packs.poll_for_notes(app_ctx.ndb, app_ctx.unknown_ids);
+ }
}
Ok(())
@@ -274,17 +266,20 @@ fn schedule_timeline_load(
ndb: &nostrdb::Ndb,
kind: &TimelineKind,
timeline: &mut timeline::Timeline,
+ account_pk: &Pubkey,
) {
if loaded.contains(kind) || inflight.contains(kind) {
return;
}
- let Some(filter) = timeline.filter.get_any_ready().cloned() else {
+ let FilterState::Ready(filter) = timeline.filter.clone() else {
return;
};
if timeline.kind.should_subscribe_locally() {
- timeline.subscription.try_add_local(ndb, &filter);
+ timeline
+ .subscription
+ .try_add_local(*account_pk, ndb, &filter);
}
loader.load_timeline(kind.clone());
@@ -343,10 +338,7 @@ fn update_damus(damus: &mut Damus, app_ctx: &mut AppContext<'_>, ctx: &egui::Con
match damus.state {
DamusState::Initializing => {
damus.state = DamusState::Initialized;
- // this lets our eose handler know to close unknownids right away
- damus
- .subscriptions()
- .insert("unknownids".to_string(), SubKind::OneShot);
+ setup_selected_account_timeline_subs(&mut damus.timeline_cache, app_ctx);
if !app_ctx.settings.welcome_completed() {
let split =
@@ -375,109 +367,18 @@ fn update_damus(damus: &mut Damus, app_ctx: &mut AppContext<'_>, ctx: &egui::Con
}
}
-fn handle_eose(
- subscriptions: &Subscriptions,
+pub(crate) fn setup_selected_account_timeline_subs(
timeline_cache: &mut TimelineCache,
- ctx: &mut AppContext<'_>,
- subid: &str,
- relay_url: &str,
-) -> Result<()> {
- let sub_kind = if let Some(sub_kind) = subscriptions.subs.get(subid) {
- sub_kind
- } else {
- let n_subids = subscriptions.subs.len();
- warn!(
- "got unknown eose subid {}, {} tracked subscriptions",
- subid, n_subids
- );
- return Ok(());
- };
-
- match sub_kind {
- SubKind::Timeline(_) => {
- // eose on timeline? whatevs
- }
- SubKind::Initial => {
- //let txn = Transaction::new(ctx.ndb)?;
- //unknowns::update_from_columns(
- // &txn,
- // ctx.unknown_ids,
- // timeline_cache,
- // ctx.ndb,
- // ctx.note_cache,
- //);
- //// this is possible if this is the first time
- //if ctx.unknown_ids.ready_to_send() {
- // unknown_id_send(ctx.unknown_ids, ctx.pool);
- //}
- }
-
- // oneshot subs just close when they're done
- SubKind::OneShot => {
- let msg = ClientMessage::close(subid.to_string());
- ctx.pool.send_to(&msg, relay_url);
- }
-
- SubKind::FetchingContactList(timeline_uid) => {
- let timeline = if let Some(tl) = timeline_cache.get_mut(timeline_uid) {
- tl
- } else {
- error!(
- "timeline uid:{:?} not found for FetchingContactList",
- timeline_uid
- );
- return Ok(());
- };
-
- let filter_state = timeline.filter.get_mut(relay_url);
-
- let FilterState::FetchingRemote(fetching_remote_type) = filter_state else {
- // TODO: we could have multiple contact list results, we need
- // to check to see if this one is newer and use that instead
- warn!(
- "Expected timeline to have FetchingRemote state but was {:?}",
- timeline.filter
- );
- return Ok(());
- };
-
- let new_filter_state = match fetching_remote_type {
- notedeck::filter::FetchingRemoteType::Normal(unified_subscription) => {
- FilterState::got_remote(unified_subscription.local)
- }
- notedeck::filter::FetchingRemoteType::Contact => {
- FilterState::GotRemote(notedeck::filter::GotRemoteType::Contact)
- }
- notedeck::filter::FetchingRemoteType::PeopleList => {
- FilterState::GotRemote(notedeck::filter::GotRemoteType::PeopleList)
- }
- };
-
- // We take the subscription id and pass it to the new state of
- // "GotRemote". This will let future frames know that it can try
- // to look for the contact list in nostrdb.
- timeline
- .filter
- .set_relay_state(relay_url.to_string(), new_filter_state);
- }
- }
-
- Ok(())
-}
-
-fn process_message(damus: &mut Damus, ctx: &mut AppContext<'_>, relay: &str, msg: &RelayMessage) {
- let RelayMessage::Eose(sid) = msg else {
- return;
- };
-
- if let Err(err) = handle_eose(
- &damus.subscriptions,
- &mut damus.timeline_cache,
- ctx,
- sid,
- relay,
+ app_ctx: &mut AppContext<'_>,
+) {
+ if let Err(err) = timeline::setup_initial_nostrdb_subs(
+ app_ctx.ndb,
+ app_ctx.note_cache,
+ timeline_cache,
+ app_ctx.unknown_ids,
+ *app_ctx.accounts.selected_account_pubkey(),
) {
- error!("error handling eose: {}", err);
+ warn!("update_damus init: {err}");
}
}
@@ -586,13 +487,15 @@ impl Damus {
let txn = Transaction::new(app_context.ndb).unwrap();
for col in &parsed_args.columns {
let timeline_kind = col.clone().into_timeline_kind();
+ let mut scoped_subs = app_context.remote.scoped_subs(app_context.accounts);
if let Some(add_result) = columns.add_new_timeline_column(
&mut timeline_cache,
&txn,
app_context.ndb,
app_context.note_cache,
- app_context.pool,
+ &mut scoped_subs,
&timeline_kind,
+ *app_context.accounts.selected_account_pubkey(),
) {
add_result.process(
app_context.ndb,
@@ -629,7 +532,6 @@ impl Damus {
let threads = Threads::default();
Self {
- subscriptions: Subscriptions::default(),
timeline_cache,
drafts: Drafts::default(),
state: DamusState::Initializing,
@@ -663,14 +565,6 @@ impl Damus {
get_active_columns(accounts, &self.decks_cache)
}
- pub fn gen_subid(&self, kind: &SubKind) -> String {
- if self.options.contains(AppOptions::Debug) {
- format!("{kind:?}")
- } else {
- Uuid::new_v4().to_string()
- }
- }
-
pub fn mock<P: AsRef<Path>>(data_path: P) -> Self {
let mut i18n = Localization::default();
let decks_cache = DecksCache::default_decks_cache(&mut i18n);
@@ -683,7 +577,6 @@ impl Damus {
let support = Support::new(&path);
Self {
- subscriptions: Subscriptions::default(),
timeline_cache: TimelineCache::default(),
drafts: Drafts::default(),
state: DamusState::Initializing,
@@ -703,10 +596,6 @@ impl Damus {
}
}
- pub fn subscriptions(&mut self) -> &mut HashMap<String, SubKind> {
- &mut self.subscriptions.subs
- }
-
pub fn unrecognized_args(&self) -> &BTreeSet<String> {
&self.unrecognized_args
}
@@ -840,13 +729,10 @@ fn render_damus_mobile(
let kp = enostr::Keypair::only_pubkey(pubkey);
let _ = app_ctx.accounts.add_account(kp);
- let txn = nostrdb::Transaction::new(app_ctx.ndb).expect("txn");
- app_ctx.accounts.select_account(
- &pubkey,
- app_ctx.ndb,
- &txn,
- app_ctx.pool,
- ui.ctx(),
+ app_ctx.select_account(&pubkey);
+ setup_selected_account_timeline_subs(
+ &mut app.timeline_cache,
+ app_ctx,
);
}
@@ -1022,7 +908,7 @@ fn timelines_view(
ctx.img_cache,
ctx.media_jobs.sender(),
current_route.as_ref(),
- ctx.pool,
+ ctx.remote.relay_inspect(),
)
.show(ui);
@@ -1096,14 +982,7 @@ fn timelines_view(
// StripBuilder rendering
let mut save_cols = false;
if let Some(action) = side_panel_action {
- save_cols = save_cols
- || action.process(
- &mut app.timeline_cache,
- &mut app.decks_cache,
- ctx,
- &mut app.subscriptions,
- ui.ctx(),
- );
+ save_cols = save_cols || action.process(&mut app.timeline_cache, &mut app.decks_cache, ctx);
}
let mut app_action: Option<AppAction> = None;
@@ -1124,9 +1003,8 @@ fn timelines_view(
let kp = enostr::Keypair::only_pubkey(pubkey);
let _ = ctx.accounts.add_account(kp);
- let txn = nostrdb::Transaction::new(ctx.ndb).expect("txn");
- ctx.accounts
- .select_account(&pubkey, ctx.ndb, &txn, ctx.pool, ui.ctx());
+ ctx.select_account(&pubkey);
+ setup_selected_account_timeline_subs(&mut app.timeline_cache, ctx);
}
ProcessNavResult::ExternalNoteAction(note_action) => {
diff --git a/crates/notedeck_columns/src/column.rs b/crates/notedeck_columns/src/column.rs
@@ -3,9 +3,9 @@ use crate::{
route::{ColumnsRouter, Route, SingletonRouter},
timeline::{Timeline, TimelineCache, TimelineKind},
};
-use enostr::RelayPool;
+use enostr::Pubkey;
use nostrdb::{Ndb, Transaction};
-use notedeck::NoteCache;
+use notedeck::{NoteCache, ScopedSubApi};
use std::iter::Iterator;
use tracing::warn;
@@ -101,18 +101,20 @@ impl Columns {
SelectionResult::NewSelection(selected_index)
}
+ #[allow(clippy::too_many_arguments)]
pub fn add_new_timeline_column(
&mut self,
timeline_cache: &mut TimelineCache,
txn: &Transaction,
ndb: &Ndb,
note_cache: &mut NoteCache,
- pool: &mut RelayPool,
+ scoped_subs: &mut ScopedSubApi<'_, '_>,
kind: &TimelineKind,
+ account_pk: Pubkey,
) -> Option<TimelineOpenResult> {
self.columns
.push(Column::new(vec![Route::timeline(kind.to_owned())]));
- timeline_cache.open(ndb, note_cache, txn, pool, kind, false)
+ timeline_cache.open(ndb, note_cache, txn, scoped_subs, kind, account_pk, false)
}
pub fn new_column_picker(&mut self) {
@@ -124,15 +126,15 @@ impl Columns {
pub fn insert_intermediary_routes(
&mut self,
timeline_cache: &mut TimelineCache,
+ account_pk: Pubkey,
intermediary_routes: Vec<IntermediaryRoute>,
) {
let routes = intermediary_routes
.into_iter()
.map(|r| match r {
- IntermediaryRoute::Timeline(mut timeline) => {
+ IntermediaryRoute::Timeline(timeline) => {
let route = Route::timeline(timeline.kind.clone());
- timeline.subscription.increment();
- timeline_cache.insert(timeline.kind.clone(), *timeline);
+ timeline_cache.insert(timeline.kind.clone(), account_pk, *timeline);
route
}
IntermediaryRoute::Route(route) => route,
diff --git a/crates/notedeck_columns/src/decks.rs b/crates/notedeck_columns/src/decks.rs
@@ -1,8 +1,8 @@
use std::collections::{hash_map::ValuesMut, HashMap};
-use enostr::{Pubkey, RelayPool};
+use enostr::Pubkey;
use nostrdb::Transaction;
-use notedeck::{tr, AppContext, Localization, FALLBACK_PUBKEY};
+use notedeck::{tr, AppContext, Localization, ScopedSubApi, FALLBACK_PUBKEY};
use tracing::{error, info};
use crate::{
@@ -171,14 +171,14 @@ impl DecksCache {
key: &Pubkey,
timeline_cache: &mut TimelineCache,
ndb: &mut nostrdb::Ndb,
- pool: &mut RelayPool,
+ scoped_subs: &mut ScopedSubApi<'_, '_>,
) {
let Some(decks) = self.account_to_decks.remove(key) else {
return;
};
info!("Removing decks for {:?}", key);
- decks.unsubscribe_all(timeline_cache, ndb, pool);
+ decks.unsubscribe_all(timeline_cache, ndb, scoped_subs);
if !self.account_to_decks.contains_key(&self.fallback_pubkey) {
self.account_to_decks
@@ -294,13 +294,13 @@ impl Decks {
index: usize,
timeline_cache: &mut TimelineCache,
ndb: &mut nostrdb::Ndb,
- pool: &mut enostr::RelayPool,
+ scoped_subs: &mut ScopedSubApi<'_, '_>,
) {
let Some(deck) = self.remove_deck_internal(index) else {
return;
};
- delete_deck(deck, timeline_cache, ndb, pool);
+ delete_deck(deck, timeline_cache, ndb, scoped_subs);
}
fn remove_deck_internal(&mut self, index: usize) -> Option<Deck> {
@@ -357,10 +357,10 @@ impl Decks {
self,
timeline_cache: &mut TimelineCache,
ndb: &mut nostrdb::Ndb,
- pool: &mut enostr::RelayPool,
+ scoped_subs: &mut ScopedSubApi<'_, '_>,
) {
for deck in self.decks {
- delete_deck(deck, timeline_cache, ndb, pool);
+ delete_deck(deck, timeline_cache, ndb, scoped_subs);
}
}
}
@@ -369,7 +369,7 @@ fn delete_deck(
mut deck: Deck,
timeline_cache: &mut TimelineCache,
ndb: &mut nostrdb::Ndb,
- pool: &mut enostr::RelayPool,
+ scoped_subs: &mut ScopedSubApi<'_, '_>,
) {
let cols = deck.columns_mut();
let num_cols = cols.num_columns();
@@ -377,7 +377,7 @@ fn delete_deck(
let kinds_to_pop = cols.delete_column(i);
for kind in &kinds_to_pop {
- if let Err(err) = timeline_cache.pop(kind, ndb, pool) {
+ if let Err(err) = timeline_cache.pop(kind, ndb, scoped_subs) {
error!("error popping timeline: {err}");
}
}
@@ -456,13 +456,15 @@ pub fn add_demo_columns(
let txn = Transaction::new(ctx.ndb).unwrap();
for kind in &timeline_kinds {
+ let mut scoped_subs = ctx.remote.scoped_subs(ctx.accounts);
if let Some(results) = columns.add_new_timeline_column(
timeline_cache,
&txn,
ctx.ndb,
ctx.note_cache,
- ctx.pool,
+ &mut scoped_subs,
kind,
+ pubkey,
) {
results.process(
ctx.ndb,
diff --git a/crates/notedeck_columns/src/lib.rs b/crates/notedeck_columns/src/lib.rs
@@ -15,7 +15,6 @@ mod draft;
mod key_parsing;
pub mod login_manager;
mod media_upload;
-mod multi_subscriber;
mod nav;
mod onboarding;
pub mod options;
@@ -23,8 +22,8 @@ mod post;
mod profile;
mod repost;
mod route;
+mod scoped_sub_owner_keys;
mod search;
-mod subscriptions;
mod support;
mod test_data;
pub mod timeline;
diff --git a/crates/notedeck_columns/src/multi_subscriber.rs b/crates/notedeck_columns/src/multi_subscriber.rs
@@ -1,574 +0,0 @@
-use egui_nav::ReturnType;
-use enostr::{Filter, NoteId, RelayPool};
-use hashbrown::HashMap;
-use nostrdb::{Ndb, Subscription};
-use notedeck::{filter::HybridFilter, UnifiedSubscription};
-use uuid::Uuid;
-
-use crate::{subscriptions, timeline::ThreadSelection};
-
-type RootNoteId = NoteId;
-
-#[derive(Default)]
-pub struct ThreadSubs {
- pub remotes: HashMap<RootNoteId, Remote>,
- scopes: HashMap<MetaId, Vec<Scope>>,
-}
-
-// column id
-type MetaId = usize;
-
-pub struct Remote {
- pub filter: Vec<Filter>,
- subid: String,
- dependers: usize,
-}
-
-impl std::fmt::Debug for Remote {
- fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
- f.debug_struct("Remote")
- .field("subid", &self.subid)
- .field("dependers", &self.dependers)
- .finish()
- }
-}
-
-struct Scope {
- pub root_id: NoteId,
- stack: Vec<Sub>,
-}
-
-pub struct Sub {
- pub selected_id: NoteId,
- pub sub: Subscription,
- pub filter: Vec<Filter>,
-}
-
-impl ThreadSubs {
- #[allow(clippy::too_many_arguments)]
- pub fn subscribe(
- &mut self,
- ndb: &mut Ndb,
- pool: &mut RelayPool,
- meta_id: usize,
- id: &ThreadSelection,
- local_sub_filter: Vec<Filter>,
- new_scope: bool,
- remote_sub_filter: impl FnOnce() -> Vec<Filter>,
- ) {
- let cur_scopes = self.scopes.entry(meta_id).or_default();
-
- let new_subs = if new_scope || cur_scopes.is_empty() {
- local_sub_new_scope(ndb, id, local_sub_filter, cur_scopes)
- } else {
- let cur_scope = cur_scopes.last_mut().expect("can't be empty");
- sub_current_scope(ndb, id, local_sub_filter, cur_scope)
- };
-
- let remote = match self.remotes.raw_entry_mut().from_key(&id.root_id.bytes()) {
- hashbrown::hash_map::RawEntryMut::Occupied(entry) => entry.into_mut(),
- hashbrown::hash_map::RawEntryMut::Vacant(entry) => {
- let (_, res) = entry.insert(
- NoteId::new(*id.root_id.bytes()),
- sub_remote(pool, remote_sub_filter, id),
- );
-
- res
- }
- };
-
- remote.dependers = remote.dependers.saturating_add_signed(new_subs);
- let num_dependers = remote.dependers;
- tracing::debug!(
- "Sub stats: num remotes: {}, num locals: {}, num remote dependers: {:?}",
- self.remotes.len(),
- self.scopes.len(),
- num_dependers,
- );
- }
-
- pub fn unsubscribe(
- &mut self,
- ndb: &mut Ndb,
- pool: &mut RelayPool,
- meta_id: usize,
- id: &ThreadSelection,
- return_type: ReturnType,
- ) {
- let Some(scopes) = self.scopes.get_mut(&meta_id) else {
- return;
- };
-
- let Some(remote) = self.remotes.get_mut(&id.root_id.bytes()) else {
- tracing::error!("somehow we're unsubscribing but we don't have a remote");
- return;
- };
-
- match return_type {
- ReturnType::Drag => {
- if let Some(scope) = scopes.last_mut() {
- let Some(cur_sub) = scope.stack.pop() else {
- tracing::error!("expected a scope to be left");
- return;
- };
-
- if scope.root_id.bytes() != id.root_id.bytes() {
- tracing::error!(
- "Somehow the current scope's root is not equal to the selected note's root. scope's root: {:?}, thread's root: {:?}",
- scope.root_id.hex(),
- id.root_id.bytes()
- );
- }
-
- if ndb_unsub(ndb, cur_sub.sub, id) {
- remote.dependers = remote.dependers.saturating_sub(1);
- }
-
- if scope.stack.is_empty() {
- scopes.pop();
- }
- }
- }
- ReturnType::Click => {
- let Some(scope) = scopes.pop() else {
- tracing::error!("called unsubscribe but there aren't any scopes left");
- return;
- };
-
- if scope.root_id.bytes() != id.root_id.bytes() {
- tracing::error!(
- "Somehow the current scope's root is not equal to the selected note's root. scope's root: {:?}, thread's root: {:?}",
- scope.root_id.hex(),
- id.root_id.bytes()
- );
- }
- for sub in scope.stack {
- if ndb_unsub(ndb, sub.sub, id) {
- remote.dependers = remote.dependers.saturating_sub(1);
- }
- }
- }
- }
-
- if scopes.is_empty() {
- self.scopes.remove(&meta_id);
- }
-
- let num_dependers = remote.dependers;
-
- if remote.dependers == 0 {
- let remote = self
- .remotes
- .remove(&id.root_id.bytes())
- .expect("code above should guarentee existence");
- tracing::debug!("Remotely unsubscribed: {}", remote.subid);
- pool.unsubscribe(remote.subid);
- }
-
- tracing::debug!(
- "unsub stats: num remotes: {}, num locals: {}, num remote dependers: {:?}",
- self.remotes.len(),
- self.scopes.len(),
- num_dependers,
- );
- }
-
- pub fn get_local(&self, meta_id: usize) -> Option<&Sub> {
- self.scopes
- .get(&meta_id)
- .as_ref()
- .and_then(|s| s.last())
- .and_then(|s| s.stack.last())
- }
-}
-
-fn sub_current_scope(
- ndb: &mut Ndb,
- selection: &ThreadSelection,
- local_sub_filter: Vec<Filter>,
- cur_scope: &mut Scope,
-) -> isize {
- let mut new_subs = 0;
-
- if selection.root_id.bytes() != cur_scope.root_id.bytes() {
- tracing::error!(
- "Somehow the current scope's root is not equal to the selected note's root"
- );
- }
-
- if let Some(sub) = ndb_sub(ndb, &local_sub_filter, selection) {
- cur_scope.stack.push(Sub {
- selected_id: NoteId::new(*selection.selected_or_root()),
- sub,
- filter: local_sub_filter,
- });
- new_subs += 1;
- }
-
- new_subs
-}
-
-fn ndb_sub(ndb: &Ndb, filter: &[Filter], id: impl std::fmt::Debug) -> Option<Subscription> {
- match ndb.subscribe(filter) {
- Ok(s) => Some(s),
- Err(e) => {
- tracing::error!("Failed to get subscription for {:?}: {e}", id);
- None
- }
- }
-}
-
-fn ndb_unsub(ndb: &mut Ndb, sub: Subscription, id: impl std::fmt::Debug) -> bool {
- match ndb.unsubscribe(sub) {
- Ok(_) => true,
- Err(e) => {
- tracing::error!("Failed to unsub {:?}: {e}", id);
- false
- }
- }
-}
-
-fn sub_remote(
- pool: &mut RelayPool,
- remote_sub_filter: impl FnOnce() -> Vec<Filter>,
- id: impl std::fmt::Debug,
-) -> Remote {
- let subid = Uuid::new_v4().to_string();
-
- let filter = remote_sub_filter();
-
- let remote = Remote {
- filter: filter.clone(),
- subid: subid.clone(),
- dependers: 0,
- };
-
- tracing::debug!("Remote subscribe for {:?}", id);
-
- pool.subscribe(subid, filter);
-
- remote
-}
-
-fn local_sub_new_scope(
- ndb: &mut Ndb,
- id: &ThreadSelection,
- local_sub_filter: Vec<Filter>,
- scopes: &mut Vec<Scope>,
-) -> isize {
- let Some(sub) = ndb_sub(ndb, &local_sub_filter, id) else {
- return 0;
- };
-
- scopes.push(Scope {
- root_id: id.root_id.to_note_id(),
- stack: vec![Sub {
- selected_id: NoteId::new(*id.selected_or_root()),
- sub,
- filter: local_sub_filter,
- }],
- });
-
- 1
-}
-
-#[derive(Debug)]
-pub struct TimelineSub {
- filter: Option<HybridFilter>,
- state: SubState,
-}
-
-#[derive(Debug, Clone)]
-enum SubState {
- NoSub {
- dependers: usize,
- },
- LocalOnly {
- local: Subscription,
- dependers: usize,
- },
- RemoteOnly {
- remote: String,
- dependers: usize,
- },
- Unified {
- unified: UnifiedSubscription,
- dependers: usize,
- },
-}
-
-impl Default for TimelineSub {
- fn default() -> Self {
- Self {
- state: SubState::NoSub { dependers: 0 },
- filter: None,
- }
- }
-}
-
-impl TimelineSub {
- /// Reset the subscription state, properly unsubscribing from ndb and
- /// relay pool before clearing.
- ///
- /// Used when the contact list changes and we need to rebuild the
- /// timeline with a new filter. Preserves the depender count so that
- /// shared subscription reference counting remains correct.
- pub fn reset(&mut self, ndb: &mut Ndb, pool: &mut RelayPool) {
- let before = self.state.clone();
-
- let dependers = match &self.state {
- SubState::NoSub { dependers } => *dependers,
-
- SubState::LocalOnly { local, dependers } => {
- if let Err(e) = ndb.unsubscribe(*local) {
- tracing::error!("TimelineSub::reset: failed to unsubscribe from ndb: {e}");
- }
- *dependers
- }
-
- SubState::RemoteOnly { remote, dependers } => {
- pool.unsubscribe(remote.to_owned());
- *dependers
- }
-
- SubState::Unified { unified, dependers } => {
- pool.unsubscribe(unified.remote.to_owned());
- if let Err(e) = ndb.unsubscribe(unified.local) {
- tracing::error!("TimelineSub::reset: failed to unsubscribe from ndb: {e}");
- }
- *dependers
- }
- };
-
- self.state = SubState::NoSub { dependers };
- self.filter = None;
-
- tracing::debug!("TimelineSub::reset: {:?} => {:?}", before, self.state);
- }
-
- pub fn try_add_local(&mut self, ndb: &Ndb, filter: &HybridFilter) {
- let before = self.state.clone();
- match &mut self.state {
- SubState::NoSub { dependers } => {
- let Some(sub) = ndb_sub(ndb, &filter.local().combined(), "") else {
- return;
- };
-
- self.filter = Some(filter.to_owned());
- self.state = SubState::LocalOnly {
- local: sub,
- dependers: *dependers,
- }
- }
- SubState::LocalOnly {
- local: _,
- dependers: _,
- } => {}
- SubState::RemoteOnly { remote, dependers } => {
- let Some(local) = ndb_sub(ndb, &filter.local().combined(), "") else {
- return;
- };
- self.state = SubState::Unified {
- unified: UnifiedSubscription {
- local,
- remote: remote.to_owned(),
- },
- dependers: *dependers,
- };
- }
- SubState::Unified {
- unified: _,
- dependers: _,
- } => {}
- }
- tracing::debug!(
- "TimelineSub::try_add_local: {:?} => {:?}",
- before,
- self.state
- );
- }
-
- pub fn force_add_remote(&mut self, subid: String) {
- let before = self.state.clone();
- match &mut self.state {
- SubState::NoSub { dependers } => {
- self.state = SubState::RemoteOnly {
- remote: subid,
- dependers: *dependers,
- }
- }
- SubState::LocalOnly { local, dependers } => {
- self.state = SubState::Unified {
- unified: UnifiedSubscription {
- local: *local,
- remote: subid,
- },
- dependers: *dependers,
- }
- }
- SubState::RemoteOnly {
- remote: _,
- dependers: _,
- } => {}
- SubState::Unified {
- unified: _,
- dependers: _,
- } => {}
- }
- tracing::debug!(
- "TimelineSub::force_add_remote: {:?} => {:?}",
- before,
- self.state
- );
- }
-
- pub fn try_add_remote(&mut self, pool: &mut RelayPool, filter: &HybridFilter) {
- let before = self.state.clone();
- match &mut self.state {
- SubState::NoSub { dependers } => {
- let subid = subscriptions::new_sub_id();
- pool.subscribe(subid.clone(), filter.remote().to_vec());
- self.filter = Some(filter.to_owned());
- self.state = SubState::RemoteOnly {
- remote: subid,
- dependers: *dependers,
- };
- }
- SubState::LocalOnly { local, dependers } => {
- let subid = subscriptions::new_sub_id();
- pool.subscribe(subid.clone(), filter.remote().to_vec());
- self.filter = Some(filter.to_owned());
- self.state = SubState::Unified {
- unified: UnifiedSubscription {
- local: *local,
- remote: subid,
- },
- dependers: *dependers,
- }
- }
- SubState::RemoteOnly {
- remote: _,
- dependers: _,
- } => {}
- SubState::Unified {
- unified: _,
- dependers: _,
- } => {}
- }
- tracing::debug!(
- "TimelineSub::try_add_remote: {:?} => {:?}",
- before,
- self.state
- );
- }
-
- pub fn increment(&mut self) {
- let before = self.state.clone();
- match &mut self.state {
- SubState::NoSub { dependers } => {
- *dependers += 1;
- }
- SubState::LocalOnly {
- local: _,
- dependers,
- } => {
- *dependers += 1;
- }
- SubState::RemoteOnly {
- remote: _,
- dependers,
- } => {
- *dependers += 1;
- }
- SubState::Unified {
- unified: _,
- dependers,
- } => {
- *dependers += 1;
- }
- }
-
- tracing::debug!("TimelineSub::increment: {:?} => {:?}", before, self.state);
- }
-
- pub fn get_local(&self) -> Option<Subscription> {
- match &self.state {
- SubState::NoSub { dependers: _ } => None,
- SubState::LocalOnly {
- local,
- dependers: _,
- } => Some(*local),
- SubState::RemoteOnly {
- remote: _,
- dependers: _,
- } => None,
- SubState::Unified {
- unified,
- dependers: _,
- } => Some(unified.local),
- }
- }
-
- pub fn unsubscribe_or_decrement(&mut self, ndb: &mut Ndb, pool: &mut RelayPool) {
- let before = self.state.clone();
- 's: {
- match &mut self.state {
- SubState::NoSub { dependers } => *dependers = dependers.saturating_sub(1),
- SubState::LocalOnly { local, dependers } => {
- if *dependers > 1 {
- *dependers = dependers.saturating_sub(1);
- break 's;
- }
-
- if let Err(e) = ndb.unsubscribe(*local) {
- tracing::error!("Could not unsub ndb: {e}");
- break 's;
- }
-
- self.state = SubState::NoSub { dependers: 0 };
- }
- SubState::RemoteOnly { remote, dependers } => {
- if *dependers > 1 {
- *dependers = dependers.saturating_sub(1);
- break 's;
- }
-
- pool.unsubscribe(remote.to_owned());
-
- self.state = SubState::NoSub { dependers: 0 };
- }
- SubState::Unified { unified, dependers } => {
- if *dependers > 1 {
- *dependers = dependers.saturating_sub(1);
- break 's;
- }
-
- pool.unsubscribe(unified.remote.to_owned());
-
- if let Err(e) = ndb.unsubscribe(unified.local) {
- tracing::error!("could not unsub ndb: {e}");
- self.state = SubState::LocalOnly {
- local: unified.local,
- dependers: *dependers,
- }
- } else {
- self.state = SubState::NoSub { dependers: 0 };
- }
- }
- }
- }
- tracing::debug!(
- "TimelineSub::unsubscribe_or_decrement: {:?} => {:?}",
- before,
- self.state
- );
- }
-
- pub fn get_filter(&self) -> Option<&HybridFilter> {
- self.filter.as_ref()
- }
-
- pub fn no_sub(&self) -> bool {
- matches!(self.state, SubState::NoSub { dependers: _ })
- }
-}
diff --git a/crates/notedeck_columns/src/nav.rs b/crates/notedeck_columns/src/nav.rs
@@ -1,6 +1,6 @@
use crate::{
accounts::{render_accounts_route, AccountsAction, AccountsResponse, AccountsRoute},
- app::{get_active_columns_mut, get_decks_mut},
+ app::{get_active_columns_mut, get_decks_mut, setup_selected_account_timeline_subs},
column::ColumnsAction,
deck_state::DeckState,
decks::{Deck, DecksAction, DecksCache},
@@ -8,11 +8,9 @@ use crate::{
profile::{ProfileAction, SaveProfileChanges},
repost::RepostAction,
route::{cleanup_popped_route, ColumnsRouter, Route, SingletonRouter},
- subscriptions::Subscriptions,
timeline::{
- kind::ListKind,
route::{render_thread_route, render_timeline_route},
- TimelineCache, TimelineKind,
+ TimelineCache,
},
ui::{
self,
@@ -35,7 +33,7 @@ use crate::{
use egui_nav::{
Nav, NavAction, NavResponse, NavUiType, PopupResponse, PopupSheet, RouteResponse, Split,
};
-use enostr::{ProfileState, RelayPool};
+use enostr::ProfileState;
use nostrdb::{Filter, Ndb, Transaction};
use notedeck::{
get_current_default_msats, nav::DragResponse, tr, ui::is_narrow, Accounts, AppContext,
@@ -90,36 +88,18 @@ impl SwitchingAction {
timeline_cache: &mut TimelineCache,
decks_cache: &mut DecksCache,
ctx: &mut AppContext<'_>,
- subs: &mut Subscriptions,
- ui_ctx: &egui::Context,
) -> bool {
match &self {
SwitchingAction::Accounts(account_action) => match account_action {
AccountsAction::Switch(switch_action) => {
- {
- let txn = Transaction::new(ctx.ndb).expect("txn");
- ctx.accounts.select_account(
- &switch_action.switch_to,
- ctx.ndb,
- &txn,
- ctx.pool,
- ui_ctx,
- );
-
- let contacts_sub = ctx.accounts.get_subs().contacts.remote.clone();
- // this is cringe but we're gonna get a new sub manager soon...
- subs.subs.insert(
- contacts_sub,
- crate::subscriptions::SubKind::FetchingContactList(TimelineKind::List(
- ListKind::Contact(*ctx.accounts.selected_account_pubkey()),
- )),
- );
- }
+ ctx.select_account(&switch_action.switch_to);
if switch_action.switching_to_new {
decks_cache.add_deck_default(ctx, timeline_cache, switch_action.switch_to);
}
+ setup_selected_account_timeline_subs(timeline_cache, ctx);
+
// pop nav after switch
get_active_columns_mut(ctx.i18n, ctx.accounts, decks_cache)
.column_mut(switch_action.source_column)
@@ -127,14 +107,18 @@ impl SwitchingAction {
.go_back();
}
AccountsAction::Remove(to_remove) => 's: {
- if !ctx
- .accounts
- .remove_account(to_remove, ctx.ndb, ctx.pool, ui_ctx)
- {
+ if !ctx.remove_account(to_remove) {
break 's;
}
- decks_cache.remove(ctx.i18n, to_remove, timeline_cache, ctx.ndb, ctx.pool);
+ let mut scoped_subs = ctx.remote.scoped_subs(ctx.accounts);
+ decks_cache.remove(
+ ctx.i18n,
+ to_remove,
+ timeline_cache,
+ ctx.ndb,
+ &mut scoped_subs,
+ );
}
},
SwitchingAction::Columns(columns_action) => match *columns_action {
@@ -142,7 +126,8 @@ impl SwitchingAction {
let kinds_to_pop = get_active_columns_mut(ctx.i18n, ctx.accounts, decks_cache)
.delete_column(index);
for kind in &kinds_to_pop {
- if let Err(err) = timeline_cache.pop(kind, ctx.ndb, ctx.pool) {
+ let mut scoped_subs = ctx.remote.scoped_subs(ctx.accounts);
+ if let Err(err) = timeline_cache.pop(kind, ctx.ndb, &mut scoped_subs) {
error!("error popping timeline: {err}");
}
}
@@ -157,11 +142,12 @@ impl SwitchingAction {
get_decks_mut(ctx.i18n, ctx.accounts, decks_cache).set_active(index)
}
DecksAction::Removing(index) => {
+ let mut scoped_subs = ctx.remote.scoped_subs(ctx.accounts);
get_decks_mut(ctx.i18n, ctx.accounts, decks_cache).remove_deck(
index,
timeline_cache,
ctx.ndb,
- ctx.pool,
+ &mut scoped_subs,
);
}
},
@@ -259,6 +245,7 @@ fn process_popup_resp(
process_result
}
+#[profiling::function]
fn process_nav_resp(
app: &mut Damus,
ctx: &mut AppContext<'_>,
@@ -294,9 +281,10 @@ fn process_nav_resp(
route,
&mut app.timeline_cache,
&mut app.threads,
+ &mut app.onboarding,
&mut app.view_state,
ctx.ndb,
- ctx.pool,
+ &mut ctx.remote.scoped_subs(ctx.accounts),
return_type,
col,
);
@@ -312,14 +300,17 @@ fn process_nav_resp(
.data_mut(|d| d.insert_temp(toolbar_visible_id, true));
handle_navigating_edit_profile(ctx.ndb, ctx.accounts, app, col);
- handle_navigating_timeline(
- ctx.ndb,
- ctx.note_cache,
- ctx.pool,
- ctx.accounts,
- app,
- col,
- );
+ {
+ let mut scoped_subs = ctx.remote.scoped_subs(ctx.accounts);
+ handle_navigating_timeline(
+ ctx.ndb,
+ ctx.note_cache,
+ &mut scoped_subs,
+ ctx.accounts,
+ app,
+ col,
+ );
+ }
let cur_router = app
.columns_mut(ctx.i18n, ctx.accounts)
@@ -343,14 +334,17 @@ fn process_nav_resp(
.select_column(col as i32);
handle_navigating_edit_profile(ctx.ndb, ctx.accounts, app, col);
- handle_navigating_timeline(
- ctx.ndb,
- ctx.note_cache,
- ctx.pool,
- ctx.accounts,
- app,
- col,
- );
+ {
+ let mut scoped_subs = ctx.remote.scoped_subs(ctx.accounts);
+ handle_navigating_timeline(
+ ctx.ndb,
+ ctx.note_cache,
+ &mut scoped_subs,
+ ctx.accounts,
+ app,
+ col,
+ );
+ }
}
}
}
@@ -399,26 +393,36 @@ fn handle_navigating_edit_profile(ndb: &Ndb, accounts: &Accounts, app: &mut Damu
fn handle_navigating_timeline(
ndb: &Ndb,
note_cache: &mut NoteCache,
- pool: &mut RelayPool,
+ scoped_subs: &mut notedeck::ScopedSubApi<'_, '_>,
accounts: &Accounts,
app: &mut Damus,
col: usize,
) {
+ let account_pk = accounts.selected_account_pubkey();
let kind = {
let Route::Timeline(kind) = app.columns(accounts).column(col).router().top() else {
return;
};
- if app.timeline_cache.get(kind).is_some() {
- return;
+ if let Some(timeline) = app.timeline_cache.get(kind) {
+ if timeline.subscription.dependers(account_pk) > 0 {
+ return;
+ }
}
kind.to_owned()
};
let txn = Transaction::new(ndb).expect("txn");
- app.timeline_cache
- .open(ndb, note_cache, &txn, pool, &kind, false);
+ app.timeline_cache.open(
+ ndb,
+ note_cache,
+ &txn,
+ scoped_subs,
+ &kind,
+ *account_pk,
+ false,
+ );
}
pub enum RouterAction {
@@ -512,6 +516,7 @@ impl RouterAction {
}
}
+#[profiling::function]
fn process_render_nav_action(
app: &mut Damus,
ctx: &mut AppContext<'_>,
@@ -526,7 +531,8 @@ fn process_render_nav_action(
let kinds_to_pop = app.columns_mut(ctx.i18n, ctx.accounts).delete_column(col);
for kind in &kinds_to_pop {
- if let Err(err) = app.timeline_cache.pop(kind, ctx.ndb, ctx.pool) {
+ let mut scoped_subs = ctx.remote.scoped_subs(ctx.accounts);
+ if let Err(err) = app.timeline_cache.pop(kind, ctx.ndb, &mut scoped_subs) {
error!("error popping timeline: {err}");
}
}
@@ -535,7 +541,8 @@ fn process_render_nav_action(
}
RenderNavAction::PostAction(new_post_action) => {
let txn = Transaction::new(ctx.ndb).expect("txn");
- match new_post_action.execute(ctx.ndb, &txn, ctx.pool, &mut app.drafts) {
+ let mut publisher = ctx.remote.publisher(ctx.accounts);
+ match new_post_action.execute(ctx.ndb, &txn, &mut publisher, &mut app.drafts) {
Err(err) => tracing::error!("Error executing post action: {err}"),
Ok(_) => tracing::debug!("Post action executed"),
}
@@ -563,7 +570,7 @@ fn process_render_nav_action(
&mut app.timeline_cache,
&mut app.threads,
ctx.note_cache,
- ctx.pool,
+ &mut ctx.remote,
&txn,
ctx.unknown_ids,
ctx.accounts,
@@ -576,13 +583,7 @@ fn process_render_nav_action(
)
}
RenderNavAction::SwitchingAction(switching_action) => {
- if switching_action.process(
- &mut app.timeline_cache,
- &mut app.decks_cache,
- ctx,
- &mut app.subscriptions,
- ui.ctx(),
- ) {
+ if switching_action.process(&mut app.timeline_cache, &mut app.decks_cache, ctx) {
return Some(ProcessNavResult::SwitchOccurred);
} else {
return None;
@@ -594,23 +595,25 @@ fn process_render_nav_action(
ctx.i18n,
ui.ctx(),
ctx.ndb,
- ctx.pool,
+ &mut ctx.remote,
ctx.accounts,
),
RenderNavAction::WalletAction(wallet_action) => {
wallet_action.process(ctx.accounts, ctx.global_wallet)
}
RenderNavAction::RelayAction(action) => {
- ctx.accounts
- .process_relay_action(ui.ctx(), ctx.pool, action);
+ ctx.process_relay_action(action);
None
}
RenderNavAction::SettingsAction(action) => {
action.process_settings_action(app, ctx, ui.ctx())
}
- RenderNavAction::RepostAction(action) => {
- action.process(ctx.ndb, &ctx.accounts.get_selected_account().key, ctx.pool)
- }
+ RenderNavAction::RepostAction(action) => action.process(
+ ctx.ndb,
+ &ctx.accounts.get_selected_account().key,
+ ctx.accounts,
+ &mut ctx.remote,
+ ),
RenderNavAction::ShowFollowing(pubkey) => Some(RouterAction::RouteTo(
crate::route::Route::Following(pubkey),
RouterType::Stack,
@@ -648,7 +651,6 @@ fn render_nav_body(
img_cache: ctx.img_cache,
note_cache: ctx.note_cache,
zaps: ctx.zaps,
- pool: ctx.pool,
jobs: ctx.media_jobs.sender(),
unknown_ids: ctx.unknown_ids,
nip05_cache: ctx.nip05_cache,
@@ -718,9 +720,14 @@ fn render_nav_body(
}
})
}
- Route::Relays => RelayView::new(ctx.pool, &mut app.view_state.id_string_map, ctx.i18n)
- .ui(ui)
- .map_output(RenderNavAction::RelayAction),
+ Route::Relays => RelayView::new(
+ ctx.remote.relay_inspect(),
+ ctx.accounts.selected_account_advertised_relays(),
+ &mut app.view_state.id_string_map,
+ ctx.i18n,
+ )
+ .ui(ui)
+ .map_output(RenderNavAction::RelayAction),
Route::Settings => {
let db_path = ctx.args.db_path(ctx.path);
@@ -1170,7 +1177,13 @@ fn render_nav_body(
ui::report::ReportView::new(&mut app.view_state.selected_report_type).show(ui);
if let Some(report_type) = resp {
- notedeck::send_report_event(ctx.ndb, ctx.pool, kp, target, report_type);
+ notedeck::send_report_event(
+ ctx.ndb,
+ &mut ctx.remote.publisher(ctx.accounts),
+ kp,
+ target,
+ report_type,
+ );
app.view_state.selected_report_type = None;
return DragResponse::output(Some(RenderNavAction::Back));
}
diff --git a/crates/notedeck_columns/src/onboarding.rs b/crates/notedeck_columns/src/onboarding.rs
@@ -1,30 +1,38 @@
use std::{cell::RefCell, rc::Rc};
use egui_virtual_list::VirtualList;
-use enostr::{Pubkey, RelayPool};
+use enostr::Pubkey;
use nostrdb::{Filter, Ndb, NoteKey, Transaction};
-use notedeck::{create_nip51_set, filter::default_limit, Nip51SetCache, UnknownIds};
-use uuid::Uuid;
-
-use crate::subscriptions::Subscriptions;
+use notedeck::{
+ create_nip51_set, filter::default_limit, Nip51SetCache, RelaySelection, ScopedSubApi,
+ ScopedSubIdentity, SubConfig, SubKey, SubOwnerKey, UnknownIds,
+};
#[derive(Debug)]
enum OnboardingState {
AwaitingTrustedPksList(Vec<Filter>),
- HaveFollowPacks(Nip51SetCache),
+ HaveFollowPacks { packs: Nip51SetCache },
}
-/// Manages the onboarding process. Responsible for retriving the kind 30000 list of trusted pubkeys
-/// and then retrieving all follow packs from the trusted pks updating when new ones arrive
+/// Manages onboarding discovery of trusted follow packs.
+///
+/// This first requests the trusted-author list (kind `30000`) and then
+/// installs a scoped account subscription for follow packs from those authors.
#[derive(Default)]
pub struct Onboarding {
state: Option<Result<OnboardingState, OnboardingError>>,
pub list: Rc<RefCell<VirtualList>>,
}
+/// Side effects emitted by one `Onboarding::process` pass.
+pub enum OnboardingEffect {
+ /// Request a one-shot fetch for the provided filters.
+ Oneshot(Vec<Filter>),
+}
+
impl Onboarding {
pub fn get_follow_packs(&self) -> Option<&Nip51SetCache> {
- let Some(Ok(OnboardingState::HaveFollowPacks(packs))) = &self.state else {
+ let Some(Ok(OnboardingState::HaveFollowPacks { packs, .. })) = &self.state else {
return None;
};
@@ -32,71 +40,79 @@ impl Onboarding {
}
pub fn get_follow_packs_mut(&mut self) -> Option<&mut Nip51SetCache> {
- let Some(Ok(OnboardingState::HaveFollowPacks(packs))) = &mut self.state else {
+ let Some(Ok(OnboardingState::HaveFollowPacks { packs, .. })) = &mut self.state else {
return None;
};
Some(packs)
}
+ #[allow(clippy::too_many_arguments)]
pub fn process(
&mut self,
- pool: &mut RelayPool,
+ scoped_subs: &mut ScopedSubApi<'_, '_>,
+ owner: SubOwnerKey,
ndb: &Ndb,
- subs: &mut Subscriptions,
unknown_ids: &mut UnknownIds,
- ) {
+ ) -> Option<OnboardingEffect> {
match &self.state {
Some(res) => {
let Ok(OnboardingState::AwaitingTrustedPksList(filter)) = res else {
- return;
+ return None;
};
let txn = Transaction::new(ndb).expect("txns");
let Ok(res) = ndb.query(&txn, filter, 1) else {
- return;
+ return None;
};
if res.is_empty() {
- return;
+ return None;
}
let key = res.first().expect("checked empty").note_key;
let new_state = get_trusted_authors(ndb, &txn, key).and_then(|trusted_pks| {
let pks: Vec<&[u8; 32]> = trusted_pks.iter().map(|f| f.bytes()).collect();
- Nip51SetCache::new(pool, ndb, &txn, unknown_ids, vec![follow_packs_filter(pks)])
- .map(OnboardingState::HaveFollowPacks)
+ let follow_filter = follow_packs_filter(pks);
+ let sub_key = follow_packs_sub_key();
+ let identity = ScopedSubIdentity::account(owner, sub_key);
+ let sub_config = SubConfig {
+ relays: RelaySelection::AccountsRead,
+ filters: vec![follow_filter.clone()],
+ use_transparent: false,
+ };
+ let _ = scoped_subs.ensure_sub(identity, sub_config);
+
+ Nip51SetCache::new_local(ndb, &txn, unknown_ids, vec![follow_filter])
+ .map(|packs| OnboardingState::HaveFollowPacks { packs })
.ok_or(OnboardingError::InvalidNip51Set)
});
self.state = Some(new_state);
+ None
}
None => {
let filter = vec![trusted_pks_list_filter()];
-
- let subid = Uuid::new_v4().to_string();
- pool.subscribe(subid.clone(), filter.clone());
- subs.subs
- .insert(subid, crate::subscriptions::SubKind::OneShot);
-
let new_state = Some(Ok(OnboardingState::AwaitingTrustedPksList(filter)));
self.state = new_state;
+ let Some(Ok(OnboardingState::AwaitingTrustedPksList(filters))) = &self.state else {
+ return None;
+ };
+
+ Some(OnboardingEffect::Oneshot(filters.clone()))
}
}
}
// Unsubscribe and clear state
- pub fn end_onboarding(&mut self, pool: &mut RelayPool, ndb: &mut Ndb) {
- let Some(Ok(OnboardingState::HaveFollowPacks(state))) = &mut self.state else {
+ pub fn end_onboarding(&mut self, ndb: &mut Ndb) {
+ let Some(Ok(OnboardingState::HaveFollowPacks { packs })) = &mut self.state else {
self.state = None;
return;
};
- let unified = &state.sub;
-
- pool.unsubscribe(unified.remote.clone());
- let _ = ndb.unsubscribe(unified.local);
+ let _ = ndb.unsubscribe(packs.local_sub());
self.state = None;
}
@@ -104,11 +120,19 @@ impl Onboarding {
#[derive(Debug)]
pub enum OnboardingError {
+ /// Follow-pack note could not be parsed as a valid NIP-51 set.
InvalidNip51Set,
+ /// Trusted-author note exists but is not kind `30000`.
InvalidTrustedPksListKind,
+ /// Trusted-author note key could not be resolved from NostrDB.
NdbCouldNotFindNote,
}
+#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
+enum OnboardingScopedSub {
+ FollowPacks,
+}
+
// author providing the list of trusted follow pack authors
const FOLLOW_PACK_AUTHOR: [u8; 32] = [
0x89, 0x5c, 0x2a, 0x90, 0xa8, 0x60, 0xac, 0x18, 0x43, 0x4a, 0xa6, 0x9e, 0x7b, 0x0d, 0xa8, 0x46,
@@ -132,6 +156,10 @@ pub fn follow_packs_filter(pks: Vec<&[u8; 32]>) -> Filter {
.build()
}
+fn follow_packs_sub_key() -> SubKey {
+ SubKey::builder(OnboardingScopedSub::FollowPacks).finish()
+}
+
/// gets the pubkeys from a kind 30000 follow set
fn get_trusted_authors(
ndb: &Ndb,
@@ -152,3 +180,79 @@ fn get_trusted_authors(
Ok(nip51set.pks)
}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use enostr::{OutboxPool, OutboxSessionHandler};
+    use nostrdb::Config;
+    use notedeck::{Accounts, EguiWakeup, ScopedSubsState, FALLBACK_PUBKEY};
+    use tempfile::TempDir;
+
+    /// Builds the minimal state needed to drive `Onboarding::process` in a
+    /// test: a temp-dir-backed `Ndb`, an `Accounts` with a single example
+    /// relay and the fallback pubkey, plus empty scoped-sub and outbox state.
+    /// The `TempDir` is returned in the tuple so the database directory is
+    /// not deleted while the `Ndb` handle is still alive.
+    fn test_harness() -> (
+        TempDir,
+        Ndb,
+        Accounts,
+        UnknownIds,
+        ScopedSubsState,
+        OutboxPool,
+    ) {
+        let tmp = TempDir::new().expect("tmp dir");
+        let mut ndb = Ndb::new(tmp.path().to_str().expect("path"), &Config::new()).expect("ndb");
+        let txn = Transaction::new(&ndb).expect("txn");
+        let mut unknown_ids = UnknownIds::default();
+        let accounts = Accounts::new(
+            None,
+            vec!["wss://relay-onboarding.example.com".to_owned()],
+            FALLBACK_PUBKEY(),
+            &mut ndb,
+            &txn,
+            &mut unknown_ids,
+        );
+
+        (
+            tmp,
+            ndb,
+            accounts,
+            unknown_ids,
+            ScopedSubsState::default(),
+            OutboxPool::default(),
+        )
+    }
+
+    /// Verifies onboarding emits a one-time oneshot effect on first process call
+    /// and does not emit duplicate oneshot effects on subsequent calls.
+    #[test]
+    fn process_initially_emits_oneshot_effect_once() {
+        let (_tmp, ndb, accounts, mut unknown_ids, mut scoped_sub_state, mut pool) = test_harness();
+        // Arbitrary but stable owner key for the scoped-sub slot under test.
+        let owner = SubOwnerKey::new(("onboarding", 1usize));
+        let mut onboarding = Onboarding::default();
+
+        // First call: onboarding should request the trusted-pks list remotely.
+        let first = {
+            let mut outbox =
+                OutboxSessionHandler::new(&mut pool, EguiWakeup::new(egui::Context::default()));
+            let mut scoped_subs = scoped_sub_state.api(&mut outbox, &accounts);
+            onboarding.process(&mut scoped_subs, owner, &ndb, &mut unknown_ids)
+        };
+
+        match first {
+            Some(OnboardingEffect::Oneshot(filters)) => {
+                // Exactly one filter, matching the trusted-pks list filter
+                // (compared via JSON serialization).
+                assert_eq!(filters.len(), 1);
+                assert_eq!(
+                    filters[0].json().expect("json"),
+                    trusted_pks_list_filter().json().expect("json")
+                );
+            }
+            None => panic!("expected onboarding oneshot effect"),
+        }
+
+        // Second call with the same state: no duplicate oneshot may be emitted.
+        let second = {
+            let mut outbox =
+                OutboxSessionHandler::new(&mut pool, EguiWakeup::new(egui::Context::default()));
+            let mut scoped_subs = scoped_sub_state.api(&mut outbox, &accounts);
+            onboarding.process(&mut scoped_subs, owner, &ndb, &mut unknown_ids)
+        };
+
+        assert!(second.is_none());
+    }
+}
diff --git a/crates/notedeck_columns/src/profile.rs b/crates/notedeck_columns/src/profile.rs
@@ -1,9 +1,9 @@
-use enostr::{FilledKeypair, FullKeypair, ProfileState, Pubkey, RelayPool};
+use enostr::{FilledKeypair, FullKeypair, ProfileState, Pubkey};
use nostrdb::{Ndb, Note, NoteBuildOptions, NoteBuilder, Transaction};
use notedeck::{
- builder_from_note, send_mute_event, send_note_builder, Accounts, ContactState, DataPath,
- Localization, ProfileContext,
+ builder_from_note, note::publish::publish_note_builder, send_mute_event, Accounts,
+ ContactState, DataPath, Localization, ProfileContext, PublishApi, RelayType, RemoteApi,
};
use tracing::info;
@@ -53,7 +53,7 @@ impl ProfileAction {
i18n: &mut Localization,
ctx: &egui::Context,
ndb: &Ndb,
- pool: &mut RelayPool,
+ remote: &mut RemoteApi<'_>,
accounts: &Accounts,
) -> Option<RouterAction> {
match self {
@@ -74,16 +74,19 @@ impl ProfileAction {
let _ = ndb.process_event_with(&json, nostrdb::IngestMetadata::new().client(true));
info!("sending {}", &json);
- pool.send(&event);
+ let mut publisher = remote.publisher(accounts);
+ publisher.publish_note(¬e, RelayType::AccountsWrite);
Some(RouterAction::GoBack)
}
ProfileAction::Follow(target_key) => {
- Self::send_follow_user_event(ndb, pool, accounts, target_key);
+ let mut publisher = remote.publisher(accounts);
+ Self::send_follow_user_event(ndb, &mut publisher, accounts, target_key);
None
}
ProfileAction::Unfollow(target_key) => {
- Self::send_unfollow_user_event(ndb, pool, accounts, target_key);
+ let mut publisher = remote.publisher(accounts);
+ Self::send_unfollow_user_event(ndb, &mut publisher, accounts, target_key);
None
}
ProfileAction::Context(profile_context) => {
@@ -119,17 +122,25 @@ impl ProfileAction {
let kp = accounts.get_selected_account().key.to_full()?;
let muted = accounts.mute();
let txn = Transaction::new(ndb).expect("txn");
+ let publisher = &mut remote.publisher(accounts);
if muted.is_pk_muted(profile_context.profile.bytes()) {
notedeck::send_unmute_event(
ndb,
&txn,
- pool,
+ publisher,
kp,
&muted,
&profile_context.profile,
);
} else {
- send_mute_event(ndb, &txn, pool, kp, &muted, &profile_context.profile);
+ send_mute_event(
+ ndb,
+ &txn,
+ publisher,
+ kp,
+ &muted,
+ &profile_context.profile,
+ );
}
None
}
@@ -156,20 +167,20 @@ impl ProfileAction {
fn send_follow_user_event(
ndb: &Ndb,
- pool: &mut RelayPool,
+ publisher: &mut PublishApi<'_, '_>,
accounts: &Accounts,
target_key: &Pubkey,
) {
- send_kind_3_event(ndb, pool, accounts, FollowAction::Follow(target_key));
+ send_kind_3_event(ndb, publisher, accounts, FollowAction::Follow(target_key));
}
fn send_unfollow_user_event(
ndb: &Ndb,
- pool: &mut RelayPool,
+ publisher: &mut PublishApi<'_, '_>,
accounts: &Accounts,
target_key: &Pubkey,
) {
- send_kind_3_event(ndb, pool, accounts, FollowAction::Unfollow(target_key));
+ send_kind_3_event(ndb, publisher, accounts, FollowAction::Unfollow(target_key));
}
}
@@ -178,7 +189,12 @@ enum FollowAction<'a> {
Unfollow(&'a Pubkey),
}
-fn send_kind_3_event(ndb: &Ndb, pool: &mut RelayPool, accounts: &Accounts, action: FollowAction) {
+fn send_kind_3_event(
+ ndb: &Ndb,
+ publisher: &mut PublishApi<'_, '_>,
+ accounts: &Accounts,
+ action: FollowAction,
+) {
let Some(kp) = accounts.get_selected_account().key.to_full() else {
return;
};
@@ -238,13 +254,13 @@ fn send_kind_3_event(ndb: &Ndb, pool: &mut RelayPool, accounts: &Accounts, actio
),
};
- send_note_builder(builder, ndb, pool, kp);
+ publish_note_builder(builder, ndb, publisher, kp);
}
pub fn send_new_contact_list(
kp: FilledKeypair,
ndb: &Ndb,
- pool: &mut RelayPool,
+ publisher: &mut PublishApi<'_, '_>,
mut pks_to_follow: Vec<Pubkey>,
) {
if !pks_to_follow.contains(kp.pubkey) {
@@ -253,7 +269,7 @@ pub fn send_new_contact_list(
let builder = construct_new_contact_list(pks_to_follow);
- send_note_builder(builder, ndb, pool, kp);
+ publish_note_builder(builder, ndb, publisher, kp);
}
fn construct_new_contact_list<'a>(pks: Vec<Pubkey>) -> NoteBuilder<'a> {
@@ -269,8 +285,12 @@ fn construct_new_contact_list<'a>(pks: Vec<Pubkey>) -> NoteBuilder<'a> {
builder
}
-pub fn send_default_dms_relay_list(kp: FilledKeypair<'_>, ndb: &Ndb, pool: &mut RelayPool) {
- send_note_builder(construct_default_dms_relay_list(), ndb, pool, kp);
+pub fn send_default_dms_relay_list(
+ kp: FilledKeypair<'_>,
+ ndb: &Ndb,
+ publisher: &mut PublishApi<'_, '_>,
+) {
+ publish_note_builder(construct_default_dms_relay_list(), ndb, publisher, kp);
}
fn construct_default_dms_relay_list<'a>() -> NoteBuilder<'a> {
diff --git a/crates/notedeck_columns/src/repost.rs b/crates/notedeck_columns/src/repost.rs
@@ -1,5 +1,6 @@
-use enostr::{Keypair, NoteId, RelayPool};
+use enostr::{Keypair, NoteId, RelayId};
use nostrdb::{Ndb, Note, NoteBuilder, Transaction};
+use notedeck::{Accounts, RelayType, RemoteApi};
use crate::{nav::RouterAction, Route};
@@ -7,7 +8,7 @@ pub fn generate_repost_event<'a>(
ndb: &'a Ndb,
noteid_to_repost: &NoteId,
signer_nsec: &[u8; 32],
- pool: &RelayPool,
+ accounts: &Accounts,
) -> Result<Note<'a>, String> {
let txn = Transaction::new(ndb).expect("txn");
let note_to_repost = ndb
@@ -21,7 +22,14 @@ pub fn generate_repost_event<'a>(
));
}
- let urls = pool.urls();
+ let urls: Vec<String> = accounts
+ .selected_account_write_relays()
+ .into_iter()
+ .filter_map(|r| match r {
+ RelayId::Websocket(url) => Some(url.to_string()),
+ RelayId::Multicast => None,
+ })
+ .collect();
let Some(relay) = urls.first() else {
return Err(
"relay pool does not have any relays. This makes meeting the repost spec impossible"
@@ -59,7 +67,8 @@ impl RepostAction {
self,
ndb: &nostrdb::Ndb,
current_user: &Keypair,
- pool: &mut RelayPool,
+ accounts: &Accounts,
+ remote: &mut RemoteApi<'_>,
) -> Option<RouterAction> {
match self {
RepostAction::Quote(note_id) => {
@@ -75,7 +84,7 @@ impl RepostAction {
ndb,
¬e_id,
&full_user.secret_key.secret_bytes(),
- pool,
+ accounts,
)
.inspect_err(|e| tracing::error!("failure to generate repost event: {e}"))
.ok()?;
@@ -92,7 +101,8 @@ impl RepostAction {
let _ = ndb.process_event_with(&json, nostrdb::IngestMetadata::new().client(true));
- pool.send(event);
+ let mut publisher = remote.publisher(accounts);
+ publisher.publish_note(&repost_ev, RelayType::AccountsWrite);
Some(RouterAction::GoBack)
}
diff --git a/crates/notedeck_columns/src/route.rs b/crates/notedeck_columns/src/route.rs
@@ -1,14 +1,16 @@
use egui_nav::{Percent, ReturnType};
-use enostr::{NoteId, Pubkey, RelayPool};
+use enostr::{NoteId, Pubkey};
use nostrdb::Ndb;
use notedeck::{
tr, Localization, NoteZapTargetOwned, ReplacementType, ReportTarget, RootNoteIdBuf, Router,
- WalletType,
+ ScopedSubApi, WalletType,
};
use std::ops::Range;
use crate::{
accounts::AccountsRoute,
+ onboarding::Onboarding,
+ scoped_sub_owner_keys::onboarding_owner_key,
timeline::{kind::ColumnTitle, thread::Threads, ThreadSelection, TimelineCache, TimelineKind},
ui::add_column::{AddAlgoRoute, AddColumnRoute},
view_state::ViewState,
@@ -797,24 +799,29 @@ pub fn cleanup_popped_route(
route: &Route,
timeline_cache: &mut TimelineCache,
threads: &mut Threads,
+ onboarding: &mut Onboarding,
view_state: &mut ViewState,
ndb: &mut Ndb,
- pool: &mut RelayPool,
+ scoped_subs: &mut ScopedSubApi,
return_type: ReturnType,
col_index: usize,
) {
match route {
Route::Timeline(kind) => {
- if let Err(err) = timeline_cache.pop(kind, ndb, pool) {
+ if let Err(err) = timeline_cache.pop(kind, ndb, scoped_subs) {
tracing::error!("popping timeline had an error: {err} for {:?}", kind);
}
}
Route::Thread(selection) => {
- threads.close(ndb, pool, selection, return_type, col_index);
+ threads.close(ndb, scoped_subs, selection, return_type, col_index);
}
Route::EditProfile(pk) => {
view_state.pubkey_to_profile_state.remove(pk);
}
+ Route::Accounts(AccountsRoute::Onboarding) => {
+ onboarding.end_onboarding(ndb);
+ let _ = scoped_subs.drop_owner(onboarding_owner_key(col_index));
+ }
_ => {}
}
}
diff --git a/crates/notedeck_columns/src/scoped_sub_owner_keys.rs b/crates/notedeck_columns/src/scoped_sub_owner_keys.rs
@@ -0,0 +1,41 @@
+use enostr::{NoteId, Pubkey};
+use notedeck::SubOwnerKey;
+
+use crate::timeline::TimelineKind;
+
+/// Top-level discriminant for every `SubOwnerKey` minted by this crate,
+/// keeping each owner family's keys disjoint from the others.
+#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
+enum ColumnsOwner {
+    /// Onboarding's follow-pack subscriptions (keyed per column).
+    OnboardingFollowPacks,
+    /// Thread-view scopes (keyed per account, column, root note, and depth).
+    ThreadScope,
+    /// Timeline remote subscriptions (keyed per account and timeline kind).
+    TimelineRemote,
+}
+
+/// Stable owner key for onboarding follow-pack scoped subscriptions.
+/// `col` is the column index, so each column gets an independent owner slot.
+pub fn onboarding_owner_key(col: usize) -> SubOwnerKey {
+    SubOwnerKey::builder(ColumnsOwner::OnboardingFollowPacks)
+        .with(col)
+        .finish()
+}
+
+/// Stable owner key for one thread scope within one column and account.
+/// Keyed on account, column, thread-root note id, and scope depth, so
+/// distinct thread views never collide on the same owner slot.
+pub fn thread_scope_owner_key(
+    account_pk: Pubkey,
+    col: usize,
+    root_id: &NoteId,
+    scope_depth: usize,
+) -> SubOwnerKey {
+    SubOwnerKey::builder(ColumnsOwner::ThreadScope)
+        .with(account_pk)
+        .with(col)
+        .with(*root_id.bytes())
+        .with(scope_depth)
+        .finish()
+}
+
+/// Stable owner key for timeline remote subscriptions per account/kind pair.
+/// The key carries only the account and timeline kind (no column index), so
+/// every column showing the same timeline kind resolves to one owner slot.
+pub fn timeline_remote_owner_key(account_pk: Pubkey, kind: &TimelineKind) -> SubOwnerKey {
+    SubOwnerKey::builder(ColumnsOwner::TimelineRemote)
+        .with(account_pk)
+        .with(kind)
+        .finish()
+}
diff --git a/crates/notedeck_columns/src/storage/decks.rs b/crates/notedeck_columns/src/storage/decks.rs
@@ -331,7 +331,7 @@ fn deserialize_columns(
match CleanIntermediaryRoute::parse(&mut parser, deck_user) {
Ok(route_intermediary) => {
if let Some(ir) = route_intermediary.into_intermediary_route(ndb) {
- cols.insert_intermediary_routes(timeline_cache, vec![ir]);
+ cols.insert_intermediary_routes(timeline_cache, *deck_user, vec![ir]);
}
}
Err(err) => {
diff --git a/crates/notedeck_columns/src/subscriptions.rs b/crates/notedeck_columns/src/subscriptions.rs
@@ -1,32 +0,0 @@
-use crate::timeline::TimelineKind;
-use std::collections::HashMap;
-use uuid::Uuid;
-
-#[derive(Debug, Clone)]
-pub enum SubKind {
- /// Initial subscription. This is the first time we do a remote subscription
- /// for a timeline
- Initial,
-
- /// One shot requests, we can just close after we receive EOSE
- OneShot,
-
- Timeline(TimelineKind),
-
- /// We are fetching a contact list so that we can use it for our follows
- /// Filter.
- // TODO: generalize this to any list?
- FetchingContactList(TimelineKind),
-}
-
-/// Subscriptions that need to be tracked at various stages. Sometimes we
-/// need to do A, then B, then C. Tracking requests at various stages by
-/// mapping uuid subids to explicit states happens here.
-#[derive(Default)]
-pub struct Subscriptions {
- pub subs: HashMap<String, SubKind>,
-}
-
-pub fn new_sub_id() -> String {
- Uuid::new_v4().to_string()
-}
diff --git a/crates/notedeck_columns/src/test_data.rs b/crates/notedeck_columns/src/test_data.rs
@@ -1,24 +1,5 @@
-use enostr::RelayPool;
use nostrdb::ProfileRecord;
-#[allow(unused_must_use)]
-pub fn sample_pool() -> RelayPool {
- let mut pool = RelayPool::new();
- let wakeup = move || {};
-
- pool.add_url("wss://relay.damus.io".to_string(), wakeup);
- pool.add_url("wss://eden.nostr.land".to_string(), wakeup);
- pool.add_url("wss://nostr.wine".to_string(), wakeup);
- pool.add_url("wss://nos.lol".to_string(), wakeup);
- pool.add_url("wss://test_relay_url_long_00000000000000000000000000000000000000000000000000000000000000000000000000000000000".to_string(), wakeup);
-
- for _ in 0..20 {
- pool.add_url("tmp".to_string(), wakeup);
- }
-
- pool
-}
-
// my (jb55) profile
const _TEST_PROFILE_DATA: [u8; 448] = [
0x04, 0x00, 0x00, 0x00, 0x54, 0xfe, 0xff, 0xff, 0x34, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
diff --git a/crates/notedeck_columns/src/timeline/cache.rs b/crates/notedeck_columns/src/timeline/cache.rs
@@ -1,12 +1,16 @@
use crate::{
actionbar::TimelineOpenResult,
error::Error,
- timeline::{Timeline, TimelineKind, UnknownPksOwned},
+ timeline::{
+ drop_timeline_remote_owner, ensure_remote_timeline_subscription, Timeline, TimelineKind,
+ UnknownPksOwned,
+ },
};
+use notedeck::ScopedSubApi;
use notedeck::{filter, FilterState, NoteCache, NoteRef};
-use enostr::RelayPool;
+use enostr::Pubkey;
use nostrdb::{Filter, Ndb, Transaction};
use std::collections::HashMap;
use tracing::{debug, error, info, warn};
@@ -54,7 +58,7 @@ impl TimelineCache {
&mut self,
id: &TimelineKind,
ndb: &mut Ndb,
- pool: &mut RelayPool,
+ scoped_subs: &mut ScopedSubApi<'_, '_>,
) -> Result<(), Error> {
let timeline = if let Some(timeline) = self.timelines.get_mut(id) {
timeline
@@ -62,9 +66,17 @@ impl TimelineCache {
return Err(Error::TimelineNotFound);
};
- timeline.subscription.unsubscribe_or_decrement(ndb, pool);
+ let account_pk = scoped_subs.selected_account_pubkey();
+ timeline
+ .subscription
+ .unsubscribe_or_decrement(account_pk, ndb);
- if timeline.subscription.no_sub() {
+ if timeline.subscription.no_sub(&account_pk) {
+ timeline.subscription.clear_remote_seeded(account_pk);
+ drop_timeline_remote_owner(timeline, account_pk, scoped_subs);
+ }
+
+ if !timeline.subscription.has_any_subs() {
debug!(
"popped last timeline {:?}, removing from timeline cache",
id
@@ -105,16 +117,18 @@ impl TimelineCache {
res
}
- pub fn insert(&mut self, id: TimelineKind, timeline: Timeline) {
+ pub fn insert(&mut self, id: TimelineKind, account_pk: Pubkey, mut timeline: Timeline) {
if let Some(cur_timeline) = self.timelines.get_mut(&id) {
- cur_timeline.subscription.increment();
+ cur_timeline.subscription.increment(account_pk);
return;
};
+ timeline.subscription.increment(account_pk);
self.timelines.insert(id, timeline);
}
/// Get and/or update the notes associated with this timeline
+ #[profiling::function]
fn notes<'a>(
&'a mut self,
ndb: &Ndb,
@@ -137,6 +151,7 @@ impl TimelineCache {
let mut notes = Vec::new();
for package in filters.local().packages {
+ profiling::scope!("ndb query");
if let Ok(results) = ndb.query(txn, package.filters, 1000) {
let cur_notes: Vec<NoteRef> = results
.into_iter()
@@ -174,13 +189,16 @@ impl TimelineCache {
/// When `load_local` is false, the timeline is created and subscribed
/// without running a blocking local query. Use this for startup paths
/// where initial notes are loaded asynchronously.
+ #[profiling::function]
+ #[allow(clippy::too_many_arguments)]
pub fn open(
&mut self,
ndb: &Ndb,
note_cache: &mut NoteCache,
txn: &Transaction,
- pool: &mut RelayPool,
+ scoped_subs: &mut ScopedSubApi<'_, '_>,
id: &TimelineKind,
+ account_pk: Pubkey,
load_local: bool,
) -> Option<TimelineOpenResult> {
if !load_local {
@@ -195,10 +213,15 @@ impl TimelineCache {
self.timelines.get_mut(id).expect("timeline inserted")
};
- if let Some(filter) = timeline.filter.get_any_ready() {
+ if let FilterState::Ready(filter) = &timeline.filter {
debug!("got open with subscription for {:?}", &timeline.kind);
- timeline.subscription.try_add_local(ndb, filter);
- timeline.subscription.try_add_remote(pool, filter);
+ timeline.subscription.try_add_local(account_pk, ndb, filter);
+ ensure_remote_timeline_subscription(
+ timeline,
+ account_pk,
+ filter.remote().to_vec(),
+ scoped_subs,
+ );
} else {
debug!(
"open skipped subscription; filter not ready for {:?}",
@@ -206,27 +229,16 @@ impl TimelineCache {
);
}
- timeline.subscription.increment();
+ timeline.subscription.increment(account_pk);
return None;
}
+ let account_pk = scoped_subs.selected_account_pubkey();
let notes_resp = self.notes(ndb, note_cache, txn, id);
let (mut open_result, timeline) = match notes_resp.vitality {
Vitality::Stale(timeline) => {
// The timeline cache is stale, let's update it
- let notes = {
- let mut notes = Vec::new();
- for package in timeline.subscription.get_filter()?.local().packages {
- let cur_notes = find_new_notes(
- timeline.all_or_any_entries().latest(),
- package.filters,
- txn,
- ndb,
- );
- notes.extend(cur_notes);
- }
- notes
- };
+ let notes = collect_stale_notes(timeline, txn, ndb);
let open_result = if notes.is_empty() {
None
@@ -246,10 +258,15 @@ impl TimelineCache {
Vitality::Fresh(timeline) => (None, timeline),
};
- if let Some(filter) = timeline.filter.get_any_ready() {
+ if let FilterState::Ready(filter) = &timeline.filter {
debug!("got open with *new* subscription for {:?}", &timeline.kind);
- timeline.subscription.try_add_local(ndb, filter);
- timeline.subscription.try_add_remote(pool, filter);
+ timeline.subscription.try_add_local(account_pk, ndb, filter);
+ ensure_remote_timeline_subscription(
+ timeline,
+ account_pk,
+ filter.remote().to_vec(),
+ scoped_subs,
+ );
} else {
// This should never happen reasoning, self.notes would have
// failed above if the filter wasn't ready
@@ -258,7 +275,7 @@ impl TimelineCache {
);
};
- timeline.subscription.increment();
+ timeline.subscription.increment(account_pk);
if let Some(unknowns) = notes_resp.unknown_pks {
match &mut open_result {
@@ -291,6 +308,24 @@ impl TimelineCache {
}
}
+/// Gathers notes past the timeline's latest known entry for every local
+/// filter package (via `find_new_notes`), used to refresh a stale cache
+/// entry. Returns an empty vec when the timeline's filter is not yet in the
+/// `Ready` state.
+fn collect_stale_notes(timeline: &Timeline, txn: &Transaction, ndb: &Ndb) -> Vec<NoteRef> {
+    // Without a ready filter there is nothing to query against.
+    let FilterState::Ready(filter) = &timeline.filter else {
+        return Vec::new();
+    };
+
+    let mut notes = Vec::new();
+    for package in filter.local().packages {
+        let cur_notes = find_new_notes(
+            timeline.all_or_any_entries().latest(),
+            package.filters,
+            txn,
+            ndb,
+        );
+        notes.extend(cur_notes);
+    }
+    notes
+}
+
pub struct GetNotesResponse<'a> {
vitality: Vitality<'a, Timeline>,
unknown_pks: Option<UnknownPksOwned>,
diff --git a/crates/notedeck_columns/src/timeline/mod.rs b/crates/notedeck_columns/src/timeline/mod.rs
@@ -1,10 +1,10 @@
use crate::{
error::Error,
- multi_subscriber::TimelineSub,
- subscriptions::{self, SubKind, Subscriptions},
+ scoped_sub_owner_keys::timeline_remote_owner_key,
timeline::{
kind::{people_list_note_filter, AlgoTimeline, ListKind, PeopleListRef},
note_units::InsertManyResponse,
+ sub::TimelineSub,
timeline_units::NotePayload,
},
Result,
@@ -12,13 +12,14 @@ use crate::{
use notedeck::{
contacts::hybrid_contacts_filter,
- filter::{self, HybridFilter},
+ filter::{self},
is_future_timestamp, tr, unix_time_secs, Accounts, CachedNote, ContactState, FilterError,
- FilterState, FilterStates, Localization, NoteCache, NoteRef, UnknownIds,
+ FilterState, Localization, NoteCache, NoteRef, RelaySelection, ScopedSubApi, ScopedSubIdentity,
+ SubConfig, SubKey, UnknownIds,
};
use egui_virtual_list::VirtualList;
-use enostr::{PoolRelay, Pubkey, RelayPool};
+use enostr::Pubkey;
use nostrdb::{Filter, Ndb, Note, NoteKey, Transaction};
use std::rc::Rc;
use std::{cell::RefCell, collections::HashSet};
@@ -29,6 +30,7 @@ pub mod cache;
pub mod kind;
mod note_units;
pub mod route;
+mod sub;
pub mod thread;
mod timeline_units;
mod unit;
@@ -39,6 +41,61 @@ pub use note_units::{CompositeType, InsertionResponse, NoteUnits};
pub use timeline_units::{TimelineUnits, UnknownPks};
pub use unit::{CompositeUnit, NoteUnit, ReactionUnit, RepostUnit};
+/// Discriminant for the timeline module's scoped-subscription keys.
+#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
+enum TimelineScopedSub {
+    /// One remote subscription per timeline kind.
+    RemoteByKind,
+}
+
+/// Stable [`SubKey`] for a timeline's remote subscription, keyed by kind.
+fn timeline_remote_sub_key(kind: &TimelineKind) -> SubKey {
+    SubKey::builder(TimelineScopedSub::RemoteByKind)
+        .with(kind)
+        .finish()
+}
+
+/// Builds the scoped-sub config for a timeline's remote subscription:
+/// the selected account's read relays, the supplied filters, and
+/// `use_transparent: false` (i.e. the non-transparent relay mode).
+fn timeline_remote_sub_config(remote_filters: Vec<Filter>) -> SubConfig {
+    SubConfig {
+        relays: RelaySelection::AccountsRead,
+        filters: remote_filters,
+        use_transparent: false,
+    }
+}
+
+/// Registers the remote scoped subscription for `timeline` under `account_pk`
+/// via `ensure_sub`, then records the account as remote-seeded so callers can
+/// skip re-issuing it (see `remote_seeded` checks elsewhere in this module).
+pub(crate) fn ensure_remote_timeline_subscription(
+    timeline: &mut Timeline,
+    account_pk: Pubkey,
+    remote_filters: Vec<Filter>,
+    scoped_subs: &mut ScopedSubApi<'_, '_>,
+) {
+    let owner = timeline_remote_owner_key(account_pk, &timeline.kind);
+    let identity = ScopedSubIdentity::account(owner, timeline_remote_sub_key(&timeline.kind));
+    let config = timeline_remote_sub_config(remote_filters);
+    // NOTE(review): ensure_sub's result is deliberately discarded — confirm
+    // failures here need no handling beyond the seeded flag below.
+    let _ = scoped_subs.ensure_sub(identity, config);
+    timeline.subscription.mark_remote_seeded(account_pk);
+}
+
+/// Installs the remote scoped subscription for `timeline` with the given
+/// filters for the currently selected account (via `set_sub`, as opposed to
+/// the `ensure_sub` path), then marks that account as remote-seeded.
+pub(crate) fn update_remote_timeline_subscription(
+    timeline: &mut Timeline,
+    remote_filters: Vec<Filter>,
+    scoped_subs: &mut ScopedSubApi<'_, '_>,
+) {
+    let owner = timeline_remote_owner_key(scoped_subs.selected_account_pubkey(), &timeline.kind);
+    let identity = ScopedSubIdentity::account(owner, timeline_remote_sub_key(&timeline.kind));
+    let config = timeline_remote_sub_config(remote_filters);
+    // NOTE(review): result discarded, mirroring ensure_remote_timeline_subscription.
+    let _ = scoped_subs.set_sub(identity, config);
+    timeline
+        .subscription
+        .mark_remote_seeded(scoped_subs.selected_account_pubkey());
+}
+
+/// Drops the scoped subscriptions held under this timeline's remote owner key
+/// for the given account — the teardown counterpart of
+/// `ensure_remote_timeline_subscription`.
+pub fn drop_timeline_remote_owner(
+    timeline: &Timeline,
+    account_pk: Pubkey,
+    scoped_subs: &mut ScopedSubApi<'_, '_>,
+) {
+    let owner = timeline_remote_owner_key(account_pk, &timeline.kind);
+    // NOTE(review): drop_owner's result is ignored — presumably dropping an
+    // absent owner is a benign no-op; confirm against ScopedSubApi docs.
+    let _ = scoped_subs.drop_owner(owner);
+}
+
#[derive(Copy, Clone, Eq, PartialEq, Debug, Default, PartialOrd, Ord)]
pub enum ViewFilter {
MentionsOnly,
@@ -164,6 +221,7 @@ impl TimelineTab {
self.list.borrow_mut().reset();
}
+ #[profiling::function]
fn insert<'a>(
&mut self,
payloads: Vec<&'a NotePayload>,
@@ -249,7 +307,7 @@ pub struct Timeline {
pub kind: TimelineKind,
// We may not have the filter loaded yet, so let's make it an option so
// that codepaths have to explicitly handle it
- pub filter: FilterStates,
+ pub filter: FilterState,
pub views: Vec<TimelineTab>,
pub selected_view: usize,
pub seen_latest_notes: bool,
@@ -323,7 +381,6 @@ impl Timeline {
}
pub fn new(kind: TimelineKind, filter_state: FilterState, views: Vec<TimelineTab>) -> Self {
- let filter = FilterStates::new(filter_state);
let subscription = TimelineSub::default();
let selected_view = 0;
@@ -332,7 +389,7 @@ impl Timeline {
Timeline {
kind,
- filter,
+ filter: filter_state,
views,
subscription,
selected_view,
@@ -409,6 +466,7 @@ impl Timeline {
let now = unix_time_secs();
let mut unknown_pks = HashSet::new();
for note_ref in notes {
+ profiling::scope!("inserting notes");
if is_future_timestamp(note_ref.created_at, now) {
continue;
}
@@ -450,6 +508,7 @@ impl Timeline {
/// The main function used for inserting notes into timelines. Handles
/// inserting into multiple views if we have them. All timeline note
/// insertions should use this function.
+ #[profiling::function]
pub fn insert(
&mut self,
new_note_ids: &[NoteKey],
@@ -512,6 +571,7 @@ impl Timeline {
#[profiling::function]
pub fn poll_notes_into_view(
&mut self,
+ account_pk: &Pubkey,
ndb: &Ndb,
txn: &Transaction,
unknown_ids: &mut UnknownIds,
@@ -525,10 +585,13 @@ impl Timeline {
let sub = self
.subscription
- .get_local()
+ .get_local(account_pk)
.ok_or(Error::App(notedeck::Error::no_active_sub()))?;
- let new_note_ids = ndb.poll_for_notes(sub, 500);
+ let new_note_ids = {
+ profiling::scope!("big ndb poll");
+ ndb.poll_for_notes(sub, 500)
+ };
if new_note_ids.is_empty() {
return Ok(());
} else {
@@ -547,10 +610,7 @@ impl Timeline {
/// Note: We reset states rather than clearing them so that
/// [`Self::set_all_states`] can update them during the rebuild.
pub fn invalidate(&mut self) {
- self.filter.initial_state = FilterState::NeedsRemote;
- for state in self.filter.states.values_mut() {
- *state = FilterState::NeedsRemote;
- }
+ self.filter = FilterState::NeedsRemote;
self.contact_list_timestamp = None;
}
}
@@ -618,70 +678,47 @@ pub fn setup_new_timeline(
timeline: &mut Timeline,
ndb: &Ndb,
txn: &Transaction,
- subs: &mut Subscriptions,
- pool: &mut RelayPool,
+ scoped_subs: &mut ScopedSubApi<'_, '_>,
note_cache: &mut NoteCache,
since_optimize: bool,
accounts: &Accounts,
unknown_ids: &mut UnknownIds,
) {
+ let account_pk = *accounts.selected_account_pubkey();
+
// if we're ready, setup local subs
- if is_timeline_ready(ndb, pool, timeline, accounts) {
- if let Err(err) = setup_timeline_nostrdb_sub(ndb, txn, note_cache, timeline, unknown_ids) {
+ if is_timeline_ready(ndb, scoped_subs, timeline, accounts) {
+ if let Err(err) =
+ setup_timeline_nostrdb_sub(ndb, txn, note_cache, timeline, unknown_ids, account_pk)
+ {
error!("setup_new_timeline: {err}");
}
}
- for relay in &mut pool.relays {
- send_initial_timeline_filter(since_optimize, subs, relay, timeline, accounts);
- }
- timeline.subscription.increment();
-}
-
-/// Send initial filters for a specific relay. This typically gets called
-/// when we first connect to a new relay for the first time. For
-/// situations where you are adding a new timeline, use
-/// setup_new_timeline.
-#[profiling::function]
-pub fn send_initial_timeline_filters(
- since_optimize: bool,
- timeline_cache: &mut TimelineCache,
- subs: &mut Subscriptions,
- pool: &mut RelayPool,
- relay_id: &str,
- accounts: &Accounts,
-) -> Option<()> {
- info!("Sending initial filters to {}", relay_id);
- let relay = &mut pool.relays.iter_mut().find(|r| r.url() == relay_id)?;
-
- for (_kind, timeline) in timeline_cache {
- send_initial_timeline_filter(since_optimize, subs, relay, timeline, accounts);
- }
-
- Some(())
+ send_initial_timeline_filter(since_optimize, ndb, txn, timeline, accounts, scoped_subs);
+ timeline.subscription.increment(account_pk);
}
pub fn send_initial_timeline_filter(
can_since_optimize: bool,
- subs: &mut Subscriptions,
- relay: &mut PoolRelay,
+ ndb: &Ndb,
+ txn: &Transaction,
timeline: &mut Timeline,
accounts: &Accounts,
+ scoped_subs: &mut ScopedSubApi<'_, '_>,
) {
- let filter_state = timeline.filter.get_mut(relay.url());
-
- match filter_state {
+ match &timeline.filter {
FilterState::Broken(err) => {
error!(
"FetchingRemote state in broken state when sending initial timeline filter? {err}"
);
}
- FilterState::FetchingRemote(_unisub) => {
+ FilterState::FetchingRemote => {
error!("FetchingRemote state when sending initial timeline filter?");
}
- FilterState::GotRemote(_sub) => {
+ FilterState::GotRemote => {
error!("GotRemote state when sending initial timeline filter?");
}
@@ -714,87 +751,65 @@ pub fn send_initial_timeline_filter(
filter
}).collect();
- //let sub_id = damus.gen_subid(&SubKind::Initial);
- let sub_id = subscriptions::new_sub_id();
- subs.subs.insert(sub_id.clone(), SubKind::Initial);
-
- if let Err(err) = relay.subscribe(sub_id.clone(), new_filters.clone()) {
- error!("error subscribing: {err}");
- } else {
- timeline.subscription.force_add_remote(sub_id);
- }
+ update_remote_timeline_subscription(timeline, new_filters, scoped_subs);
}
// we need some data first
- FilterState::NeedsRemote => {
- let people_list_ref = match &timeline.kind {
- TimelineKind::List(ListKind::PeopleList(plr))
- | TimelineKind::Algo(AlgoTimeline::LastPerPubkey(ListKind::PeopleList(plr))) => {
- Some(plr.clone())
- }
- _ => None,
- };
- if let Some(plr) = people_list_ref {
- fetch_people_list(subs, relay, timeline, &plr);
- } else {
- fetch_contact_list(subs, timeline, accounts);
+ FilterState::NeedsRemote => match &timeline.kind {
+ TimelineKind::List(ListKind::PeopleList(_))
+ | TimelineKind::Algo(AlgoTimeline::LastPerPubkey(ListKind::PeopleList(_))) => {
+ fetch_people_list(ndb, txn, timeline);
}
- }
+ _ => fetch_contact_list(timeline, accounts),
+ },
}
}
-pub fn fetch_contact_list(subs: &mut Subscriptions, timeline: &mut Timeline, accounts: &Accounts) {
- if timeline.filter.get_any_ready().is_some() {
+pub fn fetch_contact_list(timeline: &mut Timeline, accounts: &Accounts) {
+ if matches!(&timeline.filter, FilterState::Ready(_)) {
return;
}
let new_filter_state = match accounts.get_selected_account().data.contacts.get_state() {
- ContactState::Unreceived => {
- FilterState::FetchingRemote(filter::FetchingRemoteType::Contact)
- }
+ ContactState::Unreceived => FilterState::FetchingRemote,
ContactState::Received {
contacts: _,
note_key: _,
timestamp: _,
- } => FilterState::GotRemote(filter::GotRemoteType::Contact),
+ } => FilterState::GotRemote,
};
- timeline.filter.set_all_states(new_filter_state);
+ timeline.filter = new_filter_state;
+}
- let sub = &accounts.get_subs().contacts;
- if subs.subs.contains_key(&sub.remote) {
+pub fn fetch_people_list(ndb: &Ndb, txn: &Transaction, timeline: &mut Timeline) {
+ if matches!(&timeline.filter, FilterState::Ready(_)) {
return;
}
- let sub_kind = SubKind::FetchingContactList(timeline.kind.clone());
- subs.subs.insert(sub.remote.clone(), sub_kind);
-}
-
-pub fn fetch_people_list(
- subs: &mut Subscriptions,
- relay: &mut PoolRelay,
- timeline: &mut Timeline,
- plr: &PeopleListRef,
-) {
- if timeline.filter.get_any_ready().is_some() {
+ let Some(plr) = people_list_ref(&timeline.kind) else {
+ error!("fetch_people_list called for non-people-list timeline");
+ timeline.filter = FilterState::broken(FilterError::EmptyList);
return;
- }
+ };
let filter = people_list_note_filter(plr);
- let sub_id = subscriptions::new_sub_id();
- if let Err(err) = relay.subscribe(sub_id.clone(), vec![filter]) {
- error!("error subscribing for people list: {err}");
+ let results = match ndb.query(txn, std::slice::from_ref(&filter), 1) {
+ Ok(results) => results,
+ Err(err) => {
+ error!("people list query failed in fetch_people_list: {err}");
+ timeline.filter = FilterState::broken(FilterError::EmptyList);
+ return;
+ }
+ };
+
+ if results.is_empty() {
+ timeline.filter = FilterState::FetchingRemote;
return;
}
- timeline.filter.set_relay_state(
- relay.url().to_string(),
- FilterState::FetchingRemote(filter::FetchingRemoteType::PeopleList),
- );
-
- let sub_kind = SubKind::FetchingContactList(timeline.kind.clone());
- subs.subs.insert(sub_id, sub_kind);
+ timeline.filter = FilterState::GotRemote;
}
#[profiling::function]
@@ -804,11 +819,17 @@ fn setup_initial_timeline(
timeline: &mut Timeline,
note_cache: &mut NoteCache,
unknown_ids: &mut UnknownIds,
- filters: &HybridFilter,
+ account_pk: Pubkey,
) -> Result<()> {
+ let FilterState::Ready(filters) = &timeline.filter else {
+ return Err(Error::App(notedeck::Error::empty_contact_list()));
+ };
+
// some timelines are one-shot and a refreshed, like last_per_pubkey algo feed
if timeline.kind.should_subscribe_locally() {
- timeline.subscription.try_add_local(ndb, filters);
+ timeline
+ .subscription
+ .try_add_local(account_pk, ndb, filters);
}
debug!(
@@ -856,10 +877,17 @@ pub fn setup_initial_nostrdb_subs(
note_cache: &mut NoteCache,
timeline_cache: &mut TimelineCache,
unknown_ids: &mut UnknownIds,
+ account_pk: Pubkey,
) -> Result<()> {
for (_kind, timeline) in timeline_cache {
+ if timeline.subscription.dependers(&account_pk) == 0 {
+ continue;
+ }
+
let txn = Transaction::new(ndb).expect("txn");
- if let Err(err) = setup_timeline_nostrdb_sub(ndb, &txn, note_cache, timeline, unknown_ids) {
+ if let Err(err) =
+ setup_timeline_nostrdb_sub(ndb, &txn, note_cache, timeline, unknown_ids, account_pk)
+ {
error!("setup_initial_nostrdb_subs: {err}");
}
}
@@ -873,14 +901,9 @@ fn setup_timeline_nostrdb_sub(
note_cache: &mut NoteCache,
timeline: &mut Timeline,
unknown_ids: &mut UnknownIds,
+ account_pk: Pubkey,
) -> Result<()> {
- let filter_state = timeline
- .filter
- .get_any_ready()
- .ok_or(Error::App(notedeck::Error::empty_contact_list()))?
- .to_owned();
-
- setup_initial_timeline(ndb, txn, timeline, note_cache, unknown_ids, &filter_state)?;
+ setup_initial_timeline(ndb, txn, timeline, note_cache, unknown_ids, account_pk)?;
Ok(())
}
@@ -889,40 +912,33 @@ fn setup_timeline_nostrdb_sub(
/// Our timelines may require additional data before it is functional. For
/// example, when we have to fetch a contact list before we do the actual
/// following list query.
+#[profiling::function]
pub fn is_timeline_ready(
ndb: &Ndb,
- pool: &mut RelayPool,
+ scoped_subs: &mut ScopedSubApi<'_, '_>,
timeline: &mut Timeline,
accounts: &Accounts,
) -> bool {
// TODO: we should debounce the filter states a bit to make sure we have
// seen all of the different contact lists from each relay
- if let Some(_f) = timeline.filter.get_any_ready() {
+ if let FilterState::Ready(filter) = &timeline.filter {
+ let account_pk = *accounts.selected_account_pubkey();
+ let remote_filters = filter.remote().to_vec();
+ if timeline.subscription.dependers(&account_pk) > 0
+ && !timeline.subscription.remote_seeded(&account_pk)
+ {
+ ensure_remote_timeline_subscription(timeline, account_pk, remote_filters, scoped_subs);
+ }
return true;
}
- let Some(res) = timeline.filter.get_any_gotremote() else {
+ if !matches!(&timeline.filter, FilterState::GotRemote) {
return false;
- };
-
- let (relay_id, note_key) = match res {
- filter::GotRemoteResult::Normal { relay_id, sub_id } => {
- // We got at least one eose for our filter request. Let's see
- // if nostrdb is done processing it yet.
- let res = ndb.poll_for_notes(sub_id, 1);
- if res.is_empty() {
- debug!(
- "check_timeline_filter_state: no notes found (yet?) for timeline {:?}",
- timeline
- );
- return false;
- }
-
- info!("notes found for contact timeline after GotRemote!");
+ }
- (relay_id, res[0])
- }
- filter::GotRemoteResult::Contact { relay_id } => {
+ let note_key = match &timeline.kind {
+ TimelineKind::List(ListKind::Contact(_))
+ | TimelineKind::Algo(AlgoTimeline::LastPerPubkey(ListKind::Contact(_))) => {
let ContactState::Received {
contacts: _,
note_key,
@@ -932,20 +948,10 @@ pub fn is_timeline_ready(
return false;
};
- (relay_id, *note_key)
+ *note_key
}
- filter::GotRemoteResult::PeopleList { relay_id } => {
- // Query ndb directly for the kind 30000 note. It should
- // have been ingested from the relay by now.
- let plr = match &timeline.kind {
- TimelineKind::List(ListKind::PeopleList(plr))
- | TimelineKind::Algo(AlgoTimeline::LastPerPubkey(ListKind::PeopleList(plr))) => plr,
- _ => {
- error!("GotRemoteResult::PeopleList but timeline kind is not PeopleList");
- return false;
- }
- };
-
+ TimelineKind::List(ListKind::PeopleList(plr))
+ | TimelineKind::Algo(AlgoTimeline::LastPerPubkey(ListKind::PeopleList(plr))) => {
let list_filter = people_list_note_filter(plr);
let txn = Transaction::new(ndb).expect("txn");
let results = match ndb.query(&txn, std::slice::from_ref(&list_filter), 1) {
@@ -962,8 +968,9 @@ pub fn is_timeline_ready(
}
info!("found people list note after GotRemote!");
- (relay_id, results[0].note_key)
+ results[0].note_key
}
+ _ => return false,
};
let with_hashtags = false;
@@ -980,29 +987,36 @@ pub fn is_timeline_ready(
match filter {
Err(notedeck::Error::Filter(e)) => {
error!("got broken when building filter {e}");
- timeline
- .filter
- .set_relay_state(relay_id, FilterState::broken(e));
+ timeline.filter = FilterState::broken(e);
false
}
Err(err) => {
error!("got broken when building filter {err}");
- timeline
- .filter
- .set_relay_state(relay_id, FilterState::broken(FilterError::EmptyList));
+ let reason = match &timeline.kind {
+ TimelineKind::List(ListKind::PeopleList(_))
+ | TimelineKind::Algo(AlgoTimeline::LastPerPubkey(ListKind::PeopleList(_))) => {
+ FilterError::EmptyList
+ }
+ _ => FilterError::EmptyContactList,
+ };
+ timeline.filter = FilterState::broken(reason);
false
}
Ok(filter) => {
// We just switched to the ready state; remote subscriptions can start now.
- info!("Found contact list! Setting up remote contact list query");
- timeline
- .filter
- .set_relay_state(relay_id, FilterState::ready_hybrid(filter.clone()));
-
- //let ck = &timeline.kind;
- //let subid = damus.gen_subid(&SubKind::Column(ck.clone()));
- timeline.subscription.try_add_remote(pool, &filter);
+ info!("Found list note! Setting up remote timeline query");
+ timeline.filter = FilterState::ready_hybrid(filter.clone());
+
+ update_remote_timeline_subscription(timeline, filter.remote().to_vec(), scoped_subs);
true
}
}
}
+
+fn people_list_ref(kind: &TimelineKind) -> Option<&PeopleListRef> {
+ match kind {
+ TimelineKind::List(ListKind::PeopleList(plr))
+ | TimelineKind::Algo(AlgoTimeline::LastPerPubkey(ListKind::PeopleList(plr))) => Some(plr),
+ _ => None,
+ }
+}
diff --git a/crates/notedeck_columns/src/timeline/sub/mod.rs b/crates/notedeck_columns/src/timeline/sub/mod.rs
@@ -0,0 +1,27 @@
+use nostrdb::{Filter, Ndb, Subscription};
+
+mod thread_sub;
+mod timeline_sub;
+
+pub use thread_sub::ThreadSubs;
+pub use timeline_sub::TimelineSub;
+
+pub fn ndb_sub(ndb: &Ndb, filter: &[Filter], id: impl std::fmt::Debug) -> Option<Subscription> {
+ match ndb.subscribe(filter) {
+ Ok(s) => Some(s),
+ Err(e) => {
+ tracing::error!("Failed to get subscription for {:?}: {e}", id);
+ None
+ }
+ }
+}
+
+pub fn ndb_unsub(ndb: &mut Ndb, sub: Subscription, id: impl std::fmt::Debug) -> bool {
+ match ndb.unsubscribe(sub) {
+ Ok(_) => true,
+ Err(e) => {
+ tracing::error!("Failed to unsub {:?}: {e}", id);
+ false
+ }
+ }
+}
diff --git a/crates/notedeck_columns/src/timeline/sub/thread_sub.rs b/crates/notedeck_columns/src/timeline/sub/thread_sub.rs
@@ -0,0 +1,337 @@
+use egui_nav::ReturnType;
+use enostr::{Filter, NoteId, Pubkey};
+use hashbrown::HashMap;
+use nostrdb::{Ndb, Subscription};
+use notedeck::{
+ Accounts, RelaySelection, ScopedSubApi, ScopedSubIdentity, SubConfig, SubKey, SubOwnerKey,
+};
+
+use crate::scoped_sub_owner_keys::thread_scope_owner_key;
+use crate::timeline::{
+ sub::{ndb_sub, ndb_unsub},
+ ThreadSelection,
+};
+
+type RootNoteId = NoteId;
+
+// column id
+type MetaId = usize;
+
+/// Outcome of removing local thread subscriptions for a close action.
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+enum UnsubscribeOutcome {
+ /// Local NDB sub(s) were removed, but the scope still has stack entries so the
+ /// remote scoped-sub owner should remain.
+ KeepOwner,
+ /// The thread scope was fully removed and the remote scoped-sub owner should
+ /// be released using the returned root note id.
+ DropOwner(RootNoteId),
+}
+
+/// Thread subscription manager keyed by account and column scope.
+///
+/// Each opened thread scope installs one local NostrDB sub plus one scoped
+/// remote sub owner. Closing a scope releases its owner and tears down local state.
+#[derive(Default)]
+pub struct ThreadSubs {
+ /// Per-account thread subscription bookkeeping.
+ by_account: HashMap<Pubkey, AccountThreadSubs>,
+}
+
+#[derive(Default)]
+struct AccountThreadSubs {
+ scopes: HashMap<MetaId, Vec<Scope>>,
+}
+
+struct Scope {
+ root_id: NoteId,
+ stack: Vec<Sub>,
+}
+
+struct Sub {
+ _selected_id: NoteId,
+ sub: Subscription,
+ // Keep local filters alive for the full subscription lifetime. Thread
+ // filters use custom callbacks and can crash if dropped early.
+ _filters: Vec<Filter>,
+}
+
+impl ThreadSubs {
+ #[allow(clippy::too_many_arguments)]
+ pub fn subscribe(
+ &mut self,
+ ndb: &mut Ndb,
+ scoped_subs: &mut ScopedSubApi<'_, '_>,
+ meta_id: usize,
+ id: &ThreadSelection,
+ local_sub_filter: Vec<Filter>,
+ new_scope: bool,
+ remote_sub_filter: Vec<Filter>,
+ ) {
+ let account_pk = scoped_subs.selected_account_pubkey();
+ let account_subs = self.by_account.entry(account_pk).or_default();
+ let cur_scopes = account_subs.scopes.entry(meta_id).or_default();
+ let added_local = if new_scope || cur_scopes.is_empty() {
+ local_sub_new_scope(
+ ndb,
+ scoped_subs,
+ account_pk,
+ meta_id,
+ id,
+ local_sub_filter,
+ remote_sub_filter,
+ cur_scopes,
+ )
+ } else {
+ let cur_scope = cur_scopes.last_mut().expect("can't be empty");
+ sub_current_scope(ndb, id, local_sub_filter, cur_scope)
+ };
+
+ if added_local {
+ tracing::debug!(
+ "Sub stats: account={:?}, num locals: {}",
+ account_pk,
+ account_subs.scopes.len(),
+ );
+ }
+ }
+
+ pub fn unsubscribe(
+ &mut self,
+ ndb: &mut Ndb,
+ scoped_subs: &mut ScopedSubApi<'_, '_>,
+ meta_id: usize,
+ id: &ThreadSelection,
+ return_type: ReturnType,
+ ) {
+ let account_pk = scoped_subs.selected_account_pubkey();
+ let (owner_to_drop, remove_account_entry) = {
+ let Some(account_subs) = self.by_account.get_mut(&account_pk) else {
+ return;
+ };
+
+ let Some(scopes) = account_subs.scopes.get_mut(&meta_id) else {
+ return;
+ };
+
+ let scope_depth = scopes.len().saturating_sub(1);
+ let Some(unsub_outcome) = (match return_type {
+ ReturnType::Drag => unsubscribe_drag(scopes, ndb, id),
+ ReturnType::Click => unsubscribe_click(scopes, ndb, id),
+ }) else {
+ return;
+ };
+
+ if scopes.is_empty() {
+ account_subs.scopes.remove(&meta_id);
+ }
+
+ tracing::debug!(
+ "unsub stats: account={:?}, num locals: {}, released owner: {}",
+ account_pk,
+ account_subs.scopes.len(),
+ matches!(unsub_outcome, UnsubscribeOutcome::DropOwner(_)),
+ );
+
+ (
+ match unsub_outcome {
+ UnsubscribeOutcome::KeepOwner => None,
+ UnsubscribeOutcome::DropOwner(root_id) => Some(thread_scope_owner_key(
+ account_pk,
+ meta_id,
+ &root_id,
+ scope_depth,
+ )),
+ },
+ account_subs.scopes.is_empty(),
+ )
+ };
+
+ if remove_account_entry {
+ self.by_account.remove(&account_pk);
+ }
+
+ if let Some(owner) = owner_to_drop {
+ let _ = scoped_subs.drop_owner(owner);
+ }
+ }
+
+ pub fn get_local(&self, account_pk: &Pubkey, meta_id: usize) -> Option<&Subscription> {
+ self.by_account
+ .get(account_pk)?
+ .scopes
+ .get(&meta_id)
+ .and_then(|s| s.last())
+ .and_then(|s| s.stack.last())
+ .map(|s| &s.sub)
+ }
+
+ pub fn get_local_for_selected<'a>(
+ &'a self,
+ accounts: &Accounts,
+ meta_id: usize,
+ ) -> Option<&'a Subscription> {
+ self.get_local(accounts.selected_account_pubkey(), meta_id)
+ }
+}
+
+fn unsubscribe_drag(
+ scopes: &mut Vec<Scope>,
+ ndb: &mut Ndb,
+ id: &ThreadSelection,
+) -> Option<UnsubscribeOutcome> {
+ let Some(scope) = scopes.last_mut() else {
+ tracing::error!("called drag unsubscribe but there aren't any scopes left");
+ return None;
+ };
+
+ let Some(cur_sub) = scope.stack.pop() else {
+ tracing::error!("expected a scope to be left");
+ return None;
+ };
+
+ log_scope_root_mismatch(scope, id);
+
+ if !ndb_unsub(ndb, cur_sub.sub, id) {
+ // Keep local bookkeeping aligned with NDB when unsubscribe fails.
+ scope.stack.push(cur_sub);
+ return None;
+ }
+
+ if scope.stack.is_empty() {
+ let removed_scope = scopes.pop().expect("checked empty above");
+ return Some(UnsubscribeOutcome::DropOwner(removed_scope.root_id));
+ }
+
+ Some(UnsubscribeOutcome::KeepOwner)
+}
+
+fn unsubscribe_click(
+ scopes: &mut Vec<Scope>,
+ ndb: &mut Ndb,
+ id: &ThreadSelection,
+) -> Option<UnsubscribeOutcome> {
+ let Some(mut scope) = scopes.pop() else {
+ tracing::error!("called unsubscribe but there aren't any scopes left");
+ return None;
+ };
+
+ log_scope_root_mismatch(&scope, id);
+ while let Some(sub) = scope.stack.pop() {
+ if ndb_unsub(ndb, sub.sub, id) {
+ continue;
+ }
+
+ // Partial rollback: restore the failed local sub (and any remaining ones)
+ // to thread bookkeeping and keep the remote owner alive.
+ scope.stack.push(sub);
+ scopes.push(scope);
+ return None;
+ }
+ Some(UnsubscribeOutcome::DropOwner(scope.root_id))
+}
+
+fn log_scope_root_mismatch(scope: &Scope, id: &ThreadSelection) {
+ if scope.root_id.bytes() != id.root_id.bytes() {
+ tracing::error!(
+ "Somehow the current scope's root is not equal to the selected note's root. scope's root: {:?}, thread's root: {:?}",
+ scope.root_id.hex(),
+ id.root_id.bytes()
+ );
+ }
+}
+
+#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
+enum ThreadScopedSub {
+ RepliesByRoot,
+}
+
+fn thread_remote_sub_key(root_id: &RootNoteId) -> SubKey {
+ SubKey::builder(ThreadScopedSub::RepliesByRoot)
+ .with(*root_id.bytes())
+ .finish()
+}
+
+fn sub_current_scope(
+ ndb: &mut Ndb,
+ selection: &ThreadSelection,
+ local_sub_filter: Vec<Filter>,
+ cur_scope: &mut Scope,
+) -> bool {
+ if selection.root_id.bytes() != cur_scope.root_id.bytes() {
+ tracing::error!(
+ "Somehow the current scope's root is not equal to the selected note's root"
+ );
+ }
+
+ if let Some(sub) = ndb_sub(ndb, &local_sub_filter, selection) {
+ cur_scope.stack.push(Sub {
+ _selected_id: NoteId::new(*selection.selected_or_root()),
+ sub,
+ _filters: local_sub_filter,
+ });
+ return true;
+ }
+
+ false
+}
+
+fn sub_remote(
+ scoped_subs: &mut ScopedSubApi<'_, '_>,
+ owner: SubOwnerKey,
+ key: SubKey,
+ filter: Vec<Filter>,
+ id: impl std::fmt::Debug,
+) {
+ tracing::debug!("Remote subscribe for {:?}", id);
+
+ let identity = ScopedSubIdentity::account(owner, key);
+ let config = SubConfig {
+ relays: RelaySelection::AccountsRead,
+ filters: filter,
+ use_transparent: false,
+ };
+ let _ = scoped_subs.ensure_sub(identity, config);
+}
+
+#[allow(clippy::too_many_arguments)]
+fn local_sub_new_scope(
+ ndb: &mut Ndb,
+ scoped_subs: &mut ScopedSubApi<'_, '_>,
+ account_pk: Pubkey,
+ meta_id: usize,
+ id: &ThreadSelection,
+ local_sub_filter: Vec<Filter>,
+ remote_sub_filter: Vec<Filter>,
+ scopes: &mut Vec<Scope>,
+) -> bool {
+ let root_id = id.root_id.to_note_id();
+ let scope_depth = scopes.len();
+ let owner = thread_scope_owner_key(account_pk, meta_id, &root_id, scope_depth);
+ tracing::info!(
+ "thread sub with owner: pk: {account_pk:?}, col: {meta_id}, rootid: {root_id:?}, depth: {scope_depth}"
+ );
+ sub_remote(
+ scoped_subs,
+ owner,
+ thread_remote_sub_key(&root_id),
+ remote_sub_filter,
+ id,
+ );
+
+ let Some(sub) = ndb_sub(ndb, &local_sub_filter, id) else {
+ let _ = scoped_subs.drop_owner(owner);
+ return false;
+ };
+
+ scopes.push(Scope {
+ root_id,
+ stack: vec![Sub {
+ _selected_id: NoteId::new(*id.selected_or_root()),
+ sub,
+ _filters: local_sub_filter,
+ }],
+ });
+
+ true
+}
diff --git a/crates/notedeck_columns/src/timeline/sub/timeline_sub.rs b/crates/notedeck_columns/src/timeline/sub/timeline_sub.rs
@@ -0,0 +1,132 @@
+use enostr::Pubkey;
+use hashbrown::HashMap;
+use nostrdb::{Ndb, Subscription};
+use notedeck::filter::HybridFilter;
+
+use crate::timeline::sub::ndb_sub;
+
+/// Per-account local timeline subscription state with ref-counting.
+///
+/// Remote timeline relay subscriptions are managed by scoped subs; this type
+/// only tracks local NostrDB subscriptions and active dependers.
+#[derive(Debug, Default)]
+pub struct TimelineSub {
+ by_account: HashMap<Pubkey, AccountSubState>,
+}
+
+#[derive(Debug, Clone, Copy, Default)]
+struct AccountSubState {
+ local: Option<Subscription>,
+ dependers: usize,
+ remote_seeded: bool,
+}
+
+fn should_remove_account_state(state: &AccountSubState) -> bool {
+ state.dependers == 0 && state.local.is_none()
+}
+
+fn unsubscribe_local_with_rollback(ndb: &mut Ndb, local: &mut Option<Subscription>, context: &str) {
+ let Some(local_sub) = local.take() else {
+ return;
+ };
+
+ if let Err(e) = ndb.unsubscribe(local_sub) {
+ tracing::error!("{context}: ndb unsubscribe failed: {e}");
+ *local = Some(local_sub);
+ }
+}
+
+impl TimelineSub {
+ fn state_for_account(&self, account_pk: &Pubkey) -> AccountSubState {
+ self.by_account.get(account_pk).copied().unwrap_or_default()
+ }
+
+ fn state_for_account_mut(&mut self, account_pk: Pubkey) -> &mut AccountSubState {
+ self.by_account.entry(account_pk).or_default()
+ }
+
+ /// Reset one account's local subscription state while preserving its depender count.
+ pub fn reset_for_account(&mut self, account_pk: Pubkey, ndb: &mut Ndb) {
+ let mut remove_account_state = false;
+
+ if let Some(state) = self.by_account.get_mut(&account_pk) {
+ unsubscribe_local_with_rollback(
+ ndb,
+ &mut state.local,
+ "TimelineSub::reset_for_account",
+ );
+ remove_account_state = should_remove_account_state(state);
+ }
+
+ if remove_account_state {
+ self.by_account.remove(&account_pk);
+ }
+ }
+
+ pub fn try_add_local(&mut self, account_pk: Pubkey, ndb: &Ndb, filter: &HybridFilter) {
+ let state = self.state_for_account_mut(account_pk);
+ if state.local.is_some() {
+ return;
+ }
+
+ if let Some(sub) = ndb_sub(ndb, &filter.local().combined(), "") {
+ state.local = Some(sub);
+ }
+ }
+
+ pub fn increment(&mut self, account_pk: Pubkey) {
+ self.state_for_account_mut(account_pk).dependers += 1;
+ }
+
+ pub fn remote_seeded(&self, account_pk: &Pubkey) -> bool {
+ self.state_for_account(account_pk).remote_seeded
+ }
+
+ pub fn mark_remote_seeded(&mut self, account_pk: Pubkey) {
+ self.state_for_account_mut(account_pk).remote_seeded = true;
+ }
+
+ pub fn clear_remote_seeded(&mut self, account_pk: Pubkey) {
+ self.state_for_account_mut(account_pk).remote_seeded = false;
+ }
+
+ pub fn get_local(&self, account_pk: &Pubkey) -> Option<Subscription> {
+ self.state_for_account(account_pk).local
+ }
+
+ pub fn unsubscribe_or_decrement(&mut self, account_pk: Pubkey, ndb: &mut Ndb) {
+ let mut remove_account_state = false;
+ if let Some(state) = self.by_account.get_mut(&account_pk) {
+ if state.dependers > 1 {
+ state.dependers = state.dependers.saturating_sub(1);
+ return;
+ }
+
+ state.dependers = state.dependers.saturating_sub(1);
+ state.remote_seeded = false;
+ unsubscribe_local_with_rollback(
+ ndb,
+ &mut state.local,
+ "TimelineSub::unsubscribe_or_decrement",
+ );
+ remove_account_state = should_remove_account_state(state);
+ }
+
+ if remove_account_state {
+ self.by_account.remove(&account_pk);
+ }
+ }
+
+ pub fn no_sub(&self, account_pk: &Pubkey) -> bool {
+ let state = self.state_for_account(account_pk);
+ state.dependers == 0
+ }
+
+ pub fn has_any_subs(&self) -> bool {
+ !self.by_account.is_empty()
+ }
+
+ pub fn dependers(&self, account_pk: &Pubkey) -> usize {
+ self.state_for_account(account_pk).dependers
+ }
+}
diff --git a/crates/notedeck_columns/src/timeline/thread.rs b/crates/notedeck_columns/src/timeline/thread.rs
@@ -1,15 +1,15 @@
use egui_nav::ReturnType;
use egui_virtual_list::VirtualList;
-use enostr::{NoteId, RelayPool};
+use enostr::NoteId;
use hashbrown::{hash_map::RawEntryMut, HashMap};
use nostrdb::{Filter, Ndb, Note, NoteKey, NoteReplyBuf, Transaction};
-use notedeck::{NoteCache, NoteRef, UnknownIds};
+use notedeck::{Accounts, NoteCache, NoteRef, ScopedSubApi, UnknownIds};
use crate::{
actionbar::{process_thread_notes, NewThreadNotes},
- multi_subscriber::ThreadSubs,
timeline::{
note_units::{NoteUnits, UnitKey},
+ sub::ThreadSubs,
unit::NoteUnit,
InsertionResponse,
},
@@ -61,11 +61,12 @@ impl Threads {
/// Opening a thread.
/// Similar to [[super::cache::TimelineCache::open]]
#[allow(clippy::too_many_arguments)]
+ #[profiling::function]
pub fn open(
&mut self,
ndb: &mut Ndb,
txn: &Transaction,
- pool: &mut RelayPool,
+ scoped_subs: &mut ScopedSubApi<'_, '_>,
thread: &ThreadSelection,
new_scope: bool,
col: usize,
@@ -112,10 +113,15 @@ impl Threads {
.collect::<Vec<_>>()
});
- self.subs
- .subscribe(ndb, pool, col, thread, local_sub_filter, new_scope, || {
- replies_filter_remote(thread)
- });
+ self.subs.subscribe(
+ ndb,
+ scoped_subs,
+ col,
+ thread,
+ local_sub_filter,
+ new_scope,
+ replies_filter_remote(thread),
+ );
new_notes.map(|notes| NewThreadNotes {
selected_note_id: NoteId::new(*selected_note_id),
@@ -126,16 +132,19 @@ impl Threads {
pub fn close(
&mut self,
ndb: &mut Ndb,
- pool: &mut RelayPool,
+ scoped_subs: &mut ScopedSubApi<'_, '_>,
thread: &ThreadSelection,
return_type: ReturnType,
id: usize,
) {
tracing::info!("Closing thread: {:?}", thread);
- self.subs.unsubscribe(ndb, pool, id, thread, return_type);
+ self.subs
+ .unsubscribe(ndb, scoped_subs, id, thread, return_type);
}
/// Responsible for making sure the chain and the direct replies are up to date
+ #[allow(clippy::too_many_arguments)]
+ #[profiling::function]
pub fn update(
&mut self,
selected: &Note<'_>,
@@ -143,6 +152,7 @@ impl Threads {
ndb: &Ndb,
txn: &Transaction,
unknown_ids: &mut UnknownIds,
+ accounts: &Accounts,
col: usize,
) {
let Some(selected_key) = selected.key() else {
@@ -160,12 +170,12 @@ impl Threads {
.get_mut(&selected.id())
.expect("should be guarenteed to exist from `Self::fill_reply_chain_recursive`");
- let Some(sub) = self.subs.get_local(col) else {
+ let Some(sub) = self.subs.get_local_for_selected(accounts, col) else {
tracing::error!("Was expecting to find local sub");
return;
};
- let keys = ndb.poll_for_notes(sub.sub, 10);
+ let keys = ndb.poll_for_notes(*sub, 10);
if keys.is_empty() {
return;
diff --git a/crates/notedeck_columns/src/timeline/timeline_units.rs b/crates/notedeck_columns/src/timeline/timeline_units.rs
@@ -39,6 +39,7 @@ impl TimelineUnits {
}
/// returns number of new entries merged
+ #[profiling::function]
pub fn merge_new_notes<'a>(
&mut self,
payloads: Vec<&'a NotePayload>,
diff --git a/crates/notedeck_columns/src/toolbar.rs b/crates/notedeck_columns/src/toolbar.rs
@@ -100,9 +100,10 @@ fn pop_to_root(app: &mut Damus, ctx: &mut AppContext, col_index: usize) {
&popped,
&mut app.timeline_cache,
&mut app.threads,
+ &mut app.onboarding,
&mut app.view_state,
ctx.ndb,
- ctx.pool,
+ &mut ctx.remote.scoped_subs(ctx.accounts),
ReturnType::Click,
col_index,
);
diff --git a/crates/notedeck_columns/src/ui/add_column.rs b/crates/notedeck_columns/src/ui/add_column.rs
@@ -199,7 +199,6 @@ pub struct AddColumnView<'a> {
contacts: &'a ContactState,
i18n: &'a mut Localization,
jobs: &'a MediaJobSender,
- pool: &'a mut enostr::RelayPool,
unknown_ids: &'a mut notedeck::UnknownIds,
people_lists: &'a mut Option<notedeck::Nip51SetCache>,
}
@@ -215,7 +214,6 @@ impl<'a> AddColumnView<'a> {
contacts: &'a ContactState,
i18n: &'a mut Localization,
jobs: &'a MediaJobSender,
- pool: &'a mut enostr::RelayPool,
unknown_ids: &'a mut notedeck::UnknownIds,
people_lists: &'a mut Option<notedeck::Nip51SetCache>,
) -> Self {
@@ -228,7 +226,6 @@ impl<'a> AddColumnView<'a> {
contacts,
i18n,
jobs,
- pool,
unknown_ids,
people_lists,
}
@@ -308,13 +305,8 @@ impl<'a> AddColumnView<'a> {
.kinds([30000])
.limit(50)
.build();
- *self.people_lists = notedeck::Nip51SetCache::new(
- self.pool,
- self.ndb,
- &txn,
- self.unknown_ids,
- vec![filter],
- );
+ *self.people_lists =
+ notedeck::Nip51SetCache::new_local(self.ndb, &txn, self.unknown_ids, vec![filter]);
}
// Poll for newly arrived notes each frame
@@ -886,6 +878,65 @@ struct ColumnOptionData {
option: AddColumnOption,
}
+/// Attach a new timeline column by building and initializing its timeline state.
+fn attach_timeline_column(
+ app: &mut Damus,
+ ctx: &mut AppContext<'_>,
+ col: usize,
+ timeline_kind: TimelineKind,
+) -> bool {
+ let account_pk = *ctx.accounts.selected_account_pubkey();
+ let already_open_for_account = app
+ .timeline_cache
+ .get(&timeline_kind)
+ .is_some_and(|timeline| timeline.subscription.dependers(&account_pk) > 0);
+
+ if already_open_for_account {
+ if let Some(timeline) = app.timeline_cache.get_mut(&timeline_kind) {
+ timeline.subscription.increment(account_pk);
+ }
+
+ app.columns_mut(ctx.i18n, ctx.accounts)
+ .column_mut(col)
+ .router_mut()
+ .route_to_replaced(Route::timeline(timeline_kind));
+ return true;
+ }
+
+ let txn = Transaction::new(ctx.ndb).expect("txn");
+ let mut timeline = if let Some(timeline) = timeline_kind.clone().into_timeline(&txn, ctx.ndb) {
+ timeline
+ } else {
+ error!("Could not convert column response to timeline");
+ return false;
+ };
+
+ let mut scoped_subs = ctx.remote.scoped_subs(ctx.accounts);
+ crate::timeline::setup_new_timeline(
+ &mut timeline,
+ ctx.ndb,
+ &txn,
+ &mut scoped_subs,
+ ctx.note_cache,
+ app.options.contains(AppOptions::SinceOptimize),
+ ctx.accounts,
+ ctx.unknown_ids,
+ );
+
+ let route_kind = timeline.kind.clone();
+ app.columns_mut(ctx.i18n, ctx.accounts)
+ .column_mut(col)
+ .router_mut()
+ .route_to_replaced(Route::timeline(route_kind.clone()));
+ app.timeline_cache.insert(
+ route_kind,
+ *ctx.accounts.selected_account_pubkey(),
+ timeline,
+ );
+
+ true
+}
+
pub fn render_add_column_routes(
ui: &mut egui::Ui,
app: &mut Damus,
@@ -910,7 +961,6 @@ pub fn render_add_column_routes(
contacts,
ctx.i18n,
ctx.media_jobs.sender(),
- ctx.pool,
ctx.unknown_ids,
&mut app.view_state.people_lists,
);
@@ -936,34 +986,8 @@ pub fn render_add_column_routes(
if let Some(resp) = resp {
match resp {
- AddColumnResponse::Timeline(timeline_kind) => 'leave: {
- let txn = Transaction::new(ctx.ndb).unwrap();
- let mut timeline =
- if let Some(timeline) = timeline_kind.into_timeline(&txn, ctx.ndb) {
- timeline
- } else {
- error!("Could not convert column response to timeline");
- break 'leave;
- };
-
- crate::timeline::setup_new_timeline(
- &mut timeline,
- ctx.ndb,
- &txn,
- &mut app.subscriptions,
- ctx.pool,
- ctx.note_cache,
- app.options.contains(AppOptions::SinceOptimize),
- ctx.accounts,
- ctx.unknown_ids,
- );
-
- app.columns_mut(ctx.i18n, ctx.accounts)
- .column_mut(col)
- .router_mut()
- .route_to_replaced(Route::timeline(timeline.kind.clone()));
-
- app.timeline_cache.insert(timeline.kind.clone(), timeline);
+ AddColumnResponse::Timeline(timeline_kind) => {
+ let _ = attach_timeline_column(app, ctx, col, timeline_kind);
}
AddColumnResponse::Algo(algo_option) => match algo_option {
@@ -982,30 +1006,12 @@ pub fn render_add_column_routes(
// source to be, so let's create a timeline from that and
// add it to our list of timelines
AlgoOption::LastPerPubkey(Decision::Decided(list_kind)) => {
- let txn = Transaction::new(ctx.ndb).unwrap();
- let maybe_timeline = TimelineKind::last_per_pubkey(list_kind.clone())
- .into_timeline(&txn, ctx.ndb);
-
- if let Some(mut timeline) = maybe_timeline {
- crate::timeline::setup_new_timeline(
- &mut timeline,
- ctx.ndb,
- &txn,
- &mut app.subscriptions,
- ctx.pool,
- ctx.note_cache,
- app.options.contains(AppOptions::SinceOptimize),
- ctx.accounts,
- ctx.unknown_ids,
- );
-
- app.columns_mut(ctx.i18n, ctx.accounts)
- .column_mut(col)
- .router_mut()
- .route_to_replaced(Route::timeline(timeline.kind.clone()));
-
- app.timeline_cache.insert(timeline.kind.clone(), timeline);
- } else {
+ if !attach_timeline_column(
+ app,
+ ctx,
+ col,
+ TimelineKind::last_per_pubkey(list_kind.clone()),
+ ) {
// we couldn't fetch the timeline yet... let's let
// the user know ?
@@ -1103,7 +1109,13 @@ fn handle_create_people_list(app: &mut Damus, ctx: &mut AppContext<'_>, col: usi
return;
};
- notedeck::send_people_list_event(ctx.ndb, ctx.pool, kp, &name, &members);
+ notedeck::send_people_list_event(
+ ctx.ndb,
+ &mut ctx.remote.publisher(ctx.accounts),
+ kp,
+ &name,
+ &members,
+ );
// Reset the people_lists cache so it picks up the new list
app.view_state.people_lists = None;
@@ -1123,12 +1135,12 @@ fn handle_create_people_list(app: &mut Damus, ctx: &mut AppContext<'_>, col: usi
return;
};
+ let mut scoped_subs = ctx.remote.scoped_subs(ctx.accounts);
crate::timeline::setup_new_timeline(
&mut timeline,
ctx.ndb,
&txn,
- &mut app.subscriptions,
- ctx.pool,
+ &mut scoped_subs,
ctx.note_cache,
app.options.contains(AppOptions::SinceOptimize),
ctx.accounts,
@@ -1140,7 +1152,11 @@ fn handle_create_people_list(app: &mut Damus, ctx: &mut AppContext<'_>, col: usi
.router_mut()
.route_to_replaced(Route::timeline(timeline.kind.clone()));
- app.timeline_cache.insert(timeline.kind.clone(), timeline);
+ app.timeline_cache.insert(
+ timeline.kind.clone(),
+ *ctx.accounts.selected_account_pubkey(),
+ timeline,
+ );
}
pub fn hashtag_ui(
diff --git a/crates/notedeck_columns/src/ui/note/post.rs b/crates/notedeck_columns/src/ui/note/post.rs
@@ -10,7 +10,7 @@ use egui::{
widgets::text_edit::TextEdit,
Frame, Layout, Margin, Pos2, ScrollArea, Sense, TextBuffer,
};
-use enostr::{FilledKeypair, FullKeypair, NoteId, Pubkey, RelayPool};
+use enostr::{FilledKeypair, FullKeypair, NoteId, Pubkey};
use nostrdb::{Ndb, Transaction};
use notedeck::media::latest::LatestImageTex;
use notedeck::media::AnimationMode;
@@ -18,7 +18,8 @@ use notedeck::media::AnimationMode;
use notedeck::platform::android::try_open_file_picker;
use notedeck::platform::get_next_selected_file;
use notedeck::{
- name::get_display_name, supported_mime_hosted_at_url, tr, Localization, NoteAction, NoteContext,
+ name::get_display_name, supported_mime_hosted_at_url, tr, Localization, NoteAction,
+ NoteContext, PublishApi, RelayType,
};
use notedeck::{DragResponse, PixelDimensions};
use notedeck_ui::{
@@ -70,7 +71,7 @@ impl NewPostAction {
&self,
ndb: &Ndb,
txn: &Transaction,
- pool: &mut RelayPool,
+ publisher: &mut PublishApi<'_, '_>,
drafts: &mut Drafts,
) -> Result<()> {
let seckey = self.post.account.secret_key.to_secret_bytes();
@@ -96,7 +97,7 @@ impl NewPostAction {
let _ = ndb.process_event_with(&json, nostrdb::IngestMetadata::new().client(true));
}
- pool.send(&event);
+ publisher.publish_note(¬e, RelayType::AccountsWrite);
drafts.get_from_post_type(&self.post_type).clear();
Ok(())
@@ -899,7 +900,6 @@ mod preview {
img_cache: app.img_cache,
note_cache: app.note_cache,
zaps: app.zaps,
- pool: app.pool,
jobs: app.media_jobs.sender(),
unknown_ids: app.unknown_ids,
nip05_cache: app.nip05_cache,
diff --git a/crates/notedeck_columns/src/ui/profile/mod.rs b/crates/notedeck_columns/src/ui/profile/mod.rs
@@ -105,6 +105,7 @@ impl<'a, 'd> ProfileView<'a, 'd> {
let reversed = false;
// poll for new notes and insert them into our existing notes
if let Err(e) = profile_timeline.poll_notes_into_view(
+ self.note_context.accounts.selected_account_pubkey(),
self.note_context.ndb,
&txn,
self.note_context.unknown_ids,
diff --git a/crates/notedeck_columns/src/ui/relay.rs b/crates/notedeck_columns/src/ui/relay.rs
@@ -1,22 +1,29 @@
-use std::collections::HashMap;
+use std::collections::{HashMap, HashSet};
-use crate::ui::{Preview, PreviewConfig};
use egui::{Align, Button, CornerRadius, Frame, Id, Layout, Margin, Rgba, RichText, Ui, Vec2};
-use enostr::{RelayPool, RelayStatus};
-use notedeck::{tr, DragResponse, Localization, NotedeckTextStyle, RelayAction};
+use enostr::{NormRelayUrl, RelayStatus};
+use notedeck::{
+ tr, DragResponse, Localization, NotedeckTextStyle, RelayAction, RelayInspectApi, RelaySpec,
+};
use notedeck_ui::app_images;
use notedeck_ui::{colors::PINK, padding};
use tracing::debug;
use super::widgets::styled_button;
-pub struct RelayView<'a> {
- pool: &'a RelayPool,
+pub struct RelayView<'r, 'a> {
+ relay_inspect: RelayInspectApi<'r, 'a>,
+ advertised_relays: &'a std::collections::BTreeSet<RelaySpec>,
id_string_map: &'a mut HashMap<Id, String>,
i18n: &'a mut Localization,
}
-impl RelayView<'_> {
+struct RelayRow {
+ relay_url: String,
+ status: RelayStatus,
+}
+
+impl RelayView<'_, '_> {
pub fn ui(&mut self, ui: &mut egui::Ui) -> DragResponse<RelayAction> {
let scroll_out = Frame::new()
.inner_margin(Margin::symmetric(10, 0))
@@ -60,14 +67,16 @@ impl RelayView<'_> {
}
}
-impl<'a> RelayView<'a> {
+impl<'r, 'a> RelayView<'r, 'a> {
pub fn new(
- pool: &'a RelayPool,
+ relay_inspect: RelayInspectApi<'r, 'a>,
+ advertised_relays: &'a std::collections::BTreeSet<RelaySpec>,
id_string_map: &'a mut HashMap<Id, String>,
i18n: &'a mut Localization,
) -> Self {
RelayView {
- pool,
+ relay_inspect,
+ advertised_relays,
id_string_map,
i18n,
}
@@ -77,55 +86,165 @@ impl<'a> RelayView<'a> {
egui::CentralPanel::default().show(ui.ctx(), |ui| self.ui(ui));
}
- /// Show the current relays and return a relay the user selected to delete
+ /// Show the selected account's advertised relays and
+ /// any other currently-connected outbox relays.
fn show_relays(&mut self, ui: &mut Ui) -> Option<String> {
+ let relay_infos = self.relay_inspect.relay_infos();
+ let status_by_url: HashMap<String, RelayStatus> = relay_infos
+ .iter()
+ .map(|relay_info| (relay_info.relay_url.to_string(), relay_info.status))
+ .collect();
+
+ let advertised_urls: HashSet<String> = self
+ .advertised_relays
+ .iter()
+ .map(|relay| relay.url.to_string())
+ .collect();
+
+ let mut advertised = Vec::new();
+
+ for relay in self.advertised_relays {
+ let url = relay.url.to_string();
+ let status = status_by_url
+ .get(&url)
+ .copied()
+ .unwrap_or(RelayStatus::Disconnected);
+
+ advertised.push(RelayRow {
+ relay_url: url,
+ status,
+ });
+ }
+
+ let mut outbox_other = Vec::new();
+ for relay_info in relay_infos {
+ let url = relay_info.relay_url.to_string();
+ if advertised_urls.contains(&url) {
+ continue;
+ }
+ outbox_other.push(RelayRow {
+ relay_url: url,
+ status: relay_info.status,
+ });
+ }
+
let mut relay_to_remove = None;
- for (index, relay_info) in get_relay_infos(self.pool).iter().enumerate() {
- ui.add_space(8.0);
- ui.vertical_centered_justified(|ui| {
- relay_frame(ui).show(ui, |ui| {
- ui.horizontal(|ui| {
- ui.with_layout(Layout::left_to_right(Align::Center), |ui| {
- Frame::new()
- // This frame is needed to add margin because the label will be added to the outer frame first and centered vertically before the connection status is added so the vertical centering isn't accurate.
- // TODO: remove this hack and actually center the url & status at the same time
- .inner_margin(Margin::symmetric(0, 4))
- .show(ui, |ui| {
- egui::ScrollArea::horizontal()
- .id_salt(index)
- .max_width(
- ui.max_rect().width()
- - get_right_side_width(relay_info.status),
- ) // TODO: refactor to dynamically check the size of the 'right to left' portion and set the max width to be the screen width minus padding minus 'right to left' width
- .show(ui, |ui| {
- ui.label(
- RichText::new(relay_info.relay_url)
- .text_style(
- NotedeckTextStyle::Monospace.text_style(),
- )
- .color(
- ui.style()
- .visuals
- .noninteractive()
- .fg_stroke
- .color,
- ),
- );
- });
- });
- });
-
- ui.with_layout(Layout::right_to_left(Align::Center), |ui| {
- if ui.add(delete_button(ui.visuals().dark_mode)).clicked() {
- relay_to_remove = Some(relay_info.relay_url.to_string());
- };
-
- show_connection_status(ui, self.i18n, relay_info.status);
- });
+ let advertised_label = tr!(
+ self.i18n,
+ "Advertised",
+ "Section header for advertised relays"
+ );
+ let outbox_other_label = tr!(
+ self.i18n,
+ "Other",
+ "Section header for non-advertised connected relays"
+ );
+
+ relay_to_remove = relay_to_remove.or_else(|| {
+ self.show_relay_section(ui, &advertised_label, &advertised, true, "relay-advertised")
+ });
+ relay_to_remove = relay_to_remove.or_else(|| {
+ self.show_relay_section(
+ ui,
+ &outbox_other_label,
+ &outbox_other,
+ false,
+ "relay-outbox-other",
+ )
+ });
+
+ relay_to_remove
+ }
+
+ fn show_relay_section(
+ &mut self,
+ ui: &mut Ui,
+ title: &str,
+ rows: &[RelayRow],
+ allow_delete: bool,
+ id_prefix: &'static str,
+ ) -> Option<String> {
+ let mut relay_to_remove = None;
+
+ ui.add_space(8.0);
+ ui.label(
+ RichText::new(title)
+ .text_style(NotedeckTextStyle::Body.text_style())
+ .strong(),
+ );
+ ui.add_space(4.0);
+
+ if rows.is_empty() {
+ ui.label(
+ RichText::new(tr!(self.i18n, "None", "Empty relay section placeholder"))
+ .text_style(NotedeckTextStyle::Body.text_style())
+ .weak(),
+ );
+ return None;
+ }
+
+ for (index, relay_row) in rows.iter().enumerate() {
+ relay_to_remove = relay_to_remove
+ .or_else(|| self.show_relay_row(ui, relay_row, allow_delete, (id_prefix, index)));
+ }
+
+ relay_to_remove
+ }
+
+ fn show_relay_row(
+ &mut self,
+ ui: &mut Ui,
+ relay_row: &RelayRow,
+ allow_delete: bool,
+ id_salt: impl std::hash::Hash,
+ ) -> Option<String> {
+ let mut relay_to_remove = None;
+
+ ui.add_space(8.0);
+ ui.vertical_centered_justified(|ui| {
+ relay_frame(ui).show(ui, |ui| {
+ ui.horizontal(|ui| {
+ ui.with_layout(Layout::left_to_right(Align::Center), |ui| {
+ Frame::new()
+ // This frame is needed to add margin because the label will be added to the outer frame first and centered vertically before the connection status is added so the vertical centering isn't accurate.
+ // TODO: remove this hack and actually center the url & status at the same time
+ .inner_margin(Margin::symmetric(0, 4))
+ .show(ui, |ui| {
+ egui::ScrollArea::horizontal()
+ .id_salt(id_salt)
+ .max_width(
+ ui.max_rect().width()
+ - get_right_side_width(relay_row.status),
+ ) // TODO: refactor to dynamically check the size of the 'right to left' portion and set the max width to be the screen width minus padding minus 'right to left' width
+ .show(ui, |ui| {
+ ui.label(
+ RichText::new(&relay_row.relay_url)
+ .text_style(
+ NotedeckTextStyle::Monospace.text_style(),
+ )
+ .color(
+ ui.style()
+ .visuals
+ .noninteractive()
+ .fg_stroke
+ .color,
+ ),
+ );
+ });
+ });
+ });
+
+ ui.with_layout(Layout::right_to_left(Align::Center), |ui| {
+ if allow_delete && ui.add(delete_button(ui.visuals().dark_mode)).clicked() {
+ relay_to_remove = Some(relay_row.relay_url.clone());
+ }
+
+ show_connection_status(ui, self.i18n, relay_row.status);
});
});
});
- }
+ });
+
relay_to_remove
}
@@ -160,7 +279,7 @@ impl<'a> RelayView<'a> {
.id_string_map
.entry(id)
.or_insert_with(|| Self::RELAY_PREFILL.to_string());
- let is_enabled = self.pool.is_valid_url(text_buffer);
+ let is_enabled = NormRelayUrl::new(text_buffer).is_ok();
let text_edit = egui::TextEdit::singleline(text_buffer)
.hint_text(
RichText::new(tr!(
@@ -270,55 +389,3 @@ fn get_connection_icon(status: RelayStatus) -> egui::Image<'static> {
RelayStatus::Disconnected => app_images::disconnected_image(),
}
}
-
-struct RelayInfo<'a> {
- pub relay_url: &'a str,
- pub status: RelayStatus,
-}
-
-fn get_relay_infos(pool: &RelayPool) -> Vec<RelayInfo<'_>> {
- pool.relays
- .iter()
- .map(|relay| RelayInfo {
- relay_url: relay.url(),
- status: relay.status(),
- })
- .collect()
-}
-
-// PREVIEWS
-
-mod preview {
- use super::*;
- use crate::test_data::sample_pool;
- use notedeck::{App, AppContext, AppResponse};
-
- pub struct RelayViewPreview {
- pool: RelayPool,
- }
-
- impl RelayViewPreview {
- fn new() -> Self {
- RelayViewPreview {
- pool: sample_pool(),
- }
- }
- }
-
- impl App for RelayViewPreview {
- fn update(&mut self, app: &mut AppContext<'_>, ui: &mut egui::Ui) -> AppResponse {
- self.pool.try_recv();
- let mut id_string_map = HashMap::new();
- RelayView::new(app.pool, &mut id_string_map, app.i18n).ui(ui);
- AppResponse::none()
- }
- }
-
- impl Preview for RelayView<'_> {
- type Prev = RelayViewPreview;
-
- fn preview(_cfg: PreviewConfig) -> Self::Prev {
- RelayViewPreview::new()
- }
- }
-}
diff --git a/crates/notedeck_columns/src/ui/side_panel.rs b/crates/notedeck_columns/src/ui/side_panel.rs
@@ -12,8 +12,10 @@ use crate::{
route::Route,
};
-use enostr::{RelayPool, RelayStatus};
-use notedeck::{tr, Accounts, Localization, MediaJobSender, NotedeckTextStyle, UserAccount};
+use enostr::RelayStatus;
+use notedeck::{
+ tr, Accounts, Localization, MediaJobSender, NotedeckTextStyle, RelayInspectApi, UserAccount,
+};
use notedeck_ui::{
anim::{AnimationHelper, ICON_EXPANSION_MULTIPLE},
app_images, colors, ProfilePic, View,
@@ -24,7 +26,7 @@ use super::configure_deck::deck_icon;
pub static SIDE_PANEL_WIDTH: f32 = 68.0;
static ICON_WIDTH: f32 = 40.0;
-pub struct DesktopSidePanel<'a> {
+pub struct DesktopSidePanel<'r, 'a> {
selected_account: &'a UserAccount,
decks_cache: &'a DecksCache,
i18n: &'a mut Localization,
@@ -32,10 +34,10 @@ pub struct DesktopSidePanel<'a> {
img_cache: &'a mut notedeck::Images,
jobs: &'a MediaJobSender,
current_route: Option<&'a Route>,
- pool: &'a RelayPool,
+ relay_inspect: RelayInspectApi<'r, 'a>,
}
-impl View for DesktopSidePanel<'_> {
+impl View for DesktopSidePanel<'_, '_> {
fn ui(&mut self, ui: &mut egui::Ui) {
self.show(ui);
}
@@ -70,7 +72,7 @@ impl SidePanelResponse {
}
}
-impl<'a> DesktopSidePanel<'a> {
+impl<'r, 'a> DesktopSidePanel<'r, 'a> {
#[allow(clippy::too_many_arguments)]
pub fn new(
selected_account: &'a UserAccount,
@@ -80,7 +82,7 @@ impl<'a> DesktopSidePanel<'a> {
img_cache: &'a mut notedeck::Images,
jobs: &'a MediaJobSender,
current_route: Option<&'a Route>,
- pool: &'a RelayPool,
+ relay_inspect: RelayInspectApi<'r, 'a>,
) -> Self {
Self {
selected_account,
@@ -90,7 +92,7 @@ impl<'a> DesktopSidePanel<'a> {
img_cache,
jobs,
current_route,
- pool,
+ relay_inspect,
}
}
@@ -201,7 +203,7 @@ impl<'a> DesktopSidePanel<'a> {
// Connectivity indicator
let connectivity_resp = ui
.with_layout(Layout::top_down(egui::Align::Center), |ui| {
- connectivity_indicator(ui, self.pool, self.current_route)
+ connectivity_indicator(ui, &self.relay_inspect, self.current_route)
})
.inner;
@@ -808,15 +810,15 @@ fn home_button() -> impl Widget {
}
fn connectivity_indicator(
ui: &mut egui::Ui,
- pool: &RelayPool,
+ relay_inspect: &RelayInspectApi<'_, '_>,
_current_route: Option<&Route>,
) -> egui::Response {
- let connected_count = pool
- .relays
+ let relay_infos = relay_inspect.relay_infos();
+ let connected_count = relay_infos
.iter()
- .filter(|r| matches!(r.status(), RelayStatus::Connected))
+ .filter(|info| matches!(info.status, RelayStatus::Connected))
.count();
- let total_count = pool.relays.len();
+ let total_count = relay_infos.len();
// Calculate connectivity ratio (0.0 to 1.0)
let ratio = if total_count > 0 {
diff --git a/crates/notedeck_columns/src/ui/thread.rs b/crates/notedeck_columns/src/ui/thread.rs
@@ -89,6 +89,7 @@ impl<'a, 'd> ThreadView<'a, 'd> {
self.note_context.ndb,
txn,
self.note_context.unknown_ids,
+ self.note_context.accounts,
self.col,
);
diff --git a/crates/notedeck_dashboard/src/lib.rs b/crates/notedeck_dashboard/src/lib.rs
@@ -7,7 +7,7 @@ use std::time::{Duration, Instant};
use crossbeam_channel as chan;
use nostrdb::{Filter, Ndb, Transaction};
-use notedeck::{AppContext, AppResponse, try_process_events_core};
+use notedeck::{AppContext, AppResponse};
use chrono::{Datelike, TimeZone, Utc};
@@ -245,8 +245,6 @@ impl Default for Dashboard {
impl notedeck::App for Dashboard {
fn update(&mut self, ctx: &mut AppContext<'_>, ui: &mut egui::Ui) -> AppResponse {
- try_process_events_core(ctx, ui.ctx(), |_, _| {});
-
if !self.initialized {
self.initialized = true;
self.init(ui.ctx().clone(), ctx);
diff --git a/crates/notedeck_dave/src/events.rs b/crates/notedeck_dave/src/events.rs
@@ -0,0 +1,114 @@
+use enostr::{PoolEventBuf, PoolRelay, RelayEvent, RelayMessage, RelayPool};
+use notedeck::{AppContext, UnknownIds};
+use tracing::{error, info};
+
+pub fn try_process_events_core(
+ app_ctx: &mut AppContext<'_>,
+ pool: &mut enostr::RelayPool,
+ ctx: &egui::Context,
+ mut receive: impl FnMut(&mut AppContext, &mut RelayPool, PoolEventBuf),
+) {
+ let ctx2 = ctx.clone();
+ let wakeup = move || {
+ ctx2.request_repaint();
+ };
+
+ pool.keepalive_ping(wakeup);
+
+ // NOTE: we don't use the while let loop due to borrow issues
+ #[allow(clippy::while_let_loop)]
+ loop {
+ let ev = if let Some(ev) = pool.try_recv() {
+ ev.into_owned()
+ } else {
+ break;
+ };
+
+ match (&ev.event).into() {
+ RelayEvent::Opened => {
+ tracing::trace!("Opened relay {}", ev.relay);
+ }
+ RelayEvent::Closed => tracing::warn!("{} connection closed", &ev.relay),
+ RelayEvent::Other(msg) => {
+ tracing::trace!("relay {} sent other event {:?}", ev.relay, &msg)
+ }
+ RelayEvent::Error(error) => error!("relay {} had error: {error:?}", &ev.relay),
+ RelayEvent::Message(msg) => {
+ process_message_core(app_ctx, pool, &ev.relay, &msg);
+ }
+ }
+
+ receive(app_ctx, pool, ev);
+ }
+
+ if app_ctx.unknown_ids.ready_to_send() {
+ pool_unknown_id_send(app_ctx.unknown_ids, pool);
+ }
+}
+
+fn process_message_core(
+ ctx: &mut AppContext<'_>,
+ pool: &mut enostr::RelayPool,
+ relay: &str,
+ msg: &RelayMessage,
+) {
+ match msg {
+ RelayMessage::Event(_subid, ev) => {
+ let relay = if let Some(relay) = pool.relays.iter().find(|r| r.url() == relay) {
+ relay
+ } else {
+ error!("couldn't find relay {} for note processing!?", relay);
+ return;
+ };
+
+ match relay {
+ PoolRelay::Websocket(_) => {
+ //info!("processing event {}", event);
+ tracing::trace!("processing event {ev}");
+ if let Err(err) = ctx.ndb.process_event_with(
+ ev,
+ nostrdb::IngestMetadata::new()
+ .client(false)
+ .relay(relay.url()),
+ ) {
+ error!("error processing event {ev}: {err}");
+ }
+ }
+ PoolRelay::Multicast(_) => {
+ // multicast events are client events
+ if let Err(err) = ctx.ndb.process_event_with(
+ ev,
+ nostrdb::IngestMetadata::new()
+ .client(true)
+ .relay(relay.url()),
+ ) {
+ error!("error processing multicast event {ev}: {err}");
+ }
+ }
+ }
+ }
+ RelayMessage::Notice(msg) => tracing::warn!("Notice from {}: {}", relay, msg),
+ RelayMessage::OK(cr) => info!("OK {:?}", cr),
+ RelayMessage::Eose(id) => {
+ tracing::trace!("Relay {} received eose: {id}", relay)
+ }
+ RelayMessage::Closed(sid, reason) => {
+ tracing::trace!(
+ "Relay {} with sub {sid} received close because: {reason}",
+ relay
+ );
+ }
+ }
+}
+
+fn pool_unknown_id_send(unknown_ids: &mut UnknownIds, pool: &mut enostr::RelayPool) {
+ tracing::debug!("unknown_id_send called on: {:?}", &unknown_ids);
+ let filter = unknown_ids.filter().expect("filter");
+ tracing::debug!(
+ "Getting {} unknown ids from relays",
+ unknown_ids.ids_iter().len()
+ );
+ let msg = enostr::ClientMessage::req("unknownids".to_string(), filter);
+ unknown_ids.clear();
+ pool.send(&msg);
+}
diff --git a/crates/notedeck_dave/src/lib.rs b/crates/notedeck_dave/src/lib.rs
@@ -3,6 +3,7 @@ mod auto_accept;
mod avatar;
mod backend;
pub mod config;
+pub mod events;
pub mod file_update;
mod focus_queue;
pub(crate) mod git_status;
@@ -30,12 +31,12 @@ use backend::{
};
use chrono::{Duration, Local};
use egui_wgpu::RenderState;
-use enostr::KeypairUnowned;
+use enostr::{KeypairUnowned, RelayPool};
use focus_queue::FocusQueue;
use nostrdb::{Subscription, Transaction};
use notedeck::{
- timed_serializer::TimedSerializer, try_process_events_core, ui::is_narrow, AppAction,
- AppContext, AppResponse, DataPath, DataPathType,
+ timed_serializer::TimedSerializer, ui::is_narrow, AppAction, AppContext, AppResponse, DataPath,
+ DataPathType,
};
use std::collections::{HashMap, HashSet};
use std::path::{Path, PathBuf};
@@ -119,6 +120,7 @@ pub enum DaveOverlay {
}
pub struct Dave {
+ pool: RelayPool,
/// AI interaction mode (Chat vs Agentic)
ai_mode: AiMode,
/// Manages multiple chat sessions
@@ -207,6 +209,8 @@ pub struct Dave {
use update::PermissionPublish;
+use crate::events::try_process_events_core;
+
/// Info captured from a session before deletion, for publishing a "deleted" state event.
struct DeletedSessionInfo {
claude_session_id: String,
@@ -470,7 +474,10 @@ You are an AI agent for the nostr protocol called Dave, created by Damus. nostr
AiMode::Agentic => (SessionManager::new(), DaveOverlay::DirectoryPicker),
};
+ let pool = RelayPool::new();
+
Dave {
+ pool,
ai_mode,
backends,
available_backends,
@@ -2520,7 +2527,7 @@ You are an AI agent for the nostr protocol called Dave, created by Damus. nostr
let pns_sub_id = self.pns_relay_sub.clone();
let pns_relay = self.pns_relay_url.clone();
let mut neg_events: Vec<enostr::negentropy::NegEvent> = Vec::new();
- try_process_events_core(ctx, ui.ctx(), |app_ctx, ev| {
+ try_process_events_core(ctx, &mut self.pool, ui.ctx(), |app_ctx, pool, ev| {
if ev.relay == pns_relay {
if let enostr::RelayEvent::Opened = (&ev.event).into() {
neg_events.push(enostr::negentropy::NegEvent::RelayOpened);
@@ -2535,7 +2542,7 @@ You are an AI agent for the nostr protocol called Dave, created by Damus. nostr
.limit(500)
.build();
let req = enostr::ClientMessage::req(sub_id.clone(), vec![pns_filter]);
- app_ctx.pool.send_to(&req, &pns_relay);
+ pool.send_to(&req, &pns_relay);
tracing::info!("re-subscribed for PNS events after relay reconnect");
}
}
@@ -2561,9 +2568,13 @@ You are an AI agent for the nostr protocol called Dave, created by Damus. nostr
.kinds([enostr::pns::PNS_KIND as u64])
.authors([pns_keys.keypair.pubkey.bytes()])
.build();
- let result =
- self.neg_sync
- .process(neg_events, ctx.ndb, ctx.pool, &filter, &self.pns_relay_url);
+ let result = self.neg_sync.process(
+ neg_events,
+ ctx.ndb,
+ &mut self.pool,
+ &filter,
+ &self.pns_relay_url,
+ );
// If events were found and we haven't hit the round limit,
// trigger another sync to pull more recent data.
@@ -2615,7 +2626,7 @@ You are an AI agent for the nostr protocol called Dave, created by Damus. nostr
// Ensure the PNS relay is in the pool
let egui_ctx = ui.ctx().clone();
let wakeup = move || egui_ctx.request_repaint();
- if let Err(e) = ctx.pool.add_url(self.pns_relay_url.clone(), wakeup) {
+ if let Err(e) = self.pool.add_url(self.pns_relay_url.clone(), wakeup) {
tracing::warn!("failed to add PNS relay {}: {:?}", self.pns_relay_url, e);
}
@@ -2627,7 +2638,7 @@ You are an AI agent for the nostr protocol called Dave, created by Damus. nostr
.build();
let sub_id = uuid::Uuid::new_v4().to_string();
let req = enostr::ClientMessage::req(sub_id.clone(), vec![pns_filter]);
- ctx.pool.send_to(&req, &self.pns_relay_url);
+ self.pool.send_to(&req, &self.pns_relay_url);
self.pns_relay_sub = Some(sub_id);
tracing::info!("subscribed for PNS events on {}", self.pns_relay_url);
@@ -2776,7 +2787,7 @@ impl notedeck::App for Dave {
for event in all_events {
match session_events::wrap_pns(&event.note_json, &pns_keys) {
Ok(pns_json) => match enostr::ClientMessage::event_json(pns_json) {
- Ok(msg) => ctx.pool.send_to(&msg, &self.pns_relay_url),
+ Ok(msg) => self.pool.send_to(&msg, &self.pns_relay_url),
Err(e) => tracing::warn!("failed to build relay message: {:?}", e),
},
Err(e) => tracing::warn!("failed to PNS-wrap event: {}", e),
diff --git a/crates/notedeck_dave/src/ui/dave.rs b/crates/notedeck_dave/src/ui/dave.rs
@@ -1142,7 +1142,6 @@ impl<'a> DaveUi<'a> {
img_cache: ctx.img_cache,
note_cache: ctx.note_cache,
zaps: ctx.zaps,
- pool: ctx.pool,
jobs: ctx.media_jobs.sender(),
unknown_ids: ctx.unknown_ids,
nip05_cache: ctx.nip05_cache,
diff --git a/crates/notedeck_messages/src/cache/conversation.rs b/crates/notedeck_messages/src/cache/conversation.rs
@@ -9,13 +9,14 @@ use crate::{
},
},
convo_renderable::ConversationRenderable,
- nip17::{chatroom_filter, conversation_filter, get_participants},
+ nip17::get_participants,
+ relay_ensure::DmListState,
};
use super::message_store::MessageStore;
use enostr::Pubkey;
use hashbrown::HashMap;
-use nostrdb::{Ndb, Note, NoteKey, QueryResult, Subscription, Transaction};
+use nostrdb::{Ndb, Note, NoteKey, Subscription, Transaction};
use notedeck::{note::event_tag, NoteCache, NoteRef, UnknownIds};
pub struct ConversationCache {
@@ -23,6 +24,7 @@ pub struct ConversationCache {
conversations: HashMap<ConversationId, Conversation>,
order: Vec<ConversationOrder>,
pub state: ConversationListState,
+ dm_relay_list_ensure: DmListState,
pub active: Option<ConversationId>,
}
@@ -51,85 +53,6 @@ impl ConversationCache {
self.conversations.get(&self.active?)
}
- /// A conversation is "opened" when the user navigates to the conversation
- #[profiling::function]
- pub fn open_conversation(
- &mut self,
- ndb: &Ndb,
- txn: &Transaction,
- id: ConversationId,
- note_cache: &mut NoteCache,
- unknown_ids: &mut UnknownIds,
- selected: &Pubkey,
- ) {
- let Some(conversation) = self.conversations.get_mut(&id) else {
- return;
- };
-
- let pubkeys = conversation.metadata.participants.clone();
- let participants: Vec<&[u8; 32]> = pubkeys.iter().map(|p| p.bytes()).collect();
-
- // We should try and get more messages... this isn't ideal
- let chatroom_filter = chatroom_filter(participants, selected);
-
- let mut updated = false;
- {
- profiling::scope!("chatroom_filter");
- let results = match ndb.query(txn, &chatroom_filter, 500) {
- Ok(r) => r,
- Err(e) => {
- tracing::error!("problem with chatroom filter ndb::query: {e:?}");
- return;
- }
- };
-
- for res in results {
- let participants = get_participants(&res.note);
- let parts = ParticipantSetUnowned::new(participants);
- let cur_id = self
- .registry
- .get_or_insert(ConversationIdentifierUnowned::Nip17(parts));
-
- if cur_id != id {
- // this note isn't relevant to the current conversation, unfortunately...
- continue;
- }
-
- UnknownIds::update_from_note(txn, ndb, unknown_ids, note_cache, &res.note);
- updated |= conversation.ingest_kind_14(res.note, res.note_key);
- }
- }
-
- if updated {
- let latest = conversation.last_activity();
- refresh_order(&mut self.order, id, LatestMessage::Latest(latest));
- }
-
- self.active = Some(id);
- tracing::info!("Set active to {id}");
- }
-
- #[profiling::function]
- pub fn init_conversations(
- &mut self,
- ndb: &Ndb,
- txn: &Transaction,
- cur_acc: &Pubkey,
- note_cache: &mut NoteCache,
- unknown_ids: &mut UnknownIds,
- ) {
- let Some(results) = get_conversations(ndb, txn, cur_acc) else {
- tracing::warn!("Got no conversations from ndb");
- return;
- };
-
- tracing::trace!("Received {} conversations from ndb", results.len());
-
- for res in results {
- self.ingest_chatroom_msg(res.note, res.note_key, ndb, txn, note_cache, unknown_ids);
- }
- }
-
#[profiling::function]
pub fn ingest_chatroom_msg(
&mut self,
@@ -177,6 +100,11 @@ impl ConversationCache {
pub fn first_convo_id(&self) -> Option<ConversationId> {
Some(self.order.first()?.id)
}
+
+ /// Mutable access to the selected-account DM relay-list ensure state.
+ pub fn dm_relay_list_ensure_mut(&mut self) -> &mut DmListState {
+ &mut self.dm_relay_list_ensure
+ }
}
fn refresh_order(order: &mut Vec<ConversationOrder>, id: ConversationId, latest: LatestMessage) {
@@ -310,26 +238,12 @@ impl Default for ConversationCache {
conversations: HashMap::new(),
order: Vec::new(),
state: Default::default(),
+ dm_relay_list_ensure: Default::default(),
active: None,
}
}
}
-#[profiling::function]
-fn get_conversations<'a>(
- ndb: &Ndb,
- txn: &'a Transaction,
- cur_acc: &Pubkey,
-) -> Option<Vec<QueryResult<'a>>> {
- match ndb.query(txn, &conversation_filter(cur_acc), 500) {
- Ok(r) => Some(r),
- Err(e) => {
- tracing::error!("error fetching kind 14 messages: {e}");
- None
- }
- }
-}
-
#[derive(Clone, Debug, Default)]
pub struct ConversationMetadata {
pub title: Option<TitleMetadata>,
diff --git a/crates/notedeck_messages/src/lib.rs b/crates/notedeck_messages/src/lib.rs
@@ -3,20 +3,23 @@ pub mod convo_renderable;
pub mod loader;
pub mod nav;
pub mod nip17;
+mod relay_ensure;
+mod relay_prefetch;
pub mod ui;
use enostr::Pubkey;
use hashbrown::{HashMap, HashSet};
use nav::{process_messages_ui_response, Route};
-use nostrdb::{Subscription, Transaction};
+use nostrdb::{Ndb, Subscription, Transaction};
use notedeck::{
- try_process_events_core, ui::is_narrow, Accounts, App, AppContext, AppResponse, Router,
+ ui::is_narrow, Accounts, App, AppContext, AppResponse, RemoteApi, Router, SubKey, SubOwnerKey,
};
use crate::{
cache::{ConversationCache, ConversationListState, ConversationStates},
loader::{LoaderMsg, MessagesLoader},
nip17::conversation_filter,
+ relay_ensure::ensure_selected_account_dm_list,
ui::{login_nsec_prompt, messages::messages_ui},
};
@@ -53,8 +56,6 @@ impl Default for MessagesApp {
impl App for MessagesApp {
#[profiling::function]
fn update(&mut self, ctx: &mut AppContext<'_>, ui: &mut egui::Ui) -> AppResponse {
- try_process_events_core(ctx, ui.ctx(), |_, _| {});
-
let Some(cache) = self.messages.get_current_mut(ctx.accounts) else {
login_nsec_prompt(ui, ctx.i18n);
return AppResponse::none();
@@ -88,6 +89,8 @@ impl App for MessagesApp {
}
}
+ ensure_selected_account_dm_relay_list(ctx.ndb, &mut ctx.remote, ctx.accounts, cache);
+
match cache.state {
ConversationListState::Initializing => {
initialize(ctx, cache, is_narrow(ui.ctx()), &self.loader);
@@ -224,7 +227,12 @@ fn handle_loader_messages(
if cache.active.is_none() && !is_narrow {
if let Some(first) = cache.first_convo_id() {
- cache.active = Some(first);
+ open_conversation_with_prefetch(
+ &mut ctx.remote,
+ ctx.accounts,
+ cache,
+ first,
+ );
request_conversation_messages(
cache,
ctx.accounts.selected_account_pubkey(),
@@ -291,6 +299,58 @@ fn request_conversation_messages(
);
}
+/// Scoped-sub owner namespace for messages DM relay-list lifecycles.
+#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
+enum RelayListOwner {
+ Prefetch,
+ Ensure,
+}
+
+const RELAY_LIST_KEY: &str = "dm_relay_list";
+
+/// Stable owner for DM relay-list prefetch subscriptions per selected account.
+fn list_prefetch_owner_key(account_pk: Pubkey) -> SubOwnerKey {
+ SubOwnerKey::builder(RelayListOwner::Prefetch)
+ .with(account_pk)
+ .finish()
+}
+
+/// Stable owner for selected-account DM relay-list ensure subscriptions per selected account.
+fn list_ensure_owner_key(account_pk: Pubkey) -> SubOwnerKey {
+ SubOwnerKey::builder(RelayListOwner::Ensure)
+ .with(account_pk)
+ .finish()
+}
+
+/// Stable key for one participant's DM relay-list remote stream.
+pub fn list_fetch_sub_key(participant: &Pubkey) -> SubKey {
+ SubKey::builder(RELAY_LIST_KEY)
+ .with(*participant.bytes())
+ .finish()
+}
+
+#[profiling::function]
+pub(crate) fn ensure_selected_account_dm_relay_list(
+ ndb: &mut Ndb,
+ remote: &mut RemoteApi<'_>,
+ accounts: &Accounts,
+ cache: &mut ConversationCache,
+) {
+ ensure_selected_account_dm_list(ndb, remote, accounts, cache.dm_relay_list_ensure_mut())
+}
+
+/// Marks a conversation active and ensures participant relay-list prefetch.
+#[profiling::function]
+pub(crate) fn open_conversation_with_prefetch(
+ remote: &mut RemoteApi<'_>,
+ accounts: &Accounts,
+ cache: &mut ConversationCache,
+ conversation_id: cache::ConversationId,
+) {
+ cache.active = Some(conversation_id);
+ relay_prefetch::ensure_conversation_prefetch(remote, accounts, cache, conversation_id);
+}
+
/// Storage for conversations per account. Account management is performed by `Accounts`
#[derive(Default)]
struct ConversationsCtx {
diff --git a/crates/notedeck_messages/src/nav.rs b/crates/notedeck_messages/src/nav.rs
@@ -9,6 +9,7 @@ use crate::{
},
loader::MessagesLoader,
nip17::send_conversation_message,
+ open_conversation_with_prefetch,
};
#[derive(Clone, Debug)]
@@ -160,7 +161,7 @@ fn handle_messages_action(
));
cache.initialize_conversation(id, vec![recipient, *selected]);
- cache.active = Some(id);
+ open_conversation_with_prefetch(&mut ctx.remote, ctx.accounts, cache, id);
request_conversation_messages(
cache,
ctx.accounts.selected_account_pubkey(),
@@ -197,7 +198,7 @@ fn open_coversation_action(
loader: &MessagesLoader,
inflight_messages: &mut HashSet<ConversationId>,
) {
- cache.active = Some(id);
+ open_conversation_with_prefetch(&mut ctx.remote, ctx.accounts, cache, id);
request_conversation_messages(
cache,
ctx.accounts.selected_account_pubkey(),
diff --git a/crates/notedeck_messages/src/nip17/message.rs b/crates/notedeck_messages/src/nip17/message.rs
@@ -1,8 +1,9 @@
-use enostr::ClientMessage;
-use notedeck::AppContext;
+use nostrdb::Transaction;
+use notedeck::enostr::RelayId;
+use notedeck::{AppContext, RelayType};
use crate::cache::{ConversationCache, ConversationId};
-use crate::nip17::{build_rumor_json, giftwrap_message, OsRng};
+use crate::nip17::{build_rumor_json, giftwrap_message, query_participant_dm_relays, OsRng};
pub fn send_conversation_message(
conversation_id: ConversationId,
@@ -37,21 +38,37 @@ pub fn send_conversation_message(
return;
};
+ let txn = Transaction::new(ctx.ndb).expect("txn");
let mut rng = OsRng;
for participant in &conversation.metadata.participants {
- let Some(giftwrap_json) =
+ let Some(giftwrap_note) =
+ giftwrap_message(&mut rng, sender_secret, participant, &rumor_json)
 else {
 continue;
 };
 if participant == selected_kp.pubkey {
+ let Some(giftwrap_json) = giftwrap_note.json().ok() else {
+ continue;
+ };
+
 if let Err(e) = ctx.ndb.process_client_event(&giftwrap_json) {
 tracing::error!("Could not ingest event: {e:?}");
 }
 }
- match ClientMessage::event_json(giftwrap_json.clone()) {
- Ok(msg) => ctx.pool.send(&msg),
- Err(err) => tracing::error!("failed to build client message: {err}"),
+
+ let participant_relays = query_participant_dm_relays(ctx.ndb, &txn, participant);
+ let relay_type = if participant_relays.is_empty() {
+ RelayType::AccountsWrite
+ } else {
+ RelayType::Explicit(
+ participant_relays
+ .into_iter()
+ .map(RelayId::Websocket)
+ .collect(),
+ )
 };
+
+ let mut publisher = ctx.remote.publisher(ctx.accounts);
+ publisher.publish_note(&giftwrap_note, relay_type);
}
}
diff --git a/crates/notedeck_messages/src/nip17/mod.rs b/crates/notedeck_messages/src/nip17/mod.rs
@@ -1,6 +1,7 @@
pub mod message;
-use enostr::{FullKeypair, Pubkey, SecretKey};
+use enostr::{FullKeypair, NormRelayUrl, Pubkey, SecretKey};
+use hashbrown::HashSet;
pub use message::send_conversation_message;
pub use nostr::secp256k1::rand::rngs::OsRng;
use nostr::secp256k1::rand::Rng;
@@ -10,7 +11,7 @@ use nostr::{
nips::nip44,
util::JsonUtil,
};
-use nostrdb::{Filter, FilterBuilder, Note, NoteBuilder};
+use nostrdb::{Filter, FilterBuilder, Ndb, Note, NoteBuilder, Transaction};
use notedeck::get_p_tags;
fn build_rumor_json(
@@ -37,7 +38,7 @@ pub fn giftwrap_message(
sender_secret: &SecretKey,
recipient: &Pubkey,
rumor_json: &str,
-) -> Option<String> {
+) -> Option<Note<'static>> {
let Some(recipient_pk) = nostrcrate_pk(recipient) else {
tracing::warn!("failed to convert recipient pubkey {}", recipient);
return None;
@@ -79,7 +80,7 @@ pub fn giftwrap_message(
};
let wrap_created = randomized_timestamp(rng);
- build_giftwrap_json(&encrypted_seal, &wrap_keys, recipient, wrap_created)
+ build_giftwrap_note(&encrypted_seal, &wrap_keys, recipient, wrap_created)
}
fn build_seal_json(
@@ -99,12 +100,12 @@ fn build_seal_json(
.ok()
}
-fn build_giftwrap_json(
+fn build_giftwrap_note(
content: &str,
wrap_keys: &FullKeypair,
recipient: &Pubkey,
created_at: u64,
-) -> Option<String> {
+) -> Option<Note<'static>> {
let builder = NoteBuilder::new()
.kind(1059)
.content(content)
@@ -113,11 +114,7 @@ fn build_giftwrap_json(
.tag_str("p")
.tag_str(&recipient.hex());
- builder
- .sign(&wrap_keys.secret_key.secret_bytes())
- .build()?
- .json()
- .ok()
+ builder.sign(&wrap_keys.secret_key.secret_bytes()).build()
}
fn nostrcrate_pk(pk: &Pubkey) -> Option<PublicKey> {
@@ -173,6 +170,102 @@ pub fn chatroom_filter(participants: Vec<&[u8; 32]>, me: &[u8; 32]) -> Vec<Filte
.build()]
}
+/// Builds a filter for one participant's kind `10050` DM relay list.
+pub fn participant_dm_relay_list_filter(participant: &Pubkey) -> Filter {
+ FilterBuilder::new()
+ .kinds([10050])
+ .authors([participant.bytes()])
+ .limit(1)
+ .build()
+}
+
+/// Returns `true` when `note` is a kind `10050` DM relay-list authored by `participant`.
+pub fn is_participant_dm_relay_list(note: &Note<'_>, participant: &Pubkey) -> bool {
+ note.kind() == 10050 && note.pubkey() == participant.bytes()
+}
+
+/// Queries NDB for presence of one participant's kind `10050` DM relay list.
+pub fn has_participant_dm_relay_list(ndb: &Ndb, txn: &Transaction, participant: &Pubkey) -> bool {
+ let filter = participant_dm_relay_list_filter(participant);
+ let Ok(results) = ndb.query(txn, std::slice::from_ref(&filter), 1) else {
+ return false;
+ };
+
+ !results.is_empty()
+}
+
+/// Default relay URLs used when creating a new kind `10050` DM relay-list note.
+pub fn default_dm_relay_urls() -> &'static [&'static str] {
+ &["wss://relay.damus.io", "wss://nos.lol"]
+}
+
+/// Builds a signed kind `10050` DM relay-list note using default relay URLs.
+pub fn build_default_dm_relay_list_note(sender_secret: &SecretKey) -> Option<Note<'static>> {
+ let mut builder = NoteBuilder::new().kind(10050).content("");
+
+ for relay in default_dm_relay_urls() {
+ builder = builder.start_tag().tag_str("relay").tag_str(relay);
+ }
+
+ builder.sign(&sender_secret.secret_bytes()).build()
+}
+
+/// Parses a kind `10050` note into unique websocket relay URLs.
+pub fn parse_dm_relay_list_relays(note: &Note<'_>) -> Vec<NormRelayUrl> {
+ if note.kind() != 10050 {
+ return Vec::new();
+ }
+
+ let mut seen = HashSet::new();
+ let mut relays = Vec::new();
+
+ for tag in note.tags() {
+ if tag.count() < 2 {
+ continue;
+ }
+
+ let Some("relay") = tag.get_str(0) else {
+ continue;
+ };
+
+ let Some(url) = tag.get_str(1) else {
+ continue;
+ };
+
+ let Ok(norm_url) = NormRelayUrl::new(url) else {
+ continue;
+ };
+
+ if !seen.insert(norm_url.clone()) {
+ continue;
+ }
+
+ relays.push(norm_url);
+ }
+
+ relays
+}
+
+/// Queries NDB for one participant's latest kind `10050` relay list.
+///
+/// Returns explicit websocket relay URLs when available, else an empty vec.
+pub fn query_participant_dm_relays(
+ ndb: &Ndb,
+ txn: &Transaction,
+ participant: &Pubkey,
+) -> Vec<NormRelayUrl> {
+ let filter = participant_dm_relay_list_filter(participant);
+ let Ok(results) = ndb.query(txn, std::slice::from_ref(&filter), 1) else {
+ return Vec::new();
+ };
+
+ let Some(result) = results.first() else {
+ return Vec::new();
+ };
+
+ parse_dm_relay_list_relays(&result.note)
+}
+
// easily retrievable from Note<'a>
pub struct Nip17ChatMessage<'a> {
pub sender: &'a [u8; 32],
@@ -220,3 +313,73 @@ pub fn parse_chat_message<'a>(note: &Note<'a>) -> Option<Nip17ChatMessage<'a>> {
created_at: note.created_at(),
})
}
+
#[cfg(test)]
mod tests {
    use super::*;
    use nostrdb::NoteBuilder;

    /// Builds a signed kind `10050` note with one `relay` tag per entry in `relays`.
    fn relay_note(relays: &[&str]) -> Note<'static> {
        let signer = FullKeypair::generate();
        let mut builder = NoteBuilder::new().kind(10050).content("");
        for relay in relays {
            builder = builder.start_tag().tag_str("relay").tag_str(relay);
        }

        builder
            .sign(&signer.secret_key.secret_bytes())
            .build()
            .expect("relay note")
    }

    /// Verifies the relay-list filter targets kind `10050`, the participant author, and limit `1`.
    #[test]
    fn participant_dm_relay_list_filter_is_stable() {
        let participant = Pubkey::new([0x22; 32]);
        let actual = participant_dm_relay_list_filter(&participant);
        let expected = FilterBuilder::new()
            .kinds([10050])
            .authors([participant.bytes()])
            .limit(1)
            .build();

        // Compare JSON renderings so field ordering differences can't hide drift.
        assert_eq!(
            actual.json().expect("actual filter json"),
            expected.json().expect("expected filter json")
        );
    }

    /// Verifies relay parsing ignores invalid URLs and deduplicates repeated relay tags.
    #[test]
    fn parse_dm_relay_list_relays_dedupes_and_skips_invalid_urls() {
        let note = relay_note(&[
            "wss://relay-a.example.com",
            "notaurl",
            "wss://relay-a.example.com",
            "wss://relay-b.example.com",
        ]);

        // Fix: the argument was mojibake (`¬e`); it must be `&note`.
        let parsed = parse_dm_relay_list_relays(&note);
        assert_eq!(parsed.len(), 2);

        let actual: HashSet<NormRelayUrl> = HashSet::from_iter(parsed);
        let expected = HashSet::from_iter(
            ["wss://relay-a.example.com", "wss://relay-b.example.com"]
                .into_iter()
                .map(|relay| NormRelayUrl::new(relay).expect("norm relay")),
        );

        assert_eq!(actual, expected);
    }

    /// Verifies default DM relay-list note construction emits kind `10050` and relay tags.
    #[test]
    fn build_default_dm_relay_list_note_contains_default_relays() {
        let signer = FullKeypair::generate();
        let note = build_default_dm_relay_list_note(&signer.secret_key).expect("relay list note");

        assert_eq!(note.kind(), 10050);
        // Fix: the argument was mojibake (`¬e`); it must be `&note`.
        let urls = parse_dm_relay_list_relays(&note);
        assert!(!urls.is_empty());
    }
}
diff --git a/crates/notedeck_messages/src/relay_ensure.rs b/crates/notedeck_messages/src/relay_ensure.rs
@@ -0,0 +1,249 @@
+use enostr::Pubkey;
+use nostrdb::{Ndb, Subscription, Transaction};
+use notedeck::{
+ Accounts, RelaySelection, RelayType, RemoteApi, ScopedSubEoseStatus, ScopedSubIdentity,
+ SubConfig, SubKey, SubOwnerKey,
+};
+
+use crate::{
+ list_ensure_owner_key, list_fetch_sub_key,
+ nip17::{
+ build_default_dm_relay_list_note, is_participant_dm_relay_list,
+ participant_dm_relay_list_filter,
+ },
+};
+
/// Local view over the dependencies used by the DM relay-list ensure state machine.
struct EnsureListCtx<'a, 'remote> {
    // Local nostrdb handle for queries, subscriptions, and ingestion.
    ndb: &'a mut Ndb,
    // App-facing transport facade for scoped subs and note publishing.
    remote: &'a mut RemoteApi<'remote>,
    // Account state; supplies the selected account pubkey and keys.
    accounts: &'a Accounts,
    // Owner slot key scoping the ensure subscription to the selected account.
    owner_key: SubOwnerKey,
}
+
/// Pure builder for the selected account's own DM relay-list ensure scoped-sub spec.
fn dm_relay_list_spec(selected_account: &Pubkey) -> SubConfig {
    SubConfig {
        // Query across the selected account's configured read relays.
        relays: RelaySelection::AccountsRead,
        filters: vec![participant_dm_relay_list_filter(selected_account)],
        // NOTE(review): non-transparent mode appears intentional here — confirm.
        use_transparent: false,
    }
}
+
/// Drives the selected-account DM relay-list ensure state machine one step.
#[profiling::function]
pub(crate) fn ensure_selected_account_dm_list(
    ndb: &mut Ndb,
    remote: &mut RemoteApi<'_>,
    accounts: &Accounts,
    ensure_state: &mut DmListState,
) {
    // Nothing to do once the list has already been found/published.
    let DmListState::Finding(state) = ensure_state else {
        return;
    };

    let selected_account = *accounts.selected_account_pubkey();
    let mut ctx = EnsureListCtx {
        ndb,
        remote,
        accounts,
        owner_key: list_ensure_owner_key(selected_account),
    };

    // Advance the current finding phase; each handler reports whether the
    // list can now be considered found.
    let list_found = match &state {
        ListFindingState::Idle => handle_idle(&mut ctx, state),
        ListFindingState::Waiting {
            remote_sub_key,
            local_sub,
        } => handle_waiting(&mut ctx, *remote_sub_key, *local_sub),
    };

    if list_found {
        // Transition to `Found` and tear down the local ensure subscription.
        set_list_found(&mut ctx, ensure_state);
    }
}
+
/// Whether the selected-account DM relay list is now considered found.
type ListFound = bool;
+
+/// Handles the `Idle` ensure phase for the selected account DM relay list.
+fn handle_idle(ctx: &mut EnsureListCtx<'_, '_>, ensure_state: &mut ListFindingState) -> ListFound {
+ tracing::debug!("In idle state");
+ let pk = ctx.accounts.selected_account_pubkey();
+ let filter = participant_dm_relay_list_filter(pk);
+ let local_sub = match ctx.ndb.subscribe(std::slice::from_ref(&filter)) {
+ Ok(sub) => Some(sub),
+ Err(err) => {
+ tracing::error!("failed to subscribe to local dm relay list: {err}");
+ None
+ }
+ };
+
+ let remote_sub_key = list_fetch_sub_key(pk);
+ let spec = dm_relay_list_spec(pk);
+ let identity = ScopedSubIdentity::account(ctx.owner_key, remote_sub_key);
+ let _ = ctx
+ .remote
+ .scoped_subs(ctx.accounts)
+ .ensure_sub(identity, spec);
+
+ tracing::info!("waiting for selected account dm relay list ensure");
+ *ensure_state = ListFindingState::Waiting {
+ remote_sub_key,
+ local_sub,
+ };
+
+ false
+}
+
/// Handles the `Waiting` ensure phase for the selected account DM relay list.
fn handle_waiting(
    ctx: &mut EnsureListCtx<'_, '_>,
    remote_sub_key: SubKey,
    local_sub: Option<Subscription>,
) -> ListFound {
    let pk = ctx.accounts.selected_account_pubkey();
    // Drain the local ndb subscription; a hit is logged only, since we still
    // wait for every relay to EOSE before deciding what to publish.
    if let Some(local_sub) = local_sub {
        if received_dm_relay_list_from_poll(ctx.ndb, local_sub, pk) {
            tracing::debug!(
                "found selected account dm relay list on ndb poll; still waiting for remote EOSE"
            );
        }
    }

    // Keep waiting until the remote scoped sub reports all-EOSE.
    if !all_eosed(ctx, remote_sub_key) {
        return false;
    }

    republish_existing_or_publish_default_list(ctx, pk)
}
+
+fn publish_default_list(ctx: &mut EnsureListCtx<'_, '_>) -> ListFound {
+ let Some(secret_key) = ctx.accounts.get_selected_account().key.secret_key.as_ref() else {
+ return false;
+ };
+
+ let Some(note) = build_default_dm_relay_list_note(secret_key) else {
+ return false;
+ };
+
+ let Ok(note_json) = note.json() else {
+ return false;
+ };
+
+ if let Err(err) = ctx.ndb.process_client_event(¬e_json) {
+ tracing::error!("failed to ingest default dm relay list: {err}");
+ return false;
+ }
+
+ let mut publisher = ctx.remote.publisher(ctx.accounts);
+ publisher.publish_note(¬e, RelayType::AccountsWrite);
+
+ true
+}
+
/// After all-EOSE, republish the latest local selected-account kind `10050` if present.
///
/// Falls back to publishing a default kind `10050` when no local list exists.
fn republish_existing_or_publish_default_list(
    ctx: &mut EnsureListCtx<'_, '_>,
    selected_account: &Pubkey,
) -> ListFound {
    let filter = participant_dm_relay_list_filter(selected_account);
    // Transaction creation failing indicates ndb misuse; treated as a bug.
    let txn = Transaction::new(ctx.ndb).expect("txn");

    let Ok(results) = ctx.ndb.query(&txn, std::slice::from_ref(&filter), 1) else {
        tracing::error!("failed to query selected account dm relay list during ensure");
        return false;
    };

    match results.first() {
        Some(result) => {
            // Re-broadcast the existing list so relays that missed it catch up.
            tracing::info!("all relays eosed; republishing existing local dm relay list note");
            let mut publisher = ctx.remote.publisher(ctx.accounts);
            publisher.publish_note(&result.note, RelayType::AccountsWrite);
            true
        }
        None => {
            tracing::info!(
                "all relays eosed; no local dm relay list note found, publishing default list"
            );
            publish_default_list(ctx)
        }
    }
}
+
+/// Returns true when the selected-account DM relay-list ensure scoped sub reached all-EOSE.
+fn all_eosed(ctx: &mut EnsureListCtx<'_, '_>, remote_sub_key: SubKey) -> bool {
+ let scoped_subs = ctx.remote.scoped_subs(ctx.accounts);
+ let identity = ScopedSubIdentity::account(ctx.owner_key, remote_sub_key);
+ matches!(
+ scoped_subs.sub_eose_status(identity),
+ ScopedSubEoseStatus::Live(live) if live.all_eosed
+ )
+}
+
+/// Returns true when the ensure local subscription delivers a selected-account kind `10050` note.
+fn received_dm_relay_list_from_poll(
+ ndb: &Ndb,
+ local_sub: Subscription,
+ selected_account: &Pubkey,
+) -> bool {
+ let note_keys = ndb.poll_for_notes(local_sub, 1);
+
+ let Some(key) = note_keys.first() else {
+ return false;
+ };
+
+ let txn = Transaction::new(ndb).expect("txn");
+ let Ok(note) = ndb.get_note_by_key(&txn, *key) else {
+ return false;
+ };
+
+ is_participant_dm_relay_list(¬e, selected_account)
+}
+
/// Moves DM relay-list ensure state to `Done` and tears down the local ensure subscription.
///
/// The remote scoped sub is intentionally left declared so it stays alive for the account session
/// and can be shared with later conversation prefetch activity.
fn set_list_found(ctx: &mut EnsureListCtx<'_, '_>, list_state: &mut DmListState) {
    // Swap the terminal state in first, then inspect what was replaced.
    let prior = std::mem::replace(list_state, DmListState::Found);
    let DmListState::Finding(ListFindingState::Waiting {
        remote_sub_key: _,
        local_sub,
    }) = prior
    else {
        // `Idle` (or already `Found`) holds no local subscription to clean up.
        return;
    };

    let Some(local_sub) = local_sub else {
        return;
    };

    // The local ndb sub is only needed while waiting; drop it now.
    if let Err(err) = ctx.ndb.unsubscribe(local_sub) {
        tracing::error!("failed to unsubscribe dm relay-list local sub: {err}");
    }
}
+
/// Active (non-terminal) phases for selected-account DM relay-list ensure.
#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)]
pub enum ListFindingState {
    /// No work started yet; the next tick subscribes locally and remotely.
    #[default]
    Idle,
    /// Subscriptions are live; waiting for the remote sub to reach all-EOSE.
    Waiting {
        /// Key of the remote scoped subscription fetching the list.
        remote_sub_key: SubKey,
        /// Local ndb subscription, when it could be created.
        local_sub: Option<Subscription>,
    },
}
+
/// Ensure-state for the selected account's kind `10050` DM relay list.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum DmListState {
    /// Still locating (or creating) the relay list; see [`ListFindingState`].
    Finding(ListFindingState),
    /// The list exists locally and has been (re)published; terminal state.
    Found,
}

impl Default for DmListState {
    // A fresh session always starts by looking for the list.
    fn default() -> Self {
        Self::Finding(ListFindingState::Idle)
    }
}
diff --git a/crates/notedeck_messages/src/relay_prefetch.rs b/crates/notedeck_messages/src/relay_prefetch.rs
@@ -0,0 +1,62 @@
+use enostr::Pubkey;
+use notedeck::{
+ Accounts, RelaySelection, RemoteApi, ScopedSubApi, ScopedSubIdentity, SubConfig, SubOwnerKey,
+};
+
+use crate::{
+ cache::{ConversationCache, ConversationId},
+ list_fetch_sub_key, list_prefetch_owner_key,
+ nip17::participant_dm_relay_list_filter,
+};
+
/// Pure builder for the scoped-sub spec used to prefetch one participant relay list.
fn participant_relay_prefetch_spec(participant: &Pubkey) -> SubConfig {
    SubConfig {
        // Fetch from the selected account's configured read relays.
        relays: RelaySelection::AccountsRead,
        filters: vec![participant_dm_relay_list_filter(participant)],
        // NOTE(review): non-transparent mode appears intentional here — confirm.
        use_transparent: false,
    }
}
+
+/// Ensures remote prefetch subscriptions for one conversation's participants.
+pub(crate) fn ensure_conversation_prefetch(
+ remote: &mut RemoteApi<'_>,
+ accounts: &Accounts,
+ cache: &ConversationCache,
+ conversation_id: ConversationId,
+) {
+ let Some(conversation) = cache.get(conversation_id) else {
+ return;
+ };
+
+ ensure_participant_prefetch(remote, accounts, &conversation.metadata.participants);
+}
+
/// Ensures remote prefetch subscriptions for all provided participants.
pub(crate) fn ensure_participant_prefetch(
    remote: &mut RemoteApi<'_>,
    accounts: &Accounts,
    participants: &[Pubkey],
) {
    if participants.is_empty() {
        return;
    }

    // Scope all prefetch subs to the currently selected account's owner slot.
    let account_pk = *accounts.selected_account_pubkey();
    let owner = list_prefetch_owner_key(account_pk);
    let mut scoped_subs = remote.scoped_subs(accounts);
    ensure_participant_subs(&mut scoped_subs, owner, participants);
}
+
+fn ensure_participant_subs(
+ scoped_subs: &mut ScopedSubApi<'_, '_>,
+ owner: SubOwnerKey,
+ participants: &[Pubkey],
+) {
+ for participant in participants {
+ let key = list_fetch_sub_key(participant);
+ let spec = participant_relay_prefetch_spec(participant);
+ let identity = ScopedSubIdentity::account(owner, key);
+ let _ = scoped_subs.ensure_sub(identity, spec);
+ }
+}
diff --git a/crates/notedeck_nostrverse/src/lib.rs b/crates/notedeck_nostrverse/src/lib.rs
@@ -21,10 +21,12 @@ pub use room_state::{
};
pub use room_view::{NostrverseResponse, render_editing_panel, show_room_view};
-use enostr::Pubkey;
+use enostr::{NormRelayUrl, Pubkey, RelayId};
use glam::Vec3;
use nostrdb::Filter;
-use notedeck::{AppContext, AppResponse};
+use notedeck::{
+ AppContext, AppResponse, RelaySelection, ScopedSubIdentity, SubConfig, SubKey, SubOwnerKey,
+};
use renderbud::Transform;
use egui_wgpu::wgpu;
@@ -39,6 +41,47 @@ fn demo_pubkey() -> Pubkey {
.unwrap_or_else(|_| Pubkey::from_hex(FALLBACK_PUBKEY_HEX).unwrap())
}
/// Scoped-sub identity for nostrverse's dedicated relay room/presence feed.
fn nostrverse_remote_sub_identity() -> ScopedSubIdentity {
    // Fixed string keys so the same owner slot is reused on every call.
    ScopedSubIdentity::account(
        SubOwnerKey::new("nostrverse-owner"),
        SubKey::new("nostrverse-room-presence"),
    )
}
+
/// Publish a locally ingested note to the dedicated nostrverse relay.
fn publish_ingested_note(
    publisher: &mut notedeck::ExplicitPublishApi<'_, '_>,
    relay_url: &NormRelayUrl,
    note: &nostrdb::Note<'_>,
) {
    // Target exactly one relay: the configured nostrverse websocket relay.
    publisher.publish_note(note, vec![RelayId::Websocket(relay_url.clone())]);
}
+
+fn configured_relay_url() -> NormRelayUrl {
+ let raw = std::env::var("NOSTRVERSE_RELAY")
+ .unwrap_or_else(|_| NostrverseApp::DEFAULT_RELAY.to_string());
+ match NormRelayUrl::new(&raw) {
+ Ok(url) => url,
+ Err(err) => {
+ tracing::warn!(
+ "Invalid NOSTRVERSE_RELAY '{}': {err:?}; falling back to {}",
+ raw,
+ NostrverseApp::DEFAULT_RELAY
+ );
+ NormRelayUrl::new(NostrverseApp::DEFAULT_RELAY).expect("default nostrverse relay URL")
+ }
+ }
+}
+
/// Filter matching nostrverse room events (kind `kinds::ROOM`).
fn room_filter() -> Filter {
    Filter::new().kinds([kinds::ROOM as u64]).build()
}
+
/// Filter matching nostrverse presence events (kind `kinds::PRESENCE`).
fn presence_filter() -> Filter {
    Filter::new().kinds([kinds::PRESENCE as u64]).build()
}
+
/// Avatar scale: water bottle model is ~0.26m, scaled to human height (~1.8m)
const AVATAR_SCALE: f32 = 7.0;
/// How fast the avatar yaw lerps toward the target (higher = faster)
@@ -104,9 +147,7 @@ pub struct NostrverseApp {
/// Model download/cache manager (initialized lazily in initialize())
model_cache: Option<model_cache::ModelCache>,
/// Dedicated relay URL for multiplayer sync (from NOSTRVERSE_RELAY env)
- relay_url: Option<String>,
- /// Pending relay subscription ID — Some means we still need to send REQ
- pending_relay_sub: Option<String>,
+ relay_url: NormRelayUrl,
}
impl NostrverseApp {
@@ -119,9 +160,7 @@ impl NostrverseApp {
let device = render_state.map(|rs| rs.device.clone());
let queue = render_state.map(|rs| rs.queue.clone());
- let relay_url = Some(
- std::env::var("NOSTRVERSE_RELAY").unwrap_or_else(|_| Self::DEFAULT_RELAY.to_string()),
- );
+ let relay_url = configured_relay_url();
let space_naddr = space_ref.to_naddr();
Self {
@@ -140,7 +179,6 @@ impl NostrverseApp {
start_time: std::time::Instant::now(),
model_cache: None,
relay_url,
- pending_relay_sub: None,
}
}
@@ -150,38 +188,6 @@ impl NostrverseApp {
Self::new(space_ref, render_state)
}
- /// Send a client message to the dedicated relay, if configured.
- fn send_to_relay(&self, pool: &mut enostr::RelayPool, msg: &enostr::ClientMessage) {
- if let Some(relay_url) = &self.relay_url {
- pool.send_to(msg, relay_url);
- }
- }
-
- /// Send the relay subscription once the relay is connected.
- fn maybe_send_relay_sub(&mut self, pool: &mut enostr::RelayPool) {
- let (Some(sub_id), Some(relay_url)) = (&self.pending_relay_sub, &self.relay_url) else {
- return;
- };
-
- let connected = pool
- .relays
- .iter()
- .any(|r| r.url() == relay_url && matches!(r.status(), enostr::RelayStatus::Connected));
-
- if !connected {
- return;
- }
-
- let room_filter = Filter::new().kinds([kinds::ROOM as u64]).build();
- let presence_filter = Filter::new().kinds([kinds::PRESENCE as u64]).build();
-
- let req = enostr::ClientMessage::req(sub_id.clone(), vec![room_filter, presence_filter]);
- pool.send_to(&req, relay_url);
-
- tracing::info!("Sent nostrverse subscription to {}", relay_url);
- self.pending_relay_sub = None;
- }
-
/// Load a glTF model and return its handle
fn load_model(&self, path: &str) -> Option<renderbud::Model> {
let renderer = self.renderer.as_ref()?;
@@ -198,7 +204,7 @@ impl NostrverseApp {
}
/// Initialize: ingest demo space into local nostrdb and subscribe.
- fn initialize(&mut self, ctx: &mut AppContext<'_>, egui_ctx: &egui::Context) {
+ fn initialize(&mut self, ctx: &mut AppContext<'_>) {
if self.initialized {
return;
}
@@ -211,19 +217,21 @@ impl NostrverseApp {
self.room_sub = Some(subscriptions::RoomSubscription::new(ctx.ndb));
self.presence_sub = Some(subscriptions::PresenceSubscription::new(ctx.ndb));
- // Add dedicated relay to pool (subscription sent on connect in maybe_send_relay_sub)
- if let Some(relay_url) = &self.relay_url {
- let egui_ctx = egui_ctx.clone();
- if let Err(e) = ctx
- .pool
- .add_url(relay_url.clone(), move || egui_ctx.request_repaint())
- {
- tracing::error!("Failed to add nostrverse relay {}: {}", relay_url, e);
- } else {
- tracing::info!("Added nostrverse relay: {}", relay_url);
- self.pending_relay_sub = Some(format!("nostrverse-{}", uuid::Uuid::new_v4()));
- }
- }
+ // Declare remote room/presence feed on the dedicated relay.
+ let relays = std::iter::once(self.relay_url.clone()).collect();
+ let config = SubConfig {
+ relays: RelaySelection::Explicit(relays),
+ filters: vec![room_filter(), presence_filter()],
+ use_transparent: false,
+ };
+ let _ = ctx
+ .remote
+ .scoped_subs(ctx.accounts)
+ .set_sub(nostrverse_remote_sub_identity(), config);
+ tracing::info!(
+ "Declared nostrverse scoped relay subscription on {}",
+ self.relay_url
+ );
// Try to load an existing space from nostrdb first
let txn = nostrdb::Transaction::new(ctx.ndb).expect("txn");
@@ -241,8 +249,9 @@ impl NostrverseApp {
if let Some(kp) = ctx.accounts.selected_filled() {
let builder = nostr_events::build_space_event(&space, &self.state.space_ref.id);
- if let Some((msg, _id)) = nostr_events::ingest_event(builder, ctx.ndb, kp) {
- self.send_to_relay(ctx.pool, &msg);
+ if let Some(note) = nostr_events::ingest_event(builder, ctx.ndb, kp) {
+ let mut publisher = ctx.remote.publisher_explicit();
+ publish_ingested_note(&mut publisher, &self.relay_url, &note);
}
}
// room_sub (set up above) will pick up the ingested event
@@ -373,9 +382,9 @@ impl NostrverseApp {
let space = convert::build_space(info, &self.state.objects);
let builder = nostr_events::build_space_event(&space, &self.state.space_ref.id);
- if let Some((msg, id)) = nostr_events::ingest_event(builder, ctx.ndb, kp) {
- self.last_save_id = Some(id);
- self.send_to_relay(ctx.pool, &msg);
+ if let Some(note) = nostr_events::ingest_event(builder, ctx.ndb, kp) {
+ self.last_save_id = Some(*note.id());
+ publish_ingested_note(&mut ctx.remote.publisher_explicit(), &self.relay_url, &note);
}
tracing::info!("Saved space '{}'", self.state.space_ref.id);
}
@@ -520,11 +529,11 @@ impl NostrverseApp {
.map(|u| u.position)
.unwrap_or(Vec3::ZERO);
- if let Some(msg) =
+ if let Some(note) =
self.presence_pub
.maybe_publish(ctx.ndb, kp, &self.space_naddr, self_pos, now)
{
- self.send_to_relay(ctx.pool, &msg);
+ publish_ingested_note(&mut ctx.remote.publisher_explicit(), &self.relay_url, &note);
}
}
@@ -638,11 +647,7 @@ impl NostrverseApp {
impl notedeck::App for NostrverseApp {
fn update(&mut self, ctx: &mut AppContext<'_>, ui: &mut egui::Ui) -> AppResponse {
// Initialize on first frame
- let egui_ctx = ui.ctx().clone();
- self.initialize(ctx, &egui_ctx);
-
- // Send relay subscription once connected
- self.maybe_send_relay_sub(ctx.pool);
+ self.initialize(ctx);
// Poll for space event updates
self.poll_space_updates(ctx.ndb);
diff --git a/crates/notedeck_nostrverse/src/nostr_events.rs b/crates/notedeck_nostrverse/src/nostr_events.rs
@@ -131,20 +131,18 @@ pub fn get_presence_space<'a>(note: &'a Note<'a>) -> Option<&'a str> {
}
/// Sign and ingest a nostr event into the local nostrdb.
-/// Returns the ClientMessage (for optional relay publishing) and
-/// the 32-byte event ID on success.
+///
+/// Returns the built note on success so callers can publish it directly.
pub fn ingest_event(
builder: NoteBuilder<'_>,
ndb: &Ndb,
kp: FilledKeypair,
-) -> Option<(enostr::ClientMessage, [u8; 32])> {
+) -> Option<Note<'static>> {
let note = builder
.sign(&kp.secret_key.secret_bytes())
.build()
.expect("build note");
- let id = *note.id();
-
+ let Ok(event) = enostr::ClientMessage::event(&note) else {
tracing::error!("ingest_event: failed to build client message");
return None;
@@ -157,7 +155,7 @@ pub fn ingest_event(
let _ = ndb.process_event_with(&json, nostrdb::IngestMetadata::new().client(true));
- Some((event, id))
+ Some(note)
}
#[cfg(test)]
diff --git a/crates/notedeck_nostrverse/src/presence.rs b/crates/notedeck_nostrverse/src/presence.rs
@@ -125,7 +125,8 @@ impl PresencePublisher {
}
/// Maybe publish a presence heartbeat.
- /// Returns the ClientMessage if published (for optional relay forwarding).
+ ///
+ /// Returns the ingested note if published so the caller can forward it.
pub fn maybe_publish(
&mut self,
ndb: &Ndb,
@@ -133,7 +134,7 @@ impl PresencePublisher {
room_naddr: &str,
position: Vec3,
now: f64,
- ) -> Option<enostr::ClientMessage> {
+ ) -> Option<nostrdb::Note<'static>> {
let velocity = self.compute_velocity(position, now);
// Always update position sample for velocity computation
@@ -148,7 +149,7 @@ impl PresencePublisher {
let result = nostr_events::ingest_event(builder, ndb, kp);
self.record_publish(position, velocity, now);
- result.map(|(msg, _id)| msg)
+ result
}
}