@claw-network/node 0.2.2 → 0.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/api/routes/messaging.d.ts +13 -0
- package/dist/api/routes/messaging.d.ts.map +1 -0
- package/dist/api/routes/messaging.js +181 -0
- package/dist/api/routes/messaging.js.map +1 -0
- package/dist/api/server.d.ts +1 -0
- package/dist/api/server.d.ts.map +1 -1
- package/dist/api/server.js +6 -0
- package/dist/api/server.js.map +1 -1
- package/dist/api/types.d.ts +2 -0
- package/dist/api/types.d.ts.map +1 -1
- package/dist/api/types.js.map +1 -1
- package/dist/api/ws-messaging.d.ts +23 -0
- package/dist/api/ws-messaging.d.ts.map +1 -0
- package/dist/api/ws-messaging.js +124 -0
- package/dist/api/ws-messaging.js.map +1 -0
- package/dist/index.d.ts +8 -5
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +102 -22
- package/dist/index.js.map +1 -1
- package/dist/services/message-store.d.ts +90 -0
- package/dist/services/message-store.d.ts.map +1 -0
- package/dist/services/message-store.js +221 -0
- package/dist/services/message-store.js.map +1 -0
- package/dist/services/messaging-service.d.ts +174 -0
- package/dist/services/messaging-service.d.ts.map +1 -0
- package/dist/services/messaging-service.js +705 -0
- package/dist/services/messaging-service.js.map +1 -0
- package/package.json +4 -2
|
@@ -0,0 +1,705 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* MessagingService — orchestrates P2P direct messaging.
|
|
3
|
+
*
|
|
4
|
+
* Responsibilities:
|
|
5
|
+
* - Send messages to target DIDs via libp2p stream protocol
|
|
6
|
+
* - Receive inbound messages and store in inbox
|
|
7
|
+
* - Queue messages for offline peers in outbox, deliver on reconnect
|
|
8
|
+
* - Maintain DID → PeerId mapping via announce protocol
|
|
9
|
+
* - Periodic TTL cleanup of expired messages
|
|
10
|
+
*/
|
|
11
|
+
import { generateX25519Keypair, x25519SharedSecret, hkdfSha256, encryptAes256Gcm, decryptAes256Gcm, bytesToHex, hexToBytes, } from '@claw-network/core';
|
|
12
|
+
import { createLogger } from '../logger.js';
|
|
13
|
+
import { gzipSync, gunzipSync } from 'node:zlib';
|
|
14
|
+
// ── Constants ────────────────────────────────────────────────────
/** libp2p stream protocol id for direct messages. */
const PROTO_DM = '/clawnet/1.0.0/dm';
/** libp2p stream protocol id used by peers to announce their DID. */
const PROTO_DID_ANNOUNCE = '/clawnet/1.0.0/did-announce';
/** libp2p stream protocol id for delivery receipts. */
const PROTO_RECEIPT = '/clawnet/1.0.0/receipt';
/** Maximum payload size in bytes (64 KB). */
const MAX_PAYLOAD_BYTES = 65_536;
/** Cleanup interval for expired messages (5 minutes). */
const CLEANUP_INTERVAL_MS = 5 * 60_000;
/** Max attempts before giving up on an outbox message. */
const MAX_DELIVERY_ATTEMPTS = 50;
/** Rate limit: max messages per DID per minute. */
const RATE_LIMIT_PER_MIN = 600;
/** Rate limit window in milliseconds (1 minute). */
const RATE_LIMIT_WINDOW_MS = 60_000;
/** Inbound P2P rate limit: max inbound messages per peer per minute. */
const INBOUND_RATE_LIMIT = 300;
/** Interval at which empty rate-limit buckets are pruned (2 minutes). */
const RATE_BUCKET_GC_INTERVAL_MS = 2 * 60_000;
/** Maximum concurrency for multicast delivery. */
const MULTICAST_CONCURRENCY = 20;
/** Base delay for exponential backoff in outbox retry (ms). */
const OUTBOX_RETRY_BASE_MS = 1_000;
/** Maximum backoff delay for outbox retry (ms). */
const OUTBOX_RETRY_MAX_MS = 60_000;
/** Valid DID format: did:claw:<multibase-base58btc-encoded-key>. */
const DID_PATTERN = /^did:claw:z[1-9A-HJ-NP-Za-km-z]{32,64}$/;
/** Payload size threshold for automatic gzip compression (1 KB). */
const COMPRESSION_THRESHOLD_BYTES = 1024;
/** HKDF info tag for E2E messaging encryption (domain separation for key derivation). */
const E2E_MSG_INFO = Buffer.from('clawnet:e2e-msg:v1', 'utf-8');
|
|
44
|
+
/** Priority levels — higher number = higher priority. */
export var MessagePriority;
(function (enumObj) {
    // Build the forward (name → number) and reverse (number → name)
    // mappings together, mirroring the object shape TypeScript emits
    // for a numeric enum.
    const levels = ['LOW', 'NORMAL', 'HIGH', 'URGENT'];
    levels.forEach((label, value) => {
        enumObj[enumObj[label] = value] = label;
    });
})(MessagePriority || (MessagePriority = {}));
|
|
52
|
+
// ── Helpers ──────────────────────────────────────────────────────
/**
 * Drain an async-iterable stream source into a single Buffer.
 *
 * Enforces `maxBytes` incrementally while reading, so an oversized
 * stream is rejected before it is fully buffered in memory.
 *
 * @param {AsyncIterable} source - stream source yielding Uint8Array
 *   chunks (or libp2p byte-list chunks exposing `subarray()`).
 * @param {number} [maxBytes] - hard cap on total bytes read.
 * @returns {Promise<Buffer>} all chunks concatenated.
 * @throws {Error} when the cumulative size exceeds `maxBytes`.
 */
async function readStream(source, maxBytes = MAX_PAYLOAD_BYTES * 2) {
    const collected = [];
    let received = 0;
    for await (const piece of source) {
        // libp2p may hand us a Uint8ArrayList; normalize to a plain view.
        const view = piece instanceof Uint8Array ? piece : piece.subarray();
        received += view.length;
        if (received > maxBytes) {
            throw new Error(`Stream exceeded size limit: ${received} > ${maxBytes}`);
        }
        collected.push(Buffer.from(view));
    }
    return Buffer.concat(collected);
}
|
|
67
|
+
/**
 * Write a UTF-8 JSON string to a stream sink.
 *
 * The sink is invoked with a single-chunk async iterable, matching the
 * libp2p `sink(source)` calling convention.
 *
 * @param {(source: AsyncIterable<Buffer>) => Promise<void>} sink
 * @param {string} data - UTF-8 string (typically serialized JSON).
 */
async function writeStream(sink, data) {
    const bytes = Buffer.from(data, 'utf-8');
    async function* singleChunk() {
        yield bytes;
    }
    await sink(singleChunk());
}
|
|
74
|
+
// ── Service ──────────────────────────────────────────────────────
/**
 * Orchestrates P2P direct messaging over libp2p stream protocols.
 *
 * Collaborators (both injected):
 * - `p2p`: transport facade exposing handleProtocol/unhandleProtocol,
 *   newStream, onPeerDisconnect and getConnections.
 * - `store`: persistence layer for the inbox/outbox (add/consume/
 *   cleanup/attempt-tracking).
 *
 * See the file header for the high-level responsibility list.
 */
export class MessagingService {
    // Structured logger (created in the constructor at 'info' level).
    log;
    // Inbox/outbox message store (injected).
    store;
    // P2P transport facade (injected).
    p2p;
    // This node's own DID; used as sourceDid on all outgoing messages.
    localDid;
    // Handle for the periodic inbox/outbox TTL-cleanup interval.
    cleanupTimer;
    /**
     * DID → PeerId mapping. Populated via the did-announce protocol when
     * peers connect. This is a best-effort cache; entries are never evicted
     * but may become stale when peers go offline.
     */
    didToPeerId = new Map();
    // Reverse of didToPeerId: PeerId → DID (same staleness caveats).
    peerIdToDid = new Map();
    /** WebSocket subscribers that receive real-time inbox pushes. */
    subscribers = new Set();
    /** Sliding-window rate limiter: DID → sorted array of timestamps (ms). */
    rateBuckets = new Map();
    /** Inbound rate limiter: peerId → sorted array of timestamps (ms). */
    inboundRateBuckets = new Map();
    /** Timer for periodic rate-bucket garbage collection. */
    rateBucketGcTimer;
    /**
     * @param p2p - transport facade (see class doc).
     * @param store - message store (see class doc).
     * @param localDid - this node's DID.
     */
    constructor(p2p, store, localDid) {
        this.log = createLogger({ level: 'info' });
        this.p2p = p2p;
        this.store = store;
        this.localDid = localDid;
    }
    // ── Lifecycle ──────────────────────────────────────────────────
    /**
     * Register protocol handlers, announce our DID to already-connected
     * peers, and start the periodic cleanup/GC timers. Idempotency is not
     * guarded — call once per instance.
     */
    async start() {
        // Register stream protocol handlers. Handlers fire-and-forget
        // (void) because libp2p does not await them; each handler does
        // its own error containment.
        await this.p2p.handleProtocol(PROTO_DM, (incoming) => {
            void this.handleInboundMessage(incoming);
        });
        await this.p2p.handleProtocol(PROTO_DID_ANNOUNCE, (incoming) => {
            void this.handleDidAnnounce(incoming);
        });
        await this.p2p.handleProtocol(PROTO_RECEIPT, (incoming) => {
            void this.handleDeliveryReceipt(incoming);
        });
        // NOTE(review): only a disconnect hook is registered here; new-peer
        // announcements happen via onPeerConnected(), which the host must
        // wire up — confirm the caller does so.
        this.p2p.onPeerDisconnect(() => {
            // No-op for now; outbox delivery is handled via flush on connect.
        });
        // Announce our DID to all currently connected peers (best-effort,
        // deliberately not awaited so start() returns promptly).
        void this.announceToAll();
        // Periodic cleanup of expired messages
        this.cleanupTimer = setInterval(() => {
            try {
                this.store.cleanupInbox();
                this.store.cleanupOutbox();
            }
            catch {
                /* best-effort */
            }
        }, CLEANUP_INTERVAL_MS);
        // Periodic GC for rate-limit buckets to prevent memory leaks
        this.rateBucketGcTimer = setInterval(() => {
            this.pruneRateBuckets();
        }, RATE_BUCKET_GC_INTERVAL_MS);
        this.log.info('[messaging] service started', { localDid: this.localDid });
    }
    /**
     * Stop timers, unregister protocol handlers and drop subscribers.
     * Safe to call even if start() partially failed.
     */
    async stop() {
        if (this.cleanupTimer) {
            clearInterval(this.cleanupTimer);
            this.cleanupTimer = undefined;
        }
        if (this.rateBucketGcTimer) {
            clearInterval(this.rateBucketGcTimer);
            this.rateBucketGcTimer = undefined;
        }
        try {
            await this.p2p.unhandleProtocol(PROTO_DM);
        }
        catch { /* ignore */ }
        try {
            await this.p2p.unhandleProtocol(PROTO_DID_ANNOUNCE);
        }
        catch { /* ignore */ }
        try {
            await this.p2p.unhandleProtocol(PROTO_RECEIPT);
        }
        catch { /* ignore */ }
        this.subscribers.clear();
    }
    // ── Public API ─────────────────────────────────────────────────
    /**
     * Send a message to a target DID.
     * If the target peer is online and reachable, delivers directly.
     * Otherwise queues in outbox for later delivery.
     *
     * @param targetDid - recipient DID.
     * @param topic - application-level topic string.
     * @param payload - UTF-8 payload (pre-serialization is the caller's job).
     * @param opts - ttlSec (default 86400), priority, compress,
     *   encryptForKeyHex, idempotencyKey.
     * @returns { messageId, delivered, compressed, encrypted }
     * @throws {RateLimitError} when the local send rate limit is exceeded.
     * @throws {Error} when the encoded payload exceeds MAX_PAYLOAD_BYTES.
     */
    async send(targetDid, topic, payload, opts = {}) {
        const ttlSec = opts.ttlSec ?? 86400;
        const priority = opts.priority ?? MessagePriority.NORMAL;
        // Rate limit check
        this.enforceRateLimit(this.localDid);
        // Apply compression + encryption to payload
        const { encoded, compressed, encrypted } = this.encodePayload(payload, opts);
        // Validate payload size after encoding
        const payloadBytes = Buffer.byteLength(encoded, 'utf-8');
        if (payloadBytes > MAX_PAYLOAD_BYTES) {
            throw new Error(`Payload too large: ${payloadBytes} bytes (max ${MAX_PAYLOAD_BYTES})`);
        }
        const peerId = this.didToPeerId.get(targetDid);
        if (peerId) {
            // Try direct delivery
            const delivered = await this.deliverDirect(peerId, targetDid, topic, encoded, ttlSec, priority, compressed, encrypted, opts.idempotencyKey);
            if (delivered) {
                // NOTE(review): this id is synthetic and never stored, so it
                // cannot be correlated with the receiver's receipt messageId.
                return { messageId: `msg_direct_${Date.now().toString(36)}`, delivered: true, compressed, encrypted };
            }
        }
        // Queue in outbox for later delivery
        // NOTE(review): compressed/encrypted flags are not passed to
        // addToOutbox — confirm the store schema preserves them, or queued
        // payloads will be re-sent flagged as plain (see flushOutboxForDid).
        const messageId = this.store.addToOutbox({ targetDid, topic, payload: encoded, ttlSec, priority });
        this.log.info('message queued in outbox', { messageId, targetDid, topic });
        return { messageId, delivered: false, compressed, encrypted };
    }
    /**
     * Send a message to multiple target DIDs (multicast).
     * Each target is attempted independently — partial success is possible.
     *
     * @returns { results } - one { targetDid, messageId, delivered } per target.
     */
    async sendMulticast(targetDids, topic, payload, opts = {}) {
        const ttlSec = opts.ttlSec ?? 86400;
        const priority = opts.priority ?? MessagePriority.NORMAL;
        // Rate limit check (counts as 1 call for rate-limit purposes)
        this.enforceRateLimit(this.localDid);
        // Apply compression (encryption not applied in multicast — each recipient needs their own key)
        const { encoded, compressed, encrypted } = this.encodePayload(payload, { ...opts, encryptForKeyHex: undefined });
        const payloadBytes = Buffer.byteLength(encoded, 'utf-8');
        if (payloadBytes > MAX_PAYLOAD_BYTES) {
            throw new Error(`Payload too large: ${payloadBytes} bytes (max ${MAX_PAYLOAD_BYTES})`);
        }
        // Deliver to all targets concurrently with bounded concurrency
        const results = await this.deliverMulticast(targetDids, topic, encoded, ttlSec, priority, compressed, encrypted, opts.idempotencyKey);
        return { results };
    }
    /** Query the local inbox (delegates to the store). */
    getInbox(opts) {
        return this.store.getInbox(opts);
    }
    /** Acknowledge (consume) a message from inbox. */
    ackMessage(messageId) {
        return this.store.consumeMessage(messageId);
    }
    /**
     * Flush outbox: attempt to deliver all pending messages for a specific
     * DID with exponential backoff.
     *
     * @returns number of messages successfully delivered this pass.
     */
    async flushOutboxForDid(targetDid) {
        const peerId = this.didToPeerId.get(targetDid);
        if (!peerId)
            return 0;
        const entries = this.store.getOutboxForTarget(targetDid);
        let delivered = 0;
        const now = Date.now();
        for (const entry of entries) {
            // Give up permanently after MAX_DELIVERY_ATTEMPTS.
            if (entry.attempts > MAX_DELIVERY_ATTEMPTS) {
                this.store.removeFromOutbox(entry.id);
                continue;
            }
            // Exponential backoff: skip if too soon since last attempt
            const backoff = Math.min(OUTBOX_RETRY_BASE_MS * (2 ** entry.attempts), OUTBOX_RETRY_MAX_MS);
            const lastAttempt = entry.lastAttempt ?? 0;
            if (lastAttempt > 0 && now - lastAttempt < backoff) {
                continue;
            }
            this.store.recordAttempt(entry.id);
            // NOTE(review): priority/compressed/encrypted and idempotencyKey
            // are not forwarded here (defaults apply), unlike the direct path
            // in send(). A queued compressed or encrypted payload would be
            // re-sent with compressed=false/encrypted=false — verify against
            // the outbox entry schema.
            const ok = await this.deliverDirect(peerId, targetDid, entry.topic, entry.payload, entry.ttlSec);
            if (ok) {
                this.store.removeFromOutbox(entry.id);
                delivered++;
            }
        }
        return delivered;
    }
    /**
     * Called when a peer connects. Announces our DID and flushes any
     * pending outbox messages for that peer's DID.
     */
    async onPeerConnected(peerId) {
        // Announce our DID to the new peer
        await this.announceDidToPeer(peerId);
        // Check if we know this peer's DID and flush outbox
        const did = this.peerIdToDid.get(peerId);
        if (did) {
            const flushed = await this.flushOutboxForDid(did);
            if (flushed > 0) {
                this.log.info('flushed outbox messages on reconnect', { peerId, did, flushed });
            }
        }
    }
    /** Return the current DID→PeerId mapping (for debugging/status). */
    getDidPeerMap() {
        return Object.fromEntries(this.didToPeerId);
    }
    // ── Subscriber Management (WebSocket push) ─────────────────────
    /** Register a subscriber for real-time inbox pushes. */
    addSubscriber(cb) {
        this.subscribers.add(cb);
    }
    /** Remove a subscriber. */
    removeSubscriber(cb) {
        this.subscribers.delete(cb);
    }
    /** Number of active WS subscribers. */
    get subscriberCount() {
        return this.subscribers.size;
    }
    /** Notify all subscribers of a new inbox message (non-blocking). */
    notifySubscribers(msg) {
        // Use queueMicrotask to avoid blocking the current handler when there are many subscribers
        for (const cb of this.subscribers) {
            queueMicrotask(() => {
                try {
                    cb(msg);
                }
                catch { /* best-effort: a throwing subscriber must not affect others */ }
            });
        }
    }
    // ── Rate Limiting ──────────────────────────────────────────────
    /**
     * Check rate limit for a DID. Throws if limit exceeded.
     * Uses a sliding window of timestamps with binary search eviction.
     *
     * @throws {RateLimitError}
     */
    enforceRateLimit(did) {
        this.checkRateBucket(this.rateBuckets, did, RATE_LIMIT_PER_MIN);
    }
    /** Check if a DID is currently rate-limited (without consuming a slot). */
    isRateLimited(did) {
        const now = Date.now();
        const windowStart = now - RATE_LIMIT_WINDOW_MS;
        const timestamps = this.rateBuckets.get(did);
        if (!timestamps)
            return false;
        // Count only timestamps inside the sliding window.
        const startIdx = this.bisectLeft(timestamps, windowStart);
        return (timestamps.length - startIdx) >= RATE_LIMIT_PER_MIN;
    }
    /**
     * Enforce inbound rate limit for a peerId. Throws if limit exceeded.
     * Prevents P2P peers from spamming without limit.
     *
     * @throws {RateLimitError}
     */
    enforceInboundRateLimit(peerId) {
        this.checkRateBucket(this.inboundRateBuckets, peerId, INBOUND_RATE_LIMIT);
    }
    /**
     * Core rate-limit check: evict expired entries via binary search,
     * push new timestamp, throw if over limit.
     *
     * @throws {RateLimitError} when the bucket already holds `limit`
     *   timestamps inside the window (no timestamp is recorded then).
     */
    checkRateBucket(buckets, key, limit) {
        const now = Date.now();
        const windowStart = now - RATE_LIMIT_WINDOW_MS;
        let timestamps = buckets.get(key);
        if (!timestamps) {
            timestamps = [];
            buckets.set(key, timestamps);
        }
        // Binary search eviction — O(log n) instead of O(n)
        const startIdx = this.bisectLeft(timestamps, windowStart);
        if (startIdx > 0) {
            timestamps.splice(0, startIdx);
        }
        if (timestamps.length >= limit) {
            throw new RateLimitError(key, limit);
        }
        timestamps.push(now);
    }
    /** Binary search: find first index where timestamps[i] >= target. */
    bisectLeft(arr, target) {
        let lo = 0, hi = arr.length;
        while (lo < hi) {
            // >>> 1 halves without overflow for any realistic array length.
            const mid = (lo + hi) >>> 1;
            if (arr[mid] < target)
                lo = mid + 1;
            else
                hi = mid;
        }
        return lo;
    }
    /** Remove empty and stale rate-limit buckets to prevent memory leaks. */
    pruneRateBuckets() {
        const now = Date.now();
        const windowStart = now - RATE_LIMIT_WINDOW_MS;
        // A bucket is stale when even its newest timestamp fell out of the window.
        for (const [key, ts] of this.rateBuckets) {
            if (ts.length === 0 || ts[ts.length - 1] < windowStart) {
                this.rateBuckets.delete(key);
            }
        }
        for (const [key, ts] of this.inboundRateBuckets) {
            if (ts.length === 0 || ts[ts.length - 1] < windowStart) {
                this.inboundRateBuckets.delete(key);
            }
        }
    }
    // ── Private: Payload Encoding (compression + encryption) ────────
    /**
     * Encode a payload: optionally compress (gzip) then optionally encrypt
     * (X25519+AES-256-GCM with an ephemeral sender key).
     * Returns the encoded string and flags indicating what was applied.
     * Note the ordering: compression happens BEFORE encryption
     * (compressing ciphertext would be pointless).
     */
    encodePayload(payload, opts) {
        let data = payload;
        let compressed = false;
        let encrypted = false;
        // Compression: gzip if enabled and payload > threshold
        if (opts.compress !== false && Buffer.byteLength(data, 'utf-8') > COMPRESSION_THRESHOLD_BYTES) {
            const gzipped = gzipSync(Buffer.from(data, 'utf-8'));
            data = gzipped.toString('base64');
            compressed = true;
        }
        // E2E Encryption: X25519 ECDH + HKDF + AES-256-GCM
        if (opts.encryptForKeyHex) {
            const recipientPubKey = hexToBytes(opts.encryptForKeyHex);
            // Fresh ephemeral keypair per message; its public half travels
            // in the envelope so the recipient can derive the same secret.
            const ephemeral = generateX25519Keypair();
            const shared = x25519SharedSecret(ephemeral.privateKey, recipientPubKey);
            const derived = hkdfSha256(shared, undefined, new Uint8Array(E2E_MSG_INFO), 32);
            const plainBytes = Buffer.from(data, 'utf-8');
            const enc = encryptAes256Gcm(derived, new Uint8Array(plainBytes));
            data = JSON.stringify({
                _e2e: 1,
                pk: bytesToHex(ephemeral.publicKey),
                n: enc.nonceHex,
                c: enc.ciphertextHex,
                t: enc.tagHex,
            });
            encrypted = true;
        }
        return { encoded: data, compressed, encrypted };
    }
    /**
     * Decrypt an E2E-encrypted payload using the local node's X25519 private key.
     * Returns the decrypted payload string or null if not encrypted / decryption fails.
     */
    static decryptPayload(payload, recipientPrivateKey) {
        try {
            const envelope = JSON.parse(payload);
            // Only handle well-formed v1 envelopes; anything else is "not encrypted".
            if (envelope._e2e !== 1 || !envelope.pk || !envelope.n || !envelope.c || !envelope.t)
                return null;
            const senderPub = hexToBytes(envelope.pk);
            const shared = x25519SharedSecret(recipientPrivateKey, senderPub);
            const derived = hkdfSha256(shared, undefined, new Uint8Array(E2E_MSG_INFO), 32);
            const decrypted = decryptAes256Gcm(derived, {
                nonceHex: envelope.n,
                ciphertextHex: envelope.c,
                tagHex: envelope.t,
            });
            return Buffer.from(decrypted).toString('utf-8');
        }
        catch {
            // Covers both malformed JSON and GCM auth-tag failures.
            return null;
        }
    }
    /**
     * Decompress a gzip-compressed payload (base64-encoded gzip → utf-8 string).
     * Returns the decompressed string, or null if decompression fails.
     */
    static decompressPayload(payload) {
        try {
            const buf = Buffer.from(payload, 'base64');
            const decompressed = gunzipSync(buf);
            return decompressed.toString('utf-8');
        }
        catch {
            return null;
        }
    }
    /** Get the current inbox sequence number (for WS replay). */
    getCurrentSeq() {
        return this.store.currentSeq();
    }
    // ── Private: Direct Delivery ───────────────────────────────────
    /**
     * Open a DM stream to `peerId` and send one JSON message.
     *
     * @returns true on success, false on any failure (errors are logged,
     *   never thrown — callers rely on the boolean to fall back to the outbox).
     */
    async deliverDirect(peerId, targetDid, topic, payload, ttlSec, priority = MessagePriority.NORMAL, compressed = false, encrypted = false, idempotencyKey) {
        let stream = null;
        try {
            stream = await this.p2p.newStream(peerId, PROTO_DM);
            const message = JSON.stringify({
                sourceDid: this.localDid,
                targetDid,
                topic,
                payload,
                ttlSec,
                sentAtMs: Date.now(),
                priority,
                compressed,
                encrypted,
                idempotencyKey,
            });
            await writeStream(stream.sink, message);
            await stream.close();
            this.log.info('message delivered', { peerId, targetDid, topic });
            return true;
        }
        catch (err) {
            this.log.warn('direct delivery failed', {
                peerId,
                targetDid,
                error: err.message,
            });
            if (stream) {
                try {
                    await stream.close();
                }
                catch { /* ignore */ }
            }
            return false;
        }
    }
    // ── Private: Inbound Message Handler ───────────────────────────
    /**
     * Handle one inbound DM stream: rate-limit, read, validate, store,
     * learn the sender's DID↔PeerId mapping, push to WS subscribers and
     * send a delivery receipt. All failures are logged, never thrown.
     */
    async handleInboundMessage(incoming) {
        const { stream, connection } = incoming;
        try {
            // Inbound rate limit check — prevent P2P spam
            const remotePeer = connection.remotePeer?.toString();
            if (remotePeer) {
                try {
                    this.enforceInboundRateLimit(remotePeer);
                }
                catch {
                    this.log.warn('inbound rate limit exceeded, dropping stream', { peerId: remotePeer });
                    try {
                        await stream.close();
                    }
                    catch { /* ignore */ }
                    return;
                }
            }
            // readStream enforces size limit before reading all into memory
            const raw = await readStream(stream.source);
            await stream.close();
            const msg = JSON.parse(raw.toString('utf-8'));
            if (!msg.sourceDid || !msg.topic || !msg.payload) {
                this.log.warn('inbound message missing required fields');
                return;
            }
            // Store in inbox (deduplication handled by store if idempotencyKey is present)
            const messageId = this.store.addToInbox({
                sourceDid: msg.sourceDid,
                targetDid: msg.targetDid ?? this.localDid,
                topic: msg.topic,
                payload: msg.payload,
                ttlSec: msg.ttlSec,
                sentAtMs: msg.sentAtMs,
                priority: msg.priority ?? MessagePriority.NORMAL,
                idempotencyKey: msg.idempotencyKey,
            });
            // Record DID → PeerId mapping from the sender.
            // NOTE(review): unlike handleDidAnnounce, msg.sourceDid is NOT
            // validated against DID_PATTERN before being cached here — a peer
            // can poison the mapping with an arbitrary string; confirm intent.
            const remotePeerId = connection.remotePeer?.toString();
            if (remotePeerId && msg.sourceDid) {
                this.didToPeerId.set(msg.sourceDid, remotePeerId);
                this.peerIdToDid.set(remotePeerId, msg.sourceDid);
            }
            this.log.info('message received', { messageId, sourceDid: msg.sourceDid, topic: msg.topic });
            // Push to WebSocket subscribers
            const currentSeq = this.store.currentSeq();
            this.notifySubscribers({
                messageId,
                sourceDid: msg.sourceDid,
                topic: msg.topic,
                payload: msg.payload,
                receivedAtMs: Date.now(),
                priority: msg.priority ?? MessagePriority.NORMAL,
                seq: currentSeq,
            });
            // Send delivery receipt back to sender
            if (remotePeerId) {
                void this.sendDeliveryReceipt(remotePeerId, messageId, msg.sourceDid);
            }
        }
        catch (err) {
            this.log.warn('failed to handle inbound message', { error: err.message });
            try {
                await stream.close();
            }
            catch { /* ignore */ }
        }
    }
    // ── Private: DID Announce Protocol ────────────────────────────
    /**
     * Handle an inbound DID announcement: validate, record the mapping,
     * then flush any outbox messages queued for that DID.
     */
    async handleDidAnnounce(incoming) {
        const { stream, connection } = incoming;
        try {
            const raw = await readStream(stream.source, 1024); // DID announces are tiny
            await stream.close();
            const msg = JSON.parse(raw.toString('utf-8'));
            const remotePeerId = connection.remotePeer?.toString();
            // Validate DID format to prevent spoofing / garbage entries
            if (msg.did && !DID_PATTERN.test(msg.did)) {
                this.log.warn('invalid DID in announce, ignoring', { did: msg.did, peerId: remotePeerId });
                return;
            }
            if (msg.did && remotePeerId) {
                this.didToPeerId.set(msg.did, remotePeerId);
                this.peerIdToDid.set(remotePeerId, msg.did);
                this.log.info('peer DID registered', { did: msg.did, peerId: remotePeerId });
                // Flush any pending outbox messages for this DID
                const flushed = await this.flushOutboxForDid(msg.did);
                if (flushed > 0) {
                    this.log.info('flushed outbox on DID announce', { did: msg.did, flushed });
                }
            }
        }
        catch (err) {
            this.log.warn('failed to handle DID announce', { error: err.message });
            try {
                await stream.close();
            }
            catch { /* ignore */ }
        }
    }
    /** Announce our DID to a specific peer (best-effort, never throws). */
    async announceDidToPeer(peerId) {
        let stream = null;
        try {
            stream = await this.p2p.newStream(peerId, PROTO_DID_ANNOUNCE);
            await writeStream(stream.sink, JSON.stringify({ did: this.localDid }));
            await stream.close();
        }
        catch {
            // Best-effort; the peer may not support this protocol yet
            if (stream) {
                try {
                    await stream.close();
                }
                catch { /* ignore */ }
            }
        }
    }
    /** Announce our DID to all currently connected peers (sequentially). */
    async announceToAll() {
        const peers = this.p2p.getConnections();
        for (const peerId of peers) {
            await this.announceDidToPeer(peerId);
        }
    }
    /**
     * Deliver to multiple targets concurrently with bounded concurrency.
     * Uses Promise.allSettled so one failure doesn't block others.
     * Targets without a known PeerId (or whose direct delivery fails)
     * are queued in the outbox instead.
     */
    async deliverMulticast(targetDids, topic, payload, ttlSec, priority = MessagePriority.NORMAL, compressed = false, encrypted = false, idempotencyKey) {
        const results = [];
        // Process in batches of MULTICAST_CONCURRENCY
        for (let i = 0; i < targetDids.length; i += MULTICAST_CONCURRENCY) {
            const batch = targetDids.slice(i, i + MULTICAST_CONCURRENCY);
            const settled = await Promise.allSettled(batch.map(async (targetDid) => {
                const peerId = this.didToPeerId.get(targetDid);
                if (peerId) {
                    const delivered = await this.deliverDirect(peerId, targetDid, topic, payload, ttlSec, priority, compressed, encrypted, idempotencyKey);
                    if (delivered) {
                        return { targetDid, messageId: `msg_direct_${Date.now().toString(36)}`, delivered: true };
                    }
                }
                const messageId = this.store.addToOutbox({ targetDid, topic, payload, ttlSec, priority });
                return { targetDid, messageId, delivered: false };
            }));
            for (const result of settled) {
                if (result.status === 'fulfilled') {
                    results.push(result.value);
                }
                else {
                    // This shouldn't normally happen since deliverDirect catches its own errors
                    this.log.warn('multicast delivery error', { error: String(result.reason) });
                }
            }
        }
        return results;
    }
    // ── Private: Delivery Receipt Protocol ─────────────────────────
    /** Send a delivery receipt to the sender after receiving a message. */
    async sendDeliveryReceipt(peerId, messageId, recipientDid) {
        let stream = null;
        try {
            stream = await this.p2p.newStream(peerId, PROTO_RECEIPT);
            // NOTE(review): messageId here is OUR inbox id (from addToInbox);
            // the original sender never saw this id — see handleDeliveryReceipt.
            await writeStream(stream.sink, JSON.stringify({
                type: 'delivered',
                messageId,
                recipientDid: this.localDid,
                senderDid: recipientDid,
                deliveredAtMs: Date.now(),
            }));
            await stream.close();
            this.log.info('delivery receipt sent', { peerId, messageId });
        }
        catch {
            // Best-effort — receipts are not critical
            if (stream) {
                try {
                    await stream.close();
                }
                catch { /* ignore */ }
            }
        }
    }
    /** Handle an incoming delivery receipt from a remote peer. */
    async handleDeliveryReceipt(incoming) {
        const { stream } = incoming;
        try {
            const raw = await readStream(stream.source);
            await stream.close();
            const receipt = JSON.parse(raw.toString('utf-8'));
            if (receipt.type === 'delivered' && receipt.messageId) {
                // Remove from outbox if it was queued.
                // NOTE(review): receipt.messageId is the RECEIVER's inbox id
                // (see sendDeliveryReceipt), not one of our outbox ids, so this
                // removeFromOutbox call likely never matches anything — verify
                // the id scheme, or the outbox only drains via TTL/max-attempts.
                this.store.removeFromOutbox(receipt.messageId);
                this.log.info('delivery receipt received', {
                    messageId: receipt.messageId,
                    recipientDid: receipt.recipientDid,
                });
                // Notify subscribers about the receipt
                this.notifySubscribers({
                    messageId: receipt.messageId,
                    sourceDid: receipt.recipientDid ?? '',
                    topic: '_receipt',
                    payload: JSON.stringify(receipt),
                    receivedAtMs: receipt.deliveredAtMs ?? Date.now(),
                    priority: MessagePriority.NORMAL,
                    seq: 0, // Receipts don't have inbox seq
                });
            }
        }
        catch {
            try {
                await stream.close();
            }
            catch { /* ignore */ }
        }
    }
}
|
|
694
|
+
// ── Rate Limit Error ─────────────────────────────────────────────
/**
 * Error thrown when a DID (or peer id) exceeds its per-minute
 * message quota. Carries the offending key and the limit so callers
 * can surface a precise 429-style response.
 */
export class RateLimitError extends Error {
    /** The DID (or peer id) that hit the limit. */
    did;
    /** Maximum allowed messages per minute. */
    limit;
    /**
     * @param did - rate-limited DID or peer id.
     * @param limit - the per-minute cap that was exceeded.
     */
    constructor(did, limit) {
        super(`Rate limit exceeded for ${did}: max ${limit} messages/minute`);
        this.name = 'RateLimitError';
        Object.assign(this, { did, limit });
    }
}
|
|
705
|
+
//# sourceMappingURL=messaging-service.js.map
|