@prisma/streams-server 0.0.1 → 0.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (83)
  1. package/CODE_OF_CONDUCT.md +45 -0
  2. package/CONTRIBUTING.md +68 -0
  3. package/LICENSE +201 -0
  4. package/README.md +39 -2
  5. package/SECURITY.md +33 -0
  6. package/bin/prisma-streams-server +2 -0
  7. package/package.json +29 -34
  8. package/src/app.ts +74 -0
  9. package/src/app_core.ts +1706 -0
  10. package/src/app_local.ts +46 -0
  11. package/src/backpressure.ts +66 -0
  12. package/src/bootstrap.ts +239 -0
  13. package/src/config.ts +251 -0
  14. package/src/db/db.ts +1386 -0
  15. package/src/db/schema.ts +625 -0
  16. package/src/expiry_sweeper.ts +44 -0
  17. package/src/hist.ts +169 -0
  18. package/src/index/binary_fuse.ts +379 -0
  19. package/src/index/indexer.ts +745 -0
  20. package/src/index/run_cache.ts +84 -0
  21. package/src/index/run_format.ts +213 -0
  22. package/src/ingest.ts +655 -0
  23. package/src/lens/lens.ts +501 -0
  24. package/src/manifest.ts +114 -0
  25. package/src/memory.ts +155 -0
  26. package/src/metrics.ts +161 -0
  27. package/src/metrics_emitter.ts +50 -0
  28. package/src/notifier.ts +64 -0
  29. package/src/objectstore/interface.ts +13 -0
  30. package/src/objectstore/mock_r2.ts +269 -0
  31. package/src/objectstore/null.ts +32 -0
  32. package/src/objectstore/r2.ts +128 -0
  33. package/src/offset.ts +70 -0
  34. package/src/reader.ts +454 -0
  35. package/src/runtime/hash.ts +156 -0
  36. package/src/runtime/hash_vendor/LICENSE.hash-wasm +38 -0
  37. package/src/runtime/hash_vendor/NOTICE.md +8 -0
  38. package/src/runtime/hash_vendor/xxhash3.umd.min.cjs +7 -0
  39. package/src/runtime/hash_vendor/xxhash32.umd.min.cjs +7 -0
  40. package/src/runtime/hash_vendor/xxhash64.umd.min.cjs +7 -0
  41. package/src/schema/lens_schema.ts +290 -0
  42. package/src/schema/proof.ts +547 -0
  43. package/src/schema/registry.ts +405 -0
  44. package/src/segment/cache.ts +179 -0
  45. package/src/segment/format.ts +331 -0
  46. package/src/segment/segmenter.ts +326 -0
  47. package/src/segment/segmenter_worker.ts +43 -0
  48. package/src/segment/segmenter_workers.ts +94 -0
  49. package/src/server.ts +326 -0
  50. package/src/sqlite/adapter.ts +164 -0
  51. package/src/stats.ts +205 -0
  52. package/src/touch/engine.ts +41 -0
  53. package/src/touch/interpreter_worker.ts +442 -0
  54. package/src/touch/live_keys.ts +118 -0
  55. package/src/touch/live_metrics.ts +827 -0
  56. package/src/touch/live_templates.ts +619 -0
  57. package/src/touch/manager.ts +1199 -0
  58. package/src/touch/spec.ts +456 -0
  59. package/src/touch/touch_journal.ts +671 -0
  60. package/src/touch/touch_key_id.ts +20 -0
  61. package/src/touch/worker_pool.ts +189 -0
  62. package/src/touch/worker_protocol.ts +56 -0
  63. package/src/types/proper-lockfile.d.ts +1 -0
  64. package/src/uploader.ts +317 -0
  65. package/src/util/base32_crockford.ts +81 -0
  66. package/src/util/bloom256.ts +67 -0
  67. package/src/util/cleanup.ts +22 -0
  68. package/src/util/crc32c.ts +29 -0
  69. package/src/util/ds_error.ts +15 -0
  70. package/src/util/duration.ts +17 -0
  71. package/src/util/endian.ts +53 -0
  72. package/src/util/json_pointer.ts +148 -0
  73. package/src/util/log.ts +25 -0
  74. package/src/util/lru.ts +45 -0
  75. package/src/util/retry.ts +35 -0
  76. package/src/util/siphash.ts +71 -0
  77. package/src/util/stream_paths.ts +31 -0
  78. package/src/util/time.ts +14 -0
  79. package/src/util/yield.ts +3 -0
  80. package/build/index.d.mts +0 -1
  81. package/build/index.d.ts +0 -1
  82. package/build/index.js +0 -0
  83. package/build/index.mjs +0 -1
@@ -0,0 +1,625 @@
1
+ import type { SqliteDatabase } from "../sqlite/adapter.ts";
2
+ import { dsError } from "../util/ds_error.ts";
3
+
4
+ /**
5
+ * SQLite schema + migrations.
6
+ *
7
+ * This rewrite uses SQLite as:
8
+ * - WAL (durable append log)
9
+ * - local metadata store (streams/segments/manifests/schemas)
10
+ */
11
+
12
// Current on-disk schema version. Bump whenever the layout changes;
// initSchema() migrates older databases up to this, one step at a time.
export const SCHEMA_VERSION = 11;

// Connection-level pragmas applied on every open (initSchema runs these even
// when skipMigrations is set): WAL journaling, FULL sync for durability,
// foreign-key enforcement, a 5s busy timeout, and in-memory temp storage.
export const DEFAULT_PRAGMAS_SQL = `
PRAGMA journal_mode = WAL;
PRAGMA synchronous = FULL;
PRAGMA foreign_keys = ON;
PRAGMA busy_timeout = 5000;
PRAGMA temp_store = MEMORY;
`;
21
+
22
// Core tables as created on a fresh database. Despite the "V4" name this is
// the *latest* shape (it includes columns added by later migrations, e.g.
// wal_rows/wal_bytes from v10); a fresh init runs this and is stamped directly
// at SCHEMA_VERSION, while old databases reach the same shape via migrate*().
//
// NOTE(review): wal_stream_offset_idx indexes the same columns as the unique
// index wal_stream_offset_uniq and looks redundant — verify before dropping.
const CREATE_TABLES_V4_SQL = `
CREATE TABLE IF NOT EXISTS streams (
  stream TEXT PRIMARY KEY,
  created_at_ms INTEGER NOT NULL,
  updated_at_ms INTEGER NOT NULL,

  content_type TEXT NOT NULL,
  stream_seq TEXT NULL,
  closed INTEGER NOT NULL DEFAULT 0,
  closed_producer_id TEXT NULL,
  closed_producer_epoch INTEGER NULL,
  closed_producer_seq INTEGER NULL,
  ttl_seconds INTEGER NULL,

  epoch INTEGER NOT NULL,
  next_offset INTEGER NOT NULL,
  sealed_through INTEGER NOT NULL,
  uploaded_through INTEGER NOT NULL,
  uploaded_segment_count INTEGER NOT NULL DEFAULT 0,

  pending_rows INTEGER NOT NULL,
  pending_bytes INTEGER NOT NULL,

  -- Logical size of retained rows in the wal table for this stream (payload-only bytes).
  -- This is explicitly tracked because SQLite file size is high-water and does not shrink
  -- deterministically after DELETE-based GC/retention trimming.
  wal_rows INTEGER NOT NULL DEFAULT 0,
  wal_bytes INTEGER NOT NULL DEFAULT 0,

  last_append_ms INTEGER NOT NULL,
  last_segment_cut_ms INTEGER NOT NULL,
  segment_in_progress INTEGER NOT NULL,

  expires_at_ms INTEGER NULL,
  stream_flags INTEGER NOT NULL DEFAULT 0
);

CREATE INDEX IF NOT EXISTS streams_pending_bytes_idx ON streams(pending_bytes);
CREATE INDEX IF NOT EXISTS streams_last_cut_idx ON streams(last_segment_cut_ms);
CREATE INDEX IF NOT EXISTS streams_inprog_pending_idx ON streams(segment_in_progress, pending_bytes, last_segment_cut_ms);

CREATE TABLE IF NOT EXISTS wal (
  id INTEGER PRIMARY KEY,
  stream TEXT NOT NULL,
  offset INTEGER NOT NULL,
  ts_ms INTEGER NOT NULL,
  payload BLOB NOT NULL,
  payload_len INTEGER NOT NULL,
  routing_key BLOB NULL,
  content_type TEXT NULL,
  flags INTEGER NOT NULL DEFAULT 0
);

CREATE UNIQUE INDEX IF NOT EXISTS wal_stream_offset_uniq ON wal(stream, offset);
CREATE INDEX IF NOT EXISTS wal_stream_offset_idx ON wal(stream, offset);
CREATE INDEX IF NOT EXISTS wal_ts_idx ON wal(ts_ms);

CREATE TABLE IF NOT EXISTS segments (
  segment_id TEXT PRIMARY KEY,
  stream TEXT NOT NULL,
  segment_index INTEGER NOT NULL,
  start_offset INTEGER NOT NULL,
  end_offset INTEGER NOT NULL,
  block_count INTEGER NOT NULL,
  last_append_ms INTEGER NOT NULL,
  size_bytes INTEGER NOT NULL,
  local_path TEXT NOT NULL,
  created_at_ms INTEGER NOT NULL,
  uploaded_at_ms INTEGER NULL,
  r2_etag TEXT NULL
);

CREATE TABLE IF NOT EXISTS stream_segment_meta (
  stream TEXT PRIMARY KEY,
  segment_count INTEGER NOT NULL,
  segment_offsets BLOB NOT NULL,
  segment_blocks BLOB NOT NULL,
  segment_last_ts BLOB NOT NULL
);

CREATE UNIQUE INDEX IF NOT EXISTS segments_stream_index_uniq ON segments(stream, segment_index);
CREATE INDEX IF NOT EXISTS segments_stream_start_idx ON segments(stream, start_offset);
CREATE INDEX IF NOT EXISTS segments_pending_upload_idx ON segments(uploaded_at_ms);

CREATE TABLE IF NOT EXISTS manifests (
  stream TEXT PRIMARY KEY,
  generation INTEGER NOT NULL,
  uploaded_generation INTEGER NOT NULL,
  last_uploaded_at_ms INTEGER NULL,
  last_uploaded_etag TEXT NULL
);

CREATE TABLE IF NOT EXISTS schemas (
  stream TEXT PRIMARY KEY,
  schema_json TEXT NOT NULL,
  updated_at_ms INTEGER NOT NULL
);

CREATE TABLE IF NOT EXISTS producer_state (
  stream TEXT NOT NULL,
  producer_id TEXT NOT NULL,
  epoch INTEGER NOT NULL,
  last_seq INTEGER NOT NULL,
  updated_at_ms INTEGER NOT NULL,
  PRIMARY KEY (stream, producer_id)
);

CREATE TABLE IF NOT EXISTS stream_interpreters (
  stream TEXT PRIMARY KEY,
  interpreted_through INTEGER NOT NULL,
  updated_at_ms INTEGER NOT NULL
);

-- Live dynamic template registry (per base stream).
CREATE TABLE IF NOT EXISTS live_templates (
  stream TEXT NOT NULL,
  template_id TEXT NOT NULL,
  entity TEXT NOT NULL,
  fields_json TEXT NOT NULL,
  encodings_json TEXT NOT NULL,
  state TEXT NOT NULL,
  created_at_ms INTEGER NOT NULL,
  last_seen_at_ms INTEGER NOT NULL,
  inactivity_ttl_ms INTEGER NOT NULL,
  active_from_source_offset INTEGER NOT NULL,
  retired_at_ms INTEGER NULL,
  retired_reason TEXT NULL,
  PRIMARY KEY (stream, template_id)
);

CREATE INDEX IF NOT EXISTS live_templates_stream_entity_state_last_seen_idx
  ON live_templates(stream, entity, state, last_seen_at_ms);
CREATE INDEX IF NOT EXISTS live_templates_stream_state_last_seen_idx
  ON live_templates(stream, state, last_seen_at_ms);
`;
157
+
158
// Search-index bookkeeping tables. index_state holds per-stream indexing
// progress and the per-stream index secret; index_runs records each published
// run (level + segment range + object key) and retirement metadata.
// Applied both on fresh init and by the v4->v5 migration (idempotent via
// IF NOT EXISTS, so several migration paths can safely run it).
const CREATE_INDEX_TABLES_SQL = `
CREATE TABLE IF NOT EXISTS index_state (
  stream TEXT PRIMARY KEY,
  index_secret BLOB NOT NULL,
  indexed_through INTEGER NOT NULL,
  updated_at_ms INTEGER NOT NULL
);

CREATE TABLE IF NOT EXISTS index_runs (
  run_id TEXT PRIMARY KEY,
  stream TEXT NOT NULL,
  level INTEGER NOT NULL,
  start_segment INTEGER NOT NULL,
  end_segment INTEGER NOT NULL,
  object_key TEXT NOT NULL,
  filter_len INTEGER NOT NULL,
  record_count INTEGER NOT NULL,
  retired_gen INTEGER NULL,
  retired_at_ms INTEGER NULL
);

CREATE INDEX IF NOT EXISTS index_runs_stream_idx ON index_runs(stream, level, start_segment);
`;
181
+
182
+ const CREATE_TABLES_V4_SUFFIX_SQL = (suffix: string): string => `
183
+ CREATE TABLE streams_${suffix} (
184
+ stream TEXT PRIMARY KEY,
185
+ created_at_ms INTEGER NOT NULL,
186
+ updated_at_ms INTEGER NOT NULL,
187
+
188
+ content_type TEXT NOT NULL,
189
+ stream_seq TEXT NULL,
190
+ closed INTEGER NOT NULL DEFAULT 0,
191
+ closed_producer_id TEXT NULL,
192
+ closed_producer_epoch INTEGER NULL,
193
+ closed_producer_seq INTEGER NULL,
194
+ ttl_seconds INTEGER NULL,
195
+
196
+ epoch INTEGER NOT NULL,
197
+ next_offset INTEGER NOT NULL,
198
+ sealed_through INTEGER NOT NULL,
199
+ uploaded_through INTEGER NOT NULL,
200
+ uploaded_segment_count INTEGER NOT NULL DEFAULT 0,
201
+
202
+ pending_rows INTEGER NOT NULL,
203
+ pending_bytes INTEGER NOT NULL,
204
+
205
+ last_append_ms INTEGER NOT NULL,
206
+ last_segment_cut_ms INTEGER NOT NULL,
207
+ segment_in_progress INTEGER NOT NULL,
208
+
209
+ expires_at_ms INTEGER NULL,
210
+ stream_flags INTEGER NOT NULL DEFAULT 0
211
+ );
212
+
213
+ CREATE TABLE wal_${suffix} (
214
+ id INTEGER PRIMARY KEY,
215
+ stream TEXT NOT NULL,
216
+ offset INTEGER NOT NULL,
217
+ ts_ms INTEGER NOT NULL,
218
+ payload BLOB NOT NULL,
219
+ payload_len INTEGER NOT NULL,
220
+ routing_key BLOB NULL,
221
+ content_type TEXT NULL,
222
+ flags INTEGER NOT NULL DEFAULT 0
223
+ );
224
+
225
+ CREATE TABLE segments_${suffix} (
226
+ segment_id TEXT PRIMARY KEY,
227
+ stream TEXT NOT NULL,
228
+ segment_index INTEGER NOT NULL,
229
+ start_offset INTEGER NOT NULL,
230
+ end_offset INTEGER NOT NULL,
231
+ block_count INTEGER NOT NULL,
232
+ last_append_ms INTEGER NOT NULL,
233
+ size_bytes INTEGER NOT NULL,
234
+ local_path TEXT NOT NULL,
235
+ created_at_ms INTEGER NOT NULL,
236
+ uploaded_at_ms INTEGER NULL,
237
+ r2_etag TEXT NULL
238
+ );
239
+
240
+ CREATE TABLE manifests_${suffix} (
241
+ stream TEXT PRIMARY KEY,
242
+ generation INTEGER NOT NULL,
243
+ uploaded_generation INTEGER NOT NULL,
244
+ last_uploaded_at_ms INTEGER NULL,
245
+ last_uploaded_etag TEXT NULL
246
+ );
247
+
248
+ CREATE TABLE schemas_${suffix} (
249
+ stream TEXT PRIMARY KEY,
250
+ schema_json TEXT NOT NULL,
251
+ updated_at_ms INTEGER NOT NULL
252
+ );
253
+
254
+ CREATE TABLE producer_state_${suffix} (
255
+ stream TEXT NOT NULL,
256
+ producer_id TEXT NOT NULL,
257
+ epoch INTEGER NOT NULL,
258
+ last_seq INTEGER NOT NULL,
259
+ updated_at_ms INTEGER NOT NULL,
260
+ PRIMARY KEY (stream, producer_id)
261
+ );
262
+ `;
263
+
264
// Indexes for the core tables, recreated after migrateV1ToV4's table-rebuild
// (DROP + RENAME destroys the originals' indexes). All IF NOT EXISTS, so
// re-running is harmless.
const CREATE_INDEXES_V4_SQL = `
CREATE UNIQUE INDEX IF NOT EXISTS wal_stream_offset_uniq ON wal(stream, offset);
CREATE INDEX IF NOT EXISTS wal_stream_offset_idx ON wal(stream, offset);
CREATE INDEX IF NOT EXISTS wal_ts_idx ON wal(ts_ms);

CREATE INDEX IF NOT EXISTS streams_pending_bytes_idx ON streams(pending_bytes);
CREATE INDEX IF NOT EXISTS streams_last_cut_idx ON streams(last_segment_cut_ms);
CREATE INDEX IF NOT EXISTS streams_inprog_pending_idx ON streams(segment_in_progress, pending_bytes, last_segment_cut_ms);

CREATE UNIQUE INDEX IF NOT EXISTS segments_stream_index_uniq ON segments(stream, segment_index);
CREATE INDEX IF NOT EXISTS segments_stream_start_idx ON segments(stream, start_offset);
CREATE INDEX IF NOT EXISTS segments_pending_upload_idx ON segments(uploaded_at_ms);
`;
277
+
278
+ export function initSchema(db: SqliteDatabase, opts: { skipMigrations?: boolean } = {}): void {
279
+ db.exec(DEFAULT_PRAGMAS_SQL);
280
+
281
+ // Some worker processes only need read/write access to existing tables and
282
+ // should avoid concurrent schema init/migration work.
283
+ if (opts.skipMigrations) return;
284
+
285
+ db.exec(`CREATE TABLE IF NOT EXISTS schema_version (version INTEGER NOT NULL);`);
286
+
287
+ const readSchemaVersion = (): number | null => {
288
+ const row = db.query("SELECT version FROM schema_version LIMIT 1;").get() as any;
289
+ if (!row) return null;
290
+ const raw = row.version;
291
+ if (typeof raw === "bigint") return Number(raw);
292
+ if (typeof raw === "number") return raw;
293
+ return Number(raw);
294
+ };
295
+
296
+ const version0 = readSchemaVersion();
297
+ if (version0 == null) {
298
+ db.exec(CREATE_TABLES_V4_SQL);
299
+ db.exec(CREATE_INDEX_TABLES_SQL);
300
+ db.query("INSERT INTO schema_version(version) VALUES (?);").run(SCHEMA_VERSION);
301
+ return;
302
+ }
303
+
304
+ if (version0 === SCHEMA_VERSION) return;
305
+
306
+ let version = version0;
307
+ while (version !== SCHEMA_VERSION) {
308
+ if (version === 1) {
309
+ migrateV1ToV4(db);
310
+ } else if (version === 2) {
311
+ migrateV2ToV4(db);
312
+ } else if (version === 3) {
313
+ migrateV3ToV4(db);
314
+ } else if (version === 4) {
315
+ migrateV4ToV5(db);
316
+ } else if (version === 5) {
317
+ migrateV5ToV6(db);
318
+ } else if (version === 6) {
319
+ migrateV6ToV7(db);
320
+ } else if (version === 7) {
321
+ migrateV7ToV8(db);
322
+ } else if (version === 8) {
323
+ migrateV8ToV9(db);
324
+ } else if (version === 9) {
325
+ migrateV9ToV10(db);
326
+ } else if (version === 10) {
327
+ migrateV10ToV11(db);
328
+ } else {
329
+ throw dsError(`unexpected schema version: ${version} (expected ${SCHEMA_VERSION})`);
330
+ }
331
+ const next = readSchemaVersion();
332
+ if (next == null) throw dsError("schema_version row missing after migration");
333
+ version = next;
334
+ }
335
+ }
336
+
337
// v1 -> v4: timestamps move from nanoseconds to milliseconds and "seq"
// columns become "offset". SQLite cannot ALTER this much in place, so each
// table is rebuilt under a _v4 suffix, rows are copied/transformed, the old
// tables dropped, and the new ones renamed into place — all in one transaction.
function migrateV1ToV4(db: SqliteDatabase): void {
  const tx = db.transaction(() => {
    db.exec(CREATE_TABLES_V4_SUFFIX_SQL("v4"));

    // Streams: ns -> ms, defaults for the new content-type/close/producer
    // columns, and the old `deleted` flag folded into stream_flags (bit 0).
    db.exec(`
      INSERT INTO streams_v4(
        stream, created_at_ms, updated_at_ms,
        content_type, stream_seq, closed, closed_producer_id, closed_producer_epoch, closed_producer_seq, ttl_seconds,
        epoch,
        next_offset, sealed_through, uploaded_through,
        pending_rows, pending_bytes,
        last_append_ms, last_segment_cut_ms, segment_in_progress,
        expires_at_ms, stream_flags
      )
      SELECT
        stream,
        CAST(created_at_ns / 1000000 AS INTEGER),
        CAST(updated_at_ns / 1000000 AS INTEGER),
        'application/octet-stream',
        NULL,
        0,
        NULL,
        NULL,
        NULL,
        NULL,
        epoch,
        next_seq,
        sealed_through_seq,
        uploaded_through_seq,
        pending_rows,
        pending_bytes,
        CAST(last_append_ns / 1000000 AS INTEGER),
        CAST(last_segment_cut_ns / 1000000 AS INTEGER),
        segment_in_progress,
        CASE WHEN expires_at_ns IS NULL THEN NULL ELSE CAST(expires_at_ns / 1000000 AS INTEGER) END,
        CASE WHEN deleted != 0 THEN 1 ELSE 0 END
      FROM streams;
    `);

    // WAL: seq -> offset, append_ns -> ts_ms; the old is_json boolean becomes
    // an explicit 'application/json' content_type. Row ids are reassigned
    // (id is not copied).
    db.exec(`
      INSERT INTO wal_v4(
        stream, offset, ts_ms, payload, payload_len, routing_key, content_type, flags
      )
      SELECT
        stream,
        seq,
        CAST(append_ns / 1000000 AS INTEGER),
        payload,
        payload_len,
        CASE WHEN routing_key IS NULL THEN NULL ELSE CAST(routing_key AS BLOB) END,
        CASE WHEN is_json != 0 THEN 'application/json' ELSE NULL END,
        0
      FROM wal;
    `);

    // Segments: start_seq/end_seq -> start_offset/end_offset, ns -> ms;
    // r2_etag did not exist in v1 and starts NULL.
    db.exec(`
      INSERT INTO segments_v4(
        segment_id, stream, segment_index, start_offset, end_offset, block_count,
        last_append_ms, size_bytes, local_path, created_at_ms, uploaded_at_ms, r2_etag
      )
      SELECT
        segment_id,
        stream,
        segment_index,
        start_seq,
        end_seq,
        block_count,
        CAST(last_append_ns / 1000000 AS INTEGER),
        size_bytes,
        local_path,
        CAST(created_at_ns / 1000000 AS INTEGER),
        CASE WHEN uploaded_at_ns IS NULL THEN NULL ELSE CAST(uploaded_at_ns / 1000000 AS INTEGER) END,
        NULL
      FROM segments;
    `);

    // Manifests: only the upload timestamp needs ns -> ms conversion.
    db.exec(`
      INSERT INTO manifests_v4(
        stream, generation, uploaded_generation, last_uploaded_at_ms, last_uploaded_etag
      )
      SELECT
        stream,
        generation,
        uploaded_generation,
        CASE WHEN last_uploaded_at_ns IS NULL THEN NULL ELSE CAST(last_uploaded_at_ns / 1000000 AS INTEGER) END,
        last_uploaded_etag
      FROM manifests;
    `);

    // Schemas: straight copy with ns -> ms on updated_at.
    db.exec(`
      INSERT INTO schemas_v4(stream, schema_json, updated_at_ms)
      SELECT stream, schema_json, CAST(updated_at_ns / 1000000 AS INTEGER)
      FROM schemas;
    `);

    db.exec(`DROP TABLE wal;`);
    db.exec(`DROP TABLE streams;`);
    db.exec(`DROP TABLE segments;`);
    db.exec(`DROP TABLE manifests;`);
    db.exec(`DROP TABLE schemas;`);

    db.exec(`ALTER TABLE streams_v4 RENAME TO streams;`);
    db.exec(`ALTER TABLE wal_v4 RENAME TO wal;`);
    db.exec(`ALTER TABLE segments_v4 RENAME TO segments;`);
    db.exec(`ALTER TABLE manifests_v4 RENAME TO manifests;`);
    db.exec(`ALTER TABLE schemas_v4 RENAME TO schemas;`);
    // producer_state has no v1 predecessor: the empty _v4 table created above
    // simply becomes the live table.
    db.exec(`ALTER TABLE producer_state_v4 RENAME TO producer_state;`);

    // Recreate the indexes lost with the dropped tables.
    db.exec(CREATE_INDEXES_V4_SQL);

    db.exec(CREATE_INDEX_TABLES_SQL);
    // Stamp v4; initSchema's loop continues through v5..SCHEMA_VERSION.
    db.exec(`UPDATE schema_version SET version = 4;`);
  });

  tx();
}
458
+
459
// v2 -> v4: purely additive — new columns on segments and streams (with
// defaults, so existing rows stay valid), plus the producer_state and
// search-index tables. Jumps straight to version 4.
function migrateV2ToV4(db: SqliteDatabase): void {
  const tx = db.transaction(() => {
    db.exec(`ALTER TABLE segments ADD COLUMN block_count INTEGER NOT NULL DEFAULT 0;`);
    db.exec(`ALTER TABLE segments ADD COLUMN last_append_ms INTEGER NOT NULL DEFAULT 0;`);

    db.exec(`ALTER TABLE streams ADD COLUMN content_type TEXT NOT NULL DEFAULT 'application/octet-stream';`);
    db.exec(`ALTER TABLE streams ADD COLUMN stream_seq TEXT NULL;`);
    db.exec(`ALTER TABLE streams ADD COLUMN closed INTEGER NOT NULL DEFAULT 0;`);
    db.exec(`ALTER TABLE streams ADD COLUMN closed_producer_id TEXT NULL;`);
    db.exec(`ALTER TABLE streams ADD COLUMN closed_producer_epoch INTEGER NULL;`);
    db.exec(`ALTER TABLE streams ADD COLUMN closed_producer_seq INTEGER NULL;`);
    db.exec(`ALTER TABLE streams ADD COLUMN ttl_seconds INTEGER NULL;`);

    db.exec(`
      CREATE TABLE IF NOT EXISTS producer_state (
        stream TEXT NOT NULL,
        producer_id TEXT NOT NULL,
        epoch INTEGER NOT NULL,
        last_seq INTEGER NOT NULL,
        updated_at_ms INTEGER NOT NULL,
        PRIMARY KEY (stream, producer_id)
      );
    `);
    db.exec(CREATE_INDEX_TABLES_SQL);
    db.exec(`UPDATE schema_version SET version = 4;`);
  });

  tx();
}
488
+
489
// v3 -> v4: same additive changes as migrateV2ToV4 minus the segments
// columns (block_count/last_append_ms already existed by v3).
function migrateV3ToV4(db: SqliteDatabase): void {
  const tx = db.transaction(() => {
    db.exec(`ALTER TABLE streams ADD COLUMN content_type TEXT NOT NULL DEFAULT 'application/octet-stream';`);
    db.exec(`ALTER TABLE streams ADD COLUMN stream_seq TEXT NULL;`);
    db.exec(`ALTER TABLE streams ADD COLUMN closed INTEGER NOT NULL DEFAULT 0;`);
    db.exec(`ALTER TABLE streams ADD COLUMN closed_producer_id TEXT NULL;`);
    db.exec(`ALTER TABLE streams ADD COLUMN closed_producer_epoch INTEGER NULL;`);
    db.exec(`ALTER TABLE streams ADD COLUMN closed_producer_seq INTEGER NULL;`);
    db.exec(`ALTER TABLE streams ADD COLUMN ttl_seconds INTEGER NULL;`);

    db.exec(`
      CREATE TABLE IF NOT EXISTS producer_state (
        stream TEXT NOT NULL,
        producer_id TEXT NOT NULL,
        epoch INTEGER NOT NULL,
        last_seq INTEGER NOT NULL,
        updated_at_ms INTEGER NOT NULL,
        PRIMARY KEY (stream, producer_id)
      );
    `);
    db.exec(CREATE_INDEX_TABLES_SQL);
    db.exec(`UPDATE schema_version SET version = 4;`);
  });

  tx();
}
515
+
516
+ function migrateV4ToV5(db: SqliteDatabase): void {
517
+ const tx = db.transaction(() => {
518
+ db.exec(CREATE_INDEX_TABLES_SQL);
519
+ db.exec(`UPDATE schema_version SET version = 5;`);
520
+ });
521
+ tx();
522
+ }
523
+
524
// v5 -> v6: per-stream uploaded-segment counter plus a compact per-stream
// segment-metadata table (packed BLOB arrays of offsets/blocks/last-ts).
function migrateV5ToV6(db: SqliteDatabase): void {
  const tx = db.transaction(() => {
    // Note: SQLite's ADD COLUMN has no IF NOT EXISTS, so this step requires
    // that no pre-v6 database already carries uploaded_segment_count.
    db.exec(`ALTER TABLE streams ADD COLUMN uploaded_segment_count INTEGER NOT NULL DEFAULT 0;`);
    db.exec(`
      CREATE TABLE IF NOT EXISTS stream_segment_meta (
        stream TEXT PRIMARY KEY,
        segment_count INTEGER NOT NULL,
        segment_offsets BLOB NOT NULL,
        segment_blocks BLOB NOT NULL,
        segment_last_ts BLOB NOT NULL
      );
    `);
    db.exec(`UPDATE schema_version SET version = 6;`);
  });
  tx();
}
540
+
541
+ function migrateV6ToV7(db: SqliteDatabase): void {
542
+ const tx = db.transaction(() => {
543
+ db.exec(`
544
+ CREATE TABLE IF NOT EXISTS stream_interpreters (
545
+ stream TEXT PRIMARY KEY,
546
+ interpreted_through INTEGER NOT NULL,
547
+ updated_at_ms INTEGER NOT NULL
548
+ );
549
+ `);
550
+ db.exec(`UPDATE schema_version SET version = 7;`);
551
+ });
552
+ tx();
553
+ }
554
+
555
+ function migrateV7ToV8(db: SqliteDatabase): void {
556
+ const tx = db.transaction(() => {
557
+ db.exec(`UPDATE schema_version SET version = 8;`);
558
+ });
559
+ tx();
560
+ }
561
+
562
// v8 -> v9: add the live dynamic template registry (one row per
// stream/template) and its two lookup indexes. Everything is IF NOT EXISTS,
// so this step is idempotent.
function migrateV8ToV9(db: SqliteDatabase): void {
  const tx = db.transaction(() => {
    db.exec(`
      CREATE TABLE IF NOT EXISTS live_templates (
        stream TEXT NOT NULL,
        template_id TEXT NOT NULL,
        entity TEXT NOT NULL,
        fields_json TEXT NOT NULL,
        encodings_json TEXT NOT NULL,
        state TEXT NOT NULL,
        created_at_ms INTEGER NOT NULL,
        last_seen_at_ms INTEGER NOT NULL,
        inactivity_ttl_ms INTEGER NOT NULL,
        active_from_source_offset INTEGER NOT NULL,
        retired_at_ms INTEGER NULL,
        retired_reason TEXT NULL,
        PRIMARY KEY (stream, template_id)
      );
    `);
    db.exec(`
      CREATE INDEX IF NOT EXISTS live_templates_stream_entity_state_last_seen_idx
        ON live_templates(stream, entity, state, last_seen_at_ms);
    `);
    db.exec(`
      CREATE INDEX IF NOT EXISTS live_templates_stream_state_last_seen_idx
        ON live_templates(stream, state, last_seen_at_ms);
    `);
    db.exec(`UPDATE schema_version SET version = 9;`);
  });
  tx();
}
593
+
594
// v9 -> v10: add per-stream wal_rows/wal_bytes counters and backfill them
// from the current wal contents. The aggregate goes through a TEMP table so
// the UPDATE's correlated subqueries scan the small per-stream summary
// instead of re-aggregating wal per streams row.
function migrateV9ToV10(db: SqliteDatabase): void {
  const tx = db.transaction(() => {
    db.exec(`ALTER TABLE streams ADD COLUMN wal_rows INTEGER NOT NULL DEFAULT 0;`);
    db.exec(`ALTER TABLE streams ADD COLUMN wal_bytes INTEGER NOT NULL DEFAULT 0;`);

    // Backfill current retained WAL rows/bytes per stream so metrics are correct after upgrade.
    // Defensive: drop any leftover temp table from an interrupted earlier attempt.
    db.exec(`DROP TABLE IF EXISTS temp.wal_stats;`);
    db.exec(`
      CREATE TEMP TABLE wal_stats AS
      SELECT stream, COUNT(*) as rows, COALESCE(SUM(payload_len), 0) as bytes
      FROM wal
      GROUP BY stream;
    `);
    // Streams with no wal rows get 0 via COALESCE.
    db.exec(`
      UPDATE streams
      SET wal_rows = COALESCE((SELECT rows FROM wal_stats WHERE wal_stats.stream = streams.stream), 0),
          wal_bytes = COALESCE((SELECT bytes FROM wal_stats WHERE wal_stats.stream = streams.stream), 0);
    `);
    db.exec(`DROP TABLE wal_stats;`);

    db.exec(`UPDATE schema_version SET version = 10;`);
  });
  tx();
}
618
+
619
+ function migrateV10ToV11(db: SqliteDatabase): void {
620
+ const tx = db.transaction(() => {
621
+ db.exec(`DROP INDEX IF EXISTS wal_touch_stream_rk_offset_idx;`);
622
+ db.exec(`UPDATE schema_version SET version = ${SCHEMA_VERSION};`);
623
+ });
624
+ tx();
625
+ }
@@ -0,0 +1,44 @@
1
+ import type { Config } from "./config";
2
+ import type { SqliteDurableStore } from "./db/db";
3
+
4
+ export class ExpirySweeper {
5
+ private readonly cfg: Config;
6
+ private readonly db: SqliteDurableStore;
7
+ private timer: any | null = null;
8
+ private running = false;
9
+
10
+ constructor(cfg: Config, db: SqliteDurableStore) {
11
+ this.cfg = cfg;
12
+ this.db = db;
13
+ }
14
+
15
+ start(): void {
16
+ if (this.timer || this.cfg.expirySweepIntervalMs <= 0) return;
17
+ this.timer = setInterval(() => {
18
+ void this.tick();
19
+ }, this.cfg.expirySweepIntervalMs);
20
+ }
21
+
22
+ stop(): void {
23
+ if (this.timer) clearInterval(this.timer);
24
+ this.timer = null;
25
+ }
26
+
27
+ private async tick(): Promise<void> {
28
+ if (this.running) return;
29
+ this.running = true;
30
+ try {
31
+ const expired = this.db.listExpiredStreams(this.cfg.expirySweepBatchLimit);
32
+ if (expired.length === 0) return;
33
+ for (const stream of expired) {
34
+ try {
35
+ this.db.deleteStream(stream);
36
+ } catch {
37
+ // ignore deletion errors
38
+ }
39
+ }
40
+ } finally {
41
+ this.running = false;
42
+ }
43
+ }
44
+ }