@durable-streams/state 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +654 -0
- package/STATE-PROTOCOL.md +502 -0
- package/dist/index.cjs +558 -0
- package/dist/index.d.cts +284 -0
- package/dist/index.d.ts +284 -0
- package/dist/index.js +530 -0
- package/package.json +48 -0
- package/src/index.ts +33 -0
- package/src/materialized-state.ts +93 -0
- package/src/stream-db.ts +934 -0
- package/src/types.ts +80 -0
- package/state-protocol.schema.json +186 -0
package/src/stream-db.ts
ADDED
|
@@ -0,0 +1,934 @@
|
|
|
1
|
+
import { createCollection, createOptimisticAction } from "@tanstack/db"
|
|
2
|
+
import { DurableStream as DurableStreamClass } from "@durable-streams/client"
|
|
3
|
+
import { isChangeEvent, isControlEvent } from "./types"
|
|
4
|
+
import type { Collection, SyncConfig } from "@tanstack/db"
|
|
5
|
+
import type { ChangeEvent, StateEvent } from "./types"
|
|
6
|
+
import type { StandardSchemaV1 } from "@standard-schema/spec"
|
|
7
|
+
import type {
|
|
8
|
+
DurableStream,
|
|
9
|
+
DurableStreamOptions,
|
|
10
|
+
StreamResponse,
|
|
11
|
+
} from "@durable-streams/client"
|
|
12
|
+
|
|
13
|
+
// ============================================================================
|
|
14
|
+
// Type Definitions
|
|
15
|
+
// ============================================================================
|
|
16
|
+
|
|
17
|
+
/**
 * Definition for a single collection in the stream state.
 *
 * @typeParam T The value type stored in the collection (defaults to unknown).
 */
export interface CollectionDefinition<T = unknown> {
  /** Standard Schema for validating values */
  schema: StandardSchemaV1<T>
  /** The type field value in change events that map to this collection */
  type: string
  /** The property name in T that serves as the primary key */
  primaryKey: string
}
|
|
28
|
+
|
|
29
|
+
/**
 * Helper methods for creating change events for a collection.
 *
 * NOTE(review): `Omit<Record<string, string>, "operation">` does not actually
 * exclude the `operation` key — Omit over a string index signature leaves the
 * index signature intact, so the type still accepts an `operation` header.
 * The exclusion is effectively documentation-only; confirm intent.
 */
export interface CollectionEventHelpers<T> {
  /**
   * Create an insert change event.
   * When `key` is omitted it is derived from the value's primary-key field.
   */
  insert: (params: {
    key?: string
    value: T
    headers?: Omit<Record<string, string>, `operation`>
  }) => ChangeEvent<T>
  /**
   * Create an update change event.
   * `oldValue` is optional and is carried as `old_value` on the event.
   */
  update: (params: {
    key?: string
    value: T
    oldValue?: T
    headers?: Omit<Record<string, string>, `operation`>
  }) => ChangeEvent<T>
  /**
   * Create a delete change event.
   * Requires either `key` or an `oldValue` to derive the key from.
   */
  delete: (params: {
    key?: string
    oldValue?: T
    headers?: Omit<Record<string, string>, `operation`>
  }) => ChangeEvent<T>
  /**
   * Create an upsert change event (insert or update)
   */
  upsert: (params: {
    key?: string
    value: T
    headers?: Omit<Record<string, string>, `operation`>
  }) => ChangeEvent<T>
}
|
|
67
|
+
|
|
68
|
+
/**
 * Collection definition enhanced with event creation helpers
 * (insert/update/delete/upsert factories from CollectionEventHelpers).
 */
export type CollectionWithHelpers<T = unknown> = CollectionDefinition<T> &
  CollectionEventHelpers<T>

/**
 * Stream state definition containing all collections, keyed by collection name.
 */
export type StreamStateDefinition = Record<string, CollectionDefinition>

/**
 * Stream state schema with helper methods for creating change events.
 * The mapped type preserves each collection's inferred value type.
 */
export type StateSchema<T extends Record<string, CollectionDefinition>> = {
  [K in keyof T]: CollectionWithHelpers<
    T[K] extends CollectionDefinition<infer U> ? U : unknown
  >
}
|
|
87
|
+
|
|
88
|
+
/**
 * Definition for a single action that can be passed to createOptimisticAction.
 *
 * `onMutate` applies the optimistic local change synchronously;
 * `mutationFn` performs the authoritative (async) mutation.
 */
export interface ActionDefinition<TParams = any, TContext = any> {
  onMutate: (params: TParams) => void
  mutationFn: (params: TParams, context: TContext) => Promise<any>
}

/**
 * Factory function for creating actions with access to db and stream context.
 */
export type ActionFactory<
  TDef extends StreamStateDefinition,
  TActions extends Record<string, ActionDefinition<any>>,
> = (context: { db: StreamDB<TDef>; stream: DurableStream }) => TActions

/**
 * Map action definitions to callable action functions
 * (the callables produced by createOptimisticAction).
 */
export type ActionMap<TActions extends Record<string, ActionDefinition<any>>> =
  {
    [K in keyof TActions]: ReturnType<typeof createOptimisticAction<any>>
  }

/**
 * Options for creating a stream DB.
 */
export interface CreateStreamDBOptions<
  TDef extends StreamStateDefinition = StreamStateDefinition,
  TActions extends Record<string, ActionDefinition<any>> = Record<
    string,
    never
  >,
> {
  /** Options for creating the durable stream (stream is created lazily on preload) */
  streamOptions: DurableStreamOptions
  /** The stream state definition */
  state: TDef
  /** Optional factory function to create actions with db and stream context */
  actions?: ActionFactory<TDef, TActions>
}
|
|
129
|
+
|
|
130
|
+
/**
 * Extract the value type from a CollectionDefinition
 */
type ExtractCollectionType<T extends CollectionDefinition> =
  T extends CollectionDefinition<infer U> ? U : unknown

/**
 * Map collection definitions to TanStack DB Collection types.
 * Values are intersected with `object` because collections store object rows.
 */
type CollectionMap<TDef extends StreamStateDefinition> = {
  [K in keyof TDef]: Collection<ExtractCollectionType<TDef[K]> & object, string>
}

/**
 * The StreamDB interface - provides typed access to collections
 * plus the lifecycle methods declared in StreamDBMethods.
 */
export type StreamDB<TDef extends StreamStateDefinition> = {
  collections: CollectionMap<TDef>
} & StreamDBMethods

/**
 * StreamDB extended with the callable actions produced by an ActionFactory.
 */
export type StreamDBWithActions<
  TDef extends StreamStateDefinition,
  TActions extends Record<string, ActionDefinition<any>>,
> = StreamDB<TDef> & {
  actions: ActionMap<TActions>
}
|
|
159
|
+
|
|
160
|
+
/**
 * Utility methods available on StreamDB
 */
export interface StreamDBUtils {
  /**
   * Wait for a specific transaction ID to be synced through the stream
   * @param txid The transaction ID to wait for (UUID string)
   * @param timeout Optional timeout in milliseconds (defaults to 5000ms)
   * @returns Promise that resolves when the txid is synced
   */
  awaitTxId: (txid: string, timeout?: number) => Promise<void>
}

/**
 * Methods available on a StreamDB instance
 */
export interface StreamDBMethods {
  /**
   * The underlying DurableStream instance
   */
  stream: DurableStream

  /**
   * Preload all collections by consuming the stream until up-to-date.
   * The stream connection is established lazily on the first call.
   */
  preload: () => Promise<void>

  /**
   * Close the stream connection and cleanup
   */
  close: () => void

  /**
   * Utility methods for advanced stream operations
   */
  utils: StreamDBUtils
}
|
|
197
|
+
|
|
198
|
+
// ============================================================================
|
|
199
|
+
// Internal Event Dispatcher
|
|
200
|
+
// ============================================================================
|
|
201
|
+
|
|
202
|
+
/**
 * Handler for collection sync events.
 *
 * Mirrors the begin/write/commit/markReady/truncate callbacks handed to a
 * SyncConfig's sync function (see createStreamSyncConfig), plus the
 * collection's primary-key field name so the dispatcher can stamp the event
 * key onto incoming values.
 */
interface CollectionSyncHandler {
  begin: () => void
  write: (value: object, type: `insert` | `update` | `delete`) => void
  commit: () => void
  markReady: () => void
  truncate: () => void
  primaryKey: string
}
|
|
213
|
+
|
|
214
|
+
/**
|
|
215
|
+
* Internal event dispatcher that routes stream events to collection handlers
|
|
216
|
+
*/
|
|
217
|
+
class EventDispatcher {
|
|
218
|
+
/** Map from event type to collection handler */
|
|
219
|
+
private handlers = new Map<string, CollectionSyncHandler>()
|
|
220
|
+
|
|
221
|
+
/** Handlers that have pending writes (need commit) */
|
|
222
|
+
private pendingHandlers = new Set<CollectionSyncHandler>()
|
|
223
|
+
|
|
224
|
+
/** Whether we've received the initial up-to-date signal */
|
|
225
|
+
private isUpToDate = false
|
|
226
|
+
|
|
227
|
+
/** Resolvers and rejecters for preload promises */
|
|
228
|
+
private preloadResolvers: Array<() => void> = []
|
|
229
|
+
private preloadRejecters: Array<(error: Error) => void> = []
|
|
230
|
+
|
|
231
|
+
/** Set of all txids that have been seen and committed */
|
|
232
|
+
private seenTxids = new Set<string>()
|
|
233
|
+
|
|
234
|
+
/** Txids collected during current batch (before commit) */
|
|
235
|
+
private pendingTxids = new Set<string>()
|
|
236
|
+
|
|
237
|
+
/** Resolvers waiting for specific txids */
|
|
238
|
+
private txidResolvers = new Map<
|
|
239
|
+
string,
|
|
240
|
+
Array<{
|
|
241
|
+
resolve: () => void
|
|
242
|
+
reject: (error: Error) => void
|
|
243
|
+
timeoutId: ReturnType<typeof setTimeout>
|
|
244
|
+
}>
|
|
245
|
+
>()
|
|
246
|
+
|
|
247
|
+
/** Track existing keys per collection for upsert logic */
|
|
248
|
+
private existingKeys = new Map<string, Set<string>>()
|
|
249
|
+
|
|
250
|
+
/**
|
|
251
|
+
* Register a handler for a specific event type
|
|
252
|
+
*/
|
|
253
|
+
registerHandler(eventType: string, handler: CollectionSyncHandler): void {
|
|
254
|
+
this.handlers.set(eventType, handler)
|
|
255
|
+
// Initialize key tracking for upsert logic
|
|
256
|
+
if (!this.existingKeys.has(eventType)) {
|
|
257
|
+
this.existingKeys.set(eventType, new Set())
|
|
258
|
+
}
|
|
259
|
+
}
|
|
260
|
+
|
|
261
|
+
/**
|
|
262
|
+
* Dispatch a change event to the appropriate collection.
|
|
263
|
+
* Writes are buffered until commit() is called via markUpToDate().
|
|
264
|
+
*/
|
|
265
|
+
dispatchChange(event: StateEvent): void {
|
|
266
|
+
if (!isChangeEvent(event)) return
|
|
267
|
+
|
|
268
|
+
// Check for txid in headers and collect it
|
|
269
|
+
if (event.headers.txid && typeof event.headers.txid === `string`) {
|
|
270
|
+
this.pendingTxids.add(event.headers.txid)
|
|
271
|
+
}
|
|
272
|
+
|
|
273
|
+
const handler = this.handlers.get(event.type)
|
|
274
|
+
if (!handler) {
|
|
275
|
+
// Unknown event type - ignore silently
|
|
276
|
+
return
|
|
277
|
+
}
|
|
278
|
+
|
|
279
|
+
let operation = event.headers.operation
|
|
280
|
+
|
|
281
|
+
// Validate that values are objects (required for key tracking)
|
|
282
|
+
if (operation !== `delete`) {
|
|
283
|
+
if (typeof event.value !== `object` || event.value === null) {
|
|
284
|
+
throw new Error(
|
|
285
|
+
`StreamDB collections require object values; got ${typeof event.value} for type=${event.type}, key=${event.key}`
|
|
286
|
+
)
|
|
287
|
+
}
|
|
288
|
+
}
|
|
289
|
+
|
|
290
|
+
// Get value, ensuring it's an object
|
|
291
|
+
const originalValue = (event.value ?? {}) as object
|
|
292
|
+
|
|
293
|
+
// Create a shallow copy to avoid mutating the original
|
|
294
|
+
const value = { ...originalValue }
|
|
295
|
+
|
|
296
|
+
// Set the primary key field on the value object from the event key
|
|
297
|
+
;(value as any)[handler.primaryKey] = event.key
|
|
298
|
+
|
|
299
|
+
// Begin transaction on first write to this handler
|
|
300
|
+
if (!this.pendingHandlers.has(handler)) {
|
|
301
|
+
handler.begin()
|
|
302
|
+
this.pendingHandlers.add(handler)
|
|
303
|
+
}
|
|
304
|
+
|
|
305
|
+
// Handle upsert by converting to insert or update
|
|
306
|
+
if (operation === `upsert`) {
|
|
307
|
+
const keys = this.existingKeys.get(event.type)
|
|
308
|
+
const existing = keys?.has(event.key)
|
|
309
|
+
operation = existing ? `update` : `insert`
|
|
310
|
+
}
|
|
311
|
+
|
|
312
|
+
// Track key existence for upsert logic
|
|
313
|
+
const keys = this.existingKeys.get(event.type)
|
|
314
|
+
if (operation === `insert` || operation === `update`) {
|
|
315
|
+
keys?.add(event.key)
|
|
316
|
+
} else {
|
|
317
|
+
// Must be delete
|
|
318
|
+
keys?.delete(event.key)
|
|
319
|
+
}
|
|
320
|
+
|
|
321
|
+
try {
|
|
322
|
+
handler.write(value, operation)
|
|
323
|
+
} catch (error) {
|
|
324
|
+
console.error(`[StreamDB] Error in handler.write():`, error)
|
|
325
|
+
console.error(`[StreamDB] Event that caused error:`, {
|
|
326
|
+
type: event.type,
|
|
327
|
+
key: event.key,
|
|
328
|
+
operation,
|
|
329
|
+
})
|
|
330
|
+
throw error
|
|
331
|
+
}
|
|
332
|
+
}
|
|
333
|
+
|
|
334
|
+
/**
|
|
335
|
+
* Handle control events from the stream JSON items
|
|
336
|
+
*/
|
|
337
|
+
dispatchControl(event: StateEvent): void {
|
|
338
|
+
if (!isControlEvent(event)) return
|
|
339
|
+
|
|
340
|
+
switch (event.headers.control) {
|
|
341
|
+
case `reset`:
|
|
342
|
+
// Truncate all collections
|
|
343
|
+
for (const handler of this.handlers.values()) {
|
|
344
|
+
handler.truncate()
|
|
345
|
+
}
|
|
346
|
+
// Clear key tracking
|
|
347
|
+
for (const keys of this.existingKeys.values()) {
|
|
348
|
+
keys.clear()
|
|
349
|
+
}
|
|
350
|
+
this.pendingHandlers.clear()
|
|
351
|
+
this.isUpToDate = false
|
|
352
|
+
break
|
|
353
|
+
|
|
354
|
+
case `snapshot-start`:
|
|
355
|
+
case `snapshot-end`:
|
|
356
|
+
// These are hints for snapshot boundaries
|
|
357
|
+
break
|
|
358
|
+
}
|
|
359
|
+
}
|
|
360
|
+
|
|
361
|
+
/**
|
|
362
|
+
* Commit all pending writes and handle up-to-date signal
|
|
363
|
+
*/
|
|
364
|
+
markUpToDate(): void {
|
|
365
|
+
// Commit all handlers that have pending writes
|
|
366
|
+
for (const handler of this.pendingHandlers) {
|
|
367
|
+
try {
|
|
368
|
+
handler.commit()
|
|
369
|
+
} catch (error) {
|
|
370
|
+
console.error(`[StreamDB] Error in handler.commit():`, error)
|
|
371
|
+
|
|
372
|
+
// WORKAROUND for TanStack DB groupBy bug
|
|
373
|
+
// If it's the known "already exists in collection live-query" error, log and continue
|
|
374
|
+
if (
|
|
375
|
+
error instanceof Error &&
|
|
376
|
+
error.message.includes(`already exists in the collection`) &&
|
|
377
|
+
error.message.includes(`live-query`)
|
|
378
|
+
) {
|
|
379
|
+
console.warn(
|
|
380
|
+
`[StreamDB] Known TanStack DB groupBy bug detected - continuing despite error`
|
|
381
|
+
)
|
|
382
|
+
console.warn(
|
|
383
|
+
`[StreamDB] Queries with groupBy may show stale data until fixed`
|
|
384
|
+
)
|
|
385
|
+
continue // Don't throw, let other handlers commit
|
|
386
|
+
}
|
|
387
|
+
|
|
388
|
+
throw error
|
|
389
|
+
}
|
|
390
|
+
}
|
|
391
|
+
this.pendingHandlers.clear()
|
|
392
|
+
|
|
393
|
+
// Commit pending txids
|
|
394
|
+
for (const txid of this.pendingTxids) {
|
|
395
|
+
this.seenTxids.add(txid)
|
|
396
|
+
|
|
397
|
+
// Resolve any promises waiting for this txid
|
|
398
|
+
const resolvers = this.txidResolvers.get(txid)
|
|
399
|
+
if (resolvers) {
|
|
400
|
+
for (const { resolve, timeoutId } of resolvers) {
|
|
401
|
+
clearTimeout(timeoutId)
|
|
402
|
+
resolve()
|
|
403
|
+
}
|
|
404
|
+
this.txidResolvers.delete(txid)
|
|
405
|
+
}
|
|
406
|
+
}
|
|
407
|
+
this.pendingTxids.clear()
|
|
408
|
+
|
|
409
|
+
if (!this.isUpToDate) {
|
|
410
|
+
this.isUpToDate = true
|
|
411
|
+
// Mark all collections as ready
|
|
412
|
+
for (const handler of this.handlers.values()) {
|
|
413
|
+
handler.markReady()
|
|
414
|
+
}
|
|
415
|
+
// Resolve all preload promises
|
|
416
|
+
for (const resolve of this.preloadResolvers) {
|
|
417
|
+
resolve()
|
|
418
|
+
}
|
|
419
|
+
this.preloadResolvers = []
|
|
420
|
+
}
|
|
421
|
+
}
|
|
422
|
+
|
|
423
|
+
/**
|
|
424
|
+
* Wait for the stream to reach up-to-date state
|
|
425
|
+
*/
|
|
426
|
+
waitForUpToDate(): Promise<void> {
|
|
427
|
+
if (this.isUpToDate) {
|
|
428
|
+
return Promise.resolve()
|
|
429
|
+
}
|
|
430
|
+
return new Promise((resolve, reject) => {
|
|
431
|
+
this.preloadResolvers.push(resolve)
|
|
432
|
+
this.preloadRejecters.push(reject)
|
|
433
|
+
})
|
|
434
|
+
}
|
|
435
|
+
|
|
436
|
+
/**
|
|
437
|
+
* Reject all waiting preload promises with an error
|
|
438
|
+
*/
|
|
439
|
+
rejectAll(error: Error): void {
|
|
440
|
+
for (const reject of this.preloadRejecters) {
|
|
441
|
+
reject(error)
|
|
442
|
+
}
|
|
443
|
+
this.preloadResolvers = []
|
|
444
|
+
this.preloadRejecters = []
|
|
445
|
+
|
|
446
|
+
// Also reject all pending txid promises
|
|
447
|
+
for (const resolvers of this.txidResolvers.values()) {
|
|
448
|
+
for (const { reject, timeoutId } of resolvers) {
|
|
449
|
+
clearTimeout(timeoutId)
|
|
450
|
+
reject(error)
|
|
451
|
+
}
|
|
452
|
+
}
|
|
453
|
+
this.txidResolvers.clear()
|
|
454
|
+
}
|
|
455
|
+
|
|
456
|
+
/**
|
|
457
|
+
* Check if we've received up-to-date
|
|
458
|
+
*/
|
|
459
|
+
get ready(): boolean {
|
|
460
|
+
return this.isUpToDate
|
|
461
|
+
}
|
|
462
|
+
|
|
463
|
+
/**
|
|
464
|
+
* Wait for a specific txid to be seen in the stream
|
|
465
|
+
*/
|
|
466
|
+
awaitTxId(txid: string, timeout: number = 5000): Promise<void> {
|
|
467
|
+
// Check if we've already seen this txid
|
|
468
|
+
if (this.seenTxids.has(txid)) {
|
|
469
|
+
return Promise.resolve()
|
|
470
|
+
}
|
|
471
|
+
|
|
472
|
+
return new Promise((resolve, reject) => {
|
|
473
|
+
const timeoutId = setTimeout(() => {
|
|
474
|
+
// Remove this resolver from the map
|
|
475
|
+
const resolvers = this.txidResolvers.get(txid)
|
|
476
|
+
if (resolvers) {
|
|
477
|
+
const index = resolvers.findIndex((r) => r.timeoutId === timeoutId)
|
|
478
|
+
if (index !== -1) {
|
|
479
|
+
resolvers.splice(index, 1)
|
|
480
|
+
}
|
|
481
|
+
if (resolvers.length === 0) {
|
|
482
|
+
this.txidResolvers.delete(txid)
|
|
483
|
+
}
|
|
484
|
+
}
|
|
485
|
+
reject(new Error(`Timeout waiting for txid: ${txid}`))
|
|
486
|
+
}, timeout)
|
|
487
|
+
|
|
488
|
+
// Add to resolvers map
|
|
489
|
+
if (!this.txidResolvers.has(txid)) {
|
|
490
|
+
this.txidResolvers.set(txid, [])
|
|
491
|
+
}
|
|
492
|
+
this.txidResolvers.get(txid)!.push({ resolve, reject, timeoutId })
|
|
493
|
+
})
|
|
494
|
+
}
|
|
495
|
+
}
|
|
496
|
+
|
|
497
|
+
// ============================================================================
|
|
498
|
+
// Sync Factory
|
|
499
|
+
// ============================================================================
|
|
500
|
+
|
|
501
|
+
/**
|
|
502
|
+
* Create a sync config for a stream-backed collection
|
|
503
|
+
*/
|
|
504
|
+
function createStreamSyncConfig<T extends object>(
|
|
505
|
+
eventType: string,
|
|
506
|
+
dispatcher: EventDispatcher,
|
|
507
|
+
primaryKey: string
|
|
508
|
+
): SyncConfig<T, string> {
|
|
509
|
+
return {
|
|
510
|
+
sync: ({ begin, write, commit, markReady, truncate }) => {
|
|
511
|
+
// Register this collection's handler with the dispatcher
|
|
512
|
+
dispatcher.registerHandler(eventType, {
|
|
513
|
+
begin,
|
|
514
|
+
write: (value, type) => {
|
|
515
|
+
write({
|
|
516
|
+
value: value as T,
|
|
517
|
+
type,
|
|
518
|
+
})
|
|
519
|
+
},
|
|
520
|
+
commit,
|
|
521
|
+
markReady,
|
|
522
|
+
truncate,
|
|
523
|
+
primaryKey,
|
|
524
|
+
})
|
|
525
|
+
|
|
526
|
+
// If the dispatcher is already up-to-date, mark ready immediately
|
|
527
|
+
if (dispatcher.ready) {
|
|
528
|
+
markReady()
|
|
529
|
+
}
|
|
530
|
+
|
|
531
|
+
// Return cleanup function
|
|
532
|
+
return () => {
|
|
533
|
+
// No cleanup needed - stream lifecycle managed by StreamDB
|
|
534
|
+
}
|
|
535
|
+
},
|
|
536
|
+
}
|
|
537
|
+
}
|
|
538
|
+
|
|
539
|
+
// ============================================================================
|
|
540
|
+
// Main Implementation
|
|
541
|
+
// ============================================================================
|
|
542
|
+
|
|
543
|
+
/**
|
|
544
|
+
* Reserved collection names that would collide with StreamDB properties
|
|
545
|
+
* (collections are now namespaced, but we still prevent internal name collisions)
|
|
546
|
+
*/
|
|
547
|
+
const RESERVED_COLLECTION_NAMES = new Set([
|
|
548
|
+
`collections`,
|
|
549
|
+
`preload`,
|
|
550
|
+
`close`,
|
|
551
|
+
`utils`,
|
|
552
|
+
`actions`,
|
|
553
|
+
])
|
|
554
|
+
|
|
555
|
+
/**
|
|
556
|
+
* Create helper functions for a collection
|
|
557
|
+
*/
|
|
558
|
+
function createCollectionHelpers<T>(
|
|
559
|
+
eventType: string,
|
|
560
|
+
primaryKey: string,
|
|
561
|
+
schema: StandardSchemaV1<T>
|
|
562
|
+
): CollectionEventHelpers<T> {
|
|
563
|
+
return {
|
|
564
|
+
insert: ({ key, value, headers }): ChangeEvent<T> => {
|
|
565
|
+
// Validate value
|
|
566
|
+
const result = schema[`~standard`].validate(value)
|
|
567
|
+
if (`issues` in result) {
|
|
568
|
+
throw new Error(
|
|
569
|
+
`Validation failed for ${eventType} insert: ${result.issues?.map((i) => i.message).join(`, `) ?? `Unknown validation error`}`
|
|
570
|
+
)
|
|
571
|
+
}
|
|
572
|
+
|
|
573
|
+
// Derive key from value if not explicitly provided
|
|
574
|
+
const derived = (value as any)[primaryKey]
|
|
575
|
+
const finalKey =
|
|
576
|
+
key ?? (derived != null && derived !== `` ? String(derived) : undefined)
|
|
577
|
+
if (finalKey == null || finalKey === ``) {
|
|
578
|
+
throw new Error(
|
|
579
|
+
`Cannot create ${eventType} insert event: must provide either 'key' or a value with a non-empty '${primaryKey}' field`
|
|
580
|
+
)
|
|
581
|
+
}
|
|
582
|
+
|
|
583
|
+
return {
|
|
584
|
+
type: eventType,
|
|
585
|
+
key: finalKey,
|
|
586
|
+
value,
|
|
587
|
+
headers: { ...headers, operation: `insert` },
|
|
588
|
+
}
|
|
589
|
+
},
|
|
590
|
+
update: ({ key, value, oldValue, headers }): ChangeEvent<T> => {
|
|
591
|
+
// Validate value
|
|
592
|
+
const result = schema[`~standard`].validate(value)
|
|
593
|
+
if (`issues` in result) {
|
|
594
|
+
throw new Error(
|
|
595
|
+
`Validation failed for ${eventType} update: ${result.issues?.map((i) => i.message).join(`, `) ?? `Unknown validation error`}`
|
|
596
|
+
)
|
|
597
|
+
}
|
|
598
|
+
|
|
599
|
+
// Optionally validate oldValue if provided
|
|
600
|
+
if (oldValue !== undefined) {
|
|
601
|
+
const oldResult = schema[`~standard`].validate(oldValue)
|
|
602
|
+
if (`issues` in oldResult) {
|
|
603
|
+
throw new Error(
|
|
604
|
+
`Validation failed for ${eventType} update (oldValue): ${oldResult.issues?.map((i) => i.message).join(`, `) ?? `Unknown validation error`}`
|
|
605
|
+
)
|
|
606
|
+
}
|
|
607
|
+
}
|
|
608
|
+
|
|
609
|
+
// Derive key from value if not explicitly provided
|
|
610
|
+
const derived = (value as any)[primaryKey]
|
|
611
|
+
const finalKey =
|
|
612
|
+
key ?? (derived != null && derived !== `` ? String(derived) : undefined)
|
|
613
|
+
if (finalKey == null || finalKey === ``) {
|
|
614
|
+
throw new Error(
|
|
615
|
+
`Cannot create ${eventType} update event: must provide either 'key' or a value with a non-empty '${primaryKey}' field`
|
|
616
|
+
)
|
|
617
|
+
}
|
|
618
|
+
|
|
619
|
+
return {
|
|
620
|
+
type: eventType,
|
|
621
|
+
key: finalKey,
|
|
622
|
+
value,
|
|
623
|
+
old_value: oldValue,
|
|
624
|
+
headers: { ...headers, operation: `update` },
|
|
625
|
+
}
|
|
626
|
+
},
|
|
627
|
+
delete: ({ key, oldValue, headers }): ChangeEvent<T> => {
|
|
628
|
+
// Optionally validate oldValue if provided
|
|
629
|
+
if (oldValue !== undefined) {
|
|
630
|
+
const result = schema[`~standard`].validate(oldValue)
|
|
631
|
+
if (`issues` in result) {
|
|
632
|
+
throw new Error(
|
|
633
|
+
`Validation failed for ${eventType} delete (oldValue): ${result.issues?.map((i) => i.message).join(`, `) ?? `Unknown validation error`}`
|
|
634
|
+
)
|
|
635
|
+
}
|
|
636
|
+
}
|
|
637
|
+
|
|
638
|
+
// Ensure we have either key or oldValue to derive the key from
|
|
639
|
+
const finalKey =
|
|
640
|
+
key ?? (oldValue ? String((oldValue as any)[primaryKey]) : undefined)
|
|
641
|
+
if (!finalKey) {
|
|
642
|
+
throw new Error(
|
|
643
|
+
`Cannot create ${eventType} delete event: must provide either 'key' or 'oldValue' with a ${primaryKey} field`
|
|
644
|
+
)
|
|
645
|
+
}
|
|
646
|
+
|
|
647
|
+
return {
|
|
648
|
+
type: eventType,
|
|
649
|
+
key: finalKey,
|
|
650
|
+
old_value: oldValue,
|
|
651
|
+
headers: { ...headers, operation: `delete` },
|
|
652
|
+
}
|
|
653
|
+
},
|
|
654
|
+
upsert: ({ key, value, headers }): ChangeEvent<T> => {
|
|
655
|
+
// Validate value
|
|
656
|
+
const result = schema[`~standard`].validate(value)
|
|
657
|
+
if (`issues` in result) {
|
|
658
|
+
throw new Error(
|
|
659
|
+
`Validation failed for ${eventType} upsert: ${result.issues?.map((i) => i.message).join(`, `) ?? `Unknown validation error`}`
|
|
660
|
+
)
|
|
661
|
+
}
|
|
662
|
+
|
|
663
|
+
// Derive key from value if not explicitly provided
|
|
664
|
+
const derived = (value as any)[primaryKey]
|
|
665
|
+
const finalKey =
|
|
666
|
+
key ?? (derived != null && derived !== `` ? String(derived) : undefined)
|
|
667
|
+
if (finalKey == null || finalKey === ``) {
|
|
668
|
+
throw new Error(
|
|
669
|
+
`Cannot create ${eventType} upsert event: must provide either 'key' or a value with a non-empty '${primaryKey}' field`
|
|
670
|
+
)
|
|
671
|
+
}
|
|
672
|
+
|
|
673
|
+
return {
|
|
674
|
+
type: eventType,
|
|
675
|
+
key: finalKey,
|
|
676
|
+
value,
|
|
677
|
+
headers: { ...headers, operation: `upsert` },
|
|
678
|
+
}
|
|
679
|
+
},
|
|
680
|
+
}
|
|
681
|
+
}
|
|
682
|
+
|
|
683
|
+
/**
|
|
684
|
+
* Create a state schema definition with typed collections and event helpers
|
|
685
|
+
*/
|
|
686
|
+
export function createStateSchema<
|
|
687
|
+
T extends Record<string, CollectionDefinition>,
|
|
688
|
+
>(collections: T): StateSchema<T> {
|
|
689
|
+
// Validate no reserved collection names
|
|
690
|
+
for (const name of Object.keys(collections)) {
|
|
691
|
+
if (RESERVED_COLLECTION_NAMES.has(name)) {
|
|
692
|
+
throw new Error(
|
|
693
|
+
`Reserved collection name "${name}" - this would collide with StreamDB properties (${Array.from(RESERVED_COLLECTION_NAMES).join(`, `)})`
|
|
694
|
+
)
|
|
695
|
+
}
|
|
696
|
+
}
|
|
697
|
+
|
|
698
|
+
// Validate no duplicate event types
|
|
699
|
+
const typeToCollection = new Map<string, string>()
|
|
700
|
+
for (const [collectionName, def] of Object.entries(collections)) {
|
|
701
|
+
const existing = typeToCollection.get(def.type)
|
|
702
|
+
if (existing) {
|
|
703
|
+
throw new Error(
|
|
704
|
+
`Duplicate event type "${def.type}" - used by both "${existing}" and "${collectionName}" collections`
|
|
705
|
+
)
|
|
706
|
+
}
|
|
707
|
+
typeToCollection.set(def.type, collectionName)
|
|
708
|
+
}
|
|
709
|
+
|
|
710
|
+
// Enhance collections with helper methods
|
|
711
|
+
const enhancedCollections: any = {}
|
|
712
|
+
for (const [name, collectionDef] of Object.entries(collections)) {
|
|
713
|
+
enhancedCollections[name] = {
|
|
714
|
+
...collectionDef,
|
|
715
|
+
...createCollectionHelpers(
|
|
716
|
+
collectionDef.type,
|
|
717
|
+
collectionDef.primaryKey,
|
|
718
|
+
collectionDef.schema
|
|
719
|
+
),
|
|
720
|
+
}
|
|
721
|
+
}
|
|
722
|
+
|
|
723
|
+
return enhancedCollections
|
|
724
|
+
}
|
|
725
|
+
|
|
726
|
+
/**
|
|
727
|
+
* Create a stream-backed database with TanStack DB collections
|
|
728
|
+
*
|
|
729
|
+
* This function is synchronous - it creates the stream handle and collections
|
|
730
|
+
* but does not start the stream connection. Call `db.preload()` to connect
|
|
731
|
+
* and sync initial data.
|
|
732
|
+
*
|
|
733
|
+
* @example
|
|
734
|
+
* ```typescript
|
|
735
|
+
* const stateSchema = createStateSchema({
|
|
736
|
+
* users: { schema: userSchema, type: "user", primaryKey: "id" },
|
|
737
|
+
* messages: { schema: messageSchema, type: "message", primaryKey: "id" },
|
|
738
|
+
* })
|
|
739
|
+
*
|
|
740
|
+
* // Create a stream DB (synchronous - stream is created lazily on preload)
|
|
741
|
+
* const db = createStreamDB({
|
|
742
|
+
* streamOptions: {
|
|
743
|
+
* url: "https://api.example.com/streams/my-stream",
|
|
744
|
+
* contentType: "application/json",
|
|
745
|
+
* },
|
|
746
|
+
* state: stateSchema,
|
|
747
|
+
* })
|
|
748
|
+
*
|
|
749
|
+
* // preload() creates the stream and loads initial data
|
|
750
|
+
* await db.preload()
|
|
751
|
+
* const user = await db.collections.users.get("123")
|
|
752
|
+
* ```
|
|
753
|
+
*/
|
|
754
|
+
export function createStreamDB<
|
|
755
|
+
TDef extends StreamStateDefinition,
|
|
756
|
+
TActions extends Record<string, ActionDefinition<any>> = Record<
|
|
757
|
+
string,
|
|
758
|
+
never
|
|
759
|
+
>,
|
|
760
|
+
>(
|
|
761
|
+
options: CreateStreamDBOptions<TDef, TActions>
|
|
762
|
+
): TActions extends Record<string, never>
|
|
763
|
+
? StreamDB<TDef>
|
|
764
|
+
: StreamDBWithActions<TDef, TActions> {
|
|
765
|
+
const { streamOptions, state, actions: actionsFactory } = options
|
|
766
|
+
|
|
767
|
+
// Create a stream handle (lightweight, doesn't connect until stream() is called)
|
|
768
|
+
const stream = new DurableStreamClass(streamOptions)
|
|
769
|
+
|
|
770
|
+
// Create the event dispatcher
|
|
771
|
+
const dispatcher = new EventDispatcher()
|
|
772
|
+
|
|
773
|
+
// Create TanStack DB collections for each definition
|
|
774
|
+
const collectionInstances: Record<string, Collection<object, string>> = {}
|
|
775
|
+
|
|
776
|
+
for (const [name, definition] of Object.entries(state)) {
|
|
777
|
+
const collection = createCollection({
|
|
778
|
+
id: `stream-db:${name}`,
|
|
779
|
+
schema: definition.schema as StandardSchemaV1<object>,
|
|
780
|
+
getKey: (item: any) => String(item[definition.primaryKey]),
|
|
781
|
+
sync: createStreamSyncConfig(
|
|
782
|
+
definition.type,
|
|
783
|
+
dispatcher,
|
|
784
|
+
definition.primaryKey
|
|
785
|
+
),
|
|
786
|
+
startSync: true, // Start syncing immediately
|
|
787
|
+
// Disable GC - we manage lifecycle via db.close()
|
|
788
|
+
// DB would otherwise clean up the collections independently of each other, we
|
|
789
|
+
// cant recover one and not the others from a single log.
|
|
790
|
+
gcTime: 0,
|
|
791
|
+
})
|
|
792
|
+
|
|
793
|
+
console.log(`[StreamDB] Created collection "${name}":`, {
|
|
794
|
+
type: typeof collection,
|
|
795
|
+
constructor: collection.constructor.name,
|
|
796
|
+
isCollection: collection instanceof Object,
|
|
797
|
+
hasSize: `size` in collection,
|
|
798
|
+
})
|
|
799
|
+
|
|
800
|
+
collectionInstances[name] = collection
|
|
801
|
+
}
|
|
802
|
+
|
|
803
|
+
// Stream consumer state (lazy initialization)
|
|
804
|
+
let streamResponse: StreamResponse<StateEvent> | null = null
|
|
805
|
+
const abortController = new AbortController()
|
|
806
|
+
let consumerStarted = false
|
|
807
|
+
|
|
808
|
+
/**
|
|
809
|
+
* Start the stream consumer (called lazily on first preload)
|
|
810
|
+
*/
|
|
811
|
+
const startConsumer = async (): Promise<void> => {
|
|
812
|
+
if (consumerStarted) return
|
|
813
|
+
consumerStarted = true
|
|
814
|
+
|
|
815
|
+
// Start streaming (this is where the connection actually happens)
|
|
816
|
+
streamResponse = await stream.stream<StateEvent>({
|
|
817
|
+
live: `auto`,
|
|
818
|
+
signal: abortController.signal,
|
|
819
|
+
})
|
|
820
|
+
|
|
821
|
+
// Track batch processing for debugging
|
|
822
|
+
let batchCount = 0
|
|
823
|
+
let lastBatchTime = Date.now()
|
|
824
|
+
|
|
825
|
+
// Process events as they come in
|
|
826
|
+
streamResponse.subscribeJson(async (batch) => {
|
|
827
|
+
try {
|
|
828
|
+
batchCount++
|
|
829
|
+
lastBatchTime = Date.now()
|
|
830
|
+
|
|
831
|
+
if (batch.items.length > 0) {
|
|
832
|
+
console.log(
|
|
833
|
+
`[StreamDB] Processing batch #${batchCount}: ${batch.items.length} items, upToDate=${batch.upToDate}`
|
|
834
|
+
)
|
|
835
|
+
}
|
|
836
|
+
|
|
837
|
+
for (const event of batch.items) {
|
|
838
|
+
if (isChangeEvent(event)) {
|
|
839
|
+
dispatcher.dispatchChange(event)
|
|
840
|
+
} else if (isControlEvent(event)) {
|
|
841
|
+
dispatcher.dispatchControl(event)
|
|
842
|
+
}
|
|
843
|
+
}
|
|
844
|
+
|
|
845
|
+
// Check batch-level up-to-date signal
|
|
846
|
+
if (batch.upToDate) {
|
|
847
|
+
console.log(
|
|
848
|
+
`[StreamDB] Marking up-to-date after batch #${batchCount}`
|
|
849
|
+
)
|
|
850
|
+
dispatcher.markUpToDate()
|
|
851
|
+
console.log(`[StreamDB] Successfully marked up-to-date`)
|
|
852
|
+
}
|
|
853
|
+
|
|
854
|
+
if (batch.items.length > 0) {
|
|
855
|
+
console.log(`[StreamDB] Successfully processed batch #${batchCount}`)
|
|
856
|
+
}
|
|
857
|
+
} catch (error) {
|
|
858
|
+
console.error(`[StreamDB] Error processing batch:`, error)
|
|
859
|
+
console.error(`[StreamDB] Failed batch:`, batch)
|
|
860
|
+
// Reject all waiting preload promises
|
|
861
|
+
dispatcher.rejectAll(error as Error)
|
|
862
|
+
// Abort the stream to stop further processing
|
|
863
|
+
abortController.abort()
|
|
864
|
+
// Don't rethrow - we've already rejected the promise
|
|
865
|
+
}
|
|
866
|
+
})
|
|
867
|
+
|
|
868
|
+
// Health check to detect silent stalls
|
|
869
|
+
const healthCheck = setInterval(() => {
|
|
870
|
+
const timeSinceLastBatch = Date.now() - lastBatchTime
|
|
871
|
+
console.log(
|
|
872
|
+
`[StreamDB] Health: ${batchCount} batches processed, last batch ${(timeSinceLastBatch / 1000).toFixed(1)}s ago`
|
|
873
|
+
)
|
|
874
|
+
}, 15000)
|
|
875
|
+
|
|
876
|
+
// Clean up health check on abort
|
|
877
|
+
abortController.signal.addEventListener(`abort`, () => {
|
|
878
|
+
clearInterval(healthCheck)
|
|
879
|
+
console.log(`[StreamDB] Aborted - cleaning up health check`)
|
|
880
|
+
})
|
|
881
|
+
}
|
|
882
|
+
|
|
883
|
+
// Build the StreamDB object with methods
|
|
884
|
+
const dbMethods: StreamDBMethods = {
|
|
885
|
+
stream,
|
|
886
|
+
preload: async () => {
|
|
887
|
+
await startConsumer()
|
|
888
|
+
await dispatcher.waitForUpToDate()
|
|
889
|
+
},
|
|
890
|
+
close: () => {
|
|
891
|
+
// Reject all pending operations before aborting
|
|
892
|
+
dispatcher.rejectAll(new Error(`StreamDB closed`))
|
|
893
|
+
abortController.abort()
|
|
894
|
+
},
|
|
895
|
+
utils: {
|
|
896
|
+
awaitTxId: (txid: string, timeout?: number) =>
|
|
897
|
+
dispatcher.awaitTxId(txid, timeout),
|
|
898
|
+
},
|
|
899
|
+
}
|
|
900
|
+
|
|
901
|
+
// Combine collections with methods
|
|
902
|
+
console.log(
|
|
903
|
+
`[StreamDB] Creating db object with collections:`,
|
|
904
|
+
Object.keys(collectionInstances)
|
|
905
|
+
)
|
|
906
|
+
const db = {
|
|
907
|
+
collections: collectionInstances,
|
|
908
|
+
...dbMethods,
|
|
909
|
+
} as unknown as StreamDB<TDef>
|
|
910
|
+
console.log(`[StreamDB] db.collections:`, Object.keys(db.collections))
|
|
911
|
+
console.log(`[StreamDB] db.collections.events:`, db.collections.events)
|
|
912
|
+
|
|
913
|
+
// If actions factory is provided, wrap actions and return db with actions
|
|
914
|
+
if (actionsFactory) {
|
|
915
|
+
const actionDefs = actionsFactory({ db, stream })
|
|
916
|
+
const wrappedActions: Record<
|
|
917
|
+
string,
|
|
918
|
+
ReturnType<typeof createOptimisticAction>
|
|
919
|
+
> = {}
|
|
920
|
+
for (const [name, def] of Object.entries(actionDefs)) {
|
|
921
|
+
wrappedActions[name] = createOptimisticAction({
|
|
922
|
+
onMutate: def.onMutate,
|
|
923
|
+
mutationFn: def.mutationFn,
|
|
924
|
+
})
|
|
925
|
+
}
|
|
926
|
+
|
|
927
|
+
return {
|
|
928
|
+
...db,
|
|
929
|
+
actions: wrappedActions,
|
|
930
|
+
} as any
|
|
931
|
+
}
|
|
932
|
+
|
|
933
|
+
return db as any
|
|
934
|
+
}
|