@livestore/common 0.4.0-dev.20 → 0.4.0-dev.21

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (63) hide show
  1. package/dist/.tsbuildinfo +1 -1
  2. package/dist/ClientSessionLeaderThreadProxy.d.ts +3 -0
  3. package/dist/ClientSessionLeaderThreadProxy.d.ts.map +1 -1
  4. package/dist/ClientSessionLeaderThreadProxy.js.map +1 -1
  5. package/dist/devtools/devtools-messages-client-session.d.ts +21 -21
  6. package/dist/devtools/devtools-messages-common.d.ts +6 -6
  7. package/dist/devtools/devtools-messages-leader.d.ts +26 -24
  8. package/dist/devtools/devtools-messages-leader.d.ts.map +1 -1
  9. package/dist/devtools/devtools-messages-leader.js +1 -0
  10. package/dist/devtools/devtools-messages-leader.js.map +1 -1
  11. package/dist/leader-thread/LeaderSyncProcessor.d.ts +33 -0
  12. package/dist/leader-thread/LeaderSyncProcessor.d.ts.map +1 -1
  13. package/dist/leader-thread/LeaderSyncProcessor.js +2 -2
  14. package/dist/leader-thread/LeaderSyncProcessor.js.map +1 -1
  15. package/dist/leader-thread/eventlog.d.ts +6 -1
  16. package/dist/leader-thread/eventlog.d.ts.map +1 -1
  17. package/dist/leader-thread/eventlog.js +59 -2
  18. package/dist/leader-thread/eventlog.js.map +1 -1
  19. package/dist/leader-thread/leader-worker-devtools.js +29 -6
  20. package/dist/leader-thread/leader-worker-devtools.js.map +1 -1
  21. package/dist/leader-thread/mod.d.ts +1 -0
  22. package/dist/leader-thread/mod.d.ts.map +1 -1
  23. package/dist/leader-thread/mod.js +1 -0
  24. package/dist/leader-thread/mod.js.map +1 -1
  25. package/dist/leader-thread/stream-events.d.ts +56 -0
  26. package/dist/leader-thread/stream-events.d.ts.map +1 -0
  27. package/dist/leader-thread/stream-events.js +166 -0
  28. package/dist/leader-thread/stream-events.js.map +1 -0
  29. package/dist/leader-thread/types.d.ts +77 -1
  30. package/dist/leader-thread/types.d.ts.map +1 -1
  31. package/dist/leader-thread/types.js +13 -0
  32. package/dist/leader-thread/types.js.map +1 -1
  33. package/dist/otel.d.ts +2 -1
  34. package/dist/otel.d.ts.map +1 -1
  35. package/dist/otel.js +5 -0
  36. package/dist/otel.js.map +1 -1
  37. package/dist/schema/state/sqlite/client-document-def.d.ts.map +1 -1
  38. package/dist/schema/state/sqlite/client-document-def.js.map +1 -1
  39. package/dist/schema/state/sqlite/column-def.test.js +2 -3
  40. package/dist/schema/state/sqlite/column-def.test.js.map +1 -1
  41. package/dist/schema/state/sqlite/db-schema/dsl/mod.d.ts.map +1 -1
  42. package/dist/schema/state/sqlite/db-schema/dsl/mod.js.map +1 -1
  43. package/dist/schema/state/sqlite/query-builder/api.d.ts +2 -1
  44. package/dist/schema/state/sqlite/query-builder/api.d.ts.map +1 -1
  45. package/dist/schema/state/sqlite/table-def.d.ts.map +1 -1
  46. package/dist/version.d.ts +1 -1
  47. package/dist/version.js +1 -1
  48. package/package.json +4 -4
  49. package/src/ClientSessionLeaderThreadProxy.ts +3 -0
  50. package/src/devtools/devtools-messages-leader.ts +1 -0
  51. package/src/leader-thread/LeaderSyncProcessor.ts +35 -2
  52. package/src/leader-thread/eventlog.ts +80 -4
  53. package/src/leader-thread/leader-worker-devtools.ts +41 -6
  54. package/src/leader-thread/mod.ts +1 -0
  55. package/src/leader-thread/stream-events.ts +201 -0
  56. package/src/leader-thread/types.ts +49 -1
  57. package/src/otel.ts +10 -0
  58. package/src/schema/state/sqlite/client-document-def.ts +7 -17
  59. package/src/schema/state/sqlite/column-def.test.ts +2 -3
  60. package/src/schema/state/sqlite/db-schema/dsl/mod.ts +10 -16
  61. package/src/schema/state/sqlite/query-builder/api.ts +6 -1
  62. package/src/schema/state/sqlite/table-def.ts +9 -8
  63. package/src/version.ts +1 -1
@@ -0,0 +1,201 @@
1
+ import type { Subscribable } from '@livestore/utils/effect'
2
+ import { Chunk, Effect, Option, Queue, Stream } from '@livestore/utils/effect'
3
+ import { EventSequenceNumber, type LiveStoreEvent } from '../schema/mod.ts'
4
+ import type * as SyncState from '../sync/syncstate.ts'
5
+ import * as Eventlog from './eventlog.ts'
6
+ import type { LeaderSqliteDb, StreamEventsOptions } from './types.ts'
7
+
8
+ /**
9
+ * Streams events for leader-thread adapters.
10
+ *
11
+ * Provides a continuous stream from the eventlog as the upstream head advances.
12
+  * When an until event is passed in, the stream finalizes upon reaching it.
13
+ *
14
+  * The batch size is set to 100 by default as this was measured to provide the
15
+ * best performance and 1000 as the upper limit.
16
+ *
17
+ * Adapters that call this helper:
18
+ * - `packages/@livestore/adapter-web/src/in-memory/in-memory-adapter.ts`
19
+ * - `packages/@livestore/adapter-web/src/web-worker/leader-worker/make-leader-worker.ts`
20
+ * - `packages/@livestore/adapter-node/src/client-session/adapter.ts`
21
+ * - `packages/@livestore/adapter-node/src/make-leader-worker.ts`
22
+ * - `packages/@livestore/adapter-cloudflare/src/make-adapter.ts`
23
+ *
24
+ * Each caller resolves dependencies inside the leader scope before invoking this helper,
25
+ * so the stream stays environment-agnostic and does not leak `LeaderThreadCtx` into runtime
26
+ * entry points such as `Store.eventsStream`.
27
+ *
28
+ * Test files:
29
+ * Unit: `tests/package-common/src/leader-thread/stream-events.test.ts`
30
+ * Integration: `packages/@livestore/livestore/src/store/store-eventstream.test.ts`
31
+ * Performance: `tests/perf-eventlog/tests/suites/event-streaming.test.ts`
32
+ *
33
+ * Optimization explorations
34
+ *
35
+  * In order to alleviate the occurrence of many small queries when the syncState
36
+ * is sequentially progressing quickly we have explored some time-based batching
37
+ * approaches. It remains to be determined if and when the added complexity of
38
+  * these approaches is worth the benefit. They come with some drawbacks such as
39
+  * degraded time to first event or general performance degradation for larger
40
+ * query steps. These aspects can likely be mitigated with some more work but
41
+ * that is best assessed when we have a final implementation of event streaming
42
+ * with support for session and leader level streams.
43
+ *
44
+ * Fetch plans into a Sink
45
+ * https://gist.github.com/slashv/f1223689f2d1171d2eeb60a2823f4c7c
46
+ *
47
+ * Fetch plans into sink and decompose into windows
48
+ * https://gist.github.com/slashv/a8f55f50121c080937f42e44b4039ac8
49
+ *
50
+ * Mailbox and Latch approach (suggestion by Tim Smart)
51
+ * https://gist.github.com/slashv/d6b12395c85415bf0d3363372a1636c3
52
+ */
53
+ export const streamEventsWithSyncState = ({
54
+ dbEventlog,
55
+ syncState,
56
+ options,
57
+ }: {
58
+ dbEventlog: LeaderSqliteDb
59
+ syncState: Subscribable.Subscribable<SyncState.SyncState>
60
+ options: StreamEventsOptions
61
+ }): Stream.Stream<LiveStoreEvent.Client.Encoded> => {
62
+ const initialCursor = options.since ?? EventSequenceNumber.Client.ROOT
63
+ const batchSize = options.batchSize ?? 100
64
+
65
+ return Stream.unwrapScoped(
66
+ Effect.gen(function* () {
67
+ /**
68
+ * Single-element Queue allows suspending the event stream until head
69
+ * advances because Queue.take is a suspending effect. SubscriptionRef in
70
+  * comparison lacks a primitive for suspending a stream until a new value
71
+ * is set and would require polling.
72
+ *
73
+ * The use of a sliding Queue here is useful since it ensures only the
74
+  * latest head from syncState is the one present on the queue without the
75
+ * need for manual substitution.
76
+ */
77
+ const headQueue = yield* Queue.sliding<EventSequenceNumber.Client.Composite>(1)
78
+
79
+ /**
80
+ * We run a separate fiber which listens to changes in syncState and
81
+  * offers the latest head to the headQueue. Keeping track of the previous
82
+ * value is done to prevent syncState changes unrelated to the
83
+ * upstreamHead triggering empty queries.
84
+ *
85
+ * When we implement support for leader and session level streams
86
+ * this will need to be adapted to support the relevant value from
87
+ * syncState that we are interested in tracking.
88
+ */
89
+ let prevGlobalHead = -1
90
+ yield* syncState.changes.pipe(
91
+ Stream.map((state) => state.upstreamHead),
92
+ Stream.filter((head) => {
93
+ if (head.global > prevGlobalHead) {
94
+ prevGlobalHead = head.global
95
+ return true
96
+ }
97
+ return false
98
+ }),
99
+ Stream.runForEach((head) => Queue.offer(headQueue, head)),
100
+ Effect.forkScoped,
101
+ )
102
+
103
+ return Stream.paginateChunkEffect(
104
+ { cursor: initialCursor, head: EventSequenceNumber.Client.ROOT },
105
+ ({ cursor, head }) =>
106
+ Effect.gen(function* () {
107
+ /**
108
+  * Early check guards against:
109
+ * since === until : Prevent empty query
110
+ * since > until : Incorrectly inverted interval
111
+ */
112
+ if (options.until && EventSequenceNumber.Client.isGreaterThanOrEqual(cursor, options.until)) {
113
+ return [Chunk.empty(), Option.none()]
114
+ }
115
+
116
+ /**
117
+ * There are two scenarios where we take the next head from the headQueue:
118
+ *
119
+ * 1. We need to wait for the head to advance
120
+ * The Stream suspends until a new head is available on the headQueue
121
+ *
122
+  * 2. Head has advanced during iteration
123
+  * While iterating towards the latest head taken from the headQueue
124
+ * in increments of batchSize it's possible the head could have
125
+  * advanced. This leads to a suboptimal number of queries. Therefore we
126
+ * check if the headQueue is full which tells us that there's a new
127
+ * head available to take. Example:
128
+ *
129
+ * batchSize: 2
130
+ *
131
+ * --> head at: e3
132
+ * First query: e0 -> e2 (two events)
133
+ * --> head advances to: e4
134
+ * Second query: e2 -> e3 (one event but we could have taken 2)
135
+ * --> Take the new head of e4
136
+ * Third query: e3 -> e4 (unnecessary third query)
137
+ *
138
+ *
139
+ * To define the target, which will be used as the temporary until
140
+ * marker for the eventlog query, we select the lowest of three possible values:
141
+ *
142
+ * hardStop: A user supplied until marker
143
+ * current cursor + batchSize: A batchSize step towards the latest head from headQueue
144
+ * nextHead: The latest head from headQueue
145
+ */
146
+ const waitForHead = EventSequenceNumber.Client.isGreaterThanOrEqual(cursor, head)
147
+ const maybeHead = waitForHead
148
+ ? yield* Queue.take(headQueue).pipe(Effect.map(Option.some))
149
+ : yield* Queue.poll(headQueue)
150
+ const nextHead = Option.getOrElse(maybeHead, () => head)
151
+ const hardStop = options.until?.global ?? Number.POSITIVE_INFINITY
152
+ const target = EventSequenceNumber.Client.Composite.make({
153
+ global: Math.min(hardStop, cursor.global + batchSize, nextHead.global),
154
+ client: EventSequenceNumber.Client.DEFAULT,
155
+ })
156
+
157
+ /**
158
+ * Eventlog.getEventsFromEventlog returns a Chunk from each
159
+  * query which is what we emit at each iteration.
160
+ */
161
+ const chunk = yield* Eventlog.getEventsFromEventlog({
162
+ dbEventlog,
163
+ options: {
164
+ ...options,
165
+ since: cursor,
166
+ until: target,
167
+ },
168
+ })
169
+
170
+ /**
171
+  * We construct the state for the following iteration of the stream
172
+ * loop by setting the current target as the since cursor and pass
173
+ * along the latest head.
174
+ *
175
+  * If we have reached the user-supplied until marker we signal the
176
+ * finalization of the stream by passing Option.none() instead.
177
+ */
178
+ const reachedUntil =
179
+ options.until !== undefined && EventSequenceNumber.Client.isGreaterThanOrEqual(target, options.until)
180
+
181
+ const nextState: Option.Option<{
182
+ cursor: EventSequenceNumber.Client.Composite
183
+ head: EventSequenceNumber.Client.Composite
184
+ }> = reachedUntil ? Option.none() : Option.some({ cursor: target, head: nextHead })
185
+
186
+ const spanAttributes = {
187
+ 'livestore.streamEvents.cursor.global': cursor.global,
188
+ 'livestore.streamEvents.target.global': target.global,
189
+ 'livestore.streamEvents.batchSize': batchSize,
190
+ 'livestore.streamEvents.waitedForHead': waitForHead,
191
+ }
192
+
193
+ return yield* Effect.succeed<[Chunk.Chunk<LiveStoreEvent.Client.Encoded>, typeof nextState]>([
194
+ chunk,
195
+ nextState,
196
+ ]).pipe(Effect.withSpan('@livestore/common:streamEvents:segment', { attributes: spanAttributes }))
197
+ }),
198
+ )
199
+ }),
200
+ )
201
+ }
@@ -24,7 +24,7 @@ import type {
24
24
  SyncBackend,
25
25
  UnknownError,
26
26
  } from '../index.ts'
27
- import type { EventSequenceNumber, LiveStoreEvent, LiveStoreSchema } from '../schema/mod.ts'
27
+ import { EventSequenceNumber, type LiveStoreEvent, type LiveStoreSchema } from '../schema/mod.ts'
28
28
  import type * as SyncState from '../sync/syncstate.ts'
29
29
  import type { ShutdownChannel } from './shutdown-channel.ts'
30
30
 
@@ -134,6 +134,54 @@ export type InitialBlockingSyncContext = {
134
134
  update: (_: { pageInfo: SyncBackend.PullResPageInfo; processed: number }) => Effect.Effect<void>
135
135
  }
136
136
 
137
+ export const STREAM_EVENTS_BATCH_SIZE_DEFAULT = 100
138
+ export const STREAM_EVENTS_BATCH_SIZE_MAX = 1_000
139
+
140
+ export const StreamEventsOptionsFields = {
141
+ since: Schema.optional(EventSequenceNumber.Client.Composite),
142
+ until: Schema.optional(EventSequenceNumber.Client.Composite),
143
+ filter: Schema.optional(Schema.Array(Schema.String)),
144
+ clientIds: Schema.optional(Schema.Array(Schema.String)),
145
+ sessionIds: Schema.optional(Schema.Array(Schema.String)),
146
+ batchSize: Schema.optional(Schema.Int.pipe(Schema.between(1, STREAM_EVENTS_BATCH_SIZE_MAX))),
147
+ includeClientOnly: Schema.optional(Schema.Boolean),
148
+ } as const
149
+
150
+ export const StreamEventsOptionsSchema = Schema.Struct(StreamEventsOptionsFields)
151
+
152
+ export interface StreamEventsOptions {
153
+ /**
154
+ * Only include events after this logical timestamp (exclusive).
155
+ * Defaults to `EventSequenceNumber.Client.ROOT` when omitted.
156
+ */
157
+ since?: EventSequenceNumber.Client.Composite
158
+ /**
159
+ * Only include events up to this logical timestamp (inclusive).
160
+ */
161
+ until?: EventSequenceNumber.Client.Composite
162
+ /**
163
+ * Only include events of the given names.
164
+ */
165
+ filter?: ReadonlyArray<string>
166
+ /**
167
+ * Only include events from specific client identifiers.
168
+ */
169
+ clientIds?: ReadonlyArray<string>
170
+ /**
171
+ * Only include events from specific session identifiers.
172
+ */
173
+ sessionIds?: ReadonlyArray<string>
174
+ /**
175
+ * Number of events to fetch in each batch when streaming from the eventlog.
176
+ * Defaults to 100.
177
+ */
178
+ batchSize?: number
179
+ /**
180
+ * Include client-only events (i.e. events with a positive client sequence number).
181
+ */
182
+ includeClientOnly?: boolean
183
+ }
184
+
137
185
  export interface LeaderSyncProcessor {
138
186
  /** Used by client sessions to subscribe to upstream sync state changes */
139
187
  pull: (args: {
package/src/otel.ts CHANGED
@@ -2,6 +2,16 @@ import { makeNoopTracer } from '@livestore/utils'
2
2
  import { Effect, identity, Layer, OtelTracer } from '@livestore/utils/effect'
3
3
  import * as otel from '@opentelemetry/api'
4
4
 
5
+ export const OtelLiveDummy: Layer.Layer<OtelTracer.OtelTracer> = Layer.suspend(() => {
6
+ const OtelTracerLive = Layer.succeed(OtelTracer.OtelTracer, makeNoopTracer())
7
+
8
+ const TracingLive = Layer.unwrapEffect(Effect.map(OtelTracer.make, Layer.setTracer)).pipe(
9
+ Layer.provideMerge(OtelTracerLive),
10
+ )
11
+
12
+ return TracingLive
13
+ })
14
+
5
15
  export const provideOtel =
6
16
  ({ otelTracer, parentSpanContext }: { otelTracer?: otel.Tracer; parentSpanContext?: otel.Context }) =>
7
17
  <A, E, R>(effect: Effect.Effect<A, E, R>): Effect.Effect<A, E, Exclude<R, OtelTracer.OtelTracer>> => {
@@ -513,27 +513,17 @@ export namespace ClientDocumentTableDef {
513
513
  }
514
514
  }
515
515
 
516
- export type GetOptions<TTableDef extends TraitAny> = TTableDef extends ClientDocumentTableDef.Trait<
517
- any,
518
- any,
519
- any,
520
- infer TOptions
521
- >
522
- ? TOptions
523
- : never
516
+ export type GetOptions<TTableDef extends TraitAny> =
517
+ TTableDef extends ClientDocumentTableDef.Trait<any, any, any, infer TOptions> ? TOptions : never
524
518
 
525
519
  export type TraitAny = Trait<any, any, any, any>
526
520
 
527
- export type DefaultIdType<TTableDef extends TraitAny> = TTableDef extends ClientDocumentTableDef.Trait<
528
- any,
529
- any,
530
- any,
531
- infer TOptions
532
- >
533
- ? TOptions['default']['id'] extends SessionIdSymbol | string
534
- ? TOptions['default']['id']
521
+ export type DefaultIdType<TTableDef extends TraitAny> =
522
+ TTableDef extends ClientDocumentTableDef.Trait<any, any, any, infer TOptions>
523
+ ? TOptions['default']['id'] extends SessionIdSymbol | string
524
+ ? TOptions['default']['id']
525
+ : never
535
526
  : never
536
- : never
537
527
 
538
528
  export type SetEventDefLike<
539
529
  TName extends string,
@@ -144,11 +144,10 @@ describe('getColumnDefForSchema', () => {
144
144
 
145
145
  it('should map tagged unions to json column', () => {
146
146
  const ResultSchema = Schema.Union(
147
- Schema.Struct({
148
- _tag: Schema.Literal('success'),
147
+ Schema.TaggedStruct('success', {
149
148
  value: Schema.String,
150
149
  }),
151
- Schema.Struct({ _tag: Schema.Literal('error'), error: Schema.String }),
150
+ Schema.TaggedStruct('error', { error: Schema.String }),
152
151
  )
153
152
 
154
153
  const columnDef = State.SQLite.getColumnDefForSchema(ResultSchema)
@@ -20,13 +20,12 @@ export type DbSchemaInput = Record<string, TableDefinition<any, any>> | Readonly
20
20
  * - array: we use the table name of each array item (= table definition) as the object key
21
21
  * - object: we discard the keys of the input object and use the table name of each object value (= table definition) as the new object key
22
22
  */
23
- export type DbSchemaFromInputSchema<TSchemaInput extends DbSchemaInput> = TSchemaInput extends ReadonlyArray<
24
- TableDefinition<any, any>
25
- >
26
- ? { [K in TSchemaInput[number] as K['name']]: K }
27
- : TSchemaInput extends Record<string, TableDefinition<any, any>>
28
- ? { [K in keyof TSchemaInput as TSchemaInput[K]['name']]: TSchemaInput[K] }
29
- : never
23
+ export type DbSchemaFromInputSchema<TSchemaInput extends DbSchemaInput> =
24
+ TSchemaInput extends ReadonlyArray<TableDefinition<any, any>>
25
+ ? { [K in TSchemaInput[number] as K['name']]: K }
26
+ : TSchemaInput extends Record<string, TableDefinition<any, any>>
27
+ ? { [K in keyof TSchemaInput as TSchemaInput[K]['name']]: TSchemaInput[K] }
28
+ : never
30
29
 
31
30
  // TODO ensure via runtime check (possibly even via type-level check) that all index names are unique
32
31
  export const makeDbSchema = <TDbSchemaInput extends DbSchemaInput>(
@@ -116,12 +115,8 @@ export type TableDefinition<TName extends string, TColumns extends Columns> = {
116
115
 
117
116
  export type Columns = Record<string, ColumnDefinition<any, any>>
118
117
 
119
- export type IsSingleColumn<TColumns extends Columns | ColumnDefinition<any, any>> = TColumns extends ColumnDefinition<
120
- any,
121
- any
122
- >
123
- ? true
124
- : false
118
+ export type IsSingleColumn<TColumns extends Columns | ColumnDefinition<any, any>> =
119
+ TColumns extends ColumnDefinition<any, any> ? true : false
125
120
 
126
121
  /**
127
122
  * NOTE this is only needed to avoid a TS limitation where `StructSchemaForColumns` in the default case
@@ -214,9 +209,8 @@ export namespace FromColumns {
214
209
 
215
210
  export type RequiredInsertColumnNames<TColumns extends Columns> = keyof RequiredInsertColumns<TColumns>
216
211
 
217
- export type RequiresInsertValues<TColumns extends Columns> = RequiredInsertColumnNames<TColumns> extends never
218
- ? false
219
- : true
212
+ export type RequiresInsertValues<TColumns extends Columns> =
213
+ RequiredInsertColumnNames<TColumns> extends never ? false : true
220
214
 
221
215
  export type InsertRowDecoded<TColumns extends Columns> = Types.Simplify<
222
216
  Pick<RowDecodedAll<TColumns>, RequiredInsertColumnNames<TColumns>> &
@@ -242,7 +242,12 @@ export namespace QueryBuilder {
242
242
  ): QueryBuilder<TResult, TTableDef, TWithout | 'row' | 'select'>
243
243
  <TColName extends keyof TTableDef['sqliteDef']['columns']>(
244
244
  col: TColName,
245
- op: QueryBuilder.WhereOps,
245
+ op: QueryBuilder.WhereOps.MultiValue,
246
+ value: ReadonlyArray<TTableDef['sqliteDef']['columns'][TColName]['schema']['Type']>,
247
+ ): QueryBuilder<TResult, TTableDef, TWithout | 'row' | 'select'>
248
+ <TColName extends keyof TTableDef['sqliteDef']['columns']>(
249
+ col: TColName,
250
+ op: QueryBuilder.WhereOps.SingleValue,
246
251
  value: TTableDef['sqliteDef']['columns'][TColName]['schema']['Type'],
247
252
  ): QueryBuilder<TResult, TTableDef, TWithout | 'row' | 'select'>
248
253
  }
@@ -381,14 +381,15 @@ export declare namespace SchemaToColumns {
381
381
  export type ColumnDefForType<TEncoded, TType> = SqliteDsl.ColumnDefinition<TEncoded, TType>
382
382
 
383
383
  // Create columns type from schema Type and Encoded
384
- export type FromTypes<TType, TEncoded> = TEncoded extends Record<string, any>
385
- ? {
386
- [K in keyof TEncoded]-?: ColumnDefForType<
387
- TEncoded[K],
388
- TType extends Record<string, any> ? (K extends keyof TType ? TType[K] : TEncoded[K]) : TEncoded[K]
389
- >
390
- }
391
- : SqliteDsl.Columns
384
+ export type FromTypes<TType, TEncoded> =
385
+ TEncoded extends Record<string, any>
386
+ ? {
387
+ [K in keyof TEncoded]-?: ColumnDefForType<
388
+ TEncoded[K],
389
+ TType extends Record<string, any> ? (K extends keyof TType ? TType[K] : TEncoded[K]) : TEncoded[K]
390
+ >
391
+ }
392
+ : SqliteDsl.Columns
392
393
  }
393
394
 
394
395
  export declare namespace TableDefInput {
package/src/version.ts CHANGED
@@ -2,7 +2,7 @@
2
2
  // import packageJson from '../package.json' with { type: 'json' }
3
3
  // export const liveStoreVersion = packageJson.version
4
4
 
5
- export const liveStoreVersion = '0.4.0-dev.20' as const
5
+ export const liveStoreVersion = '0.4.0-dev.21' as const
6
6
 
7
7
  /**
8
8
  * CRITICAL: Increment this version whenever you modify client-side EVENTLOG table schemas.