@concavejs/core 0.0.1-alpha.7 → 0.0.1-alpha.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (60)
  1. package/dist/auth/auth-context.d.ts +4 -15
  2. package/dist/auth/auth-context.js +6 -15
  3. package/dist/docstore/index.d.ts +1 -0
  4. package/dist/docstore/index.js +1 -0
  5. package/dist/docstore/interface.d.ts +8 -0
  6. package/dist/docstore/search-interfaces.d.ts +29 -0
  7. package/dist/docstore/search-interfaces.js +8 -0
  8. package/dist/http/http-handler.js +3 -3
  9. package/dist/id-codec/document-id.js +4 -3
  10. package/dist/index.d.ts +3 -2
  11. package/dist/index.js +2 -1
  12. package/dist/kernel/context-storage.d.ts +1 -0
  13. package/dist/kernel/context-storage.js +29 -11
  14. package/dist/kernel/docstore-gateway.d.ts +31 -6
  15. package/dist/kernel/docstore-gateway.js +105 -31
  16. package/dist/kernel/index.d.ts +4 -3
  17. package/dist/kernel/index.js +4 -3
  18. package/dist/kernel/missing-schema-error.d.ts +5 -0
  19. package/dist/kernel/missing-schema-error.js +25 -0
  20. package/dist/kernel/native-timers.d.ts +7 -0
  21. package/dist/kernel/native-timers.js +14 -0
  22. package/dist/kernel/schema-service.d.ts +0 -5
  23. package/dist/kernel/schema-service.js +1 -25
  24. package/dist/kernel/udf-kernel.d.ts +11 -1
  25. package/dist/kernel/udf-kernel.js +10 -10
  26. package/dist/query/query-runtime.js +6 -3
  27. package/dist/queryengine/cursor.js +3 -2
  28. package/dist/queryengine/index.d.ts +2 -2
  29. package/dist/queryengine/index.js +1 -1
  30. package/dist/ryow/uncommitted-writes.js +4 -5
  31. package/dist/scheduler/cron-executor.js +2 -1
  32. package/dist/scheduler/scheduled-function-executor.js +3 -6
  33. package/dist/subscriptions/subscription-manager.d.ts +22 -0
  34. package/dist/subscriptions/subscription-manager.js +131 -44
  35. package/dist/sync/protocol-handler.d.ts +5 -0
  36. package/dist/sync/protocol-handler.js +31 -2
  37. package/dist/sync/session-backpressure.js +4 -3
  38. package/dist/sync/session-heartbeat.js +3 -2
  39. package/dist/system/internal.js +6 -3
  40. package/dist/system/system-functions.js +1 -1
  41. package/dist/transactor/occ-transaction.js +9 -8
  42. package/dist/transactor/occ-validation.js +3 -3
  43. package/dist/udf/analysis/validator.js +1 -1
  44. package/dist/udf/executor/inline.d.ts +1 -1
  45. package/dist/udf/executor/inline.js +6 -2
  46. package/dist/udf/module-loader/call-context.d.ts +5 -6
  47. package/dist/udf/module-loader/call-context.js +5 -9
  48. package/dist/udf/module-loader/module-loader.js +26 -4
  49. package/dist/udf/runtime/udf-rand.js +1 -1
  50. package/dist/udf/runtime/udf-setup.d.ts +21 -0
  51. package/dist/udf/runtime/udf-setup.js +149 -53
  52. package/dist/utils/base64.d.ts +4 -0
  53. package/dist/utils/base64.js +58 -0
  54. package/dist/utils/crypto.d.ts +2 -0
  55. package/dist/utils/crypto.js +40 -0
  56. package/dist/utils/index.d.ts +1 -0
  57. package/dist/utils/index.js +1 -0
  58. package/dist/utils/utils.d.ts +6 -1
  59. package/dist/utils/utils.js +6 -1
  60. package/package.json +5 -1
@@ -3,10 +3,20 @@ import type { OccMutationTransaction } from "../transactor/occ-transaction";
3
3
  import type { BlobStore } from "../abstractions/blobstore";
4
4
  import type { UdfExec } from "../udf/executor/interface";
5
5
  import type { KernelAuthContext } from "./types";
6
+ export interface UdfKernelConfig {
7
+ docstore: DocStore;
8
+ authContext?: KernelAuthContext;
9
+ storage?: BlobStore;
10
+ snapshotOverride?: bigint | null;
11
+ mutationTransaction?: OccMutationTransaction;
12
+ udfExecutor?: UdfExec;
13
+ componentPath?: string;
14
+ idGenerator?: () => Uint8Array;
15
+ }
6
16
  export declare class UdfKernel {
7
17
  private readonly context;
8
18
  private readonly syscalls;
9
- constructor(docstore: DocStore, authContext?: KernelAuthContext, storage?: BlobStore, snapshotOverride?: bigint | null, mutationTransaction?: OccMutationTransaction, udfExecutor?: UdfExec, componentPath?: string, idGenerator?: () => Uint8Array);
19
+ constructor(config: UdfKernelConfig);
10
20
  clearAccessLogs(): void;
11
21
  getTrackedReadRanges(): import("..").SerializedKeyRange[];
12
22
  getTrackedWriteRanges(): import("..").SerializedKeyRange[];
@@ -8,28 +8,28 @@ import { KernelSyscalls } from "./syscalls/kernel-syscalls";
8
8
  export class UdfKernel {
9
9
  context;
10
10
  syscalls;
11
- constructor(docstore, authContext, storage, snapshotOverride, mutationTransaction, udfExecutor, componentPath, idGenerator) {
12
- const transaction = mutationTransaction ?? transactionContext.getStore() ?? undefined;
13
- const docStore = new DocStoreGateway(docstore, transaction);
11
+ constructor(config) {
12
+ const transaction = config.mutationTransaction ?? transactionContext.getStore() ?? undefined;
13
+ const docStore = new DocStoreGateway(config.docstore, transaction);
14
14
  const inheritedSnapshot = snapshotContext.getStore() ?? null;
15
- const snapshotTimestamp = docStore.computeSnapshotTimestamp(snapshotOverride, inheritedSnapshot);
15
+ const snapshotTimestamp = docStore.computeSnapshotTimestamp(config.snapshotOverride, inheritedSnapshot);
16
16
  this.context = new KernelContext({
17
17
  snapshotTimestamp,
18
- authContext,
19
- componentPath,
18
+ authContext: config.authContext,
19
+ componentPath: config.componentPath,
20
20
  mutationTransaction: docStore.getTransaction(),
21
21
  });
22
22
  docStore.attachContext(this.context);
23
- const schemaService = new SchemaService(componentPath);
23
+ const schemaService = new SchemaService(config.componentPath);
24
24
  const queryRuntime = new QueryRuntime(this.context, docStore, schemaService);
25
25
  this.syscalls = new KernelSyscalls({
26
26
  context: this.context,
27
27
  docStore,
28
28
  schemaService,
29
29
  queryRuntime,
30
- storage,
31
- udfExecutor,
32
- idGenerator: idGenerator ?? generateInternalId,
30
+ storage: config.storage,
31
+ udfExecutor: config.udfExecutor,
32
+ idGenerator: config.idGenerator ?? generateInternalId,
33
33
  });
34
34
  }
35
35
  clearAccessLogs() {
@@ -3,6 +3,7 @@ import { UncommittedWrites } from "../ryow/uncommitted-writes";
3
3
  import { stringToHex } from "../utils/utils";
4
4
  import { evaluateFilter } from "../queryengine/filters";
5
5
  import { executeIndexQuery, rangeExpressionsToIndexBounds, evaluateRangeExpression, } from "../queryengine/index-query";
6
+ import { isSearchCapable } from "../docstore/search-interfaces";
6
7
  import { buildQueryPlan } from "./planner";
7
8
  import { paginateByCursor, sortByCreationTimeAndId, sortByIndexFields } from "./execution";
8
9
  import { applyQueryOperators } from "./postprocess";
@@ -113,9 +114,11 @@ export class QueryRuntime {
113
114
  : operatorLimit && operatorLimit > 0
114
115
  ? operatorLimit
115
116
  : undefined;
116
- const searchResults = await this.docStore
117
- .getDocStore()
118
- .search(plan.indexIdHex, plan.searchTerm, plan.filterMap, { limit: combinedLimit });
117
+ const rawDocStore = this.docStore.getDocStore();
118
+ if (!isSearchCapable(rawDocStore)) {
119
+ throw new Error("DocStore does not support full-text search");
120
+ }
121
+ const searchResults = await rawDocStore.search(plan.indexIdHex, plan.searchTerm, plan.filterMap, { limit: combinedLimit });
119
122
  const documents = searchResults
120
123
  .filter(({ doc }) => doc.ts <= this.context.snapshotTimestamp)
121
124
  .map(({ doc, score }) => ({
@@ -6,6 +6,7 @@
6
6
  * - Index cursors: Encode both _id and index key values (for index queries)
7
7
  */
8
8
  import { convexToJson, jsonToConvex } from "convex/values";
9
+ import { decodeBase64ToUtf8, encodeUtf8ToBase64 } from "../utils/base64";
9
10
  /**
10
11
  * Encode a cursor for an index query.
11
12
  * @param id Document ID
@@ -18,7 +19,7 @@ export function encodeIndexCursor(id, indexKey) {
18
19
  id,
19
20
  indexKey: indexKey.map((v) => convexToJson(v)),
20
21
  };
21
- return Buffer.from(JSON.stringify(cursor), "utf-8").toString("base64");
22
+ return encodeUtf8ToBase64(JSON.stringify(cursor));
22
23
  }
23
24
  /**
24
25
  * Encode a simple cursor for a full table scan.
@@ -36,7 +37,7 @@ export function encodeSimpleCursor(id) {
36
37
  export function decodeCursor(cursorString) {
37
38
  // Try to decode as base64 first (index cursor)
38
39
  try {
39
- const decoded = Buffer.from(cursorString, "base64").toString("utf-8");
40
+ const decoded = decodeBase64ToUtf8(cursorString);
40
41
  const parsed = JSON.parse(decoded);
41
42
  if (parsed.type === "index" && parsed.id && Array.isArray(parsed.indexKey)) {
42
43
  return {
@@ -1,8 +1,8 @@
1
- export { UdfKernel } from "../kernel/udf-kernel";
1
+ export { UdfKernel, type UdfKernelConfig } from "../kernel/udf-kernel";
2
2
  export { snapshotContext, transactionContext } from "../kernel/contexts";
3
3
  export { QueryRuntime } from "../query/query-runtime";
4
4
  export { SchemaService } from "../kernel/schema-service";
5
- export { DocStoreGateway } from "../kernel/docstore-gateway";
5
+ export { DocStoreGateway, createDocAccess, type DocAccess } from "../kernel/docstore-gateway";
6
6
  export * from "./cursor";
7
7
  export * from "./filters";
8
8
  export * from "./index-query";
@@ -2,7 +2,7 @@ export { UdfKernel } from "../kernel/udf-kernel";
2
2
  export { snapshotContext, transactionContext } from "../kernel/contexts";
3
3
  export { QueryRuntime } from "../query/query-runtime";
4
4
  export { SchemaService } from "../kernel/schema-service";
5
- export { DocStoreGateway } from "../kernel/docstore-gateway";
5
+ export { DocStoreGateway, createDocAccess } from "../kernel/docstore-gateway";
6
6
  export * from "./cursor";
7
7
  export * from "./filters";
8
8
  export * from "./index-query";
@@ -6,7 +6,7 @@
6
6
  * fragmented RYOW implementation into one clean abstraction.
7
7
  */
8
8
  import { parseDeveloperId } from "../queryengine/developer-id";
9
- import { deserializeDeveloperId, readVersionKey } from "../utils/utils";
9
+ import { documentIdKey, parseDocumentIdKey } from "../docstore/interface";
10
10
  /**
11
11
  * Unified interface for accessing uncommitted writes.
12
12
  * Abstracts over both transaction pending documents and local writes.
@@ -63,8 +63,7 @@ export class UncommittedWrites {
63
63
  for (const [key, doc] of pending.entries()) {
64
64
  if (doc === null) {
65
65
  // Deleted document - key is in format "table:internalId"
66
- // Use centralized ID parsing to extract table
67
- const parsed = deserializeDeveloperId(key);
66
+ const parsed = parseDocumentIdKey(key);
68
67
  if (parsed && parsed.table === tableId) {
69
68
  // Mark as deleted using the key as developer ID
70
69
  writes.set(key, null);
@@ -98,7 +97,7 @@ export class UncommittedWrites {
98
97
  if (!parsed) {
99
98
  return undefined;
100
99
  }
101
- const key = readVersionKey(parsed);
100
+ const key = documentIdKey(parsed);
102
101
  const pending = this.mutationTransaction.getPendingDocuments();
103
102
  const doc = pending.get(key);
104
103
  if (doc === undefined) {
@@ -146,7 +145,7 @@ export class UncommittedWrites {
146
145
  if (!parsed || parsed.table !== tableId) {
147
146
  continue;
148
147
  }
149
- const key = readVersionKey(parsed);
148
+ const key = documentIdKey(parsed);
150
149
  if (write.value === null) {
151
150
  // Deleted
152
151
  visibleDocs.delete(key);
@@ -8,6 +8,7 @@
8
8
  */
9
9
  import { Order } from "../docstore";
10
10
  import { stringToHex, writtenTablesFromRanges } from "../utils";
11
+ import { randomUuid } from "../utils/crypto";
11
12
  import { runAsServerCall } from "../udf/module-loader/call-context";
12
13
  const CRONS_TABLE = "_crons";
13
14
  export class CronExecutor {
@@ -82,7 +83,7 @@ export class CronExecutor {
82
83
  }
83
84
  else {
84
85
  // Create new job
85
- const internalId = stringToHex(crypto.randomUUID());
86
+ const internalId = stringToHex(randomUuid());
86
87
  const docId = { table: tableId, internalId };
87
88
  const ts = this.allocateTimestamp?.() ?? BigInt(now);
88
89
  const developerId = `${tableId}:${internalId}`;
@@ -1,5 +1,6 @@
1
1
  import { Order } from "../docstore";
2
- import { deserializeDeveloperId, stringToHex, writtenTablesFromRanges } from "../utils";
2
+ import { stringToHex, writtenTablesFromRanges } from "../utils";
3
+ import { parseDocumentIdKey } from "../docstore/interface";
3
4
  import { runAsServerCall } from "../udf/module-loader/call-context";
4
5
  const SCHEDULED_FUNCTIONS_TABLE = "_scheduled_functions";
5
6
  export class ScheduledFunctionExecutor {
@@ -183,11 +184,7 @@ export class ScheduledFunctionExecutor {
183
184
  }
184
185
  }
185
186
  getDocumentId(jobId) {
186
- const parts = deserializeDeveloperId(jobId);
187
- if (!parts) {
188
- return null;
189
- }
190
- return { table: parts.table, internalId: parts.internalId };
187
+ return parseDocumentIdKey(jobId);
191
188
  }
192
189
  defaultAllocateTimestamp(previousTs) {
193
190
  const now = BigInt(this.now());
@@ -49,6 +49,21 @@ export declare class SubscriptionManager {
49
49
  private tableToQueries;
50
50
  private tableWriteTimestamps;
51
51
  private lastCleanupTime;
52
+ /**
53
+ * Add a subscription hash to the table→queries inverted index.
54
+ * Must be called whenever a subscription's table set is established or grows.
55
+ */
56
+ private addSubscriptionToTables;
57
+ /**
58
+ * Remove a subscription hash from the table→queries inverted index.
59
+ * Must be called whenever a subscription's table set is cleared or shrinks.
60
+ */
61
+ private removeSubscriptionFromTables;
62
+ /**
63
+ * Atomically update a subscription's table dependencies and the inverted index.
64
+ * Removes old mappings, sets new tables, and adds new mappings in one operation.
65
+ */
66
+ private updateSubscriptionTables;
52
67
  /**
53
68
  * Subscribe a client to a query with its dependencies
54
69
  *
@@ -110,6 +125,13 @@ export declare class SubscriptionManager {
110
125
  totalTables: number;
111
126
  totalTrackedWriteTimestamps: number;
112
127
  };
128
+ /**
129
+ * Validate that all parallel maps are consistent with each other.
130
+ * Useful for testing and debugging subscription invariants.
131
+ *
132
+ * @throws Error if any inconsistency is detected
133
+ */
134
+ assertConsistency(): void;
113
135
  /**
114
136
  * Force cleanup of stale table write timestamps.
115
137
  * Useful for testing or manual cleanup.
@@ -127,6 +127,44 @@ export class SubscriptionManager {
127
127
  tableWriteTimestamps = new Map();
128
128
  // Last cleanup time to throttle cleanup frequency
129
129
  lastCleanupTime = 0;
130
+ /**
131
+ * Add a subscription hash to the table→queries inverted index.
132
+ * Must be called whenever a subscription's table set is established or grows.
133
+ */
134
+ addSubscriptionToTables(hash, tables) {
135
+ for (const table of tables) {
136
+ let queries = this.tableToQueries.get(table);
137
+ if (!queries) {
138
+ queries = new Set();
139
+ this.tableToQueries.set(table, queries);
140
+ }
141
+ queries.add(hash);
142
+ }
143
+ }
144
+ /**
145
+ * Remove a subscription hash from the table→queries inverted index.
146
+ * Must be called whenever a subscription's table set is cleared or shrinks.
147
+ */
148
+ removeSubscriptionFromTables(hash, tables) {
149
+ for (const table of tables) {
150
+ const queries = this.tableToQueries.get(table);
151
+ if (queries) {
152
+ queries.delete(hash);
153
+ if (queries.size === 0) {
154
+ this.tableToQueries.delete(table);
155
+ }
156
+ }
157
+ }
158
+ }
159
+ /**
160
+ * Atomically update a subscription's table dependencies and the inverted index.
161
+ * Removes old mappings, sets new tables, and adds new mappings in one operation.
162
+ */
163
+ updateSubscriptionTables(subscription, newTables) {
164
+ this.removeSubscriptionFromTables(subscription.queryHash, subscription.tables);
165
+ subscription.tables = newTables;
166
+ this.addSubscriptionToTables(subscription.queryHash, newTables);
167
+ }
130
168
  /**
131
169
  * Subscribe a client to a query with its dependencies
132
170
  *
@@ -170,26 +208,7 @@ export class SubscriptionManager {
170
208
  clientQueries.add(queryId);
171
209
  // Update table dependencies if they changed
172
210
  if (!setsEqual(subscription.tables, tables)) {
173
- // Remove old table mappings
174
- for (const oldTable of subscription.tables) {
175
- const queries = this.tableToQueries.get(oldTable);
176
- if (queries) {
177
- queries.delete(hash);
178
- if (queries.size === 0) {
179
- this.tableToQueries.delete(oldTable);
180
- }
181
- }
182
- }
183
- // Add new table mappings
184
- subscription.tables = tables;
185
- for (const table of tables) {
186
- let queries = this.tableToQueries.get(table);
187
- if (!queries) {
188
- queries = new Set();
189
- this.tableToQueries.set(table, queries);
190
- }
191
- queries.add(hash);
192
- }
211
+ this.updateSubscriptionTables(subscription, tables);
193
212
  }
194
213
  // Check if any writes happened after the query's snapshot timestamp
195
214
  // This handles the race condition where:
@@ -238,16 +257,7 @@ export class SubscriptionManager {
238
257
  }
239
258
  // If no more clients, remove the subscription entirely
240
259
  if (subscription.clients.size === 0) {
241
- // Remove from table index
242
- for (const table of subscription.tables) {
243
- const queries = this.tableToQueries.get(table);
244
- if (queries) {
245
- queries.delete(hash);
246
- if (queries.size === 0) {
247
- this.tableToQueries.delete(table);
248
- }
249
- }
250
- }
260
+ this.removeSubscriptionFromTables(hash, subscription.tables);
251
261
  this.subscriptions.delete(hash);
252
262
  }
253
263
  }
@@ -388,7 +398,7 @@ export class SubscriptionManager {
388
398
  * Update client ID across all subscriptions (for session ID changes)
389
399
  */
390
400
  updateSessionId(oldClientId, newClientId) {
391
- // Find all entries for the old client
401
+ // Collect all updates before mutating any maps
392
402
  const updates = [];
393
403
  for (const [key, hash] of this.clientQueryToHash) {
394
404
  if (key.startsWith(`${oldClientId}:`)) {
@@ -397,25 +407,42 @@ export class SubscriptionManager {
397
407
  updates.push({ oldKey: key, newKey, queryId, hash });
398
408
  }
399
409
  }
400
- for (const { oldKey, newKey, queryId, hash } of updates) {
401
- // Update the client->hash mapping
410
+ // Phase 1: Update all clientQueryToHash mappings
411
+ for (const { oldKey, newKey, hash } of updates) {
402
412
  this.clientQueryToHash.delete(oldKey);
403
413
  this.clientQueryToHash.set(newKey, hash);
404
- // Update the subscription's clients map
414
+ }
415
+ // Phase 2: Update all subscription client maps
416
+ // Group by hash to avoid redundant lookups
417
+ const byHash = new Map();
418
+ for (const { queryId, hash } of updates) {
419
+ let ids = byHash.get(hash);
420
+ if (!ids) {
421
+ ids = [];
422
+ byHash.set(hash, ids);
423
+ }
424
+ ids.push(queryId);
425
+ }
426
+ for (const [hash, queryIds] of byHash) {
405
427
  const subscription = this.subscriptions.get(hash);
406
- if (subscription) {
407
- const oldQueries = subscription.clients.get(oldClientId);
408
- if (oldQueries) {
428
+ if (!subscription)
429
+ continue;
430
+ // Move all queryIds from old client to new client atomically
431
+ const oldQueries = subscription.clients.get(oldClientId);
432
+ if (oldQueries) {
433
+ for (const queryId of queryIds) {
409
434
  oldQueries.delete(queryId);
410
- if (oldQueries.size === 0) {
411
- subscription.clients.delete(oldClientId);
412
- }
413
435
  }
414
- let newQueries = subscription.clients.get(newClientId);
415
- if (!newQueries) {
416
- newQueries = new Set();
417
- subscription.clients.set(newClientId, newQueries);
436
+ if (oldQueries.size === 0) {
437
+ subscription.clients.delete(oldClientId);
418
438
  }
439
+ }
440
+ let newQueries = subscription.clients.get(newClientId);
441
+ if (!newQueries) {
442
+ newQueries = new Set();
443
+ subscription.clients.set(newClientId, newQueries);
444
+ }
445
+ for (const queryId of queryIds) {
419
446
  newQueries.add(queryId);
420
447
  }
421
448
  }
@@ -438,6 +465,66 @@ export class SubscriptionManager {
438
465
  totalTrackedWriteTimestamps: this.tableWriteTimestamps.size,
439
466
  };
440
467
  }
468
+ /**
469
+ * Validate that all parallel maps are consistent with each other.
470
+ * Useful for testing and debugging subscription invariants.
471
+ *
472
+ * @throws Error if any inconsistency is detected
473
+ */
474
+ assertConsistency() {
475
+ // 1. Every clientQueryToHash entry must have a matching subscription with that client
476
+ for (const [key, hash] of this.clientQueryToHash) {
477
+ const colonIdx = key.indexOf(":");
478
+ const clientId = key.substring(0, colonIdx);
479
+ const queryId = key.substring(colonIdx + 1);
480
+ const subscription = this.subscriptions.get(hash);
481
+ if (!subscription) {
482
+ throw new Error(`clientQueryToHash references hash "${hash}" but no subscription exists`);
483
+ }
484
+ const clientQueries = subscription.clients.get(clientId);
485
+ if (!clientQueries || !clientQueries.has(queryId)) {
486
+ throw new Error(`clientQueryToHash has ${key} -> ${hash} but subscription doesn't list client "${clientId}" with queryId "${queryId}"`);
487
+ }
488
+ }
489
+ // 2. Every subscription.clients entry must have matching clientQueryToHash entries
490
+ for (const [hash, subscription] of this.subscriptions) {
491
+ for (const [clientId, queryIds] of subscription.clients) {
492
+ for (const queryId of queryIds) {
493
+ const key = makeClientQueryKey(clientId, queryId);
494
+ const mappedHash = this.clientQueryToHash.get(key);
495
+ if (mappedHash !== hash) {
496
+ throw new Error(`subscription "${hash}" lists client "${clientId}" queryId "${queryId}" but clientQueryToHash maps to "${mappedHash}"`);
497
+ }
498
+ }
499
+ }
500
+ }
501
+ // 3. tableToQueries must match subscription.tables
502
+ const expectedTableToQueries = new Map();
503
+ for (const [hash, subscription] of this.subscriptions) {
504
+ for (const table of subscription.tables) {
505
+ let queries = expectedTableToQueries.get(table);
506
+ if (!queries) {
507
+ queries = new Set();
508
+ expectedTableToQueries.set(table, queries);
509
+ }
510
+ queries.add(hash);
511
+ }
512
+ }
513
+ for (const [table, expectedHashes] of expectedTableToQueries) {
514
+ const actualHashes = this.tableToQueries.get(table);
515
+ if (!actualHashes) {
516
+ throw new Error(`Expected tableToQueries to have table "${table}" but it's missing`);
517
+ }
518
+ if (!setsEqual(actualHashes, expectedHashes)) {
519
+ throw new Error(`tableToQueries for "${table}" is inconsistent with subscription tables`);
520
+ }
521
+ }
522
+ for (const [table] of this.tableToQueries) {
523
+ if (!expectedTableToQueries.has(table)) {
524
+ throw new Error(`tableToQueries has orphaned table "${table}" with no matching subscriptions`);
525
+ }
526
+ }
527
+ }
441
528
  /**
442
529
  * Force cleanup of stale table write timestamps.
443
530
  * Useful for testing or manual cleanup.
@@ -164,6 +164,11 @@ export declare class SyncProtocolHandler {
164
164
  private handleAction;
165
165
  private handleAuthenticate;
166
166
  private updateOriginatingSession;
167
+ /**
168
+ * Re-execute all active subscription queries across all sessions.
169
+ * Used during hot-reload to push fresh results after module code changes.
170
+ */
171
+ rerunAllSubscriptions(): Promise<void>;
167
172
  /**
168
173
  * Broadcast updates using fine-grained range-based tracking
169
174
  * Sends messages directly to affected sessions (excluding the originating session)
@@ -7,6 +7,7 @@
7
7
  import { encodeServerMessage } from "../types";
8
8
  import { assertAdminToken, assertSystemToken, verifyJwtAndGetIdentity, JWTValidationError } from "../auth";
9
9
  import { SubscriptionManager } from "../subscriptions";
10
+ import { nativeTimers } from "../kernel/native-timers";
10
11
  import { deserializeKeyRange } from "../queryengine";
11
12
  import { advanceSessionTimestamp, convertTablesToRanges, formatRangesForLog, longToBigIntTimestamp, makeStateVersion, makeTransitionMessage, toLongTimestamp, } from "./protocol-utils";
12
13
  import { executeReactiveQuery } from "./query-execution";
@@ -282,6 +283,13 @@ export class SyncProtocolHandler {
282
283
  async handleModifyQuerySet(sessionId, message) {
283
284
  const session = this.sessions.get(sessionId);
284
285
  return this.withSessionLock(session, async () => {
286
+ // If the client restarts its local query set (baseVersion=0), align the
287
+ // server's initial transition timestamp with the zeroed client state.
288
+ // Without this, a carried maxObservedTimestamp can produce startVersion
289
+ // mismatches like "X:0:0 transitioning from 0:0:0" after reconnect.
290
+ if (session.querySetVersion === 0 && session.identityVersion === 0 && message.baseVersion === 0) {
291
+ session.timestamp = 0n;
292
+ }
285
293
  // For reconnections, accept the client's base version as the starting point
286
294
  if (session.querySetVersion === 0 && message.baseVersion > 0) {
287
295
  session.querySetVersion = message.baseVersion;
@@ -576,6 +584,27 @@ export class SyncProtocolHandler {
576
584
  const endVersion = makeStateVersion(session.querySetVersion, session.identityVersion, endTimestamp);
577
585
  return makeTransitionMessage(startVersion, endVersion, modifications);
578
586
  }
587
+ /**
588
+ * Re-execute all active subscription queries across all sessions.
589
+ * Used during hot-reload to push fresh results after module code changes.
590
+ */
591
+ async rerunAllSubscriptions() {
592
+ for (const [sessionId, session] of this.sessions) {
593
+ if (session.activeQueries.size === 0)
594
+ continue;
595
+ await this.withSessionLock(session, async () => {
596
+ const allQueryIds = new Set(session.activeQueries.keys());
597
+ const modifications = await this.rerunSpecificQueries(session, sessionId, allQueryIds);
598
+ if (modifications.length > 0) {
599
+ const startTimestamp = session.timestamp;
600
+ const endTimestamp = advanceSessionTimestamp(session);
601
+ const startVersion = makeStateVersion(session.querySetVersion, session.identityVersion, startTimestamp);
602
+ const endVersion = makeStateVersion(session.querySetVersion, session.identityVersion, endTimestamp);
603
+ this.send(session, makeTransitionMessage(startVersion, endVersion, modifications));
604
+ }
605
+ });
606
+ }
607
+ }
579
608
  /**
580
609
  * Broadcast updates using fine-grained range-based tracking
581
610
  * Sends messages directly to affected sessions (excluding the originating session)
@@ -684,13 +713,13 @@ export class SyncProtocolHandler {
684
713
  }
685
714
  let timeoutHandle;
686
715
  const timeoutPromise = new Promise((_, reject) => {
687
- timeoutHandle = setTimeout(() => {
716
+ timeoutHandle = nativeTimers.setTimeout(() => {
688
717
  reject(new Error(`Sync operation timed out after ${this.operationTimeoutMs}ms`));
689
718
  }, this.operationTimeoutMs);
690
719
  });
691
720
  return Promise.race([promise, timeoutPromise]).finally(() => {
692
721
  if (timeoutHandle) {
693
- clearTimeout(timeoutHandle);
722
+ nativeTimers.clearTimeout(timeoutHandle);
694
723
  }
695
724
  });
696
725
  }
@@ -1,3 +1,4 @@
1
+ import { nativeTimers } from "../kernel/native-timers";
1
2
  export class SessionBackpressureController {
2
3
  options;
3
4
  constructor(options) {
@@ -27,7 +28,7 @@ export class SessionBackpressureController {
27
28
  }
28
29
  clearDrainTimeout(session) {
29
30
  if (session.drainTimeout) {
30
- clearTimeout(session.drainTimeout);
31
+ nativeTimers.clearTimeout(session.drainTimeout);
31
32
  session.drainTimeout = undefined;
32
33
  }
33
34
  }
@@ -35,7 +36,7 @@ export class SessionBackpressureController {
35
36
  if (session.isDraining)
36
37
  return;
37
38
  session.isDraining = true;
38
- session.drainTimeout = setTimeout(() => {
39
+ session.drainTimeout = nativeTimers.setTimeout(() => {
39
40
  if (session.messageQueue.length > 0) {
40
41
  this.options.warn(`[SyncProtocolHandler] Slow client timeout - disconnecting. ` +
41
42
  `Queue depth: ${session.messageQueue.length}, dropped: ${session.droppedMessages}`);
@@ -68,7 +69,7 @@ export class SessionBackpressureController {
68
69
  this.clearDrainTimeout(session);
69
70
  }
70
71
  else {
71
- setTimeout(() => this.drainQueue(session), 10);
72
+ nativeTimers.setTimeout(() => this.drainQueue(session), 10);
72
73
  }
73
74
  }
74
75
  }
@@ -1,3 +1,4 @@
1
+ import { nativeTimers } from "../kernel/native-timers";
1
2
  export class SessionHeartbeatController {
2
3
  options;
3
4
  constructor(options) {
@@ -9,7 +10,7 @@ export class SessionHeartbeatController {
9
10
  }
10
11
  clear(session) {
11
12
  if (session.pingTimer) {
12
- clearTimeout(session.pingTimer);
13
+ nativeTimers.clearTimeout(session.pingTimer);
13
14
  session.pingTimer = undefined;
14
15
  }
15
16
  }
@@ -18,7 +19,7 @@ export class SessionHeartbeatController {
18
19
  if (!session.websocket || session.websocket.readyState !== this.options.websocketReadyStateOpen) {
19
20
  return;
20
21
  }
21
- session.pingTimer = setTimeout(() => {
22
+ session.pingTimer = nativeTimers.setTimeout(() => {
22
23
  this.options.onPing(session);
23
24
  }, this.options.intervalMs);
24
25
  }
@@ -21,7 +21,7 @@
21
21
  */
22
22
  import { v } from "convex/values";
23
23
  import { loadConvexModule, listRegisteredModules } from "../udf/module-loader/module-loader";
24
- import { isMissingSchemaModuleError } from "../kernel/schema-service";
24
+ import { isMissingSchemaModuleError } from "../kernel/missing-schema-error";
25
25
  import { listSystemFunctions } from "./function-introspection";
26
26
  import { getFullTableName } from "../tables/interface";
27
27
  import { queryExecutionLog, clearExecutionLog } from "./execution-log";
@@ -505,8 +505,11 @@ export function createSystemFunctions(deps) {
505
505
  systemListFunctions: query({
506
506
  args: { componentPath: v.optional(v.string()) },
507
507
  handler: async (ctx, args) => {
508
- const componentPath = args.componentPath ?? ctx?.componentPath ?? undefined;
509
- const functions = await listSystemFunctions({ componentPath });
508
+ const requestedComponentPath = args.componentPath ?? ctx?.componentPath ?? undefined;
509
+ const normalizedComponentPath = typeof requestedComponentPath === "string" && requestedComponentPath.trim().length === 0
510
+ ? undefined
511
+ : requestedComponentPath;
512
+ const functions = await listSystemFunctions({ componentPath: normalizedComponentPath });
510
513
  return functions.map((fn) => ({
511
514
  name: fn.name,
512
515
  path: fn.path,
@@ -4,7 +4,7 @@
4
4
  */
5
5
  import { hexToString, stringToHex } from "../utils/utils";
6
6
  import { loadConvexModule } from "../udf/module-loader/module-loader";
7
- import { isMissingSchemaModuleError } from "../kernel/schema-service";
7
+ import { isMissingSchemaModuleError } from "../kernel/missing-schema-error";
8
8
  import { listSystemFunctions } from "./function-introspection";
9
9
  import { Order } from "../docstore/interface";
10
10
  /**