@sqlite-sync/cloudflare 0.0.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.d.ts +71 -0
- package/dist/index.js +305 -0
- package/dist/index.js.map +1 -0
- package/dist/jobs.d.ts +89 -0
- package/dist/jobs.js +613 -0
- package/dist/jobs.js.map +1 -0
- package/package.json +59 -0
package/dist/index.d.ts
ADDED
|
@@ -0,0 +1,71 @@
|
|
|
1
|
+
import * as _sqlite_sync_core from '@sqlite-sync/core';
import { SyncDbSchema, CrdtStorageMutator, TypedEventTarget, CrdtEventStatus, CrdtEventType, CrdtEventOrigin, Migrations } from '@sqlite-sync/core';
import { Compilable, Kysely } from 'kysely';

/** A raw SQL statement together with its positional bind parameters. */
type ExecuteParams = {
  sql: string;
  parameters: readonly unknown[];
};
/** Rows returned by executing a query, typed as T. */
type ExecuteResult<T> = {
  rows: T[];
};
/** Extracts the row output type O from a Kysely `Compilable<O>` builder. */
type QueryBuilderOutput<QB> = QB extends Compilable<infer O> ? O : never;
/** Callback that builds a compilable query from a Kysely instance. */
type KyselyQueryFactory<TDatabase, TQuery extends Compilable<TResult>, TResult = QueryBuilderOutput<TQuery>> = (kysely: Kysely<TDatabase>) => TQuery;
/**
 * Synchronous SQL executor: raw SQL via `execute`, Kysely-built queries
 * via `executeKysely`, and transactional grouping via `transaction`
 * (the callback receives only the non-transactional query methods).
 */
type KyselyExecutor<TDatabase> = {
  execute<TResult = unknown>(query: ExecuteParams): ExecuteResult<TResult>;
  executeKysely<TQuery extends Compilable<TResult>, TResult = QueryBuilderOutput<TQuery>>(factory: KyselyQueryFactory<TDatabase, TQuery, TResult>): ExecuteResult<TResult>;
  transaction: (callback: (tx: Pick<KyselyExecutor<TDatabase>, "execute" | "executeKysely">) => void) => void;
};
/** Creates a KyselyExecutor backed by a Cloudflare DurableObjectStorage. */
declare function createKyselyExecutor<TDatabase>(db: DurableObjectStorage): KyselyExecutor<TDatabase>;

/** A persisted CRDT event row, with `dataset` narrowed to the schema's mutation datasets. */
type TypedPersistedCrdtEvent<Schema extends SyncDbSchema> = {
  schema_version: number;
  sync_id: number;
  status: CrdtEventStatus;
  type: CrdtEventType;
  timestamp: string;
  origin: CrdtEventOrigin;
  source_node_id: string;
  dataset: keyof Schema[`~mutationsSchema`];
  item_id: string;
  payload: string;
};
/** Events dispatched by the server-side sync database. */
type ServerSyncDbEvents<Schema extends SyncDbSchema> = {
  "event-applied": TypedPersistedCrdtEvent<Schema>;
};
/** Server-side sync database: query execution + CRDT mutations + event subscription. */
type ServerSyncDb<Schema extends SyncDbSchema> = Pick<KyselyExecutor<Schema[`~serverSchema`]>, "execute" | "executeKysely"> & CrdtStorageMutator<Schema[`~mutationsSchema`]> & Pick<TypedEventTarget<ServerSyncDbEvents<Schema>>, "addEventListener" | "removeEventListener">;
/**
 * Creates CRDT storage inside a Durable Object and returns the sync
 * database plus a handler for remote sync-protocol messages.
 */
declare function createDurableObjectCrdtStorage<Schema extends SyncDbSchema>({ storage, syncDbSchema, nodeId, crdtEventsTable, batchSize, broadcastPayload, }: {
  storage: DurableObjectStorage;
  syncDbSchema: Schema;
  nodeId: string;
  crdtEventsTable: string;
  batchSize?: number;
  broadcastPayload: (payload: string) => void;
}): {
  syncDb: ServerSyncDb<Schema>;
  remoteHandler: RemoteHandler;
};
/** Outcome of handling one remote message: serialized response payload, or an error. */
type MessageResult = {
  success: true;
  payload: string;
} | {
  success: false;
  error: unknown;
};
/** Processes serialized sync-protocol messages received from remote clients. */
type RemoteHandler = {
  handleMessage: (message: string) => MessageResult;
};
/** Cloudflare Durable Object adapter entry point. */
declare const durableObjectAdapter: {
  createCrdtStorage: typeof createDurableObjectCrdtStorage;
};

/**
 * Creates a schema migrator whose current version is persisted in `kv`
 * (under "schema-version") and whose migrations run through `sqlExecutor`.
 */
declare function createMigrator(kv: SyncKvStorage, sqlExecutor: KyselyExecutor<any>, migrations: Migrations, updateLogTableName?: string): {
  migrateDbToLatest: () => void;
  latestSchemaVersion: number;
  currentSchemaVersion: number;
  migrateEvent: <Event extends _sqlite_sync_core.MigratableEvent>(event: Event, targetVersion?: number) => Event | null;
  migrateEvents: <Event extends _sqlite_sync_core.MigratableEvent>(events: Event[], targetVersion?: number) => Event[];
};
/** Migrator instance type, inferred from createMigrator's return value. */
type SyncDbMigrator = ReturnType<typeof createMigrator>;

export { type KyselyExecutor, type RemoteHandler, type ServerSyncDb, type SyncDbMigrator, type TypedPersistedCrdtEvent, createKyselyExecutor, createMigrator, durableObjectAdapter };
|
package/dist/index.js
ADDED
|
@@ -0,0 +1,305 @@
|
|
|
1
|
+
// src/durable-object-adapter.ts
|
|
2
|
+
import {
|
|
3
|
+
applyKyselyEventsBatchFilters,
|
|
4
|
+
baseSystemMigrations,
|
|
5
|
+
createCrdtApplyFunction,
|
|
6
|
+
createCrdtStorage,
|
|
7
|
+
createCrdtStorageMutator,
|
|
8
|
+
createCrdtSyncProducer,
|
|
9
|
+
createStoredValue as createStoredValue2,
|
|
10
|
+
createTypedEventTarget,
|
|
11
|
+
HLCCounter,
|
|
12
|
+
jsonSafeParse,
|
|
13
|
+
quoteId,
|
|
14
|
+
runSystemMigrations
|
|
15
|
+
} from "@sqlite-sync/core";
|
|
16
|
+
import {
|
|
17
|
+
syncServerZodSchema
|
|
18
|
+
} from "@sqlite-sync/core/server";
|
|
19
|
+
|
|
20
|
+
// src/kysely-executor.ts
|
|
21
|
+
import { dummyKysely } from "@sqlite-sync/core";
|
|
22
|
+
/**
 * Builds a synchronous SQL executor over a Durable Object's SQLite
 * storage. Raw statements run via `execute`, Kysely builders compile
 * through `executeKysely`, and `transaction` wraps work in
 * `transactionSync`, handing the callback this same executor.
 */
function createKyselyExecutor(db) {
  const runSql = (query) => {
    // Spread the bind parameters into the storage's synchronous exec().
    const cursor = db.sql.exec(query.sql, ...query.parameters);
    return { rows: cursor.toArray() };
  };

  const runKysely = (factory) => {
    // Compile the builder to { sql, parameters }; execution goes through runSql.
    const compiled = factory(dummyKysely).compile();
    return runSql(compiled);
  };

  const executor = {
    execute: runSql,
    executeKysely: runKysely,
    transaction: (callback) => {
      db.transactionSync(() => callback(executor));
    }
  };

  return executor;
}
|
|
41
|
+
|
|
42
|
+
// src/migrator.ts
|
|
43
|
+
import { createMigrator as createBaseMigrator, createStoredValue } from "@sqlite-sync/core";
|
|
44
|
+
/**
 * Builds a schema migrator whose current version is persisted under the
 * "schema-version" key of `kv` (-1 marks a never-migrated database), and
 * whose migrations run inside a single transaction on `sqlExecutor`.
 */
function createMigrator(kv, sqlExecutor, migrations, logTableName) {
  const storedVersion = createStoredValue({
    initialValue: kv.get("schema-version") ?? -1,
    saveToStorage: (version) => kv.put("schema-version", version)
  });

  const baseMigrator = createBaseMigrator({
    migrations,
    schemaVersion: storedVersion,
    updateLogTableName: logTableName
  });

  // Adapter handed to the base migrator: runs the whole migration batch
  // inside one executor transaction and exposes a positional execute().
  const startTransaction = (callback) => {
    sqlExecutor.transaction(() => {
      return callback({
        execute: (sql, parameters) => sqlExecutor.execute({ sql, parameters })
      });
    });
  };

  return {
    ...baseMigrator,
    // Override so callers do not have to supply the transaction adapter.
    migrateDbToLatest: () => {
      baseMigrator.migrateDbToLatest({ startTransaction });
    }
  };
}
|
|
72
|
+
|
|
73
|
+
// src/durable-object-adapter.ts
|
|
74
|
+
// Table holding per-item CRDT merge metadata, stored as a JSON payload
// keyed by (dataset, item_id) — see getCrdtUpdateLog below.
var updateLogTableName = "__crdt_update_log";
// System migrations for the Durable Object adapter: the shared base
// migrations plus a v2 step that de-duplicates events sharing the same
// (timestamp, source_node_id) and enforces uniqueness going forward.
var durableObjectMigrations = [
  ...baseSystemMigrations,
  {
    version: 2,
    up: (ctx) => {
      // Keep only the earliest sync_id for each (timestamp, source_node_id) pair.
      ctx.execute(
        `DELETE FROM ${ctx.eventsTableName} WHERE "sync_id" NOT IN (SELECT MIN("sync_id") FROM ${ctx.eventsTableName} GROUP BY "timestamp", "source_node_id")`
      );
      // Unique index turns later duplicate inserts into no-ops
      // (persistEvent inserts with ON CONFLICT ... DO NOTHING on these columns).
      ctx.execute(
        `CREATE UNIQUE INDEX IF NOT EXISTS "idx_crdt_events_dedup" ON ${ctx.eventsTableName} ("timestamp", "source_node_id")`
      );
    }
  }
];
|
|
89
|
+
/**
 * Wires up CRDT storage inside a Cloudflare Durable Object.
 *
 * Order matters: adapter system migrations run first, then the app
 * schema migrator, and only then is the CRDT machinery built on the
 * migrated database. Returns the typed sync database plus the remote
 * sync-protocol message handler.
 */
function createDurableObjectCrdtStorage({
  storage,
  syncDbSchema,
  nodeId,
  crdtEventsTable = "crdt_events",
  batchSize = 50,
  broadcastPayload
}) {
  const eventTarget = createTypedEventTarget();
  const sqlExecutor = createKyselyExecutor(storage);
  runSystemMigrations({
    migrations: durableObjectMigrations,
    // Adapter-internal schema version — tracked separately from the
    // app's "schema-version" key managed by createMigrator below.
    version: createStoredValue2({
      initialValue: storage.kv.get("internal-schema-version") ?? -1,
      saveToStorage: (val) => storage.kv.put("internal-schema-version", val)
    }),
    eventsTableName: quoteId(crdtEventsTable),
    updateLogTableName: quoteId(updateLogTableName),
    execute: (sql) => sqlExecutor.execute({ sql, parameters: [] }),
    transaction: (callback) => sqlExecutor.transaction(callback)
  });
  // Resume the sync counter from the highest persisted sync_id.
  // NOTE(review): getLatestSyncId queries the literal "crdt_events"
  // table, not `crdtEventsTable` — confirm when using a custom name.
  const syncId = createStoredValue2({
    initialValue: getLatestSyncId(sqlExecutor)
  });
  const migrator = createMigrator(storage.kv, sqlExecutor, syncDbSchema.migrations, quoteId(updateLogTableName));
  migrator.migrateDbToLatest();
  // Persistence callbacks the core apply function uses to read/write the
  // update log and the application tables.
  const baseApply = createCrdtApplyFunction({
    getCrdtUpdateLog(opts) {
      const [metaRow] = sqlExecutor.executeKysely(
        (db) => db.selectFrom(updateLogTableName).select("payload").where("item_id", "=", opts.itemId).where("dataset", "=", opts.dataset)
      ).rows;
      // Update-log payload is stored as JSON text; null when no row exists.
      return metaRow ? JSON.parse(metaRow.payload) : null;
    },
    insertCrdtUpdateLog(opts) {
      sqlExecutor.executeKysely(
        (db) => db.insertInto(updateLogTableName).values({
          item_id: opts.itemId,
          dataset: opts.dataset,
          payload: opts.payload
        })
      );
    },
    updateCrdtUpdateLog(opts) {
      sqlExecutor.executeKysely(
        (db) => db.updateTable(updateLogTableName).set({
          payload: opts.payload
        }).where("item_id", "=", opts.itemId).where("dataset", "=", opts.dataset)
      );
    },
    insertItem(opts) {
      sqlExecutor.executeKysely((db) => db.insertInto(opts.dataset).values(opts.payload));
    },
    updateItem(opts) {
      // Raw SQL update: identifiers quoted via quoteId, values bound as ?.
      const keys = Array.from(Object.keys(opts.payload));
      sqlExecutor.execute({
        sql: `update ${quoteId(opts.dataset)} set ${keys.map((key) => `${quoteId(key)} = ?`).join(",")} where id = ?`,
        parameters: [...keys.map((key) => opts.payload[key]), opts.itemId]
      });
    }
  });
  const handleCrdtEventApply = (event) => {
    // Apply inside a transaction; notify listeners on a microtask so the
    // dispatch happens after the storage work returns.
    sqlExecutor.transaction(() => {
      baseApply(event);
    });
    queueMicrotask(() => {
      eventTarget.dispatchEvent("event-applied", event);
    });
  };
  // Node id truncated to 12 chars for the HLC — NOTE(review): presumably
  // to bound clock-id size; confirm against @sqlite-sync/core.
  const truncatedNodeId = nodeId.slice(0, 12);
  const hlc = new HLCCounter(truncatedNodeId, () => Date.now());
  const crdtStorage = createCrdtStorage({
    nodeId: truncatedNodeId,
    syncId,
    hlc,
    migrator,
    handleCrdtEventApply,
    transaction: (callback) => sqlExecutor.transaction(callback),
    getEventsBatch: (opts) => {
      return sqlExecutor.executeKysely(
        (db) => applyKyselyEventsBatchFilters(db.selectFrom(crdtEventsTable).selectAll(), {
          ...opts,
          limit: opts.limit ?? batchSize
        })
      ).rows;
    },
    persistEvent: (event) => {
      // Duplicate (timestamp, source_node_id) inserts are ignored —
      // matches the unique index created by migration v2.
      sqlExecutor.executeKysely(
        (db) => db.insertInto(crdtEventsTable).values(event).onConflict((oc) => oc.columns(["timestamp", "source_node_id"]).doNothing())
      );
    },
    updateEvent: (syncId2, event) => sqlExecutor.executeKysely(
      (db) => db.updateTable(crdtEventsTable).set({
        status: event.status,
        dataset: event.dataset,
        item_id: event.item_id,
        schema_version: event.schema_version,
        type: event.type,
        payload: event.payload
      }).where("sync_id", "=", syncId2)
    )
  });
  const remoteHandler = createDurableObjectRemoteHandler({
    bufferSize: batchSize,
    crdtStorage,
    broadcastPayload
  });
  const syncDbMutator = createCrdtStorageMutator({
    storage: crdtStorage
  });
  const syncDbExecutor = sqlExecutor;
  // Public surface: executor methods + CRDT mutators + event subscription.
  const syncDb = {
    ...syncDbExecutor,
    ...syncDbMutator,
    addEventListener: eventTarget.addEventListener,
    removeEventListener: eventTarget.removeEventListener
  };
  return {
    syncDb,
    remoteHandler
  };
}
|
|
210
|
+
/**
 * Wires the CRDT storage to the wire protocol: broadcasts
 * "events-applied" notifications and answers pull/push requests
 * received as serialized JSON messages.
 */
function createDurableObjectRemoteHandler({
  bufferSize = 50,
  crdtStorage,
  broadcastPayload
}) {
  // Producer side: when events are applied, broadcast the new
  // high-water sync id so peers know to pull.
  createCrdtSyncProducer({
    storage: crdtStorage,
    broadcastEvents: (chunk) => {
      const notification = JSON.stringify({
        type: "events-applied",
        newSyncId: chunk.newSyncId
      });
      broadcastPayload(notification);
    }
  });

  // Answers a "pull-events" request with one page of applied events.
  const handlePullEvents = (request) => {
    const batch = crdtStorage.getEventsBatch({
      limit: bufferSize,
      status: "applied",
      afterSyncId: request.afterSyncId,
      excludeNodeId: request.excludeNodeId
    });
    // Project each row onto the wire shape (drops server-only columns).
    const events = batch.events.map((row) => ({
      schema_version: row.schema_version,
      timestamp: row.timestamp,
      type: row.type,
      dataset: row.dataset,
      item_id: row.item_id,
      payload: row.payload
    }));
    const response = {
      type: "events-pull-response",
      requestId: request.requestId,
      data: {
        hasMore: batch.hasMore,
        nextSyncId: batch.nextSyncId,
        events
      }
    };
    return {
      success: true,
      payload: JSON.stringify(response)
    };
  };

  // Answers a "push-events" request: enqueue the client's events and ack.
  const handlePushEvents = (request) => {
    crdtStorage.enqueueLocalEvents(request.events, request.nodeId);
    const response = {
      type: "events-push-response",
      requestId: request.requestId,
      data: {
        ok: true
      }
    };
    return {
      success: true,
      payload: JSON.stringify(response)
    };
  };

  // Parses, validates (zod), and dispatches one serialized request.
  const handleMessage = (message) => {
    const rawResult = jsonSafeParse(message);
    if (!rawResult.success) {
      return { success: false, error: rawResult.error };
    }
    const parsed = syncServerZodSchema.request.safeParse(rawResult.data);
    if (!parsed.success) {
      console.log("Invalid request", parsed.error);
      return { success: false, error: parsed.error };
    }
    const request = parsed.data;
    if (request.type === "pull-events") {
      return handlePullEvents(request);
    }
    if (request.type === "push-events") {
      return handlePushEvents(request);
    }
    // Unreachable if the schema is exhaustive (compiled remnant of a
    // `satisfies never` check in the TypeScript source).
    return { success: false, error: new Error("Invalid request type") };
  };

  return { handleMessage };
}
|
|
291
|
+
/**
 * Reads the highest sync_id from the events table so the in-memory sync
 * counter can resume where the persisted log left off.
 *
 * Fix: the table name was hard-coded to "crdt_events" even though
 * createDurableObjectCrdtStorage accepts a configurable crdtEventsTable;
 * the name is now a parameter with the old value as its default, so
 * existing callers are unaffected.
 *
 * @param executor - KyselyExecutor used to run the aggregate query.
 * @param eventsTable - CRDT events table name (default "crdt_events").
 * @returns The max sync_id, or 0 for an empty table / NULL aggregate.
 */
function getLatestSyncId(executor, eventsTable = "crdt_events") {
  const result = executor.executeKysely(
    (db) => db.selectFrom(eventsTable).select((eb) => eb.fn.max("sync_id").as("sync_id"))
  );
  return result.rows[0]?.sync_id ?? 0;
}
|
|
297
|
+
// Public adapter surface for the Cloudflare Durable Object environment.
var durableObjectAdapter = {
  createCrdtStorage: createDurableObjectCrdtStorage
};
export {
  createKyselyExecutor,
  createMigrator,
  durableObjectAdapter
};
//# sourceMappingURL=index.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"sources":["../src/durable-object-adapter.ts","../src/kysely-executor.ts","../src/migrator.ts"],"sourcesContent":["import {\n applyKyselyEventsBatchFilters,\n baseSystemMigrations,\n type CrdtEventOrigin,\n type CrdtEventStatus,\n type CrdtEventType,\n type CrdtStorage,\n type CrdtStorageMutator,\n type CrdtUpdateLogItem,\n type CrdtUpdateLogPayload,\n createCrdtApplyFunction,\n createCrdtStorage,\n createCrdtStorageMutator,\n createCrdtSyncProducer,\n createStoredValue,\n createTypedEventTarget,\n HLCCounter,\n jsonSafeParse,\n type PersistedCrdtEvent,\n quoteId,\n runSystemMigrations,\n type SyncDbSchema,\n type SystemMigration,\n type TypedEventTarget,\n} from \"@sqlite-sync/core\";\nimport {\n type ExtractSyncServerRequest,\n type SyncServerMessage,\n type SyncServerRequest,\n syncServerZodSchema,\n} from \"@sqlite-sync/core/server\";\nimport { createKyselyExecutor, type KyselyExecutor } from \"./kysely-executor\";\nimport { createMigrator } from \"./migrator\";\n\nconst updateLogTableName = \"__crdt_update_log\";\n\nconst durableObjectMigrations: SystemMigration[] = [\n ...baseSystemMigrations,\n {\n version: 2,\n up: (ctx) => {\n ctx.execute(\n `DELETE FROM ${ctx.eventsTableName} WHERE \"sync_id\" NOT IN ` +\n `(SELECT MIN(\"sync_id\") FROM ${ctx.eventsTableName} GROUP BY \"timestamp\", \"source_node_id\")`,\n );\n ctx.execute(\n `CREATE UNIQUE INDEX IF NOT EXISTS \"idx_crdt_events_dedup\" ` +\n `ON ${ctx.eventsTableName} (\"timestamp\", \"source_node_id\")`,\n );\n },\n },\n];\n\ntype AdapterDb = {\n crdtEvents: PersistedCrdtEvent;\n [updateLogTableName]: CrdtUpdateLogItem;\n};\n\nexport type TypedPersistedCrdtEvent<Schema extends SyncDbSchema> = {\n schema_version: number;\n sync_id: number;\n status: CrdtEventStatus;\n type: CrdtEventType;\n timestamp: string;\n origin: CrdtEventOrigin;\n source_node_id: string;\n dataset: keyof Schema[`~mutationsSchema`];\n item_id: string;\n payload: string;\n};\n\ntype ServerSyncDbEvents<Schema extends 
SyncDbSchema> = {\n \"event-applied\": TypedPersistedCrdtEvent<Schema>;\n};\n\nexport type ServerSyncDb<Schema extends SyncDbSchema> = Pick<\n KyselyExecutor<Schema[`~serverSchema`]>,\n \"execute\" | \"executeKysely\"\n> &\n CrdtStorageMutator<Schema[`~mutationsSchema`]> &\n Pick<TypedEventTarget<ServerSyncDbEvents<Schema>>, \"addEventListener\" | \"removeEventListener\">;\n\nfunction createDurableObjectCrdtStorage<Schema extends SyncDbSchema>({\n storage,\n syncDbSchema,\n nodeId,\n crdtEventsTable = \"crdt_events\",\n batchSize = 50,\n broadcastPayload,\n}: {\n storage: DurableObjectStorage;\n syncDbSchema: Schema;\n nodeId: string;\n crdtEventsTable: string;\n batchSize?: number;\n broadcastPayload: (payload: string) => void;\n}): {\n syncDb: ServerSyncDb<Schema>;\n remoteHandler: RemoteHandler;\n} {\n const eventTarget = createTypedEventTarget<ServerSyncDbEvents<Schema>>();\n const sqlExecutor = createKyselyExecutor<AdapterDb>(storage);\n\n runSystemMigrations({\n migrations: durableObjectMigrations,\n version: createStoredValue<number>({\n initialValue: storage.kv.get(\"internal-schema-version\") ?? -1,\n saveToStorage: (val) => storage.kv.put(\"internal-schema-version\", val),\n }),\n eventsTableName: quoteId(crdtEventsTable),\n updateLogTableName: quoteId(updateLogTableName),\n execute: (sql) => sqlExecutor.execute({ sql, parameters: [] }),\n transaction: (callback) => sqlExecutor.transaction(callback),\n });\n\n const syncId = createStoredValue({\n initialValue: getLatestSyncId(sqlExecutor),\n });\n\n const migrator = createMigrator(storage.kv, sqlExecutor, syncDbSchema.migrations, quoteId(updateLogTableName));\n migrator.migrateDbToLatest();\n\n const baseApply = createCrdtApplyFunction({\n getCrdtUpdateLog(opts) {\n const [metaRow] = sqlExecutor.executeKysely((db) =>\n db\n .selectFrom(updateLogTableName)\n .select(\"payload\")\n .where(\"item_id\", \"=\", opts.itemId)\n .where(\"dataset\", \"=\", opts.dataset),\n ).rows;\n return metaRow ? 
(JSON.parse(metaRow.payload) as CrdtUpdateLogPayload) : null;\n },\n insertCrdtUpdateLog(opts) {\n sqlExecutor.executeKysely((db) =>\n db.insertInto(updateLogTableName).values({\n item_id: opts.itemId,\n dataset: opts.dataset,\n payload: opts.payload,\n }),\n );\n },\n updateCrdtUpdateLog(opts) {\n sqlExecutor.executeKysely((db) =>\n db\n .updateTable(updateLogTableName)\n .set({\n payload: opts.payload,\n })\n .where(\"item_id\", \"=\", opts.itemId)\n .where(\"dataset\", \"=\", opts.dataset),\n );\n },\n insertItem(opts) {\n sqlExecutor.executeKysely((db) => db.insertInto(opts.dataset as any).values(opts.payload));\n },\n updateItem(opts) {\n const keys = Array.from(Object.keys(opts.payload));\n sqlExecutor.execute({\n sql: `update ${quoteId(opts.dataset)} set ${keys.map((key) => `${quoteId(key)} = ?`).join(\",\")} where id = ?`,\n parameters: [...keys.map((key) => opts.payload[key]), opts.itemId],\n });\n },\n });\n\n const handleCrdtEventApply = (event: PersistedCrdtEvent) => {\n sqlExecutor.transaction(() => {\n baseApply(event);\n });\n\n queueMicrotask(() => {\n eventTarget.dispatchEvent(\"event-applied\", event as TypedPersistedCrdtEvent<Schema>);\n });\n };\n\n const truncatedNodeId = nodeId.slice(0, 12);\n const hlc = new HLCCounter(truncatedNodeId, () => Date.now());\n\n const crdtStorage = createCrdtStorage({\n nodeId: truncatedNodeId,\n syncId,\n hlc,\n migrator: migrator,\n handleCrdtEventApply,\n transaction: (callback) => sqlExecutor.transaction(callback),\n getEventsBatch: (opts) => {\n return sqlExecutor.executeKysely((db) =>\n applyKyselyEventsBatchFilters(db.selectFrom(crdtEventsTable as \"crdtEvents\").selectAll(), {\n ...opts,\n limit: opts.limit ?? 
batchSize,\n }),\n ).rows;\n },\n persistEvent: (event) => {\n sqlExecutor.executeKysely((db) =>\n db\n .insertInto(crdtEventsTable as \"crdtEvents\")\n .values(event)\n .onConflict((oc) => oc.columns([\"timestamp\", \"source_node_id\"]).doNothing()),\n );\n },\n updateEvent: (syncId, event) =>\n sqlExecutor.executeKysely((db) =>\n db\n .updateTable(crdtEventsTable as \"crdtEvents\")\n .set({\n status: event.status,\n dataset: event.dataset,\n item_id: event.item_id,\n schema_version: event.schema_version,\n type: event.type,\n payload: event.payload,\n })\n .where(\"sync_id\", \"=\", syncId),\n ),\n });\n\n const remoteHandler = createDurableObjectRemoteHandler({\n bufferSize: batchSize,\n crdtStorage,\n broadcastPayload,\n });\n\n const syncDbMutator = createCrdtStorageMutator<Schema[`~mutationsSchema`]>({\n storage: crdtStorage,\n });\n\n const syncDbExecutor = sqlExecutor as unknown as KyselyExecutor<Schema[`~serverSchema`]>;\n const syncDb: ServerSyncDb<Schema> = {\n ...syncDbExecutor,\n ...syncDbMutator,\n addEventListener: eventTarget.addEventListener,\n removeEventListener: eventTarget.removeEventListener,\n };\n\n return {\n syncDb,\n remoteHandler,\n };\n}\n\ntype MessageResult = { success: true; payload: string } | { success: false; error: unknown };\nexport type RemoteHandler = {\n handleMessage: (message: string) => MessageResult;\n};\n\nfunction createDurableObjectRemoteHandler({\n bufferSize = 50,\n crdtStorage,\n broadcastPayload,\n}: {\n bufferSize?: number;\n crdtStorage: CrdtStorage;\n broadcastPayload: (payload: string) => void;\n}): RemoteHandler {\n createCrdtSyncProducer({\n storage: crdtStorage,\n broadcastEvents: (chunk) => {\n broadcastPayload(\n JSON.stringify({\n type: \"events-applied\",\n newSyncId: chunk.newSyncId,\n }),\n );\n },\n });\n\n const handleMessage = (message: string): MessageResult => {\n const requestRaw = jsonSafeParse<SyncServerRequest>(message);\n\n if (!requestRaw.success) {\n return { success: false, error: 
requestRaw.error };\n }\n\n const requestResult = syncServerZodSchema.request.safeParse(requestRaw.data);\n\n if (!requestResult.success) {\n console.log(\"Invalid request\", requestResult.error);\n return { success: false, error: requestResult.error };\n }\n\n const request = requestResult.data;\n\n switch (request.type) {\n case \"pull-events\":\n return handlePullEvents(request);\n case \"push-events\":\n return handlePushEvents(request);\n default:\n request satisfies never;\n return { success: false, error: new Error(\"Invalid request type\") };\n }\n };\n\n const handlePullEvents = (request: ExtractSyncServerRequest<\"pull-events\">): MessageResult => {\n const batch = crdtStorage.getEventsBatch({\n limit: bufferSize,\n status: \"applied\",\n afterSyncId: request.afterSyncId,\n excludeNodeId: request.excludeNodeId,\n });\n\n const eventsPullMessage: SyncServerMessage = {\n type: \"events-pull-response\",\n requestId: request.requestId,\n data: {\n hasMore: batch.hasMore,\n nextSyncId: batch.nextSyncId,\n events: batch.events.map((x) => ({\n schema_version: x.schema_version,\n timestamp: x.timestamp,\n type: x.type,\n dataset: x.dataset,\n item_id: x.item_id,\n payload: x.payload,\n })),\n },\n };\n\n return {\n success: true,\n payload: JSON.stringify(eventsPullMessage),\n };\n };\n\n const handlePushEvents = (request: ExtractSyncServerRequest<\"push-events\">): MessageResult => {\n crdtStorage.enqueueLocalEvents(request.events, request.nodeId);\n const eventsAppliedMessage: SyncServerMessage = {\n type: \"events-push-response\",\n requestId: request.requestId,\n data: {\n ok: true,\n },\n };\n\n return {\n success: true,\n payload: JSON.stringify(eventsAppliedMessage),\n };\n };\n\n return { handleMessage };\n}\n\nfunction getLatestSyncId(executor: KyselyExecutor<any>) {\n const result = executor.executeKysely((db) =>\n db.selectFrom(\"crdt_events\").select((eb) => eb.fn.max(\"sync_id\").as(\"sync_id\")),\n );\n return result.rows[0]?.sync_id ?? 
0;\n}\n\nexport const durableObjectAdapter = {\n createCrdtStorage: createDurableObjectCrdtStorage,\n};\n","import { dummyKysely } from \"@sqlite-sync/core\";\nimport type { Compilable, Kysely } from \"kysely\";\n\ntype ExecuteParams = {\n sql: string;\n parameters: readonly unknown[];\n};\n\ntype ExecuteResult<T> = {\n rows: T[];\n};\n\ntype QueryBuilderOutput<QB> = QB extends Compilable<infer O> ? O : never;\n\ntype KyselyQueryFactory<TDatabase, TQuery extends Compilable<TResult>, TResult = QueryBuilderOutput<TQuery>> = (\n kysely: Kysely<TDatabase>,\n) => TQuery;\n\nexport type KyselyExecutor<TDatabase> = {\n execute<TResult = unknown>(query: ExecuteParams): ExecuteResult<TResult>;\n executeKysely<TQuery extends Compilable<TResult>, TResult = QueryBuilderOutput<TQuery>>(\n factory: KyselyQueryFactory<TDatabase, TQuery, TResult>,\n ): ExecuteResult<TResult>;\n transaction: (callback: (tx: Pick<KyselyExecutor<TDatabase>, \"execute\" | \"executeKysely\">) => void) => void;\n};\n\nexport function createKyselyExecutor<TDatabase>(db: DurableObjectStorage): KyselyExecutor<TDatabase> {\n const execute = <TResult = unknown>(query: ExecuteParams): ExecuteResult<TResult> => {\n const rows = db.sql.exec(query.sql, ...query.parameters).toArray();\n return { rows: rows as TResult[] };\n };\n\n const executeKysely = <TQuery extends Compilable<TResult>, TResult = QueryBuilderOutput<TQuery>>(\n factory: KyselyQueryFactory<TDatabase, TQuery, TResult>,\n ): ExecuteResult<TResult> => {\n const query = factory(dummyKysely as any).compile();\n return execute(query);\n };\n\n const transaction = (callback: (tx: Pick<KyselyExecutor<TDatabase>, \"execute\" | \"executeKysely\">) => void) => {\n db.transactionSync(() => callback(executor));\n };\n\n const executor = {\n execute,\n executeKysely,\n transaction,\n };\n\n return executor;\n}\n","import { createMigrator as createBaseMigrator, createStoredValue, type Migrations } from \"@sqlite-sync/core\";\nimport type { KyselyExecutor } from 
\"./kysely-executor\";\n\nexport function createMigrator(\n kv: SyncKvStorage,\n sqlExecutor: KyselyExecutor<any>,\n migrations: Migrations,\n updateLogTableName?: string,\n) {\n const schemaVersion = createStoredValue<number>({\n initialValue: kv.get(\"schema-version\") ?? -1,\n saveToStorage: (val) => kv.put(\"schema-version\", val),\n });\n\n const baseMigrator = createBaseMigrator({\n migrations,\n schemaVersion,\n updateLogTableName,\n });\n\n return {\n ...baseMigrator,\n migrateDbToLatest: () => {\n baseMigrator.migrateDbToLatest({\n startTransaction: (callback) => {\n sqlExecutor.transaction(() => {\n return callback({\n execute: (sql, parameters) =>\n sqlExecutor.execute({\n sql,\n parameters,\n }),\n });\n });\n },\n });\n },\n };\n}\n\nexport type SyncDbMigrator = ReturnType<typeof createMigrator>;\n"],"mappings":";AAAA;AAAA,EACE;AAAA,EACA;AAAA,EAQA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA,qBAAAA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EAEA;AAAA,EACA;AAAA,OAIK;AACP;AAAA,EAIE;AAAA,OACK;;;AC9BP,SAAS,mBAAmB;AA0BrB,SAAS,qBAAgC,IAAqD;AACnG,QAAM,UAAU,CAAoB,UAAiD;AACnF,UAAM,OAAO,GAAG,IAAI,KAAK,MAAM,KAAK,GAAG,MAAM,UAAU,EAAE,QAAQ;AACjE,WAAO,EAAE,KAAwB;AAAA,EACnC;AAEA,QAAM,gBAAgB,CACpB,YAC2B;AAC3B,UAAM,QAAQ,QAAQ,WAAkB,EAAE,QAAQ;AAClD,WAAO,QAAQ,KAAK;AAAA,EACtB;AAEA,QAAM,cAAc,CAAC,aAAyF;AAC5G,OAAG,gBAAgB,MAAM,SAAS,QAAQ,CAAC;AAAA,EAC7C;AAEA,QAAM,WAAW;AAAA,IACf;AAAA,IACA;AAAA,IACA;AAAA,EACF;AAEA,SAAO;AACT;;;AClDA,SAAS,kBAAkB,oBAAoB,yBAA0C;AAGlF,SAAS,eACd,IACA,aACA,YACAC,qBACA;AACA,QAAM,gBAAgB,kBAA0B;AAAA,IAC9C,cAAc,GAAG,IAAI,gBAAgB,KAAK;AAAA,IAC1C,eAAe,CAAC,QAAQ,GAAG,IAAI,kBAAkB,GAAG;AAAA,EACtD,CAAC;AAED,QAAM,eAAe,mBAAmB;AAAA,IACtC;AAAA,IACA;AAAA,IACA,oBAAAA;AAAA,EACF,CAAC;AAED,SAAO;AAAA,IACL,GAAG;AAAA,IACH,mBAAmB,MAAM;AACvB,mBAAa,kBAAkB;AAAA,QAC7B,kBAAkB,CAAC,aAAa;AAC9B,sBAAY,YAAY,MAAM;AAC5B,mBAAO,SAAS;AAAA,cACd,SAAS,CAAC,KAAK,eACb,YAAY,QAAQ;AAAA,gBAClB;AAAA,gBACA;AAAA,cACF,CAAC;AAAA,YACL,CAAC;AAAA,UACH,CAAC;AAAA,QACH;AAAA,MACF,CAAC;AAAA,IACH;AAAA,EACF;AACF;;;AFJA,IAAM,qBAA
qB;AAE3B,IAAM,0BAA6C;AAAA,EACjD,GAAG;AAAA,EACH;AAAA,IACE,SAAS;AAAA,IACT,IAAI,CAAC,QAAQ;AACX,UAAI;AAAA,QACF,eAAe,IAAI,eAAe,uDACD,IAAI,eAAe;AAAA,MACtD;AACA,UAAI;AAAA,QACF,gEACQ,IAAI,eAAe;AAAA,MAC7B;AAAA,IACF;AAAA,EACF;AACF;AA+BA,SAAS,+BAA4D;AAAA,EACnE;AAAA,EACA;AAAA,EACA;AAAA,EACA,kBAAkB;AAAA,EAClB,YAAY;AAAA,EACZ;AACF,GAUE;AACA,QAAM,cAAc,uBAAmD;AACvE,QAAM,cAAc,qBAAgC,OAAO;AAE3D,sBAAoB;AAAA,IAClB,YAAY;AAAA,IACZ,SAASC,mBAA0B;AAAA,MACjC,cAAc,QAAQ,GAAG,IAAI,yBAAyB,KAAK;AAAA,MAC3D,eAAe,CAAC,QAAQ,QAAQ,GAAG,IAAI,2BAA2B,GAAG;AAAA,IACvE,CAAC;AAAA,IACD,iBAAiB,QAAQ,eAAe;AAAA,IACxC,oBAAoB,QAAQ,kBAAkB;AAAA,IAC9C,SAAS,CAAC,QAAQ,YAAY,QAAQ,EAAE,KAAK,YAAY,CAAC,EAAE,CAAC;AAAA,IAC7D,aAAa,CAAC,aAAa,YAAY,YAAY,QAAQ;AAAA,EAC7D,CAAC;AAED,QAAM,SAASA,mBAAkB;AAAA,IAC/B,cAAc,gBAAgB,WAAW;AAAA,EAC3C,CAAC;AAED,QAAM,WAAW,eAAe,QAAQ,IAAI,aAAa,aAAa,YAAY,QAAQ,kBAAkB,CAAC;AAC7G,WAAS,kBAAkB;AAE3B,QAAM,YAAY,wBAAwB;AAAA,IACxC,iBAAiB,MAAM;AACrB,YAAM,CAAC,OAAO,IAAI,YAAY;AAAA,QAAc,CAAC,OAC3C,GACG,WAAW,kBAAkB,EAC7B,OAAO,SAAS,EAChB,MAAM,WAAW,KAAK,KAAK,MAAM,EACjC,MAAM,WAAW,KAAK,KAAK,OAAO;AAAA,MACvC,EAAE;AACF,aAAO,UAAW,KAAK,MAAM,QAAQ,OAAO,IAA6B;AAAA,IAC3E;AAAA,IACA,oBAAoB,MAAM;AACxB,kBAAY;AAAA,QAAc,CAAC,OACzB,GAAG,WAAW,kBAAkB,EAAE,OAAO;AAAA,UACvC,SAAS,KAAK;AAAA,UACd,SAAS,KAAK;AAAA,UACd,SAAS,KAAK;AAAA,QAChB,CAAC;AAAA,MACH;AAAA,IACF;AAAA,IACA,oBAAoB,MAAM;AACxB,kBAAY;AAAA,QAAc,CAAC,OACzB,GACG,YAAY,kBAAkB,EAC9B,IAAI;AAAA,UACH,SAAS,KAAK;AAAA,QAChB,CAAC,EACA,MAAM,WAAW,KAAK,KAAK,MAAM,EACjC,MAAM,WAAW,KAAK,KAAK,OAAO;AAAA,MACvC;AAAA,IACF;AAAA,IACA,WAAW,MAAM;AACf,kBAAY,cAAc,CAAC,OAAO,GAAG,WAAW,KAAK,OAAc,EAAE,OAAO,KAAK,OAAO,CAAC;AAAA,IAC3F;AAAA,IACA,WAAW,MAAM;AACf,YAAM,OAAO,MAAM,KAAK,OAAO,KAAK,KAAK,OAAO,CAAC;AACjD,kBAAY,QAAQ;AAAA,QAClB,KAAK,UAAU,QAAQ,KAAK,OAAO,CAAC,QAAQ,KAAK,IAAI,CAAC,QAAQ,GAAG,QAAQ,GAAG,CAAC,MAAM,EAAE,KAAK,GAAG,CAAC;AAAA,QAC9F,YAAY,CAAC,GAAG,KAAK,IAAI,CAAC,QAAQ,KAAK,QAAQ,GAAG,CAAC,GAAG,KAAK,MAAM;AAAA,MACnE,CAAC;AAAA,IACH;AAAA,EACF,CAAC;AAED,QAAM,uBAAuB,CAAC,UAA8B;AAC1D,gBAAY,YAAY,MAAM;AAC5B,gBAAU,KAAK;AAAA,I
ACjB,CAAC;AAED,mBAAe,MAAM;AACnB,kBAAY,cAAc,iBAAiB,KAAwC;AAAA,IACrF,CAAC;AAAA,EACH;AAEA,QAAM,kBAAkB,OAAO,MAAM,GAAG,EAAE;AAC1C,QAAM,MAAM,IAAI,WAAW,iBAAiB,MAAM,KAAK,IAAI,CAAC;AAE5D,QAAM,cAAc,kBAAkB;AAAA,IACpC,QAAQ;AAAA,IACR;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA,aAAa,CAAC,aAAa,YAAY,YAAY,QAAQ;AAAA,IAC3D,gBAAgB,CAAC,SAAS;AACxB,aAAO,YAAY;AAAA,QAAc,CAAC,OAChC,8BAA8B,GAAG,WAAW,eAA+B,EAAE,UAAU,GAAG;AAAA,UACxF,GAAG;AAAA,UACH,OAAO,KAAK,SAAS;AAAA,QACvB,CAAC;AAAA,MACH,EAAE;AAAA,IACJ;AAAA,IACA,cAAc,CAAC,UAAU;AACvB,kBAAY;AAAA,QAAc,CAAC,OACzB,GACG,WAAW,eAA+B,EAC1C,OAAO,KAAK,EACZ,WAAW,CAAC,OAAO,GAAG,QAAQ,CAAC,aAAa,gBAAgB,CAAC,EAAE,UAAU,CAAC;AAAA,MAC/E;AAAA,IACF;AAAA,IACA,aAAa,CAACC,SAAQ,UACpB,YAAY;AAAA,MAAc,CAAC,OACzB,GACG,YAAY,eAA+B,EAC3C,IAAI;AAAA,QACH,QAAQ,MAAM;AAAA,QACd,SAAS,MAAM;AAAA,QACf,SAAS,MAAM;AAAA,QACf,gBAAgB,MAAM;AAAA,QACtB,MAAM,MAAM;AAAA,QACZ,SAAS,MAAM;AAAA,MACjB,CAAC,EACA,MAAM,WAAW,KAAKA,OAAM;AAAA,IACjC;AAAA,EACJ,CAAC;AAED,QAAM,gBAAgB,iCAAiC;AAAA,IACrD,YAAY;AAAA,IACZ;AAAA,IACA;AAAA,EACF,CAAC;AAED,QAAM,gBAAgB,yBAAqD;AAAA,IACzE,SAAS;AAAA,EACX,CAAC;AAED,QAAM,iBAAiB;AACvB,QAAM,SAA+B;AAAA,IACnC,GAAG;AAAA,IACH,GAAG;AAAA,IACH,kBAAkB,YAAY;AAAA,IAC9B,qBAAqB,YAAY;AAAA,EACnC;AAEA,SAAO;AAAA,IACL;AAAA,IACA;AAAA,EACF;AACF;AAOA,SAAS,iCAAiC;AAAA,EACxC,aAAa;AAAA,EACb;AAAA,EACA;AACF,GAIkB;AAChB,yBAAuB;AAAA,IACrB,SAAS;AAAA,IACT,iBAAiB,CAAC,UAAU;AAC1B;AAAA,QACE,KAAK,UAAU;AAAA,UACb,MAAM;AAAA,UACN,WAAW,MAAM;AAAA,QACnB,CAAC;AAAA,MACH;AAAA,IACF;AAAA,EACF,CAAC;AAED,QAAM,gBAAgB,CAAC,YAAmC;AACxD,UAAM,aAAa,cAAiC,OAAO;AAE3D,QAAI,CAAC,WAAW,SAAS;AACvB,aAAO,EAAE,SAAS,OAAO,OAAO,WAAW,MAAM;AAAA,IACnD;AAEA,UAAM,gBAAgB,oBAAoB,QAAQ,UAAU,WAAW,IAAI;AAE3E,QAAI,CAAC,cAAc,SAAS;AAC1B,cAAQ,IAAI,mBAAmB,cAAc,KAAK;AAClD,aAAO,EAAE,SAAS,OAAO,OAAO,cAAc,MAAM;AAAA,IACtD;AAEA,UAAM,UAAU,cAAc;AAE9B,YAAQ,QAAQ,MAAM;AAAA,MACpB,KAAK;AACH,eAAO,iBAAiB,OAAO;AAAA,MACjC,KAAK;AACH,eAAO,iBAAiB,OAAO;AAAA,MACjC;AACE;AACA,eAAO,EAAE,SAAS,OAAO,OAAO,IAAI,MAAM,sBAAsB,EAAE;AAAA,IACtE;AAAA,EACF;AAEA,QAAM,mBAAmB,CAAC,YAAoE;AAC5F,UAAM,QAAQ,YAAY
,eAAe;AAAA,MACvC,OAAO;AAAA,MACP,QAAQ;AAAA,MACR,aAAa,QAAQ;AAAA,MACrB,eAAe,QAAQ;AAAA,IACzB,CAAC;AAED,UAAM,oBAAuC;AAAA,MAC3C,MAAM;AAAA,MACN,WAAW,QAAQ;AAAA,MACnB,MAAM;AAAA,QACJ,SAAS,MAAM;AAAA,QACf,YAAY,MAAM;AAAA,QAClB,QAAQ,MAAM,OAAO,IAAI,CAAC,OAAO;AAAA,UAC/B,gBAAgB,EAAE;AAAA,UAClB,WAAW,EAAE;AAAA,UACb,MAAM,EAAE;AAAA,UACR,SAAS,EAAE;AAAA,UACX,SAAS,EAAE;AAAA,UACX,SAAS,EAAE;AAAA,QACb,EAAE;AAAA,MACJ;AAAA,IACF;AAEA,WAAO;AAAA,MACL,SAAS;AAAA,MACT,SAAS,KAAK,UAAU,iBAAiB;AAAA,IAC3C;AAAA,EACF;AAEA,QAAM,mBAAmB,CAAC,YAAoE;AAC5F,gBAAY,mBAAmB,QAAQ,QAAQ,QAAQ,MAAM;AAC7D,UAAM,uBAA0C;AAAA,MAC9C,MAAM;AAAA,MACN,WAAW,QAAQ;AAAA,MACnB,MAAM;AAAA,QACJ,IAAI;AAAA,MACN;AAAA,IACF;AAEA,WAAO;AAAA,MACL,SAAS;AAAA,MACT,SAAS,KAAK,UAAU,oBAAoB;AAAA,IAC9C;AAAA,EACF;AAEA,SAAO,EAAE,cAAc;AACzB;AAEA,SAAS,gBAAgB,UAA+B;AACtD,QAAM,SAAS,SAAS;AAAA,IAAc,CAAC,OACrC,GAAG,WAAW,aAAa,EAAE,OAAO,CAAC,OAAO,GAAG,GAAG,IAAI,SAAS,EAAE,GAAG,SAAS,CAAC;AAAA,EAChF;AACA,SAAO,OAAO,KAAK,CAAC,GAAG,WAAW;AACpC;AAEO,IAAM,uBAAuB;AAAA,EAClC,mBAAmB;AACrB;","names":["createStoredValue","updateLogTableName","createStoredValue","syncId"]}
|
package/dist/jobs.d.ts
ADDED
|
@@ -0,0 +1,89 @@
|
|
|
1
|
+
import { StandardSchemaV1 } from '@standard-schema/spec';

/** Lifecycle of a single persisted job run. */
type JobStatus = "queued" | "running" | "completed" | "failed" | "cancelled";
/** CamelCase view of a persisted one-off job run row. */
type JobRunRecord<TType extends string = string, TInput = unknown> = {
    id: string;
    type: TType;
    status: JobStatus;
    /** Validated input the handler receives (persisted as JSON text). */
    payload: TInput;
    /** Epoch ms at which the run becomes due. */
    scheduledAt: number;
    startedAt: number | null;
    finishedAt: number | null;
    errorMessage: string | null;
    errorStack: string | null;
    /** Id of the interval schedule that produced this run, if any. */
    scheduleId: string | null;
    createdAt: number;
    updatedAt: number;
};
/** Lifecycle of a recurring interval schedule. */
type IntervalScheduleStatus = "active" | "cancelled";
/** CamelCase view of a persisted recurring schedule row. */
type IntervalScheduleRecord<TType extends string = string, TInput = unknown> = {
    id: string;
    type: TType;
    /** Caller-chosen key; unique together with `type`. */
    dedupeKey: string;
    payload: TInput;
    intervalMs: number;
    /** Epoch ms of the next materialization. */
    nextRunAt: number;
    status: IntervalScheduleStatus;
    createdAt: number;
    updatedAt: number;
    lastRunAt: number | null;
};
/** Outcome of one alarm invocation. */
type JobRunResult = {
    processedJobs: number;
    /** Timestamp the next alarm was set for, or null when nothing is pending. */
    nextAlarmAt: number | null;
};
/** Handle returned by setupJobs(); wire `onAlarm` into the Durable Object's alarm(). */
type JobRuntime = {
    onAlarm: () => Promise<JobRunResult>;
    setNextAlarm: () => Promise<number | null>;
    schedule: <TType extends string, TSchema extends StandardSchemaV1>(job: DefinedJob<TType, TSchema>, options: JobScheduleOptions<StandardSchemaV1.InferOutput<TSchema>>) => Promise<JobRunRecord<TType, StandardSchemaV1.InferOutput<TSchema>>>;
    scheduleInterval: <TType extends string, TSchema extends StandardSchemaV1>(job: DefinedJob<TType, TSchema>, options: IntervalJobScheduleOptions<StandardSchemaV1.InferOutput<TSchema>>) => Promise<IntervalScheduleRecord<TType, StandardSchemaV1.InferOutput<TSchema>>>;
    cancelInterval: <TType extends string, TSchema extends StandardSchemaV1>(job: DefinedJob<TType, TSchema>, options: CancelIntervalJobOptions) => Promise<boolean>;
};
/** Arguments passed to a job handler. */
type JobExecutionContext<TInput, TContext extends Record<string, unknown>, TType extends string> = {
    input: TInput;
    context: TContext;
    job: JobRunRecord<TType, TInput>;
};
/** Handler signature; may be sync or async. */
type JobHandler<TInput, TContext extends Record<string, unknown>, TType extends string> = (context: JobExecutionContext<TInput, TContext, TType>) => void | Promise<void>;
type JobScheduleOptions<TInput> = {
    input: TInput;
    /** Epoch ms at which the job becomes due. */
    at: number;
};
type IntervalJobScheduleOptions<TInput> = {
    input: TInput;
    dedupeKey: string;
    everyMs: number;
    /** First due time in epoch ms; defaults to now + everyMs. */
    startAt?: number;
};
type CancelIntervalJobOptions = {
    dedupeKey: string;
};
/** A job produced by defineJob(...).input(...).handler(...). */
type DefinedJob<TType extends string, TSchema extends StandardSchemaV1, TContext extends Record<string, unknown> = Record<string, unknown>> = {
    type: TType;
    /** @internal Phantom field for type inference — never set at runtime. */
    readonly "~schema"?: TSchema;
    /** @internal Phantom field for type inference — never set at runtime. */
    readonly "~context"?: TContext;
};
type AnyDefinedJob = DefinedJob<string, StandardSchemaV1, Record<string, unknown>>;
/** Second builder step: attach the handler. */
type DefineJobInputBuilder<TType extends string, TSchema extends StandardSchemaV1, TContext extends Record<string, unknown>> = {
    handler: (handler: JobHandler<StandardSchemaV1.InferOutput<TSchema>, TContext, TType>) => DefinedJob<TType, TSchema, TContext>;
};
/** First builder step: attach the input schema. */
type DefineJobBuilder<TType extends string, TContext extends Record<string, unknown>> = {
    input: <TSchema extends StandardSchemaV1>(schema: TSchema) => DefineJobInputBuilder<TType, TSchema, TContext>;
};
type CreateDefineJobBuilder<TContext extends Record<string, unknown>> = <TType extends string>(options: {
    type: TType;
}) => DefineJobBuilder<TType, TContext>;

/** Build a `defineJob` factory bound to a context type. */
declare function createDefineJob<TContext extends Record<string, unknown> = Record<string, unknown>>(): CreateDefineJobBuilder<TContext>;

type SetupJobsOptions<TContext extends Record<string, unknown>, TJobs extends readonly AnyDefinedJob[]> = {
    jobs: TJobs;
    ctx: DurableObjectState;
    context: TContext;
    /** Maximum jobs processed per alarm invocation. */
    maxJobsPerAlarm?: number;
};
/** Initialize the persisted job runtime for a Durable Object. */
declare function setupJobs<TContext extends Record<string, unknown>, TJobs extends readonly AnyDefinedJob[]>(options: SetupJobsOptions<TContext, TJobs>): Promise<JobRuntime>;

export { type AnyDefinedJob, type CancelIntervalJobOptions, type CreateDefineJobBuilder, type DefineJobBuilder, type DefineJobInputBuilder, type DefinedJob, type IntervalJobScheduleOptions, type IntervalScheduleRecord, type IntervalScheduleStatus, type JobExecutionContext, type JobHandler, type JobRunRecord, type JobRunResult, type JobRuntime, type JobScheduleOptions, type JobStatus, createDefineJob, setupJobs };
|
package/dist/jobs.js
ADDED
|
@@ -0,0 +1,613 @@
|
|
|
1
|
+
// src/jobs/define-job.ts
// Well-known symbol under which a job definition hides its schema + handler.
var jobDefinitionInternals = Symbol.for("@sqlite-sync/cloudflare/jobs/definition");
/**
 * Returns the `defineJob` builder. Usage:
 *   defineJob({ type }).input(schema).handler(fn)
 * The resulting job object exposes only `type`; schema and handler live
 * behind the shared symbol so the runtime can retrieve them.
 */
function createDefineJob() {
  return function defineJob(options) {
    const buildWithSchema = (schema) => ({
      handler: (handler) => ({
        type: options.type,
        [jobDefinitionInternals]: { schema, handler }
      })
    });
    return { input: buildWithSchema };
  };
}
|
|
21
|
+
|
|
22
|
+
// src/jobs/schema.ts
// KV key recording the highest jobs-schema migration version applied so far.
var JOBS_SCHEMA_VERSION_KEY = "jobs-schema-version";
// Table of one-off job runs (one row per execution attempt).
var JOBS_TABLE = "__jobs";
// Table of recurring interval schedules that get materialized into __jobs rows.
var JOB_SCHEDULES_TABLE = "__job_schedules";
// Ordered, versioned migrations; each runs at most once per Durable Object.
// All DDL uses IF NOT EXISTS so a re-run is harmless.
var jobsSchemaMigrations = [
  {
    version: 0,
    up: (storage) => {
      storage.sql.exec(`CREATE TABLE IF NOT EXISTS "${JOBS_TABLE}" (
      "id" TEXT NOT NULL PRIMARY KEY,
      "type" TEXT NOT NULL,
      "status" TEXT NOT NULL,
      "payload" TEXT NOT NULL,
      "scheduled_at" INTEGER NOT NULL,
      "started_at" INTEGER,
      "finished_at" INTEGER,
      "error_message" TEXT,
      "error_stack" TEXT,
      "schedule_id" TEXT,
      "created_at" INTEGER NOT NULL,
      "updated_at" INTEGER NOT NULL
    )`);
      // Backs the "due queued jobs" scan: status + scheduled_at, id as tiebreak.
      storage.sql.exec(`CREATE INDEX IF NOT EXISTS "idx_jobs_due" ON "${JOBS_TABLE}" ("status", "scheduled_at", "id")`);
      storage.sql.exec(`CREATE TABLE IF NOT EXISTS "${JOB_SCHEDULES_TABLE}" (
      "id" TEXT NOT NULL PRIMARY KEY,
      "type" TEXT NOT NULL,
      "dedupe_key" TEXT NOT NULL,
      "payload" TEXT NOT NULL,
      "interval_ms" INTEGER NOT NULL,
      "next_run_at" INTEGER NOT NULL,
      "status" TEXT NOT NULL,
      "created_at" INTEGER NOT NULL,
      "updated_at" INTEGER NOT NULL,
      "last_run_at" INTEGER
    )`);
      // Enforces one schedule per (type, dedupe_key); backs the upsert's ON CONFLICT target.
      storage.sql.exec(
        `CREATE UNIQUE INDEX IF NOT EXISTS "idx_job_schedules_type_key" ON "${JOB_SCHEDULES_TABLE}" ("type", "dedupe_key")`
      );
      // Backs the "due active schedules" scan.
      storage.sql.exec(
        `CREATE INDEX IF NOT EXISTS "idx_job_schedules_due" ON "${JOB_SCHEDULES_TABLE}" ("status", "next_run_at", "id")`
      );
    }
  }
];
/**
 * Apply every migration newer than the version recorded in KV.
 * Each migration and its version bump run inside transactionSync, so the
 * schema change and the recorded version commit (or roll back) together.
 */
function ensureJobsSchema(ctx) {
  // -1 means "nothing applied yet" so the version-0 migration runs.
  const currentVersion = ctx.storage.kv.get(JOBS_SCHEMA_VERSION_KEY) ?? -1;
  for (const migration of jobsSchemaMigrations) {
    if (migration.version <= currentVersion) continue;
    ctx.storage.transactionSync(() => {
      migration.up(ctx.storage);
      ctx.storage.kv.put(JOBS_SCHEMA_VERSION_KEY, migration.version);
    });
  }
}
|
|
76
|
+
|
|
77
|
+
// src/jobs/storage.ts
/** Run one parameterized SQL statement and materialize every result row. */
function execute(storage, sql, parameters = []) {
  const cursor = storage.sql.exec(sql, ...parameters);
  return cursor.toArray();
}
/** Decode a persisted JSON payload column back into its original value. */
function parsePayload(payload) {
  return JSON.parse(payload);
}
/** Convert a snake_case `__jobs` row into the public camelCase record. */
function toJobRunRecord(row) {
  const { id, type, status } = row;
  return {
    id,
    type,
    status,
    payload: parsePayload(row.payload),
    scheduledAt: row.scheduled_at,
    startedAt: row.started_at,
    finishedAt: row.finished_at,
    errorMessage: row.error_message,
    errorStack: row.error_stack,
    scheduleId: row.schedule_id,
    createdAt: row.created_at,
    updatedAt: row.updated_at
  };
}
/** Convert a snake_case `__job_schedules` row into the public camelCase record. */
function toIntervalScheduleRecord(row) {
  const { id, type } = row;
  return {
    id,
    type,
    dedupeKey: row.dedupe_key,
    payload: parsePayload(row.payload),
    intervalMs: row.interval_ms,
    nextRunAt: row.next_run_at,
    status: row.status,
    createdAt: row.created_at,
    updatedAt: row.updated_at,
    lastRunAt: row.last_run_at
  };
}
|
|
114
|
+
/**
 * Persist a single one-off job run in the "queued" state.
 * `input` must already be validated and JSON-serializable — it is stored as
 * JSON text. Returns the camelCase record of the row just written.
 */
function insertOneOffJob({
  ctx,
  type,
  input,
  at
}) {
  const now = Date.now();
  // Build the full snake_case row up front so the INSERT and the returned
  // record are guaranteed to agree.
  const row = {
    id: crypto.randomUUID(),
    type,
    status: "queued",
    payload: JSON.stringify(input),
    scheduled_at: at,
    started_at: null,
    finished_at: null,
    error_message: null,
    error_stack: null,
    schedule_id: null,
    created_at: now,
    updated_at: now
  };
  ctx.storage.transactionSync(() => {
    execute(
      ctx.storage,
      `INSERT INTO "${JOBS_TABLE}" (
        "id",
        "type",
        "status",
        "payload",
        "scheduled_at",
        "started_at",
        "finished_at",
        "error_message",
        "error_stack",
        "schedule_id",
        "created_at",
        "updated_at"
      ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
      [
        row.id,
        row.type,
        row.status,
        row.payload,
        row.scheduled_at,
        row.started_at,
        row.finished_at,
        row.error_message,
        row.error_stack,
        row.schedule_id,
        row.created_at,
        row.updated_at
      ]
    );
  });
  // Map back to the public camelCase shape (payload is re-parsed from JSON).
  return toJobRunRecord(row);
}
|
|
170
|
+
/**
 * Create or refresh the interval schedule identified by (type, dedupeKey).
 * On conflict the existing row keeps its id/created_at/last_run_at while
 * payload, interval, next run, status, and updated_at are replaced (a
 * cancelled schedule is thereby re-activated). Returns the schedule row as
 * it exists after the upsert.
 */
function upsertIntervalSchedule({
  ctx,
  type,
  dedupeKey,
  input,
  everyMs,
  startAt
}) {
  const now = Date.now();
  // Fresh id — only used on insert; discarded when the row already exists.
  const scheduleId = crypto.randomUUID();
  const payload = JSON.stringify(input);
  ctx.storage.transactionSync(() => {
    execute(
      ctx.storage,
      `INSERT INTO "${JOB_SCHEDULES_TABLE}" (
        "id",
        "type",
        "dedupe_key",
        "payload",
        "interval_ms",
        "next_run_at",
        "status",
        "created_at",
        "updated_at",
        "last_run_at"
      ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
      ON CONFLICT ("type", "dedupe_key") DO UPDATE SET
        "payload" = excluded."payload",
        "interval_ms" = excluded."interval_ms",
        "next_run_at" = excluded."next_run_at",
        "status" = excluded."status",
        "updated_at" = excluded."updated_at"`,
      [scheduleId, type, dedupeKey, payload, everyMs, startAt, "active", now, now, null]
    );
  });
  // Re-read by the unique (type, dedupe_key) key: the surviving row may be
  // the pre-existing one rather than the row we just tried to insert.
  const [row] = execute(
    ctx.storage,
    `SELECT
      "id",
      "type",
      "dedupe_key",
      "payload",
      "interval_ms",
      "next_run_at",
      "status",
      "created_at",
      "updated_at",
      "last_run_at"
    FROM "${JOB_SCHEDULES_TABLE}"
    WHERE "type" = ? AND "dedupe_key" = ?
    LIMIT 1`,
    [type, dedupeKey]
  );
  if (!row) {
    throw new Error(`Failed to create schedule for job type "${type}"`);
  }
  return toIntervalScheduleRecord(row);
}
|
|
228
|
+
/**
 * Soft-cancel the active schedule for (type, dedupeKey).
 * Returns false when no active schedule exists; true after flipping the row
 * to 'cancelled' (the row is kept, not deleted).
 */
function cancelIntervalSchedule({
  ctx,
  type,
  dedupeKey
}) {
  const [existing] = execute(
    ctx.storage,
    `SELECT "id" FROM "${JOB_SCHEDULES_TABLE}"
    WHERE "type" = ? AND "dedupe_key" = ? AND "status" = 'active'
    LIMIT 1`,
    [type, dedupeKey]
  );
  if (!existing) {
    return false;
  }
  const now = Date.now();
  ctx.storage.transactionSync(() => {
    execute(
      ctx.storage,
      `UPDATE "${JOB_SCHEDULES_TABLE}"
      SET "status" = 'cancelled', "updated_at" = ?
      WHERE "id" = ?`,
      [now, existing.id]
    );
  });
  return true;
}
|
|
255
|
+
/**
 * Convert every active schedule whose next_run_at is at or before `now` into
 * a queued row in the jobs table, then advance the schedule. The whole sweep
 * runs in one transaction, so a job row is never enqueued without its
 * schedule moving forward (or vice versa). Returns the number of job rows
 * inserted.
 */
function materializeDueSchedules(ctx, now) {
  let insertedJobs = 0;
  ctx.storage.transactionSync(() => {
    const dueSchedules = execute(
      ctx.storage,
      `SELECT
        "id",
        "type",
        "dedupe_key",
        "payload",
        "interval_ms",
        "next_run_at",
        "status",
        "created_at",
        "updated_at",
        "last_run_at"
      FROM "${JOB_SCHEDULES_TABLE}"
      WHERE "status" = 'active' AND "next_run_at" <= ?
      ORDER BY "next_run_at" ASC, "id" ASC`,
      [now]
    );
    for (const schedule of dueSchedules) {
      const runId = crypto.randomUUID();
      // Enqueue a run stamped with the time the schedule was due, linked back
      // to the schedule via schedule_id. Payload is already JSON text.
      execute(
        ctx.storage,
        `INSERT INTO "${JOBS_TABLE}" (
          "id",
          "type",
          "status",
          "payload",
          "scheduled_at",
          "started_at",
          "finished_at",
          "error_message",
          "error_stack",
          "schedule_id",
          "created_at",
          "updated_at"
        ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
        [
          runId,
          schedule.type,
          "queued",
          schedule.payload,
          schedule.next_run_at,
          null,
          null,
          null,
          null,
          schedule.id,
          now,
          now
        ]
      );
      // Advance: next run is one interval past `now` — intervals missed while
      // the DO was idle are not backfilled.
      execute(
        ctx.storage,
        `UPDATE "${JOB_SCHEDULES_TABLE}"
        SET "last_run_at" = ?, "next_run_at" = ?, "updated_at" = ?
        WHERE "id" = ?`,
        [now, now + schedule.interval_ms, now, schedule.id]
      );
      insertedJobs += 1;
    }
  });
  return insertedJobs;
}
|
|
321
|
+
/**
 * Return up to `limit` queued job rows due at or before `now`, oldest first
 * with id as a deterministic tiebreak. Rows come back in raw snake_case
 * column form (callers map them with toJobRunRecord).
 */
function getDueQueuedJobs(ctx, now, limit) {
  return execute(
    ctx.storage,
    `SELECT
      "id",
      "type",
      "status",
      "payload",
      "scheduled_at",
      "started_at",
      "finished_at",
      "error_message",
      "error_stack",
      "schedule_id",
      "created_at",
      "updated_at"
    FROM "${JOBS_TABLE}"
    WHERE "status" = 'queued' AND "scheduled_at" <= ?
    ORDER BY "scheduled_at" ASC, "id" ASC
    LIMIT ?`,
    [now, limit]
  );
}
|
|
344
|
+
/** Transition a job row to 'running', stamping started_at and updated_at. */
function markJobRunning(ctx, jobId, startedAt) {
  ctx.storage.transactionSync(() => {
    execute(
      ctx.storage,
      `UPDATE "${JOBS_TABLE}"
      SET "status" = 'running', "started_at" = ?, "updated_at" = ?
      WHERE "id" = ?`,
      [startedAt, startedAt, jobId]
    );
  });
}
|
|
355
|
+
/**
 * Transition a job row to 'completed', stamping finished_at/updated_at and
 * clearing any error columns left over from a previous attempt.
 */
function markJobCompleted(ctx, jobId, finishedAt) {
  ctx.storage.transactionSync(() => {
    execute(
      ctx.storage,
      `UPDATE "${JOBS_TABLE}"
      SET
        "status" = 'completed',
        "finished_at" = ?,
        "updated_at" = ?,
        "error_message" = NULL,
        "error_stack" = NULL
      WHERE "id" = ?`,
      [finishedAt, finishedAt, jobId]
    );
  });
}
|
|
371
|
+
/**
 * Normalize any thrown value into the { message, stack } pair persisted on a
 * failed job row. Non-Error throwables are stringified and carry no stack.
 */
function toErrorDetails(error) {
  const isRealError = error instanceof Error;
  const message = isRealError ? error.message : String(error);
  const stack = isRealError && error.stack != null ? error.stack : null;
  return { message, stack };
}
|
|
383
|
+
/**
 * Transition a job row to 'failed', recording the normalized error message
 * and stack (see toErrorDetails) alongside finished_at/updated_at.
 */
function markJobFailed(ctx, jobId, finishedAt, error) {
  const details = toErrorDetails(error);
  ctx.storage.transactionSync(() => {
    execute(
      ctx.storage,
      `UPDATE "${JOBS_TABLE}"
      SET
        "status" = 'failed',
        "finished_at" = ?,
        "updated_at" = ?,
        "error_message" = ?,
        "error_stack" = ?
      WHERE "id" = ?`,
      [finishedAt, finishedAt, details.message, details.stack, jobId]
    );
  });
}
|
|
400
|
+
/**
 * Recompute the Durable Object alarm from persisted state: the earliest of
 * (a) the oldest queued job's scheduled_at and (b) the soonest active
 * schedule's next_run_at. Clears the alarm when neither exists.
 * Returns the timestamp the alarm was set to, or null when it was cleared.
 */
async function setNextAlarmFromDb(ctx) {
  const [jobRow] = execute(
    ctx.storage,
    `SELECT MIN("scheduled_at") AS "next_at"
    FROM "${JOBS_TABLE}"
    WHERE "status" = 'queued'`
  );
  const [scheduleRow] = execute(
    ctx.storage,
    `SELECT MIN("next_run_at") AS "next_at"
    FROM "${JOB_SCHEDULES_TABLE}"
    WHERE "status" = 'active'`
  );
  // MIN(...) yields one row whose next_at is NULL when no rows matched.
  const candidates = [jobRow?.next_at, scheduleRow?.next_at].filter(
    (timestamp) => timestamp !== null && timestamp !== void 0
  );
  if (candidates.length === 0) {
    await ctx.storage.deleteAlarm();
    return null;
  }
  const nextAlarmAt = Math.min(...candidates);
  await ctx.storage.setAlarm(nextAlarmAt);
  return nextAlarmAt;
}
|
|
430
|
+
|
|
431
|
+
// src/jobs/runtime.ts
/** Assert a job object carries the internals symbol set by defineJob(). */
function getInternalJob(job) {
  if (!job[jobDefinitionInternals]) {
    throw new Error(`Invalid job "${job.type}". Jobs must be created by defineJob(...).input(...).handler(...).`);
  }
  return job;
}
// Shared predicate for "finite positive integer".
const isPositiveInteger = (value) => Number.isFinite(value) && Number.isInteger(value) && value >= 1;
/** Validate the per-alarm batch cap; throws unless it is a positive integer. */
function validateMaxJobsPerAlarm(maxJobsPerAlarm) {
  if (isPositiveInteger(maxJobsPerAlarm)) {
    return maxJobsPerAlarm;
  }
  throw new Error(`Invalid "maxJobsPerAlarm". Expected a positive integer.`);
}
/** Look up a job's registration; throws when it was not passed to setupJobs. */
function requireRegisteredJob(jobsByType, job) {
  const registered = jobsByType.get(job.type);
  if (registered) {
    return registered;
  }
  throw new Error(`Job type "${job.type}" is not registered. Pass it to setupJobs({ jobs: [...] }).`);
}
/** Coerce a millisecond timestamp to an integer; throws on non-finite input. */
function normalizeTimestamp(value, label) {
  if (Number.isFinite(value)) {
    return Math.floor(value);
  }
  throw new Error(`Invalid ${label}. Expected a finite timestamp in milliseconds.`);
}
/** Validate the recurrence interval; throws unless a positive integer of ms. */
function normalizeIntervalMs(everyMs) {
  if (isPositiveInteger(everyMs)) {
    return everyMs;
  }
  throw new Error(`Invalid "everyMs". Expected a positive integer number of milliseconds.`);
}
/** Run the job's Standard Schema over `input`; throws with the first issue's message. */
async function parseJobInput(schema, input) {
  const result = await schema["~standard"].validate(input);
  if (!result.issues) {
    return result.value;
  }
  const firstMessage = result.issues[0]?.message;
  const reason = firstMessage ? `Invalid "input". ${firstMessage}` : `Invalid "input". Payload does not match schema.`;
  throw new Error(reason);
}
/**
 * Guard persistence: the payload must survive a JSON round-trip and the
 * round-tripped value must still satisfy the schema.
 */
async function validatePersistedInput(schema, input) {
  let serialized;
  try {
    serialized = JSON.stringify(input);
  } catch (error) {
    throw new Error(`Invalid "input". Job payload must be JSON-serializable before persistence: ${String(error)}`);
  }
  if (serialized === void 0) {
    throw new Error(`Invalid "input". Job payload must serialize to JSON.`);
  }
  const result = await schema["~standard"].validate(JSON.parse(serialized));
  if (result.issues) {
    throw new Error(`Invalid "input". Job payload must remain valid after JSON serialization for persisted jobs.`);
  }
}
/** Reject empty or whitespace-only dedupe keys. */
function validateDedupeKey(dedupeKey) {
  const isUsable = Boolean(dedupeKey) && dedupeKey.trim().length > 0;
  if (!isUsable) {
    throw new Error(`Invalid "dedupeKey". Expected a non-empty string.`);
  }
}
|
|
495
|
+
/**
 * Initialize the persisted job runtime for a Durable Object.
 * Registers the given job definitions, applies the SQLite schema migrations,
 * and re-arms the DO alarm from whatever is already persisted. Returns the
 * runtime handle: { onAlarm, setNextAlarm, schedule, scheduleInterval,
 * cancelInterval }.
 */
async function setupJobs(options) {
  // Per-alarm processing cap; defaults to 50 jobs.
  const maxJobsPerAlarm = validateMaxJobsPerAlarm(options.maxJobsPerAlarm ?? 50);
  const jobsByType = /* @__PURE__ */ new Map();
  for (const job of options.jobs) {
    // Throws unless the job was built via defineJob(...).input(...).handler(...).
    const internalJob = getInternalJob(job);
    if (jobsByType.has(job.type)) {
      throw new Error(`Duplicate job type "${job.type}" during setupJobs.`);
    }
    jobsByType.set(job.type, internalJob);
  }
  ensureJobsSchema(options.ctx);
  // Reconcile the alarm with persisted jobs/schedules (e.g. after a restart).
  await setNextAlarmFromDb(options.ctx);
  // Alarm handler: materialize due interval schedules, drain due queued jobs
  // up to the cap, then re-arm the alarm from what remains.
  const onAlarm = async () => {
    const now = Date.now();
    materializeDueSchedules(options.ctx, now);
    let processedJobs = 0;
    while (processedJobs < maxJobsPerAlarm) {
      const remaining = maxJobsPerAlarm - processedJobs;
      // Re-query with a fresh Date.now() each pass so jobs that became due
      // while handlers ran are picked up in the same alarm.
      const dueJobs = getDueQueuedJobs(options.ctx, Date.now(), remaining);
      if (dueJobs.length === 0) {
        break;
      }
      for (const jobRow of dueJobs) {
        if (processedJobs >= maxJobsPerAlarm) {
          break;
        }
        const internalJob = jobsByType.get(jobRow.type);
        const startedAt = Date.now();
        // Mark running before executing so a crash mid-handler is visible.
        markJobRunning(options.ctx, jobRow.id, startedAt);
        try {
          if (!internalJob) {
            throw new Error(`No registered handler for job type "${jobRow.type}".`);
          }
          const queuedRecord = toJobRunRecord(jobRow);
          // Re-validate the persisted payload against the job's schema.
          const parsed = await internalJob[jobDefinitionInternals].schema["~standard"].validate(queuedRecord.payload);
          if (parsed.issues) {
            throw new Error(`Invalid persisted payload for job type "${jobRow.type}".`);
          }
          const input = parsed.value;
          // Snapshot reflecting the transition applied by markJobRunning.
          const runningRecord = {
            ...queuedRecord,
            status: "running",
            payload: input,
            startedAt,
            updatedAt: startedAt
          };
          await internalJob[jobDefinitionInternals].handler({
            input,
            context: options.context,
            job: runningRecord
          });
          markJobCompleted(options.ctx, jobRow.id, Date.now());
        } catch (error) {
          // Failures are recorded on the row and never rethrown, so one bad
          // job cannot abort the rest of the batch.
          markJobFailed(options.ctx, jobRow.id, Date.now(), error);
        }
        processedJobs += 1;
      }
    }
    const nextAlarmAt = await setNextAlarmFromDb(options.ctx);
    return {
      processedJobs,
      nextAlarmAt
    };
  };
  return {
    onAlarm,
    setNextAlarm: async () => setNextAlarmFromDb(options.ctx),
    // Queue a one-off run of `job` becoming due at `scheduleOptions.at`.
    schedule: (async (job, scheduleOptions) => {
      const registered = requireRegisteredJob(jobsByType, job);
      const schema = registered[jobDefinitionInternals].schema;
      const at = normalizeTimestamp(scheduleOptions.at, `"at"`);
      const input = await parseJobInput(schema, scheduleOptions.input);
      // Reject payloads that would not survive a JSON round-trip.
      await validatePersistedInput(schema, input);
      const record = insertOneOffJob({
        ctx: options.ctx,
        type: job.type,
        input,
        at
      });
      await setNextAlarmFromDb(options.ctx);
      return record;
    }),
    // Create/refresh a recurring schedule keyed by (job.type, dedupeKey).
    scheduleInterval: (async (job, scheduleOptions) => {
      const registered = requireRegisteredJob(jobsByType, job);
      const schema = registered[jobDefinitionInternals].schema;
      validateDedupeKey(scheduleOptions.dedupeKey);
      const everyMs = normalizeIntervalMs(scheduleOptions.everyMs);
      // First run defaults to one interval from now.
      const startAt = normalizeTimestamp(scheduleOptions.startAt ?? Date.now() + everyMs, `"startAt"`);
      const input = await parseJobInput(schema, scheduleOptions.input);
      await validatePersistedInput(schema, input);
      const record = upsertIntervalSchedule({
        ctx: options.ctx,
        type: job.type,
        dedupeKey: scheduleOptions.dedupeKey,
        input,
        everyMs,
        startAt
      });
      await setNextAlarmFromDb(options.ctx);
      return record;
    }),
    // Soft-cancel a recurring schedule; resolves false when none was active.
    cancelInterval: (async (job, cancelOptions) => {
      requireRegisteredJob(jobsByType, job);
      validateDedupeKey(cancelOptions.dedupeKey);
      const cancelled = cancelIntervalSchedule({
        ctx: options.ctx,
        type: job.type,
        dedupeKey: cancelOptions.dedupeKey
      });
      await setNextAlarmFromDb(options.ctx);
      return cancelled;
    })
  };
}
|
|
609
|
+
export {
|
|
610
|
+
createDefineJob,
|
|
611
|
+
setupJobs
|
|
612
|
+
};
|
|
613
|
+
//# sourceMappingURL=jobs.js.map
|
package/dist/jobs.js.map
ADDED
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"sources":["../src/jobs/define-job.ts","../src/jobs/schema.ts","../src/jobs/storage.ts","../src/jobs/runtime.ts"],"sourcesContent":["import type { StandardSchemaV1 } from \"@standard-schema/spec\";\nimport type { CreateDefineJobBuilder, DefinedJob, DefineJobBuilder, DefineJobInputBuilder, JobHandler } from \"./types\";\n\nexport const jobDefinitionInternals = Symbol.for(\"@sqlite-sync/cloudflare/jobs/definition\");\n\ntype JobDefinitionInternals<TType extends string, TSchema extends StandardSchemaV1> = {\n schema: TSchema;\n handler: JobHandler<StandardSchemaV1.InferOutput<TSchema>, Record<string, unknown>, TType>;\n};\n\nexport type InternalDefinedJob<TType extends string, TSchema extends StandardSchemaV1> = DefinedJob<\n TType,\n TSchema,\n Record<string, unknown>\n> & {\n [jobDefinitionInternals]: JobDefinitionInternals<TType, TSchema>;\n};\n\nexport function createDefineJob<\n TContext extends Record<string, unknown> = Record<string, unknown>,\n>(): CreateDefineJobBuilder<TContext> {\n return function defineJob<TType extends string>(options: { type: TType }): DefineJobBuilder<TType, TContext> {\n return {\n input: <TSchema extends StandardSchemaV1>(schema: TSchema): DefineJobInputBuilder<TType, TSchema, TContext> => ({\n handler: (handler: JobHandler<StandardSchemaV1.InferOutput<TSchema>, TContext, TType>) => {\n const job: InternalDefinedJob<TType, TSchema> = {\n type: options.type,\n [jobDefinitionInternals]: {\n schema,\n handler: handler as JobHandler<StandardSchemaV1.InferOutput<TSchema>, Record<string, unknown>, TType>,\n },\n };\n return job as DefinedJob<TType, TSchema, TContext>;\n },\n }),\n };\n };\n}\n","const JOBS_SCHEMA_VERSION_KEY = \"jobs-schema-version\";\n\nexport const JOBS_TABLE = \"__jobs\";\nexport const JOB_SCHEDULES_TABLE = \"__job_schedules\";\n\ntype JobsSchemaMigration = {\n version: number;\n up: (storage: DurableObjectStorage) => void;\n};\n\nconst jobsSchemaMigrations: JobsSchemaMigration[] = [\n {\n version: 0,\n up: 
(storage) => {\n storage.sql.exec(`CREATE TABLE IF NOT EXISTS \"${JOBS_TABLE}\" (\n \"id\" TEXT NOT NULL PRIMARY KEY,\n \"type\" TEXT NOT NULL,\n \"status\" TEXT NOT NULL,\n \"payload\" TEXT NOT NULL,\n \"scheduled_at\" INTEGER NOT NULL,\n \"started_at\" INTEGER,\n \"finished_at\" INTEGER,\n \"error_message\" TEXT,\n \"error_stack\" TEXT,\n \"schedule_id\" TEXT,\n \"created_at\" INTEGER NOT NULL,\n \"updated_at\" INTEGER NOT NULL\n )`);\n\n storage.sql.exec(`CREATE INDEX IF NOT EXISTS \"idx_jobs_due\" ON \"${JOBS_TABLE}\" (\"status\", \"scheduled_at\", \"id\")`);\n\n storage.sql.exec(`CREATE TABLE IF NOT EXISTS \"${JOB_SCHEDULES_TABLE}\" (\n \"id\" TEXT NOT NULL PRIMARY KEY,\n \"type\" TEXT NOT NULL,\n \"dedupe_key\" TEXT NOT NULL,\n \"payload\" TEXT NOT NULL,\n \"interval_ms\" INTEGER NOT NULL,\n \"next_run_at\" INTEGER NOT NULL,\n \"status\" TEXT NOT NULL,\n \"created_at\" INTEGER NOT NULL,\n \"updated_at\" INTEGER NOT NULL,\n \"last_run_at\" INTEGER\n )`);\n\n storage.sql.exec(\n `CREATE UNIQUE INDEX IF NOT EXISTS \"idx_job_schedules_type_key\" ON \"${JOB_SCHEDULES_TABLE}\" (\"type\", \"dedupe_key\")`,\n );\n storage.sql.exec(\n `CREATE INDEX IF NOT EXISTS \"idx_job_schedules_due\" ON \"${JOB_SCHEDULES_TABLE}\" (\"status\", \"next_run_at\", \"id\")`,\n );\n },\n },\n];\n\nexport function ensureJobsSchema(ctx: DurableObjectState): void {\n const currentVersion = ctx.storage.kv.get<number>(JOBS_SCHEMA_VERSION_KEY) ?? 
-1;\n\n for (const migration of jobsSchemaMigrations) {\n if (migration.version <= currentVersion) continue;\n\n ctx.storage.transactionSync(() => {\n migration.up(ctx.storage);\n ctx.storage.kv.put(JOBS_SCHEMA_VERSION_KEY, migration.version);\n });\n }\n}\n","import { JOB_SCHEDULES_TABLE, JOBS_TABLE } from \"./schema\";\nimport type { IntervalScheduleRecord, JobRunRecord } from \"./types\";\n\ntype JobRow = {\n id: string;\n type: string;\n status: \"queued\" | \"running\" | \"completed\" | \"failed\" | \"cancelled\";\n payload: string;\n scheduled_at: number;\n started_at: number | null;\n finished_at: number | null;\n error_message: string | null;\n error_stack: string | null;\n schedule_id: string | null;\n created_at: number;\n updated_at: number;\n};\n\ntype ScheduleRow = {\n id: string;\n type: string;\n dedupe_key: string;\n payload: string;\n interval_ms: number;\n next_run_at: number;\n status: \"active\" | \"cancelled\";\n created_at: number;\n updated_at: number;\n last_run_at: number | null;\n};\n\nfunction execute<TResult = unknown>(\n storage: DurableObjectStorage,\n sql: string,\n parameters: readonly unknown[] = [],\n): TResult[] {\n return storage.sql.exec(sql, ...parameters).toArray() as TResult[];\n}\n\nfunction parsePayload(payload: string): unknown {\n return JSON.parse(payload);\n}\n\nexport function toJobRunRecord<TType extends string = string, TInput = unknown>(\n row: JobRow,\n): JobRunRecord<TType, TInput> {\n return {\n id: row.id,\n type: row.type as TType,\n status: row.status,\n payload: parsePayload(row.payload) as TInput,\n scheduledAt: row.scheduled_at,\n startedAt: row.started_at,\n finishedAt: row.finished_at,\n errorMessage: row.error_message,\n errorStack: row.error_stack,\n scheduleId: row.schedule_id,\n createdAt: row.created_at,\n updatedAt: row.updated_at,\n };\n}\n\nfunction toIntervalScheduleRecord<TType extends string = string, TInput = unknown>(\n row: ScheduleRow,\n): IntervalScheduleRecord<TType, TInput> {\n return 
{\n id: row.id,\n type: row.type as TType,\n dedupeKey: row.dedupe_key,\n payload: parsePayload(row.payload) as TInput,\n intervalMs: row.interval_ms,\n nextRunAt: row.next_run_at,\n status: row.status,\n createdAt: row.created_at,\n updatedAt: row.updated_at,\n lastRunAt: row.last_run_at,\n };\n}\n\nexport function insertOneOffJob<TInput>({\n ctx,\n type,\n input,\n at,\n}: {\n ctx: DurableObjectState;\n type: string;\n input: TInput;\n at: number;\n}): JobRunRecord<string, TInput> {\n const now = Date.now();\n const row: JobRow = {\n id: crypto.randomUUID(),\n type,\n status: \"queued\",\n payload: JSON.stringify(input),\n scheduled_at: at,\n started_at: null,\n finished_at: null,\n error_message: null,\n error_stack: null,\n schedule_id: null,\n created_at: now,\n updated_at: now,\n };\n\n ctx.storage.transactionSync(() => {\n execute(\n ctx.storage,\n `INSERT INTO \"${JOBS_TABLE}\" (\n \"id\",\n \"type\",\n \"status\",\n \"payload\",\n \"scheduled_at\",\n \"started_at\",\n \"finished_at\",\n \"error_message\",\n \"error_stack\",\n \"schedule_id\",\n \"created_at\",\n \"updated_at\"\n ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,\n [\n row.id,\n row.type,\n row.status,\n row.payload,\n row.scheduled_at,\n row.started_at,\n row.finished_at,\n row.error_message,\n row.error_stack,\n row.schedule_id,\n row.created_at,\n row.updated_at,\n ],\n );\n });\n\n return toJobRunRecord<string, TInput>(row);\n}\n\nexport function upsertIntervalSchedule<TInput>({\n ctx,\n type,\n dedupeKey,\n input,\n everyMs,\n startAt,\n}: {\n ctx: DurableObjectState;\n type: string;\n dedupeKey: string;\n input: TInput;\n everyMs: number;\n startAt: number;\n}): IntervalScheduleRecord<string, TInput> {\n const now = Date.now();\n const scheduleId = crypto.randomUUID();\n const payload = JSON.stringify(input);\n\n ctx.storage.transactionSync(() => {\n execute(\n ctx.storage,\n `INSERT INTO \"${JOB_SCHEDULES_TABLE}\" (\n \"id\",\n \"type\",\n \"dedupe_key\",\n \"payload\",\n 
\"interval_ms\",\n \"next_run_at\",\n \"status\",\n \"created_at\",\n \"updated_at\",\n \"last_run_at\"\n ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n ON CONFLICT (\"type\", \"dedupe_key\") DO UPDATE SET\n \"payload\" = excluded.\"payload\",\n \"interval_ms\" = excluded.\"interval_ms\",\n \"next_run_at\" = excluded.\"next_run_at\",\n \"status\" = excluded.\"status\",\n \"updated_at\" = excluded.\"updated_at\"`,\n [scheduleId, type, dedupeKey, payload, everyMs, startAt, \"active\", now, now, null],\n );\n });\n\n const [row] = execute<ScheduleRow>(\n ctx.storage,\n `SELECT\n \"id\",\n \"type\",\n \"dedupe_key\",\n \"payload\",\n \"interval_ms\",\n \"next_run_at\",\n \"status\",\n \"created_at\",\n \"updated_at\",\n \"last_run_at\"\n FROM \"${JOB_SCHEDULES_TABLE}\"\n WHERE \"type\" = ? AND \"dedupe_key\" = ?\n LIMIT 1`,\n [type, dedupeKey],\n );\n\n if (!row) {\n throw new Error(`Failed to create schedule for job type \"${type}\"`);\n }\n\n return toIntervalScheduleRecord<string, TInput>(row);\n}\n\nexport function cancelIntervalSchedule({\n ctx,\n type,\n dedupeKey,\n}: {\n ctx: DurableObjectState;\n type: string;\n dedupeKey: string;\n}): boolean {\n const [existing] = execute<Pick<ScheduleRow, \"id\">>(\n ctx.storage,\n `SELECT \"id\" FROM \"${JOB_SCHEDULES_TABLE}\"\n WHERE \"type\" = ? AND \"dedupe_key\" = ? 
AND \"status\" = 'active'\n LIMIT 1`,\n [type, dedupeKey],\n );\n\n if (!existing) {\n return false;\n }\n\n const now = Date.now();\n ctx.storage.transactionSync(() => {\n execute(\n ctx.storage,\n `UPDATE \"${JOB_SCHEDULES_TABLE}\"\n SET \"status\" = 'cancelled', \"updated_at\" = ?\n WHERE \"id\" = ?`,\n [now, existing.id],\n );\n });\n\n return true;\n}\n\nexport function materializeDueSchedules(ctx: DurableObjectState, now: number): number {\n let insertedJobs = 0;\n\n ctx.storage.transactionSync(() => {\n const dueSchedules = execute<ScheduleRow>(\n ctx.storage,\n `SELECT\n \"id\",\n \"type\",\n \"dedupe_key\",\n \"payload\",\n \"interval_ms\",\n \"next_run_at\",\n \"status\",\n \"created_at\",\n \"updated_at\",\n \"last_run_at\"\n FROM \"${JOB_SCHEDULES_TABLE}\"\n WHERE \"status\" = 'active' AND \"next_run_at\" <= ?\n ORDER BY \"next_run_at\" ASC, \"id\" ASC`,\n [now],\n );\n\n for (const schedule of dueSchedules) {\n const runId = crypto.randomUUID();\n execute(\n ctx.storage,\n `INSERT INTO \"${JOBS_TABLE}\" (\n \"id\",\n \"type\",\n \"status\",\n \"payload\",\n \"scheduled_at\",\n \"started_at\",\n \"finished_at\",\n \"error_message\",\n \"error_stack\",\n \"schedule_id\",\n \"created_at\",\n \"updated_at\"\n ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,\n [\n runId,\n schedule.type,\n \"queued\",\n schedule.payload,\n schedule.next_run_at,\n null,\n null,\n null,\n null,\n schedule.id,\n now,\n now,\n ],\n );\n\n execute(\n ctx.storage,\n `UPDATE \"${JOB_SCHEDULES_TABLE}\"\n SET \"last_run_at\" = ?, \"next_run_at\" = ?, \"updated_at\" = ?\n WHERE \"id\" = ?`,\n [now, now + schedule.interval_ms, now, schedule.id],\n );\n\n insertedJobs += 1;\n }\n });\n\n return insertedJobs;\n}\n\nexport function getDueQueuedJobs(ctx: DurableObjectState, now: number, limit: number): JobRow[] {\n return execute<JobRow>(\n ctx.storage,\n `SELECT\n \"id\",\n \"type\",\n \"status\",\n \"payload\",\n \"scheduled_at\",\n \"started_at\",\n \"finished_at\",\n 
\"error_message\",\n \"error_stack\",\n \"schedule_id\",\n \"created_at\",\n \"updated_at\"\n FROM \"${JOBS_TABLE}\"\n WHERE \"status\" = 'queued' AND \"scheduled_at\" <= ?\n ORDER BY \"scheduled_at\" ASC, \"id\" ASC\n LIMIT ?`,\n [now, limit],\n );\n}\n\nexport function markJobRunning(ctx: DurableObjectState, jobId: string, startedAt: number): void {\n ctx.storage.transactionSync(() => {\n execute(\n ctx.storage,\n `UPDATE \"${JOBS_TABLE}\"\n SET \"status\" = 'running', \"started_at\" = ?, \"updated_at\" = ?\n WHERE \"id\" = ?`,\n [startedAt, startedAt, jobId],\n );\n });\n}\n\nexport function markJobCompleted(ctx: DurableObjectState, jobId: string, finishedAt: number): void {\n ctx.storage.transactionSync(() => {\n execute(\n ctx.storage,\n `UPDATE \"${JOBS_TABLE}\"\n SET\n \"status\" = 'completed',\n \"finished_at\" = ?,\n \"updated_at\" = ?,\n \"error_message\" = NULL,\n \"error_stack\" = NULL\n WHERE \"id\" = ?`,\n [finishedAt, finishedAt, jobId],\n );\n });\n}\n\nfunction toErrorDetails(error: unknown): { message: string; stack: string | null } {\n if (error instanceof Error) {\n return {\n message: error.message,\n stack: error.stack ?? 
null,\n };\n }\n\n return {\n message: String(error),\n stack: null,\n };\n}\n\nexport function markJobFailed(ctx: DurableObjectState, jobId: string, finishedAt: number, error: unknown): void {\n const details = toErrorDetails(error);\n\n ctx.storage.transactionSync(() => {\n execute(\n ctx.storage,\n `UPDATE \"${JOBS_TABLE}\"\n SET\n \"status\" = 'failed',\n \"finished_at\" = ?,\n \"updated_at\" = ?,\n \"error_message\" = ?,\n \"error_stack\" = ?\n WHERE \"id\" = ?`,\n [finishedAt, finishedAt, details.message, details.stack, jobId],\n );\n });\n}\n\nexport async function setNextAlarmFromDb(ctx: DurableObjectState): Promise<number | null> {\n const [jobRow] = execute<{ next_at: number | null }>(\n ctx.storage,\n `SELECT MIN(\"scheduled_at\") AS \"next_at\"\n FROM \"${JOBS_TABLE}\"\n WHERE \"status\" = 'queued'`,\n );\n const [scheduleRow] = execute<{ next_at: number | null }>(\n ctx.storage,\n `SELECT MIN(\"next_run_at\") AS \"next_at\"\n FROM \"${JOB_SCHEDULES_TABLE}\"\n WHERE \"status\" = 'active'`,\n );\n\n const nextJobAt = jobRow?.next_at ?? null;\n const nextScheduleAt = scheduleRow?.next_at ?? 
null;\n\n let nextAlarmAt: number | null = null;\n if (nextJobAt !== null && nextScheduleAt !== null) {\n nextAlarmAt = Math.min(nextJobAt, nextScheduleAt);\n } else if (nextJobAt !== null) {\n nextAlarmAt = nextJobAt;\n } else if (nextScheduleAt !== null) {\n nextAlarmAt = nextScheduleAt;\n }\n\n if (nextAlarmAt === null) {\n await ctx.storage.deleteAlarm();\n return null;\n }\n\n await ctx.storage.setAlarm(nextAlarmAt);\n return nextAlarmAt;\n}\n\nexport type { JobRow };\n","import type { StandardSchemaV1 } from \"@standard-schema/spec\";\nimport { type InternalDefinedJob, jobDefinitionInternals } from \"./define-job\";\nimport { ensureJobsSchema } from \"./schema\";\nimport {\n cancelIntervalSchedule,\n getDueQueuedJobs,\n insertOneOffJob,\n markJobCompleted,\n markJobFailed,\n markJobRunning,\n materializeDueSchedules,\n setNextAlarmFromDb,\n toJobRunRecord,\n upsertIntervalSchedule,\n} from \"./storage\";\nimport type { AnyDefinedJob, JobRunResult, JobRuntime } from \"./types\";\n\ntype SetupJobsOptions<TContext extends Record<string, unknown>, TJobs extends readonly AnyDefinedJob[]> = {\n jobs: TJobs;\n ctx: DurableObjectState;\n context: TContext;\n maxJobsPerAlarm?: number;\n};\n\ntype InternalJob = InternalDefinedJob<string, StandardSchemaV1>;\n\nfunction getInternalJob(job: AnyDefinedJob): InternalJob {\n const internal = (job as InternalJob)[jobDefinitionInternals];\n if (!internal) {\n throw new Error(`Invalid job \"${job.type}\". Jobs must be created by defineJob(...).input(...).handler(...).`);\n }\n\n return job as InternalJob;\n}\n\nfunction validateMaxJobsPerAlarm(maxJobsPerAlarm: number): number {\n if (!Number.isFinite(maxJobsPerAlarm) || !Number.isInteger(maxJobsPerAlarm) || maxJobsPerAlarm < 1) {\n throw new Error(`Invalid \"maxJobsPerAlarm\". 
Expected a positive integer.`);\n }\n return maxJobsPerAlarm;\n}\n\nfunction requireRegisteredJob(jobsByType: Map<string, InternalJob>, job: AnyDefinedJob): InternalJob {\n const registered = jobsByType.get(job.type);\n if (!registered) {\n throw new Error(`Job type \"${job.type}\" is not registered. Pass it to setupJobs({ jobs: [...] }).`);\n }\n return registered;\n}\n\nfunction normalizeTimestamp(value: number, label: string): number {\n if (!Number.isFinite(value)) {\n throw new Error(`Invalid ${label}. Expected a finite timestamp in milliseconds.`);\n }\n return Math.floor(value);\n}\n\nfunction normalizeIntervalMs(everyMs: number): number {\n if (!Number.isFinite(everyMs) || !Number.isInteger(everyMs) || everyMs < 1) {\n throw new Error(`Invalid \"everyMs\". Expected a positive integer number of milliseconds.`);\n }\n return everyMs;\n}\n\nasync function parseJobInput<TSchema extends StandardSchemaV1>(\n schema: TSchema,\n input: unknown,\n): Promise<StandardSchemaV1.InferOutput<TSchema>> {\n const result = await schema[\"~standard\"].validate(input);\n if (result.issues) {\n const firstMessage = result.issues[0]?.message;\n throw new Error(\n firstMessage ? `Invalid \"input\". ${firstMessage}` : `Invalid \"input\". Payload does not match schema.`,\n );\n }\n\n return result.value;\n}\n\nasync function validatePersistedInput<TSchema extends StandardSchemaV1>(\n schema: TSchema,\n input: StandardSchemaV1.InferOutput<TSchema>,\n): Promise<void> {\n let serialized: string;\n try {\n serialized = JSON.stringify(input);\n } catch (error) {\n throw new Error(`Invalid \"input\". Job payload must be JSON-serializable before persistence: ${String(error)}`);\n }\n\n if (serialized === undefined) {\n throw new Error(`Invalid \"input\". Job payload must serialize to JSON.`);\n }\n\n const roundTripped: unknown = JSON.parse(serialized);\n const result = await schema[\"~standard\"].validate(roundTripped);\n if (result.issues) {\n throw new Error(`Invalid \"input\". 
Job payload must remain valid after JSON serialization for persisted jobs.`);\n }\n}\n\nfunction validateDedupeKey(dedupeKey: string): void {\n if (!dedupeKey || dedupeKey.trim().length === 0) {\n throw new Error(`Invalid \"dedupeKey\". Expected a non-empty string.`);\n }\n}\n\nexport async function setupJobs<TContext extends Record<string, unknown>, TJobs extends readonly AnyDefinedJob[]>(\n options: SetupJobsOptions<TContext, TJobs>,\n): Promise<JobRuntime> {\n const maxJobsPerAlarm = validateMaxJobsPerAlarm(options.maxJobsPerAlarm ?? 50);\n const jobsByType = new Map<string, InternalJob>();\n\n for (const job of options.jobs) {\n const internalJob = getInternalJob(job);\n if (jobsByType.has(job.type)) {\n throw new Error(`Duplicate job type \"${job.type}\" during setupJobs.`);\n }\n jobsByType.set(job.type, internalJob);\n }\n\n ensureJobsSchema(options.ctx);\n await setNextAlarmFromDb(options.ctx);\n\n const onAlarm = async (): Promise<JobRunResult> => {\n const now = Date.now();\n materializeDueSchedules(options.ctx, now);\n\n let processedJobs = 0;\n\n while (processedJobs < maxJobsPerAlarm) {\n const remaining = maxJobsPerAlarm - processedJobs;\n const dueJobs = getDueQueuedJobs(options.ctx, Date.now(), remaining);\n\n if (dueJobs.length === 0) {\n break;\n }\n\n for (const jobRow of dueJobs) {\n if (processedJobs >= maxJobsPerAlarm) {\n break;\n }\n\n const internalJob = jobsByType.get(jobRow.type);\n const startedAt = Date.now();\n markJobRunning(options.ctx, jobRow.id, startedAt);\n\n try {\n if (!internalJob) {\n throw new Error(`No registered handler for job type \"${jobRow.type}\".`);\n }\n\n const queuedRecord = toJobRunRecord(jobRow);\n const parsed = await internalJob[jobDefinitionInternals].schema[\"~standard\"].validate(queuedRecord.payload);\n if (parsed.issues) {\n throw new Error(`Invalid persisted payload for job type \"${jobRow.type}\".`);\n }\n\n const input = parsed.value;\n const runningRecord = {\n ...queuedRecord,\n status: \"running\" 
as const,\n payload: input,\n startedAt,\n updatedAt: startedAt,\n };\n\n await internalJob[jobDefinitionInternals].handler({\n input,\n context: options.context,\n job: runningRecord,\n });\n\n markJobCompleted(options.ctx, jobRow.id, Date.now());\n } catch (error) {\n markJobFailed(options.ctx, jobRow.id, Date.now(), error);\n }\n\n processedJobs += 1;\n }\n }\n\n const nextAlarmAt = await setNextAlarmFromDb(options.ctx);\n return {\n processedJobs,\n nextAlarmAt,\n };\n };\n\n return {\n onAlarm,\n setNextAlarm: async () => setNextAlarmFromDb(options.ctx),\n\n schedule: (async (job, scheduleOptions) => {\n const registered = requireRegisteredJob(jobsByType, job);\n const schema = registered[jobDefinitionInternals].schema;\n const at = normalizeTimestamp(scheduleOptions.at, `\"at\"`);\n const input = await parseJobInput(schema, scheduleOptions.input);\n await validatePersistedInput(schema, input);\n\n const record = insertOneOffJob({\n ctx: options.ctx,\n type: job.type,\n input,\n at,\n });\n\n await setNextAlarmFromDb(options.ctx);\n return record;\n }) as JobRuntime[\"schedule\"],\n\n scheduleInterval: (async (job, scheduleOptions) => {\n const registered = requireRegisteredJob(jobsByType, job);\n const schema = registered[jobDefinitionInternals].schema;\n validateDedupeKey(scheduleOptions.dedupeKey);\n const everyMs = normalizeIntervalMs(scheduleOptions.everyMs);\n const startAt = normalizeTimestamp(scheduleOptions.startAt ?? 
Date.now() + everyMs, `\"startAt\"`);\n const input = await parseJobInput(schema, scheduleOptions.input);\n await validatePersistedInput(schema, input);\n\n const record = upsertIntervalSchedule({\n ctx: options.ctx,\n type: job.type,\n dedupeKey: scheduleOptions.dedupeKey,\n input,\n everyMs,\n startAt,\n });\n\n await setNextAlarmFromDb(options.ctx);\n return record;\n }) as JobRuntime[\"scheduleInterval\"],\n\n cancelInterval: (async (job, cancelOptions) => {\n requireRegisteredJob(jobsByType, job);\n validateDedupeKey(cancelOptions.dedupeKey);\n\n const cancelled = cancelIntervalSchedule({\n ctx: options.ctx,\n type: job.type,\n dedupeKey: cancelOptions.dedupeKey,\n });\n\n await setNextAlarmFromDb(options.ctx);\n return cancelled;\n }) as JobRuntime[\"cancelInterval\"],\n };\n}\n"],"mappings":";AAGO,IAAM,yBAAyB,OAAO,IAAI,yCAAyC;AAenF,SAAS,kBAEsB;AACpC,SAAO,SAAS,UAAgC,SAA6D;AAC3G,WAAO;AAAA,MACL,OAAO,CAAmC,YAAsE;AAAA,QAC9G,SAAS,CAAC,YAAgF;AACxF,gBAAM,MAA0C;AAAA,YAC9C,MAAM,QAAQ;AAAA,YACd,CAAC,sBAAsB,GAAG;AAAA,cACxB;AAAA,cACA;AAAA,YACF;AAAA,UACF;AACA,iBAAO;AAAA,QACT;AAAA,MACF;AAAA,IACF;AAAA,EACF;AACF;;;ACrCA,IAAM,0BAA0B;AAEzB,IAAM,aAAa;AACnB,IAAM,sBAAsB;AAOnC,IAAM,uBAA8C;AAAA,EAClD;AAAA,IACE,SAAS;AAAA,IACT,IAAI,CAAC,YAAY;AACf,cAAQ,IAAI,KAAK,+BAA+B,UAAU;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,QAaxD;AAEF,cAAQ,IAAI,KAAK,iDAAiD,UAAU,oCAAoC;AAEhH,cAAQ,IAAI,KAAK,+BAA+B,mBAAmB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,QAWjE;AAEF,cAAQ,IAAI;AAAA,QACV,sEAAsE,mBAAmB;AAAA,MAC3F;AACA,cAAQ,IAAI;AAAA,QACV,0DAA0D,mBAAmB;AAAA,MAC/E;AAAA,IACF;AAAA,EACF;AACF;AAEO,SAAS,iBAAiB,KAA+B;AAC9D,QAAM,iBAAiB,IAAI,QAAQ,GAAG,IAAY,uBAAuB,KAAK;AAE9E,aAAW,aAAa,sBAAsB;AAC5C,QAAI,UAAU,WAAW,eAAgB;AAEzC,QAAI,QAAQ,gBAAgB,MAAM;AAChC,gBAAU,GAAG,IAAI,OAAO;AACxB,UAAI,QAAQ,GAAG,IAAI,yBAAyB,UAAU,OAAO;AAAA,IAC/D,CAAC;AAAA,EACH;AACF;;;AClCA,SAAS,QACP,SACA,KACA,aAAiC,CAAC,GACvB;AACX,SAAO,QAAQ,IAAI,KAAK,KAAK,GAAG,UAAU,EAAE,QAAQ;AACtD;AAEA,SAAS,aAAa,SAA0B;AAC9C,SAAO,KAAK,MAAM,
OAAO;AAC3B;AAEO,SAAS,eACd,KAC6B;AAC7B,SAAO;AAAA,IACL,IAAI,IAAI;AAAA,IACR,MAAM,IAAI;AAAA,IACV,QAAQ,IAAI;AAAA,IACZ,SAAS,aAAa,IAAI,OAAO;AAAA,IACjC,aAAa,IAAI;AAAA,IACjB,WAAW,IAAI;AAAA,IACf,YAAY,IAAI;AAAA,IAChB,cAAc,IAAI;AAAA,IAClB,YAAY,IAAI;AAAA,IAChB,YAAY,IAAI;AAAA,IAChB,WAAW,IAAI;AAAA,IACf,WAAW,IAAI;AAAA,EACjB;AACF;AAEA,SAAS,yBACP,KACuC;AACvC,SAAO;AAAA,IACL,IAAI,IAAI;AAAA,IACR,MAAM,IAAI;AAAA,IACV,WAAW,IAAI;AAAA,IACf,SAAS,aAAa,IAAI,OAAO;AAAA,IACjC,YAAY,IAAI;AAAA,IAChB,WAAW,IAAI;AAAA,IACf,QAAQ,IAAI;AAAA,IACZ,WAAW,IAAI;AAAA,IACf,WAAW,IAAI;AAAA,IACf,WAAW,IAAI;AAAA,EACjB;AACF;AAEO,SAAS,gBAAwB;AAAA,EACtC;AAAA,EACA;AAAA,EACA;AAAA,EACA;AACF,GAKiC;AAC/B,QAAM,MAAM,KAAK,IAAI;AACrB,QAAM,MAAc;AAAA,IAClB,IAAI,OAAO,WAAW;AAAA,IACtB;AAAA,IACA,QAAQ;AAAA,IACR,SAAS,KAAK,UAAU,KAAK;AAAA,IAC7B,cAAc;AAAA,IACd,YAAY;AAAA,IACZ,aAAa;AAAA,IACb,eAAe;AAAA,IACf,aAAa;AAAA,IACb,aAAa;AAAA,IACb,YAAY;AAAA,IACZ,YAAY;AAAA,EACd;AAEA,MAAI,QAAQ,gBAAgB,MAAM;AAChC;AAAA,MACE,IAAI;AAAA,MACJ,gBAAgB,UAAU;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAc1B;AAAA,QACE,IAAI;AAAA,QACJ,IAAI;AAAA,QACJ,IAAI;AAAA,QACJ,IAAI;AAAA,QACJ,IAAI;AAAA,QACJ,IAAI;AAAA,QACJ,IAAI;AAAA,QACJ,IAAI;AAAA,QACJ,IAAI;AAAA,QACJ,IAAI;AAAA,QACJ,IAAI;AAAA,QACJ,IAAI;AAAA,MACN;AAAA,IACF;AAAA,EACF,CAAC;AAED,SAAO,eAA+B,GAAG;AAC3C;AAEO,SAAS,uBAA+B;AAAA,EAC7C;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AACF,GAO2C;AACzC,QAAM,MAAM,KAAK,IAAI;AACrB,QAAM,aAAa,OAAO,WAAW;AACrC,QAAM,UAAU,KAAK,UAAU,KAAK;AAEpC,MAAI,QAAQ,gBAAgB,MAAM;AAChC;AAAA,MACE,IAAI;AAAA,MACJ,gBAAgB,mBAAmB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAkBnC,CAAC,YAAY,MAAM,WAAW,SAAS,SAAS,SAAS,UAAU,KAAK,KAAK,IAAI;AAAA,IACnF;AAAA,EACF,CAAC;AAED,QAAM,CAAC,GAAG,IAAI;AAAA,IACZ,IAAI;AAAA,IACJ;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,YAWQ,mBAAmB;AAAA;AAAA;AAAA,IAG3B,CAAC,MAAM,SAAS;AAAA,EAClB;AAEA,MAAI,CAAC,KAAK;AACR,UAAM,IAAI,MAAM,2CAA2C,IAAI,GAAG;AAAA,EACpE;AAEA,SAAO,yBAAyC,GAAG;AACrD;AAEO,SAAS,uBAAuB;AAAA,EACrC;A
AAA,EACA;AAAA,EACA;AACF,GAIY;AACV,QAAM,CAAC,QAAQ,IAAI;AAAA,IACjB,IAAI;AAAA,IACJ,qBAAqB,mBAAmB;AAAA;AAAA;AAAA,IAGxC,CAAC,MAAM,SAAS;AAAA,EAClB;AAEA,MAAI,CAAC,UAAU;AACb,WAAO;AAAA,EACT;AAEA,QAAM,MAAM,KAAK,IAAI;AACrB,MAAI,QAAQ,gBAAgB,MAAM;AAChC;AAAA,MACE,IAAI;AAAA,MACJ,WAAW,mBAAmB;AAAA;AAAA;AAAA,MAG9B,CAAC,KAAK,SAAS,EAAE;AAAA,IACnB;AAAA,EACF,CAAC;AAED,SAAO;AACT;AAEO,SAAS,wBAAwB,KAAyB,KAAqB;AACpF,MAAI,eAAe;AAEnB,MAAI,QAAQ,gBAAgB,MAAM;AAChC,UAAM,eAAe;AAAA,MACnB,IAAI;AAAA,MACJ;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,cAWQ,mBAAmB;AAAA;AAAA;AAAA,MAG3B,CAAC,GAAG;AAAA,IACN;AAEA,eAAW,YAAY,cAAc;AACnC,YAAM,QAAQ,OAAO,WAAW;AAChC;AAAA,QACE,IAAI;AAAA,QACJ,gBAAgB,UAAU;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,QAc1B;AAAA,UACE;AAAA,UACA,SAAS;AAAA,UACT;AAAA,UACA,SAAS;AAAA,UACT,SAAS;AAAA,UACT;AAAA,UACA;AAAA,UACA;AAAA,UACA;AAAA,UACA,SAAS;AAAA,UACT;AAAA,UACA;AAAA,QACF;AAAA,MACF;AAEA;AAAA,QACE,IAAI;AAAA,QACJ,WAAW,mBAAmB;AAAA;AAAA;AAAA,QAG9B,CAAC,KAAK,MAAM,SAAS,aAAa,KAAK,SAAS,EAAE;AAAA,MACpD;AAEA,sBAAgB;AAAA,IAClB;AAAA,EACF,CAAC;AAED,SAAO;AACT;AAEO,SAAS,iBAAiB,KAAyB,KAAa,OAAyB;AAC9F,SAAO;AAAA,IACL,IAAI;AAAA,IACJ;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,YAaQ,UAAU;AAAA;AAAA;AAAA;AAAA,IAIlB,CAAC,KAAK,KAAK;AAAA,EACb;AACF;AAEO,SAAS,eAAe,KAAyB,OAAe,WAAyB;AAC9F,MAAI,QAAQ,gBAAgB,MAAM;AAChC;AAAA,MACE,IAAI;AAAA,MACJ,WAAW,UAAU;AAAA;AAAA;AAAA,MAGrB,CAAC,WAAW,WAAW,KAAK;AAAA,IAC9B;AAAA,EACF,CAAC;AACH;AAEO,SAAS,iBAAiB,KAAyB,OAAe,YAA0B;AACjG,MAAI,QAAQ,gBAAgB,MAAM;AAChC;AAAA,MACE,IAAI;AAAA,MACJ,WAAW,UAAU;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAQrB,CAAC,YAAY,YAAY,KAAK;AAAA,IAChC;AAAA,EACF,CAAC;AACH;AAEA,SAAS,eAAe,OAA2D;AACjF,MAAI,iBAAiB,OAAO;AAC1B,WAAO;AAAA,MACL,SAAS,MAAM;AAAA,MACf,OAAO,MAAM,SAAS;AAAA,IACxB;AAAA,EACF;AAEA,SAAO;AAAA,IACL,SAAS,OAAO,KAAK;AAAA,IACrB,OAAO;AAAA,EACT;AACF;AAEO,SAAS,cAAc,KAAyB,OAAe,YAAoB,OAAsB;AAC9G,QAAM,UAAU,eAAe,KAAK;AAEpC,MAAI,QAAQ,gBAAgB,MAAM;AAChC;AAAA,MACE,IAAI;AAAA,MACJ,WAAW,UAAU;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AA
AA;AAAA,MAQrB,CAAC,YAAY,YAAY,QAAQ,SAAS,QAAQ,OAAO,KAAK;AAAA,IAChE;AAAA,EACF,CAAC;AACH;AAEA,eAAsB,mBAAmB,KAAiD;AACxF,QAAM,CAAC,MAAM,IAAI;AAAA,IACf,IAAI;AAAA,IACJ;AAAA,YACQ,UAAU;AAAA;AAAA,EAEpB;AACA,QAAM,CAAC,WAAW,IAAI;AAAA,IACpB,IAAI;AAAA,IACJ;AAAA,YACQ,mBAAmB;AAAA;AAAA,EAE7B;AAEA,QAAM,YAAY,QAAQ,WAAW;AACrC,QAAM,iBAAiB,aAAa,WAAW;AAE/C,MAAI,cAA6B;AACjC,MAAI,cAAc,QAAQ,mBAAmB,MAAM;AACjD,kBAAc,KAAK,IAAI,WAAW,cAAc;AAAA,EAClD,WAAW,cAAc,MAAM;AAC7B,kBAAc;AAAA,EAChB,WAAW,mBAAmB,MAAM;AAClC,kBAAc;AAAA,EAChB;AAEA,MAAI,gBAAgB,MAAM;AACxB,UAAM,IAAI,QAAQ,YAAY;AAC9B,WAAO;AAAA,EACT;AAEA,QAAM,IAAI,QAAQ,SAAS,WAAW;AACtC,SAAO;AACT;;;AC7ZA,SAAS,eAAe,KAAiC;AACvD,QAAM,WAAY,IAAoB,sBAAsB;AAC5D,MAAI,CAAC,UAAU;AACb,UAAM,IAAI,MAAM,gBAAgB,IAAI,IAAI,oEAAoE;AAAA,EAC9G;AAEA,SAAO;AACT;AAEA,SAAS,wBAAwB,iBAAiC;AAChE,MAAI,CAAC,OAAO,SAAS,eAAe,KAAK,CAAC,OAAO,UAAU,eAAe,KAAK,kBAAkB,GAAG;AAClG,UAAM,IAAI,MAAM,yDAAyD;AAAA,EAC3E;AACA,SAAO;AACT;AAEA,SAAS,qBAAqB,YAAsC,KAAiC;AACnG,QAAM,aAAa,WAAW,IAAI,IAAI,IAAI;AAC1C,MAAI,CAAC,YAAY;AACf,UAAM,IAAI,MAAM,aAAa,IAAI,IAAI,6DAA6D;AAAA,EACpG;AACA,SAAO;AACT;AAEA,SAAS,mBAAmB,OAAe,OAAuB;AAChE,MAAI,CAAC,OAAO,SAAS,KAAK,GAAG;AAC3B,UAAM,IAAI,MAAM,WAAW,KAAK,gDAAgD;AAAA,EAClF;AACA,SAAO,KAAK,MAAM,KAAK;AACzB;AAEA,SAAS,oBAAoB,SAAyB;AACpD,MAAI,CAAC,OAAO,SAAS,OAAO,KAAK,CAAC,OAAO,UAAU,OAAO,KAAK,UAAU,GAAG;AAC1E,UAAM,IAAI,MAAM,wEAAwE;AAAA,EAC1F;AACA,SAAO;AACT;AAEA,eAAe,cACb,QACA,OACgD;AAChD,QAAM,SAAS,MAAM,OAAO,WAAW,EAAE,SAAS,KAAK;AACvD,MAAI,OAAO,QAAQ;AACjB,UAAM,eAAe,OAAO,OAAO,CAAC,GAAG;AACvC,UAAM,IAAI;AAAA,MACR,eAAe,oBAAoB,YAAY,KAAK;AAAA,IACtD;AAAA,EACF;AAEA,SAAO,OAAO;AAChB;AAEA,eAAe,uBACb,QACA,OACe;AACf,MAAI;AACJ,MAAI;AACF,iBAAa,KAAK,UAAU,KAAK;AAAA,EACnC,SAAS,OAAO;AACd,UAAM,IAAI,MAAM,8EAA8E,OAAO,KAAK,CAAC,EAAE;AAAA,EAC/G;AAEA,MAAI,eAAe,QAAW;AAC5B,UAAM,IAAI,MAAM,sDAAsD;AAAA,EACxE;AAEA,QAAM,eAAwB,KAAK,MAAM,UAAU;AACnD,QAAM,SAAS,MAAM,OAAO,WAAW,EAAE,SAAS,YAAY;AAC9D,MAAI,OAAO,QAAQ;AACjB,UAAM,IAAI,MAAM,6FAA6F;AAAA,EAC/G;AACF;AAEA,SAAS,kBAAkB,WAAyB;AAClD,MAAI,CAAC,aAAa,UAAU,KAAK,EAAE,WAAW,GAAG;AAC/C
,UAAM,IAAI,MAAM,mDAAmD;AAAA,EACrE;AACF;AAEA,eAAsB,UACpB,SACqB;AACrB,QAAM,kBAAkB,wBAAwB,QAAQ,mBAAmB,EAAE;AAC7E,QAAM,aAAa,oBAAI,IAAyB;AAEhD,aAAW,OAAO,QAAQ,MAAM;AAC9B,UAAM,cAAc,eAAe,GAAG;AACtC,QAAI,WAAW,IAAI,IAAI,IAAI,GAAG;AAC5B,YAAM,IAAI,MAAM,uBAAuB,IAAI,IAAI,qBAAqB;AAAA,IACtE;AACA,eAAW,IAAI,IAAI,MAAM,WAAW;AAAA,EACtC;AAEA,mBAAiB,QAAQ,GAAG;AAC5B,QAAM,mBAAmB,QAAQ,GAAG;AAEpC,QAAM,UAAU,YAAmC;AACjD,UAAM,MAAM,KAAK,IAAI;AACrB,4BAAwB,QAAQ,KAAK,GAAG;AAExC,QAAI,gBAAgB;AAEpB,WAAO,gBAAgB,iBAAiB;AACtC,YAAM,YAAY,kBAAkB;AACpC,YAAM,UAAU,iBAAiB,QAAQ,KAAK,KAAK,IAAI,GAAG,SAAS;AAEnE,UAAI,QAAQ,WAAW,GAAG;AACxB;AAAA,MACF;AAEA,iBAAW,UAAU,SAAS;AAC5B,YAAI,iBAAiB,iBAAiB;AACpC;AAAA,QACF;AAEA,cAAM,cAAc,WAAW,IAAI,OAAO,IAAI;AAC9C,cAAM,YAAY,KAAK,IAAI;AAC3B,uBAAe,QAAQ,KAAK,OAAO,IAAI,SAAS;AAEhD,YAAI;AACF,cAAI,CAAC,aAAa;AAChB,kBAAM,IAAI,MAAM,uCAAuC,OAAO,IAAI,IAAI;AAAA,UACxE;AAEA,gBAAM,eAAe,eAAe,MAAM;AAC1C,gBAAM,SAAS,MAAM,YAAY,sBAAsB,EAAE,OAAO,WAAW,EAAE,SAAS,aAAa,OAAO;AAC1G,cAAI,OAAO,QAAQ;AACjB,kBAAM,IAAI,MAAM,2CAA2C,OAAO,IAAI,IAAI;AAAA,UAC5E;AAEA,gBAAM,QAAQ,OAAO;AACrB,gBAAM,gBAAgB;AAAA,YACpB,GAAG;AAAA,YACH,QAAQ;AAAA,YACR,SAAS;AAAA,YACT;AAAA,YACA,WAAW;AAAA,UACb;AAEA,gBAAM,YAAY,sBAAsB,EAAE,QAAQ;AAAA,YAChD;AAAA,YACA,SAAS,QAAQ;AAAA,YACjB,KAAK;AAAA,UACP,CAAC;AAED,2BAAiB,QAAQ,KAAK,OAAO,IAAI,KAAK,IAAI,CAAC;AAAA,QACrD,SAAS,OAAO;AACd,wBAAc,QAAQ,KAAK,OAAO,IAAI,KAAK,IAAI,GAAG,KAAK;AAAA,QACzD;AAEA,yBAAiB;AAAA,MACnB;AAAA,IACF;AAEA,UAAM,cAAc,MAAM,mBAAmB,QAAQ,GAAG;AACxD,WAAO;AAAA,MACL;AAAA,MACA;AAAA,IACF;AAAA,EACF;AAEA,SAAO;AAAA,IACL;AAAA,IACA,cAAc,YAAY,mBAAmB,QAAQ,GAAG;AAAA,IAExD,WAAW,OAAO,KAAK,oBAAoB;AACzC,YAAM,aAAa,qBAAqB,YAAY,GAAG;AACvD,YAAM,SAAS,WAAW,sBAAsB,EAAE;AAClD,YAAM,KAAK,mBAAmB,gBAAgB,IAAI,MAAM;AACxD,YAAM,QAAQ,MAAM,cAAc,QAAQ,gBAAgB,KAAK;AAC/D,YAAM,uBAAuB,QAAQ,KAAK;AAE1C,YAAM,SAAS,gBAAgB;AAAA,QAC7B,KAAK,QAAQ;AAAA,QACb,MAAM,IAAI;AAAA,QACV;AAAA,QACA;AAAA,MACF,CAAC;AAED,YAAM,mBAAmB,QAAQ,GAAG;AACpC,aAAO;AAAA,IACT;AAAA,IAEA,mBAAmB,OAAO,KAAK,oBAAoB;AACjD,YAAM,aAAa,qBAAqB,YAAY,GAAG;AACvD,YAAM,SAAS,WAAW,sBAAsB,EAA
E;AAClD,wBAAkB,gBAAgB,SAAS;AAC3C,YAAM,UAAU,oBAAoB,gBAAgB,OAAO;AAC3D,YAAM,UAAU,mBAAmB,gBAAgB,WAAW,KAAK,IAAI,IAAI,SAAS,WAAW;AAC/F,YAAM,QAAQ,MAAM,cAAc,QAAQ,gBAAgB,KAAK;AAC/D,YAAM,uBAAuB,QAAQ,KAAK;AAE1C,YAAM,SAAS,uBAAuB;AAAA,QACpC,KAAK,QAAQ;AAAA,QACb,MAAM,IAAI;AAAA,QACV,WAAW,gBAAgB;AAAA,QAC3B;AAAA,QACA;AAAA,QACA;AAAA,MACF,CAAC;AAED,YAAM,mBAAmB,QAAQ,GAAG;AACpC,aAAO;AAAA,IACT;AAAA,IAEA,iBAAiB,OAAO,KAAK,kBAAkB;AAC7C,2BAAqB,YAAY,GAAG;AACpC,wBAAkB,cAAc,SAAS;AAEzC,YAAM,YAAY,uBAAuB;AAAA,QACvC,KAAK,QAAQ;AAAA,QACb,MAAM,IAAI;AAAA,QACV,WAAW,cAAc;AAAA,MAC3B,CAAC;AAED,YAAM,mBAAmB,QAAQ,GAAG;AACpC,aAAO;AAAA,IACT;AAAA,EACF;AACF;","names":[]}
|
package/package.json
ADDED
|
@@ -0,0 +1,59 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@sqlite-sync/cloudflare",
|
|
3
|
+
"version": "0.0.2",
|
|
4
|
+
"description": "Cloudflare utilities for @sqlite-sync/core",
|
|
5
|
+
"type": "module",
|
|
6
|
+
"license": "MIT",
|
|
7
|
+
"repository": {
|
|
8
|
+
"type": "git",
|
|
9
|
+
"url": "https://github.com/krolebord/sqlite-sync.git",
|
|
10
|
+
"directory": "packages/cloudflare"
|
|
11
|
+
},
|
|
12
|
+
"keywords": [
|
|
13
|
+
"sqlite",
|
|
14
|
+
"sync",
|
|
15
|
+
"cloudflare",
|
|
16
|
+
"workers",
|
|
17
|
+
"durable-objects",
|
|
18
|
+
"kysely"
|
|
19
|
+
],
|
|
20
|
+
"main": "./dist/index.js",
|
|
21
|
+
"module": "./dist/index.js",
|
|
22
|
+
"types": "./dist/index.d.ts",
|
|
23
|
+
"exports": {
|
|
24
|
+
".": {
|
|
25
|
+
"@sqlite-sync/source": "./src/index.ts",
|
|
26
|
+
"workerd": "./src/index.ts",
|
|
27
|
+
"types": "./dist/index.d.ts",
|
|
28
|
+
"import": "./dist/index.js"
|
|
29
|
+
},
|
|
30
|
+
"./jobs": {
|
|
31
|
+
"@sqlite-sync/source": "./src/jobs/index.ts",
|
|
32
|
+
"workerd": "./src/jobs/index.ts",
|
|
33
|
+
"types": "./dist/jobs.d.ts",
|
|
34
|
+
"import": "./dist/jobs.js"
|
|
35
|
+
}
|
|
36
|
+
},
|
|
37
|
+
"files": [
|
|
38
|
+
"dist"
|
|
39
|
+
],
|
|
40
|
+
"dependencies": {
|
|
41
|
+
"@standard-schema/spec": "^1.1.0",
|
|
42
|
+
"@sqlite-sync/core": "0.0.2"
|
|
43
|
+
},
|
|
44
|
+
"peerDependencies": {
|
|
45
|
+
"@cloudflare/workers-types": "^4.0.0",
|
|
46
|
+
"kysely": "^0.28.0"
|
|
47
|
+
},
|
|
48
|
+
"devDependencies": {
|
|
49
|
+
"@cloudflare/workers-types": "^4.20260124.0",
|
|
50
|
+
"kysely": "^0.28.10",
|
|
51
|
+
"tsup": "^8.3.5",
|
|
52
|
+
"typescript": "~5.9.3"
|
|
53
|
+
},
|
|
54
|
+
"scripts": {
|
|
55
|
+
"build": "tsup",
|
|
56
|
+
"dev": "tsup --watch",
|
|
57
|
+
"typecheck": "tsc --noEmit"
|
|
58
|
+
}
|
|
59
|
+
}
|