@tanstack/powersync-db-collection 0.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json ADDED
@@ -0,0 +1,71 @@
+ {
+   "name": "@tanstack/powersync-db-collection",
+   "description": "PowerSync collection for TanStack DB",
+   "version": "0.0.0",
+   "dependencies": {
+     "@standard-schema/spec": "^1.0.0",
+     "@tanstack/db": "workspace:*",
+     "@tanstack/store": "^0.8.0",
+     "debug": "^4.4.3",
+     "p-defer": "^4.0.1"
+   },
+   "peerDependencies": {
+     "@powersync/common": "^1.41.0"
+   },
+   "devDependencies": {
+     "@powersync/common": "^1.41.0",
+     "@powersync/node": "^0.13.0",
+     "@types/debug": "^4.1.12",
+     "@vitest/coverage-istanbul": "^3.2.4"
+   },
+   "exports": {
+     ".": {
+       "import": {
+         "types": "./dist/esm/index.d.ts",
+         "default": "./dist/esm/index.js"
+       },
+       "require": {
+         "types": "./dist/cjs/index.d.cts",
+         "default": "./dist/cjs/index.cjs"
+       }
+     },
+     "./package.json": "./package.json"
+   },
+   "files": [
+     "dist",
+     "src"
+   ],
+   "main": "dist/cjs/index.cjs",
+   "module": "dist/esm/index.js",
+   "packageManager": "pnpm@10.17.0",
+   "author": "POWERSYNC",
+   "license": "MIT",
+   "repository": {
+     "type": "git",
+     "url": "https://github.com/TanStack/db.git",
+     "directory": "packages/powersync-db-collection"
+   },
+   "homepage": "https://tanstack.com/db",
+   "keywords": [
+     "powersync",
+     "realtime",
+     "local-first",
+     "sync-engine",
+     "sync",
+     "replication",
+     "opfs",
+     "indexeddb",
+     "localstorage",
+     "optimistic",
+     "typescript"
+   ],
+   "scripts": {
+     "build": "vite build",
+     "dev": "vite build --watch",
+     "lint": "eslint . --fix",
+     "test": "npx vitest --run"
+   },
+   "sideEffects": false,
+   "type": "module",
+   "types": "dist/esm/index.d.ts"
+ }
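
The `exports` map above ships dual ESM/CJS builds with separate type declaration files. A minimal consumption sketch (hypothetical app code; `powerSyncCollectionOptions` is the entry point used in the source's own examples below):

```typescript
// ESM consumers resolve ./dist/esm/index.js via the "import" condition;
// CJS consumers get ./dist/cjs/index.cjs via "require".
import { createCollection } from "@tanstack/db"
import { powerSyncCollectionOptions } from "@tanstack/powersync-db-collection"
```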
package/src/PendingOperationStore.ts ADDED
@@ -0,0 +1,54 @@
+ import pDefer from "p-defer"
+ import type { DiffTriggerOperation } from "@powersync/common"
+ import type { DeferredPromise } from "p-defer"
+
+ export type PendingOperation = {
+   tableName: string
+   operation: DiffTriggerOperation
+   id: string
+   timestamp: string
+ }
+
+ /**
+  * Optimistic mutations have their optimistic state discarded once their
+  * transaction has been applied.
+  * We need to ensure that an applied transaction has been observed by the sync
+  * diff trigger before the transaction application call resolves.
+  * This store lets callers register a wait for a pending operation to be observed.
+  */
+ export class PendingOperationStore {
+   private pendingOperations = new Map<PendingOperation, DeferredPromise<void>>()
+
+   /**
+    * Globally accessible PendingOperationStore
+    */
+   static GLOBAL = new PendingOperationStore()
+
+   /**
+    * @returns A promise which resolves once the specified operation has been seen.
+    */
+   waitFor(operation: PendingOperation): Promise<void> {
+     const managedPromise = pDefer<void>()
+     this.pendingOperations.set(operation, managedPromise)
+     return managedPromise.promise
+   }
+
+   /**
+    * Marks a set of operations as seen. This resolves any matching pending promises.
+    */
+   resolvePendingFor(operations: Array<PendingOperation>) {
+     for (const operation of operations) {
+       for (const [pendingOp, deferred] of this.pendingOperations.entries()) {
+         if (
+           pendingOp.tableName == operation.tableName &&
+           pendingOp.operation == operation.operation &&
+           pendingOp.id == operation.id &&
+           pendingOp.timestamp == operation.timestamp
+         ) {
+           deferred.resolve()
+           this.pendingOperations.delete(pendingOp)
+         }
+       }
+     }
+   }
+ }
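
A minimal sketch of the store's intended flow, based on the code above (the literal values, and the `DiffTriggerOperation.INSERT` member, are illustrative assumptions):

```typescript
import { DiffTriggerOperation } from "@powersync/common"
import { PendingOperationStore } from "./PendingOperationStore"
import type { PendingOperation } from "./PendingOperationStore"

const store = PendingOperationStore.GLOBAL

// A writer registers interest in an operation it has just performed...
const op: PendingOperation = {
  tableName: `documents`, // illustrative table name
  operation: DiffTriggerOperation.INSERT, // assumed enum member
  id: `doc-1`,
  timestamp: `2025-01-01T00:00:00Z`,
}
const seen = store.waitFor(op)

// ...and the diff-trigger observer later reports what it observed.
// resolvePendingFor matches by field equality, so a structurally
// equal object resolves the waiter too.
store.resolvePendingFor([{ ...op }])

await seen // resolves once the operation has been observed
```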
package/src/PowerSyncTransactor.ts ADDED
@@ -0,0 +1,271 @@
+ import { sanitizeSQL } from "@powersync/common"
+ import DebugModule from "debug"
+ import { asPowerSyncRecord, mapOperationToPowerSync } from "./helpers"
+ import { PendingOperationStore } from "./PendingOperationStore"
+ import type { AbstractPowerSyncDatabase, LockContext } from "@powersync/common"
+ import type { PendingMutation, Transaction } from "@tanstack/db"
+ import type { EnhancedPowerSyncCollectionConfig } from "./definitions"
+ import type { PendingOperation } from "./PendingOperationStore"
+
+ const debug = DebugModule.debug(`ts/db:powersync`)
+
+ export type TransactorOptions = {
+   database: AbstractPowerSyncDatabase
+ }
+
+ /**
+  * Applies TanStack DB mutations to the PowerSync database. The collection's
+  * insert, update, and delete operations use this automatically; you typically
+  * only need it directly if you have special transaction requirements.
+  *
+  * @example
+  * ```typescript
+  * // Create a collection
+  * const collection = createCollection(
+  *   powerSyncCollectionOptions<Document>({
+  *     database: db,
+  *     table: APP_SCHEMA.props.documents,
+  *   })
+  * )
+  *
+  * const addTx = createTransaction({
+  *   autoCommit: false,
+  *   mutationFn: async ({ transaction }) => {
+  *     await new PowerSyncTransactor({ database: db }).applyTransaction(transaction)
+  *   },
+  * })
+  *
+  * addTx.mutate(() => {
+  *   for (let i = 0; i < 5; i++) {
+  *     collection.insert({ id: randomUUID(), name: `tx-${i}` })
+  *   }
+  * })
+  *
+  * await addTx.commit()
+  * await addTx.isPersisted.promise
+  * ```
+  */
+ export class PowerSyncTransactor {
+   database: AbstractPowerSyncDatabase
+   pendingOperationStore: PendingOperationStore
+
+   constructor(options: TransactorOptions) {
+     this.database = options.database
+     this.pendingOperationStore = PendingOperationStore.GLOBAL
+   }
+
+   /**
+    * Persists a {@link Transaction} to the PowerSync SQLite database.
+    *
+    * @param transaction - The transaction containing mutations to apply
+    * @returns A promise that resolves when the mutations have been persisted to PowerSync
+    */
+   async applyTransaction(transaction: Transaction<any>) {
+     const { mutations } = transaction
+
+     if (mutations.length == 0) {
+       return
+     }
+     /**
+      * The transaction might contain operations for different collections.
+      * We can do some optimizations for single-collection transactions.
+      */
+     const mutationsCollectionIds = mutations.map(
+       (mutation) => mutation.collection.id
+     )
+     const collectionIds = Array.from(new Set(mutationsCollectionIds))
+     const lastCollectionMutationIndexes = new Map<string, number>()
+     const allCollections = collectionIds
+       .map((id) => mutations.find((mutation) => mutation.collection.id == id)!)
+       .map((mutation) => mutation.collection)
+     for (const collectionId of collectionIds) {
+       lastCollectionMutationIndexes.set(
+         collectionId,
+         mutationsCollectionIds.lastIndexOf(collectionId)
+       )
+     }
+
+     // Check that all the observers are ready before taking a lock
+     await Promise.all(
+       allCollections.map(async (collection) => {
+         if (collection.isReady()) {
+           return
+         }
+         await new Promise<void>((resolve) => collection.onFirstReady(resolve))
+       })
+     )
+
+     // Persist to PowerSync
+     const { whenComplete } = await this.database.writeTransaction(
+       async (tx) => {
+         const pendingOperations: Array<PendingOperation | null> = []
+
+         for (const [index, mutation] of mutations.entries()) {
+           /**
+            * Each collection processes events independently. We need to make sure the
+            * last operation for each collection has been observed.
+            */
+           const shouldWait =
+             index == lastCollectionMutationIndexes.get(mutation.collection.id)
+           switch (mutation.type) {
+             case `insert`:
+               pendingOperations.push(
+                 await this.handleInsert(mutation, tx, shouldWait)
+               )
+               break
+             case `update`:
+               pendingOperations.push(
+                 await this.handleUpdate(mutation, tx, shouldWait)
+               )
+               break
+             case `delete`:
+               pendingOperations.push(
+                 await this.handleDelete(mutation, tx, shouldWait)
+               )
+               break
+           }
+         }
+
+         /**
+          * Return a promise from the writeTransaction, without awaiting it.
+          * This promise will resolve once the entire transaction has been
+          * observed via the diff triggers.
+          * We return without awaiting in order to free the write lock.
+          */
+         return {
+           whenComplete: Promise.all(
+             pendingOperations
+               .filter((op) => !!op)
+               .map((op) => this.pendingOperationStore.waitFor(op))
+           ),
+         }
+       }
+     )
+
+     // Wait for the change to be observed via the diff trigger
+     await whenComplete
+   }
+
+   protected async handleInsert(
+     mutation: PendingMutation<any>,
+     context: LockContext,
+     waitForCompletion: boolean = false
+   ): Promise<PendingOperation | null> {
+     debug(`insert`, mutation)
+
+     return this.handleOperationWithCompletion(
+       mutation,
+       context,
+       waitForCompletion,
+       async (tableName, mutation, serializeValue) => {
+         const values = serializeValue(mutation.modified)
+         const keys = Object.keys(values).map((key) => sanitizeSQL`${key}`)
+
+         await context.execute(
+           `
+           INSERT INTO ${tableName}
+           (${keys.join(`, `)})
+           VALUES
+           (${keys.map((_) => `?`).join(`, `)})
+           `,
+           Object.values(values)
+         )
+       }
+     )
+   }
+
+   protected async handleUpdate(
+     mutation: PendingMutation<any>,
+     context: LockContext,
+     waitForCompletion: boolean = false
+   ): Promise<PendingOperation | null> {
+     debug(`update`, mutation)
+
+     return this.handleOperationWithCompletion(
+       mutation,
+       context,
+       waitForCompletion,
+       async (tableName, mutation, serializeValue) => {
+         const values = serializeValue(mutation.modified)
+         const keys = Object.keys(values).map((key) => sanitizeSQL`${key}`)
+
+         await context.execute(
+           `
+           UPDATE ${tableName}
+           SET ${keys.map((key) => `${key} = ?`).join(`, `)}
+           WHERE id = ?
+           `,
+           [...Object.values(values), asPowerSyncRecord(mutation.modified).id]
+         )
+       }
+     )
+   }
+
+   protected async handleDelete(
+     mutation: PendingMutation<any>,
+     context: LockContext,
+     waitForCompletion: boolean = false
+   ): Promise<PendingOperation | null> {
+     debug(`delete`, mutation)
+
+     return this.handleOperationWithCompletion(
+       mutation,
+       context,
+       waitForCompletion,
+       async (tableName, mutation) => {
+         await context.execute(
+           `
+           DELETE FROM ${tableName} WHERE id = ?
+           `,
+           [asPowerSyncRecord(mutation.original).id]
+         )
+       }
+     )
+   }
+
+   /**
+    * Helper function which wraps a persistence operation by:
+    * - fetching the mutation's collection's SQLite table details,
+    * - executing the mutation, and
+    * - returning the last pending diff operation if required.
+    */
+   protected async handleOperationWithCompletion(
+     mutation: PendingMutation<any>,
+     context: LockContext,
+     waitForCompletion: boolean,
+     handler: (
+       tableName: string,
+       mutation: PendingMutation<any>,
+       serializeValue: (value: any) => Record<string, unknown>
+     ) => Promise<void>
+   ): Promise<PendingOperation | null> {
+     if (
+       typeof (mutation.collection.config as any).utils?.getMeta != `function`
+     ) {
+       throw new Error(`Could not get tableName from mutation's collection config.
+         The provided mutation might not have originated from PowerSync.`)
+     }
+
+     const { tableName, trackedTableName, serializeValue } = (
+       mutation.collection
+         .config as unknown as EnhancedPowerSyncCollectionConfig<any>
+     ).utils.getMeta()
+
+     await handler(sanitizeSQL`${tableName}`, mutation, serializeValue)
+
+     if (!waitForCompletion) {
+       return null
+     }
+
+     // Need to get the operation in order to wait for it
+     const diffOperation = await context.get<{ id: string; timestamp: string }>(
+       sanitizeSQL`SELECT id, timestamp FROM ${trackedTableName} ORDER BY timestamp DESC LIMIT 1`
+     )
+     return {
+       tableName,
+       id: diffOperation.id,
+       operation: mapOperationToPowerSync(mutation.type),
+       timestamp: diffOperation.timestamp,
+     }
+   }
+ }
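
The key move in `applyTransaction` above is returning `whenComplete` from `writeTransaction` without awaiting it, so the write lock is released before the transactor waits for the diff triggers to report the change. A stripped-down sketch of that pattern (the lock function here is a stand-in, not PowerSync's API):

```typescript
// Stand-in for a write lock helper: runs the callback while holding the
// lock and returns its result after the lock is released.
async function withWriteLock<T>(callback: () => Promise<T>): Promise<T> {
  // ...acquire the lock (omitted)
  try {
    return await callback()
  } finally {
    // ...release the lock (omitted)
  }
}

async function applyAndObserve(registerWaiter: () => Promise<void>) {
  const { whenComplete } = await withWriteLock(async () => {
    // 1. Perform the SQL writes while holding the lock (omitted).
    // 2. Register the waiter, but do NOT await it here: the observer that
    //    resolves it only runs after this lock has been released.
    return { whenComplete: registerWaiter() }
  })
  // 3. Await only once the lock is free.
  await whenComplete
}
```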
package/src/definitions.ts ADDED
@@ -0,0 +1,274 @@
+ import type { AbstractPowerSyncDatabase, Table } from "@powersync/common"
+ import type { StandardSchemaV1 } from "@standard-schema/spec"
+ import type {
+   BaseCollectionConfig,
+   CollectionConfig,
+   InferSchemaOutput,
+ } from "@tanstack/db"
+ import type {
+   AnyTableColumnType,
+   ExtractedTable,
+   OptionalExtractedTable,
+   PowerSyncRecord,
+ } from "./helpers"
+
+ /**
+  * Small helper which determines the output type: the standard SQLite column
+  * types by default, or the provided schema's output type if a schema is given.
+  */
+ export type InferPowerSyncOutputType<
+   TTable extends Table = Table,
+   TSchema extends StandardSchemaV1<PowerSyncRecord> = never,
+ > = [TSchema] extends [never]
+   ? ExtractedTable<TTable>
+   : InferSchemaOutput<TSchema>
+
+ /**
+  * A mapping type for custom serialization of object properties to SQLite-compatible values.
+  *
+  * This type lets you provide, for individual keys of the input object (`TOutput`), a function
+  * that transforms the value to the corresponding SQLite type (`TSQLite`). Keys without an
+  * entry use the default SQLite serialization.
+  *
+  * ## Generics
+  * - `TOutput`: The input object type, representing the row data to be serialized.
+  * - `TSQLite`: The target SQLite-compatible type for each property, typically inferred from the table schema.
+  *
+  * ## Usage
+  * Use this type to define a map of serialization functions for specific keys when you need custom handling
+  * (e.g., converting complex objects, formatting dates, or handling enums).
+  *
+  * Example:
+  * ```ts
+  * const serializer: CustomSQLiteSerializer<MyRowType, MySQLiteType> = {
+  *   createdAt: (date) => date.toISOString(),
+  *   status: (status) => status ? 1 : 0,
+  *   meta: (meta) => JSON.stringify(meta),
+  * };
+  * ```
+  *
+  * ## Behavior
+  * - Each key maps to a function that receives the value and returns the SQLite-compatible value.
+  * - Used by `serializeForSQLite` to override default serialization for specific columns.
+  */
+ export type CustomSQLiteSerializer<
+   TOutput extends Record<string, unknown>,
+   TSQLite extends Record<string, unknown>,
+ > = Partial<{
+   [Key in keyof TOutput]: (
+     value: TOutput[Key]
+   ) => Key extends keyof TSQLite ? TSQLite[Key] : never
+ }>
+
+ export type SerializerConfig<
+   TOutput extends Record<string, unknown>,
+   TSQLite extends Record<string, unknown>,
+ > = {
+   /**
+    * Optional partial serializer object for customizing how individual columns are serialized for SQLite.
+    *
+    * This should be a partial map of column keys to serialization functions, following the
+    * {@link CustomSQLiteSerializer} type. Each function receives the column value and returns a value
+    * compatible with SQLite storage.
+    *
+    * If not provided for a column, the default behavior is used:
+    * - `TEXT`: Strings are stored as-is; Dates are converted to ISO strings; other types are JSON-stringified.
+    * - `INTEGER`/`REAL`: Numbers are stored as-is; booleans are mapped to 1/0.
+    *
+    * Use this option to override serialization for specific columns, such as formatting dates, handling enums,
+    * or serializing complex objects.
+    *
+    * Example:
+    * ```typescript
+    * serializer: {
+    *   createdAt: (date) => date.getTime(), // Store as timestamp
+    *   meta: (meta) => JSON.stringify(meta), // Custom object serialization
+    * }
+    * ```
+    */
+   serializer?: CustomSQLiteSerializer<TOutput, TSQLite>
+
+   /**
+    * Application logic should ensure that incoming synced data is always valid.
+    * Failing to deserialize and apply incoming changes results in data inconsistency, which is a fatal error.
+    * Use this callback to react to deserialization errors.
+    */
+   onDeserializationError: (error: StandardSchemaV1.FailureResult) => void
+ }
+
+ /**
+  * Config for when TInput and TOutput are both the SQLite types.
+  */
+ export type ConfigWithSQLiteTypes = {}
+
+ /**
+  * Config where TInput is the SQLite types while TOutput can be defined by TSchema.
+  * We can use the same schema to validate TInput and incoming SQLite changes.
+  */
+ export type ConfigWithSQLiteInputType<
+   TTable extends Table,
+   TSchema extends StandardSchemaV1<
+     // TInput is the SQLite types.
+     OptionalExtractedTable<TTable>,
+     AnyTableColumnType<TTable>
+   >,
+ > = SerializerConfig<
+   StandardSchemaV1.InferOutput<TSchema>,
+   ExtractedTable<TTable>
+ > & {
+   schema: TSchema
+ }
+
+ /**
+  * Config where TInput and TOutput have arbitrarily typed values.
+  * The keys of these types must match the SQLite table's columns.
+  * Since TInput is not the SQLite types, we require a schema in order to
+  * deserialize incoming SQLite updates. The schema should validate from
+  * SQLite to TOutput.
+  */
+ export type ConfigWithArbitraryCollectionTypes<
+   TTable extends Table,
+   TSchema extends StandardSchemaV1<
+     // The input and output must have the same keys; the value types can be arbitrary
+     AnyTableColumnType<TTable>,
+     AnyTableColumnType<TTable>
+   >,
+ > = SerializerConfig<
+   StandardSchemaV1.InferOutput<TSchema>,
+   ExtractedTable<TTable>
+ > & {
+   schema: TSchema
+   /**
+    * Schema for deserializing and validating input data from the sync stream.
+    *
+    * This schema defines how to transform and validate data coming from SQLite types (as stored in the database)
+    * into the desired output types (`TOutput`) expected by your application or validation logic.
+    *
+    * The generic parameters allow for arbitrary input and output types, so you can specify custom conversion rules
+    * for each column. This is especially useful when your application expects richer types (e.g., Date, enums, objects)
+    * than what SQLite natively supports.
+    *
+    * Use this to ensure that incoming data from the sync stream is properly converted and validated before use.
+    *
+    * Example:
+    * ```typescript
+    * deserializationSchema: z.object({
+    *   createdAt: z.preprocess((val) => new Date(val as string), z.date()),
+    *   meta: z.preprocess((val) => JSON.parse(val as string), z.object({ ... })),
+    * })
+    * ```
+    *
+    * This enables robust type safety and validation for incoming data, bridging the gap between SQLite storage
+    * and your application's expected types.
+    */
+   deserializationSchema: StandardSchemaV1<
+     ExtractedTable<TTable>,
+     StandardSchemaV1.InferOutput<TSchema>
+   >
+ }
+
+ export type BasePowerSyncCollectionConfig<
+   TTable extends Table = Table,
+   TSchema extends StandardSchemaV1 = never,
+ > = Omit<
+   BaseCollectionConfig<ExtractedTable<TTable>, string, TSchema>,
+   `onInsert` | `onUpdate` | `onDelete` | `getKey`
+ > & {
+   /** The PowerSync schema Table definition */
+   table: TTable
+   /** The PowerSync database instance */
+   database: AbstractPowerSyncDatabase
+   /**
+    * The maximum number of documents to read from the SQLite table
+    * in a single batch during the initial sync between PowerSync and the
+    * in-memory TanStack DB collection.
+    *
+    * @remarks
+    * - Defaults to {@link DEFAULT_BATCH_SIZE} if not specified.
+    * - Larger values reduce the number of round trips to the storage
+    *   engine but increase memory usage per batch.
+    * - Smaller values may lower memory usage and allow earlier
+    *   streaming of initial results, at the cost of more query calls.
+    */
+   syncBatchSize?: number
+ }
+
+ /**
+  * Configuration options for creating a PowerSync collection.
+  *
+  * @template TTable - The PowerSync table schema definition
+  * @template TSchema - The validation schema type
+  *
+  * @example
+  * ```typescript
+  * const APP_SCHEMA = new Schema({
+  *   documents: new Table({
+  *     name: column.text,
+  *   }),
+  * })
+  *
+  * const db = new PowerSyncDatabase({
+  *   database: {
+  *     dbFilename: "test.sqlite",
+  *   },
+  *   schema: APP_SCHEMA,
+  * })
+  *
+  * const collection = createCollection(
+  *   powerSyncCollectionOptions({
+  *     database: db,
+  *     table: APP_SCHEMA.props.documents
+  *   })
+  * )
+  * ```
+  */
+ export type PowerSyncCollectionConfig<
+   TTable extends Table = Table,
+   TSchema extends StandardSchemaV1<any> = never,
+ > = BasePowerSyncCollectionConfig<TTable, TSchema> &
+   (
+     | ConfigWithSQLiteTypes
+     | ConfigWithSQLiteInputType<TTable, TSchema>
+     | ConfigWithArbitraryCollectionTypes<TTable, TSchema>
+   )
+
+ /**
+  * Metadata for the PowerSync Collection.
+  */
+ export type PowerSyncCollectionMeta<TTable extends Table = Table> = {
+   /**
+    * The name of the SQLite table backing the collection.
+    */
+   tableName: string
+   /**
+    * The internal table used to track diffs for the collection.
+    */
+   trackedTableName: string
+
+   /**
+    * Serializes a collection value to the SQLite type.
+    */
+   serializeValue: (value: any) => ExtractedTable<TTable>
+ }
+
+ /**
+  * A CollectionConfig which includes utilities for PowerSync.
+  */
+ export type EnhancedPowerSyncCollectionConfig<
+   TTable extends Table,
+   OutputType extends Record<string, unknown> = Record<string, unknown>,
+   TSchema extends StandardSchemaV1 = never,
+ > = CollectionConfig<OutputType, string, TSchema> & {
+   id?: string
+   utils: PowerSyncCollectionUtils<TTable>
+   schema?: TSchema
+ }
+
+ /**
+  * Collection-level utilities for PowerSync.
+  */
+ export type PowerSyncCollectionUtils<TTable extends Table = Table> = {
+   getMeta: () => PowerSyncCollectionMeta<TTable>
+ }
+
+ /**
+  * Default value for {@link PowerSyncCollectionConfig#syncBatchSize}.
+  */
+ export const DEFAULT_BATCH_SIZE = 1000
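
Pulling the definitions above together, a hedged end-to-end sketch of the `ConfigWithArbitraryCollectionTypes` shape, using Zod (which implements Standard Schema) for both schemas. The PowerSync setup mirrors the doc-comment examples; importing it from `@powersync/web` is an assumption about the consuming platform:

```typescript
import { z } from "zod"
import { createCollection } from "@tanstack/db"
import { powerSyncCollectionOptions } from "@tanstack/powersync-db-collection"
// Assumption: a platform package such as @powersync/web provides these.
import { PowerSyncDatabase, Schema, Table, column } from "@powersync/web"

const APP_SCHEMA = new Schema({
  documents: new Table({ name: column.text, created_at: column.text }),
})

const db = new PowerSyncDatabase({
  database: { dbFilename: `app.sqlite` },
  schema: APP_SCHEMA,
})

// Application rows are richer than SQLite: created_at is a Date, not TEXT.
const rowSchema = z.object({
  id: z.string(),
  name: z.string(),
  created_at: z.date(),
})

const documents = createCollection(
  powerSyncCollectionOptions({
    database: db,
    table: APP_SCHEMA.props.documents,
    schema: rowSchema,
    // Deserialize incoming SQLite TEXT into the application's Date type.
    deserializationSchema: z.object({
      id: z.string(),
      name: z.string(),
      created_at: z.preprocess((val) => new Date(val as string), z.date()),
    }),
    // Serialize the application's Date back to SQLite TEXT.
    serializer: { created_at: (date: Date) => date.toISOString() },
    // Deserialization failures indicate data inconsistency; surface them loudly.
    onDeserializationError: (error) => {
      console.error(`Failed to apply a synced row`, error.issues)
    },
    syncBatchSize: 500, // optional; DEFAULT_BATCH_SIZE (1000) if omitted
  })
)
```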