@mastra/libsql 0.13.4-alpha.0 → 0.13.5-alpha.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.turbo/turbo-build.log +1 -1
- package/CHANGELOG.md +23 -0
- package/dist/index.cjs +679 -8
- package/dist/index.cjs.map +1 -1
- package/dist/index.js +680 -9
- package/dist/index.js.map +1 -1
- package/dist/storage/domains/memory/index.d.ts.map +1 -1
- package/dist/storage/domains/observability/index.d.ts +34 -0
- package/dist/storage/domains/observability/index.d.ts.map +1 -0
- package/dist/storage/domains/operations/index.d.ts +50 -1
- package/dist/storage/domains/operations/index.d.ts.map +1 -1
- package/dist/storage/domains/utils.d.ts +45 -1
- package/dist/storage/domains/utils.d.ts.map +1 -1
- package/dist/storage/domains/workflows/index.d.ts +27 -2
- package/dist/storage/domains/workflows/index.d.ts.map +1 -1
- package/dist/storage/index.d.ts +42 -2
- package/dist/storage/index.d.ts.map +1 -1
- package/package.json +5 -5
- package/src/storage/domains/memory/index.ts +1 -1
- package/src/storage/domains/observability/index.ts +237 -0
- package/src/storage/domains/operations/index.ts +213 -3
- package/src/storage/domains/utils.ts +207 -2
- package/src/storage/domains/workflows/index.ts +225 -2
- package/src/storage/index.ts +74 -1
|
@@ -1,9 +1,14 @@
|
|
|
1
|
-
import type { Client } from '@libsql/client';
|
|
1
|
+
import type { Client, InValue } from '@libsql/client';
|
|
2
2
|
import { ErrorCategory, ErrorDomain, MastraError } from '@mastra/core/error';
|
|
3
|
-
import { TABLE_WORKFLOW_SNAPSHOT, StoreOperations } from '@mastra/core/storage';
|
|
3
|
+
import { TABLE_WORKFLOW_SNAPSHOT, StoreOperations, TABLE_AI_SPANS } from '@mastra/core/storage';
|
|
4
4
|
import type { StorageColumn, TABLE_NAMES } from '@mastra/core/storage';
|
|
5
5
|
import { parseSqlIdentifier } from '@mastra/core/utils';
|
|
6
|
-
import {
|
|
6
|
+
import {
|
|
7
|
+
createExecuteWriteOperationWithRetry,
|
|
8
|
+
prepareDeleteStatement,
|
|
9
|
+
prepareStatement,
|
|
10
|
+
prepareUpdateStatement,
|
|
11
|
+
} from '../utils';
|
|
7
12
|
|
|
8
13
|
export class StoreOperationsLibSQL extends StoreOperations {
|
|
9
14
|
private client: Client;
|
|
@@ -66,6 +71,14 @@ export class StoreOperationsLibSQL extends StoreOperations {
|
|
|
66
71
|
return stmnt;
|
|
67
72
|
}
|
|
68
73
|
|
|
74
|
+
if (tableName === TABLE_AI_SPANS) {
|
|
75
|
+
const stmnt = `CREATE TABLE IF NOT EXISTS ${parsedTableName} (
|
|
76
|
+
${columns.join(',\n')},
|
|
77
|
+
PRIMARY KEY (traceId, spanId)
|
|
78
|
+
)`;
|
|
79
|
+
return stmnt;
|
|
80
|
+
}
|
|
81
|
+
|
|
69
82
|
return `CREATE TABLE IF NOT EXISTS ${parsedTableName} (${columns.join(', ')})`;
|
|
70
83
|
}
|
|
71
84
|
|
|
@@ -164,6 +177,93 @@ export class StoreOperationsLibSQL extends StoreOperations {
|
|
|
164
177
|
return parsed as R;
|
|
165
178
|
}
|
|
166
179
|
|
|
180
|
+
async loadMany<R>({
|
|
181
|
+
tableName,
|
|
182
|
+
whereClause,
|
|
183
|
+
orderBy,
|
|
184
|
+
offset,
|
|
185
|
+
limit,
|
|
186
|
+
args,
|
|
187
|
+
}: {
|
|
188
|
+
tableName: TABLE_NAMES;
|
|
189
|
+
whereClause?: { sql: string; args: InValue[] };
|
|
190
|
+
orderBy?: string;
|
|
191
|
+
offset?: number;
|
|
192
|
+
limit?: number;
|
|
193
|
+
args?: any[];
|
|
194
|
+
}): Promise<R[]> {
|
|
195
|
+
const parsedTableName = parseSqlIdentifier(tableName, 'table name');
|
|
196
|
+
|
|
197
|
+
let statement = `SELECT * FROM ${parsedTableName}`;
|
|
198
|
+
|
|
199
|
+
if (whereClause?.sql) {
|
|
200
|
+
statement += `${whereClause.sql}`;
|
|
201
|
+
}
|
|
202
|
+
|
|
203
|
+
if (orderBy) {
|
|
204
|
+
statement += ` ORDER BY ${orderBy}`;
|
|
205
|
+
}
|
|
206
|
+
|
|
207
|
+
if (limit) {
|
|
208
|
+
statement += ` LIMIT ${limit}`;
|
|
209
|
+
}
|
|
210
|
+
|
|
211
|
+
if (offset) {
|
|
212
|
+
statement += ` OFFSET ${offset}`;
|
|
213
|
+
}
|
|
214
|
+
|
|
215
|
+
const result = await this.client.execute({
|
|
216
|
+
sql: statement,
|
|
217
|
+
args: [...(whereClause?.args ?? []), ...(args ?? [])],
|
|
218
|
+
});
|
|
219
|
+
|
|
220
|
+
return result.rows as R[];
|
|
221
|
+
}
|
|
222
|
+
|
|
223
|
+
async loadTotalCount({
|
|
224
|
+
tableName,
|
|
225
|
+
whereClause,
|
|
226
|
+
}: {
|
|
227
|
+
tableName: TABLE_NAMES;
|
|
228
|
+
whereClause?: { sql: string; args: InValue[] };
|
|
229
|
+
}): Promise<number> {
|
|
230
|
+
const parsedTableName = parseSqlIdentifier(tableName, 'table name');
|
|
231
|
+
|
|
232
|
+
const statement = `SELECT COUNT(*) as count FROM ${parsedTableName} ${whereClause ? `${whereClause.sql}` : ''}`;
|
|
233
|
+
|
|
234
|
+
const result = await this.client.execute({
|
|
235
|
+
sql: statement,
|
|
236
|
+
args: whereClause?.args ?? [],
|
|
237
|
+
});
|
|
238
|
+
|
|
239
|
+
if (!result.rows || result.rows.length === 0) {
|
|
240
|
+
return 0;
|
|
241
|
+
}
|
|
242
|
+
|
|
243
|
+
return (result.rows[0]?.count as number) ?? 0;
|
|
244
|
+
}
|
|
245
|
+
|
|
246
|
+
public update(args: { tableName: TABLE_NAMES; keys: Record<string, any>; data: Record<string, any> }): Promise<void> {
|
|
247
|
+
const executeWriteOperationWithRetry = createExecuteWriteOperationWithRetry({
|
|
248
|
+
logger: this.logger,
|
|
249
|
+
maxRetries: this.maxRetries,
|
|
250
|
+
initialBackoffMs: this.initialBackoffMs,
|
|
251
|
+
});
|
|
252
|
+
return executeWriteOperationWithRetry(() => this.executeUpdate(args), `update table ${args.tableName}`);
|
|
253
|
+
}
|
|
254
|
+
|
|
255
|
+
private async executeUpdate({
|
|
256
|
+
tableName,
|
|
257
|
+
keys,
|
|
258
|
+
data,
|
|
259
|
+
}: {
|
|
260
|
+
tableName: TABLE_NAMES;
|
|
261
|
+
keys: Record<string, any>;
|
|
262
|
+
data: Record<string, any>;
|
|
263
|
+
}): Promise<void> {
|
|
264
|
+
await this.client.execute(prepareUpdateStatement({ tableName, updates: data, keys }));
|
|
265
|
+
}
|
|
266
|
+
|
|
167
267
|
private async doBatchInsert({
|
|
168
268
|
tableName,
|
|
169
269
|
records,
|
|
@@ -201,6 +301,116 @@ export class StoreOperationsLibSQL extends StoreOperations {
|
|
|
201
301
|
});
|
|
202
302
|
}
|
|
203
303
|
|
|
304
|
+
/**
|
|
305
|
+
* Public batch update method with retry logic
|
|
306
|
+
*/
|
|
307
|
+
public batchUpdate(args: {
|
|
308
|
+
tableName: TABLE_NAMES;
|
|
309
|
+
updates: Array<{
|
|
310
|
+
keys: Record<string, any>;
|
|
311
|
+
data: Record<string, any>;
|
|
312
|
+
}>;
|
|
313
|
+
}): Promise<void> {
|
|
314
|
+
const executeWriteOperationWithRetry = createExecuteWriteOperationWithRetry({
|
|
315
|
+
logger: this.logger,
|
|
316
|
+
maxRetries: this.maxRetries,
|
|
317
|
+
initialBackoffMs: this.initialBackoffMs,
|
|
318
|
+
});
|
|
319
|
+
|
|
320
|
+
return executeWriteOperationWithRetry(
|
|
321
|
+
() => this.executeBatchUpdate(args),
|
|
322
|
+
`batch update in table ${args.tableName}`,
|
|
323
|
+
).catch(error => {
|
|
324
|
+
throw new MastraError(
|
|
325
|
+
{
|
|
326
|
+
id: 'LIBSQL_STORE_BATCH_UPDATE_FAILED',
|
|
327
|
+
domain: ErrorDomain.STORAGE,
|
|
328
|
+
category: ErrorCategory.THIRD_PARTY,
|
|
329
|
+
details: {
|
|
330
|
+
tableName: args.tableName,
|
|
331
|
+
},
|
|
332
|
+
},
|
|
333
|
+
error,
|
|
334
|
+
);
|
|
335
|
+
});
|
|
336
|
+
}
|
|
337
|
+
|
|
338
|
+
/**
|
|
339
|
+
* Updates multiple records in batch. Each record can be updated based on single or composite keys.
|
|
340
|
+
*/
|
|
341
|
+
private async executeBatchUpdate({
|
|
342
|
+
tableName,
|
|
343
|
+
updates,
|
|
344
|
+
}: {
|
|
345
|
+
tableName: TABLE_NAMES;
|
|
346
|
+
updates: Array<{
|
|
347
|
+
keys: Record<string, any>;
|
|
348
|
+
data: Record<string, any>;
|
|
349
|
+
}>;
|
|
350
|
+
}): Promise<void> {
|
|
351
|
+
if (updates.length === 0) return;
|
|
352
|
+
|
|
353
|
+
const batchStatements = updates.map(({ keys, data }) =>
|
|
354
|
+
prepareUpdateStatement({
|
|
355
|
+
tableName,
|
|
356
|
+
updates: data,
|
|
357
|
+
keys,
|
|
358
|
+
}),
|
|
359
|
+
);
|
|
360
|
+
|
|
361
|
+
await this.client.batch(batchStatements, 'write');
|
|
362
|
+
}
|
|
363
|
+
|
|
364
|
+
/**
|
|
365
|
+
* Public batch delete method with retry logic
|
|
366
|
+
*/
|
|
367
|
+
public batchDelete({ tableName, keys }: { tableName: TABLE_NAMES; keys: Array<Record<string, any>> }): Promise<void> {
|
|
368
|
+
const executeWriteOperationWithRetry = createExecuteWriteOperationWithRetry({
|
|
369
|
+
logger: this.logger,
|
|
370
|
+
maxRetries: this.maxRetries,
|
|
371
|
+
initialBackoffMs: this.initialBackoffMs,
|
|
372
|
+
});
|
|
373
|
+
|
|
374
|
+
return executeWriteOperationWithRetry(
|
|
375
|
+
() => this.executeBatchDelete({ tableName, keys }),
|
|
376
|
+
`batch delete from table ${tableName}`,
|
|
377
|
+
).catch(error => {
|
|
378
|
+
throw new MastraError(
|
|
379
|
+
{
|
|
380
|
+
id: 'LIBSQL_STORE_BATCH_DELETE_FAILED',
|
|
381
|
+
domain: ErrorDomain.STORAGE,
|
|
382
|
+
category: ErrorCategory.THIRD_PARTY,
|
|
383
|
+
details: {
|
|
384
|
+
tableName,
|
|
385
|
+
},
|
|
386
|
+
},
|
|
387
|
+
error,
|
|
388
|
+
);
|
|
389
|
+
});
|
|
390
|
+
}
|
|
391
|
+
|
|
392
|
+
/**
|
|
393
|
+
* Deletes multiple records in batch. Each record can be deleted based on single or composite keys.
|
|
394
|
+
*/
|
|
395
|
+
private async executeBatchDelete({
|
|
396
|
+
tableName,
|
|
397
|
+
keys,
|
|
398
|
+
}: {
|
|
399
|
+
tableName: TABLE_NAMES;
|
|
400
|
+
keys: Array<Record<string, any>>;
|
|
401
|
+
}): Promise<void> {
|
|
402
|
+
if (keys.length === 0) return;
|
|
403
|
+
|
|
404
|
+
const batchStatements = keys.map(keyObj =>
|
|
405
|
+
prepareDeleteStatement({
|
|
406
|
+
tableName,
|
|
407
|
+
keys: keyObj,
|
|
408
|
+
}),
|
|
409
|
+
);
|
|
410
|
+
|
|
411
|
+
await this.client.batch(batchStatements, 'write');
|
|
412
|
+
}
|
|
413
|
+
|
|
204
414
|
/**
|
|
205
415
|
* Alters table schema to add columns if they don't exist
|
|
206
416
|
* @param tableName Name of the table
|
|
@@ -1,6 +1,7 @@
|
|
|
1
1
|
import type { InValue } from '@libsql/client';
|
|
2
2
|
import type { IMastraLogger } from '@mastra/core/logger';
|
|
3
|
-
import
|
|
3
|
+
import { safelyParseJSON, TABLE_SCHEMAS } from '@mastra/core/storage';
|
|
4
|
+
import type { PaginationArgs, StorageColumn, TABLE_NAMES } from '@mastra/core/storage';
|
|
4
5
|
import { parseSqlIdentifier } from '@mastra/core/utils';
|
|
5
6
|
|
|
6
7
|
export function createExecuteWriteOperationWithRetry({
|
|
@@ -49,7 +50,7 @@ export function prepareStatement({ tableName, record }: { tableName: TABLE_NAMES
|
|
|
49
50
|
const parsedTableName = parseSqlIdentifier(tableName, 'table name');
|
|
50
51
|
const columns = Object.keys(record).map(col => parseSqlIdentifier(col, 'column name'));
|
|
51
52
|
const values = Object.values(record).map(v => {
|
|
52
|
-
if (typeof v === `undefined`) {
|
|
53
|
+
if (typeof v === `undefined` || v === null) {
|
|
53
54
|
// returning an undefined value will cause libsql to throw
|
|
54
55
|
return null;
|
|
55
56
|
}
|
|
@@ -65,3 +66,207 @@ export function prepareStatement({ tableName, record }: { tableName: TABLE_NAMES
|
|
|
65
66
|
args: values,
|
|
66
67
|
};
|
|
67
68
|
}
|
|
69
|
+
|
|
70
|
+
export function prepareUpdateStatement({
|
|
71
|
+
tableName,
|
|
72
|
+
updates,
|
|
73
|
+
keys,
|
|
74
|
+
}: {
|
|
75
|
+
tableName: TABLE_NAMES;
|
|
76
|
+
updates: Record<string, any>;
|
|
77
|
+
keys: Record<string, any>;
|
|
78
|
+
}): {
|
|
79
|
+
sql: string;
|
|
80
|
+
args: InValue[];
|
|
81
|
+
} {
|
|
82
|
+
const parsedTableName = parseSqlIdentifier(tableName, 'table name');
|
|
83
|
+
const schema = TABLE_SCHEMAS[tableName];
|
|
84
|
+
|
|
85
|
+
// Prepare SET clause
|
|
86
|
+
const updateColumns = Object.keys(updates).map(col => parseSqlIdentifier(col, 'column name'));
|
|
87
|
+
const updateValues = Object.values(updates).map(transformToSqlValue);
|
|
88
|
+
const setClause = updateColumns.map(col => `${col} = ?`).join(', ');
|
|
89
|
+
|
|
90
|
+
const whereClause = prepareWhereClause(keys, schema);
|
|
91
|
+
|
|
92
|
+
return {
|
|
93
|
+
sql: `UPDATE ${parsedTableName} SET ${setClause}${whereClause.sql}`,
|
|
94
|
+
args: [...updateValues, ...whereClause.args],
|
|
95
|
+
};
|
|
96
|
+
}
|
|
97
|
+
|
|
98
|
+
export function transformToSqlValue(value: any): InValue {
|
|
99
|
+
if (typeof value === 'undefined' || value === null) {
|
|
100
|
+
return null;
|
|
101
|
+
}
|
|
102
|
+
if (value instanceof Date) {
|
|
103
|
+
return value.toISOString();
|
|
104
|
+
}
|
|
105
|
+
return typeof value === 'object' ? JSON.stringify(value) : value;
|
|
106
|
+
}
|
|
107
|
+
|
|
108
|
+
export function prepareDeleteStatement({ tableName, keys }: { tableName: TABLE_NAMES; keys: Record<string, any> }): {
|
|
109
|
+
sql: string;
|
|
110
|
+
args: InValue[];
|
|
111
|
+
} {
|
|
112
|
+
const parsedTableName = parseSqlIdentifier(tableName, 'table name');
|
|
113
|
+
const whereClause = prepareWhereClause(keys, TABLE_SCHEMAS[tableName]);
|
|
114
|
+
|
|
115
|
+
return {
|
|
116
|
+
sql: `DELETE FROM ${parsedTableName}${whereClause.sql}`,
|
|
117
|
+
args: whereClause.args,
|
|
118
|
+
};
|
|
119
|
+
}
|
|
120
|
+
|
|
121
|
+
type WhereValue = InValue | { startAt?: InValue; endAt?: InValue };
|
|
122
|
+
|
|
123
|
+
export function prepareWhereClause(
|
|
124
|
+
filters: Record<string, WhereValue>,
|
|
125
|
+
schema: Record<string, StorageColumn>,
|
|
126
|
+
): {
|
|
127
|
+
sql: string;
|
|
128
|
+
args: InValue[];
|
|
129
|
+
} {
|
|
130
|
+
const conditions: string[] = [];
|
|
131
|
+
const args: InValue[] = [];
|
|
132
|
+
|
|
133
|
+
for (const [columnName, filterValue] of Object.entries(filters)) {
|
|
134
|
+
const column = schema[columnName];
|
|
135
|
+
if (!column) {
|
|
136
|
+
throw new Error(`Unknown column: ${columnName}`);
|
|
137
|
+
}
|
|
138
|
+
|
|
139
|
+
const parsedColumn = parseSqlIdentifier(columnName, 'column name');
|
|
140
|
+
const result = buildCondition(parsedColumn, filterValue);
|
|
141
|
+
|
|
142
|
+
conditions.push(result.condition);
|
|
143
|
+
args.push(...result.args);
|
|
144
|
+
}
|
|
145
|
+
|
|
146
|
+
return {
|
|
147
|
+
sql: conditions.length > 0 ? ` WHERE ${conditions.join(' AND ')}` : '',
|
|
148
|
+
args,
|
|
149
|
+
};
|
|
150
|
+
}
|
|
151
|
+
|
|
152
|
+
function buildCondition(columnName: string, filterValue: WhereValue): { condition: string; args: InValue[] } {
|
|
153
|
+
// Handle null values - IS NULL
|
|
154
|
+
if (filterValue === null) {
|
|
155
|
+
return { condition: `${columnName} IS NULL`, args: [] };
|
|
156
|
+
}
|
|
157
|
+
|
|
158
|
+
// Handle date range objects
|
|
159
|
+
if (typeof filterValue === 'object' && filterValue !== null && ('startAt' in filterValue || 'endAt' in filterValue)) {
|
|
160
|
+
return buildDateRangeCondition(columnName, filterValue);
|
|
161
|
+
}
|
|
162
|
+
|
|
163
|
+
// Handle exact match
|
|
164
|
+
return {
|
|
165
|
+
condition: `${columnName} = ?`,
|
|
166
|
+
args: [transformToSqlValue(filterValue)],
|
|
167
|
+
};
|
|
168
|
+
}
|
|
169
|
+
|
|
170
|
+
function buildDateRangeCondition(
|
|
171
|
+
columnName: string,
|
|
172
|
+
range: { startAt?: InValue; endAt?: InValue },
|
|
173
|
+
): { condition: string; args: InValue[] } {
|
|
174
|
+
const conditions: string[] = [];
|
|
175
|
+
const args: InValue[] = [];
|
|
176
|
+
|
|
177
|
+
if (range.startAt !== undefined) {
|
|
178
|
+
conditions.push(`${columnName} >= ?`);
|
|
179
|
+
args.push(transformToSqlValue(range.startAt));
|
|
180
|
+
}
|
|
181
|
+
|
|
182
|
+
if (range.endAt !== undefined) {
|
|
183
|
+
conditions.push(`${columnName} <= ?`);
|
|
184
|
+
args.push(transformToSqlValue(range.endAt));
|
|
185
|
+
}
|
|
186
|
+
|
|
187
|
+
if (conditions.length === 0) {
|
|
188
|
+
throw new Error('Date range must specify at least startAt or endAt');
|
|
189
|
+
}
|
|
190
|
+
|
|
191
|
+
return {
|
|
192
|
+
condition: conditions.join(' AND '),
|
|
193
|
+
args,
|
|
194
|
+
};
|
|
195
|
+
}
|
|
196
|
+
|
|
197
|
+
type DateRangeFilter = {
|
|
198
|
+
startAt?: string;
|
|
199
|
+
endAt?: string;
|
|
200
|
+
};
|
|
201
|
+
|
|
202
|
+
/**
|
|
203
|
+
* Converts pagination date range to where clause date range format
|
|
204
|
+
* @param dateRange - The date range from pagination
|
|
205
|
+
* @param columnName - The timestamp column to filter on (defaults to 'createdAt')
|
|
206
|
+
* @returns Object with the date range filter, or empty object if no date range
|
|
207
|
+
*/
|
|
208
|
+
export function buildDateRangeFilter(
|
|
209
|
+
dateRange?: PaginationArgs['dateRange'],
|
|
210
|
+
columnName: string = 'createdAt',
|
|
211
|
+
): Record<string, DateRangeFilter> {
|
|
212
|
+
if (!dateRange?.start && !dateRange?.end) {
|
|
213
|
+
return {};
|
|
214
|
+
}
|
|
215
|
+
|
|
216
|
+
const filter: DateRangeFilter = {};
|
|
217
|
+
|
|
218
|
+
if (dateRange.start) {
|
|
219
|
+
filter.startAt = new Date(dateRange.start).toISOString();
|
|
220
|
+
}
|
|
221
|
+
|
|
222
|
+
if (dateRange.end) {
|
|
223
|
+
filter.endAt = new Date(dateRange.end).toISOString();
|
|
224
|
+
}
|
|
225
|
+
|
|
226
|
+
return { [columnName]: filter };
|
|
227
|
+
}
|
|
228
|
+
|
|
229
|
+
/**
|
|
230
|
+
* Transforms SQL row data back to a typed object format
|
|
231
|
+
* Reverses the transformations done in prepareStatement
|
|
232
|
+
*/
|
|
233
|
+
export function transformFromSqlRow<T>({
|
|
234
|
+
tableName,
|
|
235
|
+
sqlRow,
|
|
236
|
+
}: {
|
|
237
|
+
tableName: TABLE_NAMES;
|
|
238
|
+
sqlRow: Record<string, any>;
|
|
239
|
+
}): T {
|
|
240
|
+
const result: Record<string, any> = {};
|
|
241
|
+
const jsonColumns = new Set(
|
|
242
|
+
Object.keys(TABLE_SCHEMAS[tableName])
|
|
243
|
+
.filter(key => TABLE_SCHEMAS[tableName][key]!.type === 'jsonb')
|
|
244
|
+
.map(key => key),
|
|
245
|
+
);
|
|
246
|
+
const dateColumns = new Set(
|
|
247
|
+
Object.keys(TABLE_SCHEMAS[tableName])
|
|
248
|
+
.filter(key => TABLE_SCHEMAS[tableName][key]!.type === 'timestamp')
|
|
249
|
+
.map(key => key),
|
|
250
|
+
);
|
|
251
|
+
|
|
252
|
+
for (const [key, value] of Object.entries(sqlRow)) {
|
|
253
|
+
if (value === null || value === undefined) {
|
|
254
|
+
result[key] = value;
|
|
255
|
+
continue;
|
|
256
|
+
}
|
|
257
|
+
|
|
258
|
+
if (dateColumns.has(key) && typeof value === 'string') {
|
|
259
|
+
result[key] = new Date(value);
|
|
260
|
+
continue;
|
|
261
|
+
}
|
|
262
|
+
|
|
263
|
+
if (jsonColumns.has(key) && typeof value === 'string') {
|
|
264
|
+
result[key] = safelyParseJSON(value);
|
|
265
|
+
continue;
|
|
266
|
+
}
|
|
267
|
+
|
|
268
|
+
result[key] = value;
|
|
269
|
+
}
|
|
270
|
+
|
|
271
|
+
return result as T;
|
|
272
|
+
}
|
|
@@ -1,7 +1,8 @@
|
|
|
1
1
|
import type { Client, InValue } from '@libsql/client';
|
|
2
|
-
import type { WorkflowRun, WorkflowRuns, WorkflowRunState } from '@mastra/core';
|
|
3
2
|
import { ErrorCategory, ErrorDomain, MastraError } from '@mastra/core/error';
|
|
3
|
+
import type { WorkflowRun, WorkflowRuns } from '@mastra/core/storage';
|
|
4
4
|
import { TABLE_WORKFLOW_SNAPSHOT, WorkflowsStorage } from '@mastra/core/storage';
|
|
5
|
+
import type { WorkflowRunState, StepResult } from '@mastra/core/workflows';
|
|
5
6
|
import type { StoreOperationsLibSQL } from '../operations';
|
|
6
7
|
|
|
7
8
|
function parseWorkflowRun(row: Record<string, any>): WorkflowRun {
|
|
@@ -27,10 +28,232 @@ function parseWorkflowRun(row: Record<string, any>): WorkflowRun {
|
|
|
27
28
|
export class WorkflowsLibSQL extends WorkflowsStorage {
|
|
28
29
|
operations: StoreOperationsLibSQL;
|
|
29
30
|
client: Client;
|
|
30
|
-
|
|
31
|
+
private readonly maxRetries: number;
private readonly initialBackoffMs: number;

/**
 * @param maxRetries - Max attempts for lock-contended operations (default 5).
 * @param initialBackoffMs - First retry delay; doubles per attempt (default 500).
 */
constructor({
  operations,
  client,
  maxRetries = 5,
  initialBackoffMs = 500,
}: {
  operations: StoreOperationsLibSQL;
  client: Client;
  maxRetries?: number;
  initialBackoffMs?: number;
}) {
  super();
  this.operations = operations;
  this.client = client;
  this.maxRetries = maxRetries;
  this.initialBackoffMs = initialBackoffMs;

  // Set PRAGMA settings to help with database locks.
  // Constructors cannot await, so this runs fire-and-forget; failures are
  // logged as warnings rather than thrown.
  void this.setupPragmaSettings().catch(err =>
    this.logger.warn('LibSQL Workflows: Failed to setup PRAGMA settings.', err),
  );
}
|
|
57
|
+
|
|
58
|
+
/**
 * Best-effort PRAGMA tuning to reduce lock contention: a 10s busy timeout,
 * WAL journaling (where supported), and NORMAL synchronous mode.
 * Every step degrades gracefully — failures are logged, never thrown.
 */
private async setupPragmaSettings() {
  try {
    // Wait up to 10s on a locked database instead of failing immediately.
    await this.client.execute('PRAGMA busy_timeout = 10000;');
    this.logger.debug('LibSQL Workflows: PRAGMA busy_timeout=10000 set.');

    // WAL allows concurrent readers during writes; not every backend supports it.
    try {
      await this.client.execute('PRAGMA journal_mode = WAL;');
      this.logger.debug('LibSQL Workflows: PRAGMA journal_mode=WAL set.');
    } catch {
      this.logger.debug('LibSQL Workflows: WAL mode not supported, using default journal mode.');
    }

    // NORMAL trades a little durability for throughput — a common WAL pairing.
    try {
      await this.client.execute('PRAGMA synchronous = NORMAL;');
      this.logger.debug('LibSQL Workflows: PRAGMA synchronous=NORMAL set.');
    } catch {
      this.logger.debug('LibSQL Workflows: Failed to set synchronous mode.');
    }
  } catch (err) {
    this.logger.warn('LibSQL Workflows: Failed to set PRAGMA settings.', err);
  }
}
|
|
83
|
+
|
|
84
|
+
private async executeWithRetry<T>(operation: () => Promise<T>): Promise<T> {
|
|
85
|
+
let attempts = 0;
|
|
86
|
+
let backoff = this.initialBackoffMs;
|
|
87
|
+
|
|
88
|
+
while (attempts < this.maxRetries) {
|
|
89
|
+
try {
|
|
90
|
+
return await operation();
|
|
91
|
+
} catch (error: any) {
|
|
92
|
+
// Log the error details for debugging
|
|
93
|
+
this.logger.debug('LibSQL Workflows: Error caught in retry loop', {
|
|
94
|
+
errorType: error.constructor.name,
|
|
95
|
+
errorCode: error.code,
|
|
96
|
+
errorMessage: error.message,
|
|
97
|
+
attempts,
|
|
98
|
+
maxRetries: this.maxRetries,
|
|
99
|
+
});
|
|
100
|
+
|
|
101
|
+
// Check for various database lock/busy conditions
|
|
102
|
+
const isLockError =
|
|
103
|
+
error.code === 'SQLITE_BUSY' ||
|
|
104
|
+
error.code === 'SQLITE_LOCKED' ||
|
|
105
|
+
error.message?.toLowerCase().includes('database is locked') ||
|
|
106
|
+
error.message?.toLowerCase().includes('database table is locked') ||
|
|
107
|
+
error.message?.toLowerCase().includes('table is locked') ||
|
|
108
|
+
(error.constructor.name === 'SqliteError' && error.message?.toLowerCase().includes('locked'));
|
|
109
|
+
|
|
110
|
+
if (isLockError) {
|
|
111
|
+
attempts++;
|
|
112
|
+
if (attempts >= this.maxRetries) {
|
|
113
|
+
this.logger.error(
|
|
114
|
+
`LibSQL Workflows: Operation failed after ${this.maxRetries} attempts due to database lock: ${error.message}`,
|
|
115
|
+
{ error, attempts, maxRetries: this.maxRetries },
|
|
116
|
+
);
|
|
117
|
+
throw error;
|
|
118
|
+
}
|
|
119
|
+
this.logger.warn(
|
|
120
|
+
`LibSQL Workflows: Attempt ${attempts} failed due to database lock. Retrying in ${backoff}ms...`,
|
|
121
|
+
{ errorMessage: error.message, attempts, backoff, maxRetries: this.maxRetries },
|
|
122
|
+
);
|
|
123
|
+
await new Promise(resolve => setTimeout(resolve, backoff));
|
|
124
|
+
backoff *= 2;
|
|
125
|
+
} else {
|
|
126
|
+
// Not a lock error, re-throw immediately
|
|
127
|
+
this.logger.error('LibSQL Workflows: Non-lock error occurred, not retrying', { error });
|
|
128
|
+
throw error;
|
|
129
|
+
}
|
|
130
|
+
}
|
|
131
|
+
}
|
|
132
|
+
throw new Error('LibSQL Workflows: Max retries reached, but no error was re-thrown from the loop.');
|
|
133
|
+
}
|
|
134
|
+
|
|
135
|
+
async updateWorkflowResults({
|
|
136
|
+
workflowName,
|
|
137
|
+
runId,
|
|
138
|
+
stepId,
|
|
139
|
+
result,
|
|
140
|
+
runtimeContext,
|
|
141
|
+
}: {
|
|
142
|
+
workflowName: string;
|
|
143
|
+
runId: string;
|
|
144
|
+
stepId: string;
|
|
145
|
+
result: StepResult<any, any, any, any>;
|
|
146
|
+
runtimeContext: Record<string, any>;
|
|
147
|
+
}): Promise<Record<string, StepResult<any, any, any, any>>> {
|
|
148
|
+
return this.executeWithRetry(async () => {
|
|
149
|
+
// Use a transaction to ensure atomicity
|
|
150
|
+
const tx = await this.client.transaction('write');
|
|
151
|
+
try {
|
|
152
|
+
// Load existing snapshot within transaction
|
|
153
|
+
const existingSnapshotResult = await tx.execute({
|
|
154
|
+
sql: `SELECT snapshot FROM ${TABLE_WORKFLOW_SNAPSHOT} WHERE workflow_name = ? AND run_id = ?`,
|
|
155
|
+
args: [workflowName, runId],
|
|
156
|
+
});
|
|
157
|
+
|
|
158
|
+
let snapshot: WorkflowRunState;
|
|
159
|
+
if (!existingSnapshotResult.rows?.[0]) {
|
|
160
|
+
// Create new snapshot if none exists
|
|
161
|
+
snapshot = {
|
|
162
|
+
context: {},
|
|
163
|
+
activePaths: [],
|
|
164
|
+
timestamp: Date.now(),
|
|
165
|
+
suspendedPaths: {},
|
|
166
|
+
serializedStepGraph: [],
|
|
167
|
+
value: {},
|
|
168
|
+
waitingPaths: {},
|
|
169
|
+
status: 'pending',
|
|
170
|
+
runId: runId,
|
|
171
|
+
runtimeContext: {},
|
|
172
|
+
} as WorkflowRunState;
|
|
173
|
+
} else {
|
|
174
|
+
// Parse existing snapshot
|
|
175
|
+
const existingSnapshot = existingSnapshotResult.rows[0].snapshot;
|
|
176
|
+
snapshot = typeof existingSnapshot === 'string' ? JSON.parse(existingSnapshot) : existingSnapshot;
|
|
177
|
+
}
|
|
178
|
+
|
|
179
|
+
// Merge the new step result and runtime context
|
|
180
|
+
snapshot.context[stepId] = result;
|
|
181
|
+
snapshot.runtimeContext = { ...snapshot.runtimeContext, ...runtimeContext };
|
|
182
|
+
|
|
183
|
+
// Update the snapshot within the same transaction
|
|
184
|
+
await tx.execute({
|
|
185
|
+
sql: `UPDATE ${TABLE_WORKFLOW_SNAPSHOT} SET snapshot = ? WHERE workflow_name = ? AND run_id = ?`,
|
|
186
|
+
args: [JSON.stringify(snapshot), workflowName, runId],
|
|
187
|
+
});
|
|
188
|
+
|
|
189
|
+
await tx.commit();
|
|
190
|
+
return snapshot.context;
|
|
191
|
+
} catch (error) {
|
|
192
|
+
if (!tx.closed) {
|
|
193
|
+
await tx.rollback();
|
|
194
|
+
}
|
|
195
|
+
throw error;
|
|
196
|
+
}
|
|
197
|
+
});
|
|
198
|
+
}
|
|
199
|
+
|
|
200
|
+
async updateWorkflowState({
|
|
201
|
+
workflowName,
|
|
202
|
+
runId,
|
|
203
|
+
opts,
|
|
204
|
+
}: {
|
|
205
|
+
workflowName: string;
|
|
206
|
+
runId: string;
|
|
207
|
+
opts: {
|
|
208
|
+
status: string;
|
|
209
|
+
result?: StepResult<any, any, any, any>;
|
|
210
|
+
error?: string;
|
|
211
|
+
suspendedPaths?: Record<string, number[]>;
|
|
212
|
+
waitingPaths?: Record<string, number[]>;
|
|
213
|
+
};
|
|
214
|
+
}): Promise<WorkflowRunState | undefined> {
|
|
215
|
+
return this.executeWithRetry(async () => {
|
|
216
|
+
// Use a transaction to ensure atomicity
|
|
217
|
+
const tx = await this.client.transaction('write');
|
|
218
|
+
try {
|
|
219
|
+
// Load existing snapshot within transaction
|
|
220
|
+
const existingSnapshotResult = await tx.execute({
|
|
221
|
+
sql: `SELECT snapshot FROM ${TABLE_WORKFLOW_SNAPSHOT} WHERE workflow_name = ? AND run_id = ?`,
|
|
222
|
+
args: [workflowName, runId],
|
|
223
|
+
});
|
|
224
|
+
|
|
225
|
+
if (!existingSnapshotResult.rows?.[0]) {
|
|
226
|
+
await tx.rollback();
|
|
227
|
+
return undefined;
|
|
228
|
+
}
|
|
229
|
+
|
|
230
|
+
// Parse existing snapshot
|
|
231
|
+
const existingSnapshot = existingSnapshotResult.rows[0].snapshot;
|
|
232
|
+
const snapshot = typeof existingSnapshot === 'string' ? JSON.parse(existingSnapshot) : existingSnapshot;
|
|
233
|
+
|
|
234
|
+
if (!snapshot || !snapshot?.context) {
|
|
235
|
+
await tx.rollback();
|
|
236
|
+
throw new Error(`Snapshot not found for runId ${runId}`);
|
|
237
|
+
}
|
|
238
|
+
|
|
239
|
+
// Merge the new options with the existing snapshot
|
|
240
|
+
const updatedSnapshot = { ...snapshot, ...opts };
|
|
241
|
+
|
|
242
|
+
// Update the snapshot within the same transaction
|
|
243
|
+
await tx.execute({
|
|
244
|
+
sql: `UPDATE ${TABLE_WORKFLOW_SNAPSHOT} SET snapshot = ? WHERE workflow_name = ? AND run_id = ?`,
|
|
245
|
+
args: [JSON.stringify(updatedSnapshot), workflowName, runId],
|
|
246
|
+
});
|
|
247
|
+
|
|
248
|
+
await tx.commit();
|
|
249
|
+
return updatedSnapshot;
|
|
250
|
+
} catch (error) {
|
|
251
|
+
if (!tx.closed) {
|
|
252
|
+
await tx.rollback();
|
|
253
|
+
}
|
|
254
|
+
throw error;
|
|
255
|
+
}
|
|
256
|
+
});
|
|
34
257
|
}
|
|
35
258
|
|
|
36
259
|
async persistWorkflowSnapshot({
|