@mastra/clickhouse 0.12.0 → 0.12.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.turbo/turbo-build.log +7 -7
- package/CHANGELOG.md +53 -0
- package/LICENSE.md +12 -4
- package/dist/_tsup-dts-rollup.d.cts +351 -64
- package/dist/_tsup-dts-rollup.d.ts +351 -64
- package/dist/index.cjs +2052 -609
- package/dist/index.d.cts +0 -2
- package/dist/index.d.ts +0 -2
- package/dist/index.js +2051 -606
- package/package.json +6 -6
- package/src/storage/domains/legacy-evals/index.ts +246 -0
- package/src/storage/domains/memory/index.ts +1393 -0
- package/src/storage/domains/operations/index.ts +319 -0
- package/src/storage/domains/scores/index.ts +326 -0
- package/src/storage/domains/traces/index.ts +275 -0
- package/src/storage/domains/utils.ts +86 -0
- package/src/storage/domains/workflows/index.ts +285 -0
- package/src/storage/index.test.ts +15 -1091
- package/src/storage/index.ts +184 -1246
|
@@ -0,0 +1,275 @@
|
|
|
1
|
+
import type { ClickHouseClient } from '@clickhouse/client';
|
|
2
|
+
import { ErrorCategory, ErrorDomain, MastraError } from '@mastra/core/error';
|
|
3
|
+
import { safelyParseJSON, TABLE_SCHEMAS, TABLE_TRACES, TracesStorage } from '@mastra/core/storage';
|
|
4
|
+
import type { PaginationInfo, StorageGetTracesPaginatedArg, StorageGetTracesArg } from '@mastra/core/storage';
|
|
5
|
+
import type { Trace } from '@mastra/core/telemetry';
|
|
6
|
+
import type { StoreOperationsClickhouse } from '../operations';
|
|
7
|
+
|
|
8
|
+
export class TracesStorageClickhouse extends TracesStorage {
|
|
9
|
+
protected client: ClickHouseClient;
|
|
10
|
+
protected operations: StoreOperationsClickhouse;
|
|
11
|
+
|
|
12
|
+
constructor({ client, operations }: { client: ClickHouseClient; operations: StoreOperationsClickhouse }) {
|
|
13
|
+
super();
|
|
14
|
+
this.client = client;
|
|
15
|
+
this.operations = operations;
|
|
16
|
+
}
|
|
17
|
+
|
|
18
|
+
async getTracesPaginated(args: StorageGetTracesPaginatedArg): Promise<PaginationInfo & { traces: Trace[] }> {
|
|
19
|
+
const { name, scope, page = 0, perPage = 100, attributes, filters, dateRange } = args;
|
|
20
|
+
const fromDate = dateRange?.start;
|
|
21
|
+
const toDate = dateRange?.end;
|
|
22
|
+
const currentOffset = page * perPage;
|
|
23
|
+
|
|
24
|
+
const queryArgs: Record<string, any> = {};
|
|
25
|
+
const conditions: string[] = [];
|
|
26
|
+
|
|
27
|
+
if (name) {
|
|
28
|
+
conditions.push(`name LIKE CONCAT({var_name:String}, '%')`);
|
|
29
|
+
queryArgs.var_name = name;
|
|
30
|
+
}
|
|
31
|
+
if (scope) {
|
|
32
|
+
conditions.push(`scope = {var_scope:String}`);
|
|
33
|
+
queryArgs.var_scope = scope;
|
|
34
|
+
}
|
|
35
|
+
if (attributes) {
|
|
36
|
+
Object.entries(attributes).forEach(([key, value]) => {
|
|
37
|
+
conditions.push(`JSONExtractString(attributes, '${key}') = {var_attr_${key}:String}`);
|
|
38
|
+
queryArgs[`var_attr_${key}`] = value;
|
|
39
|
+
});
|
|
40
|
+
}
|
|
41
|
+
if (filters) {
|
|
42
|
+
Object.entries(filters).forEach(([key, value]) => {
|
|
43
|
+
conditions.push(`${key} = {var_col_${key}:${TABLE_SCHEMAS.mastra_traces?.[key]?.type ?? 'text'}}`);
|
|
44
|
+
queryArgs[`var_col_${key}`] = value;
|
|
45
|
+
});
|
|
46
|
+
}
|
|
47
|
+
if (fromDate) {
|
|
48
|
+
conditions.push(`createdAt >= parseDateTime64BestEffort({var_from_date:String})`);
|
|
49
|
+
queryArgs.var_from_date = fromDate.toISOString();
|
|
50
|
+
}
|
|
51
|
+
if (toDate) {
|
|
52
|
+
conditions.push(`createdAt <= parseDateTime64BestEffort({var_to_date:String})`);
|
|
53
|
+
queryArgs.var_to_date = toDate.toISOString();
|
|
54
|
+
}
|
|
55
|
+
|
|
56
|
+
const whereClause = conditions.length > 0 ? `WHERE ${conditions.join(' AND ')}` : '';
|
|
57
|
+
|
|
58
|
+
try {
|
|
59
|
+
// Get total count
|
|
60
|
+
const countResult = await this.client.query({
|
|
61
|
+
query: `SELECT COUNT(*) as count FROM ${TABLE_TRACES} ${whereClause}`,
|
|
62
|
+
query_params: queryArgs,
|
|
63
|
+
clickhouse_settings: {
|
|
64
|
+
date_time_input_format: 'best_effort',
|
|
65
|
+
date_time_output_format: 'iso',
|
|
66
|
+
use_client_time_zone: 1,
|
|
67
|
+
output_format_json_quote_64bit_integers: 0,
|
|
68
|
+
},
|
|
69
|
+
});
|
|
70
|
+
|
|
71
|
+
const countData = await countResult.json();
|
|
72
|
+
const total = Number((countData.data?.[0] as any)?.count ?? 0);
|
|
73
|
+
|
|
74
|
+
if (total === 0) {
|
|
75
|
+
return {
|
|
76
|
+
traces: [],
|
|
77
|
+
total: 0,
|
|
78
|
+
page,
|
|
79
|
+
perPage,
|
|
80
|
+
hasMore: false,
|
|
81
|
+
};
|
|
82
|
+
}
|
|
83
|
+
|
|
84
|
+
// Get traces with pagination
|
|
85
|
+
const result = await this.client.query({
|
|
86
|
+
query: `SELECT *, toDateTime64(createdAt, 3) as createdAt FROM ${TABLE_TRACES} ${whereClause} ORDER BY "createdAt" DESC LIMIT {var_limit:UInt32} OFFSET {var_offset:UInt32}`,
|
|
87
|
+
query_params: { ...queryArgs, var_limit: perPage, var_offset: currentOffset },
|
|
88
|
+
clickhouse_settings: {
|
|
89
|
+
date_time_input_format: 'best_effort',
|
|
90
|
+
date_time_output_format: 'iso',
|
|
91
|
+
use_client_time_zone: 1,
|
|
92
|
+
output_format_json_quote_64bit_integers: 0,
|
|
93
|
+
},
|
|
94
|
+
});
|
|
95
|
+
|
|
96
|
+
if (!result) {
|
|
97
|
+
return {
|
|
98
|
+
traces: [],
|
|
99
|
+
total,
|
|
100
|
+
page,
|
|
101
|
+
perPage,
|
|
102
|
+
hasMore: false,
|
|
103
|
+
};
|
|
104
|
+
}
|
|
105
|
+
|
|
106
|
+
const resp = await result.json();
|
|
107
|
+
const rows: any[] = resp.data;
|
|
108
|
+
const traces = rows.map(row => ({
|
|
109
|
+
id: row.id,
|
|
110
|
+
parentSpanId: row.parentSpanId,
|
|
111
|
+
traceId: row.traceId,
|
|
112
|
+
name: row.name,
|
|
113
|
+
scope: row.scope,
|
|
114
|
+
kind: row.kind,
|
|
115
|
+
status: safelyParseJSON(row.status as string),
|
|
116
|
+
events: safelyParseJSON(row.events as string),
|
|
117
|
+
links: safelyParseJSON(row.links as string),
|
|
118
|
+
attributes: safelyParseJSON(row.attributes as string),
|
|
119
|
+
startTime: row.startTime,
|
|
120
|
+
endTime: row.endTime,
|
|
121
|
+
other: safelyParseJSON(row.other as string),
|
|
122
|
+
createdAt: row.createdAt,
|
|
123
|
+
}));
|
|
124
|
+
|
|
125
|
+
return {
|
|
126
|
+
traces,
|
|
127
|
+
total,
|
|
128
|
+
page,
|
|
129
|
+
perPage,
|
|
130
|
+
hasMore: currentOffset + traces.length < total,
|
|
131
|
+
};
|
|
132
|
+
} catch (error: any) {
|
|
133
|
+
if (error?.message?.includes('no such table') || error?.message?.includes('does not exist')) {
|
|
134
|
+
return {
|
|
135
|
+
traces: [],
|
|
136
|
+
total: 0,
|
|
137
|
+
page,
|
|
138
|
+
perPage,
|
|
139
|
+
hasMore: false,
|
|
140
|
+
};
|
|
141
|
+
}
|
|
142
|
+
throw new MastraError(
|
|
143
|
+
{
|
|
144
|
+
id: 'CLICKHOUSE_STORAGE_GET_TRACES_PAGINATED_FAILED',
|
|
145
|
+
domain: ErrorDomain.STORAGE,
|
|
146
|
+
category: ErrorCategory.THIRD_PARTY,
|
|
147
|
+
details: {
|
|
148
|
+
name: name ?? null,
|
|
149
|
+
scope: scope ?? null,
|
|
150
|
+
page,
|
|
151
|
+
perPage,
|
|
152
|
+
attributes: attributes ? JSON.stringify(attributes) : null,
|
|
153
|
+
filters: filters ? JSON.stringify(filters) : null,
|
|
154
|
+
dateRange: dateRange ? JSON.stringify(dateRange) : null,
|
|
155
|
+
},
|
|
156
|
+
},
|
|
157
|
+
error,
|
|
158
|
+
);
|
|
159
|
+
}
|
|
160
|
+
}
|
|
161
|
+
|
|
162
|
+
async getTraces({
|
|
163
|
+
name,
|
|
164
|
+
scope,
|
|
165
|
+
page,
|
|
166
|
+
perPage,
|
|
167
|
+
attributes,
|
|
168
|
+
filters,
|
|
169
|
+
fromDate,
|
|
170
|
+
toDate,
|
|
171
|
+
}: StorageGetTracesArg): Promise<any[]> {
|
|
172
|
+
const limit = perPage;
|
|
173
|
+
const offset = page * perPage;
|
|
174
|
+
|
|
175
|
+
const args: Record<string, any> = {};
|
|
176
|
+
|
|
177
|
+
const conditions: string[] = [];
|
|
178
|
+
if (name) {
|
|
179
|
+
conditions.push(`name LIKE CONCAT({var_name:String}, '%')`);
|
|
180
|
+
args.var_name = name;
|
|
181
|
+
}
|
|
182
|
+
if (scope) {
|
|
183
|
+
conditions.push(`scope = {var_scope:String}`);
|
|
184
|
+
args.var_scope = scope;
|
|
185
|
+
}
|
|
186
|
+
if (attributes) {
|
|
187
|
+
Object.entries(attributes).forEach(([key, value]) => {
|
|
188
|
+
conditions.push(`JSONExtractString(attributes, '${key}') = {var_attr_${key}:String}`);
|
|
189
|
+
args[`var_attr_${key}`] = value;
|
|
190
|
+
});
|
|
191
|
+
}
|
|
192
|
+
|
|
193
|
+
if (filters) {
|
|
194
|
+
Object.entries(filters).forEach(([key, value]) => {
|
|
195
|
+
conditions.push(`${key} = {var_col_${key}:${TABLE_SCHEMAS.mastra_traces?.[key]?.type ?? 'text'}}`);
|
|
196
|
+
args[`var_col_${key}`] = value;
|
|
197
|
+
});
|
|
198
|
+
}
|
|
199
|
+
|
|
200
|
+
if (fromDate) {
|
|
201
|
+
conditions.push(`createdAt >= {var_from_date:DateTime64(3)}`);
|
|
202
|
+
args.var_from_date = fromDate.getTime() / 1000; // Convert to Unix timestamp
|
|
203
|
+
}
|
|
204
|
+
|
|
205
|
+
if (toDate) {
|
|
206
|
+
conditions.push(`createdAt <= {var_to_date:DateTime64(3)}`);
|
|
207
|
+
args.var_to_date = toDate.getTime() / 1000; // Convert to Unix timestamp
|
|
208
|
+
}
|
|
209
|
+
|
|
210
|
+
const whereClause = conditions.length > 0 ? `WHERE ${conditions.join(' AND ')}` : '';
|
|
211
|
+
|
|
212
|
+
try {
|
|
213
|
+
const result = await this.client.query({
|
|
214
|
+
query: `SELECT *, toDateTime64(createdAt, 3) as createdAt FROM ${TABLE_TRACES} ${whereClause} ORDER BY "createdAt" DESC LIMIT ${limit} OFFSET ${offset}`,
|
|
215
|
+
query_params: args,
|
|
216
|
+
clickhouse_settings: {
|
|
217
|
+
// Allows to insert serialized JS Dates (such as '2023-12-06T10:54:48.000Z')
|
|
218
|
+
date_time_input_format: 'best_effort',
|
|
219
|
+
date_time_output_format: 'iso',
|
|
220
|
+
use_client_time_zone: 1,
|
|
221
|
+
output_format_json_quote_64bit_integers: 0,
|
|
222
|
+
},
|
|
223
|
+
});
|
|
224
|
+
|
|
225
|
+
if (!result) {
|
|
226
|
+
return [];
|
|
227
|
+
}
|
|
228
|
+
|
|
229
|
+
const resp = await result.json();
|
|
230
|
+
const rows: any[] = resp.data;
|
|
231
|
+
return rows.map(row => ({
|
|
232
|
+
id: row.id,
|
|
233
|
+
parentSpanId: row.parentSpanId,
|
|
234
|
+
traceId: row.traceId,
|
|
235
|
+
name: row.name,
|
|
236
|
+
scope: row.scope,
|
|
237
|
+
kind: row.kind,
|
|
238
|
+
status: safelyParseJSON(row.status as string),
|
|
239
|
+
events: safelyParseJSON(row.events as string),
|
|
240
|
+
links: safelyParseJSON(row.links as string),
|
|
241
|
+
attributes: safelyParseJSON(row.attributes as string),
|
|
242
|
+
startTime: row.startTime,
|
|
243
|
+
endTime: row.endTime,
|
|
244
|
+
other: safelyParseJSON(row.other as string),
|
|
245
|
+
createdAt: row.createdAt,
|
|
246
|
+
}));
|
|
247
|
+
} catch (error: any) {
|
|
248
|
+
if (error?.message?.includes('no such table') || error?.message?.includes('does not exist')) {
|
|
249
|
+
return [];
|
|
250
|
+
}
|
|
251
|
+
throw new MastraError(
|
|
252
|
+
{
|
|
253
|
+
id: 'CLICKHOUSE_STORAGE_GET_TRACES_FAILED',
|
|
254
|
+
domain: ErrorDomain.STORAGE,
|
|
255
|
+
category: ErrorCategory.THIRD_PARTY,
|
|
256
|
+
details: {
|
|
257
|
+
name: name ?? null,
|
|
258
|
+
scope: scope ?? null,
|
|
259
|
+
page,
|
|
260
|
+
perPage,
|
|
261
|
+
attributes: attributes ? JSON.stringify(attributes) : null,
|
|
262
|
+
filters: filters ? JSON.stringify(filters) : null,
|
|
263
|
+
fromDate: fromDate?.toISOString() ?? null,
|
|
264
|
+
toDate: toDate?.toISOString() ?? null,
|
|
265
|
+
},
|
|
266
|
+
},
|
|
267
|
+
error,
|
|
268
|
+
);
|
|
269
|
+
}
|
|
270
|
+
}
|
|
271
|
+
|
|
272
|
+
async batchTraceInsert(args: { records: Trace[] }): Promise<void> {
|
|
273
|
+
await this.operations.batchInsert({ tableName: TABLE_TRACES, records: args.records });
|
|
274
|
+
}
|
|
275
|
+
}
|
|
@@ -0,0 +1,86 @@
|
|
|
1
|
+
import type { TABLE_NAMES, TABLE_SCHEMAS, StorageColumn } from '@mastra/core/storage';
|
|
2
|
+
import {
|
|
3
|
+
TABLE_MESSAGES,
|
|
4
|
+
TABLE_RESOURCES,
|
|
5
|
+
TABLE_EVALS,
|
|
6
|
+
TABLE_SCORERS,
|
|
7
|
+
TABLE_THREADS,
|
|
8
|
+
TABLE_TRACES,
|
|
9
|
+
TABLE_WORKFLOW_SNAPSHOT,
|
|
10
|
+
safelyParseJSON,
|
|
11
|
+
} from '@mastra/core/storage';
|
|
12
|
+
|
|
13
|
+
// ClickHouse table engine per logical table.
// ReplacingMergeTree is used for tables whose rows are "updated" by
// re-inserting a newer version (latest row wins on merge / FINAL reads);
// append-only tables use plain MergeTree.
export const TABLE_ENGINES: Record<TABLE_NAMES, string> = {
  [TABLE_MESSAGES]: `MergeTree()`,
  [TABLE_WORKFLOW_SNAPSHOT]: `ReplacingMergeTree()`,
  [TABLE_TRACES]: `MergeTree()`,
  [TABLE_THREADS]: `ReplacingMergeTree()`,
  [TABLE_EVALS]: `MergeTree()`,
  [TABLE_SCORERS]: `MergeTree()`,
  [TABLE_RESOURCES]: `ReplacingMergeTree()`,
};
|
|
22
|
+
|
|
23
|
+
// Maps the storage-layer abstract column types to concrete ClickHouse types.
// Note that uuid and jsonb both map to String: UUIDs are stored as plain
// strings and JSON values are stored serialized.
export const COLUMN_TYPES: Record<StorageColumn['type'], string> = {
  text: 'String',
  timestamp: 'DateTime64(3)', // millisecond precision
  uuid: 'String',
  jsonb: 'String',
  integer: 'Int64',
  float: 'Float64',
  bigint: 'Int64',
};
|
|
32
|
+
|
|
33
|
+
/**
 * Time unit accepted by ClickHouse INTERVAL expressions; used to express
 * TTL intervals in {@link ClickhouseConfig}.
 */
export type IntervalUnit =
  | 'NANOSECOND'
  | 'MICROSECOND'
  | 'MILLISECOND'
  | 'SECOND'
  | 'MINUTE'
  | 'HOUR'
  | 'DAY'
  | 'WEEK'
  | 'MONTH'
  | 'QUARTER'
  | 'YEAR';
|
|
45
|
+
|
|
46
|
+
/**
 * Connection and retention configuration for the ClickHouse storage adapter.
 *
 * `ttl` optionally configures ClickHouse TTL clauses per table:
 * - `row` expires whole rows after `interval` `unit`s, counted from the
 *   column named by `ttlKey` (when provided).
 * - `columns` expires individual columns of that table's schema the same way.
 */
export type ClickhouseConfig = {
  url: string;
  username: string;
  password: string;
  ttl?: {
    [TableKey in TABLE_NAMES]?: {
      row?: { interval: number; unit: IntervalUnit; ttlKey?: string };
      columns?: Partial<{
        [ColumnKey in keyof (typeof TABLE_SCHEMAS)[TableKey]]: {
          interval: number;
          unit: IntervalUnit;
          ttlKey?: string;
        };
      }>;
    };
  };
};
|
|
63
|
+
|
|
64
|
+
export function transformRow<R>(row: any): R {
|
|
65
|
+
if (!row) {
|
|
66
|
+
return row;
|
|
67
|
+
}
|
|
68
|
+
|
|
69
|
+
if (row.createdAt) {
|
|
70
|
+
row.createdAt = new Date(row.createdAt);
|
|
71
|
+
}
|
|
72
|
+
if (row.updatedAt) {
|
|
73
|
+
row.updatedAt = new Date(row.updatedAt);
|
|
74
|
+
}
|
|
75
|
+
|
|
76
|
+
// Parse content field if it's a JSON string
|
|
77
|
+
if (row.content && typeof row.content === 'string') {
|
|
78
|
+
row.content = safelyParseJSON(row.content);
|
|
79
|
+
}
|
|
80
|
+
|
|
81
|
+
return row;
|
|
82
|
+
}
|
|
83
|
+
|
|
84
|
+
export function transformRows<R>(rows: any[]): R[] {
|
|
85
|
+
return rows.map((row: any) => transformRow<R>(row));
|
|
86
|
+
}
|
|
@@ -0,0 +1,285 @@
|
|
|
1
|
+
import type { ClickHouseClient } from '@clickhouse/client';
|
|
2
|
+
import { ErrorCategory, ErrorDomain, MastraError } from '@mastra/core/error';
|
|
3
|
+
import { TABLE_WORKFLOW_SNAPSHOT, WorkflowsStorage } from '@mastra/core/storage';
|
|
4
|
+
import type { WorkflowRun, WorkflowRuns } from '@mastra/core/storage';
|
|
5
|
+
import type { WorkflowRunState } from '@mastra/core/workflows';
|
|
6
|
+
import type { StoreOperationsClickhouse } from '../operations';
|
|
7
|
+
import { TABLE_ENGINES } from '../utils';
|
|
8
|
+
|
|
9
|
+
export class WorkflowsStorageClickhouse extends WorkflowsStorage {
|
|
10
|
+
protected client: ClickHouseClient;
|
|
11
|
+
protected operations: StoreOperationsClickhouse;
|
|
12
|
+
constructor({ client, operations }: { client: ClickHouseClient; operations: StoreOperationsClickhouse }) {
|
|
13
|
+
super();
|
|
14
|
+
this.operations = operations;
|
|
15
|
+
this.client = client;
|
|
16
|
+
}
|
|
17
|
+
|
|
18
|
+
async persistWorkflowSnapshot({
|
|
19
|
+
workflowName,
|
|
20
|
+
runId,
|
|
21
|
+
snapshot,
|
|
22
|
+
}: {
|
|
23
|
+
workflowName: string;
|
|
24
|
+
runId: string;
|
|
25
|
+
snapshot: WorkflowRunState;
|
|
26
|
+
}): Promise<void> {
|
|
27
|
+
try {
|
|
28
|
+
const currentSnapshot = await this.operations.load({
|
|
29
|
+
tableName: TABLE_WORKFLOW_SNAPSHOT,
|
|
30
|
+
keys: { workflow_name: workflowName, run_id: runId },
|
|
31
|
+
});
|
|
32
|
+
|
|
33
|
+
const now = new Date();
|
|
34
|
+
const persisting = currentSnapshot
|
|
35
|
+
? {
|
|
36
|
+
...currentSnapshot,
|
|
37
|
+
snapshot: JSON.stringify(snapshot),
|
|
38
|
+
updatedAt: now.toISOString(),
|
|
39
|
+
}
|
|
40
|
+
: {
|
|
41
|
+
workflow_name: workflowName,
|
|
42
|
+
run_id: runId,
|
|
43
|
+
snapshot: JSON.stringify(snapshot),
|
|
44
|
+
createdAt: now.toISOString(),
|
|
45
|
+
updatedAt: now.toISOString(),
|
|
46
|
+
};
|
|
47
|
+
|
|
48
|
+
await this.client.insert({
|
|
49
|
+
table: TABLE_WORKFLOW_SNAPSHOT,
|
|
50
|
+
format: 'JSONEachRow',
|
|
51
|
+
values: [persisting],
|
|
52
|
+
clickhouse_settings: {
|
|
53
|
+
// Allows to insert serialized JS Dates (such as '2023-12-06T10:54:48.000Z')
|
|
54
|
+
date_time_input_format: 'best_effort',
|
|
55
|
+
use_client_time_zone: 1,
|
|
56
|
+
output_format_json_quote_64bit_integers: 0,
|
|
57
|
+
},
|
|
58
|
+
});
|
|
59
|
+
} catch (error: any) {
|
|
60
|
+
throw new MastraError(
|
|
61
|
+
{
|
|
62
|
+
id: 'CLICKHOUSE_STORAGE_PERSIST_WORKFLOW_SNAPSHOT_FAILED',
|
|
63
|
+
domain: ErrorDomain.STORAGE,
|
|
64
|
+
category: ErrorCategory.THIRD_PARTY,
|
|
65
|
+
details: { workflowName, runId },
|
|
66
|
+
},
|
|
67
|
+
error,
|
|
68
|
+
);
|
|
69
|
+
}
|
|
70
|
+
}
|
|
71
|
+
|
|
72
|
+
async loadWorkflowSnapshot({
|
|
73
|
+
workflowName,
|
|
74
|
+
runId,
|
|
75
|
+
}: {
|
|
76
|
+
workflowName: string;
|
|
77
|
+
runId: string;
|
|
78
|
+
}): Promise<WorkflowRunState | null> {
|
|
79
|
+
try {
|
|
80
|
+
const result = await this.operations.load({
|
|
81
|
+
tableName: TABLE_WORKFLOW_SNAPSHOT,
|
|
82
|
+
keys: {
|
|
83
|
+
workflow_name: workflowName,
|
|
84
|
+
run_id: runId,
|
|
85
|
+
},
|
|
86
|
+
});
|
|
87
|
+
|
|
88
|
+
if (!result) {
|
|
89
|
+
return null;
|
|
90
|
+
}
|
|
91
|
+
|
|
92
|
+
return (result as any).snapshot;
|
|
93
|
+
} catch (error: any) {
|
|
94
|
+
throw new MastraError(
|
|
95
|
+
{
|
|
96
|
+
id: 'CLICKHOUSE_STORAGE_LOAD_WORKFLOW_SNAPSHOT_FAILED',
|
|
97
|
+
domain: ErrorDomain.STORAGE,
|
|
98
|
+
category: ErrorCategory.THIRD_PARTY,
|
|
99
|
+
details: { workflowName, runId },
|
|
100
|
+
},
|
|
101
|
+
error,
|
|
102
|
+
);
|
|
103
|
+
}
|
|
104
|
+
}
|
|
105
|
+
|
|
106
|
+
private parseWorkflowRun(row: any): WorkflowRun {
|
|
107
|
+
let parsedSnapshot: WorkflowRunState | string = row.snapshot as string;
|
|
108
|
+
if (typeof parsedSnapshot === 'string') {
|
|
109
|
+
try {
|
|
110
|
+
parsedSnapshot = JSON.parse(row.snapshot as string) as WorkflowRunState;
|
|
111
|
+
} catch (e) {
|
|
112
|
+
// If parsing fails, return the raw snapshot string
|
|
113
|
+
console.warn(`Failed to parse snapshot for workflow ${row.workflow_name}: ${e}`);
|
|
114
|
+
}
|
|
115
|
+
}
|
|
116
|
+
|
|
117
|
+
return {
|
|
118
|
+
workflowName: row.workflow_name,
|
|
119
|
+
runId: row.run_id,
|
|
120
|
+
snapshot: parsedSnapshot,
|
|
121
|
+
createdAt: new Date(row.createdAt),
|
|
122
|
+
updatedAt: new Date(row.updatedAt),
|
|
123
|
+
resourceId: row.resourceId,
|
|
124
|
+
};
|
|
125
|
+
}
|
|
126
|
+
|
|
127
|
+
async getWorkflowRuns({
|
|
128
|
+
workflowName,
|
|
129
|
+
fromDate,
|
|
130
|
+
toDate,
|
|
131
|
+
limit,
|
|
132
|
+
offset,
|
|
133
|
+
resourceId,
|
|
134
|
+
}: {
|
|
135
|
+
workflowName?: string;
|
|
136
|
+
fromDate?: Date;
|
|
137
|
+
toDate?: Date;
|
|
138
|
+
limit?: number;
|
|
139
|
+
offset?: number;
|
|
140
|
+
resourceId?: string;
|
|
141
|
+
} = {}): Promise<WorkflowRuns> {
|
|
142
|
+
try {
|
|
143
|
+
const conditions: string[] = [];
|
|
144
|
+
const values: Record<string, any> = {};
|
|
145
|
+
|
|
146
|
+
if (workflowName) {
|
|
147
|
+
conditions.push(`workflow_name = {var_workflow_name:String}`);
|
|
148
|
+
values.var_workflow_name = workflowName;
|
|
149
|
+
}
|
|
150
|
+
|
|
151
|
+
if (resourceId) {
|
|
152
|
+
const hasResourceId = await this.operations.hasColumn(TABLE_WORKFLOW_SNAPSHOT, 'resourceId');
|
|
153
|
+
if (hasResourceId) {
|
|
154
|
+
conditions.push(`resourceId = {var_resourceId:String}`);
|
|
155
|
+
values.var_resourceId = resourceId;
|
|
156
|
+
} else {
|
|
157
|
+
console.warn(`[${TABLE_WORKFLOW_SNAPSHOT}] resourceId column not found. Skipping resourceId filter.`);
|
|
158
|
+
}
|
|
159
|
+
}
|
|
160
|
+
|
|
161
|
+
if (fromDate) {
|
|
162
|
+
conditions.push(`createdAt >= {var_from_date:DateTime64(3)}`);
|
|
163
|
+
values.var_from_date = fromDate.getTime() / 1000; // Convert to Unix timestamp
|
|
164
|
+
}
|
|
165
|
+
|
|
166
|
+
if (toDate) {
|
|
167
|
+
conditions.push(`createdAt <= {var_to_date:DateTime64(3)}`);
|
|
168
|
+
values.var_to_date = toDate.getTime() / 1000; // Convert to Unix timestamp
|
|
169
|
+
}
|
|
170
|
+
|
|
171
|
+
const whereClause = conditions.length > 0 ? `WHERE ${conditions.join(' AND ')}` : '';
|
|
172
|
+
const limitClause = limit !== undefined ? `LIMIT ${limit}` : '';
|
|
173
|
+
const offsetClause = offset !== undefined ? `OFFSET ${offset}` : '';
|
|
174
|
+
|
|
175
|
+
let total = 0;
|
|
176
|
+
// Only get total count when using pagination
|
|
177
|
+
if (limit !== undefined && offset !== undefined) {
|
|
178
|
+
const countResult = await this.client.query({
|
|
179
|
+
query: `SELECT COUNT(*) as count FROM ${TABLE_WORKFLOW_SNAPSHOT} ${TABLE_ENGINES[TABLE_WORKFLOW_SNAPSHOT].startsWith('ReplacingMergeTree') ? 'FINAL' : ''} ${whereClause}`,
|
|
180
|
+
query_params: values,
|
|
181
|
+
format: 'JSONEachRow',
|
|
182
|
+
});
|
|
183
|
+
const countRows = await countResult.json();
|
|
184
|
+
total = Number((countRows as Array<{ count: string | number }>)[0]?.count ?? 0);
|
|
185
|
+
}
|
|
186
|
+
|
|
187
|
+
// Get results
|
|
188
|
+
const result = await this.client.query({
|
|
189
|
+
query: `
|
|
190
|
+
SELECT
|
|
191
|
+
workflow_name,
|
|
192
|
+
run_id,
|
|
193
|
+
snapshot,
|
|
194
|
+
toDateTime64(createdAt, 3) as createdAt,
|
|
195
|
+
toDateTime64(updatedAt, 3) as updatedAt,
|
|
196
|
+
resourceId
|
|
197
|
+
FROM ${TABLE_WORKFLOW_SNAPSHOT} ${TABLE_ENGINES[TABLE_WORKFLOW_SNAPSHOT].startsWith('ReplacingMergeTree') ? 'FINAL' : ''}
|
|
198
|
+
${whereClause}
|
|
199
|
+
ORDER BY createdAt DESC
|
|
200
|
+
${limitClause}
|
|
201
|
+
${offsetClause}
|
|
202
|
+
`,
|
|
203
|
+
query_params: values,
|
|
204
|
+
format: 'JSONEachRow',
|
|
205
|
+
});
|
|
206
|
+
|
|
207
|
+
const resultJson = await result.json();
|
|
208
|
+
const rows = resultJson as any[];
|
|
209
|
+
const runs = rows.map(row => {
|
|
210
|
+
return this.parseWorkflowRun(row);
|
|
211
|
+
});
|
|
212
|
+
|
|
213
|
+
// Use runs.length as total when not paginating
|
|
214
|
+
return { runs, total: total || runs.length };
|
|
215
|
+
} catch (error: any) {
|
|
216
|
+
throw new MastraError(
|
|
217
|
+
{
|
|
218
|
+
id: 'CLICKHOUSE_STORAGE_GET_WORKFLOW_RUNS_FAILED',
|
|
219
|
+
domain: ErrorDomain.STORAGE,
|
|
220
|
+
category: ErrorCategory.THIRD_PARTY,
|
|
221
|
+
details: { workflowName: workflowName ?? '', resourceId: resourceId ?? '' },
|
|
222
|
+
},
|
|
223
|
+
error,
|
|
224
|
+
);
|
|
225
|
+
}
|
|
226
|
+
}
|
|
227
|
+
|
|
228
|
+
async getWorkflowRunById({
|
|
229
|
+
runId,
|
|
230
|
+
workflowName,
|
|
231
|
+
}: {
|
|
232
|
+
runId: string;
|
|
233
|
+
workflowName?: string;
|
|
234
|
+
}): Promise<WorkflowRun | null> {
|
|
235
|
+
try {
|
|
236
|
+
const conditions: string[] = [];
|
|
237
|
+
const values: Record<string, any> = {};
|
|
238
|
+
|
|
239
|
+
if (runId) {
|
|
240
|
+
conditions.push(`run_id = {var_runId:String}`);
|
|
241
|
+
values.var_runId = runId;
|
|
242
|
+
}
|
|
243
|
+
|
|
244
|
+
if (workflowName) {
|
|
245
|
+
conditions.push(`workflow_name = {var_workflow_name:String}`);
|
|
246
|
+
values.var_workflow_name = workflowName;
|
|
247
|
+
}
|
|
248
|
+
|
|
249
|
+
const whereClause = conditions.length > 0 ? `WHERE ${conditions.join(' AND ')}` : '';
|
|
250
|
+
|
|
251
|
+
// Get results
|
|
252
|
+
const result = await this.client.query({
|
|
253
|
+
query: `
|
|
254
|
+
SELECT
|
|
255
|
+
workflow_name,
|
|
256
|
+
run_id,
|
|
257
|
+
snapshot,
|
|
258
|
+
toDateTime64(createdAt, 3) as createdAt,
|
|
259
|
+
toDateTime64(updatedAt, 3) as updatedAt,
|
|
260
|
+
resourceId
|
|
261
|
+
FROM ${TABLE_WORKFLOW_SNAPSHOT} ${TABLE_ENGINES[TABLE_WORKFLOW_SNAPSHOT].startsWith('ReplacingMergeTree') ? 'FINAL' : ''}
|
|
262
|
+
${whereClause}
|
|
263
|
+
`,
|
|
264
|
+
query_params: values,
|
|
265
|
+
format: 'JSONEachRow',
|
|
266
|
+
});
|
|
267
|
+
|
|
268
|
+
const resultJson = await result.json();
|
|
269
|
+
if (!Array.isArray(resultJson) || resultJson.length === 0) {
|
|
270
|
+
return null;
|
|
271
|
+
}
|
|
272
|
+
return this.parseWorkflowRun(resultJson[0]);
|
|
273
|
+
} catch (error: any) {
|
|
274
|
+
throw new MastraError(
|
|
275
|
+
{
|
|
276
|
+
id: 'CLICKHOUSE_STORAGE_GET_WORKFLOW_RUN_BY_ID_FAILED',
|
|
277
|
+
domain: ErrorDomain.STORAGE,
|
|
278
|
+
category: ErrorCategory.THIRD_PARTY,
|
|
279
|
+
details: { runId: runId ?? '', workflowName: workflowName ?? '' },
|
|
280
|
+
},
|
|
281
|
+
error,
|
|
282
|
+
);
|
|
283
|
+
}
|
|
284
|
+
}
|
|
285
|
+
}
|