@mastra/clickhouse 1.0.0-beta.9 → 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +944 -0
- package/README.md +1 -1
- package/dist/docs/README.md +1 -1
- package/dist/docs/SKILL.md +1 -1
- package/dist/docs/SOURCE_MAP.json +1 -1
- package/dist/docs/storage/01-reference.md +25 -25
- package/dist/index.cjs +384 -32
- package/dist/index.cjs.map +1 -1
- package/dist/index.js +385 -33
- package/dist/index.js.map +1 -1
- package/dist/storage/db/index.d.ts +50 -0
- package/dist/storage/db/index.d.ts.map +1 -1
- package/dist/storage/db/utils.d.ts.map +1 -1
- package/dist/storage/domains/memory/index.d.ts +2 -2
- package/dist/storage/domains/memory/index.d.ts.map +1 -1
- package/dist/storage/domains/observability/index.d.ts +23 -0
- package/dist/storage/domains/observability/index.d.ts.map +1 -1
- package/dist/storage/index.d.ts +42 -9
- package/dist/storage/index.d.ts.map +1 -1
- package/package.json +5 -5
package/dist/index.cjs
CHANGED
@@ -15,8 +15,10 @@ var TABLE_ENGINES = {
   [storage.TABLE_THREADS]: `ReplacingMergeTree()`,
   [storage.TABLE_SCORERS]: `MergeTree()`,
   [storage.TABLE_RESOURCES]: `ReplacingMergeTree()`,
-  //
-
+  // ReplacingMergeTree(updatedAt) deduplicates rows with the same (traceId, spanId) sorting key,
+  // keeping the row with the highest updatedAt value. Combined with ORDER BY (traceId, spanId),
+  // this provides eventual uniqueness for the (traceId, spanId) composite key.
+  [storage.TABLE_SPANS]: `ReplacingMergeTree(updatedAt)`,
   mastra_agents: `ReplacingMergeTree()`
 };
 var COLUMN_TYPES = {
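For context on the engine change above: ReplacingMergeTree deduplicates asynchronously during background merges, so uniqueness on (traceId, spanId) is eventual rather than immediate, and queries can force it with FINAL. The following is a minimal sketch (not part of the package) that demonstrates the behavior with @clickhouse/client against a throwaway table; the local URL and the simplified column set are assumptions.

```ts
// Sketch only: shows how ReplacingMergeTree(updatedAt) with ORDER BY (traceId, spanId)
// collapses duplicate spans. Assumes ClickHouse at http://localhost:8123 and a
// throwaway table; the real spans schema has many more columns.
import { createClient } from '@clickhouse/client';

async function main() {
  const client = createClient({ url: 'http://localhost:8123' });

  await client.command({
    query: `
      CREATE TABLE IF NOT EXISTS spans_demo (
        traceId String,
        spanId String,
        name String,
        updatedAt DateTime64(3)
      )
      ENGINE = ReplacingMergeTree(updatedAt)
      ORDER BY (traceId, spanId)
    `,
  });

  // Two versions of the same logical span: the row with the higher updatedAt
  // wins once parts are merged (or immediately when the query uses FINAL).
  await client.insert({
    table: 'spans_demo',
    values: [
      { traceId: 't1', spanId: 's1', name: 'started', updatedAt: '2024-01-01 00:00:00.000' },
      { traceId: 't1', spanId: 's1', name: 'ended', updatedAt: '2024-01-01 00:00:01.000' },
    ],
    format: 'JSONEachRow',
  });

  // FINAL applies the replacing logic at query time; without it the duplicate
  // may still be visible until a background merge runs.
  const result = await client.query({
    query: 'SELECT name FROM spans_demo FINAL WHERE traceId = {t:String} AND spanId = {s:String}',
    query_params: { t: 't1', s: 's1' },
    format: 'JSONEachRow',
  });
  console.log(await result.json()); // [{ name: 'ended' }]
  await client.close();
}

main().catch(console.error);
```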
@@ -99,6 +101,213 @@ var ClickhouseDB = class extends base.MastraBase {
     const columns = await result.json();
     return columns.some((c) => c.name === column);
   }
+  /**
+   * Checks if a table exists in the database.
+   */
+  async tableExists(tableName) {
+    try {
+      const result = await this.client.query({
+        query: `EXISTS TABLE ${tableName}`,
+        format: "JSONEachRow"
+      });
+      const rows = await result.json();
+      return rows[0]?.result === 1;
+    } catch {
+      return false;
+    }
+  }
+  /**
+   * Gets the sorting key (ORDER BY columns) for a table.
+   * Returns null if the table doesn't exist.
+   */
+  async getTableSortingKey(tableName) {
+    try {
+      const result = await this.client.query({
+        query: `SELECT sorting_key FROM system.tables WHERE database = currentDatabase() AND name = {tableName:String}`,
+        query_params: { tableName },
+        format: "JSONEachRow"
+      });
+      const rows = await result.json();
+      return rows[0]?.sorting_key ?? null;
+    } catch {
+      return null;
+    }
+  }
+  /**
+   * Checks if migration is needed for the spans table.
+   * Returns information about the current state.
+   */
+  async checkSpansMigrationStatus(tableName) {
+    const exists = await this.tableExists(tableName);
+    if (!exists) {
+      return { needsMigration: false, currentSortingKey: null };
+    }
+    const currentSortingKey = await this.getTableSortingKey(tableName);
+    if (!currentSortingKey) {
+      return { needsMigration: false, currentSortingKey: null };
+    }
+    const needsMigration = currentSortingKey.toLowerCase().startsWith("createdat");
+    return { needsMigration, currentSortingKey };
+  }
+  /**
+   * Checks for duplicate (traceId, spanId) combinations in the spans table.
+   * Returns information about duplicates for logging/CLI purposes.
+   */
+  async checkForDuplicateSpans(tableName) {
+    try {
+      const result = await this.client.query({
+        query: `
+          SELECT count() as duplicate_count
+          FROM (
+            SELECT traceId, spanId
+            FROM ${tableName}
+            GROUP BY traceId, spanId
+            HAVING count() > 1
+          )
+        `,
+        format: "JSONEachRow"
+      });
+      const rows = await result.json();
+      const duplicateCount = parseInt(rows[0]?.duplicate_count ?? "0", 10);
+      return {
+        hasDuplicates: duplicateCount > 0,
+        duplicateCount
+      };
+    } catch (error) {
+      this.logger?.debug?.(`Could not check for duplicates: ${error}`);
+      return { hasDuplicates: false, duplicateCount: 0 };
+    }
+  }
+  /**
+   * Migrates the spans table from the old sorting key (createdAt, traceId, spanId)
+   * to the new sorting key (traceId, spanId) for proper uniqueness enforcement.
+   *
+   * This migration:
+   * 1. Renames the old table to a backup
+   * 2. Creates a new table with the correct sorting key
+   * 3. Copies all data from the backup to the new table, deduplicating by (traceId, spanId)
+   *    using priority-based selection:
+   *    - First, prefer completed spans (those with endedAt set)
+   *    - Then prefer the most recently updated span (highest updatedAt)
+   *    - Finally use creation time as tiebreaker (highest createdAt)
+   * 4. Drops the backup table
+   *
+   * The deduplication strategy matches the PostgreSQL migration (PR #12073) to ensure
+   * consistent behavior across storage backends.
+   *
+   * The migration is idempotent - it only runs if the old sorting key is detected.
+   *
+   * @returns true if migration was performed, false if not needed
+   */
+  async migrateSpansTableSortingKey({
+    tableName,
+    schema
+  }) {
+    if (tableName !== storage.TABLE_SPANS) {
+      return false;
+    }
+    const exists = await this.tableExists(tableName);
+    if (!exists) {
+      return false;
+    }
+    const currentSortingKey = await this.getTableSortingKey(tableName);
+    if (!currentSortingKey) {
+      return false;
+    }
+    const needsMigration = currentSortingKey.toLowerCase().startsWith("createdat");
+    if (!needsMigration) {
+      this.logger?.debug?.(`Spans table already has correct sorting key: ${currentSortingKey}`);
+      return false;
+    }
+    this.logger?.info?.(`Migrating spans table from sorting key "${currentSortingKey}" to "(traceId, spanId)"`);
+    const backupTableName = `${tableName}_backup_${Date.now()}`;
+    const rowTtl = this.ttl?.[tableName]?.row;
+    try {
+      await this.client.command({
+        query: `RENAME TABLE ${tableName} TO ${backupTableName}`
+      });
+      const columns = Object.entries(schema).map(([name, def]) => {
+        let sqlType = this.getSqlType(def.type);
+        let isNullable = def.nullable === true;
+        if (tableName === storage.TABLE_SPANS && name === "updatedAt") {
+          isNullable = false;
+        }
+        if (isNullable) {
+          sqlType = `Nullable(${sqlType})`;
+        }
+        const constraints = [];
+        if (name === "metadata" && (def.type === "text" || def.type === "jsonb") && isNullable) {
+          constraints.push("DEFAULT '{}'");
+        }
+        const columnTtl = this.ttl?.[tableName]?.columns?.[name];
+        return `"${name}" ${sqlType} ${constraints.join(" ")} ${columnTtl ? `TTL toDateTime(${columnTtl.ttlKey ?? "createdAt"}) + INTERVAL ${columnTtl.interval} ${columnTtl.unit}` : ""}`;
+      }).join(",\n");
+      const createSql = `
+        CREATE TABLE ${tableName} (
+          ${columns}
+        )
+        ENGINE = ${TABLE_ENGINES[tableName] ?? "MergeTree()"}
+        PRIMARY KEY (traceId, spanId)
+        ORDER BY (traceId, spanId)
+        ${rowTtl ? `TTL toDateTime(${rowTtl.ttlKey ?? "createdAt"}) + INTERVAL ${rowTtl.interval} ${rowTtl.unit}` : ""}
+        SETTINGS index_granularity = 8192
+      `;
+      await this.client.command({
+        query: createSql
+      });
+      const describeResult = await this.client.query({
+        query: `DESCRIBE TABLE ${backupTableName}`,
+        format: "JSONEachRow"
+      });
+      const backupColumns = await describeResult.json();
+      const backupColumnNames = new Set(backupColumns.map((c) => c.name));
+      const columnsToInsert = Object.keys(schema).filter((col) => backupColumnNames.has(col));
+      const columnList = columnsToInsert.map((c) => `"${c}"`).join(", ");
+      const selectExpressions = columnsToInsert.map((c) => c === "updatedAt" ? `COALESCE("updatedAt", "createdAt") as "updatedAt"` : `"${c}"`).join(", ");
+      await this.client.command({
+        query: `INSERT INTO ${tableName} (${columnList})
+          SELECT ${selectExpressions}
+          FROM ${backupTableName}
+          ORDER BY traceId, spanId,
+            (endedAt IS NOT NULL AND endedAt != '') DESC,
+            COALESCE(updatedAt, createdAt) DESC,
+            createdAt DESC
+          LIMIT 1 BY traceId, spanId`
+      });
+      await this.client.command({
+        query: `DROP TABLE ${backupTableName}`
+      });
+      this.logger?.info?.(`Successfully migrated spans table to new sorting key`);
+      return true;
+    } catch (error$1) {
+      this.logger?.error?.(`Migration failed: ${error$1.message}`);
+      try {
+        const originalExists = await this.tableExists(tableName);
+        const backupExists = await this.tableExists(backupTableName);
+        if (!originalExists && backupExists) {
+          this.logger?.info?.(`Restoring spans table from backup`);
+          await this.client.command({
+            query: `RENAME TABLE ${backupTableName} TO ${tableName}`
+          });
+        } else if (originalExists && backupExists) {
+          await this.client.command({
+            query: `DROP TABLE IF EXISTS ${backupTableName}`
+          });
+        }
+      } catch (restoreError) {
+        this.logger?.error?.(`Failed to restore from backup: ${restoreError}`);
+      }
+      throw new error.MastraError(
+        {
+          id: storage.createStorageErrorId("CLICKHOUSE", "MIGRATE_SPANS_SORTING_KEY", "FAILED"),
+          domain: error.ErrorDomain.STORAGE,
+          category: error.ErrorCategory.THIRD_PARTY,
+          details: { tableName, currentSortingKey }
+        },
+        error$1
+      );
+    }
+  }
   getSqlType(type) {
     switch (type) {
       case "text":
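The copy step in migrateSpansTableSortingKey above relies on ClickHouse's LIMIT 1 BY to keep exactly one row per (traceId, spanId), with the ORDER BY deciding which row survives: completed spans first, then the most recently updated, then the newest createdAt. A standalone sketch of that selection rule, using a simplified stand-in table (names and columns are illustrative, not the real spans schema):

```ts
// Sketch only: demonstrates the deduplication priority used by the migration.
import { createClient } from '@clickhouse/client';

async function main() {
  const client = createClient({ url: 'http://localhost:8123' });

  await client.command({
    query: `
      CREATE TABLE IF NOT EXISTS spans_dedup_demo (
        traceId String,
        spanId String,
        name String,
        createdAt DateTime64(3),
        updatedAt Nullable(DateTime64(3)),
        endedAt Nullable(String)
      ) ENGINE = MergeTree() ORDER BY (traceId, spanId)
    `,
  });

  // An unfinished duplicate and a completed one for the same logical span.
  await client.insert({
    table: 'spans_dedup_demo',
    values: [
      { traceId: 't1', spanId: 's1', name: 'partial', createdAt: '2024-01-01 00:00:00.000', updatedAt: null, endedAt: null },
      { traceId: 't1', spanId: 's1', name: 'complete', createdAt: '2024-01-01 00:00:00.000', updatedAt: '2024-01-01 00:00:05.000', endedAt: '2024-01-01T00:00:05Z' },
    ],
    format: 'JSONEachRow',
  });

  // ORDER BY puts the preferred row first per key; LIMIT 1 BY keeps only that row.
  const result = await client.query({
    query: `
      SELECT name
      FROM spans_dedup_demo
      ORDER BY traceId, spanId,
        (endedAt IS NOT NULL AND endedAt != '') DESC,
        COALESCE(updatedAt, createdAt) DESC,
        createdAt DESC
      LIMIT 1 BY traceId, spanId
    `,
    format: 'JSONEachRow',
  });
  console.log(await result.json()); // [{ name: 'complete' }] - the completed span wins
  await client.close();
}

main().catch(console.error);
```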
@@ -125,12 +334,15 @@ var ClickhouseDB = class extends base.MastraBase {
     try {
       const columns = Object.entries(schema).map(([name, def]) => {
         let sqlType = this.getSqlType(def.type);
-
+        let isNullable = def.nullable === true;
+        if (tableName === storage.TABLE_SPANS && name === "updatedAt") {
+          isNullable = false;
+        }
         if (isNullable) {
           sqlType = `Nullable(${sqlType})`;
         }
         const constraints = [];
-        if (name === "metadata" && def.type === "text" && isNullable) {
+        if (name === "metadata" && (def.type === "text" || def.type === "jsonb") && isNullable) {
           constraints.push("DEFAULT '{}'");
         }
         const columnTtl = this.ttl?.[tableName]?.columns?.[name];
@@ -155,8 +367,8 @@ var ClickhouseDB = class extends base.MastraBase {
           ${columns}
         )
         ENGINE = ${TABLE_ENGINES[tableName] ?? "MergeTree()"}
-        PRIMARY KEY (
-        ORDER BY (
+        PRIMARY KEY (traceId, spanId)
+        ORDER BY (traceId, spanId)
         ${rowTtl ? `TTL toDateTime(${rowTtl.ttlKey ?? "createdAt"}) + INTERVAL ${rowTtl.interval} ${rowTtl.unit}` : ""}
         SETTINGS index_granularity = 8192
       `;
@@ -302,7 +514,9 @@ var ClickhouseDB = class extends base.MastraBase {
       ...Object.fromEntries(
         Object.entries(record).map(([key, value]) => [
           key,
-
+          // Only convert to Date if it's a timestamp column AND value is not null/undefined
+          // new Date(null) returns epoch date, not null, so we must check first
+          storage.TABLE_SCHEMAS[tableName]?.[key]?.type === "timestamp" && value != null ? new Date(value).toISOString() : value
         ])
       )
     }));
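The null guard added above matters because JavaScript's Date constructor coerces null to 0, so an unguarded conversion would silently turn empty timestamp columns into the Unix epoch. A small illustration:

```ts
// Why the guard matters: new Date(null) is the Unix epoch, not an invalid date.
console.log(new Date(null as unknown as number).toISOString());
// -> "1970-01-01T00:00:00.000Z"

// The guarded conversion used in the diff keeps nulls as nulls:
function toIsoIfSet(value: string | number | Date | null | undefined) {
  return value != null ? new Date(value).toISOString() : value;
}

console.log(toIsoIfSet(null));                   // null
console.log(toIsoIfSet('2024-01-01T00:00:00Z')); // "2024-01-01T00:00:00.000Z"
```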
@@ -1068,26 +1282,68 @@ var MemoryStorageClickhouse = class extends storage.MemoryStorage {
       );
     }
   }
-  async
-    const {
+  async listThreads(args) {
+    const { page = 0, perPage: perPageInput, orderBy, filter } = args;
+    try {
+      this.validatePaginationInput(page, perPageInput ?? 100);
+    } catch (error$1) {
+      throw new error.MastraError(
+        {
+          id: storage.createStorageErrorId("CLICKHOUSE", "LIST_THREADS", "INVALID_PAGE"),
+          domain: error.ErrorDomain.STORAGE,
+          category: error.ErrorCategory.USER,
+          details: { page, ...perPageInput !== void 0 && { perPage: perPageInput } }
+        },
+        error$1 instanceof Error ? error$1 : new Error("Invalid pagination parameters")
+      );
+    }
     const perPage = storage.normalizePerPage(perPageInput, 100);
-
+    try {
+      this.validateMetadataKeys(filter?.metadata);
+    } catch (error$1) {
       throw new error.MastraError(
         {
-          id: storage.createStorageErrorId("CLICKHOUSE", "
+          id: storage.createStorageErrorId("CLICKHOUSE", "LIST_THREADS", "INVALID_METADATA_KEY"),
           domain: error.ErrorDomain.STORAGE,
           category: error.ErrorCategory.USER,
-          details: {
+          details: { metadataKeys: filter?.metadata ? Object.keys(filter.metadata).join(", ") : "" }
         },
-        new Error("
+        error$1 instanceof Error ? error$1 : new Error("Invalid metadata key")
       );
     }
     const { offset, perPage: perPageForResponse } = storage.calculatePagination(page, perPageInput, perPage);
     const { field, direction } = this.parseOrderBy(orderBy);
     try {
+      const whereClauses = [];
+      const queryParams = {};
+      if (filter?.resourceId) {
+        whereClauses.push("resourceId = {resourceId:String}");
+        queryParams.resourceId = filter.resourceId;
+      }
+      if (filter?.metadata && Object.keys(filter.metadata).length > 0) {
+        let metadataIndex = 0;
+        for (const [key, value] of Object.entries(filter.metadata)) {
+          const paramName = `metadata${metadataIndex}`;
+          whereClauses.push(`JSONExtractRaw(metadata, '${key}') = {${paramName}:String}`);
+          queryParams[paramName] = JSON.stringify(value);
+          metadataIndex++;
+        }
+      }
       const countResult = await this.client.query({
-        query: `
-
+        query: `
+          WITH ranked_threads AS (
+            SELECT
+              id,
+              resourceId,
+              metadata,
+              ROW_NUMBER() OVER (PARTITION BY id ORDER BY updatedAt DESC) as row_num
+            FROM ${storage.TABLE_THREADS}
+          )
+          SELECT count(*) as total
+          FROM ranked_threads
+          WHERE row_num = 1 ${whereClauses.length > 0 ? `AND ${whereClauses.join(" AND ")}` : ""}
+        `,
+        query_params: queryParams,
         clickhouse_settings: {
           date_time_input_format: "best_effort",
           date_time_output_format: "iso",
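listThreads above builds its WHERE clause from an optional filter: resourceId compares against the column directly, and each metadata key compiles to a parameterized JSONExtractRaw comparison. A usage sketch (the argument shape follows the diff; the wrapper function and its structural type are illustrative):

```ts
// Sketch only: how a caller might use the new listThreads filters.
type ThreadFilter = { resourceId?: string; metadata?: Record<string, unknown> };

async function findThreadsForUser(memory: {
  listThreads(args: { page?: number; perPage?: number; filter?: ThreadFilter }): Promise<unknown>;
}) {
  // resourceId becomes `resourceId = {resourceId:String}`; each metadata key
  // becomes a parameterized `JSONExtractRaw(metadata, '<key>') = {metadataN:String}`.
  return memory.listThreads({
    page: 0,
    perPage: 20,
    filter: { resourceId: 'user-123', metadata: { plan: 'pro' } },
  });
}
```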
@@ -1118,7 +1374,6 @@ var MemoryStorageClickhouse = class extends storage.MemoryStorage {
             toDateTime64(updatedAt, 3) as updatedAt,
             ROW_NUMBER() OVER (PARTITION BY id ORDER BY updatedAt DESC) as row_num
           FROM ${storage.TABLE_THREADS}
-          WHERE resourceId = {resourceId:String}
         )
         SELECT
           id,
@@ -1128,12 +1383,12 @@ var MemoryStorageClickhouse = class extends storage.MemoryStorage {
           createdAt,
           updatedAt
         FROM ranked_threads
-        WHERE row_num = 1
+        WHERE row_num = 1 ${whereClauses.length > 0 ? `AND ${whereClauses.join(" AND ")}` : ""}
         ORDER BY "${field}" ${direction === "DESC" ? "DESC" : "ASC"}
         LIMIT {perPage:Int64} OFFSET {offset:Int64}
       `,
       query_params: {
-
+        ...queryParams,
         perPage,
         offset
       },
@@ -1159,10 +1414,14 @@ var MemoryStorageClickhouse = class extends storage.MemoryStorage {
     } catch (error$1) {
       throw new error.MastraError(
         {
-          id: storage.createStorageErrorId("CLICKHOUSE", "
+          id: storage.createStorageErrorId("CLICKHOUSE", "LIST_THREADS", "FAILED"),
           domain: error.ErrorDomain.STORAGE,
           category: error.ErrorCategory.THIRD_PARTY,
-          details: {
+          details: {
+            ...filter?.resourceId && { resourceId: filter.resourceId },
+            hasMetadataFilter: !!filter?.metadata,
+            page
+          }
         },
         error$1
       );
@@ -1470,7 +1729,7 @@ var MemoryStorageClickhouse = class extends storage.MemoryStorage {
     return {
       id: resource.id,
       workingMemory: resource.workingMemory && typeof resource.workingMemory === "object" ? JSON.stringify(resource.workingMemory) : resource.workingMemory,
-      metadata:
+      metadata: parseMetadata(resource.metadata),
       createdAt: new Date(resource.createdAt),
       updatedAt: new Date(resource.updatedAt)
     };
@@ -1495,7 +1754,7 @@ var MemoryStorageClickhouse = class extends storage.MemoryStorage {
       {
         id: resource.id,
         workingMemory: resource.workingMemory,
-        metadata:
+        metadata: serializeMetadata(resource.metadata),
         createdAt: resource.createdAt.toISOString(),
         updatedAt: resource.updatedAt.toISOString()
       }
@@ -1506,7 +1765,10 @@ var MemoryStorageClickhouse = class extends storage.MemoryStorage {
           output_format_json_quote_64bit_integers: 0
         }
       });
-      return
+      return {
+        ...resource,
+        metadata: resource.metadata || {}
+      };
     } catch (error$1) {
       throw new error.MastraError(
         {
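The resource hunks above route metadata through parseMetadata/serializeMetadata and default missing metadata to {}. Those helpers are not shown in this diff; a plausible shape, purely as a hypothetical illustration of the round-trip (stored as a JSON string in ClickHouse, exposed as an object), would be:

```ts
// Hypothetical helper shapes (the package's real implementations are not shown here).
function parseMetadata(value: unknown): Record<string, unknown> {
  if (value == null || value === '') return {};
  if (typeof value === 'string') {
    try {
      return JSON.parse(value);
    } catch {
      return {};
    }
  }
  return value as Record<string, unknown>;
}

function serializeMetadata(value: unknown): string {
  if (value == null) return '{}';
  return typeof value === 'string' ? value : JSON.stringify(value);
}
```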
@@ -1596,11 +1858,101 @@ var ObservabilityStorageClickhouse = class extends storage.ObservabilityStorage {
     this.#db = new ClickhouseDB({ client, ttl });
   }
   async init() {
+    const migrationStatus = await this.#db.checkSpansMigrationStatus(storage.TABLE_SPANS);
+    if (migrationStatus.needsMigration) {
+      const duplicateInfo = await this.#db.checkForDuplicateSpans(storage.TABLE_SPANS);
+      const duplicateMessage = duplicateInfo.hasDuplicates ? `
+Found ${duplicateInfo.duplicateCount} duplicate (traceId, spanId) combinations that will be removed.
+` : "";
+      const errorMessage = `
+===========================================================================
+MIGRATION REQUIRED: ClickHouse spans table needs sorting key update
+===========================================================================
+
+The spans table structure has changed. ClickHouse requires a table recreation
+to update the sorting key from (traceId) to (traceId, spanId).
+` + duplicateMessage + `
+To fix this, run the manual migration command:
+
+  npx mastra migrate
+
+This command will:
+  1. Create a new table with the correct sorting key
+  2. Copy data from the old table (deduplicating if needed)
+  3. Replace the old table with the new one
+
+WARNING: This migration involves table recreation and may take significant
+time for large tables. Please ensure you have a backup before proceeding.
+===========================================================================
+`;
+      throw new error.MastraError({
+        id: storage.createStorageErrorId("CLICKHOUSE", "MIGRATION_REQUIRED", "SORTING_KEY_CHANGE"),
+        domain: error.ErrorDomain.STORAGE,
+        category: error.ErrorCategory.USER,
+        text: errorMessage
+      });
+    }
     await this.#db.createTable({ tableName: storage.TABLE_SPANS, schema: storage.SPAN_SCHEMA });
   }
   async dangerouslyClearAll() {
     await this.#db.clearTable({ tableName: storage.TABLE_SPANS });
   }
+  /**
+   * Manually run the spans migration to deduplicate and update the sorting key.
+   * This is intended to be called from the CLI when duplicates are detected.
+   *
+   * @returns Migration result with status and details
+   */
+  async migrateSpans() {
+    const migrationStatus = await this.#db.checkSpansMigrationStatus(storage.TABLE_SPANS);
+    if (!migrationStatus.needsMigration) {
+      return {
+        success: true,
+        alreadyMigrated: true,
+        duplicatesRemoved: 0,
+        message: `Migration already complete. Spans table has correct sorting key.`
+      };
+    }
+    const duplicateInfo = await this.#db.checkForDuplicateSpans(storage.TABLE_SPANS);
+    if (duplicateInfo.hasDuplicates) {
+      this.logger?.info?.(
+        `Found ${duplicateInfo.duplicateCount} duplicate (traceId, spanId) combinations. Starting migration with deduplication...`
+      );
+    } else {
+      this.logger?.info?.(`No duplicate spans found. Starting sorting key migration...`);
+    }
+    await this.#db.migrateSpansTableSortingKey({ tableName: storage.TABLE_SPANS, schema: storage.SPAN_SCHEMA });
+    return {
+      success: true,
+      alreadyMigrated: false,
+      duplicatesRemoved: duplicateInfo.duplicateCount,
+      message: duplicateInfo.hasDuplicates ? `Migration complete. Removed duplicates and updated sorting key for ${storage.TABLE_SPANS}.` : `Migration complete. Updated sorting key for ${storage.TABLE_SPANS}.`
+    };
+  }
+  /**
+   * Check migration status for the spans table.
+   * Returns information about whether migration is needed.
+   */
+  async checkSpansMigrationStatus() {
+    const migrationStatus = await this.#db.checkSpansMigrationStatus(storage.TABLE_SPANS);
+    if (!migrationStatus.needsMigration) {
+      return {
+        needsMigration: false,
+        hasDuplicates: false,
+        duplicateCount: 0,
+        constraintExists: true,
+        tableName: storage.TABLE_SPANS
+      };
+    }
+    const duplicateInfo = await this.#db.checkForDuplicateSpans(storage.TABLE_SPANS);
+    return {
+      needsMigration: true,
+      hasDuplicates: duplicateInfo.hasDuplicates,
+      duplicateCount: duplicateInfo.duplicateCount,
+      constraintExists: false,
+      tableName: storage.TABLE_SPANS
+    };
+  }
   get tracingStrategy() {
     return {
       preferred: "insert-only",
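With the change above, init() refuses to start against a spans table that still has the legacy createdAt-first sorting key and points at `npx mastra migrate`; the same checks are also exposed programmatically as checkSpansMigrationStatus() and migrateSpans() on the observability store. A sketch of driving them directly (how you obtain the store instance depends on your setup; the method names and return fields follow the diff):

```ts
// Sketch only: run the spans migration programmatically instead of via the CLI.
async function ensureSpansMigrated(observability: {
  checkSpansMigrationStatus(): Promise<{ needsMigration: boolean; duplicateCount: number }>;
  migrateSpans(): Promise<{ success: boolean; duplicatesRemoved: number; message: string }>;
}) {
  const status = await observability.checkSpansMigrationStatus();
  if (!status.needsMigration) return;

  // Runs the rename/recreate/copy/drop sequence described in the diff;
  // duplicates are collapsed to one row per (traceId, spanId).
  const result = await observability.migrateSpans();
  console.log(result.message, `duplicates removed: ${result.duplicatesRemoved}`);
}
```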
@@ -1934,10 +2286,10 @@ var ObservabilityStorageClickhouse = class extends storage.ObservabilityStorage {
         conditions.push(`(error IS NOT NULL AND error != '')`);
         break;
       case storage.TraceStatus.RUNNING:
-        conditions.push(`
+        conditions.push(`endedAt IS NULL AND (error IS NULL OR error = '')`);
         break;
       case storage.TraceStatus.SUCCESS:
-        conditions.push(`
+        conditions.push(`endedAt IS NOT NULL AND (error IS NULL OR error = '')`);
         break;
       }
     }
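The status filters above derive a trace's state from endedAt and error rather than from a stored status column. The same rule, written out as a plain function (a sketch, not an export of the package):

```ts
// A span with a non-empty error is treated as failed; otherwise it is running
// until endedAt is set, and successful afterwards.
type SpanRow = { endedAt: string | null; error: string | null };

function deriveStatus(span: SpanRow): 'failed' | 'running' | 'success' {
  if (span.error != null && span.error !== '') return 'failed';
  return span.endedAt == null ? 'running' : 'success';
}

console.log(deriveStatus({ endedAt: null, error: null }));                    // running
console.log(deriveStatus({ endedAt: '2024-01-01T00:00:01Z', error: null }));  // success
console.log(deriveStatus({ endedAt: null, error: 'boom' }));                  // failed
```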
@@ -1966,7 +2318,7 @@ var ObservabilityStorageClickhouse = class extends storage.ObservabilityStorage {
     if (sortField === "endedAt") {
       const nullSortValue = sortDirection === "DESC" ? 0 : 1;
       const nonNullSortValue = sortDirection === "DESC" ? 1 : 0;
-      orderClause = `ORDER BY CASE WHEN ${sortField} IS NULL
+      orderClause = `ORDER BY CASE WHEN ${sortField} IS NULL THEN ${nullSortValue} ELSE ${nonNullSortValue} END, ${sortField} ${sortDirection}`;
     } else {
       orderClause = `ORDER BY ${sortField} ${sortDirection}`;
     }
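The endedAt ordering above places still-running spans (NULL endedAt) first when sorting DESC and last when sorting ASC, by sorting on a 0/1 flag ahead of the column itself. A sketch of the clause it produces:

```ts
// Mirrors the clause built above for endedAt sorting: NULL endedAt rows get the
// smaller flag for DESC (so they sort first) and the larger flag for ASC (so they
// sort last), then the column itself breaks ties.
function buildEndedAtOrderClause(sortDirection: 'ASC' | 'DESC'): string {
  const nullSortValue = sortDirection === 'DESC' ? 0 : 1;
  const nonNullSortValue = sortDirection === 'DESC' ? 1 : 0;
  return `ORDER BY CASE WHEN endedAt IS NULL THEN ${nullSortValue} ELSE ${nonNullSortValue} END, endedAt ${sortDirection}`;
}

console.log(buildEndedAtOrderClause('DESC'));
// ORDER BY CASE WHEN endedAt IS NULL THEN 0 ELSE 1 END, endedAt DESC
```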
@@ -2694,7 +3046,7 @@ var WorkflowsStorageClickhouse = class extends storage.WorkflowsStorage {
       try {
         parsedSnapshot = JSON.parse(row.snapshot);
       } catch (e) {
-
+        this.logger.warn(`Failed to parse snapshot for workflow ${row.workflow_name}: ${e}`);
       }
     }
     return {
@@ -2732,7 +3084,7 @@ var WorkflowsStorageClickhouse = class extends storage.WorkflowsStorage {
         conditions.push(`resourceId = {var_resourceId:String}`);
         values.var_resourceId = resourceId;
       } else {
-
+        this.logger.warn(`[${storage.TABLE_WORKFLOW_SNAPSHOT}] resourceId column not found. Skipping resourceId filter.`);
       }
     }
     if (fromDate) {
@@ -2872,7 +3224,7 @@ var WorkflowsStorageClickhouse = class extends storage.WorkflowsStorage {
 var isClientConfig = (config) => {
   return "client" in config;
 };
-var ClickhouseStore = class extends storage.
+var ClickhouseStore = class extends storage.MastraCompositeStore {
   db;
   ttl = {};
   stores;
@@ -2890,11 +3242,11 @@ var ClickhouseStore = class extends storage.MastraStorage {
     if (typeof config.password !== "string") {
       throw new Error("ClickhouseStore: password must be a string.");
     }
+    const { id, ttl, disableInit, clickhouse_settings, ...clientOptions } = config;
     this.db = client.createClient({
-
-      username: config.username,
-      password: config.password,
+      ...clientOptions,
       clickhouse_settings: {
+        ...clickhouse_settings,
         date_time_input_format: "best_effort",
         date_time_output_format: "iso",
         // This is crucial