@mastra/clickhouse 1.4.2-alpha.0 → 1.5.0-alpha.1

This diff shows the content of publicly available package versions as published to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,20 @@
1
1
  # @mastra/clickhouse
2
2
 
3
+ ## 1.5.0-alpha.1
4
+
5
+ ### Minor Changes
6
+
7
+ - Added unique IDs (`logId`, `metricId`, `scoreId`, `feedbackId`) to all observability signals, generated automatically at emission time for de-duplication across the framework pipeline and cross-system correlation. User-facing APIs (`logger.info()`, `metrics.emit()`, `addScore()`, `addFeedback()`) are unchanged. ([#15242](https://github.com/mastra-ai/mastra/pull/15242))
8
+
9
+ For existing ClickHouse and DuckDB observability signal tables, run `npx mastra migrate` before initializing the store so the new signal-ID schema is applied.
10
+
11
+ ### Patch Changes
12
+
13
+ - Added `getTraceLight` method to the observability storage, returning only lightweight span fields needed for timeline rendering. This avoids transferring heavy fields like `input`, `output`, `attributes`, and `metadata` when they are not needed. ([#15574](https://github.com/mastra-ai/mastra/pull/15574))
14
+
15
+ - Updated dependencies [[`20f59b8`](https://github.com/mastra-ai/mastra/commit/20f59b876cf91199efbc49a0e36b391240708f08), [`e2687a7`](https://github.com/mastra-ai/mastra/commit/e2687a7408790c384563816a9a28ed06735684c9), [`8f1b280`](https://github.com/mastra-ai/mastra/commit/8f1b280b7fe6999ec654f160cb69c1a8719e7a57), [`12df98c`](https://github.com/mastra-ai/mastra/commit/12df98c4904643d9481f5c78f3bed443725b4c96)]:
16
+ - @mastra/core@1.26.0-alpha.11
17
+
3
18
  ## 1.4.2-alpha.0
4
19
 
5
20
  ### Patch Changes
@@ -3,7 +3,7 @@ name: mastra-clickhouse
3
3
  description: Documentation for @mastra/clickhouse. Use when working with @mastra/clickhouse APIs, configuration, or implementation.
4
4
  metadata:
5
5
  package: "@mastra/clickhouse"
6
- version: "1.4.2-alpha.0"
6
+ version: "1.5.0-alpha.1"
7
7
  ---
8
8
 
9
9
  ## When to use
@@ -1,5 +1,5 @@
1
1
  {
2
- "version": "1.4.2-alpha.0",
2
+ "version": "1.5.0-alpha.1",
3
3
  "package": "@mastra/clickhouse",
4
4
  "exports": {},
5
5
  "modules": {}
package/dist/index.cjs CHANGED
@@ -2430,6 +2430,49 @@ time for large tables. Please ensure you have a backup before proceeding.
2430
2430
  );
2431
2431
  }
2432
2432
  }
2433
+ async getTraceLight(args) {
2434
+ const { traceId } = args;
2435
+ try {
2436
+ const engine = TABLE_ENGINES[storage.TABLE_SPANS] ?? "MergeTree()";
2437
+ const result = await this.client.query({
2438
+ query: `
2439
+ SELECT traceId, spanId, parentSpanId, name,
2440
+ entityType, entityId, entityName,
2441
+ spanType, error, isEvent,
2442
+ startedAt, endedAt, createdAt, updatedAt
2443
+ FROM ${storage.TABLE_SPANS} ${engine.startsWith("ReplacingMergeTree") ? "FINAL" : ""}
2444
+ WHERE traceId = {traceId:String}
2445
+ ORDER BY startedAt ASC
2446
+ `,
2447
+ query_params: { traceId },
2448
+ format: "JSONEachRow",
2449
+ clickhouse_settings: {
2450
+ date_time_input_format: "best_effort",
2451
+ date_time_output_format: "iso",
2452
+ use_client_time_zone: 1,
2453
+ output_format_json_quote_64bit_integers: 0
2454
+ }
2455
+ });
2456
+ const rows = await result.json();
2457
+ if (!rows || rows.length === 0) {
2458
+ return null;
2459
+ }
2460
+ return {
2461
+ traceId,
2462
+ spans: transformRows(rows)
2463
+ };
2464
+ } catch (error$1) {
2465
+ throw new error.MastraError(
2466
+ {
2467
+ id: storage.createStorageErrorId("CLICKHOUSE", "GET_TRACE_LIGHT", "FAILED"),
2468
+ domain: error.ErrorDomain.STORAGE,
2469
+ category: error.ErrorCategory.THIRD_PARTY,
2470
+ details: { traceId }
2471
+ },
2472
+ error$1
2473
+ );
2474
+ }
2475
+ }
2433
2476
  async updateSpan(args) {
2434
2477
  const { traceId, spanId, updates } = args;
2435
2478
  try {
@@ -2944,6 +2987,7 @@ CREATE TABLE IF NOT EXISTS ${TABLE_METRIC_EVENTS} (
2944
2987
  timestamp DateTime64(3, 'UTC'),
2945
2988
 
2946
2989
  -- IDs
2990
+ metricId String,
2947
2991
  traceId Nullable(String),
2948
2992
  spanId Nullable(String),
2949
2993
  experimentId Nullable(String),
@@ -2991,9 +3035,9 @@ CREATE TABLE IF NOT EXISTS ${TABLE_METRIC_EVENTS} (
2991
3035
  metadata Nullable(String),
2992
3036
  scope Nullable(String)
2993
3037
  )
2994
- ENGINE = MergeTree
3038
+ ENGINE = ReplacingMergeTree
2995
3039
  PARTITION BY toDate(timestamp)
2996
- ORDER BY (name, timestamp)
3040
+ ORDER BY (name, timestamp, metricId)
2997
3041
  `;
2998
3042
  var LOG_EVENTS_DDL = `
2999
3043
  CREATE TABLE IF NOT EXISTS ${TABLE_LOG_EVENTS} (
@@ -3001,6 +3045,7 @@ CREATE TABLE IF NOT EXISTS ${TABLE_LOG_EVENTS} (
3001
3045
  timestamp DateTime64(3, 'UTC'),
3002
3046
 
3003
3047
  -- IDs
3048
+ logId String,
3004
3049
  traceId Nullable(String),
3005
3050
  spanId Nullable(String),
3006
3051
  experimentId Nullable(String),
@@ -3043,10 +3088,9 @@ CREATE TABLE IF NOT EXISTS ${TABLE_LOG_EVENTS} (
3043
3088
  metadata Nullable(String),
3044
3089
  scope Nullable(String)
3045
3090
  )
3046
- ENGINE = MergeTree
3091
+ ENGINE = ReplacingMergeTree
3047
3092
  PARTITION BY toDate(timestamp)
3048
- ORDER BY (timestamp, traceId)
3049
- SETTINGS allow_nullable_key = 1
3093
+ ORDER BY (timestamp, logId)
3050
3094
  `;
3051
3095
  var SCORE_EVENTS_DDL = `
3052
3096
  CREATE TABLE IF NOT EXISTS ${TABLE_SCORE_EVENTS} (
@@ -3054,6 +3098,7 @@ CREATE TABLE IF NOT EXISTS ${TABLE_SCORE_EVENTS} (
3054
3098
  timestamp DateTime64(3, 'UTC'),
3055
3099
 
3056
3100
  -- IDs
3101
+ scoreId String,
3057
3102
  traceId Nullable(String),
3058
3103
  spanId Nullable(String),
3059
3104
  experimentId Nullable(String),
@@ -3103,9 +3148,9 @@ CREATE TABLE IF NOT EXISTS ${TABLE_SCORE_EVENTS} (
3103
3148
  metadata Nullable(String),
3104
3149
  scope Nullable(String)
3105
3150
  )
3106
- ENGINE = MergeTree
3151
+ ENGINE = ReplacingMergeTree
3107
3152
  PARTITION BY toDate(timestamp)
3108
- ORDER BY (traceId, timestamp)
3153
+ ORDER BY (traceId, timestamp, scoreId)
3109
3154
  SETTINGS allow_nullable_key = 1
3110
3155
  `;
3111
3156
  var FEEDBACK_EVENTS_DDL = `
@@ -3114,6 +3159,7 @@ CREATE TABLE IF NOT EXISTS ${TABLE_FEEDBACK_EVENTS} (
3114
3159
  timestamp DateTime64(3, 'UTC'),
3115
3160
 
3116
3161
  -- IDs
3162
+ feedbackId String,
3117
3163
  traceId Nullable(String),
3118
3164
  spanId Nullable(String),
3119
3165
  experimentId Nullable(String),
@@ -3166,9 +3212,9 @@ CREATE TABLE IF NOT EXISTS ${TABLE_FEEDBACK_EVENTS} (
3166
3212
  metadata Nullable(String),
3167
3213
  scope Nullable(String)
3168
3214
  )
3169
- ENGINE = MergeTree
3215
+ ENGINE = ReplacingMergeTree
3170
3216
  PARTITION BY toDate(timestamp)
3171
- ORDER BY (traceId, timestamp)
3217
+ ORDER BY (traceId, timestamp, feedbackId)
3172
3218
  SETTINGS allow_nullable_key = 1
3173
3219
  `;
3174
3220
  var DISCOVERY_VALUES_DDL = `
@@ -3523,6 +3569,7 @@ function spanRecordToRow(span) {
3523
3569
  }
3524
3570
  function rowToLogRecord(row) {
3525
3571
  return {
3572
+ logId: row.logId,
3526
3573
  timestamp: toDate(row.timestamp),
3527
3574
  level: row.level,
3528
3575
  message: row.message,
@@ -3559,6 +3606,7 @@ function rowToLogRecord(row) {
3559
3606
  }
3560
3607
  function logRecordToRow(log) {
3561
3608
  return {
3609
+ logId: log.logId,
3562
3610
  timestamp: toISOString(log.timestamp),
3563
3611
  level: log.level,
3564
3612
  message: log.message,
@@ -3595,6 +3643,7 @@ function logRecordToRow(log) {
3595
3643
  }
3596
3644
  function rowToMetricRecord(row) {
3597
3645
  return {
3646
+ metricId: row.metricId,
3598
3647
  timestamp: toDate(row.timestamp),
3599
3648
  name: row.name,
3600
3649
  value: Number(row.value),
@@ -3636,6 +3685,7 @@ function rowToMetricRecord(row) {
3636
3685
  }
3637
3686
  function metricRecordToRow(metric) {
3638
3687
  return {
3688
+ metricId: metric.metricId,
3639
3689
  timestamp: toISOString(metric.timestamp),
3640
3690
  name: metric.name,
3641
3691
  value: metric.value,
@@ -3677,6 +3727,7 @@ function metricRecordToRow(metric) {
3677
3727
  }
3678
3728
  function rowToScoreRecord(row) {
3679
3729
  return {
3730
+ scoreId: row.scoreId,
3680
3731
  timestamp: toDate(row.timestamp),
3681
3732
  // Core score/feedback shapes still type traceId as required for now.
3682
3733
  traceId: nullableString(row.traceId),
@@ -3719,6 +3770,7 @@ function scoreRecordToRow(score) {
3719
3770
  const metadata = score.metadata ?? null;
3720
3771
  const scoreSource = score.scoreSource ?? score.source ?? null;
3721
3772
  return {
3773
+ scoreId: score.scoreId,
3722
3774
  timestamp: toISOString(score.timestamp),
3723
3775
  traceId: score.traceId ?? null,
3724
3776
  spanId: score.spanId ?? null,
@@ -3761,6 +3813,7 @@ function rowToFeedbackRecord(row) {
3761
3813
  const feedbackSource = nullableString(row.feedbackSource);
3762
3814
  const feedbackUserId = nullableString(row.feedbackUserId) ?? nullableString(row.userId);
3763
3815
  return {
3816
+ feedbackId: row.feedbackId,
3764
3817
  timestamp: toDate(row.timestamp),
3765
3818
  // Core score/feedback shapes still type traceId as required for now.
3766
3819
  traceId: nullableString(row.traceId),
@@ -3804,6 +3857,7 @@ function feedbackRecordToRow(feedback) {
3804
3857
  const feedbackSource = feedback.feedbackSource ?? feedback.source ?? "";
3805
3858
  const feedbackUserId = feedback.feedbackUserId ?? feedback.userId ?? null;
3806
3859
  return {
3860
+ feedbackId: feedback.feedbackId,
3807
3861
  timestamp: toISOString(feedback.timestamp),
3808
3862
  traceId: feedback.traceId ?? null,
3809
3863
  spanId: feedback.spanId ?? null,
@@ -4923,6 +4977,93 @@ async function getMetricLabelValues(client, args) {
4923
4977
  );
4924
4978
  return { values: rows.map((r) => r.value) };
4925
4979
  }
4980
+ var SIGNAL_MIGRATIONS = [
4981
+ { table: TABLE_METRIC_EVENTS, createDDL: METRIC_EVENTS_DDL, idColumn: "metricId" },
4982
+ { table: TABLE_LOG_EVENTS, createDDL: LOG_EVENTS_DDL, idColumn: "logId" },
4983
+ { table: TABLE_SCORE_EVENTS, createDDL: SCORE_EVENTS_DDL, idColumn: "scoreId" },
4984
+ { table: TABLE_FEEDBACK_EVENTS, createDDL: FEEDBACK_EVENTS_DDL, idColumn: "feedbackId" }
4985
+ ];
4986
+ async function getTableEngine(client, table) {
4987
+ const result = await client.query({
4988
+ query: `SELECT engine FROM system.tables WHERE database = currentDatabase() AND name = {table:String}`,
4989
+ query_params: { table },
4990
+ format: "JSONEachRow"
4991
+ });
4992
+ const rows = await result.json();
4993
+ return rows[0]?.engine ?? null;
4994
+ }
4995
+ async function getTableColumns(client, table) {
4996
+ const result = await client.query({ query: `DESCRIBE TABLE ${table}`, format: "JSONEachRow" });
4997
+ const rows = await result.json();
4998
+ return rows.map((r) => r.name);
4999
+ }
5000
+ function buildTemporaryTableDDL(createDDL, table, tempTable) {
5001
+ return createDDL.replace(`CREATE TABLE IF NOT EXISTS ${table}`, `CREATE TABLE ${tempTable}`);
5002
+ }
5003
+ async function dropTableIfExists(client, table) {
5004
+ if (await getTableEngine(client, table) !== null) {
5005
+ await client.command({ query: `DROP TABLE ${table}` });
5006
+ }
5007
+ }
5008
+ function createMigrationError(args, error$1) {
5009
+ return new error.MastraError(
5010
+ {
5011
+ id: storage.createStorageErrorId("CLICKHOUSE", "MIGRATE_SIGNAL_TABLES", "FAILED"),
5012
+ domain: error.ErrorDomain.STORAGE,
5013
+ category: error.ErrorCategory.THIRD_PARTY,
5014
+ details: args
5015
+ },
5016
+ error$1
5017
+ );
5018
+ }
5019
+ async function checkSignalTablesMigrationStatus(client) {
5020
+ const tables = [];
5021
+ for (const { table, idColumn } of SIGNAL_MIGRATIONS) {
5022
+ const engine = await getTableEngine(client, table);
5023
+ if (!engine || engine === "ReplacingMergeTree") {
5024
+ continue;
5025
+ }
5026
+ tables.push({ table, engine, idColumn });
5027
+ }
5028
+ return {
5029
+ needsMigration: tables.length > 0,
5030
+ tables
5031
+ };
5032
+ }
5033
+ async function migrateSignalTables(client, logger) {
5034
+ for (const { table, createDDL, idColumn } of SIGNAL_MIGRATIONS) {
5035
+ const engine = await getTableEngine(client, table);
5036
+ if (!engine || engine === "ReplacingMergeTree") continue;
5037
+ logger?.info?.(`Migrating ${table} from ${engine} to ReplacingMergeTree with ${idColumn} column`);
5038
+ const temp = `${table}_migrating_${Date.now()}`;
5039
+ try {
5040
+ await client.command({ query: buildTemporaryTableDDL(createDDL, table, temp) });
5041
+ const newColumns = await getTableColumns(client, temp);
5042
+ const currentColumns = new Set(await getTableColumns(client, table));
5043
+ const columnList = newColumns.map((c) => `"${c}"`).join(", ");
5044
+ const selectExprs = newColumns.map((c) => {
5045
+ if (c === idColumn) {
5046
+ return currentColumns.has(c) ? `COALESCE(nullIf("${c}", ''), toString(generateUUIDv4())) AS "${c}"` : `toString(generateUUIDv4()) AS "${c}"`;
5047
+ }
5048
+ return currentColumns.has(c) ? `"${c}"` : `NULL AS "${c}"`;
5049
+ }).join(", ");
5050
+ await client.command({
5051
+ query: `INSERT INTO ${temp} (${columnList}) SELECT ${selectExprs} FROM ${table}`
5052
+ });
5053
+ await client.command({ query: `EXCHANGE TABLES ${temp} AND ${table}` });
5054
+ await client.command({ query: `DROP TABLE ${temp}` });
5055
+ logger?.info?.(`Successfully migrated ${table}`);
5056
+ } catch (error) {
5057
+ logger?.error?.(`Migration of ${table} failed: ${error.message}`);
5058
+ try {
5059
+ await dropTableIfExists(client, temp);
5060
+ } catch (restoreError) {
5061
+ logger?.error?.(`Failed to clean up temporary table ${temp}: ${restoreError.message}`);
5062
+ }
5063
+ throw createMigrationError({ table, idColumn }, error);
5064
+ }
5065
+ }
5066
+ }
4926
5067
  var SCORE_TYPED_COLUMNS = /* @__PURE__ */ new Set([
4927
5068
  "timestamp",
4928
5069
  "traceId",
@@ -5386,6 +5527,31 @@ async function getTrace(client, args) {
5386
5527
  const spans = rows.map(rowToSpanRecord);
5387
5528
  return { traceId: args.traceId, spans };
5388
5529
  }
5530
+ async function getTraceLight(client, args) {
5531
+ const result = await client.query({
5532
+ query: `
5533
+ SELECT traceId, spanId, parentSpanId, name,
5534
+ entityType, entityId, entityName,
5535
+ spanType, error, isEvent,
5536
+ startedAt, endedAt
5537
+ FROM (
5538
+ SELECT *
5539
+ FROM ${TABLE_SPAN_EVENTS}
5540
+ WHERE traceId = {traceId:String}
5541
+ ORDER BY dedupeKey, endedAt DESC
5542
+ LIMIT 1 BY dedupeKey
5543
+ )
5544
+ ORDER BY startedAt ASC
5545
+ `,
5546
+ query_params: { traceId: args.traceId },
5547
+ format: "JSONEachRow",
5548
+ clickhouse_settings: CH_SETTINGS
5549
+ });
5550
+ const rows = await result.json();
5551
+ if (!rows || rows.length === 0) return null;
5552
+ const spans = rows.map(rowToSpanRecord);
5553
+ return { traceId: args.traceId, spans };
5554
+ }
5389
5555
  async function batchDeleteTraces(client, args) {
5390
5556
  if (args.traceIds.length === 0) return;
5391
5557
  const params = {};
@@ -5414,6 +5580,32 @@ async function batchDeleteTraces(client, args) {
5414
5580
  }
5415
5581
 
5416
5582
  // src/storage/domains/observability/v-next/index.ts
5583
+ function buildSignalMigrationRequiredMessage(args) {
5584
+ const tableList = args.tables.map((table) => ` - ${table.table} (${table.engine})`).join("\n");
5585
+ return `
5586
+ ===========================================================================
5587
+ MIGRATION REQUIRED: ${args.store} observability signal tables need signal IDs
5588
+ ===========================================================================
5589
+
5590
+ The following signal tables still use the legacy schema and must be migrated
5591
+ before observability storage can initialize:
5592
+
5593
+ ${tableList}
5594
+
5595
+ To fix this, run the manual migration command:
5596
+
5597
+ npx mastra migrate
5598
+
5599
+ This command will:
5600
+ 1. Create replacement signal tables with signal-ID dedupe keys
5601
+ 2. Backfill missing signal IDs for legacy rows
5602
+ 3. Swap the migrated tables into place
5603
+
5604
+ WARNING: This migration recreates the signal tables and may take significant
5605
+ time for large databases. Please ensure you have a backup before proceeding.
5606
+ ===========================================================================
5607
+ `;
5608
+ }
5417
5609
  var ObservabilityStorageClickhouseVNext = class extends storage.ObservabilityStorage {
5418
5610
  #client;
5419
5611
  #retention;
@@ -5427,6 +5619,18 @@ var ObservabilityStorageClickhouseVNext = class extends storage.ObservabilitySto
5427
5619
  // Initialization
5428
5620
  // -------------------------------------------------------------------------
5429
5621
  async init() {
5622
+ const migrationStatus = await checkSignalTablesMigrationStatus(this.#client);
5623
+ if (migrationStatus.needsMigration) {
5624
+ throw new error.MastraError({
5625
+ id: storage.createStorageErrorId("CLICKHOUSE", "MIGRATION_REQUIRED", "SIGNAL_TABLES"),
5626
+ domain: error.ErrorDomain.STORAGE,
5627
+ category: error.ErrorCategory.USER,
5628
+ text: buildSignalMigrationRequiredMessage({
5629
+ store: "ClickHouse",
5630
+ tables: migrationStatus.tables.map(({ table, engine }) => ({ table, engine }))
5631
+ })
5632
+ });
5633
+ }
5430
5634
  try {
5431
5635
  for (const ddl of [...ALL_TABLE_DDL, ...ALL_MV_DDL]) {
5432
5636
  await this.#client.command({ query: ddl });
@@ -5441,6 +5645,9 @@ var ObservabilityStorageClickhouseVNext = class extends storage.ObservabilitySto
5441
5645
  }
5442
5646
  }
5443
5647
  } catch (error$1) {
5648
+ if (error$1 instanceof error.MastraError) {
5649
+ throw error$1;
5650
+ }
5444
5651
  throw new error.MastraError(
5445
5652
  {
5446
5653
  id: storage.createStorageErrorId("CLICKHOUSE", "VNEXT_INIT", "FAILED"),
@@ -5462,6 +5669,29 @@ var ObservabilityStorageClickhouseVNext = class extends storage.ObservabilitySto
5462
5669
  } catch {
5463
5670
  }
5464
5671
  }
5672
+ /**
5673
+ * Manually migrate legacy signal tables to the signal-ID ReplacingMergeTree schema.
5674
+ * The public method name is historical; the CLI still calls `migrateSpans()`
5675
+ * for observability migrations even though this now also migrates signal tables.
5676
+ */
5677
+ async migrateSpans() {
5678
+ const migrationStatus = await checkSignalTablesMigrationStatus(this.#client);
5679
+ if (!migrationStatus.needsMigration) {
5680
+ return {
5681
+ success: true,
5682
+ alreadyMigrated: true,
5683
+ duplicatesRemoved: 0,
5684
+ message: "Migration already complete. Signal tables already use signal-ID dedupe keys."
5685
+ };
5686
+ }
5687
+ await migrateSignalTables(this.#client, this.logger);
5688
+ return {
5689
+ success: true,
5690
+ alreadyMigrated: false,
5691
+ duplicatesRemoved: 0,
5692
+ message: `Migration complete. Migrated signal tables: ${migrationStatus.tables.map((t) => t.table).join(", ")}.`
5693
+ };
5694
+ }
5465
5695
  // -------------------------------------------------------------------------
5466
5696
  // Strategy
5467
5697
  // -------------------------------------------------------------------------
@@ -5557,6 +5787,22 @@ var ObservabilityStorageClickhouseVNext = class extends storage.ObservabilitySto
5557
5787
  );
5558
5788
  }
5559
5789
  }
5790
+ async getTraceLight(args) {
5791
+ try {
5792
+ return await getTraceLight(this.#client, args);
5793
+ } catch (error$1) {
5794
+ if (error$1 instanceof error.MastraError) throw error$1;
5795
+ throw new error.MastraError(
5796
+ {
5797
+ id: storage.createStorageErrorId("CLICKHOUSE", "GET_TRACE_LIGHT", "FAILED"),
5798
+ domain: error.ErrorDomain.STORAGE,
5799
+ category: error.ErrorCategory.THIRD_PARTY,
5800
+ details: { traceId: args.traceId }
5801
+ },
5802
+ error$1
5803
+ );
5804
+ }
5805
+ }
5560
5806
  async listTraces(args) {
5561
5807
  try {
5562
5808
  return await listTraces(this.#client, args);