@mastra/libsql 1.0.0-beta.9 → 1.0.0

This diff shows the published contents of the two package versions as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (44)
  1. package/CHANGELOG.md +1164 -0
  2. package/dist/docs/README.md +39 -0
  3. package/dist/docs/SKILL.md +40 -0
  4. package/dist/docs/SOURCE_MAP.json +6 -0
  5. package/dist/docs/agents/01-agent-memory.md +166 -0
  6. package/dist/docs/agents/02-networks.md +292 -0
  7. package/dist/docs/agents/03-agent-approval.md +377 -0
  8. package/dist/docs/agents/04-network-approval.md +274 -0
  9. package/dist/docs/core/01-reference.md +151 -0
  10. package/dist/docs/guides/01-ai-sdk.md +141 -0
  11. package/dist/docs/memory/01-overview.md +76 -0
  12. package/dist/docs/memory/02-storage.md +233 -0
  13. package/dist/docs/memory/03-working-memory.md +390 -0
  14. package/dist/docs/memory/04-semantic-recall.md +233 -0
  15. package/dist/docs/memory/05-memory-processors.md +318 -0
  16. package/dist/docs/memory/06-reference.md +133 -0
  17. package/dist/docs/observability/01-overview.md +64 -0
  18. package/dist/docs/observability/02-default.md +177 -0
  19. package/dist/docs/rag/01-retrieval.md +548 -0
  20. package/dist/docs/storage/01-reference.md +542 -0
  21. package/dist/docs/vectors/01-reference.md +213 -0
  22. package/dist/docs/workflows/01-snapshots.md +240 -0
  23. package/dist/index.cjs +546 -107
  24. package/dist/index.cjs.map +1 -1
  25. package/dist/index.js +543 -109
  26. package/dist/index.js.map +1 -1
  27. package/dist/storage/db/index.d.ts +42 -1
  28. package/dist/storage/db/index.d.ts.map +1 -1
  29. package/dist/storage/db/utils.d.ts +16 -1
  30. package/dist/storage/db/utils.d.ts.map +1 -1
  31. package/dist/storage/domains/memory/index.d.ts +3 -2
  32. package/dist/storage/domains/memory/index.d.ts.map +1 -1
  33. package/dist/storage/domains/observability/index.d.ts +23 -0
  34. package/dist/storage/domains/observability/index.d.ts.map +1 -1
  35. package/dist/storage/domains/scores/index.d.ts +0 -1
  36. package/dist/storage/domains/scores/index.d.ts.map +1 -1
  37. package/dist/storage/domains/workflows/index.d.ts +1 -0
  38. package/dist/storage/domains/workflows/index.d.ts.map +1 -1
  39. package/dist/storage/index.d.ts +10 -4
  40. package/dist/storage/index.d.ts.map +1 -1
  41. package/dist/vector/index.d.ts +6 -2
  42. package/dist/vector/index.d.ts.map +1 -1
  43. package/dist/vector/sql-builder.d.ts.map +1 -1
  44. package/package.json +9 -8
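Two user-facing changes in this release are visible directly in the dist/index.js diff below: the LibSQLVector constructor option was renamed from connectionUrl to url, and the per-domain stores (AgentsLibSQL, MemoryLibSQL, ObservabilityLibSQL, ScoresLibSQL, WorkflowsLibSQL) are now exported alongside LibSQLStore and LibSQLVector. A minimal sketch of the new constructor shape, assuming only the option names shown in the diff; the id value and file path are illustrative:

```ts
import { LibSQLVector } from '@mastra/libsql';

// 1.0.0 accepts `url`; 1.0.0-beta.9 used `connectionUrl` for the same value.
const vector = new LibSQLVector({
  id: 'libsql-vector',      // illustrative id, forwarded to the base class via super({ id })
  url: 'file:./vector.db',  // file: and :memory: URLs also trigger the WAL/busy_timeout PRAGMAs below
});
```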
package/dist/index.js CHANGED
@@ -1,8 +1,8 @@
  import { createClient } from '@libsql/client';
  import { MastraError, ErrorCategory, ErrorDomain } from '@mastra/core/error';
- import { createVectorErrorId, MastraStorage, ScoresStorage, SCORERS_SCHEMA, TABLE_SCORERS, normalizePerPage, calculatePagination, createStorageErrorId, transformScoreRow, WorkflowsStorage, TABLE_SCHEMAS, TABLE_WORKFLOW_SNAPSHOT, MemoryStorage, TABLE_THREADS, TABLE_MESSAGES, TABLE_RESOURCES, ObservabilityStorage, SPAN_SCHEMA, TABLE_SPANS, listTracesArgsSchema, AgentsStorage, AGENTS_SCHEMA, TABLE_AGENTS, getSqlType, TraceStatus, safelyParseJSON } from '@mastra/core/storage';
+ import { createVectorErrorId, AgentsStorage, AGENTS_SCHEMA, TABLE_AGENTS, createStorageErrorId, normalizePerPage, calculatePagination, MemoryStorage, TABLE_SCHEMAS, TABLE_THREADS, TABLE_MESSAGES, TABLE_RESOURCES, ObservabilityStorage, SPAN_SCHEMA, TABLE_SPANS, listTracesArgsSchema, ScoresStorage, SCORERS_SCHEMA, TABLE_SCORERS, transformScoreRow, WorkflowsStorage, TABLE_WORKFLOW_SNAPSHOT, MastraCompositeStore, TraceStatus, getSqlType, safelyParseJSON } from '@mastra/core/storage';
  import { parseSqlIdentifier, parseFieldKey } from '@mastra/core/utils';
- import { MastraVector } from '@mastra/core/vector';
+ import { MastraVector, validateTopK, validateUpsertInput } from '@mastra/core/vector';
  import { BaseFilterTranslator } from '@mastra/core/vector/filter';
  import { MastraBase } from '@mastra/core/base';
  import { MessageList } from '@mastra/core/agent';
@@ -242,10 +242,10 @@ var FILTER_OPERATORS = {
  };
  },
  // Element Operators
- $exists: (key) => {
+ $exists: (key, value) => {
  const jsonPath = getJsonPath(key);
  return {
- sql: `json_extract(metadata, ${jsonPath}) IS NOT NULL`,
+ sql: value === false ? `json_extract(metadata, ${jsonPath}) IS NULL` : `json_extract(metadata, ${jsonPath}) IS NOT NULL`,
  needsValue: false
  };
  },
@@ -509,7 +509,7 @@ var LibSQLVector = class extends MastraVector {
  maxRetries;
  initialBackoffMs;
  constructor({
- connectionUrl,
+ url,
  authToken,
  syncUrl,
  syncInterval,
@@ -519,14 +519,14 @@ var LibSQLVector = class extends MastraVector {
  }) {
  super({ id });
  this.turso = createClient({
- url: connectionUrl,
+ url,
  syncUrl,
  authToken,
  syncInterval
  });
  this.maxRetries = maxRetries;
  this.initialBackoffMs = initialBackoffMs;
- if (connectionUrl.includes(`file:`) || connectionUrl.includes(`:memory:`)) {
+ if (url.includes(`file:`) || url.includes(`:memory:`)) {
  this.turso.execute("PRAGMA journal_mode=WAL;").then(() => this.logger.debug("LibSQLStore: PRAGMA journal_mode=WAL set.")).catch((err) => this.logger.warn("LibSQLStore: Failed to set PRAGMA journal_mode=WAL.", err));
  this.turso.execute("PRAGMA busy_timeout = 5000;").then(() => this.logger.debug("LibSQLStore: PRAGMA busy_timeout=5000 set.")).catch((err) => this.logger.warn("LibSQLStore: Failed to set PRAGMA busy_timeout=5000.", err));
  }
@@ -538,7 +538,7 @@ var LibSQLVector = class extends MastraVector {
  try {
  return await operation();
  } catch (error) {
- if (error.code === "SQLITE_BUSY" || error.message && error.message.toLowerCase().includes("database is locked")) {
+ if (error.code === "SQLITE_BUSY" || error.code === "SQLITE_LOCKED" || error.code === "SQLITE_LOCKED_SHAREDCACHE" || error.message && error.message.toLowerCase().includes("database is locked") || error.message && error.message.toLowerCase().includes("database table is locked")) {
  attempts++;
  if (attempts >= this.maxRetries) {
  this.logger.error(
@@ -572,22 +572,14 @@ var LibSQLVector = class extends MastraVector {
  minScore = -1
  // Default to -1 to include all results (cosine similarity ranges from -1 to 1)
  }) {
- try {
- if (!Number.isInteger(topK) || topK <= 0) {
- throw new Error("topK must be a positive integer");
- }
- if (!Array.isArray(queryVector) || !queryVector.every((x) => typeof x === "number" && Number.isFinite(x))) {
- throw new Error("queryVector must be an array of finite numbers");
- }
- } catch (error) {
- throw new MastraError(
- {
- id: createVectorErrorId("LIBSQL", "QUERY", "INVALID_ARGS"),
- domain: ErrorDomain.STORAGE,
- category: ErrorCategory.USER
- },
- error
- );
+ validateTopK("LIBSQL", topK);
+ if (!Array.isArray(queryVector) || !queryVector.every((x) => typeof x === "number" && Number.isFinite(x))) {
+ throw new MastraError({
+ id: createVectorErrorId("LIBSQL", "QUERY", "INVALID_ARGS"),
+ domain: ErrorDomain.STORAGE,
+ category: ErrorCategory.USER,
+ details: { message: "queryVector must be an array of finite numbers" }
+ });
  }
  try {
  const parsedIndexName = parseSqlIdentifier(indexName, "index name");
@@ -647,6 +639,7 @@ var LibSQLVector = class extends MastraVector {
  }
  }
  async doUpsert({ indexName, vectors, metadata, ids }) {
+ validateUpsertInput("LIBSQL", vectors, metadata, ids);
  const tx = await this.turso.transaction("write");
  try {
  const parsedIndexName = parseSqlIdentifier(indexName, "index name");
@@ -1093,6 +1086,14 @@ var LibSQLVector = class extends MastraVector {
  });
  }
  };
+ function buildSelectColumns(tableName) {
+ const schema = TABLE_SCHEMAS[tableName];
+ return Object.keys(schema).map((col) => {
+ const colDef = schema[col];
+ const parsedCol = parseSqlIdentifier(col, "column name");
+ return colDef?.type === "jsonb" ? `json(${parsedCol}) as ${parsedCol}` : parsedCol;
+ }).join(", ");
+ }
  function isLockError(error) {
  return error.code === "SQLITE_BUSY" || error.code === "SQLITE_LOCKED" || error.message?.toLowerCase().includes("database is locked") || error.message?.toLowerCase().includes("database table is locked") || error.message?.toLowerCase().includes("table is locked") || error.constructor.name === "SqliteError" && error.message?.toLowerCase().includes("locked");
  }
@@ -1141,17 +1142,27 @@ function createExecuteWriteOperationWithRetry({
  }
  function prepareStatement({ tableName, record }) {
  const parsedTableName = parseSqlIdentifier(tableName, "table name");
- const columns = Object.keys(record).map((col) => parseSqlIdentifier(col, "column name"));
- const values = Object.values(record).map((v) => {
+ const schema = TABLE_SCHEMAS[tableName];
+ const columnNames = Object.keys(record);
+ const columns = columnNames.map((col) => parseSqlIdentifier(col, "column name"));
+ const values = columnNames.map((col) => {
+ const v = record[col];
  if (typeof v === `undefined` || v === null) {
  return null;
  }
+ const colDef = schema[col];
+ if (colDef?.type === "jsonb") {
+ return JSON.stringify(v);
+ }
  if (v instanceof Date) {
  return v.toISOString();
  }
  return typeof v === "object" ? JSON.stringify(v) : v;
  });
- const placeholders = values.map(() => "?").join(", ");
+ const placeholders = columnNames.map((col) => {
+ const colDef = schema[col];
+ return colDef?.type === "jsonb" ? "jsonb(?)" : "?";
+ }).join(", ");
  return {
  sql: `INSERT OR REPLACE INTO ${parsedTableName} (${columns.join(", ")}) VALUES (${placeholders})`,
  args: values
@@ -1164,19 +1175,33 @@ function prepareUpdateStatement({
  }) {
  const parsedTableName = parseSqlIdentifier(tableName, "table name");
  const schema = TABLE_SCHEMAS[tableName];
- const updateColumns = Object.keys(updates).map((col) => parseSqlIdentifier(col, "column name"));
- const updateValues = Object.values(updates).map(transformToSqlValue);
- const setClause = updateColumns.map((col) => `${col} = ?`).join(", ");
+ const updateColumnNames = Object.keys(updates);
+ const updateColumns = updateColumnNames.map((col) => parseSqlIdentifier(col, "column name"));
+ const updateValues = updateColumnNames.map((col) => {
+ const colDef = schema[col];
+ const v = updates[col];
+ if (colDef?.type === "jsonb") {
+ return transformToSqlValue(v, true);
+ }
+ return transformToSqlValue(v, false);
+ });
+ const setClause = updateColumns.map((col, i) => {
+ const colDef = schema[updateColumnNames[i]];
+ return colDef?.type === "jsonb" ? `${col} = jsonb(?)` : `${col} = ?`;
+ }).join(", ");
  const whereClause = prepareWhereClause(keys, schema);
  return {
  sql: `UPDATE ${parsedTableName} SET ${setClause}${whereClause.sql}`,
  args: [...updateValues, ...whereClause.args]
  };
  }
- function transformToSqlValue(value) {
+ function transformToSqlValue(value, forceJsonStringify = false) {
  if (typeof value === "undefined" || value === null) {
  return null;
  }
+ if (forceJsonStringify) {
+ return JSON.stringify(value);
+ }
  if (value instanceof Date) {
  return value.toISOString();
  }
@@ -1533,11 +1558,12 @@ var LibSQLDB = class extends MastraBase {
  */
  async select({ tableName, keys }) {
  const parsedTableName = parseSqlIdentifier(tableName, "table name");
+ const columns = buildSelectColumns(tableName);
  const parsedKeys = Object.keys(keys).map((key) => parseSqlIdentifier(key, "column name"));
  const conditions = parsedKeys.map((key) => `${key} = ?`).join(" AND ");
  const values = Object.values(keys);
  const result = await this.client.execute({
- sql: `SELECT * FROM ${parsedTableName} WHERE ${conditions} ORDER BY createdAt DESC LIMIT 1`,
+ sql: `SELECT ${columns} FROM ${parsedTableName} WHERE ${conditions} ORDER BY createdAt DESC LIMIT 1`,
  args: values
  });
  if (!result.rows || result.rows.length === 0) {
@@ -1577,7 +1603,8 @@ var LibSQLDB = class extends MastraBase {
  args
  }) {
  const parsedTableName = parseSqlIdentifier(tableName, "table name");
- let statement = `SELECT * FROM ${parsedTableName}`;
+ const columns = buildSelectColumns(tableName);
+ let statement = `SELECT ${columns} FROM ${parsedTableName}`;
  if (whereClause?.sql) {
  statement += ` ${whereClause.sql}`;
  }
@@ -1594,7 +1621,17 @@ var LibSQLDB = class extends MastraBase {
  sql: statement,
  args: [...whereClause?.args ?? [], ...args ?? []]
  });
- return result.rows;
+ return (result.rows ?? []).map((row) => {
+ return Object.fromEntries(
+ Object.entries(row || {}).map(([k, v]) => {
+ try {
+ return [k, typeof v === "string" ? v.startsWith("{") || v.startsWith("[") ? JSON.parse(v) : v : v];
+ } catch {
+ return [k, v];
+ }
+ })
+ );
+ });
  }
  /**
  * Returns the total count of records matching the optional WHERE clause.
@@ -1638,7 +1675,7 @@ var LibSQLDB = class extends MastraBase {
  // SQLite uses 0/1 for booleans
  case "jsonb":
  return "TEXT";
- // Store JSON as TEXT in SQLite
+ // SQLite: column stores TEXT, we use jsonb()/json() functions for binary optimization
  default:
  return getSqlType(type);
  }
@@ -1666,6 +1703,9 @@ var LibSQLDB = class extends MastraBase {
  if (tableName === TABLE_WORKFLOW_SNAPSHOT) {
  tableConstraints.push("UNIQUE (workflow_name, run_id)");
  }
+ if (tableName === TABLE_SPANS) {
+ tableConstraints.push("UNIQUE (spanId, traceId)");
+ }
  const allDefinitions = [...columnDefinitions, ...tableConstraints].join(",\n ");
  const sql = `CREATE TABLE IF NOT EXISTS ${parsedTableName} (
  ${allDefinitions}
@@ -1676,6 +1716,9 @@ var LibSQLDB = class extends MastraBase {
  await this.migrateSpansTable();
  }
  } catch (error) {
+ if (error instanceof MastraError) {
+ throw error;
+ }
  throw new MastraError(
  {
  id: createStorageErrorId("LIBSQL", "CREATE_TABLE", "FAILED"),
@@ -1689,7 +1732,7 @@ var LibSQLDB = class extends MastraBase {
  }
  /**
  * Migrates the spans table schema from OLD_SPAN_SCHEMA to current SPAN_SCHEMA.
- * This adds new columns that don't exist in old schema.
+ * This adds new columns that don't exist in old schema and ensures required indexes exist.
  */
  async migrateSpansTable() {
  const schema = TABLE_SCHEMAS[TABLE_SPANS];
@@ -1703,11 +1746,205 @@ var LibSQLDB = class extends MastraBase {
  this.logger.debug(`LibSQLDB: Added column '${columnName}' to ${TABLE_SPANS}`);
  }
  }
+ const indexExists = await this.spansUniqueIndexExists();
+ if (!indexExists) {
+ const duplicateInfo = await this.checkForDuplicateSpans();
+ if (duplicateInfo.hasDuplicates) {
+ const errorMessage = `
+ ===========================================================================
+ MIGRATION REQUIRED: Duplicate spans detected in ${TABLE_SPANS}
+ ===========================================================================
+
+ Found ${duplicateInfo.duplicateCount} duplicate (traceId, spanId) combinations.
+
+ The spans table requires a unique constraint on (traceId, spanId), but your
+ database contains duplicate entries that must be resolved first.
+
+ To fix this, run the manual migration command:
+
+ npx mastra migrate
+
+ This command will:
+ 1. Remove duplicate spans (keeping the most complete/recent version)
+ 2. Add the required unique constraint
+
+ Note: This migration may take some time for large tables.
+ ===========================================================================
+ `;
+ throw new MastraError({
+ id: createStorageErrorId("LIBSQL", "MIGRATION_REQUIRED", "DUPLICATE_SPANS"),
+ domain: ErrorDomain.STORAGE,
+ category: ErrorCategory.USER,
+ text: errorMessage
+ });
+ } else {
+ await this.client.execute(
+ `CREATE UNIQUE INDEX IF NOT EXISTS "mastra_ai_spans_spanid_traceid_idx" ON "${TABLE_SPANS}" ("spanId", "traceId")`
+ );
+ this.logger.debug(`LibSQLDB: Created unique index on (spanId, traceId) for ${TABLE_SPANS}`);
+ }
+ }
  this.logger.info(`LibSQLDB: Migration completed for ${TABLE_SPANS}`);
  } catch (error) {
+ if (error instanceof MastraError) {
+ throw error;
+ }
  this.logger.warn(`LibSQLDB: Failed to migrate spans table ${TABLE_SPANS}:`, error);
  }
  }
+ /**
+ * Checks if the unique index on (spanId, traceId) already exists on the spans table.
+ * Used to skip deduplication when the index already exists (migration already complete).
+ */
+ async spansUniqueIndexExists() {
+ try {
+ const result = await this.client.execute(
+ `SELECT 1 FROM sqlite_master WHERE type = 'index' AND name = 'mastra_ai_spans_spanid_traceid_idx'`
+ );
+ return (result.rows?.length ?? 0) > 0;
+ } catch {
+ return false;
+ }
+ }
+ /**
+ * Checks for duplicate (traceId, spanId) combinations in the spans table.
+ * Returns information about duplicates for logging/CLI purposes.
+ */
+ async checkForDuplicateSpans() {
+ try {
+ const result = await this.client.execute(`
+ SELECT COUNT(*) as duplicate_count FROM (
+ SELECT "spanId", "traceId"
+ FROM "${TABLE_SPANS}"
+ GROUP BY "spanId", "traceId"
+ HAVING COUNT(*) > 1
+ )
+ `);
+ const duplicateCount = Number(result.rows?.[0]?.duplicate_count ?? 0);
+ return {
+ hasDuplicates: duplicateCount > 0,
+ duplicateCount
+ };
+ } catch (error) {
+ this.logger.debug(`LibSQLDB: Could not check for duplicates: ${error}`);
+ return { hasDuplicates: false, duplicateCount: 0 };
+ }
+ }
+ /**
+ * Manually run the spans migration to deduplicate and add the unique constraint.
+ * This is intended to be called from the CLI when duplicates are detected.
+ *
+ * @returns Migration result with status and details
+ */
+ async migrateSpans() {
+ const indexExists = await this.spansUniqueIndexExists();
+ if (indexExists) {
+ return {
+ success: true,
+ alreadyMigrated: true,
+ duplicatesRemoved: 0,
+ message: `Migration already complete. Unique index exists on ${TABLE_SPANS}.`
+ };
+ }
+ const duplicateInfo = await this.checkForDuplicateSpans();
+ if (duplicateInfo.hasDuplicates) {
+ this.logger.info(
+ `Found ${duplicateInfo.duplicateCount} duplicate (traceId, spanId) combinations. Starting deduplication...`
+ );
+ await this.deduplicateSpans();
+ } else {
+ this.logger.info(`No duplicate spans found.`);
+ }
+ await this.client.execute(
+ `CREATE UNIQUE INDEX IF NOT EXISTS "mastra_ai_spans_spanid_traceid_idx" ON "${TABLE_SPANS}" ("spanId", "traceId")`
+ );
+ return {
+ success: true,
+ alreadyMigrated: false,
+ duplicatesRemoved: duplicateInfo.duplicateCount,
+ message: duplicateInfo.hasDuplicates ? `Migration complete. Removed duplicates and added unique index to ${TABLE_SPANS}.` : `Migration complete. Added unique index to ${TABLE_SPANS}.`
+ };
+ }
+ /**
+ * Check migration status for the spans table.
+ * Returns information about whether migration is needed.
+ */
+ async checkSpansMigrationStatus() {
+ const indexExists = await this.spansUniqueIndexExists();
+ if (indexExists) {
+ return {
+ needsMigration: false,
+ hasDuplicates: false,
+ duplicateCount: 0,
+ constraintExists: true,
+ tableName: TABLE_SPANS
+ };
+ }
+ const duplicateInfo = await this.checkForDuplicateSpans();
+ return {
+ needsMigration: true,
+ hasDuplicates: duplicateInfo.hasDuplicates,
+ duplicateCount: duplicateInfo.duplicateCount,
+ constraintExists: false,
+ tableName: TABLE_SPANS
+ };
+ }
+ /**
+ * Deduplicates spans table by removing duplicate (spanId, traceId) combinations.
+ * Keeps the "best" record for each duplicate group based on:
+ * 1. Completed spans (endedAt IS NOT NULL) over incomplete ones
+ * 2. Most recently updated (updatedAt DESC)
+ * 3. Most recently created (createdAt DESC) as tiebreaker
+ */
+ async deduplicateSpans() {
+ try {
+ const duplicateCheck = await this.client.execute(`
+ SELECT COUNT(*) as duplicate_count FROM (
+ SELECT "spanId", "traceId"
+ FROM "${TABLE_SPANS}"
+ GROUP BY "spanId", "traceId"
+ HAVING COUNT(*) > 1
+ )
+ `);
+ const duplicateCount = Number(duplicateCheck.rows?.[0]?.duplicate_count ?? 0);
+ if (duplicateCount === 0) {
+ this.logger.debug(`LibSQLDB: No duplicate spans found, skipping deduplication`);
+ return;
+ }
+ this.logger.warn(`LibSQLDB: Found ${duplicateCount} duplicate (spanId, traceId) combinations, deduplicating...`);
+ const deleteResult = await this.client.execute(`
+ DELETE FROM "${TABLE_SPANS}"
+ WHERE rowid NOT IN (
+ SELECT MIN(best_rowid) FROM (
+ SELECT
+ rowid as best_rowid,
+ "spanId",
+ "traceId",
+ ROW_NUMBER() OVER (
+ PARTITION BY "spanId", "traceId"
+ ORDER BY
+ CASE WHEN "endedAt" IS NOT NULL THEN 0 ELSE 1 END,
+ "updatedAt" DESC,
+ "createdAt" DESC
+ ) as rn
+ FROM "${TABLE_SPANS}"
+ ) ranked
+ WHERE rn = 1
+ GROUP BY "spanId", "traceId"
+ )
+ AND ("spanId", "traceId") IN (
+ SELECT "spanId", "traceId"
+ FROM "${TABLE_SPANS}"
+ GROUP BY "spanId", "traceId"
+ HAVING COUNT(*) > 1
+ )
+ `);
+ const deletedCount = deleteResult.rowsAffected ?? 0;
+ this.logger.warn(`LibSQLDB: Deleted ${deletedCount} duplicate span records`);
+ } catch (error) {
+ this.logger.warn(`LibSQLDB: Failed to deduplicate spans:`, error);
+ }
+ }
  /**
  * Gets a default value for a column type (used when adding NOT NULL columns).
  */
@@ -2210,13 +2447,15 @@ var MemoryLibSQL = class extends MemoryStorage {
  queryParams.push(resourceId);
  }
  if (filter?.dateRange?.start) {
- conditions.push(`"createdAt" >= ?`);
+ const startOp = filter.dateRange.startExclusive ? ">" : ">=";
+ conditions.push(`"createdAt" ${startOp} ?`);
  queryParams.push(
  filter.dateRange.start instanceof Date ? filter.dateRange.start.toISOString() : filter.dateRange.start
  );
  }
  if (filter?.dateRange?.end) {
- conditions.push(`"createdAt" <= ?`);
+ const endOp = filter.dateRange.endExclusive ? "<" : "<=";
+ conditions.push(`"createdAt" ${endOp} ?`);
  queryParams.push(
  filter.dateRange.end instanceof Date ? filter.dateRange.end.toISOString() : filter.dateRange.end
  );
@@ -2522,8 +2761,8 @@ var MemoryLibSQL = class extends MemoryStorage {
  await this.#db.insert({
  tableName: TABLE_RESOURCES,
  record: {
- ...resource,
- metadata: JSON.stringify(resource.metadata)
+ ...resource
+ // metadata is handled by prepareStatement which stringifies jsonb columns
  }
  });
  return resource;
@@ -2560,7 +2799,7 @@ var MemoryLibSQL = class extends MemoryStorage {
  values.push(workingMemory);
  }
  if (metadata) {
- updates.push("metadata = ?");
+ updates.push("metadata = jsonb(?)");
  values.push(JSON.stringify(updatedResource.metadata));
  }
  updates.push("updatedAt = ?");
@@ -2599,33 +2838,76 @@ var MemoryLibSQL = class extends MemoryStorage {
  );
  }
  }
- async listThreadsByResourceId(args) {
- const { resourceId, page = 0, perPage: perPageInput, orderBy } = args;
- if (page < 0) {
+ async listThreads(args) {
+ const { page = 0, perPage: perPageInput, orderBy, filter } = args;
+ try {
+ this.validatePaginationInput(page, perPageInput ?? 100);
+ } catch (error) {
  throw new MastraError(
  {
- id: createStorageErrorId("LIBSQL", "LIST_THREADS_BY_RESOURCE_ID", "INVALID_PAGE"),
+ id: createStorageErrorId("LIBSQL", "LIST_THREADS", "INVALID_PAGE"),
  domain: ErrorDomain.STORAGE,
  category: ErrorCategory.USER,
- details: { page }
+ details: { page, ...perPageInput !== void 0 && { perPage: perPageInput } }
  },
- new Error("page must be >= 0")
+ error instanceof Error ? error : new Error("Invalid pagination parameters")
  );
  }
  const perPage = normalizePerPage(perPageInput, 100);
+ try {
+ this.validateMetadataKeys(filter?.metadata);
+ } catch (error) {
+ throw new MastraError(
+ {
+ id: createStorageErrorId("LIBSQL", "LIST_THREADS", "INVALID_METADATA_KEY"),
+ domain: ErrorDomain.STORAGE,
+ category: ErrorCategory.USER,
+ details: { metadataKeys: filter?.metadata ? Object.keys(filter.metadata).join(", ") : "" }
+ },
+ error instanceof Error ? error : new Error("Invalid metadata key")
+ );
+ }
  const { offset, perPage: perPageForResponse } = calculatePagination(page, perPageInput, perPage);
  const { field, direction } = this.parseOrderBy(orderBy);
  try {
- const baseQuery = `FROM ${TABLE_THREADS} WHERE resourceId = ?`;
- const queryParams = [resourceId];
+ const whereClauses = [];
+ const queryParams = [];
+ if (filter?.resourceId) {
+ whereClauses.push("resourceId = ?");
+ queryParams.push(filter.resourceId);
+ }
+ if (filter?.metadata && Object.keys(filter.metadata).length > 0) {
+ for (const [key, value] of Object.entries(filter.metadata)) {
+ if (value === null) {
+ whereClauses.push(`json_extract(metadata, '$.${key}') IS NULL`);
+ } else if (typeof value === "boolean") {
+ whereClauses.push(`json_extract(metadata, '$.${key}') = ?`);
+ queryParams.push(value ? 1 : 0);
+ } else if (typeof value === "number") {
+ whereClauses.push(`json_extract(metadata, '$.${key}') = ?`);
+ queryParams.push(value);
+ } else if (typeof value === "string") {
+ whereClauses.push(`json_extract(metadata, '$.${key}') = ?`);
+ queryParams.push(value);
+ } else {
+ throw new MastraError({
+ id: createStorageErrorId("LIBSQL", "LIST_THREADS", "INVALID_METADATA_VALUE"),
+ domain: ErrorDomain.STORAGE,
+ category: ErrorCategory.USER,
+ text: `Metadata filter value for key "${key}" must be a scalar type (string, number, boolean, or null), got ${typeof value}`,
+ details: { key, valueType: typeof value }
+ });
+ }
+ }
+ }
+ const whereClause = whereClauses.length > 0 ? `WHERE ${whereClauses.join(" AND ")}` : "";
+ const baseQuery = `FROM ${TABLE_THREADS} ${whereClause}`;
  const mapRowToStorageThreadType = (row) => ({
  id: row.id,
  resourceId: row.resourceId,
  title: row.title,
  createdAt: new Date(row.createdAt),
- // Convert string to Date
  updatedAt: new Date(row.updatedAt),
- // Convert string to Date
  metadata: typeof row.metadata === "string" ? JSON.parse(row.metadata) : row.metadata
  });
  const countResult = await this.#client.execute({
@@ -2644,7 +2926,7 @@ var MemoryLibSQL = class extends MemoryStorage {
  }
  const limitValue = perPageInput === false ? total : perPage;
  const dataResult = await this.#client.execute({
- sql: `SELECT * ${baseQuery} ORDER BY "${field}" ${direction} LIMIT ? OFFSET ?`,
+ sql: `SELECT ${buildSelectColumns(TABLE_THREADS)} ${baseQuery} ORDER BY "${field}" ${direction} LIMIT ? OFFSET ?`,
  args: [...queryParams, limitValue, offset]
  });
  const threads = (dataResult.rows || []).map(mapRowToStorageThreadType);
@@ -2656,12 +2938,18 @@ var MemoryLibSQL = class extends MemoryStorage {
  hasMore: perPageInput === false ? false : offset + perPage < total
  };
  } catch (error) {
+ if (error instanceof MastraError && error.category === ErrorCategory.USER) {
+ throw error;
+ }
  const mastraError = new MastraError(
  {
- id: createStorageErrorId("LIBSQL", "LIST_THREADS_BY_RESOURCE_ID", "FAILED"),
+ id: createStorageErrorId("LIBSQL", "LIST_THREADS", "FAILED"),
  domain: ErrorDomain.STORAGE,
  category: ErrorCategory.THIRD_PARTY,
- details: { resourceId }
+ details: {
+ ...filter?.resourceId && { resourceId: filter.resourceId },
+ hasMetadataFilter: !!filter?.metadata
+ }
  },
  error
  );
@@ -2681,8 +2969,8 @@ var MemoryLibSQL = class extends MemoryStorage {
  await this.#db.insert({
  tableName: TABLE_THREADS,
  record: {
- ...thread,
- metadata: JSON.stringify(thread.metadata)
+ ...thread
+ // metadata is handled by prepareStatement which stringifies jsonb columns
  }
  });
  return thread;
@@ -2729,7 +3017,7 @@ var MemoryLibSQL = class extends MemoryStorage {
  };
  try {
  await this.#client.execute({
- sql: `UPDATE ${TABLE_THREADS} SET title = ?, metadata = ? WHERE id = ?`,
+ sql: `UPDATE ${TABLE_THREADS} SET title = ?, metadata = jsonb(?) WHERE id = ?`,
  args: [title, JSON.stringify(updatedThread.metadata), id]
  });
  return updatedThread;
@@ -2768,6 +3056,148 @@ var MemoryLibSQL = class extends MemoryStorage {
  );
  }
  }
+ async cloneThread(args) {
+ const { sourceThreadId, newThreadId: providedThreadId, resourceId, title, metadata, options } = args;
+ const sourceThread = await this.getThreadById({ threadId: sourceThreadId });
+ if (!sourceThread) {
+ throw new MastraError({
+ id: createStorageErrorId("LIBSQL", "CLONE_THREAD", "SOURCE_NOT_FOUND"),
+ domain: ErrorDomain.STORAGE,
+ category: ErrorCategory.USER,
+ text: `Source thread with id ${sourceThreadId} not found`,
+ details: { sourceThreadId }
+ });
+ }
+ const newThreadId = providedThreadId || crypto.randomUUID();
+ const existingThread = await this.getThreadById({ threadId: newThreadId });
+ if (existingThread) {
+ throw new MastraError({
+ id: createStorageErrorId("LIBSQL", "CLONE_THREAD", "THREAD_EXISTS"),
+ domain: ErrorDomain.STORAGE,
+ category: ErrorCategory.USER,
+ text: `Thread with id ${newThreadId} already exists`,
+ details: { newThreadId }
+ });
+ }
+ try {
+ let messageQuery = `SELECT id, content, role, type, "createdAt", thread_id, "resourceId"
+ FROM "${TABLE_MESSAGES}" WHERE thread_id = ?`;
+ const messageParams = [sourceThreadId];
+ if (options?.messageFilter?.startDate) {
+ messageQuery += ` AND "createdAt" >= ?`;
+ messageParams.push(
+ options.messageFilter.startDate instanceof Date ? options.messageFilter.startDate.toISOString() : options.messageFilter.startDate
+ );
+ }
+ if (options?.messageFilter?.endDate) {
+ messageQuery += ` AND "createdAt" <= ?`;
+ messageParams.push(
+ options.messageFilter.endDate instanceof Date ? options.messageFilter.endDate.toISOString() : options.messageFilter.endDate
+ );
+ }
+ if (options?.messageFilter?.messageIds && options.messageFilter.messageIds.length > 0) {
+ messageQuery += ` AND id IN (${options.messageFilter.messageIds.map(() => "?").join(", ")})`;
+ messageParams.push(...options.messageFilter.messageIds);
+ }
+ messageQuery += ` ORDER BY "createdAt" ASC`;
+ if (options?.messageLimit && options.messageLimit > 0) {
+ const limitQuery = `SELECT * FROM (${messageQuery.replace('ORDER BY "createdAt" ASC', 'ORDER BY "createdAt" DESC')} LIMIT ?) ORDER BY "createdAt" ASC`;
+ messageParams.push(options.messageLimit);
+ messageQuery = limitQuery;
+ }
+ const sourceMessagesResult = await this.#client.execute({ sql: messageQuery, args: messageParams });
+ const sourceMessages = sourceMessagesResult.rows || [];
+ const now = /* @__PURE__ */ new Date();
+ const nowStr = now.toISOString();
+ const lastMessageId = sourceMessages.length > 0 ? sourceMessages[sourceMessages.length - 1].id : void 0;
+ const cloneMetadata = {
+ sourceThreadId,
+ clonedAt: now,
+ ...lastMessageId && { lastMessageId }
+ };
+ const newThread = {
+ id: newThreadId,
+ resourceId: resourceId || sourceThread.resourceId,
+ title: title || (sourceThread.title ? `Clone of ${sourceThread.title}` : void 0),
+ metadata: {
+ ...metadata,
+ clone: cloneMetadata
+ },
+ createdAt: now,
+ updatedAt: now
+ };
+ const tx = await this.#client.transaction("write");
+ try {
+ await tx.execute({
+ sql: `INSERT INTO "${TABLE_THREADS}" (id, "resourceId", title, metadata, "createdAt", "updatedAt")
+ VALUES (?, ?, ?, jsonb(?), ?, ?)`,
+ args: [
+ newThread.id,
+ newThread.resourceId,
+ newThread.title || null,
+ JSON.stringify(newThread.metadata),
+ nowStr,
+ nowStr
+ ]
+ });
+ const clonedMessages = [];
+ const targetResourceId = resourceId || sourceThread.resourceId;
+ for (const sourceMsg of sourceMessages) {
+ const newMessageId = crypto.randomUUID();
+ const contentStr = sourceMsg.content;
+ let parsedContent;
+ try {
+ parsedContent = JSON.parse(contentStr);
+ } catch {
+ parsedContent = { format: 2, parts: [{ type: "text", text: contentStr }] };
+ }
+ await tx.execute({
+ sql: `INSERT INTO "${TABLE_MESSAGES}" (id, thread_id, content, role, type, "createdAt", "resourceId")
+ VALUES (?, ?, ?, ?, ?, ?, ?)`,
+ args: [
+ newMessageId,
+ newThreadId,
+ contentStr,
+ sourceMsg.role,
+ sourceMsg.type || "v2",
+ sourceMsg.createdAt,
+ targetResourceId
+ ]
+ });
+ clonedMessages.push({
+ id: newMessageId,
+ threadId: newThreadId,
+ content: parsedContent,
+ role: sourceMsg.role,
+ type: sourceMsg.type || void 0,
+ createdAt: new Date(sourceMsg.createdAt),
+ resourceId: targetResourceId
+ });
+ }
+ await tx.commit();
+ return {
+ thread: newThread,
+ clonedMessages
+ };
+ } catch (error) {
+ await tx.rollback();
+ throw error;
+ }
+ } catch (error) {
+ if (error instanceof MastraError) {
+ throw error;
+ }
+ throw new MastraError(
+ {
+ id: createStorageErrorId("LIBSQL", "CLONE_THREAD", "FAILED"),
+ domain: ErrorDomain.STORAGE,
+ category: ErrorCategory.THIRD_PARTY,
+ details: { sourceThreadId, newThreadId }
+ },
+ error
+ );
+ }
+ }
  };
  var ObservabilityLibSQL = class extends ObservabilityStorage {
  #db;
@@ -2782,6 +3212,22 @@ var ObservabilityLibSQL = class extends ObservabilityStorage {
  async dangerouslyClearAll() {
  await this.#db.deleteData({ tableName: TABLE_SPANS });
  }
+ /**
+ * Manually run the spans migration to deduplicate and add the unique constraint.
+ * This is intended to be called from the CLI when duplicates are detected.
+ *
+ * @returns Migration result with status and details
+ */
+ async migrateSpans() {
+ return this.#db.migrateSpans();
+ }
+ /**
+ * Check migration status for the spans table.
+ * Returns information about whether migration is needed.
+ */
+ async checkSpansMigrationStatus() {
+ return this.#db.checkSpansMigrationStatus();
+ }
  get tracingStrategy() {
  return {
  preferred: "batch-with-updates",
@@ -3249,7 +3695,7 @@ var ScoresLibSQL = class extends ScoresStorage {
  const limitValue = perPageInput === false ? total : perPage;
  const end = perPageInput === false ? total : start + perPage;
  const result = await this.#client.execute({
- sql: `SELECT * FROM ${TABLE_SCORERS} WHERE runId = ? ORDER BY createdAt DESC LIMIT ? OFFSET ?`,
+ sql: `SELECT ${buildSelectColumns(TABLE_SCORERS)} FROM ${TABLE_SCORERS} WHERE runId = ? ORDER BY createdAt DESC LIMIT ? OFFSET ?`,
  args: [runId, limitValue, start]
  });
  const scores = result.rows?.map((row) => this.transformScoreRow(row)) ?? [];
@@ -3322,7 +3768,7 @@ var ScoresLibSQL = class extends ScoresStorage {
  const limitValue = perPageInput === false ? total : perPage;
  const end = perPageInput === false ? total : start + perPage;
  const result = await this.#client.execute({
- sql: `SELECT * FROM ${TABLE_SCORERS} ${whereClause} ORDER BY createdAt DESC LIMIT ? OFFSET ?`,
+ sql: `SELECT ${buildSelectColumns(TABLE_SCORERS)} FROM ${TABLE_SCORERS} ${whereClause} ORDER BY createdAt DESC LIMIT ? OFFSET ?`,
  args: [...queryParams, limitValue, start]
  });
  const scores = result.rows?.map((row) => this.transformScoreRow(row)) ?? [];
@@ -3348,16 +3794,13 @@ var ScoresLibSQL = class extends ScoresStorage {
  }
  /**
  * LibSQL-specific score row transformation.
- * Maps additionalLLMContext column to additionalContext field.
  */
  transformScoreRow(row) {
- return transformScoreRow(row, {
- fieldMappings: { additionalContext: "additionalLLMContext" }
- });
+ return transformScoreRow(row);
  }
  async getScoreById({ id }) {
  const result = await this.#client.execute({
- sql: `SELECT * FROM ${TABLE_SCORERS} WHERE id = ?`,
+ sql: `SELECT ${buildSelectColumns(TABLE_SCORERS)} FROM ${TABLE_SCORERS} WHERE id = ?`,
  args: [id]
  });
  return result.rows?.[0] ? this.transformScoreRow(result.rows[0]) : null;
@@ -3435,7 +3878,7 @@ var ScoresLibSQL = class extends ScoresStorage {
  const limitValue = perPageInput === false ? total : perPage;
  const end = perPageInput === false ? total : start + perPage;
  const result = await this.#client.execute({
- sql: `SELECT * FROM ${TABLE_SCORERS} WHERE entityId = ? AND entityType = ? ORDER BY createdAt DESC LIMIT ? OFFSET ?`,
+ sql: `SELECT ${buildSelectColumns(TABLE_SCORERS)} FROM ${TABLE_SCORERS} WHERE entityId = ? AND entityType = ? ORDER BY createdAt DESC LIMIT ? OFFSET ?`,
  args: [entityId, entityType, limitValue, start]
  });
  const scores = result.rows?.map((row) => this.transformScoreRow(row)) ?? [];
@@ -3476,7 +3919,7 @@ var ScoresLibSQL = class extends ScoresStorage {
  const limitValue = perPageInput === false ? total : perPage;
  const end = perPageInput === false ? total : start + perPage;
  const result = await this.#client.execute({
- sql: `SELECT * FROM ${TABLE_SCORERS} WHERE traceId = ? AND spanId = ? ORDER BY createdAt DESC LIMIT ? OFFSET ?`,
+ sql: `SELECT ${buildSelectColumns(TABLE_SCORERS)} FROM ${TABLE_SCORERS} WHERE traceId = ? AND spanId = ? ORDER BY createdAt DESC LIMIT ? OFFSET ?`,
  args: [traceId, spanId, limitValue, start]
  });
  const scores = result.rows?.map((row) => this.transformScoreRow(row)) ?? [];
@@ -3501,24 +3944,6 @@ var ScoresLibSQL = class extends ScoresStorage {
  }
  }
  };
- function parseWorkflowRun(row) {
- let parsedSnapshot = row.snapshot;
- if (typeof parsedSnapshot === "string") {
- try {
- parsedSnapshot = JSON.parse(row.snapshot);
- } catch (e) {
- console.warn(`Failed to parse snapshot for workflow ${row.workflow_name}: ${e}`);
- }
- }
- return {
- workflowName: row.workflow_name,
- runId: row.run_id,
- snapshot: parsedSnapshot,
- resourceId: row.resourceId,
- createdAt: new Date(row.createdAt),
- updatedAt: new Date(row.updatedAt)
- };
- }
  var WorkflowsLibSQL = class extends WorkflowsStorage {
  #db;
  #client;
@@ -3539,6 +3964,24 @@ var WorkflowsLibSQL = class extends WorkflowsStorage {
  (err) => this.logger.warn("LibSQL Workflows: Failed to setup PRAGMA settings.", err)
  );
  }
+ parseWorkflowRun(row) {
+ let parsedSnapshot = row.snapshot;
+ if (typeof parsedSnapshot === "string") {
+ try {
+ parsedSnapshot = JSON.parse(row.snapshot);
+ } catch (e) {
+ this.logger.warn(`Failed to parse snapshot for workflow ${row.workflow_name}: ${e}`);
+ }
+ }
+ return {
+ workflowName: row.workflow_name,
+ runId: row.run_id,
+ snapshot: parsedSnapshot,
+ resourceId: row.resourceId,
+ createdAt: new Date(row.createdAt),
+ updatedAt: new Date(row.updatedAt)
+ };
+ }
  async init() {
  const schema = TABLE_SCHEMAS[TABLE_WORKFLOW_SNAPSHOT];
  await this.#db.createTable({ tableName: TABLE_WORKFLOW_SNAPSHOT, schema });
@@ -3582,7 +4025,7 @@ var WorkflowsLibSQL = class extends WorkflowsStorage {
  const tx = await this.#client.transaction("write");
  try {
  const existingSnapshotResult = await tx.execute({
- sql: `SELECT snapshot FROM ${TABLE_WORKFLOW_SNAPSHOT} WHERE workflow_name = ? AND run_id = ?`,
+ sql: `SELECT json(snapshot) as snapshot FROM ${TABLE_WORKFLOW_SNAPSHOT} WHERE workflow_name = ? AND run_id = ?`,
  args: [workflowName, runId]
  });
  let snapshot;
@@ -3607,9 +4050,13 @@ var WorkflowsLibSQL = class extends WorkflowsStorage {
  }
  snapshot.context[stepId] = result;
  snapshot.requestContext = { ...snapshot.requestContext, ...requestContext };
+ const now = (/* @__PURE__ */ new Date()).toISOString();
  await tx.execute({
- sql: `UPDATE ${TABLE_WORKFLOW_SNAPSHOT} SET snapshot = ? WHERE workflow_name = ? AND run_id = ?`,
- args: [JSON.stringify(snapshot), workflowName, runId]
+ sql: `INSERT INTO ${TABLE_WORKFLOW_SNAPSHOT} (workflow_name, run_id, snapshot, createdAt, updatedAt)
+ VALUES (?, ?, jsonb(?), ?, ?)
+ ON CONFLICT(workflow_name, run_id)
+ DO UPDATE SET snapshot = excluded.snapshot, updatedAt = excluded.updatedAt`,
+ args: [workflowName, runId, JSON.stringify(snapshot), now, now]
  });
  await tx.commit();
  return snapshot.context;
@@ -3630,7 +4077,7 @@ var WorkflowsLibSQL = class extends WorkflowsStorage {
  const tx = await this.#client.transaction("write");
  try {
  const existingSnapshotResult = await tx.execute({
- sql: `SELECT snapshot FROM ${TABLE_WORKFLOW_SNAPSHOT} WHERE workflow_name = ? AND run_id = ?`,
+ sql: `SELECT json(snapshot) as snapshot FROM ${TABLE_WORKFLOW_SNAPSHOT} WHERE workflow_name = ? AND run_id = ?`,
  args: [workflowName, runId]
  });
  if (!existingSnapshotResult.rows?.[0]) {
@@ -3645,7 +4092,7 @@ var WorkflowsLibSQL = class extends WorkflowsStorage {
  }
  const updatedSnapshot = { ...snapshot, ...opts };
  await tx.execute({
- sql: `UPDATE ${TABLE_WORKFLOW_SNAPSHOT} SET snapshot = ? WHERE workflow_name = ? AND run_id = ?`,
+ sql: `UPDATE ${TABLE_WORKFLOW_SNAPSHOT} SET snapshot = jsonb(?) WHERE workflow_name = ? AND run_id = ?`,
  args: [JSON.stringify(updatedSnapshot), workflowName, runId]
  });
  await tx.commit();
@@ -3709,13 +4156,13 @@ var WorkflowsLibSQL = class extends WorkflowsStorage {
  const whereClause = conditions.length > 0 ? `WHERE ${conditions.join(" AND ")}` : "";
  try {
  const result = await this.#client.execute({
- sql: `SELECT * FROM ${TABLE_WORKFLOW_SNAPSHOT} ${whereClause} ORDER BY createdAt DESC LIMIT 1`,
+ sql: `SELECT workflow_name, run_id, resourceId, json(snapshot) as snapshot, createdAt, updatedAt FROM ${TABLE_WORKFLOW_SNAPSHOT} ${whereClause} ORDER BY createdAt DESC LIMIT 1`,
  args
  });
  if (!result.rows?.[0]) {
  return null;
  }
- return parseWorkflowRun(result.rows[0]);
+ return this.parseWorkflowRun(result.rows[0]);
  } catch (error) {
  throw new MastraError(
  {
@@ -3781,7 +4228,7 @@ var WorkflowsLibSQL = class extends WorkflowsStorage {
  conditions.push("resourceId = ?");
  args.push(resourceId);
  } else {
- console.warn(`[${TABLE_WORKFLOW_SNAPSHOT}] resourceId column not found. Skipping resourceId filter.`);
+ this.logger.warn(`[${TABLE_WORKFLOW_SNAPSHOT}] resourceId column not found. Skipping resourceId filter.`);
  }
  }
  const whereClause = conditions.length > 0 ? `WHERE ${conditions.join(" AND ")}` : "";
@@ -3797,10 +4244,10 @@ var WorkflowsLibSQL = class extends WorkflowsStorage {
  const normalizedPerPage = usePagination ? normalizePerPage(perPage, Number.MAX_SAFE_INTEGER) : 0;
  const offset = usePagination ? page * normalizedPerPage : 0;
  const result = await this.#client.execute({
- sql: `SELECT * FROM ${TABLE_WORKFLOW_SNAPSHOT} ${whereClause} ORDER BY createdAt DESC${usePagination ? ` LIMIT ? OFFSET ?` : ""}`,
+ sql: `SELECT workflow_name, run_id, resourceId, json(snapshot) as snapshot, createdAt, updatedAt FROM ${TABLE_WORKFLOW_SNAPSHOT} ${whereClause} ORDER BY createdAt DESC${usePagination ? ` LIMIT ? OFFSET ?` : ""}`,
  args: usePagination ? [...args, normalizedPerPage, offset] : args
  });
- const runs = (result.rows || []).map((row) => parseWorkflowRun(row));
+ const runs = (result.rows || []).map((row) => this.parseWorkflowRun(row));
  return { runs, total: total || runs.length };
  } catch (error) {
  throw new MastraError(
@@ -3816,7 +4263,7 @@ var WorkflowsLibSQL = class extends WorkflowsStorage {
  };

  // src/storage/index.ts
- var LibSQLStore = class extends MastraStorage {
+ var LibSQLStore = class extends MastraCompositeStore {
  client;
  maxRetries;
  initialBackoffMs;
@@ -3861,19 +4308,6 @@ var LibSQLStore = class extends MastraStorage {
  agents
  };
  }
- get supports() {
- return {
- selectByIncludeResourceScope: true,
- resourceWorkingMemory: true,
- hasColumn: true,
- createTable: true,
- deleteMessages: true,
- observability: true,
- indexManagement: false,
- listScoresBySpan: true,
- agents: true
- };
- }
  };

  // src/vector/prompt.ts
@@ -3975,6 +4409,6 @@ Example Complex Query:
  ]
  }`;

- export { LibSQLStore as DefaultStorage, LIBSQL_PROMPT, LibSQLStore, LibSQLVector };
+ export { AgentsLibSQL, LibSQLStore as DefaultStorage, LIBSQL_PROMPT, LibSQLStore, LibSQLVector, MemoryLibSQL, ObservabilityLibSQL, ScoresLibSQL, WorkflowsLibSQL };
  //# sourceMappingURL=index.js.map
  //# sourceMappingURL=index.js.map
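As a usage note on the $exists change in the filter-operator hunk above (the operator now honors false and emits json_extract(...) IS NULL), here is a hedged sketch of a query that relies on it, assuming the query arguments visible in the diff (indexName, queryVector, topK, filter); the index name, embedding, and metadata key are placeholders:

```ts
import { LibSQLVector } from '@mastra/libsql';

// Illustrative construction; see the constructor hunk above for the `url` option.
const vector = new LibSQLVector({ id: 'libsql-vector', url: 'file:./vector.db' });

// Placeholder embedding; in practice this comes from your embedding model.
const embedding = new Array(1536).fill(0);

// Matches records whose metadata has no `archived` key at all;
// in 1.0.0 this translates to `json_extract(metadata, '$.archived') IS NULL`.
const results = await vector.query({
  indexName: 'my_index',
  queryVector: embedding,
  topK: 5, // checked by validateTopK("LIBSQL", topK) per the diff
  filter: { archived: { $exists: false } },
});
```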