@mastra/mssql 0.0.0-vector-query-tool-provider-options-20250828222356 → 0.0.0-vnext-20251104230439

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43) hide show
  1. package/CHANGELOG.md +399 -3
  2. package/README.md +315 -36
  3. package/dist/index.cjs +1687 -710
  4. package/dist/index.cjs.map +1 -1
  5. package/dist/index.d.ts +1 -0
  6. package/dist/index.d.ts.map +1 -1
  7. package/dist/index.js +1689 -712
  8. package/dist/index.js.map +1 -1
  9. package/dist/storage/domains/memory/index.d.ts +18 -41
  10. package/dist/storage/domains/memory/index.d.ts.map +1 -1
  11. package/dist/storage/domains/observability/index.d.ts +44 -0
  12. package/dist/storage/domains/observability/index.d.ts.map +1 -0
  13. package/dist/storage/domains/operations/index.d.ts +67 -4
  14. package/dist/storage/domains/operations/index.d.ts.map +1 -1
  15. package/dist/storage/domains/scores/index.d.ts +13 -4
  16. package/dist/storage/domains/scores/index.d.ts.map +1 -1
  17. package/dist/storage/domains/utils.d.ts +19 -0
  18. package/dist/storage/domains/utils.d.ts.map +1 -1
  19. package/dist/storage/domains/workflows/index.d.ts +9 -13
  20. package/dist/storage/domains/workflows/index.d.ts.map +1 -1
  21. package/dist/storage/index.d.ts +76 -79
  22. package/dist/storage/index.d.ts.map +1 -1
  23. package/package.json +23 -10
  24. package/dist/storage/domains/legacy-evals/index.d.ts +0 -20
  25. package/dist/storage/domains/legacy-evals/index.d.ts.map +0 -1
  26. package/dist/storage/domains/traces/index.d.ts +0 -37
  27. package/dist/storage/domains/traces/index.d.ts.map +0 -1
  28. package/docker-compose.yaml +0 -14
  29. package/eslint.config.js +0 -6
  30. package/src/index.ts +0 -2
  31. package/src/storage/domains/legacy-evals/index.ts +0 -175
  32. package/src/storage/domains/memory/index.ts +0 -1084
  33. package/src/storage/domains/operations/index.ts +0 -401
  34. package/src/storage/domains/scores/index.ts +0 -316
  35. package/src/storage/domains/traces/index.ts +0 -212
  36. package/src/storage/domains/utils.ts +0 -12
  37. package/src/storage/domains/workflows/index.ts +0 -296
  38. package/src/storage/index.test.ts +0 -2228
  39. package/src/storage/index.ts +0 -494
  40. package/tsconfig.build.json +0 -9
  41. package/tsconfig.json +0 -5
  42. package/tsup.config.ts +0 -17
  43. package/vitest.config.ts +0 -12
package/dist/index.cjs CHANGED
@@ -3,8 +3,10 @@
3
3
  var error = require('@mastra/core/error');
4
4
  var storage = require('@mastra/core/storage');
5
5
  var sql2 = require('mssql');
6
- var utils = require('@mastra/core/utils');
7
6
  var agent = require('@mastra/core/agent');
7
+ var utils = require('@mastra/core/utils');
8
+ var crypto = require('crypto');
9
+ var evals = require('@mastra/core/evals');
8
10
 
9
11
  function _interopDefault (e) { return e && e.__esModule ? e : { default: e }; }
10
12
 
@@ -20,154 +22,71 @@ function getTableName({ indexName, schemaName }) {
20
22
  const quotedSchemaName = schemaName;
21
23
  return quotedSchemaName ? `${quotedSchemaName}.${quotedIndexName}` : quotedIndexName;
22
24
  }
23
-
24
- // src/storage/domains/legacy-evals/index.ts
25
- function transformEvalRow(row) {
26
- let testInfoValue = null, resultValue = null;
27
- if (row.test_info) {
28
- try {
29
- testInfoValue = typeof row.test_info === "string" ? JSON.parse(row.test_info) : row.test_info;
30
- } catch {
31
- }
25
+ function buildDateRangeFilter(dateRange, fieldName) {
26
+ const filters = {};
27
+ if (dateRange?.start) {
28
+ filters[`${fieldName}_gte`] = dateRange.start;
32
29
  }
33
- if (row.test_info) {
34
- try {
35
- resultValue = typeof row.result === "string" ? JSON.parse(row.result) : row.result;
36
- } catch {
37
- }
30
+ if (dateRange?.end) {
31
+ filters[`${fieldName}_lte`] = dateRange.end;
38
32
  }
33
+ return filters;
34
+ }
35
+ function prepareWhereClause(filters, _schema) {
36
+ const conditions = [];
37
+ const params = {};
38
+ let paramIndex = 1;
39
+ Object.entries(filters).forEach(([key, value]) => {
40
+ if (value === void 0) return;
41
+ const paramName = `p${paramIndex++}`;
42
+ if (key.endsWith("_gte")) {
43
+ const fieldName = key.slice(0, -4);
44
+ conditions.push(`[${utils.parseSqlIdentifier(fieldName, "field name")}] >= @${paramName}`);
45
+ params[paramName] = value instanceof Date ? value.toISOString() : value;
46
+ } else if (key.endsWith("_lte")) {
47
+ const fieldName = key.slice(0, -4);
48
+ conditions.push(`[${utils.parseSqlIdentifier(fieldName, "field name")}] <= @${paramName}`);
49
+ params[paramName] = value instanceof Date ? value.toISOString() : value;
50
+ } else if (value === null) {
51
+ conditions.push(`[${utils.parseSqlIdentifier(key, "field name")}] IS NULL`);
52
+ } else {
53
+ conditions.push(`[${utils.parseSqlIdentifier(key, "field name")}] = @${paramName}`);
54
+ params[paramName] = value instanceof Date ? value.toISOString() : value;
55
+ }
56
+ });
39
57
  return {
40
- agentName: row.agent_name,
41
- input: row.input,
42
- output: row.output,
43
- result: resultValue,
44
- metricName: row.metric_name,
45
- instructions: row.instructions,
46
- testInfo: testInfoValue,
47
- globalRunId: row.global_run_id,
48
- runId: row.run_id,
49
- createdAt: row.created_at
58
+ sql: conditions.length > 0 ? ` WHERE ${conditions.join(" AND ")}` : "",
59
+ params
50
60
  };
51
61
  }
52
- var LegacyEvalsMSSQL = class extends storage.LegacyEvalsStorage {
53
- pool;
54
- schema;
55
- constructor({ pool, schema }) {
56
- super();
57
- this.pool = pool;
58
- this.schema = schema;
59
- }
60
- /** @deprecated use getEvals instead */
61
- async getEvalsByAgentName(agentName, type) {
62
- try {
63
- let query = `SELECT * FROM ${getTableName({ indexName: storage.TABLE_EVALS, schemaName: getSchemaName(this.schema) })} WHERE agent_name = @p1`;
64
- if (type === "test") {
65
- query += " AND test_info IS NOT NULL AND JSON_VALUE(test_info, '$.testPath') IS NOT NULL";
66
- } else if (type === "live") {
67
- query += " AND (test_info IS NULL OR JSON_VALUE(test_info, '$.testPath') IS NULL)";
68
- }
69
- query += " ORDER BY created_at DESC";
70
- const request = this.pool.request();
71
- request.input("p1", agentName);
72
- const result = await request.query(query);
73
- const rows = result.recordset;
74
- return typeof transformEvalRow === "function" ? rows?.map((row) => transformEvalRow(row)) ?? [] : rows ?? [];
75
- } catch (error) {
76
- if (error && error.number === 208 && error.message && error.message.includes("Invalid object name")) {
77
- return [];
78
- }
79
- console.error("Failed to get evals for the specified agent: " + error?.message);
80
- throw error;
81
- }
82
- }
83
- async getEvals(options = {}) {
84
- const { agentName, type, page = 0, perPage = 100, dateRange } = options;
85
- const fromDate = dateRange?.start;
86
- const toDate = dateRange?.end;
87
- const where = [];
88
- const params = {};
89
- if (agentName) {
90
- where.push("agent_name = @agentName");
91
- params["agentName"] = agentName;
92
- }
93
- if (type === "test") {
94
- where.push("test_info IS NOT NULL AND JSON_VALUE(test_info, '$.testPath') IS NOT NULL");
95
- } else if (type === "live") {
96
- where.push("(test_info IS NULL OR JSON_VALUE(test_info, '$.testPath') IS NULL)");
97
- }
98
- if (fromDate instanceof Date && !isNaN(fromDate.getTime())) {
99
- where.push(`[created_at] >= @fromDate`);
100
- params[`fromDate`] = fromDate.toISOString();
101
- }
102
- if (toDate instanceof Date && !isNaN(toDate.getTime())) {
103
- where.push(`[created_at] <= @toDate`);
104
- params[`toDate`] = toDate.toISOString();
105
- }
106
- const whereClause = where.length > 0 ? `WHERE ${where.join(" AND ")}` : "";
107
- const tableName = getTableName({ indexName: storage.TABLE_EVALS, schemaName: getSchemaName(this.schema) });
108
- const offset = page * perPage;
109
- const countQuery = `SELECT COUNT(*) as total FROM ${tableName} ${whereClause}`;
110
- const dataQuery = `SELECT * FROM ${tableName} ${whereClause} ORDER BY seq_id DESC OFFSET @offset ROWS FETCH NEXT @perPage ROWS ONLY`;
111
- try {
112
- const countReq = this.pool.request();
113
- Object.entries(params).forEach(([key, value]) => {
114
- if (value instanceof Date) {
115
- countReq.input(key, sql2__default.default.DateTime, value);
116
- } else {
117
- countReq.input(key, value);
118
- }
119
- });
120
- const countResult = await countReq.query(countQuery);
121
- const total = countResult.recordset[0]?.total || 0;
122
- if (total === 0) {
123
- return {
124
- evals: [],
125
- total: 0,
126
- page,
127
- perPage,
128
- hasMore: false
129
- };
62
+ function transformFromSqlRow({
63
+ tableName,
64
+ sqlRow
65
+ }) {
66
+ const schema = storage.TABLE_SCHEMAS[tableName];
67
+ const result = {};
68
+ Object.entries(sqlRow).forEach(([key, value]) => {
69
+ const columnSchema = schema?.[key];
70
+ if (columnSchema?.type === "jsonb" && typeof value === "string") {
71
+ try {
72
+ result[key] = JSON.parse(value);
73
+ } catch {
74
+ result[key] = value;
130
75
  }
131
- const req = this.pool.request();
132
- Object.entries(params).forEach(([key, value]) => {
133
- if (value instanceof Date) {
134
- req.input(key, sql2__default.default.DateTime, value);
135
- } else {
136
- req.input(key, value);
137
- }
138
- });
139
- req.input("offset", offset);
140
- req.input("perPage", perPage);
141
- const result = await req.query(dataQuery);
142
- const rows = result.recordset;
143
- return {
144
- evals: rows?.map((row) => transformEvalRow(row)) ?? [],
145
- total,
146
- page,
147
- perPage,
148
- hasMore: offset + (rows?.length ?? 0) < total
149
- };
150
- } catch (error$1) {
151
- const mastraError = new error.MastraError(
152
- {
153
- id: "MASTRA_STORAGE_MSSQL_STORE_GET_EVALS_FAILED",
154
- domain: error.ErrorDomain.STORAGE,
155
- category: error.ErrorCategory.THIRD_PARTY,
156
- details: {
157
- agentName: agentName || "all",
158
- type: type || "all",
159
- page,
160
- perPage
161
- }
162
- },
163
- error$1
164
- );
165
- this.logger?.error?.(mastraError.toString());
166
- this.logger?.trackException(mastraError);
167
- throw mastraError;
76
+ } else if (columnSchema?.type === "timestamp" && value && typeof value === "string") {
77
+ result[key] = new Date(value);
78
+ } else if (columnSchema?.type === "timestamp" && value instanceof Date) {
79
+ result[key] = value;
80
+ } else if (columnSchema?.type === "boolean") {
81
+ result[key] = Boolean(value);
82
+ } else {
83
+ result[key] = value;
168
84
  }
169
- }
170
- };
85
+ });
86
+ return result;
87
+ }
88
+
89
+ // src/storage/domains/memory/index.ts
171
90
  var MemoryMSSQL = class extends storage.MemoryStorage {
172
91
  pool;
173
92
  schema;
@@ -185,7 +104,7 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
185
104
  });
186
105
  const cleanMessages = messagesWithParsedContent.map(({ seq_id, ...rest }) => rest);
187
106
  const list = new agent.MessageList().add(cleanMessages, "memory");
188
- return format === "v2" ? list.get.all.v2() : list.get.all.v1();
107
+ return format === "v2" ? list.get.all.db() : list.get.all.v1();
189
108
  }
190
109
  constructor({
191
110
  pool,
@@ -199,7 +118,7 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
199
118
  }
200
119
  async getThreadById({ threadId }) {
201
120
  try {
202
- const sql7 = `SELECT
121
+ const sql5 = `SELECT
203
122
  id,
204
123
  [resourceId],
205
124
  title,
@@ -210,7 +129,7 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
210
129
  WHERE id = @threadId`;
211
130
  const request = this.pool.request();
212
131
  request.input("threadId", threadId);
213
- const resultSet = await request.query(sql7);
132
+ const resultSet = await request.query(sql5);
214
133
  const thread = resultSet.recordset[0] || null;
215
134
  if (!thread) {
216
135
  return null;
@@ -235,11 +154,12 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
235
154
  );
236
155
  }
237
156
  }
238
- async getThreadsByResourceIdPaginated(args) {
239
- const { resourceId, page = 0, perPage: perPageInput, orderBy = "createdAt", sortDirection = "DESC" } = args;
157
+ async listThreadsByResourceId(args) {
158
+ const { resourceId, page = 0, perPage: perPageInput, orderBy } = args;
159
+ const perPage = storage.normalizePerPage(perPageInput, 100);
160
+ const { offset, perPage: perPageForResponse } = storage.calculatePagination(page, perPageInput, perPage);
161
+ const { field, direction } = this.parseOrderBy(orderBy);
240
162
  try {
241
- const perPage = perPageInput !== void 0 ? perPageInput : 100;
242
- const currentOffset = page * perPage;
243
163
  const baseQuery = `FROM ${getTableName({ indexName: storage.TABLE_THREADS, schemaName: getSchemaName(this.schema) })} WHERE [resourceId] = @resourceId`;
244
164
  const countQuery = `SELECT COUNT(*) as count ${baseQuery}`;
245
165
  const countRequest = this.pool.request();
@@ -251,16 +171,22 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
251
171
  threads: [],
252
172
  total: 0,
253
173
  page,
254
- perPage,
174
+ perPage: perPageForResponse,
255
175
  hasMore: false
256
176
  };
257
177
  }
258
- const orderByField = orderBy === "createdAt" ? "[createdAt]" : "[updatedAt]";
259
- const dataQuery = `SELECT id, [resourceId], title, metadata, [createdAt], [updatedAt] ${baseQuery} ORDER BY ${orderByField} ${sortDirection} OFFSET @offset ROWS FETCH NEXT @perPage ROWS ONLY`;
178
+ const orderByField = field === "createdAt" ? "[createdAt]" : "[updatedAt]";
179
+ const dir = (direction || "DESC").toUpperCase() === "ASC" ? "ASC" : "DESC";
180
+ const limitValue = perPageInput === false ? total : perPage;
181
+ const dataQuery = `SELECT id, [resourceId], title, metadata, [createdAt], [updatedAt] ${baseQuery} ORDER BY ${orderByField} ${dir} OFFSET @offset ROWS FETCH NEXT @perPage ROWS ONLY`;
260
182
  const dataRequest = this.pool.request();
261
183
  dataRequest.input("resourceId", resourceId);
262
- dataRequest.input("perPage", perPage);
263
- dataRequest.input("offset", currentOffset);
184
+ dataRequest.input("offset", offset);
185
+ if (limitValue > 2147483647) {
186
+ dataRequest.input("perPage", sql2__default.default.BigInt, limitValue);
187
+ } else {
188
+ dataRequest.input("perPage", limitValue);
189
+ }
264
190
  const rowsResult = await dataRequest.query(dataQuery);
265
191
  const rows = rowsResult.recordset || [];
266
192
  const threads = rows.map((thread) => ({
@@ -273,13 +199,13 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
273
199
  threads,
274
200
  total,
275
201
  page,
276
- perPage,
277
- hasMore: currentOffset + threads.length < total
202
+ perPage: perPageForResponse,
203
+ hasMore: perPageInput === false ? false : offset + perPage < total
278
204
  };
279
205
  } catch (error$1) {
280
206
  const mastraError = new error.MastraError(
281
207
  {
282
- id: "MASTRA_STORAGE_MSSQL_STORE_GET_THREADS_BY_RESOURCE_ID_PAGINATED_FAILED",
208
+ id: "MASTRA_STORAGE_MSSQL_STORE_LIST_THREADS_BY_RESOURCE_ID_FAILED",
283
209
  domain: error.ErrorDomain.STORAGE,
284
210
  category: error.ErrorCategory.THIRD_PARTY,
285
211
  details: {
@@ -291,7 +217,13 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
291
217
  );
292
218
  this.logger?.error?.(mastraError.toString());
293
219
  this.logger?.trackException?.(mastraError);
294
- return { threads: [], total: 0, page, perPage: perPageInput || 100, hasMore: false };
220
+ return {
221
+ threads: [],
222
+ total: 0,
223
+ page,
224
+ perPage: perPageForResponse,
225
+ hasMore: false
226
+ };
295
227
  }
296
228
  }
297
229
  async saveThread({ thread }) {
@@ -313,7 +245,12 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
313
245
  req.input("id", thread.id);
314
246
  req.input("resourceId", thread.resourceId);
315
247
  req.input("title", thread.title);
316
- req.input("metadata", thread.metadata ? JSON.stringify(thread.metadata) : null);
248
+ const metadata = thread.metadata ? JSON.stringify(thread.metadata) : null;
249
+ if (metadata === null) {
250
+ req.input("metadata", sql2__default.default.NVarChar, null);
251
+ } else {
252
+ req.input("metadata", metadata);
253
+ }
317
254
  req.input("createdAt", sql2__default.default.DateTime2, thread.createdAt);
318
255
  req.input("updatedAt", sql2__default.default.DateTime2, thread.updatedAt);
319
256
  await req.query(mergeSql);
@@ -332,30 +269,6 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
332
269
  );
333
270
  }
334
271
  }
335
- /**
336
- * @deprecated use getThreadsByResourceIdPaginated instead
337
- */
338
- async getThreadsByResourceId(args) {
339
- const { resourceId, orderBy = "createdAt", sortDirection = "DESC" } = args;
340
- try {
341
- const baseQuery = `FROM ${getTableName({ indexName: storage.TABLE_THREADS, schemaName: getSchemaName(this.schema) })} WHERE [resourceId] = @resourceId`;
342
- const orderByField = orderBy === "createdAt" ? "[createdAt]" : "[updatedAt]";
343
- const dataQuery = `SELECT id, [resourceId], title, metadata, [createdAt], [updatedAt] ${baseQuery} ORDER BY ${orderByField} ${sortDirection}`;
344
- const request = this.pool.request();
345
- request.input("resourceId", resourceId);
346
- const resultSet = await request.query(dataQuery);
347
- const rows = resultSet.recordset || [];
348
- return rows.map((thread) => ({
349
- ...thread,
350
- metadata: typeof thread.metadata === "string" ? JSON.parse(thread.metadata) : thread.metadata,
351
- createdAt: thread.createdAt,
352
- updatedAt: thread.updatedAt
353
- }));
354
- } catch (error) {
355
- this.logger?.error?.(`Error getting threads for resource ${resourceId}:`, error);
356
- return [];
357
- }
358
- }
359
272
  /**
360
273
  * Updates a thread's title and metadata, merging with existing metadata. Returns the updated thread.
361
274
  */
@@ -383,7 +296,7 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
383
296
  };
384
297
  try {
385
298
  const table = getTableName({ indexName: storage.TABLE_THREADS, schemaName: getSchemaName(this.schema) });
386
- const sql7 = `UPDATE ${table}
299
+ const sql5 = `UPDATE ${table}
387
300
  SET title = @title,
388
301
  metadata = @metadata,
389
302
  [updatedAt] = @updatedAt
@@ -394,7 +307,7 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
394
307
  req.input("title", title);
395
308
  req.input("metadata", JSON.stringify(mergedMetadata));
396
309
  req.input("updatedAt", /* @__PURE__ */ new Date());
397
- const result = await req.query(sql7);
310
+ const result = await req.query(sql5);
398
311
  let thread = result.recordset && result.recordset[0];
399
312
  if (thread && "seq_id" in thread) {
400
313
  const { seq_id, ...rest } = thread;
@@ -464,9 +377,9 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
464
377
  }
465
378
  async _getIncludedMessages({
466
379
  threadId,
467
- selectBy,
468
- orderByStatement
380
+ selectBy
469
381
  }) {
382
+ if (!threadId.trim()) throw new Error("threadId must be a non-empty string");
470
383
  const include = selectBy?.include;
471
384
  if (!include) return null;
472
385
  const unionQueries = [];
@@ -492,7 +405,7 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
492
405
  m.[resourceId],
493
406
  m.seq_id
494
407
  FROM (
495
- SELECT *, ROW_NUMBER() OVER (${orderByStatement}) as row_num
408
+ SELECT *, ROW_NUMBER() OVER (ORDER BY [createdAt] ASC) as row_num
496
409
  FROM ${getTableName({ indexName: storage.TABLE_MESSAGES, schemaName: getSchemaName(this.schema) })}
497
410
  WHERE [thread_id] = ${pThreadId}
498
411
  ) AS m
@@ -500,15 +413,17 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
500
413
  OR EXISTS (
501
414
  SELECT 1
502
415
  FROM (
503
- SELECT *, ROW_NUMBER() OVER (${orderByStatement}) as row_num
416
+ SELECT *, ROW_NUMBER() OVER (ORDER BY [createdAt] ASC) as row_num
504
417
  FROM ${getTableName({ indexName: storage.TABLE_MESSAGES, schemaName: getSchemaName(this.schema) })}
505
418
  WHERE [thread_id] = ${pThreadId}
506
419
  ) AS target
507
420
  WHERE target.id = ${pId}
508
421
  AND (
509
- (m.row_num <= target.row_num + ${pPrev} AND m.row_num > target.row_num)
422
+ -- Get previous messages (messages that come BEFORE the target)
423
+ (m.row_num < target.row_num AND m.row_num >= target.row_num - ${pPrev})
510
424
  OR
511
- (m.row_num >= target.row_num - ${pNext} AND m.row_num < target.row_num)
425
+ -- Get next messages (messages that come AFTER the target)
426
+ (m.row_num > target.row_num AND m.row_num <= target.row_num + ${pNext})
512
427
  )
513
428
  )
514
429
  `
@@ -537,16 +452,20 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
537
452
  });
538
453
  return dedupedRows;
539
454
  }
455
+ /**
456
+ * @deprecated use listMessages instead
457
+ */
540
458
  async getMessages(args) {
541
- const { threadId, format, selectBy } = args;
459
+ const { threadId, resourceId, selectBy } = args;
542
460
  const selectStatement = `SELECT seq_id, id, content, role, type, [createdAt], thread_id AS threadId, resourceId`;
543
461
  const orderByStatement = `ORDER BY [seq_id] DESC`;
544
462
  const limit = storage.resolveMessageLimit({ last: selectBy?.last, defaultLimit: 40 });
545
463
  try {
464
+ if (!threadId.trim()) throw new Error("threadId must be a non-empty string");
546
465
  let rows = [];
547
466
  const include = selectBy?.include || [];
548
467
  if (include?.length) {
549
- const includeMessages = await this._getIncludedMessages({ threadId, selectBy, orderByStatement });
468
+ const includeMessages = await this._getIncludedMessages({ threadId, selectBy });
550
469
  if (includeMessages) {
551
470
  rows.push(...includeMessages);
552
471
  }
@@ -571,8 +490,19 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
571
490
  const timeDiff = a.seq_id - b.seq_id;
572
491
  return timeDiff;
573
492
  });
574
- rows = rows.map(({ seq_id, ...rest }) => rest);
575
- return this._parseAndFormatMessages(rows, format);
493
+ const messagesWithParsedContent = rows.map((row) => {
494
+ if (typeof row.content === "string") {
495
+ try {
496
+ return { ...row, content: JSON.parse(row.content) };
497
+ } catch {
498
+ return row;
499
+ }
500
+ }
501
+ return row;
502
+ });
503
+ const cleanMessages = messagesWithParsedContent.map(({ seq_id, ...rest }) => rest);
504
+ const list = new agent.MessageList().add(cleanMessages, "memory");
505
+ return { messages: list.get.all.db() };
576
506
  } catch (error$1) {
577
507
  const mastraError = new error.MastraError(
578
508
  {
@@ -580,21 +510,19 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
580
510
  domain: error.ErrorDomain.STORAGE,
581
511
  category: error.ErrorCategory.THIRD_PARTY,
582
512
  details: {
583
- threadId
513
+ threadId,
514
+ resourceId: resourceId ?? ""
584
515
  }
585
516
  },
586
517
  error$1
587
518
  );
588
519
  this.logger?.error?.(mastraError.toString());
589
- this.logger?.trackException(mastraError);
590
- return [];
520
+ this.logger?.trackException?.(mastraError);
521
+ return { messages: [] };
591
522
  }
592
523
  }
593
- async getMessagesById({
594
- messageIds,
595
- format
596
- }) {
597
- if (messageIds.length === 0) return [];
524
+ async listMessagesById({ messageIds }) {
525
+ if (messageIds.length === 0) return { messages: [] };
598
526
  const selectStatement = `SELECT seq_id, id, content, role, type, [createdAt], thread_id AS threadId, resourceId`;
599
527
  const orderByStatement = `ORDER BY [seq_id] DESC`;
600
528
  try {
@@ -610,13 +538,23 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
610
538
  const timeDiff = a.seq_id - b.seq_id;
611
539
  return timeDiff;
612
540
  });
613
- rows = rows.map(({ seq_id, ...rest }) => rest);
614
- if (format === `v1`) return this._parseAndFormatMessages(rows, format);
615
- return this._parseAndFormatMessages(rows, `v2`);
541
+ const messagesWithParsedContent = rows.map((row) => {
542
+ if (typeof row.content === "string") {
543
+ try {
544
+ return { ...row, content: JSON.parse(row.content) };
545
+ } catch {
546
+ return row;
547
+ }
548
+ }
549
+ return row;
550
+ });
551
+ const cleanMessages = messagesWithParsedContent.map(({ seq_id, ...rest }) => rest);
552
+ const list = new agent.MessageList().add(cleanMessages, "memory");
553
+ return { messages: list.get.all.db() };
616
554
  } catch (error$1) {
617
555
  const mastraError = new error.MastraError(
618
556
  {
619
- id: "MASTRA_STORAGE_MSSQL_STORE_GET_MESSAGES_BY_ID_FAILED",
557
+ id: "MASTRA_STORAGE_MSSQL_STORE_LIST_MESSAGES_BY_ID_FAILED",
620
558
  domain: error.ErrorDomain.STORAGE,
621
559
  category: error.ErrorCategory.THIRD_PARTY,
622
560
  details: {
@@ -626,101 +564,125 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
626
564
  error$1
627
565
  );
628
566
  this.logger?.error?.(mastraError.toString());
629
- this.logger?.trackException(mastraError);
630
- return [];
567
+ this.logger?.trackException?.(mastraError);
568
+ return { messages: [] };
631
569
  }
632
570
  }
633
- async getMessagesPaginated(args) {
634
- const { threadId, selectBy } = args;
635
- const { page = 0, perPage: perPageInput } = selectBy?.pagination || {};
636
- const orderByStatement = `ORDER BY [seq_id] DESC`;
637
- if (selectBy?.include?.length) {
638
- await this._getIncludedMessages({ threadId, selectBy, orderByStatement });
571
+ async listMessages(args) {
572
+ const { threadId, resourceId, include, filter, perPage: perPageInput, page = 0, orderBy } = args;
573
+ if (!threadId.trim()) {
574
+ throw new error.MastraError(
575
+ {
576
+ id: "STORAGE_MSSQL_LIST_MESSAGES_INVALID_THREAD_ID",
577
+ domain: error.ErrorDomain.STORAGE,
578
+ category: error.ErrorCategory.THIRD_PARTY,
579
+ details: { threadId }
580
+ },
581
+ new Error("threadId must be a non-empty string")
582
+ );
639
583
  }
584
+ const perPage = storage.normalizePerPage(perPageInput, 40);
585
+ const { offset, perPage: perPageForResponse } = storage.calculatePagination(page, perPageInput, perPage);
640
586
  try {
641
- const { threadId: threadId2, format, selectBy: selectBy2 } = args;
642
- const { page: page2 = 0, perPage: perPageInput2, dateRange } = selectBy2?.pagination || {};
643
- const fromDate = dateRange?.start;
644
- const toDate = dateRange?.end;
587
+ const { field, direction } = this.parseOrderBy(orderBy);
588
+ const orderByStatement = `ORDER BY [${field}] ${direction}`;
645
589
  const selectStatement = `SELECT seq_id, id, content, role, type, [createdAt], thread_id AS threadId, resourceId`;
646
- const orderByStatement2 = `ORDER BY [seq_id] DESC`;
647
- let messages2 = [];
648
- if (selectBy2?.include?.length) {
649
- const includeMessages = await this._getIncludedMessages({ threadId: threadId2, selectBy: selectBy2, orderByStatement: orderByStatement2 });
650
- if (includeMessages) messages2.push(...includeMessages);
651
- }
652
- const perPage = perPageInput2 !== void 0 ? perPageInput2 : storage.resolveMessageLimit({ last: selectBy2?.last, defaultLimit: 40 });
653
- const currentOffset = page2 * perPage;
590
+ const tableName = getTableName({ indexName: storage.TABLE_MESSAGES, schemaName: getSchemaName(this.schema) });
654
591
  const conditions = ["[thread_id] = @threadId"];
655
592
  const request = this.pool.request();
656
- request.input("threadId", threadId2);
657
- if (fromDate instanceof Date && !isNaN(fromDate.getTime())) {
593
+ request.input("threadId", threadId);
594
+ if (resourceId) {
595
+ conditions.push("[resourceId] = @resourceId");
596
+ request.input("resourceId", resourceId);
597
+ }
598
+ if (filter?.dateRange?.start) {
658
599
  conditions.push("[createdAt] >= @fromDate");
659
- request.input("fromDate", fromDate.toISOString());
600
+ request.input("fromDate", filter.dateRange.start);
660
601
  }
661
- if (toDate instanceof Date && !isNaN(toDate.getTime())) {
602
+ if (filter?.dateRange?.end) {
662
603
  conditions.push("[createdAt] <= @toDate");
663
- request.input("toDate", toDate.toISOString());
604
+ request.input("toDate", filter.dateRange.end);
664
605
  }
665
606
  const whereClause = `WHERE ${conditions.join(" AND ")}`;
666
- const countQuery = `SELECT COUNT(*) as total FROM ${getTableName({ indexName: storage.TABLE_MESSAGES, schemaName: getSchemaName(this.schema) })} ${whereClause}`;
607
+ const countQuery = `SELECT COUNT(*) as total FROM ${tableName} ${whereClause}`;
667
608
  const countResult = await request.query(countQuery);
668
609
  const total = parseInt(countResult.recordset[0]?.total, 10) || 0;
669
- if (total === 0 && messages2.length > 0) {
670
- const parsedIncluded = this._parseAndFormatMessages(messages2, format);
610
+ const limitValue = perPageInput === false ? total : perPage;
611
+ const dataQuery = `${selectStatement} FROM ${tableName} ${whereClause} ${orderByStatement} OFFSET @offset ROWS FETCH NEXT @limit ROWS ONLY`;
612
+ request.input("offset", offset);
613
+ if (limitValue > 2147483647) {
614
+ request.input("limit", sql2__default.default.BigInt, limitValue);
615
+ } else {
616
+ request.input("limit", limitValue);
617
+ }
618
+ const rowsResult = await request.query(dataQuery);
619
+ const rows = rowsResult.recordset || [];
620
+ const messages = [...rows];
621
+ if (total === 0 && messages.length === 0 && (!include || include.length === 0)) {
671
622
  return {
672
- messages: parsedIncluded,
673
- total: parsedIncluded.length,
674
- page: page2,
675
- perPage,
623
+ messages: [],
624
+ total: 0,
625
+ page,
626
+ perPage: perPageForResponse,
676
627
  hasMore: false
677
628
  };
678
629
  }
679
- const excludeIds = messages2.map((m) => m.id);
680
- if (excludeIds.length > 0) {
681
- const excludeParams = excludeIds.map((_, idx) => `@id${idx}`);
682
- conditions.push(`id NOT IN (${excludeParams.join(", ")})`);
683
- excludeIds.forEach((id, idx) => request.input(`id${idx}`, id));
630
+ const messageIds = new Set(messages.map((m) => m.id));
631
+ if (include && include.length > 0) {
632
+ const selectBy = { include };
633
+ const includeMessages = await this._getIncludedMessages({ threadId, selectBy });
634
+ if (includeMessages) {
635
+ for (const includeMsg of includeMessages) {
636
+ if (!messageIds.has(includeMsg.id)) {
637
+ messages.push(includeMsg);
638
+ messageIds.add(includeMsg.id);
639
+ }
640
+ }
641
+ }
684
642
  }
685
- const finalWhereClause = `WHERE ${conditions.join(" AND ")}`;
686
- const dataQuery = `${selectStatement} FROM ${getTableName({ indexName: storage.TABLE_MESSAGES, schemaName: getSchemaName(this.schema) })} ${finalWhereClause} ${orderByStatement2} OFFSET @offset ROWS FETCH NEXT @limit ROWS ONLY`;
687
- request.input("offset", currentOffset);
688
- request.input("limit", perPage);
689
- const rowsResult = await request.query(dataQuery);
690
- const rows = rowsResult.recordset || [];
691
- rows.sort((a, b) => a.seq_id - b.seq_id);
692
- messages2.push(...rows);
693
- const parsed = this._parseAndFormatMessages(messages2, format);
643
+ const parsed = this._parseAndFormatMessages(messages, "v2");
644
+ let finalMessages = parsed;
645
+ finalMessages = finalMessages.sort((a, b) => {
646
+ const aValue = field === "createdAt" ? new Date(a.createdAt).getTime() : a[field];
647
+ const bValue = field === "createdAt" ? new Date(b.createdAt).getTime() : b[field];
648
+ return direction === "ASC" ? aValue - bValue : bValue - aValue;
649
+ });
650
+ const returnedThreadMessageIds = new Set(finalMessages.filter((m) => m.threadId === threadId).map((m) => m.id));
651
+ const allThreadMessagesReturned = returnedThreadMessageIds.size >= total;
652
+ const hasMore = perPageInput !== false && !allThreadMessagesReturned && offset + perPage < total;
694
653
  return {
695
- messages: parsed,
696
- total: total + excludeIds.length,
697
- page: page2,
698
- perPage,
699
- hasMore: currentOffset + rows.length < total
654
+ messages: finalMessages,
655
+ total,
656
+ page,
657
+ perPage: perPageForResponse,
658
+ hasMore
700
659
  };
701
660
  } catch (error$1) {
702
661
  const mastraError = new error.MastraError(
703
662
  {
704
- id: "MASTRA_STORAGE_MSSQL_STORE_GET_MESSAGES_PAGINATED_FAILED",
663
+ id: "MASTRA_STORAGE_MSSQL_STORE_LIST_MESSAGES_FAILED",
705
664
  domain: error.ErrorDomain.STORAGE,
706
665
  category: error.ErrorCategory.THIRD_PARTY,
707
666
  details: {
708
667
  threadId,
709
- page
668
+ resourceId: resourceId ?? ""
710
669
  }
711
670
  },
712
671
  error$1
713
672
  );
714
673
  this.logger?.error?.(mastraError.toString());
715
- this.logger?.trackException(mastraError);
716
- return { messages: [], total: 0, page, perPage: perPageInput || 40, hasMore: false };
674
+ this.logger?.trackException?.(mastraError);
675
+ return {
676
+ messages: [],
677
+ total: 0,
678
+ page,
679
+ perPage: perPageForResponse,
680
+ hasMore: false
681
+ };
717
682
  }
718
683
  }
719
- async saveMessages({
720
- messages,
721
- format
722
- }) {
723
- if (messages.length === 0) return messages;
684
+ async saveMessages({ messages }) {
685
+ if (messages.length === 0) return { messages: [] };
724
686
  const threadId = messages[0]?.threadId;
725
687
  if (!threadId) {
726
688
  throw new error.MastraError({
@@ -802,8 +764,7 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
802
764
  return message;
803
765
  });
804
766
  const list = new agent.MessageList().add(messagesWithParsedContent, "memory");
805
- if (format === "v2") return list.get.all.v2();
806
- return list.get.all.v1();
767
+ return { messages: list.get.all.db() };
807
768
  } catch (error$1) {
808
769
  throw new error.MastraError(
809
770
  {
@@ -979,8 +940,10 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
979
940
  return null;
980
941
  }
981
942
  return {
982
- ...result,
983
- workingMemory: typeof result.workingMemory === "object" ? JSON.stringify(result.workingMemory) : result.workingMemory,
943
+ id: result.id,
944
+ createdAt: result.createdAt,
945
+ updatedAt: result.updatedAt,
946
+ workingMemory: result.workingMemory,
984
947
  metadata: typeof result.metadata === "string" ? JSON.parse(result.metadata) : result.metadata
985
948
  };
986
949
  } catch (error$1) {
@@ -994,7 +957,7 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
994
957
  error$1
995
958
  );
996
959
  this.logger?.error?.(mastraError.toString());
997
- this.logger?.trackException(mastraError);
960
+ this.logger?.trackException?.(mastraError);
998
961
  throw mastraError;
999
962
  }
1000
963
  }
@@ -1003,7 +966,7 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
1003
966
  tableName: storage.TABLE_RESOURCES,
1004
967
  record: {
1005
968
  ...resource,
1006
- metadata: JSON.stringify(resource.metadata)
969
+ metadata: resource.metadata
1007
970
  }
1008
971
  });
1009
972
  return resource;
@@ -1061,138 +1024,463 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
1061
1024
  error$1
1062
1025
  );
1063
1026
  this.logger?.error?.(mastraError.toString());
1064
- this.logger?.trackException(mastraError);
1027
+ this.logger?.trackException?.(mastraError);
1065
1028
  throw mastraError;
1066
1029
  }
1067
1030
  }
1068
1031
  };
1069
- var StoreOperationsMSSQL = class extends storage.StoreOperations {
1032
+ var ObservabilityMSSQL = class extends storage.ObservabilityStorage {
1070
1033
  pool;
1071
- schemaName;
1072
- setupSchemaPromise = null;
1073
- schemaSetupComplete = void 0;
1074
- getSqlType(type, isPrimaryKey = false) {
1075
- switch (type) {
1076
- case "text":
1077
- return isPrimaryKey ? "NVARCHAR(255)" : "NVARCHAR(MAX)";
1078
- case "timestamp":
1079
- return "DATETIME2(7)";
1080
- case "uuid":
1081
- return "UNIQUEIDENTIFIER";
1082
- case "jsonb":
1083
- return "NVARCHAR(MAX)";
1084
- case "integer":
1085
- return "INT";
1086
- case "bigint":
1087
- return "BIGINT";
1088
- case "float":
1089
- return "FLOAT";
1090
- default:
1091
- throw new error.MastraError({
1092
- id: "MASTRA_STORAGE_MSSQL_STORE_TYPE_NOT_SUPPORTED",
1093
- domain: error.ErrorDomain.STORAGE,
1094
- category: error.ErrorCategory.THIRD_PARTY
1095
- });
1096
- }
1097
- }
1098
- constructor({ pool, schemaName }) {
1034
+ operations;
1035
+ schema;
1036
+ constructor({
1037
+ pool,
1038
+ operations,
1039
+ schema
1040
+ }) {
1099
1041
  super();
1100
1042
  this.pool = pool;
1101
- this.schemaName = schemaName;
1102
- }
1103
- async hasColumn(table, column) {
1104
- const schema = this.schemaName || "dbo";
1105
- const request = this.pool.request();
1106
- request.input("schema", schema);
1107
- request.input("table", table);
1108
- request.input("column", column);
1109
- request.input("columnLower", column.toLowerCase());
1110
- const result = await request.query(
1111
- `SELECT 1 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = @schema AND TABLE_NAME = @table AND (COLUMN_NAME = @column OR COLUMN_NAME = @columnLower)`
1112
- );
1113
- return result.recordset.length > 0;
1043
+ this.operations = operations;
1044
+ this.schema = schema;
1114
1045
  }
1115
- async setupSchema() {
1116
- if (!this.schemaName || this.schemaSetupComplete) {
1117
- return;
1118
- }
1119
- if (!this.setupSchemaPromise) {
1120
- this.setupSchemaPromise = (async () => {
1121
- try {
1122
- const checkRequest = this.pool.request();
1123
- checkRequest.input("schemaName", this.schemaName);
1124
- const checkResult = await checkRequest.query(`
1125
- SELECT 1 AS found FROM INFORMATION_SCHEMA.SCHEMATA WHERE SCHEMA_NAME = @schemaName
1126
- `);
1127
- const schemaExists = Array.isArray(checkResult.recordset) && checkResult.recordset.length > 0;
1128
- if (!schemaExists) {
1129
- try {
1130
- await this.pool.request().query(`CREATE SCHEMA [${this.schemaName}]`);
1131
- this.logger?.info?.(`Schema "${this.schemaName}" created successfully`);
1132
- } catch (error) {
1133
- this.logger?.error?.(`Failed to create schema "${this.schemaName}"`, { error });
1134
- throw new Error(
1135
- `Unable to create schema "${this.schemaName}". This requires CREATE privilege on the database. Either create the schema manually or grant CREATE privilege to the user.`
1136
- );
1137
- }
1138
- }
1139
- this.schemaSetupComplete = true;
1140
- this.logger?.debug?.(`Schema "${this.schemaName}" is ready for use`);
1141
- } catch (error) {
1142
- this.schemaSetupComplete = void 0;
1143
- this.setupSchemaPromise = null;
1144
- throw error;
1145
- } finally {
1146
- this.setupSchemaPromise = null;
1147
- }
1148
- })();
1149
- }
1150
- await this.setupSchemaPromise;
1046
+ get aiTracingStrategy() {
1047
+ return {
1048
+ preferred: "batch-with-updates",
1049
+ supported: ["batch-with-updates", "insert-only"]
1050
+ };
1151
1051
  }
1152
- async insert({ tableName, record }) {
1052
+ async createAISpan(span) {
1153
1053
  try {
1154
- const columns = Object.keys(record).map((col) => utils.parseSqlIdentifier(col, "column name"));
1155
- const values = Object.values(record);
1156
- const paramNames = values.map((_, i) => `@param${i}`);
1157
- const insertSql = `INSERT INTO ${getTableName({ indexName: tableName, schemaName: getSchemaName(this.schemaName) })} (${columns.map((c) => `[${c}]`).join(", ")}) VALUES (${paramNames.join(", ")})`;
1158
- const request = this.pool.request();
1159
- values.forEach((value, i) => {
1160
- if (value instanceof Date) {
1161
- request.input(`param${i}`, sql2__default.default.DateTime2, value);
1162
- } else if (typeof value === "object" && value !== null) {
1163
- request.input(`param${i}`, JSON.stringify(value));
1164
- } else {
1165
- request.input(`param${i}`, value);
1166
- }
1167
- });
1168
- await request.query(insertSql);
1054
+ const startedAt = span.startedAt instanceof Date ? span.startedAt.toISOString() : span.startedAt;
1055
+ const endedAt = span.endedAt instanceof Date ? span.endedAt.toISOString() : span.endedAt;
1056
+ const record = {
1057
+ ...span,
1058
+ startedAt,
1059
+ endedAt
1060
+ // Note: createdAt/updatedAt will be set by default values
1061
+ };
1062
+ return this.operations.insert({ tableName: storage.TABLE_AI_SPANS, record });
1169
1063
  } catch (error$1) {
1170
1064
  throw new error.MastraError(
1171
1065
  {
1172
- id: "MASTRA_STORAGE_MSSQL_STORE_INSERT_FAILED",
1066
+ id: "MSSQL_STORE_CREATE_AI_SPAN_FAILED",
1173
1067
  domain: error.ErrorDomain.STORAGE,
1174
- category: error.ErrorCategory.THIRD_PARTY,
1068
+ category: error.ErrorCategory.USER,
1175
1069
  details: {
1176
- tableName
1070
+ spanId: span.spanId,
1071
+ traceId: span.traceId,
1072
+ spanType: span.spanType,
1073
+ spanName: span.name
1177
1074
  }
1178
1075
  },
1179
1076
  error$1
1180
1077
  );
1181
1078
  }
1182
1079
  }
1183
- async clearTable({ tableName }) {
1184
- const fullTableName = getTableName({ indexName: tableName, schemaName: getSchemaName(this.schemaName) });
1080
+ async getAITrace(traceId) {
1185
1081
  try {
1186
- try {
1187
- await this.pool.request().query(`TRUNCATE TABLE ${fullTableName}`);
1188
- } catch (truncateError) {
1189
- if (truncateError.message && truncateError.message.includes("foreign key")) {
1190
- await this.pool.request().query(`DELETE FROM ${fullTableName}`);
1191
- } else {
1192
- throw truncateError;
1193
- }
1194
- }
1195
- } catch (error$1) {
1082
+ const tableName = getTableName({
1083
+ indexName: storage.TABLE_AI_SPANS,
1084
+ schemaName: getSchemaName(this.schema)
1085
+ });
1086
+ const request = this.pool.request();
1087
+ request.input("traceId", traceId);
1088
+ const result = await request.query(
1089
+ `SELECT
1090
+ [traceId], [spanId], [parentSpanId], [name], [scope], [spanType],
1091
+ [attributes], [metadata], [links], [input], [output], [error], [isEvent],
1092
+ [startedAt], [endedAt], [createdAt], [updatedAt]
1093
+ FROM ${tableName}
1094
+ WHERE [traceId] = @traceId
1095
+ ORDER BY [startedAt] DESC`
1096
+ );
1097
+ if (!result.recordset || result.recordset.length === 0) {
1098
+ return null;
1099
+ }
1100
+ return {
1101
+ traceId,
1102
+ spans: result.recordset.map(
1103
+ (span) => transformFromSqlRow({
1104
+ tableName: storage.TABLE_AI_SPANS,
1105
+ sqlRow: span
1106
+ })
1107
+ )
1108
+ };
1109
+ } catch (error$1) {
1110
+ throw new error.MastraError(
1111
+ {
1112
+ id: "MSSQL_STORE_GET_AI_TRACE_FAILED",
1113
+ domain: error.ErrorDomain.STORAGE,
1114
+ category: error.ErrorCategory.USER,
1115
+ details: {
1116
+ traceId
1117
+ }
1118
+ },
1119
+ error$1
1120
+ );
1121
+ }
1122
+ }
1123
+ async updateAISpan({
1124
+ spanId,
1125
+ traceId,
1126
+ updates
1127
+ }) {
1128
+ try {
1129
+ const data = { ...updates };
1130
+ if (data.endedAt instanceof Date) {
1131
+ data.endedAt = data.endedAt.toISOString();
1132
+ }
1133
+ if (data.startedAt instanceof Date) {
1134
+ data.startedAt = data.startedAt.toISOString();
1135
+ }
1136
+ await this.operations.update({
1137
+ tableName: storage.TABLE_AI_SPANS,
1138
+ keys: { spanId, traceId },
1139
+ data
1140
+ });
1141
+ } catch (error$1) {
1142
+ throw new error.MastraError(
1143
+ {
1144
+ id: "MSSQL_STORE_UPDATE_AI_SPAN_FAILED",
1145
+ domain: error.ErrorDomain.STORAGE,
1146
+ category: error.ErrorCategory.USER,
1147
+ details: {
1148
+ spanId,
1149
+ traceId
1150
+ }
1151
+ },
1152
+ error$1
1153
+ );
1154
+ }
1155
+ }
1156
+ async getAITracesPaginated({
1157
+ filters,
1158
+ pagination
1159
+ }) {
1160
+ const page = pagination?.page ?? 0;
1161
+ const perPage = pagination?.perPage ?? 10;
1162
+ const { entityId, entityType, ...actualFilters } = filters || {};
1163
+ const filtersWithDateRange = {
1164
+ ...actualFilters,
1165
+ ...buildDateRangeFilter(pagination?.dateRange, "startedAt"),
1166
+ parentSpanId: null
1167
+ // Only get root spans for traces
1168
+ };
1169
+ const whereClause = prepareWhereClause(filtersWithDateRange);
1170
+ let actualWhereClause = whereClause.sql;
1171
+ const params = { ...whereClause.params };
1172
+ let currentParamIndex = Object.keys(params).length + 1;
1173
+ if (entityId && entityType) {
1174
+ let name = "";
1175
+ if (entityType === "workflow") {
1176
+ name = `workflow run: '${entityId}'`;
1177
+ } else if (entityType === "agent") {
1178
+ name = `agent run: '${entityId}'`;
1179
+ } else {
1180
+ const error$1 = new error.MastraError({
1181
+ id: "MSSQL_STORE_GET_AI_TRACES_PAGINATED_FAILED",
1182
+ domain: error.ErrorDomain.STORAGE,
1183
+ category: error.ErrorCategory.USER,
1184
+ details: {
1185
+ entityType
1186
+ },
1187
+ text: `Cannot filter by entity type: ${entityType}`
1188
+ });
1189
+ throw error$1;
1190
+ }
1191
+ const entityParam = `p${currentParamIndex++}`;
1192
+ if (actualWhereClause) {
1193
+ actualWhereClause += ` AND [name] = @${entityParam}`;
1194
+ } else {
1195
+ actualWhereClause = ` WHERE [name] = @${entityParam}`;
1196
+ }
1197
+ params[entityParam] = name;
1198
+ }
1199
+ const tableName = getTableName({
1200
+ indexName: storage.TABLE_AI_SPANS,
1201
+ schemaName: getSchemaName(this.schema)
1202
+ });
1203
+ try {
1204
+ const countRequest = this.pool.request();
1205
+ Object.entries(params).forEach(([key, value]) => {
1206
+ countRequest.input(key, value);
1207
+ });
1208
+ const countResult = await countRequest.query(
1209
+ `SELECT COUNT(*) as count FROM ${tableName}${actualWhereClause}`
1210
+ );
1211
+ const total = countResult.recordset[0]?.count ?? 0;
1212
+ if (total === 0) {
1213
+ return {
1214
+ pagination: {
1215
+ total: 0,
1216
+ page,
1217
+ perPage,
1218
+ hasMore: false
1219
+ },
1220
+ spans: []
1221
+ };
1222
+ }
1223
+ const dataRequest = this.pool.request();
1224
+ Object.entries(params).forEach(([key, value]) => {
1225
+ dataRequest.input(key, value);
1226
+ });
1227
+ dataRequest.input("offset", page * perPage);
1228
+ dataRequest.input("limit", perPage);
1229
+ const dataResult = await dataRequest.query(
1230
+ `SELECT * FROM ${tableName}${actualWhereClause} ORDER BY [startedAt] DESC OFFSET @offset ROWS FETCH NEXT @limit ROWS ONLY`
1231
+ );
1232
+ const spans = dataResult.recordset.map(
1233
+ (row) => transformFromSqlRow({
1234
+ tableName: storage.TABLE_AI_SPANS,
1235
+ sqlRow: row
1236
+ })
1237
+ );
1238
+ return {
1239
+ pagination: {
1240
+ total,
1241
+ page,
1242
+ perPage,
1243
+ hasMore: (page + 1) * perPage < total
1244
+ },
1245
+ spans
1246
+ };
1247
+ } catch (error$1) {
1248
+ throw new error.MastraError(
1249
+ {
1250
+ id: "MSSQL_STORE_GET_AI_TRACES_PAGINATED_FAILED",
1251
+ domain: error.ErrorDomain.STORAGE,
1252
+ category: error.ErrorCategory.USER
1253
+ },
1254
+ error$1
1255
+ );
1256
+ }
1257
+ }
1258
+ async batchCreateAISpans(args) {
1259
+ if (!args.records || args.records.length === 0) {
1260
+ return;
1261
+ }
1262
+ try {
1263
+ await this.operations.batchInsert({
1264
+ tableName: storage.TABLE_AI_SPANS,
1265
+ records: args.records.map((span) => ({
1266
+ ...span,
1267
+ startedAt: span.startedAt instanceof Date ? span.startedAt.toISOString() : span.startedAt,
1268
+ endedAt: span.endedAt instanceof Date ? span.endedAt.toISOString() : span.endedAt
1269
+ }))
1270
+ });
1271
+ } catch (error$1) {
1272
+ throw new error.MastraError(
1273
+ {
1274
+ id: "MSSQL_STORE_BATCH_CREATE_AI_SPANS_FAILED",
1275
+ domain: error.ErrorDomain.STORAGE,
1276
+ category: error.ErrorCategory.USER,
1277
+ details: {
1278
+ count: args.records.length
1279
+ }
1280
+ },
1281
+ error$1
1282
+ );
1283
+ }
1284
+ }
1285
+ async batchUpdateAISpans(args) {
1286
+ if (!args.records || args.records.length === 0) {
1287
+ return;
1288
+ }
1289
+ try {
1290
+ const updates = args.records.map(({ traceId, spanId, updates: data }) => {
1291
+ const processedData = { ...data };
1292
+ if (processedData.endedAt instanceof Date) {
1293
+ processedData.endedAt = processedData.endedAt.toISOString();
1294
+ }
1295
+ if (processedData.startedAt instanceof Date) {
1296
+ processedData.startedAt = processedData.startedAt.toISOString();
1297
+ }
1298
+ return {
1299
+ keys: { spanId, traceId },
1300
+ data: processedData
1301
+ };
1302
+ });
1303
+ await this.operations.batchUpdate({
1304
+ tableName: storage.TABLE_AI_SPANS,
1305
+ updates
1306
+ });
1307
+ } catch (error$1) {
1308
+ throw new error.MastraError(
1309
+ {
1310
+ id: "MSSQL_STORE_BATCH_UPDATE_AI_SPANS_FAILED",
1311
+ domain: error.ErrorDomain.STORAGE,
1312
+ category: error.ErrorCategory.USER,
1313
+ details: {
1314
+ count: args.records.length
1315
+ }
1316
+ },
1317
+ error$1
1318
+ );
1319
+ }
1320
+ }
1321
+ async batchDeleteAITraces(args) {
1322
+ if (!args.traceIds || args.traceIds.length === 0) {
1323
+ return;
1324
+ }
1325
+ try {
1326
+ const keys = args.traceIds.map((traceId) => ({ traceId }));
1327
+ await this.operations.batchDelete({
1328
+ tableName: storage.TABLE_AI_SPANS,
1329
+ keys
1330
+ });
1331
+ } catch (error$1) {
1332
+ throw new error.MastraError(
1333
+ {
1334
+ id: "MSSQL_STORE_BATCH_DELETE_AI_TRACES_FAILED",
1335
+ domain: error.ErrorDomain.STORAGE,
1336
+ category: error.ErrorCategory.USER,
1337
+ details: {
1338
+ count: args.traceIds.length
1339
+ }
1340
+ },
1341
+ error$1
1342
+ );
1343
+ }
1344
+ }
1345
+ };
1346
+ var StoreOperationsMSSQL = class extends storage.StoreOperations {
1347
+ pool;
1348
+ schemaName;
1349
+ setupSchemaPromise = null;
1350
+ schemaSetupComplete = void 0;
1351
+ getSqlType(type, isPrimaryKey = false, useLargeStorage = false) {
1352
+ switch (type) {
1353
+ case "text":
1354
+ if (useLargeStorage) {
1355
+ return "NVARCHAR(MAX)";
1356
+ }
1357
+ return isPrimaryKey ? "NVARCHAR(255)" : "NVARCHAR(400)";
1358
+ case "timestamp":
1359
+ return "DATETIME2(7)";
1360
+ case "uuid":
1361
+ return "UNIQUEIDENTIFIER";
1362
+ case "jsonb":
1363
+ return "NVARCHAR(MAX)";
1364
+ case "integer":
1365
+ return "INT";
1366
+ case "bigint":
1367
+ return "BIGINT";
1368
+ case "float":
1369
+ return "FLOAT";
1370
+ case "boolean":
1371
+ return "BIT";
1372
+ default:
1373
+ throw new error.MastraError({
1374
+ id: "MASTRA_STORAGE_MSSQL_STORE_TYPE_NOT_SUPPORTED",
1375
+ domain: error.ErrorDomain.STORAGE,
1376
+ category: error.ErrorCategory.THIRD_PARTY
1377
+ });
1378
+ }
1379
+ }
1380
+ constructor({ pool, schemaName }) {
1381
+ super();
1382
+ this.pool = pool;
1383
+ this.schemaName = schemaName;
1384
+ }
1385
+ async hasColumn(table, column) {
1386
+ const schema = this.schemaName || "dbo";
1387
+ const request = this.pool.request();
1388
+ request.input("schema", schema);
1389
+ request.input("table", table);
1390
+ request.input("column", column);
1391
+ request.input("columnLower", column.toLowerCase());
1392
+ const result = await request.query(
1393
+ `SELECT 1 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = @schema AND TABLE_NAME = @table AND (COLUMN_NAME = @column OR COLUMN_NAME = @columnLower)`
1394
+ );
1395
+ return result.recordset.length > 0;
1396
+ }
1397
+ async setupSchema() {
1398
+ if (!this.schemaName || this.schemaSetupComplete) {
1399
+ return;
1400
+ }
1401
+ if (!this.setupSchemaPromise) {
1402
+ this.setupSchemaPromise = (async () => {
1403
+ try {
1404
+ const checkRequest = this.pool.request();
1405
+ checkRequest.input("schemaName", this.schemaName);
1406
+ const checkResult = await checkRequest.query(`
1407
+ SELECT 1 AS found FROM INFORMATION_SCHEMA.SCHEMATA WHERE SCHEMA_NAME = @schemaName
1408
+ `);
1409
+ const schemaExists = Array.isArray(checkResult.recordset) && checkResult.recordset.length > 0;
1410
+ if (!schemaExists) {
1411
+ try {
1412
+ await this.pool.request().query(`CREATE SCHEMA [${this.schemaName}]`);
1413
+ this.logger?.info?.(`Schema "${this.schemaName}" created successfully`);
1414
+ } catch (error) {
1415
+ this.logger?.error?.(`Failed to create schema "${this.schemaName}"`, { error });
1416
+ throw new Error(
1417
+ `Unable to create schema "${this.schemaName}". This requires CREATE privilege on the database. Either create the schema manually or grant CREATE privilege to the user.`
1418
+ );
1419
+ }
1420
+ }
1421
+ this.schemaSetupComplete = true;
1422
+ this.logger?.debug?.(`Schema "${this.schemaName}" is ready for use`);
1423
+ } catch (error) {
1424
+ this.schemaSetupComplete = void 0;
1425
+ this.setupSchemaPromise = null;
1426
+ throw error;
1427
+ } finally {
1428
+ this.setupSchemaPromise = null;
1429
+ }
1430
+ })();
1431
+ }
1432
+ await this.setupSchemaPromise;
1433
+ }
1434
+ async insert({
1435
+ tableName,
1436
+ record,
1437
+ transaction
1438
+ }) {
1439
+ try {
1440
+ const columns = Object.keys(record);
1441
+ const parsedColumns = columns.map((col) => utils.parseSqlIdentifier(col, "column name"));
1442
+ const paramNames = columns.map((_, i) => `@param${i}`);
1443
+ const insertSql = `INSERT INTO ${getTableName({ indexName: tableName, schemaName: getSchemaName(this.schemaName) })} (${parsedColumns.map((c) => `[${c}]`).join(", ")}) VALUES (${paramNames.join(", ")})`;
1444
+ const request = transaction ? transaction.request() : this.pool.request();
1445
+ columns.forEach((col, i) => {
1446
+ const value = record[col];
1447
+ const preparedValue = this.prepareValue(value, col, tableName);
1448
+ if (preparedValue instanceof Date) {
1449
+ request.input(`param${i}`, sql2__default.default.DateTime2, preparedValue);
1450
+ } else if (preparedValue === null || preparedValue === void 0) {
1451
+ request.input(`param${i}`, this.getMssqlType(tableName, col), null);
1452
+ } else {
1453
+ request.input(`param${i}`, preparedValue);
1454
+ }
1455
+ });
1456
+ await request.query(insertSql);
1457
+ } catch (error$1) {
1458
+ throw new error.MastraError(
1459
+ {
1460
+ id: "MASTRA_STORAGE_MSSQL_STORE_INSERT_FAILED",
1461
+ domain: error.ErrorDomain.STORAGE,
1462
+ category: error.ErrorCategory.THIRD_PARTY,
1463
+ details: {
1464
+ tableName
1465
+ }
1466
+ },
1467
+ error$1
1468
+ );
1469
+ }
1470
+ }
1471
+ async clearTable({ tableName }) {
1472
+ const fullTableName = getTableName({ indexName: tableName, schemaName: getSchemaName(this.schemaName) });
1473
+ try {
1474
+ try {
1475
+ await this.pool.request().query(`TRUNCATE TABLE ${fullTableName}`);
1476
+ } catch (truncateError) {
1477
+ if (truncateError?.number === 4712) {
1478
+ await this.pool.request().query(`DELETE FROM ${fullTableName}`);
1479
+ } else {
1480
+ throw truncateError;
1481
+ }
1482
+ }
1483
+ } catch (error$1) {
1196
1484
  throw new error.MastraError(
1197
1485
  {
1198
1486
  id: "MASTRA_STORAGE_MSSQL_STORE_CLEAR_TABLE_FAILED",
@@ -1209,9 +1497,11 @@ var StoreOperationsMSSQL = class extends storage.StoreOperations {
1209
1497
  getDefaultValue(type) {
1210
1498
  switch (type) {
1211
1499
  case "timestamp":
1212
- return "DEFAULT SYSDATETIMEOFFSET()";
1500
+ return "DEFAULT SYSUTCDATETIME()";
1213
1501
  case "jsonb":
1214
1502
  return "DEFAULT N'{}'";
1503
+ case "boolean":
1504
+ return "DEFAULT 0";
1215
1505
  default:
1216
1506
  return super.getDefaultValue(type);
1217
1507
  }
@@ -1222,13 +1512,29 @@ var StoreOperationsMSSQL = class extends storage.StoreOperations {
1222
1512
  }) {
1223
1513
  try {
1224
1514
  const uniqueConstraintColumns = tableName === storage.TABLE_WORKFLOW_SNAPSHOT ? ["workflow_name", "run_id"] : [];
1515
+ const largeDataColumns = [
1516
+ "workingMemory",
1517
+ "snapshot",
1518
+ "metadata",
1519
+ "content",
1520
+ // messages.content - can be very long conversation content
1521
+ "input",
1522
+ // evals.input - test input data
1523
+ "output",
1524
+ // evals.output - test output data
1525
+ "instructions",
1526
+ // evals.instructions - evaluation instructions
1527
+ "other"
1528
+ // traces.other - additional trace data
1529
+ ];
1225
1530
  const columns = Object.entries(schema).map(([name, def]) => {
1226
1531
  const parsedName = utils.parseSqlIdentifier(name, "column name");
1227
1532
  const constraints = [];
1228
1533
  if (def.primaryKey) constraints.push("PRIMARY KEY");
1229
1534
  if (!def.nullable) constraints.push("NOT NULL");
1230
1535
  const isIndexed = !!def.primaryKey || uniqueConstraintColumns.includes(name);
1231
- return `[${parsedName}] ${this.getSqlType(def.type, isIndexed)} ${constraints.join(" ")}`.trim();
1536
+ const useLargeStorage = largeDataColumns.includes(name);
1537
+ return `[${parsedName}] ${this.getSqlType(def.type, isIndexed, useLargeStorage)} ${constraints.join(" ")}`.trim();
1232
1538
  }).join(",\n");
1233
1539
  if (this.schemaName) {
1234
1540
  await this.setupSchema();
@@ -1315,7 +1621,19 @@ ${columns}
1315
1621
  const columnExists = Array.isArray(checkResult.recordset) && checkResult.recordset.length > 0;
1316
1622
  if (!columnExists) {
1317
1623
  const columnDef = schema[columnName];
1318
- const sqlType = this.getSqlType(columnDef.type);
1624
+ const largeDataColumns = [
1625
+ "workingMemory",
1626
+ "snapshot",
1627
+ "metadata",
1628
+ "content",
1629
+ "input",
1630
+ "output",
1631
+ "instructions",
1632
+ "other"
1633
+ ];
1634
+ const useLargeStorage = largeDataColumns.includes(columnName);
1635
+ const isIndexed = !!columnDef.primaryKey;
1636
+ const sqlType = this.getSqlType(columnDef.type, isIndexed, useLargeStorage);
1319
1637
  const nullable = columnDef.nullable === false ? "NOT NULL" : "";
1320
1638
  const defaultValue = columnDef.nullable === false ? this.getDefaultValue(columnDef.type) : "";
1321
1639
  const parsedColumnName = utils.parseSqlIdentifier(columnName, "column name");
@@ -1343,13 +1661,17 @@ ${columns}
1343
1661
  try {
1344
1662
  const keyEntries = Object.entries(keys).map(([key, value]) => [utils.parseSqlIdentifier(key, "column name"), value]);
1345
1663
  const conditions = keyEntries.map(([key], i) => `[${key}] = @param${i}`).join(" AND ");
1346
- const values = keyEntries.map(([_, value]) => value);
1347
- const sql7 = `SELECT * FROM ${getTableName({ indexName: tableName, schemaName: getSchemaName(this.schemaName) })} WHERE ${conditions}`;
1664
+ const sql5 = `SELECT * FROM ${getTableName({ indexName: tableName, schemaName: getSchemaName(this.schemaName) })} WHERE ${conditions}`;
1348
1665
  const request = this.pool.request();
1349
- values.forEach((value, i) => {
1350
- request.input(`param${i}`, value);
1666
+ keyEntries.forEach(([key, value], i) => {
1667
+ const preparedValue = this.prepareValue(value, key, tableName);
1668
+ if (preparedValue === null || preparedValue === void 0) {
1669
+ request.input(`param${i}`, this.getMssqlType(tableName, key), null);
1670
+ } else {
1671
+ request.input(`param${i}`, preparedValue);
1672
+ }
1351
1673
  });
1352
- const resultSet = await request.query(sql7);
1674
+ const resultSet = await request.query(sql5);
1353
1675
  const result = resultSet.recordset[0] || null;
1354
1676
  if (!result) {
1355
1677
  return null;
@@ -1381,63 +1703,599 @@ ${columns}
1381
1703
  try {
1382
1704
  await transaction.begin();
1383
1705
  for (const record of records) {
1384
- await this.insert({ tableName, record });
1706
+ await this.insert({ tableName, record, transaction });
1707
+ }
1708
+ await transaction.commit();
1709
+ } catch (error$1) {
1710
+ await transaction.rollback();
1711
+ throw new error.MastraError(
1712
+ {
1713
+ id: "MASTRA_STORAGE_MSSQL_STORE_BATCH_INSERT_FAILED",
1714
+ domain: error.ErrorDomain.STORAGE,
1715
+ category: error.ErrorCategory.THIRD_PARTY,
1716
+ details: {
1717
+ tableName,
1718
+ numberOfRecords: records.length
1719
+ }
1720
+ },
1721
+ error$1
1722
+ );
1723
+ }
1724
+ }
1725
+ async dropTable({ tableName }) {
1726
+ try {
1727
+ const tableNameWithSchema = getTableName({ indexName: tableName, schemaName: getSchemaName(this.schemaName) });
1728
+ await this.pool.request().query(`DROP TABLE IF EXISTS ${tableNameWithSchema}`);
1729
+ } catch (error$1) {
1730
+ throw new error.MastraError(
1731
+ {
1732
+ id: "MASTRA_STORAGE_MSSQL_STORE_DROP_TABLE_FAILED",
1733
+ domain: error.ErrorDomain.STORAGE,
1734
+ category: error.ErrorCategory.THIRD_PARTY,
1735
+ details: {
1736
+ tableName
1737
+ }
1738
+ },
1739
+ error$1
1740
+ );
1741
+ }
1742
+ }
1743
+ /**
1744
+ * Prepares a value for database operations, handling Date objects and JSON serialization
1745
+ */
1746
+ prepareValue(value, columnName, tableName) {
1747
+ if (value === null || value === void 0) {
1748
+ return value;
1749
+ }
1750
+ if (value instanceof Date) {
1751
+ return value;
1752
+ }
1753
+ const schema = storage.TABLE_SCHEMAS[tableName];
1754
+ const columnSchema = schema?.[columnName];
1755
+ if (columnSchema?.type === "boolean") {
1756
+ return value ? 1 : 0;
1757
+ }
1758
+ if (columnSchema?.type === "jsonb") {
1759
+ return JSON.stringify(value);
1760
+ }
1761
+ if (typeof value === "object") {
1762
+ return JSON.stringify(value);
1763
+ }
1764
+ return value;
1765
+ }
1766
+ /**
1767
+ * Maps TABLE_SCHEMAS types to mssql param types (used when value is null)
1768
+ */
1769
+ getMssqlType(tableName, columnName) {
1770
+ const col = storage.TABLE_SCHEMAS[tableName]?.[columnName];
1771
+ switch (col?.type) {
1772
+ case "text":
1773
+ return sql2__default.default.NVarChar;
1774
+ case "timestamp":
1775
+ return sql2__default.default.DateTime2;
1776
+ case "uuid":
1777
+ return sql2__default.default.UniqueIdentifier;
1778
+ case "jsonb":
1779
+ return sql2__default.default.NVarChar;
1780
+ case "integer":
1781
+ return sql2__default.default.Int;
1782
+ case "bigint":
1783
+ return sql2__default.default.BigInt;
1784
+ case "float":
1785
+ return sql2__default.default.Float;
1786
+ case "boolean":
1787
+ return sql2__default.default.Bit;
1788
+ default:
1789
+ return sql2__default.default.NVarChar;
1790
+ }
1791
+ }
1792
+ /**
1793
+ * Update a single record in the database
1794
+ */
1795
+ async update({
1796
+ tableName,
1797
+ keys,
1798
+ data,
1799
+ transaction
1800
+ }) {
1801
+ try {
1802
+ if (!data || Object.keys(data).length === 0) {
1803
+ throw new error.MastraError({
1804
+ id: "MASTRA_STORAGE_MSSQL_UPDATE_EMPTY_DATA",
1805
+ domain: error.ErrorDomain.STORAGE,
1806
+ category: error.ErrorCategory.USER,
1807
+ text: "Cannot update with empty data payload"
1808
+ });
1809
+ }
1810
+ if (!keys || Object.keys(keys).length === 0) {
1811
+ throw new error.MastraError({
1812
+ id: "MASTRA_STORAGE_MSSQL_UPDATE_EMPTY_KEYS",
1813
+ domain: error.ErrorDomain.STORAGE,
1814
+ category: error.ErrorCategory.USER,
1815
+ text: "Cannot update without keys to identify records"
1816
+ });
1817
+ }
1818
+ const setClauses = [];
1819
+ const request = transaction ? transaction.request() : this.pool.request();
1820
+ let paramIndex = 0;
1821
+ Object.entries(data).forEach(([key, value]) => {
1822
+ const parsedKey = utils.parseSqlIdentifier(key, "column name");
1823
+ const paramName = `set${paramIndex++}`;
1824
+ setClauses.push(`[${parsedKey}] = @${paramName}`);
1825
+ const preparedValue = this.prepareValue(value, key, tableName);
1826
+ if (preparedValue === null || preparedValue === void 0) {
1827
+ request.input(paramName, this.getMssqlType(tableName, key), null);
1828
+ } else {
1829
+ request.input(paramName, preparedValue);
1830
+ }
1831
+ });
1832
+ const whereConditions = [];
1833
+ Object.entries(keys).forEach(([key, value]) => {
1834
+ const parsedKey = utils.parseSqlIdentifier(key, "column name");
1835
+ const paramName = `where${paramIndex++}`;
1836
+ whereConditions.push(`[${parsedKey}] = @${paramName}`);
1837
+ const preparedValue = this.prepareValue(value, key, tableName);
1838
+ if (preparedValue === null || preparedValue === void 0) {
1839
+ request.input(paramName, this.getMssqlType(tableName, key), null);
1840
+ } else {
1841
+ request.input(paramName, preparedValue);
1842
+ }
1843
+ });
1844
+ const tableName_ = getTableName({
1845
+ indexName: tableName,
1846
+ schemaName: getSchemaName(this.schemaName)
1847
+ });
1848
+ const updateSql = `UPDATE ${tableName_} SET ${setClauses.join(", ")} WHERE ${whereConditions.join(" AND ")}`;
1849
+ await request.query(updateSql);
1850
+ } catch (error$1) {
1851
+ throw new error.MastraError(
1852
+ {
1853
+ id: "MASTRA_STORAGE_MSSQL_STORE_UPDATE_FAILED",
1854
+ domain: error.ErrorDomain.STORAGE,
1855
+ category: error.ErrorCategory.THIRD_PARTY,
1856
+ details: {
1857
+ tableName
1858
+ }
1859
+ },
1860
+ error$1
1861
+ );
1862
+ }
1863
+ }
1864
+ /**
1865
+ * Update multiple records in a single batch transaction
1866
+ */
1867
+ async batchUpdate({
1868
+ tableName,
1869
+ updates
1870
+ }) {
1871
+ const transaction = this.pool.transaction();
1872
+ try {
1873
+ await transaction.begin();
1874
+ for (const { keys, data } of updates) {
1875
+ await this.update({ tableName, keys, data, transaction });
1385
1876
  }
1386
1877
  await transaction.commit();
1387
1878
  } catch (error$1) {
1388
- await transaction.rollback();
1879
+ await transaction.rollback();
1880
+ throw new error.MastraError(
1881
+ {
1882
+ id: "MASTRA_STORAGE_MSSQL_STORE_BATCH_UPDATE_FAILED",
1883
+ domain: error.ErrorDomain.STORAGE,
1884
+ category: error.ErrorCategory.THIRD_PARTY,
1885
+ details: {
1886
+ tableName,
1887
+ numberOfRecords: updates.length
1888
+ }
1889
+ },
1890
+ error$1
1891
+ );
1892
+ }
1893
+ }
1894
+ /**
1895
+ * Delete multiple records by keys
1896
+ */
1897
+ async batchDelete({ tableName, keys }) {
1898
+ if (keys.length === 0) {
1899
+ return;
1900
+ }
1901
+ const tableName_ = getTableName({
1902
+ indexName: tableName,
1903
+ schemaName: getSchemaName(this.schemaName)
1904
+ });
1905
+ const transaction = this.pool.transaction();
1906
+ try {
1907
+ await transaction.begin();
1908
+ for (const keySet of keys) {
1909
+ const conditions = [];
1910
+ const request = transaction.request();
1911
+ let paramIndex = 0;
1912
+ Object.entries(keySet).forEach(([key, value]) => {
1913
+ const parsedKey = utils.parseSqlIdentifier(key, "column name");
1914
+ const paramName = `p${paramIndex++}`;
1915
+ conditions.push(`[${parsedKey}] = @${paramName}`);
1916
+ const preparedValue = this.prepareValue(value, key, tableName);
1917
+ if (preparedValue === null || preparedValue === void 0) {
1918
+ request.input(paramName, this.getMssqlType(tableName, key), null);
1919
+ } else {
1920
+ request.input(paramName, preparedValue);
1921
+ }
1922
+ });
1923
+ const deleteSql = `DELETE FROM ${tableName_} WHERE ${conditions.join(" AND ")}`;
1924
+ await request.query(deleteSql);
1925
+ }
1926
+ await transaction.commit();
1927
+ } catch (error$1) {
1928
+ await transaction.rollback();
1929
+ throw new error.MastraError(
1930
+ {
1931
+ id: "MASTRA_STORAGE_MSSQL_STORE_BATCH_DELETE_FAILED",
1932
+ domain: error.ErrorDomain.STORAGE,
1933
+ category: error.ErrorCategory.THIRD_PARTY,
1934
+ details: {
1935
+ tableName,
1936
+ numberOfRecords: keys.length
1937
+ }
1938
+ },
1939
+ error$1
1940
+ );
1941
+ }
1942
+ }
1943
+ /**
1944
+ * Create a new index on a table
1945
+ */
1946
+ async createIndex(options) {
1947
+ try {
1948
+ const { name, table, columns, unique = false, where } = options;
1949
+ const schemaName = this.schemaName || "dbo";
1950
+ const fullTableName = getTableName({
1951
+ indexName: table,
1952
+ schemaName: getSchemaName(this.schemaName)
1953
+ });
1954
+ const indexNameSafe = utils.parseSqlIdentifier(name, "index name");
1955
+ const checkRequest = this.pool.request();
1956
+ checkRequest.input("indexName", indexNameSafe);
1957
+ checkRequest.input("schemaName", schemaName);
1958
+ checkRequest.input("tableName", table);
1959
+ const indexExists = await checkRequest.query(`
1960
+ SELECT 1 as found
1961
+ FROM sys.indexes i
1962
+ INNER JOIN sys.tables t ON i.object_id = t.object_id
1963
+ INNER JOIN sys.schemas s ON t.schema_id = s.schema_id
1964
+ WHERE i.name = @indexName
1965
+ AND s.name = @schemaName
1966
+ AND t.name = @tableName
1967
+ `);
1968
+ if (indexExists.recordset && indexExists.recordset.length > 0) {
1969
+ return;
1970
+ }
1971
+ const uniqueStr = unique ? "UNIQUE " : "";
1972
+ const columnsStr = columns.map((col) => {
1973
+ if (col.includes(" DESC") || col.includes(" ASC")) {
1974
+ const [colName, ...modifiers] = col.split(" ");
1975
+ if (!colName) {
1976
+ throw new Error(`Invalid column specification: ${col}`);
1977
+ }
1978
+ return `[${utils.parseSqlIdentifier(colName, "column name")}] ${modifiers.join(" ")}`;
1979
+ }
1980
+ return `[${utils.parseSqlIdentifier(col, "column name")}]`;
1981
+ }).join(", ");
1982
+ const whereStr = where ? ` WHERE ${where}` : "";
1983
+ const createIndexSql = `CREATE ${uniqueStr}INDEX [${indexNameSafe}] ON ${fullTableName} (${columnsStr})${whereStr}`;
1984
+ await this.pool.request().query(createIndexSql);
1985
+ } catch (error$1) {
1986
+ throw new error.MastraError(
1987
+ {
1988
+ id: "MASTRA_STORAGE_MSSQL_INDEX_CREATE_FAILED",
1989
+ domain: error.ErrorDomain.STORAGE,
1990
+ category: error.ErrorCategory.THIRD_PARTY,
1991
+ details: {
1992
+ indexName: options.name,
1993
+ tableName: options.table
1994
+ }
1995
+ },
1996
+ error$1
1997
+ );
1998
+ }
1999
+ }
2000
+ /**
2001
+ * Drop an existing index
2002
+ */
2003
+ async dropIndex(indexName) {
2004
+ try {
2005
+ const schemaName = this.schemaName || "dbo";
2006
+ const indexNameSafe = utils.parseSqlIdentifier(indexName, "index name");
2007
+ const checkRequest = this.pool.request();
2008
+ checkRequest.input("indexName", indexNameSafe);
2009
+ checkRequest.input("schemaName", schemaName);
2010
+ const result = await checkRequest.query(`
2011
+ SELECT t.name as table_name
2012
+ FROM sys.indexes i
2013
+ INNER JOIN sys.tables t ON i.object_id = t.object_id
2014
+ INNER JOIN sys.schemas s ON t.schema_id = s.schema_id
2015
+ WHERE i.name = @indexName
2016
+ AND s.name = @schemaName
2017
+ `);
2018
+ if (!result.recordset || result.recordset.length === 0) {
2019
+ return;
2020
+ }
2021
+ if (result.recordset.length > 1) {
2022
+ const tables = result.recordset.map((r) => r.table_name).join(", ");
2023
+ throw new error.MastraError({
2024
+ id: "MASTRA_STORAGE_MSSQL_INDEX_AMBIGUOUS",
2025
+ domain: error.ErrorDomain.STORAGE,
2026
+ category: error.ErrorCategory.USER,
2027
+ text: `Index "${indexNameSafe}" exists on multiple tables (${tables}) in schema "${schemaName}". Please drop indexes manually or ensure unique index names.`
2028
+ });
2029
+ }
2030
+ const tableName = result.recordset[0].table_name;
2031
+ const fullTableName = getTableName({
2032
+ indexName: tableName,
2033
+ schemaName: getSchemaName(this.schemaName)
2034
+ });
2035
+ const dropSql = `DROP INDEX [${indexNameSafe}] ON ${fullTableName}`;
2036
+ await this.pool.request().query(dropSql);
2037
+ } catch (error$1) {
2038
+ throw new error.MastraError(
2039
+ {
2040
+ id: "MASTRA_STORAGE_MSSQL_INDEX_DROP_FAILED",
2041
+ domain: error.ErrorDomain.STORAGE,
2042
+ category: error.ErrorCategory.THIRD_PARTY,
2043
+ details: {
2044
+ indexName
2045
+ }
2046
+ },
2047
+ error$1
2048
+ );
2049
+ }
2050
+ }
2051
+ /**
2052
+ * List indexes for a specific table or all tables
2053
+ */
2054
+ async listIndexes(tableName) {
2055
+ try {
2056
+ const schemaName = this.schemaName || "dbo";
2057
+ let query;
2058
+ const request = this.pool.request();
2059
+ request.input("schemaName", schemaName);
2060
+ if (tableName) {
2061
+ query = `
2062
+ SELECT
2063
+ i.name as name,
2064
+ o.name as [table],
2065
+ i.is_unique as is_unique,
2066
+ CAST(SUM(s.used_page_count) * 8 / 1024.0 AS VARCHAR(50)) + ' MB' as size
2067
+ FROM sys.indexes i
2068
+ INNER JOIN sys.objects o ON i.object_id = o.object_id
2069
+ INNER JOIN sys.schemas sch ON o.schema_id = sch.schema_id
2070
+ LEFT JOIN sys.dm_db_partition_stats s ON i.object_id = s.object_id AND i.index_id = s.index_id
2071
+ WHERE sch.name = @schemaName
2072
+ AND o.name = @tableName
2073
+ AND i.name IS NOT NULL
2074
+ GROUP BY i.name, o.name, i.is_unique
2075
+ `;
2076
+ request.input("tableName", tableName);
2077
+ } else {
2078
+ query = `
2079
+ SELECT
2080
+ i.name as name,
2081
+ o.name as [table],
2082
+ i.is_unique as is_unique,
2083
+ CAST(SUM(s.used_page_count) * 8 / 1024.0 AS VARCHAR(50)) + ' MB' as size
2084
+ FROM sys.indexes i
2085
+ INNER JOIN sys.objects o ON i.object_id = o.object_id
2086
+ INNER JOIN sys.schemas sch ON o.schema_id = sch.schema_id
2087
+ LEFT JOIN sys.dm_db_partition_stats s ON i.object_id = s.object_id AND i.index_id = s.index_id
2088
+ WHERE sch.name = @schemaName
2089
+ AND i.name IS NOT NULL
2090
+ GROUP BY i.name, o.name, i.is_unique
2091
+ `;
2092
+ }
2093
+ const result = await request.query(query);
2094
+ const indexes = [];
2095
+ for (const row of result.recordset) {
2096
+ const colRequest = this.pool.request();
2097
+ colRequest.input("indexName", row.name);
2098
+ colRequest.input("schemaName", schemaName);
2099
+ const colResult = await colRequest.query(`
2100
+ SELECT c.name as column_name
2101
+ FROM sys.indexes i
2102
+ INNER JOIN sys.index_columns ic ON i.object_id = ic.object_id AND i.index_id = ic.index_id
2103
+ INNER JOIN sys.columns c ON ic.object_id = c.object_id AND ic.column_id = c.column_id
2104
+ INNER JOIN sys.objects o ON i.object_id = o.object_id
2105
+ INNER JOIN sys.schemas s ON o.schema_id = s.schema_id
2106
+ WHERE i.name = @indexName
2107
+ AND s.name = @schemaName
2108
+ ORDER BY ic.key_ordinal
2109
+ `);
2110
+ indexes.push({
2111
+ name: row.name,
2112
+ table: row.table,
2113
+ columns: colResult.recordset.map((c) => c.column_name),
2114
+ unique: row.is_unique || false,
2115
+ size: row.size || "0 MB",
2116
+ definition: ""
2117
+ // MSSQL doesn't store definition like PG
2118
+ });
2119
+ }
2120
+ return indexes;
2121
+ } catch (error$1) {
2122
+ throw new error.MastraError(
2123
+ {
2124
+ id: "MASTRA_STORAGE_MSSQL_INDEX_LIST_FAILED",
2125
+ domain: error.ErrorDomain.STORAGE,
2126
+ category: error.ErrorCategory.THIRD_PARTY,
2127
+ details: tableName ? {
2128
+ tableName
2129
+ } : {}
2130
+ },
2131
+ error$1
2132
+ );
2133
+ }
2134
+ }
2135
+ /**
2136
+ * Get detailed statistics for a specific index
2137
+ */
2138
+ async describeIndex(indexName) {
2139
+ try {
2140
+ const schemaName = this.schemaName || "dbo";
2141
+ const request = this.pool.request();
2142
+ request.input("indexName", indexName);
2143
+ request.input("schemaName", schemaName);
2144
+ const query = `
2145
+ SELECT
2146
+ i.name as name,
2147
+ o.name as [table],
2148
+ i.is_unique as is_unique,
2149
+ CAST(SUM(s.used_page_count) * 8 / 1024.0 AS VARCHAR(50)) + ' MB' as size,
2150
+ i.type_desc as method,
2151
+ ISNULL(us.user_scans, 0) as scans,
2152
+ ISNULL(us.user_seeks + us.user_scans, 0) as tuples_read,
2153
+ ISNULL(us.user_lookups, 0) as tuples_fetched
2154
+ FROM sys.indexes i
2155
+ INNER JOIN sys.objects o ON i.object_id = o.object_id
2156
+ INNER JOIN sys.schemas sch ON o.schema_id = sch.schema_id
2157
+ LEFT JOIN sys.dm_db_partition_stats s ON i.object_id = s.object_id AND i.index_id = s.index_id
2158
+ LEFT JOIN sys.dm_db_index_usage_stats us ON i.object_id = us.object_id AND i.index_id = us.index_id
2159
+ WHERE i.name = @indexName
2160
+ AND sch.name = @schemaName
2161
+ GROUP BY i.name, o.name, i.is_unique, i.type_desc, us.user_seeks, us.user_scans, us.user_lookups
2162
+ `;
2163
+ const result = await request.query(query);
2164
+ if (!result.recordset || result.recordset.length === 0) {
2165
+ throw new Error(`Index "${indexName}" not found in schema "${schemaName}"`);
2166
+ }
2167
+ const row = result.recordset[0];
2168
+ const colRequest = this.pool.request();
2169
+ colRequest.input("indexName", indexName);
2170
+ colRequest.input("schemaName", schemaName);
2171
+ const colResult = await colRequest.query(`
2172
+ SELECT c.name as column_name
2173
+ FROM sys.indexes i
2174
+ INNER JOIN sys.index_columns ic ON i.object_id = ic.object_id AND i.index_id = ic.index_id
2175
+ INNER JOIN sys.columns c ON ic.object_id = c.object_id AND ic.column_id = c.column_id
2176
+ INNER JOIN sys.objects o ON i.object_id = o.object_id
2177
+ INNER JOIN sys.schemas s ON o.schema_id = s.schema_id
2178
+ WHERE i.name = @indexName
2179
+ AND s.name = @schemaName
2180
+ ORDER BY ic.key_ordinal
2181
+ `);
2182
+ return {
2183
+ name: row.name,
2184
+ table: row.table,
2185
+ columns: colResult.recordset.map((c) => c.column_name),
2186
+ unique: row.is_unique || false,
2187
+ size: row.size || "0 MB",
2188
+ definition: "",
2189
+ method: row.method?.toLowerCase() || "nonclustered",
2190
+ scans: Number(row.scans) || 0,
2191
+ tuples_read: Number(row.tuples_read) || 0,
2192
+ tuples_fetched: Number(row.tuples_fetched) || 0
2193
+ };
2194
+ } catch (error$1) {
1389
2195
  throw new error.MastraError(
1390
2196
  {
1391
- id: "MASTRA_STORAGE_MSSQL_STORE_BATCH_INSERT_FAILED",
2197
+ id: "MASTRA_STORAGE_MSSQL_INDEX_DESCRIBE_FAILED",
1392
2198
  domain: error.ErrorDomain.STORAGE,
1393
2199
  category: error.ErrorCategory.THIRD_PARTY,
1394
2200
  details: {
1395
- tableName,
1396
- numberOfRecords: records.length
2201
+ indexName
1397
2202
  }
1398
2203
  },
1399
2204
  error$1
1400
2205
  );
1401
2206
  }
1402
2207
  }
1403
- async dropTable({ tableName }) {
2208
+ /**
2209
+ * Returns definitions for automatic performance indexes
2210
+ * IMPORTANT: Uses seq_id DESC instead of createdAt DESC for MSSQL due to millisecond accuracy limitations
2211
+ * NOTE: Using NVARCHAR(400) for text columns (800 bytes) leaves room for composite indexes
2212
+ */
2213
+ getAutomaticIndexDefinitions() {
2214
+ const schemaPrefix = this.schemaName ? `${this.schemaName}_` : "";
2215
+ return [
2216
+ // Composite indexes for optimal filtering + sorting performance
2217
+ // NVARCHAR(400) = 800 bytes, plus BIGINT (8 bytes) = 808 bytes total (under 900-byte limit)
2218
+ {
2219
+ name: `${schemaPrefix}mastra_threads_resourceid_seqid_idx`,
2220
+ table: storage.TABLE_THREADS,
2221
+ columns: ["resourceId", "seq_id DESC"]
2222
+ },
2223
+ {
2224
+ name: `${schemaPrefix}mastra_messages_thread_id_seqid_idx`,
2225
+ table: storage.TABLE_MESSAGES,
2226
+ columns: ["thread_id", "seq_id DESC"]
2227
+ },
2228
+ {
2229
+ name: `${schemaPrefix}mastra_traces_name_seqid_idx`,
2230
+ table: storage.TABLE_TRACES,
2231
+ columns: ["name", "seq_id DESC"]
2232
+ },
2233
+ {
2234
+ name: `${schemaPrefix}mastra_scores_trace_id_span_id_seqid_idx`,
2235
+ table: storage.TABLE_SCORERS,
2236
+ columns: ["traceId", "spanId", "seq_id DESC"]
2237
+ },
2238
+ // AI Spans indexes for optimal trace querying
2239
+ {
2240
+ name: `${schemaPrefix}mastra_ai_spans_traceid_startedat_idx`,
2241
+ table: storage.TABLE_AI_SPANS,
2242
+ columns: ["traceId", "startedAt DESC"]
2243
+ },
2244
+ {
2245
+ name: `${schemaPrefix}mastra_ai_spans_parentspanid_startedat_idx`,
2246
+ table: storage.TABLE_AI_SPANS,
2247
+ columns: ["parentSpanId", "startedAt DESC"]
2248
+ },
2249
+ {
2250
+ name: `${schemaPrefix}mastra_ai_spans_name_idx`,
2251
+ table: storage.TABLE_AI_SPANS,
2252
+ columns: ["name"]
2253
+ },
2254
+ {
2255
+ name: `${schemaPrefix}mastra_ai_spans_spantype_startedat_idx`,
2256
+ table: storage.TABLE_AI_SPANS,
2257
+ columns: ["spanType", "startedAt DESC"]
2258
+ }
2259
+ ];
2260
+ }
2261
+ /**
2262
+ * Creates automatic indexes for optimal query performance
2263
+ * Uses getAutomaticIndexDefinitions() to determine which indexes to create
2264
+ */
2265
+ async createAutomaticIndexes() {
1404
2266
  try {
1405
- const tableNameWithSchema = getTableName({ indexName: tableName, schemaName: getSchemaName(this.schemaName) });
1406
- await this.pool.request().query(`DROP TABLE IF EXISTS ${tableNameWithSchema}`);
2267
+ const indexes = this.getAutomaticIndexDefinitions();
2268
+ for (const indexOptions of indexes) {
2269
+ try {
2270
+ await this.createIndex(indexOptions);
2271
+ } catch (error) {
2272
+ this.logger?.warn?.(`Failed to create index ${indexOptions.name}:`, error);
2273
+ }
2274
+ }
1407
2275
  } catch (error$1) {
1408
2276
  throw new error.MastraError(
1409
2277
  {
1410
- id: "MASTRA_STORAGE_MSSQL_STORE_DROP_TABLE_FAILED",
2278
+ id: "MASTRA_STORAGE_MSSQL_STORE_CREATE_PERFORMANCE_INDEXES_FAILED",
1411
2279
  domain: error.ErrorDomain.STORAGE,
1412
- category: error.ErrorCategory.THIRD_PARTY,
1413
- details: {
1414
- tableName
1415
- }
2280
+ category: error.ErrorCategory.THIRD_PARTY
1416
2281
  },
1417
2282
  error$1
1418
2283
  );
1419
2284
  }
1420
2285
  }
1421
2286
  };
1422
- function parseJSON(jsonString) {
1423
- try {
1424
- return JSON.parse(jsonString);
1425
- } catch {
1426
- return jsonString;
1427
- }
1428
- }
1429
2287
  function transformScoreRow(row) {
1430
2288
  return {
1431
2289
  ...row,
1432
- input: parseJSON(row.input),
1433
- scorer: parseJSON(row.scorer),
1434
- preprocessStepResult: parseJSON(row.preprocessStepResult),
1435
- analyzeStepResult: parseJSON(row.analyzeStepResult),
1436
- metadata: parseJSON(row.metadata),
1437
- output: parseJSON(row.output),
1438
- additionalContext: parseJSON(row.additionalContext),
1439
- runtimeContext: parseJSON(row.runtimeContext),
1440
- entity: parseJSON(row.entity),
2290
+ input: storage.safelyParseJSON(row.input),
2291
+ scorer: storage.safelyParseJSON(row.scorer),
2292
+ preprocessStepResult: storage.safelyParseJSON(row.preprocessStepResult),
2293
+ analyzeStepResult: storage.safelyParseJSON(row.analyzeStepResult),
2294
+ metadata: storage.safelyParseJSON(row.metadata),
2295
+ output: storage.safelyParseJSON(row.output),
2296
+ additionalContext: storage.safelyParseJSON(row.additionalContext),
2297
+ requestContext: storage.safelyParseJSON(row.requestContext),
2298
+ entity: storage.safelyParseJSON(row.entity),
1441
2299
  createdAt: row.createdAt,
1442
2300
  updatedAt: row.updatedAt
1443
2301
  };
@@ -1480,6 +2338,19 @@ var ScoresMSSQL = class extends storage.ScoresStorage {
1480
2338
  }
1481
2339
  }
1482
2340
  async saveScore(score) {
2341
+ let validatedScore;
2342
+ try {
2343
+ validatedScore = evals.saveScorePayloadSchema.parse(score);
2344
+ } catch (error$1) {
2345
+ throw new error.MastraError(
2346
+ {
2347
+ id: "MASTRA_STORAGE_MSSQL_STORE_SAVE_SCORE_VALIDATION_FAILED",
2348
+ domain: error.ErrorDomain.STORAGE,
2349
+ category: error.ErrorCategory.THIRD_PARTY
2350
+ },
2351
+ error$1
2352
+ );
2353
+ }
1483
2354
  try {
1484
2355
  const scoreId = crypto.randomUUID();
1485
2356
  const {
@@ -1490,24 +2361,24 @@ var ScoresMSSQL = class extends storage.ScoresStorage {
1490
2361
  input,
1491
2362
  output,
1492
2363
  additionalContext,
1493
- runtimeContext,
2364
+ requestContext,
1494
2365
  entity,
1495
2366
  ...rest
1496
- } = score;
2367
+ } = validatedScore;
1497
2368
  await this.operations.insert({
1498
2369
  tableName: storage.TABLE_SCORERS,
1499
2370
  record: {
1500
2371
  id: scoreId,
1501
2372
  ...rest,
1502
- input: JSON.stringify(input) || "",
1503
- output: JSON.stringify(output) || "",
1504
- preprocessStepResult: preprocessStepResult ? JSON.stringify(preprocessStepResult) : null,
1505
- analyzeStepResult: analyzeStepResult ? JSON.stringify(analyzeStepResult) : null,
1506
- metadata: metadata ? JSON.stringify(metadata) : null,
1507
- additionalContext: additionalContext ? JSON.stringify(additionalContext) : null,
1508
- runtimeContext: runtimeContext ? JSON.stringify(runtimeContext) : null,
1509
- entity: entity ? JSON.stringify(entity) : null,
1510
- scorer: scorer ? JSON.stringify(scorer) : null,
2373
+ input: input || "",
2374
+ output: output || "",
2375
+ preprocessStepResult: preprocessStepResult || null,
2376
+ analyzeStepResult: analyzeStepResult || null,
2377
+ metadata: metadata || null,
2378
+ additionalContext: additionalContext || null,
2379
+ requestContext: requestContext || null,
2380
+ entity: entity || null,
2381
+ scorer: scorer || null,
1511
2382
  createdAt: (/* @__PURE__ */ new Date()).toISOString(),
1512
2383
  updatedAt: (/* @__PURE__ */ new Date()).toISOString()
1513
2384
  }
@@ -1525,41 +2396,70 @@ var ScoresMSSQL = class extends storage.ScoresStorage {
1525
2396
  );
1526
2397
  }
1527
2398
  }
1528
- async getScoresByScorerId({
2399
+ async listScoresByScorerId({
1529
2400
  scorerId,
1530
- pagination
2401
+ pagination,
2402
+ entityId,
2403
+ entityType,
2404
+ source
1531
2405
  }) {
1532
2406
  try {
1533
- const request = this.pool.request();
1534
- request.input("p1", scorerId);
1535
- const totalResult = await request.query(
1536
- `SELECT COUNT(*) as count FROM ${getTableName({ indexName: storage.TABLE_SCORERS, schemaName: getSchemaName(this.schema) })} WHERE [scorerId] = @p1`
1537
- );
2407
+ const conditions = ["[scorerId] = @p1"];
2408
+ const params = { p1: scorerId };
2409
+ let paramIndex = 2;
2410
+ if (entityId) {
2411
+ conditions.push(`[entityId] = @p${paramIndex}`);
2412
+ params[`p${paramIndex}`] = entityId;
2413
+ paramIndex++;
2414
+ }
2415
+ if (entityType) {
2416
+ conditions.push(`[entityType] = @p${paramIndex}`);
2417
+ params[`p${paramIndex}`] = entityType;
2418
+ paramIndex++;
2419
+ }
2420
+ if (source) {
2421
+ conditions.push(`[source] = @p${paramIndex}`);
2422
+ params[`p${paramIndex}`] = source;
2423
+ paramIndex++;
2424
+ }
2425
+ const whereClause = conditions.join(" AND ");
2426
+ const tableName = getTableName({ indexName: storage.TABLE_SCORERS, schemaName: getSchemaName(this.schema) });
2427
+ const countRequest = this.pool.request();
2428
+ Object.entries(params).forEach(([key, value]) => {
2429
+ countRequest.input(key, value);
2430
+ });
2431
+ const totalResult = await countRequest.query(`SELECT COUNT(*) as count FROM ${tableName} WHERE ${whereClause}`);
1538
2432
  const total = totalResult.recordset[0]?.count || 0;
2433
+ const { page, perPage: perPageInput } = pagination;
1539
2434
  if (total === 0) {
1540
2435
  return {
1541
2436
  pagination: {
1542
2437
  total: 0,
1543
- page: pagination.page,
1544
- perPage: pagination.perPage,
2438
+ page,
2439
+ perPage: perPageInput,
1545
2440
  hasMore: false
1546
2441
  },
1547
2442
  scores: []
1548
2443
  };
1549
2444
  }
2445
+ const perPage = storage.normalizePerPage(perPageInput, 100);
2446
+ const { offset: start, perPage: perPageForResponse } = storage.calculatePagination(page, perPageInput, perPage);
2447
+ const limitValue = perPageInput === false ? total : perPage;
2448
+ const end = perPageInput === false ? total : start + perPage;
1550
2449
  const dataRequest = this.pool.request();
1551
- dataRequest.input("p1", scorerId);
1552
- dataRequest.input("p2", pagination.perPage);
1553
- dataRequest.input("p3", pagination.page * pagination.perPage);
1554
- const result = await dataRequest.query(
1555
- `SELECT * FROM ${getTableName({ indexName: storage.TABLE_SCORERS, schemaName: getSchemaName(this.schema) })} WHERE [scorerId] = @p1 ORDER BY [createdAt] DESC OFFSET @p3 ROWS FETCH NEXT @p2 ROWS ONLY`
1556
- );
2450
+ Object.entries(params).forEach(([key, value]) => {
2451
+ dataRequest.input(key, value);
2452
+ });
2453
+ dataRequest.input("perPage", limitValue);
2454
+ dataRequest.input("offset", start);
2455
+ const dataQuery = `SELECT * FROM ${tableName} WHERE ${whereClause} ORDER BY [createdAt] DESC OFFSET @offset ROWS FETCH NEXT @perPage ROWS ONLY`;
2456
+ const result = await dataRequest.query(dataQuery);
1557
2457
  return {
1558
2458
  pagination: {
1559
2459
  total: Number(total),
1560
- page: pagination.page,
1561
- perPage: pagination.perPage,
1562
- hasMore: Number(total) > (pagination.page + 1) * pagination.perPage
2460
+ page,
2461
+ perPage: perPageForResponse,
2462
+ hasMore: end < total
1563
2463
  },
1564
2464
  scores: result.recordset.map((row) => transformScoreRow(row))
1565
2465
  };
@@ -1575,7 +2475,7 @@ var ScoresMSSQL = class extends storage.ScoresStorage {
1575
2475
  );
1576
2476
  }
1577
2477
  }
1578
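Note: the listScores* methods share one pagination convention: page is 0-based, perPage: false means "return everything", normalizePerPage applies a default of 100, and hasMore compares the end of the current window against the total count. A rough sketch of the window arithmetic, under the assumption that calculatePagination produces an offset of page * perPage:

function pageWindow(page, perPageInput, total, defaultPerPage = 100) {
  if (perPageInput === false) {
    // "false" disables paging: one window covering everything.
    return { offset: 0, fetch: total, hasMore: false };
  }
  const perPage = perPageInput ?? defaultPerPage;
  const offset = page * perPage;
  return { offset, fetch: perPage, hasMore: offset + perPage < total };
}

// pageWindow(0, 10, 25)    -> { offset: 0,  fetch: 10, hasMore: true }
// pageWindow(2, 10, 25)    -> { offset: 20, fetch: 10, hasMore: false }
// pageWindow(0, false, 25) -> { offset: 0,  fetch: 25, hasMore: false }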
- async getScoresByRunId({
2478
+ async listScoresByRunId({
1579
2479
  runId,
1580
2480
  pagination
1581
2481
  }) {
@@ -1586,30 +2486,35 @@ var ScoresMSSQL = class extends storage.ScoresStorage {
1586
2486
  `SELECT COUNT(*) as count FROM ${getTableName({ indexName: storage.TABLE_SCORERS, schemaName: getSchemaName(this.schema) })} WHERE [runId] = @p1`
1587
2487
  );
1588
2488
  const total = totalResult.recordset[0]?.count || 0;
2489
+ const { page, perPage: perPageInput } = pagination;
1589
2490
  if (total === 0) {
1590
2491
  return {
1591
2492
  pagination: {
1592
2493
  total: 0,
1593
- page: pagination.page,
1594
- perPage: pagination.perPage,
2494
+ page,
2495
+ perPage: perPageInput,
1595
2496
  hasMore: false
1596
2497
  },
1597
2498
  scores: []
1598
2499
  };
1599
2500
  }
2501
+ const perPage = storage.normalizePerPage(perPageInput, 100);
2502
+ const { offset: start, perPage: perPageForResponse } = storage.calculatePagination(page, perPageInput, perPage);
2503
+ const limitValue = perPageInput === false ? total : perPage;
2504
+ const end = perPageInput === false ? total : start + perPage;
1600
2505
  const dataRequest = this.pool.request();
1601
2506
  dataRequest.input("p1", runId);
1602
- dataRequest.input("p2", pagination.perPage);
1603
- dataRequest.input("p3", pagination.page * pagination.perPage);
2507
+ dataRequest.input("p2", limitValue);
2508
+ dataRequest.input("p3", start);
1604
2509
  const result = await dataRequest.query(
1605
2510
  `SELECT * FROM ${getTableName({ indexName: storage.TABLE_SCORERS, schemaName: getSchemaName(this.schema) })} WHERE [runId] = @p1 ORDER BY [createdAt] DESC OFFSET @p3 ROWS FETCH NEXT @p2 ROWS ONLY`
1606
2511
  );
1607
2512
  return {
1608
2513
  pagination: {
1609
2514
  total: Number(total),
1610
- page: pagination.page,
1611
- perPage: pagination.perPage,
1612
- hasMore: Number(total) > (pagination.page + 1) * pagination.perPage
2515
+ page,
2516
+ perPage: perPageForResponse,
2517
+ hasMore: end < total
1613
2518
  },
1614
2519
  scores: result.recordset.map((row) => transformScoreRow(row))
1615
2520
  };
@@ -1625,7 +2530,7 @@ var ScoresMSSQL = class extends storage.ScoresStorage {
1625
2530
  );
1626
2531
  }
1627
2532
  }
1628
- async getScoresByEntityId({
2533
+ async listScoresByEntityId({
1629
2534
  entityId,
1630
2535
  entityType,
1631
2536
  pagination
@@ -1638,31 +2543,36 @@ var ScoresMSSQL = class extends storage.ScoresStorage {
1638
2543
  `SELECT COUNT(*) as count FROM ${getTableName({ indexName: storage.TABLE_SCORERS, schemaName: getSchemaName(this.schema) })} WHERE [entityId] = @p1 AND [entityType] = @p2`
1639
2544
  );
1640
2545
  const total = totalResult.recordset[0]?.count || 0;
2546
+ const { page, perPage: perPageInput } = pagination;
2547
+ const perPage = storage.normalizePerPage(perPageInput, 100);
2548
+ const { offset: start, perPage: perPageForResponse } = storage.calculatePagination(page, perPageInput, perPage);
1641
2549
  if (total === 0) {
1642
2550
  return {
1643
2551
  pagination: {
1644
2552
  total: 0,
1645
- page: pagination.page,
1646
- perPage: pagination.perPage,
2553
+ page,
2554
+ perPage: perPageForResponse,
1647
2555
  hasMore: false
1648
2556
  },
1649
2557
  scores: []
1650
2558
  };
1651
2559
  }
2560
+ const limitValue = perPageInput === false ? total : perPage;
2561
+ const end = perPageInput === false ? total : start + perPage;
1652
2562
  const dataRequest = this.pool.request();
1653
2563
  dataRequest.input("p1", entityId);
1654
2564
  dataRequest.input("p2", entityType);
1655
- dataRequest.input("p3", pagination.perPage);
1656
- dataRequest.input("p4", pagination.page * pagination.perPage);
2565
+ dataRequest.input("p3", limitValue);
2566
+ dataRequest.input("p4", start);
1657
2567
  const result = await dataRequest.query(
1658
2568
  `SELECT * FROM ${getTableName({ indexName: storage.TABLE_SCORERS, schemaName: getSchemaName(this.schema) })} WHERE [entityId] = @p1 AND [entityType] = @p2 ORDER BY [createdAt] DESC OFFSET @p4 ROWS FETCH NEXT @p3 ROWS ONLY`
1659
2569
  );
1660
2570
  return {
1661
2571
  pagination: {
1662
2572
  total: Number(total),
1663
- page: pagination.page,
1664
- perPage: pagination.perPage,
1665
- hasMore: Number(total) > (pagination.page + 1) * pagination.perPage
2573
+ page,
2574
+ perPage: perPageForResponse,
2575
+ hasMore: end < total
1666
2576
  },
1667
2577
  scores: result.recordset.map((row) => transformScoreRow(row))
1668
2578
  };
@@ -1678,8 +2588,66 @@ var ScoresMSSQL = class extends storage.ScoresStorage {
1678
2588
  );
1679
2589
  }
1680
2590
  }
2591
+ async listScoresBySpan({
2592
+ traceId,
2593
+ spanId,
2594
+ pagination
2595
+ }) {
2596
+ try {
2597
+ const request = this.pool.request();
2598
+ request.input("p1", traceId);
2599
+ request.input("p2", spanId);
2600
+ const totalResult = await request.query(
2601
+ `SELECT COUNT(*) as count FROM ${getTableName({ indexName: storage.TABLE_SCORERS, schemaName: getSchemaName(this.schema) })} WHERE [traceId] = @p1 AND [spanId] = @p2`
2602
+ );
2603
+ const total = totalResult.recordset[0]?.count || 0;
2604
+ const { page, perPage: perPageInput } = pagination;
2605
+ const perPage = storage.normalizePerPage(perPageInput, 100);
2606
+ const { offset: start, perPage: perPageForResponse } = storage.calculatePagination(page, perPageInput, perPage);
2607
+ if (total === 0) {
2608
+ return {
2609
+ pagination: {
2610
+ total: 0,
2611
+ page,
2612
+ perPage: perPageForResponse,
2613
+ hasMore: false
2614
+ },
2615
+ scores: []
2616
+ };
2617
+ }
2618
+ const limitValue = perPageInput === false ? total : perPage;
2619
+ const end = perPageInput === false ? total : start + perPage;
2620
+ const dataRequest = this.pool.request();
2621
+ dataRequest.input("p1", traceId);
2622
+ dataRequest.input("p2", spanId);
2623
+ dataRequest.input("p3", limitValue);
2624
+ dataRequest.input("p4", start);
2625
+ const result = await dataRequest.query(
2626
+ `SELECT * FROM ${getTableName({ indexName: storage.TABLE_SCORERS, schemaName: getSchemaName(this.schema) })} WHERE [traceId] = @p1 AND [spanId] = @p2 ORDER BY [createdAt] DESC OFFSET @p4 ROWS FETCH NEXT @p3 ROWS ONLY`
2627
+ );
2628
+ return {
2629
+ pagination: {
2630
+ total: Number(total),
2631
+ page,
2632
+ perPage: perPageForResponse,
2633
+ hasMore: end < total
2634
+ },
2635
+ scores: result.recordset.map((row) => transformScoreRow(row))
2636
+ };
2637
+ } catch (error$1) {
2638
+ throw new error.MastraError(
2639
+ {
2640
+ id: "MASTRA_STORAGE_MSSQL_STORE_GET_SCORES_BY_SPAN_FAILED",
2641
+ domain: error.ErrorDomain.STORAGE,
2642
+ category: error.ErrorCategory.THIRD_PARTY,
2643
+ details: { traceId, spanId }
2644
+ },
2645
+ error$1
2646
+ );
2647
+ }
2648
+ }
1681
2649
  };
1682
- var TracesMSSQL = class extends storage.TracesStorage {
2650
+ var WorkflowsMSSQL = class extends storage.WorkflowsStorage {
1683
2651
  pool;
1684
2652
  operations;
1685
2653
  schema;
@@ -1693,210 +2661,168 @@ var TracesMSSQL = class extends storage.TracesStorage {
1693
2661
  this.operations = operations;
1694
2662
  this.schema = schema;
1695
2663
  }
1696
- /** @deprecated use getTracesPaginated instead*/
1697
- async getTraces(args) {
1698
- if (args.fromDate || args.toDate) {
1699
- args.dateRange = {
1700
- start: args.fromDate,
1701
- end: args.toDate
1702
- };
1703
- }
1704
- const result = await this.getTracesPaginated(args);
1705
- return result.traces;
1706
- }
1707
- async getTracesPaginated(args) {
1708
- const { name, scope, page = 0, perPage: perPageInput, attributes, filters, dateRange } = args;
1709
- const fromDate = dateRange?.start;
1710
- const toDate = dateRange?.end;
1711
- const perPage = perPageInput !== void 0 ? perPageInput : 100;
1712
- const currentOffset = page * perPage;
1713
- const paramMap = {};
1714
- const conditions = [];
1715
- let paramIndex = 1;
1716
- if (name) {
1717
- const paramName = `p${paramIndex++}`;
1718
- conditions.push(`[name] LIKE @${paramName}`);
1719
- paramMap[paramName] = `${name}%`;
1720
- }
1721
- if (scope) {
1722
- const paramName = `p${paramIndex++}`;
1723
- conditions.push(`[scope] = @${paramName}`);
1724
- paramMap[paramName] = scope;
1725
- }
1726
- if (attributes) {
1727
- Object.entries(attributes).forEach(([key, value]) => {
1728
- const parsedKey = utils.parseFieldKey(key);
1729
- const paramName = `p${paramIndex++}`;
1730
- conditions.push(`JSON_VALUE([attributes], '$.${parsedKey}') = @${paramName}`);
1731
- paramMap[paramName] = value;
1732
- });
1733
- }
1734
- if (filters) {
1735
- Object.entries(filters).forEach(([key, value]) => {
1736
- const parsedKey = utils.parseFieldKey(key);
1737
- const paramName = `p${paramIndex++}`;
1738
- conditions.push(`[${parsedKey}] = @${paramName}`);
1739
- paramMap[paramName] = value;
1740
- });
1741
- }
1742
- if (fromDate instanceof Date && !isNaN(fromDate.getTime())) {
1743
- const paramName = `p${paramIndex++}`;
1744
- conditions.push(`[createdAt] >= @${paramName}`);
1745
- paramMap[paramName] = fromDate.toISOString();
1746
- }
1747
- if (toDate instanceof Date && !isNaN(toDate.getTime())) {
1748
- const paramName = `p${paramIndex++}`;
1749
- conditions.push(`[createdAt] <= @${paramName}`);
1750
- paramMap[paramName] = toDate.toISOString();
2664
+ parseWorkflowRun(row) {
2665
+ let parsedSnapshot = row.snapshot;
2666
+ if (typeof parsedSnapshot === "string") {
2667
+ try {
2668
+ parsedSnapshot = JSON.parse(row.snapshot);
2669
+ } catch (e) {
2670
+ this.logger?.warn?.(`Failed to parse snapshot for workflow ${row.workflow_name}:`, e);
2671
+ }
1751
2672
  }
1752
- const whereClause = conditions.length > 0 ? `WHERE ${conditions.join(" AND ")}` : "";
1753
- const countQuery = `SELECT COUNT(*) as total FROM ${getTableName({ indexName: storage.TABLE_TRACES, schemaName: getSchemaName(this.schema) })} ${whereClause}`;
1754
- let total = 0;
2673
+ return {
2674
+ workflowName: row.workflow_name,
2675
+ runId: row.run_id,
2676
+ snapshot: parsedSnapshot,
2677
+ createdAt: row.createdAt,
2678
+ updatedAt: row.updatedAt,
2679
+ resourceId: row.resourceId
2680
+ };
2681
+ }
2682
+ async updateWorkflowResults({
2683
+ workflowName,
2684
+ runId,
2685
+ stepId,
2686
+ result,
2687
+ requestContext
2688
+ }) {
2689
+ const table = getTableName({ indexName: storage.TABLE_WORKFLOW_SNAPSHOT, schemaName: getSchemaName(this.schema) });
2690
+ const transaction = this.pool.transaction();
1755
2691
  try {
1756
- const countRequest = this.pool.request();
1757
- Object.entries(paramMap).forEach(([key, value]) => {
1758
- if (value instanceof Date) {
1759
- countRequest.input(key, sql2__default.default.DateTime, value);
1760
- } else {
1761
- countRequest.input(key, value);
1762
- }
1763
- });
1764
- const countResult = await countRequest.query(countQuery);
1765
- total = parseInt(countResult.recordset[0].total, 10);
2692
+ await transaction.begin();
2693
+ const selectRequest = new sql2__default.default.Request(transaction);
2694
+ selectRequest.input("workflow_name", workflowName);
2695
+ selectRequest.input("run_id", runId);
2696
+ const existingSnapshotResult = await selectRequest.query(
2697
+ `SELECT snapshot FROM ${table} WITH (UPDLOCK, HOLDLOCK) WHERE workflow_name = @workflow_name AND run_id = @run_id`
2698
+ );
2699
+ let snapshot;
2700
+ if (!existingSnapshotResult.recordset || existingSnapshotResult.recordset.length === 0) {
2701
+ snapshot = {
2702
+ context: {},
2703
+ activePaths: [],
2704
+ timestamp: Date.now(),
2705
+ suspendedPaths: {},
2706
+ resumeLabels: {},
2707
+ serializedStepGraph: [],
2708
+ value: {},
2709
+ waitingPaths: {},
2710
+ status: "pending",
2711
+ runId,
2712
+ requestContext: {}
2713
+ };
2714
+ } else {
2715
+ const existingSnapshot = existingSnapshotResult.recordset[0].snapshot;
2716
+ snapshot = typeof existingSnapshot === "string" ? JSON.parse(existingSnapshot) : existingSnapshot;
2717
+ }
2718
+ snapshot.context[stepId] = result;
2719
+ snapshot.requestContext = { ...snapshot.requestContext, ...requestContext };
2720
+ const upsertReq = new sql2__default.default.Request(transaction);
2721
+ upsertReq.input("workflow_name", workflowName);
2722
+ upsertReq.input("run_id", runId);
2723
+ upsertReq.input("snapshot", JSON.stringify(snapshot));
2724
+ upsertReq.input("createdAt", sql2__default.default.DateTime2, /* @__PURE__ */ new Date());
2725
+ upsertReq.input("updatedAt", sql2__default.default.DateTime2, /* @__PURE__ */ new Date());
2726
+ await upsertReq.query(
2727
+ `MERGE ${table} AS target
2728
+ USING (SELECT @workflow_name AS workflow_name, @run_id AS run_id) AS src
2729
+ ON target.workflow_name = src.workflow_name AND target.run_id = src.run_id
2730
+ WHEN MATCHED THEN UPDATE SET snapshot = @snapshot, [updatedAt] = @updatedAt
2731
+ WHEN NOT MATCHED THEN INSERT (workflow_name, run_id, snapshot, [createdAt], [updatedAt])
2732
+ VALUES (@workflow_name, @run_id, @snapshot, @createdAt, @updatedAt);`
2733
+ );
2734
+ await transaction.commit();
2735
+ return snapshot.context;
1766
2736
  } catch (error$1) {
2737
+ try {
2738
+ await transaction.rollback();
2739
+ } catch {
2740
+ }
1767
2741
  throw new error.MastraError(
1768
2742
  {
1769
- id: "MASTRA_STORAGE_MSSQL_STORE_GET_TRACES_PAGINATED_FAILED_TO_RETRIEVE_TOTAL_COUNT",
2743
+ id: "MASTRA_STORAGE_MSSQL_STORE_UPDATE_WORKFLOW_RESULTS_FAILED",
1770
2744
  domain: error.ErrorDomain.STORAGE,
1771
2745
  category: error.ErrorCategory.THIRD_PARTY,
1772
2746
  details: {
1773
- name: args.name ?? "",
1774
- scope: args.scope ?? ""
2747
+ workflowName,
2748
+ runId,
2749
+ stepId
1775
2750
  }
1776
2751
  },
1777
2752
  error$1
1778
2753
  );
1779
2754
  }
1780
- if (total === 0) {
1781
- return {
1782
- traces: [],
1783
- total: 0,
1784
- page,
1785
- perPage,
1786
- hasMore: false
1787
- };
1788
- }
1789
- const dataQuery = `SELECT * FROM ${getTableName({ indexName: storage.TABLE_TRACES, schemaName: getSchemaName(this.schema) })} ${whereClause} ORDER BY [seq_id] DESC OFFSET @offset ROWS FETCH NEXT @limit ROWS ONLY`;
1790
- const dataRequest = this.pool.request();
1791
- Object.entries(paramMap).forEach(([key, value]) => {
1792
- if (value instanceof Date) {
1793
- dataRequest.input(key, sql2__default.default.DateTime, value);
1794
- } else {
1795
- dataRequest.input(key, value);
1796
- }
1797
- });
1798
- dataRequest.input("offset", currentOffset);
1799
- dataRequest.input("limit", perPage);
2755
+ }
2756
+ async updateWorkflowState({
2757
+ workflowName,
2758
+ runId,
2759
+ opts
2760
+ }) {
2761
+ const table = getTableName({ indexName: storage.TABLE_WORKFLOW_SNAPSHOT, schemaName: getSchemaName(this.schema) });
2762
+ const transaction = this.pool.transaction();
1800
2763
  try {
1801
- const rowsResult = await dataRequest.query(dataQuery);
1802
- const rows = rowsResult.recordset;
1803
- const traces = rows.map((row) => ({
1804
- id: row.id,
1805
- parentSpanId: row.parentSpanId,
1806
- traceId: row.traceId,
1807
- name: row.name,
1808
- scope: row.scope,
1809
- kind: row.kind,
1810
- status: JSON.parse(row.status),
1811
- events: JSON.parse(row.events),
1812
- links: JSON.parse(row.links),
1813
- attributes: JSON.parse(row.attributes),
1814
- startTime: row.startTime,
1815
- endTime: row.endTime,
1816
- other: row.other,
1817
- createdAt: row.createdAt
1818
- }));
1819
- return {
1820
- traces,
1821
- total,
1822
- page,
1823
- perPage,
1824
- hasMore: currentOffset + traces.length < total
1825
- };
2764
+ await transaction.begin();
2765
+ const selectRequest = new sql2__default.default.Request(transaction);
2766
+ selectRequest.input("workflow_name", workflowName);
2767
+ selectRequest.input("run_id", runId);
2768
+ const existingSnapshotResult = await selectRequest.query(
2769
+ `SELECT snapshot FROM ${table} WITH (UPDLOCK, HOLDLOCK) WHERE workflow_name = @workflow_name AND run_id = @run_id`
2770
+ );
2771
+ if (!existingSnapshotResult.recordset || existingSnapshotResult.recordset.length === 0) {
2772
+ await transaction.rollback();
2773
+ return void 0;
2774
+ }
2775
+ const existingSnapshot = existingSnapshotResult.recordset[0].snapshot;
2776
+ const snapshot = typeof existingSnapshot === "string" ? JSON.parse(existingSnapshot) : existingSnapshot;
2777
+ if (!snapshot || !snapshot?.context) {
2778
+ await transaction.rollback();
2779
+ throw new error.MastraError(
2780
+ {
2781
+ id: "MASTRA_STORAGE_MSSQL_STORE_UPDATE_WORKFLOW_STATE_SNAPSHOT_NOT_FOUND",
2782
+ domain: error.ErrorDomain.STORAGE,
2783
+ category: error.ErrorCategory.SYSTEM,
2784
+ details: {
2785
+ workflowName,
2786
+ runId
2787
+ }
2788
+ },
2789
+ new Error(`Snapshot not found for runId ${runId}`)
2790
+ );
2791
+ }
2792
+ const updatedSnapshot = { ...snapshot, ...opts };
2793
+ const updateRequest = new sql2__default.default.Request(transaction);
2794
+ updateRequest.input("snapshot", JSON.stringify(updatedSnapshot));
2795
+ updateRequest.input("workflow_name", workflowName);
2796
+ updateRequest.input("run_id", runId);
2797
+ updateRequest.input("updatedAt", sql2__default.default.DateTime2, /* @__PURE__ */ new Date());
2798
+ await updateRequest.query(
2799
+ `UPDATE ${table} SET snapshot = @snapshot, [updatedAt] = @updatedAt WHERE workflow_name = @workflow_name AND run_id = @run_id`
2800
+ );
2801
+ await transaction.commit();
2802
+ return updatedSnapshot;
1826
2803
  } catch (error$1) {
2804
+ try {
2805
+ await transaction.rollback();
2806
+ } catch {
2807
+ }
1827
2808
  throw new error.MastraError(
1828
2809
  {
1829
- id: "MASTRA_STORAGE_MSSQL_STORE_GET_TRACES_PAGINATED_FAILED_TO_RETRIEVE_TRACES",
2810
+ id: "MASTRA_STORAGE_MSSQL_STORE_UPDATE_WORKFLOW_STATE_FAILED",
1830
2811
  domain: error.ErrorDomain.STORAGE,
1831
2812
  category: error.ErrorCategory.THIRD_PARTY,
1832
2813
  details: {
1833
- name: args.name ?? "",
1834
- scope: args.scope ?? ""
2814
+ workflowName,
2815
+ runId
1835
2816
  }
1836
2817
  },
1837
2818
  error$1
1838
2819
  );
1839
2820
  }
1840
2821
  }
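Note: updateWorkflowResults and updateWorkflowState both follow the same serialized read-modify-write pattern: SELECT the snapshot WITH (UPDLOCK, HOLDLOCK) inside a transaction so concurrent writers for the same run queue up, mutate the parsed JSON in memory, then write it back before committing. A simplified sketch of that pattern, using a plain UPDATE and assuming the row already exists and `table` has been validated (the real methods use MERGE so they can also insert a fresh snapshot):

const sql = require('mssql');

async function mutateSnapshot(pool, table, workflowName, runId, mutate) {
  const tx = pool.transaction();
  await tx.begin();
  try {
    const read = new sql.Request(tx);
    read.input('workflow_name', workflowName);
    read.input('run_id', runId);
    // UPDLOCK + HOLDLOCK: a second caller for the same run blocks here instead
    // of reading the old snapshot and overwriting the first caller's write.
    const { recordset } = await read.query(
      `SELECT snapshot FROM ${table} WITH (UPDLOCK, HOLDLOCK)
       WHERE workflow_name = @workflow_name AND run_id = @run_id`
    );
    const snapshot = JSON.parse(recordset[0].snapshot);
    const updated = mutate(snapshot);
    const write = new sql.Request(tx);
    write.input('workflow_name', workflowName);
    write.input('run_id', runId);
    write.input('snapshot', JSON.stringify(updated));
    write.input('updatedAt', sql.DateTime2, new Date());
    await write.query(
      `UPDATE ${table} SET snapshot = @snapshot, [updatedAt] = @updatedAt
       WHERE workflow_name = @workflow_name AND run_id = @run_id`
    );
    await tx.commit();
    return updated;
  } catch (err) {
    await tx.rollback();
    throw err;
  }
}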
1841
- async batchTraceInsert({ records }) {
1842
- this.logger.debug("Batch inserting traces", { count: records.length });
1843
- await this.operations.batchInsert({
1844
- tableName: storage.TABLE_TRACES,
1845
- records
1846
- });
1847
- }
1848
- };
1849
- function parseWorkflowRun(row) {
1850
- let parsedSnapshot = row.snapshot;
1851
- if (typeof parsedSnapshot === "string") {
1852
- try {
1853
- parsedSnapshot = JSON.parse(row.snapshot);
1854
- } catch (e) {
1855
- console.warn(`Failed to parse snapshot for workflow ${row.workflow_name}: ${e}`);
1856
- }
1857
- }
1858
- return {
1859
- workflowName: row.workflow_name,
1860
- runId: row.run_id,
1861
- snapshot: parsedSnapshot,
1862
- createdAt: row.createdAt,
1863
- updatedAt: row.updatedAt,
1864
- resourceId: row.resourceId
1865
- };
1866
- }
1867
- var WorkflowsMSSQL = class extends storage.WorkflowsStorage {
1868
- pool;
1869
- operations;
1870
- schema;
1871
- constructor({
1872
- pool,
1873
- operations,
1874
- schema
1875
- }) {
1876
- super();
1877
- this.pool = pool;
1878
- this.operations = operations;
1879
- this.schema = schema;
1880
- }
1881
- updateWorkflowResults({
1882
- // workflowName,
1883
- // runId,
1884
- // stepId,
1885
- // result,
1886
- // runtimeContext,
1887
- }) {
1888
- throw new Error("Method not implemented.");
1889
- }
1890
- updateWorkflowState({
1891
- // workflowName,
1892
- // runId,
1893
- // opts,
1894
- }) {
1895
- throw new Error("Method not implemented.");
1896
- }
1897
2822
  async persistWorkflowSnapshot({
1898
2823
  workflowName,
1899
2824
  runId,
2825
+ resourceId,
1900
2826
  snapshot
1901
2827
  }) {
1902
2828
  const table = getTableName({ indexName: storage.TABLE_WORKFLOW_SNAPSHOT, schemaName: getSchemaName(this.schema) });
@@ -1905,6 +2831,7 @@ var WorkflowsMSSQL = class extends storage.WorkflowsStorage {
1905
2831
  const request = this.pool.request();
1906
2832
  request.input("workflow_name", workflowName);
1907
2833
  request.input("run_id", runId);
2834
+ request.input("resourceId", resourceId);
1908
2835
  request.input("snapshot", JSON.stringify(snapshot));
1909
2836
  request.input("createdAt", sql2__default.default.DateTime2, new Date(now));
1910
2837
  request.input("updatedAt", sql2__default.default.DateTime2, new Date(now));
@@ -1912,10 +2839,11 @@ var WorkflowsMSSQL = class extends storage.WorkflowsStorage {
1912
2839
  USING (SELECT @workflow_name AS workflow_name, @run_id AS run_id) AS src
1913
2840
  ON target.workflow_name = src.workflow_name AND target.run_id = src.run_id
1914
2841
  WHEN MATCHED THEN UPDATE SET
2842
+ resourceId = @resourceId,
1915
2843
  snapshot = @snapshot,
1916
2844
  [updatedAt] = @updatedAt
1917
- WHEN NOT MATCHED THEN INSERT (workflow_name, run_id, snapshot, [createdAt], [updatedAt])
1918
- VALUES (@workflow_name, @run_id, @snapshot, @createdAt, @updatedAt);`;
2845
+ WHEN NOT MATCHED THEN INSERT (workflow_name, run_id, resourceId, snapshot, [createdAt], [updatedAt])
2846
+ VALUES (@workflow_name, @run_id, @resourceId, @snapshot, @createdAt, @updatedAt);`;
1919
2847
  await request.query(mergeSql);
1920
2848
  } catch (error$1) {
1921
2849
  throw new error.MastraError(
@@ -1987,7 +2915,7 @@ var WorkflowsMSSQL = class extends storage.WorkflowsStorage {
1987
2915
  if (!result.recordset || result.recordset.length === 0) {
1988
2916
  return null;
1989
2917
  }
1990
- return parseWorkflowRun(result.recordset[0]);
2918
+ return this.parseWorkflowRun(result.recordset[0]);
1991
2919
  } catch (error$1) {
1992
2920
  throw new error.MastraError(
1993
2921
  {
@@ -2003,12 +2931,12 @@ var WorkflowsMSSQL = class extends storage.WorkflowsStorage {
2003
2931
  );
2004
2932
  }
2005
2933
  }
2006
- async getWorkflowRuns({
2934
+ async listWorkflowRuns({
2007
2935
  workflowName,
2008
2936
  fromDate,
2009
2937
  toDate,
2010
- limit,
2011
- offset,
2938
+ page,
2939
+ perPage,
2012
2940
  resourceId
2013
2941
  } = {}) {
2014
2942
  try {
@@ -2024,7 +2952,7 @@ var WorkflowsMSSQL = class extends storage.WorkflowsStorage {
2024
2952
  conditions.push(`[resourceId] = @resourceId`);
2025
2953
  paramMap["resourceId"] = resourceId;
2026
2954
  } else {
2027
- console.warn(`[${storage.TABLE_WORKFLOW_SNAPSHOT}] resourceId column not found. Skipping resourceId filter.`);
2955
+ this.logger?.warn?.(`[${storage.TABLE_WORKFLOW_SNAPSHOT}] resourceId column not found. Skipping resourceId filter.`);
2028
2956
  }
2029
2957
  }
2030
2958
  if (fromDate instanceof Date && !isNaN(fromDate.getTime())) {
@@ -2046,24 +2974,27 @@ var WorkflowsMSSQL = class extends storage.WorkflowsStorage {
2046
2974
  request.input(key, value);
2047
2975
  }
2048
2976
  });
2049
- if (limit !== void 0 && offset !== void 0) {
2977
+ const usePagination = typeof perPage === "number" && typeof page === "number";
2978
+ if (usePagination) {
2050
2979
  const countQuery = `SELECT COUNT(*) as count FROM ${tableName} ${whereClause}`;
2051
2980
  const countResult = await request.query(countQuery);
2052
2981
  total = Number(countResult.recordset[0]?.count || 0);
2053
2982
  }
2054
2983
  let query = `SELECT * FROM ${tableName} ${whereClause} ORDER BY [seq_id] DESC`;
2055
- if (limit !== void 0 && offset !== void 0) {
2056
- query += ` OFFSET @offset ROWS FETCH NEXT @limit ROWS ONLY`;
2057
- request.input("limit", limit);
2984
+ if (usePagination) {
2985
+ const normalizedPerPage = storage.normalizePerPage(perPage, Number.MAX_SAFE_INTEGER);
2986
+ const offset = page * normalizedPerPage;
2987
+ query += ` OFFSET @offset ROWS FETCH NEXT @perPage ROWS ONLY`;
2988
+ request.input("perPage", normalizedPerPage);
2058
2989
  request.input("offset", offset);
2059
2990
  }
2060
2991
  const result = await request.query(query);
2061
- const runs = (result.recordset || []).map((row) => parseWorkflowRun(row));
2992
+ const runs = (result.recordset || []).map((row) => this.parseWorkflowRun(row));
2062
2993
  return { runs, total: total || runs.length };
2063
2994
  } catch (error$1) {
2064
2995
  throw new error.MastraError(
2065
2996
  {
2066
- id: "MASTRA_STORAGE_MSSQL_STORE_GET_WORKFLOW_RUNS_FAILED",
2997
+ id: "MASTRA_STORAGE_MSSQL_STORE_LIST_WORKFLOW_RUNS_FAILED",
2067
2998
  domain: error.ErrorDomain.STORAGE,
2068
2999
  category: error.ErrorCategory.THIRD_PARTY,
2069
3000
  details: {
@@ -2106,19 +3037,17 @@ var MSSQLStore = class extends storage.MastraStorage {
2106
3037
  port: config.port,
2107
3038
  options: config.options || { encrypt: true, trustServerCertificate: true }
2108
3039
  });
2109
- const legacyEvals = new LegacyEvalsMSSQL({ pool: this.pool, schema: this.schema });
2110
3040
  const operations = new StoreOperationsMSSQL({ pool: this.pool, schemaName: this.schema });
2111
3041
  const scores = new ScoresMSSQL({ pool: this.pool, operations, schema: this.schema });
2112
- const traces = new TracesMSSQL({ pool: this.pool, operations, schema: this.schema });
2113
3042
  const workflows = new WorkflowsMSSQL({ pool: this.pool, operations, schema: this.schema });
2114
3043
  const memory = new MemoryMSSQL({ pool: this.pool, schema: this.schema, operations });
3044
+ const observability = new ObservabilityMSSQL({ pool: this.pool, operations, schema: this.schema });
2115
3045
  this.stores = {
2116
3046
  operations,
2117
3047
  scores,
2118
- traces,
2119
3048
  workflows,
2120
- legacyEvals,
2121
- memory
3049
+ memory,
3050
+ observability
2122
3051
  };
2123
3052
  } catch (e) {
2124
3053
  throw new error.MastraError(
@@ -2138,6 +3067,11 @@ var MSSQLStore = class extends storage.MastraStorage {
2138
3067
  try {
2139
3068
  await this.isConnected;
2140
3069
  await super.init();
3070
+ try {
3071
+ await this.stores.operations.createAutomaticIndexes();
3072
+ } catch (indexError) {
3073
+ this.logger?.warn?.("Failed to create indexes:", indexError);
3074
+ }
2141
3075
  } catch (error$1) {
2142
3076
  this.isConnected = null;
2143
3077
  throw new error.MastraError(
@@ -2164,28 +3098,12 @@ var MSSQLStore = class extends storage.MastraStorage {
  resourceWorkingMemory: true,
  hasColumn: true,
  createTable: true,
- deleteMessages: true
+ deleteMessages: true,
+ listScoresBySpan: true,
+ aiTracing: true,
+ indexManagement: true
  };
  }
- /** @deprecated use getEvals instead */
- async getEvalsByAgentName(agentName, type) {
- return this.stores.legacyEvals.getEvalsByAgentName(agentName, type);
- }
- async getEvals(options = {}) {
- return this.stores.legacyEvals.getEvals(options);
- }
- /**
- * @deprecated use getTracesPaginated instead
- */
- async getTraces(args) {
- return this.stores.traces.getTraces(args);
- }
- async getTracesPaginated(args) {
- return this.stores.traces.getTracesPaginated(args);
- }
- async batchTraceInsert({ records }) {
- return this.stores.traces.batchTraceInsert({ records });
- }
  async createTable({
  tableName,
  schema
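
The capability map gains listScoresBySpan, aiTracing and indexManagement flags, while the legacy eval and trace wrappers are removed. A hedged sketch of feature detection against the new flags; how `supports` is surfaced on the base class is an assumption, the flag names are taken from the diff:

const caps = store.supports;
if (caps.indexManagement) {
  // createIndex / listIndexes / describeIndex / dropIndex are available
}
if (caps.aiTracing) {
  // createAISpan / getAITrace / batchCreateAISpans etc. are available
}
if (caps.listScoresBySpan) {
  // listScoresBySpan is available
}
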
@@ -2220,15 +3138,6 @@ var MSSQLStore = class extends storage.MastraStorage {
  async getThreadById({ threadId }) {
  return this.stores.memory.getThreadById({ threadId });
  }
- /**
- * @deprecated use getThreadsByResourceIdPaginated instead
- */
- async getThreadsByResourceId(args) {
- return this.stores.memory.getThreadsByResourceId(args);
- }
- async getThreadsByResourceIdPaginated(args) {
- return this.stores.memory.getThreadsByResourceIdPaginated(args);
- }
  async saveThread({ thread }) {
  return this.stores.memory.saveThread({ thread });
  }
@@ -2242,17 +3151,14 @@ var MSSQLStore = class extends storage.MastraStorage {
  async deleteThread({ threadId }) {
  return this.stores.memory.deleteThread({ threadId });
  }
+ /**
+ * @deprecated use listMessages instead
+ */
  async getMessages(args) {
  return this.stores.memory.getMessages(args);
  }
- async getMessagesById({
- messageIds,
- format
- }) {
- return this.stores.memory.getMessagesById({ messageIds, format });
- }
- async getMessagesPaginated(args) {
- return this.stores.memory.getMessagesPaginated(args);
+ async listMessagesById({ messageIds }) {
+ return this.stores.memory.listMessagesById({ messageIds });
  }
  async saveMessages(args) {
  return this.stores.memory.saveMessages(args);
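
getMessagesById and getMessagesPaginated are folded into listMessagesById, which drops the format option, and getMessages is now marked deprecated in favor of listMessages. A minimal sketch of the replacement call; the return shape is not shown in this hunk, so it is left untyped:

// Formerly: getMessagesById({ messageIds, format })
const result = await store.listMessagesById({ messageIds: ["msg-1", "msg-2"] });
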
@@ -2286,9 +3192,9 @@ var MSSQLStore = class extends storage.MastraStorage {
  runId,
  stepId,
  result,
- runtimeContext
+ requestContext
  }) {
- return this.stores.workflows.updateWorkflowResults({ workflowName, runId, stepId, result, runtimeContext });
+ return this.stores.workflows.updateWorkflowResults({ workflowName, runId, stepId, result, requestContext });
  }
  async updateWorkflowState({
  workflowName,
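
updateWorkflowResults renames its runtimeContext argument to requestContext; the call is otherwise unchanged. A sketch with placeholder values:

await store.updateWorkflowResults({
  workflowName: "my-workflow",                // placeholder
  runId: "run-123",                           // placeholder
  stepId: "step-1",                           // placeholder
  result: { status: "success", output: {} },  // placeholder step result
  requestContext: {},                         // formerly `runtimeContext`
});
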
@@ -2300,9 +3206,10 @@ var MSSQLStore = class extends storage.MastraStorage {
  async persistWorkflowSnapshot({
  workflowName,
  runId,
+ resourceId,
  snapshot
  }) {
- return this.stores.workflows.persistWorkflowSnapshot({ workflowName, runId, snapshot });
+ return this.stores.workflows.persistWorkflowSnapshot({ workflowName, runId, resourceId, snapshot });
  }
  async loadWorkflowSnapshot({
  workflowName,
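
persistWorkflowSnapshot now also accepts a resourceId so snapshots can be associated with the owning resource. A sketch with placeholder values; whether resourceId may be omitted is not visible in this hunk:

await store.persistWorkflowSnapshot({
  workflowName: "my-workflow", // placeholder
  runId: "run-123",            // placeholder
  resourceId: "user-42",       // placeholder; new in this version
  snapshot: {} as any,         // the real snapshot type comes from @mastra/core
});
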
@@ -2310,15 +3217,15 @@ var MSSQLStore = class extends storage.MastraStorage {
  }) {
  return this.stores.workflows.loadWorkflowSnapshot({ workflowName, runId });
  }
- async getWorkflowRuns({
+ async listWorkflowRuns({
  workflowName,
  fromDate,
  toDate,
- limit,
- offset,
+ perPage,
+ page,
  resourceId
  } = {}) {
- return this.stores.workflows.getWorkflowRuns({ workflowName, fromDate, toDate, limit, offset, resourceId });
+ return this.stores.workflows.listWorkflowRuns({ workflowName, fromDate, toDate, perPage, page, resourceId });
  }
  async getWorkflowRunById({
  runId,
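
getWorkflowRuns becomes listWorkflowRuns and switches from limit/offset to page/perPage (zero-based page, per the offset math at the top of this section). The { runs, total } return shape is taken from that same hunk:

const { runs, total } = await store.listWorkflowRuns({
  workflowName: "my-workflow", // placeholder
  page: 0,
  perPage: 20,
});
console.log(`showing ${runs.length} of ${total} runs`);
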
@@ -2329,38 +3236,108 @@ var MSSQLStore = class extends storage.MastraStorage {
  async close() {
  await this.pool.close();
  }
+ /**
+ * Index Management
+ */
+ async createIndex(options) {
+ return this.stores.operations.createIndex(options);
+ }
+ async listIndexes(tableName) {
+ return this.stores.operations.listIndexes(tableName);
+ }
+ async describeIndex(indexName) {
+ return this.stores.operations.describeIndex(indexName);
+ }
+ async dropIndex(indexName) {
+ return this.stores.operations.dropIndex(indexName);
+ }
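
The new index-management methods are thin delegations to the operations store. A hedged sketch of the surface: listIndexes, describeIndex and dropIndex take the plain string arguments shown above, while the options object accepted by createIndex is not visible in this diff, so its shape below is purely an assumption. Names are illustrative placeholders.

const indexes = await store.listIndexes("mastra_messages");
const detail = await store.describeIndex("idx_messages_thread_id");
await store.dropIndex("idx_messages_thread_id");
// Assumed options shape -- not confirmed by this diff:
await store.createIndex({
  name: "idx_messages_thread_id",
  table: "mastra_messages",
  columns: ["thread_id"],
});
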
+ /**
+ * AI Tracing / Observability
+ */
+ getObservabilityStore() {
+ if (!this.stores.observability) {
+ throw new error.MastraError({
+ id: "MSSQL_STORE_OBSERVABILITY_NOT_INITIALIZED",
+ domain: error.ErrorDomain.STORAGE,
+ category: error.ErrorCategory.SYSTEM,
+ text: "Observability storage is not initialized"
+ });
+ }
+ return this.stores.observability;
+ }
+ async createAISpan(span) {
+ return this.getObservabilityStore().createAISpan(span);
+ }
+ async updateAISpan({
+ spanId,
+ traceId,
+ updates
+ }) {
+ return this.getObservabilityStore().updateAISpan({ spanId, traceId, updates });
+ }
+ async getAITrace(traceId) {
+ return this.getObservabilityStore().getAITrace(traceId);
+ }
+ async getAITracesPaginated(args) {
+ return this.getObservabilityStore().getAITracesPaginated(args);
+ }
+ async batchCreateAISpans(args) {
+ return this.getObservabilityStore().batchCreateAISpans(args);
+ }
+ async batchUpdateAISpans(args) {
+ return this.getObservabilityStore().batchUpdateAISpans(args);
+ }
+ async batchDeleteAITraces(args) {
+ return this.getObservabilityStore().batchDeleteAITraces(args);
+ }
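
Every AI-tracing call is routed through getObservabilityStore(), which throws MSSQL_STORE_OBSERVABILITY_NOT_INITIALIZED if the observability domain store was never wired up. A sketch of the span lifecycle; the argument shapes are assumptions, since the real AI span types come from @mastra/core and are not shown in this diff:

// Field names on the span/update objects are placeholders.
await store.createAISpan({ traceId: "trace-1", spanId: "span-1", name: "llm-call" } as any);
await store.updateAISpan({
  spanId: "span-1",
  traceId: "trace-1",
  updates: {} as any, // e.g. end time, output
});
const trace = await store.getAITrace("trace-1");
await store.batchDeleteAITraces({ traceIds: ["trace-1"] } as any);
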
  /**
  * Scorers
  */
  async getScoreById({ id: _id }) {
  return this.stores.scores.getScoreById({ id: _id });
  }
- async getScoresByScorerId({
+ async listScoresByScorerId({
  scorerId: _scorerId,
- pagination: _pagination
+ pagination: _pagination,
+ entityId: _entityId,
+ entityType: _entityType,
+ source: _source
  }) {
- return this.stores.scores.getScoresByScorerId({ scorerId: _scorerId, pagination: _pagination });
+ return this.stores.scores.listScoresByScorerId({
+ scorerId: _scorerId,
+ pagination: _pagination,
+ entityId: _entityId,
+ entityType: _entityType,
+ source: _source
+ });
  }
  async saveScore(_score) {
  return this.stores.scores.saveScore(_score);
  }
- async getScoresByRunId({
+ async listScoresByRunId({
  runId: _runId,
  pagination: _pagination
  }) {
- return this.stores.scores.getScoresByRunId({ runId: _runId, pagination: _pagination });
+ return this.stores.scores.listScoresByRunId({ runId: _runId, pagination: _pagination });
  }
- async getScoresByEntityId({
+ async listScoresByEntityId({
  entityId: _entityId,
  entityType: _entityType,
  pagination: _pagination
  }) {
- return this.stores.scores.getScoresByEntityId({
+ return this.stores.scores.listScoresByEntityId({
  entityId: _entityId,
  entityType: _entityType,
  pagination: _pagination
  });
  }
+ async listScoresBySpan({
+ traceId,
+ spanId,
+ pagination: _pagination
+ }) {
+ return this.stores.scores.listScoresBySpan({ traceId, spanId, pagination: _pagination });
+ }
  };
 
  exports.MSSQLStore = MSSQLStore;
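
The scorer queries are renamed from getScoresBy* to listScoresBy*, listScoresByScorerId gains entityId/entityType/source filters, and listScoresBySpan is new. A hedged usage sketch; the pagination shape and the filter values below are assumptions, not taken from this diff:

const byScorer = await store.listScoresByScorerId({
  scorerId: "my-scorer",                // placeholder
  entityType: "AGENT",                  // assumed filter value
  entityId: "my-agent",                 // placeholder
  source: "LIVE",                       // assumed filter value
  pagination: { page: 0, perPage: 20 }, // assumed pagination shape
});
const bySpan = await store.listScoresBySpan({
  traceId: "trace-1",                   // placeholder
  spanId: "span-1",                     // placeholder
  pagination: { page: 0, perPage: 20 },
});
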