@mastra/mssql 0.0.0-just-snapshot-20251014192224 → 0.0.0-main-test-05-11-2025-2-20251105214713

This diff represents the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
package/dist/index.cjs CHANGED
@@ -3,9 +3,10 @@
3
3
  var error = require('@mastra/core/error');
4
4
  var storage = require('@mastra/core/storage');
5
5
  var sql2 = require('mssql');
6
- var utils = require('@mastra/core/utils');
7
6
  var agent = require('@mastra/core/agent');
8
- var scores = require('@mastra/core/scores');
7
+ var utils = require('@mastra/core/utils');
8
+ var crypto = require('crypto');
9
+ var evals = require('@mastra/core/evals');
9
10
 
10
11
  function _interopDefault (e) { return e && e.__esModule ? e : { default: e }; }
11
12
 
@@ -21,154 +22,71 @@ function getTableName({ indexName, schemaName }) {
21
22
  const quotedSchemaName = schemaName;
22
23
  return quotedSchemaName ? `${quotedSchemaName}.${quotedIndexName}` : quotedIndexName;
23
24
  }
24
-
25
- // src/storage/domains/legacy-evals/index.ts
26
- function transformEvalRow(row) {
27
- let testInfoValue = null, resultValue = null;
28
- if (row.test_info) {
29
- try {
30
- testInfoValue = typeof row.test_info === "string" ? JSON.parse(row.test_info) : row.test_info;
31
- } catch {
32
- }
25
+ function buildDateRangeFilter(dateRange, fieldName) {
26
+ const filters = {};
27
+ if (dateRange?.start) {
28
+ filters[`${fieldName}_gte`] = dateRange.start;
33
29
  }
34
- if (row.test_info) {
35
- try {
36
- resultValue = typeof row.result === "string" ? JSON.parse(row.result) : row.result;
37
- } catch {
38
- }
30
+ if (dateRange?.end) {
31
+ filters[`${fieldName}_lte`] = dateRange.end;
39
32
  }
33
+ return filters;
34
+ }
35
+ function prepareWhereClause(filters, _schema) {
36
+ const conditions = [];
37
+ const params = {};
38
+ let paramIndex = 1;
39
+ Object.entries(filters).forEach(([key, value]) => {
40
+ if (value === void 0) return;
41
+ const paramName = `p${paramIndex++}`;
42
+ if (key.endsWith("_gte")) {
43
+ const fieldName = key.slice(0, -4);
44
+ conditions.push(`[${utils.parseSqlIdentifier(fieldName, "field name")}] >= @${paramName}`);
45
+ params[paramName] = value instanceof Date ? value.toISOString() : value;
46
+ } else if (key.endsWith("_lte")) {
47
+ const fieldName = key.slice(0, -4);
48
+ conditions.push(`[${utils.parseSqlIdentifier(fieldName, "field name")}] <= @${paramName}`);
49
+ params[paramName] = value instanceof Date ? value.toISOString() : value;
50
+ } else if (value === null) {
51
+ conditions.push(`[${utils.parseSqlIdentifier(key, "field name")}] IS NULL`);
52
+ } else {
53
+ conditions.push(`[${utils.parseSqlIdentifier(key, "field name")}] = @${paramName}`);
54
+ params[paramName] = value instanceof Date ? value.toISOString() : value;
55
+ }
56
+ });
40
57
  return {
41
- agentName: row.agent_name,
42
- input: row.input,
43
- output: row.output,
44
- result: resultValue,
45
- metricName: row.metric_name,
46
- instructions: row.instructions,
47
- testInfo: testInfoValue,
48
- globalRunId: row.global_run_id,
49
- runId: row.run_id,
50
- createdAt: row.created_at
58
+ sql: conditions.length > 0 ? ` WHERE ${conditions.join(" AND ")}` : "",
59
+ params
51
60
  };
52
61
  }
53
- var LegacyEvalsMSSQL = class extends storage.LegacyEvalsStorage {
54
- pool;
55
- schema;
56
- constructor({ pool, schema }) {
57
- super();
58
- this.pool = pool;
59
- this.schema = schema;
60
- }
61
- /** @deprecated use getEvals instead */
62
- async getEvalsByAgentName(agentName, type) {
63
- try {
64
- let query = `SELECT * FROM ${getTableName({ indexName: storage.TABLE_EVALS, schemaName: getSchemaName(this.schema) })} WHERE agent_name = @p1`;
65
- if (type === "test") {
66
- query += " AND test_info IS NOT NULL AND JSON_VALUE(test_info, '$.testPath') IS NOT NULL";
67
- } else if (type === "live") {
68
- query += " AND (test_info IS NULL OR JSON_VALUE(test_info, '$.testPath') IS NULL)";
69
- }
70
- query += " ORDER BY created_at DESC";
71
- const request = this.pool.request();
72
- request.input("p1", agentName);
73
- const result = await request.query(query);
74
- const rows = result.recordset;
75
- return typeof transformEvalRow === "function" ? rows?.map((row) => transformEvalRow(row)) ?? [] : rows ?? [];
76
- } catch (error) {
77
- if (error && error.number === 208 && error.message && error.message.includes("Invalid object name")) {
78
- return [];
79
- }
80
- console.error("Failed to get evals for the specified agent: " + error?.message);
81
- throw error;
82
- }
83
- }
84
- async getEvals(options = {}) {
85
- const { agentName, type, page = 0, perPage = 100, dateRange } = options;
86
- const fromDate = dateRange?.start;
87
- const toDate = dateRange?.end;
88
- const where = [];
89
- const params = {};
90
- if (agentName) {
91
- where.push("agent_name = @agentName");
92
- params["agentName"] = agentName;
93
- }
94
- if (type === "test") {
95
- where.push("test_info IS NOT NULL AND JSON_VALUE(test_info, '$.testPath') IS NOT NULL");
96
- } else if (type === "live") {
97
- where.push("(test_info IS NULL OR JSON_VALUE(test_info, '$.testPath') IS NULL)");
98
- }
99
- if (fromDate instanceof Date && !isNaN(fromDate.getTime())) {
100
- where.push(`[created_at] >= @fromDate`);
101
- params[`fromDate`] = fromDate.toISOString();
102
- }
103
- if (toDate instanceof Date && !isNaN(toDate.getTime())) {
104
- where.push(`[created_at] <= @toDate`);
105
- params[`toDate`] = toDate.toISOString();
106
- }
107
- const whereClause = where.length > 0 ? `WHERE ${where.join(" AND ")}` : "";
108
- const tableName = getTableName({ indexName: storage.TABLE_EVALS, schemaName: getSchemaName(this.schema) });
109
- const offset = page * perPage;
110
- const countQuery = `SELECT COUNT(*) as total FROM ${tableName} ${whereClause}`;
111
- const dataQuery = `SELECT * FROM ${tableName} ${whereClause} ORDER BY seq_id DESC OFFSET @offset ROWS FETCH NEXT @perPage ROWS ONLY`;
112
- try {
113
- const countReq = this.pool.request();
114
- Object.entries(params).forEach(([key, value]) => {
115
- if (value instanceof Date) {
116
- countReq.input(key, sql2__default.default.DateTime, value);
117
- } else {
118
- countReq.input(key, value);
119
- }
120
- });
121
- const countResult = await countReq.query(countQuery);
122
- const total = countResult.recordset[0]?.total || 0;
123
- if (total === 0) {
124
- return {
125
- evals: [],
126
- total: 0,
127
- page,
128
- perPage,
129
- hasMore: false
130
- };
62
+ function transformFromSqlRow({
63
+ tableName,
64
+ sqlRow
65
+ }) {
66
+ const schema = storage.TABLE_SCHEMAS[tableName];
67
+ const result = {};
68
+ Object.entries(sqlRow).forEach(([key, value]) => {
69
+ const columnSchema = schema?.[key];
70
+ if (columnSchema?.type === "jsonb" && typeof value === "string") {
71
+ try {
72
+ result[key] = JSON.parse(value);
73
+ } catch {
74
+ result[key] = value;
131
75
  }
132
- const req = this.pool.request();
133
- Object.entries(params).forEach(([key, value]) => {
134
- if (value instanceof Date) {
135
- req.input(key, sql2__default.default.DateTime, value);
136
- } else {
137
- req.input(key, value);
138
- }
139
- });
140
- req.input("offset", offset);
141
- req.input("perPage", perPage);
142
- const result = await req.query(dataQuery);
143
- const rows = result.recordset;
144
- return {
145
- evals: rows?.map((row) => transformEvalRow(row)) ?? [],
146
- total,
147
- page,
148
- perPage,
149
- hasMore: offset + (rows?.length ?? 0) < total
150
- };
151
- } catch (error$1) {
152
- const mastraError = new error.MastraError(
153
- {
154
- id: "MASTRA_STORAGE_MSSQL_STORE_GET_EVALS_FAILED",
155
- domain: error.ErrorDomain.STORAGE,
156
- category: error.ErrorCategory.THIRD_PARTY,
157
- details: {
158
- agentName: agentName || "all",
159
- type: type || "all",
160
- page,
161
- perPage
162
- }
163
- },
164
- error$1
165
- );
166
- this.logger?.error?.(mastraError.toString());
167
- this.logger?.trackException(mastraError);
168
- throw mastraError;
76
+ } else if (columnSchema?.type === "timestamp" && value && typeof value === "string") {
77
+ result[key] = new Date(value);
78
+ } else if (columnSchema?.type === "timestamp" && value instanceof Date) {
79
+ result[key] = value;
80
+ } else if (columnSchema?.type === "boolean") {
81
+ result[key] = Boolean(value);
82
+ } else {
83
+ result[key] = value;
169
84
  }
170
- }
171
- };
85
+ });
86
+ return result;
87
+ }
88
+
89
+ // src/storage/domains/memory/index.ts
172
90
  var MemoryMSSQL = class extends storage.MemoryStorage {
173
91
  pool;
174
92
  schema;
@@ -186,7 +104,7 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
186
104
  });
187
105
  const cleanMessages = messagesWithParsedContent.map(({ seq_id, ...rest }) => rest);
188
106
  const list = new agent.MessageList().add(cleanMessages, "memory");
189
- return format === "v2" ? list.get.all.v2() : list.get.all.v1();
107
+ return format === "v2" ? list.get.all.db() : list.get.all.v1();
190
108
  }
191
109
  constructor({
192
110
  pool,
@@ -200,7 +118,7 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
200
118
  }
201
119
  async getThreadById({ threadId }) {
202
120
  try {
203
- const sql7 = `SELECT
121
+ const sql5 = `SELECT
204
122
  id,
205
123
  [resourceId],
206
124
  title,
@@ -211,7 +129,7 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
211
129
  WHERE id = @threadId`;
212
130
  const request = this.pool.request();
213
131
  request.input("threadId", threadId);
214
- const resultSet = await request.query(sql7);
132
+ const resultSet = await request.query(sql5);
215
133
  const thread = resultSet.recordset[0] || null;
216
134
  if (!thread) {
217
135
  return null;
@@ -236,11 +154,12 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
236
154
  );
237
155
  }
238
156
  }
239
- async getThreadsByResourceIdPaginated(args) {
240
- const { resourceId, page = 0, perPage: perPageInput, orderBy = "createdAt", sortDirection = "DESC" } = args;
157
+ async listThreadsByResourceId(args) {
158
+ const { resourceId, page = 0, perPage: perPageInput, orderBy } = args;
159
+ const perPage = storage.normalizePerPage(perPageInput, 100);
160
+ const { offset, perPage: perPageForResponse } = storage.calculatePagination(page, perPageInput, perPage);
161
+ const { field, direction } = this.parseOrderBy(orderBy);
241
162
  try {
242
- const perPage = perPageInput !== void 0 ? perPageInput : 100;
243
- const currentOffset = page * perPage;
244
163
  const baseQuery = `FROM ${getTableName({ indexName: storage.TABLE_THREADS, schemaName: getSchemaName(this.schema) })} WHERE [resourceId] = @resourceId`;
245
164
  const countQuery = `SELECT COUNT(*) as count ${baseQuery}`;
246
165
  const countRequest = this.pool.request();
@@ -252,16 +171,22 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
252
171
  threads: [],
253
172
  total: 0,
254
173
  page,
255
- perPage,
174
+ perPage: perPageForResponse,
256
175
  hasMore: false
257
176
  };
258
177
  }
259
- const orderByField = orderBy === "createdAt" ? "[createdAt]" : "[updatedAt]";
260
- const dataQuery = `SELECT id, [resourceId], title, metadata, [createdAt], [updatedAt] ${baseQuery} ORDER BY ${orderByField} ${sortDirection} OFFSET @offset ROWS FETCH NEXT @perPage ROWS ONLY`;
178
+ const orderByField = field === "createdAt" ? "[createdAt]" : "[updatedAt]";
179
+ const dir = (direction || "DESC").toUpperCase() === "ASC" ? "ASC" : "DESC";
180
+ const limitValue = perPageInput === false ? total : perPage;
181
+ const dataQuery = `SELECT id, [resourceId], title, metadata, [createdAt], [updatedAt] ${baseQuery} ORDER BY ${orderByField} ${dir} OFFSET @offset ROWS FETCH NEXT @perPage ROWS ONLY`;
261
182
  const dataRequest = this.pool.request();
262
183
  dataRequest.input("resourceId", resourceId);
263
- dataRequest.input("perPage", perPage);
264
- dataRequest.input("offset", currentOffset);
184
+ dataRequest.input("offset", offset);
185
+ if (limitValue > 2147483647) {
186
+ dataRequest.input("perPage", sql2__default.default.BigInt, limitValue);
187
+ } else {
188
+ dataRequest.input("perPage", limitValue);
189
+ }
265
190
  const rowsResult = await dataRequest.query(dataQuery);
266
191
  const rows = rowsResult.recordset || [];
267
192
  const threads = rows.map((thread) => ({
@@ -274,13 +199,13 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
274
199
  threads,
275
200
  total,
276
201
  page,
277
- perPage,
278
- hasMore: currentOffset + threads.length < total
202
+ perPage: perPageForResponse,
203
+ hasMore: perPageInput === false ? false : offset + perPage < total
279
204
  };
280
205
  } catch (error$1) {
281
206
  const mastraError = new error.MastraError(
282
207
  {
283
- id: "MASTRA_STORAGE_MSSQL_STORE_GET_THREADS_BY_RESOURCE_ID_PAGINATED_FAILED",
208
+ id: "MASTRA_STORAGE_MSSQL_STORE_LIST_THREADS_BY_RESOURCE_ID_FAILED",
284
209
  domain: error.ErrorDomain.STORAGE,
285
210
  category: error.ErrorCategory.THIRD_PARTY,
286
211
  details: {
@@ -292,7 +217,13 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
292
217
  );
293
218
  this.logger?.error?.(mastraError.toString());
294
219
  this.logger?.trackException?.(mastraError);
295
- return { threads: [], total: 0, page, perPage: perPageInput || 100, hasMore: false };
220
+ return {
221
+ threads: [],
222
+ total: 0,
223
+ page,
224
+ perPage: perPageForResponse,
225
+ hasMore: false
226
+ };
296
227
  }
297
228
  }
298
229
  async saveThread({ thread }) {
@@ -314,7 +245,12 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
314
245
  req.input("id", thread.id);
315
246
  req.input("resourceId", thread.resourceId);
316
247
  req.input("title", thread.title);
317
- req.input("metadata", thread.metadata ? JSON.stringify(thread.metadata) : null);
248
+ const metadata = thread.metadata ? JSON.stringify(thread.metadata) : null;
249
+ if (metadata === null) {
250
+ req.input("metadata", sql2__default.default.NVarChar, null);
251
+ } else {
252
+ req.input("metadata", metadata);
253
+ }
318
254
  req.input("createdAt", sql2__default.default.DateTime2, thread.createdAt);
319
255
  req.input("updatedAt", sql2__default.default.DateTime2, thread.updatedAt);
320
256
  await req.query(mergeSql);
@@ -333,30 +269,6 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
333
269
  );
334
270
  }
335
271
  }
336
- /**
337
- * @deprecated use getThreadsByResourceIdPaginated instead
338
- */
339
- async getThreadsByResourceId(args) {
340
- const { resourceId, orderBy = "createdAt", sortDirection = "DESC" } = args;
341
- try {
342
- const baseQuery = `FROM ${getTableName({ indexName: storage.TABLE_THREADS, schemaName: getSchemaName(this.schema) })} WHERE [resourceId] = @resourceId`;
343
- const orderByField = orderBy === "createdAt" ? "[createdAt]" : "[updatedAt]";
344
- const dataQuery = `SELECT id, [resourceId], title, metadata, [createdAt], [updatedAt] ${baseQuery} ORDER BY ${orderByField} ${sortDirection}`;
345
- const request = this.pool.request();
346
- request.input("resourceId", resourceId);
347
- const resultSet = await request.query(dataQuery);
348
- const rows = resultSet.recordset || [];
349
- return rows.map((thread) => ({
350
- ...thread,
351
- metadata: typeof thread.metadata === "string" ? JSON.parse(thread.metadata) : thread.metadata,
352
- createdAt: thread.createdAt,
353
- updatedAt: thread.updatedAt
354
- }));
355
- } catch (error) {
356
- this.logger?.error?.(`Error getting threads for resource ${resourceId}:`, error);
357
- return [];
358
- }
359
- }
360
272
  /**
361
273
  * Updates a thread's title and metadata, merging with existing metadata. Returns the updated thread.
362
274
  */
@@ -384,7 +296,7 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
384
296
  };
385
297
  try {
386
298
  const table = getTableName({ indexName: storage.TABLE_THREADS, schemaName: getSchemaName(this.schema) });
387
- const sql7 = `UPDATE ${table}
299
+ const sql5 = `UPDATE ${table}
388
300
  SET title = @title,
389
301
  metadata = @metadata,
390
302
  [updatedAt] = @updatedAt
@@ -395,7 +307,7 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
395
307
  req.input("title", title);
396
308
  req.input("metadata", JSON.stringify(mergedMetadata));
397
309
  req.input("updatedAt", /* @__PURE__ */ new Date());
398
- const result = await req.query(sql7);
310
+ const result = await req.query(sql5);
399
311
  let thread = result.recordset && result.recordset[0];
400
312
  if (thread && "seq_id" in thread) {
401
313
  const { seq_id, ...rest } = thread;
@@ -465,11 +377,9 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
465
377
  }
466
378
  async _getIncludedMessages({
467
379
  threadId,
468
- selectBy,
469
- orderByStatement
380
+ include
470
381
  }) {
471
382
  if (!threadId.trim()) throw new Error("threadId must be a non-empty string");
472
- const include = selectBy?.include;
473
383
  if (!include) return null;
474
384
  const unionQueries = [];
475
385
  const paramValues = [];
@@ -494,7 +404,7 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
494
404
  m.[resourceId],
495
405
  m.seq_id
496
406
  FROM (
497
- SELECT *, ROW_NUMBER() OVER (${orderByStatement}) as row_num
407
+ SELECT *, ROW_NUMBER() OVER (ORDER BY [createdAt] ASC) as row_num
498
408
  FROM ${getTableName({ indexName: storage.TABLE_MESSAGES, schemaName: getSchemaName(this.schema) })}
499
409
  WHERE [thread_id] = ${pThreadId}
500
410
  ) AS m
@@ -502,15 +412,17 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
502
412
  OR EXISTS (
503
413
  SELECT 1
504
414
  FROM (
505
- SELECT *, ROW_NUMBER() OVER (${orderByStatement}) as row_num
415
+ SELECT *, ROW_NUMBER() OVER (ORDER BY [createdAt] ASC) as row_num
506
416
  FROM ${getTableName({ indexName: storage.TABLE_MESSAGES, schemaName: getSchemaName(this.schema) })}
507
417
  WHERE [thread_id] = ${pThreadId}
508
418
  ) AS target
509
419
  WHERE target.id = ${pId}
510
420
  AND (
511
- (m.row_num <= target.row_num + ${pPrev} AND m.row_num > target.row_num)
421
+ -- Get previous messages (messages that come BEFORE the target)
422
+ (m.row_num < target.row_num AND m.row_num >= target.row_num - ${pPrev})
512
423
  OR
513
- (m.row_num >= target.row_num - ${pNext} AND m.row_num < target.row_num)
424
+ -- Get next messages (messages that come AFTER the target)
425
+ (m.row_num > target.row_num AND m.row_num <= target.row_num + ${pNext})
514
426
  )
515
427
  )
516
428
  `
@@ -539,34 +451,16 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
539
451
  });
540
452
  return dedupedRows;
541
453
  }
542
- async getMessages(args) {
543
- const { threadId, resourceId, format, selectBy } = args;
454
+ async listMessagesById({ messageIds }) {
455
+ if (messageIds.length === 0) return { messages: [] };
544
456
  const selectStatement = `SELECT seq_id, id, content, role, type, [createdAt], thread_id AS threadId, resourceId`;
545
457
  const orderByStatement = `ORDER BY [seq_id] DESC`;
546
- const limit = storage.resolveMessageLimit({ last: selectBy?.last, defaultLimit: 40 });
547
458
  try {
548
- if (!threadId.trim()) throw new Error("threadId must be a non-empty string");
549
459
  let rows = [];
550
- const include = selectBy?.include || [];
551
- if (include?.length) {
552
- const includeMessages = await this._getIncludedMessages({ threadId, selectBy, orderByStatement });
553
- if (includeMessages) {
554
- rows.push(...includeMessages);
555
- }
556
- }
557
- const excludeIds = rows.map((m) => m.id).filter(Boolean);
558
- let query = `${selectStatement} FROM ${getTableName({ indexName: storage.TABLE_MESSAGES, schemaName: getSchemaName(this.schema) })} WHERE [thread_id] = @threadId`;
460
+ let query = `${selectStatement} FROM ${getTableName({ indexName: storage.TABLE_MESSAGES, schemaName: getSchemaName(this.schema) })} WHERE [id] IN (${messageIds.map((_, i) => `@id${i}`).join(", ")})`;
559
461
  const request = this.pool.request();
560
- request.input("threadId", threadId);
561
- if (excludeIds.length > 0) {
562
- const excludeParams = excludeIds.map((_, idx) => `@id${idx}`);
563
- query += ` AND id NOT IN (${excludeParams.join(", ")})`;
564
- excludeIds.forEach((id, idx) => {
565
- request.input(`id${idx}`, id);
566
- });
567
- }
568
- query += ` ${orderByStatement} OFFSET 0 ROWS FETCH NEXT @limit ROWS ONLY`;
569
- request.input("limit", limit);
462
+ messageIds.forEach((id, i) => request.input(`id${i}`, id));
463
+ query += ` ${orderByStatement}`;
570
464
  const result = await request.query(query);
571
465
  const remainingRows = result.recordset || [];
572
466
  rows.push(...remainingRows);
@@ -574,153 +468,150 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
574
468
  const timeDiff = a.seq_id - b.seq_id;
575
469
  return timeDiff;
576
470
  });
577
- rows = rows.map(({ seq_id, ...rest }) => rest);
578
- return this._parseAndFormatMessages(rows, format);
471
+ const messagesWithParsedContent = rows.map((row) => {
472
+ if (typeof row.content === "string") {
473
+ try {
474
+ return { ...row, content: JSON.parse(row.content) };
475
+ } catch {
476
+ return row;
477
+ }
478
+ }
479
+ return row;
480
+ });
481
+ const cleanMessages = messagesWithParsedContent.map(({ seq_id, ...rest }) => rest);
482
+ const list = new agent.MessageList().add(cleanMessages, "memory");
483
+ return { messages: list.get.all.db() };
579
484
  } catch (error$1) {
580
485
  const mastraError = new error.MastraError(
581
486
  {
582
- id: "MASTRA_STORAGE_MSSQL_STORE_GET_MESSAGES_FAILED",
487
+ id: "MASTRA_STORAGE_MSSQL_STORE_LIST_MESSAGES_BY_ID_FAILED",
583
488
  domain: error.ErrorDomain.STORAGE,
584
489
  category: error.ErrorCategory.THIRD_PARTY,
585
490
  details: {
586
- threadId,
587
- resourceId: resourceId ?? ""
491
+ messageIds: JSON.stringify(messageIds)
588
492
  }
589
493
  },
590
494
  error$1
591
495
  );
592
496
  this.logger?.error?.(mastraError.toString());
593
- this.logger?.trackException(mastraError);
594
- return [];
497
+ this.logger?.trackException?.(mastraError);
498
+ return { messages: [] };
595
499
  }
596
500
  }
597
- async getMessagesById({
598
- messageIds,
599
- format
600
- }) {
601
- if (messageIds.length === 0) return [];
602
- const selectStatement = `SELECT seq_id, id, content, role, type, [createdAt], thread_id AS threadId, resourceId`;
603
- const orderByStatement = `ORDER BY [seq_id] DESC`;
604
- try {
605
- let rows = [];
606
- let query = `${selectStatement} FROM ${getTableName({ indexName: storage.TABLE_MESSAGES, schemaName: getSchemaName(this.schema) })} WHERE [id] IN (${messageIds.map((_, i) => `@id${i}`).join(", ")})`;
607
- const request = this.pool.request();
608
- messageIds.forEach((id, i) => request.input(`id${i}`, id));
609
- query += ` ${orderByStatement}`;
610
- const result = await request.query(query);
611
- const remainingRows = result.recordset || [];
612
- rows.push(...remainingRows);
613
- rows.sort((a, b) => {
614
- const timeDiff = a.seq_id - b.seq_id;
615
- return timeDiff;
616
- });
617
- rows = rows.map(({ seq_id, ...rest }) => rest);
618
- if (format === `v1`) return this._parseAndFormatMessages(rows, format);
619
- return this._parseAndFormatMessages(rows, `v2`);
620
- } catch (error$1) {
621
- const mastraError = new error.MastraError(
501
+ async listMessages(args) {
502
+ const { threadId, resourceId, include, filter, perPage: perPageInput, page = 0, orderBy } = args;
503
+ if (!threadId.trim()) {
504
+ throw new error.MastraError(
622
505
  {
623
- id: "MASTRA_STORAGE_MSSQL_STORE_GET_MESSAGES_BY_ID_FAILED",
506
+ id: "STORAGE_MSSQL_LIST_MESSAGES_INVALID_THREAD_ID",
624
507
  domain: error.ErrorDomain.STORAGE,
625
508
  category: error.ErrorCategory.THIRD_PARTY,
626
- details: {
627
- messageIds: JSON.stringify(messageIds)
628
- }
509
+ details: { threadId }
629
510
  },
630
- error$1
511
+ new Error("threadId must be a non-empty string")
631
512
  );
632
- this.logger?.error?.(mastraError.toString());
633
- this.logger?.trackException(mastraError);
634
- return [];
635
513
  }
636
- }
637
- async getMessagesPaginated(args) {
638
- const { threadId, resourceId, format, selectBy } = args;
639
- const { page = 0, perPage: perPageInput, dateRange } = selectBy?.pagination || {};
514
+ const perPage = storage.normalizePerPage(perPageInput, 40);
515
+ const { offset, perPage: perPageForResponse } = storage.calculatePagination(page, perPageInput, perPage);
640
516
  try {
641
- if (!threadId.trim()) throw new Error("threadId must be a non-empty string");
642
- const fromDate = dateRange?.start;
643
- const toDate = dateRange?.end;
517
+ const { field, direction } = this.parseOrderBy(orderBy, "ASC");
518
+ const orderByStatement = `ORDER BY [${field}] ${direction}`;
644
519
  const selectStatement = `SELECT seq_id, id, content, role, type, [createdAt], thread_id AS threadId, resourceId`;
645
- const orderByStatement = `ORDER BY [seq_id] DESC`;
646
- let messages = [];
647
- if (selectBy?.include?.length) {
648
- const includeMessages = await this._getIncludedMessages({ threadId, selectBy, orderByStatement });
649
- if (includeMessages) messages.push(...includeMessages);
650
- }
651
- const perPage = perPageInput !== void 0 ? perPageInput : storage.resolveMessageLimit({ last: selectBy?.last, defaultLimit: 40 });
652
- const currentOffset = page * perPage;
520
+ const tableName = getTableName({ indexName: storage.TABLE_MESSAGES, schemaName: getSchemaName(this.schema) });
653
521
  const conditions = ["[thread_id] = @threadId"];
654
522
  const request = this.pool.request();
655
523
  request.input("threadId", threadId);
656
- if (fromDate instanceof Date && !isNaN(fromDate.getTime())) {
524
+ if (resourceId) {
525
+ conditions.push("[resourceId] = @resourceId");
526
+ request.input("resourceId", resourceId);
527
+ }
528
+ if (filter?.dateRange?.start) {
657
529
  conditions.push("[createdAt] >= @fromDate");
658
- request.input("fromDate", fromDate.toISOString());
530
+ request.input("fromDate", filter.dateRange.start);
659
531
  }
660
- if (toDate instanceof Date && !isNaN(toDate.getTime())) {
532
+ if (filter?.dateRange?.end) {
661
533
  conditions.push("[createdAt] <= @toDate");
662
- request.input("toDate", toDate.toISOString());
534
+ request.input("toDate", filter.dateRange.end);
663
535
  }
664
536
  const whereClause = `WHERE ${conditions.join(" AND ")}`;
665
- const countQuery = `SELECT COUNT(*) as total FROM ${getTableName({ indexName: storage.TABLE_MESSAGES, schemaName: getSchemaName(this.schema) })} ${whereClause}`;
537
+ const countQuery = `SELECT COUNT(*) as total FROM ${tableName} ${whereClause}`;
666
538
  const countResult = await request.query(countQuery);
667
539
  const total = parseInt(countResult.recordset[0]?.total, 10) || 0;
668
- if (total === 0 && messages.length > 0) {
669
- const parsedIncluded = this._parseAndFormatMessages(messages, format);
540
+ const limitValue = perPageInput === false ? total : perPage;
541
+ const dataQuery = `${selectStatement} FROM ${tableName} ${whereClause} ${orderByStatement} OFFSET @offset ROWS FETCH NEXT @limit ROWS ONLY`;
542
+ request.input("offset", offset);
543
+ if (limitValue > 2147483647) {
544
+ request.input("limit", sql2__default.default.BigInt, limitValue);
545
+ } else {
546
+ request.input("limit", limitValue);
547
+ }
548
+ const rowsResult = await request.query(dataQuery);
549
+ const rows = rowsResult.recordset || [];
550
+ const messages = [...rows];
551
+ if (total === 0 && messages.length === 0 && (!include || include.length === 0)) {
670
552
  return {
671
- messages: parsedIncluded,
672
- total: parsedIncluded.length,
553
+ messages: [],
554
+ total: 0,
673
555
  page,
674
- perPage,
556
+ perPage: perPageForResponse,
675
557
  hasMore: false
676
558
  };
677
559
  }
678
- const excludeIds = messages.map((m) => m.id);
679
- if (excludeIds.length > 0) {
680
- const excludeParams = excludeIds.map((_, idx) => `@id${idx}`);
681
- conditions.push(`id NOT IN (${excludeParams.join(", ")})`);
682
- excludeIds.forEach((id, idx) => request.input(`id${idx}`, id));
560
+ const messageIds = new Set(messages.map((m) => m.id));
561
+ if (include && include.length > 0) {
562
+ const includeMessages = await this._getIncludedMessages({ threadId, include });
563
+ if (includeMessages) {
564
+ for (const includeMsg of includeMessages) {
565
+ if (!messageIds.has(includeMsg.id)) {
566
+ messages.push(includeMsg);
567
+ messageIds.add(includeMsg.id);
568
+ }
569
+ }
570
+ }
683
571
  }
684
- const finalWhereClause = `WHERE ${conditions.join(" AND ")}`;
685
- const dataQuery = `${selectStatement} FROM ${getTableName({ indexName: storage.TABLE_MESSAGES, schemaName: getSchemaName(this.schema) })} ${finalWhereClause} ${orderByStatement} OFFSET @offset ROWS FETCH NEXT @limit ROWS ONLY`;
686
- request.input("offset", currentOffset);
687
- request.input("limit", perPage);
688
- const rowsResult = await request.query(dataQuery);
689
- const rows = rowsResult.recordset || [];
690
- rows.sort((a, b) => a.seq_id - b.seq_id);
691
- messages.push(...rows);
692
- const parsed = this._parseAndFormatMessages(messages, format);
572
+ const parsed = this._parseAndFormatMessages(messages, "v2");
573
+ let finalMessages = parsed;
574
+ finalMessages = finalMessages.sort((a, b) => {
575
+ const aValue = field === "createdAt" ? new Date(a.createdAt).getTime() : a[field];
576
+ const bValue = field === "createdAt" ? new Date(b.createdAt).getTime() : b[field];
577
+ return direction === "ASC" ? aValue - bValue : bValue - aValue;
578
+ });
579
+ const returnedThreadMessageIds = new Set(finalMessages.filter((m) => m.threadId === threadId).map((m) => m.id));
580
+ const allThreadMessagesReturned = returnedThreadMessageIds.size >= total;
581
+ const hasMore = perPageInput !== false && !allThreadMessagesReturned && offset + perPage < total;
693
582
  return {
694
- messages: parsed,
695
- total: total + excludeIds.length,
583
+ messages: finalMessages,
584
+ total,
696
585
  page,
697
- perPage,
698
- hasMore: currentOffset + rows.length < total
586
+ perPage: perPageForResponse,
587
+ hasMore
699
588
  };
700
589
  } catch (error$1) {
701
590
  const mastraError = new error.MastraError(
702
591
  {
703
- id: "MASTRA_STORAGE_MSSQL_STORE_GET_MESSAGES_PAGINATED_FAILED",
592
+ id: "MASTRA_STORAGE_MSSQL_STORE_LIST_MESSAGES_FAILED",
704
593
  domain: error.ErrorDomain.STORAGE,
705
594
  category: error.ErrorCategory.THIRD_PARTY,
706
595
  details: {
707
596
  threadId,
708
- resourceId: resourceId ?? "",
709
- page
597
+ resourceId: resourceId ?? ""
710
598
  }
711
599
  },
712
600
  error$1
713
601
  );
714
602
  this.logger?.error?.(mastraError.toString());
715
- this.logger?.trackException(mastraError);
716
- return { messages: [], total: 0, page, perPage: perPageInput || 40, hasMore: false };
603
+ this.logger?.trackException?.(mastraError);
604
+ return {
605
+ messages: [],
606
+ total: 0,
607
+ page,
608
+ perPage: perPageForResponse,
609
+ hasMore: false
610
+ };
717
611
  }
718
612
  }
719
- async saveMessages({
720
- messages,
721
- format
722
- }) {
723
- if (messages.length === 0) return messages;
613
+ async saveMessages({ messages }) {
614
+ if (messages.length === 0) return { messages: [] };
724
615
  const threadId = messages[0]?.threadId;
725
616
  if (!threadId) {
726
617
  throw new error.MastraError({
@@ -802,8 +693,7 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
802
693
  return message;
803
694
  });
804
695
  const list = new agent.MessageList().add(messagesWithParsedContent, "memory");
805
- if (format === "v2") return list.get.all.v2();
806
- return list.get.all.v1();
696
+ return { messages: list.get.all.db() };
807
697
  } catch (error$1) {
808
698
  throw new error.MastraError(
809
699
  {
@@ -979,8 +869,10 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
979
869
  return null;
980
870
  }
981
871
  return {
982
- ...result,
983
- workingMemory: typeof result.workingMemory === "object" ? JSON.stringify(result.workingMemory) : result.workingMemory,
872
+ id: result.id,
873
+ createdAt: result.createdAt,
874
+ updatedAt: result.updatedAt,
875
+ workingMemory: result.workingMemory,
984
876
  metadata: typeof result.metadata === "string" ? JSON.parse(result.metadata) : result.metadata
985
877
  };
986
878
  } catch (error$1) {
@@ -994,7 +886,7 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
994
886
  error$1
995
887
  );
996
888
  this.logger?.error?.(mastraError.toString());
997
- this.logger?.trackException(mastraError);
889
+ this.logger?.trackException?.(mastraError);
998
890
  throw mastraError;
999
891
  }
1000
892
  }
@@ -1003,7 +895,7 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
1003
895
  tableName: storage.TABLE_RESOURCES,
1004
896
  record: {
1005
897
  ...resource,
1006
- metadata: JSON.stringify(resource.metadata)
898
+ metadata: resource.metadata
1007
899
  }
1008
900
  });
1009
901
  return resource;
@@ -1061,111 +953,436 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
1061
953
  error$1
1062
954
  );
1063
955
  this.logger?.error?.(mastraError.toString());
1064
- this.logger?.trackException(mastraError);
956
+ this.logger?.trackException?.(mastraError);
1065
957
  throw mastraError;
1066
958
  }
1067
959
  }
1068
960
  };
1069
- var StoreOperationsMSSQL = class extends storage.StoreOperations {
961
+ var ObservabilityMSSQL = class extends storage.ObservabilityStorage {
1070
962
  pool;
1071
- schemaName;
1072
- setupSchemaPromise = null;
1073
- schemaSetupComplete = void 0;
1074
- getSqlType(type, isPrimaryKey = false) {
1075
- switch (type) {
1076
- case "text":
1077
- return isPrimaryKey ? "NVARCHAR(255)" : "NVARCHAR(MAX)";
1078
- case "timestamp":
1079
- return "DATETIME2(7)";
1080
- case "uuid":
1081
- return "UNIQUEIDENTIFIER";
1082
- case "jsonb":
1083
- return "NVARCHAR(MAX)";
1084
- case "integer":
1085
- return "INT";
1086
- case "bigint":
1087
- return "BIGINT";
1088
- case "float":
1089
- return "FLOAT";
1090
- default:
1091
- throw new error.MastraError({
1092
- id: "MASTRA_STORAGE_MSSQL_STORE_TYPE_NOT_SUPPORTED",
1093
- domain: error.ErrorDomain.STORAGE,
1094
- category: error.ErrorCategory.THIRD_PARTY
1095
- });
1096
- }
1097
- }
1098
- constructor({ pool, schemaName }) {
963
+ operations;
964
+ schema;
965
+ constructor({
966
+ pool,
967
+ operations,
968
+ schema
969
+ }) {
1099
970
  super();
1100
971
  this.pool = pool;
1101
- this.schemaName = schemaName;
1102
- }
1103
- async hasColumn(table, column) {
1104
- const schema = this.schemaName || "dbo";
1105
- const request = this.pool.request();
1106
- request.input("schema", schema);
1107
- request.input("table", table);
1108
- request.input("column", column);
1109
- request.input("columnLower", column.toLowerCase());
1110
- const result = await request.query(
1111
- `SELECT 1 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = @schema AND TABLE_NAME = @table AND (COLUMN_NAME = @column OR COLUMN_NAME = @columnLower)`
1112
- );
1113
- return result.recordset.length > 0;
972
+ this.operations = operations;
973
+ this.schema = schema;
1114
974
  }
1115
- async setupSchema() {
1116
- if (!this.schemaName || this.schemaSetupComplete) {
1117
- return;
1118
- }
1119
- if (!this.setupSchemaPromise) {
1120
- this.setupSchemaPromise = (async () => {
1121
- try {
1122
- const checkRequest = this.pool.request();
1123
- checkRequest.input("schemaName", this.schemaName);
1124
- const checkResult = await checkRequest.query(`
1125
- SELECT 1 AS found FROM INFORMATION_SCHEMA.SCHEMATA WHERE SCHEMA_NAME = @schemaName
1126
- `);
1127
- const schemaExists = Array.isArray(checkResult.recordset) && checkResult.recordset.length > 0;
1128
- if (!schemaExists) {
1129
- try {
1130
- await this.pool.request().query(`CREATE SCHEMA [${this.schemaName}]`);
1131
- this.logger?.info?.(`Schema "${this.schemaName}" created successfully`);
1132
- } catch (error) {
1133
- this.logger?.error?.(`Failed to create schema "${this.schemaName}"`, { error });
1134
- throw new Error(
1135
- `Unable to create schema "${this.schemaName}". This requires CREATE privilege on the database. Either create the schema manually or grant CREATE privilege to the user.`
1136
- );
1137
- }
1138
- }
1139
- this.schemaSetupComplete = true;
1140
- this.logger?.debug?.(`Schema "${this.schemaName}" is ready for use`);
1141
- } catch (error) {
1142
- this.schemaSetupComplete = void 0;
1143
- this.setupSchemaPromise = null;
1144
- throw error;
1145
- } finally {
1146
- this.setupSchemaPromise = null;
1147
- }
1148
- })();
1149
- }
1150
- await this.setupSchemaPromise;
975
+ get tracingStrategy() {
976
+ return {
977
+ preferred: "batch-with-updates",
978
+ supported: ["batch-with-updates", "insert-only"]
979
+ };
1151
980
  }
1152
- async insert({ tableName, record }) {
981
+ async createSpan(span) {
1153
982
  try {
1154
- const columns = Object.keys(record).map((col) => utils.parseSqlIdentifier(col, "column name"));
1155
- const values = Object.values(record);
1156
- const paramNames = values.map((_, i) => `@param${i}`);
1157
- const insertSql = `INSERT INTO ${getTableName({ indexName: tableName, schemaName: getSchemaName(this.schemaName) })} (${columns.map((c) => `[${c}]`).join(", ")}) VALUES (${paramNames.join(", ")})`;
1158
- const request = this.pool.request();
1159
- values.forEach((value, i) => {
1160
- if (value instanceof Date) {
1161
- request.input(`param${i}`, sql2__default.default.DateTime2, value);
1162
- } else if (typeof value === "object" && value !== null) {
1163
- request.input(`param${i}`, JSON.stringify(value));
1164
- } else {
1165
- request.input(`param${i}`, value);
1166
- }
1167
- });
1168
- await request.query(insertSql);
983
+ const startedAt = span.startedAt instanceof Date ? span.startedAt.toISOString() : span.startedAt;
984
+ const endedAt = span.endedAt instanceof Date ? span.endedAt.toISOString() : span.endedAt;
985
+ const record = {
986
+ ...span,
987
+ startedAt,
988
+ endedAt
989
+ // Note: createdAt/updatedAt will be set by default values
990
+ };
991
+ return this.operations.insert({ tableName: storage.TABLE_AI_SPANS, record });
992
+ } catch (error$1) {
993
+ throw new error.MastraError(
994
+ {
995
+ id: "MSSQL_STORE_CREATE_AI_SPAN_FAILED",
996
+ domain: error.ErrorDomain.STORAGE,
997
+ category: error.ErrorCategory.USER,
998
+ details: {
999
+ spanId: span.spanId,
1000
+ traceId: span.traceId,
1001
+ spanType: span.spanType,
1002
+ spanName: span.name
1003
+ }
1004
+ },
1005
+ error$1
1006
+ );
1007
+ }
1008
+ }
1009
+ async getAITrace(traceId) {
1010
+ try {
1011
+ const tableName = getTableName({
1012
+ indexName: storage.TABLE_AI_SPANS,
1013
+ schemaName: getSchemaName(this.schema)
1014
+ });
1015
+ const request = this.pool.request();
1016
+ request.input("traceId", traceId);
1017
+ const result = await request.query(
1018
+ `SELECT
1019
+ [traceId], [spanId], [parentSpanId], [name], [scope], [spanType],
1020
+ [attributes], [metadata], [links], [input], [output], [error], [isEvent],
1021
+ [startedAt], [endedAt], [createdAt], [updatedAt]
1022
+ FROM ${tableName}
1023
+ WHERE [traceId] = @traceId
1024
+ ORDER BY [startedAt] DESC`
1025
+ );
1026
+ if (!result.recordset || result.recordset.length === 0) {
1027
+ return null;
1028
+ }
1029
+ return {
1030
+ traceId,
1031
+ spans: result.recordset.map(
1032
+ (span) => transformFromSqlRow({
1033
+ tableName: storage.TABLE_AI_SPANS,
1034
+ sqlRow: span
1035
+ })
1036
+ )
1037
+ };
1038
+ } catch (error$1) {
1039
+ throw new error.MastraError(
1040
+ {
1041
+ id: "MSSQL_STORE_GET_AI_TRACE_FAILED",
1042
+ domain: error.ErrorDomain.STORAGE,
1043
+ category: error.ErrorCategory.USER,
1044
+ details: {
1045
+ traceId
1046
+ }
1047
+ },
1048
+ error$1
1049
+ );
1050
+ }
1051
+ }
1052
+ async updateSpan({
1053
+ spanId,
1054
+ traceId,
1055
+ updates
1056
+ }) {
1057
+ try {
1058
+ const data = { ...updates };
1059
+ if (data.endedAt instanceof Date) {
1060
+ data.endedAt = data.endedAt.toISOString();
1061
+ }
1062
+ if (data.startedAt instanceof Date) {
1063
+ data.startedAt = data.startedAt.toISOString();
1064
+ }
1065
+ await this.operations.update({
1066
+ tableName: storage.TABLE_AI_SPANS,
1067
+ keys: { spanId, traceId },
1068
+ data
1069
+ });
1070
+ } catch (error$1) {
1071
+ throw new error.MastraError(
1072
+ {
1073
+ id: "MSSQL_STORE_UPDATE_AI_SPAN_FAILED",
1074
+ domain: error.ErrorDomain.STORAGE,
1075
+ category: error.ErrorCategory.USER,
1076
+ details: {
1077
+ spanId,
1078
+ traceId
1079
+ }
1080
+ },
1081
+ error$1
1082
+ );
1083
+ }
1084
+ }
1085
+ async getAITracesPaginated({
1086
+ filters,
1087
+ pagination
1088
+ }) {
1089
+ const page = pagination?.page ?? 0;
1090
+ const perPage = pagination?.perPage ?? 10;
1091
+ const { entityId, entityType, ...actualFilters } = filters || {};
1092
+ const filtersWithDateRange = {
1093
+ ...actualFilters,
1094
+ ...buildDateRangeFilter(pagination?.dateRange, "startedAt"),
1095
+ parentSpanId: null
1096
+ // Only get root spans for traces
1097
+ };
1098
+ const whereClause = prepareWhereClause(filtersWithDateRange);
1099
+ let actualWhereClause = whereClause.sql;
1100
+ const params = { ...whereClause.params };
1101
+ let currentParamIndex = Object.keys(params).length + 1;
1102
+ if (entityId && entityType) {
1103
+ let name = "";
1104
+ if (entityType === "workflow") {
1105
+ name = `workflow run: '${entityId}'`;
1106
+ } else if (entityType === "agent") {
1107
+ name = `agent run: '${entityId}'`;
1108
+ } else {
1109
+ const error$1 = new error.MastraError({
1110
+ id: "MSSQL_STORE_GET_AI_TRACES_PAGINATED_FAILED",
1111
+ domain: error.ErrorDomain.STORAGE,
1112
+ category: error.ErrorCategory.USER,
1113
+ details: {
1114
+ entityType
1115
+ },
1116
+ text: `Cannot filter by entity type: ${entityType}`
1117
+ });
1118
+ throw error$1;
1119
+ }
1120
+ const entityParam = `p${currentParamIndex++}`;
1121
+ if (actualWhereClause) {
1122
+ actualWhereClause += ` AND [name] = @${entityParam}`;
1123
+ } else {
1124
+ actualWhereClause = ` WHERE [name] = @${entityParam}`;
1125
+ }
1126
+ params[entityParam] = name;
1127
+ }
1128
+ const tableName = getTableName({
1129
+ indexName: storage.TABLE_AI_SPANS,
1130
+ schemaName: getSchemaName(this.schema)
1131
+ });
1132
+ try {
1133
+ const countRequest = this.pool.request();
1134
+ Object.entries(params).forEach(([key, value]) => {
1135
+ countRequest.input(key, value);
1136
+ });
1137
+ const countResult = await countRequest.query(
1138
+ `SELECT COUNT(*) as count FROM ${tableName}${actualWhereClause}`
1139
+ );
1140
+ const total = countResult.recordset[0]?.count ?? 0;
1141
+ if (total === 0) {
1142
+ return {
1143
+ pagination: {
1144
+ total: 0,
1145
+ page,
1146
+ perPage,
1147
+ hasMore: false
1148
+ },
1149
+ spans: []
1150
+ };
1151
+ }
1152
+ const dataRequest = this.pool.request();
1153
+ Object.entries(params).forEach(([key, value]) => {
1154
+ dataRequest.input(key, value);
1155
+ });
1156
+ dataRequest.input("offset", page * perPage);
1157
+ dataRequest.input("limit", perPage);
1158
+ const dataResult = await dataRequest.query(
1159
+ `SELECT * FROM ${tableName}${actualWhereClause} ORDER BY [startedAt] DESC OFFSET @offset ROWS FETCH NEXT @limit ROWS ONLY`
1160
+ );
1161
+ const spans = dataResult.recordset.map(
1162
+ (row) => transformFromSqlRow({
1163
+ tableName: storage.TABLE_AI_SPANS,
1164
+ sqlRow: row
1165
+ })
1166
+ );
1167
+ return {
1168
+ pagination: {
1169
+ total,
1170
+ page,
1171
+ perPage,
1172
+ hasMore: (page + 1) * perPage < total
1173
+ },
1174
+ spans
1175
+ };
1176
+ } catch (error$1) {
1177
+ throw new error.MastraError(
1178
+ {
1179
+ id: "MSSQL_STORE_GET_AI_TRACES_PAGINATED_FAILED",
1180
+ domain: error.ErrorDomain.STORAGE,
1181
+ category: error.ErrorCategory.USER
1182
+ },
1183
+ error$1
1184
+ );
1185
+ }
1186
+ }
1187
+ async batchCreateSpans(args) {
1188
+ if (!args.records || args.records.length === 0) {
1189
+ return;
1190
+ }
1191
+ try {
1192
+ await this.operations.batchInsert({
1193
+ tableName: storage.TABLE_AI_SPANS,
1194
+ records: args.records.map((span) => ({
1195
+ ...span,
1196
+ startedAt: span.startedAt instanceof Date ? span.startedAt.toISOString() : span.startedAt,
1197
+ endedAt: span.endedAt instanceof Date ? span.endedAt.toISOString() : span.endedAt
1198
+ }))
1199
+ });
1200
+ } catch (error$1) {
1201
+ throw new error.MastraError(
1202
+ {
1203
+ id: "MSSQL_STORE_BATCH_CREATE_AI_SPANS_FAILED",
1204
+ domain: error.ErrorDomain.STORAGE,
1205
+ category: error.ErrorCategory.USER,
1206
+ details: {
1207
+ count: args.records.length
1208
+ }
1209
+ },
1210
+ error$1
1211
+ );
1212
+ }
1213
+ }
1214
+ async batchUpdateSpans(args) {
1215
+ if (!args.records || args.records.length === 0) {
1216
+ return;
1217
+ }
1218
+ try {
1219
+ const updates = args.records.map(({ traceId, spanId, updates: data }) => {
1220
+ const processedData = { ...data };
1221
+ if (processedData.endedAt instanceof Date) {
1222
+ processedData.endedAt = processedData.endedAt.toISOString();
1223
+ }
1224
+ if (processedData.startedAt instanceof Date) {
1225
+ processedData.startedAt = processedData.startedAt.toISOString();
1226
+ }
1227
+ return {
1228
+ keys: { spanId, traceId },
1229
+ data: processedData
1230
+ };
1231
+ });
1232
+ await this.operations.batchUpdate({
1233
+ tableName: storage.TABLE_AI_SPANS,
1234
+ updates
1235
+ });
1236
+ } catch (error$1) {
1237
+ throw new error.MastraError(
1238
+ {
1239
+ id: "MSSQL_STORE_BATCH_UPDATE_AI_SPANS_FAILED",
1240
+ domain: error.ErrorDomain.STORAGE,
1241
+ category: error.ErrorCategory.USER,
1242
+ details: {
1243
+ count: args.records.length
1244
+ }
1245
+ },
1246
+ error$1
1247
+ );
1248
+ }
1249
+ }
1250
+ async batchDeleteAITraces(args) {
1251
+ if (!args.traceIds || args.traceIds.length === 0) {
1252
+ return;
1253
+ }
1254
+ try {
1255
+ const keys = args.traceIds.map((traceId) => ({ traceId }));
1256
+ await this.operations.batchDelete({
1257
+ tableName: storage.TABLE_AI_SPANS,
1258
+ keys
1259
+ });
1260
+ } catch (error$1) {
1261
+ throw new error.MastraError(
1262
+ {
1263
+ id: "MSSQL_STORE_BATCH_DELETE_AI_TRACES_FAILED",
1264
+ domain: error.ErrorDomain.STORAGE,
1265
+ category: error.ErrorCategory.USER,
1266
+ details: {
1267
+ count: args.traceIds.length
1268
+ }
1269
+ },
1270
+ error$1
1271
+ );
1272
+ }
1273
+ }
1274
+ };
1275
+ var StoreOperationsMSSQL = class extends storage.StoreOperations {
1276
+ pool;
1277
+ schemaName;
1278
+ setupSchemaPromise = null;
1279
+ schemaSetupComplete = void 0;
1280
+ getSqlType(type, isPrimaryKey = false, useLargeStorage = false) {
1281
+ switch (type) {
1282
+ case "text":
1283
+ if (useLargeStorage) {
1284
+ return "NVARCHAR(MAX)";
1285
+ }
1286
+ return isPrimaryKey ? "NVARCHAR(255)" : "NVARCHAR(400)";
1287
+ case "timestamp":
1288
+ return "DATETIME2(7)";
1289
+ case "uuid":
1290
+ return "UNIQUEIDENTIFIER";
1291
+ case "jsonb":
1292
+ return "NVARCHAR(MAX)";
1293
+ case "integer":
1294
+ return "INT";
1295
+ case "bigint":
1296
+ return "BIGINT";
1297
+ case "float":
1298
+ return "FLOAT";
1299
+ case "boolean":
1300
+ return "BIT";
1301
+ default:
1302
+ throw new error.MastraError({
1303
+ id: "MASTRA_STORAGE_MSSQL_STORE_TYPE_NOT_SUPPORTED",
1304
+ domain: error.ErrorDomain.STORAGE,
1305
+ category: error.ErrorCategory.THIRD_PARTY
1306
+ });
1307
+ }
1308
+ }
1309
+ constructor({ pool, schemaName }) {
1310
+ super();
1311
+ this.pool = pool;
1312
+ this.schemaName = schemaName;
1313
+ }
1314
+ async hasColumn(table, column) {
1315
+ const schema = this.schemaName || "dbo";
1316
+ const request = this.pool.request();
1317
+ request.input("schema", schema);
1318
+ request.input("table", table);
1319
+ request.input("column", column);
1320
+ request.input("columnLower", column.toLowerCase());
1321
+ const result = await request.query(
1322
+ `SELECT 1 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = @schema AND TABLE_NAME = @table AND (COLUMN_NAME = @column OR COLUMN_NAME = @columnLower)`
1323
+ );
1324
+ return result.recordset.length > 0;
1325
+ }
1326
+ async setupSchema() {
1327
+ if (!this.schemaName || this.schemaSetupComplete) {
1328
+ return;
1329
+ }
1330
+ if (!this.setupSchemaPromise) {
1331
+ this.setupSchemaPromise = (async () => {
1332
+ try {
1333
+ const checkRequest = this.pool.request();
1334
+ checkRequest.input("schemaName", this.schemaName);
1335
+ const checkResult = await checkRequest.query(`
1336
+ SELECT 1 AS found FROM INFORMATION_SCHEMA.SCHEMATA WHERE SCHEMA_NAME = @schemaName
1337
+ `);
1338
+ const schemaExists = Array.isArray(checkResult.recordset) && checkResult.recordset.length > 0;
1339
+ if (!schemaExists) {
1340
+ try {
1341
+ await this.pool.request().query(`CREATE SCHEMA [${this.schemaName}]`);
1342
+ this.logger?.info?.(`Schema "${this.schemaName}" created successfully`);
1343
+ } catch (error) {
1344
+ this.logger?.error?.(`Failed to create schema "${this.schemaName}"`, { error });
1345
+ throw new Error(
1346
+ `Unable to create schema "${this.schemaName}". This requires CREATE privilege on the database. Either create the schema manually or grant CREATE privilege to the user.`
1347
+ );
1348
+ }
1349
+ }
1350
+ this.schemaSetupComplete = true;
1351
+ this.logger?.debug?.(`Schema "${this.schemaName}" is ready for use`);
1352
+ } catch (error) {
1353
+ this.schemaSetupComplete = void 0;
1354
+ this.setupSchemaPromise = null;
1355
+ throw error;
1356
+ } finally {
1357
+ this.setupSchemaPromise = null;
1358
+ }
1359
+ })();
1360
+ }
1361
+ await this.setupSchemaPromise;
1362
+ }
1363
+ async insert({
1364
+ tableName,
1365
+ record,
1366
+ transaction
1367
+ }) {
1368
+ try {
1369
+ const columns = Object.keys(record);
1370
+ const parsedColumns = columns.map((col) => utils.parseSqlIdentifier(col, "column name"));
1371
+ const paramNames = columns.map((_, i) => `@param${i}`);
1372
+ const insertSql = `INSERT INTO ${getTableName({ indexName: tableName, schemaName: getSchemaName(this.schemaName) })} (${parsedColumns.map((c) => `[${c}]`).join(", ")}) VALUES (${paramNames.join(", ")})`;
1373
+ const request = transaction ? transaction.request() : this.pool.request();
1374
+ columns.forEach((col, i) => {
1375
+ const value = record[col];
1376
+ const preparedValue = this.prepareValue(value, col, tableName);
1377
+ if (preparedValue instanceof Date) {
1378
+ request.input(`param${i}`, sql2__default.default.DateTime2, preparedValue);
1379
+ } else if (preparedValue === null || preparedValue === void 0) {
1380
+ request.input(`param${i}`, this.getMssqlType(tableName, col), null);
1381
+ } else {
1382
+ request.input(`param${i}`, preparedValue);
1383
+ }
1384
+ });
1385
+ await request.query(insertSql);
1169
1386
  } catch (error$1) {
1170
1387
  throw new error.MastraError(
1171
1388
  {
@@ -1186,7 +1403,7 @@ var StoreOperationsMSSQL = class extends storage.StoreOperations {
1186
1403
  try {
1187
1404
  await this.pool.request().query(`TRUNCATE TABLE ${fullTableName}`);
1188
1405
  } catch (truncateError) {
1189
- if (truncateError.message && truncateError.message.includes("foreign key")) {
1406
+ if (truncateError?.number === 4712) {
1190
1407
  await this.pool.request().query(`DELETE FROM ${fullTableName}`);
1191
1408
  } else {
1192
1409
  throw truncateError;
@@ -1209,9 +1426,11 @@ var StoreOperationsMSSQL = class extends storage.StoreOperations {
1209
1426
  getDefaultValue(type) {
1210
1427
  switch (type) {
1211
1428
  case "timestamp":
1212
- return "DEFAULT SYSDATETIMEOFFSET()";
1429
+ return "DEFAULT SYSUTCDATETIME()";
1213
1430
  case "jsonb":
1214
1431
  return "DEFAULT N'{}'";
1432
+ case "boolean":
1433
+ return "DEFAULT 0";
1215
1434
  default:
1216
1435
  return super.getDefaultValue(type);
1217
1436
  }
@@ -1222,13 +1441,29 @@ var StoreOperationsMSSQL = class extends storage.StoreOperations {
1222
1441
  }) {
1223
1442
  try {
1224
1443
  const uniqueConstraintColumns = tableName === storage.TABLE_WORKFLOW_SNAPSHOT ? ["workflow_name", "run_id"] : [];
1444
+ const largeDataColumns = [
1445
+ "workingMemory",
1446
+ "snapshot",
1447
+ "metadata",
1448
+ "content",
1449
+ // messages.content - can be very long conversation content
1450
+ "input",
1451
+ // evals.input - test input data
1452
+ "output",
1453
+ // evals.output - test output data
1454
+ "instructions",
1455
+ // evals.instructions - evaluation instructions
1456
+ "other"
1457
+ // traces.other - additional trace data
1458
+ ];
1225
1459
  const columns = Object.entries(schema).map(([name, def]) => {
1226
1460
  const parsedName = utils.parseSqlIdentifier(name, "column name");
1227
1461
  const constraints = [];
1228
1462
  if (def.primaryKey) constraints.push("PRIMARY KEY");
1229
1463
  if (!def.nullable) constraints.push("NOT NULL");
1230
1464
  const isIndexed = !!def.primaryKey || uniqueConstraintColumns.includes(name);
1231
- return `[${parsedName}] ${this.getSqlType(def.type, isIndexed)} ${constraints.join(" ")}`.trim();
1465
+ const useLargeStorage = largeDataColumns.includes(name);
1466
+ return `[${parsedName}] ${this.getSqlType(def.type, isIndexed, useLargeStorage)} ${constraints.join(" ")}`.trim();
1232
1467
  }).join(",\n");
1233
1468
  if (this.schemaName) {
1234
1469
  await this.setupSchema();
@@ -1315,7 +1550,19 @@ ${columns}
1315
1550
  const columnExists = Array.isArray(checkResult.recordset) && checkResult.recordset.length > 0;
1316
1551
  if (!columnExists) {
1317
1552
  const columnDef = schema[columnName];
1318
- const sqlType = this.getSqlType(columnDef.type);
1553
+ const largeDataColumns = [
1554
+ "workingMemory",
1555
+ "snapshot",
1556
+ "metadata",
1557
+ "content",
1558
+ "input",
1559
+ "output",
1560
+ "instructions",
1561
+ "other"
1562
+ ];
1563
+ const useLargeStorage = largeDataColumns.includes(columnName);
1564
+ const isIndexed = !!columnDef.primaryKey;
1565
+ const sqlType = this.getSqlType(columnDef.type, isIndexed, useLargeStorage);
1319
1566
  const nullable = columnDef.nullable === false ? "NOT NULL" : "";
1320
1567
  const defaultValue = columnDef.nullable === false ? this.getDefaultValue(columnDef.type) : "";
1321
1568
  const parsedColumnName = utils.parseSqlIdentifier(columnName, "column name");
@@ -1328,116 +1575,656 @@ ${columns}
1328
1575
  } catch (error$1) {
1329
1576
  throw new error.MastraError(
1330
1577
  {
1331
- id: "MASTRA_STORAGE_MSSQL_STORE_ALTER_TABLE_FAILED",
1578
+ id: "MASTRA_STORAGE_MSSQL_STORE_ALTER_TABLE_FAILED",
1579
+ domain: error.ErrorDomain.STORAGE,
1580
+ category: error.ErrorCategory.THIRD_PARTY,
1581
+ details: {
1582
+ tableName
1583
+ }
1584
+ },
1585
+ error$1
1586
+ );
1587
+ }
1588
+ }
1589
+ async load({ tableName, keys }) {
1590
+ try {
1591
+ const keyEntries = Object.entries(keys).map(([key, value]) => [utils.parseSqlIdentifier(key, "column name"), value]);
1592
+ const conditions = keyEntries.map(([key], i) => `[${key}] = @param${i}`).join(" AND ");
1593
+ const sql5 = `SELECT * FROM ${getTableName({ indexName: tableName, schemaName: getSchemaName(this.schemaName) })} WHERE ${conditions}`;
1594
+ const request = this.pool.request();
1595
+ keyEntries.forEach(([key, value], i) => {
1596
+ const preparedValue = this.prepareValue(value, key, tableName);
1597
+ if (preparedValue === null || preparedValue === void 0) {
1598
+ request.input(`param${i}`, this.getMssqlType(tableName, key), null);
1599
+ } else {
1600
+ request.input(`param${i}`, preparedValue);
1601
+ }
1602
+ });
1603
+ const resultSet = await request.query(sql5);
1604
+ const result = resultSet.recordset[0] || null;
1605
+ if (!result) {
1606
+ return null;
1607
+ }
1608
+ if (tableName === storage.TABLE_WORKFLOW_SNAPSHOT) {
1609
+ const snapshot = result;
1610
+ if (typeof snapshot.snapshot === "string") {
1611
+ snapshot.snapshot = JSON.parse(snapshot.snapshot);
1612
+ }
1613
+ return snapshot;
1614
+ }
1615
+ return result;
1616
+ } catch (error$1) {
1617
+ throw new error.MastraError(
1618
+ {
1619
+ id: "MASTRA_STORAGE_MSSQL_STORE_LOAD_FAILED",
1620
+ domain: error.ErrorDomain.STORAGE,
1621
+ category: error.ErrorCategory.THIRD_PARTY,
1622
+ details: {
1623
+ tableName
1624
+ }
1625
+ },
1626
+ error$1
1627
+ );
1628
+ }
1629
+ }
1630
+ async batchInsert({ tableName, records }) {
1631
+ const transaction = this.pool.transaction();
1632
+ try {
1633
+ await transaction.begin();
1634
+ for (const record of records) {
1635
+ await this.insert({ tableName, record, transaction });
1636
+ }
1637
+ await transaction.commit();
1638
+ } catch (error$1) {
1639
+ await transaction.rollback();
1640
+ throw new error.MastraError(
1641
+ {
1642
+ id: "MASTRA_STORAGE_MSSQL_STORE_BATCH_INSERT_FAILED",
1643
+ domain: error.ErrorDomain.STORAGE,
1644
+ category: error.ErrorCategory.THIRD_PARTY,
1645
+ details: {
1646
+ tableName,
1647
+ numberOfRecords: records.length
1648
+ }
1649
+ },
1650
+ error$1
1651
+ );
1652
+ }
1653
+ }
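
batchInsert wraps the per-record insert() calls in a single transaction, so one failing row rolls back the whole batch. A hedged sketch, assuming operations is a StoreOperationsMSSQL instance and that the thread columns shown (title in particular) match the configured schema:

  const { TABLE_THREADS } = require('@mastra/core/storage');

  // Two thread rows inserted atomically; if the second insert fails, the first is rolled back.
  await operations.batchInsert({
    tableName: TABLE_THREADS,
    records: [
      { id: 'thread-a', resourceId: 'user-1', title: 'First chat', createdAt: new Date(), updatedAt: new Date() },
      { id: 'thread-b', resourceId: 'user-1', title: 'Second chat', createdAt: new Date(), updatedAt: new Date() },
    ],
  });
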
1654
+ async dropTable({ tableName }) {
1655
+ try {
1656
+ const tableNameWithSchema = getTableName({ indexName: tableName, schemaName: getSchemaName(this.schemaName) });
1657
+ await this.pool.request().query(`DROP TABLE IF EXISTS ${tableNameWithSchema}`);
1658
+ } catch (error$1) {
1659
+ throw new error.MastraError(
1660
+ {
1661
+ id: "MASTRA_STORAGE_MSSQL_STORE_DROP_TABLE_FAILED",
1662
+ domain: error.ErrorDomain.STORAGE,
1663
+ category: error.ErrorCategory.THIRD_PARTY,
1664
+ details: {
1665
+ tableName
1666
+ }
1667
+ },
1668
+ error$1
1669
+ );
1670
+ }
1671
+ }
1672
+ /**
1673
+ * Prepares a value for database operations, handling Date objects and JSON serialization
1674
+ */
1675
+ prepareValue(value, columnName, tableName) {
1676
+ if (value === null || value === void 0) {
1677
+ return value;
1678
+ }
1679
+ if (value instanceof Date) {
1680
+ return value;
1681
+ }
1682
+ const schema = storage.TABLE_SCHEMAS[tableName];
1683
+ const columnSchema = schema?.[columnName];
1684
+ if (columnSchema?.type === "boolean") {
1685
+ return value ? 1 : 0;
1686
+ }
1687
+ if (columnSchema?.type === "jsonb") {
1688
+ return JSON.stringify(value);
1689
+ }
1690
+ if (typeof value === "object") {
1691
+ return JSON.stringify(value);
1692
+ }
1693
+ return value;
1694
+ }
1695
+ /**
1696
+ * Maps TABLE_SCHEMAS types to mssql param types (used when value is null)
1697
+ */
1698
+ getMssqlType(tableName, columnName) {
1699
+ const col = storage.TABLE_SCHEMAS[tableName]?.[columnName];
1700
+ switch (col?.type) {
1701
+ case "text":
1702
+ return sql2__default.default.NVarChar;
1703
+ case "timestamp":
1704
+ return sql2__default.default.DateTime2;
1705
+ case "uuid":
1706
+ return sql2__default.default.UniqueIdentifier;
1707
+ case "jsonb":
1708
+ return sql2__default.default.NVarChar;
1709
+ case "integer":
1710
+ return sql2__default.default.Int;
1711
+ case "bigint":
1712
+ return sql2__default.default.BigInt;
1713
+ case "float":
1714
+ return sql2__default.default.Float;
1715
+ case "boolean":
1716
+ return sql2__default.default.Bit;
1717
+ default:
1718
+ return sql2__default.default.NVarChar;
1719
+ }
1720
+ }
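
prepareValue and getMssqlType exist to work around one mssql driver quirk: a parameter type cannot be inferred from a null value, so NULL bindings need an explicit type while non-null values can rely on inference. The same idiom in isolation (a sketch of the underlying mssql API, not of this package's exports):

  const sql = require('mssql');

  // Bind a possibly-null value: pass an explicit type only when the value is null/undefined.
  function bindParam(request, name, value, nullType = sql.NVarChar) {
    if (value === null || value === undefined) {
      request.input(name, nullType, null);   // explicit type keeps the driver from throwing
    } else {
      request.input(name, value);            // type inferred from the JS value
    }
  }
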
1721
+ /**
1722
+ * Update a single record in the database
1723
+ */
1724
+ async update({
1725
+ tableName,
1726
+ keys,
1727
+ data,
1728
+ transaction
1729
+ }) {
1730
+ try {
1731
+ if (!data || Object.keys(data).length === 0) {
1732
+ throw new error.MastraError({
1733
+ id: "MASTRA_STORAGE_MSSQL_UPDATE_EMPTY_DATA",
1734
+ domain: error.ErrorDomain.STORAGE,
1735
+ category: error.ErrorCategory.USER,
1736
+ text: "Cannot update with empty data payload"
1737
+ });
1738
+ }
1739
+ if (!keys || Object.keys(keys).length === 0) {
1740
+ throw new error.MastraError({
1741
+ id: "MASTRA_STORAGE_MSSQL_UPDATE_EMPTY_KEYS",
1742
+ domain: error.ErrorDomain.STORAGE,
1743
+ category: error.ErrorCategory.USER,
1744
+ text: "Cannot update without keys to identify records"
1745
+ });
1746
+ }
1747
+ const setClauses = [];
1748
+ const request = transaction ? transaction.request() : this.pool.request();
1749
+ let paramIndex = 0;
1750
+ Object.entries(data).forEach(([key, value]) => {
1751
+ const parsedKey = utils.parseSqlIdentifier(key, "column name");
1752
+ const paramName = `set${paramIndex++}`;
1753
+ setClauses.push(`[${parsedKey}] = @${paramName}`);
1754
+ const preparedValue = this.prepareValue(value, key, tableName);
1755
+ if (preparedValue === null || preparedValue === void 0) {
1756
+ request.input(paramName, this.getMssqlType(tableName, key), null);
1757
+ } else {
1758
+ request.input(paramName, preparedValue);
1759
+ }
1760
+ });
1761
+ const whereConditions = [];
1762
+ Object.entries(keys).forEach(([key, value]) => {
1763
+ const parsedKey = utils.parseSqlIdentifier(key, "column name");
1764
+ const paramName = `where${paramIndex++}`;
1765
+ whereConditions.push(`[${parsedKey}] = @${paramName}`);
1766
+ const preparedValue = this.prepareValue(value, key, tableName);
1767
+ if (preparedValue === null || preparedValue === void 0) {
1768
+ request.input(paramName, this.getMssqlType(tableName, key), null);
1769
+ } else {
1770
+ request.input(paramName, preparedValue);
1771
+ }
1772
+ });
1773
+ const tableName_ = getTableName({
1774
+ indexName: tableName,
1775
+ schemaName: getSchemaName(this.schemaName)
1776
+ });
1777
+ const updateSql = `UPDATE ${tableName_} SET ${setClauses.join(", ")} WHERE ${whereConditions.join(" AND ")}`;
1778
+ await request.query(updateSql);
1779
+ } catch (error$1) {
1780
+ throw new error.MastraError(
1781
+ {
1782
+ id: "MASTRA_STORAGE_MSSQL_STORE_UPDATE_FAILED",
1783
+ domain: error.ErrorDomain.STORAGE,
1784
+ category: error.ErrorCategory.THIRD_PARTY,
1785
+ details: {
1786
+ tableName
1787
+ }
1788
+ },
1789
+ error$1
1790
+ );
1791
+ }
1792
+ }
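
update() assembles the SET list from data and the WHERE clause from keys, so callers never splice values into SQL themselves. A hedged example, assuming operations is a StoreOperationsMSSQL instance and that the thread table carries title and updatedAt columns:

  const { TABLE_THREADS } = require('@mastra/core/storage');

  // Becomes: UPDATE ... SET [title] = @set0, [updatedAt] = @set1 WHERE [id] = @where2
  await operations.update({
    tableName: TABLE_THREADS,
    keys: { id: 'thread-a' },
    data: { title: 'Renamed thread', updatedAt: new Date() },
  });
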
1793
+ /**
1794
+ * Update multiple records in a single batch transaction
1795
+ */
1796
+ async batchUpdate({
1797
+ tableName,
1798
+ updates
1799
+ }) {
1800
+ const transaction = this.pool.transaction();
1801
+ try {
1802
+ await transaction.begin();
1803
+ for (const { keys, data } of updates) {
1804
+ await this.update({ tableName, keys, data, transaction });
1805
+ }
1806
+ await transaction.commit();
1807
+ } catch (error$1) {
1808
+ await transaction.rollback();
1809
+ throw new error.MastraError(
1810
+ {
1811
+ id: "MASTRA_STORAGE_MSSQL_STORE_BATCH_UPDATE_FAILED",
1812
+ domain: error.ErrorDomain.STORAGE,
1813
+ category: error.ErrorCategory.THIRD_PARTY,
1814
+ details: {
1815
+ tableName,
1816
+ numberOfRecords: updates.length
1817
+ }
1818
+ },
1819
+ error$1
1820
+ );
1821
+ }
1822
+ }
1823
+ /**
1824
+ * Delete multiple records by keys
1825
+ */
1826
+ async batchDelete({ tableName, keys }) {
1827
+ if (keys.length === 0) {
1828
+ return;
1829
+ }
1830
+ const tableName_ = getTableName({
1831
+ indexName: tableName,
1832
+ schemaName: getSchemaName(this.schemaName)
1833
+ });
1834
+ const transaction = this.pool.transaction();
1835
+ try {
1836
+ await transaction.begin();
1837
+ for (const keySet of keys) {
1838
+ const conditions = [];
1839
+ const request = transaction.request();
1840
+ let paramIndex = 0;
1841
+ Object.entries(keySet).forEach(([key, value]) => {
1842
+ const parsedKey = utils.parseSqlIdentifier(key, "column name");
1843
+ const paramName = `p${paramIndex++}`;
1844
+ conditions.push(`[${parsedKey}] = @${paramName}`);
1845
+ const preparedValue = this.prepareValue(value, key, tableName);
1846
+ if (preparedValue === null || preparedValue === void 0) {
1847
+ request.input(paramName, this.getMssqlType(tableName, key), null);
1848
+ } else {
1849
+ request.input(paramName, preparedValue);
1850
+ }
1851
+ });
1852
+ const deleteSql = `DELETE FROM ${tableName_} WHERE ${conditions.join(" AND ")}`;
1853
+ await request.query(deleteSql);
1854
+ }
1855
+ await transaction.commit();
1856
+ } catch (error$1) {
1857
+ await transaction.rollback();
1858
+ throw new error.MastraError(
1859
+ {
1860
+ id: "MASTRA_STORAGE_MSSQL_STORE_BATCH_DELETE_FAILED",
1861
+ domain: error.ErrorDomain.STORAGE,
1862
+ category: error.ErrorCategory.THIRD_PARTY,
1863
+ details: {
1864
+ tableName,
1865
+ numberOfRecords: keys.length
1866
+ }
1867
+ },
1868
+ error$1
1869
+ );
1870
+ }
1871
+ }
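
batchDelete issues one parameterized DELETE per key set inside a single transaction and rolls the whole batch back on the first failure. For example (message ids are illustrative):

  const { TABLE_MESSAGES } = require('@mastra/core/storage');

  // Remove two messages atomically.
  await operations.batchDelete({
    tableName: TABLE_MESSAGES,
    keys: [{ id: 'msg-1' }, { id: 'msg-2' }],
  });
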
1872
+ /**
1873
+ * Create a new index on a table
1874
+ */
1875
+ async createIndex(options) {
1876
+ try {
1877
+ const { name, table, columns, unique = false, where } = options;
1878
+ const schemaName = this.schemaName || "dbo";
1879
+ const fullTableName = getTableName({
1880
+ indexName: table,
1881
+ schemaName: getSchemaName(this.schemaName)
1882
+ });
1883
+ const indexNameSafe = utils.parseSqlIdentifier(name, "index name");
1884
+ const checkRequest = this.pool.request();
1885
+ checkRequest.input("indexName", indexNameSafe);
1886
+ checkRequest.input("schemaName", schemaName);
1887
+ checkRequest.input("tableName", table);
1888
+ const indexExists = await checkRequest.query(`
1889
+ SELECT 1 as found
1890
+ FROM sys.indexes i
1891
+ INNER JOIN sys.tables t ON i.object_id = t.object_id
1892
+ INNER JOIN sys.schemas s ON t.schema_id = s.schema_id
1893
+ WHERE i.name = @indexName
1894
+ AND s.name = @schemaName
1895
+ AND t.name = @tableName
1896
+ `);
1897
+ if (indexExists.recordset && indexExists.recordset.length > 0) {
1898
+ return;
1899
+ }
1900
+ const uniqueStr = unique ? "UNIQUE " : "";
1901
+ const columnsStr = columns.map((col) => {
1902
+ if (col.includes(" DESC") || col.includes(" ASC")) {
1903
+ const [colName, ...modifiers] = col.split(" ");
1904
+ if (!colName) {
1905
+ throw new Error(`Invalid column specification: ${col}`);
1906
+ }
1907
+ return `[${utils.parseSqlIdentifier(colName, "column name")}] ${modifiers.join(" ")}`;
1908
+ }
1909
+ return `[${utils.parseSqlIdentifier(col, "column name")}]`;
1910
+ }).join(", ");
1911
+ const whereStr = where ? ` WHERE ${where}` : "";
1912
+ const createIndexSql = `CREATE ${uniqueStr}INDEX [${indexNameSafe}] ON ${fullTableName} (${columnsStr})${whereStr}`;
1913
+ await this.pool.request().query(createIndexSql);
1914
+ } catch (error$1) {
1915
+ throw new error.MastraError(
1916
+ {
1917
+ id: "MASTRA_STORAGE_MSSQL_INDEX_CREATE_FAILED",
1918
+ domain: error.ErrorDomain.STORAGE,
1919
+ category: error.ErrorCategory.THIRD_PARTY,
1920
+ details: {
1921
+ indexName: options.name,
1922
+ tableName: options.table
1923
+ }
1924
+ },
1925
+ error$1
1926
+ );
1927
+ }
1928
+ }
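
createIndex quotes every identifier, accepts column entries with ASC/DESC modifiers, and silently returns if an index of the same name already exists on the table. A sketch of a filtered index (the index name and WHERE predicate are illustrative; note the predicate is interpolated verbatim, so it should never contain untrusted input):

  const { TABLE_THREADS } = require('@mastra/core/storage');

  await operations.createIndex({
    name: 'mastra_threads_resourceid_recent_idx',
    table: TABLE_THREADS,
    columns: ['resourceId', 'seq_id DESC'],
    unique: false,
    where: '[resourceId] IS NOT NULL',   // emitted as a filtered index predicate
  });
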
1929
+ /**
1930
+ * Drop an existing index
1931
+ */
1932
+ async dropIndex(indexName) {
1933
+ try {
1934
+ const schemaName = this.schemaName || "dbo";
1935
+ const indexNameSafe = utils.parseSqlIdentifier(indexName, "index name");
1936
+ const checkRequest = this.pool.request();
1937
+ checkRequest.input("indexName", indexNameSafe);
1938
+ checkRequest.input("schemaName", schemaName);
1939
+ const result = await checkRequest.query(`
1940
+ SELECT t.name as table_name
1941
+ FROM sys.indexes i
1942
+ INNER JOIN sys.tables t ON i.object_id = t.object_id
1943
+ INNER JOIN sys.schemas s ON t.schema_id = s.schema_id
1944
+ WHERE i.name = @indexName
1945
+ AND s.name = @schemaName
1946
+ `);
1947
+ if (!result.recordset || result.recordset.length === 0) {
1948
+ return;
1949
+ }
1950
+ if (result.recordset.length > 1) {
1951
+ const tables = result.recordset.map((r) => r.table_name).join(", ");
1952
+ throw new error.MastraError({
1953
+ id: "MASTRA_STORAGE_MSSQL_INDEX_AMBIGUOUS",
1954
+ domain: error.ErrorDomain.STORAGE,
1955
+ category: error.ErrorCategory.USER,
1956
+ text: `Index "${indexNameSafe}" exists on multiple tables (${tables}) in schema "${schemaName}". Please drop indexes manually or ensure unique index names.`
1957
+ });
1958
+ }
1959
+ const tableName = result.recordset[0].table_name;
1960
+ const fullTableName = getTableName({
1961
+ indexName: tableName,
1962
+ schemaName: getSchemaName(this.schemaName)
1963
+ });
1964
+ const dropSql = `DROP INDEX [${indexNameSafe}] ON ${fullTableName}`;
1965
+ await this.pool.request().query(dropSql);
1966
+ } catch (error$1) {
1967
+ throw new error.MastraError(
1968
+ {
1969
+ id: "MASTRA_STORAGE_MSSQL_INDEX_DROP_FAILED",
1332
1970
  domain: error.ErrorDomain.STORAGE,
1333
1971
  category: error.ErrorCategory.THIRD_PARTY,
1334
1972
  details: {
1335
- tableName
1973
+ indexName
1336
1974
  }
1337
1975
  },
1338
1976
  error$1
1339
1977
  );
1340
1978
  }
1341
1979
  }
1342
- async load({ tableName, keys }) {
1980
+ /**
1981
+ * List indexes for a specific table or all tables
1982
+ */
1983
+ async listIndexes(tableName) {
1343
1984
  try {
1344
- const keyEntries = Object.entries(keys).map(([key, value]) => [utils.parseSqlIdentifier(key, "column name"), value]);
1345
- const conditions = keyEntries.map(([key], i) => `[${key}] = @param${i}`).join(" AND ");
1346
- const values = keyEntries.map(([_, value]) => value);
1347
- const sql7 = `SELECT * FROM ${getTableName({ indexName: tableName, schemaName: getSchemaName(this.schemaName) })} WHERE ${conditions}`;
1985
+ const schemaName = this.schemaName || "dbo";
1986
+ let query;
1348
1987
  const request = this.pool.request();
1349
- values.forEach((value, i) => {
1350
- request.input(`param${i}`, value);
1351
- });
1352
- const resultSet = await request.query(sql7);
1353
- const result = resultSet.recordset[0] || null;
1354
- if (!result) {
1355
- return null;
1988
+ request.input("schemaName", schemaName);
1989
+ if (tableName) {
1990
+ query = `
1991
+ SELECT
1992
+ i.name as name,
1993
+ o.name as [table],
1994
+ i.is_unique as is_unique,
1995
+ CAST(SUM(s.used_page_count) * 8 / 1024.0 AS VARCHAR(50)) + ' MB' as size
1996
+ FROM sys.indexes i
1997
+ INNER JOIN sys.objects o ON i.object_id = o.object_id
1998
+ INNER JOIN sys.schemas sch ON o.schema_id = sch.schema_id
1999
+ LEFT JOIN sys.dm_db_partition_stats s ON i.object_id = s.object_id AND i.index_id = s.index_id
2000
+ WHERE sch.name = @schemaName
2001
+ AND o.name = @tableName
2002
+ AND i.name IS NOT NULL
2003
+ GROUP BY i.name, o.name, i.is_unique
2004
+ `;
2005
+ request.input("tableName", tableName);
2006
+ } else {
2007
+ query = `
2008
+ SELECT
2009
+ i.name as name,
2010
+ o.name as [table],
2011
+ i.is_unique as is_unique,
2012
+ CAST(SUM(s.used_page_count) * 8 / 1024.0 AS VARCHAR(50)) + ' MB' as size
2013
+ FROM sys.indexes i
2014
+ INNER JOIN sys.objects o ON i.object_id = o.object_id
2015
+ INNER JOIN sys.schemas sch ON o.schema_id = sch.schema_id
2016
+ LEFT JOIN sys.dm_db_partition_stats s ON i.object_id = s.object_id AND i.index_id = s.index_id
2017
+ WHERE sch.name = @schemaName
2018
+ AND i.name IS NOT NULL
2019
+ GROUP BY i.name, o.name, i.is_unique
2020
+ `;
1356
2021
  }
1357
- if (tableName === storage.TABLE_WORKFLOW_SNAPSHOT) {
1358
- const snapshot = result;
1359
- if (typeof snapshot.snapshot === "string") {
1360
- snapshot.snapshot = JSON.parse(snapshot.snapshot);
1361
- }
1362
- return snapshot;
2022
+ const result = await request.query(query);
2023
+ const indexes = [];
2024
+ for (const row of result.recordset) {
2025
+ const colRequest = this.pool.request();
2026
+ colRequest.input("indexName", row.name);
2027
+ colRequest.input("schemaName", schemaName);
2028
+ const colResult = await colRequest.query(`
2029
+ SELECT c.name as column_name
2030
+ FROM sys.indexes i
2031
+ INNER JOIN sys.index_columns ic ON i.object_id = ic.object_id AND i.index_id = ic.index_id
2032
+ INNER JOIN sys.columns c ON ic.object_id = c.object_id AND ic.column_id = c.column_id
2033
+ INNER JOIN sys.objects o ON i.object_id = o.object_id
2034
+ INNER JOIN sys.schemas s ON o.schema_id = s.schema_id
2035
+ WHERE i.name = @indexName
2036
+ AND s.name = @schemaName
2037
+ ORDER BY ic.key_ordinal
2038
+ `);
2039
+ indexes.push({
2040
+ name: row.name,
2041
+ table: row.table,
2042
+ columns: colResult.recordset.map((c) => c.column_name),
2043
+ unique: row.is_unique || false,
2044
+ size: row.size || "0 MB",
2045
+ definition: ""
2046
+ // MSSQL doesn't store definition like PG
2047
+ });
1363
2048
  }
1364
- return result;
2049
+ return indexes;
1365
2050
  } catch (error$1) {
1366
2051
  throw new error.MastraError(
1367
2052
  {
1368
- id: "MASTRA_STORAGE_MSSQL_STORE_LOAD_FAILED",
2053
+ id: "MASTRA_STORAGE_MSSQL_INDEX_LIST_FAILED",
1369
2054
  domain: error.ErrorDomain.STORAGE,
1370
2055
  category: error.ErrorCategory.THIRD_PARTY,
1371
- details: {
2056
+ details: tableName ? {
1372
2057
  tableName
1373
- }
2058
+ } : {}
1374
2059
  },
1375
2060
  error$1
1376
2061
  );
1377
2062
  }
1378
2063
  }
1379
- async batchInsert({ tableName, records }) {
1380
- const transaction = this.pool.transaction();
2064
+ /**
2065
+ * Get detailed statistics for a specific index
2066
+ */
2067
+ async describeIndex(indexName) {
1381
2068
  try {
1382
- await transaction.begin();
1383
- for (const record of records) {
1384
- await this.insert({ tableName, record });
2069
+ const schemaName = this.schemaName || "dbo";
2070
+ const request = this.pool.request();
2071
+ request.input("indexName", indexName);
2072
+ request.input("schemaName", schemaName);
2073
+ const query = `
2074
+ SELECT
2075
+ i.name as name,
2076
+ o.name as [table],
2077
+ i.is_unique as is_unique,
2078
+ CAST(SUM(s.used_page_count) * 8 / 1024.0 AS VARCHAR(50)) + ' MB' as size,
2079
+ i.type_desc as method,
2080
+ ISNULL(us.user_scans, 0) as scans,
2081
+ ISNULL(us.user_seeks + us.user_scans, 0) as tuples_read,
2082
+ ISNULL(us.user_lookups, 0) as tuples_fetched
2083
+ FROM sys.indexes i
2084
+ INNER JOIN sys.objects o ON i.object_id = o.object_id
2085
+ INNER JOIN sys.schemas sch ON o.schema_id = sch.schema_id
2086
+ LEFT JOIN sys.dm_db_partition_stats s ON i.object_id = s.object_id AND i.index_id = s.index_id
2087
+ LEFT JOIN sys.dm_db_index_usage_stats us ON i.object_id = us.object_id AND i.index_id = us.index_id
2088
+ WHERE i.name = @indexName
2089
+ AND sch.name = @schemaName
2090
+ GROUP BY i.name, o.name, i.is_unique, i.type_desc, us.user_seeks, us.user_scans, us.user_lookups
2091
+ `;
2092
+ const result = await request.query(query);
2093
+ if (!result.recordset || result.recordset.length === 0) {
2094
+ throw new Error(`Index "${indexName}" not found in schema "${schemaName}"`);
1385
2095
  }
1386
- await transaction.commit();
2096
+ const row = result.recordset[0];
2097
+ const colRequest = this.pool.request();
2098
+ colRequest.input("indexName", indexName);
2099
+ colRequest.input("schemaName", schemaName);
2100
+ const colResult = await colRequest.query(`
2101
+ SELECT c.name as column_name
2102
+ FROM sys.indexes i
2103
+ INNER JOIN sys.index_columns ic ON i.object_id = ic.object_id AND i.index_id = ic.index_id
2104
+ INNER JOIN sys.columns c ON ic.object_id = c.object_id AND ic.column_id = c.column_id
2105
+ INNER JOIN sys.objects o ON i.object_id = o.object_id
2106
+ INNER JOIN sys.schemas s ON o.schema_id = s.schema_id
2107
+ WHERE i.name = @indexName
2108
+ AND s.name = @schemaName
2109
+ ORDER BY ic.key_ordinal
2110
+ `);
2111
+ return {
2112
+ name: row.name,
2113
+ table: row.table,
2114
+ columns: colResult.recordset.map((c) => c.column_name),
2115
+ unique: row.is_unique || false,
2116
+ size: row.size || "0 MB",
2117
+ definition: "",
2118
+ method: row.method?.toLowerCase() || "nonclustered",
2119
+ scans: Number(row.scans) || 0,
2120
+ tuples_read: Number(row.tuples_read) || 0,
2121
+ tuples_fetched: Number(row.tuples_fetched) || 0
2122
+ };
1387
2123
  } catch (error$1) {
1388
- await transaction.rollback();
1389
2124
  throw new error.MastraError(
1390
2125
  {
1391
- id: "MASTRA_STORAGE_MSSQL_STORE_BATCH_INSERT_FAILED",
2126
+ id: "MASTRA_STORAGE_MSSQL_INDEX_DESCRIBE_FAILED",
1392
2127
  domain: error.ErrorDomain.STORAGE,
1393
2128
  category: error.ErrorCategory.THIRD_PARTY,
1394
2129
  details: {
1395
- tableName,
1396
- numberOfRecords: records.length
2130
+ indexName
1397
2131
  }
1398
2132
  },
1399
2133
  error$1
1400
2134
  );
1401
2135
  }
1402
2136
  }
1403
- async dropTable({ tableName }) {
2137
+ /**
2138
+ * Returns definitions for automatic performance indexes
2139
+ * IMPORTANT: Uses seq_id DESC instead of createdAt DESC for MSSQL due to millisecond accuracy limitations
2140
+ * NOTE: Using NVARCHAR(400) for text columns (800 bytes) leaves room for composite indexes
2141
+ */
2142
+ getAutomaticIndexDefinitions() {
2143
+ const schemaPrefix = this.schemaName ? `${this.schemaName}_` : "";
2144
+ return [
2145
+ // Composite indexes for optimal filtering + sorting performance
2146
+ // NVARCHAR(400) = 800 bytes, plus BIGINT (8 bytes) = 808 bytes total (under 900-byte limit)
2147
+ {
2148
+ name: `${schemaPrefix}mastra_threads_resourceid_seqid_idx`,
2149
+ table: storage.TABLE_THREADS,
2150
+ columns: ["resourceId", "seq_id DESC"]
2151
+ },
2152
+ {
2153
+ name: `${schemaPrefix}mastra_messages_thread_id_seqid_idx`,
2154
+ table: storage.TABLE_MESSAGES,
2155
+ columns: ["thread_id", "seq_id DESC"]
2156
+ },
2157
+ {
2158
+ name: `${schemaPrefix}mastra_traces_name_seqid_idx`,
2159
+ table: storage.TABLE_TRACES,
2160
+ columns: ["name", "seq_id DESC"]
2161
+ },
2162
+ {
2163
+ name: `${schemaPrefix}mastra_scores_trace_id_span_id_seqid_idx`,
2164
+ table: storage.TABLE_SCORERS,
2165
+ columns: ["traceId", "spanId", "seq_id DESC"]
2166
+ },
2167
+ // Spans indexes for optimal trace querying
2168
+ {
2169
+ name: `${schemaPrefix}mastra_ai_spans_traceid_startedat_idx`,
2170
+ table: storage.TABLE_AI_SPANS,
2171
+ columns: ["traceId", "startedAt DESC"]
2172
+ },
2173
+ {
2174
+ name: `${schemaPrefix}mastra_ai_spans_parentspanid_startedat_idx`,
2175
+ table: storage.TABLE_AI_SPANS,
2176
+ columns: ["parentSpanId", "startedAt DESC"]
2177
+ },
2178
+ {
2179
+ name: `${schemaPrefix}mastra_ai_spans_name_idx`,
2180
+ table: storage.TABLE_AI_SPANS,
2181
+ columns: ["name"]
2182
+ },
2183
+ {
2184
+ name: `${schemaPrefix}mastra_ai_spans_spantype_startedat_idx`,
2185
+ table: storage.TABLE_AI_SPANS,
2186
+ columns: ["spanType", "startedAt DESC"]
2187
+ }
2188
+ ];
2189
+ }
2190
+ /**
2191
+ * Creates automatic indexes for optimal query performance
2192
+ * Uses getAutomaticIndexDefinitions() to determine which indexes to create
2193
+ */
2194
+ async createAutomaticIndexes() {
1404
2195
  try {
1405
- const tableNameWithSchema = getTableName({ indexName: tableName, schemaName: getSchemaName(this.schemaName) });
1406
- await this.pool.request().query(`DROP TABLE IF EXISTS ${tableNameWithSchema}`);
2196
+ const indexes = this.getAutomaticIndexDefinitions();
2197
+ for (const indexOptions of indexes) {
2198
+ try {
2199
+ await this.createIndex(indexOptions);
2200
+ } catch (error) {
2201
+ this.logger?.warn?.(`Failed to create index ${indexOptions.name}:`, error);
2202
+ }
2203
+ }
1407
2204
  } catch (error$1) {
1408
2205
  throw new error.MastraError(
1409
2206
  {
1410
- id: "MASTRA_STORAGE_MSSQL_STORE_DROP_TABLE_FAILED",
2207
+ id: "MASTRA_STORAGE_MSSQL_STORE_CREATE_PERFORMANCE_INDEXES_FAILED",
1411
2208
  domain: error.ErrorDomain.STORAGE,
1412
- category: error.ErrorCategory.THIRD_PARTY,
1413
- details: {
1414
- tableName
1415
- }
2209
+ category: error.ErrorCategory.THIRD_PARTY
1416
2210
  },
1417
2211
  error$1
1418
2212
  );
1419
2213
  }
1420
2214
  }
1421
2215
  };
1422
- function parseJSON(jsonString) {
1423
- try {
1424
- return JSON.parse(jsonString);
1425
- } catch {
1426
- return jsonString;
1427
- }
1428
- }
1429
2216
  function transformScoreRow(row) {
1430
2217
  return {
1431
2218
  ...row,
1432
- input: parseJSON(row.input),
1433
- scorer: parseJSON(row.scorer),
1434
- preprocessStepResult: parseJSON(row.preprocessStepResult),
1435
- analyzeStepResult: parseJSON(row.analyzeStepResult),
1436
- metadata: parseJSON(row.metadata),
1437
- output: parseJSON(row.output),
1438
- additionalContext: parseJSON(row.additionalContext),
1439
- runtimeContext: parseJSON(row.runtimeContext),
1440
- entity: parseJSON(row.entity),
2219
+ input: storage.safelyParseJSON(row.input),
2220
+ scorer: storage.safelyParseJSON(row.scorer),
2221
+ preprocessStepResult: storage.safelyParseJSON(row.preprocessStepResult),
2222
+ analyzeStepResult: storage.safelyParseJSON(row.analyzeStepResult),
2223
+ metadata: storage.safelyParseJSON(row.metadata),
2224
+ output: storage.safelyParseJSON(row.output),
2225
+ additionalContext: storage.safelyParseJSON(row.additionalContext),
2226
+ requestContext: storage.safelyParseJSON(row.requestContext),
2227
+ entity: storage.safelyParseJSON(row.entity),
1441
2228
  createdAt: row.createdAt,
1442
2229
  updatedAt: row.updatedAt
1443
2230
  };
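
transformScoreRow now leans on storage.safelyParseJSON instead of the local parseJSON helper it replaces; the expected contract is the same one the removed helper had, roughly:

  // Sketch of the assumed behaviour (the real helper lives in @mastra/core/storage):
  // parse JSON strings when possible, otherwise return the value untouched.
  function safelyParseJSONSketch(value) {
    if (typeof value !== 'string') return value;
    try {
      return JSON.parse(value);
    } catch {
      return value;
    }
  }
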
@@ -1482,7 +2269,7 @@ var ScoresMSSQL = class extends storage.ScoresStorage {
1482
2269
  async saveScore(score) {
1483
2270
  let validatedScore;
1484
2271
  try {
1485
- validatedScore = scores.saveScorePayloadSchema.parse(score);
2272
+ validatedScore = evals.saveScorePayloadSchema.parse(score);
1486
2273
  } catch (error$1) {
1487
2274
  throw new error.MastraError(
1488
2275
  {
@@ -1503,7 +2290,7 @@ var ScoresMSSQL = class extends storage.ScoresStorage {
1503
2290
  input,
1504
2291
  output,
1505
2292
  additionalContext,
1506
- runtimeContext,
2293
+ requestContext,
1507
2294
  entity,
1508
2295
  ...rest
1509
2296
  } = validatedScore;
@@ -1512,15 +2299,15 @@ var ScoresMSSQL = class extends storage.ScoresStorage {
1512
2299
  record: {
1513
2300
  id: scoreId,
1514
2301
  ...rest,
1515
- input: JSON.stringify(input) || "",
1516
- output: JSON.stringify(output) || "",
1517
- preprocessStepResult: preprocessStepResult ? JSON.stringify(preprocessStepResult) : null,
1518
- analyzeStepResult: analyzeStepResult ? JSON.stringify(analyzeStepResult) : null,
1519
- metadata: metadata ? JSON.stringify(metadata) : null,
1520
- additionalContext: additionalContext ? JSON.stringify(additionalContext) : null,
1521
- runtimeContext: runtimeContext ? JSON.stringify(runtimeContext) : null,
1522
- entity: entity ? JSON.stringify(entity) : null,
1523
- scorer: scorer ? JSON.stringify(scorer) : null,
2302
+ input: input || "",
2303
+ output: output || "",
2304
+ preprocessStepResult: preprocessStepResult || null,
2305
+ analyzeStepResult: analyzeStepResult || null,
2306
+ metadata: metadata || null,
2307
+ additionalContext: additionalContext || null,
2308
+ requestContext: requestContext || null,
2309
+ entity: entity || null,
2310
+ scorer: scorer || null,
1524
2311
  createdAt: (/* @__PURE__ */ new Date()).toISOString(),
1525
2312
  updatedAt: (/* @__PURE__ */ new Date()).toISOString()
1526
2313
  }
@@ -1538,41 +2325,70 @@ var ScoresMSSQL = class extends storage.ScoresStorage {
1538
2325
  );
1539
2326
  }
1540
2327
  }
1541
- async getScoresByScorerId({
2328
+ async listScoresByScorerId({
1542
2329
  scorerId,
1543
- pagination
2330
+ pagination,
2331
+ entityId,
2332
+ entityType,
2333
+ source
1544
2334
  }) {
1545
2335
  try {
1546
- const request = this.pool.request();
1547
- request.input("p1", scorerId);
1548
- const totalResult = await request.query(
1549
- `SELECT COUNT(*) as count FROM ${getTableName({ indexName: storage.TABLE_SCORERS, schemaName: getSchemaName(this.schema) })} WHERE [scorerId] = @p1`
1550
- );
2336
+ const conditions = ["[scorerId] = @p1"];
2337
+ const params = { p1: scorerId };
2338
+ let paramIndex = 2;
2339
+ if (entityId) {
2340
+ conditions.push(`[entityId] = @p${paramIndex}`);
2341
+ params[`p${paramIndex}`] = entityId;
2342
+ paramIndex++;
2343
+ }
2344
+ if (entityType) {
2345
+ conditions.push(`[entityType] = @p${paramIndex}`);
2346
+ params[`p${paramIndex}`] = entityType;
2347
+ paramIndex++;
2348
+ }
2349
+ if (source) {
2350
+ conditions.push(`[source] = @p${paramIndex}`);
2351
+ params[`p${paramIndex}`] = source;
2352
+ paramIndex++;
2353
+ }
2354
+ const whereClause = conditions.join(" AND ");
2355
+ const tableName = getTableName({ indexName: storage.TABLE_SCORERS, schemaName: getSchemaName(this.schema) });
2356
+ const countRequest = this.pool.request();
2357
+ Object.entries(params).forEach(([key, value]) => {
2358
+ countRequest.input(key, value);
2359
+ });
2360
+ const totalResult = await countRequest.query(`SELECT COUNT(*) as count FROM ${tableName} WHERE ${whereClause}`);
1551
2361
  const total = totalResult.recordset[0]?.count || 0;
2362
+ const { page, perPage: perPageInput } = pagination;
1552
2363
  if (total === 0) {
1553
2364
  return {
1554
2365
  pagination: {
1555
2366
  total: 0,
1556
- page: pagination.page,
1557
- perPage: pagination.perPage,
2367
+ page,
2368
+ perPage: perPageInput,
1558
2369
  hasMore: false
1559
2370
  },
1560
2371
  scores: []
1561
2372
  };
1562
2373
  }
2374
+ const perPage = storage.normalizePerPage(perPageInput, 100);
2375
+ const { offset: start, perPage: perPageForResponse } = storage.calculatePagination(page, perPageInput, perPage);
2376
+ const limitValue = perPageInput === false ? total : perPage;
2377
+ const end = perPageInput === false ? total : start + perPage;
1563
2378
  const dataRequest = this.pool.request();
1564
- dataRequest.input("p1", scorerId);
1565
- dataRequest.input("p2", pagination.perPage);
1566
- dataRequest.input("p3", pagination.page * pagination.perPage);
1567
- const result = await dataRequest.query(
1568
- `SELECT * FROM ${getTableName({ indexName: storage.TABLE_SCORERS, schemaName: getSchemaName(this.schema) })} WHERE [scorerId] = @p1 ORDER BY [createdAt] DESC OFFSET @p3 ROWS FETCH NEXT @p2 ROWS ONLY`
1569
- );
2379
+ Object.entries(params).forEach(([key, value]) => {
2380
+ dataRequest.input(key, value);
2381
+ });
2382
+ dataRequest.input("perPage", limitValue);
2383
+ dataRequest.input("offset", start);
2384
+ const dataQuery = `SELECT * FROM ${tableName} WHERE ${whereClause} ORDER BY [createdAt] DESC OFFSET @offset ROWS FETCH NEXT @perPage ROWS ONLY`;
2385
+ const result = await dataRequest.query(dataQuery);
1570
2386
  return {
1571
2387
  pagination: {
1572
2388
  total: Number(total),
1573
- page: pagination.page,
1574
- perPage: pagination.perPage,
1575
- hasMore: Number(total) > (pagination.page + 1) * pagination.perPage
2389
+ page,
2390
+ perPage: perPageForResponse,
2391
+ hasMore: end < total
1576
2392
  },
1577
2393
  scores: result.recordset.map((row) => transformScoreRow(row))
1578
2394
  };
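
listScoresByScorerId (renamed from getScoresByScorerId) now takes optional entityId, entityType and source filters on top of the pagination object, and passing perPage: false fetches every matching row. A hypothetical call through the store facade (the scorer id and filter values are placeholders):

  const { scores, pagination } = await store.listScoresByScorerId({
    scorerId: 'answer-relevancy',
    entityType: 'AGENT',
    source: 'LIVE',
    pagination: { page: 0, perPage: 20 },   // perPage: false would return all rows
  });
  console.log(pagination.total, pagination.hasMore, scores.length);
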
@@ -1588,7 +2404,7 @@ var ScoresMSSQL = class extends storage.ScoresStorage {
1588
2404
  );
1589
2405
  }
1590
2406
  }
1591
- async getScoresByRunId({
2407
+ async listScoresByRunId({
1592
2408
  runId,
1593
2409
  pagination
1594
2410
  }) {
@@ -1599,30 +2415,35 @@ var ScoresMSSQL = class extends storage.ScoresStorage {
1599
2415
  `SELECT COUNT(*) as count FROM ${getTableName({ indexName: storage.TABLE_SCORERS, schemaName: getSchemaName(this.schema) })} WHERE [runId] = @p1`
1600
2416
  );
1601
2417
  const total = totalResult.recordset[0]?.count || 0;
2418
+ const { page, perPage: perPageInput } = pagination;
1602
2419
  if (total === 0) {
1603
2420
  return {
1604
2421
  pagination: {
1605
2422
  total: 0,
1606
- page: pagination.page,
1607
- perPage: pagination.perPage,
2423
+ page,
2424
+ perPage: perPageInput,
1608
2425
  hasMore: false
1609
2426
  },
1610
2427
  scores: []
1611
2428
  };
1612
2429
  }
2430
+ const perPage = storage.normalizePerPage(perPageInput, 100);
2431
+ const { offset: start, perPage: perPageForResponse } = storage.calculatePagination(page, perPageInput, perPage);
2432
+ const limitValue = perPageInput === false ? total : perPage;
2433
+ const end = perPageInput === false ? total : start + perPage;
1613
2434
  const dataRequest = this.pool.request();
1614
2435
  dataRequest.input("p1", runId);
1615
- dataRequest.input("p2", pagination.perPage);
1616
- dataRequest.input("p3", pagination.page * pagination.perPage);
2436
+ dataRequest.input("p2", limitValue);
2437
+ dataRequest.input("p3", start);
1617
2438
  const result = await dataRequest.query(
1618
2439
  `SELECT * FROM ${getTableName({ indexName: storage.TABLE_SCORERS, schemaName: getSchemaName(this.schema) })} WHERE [runId] = @p1 ORDER BY [createdAt] DESC OFFSET @p3 ROWS FETCH NEXT @p2 ROWS ONLY`
1619
2440
  );
1620
2441
  return {
1621
2442
  pagination: {
1622
2443
  total: Number(total),
1623
- page: pagination.page,
1624
- perPage: pagination.perPage,
1625
- hasMore: Number(total) > (pagination.page + 1) * pagination.perPage
2444
+ page,
2445
+ perPage: perPageForResponse,
2446
+ hasMore: end < total
1626
2447
  },
1627
2448
  scores: result.recordset.map((row) => transformScoreRow(row))
1628
2449
  };
@@ -1638,7 +2459,7 @@ var ScoresMSSQL = class extends storage.ScoresStorage {
1638
2459
  );
1639
2460
  }
1640
2461
  }
1641
- async getScoresByEntityId({
2462
+ async listScoresByEntityId({
1642
2463
  entityId,
1643
2464
  entityType,
1644
2465
  pagination
@@ -1651,31 +2472,36 @@ var ScoresMSSQL = class extends storage.ScoresStorage {
1651
2472
  `SELECT COUNT(*) as count FROM ${getTableName({ indexName: storage.TABLE_SCORERS, schemaName: getSchemaName(this.schema) })} WHERE [entityId] = @p1 AND [entityType] = @p2`
1652
2473
  );
1653
2474
  const total = totalResult.recordset[0]?.count || 0;
2475
+ const { page, perPage: perPageInput } = pagination;
2476
+ const perPage = storage.normalizePerPage(perPageInput, 100);
2477
+ const { offset: start, perPage: perPageForResponse } = storage.calculatePagination(page, perPageInput, perPage);
1654
2478
  if (total === 0) {
1655
2479
  return {
1656
2480
  pagination: {
1657
2481
  total: 0,
1658
- page: pagination.page,
1659
- perPage: pagination.perPage,
2482
+ page,
2483
+ perPage: perPageForResponse,
1660
2484
  hasMore: false
1661
2485
  },
1662
2486
  scores: []
1663
2487
  };
1664
2488
  }
2489
+ const limitValue = perPageInput === false ? total : perPage;
2490
+ const end = perPageInput === false ? total : start + perPage;
1665
2491
  const dataRequest = this.pool.request();
1666
2492
  dataRequest.input("p1", entityId);
1667
2493
  dataRequest.input("p2", entityType);
1668
- dataRequest.input("p3", pagination.perPage);
1669
- dataRequest.input("p4", pagination.page * pagination.perPage);
2494
+ dataRequest.input("p3", limitValue);
2495
+ dataRequest.input("p4", start);
1670
2496
  const result = await dataRequest.query(
1671
2497
  `SELECT * FROM ${getTableName({ indexName: storage.TABLE_SCORERS, schemaName: getSchemaName(this.schema) })} WHERE [entityId] = @p1 AND [entityType] = @p2 ORDER BY [createdAt] DESC OFFSET @p4 ROWS FETCH NEXT @p3 ROWS ONLY`
1672
2498
  );
1673
2499
  return {
1674
2500
  pagination: {
1675
2501
  total: Number(total),
1676
- page: pagination.page,
1677
- perPage: pagination.perPage,
1678
- hasMore: Number(total) > (pagination.page + 1) * pagination.perPage
2502
+ page,
2503
+ perPage: perPageForResponse,
2504
+ hasMore: end < total
1679
2505
  },
1680
2506
  scores: result.recordset.map((row) => transformScoreRow(row))
1681
2507
  };
@@ -1691,7 +2517,7 @@ var ScoresMSSQL = class extends storage.ScoresStorage {
1691
2517
  );
1692
2518
  }
1693
2519
  }
1694
- async getScoresBySpan({
2520
+ async listScoresBySpan({
1695
2521
  traceId,
1696
2522
  spanId,
1697
2523
  pagination
@@ -1704,34 +2530,38 @@ var ScoresMSSQL = class extends storage.ScoresStorage {
1704
2530
  `SELECT COUNT(*) as count FROM ${getTableName({ indexName: storage.TABLE_SCORERS, schemaName: getSchemaName(this.schema) })} WHERE [traceId] = @p1 AND [spanId] = @p2`
1705
2531
  );
1706
2532
  const total = totalResult.recordset[0]?.count || 0;
2533
+ const { page, perPage: perPageInput } = pagination;
2534
+ const perPage = storage.normalizePerPage(perPageInput, 100);
2535
+ const { offset: start, perPage: perPageForResponse } = storage.calculatePagination(page, perPageInput, perPage);
1707
2536
  if (total === 0) {
1708
2537
  return {
1709
2538
  pagination: {
1710
2539
  total: 0,
1711
- page: pagination.page,
1712
- perPage: pagination.perPage,
2540
+ page,
2541
+ perPage: perPageForResponse,
1713
2542
  hasMore: false
1714
2543
  },
1715
2544
  scores: []
1716
2545
  };
1717
2546
  }
1718
- const limit = pagination.perPage + 1;
2547
+ const limitValue = perPageInput === false ? total : perPage;
2548
+ const end = perPageInput === false ? total : start + perPage;
1719
2549
  const dataRequest = this.pool.request();
1720
2550
  dataRequest.input("p1", traceId);
1721
2551
  dataRequest.input("p2", spanId);
1722
- dataRequest.input("p3", limit);
1723
- dataRequest.input("p4", pagination.page * pagination.perPage);
2552
+ dataRequest.input("p3", limitValue);
2553
+ dataRequest.input("p4", start);
1724
2554
  const result = await dataRequest.query(
1725
2555
  `SELECT * FROM ${getTableName({ indexName: storage.TABLE_SCORERS, schemaName: getSchemaName(this.schema) })} WHERE [traceId] = @p1 AND [spanId] = @p2 ORDER BY [createdAt] DESC OFFSET @p4 ROWS FETCH NEXT @p3 ROWS ONLY`
1726
2556
  );
1727
2557
  return {
1728
2558
  pagination: {
1729
2559
  total: Number(total),
1730
- page: pagination.page,
1731
- perPage: pagination.perPage,
1732
- hasMore: result.recordset.length > pagination.perPage
2560
+ page,
2561
+ perPage: perPageForResponse,
2562
+ hasMore: end < total
1733
2563
  },
1734
- scores: result.recordset.slice(0, pagination.perPage).map((row) => transformScoreRow(row))
2564
+ scores: result.recordset.map((row) => transformScoreRow(row))
1735
2565
  };
1736
2566
  } catch (error$1) {
1737
2567
  throw new error.MastraError(
@@ -1746,7 +2576,7 @@ var ScoresMSSQL = class extends storage.ScoresStorage {
1746
2576
  }
1747
2577
  }
1748
2578
  };
1749
- var TracesMSSQL = class extends storage.TracesStorage {
2579
+ var WorkflowsMSSQL = class extends storage.WorkflowsStorage {
1750
2580
  pool;
1751
2581
  operations;
1752
2582
  schema;
@@ -1760,207 +2590,164 @@ var TracesMSSQL = class extends storage.TracesStorage {
1760
2590
  this.operations = operations;
1761
2591
  this.schema = schema;
1762
2592
  }
1763
- /** @deprecated use getTracesPaginated instead*/
1764
- async getTraces(args) {
1765
- if (args.fromDate || args.toDate) {
1766
- args.dateRange = {
1767
- start: args.fromDate,
1768
- end: args.toDate
1769
- };
1770
- }
1771
- const result = await this.getTracesPaginated(args);
1772
- return result.traces;
1773
- }
1774
- async getTracesPaginated(args) {
1775
- const { name, scope, page = 0, perPage: perPageInput, attributes, filters, dateRange } = args;
1776
- const fromDate = dateRange?.start;
1777
- const toDate = dateRange?.end;
1778
- const perPage = perPageInput !== void 0 ? perPageInput : 100;
1779
- const currentOffset = page * perPage;
1780
- const paramMap = {};
1781
- const conditions = [];
1782
- let paramIndex = 1;
1783
- if (name) {
1784
- const paramName = `p${paramIndex++}`;
1785
- conditions.push(`[name] LIKE @${paramName}`);
1786
- paramMap[paramName] = `${name}%`;
1787
- }
1788
- if (scope) {
1789
- const paramName = `p${paramIndex++}`;
1790
- conditions.push(`[scope] = @${paramName}`);
1791
- paramMap[paramName] = scope;
1792
- }
1793
- if (attributes) {
1794
- Object.entries(attributes).forEach(([key, value]) => {
1795
- const parsedKey = utils.parseFieldKey(key);
1796
- const paramName = `p${paramIndex++}`;
1797
- conditions.push(`JSON_VALUE([attributes], '$.${parsedKey}') = @${paramName}`);
1798
- paramMap[paramName] = value;
1799
- });
1800
- }
1801
- if (filters) {
1802
- Object.entries(filters).forEach(([key, value]) => {
1803
- const parsedKey = utils.parseFieldKey(key);
1804
- const paramName = `p${paramIndex++}`;
1805
- conditions.push(`[${parsedKey}] = @${paramName}`);
1806
- paramMap[paramName] = value;
1807
- });
1808
- }
1809
- if (fromDate instanceof Date && !isNaN(fromDate.getTime())) {
1810
- const paramName = `p${paramIndex++}`;
1811
- conditions.push(`[createdAt] >= @${paramName}`);
1812
- paramMap[paramName] = fromDate.toISOString();
1813
- }
1814
- if (toDate instanceof Date && !isNaN(toDate.getTime())) {
1815
- const paramName = `p${paramIndex++}`;
1816
- conditions.push(`[createdAt] <= @${paramName}`);
1817
- paramMap[paramName] = toDate.toISOString();
2593
+ parseWorkflowRun(row) {
2594
+ let parsedSnapshot = row.snapshot;
2595
+ if (typeof parsedSnapshot === "string") {
2596
+ try {
2597
+ parsedSnapshot = JSON.parse(row.snapshot);
2598
+ } catch (e) {
2599
+ this.logger?.warn?.(`Failed to parse snapshot for workflow ${row.workflow_name}:`, e);
2600
+ }
1818
2601
  }
1819
- const whereClause = conditions.length > 0 ? `WHERE ${conditions.join(" AND ")}` : "";
1820
- const countQuery = `SELECT COUNT(*) as total FROM ${getTableName({ indexName: storage.TABLE_TRACES, schemaName: getSchemaName(this.schema) })} ${whereClause}`;
1821
- let total = 0;
2602
+ return {
2603
+ workflowName: row.workflow_name,
2604
+ runId: row.run_id,
2605
+ snapshot: parsedSnapshot,
2606
+ createdAt: row.createdAt,
2607
+ updatedAt: row.updatedAt,
2608
+ resourceId: row.resourceId
2609
+ };
2610
+ }
2611
+ async updateWorkflowResults({
2612
+ workflowName,
2613
+ runId,
2614
+ stepId,
2615
+ result,
2616
+ requestContext
2617
+ }) {
2618
+ const table = getTableName({ indexName: storage.TABLE_WORKFLOW_SNAPSHOT, schemaName: getSchemaName(this.schema) });
2619
+ const transaction = this.pool.transaction();
1822
2620
  try {
1823
- const countRequest = this.pool.request();
1824
- Object.entries(paramMap).forEach(([key, value]) => {
1825
- if (value instanceof Date) {
1826
- countRequest.input(key, sql2__default.default.DateTime, value);
1827
- } else {
1828
- countRequest.input(key, value);
1829
- }
1830
- });
1831
- const countResult = await countRequest.query(countQuery);
1832
- total = parseInt(countResult.recordset[0].total, 10);
2621
+ await transaction.begin();
2622
+ const selectRequest = new sql2__default.default.Request(transaction);
2623
+ selectRequest.input("workflow_name", workflowName);
2624
+ selectRequest.input("run_id", runId);
2625
+ const existingSnapshotResult = await selectRequest.query(
2626
+ `SELECT snapshot FROM ${table} WITH (UPDLOCK, HOLDLOCK) WHERE workflow_name = @workflow_name AND run_id = @run_id`
2627
+ );
2628
+ let snapshot;
2629
+ if (!existingSnapshotResult.recordset || existingSnapshotResult.recordset.length === 0) {
2630
+ snapshot = {
2631
+ context: {},
2632
+ activePaths: [],
2633
+ timestamp: Date.now(),
2634
+ suspendedPaths: {},
2635
+ resumeLabels: {},
2636
+ serializedStepGraph: [],
2637
+ value: {},
2638
+ waitingPaths: {},
2639
+ status: "pending",
2640
+ runId,
2641
+ requestContext: {}
2642
+ };
2643
+ } else {
2644
+ const existingSnapshot = existingSnapshotResult.recordset[0].snapshot;
2645
+ snapshot = typeof existingSnapshot === "string" ? JSON.parse(existingSnapshot) : existingSnapshot;
2646
+ }
2647
+ snapshot.context[stepId] = result;
2648
+ snapshot.requestContext = { ...snapshot.requestContext, ...requestContext };
2649
+ const upsertReq = new sql2__default.default.Request(transaction);
2650
+ upsertReq.input("workflow_name", workflowName);
2651
+ upsertReq.input("run_id", runId);
2652
+ upsertReq.input("snapshot", JSON.stringify(snapshot));
2653
+ upsertReq.input("createdAt", sql2__default.default.DateTime2, /* @__PURE__ */ new Date());
2654
+ upsertReq.input("updatedAt", sql2__default.default.DateTime2, /* @__PURE__ */ new Date());
2655
+ await upsertReq.query(
2656
+ `MERGE ${table} AS target
2657
+ USING (SELECT @workflow_name AS workflow_name, @run_id AS run_id) AS src
2658
+ ON target.workflow_name = src.workflow_name AND target.run_id = src.run_id
2659
+ WHEN MATCHED THEN UPDATE SET snapshot = @snapshot, [updatedAt] = @updatedAt
2660
+ WHEN NOT MATCHED THEN INSERT (workflow_name, run_id, snapshot, [createdAt], [updatedAt])
2661
+ VALUES (@workflow_name, @run_id, @snapshot, @createdAt, @updatedAt);`
2662
+ );
2663
+ await transaction.commit();
2664
+ return snapshot.context;
1833
2665
  } catch (error$1) {
2666
+ try {
2667
+ await transaction.rollback();
2668
+ } catch {
2669
+ }
1834
2670
  throw new error.MastraError(
1835
2671
  {
1836
- id: "MASTRA_STORAGE_MSSQL_STORE_GET_TRACES_PAGINATED_FAILED_TO_RETRIEVE_TOTAL_COUNT",
2672
+ id: "MASTRA_STORAGE_MSSQL_STORE_UPDATE_WORKFLOW_RESULTS_FAILED",
1837
2673
  domain: error.ErrorDomain.STORAGE,
1838
2674
  category: error.ErrorCategory.THIRD_PARTY,
1839
2675
  details: {
1840
- name: args.name ?? "",
1841
- scope: args.scope ?? ""
2676
+ workflowName,
2677
+ runId,
2678
+ stepId
1842
2679
  }
1843
2680
  },
1844
2681
  error$1
1845
2682
  );
1846
2683
  }
1847
- if (total === 0) {
1848
- return {
1849
- traces: [],
1850
- total: 0,
1851
- page,
1852
- perPage,
1853
- hasMore: false
1854
- };
1855
- }
1856
- const dataQuery = `SELECT * FROM ${getTableName({ indexName: storage.TABLE_TRACES, schemaName: getSchemaName(this.schema) })} ${whereClause} ORDER BY [seq_id] DESC OFFSET @offset ROWS FETCH NEXT @limit ROWS ONLY`;
1857
- const dataRequest = this.pool.request();
1858
- Object.entries(paramMap).forEach(([key, value]) => {
1859
- if (value instanceof Date) {
1860
- dataRequest.input(key, sql2__default.default.DateTime, value);
1861
- } else {
1862
- dataRequest.input(key, value);
1863
- }
1864
- });
1865
- dataRequest.input("offset", currentOffset);
1866
- dataRequest.input("limit", perPage);
2684
+ }
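
updateWorkflowResults is no longer a stub: it reads the existing snapshot under UPDLOCK/HOLDLOCK, merges the step result and requestContext, and upserts with MERGE, so concurrent steps of the same run cannot overwrite each other. Called through the store it looks roughly like this (workflow, run and step identifiers are placeholders):

  const context = await store.updateWorkflowResults({
    workflowName: 'order-fulfillment',
    runId: 'run-42',
    stepId: 'charge-card',
    result: { status: 'success', output: { chargeId: 'ch_1' } },
    requestContext: { userId: 'user-7' },
  });
  // `context` is the merged snapshot.context map, keyed by step id.

If no snapshot row exists yet, a pending snapshot is created first, so the call is safe to make before the run has been persisted.
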
2685
+ async updateWorkflowState({
2686
+ workflowName,
2687
+ runId,
2688
+ opts
2689
+ }) {
2690
+ const table = getTableName({ indexName: storage.TABLE_WORKFLOW_SNAPSHOT, schemaName: getSchemaName(this.schema) });
2691
+ const transaction = this.pool.transaction();
1867
2692
  try {
1868
- const rowsResult = await dataRequest.query(dataQuery);
1869
- const rows = rowsResult.recordset;
1870
- const traces = rows.map((row) => ({
1871
- id: row.id,
1872
- parentSpanId: row.parentSpanId,
1873
- traceId: row.traceId,
1874
- name: row.name,
1875
- scope: row.scope,
1876
- kind: row.kind,
1877
- status: JSON.parse(row.status),
1878
- events: JSON.parse(row.events),
1879
- links: JSON.parse(row.links),
1880
- attributes: JSON.parse(row.attributes),
1881
- startTime: row.startTime,
1882
- endTime: row.endTime,
1883
- other: row.other,
1884
- createdAt: row.createdAt
1885
- }));
1886
- return {
1887
- traces,
1888
- total,
1889
- page,
1890
- perPage,
1891
- hasMore: currentOffset + traces.length < total
1892
- };
2693
+ await transaction.begin();
2694
+ const selectRequest = new sql2__default.default.Request(transaction);
2695
+ selectRequest.input("workflow_name", workflowName);
2696
+ selectRequest.input("run_id", runId);
2697
+ const existingSnapshotResult = await selectRequest.query(
2698
+ `SELECT snapshot FROM ${table} WITH (UPDLOCK, HOLDLOCK) WHERE workflow_name = @workflow_name AND run_id = @run_id`
2699
+ );
2700
+ if (!existingSnapshotResult.recordset || existingSnapshotResult.recordset.length === 0) {
2701
+ await transaction.rollback();
2702
+ return void 0;
2703
+ }
2704
+ const existingSnapshot = existingSnapshotResult.recordset[0].snapshot;
2705
+ const snapshot = typeof existingSnapshot === "string" ? JSON.parse(existingSnapshot) : existingSnapshot;
2706
+ if (!snapshot || !snapshot?.context) {
2707
+ await transaction.rollback();
2708
+ throw new error.MastraError(
2709
+ {
2710
+ id: "MASTRA_STORAGE_MSSQL_STORE_UPDATE_WORKFLOW_STATE_SNAPSHOT_NOT_FOUND",
2711
+ domain: error.ErrorDomain.STORAGE,
2712
+ category: error.ErrorCategory.SYSTEM,
2713
+ details: {
2714
+ workflowName,
2715
+ runId
2716
+ }
2717
+ },
2718
+ new Error(`Snapshot not found for runId ${runId}`)
2719
+ );
2720
+ }
2721
+ const updatedSnapshot = { ...snapshot, ...opts };
2722
+ const updateRequest = new sql2__default.default.Request(transaction);
2723
+ updateRequest.input("snapshot", JSON.stringify(updatedSnapshot));
2724
+ updateRequest.input("workflow_name", workflowName);
2725
+ updateRequest.input("run_id", runId);
2726
+ updateRequest.input("updatedAt", sql2__default.default.DateTime2, /* @__PURE__ */ new Date());
2727
+ await updateRequest.query(
2728
+ `UPDATE ${table} SET snapshot = @snapshot, [updatedAt] = @updatedAt WHERE workflow_name = @workflow_name AND run_id = @run_id`
2729
+ );
2730
+ await transaction.commit();
2731
+ return updatedSnapshot;
1893
2732
  } catch (error$1) {
2733
+ try {
2734
+ await transaction.rollback();
2735
+ } catch {
2736
+ }
1894
2737
  throw new error.MastraError(
1895
2738
  {
1896
- id: "MASTRA_STORAGE_MSSQL_STORE_GET_TRACES_PAGINATED_FAILED_TO_RETRIEVE_TRACES",
2739
+ id: "MASTRA_STORAGE_MSSQL_STORE_UPDATE_WORKFLOW_STATE_FAILED",
1897
2740
  domain: error.ErrorDomain.STORAGE,
1898
2741
  category: error.ErrorCategory.THIRD_PARTY,
1899
2742
  details: {
1900
- name: args.name ?? "",
1901
- scope: args.scope ?? ""
2743
+ workflowName,
2744
+ runId
1902
2745
  }
1903
2746
  },
1904
2747
  error$1
1905
2748
  );
1906
2749
  }
1907
2750
  }
1908
- async batchTraceInsert({ records }) {
1909
- this.logger.debug("Batch inserting traces", { count: records.length });
1910
- await this.operations.batchInsert({
1911
- tableName: storage.TABLE_TRACES,
1912
- records
1913
- });
1914
- }
1915
- };
1916
- function parseWorkflowRun(row) {
1917
- let parsedSnapshot = row.snapshot;
1918
- if (typeof parsedSnapshot === "string") {
1919
- try {
1920
- parsedSnapshot = JSON.parse(row.snapshot);
1921
- } catch (e) {
1922
- console.warn(`Failed to parse snapshot for workflow ${row.workflow_name}: ${e}`);
1923
- }
1924
- }
1925
- return {
1926
- workflowName: row.workflow_name,
1927
- runId: row.run_id,
1928
- snapshot: parsedSnapshot,
1929
- createdAt: row.createdAt,
1930
- updatedAt: row.updatedAt,
1931
- resourceId: row.resourceId
1932
- };
1933
- }
1934
- var WorkflowsMSSQL = class extends storage.WorkflowsStorage {
1935
- pool;
1936
- operations;
1937
- schema;
1938
- constructor({
1939
- pool,
1940
- operations,
1941
- schema
1942
- }) {
1943
- super();
1944
- this.pool = pool;
1945
- this.operations = operations;
1946
- this.schema = schema;
1947
- }
1948
- updateWorkflowResults({
1949
- // workflowName,
1950
- // runId,
1951
- // stepId,
1952
- // result,
1953
- // runtimeContext,
1954
- }) {
1955
- throw new Error("Method not implemented.");
1956
- }
1957
- updateWorkflowState({
1958
- // workflowName,
1959
- // runId,
1960
- // opts,
1961
- }) {
1962
- throw new Error("Method not implemented.");
1963
- }
1964
2751
  async persistWorkflowSnapshot({
1965
2752
  workflowName,
1966
2753
  runId,
@@ -2057,7 +2844,7 @@ var WorkflowsMSSQL = class extends storage.WorkflowsStorage {
2057
2844
  if (!result.recordset || result.recordset.length === 0) {
2058
2845
  return null;
2059
2846
  }
2060
- return parseWorkflowRun(result.recordset[0]);
2847
+ return this.parseWorkflowRun(result.recordset[0]);
2061
2848
  } catch (error$1) {
2062
2849
  throw new error.MastraError(
2063
2850
  {
@@ -2073,12 +2860,12 @@ var WorkflowsMSSQL = class extends storage.WorkflowsStorage {
2073
2860
  );
2074
2861
  }
2075
2862
  }
2076
- async getWorkflowRuns({
2863
+ async listWorkflowRuns({
2077
2864
  workflowName,
2078
2865
  fromDate,
2079
2866
  toDate,
2080
- limit,
2081
- offset,
2867
+ page,
2868
+ perPage,
2082
2869
  resourceId
2083
2870
  } = {}) {
2084
2871
  try {
@@ -2094,7 +2881,7 @@ var WorkflowsMSSQL = class extends storage.WorkflowsStorage {
2094
2881
  conditions.push(`[resourceId] = @resourceId`);
2095
2882
  paramMap["resourceId"] = resourceId;
2096
2883
  } else {
2097
- console.warn(`[${storage.TABLE_WORKFLOW_SNAPSHOT}] resourceId column not found. Skipping resourceId filter.`);
2884
+ this.logger?.warn?.(`[${storage.TABLE_WORKFLOW_SNAPSHOT}] resourceId column not found. Skipping resourceId filter.`);
2098
2885
  }
2099
2886
  }
2100
2887
  if (fromDate instanceof Date && !isNaN(fromDate.getTime())) {
@@ -2116,24 +2903,27 @@ var WorkflowsMSSQL = class extends storage.WorkflowsStorage {
2116
2903
  request.input(key, value);
2117
2904
  }
2118
2905
  });
2119
- if (limit !== void 0 && offset !== void 0) {
2906
+ const usePagination = typeof perPage === "number" && typeof page === "number";
2907
+ if (usePagination) {
2120
2908
  const countQuery = `SELECT COUNT(*) as count FROM ${tableName} ${whereClause}`;
2121
2909
  const countResult = await request.query(countQuery);
2122
2910
  total = Number(countResult.recordset[0]?.count || 0);
2123
2911
  }
2124
2912
  let query = `SELECT * FROM ${tableName} ${whereClause} ORDER BY [seq_id] DESC`;
2125
- if (limit !== void 0 && offset !== void 0) {
2126
- query += ` OFFSET @offset ROWS FETCH NEXT @limit ROWS ONLY`;
2127
- request.input("limit", limit);
2913
+ if (usePagination) {
2914
+ const normalizedPerPage = storage.normalizePerPage(perPage, Number.MAX_SAFE_INTEGER);
2915
+ const offset = page * normalizedPerPage;
2916
+ query += ` OFFSET @offset ROWS FETCH NEXT @perPage ROWS ONLY`;
2917
+ request.input("perPage", normalizedPerPage);
2128
2918
  request.input("offset", offset);
2129
2919
  }
2130
2920
  const result = await request.query(query);
2131
- const runs = (result.recordset || []).map((row) => parseWorkflowRun(row));
2921
+ const runs = (result.recordset || []).map((row) => this.parseWorkflowRun(row));
2132
2922
  return { runs, total: total || runs.length };
2133
2923
  } catch (error$1) {
2134
2924
  throw new error.MastraError(
2135
2925
  {
2136
- id: "MASTRA_STORAGE_MSSQL_STORE_GET_WORKFLOW_RUNS_FAILED",
2926
+ id: "MASTRA_STORAGE_MSSQL_STORE_LIST_WORKFLOW_RUNS_FAILED",
2137
2927
  domain: error.ErrorDomain.STORAGE,
2138
2928
  category: error.ErrorCategory.THIRD_PARTY,
2139
2929
  details: {
@@ -2176,19 +2966,17 @@ var MSSQLStore = class extends storage.MastraStorage {
2176
2966
  port: config.port,
2177
2967
  options: config.options || { encrypt: true, trustServerCertificate: true }
2178
2968
  });
2179
- const legacyEvals = new LegacyEvalsMSSQL({ pool: this.pool, schema: this.schema });
2180
2969
  const operations = new StoreOperationsMSSQL({ pool: this.pool, schemaName: this.schema });
2181
2970
  const scores = new ScoresMSSQL({ pool: this.pool, operations, schema: this.schema });
2182
- const traces = new TracesMSSQL({ pool: this.pool, operations, schema: this.schema });
2183
2971
  const workflows = new WorkflowsMSSQL({ pool: this.pool, operations, schema: this.schema });
2184
2972
  const memory = new MemoryMSSQL({ pool: this.pool, schema: this.schema, operations });
2973
+ const observability = new ObservabilityMSSQL({ pool: this.pool, operations, schema: this.schema });
2185
2974
  this.stores = {
2186
2975
  operations,
2187
2976
  scores,
2188
- traces,
2189
2977
  workflows,
2190
- legacyEvals,
2191
- memory
2978
+ memory,
2979
+ observability
2192
2980
  };
2193
2981
  } catch (e) {
2194
2982
  throw new error.MastraError(
@@ -2208,6 +2996,11 @@ var MSSQLStore = class extends storage.MastraStorage {
2208
2996
  try {
2209
2997
  await this.isConnected;
2210
2998
  await super.init();
2999
+ try {
3000
+ await this.stores.operations.createAutomaticIndexes();
3001
+ } catch (indexError) {
3002
+ this.logger?.warn?.("Failed to create indexes:", indexError);
3003
+ }
2211
3004
  } catch (error$1) {
2212
3005
  this.isConnected = null;
2213
3006
  throw new error.MastraError(
@@ -2235,28 +3028,11 @@ var MSSQLStore = class extends storage.MastraStorage {
2235
3028
  hasColumn: true,
2236
3029
  createTable: true,
2237
3030
  deleteMessages: true,
2238
- getScoresBySpan: true
3031
+ listScoresBySpan: true,
3032
+ observabilityInstance: true,
3033
+ indexManagement: true
2239
3034
  };
2240
3035
  }
2241
- /** @deprecated use getEvals instead */
2242
- async getEvalsByAgentName(agentName, type) {
2243
- return this.stores.legacyEvals.getEvalsByAgentName(agentName, type);
2244
- }
2245
- async getEvals(options = {}) {
2246
- return this.stores.legacyEvals.getEvals(options);
2247
- }
2248
- /**
2249
- * @deprecated use getTracesPaginated instead
2250
- */
2251
- async getTraces(args) {
2252
- return this.stores.traces.getTraces(args);
2253
- }
2254
- async getTracesPaginated(args) {
2255
- return this.stores.traces.getTracesPaginated(args);
2256
- }
2257
- async batchTraceInsert({ records }) {
2258
- return this.stores.traces.batchTraceInsert({ records });
2259
- }
2260
3036
  async createTable({
2261
3037
  tableName,
2262
3038
  schema
@@ -2291,15 +3067,6 @@ var MSSQLStore = class extends storage.MastraStorage {
2291
3067
  async getThreadById({ threadId }) {
2292
3068
  return this.stores.memory.getThreadById({ threadId });
2293
3069
  }
2294
- /**
2295
- * @deprecated use getThreadsByResourceIdPaginated instead
2296
- */
2297
- async getThreadsByResourceId(args) {
2298
- return this.stores.memory.getThreadsByResourceId(args);
2299
- }
2300
- async getThreadsByResourceIdPaginated(args) {
2301
- return this.stores.memory.getThreadsByResourceIdPaginated(args);
2302
- }
2303
3070
  async saveThread({ thread }) {
2304
3071
  return this.stores.memory.saveThread({ thread });
2305
3072
  }
@@ -2313,17 +3080,8 @@ var MSSQLStore = class extends storage.MastraStorage {
2313
3080
  async deleteThread({ threadId }) {
2314
3081
  return this.stores.memory.deleteThread({ threadId });
2315
3082
  }
2316
- async getMessages(args) {
2317
- return this.stores.memory.getMessages(args);
2318
- }
2319
- async getMessagesById({
2320
- messageIds,
2321
- format
2322
- }) {
2323
- return this.stores.memory.getMessagesById({ messageIds, format });
2324
- }
2325
- async getMessagesPaginated(args) {
2326
- return this.stores.memory.getMessagesPaginated(args);
3083
+ async listMessagesById({ messageIds }) {
3084
+ return this.stores.memory.listMessagesById({ messageIds });
2327
3085
  }
2328
3086
  async saveMessages(args) {
2329
3087
  return this.stores.memory.saveMessages(args);
@@ -2357,9 +3115,9 @@ var MSSQLStore = class extends storage.MastraStorage {
2357
3115
  runId,
2358
3116
  stepId,
2359
3117
  result,
2360
- runtimeContext
3118
+ requestContext
2361
3119
  }) {
2362
- return this.stores.workflows.updateWorkflowResults({ workflowName, runId, stepId, result, runtimeContext });
3120
+ return this.stores.workflows.updateWorkflowResults({ workflowName, runId, stepId, result, requestContext });
2363
3121
  }
2364
3122
  async updateWorkflowState({
2365
3123
  workflowName,
@@ -2382,15 +3140,15 @@ var MSSQLStore = class extends storage.MastraStorage {
2382
3140
  }) {
2383
3141
  return this.stores.workflows.loadWorkflowSnapshot({ workflowName, runId });
2384
3142
  }
2385
- async getWorkflowRuns({
3143
+ async listWorkflowRuns({
2386
3144
  workflowName,
2387
3145
  fromDate,
2388
3146
  toDate,
2389
- limit,
2390
- offset,
3147
+ perPage,
3148
+ page,
2391
3149
  resourceId
2392
3150
  } = {}) {
2393
- return this.stores.workflows.getWorkflowRuns({ workflowName, fromDate, toDate, limit, offset, resourceId });
3151
+ return this.stores.workflows.listWorkflowRuns({ workflowName, fromDate, toDate, perPage, page, resourceId });
2394
3152
  }
2395
3153
  async getWorkflowRunById({
2396
3154
  runId,
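
A sketch of the pagination change on workflow run listing: the method is now listWorkflowRuns, and limit/offset are replaced by perPage/page (treating page as a zero-based page index is an assumption).

// Sketch only: page/perPage semantics assumed to be page index plus page size.
async function firstPageOfRuns(store, workflowName) {
  return store.listWorkflowRuns({
    workflowName,
    page: 0,     // formerly expressed via offset
    perPage: 20, // formerly expressed via limit
  });
}
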
@@ -2401,44 +3159,107 @@ var MSSQLStore = class extends storage.MastraStorage {
2401
3159
  async close() {
2402
3160
  await this.pool.close();
2403
3161
  }
3162
+ /**
3163
+ * Index Management
3164
+ */
3165
+ async createIndex(options) {
3166
+ return this.stores.operations.createIndex(options);
3167
+ }
3168
+ async listIndexes(tableName) {
3169
+ return this.stores.operations.listIndexes(tableName);
3170
+ }
3171
+ async describeIndex(indexName) {
3172
+ return this.stores.operations.describeIndex(indexName);
3173
+ }
3174
+ async dropIndex(indexName) {
3175
+ return this.stores.operations.dropIndex(indexName);
3176
+ }
3177
+ /**
3178
+ * Tracing / Observability
3179
+ */
3180
+ getObservabilityStore() {
3181
+ if (!this.stores.observability) {
3182
+ throw new error.MastraError({
3183
+ id: "MSSQL_STORE_OBSERVABILITY_NOT_INITIALIZED",
3184
+ domain: error.ErrorDomain.STORAGE,
3185
+ category: error.ErrorCategory.SYSTEM,
3186
+ text: "Observability storage is not initialized"
3187
+ });
3188
+ }
3189
+ return this.stores.observability;
3190
+ }
3191
+ async createSpan(span) {
3192
+ return this.getObservabilityStore().createSpan(span);
3193
+ }
3194
+ async updateSpan({
3195
+ spanId,
3196
+ traceId,
3197
+ updates
3198
+ }) {
3199
+ return this.getObservabilityStore().updateSpan({ spanId, traceId, updates });
3200
+ }
3201
+ async getAITrace(traceId) {
3202
+ return this.getObservabilityStore().getAITrace(traceId);
3203
+ }
3204
+ async getAITracesPaginated(args) {
3205
+ return this.getObservabilityStore().getAITracesPaginated(args);
3206
+ }
3207
+ async batchCreateSpans(args) {
3208
+ return this.getObservabilityStore().batchCreateSpans(args);
3209
+ }
3210
+ async batchUpdateSpans(args) {
3211
+ return this.getObservabilityStore().batchUpdateSpans(args);
3212
+ }
3213
+ async batchDeleteAITraces(args) {
3214
+ return this.getObservabilityStore().batchDeleteAITraces(args);
3215
+ }
2404
3216
  /**
2405
3217
  * Scorers
2406
3218
  */
2407
3219
  async getScoreById({ id: _id }) {
2408
3220
  return this.stores.scores.getScoreById({ id: _id });
2409
3221
  }
2410
- async getScoresByScorerId({
3222
+ async listScoresByScorerId({
2411
3223
  scorerId: _scorerId,
2412
- pagination: _pagination
3224
+ pagination: _pagination,
3225
+ entityId: _entityId,
3226
+ entityType: _entityType,
3227
+ source: _source
2413
3228
  }) {
2414
- return this.stores.scores.getScoresByScorerId({ scorerId: _scorerId, pagination: _pagination });
3229
+ return this.stores.scores.listScoresByScorerId({
3230
+ scorerId: _scorerId,
3231
+ pagination: _pagination,
3232
+ entityId: _entityId,
3233
+ entityType: _entityType,
3234
+ source: _source
3235
+ });
2415
3236
  }
2416
3237
  async saveScore(_score) {
2417
3238
  return this.stores.scores.saveScore(_score);
2418
3239
  }
2419
- async getScoresByRunId({
3240
+ async listScoresByRunId({
2420
3241
  runId: _runId,
2421
3242
  pagination: _pagination
2422
3243
  }) {
2423
- return this.stores.scores.getScoresByRunId({ runId: _runId, pagination: _pagination });
3244
+ return this.stores.scores.listScoresByRunId({ runId: _runId, pagination: _pagination });
2424
3245
  }
2425
- async getScoresByEntityId({
3246
+ async listScoresByEntityId({
2426
3247
  entityId: _entityId,
2427
3248
  entityType: _entityType,
2428
3249
  pagination: _pagination
2429
3250
  }) {
2430
- return this.stores.scores.getScoresByEntityId({
3251
+ return this.stores.scores.listScoresByEntityId({
2431
3252
  entityId: _entityId,
2432
3253
  entityType: _entityType,
2433
3254
  pagination: _pagination
2434
3255
  });
2435
3256
  }
2436
- async getScoresBySpan({
3257
+ async listScoresBySpan({
2437
3258
  traceId,
2438
3259
  spanId,
2439
3260
  pagination: _pagination
2440
3261
  }) {
2441
- return this.stores.scores.getScoresBySpan({ traceId, spanId, pagination: _pagination });
3262
+ return this.stores.scores.listScoresBySpan({ traceId, spanId, pagination: _pagination });
2442
3263
  }
2443
3264
  };
2444
3265
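
A sketch of the renamed score listing APIs and the new index helpers. Only the method and parameter names come from this diff; every value below, the pagination shape, and the table name passed to listIndexes are placeholders.

// Sketch only: all argument values are placeholders.
async function listAgentScores(store, scorerId) {
  return store.listScoresByScorerId({
    scorerId,
    pagination: { page: 0, perPage: 50 }, // pagination shape is an assumption
    entityId: 'agent-1',
    entityType: 'AGENT',
    source: 'LIVE',
  });
}

async function listSpanScores(store, traceId, spanId) {
  return store.listScoresBySpan({ traceId, spanId, pagination: { page: 0, perPage: 50 } });
}

async function inspectIndexes(store) {
  // listIndexes/describeIndex/dropIndex delegate to stores.operations; the
  // shape of the returned index entries is not shown in this diff.
  return store.listIndexes('mastra_threads'); // table name is a placeholder
}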