@mastra/mssql 0.0.0-monorepo-binary-20251013210052 → 0.0.0-netlify-no-bundle-20251127120354

This diff shows the publicly released contents of the two package versions as they appear in their respective public registries, and is provided for informational purposes only.
package/dist/index.cjs CHANGED
@@ -3,9 +3,10 @@
3
3
  var error = require('@mastra/core/error');
4
4
  var storage = require('@mastra/core/storage');
5
5
  var sql2 = require('mssql');
6
- var utils = require('@mastra/core/utils');
7
6
  var agent = require('@mastra/core/agent');
8
- var scores = require('@mastra/core/scores');
7
+ var utils = require('@mastra/core/utils');
8
+ var crypto = require('crypto');
9
+ var evals = require('@mastra/core/evals');
9
10
 
10
11
  function _interopDefault (e) { return e && e.__esModule ? e : { default: e }; }
11
12
 
@@ -21,154 +22,71 @@ function getTableName({ indexName, schemaName }) {
21
22
  const quotedSchemaName = schemaName;
22
23
  return quotedSchemaName ? `${quotedSchemaName}.${quotedIndexName}` : quotedIndexName;
23
24
  }
24
-
25
- // src/storage/domains/legacy-evals/index.ts
26
- function transformEvalRow(row) {
27
- let testInfoValue = null, resultValue = null;
28
- if (row.test_info) {
29
- try {
30
- testInfoValue = typeof row.test_info === "string" ? JSON.parse(row.test_info) : row.test_info;
31
- } catch {
32
- }
25
+ function buildDateRangeFilter(dateRange, fieldName) {
26
+ const filters = {};
27
+ if (dateRange?.start) {
28
+ filters[`${fieldName}_gte`] = dateRange.start;
33
29
  }
34
- if (row.test_info) {
35
- try {
36
- resultValue = typeof row.result === "string" ? JSON.parse(row.result) : row.result;
37
- } catch {
38
- }
30
+ if (dateRange?.end) {
31
+ filters[`${fieldName}_lte`] = dateRange.end;
39
32
  }
33
+ return filters;
34
+ }
35
+ function prepareWhereClause(filters, _schema) {
36
+ const conditions = [];
37
+ const params = {};
38
+ let paramIndex = 1;
39
+ Object.entries(filters).forEach(([key, value]) => {
40
+ if (value === void 0) return;
41
+ const paramName = `p${paramIndex++}`;
42
+ if (key.endsWith("_gte")) {
43
+ const fieldName = key.slice(0, -4);
44
+ conditions.push(`[${utils.parseSqlIdentifier(fieldName, "field name")}] >= @${paramName}`);
45
+ params[paramName] = value instanceof Date ? value.toISOString() : value;
46
+ } else if (key.endsWith("_lte")) {
47
+ const fieldName = key.slice(0, -4);
48
+ conditions.push(`[${utils.parseSqlIdentifier(fieldName, "field name")}] <= @${paramName}`);
49
+ params[paramName] = value instanceof Date ? value.toISOString() : value;
50
+ } else if (value === null) {
51
+ conditions.push(`[${utils.parseSqlIdentifier(key, "field name")}] IS NULL`);
52
+ } else {
53
+ conditions.push(`[${utils.parseSqlIdentifier(key, "field name")}] = @${paramName}`);
54
+ params[paramName] = value instanceof Date ? value.toISOString() : value;
55
+ }
56
+ });
40
57
  return {
41
- agentName: row.agent_name,
42
- input: row.input,
43
- output: row.output,
44
- result: resultValue,
45
- metricName: row.metric_name,
46
- instructions: row.instructions,
47
- testInfo: testInfoValue,
48
- globalRunId: row.global_run_id,
49
- runId: row.run_id,
50
- createdAt: row.created_at
58
+ sql: conditions.length > 0 ? ` WHERE ${conditions.join(" AND ")}` : "",
59
+ params
51
60
  };
52
61
  }
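
The two helpers added above are what the rest of this file now builds its WHERE clauses from: buildDateRangeFilter turns a { start, end } range into `<field>_gte` / `<field>_lte` keys, and prepareWhereClause converts a flat filter object into a parameterized T-SQL fragment plus an @p1, @p2, ... parameter map, validating every identifier through utils.parseSqlIdentifier. A minimal sketch of the expected output, assuming both helpers (module-private in this bundle) are in scope; the sample values are illustrative only:

// Illustrative only: build a filter for rows created after a given date.
const filters = {
  agent_name: 'weather-agent',
  ...buildDateRangeFilter({ start: new Date('2024-01-01T00:00:00Z') }, 'created_at'),
};
const { sql, params } = prepareWhereClause(filters);
// sql    => " WHERE [agent_name] = @p1 AND [created_at] >= @p2"
// params => { p1: 'weather-agent', p2: '2024-01-01T00:00:00.000Z' }
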
53
- var LegacyEvalsMSSQL = class extends storage.LegacyEvalsStorage {
54
- pool;
55
- schema;
56
- constructor({ pool, schema }) {
57
- super();
58
- this.pool = pool;
59
- this.schema = schema;
60
- }
61
- /** @deprecated use getEvals instead */
62
- async getEvalsByAgentName(agentName, type) {
63
- try {
64
- let query = `SELECT * FROM ${getTableName({ indexName: storage.TABLE_EVALS, schemaName: getSchemaName(this.schema) })} WHERE agent_name = @p1`;
65
- if (type === "test") {
66
- query += " AND test_info IS NOT NULL AND JSON_VALUE(test_info, '$.testPath') IS NOT NULL";
67
- } else if (type === "live") {
68
- query += " AND (test_info IS NULL OR JSON_VALUE(test_info, '$.testPath') IS NULL)";
69
- }
70
- query += " ORDER BY created_at DESC";
71
- const request = this.pool.request();
72
- request.input("p1", agentName);
73
- const result = await request.query(query);
74
- const rows = result.recordset;
75
- return typeof transformEvalRow === "function" ? rows?.map((row) => transformEvalRow(row)) ?? [] : rows ?? [];
76
- } catch (error) {
77
- if (error && error.number === 208 && error.message && error.message.includes("Invalid object name")) {
78
- return [];
79
- }
80
- console.error("Failed to get evals for the specified agent: " + error?.message);
81
- throw error;
82
- }
83
- }
84
- async getEvals(options = {}) {
85
- const { agentName, type, page = 0, perPage = 100, dateRange } = options;
86
- const fromDate = dateRange?.start;
87
- const toDate = dateRange?.end;
88
- const where = [];
89
- const params = {};
90
- if (agentName) {
91
- where.push("agent_name = @agentName");
92
- params["agentName"] = agentName;
93
- }
94
- if (type === "test") {
95
- where.push("test_info IS NOT NULL AND JSON_VALUE(test_info, '$.testPath') IS NOT NULL");
96
- } else if (type === "live") {
97
- where.push("(test_info IS NULL OR JSON_VALUE(test_info, '$.testPath') IS NULL)");
98
- }
99
- if (fromDate instanceof Date && !isNaN(fromDate.getTime())) {
100
- where.push(`[created_at] >= @fromDate`);
101
- params[`fromDate`] = fromDate.toISOString();
102
- }
103
- if (toDate instanceof Date && !isNaN(toDate.getTime())) {
104
- where.push(`[created_at] <= @toDate`);
105
- params[`toDate`] = toDate.toISOString();
106
- }
107
- const whereClause = where.length > 0 ? `WHERE ${where.join(" AND ")}` : "";
108
- const tableName = getTableName({ indexName: storage.TABLE_EVALS, schemaName: getSchemaName(this.schema) });
109
- const offset = page * perPage;
110
- const countQuery = `SELECT COUNT(*) as total FROM ${tableName} ${whereClause}`;
111
- const dataQuery = `SELECT * FROM ${tableName} ${whereClause} ORDER BY seq_id DESC OFFSET @offset ROWS FETCH NEXT @perPage ROWS ONLY`;
112
- try {
113
- const countReq = this.pool.request();
114
- Object.entries(params).forEach(([key, value]) => {
115
- if (value instanceof Date) {
116
- countReq.input(key, sql2__default.default.DateTime, value);
117
- } else {
118
- countReq.input(key, value);
119
- }
120
- });
121
- const countResult = await countReq.query(countQuery);
122
- const total = countResult.recordset[0]?.total || 0;
123
- if (total === 0) {
124
- return {
125
- evals: [],
126
- total: 0,
127
- page,
128
- perPage,
129
- hasMore: false
130
- };
62
+ function transformFromSqlRow({
63
+ tableName,
64
+ sqlRow
65
+ }) {
66
+ const schema = storage.TABLE_SCHEMAS[tableName];
67
+ const result = {};
68
+ Object.entries(sqlRow).forEach(([key, value]) => {
69
+ const columnSchema = schema?.[key];
70
+ if (columnSchema?.type === "jsonb" && typeof value === "string") {
71
+ try {
72
+ result[key] = JSON.parse(value);
73
+ } catch {
74
+ result[key] = value;
131
75
  }
132
- const req = this.pool.request();
133
- Object.entries(params).forEach(([key, value]) => {
134
- if (value instanceof Date) {
135
- req.input(key, sql2__default.default.DateTime, value);
136
- } else {
137
- req.input(key, value);
138
- }
139
- });
140
- req.input("offset", offset);
141
- req.input("perPage", perPage);
142
- const result = await req.query(dataQuery);
143
- const rows = result.recordset;
144
- return {
145
- evals: rows?.map((row) => transformEvalRow(row)) ?? [],
146
- total,
147
- page,
148
- perPage,
149
- hasMore: offset + (rows?.length ?? 0) < total
150
- };
151
- } catch (error$1) {
152
- const mastraError = new error.MastraError(
153
- {
154
- id: "MASTRA_STORAGE_MSSQL_STORE_GET_EVALS_FAILED",
155
- domain: error.ErrorDomain.STORAGE,
156
- category: error.ErrorCategory.THIRD_PARTY,
157
- details: {
158
- agentName: agentName || "all",
159
- type: type || "all",
160
- page,
161
- perPage
162
- }
163
- },
164
- error$1
165
- );
166
- this.logger?.error?.(mastraError.toString());
167
- this.logger?.trackException(mastraError);
168
- throw mastraError;
76
+ } else if (columnSchema?.type === "timestamp" && value && typeof value === "string") {
77
+ result[key] = new Date(value);
78
+ } else if (columnSchema?.type === "timestamp" && value instanceof Date) {
79
+ result[key] = value;
80
+ } else if (columnSchema?.type === "boolean") {
81
+ result[key] = Boolean(value);
82
+ } else {
83
+ result[key] = value;
169
84
  }
170
- }
171
- };
85
+ });
86
+ return result;
87
+ }
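
transformFromSqlRow is the generic replacement for per-domain row mappers such as the deleted transformEvalRow: it looks each column up in storage.TABLE_SCHEMAS for the given table and coerces jsonb strings, timestamps, and BIT values back into JS objects, Dates, and booleans, passing everything else through untouched. A rough sketch of the intent, assuming the spans table schema marks metadata as jsonb, createdAt as timestamp, and isEvent as boolean (an assumption about TABLE_SCHEMAS, not something visible in this diff):

// Illustrative only; column types are assumed as described above.
const row = transformFromSqlRow({
  tableName: storage.TABLE_SPANS,
  sqlRow: {
    spanId: 'span-1',
    metadata: '{"foo":1}',
    createdAt: '2024-01-01T00:00:00.000Z',
    isEvent: 1,
  },
});
// row.metadata  => { foo: 1 }      (jsonb string parsed)
// row.createdAt => Date instance   (timestamp string converted)
// row.isEvent   => true            (BIT coerced to boolean)
// row.spanId    => 'span-1'        (passed through unchanged)
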
88
+
89
+ // src/storage/domains/memory/index.ts
172
90
  var MemoryMSSQL = class extends storage.MemoryStorage {
173
91
  pool;
174
92
  schema;
@@ -186,7 +104,7 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
186
104
  });
187
105
  const cleanMessages = messagesWithParsedContent.map(({ seq_id, ...rest }) => rest);
188
106
  const list = new agent.MessageList().add(cleanMessages, "memory");
189
- return format === "v2" ? list.get.all.v2() : list.get.all.v1();
107
+ return format === "v2" ? list.get.all.db() : list.get.all.v1();
190
108
  }
191
109
  constructor({
192
110
  pool,
@@ -200,7 +118,7 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
200
118
  }
201
119
  async getThreadById({ threadId }) {
202
120
  try {
203
- const sql7 = `SELECT
121
+ const sql5 = `SELECT
204
122
  id,
205
123
  [resourceId],
206
124
  title,
@@ -211,7 +129,7 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
211
129
  WHERE id = @threadId`;
212
130
  const request = this.pool.request();
213
131
  request.input("threadId", threadId);
214
- const resultSet = await request.query(sql7);
132
+ const resultSet = await request.query(sql5);
215
133
  const thread = resultSet.recordset[0] || null;
216
134
  if (!thread) {
217
135
  return null;
@@ -236,11 +154,24 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
236
154
  );
237
155
  }
238
156
  }
239
- async getThreadsByResourceIdPaginated(args) {
240
- const { resourceId, page = 0, perPage: perPageInput, orderBy = "createdAt", sortDirection = "DESC" } = args;
157
+ async listThreadsByResourceId(args) {
158
+ const { resourceId, page = 0, perPage: perPageInput, orderBy } = args;
159
+ if (page < 0) {
160
+ throw new error.MastraError({
161
+ id: "MASTRA_STORAGE_MSSQL_STORE_INVALID_PAGE",
162
+ domain: error.ErrorDomain.STORAGE,
163
+ category: error.ErrorCategory.USER,
164
+ text: "Page number must be non-negative",
165
+ details: {
166
+ resourceId,
167
+ page
168
+ }
169
+ });
170
+ }
171
+ const perPage = storage.normalizePerPage(perPageInput, 100);
172
+ const { offset, perPage: perPageForResponse } = storage.calculatePagination(page, perPageInput, perPage);
173
+ const { field, direction } = this.parseOrderBy(orderBy);
241
174
  try {
242
- const perPage = perPageInput !== void 0 ? perPageInput : 100;
243
- const currentOffset = page * perPage;
244
175
  const baseQuery = `FROM ${getTableName({ indexName: storage.TABLE_THREADS, schemaName: getSchemaName(this.schema) })} WHERE [resourceId] = @resourceId`;
245
176
  const countQuery = `SELECT COUNT(*) as count ${baseQuery}`;
246
177
  const countRequest = this.pool.request();
@@ -252,16 +183,22 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
252
183
  threads: [],
253
184
  total: 0,
254
185
  page,
255
- perPage,
186
+ perPage: perPageForResponse,
256
187
  hasMore: false
257
188
  };
258
189
  }
259
- const orderByField = orderBy === "createdAt" ? "[createdAt]" : "[updatedAt]";
260
- const dataQuery = `SELECT id, [resourceId], title, metadata, [createdAt], [updatedAt] ${baseQuery} ORDER BY ${orderByField} ${sortDirection} OFFSET @offset ROWS FETCH NEXT @perPage ROWS ONLY`;
190
+ const orderByField = field === "createdAt" ? "[createdAt]" : "[updatedAt]";
191
+ const dir = (direction || "DESC").toUpperCase() === "ASC" ? "ASC" : "DESC";
192
+ const limitValue = perPageInput === false ? total : perPage;
193
+ const dataQuery = `SELECT id, [resourceId], title, metadata, [createdAt], [updatedAt] ${baseQuery} ORDER BY ${orderByField} ${dir} OFFSET @offset ROWS FETCH NEXT @perPage ROWS ONLY`;
261
194
  const dataRequest = this.pool.request();
262
195
  dataRequest.input("resourceId", resourceId);
263
- dataRequest.input("perPage", perPage);
264
- dataRequest.input("offset", currentOffset);
196
+ dataRequest.input("offset", offset);
197
+ if (limitValue > 2147483647) {
198
+ dataRequest.input("perPage", sql2__default.default.BigInt, limitValue);
199
+ } else {
200
+ dataRequest.input("perPage", limitValue);
201
+ }
265
202
  const rowsResult = await dataRequest.query(dataQuery);
266
203
  const rows = rowsResult.recordset || [];
267
204
  const threads = rows.map((thread) => ({
@@ -274,13 +211,13 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
274
211
  threads,
275
212
  total,
276
213
  page,
277
- perPage,
278
- hasMore: currentOffset + threads.length < total
214
+ perPage: perPageForResponse,
215
+ hasMore: perPageInput === false ? false : offset + perPage < total
279
216
  };
280
217
  } catch (error$1) {
281
218
  const mastraError = new error.MastraError(
282
219
  {
283
- id: "MASTRA_STORAGE_MSSQL_STORE_GET_THREADS_BY_RESOURCE_ID_PAGINATED_FAILED",
220
+ id: "MASTRA_STORAGE_MSSQL_STORE_LIST_THREADS_BY_RESOURCE_ID_FAILED",
284
221
  domain: error.ErrorDomain.STORAGE,
285
222
  category: error.ErrorCategory.THIRD_PARTY,
286
223
  details: {
@@ -292,7 +229,13 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
292
229
  );
293
230
  this.logger?.error?.(mastraError.toString());
294
231
  this.logger?.trackException?.(mastraError);
295
- return { threads: [], total: 0, page, perPage: perPageInput || 100, hasMore: false };
232
+ return {
233
+ threads: [],
234
+ total: 0,
235
+ page,
236
+ perPage: perPageForResponse,
237
+ hasMore: false
238
+ };
296
239
  }
297
240
  }
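
listThreadsByResourceId replaces getThreadsByResourceIdPaginated (and the deprecated unpaginated getThreadsByResourceId removed further down): it rejects negative page numbers, normalizes perPage through storage.normalizePerPage with a default of 100, treats perPage: false as "return everything", and orders by createdAt or updatedAt via this.parseOrderBy. A hedged usage sketch; memory is assumed to be a constructed MemoryMSSQL instance:

// Illustrative only; `memory` is a hypothetical MemoryMSSQL instance.
async function listThreads(memory) {
  const { threads, total, hasMore } = await memory.listThreadsByResourceId({
    resourceId: 'resource-42',
    page: 0,
    perPage: 50,
  });

  // perPage: false returns every thread for the resource in one response
  // (hasMore is always false in that case).
  const all = await memory.listThreadsByResourceId({ resourceId: 'resource-42', perPage: false });
  return { threads, total, hasMore, all };
}
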
298
241
  async saveThread({ thread }) {
@@ -314,7 +257,12 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
314
257
  req.input("id", thread.id);
315
258
  req.input("resourceId", thread.resourceId);
316
259
  req.input("title", thread.title);
317
- req.input("metadata", thread.metadata ? JSON.stringify(thread.metadata) : null);
260
+ const metadata = thread.metadata ? JSON.stringify(thread.metadata) : null;
261
+ if (metadata === null) {
262
+ req.input("metadata", sql2__default.default.NVarChar, null);
263
+ } else {
264
+ req.input("metadata", metadata);
265
+ }
318
266
  req.input("createdAt", sql2__default.default.DateTime2, thread.createdAt);
319
267
  req.input("updatedAt", sql2__default.default.DateTime2, thread.updatedAt);
320
268
  await req.query(mergeSql);
@@ -333,30 +281,6 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
333
281
  );
334
282
  }
335
283
  }
336
- /**
337
- * @deprecated use getThreadsByResourceIdPaginated instead
338
- */
339
- async getThreadsByResourceId(args) {
340
- const { resourceId, orderBy = "createdAt", sortDirection = "DESC" } = args;
341
- try {
342
- const baseQuery = `FROM ${getTableName({ indexName: storage.TABLE_THREADS, schemaName: getSchemaName(this.schema) })} WHERE [resourceId] = @resourceId`;
343
- const orderByField = orderBy === "createdAt" ? "[createdAt]" : "[updatedAt]";
344
- const dataQuery = `SELECT id, [resourceId], title, metadata, [createdAt], [updatedAt] ${baseQuery} ORDER BY ${orderByField} ${sortDirection}`;
345
- const request = this.pool.request();
346
- request.input("resourceId", resourceId);
347
- const resultSet = await request.query(dataQuery);
348
- const rows = resultSet.recordset || [];
349
- return rows.map((thread) => ({
350
- ...thread,
351
- metadata: typeof thread.metadata === "string" ? JSON.parse(thread.metadata) : thread.metadata,
352
- createdAt: thread.createdAt,
353
- updatedAt: thread.updatedAt
354
- }));
355
- } catch (error) {
356
- this.logger?.error?.(`Error getting threads for resource ${resourceId}:`, error);
357
- return [];
358
- }
359
- }
360
284
  /**
361
285
  * Updates a thread's title and metadata, merging with existing metadata. Returns the updated thread.
362
286
  */
@@ -384,7 +308,7 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
384
308
  };
385
309
  try {
386
310
  const table = getTableName({ indexName: storage.TABLE_THREADS, schemaName: getSchemaName(this.schema) });
387
- const sql7 = `UPDATE ${table}
311
+ const sql5 = `UPDATE ${table}
388
312
  SET title = @title,
389
313
  metadata = @metadata,
390
314
  [updatedAt] = @updatedAt
@@ -395,7 +319,7 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
395
319
  req.input("title", title);
396
320
  req.input("metadata", JSON.stringify(mergedMetadata));
397
321
  req.input("updatedAt", /* @__PURE__ */ new Date());
398
- const result = await req.query(sql7);
322
+ const result = await req.query(sql5);
399
323
  let thread = result.recordset && result.recordset[0];
400
324
  if (thread && "seq_id" in thread) {
401
325
  const { seq_id, ...rest } = thread;
@@ -465,11 +389,9 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
465
389
  }
466
390
  async _getIncludedMessages({
467
391
  threadId,
468
- selectBy,
469
- orderByStatement
392
+ include
470
393
  }) {
471
394
  if (!threadId.trim()) throw new Error("threadId must be a non-empty string");
472
- const include = selectBy?.include;
473
395
  if (!include) return null;
474
396
  const unionQueries = [];
475
397
  const paramValues = [];
@@ -494,7 +416,7 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
494
416
  m.[resourceId],
495
417
  m.seq_id
496
418
  FROM (
497
- SELECT *, ROW_NUMBER() OVER (${orderByStatement}) as row_num
419
+ SELECT *, ROW_NUMBER() OVER (ORDER BY [createdAt] ASC) as row_num
498
420
  FROM ${getTableName({ indexName: storage.TABLE_MESSAGES, schemaName: getSchemaName(this.schema) })}
499
421
  WHERE [thread_id] = ${pThreadId}
500
422
  ) AS m
@@ -502,15 +424,17 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
502
424
  OR EXISTS (
503
425
  SELECT 1
504
426
  FROM (
505
- SELECT *, ROW_NUMBER() OVER (${orderByStatement}) as row_num
427
+ SELECT *, ROW_NUMBER() OVER (ORDER BY [createdAt] ASC) as row_num
506
428
  FROM ${getTableName({ indexName: storage.TABLE_MESSAGES, schemaName: getSchemaName(this.schema) })}
507
429
  WHERE [thread_id] = ${pThreadId}
508
430
  ) AS target
509
431
  WHERE target.id = ${pId}
510
432
  AND (
511
- (m.row_num <= target.row_num + ${pPrev} AND m.row_num > target.row_num)
433
+ -- Get previous messages (messages that come BEFORE the target)
434
+ (m.row_num < target.row_num AND m.row_num >= target.row_num - ${pPrev})
512
435
  OR
513
- (m.row_num >= target.row_num - ${pNext} AND m.row_num < target.row_num)
436
+ -- Get next messages (messages that come AFTER the target)
437
+ (m.row_num > target.row_num AND m.row_num <= target.row_num + ${pNext})
514
438
  )
515
439
  )
516
440
  `
@@ -539,34 +463,16 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
539
463
  });
540
464
  return dedupedRows;
541
465
  }
542
- async getMessages(args) {
543
- const { threadId, resourceId, format, selectBy } = args;
466
+ async listMessagesById({ messageIds }) {
467
+ if (messageIds.length === 0) return { messages: [] };
544
468
  const selectStatement = `SELECT seq_id, id, content, role, type, [createdAt], thread_id AS threadId, resourceId`;
545
469
  const orderByStatement = `ORDER BY [seq_id] DESC`;
546
- const limit = storage.resolveMessageLimit({ last: selectBy?.last, defaultLimit: 40 });
547
470
  try {
548
- if (!threadId.trim()) throw new Error("threadId must be a non-empty string");
549
471
  let rows = [];
550
- const include = selectBy?.include || [];
551
- if (include?.length) {
552
- const includeMessages = await this._getIncludedMessages({ threadId, selectBy, orderByStatement });
553
- if (includeMessages) {
554
- rows.push(...includeMessages);
555
- }
556
- }
557
- const excludeIds = rows.map((m) => m.id).filter(Boolean);
558
- let query = `${selectStatement} FROM ${getTableName({ indexName: storage.TABLE_MESSAGES, schemaName: getSchemaName(this.schema) })} WHERE [thread_id] = @threadId`;
472
+ let query = `${selectStatement} FROM ${getTableName({ indexName: storage.TABLE_MESSAGES, schemaName: getSchemaName(this.schema) })} WHERE [id] IN (${messageIds.map((_, i) => `@id${i}`).join(", ")})`;
559
473
  const request = this.pool.request();
560
- request.input("threadId", threadId);
561
- if (excludeIds.length > 0) {
562
- const excludeParams = excludeIds.map((_, idx) => `@id${idx}`);
563
- query += ` AND id NOT IN (${excludeParams.join(", ")})`;
564
- excludeIds.forEach((id, idx) => {
565
- request.input(`id${idx}`, id);
566
- });
567
- }
568
- query += ` ${orderByStatement} OFFSET 0 ROWS FETCH NEXT @limit ROWS ONLY`;
569
- request.input("limit", limit);
474
+ messageIds.forEach((id, i) => request.input(`id${i}`, id));
475
+ query += ` ${orderByStatement}`;
570
476
  const result = await request.query(query);
571
477
  const remainingRows = result.recordset || [];
572
478
  rows.push(...remainingRows);
@@ -574,153 +480,171 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
574
480
  const timeDiff = a.seq_id - b.seq_id;
575
481
  return timeDiff;
576
482
  });
577
- rows = rows.map(({ seq_id, ...rest }) => rest);
578
- return this._parseAndFormatMessages(rows, format);
483
+ const messagesWithParsedContent = rows.map((row) => {
484
+ if (typeof row.content === "string") {
485
+ try {
486
+ return { ...row, content: JSON.parse(row.content) };
487
+ } catch {
488
+ return row;
489
+ }
490
+ }
491
+ return row;
492
+ });
493
+ const cleanMessages = messagesWithParsedContent.map(({ seq_id, ...rest }) => rest);
494
+ const list = new agent.MessageList().add(cleanMessages, "memory");
495
+ return { messages: list.get.all.db() };
579
496
  } catch (error$1) {
580
497
  const mastraError = new error.MastraError(
581
498
  {
582
- id: "MASTRA_STORAGE_MSSQL_STORE_GET_MESSAGES_FAILED",
499
+ id: "MASTRA_STORAGE_MSSQL_STORE_LIST_MESSAGES_BY_ID_FAILED",
583
500
  domain: error.ErrorDomain.STORAGE,
584
501
  category: error.ErrorCategory.THIRD_PARTY,
585
502
  details: {
586
- threadId,
587
- resourceId: resourceId ?? ""
503
+ messageIds: JSON.stringify(messageIds)
588
504
  }
589
505
  },
590
506
  error$1
591
507
  );
592
508
  this.logger?.error?.(mastraError.toString());
593
- this.logger?.trackException(mastraError);
594
- return [];
509
+ this.logger?.trackException?.(mastraError);
510
+ return { messages: [] };
595
511
  }
596
512
  }
597
- async getMessagesById({
598
- messageIds,
599
- format
600
- }) {
601
- if (messageIds.length === 0) return [];
602
- const selectStatement = `SELECT seq_id, id, content, role, type, [createdAt], thread_id AS threadId, resourceId`;
603
- const orderByStatement = `ORDER BY [seq_id] DESC`;
604
- try {
605
- let rows = [];
606
- let query = `${selectStatement} FROM ${getTableName({ indexName: storage.TABLE_MESSAGES, schemaName: getSchemaName(this.schema) })} WHERE [id] IN (${messageIds.map((_, i) => `@id${i}`).join(", ")})`;
607
- const request = this.pool.request();
608
- messageIds.forEach((id, i) => request.input(`id${i}`, id));
609
- query += ` ${orderByStatement}`;
610
- const result = await request.query(query);
611
- const remainingRows = result.recordset || [];
612
- rows.push(...remainingRows);
613
- rows.sort((a, b) => {
614
- const timeDiff = a.seq_id - b.seq_id;
615
- return timeDiff;
616
- });
617
- rows = rows.map(({ seq_id, ...rest }) => rest);
618
- if (format === `v1`) return this._parseAndFormatMessages(rows, format);
619
- return this._parseAndFormatMessages(rows, `v2`);
620
- } catch (error$1) {
621
- const mastraError = new error.MastraError(
513
+ async listMessages(args) {
514
+ const { threadId, resourceId, include, filter, perPage: perPageInput, page = 0, orderBy } = args;
515
+ if (!threadId.trim()) {
516
+ throw new error.MastraError(
622
517
  {
623
- id: "MASTRA_STORAGE_MSSQL_STORE_GET_MESSAGES_BY_ID_FAILED",
518
+ id: "STORAGE_MSSQL_LIST_MESSAGES_INVALID_THREAD_ID",
624
519
  domain: error.ErrorDomain.STORAGE,
625
520
  category: error.ErrorCategory.THIRD_PARTY,
626
- details: {
627
- messageIds: JSON.stringify(messageIds)
628
- }
521
+ details: { threadId }
629
522
  },
630
- error$1
523
+ new Error("threadId must be a non-empty string")
631
524
  );
632
- this.logger?.error?.(mastraError.toString());
633
- this.logger?.trackException(mastraError);
634
- return [];
635
525
  }
636
- }
637
- async getMessagesPaginated(args) {
638
- const { threadId, resourceId, format, selectBy } = args;
639
- const { page = 0, perPage: perPageInput, dateRange } = selectBy?.pagination || {};
526
+ if (page < 0) {
527
+ throw new error.MastraError({
528
+ id: "MASTRA_STORAGE_MSSQL_STORE_INVALID_PAGE",
529
+ domain: error.ErrorDomain.STORAGE,
530
+ category: error.ErrorCategory.USER,
531
+ text: "Page number must be non-negative",
532
+ details: {
533
+ threadId,
534
+ page
535
+ }
536
+ });
537
+ }
538
+ const perPage = storage.normalizePerPage(perPageInput, 40);
539
+ const { offset, perPage: perPageForResponse } = storage.calculatePagination(page, perPageInput, perPage);
640
540
  try {
641
- if (!threadId.trim()) throw new Error("threadId must be a non-empty string");
642
- const fromDate = dateRange?.start;
643
- const toDate = dateRange?.end;
644
- const selectStatement = `SELECT seq_id, id, content, role, type, [createdAt], thread_id AS threadId, resourceId`;
645
- const orderByStatement = `ORDER BY [seq_id] DESC`;
646
- let messages = [];
647
- if (selectBy?.include?.length) {
648
- const includeMessages = await this._getIncludedMessages({ threadId, selectBy, orderByStatement });
649
- if (includeMessages) messages.push(...includeMessages);
650
- }
651
- const perPage = perPageInput !== void 0 ? perPageInput : storage.resolveMessageLimit({ last: selectBy?.last, defaultLimit: 40 });
652
- const currentOffset = page * perPage;
653
- const conditions = ["[thread_id] = @threadId"];
654
- const request = this.pool.request();
655
- request.input("threadId", threadId);
656
- if (fromDate instanceof Date && !isNaN(fromDate.getTime())) {
657
- conditions.push("[createdAt] >= @fromDate");
658
- request.input("fromDate", fromDate.toISOString());
659
- }
660
- if (toDate instanceof Date && !isNaN(toDate.getTime())) {
661
- conditions.push("[createdAt] <= @toDate");
662
- request.input("toDate", toDate.toISOString());
663
- }
664
- const whereClause = `WHERE ${conditions.join(" AND ")}`;
665
- const countQuery = `SELECT COUNT(*) as total FROM ${getTableName({ indexName: storage.TABLE_MESSAGES, schemaName: getSchemaName(this.schema) })} ${whereClause}`;
666
- const countResult = await request.query(countQuery);
541
+ const { field, direction } = this.parseOrderBy(orderBy, "ASC");
542
+ const orderByStatement = `ORDER BY [${field}] ${direction}, [seq_id] ${direction}`;
543
+ const tableName = getTableName({ indexName: storage.TABLE_MESSAGES, schemaName: getSchemaName(this.schema) });
544
+ const baseQuery = `SELECT seq_id, id, content, role, type, [createdAt], thread_id AS threadId, resourceId FROM ${tableName}`;
545
+ const filters = {
546
+ thread_id: threadId,
547
+ ...resourceId ? { resourceId } : {},
548
+ ...buildDateRangeFilter(filter?.dateRange, "createdAt")
549
+ };
550
+ const { sql: actualWhereClause = "", params: whereParams } = prepareWhereClause(
551
+ filters);
552
+ const bindWhereParams = (req) => {
553
+ Object.entries(whereParams).forEach(([paramName, paramValue]) => req.input(paramName, paramValue));
554
+ };
555
+ const countRequest = this.pool.request();
556
+ bindWhereParams(countRequest);
557
+ const countResult = await countRequest.query(`SELECT COUNT(*) as total FROM ${tableName}${actualWhereClause}`);
667
558
  const total = parseInt(countResult.recordset[0]?.total, 10) || 0;
668
- if (total === 0 && messages.length > 0) {
669
- const parsedIncluded = this._parseAndFormatMessages(messages, format);
559
+ const fetchBaseMessages = async () => {
560
+ const request = this.pool.request();
561
+ bindWhereParams(request);
562
+ if (perPageInput === false) {
563
+ const result2 = await request.query(`${baseQuery}${actualWhereClause} ${orderByStatement}`);
564
+ return result2.recordset || [];
565
+ }
566
+ request.input("offset", offset);
567
+ request.input("limit", perPage > 2147483647 ? sql2__default.default.BigInt : sql2__default.default.Int, perPage);
568
+ const result = await request.query(
569
+ `${baseQuery}${actualWhereClause} ${orderByStatement} OFFSET @offset ROWS FETCH NEXT @limit ROWS ONLY`
570
+ );
571
+ return result.recordset || [];
572
+ };
573
+ const baseRows = perPage === 0 ? [] : await fetchBaseMessages();
574
+ const messages = [...baseRows];
575
+ const seqById = /* @__PURE__ */ new Map();
576
+ messages.forEach((msg) => {
577
+ if (typeof msg.seq_id === "number") seqById.set(msg.id, msg.seq_id);
578
+ });
579
+ if (total === 0 && messages.length === 0 && (!include || include.length === 0)) {
670
580
  return {
671
- messages: parsedIncluded,
672
- total: parsedIncluded.length,
581
+ messages: [],
582
+ total: 0,
673
583
  page,
674
- perPage,
584
+ perPage: perPageForResponse,
675
585
  hasMore: false
676
586
  };
677
587
  }
678
- const excludeIds = messages.map((m) => m.id);
679
- if (excludeIds.length > 0) {
680
- const excludeParams = excludeIds.map((_, idx) => `@id${idx}`);
681
- conditions.push(`id NOT IN (${excludeParams.join(", ")})`);
682
- excludeIds.forEach((id, idx) => request.input(`id${idx}`, id));
683
- }
684
- const finalWhereClause = `WHERE ${conditions.join(" AND ")}`;
685
- const dataQuery = `${selectStatement} FROM ${getTableName({ indexName: storage.TABLE_MESSAGES, schemaName: getSchemaName(this.schema) })} ${finalWhereClause} ${orderByStatement} OFFSET @offset ROWS FETCH NEXT @limit ROWS ONLY`;
686
- request.input("offset", currentOffset);
687
- request.input("limit", perPage);
688
- const rowsResult = await request.query(dataQuery);
689
- const rows = rowsResult.recordset || [];
690
- rows.sort((a, b) => a.seq_id - b.seq_id);
691
- messages.push(...rows);
692
- const parsed = this._parseAndFormatMessages(messages, format);
588
+ if (include?.length) {
589
+ const messageIds = new Set(messages.map((m) => m.id));
590
+ const includeMessages = await this._getIncludedMessages({ threadId, include });
591
+ includeMessages?.forEach((msg) => {
592
+ if (!messageIds.has(msg.id)) {
593
+ messages.push(msg);
594
+ messageIds.add(msg.id);
595
+ if (typeof msg.seq_id === "number") seqById.set(msg.id, msg.seq_id);
596
+ }
597
+ });
598
+ }
599
+ const parsed = this._parseAndFormatMessages(messages, "v2");
600
+ const mult = direction === "ASC" ? 1 : -1;
601
+ const finalMessages = parsed.sort((a, b) => {
602
+ const aVal = field === "createdAt" ? new Date(a.createdAt).getTime() : a[field];
603
+ const bVal = field === "createdAt" ? new Date(b.createdAt).getTime() : b[field];
604
+ if (aVal == null || bVal == null) {
605
+ return aVal == null && bVal == null ? a.id.localeCompare(b.id) : aVal == null ? 1 : -1;
606
+ }
607
+ const diff = (typeof aVal === "number" && typeof bVal === "number" ? aVal - bVal : String(aVal).localeCompare(String(bVal))) * mult;
608
+ if (diff !== 0) return diff;
609
+ const seqA = seqById.get(a.id);
610
+ const seqB = seqById.get(b.id);
611
+ return seqA != null && seqB != null ? (seqA - seqB) * mult : a.id.localeCompare(b.id);
612
+ });
613
+ const returnedThreadMessageCount = finalMessages.filter((m) => m.threadId === threadId).length;
614
+ const hasMore = perPageInput !== false && returnedThreadMessageCount < total && offset + perPage < total;
693
615
  return {
694
- messages: parsed,
695
- total: total + excludeIds.length,
616
+ messages: finalMessages,
617
+ total,
696
618
  page,
697
- perPage,
698
- hasMore: currentOffset + rows.length < total
619
+ perPage: perPageForResponse,
620
+ hasMore
699
621
  };
700
622
  } catch (error$1) {
701
623
  const mastraError = new error.MastraError(
702
624
  {
703
- id: "MASTRA_STORAGE_MSSQL_STORE_GET_MESSAGES_PAGINATED_FAILED",
625
+ id: "MASTRA_STORAGE_MSSQL_STORE_LIST_MESSAGES_FAILED",
704
626
  domain: error.ErrorDomain.STORAGE,
705
627
  category: error.ErrorCategory.THIRD_PARTY,
706
628
  details: {
707
629
  threadId,
708
- resourceId: resourceId ?? "",
709
- page
630
+ resourceId: resourceId ?? ""
710
631
  }
711
632
  },
712
633
  error$1
713
634
  );
714
635
  this.logger?.error?.(mastraError.toString());
715
- this.logger?.trackException(mastraError);
716
- return { messages: [], total: 0, page, perPage: perPageInput || 40, hasMore: false };
636
+ this.logger?.trackException?.(mastraError);
637
+ return {
638
+ messages: [],
639
+ total: 0,
640
+ page,
641
+ perPage: perPageForResponse,
642
+ hasMore: false
643
+ };
717
644
  }
718
645
  }
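
listMessages is the consolidated replacement for getMessages / getMessagesPaginated: it requires a non-empty threadId, normalizes perPage via storage.normalizePerPage (default 40, with false meaning "no pagination"), optionally narrows by resourceId and a filter.dateRange on createdAt, pulls in context rows for include entries through _getIncludedMessages, and returns MessageList-normalized records sorted on the requested field with seq_id as the tiebreaker. A hedged usage sketch; memory is again an assumed MemoryMSSQL instance:

// Illustrative only; `memory` is a hypothetical MemoryMSSQL instance.
async function listRecentMessages(memory) {
  const { messages, total, page, perPage, hasMore } = await memory.listMessages({
    threadId: 'thread-123',
    page: 0,
    perPage: 20,
    filter: { dateRange: { start: new Date('2024-01-01T00:00:00Z') } },
  });
  return { messages, total, page, perPage, hasMore };
}
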
719
- async saveMessages({
720
- messages,
721
- format
722
- }) {
723
- if (messages.length === 0) return messages;
646
+ async saveMessages({ messages }) {
647
+ if (messages.length === 0) return { messages: [] };
724
648
  const threadId = messages[0]?.threadId;
725
649
  if (!threadId) {
726
650
  throw new error.MastraError({
@@ -802,8 +726,7 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
802
726
  return message;
803
727
  });
804
728
  const list = new agent.MessageList().add(messagesWithParsedContent, "memory");
805
- if (format === "v2") return list.get.all.v2();
806
- return list.get.all.v1();
729
+ return { messages: list.get.all.db() };
807
730
  } catch (error$1) {
808
731
  throw new error.MastraError(
809
732
  {
@@ -979,8 +902,10 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
979
902
  return null;
980
903
  }
981
904
  return {
982
- ...result,
983
- workingMemory: typeof result.workingMemory === "object" ? JSON.stringify(result.workingMemory) : result.workingMemory,
905
+ id: result.id,
906
+ createdAt: result.createdAt,
907
+ updatedAt: result.updatedAt,
908
+ workingMemory: result.workingMemory,
984
909
  metadata: typeof result.metadata === "string" ? JSON.parse(result.metadata) : result.metadata
985
910
  };
986
911
  } catch (error$1) {
@@ -994,7 +919,7 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
994
919
  error$1
995
920
  );
996
921
  this.logger?.error?.(mastraError.toString());
997
- this.logger?.trackException(mastraError);
922
+ this.logger?.trackException?.(mastraError);
998
923
  throw mastraError;
999
924
  }
1000
925
  }
@@ -1003,7 +928,7 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
1003
928
  tableName: storage.TABLE_RESOURCES,
1004
929
  record: {
1005
930
  ...resource,
1006
- metadata: JSON.stringify(resource.metadata)
931
+ metadata: resource.metadata
1007
932
  }
1008
933
  });
1009
934
  return resource;
@@ -1061,72 +986,391 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
1061
986
  error$1
1062
987
  );
1063
988
  this.logger?.error?.(mastraError.toString());
1064
- this.logger?.trackException(mastraError);
989
+ this.logger?.trackException?.(mastraError);
1065
990
  throw mastraError;
1066
991
  }
1067
992
  }
1068
993
  };
1069
- var StoreOperationsMSSQL = class extends storage.StoreOperations {
994
+ var ObservabilityMSSQL = class extends storage.ObservabilityStorage {
1070
995
  pool;
1071
- schemaName;
1072
- setupSchemaPromise = null;
1073
- schemaSetupComplete = void 0;
1074
- getSqlType(type, isPrimaryKey = false) {
1075
- switch (type) {
1076
- case "text":
1077
- return isPrimaryKey ? "NVARCHAR(255)" : "NVARCHAR(MAX)";
1078
- case "timestamp":
1079
- return "DATETIME2(7)";
1080
- case "uuid":
1081
- return "UNIQUEIDENTIFIER";
1082
- case "jsonb":
1083
- return "NVARCHAR(MAX)";
1084
- case "integer":
1085
- return "INT";
1086
- case "bigint":
1087
- return "BIGINT";
1088
- case "float":
1089
- return "FLOAT";
1090
- default:
1091
- throw new error.MastraError({
1092
- id: "MASTRA_STORAGE_MSSQL_STORE_TYPE_NOT_SUPPORTED",
1093
- domain: error.ErrorDomain.STORAGE,
1094
- category: error.ErrorCategory.THIRD_PARTY
1095
- });
1096
- }
1097
- }
1098
- constructor({ pool, schemaName }) {
996
+ operations;
997
+ schema;
998
+ constructor({
999
+ pool,
1000
+ operations,
1001
+ schema
1002
+ }) {
1099
1003
  super();
1100
1004
  this.pool = pool;
1101
- this.schemaName = schemaName;
1005
+ this.operations = operations;
1006
+ this.schema = schema;
1102
1007
  }
1103
- async hasColumn(table, column) {
1104
- const schema = this.schemaName || "dbo";
1105
- const request = this.pool.request();
1106
- request.input("schema", schema);
1107
- request.input("table", table);
1108
- request.input("column", column);
1109
- request.input("columnLower", column.toLowerCase());
1110
- const result = await request.query(
1111
- `SELECT 1 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = @schema AND TABLE_NAME = @table AND (COLUMN_NAME = @column OR COLUMN_NAME = @columnLower)`
1112
- );
1113
- return result.recordset.length > 0;
1008
+ get tracingStrategy() {
1009
+ return {
1010
+ preferred: "batch-with-updates",
1011
+ supported: ["batch-with-updates", "insert-only"]
1012
+ };
1114
1013
  }
1115
- async setupSchema() {
1116
- if (!this.schemaName || this.schemaSetupComplete) {
1117
- return;
1014
+ async createSpan(span) {
1015
+ try {
1016
+ const startedAt = span.startedAt instanceof Date ? span.startedAt.toISOString() : span.startedAt;
1017
+ const endedAt = span.endedAt instanceof Date ? span.endedAt.toISOString() : span.endedAt;
1018
+ const record = {
1019
+ ...span,
1020
+ startedAt,
1021
+ endedAt
1022
+ // Note: createdAt/updatedAt will be set by default values
1023
+ };
1024
+ return this.operations.insert({ tableName: storage.TABLE_SPANS, record });
1025
+ } catch (error$1) {
1026
+ throw new error.MastraError(
1027
+ {
1028
+ id: "MSSQL_STORE_CREATE_SPAN_FAILED",
1029
+ domain: error.ErrorDomain.STORAGE,
1030
+ category: error.ErrorCategory.USER,
1031
+ details: {
1032
+ spanId: span.spanId,
1033
+ traceId: span.traceId,
1034
+ spanType: span.spanType,
1035
+ spanName: span.name
1036
+ }
1037
+ },
1038
+ error$1
1039
+ );
1118
1040
  }
1119
- if (!this.setupSchemaPromise) {
1120
- this.setupSchemaPromise = (async () => {
1121
- try {
1122
- const checkRequest = this.pool.request();
1123
- checkRequest.input("schemaName", this.schemaName);
1124
- const checkResult = await checkRequest.query(`
1125
- SELECT 1 AS found FROM INFORMATION_SCHEMA.SCHEMATA WHERE SCHEMA_NAME = @schemaName
1126
- `);
1127
- const schemaExists = Array.isArray(checkResult.recordset) && checkResult.recordset.length > 0;
1128
- if (!schemaExists) {
1129
- try {
1041
+ }
1042
+ async getTrace(traceId) {
1043
+ try {
1044
+ const tableName = getTableName({
1045
+ indexName: storage.TABLE_SPANS,
1046
+ schemaName: getSchemaName(this.schema)
1047
+ });
1048
+ const request = this.pool.request();
1049
+ request.input("traceId", traceId);
1050
+ const result = await request.query(
1051
+ `SELECT
1052
+ [traceId], [spanId], [parentSpanId], [name], [scope], [spanType],
1053
+ [attributes], [metadata], [links], [input], [output], [error], [isEvent],
1054
+ [startedAt], [endedAt], [createdAt], [updatedAt]
1055
+ FROM ${tableName}
1056
+ WHERE [traceId] = @traceId
1057
+ ORDER BY [startedAt] DESC`
1058
+ );
1059
+ if (!result.recordset || result.recordset.length === 0) {
1060
+ return null;
1061
+ }
1062
+ return {
1063
+ traceId,
1064
+ spans: result.recordset.map(
1065
+ (span) => transformFromSqlRow({
1066
+ tableName: storage.TABLE_SPANS,
1067
+ sqlRow: span
1068
+ })
1069
+ )
1070
+ };
1071
+ } catch (error$1) {
1072
+ throw new error.MastraError(
1073
+ {
1074
+ id: "MSSQL_STORE_GET_TRACE_FAILED",
1075
+ domain: error.ErrorDomain.STORAGE,
1076
+ category: error.ErrorCategory.USER,
1077
+ details: {
1078
+ traceId
1079
+ }
1080
+ },
1081
+ error$1
1082
+ );
1083
+ }
1084
+ }
1085
+ async updateSpan({
1086
+ spanId,
1087
+ traceId,
1088
+ updates
1089
+ }) {
1090
+ try {
1091
+ const data = { ...updates };
1092
+ if (data.endedAt instanceof Date) {
1093
+ data.endedAt = data.endedAt.toISOString();
1094
+ }
1095
+ if (data.startedAt instanceof Date) {
1096
+ data.startedAt = data.startedAt.toISOString();
1097
+ }
1098
+ await this.operations.update({
1099
+ tableName: storage.TABLE_SPANS,
1100
+ keys: { spanId, traceId },
1101
+ data
1102
+ });
1103
+ } catch (error$1) {
1104
+ throw new error.MastraError(
1105
+ {
1106
+ id: "MSSQL_STORE_UPDATE_SPAN_FAILED",
1107
+ domain: error.ErrorDomain.STORAGE,
1108
+ category: error.ErrorCategory.USER,
1109
+ details: {
1110
+ spanId,
1111
+ traceId
1112
+ }
1113
+ },
1114
+ error$1
1115
+ );
1116
+ }
1117
+ }
1118
+ async getTracesPaginated({
1119
+ filters,
1120
+ pagination
1121
+ }) {
1122
+ const page = pagination?.page ?? 0;
1123
+ const perPage = pagination?.perPage ?? 10;
1124
+ const { entityId, entityType, ...actualFilters } = filters || {};
1125
+ const filtersWithDateRange = {
1126
+ ...actualFilters,
1127
+ ...buildDateRangeFilter(pagination?.dateRange, "startedAt"),
1128
+ parentSpanId: null
1129
+ // Only get root spans for traces
1130
+ };
1131
+ const whereClause = prepareWhereClause(filtersWithDateRange);
1132
+ let actualWhereClause = whereClause.sql;
1133
+ const params = { ...whereClause.params };
1134
+ let currentParamIndex = Object.keys(params).length + 1;
1135
+ if (entityId && entityType) {
1136
+ let name = "";
1137
+ if (entityType === "workflow") {
1138
+ name = `workflow run: '${entityId}'`;
1139
+ } else if (entityType === "agent") {
1140
+ name = `agent run: '${entityId}'`;
1141
+ } else {
1142
+ const error$1 = new error.MastraError({
1143
+ id: "MSSQL_STORE_GET_TRACES_PAGINATED_FAILED",
1144
+ domain: error.ErrorDomain.STORAGE,
1145
+ category: error.ErrorCategory.USER,
1146
+ details: {
1147
+ entityType
1148
+ },
1149
+ text: `Cannot filter by entity type: ${entityType}`
1150
+ });
1151
+ throw error$1;
1152
+ }
1153
+ const entityParam = `p${currentParamIndex++}`;
1154
+ if (actualWhereClause) {
1155
+ actualWhereClause += ` AND [name] = @${entityParam}`;
1156
+ } else {
1157
+ actualWhereClause = ` WHERE [name] = @${entityParam}`;
1158
+ }
1159
+ params[entityParam] = name;
1160
+ }
1161
+ const tableName = getTableName({
1162
+ indexName: storage.TABLE_SPANS,
1163
+ schemaName: getSchemaName(this.schema)
1164
+ });
1165
+ try {
1166
+ const countRequest = this.pool.request();
1167
+ Object.entries(params).forEach(([key, value]) => {
1168
+ countRequest.input(key, value);
1169
+ });
1170
+ const countResult = await countRequest.query(
1171
+ `SELECT COUNT(*) as count FROM ${tableName}${actualWhereClause}`
1172
+ );
1173
+ const total = countResult.recordset[0]?.count ?? 0;
1174
+ if (total === 0) {
1175
+ return {
1176
+ pagination: {
1177
+ total: 0,
1178
+ page,
1179
+ perPage,
1180
+ hasMore: false
1181
+ },
1182
+ spans: []
1183
+ };
1184
+ }
1185
+ const dataRequest = this.pool.request();
1186
+ Object.entries(params).forEach(([key, value]) => {
1187
+ dataRequest.input(key, value);
1188
+ });
1189
+ dataRequest.input("offset", page * perPage);
1190
+ dataRequest.input("limit", perPage);
1191
+ const dataResult = await dataRequest.query(
1192
+ `SELECT * FROM ${tableName}${actualWhereClause} ORDER BY [startedAt] DESC OFFSET @offset ROWS FETCH NEXT @limit ROWS ONLY`
1193
+ );
1194
+ const spans = dataResult.recordset.map(
1195
+ (row) => transformFromSqlRow({
1196
+ tableName: storage.TABLE_SPANS,
1197
+ sqlRow: row
1198
+ })
1199
+ );
1200
+ return {
1201
+ pagination: {
1202
+ total,
1203
+ page,
1204
+ perPage,
1205
+ hasMore: (page + 1) * perPage < total
1206
+ },
1207
+ spans
1208
+ };
1209
+ } catch (error$1) {
1210
+ throw new error.MastraError(
1211
+ {
1212
+ id: "MSSQL_STORE_GET_TRACES_PAGINATED_FAILED",
1213
+ domain: error.ErrorDomain.STORAGE,
1214
+ category: error.ErrorCategory.USER
1215
+ },
1216
+ error$1
1217
+ );
1218
+ }
1219
+ }
1220
+ async batchCreateSpans(args) {
1221
+ if (!args.records || args.records.length === 0) {
1222
+ return;
1223
+ }
1224
+ try {
1225
+ await this.operations.batchInsert({
1226
+ tableName: storage.TABLE_SPANS,
1227
+ records: args.records.map((span) => ({
1228
+ ...span,
1229
+ startedAt: span.startedAt instanceof Date ? span.startedAt.toISOString() : span.startedAt,
1230
+ endedAt: span.endedAt instanceof Date ? span.endedAt.toISOString() : span.endedAt
1231
+ }))
1232
+ });
1233
+ } catch (error$1) {
1234
+ throw new error.MastraError(
1235
+ {
1236
+ id: "MSSQL_STORE_BATCH_CREATE_SPANS_FAILED",
1237
+ domain: error.ErrorDomain.STORAGE,
1238
+ category: error.ErrorCategory.USER,
1239
+ details: {
1240
+ count: args.records.length
1241
+ }
1242
+ },
1243
+ error$1
1244
+ );
1245
+ }
1246
+ }
1247
+ async batchUpdateSpans(args) {
1248
+ if (!args.records || args.records.length === 0) {
1249
+ return;
1250
+ }
1251
+ try {
1252
+ const updates = args.records.map(({ traceId, spanId, updates: data }) => {
1253
+ const processedData = { ...data };
1254
+ if (processedData.endedAt instanceof Date) {
1255
+ processedData.endedAt = processedData.endedAt.toISOString();
1256
+ }
1257
+ if (processedData.startedAt instanceof Date) {
1258
+ processedData.startedAt = processedData.startedAt.toISOString();
1259
+ }
1260
+ return {
1261
+ keys: { spanId, traceId },
1262
+ data: processedData
1263
+ };
1264
+ });
1265
+ await this.operations.batchUpdate({
1266
+ tableName: storage.TABLE_SPANS,
1267
+ updates
1268
+ });
1269
+ } catch (error$1) {
1270
+ throw new error.MastraError(
1271
+ {
1272
+ id: "MSSQL_STORE_BATCH_UPDATE_SPANS_FAILED",
1273
+ domain: error.ErrorDomain.STORAGE,
1274
+ category: error.ErrorCategory.USER,
1275
+ details: {
1276
+ count: args.records.length
1277
+ }
1278
+ },
1279
+ error$1
1280
+ );
1281
+ }
1282
+ }
1283
+ async batchDeleteTraces(args) {
1284
+ if (!args.traceIds || args.traceIds.length === 0) {
1285
+ return;
1286
+ }
1287
+ try {
1288
+ const keys = args.traceIds.map((traceId) => ({ traceId }));
1289
+ await this.operations.batchDelete({
1290
+ tableName: storage.TABLE_SPANS,
1291
+ keys
1292
+ });
1293
+ } catch (error$1) {
1294
+ throw new error.MastraError(
1295
+ {
1296
+ id: "MSSQL_STORE_BATCH_DELETE_TRACES_FAILED",
1297
+ domain: error.ErrorDomain.STORAGE,
1298
+ category: error.ErrorCategory.USER,
1299
+ details: {
1300
+ count: args.traceIds.length
1301
+ }
1302
+ },
1303
+ error$1
1304
+ );
1305
+ }
1306
+ }
1307
+ };
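
ObservabilityMSSQL is a new storage domain that persists AI tracing spans in TABLE_SPANS through the shared StoreOperationsMSSQL helpers and reports a preferred "batch-with-updates" tracing strategy. A hedged sketch of the span lifecycle it supports; observability is assumed to be a constructed instance, and the spanType value is an assumption rather than something this diff defines:

// Illustrative only; `observability` is a hypothetical ObservabilityMSSQL instance.
async function traceLifecycle(observability) {
  await observability.createSpan({
    traceId: 'trace-1',
    spanId: 'span-1',
    name: "agent run: 'weather-agent'",
    spanType: 'agent_run', // assumed value; not defined in this diff
    startedAt: new Date(),
  });

  await observability.updateSpan({
    traceId: 'trace-1',
    spanId: 'span-1',
    updates: { endedAt: new Date() },
  });

  // Returns { traceId, spans } with spans ordered by startedAt DESC, or null if unknown.
  const trace = await observability.getTrace('trace-1');

  // entityType 'agent' / 'workflow' is translated into a [name] equality filter.
  const { pagination, spans } = await observability.getTracesPaginated({
    filters: { entityType: 'agent', entityId: 'weather-agent' },
    pagination: { page: 0, perPage: 10 },
  });
  return { trace, pagination, spans };
}
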
1308
+ var StoreOperationsMSSQL = class extends storage.StoreOperations {
1309
+ pool;
1310
+ schemaName;
1311
+ setupSchemaPromise = null;
1312
+ schemaSetupComplete = void 0;
1313
+ getSqlType(type, isPrimaryKey = false, useLargeStorage = false) {
1314
+ switch (type) {
1315
+ case "text":
1316
+ if (useLargeStorage) {
1317
+ return "NVARCHAR(MAX)";
1318
+ }
1319
+ return isPrimaryKey ? "NVARCHAR(255)" : "NVARCHAR(400)";
1320
+ case "timestamp":
1321
+ return "DATETIME2(7)";
1322
+ case "uuid":
1323
+ return "UNIQUEIDENTIFIER";
1324
+ case "jsonb":
1325
+ return "NVARCHAR(MAX)";
1326
+ case "integer":
1327
+ return "INT";
1328
+ case "bigint":
1329
+ return "BIGINT";
1330
+ case "float":
1331
+ return "FLOAT";
1332
+ case "boolean":
1333
+ return "BIT";
1334
+ default:
1335
+ throw new error.MastraError({
1336
+ id: "MASTRA_STORAGE_MSSQL_STORE_TYPE_NOT_SUPPORTED",
1337
+ domain: error.ErrorDomain.STORAGE,
1338
+ category: error.ErrorCategory.THIRD_PARTY
1339
+ });
1340
+ }
1341
+ }
1342
+ constructor({ pool, schemaName }) {
1343
+ super();
1344
+ this.pool = pool;
1345
+ this.schemaName = schemaName;
1346
+ }
1347
+ async hasColumn(table, column) {
1348
+ const schema = this.schemaName || "dbo";
1349
+ const request = this.pool.request();
1350
+ request.input("schema", schema);
1351
+ request.input("table", table);
1352
+ request.input("column", column);
1353
+ request.input("columnLower", column.toLowerCase());
1354
+ const result = await request.query(
1355
+ `SELECT 1 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = @schema AND TABLE_NAME = @table AND (COLUMN_NAME = @column OR COLUMN_NAME = @columnLower)`
1356
+ );
1357
+ return result.recordset.length > 0;
1358
+ }
1359
+ async setupSchema() {
1360
+ if (!this.schemaName || this.schemaSetupComplete) {
1361
+ return;
1362
+ }
1363
+ if (!this.setupSchemaPromise) {
1364
+ this.setupSchemaPromise = (async () => {
1365
+ try {
1366
+ const checkRequest = this.pool.request();
1367
+ checkRequest.input("schemaName", this.schemaName);
1368
+ const checkResult = await checkRequest.query(`
1369
+ SELECT 1 AS found FROM INFORMATION_SCHEMA.SCHEMATA WHERE SCHEMA_NAME = @schemaName
1370
+ `);
1371
+ const schemaExists = Array.isArray(checkResult.recordset) && checkResult.recordset.length > 0;
1372
+ if (!schemaExists) {
1373
+ try {
1130
1374
  await this.pool.request().query(`CREATE SCHEMA [${this.schemaName}]`);
1131
1375
  this.logger?.info?.(`Schema "${this.schemaName}" created successfully`);
1132
1376
  } catch (error) {
@@ -1149,20 +1393,26 @@ var StoreOperationsMSSQL = class extends storage.StoreOperations {
1149
1393
  }
1150
1394
  await this.setupSchemaPromise;
1151
1395
  }
1152
- async insert({ tableName, record }) {
1396
+ async insert({
1397
+ tableName,
1398
+ record,
1399
+ transaction
1400
+ }) {
1153
1401
  try {
1154
- const columns = Object.keys(record).map((col) => utils.parseSqlIdentifier(col, "column name"));
1155
- const values = Object.values(record);
1156
- const paramNames = values.map((_, i) => `@param${i}`);
1157
- const insertSql = `INSERT INTO ${getTableName({ indexName: tableName, schemaName: getSchemaName(this.schemaName) })} (${columns.map((c) => `[${c}]`).join(", ")}) VALUES (${paramNames.join(", ")})`;
1158
- const request = this.pool.request();
1159
- values.forEach((value, i) => {
1160
- if (value instanceof Date) {
1161
- request.input(`param${i}`, sql2__default.default.DateTime2, value);
1162
- } else if (typeof value === "object" && value !== null) {
1163
- request.input(`param${i}`, JSON.stringify(value));
1402
+ const columns = Object.keys(record);
1403
+ const parsedColumns = columns.map((col) => utils.parseSqlIdentifier(col, "column name"));
1404
+ const paramNames = columns.map((_, i) => `@param${i}`);
1405
+ const insertSql = `INSERT INTO ${getTableName({ indexName: tableName, schemaName: getSchemaName(this.schemaName) })} (${parsedColumns.map((c) => `[${c}]`).join(", ")}) VALUES (${paramNames.join(", ")})`;
1406
+ const request = transaction ? transaction.request() : this.pool.request();
1407
+ columns.forEach((col, i) => {
1408
+ const value = record[col];
1409
+ const preparedValue = this.prepareValue(value, col, tableName);
1410
+ if (preparedValue instanceof Date) {
1411
+ request.input(`param${i}`, sql2__default.default.DateTime2, preparedValue);
1412
+ } else if (preparedValue === null || preparedValue === void 0) {
1413
+ request.input(`param${i}`, this.getMssqlType(tableName, col), null);
1164
1414
  } else {
1165
- request.input(`param${i}`, value);
1415
+ request.input(`param${i}`, preparedValue);
1166
1416
  }
1167
1417
  });
1168
1418
  await request.query(insertSql);
@@ -1186,7 +1436,7 @@ var StoreOperationsMSSQL = class extends storage.StoreOperations {
1186
1436
  try {
1187
1437
  await this.pool.request().query(`TRUNCATE TABLE ${fullTableName}`);
1188
1438
  } catch (truncateError) {
1189
- if (truncateError.message && truncateError.message.includes("foreign key")) {
1439
+ if (truncateError?.number === 4712) {
1190
1440
  await this.pool.request().query(`DELETE FROM ${fullTableName}`);
1191
1441
  } else {
1192
1442
  throw truncateError;
@@ -1209,9 +1459,11 @@ var StoreOperationsMSSQL = class extends storage.StoreOperations {
1209
1459
  getDefaultValue(type) {
1210
1460
  switch (type) {
1211
1461
  case "timestamp":
1212
- return "DEFAULT SYSDATETIMEOFFSET()";
1462
+ return "DEFAULT SYSUTCDATETIME()";
1213
1463
  case "jsonb":
1214
1464
  return "DEFAULT N'{}'";
1465
+ case "boolean":
1466
+ return "DEFAULT 0";
1215
1467
  default:
1216
1468
  return super.getDefaultValue(type);
1217
1469
  }
@@ -1222,13 +1474,29 @@ var StoreOperationsMSSQL = class extends storage.StoreOperations {
1222
1474
  }) {
1223
1475
  try {
1224
1476
  const uniqueConstraintColumns = tableName === storage.TABLE_WORKFLOW_SNAPSHOT ? ["workflow_name", "run_id"] : [];
1477
+ const largeDataColumns = [
1478
+ "workingMemory",
1479
+ "snapshot",
1480
+ "metadata",
1481
+ "content",
1482
+ // messages.content - can be very long conversation content
1483
+ "input",
1484
+ // evals.input - test input data
1485
+ "output",
1486
+ // evals.output - test output data
1487
+ "instructions",
1488
+ // evals.instructions - evaluation instructions
1489
+ "other"
1490
+ // traces.other - additional trace data
1491
+ ];
1225
1492
  const columns = Object.entries(schema).map(([name, def]) => {
1226
1493
  const parsedName = utils.parseSqlIdentifier(name, "column name");
1227
1494
  const constraints = [];
1228
1495
  if (def.primaryKey) constraints.push("PRIMARY KEY");
1229
1496
  if (!def.nullable) constraints.push("NOT NULL");
1230
1497
  const isIndexed = !!def.primaryKey || uniqueConstraintColumns.includes(name);
1231
- return `[${parsedName}] ${this.getSqlType(def.type, isIndexed)} ${constraints.join(" ")}`.trim();
1498
+ const useLargeStorage = largeDataColumns.includes(name);
1499
+ return `[${parsedName}] ${this.getSqlType(def.type, isIndexed, useLargeStorage)} ${constraints.join(" ")}`.trim();
1232
1500
  }).join(",\n");
1233
1501
  if (this.schemaName) {
1234
1502
  await this.setupSchema();
@@ -1315,7 +1583,19 @@ ${columns}
1315
1583
  const columnExists = Array.isArray(checkResult.recordset) && checkResult.recordset.length > 0;
1316
1584
  if (!columnExists) {
1317
1585
  const columnDef = schema[columnName];
1318
- const sqlType = this.getSqlType(columnDef.type);
1586
+ const largeDataColumns = [
1587
+ "workingMemory",
1588
+ "snapshot",
1589
+ "metadata",
1590
+ "content",
1591
+ "input",
1592
+ "output",
1593
+ "instructions",
1594
+ "other"
1595
+ ];
1596
+ const useLargeStorage = largeDataColumns.includes(columnName);
1597
+ const isIndexed = !!columnDef.primaryKey;
1598
+ const sqlType = this.getSqlType(columnDef.type, isIndexed, useLargeStorage);
1319
1599
  const nullable = columnDef.nullable === false ? "NOT NULL" : "";
1320
1600
  const defaultValue = columnDef.nullable === false ? this.getDefaultValue(columnDef.type) : "";
1321
1601
  const parsedColumnName = utils.parseSqlIdentifier(columnName, "column name");
@@ -1328,118 +1608,672 @@ ${columns}
1328
1608
  } catch (error$1) {
1329
1609
  throw new error.MastraError(
1330
1610
  {
1331
- id: "MASTRA_STORAGE_MSSQL_STORE_ALTER_TABLE_FAILED",
1611
+ id: "MASTRA_STORAGE_MSSQL_STORE_ALTER_TABLE_FAILED",
1612
+ domain: error.ErrorDomain.STORAGE,
1613
+ category: error.ErrorCategory.THIRD_PARTY,
1614
+ details: {
1615
+ tableName
1616
+ }
1617
+ },
1618
+ error$1
1619
+ );
1620
+ }
1621
+ }
1622
+ async load({ tableName, keys }) {
1623
+ try {
1624
+ const keyEntries = Object.entries(keys).map(([key, value]) => [utils.parseSqlIdentifier(key, "column name"), value]);
1625
+ const conditions = keyEntries.map(([key], i) => `[${key}] = @param${i}`).join(" AND ");
1626
+ const sql5 = `SELECT * FROM ${getTableName({ indexName: tableName, schemaName: getSchemaName(this.schemaName) })} WHERE ${conditions}`;
1627
+ const request = this.pool.request();
1628
+ keyEntries.forEach(([key, value], i) => {
1629
+ const preparedValue = this.prepareValue(value, key, tableName);
1630
+ if (preparedValue === null || preparedValue === void 0) {
1631
+ request.input(`param${i}`, this.getMssqlType(tableName, key), null);
1632
+ } else {
1633
+ request.input(`param${i}`, preparedValue);
1634
+ }
1635
+ });
1636
+ const resultSet = await request.query(sql5);
1637
+ const result = resultSet.recordset[0] || null;
1638
+ if (!result) {
1639
+ return null;
1640
+ }
1641
+ if (tableName === storage.TABLE_WORKFLOW_SNAPSHOT) {
1642
+ const snapshot = result;
1643
+ if (typeof snapshot.snapshot === "string") {
1644
+ snapshot.snapshot = JSON.parse(snapshot.snapshot);
1645
+ }
1646
+ return snapshot;
1647
+ }
1648
+ return result;
1649
+ } catch (error$1) {
1650
+ throw new error.MastraError(
1651
+ {
1652
+ id: "MASTRA_STORAGE_MSSQL_STORE_LOAD_FAILED",
1653
+ domain: error.ErrorDomain.STORAGE,
1654
+ category: error.ErrorCategory.THIRD_PARTY,
1655
+ details: {
1656
+ tableName
1657
+ }
1658
+ },
1659
+ error$1
1660
+ );
1661
+ }
1662
+ }
1663
+ async batchInsert({ tableName, records }) {
1664
+ const transaction = this.pool.transaction();
1665
+ try {
1666
+ await transaction.begin();
1667
+ for (const record of records) {
1668
+ await this.insert({ tableName, record, transaction });
1669
+ }
1670
+ await transaction.commit();
1671
+ } catch (error$1) {
1672
+ await transaction.rollback();
1673
+ throw new error.MastraError(
1674
+ {
1675
+ id: "MASTRA_STORAGE_MSSQL_STORE_BATCH_INSERT_FAILED",
1676
+ domain: error.ErrorDomain.STORAGE,
1677
+ category: error.ErrorCategory.THIRD_PARTY,
1678
+ details: {
1679
+ tableName,
1680
+ numberOfRecords: records.length
1681
+ }
1682
+ },
1683
+ error$1
1684
+ );
1685
+ }
1686
+ }
1687
+ async dropTable({ tableName }) {
1688
+ try {
1689
+ const tableNameWithSchema = getTableName({ indexName: tableName, schemaName: getSchemaName(this.schemaName) });
1690
+ await this.pool.request().query(`DROP TABLE IF EXISTS ${tableNameWithSchema}`);
1691
+ } catch (error$1) {
1692
+ throw new error.MastraError(
1693
+ {
1694
+ id: "MASTRA_STORAGE_MSSQL_STORE_DROP_TABLE_FAILED",
1695
+ domain: error.ErrorDomain.STORAGE,
1696
+ category: error.ErrorCategory.THIRD_PARTY,
1697
+ details: {
1698
+ tableName
1699
+ }
1700
+ },
1701
+ error$1
1702
+ );
1703
+ }
1704
+ }
1705
+ /**
1706
+ * Prepares a value for database operations, handling Date objects and JSON serialization
1707
+ */
1708
+ prepareValue(value, columnName, tableName) {
1709
+ if (value === null || value === void 0) {
1710
+ return value;
1711
+ }
1712
+ if (value instanceof Date) {
1713
+ return value;
1714
+ }
1715
+ const schema = storage.TABLE_SCHEMAS[tableName];
1716
+ const columnSchema = schema?.[columnName];
1717
+ if (columnSchema?.type === "boolean") {
1718
+ return value ? 1 : 0;
1719
+ }
1720
+ if (columnSchema?.type === "jsonb") {
1721
+ if (typeof value === "string") {
1722
+ const trimmed = value.trim();
1723
+ if (trimmed.length > 0) {
1724
+ try {
1725
+ JSON.parse(trimmed);
1726
+ return trimmed;
1727
+ } catch {
1728
+ }
1729
+ }
1730
+ return JSON.stringify(value);
1731
+ }
1732
+ if (typeof value === "bigint") {
1733
+ return value.toString();
1734
+ }
1735
+ return JSON.stringify(value);
1736
+ }
1737
+ if (typeof value === "object") {
1738
+ return JSON.stringify(value);
1739
+ }
1740
+ return value;
1741
+ }
1742
+ /**
1743
+ * Maps TABLE_SCHEMAS types to mssql param types (used when value is null)
1744
+ */
1745
+ getMssqlType(tableName, columnName) {
1746
+ const col = storage.TABLE_SCHEMAS[tableName]?.[columnName];
1747
+ switch (col?.type) {
1748
+ case "text":
1749
+ return sql2__default.default.NVarChar;
1750
+ case "timestamp":
1751
+ return sql2__default.default.DateTime2;
1752
+ case "uuid":
1753
+ return sql2__default.default.UniqueIdentifier;
1754
+ case "jsonb":
1755
+ return sql2__default.default.NVarChar;
1756
+ case "integer":
1757
+ return sql2__default.default.Int;
1758
+ case "bigint":
1759
+ return sql2__default.default.BigInt;
1760
+ case "float":
1761
+ return sql2__default.default.Float;
1762
+ case "boolean":
1763
+ return sql2__default.default.Bit;
1764
+ default:
1765
+ return sql2__default.default.NVarChar;
1766
+ }
1767
+ }
1768
+ /**
1769
+ * Update a single record in the database
1770
+ */
1771
+ async update({
1772
+ tableName,
1773
+ keys,
1774
+ data,
1775
+ transaction
1776
+ }) {
1777
+ try {
1778
+ if (!data || Object.keys(data).length === 0) {
1779
+ throw new error.MastraError({
1780
+ id: "MASTRA_STORAGE_MSSQL_UPDATE_EMPTY_DATA",
1781
+ domain: error.ErrorDomain.STORAGE,
1782
+ category: error.ErrorCategory.USER,
1783
+ text: "Cannot update with empty data payload"
1784
+ });
1785
+ }
1786
+ if (!keys || Object.keys(keys).length === 0) {
1787
+ throw new error.MastraError({
1788
+ id: "MASTRA_STORAGE_MSSQL_UPDATE_EMPTY_KEYS",
1789
+ domain: error.ErrorDomain.STORAGE,
1790
+ category: error.ErrorCategory.USER,
1791
+ text: "Cannot update without keys to identify records"
1792
+ });
1793
+ }
1794
+ const setClauses = [];
1795
+ const request = transaction ? transaction.request() : this.pool.request();
1796
+ let paramIndex = 0;
1797
+ Object.entries(data).forEach(([key, value]) => {
1798
+ const parsedKey = utils.parseSqlIdentifier(key, "column name");
1799
+ const paramName = `set${paramIndex++}`;
1800
+ setClauses.push(`[${parsedKey}] = @${paramName}`);
1801
+ const preparedValue = this.prepareValue(value, key, tableName);
1802
+ if (preparedValue === null || preparedValue === void 0) {
1803
+ request.input(paramName, this.getMssqlType(tableName, key), null);
1804
+ } else {
1805
+ request.input(paramName, preparedValue);
1806
+ }
1807
+ });
1808
+ const whereConditions = [];
1809
+ Object.entries(keys).forEach(([key, value]) => {
1810
+ const parsedKey = utils.parseSqlIdentifier(key, "column name");
1811
+ const paramName = `where${paramIndex++}`;
1812
+ whereConditions.push(`[${parsedKey}] = @${paramName}`);
1813
+ const preparedValue = this.prepareValue(value, key, tableName);
1814
+ if (preparedValue === null || preparedValue === void 0) {
1815
+ request.input(paramName, this.getMssqlType(tableName, key), null);
1816
+ } else {
1817
+ request.input(paramName, preparedValue);
1818
+ }
1819
+ });
1820
+ const tableName_ = getTableName({
1821
+ indexName: tableName,
1822
+ schemaName: getSchemaName(this.schemaName)
1823
+ });
1824
+ const updateSql = `UPDATE ${tableName_} SET ${setClauses.join(", ")} WHERE ${whereConditions.join(" AND ")}`;
1825
+ await request.query(updateSql);
1826
+ } catch (error$1) {
1827
+ throw new error.MastraError(
1828
+ {
1829
+ id: "MASTRA_STORAGE_MSSQL_STORE_UPDATE_FAILED",
1830
+ domain: error.ErrorDomain.STORAGE,
1831
+ category: error.ErrorCategory.THIRD_PARTY,
1832
+ details: {
1833
+ tableName
1834
+ }
1835
+ },
1836
+ error$1
1837
+ );
1838
+ }
1839
+ }
1840
+ /**
1841
+ * Update multiple records in a single batch transaction
1842
+ */
1843
+ async batchUpdate({
1844
+ tableName,
1845
+ updates
1846
+ }) {
1847
+ const transaction = this.pool.transaction();
1848
+ try {
1849
+ await transaction.begin();
1850
+ for (const { keys, data } of updates) {
1851
+ await this.update({ tableName, keys, data, transaction });
1852
+ }
1853
+ await transaction.commit();
1854
+ } catch (error$1) {
1855
+ await transaction.rollback();
1856
+ throw new error.MastraError(
1857
+ {
1858
+ id: "MASTRA_STORAGE_MSSQL_STORE_BATCH_UPDATE_FAILED",
1859
+ domain: error.ErrorDomain.STORAGE,
1860
+ category: error.ErrorCategory.THIRD_PARTY,
1861
+ details: {
1862
+ tableName,
1863
+ numberOfRecords: updates.length
1864
+ }
1865
+ },
1866
+ error$1
1867
+ );
1868
+ }
1869
+ }
1870
+ /**
1871
+ * Delete multiple records by keys
1872
+ */
1873
+ async batchDelete({ tableName, keys }) {
1874
+ if (keys.length === 0) {
1875
+ return;
1876
+ }
1877
+ const tableName_ = getTableName({
1878
+ indexName: tableName,
1879
+ schemaName: getSchemaName(this.schemaName)
1880
+ });
1881
+ const transaction = this.pool.transaction();
1882
+ try {
1883
+ await transaction.begin();
1884
+ for (const keySet of keys) {
1885
+ const conditions = [];
1886
+ const request = transaction.request();
1887
+ let paramIndex = 0;
1888
+ Object.entries(keySet).forEach(([key, value]) => {
1889
+ const parsedKey = utils.parseSqlIdentifier(key, "column name");
1890
+ const paramName = `p${paramIndex++}`;
1891
+ conditions.push(`[${parsedKey}] = @${paramName}`);
1892
+ const preparedValue = this.prepareValue(value, key, tableName);
1893
+ if (preparedValue === null || preparedValue === void 0) {
1894
+ request.input(paramName, this.getMssqlType(tableName, key), null);
1895
+ } else {
1896
+ request.input(paramName, preparedValue);
1897
+ }
1898
+ });
1899
+ const deleteSql = `DELETE FROM ${tableName_} WHERE ${conditions.join(" AND ")}`;
1900
+ await request.query(deleteSql);
1901
+ }
1902
+ await transaction.commit();
1903
+ } catch (error$1) {
1904
+ await transaction.rollback();
1905
+ throw new error.MastraError(
1906
+ {
1907
+ id: "MASTRA_STORAGE_MSSQL_STORE_BATCH_DELETE_FAILED",
1908
+ domain: error.ErrorDomain.STORAGE,
1909
+ category: error.ErrorCategory.THIRD_PARTY,
1910
+ details: {
1911
+ tableName,
1912
+ numberOfRecords: keys.length
1913
+ }
1914
+ },
1915
+ error$1
1916
+ );
1917
+ }
1918
+ }
1919
+ /**
1920
+ * Create a new index on a table
1921
+ */
1922
+ async createIndex(options) {
1923
+ try {
1924
+ const { name, table, columns, unique = false, where } = options;
1925
+ const schemaName = this.schemaName || "dbo";
1926
+ const fullTableName = getTableName({
1927
+ indexName: table,
1928
+ schemaName: getSchemaName(this.schemaName)
1929
+ });
1930
+ const indexNameSafe = utils.parseSqlIdentifier(name, "index name");
1931
+ const checkRequest = this.pool.request();
1932
+ checkRequest.input("indexName", indexNameSafe);
1933
+ checkRequest.input("schemaName", schemaName);
1934
+ checkRequest.input("tableName", table);
1935
+ const indexExists = await checkRequest.query(`
1936
+ SELECT 1 as found
1937
+ FROM sys.indexes i
1938
+ INNER JOIN sys.tables t ON i.object_id = t.object_id
1939
+ INNER JOIN sys.schemas s ON t.schema_id = s.schema_id
1940
+ WHERE i.name = @indexName
1941
+ AND s.name = @schemaName
1942
+ AND t.name = @tableName
1943
+ `);
1944
+ if (indexExists.recordset && indexExists.recordset.length > 0) {
1945
+ return;
1946
+ }
1947
+ const uniqueStr = unique ? "UNIQUE " : "";
1948
+ const columnsStr = columns.map((col) => {
1949
+ if (col.includes(" DESC") || col.includes(" ASC")) {
1950
+ const [colName, ...modifiers] = col.split(" ");
1951
+ if (!colName) {
1952
+ throw new Error(`Invalid column specification: ${col}`);
1953
+ }
1954
+ return `[${utils.parseSqlIdentifier(colName, "column name")}] ${modifiers.join(" ")}`;
1955
+ }
1956
+ return `[${utils.parseSqlIdentifier(col, "column name")}]`;
1957
+ }).join(", ");
1958
+ const whereStr = where ? ` WHERE ${where}` : "";
1959
+ const createIndexSql = `CREATE ${uniqueStr}INDEX [${indexNameSafe}] ON ${fullTableName} (${columnsStr})${whereStr}`;
1960
+ await this.pool.request().query(createIndexSql);
1961
+ } catch (error$1) {
1962
+ throw new error.MastraError(
1963
+ {
1964
+ id: "MASTRA_STORAGE_MSSQL_INDEX_CREATE_FAILED",
1965
+ domain: error.ErrorDomain.STORAGE,
1966
+ category: error.ErrorCategory.THIRD_PARTY,
1967
+ details: {
1968
+ indexName: options.name,
1969
+ tableName: options.table
1970
+ }
1971
+ },
1972
+ error$1
1973
+ );
1974
+ }
1975
+ }
1976
+ /**
1977
+ * Drop an existing index
1978
+ */
1979
+ async dropIndex(indexName) {
1980
+ try {
1981
+ const schemaName = this.schemaName || "dbo";
1982
+ const indexNameSafe = utils.parseSqlIdentifier(indexName, "index name");
1983
+ const checkRequest = this.pool.request();
1984
+ checkRequest.input("indexName", indexNameSafe);
1985
+ checkRequest.input("schemaName", schemaName);
1986
+ const result = await checkRequest.query(`
1987
+ SELECT t.name as table_name
1988
+ FROM sys.indexes i
1989
+ INNER JOIN sys.tables t ON i.object_id = t.object_id
1990
+ INNER JOIN sys.schemas s ON t.schema_id = s.schema_id
1991
+ WHERE i.name = @indexName
1992
+ AND s.name = @schemaName
1993
+ `);
1994
+ if (!result.recordset || result.recordset.length === 0) {
1995
+ return;
1996
+ }
1997
+ if (result.recordset.length > 1) {
1998
+ const tables = result.recordset.map((r) => r.table_name).join(", ");
1999
+ throw new error.MastraError({
2000
+ id: "MASTRA_STORAGE_MSSQL_INDEX_AMBIGUOUS",
2001
+ domain: error.ErrorDomain.STORAGE,
2002
+ category: error.ErrorCategory.USER,
2003
+ text: `Index "${indexNameSafe}" exists on multiple tables (${tables}) in schema "${schemaName}". Please drop indexes manually or ensure unique index names.`
2004
+ });
2005
+ }
2006
+ const tableName = result.recordset[0].table_name;
2007
+ const fullTableName = getTableName({
2008
+ indexName: tableName,
2009
+ schemaName: getSchemaName(this.schemaName)
2010
+ });
2011
+ const dropSql = `DROP INDEX [${indexNameSafe}] ON ${fullTableName}`;
2012
+ await this.pool.request().query(dropSql);
2013
+ } catch (error$1) {
2014
+ throw new error.MastraError(
2015
+ {
2016
+ id: "MASTRA_STORAGE_MSSQL_INDEX_DROP_FAILED",
1332
2017
  domain: error.ErrorDomain.STORAGE,
1333
2018
  category: error.ErrorCategory.THIRD_PARTY,
1334
2019
  details: {
1335
- tableName
2020
+ indexName
1336
2021
  }
1337
2022
  },
1338
2023
  error$1
1339
2024
  );
1340
2025
  }
1341
2026
  }
1342
- async load({ tableName, keys }) {
2027
+ /**
2028
+ * List indexes for a specific table or all tables
2029
+ */
2030
+ async listIndexes(tableName) {
1343
2031
  try {
1344
- const keyEntries = Object.entries(keys).map(([key, value]) => [utils.parseSqlIdentifier(key, "column name"), value]);
1345
- const conditions = keyEntries.map(([key], i) => `[${key}] = @param${i}`).join(" AND ");
1346
- const values = keyEntries.map(([_, value]) => value);
1347
- const sql7 = `SELECT * FROM ${getTableName({ indexName: tableName, schemaName: getSchemaName(this.schemaName) })} WHERE ${conditions}`;
2032
+ const schemaName = this.schemaName || "dbo";
2033
+ let query;
1348
2034
  const request = this.pool.request();
1349
- values.forEach((value, i) => {
1350
- request.input(`param${i}`, value);
1351
- });
1352
- const resultSet = await request.query(sql7);
1353
- const result = resultSet.recordset[0] || null;
1354
- if (!result) {
1355
- return null;
2035
+ request.input("schemaName", schemaName);
2036
+ if (tableName) {
2037
+ query = `
2038
+ SELECT
2039
+ i.name as name,
2040
+ o.name as [table],
2041
+ i.is_unique as is_unique,
2042
+ CAST(SUM(s.used_page_count) * 8 / 1024.0 AS VARCHAR(50)) + ' MB' as size
2043
+ FROM sys.indexes i
2044
+ INNER JOIN sys.objects o ON i.object_id = o.object_id
2045
+ INNER JOIN sys.schemas sch ON o.schema_id = sch.schema_id
2046
+ LEFT JOIN sys.dm_db_partition_stats s ON i.object_id = s.object_id AND i.index_id = s.index_id
2047
+ WHERE sch.name = @schemaName
2048
+ AND o.name = @tableName
2049
+ AND i.name IS NOT NULL
2050
+ GROUP BY i.name, o.name, i.is_unique
2051
+ `;
2052
+ request.input("tableName", tableName);
2053
+ } else {
2054
+ query = `
2055
+ SELECT
2056
+ i.name as name,
2057
+ o.name as [table],
2058
+ i.is_unique as is_unique,
2059
+ CAST(SUM(s.used_page_count) * 8 / 1024.0 AS VARCHAR(50)) + ' MB' as size
2060
+ FROM sys.indexes i
2061
+ INNER JOIN sys.objects o ON i.object_id = o.object_id
2062
+ INNER JOIN sys.schemas sch ON o.schema_id = sch.schema_id
2063
+ LEFT JOIN sys.dm_db_partition_stats s ON i.object_id = s.object_id AND i.index_id = s.index_id
2064
+ WHERE sch.name = @schemaName
2065
+ AND i.name IS NOT NULL
2066
+ GROUP BY i.name, o.name, i.is_unique
2067
+ `;
1356
2068
  }
1357
- if (tableName === storage.TABLE_WORKFLOW_SNAPSHOT) {
1358
- const snapshot = result;
1359
- if (typeof snapshot.snapshot === "string") {
1360
- snapshot.snapshot = JSON.parse(snapshot.snapshot);
1361
- }
1362
- return snapshot;
2069
+ const result = await request.query(query);
2070
+ const indexes = [];
2071
+ for (const row of result.recordset) {
2072
+ const colRequest = this.pool.request();
2073
+ colRequest.input("indexName", row.name);
2074
+ colRequest.input("schemaName", schemaName);
2075
+ const colResult = await colRequest.query(`
2076
+ SELECT c.name as column_name
2077
+ FROM sys.indexes i
2078
+ INNER JOIN sys.index_columns ic ON i.object_id = ic.object_id AND i.index_id = ic.index_id
2079
+ INNER JOIN sys.columns c ON ic.object_id = c.object_id AND ic.column_id = c.column_id
2080
+ INNER JOIN sys.objects o ON i.object_id = o.object_id
2081
+ INNER JOIN sys.schemas s ON o.schema_id = s.schema_id
2082
+ WHERE i.name = @indexName
2083
+ AND s.name = @schemaName
2084
+ ORDER BY ic.key_ordinal
2085
+ `);
2086
+ indexes.push({
2087
+ name: row.name,
2088
+ table: row.table,
2089
+ columns: colResult.recordset.map((c) => c.column_name),
2090
+ unique: row.is_unique || false,
2091
+ size: row.size || "0 MB",
2092
+ definition: ""
2093
+ // MSSQL doesn't store definition like PG
2094
+ });
1363
2095
  }
1364
- return result;
2096
+ return indexes;
1365
2097
  } catch (error$1) {
1366
2098
  throw new error.MastraError(
1367
2099
  {
1368
- id: "MASTRA_STORAGE_MSSQL_STORE_LOAD_FAILED",
2100
+ id: "MASTRA_STORAGE_MSSQL_INDEX_LIST_FAILED",
1369
2101
  domain: error.ErrorDomain.STORAGE,
1370
2102
  category: error.ErrorCategory.THIRD_PARTY,
1371
- details: {
2103
+ details: tableName ? {
1372
2104
  tableName
1373
- }
2105
+ } : {}
1374
2106
  },
1375
2107
  error$1
1376
2108
  );
1377
2109
  }
1378
2110
  }
1379
- async batchInsert({ tableName, records }) {
1380
- const transaction = this.pool.transaction();
2111
+ /**
2112
+ * Get detailed statistics for a specific index
2113
+ */
2114
+ async describeIndex(indexName) {
1381
2115
  try {
1382
- await transaction.begin();
1383
- for (const record of records) {
1384
- await this.insert({ tableName, record });
2116
+ const schemaName = this.schemaName || "dbo";
2117
+ const request = this.pool.request();
2118
+ request.input("indexName", indexName);
2119
+ request.input("schemaName", schemaName);
2120
+ const query = `
2121
+ SELECT
2122
+ i.name as name,
2123
+ o.name as [table],
2124
+ i.is_unique as is_unique,
2125
+ CAST(SUM(s.used_page_count) * 8 / 1024.0 AS VARCHAR(50)) + ' MB' as size,
2126
+ i.type_desc as method,
2127
+ ISNULL(us.user_scans, 0) as scans,
2128
+ ISNULL(us.user_seeks + us.user_scans, 0) as tuples_read,
2129
+ ISNULL(us.user_lookups, 0) as tuples_fetched
2130
+ FROM sys.indexes i
2131
+ INNER JOIN sys.objects o ON i.object_id = o.object_id
2132
+ INNER JOIN sys.schemas sch ON o.schema_id = sch.schema_id
2133
+ LEFT JOIN sys.dm_db_partition_stats s ON i.object_id = s.object_id AND i.index_id = s.index_id
2134
+ LEFT JOIN sys.dm_db_index_usage_stats us ON i.object_id = us.object_id AND i.index_id = us.index_id
2135
+ WHERE i.name = @indexName
2136
+ AND sch.name = @schemaName
2137
+ GROUP BY i.name, o.name, i.is_unique, i.type_desc, us.user_seeks, us.user_scans, us.user_lookups
2138
+ `;
2139
+ const result = await request.query(query);
2140
+ if (!result.recordset || result.recordset.length === 0) {
2141
+ throw new Error(`Index "${indexName}" not found in schema "${schemaName}"`);
1385
2142
  }
1386
- await transaction.commit();
2143
+ const row = result.recordset[0];
2144
+ const colRequest = this.pool.request();
2145
+ colRequest.input("indexName", indexName);
2146
+ colRequest.input("schemaName", schemaName);
2147
+ const colResult = await colRequest.query(`
2148
+ SELECT c.name as column_name
2149
+ FROM sys.indexes i
2150
+ INNER JOIN sys.index_columns ic ON i.object_id = ic.object_id AND i.index_id = ic.index_id
2151
+ INNER JOIN sys.columns c ON ic.object_id = c.object_id AND ic.column_id = c.column_id
2152
+ INNER JOIN sys.objects o ON i.object_id = o.object_id
2153
+ INNER JOIN sys.schemas s ON o.schema_id = s.schema_id
2154
+ WHERE i.name = @indexName
2155
+ AND s.name = @schemaName
2156
+ ORDER BY ic.key_ordinal
2157
+ `);
2158
+ return {
2159
+ name: row.name,
2160
+ table: row.table,
2161
+ columns: colResult.recordset.map((c) => c.column_name),
2162
+ unique: row.is_unique || false,
2163
+ size: row.size || "0 MB",
2164
+ definition: "",
2165
+ method: row.method?.toLowerCase() || "nonclustered",
2166
+ scans: Number(row.scans) || 0,
2167
+ tuples_read: Number(row.tuples_read) || 0,
2168
+ tuples_fetched: Number(row.tuples_fetched) || 0
2169
+ };
1387
2170
  } catch (error$1) {
1388
- await transaction.rollback();
1389
2171
  throw new error.MastraError(
1390
2172
  {
1391
- id: "MASTRA_STORAGE_MSSQL_STORE_BATCH_INSERT_FAILED",
2173
+ id: "MASTRA_STORAGE_MSSQL_INDEX_DESCRIBE_FAILED",
1392
2174
  domain: error.ErrorDomain.STORAGE,
1393
2175
  category: error.ErrorCategory.THIRD_PARTY,
1394
2176
  details: {
1395
- tableName,
1396
- numberOfRecords: records.length
2177
+ indexName
1397
2178
  }
1398
2179
  },
1399
2180
  error$1
1400
2181
  );
1401
2182
  }
1402
2183
  }
1403
- async dropTable({ tableName }) {
2184
+ /**
2185
+ * Returns definitions for automatic performance indexes
2186
+ * IMPORTANT: Uses seq_id DESC instead of createdAt DESC for MSSQL due to millisecond accuracy limitations
2187
+ * NOTE: Using NVARCHAR(400) for text columns (800 bytes) leaves room for composite indexes
2188
+ */
2189
+ getAutomaticIndexDefinitions() {
2190
+ const schemaPrefix = this.schemaName ? `${this.schemaName}_` : "";
2191
+ return [
2192
+ // Composite indexes for optimal filtering + sorting performance
2193
+ // NVARCHAR(400) = 800 bytes, plus BIGINT (8 bytes) = 808 bytes total (under 900-byte limit)
2194
+ {
2195
+ name: `${schemaPrefix}mastra_threads_resourceid_seqid_idx`,
2196
+ table: storage.TABLE_THREADS,
2197
+ columns: ["resourceId", "seq_id DESC"]
2198
+ },
2199
+ {
2200
+ name: `${schemaPrefix}mastra_messages_thread_id_seqid_idx`,
2201
+ table: storage.TABLE_MESSAGES,
2202
+ columns: ["thread_id", "seq_id DESC"]
2203
+ },
2204
+ {
2205
+ name: `${schemaPrefix}mastra_traces_name_seqid_idx`,
2206
+ table: storage.TABLE_TRACES,
2207
+ columns: ["name", "seq_id DESC"]
2208
+ },
2209
+ {
2210
+ name: `${schemaPrefix}mastra_scores_trace_id_span_id_seqid_idx`,
2211
+ table: storage.TABLE_SCORERS,
2212
+ columns: ["traceId", "spanId", "seq_id DESC"]
2213
+ },
2214
+ // Spans indexes for optimal trace querying
2215
+ {
2216
+ name: `${schemaPrefix}mastra_ai_spans_traceid_startedat_idx`,
2217
+ table: storage.TABLE_SPANS,
2218
+ columns: ["traceId", "startedAt DESC"]
2219
+ },
2220
+ {
2221
+ name: `${schemaPrefix}mastra_ai_spans_parentspanid_startedat_idx`,
2222
+ table: storage.TABLE_SPANS,
2223
+ columns: ["parentSpanId", "startedAt DESC"]
2224
+ },
2225
+ {
2226
+ name: `${schemaPrefix}mastra_ai_spans_name_idx`,
2227
+ table: storage.TABLE_SPANS,
2228
+ columns: ["name"]
2229
+ },
2230
+ {
2231
+ name: `${schemaPrefix}mastra_ai_spans_spantype_startedat_idx`,
2232
+ table: storage.TABLE_SPANS,
2233
+ columns: ["spanType", "startedAt DESC"]
2234
+ }
2235
+ ];
2236
+ }
2237
+ /**
2238
+ * Creates automatic indexes for optimal query performance
2239
+ * Uses getAutomaticIndexDefinitions() to determine which indexes to create
2240
+ */
2241
+ async createAutomaticIndexes() {
1404
2242
  try {
1405
- const tableNameWithSchema = getTableName({ indexName: tableName, schemaName: getSchemaName(this.schemaName) });
1406
- await this.pool.request().query(`DROP TABLE IF EXISTS ${tableNameWithSchema}`);
2243
+ const indexes = this.getAutomaticIndexDefinitions();
2244
+ for (const indexOptions of indexes) {
2245
+ try {
2246
+ await this.createIndex(indexOptions);
2247
+ } catch (error) {
2248
+ this.logger?.warn?.(`Failed to create index ${indexOptions.name}:`, error);
2249
+ }
2250
+ }
1407
2251
  } catch (error$1) {
1408
2252
  throw new error.MastraError(
1409
2253
  {
1410
- id: "MASTRA_STORAGE_MSSQL_STORE_DROP_TABLE_FAILED",
2254
+ id: "MASTRA_STORAGE_MSSQL_STORE_CREATE_PERFORMANCE_INDEXES_FAILED",
1411
2255
  domain: error.ErrorDomain.STORAGE,
1412
- category: error.ErrorCategory.THIRD_PARTY,
1413
- details: {
1414
- tableName
1415
- }
2256
+ category: error.ErrorCategory.THIRD_PARTY
1416
2257
  },
1417
2258
  error$1
1418
2259
  );
1419
2260
  }
1420
2261
  }
1421
2262
  };
1422
- function parseJSON(jsonString) {
1423
- try {
1424
- return JSON.parse(jsonString);
1425
- } catch {
1426
- return jsonString;
1427
- }
1428
- }
1429
2263
  function transformScoreRow(row) {
1430
2264
  return {
1431
2265
  ...row,
1432
- input: parseJSON(row.input),
1433
- scorer: parseJSON(row.scorer),
1434
- preprocessStepResult: parseJSON(row.preprocessStepResult),
1435
- analyzeStepResult: parseJSON(row.analyzeStepResult),
1436
- metadata: parseJSON(row.metadata),
1437
- output: parseJSON(row.output),
1438
- additionalContext: parseJSON(row.additionalContext),
1439
- runtimeContext: parseJSON(row.runtimeContext),
1440
- entity: parseJSON(row.entity),
1441
- createdAt: row.createdAt,
1442
- updatedAt: row.updatedAt
2266
+ input: storage.safelyParseJSON(row.input),
2267
+ scorer: storage.safelyParseJSON(row.scorer),
2268
+ preprocessStepResult: storage.safelyParseJSON(row.preprocessStepResult),
2269
+ analyzeStepResult: storage.safelyParseJSON(row.analyzeStepResult),
2270
+ metadata: storage.safelyParseJSON(row.metadata),
2271
+ output: storage.safelyParseJSON(row.output),
2272
+ additionalContext: storage.safelyParseJSON(row.additionalContext),
2273
+ requestContext: storage.safelyParseJSON(row.requestContext),
2274
+ entity: storage.safelyParseJSON(row.entity),
2275
+ createdAt: new Date(row.createdAt),
2276
+ updatedAt: new Date(row.updatedAt)
1443
2277
  };
1444
2278
  }
1445
2279
  var ScoresMSSQL = class extends storage.ScoresStorage {
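
The index-management methods added above (createIndex, listIndexes, describeIndex, dropIndex) take the option and return shapes visible in their implementations. A minimal usage sketch, assuming `operations` is the StoreOperationsMSSQL instance the store builds; the index, table, and column names are illustrative, not shipped defaults:

    // Hedged usage sketch of the index-management API added above. `operations` is
    // assumed to be a StoreOperationsMSSQL instance; names below are illustrative only.
    await operations.createIndex({
      name: 'mastra_threads_title_idx',    // illustrative index name
      table: 'mastra_threads',             // illustrative table name
      columns: ['title', 'seq_id DESC'],   // "col DESC"/"col ASC" suffixes are parsed as shown above
      unique: false,
    });

    const all = await operations.listIndexes();                       // every index in the schema
    const forTable = await operations.listIndexes('mastra_threads');  // scoped to one table
    const stats = await operations.describeIndex('mastra_threads_title_idx');
    // stats: { name, table, columns, unique, size, definition, method, scans, tuples_read, tuples_fetched }
    await operations.dropIndex('mastra_threads_title_idx');
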
@@ -1482,7 +2316,7 @@ var ScoresMSSQL = class extends storage.ScoresStorage {
1482
2316
  async saveScore(score) {
1483
2317
  let validatedScore;
1484
2318
  try {
1485
- validatedScore = scores.saveScorePayloadSchema.parse(score);
2319
+ validatedScore = evals.saveScorePayloadSchema.parse(score);
1486
2320
  } catch (error$1) {
1487
2321
  throw new error.MastraError(
1488
2322
  {
@@ -1503,7 +2337,7 @@ var ScoresMSSQL = class extends storage.ScoresStorage {
1503
2337
  input,
1504
2338
  output,
1505
2339
  additionalContext,
1506
- runtimeContext,
2340
+ requestContext,
1507
2341
  entity,
1508
2342
  ...rest
1509
2343
  } = validatedScore;
@@ -1512,15 +2346,15 @@ var ScoresMSSQL = class extends storage.ScoresStorage {
1512
2346
  record: {
1513
2347
  id: scoreId,
1514
2348
  ...rest,
1515
- input: JSON.stringify(input) || "",
1516
- output: JSON.stringify(output) || "",
1517
- preprocessStepResult: preprocessStepResult ? JSON.stringify(preprocessStepResult) : null,
1518
- analyzeStepResult: analyzeStepResult ? JSON.stringify(analyzeStepResult) : null,
1519
- metadata: metadata ? JSON.stringify(metadata) : null,
1520
- additionalContext: additionalContext ? JSON.stringify(additionalContext) : null,
1521
- runtimeContext: runtimeContext ? JSON.stringify(runtimeContext) : null,
1522
- entity: entity ? JSON.stringify(entity) : null,
1523
- scorer: scorer ? JSON.stringify(scorer) : null,
2349
+ input: input || "",
2350
+ output: output || "",
2351
+ preprocessStepResult: preprocessStepResult || null,
2352
+ analyzeStepResult: analyzeStepResult || null,
2353
+ metadata: metadata || null,
2354
+ additionalContext: additionalContext || null,
2355
+ requestContext: requestContext || null,
2356
+ entity: entity || null,
2357
+ scorer: scorer || null,
1524
2358
  createdAt: (/* @__PURE__ */ new Date()).toISOString(),
1525
2359
  updatedAt: (/* @__PURE__ */ new Date()).toISOString()
1526
2360
  }
@@ -1538,41 +2372,70 @@ var ScoresMSSQL = class extends storage.ScoresStorage {
1538
2372
  );
1539
2373
  }
1540
2374
  }
1541
- async getScoresByScorerId({
2375
+ async listScoresByScorerId({
1542
2376
  scorerId,
1543
- pagination
2377
+ pagination,
2378
+ entityId,
2379
+ entityType,
2380
+ source
1544
2381
  }) {
1545
2382
  try {
1546
- const request = this.pool.request();
1547
- request.input("p1", scorerId);
1548
- const totalResult = await request.query(
1549
- `SELECT COUNT(*) as count FROM ${getTableName({ indexName: storage.TABLE_SCORERS, schemaName: getSchemaName(this.schema) })} WHERE [scorerId] = @p1`
1550
- );
2383
+ const conditions = ["[scorerId] = @p1"];
2384
+ const params = { p1: scorerId };
2385
+ let paramIndex = 2;
2386
+ if (entityId) {
2387
+ conditions.push(`[entityId] = @p${paramIndex}`);
2388
+ params[`p${paramIndex}`] = entityId;
2389
+ paramIndex++;
2390
+ }
2391
+ if (entityType) {
2392
+ conditions.push(`[entityType] = @p${paramIndex}`);
2393
+ params[`p${paramIndex}`] = entityType;
2394
+ paramIndex++;
2395
+ }
2396
+ if (source) {
2397
+ conditions.push(`[source] = @p${paramIndex}`);
2398
+ params[`p${paramIndex}`] = source;
2399
+ paramIndex++;
2400
+ }
2401
+ const whereClause = conditions.join(" AND ");
2402
+ const tableName = getTableName({ indexName: storage.TABLE_SCORERS, schemaName: getSchemaName(this.schema) });
2403
+ const countRequest = this.pool.request();
2404
+ Object.entries(params).forEach(([key, value]) => {
2405
+ countRequest.input(key, value);
2406
+ });
2407
+ const totalResult = await countRequest.query(`SELECT COUNT(*) as count FROM ${tableName} WHERE ${whereClause}`);
1551
2408
  const total = totalResult.recordset[0]?.count || 0;
2409
+ const { page, perPage: perPageInput } = pagination;
1552
2410
  if (total === 0) {
1553
2411
  return {
1554
2412
  pagination: {
1555
2413
  total: 0,
1556
- page: pagination.page,
1557
- perPage: pagination.perPage,
2414
+ page,
2415
+ perPage: perPageInput,
1558
2416
  hasMore: false
1559
2417
  },
1560
2418
  scores: []
1561
2419
  };
1562
2420
  }
2421
+ const perPage = storage.normalizePerPage(perPageInput, 100);
2422
+ const { offset: start, perPage: perPageForResponse } = storage.calculatePagination(page, perPageInput, perPage);
2423
+ const limitValue = perPageInput === false ? total : perPage;
2424
+ const end = perPageInput === false ? total : start + perPage;
1563
2425
  const dataRequest = this.pool.request();
1564
- dataRequest.input("p1", scorerId);
1565
- dataRequest.input("p2", pagination.perPage);
1566
- dataRequest.input("p3", pagination.page * pagination.perPage);
1567
- const result = await dataRequest.query(
1568
- `SELECT * FROM ${getTableName({ indexName: storage.TABLE_SCORERS, schemaName: getSchemaName(this.schema) })} WHERE [scorerId] = @p1 ORDER BY [createdAt] DESC OFFSET @p3 ROWS FETCH NEXT @p2 ROWS ONLY`
1569
- );
2426
+ Object.entries(params).forEach(([key, value]) => {
2427
+ dataRequest.input(key, value);
2428
+ });
2429
+ dataRequest.input("perPage", limitValue);
2430
+ dataRequest.input("offset", start);
2431
+ const dataQuery = `SELECT * FROM ${tableName} WHERE ${whereClause} ORDER BY [createdAt] DESC OFFSET @offset ROWS FETCH NEXT @perPage ROWS ONLY`;
2432
+ const result = await dataRequest.query(dataQuery);
1570
2433
  return {
1571
2434
  pagination: {
1572
2435
  total: Number(total),
1573
- page: pagination.page,
1574
- perPage: pagination.perPage,
1575
- hasMore: Number(total) > (pagination.page + 1) * pagination.perPage
2436
+ page,
2437
+ perPage: perPageForResponse,
2438
+ hasMore: end < total
1576
2439
  },
1577
2440
  scores: result.recordset.map((row) => transformScoreRow(row))
1578
2441
  };
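
The score listing methods now derive their OFFSET/FETCH parameters from the shared pagination helpers rather than computing page * perPage inline. The sketch below paraphrases the arithmetic as it is used in this hunk; the real normalizePerPage and calculatePagination implementations live in @mastra/core and may differ in detail:

    // Paraphrase of the pagination arithmetic used by listScoresByScorerId above.
    // perPage defaults to 100; passing perPage === false at the call site disables
    // the limit (limitValue becomes the total row count).
    function sketchPagination(page: number, total: number, perPage = 100) {
      const offset = page * perPage;   // bound to OFFSET @offset ROWS
      const end = offset + perPage;    // bound to FETCH NEXT @perPage ROWS ONLY
      return { offset, limit: perPage, hasMore: end < total };
    }

    // sketchPagination(0, 250) -> { offset: 0,   limit: 100, hasMore: true }
    // sketchPagination(2, 250) -> { offset: 200, limit: 100, hasMore: false }  (last page)
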
@@ -1588,7 +2451,7 @@ var ScoresMSSQL = class extends storage.ScoresStorage {
1588
2451
  );
1589
2452
  }
1590
2453
  }
1591
- async getScoresByRunId({
2454
+ async listScoresByRunId({
1592
2455
  runId,
1593
2456
  pagination
1594
2457
  }) {
@@ -1599,30 +2462,35 @@ var ScoresMSSQL = class extends storage.ScoresStorage {
1599
2462
  `SELECT COUNT(*) as count FROM ${getTableName({ indexName: storage.TABLE_SCORERS, schemaName: getSchemaName(this.schema) })} WHERE [runId] = @p1`
1600
2463
  );
1601
2464
  const total = totalResult.recordset[0]?.count || 0;
2465
+ const { page, perPage: perPageInput } = pagination;
1602
2466
  if (total === 0) {
1603
2467
  return {
1604
2468
  pagination: {
1605
2469
  total: 0,
1606
- page: pagination.page,
1607
- perPage: pagination.perPage,
2470
+ page,
2471
+ perPage: perPageInput,
1608
2472
  hasMore: false
1609
2473
  },
1610
2474
  scores: []
1611
2475
  };
1612
2476
  }
2477
+ const perPage = storage.normalizePerPage(perPageInput, 100);
2478
+ const { offset: start, perPage: perPageForResponse } = storage.calculatePagination(page, perPageInput, perPage);
2479
+ const limitValue = perPageInput === false ? total : perPage;
2480
+ const end = perPageInput === false ? total : start + perPage;
1613
2481
  const dataRequest = this.pool.request();
1614
2482
  dataRequest.input("p1", runId);
1615
- dataRequest.input("p2", pagination.perPage);
1616
- dataRequest.input("p3", pagination.page * pagination.perPage);
2483
+ dataRequest.input("p2", limitValue);
2484
+ dataRequest.input("p3", start);
1617
2485
  const result = await dataRequest.query(
1618
2486
  `SELECT * FROM ${getTableName({ indexName: storage.TABLE_SCORERS, schemaName: getSchemaName(this.schema) })} WHERE [runId] = @p1 ORDER BY [createdAt] DESC OFFSET @p3 ROWS FETCH NEXT @p2 ROWS ONLY`
1619
2487
  );
1620
2488
  return {
1621
2489
  pagination: {
1622
2490
  total: Number(total),
1623
- page: pagination.page,
1624
- perPage: pagination.perPage,
1625
- hasMore: Number(total) > (pagination.page + 1) * pagination.perPage
2491
+ page,
2492
+ perPage: perPageForResponse,
2493
+ hasMore: end < total
1626
2494
  },
1627
2495
  scores: result.recordset.map((row) => transformScoreRow(row))
1628
2496
  };
@@ -1638,7 +2506,7 @@ var ScoresMSSQL = class extends storage.ScoresStorage {
1638
2506
  );
1639
2507
  }
1640
2508
  }
1641
- async getScoresByEntityId({
2509
+ async listScoresByEntityId({
1642
2510
  entityId,
1643
2511
  entityType,
1644
2512
  pagination
@@ -1651,31 +2519,36 @@ var ScoresMSSQL = class extends storage.ScoresStorage {
1651
2519
  `SELECT COUNT(*) as count FROM ${getTableName({ indexName: storage.TABLE_SCORERS, schemaName: getSchemaName(this.schema) })} WHERE [entityId] = @p1 AND [entityType] = @p2`
1652
2520
  );
1653
2521
  const total = totalResult.recordset[0]?.count || 0;
2522
+ const { page, perPage: perPageInput } = pagination;
2523
+ const perPage = storage.normalizePerPage(perPageInput, 100);
2524
+ const { offset: start, perPage: perPageForResponse } = storage.calculatePagination(page, perPageInput, perPage);
1654
2525
  if (total === 0) {
1655
2526
  return {
1656
2527
  pagination: {
1657
2528
  total: 0,
1658
- page: pagination.page,
1659
- perPage: pagination.perPage,
2529
+ page,
2530
+ perPage: perPageForResponse,
1660
2531
  hasMore: false
1661
2532
  },
1662
2533
  scores: []
1663
2534
  };
1664
2535
  }
2536
+ const limitValue = perPageInput === false ? total : perPage;
2537
+ const end = perPageInput === false ? total : start + perPage;
1665
2538
  const dataRequest = this.pool.request();
1666
2539
  dataRequest.input("p1", entityId);
1667
2540
  dataRequest.input("p2", entityType);
1668
- dataRequest.input("p3", pagination.perPage);
1669
- dataRequest.input("p4", pagination.page * pagination.perPage);
2541
+ dataRequest.input("p3", limitValue);
2542
+ dataRequest.input("p4", start);
1670
2543
  const result = await dataRequest.query(
1671
2544
  `SELECT * FROM ${getTableName({ indexName: storage.TABLE_SCORERS, schemaName: getSchemaName(this.schema) })} WHERE [entityId] = @p1 AND [entityType] = @p2 ORDER BY [createdAt] DESC OFFSET @p4 ROWS FETCH NEXT @p3 ROWS ONLY`
1672
2545
  );
1673
2546
  return {
1674
2547
  pagination: {
1675
2548
  total: Number(total),
1676
- page: pagination.page,
1677
- perPage: pagination.perPage,
1678
- hasMore: Number(total) > (pagination.page + 1) * pagination.perPage
2549
+ page,
2550
+ perPage: perPageForResponse,
2551
+ hasMore: end < total
1679
2552
  },
1680
2553
  scores: result.recordset.map((row) => transformScoreRow(row))
1681
2554
  };
@@ -1691,7 +2564,7 @@ var ScoresMSSQL = class extends storage.ScoresStorage {
1691
2564
  );
1692
2565
  }
1693
2566
  }
1694
- async getScoresBySpan({
2567
+ async listScoresBySpan({
1695
2568
  traceId,
1696
2569
  spanId,
1697
2570
  pagination
@@ -1704,34 +2577,38 @@ var ScoresMSSQL = class extends storage.ScoresStorage {
1704
2577
  `SELECT COUNT(*) as count FROM ${getTableName({ indexName: storage.TABLE_SCORERS, schemaName: getSchemaName(this.schema) })} WHERE [traceId] = @p1 AND [spanId] = @p2`
1705
2578
  );
1706
2579
  const total = totalResult.recordset[0]?.count || 0;
2580
+ const { page, perPage: perPageInput } = pagination;
2581
+ const perPage = storage.normalizePerPage(perPageInput, 100);
2582
+ const { offset: start, perPage: perPageForResponse } = storage.calculatePagination(page, perPageInput, perPage);
1707
2583
  if (total === 0) {
1708
2584
  return {
1709
2585
  pagination: {
1710
2586
  total: 0,
1711
- page: pagination.page,
1712
- perPage: pagination.perPage,
2587
+ page,
2588
+ perPage: perPageForResponse,
1713
2589
  hasMore: false
1714
2590
  },
1715
2591
  scores: []
1716
2592
  };
1717
2593
  }
1718
- const limit = pagination.perPage + 1;
2594
+ const limitValue = perPageInput === false ? total : perPage;
2595
+ const end = perPageInput === false ? total : start + perPage;
1719
2596
  const dataRequest = this.pool.request();
1720
2597
  dataRequest.input("p1", traceId);
1721
2598
  dataRequest.input("p2", spanId);
1722
- dataRequest.input("p3", limit);
1723
- dataRequest.input("p4", pagination.page * pagination.perPage);
2599
+ dataRequest.input("p3", limitValue);
2600
+ dataRequest.input("p4", start);
1724
2601
  const result = await dataRequest.query(
1725
2602
  `SELECT * FROM ${getTableName({ indexName: storage.TABLE_SCORERS, schemaName: getSchemaName(this.schema) })} WHERE [traceId] = @p1 AND [spanId] = @p2 ORDER BY [createdAt] DESC OFFSET @p4 ROWS FETCH NEXT @p3 ROWS ONLY`
1726
2603
  );
1727
2604
  return {
1728
2605
  pagination: {
1729
2606
  total: Number(total),
1730
- page: pagination.page,
1731
- perPage: pagination.perPage,
1732
- hasMore: result.recordset.length > pagination.perPage
2607
+ page,
2608
+ perPage: perPageForResponse,
2609
+ hasMore: end < total
1733
2610
  },
1734
- scores: result.recordset.slice(0, pagination.perPage).map((row) => transformScoreRow(row))
2611
+ scores: result.recordset.map((row) => transformScoreRow(row))
1735
2612
  };
1736
2613
  } catch (error$1) {
1737
2614
  throw new error.MastraError(
@@ -1746,7 +2623,7 @@ var ScoresMSSQL = class extends storage.ScoresStorage {
1746
2623
  }
1747
2624
  }
1748
2625
  };
1749
- var TracesMSSQL = class extends storage.TracesStorage {
2626
+ var WorkflowsMSSQL = class extends storage.WorkflowsStorage {
1750
2627
  pool;
1751
2628
  operations;
1752
2629
  schema;
@@ -1760,207 +2637,165 @@ var TracesMSSQL = class extends storage.TracesStorage {
1760
2637
  this.operations = operations;
1761
2638
  this.schema = schema;
1762
2639
  }
1763
- /** @deprecated use getTracesPaginated instead*/
1764
- async getTraces(args) {
1765
- if (args.fromDate || args.toDate) {
1766
- args.dateRange = {
1767
- start: args.fromDate,
1768
- end: args.toDate
1769
- };
2640
+ parseWorkflowRun(row) {
2641
+ let parsedSnapshot = row.snapshot;
2642
+ if (typeof parsedSnapshot === "string") {
2643
+ try {
2644
+ parsedSnapshot = JSON.parse(row.snapshot);
2645
+ } catch (e) {
2646
+ this.logger?.warn?.(`Failed to parse snapshot for workflow ${row.workflow_name}:`, e);
2647
+ }
1770
2648
  }
1771
- const result = await this.getTracesPaginated(args);
1772
- return result.traces;
2649
+ return {
2650
+ workflowName: row.workflow_name,
2651
+ runId: row.run_id,
2652
+ snapshot: parsedSnapshot,
2653
+ createdAt: row.createdAt,
2654
+ updatedAt: row.updatedAt,
2655
+ resourceId: row.resourceId
2656
+ };
1773
2657
  }
1774
- async getTracesPaginated(args) {
1775
- const { name, scope, page = 0, perPage: perPageInput, attributes, filters, dateRange } = args;
1776
- const fromDate = dateRange?.start;
1777
- const toDate = dateRange?.end;
1778
- const perPage = perPageInput !== void 0 ? perPageInput : 100;
1779
- const currentOffset = page * perPage;
1780
- const paramMap = {};
1781
- const conditions = [];
1782
- let paramIndex = 1;
1783
- if (name) {
1784
- const paramName = `p${paramIndex++}`;
1785
- conditions.push(`[name] LIKE @${paramName}`);
1786
- paramMap[paramName] = `${name}%`;
1787
- }
1788
- if (scope) {
1789
- const paramName = `p${paramIndex++}`;
1790
- conditions.push(`[scope] = @${paramName}`);
1791
- paramMap[paramName] = scope;
1792
- }
1793
- if (attributes) {
1794
- Object.entries(attributes).forEach(([key, value]) => {
1795
- const parsedKey = utils.parseFieldKey(key);
1796
- const paramName = `p${paramIndex++}`;
1797
- conditions.push(`JSON_VALUE([attributes], '$.${parsedKey}') = @${paramName}`);
1798
- paramMap[paramName] = value;
1799
- });
1800
- }
1801
- if (filters) {
1802
- Object.entries(filters).forEach(([key, value]) => {
1803
- const parsedKey = utils.parseFieldKey(key);
1804
- const paramName = `p${paramIndex++}`;
1805
- conditions.push(`[${parsedKey}] = @${paramName}`);
1806
- paramMap[paramName] = value;
1807
- });
1808
- }
1809
- if (fromDate instanceof Date && !isNaN(fromDate.getTime())) {
1810
- const paramName = `p${paramIndex++}`;
1811
- conditions.push(`[createdAt] >= @${paramName}`);
1812
- paramMap[paramName] = fromDate.toISOString();
1813
- }
1814
- if (toDate instanceof Date && !isNaN(toDate.getTime())) {
1815
- const paramName = `p${paramIndex++}`;
1816
- conditions.push(`[createdAt] <= @${paramName}`);
1817
- paramMap[paramName] = toDate.toISOString();
1818
- }
1819
- const whereClause = conditions.length > 0 ? `WHERE ${conditions.join(" AND ")}` : "";
1820
- const countQuery = `SELECT COUNT(*) as total FROM ${getTableName({ indexName: storage.TABLE_TRACES, schemaName: getSchemaName(this.schema) })} ${whereClause}`;
1821
- let total = 0;
2658
+ async updateWorkflowResults({
2659
+ workflowName,
2660
+ runId,
2661
+ stepId,
2662
+ result,
2663
+ requestContext
2664
+ }) {
2665
+ const table = getTableName({ indexName: storage.TABLE_WORKFLOW_SNAPSHOT, schemaName: getSchemaName(this.schema) });
2666
+ const transaction = this.pool.transaction();
1822
2667
  try {
1823
- const countRequest = this.pool.request();
1824
- Object.entries(paramMap).forEach(([key, value]) => {
1825
- if (value instanceof Date) {
1826
- countRequest.input(key, sql2__default.default.DateTime, value);
1827
- } else {
1828
- countRequest.input(key, value);
1829
- }
1830
- });
1831
- const countResult = await countRequest.query(countQuery);
1832
- total = parseInt(countResult.recordset[0].total, 10);
2668
+ await transaction.begin();
2669
+ const selectRequest = new sql2__default.default.Request(transaction);
2670
+ selectRequest.input("workflow_name", workflowName);
2671
+ selectRequest.input("run_id", runId);
2672
+ const existingSnapshotResult = await selectRequest.query(
2673
+ `SELECT snapshot FROM ${table} WITH (UPDLOCK, HOLDLOCK) WHERE workflow_name = @workflow_name AND run_id = @run_id`
2674
+ );
2675
+ let snapshot;
2676
+ if (!existingSnapshotResult.recordset || existingSnapshotResult.recordset.length === 0) {
2677
+ snapshot = {
2678
+ context: {},
2679
+ activePaths: [],
2680
+ activeStepsPath: {},
2681
+ timestamp: Date.now(),
2682
+ suspendedPaths: {},
2683
+ resumeLabels: {},
2684
+ serializedStepGraph: [],
2685
+ status: "pending",
2686
+ value: {},
2687
+ waitingPaths: {},
2688
+ runId,
2689
+ requestContext: {}
2690
+ };
2691
+ } else {
2692
+ const existingSnapshot = existingSnapshotResult.recordset[0].snapshot;
2693
+ snapshot = typeof existingSnapshot === "string" ? JSON.parse(existingSnapshot) : existingSnapshot;
2694
+ }
2695
+ snapshot.context[stepId] = result;
2696
+ snapshot.requestContext = { ...snapshot.requestContext, ...requestContext };
2697
+ const upsertReq = new sql2__default.default.Request(transaction);
2698
+ upsertReq.input("workflow_name", workflowName);
2699
+ upsertReq.input("run_id", runId);
2700
+ upsertReq.input("snapshot", JSON.stringify(snapshot));
2701
+ upsertReq.input("createdAt", sql2__default.default.DateTime2, /* @__PURE__ */ new Date());
2702
+ upsertReq.input("updatedAt", sql2__default.default.DateTime2, /* @__PURE__ */ new Date());
2703
+ await upsertReq.query(
2704
+ `MERGE ${table} AS target
2705
+ USING (SELECT @workflow_name AS workflow_name, @run_id AS run_id) AS src
2706
+ ON target.workflow_name = src.workflow_name AND target.run_id = src.run_id
2707
+ WHEN MATCHED THEN UPDATE SET snapshot = @snapshot, [updatedAt] = @updatedAt
2708
+ WHEN NOT MATCHED THEN INSERT (workflow_name, run_id, snapshot, [createdAt], [updatedAt])
2709
+ VALUES (@workflow_name, @run_id, @snapshot, @createdAt, @updatedAt);`
2710
+ );
2711
+ await transaction.commit();
2712
+ return snapshot.context;
1833
2713
  } catch (error$1) {
2714
+ try {
2715
+ await transaction.rollback();
2716
+ } catch {
2717
+ }
1834
2718
  throw new error.MastraError(
1835
2719
  {
1836
- id: "MASTRA_STORAGE_MSSQL_STORE_GET_TRACES_PAGINATED_FAILED_TO_RETRIEVE_TOTAL_COUNT",
2720
+ id: "MASTRA_STORAGE_MSSQL_STORE_UPDATE_WORKFLOW_RESULTS_FAILED",
1837
2721
  domain: error.ErrorDomain.STORAGE,
1838
2722
  category: error.ErrorCategory.THIRD_PARTY,
1839
2723
  details: {
1840
- name: args.name ?? "",
1841
- scope: args.scope ?? ""
2724
+ workflowName,
2725
+ runId,
2726
+ stepId
1842
2727
  }
1843
2728
  },
1844
2729
  error$1
1845
2730
  );
1846
2731
  }
1847
- if (total === 0) {
1848
- return {
1849
- traces: [],
1850
- total: 0,
1851
- page,
1852
- perPage,
1853
- hasMore: false
1854
- };
1855
- }
1856
- const dataQuery = `SELECT * FROM ${getTableName({ indexName: storage.TABLE_TRACES, schemaName: getSchemaName(this.schema) })} ${whereClause} ORDER BY [seq_id] DESC OFFSET @offset ROWS FETCH NEXT @limit ROWS ONLY`;
1857
- const dataRequest = this.pool.request();
1858
- Object.entries(paramMap).forEach(([key, value]) => {
1859
- if (value instanceof Date) {
1860
- dataRequest.input(key, sql2__default.default.DateTime, value);
1861
- } else {
1862
- dataRequest.input(key, value);
1863
- }
1864
- });
1865
- dataRequest.input("offset", currentOffset);
1866
- dataRequest.input("limit", perPage);
2732
+ }
2733
+ async updateWorkflowState({
2734
+ workflowName,
2735
+ runId,
2736
+ opts
2737
+ }) {
2738
+ const table = getTableName({ indexName: storage.TABLE_WORKFLOW_SNAPSHOT, schemaName: getSchemaName(this.schema) });
2739
+ const transaction = this.pool.transaction();
1867
2740
  try {
1868
- const rowsResult = await dataRequest.query(dataQuery);
1869
- const rows = rowsResult.recordset;
1870
- const traces = rows.map((row) => ({
1871
- id: row.id,
1872
- parentSpanId: row.parentSpanId,
1873
- traceId: row.traceId,
1874
- name: row.name,
1875
- scope: row.scope,
1876
- kind: row.kind,
1877
- status: JSON.parse(row.status),
1878
- events: JSON.parse(row.events),
1879
- links: JSON.parse(row.links),
1880
- attributes: JSON.parse(row.attributes),
1881
- startTime: row.startTime,
1882
- endTime: row.endTime,
1883
- other: row.other,
1884
- createdAt: row.createdAt
1885
- }));
1886
- return {
1887
- traces,
1888
- total,
1889
- page,
1890
- perPage,
1891
- hasMore: currentOffset + traces.length < total
1892
- };
2741
+ await transaction.begin();
2742
+ const selectRequest = new sql2__default.default.Request(transaction);
2743
+ selectRequest.input("workflow_name", workflowName);
2744
+ selectRequest.input("run_id", runId);
2745
+ const existingSnapshotResult = await selectRequest.query(
2746
+ `SELECT snapshot FROM ${table} WITH (UPDLOCK, HOLDLOCK) WHERE workflow_name = @workflow_name AND run_id = @run_id`
2747
+ );
2748
+ if (!existingSnapshotResult.recordset || existingSnapshotResult.recordset.length === 0) {
2749
+ await transaction.rollback();
2750
+ return void 0;
2751
+ }
2752
+ const existingSnapshot = existingSnapshotResult.recordset[0].snapshot;
2753
+ const snapshot = typeof existingSnapshot === "string" ? JSON.parse(existingSnapshot) : existingSnapshot;
2754
+ if (!snapshot || !snapshot?.context) {
2755
+ await transaction.rollback();
2756
+ throw new error.MastraError(
2757
+ {
2758
+ id: "MASTRA_STORAGE_MSSQL_STORE_UPDATE_WORKFLOW_STATE_SNAPSHOT_NOT_FOUND",
2759
+ domain: error.ErrorDomain.STORAGE,
2760
+ category: error.ErrorCategory.SYSTEM,
2761
+ details: {
2762
+ workflowName,
2763
+ runId
2764
+ }
2765
+ },
2766
+ new Error(`Snapshot not found for runId ${runId}`)
2767
+ );
2768
+ }
2769
+ const updatedSnapshot = { ...snapshot, ...opts };
2770
+ const updateRequest = new sql2__default.default.Request(transaction);
2771
+ updateRequest.input("snapshot", JSON.stringify(updatedSnapshot));
2772
+ updateRequest.input("workflow_name", workflowName);
2773
+ updateRequest.input("run_id", runId);
2774
+ updateRequest.input("updatedAt", sql2__default.default.DateTime2, /* @__PURE__ */ new Date());
2775
+ await updateRequest.query(
2776
+ `UPDATE ${table} SET snapshot = @snapshot, [updatedAt] = @updatedAt WHERE workflow_name = @workflow_name AND run_id = @run_id`
2777
+ );
2778
+ await transaction.commit();
2779
+ return updatedSnapshot;
1893
2780
  } catch (error$1) {
2781
+ try {
2782
+ await transaction.rollback();
2783
+ } catch {
2784
+ }
1894
2785
  throw new error.MastraError(
1895
2786
  {
1896
- id: "MASTRA_STORAGE_MSSQL_STORE_GET_TRACES_PAGINATED_FAILED_TO_RETRIEVE_TRACES",
2787
+ id: "MASTRA_STORAGE_MSSQL_STORE_UPDATE_WORKFLOW_STATE_FAILED",
1897
2788
  domain: error.ErrorDomain.STORAGE,
1898
2789
  category: error.ErrorCategory.THIRD_PARTY,
1899
2790
  details: {
1900
- name: args.name ?? "",
1901
- scope: args.scope ?? ""
2791
+ workflowName,
2792
+ runId
1902
2793
  }
1903
2794
  },
1904
2795
  error$1
1905
2796
  );
1906
2797
  }
1907
2798
  }
1908
- async batchTraceInsert({ records }) {
1909
- this.logger.debug("Batch inserting traces", { count: records.length });
1910
- await this.operations.batchInsert({
1911
- tableName: storage.TABLE_TRACES,
1912
- records
1913
- });
1914
- }
1915
- };
1916
- function parseWorkflowRun(row) {
1917
- let parsedSnapshot = row.snapshot;
1918
- if (typeof parsedSnapshot === "string") {
1919
- try {
1920
- parsedSnapshot = JSON.parse(row.snapshot);
1921
- } catch (e) {
1922
- console.warn(`Failed to parse snapshot for workflow ${row.workflow_name}: ${e}`);
1923
- }
1924
- }
1925
- return {
1926
- workflowName: row.workflow_name,
1927
- runId: row.run_id,
1928
- snapshot: parsedSnapshot,
1929
- createdAt: row.createdAt,
1930
- updatedAt: row.updatedAt,
1931
- resourceId: row.resourceId
1932
- };
1933
- }
1934
- var WorkflowsMSSQL = class extends storage.WorkflowsStorage {
1935
- pool;
1936
- operations;
1937
- schema;
1938
- constructor({
1939
- pool,
1940
- operations,
1941
- schema
1942
- }) {
1943
- super();
1944
- this.pool = pool;
1945
- this.operations = operations;
1946
- this.schema = schema;
1947
- }
1948
- updateWorkflowResults({
1949
- // workflowName,
1950
- // runId,
1951
- // stepId,
1952
- // result,
1953
- // runtimeContext,
1954
- }) {
1955
- throw new Error("Method not implemented.");
1956
- }
1957
- updateWorkflowState({
1958
- // workflowName,
1959
- // runId,
1960
- // opts,
1961
- }) {
1962
- throw new Error("Method not implemented.");
1963
- }
1964
2799
  async persistWorkflowSnapshot({
1965
2800
  workflowName,
1966
2801
  runId,
@@ -2057,7 +2892,7 @@ var WorkflowsMSSQL = class extends storage.WorkflowsStorage {
2057
2892
  if (!result.recordset || result.recordset.length === 0) {
2058
2893
  return null;
2059
2894
  }
2060
- return parseWorkflowRun(result.recordset[0]);
2895
+ return this.parseWorkflowRun(result.recordset[0]);
2061
2896
  } catch (error$1) {
2062
2897
  throw new error.MastraError(
2063
2898
  {
@@ -2073,13 +2908,14 @@ var WorkflowsMSSQL = class extends storage.WorkflowsStorage {
2073
2908
  );
2074
2909
  }
2075
2910
  }
2076
- async getWorkflowRuns({
2911
+ async listWorkflowRuns({
2077
2912
  workflowName,
2078
2913
  fromDate,
2079
2914
  toDate,
2080
- limit,
2081
- offset,
2082
- resourceId
2915
+ page,
2916
+ perPage,
2917
+ resourceId,
2918
+ status
2083
2919
  } = {}) {
2084
2920
  try {
2085
2921
  const conditions = [];
@@ -2088,13 +2924,17 @@ var WorkflowsMSSQL = class extends storage.WorkflowsStorage {
2088
2924
  conditions.push(`[workflow_name] = @workflowName`);
2089
2925
  paramMap["workflowName"] = workflowName;
2090
2926
  }
2927
+ if (status) {
2928
+ conditions.push(`JSON_VALUE([snapshot], '$.status') = @status`);
2929
+ paramMap["status"] = status;
2930
+ }
2091
2931
  if (resourceId) {
2092
2932
  const hasResourceId = await this.operations.hasColumn(storage.TABLE_WORKFLOW_SNAPSHOT, "resourceId");
2093
2933
  if (hasResourceId) {
2094
2934
  conditions.push(`[resourceId] = @resourceId`);
2095
2935
  paramMap["resourceId"] = resourceId;
2096
2936
  } else {
2097
- console.warn(`[${storage.TABLE_WORKFLOW_SNAPSHOT}] resourceId column not found. Skipping resourceId filter.`);
2937
+ this.logger?.warn?.(`[${storage.TABLE_WORKFLOW_SNAPSHOT}] resourceId column not found. Skipping resourceId filter.`);
2098
2938
  }
2099
2939
  }
2100
2940
  if (fromDate instanceof Date && !isNaN(fromDate.getTime())) {
@@ -2116,24 +2956,27 @@ var WorkflowsMSSQL = class extends storage.WorkflowsStorage {
2116
2956
  request.input(key, value);
2117
2957
  }
2118
2958
  });
2119
- if (limit !== void 0 && offset !== void 0) {
2959
+ const usePagination = typeof perPage === "number" && typeof page === "number";
2960
+ if (usePagination) {
2120
2961
  const countQuery = `SELECT COUNT(*) as count FROM ${tableName} ${whereClause}`;
2121
2962
  const countResult = await request.query(countQuery);
2122
2963
  total = Number(countResult.recordset[0]?.count || 0);
2123
2964
  }
2124
2965
  let query = `SELECT * FROM ${tableName} ${whereClause} ORDER BY [seq_id] DESC`;
2125
- if (limit !== void 0 && offset !== void 0) {
2126
- query += ` OFFSET @offset ROWS FETCH NEXT @limit ROWS ONLY`;
2127
- request.input("limit", limit);
2966
+ if (usePagination) {
2967
+ const normalizedPerPage = storage.normalizePerPage(perPage, Number.MAX_SAFE_INTEGER);
2968
+ const offset = page * normalizedPerPage;
2969
+ query += ` OFFSET @offset ROWS FETCH NEXT @perPage ROWS ONLY`;
2970
+ request.input("perPage", normalizedPerPage);
2128
2971
  request.input("offset", offset);
2129
2972
  }
2130
2973
  const result = await request.query(query);
2131
- const runs = (result.recordset || []).map((row) => parseWorkflowRun(row));
2974
+ const runs = (result.recordset || []).map((row) => this.parseWorkflowRun(row));
2132
2975
  return { runs, total: total || runs.length };
2133
2976
  } catch (error$1) {
2134
2977
  throw new error.MastraError(
2135
2978
  {
2136
- id: "MASTRA_STORAGE_MSSQL_STORE_GET_WORKFLOW_RUNS_FAILED",
2979
+ id: "MASTRA_STORAGE_MSSQL_STORE_LIST_WORKFLOW_RUNS_FAILED",
2137
2980
  domain: error.ErrorDomain.STORAGE,
2138
2981
  category: error.ErrorCategory.THIRD_PARTY,
2139
2982
  details: {
@@ -2153,7 +2996,10 @@ var MSSQLStore = class extends storage.MastraStorage {
2153
2996
  isConnected = null;
2154
2997
  stores;
2155
2998
  constructor(config) {
2156
- super({ name: "MSSQLStore" });
2999
+ if (!config.id || typeof config.id !== "string" || config.id.trim() === "") {
3000
+ throw new Error("MSSQLStore: id must be provided and cannot be empty.");
3001
+ }
3002
+ super({ id: config.id, name: "MSSQLStore" });
2157
3003
  try {
2158
3004
  if ("connectionString" in config) {
2159
3005
  if (!config.connectionString || typeof config.connectionString !== "string" || config.connectionString.trim() === "") {
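
MSSQLStore now requires a non-empty string id before it forwards to the base MastraStorage constructor. A construction sketch, assuming the class is exported under its declared name and using placeholder values:

    // Hypothetical construction with the newly required `id`; the id value and the
    // connection string are placeholders, not package defaults.
    import { MSSQLStore } from '@mastra/mssql';

    const store = new MSSQLStore({
      id: 'mssql-main',  // must be a non-empty string
      connectionString: 'Server=localhost,1433;Database=mastra;User Id=sa;Password=<password>;Encrypt=true',
    });

    await store.init(); // also attempts createAutomaticIndexes(), logging a warning if that fails
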
@@ -2176,19 +3022,17 @@ var MSSQLStore = class extends storage.MastraStorage {
2176
3022
  port: config.port,
2177
3023
  options: config.options || { encrypt: true, trustServerCertificate: true }
2178
3024
  });
2179
- const legacyEvals = new LegacyEvalsMSSQL({ pool: this.pool, schema: this.schema });
2180
3025
  const operations = new StoreOperationsMSSQL({ pool: this.pool, schemaName: this.schema });
2181
3026
  const scores = new ScoresMSSQL({ pool: this.pool, operations, schema: this.schema });
2182
- const traces = new TracesMSSQL({ pool: this.pool, operations, schema: this.schema });
2183
3027
  const workflows = new WorkflowsMSSQL({ pool: this.pool, operations, schema: this.schema });
2184
3028
  const memory = new MemoryMSSQL({ pool: this.pool, schema: this.schema, operations });
3029
+ const observability = new ObservabilityMSSQL({ pool: this.pool, operations, schema: this.schema });
2185
3030
  this.stores = {
2186
3031
  operations,
2187
3032
  scores,
2188
- traces,
2189
3033
  workflows,
2190
- legacyEvals,
2191
- memory
3034
+ memory,
3035
+ observability
2192
3036
  };
2193
3037
  } catch (e) {
2194
3038
  throw new error.MastraError(
@@ -2208,6 +3052,11 @@ var MSSQLStore = class extends storage.MastraStorage {
2208
3052
  try {
2209
3053
  await this.isConnected;
2210
3054
  await super.init();
3055
+ try {
3056
+ await this.stores.operations.createAutomaticIndexes();
3057
+ } catch (indexError) {
3058
+ this.logger?.warn?.("Failed to create indexes:", indexError);
3059
+ }
2211
3060
  } catch (error$1) {
2212
3061
  this.isConnected = null;
2213
3062
  throw new error.MastraError(
@@ -2235,28 +3084,11 @@ var MSSQLStore = class extends storage.MastraStorage {
2235
3084
  hasColumn: true,
2236
3085
  createTable: true,
2237
3086
  deleteMessages: true,
2238
- getScoresBySpan: true
3087
+ listScoresBySpan: true,
3088
+ observabilityInstance: true,
3089
+ indexManagement: true
2239
3090
  };
2240
3091
  }
2241
- /** @deprecated use getEvals instead */
2242
- async getEvalsByAgentName(agentName, type) {
2243
- return this.stores.legacyEvals.getEvalsByAgentName(agentName, type);
2244
- }
2245
- async getEvals(options = {}) {
2246
- return this.stores.legacyEvals.getEvals(options);
2247
- }
2248
- /**
2249
- * @deprecated use getTracesPaginated instead
2250
- */
2251
- async getTraces(args) {
2252
- return this.stores.traces.getTraces(args);
2253
- }
2254
- async getTracesPaginated(args) {
2255
- return this.stores.traces.getTracesPaginated(args);
2256
- }
2257
- async batchTraceInsert({ records }) {
2258
- return this.stores.traces.batchTraceInsert({ records });
2259
- }
2260
3092
  async createTable({
2261
3093
  tableName,
2262
3094
  schema
@@ -2291,15 +3123,6 @@ var MSSQLStore = class extends storage.MastraStorage {
2291
3123
  async getThreadById({ threadId }) {
2292
3124
  return this.stores.memory.getThreadById({ threadId });
2293
3125
  }
2294
- /**
2295
- * @deprecated use getThreadsByResourceIdPaginated instead
2296
- */
2297
- async getThreadsByResourceId(args) {
2298
- return this.stores.memory.getThreadsByResourceId(args);
2299
- }
2300
- async getThreadsByResourceIdPaginated(args) {
2301
- return this.stores.memory.getThreadsByResourceIdPaginated(args);
2302
- }
2303
3126
  async saveThread({ thread }) {
2304
3127
  return this.stores.memory.saveThread({ thread });
2305
3128
  }
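With the resource-scoped getters removed from this wrapper, thread access at this layer is by id only. A one-line sketch, reusing `store` in an async context with a placeholder thread id:

const thread = await store.getThreadById({ threadId: 'thread-abc' });  // placeholder id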
@@ -2313,17 +3136,8 @@ var MSSQLStore = class extends storage.MastraStorage {
2313
3136
  async deleteThread({ threadId }) {
2314
3137
  return this.stores.memory.deleteThread({ threadId });
2315
3138
  }
2316
- async getMessages(args) {
2317
- return this.stores.memory.getMessages(args);
2318
- }
2319
- async getMessagesById({
2320
- messageIds,
2321
- format
2322
- }) {
2323
- return this.stores.memory.getMessagesById({ messageIds, format });
2324
- }
2325
- async getMessagesPaginated(args) {
2326
- return this.stores.memory.getMessagesPaginated(args);
3139
+ async listMessagesById({ messageIds }) {
3140
+ return this.stores.memory.listMessagesById({ messageIds });
2327
3141
  }
2328
3142
  async saveMessages(args) {
2329
3143
  return this.stores.memory.saveMessages(args);
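Message lookup by id collapses into a single `listMessagesById` wrapper that takes only `messageIds`; the old `format` option is no longer forwarded from this class. A sketch with placeholder ids, reusing `store` in an async context:

const messages = await store.listMessagesById({
  messageIds: ['msg-1', 'msg-2'],  // placeholder ids
});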
@@ -2357,9 +3171,9 @@ var MSSQLStore = class extends storage.MastraStorage {
2357
3171
  runId,
2358
3172
  stepId,
2359
3173
  result,
2360
- runtimeContext
3174
+ requestContext
2361
3175
  }) {
2362
- return this.stores.workflows.updateWorkflowResults({ workflowName, runId, stepId, result, runtimeContext });
3176
+ return this.stores.workflows.updateWorkflowResults({ workflowName, runId, stepId, result, requestContext });
2363
3177
  }
2364
3178
  async updateWorkflowState({
2365
3179
  workflowName,
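`updateWorkflowResults` now forwards `requestContext` where it previously forwarded `runtimeContext`, so callers still passing the old key will have it dropped at this layer. A sketch with hypothetical names and an assumed result shape, reusing `store` in an async context:

await store.updateWorkflowResults({
  workflowName: 'order-flow',                 // placeholder
  runId: 'run-42',                            // placeholder
  stepId: 'charge-card',                      // placeholder
  result: { status: 'success', output: {} },  // shape assumed, not taken from this file
  requestContext: {},                         // renamed from runtimeContext
});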
@@ -2382,15 +3196,8 @@ var MSSQLStore = class extends storage.MastraStorage {
2382
3196
  }) {
2383
3197
  return this.stores.workflows.loadWorkflowSnapshot({ workflowName, runId });
2384
3198
  }
2385
- async getWorkflowRuns({
2386
- workflowName,
2387
- fromDate,
2388
- toDate,
2389
- limit,
2390
- offset,
2391
- resourceId
2392
- } = {}) {
2393
- return this.stores.workflows.getWorkflowRuns({ workflowName, fromDate, toDate, limit, offset, resourceId });
3199
+ async listWorkflowRuns(args = {}) {
3200
+ return this.stores.workflows.listWorkflowRuns(args);
2394
3201
  }
2395
3202
  async getWorkflowRunById({
2396
3203
  runId,
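The run-listing wrapper no longer destructures its filters and simply forwards the whole argument object to `listWorkflowRuns`; the filter names below are assumed to still be understood downstream, since they are the ones the removed wrapper accepted. Sketch, reusing `store` in an async context:

const runPage = await store.listWorkflowRuns({
  workflowName: 'order-flow',  // placeholder
  limit: 20,
  offset: 0,
});
// runPage is expected to contain the matching runs plus a total count.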
@@ -2401,44 +3208,107 @@ var MSSQLStore = class extends storage.MastraStorage {
2401
3208
  async close() {
2402
3209
  await this.pool.close();
2403
3210
  }
3211
+ /**
3212
+ * Index Management
3213
+ */
3214
+ async createIndex(options) {
3215
+ return this.stores.operations.createIndex(options);
3216
+ }
3217
+ async listIndexes(tableName) {
3218
+ return this.stores.operations.listIndexes(tableName);
3219
+ }
3220
+ async describeIndex(indexName) {
3221
+ return this.stores.operations.describeIndex(indexName);
3222
+ }
3223
+ async dropIndex(indexName) {
3224
+ return this.stores.operations.dropIndex(indexName);
3225
+ }
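The four index wrappers delegate straight to the operations store. A round-trip sketch, reusing `store` in an async context; only the method names come from this file, while the option fields passed to `createIndex` (`name`, `table`, `columns`), the index name, and the table name are assumptions:

await store.createIndex({
  name: 'idx_messages_thread',   // hypothetical index name
  table: 'mastra_messages',      // assumed table name
  columns: ['thread_id'],        // assumed column name
});
console.log(await store.listIndexes('mastra_messages'));
console.log(await store.describeIndex('idx_messages_thread'));
await store.dropIndex('idx_messages_thread');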
3226
+ /**
3227
+ * Tracing / Observability
3228
+ */
3229
+ getObservabilityStore() {
3230
+ if (!this.stores.observability) {
3231
+ throw new error.MastraError({
3232
+ id: "MSSQL_STORE_OBSERVABILITY_NOT_INITIALIZED",
3233
+ domain: error.ErrorDomain.STORAGE,
3234
+ category: error.ErrorCategory.SYSTEM,
3235
+ text: "Observability storage is not initialized"
3236
+ });
3237
+ }
3238
+ return this.stores.observability;
3239
+ }
3240
+ async createSpan(span) {
3241
+ return this.getObservabilityStore().createSpan(span);
3242
+ }
3243
+ async updateSpan({
3244
+ spanId,
3245
+ traceId,
3246
+ updates
3247
+ }) {
3248
+ return this.getObservabilityStore().updateSpan({ spanId, traceId, updates });
3249
+ }
3250
+ async getTrace(traceId) {
3251
+ return this.getObservabilityStore().getTrace(traceId);
3252
+ }
3253
+ async getTracesPaginated(args) {
3254
+ return this.getObservabilityStore().getTracesPaginated(args);
3255
+ }
3256
+ async batchCreateSpans(args) {
3257
+ return this.getObservabilityStore().batchCreateSpans(args);
3258
+ }
3259
+ async batchUpdateSpans(args) {
3260
+ return this.getObservabilityStore().batchUpdateSpans(args);
3261
+ }
3262
+ async batchDeleteTraces(args) {
3263
+ return this.getObservabilityStore().batchDeleteTraces(args);
3264
+ }
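All span and trace operations are routed through `getObservabilityStore()`, which throws a `MastraError` (`MSSQL_STORE_OBSERVABILITY_NOT_INITIALIZED`) when the observability domain is missing. A write-then-read sketch, reusing `store` in an async context; the span and update fields are illustrative rather than taken from this file:

await store.createSpan({
  traceId: 'trace-123',     // placeholder
  spanId: 'span-1',         // placeholder
  name: 'llm-call',         // illustrative; the real span shape comes from @mastra/core
  startedAt: new Date(),
});
await store.updateSpan({
  traceId: 'trace-123',
  spanId: 'span-1',
  updates: { endedAt: new Date() },  // update fields assumed
});
const storedTrace = await store.getTrace('trace-123');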
2404
3265
  /**
2405
3266
  * Scorers
2406
3267
  */
2407
3268
  async getScoreById({ id: _id }) {
2408
3269
  return this.stores.scores.getScoreById({ id: _id });
2409
3270
  }
2410
- async getScoresByScorerId({
3271
+ async listScoresByScorerId({
2411
3272
  scorerId: _scorerId,
2412
- pagination: _pagination
3273
+ pagination: _pagination,
3274
+ entityId: _entityId,
3275
+ entityType: _entityType,
3276
+ source: _source
2413
3277
  }) {
2414
- return this.stores.scores.getScoresByScorerId({ scorerId: _scorerId, pagination: _pagination });
3278
+ return this.stores.scores.listScoresByScorerId({
3279
+ scorerId: _scorerId,
3280
+ pagination: _pagination,
3281
+ entityId: _entityId,
3282
+ entityType: _entityType,
3283
+ source: _source
3284
+ });
2415
3285
  }
2416
3286
  async saveScore(_score) {
2417
3287
  return this.stores.scores.saveScore(_score);
2418
3288
  }
2419
- async getScoresByRunId({
3289
+ async listScoresByRunId({
2420
3290
  runId: _runId,
2421
3291
  pagination: _pagination
2422
3292
  }) {
2423
- return this.stores.scores.getScoresByRunId({ runId: _runId, pagination: _pagination });
3293
+ return this.stores.scores.listScoresByRunId({ runId: _runId, pagination: _pagination });
2424
3294
  }
2425
- async getScoresByEntityId({
3295
+ async listScoresByEntityId({
2426
3296
  entityId: _entityId,
2427
3297
  entityType: _entityType,
2428
3298
  pagination: _pagination
2429
3299
  }) {
2430
- return this.stores.scores.getScoresByEntityId({
3300
+ return this.stores.scores.listScoresByEntityId({
2431
3301
  entityId: _entityId,
2432
3302
  entityType: _entityType,
2433
3303
  pagination: _pagination
2434
3304
  });
2435
3305
  }
2436
- async getScoresBySpan({
3306
+ async listScoresBySpan({
2437
3307
  traceId,
2438
3308
  spanId,
2439
3309
  pagination: _pagination
2440
3310
  }) {
2441
- return this.stores.scores.getScoresBySpan({ traceId, spanId, pagination: _pagination });
3311
+ return this.stores.scores.listScoresBySpan({ traceId, spanId, pagination: _pagination });
2442
3312
  }
2443
3313
  };
2444
3314
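Finally, the scorer accessors keep their shapes but move from `getScoresBy…` to `listScoresBy…`, and `listScoresByScorerId` additionally accepts `entityId`, `entityType`, and `source` filters. A closing sketch with placeholder values and an assumed pagination shape, reusing `store` in an async context:

const byScorer = await store.listScoresByScorerId({
  scorerId: 'answer-relevancy',          // placeholder
  pagination: { page: 0, perPage: 20 },  // pagination shape assumed
  entityType: 'AGENT',                   // filter value assumed
});
const byRun = await store.listScoresByRunId({ runId: 'run-42' });  // placeholder run id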