@mastra/mssql 0.0.0-iterate-traces-ui-again-20250912091900 → 0.0.0-main-test-20251105183450

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.cjs CHANGED
@@ -3,8 +3,10 @@
  var error = require('@mastra/core/error');
  var storage = require('@mastra/core/storage');
  var sql2 = require('mssql');
- var utils = require('@mastra/core/utils');
  var agent = require('@mastra/core/agent');
+ var utils = require('@mastra/core/utils');
+ var crypto = require('crypto');
+ var evals = require('@mastra/core/evals');
 
  function _interopDefault (e) { return e && e.__esModule ? e : { default: e }; }
 
@@ -20,154 +22,71 @@ function getTableName({ indexName, schemaName }) {
20
22
  const quotedSchemaName = schemaName;
21
23
  return quotedSchemaName ? `${quotedSchemaName}.${quotedIndexName}` : quotedIndexName;
22
24
  }
23
-
24
- // src/storage/domains/legacy-evals/index.ts
25
- function transformEvalRow(row) {
26
- let testInfoValue = null, resultValue = null;
27
- if (row.test_info) {
28
- try {
29
- testInfoValue = typeof row.test_info === "string" ? JSON.parse(row.test_info) : row.test_info;
30
- } catch {
31
- }
25
+ function buildDateRangeFilter(dateRange, fieldName) {
26
+ const filters = {};
27
+ if (dateRange?.start) {
28
+ filters[`${fieldName}_gte`] = dateRange.start;
32
29
  }
33
- if (row.test_info) {
34
- try {
35
- resultValue = typeof row.result === "string" ? JSON.parse(row.result) : row.result;
36
- } catch {
37
- }
30
+ if (dateRange?.end) {
31
+ filters[`${fieldName}_lte`] = dateRange.end;
38
32
  }
33
+ return filters;
34
+ }
35
+ function prepareWhereClause(filters, _schema) {
36
+ const conditions = [];
37
+ const params = {};
38
+ let paramIndex = 1;
39
+ Object.entries(filters).forEach(([key, value]) => {
40
+ if (value === void 0) return;
41
+ const paramName = `p${paramIndex++}`;
42
+ if (key.endsWith("_gte")) {
43
+ const fieldName = key.slice(0, -4);
44
+ conditions.push(`[${utils.parseSqlIdentifier(fieldName, "field name")}] >= @${paramName}`);
45
+ params[paramName] = value instanceof Date ? value.toISOString() : value;
46
+ } else if (key.endsWith("_lte")) {
47
+ const fieldName = key.slice(0, -4);
48
+ conditions.push(`[${utils.parseSqlIdentifier(fieldName, "field name")}] <= @${paramName}`);
49
+ params[paramName] = value instanceof Date ? value.toISOString() : value;
50
+ } else if (value === null) {
51
+ conditions.push(`[${utils.parseSqlIdentifier(key, "field name")}] IS NULL`);
52
+ } else {
53
+ conditions.push(`[${utils.parseSqlIdentifier(key, "field name")}] = @${paramName}`);
54
+ params[paramName] = value instanceof Date ? value.toISOString() : value;
55
+ }
56
+ });
39
57
  return {
40
- agentName: row.agent_name,
41
- input: row.input,
42
- output: row.output,
43
- result: resultValue,
44
- metricName: row.metric_name,
45
- instructions: row.instructions,
46
- testInfo: testInfoValue,
47
- globalRunId: row.global_run_id,
48
- runId: row.run_id,
49
- createdAt: row.created_at
58
+ sql: conditions.length > 0 ? ` WHERE ${conditions.join(" AND ")}` : "",
59
+ params
50
60
  };
51
61
  }
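Editor's note: the new buildDateRangeFilter and prepareWhereClause helpers above replace the filter building that the removed legacy-evals code did inline. A minimal illustrative sketch of what they produce; both functions are module-private in this bundle, and the expected values in the trailing comments are read off the implementation above rather than observed output.

// Illustrative only: buildDateRangeFilter() turns a date range into *_gte / *_lte keys,
// and prepareWhereClause() turns those keys into a parameterized WHERE clause.
const filters = {
  ...buildDateRangeFilter({ start: new Date('2025-01-01'), end: new Date('2025-02-01') }, 'startedAt'),
  parentSpanId: null, // null values become IS NULL conditions with no bound parameter
};
const where = prepareWhereClause(filters);
// where.sql    -> " WHERE [startedAt] >= @p1 AND [startedAt] <= @p2 AND [parentSpanId] IS NULL"
// where.params -> { p1: '2025-01-01T00:00:00.000Z', p2: '2025-02-01T00:00:00.000Z' }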
52
- var LegacyEvalsMSSQL = class extends storage.LegacyEvalsStorage {
53
- pool;
54
- schema;
55
- constructor({ pool, schema }) {
56
- super();
57
- this.pool = pool;
58
- this.schema = schema;
59
- }
60
- /** @deprecated use getEvals instead */
61
- async getEvalsByAgentName(agentName, type) {
62
- try {
63
- let query = `SELECT * FROM ${getTableName({ indexName: storage.TABLE_EVALS, schemaName: getSchemaName(this.schema) })} WHERE agent_name = @p1`;
64
- if (type === "test") {
65
- query += " AND test_info IS NOT NULL AND JSON_VALUE(test_info, '$.testPath') IS NOT NULL";
66
- } else if (type === "live") {
67
- query += " AND (test_info IS NULL OR JSON_VALUE(test_info, '$.testPath') IS NULL)";
68
- }
69
- query += " ORDER BY created_at DESC";
70
- const request = this.pool.request();
71
- request.input("p1", agentName);
72
- const result = await request.query(query);
73
- const rows = result.recordset;
74
- return typeof transformEvalRow === "function" ? rows?.map((row) => transformEvalRow(row)) ?? [] : rows ?? [];
75
- } catch (error) {
76
- if (error && error.number === 208 && error.message && error.message.includes("Invalid object name")) {
77
- return [];
78
- }
79
- console.error("Failed to get evals for the specified agent: " + error?.message);
80
- throw error;
81
- }
82
- }
83
- async getEvals(options = {}) {
84
- const { agentName, type, page = 0, perPage = 100, dateRange } = options;
85
- const fromDate = dateRange?.start;
86
- const toDate = dateRange?.end;
87
- const where = [];
88
- const params = {};
89
- if (agentName) {
90
- where.push("agent_name = @agentName");
91
- params["agentName"] = agentName;
92
- }
93
- if (type === "test") {
94
- where.push("test_info IS NOT NULL AND JSON_VALUE(test_info, '$.testPath') IS NOT NULL");
95
- } else if (type === "live") {
96
- where.push("(test_info IS NULL OR JSON_VALUE(test_info, '$.testPath') IS NULL)");
97
- }
98
- if (fromDate instanceof Date && !isNaN(fromDate.getTime())) {
99
- where.push(`[created_at] >= @fromDate`);
100
- params[`fromDate`] = fromDate.toISOString();
101
- }
102
- if (toDate instanceof Date && !isNaN(toDate.getTime())) {
103
- where.push(`[created_at] <= @toDate`);
104
- params[`toDate`] = toDate.toISOString();
105
- }
106
- const whereClause = where.length > 0 ? `WHERE ${where.join(" AND ")}` : "";
107
- const tableName = getTableName({ indexName: storage.TABLE_EVALS, schemaName: getSchemaName(this.schema) });
108
- const offset = page * perPage;
109
- const countQuery = `SELECT COUNT(*) as total FROM ${tableName} ${whereClause}`;
110
- const dataQuery = `SELECT * FROM ${tableName} ${whereClause} ORDER BY seq_id DESC OFFSET @offset ROWS FETCH NEXT @perPage ROWS ONLY`;
111
- try {
112
- const countReq = this.pool.request();
113
- Object.entries(params).forEach(([key, value]) => {
114
- if (value instanceof Date) {
115
- countReq.input(key, sql2__default.default.DateTime, value);
116
- } else {
117
- countReq.input(key, value);
118
- }
119
- });
120
- const countResult = await countReq.query(countQuery);
121
- const total = countResult.recordset[0]?.total || 0;
122
- if (total === 0) {
123
- return {
124
- evals: [],
125
- total: 0,
126
- page,
127
- perPage,
128
- hasMore: false
129
- };
62
+ function transformFromSqlRow({
63
+ tableName,
64
+ sqlRow
65
+ }) {
66
+ const schema = storage.TABLE_SCHEMAS[tableName];
67
+ const result = {};
68
+ Object.entries(sqlRow).forEach(([key, value]) => {
69
+ const columnSchema = schema?.[key];
70
+ if (columnSchema?.type === "jsonb" && typeof value === "string") {
71
+ try {
72
+ result[key] = JSON.parse(value);
73
+ } catch {
74
+ result[key] = value;
130
75
  }
131
- const req = this.pool.request();
132
- Object.entries(params).forEach(([key, value]) => {
133
- if (value instanceof Date) {
134
- req.input(key, sql2__default.default.DateTime, value);
135
- } else {
136
- req.input(key, value);
137
- }
138
- });
139
- req.input("offset", offset);
140
- req.input("perPage", perPage);
141
- const result = await req.query(dataQuery);
142
- const rows = result.recordset;
143
- return {
144
- evals: rows?.map((row) => transformEvalRow(row)) ?? [],
145
- total,
146
- page,
147
- perPage,
148
- hasMore: offset + (rows?.length ?? 0) < total
149
- };
150
- } catch (error$1) {
151
- const mastraError = new error.MastraError(
152
- {
153
- id: "MASTRA_STORAGE_MSSQL_STORE_GET_EVALS_FAILED",
154
- domain: error.ErrorDomain.STORAGE,
155
- category: error.ErrorCategory.THIRD_PARTY,
156
- details: {
157
- agentName: agentName || "all",
158
- type: type || "all",
159
- page,
160
- perPage
161
- }
162
- },
163
- error$1
164
- );
165
- this.logger?.error?.(mastraError.toString());
166
- this.logger?.trackException(mastraError);
167
- throw mastraError;
76
+ } else if (columnSchema?.type === "timestamp" && value && typeof value === "string") {
77
+ result[key] = new Date(value);
78
+ } else if (columnSchema?.type === "timestamp" && value instanceof Date) {
79
+ result[key] = value;
80
+ } else if (columnSchema?.type === "boolean") {
81
+ result[key] = Boolean(value);
82
+ } else {
83
+ result[key] = value;
168
84
  }
169
- }
170
- };
85
+ });
86
+ return result;
87
+ }
88
+
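Editor's note: transformFromSqlRow() rehydrates a raw mssql row using the column types declared in storage.TABLE_SCHEMAS. A rough sketch of the effect, assuming attributes is declared as jsonb, startedAt as timestamp, and isEvent as boolean in the AI-spans schema (the actual schema lives in @mastra/core/storage); the function itself is module-private here.

// Illustrative only: jsonb strings are JSON.parse'd, timestamp strings become Dates,
// and boolean columns are coerced with Boolean().
const span = transformFromSqlRow({
  tableName: storage.TABLE_AI_SPANS,
  sqlRow: {
    traceId: 'trace-1',
    attributes: '{"model":"gpt-4o"}',      // assumed jsonb column -> parsed to an object
    startedAt: '2025-01-01T00:00:00.000Z', // assumed timestamp column -> new Date(...)
    isEvent: 0,                            // assumed boolean column (BIT) -> false
  },
});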
89
+ // src/storage/domains/memory/index.ts
171
90
  var MemoryMSSQL = class extends storage.MemoryStorage {
172
91
  pool;
173
92
  schema;
@@ -185,7 +104,7 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
185
104
  });
186
105
  const cleanMessages = messagesWithParsedContent.map(({ seq_id, ...rest }) => rest);
187
106
  const list = new agent.MessageList().add(cleanMessages, "memory");
188
- return format === "v2" ? list.get.all.v2() : list.get.all.v1();
107
+ return format === "v2" ? list.get.all.db() : list.get.all.v1();
189
108
  }
190
109
  constructor({
191
110
  pool,
@@ -199,7 +118,7 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
199
118
  }
200
119
  async getThreadById({ threadId }) {
201
120
  try {
202
- const sql7 = `SELECT
121
+ const sql5 = `SELECT
203
122
  id,
204
123
  [resourceId],
205
124
  title,
@@ -210,7 +129,7 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
210
129
  WHERE id = @threadId`;
211
130
  const request = this.pool.request();
212
131
  request.input("threadId", threadId);
213
- const resultSet = await request.query(sql7);
132
+ const resultSet = await request.query(sql5);
214
133
  const thread = resultSet.recordset[0] || null;
215
134
  if (!thread) {
216
135
  return null;
@@ -235,11 +154,12 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
235
154
  );
236
155
  }
237
156
  }
238
- async getThreadsByResourceIdPaginated(args) {
239
- const { resourceId, page = 0, perPage: perPageInput, orderBy = "createdAt", sortDirection = "DESC" } = args;
157
+ async listThreadsByResourceId(args) {
158
+ const { resourceId, page = 0, perPage: perPageInput, orderBy } = args;
159
+ const perPage = storage.normalizePerPage(perPageInput, 100);
160
+ const { offset, perPage: perPageForResponse } = storage.calculatePagination(page, perPageInput, perPage);
161
+ const { field, direction } = this.parseOrderBy(orderBy);
240
162
  try {
241
- const perPage = perPageInput !== void 0 ? perPageInput : 100;
242
- const currentOffset = page * perPage;
243
163
  const baseQuery = `FROM ${getTableName({ indexName: storage.TABLE_THREADS, schemaName: getSchemaName(this.schema) })} WHERE [resourceId] = @resourceId`;
244
164
  const countQuery = `SELECT COUNT(*) as count ${baseQuery}`;
245
165
  const countRequest = this.pool.request();
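Editor's note: getThreadsByResourceIdPaginated is renamed to listThreadsByResourceId, and the hand-rolled perPage/offset math is delegated to normalizePerPage and calculatePagination from @mastra/core/storage. A hypothetical call, assuming a configured MSSQL store that exposes this memory domain as `memory`; `perPage: false` is the new escape hatch that returns every row in a single page (see the limit handling later in this method).

// Hypothetical caller sketch; the result shape comes from the return statements in this method.
const { threads, total, page, perPage, hasMore } = await memory.listThreadsByResourceId({
  resourceId: 'user-123',
  page: 0,
  perPage: 25, // or `false` to disable the limit and fetch all threads at once
});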
@@ -251,16 +171,22 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
251
171
  threads: [],
252
172
  total: 0,
253
173
  page,
254
- perPage,
174
+ perPage: perPageForResponse,
255
175
  hasMore: false
256
176
  };
257
177
  }
258
- const orderByField = orderBy === "createdAt" ? "[createdAt]" : "[updatedAt]";
259
- const dataQuery = `SELECT id, [resourceId], title, metadata, [createdAt], [updatedAt] ${baseQuery} ORDER BY ${orderByField} ${sortDirection} OFFSET @offset ROWS FETCH NEXT @perPage ROWS ONLY`;
178
+ const orderByField = field === "createdAt" ? "[createdAt]" : "[updatedAt]";
179
+ const dir = (direction || "DESC").toUpperCase() === "ASC" ? "ASC" : "DESC";
180
+ const limitValue = perPageInput === false ? total : perPage;
181
+ const dataQuery = `SELECT id, [resourceId], title, metadata, [createdAt], [updatedAt] ${baseQuery} ORDER BY ${orderByField} ${dir} OFFSET @offset ROWS FETCH NEXT @perPage ROWS ONLY`;
260
182
  const dataRequest = this.pool.request();
261
183
  dataRequest.input("resourceId", resourceId);
262
- dataRequest.input("perPage", perPage);
263
- dataRequest.input("offset", currentOffset);
184
+ dataRequest.input("offset", offset);
185
+ if (limitValue > 2147483647) {
186
+ dataRequest.input("perPage", sql2__default.default.BigInt, limitValue);
187
+ } else {
188
+ dataRequest.input("perPage", limitValue);
189
+ }
264
190
  const rowsResult = await dataRequest.query(dataQuery);
265
191
  const rows = rowsResult.recordset || [];
266
192
  const threads = rows.map((thread) => ({
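Editor's note: because `perPage: false` substitutes the full row count as the FETCH NEXT limit, the bound value can exceed a 32-bit integer, so the code above binds anything larger than 2^31 - 1 explicitly as BigInt. A condensed sketch of that guard; `request` is an mssql Request and `sql2` stands for the required mssql module (the bundle reaches it through its CJS interop default), and the 32-bit rationale is the presumed motivation rather than something stated in the source.

// Presumed rationale: the driver's default numeric binding is 32-bit, so very large
// "fetch everything" limits need an explicit BigInt type.
const INT32_MAX = 2147483647;
if (limitValue > INT32_MAX) {
  request.input('perPage', sql2.BigInt, limitValue);
} else {
  request.input('perPage', limitValue);
}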
@@ -273,13 +199,13 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
273
199
  threads,
274
200
  total,
275
201
  page,
276
- perPage,
277
- hasMore: currentOffset + threads.length < total
202
+ perPage: perPageForResponse,
203
+ hasMore: perPageInput === false ? false : offset + perPage < total
278
204
  };
279
205
  } catch (error$1) {
280
206
  const mastraError = new error.MastraError(
281
207
  {
282
- id: "MASTRA_STORAGE_MSSQL_STORE_GET_THREADS_BY_RESOURCE_ID_PAGINATED_FAILED",
208
+ id: "MASTRA_STORAGE_MSSQL_STORE_LIST_THREADS_BY_RESOURCE_ID_FAILED",
283
209
  domain: error.ErrorDomain.STORAGE,
284
210
  category: error.ErrorCategory.THIRD_PARTY,
285
211
  details: {
@@ -291,7 +217,13 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
291
217
  );
292
218
  this.logger?.error?.(mastraError.toString());
293
219
  this.logger?.trackException?.(mastraError);
294
- return { threads: [], total: 0, page, perPage: perPageInput || 100, hasMore: false };
220
+ return {
221
+ threads: [],
222
+ total: 0,
223
+ page,
224
+ perPage: perPageForResponse,
225
+ hasMore: false
226
+ };
295
227
  }
296
228
  }
297
229
  async saveThread({ thread }) {
@@ -313,7 +245,12 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
313
245
  req.input("id", thread.id);
314
246
  req.input("resourceId", thread.resourceId);
315
247
  req.input("title", thread.title);
316
- req.input("metadata", thread.metadata ? JSON.stringify(thread.metadata) : null);
248
+ const metadata = thread.metadata ? JSON.stringify(thread.metadata) : null;
249
+ if (metadata === null) {
250
+ req.input("metadata", sql2__default.default.NVarChar, null);
251
+ } else {
252
+ req.input("metadata", metadata);
253
+ }
317
254
  req.input("createdAt", sql2__default.default.DateTime2, thread.createdAt);
318
255
  req.input("updatedAt", sql2__default.default.DateTime2, thread.updatedAt);
319
256
  await req.query(mergeSql);
@@ -332,30 +269,6 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
332
269
  );
333
270
  }
334
271
  }
335
- /**
336
- * @deprecated use getThreadsByResourceIdPaginated instead
337
- */
338
- async getThreadsByResourceId(args) {
339
- const { resourceId, orderBy = "createdAt", sortDirection = "DESC" } = args;
340
- try {
341
- const baseQuery = `FROM ${getTableName({ indexName: storage.TABLE_THREADS, schemaName: getSchemaName(this.schema) })} WHERE [resourceId] = @resourceId`;
342
- const orderByField = orderBy === "createdAt" ? "[createdAt]" : "[updatedAt]";
343
- const dataQuery = `SELECT id, [resourceId], title, metadata, [createdAt], [updatedAt] ${baseQuery} ORDER BY ${orderByField} ${sortDirection}`;
344
- const request = this.pool.request();
345
- request.input("resourceId", resourceId);
346
- const resultSet = await request.query(dataQuery);
347
- const rows = resultSet.recordset || [];
348
- return rows.map((thread) => ({
349
- ...thread,
350
- metadata: typeof thread.metadata === "string" ? JSON.parse(thread.metadata) : thread.metadata,
351
- createdAt: thread.createdAt,
352
- updatedAt: thread.updatedAt
353
- }));
354
- } catch (error) {
355
- this.logger?.error?.(`Error getting threads for resource ${resourceId}:`, error);
356
- return [];
357
- }
358
- }
359
272
  /**
360
273
  * Updates a thread's title and metadata, merging with existing metadata. Returns the updated thread.
361
274
  */
@@ -383,7 +296,7 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
383
296
  };
384
297
  try {
385
298
  const table = getTableName({ indexName: storage.TABLE_THREADS, schemaName: getSchemaName(this.schema) });
386
- const sql7 = `UPDATE ${table}
299
+ const sql5 = `UPDATE ${table}
387
300
  SET title = @title,
388
301
  metadata = @metadata,
389
302
  [updatedAt] = @updatedAt
@@ -394,7 +307,7 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
394
307
  req.input("title", title);
395
308
  req.input("metadata", JSON.stringify(mergedMetadata));
396
309
  req.input("updatedAt", /* @__PURE__ */ new Date());
397
- const result = await req.query(sql7);
310
+ const result = await req.query(sql5);
398
311
  let thread = result.recordset && result.recordset[0];
399
312
  if (thread && "seq_id" in thread) {
400
313
  const { seq_id, ...rest } = thread;
@@ -464,11 +377,9 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
464
377
  }
465
378
  async _getIncludedMessages({
466
379
  threadId,
467
- selectBy,
468
- orderByStatement
380
+ include
469
381
  }) {
470
382
  if (!threadId.trim()) throw new Error("threadId must be a non-empty string");
471
- const include = selectBy?.include;
472
383
  if (!include) return null;
473
384
  const unionQueries = [];
474
385
  const paramValues = [];
@@ -493,7 +404,7 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
493
404
  m.[resourceId],
494
405
  m.seq_id
495
406
  FROM (
496
- SELECT *, ROW_NUMBER() OVER (${orderByStatement}) as row_num
407
+ SELECT *, ROW_NUMBER() OVER (ORDER BY [createdAt] ASC) as row_num
497
408
  FROM ${getTableName({ indexName: storage.TABLE_MESSAGES, schemaName: getSchemaName(this.schema) })}
498
409
  WHERE [thread_id] = ${pThreadId}
499
410
  ) AS m
@@ -501,15 +412,17 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
501
412
  OR EXISTS (
502
413
  SELECT 1
503
414
  FROM (
504
- SELECT *, ROW_NUMBER() OVER (${orderByStatement}) as row_num
415
+ SELECT *, ROW_NUMBER() OVER (ORDER BY [createdAt] ASC) as row_num
505
416
  FROM ${getTableName({ indexName: storage.TABLE_MESSAGES, schemaName: getSchemaName(this.schema) })}
506
417
  WHERE [thread_id] = ${pThreadId}
507
418
  ) AS target
508
419
  WHERE target.id = ${pId}
509
420
  AND (
510
- (m.row_num <= target.row_num + ${pPrev} AND m.row_num > target.row_num)
421
+ -- Get previous messages (messages that come BEFORE the target)
422
+ (m.row_num < target.row_num AND m.row_num >= target.row_num - ${pPrev})
511
423
  OR
512
- (m.row_num >= target.row_num - ${pNext} AND m.row_num < target.row_num)
424
+ -- Get next messages (messages that come AFTER the target)
425
+ (m.row_num > target.row_num AND m.row_num <= target.row_num + ${pNext})
513
426
  )
514
427
  )
515
428
  `
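Editor's note: the window conditions above fix a direction mix-up in _getIncludedMessages: the old expressions treated rows after the target as "previous" and rows before it as "next". A small worked example of the corrected ranges, as plain arithmetic not tied to the bundle.

// If the target message is row 10 and the caller asked for 2 previous and 1 next message:
const target = 10, prev = 2, next = 1;
const isPrevious = (row) => row < target && row >= target - prev; // rows 8 and 9
const isNext     = (row) => row > target && row <= target + next; // row 11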
@@ -538,34 +451,16 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
538
451
  });
539
452
  return dedupedRows;
540
453
  }
541
- async getMessages(args) {
542
- const { threadId, resourceId, format, selectBy } = args;
454
+ async listMessagesById({ messageIds }) {
455
+ if (messageIds.length === 0) return { messages: [] };
543
456
  const selectStatement = `SELECT seq_id, id, content, role, type, [createdAt], thread_id AS threadId, resourceId`;
544
457
  const orderByStatement = `ORDER BY [seq_id] DESC`;
545
- const limit = storage.resolveMessageLimit({ last: selectBy?.last, defaultLimit: 40 });
546
458
  try {
547
- if (!threadId.trim()) throw new Error("threadId must be a non-empty string");
548
459
  let rows = [];
549
- const include = selectBy?.include || [];
550
- if (include?.length) {
551
- const includeMessages = await this._getIncludedMessages({ threadId, selectBy, orderByStatement });
552
- if (includeMessages) {
553
- rows.push(...includeMessages);
554
- }
555
- }
556
- const excludeIds = rows.map((m) => m.id).filter(Boolean);
557
- let query = `${selectStatement} FROM ${getTableName({ indexName: storage.TABLE_MESSAGES, schemaName: getSchemaName(this.schema) })} WHERE [thread_id] = @threadId`;
460
+ let query = `${selectStatement} FROM ${getTableName({ indexName: storage.TABLE_MESSAGES, schemaName: getSchemaName(this.schema) })} WHERE [id] IN (${messageIds.map((_, i) => `@id${i}`).join(", ")})`;
558
461
  const request = this.pool.request();
559
- request.input("threadId", threadId);
560
- if (excludeIds.length > 0) {
561
- const excludeParams = excludeIds.map((_, idx) => `@id${idx}`);
562
- query += ` AND id NOT IN (${excludeParams.join(", ")})`;
563
- excludeIds.forEach((id, idx) => {
564
- request.input(`id${idx}`, id);
565
- });
566
- }
567
- query += ` ${orderByStatement} OFFSET 0 ROWS FETCH NEXT @limit ROWS ONLY`;
568
- request.input("limit", limit);
462
+ messageIds.forEach((id, i) => request.input(`id${i}`, id));
463
+ query += ` ${orderByStatement}`;
569
464
  const result = await request.query(query);
570
465
  const remainingRows = result.recordset || [];
571
466
  rows.push(...remainingRows);
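Editor's note: getMessagesById (with its v1/v2 format switch) is replaced by listMessagesById, which always returns `{ messages }` in the message list's db shape. A hypothetical call against the same `memory` handle assumed earlier.

// Hypothetical caller sketch: fetch a known set of message ids, ordered by seq_id.
const { messages } = await memory.listMessagesById({
  messageIds: ['msg_1', 'msg_2', 'msg_3'],
});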
@@ -573,153 +468,150 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
573
468
  const timeDiff = a.seq_id - b.seq_id;
574
469
  return timeDiff;
575
470
  });
576
- rows = rows.map(({ seq_id, ...rest }) => rest);
577
- return this._parseAndFormatMessages(rows, format);
471
+ const messagesWithParsedContent = rows.map((row) => {
472
+ if (typeof row.content === "string") {
473
+ try {
474
+ return { ...row, content: JSON.parse(row.content) };
475
+ } catch {
476
+ return row;
477
+ }
478
+ }
479
+ return row;
480
+ });
481
+ const cleanMessages = messagesWithParsedContent.map(({ seq_id, ...rest }) => rest);
482
+ const list = new agent.MessageList().add(cleanMessages, "memory");
483
+ return { messages: list.get.all.db() };
578
484
  } catch (error$1) {
579
485
  const mastraError = new error.MastraError(
580
486
  {
581
- id: "MASTRA_STORAGE_MSSQL_STORE_GET_MESSAGES_FAILED",
487
+ id: "MASTRA_STORAGE_MSSQL_STORE_LIST_MESSAGES_BY_ID_FAILED",
582
488
  domain: error.ErrorDomain.STORAGE,
583
489
  category: error.ErrorCategory.THIRD_PARTY,
584
490
  details: {
585
- threadId,
586
- resourceId: resourceId ?? ""
491
+ messageIds: JSON.stringify(messageIds)
587
492
  }
588
493
  },
589
494
  error$1
590
495
  );
591
496
  this.logger?.error?.(mastraError.toString());
592
- this.logger?.trackException(mastraError);
593
- return [];
497
+ this.logger?.trackException?.(mastraError);
498
+ return { messages: [] };
594
499
  }
595
500
  }
596
- async getMessagesById({
597
- messageIds,
598
- format
599
- }) {
600
- if (messageIds.length === 0) return [];
601
- const selectStatement = `SELECT seq_id, id, content, role, type, [createdAt], thread_id AS threadId, resourceId`;
602
- const orderByStatement = `ORDER BY [seq_id] DESC`;
603
- try {
604
- let rows = [];
605
- let query = `${selectStatement} FROM ${getTableName({ indexName: storage.TABLE_MESSAGES, schemaName: getSchemaName(this.schema) })} WHERE [id] IN (${messageIds.map((_, i) => `@id${i}`).join(", ")})`;
606
- const request = this.pool.request();
607
- messageIds.forEach((id, i) => request.input(`id${i}`, id));
608
- query += ` ${orderByStatement}`;
609
- const result = await request.query(query);
610
- const remainingRows = result.recordset || [];
611
- rows.push(...remainingRows);
612
- rows.sort((a, b) => {
613
- const timeDiff = a.seq_id - b.seq_id;
614
- return timeDiff;
615
- });
616
- rows = rows.map(({ seq_id, ...rest }) => rest);
617
- if (format === `v1`) return this._parseAndFormatMessages(rows, format);
618
- return this._parseAndFormatMessages(rows, `v2`);
619
- } catch (error$1) {
620
- const mastraError = new error.MastraError(
501
+ async listMessages(args) {
502
+ const { threadId, resourceId, include, filter, perPage: perPageInput, page = 0, orderBy } = args;
503
+ if (!threadId.trim()) {
504
+ throw new error.MastraError(
621
505
  {
622
- id: "MASTRA_STORAGE_MSSQL_STORE_GET_MESSAGES_BY_ID_FAILED",
506
+ id: "STORAGE_MSSQL_LIST_MESSAGES_INVALID_THREAD_ID",
623
507
  domain: error.ErrorDomain.STORAGE,
624
508
  category: error.ErrorCategory.THIRD_PARTY,
625
- details: {
626
- messageIds: JSON.stringify(messageIds)
627
- }
509
+ details: { threadId }
628
510
  },
629
- error$1
511
+ new Error("threadId must be a non-empty string")
630
512
  );
631
- this.logger?.error?.(mastraError.toString());
632
- this.logger?.trackException(mastraError);
633
- return [];
634
513
  }
635
- }
636
- async getMessagesPaginated(args) {
637
- const { threadId, resourceId, format, selectBy } = args;
638
- const { page = 0, perPage: perPageInput, dateRange } = selectBy?.pagination || {};
514
+ const perPage = storage.normalizePerPage(perPageInput, 40);
515
+ const { offset, perPage: perPageForResponse } = storage.calculatePagination(page, perPageInput, perPage);
639
516
  try {
640
- if (!threadId.trim()) throw new Error("threadId must be a non-empty string");
641
- const fromDate = dateRange?.start;
642
- const toDate = dateRange?.end;
517
+ const { field, direction } = this.parseOrderBy(orderBy, "ASC");
518
+ const orderByStatement = `ORDER BY [${field}] ${direction}`;
643
519
  const selectStatement = `SELECT seq_id, id, content, role, type, [createdAt], thread_id AS threadId, resourceId`;
644
- const orderByStatement = `ORDER BY [seq_id] DESC`;
645
- let messages = [];
646
- if (selectBy?.include?.length) {
647
- const includeMessages = await this._getIncludedMessages({ threadId, selectBy, orderByStatement });
648
- if (includeMessages) messages.push(...includeMessages);
649
- }
650
- const perPage = perPageInput !== void 0 ? perPageInput : storage.resolveMessageLimit({ last: selectBy?.last, defaultLimit: 40 });
651
- const currentOffset = page * perPage;
520
+ const tableName = getTableName({ indexName: storage.TABLE_MESSAGES, schemaName: getSchemaName(this.schema) });
652
521
  const conditions = ["[thread_id] = @threadId"];
653
522
  const request = this.pool.request();
654
523
  request.input("threadId", threadId);
655
- if (fromDate instanceof Date && !isNaN(fromDate.getTime())) {
524
+ if (resourceId) {
525
+ conditions.push("[resourceId] = @resourceId");
526
+ request.input("resourceId", resourceId);
527
+ }
528
+ if (filter?.dateRange?.start) {
656
529
  conditions.push("[createdAt] >= @fromDate");
657
- request.input("fromDate", fromDate.toISOString());
530
+ request.input("fromDate", filter.dateRange.start);
658
531
  }
659
- if (toDate instanceof Date && !isNaN(toDate.getTime())) {
532
+ if (filter?.dateRange?.end) {
660
533
  conditions.push("[createdAt] <= @toDate");
661
- request.input("toDate", toDate.toISOString());
534
+ request.input("toDate", filter.dateRange.end);
662
535
  }
663
536
  const whereClause = `WHERE ${conditions.join(" AND ")}`;
664
- const countQuery = `SELECT COUNT(*) as total FROM ${getTableName({ indexName: storage.TABLE_MESSAGES, schemaName: getSchemaName(this.schema) })} ${whereClause}`;
537
+ const countQuery = `SELECT COUNT(*) as total FROM ${tableName} ${whereClause}`;
665
538
  const countResult = await request.query(countQuery);
666
539
  const total = parseInt(countResult.recordset[0]?.total, 10) || 0;
667
- if (total === 0 && messages.length > 0) {
668
- const parsedIncluded = this._parseAndFormatMessages(messages, format);
540
+ const limitValue = perPageInput === false ? total : perPage;
541
+ const dataQuery = `${selectStatement} FROM ${tableName} ${whereClause} ${orderByStatement} OFFSET @offset ROWS FETCH NEXT @limit ROWS ONLY`;
542
+ request.input("offset", offset);
543
+ if (limitValue > 2147483647) {
544
+ request.input("limit", sql2__default.default.BigInt, limitValue);
545
+ } else {
546
+ request.input("limit", limitValue);
547
+ }
548
+ const rowsResult = await request.query(dataQuery);
549
+ const rows = rowsResult.recordset || [];
550
+ const messages = [...rows];
551
+ if (total === 0 && messages.length === 0 && (!include || include.length === 0)) {
669
552
  return {
670
- messages: parsedIncluded,
671
- total: parsedIncluded.length,
553
+ messages: [],
554
+ total: 0,
672
555
  page,
673
- perPage,
556
+ perPage: perPageForResponse,
674
557
  hasMore: false
675
558
  };
676
559
  }
677
- const excludeIds = messages.map((m) => m.id);
678
- if (excludeIds.length > 0) {
679
- const excludeParams = excludeIds.map((_, idx) => `@id${idx}`);
680
- conditions.push(`id NOT IN (${excludeParams.join(", ")})`);
681
- excludeIds.forEach((id, idx) => request.input(`id${idx}`, id));
560
+ const messageIds = new Set(messages.map((m) => m.id));
561
+ if (include && include.length > 0) {
562
+ const includeMessages = await this._getIncludedMessages({ threadId, include });
563
+ if (includeMessages) {
564
+ for (const includeMsg of includeMessages) {
565
+ if (!messageIds.has(includeMsg.id)) {
566
+ messages.push(includeMsg);
567
+ messageIds.add(includeMsg.id);
568
+ }
569
+ }
570
+ }
682
571
  }
683
- const finalWhereClause = `WHERE ${conditions.join(" AND ")}`;
684
- const dataQuery = `${selectStatement} FROM ${getTableName({ indexName: storage.TABLE_MESSAGES, schemaName: getSchemaName(this.schema) })} ${finalWhereClause} ${orderByStatement} OFFSET @offset ROWS FETCH NEXT @limit ROWS ONLY`;
685
- request.input("offset", currentOffset);
686
- request.input("limit", perPage);
687
- const rowsResult = await request.query(dataQuery);
688
- const rows = rowsResult.recordset || [];
689
- rows.sort((a, b) => a.seq_id - b.seq_id);
690
- messages.push(...rows);
691
- const parsed = this._parseAndFormatMessages(messages, format);
572
+ const parsed = this._parseAndFormatMessages(messages, "v2");
573
+ let finalMessages = parsed;
574
+ finalMessages = finalMessages.sort((a, b) => {
575
+ const aValue = field === "createdAt" ? new Date(a.createdAt).getTime() : a[field];
576
+ const bValue = field === "createdAt" ? new Date(b.createdAt).getTime() : b[field];
577
+ return direction === "ASC" ? aValue - bValue : bValue - aValue;
578
+ });
579
+ const returnedThreadMessageIds = new Set(finalMessages.filter((m) => m.threadId === threadId).map((m) => m.id));
580
+ const allThreadMessagesReturned = returnedThreadMessageIds.size >= total;
581
+ const hasMore = perPageInput !== false && !allThreadMessagesReturned && offset + perPage < total;
692
582
  return {
693
- messages: parsed,
694
- total: total + excludeIds.length,
583
+ messages: finalMessages,
584
+ total,
695
585
  page,
696
- perPage,
697
- hasMore: currentOffset + rows.length < total
586
+ perPage: perPageForResponse,
587
+ hasMore
698
588
  };
699
589
  } catch (error$1) {
700
590
  const mastraError = new error.MastraError(
701
591
  {
702
- id: "MASTRA_STORAGE_MSSQL_STORE_GET_MESSAGES_PAGINATED_FAILED",
592
+ id: "MASTRA_STORAGE_MSSQL_STORE_LIST_MESSAGES_FAILED",
703
593
  domain: error.ErrorDomain.STORAGE,
704
594
  category: error.ErrorCategory.THIRD_PARTY,
705
595
  details: {
706
596
  threadId,
707
- resourceId: resourceId ?? "",
708
- page
597
+ resourceId: resourceId ?? ""
709
598
  }
710
599
  },
711
600
  error$1
712
601
  );
713
602
  this.logger?.error?.(mastraError.toString());
714
- this.logger?.trackException(mastraError);
715
- return { messages: [], total: 0, page, perPage: perPageInput || 40, hasMore: false };
603
+ this.logger?.trackException?.(mastraError);
604
+ return {
605
+ messages: [],
606
+ total: 0,
607
+ page,
608
+ perPage: perPageForResponse,
609
+ hasMore: false
610
+ };
716
611
  }
717
612
  }
718
- async saveMessages({
719
- messages,
720
- format
721
- }) {
722
- if (messages.length === 0) return messages;
613
+ async saveMessages({ messages }) {
614
+ if (messages.length === 0) return { messages: [] };
723
615
  const threadId = messages[0]?.threadId;
724
616
  if (!threadId) {
725
617
  throw new error.MastraError({
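Editor's note: getMessages and getMessagesPaginated collapse into a single listMessages method that takes page/perPage, an optional resourceId and date-range filter, and an include list for pulling context messages from the thread. A hypothetical call; the field names inside the include entry follow the previous/next context counts used by _getIncludedMessages above and are assumed here, not shown in this diff.

// Hypothetical caller sketch for the unified listMessages API.
const result = await memory.listMessages({
  threadId: 'thread-42',
  page: 0,
  perPage: 40,
  filter: { dateRange: { start: new Date('2025-01-01') } },
  include: [
    { id: 'msg_7', withPreviousMessages: 2, withNextMessages: 2 }, // field names assumed
  ],
});
// result -> { messages, total, page, perPage, hasMore }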
@@ -801,8 +693,7 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
801
693
  return message;
802
694
  });
803
695
  const list = new agent.MessageList().add(messagesWithParsedContent, "memory");
804
- if (format === "v2") return list.get.all.v2();
805
- return list.get.all.v1();
696
+ return { messages: list.get.all.db() };
806
697
  } catch (error$1) {
807
698
  throw new error.MastraError(
808
699
  {
@@ -978,8 +869,10 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
978
869
  return null;
979
870
  }
980
871
  return {
981
- ...result,
982
- workingMemory: typeof result.workingMemory === "object" ? JSON.stringify(result.workingMemory) : result.workingMemory,
872
+ id: result.id,
873
+ createdAt: result.createdAt,
874
+ updatedAt: result.updatedAt,
875
+ workingMemory: result.workingMemory,
983
876
  metadata: typeof result.metadata === "string" ? JSON.parse(result.metadata) : result.metadata
984
877
  };
985
878
  } catch (error$1) {
@@ -993,7 +886,7 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
993
886
  error$1
994
887
  );
995
888
  this.logger?.error?.(mastraError.toString());
996
- this.logger?.trackException(mastraError);
889
+ this.logger?.trackException?.(mastraError);
997
890
  throw mastraError;
998
891
  }
999
892
  }
@@ -1002,7 +895,7 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
1002
895
  tableName: storage.TABLE_RESOURCES,
1003
896
  record: {
1004
897
  ...resource,
1005
- metadata: JSON.stringify(resource.metadata)
898
+ metadata: resource.metadata
1006
899
  }
1007
900
  });
1008
901
  return resource;
@@ -1060,111 +953,436 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
1060
953
  error$1
1061
954
  );
1062
955
  this.logger?.error?.(mastraError.toString());
1063
- this.logger?.trackException(mastraError);
956
+ this.logger?.trackException?.(mastraError);
1064
957
  throw mastraError;
1065
958
  }
1066
959
  }
1067
960
  };
1068
- var StoreOperationsMSSQL = class extends storage.StoreOperations {
961
+ var ObservabilityMSSQL = class extends storage.ObservabilityStorage {
1069
962
  pool;
1070
- schemaName;
1071
- setupSchemaPromise = null;
1072
- schemaSetupComplete = void 0;
1073
- getSqlType(type, isPrimaryKey = false) {
1074
- switch (type) {
1075
- case "text":
1076
- return isPrimaryKey ? "NVARCHAR(255)" : "NVARCHAR(MAX)";
1077
- case "timestamp":
1078
- return "DATETIME2(7)";
1079
- case "uuid":
1080
- return "UNIQUEIDENTIFIER";
1081
- case "jsonb":
1082
- return "NVARCHAR(MAX)";
1083
- case "integer":
1084
- return "INT";
1085
- case "bigint":
1086
- return "BIGINT";
1087
- case "float":
1088
- return "FLOAT";
1089
- default:
1090
- throw new error.MastraError({
1091
- id: "MASTRA_STORAGE_MSSQL_STORE_TYPE_NOT_SUPPORTED",
1092
- domain: error.ErrorDomain.STORAGE,
1093
- category: error.ErrorCategory.THIRD_PARTY
1094
- });
1095
- }
1096
- }
1097
- constructor({ pool, schemaName }) {
963
+ operations;
964
+ schema;
965
+ constructor({
966
+ pool,
967
+ operations,
968
+ schema
969
+ }) {
1098
970
  super();
1099
971
  this.pool = pool;
1100
- this.schemaName = schemaName;
1101
- }
1102
- async hasColumn(table, column) {
1103
- const schema = this.schemaName || "dbo";
1104
- const request = this.pool.request();
1105
- request.input("schema", schema);
1106
- request.input("table", table);
1107
- request.input("column", column);
1108
- request.input("columnLower", column.toLowerCase());
1109
- const result = await request.query(
1110
- `SELECT 1 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = @schema AND TABLE_NAME = @table AND (COLUMN_NAME = @column OR COLUMN_NAME = @columnLower)`
1111
- );
1112
- return result.recordset.length > 0;
972
+ this.operations = operations;
973
+ this.schema = schema;
1113
974
  }
1114
- async setupSchema() {
1115
- if (!this.schemaName || this.schemaSetupComplete) {
1116
- return;
1117
- }
1118
- if (!this.setupSchemaPromise) {
1119
- this.setupSchemaPromise = (async () => {
1120
- try {
1121
- const checkRequest = this.pool.request();
1122
- checkRequest.input("schemaName", this.schemaName);
1123
- const checkResult = await checkRequest.query(`
1124
- SELECT 1 AS found FROM INFORMATION_SCHEMA.SCHEMATA WHERE SCHEMA_NAME = @schemaName
1125
- `);
1126
- const schemaExists = Array.isArray(checkResult.recordset) && checkResult.recordset.length > 0;
1127
- if (!schemaExists) {
1128
- try {
1129
- await this.pool.request().query(`CREATE SCHEMA [${this.schemaName}]`);
1130
- this.logger?.info?.(`Schema "${this.schemaName}" created successfully`);
1131
- } catch (error) {
1132
- this.logger?.error?.(`Failed to create schema "${this.schemaName}"`, { error });
1133
- throw new Error(
1134
- `Unable to create schema "${this.schemaName}". This requires CREATE privilege on the database. Either create the schema manually or grant CREATE privilege to the user.`
1135
- );
1136
- }
1137
- }
1138
- this.schemaSetupComplete = true;
1139
- this.logger?.debug?.(`Schema "${this.schemaName}" is ready for use`);
1140
- } catch (error) {
1141
- this.schemaSetupComplete = void 0;
1142
- this.setupSchemaPromise = null;
1143
- throw error;
1144
- } finally {
1145
- this.setupSchemaPromise = null;
1146
- }
1147
- })();
1148
- }
1149
- await this.setupSchemaPromise;
975
+ get tracingStrategy() {
976
+ return {
977
+ preferred: "batch-with-updates",
978
+ supported: ["batch-with-updates", "insert-only"]
979
+ };
1150
980
  }
1151
- async insert({ tableName, record }) {
981
+ async createSpan(span) {
1152
982
  try {
1153
- const columns = Object.keys(record).map((col) => utils.parseSqlIdentifier(col, "column name"));
1154
- const values = Object.values(record);
1155
- const paramNames = values.map((_, i) => `@param${i}`);
1156
- const insertSql = `INSERT INTO ${getTableName({ indexName: tableName, schemaName: getSchemaName(this.schemaName) })} (${columns.map((c) => `[${c}]`).join(", ")}) VALUES (${paramNames.join(", ")})`;
1157
- const request = this.pool.request();
1158
- values.forEach((value, i) => {
1159
- if (value instanceof Date) {
1160
- request.input(`param${i}`, sql2__default.default.DateTime2, value);
1161
- } else if (typeof value === "object" && value !== null) {
1162
- request.input(`param${i}`, JSON.stringify(value));
1163
- } else {
1164
- request.input(`param${i}`, value);
1165
- }
1166
- });
1167
- await request.query(insertSql);
983
+ const startedAt = span.startedAt instanceof Date ? span.startedAt.toISOString() : span.startedAt;
984
+ const endedAt = span.endedAt instanceof Date ? span.endedAt.toISOString() : span.endedAt;
985
+ const record = {
986
+ ...span,
987
+ startedAt,
988
+ endedAt
989
+ // Note: createdAt/updatedAt will be set by default values
990
+ };
991
+ return this.operations.insert({ tableName: storage.TABLE_AI_SPANS, record });
992
+ } catch (error$1) {
993
+ throw new error.MastraError(
994
+ {
995
+ id: "MSSQL_STORE_CREATE_AI_SPAN_FAILED",
996
+ domain: error.ErrorDomain.STORAGE,
997
+ category: error.ErrorCategory.USER,
998
+ details: {
999
+ spanId: span.spanId,
1000
+ traceId: span.traceId,
1001
+ spanType: span.spanType,
1002
+ spanName: span.name
1003
+ }
1004
+ },
1005
+ error$1
1006
+ );
1007
+ }
1008
+ }
1009
+ async getAITrace(traceId) {
1010
+ try {
1011
+ const tableName = getTableName({
1012
+ indexName: storage.TABLE_AI_SPANS,
1013
+ schemaName: getSchemaName(this.schema)
1014
+ });
1015
+ const request = this.pool.request();
1016
+ request.input("traceId", traceId);
1017
+ const result = await request.query(
1018
+ `SELECT
1019
+ [traceId], [spanId], [parentSpanId], [name], [scope], [spanType],
1020
+ [attributes], [metadata], [links], [input], [output], [error], [isEvent],
1021
+ [startedAt], [endedAt], [createdAt], [updatedAt]
1022
+ FROM ${tableName}
1023
+ WHERE [traceId] = @traceId
1024
+ ORDER BY [startedAt] DESC`
1025
+ );
1026
+ if (!result.recordset || result.recordset.length === 0) {
1027
+ return null;
1028
+ }
1029
+ return {
1030
+ traceId,
1031
+ spans: result.recordset.map(
1032
+ (span) => transformFromSqlRow({
1033
+ tableName: storage.TABLE_AI_SPANS,
1034
+ sqlRow: span
1035
+ })
1036
+ )
1037
+ };
1038
+ } catch (error$1) {
1039
+ throw new error.MastraError(
1040
+ {
1041
+ id: "MSSQL_STORE_GET_AI_TRACE_FAILED",
1042
+ domain: error.ErrorDomain.STORAGE,
1043
+ category: error.ErrorCategory.USER,
1044
+ details: {
1045
+ traceId
1046
+ }
1047
+ },
1048
+ error$1
1049
+ );
1050
+ }
1051
+ }
1052
+ async updateSpan({
1053
+ spanId,
1054
+ traceId,
1055
+ updates
1056
+ }) {
1057
+ try {
1058
+ const data = { ...updates };
1059
+ if (data.endedAt instanceof Date) {
1060
+ data.endedAt = data.endedAt.toISOString();
1061
+ }
1062
+ if (data.startedAt instanceof Date) {
1063
+ data.startedAt = data.startedAt.toISOString();
1064
+ }
1065
+ await this.operations.update({
1066
+ tableName: storage.TABLE_AI_SPANS,
1067
+ keys: { spanId, traceId },
1068
+ data
1069
+ });
1070
+ } catch (error$1) {
1071
+ throw new error.MastraError(
1072
+ {
1073
+ id: "MSSQL_STORE_UPDATE_AI_SPAN_FAILED",
1074
+ domain: error.ErrorDomain.STORAGE,
1075
+ category: error.ErrorCategory.USER,
1076
+ details: {
1077
+ spanId,
1078
+ traceId
1079
+ }
1080
+ },
1081
+ error$1
1082
+ );
1083
+ }
1084
+ }
1085
+ async getAITracesPaginated({
1086
+ filters,
1087
+ pagination
1088
+ }) {
1089
+ const page = pagination?.page ?? 0;
1090
+ const perPage = pagination?.perPage ?? 10;
1091
+ const { entityId, entityType, ...actualFilters } = filters || {};
1092
+ const filtersWithDateRange = {
1093
+ ...actualFilters,
1094
+ ...buildDateRangeFilter(pagination?.dateRange, "startedAt"),
1095
+ parentSpanId: null
1096
+ // Only get root spans for traces
1097
+ };
1098
+ const whereClause = prepareWhereClause(filtersWithDateRange);
1099
+ let actualWhereClause = whereClause.sql;
1100
+ const params = { ...whereClause.params };
1101
+ let currentParamIndex = Object.keys(params).length + 1;
1102
+ if (entityId && entityType) {
1103
+ let name = "";
1104
+ if (entityType === "workflow") {
1105
+ name = `workflow run: '${entityId}'`;
1106
+ } else if (entityType === "agent") {
1107
+ name = `agent run: '${entityId}'`;
1108
+ } else {
1109
+ const error$1 = new error.MastraError({
1110
+ id: "MSSQL_STORE_GET_AI_TRACES_PAGINATED_FAILED",
1111
+ domain: error.ErrorDomain.STORAGE,
1112
+ category: error.ErrorCategory.USER,
1113
+ details: {
1114
+ entityType
1115
+ },
1116
+ text: `Cannot filter by entity type: ${entityType}`
1117
+ });
1118
+ throw error$1;
1119
+ }
1120
+ const entityParam = `p${currentParamIndex++}`;
1121
+ if (actualWhereClause) {
1122
+ actualWhereClause += ` AND [name] = @${entityParam}`;
1123
+ } else {
1124
+ actualWhereClause = ` WHERE [name] = @${entityParam}`;
1125
+ }
1126
+ params[entityParam] = name;
1127
+ }
1128
+ const tableName = getTableName({
1129
+ indexName: storage.TABLE_AI_SPANS,
1130
+ schemaName: getSchemaName(this.schema)
1131
+ });
1132
+ try {
1133
+ const countRequest = this.pool.request();
1134
+ Object.entries(params).forEach(([key, value]) => {
1135
+ countRequest.input(key, value);
1136
+ });
1137
+ const countResult = await countRequest.query(
1138
+ `SELECT COUNT(*) as count FROM ${tableName}${actualWhereClause}`
1139
+ );
1140
+ const total = countResult.recordset[0]?.count ?? 0;
1141
+ if (total === 0) {
1142
+ return {
1143
+ pagination: {
1144
+ total: 0,
1145
+ page,
1146
+ perPage,
1147
+ hasMore: false
1148
+ },
1149
+ spans: []
1150
+ };
1151
+ }
1152
+ const dataRequest = this.pool.request();
1153
+ Object.entries(params).forEach(([key, value]) => {
1154
+ dataRequest.input(key, value);
1155
+ });
1156
+ dataRequest.input("offset", page * perPage);
1157
+ dataRequest.input("limit", perPage);
1158
+ const dataResult = await dataRequest.query(
1159
+ `SELECT * FROM ${tableName}${actualWhereClause} ORDER BY [startedAt] DESC OFFSET @offset ROWS FETCH NEXT @limit ROWS ONLY`
1160
+ );
1161
+ const spans = dataResult.recordset.map(
1162
+ (row) => transformFromSqlRow({
1163
+ tableName: storage.TABLE_AI_SPANS,
1164
+ sqlRow: row
1165
+ })
1166
+ );
1167
+ return {
1168
+ pagination: {
1169
+ total,
1170
+ page,
1171
+ perPage,
1172
+ hasMore: (page + 1) * perPage < total
1173
+ },
1174
+ spans
1175
+ };
1176
+ } catch (error$1) {
1177
+ throw new error.MastraError(
1178
+ {
1179
+ id: "MSSQL_STORE_GET_AI_TRACES_PAGINATED_FAILED",
1180
+ domain: error.ErrorDomain.STORAGE,
1181
+ category: error.ErrorCategory.USER
1182
+ },
1183
+ error$1
1184
+ );
1185
+ }
1186
+ }
1187
+ async batchCreateSpans(args) {
1188
+ if (!args.records || args.records.length === 0) {
1189
+ return;
1190
+ }
1191
+ try {
1192
+ await this.operations.batchInsert({
1193
+ tableName: storage.TABLE_AI_SPANS,
1194
+ records: args.records.map((span) => ({
1195
+ ...span,
1196
+ startedAt: span.startedAt instanceof Date ? span.startedAt.toISOString() : span.startedAt,
1197
+ endedAt: span.endedAt instanceof Date ? span.endedAt.toISOString() : span.endedAt
1198
+ }))
1199
+ });
1200
+ } catch (error$1) {
1201
+ throw new error.MastraError(
1202
+ {
1203
+ id: "MSSQL_STORE_BATCH_CREATE_AI_SPANS_FAILED",
1204
+ domain: error.ErrorDomain.STORAGE,
1205
+ category: error.ErrorCategory.USER,
1206
+ details: {
1207
+ count: args.records.length
1208
+ }
1209
+ },
1210
+ error$1
1211
+ );
1212
+ }
1213
+ }
1214
+ async batchUpdateSpans(args) {
1215
+ if (!args.records || args.records.length === 0) {
1216
+ return;
1217
+ }
1218
+ try {
1219
+ const updates = args.records.map(({ traceId, spanId, updates: data }) => {
1220
+ const processedData = { ...data };
1221
+ if (processedData.endedAt instanceof Date) {
1222
+ processedData.endedAt = processedData.endedAt.toISOString();
1223
+ }
1224
+ if (processedData.startedAt instanceof Date) {
1225
+ processedData.startedAt = processedData.startedAt.toISOString();
1226
+ }
1227
+ return {
1228
+ keys: { spanId, traceId },
1229
+ data: processedData
1230
+ };
1231
+ });
1232
+ await this.operations.batchUpdate({
1233
+ tableName: storage.TABLE_AI_SPANS,
1234
+ updates
1235
+ });
1236
+ } catch (error$1) {
1237
+ throw new error.MastraError(
1238
+ {
1239
+ id: "MSSQL_STORE_BATCH_UPDATE_AI_SPANS_FAILED",
1240
+ domain: error.ErrorDomain.STORAGE,
1241
+ category: error.ErrorCategory.USER,
1242
+ details: {
1243
+ count: args.records.length
1244
+ }
1245
+ },
1246
+ error$1
1247
+ );
1248
+ }
1249
+ }
1250
+ async batchDeleteAITraces(args) {
1251
+ if (!args.traceIds || args.traceIds.length === 0) {
1252
+ return;
1253
+ }
1254
+ try {
1255
+ const keys = args.traceIds.map((traceId) => ({ traceId }));
1256
+ await this.operations.batchDelete({
1257
+ tableName: storage.TABLE_AI_SPANS,
1258
+ keys
1259
+ });
1260
+ } catch (error$1) {
1261
+ throw new error.MastraError(
1262
+ {
1263
+ id: "MSSQL_STORE_BATCH_DELETE_AI_TRACES_FAILED",
1264
+ domain: error.ErrorDomain.STORAGE,
1265
+ category: error.ErrorCategory.USER,
1266
+ details: {
1267
+ count: args.traceIds.length
1268
+ }
1269
+ },
1270
+ error$1
1271
+ );
1272
+ }
1273
+ }
1274
+ };
1275
+ var StoreOperationsMSSQL = class extends storage.StoreOperations {
1276
+ pool;
1277
+ schemaName;
1278
+ setupSchemaPromise = null;
1279
+ schemaSetupComplete = void 0;
1280
+ getSqlType(type, isPrimaryKey = false, useLargeStorage = false) {
1281
+ switch (type) {
1282
+ case "text":
1283
+ if (useLargeStorage) {
1284
+ return "NVARCHAR(MAX)";
1285
+ }
1286
+ return isPrimaryKey ? "NVARCHAR(255)" : "NVARCHAR(400)";
1287
+ case "timestamp":
1288
+ return "DATETIME2(7)";
1289
+ case "uuid":
1290
+ return "UNIQUEIDENTIFIER";
1291
+ case "jsonb":
1292
+ return "NVARCHAR(MAX)";
1293
+ case "integer":
1294
+ return "INT";
1295
+ case "bigint":
1296
+ return "BIGINT";
1297
+ case "float":
1298
+ return "FLOAT";
1299
+ case "boolean":
1300
+ return "BIT";
1301
+ default:
1302
+ throw new error.MastraError({
1303
+ id: "MASTRA_STORAGE_MSSQL_STORE_TYPE_NOT_SUPPORTED",
1304
+ domain: error.ErrorDomain.STORAGE,
1305
+ category: error.ErrorCategory.THIRD_PARTY
1306
+ });
1307
+ }
1308
+ }
1309
+ constructor({ pool, schemaName }) {
1310
+ super();
1311
+ this.pool = pool;
1312
+ this.schemaName = schemaName;
1313
+ }
1314
+ async hasColumn(table, column) {
1315
+ const schema = this.schemaName || "dbo";
1316
+ const request = this.pool.request();
1317
+ request.input("schema", schema);
1318
+ request.input("table", table);
1319
+ request.input("column", column);
1320
+ request.input("columnLower", column.toLowerCase());
1321
+ const result = await request.query(
1322
+ `SELECT 1 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = @schema AND TABLE_NAME = @table AND (COLUMN_NAME = @column OR COLUMN_NAME = @columnLower)`
1323
+ );
1324
+ return result.recordset.length > 0;
1325
+ }
1326
+ async setupSchema() {
1327
+ if (!this.schemaName || this.schemaSetupComplete) {
1328
+ return;
1329
+ }
1330
+ if (!this.setupSchemaPromise) {
1331
+ this.setupSchemaPromise = (async () => {
1332
+ try {
1333
+ const checkRequest = this.pool.request();
1334
+ checkRequest.input("schemaName", this.schemaName);
1335
+ const checkResult = await checkRequest.query(`
1336
+ SELECT 1 AS found FROM INFORMATION_SCHEMA.SCHEMATA WHERE SCHEMA_NAME = @schemaName
1337
+ `);
1338
+ const schemaExists = Array.isArray(checkResult.recordset) && checkResult.recordset.length > 0;
1339
+ if (!schemaExists) {
1340
+ try {
1341
+ await this.pool.request().query(`CREATE SCHEMA [${this.schemaName}]`);
1342
+ this.logger?.info?.(`Schema "${this.schemaName}" created successfully`);
1343
+ } catch (error) {
1344
+ this.logger?.error?.(`Failed to create schema "${this.schemaName}"`, { error });
1345
+ throw new Error(
1346
+ `Unable to create schema "${this.schemaName}". This requires CREATE privilege on the database. Either create the schema manually or grant CREATE privilege to the user.`
1347
+ );
1348
+ }
1349
+ }
1350
+ this.schemaSetupComplete = true;
1351
+ this.logger?.debug?.(`Schema "${this.schemaName}" is ready for use`);
1352
+ } catch (error) {
1353
+ this.schemaSetupComplete = void 0;
1354
+ this.setupSchemaPromise = null;
1355
+ throw error;
1356
+ } finally {
1357
+ this.setupSchemaPromise = null;
1358
+ }
1359
+ })();
1360
+ }
1361
+ await this.setupSchemaPromise;
1362
+ }
1363
+ async insert({
1364
+ tableName,
1365
+ record,
1366
+ transaction
1367
+ }) {
1368
+ try {
1369
+ const columns = Object.keys(record);
1370
+ const parsedColumns = columns.map((col) => utils.parseSqlIdentifier(col, "column name"));
1371
+ const paramNames = columns.map((_, i) => `@param${i}`);
1372
+ const insertSql = `INSERT INTO ${getTableName({ indexName: tableName, schemaName: getSchemaName(this.schemaName) })} (${parsedColumns.map((c) => `[${c}]`).join(", ")}) VALUES (${paramNames.join(", ")})`;
1373
+ const request = transaction ? transaction.request() : this.pool.request();
1374
+ columns.forEach((col, i) => {
1375
+ const value = record[col];
1376
+ const preparedValue = this.prepareValue(value, col, tableName);
1377
+ if (preparedValue instanceof Date) {
1378
+ request.input(`param${i}`, sql2__default.default.DateTime2, preparedValue);
1379
+ } else if (preparedValue === null || preparedValue === void 0) {
1380
+ request.input(`param${i}`, this.getMssqlType(tableName, col), null);
1381
+ } else {
1382
+ request.input(`param${i}`, preparedValue);
1383
+ }
1384
+ });
1385
+ await request.query(insertSql);
1168
1386
  } catch (error$1) {
1169
1387
  throw new error.MastraError(
1170
1388
  {
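Editor's note: the rewritten insert() no longer serializes values ad hoc; every column goes through prepareValue() (Dates pass through, booleans become 0/1, jsonb and other objects are stringified), and null/undefined values are bound with an explicit mssql type from getMssqlType() so the driver does not have to guess. A few illustrative calls on a hypothetical StoreOperationsMSSQL instance `ops`; the column/type pairings are assumptions about the schemas in @mastra/core/storage.

// Illustrative only: what prepareValue() returns per declared column type.
ops.prepareValue(new Date('2025-01-01'), 'createdAt', storage.TABLE_THREADS); // -> Date (later bound as DateTime2)
ops.prepareValue(true, 'isEvent', storage.TABLE_AI_SPANS);                    // -> 1 (assuming isEvent is a boolean/BIT column)
ops.prepareValue({ a: 1 }, 'metadata', storage.TABLE_THREADS);                // -> '{"a":1}' (objects are stringified)
ops.prepareValue(null, 'title', storage.TABLE_THREADS);                       // -> null (then bound with getMssqlType's explicit type)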
@@ -1185,7 +1403,7 @@ var StoreOperationsMSSQL = class extends storage.StoreOperations {
1185
1403
  try {
1186
1404
  await this.pool.request().query(`TRUNCATE TABLE ${fullTableName}`);
1187
1405
  } catch (truncateError) {
1188
- if (truncateError.message && truncateError.message.includes("foreign key")) {
1406
+ if (truncateError?.number === 4712) {
1189
1407
  await this.pool.request().query(`DELETE FROM ${fullTableName}`);
1190
1408
  } else {
1191
1409
  throw truncateError;
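Editor's note: the TRUNCATE-to-DELETE fallback now keys off the driver-reported error number instead of substring-matching the message. SQL Server raises error 4712 when a table referenced by a FOREIGN KEY constraint cannot be truncated, so the numeric check is locale-independent. A sketch of the same pattern in isolation; `pool` and `fullTableName` are stand-ins for the surrounding method's variables.

// Illustrative only: prefer the numeric SQL Server error code over message matching.
try {
  await pool.request().query(`TRUNCATE TABLE ${fullTableName}`);
} catch (err) {
  if (err?.number === 4712) {
    // 4712: table is referenced by a FOREIGN KEY constraint, so fall back to DELETE
    await pool.request().query(`DELETE FROM ${fullTableName}`);
  } else {
    throw err;
  }
}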
@@ -1208,9 +1426,11 @@ var StoreOperationsMSSQL = class extends storage.StoreOperations {
1208
1426
  getDefaultValue(type) {
1209
1427
  switch (type) {
1210
1428
  case "timestamp":
1211
- return "DEFAULT SYSDATETIMEOFFSET()";
1429
+ return "DEFAULT SYSUTCDATETIME()";
1212
1430
  case "jsonb":
1213
1431
  return "DEFAULT N'{}'";
1432
+ case "boolean":
1433
+ return "DEFAULT 0";
1214
1434
  default:
1215
1435
  return super.getDefaultValue(type);
1216
1436
  }
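Editor's note: getSqlType() now takes a third useLargeStorage flag: ordinary text columns shrink to NVARCHAR(400) (small enough to index), primary keys keep NVARCHAR(255), and known large-payload columns such as content, snapshot, and workingMemory opt into NVARCHAR(MAX) via the largeDataColumns lists used in createTable/alterTable below. A quick sketch of the mapping on a hypothetical StoreOperationsMSSQL instance `ops`, with results read off the switch statement above.

// Illustrative only: how the new sizing flags map to SQL Server types.
ops.getSqlType('text');              // -> 'NVARCHAR(400)'  (default text column)
ops.getSqlType('text', true);        // -> 'NVARCHAR(255)'  (primary key / indexed column)
ops.getSqlType('text', false, true); // -> 'NVARCHAR(MAX)'  (large-payload column)
ops.getSqlType('boolean');           // -> 'BIT'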
@@ -1221,13 +1441,29 @@ var StoreOperationsMSSQL = class extends storage.StoreOperations {
1221
1441
  }) {
1222
1442
  try {
1223
1443
  const uniqueConstraintColumns = tableName === storage.TABLE_WORKFLOW_SNAPSHOT ? ["workflow_name", "run_id"] : [];
1444
+ const largeDataColumns = [
1445
+ "workingMemory",
1446
+ "snapshot",
1447
+ "metadata",
1448
+ "content",
1449
+ // messages.content - can be very long conversation content
1450
+ "input",
1451
+ // evals.input - test input data
1452
+ "output",
1453
+ // evals.output - test output data
1454
+ "instructions",
1455
+ // evals.instructions - evaluation instructions
1456
+ "other"
1457
+ // traces.other - additional trace data
1458
+ ];
1224
1459
  const columns = Object.entries(schema).map(([name, def]) => {
1225
1460
  const parsedName = utils.parseSqlIdentifier(name, "column name");
1226
1461
  const constraints = [];
1227
1462
  if (def.primaryKey) constraints.push("PRIMARY KEY");
1228
1463
  if (!def.nullable) constraints.push("NOT NULL");
1229
1464
  const isIndexed = !!def.primaryKey || uniqueConstraintColumns.includes(name);
1230
- return `[${parsedName}] ${this.getSqlType(def.type, isIndexed)} ${constraints.join(" ")}`.trim();
1465
+ const useLargeStorage = largeDataColumns.includes(name);
1466
+ return `[${parsedName}] ${this.getSqlType(def.type, isIndexed, useLargeStorage)} ${constraints.join(" ")}`.trim();
1231
1467
  }).join(",\n");
1232
1468
  if (this.schemaName) {
1233
1469
  await this.setupSchema();
@@ -1314,7 +1550,19 @@ ${columns}
1314
1550
  const columnExists = Array.isArray(checkResult.recordset) && checkResult.recordset.length > 0;
1315
1551
  if (!columnExists) {
1316
1552
  const columnDef = schema[columnName];
1317
- const sqlType = this.getSqlType(columnDef.type);
1553
+ const largeDataColumns = [
1554
+ "workingMemory",
1555
+ "snapshot",
1556
+ "metadata",
1557
+ "content",
1558
+ "input",
1559
+ "output",
1560
+ "instructions",
1561
+ "other"
1562
+ ];
1563
+ const useLargeStorage = largeDataColumns.includes(columnName);
1564
+ const isIndexed = !!columnDef.primaryKey;
1565
+ const sqlType = this.getSqlType(columnDef.type, isIndexed, useLargeStorage);
1318
1566
  const nullable = columnDef.nullable === false ? "NOT NULL" : "";
1319
1567
  const defaultValue = columnDef.nullable === false ? this.getDefaultValue(columnDef.type) : "";
1320
1568
  const parsedColumnName = utils.parseSqlIdentifier(columnName, "column name");
@@ -1342,13 +1590,17 @@ ${columns}
1342
1590
  try {
1343
1591
  const keyEntries = Object.entries(keys).map(([key, value]) => [utils.parseSqlIdentifier(key, "column name"), value]);
1344
1592
  const conditions = keyEntries.map(([key], i) => `[${key}] = @param${i}`).join(" AND ");
1345
- const values = keyEntries.map(([_, value]) => value);
1346
- const sql7 = `SELECT * FROM ${getTableName({ indexName: tableName, schemaName: getSchemaName(this.schemaName) })} WHERE ${conditions}`;
1593
+ const sql5 = `SELECT * FROM ${getTableName({ indexName: tableName, schemaName: getSchemaName(this.schemaName) })} WHERE ${conditions}`;
1347
1594
  const request = this.pool.request();
1348
- values.forEach((value, i) => {
1349
- request.input(`param${i}`, value);
1595
+ keyEntries.forEach(([key, value], i) => {
1596
+ const preparedValue = this.prepareValue(value, key, tableName);
1597
+ if (preparedValue === null || preparedValue === void 0) {
1598
+ request.input(`param${i}`, this.getMssqlType(tableName, key), null);
1599
+ } else {
1600
+ request.input(`param${i}`, preparedValue);
1601
+ }
1350
1602
  });
1351
- const resultSet = await request.query(sql7);
1603
+ const resultSet = await request.query(sql5);
1352
1604
  const result = resultSet.recordset[0] || null;
1353
1605
  if (!result) {
1354
1606
  return null;
@@ -1380,63 +1632,599 @@ ${columns}
1380
1632
  try {
1381
1633
  await transaction.begin();
1382
1634
  for (const record of records) {
1383
- await this.insert({ tableName, record });
1635
+ await this.insert({ tableName, record, transaction });
1384
1636
  }
1385
1637
  await transaction.commit();
1386
1638
  } catch (error$1) {
1387
- await transaction.rollback();
1639
+ await transaction.rollback();
1640
+ throw new error.MastraError(
1641
+ {
1642
+ id: "MASTRA_STORAGE_MSSQL_STORE_BATCH_INSERT_FAILED",
1643
+ domain: error.ErrorDomain.STORAGE,
1644
+ category: error.ErrorCategory.THIRD_PARTY,
1645
+ details: {
1646
+ tableName,
1647
+ numberOfRecords: records.length
1648
+ }
1649
+ },
1650
+ error$1
1651
+ );
1652
+ }
1653
+ }
1654
+ async dropTable({ tableName }) {
1655
+ try {
1656
+ const tableNameWithSchema = getTableName({ indexName: tableName, schemaName: getSchemaName(this.schemaName) });
1657
+ await this.pool.request().query(`DROP TABLE IF EXISTS ${tableNameWithSchema}`);
1658
+ } catch (error$1) {
1659
+ throw new error.MastraError(
1660
+ {
1661
+ id: "MASTRA_STORAGE_MSSQL_STORE_DROP_TABLE_FAILED",
1662
+ domain: error.ErrorDomain.STORAGE,
1663
+ category: error.ErrorCategory.THIRD_PARTY,
1664
+ details: {
1665
+ tableName
1666
+ }
1667
+ },
1668
+ error$1
1669
+ );
1670
+ }
1671
+ }
1672
+ /**
1673
+ * Prepares a value for database operations, handling Date objects and JSON serialization
1674
+ */
1675
+ prepareValue(value, columnName, tableName) {
1676
+ if (value === null || value === void 0) {
1677
+ return value;
1678
+ }
1679
+ if (value instanceof Date) {
1680
+ return value;
1681
+ }
1682
+ const schema = storage.TABLE_SCHEMAS[tableName];
1683
+ const columnSchema = schema?.[columnName];
1684
+ if (columnSchema?.type === "boolean") {
1685
+ return value ? 1 : 0;
1686
+ }
1687
+ if (columnSchema?.type === "jsonb") {
1688
+ return JSON.stringify(value);
1689
+ }
1690
+ if (typeof value === "object") {
1691
+ return JSON.stringify(value);
1692
+ }
1693
+ return value;
1694
+ }
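// Illustrative sketch (not part of the published diff): the value mapping performed by
// prepareValue above, restated as a standalone function so the rules are easy to read.
// `columnType` stands for the TABLE_SCHEMAS type of the column ('boolean', 'jsonb', 'text', ...);
// the function name is hypothetical.
function sketchPrepareValue(value, columnType) {
  if (value === null || value === undefined) return value; // nulls are bound later with an explicit mssql type
  if (value instanceof Date) return value;                 // Dates pass through to the driver untouched
  if (columnType === 'boolean') return value ? 1 : 0;      // BIT columns take 1/0
  if (columnType === 'jsonb' || typeof value === 'object') return JSON.stringify(value); // JSON stored as NVARCHAR text
  return value;                                            // plain scalars are unchanged
}
// e.g. sketchPrepareValue({ foo: 'bar' }, 'jsonb') === '{"foo":"bar"}'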
1695
+ /**
1696
+ * Maps TABLE_SCHEMAS types to mssql param types (used when value is null)
1697
+ */
1698
+ getMssqlType(tableName, columnName) {
1699
+ const col = storage.TABLE_SCHEMAS[tableName]?.[columnName];
1700
+ switch (col?.type) {
1701
+ case "text":
1702
+ return sql2__default.default.NVarChar;
1703
+ case "timestamp":
1704
+ return sql2__default.default.DateTime2;
1705
+ case "uuid":
1706
+ return sql2__default.default.UniqueIdentifier;
1707
+ case "jsonb":
1708
+ return sql2__default.default.NVarChar;
1709
+ case "integer":
1710
+ return sql2__default.default.Int;
1711
+ case "bigint":
1712
+ return sql2__default.default.BigInt;
1713
+ case "float":
1714
+ return sql2__default.default.Float;
1715
+ case "boolean":
1716
+ return sql2__default.default.Bit;
1717
+ default:
1718
+ return sql2__default.default.NVarChar;
1719
+ }
1720
+ }
1721
+ /**
1722
+ * Update a single record in the database
1723
+ */
1724
+ async update({
1725
+ tableName,
1726
+ keys,
1727
+ data,
1728
+ transaction
1729
+ }) {
1730
+ try {
1731
+ if (!data || Object.keys(data).length === 0) {
1732
+ throw new error.MastraError({
1733
+ id: "MASTRA_STORAGE_MSSQL_UPDATE_EMPTY_DATA",
1734
+ domain: error.ErrorDomain.STORAGE,
1735
+ category: error.ErrorCategory.USER,
1736
+ text: "Cannot update with empty data payload"
1737
+ });
1738
+ }
1739
+ if (!keys || Object.keys(keys).length === 0) {
1740
+ throw new error.MastraError({
1741
+ id: "MASTRA_STORAGE_MSSQL_UPDATE_EMPTY_KEYS",
1742
+ domain: error.ErrorDomain.STORAGE,
1743
+ category: error.ErrorCategory.USER,
1744
+ text: "Cannot update without keys to identify records"
1745
+ });
1746
+ }
1747
+ const setClauses = [];
1748
+ const request = transaction ? transaction.request() : this.pool.request();
1749
+ let paramIndex = 0;
1750
+ Object.entries(data).forEach(([key, value]) => {
1751
+ const parsedKey = utils.parseSqlIdentifier(key, "column name");
1752
+ const paramName = `set${paramIndex++}`;
1753
+ setClauses.push(`[${parsedKey}] = @${paramName}`);
1754
+ const preparedValue = this.prepareValue(value, key, tableName);
1755
+ if (preparedValue === null || preparedValue === void 0) {
1756
+ request.input(paramName, this.getMssqlType(tableName, key), null);
1757
+ } else {
1758
+ request.input(paramName, preparedValue);
1759
+ }
1760
+ });
1761
+ const whereConditions = [];
1762
+ Object.entries(keys).forEach(([key, value]) => {
1763
+ const parsedKey = utils.parseSqlIdentifier(key, "column name");
1764
+ const paramName = `where${paramIndex++}`;
1765
+ whereConditions.push(`[${parsedKey}] = @${paramName}`);
1766
+ const preparedValue = this.prepareValue(value, key, tableName);
1767
+ if (preparedValue === null || preparedValue === void 0) {
1768
+ request.input(paramName, this.getMssqlType(tableName, key), null);
1769
+ } else {
1770
+ request.input(paramName, preparedValue);
1771
+ }
1772
+ });
1773
+ const tableName_ = getTableName({
1774
+ indexName: tableName,
1775
+ schemaName: getSchemaName(this.schemaName)
1776
+ });
1777
+ const updateSql = `UPDATE ${tableName_} SET ${setClauses.join(", ")} WHERE ${whereConditions.join(" AND ")}`;
1778
+ await request.query(updateSql);
1779
+ } catch (error$1) {
1780
+ throw new error.MastraError(
1781
+ {
1782
+ id: "MASTRA_STORAGE_MSSQL_STORE_UPDATE_FAILED",
1783
+ domain: error.ErrorDomain.STORAGE,
1784
+ category: error.ErrorCategory.THIRD_PARTY,
1785
+ details: {
1786
+ tableName
1787
+ }
1788
+ },
1789
+ error$1
1790
+ );
1791
+ }
1792
+ }
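// Hedged usage sketch (not part of the published diff): calling the new update() helper.
// Assumes an initialized StoreOperationsMSSQL instance `ops`; the column names shown are
// illustrative and must exist in TABLE_SCHEMAS for the target table. Empty `keys` or `data`
// objects throw a USER-category MastraError, as implemented above.
async function renameThread(ops, threadId, title) {
  await ops.update({
    tableName: storage.TABLE_THREADS,        // table constant from @mastra/core/storage
    keys: { id: threadId },                  // WHERE [id] = @where1
    data: { title, updatedAt: new Date() },  // SET [title] = @set0, [updatedAt] = @set1
  });
}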
1793
+ /**
1794
+ * Update multiple records in a single batch transaction
1795
+ */
1796
+ async batchUpdate({
1797
+ tableName,
1798
+ updates
1799
+ }) {
1800
+ const transaction = this.pool.transaction();
1801
+ try {
1802
+ await transaction.begin();
1803
+ for (const { keys, data } of updates) {
1804
+ await this.update({ tableName, keys, data, transaction });
1805
+ }
1806
+ await transaction.commit();
1807
+ } catch (error$1) {
1808
+ await transaction.rollback();
1809
+ throw new error.MastraError(
1810
+ {
1811
+ id: "MASTRA_STORAGE_MSSQL_STORE_BATCH_UPDATE_FAILED",
1812
+ domain: error.ErrorDomain.STORAGE,
1813
+ category: error.ErrorCategory.THIRD_PARTY,
1814
+ details: {
1815
+ tableName,
1816
+ numberOfRecords: updates.length
1817
+ }
1818
+ },
1819
+ error$1
1820
+ );
1821
+ }
1822
+ }
1823
+ /**
1824
+ * Delete multiple records by keys
1825
+ */
1826
+ async batchDelete({ tableName, keys }) {
1827
+ if (keys.length === 0) {
1828
+ return;
1829
+ }
1830
+ const tableName_ = getTableName({
1831
+ indexName: tableName,
1832
+ schemaName: getSchemaName(this.schemaName)
1833
+ });
1834
+ const transaction = this.pool.transaction();
1835
+ try {
1836
+ await transaction.begin();
1837
+ for (const keySet of keys) {
1838
+ const conditions = [];
1839
+ const request = transaction.request();
1840
+ let paramIndex = 0;
1841
+ Object.entries(keySet).forEach(([key, value]) => {
1842
+ const parsedKey = utils.parseSqlIdentifier(key, "column name");
1843
+ const paramName = `p${paramIndex++}`;
1844
+ conditions.push(`[${parsedKey}] = @${paramName}`);
1845
+ const preparedValue = this.prepareValue(value, key, tableName);
1846
+ if (preparedValue === null || preparedValue === void 0) {
1847
+ request.input(paramName, this.getMssqlType(tableName, key), null);
1848
+ } else {
1849
+ request.input(paramName, preparedValue);
1850
+ }
1851
+ });
1852
+ const deleteSql = `DELETE FROM ${tableName_} WHERE ${conditions.join(" AND ")}`;
1853
+ await request.query(deleteSql);
1854
+ }
1855
+ await transaction.commit();
1856
+ } catch (error$1) {
1857
+ await transaction.rollback();
1858
+ throw new error.MastraError(
1859
+ {
1860
+ id: "MASTRA_STORAGE_MSSQL_STORE_BATCH_DELETE_FAILED",
1861
+ domain: error.ErrorDomain.STORAGE,
1862
+ category: error.ErrorCategory.THIRD_PARTY,
1863
+ details: {
1864
+ tableName,
1865
+ numberOfRecords: keys.length
1866
+ }
1867
+ },
1868
+ error$1
1869
+ );
1870
+ }
1871
+ }
1872
+ /**
1873
+ * Create a new index on a table
1874
+ */
1875
+ async createIndex(options) {
1876
+ try {
1877
+ const { name, table, columns, unique = false, where } = options;
1878
+ const schemaName = this.schemaName || "dbo";
1879
+ const fullTableName = getTableName({
1880
+ indexName: table,
1881
+ schemaName: getSchemaName(this.schemaName)
1882
+ });
1883
+ const indexNameSafe = utils.parseSqlIdentifier(name, "index name");
1884
+ const checkRequest = this.pool.request();
1885
+ checkRequest.input("indexName", indexNameSafe);
1886
+ checkRequest.input("schemaName", schemaName);
1887
+ checkRequest.input("tableName", table);
1888
+ const indexExists = await checkRequest.query(`
1889
+ SELECT 1 as found
1890
+ FROM sys.indexes i
1891
+ INNER JOIN sys.tables t ON i.object_id = t.object_id
1892
+ INNER JOIN sys.schemas s ON t.schema_id = s.schema_id
1893
+ WHERE i.name = @indexName
1894
+ AND s.name = @schemaName
1895
+ AND t.name = @tableName
1896
+ `);
1897
+ if (indexExists.recordset && indexExists.recordset.length > 0) {
1898
+ return;
1899
+ }
1900
+ const uniqueStr = unique ? "UNIQUE " : "";
1901
+ const columnsStr = columns.map((col) => {
1902
+ if (col.includes(" DESC") || col.includes(" ASC")) {
1903
+ const [colName, ...modifiers] = col.split(" ");
1904
+ if (!colName) {
1905
+ throw new Error(`Invalid column specification: ${col}`);
1906
+ }
1907
+ return `[${utils.parseSqlIdentifier(colName, "column name")}] ${modifiers.join(" ")}`;
1908
+ }
1909
+ return `[${utils.parseSqlIdentifier(col, "column name")}]`;
1910
+ }).join(", ");
1911
+ const whereStr = where ? ` WHERE ${where}` : "";
1912
+ const createIndexSql = `CREATE ${uniqueStr}INDEX [${indexNameSafe}] ON ${fullTableName} (${columnsStr})${whereStr}`;
1913
+ await this.pool.request().query(createIndexSql);
1914
+ } catch (error$1) {
1915
+ throw new error.MastraError(
1916
+ {
1917
+ id: "MASTRA_STORAGE_MSSQL_INDEX_CREATE_FAILED",
1918
+ domain: error.ErrorDomain.STORAGE,
1919
+ category: error.ErrorCategory.THIRD_PARTY,
1920
+ details: {
1921
+ indexName: options.name,
1922
+ tableName: options.table
1923
+ }
1924
+ },
1925
+ error$1
1926
+ );
1927
+ }
1928
+ }
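// Hedged usage sketch (not from the package): the options shape accepted by createIndex() above.
// Creation is a no-op when an index with the same name already exists in the schema. `ops` is
// assumed to be an initialized StoreOperationsMSSQL; the MSSQLStore wrapper added later in this
// diff delegates to the same method. The index name here is hypothetical.
async function addThreadLookupIndex(ops) {
  await ops.createIndex({
    name: 'my_threads_resourceid_idx',
    table: storage.TABLE_THREADS,
    columns: ['resourceId', 'seq_id DESC'],  // per-column ASC/DESC modifiers are allowed
    unique: false,
    // where: '[resourceId] IS NOT NULL',    // optional filtered-index predicate, passed through verbatim
  });
}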
1929
+ /**
1930
+ * Drop an existing index
1931
+ */
1932
+ async dropIndex(indexName) {
1933
+ try {
1934
+ const schemaName = this.schemaName || "dbo";
1935
+ const indexNameSafe = utils.parseSqlIdentifier(indexName, "index name");
1936
+ const checkRequest = this.pool.request();
1937
+ checkRequest.input("indexName", indexNameSafe);
1938
+ checkRequest.input("schemaName", schemaName);
1939
+ const result = await checkRequest.query(`
1940
+ SELECT t.name as table_name
1941
+ FROM sys.indexes i
1942
+ INNER JOIN sys.tables t ON i.object_id = t.object_id
1943
+ INNER JOIN sys.schemas s ON t.schema_id = s.schema_id
1944
+ WHERE i.name = @indexName
1945
+ AND s.name = @schemaName
1946
+ `);
1947
+ if (!result.recordset || result.recordset.length === 0) {
1948
+ return;
1949
+ }
1950
+ if (result.recordset.length > 1) {
1951
+ const tables = result.recordset.map((r) => r.table_name).join(", ");
1952
+ throw new error.MastraError({
1953
+ id: "MASTRA_STORAGE_MSSQL_INDEX_AMBIGUOUS",
1954
+ domain: error.ErrorDomain.STORAGE,
1955
+ category: error.ErrorCategory.USER,
1956
+ text: `Index "${indexNameSafe}" exists on multiple tables (${tables}) in schema "${schemaName}". Please drop indexes manually or ensure unique index names.`
1957
+ });
1958
+ }
1959
+ const tableName = result.recordset[0].table_name;
1960
+ const fullTableName = getTableName({
1961
+ indexName: tableName,
1962
+ schemaName: getSchemaName(this.schemaName)
1963
+ });
1964
+ const dropSql = `DROP INDEX [${indexNameSafe}] ON ${fullTableName}`;
1965
+ await this.pool.request().query(dropSql);
1966
+ } catch (error$1) {
1967
+ throw new error.MastraError(
1968
+ {
1969
+ id: "MASTRA_STORAGE_MSSQL_INDEX_DROP_FAILED",
1970
+ domain: error.ErrorDomain.STORAGE,
1971
+ category: error.ErrorCategory.THIRD_PARTY,
1972
+ details: {
1973
+ indexName
1974
+ }
1975
+ },
1976
+ error$1
1977
+ );
1978
+ }
1979
+ }
1980
+ /**
1981
+ * List indexes for a specific table or all tables
1982
+ */
1983
+ async listIndexes(tableName) {
1984
+ try {
1985
+ const schemaName = this.schemaName || "dbo";
1986
+ let query;
1987
+ const request = this.pool.request();
1988
+ request.input("schemaName", schemaName);
1989
+ if (tableName) {
1990
+ query = `
1991
+ SELECT
1992
+ i.name as name,
1993
+ o.name as [table],
1994
+ i.is_unique as is_unique,
1995
+ CAST(SUM(s.used_page_count) * 8 / 1024.0 AS VARCHAR(50)) + ' MB' as size
1996
+ FROM sys.indexes i
1997
+ INNER JOIN sys.objects o ON i.object_id = o.object_id
1998
+ INNER JOIN sys.schemas sch ON o.schema_id = sch.schema_id
1999
+ LEFT JOIN sys.dm_db_partition_stats s ON i.object_id = s.object_id AND i.index_id = s.index_id
2000
+ WHERE sch.name = @schemaName
2001
+ AND o.name = @tableName
2002
+ AND i.name IS NOT NULL
2003
+ GROUP BY i.name, o.name, i.is_unique
2004
+ `;
2005
+ request.input("tableName", tableName);
2006
+ } else {
2007
+ query = `
2008
+ SELECT
2009
+ i.name as name,
2010
+ o.name as [table],
2011
+ i.is_unique as is_unique,
2012
+ CAST(SUM(s.used_page_count) * 8 / 1024.0 AS VARCHAR(50)) + ' MB' as size
2013
+ FROM sys.indexes i
2014
+ INNER JOIN sys.objects o ON i.object_id = o.object_id
2015
+ INNER JOIN sys.schemas sch ON o.schema_id = sch.schema_id
2016
+ LEFT JOIN sys.dm_db_partition_stats s ON i.object_id = s.object_id AND i.index_id = s.index_id
2017
+ WHERE sch.name = @schemaName
2018
+ AND i.name IS NOT NULL
2019
+ GROUP BY i.name, o.name, i.is_unique
2020
+ `;
2021
+ }
2022
+ const result = await request.query(query);
2023
+ const indexes = [];
2024
+ for (const row of result.recordset) {
2025
+ const colRequest = this.pool.request();
2026
+ colRequest.input("indexName", row.name);
2027
+ colRequest.input("schemaName", schemaName);
2028
+ const colResult = await colRequest.query(`
2029
+ SELECT c.name as column_name
2030
+ FROM sys.indexes i
2031
+ INNER JOIN sys.index_columns ic ON i.object_id = ic.object_id AND i.index_id = ic.index_id
2032
+ INNER JOIN sys.columns c ON ic.object_id = c.object_id AND ic.column_id = c.column_id
2033
+ INNER JOIN sys.objects o ON i.object_id = o.object_id
2034
+ INNER JOIN sys.schemas s ON o.schema_id = s.schema_id
2035
+ WHERE i.name = @indexName
2036
+ AND s.name = @schemaName
2037
+ ORDER BY ic.key_ordinal
2038
+ `);
2039
+ indexes.push({
2040
+ name: row.name,
2041
+ table: row.table,
2042
+ columns: colResult.recordset.map((c) => c.column_name),
2043
+ unique: row.is_unique || false,
2044
+ size: row.size || "0 MB",
2045
+ definition: ""
2046
+ // MSSQL doesn't store definition like PG
2047
+ });
2048
+ }
2049
+ return indexes;
2050
+ } catch (error$1) {
2051
+ throw new error.MastraError(
2052
+ {
2053
+ id: "MASTRA_STORAGE_MSSQL_INDEX_LIST_FAILED",
2054
+ domain: error.ErrorDomain.STORAGE,
2055
+ category: error.ErrorCategory.THIRD_PARTY,
2056
+ details: tableName ? {
2057
+ tableName
2058
+ } : {}
2059
+ },
2060
+ error$1
2061
+ );
2062
+ }
2063
+ }
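// Hedged sketch (not from the package) of what listIndexes() above returns: one entry per named
// index, with size derived from sys.dm_db_partition_stats and `definition` always an empty string
// because SQL Server does not keep a CREATE INDEX text the way Postgres does.
async function logIndexes(ops) {
  const indexes = await ops.listIndexes(storage.TABLE_MESSAGES); // omit the argument to list all tables
  for (const ix of indexes) {
    // e.g. { name: '...', table: '...', columns: ['thread_id', 'seq_id'], unique: false, size: '0.27 MB', definition: '' }
    console.log(ix.name, ix.table, ix.columns, ix.unique, ix.size);
  }
}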
2064
+ /**
2065
+ * Get detailed statistics for a specific index
2066
+ */
2067
+ async describeIndex(indexName) {
2068
+ try {
2069
+ const schemaName = this.schemaName || "dbo";
2070
+ const request = this.pool.request();
2071
+ request.input("indexName", indexName);
2072
+ request.input("schemaName", schemaName);
2073
+ const query = `
2074
+ SELECT
2075
+ i.name as name,
2076
+ o.name as [table],
2077
+ i.is_unique as is_unique,
2078
+ CAST(SUM(s.used_page_count) * 8 / 1024.0 AS VARCHAR(50)) + ' MB' as size,
2079
+ i.type_desc as method,
2080
+ ISNULL(us.user_scans, 0) as scans,
2081
+ ISNULL(us.user_seeks + us.user_scans, 0) as tuples_read,
2082
+ ISNULL(us.user_lookups, 0) as tuples_fetched
2083
+ FROM sys.indexes i
2084
+ INNER JOIN sys.objects o ON i.object_id = o.object_id
2085
+ INNER JOIN sys.schemas sch ON o.schema_id = sch.schema_id
2086
+ LEFT JOIN sys.dm_db_partition_stats s ON i.object_id = s.object_id AND i.index_id = s.index_id
2087
+ LEFT JOIN sys.dm_db_index_usage_stats us ON i.object_id = us.object_id AND i.index_id = us.index_id
2088
+ WHERE i.name = @indexName
2089
+ AND sch.name = @schemaName
2090
+ GROUP BY i.name, o.name, i.is_unique, i.type_desc, us.user_seeks, us.user_scans, us.user_lookups
2091
+ `;
2092
+ const result = await request.query(query);
2093
+ if (!result.recordset || result.recordset.length === 0) {
2094
+ throw new Error(`Index "${indexName}" not found in schema "${schemaName}"`);
2095
+ }
2096
+ const row = result.recordset[0];
2097
+ const colRequest = this.pool.request();
2098
+ colRequest.input("indexName", indexName);
2099
+ colRequest.input("schemaName", schemaName);
2100
+ const colResult = await colRequest.query(`
2101
+ SELECT c.name as column_name
2102
+ FROM sys.indexes i
2103
+ INNER JOIN sys.index_columns ic ON i.object_id = ic.object_id AND i.index_id = ic.index_id
2104
+ INNER JOIN sys.columns c ON ic.object_id = c.object_id AND ic.column_id = c.column_id
2105
+ INNER JOIN sys.objects o ON i.object_id = o.object_id
2106
+ INNER JOIN sys.schemas s ON o.schema_id = s.schema_id
2107
+ WHERE i.name = @indexName
2108
+ AND s.name = @schemaName
2109
+ ORDER BY ic.key_ordinal
2110
+ `);
2111
+ return {
2112
+ name: row.name,
2113
+ table: row.table,
2114
+ columns: colResult.recordset.map((c) => c.column_name),
2115
+ unique: row.is_unique || false,
2116
+ size: row.size || "0 MB",
2117
+ definition: "",
2118
+ method: row.method?.toLowerCase() || "nonclustered",
2119
+ scans: Number(row.scans) || 0,
2120
+ tuples_read: Number(row.tuples_read) || 0,
2121
+ tuples_fetched: Number(row.tuples_fetched) || 0
2122
+ };
2123
+ } catch (error$1) {
1388
2124
  throw new error.MastraError(
1389
2125
  {
1390
- id: "MASTRA_STORAGE_MSSQL_STORE_BATCH_INSERT_FAILED",
2126
+ id: "MASTRA_STORAGE_MSSQL_INDEX_DESCRIBE_FAILED",
1391
2127
  domain: error.ErrorDomain.STORAGE,
1392
2128
  category: error.ErrorCategory.THIRD_PARTY,
1393
2129
  details: {
1394
- tableName,
1395
- numberOfRecords: records.length
2130
+ indexName
1396
2131
  }
1397
2132
  },
1398
2133
  error$1
1399
2134
  );
1400
2135
  }
1401
2136
  }
1402
- async dropTable({ tableName }) {
2137
+ /**
2138
+ * Returns definitions for automatic performance indexes
2139
+ * IMPORTANT: Uses seq_id DESC instead of createdAt DESC for MSSQL due to millisecond accuracy limitations
2140
+ * NOTE: Using NVARCHAR(400) for text columns (800 bytes) leaves room for composite indexes
2141
+ */
2142
+ getAutomaticIndexDefinitions() {
2143
+ const schemaPrefix = this.schemaName ? `${this.schemaName}_` : "";
2144
+ return [
2145
+ // Composite indexes for optimal filtering + sorting performance
2146
+ // NVARCHAR(400) = 800 bytes, plus BIGINT (8 bytes) = 808 bytes total (under 900-byte limit)
2147
+ {
2148
+ name: `${schemaPrefix}mastra_threads_resourceid_seqid_idx`,
2149
+ table: storage.TABLE_THREADS,
2150
+ columns: ["resourceId", "seq_id DESC"]
2151
+ },
2152
+ {
2153
+ name: `${schemaPrefix}mastra_messages_thread_id_seqid_idx`,
2154
+ table: storage.TABLE_MESSAGES,
2155
+ columns: ["thread_id", "seq_id DESC"]
2156
+ },
2157
+ {
2158
+ name: `${schemaPrefix}mastra_traces_name_seqid_idx`,
2159
+ table: storage.TABLE_TRACES,
2160
+ columns: ["name", "seq_id DESC"]
2161
+ },
2162
+ {
2163
+ name: `${schemaPrefix}mastra_scores_trace_id_span_id_seqid_idx`,
2164
+ table: storage.TABLE_SCORERS,
2165
+ columns: ["traceId", "spanId", "seq_id DESC"]
2166
+ },
2167
+ // Spans indexes for optimal trace querying
2168
+ {
2169
+ name: `${schemaPrefix}mastra_ai_spans_traceid_startedat_idx`,
2170
+ table: storage.TABLE_AI_SPANS,
2171
+ columns: ["traceId", "startedAt DESC"]
2172
+ },
2173
+ {
2174
+ name: `${schemaPrefix}mastra_ai_spans_parentspanid_startedat_idx`,
2175
+ table: storage.TABLE_AI_SPANS,
2176
+ columns: ["parentSpanId", "startedAt DESC"]
2177
+ },
2178
+ {
2179
+ name: `${schemaPrefix}mastra_ai_spans_name_idx`,
2180
+ table: storage.TABLE_AI_SPANS,
2181
+ columns: ["name"]
2182
+ },
2183
+ {
2184
+ name: `${schemaPrefix}mastra_ai_spans_spantype_startedat_idx`,
2185
+ table: storage.TABLE_AI_SPANS,
2186
+ columns: ["spanType", "startedAt DESC"]
2187
+ }
2188
+ ];
2189
+ }
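// Worked check (hedged, not from the package) of the key-size comment above: NVARCHAR stores
// 2 bytes per character, so an NVARCHAR(400) key column contributes at most 400 * 2 = 800 bytes,
// and the BIGINT seq_id adds 8 bytes, giving 808 bytes per composite key. With schemaName = 'app',
// the first definition above resolves to:
//   { name: 'app_mastra_threads_resourceid_seqid_idx', table: TABLE_THREADS, columns: ['resourceId', 'seq_id DESC'] }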
2190
+ /**
2191
+ * Creates automatic indexes for optimal query performance
2192
+ * Uses getAutomaticIndexDefinitions() to determine which indexes to create
2193
+ */
2194
+ async createAutomaticIndexes() {
1403
2195
  try {
1404
- const tableNameWithSchema = getTableName({ indexName: tableName, schemaName: getSchemaName(this.schemaName) });
1405
- await this.pool.request().query(`DROP TABLE IF EXISTS ${tableNameWithSchema}`);
2196
+ const indexes = this.getAutomaticIndexDefinitions();
2197
+ for (const indexOptions of indexes) {
2198
+ try {
2199
+ await this.createIndex(indexOptions);
2200
+ } catch (error) {
2201
+ this.logger?.warn?.(`Failed to create index ${indexOptions.name}:`, error);
2202
+ }
2203
+ }
1406
2204
  } catch (error$1) {
1407
2205
  throw new error.MastraError(
1408
2206
  {
1409
- id: "MASTRA_STORAGE_MSSQL_STORE_DROP_TABLE_FAILED",
2207
+ id: "MASTRA_STORAGE_MSSQL_STORE_CREATE_PERFORMANCE_INDEXES_FAILED",
1410
2208
  domain: error.ErrorDomain.STORAGE,
1411
- category: error.ErrorCategory.THIRD_PARTY,
1412
- details: {
1413
- tableName
1414
- }
2209
+ category: error.ErrorCategory.THIRD_PARTY
1415
2210
  },
1416
2211
  error$1
1417
2212
  );
1418
2213
  }
1419
2214
  }
1420
2215
  };
1421
- function parseJSON(jsonString) {
1422
- try {
1423
- return JSON.parse(jsonString);
1424
- } catch {
1425
- return jsonString;
1426
- }
1427
- }
1428
2216
  function transformScoreRow(row) {
1429
2217
  return {
1430
2218
  ...row,
1431
- input: parseJSON(row.input),
1432
- scorer: parseJSON(row.scorer),
1433
- preprocessStepResult: parseJSON(row.preprocessStepResult),
1434
- analyzeStepResult: parseJSON(row.analyzeStepResult),
1435
- metadata: parseJSON(row.metadata),
1436
- output: parseJSON(row.output),
1437
- additionalContext: parseJSON(row.additionalContext),
1438
- runtimeContext: parseJSON(row.runtimeContext),
1439
- entity: parseJSON(row.entity),
2219
+ input: storage.safelyParseJSON(row.input),
2220
+ scorer: storage.safelyParseJSON(row.scorer),
2221
+ preprocessStepResult: storage.safelyParseJSON(row.preprocessStepResult),
2222
+ analyzeStepResult: storage.safelyParseJSON(row.analyzeStepResult),
2223
+ metadata: storage.safelyParseJSON(row.metadata),
2224
+ output: storage.safelyParseJSON(row.output),
2225
+ additionalContext: storage.safelyParseJSON(row.additionalContext),
2226
+ requestContext: storage.safelyParseJSON(row.requestContext),
2227
+ entity: storage.safelyParseJSON(row.entity),
1440
2228
  createdAt: row.createdAt,
1441
2229
  updatedAt: row.updatedAt
1442
2230
  };
@@ -1479,6 +2267,19 @@ var ScoresMSSQL = class extends storage.ScoresStorage {
1479
2267
  }
1480
2268
  }
1481
2269
  async saveScore(score) {
2270
+ let validatedScore;
2271
+ try {
2272
+ validatedScore = evals.saveScorePayloadSchema.parse(score);
2273
+ } catch (error$1) {
2274
+ throw new error.MastraError(
2275
+ {
2276
+ id: "MASTRA_STORAGE_MSSQL_STORE_SAVE_SCORE_VALIDATION_FAILED",
2277
+ domain: error.ErrorDomain.STORAGE,
2278
+ category: error.ErrorCategory.THIRD_PARTY
2279
+ },
2280
+ error$1
2281
+ );
2282
+ }
1482
2283
  try {
1483
2284
  const scoreId = crypto.randomUUID();
1484
2285
  const {
@@ -1489,24 +2290,24 @@ var ScoresMSSQL = class extends storage.ScoresStorage {
1489
2290
  input,
1490
2291
  output,
1491
2292
  additionalContext,
1492
- runtimeContext,
2293
+ requestContext,
1493
2294
  entity,
1494
2295
  ...rest
1495
- } = score;
2296
+ } = validatedScore;
1496
2297
  await this.operations.insert({
1497
2298
  tableName: storage.TABLE_SCORERS,
1498
2299
  record: {
1499
2300
  id: scoreId,
1500
2301
  ...rest,
1501
- input: JSON.stringify(input) || "",
1502
- output: JSON.stringify(output) || "",
1503
- preprocessStepResult: preprocessStepResult ? JSON.stringify(preprocessStepResult) : null,
1504
- analyzeStepResult: analyzeStepResult ? JSON.stringify(analyzeStepResult) : null,
1505
- metadata: metadata ? JSON.stringify(metadata) : null,
1506
- additionalContext: additionalContext ? JSON.stringify(additionalContext) : null,
1507
- runtimeContext: runtimeContext ? JSON.stringify(runtimeContext) : null,
1508
- entity: entity ? JSON.stringify(entity) : null,
1509
- scorer: scorer ? JSON.stringify(scorer) : null,
2302
+ input: input || "",
2303
+ output: output || "",
2304
+ preprocessStepResult: preprocessStepResult || null,
2305
+ analyzeStepResult: analyzeStepResult || null,
2306
+ metadata: metadata || null,
2307
+ additionalContext: additionalContext || null,
2308
+ requestContext: requestContext || null,
2309
+ entity: entity || null,
2310
+ scorer: scorer || null,
1510
2311
  createdAt: (/* @__PURE__ */ new Date()).toISOString(),
1511
2312
  updatedAt: (/* @__PURE__ */ new Date()).toISOString()
1512
2313
  }
@@ -1524,41 +2325,70 @@ var ScoresMSSQL = class extends storage.ScoresStorage {
1524
2325
  );
1525
2326
  }
1526
2327
  }
1527
- async getScoresByScorerId({
2328
+ async listScoresByScorerId({
1528
2329
  scorerId,
1529
- pagination
2330
+ pagination,
2331
+ entityId,
2332
+ entityType,
2333
+ source
1530
2334
  }) {
1531
2335
  try {
1532
- const request = this.pool.request();
1533
- request.input("p1", scorerId);
1534
- const totalResult = await request.query(
1535
- `SELECT COUNT(*) as count FROM ${getTableName({ indexName: storage.TABLE_SCORERS, schemaName: getSchemaName(this.schema) })} WHERE [scorerId] = @p1`
1536
- );
2336
+ const conditions = ["[scorerId] = @p1"];
2337
+ const params = { p1: scorerId };
2338
+ let paramIndex = 2;
2339
+ if (entityId) {
2340
+ conditions.push(`[entityId] = @p${paramIndex}`);
2341
+ params[`p${paramIndex}`] = entityId;
2342
+ paramIndex++;
2343
+ }
2344
+ if (entityType) {
2345
+ conditions.push(`[entityType] = @p${paramIndex}`);
2346
+ params[`p${paramIndex}`] = entityType;
2347
+ paramIndex++;
2348
+ }
2349
+ if (source) {
2350
+ conditions.push(`[source] = @p${paramIndex}`);
2351
+ params[`p${paramIndex}`] = source;
2352
+ paramIndex++;
2353
+ }
2354
+ const whereClause = conditions.join(" AND ");
2355
+ const tableName = getTableName({ indexName: storage.TABLE_SCORERS, schemaName: getSchemaName(this.schema) });
2356
+ const countRequest = this.pool.request();
2357
+ Object.entries(params).forEach(([key, value]) => {
2358
+ countRequest.input(key, value);
2359
+ });
2360
+ const totalResult = await countRequest.query(`SELECT COUNT(*) as count FROM ${tableName} WHERE ${whereClause}`);
1537
2361
  const total = totalResult.recordset[0]?.count || 0;
2362
+ const { page, perPage: perPageInput } = pagination;
1538
2363
  if (total === 0) {
1539
2364
  return {
1540
2365
  pagination: {
1541
2366
  total: 0,
1542
- page: pagination.page,
1543
- perPage: pagination.perPage,
2367
+ page,
2368
+ perPage: perPageInput,
1544
2369
  hasMore: false
1545
2370
  },
1546
2371
  scores: []
1547
2372
  };
1548
2373
  }
2374
+ const perPage = storage.normalizePerPage(perPageInput, 100);
2375
+ const { offset: start, perPage: perPageForResponse } = storage.calculatePagination(page, perPageInput, perPage);
2376
+ const limitValue = perPageInput === false ? total : perPage;
2377
+ const end = perPageInput === false ? total : start + perPage;
1549
2378
  const dataRequest = this.pool.request();
1550
- dataRequest.input("p1", scorerId);
1551
- dataRequest.input("p2", pagination.perPage);
1552
- dataRequest.input("p3", pagination.page * pagination.perPage);
1553
- const result = await dataRequest.query(
1554
- `SELECT * FROM ${getTableName({ indexName: storage.TABLE_SCORERS, schemaName: getSchemaName(this.schema) })} WHERE [scorerId] = @p1 ORDER BY [createdAt] DESC OFFSET @p3 ROWS FETCH NEXT @p2 ROWS ONLY`
1555
- );
2379
+ Object.entries(params).forEach(([key, value]) => {
2380
+ dataRequest.input(key, value);
2381
+ });
2382
+ dataRequest.input("perPage", limitValue);
2383
+ dataRequest.input("offset", start);
2384
+ const dataQuery = `SELECT * FROM ${tableName} WHERE ${whereClause} ORDER BY [createdAt] DESC OFFSET @offset ROWS FETCH NEXT @perPage ROWS ONLY`;
2385
+ const result = await dataRequest.query(dataQuery);
1556
2386
  return {
1557
2387
  pagination: {
1558
2388
  total: Number(total),
1559
- page: pagination.page,
1560
- perPage: pagination.perPage,
1561
- hasMore: Number(total) > (pagination.page + 1) * pagination.perPage
2389
+ page,
2390
+ perPage: perPageForResponse,
2391
+ hasMore: end < total
1562
2392
  },
1563
2393
  scores: result.recordset.map((row) => transformScoreRow(row))
1564
2394
  };
@@ -1574,7 +2404,7 @@ var ScoresMSSQL = class extends storage.ScoresStorage {
1574
2404
  );
1575
2405
  }
1576
2406
  }
1577
- async getScoresByRunId({
2407
+ async listScoresByRunId({
1578
2408
  runId,
1579
2409
  pagination
1580
2410
  }) {
@@ -1585,30 +2415,35 @@ var ScoresMSSQL = class extends storage.ScoresStorage {
1585
2415
  `SELECT COUNT(*) as count FROM ${getTableName({ indexName: storage.TABLE_SCORERS, schemaName: getSchemaName(this.schema) })} WHERE [runId] = @p1`
1586
2416
  );
1587
2417
  const total = totalResult.recordset[0]?.count || 0;
2418
+ const { page, perPage: perPageInput } = pagination;
1588
2419
  if (total === 0) {
1589
2420
  return {
1590
2421
  pagination: {
1591
2422
  total: 0,
1592
- page: pagination.page,
1593
- perPage: pagination.perPage,
2423
+ page,
2424
+ perPage: perPageInput,
1594
2425
  hasMore: false
1595
2426
  },
1596
2427
  scores: []
1597
2428
  };
1598
2429
  }
2430
+ const perPage = storage.normalizePerPage(perPageInput, 100);
2431
+ const { offset: start, perPage: perPageForResponse } = storage.calculatePagination(page, perPageInput, perPage);
2432
+ const limitValue = perPageInput === false ? total : perPage;
2433
+ const end = perPageInput === false ? total : start + perPage;
1599
2434
  const dataRequest = this.pool.request();
1600
2435
  dataRequest.input("p1", runId);
1601
- dataRequest.input("p2", pagination.perPage);
1602
- dataRequest.input("p3", pagination.page * pagination.perPage);
2436
+ dataRequest.input("p2", limitValue);
2437
+ dataRequest.input("p3", start);
1603
2438
  const result = await dataRequest.query(
1604
2439
  `SELECT * FROM ${getTableName({ indexName: storage.TABLE_SCORERS, schemaName: getSchemaName(this.schema) })} WHERE [runId] = @p1 ORDER BY [createdAt] DESC OFFSET @p3 ROWS FETCH NEXT @p2 ROWS ONLY`
1605
2440
  );
1606
2441
  return {
1607
2442
  pagination: {
1608
2443
  total: Number(total),
1609
- page: pagination.page,
1610
- perPage: pagination.perPage,
1611
- hasMore: Number(total) > (pagination.page + 1) * pagination.perPage
2444
+ page,
2445
+ perPage: perPageForResponse,
2446
+ hasMore: end < total
1612
2447
  },
1613
2448
  scores: result.recordset.map((row) => transformScoreRow(row))
1614
2449
  };
@@ -1624,7 +2459,7 @@ var ScoresMSSQL = class extends storage.ScoresStorage {
1624
2459
  );
1625
2460
  }
1626
2461
  }
1627
- async getScoresByEntityId({
2462
+ async listScoresByEntityId({
1628
2463
  entityId,
1629
2464
  entityType,
1630
2465
  pagination
@@ -1637,31 +2472,36 @@ var ScoresMSSQL = class extends storage.ScoresStorage {
1637
2472
  `SELECT COUNT(*) as count FROM ${getTableName({ indexName: storage.TABLE_SCORERS, schemaName: getSchemaName(this.schema) })} WHERE [entityId] = @p1 AND [entityType] = @p2`
1638
2473
  );
1639
2474
  const total = totalResult.recordset[0]?.count || 0;
2475
+ const { page, perPage: perPageInput } = pagination;
2476
+ const perPage = storage.normalizePerPage(perPageInput, 100);
2477
+ const { offset: start, perPage: perPageForResponse } = storage.calculatePagination(page, perPageInput, perPage);
1640
2478
  if (total === 0) {
1641
2479
  return {
1642
2480
  pagination: {
1643
2481
  total: 0,
1644
- page: pagination.page,
1645
- perPage: pagination.perPage,
2482
+ page,
2483
+ perPage: perPageForResponse,
1646
2484
  hasMore: false
1647
2485
  },
1648
2486
  scores: []
1649
2487
  };
1650
2488
  }
2489
+ const limitValue = perPageInput === false ? total : perPage;
2490
+ const end = perPageInput === false ? total : start + perPage;
1651
2491
  const dataRequest = this.pool.request();
1652
2492
  dataRequest.input("p1", entityId);
1653
2493
  dataRequest.input("p2", entityType);
1654
- dataRequest.input("p3", pagination.perPage);
1655
- dataRequest.input("p4", pagination.page * pagination.perPage);
2494
+ dataRequest.input("p3", limitValue);
2495
+ dataRequest.input("p4", start);
1656
2496
  const result = await dataRequest.query(
1657
2497
  `SELECT * FROM ${getTableName({ indexName: storage.TABLE_SCORERS, schemaName: getSchemaName(this.schema) })} WHERE [entityId] = @p1 AND [entityType] = @p2 ORDER BY [createdAt] DESC OFFSET @p4 ROWS FETCH NEXT @p3 ROWS ONLY`
1658
2498
  );
1659
2499
  return {
1660
2500
  pagination: {
1661
2501
  total: Number(total),
1662
- page: pagination.page,
1663
- perPage: pagination.perPage,
1664
- hasMore: Number(total) > (pagination.page + 1) * pagination.perPage
2502
+ page,
2503
+ perPage: perPageForResponse,
2504
+ hasMore: end < total
1665
2505
  },
1666
2506
  scores: result.recordset.map((row) => transformScoreRow(row))
1667
2507
  };
@@ -1677,8 +2517,66 @@ var ScoresMSSQL = class extends storage.ScoresStorage {
1677
2517
  );
1678
2518
  }
1679
2519
  }
2520
+ async listScoresBySpan({
2521
+ traceId,
2522
+ spanId,
2523
+ pagination
2524
+ }) {
2525
+ try {
2526
+ const request = this.pool.request();
2527
+ request.input("p1", traceId);
2528
+ request.input("p2", spanId);
2529
+ const totalResult = await request.query(
2530
+ `SELECT COUNT(*) as count FROM ${getTableName({ indexName: storage.TABLE_SCORERS, schemaName: getSchemaName(this.schema) })} WHERE [traceId] = @p1 AND [spanId] = @p2`
2531
+ );
2532
+ const total = totalResult.recordset[0]?.count || 0;
2533
+ const { page, perPage: perPageInput } = pagination;
2534
+ const perPage = storage.normalizePerPage(perPageInput, 100);
2535
+ const { offset: start, perPage: perPageForResponse } = storage.calculatePagination(page, perPageInput, perPage);
2536
+ if (total === 0) {
2537
+ return {
2538
+ pagination: {
2539
+ total: 0,
2540
+ page,
2541
+ perPage: perPageForResponse,
2542
+ hasMore: false
2543
+ },
2544
+ scores: []
2545
+ };
2546
+ }
2547
+ const limitValue = perPageInput === false ? total : perPage;
2548
+ const end = perPageInput === false ? total : start + perPage;
2549
+ const dataRequest = this.pool.request();
2550
+ dataRequest.input("p1", traceId);
2551
+ dataRequest.input("p2", spanId);
2552
+ dataRequest.input("p3", limitValue);
2553
+ dataRequest.input("p4", start);
2554
+ const result = await dataRequest.query(
2555
+ `SELECT * FROM ${getTableName({ indexName: storage.TABLE_SCORERS, schemaName: getSchemaName(this.schema) })} WHERE [traceId] = @p1 AND [spanId] = @p2 ORDER BY [createdAt] DESC OFFSET @p4 ROWS FETCH NEXT @p3 ROWS ONLY`
2556
+ );
2557
+ return {
2558
+ pagination: {
2559
+ total: Number(total),
2560
+ page,
2561
+ perPage: perPageForResponse,
2562
+ hasMore: end < total
2563
+ },
2564
+ scores: result.recordset.map((row) => transformScoreRow(row))
2565
+ };
2566
+ } catch (error$1) {
2567
+ throw new error.MastraError(
2568
+ {
2569
+ id: "MASTRA_STORAGE_MSSQL_STORE_GET_SCORES_BY_SPAN_FAILED",
2570
+ domain: error.ErrorDomain.STORAGE,
2571
+ category: error.ErrorCategory.THIRD_PARTY,
2572
+ details: { traceId, spanId }
2573
+ },
2574
+ error$1
2575
+ );
2576
+ }
2577
+ }
1680
2578
  };
1681
- var TracesMSSQL = class extends storage.TracesStorage {
2579
+ var WorkflowsMSSQL = class extends storage.WorkflowsStorage {
1682
2580
  pool;
1683
2581
  operations;
1684
2582
  schema;
@@ -1692,210 +2590,168 @@ var TracesMSSQL = class extends storage.TracesStorage {
1692
2590
  this.operations = operations;
1693
2591
  this.schema = schema;
1694
2592
  }
1695
- /** @deprecated use getTracesPaginated instead*/
1696
- async getTraces(args) {
1697
- if (args.fromDate || args.toDate) {
1698
- args.dateRange = {
1699
- start: args.fromDate,
1700
- end: args.toDate
1701
- };
1702
- }
1703
- const result = await this.getTracesPaginated(args);
1704
- return result.traces;
1705
- }
1706
- async getTracesPaginated(args) {
1707
- const { name, scope, page = 0, perPage: perPageInput, attributes, filters, dateRange } = args;
1708
- const fromDate = dateRange?.start;
1709
- const toDate = dateRange?.end;
1710
- const perPage = perPageInput !== void 0 ? perPageInput : 100;
1711
- const currentOffset = page * perPage;
1712
- const paramMap = {};
1713
- const conditions = [];
1714
- let paramIndex = 1;
1715
- if (name) {
1716
- const paramName = `p${paramIndex++}`;
1717
- conditions.push(`[name] LIKE @${paramName}`);
1718
- paramMap[paramName] = `${name}%`;
1719
- }
1720
- if (scope) {
1721
- const paramName = `p${paramIndex++}`;
1722
- conditions.push(`[scope] = @${paramName}`);
1723
- paramMap[paramName] = scope;
1724
- }
1725
- if (attributes) {
1726
- Object.entries(attributes).forEach(([key, value]) => {
1727
- const parsedKey = utils.parseFieldKey(key);
1728
- const paramName = `p${paramIndex++}`;
1729
- conditions.push(`JSON_VALUE([attributes], '$.${parsedKey}') = @${paramName}`);
1730
- paramMap[paramName] = value;
1731
- });
1732
- }
1733
- if (filters) {
1734
- Object.entries(filters).forEach(([key, value]) => {
1735
- const parsedKey = utils.parseFieldKey(key);
1736
- const paramName = `p${paramIndex++}`;
1737
- conditions.push(`[${parsedKey}] = @${paramName}`);
1738
- paramMap[paramName] = value;
1739
- });
1740
- }
1741
- if (fromDate instanceof Date && !isNaN(fromDate.getTime())) {
1742
- const paramName = `p${paramIndex++}`;
1743
- conditions.push(`[createdAt] >= @${paramName}`);
1744
- paramMap[paramName] = fromDate.toISOString();
1745
- }
1746
- if (toDate instanceof Date && !isNaN(toDate.getTime())) {
1747
- const paramName = `p${paramIndex++}`;
1748
- conditions.push(`[createdAt] <= @${paramName}`);
1749
- paramMap[paramName] = toDate.toISOString();
2593
+ parseWorkflowRun(row) {
2594
+ let parsedSnapshot = row.snapshot;
2595
+ if (typeof parsedSnapshot === "string") {
2596
+ try {
2597
+ parsedSnapshot = JSON.parse(row.snapshot);
2598
+ } catch (e) {
2599
+ this.logger?.warn?.(`Failed to parse snapshot for workflow ${row.workflow_name}:`, e);
2600
+ }
1750
2601
  }
1751
- const whereClause = conditions.length > 0 ? `WHERE ${conditions.join(" AND ")}` : "";
1752
- const countQuery = `SELECT COUNT(*) as total FROM ${getTableName({ indexName: storage.TABLE_TRACES, schemaName: getSchemaName(this.schema) })} ${whereClause}`;
1753
- let total = 0;
2602
+ return {
2603
+ workflowName: row.workflow_name,
2604
+ runId: row.run_id,
2605
+ snapshot: parsedSnapshot,
2606
+ createdAt: row.createdAt,
2607
+ updatedAt: row.updatedAt,
2608
+ resourceId: row.resourceId
2609
+ };
2610
+ }
2611
+ async updateWorkflowResults({
2612
+ workflowName,
2613
+ runId,
2614
+ stepId,
2615
+ result,
2616
+ requestContext
2617
+ }) {
2618
+ const table = getTableName({ indexName: storage.TABLE_WORKFLOW_SNAPSHOT, schemaName: getSchemaName(this.schema) });
2619
+ const transaction = this.pool.transaction();
1754
2620
  try {
1755
- const countRequest = this.pool.request();
1756
- Object.entries(paramMap).forEach(([key, value]) => {
1757
- if (value instanceof Date) {
1758
- countRequest.input(key, sql2__default.default.DateTime, value);
1759
- } else {
1760
- countRequest.input(key, value);
1761
- }
1762
- });
1763
- const countResult = await countRequest.query(countQuery);
1764
- total = parseInt(countResult.recordset[0].total, 10);
2621
+ await transaction.begin();
2622
+ const selectRequest = new sql2__default.default.Request(transaction);
2623
+ selectRequest.input("workflow_name", workflowName);
2624
+ selectRequest.input("run_id", runId);
2625
+ const existingSnapshotResult = await selectRequest.query(
2626
+ `SELECT snapshot FROM ${table} WITH (UPDLOCK, HOLDLOCK) WHERE workflow_name = @workflow_name AND run_id = @run_id`
2627
+ );
2628
+ let snapshot;
2629
+ if (!existingSnapshotResult.recordset || existingSnapshotResult.recordset.length === 0) {
2630
+ snapshot = {
2631
+ context: {},
2632
+ activePaths: [],
2633
+ timestamp: Date.now(),
2634
+ suspendedPaths: {},
2635
+ resumeLabels: {},
2636
+ serializedStepGraph: [],
2637
+ value: {},
2638
+ waitingPaths: {},
2639
+ status: "pending",
2640
+ runId,
2641
+ requestContext: {}
2642
+ };
2643
+ } else {
2644
+ const existingSnapshot = existingSnapshotResult.recordset[0].snapshot;
2645
+ snapshot = typeof existingSnapshot === "string" ? JSON.parse(existingSnapshot) : existingSnapshot;
2646
+ }
2647
+ snapshot.context[stepId] = result;
2648
+ snapshot.requestContext = { ...snapshot.requestContext, ...requestContext };
2649
+ const upsertReq = new sql2__default.default.Request(transaction);
2650
+ upsertReq.input("workflow_name", workflowName);
2651
+ upsertReq.input("run_id", runId);
2652
+ upsertReq.input("snapshot", JSON.stringify(snapshot));
2653
+ upsertReq.input("createdAt", sql2__default.default.DateTime2, /* @__PURE__ */ new Date());
2654
+ upsertReq.input("updatedAt", sql2__default.default.DateTime2, /* @__PURE__ */ new Date());
2655
+ await upsertReq.query(
2656
+ `MERGE ${table} AS target
2657
+ USING (SELECT @workflow_name AS workflow_name, @run_id AS run_id) AS src
2658
+ ON target.workflow_name = src.workflow_name AND target.run_id = src.run_id
2659
+ WHEN MATCHED THEN UPDATE SET snapshot = @snapshot, [updatedAt] = @updatedAt
2660
+ WHEN NOT MATCHED THEN INSERT (workflow_name, run_id, snapshot, [createdAt], [updatedAt])
2661
+ VALUES (@workflow_name, @run_id, @snapshot, @createdAt, @updatedAt);`
2662
+ );
2663
+ await transaction.commit();
2664
+ return snapshot.context;
1765
2665
  } catch (error$1) {
2666
+ try {
2667
+ await transaction.rollback();
2668
+ } catch {
2669
+ }
1766
2670
  throw new error.MastraError(
1767
2671
  {
1768
- id: "MASTRA_STORAGE_MSSQL_STORE_GET_TRACES_PAGINATED_FAILED_TO_RETRIEVE_TOTAL_COUNT",
2672
+ id: "MASTRA_STORAGE_MSSQL_STORE_UPDATE_WORKFLOW_RESULTS_FAILED",
1769
2673
  domain: error.ErrorDomain.STORAGE,
1770
2674
  category: error.ErrorCategory.THIRD_PARTY,
1771
2675
  details: {
1772
- name: args.name ?? "",
1773
- scope: args.scope ?? ""
2676
+ workflowName,
2677
+ runId,
2678
+ stepId
1774
2679
  }
1775
2680
  },
1776
2681
  error$1
1777
2682
  );
1778
2683
  }
1779
- if (total === 0) {
1780
- return {
1781
- traces: [],
1782
- total: 0,
1783
- page,
1784
- perPage,
1785
- hasMore: false
1786
- };
1787
- }
1788
- const dataQuery = `SELECT * FROM ${getTableName({ indexName: storage.TABLE_TRACES, schemaName: getSchemaName(this.schema) })} ${whereClause} ORDER BY [seq_id] DESC OFFSET @offset ROWS FETCH NEXT @limit ROWS ONLY`;
1789
- const dataRequest = this.pool.request();
1790
- Object.entries(paramMap).forEach(([key, value]) => {
1791
- if (value instanceof Date) {
1792
- dataRequest.input(key, sql2__default.default.DateTime, value);
1793
- } else {
1794
- dataRequest.input(key, value);
1795
- }
1796
- });
1797
- dataRequest.input("offset", currentOffset);
1798
- dataRequest.input("limit", perPage);
2684
+ }
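// Hedged usage sketch (not from the package): the newly implemented updateWorkflowResults()
// merges one step's result into the stored snapshot under a row lock (UPDLOCK, HOLDLOCK) and
// returns the updated context. `wf` is assumed to be a WorkflowsMSSQL instance; the workflow
// name, step id, and payloads are illustrative.
async function recordStepResult(wf, runId) {
  const context = await wf.updateWorkflowResults({
    workflowName: 'orderWorkflow',                        // hypothetical workflow name
    runId,
    stepId: 'charge-card',                                // hypothetical step id
    result: { status: 'success', output: { ok: true } },  // stored at snapshot.context['charge-card']
    requestContext: { userId: 'u_123' },                  // shallow-merged into snapshot.requestContext
  });
  return context;
}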
2685
+ async updateWorkflowState({
2686
+ workflowName,
2687
+ runId,
2688
+ opts
2689
+ }) {
2690
+ const table = getTableName({ indexName: storage.TABLE_WORKFLOW_SNAPSHOT, schemaName: getSchemaName(this.schema) });
2691
+ const transaction = this.pool.transaction();
1799
2692
  try {
1800
- const rowsResult = await dataRequest.query(dataQuery);
1801
- const rows = rowsResult.recordset;
1802
- const traces = rows.map((row) => ({
1803
- id: row.id,
1804
- parentSpanId: row.parentSpanId,
1805
- traceId: row.traceId,
1806
- name: row.name,
1807
- scope: row.scope,
1808
- kind: row.kind,
1809
- status: JSON.parse(row.status),
1810
- events: JSON.parse(row.events),
1811
- links: JSON.parse(row.links),
1812
- attributes: JSON.parse(row.attributes),
1813
- startTime: row.startTime,
1814
- endTime: row.endTime,
1815
- other: row.other,
1816
- createdAt: row.createdAt
1817
- }));
1818
- return {
1819
- traces,
1820
- total,
1821
- page,
1822
- perPage,
1823
- hasMore: currentOffset + traces.length < total
1824
- };
2693
+ await transaction.begin();
2694
+ const selectRequest = new sql2__default.default.Request(transaction);
2695
+ selectRequest.input("workflow_name", workflowName);
2696
+ selectRequest.input("run_id", runId);
2697
+ const existingSnapshotResult = await selectRequest.query(
2698
+ `SELECT snapshot FROM ${table} WITH (UPDLOCK, HOLDLOCK) WHERE workflow_name = @workflow_name AND run_id = @run_id`
2699
+ );
2700
+ if (!existingSnapshotResult.recordset || existingSnapshotResult.recordset.length === 0) {
2701
+ await transaction.rollback();
2702
+ return void 0;
2703
+ }
2704
+ const existingSnapshot = existingSnapshotResult.recordset[0].snapshot;
2705
+ const snapshot = typeof existingSnapshot === "string" ? JSON.parse(existingSnapshot) : existingSnapshot;
2706
+ if (!snapshot || !snapshot?.context) {
2707
+ await transaction.rollback();
2708
+ throw new error.MastraError(
2709
+ {
2710
+ id: "MASTRA_STORAGE_MSSQL_STORE_UPDATE_WORKFLOW_STATE_SNAPSHOT_NOT_FOUND",
2711
+ domain: error.ErrorDomain.STORAGE,
2712
+ category: error.ErrorCategory.SYSTEM,
2713
+ details: {
2714
+ workflowName,
2715
+ runId
2716
+ }
2717
+ },
2718
+ new Error(`Snapshot not found for runId ${runId}`)
2719
+ );
2720
+ }
2721
+ const updatedSnapshot = { ...snapshot, ...opts };
2722
+ const updateRequest = new sql2__default.default.Request(transaction);
2723
+ updateRequest.input("snapshot", JSON.stringify(updatedSnapshot));
2724
+ updateRequest.input("workflow_name", workflowName);
2725
+ updateRequest.input("run_id", runId);
2726
+ updateRequest.input("updatedAt", sql2__default.default.DateTime2, /* @__PURE__ */ new Date());
2727
+ await updateRequest.query(
2728
+ `UPDATE ${table} SET snapshot = @snapshot, [updatedAt] = @updatedAt WHERE workflow_name = @workflow_name AND run_id = @run_id`
2729
+ );
2730
+ await transaction.commit();
2731
+ return updatedSnapshot;
1825
2732
  } catch (error$1) {
2733
+ try {
2734
+ await transaction.rollback();
2735
+ } catch {
2736
+ }
1826
2737
  throw new error.MastraError(
1827
2738
  {
1828
- id: "MASTRA_STORAGE_MSSQL_STORE_GET_TRACES_PAGINATED_FAILED_TO_RETRIEVE_TRACES",
2739
+ id: "MASTRA_STORAGE_MSSQL_STORE_UPDATE_WORKFLOW_STATE_FAILED",
1829
2740
  domain: error.ErrorDomain.STORAGE,
1830
2741
  category: error.ErrorCategory.THIRD_PARTY,
1831
2742
  details: {
1832
- name: args.name ?? "",
1833
- scope: args.scope ?? ""
2743
+ workflowName,
2744
+ runId
1834
2745
  }
1835
2746
  },
1836
2747
  error$1
1837
2748
  );
1838
2749
  }
1839
2750
  }
1840
- async batchTraceInsert({ records }) {
1841
- this.logger.debug("Batch inserting traces", { count: records.length });
1842
- await this.operations.batchInsert({
1843
- tableName: storage.TABLE_TRACES,
1844
- records
1845
- });
1846
- }
1847
- };
1848
- function parseWorkflowRun(row) {
1849
- let parsedSnapshot = row.snapshot;
1850
- if (typeof parsedSnapshot === "string") {
1851
- try {
1852
- parsedSnapshot = JSON.parse(row.snapshot);
1853
- } catch (e) {
1854
- console.warn(`Failed to parse snapshot for workflow ${row.workflow_name}: ${e}`);
1855
- }
1856
- }
1857
- return {
1858
- workflowName: row.workflow_name,
1859
- runId: row.run_id,
1860
- snapshot: parsedSnapshot,
1861
- createdAt: row.createdAt,
1862
- updatedAt: row.updatedAt,
1863
- resourceId: row.resourceId
1864
- };
1865
- }
1866
- var WorkflowsMSSQL = class extends storage.WorkflowsStorage {
1867
- pool;
1868
- operations;
1869
- schema;
1870
- constructor({
1871
- pool,
1872
- operations,
1873
- schema
1874
- }) {
1875
- super();
1876
- this.pool = pool;
1877
- this.operations = operations;
1878
- this.schema = schema;
1879
- }
1880
- updateWorkflowResults({
1881
- // workflowName,
1882
- // runId,
1883
- // stepId,
1884
- // result,
1885
- // runtimeContext,
1886
- }) {
1887
- throw new Error("Method not implemented.");
1888
- }
1889
- updateWorkflowState({
1890
- // workflowName,
1891
- // runId,
1892
- // opts,
1893
- }) {
1894
- throw new Error("Method not implemented.");
1895
- }
1896
2751
  async persistWorkflowSnapshot({
1897
2752
  workflowName,
1898
2753
  runId,
2754
+ resourceId,
1899
2755
  snapshot
1900
2756
  }) {
1901
2757
  const table = getTableName({ indexName: storage.TABLE_WORKFLOW_SNAPSHOT, schemaName: getSchemaName(this.schema) });
@@ -1904,6 +2760,7 @@ var WorkflowsMSSQL = class extends storage.WorkflowsStorage {
1904
2760
  const request = this.pool.request();
1905
2761
  request.input("workflow_name", workflowName);
1906
2762
  request.input("run_id", runId);
2763
+ request.input("resourceId", resourceId);
1907
2764
  request.input("snapshot", JSON.stringify(snapshot));
1908
2765
  request.input("createdAt", sql2__default.default.DateTime2, new Date(now));
1909
2766
  request.input("updatedAt", sql2__default.default.DateTime2, new Date(now));
@@ -1911,10 +2768,11 @@ var WorkflowsMSSQL = class extends storage.WorkflowsStorage {
1911
2768
  USING (SELECT @workflow_name AS workflow_name, @run_id AS run_id) AS src
1912
2769
  ON target.workflow_name = src.workflow_name AND target.run_id = src.run_id
1913
2770
  WHEN MATCHED THEN UPDATE SET
2771
+ resourceId = @resourceId,
1914
2772
  snapshot = @snapshot,
1915
2773
  [updatedAt] = @updatedAt
1916
- WHEN NOT MATCHED THEN INSERT (workflow_name, run_id, snapshot, [createdAt], [updatedAt])
1917
- VALUES (@workflow_name, @run_id, @snapshot, @createdAt, @updatedAt);`;
2774
+ WHEN NOT MATCHED THEN INSERT (workflow_name, run_id, resourceId, snapshot, [createdAt], [updatedAt])
2775
+ VALUES (@workflow_name, @run_id, @resourceId, @snapshot, @createdAt, @updatedAt);`;
1918
2776
  await request.query(mergeSql);
1919
2777
  } catch (error$1) {
1920
2778
  throw new error.MastraError(
@@ -1986,7 +2844,7 @@ var WorkflowsMSSQL = class extends storage.WorkflowsStorage {
1986
2844
  if (!result.recordset || result.recordset.length === 0) {
1987
2845
  return null;
1988
2846
  }
1989
- return parseWorkflowRun(result.recordset[0]);
2847
+ return this.parseWorkflowRun(result.recordset[0]);
1990
2848
  } catch (error$1) {
1991
2849
  throw new error.MastraError(
1992
2850
  {
@@ -2002,12 +2860,12 @@ var WorkflowsMSSQL = class extends storage.WorkflowsStorage {
2002
2860
  );
2003
2861
  }
2004
2862
  }
2005
- async getWorkflowRuns({
2863
+ async listWorkflowRuns({
2006
2864
  workflowName,
2007
2865
  fromDate,
2008
2866
  toDate,
2009
- limit,
2010
- offset,
2867
+ page,
2868
+ perPage,
2011
2869
  resourceId
2012
2870
  } = {}) {
2013
2871
  try {
@@ -2023,7 +2881,7 @@ var WorkflowsMSSQL = class extends storage.WorkflowsStorage {
2023
2881
  conditions.push(`[resourceId] = @resourceId`);
2024
2882
  paramMap["resourceId"] = resourceId;
2025
2883
  } else {
2026
- console.warn(`[${storage.TABLE_WORKFLOW_SNAPSHOT}] resourceId column not found. Skipping resourceId filter.`);
2884
+ this.logger?.warn?.(`[${storage.TABLE_WORKFLOW_SNAPSHOT}] resourceId column not found. Skipping resourceId filter.`);
2027
2885
  }
2028
2886
  }
2029
2887
  if (fromDate instanceof Date && !isNaN(fromDate.getTime())) {
@@ -2045,24 +2903,27 @@ var WorkflowsMSSQL = class extends storage.WorkflowsStorage {
2045
2903
  request.input(key, value);
2046
2904
  }
2047
2905
  });
2048
- if (limit !== void 0 && offset !== void 0) {
2906
+ const usePagination = typeof perPage === "number" && typeof page === "number";
2907
+ if (usePagination) {
2049
2908
  const countQuery = `SELECT COUNT(*) as count FROM ${tableName} ${whereClause}`;
2050
2909
  const countResult = await request.query(countQuery);
2051
2910
  total = Number(countResult.recordset[0]?.count || 0);
2052
2911
  }
2053
2912
  let query = `SELECT * FROM ${tableName} ${whereClause} ORDER BY [seq_id] DESC`;
2054
- if (limit !== void 0 && offset !== void 0) {
2055
- query += ` OFFSET @offset ROWS FETCH NEXT @limit ROWS ONLY`;
2056
- request.input("limit", limit);
2913
+ if (usePagination) {
2914
+ const normalizedPerPage = storage.normalizePerPage(perPage, Number.MAX_SAFE_INTEGER);
2915
+ const offset = page * normalizedPerPage;
2916
+ query += ` OFFSET @offset ROWS FETCH NEXT @perPage ROWS ONLY`;
2917
+ request.input("perPage", normalizedPerPage);
2057
2918
  request.input("offset", offset);
2058
2919
  }
2059
2920
  const result = await request.query(query);
2060
- const runs = (result.recordset || []).map((row) => parseWorkflowRun(row));
2921
+ const runs = (result.recordset || []).map((row) => this.parseWorkflowRun(row));
2061
2922
  return { runs, total: total || runs.length };
2062
2923
  } catch (error$1) {
2063
2924
  throw new error.MastraError(
2064
2925
  {
2065
- id: "MASTRA_STORAGE_MSSQL_STORE_GET_WORKFLOW_RUNS_FAILED",
2926
+ id: "MASTRA_STORAGE_MSSQL_STORE_LIST_WORKFLOW_RUNS_FAILED",
2066
2927
  domain: error.ErrorDomain.STORAGE,
2067
2928
  category: error.ErrorCategory.THIRD_PARTY,
2068
2929
  details: {
@@ -2105,19 +2966,17 @@ var MSSQLStore = class extends storage.MastraStorage {
2105
2966
  port: config.port,
2106
2967
  options: config.options || { encrypt: true, trustServerCertificate: true }
2107
2968
  });
2108
- const legacyEvals = new LegacyEvalsMSSQL({ pool: this.pool, schema: this.schema });
2109
2969
  const operations = new StoreOperationsMSSQL({ pool: this.pool, schemaName: this.schema });
2110
2970
  const scores = new ScoresMSSQL({ pool: this.pool, operations, schema: this.schema });
2111
- const traces = new TracesMSSQL({ pool: this.pool, operations, schema: this.schema });
2112
2971
  const workflows = new WorkflowsMSSQL({ pool: this.pool, operations, schema: this.schema });
2113
2972
  const memory = new MemoryMSSQL({ pool: this.pool, schema: this.schema, operations });
2973
+ const observability = new ObservabilityMSSQL({ pool: this.pool, operations, schema: this.schema });
2114
2974
  this.stores = {
2115
2975
  operations,
2116
2976
  scores,
2117
- traces,
2118
2977
  workflows,
2119
- legacyEvals,
2120
- memory
2978
+ memory,
2979
+ observability
2121
2980
  };
2122
2981
  } catch (e) {
2123
2982
  throw new error.MastraError(
@@ -2137,6 +2996,11 @@ var MSSQLStore = class extends storage.MastraStorage {
2137
2996
  try {
2138
2997
  await this.isConnected;
2139
2998
  await super.init();
2999
+ try {
3000
+ await this.stores.operations.createAutomaticIndexes();
3001
+ } catch (indexError) {
3002
+ this.logger?.warn?.("Failed to create indexes:", indexError);
3003
+ }
2140
3004
  } catch (error$1) {
2141
3005
  this.isConnected = null;
2142
3006
  throw new error.MastraError(
@@ -2163,28 +3027,12 @@ var MSSQLStore = class extends storage.MastraStorage {
2163
3027
  resourceWorkingMemory: true,
2164
3028
  hasColumn: true,
2165
3029
  createTable: true,
2166
- deleteMessages: true
3030
+ deleteMessages: true,
3031
+ listScoresBySpan: true,
3032
+ observabilityInstance: true,
3033
+ indexManagement: true
2167
3034
  };
2168
3035
  }
2169
- /** @deprecated use getEvals instead */
2170
- async getEvalsByAgentName(agentName, type) {
2171
- return this.stores.legacyEvals.getEvalsByAgentName(agentName, type);
2172
- }
2173
- async getEvals(options = {}) {
2174
- return this.stores.legacyEvals.getEvals(options);
2175
- }
2176
- /**
2177
- * @deprecated use getTracesPaginated instead
2178
- */
2179
- async getTraces(args) {
2180
- return this.stores.traces.getTraces(args);
2181
- }
2182
- async getTracesPaginated(args) {
2183
- return this.stores.traces.getTracesPaginated(args);
2184
- }
2185
- async batchTraceInsert({ records }) {
2186
- return this.stores.traces.batchTraceInsert({ records });
2187
- }
2188
3036
  async createTable({
2189
3037
  tableName,
2190
3038
  schema
@@ -2219,15 +3067,6 @@ var MSSQLStore = class extends storage.MastraStorage {
2219
3067
  async getThreadById({ threadId }) {
2220
3068
  return this.stores.memory.getThreadById({ threadId });
2221
3069
  }
2222
- /**
2223
- * @deprecated use getThreadsByResourceIdPaginated instead
2224
- */
2225
- async getThreadsByResourceId(args) {
2226
- return this.stores.memory.getThreadsByResourceId(args);
2227
- }
2228
- async getThreadsByResourceIdPaginated(args) {
2229
- return this.stores.memory.getThreadsByResourceIdPaginated(args);
2230
- }
2231
3070
  async saveThread({ thread }) {
2232
3071
  return this.stores.memory.saveThread({ thread });
2233
3072
  }
@@ -2241,17 +3080,8 @@ var MSSQLStore = class extends storage.MastraStorage {
2241
3080
  async deleteThread({ threadId }) {
2242
3081
  return this.stores.memory.deleteThread({ threadId });
2243
3082
  }
2244
- async getMessages(args) {
2245
- return this.stores.memory.getMessages(args);
2246
- }
2247
- async getMessagesById({
2248
- messageIds,
2249
- format
2250
- }) {
2251
- return this.stores.memory.getMessagesById({ messageIds, format });
2252
- }
2253
- async getMessagesPaginated(args) {
2254
- return this.stores.memory.getMessagesPaginated(args);
3083
+ async listMessagesById({ messageIds }) {
3084
+ return this.stores.memory.listMessagesById({ messageIds });
2255
3085
  }
2256
3086
  async saveMessages(args) {
2257
3087
  return this.stores.memory.saveMessages(args);
@@ -2285,9 +3115,9 @@ var MSSQLStore = class extends storage.MastraStorage {
2285
3115
  runId,
2286
3116
  stepId,
2287
3117
  result,
2288
- runtimeContext
3118
+ requestContext
2289
3119
  }) {
2290
- return this.stores.workflows.updateWorkflowResults({ workflowName, runId, stepId, result, runtimeContext });
3120
+ return this.stores.workflows.updateWorkflowResults({ workflowName, runId, stepId, result, requestContext });
2291
3121
  }
2292
3122
  async updateWorkflowState({
2293
3123
  workflowName,
@@ -2299,9 +3129,10 @@ var MSSQLStore = class extends storage.MastraStorage {
2299
3129
  async persistWorkflowSnapshot({
2300
3130
  workflowName,
2301
3131
  runId,
3132
+ resourceId,
2302
3133
  snapshot
2303
3134
  }) {
2304
- return this.stores.workflows.persistWorkflowSnapshot({ workflowName, runId, snapshot });
3135
+ return this.stores.workflows.persistWorkflowSnapshot({ workflowName, runId, resourceId, snapshot });
2305
3136
  }
2306
3137
  async loadWorkflowSnapshot({
2307
3138
  workflowName,
@@ -2309,15 +3140,15 @@ var MSSQLStore = class extends storage.MastraStorage {
  }) {
  return this.stores.workflows.loadWorkflowSnapshot({ workflowName, runId });
  }
- async getWorkflowRuns({
+ async listWorkflowRuns({
  workflowName,
  fromDate,
  toDate,
- limit,
- offset,
+ perPage,
+ page,
  resourceId
  } = {}) {
- return this.stores.workflows.getWorkflowRuns({ workflowName, fromDate, toDate, limit, offset, resourceId });
+ return this.stores.workflows.listWorkflowRuns({ workflowName, fromDate, toDate, perPage, page, resourceId });
  }
  async getWorkflowRunById({
  runId,
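
In the hunk above, getWorkflowRuns is renamed to listWorkflowRuns, and its limit/offset pagination becomes page/perPage; fromDate, toDate, and resourceId are unchanged. A TypeScript sketch of the new call shape with illustrative values; `store` follows the earlier sketch.

import { MSSQLStore } from '@mastra/mssql';

declare const store: MSSQLStore; // constructed as in the earlier sketch

const runs = await store.listWorkflowRuns({
  workflowName: 'order-workflow',
  fromDate: new Date('2025-01-01'),
  toDate: new Date(),
  page: 0,      // replaces offset
  perPage: 20,  // replaces limit
  resourceId: 'user-123',
});
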
@@ -2328,38 +3159,108 @@ var MSSQLStore = class extends storage.MastraStorage {
  async close() {
  await this.pool.close();
  }
+ /**
+ * Index Management
+ */
+ async createIndex(options) {
+ return this.stores.operations.createIndex(options);
+ }
+ async listIndexes(tableName) {
+ return this.stores.operations.listIndexes(tableName);
+ }
+ async describeIndex(indexName) {
+ return this.stores.operations.describeIndex(indexName);
+ }
+ async dropIndex(indexName) {
+ return this.stores.operations.dropIndex(indexName);
+ }
+ /**
+ * Tracing / Observability
+ */
+ getObservabilityStore() {
+ if (!this.stores.observability) {
+ throw new error.MastraError({
+ id: "MSSQL_STORE_OBSERVABILITY_NOT_INITIALIZED",
+ domain: error.ErrorDomain.STORAGE,
+ category: error.ErrorCategory.SYSTEM,
+ text: "Observability storage is not initialized"
+ });
+ }
+ return this.stores.observability;
+ }
+ async createSpan(span) {
+ return this.getObservabilityStore().createSpan(span);
+ }
+ async updateSpan({
+ spanId,
+ traceId,
+ updates
+ }) {
+ return this.getObservabilityStore().updateSpan({ spanId, traceId, updates });
+ }
+ async getAITrace(traceId) {
+ return this.getObservabilityStore().getAITrace(traceId);
+ }
+ async getAITracesPaginated(args) {
+ return this.getObservabilityStore().getAITracesPaginated(args);
+ }
+ async batchCreateSpans(args) {
+ return this.getObservabilityStore().batchCreateSpans(args);
+ }
+ async batchUpdateSpans(args) {
+ return this.getObservabilityStore().batchUpdateSpans(args);
+ }
+ async batchDeleteAITraces(args) {
+ return this.getObservabilityStore().batchDeleteAITraces(args);
+ }
  /**
  * Scorers
  */
  async getScoreById({ id: _id }) {
  return this.stores.scores.getScoreById({ id: _id });
  }
- async getScoresByScorerId({
+ async listScoresByScorerId({
  scorerId: _scorerId,
- pagination: _pagination
+ pagination: _pagination,
+ entityId: _entityId,
+ entityType: _entityType,
+ source: _source
  }) {
- return this.stores.scores.getScoresByScorerId({ scorerId: _scorerId, pagination: _pagination });
+ return this.stores.scores.listScoresByScorerId({
+ scorerId: _scorerId,
+ pagination: _pagination,
+ entityId: _entityId,
+ entityType: _entityType,
+ source: _source
+ });
  }
  async saveScore(_score) {
  return this.stores.scores.saveScore(_score);
  }
- async getScoresByRunId({
+ async listScoresByRunId({
  runId: _runId,
  pagination: _pagination
  }) {
- return this.stores.scores.getScoresByRunId({ runId: _runId, pagination: _pagination });
+ return this.stores.scores.listScoresByRunId({ runId: _runId, pagination: _pagination });
  }
- async getScoresByEntityId({
+ async listScoresByEntityId({
  entityId: _entityId,
  entityType: _entityType,
  pagination: _pagination
  }) {
- return this.stores.scores.getScoresByEntityId({
+ return this.stores.scores.listScoresByEntityId({
  entityId: _entityId,
  entityType: _entityType,
  pagination: _pagination
  });
  }
+ async listScoresBySpan({
+ traceId,
+ spanId,
+ pagination: _pagination
+ }) {
+ return this.stores.scores.listScoresBySpan({ traceId, spanId, pagination: _pagination });
+ }
  };
 
  exports.MSSQLStore = MSSQLStore;
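
The final hunk adds index management (createIndex, listIndexes, describeIndex, dropIndex), observability delegations that throw a MastraError when the observability store is not initialized, and renames the getScoresBy* methods to listScoresBy*, including a new listScoresBySpan. The TypeScript sketch below strings these together with illustrative values; the createIndex options object and the pagination shape are assumptions, and only the method names and argument keys come from this diff.

import { MSSQLStore } from '@mastra/mssql';

declare const store: MSSQLStore; // constructed as in the earlier sketch

// Index management: field names inside the options object are assumptions.
await store.createIndex({
  name: 'idx_messages_thread_id',
  table: 'mastra_messages',
  columns: ['thread_id'],
} as any);
const indexes = await store.listIndexes('mastra_messages');
const indexInfo = await store.describeIndex('idx_messages_thread_id');
await store.dropIndex('idx_messages_thread_id');

// Observability: these delegate to the observability store and throw
// MSSQL_STORE_OBSERVABILITY_NOT_INITIALIZED if it is missing. Ids are placeholders.
const trace = await store.getAITrace('trace-123');

// Scorers: span-level score listing is new; the pagination shape is assumed.
const spanScores = await store.listScoresBySpan({
  traceId: 'trace-123',
  spanId: 'span-456',
  pagination: { page: 0, perPage: 20 },
});
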