@mastra/mssql 0.0.0-toolOptionTypes-20250917085558 → 0.0.0-trace-timeline-update-20251121114225

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.cjs CHANGED
@@ -3,8 +3,10 @@
3
3
  var error = require('@mastra/core/error');
4
4
  var storage = require('@mastra/core/storage');
5
5
  var sql2 = require('mssql');
6
- var utils = require('@mastra/core/utils');
7
6
  var agent = require('@mastra/core/agent');
7
+ var utils = require('@mastra/core/utils');
8
+ var crypto = require('crypto');
9
+ var evals = require('@mastra/core/evals');
8
10
 
9
11
  function _interopDefault (e) { return e && e.__esModule ? e : { default: e }; }
10
12
 
@@ -20,154 +22,71 @@ function getTableName({ indexName, schemaName }) {
20
22
  const quotedSchemaName = schemaName;
21
23
  return quotedSchemaName ? `${quotedSchemaName}.${quotedIndexName}` : quotedIndexName;
22
24
  }
23
-
24
- // src/storage/domains/legacy-evals/index.ts
25
- function transformEvalRow(row) {
26
- let testInfoValue = null, resultValue = null;
27
- if (row.test_info) {
28
- try {
29
- testInfoValue = typeof row.test_info === "string" ? JSON.parse(row.test_info) : row.test_info;
30
- } catch {
31
- }
25
+ function buildDateRangeFilter(dateRange, fieldName) {
26
+ const filters = {};
27
+ if (dateRange?.start) {
28
+ filters[`${fieldName}_gte`] = dateRange.start;
32
29
  }
33
- if (row.test_info) {
34
- try {
35
- resultValue = typeof row.result === "string" ? JSON.parse(row.result) : row.result;
36
- } catch {
37
- }
30
+ if (dateRange?.end) {
31
+ filters[`${fieldName}_lte`] = dateRange.end;
38
32
  }
33
+ return filters;
34
+ }
35
+ function prepareWhereClause(filters, _schema) {
36
+ const conditions = [];
37
+ const params = {};
38
+ let paramIndex = 1;
39
+ Object.entries(filters).forEach(([key, value]) => {
40
+ if (value === void 0) return;
41
+ const paramName = `p${paramIndex++}`;
42
+ if (key.endsWith("_gte")) {
43
+ const fieldName = key.slice(0, -4);
44
+ conditions.push(`[${utils.parseSqlIdentifier(fieldName, "field name")}] >= @${paramName}`);
45
+ params[paramName] = value instanceof Date ? value.toISOString() : value;
46
+ } else if (key.endsWith("_lte")) {
47
+ const fieldName = key.slice(0, -4);
48
+ conditions.push(`[${utils.parseSqlIdentifier(fieldName, "field name")}] <= @${paramName}`);
49
+ params[paramName] = value instanceof Date ? value.toISOString() : value;
50
+ } else if (value === null) {
51
+ conditions.push(`[${utils.parseSqlIdentifier(key, "field name")}] IS NULL`);
52
+ } else {
53
+ conditions.push(`[${utils.parseSqlIdentifier(key, "field name")}] = @${paramName}`);
54
+ params[paramName] = value instanceof Date ? value.toISOString() : value;
55
+ }
56
+ });
39
57
  return {
40
- agentName: row.agent_name,
41
- input: row.input,
42
- output: row.output,
43
- result: resultValue,
44
- metricName: row.metric_name,
45
- instructions: row.instructions,
46
- testInfo: testInfoValue,
47
- globalRunId: row.global_run_id,
48
- runId: row.run_id,
49
- createdAt: row.created_at
58
+ sql: conditions.length > 0 ? ` WHERE ${conditions.join(" AND ")}` : "",
59
+ params
50
60
  };
51
61
  }
52
- var LegacyEvalsMSSQL = class extends storage.LegacyEvalsStorage {
53
- pool;
54
- schema;
55
- constructor({ pool, schema }) {
56
- super();
57
- this.pool = pool;
58
- this.schema = schema;
59
- }
60
- /** @deprecated use getEvals instead */
61
- async getEvalsByAgentName(agentName, type) {
62
- try {
63
- let query = `SELECT * FROM ${getTableName({ indexName: storage.TABLE_EVALS, schemaName: getSchemaName(this.schema) })} WHERE agent_name = @p1`;
64
- if (type === "test") {
65
- query += " AND test_info IS NOT NULL AND JSON_VALUE(test_info, '$.testPath') IS NOT NULL";
66
- } else if (type === "live") {
67
- query += " AND (test_info IS NULL OR JSON_VALUE(test_info, '$.testPath') IS NULL)";
68
- }
69
- query += " ORDER BY created_at DESC";
70
- const request = this.pool.request();
71
- request.input("p1", agentName);
72
- const result = await request.query(query);
73
- const rows = result.recordset;
74
- return typeof transformEvalRow === "function" ? rows?.map((row) => transformEvalRow(row)) ?? [] : rows ?? [];
75
- } catch (error) {
76
- if (error && error.number === 208 && error.message && error.message.includes("Invalid object name")) {
77
- return [];
78
- }
79
- console.error("Failed to get evals for the specified agent: " + error?.message);
80
- throw error;
81
- }
82
- }
83
- async getEvals(options = {}) {
84
- const { agentName, type, page = 0, perPage = 100, dateRange } = options;
85
- const fromDate = dateRange?.start;
86
- const toDate = dateRange?.end;
87
- const where = [];
88
- const params = {};
89
- if (agentName) {
90
- where.push("agent_name = @agentName");
91
- params["agentName"] = agentName;
92
- }
93
- if (type === "test") {
94
- where.push("test_info IS NOT NULL AND JSON_VALUE(test_info, '$.testPath') IS NOT NULL");
95
- } else if (type === "live") {
96
- where.push("(test_info IS NULL OR JSON_VALUE(test_info, '$.testPath') IS NULL)");
97
- }
98
- if (fromDate instanceof Date && !isNaN(fromDate.getTime())) {
99
- where.push(`[created_at] >= @fromDate`);
100
- params[`fromDate`] = fromDate.toISOString();
101
- }
102
- if (toDate instanceof Date && !isNaN(toDate.getTime())) {
103
- where.push(`[created_at] <= @toDate`);
104
- params[`toDate`] = toDate.toISOString();
105
- }
106
- const whereClause = where.length > 0 ? `WHERE ${where.join(" AND ")}` : "";
107
- const tableName = getTableName({ indexName: storage.TABLE_EVALS, schemaName: getSchemaName(this.schema) });
108
- const offset = page * perPage;
109
- const countQuery = `SELECT COUNT(*) as total FROM ${tableName} ${whereClause}`;
110
- const dataQuery = `SELECT * FROM ${tableName} ${whereClause} ORDER BY seq_id DESC OFFSET @offset ROWS FETCH NEXT @perPage ROWS ONLY`;
111
- try {
112
- const countReq = this.pool.request();
113
- Object.entries(params).forEach(([key, value]) => {
114
- if (value instanceof Date) {
115
- countReq.input(key, sql2__default.default.DateTime, value);
116
- } else {
117
- countReq.input(key, value);
118
- }
119
- });
120
- const countResult = await countReq.query(countQuery);
121
- const total = countResult.recordset[0]?.total || 0;
122
- if (total === 0) {
123
- return {
124
- evals: [],
125
- total: 0,
126
- page,
127
- perPage,
128
- hasMore: false
129
- };
62
+ function transformFromSqlRow({
63
+ tableName,
64
+ sqlRow
65
+ }) {
66
+ const schema = storage.TABLE_SCHEMAS[tableName];
67
+ const result = {};
68
+ Object.entries(sqlRow).forEach(([key, value]) => {
69
+ const columnSchema = schema?.[key];
70
+ if (columnSchema?.type === "jsonb" && typeof value === "string") {
71
+ try {
72
+ result[key] = JSON.parse(value);
73
+ } catch {
74
+ result[key] = value;
130
75
  }
131
- const req = this.pool.request();
132
- Object.entries(params).forEach(([key, value]) => {
133
- if (value instanceof Date) {
134
- req.input(key, sql2__default.default.DateTime, value);
135
- } else {
136
- req.input(key, value);
137
- }
138
- });
139
- req.input("offset", offset);
140
- req.input("perPage", perPage);
141
- const result = await req.query(dataQuery);
142
- const rows = result.recordset;
143
- return {
144
- evals: rows?.map((row) => transformEvalRow(row)) ?? [],
145
- total,
146
- page,
147
- perPage,
148
- hasMore: offset + (rows?.length ?? 0) < total
149
- };
150
- } catch (error$1) {
151
- const mastraError = new error.MastraError(
152
- {
153
- id: "MASTRA_STORAGE_MSSQL_STORE_GET_EVALS_FAILED",
154
- domain: error.ErrorDomain.STORAGE,
155
- category: error.ErrorCategory.THIRD_PARTY,
156
- details: {
157
- agentName: agentName || "all",
158
- type: type || "all",
159
- page,
160
- perPage
161
- }
162
- },
163
- error$1
164
- );
165
- this.logger?.error?.(mastraError.toString());
166
- this.logger?.trackException(mastraError);
167
- throw mastraError;
76
+ } else if (columnSchema?.type === "timestamp" && value && typeof value === "string") {
77
+ result[key] = new Date(value);
78
+ } else if (columnSchema?.type === "timestamp" && value instanceof Date) {
79
+ result[key] = value;
80
+ } else if (columnSchema?.type === "boolean") {
81
+ result[key] = Boolean(value);
82
+ } else {
83
+ result[key] = value;
168
84
  }
169
- }
170
- };
85
+ });
86
+ return result;
87
+ }
88
+
89
+ // src/storage/domains/memory/index.ts
171
90
  var MemoryMSSQL = class extends storage.MemoryStorage {
172
91
  pool;
173
92
  schema;
@@ -185,7 +104,7 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
185
104
  });
186
105
  const cleanMessages = messagesWithParsedContent.map(({ seq_id, ...rest }) => rest);
187
106
  const list = new agent.MessageList().add(cleanMessages, "memory");
188
- return format === "v2" ? list.get.all.v2() : list.get.all.v1();
107
+ return format === "v2" ? list.get.all.db() : list.get.all.v1();
189
108
  }
190
109
  constructor({
191
110
  pool,
@@ -199,7 +118,7 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
199
118
  }
200
119
  async getThreadById({ threadId }) {
201
120
  try {
202
- const sql7 = `SELECT
121
+ const sql5 = `SELECT
203
122
  id,
204
123
  [resourceId],
205
124
  title,
@@ -210,7 +129,7 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
210
129
  WHERE id = @threadId`;
211
130
  const request = this.pool.request();
212
131
  request.input("threadId", threadId);
213
- const resultSet = await request.query(sql7);
132
+ const resultSet = await request.query(sql5);
214
133
  const thread = resultSet.recordset[0] || null;
215
134
  if (!thread) {
216
135
  return null;
@@ -235,11 +154,24 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
235
154
  );
236
155
  }
237
156
  }
238
- async getThreadsByResourceIdPaginated(args) {
239
- const { resourceId, page = 0, perPage: perPageInput, orderBy = "createdAt", sortDirection = "DESC" } = args;
157
+ async listThreadsByResourceId(args) {
158
+ const { resourceId, page = 0, perPage: perPageInput, orderBy } = args;
159
+ if (page < 0) {
160
+ throw new error.MastraError({
161
+ id: "MASTRA_STORAGE_MSSQL_STORE_INVALID_PAGE",
162
+ domain: error.ErrorDomain.STORAGE,
163
+ category: error.ErrorCategory.USER,
164
+ text: "Page number must be non-negative",
165
+ details: {
166
+ resourceId,
167
+ page
168
+ }
169
+ });
170
+ }
171
+ const perPage = storage.normalizePerPage(perPageInput, 100);
172
+ const { offset, perPage: perPageForResponse } = storage.calculatePagination(page, perPageInput, perPage);
173
+ const { field, direction } = this.parseOrderBy(orderBy);
240
174
  try {
241
- const perPage = perPageInput !== void 0 ? perPageInput : 100;
242
- const currentOffset = page * perPage;
243
175
  const baseQuery = `FROM ${getTableName({ indexName: storage.TABLE_THREADS, schemaName: getSchemaName(this.schema) })} WHERE [resourceId] = @resourceId`;
244
176
  const countQuery = `SELECT COUNT(*) as count ${baseQuery}`;
245
177
  const countRequest = this.pool.request();
@@ -251,16 +183,22 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
251
183
  threads: [],
252
184
  total: 0,
253
185
  page,
254
- perPage,
186
+ perPage: perPageForResponse,
255
187
  hasMore: false
256
188
  };
257
189
  }
258
- const orderByField = orderBy === "createdAt" ? "[createdAt]" : "[updatedAt]";
259
- const dataQuery = `SELECT id, [resourceId], title, metadata, [createdAt], [updatedAt] ${baseQuery} ORDER BY ${orderByField} ${sortDirection} OFFSET @offset ROWS FETCH NEXT @perPage ROWS ONLY`;
190
+ const orderByField = field === "createdAt" ? "[createdAt]" : "[updatedAt]";
191
+ const dir = (direction || "DESC").toUpperCase() === "ASC" ? "ASC" : "DESC";
192
+ const limitValue = perPageInput === false ? total : perPage;
193
+ const dataQuery = `SELECT id, [resourceId], title, metadata, [createdAt], [updatedAt] ${baseQuery} ORDER BY ${orderByField} ${dir} OFFSET @offset ROWS FETCH NEXT @perPage ROWS ONLY`;
260
194
  const dataRequest = this.pool.request();
261
195
  dataRequest.input("resourceId", resourceId);
262
- dataRequest.input("perPage", perPage);
263
- dataRequest.input("offset", currentOffset);
196
+ dataRequest.input("offset", offset);
197
+ if (limitValue > 2147483647) {
198
+ dataRequest.input("perPage", sql2__default.default.BigInt, limitValue);
199
+ } else {
200
+ dataRequest.input("perPage", limitValue);
201
+ }
264
202
  const rowsResult = await dataRequest.query(dataQuery);
265
203
  const rows = rowsResult.recordset || [];
266
204
  const threads = rows.map((thread) => ({
@@ -273,13 +211,13 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
273
211
  threads,
274
212
  total,
275
213
  page,
276
- perPage,
277
- hasMore: currentOffset + threads.length < total
214
+ perPage: perPageForResponse,
215
+ hasMore: perPageInput === false ? false : offset + perPage < total
278
216
  };
279
217
  } catch (error$1) {
280
218
  const mastraError = new error.MastraError(
281
219
  {
282
- id: "MASTRA_STORAGE_MSSQL_STORE_GET_THREADS_BY_RESOURCE_ID_PAGINATED_FAILED",
220
+ id: "MASTRA_STORAGE_MSSQL_STORE_LIST_THREADS_BY_RESOURCE_ID_FAILED",
283
221
  domain: error.ErrorDomain.STORAGE,
284
222
  category: error.ErrorCategory.THIRD_PARTY,
285
223
  details: {
@@ -291,7 +229,13 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
291
229
  );
292
230
  this.logger?.error?.(mastraError.toString());
293
231
  this.logger?.trackException?.(mastraError);
294
- return { threads: [], total: 0, page, perPage: perPageInput || 100, hasMore: false };
232
+ return {
233
+ threads: [],
234
+ total: 0,
235
+ page,
236
+ perPage: perPageForResponse,
237
+ hasMore: false
238
+ };
295
239
  }
296
240
  }
297
241
  async saveThread({ thread }) {
@@ -313,7 +257,12 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
313
257
  req.input("id", thread.id);
314
258
  req.input("resourceId", thread.resourceId);
315
259
  req.input("title", thread.title);
316
- req.input("metadata", thread.metadata ? JSON.stringify(thread.metadata) : null);
260
+ const metadata = thread.metadata ? JSON.stringify(thread.metadata) : null;
261
+ if (metadata === null) {
262
+ req.input("metadata", sql2__default.default.NVarChar, null);
263
+ } else {
264
+ req.input("metadata", metadata);
265
+ }
317
266
  req.input("createdAt", sql2__default.default.DateTime2, thread.createdAt);
318
267
  req.input("updatedAt", sql2__default.default.DateTime2, thread.updatedAt);
319
268
  await req.query(mergeSql);
@@ -332,30 +281,6 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
332
281
  );
333
282
  }
334
283
  }
335
- /**
336
- * @deprecated use getThreadsByResourceIdPaginated instead
337
- */
338
- async getThreadsByResourceId(args) {
339
- const { resourceId, orderBy = "createdAt", sortDirection = "DESC" } = args;
340
- try {
341
- const baseQuery = `FROM ${getTableName({ indexName: storage.TABLE_THREADS, schemaName: getSchemaName(this.schema) })} WHERE [resourceId] = @resourceId`;
342
- const orderByField = orderBy === "createdAt" ? "[createdAt]" : "[updatedAt]";
343
- const dataQuery = `SELECT id, [resourceId], title, metadata, [createdAt], [updatedAt] ${baseQuery} ORDER BY ${orderByField} ${sortDirection}`;
344
- const request = this.pool.request();
345
- request.input("resourceId", resourceId);
346
- const resultSet = await request.query(dataQuery);
347
- const rows = resultSet.recordset || [];
348
- return rows.map((thread) => ({
349
- ...thread,
350
- metadata: typeof thread.metadata === "string" ? JSON.parse(thread.metadata) : thread.metadata,
351
- createdAt: thread.createdAt,
352
- updatedAt: thread.updatedAt
353
- }));
354
- } catch (error) {
355
- this.logger?.error?.(`Error getting threads for resource ${resourceId}:`, error);
356
- return [];
357
- }
358
- }
359
284
  /**
360
285
  * Updates a thread's title and metadata, merging with existing metadata. Returns the updated thread.
361
286
  */
@@ -383,7 +308,7 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
383
308
  };
384
309
  try {
385
310
  const table = getTableName({ indexName: storage.TABLE_THREADS, schemaName: getSchemaName(this.schema) });
386
- const sql7 = `UPDATE ${table}
311
+ const sql5 = `UPDATE ${table}
387
312
  SET title = @title,
388
313
  metadata = @metadata,
389
314
  [updatedAt] = @updatedAt
@@ -394,7 +319,7 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
394
319
  req.input("title", title);
395
320
  req.input("metadata", JSON.stringify(mergedMetadata));
396
321
  req.input("updatedAt", /* @__PURE__ */ new Date());
397
- const result = await req.query(sql7);
322
+ const result = await req.query(sql5);
398
323
  let thread = result.recordset && result.recordset[0];
399
324
  if (thread && "seq_id" in thread) {
400
325
  const { seq_id, ...rest } = thread;
@@ -464,11 +389,9 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
464
389
  }
465
390
  async _getIncludedMessages({
466
391
  threadId,
467
- selectBy,
468
- orderByStatement
392
+ include
469
393
  }) {
470
394
  if (!threadId.trim()) throw new Error("threadId must be a non-empty string");
471
- const include = selectBy?.include;
472
395
  if (!include) return null;
473
396
  const unionQueries = [];
474
397
  const paramValues = [];
@@ -493,7 +416,7 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
493
416
  m.[resourceId],
494
417
  m.seq_id
495
418
  FROM (
496
- SELECT *, ROW_NUMBER() OVER (${orderByStatement}) as row_num
419
+ SELECT *, ROW_NUMBER() OVER (ORDER BY [createdAt] ASC) as row_num
497
420
  FROM ${getTableName({ indexName: storage.TABLE_MESSAGES, schemaName: getSchemaName(this.schema) })}
498
421
  WHERE [thread_id] = ${pThreadId}
499
422
  ) AS m
@@ -501,15 +424,17 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
501
424
  OR EXISTS (
502
425
  SELECT 1
503
426
  FROM (
504
- SELECT *, ROW_NUMBER() OVER (${orderByStatement}) as row_num
427
+ SELECT *, ROW_NUMBER() OVER (ORDER BY [createdAt] ASC) as row_num
505
428
  FROM ${getTableName({ indexName: storage.TABLE_MESSAGES, schemaName: getSchemaName(this.schema) })}
506
429
  WHERE [thread_id] = ${pThreadId}
507
430
  ) AS target
508
431
  WHERE target.id = ${pId}
509
432
  AND (
510
- (m.row_num <= target.row_num + ${pPrev} AND m.row_num > target.row_num)
433
+ -- Get previous messages (messages that come BEFORE the target)
434
+ (m.row_num < target.row_num AND m.row_num >= target.row_num - ${pPrev})
511
435
  OR
512
- (m.row_num >= target.row_num - ${pNext} AND m.row_num < target.row_num)
436
+ -- Get next messages (messages that come AFTER the target)
437
+ (m.row_num > target.row_num AND m.row_num <= target.row_num + ${pNext})
513
438
  )
514
439
  )
515
440
  `
@@ -538,34 +463,16 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
538
463
  });
539
464
  return dedupedRows;
540
465
  }
541
- async getMessages(args) {
542
- const { threadId, resourceId, format, selectBy } = args;
466
+ async listMessagesById({ messageIds }) {
467
+ if (messageIds.length === 0) return { messages: [] };
543
468
  const selectStatement = `SELECT seq_id, id, content, role, type, [createdAt], thread_id AS threadId, resourceId`;
544
469
  const orderByStatement = `ORDER BY [seq_id] DESC`;
545
- const limit = storage.resolveMessageLimit({ last: selectBy?.last, defaultLimit: 40 });
546
470
  try {
547
- if (!threadId.trim()) throw new Error("threadId must be a non-empty string");
548
471
  let rows = [];
549
- const include = selectBy?.include || [];
550
- if (include?.length) {
551
- const includeMessages = await this._getIncludedMessages({ threadId, selectBy, orderByStatement });
552
- if (includeMessages) {
553
- rows.push(...includeMessages);
554
- }
555
- }
556
- const excludeIds = rows.map((m) => m.id).filter(Boolean);
557
- let query = `${selectStatement} FROM ${getTableName({ indexName: storage.TABLE_MESSAGES, schemaName: getSchemaName(this.schema) })} WHERE [thread_id] = @threadId`;
472
+ let query = `${selectStatement} FROM ${getTableName({ indexName: storage.TABLE_MESSAGES, schemaName: getSchemaName(this.schema) })} WHERE [id] IN (${messageIds.map((_, i) => `@id${i}`).join(", ")})`;
558
473
  const request = this.pool.request();
559
- request.input("threadId", threadId);
560
- if (excludeIds.length > 0) {
561
- const excludeParams = excludeIds.map((_, idx) => `@id${idx}`);
562
- query += ` AND id NOT IN (${excludeParams.join(", ")})`;
563
- excludeIds.forEach((id, idx) => {
564
- request.input(`id${idx}`, id);
565
- });
566
- }
567
- query += ` ${orderByStatement} OFFSET 0 ROWS FETCH NEXT @limit ROWS ONLY`;
568
- request.input("limit", limit);
474
+ messageIds.forEach((id, i) => request.input(`id${i}`, id));
475
+ query += ` ${orderByStatement}`;
569
476
  const result = await request.query(query);
570
477
  const remainingRows = result.recordset || [];
571
478
  rows.push(...remainingRows);
@@ -573,153 +480,171 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
573
480
  const timeDiff = a.seq_id - b.seq_id;
574
481
  return timeDiff;
575
482
  });
576
- rows = rows.map(({ seq_id, ...rest }) => rest);
577
- return this._parseAndFormatMessages(rows, format);
483
+ const messagesWithParsedContent = rows.map((row) => {
484
+ if (typeof row.content === "string") {
485
+ try {
486
+ return { ...row, content: JSON.parse(row.content) };
487
+ } catch {
488
+ return row;
489
+ }
490
+ }
491
+ return row;
492
+ });
493
+ const cleanMessages = messagesWithParsedContent.map(({ seq_id, ...rest }) => rest);
494
+ const list = new agent.MessageList().add(cleanMessages, "memory");
495
+ return { messages: list.get.all.db() };
578
496
  } catch (error$1) {
579
497
  const mastraError = new error.MastraError(
580
498
  {
581
- id: "MASTRA_STORAGE_MSSQL_STORE_GET_MESSAGES_FAILED",
499
+ id: "MASTRA_STORAGE_MSSQL_STORE_LIST_MESSAGES_BY_ID_FAILED",
582
500
  domain: error.ErrorDomain.STORAGE,
583
501
  category: error.ErrorCategory.THIRD_PARTY,
584
502
  details: {
585
- threadId,
586
- resourceId: resourceId ?? ""
503
+ messageIds: JSON.stringify(messageIds)
587
504
  }
588
505
  },
589
506
  error$1
590
507
  );
591
508
  this.logger?.error?.(mastraError.toString());
592
- this.logger?.trackException(mastraError);
593
- return [];
509
+ this.logger?.trackException?.(mastraError);
510
+ return { messages: [] };
594
511
  }
595
512
  }
596
- async getMessagesById({
597
- messageIds,
598
- format
599
- }) {
600
- if (messageIds.length === 0) return [];
601
- const selectStatement = `SELECT seq_id, id, content, role, type, [createdAt], thread_id AS threadId, resourceId`;
602
- const orderByStatement = `ORDER BY [seq_id] DESC`;
603
- try {
604
- let rows = [];
605
- let query = `${selectStatement} FROM ${getTableName({ indexName: storage.TABLE_MESSAGES, schemaName: getSchemaName(this.schema) })} WHERE [id] IN (${messageIds.map((_, i) => `@id${i}`).join(", ")})`;
606
- const request = this.pool.request();
607
- messageIds.forEach((id, i) => request.input(`id${i}`, id));
608
- query += ` ${orderByStatement}`;
609
- const result = await request.query(query);
610
- const remainingRows = result.recordset || [];
611
- rows.push(...remainingRows);
612
- rows.sort((a, b) => {
613
- const timeDiff = a.seq_id - b.seq_id;
614
- return timeDiff;
615
- });
616
- rows = rows.map(({ seq_id, ...rest }) => rest);
617
- if (format === `v1`) return this._parseAndFormatMessages(rows, format);
618
- return this._parseAndFormatMessages(rows, `v2`);
619
- } catch (error$1) {
620
- const mastraError = new error.MastraError(
513
+ async listMessages(args) {
514
+ const { threadId, resourceId, include, filter, perPage: perPageInput, page = 0, orderBy } = args;
515
+ if (!threadId.trim()) {
516
+ throw new error.MastraError(
621
517
  {
622
- id: "MASTRA_STORAGE_MSSQL_STORE_GET_MESSAGES_BY_ID_FAILED",
518
+ id: "STORAGE_MSSQL_LIST_MESSAGES_INVALID_THREAD_ID",
623
519
  domain: error.ErrorDomain.STORAGE,
624
520
  category: error.ErrorCategory.THIRD_PARTY,
625
- details: {
626
- messageIds: JSON.stringify(messageIds)
627
- }
521
+ details: { threadId }
628
522
  },
629
- error$1
523
+ new Error("threadId must be a non-empty string")
630
524
  );
631
- this.logger?.error?.(mastraError.toString());
632
- this.logger?.trackException(mastraError);
633
- return [];
634
525
  }
635
- }
636
- async getMessagesPaginated(args) {
637
- const { threadId, resourceId, format, selectBy } = args;
638
- const { page = 0, perPage: perPageInput, dateRange } = selectBy?.pagination || {};
526
+ if (page < 0) {
527
+ throw new error.MastraError({
528
+ id: "MASTRA_STORAGE_MSSQL_STORE_INVALID_PAGE",
529
+ domain: error.ErrorDomain.STORAGE,
530
+ category: error.ErrorCategory.USER,
531
+ text: "Page number must be non-negative",
532
+ details: {
533
+ threadId,
534
+ page
535
+ }
536
+ });
537
+ }
538
+ const perPage = storage.normalizePerPage(perPageInput, 40);
539
+ const { offset, perPage: perPageForResponse } = storage.calculatePagination(page, perPageInput, perPage);
639
540
  try {
640
- if (!threadId.trim()) throw new Error("threadId must be a non-empty string");
641
- const fromDate = dateRange?.start;
642
- const toDate = dateRange?.end;
643
- const selectStatement = `SELECT seq_id, id, content, role, type, [createdAt], thread_id AS threadId, resourceId`;
644
- const orderByStatement = `ORDER BY [seq_id] DESC`;
645
- let messages = [];
646
- if (selectBy?.include?.length) {
647
- const includeMessages = await this._getIncludedMessages({ threadId, selectBy, orderByStatement });
648
- if (includeMessages) messages.push(...includeMessages);
649
- }
650
- const perPage = perPageInput !== void 0 ? perPageInput : storage.resolveMessageLimit({ last: selectBy?.last, defaultLimit: 40 });
651
- const currentOffset = page * perPage;
652
- const conditions = ["[thread_id] = @threadId"];
653
- const request = this.pool.request();
654
- request.input("threadId", threadId);
655
- if (fromDate instanceof Date && !isNaN(fromDate.getTime())) {
656
- conditions.push("[createdAt] >= @fromDate");
657
- request.input("fromDate", fromDate.toISOString());
658
- }
659
- if (toDate instanceof Date && !isNaN(toDate.getTime())) {
660
- conditions.push("[createdAt] <= @toDate");
661
- request.input("toDate", toDate.toISOString());
662
- }
663
- const whereClause = `WHERE ${conditions.join(" AND ")}`;
664
- const countQuery = `SELECT COUNT(*) as total FROM ${getTableName({ indexName: storage.TABLE_MESSAGES, schemaName: getSchemaName(this.schema) })} ${whereClause}`;
665
- const countResult = await request.query(countQuery);
541
+ const { field, direction } = this.parseOrderBy(orderBy, "ASC");
542
+ const orderByStatement = `ORDER BY [${field}] ${direction}, [seq_id] ${direction}`;
543
+ const tableName = getTableName({ indexName: storage.TABLE_MESSAGES, schemaName: getSchemaName(this.schema) });
544
+ const baseQuery = `SELECT seq_id, id, content, role, type, [createdAt], thread_id AS threadId, resourceId FROM ${tableName}`;
545
+ const filters = {
546
+ thread_id: threadId,
547
+ ...resourceId ? { resourceId } : {},
548
+ ...buildDateRangeFilter(filter?.dateRange, "createdAt")
549
+ };
550
+ const { sql: actualWhereClause = "", params: whereParams } = prepareWhereClause(
551
+ filters);
552
+ const bindWhereParams = (req) => {
553
+ Object.entries(whereParams).forEach(([paramName, paramValue]) => req.input(paramName, paramValue));
554
+ };
555
+ const countRequest = this.pool.request();
556
+ bindWhereParams(countRequest);
557
+ const countResult = await countRequest.query(`SELECT COUNT(*) as total FROM ${tableName}${actualWhereClause}`);
666
558
  const total = parseInt(countResult.recordset[0]?.total, 10) || 0;
667
- if (total === 0 && messages.length > 0) {
668
- const parsedIncluded = this._parseAndFormatMessages(messages, format);
559
+ const fetchBaseMessages = async () => {
560
+ const request = this.pool.request();
561
+ bindWhereParams(request);
562
+ if (perPageInput === false) {
563
+ const result2 = await request.query(`${baseQuery}${actualWhereClause} ${orderByStatement}`);
564
+ return result2.recordset || [];
565
+ }
566
+ request.input("offset", offset);
567
+ request.input("limit", perPage > 2147483647 ? sql2__default.default.BigInt : sql2__default.default.Int, perPage);
568
+ const result = await request.query(
569
+ `${baseQuery}${actualWhereClause} ${orderByStatement} OFFSET @offset ROWS FETCH NEXT @limit ROWS ONLY`
570
+ );
571
+ return result.recordset || [];
572
+ };
573
+ const baseRows = perPage === 0 ? [] : await fetchBaseMessages();
574
+ const messages = [...baseRows];
575
+ const seqById = /* @__PURE__ */ new Map();
576
+ messages.forEach((msg) => {
577
+ if (typeof msg.seq_id === "number") seqById.set(msg.id, msg.seq_id);
578
+ });
579
+ if (total === 0 && messages.length === 0 && (!include || include.length === 0)) {
669
580
  return {
670
- messages: parsedIncluded,
671
- total: parsedIncluded.length,
581
+ messages: [],
582
+ total: 0,
672
583
  page,
673
- perPage,
584
+ perPage: perPageForResponse,
674
585
  hasMore: false
675
586
  };
676
587
  }
677
- const excludeIds = messages.map((m) => m.id);
678
- if (excludeIds.length > 0) {
679
- const excludeParams = excludeIds.map((_, idx) => `@id${idx}`);
680
- conditions.push(`id NOT IN (${excludeParams.join(", ")})`);
681
- excludeIds.forEach((id, idx) => request.input(`id${idx}`, id));
682
- }
683
- const finalWhereClause = `WHERE ${conditions.join(" AND ")}`;
684
- const dataQuery = `${selectStatement} FROM ${getTableName({ indexName: storage.TABLE_MESSAGES, schemaName: getSchemaName(this.schema) })} ${finalWhereClause} ${orderByStatement} OFFSET @offset ROWS FETCH NEXT @limit ROWS ONLY`;
685
- request.input("offset", currentOffset);
686
- request.input("limit", perPage);
687
- const rowsResult = await request.query(dataQuery);
688
- const rows = rowsResult.recordset || [];
689
- rows.sort((a, b) => a.seq_id - b.seq_id);
690
- messages.push(...rows);
691
- const parsed = this._parseAndFormatMessages(messages, format);
588
+ if (include?.length) {
589
+ const messageIds = new Set(messages.map((m) => m.id));
590
+ const includeMessages = await this._getIncludedMessages({ threadId, include });
591
+ includeMessages?.forEach((msg) => {
592
+ if (!messageIds.has(msg.id)) {
593
+ messages.push(msg);
594
+ messageIds.add(msg.id);
595
+ if (typeof msg.seq_id === "number") seqById.set(msg.id, msg.seq_id);
596
+ }
597
+ });
598
+ }
599
+ const parsed = this._parseAndFormatMessages(messages, "v2");
600
+ const mult = direction === "ASC" ? 1 : -1;
601
+ const finalMessages = parsed.sort((a, b) => {
602
+ const aVal = field === "createdAt" ? new Date(a.createdAt).getTime() : a[field];
603
+ const bVal = field === "createdAt" ? new Date(b.createdAt).getTime() : b[field];
604
+ if (aVal == null || bVal == null) {
605
+ return aVal == null && bVal == null ? a.id.localeCompare(b.id) : aVal == null ? 1 : -1;
606
+ }
607
+ const diff = (typeof aVal === "number" && typeof bVal === "number" ? aVal - bVal : String(aVal).localeCompare(String(bVal))) * mult;
608
+ if (diff !== 0) return diff;
609
+ const seqA = seqById.get(a.id);
610
+ const seqB = seqById.get(b.id);
611
+ return seqA != null && seqB != null ? (seqA - seqB) * mult : a.id.localeCompare(b.id);
612
+ });
613
+ const returnedThreadMessageCount = finalMessages.filter((m) => m.threadId === threadId).length;
614
+ const hasMore = perPageInput !== false && returnedThreadMessageCount < total && offset + perPage < total;
692
615
  return {
693
- messages: parsed,
694
- total: total + excludeIds.length,
616
+ messages: finalMessages,
617
+ total,
695
618
  page,
696
- perPage,
697
- hasMore: currentOffset + rows.length < total
619
+ perPage: perPageForResponse,
620
+ hasMore
698
621
  };
699
622
  } catch (error$1) {
700
623
  const mastraError = new error.MastraError(
701
624
  {
702
- id: "MASTRA_STORAGE_MSSQL_STORE_GET_MESSAGES_PAGINATED_FAILED",
625
+ id: "MASTRA_STORAGE_MSSQL_STORE_LIST_MESSAGES_FAILED",
703
626
  domain: error.ErrorDomain.STORAGE,
704
627
  category: error.ErrorCategory.THIRD_PARTY,
705
628
  details: {
706
629
  threadId,
707
- resourceId: resourceId ?? "",
708
- page
630
+ resourceId: resourceId ?? ""
709
631
  }
710
632
  },
711
633
  error$1
712
634
  );
713
635
  this.logger?.error?.(mastraError.toString());
714
- this.logger?.trackException(mastraError);
715
- return { messages: [], total: 0, page, perPage: perPageInput || 40, hasMore: false };
636
+ this.logger?.trackException?.(mastraError);
637
+ return {
638
+ messages: [],
639
+ total: 0,
640
+ page,
641
+ perPage: perPageForResponse,
642
+ hasMore: false
643
+ };
716
644
  }
717
645
  }
718
- async saveMessages({
719
- messages,
720
- format
721
- }) {
722
- if (messages.length === 0) return messages;
646
+ async saveMessages({ messages }) {
647
+ if (messages.length === 0) return { messages: [] };
723
648
  const threadId = messages[0]?.threadId;
724
649
  if (!threadId) {
725
650
  throw new error.MastraError({
@@ -801,8 +726,7 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
801
726
  return message;
802
727
  });
803
728
  const list = new agent.MessageList().add(messagesWithParsedContent, "memory");
804
- if (format === "v2") return list.get.all.v2();
805
- return list.get.all.v1();
729
+ return { messages: list.get.all.db() };
806
730
  } catch (error$1) {
807
731
  throw new error.MastraError(
808
732
  {
@@ -978,8 +902,10 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
978
902
  return null;
979
903
  }
980
904
  return {
981
- ...result,
982
- workingMemory: typeof result.workingMemory === "object" ? JSON.stringify(result.workingMemory) : result.workingMemory,
905
+ id: result.id,
906
+ createdAt: result.createdAt,
907
+ updatedAt: result.updatedAt,
908
+ workingMemory: result.workingMemory,
983
909
  metadata: typeof result.metadata === "string" ? JSON.parse(result.metadata) : result.metadata
984
910
  };
985
911
  } catch (error$1) {
@@ -993,7 +919,7 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
993
919
  error$1
994
920
  );
995
921
  this.logger?.error?.(mastraError.toString());
996
- this.logger?.trackException(mastraError);
922
+ this.logger?.trackException?.(mastraError);
997
923
  throw mastraError;
998
924
  }
999
925
  }
@@ -1002,7 +928,7 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
1002
928
  tableName: storage.TABLE_RESOURCES,
1003
929
  record: {
1004
930
  ...resource,
1005
- metadata: JSON.stringify(resource.metadata)
931
+ metadata: resource.metadata
1006
932
  }
1007
933
  });
1008
934
  return resource;
@@ -1060,72 +986,391 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
1060
986
  error$1
1061
987
  );
1062
988
  this.logger?.error?.(mastraError.toString());
1063
- this.logger?.trackException(mastraError);
989
+ this.logger?.trackException?.(mastraError);
1064
990
  throw mastraError;
1065
991
  }
1066
992
  }
1067
993
  };
1068
- var StoreOperationsMSSQL = class extends storage.StoreOperations {
994
+ var ObservabilityMSSQL = class extends storage.ObservabilityStorage {
1069
995
  pool;
1070
- schemaName;
1071
- setupSchemaPromise = null;
1072
- schemaSetupComplete = void 0;
1073
- getSqlType(type, isPrimaryKey = false) {
1074
- switch (type) {
1075
- case "text":
1076
- return isPrimaryKey ? "NVARCHAR(255)" : "NVARCHAR(MAX)";
1077
- case "timestamp":
1078
- return "DATETIME2(7)";
1079
- case "uuid":
1080
- return "UNIQUEIDENTIFIER";
1081
- case "jsonb":
1082
- return "NVARCHAR(MAX)";
1083
- case "integer":
1084
- return "INT";
1085
- case "bigint":
1086
- return "BIGINT";
1087
- case "float":
1088
- return "FLOAT";
1089
- default:
1090
- throw new error.MastraError({
1091
- id: "MASTRA_STORAGE_MSSQL_STORE_TYPE_NOT_SUPPORTED",
1092
- domain: error.ErrorDomain.STORAGE,
1093
- category: error.ErrorCategory.THIRD_PARTY
1094
- });
1095
- }
1096
- }
1097
- constructor({ pool, schemaName }) {
996
+ operations;
997
+ schema;
998
+ constructor({
999
+ pool,
1000
+ operations,
1001
+ schema
1002
+ }) {
1098
1003
  super();
1099
1004
  this.pool = pool;
1100
- this.schemaName = schemaName;
1005
+ this.operations = operations;
1006
+ this.schema = schema;
1101
1007
  }
1102
- async hasColumn(table, column) {
1103
- const schema = this.schemaName || "dbo";
1104
- const request = this.pool.request();
1105
- request.input("schema", schema);
1106
- request.input("table", table);
1107
- request.input("column", column);
1108
- request.input("columnLower", column.toLowerCase());
1109
- const result = await request.query(
1110
- `SELECT 1 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = @schema AND TABLE_NAME = @table AND (COLUMN_NAME = @column OR COLUMN_NAME = @columnLower)`
1111
- );
1112
- return result.recordset.length > 0;
1008
+ get tracingStrategy() {
1009
+ return {
1010
+ preferred: "batch-with-updates",
1011
+ supported: ["batch-with-updates", "insert-only"]
1012
+ };
1113
1013
  }
1114
- async setupSchema() {
1115
- if (!this.schemaName || this.schemaSetupComplete) {
1116
- return;
1014
+ async createSpan(span) {
1015
+ try {
1016
+ const startedAt = span.startedAt instanceof Date ? span.startedAt.toISOString() : span.startedAt;
1017
+ const endedAt = span.endedAt instanceof Date ? span.endedAt.toISOString() : span.endedAt;
1018
+ const record = {
1019
+ ...span,
1020
+ startedAt,
1021
+ endedAt
1022
+ // Note: createdAt/updatedAt will be set by default values
1023
+ };
1024
+ return this.operations.insert({ tableName: storage.TABLE_SPANS, record });
1025
+ } catch (error$1) {
1026
+ throw new error.MastraError(
1027
+ {
1028
+ id: "MSSQL_STORE_CREATE_SPAN_FAILED",
1029
+ domain: error.ErrorDomain.STORAGE,
1030
+ category: error.ErrorCategory.USER,
1031
+ details: {
1032
+ spanId: span.spanId,
1033
+ traceId: span.traceId,
1034
+ spanType: span.spanType,
1035
+ spanName: span.name
1036
+ }
1037
+ },
1038
+ error$1
1039
+ );
1117
1040
  }
1118
- if (!this.setupSchemaPromise) {
1119
- this.setupSchemaPromise = (async () => {
1120
- try {
1121
- const checkRequest = this.pool.request();
1122
- checkRequest.input("schemaName", this.schemaName);
1123
- const checkResult = await checkRequest.query(`
1124
- SELECT 1 AS found FROM INFORMATION_SCHEMA.SCHEMATA WHERE SCHEMA_NAME = @schemaName
1125
- `);
1126
- const schemaExists = Array.isArray(checkResult.recordset) && checkResult.recordset.length > 0;
1127
- if (!schemaExists) {
1128
- try {
1041
+ }
1042
+ async getTrace(traceId) {
1043
+ try {
1044
+ const tableName = getTableName({
1045
+ indexName: storage.TABLE_SPANS,
1046
+ schemaName: getSchemaName(this.schema)
1047
+ });
1048
+ const request = this.pool.request();
1049
+ request.input("traceId", traceId);
1050
+ const result = await request.query(
1051
+ `SELECT
1052
+ [traceId], [spanId], [parentSpanId], [name], [scope], [spanType],
1053
+ [attributes], [metadata], [links], [input], [output], [error], [isEvent],
1054
+ [startedAt], [endedAt], [createdAt], [updatedAt]
1055
+ FROM ${tableName}
1056
+ WHERE [traceId] = @traceId
1057
+ ORDER BY [startedAt] DESC`
1058
+ );
1059
+ if (!result.recordset || result.recordset.length === 0) {
1060
+ return null;
1061
+ }
1062
+ return {
1063
+ traceId,
1064
+ spans: result.recordset.map(
1065
+ (span) => transformFromSqlRow({
1066
+ tableName: storage.TABLE_SPANS,
1067
+ sqlRow: span
1068
+ })
1069
+ )
1070
+ };
1071
+ } catch (error$1) {
1072
+ throw new error.MastraError(
1073
+ {
1074
+ id: "MSSQL_STORE_GET_TRACE_FAILED",
1075
+ domain: error.ErrorDomain.STORAGE,
1076
+ category: error.ErrorCategory.USER,
1077
+ details: {
1078
+ traceId
1079
+ }
1080
+ },
1081
+ error$1
1082
+ );
1083
+ }
1084
+ }
1085
+ async updateSpan({
1086
+ spanId,
1087
+ traceId,
1088
+ updates
1089
+ }) {
1090
+ try {
1091
+ const data = { ...updates };
1092
+ if (data.endedAt instanceof Date) {
1093
+ data.endedAt = data.endedAt.toISOString();
1094
+ }
1095
+ if (data.startedAt instanceof Date) {
1096
+ data.startedAt = data.startedAt.toISOString();
1097
+ }
1098
+ await this.operations.update({
1099
+ tableName: storage.TABLE_SPANS,
1100
+ keys: { spanId, traceId },
1101
+ data
1102
+ });
1103
+ } catch (error$1) {
1104
+ throw new error.MastraError(
1105
+ {
1106
+ id: "MSSQL_STORE_UPDATE_SPAN_FAILED",
1107
+ domain: error.ErrorDomain.STORAGE,
1108
+ category: error.ErrorCategory.USER,
1109
+ details: {
1110
+ spanId,
1111
+ traceId
1112
+ }
1113
+ },
1114
+ error$1
1115
+ );
1116
+ }
1117
+ }
1118
+ async getTracesPaginated({
1119
+ filters,
1120
+ pagination
1121
+ }) {
1122
+ const page = pagination?.page ?? 0;
1123
+ const perPage = pagination?.perPage ?? 10;
1124
+ const { entityId, entityType, ...actualFilters } = filters || {};
1125
+ const filtersWithDateRange = {
1126
+ ...actualFilters,
1127
+ ...buildDateRangeFilter(pagination?.dateRange, "startedAt"),
1128
+ parentSpanId: null
1129
+ // Only get root spans for traces
1130
+ };
1131
+ const whereClause = prepareWhereClause(filtersWithDateRange);
1132
+ let actualWhereClause = whereClause.sql;
1133
+ const params = { ...whereClause.params };
1134
+ let currentParamIndex = Object.keys(params).length + 1;
1135
+ if (entityId && entityType) {
1136
+ let name = "";
1137
+ if (entityType === "workflow") {
1138
+ name = `workflow run: '${entityId}'`;
1139
+ } else if (entityType === "agent") {
1140
+ name = `agent run: '${entityId}'`;
1141
+ } else {
1142
+ const error$1 = new error.MastraError({
1143
+ id: "MSSQL_STORE_GET_TRACES_PAGINATED_FAILED",
1144
+ domain: error.ErrorDomain.STORAGE,
1145
+ category: error.ErrorCategory.USER,
1146
+ details: {
1147
+ entityType
1148
+ },
1149
+ text: `Cannot filter by entity type: ${entityType}`
1150
+ });
1151
+ throw error$1;
1152
+ }
1153
+ const entityParam = `p${currentParamIndex++}`;
1154
+ if (actualWhereClause) {
1155
+ actualWhereClause += ` AND [name] = @${entityParam}`;
1156
+ } else {
1157
+ actualWhereClause = ` WHERE [name] = @${entityParam}`;
1158
+ }
1159
+ params[entityParam] = name;
1160
+ }
1161
+ const tableName = getTableName({
1162
+ indexName: storage.TABLE_SPANS,
1163
+ schemaName: getSchemaName(this.schema)
1164
+ });
1165
+ try {
1166
+ const countRequest = this.pool.request();
1167
+ Object.entries(params).forEach(([key, value]) => {
1168
+ countRequest.input(key, value);
1169
+ });
1170
+ const countResult = await countRequest.query(
1171
+ `SELECT COUNT(*) as count FROM ${tableName}${actualWhereClause}`
1172
+ );
1173
+ const total = countResult.recordset[0]?.count ?? 0;
1174
+ if (total === 0) {
1175
+ return {
1176
+ pagination: {
1177
+ total: 0,
1178
+ page,
1179
+ perPage,
1180
+ hasMore: false
1181
+ },
1182
+ spans: []
1183
+ };
1184
+ }
1185
+ const dataRequest = this.pool.request();
1186
+ Object.entries(params).forEach(([key, value]) => {
1187
+ dataRequest.input(key, value);
1188
+ });
1189
+ dataRequest.input("offset", page * perPage);
1190
+ dataRequest.input("limit", perPage);
1191
+ const dataResult = await dataRequest.query(
1192
+ `SELECT * FROM ${tableName}${actualWhereClause} ORDER BY [startedAt] DESC OFFSET @offset ROWS FETCH NEXT @limit ROWS ONLY`
1193
+ );
1194
+ const spans = dataResult.recordset.map(
1195
+ (row) => transformFromSqlRow({
1196
+ tableName: storage.TABLE_SPANS,
1197
+ sqlRow: row
1198
+ })
1199
+ );
1200
+ return {
1201
+ pagination: {
1202
+ total,
1203
+ page,
1204
+ perPage,
1205
+ hasMore: (page + 1) * perPage < total
1206
+ },
1207
+ spans
1208
+ };
1209
+ } catch (error$1) {
1210
+ throw new error.MastraError(
1211
+ {
1212
+ id: "MSSQL_STORE_GET_TRACES_PAGINATED_FAILED",
1213
+ domain: error.ErrorDomain.STORAGE,
1214
+ category: error.ErrorCategory.USER
1215
+ },
1216
+ error$1
1217
+ );
1218
+ }
1219
+ }
1220
+ async batchCreateSpans(args) {
1221
+ if (!args.records || args.records.length === 0) {
1222
+ return;
1223
+ }
1224
+ try {
1225
+ await this.operations.batchInsert({
1226
+ tableName: storage.TABLE_SPANS,
1227
+ records: args.records.map((span) => ({
1228
+ ...span,
1229
+ startedAt: span.startedAt instanceof Date ? span.startedAt.toISOString() : span.startedAt,
1230
+ endedAt: span.endedAt instanceof Date ? span.endedAt.toISOString() : span.endedAt
1231
+ }))
1232
+ });
1233
+ } catch (error$1) {
1234
+ throw new error.MastraError(
1235
+ {
1236
+ id: "MSSQL_STORE_BATCH_CREATE_SPANS_FAILED",
1237
+ domain: error.ErrorDomain.STORAGE,
1238
+ category: error.ErrorCategory.USER,
1239
+ details: {
1240
+ count: args.records.length
1241
+ }
1242
+ },
1243
+ error$1
1244
+ );
1245
+ }
1246
+ }
1247
+ async batchUpdateSpans(args) {
1248
+ if (!args.records || args.records.length === 0) {
1249
+ return;
1250
+ }
1251
+ try {
1252
+ const updates = args.records.map(({ traceId, spanId, updates: data }) => {
1253
+ const processedData = { ...data };
1254
+ if (processedData.endedAt instanceof Date) {
1255
+ processedData.endedAt = processedData.endedAt.toISOString();
1256
+ }
1257
+ if (processedData.startedAt instanceof Date) {
1258
+ processedData.startedAt = processedData.startedAt.toISOString();
1259
+ }
1260
+ return {
1261
+ keys: { spanId, traceId },
1262
+ data: processedData
1263
+ };
1264
+ });
1265
+ await this.operations.batchUpdate({
1266
+ tableName: storage.TABLE_SPANS,
1267
+ updates
1268
+ });
1269
+ } catch (error$1) {
1270
+ throw new error.MastraError(
1271
+ {
1272
+ id: "MSSQL_STORE_BATCH_UPDATE_SPANS_FAILED",
1273
+ domain: error.ErrorDomain.STORAGE,
1274
+ category: error.ErrorCategory.USER,
1275
+ details: {
1276
+ count: args.records.length
1277
+ }
1278
+ },
1279
+ error$1
1280
+ );
1281
+ }
1282
+ }
1283
+ async batchDeleteTraces(args) {
1284
+ if (!args.traceIds || args.traceIds.length === 0) {
1285
+ return;
1286
+ }
1287
+ try {
1288
+ const keys = args.traceIds.map((traceId) => ({ traceId }));
1289
+ await this.operations.batchDelete({
1290
+ tableName: storage.TABLE_SPANS,
1291
+ keys
1292
+ });
1293
+ } catch (error$1) {
1294
+ throw new error.MastraError(
1295
+ {
1296
+ id: "MSSQL_STORE_BATCH_DELETE_TRACES_FAILED",
1297
+ domain: error.ErrorDomain.STORAGE,
1298
+ category: error.ErrorCategory.USER,
1299
+ details: {
1300
+ count: args.traceIds.length
1301
+ }
1302
+ },
1303
+ error$1
1304
+ );
1305
+ }
1306
+ }
1307
+ };
1308
+ var StoreOperationsMSSQL = class extends storage.StoreOperations {
1309
+ pool;
1310
+ schemaName;
1311
+ setupSchemaPromise = null;
1312
+ schemaSetupComplete = void 0;
1313
+ getSqlType(type, isPrimaryKey = false, useLargeStorage = false) {
1314
+ switch (type) {
1315
+ case "text":
1316
+ if (useLargeStorage) {
1317
+ return "NVARCHAR(MAX)";
1318
+ }
1319
+ return isPrimaryKey ? "NVARCHAR(255)" : "NVARCHAR(400)";
1320
+ case "timestamp":
1321
+ return "DATETIME2(7)";
1322
+ case "uuid":
1323
+ return "UNIQUEIDENTIFIER";
1324
+ case "jsonb":
1325
+ return "NVARCHAR(MAX)";
1326
+ case "integer":
1327
+ return "INT";
1328
+ case "bigint":
1329
+ return "BIGINT";
1330
+ case "float":
1331
+ return "FLOAT";
1332
+ case "boolean":
1333
+ return "BIT";
1334
+ default:
1335
+ throw new error.MastraError({
1336
+ id: "MASTRA_STORAGE_MSSQL_STORE_TYPE_NOT_SUPPORTED",
1337
+ domain: error.ErrorDomain.STORAGE,
1338
+ category: error.ErrorCategory.THIRD_PARTY
1339
+ });
1340
+ }
1341
+ }
1342
+ constructor({ pool, schemaName }) {
1343
+ super();
1344
+ this.pool = pool;
1345
+ this.schemaName = schemaName;
1346
+ }
1347
+ async hasColumn(table, column) {
1348
+ const schema = this.schemaName || "dbo";
1349
+ const request = this.pool.request();
1350
+ request.input("schema", schema);
1351
+ request.input("table", table);
1352
+ request.input("column", column);
1353
+ request.input("columnLower", column.toLowerCase());
1354
+ const result = await request.query(
1355
+ `SELECT 1 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = @schema AND TABLE_NAME = @table AND (COLUMN_NAME = @column OR COLUMN_NAME = @columnLower)`
1356
+ );
1357
+ return result.recordset.length > 0;
1358
+ }
1359
+ async setupSchema() {
1360
+ if (!this.schemaName || this.schemaSetupComplete) {
1361
+ return;
1362
+ }
1363
+ if (!this.setupSchemaPromise) {
1364
+ this.setupSchemaPromise = (async () => {
1365
+ try {
1366
+ const checkRequest = this.pool.request();
1367
+ checkRequest.input("schemaName", this.schemaName);
1368
+ const checkResult = await checkRequest.query(`
1369
+ SELECT 1 AS found FROM INFORMATION_SCHEMA.SCHEMATA WHERE SCHEMA_NAME = @schemaName
1370
+ `);
1371
+ const schemaExists = Array.isArray(checkResult.recordset) && checkResult.recordset.length > 0;
1372
+ if (!schemaExists) {
1373
+ try {
1129
1374
  await this.pool.request().query(`CREATE SCHEMA [${this.schemaName}]`);
1130
1375
  this.logger?.info?.(`Schema "${this.schemaName}" created successfully`);
1131
1376
  } catch (error) {
@@ -1148,20 +1393,26 @@ var StoreOperationsMSSQL = class extends storage.StoreOperations {
1148
1393
  }
1149
1394
  await this.setupSchemaPromise;
1150
1395
  }
1151
- async insert({ tableName, record }) {
1396
+ async insert({
1397
+ tableName,
1398
+ record,
1399
+ transaction
1400
+ }) {
1152
1401
  try {
1153
- const columns = Object.keys(record).map((col) => utils.parseSqlIdentifier(col, "column name"));
1154
- const values = Object.values(record);
1155
- const paramNames = values.map((_, i) => `@param${i}`);
1156
- const insertSql = `INSERT INTO ${getTableName({ indexName: tableName, schemaName: getSchemaName(this.schemaName) })} (${columns.map((c) => `[${c}]`).join(", ")}) VALUES (${paramNames.join(", ")})`;
1157
- const request = this.pool.request();
1158
- values.forEach((value, i) => {
1159
- if (value instanceof Date) {
1160
- request.input(`param${i}`, sql2__default.default.DateTime2, value);
1161
- } else if (typeof value === "object" && value !== null) {
1162
- request.input(`param${i}`, JSON.stringify(value));
1402
+ const columns = Object.keys(record);
1403
+ const parsedColumns = columns.map((col) => utils.parseSqlIdentifier(col, "column name"));
1404
+ const paramNames = columns.map((_, i) => `@param${i}`);
1405
+ const insertSql = `INSERT INTO ${getTableName({ indexName: tableName, schemaName: getSchemaName(this.schemaName) })} (${parsedColumns.map((c) => `[${c}]`).join(", ")}) VALUES (${paramNames.join(", ")})`;
1406
+ const request = transaction ? transaction.request() : this.pool.request();
1407
+ columns.forEach((col, i) => {
1408
+ const value = record[col];
1409
+ const preparedValue = this.prepareValue(value, col, tableName);
1410
+ if (preparedValue instanceof Date) {
1411
+ request.input(`param${i}`, sql2__default.default.DateTime2, preparedValue);
1412
+ } else if (preparedValue === null || preparedValue === void 0) {
1413
+ request.input(`param${i}`, this.getMssqlType(tableName, col), null);
1163
1414
  } else {
1164
- request.input(`param${i}`, value);
1415
+ request.input(`param${i}`, preparedValue);
1165
1416
  }
1166
1417
  });
1167
1418
  await request.query(insertSql);
@@ -1185,7 +1436,7 @@ var StoreOperationsMSSQL = class extends storage.StoreOperations {
1185
1436
  try {
1186
1437
  await this.pool.request().query(`TRUNCATE TABLE ${fullTableName}`);
1187
1438
  } catch (truncateError) {
1188
- if (truncateError.message && truncateError.message.includes("foreign key")) {
1439
+ if (truncateError?.number === 4712) {
1189
1440
  await this.pool.request().query(`DELETE FROM ${fullTableName}`);
1190
1441
  } else {
1191
1442
  throw truncateError;
@@ -1208,9 +1459,11 @@ var StoreOperationsMSSQL = class extends storage.StoreOperations {
1208
1459
  getDefaultValue(type) {
1209
1460
  switch (type) {
1210
1461
  case "timestamp":
1211
- return "DEFAULT SYSDATETIMEOFFSET()";
1462
+ return "DEFAULT SYSUTCDATETIME()";
1212
1463
  case "jsonb":
1213
1464
  return "DEFAULT N'{}'";
1465
+ case "boolean":
1466
+ return "DEFAULT 0";
1214
1467
  default:
1215
1468
  return super.getDefaultValue(type);
1216
1469
  }
@@ -1221,13 +1474,29 @@ var StoreOperationsMSSQL = class extends storage.StoreOperations {
1221
1474
  }) {
1222
1475
  try {
1223
1476
  const uniqueConstraintColumns = tableName === storage.TABLE_WORKFLOW_SNAPSHOT ? ["workflow_name", "run_id"] : [];
1477
+ const largeDataColumns = [
1478
+ "workingMemory",
1479
+ "snapshot",
1480
+ "metadata",
1481
+ "content",
1482
+ // messages.content - can be very long conversation content
1483
+ "input",
1484
+ // evals.input - test input data
1485
+ "output",
1486
+ // evals.output - test output data
1487
+ "instructions",
1488
+ // evals.instructions - evaluation instructions
1489
+ "other"
1490
+ // traces.other - additional trace data
1491
+ ];
1224
1492
  const columns = Object.entries(schema).map(([name, def]) => {
1225
1493
  const parsedName = utils.parseSqlIdentifier(name, "column name");
1226
1494
  const constraints = [];
1227
1495
  if (def.primaryKey) constraints.push("PRIMARY KEY");
1228
1496
  if (!def.nullable) constraints.push("NOT NULL");
1229
1497
  const isIndexed = !!def.primaryKey || uniqueConstraintColumns.includes(name);
1230
- return `[${parsedName}] ${this.getSqlType(def.type, isIndexed)} ${constraints.join(" ")}`.trim();
1498
+ const useLargeStorage = largeDataColumns.includes(name);
1499
+ return `[${parsedName}] ${this.getSqlType(def.type, isIndexed, useLargeStorage)} ${constraints.join(" ")}`.trim();
1231
1500
  }).join(",\n");
1232
1501
  if (this.schemaName) {
1233
1502
  await this.setupSchema();
@@ -1314,7 +1583,19 @@ ${columns}
1314
1583
  const columnExists = Array.isArray(checkResult.recordset) && checkResult.recordset.length > 0;
1315
1584
  if (!columnExists) {
1316
1585
  const columnDef = schema[columnName];
1317
- const sqlType = this.getSqlType(columnDef.type);
1586
+ const largeDataColumns = [
1587
+ "workingMemory",
1588
+ "snapshot",
1589
+ "metadata",
1590
+ "content",
1591
+ "input",
1592
+ "output",
1593
+ "instructions",
1594
+ "other"
1595
+ ];
1596
+ const useLargeStorage = largeDataColumns.includes(columnName);
1597
+ const isIndexed = !!columnDef.primaryKey;
1598
+ const sqlType = this.getSqlType(columnDef.type, isIndexed, useLargeStorage);
1318
1599
  const nullable = columnDef.nullable === false ? "NOT NULL" : "";
1319
1600
  const defaultValue = columnDef.nullable === false ? this.getDefaultValue(columnDef.type) : "";
1320
1601
  const parsedColumnName = utils.parseSqlIdentifier(columnName, "column name");
@@ -1342,13 +1623,17 @@ ${columns}
1342
1623
  try {
1343
1624
  const keyEntries = Object.entries(keys).map(([key, value]) => [utils.parseSqlIdentifier(key, "column name"), value]);
1344
1625
  const conditions = keyEntries.map(([key], i) => `[${key}] = @param${i}`).join(" AND ");
1345
- const values = keyEntries.map(([_, value]) => value);
1346
- const sql7 = `SELECT * FROM ${getTableName({ indexName: tableName, schemaName: getSchemaName(this.schemaName) })} WHERE ${conditions}`;
1626
+ const sql5 = `SELECT * FROM ${getTableName({ indexName: tableName, schemaName: getSchemaName(this.schemaName) })} WHERE ${conditions}`;
1347
1627
  const request = this.pool.request();
1348
- values.forEach((value, i) => {
1349
- request.input(`param${i}`, value);
1628
+ keyEntries.forEach(([key, value], i) => {
1629
+ const preparedValue = this.prepareValue(value, key, tableName);
1630
+ if (preparedValue === null || preparedValue === void 0) {
1631
+ request.input(`param${i}`, this.getMssqlType(tableName, key), null);
1632
+ } else {
1633
+ request.input(`param${i}`, preparedValue);
1634
+ }
1350
1635
  });
1351
- const resultSet = await request.query(sql7);
1636
+ const resultSet = await request.query(sql5);
1352
1637
  const result = resultSet.recordset[0] || null;
1353
1638
  if (!result) {
1354
1639
  return null;
@@ -1380,7 +1665,7 @@ ${columns}
1380
1665
  try {
1381
1666
  await transaction.begin();
1382
1667
  for (const record of records) {
1383
- await this.insert({ tableName, record });
1668
+ await this.insert({ tableName, record, transaction });
1384
1669
  }
1385
1670
  await transaction.commit();
1386
1671
  } catch (error$1) {
@@ -1399,44 +1684,594 @@ ${columns}
1399
1684
  );
1400
1685
  }
1401
1686
  }
1402
- async dropTable({ tableName }) {
1687
+ async dropTable({ tableName }) {
1688
+ try {
1689
+ const tableNameWithSchema = getTableName({ indexName: tableName, schemaName: getSchemaName(this.schemaName) });
1690
+ await this.pool.request().query(`DROP TABLE IF EXISTS ${tableNameWithSchema}`);
1691
+ } catch (error$1) {
1692
+ throw new error.MastraError(
1693
+ {
1694
+ id: "MASTRA_STORAGE_MSSQL_STORE_DROP_TABLE_FAILED",
1695
+ domain: error.ErrorDomain.STORAGE,
1696
+ category: error.ErrorCategory.THIRD_PARTY,
1697
+ details: {
1698
+ tableName
1699
+ }
1700
+ },
1701
+ error$1
1702
+ );
1703
+ }
1704
+ }
1705
+ /**
1706
+ * Prepares a value for database operations, handling Date objects and JSON serialization
1707
+ */
1708
+ prepareValue(value, columnName, tableName) {
1709
+ if (value === null || value === void 0) {
1710
+ return value;
1711
+ }
1712
+ if (value instanceof Date) {
1713
+ return value;
1714
+ }
1715
+ const schema = storage.TABLE_SCHEMAS[tableName];
1716
+ const columnSchema = schema?.[columnName];
1717
+ if (columnSchema?.type === "boolean") {
1718
+ return value ? 1 : 0;
1719
+ }
1720
+ if (columnSchema?.type === "jsonb") {
1721
+ if (typeof value === "string") {
1722
+ const trimmed = value.trim();
1723
+ if (trimmed.length > 0) {
1724
+ try {
1725
+ JSON.parse(trimmed);
1726
+ return trimmed;
1727
+ } catch {
1728
+ }
1729
+ }
1730
+ return JSON.stringify(value);
1731
+ }
1732
+ if (typeof value === "bigint") {
1733
+ return value.toString();
1734
+ }
1735
+ return JSON.stringify(value);
1736
+ }
1737
+ if (typeof value === "object") {
1738
+ return JSON.stringify(value);
1739
+ }
1740
+ return value;
1741
+ }
1742
+ /**
1743
+ * Maps TABLE_SCHEMAS types to mssql param types (used when value is null)
1744
+ */
1745
+ getMssqlType(tableName, columnName) {
1746
+ const col = storage.TABLE_SCHEMAS[tableName]?.[columnName];
1747
+ switch (col?.type) {
1748
+ case "text":
1749
+ return sql2__default.default.NVarChar;
1750
+ case "timestamp":
1751
+ return sql2__default.default.DateTime2;
1752
+ case "uuid":
1753
+ return sql2__default.default.UniqueIdentifier;
1754
+ case "jsonb":
1755
+ return sql2__default.default.NVarChar;
1756
+ case "integer":
1757
+ return sql2__default.default.Int;
1758
+ case "bigint":
1759
+ return sql2__default.default.BigInt;
1760
+ case "float":
1761
+ return sql2__default.default.Float;
1762
+ case "boolean":
1763
+ return sql2__default.default.Bit;
1764
+ default:
1765
+ return sql2__default.default.NVarChar;
1766
+ }
1767
+ }
1768
+ /**
1769
+ * Update a single record in the database
1770
+ */
1771
+ async update({
1772
+ tableName,
1773
+ keys,
1774
+ data,
1775
+ transaction
1776
+ }) {
1777
+ try {
1778
+ if (!data || Object.keys(data).length === 0) {
1779
+ throw new error.MastraError({
1780
+ id: "MASTRA_STORAGE_MSSQL_UPDATE_EMPTY_DATA",
1781
+ domain: error.ErrorDomain.STORAGE,
1782
+ category: error.ErrorCategory.USER,
1783
+ text: "Cannot update with empty data payload"
1784
+ });
1785
+ }
1786
+ if (!keys || Object.keys(keys).length === 0) {
1787
+ throw new error.MastraError({
1788
+ id: "MASTRA_STORAGE_MSSQL_UPDATE_EMPTY_KEYS",
1789
+ domain: error.ErrorDomain.STORAGE,
1790
+ category: error.ErrorCategory.USER,
1791
+ text: "Cannot update without keys to identify records"
1792
+ });
1793
+ }
1794
+ const setClauses = [];
1795
+ const request = transaction ? transaction.request() : this.pool.request();
1796
+ let paramIndex = 0;
1797
+ Object.entries(data).forEach(([key, value]) => {
1798
+ const parsedKey = utils.parseSqlIdentifier(key, "column name");
1799
+ const paramName = `set${paramIndex++}`;
1800
+ setClauses.push(`[${parsedKey}] = @${paramName}`);
1801
+ const preparedValue = this.prepareValue(value, key, tableName);
1802
+ if (preparedValue === null || preparedValue === void 0) {
1803
+ request.input(paramName, this.getMssqlType(tableName, key), null);
1804
+ } else {
1805
+ request.input(paramName, preparedValue);
1806
+ }
1807
+ });
1808
+ const whereConditions = [];
1809
+ Object.entries(keys).forEach(([key, value]) => {
1810
+ const parsedKey = utils.parseSqlIdentifier(key, "column name");
1811
+ const paramName = `where${paramIndex++}`;
1812
+ whereConditions.push(`[${parsedKey}] = @${paramName}`);
1813
+ const preparedValue = this.prepareValue(value, key, tableName);
1814
+ if (preparedValue === null || preparedValue === void 0) {
1815
+ request.input(paramName, this.getMssqlType(tableName, key), null);
1816
+ } else {
1817
+ request.input(paramName, preparedValue);
1818
+ }
1819
+ });
1820
+ const tableName_ = getTableName({
1821
+ indexName: tableName,
1822
+ schemaName: getSchemaName(this.schemaName)
1823
+ });
1824
+ const updateSql = `UPDATE ${tableName_} SET ${setClauses.join(", ")} WHERE ${whereConditions.join(" AND ")}`;
1825
+ await request.query(updateSql);
1826
+ } catch (error$1) {
1827
+ throw new error.MastraError(
1828
+ {
1829
+ id: "MASTRA_STORAGE_MSSQL_STORE_UPDATE_FAILED",
1830
+ domain: error.ErrorDomain.STORAGE,
1831
+ category: error.ErrorCategory.THIRD_PARTY,
1832
+ details: {
1833
+ tableName
1834
+ }
1835
+ },
1836
+ error$1
1837
+ );
1838
+ }
1839
+ }
1840
+ /**
1841
+ * Update multiple records in a single batch transaction
1842
+ */
1843
+ async batchUpdate({
1844
+ tableName,
1845
+ updates
1846
+ }) {
1847
+ const transaction = this.pool.transaction();
1848
+ try {
1849
+ await transaction.begin();
1850
+ for (const { keys, data } of updates) {
1851
+ await this.update({ tableName, keys, data, transaction });
1852
+ }
1853
+ await transaction.commit();
1854
+ } catch (error$1) {
1855
+ await transaction.rollback();
1856
+ throw new error.MastraError(
1857
+ {
1858
+ id: "MASTRA_STORAGE_MSSQL_STORE_BATCH_UPDATE_FAILED",
1859
+ domain: error.ErrorDomain.STORAGE,
1860
+ category: error.ErrorCategory.THIRD_PARTY,
1861
+ details: {
1862
+ tableName,
1863
+ numberOfRecords: updates.length
1864
+ }
1865
+ },
1866
+ error$1
1867
+ );
1868
+ }
1869
+ }
1870
+ /**
1871
+ * Delete multiple records by keys
1872
+ */
1873
+ async batchDelete({ tableName, keys }) {
1874
+ if (keys.length === 0) {
1875
+ return;
1876
+ }
1877
+ const tableName_ = getTableName({
1878
+ indexName: tableName,
1879
+ schemaName: getSchemaName(this.schemaName)
1880
+ });
1881
+ const transaction = this.pool.transaction();
1882
+ try {
1883
+ await transaction.begin();
1884
+ for (const keySet of keys) {
1885
+ const conditions = [];
1886
+ const request = transaction.request();
1887
+ let paramIndex = 0;
1888
+ Object.entries(keySet).forEach(([key, value]) => {
1889
+ const parsedKey = utils.parseSqlIdentifier(key, "column name");
1890
+ const paramName = `p${paramIndex++}`;
1891
+ conditions.push(`[${parsedKey}] = @${paramName}`);
1892
+ const preparedValue = this.prepareValue(value, key, tableName);
1893
+ if (preparedValue === null || preparedValue === void 0) {
1894
+ request.input(paramName, this.getMssqlType(tableName, key), null);
1895
+ } else {
1896
+ request.input(paramName, preparedValue);
1897
+ }
1898
+ });
1899
+ const deleteSql = `DELETE FROM ${tableName_} WHERE ${conditions.join(" AND ")}`;
1900
+ await request.query(deleteSql);
1901
+ }
1902
+ await transaction.commit();
1903
+ } catch (error$1) {
1904
+ await transaction.rollback();
1905
+ throw new error.MastraError(
1906
+ {
1907
+ id: "MASTRA_STORAGE_MSSQL_STORE_BATCH_DELETE_FAILED",
1908
+ domain: error.ErrorDomain.STORAGE,
1909
+ category: error.ErrorCategory.THIRD_PARTY,
1910
+ details: {
1911
+ tableName,
1912
+ numberOfRecords: keys.length
1913
+ }
1914
+ },
1915
+ error$1
1916
+ );
1917
+ }
1918
+ }
1919
+ /**
1920
+ * Create a new index on a table
1921
+ */
1922
+ async createIndex(options) {
1923
+ try {
1924
+ const { name, table, columns, unique = false, where } = options;
1925
+ const schemaName = this.schemaName || "dbo";
1926
+ const fullTableName = getTableName({
1927
+ indexName: table,
1928
+ schemaName: getSchemaName(this.schemaName)
1929
+ });
1930
+ const indexNameSafe = utils.parseSqlIdentifier(name, "index name");
1931
+ const checkRequest = this.pool.request();
1932
+ checkRequest.input("indexName", indexNameSafe);
1933
+ checkRequest.input("schemaName", schemaName);
1934
+ checkRequest.input("tableName", table);
1935
+ const indexExists = await checkRequest.query(`
1936
+ SELECT 1 as found
1937
+ FROM sys.indexes i
1938
+ INNER JOIN sys.tables t ON i.object_id = t.object_id
1939
+ INNER JOIN sys.schemas s ON t.schema_id = s.schema_id
1940
+ WHERE i.name = @indexName
1941
+ AND s.name = @schemaName
1942
+ AND t.name = @tableName
1943
+ `);
1944
+ if (indexExists.recordset && indexExists.recordset.length > 0) {
1945
+ return;
1946
+ }
1947
+ const uniqueStr = unique ? "UNIQUE " : "";
1948
+ const columnsStr = columns.map((col) => {
1949
+ if (col.includes(" DESC") || col.includes(" ASC")) {
1950
+ const [colName, ...modifiers] = col.split(" ");
1951
+ if (!colName) {
1952
+ throw new Error(`Invalid column specification: ${col}`);
1953
+ }
1954
+ return `[${utils.parseSqlIdentifier(colName, "column name")}] ${modifiers.join(" ")}`;
1955
+ }
1956
+ return `[${utils.parseSqlIdentifier(col, "column name")}]`;
1957
+ }).join(", ");
1958
+ const whereStr = where ? ` WHERE ${where}` : "";
1959
+ const createIndexSql = `CREATE ${uniqueStr}INDEX [${indexNameSafe}] ON ${fullTableName} (${columnsStr})${whereStr}`;
1960
+ await this.pool.request().query(createIndexSql);
1961
+ } catch (error$1) {
1962
+ throw new error.MastraError(
1963
+ {
1964
+ id: "MASTRA_STORAGE_MSSQL_INDEX_CREATE_FAILED",
1965
+ domain: error.ErrorDomain.STORAGE,
1966
+ category: error.ErrorCategory.THIRD_PARTY,
1967
+ details: {
1968
+ indexName: options.name,
1969
+ tableName: options.table
1970
+ }
1971
+ },
1972
+ error$1
1973
+ );
1974
+ }
1975
+ }
1976
+ /**
1977
+ * Drop an existing index
1978
+ */
1979
+ async dropIndex(indexName) {
1980
+ try {
1981
+ const schemaName = this.schemaName || "dbo";
1982
+ const indexNameSafe = utils.parseSqlIdentifier(indexName, "index name");
1983
+ const checkRequest = this.pool.request();
1984
+ checkRequest.input("indexName", indexNameSafe);
1985
+ checkRequest.input("schemaName", schemaName);
1986
+ const result = await checkRequest.query(`
1987
+ SELECT t.name as table_name
1988
+ FROM sys.indexes i
1989
+ INNER JOIN sys.tables t ON i.object_id = t.object_id
1990
+ INNER JOIN sys.schemas s ON t.schema_id = s.schema_id
1991
+ WHERE i.name = @indexName
1992
+ AND s.name = @schemaName
1993
+ `);
1994
+ if (!result.recordset || result.recordset.length === 0) {
1995
+ return;
1996
+ }
1997
+ if (result.recordset.length > 1) {
1998
+ const tables = result.recordset.map((r) => r.table_name).join(", ");
1999
+ throw new error.MastraError({
2000
+ id: "MASTRA_STORAGE_MSSQL_INDEX_AMBIGUOUS",
2001
+ domain: error.ErrorDomain.STORAGE,
2002
+ category: error.ErrorCategory.USER,
2003
+ text: `Index "${indexNameSafe}" exists on multiple tables (${tables}) in schema "${schemaName}". Please drop indexes manually or ensure unique index names.`
2004
+ });
2005
+ }
2006
+ const tableName = result.recordset[0].table_name;
2007
+ const fullTableName = getTableName({
2008
+ indexName: tableName,
2009
+ schemaName: getSchemaName(this.schemaName)
2010
+ });
2011
+ const dropSql = `DROP INDEX [${indexNameSafe}] ON ${fullTableName}`;
2012
+ await this.pool.request().query(dropSql);
2013
+ } catch (error$1) {
2014
+ throw new error.MastraError(
2015
+ {
2016
+ id: "MASTRA_STORAGE_MSSQL_INDEX_DROP_FAILED",
2017
+ domain: error.ErrorDomain.STORAGE,
2018
+ category: error.ErrorCategory.THIRD_PARTY,
2019
+ details: {
2020
+ indexName
2021
+ }
2022
+ },
2023
+ error$1
2024
+ );
2025
+ }
2026
+ }
2027
+ /**
2028
+ * List indexes for a specific table or all tables
2029
+ */
2030
+ async listIndexes(tableName) {
2031
+ try {
2032
+ const schemaName = this.schemaName || "dbo";
2033
+ let query;
2034
+ const request = this.pool.request();
2035
+ request.input("schemaName", schemaName);
2036
+ if (tableName) {
2037
+ query = `
2038
+ SELECT
2039
+ i.name as name,
2040
+ o.name as [table],
2041
+ i.is_unique as is_unique,
2042
+ CAST(SUM(s.used_page_count) * 8 / 1024.0 AS VARCHAR(50)) + ' MB' as size
2043
+ FROM sys.indexes i
2044
+ INNER JOIN sys.objects o ON i.object_id = o.object_id
2045
+ INNER JOIN sys.schemas sch ON o.schema_id = sch.schema_id
2046
+ LEFT JOIN sys.dm_db_partition_stats s ON i.object_id = s.object_id AND i.index_id = s.index_id
2047
+ WHERE sch.name = @schemaName
2048
+ AND o.name = @tableName
2049
+ AND i.name IS NOT NULL
2050
+ GROUP BY i.name, o.name, i.is_unique
2051
+ `;
2052
+ request.input("tableName", tableName);
2053
+ } else {
2054
+ query = `
2055
+ SELECT
2056
+ i.name as name,
2057
+ o.name as [table],
2058
+ i.is_unique as is_unique,
2059
+ CAST(SUM(s.used_page_count) * 8 / 1024.0 AS VARCHAR(50)) + ' MB' as size
2060
+ FROM sys.indexes i
2061
+ INNER JOIN sys.objects o ON i.object_id = o.object_id
2062
+ INNER JOIN sys.schemas sch ON o.schema_id = sch.schema_id
2063
+ LEFT JOIN sys.dm_db_partition_stats s ON i.object_id = s.object_id AND i.index_id = s.index_id
2064
+ WHERE sch.name = @schemaName
2065
+ AND i.name IS NOT NULL
2066
+ GROUP BY i.name, o.name, i.is_unique
2067
+ `;
2068
+ }
2069
+ const result = await request.query(query);
2070
+ const indexes = [];
2071
+ for (const row of result.recordset) {
2072
+ const colRequest = this.pool.request();
2073
+ colRequest.input("indexName", row.name);
2074
+ colRequest.input("schemaName", schemaName);
2075
+ const colResult = await colRequest.query(`
2076
+ SELECT c.name as column_name
2077
+ FROM sys.indexes i
2078
+ INNER JOIN sys.index_columns ic ON i.object_id = ic.object_id AND i.index_id = ic.index_id
2079
+ INNER JOIN sys.columns c ON ic.object_id = c.object_id AND ic.column_id = c.column_id
2080
+ INNER JOIN sys.objects o ON i.object_id = o.object_id
2081
+ INNER JOIN sys.schemas s ON o.schema_id = s.schema_id
2082
+ WHERE i.name = @indexName
2083
+ AND s.name = @schemaName
2084
+ ORDER BY ic.key_ordinal
2085
+ `);
2086
+ indexes.push({
2087
+ name: row.name,
2088
+ table: row.table,
2089
+ columns: colResult.recordset.map((c) => c.column_name),
2090
+ unique: row.is_unique || false,
2091
+ size: row.size || "0 MB",
2092
+ definition: ""
2093
+ // MSSQL doesn't store definition like PG
2094
+ });
2095
+ }
2096
+ return indexes;
2097
+ } catch (error$1) {
2098
+ throw new error.MastraError(
2099
+ {
2100
+ id: "MASTRA_STORAGE_MSSQL_INDEX_LIST_FAILED",
2101
+ domain: error.ErrorDomain.STORAGE,
2102
+ category: error.ErrorCategory.THIRD_PARTY,
2103
+ details: tableName ? {
2104
+ tableName
2105
+ } : {}
2106
+ },
2107
+ error$1
2108
+ );
2109
+ }
2110
+ }
2111
+ /**
2112
+ * Get detailed statistics for a specific index
2113
+ */
2114
+ async describeIndex(indexName) {
2115
+ try {
2116
+ const schemaName = this.schemaName || "dbo";
2117
+ const request = this.pool.request();
2118
+ request.input("indexName", indexName);
2119
+ request.input("schemaName", schemaName);
2120
+ const query = `
2121
+ SELECT
2122
+ i.name as name,
2123
+ o.name as [table],
2124
+ i.is_unique as is_unique,
2125
+ CAST(SUM(s.used_page_count) * 8 / 1024.0 AS VARCHAR(50)) + ' MB' as size,
2126
+ i.type_desc as method,
2127
+ ISNULL(us.user_scans, 0) as scans,
2128
+ ISNULL(us.user_seeks + us.user_scans, 0) as tuples_read,
2129
+ ISNULL(us.user_lookups, 0) as tuples_fetched
2130
+ FROM sys.indexes i
2131
+ INNER JOIN sys.objects o ON i.object_id = o.object_id
2132
+ INNER JOIN sys.schemas sch ON o.schema_id = sch.schema_id
2133
+ LEFT JOIN sys.dm_db_partition_stats s ON i.object_id = s.object_id AND i.index_id = s.index_id
2134
+ LEFT JOIN sys.dm_db_index_usage_stats us ON i.object_id = us.object_id AND i.index_id = us.index_id
2135
+ WHERE i.name = @indexName
2136
+ AND sch.name = @schemaName
2137
+ GROUP BY i.name, o.name, i.is_unique, i.type_desc, us.user_seeks, us.user_scans, us.user_lookups
2138
+ `;
2139
+ const result = await request.query(query);
2140
+ if (!result.recordset || result.recordset.length === 0) {
2141
+ throw new Error(`Index "${indexName}" not found in schema "${schemaName}"`);
2142
+ }
2143
+ const row = result.recordset[0];
2144
+ const colRequest = this.pool.request();
2145
+ colRequest.input("indexName", indexName);
2146
+ colRequest.input("schemaName", schemaName);
2147
+ const colResult = await colRequest.query(`
2148
+ SELECT c.name as column_name
2149
+ FROM sys.indexes i
2150
+ INNER JOIN sys.index_columns ic ON i.object_id = ic.object_id AND i.index_id = ic.index_id
2151
+ INNER JOIN sys.columns c ON ic.object_id = c.object_id AND ic.column_id = c.column_id
2152
+ INNER JOIN sys.objects o ON i.object_id = o.object_id
2153
+ INNER JOIN sys.schemas s ON o.schema_id = s.schema_id
2154
+ WHERE i.name = @indexName
2155
+ AND s.name = @schemaName
2156
+ ORDER BY ic.key_ordinal
2157
+ `);
2158
+ return {
2159
+ name: row.name,
2160
+ table: row.table,
2161
+ columns: colResult.recordset.map((c) => c.column_name),
2162
+ unique: row.is_unique || false,
2163
+ size: row.size || "0 MB",
2164
+ definition: "",
2165
+ method: row.method?.toLowerCase() || "nonclustered",
2166
+ scans: Number(row.scans) || 0,
2167
+ tuples_read: Number(row.tuples_read) || 0,
2168
+ tuples_fetched: Number(row.tuples_fetched) || 0
2169
+ };
2170
+ } catch (error$1) {
2171
+ throw new error.MastraError(
2172
+ {
2173
+ id: "MASTRA_STORAGE_MSSQL_INDEX_DESCRIBE_FAILED",
2174
+ domain: error.ErrorDomain.STORAGE,
2175
+ category: error.ErrorCategory.THIRD_PARTY,
2176
+ details: {
2177
+ indexName
2178
+ }
2179
+ },
2180
+ error$1
2181
+ );
2182
+ }
2183
+ }
2184
+ /**
2185
+ * Returns definitions for automatic performance indexes
2186
+ * IMPORTANT: Uses seq_id DESC instead of createdAt DESC for MSSQL due to millisecond accuracy limitations
2187
+ * NOTE: Using NVARCHAR(400) for text columns (800 bytes) leaves room for composite indexes
2188
+ */
2189
+ getAutomaticIndexDefinitions() {
2190
+ const schemaPrefix = this.schemaName ? `${this.schemaName}_` : "";
2191
+ return [
2192
+ // Composite indexes for optimal filtering + sorting performance
2193
+ // NVARCHAR(400) = 800 bytes, plus BIGINT (8 bytes) = 808 bytes total (under 900-byte limit)
2194
+ {
2195
+ name: `${schemaPrefix}mastra_threads_resourceid_seqid_idx`,
2196
+ table: storage.TABLE_THREADS,
2197
+ columns: ["resourceId", "seq_id DESC"]
2198
+ },
2199
+ {
2200
+ name: `${schemaPrefix}mastra_messages_thread_id_seqid_idx`,
2201
+ table: storage.TABLE_MESSAGES,
2202
+ columns: ["thread_id", "seq_id DESC"]
2203
+ },
2204
+ {
2205
+ name: `${schemaPrefix}mastra_traces_name_seqid_idx`,
2206
+ table: storage.TABLE_TRACES,
2207
+ columns: ["name", "seq_id DESC"]
2208
+ },
2209
+ {
2210
+ name: `${schemaPrefix}mastra_scores_trace_id_span_id_seqid_idx`,
2211
+ table: storage.TABLE_SCORERS,
2212
+ columns: ["traceId", "spanId", "seq_id DESC"]
2213
+ },
2214
+ // Spans indexes for optimal trace querying
2215
+ {
2216
+ name: `${schemaPrefix}mastra_ai_spans_traceid_startedat_idx`,
2217
+ table: storage.TABLE_SPANS,
2218
+ columns: ["traceId", "startedAt DESC"]
2219
+ },
2220
+ {
2221
+ name: `${schemaPrefix}mastra_ai_spans_parentspanid_startedat_idx`,
2222
+ table: storage.TABLE_SPANS,
2223
+ columns: ["parentSpanId", "startedAt DESC"]
2224
+ },
2225
+ {
2226
+ name: `${schemaPrefix}mastra_ai_spans_name_idx`,
2227
+ table: storage.TABLE_SPANS,
2228
+ columns: ["name"]
2229
+ },
2230
+ {
2231
+ name: `${schemaPrefix}mastra_ai_spans_spantype_startedat_idx`,
2232
+ table: storage.TABLE_SPANS,
2233
+ columns: ["spanType", "startedAt DESC"]
2234
+ }
2235
+ ];
2236
+ }
2237
+ /**
2238
+ * Creates automatic indexes for optimal query performance
2239
+ * Uses getAutomaticIndexDefinitions() to determine which indexes to create
2240
+ */
2241
+ async createAutomaticIndexes() {
1403
2242
  try {
1404
- const tableNameWithSchema = getTableName({ indexName: tableName, schemaName: getSchemaName(this.schemaName) });
1405
- await this.pool.request().query(`DROP TABLE IF EXISTS ${tableNameWithSchema}`);
2243
+ const indexes = this.getAutomaticIndexDefinitions();
2244
+ for (const indexOptions of indexes) {
2245
+ try {
2246
+ await this.createIndex(indexOptions);
2247
+ } catch (error) {
2248
+ this.logger?.warn?.(`Failed to create index ${indexOptions.name}:`, error);
2249
+ }
2250
+ }
1406
2251
  } catch (error$1) {
1407
2252
  throw new error.MastraError(
1408
2253
  {
1409
- id: "MASTRA_STORAGE_MSSQL_STORE_DROP_TABLE_FAILED",
2254
+ id: "MASTRA_STORAGE_MSSQL_STORE_CREATE_PERFORMANCE_INDEXES_FAILED",
1410
2255
  domain: error.ErrorDomain.STORAGE,
1411
- category: error.ErrorCategory.THIRD_PARTY,
1412
- details: {
1413
- tableName
1414
- }
2256
+ category: error.ErrorCategory.THIRD_PARTY
1415
2257
  },
1416
2258
  error$1
1417
2259
  );
1418
2260
  }
1419
2261
  }
1420
2262
  };
1421
- function parseJSON(jsonString) {
1422
- try {
1423
- return JSON.parse(jsonString);
1424
- } catch {
1425
- return jsonString;
1426
- }
1427
- }
1428
2263
  function transformScoreRow(row) {
1429
2264
  return {
1430
2265
  ...row,
1431
- input: parseJSON(row.input),
1432
- scorer: parseJSON(row.scorer),
1433
- preprocessStepResult: parseJSON(row.preprocessStepResult),
1434
- analyzeStepResult: parseJSON(row.analyzeStepResult),
1435
- metadata: parseJSON(row.metadata),
1436
- output: parseJSON(row.output),
1437
- additionalContext: parseJSON(row.additionalContext),
1438
- runtimeContext: parseJSON(row.runtimeContext),
1439
- entity: parseJSON(row.entity),
2266
+ input: storage.safelyParseJSON(row.input),
2267
+ scorer: storage.safelyParseJSON(row.scorer),
2268
+ preprocessStepResult: storage.safelyParseJSON(row.preprocessStepResult),
2269
+ analyzeStepResult: storage.safelyParseJSON(row.analyzeStepResult),
2270
+ metadata: storage.safelyParseJSON(row.metadata),
2271
+ output: storage.safelyParseJSON(row.output),
2272
+ additionalContext: storage.safelyParseJSON(row.additionalContext),
2273
+ requestContext: storage.safelyParseJSON(row.requestContext),
2274
+ entity: storage.safelyParseJSON(row.entity),
1440
2275
  createdAt: row.createdAt,
1441
2276
  updatedAt: row.updatedAt
1442
2277
  };
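
The load, update, batchUpdate, and batchDelete changes in the hunk above all apply the same binding rule: run values through prepareValue first, and when the prepared value is null, pass an explicit mssql type from getMssqlType so the driver knows how to bind the parameter. A self-contained sketch of that rule with the mssql driver follows; the connection settings are placeholders for any reachable SQL Server.

    // Sketch only: typed null binding with the mssql driver, mirroring the pattern above.
    const sql = require('mssql');

    async function main() {
      const pool = await sql.connect({
        server: 'localhost',
        database: 'master',
        user: 'sa',
        password: 'your-password', // placeholder
        options: { encrypt: true, trustServerCertificate: true },
      });
      const request = pool.request();
      request.input('title', 'hello');               // non-null: the driver infers the type
      request.input('metadata', sql.NVarChar, null); // null: an explicit type is required
      const result = await request.query('SELECT @title AS title, @metadata AS metadata');
      console.log(result.recordset); // [ { title: 'hello', metadata: null } ]
      await pool.close();
    }

    main().catch(console.error);
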
@@ -1479,6 +2314,19 @@ var ScoresMSSQL = class extends storage.ScoresStorage {
1479
2314
  }
1480
2315
  }
1481
2316
  async saveScore(score) {
2317
+ let validatedScore;
2318
+ try {
2319
+ validatedScore = evals.saveScorePayloadSchema.parse(score);
2320
+ } catch (error$1) {
2321
+ throw new error.MastraError(
2322
+ {
2323
+ id: "MASTRA_STORAGE_MSSQL_STORE_SAVE_SCORE_VALIDATION_FAILED",
2324
+ domain: error.ErrorDomain.STORAGE,
2325
+ category: error.ErrorCategory.THIRD_PARTY
2326
+ },
2327
+ error$1
2328
+ );
2329
+ }
1482
2330
  try {
1483
2331
  const scoreId = crypto.randomUUID();
1484
2332
  const {
@@ -1489,24 +2337,24 @@ var ScoresMSSQL = class extends storage.ScoresStorage {
1489
2337
  input,
1490
2338
  output,
1491
2339
  additionalContext,
1492
- runtimeContext,
2340
+ requestContext,
1493
2341
  entity,
1494
2342
  ...rest
1495
- } = score;
2343
+ } = validatedScore;
1496
2344
  await this.operations.insert({
1497
2345
  tableName: storage.TABLE_SCORERS,
1498
2346
  record: {
1499
2347
  id: scoreId,
1500
2348
  ...rest,
1501
- input: JSON.stringify(input) || "",
1502
- output: JSON.stringify(output) || "",
1503
- preprocessStepResult: preprocessStepResult ? JSON.stringify(preprocessStepResult) : null,
1504
- analyzeStepResult: analyzeStepResult ? JSON.stringify(analyzeStepResult) : null,
1505
- metadata: metadata ? JSON.stringify(metadata) : null,
1506
- additionalContext: additionalContext ? JSON.stringify(additionalContext) : null,
1507
- runtimeContext: runtimeContext ? JSON.stringify(runtimeContext) : null,
1508
- entity: entity ? JSON.stringify(entity) : null,
1509
- scorer: scorer ? JSON.stringify(scorer) : null,
2349
+ input: input || "",
2350
+ output: output || "",
2351
+ preprocessStepResult: preprocessStepResult || null,
2352
+ analyzeStepResult: analyzeStepResult || null,
2353
+ metadata: metadata || null,
2354
+ additionalContext: additionalContext || null,
2355
+ requestContext: requestContext || null,
2356
+ entity: entity || null,
2357
+ scorer: scorer || null,
1510
2358
  createdAt: (/* @__PURE__ */ new Date()).toISOString(),
1511
2359
  updatedAt: (/* @__PURE__ */ new Date()).toISOString()
1512
2360
  }
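
saveScore now validates the payload with evals.saveScorePayloadSchema.parse before any SQL runs (previous hunk) and hands raw values to insert, which serializes jsonb columns via prepareValue instead of pre-stringifying them here. The guard pattern is sketched below with a stand-in zod schema; the real saveScorePayloadSchema ships with @mastra/core/evals and its fields are not shown in this diff, so every field in the schema below is illustrative.

    // Sketch only: validate-then-insert, as saveScore now does. The schema is a stand-in.
    const { z } = require('zod');

    const scorePayloadSchema = z.object({
      scorerId: z.string(),
      runId: z.string(),
      score: z.number(), // illustrative field; the real shape is defined by saveScorePayloadSchema
    });

    function saveScore(payload, insert) {
      let validated;
      try {
        validated = scorePayloadSchema.parse(payload); // throws before any SQL runs
      } catch (err) {
        throw new Error(`SAVE_SCORE_VALIDATION_FAILED: ${err.message}`);
      }
      return insert(validated); // raw values; serialization happens in the storage layer
    }

    saveScore({ scorerId: 's1', runId: 'r1', score: 0.9 }, record => record);
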
@@ -1524,41 +2372,70 @@ var ScoresMSSQL = class extends storage.ScoresStorage {
1524
2372
  );
1525
2373
  }
1526
2374
  }
1527
- async getScoresByScorerId({
2375
+ async listScoresByScorerId({
1528
2376
  scorerId,
1529
- pagination
2377
+ pagination,
2378
+ entityId,
2379
+ entityType,
2380
+ source
1530
2381
  }) {
1531
2382
  try {
1532
- const request = this.pool.request();
1533
- request.input("p1", scorerId);
1534
- const totalResult = await request.query(
1535
- `SELECT COUNT(*) as count FROM ${getTableName({ indexName: storage.TABLE_SCORERS, schemaName: getSchemaName(this.schema) })} WHERE [scorerId] = @p1`
1536
- );
2383
+ const conditions = ["[scorerId] = @p1"];
2384
+ const params = { p1: scorerId };
2385
+ let paramIndex = 2;
2386
+ if (entityId) {
2387
+ conditions.push(`[entityId] = @p${paramIndex}`);
2388
+ params[`p${paramIndex}`] = entityId;
2389
+ paramIndex++;
2390
+ }
2391
+ if (entityType) {
2392
+ conditions.push(`[entityType] = @p${paramIndex}`);
2393
+ params[`p${paramIndex}`] = entityType;
2394
+ paramIndex++;
2395
+ }
2396
+ if (source) {
2397
+ conditions.push(`[source] = @p${paramIndex}`);
2398
+ params[`p${paramIndex}`] = source;
2399
+ paramIndex++;
2400
+ }
2401
+ const whereClause = conditions.join(" AND ");
2402
+ const tableName = getTableName({ indexName: storage.TABLE_SCORERS, schemaName: getSchemaName(this.schema) });
2403
+ const countRequest = this.pool.request();
2404
+ Object.entries(params).forEach(([key, value]) => {
2405
+ countRequest.input(key, value);
2406
+ });
2407
+ const totalResult = await countRequest.query(`SELECT COUNT(*) as count FROM ${tableName} WHERE ${whereClause}`);
1537
2408
  const total = totalResult.recordset[0]?.count || 0;
2409
+ const { page, perPage: perPageInput } = pagination;
1538
2410
  if (total === 0) {
1539
2411
  return {
1540
2412
  pagination: {
1541
2413
  total: 0,
1542
- page: pagination.page,
1543
- perPage: pagination.perPage,
2414
+ page,
2415
+ perPage: perPageInput,
1544
2416
  hasMore: false
1545
2417
  },
1546
2418
  scores: []
1547
2419
  };
1548
2420
  }
2421
+ const perPage = storage.normalizePerPage(perPageInput, 100);
2422
+ const { offset: start, perPage: perPageForResponse } = storage.calculatePagination(page, perPageInput, perPage);
2423
+ const limitValue = perPageInput === false ? total : perPage;
2424
+ const end = perPageInput === false ? total : start + perPage;
1549
2425
  const dataRequest = this.pool.request();
1550
- dataRequest.input("p1", scorerId);
1551
- dataRequest.input("p2", pagination.perPage);
1552
- dataRequest.input("p3", pagination.page * pagination.perPage);
1553
- const result = await dataRequest.query(
1554
- `SELECT * FROM ${getTableName({ indexName: storage.TABLE_SCORERS, schemaName: getSchemaName(this.schema) })} WHERE [scorerId] = @p1 ORDER BY [createdAt] DESC OFFSET @p3 ROWS FETCH NEXT @p2 ROWS ONLY`
1555
- );
2426
+ Object.entries(params).forEach(([key, value]) => {
2427
+ dataRequest.input(key, value);
2428
+ });
2429
+ dataRequest.input("perPage", limitValue);
2430
+ dataRequest.input("offset", start);
2431
+ const dataQuery = `SELECT * FROM ${tableName} WHERE ${whereClause} ORDER BY [createdAt] DESC OFFSET @offset ROWS FETCH NEXT @perPage ROWS ONLY`;
2432
+ const result = await dataRequest.query(dataQuery);
1556
2433
  return {
1557
2434
  pagination: {
1558
2435
  total: Number(total),
1559
- page: pagination.page,
1560
- perPage: pagination.perPage,
1561
- hasMore: Number(total) > (pagination.page + 1) * pagination.perPage
2436
+ page,
2437
+ perPage: perPageForResponse,
2438
+ hasMore: end < total
1562
2439
  },
1563
2440
  scores: result.recordset.map((row) => transformScoreRow(row))
1564
2441
  };
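
getScoresByScorerId is renamed to listScoresByScorerId and gains optional entityId, entityType, and source filters that are ANDed into the WHERE clause next to the scorerId match, with pagination normalized through the shared helpers. A hedged usage sketch against an already-constructed store; the ids and the entityType/source values are placeholders.

    // Sketch only: the renamed listScoresByScorerId with its new optional filters.
    async function scoresForScorer(store) {
      const { scores, pagination } = await store.listScoresByScorerId({
        scorerId: 'relevancy',  // placeholder
        entityId: 'agent-1',    // optional: narrows to a single entity
        entityType: 'AGENT',    // optional; placeholder value
        source: 'LIVE',         // optional; placeholder value
        pagination: { page: 0, perPage: 25 },
      });
      console.log(`${scores.length} of ${pagination.total} scores, hasMore=${pagination.hasMore}`);
    }
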
@@ -1574,7 +2451,7 @@ var ScoresMSSQL = class extends storage.ScoresStorage {
1574
2451
  );
1575
2452
  }
1576
2453
  }
1577
- async getScoresByRunId({
2454
+ async listScoresByRunId({
1578
2455
  runId,
1579
2456
  pagination
1580
2457
  }) {
@@ -1585,30 +2462,35 @@ var ScoresMSSQL = class extends storage.ScoresStorage {
1585
2462
  `SELECT COUNT(*) as count FROM ${getTableName({ indexName: storage.TABLE_SCORERS, schemaName: getSchemaName(this.schema) })} WHERE [runId] = @p1`
1586
2463
  );
1587
2464
  const total = totalResult.recordset[0]?.count || 0;
2465
+ const { page, perPage: perPageInput } = pagination;
1588
2466
  if (total === 0) {
1589
2467
  return {
1590
2468
  pagination: {
1591
2469
  total: 0,
1592
- page: pagination.page,
1593
- perPage: pagination.perPage,
2470
+ page,
2471
+ perPage: perPageInput,
1594
2472
  hasMore: false
1595
2473
  },
1596
2474
  scores: []
1597
2475
  };
1598
2476
  }
2477
+ const perPage = storage.normalizePerPage(perPageInput, 100);
2478
+ const { offset: start, perPage: perPageForResponse } = storage.calculatePagination(page, perPageInput, perPage);
2479
+ const limitValue = perPageInput === false ? total : perPage;
2480
+ const end = perPageInput === false ? total : start + perPage;
1599
2481
  const dataRequest = this.pool.request();
1600
2482
  dataRequest.input("p1", runId);
1601
- dataRequest.input("p2", pagination.perPage);
1602
- dataRequest.input("p3", pagination.page * pagination.perPage);
2483
+ dataRequest.input("p2", limitValue);
2484
+ dataRequest.input("p3", start);
1603
2485
  const result = await dataRequest.query(
1604
2486
  `SELECT * FROM ${getTableName({ indexName: storage.TABLE_SCORERS, schemaName: getSchemaName(this.schema) })} WHERE [runId] = @p1 ORDER BY [createdAt] DESC OFFSET @p3 ROWS FETCH NEXT @p2 ROWS ONLY`
1605
2487
  );
1606
2488
  return {
1607
2489
  pagination: {
1608
2490
  total: Number(total),
1609
- page: pagination.page,
1610
- perPage: pagination.perPage,
1611
- hasMore: Number(total) > (pagination.page + 1) * pagination.perPage
2491
+ page,
2492
+ perPage: perPageForResponse,
2493
+ hasMore: end < total
1612
2494
  },
1613
2495
  scores: result.recordset.map((row) => transformScoreRow(row))
1614
2496
  };
@@ -1624,7 +2506,7 @@ var ScoresMSSQL = class extends storage.ScoresStorage {
1624
2506
  );
1625
2507
  }
1626
2508
  }
1627
- async getScoresByEntityId({
2509
+ async listScoresByEntityId({
1628
2510
  entityId,
1629
2511
  entityType,
1630
2512
  pagination
@@ -1637,31 +2519,36 @@ var ScoresMSSQL = class extends storage.ScoresStorage {
1637
2519
  `SELECT COUNT(*) as count FROM ${getTableName({ indexName: storage.TABLE_SCORERS, schemaName: getSchemaName(this.schema) })} WHERE [entityId] = @p1 AND [entityType] = @p2`
1638
2520
  );
1639
2521
  const total = totalResult.recordset[0]?.count || 0;
2522
+ const { page, perPage: perPageInput } = pagination;
2523
+ const perPage = storage.normalizePerPage(perPageInput, 100);
2524
+ const { offset: start, perPage: perPageForResponse } = storage.calculatePagination(page, perPageInput, perPage);
1640
2525
  if (total === 0) {
1641
2526
  return {
1642
2527
  pagination: {
1643
2528
  total: 0,
1644
- page: pagination.page,
1645
- perPage: pagination.perPage,
2529
+ page,
2530
+ perPage: perPageForResponse,
1646
2531
  hasMore: false
1647
2532
  },
1648
2533
  scores: []
1649
2534
  };
1650
2535
  }
2536
+ const limitValue = perPageInput === false ? total : perPage;
2537
+ const end = perPageInput === false ? total : start + perPage;
1651
2538
  const dataRequest = this.pool.request();
1652
2539
  dataRequest.input("p1", entityId);
1653
2540
  dataRequest.input("p2", entityType);
1654
- dataRequest.input("p3", pagination.perPage);
1655
- dataRequest.input("p4", pagination.page * pagination.perPage);
2541
+ dataRequest.input("p3", limitValue);
2542
+ dataRequest.input("p4", start);
1656
2543
  const result = await dataRequest.query(
1657
2544
  `SELECT * FROM ${getTableName({ indexName: storage.TABLE_SCORERS, schemaName: getSchemaName(this.schema) })} WHERE [entityId] = @p1 AND [entityType] = @p2 ORDER BY [createdAt] DESC OFFSET @p4 ROWS FETCH NEXT @p3 ROWS ONLY`
1658
2545
  );
1659
2546
  return {
1660
2547
  pagination: {
1661
2548
  total: Number(total),
1662
- page: pagination.page,
1663
- perPage: pagination.perPage,
1664
- hasMore: Number(total) > (pagination.page + 1) * pagination.perPage
2549
+ page,
2550
+ perPage: perPageForResponse,
2551
+ hasMore: end < total
1665
2552
  },
1666
2553
  scores: result.recordset.map((row) => transformScoreRow(row))
1667
2554
  };
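
All of the listScores* methods now share one pagination recipe: normalize perPage (with perPage === false meaning return everything), compute the OFFSET from the page number, and derive hasMore by comparing offset + perPage against the total count. The arithmetic below is a minimal sketch of that behavior as it is used in these hunks; the real normalizePerPage and calculatePagination helpers live in @mastra/core/storage, so their exact semantics beyond this usage are assumptions.

    // Sketch only: approximates the pagination math used by the listScores* methods above.
    function paginate({ page, perPageInput, total, fallback = 100 }) {
      const perPage = perPageInput === false ? total : (perPageInput ?? fallback); // assumed normalization
      const offset = page * perPage;
      const end = perPageInput === false ? total : offset + perPage;
      return { offset, limit: perPage, hasMore: end < total };
    }

    console.log(paginate({ page: 0, perPageInput: 10, total: 25 }));    // { offset: 0, limit: 10, hasMore: true }
    console.log(paginate({ page: 2, perPageInput: 10, total: 25 }));    // { offset: 20, limit: 10, hasMore: false }
    console.log(paginate({ page: 0, perPageInput: false, total: 25 })); // one page with everything
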
@@ -1677,8 +2564,66 @@ var ScoresMSSQL = class extends storage.ScoresStorage {
1677
2564
  );
1678
2565
  }
1679
2566
  }
2567
+ async listScoresBySpan({
2568
+ traceId,
2569
+ spanId,
2570
+ pagination
2571
+ }) {
2572
+ try {
2573
+ const request = this.pool.request();
2574
+ request.input("p1", traceId);
2575
+ request.input("p2", spanId);
2576
+ const totalResult = await request.query(
2577
+ `SELECT COUNT(*) as count FROM ${getTableName({ indexName: storage.TABLE_SCORERS, schemaName: getSchemaName(this.schema) })} WHERE [traceId] = @p1 AND [spanId] = @p2`
2578
+ );
2579
+ const total = totalResult.recordset[0]?.count || 0;
2580
+ const { page, perPage: perPageInput } = pagination;
2581
+ const perPage = storage.normalizePerPage(perPageInput, 100);
2582
+ const { offset: start, perPage: perPageForResponse } = storage.calculatePagination(page, perPageInput, perPage);
2583
+ if (total === 0) {
2584
+ return {
2585
+ pagination: {
2586
+ total: 0,
2587
+ page,
2588
+ perPage: perPageForResponse,
2589
+ hasMore: false
2590
+ },
2591
+ scores: []
2592
+ };
2593
+ }
2594
+ const limitValue = perPageInput === false ? total : perPage;
2595
+ const end = perPageInput === false ? total : start + perPage;
2596
+ const dataRequest = this.pool.request();
2597
+ dataRequest.input("p1", traceId);
2598
+ dataRequest.input("p2", spanId);
2599
+ dataRequest.input("p3", limitValue);
2600
+ dataRequest.input("p4", start);
2601
+ const result = await dataRequest.query(
2602
+ `SELECT * FROM ${getTableName({ indexName: storage.TABLE_SCORERS, schemaName: getSchemaName(this.schema) })} WHERE [traceId] = @p1 AND [spanId] = @p2 ORDER BY [createdAt] DESC OFFSET @p4 ROWS FETCH NEXT @p3 ROWS ONLY`
2603
+ );
2604
+ return {
2605
+ pagination: {
2606
+ total: Number(total),
2607
+ page,
2608
+ perPage: perPageForResponse,
2609
+ hasMore: end < total
2610
+ },
2611
+ scores: result.recordset.map((row) => transformScoreRow(row))
2612
+ };
2613
+ } catch (error$1) {
2614
+ throw new error.MastraError(
2615
+ {
2616
+ id: "MASTRA_STORAGE_MSSQL_STORE_GET_SCORES_BY_SPAN_FAILED",
2617
+ domain: error.ErrorDomain.STORAGE,
2618
+ category: error.ErrorCategory.THIRD_PARTY,
2619
+ details: { traceId, spanId }
2620
+ },
2621
+ error$1
2622
+ );
2623
+ }
2624
+ }
1680
2625
  };
1681
- var TracesMSSQL = class extends storage.TracesStorage {
2626
+ var WorkflowsMSSQL = class extends storage.WorkflowsStorage {
1682
2627
  pool;
1683
2628
  operations;
1684
2629
  schema;
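
The new listScoresBySpan ties scores to a single span of a trace via the (traceId, spanId) pair, using the same count-then-page query shape as the other listScores* methods. A hedged usage sketch; the ids are placeholders.

    // Sketch only: fetching scores recorded against one span of a trace.
    async function scoresForSpan(store) {
      const { scores, pagination } = await store.listScoresBySpan({
        traceId: 'trace-abc', // placeholder
        spanId: 'span-def',   // placeholder
        pagination: { page: 0, perPage: 10 },
      });
      console.log(pagination.total, pagination.hasMore);
      for (const score of scores) {
        console.log(score.scorerId, score.createdAt);
      }
    }
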
@@ -1692,207 +2637,165 @@ var TracesMSSQL = class extends storage.TracesStorage {
1692
2637
  this.operations = operations;
1693
2638
  this.schema = schema;
1694
2639
  }
1695
- /** @deprecated use getTracesPaginated instead*/
1696
- async getTraces(args) {
1697
- if (args.fromDate || args.toDate) {
1698
- args.dateRange = {
1699
- start: args.fromDate,
1700
- end: args.toDate
1701
- };
2640
+ parseWorkflowRun(row) {
2641
+ let parsedSnapshot = row.snapshot;
2642
+ if (typeof parsedSnapshot === "string") {
2643
+ try {
2644
+ parsedSnapshot = JSON.parse(row.snapshot);
2645
+ } catch (e) {
2646
+ this.logger?.warn?.(`Failed to parse snapshot for workflow ${row.workflow_name}:`, e);
2647
+ }
1702
2648
  }
1703
- const result = await this.getTracesPaginated(args);
1704
- return result.traces;
2649
+ return {
2650
+ workflowName: row.workflow_name,
2651
+ runId: row.run_id,
2652
+ snapshot: parsedSnapshot,
2653
+ createdAt: row.createdAt,
2654
+ updatedAt: row.updatedAt,
2655
+ resourceId: row.resourceId
2656
+ };
1705
2657
  }
1706
- async getTracesPaginated(args) {
1707
- const { name, scope, page = 0, perPage: perPageInput, attributes, filters, dateRange } = args;
1708
- const fromDate = dateRange?.start;
1709
- const toDate = dateRange?.end;
1710
- const perPage = perPageInput !== void 0 ? perPageInput : 100;
1711
- const currentOffset = page * perPage;
1712
- const paramMap = {};
1713
- const conditions = [];
1714
- let paramIndex = 1;
1715
- if (name) {
1716
- const paramName = `p${paramIndex++}`;
1717
- conditions.push(`[name] LIKE @${paramName}`);
1718
- paramMap[paramName] = `${name}%`;
1719
- }
1720
- if (scope) {
1721
- const paramName = `p${paramIndex++}`;
1722
- conditions.push(`[scope] = @${paramName}`);
1723
- paramMap[paramName] = scope;
1724
- }
1725
- if (attributes) {
1726
- Object.entries(attributes).forEach(([key, value]) => {
1727
- const parsedKey = utils.parseFieldKey(key);
1728
- const paramName = `p${paramIndex++}`;
1729
- conditions.push(`JSON_VALUE([attributes], '$.${parsedKey}') = @${paramName}`);
1730
- paramMap[paramName] = value;
1731
- });
1732
- }
1733
- if (filters) {
1734
- Object.entries(filters).forEach(([key, value]) => {
1735
- const parsedKey = utils.parseFieldKey(key);
1736
- const paramName = `p${paramIndex++}`;
1737
- conditions.push(`[${parsedKey}] = @${paramName}`);
1738
- paramMap[paramName] = value;
1739
- });
1740
- }
1741
- if (fromDate instanceof Date && !isNaN(fromDate.getTime())) {
1742
- const paramName = `p${paramIndex++}`;
1743
- conditions.push(`[createdAt] >= @${paramName}`);
1744
- paramMap[paramName] = fromDate.toISOString();
1745
- }
1746
- if (toDate instanceof Date && !isNaN(toDate.getTime())) {
1747
- const paramName = `p${paramIndex++}`;
1748
- conditions.push(`[createdAt] <= @${paramName}`);
1749
- paramMap[paramName] = toDate.toISOString();
1750
- }
1751
- const whereClause = conditions.length > 0 ? `WHERE ${conditions.join(" AND ")}` : "";
1752
- const countQuery = `SELECT COUNT(*) as total FROM ${getTableName({ indexName: storage.TABLE_TRACES, schemaName: getSchemaName(this.schema) })} ${whereClause}`;
1753
- let total = 0;
2658
+ async updateWorkflowResults({
2659
+ workflowName,
2660
+ runId,
2661
+ stepId,
2662
+ result,
2663
+ requestContext
2664
+ }) {
2665
+ const table = getTableName({ indexName: storage.TABLE_WORKFLOW_SNAPSHOT, schemaName: getSchemaName(this.schema) });
2666
+ const transaction = this.pool.transaction();
1754
2667
  try {
1755
- const countRequest = this.pool.request();
1756
- Object.entries(paramMap).forEach(([key, value]) => {
1757
- if (value instanceof Date) {
1758
- countRequest.input(key, sql2__default.default.DateTime, value);
1759
- } else {
1760
- countRequest.input(key, value);
1761
- }
1762
- });
1763
- const countResult = await countRequest.query(countQuery);
1764
- total = parseInt(countResult.recordset[0].total, 10);
2668
+ await transaction.begin();
2669
+ const selectRequest = new sql2__default.default.Request(transaction);
2670
+ selectRequest.input("workflow_name", workflowName);
2671
+ selectRequest.input("run_id", runId);
2672
+ const existingSnapshotResult = await selectRequest.query(
2673
+ `SELECT snapshot FROM ${table} WITH (UPDLOCK, HOLDLOCK) WHERE workflow_name = @workflow_name AND run_id = @run_id`
2674
+ );
2675
+ let snapshot;
2676
+ if (!existingSnapshotResult.recordset || existingSnapshotResult.recordset.length === 0) {
2677
+ snapshot = {
2678
+ context: {},
2679
+ activePaths: [],
2680
+ activeStepsPath: {},
2681
+ timestamp: Date.now(),
2682
+ suspendedPaths: {},
2683
+ resumeLabels: {},
2684
+ serializedStepGraph: [],
2685
+ status: "pending",
2686
+ value: {},
2687
+ waitingPaths: {},
2688
+ runId,
2689
+ requestContext: {}
2690
+ };
2691
+ } else {
2692
+ const existingSnapshot = existingSnapshotResult.recordset[0].snapshot;
2693
+ snapshot = typeof existingSnapshot === "string" ? JSON.parse(existingSnapshot) : existingSnapshot;
2694
+ }
2695
+ snapshot.context[stepId] = result;
2696
+ snapshot.requestContext = { ...snapshot.requestContext, ...requestContext };
2697
+ const upsertReq = new sql2__default.default.Request(transaction);
2698
+ upsertReq.input("workflow_name", workflowName);
2699
+ upsertReq.input("run_id", runId);
2700
+ upsertReq.input("snapshot", JSON.stringify(snapshot));
2701
+ upsertReq.input("createdAt", sql2__default.default.DateTime2, /* @__PURE__ */ new Date());
2702
+ upsertReq.input("updatedAt", sql2__default.default.DateTime2, /* @__PURE__ */ new Date());
2703
+ await upsertReq.query(
2704
+ `MERGE ${table} AS target
2705
+ USING (SELECT @workflow_name AS workflow_name, @run_id AS run_id) AS src
2706
+ ON target.workflow_name = src.workflow_name AND target.run_id = src.run_id
2707
+ WHEN MATCHED THEN UPDATE SET snapshot = @snapshot, [updatedAt] = @updatedAt
2708
+ WHEN NOT MATCHED THEN INSERT (workflow_name, run_id, snapshot, [createdAt], [updatedAt])
2709
+ VALUES (@workflow_name, @run_id, @snapshot, @createdAt, @updatedAt);`
2710
+ );
2711
+ await transaction.commit();
2712
+ return snapshot.context;
1765
2713
  } catch (error$1) {
2714
+ try {
2715
+ await transaction.rollback();
2716
+ } catch {
2717
+ }
1766
2718
  throw new error.MastraError(
1767
2719
  {
1768
- id: "MASTRA_STORAGE_MSSQL_STORE_GET_TRACES_PAGINATED_FAILED_TO_RETRIEVE_TOTAL_COUNT",
2720
+ id: "MASTRA_STORAGE_MSSQL_STORE_UPDATE_WORKFLOW_RESULTS_FAILED",
1769
2721
  domain: error.ErrorDomain.STORAGE,
1770
2722
  category: error.ErrorCategory.THIRD_PARTY,
1771
2723
  details: {
1772
- name: args.name ?? "",
1773
- scope: args.scope ?? ""
2724
+ workflowName,
2725
+ runId,
2726
+ stepId
1774
2727
  }
1775
2728
  },
1776
2729
  error$1
1777
2730
  );
1778
2731
  }
1779
- if (total === 0) {
1780
- return {
1781
- traces: [],
1782
- total: 0,
1783
- page,
1784
- perPage,
1785
- hasMore: false
1786
- };
1787
- }
1788
- const dataQuery = `SELECT * FROM ${getTableName({ indexName: storage.TABLE_TRACES, schemaName: getSchemaName(this.schema) })} ${whereClause} ORDER BY [seq_id] DESC OFFSET @offset ROWS FETCH NEXT @limit ROWS ONLY`;
1789
- const dataRequest = this.pool.request();
1790
- Object.entries(paramMap).forEach(([key, value]) => {
1791
- if (value instanceof Date) {
1792
- dataRequest.input(key, sql2__default.default.DateTime, value);
1793
- } else {
1794
- dataRequest.input(key, value);
1795
- }
1796
- });
1797
- dataRequest.input("offset", currentOffset);
1798
- dataRequest.input("limit", perPage);
2732
+ }
2733
+ async updateWorkflowState({
2734
+ workflowName,
2735
+ runId,
2736
+ opts
2737
+ }) {
2738
+ const table = getTableName({ indexName: storage.TABLE_WORKFLOW_SNAPSHOT, schemaName: getSchemaName(this.schema) });
2739
+ const transaction = this.pool.transaction();
1799
2740
  try {
1800
- const rowsResult = await dataRequest.query(dataQuery);
1801
- const rows = rowsResult.recordset;
1802
- const traces = rows.map((row) => ({
1803
- id: row.id,
1804
- parentSpanId: row.parentSpanId,
1805
- traceId: row.traceId,
1806
- name: row.name,
1807
- scope: row.scope,
1808
- kind: row.kind,
1809
- status: JSON.parse(row.status),
1810
- events: JSON.parse(row.events),
1811
- links: JSON.parse(row.links),
1812
- attributes: JSON.parse(row.attributes),
1813
- startTime: row.startTime,
1814
- endTime: row.endTime,
1815
- other: row.other,
1816
- createdAt: row.createdAt
1817
- }));
1818
- return {
1819
- traces,
1820
- total,
1821
- page,
1822
- perPage,
1823
- hasMore: currentOffset + traces.length < total
1824
- };
2741
+ await transaction.begin();
2742
+ const selectRequest = new sql2__default.default.Request(transaction);
2743
+ selectRequest.input("workflow_name", workflowName);
2744
+ selectRequest.input("run_id", runId);
2745
+ const existingSnapshotResult = await selectRequest.query(
2746
+ `SELECT snapshot FROM ${table} WITH (UPDLOCK, HOLDLOCK) WHERE workflow_name = @workflow_name AND run_id = @run_id`
2747
+ );
2748
+ if (!existingSnapshotResult.recordset || existingSnapshotResult.recordset.length === 0) {
2749
+ await transaction.rollback();
2750
+ return void 0;
2751
+ }
2752
+ const existingSnapshot = existingSnapshotResult.recordset[0].snapshot;
2753
+ const snapshot = typeof existingSnapshot === "string" ? JSON.parse(existingSnapshot) : existingSnapshot;
2754
+ if (!snapshot || !snapshot?.context) {
2755
+ await transaction.rollback();
2756
+ throw new error.MastraError(
2757
+ {
2758
+ id: "MASTRA_STORAGE_MSSQL_STORE_UPDATE_WORKFLOW_STATE_SNAPSHOT_NOT_FOUND",
2759
+ domain: error.ErrorDomain.STORAGE,
2760
+ category: error.ErrorCategory.SYSTEM,
2761
+ details: {
2762
+ workflowName,
2763
+ runId
2764
+ }
2765
+ },
2766
+ new Error(`Snapshot not found for runId ${runId}`)
2767
+ );
2768
+ }
2769
+ const updatedSnapshot = { ...snapshot, ...opts };
2770
+ const updateRequest = new sql2__default.default.Request(transaction);
2771
+ updateRequest.input("snapshot", JSON.stringify(updatedSnapshot));
2772
+ updateRequest.input("workflow_name", workflowName);
2773
+ updateRequest.input("run_id", runId);
2774
+ updateRequest.input("updatedAt", sql2__default.default.DateTime2, /* @__PURE__ */ new Date());
2775
+ await updateRequest.query(
2776
+ `UPDATE ${table} SET snapshot = @snapshot, [updatedAt] = @updatedAt WHERE workflow_name = @workflow_name AND run_id = @run_id`
2777
+ );
2778
+ await transaction.commit();
2779
+ return updatedSnapshot;
1825
2780
  } catch (error$1) {
2781
+ try {
2782
+ await transaction.rollback();
2783
+ } catch {
2784
+ }
1826
2785
  throw new error.MastraError(
1827
2786
  {
1828
- id: "MASTRA_STORAGE_MSSQL_STORE_GET_TRACES_PAGINATED_FAILED_TO_RETRIEVE_TRACES",
2787
+ id: "MASTRA_STORAGE_MSSQL_STORE_UPDATE_WORKFLOW_STATE_FAILED",
1829
2788
  domain: error.ErrorDomain.STORAGE,
1830
2789
  category: error.ErrorCategory.THIRD_PARTY,
1831
2790
  details: {
1832
- name: args.name ?? "",
1833
- scope: args.scope ?? ""
2791
+ workflowName,
2792
+ runId
1834
2793
  }
1835
2794
  },
1836
2795
  error$1
1837
2796
  );
1838
2797
  }
1839
2798
  }
1840
- async batchTraceInsert({ records }) {
1841
- this.logger.debug("Batch inserting traces", { count: records.length });
1842
- await this.operations.batchInsert({
1843
- tableName: storage.TABLE_TRACES,
1844
- records
1845
- });
1846
- }
1847
- };
1848
- function parseWorkflowRun(row) {
1849
- let parsedSnapshot = row.snapshot;
1850
- if (typeof parsedSnapshot === "string") {
1851
- try {
1852
- parsedSnapshot = JSON.parse(row.snapshot);
1853
- } catch (e) {
1854
- console.warn(`Failed to parse snapshot for workflow ${row.workflow_name}: ${e}`);
1855
- }
1856
- }
1857
- return {
1858
- workflowName: row.workflow_name,
1859
- runId: row.run_id,
1860
- snapshot: parsedSnapshot,
1861
- createdAt: row.createdAt,
1862
- updatedAt: row.updatedAt,
1863
- resourceId: row.resourceId
1864
- };
1865
- }
1866
- var WorkflowsMSSQL = class extends storage.WorkflowsStorage {
1867
- pool;
1868
- operations;
1869
- schema;
1870
- constructor({
1871
- pool,
1872
- operations,
1873
- schema
1874
- }) {
1875
- super();
1876
- this.pool = pool;
1877
- this.operations = operations;
1878
- this.schema = schema;
1879
- }
1880
- updateWorkflowResults({
1881
- // workflowName,
1882
- // runId,
1883
- // stepId,
1884
- // result,
1885
- // runtimeContext,
1886
- }) {
1887
- throw new Error("Method not implemented.");
1888
- }
1889
- updateWorkflowState({
1890
- // workflowName,
1891
- // runId,
1892
- // opts,
1893
- }) {
1894
- throw new Error("Method not implemented.");
1895
- }
1896
2799
  async persistWorkflowSnapshot({
1897
2800
  workflowName,
1898
2801
  runId,
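
updateWorkflowResults and updateWorkflowState are now real implementations: each opens a transaction, reads the snapshot row under UPDLOCK/HOLDLOCK so concurrent writers to the same run serialize, patches the JSON, and writes it back (a MERGE upsert for results, a plain UPDATE for state). A hedged usage sketch against an already-constructed store; the workflow, step, and result values are placeholders.

    // Sketch only: exercising the newly implemented workflow snapshot updates.
    async function recordStepResult(store) {
      // Merges the step result into snapshot.context and returns the updated context.
      const context = await store.updateWorkflowResults({
        workflowName: 'daily-report', // placeholder
        runId: 'run-123',             // placeholder
        stepId: 'fetch-data',         // placeholder
        result: { status: 'success', output: { rows: 42 } }, // placeholder payload
        requestContext: { userId: 'u-1' },                   // merged into snapshot.requestContext
      });
      console.log(context['fetch-data']);

      // Shallow-merges opts into the stored snapshot; returns undefined if the run does not exist.
      const snapshot = await store.updateWorkflowState({
        workflowName: 'daily-report',
        runId: 'run-123',
        opts: { status: 'running' },
      });
      console.log(snapshot?.status);
    }
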
@@ -1989,7 +2892,7 @@ var WorkflowsMSSQL = class extends storage.WorkflowsStorage {
1989
2892
  if (!result.recordset || result.recordset.length === 0) {
1990
2893
  return null;
1991
2894
  }
1992
- return parseWorkflowRun(result.recordset[0]);
2895
+ return this.parseWorkflowRun(result.recordset[0]);
1993
2896
  } catch (error$1) {
1994
2897
  throw new error.MastraError(
1995
2898
  {
@@ -2005,13 +2908,14 @@ var WorkflowsMSSQL = class extends storage.WorkflowsStorage {
2005
2908
  );
2006
2909
  }
2007
2910
  }
2008
- async getWorkflowRuns({
2911
+ async listWorkflowRuns({
2009
2912
  workflowName,
2010
2913
  fromDate,
2011
2914
  toDate,
2012
- limit,
2013
- offset,
2014
- resourceId
2915
+ page,
2916
+ perPage,
2917
+ resourceId,
2918
+ status
2015
2919
  } = {}) {
2016
2920
  try {
2017
2921
  const conditions = [];
@@ -2020,13 +2924,17 @@ var WorkflowsMSSQL = class extends storage.WorkflowsStorage {
2020
2924
  conditions.push(`[workflow_name] = @workflowName`);
2021
2925
  paramMap["workflowName"] = workflowName;
2022
2926
  }
2927
+ if (status) {
2928
+ conditions.push(`JSON_VALUE([snapshot], '$.status') = @status`);
2929
+ paramMap["status"] = status;
2930
+ }
2023
2931
  if (resourceId) {
2024
2932
  const hasResourceId = await this.operations.hasColumn(storage.TABLE_WORKFLOW_SNAPSHOT, "resourceId");
2025
2933
  if (hasResourceId) {
2026
2934
  conditions.push(`[resourceId] = @resourceId`);
2027
2935
  paramMap["resourceId"] = resourceId;
2028
2936
  } else {
2029
- console.warn(`[${storage.TABLE_WORKFLOW_SNAPSHOT}] resourceId column not found. Skipping resourceId filter.`);
2937
+ this.logger?.warn?.(`[${storage.TABLE_WORKFLOW_SNAPSHOT}] resourceId column not found. Skipping resourceId filter.`);
2030
2938
  }
2031
2939
  }
2032
2940
  if (fromDate instanceof Date && !isNaN(fromDate.getTime())) {
@@ -2048,24 +2956,27 @@ var WorkflowsMSSQL = class extends storage.WorkflowsStorage {
2048
2956
  request.input(key, value);
2049
2957
  }
2050
2958
  });
2051
- if (limit !== void 0 && offset !== void 0) {
2959
+ const usePagination = typeof perPage === "number" && typeof page === "number";
2960
+ if (usePagination) {
2052
2961
  const countQuery = `SELECT COUNT(*) as count FROM ${tableName} ${whereClause}`;
2053
2962
  const countResult = await request.query(countQuery);
2054
2963
  total = Number(countResult.recordset[0]?.count || 0);
2055
2964
  }
2056
2965
  let query = `SELECT * FROM ${tableName} ${whereClause} ORDER BY [seq_id] DESC`;
2057
- if (limit !== void 0 && offset !== void 0) {
2058
- query += ` OFFSET @offset ROWS FETCH NEXT @limit ROWS ONLY`;
2059
- request.input("limit", limit);
2966
+ if (usePagination) {
2967
+ const normalizedPerPage = storage.normalizePerPage(perPage, Number.MAX_SAFE_INTEGER);
2968
+ const offset = page * normalizedPerPage;
2969
+ query += ` OFFSET @offset ROWS FETCH NEXT @perPage ROWS ONLY`;
2970
+ request.input("perPage", normalizedPerPage);
2060
2971
  request.input("offset", offset);
2061
2972
  }
2062
2973
  const result = await request.query(query);
2063
- const runs = (result.recordset || []).map((row) => parseWorkflowRun(row));
2974
+ const runs = (result.recordset || []).map((row) => this.parseWorkflowRun(row));
2064
2975
  return { runs, total: total || runs.length };
2065
2976
  } catch (error$1) {
2066
2977
  throw new error.MastraError(
2067
2978
  {
2068
- id: "MASTRA_STORAGE_MSSQL_STORE_GET_WORKFLOW_RUNS_FAILED",
2979
+ id: "MASTRA_STORAGE_MSSQL_STORE_LIST_WORKFLOW_RUNS_FAILED",
2069
2980
  domain: error.ErrorDomain.STORAGE,
2070
2981
  category: error.ErrorCategory.THIRD_PARTY,
2071
2982
  details: {
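
getWorkflowRuns becomes listWorkflowRuns, swapping limit/offset for page/perPage and adding a status filter that matches JSON_VALUE([snapshot], '$.status'). A hedged usage sketch; the workflow name and status value are placeholders.

    // Sketch only: the renamed listWorkflowRuns with page/perPage and the new status filter.
    async function showSuspendedRuns(store) {
      const { runs, total } = await store.listWorkflowRuns({
        workflowName: 'daily-report', // placeholder
        status: 'suspended',          // placeholder; matched via JSON_VALUE([snapshot], '$.status')
        page: 0,
        perPage: 20,
      });
      console.log(`${runs.length} of ${total} runs`);
      for (const run of runs) {
        console.log(run.runId, run.snapshot?.status, run.createdAt);
      }
    }
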
@@ -2085,7 +2996,10 @@ var MSSQLStore = class extends storage.MastraStorage {
2085
2996
  isConnected = null;
2086
2997
  stores;
2087
2998
  constructor(config) {
2088
- super({ name: "MSSQLStore" });
2999
+ if (!config.id || typeof config.id !== "string" || config.id.trim() === "") {
3000
+ throw new Error("MSSQLStore: id must be provided and cannot be empty.");
3001
+ }
3002
+ super({ id: config.id, name: "MSSQLStore" });
2089
3003
  try {
2090
3004
  if ("connectionString" in config) {
2091
3005
  if (!config.connectionString || typeof config.connectionString !== "string" || config.connectionString.trim() === "") {
@@ -2108,19 +3022,17 @@ var MSSQLStore = class extends storage.MastraStorage {
2108
3022
  port: config.port,
2109
3023
  options: config.options || { encrypt: true, trustServerCertificate: true }
2110
3024
  });
2111
- const legacyEvals = new LegacyEvalsMSSQL({ pool: this.pool, schema: this.schema });
2112
3025
  const operations = new StoreOperationsMSSQL({ pool: this.pool, schemaName: this.schema });
2113
3026
  const scores = new ScoresMSSQL({ pool: this.pool, operations, schema: this.schema });
2114
- const traces = new TracesMSSQL({ pool: this.pool, operations, schema: this.schema });
2115
3027
  const workflows = new WorkflowsMSSQL({ pool: this.pool, operations, schema: this.schema });
2116
3028
  const memory = new MemoryMSSQL({ pool: this.pool, schema: this.schema, operations });
3029
+ const observability = new ObservabilityMSSQL({ pool: this.pool, operations, schema: this.schema });
2117
3030
  this.stores = {
2118
3031
  operations,
2119
3032
  scores,
2120
- traces,
2121
3033
  workflows,
2122
- legacyEvals,
2123
- memory
3034
+ memory,
3035
+ observability
2124
3036
  };
2125
3037
  } catch (e) {
2126
3038
  throw new error.MastraError(
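
MSSQLStore now requires a non-empty id alongside the connection settings, and wires an observability store in place of the removed traces and legacyEvals stores. A hedged construction sketch; the credentials are placeholders and optional config fields not visible in this diff are omitted.

    // Sketch only: constructing the store with the now-required `id`.
    const { MSSQLStore } = require('@mastra/mssql');

    const store = new MSSQLStore({
      id: 'mssql-main', // required: an empty or missing id now throws
      server: 'localhost',
      port: 1433,
      database: 'mastra',
      user: 'sa',
      password: 'your-password', // placeholder
      options: { encrypt: true, trustServerCertificate: true }, // also the default when omitted
    });

    // init() (next hunk) now also tries to create the automatic performance indexes,
    // logging a warning instead of failing when an individual index cannot be created.
    store.init().then(() => console.log('ready')).catch(console.error);
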
@@ -2140,6 +3052,11 @@ var MSSQLStore = class extends storage.MastraStorage {
2140
3052
  try {
2141
3053
  await this.isConnected;
2142
3054
  await super.init();
3055
+ try {
3056
+ await this.stores.operations.createAutomaticIndexes();
3057
+ } catch (indexError) {
3058
+ this.logger?.warn?.("Failed to create indexes:", indexError);
3059
+ }
2143
3060
  } catch (error$1) {
2144
3061
  this.isConnected = null;
2145
3062
  throw new error.MastraError(
@@ -2166,28 +3083,12 @@ var MSSQLStore = class extends storage.MastraStorage {
2166
3083
  resourceWorkingMemory: true,
2167
3084
  hasColumn: true,
2168
3085
  createTable: true,
2169
- deleteMessages: true
3086
+ deleteMessages: true,
3087
+ listScoresBySpan: true,
3088
+ observabilityInstance: true,
3089
+ indexManagement: true
2170
3090
  };
2171
3091
  }
2172
- /** @deprecated use getEvals instead */
2173
- async getEvalsByAgentName(agentName, type) {
2174
- return this.stores.legacyEvals.getEvalsByAgentName(agentName, type);
2175
- }
2176
- async getEvals(options = {}) {
2177
- return this.stores.legacyEvals.getEvals(options);
2178
- }
2179
- /**
2180
- * @deprecated use getTracesPaginated instead
2181
- */
2182
- async getTraces(args) {
2183
- return this.stores.traces.getTraces(args);
2184
- }
2185
- async getTracesPaginated(args) {
2186
- return this.stores.traces.getTracesPaginated(args);
2187
- }
2188
- async batchTraceInsert({ records }) {
2189
- return this.stores.traces.batchTraceInsert({ records });
2190
- }
2191
3092
  async createTable({
2192
3093
  tableName,
2193
3094
  schema
@@ -2222,15 +3123,6 @@ var MSSQLStore = class extends storage.MastraStorage {
2222
3123
  async getThreadById({ threadId }) {
2223
3124
  return this.stores.memory.getThreadById({ threadId });
2224
3125
  }
2225
- /**
2226
- * @deprecated use getThreadsByResourceIdPaginated instead
2227
- */
2228
- async getThreadsByResourceId(args) {
2229
- return this.stores.memory.getThreadsByResourceId(args);
2230
- }
2231
- async getThreadsByResourceIdPaginated(args) {
2232
- return this.stores.memory.getThreadsByResourceIdPaginated(args);
2233
- }
2234
3126
  async saveThread({ thread }) {
2235
3127
  return this.stores.memory.saveThread({ thread });
2236
3128
  }
@@ -2244,17 +3136,8 @@ var MSSQLStore = class extends storage.MastraStorage {
2244
3136
  async deleteThread({ threadId }) {
2245
3137
  return this.stores.memory.deleteThread({ threadId });
2246
3138
  }
2247
- async getMessages(args) {
2248
- return this.stores.memory.getMessages(args);
2249
- }
2250
- async getMessagesById({
2251
- messageIds,
2252
- format
2253
- }) {
2254
- return this.stores.memory.getMessagesById({ messageIds, format });
2255
- }
2256
- async getMessagesPaginated(args) {
2257
- return this.stores.memory.getMessagesPaginated(args);
3139
+ async listMessagesById({ messageIds }) {
3140
+ return this.stores.memory.listMessagesById({ messageIds });
2258
3141
  }
2259
3142
  async saveMessages(args) {
2260
3143
  return this.stores.memory.saveMessages(args);
@@ -2288,9 +3171,9 @@ var MSSQLStore = class extends storage.MastraStorage {
2288
3171
  runId,
2289
3172
  stepId,
2290
3173
  result,
2291
- runtimeContext
3174
+ requestContext
2292
3175
  }) {
2293
- return this.stores.workflows.updateWorkflowResults({ workflowName, runId, stepId, result, runtimeContext });
3176
+ return this.stores.workflows.updateWorkflowResults({ workflowName, runId, stepId, result, requestContext });
2294
3177
  }
2295
3178
  async updateWorkflowState({
2296
3179
  workflowName,
@@ -2313,15 +3196,8 @@ var MSSQLStore = class extends storage.MastraStorage {
2313
3196
  }) {
2314
3197
  return this.stores.workflows.loadWorkflowSnapshot({ workflowName, runId });
2315
3198
  }
2316
- async getWorkflowRuns({
2317
- workflowName,
2318
- fromDate,
2319
- toDate,
2320
- limit,
2321
- offset,
2322
- resourceId
2323
- } = {}) {
2324
- return this.stores.workflows.getWorkflowRuns({ workflowName, fromDate, toDate, limit, offset, resourceId });
3199
+ async listWorkflowRuns(args = {}) {
3200
+ return this.stores.workflows.listWorkflowRuns(args);
2325
3201
  }
2326
3202
  async getWorkflowRunById({
2327
3203
  runId,
@@ -2332,38 +3208,108 @@ var MSSQLStore = class extends storage.MastraStorage {
2332
3208
  async close() {
2333
3209
  await this.pool.close();
2334
3210
  }
3211
+ /**
3212
+ * Index Management
3213
+ */
3214
+ async createIndex(options) {
3215
+ return this.stores.operations.createIndex(options);
3216
+ }
3217
+ async listIndexes(tableName) {
3218
+ return this.stores.operations.listIndexes(tableName);
3219
+ }
3220
+ async describeIndex(indexName) {
3221
+ return this.stores.operations.describeIndex(indexName);
3222
+ }
3223
+ async dropIndex(indexName) {
3224
+ return this.stores.operations.dropIndex(indexName);
3225
+ }
3226
+ /**
3227
+ * Tracing / Observability
3228
+ */
3229
+ getObservabilityStore() {
3230
+ if (!this.stores.observability) {
3231
+ throw new error.MastraError({
3232
+ id: "MSSQL_STORE_OBSERVABILITY_NOT_INITIALIZED",
3233
+ domain: error.ErrorDomain.STORAGE,
3234
+ category: error.ErrorCategory.SYSTEM,
3235
+ text: "Observability storage is not initialized"
3236
+ });
3237
+ }
3238
+ return this.stores.observability;
3239
+ }
3240
+ async createSpan(span) {
3241
+ return this.getObservabilityStore().createSpan(span);
3242
+ }
3243
+ async updateSpan({
3244
+ spanId,
3245
+ traceId,
3246
+ updates
3247
+ }) {
3248
+ return this.getObservabilityStore().updateSpan({ spanId, traceId, updates });
3249
+ }
3250
+ async getTrace(traceId) {
3251
+ return this.getObservabilityStore().getTrace(traceId);
3252
+ }
3253
+ async getTracesPaginated(args) {
3254
+ return this.getObservabilityStore().getTracesPaginated(args);
3255
+ }
3256
+ async batchCreateSpans(args) {
3257
+ return this.getObservabilityStore().batchCreateSpans(args);
3258
+ }
3259
+ async batchUpdateSpans(args) {
3260
+ return this.getObservabilityStore().batchUpdateSpans(args);
3261
+ }
3262
+ async batchDeleteTraces(args) {
3263
+ return this.getObservabilityStore().batchDeleteTraces(args);
3264
+ }
2335
3265
  /**
2336
3266
  * Scorers
2337
3267
  */
2338
3268
  async getScoreById({ id: _id }) {
2339
3269
  return this.stores.scores.getScoreById({ id: _id });
2340
3270
  }
2341
- async getScoresByScorerId({
3271
+ async listScoresByScorerId({
2342
3272
  scorerId: _scorerId,
2343
- pagination: _pagination
3273
+ pagination: _pagination,
3274
+ entityId: _entityId,
3275
+ entityType: _entityType,
3276
+ source: _source
2344
3277
  }) {
2345
- return this.stores.scores.getScoresByScorerId({ scorerId: _scorerId, pagination: _pagination });
3278
+ return this.stores.scores.listScoresByScorerId({
3279
+ scorerId: _scorerId,
3280
+ pagination: _pagination,
3281
+ entityId: _entityId,
3282
+ entityType: _entityType,
3283
+ source: _source
3284
+ });
2346
3285
  }
2347
3286
  async saveScore(_score) {
2348
3287
  return this.stores.scores.saveScore(_score);
2349
3288
  }
2350
- async getScoresByRunId({
3289
+ async listScoresByRunId({
2351
3290
  runId: _runId,
2352
3291
  pagination: _pagination
2353
3292
  }) {
2354
- return this.stores.scores.getScoresByRunId({ runId: _runId, pagination: _pagination });
3293
+ return this.stores.scores.listScoresByRunId({ runId: _runId, pagination: _pagination });
2355
3294
  }
2356
- async getScoresByEntityId({
3295
+ async listScoresByEntityId({
2357
3296
  entityId: _entityId,
2358
3297
  entityType: _entityType,
2359
3298
  pagination: _pagination
2360
3299
  }) {
2361
- return this.stores.scores.getScoresByEntityId({
3300
+ return this.stores.scores.listScoresByEntityId({
2362
3301
  entityId: _entityId,
2363
3302
  entityType: _entityType,
2364
3303
  pagination: _pagination
2365
3304
  });
2366
3305
  }
3306
+ async listScoresBySpan({
3307
+ traceId,
3308
+ spanId,
3309
+ pagination: _pagination
3310
+ }) {
3311
+ return this.stores.scores.listScoresBySpan({ traceId, spanId, pagination: _pagination });
3312
+ }
2367
3313
  };
2368
3314
 
2369
3315
  exports.MSSQLStore = MSSQLStore;
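
Taken together, the new index-management surface (createIndex, listIndexes, describeIndex, dropIndex) is exposed directly on the store and delegates to the StoreOperationsMSSQL methods added earlier in this diff. A hedged end-to-end sketch; the index name and columns are placeholders, and TABLE_THREADS is the table constant the diff itself imports from @mastra/core/storage.

    // Sketch only: the index-management methods newly exposed on MSSQLStore.
    const { TABLE_THREADS } = require('@mastra/core/storage');

    async function manageIndexes(store) {
      // createIndex returns early if an index with this name already exists on the table.
      await store.createIndex({
        name: 'my_threads_resourceid_idx',      // placeholder
        table: TABLE_THREADS,
        columns: ['resourceId', 'seq_id DESC'], // a trailing ASC/DESC is parsed per column
      });

      const indexes = await store.listIndexes(TABLE_THREADS);
      console.log(indexes.map(i => `${i.name} (${i.columns.join(', ')}) ${i.size}`));

      const stats = await store.describeIndex('my_threads_resourceid_idx');
      console.log(stats.method, stats.scans, stats.unique);

      await store.dropIndex('my_threads_resourceid_idx');
    }
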