@mastra/mssql 0.0.0-vector-extension-schema-20250922130418 → 0.0.0-vnext-20251104230439

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
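The most visible API change in this diff is the move from the older get*-style storage methods (getThreadsByResourceIdPaginated, getMessagesPaginated, getMessagesById) to list* equivalents (listThreadsByResourceId, listMessages, listMessagesById) that drop the format option and always return paginated objects. The sketch below is a minimal, hypothetical usage example based only on the method signatures visible in this diff; the store variable and the 'createdAt' orderBy value are assumptions, not documented parts of the package's public API.

// Hypothetical sketch of the renamed pagination APIs shown in this diff.
// `store` stands in for an object exposing the memory-domain methods of
// MemoryMSSQL; it is a placeholder, not a documented export of @mastra/mssql.
async function showNewListApis(store, threadId, resourceId) {
  // listThreadsByResourceId replaces getThreadsByResourceIdPaginated and
  // returns { threads, total, page, perPage, hasMore }.
  const { threads, total, hasMore } = await store.listThreadsByResourceId({
    resourceId,
    page: 0,
    perPage: 20,
    orderBy: 'createdAt', // parsed via parseOrderBy in the new code (assumed value)
  });

  // listMessages replaces getMessagesPaginated; the format option is gone and
  // results always come back as a paginated { messages, ... } object.
  const messagesPage = await store.listMessages({
    threadId,
    page: 0,
    perPage: 40,
    filter: { dateRange: { start: new Date('2025-01-01') } },
  });

  return { threads, total, hasMore, messages: messagesPage.messages };
}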
package/dist/index.cjs CHANGED
@@ -3,8 +3,10 @@
  var error = require('@mastra/core/error');
  var storage = require('@mastra/core/storage');
  var sql2 = require('mssql');
- var utils = require('@mastra/core/utils');
  var agent = require('@mastra/core/agent');
+ var utils = require('@mastra/core/utils');
+ var crypto = require('crypto');
+ var evals = require('@mastra/core/evals');

  function _interopDefault (e) { return e && e.__esModule ? e : { default: e }; }

@@ -20,154 +22,71 @@ function getTableName({ indexName, schemaName }) {
  const quotedSchemaName = schemaName;
  return quotedSchemaName ? `${quotedSchemaName}.${quotedIndexName}` : quotedIndexName;
  }
-
- // src/storage/domains/legacy-evals/index.ts
- function transformEvalRow(row) {
- let testInfoValue = null, resultValue = null;
- if (row.test_info) {
- try {
- testInfoValue = typeof row.test_info === "string" ? JSON.parse(row.test_info) : row.test_info;
- } catch {
- }
+ function buildDateRangeFilter(dateRange, fieldName) {
+ const filters = {};
+ if (dateRange?.start) {
+ filters[`${fieldName}_gte`] = dateRange.start;
  }
- if (row.test_info) {
- try {
- resultValue = typeof row.result === "string" ? JSON.parse(row.result) : row.result;
- } catch {
- }
+ if (dateRange?.end) {
+ filters[`${fieldName}_lte`] = dateRange.end;
  }
+ return filters;
+ }
+ function prepareWhereClause(filters, _schema) {
+ const conditions = [];
+ const params = {};
+ let paramIndex = 1;
+ Object.entries(filters).forEach(([key, value]) => {
+ if (value === void 0) return;
+ const paramName = `p${paramIndex++}`;
+ if (key.endsWith("_gte")) {
+ const fieldName = key.slice(0, -4);
+ conditions.push(`[${utils.parseSqlIdentifier(fieldName, "field name")}] >= @${paramName}`);
+ params[paramName] = value instanceof Date ? value.toISOString() : value;
+ } else if (key.endsWith("_lte")) {
+ const fieldName = key.slice(0, -4);
+ conditions.push(`[${utils.parseSqlIdentifier(fieldName, "field name")}] <= @${paramName}`);
+ params[paramName] = value instanceof Date ? value.toISOString() : value;
+ } else if (value === null) {
+ conditions.push(`[${utils.parseSqlIdentifier(key, "field name")}] IS NULL`);
+ } else {
+ conditions.push(`[${utils.parseSqlIdentifier(key, "field name")}] = @${paramName}`);
+ params[paramName] = value instanceof Date ? value.toISOString() : value;
+ }
+ });
  return {
- agentName: row.agent_name,
- input: row.input,
- output: row.output,
- result: resultValue,
- metricName: row.metric_name,
- instructions: row.instructions,
- testInfo: testInfoValue,
- globalRunId: row.global_run_id,
- runId: row.run_id,
- createdAt: row.created_at
+ sql: conditions.length > 0 ? ` WHERE ${conditions.join(" AND ")}` : "",
+ params
  };
  }
- var LegacyEvalsMSSQL = class extends storage.LegacyEvalsStorage {
- pool;
- schema;
- constructor({ pool, schema }) {
- super();
- this.pool = pool;
- this.schema = schema;
- }
- /** @deprecated use getEvals instead */
- async getEvalsByAgentName(agentName, type) {
- try {
- let query = `SELECT * FROM ${getTableName({ indexName: storage.TABLE_EVALS, schemaName: getSchemaName(this.schema) })} WHERE agent_name = @p1`;
- if (type === "test") {
- query += " AND test_info IS NOT NULL AND JSON_VALUE(test_info, '$.testPath') IS NOT NULL";
- } else if (type === "live") {
- query += " AND (test_info IS NULL OR JSON_VALUE(test_info, '$.testPath') IS NULL)";
- }
- query += " ORDER BY created_at DESC";
- const request = this.pool.request();
- request.input("p1", agentName);
- const result = await request.query(query);
- const rows = result.recordset;
- return typeof transformEvalRow === "function" ? rows?.map((row) => transformEvalRow(row)) ?? [] : rows ?? [];
- } catch (error) {
- if (error && error.number === 208 && error.message && error.message.includes("Invalid object name")) {
- return [];
- }
- console.error("Failed to get evals for the specified agent: " + error?.message);
- throw error;
- }
- }
- async getEvals(options = {}) {
- const { agentName, type, page = 0, perPage = 100, dateRange } = options;
- const fromDate = dateRange?.start;
- const toDate = dateRange?.end;
- const where = [];
- const params = {};
- if (agentName) {
- where.push("agent_name = @agentName");
- params["agentName"] = agentName;
- }
- if (type === "test") {
- where.push("test_info IS NOT NULL AND JSON_VALUE(test_info, '$.testPath') IS NOT NULL");
- } else if (type === "live") {
- where.push("(test_info IS NULL OR JSON_VALUE(test_info, '$.testPath') IS NULL)");
- }
- if (fromDate instanceof Date && !isNaN(fromDate.getTime())) {
- where.push(`[created_at] >= @fromDate`);
- params[`fromDate`] = fromDate.toISOString();
- }
- if (toDate instanceof Date && !isNaN(toDate.getTime())) {
- where.push(`[created_at] <= @toDate`);
- params[`toDate`] = toDate.toISOString();
- }
- const whereClause = where.length > 0 ? `WHERE ${where.join(" AND ")}` : "";
- const tableName = getTableName({ indexName: storage.TABLE_EVALS, schemaName: getSchemaName(this.schema) });
- const offset = page * perPage;
- const countQuery = `SELECT COUNT(*) as total FROM ${tableName} ${whereClause}`;
- const dataQuery = `SELECT * FROM ${tableName} ${whereClause} ORDER BY seq_id DESC OFFSET @offset ROWS FETCH NEXT @perPage ROWS ONLY`;
- try {
- const countReq = this.pool.request();
- Object.entries(params).forEach(([key, value]) => {
- if (value instanceof Date) {
- countReq.input(key, sql2__default.default.DateTime, value);
- } else {
- countReq.input(key, value);
- }
- });
- const countResult = await countReq.query(countQuery);
- const total = countResult.recordset[0]?.total || 0;
- if (total === 0) {
- return {
- evals: [],
- total: 0,
- page,
- perPage,
- hasMore: false
- };
+ function transformFromSqlRow({
+ tableName,
+ sqlRow
+ }) {
+ const schema = storage.TABLE_SCHEMAS[tableName];
+ const result = {};
+ Object.entries(sqlRow).forEach(([key, value]) => {
+ const columnSchema = schema?.[key];
+ if (columnSchema?.type === "jsonb" && typeof value === "string") {
+ try {
+ result[key] = JSON.parse(value);
+ } catch {
+ result[key] = value;
  }
- const req = this.pool.request();
- Object.entries(params).forEach(([key, value]) => {
- if (value instanceof Date) {
- req.input(key, sql2__default.default.DateTime, value);
- } else {
- req.input(key, value);
- }
- });
- req.input("offset", offset);
- req.input("perPage", perPage);
- const result = await req.query(dataQuery);
- const rows = result.recordset;
- return {
- evals: rows?.map((row) => transformEvalRow(row)) ?? [],
- total,
- page,
- perPage,
- hasMore: offset + (rows?.length ?? 0) < total
- };
- } catch (error$1) {
- const mastraError = new error.MastraError(
- {
- id: "MASTRA_STORAGE_MSSQL_STORE_GET_EVALS_FAILED",
- domain: error.ErrorDomain.STORAGE,
- category: error.ErrorCategory.THIRD_PARTY,
- details: {
- agentName: agentName || "all",
- type: type || "all",
- page,
- perPage
- }
- },
- error$1
- );
- this.logger?.error?.(mastraError.toString());
- this.logger?.trackException(mastraError);
- throw mastraError;
+ } else if (columnSchema?.type === "timestamp" && value && typeof value === "string") {
+ result[key] = new Date(value);
+ } else if (columnSchema?.type === "timestamp" && value instanceof Date) {
+ result[key] = value;
+ } else if (columnSchema?.type === "boolean") {
+ result[key] = Boolean(value);
+ } else {
+ result[key] = value;
  }
- }
- };
+ });
+ return result;
+ }
+
+ // src/storage/domains/memory/index.ts
  var MemoryMSSQL = class extends storage.MemoryStorage {
  pool;
  schema;
@@ -185,7 +104,7 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
  });
  const cleanMessages = messagesWithParsedContent.map(({ seq_id, ...rest }) => rest);
  const list = new agent.MessageList().add(cleanMessages, "memory");
- return format === "v2" ? list.get.all.v2() : list.get.all.v1();
+ return format === "v2" ? list.get.all.db() : list.get.all.v1();
  }
  constructor({
  pool,
@@ -199,7 +118,7 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
  }
  async getThreadById({ threadId }) {
  try {
- const sql7 = `SELECT
+ const sql5 = `SELECT
  id,
  [resourceId],
  title,
@@ -210,7 +129,7 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
  WHERE id = @threadId`;
  const request = this.pool.request();
  request.input("threadId", threadId);
- const resultSet = await request.query(sql7);
+ const resultSet = await request.query(sql5);
  const thread = resultSet.recordset[0] || null;
  if (!thread) {
  return null;
@@ -235,11 +154,12 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
  );
  }
  }
- async getThreadsByResourceIdPaginated(args) {
- const { resourceId, page = 0, perPage: perPageInput, orderBy = "createdAt", sortDirection = "DESC" } = args;
+ async listThreadsByResourceId(args) {
+ const { resourceId, page = 0, perPage: perPageInput, orderBy } = args;
+ const perPage = storage.normalizePerPage(perPageInput, 100);
+ const { offset, perPage: perPageForResponse } = storage.calculatePagination(page, perPageInput, perPage);
+ const { field, direction } = this.parseOrderBy(orderBy);
  try {
- const perPage = perPageInput !== void 0 ? perPageInput : 100;
- const currentOffset = page * perPage;
  const baseQuery = `FROM ${getTableName({ indexName: storage.TABLE_THREADS, schemaName: getSchemaName(this.schema) })} WHERE [resourceId] = @resourceId`;
  const countQuery = `SELECT COUNT(*) as count ${baseQuery}`;
  const countRequest = this.pool.request();
@@ -251,16 +171,22 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
  threads: [],
  total: 0,
  page,
- perPage,
+ perPage: perPageForResponse,
  hasMore: false
  };
  }
- const orderByField = orderBy === "createdAt" ? "[createdAt]" : "[updatedAt]";
- const dataQuery = `SELECT id, [resourceId], title, metadata, [createdAt], [updatedAt] ${baseQuery} ORDER BY ${orderByField} ${sortDirection} OFFSET @offset ROWS FETCH NEXT @perPage ROWS ONLY`;
+ const orderByField = field === "createdAt" ? "[createdAt]" : "[updatedAt]";
+ const dir = (direction || "DESC").toUpperCase() === "ASC" ? "ASC" : "DESC";
+ const limitValue = perPageInput === false ? total : perPage;
+ const dataQuery = `SELECT id, [resourceId], title, metadata, [createdAt], [updatedAt] ${baseQuery} ORDER BY ${orderByField} ${dir} OFFSET @offset ROWS FETCH NEXT @perPage ROWS ONLY`;
  const dataRequest = this.pool.request();
  dataRequest.input("resourceId", resourceId);
- dataRequest.input("perPage", perPage);
- dataRequest.input("offset", currentOffset);
+ dataRequest.input("offset", offset);
+ if (limitValue > 2147483647) {
+ dataRequest.input("perPage", sql2__default.default.BigInt, limitValue);
+ } else {
+ dataRequest.input("perPage", limitValue);
+ }
  const rowsResult = await dataRequest.query(dataQuery);
  const rows = rowsResult.recordset || [];
  const threads = rows.map((thread) => ({
@@ -273,13 +199,13 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
  threads,
  total,
  page,
- perPage,
- hasMore: currentOffset + threads.length < total
+ perPage: perPageForResponse,
+ hasMore: perPageInput === false ? false : offset + perPage < total
  };
  } catch (error$1) {
  const mastraError = new error.MastraError(
  {
- id: "MASTRA_STORAGE_MSSQL_STORE_GET_THREADS_BY_RESOURCE_ID_PAGINATED_FAILED",
+ id: "MASTRA_STORAGE_MSSQL_STORE_LIST_THREADS_BY_RESOURCE_ID_FAILED",
  domain: error.ErrorDomain.STORAGE,
  category: error.ErrorCategory.THIRD_PARTY,
  details: {
@@ -291,7 +217,13 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
  );
  this.logger?.error?.(mastraError.toString());
  this.logger?.trackException?.(mastraError);
- return { threads: [], total: 0, page, perPage: perPageInput || 100, hasMore: false };
+ return {
+ threads: [],
+ total: 0,
+ page,
+ perPage: perPageForResponse,
+ hasMore: false
+ };
  }
  }
  async saveThread({ thread }) {
@@ -313,7 +245,12 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
  req.input("id", thread.id);
  req.input("resourceId", thread.resourceId);
  req.input("title", thread.title);
- req.input("metadata", thread.metadata ? JSON.stringify(thread.metadata) : null);
+ const metadata = thread.metadata ? JSON.stringify(thread.metadata) : null;
+ if (metadata === null) {
+ req.input("metadata", sql2__default.default.NVarChar, null);
+ } else {
+ req.input("metadata", metadata);
+ }
  req.input("createdAt", sql2__default.default.DateTime2, thread.createdAt);
  req.input("updatedAt", sql2__default.default.DateTime2, thread.updatedAt);
  await req.query(mergeSql);
@@ -332,30 +269,6 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
  );
  }
  }
- /**
- * @deprecated use getThreadsByResourceIdPaginated instead
- */
- async getThreadsByResourceId(args) {
- const { resourceId, orderBy = "createdAt", sortDirection = "DESC" } = args;
- try {
- const baseQuery = `FROM ${getTableName({ indexName: storage.TABLE_THREADS, schemaName: getSchemaName(this.schema) })} WHERE [resourceId] = @resourceId`;
- const orderByField = orderBy === "createdAt" ? "[createdAt]" : "[updatedAt]";
- const dataQuery = `SELECT id, [resourceId], title, metadata, [createdAt], [updatedAt] ${baseQuery} ORDER BY ${orderByField} ${sortDirection}`;
- const request = this.pool.request();
- request.input("resourceId", resourceId);
- const resultSet = await request.query(dataQuery);
- const rows = resultSet.recordset || [];
- return rows.map((thread) => ({
- ...thread,
- metadata: typeof thread.metadata === "string" ? JSON.parse(thread.metadata) : thread.metadata,
- createdAt: thread.createdAt,
- updatedAt: thread.updatedAt
- }));
- } catch (error) {
- this.logger?.error?.(`Error getting threads for resource ${resourceId}:`, error);
- return [];
- }
- }
  /**
  * Updates a thread's title and metadata, merging with existing metadata. Returns the updated thread.
  */
@@ -383,7 +296,7 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
  };
  try {
  const table = getTableName({ indexName: storage.TABLE_THREADS, schemaName: getSchemaName(this.schema) });
- const sql7 = `UPDATE ${table}
+ const sql5 = `UPDATE ${table}
  SET title = @title,
  metadata = @metadata,
  [updatedAt] = @updatedAt
@@ -394,7 +307,7 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
  req.input("title", title);
  req.input("metadata", JSON.stringify(mergedMetadata));
  req.input("updatedAt", /* @__PURE__ */ new Date());
- const result = await req.query(sql7);
+ const result = await req.query(sql5);
  let thread = result.recordset && result.recordset[0];
  if (thread && "seq_id" in thread) {
  const { seq_id, ...rest } = thread;
@@ -464,8 +377,7 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
  }
  async _getIncludedMessages({
  threadId,
- selectBy,
- orderByStatement
+ selectBy
  }) {
  if (!threadId.trim()) throw new Error("threadId must be a non-empty string");
  const include = selectBy?.include;
@@ -493,7 +405,7 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
  m.[resourceId],
  m.seq_id
  FROM (
- SELECT *, ROW_NUMBER() OVER (${orderByStatement}) as row_num
+ SELECT *, ROW_NUMBER() OVER (ORDER BY [createdAt] ASC) as row_num
  FROM ${getTableName({ indexName: storage.TABLE_MESSAGES, schemaName: getSchemaName(this.schema) })}
  WHERE [thread_id] = ${pThreadId}
  ) AS m
@@ -501,15 +413,17 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
  OR EXISTS (
  SELECT 1
  FROM (
- SELECT *, ROW_NUMBER() OVER (${orderByStatement}) as row_num
+ SELECT *, ROW_NUMBER() OVER (ORDER BY [createdAt] ASC) as row_num
  FROM ${getTableName({ indexName: storage.TABLE_MESSAGES, schemaName: getSchemaName(this.schema) })}
  WHERE [thread_id] = ${pThreadId}
  ) AS target
  WHERE target.id = ${pId}
  AND (
- (m.row_num <= target.row_num + ${pPrev} AND m.row_num > target.row_num)
+ -- Get previous messages (messages that come BEFORE the target)
+ (m.row_num < target.row_num AND m.row_num >= target.row_num - ${pPrev})
  OR
- (m.row_num >= target.row_num - ${pNext} AND m.row_num < target.row_num)
+ -- Get next messages (messages that come AFTER the target)
+ (m.row_num > target.row_num AND m.row_num <= target.row_num + ${pNext})
  )
  )
  `
@@ -538,8 +452,11 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
  });
  return dedupedRows;
  }
+ /**
+ * @deprecated use listMessages instead
+ */
  async getMessages(args) {
- const { threadId, resourceId, format, selectBy } = args;
+ const { threadId, resourceId, selectBy } = args;
  const selectStatement = `SELECT seq_id, id, content, role, type, [createdAt], thread_id AS threadId, resourceId`;
  const orderByStatement = `ORDER BY [seq_id] DESC`;
  const limit = storage.resolveMessageLimit({ last: selectBy?.last, defaultLimit: 40 });
@@ -548,7 +465,7 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
  let rows = [];
  const include = selectBy?.include || [];
  if (include?.length) {
- const includeMessages = await this._getIncludedMessages({ threadId, selectBy, orderByStatement });
+ const includeMessages = await this._getIncludedMessages({ threadId, selectBy });
  if (includeMessages) {
  rows.push(...includeMessages);
  }
@@ -573,8 +490,19 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
  const timeDiff = a.seq_id - b.seq_id;
  return timeDiff;
  });
- rows = rows.map(({ seq_id, ...rest }) => rest);
- return this._parseAndFormatMessages(rows, format);
+ const messagesWithParsedContent = rows.map((row) => {
+ if (typeof row.content === "string") {
+ try {
+ return { ...row, content: JSON.parse(row.content) };
+ } catch {
+ return row;
+ }
+ }
+ return row;
+ });
+ const cleanMessages = messagesWithParsedContent.map(({ seq_id, ...rest }) => rest);
+ const list = new agent.MessageList().add(cleanMessages, "memory");
+ return { messages: list.get.all.db() };
  } catch (error$1) {
  const mastraError = new error.MastraError(
  {
@@ -589,15 +517,12 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
  error$1
  );
  this.logger?.error?.(mastraError.toString());
- this.logger?.trackException(mastraError);
- return [];
+ this.logger?.trackException?.(mastraError);
+ return { messages: [] };
  }
  }
- async getMessagesById({
- messageIds,
- format
- }) {
- if (messageIds.length === 0) return [];
+ async listMessagesById({ messageIds }) {
+ if (messageIds.length === 0) return { messages: [] };
  const selectStatement = `SELECT seq_id, id, content, role, type, [createdAt], thread_id AS threadId, resourceId`;
  const orderByStatement = `ORDER BY [seq_id] DESC`;
  try {
@@ -613,13 +538,23 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
  const timeDiff = a.seq_id - b.seq_id;
  return timeDiff;
  });
- rows = rows.map(({ seq_id, ...rest }) => rest);
- if (format === `v1`) return this._parseAndFormatMessages(rows, format);
- return this._parseAndFormatMessages(rows, `v2`);
+ const messagesWithParsedContent = rows.map((row) => {
+ if (typeof row.content === "string") {
+ try {
+ return { ...row, content: JSON.parse(row.content) };
+ } catch {
+ return row;
+ }
+ }
+ return row;
+ });
+ const cleanMessages = messagesWithParsedContent.map(({ seq_id, ...rest }) => rest);
+ const list = new agent.MessageList().add(cleanMessages, "memory");
+ return { messages: list.get.all.db() };
  } catch (error$1) {
  const mastraError = new error.MastraError(
  {
- id: "MASTRA_STORAGE_MSSQL_STORE_GET_MESSAGES_BY_ID_FAILED",
+ id: "MASTRA_STORAGE_MSSQL_STORE_LIST_MESSAGES_BY_ID_FAILED",
  domain: error.ErrorDomain.STORAGE,
  category: error.ErrorCategory.THIRD_PARTY,
  details: {
@@ -629,97 +564,125 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
  error$1
  );
  this.logger?.error?.(mastraError.toString());
- this.logger?.trackException(mastraError);
- return [];
+ this.logger?.trackException?.(mastraError);
+ return { messages: [] };
  }
  }
- async getMessagesPaginated(args) {
- const { threadId, resourceId, format, selectBy } = args;
- const { page = 0, perPage: perPageInput, dateRange } = selectBy?.pagination || {};
+ async listMessages(args) {
+ const { threadId, resourceId, include, filter, perPage: perPageInput, page = 0, orderBy } = args;
+ if (!threadId.trim()) {
+ throw new error.MastraError(
+ {
+ id: "STORAGE_MSSQL_LIST_MESSAGES_INVALID_THREAD_ID",
+ domain: error.ErrorDomain.STORAGE,
+ category: error.ErrorCategory.THIRD_PARTY,
+ details: { threadId }
+ },
+ new Error("threadId must be a non-empty string")
+ );
+ }
+ const perPage = storage.normalizePerPage(perPageInput, 40);
+ const { offset, perPage: perPageForResponse } = storage.calculatePagination(page, perPageInput, perPage);
  try {
- if (!threadId.trim()) throw new Error("threadId must be a non-empty string");
- const fromDate = dateRange?.start;
- const toDate = dateRange?.end;
+ const { field, direction } = this.parseOrderBy(orderBy);
+ const orderByStatement = `ORDER BY [${field}] ${direction}`;
  const selectStatement = `SELECT seq_id, id, content, role, type, [createdAt], thread_id AS threadId, resourceId`;
- const orderByStatement = `ORDER BY [seq_id] DESC`;
- let messages = [];
- if (selectBy?.include?.length) {
- const includeMessages = await this._getIncludedMessages({ threadId, selectBy, orderByStatement });
- if (includeMessages) messages.push(...includeMessages);
- }
- const perPage = perPageInput !== void 0 ? perPageInput : storage.resolveMessageLimit({ last: selectBy?.last, defaultLimit: 40 });
- const currentOffset = page * perPage;
+ const tableName = getTableName({ indexName: storage.TABLE_MESSAGES, schemaName: getSchemaName(this.schema) });
  const conditions = ["[thread_id] = @threadId"];
  const request = this.pool.request();
  request.input("threadId", threadId);
- if (fromDate instanceof Date && !isNaN(fromDate.getTime())) {
+ if (resourceId) {
+ conditions.push("[resourceId] = @resourceId");
+ request.input("resourceId", resourceId);
+ }
+ if (filter?.dateRange?.start) {
  conditions.push("[createdAt] >= @fromDate");
- request.input("fromDate", fromDate.toISOString());
+ request.input("fromDate", filter.dateRange.start);
  }
- if (toDate instanceof Date && !isNaN(toDate.getTime())) {
+ if (filter?.dateRange?.end) {
  conditions.push("[createdAt] <= @toDate");
- request.input("toDate", toDate.toISOString());
+ request.input("toDate", filter.dateRange.end);
  }
  const whereClause = `WHERE ${conditions.join(" AND ")}`;
- const countQuery = `SELECT COUNT(*) as total FROM ${getTableName({ indexName: storage.TABLE_MESSAGES, schemaName: getSchemaName(this.schema) })} ${whereClause}`;
+ const countQuery = `SELECT COUNT(*) as total FROM ${tableName} ${whereClause}`;
  const countResult = await request.query(countQuery);
  const total = parseInt(countResult.recordset[0]?.total, 10) || 0;
- if (total === 0 && messages.length > 0) {
- const parsedIncluded = this._parseAndFormatMessages(messages, format);
+ const limitValue = perPageInput === false ? total : perPage;
+ const dataQuery = `${selectStatement} FROM ${tableName} ${whereClause} ${orderByStatement} OFFSET @offset ROWS FETCH NEXT @limit ROWS ONLY`;
+ request.input("offset", offset);
+ if (limitValue > 2147483647) {
+ request.input("limit", sql2__default.default.BigInt, limitValue);
+ } else {
+ request.input("limit", limitValue);
+ }
+ const rowsResult = await request.query(dataQuery);
+ const rows = rowsResult.recordset || [];
+ const messages = [...rows];
+ if (total === 0 && messages.length === 0 && (!include || include.length === 0)) {
  return {
- messages: parsedIncluded,
- total: parsedIncluded.length,
+ messages: [],
+ total: 0,
  page,
- perPage,
+ perPage: perPageForResponse,
  hasMore: false
  };
  }
- const excludeIds = messages.map((m) => m.id);
- if (excludeIds.length > 0) {
- const excludeParams = excludeIds.map((_, idx) => `@id${idx}`);
- conditions.push(`id NOT IN (${excludeParams.join(", ")})`);
- excludeIds.forEach((id, idx) => request.input(`id${idx}`, id));
+ const messageIds = new Set(messages.map((m) => m.id));
+ if (include && include.length > 0) {
+ const selectBy = { include };
+ const includeMessages = await this._getIncludedMessages({ threadId, selectBy });
+ if (includeMessages) {
+ for (const includeMsg of includeMessages) {
+ if (!messageIds.has(includeMsg.id)) {
+ messages.push(includeMsg);
+ messageIds.add(includeMsg.id);
+ }
+ }
+ }
  }
- const finalWhereClause = `WHERE ${conditions.join(" AND ")}`;
- const dataQuery = `${selectStatement} FROM ${getTableName({ indexName: storage.TABLE_MESSAGES, schemaName: getSchemaName(this.schema) })} ${finalWhereClause} ${orderByStatement} OFFSET @offset ROWS FETCH NEXT @limit ROWS ONLY`;
- request.input("offset", currentOffset);
- request.input("limit", perPage);
- const rowsResult = await request.query(dataQuery);
- const rows = rowsResult.recordset || [];
- rows.sort((a, b) => a.seq_id - b.seq_id);
- messages.push(...rows);
- const parsed = this._parseAndFormatMessages(messages, format);
+ const parsed = this._parseAndFormatMessages(messages, "v2");
+ let finalMessages = parsed;
+ finalMessages = finalMessages.sort((a, b) => {
+ const aValue = field === "createdAt" ? new Date(a.createdAt).getTime() : a[field];
+ const bValue = field === "createdAt" ? new Date(b.createdAt).getTime() : b[field];
+ return direction === "ASC" ? aValue - bValue : bValue - aValue;
+ });
+ const returnedThreadMessageIds = new Set(finalMessages.filter((m) => m.threadId === threadId).map((m) => m.id));
+ const allThreadMessagesReturned = returnedThreadMessageIds.size >= total;
+ const hasMore = perPageInput !== false && !allThreadMessagesReturned && offset + perPage < total;
  return {
- messages: parsed,
- total: total + excludeIds.length,
+ messages: finalMessages,
+ total,
  page,
- perPage,
- hasMore: currentOffset + rows.length < total
+ perPage: perPageForResponse,
+ hasMore
  };
  } catch (error$1) {
  const mastraError = new error.MastraError(
  {
- id: "MASTRA_STORAGE_MSSQL_STORE_GET_MESSAGES_PAGINATED_FAILED",
+ id: "MASTRA_STORAGE_MSSQL_STORE_LIST_MESSAGES_FAILED",
  domain: error.ErrorDomain.STORAGE,
  category: error.ErrorCategory.THIRD_PARTY,
  details: {
  threadId,
- resourceId: resourceId ?? "",
- page
+ resourceId: resourceId ?? ""
  }
  },
  error$1
  );
  this.logger?.error?.(mastraError.toString());
- this.logger?.trackException(mastraError);
- return { messages: [], total: 0, page, perPage: perPageInput || 40, hasMore: false };
+ this.logger?.trackException?.(mastraError);
+ return {
+ messages: [],
+ total: 0,
+ page,
+ perPage: perPageForResponse,
+ hasMore: false
+ };
  }
  }
- async saveMessages({
- messages,
- format
- }) {
- if (messages.length === 0) return messages;
+ async saveMessages({ messages }) {
+ if (messages.length === 0) return { messages: [] };
  const threadId = messages[0]?.threadId;
  if (!threadId) {
  throw new error.MastraError({
@@ -801,8 +764,7 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
  return message;
  });
  const list = new agent.MessageList().add(messagesWithParsedContent, "memory");
- if (format === "v2") return list.get.all.v2();
- return list.get.all.v1();
+ return { messages: list.get.all.db() };
  } catch (error$1) {
  throw new error.MastraError(
  {
@@ -978,8 +940,10 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
  return null;
  }
  return {
- ...result,
- workingMemory: typeof result.workingMemory === "object" ? JSON.stringify(result.workingMemory) : result.workingMemory,
+ id: result.id,
+ createdAt: result.createdAt,
+ updatedAt: result.updatedAt,
+ workingMemory: result.workingMemory,
  metadata: typeof result.metadata === "string" ? JSON.parse(result.metadata) : result.metadata
  };
  } catch (error$1) {
@@ -993,7 +957,7 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
  error$1
  );
  this.logger?.error?.(mastraError.toString());
- this.logger?.trackException(mastraError);
+ this.logger?.trackException?.(mastraError);
  throw mastraError;
  }
  }
@@ -1002,7 +966,7 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
  tableName: storage.TABLE_RESOURCES,
  record: {
  ...resource,
- metadata: JSON.stringify(resource.metadata)
+ metadata: resource.metadata
  }
  });
  return resource;
@@ -1060,119 +1024,444 @@ var MemoryMSSQL = class extends storage.MemoryStorage {
  error$1
  );
  this.logger?.error?.(mastraError.toString());
- this.logger?.trackException(mastraError);
+ this.logger?.trackException?.(mastraError);
  throw mastraError;
  }
  }
  };
- var StoreOperationsMSSQL = class extends storage.StoreOperations {
+ var ObservabilityMSSQL = class extends storage.ObservabilityStorage {
  pool;
- schemaName;
- setupSchemaPromise = null;
- schemaSetupComplete = void 0;
- getSqlType(type, isPrimaryKey = false) {
- switch (type) {
- case "text":
- return isPrimaryKey ? "NVARCHAR(255)" : "NVARCHAR(MAX)";
- case "timestamp":
- return "DATETIME2(7)";
- case "uuid":
- return "UNIQUEIDENTIFIER";
- case "jsonb":
- return "NVARCHAR(MAX)";
- case "integer":
- return "INT";
- case "bigint":
- return "BIGINT";
- case "float":
- return "FLOAT";
- default:
- throw new error.MastraError({
- id: "MASTRA_STORAGE_MSSQL_STORE_TYPE_NOT_SUPPORTED",
- domain: error.ErrorDomain.STORAGE,
- category: error.ErrorCategory.THIRD_PARTY
- });
- }
- }
- constructor({ pool, schemaName }) {
+ operations;
+ schema;
+ constructor({
+ pool,
+ operations,
+ schema
+ }) {
  super();
  this.pool = pool;
- this.schemaName = schemaName;
+ this.operations = operations;
+ this.schema = schema;
  }
- async hasColumn(table, column) {
- const schema = this.schemaName || "dbo";
- const request = this.pool.request();
- request.input("schema", schema);
- request.input("table", table);
- request.input("column", column);
- request.input("columnLower", column.toLowerCase());
- const result = await request.query(
- `SELECT 1 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = @schema AND TABLE_NAME = @table AND (COLUMN_NAME = @column OR COLUMN_NAME = @columnLower)`
- );
- return result.recordset.length > 0;
+ get aiTracingStrategy() {
+ return {
+ preferred: "batch-with-updates",
+ supported: ["batch-with-updates", "insert-only"]
+ };
  }
- async setupSchema() {
- if (!this.schemaName || this.schemaSetupComplete) {
- return;
- }
- if (!this.setupSchemaPromise) {
- this.setupSchemaPromise = (async () => {
- try {
- const checkRequest = this.pool.request();
- checkRequest.input("schemaName", this.schemaName);
- const checkResult = await checkRequest.query(`
- SELECT 1 AS found FROM INFORMATION_SCHEMA.SCHEMATA WHERE SCHEMA_NAME = @schemaName
- `);
- const schemaExists = Array.isArray(checkResult.recordset) && checkResult.recordset.length > 0;
- if (!schemaExists) {
- try {
- await this.pool.request().query(`CREATE SCHEMA [${this.schemaName}]`);
- this.logger?.info?.(`Schema "${this.schemaName}" created successfully`);
- } catch (error) {
- this.logger?.error?.(`Failed to create schema "${this.schemaName}"`, { error });
- throw new Error(
- `Unable to create schema "${this.schemaName}". This requires CREATE privilege on the database. Either create the schema manually or grant CREATE privilege to the user.`
- );
- }
+ async createAISpan(span) {
+ try {
+ const startedAt = span.startedAt instanceof Date ? span.startedAt.toISOString() : span.startedAt;
+ const endedAt = span.endedAt instanceof Date ? span.endedAt.toISOString() : span.endedAt;
+ const record = {
+ ...span,
+ startedAt,
+ endedAt
+ // Note: createdAt/updatedAt will be set by default values
+ };
+ return this.operations.insert({ tableName: storage.TABLE_AI_SPANS, record });
+ } catch (error$1) {
+ throw new error.MastraError(
+ {
+ id: "MSSQL_STORE_CREATE_AI_SPAN_FAILED",
+ domain: error.ErrorDomain.STORAGE,
+ category: error.ErrorCategory.USER,
+ details: {
+ spanId: span.spanId,
+ traceId: span.traceId,
+ spanType: span.spanType,
+ spanName: span.name
  }
- this.schemaSetupComplete = true;
- this.logger?.debug?.(`Schema "${this.schemaName}" is ready for use`);
- } catch (error) {
- this.schemaSetupComplete = void 0;
- this.setupSchemaPromise = null;
- throw error;
- } finally {
- this.setupSchemaPromise = null;
- }
- })();
+ },
+ error$1
+ );
  }
- await this.setupSchemaPromise;
  }
- async insert({ tableName, record }) {
+ async getAITrace(traceId) {
  try {
- const columns = Object.keys(record).map((col) => utils.parseSqlIdentifier(col, "column name"));
- const values = Object.values(record);
- const paramNames = values.map((_, i) => `@param${i}`);
- const insertSql = `INSERT INTO ${getTableName({ indexName: tableName, schemaName: getSchemaName(this.schemaName) })} (${columns.map((c) => `[${c}]`).join(", ")}) VALUES (${paramNames.join(", ")})`;
- const request = this.pool.request();
- values.forEach((value, i) => {
- if (value instanceof Date) {
- request.input(`param${i}`, sql2__default.default.DateTime2, value);
- } else if (typeof value === "object" && value !== null) {
- request.input(`param${i}`, JSON.stringify(value));
- } else {
- request.input(`param${i}`, value);
- }
+ const tableName = getTableName({
+ indexName: storage.TABLE_AI_SPANS,
+ schemaName: getSchemaName(this.schema)
  });
- await request.query(insertSql);
+ const request = this.pool.request();
+ request.input("traceId", traceId);
+ const result = await request.query(
+ `SELECT
+ [traceId], [spanId], [parentSpanId], [name], [scope], [spanType],
+ [attributes], [metadata], [links], [input], [output], [error], [isEvent],
+ [startedAt], [endedAt], [createdAt], [updatedAt]
+ FROM ${tableName}
+ WHERE [traceId] = @traceId
+ ORDER BY [startedAt] DESC`
+ );
+ if (!result.recordset || result.recordset.length === 0) {
+ return null;
+ }
+ return {
+ traceId,
+ spans: result.recordset.map(
+ (span) => transformFromSqlRow({
+ tableName: storage.TABLE_AI_SPANS,
+ sqlRow: span
+ })
+ )
+ };
  } catch (error$1) {
  throw new error.MastraError(
  {
- id: "MASTRA_STORAGE_MSSQL_STORE_INSERT_FAILED",
+ id: "MSSQL_STORE_GET_AI_TRACE_FAILED",
  domain: error.ErrorDomain.STORAGE,
- category: error.ErrorCategory.THIRD_PARTY,
+ category: error.ErrorCategory.USER,
  details: {
- tableName
+ traceId
+ }
+ },
+ error$1
+ );
+ }
+ }
+ async updateAISpan({
+ spanId,
+ traceId,
+ updates
+ }) {
+ try {
+ const data = { ...updates };
+ if (data.endedAt instanceof Date) {
+ data.endedAt = data.endedAt.toISOString();
+ }
+ if (data.startedAt instanceof Date) {
+ data.startedAt = data.startedAt.toISOString();
+ }
+ await this.operations.update({
+ tableName: storage.TABLE_AI_SPANS,
+ keys: { spanId, traceId },
+ data
+ });
+ } catch (error$1) {
+ throw new error.MastraError(
+ {
+ id: "MSSQL_STORE_UPDATE_AI_SPAN_FAILED",
+ domain: error.ErrorDomain.STORAGE,
+ category: error.ErrorCategory.USER,
+ details: {
+ spanId,
+ traceId
+ }
+ },
+ error$1
+ );
+ }
+ }
+ async getAITracesPaginated({
+ filters,
+ pagination
+ }) {
+ const page = pagination?.page ?? 0;
+ const perPage = pagination?.perPage ?? 10;
+ const { entityId, entityType, ...actualFilters } = filters || {};
+ const filtersWithDateRange = {
+ ...actualFilters,
+ ...buildDateRangeFilter(pagination?.dateRange, "startedAt"),
+ parentSpanId: null
+ // Only get root spans for traces
+ };
+ const whereClause = prepareWhereClause(filtersWithDateRange);
+ let actualWhereClause = whereClause.sql;
+ const params = { ...whereClause.params };
+ let currentParamIndex = Object.keys(params).length + 1;
+ if (entityId && entityType) {
+ let name = "";
+ if (entityType === "workflow") {
+ name = `workflow run: '${entityId}'`;
+ } else if (entityType === "agent") {
+ name = `agent run: '${entityId}'`;
+ } else {
+ const error$1 = new error.MastraError({
+ id: "MSSQL_STORE_GET_AI_TRACES_PAGINATED_FAILED",
+ domain: error.ErrorDomain.STORAGE,
+ category: error.ErrorCategory.USER,
+ details: {
+ entityType
+ },
+ text: `Cannot filter by entity type: ${entityType}`
+ });
+ throw error$1;
+ }
+ const entityParam = `p${currentParamIndex++}`;
+ if (actualWhereClause) {
+ actualWhereClause += ` AND [name] = @${entityParam}`;
+ } else {
+ actualWhereClause = ` WHERE [name] = @${entityParam}`;
+ }
+ params[entityParam] = name;
+ }
+ const tableName = getTableName({
+ indexName: storage.TABLE_AI_SPANS,
+ schemaName: getSchemaName(this.schema)
+ });
+ try {
+ const countRequest = this.pool.request();
+ Object.entries(params).forEach(([key, value]) => {
+ countRequest.input(key, value);
+ });
+ const countResult = await countRequest.query(
+ `SELECT COUNT(*) as count FROM ${tableName}${actualWhereClause}`
+ );
+ const total = countResult.recordset[0]?.count ?? 0;
+ if (total === 0) {
+ return {
+ pagination: {
+ total: 0,
+ page,
+ perPage,
+ hasMore: false
+ },
+ spans: []
+ };
+ }
+ const dataRequest = this.pool.request();
+ Object.entries(params).forEach(([key, value]) => {
+ dataRequest.input(key, value);
+ });
+ dataRequest.input("offset", page * perPage);
+ dataRequest.input("limit", perPage);
+ const dataResult = await dataRequest.query(
+ `SELECT * FROM ${tableName}${actualWhereClause} ORDER BY [startedAt] DESC OFFSET @offset ROWS FETCH NEXT @limit ROWS ONLY`
+ );
+ const spans = dataResult.recordset.map(
+ (row) => transformFromSqlRow({
+ tableName: storage.TABLE_AI_SPANS,
+ sqlRow: row
+ })
+ );
+ return {
+ pagination: {
+ total,
+ page,
+ perPage,
+ hasMore: (page + 1) * perPage < total
+ },
+ spans
+ };
+ } catch (error$1) {
+ throw new error.MastraError(
+ {
+ id: "MSSQL_STORE_GET_AI_TRACES_PAGINATED_FAILED",
+ domain: error.ErrorDomain.STORAGE,
+ category: error.ErrorCategory.USER
+ },
+ error$1
+ );
+ }
+ }
+ async batchCreateAISpans(args) {
+ if (!args.records || args.records.length === 0) {
+ return;
+ }
+ try {
+ await this.operations.batchInsert({
+ tableName: storage.TABLE_AI_SPANS,
+ records: args.records.map((span) => ({
+ ...span,
+ startedAt: span.startedAt instanceof Date ? span.startedAt.toISOString() : span.startedAt,
+ endedAt: span.endedAt instanceof Date ? span.endedAt.toISOString() : span.endedAt
+ }))
+ });
+ } catch (error$1) {
+ throw new error.MastraError(
+ {
+ id: "MSSQL_STORE_BATCH_CREATE_AI_SPANS_FAILED",
+ domain: error.ErrorDomain.STORAGE,
+ category: error.ErrorCategory.USER,
+ details: {
+ count: args.records.length
+ }
+ },
+ error$1
+ );
+ }
+ }
+ async batchUpdateAISpans(args) {
+ if (!args.records || args.records.length === 0) {
+ return;
+ }
+ try {
+ const updates = args.records.map(({ traceId, spanId, updates: data }) => {
+ const processedData = { ...data };
+ if (processedData.endedAt instanceof Date) {
+ processedData.endedAt = processedData.endedAt.toISOString();
+ }
+ if (processedData.startedAt instanceof Date) {
+ processedData.startedAt = processedData.startedAt.toISOString();
+ }
+ return {
+ keys: { spanId, traceId },
+ data: processedData
+ };
+ });
+ await this.operations.batchUpdate({
+ tableName: storage.TABLE_AI_SPANS,
+ updates
+ });
+ } catch (error$1) {
+ throw new error.MastraError(
+ {
+ id: "MSSQL_STORE_BATCH_UPDATE_AI_SPANS_FAILED",
+ domain: error.ErrorDomain.STORAGE,
+ category: error.ErrorCategory.USER,
+ details: {
+ count: args.records.length
+ }
+ },
+ error$1
+ );
+ }
+ }
+ async batchDeleteAITraces(args) {
+ if (!args.traceIds || args.traceIds.length === 0) {
+ return;
+ }
+ try {
+ const keys = args.traceIds.map((traceId) => ({ traceId }));
+ await this.operations.batchDelete({
+ tableName: storage.TABLE_AI_SPANS,
+ keys
+ });
+ } catch (error$1) {
+ throw new error.MastraError(
+ {
+ id: "MSSQL_STORE_BATCH_DELETE_AI_TRACES_FAILED",
+ domain: error.ErrorDomain.STORAGE,
+ category: error.ErrorCategory.USER,
+ details: {
+ count: args.traceIds.length
+ }
+ },
+ error$1
+ );
+ }
+ }
+ };
+ var StoreOperationsMSSQL = class extends storage.StoreOperations {
+ pool;
+ schemaName;
+ setupSchemaPromise = null;
+ schemaSetupComplete = void 0;
+ getSqlType(type, isPrimaryKey = false, useLargeStorage = false) {
+ switch (type) {
+ case "text":
+ if (useLargeStorage) {
+ return "NVARCHAR(MAX)";
+ }
+ return isPrimaryKey ? "NVARCHAR(255)" : "NVARCHAR(400)";
+ case "timestamp":
+ return "DATETIME2(7)";
+ case "uuid":
+ return "UNIQUEIDENTIFIER";
+ case "jsonb":
+ return "NVARCHAR(MAX)";
+ case "integer":
+ return "INT";
+ case "bigint":
+ return "BIGINT";
+ case "float":
+ return "FLOAT";
+ case "boolean":
+ return "BIT";
+ default:
+ throw new error.MastraError({
+ id: "MASTRA_STORAGE_MSSQL_STORE_TYPE_NOT_SUPPORTED",
+ domain: error.ErrorDomain.STORAGE,
+ category: error.ErrorCategory.THIRD_PARTY
+ });
+ }
+ }
+ constructor({ pool, schemaName }) {
+ super();
+ this.pool = pool;
+ this.schemaName = schemaName;
+ }
+ async hasColumn(table, column) {
+ const schema = this.schemaName || "dbo";
+ const request = this.pool.request();
+ request.input("schema", schema);
+ request.input("table", table);
+ request.input("column", column);
+ request.input("columnLower", column.toLowerCase());
+ const result = await request.query(
+ `SELECT 1 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = @schema AND TABLE_NAME = @table AND (COLUMN_NAME = @column OR COLUMN_NAME = @columnLower)`
+ );
+ return result.recordset.length > 0;
+ }
+ async setupSchema() {
+ if (!this.schemaName || this.schemaSetupComplete) {
+ return;
+ }
+ if (!this.setupSchemaPromise) {
+ this.setupSchemaPromise = (async () => {
+ try {
+ const checkRequest = this.pool.request();
+ checkRequest.input("schemaName", this.schemaName);
+ const checkResult = await checkRequest.query(`
+ SELECT 1 AS found FROM INFORMATION_SCHEMA.SCHEMATA WHERE SCHEMA_NAME = @schemaName
+ `);
+ const schemaExists = Array.isArray(checkResult.recordset) && checkResult.recordset.length > 0;
+ if (!schemaExists) {
+ try {
+ await this.pool.request().query(`CREATE SCHEMA [${this.schemaName}]`);
+ this.logger?.info?.(`Schema "${this.schemaName}" created successfully`);
+ } catch (error) {
+ this.logger?.error?.(`Failed to create schema "${this.schemaName}"`, { error });
+ throw new Error(
+ `Unable to create schema "${this.schemaName}". This requires CREATE privilege on the database. Either create the schema manually or grant CREATE privilege to the user.`
+ );
+ }
+ }
+ this.schemaSetupComplete = true;
+ this.logger?.debug?.(`Schema "${this.schemaName}" is ready for use`);
+ } catch (error) {
+ this.schemaSetupComplete = void 0;
+ this.setupSchemaPromise = null;
+ throw error;
+ } finally {
+ this.setupSchemaPromise = null;
+ }
+ })();
+ }
+ await this.setupSchemaPromise;
+ }
+ async insert({
+ tableName,
+ record,
+ transaction
+ }) {
+ try {
+ const columns = Object.keys(record);
+ const parsedColumns = columns.map((col) => utils.parseSqlIdentifier(col, "column name"));
+ const paramNames = columns.map((_, i) => `@param${i}`);
+ const insertSql = `INSERT INTO ${getTableName({ indexName: tableName, schemaName: getSchemaName(this.schemaName) })} (${parsedColumns.map((c) => `[${c}]`).join(", ")}) VALUES (${paramNames.join(", ")})`;
+ const request = transaction ? transaction.request() : this.pool.request();
+ columns.forEach((col, i) => {
+ const value = record[col];
+ const preparedValue = this.prepareValue(value, col, tableName);
+ if (preparedValue instanceof Date) {
+ request.input(`param${i}`, sql2__default.default.DateTime2, preparedValue);
+ } else if (preparedValue === null || preparedValue === void 0) {
+ request.input(`param${i}`, this.getMssqlType(tableName, col), null);
+ } else {
+ request.input(`param${i}`, preparedValue);
+ }
+ });
+ await request.query(insertSql);
+ } catch (error$1) {
+ throw new error.MastraError(
+ {
+ id: "MASTRA_STORAGE_MSSQL_STORE_INSERT_FAILED",
+ domain: error.ErrorDomain.STORAGE,
+ category: error.ErrorCategory.THIRD_PARTY,
+ details: {
+ tableName
  }
  },
  error$1
@@ -1185,7 +1474,7 @@ var StoreOperationsMSSQL = class extends storage.StoreOperations {
  try {
  await this.pool.request().query(`TRUNCATE TABLE ${fullTableName}`);
  } catch (truncateError) {
- if (truncateError.message && truncateError.message.includes("foreign key")) {
+ if (truncateError?.number === 4712) {
  await this.pool.request().query(`DELETE FROM ${fullTableName}`);
  } else {
  throw truncateError;
@@ -1208,9 +1497,11 @@ var StoreOperationsMSSQL = class extends storage.StoreOperations {
  getDefaultValue(type) {
  switch (type) {
  case "timestamp":
- return "DEFAULT SYSDATETIMEOFFSET()";
+ return "DEFAULT SYSUTCDATETIME()";
  case "jsonb":
  return "DEFAULT N'{}'";
+ case "boolean":
+ return "DEFAULT 0";
  default:
  return super.getDefaultValue(type);
  }
@@ -1221,13 +1512,29 @@ var StoreOperationsMSSQL = class extends storage.StoreOperations {
  }) {
  try {
  const uniqueConstraintColumns = tableName === storage.TABLE_WORKFLOW_SNAPSHOT ? ["workflow_name", "run_id"] : [];
+ const largeDataColumns = [
+ "workingMemory",
+ "snapshot",
+ "metadata",
+ "content",
+ // messages.content - can be very long conversation content
+ "input",
+ // evals.input - test input data
+ "output",
+ // evals.output - test output data
+ "instructions",
+ // evals.instructions - evaluation instructions
+ "other"
+ // traces.other - additional trace data
+ ];
  const columns = Object.entries(schema).map(([name, def]) => {
  const parsedName = utils.parseSqlIdentifier(name, "column name");
  const constraints = [];
  if (def.primaryKey) constraints.push("PRIMARY KEY");
  if (!def.nullable) constraints.push("NOT NULL");
  const isIndexed = !!def.primaryKey || uniqueConstraintColumns.includes(name);
- return `[${parsedName}] ${this.getSqlType(def.type, isIndexed)} ${constraints.join(" ")}`.trim();
+ const useLargeStorage = largeDataColumns.includes(name);
+ return `[${parsedName}] ${this.getSqlType(def.type, isIndexed, useLargeStorage)} ${constraints.join(" ")}`.trim();
  }).join(",\n");
  if (this.schemaName) {
  await this.setupSchema();
@@ -1314,7 +1621,19 @@ ${columns}
  const columnExists = Array.isArray(checkResult.recordset) && checkResult.recordset.length > 0;
  if (!columnExists) {
  const columnDef = schema[columnName];
- const sqlType = this.getSqlType(columnDef.type);
+ const largeDataColumns = [
+ "workingMemory",
+ "snapshot",
+ "metadata",
+ "content",
+ "input",
+ "output",
+ "instructions",
+ "other"
+ ];
+ const useLargeStorage = largeDataColumns.includes(columnName);
+ const isIndexed = !!columnDef.primaryKey;
+ const sqlType = this.getSqlType(columnDef.type, isIndexed, useLargeStorage);
  const nullable = columnDef.nullable === false ? "NOT NULL" : "";
  const defaultValue = columnDef.nullable === false ? this.getDefaultValue(columnDef.type) : "";
  const parsedColumnName = utils.parseSqlIdentifier(columnName, "column name");
@@ -1342,13 +1661,17 @@ ${columns}
  try {
  const keyEntries = Object.entries(keys).map(([key, value]) => [utils.parseSqlIdentifier(key, "column name"), value]);
  const conditions = keyEntries.map(([key], i) => `[${key}] = @param${i}`).join(" AND ");
- const values = keyEntries.map(([_, value]) => value);
- const sql7 = `SELECT * FROM ${getTableName({ indexName: tableName, schemaName: getSchemaName(this.schemaName) })} WHERE ${conditions}`;
+ const sql5 = `SELECT * FROM ${getTableName({ indexName: tableName, schemaName: getSchemaName(this.schemaName) })} WHERE ${conditions}`;
  const request = this.pool.request();
- values.forEach((value, i) => {
- request.input(`param${i}`, value);
+ keyEntries.forEach(([key, value], i) => {
+ const preparedValue = this.prepareValue(value, key, tableName);
+ if (preparedValue === null || preparedValue === void 0) {
+ request.input(`param${i}`, this.getMssqlType(tableName, key), null);
+ } else {
+ request.input(`param${i}`, preparedValue);
+ }
  });
- const resultSet = await request.query(sql7);
+ const resultSet = await request.query(sql5);
  const result = resultSet.recordset[0] || null;
  if (!result) {
  return null;
@@ -1380,63 +1703,599 @@ ${columns}
1380
1703
  try {
1381
1704
  await transaction.begin();
1382
1705
  for (const record of records) {
1383
- await this.insert({ tableName, record });
1706
+ await this.insert({ tableName, record, transaction });
1707
+ }
1708
+ await transaction.commit();
1709
+ } catch (error$1) {
1710
+ await transaction.rollback();
1711
+ throw new error.MastraError(
1712
+ {
1713
+ id: "MASTRA_STORAGE_MSSQL_STORE_BATCH_INSERT_FAILED",
1714
+ domain: error.ErrorDomain.STORAGE,
1715
+ category: error.ErrorCategory.THIRD_PARTY,
1716
+ details: {
1717
+ tableName,
1718
+ numberOfRecords: records.length
1719
+ }
1720
+ },
1721
+ error$1
1722
+ );
1723
+ }
1724
+ }
1725
+ async dropTable({ tableName }) {
1726
+ try {
1727
+ const tableNameWithSchema = getTableName({ indexName: tableName, schemaName: getSchemaName(this.schemaName) });
1728
+ await this.pool.request().query(`DROP TABLE IF EXISTS ${tableNameWithSchema}`);
1729
+ } catch (error$1) {
1730
+ throw new error.MastraError(
1731
+ {
1732
+ id: "MASTRA_STORAGE_MSSQL_STORE_DROP_TABLE_FAILED",
1733
+ domain: error.ErrorDomain.STORAGE,
1734
+ category: error.ErrorCategory.THIRD_PARTY,
1735
+ details: {
1736
+ tableName
1737
+ }
1738
+ },
1739
+ error$1
1740
+ );
1741
+ }
1742
+ }
1743
+ /**
1744
+ * Prepares a value for database operations, handling Date objects and JSON serialization
1745
+ */
1746
+ prepareValue(value, columnName, tableName) {
1747
+ if (value === null || value === void 0) {
1748
+ return value;
1749
+ }
1750
+ if (value instanceof Date) {
1751
+ return value;
1752
+ }
1753
+ const schema = storage.TABLE_SCHEMAS[tableName];
1754
+ const columnSchema = schema?.[columnName];
1755
+ if (columnSchema?.type === "boolean") {
1756
+ return value ? 1 : 0;
1757
+ }
1758
+ if (columnSchema?.type === "jsonb") {
1759
+ return JSON.stringify(value);
1760
+ }
1761
+ if (typeof value === "object") {
1762
+ return JSON.stringify(value);
1763
+ }
1764
+ return value;
1765
+ }
1766
+ /**
1767
+ * Maps TABLE_SCHEMAS types to mssql param types (used when value is null)
1768
+ */
1769
+ getMssqlType(tableName, columnName) {
1770
+ const col = storage.TABLE_SCHEMAS[tableName]?.[columnName];
1771
+ switch (col?.type) {
1772
+ case "text":
1773
+ return sql2__default.default.NVarChar;
1774
+ case "timestamp":
1775
+ return sql2__default.default.DateTime2;
1776
+ case "uuid":
1777
+ return sql2__default.default.UniqueIdentifier;
1778
+ case "jsonb":
1779
+ return sql2__default.default.NVarChar;
1780
+ case "integer":
1781
+ return sql2__default.default.Int;
1782
+ case "bigint":
1783
+ return sql2__default.default.BigInt;
1784
+ case "float":
1785
+ return sql2__default.default.Float;
1786
+ case "boolean":
1787
+ return sql2__default.default.Bit;
1788
+ default:
1789
+ return sql2__default.default.NVarChar;
1790
+ }
1791
+ }
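
Editor's note: the null branch above exists because a null parameter carries no value for the driver to infer a type from, so getMssqlType supplies the column's declared type instead. A minimal sketch of the same pattern against the mssql driver directly (table and column names are illustrative; `pool` is assumed to be a connected mssql ConnectionPool):

const sql = require('mssql');

async function setNickname(pool, userId, nickname) {
  const request = pool.request();
  request.input('id', userId); // non-null values: let the driver infer the parameter type
  if (nickname === null || nickname === undefined) {
    // null values: bind with an explicit type, mirroring the prepareValue/getMssqlType pattern above
    request.input('nickname', sql.NVarChar, null);
  } else {
    request.input('nickname', nickname);
  }
  await request.query('UPDATE users SET nickname = @nickname WHERE id = @id');
}
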
1792
+ /**
1793
+ * Update a single record in the database
1794
+ */
1795
+ async update({
1796
+ tableName,
1797
+ keys,
1798
+ data,
1799
+ transaction
1800
+ }) {
1801
+ try {
1802
+ if (!data || Object.keys(data).length === 0) {
1803
+ throw new error.MastraError({
1804
+ id: "MASTRA_STORAGE_MSSQL_UPDATE_EMPTY_DATA",
1805
+ domain: error.ErrorDomain.STORAGE,
1806
+ category: error.ErrorCategory.USER,
1807
+ text: "Cannot update with empty data payload"
1808
+ });
1809
+ }
1810
+ if (!keys || Object.keys(keys).length === 0) {
1811
+ throw new error.MastraError({
1812
+ id: "MASTRA_STORAGE_MSSQL_UPDATE_EMPTY_KEYS",
1813
+ domain: error.ErrorDomain.STORAGE,
1814
+ category: error.ErrorCategory.USER,
1815
+ text: "Cannot update without keys to identify records"
1816
+ });
1817
+ }
1818
+ const setClauses = [];
1819
+ const request = transaction ? transaction.request() : this.pool.request();
1820
+ let paramIndex = 0;
1821
+ Object.entries(data).forEach(([key, value]) => {
1822
+ const parsedKey = utils.parseSqlIdentifier(key, "column name");
1823
+ const paramName = `set${paramIndex++}`;
1824
+ setClauses.push(`[${parsedKey}] = @${paramName}`);
1825
+ const preparedValue = this.prepareValue(value, key, tableName);
1826
+ if (preparedValue === null || preparedValue === void 0) {
1827
+ request.input(paramName, this.getMssqlType(tableName, key), null);
1828
+ } else {
1829
+ request.input(paramName, preparedValue);
1830
+ }
1831
+ });
1832
+ const whereConditions = [];
1833
+ Object.entries(keys).forEach(([key, value]) => {
1834
+ const parsedKey = utils.parseSqlIdentifier(key, "column name");
1835
+ const paramName = `where${paramIndex++}`;
1836
+ whereConditions.push(`[${parsedKey}] = @${paramName}`);
1837
+ const preparedValue = this.prepareValue(value, key, tableName);
1838
+ if (preparedValue === null || preparedValue === void 0) {
1839
+ request.input(paramName, this.getMssqlType(tableName, key), null);
1840
+ } else {
1841
+ request.input(paramName, preparedValue);
1842
+ }
1843
+ });
1844
+ const tableName_ = getTableName({
1845
+ indexName: tableName,
1846
+ schemaName: getSchemaName(this.schemaName)
1847
+ });
1848
+ const updateSql = `UPDATE ${tableName_} SET ${setClauses.join(", ")} WHERE ${whereConditions.join(" AND ")}`;
1849
+ await request.query(updateSql);
1850
+ } catch (error$1) {
1851
+ throw new error.MastraError(
1852
+ {
1853
+ id: "MASTRA_STORAGE_MSSQL_STORE_UPDATE_FAILED",
1854
+ domain: error.ErrorDomain.STORAGE,
1855
+ category: error.ErrorCategory.THIRD_PARTY,
1856
+ details: {
1857
+ tableName
1858
+ }
1859
+ },
1860
+ error$1
1861
+ );
1862
+ }
1863
+ }
1864
+ /**
1865
+ * Update multiple records in a single batch transaction
1866
+ */
1867
+ async batchUpdate({
1868
+ tableName,
1869
+ updates
1870
+ }) {
1871
+ const transaction = this.pool.transaction();
1872
+ try {
1873
+ await transaction.begin();
1874
+ for (const { keys, data } of updates) {
1875
+ await this.update({ tableName, keys, data, transaction });
1876
+ }
1877
+ await transaction.commit();
1878
+ } catch (error$1) {
1879
+ await transaction.rollback();
1880
+ throw new error.MastraError(
1881
+ {
1882
+ id: "MASTRA_STORAGE_MSSQL_STORE_BATCH_UPDATE_FAILED",
1883
+ domain: error.ErrorDomain.STORAGE,
1884
+ category: error.ErrorCategory.THIRD_PARTY,
1885
+ details: {
1886
+ tableName,
1887
+ numberOfRecords: updates.length
1888
+ }
1889
+ },
1890
+ error$1
1891
+ );
1892
+ }
1893
+ }
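
Editor's note: a hedged usage sketch of batchUpdate. Each entry supplies keys to match and data to set, and all updates run in one transaction that rolls back on the first failure (table name and values are illustrative; `operations` is assumed to be the StoreOperationsMSSQL instance created by MSSQLStore):

await operations.batchUpdate({
  tableName: 'mastra_threads', // illustrative table name
  updates: [
    { keys: { id: 'thread-1' }, data: { title: 'Renamed thread', updatedAt: new Date() } },
    // objects are serialized by prepareValue before binding
    { keys: { id: 'thread-2' }, data: { metadata: { archived: true } } }
  ]
});
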
1894
+ /**
1895
+ * Delete multiple records by keys
1896
+ */
1897
+ async batchDelete({ tableName, keys }) {
1898
+ if (keys.length === 0) {
1899
+ return;
1900
+ }
1901
+ const tableName_ = getTableName({
1902
+ indexName: tableName,
1903
+ schemaName: getSchemaName(this.schemaName)
1904
+ });
1905
+ const transaction = this.pool.transaction();
1906
+ try {
1907
+ await transaction.begin();
1908
+ for (const keySet of keys) {
1909
+ const conditions = [];
1910
+ const request = transaction.request();
1911
+ let paramIndex = 0;
1912
+ Object.entries(keySet).forEach(([key, value]) => {
1913
+ const parsedKey = utils.parseSqlIdentifier(key, "column name");
1914
+ const paramName = `p${paramIndex++}`;
1915
+ conditions.push(`[${parsedKey}] = @${paramName}`);
1916
+ const preparedValue = this.prepareValue(value, key, tableName);
1917
+ if (preparedValue === null || preparedValue === void 0) {
1918
+ request.input(paramName, this.getMssqlType(tableName, key), null);
1919
+ } else {
1920
+ request.input(paramName, preparedValue);
1921
+ }
1922
+ });
1923
+ const deleteSql = `DELETE FROM ${tableName_} WHERE ${conditions.join(" AND ")}`;
1924
+ await request.query(deleteSql);
1925
+ }
1926
+ await transaction.commit();
1927
+ } catch (error$1) {
1928
+ await transaction.rollback();
1929
+ throw new error.MastraError(
1930
+ {
1931
+ id: "MASTRA_STORAGE_MSSQL_STORE_BATCH_DELETE_FAILED",
1932
+ domain: error.ErrorDomain.STORAGE,
1933
+ category: error.ErrorCategory.THIRD_PARTY,
1934
+ details: {
1935
+ tableName,
1936
+ numberOfRecords: keys.length
1937
+ }
1938
+ },
1939
+ error$1
1940
+ );
1941
+ }
1942
+ }
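
Editor's note: batchDelete follows the same transactional pattern, issuing one parameterized DELETE per key set. Sketch (identifiers are illustrative):

await operations.batchDelete({
  tableName: 'mastra_messages', // illustrative table name
  keys: [{ id: 'msg-1' }, { id: 'msg-2' }]
});
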
1943
+ /**
1944
+ * Create a new index on a table
1945
+ */
1946
+ async createIndex(options) {
1947
+ try {
1948
+ const { name, table, columns, unique = false, where } = options;
1949
+ const schemaName = this.schemaName || "dbo";
1950
+ const fullTableName = getTableName({
1951
+ indexName: table,
1952
+ schemaName: getSchemaName(this.schemaName)
1953
+ });
1954
+ const indexNameSafe = utils.parseSqlIdentifier(name, "index name");
1955
+ const checkRequest = this.pool.request();
1956
+ checkRequest.input("indexName", indexNameSafe);
1957
+ checkRequest.input("schemaName", schemaName);
1958
+ checkRequest.input("tableName", table);
1959
+ const indexExists = await checkRequest.query(`
1960
+ SELECT 1 as found
1961
+ FROM sys.indexes i
1962
+ INNER JOIN sys.tables t ON i.object_id = t.object_id
1963
+ INNER JOIN sys.schemas s ON t.schema_id = s.schema_id
1964
+ WHERE i.name = @indexName
1965
+ AND s.name = @schemaName
1966
+ AND t.name = @tableName
1967
+ `);
1968
+ if (indexExists.recordset && indexExists.recordset.length > 0) {
1969
+ return;
1970
+ }
1971
+ const uniqueStr = unique ? "UNIQUE " : "";
1972
+ const columnsStr = columns.map((col) => {
1973
+ if (col.includes(" DESC") || col.includes(" ASC")) {
1974
+ const [colName, ...modifiers] = col.split(" ");
1975
+ if (!colName) {
1976
+ throw new Error(`Invalid column specification: ${col}`);
1977
+ }
1978
+ return `[${utils.parseSqlIdentifier(colName, "column name")}] ${modifiers.join(" ")}`;
1979
+ }
1980
+ return `[${utils.parseSqlIdentifier(col, "column name")}]`;
1981
+ }).join(", ");
1982
+ const whereStr = where ? ` WHERE ${where}` : "";
1983
+ const createIndexSql = `CREATE ${uniqueStr}INDEX [${indexNameSafe}] ON ${fullTableName} (${columnsStr})${whereStr}`;
1984
+ await this.pool.request().query(createIndexSql);
1985
+ } catch (error$1) {
1986
+ throw new error.MastraError(
1987
+ {
1988
+ id: "MASTRA_STORAGE_MSSQL_INDEX_CREATE_FAILED",
1989
+ domain: error.ErrorDomain.STORAGE,
1990
+ category: error.ErrorCategory.THIRD_PARTY,
1991
+ details: {
1992
+ indexName: options.name,
1993
+ tableName: options.table
1994
+ }
1995
+ },
1996
+ error$1
1997
+ );
1998
+ }
1999
+ }
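
Editor's note: createIndex is idempotent by name (it returns early when sys.indexes already lists the index for that schema and table) and understands ASC/DESC column suffixes plus an optional filtered-index predicate. A hedged sketch (names are illustrative):

await operations.createIndex({
  name: 'my_messages_thread_seqid_idx',  // skipped if an index with this name already exists
  table: 'mastra_messages',
  columns: ['thread_id', 'seq_id DESC'], // ASC/DESC suffixes are split off and re-applied after quoting
  unique: false,
  where: '[thread_id] IS NOT NULL'       // optional predicate, passed through verbatim
});
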
2000
+ /**
2001
+ * Drop an existing index
2002
+ */
2003
+ async dropIndex(indexName) {
2004
+ try {
2005
+ const schemaName = this.schemaName || "dbo";
2006
+ const indexNameSafe = utils.parseSqlIdentifier(indexName, "index name");
2007
+ const checkRequest = this.pool.request();
2008
+ checkRequest.input("indexName", indexNameSafe);
2009
+ checkRequest.input("schemaName", schemaName);
2010
+ const result = await checkRequest.query(`
2011
+ SELECT t.name as table_name
2012
+ FROM sys.indexes i
2013
+ INNER JOIN sys.tables t ON i.object_id = t.object_id
2014
+ INNER JOIN sys.schemas s ON t.schema_id = s.schema_id
2015
+ WHERE i.name = @indexName
2016
+ AND s.name = @schemaName
2017
+ `);
2018
+ if (!result.recordset || result.recordset.length === 0) {
2019
+ return;
2020
+ }
2021
+ if (result.recordset.length > 1) {
2022
+ const tables = result.recordset.map((r) => r.table_name).join(", ");
2023
+ throw new error.MastraError({
2024
+ id: "MASTRA_STORAGE_MSSQL_INDEX_AMBIGUOUS",
2025
+ domain: error.ErrorDomain.STORAGE,
2026
+ category: error.ErrorCategory.USER,
2027
+ text: `Index "${indexNameSafe}" exists on multiple tables (${tables}) in schema "${schemaName}". Please drop indexes manually or ensure unique index names.`
2028
+ });
2029
+ }
2030
+ const tableName = result.recordset[0].table_name;
2031
+ const fullTableName = getTableName({
2032
+ indexName: tableName,
2033
+ schemaName: getSchemaName(this.schemaName)
2034
+ });
2035
+ const dropSql = `DROP INDEX [${indexNameSafe}] ON ${fullTableName}`;
2036
+ await this.pool.request().query(dropSql);
2037
+ } catch (error$1) {
2038
+ throw new error.MastraError(
2039
+ {
2040
+ id: "MASTRA_STORAGE_MSSQL_INDEX_DROP_FAILED",
2041
+ domain: error.ErrorDomain.STORAGE,
2042
+ category: error.ErrorCategory.THIRD_PARTY,
2043
+ details: {
2044
+ indexName
2045
+ }
2046
+ },
2047
+ error$1
2048
+ );
2049
+ }
2050
+ }
2051
+ /**
2052
+ * List indexes for a specific table or all tables
2053
+ */
2054
+ async listIndexes(tableName) {
2055
+ try {
2056
+ const schemaName = this.schemaName || "dbo";
2057
+ let query;
2058
+ const request = this.pool.request();
2059
+ request.input("schemaName", schemaName);
2060
+ if (tableName) {
2061
+ query = `
2062
+ SELECT
2063
+ i.name as name,
2064
+ o.name as [table],
2065
+ i.is_unique as is_unique,
2066
+ CAST(SUM(s.used_page_count) * 8 / 1024.0 AS VARCHAR(50)) + ' MB' as size
2067
+ FROM sys.indexes i
2068
+ INNER JOIN sys.objects o ON i.object_id = o.object_id
2069
+ INNER JOIN sys.schemas sch ON o.schema_id = sch.schema_id
2070
+ LEFT JOIN sys.dm_db_partition_stats s ON i.object_id = s.object_id AND i.index_id = s.index_id
2071
+ WHERE sch.name = @schemaName
2072
+ AND o.name = @tableName
2073
+ AND i.name IS NOT NULL
2074
+ GROUP BY i.name, o.name, i.is_unique
2075
+ `;
2076
+ request.input("tableName", tableName);
2077
+ } else {
2078
+ query = `
2079
+ SELECT
2080
+ i.name as name,
2081
+ o.name as [table],
2082
+ i.is_unique as is_unique,
2083
+ CAST(SUM(s.used_page_count) * 8 / 1024.0 AS VARCHAR(50)) + ' MB' as size
2084
+ FROM sys.indexes i
2085
+ INNER JOIN sys.objects o ON i.object_id = o.object_id
2086
+ INNER JOIN sys.schemas sch ON o.schema_id = sch.schema_id
2087
+ LEFT JOIN sys.dm_db_partition_stats s ON i.object_id = s.object_id AND i.index_id = s.index_id
2088
+ WHERE sch.name = @schemaName
2089
+ AND i.name IS NOT NULL
2090
+ GROUP BY i.name, o.name, i.is_unique
2091
+ `;
2092
+ }
2093
+ const result = await request.query(query);
2094
+ const indexes = [];
2095
+ for (const row of result.recordset) {
2096
+ const colRequest = this.pool.request();
2097
+ colRequest.input("indexName", row.name);
2098
+ colRequest.input("schemaName", schemaName);
2099
+ const colResult = await colRequest.query(`
2100
+ SELECT c.name as column_name
2101
+ FROM sys.indexes i
2102
+ INNER JOIN sys.index_columns ic ON i.object_id = ic.object_id AND i.index_id = ic.index_id
2103
+ INNER JOIN sys.columns c ON ic.object_id = c.object_id AND ic.column_id = c.column_id
2104
+ INNER JOIN sys.objects o ON i.object_id = o.object_id
2105
+ INNER JOIN sys.schemas s ON o.schema_id = s.schema_id
2106
+ WHERE i.name = @indexName
2107
+ AND s.name = @schemaName
2108
+ ORDER BY ic.key_ordinal
2109
+ `);
2110
+ indexes.push({
2111
+ name: row.name,
2112
+ table: row.table,
2113
+ columns: colResult.recordset.map((c) => c.column_name),
2114
+ unique: row.is_unique || false,
2115
+ size: row.size || "0 MB",
2116
+ definition: ""
2117
+ // MSSQL doesn't store definition like PG
2118
+ });
2119
+ }
2120
+ return indexes;
2121
+ } catch (error$1) {
2122
+ throw new error.MastraError(
2123
+ {
2124
+ id: "MASTRA_STORAGE_MSSQL_INDEX_LIST_FAILED",
2125
+ domain: error.ErrorDomain.STORAGE,
2126
+ category: error.ErrorCategory.THIRD_PARTY,
2127
+ details: tableName ? {
2128
+ tableName
2129
+ } : {}
2130
+ },
2131
+ error$1
2132
+ );
2133
+ }
2134
+ }
2135
+ /**
2136
+ * Get detailed statistics for a specific index
2137
+ */
2138
+ async describeIndex(indexName) {
2139
+ try {
2140
+ const schemaName = this.schemaName || "dbo";
2141
+ const request = this.pool.request();
2142
+ request.input("indexName", indexName);
2143
+ request.input("schemaName", schemaName);
2144
+ const query = `
2145
+ SELECT
2146
+ i.name as name,
2147
+ o.name as [table],
2148
+ i.is_unique as is_unique,
2149
+ CAST(SUM(s.used_page_count) * 8 / 1024.0 AS VARCHAR(50)) + ' MB' as size,
2150
+ i.type_desc as method,
2151
+ ISNULL(us.user_scans, 0) as scans,
2152
+ ISNULL(us.user_seeks + us.user_scans, 0) as tuples_read,
2153
+ ISNULL(us.user_lookups, 0) as tuples_fetched
2154
+ FROM sys.indexes i
2155
+ INNER JOIN sys.objects o ON i.object_id = o.object_id
2156
+ INNER JOIN sys.schemas sch ON o.schema_id = sch.schema_id
2157
+ LEFT JOIN sys.dm_db_partition_stats s ON i.object_id = s.object_id AND i.index_id = s.index_id
2158
+ LEFT JOIN sys.dm_db_index_usage_stats us ON i.object_id = us.object_id AND i.index_id = us.index_id
2159
+ WHERE i.name = @indexName
2160
+ AND sch.name = @schemaName
2161
+ GROUP BY i.name, o.name, i.is_unique, i.type_desc, us.user_seeks, us.user_scans, us.user_lookups
2162
+ `;
2163
+ const result = await request.query(query);
2164
+ if (!result.recordset || result.recordset.length === 0) {
2165
+ throw new Error(`Index "${indexName}" not found in schema "${schemaName}"`);
1384
2166
  }
1385
- await transaction.commit();
2167
+ const row = result.recordset[0];
2168
+ const colRequest = this.pool.request();
2169
+ colRequest.input("indexName", indexName);
2170
+ colRequest.input("schemaName", schemaName);
2171
+ const colResult = await colRequest.query(`
2172
+ SELECT c.name as column_name
2173
+ FROM sys.indexes i
2174
+ INNER JOIN sys.index_columns ic ON i.object_id = ic.object_id AND i.index_id = ic.index_id
2175
+ INNER JOIN sys.columns c ON ic.object_id = c.object_id AND ic.column_id = c.column_id
2176
+ INNER JOIN sys.objects o ON i.object_id = o.object_id
2177
+ INNER JOIN sys.schemas s ON o.schema_id = s.schema_id
2178
+ WHERE i.name = @indexName
2179
+ AND s.name = @schemaName
2180
+ ORDER BY ic.key_ordinal
2181
+ `);
2182
+ return {
2183
+ name: row.name,
2184
+ table: row.table,
2185
+ columns: colResult.recordset.map((c) => c.column_name),
2186
+ unique: row.is_unique || false,
2187
+ size: row.size || "0 MB",
2188
+ definition: "",
2189
+ method: row.method?.toLowerCase() || "nonclustered",
2190
+ scans: Number(row.scans) || 0,
2191
+ tuples_read: Number(row.tuples_read) || 0,
2192
+ tuples_fetched: Number(row.tuples_fetched) || 0
2193
+ };
1386
2194
  } catch (error$1) {
1387
- await transaction.rollback();
1388
2195
  throw new error.MastraError(
1389
2196
  {
1390
- id: "MASTRA_STORAGE_MSSQL_STORE_BATCH_INSERT_FAILED",
2197
+ id: "MASTRA_STORAGE_MSSQL_INDEX_DESCRIBE_FAILED",
1391
2198
  domain: error.ErrorDomain.STORAGE,
1392
2199
  category: error.ErrorCategory.THIRD_PARTY,
1393
2200
  details: {
1394
- tableName,
1395
- numberOfRecords: records.length
2201
+ indexName
1396
2202
  }
1397
2203
  },
1398
2204
  error$1
1399
2205
  );
1400
2206
  }
1401
2207
  }
1402
- async dropTable({ tableName }) {
2208
+ /**
2209
+ * Returns definitions for automatic performance indexes
2210
+ * IMPORTANT: Uses seq_id DESC instead of createdAt DESC for MSSQL due to millisecond accuracy limitations
2211
+ * NOTE: Using NVARCHAR(400) for text columns (800 bytes) leaves room for composite indexes
2212
+ */
2213
+ getAutomaticIndexDefinitions() {
2214
+ const schemaPrefix = this.schemaName ? `${this.schemaName}_` : "";
2215
+ return [
2216
+ // Composite indexes for optimal filtering + sorting performance
2217
+ // NVARCHAR(400) = 800 bytes, plus BIGINT (8 bytes) = 808 bytes total (under 900-byte limit)
2218
+ {
2219
+ name: `${schemaPrefix}mastra_threads_resourceid_seqid_idx`,
2220
+ table: storage.TABLE_THREADS,
2221
+ columns: ["resourceId", "seq_id DESC"]
2222
+ },
2223
+ {
2224
+ name: `${schemaPrefix}mastra_messages_thread_id_seqid_idx`,
2225
+ table: storage.TABLE_MESSAGES,
2226
+ columns: ["thread_id", "seq_id DESC"]
2227
+ },
2228
+ {
2229
+ name: `${schemaPrefix}mastra_traces_name_seqid_idx`,
2230
+ table: storage.TABLE_TRACES,
2231
+ columns: ["name", "seq_id DESC"]
2232
+ },
2233
+ {
2234
+ name: `${schemaPrefix}mastra_scores_trace_id_span_id_seqid_idx`,
2235
+ table: storage.TABLE_SCORERS,
2236
+ columns: ["traceId", "spanId", "seq_id DESC"]
2237
+ },
2238
+ // AI Spans indexes for optimal trace querying
2239
+ {
2240
+ name: `${schemaPrefix}mastra_ai_spans_traceid_startedat_idx`,
2241
+ table: storage.TABLE_AI_SPANS,
2242
+ columns: ["traceId", "startedAt DESC"]
2243
+ },
2244
+ {
2245
+ name: `${schemaPrefix}mastra_ai_spans_parentspanid_startedat_idx`,
2246
+ table: storage.TABLE_AI_SPANS,
2247
+ columns: ["parentSpanId", "startedAt DESC"]
2248
+ },
2249
+ {
2250
+ name: `${schemaPrefix}mastra_ai_spans_name_idx`,
2251
+ table: storage.TABLE_AI_SPANS,
2252
+ columns: ["name"]
2253
+ },
2254
+ {
2255
+ name: `${schemaPrefix}mastra_ai_spans_spantype_startedat_idx`,
2256
+ table: storage.TABLE_AI_SPANS,
2257
+ columns: ["spanType", "startedAt DESC"]
2258
+ }
2259
+ ];
2260
+ }
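
Editor's note: each definition above is a ready-made createIndex options object, so createAutomaticIndexes (below) can pass them straight through. For example, assuming schemaName is 'mastra' and TABLE_THREADS resolves to the mastra_threads table, the first entry works out to roughly:

{
  name: 'mastra_mastra_threads_resourceid_seqid_idx', // `${schemaName}_` prefix keeps names unique per schema
  table: 'mastra_threads',
  columns: ['resourceId', 'seq_id DESC']
}
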
2261
+ /**
2262
+ * Creates automatic indexes for optimal query performance
2263
+ * Uses getAutomaticIndexDefinitions() to determine which indexes to create
2264
+ */
2265
+ async createAutomaticIndexes() {
1403
2266
  try {
1404
- const tableNameWithSchema = getTableName({ indexName: tableName, schemaName: getSchemaName(this.schemaName) });
1405
- await this.pool.request().query(`DROP TABLE IF EXISTS ${tableNameWithSchema}`);
2267
+ const indexes = this.getAutomaticIndexDefinitions();
2268
+ for (const indexOptions of indexes) {
2269
+ try {
2270
+ await this.createIndex(indexOptions);
2271
+ } catch (error) {
2272
+ this.logger?.warn?.(`Failed to create index ${indexOptions.name}:`, error);
2273
+ }
2274
+ }
1406
2275
  } catch (error$1) {
1407
2276
  throw new error.MastraError(
1408
2277
  {
1409
- id: "MASTRA_STORAGE_MSSQL_STORE_DROP_TABLE_FAILED",
2278
+ id: "MASTRA_STORAGE_MSSQL_STORE_CREATE_PERFORMANCE_INDEXES_FAILED",
1410
2279
  domain: error.ErrorDomain.STORAGE,
1411
- category: error.ErrorCategory.THIRD_PARTY,
1412
- details: {
1413
- tableName
1414
- }
2280
+ category: error.ErrorCategory.THIRD_PARTY
1415
2281
  },
1416
2282
  error$1
1417
2283
  );
1418
2284
  }
1419
2285
  }
1420
2286
  };
1421
- function parseJSON(jsonString) {
1422
- try {
1423
- return JSON.parse(jsonString);
1424
- } catch {
1425
- return jsonString;
1426
- }
1427
- }
1428
2287
  function transformScoreRow(row) {
1429
2288
  return {
1430
2289
  ...row,
1431
- input: parseJSON(row.input),
1432
- scorer: parseJSON(row.scorer),
1433
- preprocessStepResult: parseJSON(row.preprocessStepResult),
1434
- analyzeStepResult: parseJSON(row.analyzeStepResult),
1435
- metadata: parseJSON(row.metadata),
1436
- output: parseJSON(row.output),
1437
- additionalContext: parseJSON(row.additionalContext),
1438
- runtimeContext: parseJSON(row.runtimeContext),
1439
- entity: parseJSON(row.entity),
2290
+ input: storage.safelyParseJSON(row.input),
2291
+ scorer: storage.safelyParseJSON(row.scorer),
2292
+ preprocessStepResult: storage.safelyParseJSON(row.preprocessStepResult),
2293
+ analyzeStepResult: storage.safelyParseJSON(row.analyzeStepResult),
2294
+ metadata: storage.safelyParseJSON(row.metadata),
2295
+ output: storage.safelyParseJSON(row.output),
2296
+ additionalContext: storage.safelyParseJSON(row.additionalContext),
2297
+ requestContext: storage.safelyParseJSON(row.requestContext),
2298
+ entity: storage.safelyParseJSON(row.entity),
1440
2299
  createdAt: row.createdAt,
1441
2300
  updatedAt: row.updatedAt
1442
2301
  };
@@ -1479,6 +2338,19 @@ var ScoresMSSQL = class extends storage.ScoresStorage {
1479
2338
  }
1480
2339
  }
1481
2340
  async saveScore(score) {
2341
+ let validatedScore;
2342
+ try {
2343
+ validatedScore = evals.saveScorePayloadSchema.parse(score);
2344
+ } catch (error$1) {
2345
+ throw new error.MastraError(
2346
+ {
2347
+ id: "MASTRA_STORAGE_MSSQL_STORE_SAVE_SCORE_VALIDATION_FAILED",
2348
+ domain: error.ErrorDomain.STORAGE,
2349
+ category: error.ErrorCategory.THIRD_PARTY
2350
+ },
2351
+ error$1
2352
+ );
2353
+ }
1482
2354
  try {
1483
2355
  const scoreId = crypto.randomUUID();
1484
2356
  const {
@@ -1489,24 +2361,24 @@ var ScoresMSSQL = class extends storage.ScoresStorage {
1489
2361
  input,
1490
2362
  output,
1491
2363
  additionalContext,
1492
- runtimeContext,
2364
+ requestContext,
1493
2365
  entity,
1494
2366
  ...rest
1495
- } = score;
2367
+ } = validatedScore;
1496
2368
  await this.operations.insert({
1497
2369
  tableName: storage.TABLE_SCORERS,
1498
2370
  record: {
1499
2371
  id: scoreId,
1500
2372
  ...rest,
1501
- input: JSON.stringify(input) || "",
1502
- output: JSON.stringify(output) || "",
1503
- preprocessStepResult: preprocessStepResult ? JSON.stringify(preprocessStepResult) : null,
1504
- analyzeStepResult: analyzeStepResult ? JSON.stringify(analyzeStepResult) : null,
1505
- metadata: metadata ? JSON.stringify(metadata) : null,
1506
- additionalContext: additionalContext ? JSON.stringify(additionalContext) : null,
1507
- runtimeContext: runtimeContext ? JSON.stringify(runtimeContext) : null,
1508
- entity: entity ? JSON.stringify(entity) : null,
1509
- scorer: scorer ? JSON.stringify(scorer) : null,
2373
+ input: input || "",
2374
+ output: output || "",
2375
+ preprocessStepResult: preprocessStepResult || null,
2376
+ analyzeStepResult: analyzeStepResult || null,
2377
+ metadata: metadata || null,
2378
+ additionalContext: additionalContext || null,
2379
+ requestContext: requestContext || null,
2380
+ entity: entity || null,
2381
+ scorer: scorer || null,
1510
2382
  createdAt: (/* @__PURE__ */ new Date()).toISOString(),
1511
2383
  updatedAt: (/* @__PURE__ */ new Date()).toISOString()
1512
2384
  }
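
Editor's note: two behavioural changes in saveScore. The payload is validated with evals.saveScorePayloadSchema before anything touches the database, and JSON-ish fields are handed to the operations layer as plain objects (prepareValue now does the stringification that the old code did inline). A hedged usage sketch; the field names below are illustrative, not the full schema:

try {
  // assumes `store` is an initialized MSSQLStore
  await store.saveScore({
    scorerId: 'relevance-scorer', // illustrative payload; it must satisfy saveScorePayloadSchema
    runId: 'run-123',
    entityId: 'my-agent',
    entityType: 'AGENT',
    source: 'LIVE',
    score: 0.92,
    input: { query: 'hello' },    // passed as objects, serialized by the operations layer
    output: { answer: 'hi' }
  });
} catch (err) {
  // schema failures surface as MASTRA_STORAGE_MSSQL_STORE_SAVE_SCORE_VALIDATION_FAILED
  console.error(err);
}
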
@@ -1524,41 +2396,70 @@ var ScoresMSSQL = class extends storage.ScoresStorage {
1524
2396
  );
1525
2397
  }
1526
2398
  }
1527
- async getScoresByScorerId({
2399
+ async listScoresByScorerId({
1528
2400
  scorerId,
1529
- pagination
2401
+ pagination,
2402
+ entityId,
2403
+ entityType,
2404
+ source
1530
2405
  }) {
1531
2406
  try {
1532
- const request = this.pool.request();
1533
- request.input("p1", scorerId);
1534
- const totalResult = await request.query(
1535
- `SELECT COUNT(*) as count FROM ${getTableName({ indexName: storage.TABLE_SCORERS, schemaName: getSchemaName(this.schema) })} WHERE [scorerId] = @p1`
1536
- );
2407
+ const conditions = ["[scorerId] = @p1"];
2408
+ const params = { p1: scorerId };
2409
+ let paramIndex = 2;
2410
+ if (entityId) {
2411
+ conditions.push(`[entityId] = @p${paramIndex}`);
2412
+ params[`p${paramIndex}`] = entityId;
2413
+ paramIndex++;
2414
+ }
2415
+ if (entityType) {
2416
+ conditions.push(`[entityType] = @p${paramIndex}`);
2417
+ params[`p${paramIndex}`] = entityType;
2418
+ paramIndex++;
2419
+ }
2420
+ if (source) {
2421
+ conditions.push(`[source] = @p${paramIndex}`);
2422
+ params[`p${paramIndex}`] = source;
2423
+ paramIndex++;
2424
+ }
2425
+ const whereClause = conditions.join(" AND ");
2426
+ const tableName = getTableName({ indexName: storage.TABLE_SCORERS, schemaName: getSchemaName(this.schema) });
2427
+ const countRequest = this.pool.request();
2428
+ Object.entries(params).forEach(([key, value]) => {
2429
+ countRequest.input(key, value);
2430
+ });
2431
+ const totalResult = await countRequest.query(`SELECT COUNT(*) as count FROM ${tableName} WHERE ${whereClause}`);
1537
2432
  const total = totalResult.recordset[0]?.count || 0;
2433
+ const { page, perPage: perPageInput } = pagination;
1538
2434
  if (total === 0) {
1539
2435
  return {
1540
2436
  pagination: {
1541
2437
  total: 0,
1542
- page: pagination.page,
1543
- perPage: pagination.perPage,
2438
+ page,
2439
+ perPage: perPageInput,
1544
2440
  hasMore: false
1545
2441
  },
1546
2442
  scores: []
1547
2443
  };
1548
2444
  }
2445
+ const perPage = storage.normalizePerPage(perPageInput, 100);
2446
+ const { offset: start, perPage: perPageForResponse } = storage.calculatePagination(page, perPageInput, perPage);
2447
+ const limitValue = perPageInput === false ? total : perPage;
2448
+ const end = perPageInput === false ? total : start + perPage;
1549
2449
  const dataRequest = this.pool.request();
1550
- dataRequest.input("p1", scorerId);
1551
- dataRequest.input("p2", pagination.perPage);
1552
- dataRequest.input("p3", pagination.page * pagination.perPage);
1553
- const result = await dataRequest.query(
1554
- `SELECT * FROM ${getTableName({ indexName: storage.TABLE_SCORERS, schemaName: getSchemaName(this.schema) })} WHERE [scorerId] = @p1 ORDER BY [createdAt] DESC OFFSET @p3 ROWS FETCH NEXT @p2 ROWS ONLY`
1555
- );
2450
+ Object.entries(params).forEach(([key, value]) => {
2451
+ dataRequest.input(key, value);
2452
+ });
2453
+ dataRequest.input("perPage", limitValue);
2454
+ dataRequest.input("offset", start);
2455
+ const dataQuery = `SELECT * FROM ${tableName} WHERE ${whereClause} ORDER BY [createdAt] DESC OFFSET @offset ROWS FETCH NEXT @perPage ROWS ONLY`;
2456
+ const result = await dataRequest.query(dataQuery);
1556
2457
  return {
1557
2458
  pagination: {
1558
2459
  total: Number(total),
1559
- page: pagination.page,
1560
- perPage: pagination.perPage,
1561
- hasMore: Number(total) > (pagination.page + 1) * pagination.perPage
2460
+ page,
2461
+ perPage: perPageForResponse,
2462
+ hasMore: end < total
1562
2463
  },
1563
2464
  scores: result.recordset.map((row) => transformScoreRow(row))
1564
2465
  };
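
Editor's note: the list* methods now share normalizePerPage/calculatePagination. page is zero-based, 100 appears to act as the fallback page size, and perPage: false disables the limit so a single call returns every matching row with hasMore: false. Sketch (scorer id is illustrative; `store` is an initialized MSSQLStore):

// first 20 scores, newest first
const pageOne = await store.listScoresByScorerId({
  scorerId: 'relevance-scorer',
  pagination: { page: 0, perPage: 20 }
});

// all matching scores in a single response
const all = await store.listScoresByScorerId({
  scorerId: 'relevance-scorer',
  pagination: { page: 0, perPage: false }
});
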
@@ -1574,7 +2475,7 @@ var ScoresMSSQL = class extends storage.ScoresStorage {
1574
2475
  );
1575
2476
  }
1576
2477
  }
1577
- async getScoresByRunId({
2478
+ async listScoresByRunId({
1578
2479
  runId,
1579
2480
  pagination
1580
2481
  }) {
@@ -1585,30 +2486,35 @@ var ScoresMSSQL = class extends storage.ScoresStorage {
1585
2486
  `SELECT COUNT(*) as count FROM ${getTableName({ indexName: storage.TABLE_SCORERS, schemaName: getSchemaName(this.schema) })} WHERE [runId] = @p1`
1586
2487
  );
1587
2488
  const total = totalResult.recordset[0]?.count || 0;
2489
+ const { page, perPage: perPageInput } = pagination;
1588
2490
  if (total === 0) {
1589
2491
  return {
1590
2492
  pagination: {
1591
2493
  total: 0,
1592
- page: pagination.page,
1593
- perPage: pagination.perPage,
2494
+ page,
2495
+ perPage: perPageInput,
1594
2496
  hasMore: false
1595
2497
  },
1596
2498
  scores: []
1597
2499
  };
1598
2500
  }
2501
+ const perPage = storage.normalizePerPage(perPageInput, 100);
2502
+ const { offset: start, perPage: perPageForResponse } = storage.calculatePagination(page, perPageInput, perPage);
2503
+ const limitValue = perPageInput === false ? total : perPage;
2504
+ const end = perPageInput === false ? total : start + perPage;
1599
2505
  const dataRequest = this.pool.request();
1600
2506
  dataRequest.input("p1", runId);
1601
- dataRequest.input("p2", pagination.perPage);
1602
- dataRequest.input("p3", pagination.page * pagination.perPage);
2507
+ dataRequest.input("p2", limitValue);
2508
+ dataRequest.input("p3", start);
1603
2509
  const result = await dataRequest.query(
1604
2510
  `SELECT * FROM ${getTableName({ indexName: storage.TABLE_SCORERS, schemaName: getSchemaName(this.schema) })} WHERE [runId] = @p1 ORDER BY [createdAt] DESC OFFSET @p3 ROWS FETCH NEXT @p2 ROWS ONLY`
1605
2511
  );
1606
2512
  return {
1607
2513
  pagination: {
1608
2514
  total: Number(total),
1609
- page: pagination.page,
1610
- perPage: pagination.perPage,
1611
- hasMore: Number(total) > (pagination.page + 1) * pagination.perPage
2515
+ page,
2516
+ perPage: perPageForResponse,
2517
+ hasMore: end < total
1612
2518
  },
1613
2519
  scores: result.recordset.map((row) => transformScoreRow(row))
1614
2520
  };
@@ -1624,7 +2530,7 @@ var ScoresMSSQL = class extends storage.ScoresStorage {
1624
2530
  );
1625
2531
  }
1626
2532
  }
1627
- async getScoresByEntityId({
2533
+ async listScoresByEntityId({
1628
2534
  entityId,
1629
2535
  entityType,
1630
2536
  pagination
@@ -1637,31 +2543,36 @@ var ScoresMSSQL = class extends storage.ScoresStorage {
1637
2543
  `SELECT COUNT(*) as count FROM ${getTableName({ indexName: storage.TABLE_SCORERS, schemaName: getSchemaName(this.schema) })} WHERE [entityId] = @p1 AND [entityType] = @p2`
1638
2544
  );
1639
2545
  const total = totalResult.recordset[0]?.count || 0;
2546
+ const { page, perPage: perPageInput } = pagination;
2547
+ const perPage = storage.normalizePerPage(perPageInput, 100);
2548
+ const { offset: start, perPage: perPageForResponse } = storage.calculatePagination(page, perPageInput, perPage);
1640
2549
  if (total === 0) {
1641
2550
  return {
1642
2551
  pagination: {
1643
2552
  total: 0,
1644
- page: pagination.page,
1645
- perPage: pagination.perPage,
2553
+ page,
2554
+ perPage: perPageForResponse,
1646
2555
  hasMore: false
1647
2556
  },
1648
2557
  scores: []
1649
2558
  };
1650
2559
  }
2560
+ const limitValue = perPageInput === false ? total : perPage;
2561
+ const end = perPageInput === false ? total : start + perPage;
1651
2562
  const dataRequest = this.pool.request();
1652
2563
  dataRequest.input("p1", entityId);
1653
2564
  dataRequest.input("p2", entityType);
1654
- dataRequest.input("p3", pagination.perPage);
1655
- dataRequest.input("p4", pagination.page * pagination.perPage);
2565
+ dataRequest.input("p3", limitValue);
2566
+ dataRequest.input("p4", start);
1656
2567
  const result = await dataRequest.query(
1657
2568
  `SELECT * FROM ${getTableName({ indexName: storage.TABLE_SCORERS, schemaName: getSchemaName(this.schema) })} WHERE [entityId] = @p1 AND [entityType] = @p2 ORDER BY [createdAt] DESC OFFSET @p4 ROWS FETCH NEXT @p3 ROWS ONLY`
1658
2569
  );
1659
2570
  return {
1660
2571
  pagination: {
1661
2572
  total: Number(total),
1662
- page: pagination.page,
1663
- perPage: pagination.perPage,
1664
- hasMore: Number(total) > (pagination.page + 1) * pagination.perPage
2573
+ page,
2574
+ perPage: perPageForResponse,
2575
+ hasMore: end < total
1665
2576
  },
1666
2577
  scores: result.recordset.map((row) => transformScoreRow(row))
1667
2578
  };
@@ -1677,8 +2588,66 @@ var ScoresMSSQL = class extends storage.ScoresStorage {
1677
2588
  );
1678
2589
  }
1679
2590
  }
2591
+ async listScoresBySpan({
2592
+ traceId,
2593
+ spanId,
2594
+ pagination
2595
+ }) {
2596
+ try {
2597
+ const request = this.pool.request();
2598
+ request.input("p1", traceId);
2599
+ request.input("p2", spanId);
2600
+ const totalResult = await request.query(
2601
+ `SELECT COUNT(*) as count FROM ${getTableName({ indexName: storage.TABLE_SCORERS, schemaName: getSchemaName(this.schema) })} WHERE [traceId] = @p1 AND [spanId] = @p2`
2602
+ );
2603
+ const total = totalResult.recordset[0]?.count || 0;
2604
+ const { page, perPage: perPageInput } = pagination;
2605
+ const perPage = storage.normalizePerPage(perPageInput, 100);
2606
+ const { offset: start, perPage: perPageForResponse } = storage.calculatePagination(page, perPageInput, perPage);
2607
+ if (total === 0) {
2608
+ return {
2609
+ pagination: {
2610
+ total: 0,
2611
+ page,
2612
+ perPage: perPageForResponse,
2613
+ hasMore: false
2614
+ },
2615
+ scores: []
2616
+ };
2617
+ }
2618
+ const limitValue = perPageInput === false ? total : perPage;
2619
+ const end = perPageInput === false ? total : start + perPage;
2620
+ const dataRequest = this.pool.request();
2621
+ dataRequest.input("p1", traceId);
2622
+ dataRequest.input("p2", spanId);
2623
+ dataRequest.input("p3", limitValue);
2624
+ dataRequest.input("p4", start);
2625
+ const result = await dataRequest.query(
2626
+ `SELECT * FROM ${getTableName({ indexName: storage.TABLE_SCORERS, schemaName: getSchemaName(this.schema) })} WHERE [traceId] = @p1 AND [spanId] = @p2 ORDER BY [createdAt] DESC OFFSET @p4 ROWS FETCH NEXT @p3 ROWS ONLY`
2627
+ );
2628
+ return {
2629
+ pagination: {
2630
+ total: Number(total),
2631
+ page,
2632
+ perPage: perPageForResponse,
2633
+ hasMore: end < total
2634
+ },
2635
+ scores: result.recordset.map((row) => transformScoreRow(row))
2636
+ };
2637
+ } catch (error$1) {
2638
+ throw new error.MastraError(
2639
+ {
2640
+ id: "MASTRA_STORAGE_MSSQL_STORE_GET_SCORES_BY_SPAN_FAILED",
2641
+ domain: error.ErrorDomain.STORAGE,
2642
+ category: error.ErrorCategory.THIRD_PARTY,
2643
+ details: { traceId, spanId }
2644
+ },
2645
+ error$1
2646
+ );
2647
+ }
2648
+ }
1680
2649
  };
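
Editor's note: two additions to the scores API above. listScoresByScorerId accepts optional entityId/entityType/source filters that are ANDed into the WHERE clause, and listScoresBySpan returns the scores attached to a single AI span. Sketch (identifiers are illustrative; `store` is an initialized MSSQLStore):

const filtered = await store.listScoresByScorerId({
  scorerId: 'relevance-scorer',
  entityType: 'AGENT', // optional filters, new in this version
  source: 'LIVE',
  pagination: { page: 0, perPage: 25 }
});

const spanScores = await store.listScoresBySpan({
  traceId: 'trace-abc',
  spanId: 'span-def',
  pagination: { page: 0, perPage: 10 }
});
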
1681
- var TracesMSSQL = class extends storage.TracesStorage {
2650
+ var WorkflowsMSSQL = class extends storage.WorkflowsStorage {
1682
2651
  pool;
1683
2652
  operations;
1684
2653
  schema;
@@ -1692,207 +2661,164 @@ var TracesMSSQL = class extends storage.TracesStorage {
1692
2661
  this.operations = operations;
1693
2662
  this.schema = schema;
1694
2663
  }
1695
- /** @deprecated use getTracesPaginated instead*/
1696
- async getTraces(args) {
1697
- if (args.fromDate || args.toDate) {
1698
- args.dateRange = {
1699
- start: args.fromDate,
1700
- end: args.toDate
1701
- };
1702
- }
1703
- const result = await this.getTracesPaginated(args);
1704
- return result.traces;
1705
- }
1706
- async getTracesPaginated(args) {
1707
- const { name, scope, page = 0, perPage: perPageInput, attributes, filters, dateRange } = args;
1708
- const fromDate = dateRange?.start;
1709
- const toDate = dateRange?.end;
1710
- const perPage = perPageInput !== void 0 ? perPageInput : 100;
1711
- const currentOffset = page * perPage;
1712
- const paramMap = {};
1713
- const conditions = [];
1714
- let paramIndex = 1;
1715
- if (name) {
1716
- const paramName = `p${paramIndex++}`;
1717
- conditions.push(`[name] LIKE @${paramName}`);
1718
- paramMap[paramName] = `${name}%`;
1719
- }
1720
- if (scope) {
1721
- const paramName = `p${paramIndex++}`;
1722
- conditions.push(`[scope] = @${paramName}`);
1723
- paramMap[paramName] = scope;
1724
- }
1725
- if (attributes) {
1726
- Object.entries(attributes).forEach(([key, value]) => {
1727
- const parsedKey = utils.parseFieldKey(key);
1728
- const paramName = `p${paramIndex++}`;
1729
- conditions.push(`JSON_VALUE([attributes], '$.${parsedKey}') = @${paramName}`);
1730
- paramMap[paramName] = value;
1731
- });
1732
- }
1733
- if (filters) {
1734
- Object.entries(filters).forEach(([key, value]) => {
1735
- const parsedKey = utils.parseFieldKey(key);
1736
- const paramName = `p${paramIndex++}`;
1737
- conditions.push(`[${parsedKey}] = @${paramName}`);
1738
- paramMap[paramName] = value;
1739
- });
1740
- }
1741
- if (fromDate instanceof Date && !isNaN(fromDate.getTime())) {
1742
- const paramName = `p${paramIndex++}`;
1743
- conditions.push(`[createdAt] >= @${paramName}`);
1744
- paramMap[paramName] = fromDate.toISOString();
1745
- }
1746
- if (toDate instanceof Date && !isNaN(toDate.getTime())) {
1747
- const paramName = `p${paramIndex++}`;
1748
- conditions.push(`[createdAt] <= @${paramName}`);
1749
- paramMap[paramName] = toDate.toISOString();
2664
+ parseWorkflowRun(row) {
2665
+ let parsedSnapshot = row.snapshot;
2666
+ if (typeof parsedSnapshot === "string") {
2667
+ try {
2668
+ parsedSnapshot = JSON.parse(row.snapshot);
2669
+ } catch (e) {
2670
+ this.logger?.warn?.(`Failed to parse snapshot for workflow ${row.workflow_name}:`, e);
2671
+ }
1750
2672
  }
1751
- const whereClause = conditions.length > 0 ? `WHERE ${conditions.join(" AND ")}` : "";
1752
- const countQuery = `SELECT COUNT(*) as total FROM ${getTableName({ indexName: storage.TABLE_TRACES, schemaName: getSchemaName(this.schema) })} ${whereClause}`;
1753
- let total = 0;
2673
+ return {
2674
+ workflowName: row.workflow_name,
2675
+ runId: row.run_id,
2676
+ snapshot: parsedSnapshot,
2677
+ createdAt: row.createdAt,
2678
+ updatedAt: row.updatedAt,
2679
+ resourceId: row.resourceId
2680
+ };
2681
+ }
2682
+ async updateWorkflowResults({
2683
+ workflowName,
2684
+ runId,
2685
+ stepId,
2686
+ result,
2687
+ requestContext
2688
+ }) {
2689
+ const table = getTableName({ indexName: storage.TABLE_WORKFLOW_SNAPSHOT, schemaName: getSchemaName(this.schema) });
2690
+ const transaction = this.pool.transaction();
1754
2691
  try {
1755
- const countRequest = this.pool.request();
1756
- Object.entries(paramMap).forEach(([key, value]) => {
1757
- if (value instanceof Date) {
1758
- countRequest.input(key, sql2__default.default.DateTime, value);
1759
- } else {
1760
- countRequest.input(key, value);
1761
- }
1762
- });
1763
- const countResult = await countRequest.query(countQuery);
1764
- total = parseInt(countResult.recordset[0].total, 10);
2692
+ await transaction.begin();
2693
+ const selectRequest = new sql2__default.default.Request(transaction);
2694
+ selectRequest.input("workflow_name", workflowName);
2695
+ selectRequest.input("run_id", runId);
2696
+ const existingSnapshotResult = await selectRequest.query(
2697
+ `SELECT snapshot FROM ${table} WITH (UPDLOCK, HOLDLOCK) WHERE workflow_name = @workflow_name AND run_id = @run_id`
2698
+ );
2699
+ let snapshot;
2700
+ if (!existingSnapshotResult.recordset || existingSnapshotResult.recordset.length === 0) {
2701
+ snapshot = {
2702
+ context: {},
2703
+ activePaths: [],
2704
+ timestamp: Date.now(),
2705
+ suspendedPaths: {},
2706
+ resumeLabels: {},
2707
+ serializedStepGraph: [],
2708
+ value: {},
2709
+ waitingPaths: {},
2710
+ status: "pending",
2711
+ runId,
2712
+ requestContext: {}
2713
+ };
2714
+ } else {
2715
+ const existingSnapshot = existingSnapshotResult.recordset[0].snapshot;
2716
+ snapshot = typeof existingSnapshot === "string" ? JSON.parse(existingSnapshot) : existingSnapshot;
2717
+ }
2718
+ snapshot.context[stepId] = result;
2719
+ snapshot.requestContext = { ...snapshot.requestContext, ...requestContext };
2720
+ const upsertReq = new sql2__default.default.Request(transaction);
2721
+ upsertReq.input("workflow_name", workflowName);
2722
+ upsertReq.input("run_id", runId);
2723
+ upsertReq.input("snapshot", JSON.stringify(snapshot));
2724
+ upsertReq.input("createdAt", sql2__default.default.DateTime2, /* @__PURE__ */ new Date());
2725
+ upsertReq.input("updatedAt", sql2__default.default.DateTime2, /* @__PURE__ */ new Date());
2726
+ await upsertReq.query(
2727
+ `MERGE ${table} AS target
2728
+ USING (SELECT @workflow_name AS workflow_name, @run_id AS run_id) AS src
2729
+ ON target.workflow_name = src.workflow_name AND target.run_id = src.run_id
2730
+ WHEN MATCHED THEN UPDATE SET snapshot = @snapshot, [updatedAt] = @updatedAt
2731
+ WHEN NOT MATCHED THEN INSERT (workflow_name, run_id, snapshot, [createdAt], [updatedAt])
2732
+ VALUES (@workflow_name, @run_id, @snapshot, @createdAt, @updatedAt);`
2733
+ );
2734
+ await transaction.commit();
2735
+ return snapshot.context;
1765
2736
  } catch (error$1) {
2737
+ try {
2738
+ await transaction.rollback();
2739
+ } catch {
2740
+ }
1766
2741
  throw new error.MastraError(
1767
2742
  {
1768
- id: "MASTRA_STORAGE_MSSQL_STORE_GET_TRACES_PAGINATED_FAILED_TO_RETRIEVE_TOTAL_COUNT",
2743
+ id: "MASTRA_STORAGE_MSSQL_STORE_UPDATE_WORKFLOW_RESULTS_FAILED",
1769
2744
  domain: error.ErrorDomain.STORAGE,
1770
2745
  category: error.ErrorCategory.THIRD_PARTY,
1771
2746
  details: {
1772
- name: args.name ?? "",
1773
- scope: args.scope ?? ""
2747
+ workflowName,
2748
+ runId,
2749
+ stepId
1774
2750
  }
1775
2751
  },
1776
2752
  error$1
1777
2753
  );
1778
2754
  }
1779
- if (total === 0) {
1780
- return {
1781
- traces: [],
1782
- total: 0,
1783
- page,
1784
- perPage,
1785
- hasMore: false
1786
- };
1787
- }
1788
- const dataQuery = `SELECT * FROM ${getTableName({ indexName: storage.TABLE_TRACES, schemaName: getSchemaName(this.schema) })} ${whereClause} ORDER BY [seq_id] DESC OFFSET @offset ROWS FETCH NEXT @limit ROWS ONLY`;
1789
- const dataRequest = this.pool.request();
1790
- Object.entries(paramMap).forEach(([key, value]) => {
1791
- if (value instanceof Date) {
1792
- dataRequest.input(key, sql2__default.default.DateTime, value);
1793
- } else {
1794
- dataRequest.input(key, value);
1795
- }
1796
- });
1797
- dataRequest.input("offset", currentOffset);
1798
- dataRequest.input("limit", perPage);
2755
+ }
2756
+ async updateWorkflowState({
2757
+ workflowName,
2758
+ runId,
2759
+ opts
2760
+ }) {
2761
+ const table = getTableName({ indexName: storage.TABLE_WORKFLOW_SNAPSHOT, schemaName: getSchemaName(this.schema) });
2762
+ const transaction = this.pool.transaction();
1799
2763
  try {
1800
- const rowsResult = await dataRequest.query(dataQuery);
1801
- const rows = rowsResult.recordset;
1802
- const traces = rows.map((row) => ({
1803
- id: row.id,
1804
- parentSpanId: row.parentSpanId,
1805
- traceId: row.traceId,
1806
- name: row.name,
1807
- scope: row.scope,
1808
- kind: row.kind,
1809
- status: JSON.parse(row.status),
1810
- events: JSON.parse(row.events),
1811
- links: JSON.parse(row.links),
1812
- attributes: JSON.parse(row.attributes),
1813
- startTime: row.startTime,
1814
- endTime: row.endTime,
1815
- other: row.other,
1816
- createdAt: row.createdAt
1817
- }));
1818
- return {
1819
- traces,
1820
- total,
1821
- page,
1822
- perPage,
1823
- hasMore: currentOffset + traces.length < total
1824
- };
2764
+ await transaction.begin();
2765
+ const selectRequest = new sql2__default.default.Request(transaction);
2766
+ selectRequest.input("workflow_name", workflowName);
2767
+ selectRequest.input("run_id", runId);
2768
+ const existingSnapshotResult = await selectRequest.query(
2769
+ `SELECT snapshot FROM ${table} WITH (UPDLOCK, HOLDLOCK) WHERE workflow_name = @workflow_name AND run_id = @run_id`
2770
+ );
2771
+ if (!existingSnapshotResult.recordset || existingSnapshotResult.recordset.length === 0) {
2772
+ await transaction.rollback();
2773
+ return void 0;
2774
+ }
2775
+ const existingSnapshot = existingSnapshotResult.recordset[0].snapshot;
2776
+ const snapshot = typeof existingSnapshot === "string" ? JSON.parse(existingSnapshot) : existingSnapshot;
2777
+ if (!snapshot || !snapshot?.context) {
2778
+ await transaction.rollback();
2779
+ throw new error.MastraError(
2780
+ {
2781
+ id: "MASTRA_STORAGE_MSSQL_STORE_UPDATE_WORKFLOW_STATE_SNAPSHOT_NOT_FOUND",
2782
+ domain: error.ErrorDomain.STORAGE,
2783
+ category: error.ErrorCategory.SYSTEM,
2784
+ details: {
2785
+ workflowName,
2786
+ runId
2787
+ }
2788
+ },
2789
+ new Error(`Snapshot not found for runId ${runId}`)
2790
+ );
2791
+ }
2792
+ const updatedSnapshot = { ...snapshot, ...opts };
2793
+ const updateRequest = new sql2__default.default.Request(transaction);
2794
+ updateRequest.input("snapshot", JSON.stringify(updatedSnapshot));
2795
+ updateRequest.input("workflow_name", workflowName);
2796
+ updateRequest.input("run_id", runId);
2797
+ updateRequest.input("updatedAt", sql2__default.default.DateTime2, /* @__PURE__ */ new Date());
2798
+ await updateRequest.query(
2799
+ `UPDATE ${table} SET snapshot = @snapshot, [updatedAt] = @updatedAt WHERE workflow_name = @workflow_name AND run_id = @run_id`
2800
+ );
2801
+ await transaction.commit();
2802
+ return updatedSnapshot;
1825
2803
  } catch (error$1) {
2804
+ try {
2805
+ await transaction.rollback();
2806
+ } catch {
2807
+ }
1826
2808
  throw new error.MastraError(
1827
2809
  {
1828
- id: "MASTRA_STORAGE_MSSQL_STORE_GET_TRACES_PAGINATED_FAILED_TO_RETRIEVE_TRACES",
2810
+ id: "MASTRA_STORAGE_MSSQL_STORE_UPDATE_WORKFLOW_STATE_FAILED",
1829
2811
  domain: error.ErrorDomain.STORAGE,
1830
2812
  category: error.ErrorCategory.THIRD_PARTY,
1831
2813
  details: {
1832
- name: args.name ?? "",
1833
- scope: args.scope ?? ""
2814
+ workflowName,
2815
+ runId
1834
2816
  }
1835
2817
  },
1836
2818
  error$1
1837
2819
  );
1838
2820
  }
1839
2821
  }
1840
- async batchTraceInsert({ records }) {
1841
- this.logger.debug("Batch inserting traces", { count: records.length });
1842
- await this.operations.batchInsert({
1843
- tableName: storage.TABLE_TRACES,
1844
- records
1845
- });
1846
- }
1847
- };
1848
- function parseWorkflowRun(row) {
1849
- let parsedSnapshot = row.snapshot;
1850
- if (typeof parsedSnapshot === "string") {
1851
- try {
1852
- parsedSnapshot = JSON.parse(row.snapshot);
1853
- } catch (e) {
1854
- console.warn(`Failed to parse snapshot for workflow ${row.workflow_name}: ${e}`);
1855
- }
1856
- }
1857
- return {
1858
- workflowName: row.workflow_name,
1859
- runId: row.run_id,
1860
- snapshot: parsedSnapshot,
1861
- createdAt: row.createdAt,
1862
- updatedAt: row.updatedAt,
1863
- resourceId: row.resourceId
1864
- };
1865
- }
1866
- var WorkflowsMSSQL = class extends storage.WorkflowsStorage {
1867
- pool;
1868
- operations;
1869
- schema;
1870
- constructor({
1871
- pool,
1872
- operations,
1873
- schema
1874
- }) {
1875
- super();
1876
- this.pool = pool;
1877
- this.operations = operations;
1878
- this.schema = schema;
1879
- }
1880
- updateWorkflowResults({
1881
- // workflowName,
1882
- // runId,
1883
- // stepId,
1884
- // result,
1885
- // runtimeContext,
1886
- }) {
1887
- throw new Error("Method not implemented.");
1888
- }
1889
- updateWorkflowState({
1890
- // workflowName,
1891
- // runId,
1892
- // opts,
1893
- }) {
1894
- throw new Error("Method not implemented.");
1895
- }
1896
2822
  async persistWorkflowSnapshot({
1897
2823
  workflowName,
1898
2824
  runId,
@@ -1989,7 +2915,7 @@ var WorkflowsMSSQL = class extends storage.WorkflowsStorage {
1989
2915
  if (!result.recordset || result.recordset.length === 0) {
1990
2916
  return null;
1991
2917
  }
1992
- return parseWorkflowRun(result.recordset[0]);
2918
+ return this.parseWorkflowRun(result.recordset[0]);
1993
2919
  } catch (error$1) {
1994
2920
  throw new error.MastraError(
1995
2921
  {
@@ -2005,12 +2931,12 @@ var WorkflowsMSSQL = class extends storage.WorkflowsStorage {
2005
2931
  );
2006
2932
  }
2007
2933
  }
2008
- async getWorkflowRuns({
2934
+ async listWorkflowRuns({
2009
2935
  workflowName,
2010
2936
  fromDate,
2011
2937
  toDate,
2012
- limit,
2013
- offset,
2938
+ page,
2939
+ perPage,
2014
2940
  resourceId
2015
2941
  } = {}) {
2016
2942
  try {
@@ -2026,7 +2952,7 @@ var WorkflowsMSSQL = class extends storage.WorkflowsStorage {
2026
2952
  conditions.push(`[resourceId] = @resourceId`);
2027
2953
  paramMap["resourceId"] = resourceId;
2028
2954
  } else {
2029
- console.warn(`[${storage.TABLE_WORKFLOW_SNAPSHOT}] resourceId column not found. Skipping resourceId filter.`);
2955
+ this.logger?.warn?.(`[${storage.TABLE_WORKFLOW_SNAPSHOT}] resourceId column not found. Skipping resourceId filter.`);
2030
2956
  }
2031
2957
  }
2032
2958
  if (fromDate instanceof Date && !isNaN(fromDate.getTime())) {
@@ -2048,24 +2974,27 @@ var WorkflowsMSSQL = class extends storage.WorkflowsStorage {
2048
2974
  request.input(key, value);
2049
2975
  }
2050
2976
  });
2051
- if (limit !== void 0 && offset !== void 0) {
2977
+ const usePagination = typeof perPage === "number" && typeof page === "number";
2978
+ if (usePagination) {
2052
2979
  const countQuery = `SELECT COUNT(*) as count FROM ${tableName} ${whereClause}`;
2053
2980
  const countResult = await request.query(countQuery);
2054
2981
  total = Number(countResult.recordset[0]?.count || 0);
2055
2982
  }
2056
2983
  let query = `SELECT * FROM ${tableName} ${whereClause} ORDER BY [seq_id] DESC`;
2057
- if (limit !== void 0 && offset !== void 0) {
2058
- query += ` OFFSET @offset ROWS FETCH NEXT @limit ROWS ONLY`;
2059
- request.input("limit", limit);
2984
+ if (usePagination) {
2985
+ const normalizedPerPage = storage.normalizePerPage(perPage, Number.MAX_SAFE_INTEGER);
2986
+ const offset = page * normalizedPerPage;
2987
+ query += ` OFFSET @offset ROWS FETCH NEXT @perPage ROWS ONLY`;
2988
+ request.input("perPage", normalizedPerPage);
2060
2989
  request.input("offset", offset);
2061
2990
  }
2062
2991
  const result = await request.query(query);
2063
- const runs = (result.recordset || []).map((row) => parseWorkflowRun(row));
2992
+ const runs = (result.recordset || []).map((row) => this.parseWorkflowRun(row));
2064
2993
  return { runs, total: total || runs.length };
2065
2994
  } catch (error$1) {
2066
2995
  throw new error.MastraError(
2067
2996
  {
2068
- id: "MASTRA_STORAGE_MSSQL_STORE_GET_WORKFLOW_RUNS_FAILED",
2997
+ id: "MASTRA_STORAGE_MSSQL_STORE_LIST_WORKFLOW_RUNS_FAILED",
2069
2998
  domain: error.ErrorDomain.STORAGE,
2070
2999
  category: error.ErrorCategory.THIRD_PARTY,
2071
3000
  details: {
@@ -2108,19 +3037,17 @@ var MSSQLStore = class extends storage.MastraStorage {
2108
3037
  port: config.port,
2109
3038
  options: config.options || { encrypt: true, trustServerCertificate: true }
2110
3039
  });
2111
- const legacyEvals = new LegacyEvalsMSSQL({ pool: this.pool, schema: this.schema });
2112
3040
  const operations = new StoreOperationsMSSQL({ pool: this.pool, schemaName: this.schema });
2113
3041
  const scores = new ScoresMSSQL({ pool: this.pool, operations, schema: this.schema });
2114
- const traces = new TracesMSSQL({ pool: this.pool, operations, schema: this.schema });
2115
3042
  const workflows = new WorkflowsMSSQL({ pool: this.pool, operations, schema: this.schema });
2116
3043
  const memory = new MemoryMSSQL({ pool: this.pool, schema: this.schema, operations });
3044
+ const observability = new ObservabilityMSSQL({ pool: this.pool, operations, schema: this.schema });
2117
3045
  this.stores = {
2118
3046
  operations,
2119
3047
  scores,
2120
- traces,
2121
3048
  workflows,
2122
- legacyEvals,
2123
- memory
3049
+ memory,
3050
+ observability
2124
3051
  };
2125
3052
  } catch (e) {
2126
3053
  throw new error.MastraError(
@@ -2140,6 +3067,11 @@ var MSSQLStore = class extends storage.MastraStorage {
2140
3067
  try {
2141
3068
  await this.isConnected;
2142
3069
  await super.init();
3070
+ try {
3071
+ await this.stores.operations.createAutomaticIndexes();
3072
+ } catch (indexError) {
3073
+ this.logger?.warn?.("Failed to create indexes:", indexError);
3074
+ }
2143
3075
  } catch (error$1) {
2144
3076
  this.isConnected = null;
2145
3077
  throw new error.MastraError(
@@ -2166,28 +3098,12 @@ var MSSQLStore = class extends storage.MastraStorage {
2166
3098
  resourceWorkingMemory: true,
2167
3099
  hasColumn: true,
2168
3100
  createTable: true,
2169
- deleteMessages: true
3101
+ deleteMessages: true,
3102
+ listScoresBySpan: true,
3103
+ aiTracing: true,
3104
+ indexManagement: true
2170
3105
  };
2171
3106
  }
2172
- /** @deprecated use getEvals instead */
2173
- async getEvalsByAgentName(agentName, type) {
2174
- return this.stores.legacyEvals.getEvalsByAgentName(agentName, type);
2175
- }
2176
- async getEvals(options = {}) {
2177
- return this.stores.legacyEvals.getEvals(options);
2178
- }
2179
- /**
2180
- * @deprecated use getTracesPaginated instead
2181
- */
2182
- async getTraces(args) {
2183
- return this.stores.traces.getTraces(args);
2184
- }
2185
- async getTracesPaginated(args) {
2186
- return this.stores.traces.getTracesPaginated(args);
2187
- }
2188
- async batchTraceInsert({ records }) {
2189
- return this.stores.traces.batchTraceInsert({ records });
2190
- }
2191
3107
  async createTable({
2192
3108
  tableName,
2193
3109
  schema
@@ -2222,15 +3138,6 @@ var MSSQLStore = class extends storage.MastraStorage {
2222
3138
  async getThreadById({ threadId }) {
2223
3139
  return this.stores.memory.getThreadById({ threadId });
2224
3140
  }
2225
- /**
2226
- * @deprecated use getThreadsByResourceIdPaginated instead
2227
- */
2228
- async getThreadsByResourceId(args) {
2229
- return this.stores.memory.getThreadsByResourceId(args);
2230
- }
2231
- async getThreadsByResourceIdPaginated(args) {
2232
- return this.stores.memory.getThreadsByResourceIdPaginated(args);
2233
- }
2234
3141
  async saveThread({ thread }) {
2235
3142
  return this.stores.memory.saveThread({ thread });
2236
3143
  }
@@ -2244,17 +3151,14 @@ var MSSQLStore = class extends storage.MastraStorage {
2244
3151
  async deleteThread({ threadId }) {
2245
3152
  return this.stores.memory.deleteThread({ threadId });
2246
3153
  }
3154
+ /**
3155
+ * @deprecated use listMessages instead
3156
+ */
2247
3157
  async getMessages(args) {
2248
3158
  return this.stores.memory.getMessages(args);
2249
3159
  }
2250
- async getMessagesById({
2251
- messageIds,
2252
- format
2253
- }) {
2254
- return this.stores.memory.getMessagesById({ messageIds, format });
2255
- }
2256
- async getMessagesPaginated(args) {
2257
- return this.stores.memory.getMessagesPaginated(args);
3160
+ async listMessagesById({ messageIds }) {
3161
+ return this.stores.memory.listMessagesById({ messageIds });
2258
3162
  }
2259
3163
  async saveMessages(args) {
2260
3164
  return this.stores.memory.saveMessages(args);
@@ -2288,9 +3192,9 @@ var MSSQLStore = class extends storage.MastraStorage {
2288
3192
  runId,
2289
3193
  stepId,
2290
3194
  result,
2291
- runtimeContext
3195
+ requestContext
2292
3196
  }) {
2293
- return this.stores.workflows.updateWorkflowResults({ workflowName, runId, stepId, result, runtimeContext });
3197
+ return this.stores.workflows.updateWorkflowResults({ workflowName, runId, stepId, result, requestContext });
2294
3198
  }
2295
3199
  async updateWorkflowState({
2296
3200
  workflowName,
@@ -2313,15 +3217,15 @@ var MSSQLStore = class extends storage.MastraStorage {
2313
3217
  }) {
2314
3218
  return this.stores.workflows.loadWorkflowSnapshot({ workflowName, runId });
2315
3219
  }
2316
- async getWorkflowRuns({
3220
+ async listWorkflowRuns({
2317
3221
  workflowName,
2318
3222
  fromDate,
2319
3223
  toDate,
2320
- limit,
2321
- offset,
3224
+ perPage,
3225
+ page,
2322
3226
  resourceId
2323
3227
  } = {}) {
2324
- return this.stores.workflows.getWorkflowRuns({ workflowName, fromDate, toDate, limit, offset, resourceId });
3228
+ return this.stores.workflows.listWorkflowRuns({ workflowName, fromDate, toDate, perPage, page, resourceId });
2325
3229
  }
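
Editor's note: listWorkflowRuns replaces getWorkflowRuns and swaps limit/offset for page/perPage. When both are numbers a COUNT(*) runs first and total reflects all matches; otherwise every matching run is returned and total falls back to runs.length. Sketch (identifiers are illustrative):

const { runs, total } = await store.listWorkflowRuns({
  workflowName: 'order-fulfillment',
  resourceId: 'user-1', // skipped with a warning if the resourceId column is absent
  fromDate: new Date('2025-01-01'),
  page: 0,
  perPage: 50
});
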
2326
3230
  async getWorkflowRunById({
2327
3231
  runId,
@@ -2332,38 +3236,108 @@ var MSSQLStore = class extends storage.MastraStorage {
2332
3236
  async close() {
2333
3237
  await this.pool.close();
2334
3238
  }
3239
+ /**
3240
+ * Index Management
3241
+ */
3242
+ async createIndex(options) {
3243
+ return this.stores.operations.createIndex(options);
3244
+ }
3245
+ async listIndexes(tableName) {
3246
+ return this.stores.operations.listIndexes(tableName);
3247
+ }
3248
+ async describeIndex(indexName) {
3249
+ return this.stores.operations.describeIndex(indexName);
3250
+ }
3251
+ async dropIndex(indexName) {
3252
+ return this.stores.operations.dropIndex(indexName);
3253
+ }
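
Editor's note: index management is also exposed on MSSQLStore itself as a thin facade over the operations class above. Sketch (index and table names are illustrative):

await store.createIndex({ name: 'my_threads_resourceid_idx', table: 'mastra_threads', columns: ['resourceId'] });
const indexes = await store.listIndexes('mastra_threads'); // [{ name, table, columns, unique, size, definition }]
const stats = await store.describeIndex('my_threads_resourceid_idx'); // adds method, scans, tuples_read, tuples_fetched
await store.dropIndex('my_threads_resourceid_idx');
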
3254
+ /**
3255
+ * AI Tracing / Observability
3256
+ */
3257
+ getObservabilityStore() {
3258
+ if (!this.stores.observability) {
3259
+ throw new error.MastraError({
3260
+ id: "MSSQL_STORE_OBSERVABILITY_NOT_INITIALIZED",
3261
+ domain: error.ErrorDomain.STORAGE,
3262
+ category: error.ErrorCategory.SYSTEM,
3263
+ text: "Observability storage is not initialized"
3264
+ });
3265
+ }
3266
+ return this.stores.observability;
3267
+ }
3268
+ async createAISpan(span) {
3269
+ return this.getObservabilityStore().createAISpan(span);
3270
+ }
3271
+ async updateAISpan({
3272
+ spanId,
3273
+ traceId,
3274
+ updates
3275
+ }) {
3276
+ return this.getObservabilityStore().updateAISpan({ spanId, traceId, updates });
3277
+ }
3278
+ async getAITrace(traceId) {
3279
+ return this.getObservabilityStore().getAITrace(traceId);
3280
+ }
3281
+ async getAITracesPaginated(args) {
3282
+ return this.getObservabilityStore().getAITracesPaginated(args);
3283
+ }
3284
+ async batchCreateAISpans(args) {
3285
+ return this.getObservabilityStore().batchCreateAISpans(args);
3286
+ }
3287
+ async batchUpdateAISpans(args) {
3288
+ return this.getObservabilityStore().batchUpdateAISpans(args);
3289
+ }
3290
+ async batchDeleteAITraces(args) {
3291
+ return this.getObservabilityStore().batchDeleteAITraces(args);
3292
+ }
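
Editor's note: the AI-tracing surface delegates to the new ObservabilityMSSQL store behind getObservabilityStore(), which throws a SYSTEM error if that store was never initialized. A hedged sketch; the span and update payload shapes are not visible in this diff, so the fields below are illustrative only:

const trace = await store.getAITrace('trace-abc');
await store.updateAISpan({
  spanId: 'span-def',
  traceId: 'trace-abc',
  updates: { endedAt: new Date() } // illustrative update payload
});
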
2335
3293
  /**
2336
3294
  * Scorers
2337
3295
  */
2338
3296
  async getScoreById({ id: _id }) {
2339
3297
  return this.stores.scores.getScoreById({ id: _id });
2340
3298
  }
2341
- async getScoresByScorerId({
3299
+ async listScoresByScorerId({
2342
3300
  scorerId: _scorerId,
2343
- pagination: _pagination
3301
+ pagination: _pagination,
3302
+ entityId: _entityId,
3303
+ entityType: _entityType,
3304
+ source: _source
2344
3305
  }) {
2345
- return this.stores.scores.getScoresByScorerId({ scorerId: _scorerId, pagination: _pagination });
3306
+ return this.stores.scores.listScoresByScorerId({
3307
+ scorerId: _scorerId,
3308
+ pagination: _pagination,
3309
+ entityId: _entityId,
3310
+ entityType: _entityType,
3311
+ source: _source
3312
+ });
2346
3313
  }
2347
3314
  async saveScore(_score) {
2348
3315
  return this.stores.scores.saveScore(_score);
2349
3316
  }
2350
- async getScoresByRunId({
3317
+ async listScoresByRunId({
2351
3318
  runId: _runId,
2352
3319
  pagination: _pagination
2353
3320
  }) {
2354
- return this.stores.scores.getScoresByRunId({ runId: _runId, pagination: _pagination });
3321
+ return this.stores.scores.listScoresByRunId({ runId: _runId, pagination: _pagination });
2355
3322
  }
2356
- async getScoresByEntityId({
3323
+ async listScoresByEntityId({
2357
3324
  entityId: _entityId,
2358
3325
  entityType: _entityType,
2359
3326
  pagination: _pagination
2360
3327
  }) {
2361
- return this.stores.scores.getScoresByEntityId({
3328
+ return this.stores.scores.listScoresByEntityId({
2362
3329
  entityId: _entityId,
2363
3330
  entityType: _entityType,
2364
3331
  pagination: _pagination
2365
3332
  });
2366
3333
  }
3334
+ async listScoresBySpan({
3335
+ traceId,
3336
+ spanId,
3337
+ pagination: _pagination
3338
+ }) {
3339
+ return this.stores.scores.listScoresBySpan({ traceId, spanId, pagination: _pagination });
3340
+ }
2367
3341
  };
2368
3342
 
2369
3343
  exports.MSSQLStore = MSSQLStore;
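
Editor's note: end to end, the exported store is built from an mssql connection config and initialized before use; init() now also calls createAutomaticIndexes() on a best-effort basis and only logs a warning if index creation fails. A hedged sketch; only port and options appear in this diff, so the remaining config keys are assumed to match the standard mssql connection options:

const { MSSQLStore } = require('@mastra/mssql');

const store = new MSSQLStore({
  server: 'localhost', // assumed keys; only `port` and `options` are visible in this diff
  port: 1433,
  database: 'mastra',
  user: 'sa',
  password: 'yourStrong(!)Password',
  options: { encrypt: true, trustServerCertificate: true }
});

await store.init(); // table setup plus best-effort automatic index creation
const { runs } = await store.listWorkflowRuns({ page: 0, perPage: 10 });
await store.close();
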