@mastra/mssql 0.0.0-scorers-api-v2-20250801171841 → 0.0.0-scorers-logs-20251208093427

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.cjs CHANGED
@@ -1,1221 +1,2828 @@
1
1
  'use strict';
2
2
 
3
- var agent = require('@mastra/core/agent');
4
3
  var error = require('@mastra/core/error');
5
4
  var storage = require('@mastra/core/storage');
5
+ var sql2 = require('mssql');
6
+ var agent = require('@mastra/core/agent');
6
7
  var utils = require('@mastra/core/utils');
7
- var sql = require('mssql');
8
+ var crypto = require('crypto');
9
+ var evals = require('@mastra/core/evals');
8
10
 
9
11
  function _interopDefault (e) { return e && e.__esModule ? e : { default: e }; }
10
12
 
11
- var sql__default = /*#__PURE__*/_interopDefault(sql);
13
+ var sql2__default = /*#__PURE__*/_interopDefault(sql2);
12
14
 
13
15
  // src/storage/index.ts
14
- var MSSQLStore = class extends storage.MastraStorage {
15
- pool;
16
- schema;
17
- setupSchemaPromise = null;
18
- schemaSetupComplete = void 0;
19
- isConnected = null;
20
- constructor(config) {
21
- super({ name: "MSSQLStore" });
22
- try {
23
- if ("connectionString" in config) {
24
- if (!config.connectionString || typeof config.connectionString !== "string" || config.connectionString.trim() === "") {
25
- throw new Error("MSSQLStore: connectionString must be provided and cannot be empty.");
16
+ function getSchemaName(schema) {
17
+ return schema ? `[${utils.parseSqlIdentifier(schema, "schema name")}]` : void 0;
18
+ }
19
+ function getTableName({ indexName, schemaName }) {
20
+ const parsedIndexName = utils.parseSqlIdentifier(indexName, "index name");
21
+ const quotedIndexName = `[${parsedIndexName}]`;
22
+ const quotedSchemaName = schemaName;
23
+ return quotedSchemaName ? `${quotedSchemaName}.${quotedIndexName}` : quotedIndexName;
24
+ }
25
+ function buildDateRangeFilter(dateRange, fieldName) {
26
+ const filters = {};
27
+ if (dateRange?.start) {
28
+ filters[`${fieldName}_gte`] = dateRange.start;
29
+ }
30
+ if (dateRange?.end) {
31
+ filters[`${fieldName}_lte`] = dateRange.end;
32
+ }
33
+ return filters;
34
+ }
35
+ function isInOperator(value) {
36
+ return typeof value === "object" && value !== null && "$in" in value && Array.isArray(value.$in);
37
+ }
38
+ function prepareWhereClause(filters, _schema) {
39
+ const conditions = [];
40
+ const params = {};
41
+ let paramIndex = 1;
42
+ Object.entries(filters).forEach(([key, value]) => {
43
+ if (value === void 0) return;
44
+ if (key.endsWith("_gte")) {
45
+ const paramName = `p${paramIndex++}`;
46
+ const fieldName = key.slice(0, -4);
47
+ conditions.push(`[${utils.parseSqlIdentifier(fieldName, "field name")}] >= @${paramName}`);
48
+ params[paramName] = value instanceof Date ? value.toISOString() : value;
49
+ } else if (key.endsWith("_lte")) {
50
+ const paramName = `p${paramIndex++}`;
51
+ const fieldName = key.slice(0, -4);
52
+ conditions.push(`[${utils.parseSqlIdentifier(fieldName, "field name")}] <= @${paramName}`);
53
+ params[paramName] = value instanceof Date ? value.toISOString() : value;
54
+ } else if (value === null) {
55
+ conditions.push(`[${utils.parseSqlIdentifier(key, "field name")}] IS NULL`);
56
+ } else if (isInOperator(value)) {
57
+ const inValues = value.$in;
58
+ if (inValues.length === 0) {
59
+ conditions.push("1 = 0");
60
+ } else if (inValues.length === 1) {
61
+ const paramName = `p${paramIndex++}`;
62
+ conditions.push(`[${utils.parseSqlIdentifier(key, "field name")}] = @${paramName}`);
63
+ params[paramName] = inValues[0] instanceof Date ? inValues[0].toISOString() : inValues[0];
64
+ } else {
65
+ const inParamNames = [];
66
+ for (const item of inValues) {
67
+ const paramName = `p${paramIndex++}`;
68
+ inParamNames.push(`@${paramName}`);
69
+ params[paramName] = item instanceof Date ? item.toISOString() : item;
26
70
  }
71
+ conditions.push(`[${utils.parseSqlIdentifier(key, "field name")}] IN (${inParamNames.join(", ")})`);
72
+ }
73
+ } else if (Array.isArray(value)) {
74
+ if (value.length === 0) {
75
+ conditions.push("1 = 0");
76
+ } else if (value.length === 1) {
77
+ const paramName = `p${paramIndex++}`;
78
+ conditions.push(`[${utils.parseSqlIdentifier(key, "field name")}] = @${paramName}`);
79
+ params[paramName] = value[0] instanceof Date ? value[0].toISOString() : value[0];
27
80
  } else {
28
- const required = ["server", "database", "user", "password"];
29
- for (const key of required) {
30
- if (!(key in config) || typeof config[key] !== "string" || config[key].trim() === "") {
31
- throw new Error(`MSSQLStore: ${key} must be provided and cannot be empty.`);
32
- }
81
+ const inParamNames = [];
82
+ for (const item of value) {
83
+ const paramName = `p${paramIndex++}`;
84
+ inParamNames.push(`@${paramName}`);
85
+ params[paramName] = item instanceof Date ? item.toISOString() : item;
33
86
  }
87
+ conditions.push(`[${utils.parseSqlIdentifier(key, "field name")}] IN (${inParamNames.join(", ")})`);
34
88
  }
35
- this.schema = config.schemaName;
36
- this.pool = "connectionString" in config ? new sql__default.default.ConnectionPool(config.connectionString) : new sql__default.default.ConnectionPool({
37
- server: config.server,
38
- database: config.database,
39
- user: config.user,
40
- password: config.password,
41
- port: config.port,
42
- options: config.options || { encrypt: true, trustServerCertificate: true }
43
- });
44
- } catch (e) {
45
- throw new error.MastraError(
46
- {
47
- id: "MASTRA_STORAGE_MSSQL_STORE_INITIALIZATION_FAILED",
48
- domain: error.ErrorDomain.STORAGE,
49
- category: error.ErrorCategory.USER
50
- },
51
- e
52
- );
53
- }
54
- }
55
- async init() {
56
- if (this.isConnected === null) {
57
- this.isConnected = this._performInitializationAndStore();
58
- }
59
- try {
60
- await this.isConnected;
61
- await super.init();
62
- } catch (error$1) {
63
- this.isConnected = null;
64
- throw new error.MastraError(
65
- {
66
- id: "MASTRA_STORAGE_MSSQL_STORE_INIT_FAILED",
67
- domain: error.ErrorDomain.STORAGE,
68
- category: error.ErrorCategory.THIRD_PARTY
69
- },
70
- error$1
71
- );
72
- }
73
- }
74
- async _performInitializationAndStore() {
75
- try {
76
- await this.pool.connect();
77
- return true;
78
- } catch (err) {
79
- throw err;
89
+ } else {
90
+ const paramName = `p${paramIndex++}`;
91
+ conditions.push(`[${utils.parseSqlIdentifier(key, "field name")}] = @${paramName}`);
92
+ params[paramName] = value instanceof Date ? value.toISOString() : value;
80
93
  }
81
- }
82
- get supports() {
83
- return {
84
- selectByIncludeResourceScope: true,
85
- resourceWorkingMemory: true,
86
- hasColumn: true,
87
- createTable: true,
88
- deleteMessages: false
89
- };
90
- }
91
- getTableName(indexName) {
92
- const parsedIndexName = utils.parseSqlIdentifier(indexName, "index name");
93
- const quotedIndexName = `[${parsedIndexName}]`;
94
- const quotedSchemaName = this.getSchemaName();
95
- return quotedSchemaName ? `${quotedSchemaName}.${quotedIndexName}` : quotedIndexName;
96
- }
97
- getSchemaName() {
98
- return this.schema ? `[${utils.parseSqlIdentifier(this.schema, "schema name")}]` : void 0;
99
- }
100
- transformEvalRow(row) {
101
- let testInfoValue = null, resultValue = null;
102
- if (row.test_info) {
94
+ });
95
+ return {
96
+ sql: conditions.length > 0 ? ` WHERE ${conditions.join(" AND ")}` : "",
97
+ params
98
+ };
99
+ }
100
+ function transformFromSqlRow({
101
+ tableName,
102
+ sqlRow
103
+ }) {
104
+ const schema = storage.TABLE_SCHEMAS[tableName];
105
+ const result = {};
106
+ Object.entries(sqlRow).forEach(([key, value]) => {
107
+ const columnSchema = schema?.[key];
108
+ if (columnSchema?.type === "jsonb" && typeof value === "string") {
103
109
  try {
104
- testInfoValue = typeof row.test_info === "string" ? JSON.parse(row.test_info) : row.test_info;
110
+ result[key] = JSON.parse(value);
105
111
  } catch {
112
+ result[key] = value;
106
113
  }
114
+ } else if (columnSchema?.type === "timestamp" && value && typeof value === "string") {
115
+ result[key] = new Date(value);
116
+ } else if (columnSchema?.type === "timestamp" && value instanceof Date) {
117
+ result[key] = value;
118
+ } else if (columnSchema?.type === "boolean") {
119
+ result[key] = Boolean(value);
120
+ } else {
121
+ result[key] = value;
107
122
  }
108
- if (row.test_info) {
109
- try {
110
- resultValue = typeof row.result === "string" ? JSON.parse(row.result) : row.result;
111
- } catch {
123
+ });
124
+ return result;
125
+ }
126
+
127
+ // src/storage/domains/memory/index.ts
128
+ var MemoryMSSQL = class extends storage.MemoryStorage {
129
+ pool;
130
+ schema;
131
+ operations;
132
+ _parseAndFormatMessages(messages, format) {
133
+ const messagesWithParsedContent = messages.map((message) => {
134
+ if (typeof message.content === "string") {
135
+ try {
136
+ return { ...message, content: JSON.parse(message.content) };
137
+ } catch {
138
+ return message;
139
+ }
112
140
  }
113
- }
114
- return {
115
- agentName: row.agent_name,
116
- input: row.input,
117
- output: row.output,
118
- result: resultValue,
119
- metricName: row.metric_name,
120
- instructions: row.instructions,
121
- testInfo: testInfoValue,
122
- globalRunId: row.global_run_id,
123
- runId: row.run_id,
124
- createdAt: row.created_at
125
- };
141
+ return message;
142
+ });
143
+ const cleanMessages = messagesWithParsedContent.map(({ seq_id, ...rest }) => rest);
144
+ const list = new agent.MessageList().add(cleanMessages, "memory");
145
+ return format === "v2" ? list.get.all.db() : list.get.all.v1();
126
146
  }
127
- /** @deprecated use getEvals instead */
128
- async getEvalsByAgentName(agentName, type) {
129
- try {
130
- let query = `SELECT * FROM ${this.getTableName(storage.TABLE_EVALS)} WHERE agent_name = @p1`;
131
- if (type === "test") {
132
- query += " AND test_info IS NOT NULL AND JSON_VALUE(test_info, '$.testPath') IS NOT NULL";
133
- } else if (type === "live") {
134
- query += " AND (test_info IS NULL OR JSON_VALUE(test_info, '$.testPath') IS NULL)";
135
- }
136
- query += " ORDER BY created_at DESC";
137
- const request = this.pool.request();
138
- request.input("p1", agentName);
139
- const result = await request.query(query);
140
- const rows = result.recordset;
141
- return typeof this.transformEvalRow === "function" ? rows?.map((row) => this.transformEvalRow(row)) ?? [] : rows ?? [];
142
- } catch (error) {
143
- if (error && error.number === 208 && error.message && error.message.includes("Invalid object name")) {
144
- return [];
145
- }
146
- console.error("Failed to get evals for the specified agent: " + error?.message);
147
- throw error;
148
- }
147
+ constructor({
148
+ pool,
149
+ schema,
150
+ operations
151
+ }) {
152
+ super();
153
+ this.pool = pool;
154
+ this.schema = schema;
155
+ this.operations = operations;
149
156
  }
150
- async batchInsert({ tableName, records }) {
151
- const transaction = this.pool.transaction();
157
+ async getThreadById({ threadId }) {
152
158
  try {
153
- await transaction.begin();
154
- for (const record of records) {
155
- await this.insert({ tableName, record });
159
+ const sql5 = `SELECT
160
+ id,
161
+ [resourceId],
162
+ title,
163
+ metadata,
164
+ [createdAt],
165
+ [updatedAt]
166
+ FROM ${getTableName({ indexName: storage.TABLE_THREADS, schemaName: getSchemaName(this.schema) })}
167
+ WHERE id = @threadId`;
168
+ const request = this.pool.request();
169
+ request.input("threadId", threadId);
170
+ const resultSet = await request.query(sql5);
171
+ const thread = resultSet.recordset[0] || null;
172
+ if (!thread) {
173
+ return null;
156
174
  }
157
- await transaction.commit();
175
+ return {
176
+ ...thread,
177
+ metadata: typeof thread.metadata === "string" ? JSON.parse(thread.metadata) : thread.metadata,
178
+ createdAt: thread.createdAt,
179
+ updatedAt: thread.updatedAt
180
+ };
158
181
  } catch (error$1) {
159
- await transaction.rollback();
160
182
  throw new error.MastraError(
161
183
  {
162
- id: "MASTRA_STORAGE_MSSQL_STORE_BATCH_INSERT_FAILED",
184
+ id: storage.createStorageErrorId("MSSQL", "GET_THREAD_BY_ID", "FAILED"),
163
185
  domain: error.ErrorDomain.STORAGE,
164
186
  category: error.ErrorCategory.THIRD_PARTY,
165
187
  details: {
166
- tableName,
167
- numberOfRecords: records.length
188
+ threadId
168
189
  }
169
190
  },
170
191
  error$1
171
192
  );
172
193
  }
173
194
  }
174
- /** @deprecated use getTracesPaginated instead*/
175
- async getTraces(args) {
176
- if (args.fromDate || args.toDate) {
177
- args.dateRange = {
178
- start: args.fromDate,
179
- end: args.toDate
180
- };
181
- }
182
- const result = await this.getTracesPaginated(args);
183
- return result.traces;
184
- }
185
- async getTracesPaginated(args) {
186
- const { name, scope, page = 0, perPage: perPageInput, attributes, filters, dateRange } = args;
187
- const fromDate = dateRange?.start;
188
- const toDate = dateRange?.end;
189
- const perPage = perPageInput !== void 0 ? perPageInput : 100;
190
- const currentOffset = page * perPage;
191
- const paramMap = {};
192
- const conditions = [];
193
- let paramIndex = 1;
194
- if (name) {
195
- const paramName = `p${paramIndex++}`;
196
- conditions.push(`[name] LIKE @${paramName}`);
197
- paramMap[paramName] = `${name}%`;
198
- }
199
- if (scope) {
200
- const paramName = `p${paramIndex++}`;
201
- conditions.push(`[scope] = @${paramName}`);
202
- paramMap[paramName] = scope;
203
- }
204
- if (attributes) {
205
- Object.entries(attributes).forEach(([key, value]) => {
206
- const parsedKey = utils.parseFieldKey(key);
207
- const paramName = `p${paramIndex++}`;
208
- conditions.push(`JSON_VALUE([attributes], '$.${parsedKey}') = @${paramName}`);
209
- paramMap[paramName] = value;
210
- });
211
- }
212
- if (filters) {
213
- Object.entries(filters).forEach(([key, value]) => {
214
- const parsedKey = utils.parseFieldKey(key);
215
- const paramName = `p${paramIndex++}`;
216
- conditions.push(`[${parsedKey}] = @${paramName}`);
217
- paramMap[paramName] = value;
195
+ async listThreadsByResourceId(args) {
196
+ const { resourceId, page = 0, perPage: perPageInput, orderBy } = args;
197
+ if (page < 0) {
198
+ throw new error.MastraError({
199
+ id: storage.createStorageErrorId("MSSQL", "LIST_THREADS_BY_RESOURCE_ID", "INVALID_PAGE"),
200
+ domain: error.ErrorDomain.STORAGE,
201
+ category: error.ErrorCategory.USER,
202
+ text: "Page number must be non-negative",
203
+ details: {
204
+ resourceId,
205
+ page
206
+ }
218
207
  });
219
208
  }
220
- if (fromDate instanceof Date && !isNaN(fromDate.getTime())) {
221
- const paramName = `p${paramIndex++}`;
222
- conditions.push(`[createdAt] >= @${paramName}`);
223
- paramMap[paramName] = fromDate.toISOString();
224
- }
225
- if (toDate instanceof Date && !isNaN(toDate.getTime())) {
226
- const paramName = `p${paramIndex++}`;
227
- conditions.push(`[createdAt] <= @${paramName}`);
228
- paramMap[paramName] = toDate.toISOString();
229
- }
230
- const whereClause = conditions.length > 0 ? `WHERE ${conditions.join(" AND ")}` : "";
231
- const countQuery = `SELECT COUNT(*) as total FROM ${this.getTableName(storage.TABLE_TRACES)} ${whereClause}`;
232
- let total = 0;
209
+ const perPage = storage.normalizePerPage(perPageInput, 100);
210
+ const { offset, perPage: perPageForResponse } = storage.calculatePagination(page, perPageInput, perPage);
211
+ const { field, direction } = this.parseOrderBy(orderBy);
233
212
  try {
213
+ const baseQuery = `FROM ${getTableName({ indexName: storage.TABLE_THREADS, schemaName: getSchemaName(this.schema) })} WHERE [resourceId] = @resourceId`;
214
+ const countQuery = `SELECT COUNT(*) as count ${baseQuery}`;
234
215
  const countRequest = this.pool.request();
235
- Object.entries(paramMap).forEach(([key, value]) => {
236
- if (value instanceof Date) {
237
- countRequest.input(key, sql__default.default.DateTime, value);
238
- } else {
239
- countRequest.input(key, value);
240
- }
241
- });
216
+ countRequest.input("resourceId", resourceId);
242
217
  const countResult = await countRequest.query(countQuery);
243
- total = parseInt(countResult.recordset[0].total, 10);
218
+ const total = parseInt(countResult.recordset[0]?.count ?? "0", 10);
219
+ if (total === 0) {
220
+ return {
221
+ threads: [],
222
+ total: 0,
223
+ page,
224
+ perPage: perPageForResponse,
225
+ hasMore: false
226
+ };
227
+ }
228
+ const orderByField = field === "createdAt" ? "[createdAt]" : "[updatedAt]";
229
+ const dir = (direction || "DESC").toUpperCase() === "ASC" ? "ASC" : "DESC";
230
+ const limitValue = perPageInput === false ? total : perPage;
231
+ const dataQuery = `SELECT id, [resourceId], title, metadata, [createdAt], [updatedAt] ${baseQuery} ORDER BY ${orderByField} ${dir} OFFSET @offset ROWS FETCH NEXT @perPage ROWS ONLY`;
232
+ const dataRequest = this.pool.request();
233
+ dataRequest.input("resourceId", resourceId);
234
+ dataRequest.input("offset", offset);
235
+ if (limitValue > 2147483647) {
236
+ dataRequest.input("perPage", sql2__default.default.BigInt, limitValue);
237
+ } else {
238
+ dataRequest.input("perPage", limitValue);
239
+ }
240
+ const rowsResult = await dataRequest.query(dataQuery);
241
+ const rows = rowsResult.recordset || [];
242
+ const threads = rows.map((thread) => ({
243
+ ...thread,
244
+ metadata: typeof thread.metadata === "string" ? JSON.parse(thread.metadata) : thread.metadata,
245
+ createdAt: thread.createdAt,
246
+ updatedAt: thread.updatedAt
247
+ }));
248
+ return {
249
+ threads,
250
+ total,
251
+ page,
252
+ perPage: perPageForResponse,
253
+ hasMore: perPageInput === false ? false : offset + perPage < total
254
+ };
244
255
  } catch (error$1) {
245
- throw new error.MastraError(
256
+ const mastraError = new error.MastraError(
246
257
  {
247
- id: "MASTRA_STORAGE_MSSQL_STORE_GET_TRACES_PAGINATED_FAILED_TO_RETRIEVE_TOTAL_COUNT",
258
+ id: storage.createStorageErrorId("MSSQL", "LIST_THREADS_BY_RESOURCE_ID", "FAILED"),
248
259
  domain: error.ErrorDomain.STORAGE,
249
260
  category: error.ErrorCategory.THIRD_PARTY,
250
261
  details: {
251
- name: args.name ?? "",
252
- scope: args.scope ?? ""
262
+ resourceId,
263
+ page
253
264
  }
254
265
  },
255
266
  error$1
256
267
  );
257
- }
258
- if (total === 0) {
268
+ this.logger?.error?.(mastraError.toString());
269
+ this.logger?.trackException?.(mastraError);
259
270
  return {
260
- traces: [],
271
+ threads: [],
261
272
  total: 0,
262
273
  page,
263
- perPage,
274
+ perPage: perPageForResponse,
264
275
  hasMore: false
265
276
  };
266
277
  }
267
- const dataQuery = `SELECT * FROM ${this.getTableName(storage.TABLE_TRACES)} ${whereClause} ORDER BY [seq_id] DESC OFFSET @offset ROWS FETCH NEXT @limit ROWS ONLY`;
268
- const dataRequest = this.pool.request();
269
- Object.entries(paramMap).forEach(([key, value]) => {
270
- if (value instanceof Date) {
271
- dataRequest.input(key, sql__default.default.DateTime, value);
278
+ }
279
+ async saveThread({ thread }) {
280
+ try {
281
+ const table = getTableName({ indexName: storage.TABLE_THREADS, schemaName: getSchemaName(this.schema) });
282
+ const mergeSql = `MERGE INTO ${table} WITH (HOLDLOCK) AS target
283
+ USING (SELECT @id AS id) AS source
284
+ ON (target.id = source.id)
285
+ WHEN MATCHED THEN
286
+ UPDATE SET
287
+ [resourceId] = @resourceId,
288
+ title = @title,
289
+ metadata = @metadata,
290
+ [updatedAt] = @updatedAt
291
+ WHEN NOT MATCHED THEN
292
+ INSERT (id, [resourceId], title, metadata, [createdAt], [updatedAt])
293
+ VALUES (@id, @resourceId, @title, @metadata, @createdAt, @updatedAt);`;
294
+ const req = this.pool.request();
295
+ req.input("id", thread.id);
296
+ req.input("resourceId", thread.resourceId);
297
+ req.input("title", thread.title);
298
+ const metadata = thread.metadata ? JSON.stringify(thread.metadata) : null;
299
+ if (metadata === null) {
300
+ req.input("metadata", sql2__default.default.NVarChar, null);
272
301
  } else {
273
- dataRequest.input(key, value);
302
+ req.input("metadata", metadata);
274
303
  }
275
- });
276
- dataRequest.input("offset", currentOffset);
277
- dataRequest.input("limit", perPage);
278
- try {
279
- const rowsResult = await dataRequest.query(dataQuery);
280
- const rows = rowsResult.recordset;
281
- const traces = rows.map((row) => ({
282
- id: row.id,
283
- parentSpanId: row.parentSpanId,
284
- traceId: row.traceId,
285
- name: row.name,
286
- scope: row.scope,
287
- kind: row.kind,
288
- status: JSON.parse(row.status),
289
- events: JSON.parse(row.events),
290
- links: JSON.parse(row.links),
291
- attributes: JSON.parse(row.attributes),
292
- startTime: row.startTime,
293
- endTime: row.endTime,
294
- other: row.other,
295
- createdAt: row.createdAt
296
- }));
297
- return {
298
- traces,
299
- total,
300
- page,
301
- perPage,
302
- hasMore: currentOffset + traces.length < total
303
- };
304
+ req.input("createdAt", sql2__default.default.DateTime2, thread.createdAt);
305
+ req.input("updatedAt", sql2__default.default.DateTime2, thread.updatedAt);
306
+ await req.query(mergeSql);
307
+ return thread;
304
308
  } catch (error$1) {
305
309
  throw new error.MastraError(
306
310
  {
307
- id: "MASTRA_STORAGE_MSSQL_STORE_GET_TRACES_PAGINATED_FAILED_TO_RETRIEVE_TRACES",
311
+ id: storage.createStorageErrorId("MSSQL", "SAVE_THREAD", "FAILED"),
308
312
  domain: error.ErrorDomain.STORAGE,
309
313
  category: error.ErrorCategory.THIRD_PARTY,
310
314
  details: {
311
- name: args.name ?? "",
312
- scope: args.scope ?? ""
315
+ threadId: thread.id
313
316
  }
314
317
  },
315
318
  error$1
316
319
  );
317
320
  }
318
321
  }
319
- async setupSchema() {
320
- if (!this.schema || this.schemaSetupComplete) {
321
- return;
322
+ /**
323
+ * Updates a thread's title and metadata, merging with existing metadata. Returns the updated thread.
324
+ */
325
+ async updateThread({
326
+ id,
327
+ title,
328
+ metadata
329
+ }) {
330
+ const existingThread = await this.getThreadById({ threadId: id });
331
+ if (!existingThread) {
332
+ throw new error.MastraError({
333
+ id: storage.createStorageErrorId("MSSQL", "UPDATE_THREAD", "NOT_FOUND"),
334
+ domain: error.ErrorDomain.STORAGE,
335
+ category: error.ErrorCategory.USER,
336
+ text: `Thread ${id} not found`,
337
+ details: {
338
+ threadId: id,
339
+ title
340
+ }
341
+ });
322
342
  }
323
- if (!this.setupSchemaPromise) {
324
- this.setupSchemaPromise = (async () => {
325
- try {
326
- const checkRequest = this.pool.request();
327
- checkRequest.input("schemaName", this.schema);
328
- const checkResult = await checkRequest.query(`
329
- SELECT 1 AS found FROM INFORMATION_SCHEMA.SCHEMATA WHERE SCHEMA_NAME = @schemaName
330
- `);
331
- const schemaExists = Array.isArray(checkResult.recordset) && checkResult.recordset.length > 0;
332
- if (!schemaExists) {
333
- try {
334
- await this.pool.request().query(`CREATE SCHEMA [${this.schema}]`);
335
- this.logger?.info?.(`Schema "${this.schema}" created successfully`);
336
- } catch (error) {
337
- this.logger?.error?.(`Failed to create schema "${this.schema}"`, { error });
338
- throw new Error(
339
- `Unable to create schema "${this.schema}". This requires CREATE privilege on the database. Either create the schema manually or grant CREATE privilege to the user.`
340
- );
341
- }
342
- }
343
- this.schemaSetupComplete = true;
344
- this.logger?.debug?.(`Schema "${this.schema}" is ready for use`);
345
- } catch (error) {
346
- this.schemaSetupComplete = void 0;
347
- this.setupSchemaPromise = null;
348
- throw error;
349
- } finally {
350
- this.setupSchemaPromise = null;
351
- }
352
- })();
353
- }
354
- await this.setupSchemaPromise;
355
- }
356
- getSqlType(type, isPrimaryKey = false) {
357
- switch (type) {
358
- case "text":
359
- return isPrimaryKey ? "NVARCHAR(255)" : "NVARCHAR(MAX)";
360
- case "timestamp":
361
- return "DATETIME2(7)";
362
- case "uuid":
363
- return "UNIQUEIDENTIFIER";
364
- case "jsonb":
365
- return "NVARCHAR(MAX)";
366
- case "integer":
367
- return "INT";
368
- case "bigint":
369
- return "BIGINT";
370
- default:
343
+ const mergedMetadata = {
344
+ ...existingThread.metadata,
345
+ ...metadata
346
+ };
347
+ try {
348
+ const table = getTableName({ indexName: storage.TABLE_THREADS, schemaName: getSchemaName(this.schema) });
349
+ const sql5 = `UPDATE ${table}
350
+ SET title = @title,
351
+ metadata = @metadata,
352
+ [updatedAt] = @updatedAt
353
+ OUTPUT INSERTED.*
354
+ WHERE id = @id`;
355
+ const req = this.pool.request();
356
+ req.input("id", id);
357
+ req.input("title", title);
358
+ req.input("metadata", JSON.stringify(mergedMetadata));
359
+ req.input("updatedAt", /* @__PURE__ */ new Date());
360
+ const result = await req.query(sql5);
361
+ let thread = result.recordset && result.recordset[0];
362
+ if (thread && "seq_id" in thread) {
363
+ const { seq_id, ...rest } = thread;
364
+ thread = rest;
365
+ }
366
+ if (!thread) {
371
367
  throw new error.MastraError({
372
- id: "MASTRA_STORAGE_MSSQL_STORE_TYPE_NOT_SUPPORTED",
368
+ id: storage.createStorageErrorId("MSSQL", "UPDATE_THREAD", "NOT_FOUND"),
373
369
  domain: error.ErrorDomain.STORAGE,
374
- category: error.ErrorCategory.THIRD_PARTY
370
+ category: error.ErrorCategory.USER,
371
+ text: `Thread ${id} not found after update`,
372
+ details: {
373
+ threadId: id,
374
+ title
375
+ }
375
376
  });
376
- }
377
- }
378
- async createTable({
379
- tableName,
380
- schema
381
- }) {
382
- try {
383
- const uniqueConstraintColumns = tableName === storage.TABLE_WORKFLOW_SNAPSHOT ? ["workflow_name", "run_id"] : [];
384
- const columns = Object.entries(schema).map(([name, def]) => {
385
- const parsedName = utils.parseSqlIdentifier(name, "column name");
386
- const constraints = [];
387
- if (def.primaryKey) constraints.push("PRIMARY KEY");
388
- if (!def.nullable) constraints.push("NOT NULL");
389
- const isIndexed = !!def.primaryKey || uniqueConstraintColumns.includes(name);
390
- return `[${parsedName}] ${this.getSqlType(def.type, isIndexed)} ${constraints.join(" ")}`.trim();
391
- }).join(",\n");
392
- if (this.schema) {
393
- await this.setupSchema();
394
- }
395
- const checkTableRequest = this.pool.request();
396
- checkTableRequest.input("tableName", this.getTableName(tableName).replace(/[[\]]/g, "").split(".").pop());
397
- const checkTableSql = `SELECT 1 AS found FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = @schema AND TABLE_NAME = @tableName`;
398
- checkTableRequest.input("schema", this.schema || "dbo");
399
- const checkTableResult = await checkTableRequest.query(checkTableSql);
400
- const tableExists = Array.isArray(checkTableResult.recordset) && checkTableResult.recordset.length > 0;
401
- if (!tableExists) {
402
- const createSql = `CREATE TABLE ${this.getTableName(tableName)} (
403
- ${columns}
404
- )`;
405
- await this.pool.request().query(createSql);
406
- }
407
- const columnCheckSql = `
408
- SELECT 1 AS found
409
- FROM INFORMATION_SCHEMA.COLUMNS
410
- WHERE TABLE_SCHEMA = @schema AND TABLE_NAME = @tableName AND COLUMN_NAME = 'seq_id'
411
- `;
412
- const checkColumnRequest = this.pool.request();
413
- checkColumnRequest.input("schema", this.schema || "dbo");
414
- checkColumnRequest.input("tableName", this.getTableName(tableName).replace(/[[\]]/g, "").split(".").pop());
415
- const columnResult = await checkColumnRequest.query(columnCheckSql);
416
- const columnExists = Array.isArray(columnResult.recordset) && columnResult.recordset.length > 0;
417
- if (!columnExists) {
418
- const alterSql = `ALTER TABLE ${this.getTableName(tableName)} ADD seq_id BIGINT IDENTITY(1,1)`;
419
- await this.pool.request().query(alterSql);
420
- }
421
- if (tableName === storage.TABLE_WORKFLOW_SNAPSHOT) {
422
- const constraintName = "mastra_workflow_snapshot_workflow_name_run_id_key";
423
- const checkConstraintSql = `SELECT 1 AS found FROM sys.key_constraints WHERE name = @constraintName`;
424
- const checkConstraintRequest = this.pool.request();
425
- checkConstraintRequest.input("constraintName", constraintName);
426
- const constraintResult = await checkConstraintRequest.query(checkConstraintSql);
427
- const constraintExists = Array.isArray(constraintResult.recordset) && constraintResult.recordset.length > 0;
428
- if (!constraintExists) {
429
- const addConstraintSql = `ALTER TABLE ${this.getTableName(tableName)} ADD CONSTRAINT ${constraintName} UNIQUE ([workflow_name], [run_id])`;
430
- await this.pool.request().query(addConstraintSql);
431
- }
432
377
  }
378
+ return {
379
+ ...thread,
380
+ metadata: typeof thread.metadata === "string" ? JSON.parse(thread.metadata) : thread.metadata,
381
+ createdAt: thread.createdAt,
382
+ updatedAt: thread.updatedAt
383
+ };
433
384
  } catch (error$1) {
434
385
  throw new error.MastraError(
435
386
  {
436
- id: "MASTRA_STORAGE_MSSQL_STORE_CREATE_TABLE_FAILED",
387
+ id: storage.createStorageErrorId("MSSQL", "UPDATE_THREAD", "FAILED"),
437
388
  domain: error.ErrorDomain.STORAGE,
438
389
  category: error.ErrorCategory.THIRD_PARTY,
439
390
  details: {
440
- tableName
391
+ threadId: id,
392
+ title
441
393
  }
442
394
  },
443
395
  error$1
444
396
  );
445
397
  }
446
398
  }
447
- getDefaultValue(type) {
448
- switch (type) {
449
- case "timestamp":
450
- return "DEFAULT SYSDATETIMEOFFSET()";
451
- case "jsonb":
452
- return "DEFAULT N'{}'";
453
- default:
454
- return super.getDefaultValue(type);
455
- }
456
- }
457
- async alterTable({
458
- tableName,
459
- schema,
460
- ifNotExists
461
- }) {
462
- const fullTableName = this.getTableName(tableName);
399
+ async deleteThread({ threadId }) {
400
+ const messagesTable = getTableName({ indexName: storage.TABLE_MESSAGES, schemaName: getSchemaName(this.schema) });
401
+ const threadsTable = getTableName({ indexName: storage.TABLE_THREADS, schemaName: getSchemaName(this.schema) });
402
+ const deleteMessagesSql = `DELETE FROM ${messagesTable} WHERE [thread_id] = @threadId`;
403
+ const deleteThreadSql = `DELETE FROM ${threadsTable} WHERE id = @threadId`;
404
+ const tx = this.pool.transaction();
463
405
  try {
464
- for (const columnName of ifNotExists) {
465
- if (schema[columnName]) {
466
- const columnCheckRequest = this.pool.request();
467
- columnCheckRequest.input("tableName", fullTableName.replace(/[[\]]/g, "").split(".").pop());
468
- columnCheckRequest.input("columnName", columnName);
469
- columnCheckRequest.input("schema", this.schema || "dbo");
470
- const checkSql = `SELECT 1 AS found FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = @schema AND TABLE_NAME = @tableName AND COLUMN_NAME = @columnName`;
471
- const checkResult = await columnCheckRequest.query(checkSql);
472
- const columnExists = Array.isArray(checkResult.recordset) && checkResult.recordset.length > 0;
473
- if (!columnExists) {
474
- const columnDef = schema[columnName];
475
- const sqlType = this.getSqlType(columnDef.type);
476
- const nullable = columnDef.nullable === false ? "NOT NULL" : "";
477
- const defaultValue = columnDef.nullable === false ? this.getDefaultValue(columnDef.type) : "";
478
- const parsedColumnName = utils.parseSqlIdentifier(columnName, "column name");
479
- const alterSql = `ALTER TABLE ${fullTableName} ADD [${parsedColumnName}] ${sqlType} ${nullable} ${defaultValue}`.trim();
480
- await this.pool.request().query(alterSql);
481
- this.logger?.debug?.(`Ensured column ${parsedColumnName} exists in table ${fullTableName}`);
482
- }
483
- }
484
- }
406
+ await tx.begin();
407
+ const req = tx.request();
408
+ req.input("threadId", threadId);
409
+ await req.query(deleteMessagesSql);
410
+ await req.query(deleteThreadSql);
411
+ await tx.commit();
485
412
  } catch (error$1) {
413
+ await tx.rollback().catch(() => {
414
+ });
486
415
  throw new error.MastraError(
487
416
  {
488
- id: "MASTRA_STORAGE_MSSQL_STORE_ALTER_TABLE_FAILED",
417
+ id: storage.createStorageErrorId("MSSQL", "DELETE_THREAD", "FAILED"),
489
418
  domain: error.ErrorDomain.STORAGE,
490
419
  category: error.ErrorCategory.THIRD_PARTY,
491
420
  details: {
492
- tableName
421
+ threadId
493
422
  }
494
423
  },
495
424
  error$1
496
425
  );
497
426
  }
498
427
  }
499
- async clearTable({ tableName }) {
500
- const fullTableName = this.getTableName(tableName);
428
+ async _getIncludedMessages({ include }) {
429
+ if (!include || include.length === 0) return null;
430
+ const unionQueries = [];
431
+ const paramValues = [];
432
+ let paramIdx = 1;
433
+ const paramNames = [];
434
+ const tableName = getTableName({ indexName: storage.TABLE_MESSAGES, schemaName: getSchemaName(this.schema) });
435
+ for (const inc of include) {
436
+ const { id, withPreviousMessages = 0, withNextMessages = 0 } = inc;
437
+ const pId = `@p${paramIdx}`;
438
+ const pPrev = `@p${paramIdx + 1}`;
439
+ const pNext = `@p${paramIdx + 2}`;
440
+ unionQueries.push(
441
+ `
442
+ SELECT
443
+ m.id,
444
+ m.content,
445
+ m.role,
446
+ m.type,
447
+ m.[createdAt],
448
+ m.thread_id AS threadId,
449
+ m.[resourceId],
450
+ m.seq_id
451
+ FROM (
452
+ SELECT *, ROW_NUMBER() OVER (ORDER BY [createdAt] ASC) as row_num
453
+ FROM ${tableName}
454
+ WHERE [thread_id] = (SELECT thread_id FROM ${tableName} WHERE id = ${pId})
455
+ ) AS m
456
+ WHERE m.id = ${pId}
457
+ OR EXISTS (
458
+ SELECT 1
459
+ FROM (
460
+ SELECT *, ROW_NUMBER() OVER (ORDER BY [createdAt] ASC) as row_num
461
+ FROM ${tableName}
462
+ WHERE [thread_id] = (SELECT thread_id FROM ${tableName} WHERE id = ${pId})
463
+ ) AS target
464
+ WHERE target.id = ${pId}
465
+ AND (
466
+ -- Get previous messages (messages that come BEFORE the target)
467
+ (m.row_num < target.row_num AND m.row_num >= target.row_num - ${pPrev})
468
+ OR
469
+ -- Get next messages (messages that come AFTER the target)
470
+ (m.row_num > target.row_num AND m.row_num <= target.row_num + ${pNext})
471
+ )
472
+ )
473
+ `
474
+ );
475
+ paramValues.push(id, withPreviousMessages, withNextMessages);
476
+ paramNames.push(`p${paramIdx}`, `p${paramIdx + 1}`, `p${paramIdx + 2}`);
477
+ paramIdx += 3;
478
+ }
479
+ const finalQuery = `
480
+ SELECT * FROM (
481
+ ${unionQueries.join(" UNION ALL ")}
482
+ ) AS union_result
483
+ ORDER BY [seq_id] ASC
484
+ `;
485
+ const req = this.pool.request();
486
+ for (let i = 0; i < paramValues.length; ++i) {
487
+ req.input(paramNames[i], paramValues[i]);
488
+ }
489
+ const result = await req.query(finalQuery);
490
+ const includedRows = result.recordset || [];
491
+ const seen = /* @__PURE__ */ new Set();
492
+ const dedupedRows = includedRows.filter((row) => {
493
+ if (seen.has(row.id)) return false;
494
+ seen.add(row.id);
495
+ return true;
496
+ });
497
+ return dedupedRows;
498
+ }
499
+ async listMessagesById({ messageIds }) {
500
+ if (messageIds.length === 0) return { messages: [] };
501
+ const selectStatement = `SELECT seq_id, id, content, role, type, [createdAt], thread_id AS threadId, resourceId`;
502
+ const orderByStatement = `ORDER BY [seq_id] DESC`;
501
503
  try {
502
- const fkQuery = `
503
- SELECT
504
- OBJECT_SCHEMA_NAME(fk.parent_object_id) AS schema_name,
505
- OBJECT_NAME(fk.parent_object_id) AS table_name
506
- FROM sys.foreign_keys fk
507
- WHERE fk.referenced_object_id = OBJECT_ID(@fullTableName)
508
- `;
509
- const fkResult = await this.pool.request().input("fullTableName", fullTableName).query(fkQuery);
510
- const childTables = fkResult.recordset || [];
511
- for (const child of childTables) {
512
- const childTableName = this.schema ? `[${child.schema_name}].[${child.table_name}]` : `[${child.table_name}]`;
513
- await this.clearTable({ tableName: childTableName });
514
- }
515
- await this.pool.request().query(`TRUNCATE TABLE ${fullTableName}`);
504
+ let rows = [];
505
+ let query = `${selectStatement} FROM ${getTableName({ indexName: storage.TABLE_MESSAGES, schemaName: getSchemaName(this.schema) })} WHERE [id] IN (${messageIds.map((_, i) => `@id${i}`).join(", ")})`;
506
+ const request = this.pool.request();
507
+ messageIds.forEach((id, i) => request.input(`id${i}`, id));
508
+ query += ` ${orderByStatement}`;
509
+ const result = await request.query(query);
510
+ const remainingRows = result.recordset || [];
511
+ rows.push(...remainingRows);
512
+ rows.sort((a, b) => {
513
+ const timeDiff = a.seq_id - b.seq_id;
514
+ return timeDiff;
515
+ });
516
+ const messagesWithParsedContent = rows.map((row) => {
517
+ if (typeof row.content === "string") {
518
+ try {
519
+ return { ...row, content: JSON.parse(row.content) };
520
+ } catch {
521
+ return row;
522
+ }
523
+ }
524
+ return row;
525
+ });
526
+ const cleanMessages = messagesWithParsedContent.map(({ seq_id, ...rest }) => rest);
527
+ const list = new agent.MessageList().add(cleanMessages, "memory");
528
+ return { messages: list.get.all.db() };
516
529
  } catch (error$1) {
517
- throw new error.MastraError(
530
+ const mastraError = new error.MastraError(
518
531
  {
519
- id: "MASTRA_STORAGE_MSSQL_STORE_CLEAR_TABLE_FAILED",
532
+ id: storage.createStorageErrorId("MSSQL", "LIST_MESSAGES_BY_ID", "FAILED"),
520
533
  domain: error.ErrorDomain.STORAGE,
521
534
  category: error.ErrorCategory.THIRD_PARTY,
522
535
  details: {
523
- tableName
536
+ messageIds: JSON.stringify(messageIds)
524
537
  }
525
538
  },
526
539
  error$1
527
540
  );
541
+ this.logger?.error?.(mastraError.toString());
542
+ this.logger?.trackException?.(mastraError);
543
+ return { messages: [] };
528
544
  }
529
545
  }
530
- async insert({ tableName, record }) {
531
- try {
532
- const columns = Object.keys(record).map((col) => utils.parseSqlIdentifier(col, "column name"));
533
- const values = Object.values(record);
534
- const paramNames = values.map((_, i) => `@param${i}`);
535
- const insertSql = `INSERT INTO ${this.getTableName(tableName)} (${columns.map((c) => `[${c}]`).join(", ")}) VALUES (${paramNames.join(", ")})`;
536
- const request = this.pool.request();
537
- values.forEach((value, i) => {
538
- if (value instanceof Date) {
539
- request.input(`param${i}`, sql__default.default.DateTime2, value);
540
- } else if (typeof value === "object" && value !== null) {
541
- request.input(`param${i}`, JSON.stringify(value));
542
- } else {
543
- request.input(`param${i}`, value);
544
- }
545
- });
546
- await request.query(insertSql);
547
- } catch (error$1) {
546
+ async listMessages(args) {
547
+ const { threadId, resourceId, include, filter, perPage: perPageInput, page = 0, orderBy } = args;
548
+ const threadIds = Array.isArray(threadId) ? threadId : [threadId];
549
+ if (threadIds.length === 0 || threadIds.some((id) => !id.trim())) {
548
550
  throw new error.MastraError(
549
551
  {
550
- id: "MASTRA_STORAGE_MSSQL_STORE_INSERT_FAILED",
552
+ id: storage.createStorageErrorId("MSSQL", "LIST_MESSAGES", "INVALID_THREAD_ID"),
551
553
  domain: error.ErrorDomain.STORAGE,
552
554
  category: error.ErrorCategory.THIRD_PARTY,
553
- details: {
554
- tableName
555
- }
555
+ details: { threadId: Array.isArray(threadId) ? threadId.join(",") : threadId }
556
556
  },
557
- error$1
557
+ new Error("threadId must be a non-empty string or array of non-empty strings")
558
558
  );
559
559
  }
560
- }
561
- async load({ tableName, keys }) {
562
- try {
563
- const keyEntries = Object.entries(keys).map(([key, value]) => [utils.parseSqlIdentifier(key, "column name"), value]);
564
- const conditions = keyEntries.map(([key], i) => `[${key}] = @param${i}`).join(" AND ");
565
- const values = keyEntries.map(([_, value]) => value);
566
- const sql2 = `SELECT * FROM ${this.getTableName(tableName)} WHERE ${conditions}`;
567
- const request = this.pool.request();
568
- values.forEach((value, i) => {
569
- request.input(`param${i}`, value);
570
- });
571
- const resultSet = await request.query(sql2);
572
- const result = resultSet.recordset[0] || null;
573
- if (!result) {
574
- return null;
575
- }
576
- if (tableName === storage.TABLE_WORKFLOW_SNAPSHOT) {
577
- const snapshot = result;
578
- if (typeof snapshot.snapshot === "string") {
579
- snapshot.snapshot = JSON.parse(snapshot.snapshot);
560
+ if (page < 0) {
561
+ throw new error.MastraError({
562
+ id: storage.createStorageErrorId("MSSQL", "LIST_MESSAGES", "INVALID_PAGE"),
563
+ domain: error.ErrorDomain.STORAGE,
564
+ category: error.ErrorCategory.USER,
565
+ text: "Page number must be non-negative",
566
+ details: {
567
+ threadId: Array.isArray(threadId) ? threadId.join(",") : threadId,
568
+ page
580
569
  }
581
- return snapshot;
582
- }
583
- return result;
584
- } catch (error$1) {
585
- throw new error.MastraError(
586
- {
587
- id: "MASTRA_STORAGE_MSSQL_STORE_LOAD_FAILED",
588
- domain: error.ErrorDomain.STORAGE,
589
- category: error.ErrorCategory.THIRD_PARTY,
590
- details: {
591
- tableName
592
- }
593
- },
594
- error$1
595
- );
570
+ });
596
571
  }
597
- }
598
- async getThreadById({ threadId }) {
572
+ const perPage = storage.normalizePerPage(perPageInput, 40);
573
+ const { offset, perPage: perPageForResponse } = storage.calculatePagination(page, perPageInput, perPage);
599
574
  try {
600
- const sql2 = `SELECT
601
- id,
602
- [resourceId],
603
- title,
604
- metadata,
605
- [createdAt],
606
- [updatedAt]
607
- FROM ${this.getTableName(storage.TABLE_THREADS)}
608
- WHERE id = @threadId`;
609
- const request = this.pool.request();
610
- request.input("threadId", threadId);
611
- const resultSet = await request.query(sql2);
612
- const thread = resultSet.recordset[0] || null;
613
- if (!thread) {
614
- return null;
615
- }
616
- return {
617
- ...thread,
618
- metadata: typeof thread.metadata === "string" ? JSON.parse(thread.metadata) : thread.metadata,
619
- createdAt: thread.createdAt,
620
- updatedAt: thread.updatedAt
575
+ const { field, direction } = this.parseOrderBy(orderBy, "ASC");
576
+ const orderByStatement = `ORDER BY [${field}] ${direction}, [seq_id] ${direction}`;
577
+ const tableName = getTableName({ indexName: storage.TABLE_MESSAGES, schemaName: getSchemaName(this.schema) });
578
+ const baseQuery = `SELECT seq_id, id, content, role, type, [createdAt], thread_id AS threadId, resourceId FROM ${tableName}`;
579
+ const filters = {
580
+ thread_id: threadIds.length === 1 ? threadIds[0] : { $in: threadIds },
581
+ ...resourceId ? { resourceId } : {},
582
+ ...buildDateRangeFilter(filter?.dateRange, "createdAt")
583
+ };
584
+ const { sql: actualWhereClause = "", params: whereParams } = prepareWhereClause(
585
+ filters);
586
+ const bindWhereParams = (req) => {
587
+ Object.entries(whereParams).forEach(([paramName, paramValue]) => req.input(paramName, paramValue));
621
588
  };
622
- } catch (error$1) {
623
- throw new error.MastraError(
624
- {
625
- id: "MASTRA_STORAGE_MSSQL_STORE_GET_THREAD_BY_ID_FAILED",
626
- domain: error.ErrorDomain.STORAGE,
627
- category: error.ErrorCategory.THIRD_PARTY,
628
- details: {
629
- threadId
630
- }
631
- },
632
- error$1
633
- );
634
- }
635
- }
636
- async getThreadsByResourceIdPaginated(args) {
637
- const { resourceId, page = 0, perPage: perPageInput } = args;
638
- try {
639
- const perPage = perPageInput !== void 0 ? perPageInput : 100;
640
- const currentOffset = page * perPage;
641
- const baseQuery = `FROM ${this.getTableName(storage.TABLE_THREADS)} WHERE [resourceId] = @resourceId`;
642
- const countQuery = `SELECT COUNT(*) as count ${baseQuery}`;
643
589
  const countRequest = this.pool.request();
644
- countRequest.input("resourceId", resourceId);
645
- const countResult = await countRequest.query(countQuery);
646
- const total = parseInt(countResult.recordset[0]?.count ?? "0", 10);
647
- if (total === 0) {
590
+ bindWhereParams(countRequest);
591
+ const countResult = await countRequest.query(`SELECT COUNT(*) as total FROM ${tableName}${actualWhereClause}`);
592
+ const total = parseInt(countResult.recordset[0]?.total, 10) || 0;
593
+ const fetchBaseMessages = async () => {
594
+ const request = this.pool.request();
595
+ bindWhereParams(request);
596
+ if (perPageInput === false) {
597
+ const result2 = await request.query(`${baseQuery}${actualWhereClause} ${orderByStatement}`);
598
+ return result2.recordset || [];
599
+ }
600
+ request.input("offset", offset);
601
+ request.input("limit", perPage > 2147483647 ? sql2__default.default.BigInt : sql2__default.default.Int, perPage);
602
+ const result = await request.query(
603
+ `${baseQuery}${actualWhereClause} ${orderByStatement} OFFSET @offset ROWS FETCH NEXT @limit ROWS ONLY`
604
+ );
605
+ return result.recordset || [];
606
+ };
607
+ const baseRows = perPage === 0 ? [] : await fetchBaseMessages();
608
+ const messages = [...baseRows];
609
+ const seqById = /* @__PURE__ */ new Map();
610
+ messages.forEach((msg) => {
611
+ if (typeof msg.seq_id === "number") seqById.set(msg.id, msg.seq_id);
612
+ });
613
+ if (total === 0 && messages.length === 0 && (!include || include.length === 0)) {
648
614
  return {
649
- threads: [],
615
+ messages: [],
650
616
  total: 0,
651
617
  page,
652
- perPage,
618
+ perPage: perPageForResponse,
653
619
  hasMore: false
654
620
  };
655
621
  }
656
- const dataQuery = `SELECT id, [resourceId], title, metadata, [createdAt], [updatedAt] ${baseQuery} ORDER BY [seq_id] DESC OFFSET @offset ROWS FETCH NEXT @perPage ROWS ONLY`;
657
- const dataRequest = this.pool.request();
658
- dataRequest.input("resourceId", resourceId);
659
- dataRequest.input("perPage", perPage);
660
- dataRequest.input("offset", currentOffset);
661
- const rowsResult = await dataRequest.query(dataQuery);
662
- const rows = rowsResult.recordset || [];
663
- const threads = rows.map((thread) => ({
664
- ...thread,
665
- metadata: typeof thread.metadata === "string" ? JSON.parse(thread.metadata) : thread.metadata,
666
- createdAt: thread.createdAt,
667
- updatedAt: thread.updatedAt
668
- }));
622
+ if (include?.length) {
623
+ const messageIds = new Set(messages.map((m) => m.id));
624
+ const includeMessages = await this._getIncludedMessages({ include });
625
+ includeMessages?.forEach((msg) => {
626
+ if (!messageIds.has(msg.id)) {
627
+ messages.push(msg);
628
+ messageIds.add(msg.id);
629
+ if (typeof msg.seq_id === "number") seqById.set(msg.id, msg.seq_id);
630
+ }
631
+ });
632
+ }
633
+ const parsed = this._parseAndFormatMessages(messages, "v2");
634
+ const mult = direction === "ASC" ? 1 : -1;
635
+ const finalMessages = parsed.sort((a, b) => {
636
+ const aVal = field === "createdAt" ? new Date(a.createdAt).getTime() : a[field];
637
+ const bVal = field === "createdAt" ? new Date(b.createdAt).getTime() : b[field];
638
+ if (aVal == null || bVal == null) {
639
+ return aVal == null && bVal == null ? a.id.localeCompare(b.id) : aVal == null ? 1 : -1;
640
+ }
641
+ const diff = (typeof aVal === "number" && typeof bVal === "number" ? aVal - bVal : String(aVal).localeCompare(String(bVal))) * mult;
642
+ if (diff !== 0) return diff;
643
+ const seqA = seqById.get(a.id);
644
+ const seqB = seqById.get(b.id);
645
+ return seqA != null && seqB != null ? (seqA - seqB) * mult : a.id.localeCompare(b.id);
646
+ });
647
+ const threadIdSet = new Set(threadIds);
648
+ const returnedThreadMessageCount = finalMessages.filter((m) => m.threadId && threadIdSet.has(m.threadId)).length;
649
+ const hasMore = perPageInput !== false && returnedThreadMessageCount < total && offset + perPage < total;
669
650
  return {
670
- threads,
651
+ messages: finalMessages,
671
652
  total,
672
653
  page,
673
- perPage,
674
- hasMore: currentOffset + threads.length < total
654
+ perPage: perPageForResponse,
655
+ hasMore
675
656
  };
676
657
  } catch (error$1) {
677
658
  const mastraError = new error.MastraError(
678
659
  {
679
- id: "MASTRA_STORAGE_MSSQL_STORE_GET_THREADS_BY_RESOURCE_ID_PAGINATED_FAILED",
660
+ id: storage.createStorageErrorId("MSSQL", "LIST_MESSAGES", "FAILED"),
680
661
  domain: error.ErrorDomain.STORAGE,
681
662
  category: error.ErrorCategory.THIRD_PARTY,
682
663
  details: {
683
- resourceId,
684
- page
664
+ threadId: Array.isArray(threadId) ? threadId.join(",") : threadId,
665
+ resourceId: resourceId ?? ""
685
666
  }
686
667
  },
687
668
  error$1
688
669
  );
689
670
  this.logger?.error?.(mastraError.toString());
690
671
  this.logger?.trackException?.(mastraError);
691
- return { threads: [], total: 0, page, perPage: perPageInput || 100, hasMore: false };
672
+ return {
673
+ messages: [],
674
+ total: 0,
675
+ page,
676
+ perPage: perPageForResponse,
677
+ hasMore: false
678
+ };
692
679
  }
693
680
  }
694
- async saveThread({ thread }) {
681
+ async saveMessages({ messages }) {
682
+ if (messages.length === 0) return { messages: [] };
683
+ const threadId = messages[0]?.threadId;
684
+ if (!threadId) {
685
+ throw new error.MastraError({
686
+ id: storage.createStorageErrorId("MSSQL", "SAVE_MESSAGES", "INVALID_THREAD_ID"),
687
+ domain: error.ErrorDomain.STORAGE,
688
+ category: error.ErrorCategory.THIRD_PARTY,
689
+ text: `Thread ID is required`
690
+ });
691
+ }
692
+ const thread = await this.getThreadById({ threadId });
693
+ if (!thread) {
694
+ throw new error.MastraError({
695
+ id: storage.createStorageErrorId("MSSQL", "SAVE_MESSAGES", "THREAD_NOT_FOUND"),
696
+ domain: error.ErrorDomain.STORAGE,
697
+ category: error.ErrorCategory.THIRD_PARTY,
698
+ text: `Thread ${threadId} not found`,
699
+ details: { threadId }
700
+ });
701
+ }
702
+ const tableMessages = getTableName({ indexName: storage.TABLE_MESSAGES, schemaName: getSchemaName(this.schema) });
703
+ const tableThreads = getTableName({ indexName: storage.TABLE_THREADS, schemaName: getSchemaName(this.schema) });
695
704
  try {
696
- const table = this.getTableName(storage.TABLE_THREADS);
697
- const mergeSql = `MERGE INTO ${table} WITH (HOLDLOCK) AS target
698
- USING (SELECT @id AS id) AS source
699
- ON (target.id = source.id)
700
- WHEN MATCHED THEN
701
- UPDATE SET
702
- [resourceId] = @resourceId,
703
- title = @title,
704
- metadata = @metadata,
705
- [createdAt] = @createdAt,
706
- [updatedAt] = @updatedAt
707
- WHEN NOT MATCHED THEN
708
- INSERT (id, [resourceId], title, metadata, [createdAt], [updatedAt])
709
- VALUES (@id, @resourceId, @title, @metadata, @createdAt, @updatedAt);`;
710
- const req = this.pool.request();
711
- req.input("id", thread.id);
712
- req.input("resourceId", thread.resourceId);
713
- req.input("title", thread.title);
714
- req.input("metadata", thread.metadata ? JSON.stringify(thread.metadata) : null);
715
- req.input("createdAt", thread.createdAt);
716
- req.input("updatedAt", thread.updatedAt);
717
- await req.query(mergeSql);
718
- return thread;
705
+ const transaction = this.pool.transaction();
706
+ await transaction.begin();
707
+ try {
708
+ for (const message of messages) {
709
+ if (!message.threadId) {
710
+ throw new Error(
711
+ `Expected to find a threadId for message, but couldn't find one. An unexpected error has occurred.`
712
+ );
713
+ }
714
+ if (!message.resourceId) {
715
+ throw new Error(
716
+ `Expected to find a resourceId for message, but couldn't find one. An unexpected error has occurred.`
717
+ );
718
+ }
719
+ const request = transaction.request();
720
+ request.input("id", message.id);
721
+ request.input("thread_id", message.threadId);
722
+ request.input(
723
+ "content",
724
+ typeof message.content === "string" ? message.content : JSON.stringify(message.content)
725
+ );
726
+ request.input("createdAt", sql2__default.default.DateTime2, message.createdAt);
727
+ request.input("role", message.role);
728
+ request.input("type", message.type || "v2");
729
+ request.input("resourceId", message.resourceId);
730
+ const mergeSql = `MERGE INTO ${tableMessages} AS target
731
+ USING (SELECT @id AS id) AS src
732
+ ON target.id = src.id
733
+ WHEN MATCHED THEN UPDATE SET
734
+ thread_id = @thread_id,
735
+ content = @content,
736
+ [createdAt] = @createdAt,
737
+ role = @role,
738
+ type = @type,
739
+ resourceId = @resourceId
740
+ WHEN NOT MATCHED THEN INSERT (id, thread_id, content, [createdAt], role, type, resourceId)
741
+ VALUES (@id, @thread_id, @content, @createdAt, @role, @type, @resourceId);`;
742
+ await request.query(mergeSql);
743
+ }
744
+ const threadReq = transaction.request();
745
+ threadReq.input("updatedAt", sql2__default.default.DateTime2, /* @__PURE__ */ new Date());
746
+ threadReq.input("id", threadId);
747
+ await threadReq.query(`UPDATE ${tableThreads} SET [updatedAt] = @updatedAt WHERE id = @id`);
748
+ await transaction.commit();
749
+ } catch (error) {
750
+ await transaction.rollback();
751
+ throw error;
752
+ }
753
+ const messagesWithParsedContent = messages.map((message) => {
754
+ if (typeof message.content === "string") {
755
+ try {
756
+ return { ...message, content: JSON.parse(message.content) };
757
+ } catch {
758
+ return message;
759
+ }
760
+ }
761
+ return message;
762
+ });
763
+ const list = new agent.MessageList().add(messagesWithParsedContent, "memory");
764
+ return { messages: list.get.all.db() };
719
765
  } catch (error$1) {
720
766
  throw new error.MastraError(
721
767
  {
722
- id: "MASTRA_STORAGE_MSSQL_STORE_SAVE_THREAD_FAILED",
768
+ id: storage.createStorageErrorId("MSSQL", "SAVE_MESSAGES", "FAILED"),
723
769
  domain: error.ErrorDomain.STORAGE,
724
770
  category: error.ErrorCategory.THIRD_PARTY,
725
- details: {
726
- threadId: thread.id
727
- }
771
+ details: { threadId }
728
772
  },
729
773
  error$1
730
774
  );
731
775
  }
732
776
  }
733
- /**
734
- * @deprecated use getThreadsByResourceIdPaginated instead
735
- */
736
- async getThreadsByResourceId(args) {
737
- const { resourceId } = args;
738
- try {
739
- const baseQuery = `FROM ${this.getTableName(storage.TABLE_THREADS)} WHERE [resourceId] = @resourceId`;
740
- const dataQuery = `SELECT id, [resourceId], title, metadata, [createdAt], [updatedAt] ${baseQuery} ORDER BY [seq_id] DESC`;
741
- const request = this.pool.request();
742
- request.input("resourceId", resourceId);
743
- const resultSet = await request.query(dataQuery);
744
- const rows = resultSet.recordset || [];
745
- return rows.map((thread) => ({
746
- ...thread,
747
- metadata: typeof thread.metadata === "string" ? JSON.parse(thread.metadata) : thread.metadata,
748
- createdAt: thread.createdAt,
749
- updatedAt: thread.updatedAt
750
- }));
751
- } catch (error) {
752
- this.logger?.error?.(`Error getting threads for resource ${resourceId}:`, error);
777
+ async updateMessages({
778
+ messages
779
+ }) {
780
+ if (!messages || messages.length === 0) {
753
781
  return [];
754
782
  }
755
- }
756
- /**
757
- * Updates a thread's title and metadata, merging with existing metadata. Returns the updated thread.
758
- */
759
- async updateThread({
760
- id,
761
- title,
762
- metadata
763
- }) {
764
- const existingThread = await this.getThreadById({ threadId: id });
765
- if (!existingThread) {
766
- throw new error.MastraError({
767
- id: "MASTRA_STORAGE_MSSQL_STORE_UPDATE_THREAD_FAILED",
768
- domain: error.ErrorDomain.STORAGE,
769
- category: error.ErrorCategory.USER,
770
- text: `Thread ${id} not found`,
771
- details: {
772
- threadId: id,
773
- title
774
- }
775
- });
783
+ const messageIds = messages.map((m) => m.id);
784
+ const idParams = messageIds.map((_, i) => `@id${i}`).join(", ");
785
+ let selectQuery = `SELECT id, content, role, type, createdAt, thread_id AS threadId, resourceId FROM ${getTableName({ indexName: storage.TABLE_MESSAGES, schemaName: getSchemaName(this.schema) })}`;
786
+ if (idParams.length > 0) {
787
+ selectQuery += ` WHERE id IN (${idParams})`;
788
+ } else {
789
+ return [];
776
790
  }
777
- const mergedMetadata = {
778
- ...existingThread.metadata,
779
- ...metadata
780
- };
791
+ const selectReq = this.pool.request();
792
+ messageIds.forEach((id, i) => selectReq.input(`id${i}`, id));
793
+ const existingMessagesDb = (await selectReq.query(selectQuery)).recordset;
794
+ if (!existingMessagesDb || existingMessagesDb.length === 0) {
795
+ return [];
796
+ }
797
+ const existingMessages = existingMessagesDb.map((msg) => {
798
+ if (typeof msg.content === "string") {
799
+ try {
800
+ msg.content = JSON.parse(msg.content);
801
+ } catch {
802
+ }
803
+ }
804
+ return msg;
805
+ });
806
+ const threadIdsToUpdate = /* @__PURE__ */ new Set();
807
+ const transaction = this.pool.transaction();
781
808
  try {
782
- const table = this.getTableName(storage.TABLE_THREADS);
783
- const sql2 = `UPDATE ${table}
784
- SET title = @title,
785
- metadata = @metadata,
786
- [updatedAt] = @updatedAt
787
- OUTPUT INSERTED.*
788
- WHERE id = @id`;
789
- const req = this.pool.request();
790
- req.input("id", id);
791
- req.input("title", title);
792
- req.input("metadata", JSON.stringify(mergedMetadata));
793
- req.input("updatedAt", (/* @__PURE__ */ new Date()).toISOString());
794
- const result = await req.query(sql2);
795
- let thread = result.recordset && result.recordset[0];
796
- if (thread && "seq_id" in thread) {
797
- const { seq_id, ...rest } = thread;
798
- thread = rest;
799
- }
800
- if (!thread) {
801
- throw new error.MastraError({
802
- id: "MASTRA_STORAGE_MSSQL_STORE_UPDATE_THREAD_FAILED",
803
- domain: error.ErrorDomain.STORAGE,
804
- category: error.ErrorCategory.USER,
805
- text: `Thread ${id} not found after update`,
806
- details: {
807
- threadId: id,
808
- title
809
+ await transaction.begin();
810
+ for (const existingMessage of existingMessages) {
811
+ const updatePayload = messages.find((m) => m.id === existingMessage.id);
812
+ if (!updatePayload) continue;
813
+ const { id, ...fieldsToUpdate } = updatePayload;
814
+ if (Object.keys(fieldsToUpdate).length === 0) continue;
815
+ threadIdsToUpdate.add(existingMessage.threadId);
816
+ if (updatePayload.threadId && updatePayload.threadId !== existingMessage.threadId) {
817
+ threadIdsToUpdate.add(updatePayload.threadId);
818
+ }
819
+ const setClauses = [];
820
+ const req = transaction.request();
821
+ req.input("id", id);
822
+ const columnMapping = { threadId: "thread_id" };
823
+ const updatableFields = { ...fieldsToUpdate };
824
+ if (updatableFields.content) {
825
+ const newContent = {
826
+ ...existingMessage.content,
827
+ ...updatableFields.content,
828
+ ...existingMessage.content?.metadata && updatableFields.content.metadata ? { metadata: { ...existingMessage.content.metadata, ...updatableFields.content.metadata } } : {}
829
+ };
830
+ setClauses.push(`content = @content`);
831
+ req.input("content", JSON.stringify(newContent));
832
+ delete updatableFields.content;
833
+ }
834
+ for (const key in updatableFields) {
835
+ if (Object.prototype.hasOwnProperty.call(updatableFields, key)) {
836
+ const dbColumn = columnMapping[key] || key;
837
+ setClauses.push(`[${dbColumn}] = @${dbColumn}`);
838
+ req.input(dbColumn, updatableFields[key]);
809
839
  }
810
- });
840
+ }
841
+ if (setClauses.length > 0) {
842
+ const updateSql = `UPDATE ${getTableName({ indexName: storage.TABLE_MESSAGES, schemaName: getSchemaName(this.schema) })} SET ${setClauses.join(", ")} WHERE id = @id`;
843
+ await req.query(updateSql);
844
+ }
811
845
  }
812
- return {
813
- ...thread,
814
- metadata: typeof thread.metadata === "string" ? JSON.parse(thread.metadata) : thread.metadata,
815
- createdAt: thread.createdAt,
816
- updatedAt: thread.updatedAt
817
- };
818
- } catch (error$1) {
819
- throw new error.MastraError(
820
- {
821
- id: "MASTRA_STORAGE_MSSQL_STORE_UPDATE_THREAD_FAILED",
822
- domain: error.ErrorDomain.STORAGE,
823
- category: error.ErrorCategory.THIRD_PARTY,
824
- details: {
825
- threadId: id,
826
- title
827
- }
828
- },
829
- error$1
830
- );
831
- }
832
- }
833
- async deleteThread({ threadId }) {
834
- const messagesTable = this.getTableName(storage.TABLE_MESSAGES);
835
- const threadsTable = this.getTableName(storage.TABLE_THREADS);
836
- const deleteMessagesSql = `DELETE FROM ${messagesTable} WHERE [thread_id] = @threadId`;
837
- const deleteThreadSql = `DELETE FROM ${threadsTable} WHERE id = @threadId`;
838
- const tx = this.pool.transaction();
839
- try {
840
- await tx.begin();
841
- const req = tx.request();
842
- req.input("threadId", threadId);
843
- await req.query(deleteMessagesSql);
844
- await req.query(deleteThreadSql);
845
- await tx.commit();
846
+ if (threadIdsToUpdate.size > 0) {
847
+ const threadIdParams = Array.from(threadIdsToUpdate).map((_, i) => `@tid${i}`).join(", ");
848
+ const threadReq = transaction.request();
849
+ Array.from(threadIdsToUpdate).forEach((tid, i) => threadReq.input(`tid${i}`, tid));
850
+ threadReq.input("updatedAt", (/* @__PURE__ */ new Date()).toISOString());
851
+ const threadSql = `UPDATE ${getTableName({ indexName: storage.TABLE_THREADS, schemaName: getSchemaName(this.schema) })} SET updatedAt = @updatedAt WHERE id IN (${threadIdParams})`;
852
+ await threadReq.query(threadSql);
853
+ }
854
+ await transaction.commit();
846
855
  } catch (error$1) {
847
- await tx.rollback().catch(() => {
848
- });
856
+ await transaction.rollback();
849
857
  throw new error.MastraError(
850
858
  {
851
- id: "MASTRA_STORAGE_MSSQL_STORE_DELETE_THREAD_FAILED",
859
+ id: storage.createStorageErrorId("MSSQL", "UPDATE_MESSAGES", "FAILED"),
852
860
  domain: error.ErrorDomain.STORAGE,
853
- category: error.ErrorCategory.THIRD_PARTY,
854
- details: {
855
- threadId
856
- }
861
+ category: error.ErrorCategory.THIRD_PARTY
857
862
  },
858
863
  error$1
859
864
  );
860
865
  }
861
- }
862
- async _getIncludedMessages({
863
- threadId,
864
- selectBy,
865
- orderByStatement
866
- }) {
867
- const include = selectBy?.include;
868
- if (!include) return null;
869
- const unionQueries = [];
870
- const paramValues = [];
871
- let paramIdx = 1;
872
- const paramNames = [];
873
- for (const inc of include) {
874
- const { id, withPreviousMessages = 0, withNextMessages = 0 } = inc;
875
- const searchId = inc.threadId || threadId;
876
- const pThreadId = `@p${paramIdx}`;
877
- const pId = `@p${paramIdx + 1}`;
878
- const pPrev = `@p${paramIdx + 2}`;
879
- const pNext = `@p${paramIdx + 3}`;
880
- unionQueries.push(
881
- `
882
- SELECT
883
- m.id,
884
- m.content,
885
- m.role,
886
- m.type,
887
- m.[createdAt],
888
- m.thread_id AS threadId,
889
- m.[resourceId],
890
- m.seq_id
891
- FROM (
892
- SELECT *, ROW_NUMBER() OVER (${orderByStatement}) as row_num
893
- FROM ${this.getTableName(storage.TABLE_MESSAGES)}
894
- WHERE [thread_id] = ${pThreadId}
895
- ) AS m
896
- WHERE m.id = ${pId}
897
- OR EXISTS (
898
- SELECT 1
899
- FROM (
900
- SELECT *, ROW_NUMBER() OVER (${orderByStatement}) as row_num
901
- FROM ${this.getTableName(storage.TABLE_MESSAGES)}
902
- WHERE [thread_id] = ${pThreadId}
903
- ) AS target
904
- WHERE target.id = ${pId}
905
- AND (
906
- (m.row_num <= target.row_num + ${pPrev} AND m.row_num > target.row_num)
907
- OR
908
- (m.row_num >= target.row_num - ${pNext} AND m.row_num < target.row_num)
909
- )
910
- )
911
- `
912
- );
913
- paramValues.push(searchId, id, withPreviousMessages, withNextMessages);
914
- paramNames.push(`p${paramIdx}`, `p${paramIdx + 1}`, `p${paramIdx + 2}`, `p${paramIdx + 3}`);
915
- paramIdx += 4;
916
- }
917
- const finalQuery = `
918
- SELECT * FROM (
919
- ${unionQueries.join(" UNION ALL ")}
920
- ) AS union_result
921
- ORDER BY [seq_id] ASC
922
- `;
923
- const req = this.pool.request();
924
- for (let i = 0; i < paramValues.length; ++i) {
925
- req.input(paramNames[i], paramValues[i]);
926
- }
927
- const result = await req.query(finalQuery);
928
- const includedRows = result.recordset || [];
929
- const seen = /* @__PURE__ */ new Set();
930
- const dedupedRows = includedRows.filter((row) => {
931
- if (seen.has(row.id)) return false;
932
- seen.add(row.id);
933
- return true;
866
+ const refetchReq = this.pool.request();
867
+ messageIds.forEach((id, i) => refetchReq.input(`id${i}`, id));
868
+ const updatedMessages = (await refetchReq.query(selectQuery)).recordset;
869
+ return (updatedMessages || []).map((message) => {
870
+ if (typeof message.content === "string") {
871
+ try {
872
+ message.content = JSON.parse(message.content);
873
+ } catch {
874
+ }
875
+ }
876
+ return message;
934
877
  });
935
- return dedupedRows;
936
878
  }
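A minimal usage sketch for the `updateMessages` method added above, assuming `memoryStore` is an instance of the class it belongs to; the ids and field values are placeholders. Only the fields present in each payload are written, `content` is shallow-merged over the stored content (with `content.metadata` merged one level deeper), and every affected thread has its `updatedAt` refreshed:

const updated = await memoryStore.updateMessages({
  messages: [
    {
      id: 'msg-123',                               // placeholder id of an existing row
      content: { metadata: { reviewed: true } },   // merged into the stored content JSON
      threadId: 'thread-456',                      // optional move; both old and new thread get updatedAt bumped
    },
  ],
});
// `updated` holds the re-fetched rows with content parsed back from its JSON string.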
937
- async getMessages(args) {
938
- const { threadId, format, selectBy } = args;
939
- const selectStatement = `SELECT seq_id, id, content, role, type, [createdAt], thread_id AS threadId`;
940
- const orderByStatement = `ORDER BY [seq_id] DESC`;
941
- const limit = this.resolveMessageLimit({ last: selectBy?.last, defaultLimit: 40 });
879
+ async deleteMessages(messageIds) {
880
+ if (!messageIds || messageIds.length === 0) {
881
+ return;
882
+ }
942
883
  try {
943
- let rows = [];
944
- const include = selectBy?.include || [];
945
- if (include?.length) {
946
- const includeMessages = await this._getIncludedMessages({ threadId, selectBy, orderByStatement });
947
- if (includeMessages) {
948
- rows.push(...includeMessages);
949
- }
950
- }
951
- const excludeIds = rows.map((m) => m.id).filter(Boolean);
952
- let query = `${selectStatement} FROM ${this.getTableName(storage.TABLE_MESSAGES)} WHERE [thread_id] = @threadId`;
884
+ const messageTableName = getTableName({ indexName: storage.TABLE_MESSAGES, schemaName: getSchemaName(this.schema) });
885
+ const threadTableName = getTableName({ indexName: storage.TABLE_THREADS, schemaName: getSchemaName(this.schema) });
886
+ const placeholders = messageIds.map((_, idx) => `@p${idx + 1}`).join(",");
953
887
  const request = this.pool.request();
954
- request.input("threadId", threadId);
955
- if (excludeIds.length > 0) {
956
- const excludeParams = excludeIds.map((_, idx) => `@id${idx}`);
957
- query += ` AND id NOT IN (${excludeParams.join(", ")})`;
958
- excludeIds.forEach((id, idx) => {
959
- request.input(`id${idx}`, id);
960
- });
961
- }
962
- query += ` ${orderByStatement} OFFSET 0 ROWS FETCH NEXT @limit ROWS ONLY`;
963
- request.input("limit", limit);
964
- const result = await request.query(query);
965
- const remainingRows = result.recordset || [];
966
- rows.push(...remainingRows);
967
- rows.sort((a, b) => {
968
- const timeDiff = a.seq_id - b.seq_id;
969
- return timeDiff;
888
+ messageIds.forEach((id, idx) => {
889
+ request.input(`p${idx + 1}`, id);
970
890
  });
971
- rows = rows.map(({ seq_id, ...rest }) => rest);
972
- const fetchedMessages = (rows || []).map((message) => {
973
- if (typeof message.content === "string") {
974
- try {
975
- message.content = JSON.parse(message.content);
976
- } catch {
891
+ const messages = await request.query(
892
+ `SELECT DISTINCT [thread_id] FROM ${messageTableName} WHERE [id] IN (${placeholders})`
893
+ );
894
+ const threadIds = messages.recordset?.map((msg) => msg.thread_id).filter(Boolean) || [];
895
+ const transaction = this.pool.transaction();
896
+ await transaction.begin();
897
+ try {
898
+ const deleteRequest = transaction.request();
899
+ messageIds.forEach((id, idx) => {
900
+ deleteRequest.input(`p${idx + 1}`, id);
901
+ });
902
+ await deleteRequest.query(`DELETE FROM ${messageTableName} WHERE [id] IN (${placeholders})`);
903
+ if (threadIds.length > 0) {
904
+ for (const threadId of threadIds) {
905
+ const updateRequest = transaction.request();
906
+ updateRequest.input("p1", threadId);
907
+ await updateRequest.query(`UPDATE ${threadTableName} SET [updatedAt] = GETDATE() WHERE [id] = @p1`);
977
908
  }
978
909
  }
979
- if (format === "v1") {
980
- if (Array.isArray(message.content)) ; else if (typeof message.content === "object" && message.content && Array.isArray(message.content.parts)) {
981
- message.content = message.content.parts;
982
- } else {
983
- message.content = [{ type: "text", text: "" }];
984
- }
985
- } else {
986
- if (typeof message.content !== "object" || !message.content || !("parts" in message.content)) {
987
- message.content = { format: 2, parts: [{ type: "text", text: "" }] };
988
- }
910
+ await transaction.commit();
911
+ } catch (error) {
912
+ try {
913
+ await transaction.rollback();
914
+ } catch {
989
915
  }
990
- if (message.type === "v2") delete message.type;
991
- return message;
992
- });
993
- return format === "v2" ? fetchedMessages.map(
994
- (m) => ({ ...m, content: m.content || { format: 2, parts: [{ type: "text", text: "" }] } })
995
- ) : fetchedMessages;
916
+ throw error;
917
+ }
996
918
  } catch (error$1) {
997
- const mastraError = new error.MastraError(
919
+ throw new error.MastraError(
998
920
  {
999
- id: "MASTRA_STORAGE_MSSQL_STORE_GET_MESSAGES_FAILED",
921
+ id: storage.createStorageErrorId("MSSQL", "DELETE_MESSAGES", "FAILED"),
1000
922
  domain: error.ErrorDomain.STORAGE,
1001
923
  category: error.ErrorCategory.THIRD_PARTY,
1002
- details: {
1003
- threadId
1004
- }
924
+ details: { messageIds: messageIds.join(", ") }
1005
925
  },
1006
926
  error$1
1007
927
  );
1008
- this.logger?.error?.(mastraError.toString());
1009
- this.logger?.trackException(mastraError);
1010
- return [];
1011
928
  }
1012
929
  }
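The companion `deleteMessages` helper takes a plain array of ids (placeholders below). It removes the rows inside a transaction and refreshes `updatedAt` on every thread that owned one of them:

await memoryStore.deleteMessages(['msg-123', 'msg-456']);  // resolves with no value; throws a MastraError on failure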
1013
- async getMessagesPaginated(args) {
1014
- const { threadId, selectBy } = args;
1015
- const { page = 0, perPage: perPageInput } = selectBy?.pagination || {};
1016
- const orderByStatement = `ORDER BY [seq_id] DESC`;
1017
- if (selectBy?.include?.length) {
1018
- await this._getIncludedMessages({ threadId, selectBy, orderByStatement });
1019
- }
930
+ async getResourceById({ resourceId }) {
931
+ const tableName = getTableName({ indexName: storage.TABLE_RESOURCES, schemaName: getSchemaName(this.schema) });
1020
932
  try {
1021
- const { threadId: threadId2, format, selectBy: selectBy2 } = args;
1022
- const { page: page2 = 0, perPage: perPageInput2, dateRange } = selectBy2?.pagination || {};
1023
- const fromDate = dateRange?.start;
1024
- const toDate = dateRange?.end;
1025
- const selectStatement = `SELECT seq_id, id, content, role, type, [createdAt], thread_id AS threadId`;
1026
- const orderByStatement2 = `ORDER BY [seq_id] DESC`;
1027
- let messages2 = [];
1028
- if (selectBy2?.include?.length) {
1029
- const includeMessages = await this._getIncludedMessages({ threadId: threadId2, selectBy: selectBy2, orderByStatement: orderByStatement2 });
1030
- if (includeMessages) messages2.push(...includeMessages);
1031
- }
1032
- const perPage = perPageInput2 !== void 0 ? perPageInput2 : this.resolveMessageLimit({ last: selectBy2?.last, defaultLimit: 40 });
1033
- const currentOffset = page2 * perPage;
1034
- const conditions = ["[thread_id] = @threadId"];
1035
- const request = this.pool.request();
1036
- request.input("threadId", threadId2);
1037
- if (fromDate instanceof Date && !isNaN(fromDate.getTime())) {
1038
- conditions.push("[createdAt] >= @fromDate");
1039
- request.input("fromDate", fromDate.toISOString());
1040
- }
1041
- if (toDate instanceof Date && !isNaN(toDate.getTime())) {
1042
- conditions.push("[createdAt] <= @toDate");
1043
- request.input("toDate", toDate.toISOString());
1044
- }
1045
- const whereClause = `WHERE ${conditions.join(" AND ")}`;
1046
- const countQuery = `SELECT COUNT(*) as total FROM ${this.getTableName(storage.TABLE_MESSAGES)} ${whereClause}`;
1047
- const countResult = await request.query(countQuery);
1048
- const total = parseInt(countResult.recordset[0]?.total, 10) || 0;
1049
- if (total === 0 && messages2.length > 0) {
1050
- const parsedIncluded = this._parseAndFormatMessages(messages2, format);
1051
- return {
1052
- messages: parsedIncluded,
1053
- total: parsedIncluded.length,
1054
- page: page2,
1055
- perPage,
1056
- hasMore: false
1057
- };
933
+ const req = this.pool.request();
934
+ req.input("resourceId", resourceId);
935
+ const result = (await req.query(`SELECT * FROM ${tableName} WHERE id = @resourceId`)).recordset[0];
936
+ if (!result) {
937
+ return null;
1058
938
  }
1059
- const excludeIds = messages2.map((m) => m.id);
1060
- if (excludeIds.length > 0) {
1061
- const excludeParams = excludeIds.map((_, idx) => `@id${idx}`);
1062
- conditions.push(`id NOT IN (${excludeParams.join(", ")})`);
1063
- excludeIds.forEach((id, idx) => request.input(`id${idx}`, id));
1064
- }
1065
- const finalWhereClause = `WHERE ${conditions.join(" AND ")}`;
1066
- const dataQuery = `${selectStatement} FROM ${this.getTableName(storage.TABLE_MESSAGES)} ${finalWhereClause} ${orderByStatement2} OFFSET @offset ROWS FETCH NEXT @limit ROWS ONLY`;
1067
- request.input("offset", currentOffset);
1068
- request.input("limit", perPage);
1069
- const rowsResult = await request.query(dataQuery);
1070
- const rows = rowsResult.recordset || [];
1071
- rows.sort((a, b) => a.seq_id - b.seq_id);
1072
- messages2.push(...rows);
1073
- const parsed = this._parseAndFormatMessages(messages2, format);
1074
939
  return {
1075
- messages: parsed,
1076
- total: total + excludeIds.length,
1077
- page: page2,
1078
- perPage,
1079
- hasMore: currentOffset + rows.length < total
940
+ id: result.id,
941
+ createdAt: result.createdAt,
942
+ updatedAt: result.updatedAt,
943
+ workingMemory: result.workingMemory,
944
+ metadata: typeof result.metadata === "string" ? JSON.parse(result.metadata) : result.metadata
1080
945
  };
1081
946
  } catch (error$1) {
1082
947
  const mastraError = new error.MastraError(
1083
948
  {
1084
- id: "MASTRA_STORAGE_MSSQL_STORE_GET_MESSAGES_PAGINATED_FAILED",
949
+ id: storage.createStorageErrorId("MSSQL", "GET_RESOURCE_BY_ID", "FAILED"),
1085
950
  domain: error.ErrorDomain.STORAGE,
1086
951
  category: error.ErrorCategory.THIRD_PARTY,
1087
- details: {
1088
- threadId,
1089
- page
1090
- }
952
+ details: { resourceId }
1091
953
  },
1092
954
  error$1
1093
955
  );
1094
956
  this.logger?.error?.(mastraError.toString());
1095
- this.logger?.trackException(mastraError);
1096
- return { messages: [], total: 0, page, perPage: perPageInput || 40, hasMore: false };
957
+ this.logger?.trackException?.(mastraError);
958
+ throw mastraError;
1097
959
  }
1098
960
  }
1099
- _parseAndFormatMessages(messages, format) {
1100
- const parsedMessages = messages.map((message) => {
1101
- let parsed = message;
1102
- if (typeof parsed.content === "string") {
1103
- try {
1104
- parsed = { ...parsed, content: JSON.parse(parsed.content) };
1105
- } catch {
1106
- }
1107
- }
1108
- if (format === "v1") {
1109
- if (Array.isArray(parsed.content)) ; else if (parsed.content?.parts) {
1110
- parsed.content = parsed.content.parts;
1111
- } else {
1112
- parsed.content = [{ type: "text", text: "" }];
1113
- }
1114
- } else {
1115
- if (!parsed.content?.parts) {
1116
- parsed = { ...parsed, content: { format: 2, parts: [{ type: "text", text: "" }] } };
1117
- }
961
+ async saveResource({ resource }) {
962
+ await this.operations.insert({
963
+ tableName: storage.TABLE_RESOURCES,
964
+ record: {
965
+ ...resource,
966
+ metadata: resource.metadata
1118
967
  }
1119
- return parsed;
1120
968
  });
1121
- const list = new agent.MessageList().add(parsedMessages, "memory");
1122
- return format === "v2" ? list.get.all.v2() : list.get.all.v1();
969
+ return resource;
1123
970
  }
1124
- async saveMessages({
1125
- messages,
1126
- format
971
+ async updateResource({
972
+ resourceId,
973
+ workingMemory,
974
+ metadata
1127
975
  }) {
1128
- if (messages.length === 0) return messages;
1129
- const threadId = messages[0]?.threadId;
1130
- if (!threadId) {
1131
- throw new error.MastraError({
1132
- id: "MASTRA_STORAGE_MSSQL_STORE_SAVE_MESSAGES_FAILED",
1133
- domain: error.ErrorDomain.STORAGE,
1134
- category: error.ErrorCategory.THIRD_PARTY,
1135
- text: `Thread ID is required`
976
+ try {
977
+ const existingResource = await this.getResourceById({ resourceId });
978
+ if (!existingResource) {
979
+ const newResource = {
980
+ id: resourceId,
981
+ workingMemory,
982
+ metadata: metadata || {},
983
+ createdAt: /* @__PURE__ */ new Date(),
984
+ updatedAt: /* @__PURE__ */ new Date()
985
+ };
986
+ return this.saveResource({ resource: newResource });
987
+ }
988
+ const updatedResource = {
989
+ ...existingResource,
990
+ workingMemory: workingMemory !== void 0 ? workingMemory : existingResource.workingMemory,
991
+ metadata: {
992
+ ...existingResource.metadata,
993
+ ...metadata
994
+ },
995
+ updatedAt: /* @__PURE__ */ new Date()
996
+ };
997
+ const tableName = getTableName({ indexName: storage.TABLE_RESOURCES, schemaName: getSchemaName(this.schema) });
998
+ const updates = [];
999
+ const req = this.pool.request();
1000
+ if (workingMemory !== void 0) {
1001
+ updates.push("workingMemory = @workingMemory");
1002
+ req.input("workingMemory", workingMemory);
1003
+ }
1004
+ if (metadata) {
1005
+ updates.push("metadata = @metadata");
1006
+ req.input("metadata", JSON.stringify(updatedResource.metadata));
1007
+ }
1008
+ updates.push("updatedAt = @updatedAt");
1009
+ req.input("updatedAt", updatedResource.updatedAt.toISOString());
1010
+ req.input("id", resourceId);
1011
+ await req.query(`UPDATE ${tableName} SET ${updates.join(", ")} WHERE id = @id`);
1012
+ return updatedResource;
1013
+ } catch (error$1) {
1014
+ const mastraError = new error.MastraError(
1015
+ {
1016
+ id: storage.createStorageErrorId("MSSQL", "UPDATE_RESOURCE", "FAILED"),
1017
+ domain: error.ErrorDomain.STORAGE,
1018
+ category: error.ErrorCategory.THIRD_PARTY,
1019
+ details: { resourceId }
1020
+ },
1021
+ error$1
1022
+ );
1023
+ this.logger?.error?.(mastraError.toString());
1024
+ this.logger?.trackException?.(mastraError);
1025
+ throw mastraError;
1026
+ }
1027
+ }
1028
+ };
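A sketch of the resource working-memory round trip defined just above (`getResourceById`, `saveResource`, `updateResource`), again with `memoryStore` and the ids as assumptions. `updateResource` behaves as an upsert: it creates the resource when none exists, otherwise it replaces `workingMemory` (when provided) and merges `metadata` over the stored values:

const resource = await memoryStore.updateResource({
  resourceId: 'resource-1',                        // placeholder id
  workingMemory: 'User prefers metric units.',
  metadata: { locale: 'en-GB' },                   // merged over any existing metadata
});
const reloaded = await memoryStore.getResourceById({ resourceId: 'resource-1' });
// reloaded.metadata comes back parsed from its stored JSON string; unknown ids return null.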
1029
+ var ObservabilityMSSQL = class extends storage.ObservabilityStorage {
1030
+ pool;
1031
+ operations;
1032
+ schema;
1033
+ constructor({
1034
+ pool,
1035
+ operations,
1036
+ schema
1037
+ }) {
1038
+ super();
1039
+ this.pool = pool;
1040
+ this.operations = operations;
1041
+ this.schema = schema;
1042
+ }
1043
+ get tracingStrategy() {
1044
+ return {
1045
+ preferred: "batch-with-updates",
1046
+ supported: ["batch-with-updates", "insert-only"]
1047
+ };
1048
+ }
1049
+ async createSpan(span) {
1050
+ try {
1051
+ const startedAt = span.startedAt instanceof Date ? span.startedAt.toISOString() : span.startedAt;
1052
+ const endedAt = span.endedAt instanceof Date ? span.endedAt.toISOString() : span.endedAt;
1053
+ const record = {
1054
+ ...span,
1055
+ startedAt,
1056
+ endedAt
1057
+ // Note: createdAt/updatedAt will be set by default values
1058
+ };
1059
+ return this.operations.insert({ tableName: storage.TABLE_SPANS, record });
1060
+ } catch (error$1) {
1061
+ throw new error.MastraError(
1062
+ {
1063
+ id: storage.createStorageErrorId("MSSQL", "CREATE_SPAN", "FAILED"),
1064
+ domain: error.ErrorDomain.STORAGE,
1065
+ category: error.ErrorCategory.USER,
1066
+ details: {
1067
+ spanId: span.spanId,
1068
+ traceId: span.traceId,
1069
+ spanType: span.spanType,
1070
+ spanName: span.name
1071
+ }
1072
+ },
1073
+ error$1
1074
+ );
1075
+ }
1076
+ }
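A sketch of the span record `createSpan` accepts; the ids, name and `spanType` value are illustrative. `Date` values for `startedAt`/`endedAt` are converted to ISO strings before the insert, while `createdAt`/`updatedAt` are left to the column defaults:

await observability.createSpan({
  traceId: 'trace-abc',             // placeholder ids
  spanId: 'span-001',
  parentSpanId: null,               // null marks a root span, which is what getTracesPaginated lists
  name: "agent run: 'support-agent'",
  spanType: 'agent_run',            // illustrative value
  startedAt: new Date(),
  endedAt: null,                    // can be filled in later via updateSpan
});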
1077
+ async getTrace(traceId) {
1078
+ try {
1079
+ const tableName = getTableName({
1080
+ indexName: storage.TABLE_SPANS,
1081
+ schemaName: getSchemaName(this.schema)
1082
+ });
1083
+ const request = this.pool.request();
1084
+ request.input("traceId", traceId);
1085
+ const result = await request.query(
1086
+ `SELECT
1087
+ [traceId], [spanId], [parentSpanId], [name], [scope], [spanType],
1088
+ [attributes], [metadata], [links], [input], [output], [error], [isEvent],
1089
+ [startedAt], [endedAt], [createdAt], [updatedAt]
1090
+ FROM ${tableName}
1091
+ WHERE [traceId] = @traceId
1092
+ ORDER BY [startedAt] DESC`
1093
+ );
1094
+ if (!result.recordset || result.recordset.length === 0) {
1095
+ return null;
1096
+ }
1097
+ return {
1098
+ traceId,
1099
+ spans: result.recordset.map(
1100
+ (span) => transformFromSqlRow({
1101
+ tableName: storage.TABLE_SPANS,
1102
+ sqlRow: span
1103
+ })
1104
+ )
1105
+ };
1106
+ } catch (error$1) {
1107
+ throw new error.MastraError(
1108
+ {
1109
+ id: storage.createStorageErrorId("MSSQL", "GET_TRACE", "FAILED"),
1110
+ domain: error.ErrorDomain.STORAGE,
1111
+ category: error.ErrorCategory.USER,
1112
+ details: {
1113
+ traceId
1114
+ }
1115
+ },
1116
+ error$1
1117
+ );
1118
+ }
1119
+ }
1120
+ async updateSpan({
1121
+ spanId,
1122
+ traceId,
1123
+ updates
1124
+ }) {
1125
+ try {
1126
+ const data = { ...updates };
1127
+ if (data.endedAt instanceof Date) {
1128
+ data.endedAt = data.endedAt.toISOString();
1129
+ }
1130
+ if (data.startedAt instanceof Date) {
1131
+ data.startedAt = data.startedAt.toISOString();
1132
+ }
1133
+ await this.operations.update({
1134
+ tableName: storage.TABLE_SPANS,
1135
+ keys: { spanId, traceId },
1136
+ data
1137
+ });
1138
+ } catch (error$1) {
1139
+ throw new error.MastraError(
1140
+ {
1141
+ id: storage.createStorageErrorId("MSSQL", "UPDATE_SPAN", "FAILED"),
1142
+ domain: error.ErrorDomain.STORAGE,
1143
+ category: error.ErrorCategory.USER,
1144
+ details: {
1145
+ spanId,
1146
+ traceId
1147
+ }
1148
+ },
1149
+ error$1
1150
+ );
1151
+ }
1152
+ }
1153
+ async getTracesPaginated({
1154
+ filters,
1155
+ pagination
1156
+ }) {
1157
+ const page = pagination?.page ?? 0;
1158
+ const perPage = pagination?.perPage ?? 10;
1159
+ const { entityId, entityType, ...actualFilters } = filters || {};
1160
+ const filtersWithDateRange = {
1161
+ ...actualFilters,
1162
+ ...buildDateRangeFilter(pagination?.dateRange, "startedAt"),
1163
+ parentSpanId: null
1164
+ // Only get root spans for traces
1165
+ };
1166
+ const whereClause = prepareWhereClause(filtersWithDateRange);
1167
+ let actualWhereClause = whereClause.sql;
1168
+ const params = { ...whereClause.params };
1169
+ let currentParamIndex = Object.keys(params).length + 1;
1170
+ if (entityId && entityType) {
1171
+ let name = "";
1172
+ if (entityType === "workflow") {
1173
+ name = `workflow run: '${entityId}'`;
1174
+ } else if (entityType === "agent") {
1175
+ name = `agent run: '${entityId}'`;
1176
+ } else {
1177
+ const error$1 = new error.MastraError({
1178
+ id: storage.createStorageErrorId("MSSQL", "GET_TRACES_PAGINATED", "INVALID_ENTITY_TYPE"),
1179
+ domain: error.ErrorDomain.STORAGE,
1180
+ category: error.ErrorCategory.USER,
1181
+ details: {
1182
+ entityType
1183
+ },
1184
+ text: `Cannot filter by entity type: ${entityType}`
1185
+ });
1186
+ throw error$1;
1187
+ }
1188
+ const entityParam = `p${currentParamIndex++}`;
1189
+ if (actualWhereClause) {
1190
+ actualWhereClause += ` AND [name] = @${entityParam}`;
1191
+ } else {
1192
+ actualWhereClause = ` WHERE [name] = @${entityParam}`;
1193
+ }
1194
+ params[entityParam] = name;
1195
+ }
1196
+ const tableName = getTableName({
1197
+ indexName: storage.TABLE_SPANS,
1198
+ schemaName: getSchemaName(this.schema)
1199
+ });
1200
+ try {
1201
+ const countRequest = this.pool.request();
1202
+ Object.entries(params).forEach(([key, value]) => {
1203
+ countRequest.input(key, value);
1204
+ });
1205
+ const countResult = await countRequest.query(
1206
+ `SELECT COUNT(*) as count FROM ${tableName}${actualWhereClause}`
1207
+ );
1208
+ const total = countResult.recordset[0]?.count ?? 0;
1209
+ if (total === 0) {
1210
+ return {
1211
+ pagination: {
1212
+ total: 0,
1213
+ page,
1214
+ perPage,
1215
+ hasMore: false
1216
+ },
1217
+ spans: []
1218
+ };
1219
+ }
1220
+ const dataRequest = this.pool.request();
1221
+ Object.entries(params).forEach(([key, value]) => {
1222
+ dataRequest.input(key, value);
1223
+ });
1224
+ dataRequest.input("offset", page * perPage);
1225
+ dataRequest.input("limit", perPage);
1226
+ const dataResult = await dataRequest.query(
1227
+ `SELECT * FROM ${tableName}${actualWhereClause} ORDER BY [startedAt] DESC OFFSET @offset ROWS FETCH NEXT @limit ROWS ONLY`
1228
+ );
1229
+ const spans = dataResult.recordset.map(
1230
+ (row) => transformFromSqlRow({
1231
+ tableName: storage.TABLE_SPANS,
1232
+ sqlRow: row
1233
+ })
1234
+ );
1235
+ return {
1236
+ pagination: {
1237
+ total,
1238
+ page,
1239
+ perPage,
1240
+ hasMore: (page + 1) * perPage < total
1241
+ },
1242
+ spans
1243
+ };
1244
+ } catch (error$1) {
1245
+ throw new error.MastraError(
1246
+ {
1247
+ id: storage.createStorageErrorId("MSSQL", "GET_TRACES_PAGINATED", "FAILED"),
1248
+ domain: error.ErrorDomain.STORAGE,
1249
+ category: error.ErrorCategory.USER
1250
+ },
1251
+ error$1
1252
+ );
1253
+ }
1254
+ }
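Querying traces back out, as implemented above: `entityType` must be `'workflow'` or `'agent'` (anything else throws an INVALID_ENTITY_TYPE error), the optional date range filters on `startedAt`, only root spans (`parentSpanId IS NULL`) are counted, and paging is zero-based. Ids below are placeholders:

const { spans, pagination } = await observability.getTracesPaginated({
  filters: { entityType: 'agent', entityId: 'support-agent' },   // matched against the span [name] column
  pagination: {
    page: 0,
    perPage: 25,
    dateRange: { start: new Date('2025-01-01'), end: new Date() },
  },
});
// pagination.hasMore stays true while (page + 1) * perPage < pagination.total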
1255
+ async batchCreateSpans(args) {
1256
+ if (!args.records || args.records.length === 0) {
1257
+ return;
1258
+ }
1259
+ try {
1260
+ await this.operations.batchInsert({
1261
+ tableName: storage.TABLE_SPANS,
1262
+ records: args.records.map((span) => ({
1263
+ ...span,
1264
+ startedAt: span.startedAt instanceof Date ? span.startedAt.toISOString() : span.startedAt,
1265
+ endedAt: span.endedAt instanceof Date ? span.endedAt.toISOString() : span.endedAt
1266
+ }))
1267
+ });
1268
+ } catch (error$1) {
1269
+ throw new error.MastraError(
1270
+ {
1271
+ id: storage.createStorageErrorId("MSSQL", "BATCH_CREATE_SPANS", "FAILED"),
1272
+ domain: error.ErrorDomain.STORAGE,
1273
+ category: error.ErrorCategory.USER,
1274
+ details: {
1275
+ count: args.records.length
1276
+ }
1277
+ },
1278
+ error$1
1279
+ );
1280
+ }
1281
+ }
1282
+ async batchUpdateSpans(args) {
1283
+ if (!args.records || args.records.length === 0) {
1284
+ return;
1285
+ }
1286
+ try {
1287
+ const updates = args.records.map(({ traceId, spanId, updates: data }) => {
1288
+ const processedData = { ...data };
1289
+ if (processedData.endedAt instanceof Date) {
1290
+ processedData.endedAt = processedData.endedAt.toISOString();
1291
+ }
1292
+ if (processedData.startedAt instanceof Date) {
1293
+ processedData.startedAt = processedData.startedAt.toISOString();
1294
+ }
1295
+ return {
1296
+ keys: { spanId, traceId },
1297
+ data: processedData
1298
+ };
1299
+ });
1300
+ await this.operations.batchUpdate({
1301
+ tableName: storage.TABLE_SPANS,
1302
+ updates
1303
+ });
1304
+ } catch (error$1) {
1305
+ throw new error.MastraError(
1306
+ {
1307
+ id: storage.createStorageErrorId("MSSQL", "BATCH_UPDATE_SPANS", "FAILED"),
1308
+ domain: error.ErrorDomain.STORAGE,
1309
+ category: error.ErrorCategory.USER,
1310
+ details: {
1311
+ count: args.records.length
1312
+ }
1313
+ },
1314
+ error$1
1315
+ );
1316
+ }
1317
+ }
1318
+ async batchDeleteTraces(args) {
1319
+ if (!args.traceIds || args.traceIds.length === 0) {
1320
+ return;
1321
+ }
1322
+ try {
1323
+ const keys = args.traceIds.map((traceId) => ({ traceId }));
1324
+ await this.operations.batchDelete({
1325
+ tableName: storage.TABLE_SPANS,
1326
+ keys
1327
+ });
1328
+ } catch (error$1) {
1329
+ throw new error.MastraError(
1330
+ {
1331
+ id: storage.createStorageErrorId("MSSQL", "BATCH_DELETE_TRACES", "FAILED"),
1332
+ domain: error.ErrorDomain.STORAGE,
1333
+ category: error.ErrorCategory.USER,
1334
+ details: {
1335
+ count: args.traceIds.length
1336
+ }
1337
+ },
1338
+ error$1
1339
+ );
1340
+ }
1341
+ }
1342
+ };
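Wiring the observability domain together, as a sketch: `pool` is assumed to be an already-connected `mssql` ConnectionPool, and the schema name, ids and span field values are placeholders. The constructors are the ones defined in this file; each batch helper fans out to the operations layer inside a single transaction per call:

const operations = new StoreOperationsMSSQL({ pool, schemaName: 'mastra' });
const observability = new ObservabilityMSSQL({ pool, operations, schema: 'mastra' });

await observability.batchCreateSpans({
  records: [{ traceId: 'trace-abc', spanId: 'span-002', parentSpanId: 'span-001',
              name: 'llm call', spanType: 'llm', startedAt: new Date(), endedAt: new Date() }],
});
await observability.batchUpdateSpans({
  records: [{ traceId: 'trace-abc', spanId: 'span-001', updates: { endedAt: new Date() } }],
});
await observability.batchDeleteTraces({ traceIds: ['trace-abc'] });  // removes every span belonging to each trace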
1343
+ var StoreOperationsMSSQL = class extends storage.StoreOperations {
1344
+ pool;
1345
+ schemaName;
1346
+ setupSchemaPromise = null;
1347
+ schemaSetupComplete = void 0;
1348
+ getSqlType(type, isPrimaryKey = false, useLargeStorage = false) {
1349
+ switch (type) {
1350
+ case "text":
1351
+ if (useLargeStorage) {
1352
+ return "NVARCHAR(MAX)";
1353
+ }
1354
+ return isPrimaryKey ? "NVARCHAR(255)" : "NVARCHAR(400)";
1355
+ case "timestamp":
1356
+ return "DATETIME2(7)";
1357
+ case "uuid":
1358
+ return "UNIQUEIDENTIFIER";
1359
+ case "jsonb":
1360
+ return "NVARCHAR(MAX)";
1361
+ case "integer":
1362
+ return "INT";
1363
+ case "bigint":
1364
+ return "BIGINT";
1365
+ case "float":
1366
+ return "FLOAT";
1367
+ case "boolean":
1368
+ return "BIT";
1369
+ default:
1370
+ throw new error.MastraError({
1371
+ id: storage.createStorageErrorId("MSSQL", "TYPE", "NOT_SUPPORTED"),
1372
+ domain: error.ErrorDomain.STORAGE,
1373
+ category: error.ErrorCategory.THIRD_PARTY
1374
+ });
1375
+ }
1376
+ }
1377
+ constructor({ pool, schemaName }) {
1378
+ super();
1379
+ this.pool = pool;
1380
+ this.schemaName = schemaName;
1381
+ }
1382
+ async hasColumn(table, column) {
1383
+ const schema = this.schemaName || "dbo";
1384
+ const request = this.pool.request();
1385
+ request.input("schema", schema);
1386
+ request.input("table", table);
1387
+ request.input("column", column);
1388
+ request.input("columnLower", column.toLowerCase());
1389
+ const result = await request.query(
1390
+ `SELECT 1 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = @schema AND TABLE_NAME = @table AND (COLUMN_NAME = @column OR COLUMN_NAME = @columnLower)`
1391
+ );
1392
+ return result.recordset.length > 0;
1393
+ }
1394
+ async setupSchema() {
1395
+ if (!this.schemaName || this.schemaSetupComplete) {
1396
+ return;
1397
+ }
1398
+ if (!this.setupSchemaPromise) {
1399
+ this.setupSchemaPromise = (async () => {
1400
+ try {
1401
+ const checkRequest = this.pool.request();
1402
+ checkRequest.input("schemaName", this.schemaName);
1403
+ const checkResult = await checkRequest.query(`
1404
+ SELECT 1 AS found FROM INFORMATION_SCHEMA.SCHEMATA WHERE SCHEMA_NAME = @schemaName
1405
+ `);
1406
+ const schemaExists = Array.isArray(checkResult.recordset) && checkResult.recordset.length > 0;
1407
+ if (!schemaExists) {
1408
+ try {
1409
+ await this.pool.request().query(`CREATE SCHEMA [${this.schemaName}]`);
1410
+ this.logger?.info?.(`Schema "${this.schemaName}" created successfully`);
1411
+ } catch (error) {
1412
+ this.logger?.error?.(`Failed to create schema "${this.schemaName}"`, { error });
1413
+ throw new Error(
1414
+ `Unable to create schema "${this.schemaName}". This requires CREATE privilege on the database. Either create the schema manually or grant CREATE privilege to the user.`
1415
+ );
1416
+ }
1417
+ }
1418
+ this.schemaSetupComplete = true;
1419
+ this.logger?.debug?.(`Schema "${this.schemaName}" is ready for use`);
1420
+ } catch (error) {
1421
+ this.schemaSetupComplete = void 0;
1422
+ this.setupSchemaPromise = null;
1423
+ throw error;
1424
+ } finally {
1425
+ this.setupSchemaPromise = null;
1426
+ }
1427
+ })();
1428
+ }
1429
+ await this.setupSchemaPromise;
1430
+ }
1431
+ async insert({
1432
+ tableName,
1433
+ record,
1434
+ transaction
1435
+ }) {
1436
+ try {
1437
+ const columns = Object.keys(record);
1438
+ const parsedColumns = columns.map((col) => utils.parseSqlIdentifier(col, "column name"));
1439
+ const paramNames = columns.map((_, i) => `@param${i}`);
1440
+ const insertSql = `INSERT INTO ${getTableName({ indexName: tableName, schemaName: getSchemaName(this.schemaName) })} (${parsedColumns.map((c) => `[${c}]`).join(", ")}) VALUES (${paramNames.join(", ")})`;
1441
+ const request = transaction ? transaction.request() : this.pool.request();
1442
+ columns.forEach((col, i) => {
1443
+ const value = record[col];
1444
+ const preparedValue = this.prepareValue(value, col, tableName);
1445
+ if (preparedValue instanceof Date) {
1446
+ request.input(`param${i}`, sql2__default.default.DateTime2, preparedValue);
1447
+ } else if (preparedValue === null || preparedValue === void 0) {
1448
+ request.input(`param${i}`, this.getMssqlType(tableName, col), null);
1449
+ } else {
1450
+ request.input(`param${i}`, preparedValue);
1451
+ }
1452
+ });
1453
+ await request.query(insertSql);
1454
+ } catch (error$1) {
1455
+ throw new error.MastraError(
1456
+ {
1457
+ id: storage.createStorageErrorId("MSSQL", "INSERT", "FAILED"),
1458
+ domain: error.ErrorDomain.STORAGE,
1459
+ category: error.ErrorCategory.THIRD_PARTY,
1460
+ details: {
1461
+ tableName
1462
+ }
1463
+ },
1464
+ error$1
1465
+ );
1466
+ }
1467
+ }
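A sketch of the low-level `insert` helper using the threads table constant that the bundle imports from `@mastra/core/storage`; the record values are placeholders. Object values for JSON-typed columns are stringified by `prepareValue`, `Date` values are bound as DATETIME2, and null/undefined values are bound with the column's mapped mssql type:

const { TABLE_THREADS } = require('@mastra/core/storage');

await operations.insert({
  tableName: TABLE_THREADS,
  record: {
    id: 'thread-1',                   // placeholder values
    resourceId: 'resource-1',
    title: 'Support chat',
    metadata: { channel: 'web' },     // serialized to a JSON string for the NVARCHAR(MAX) column
    createdAt: new Date(),
    updatedAt: new Date(),
  },
});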
1468
+ async clearTable({ tableName }) {
1469
+ const fullTableName = getTableName({ indexName: tableName, schemaName: getSchemaName(this.schemaName) });
1470
+ try {
1471
+ try {
1472
+ await this.pool.request().query(`TRUNCATE TABLE ${fullTableName}`);
1473
+ } catch (truncateError) {
1474
+ if (truncateError?.number === 4712) {
1475
+ await this.pool.request().query(`DELETE FROM ${fullTableName}`);
1476
+ } else {
1477
+ throw truncateError;
1478
+ }
1479
+ }
1480
+ } catch (error$1) {
1481
+ throw new error.MastraError(
1482
+ {
1483
+ id: storage.createStorageErrorId("MSSQL", "CLEAR_TABLE", "FAILED"),
1484
+ domain: error.ErrorDomain.STORAGE,
1485
+ category: error.ErrorCategory.THIRD_PARTY,
1486
+ details: {
1487
+ tableName
1488
+ }
1489
+ },
1490
+ error$1
1491
+ );
1492
+ }
1493
+ }
1494
+ getDefaultValue(type) {
1495
+ switch (type) {
1496
+ case "timestamp":
1497
+ return "DEFAULT SYSUTCDATETIME()";
1498
+ case "jsonb":
1499
+ return "DEFAULT N'{}'";
1500
+ case "boolean":
1501
+ return "DEFAULT 0";
1502
+ default:
1503
+ return super.getDefaultValue(type);
1504
+ }
1505
+ }
1506
+ async createTable({
1507
+ tableName,
1508
+ schema
1509
+ }) {
1510
+ try {
1511
+ const uniqueConstraintColumns = tableName === storage.TABLE_WORKFLOW_SNAPSHOT ? ["workflow_name", "run_id"] : [];
1512
+ const largeDataColumns = [
1513
+ "workingMemory",
1514
+ "snapshot",
1515
+ "metadata",
1516
+ "content",
1517
+ // messages.content - can be very long conversation content
1518
+ "input",
1519
+ // evals.input - test input data
1520
+ "output",
1521
+ // evals.output - test output data
1522
+ "instructions",
1523
+ // evals.instructions - evaluation instructions
1524
+ "other"
1525
+ // traces.other - additional trace data
1526
+ ];
1527
+ const columns = Object.entries(schema).map(([name, def]) => {
1528
+ const parsedName = utils.parseSqlIdentifier(name, "column name");
1529
+ const constraints = [];
1530
+ if (def.primaryKey) constraints.push("PRIMARY KEY");
1531
+ if (!def.nullable) constraints.push("NOT NULL");
1532
+ const isIndexed = !!def.primaryKey || uniqueConstraintColumns.includes(name);
1533
+ const useLargeStorage = largeDataColumns.includes(name);
1534
+ return `[${parsedName}] ${this.getSqlType(def.type, isIndexed, useLargeStorage)} ${constraints.join(" ")}`.trim();
1535
+ }).join(",\n");
1536
+ if (this.schemaName) {
1537
+ await this.setupSchema();
1538
+ }
1539
+ const checkTableRequest = this.pool.request();
1540
+ checkTableRequest.input(
1541
+ "tableName",
1542
+ getTableName({ indexName: tableName, schemaName: getSchemaName(this.schemaName) }).replace(/[[\]]/g, "").split(".").pop()
1543
+ );
1544
+ const checkTableSql = `SELECT 1 AS found FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = @schema AND TABLE_NAME = @tableName`;
1545
+ checkTableRequest.input("schema", this.schemaName || "dbo");
1546
+ const checkTableResult = await checkTableRequest.query(checkTableSql);
1547
+ const tableExists = Array.isArray(checkTableResult.recordset) && checkTableResult.recordset.length > 0;
1548
+ if (!tableExists) {
1549
+ const createSql = `CREATE TABLE ${getTableName({ indexName: tableName, schemaName: getSchemaName(this.schemaName) })} (
1550
+ ${columns}
1551
+ )`;
1552
+ await this.pool.request().query(createSql);
1553
+ }
1554
+ const columnCheckSql = `
1555
+ SELECT 1 AS found
1556
+ FROM INFORMATION_SCHEMA.COLUMNS
1557
+ WHERE TABLE_SCHEMA = @schema AND TABLE_NAME = @tableName AND COLUMN_NAME = 'seq_id'
1558
+ `;
1559
+ const checkColumnRequest = this.pool.request();
1560
+ checkColumnRequest.input("schema", this.schemaName || "dbo");
1561
+ checkColumnRequest.input(
1562
+ "tableName",
1563
+ getTableName({ indexName: tableName, schemaName: getSchemaName(this.schemaName) }).replace(/[[\]]/g, "").split(".").pop()
1564
+ );
1565
+ const columnResult = await checkColumnRequest.query(columnCheckSql);
1566
+ const columnExists = Array.isArray(columnResult.recordset) && columnResult.recordset.length > 0;
1567
+ if (!columnExists) {
1568
+ const alterSql = `ALTER TABLE ${getTableName({ indexName: tableName, schemaName: getSchemaName(this.schemaName) })} ADD seq_id BIGINT IDENTITY(1,1)`;
1569
+ await this.pool.request().query(alterSql);
1570
+ }
1571
+ if (tableName === storage.TABLE_WORKFLOW_SNAPSHOT) {
1572
+ const constraintName = "mastra_workflow_snapshot_workflow_name_run_id_key";
1573
+ const checkConstraintSql = `SELECT 1 AS found FROM sys.key_constraints WHERE name = @constraintName`;
1574
+ const checkConstraintRequest = this.pool.request();
1575
+ checkConstraintRequest.input("constraintName", constraintName);
1576
+ const constraintResult = await checkConstraintRequest.query(checkConstraintSql);
1577
+ const constraintExists = Array.isArray(constraintResult.recordset) && constraintResult.recordset.length > 0;
1578
+ if (!constraintExists) {
1579
+ const addConstraintSql = `ALTER TABLE ${getTableName({ indexName: tableName, schemaName: getSchemaName(this.schemaName) })} ADD CONSTRAINT ${constraintName} UNIQUE ([workflow_name], [run_id])`;
1580
+ await this.pool.request().query(addConstraintSql);
1581
+ }
1582
+ }
1583
+ } catch (error$1) {
1584
+ throw new error.MastraError(
1585
+ {
1586
+ id: storage.createStorageErrorId("MSSQL", "CREATE_TABLE", "FAILED"),
1587
+ domain: error.ErrorDomain.STORAGE,
1588
+ category: error.ErrorCategory.THIRD_PARTY,
1589
+ details: {
1590
+ tableName
1591
+ }
1592
+ },
1593
+ error$1
1594
+ );
1595
+ }
1596
+ }
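A sketch of `createTable` with a hand-written column schema; the table and column names are hypothetical (the built-in tables pass the shapes from `TABLE_SCHEMAS`). Text primary keys become NVARCHAR(255), columns in the large-data list become NVARCHAR(MAX), existing tables are left untouched, and a `seq_id BIGINT IDENTITY(1,1)` column is added when missing:

await operations.createTable({
  tableName: 'my_custom_table',       // hypothetical name
  schema: {
    id: { type: 'text', primaryKey: true, nullable: false },
    content: { type: 'jsonb', nullable: true },         // jsonb maps to NVARCHAR(MAX)
    createdAt: { type: 'timestamp', nullable: false },  // DATETIME2(7)
  },
});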
1597
+ /**
1598
+ * Alters table schema to add columns if they don't exist
1599
+ * @param tableName Name of the table
1600
+ * @param schema Schema of the table
1601
+ * @param ifNotExists Array of column names to add if they don't exist
1602
+ */
1603
+ async alterTable({
1604
+ tableName,
1605
+ schema,
1606
+ ifNotExists
1607
+ }) {
1608
+ const fullTableName = getTableName({ indexName: tableName, schemaName: getSchemaName(this.schemaName) });
1609
+ try {
1610
+ for (const columnName of ifNotExists) {
1611
+ if (schema[columnName]) {
1612
+ const columnCheckRequest = this.pool.request();
1613
+ columnCheckRequest.input("tableName", fullTableName.replace(/[[\]]/g, "").split(".").pop());
1614
+ columnCheckRequest.input("columnName", columnName);
1615
+ columnCheckRequest.input("schema", this.schemaName || "dbo");
1616
+ const checkSql = `SELECT 1 AS found FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = @schema AND TABLE_NAME = @tableName AND COLUMN_NAME = @columnName`;
1617
+ const checkResult = await columnCheckRequest.query(checkSql);
1618
+ const columnExists = Array.isArray(checkResult.recordset) && checkResult.recordset.length > 0;
1619
+ if (!columnExists) {
1620
+ const columnDef = schema[columnName];
1621
+ const largeDataColumns = [
1622
+ "workingMemory",
1623
+ "snapshot",
1624
+ "metadata",
1625
+ "content",
1626
+ "input",
1627
+ "output",
1628
+ "instructions",
1629
+ "other"
1630
+ ];
1631
+ const useLargeStorage = largeDataColumns.includes(columnName);
1632
+ const isIndexed = !!columnDef.primaryKey;
1633
+ const sqlType = this.getSqlType(columnDef.type, isIndexed, useLargeStorage);
1634
+ const nullable = columnDef.nullable === false ? "NOT NULL" : "";
1635
+ const defaultValue = columnDef.nullable === false ? this.getDefaultValue(columnDef.type) : "";
1636
+ const parsedColumnName = utils.parseSqlIdentifier(columnName, "column name");
1637
+ const alterSql = `ALTER TABLE ${fullTableName} ADD [${parsedColumnName}] ${sqlType} ${nullable} ${defaultValue}`.trim();
1638
+ await this.pool.request().query(alterSql);
1639
+ this.logger?.debug?.(`Ensured column ${parsedColumnName} exists in table ${fullTableName}`);
1640
+ }
1641
+ }
1642
+ }
1643
+ } catch (error$1) {
1644
+ throw new error.MastraError(
1645
+ {
1646
+ id: storage.createStorageErrorId("MSSQL", "ALTER_TABLE", "FAILED"),
1647
+ domain: error.ErrorDomain.STORAGE,
1648
+ category: error.ErrorCategory.THIRD_PARTY,
1649
+ details: {
1650
+ tableName
1651
+ }
1652
+ },
1653
+ error$1
1654
+ );
1655
+ }
1656
+ }
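The additive migration path is `alterTable`, which only adds the columns named in `ifNotExists` that are genuinely missing, reusing the same type mapping as `createTable`; names below are placeholders:

await operations.alterTable({
  tableName: 'my_custom_table',
  schema: {
    archived: { type: 'boolean', nullable: false },   // added as BIT NOT NULL DEFAULT 0
  },
  ifNotExists: ['archived'],
});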
1657
+ async load({ tableName, keys }) {
1658
+ try {
1659
+ const keyEntries = Object.entries(keys).map(([key, value]) => [utils.parseSqlIdentifier(key, "column name"), value]);
1660
+ const conditions = keyEntries.map(([key], i) => `[${key}] = @param${i}`).join(" AND ");
1661
+ const sql5 = `SELECT * FROM ${getTableName({ indexName: tableName, schemaName: getSchemaName(this.schemaName) })} WHERE ${conditions}`;
1662
+ const request = this.pool.request();
1663
+ keyEntries.forEach(([key, value], i) => {
1664
+ const preparedValue = this.prepareValue(value, key, tableName);
1665
+ if (preparedValue === null || preparedValue === void 0) {
1666
+ request.input(`param${i}`, this.getMssqlType(tableName, key), null);
1667
+ } else {
1668
+ request.input(`param${i}`, preparedValue);
1669
+ }
1670
+ });
1671
+ const resultSet = await request.query(sql5);
1672
+ const result = resultSet.recordset[0] || null;
1673
+ if (!result) {
1674
+ return null;
1675
+ }
1676
+ if (tableName === storage.TABLE_WORKFLOW_SNAPSHOT) {
1677
+ const snapshot = result;
1678
+ if (typeof snapshot.snapshot === "string") {
1679
+ snapshot.snapshot = JSON.parse(snapshot.snapshot);
1680
+ }
1681
+ return snapshot;
1682
+ }
1683
+ return result;
1684
+ } catch (error$1) {
1685
+ throw new error.MastraError(
1686
+ {
1687
+ id: storage.createStorageErrorId("MSSQL", "LOAD", "FAILED"),
1688
+ domain: error.ErrorDomain.STORAGE,
1689
+ category: error.ErrorCategory.THIRD_PARTY,
1690
+ details: {
1691
+ tableName
1692
+ }
1693
+ },
1694
+ error$1
1695
+ );
1696
+ }
1697
+ }
1698
+ async batchInsert({ tableName, records }) {
1699
+ const transaction = this.pool.transaction();
1700
+ try {
1701
+ await transaction.begin();
1702
+ for (const record of records) {
1703
+ await this.insert({ tableName, record, transaction });
1704
+ }
1705
+ await transaction.commit();
1706
+ } catch (error$1) {
1707
+ await transaction.rollback();
1708
+ throw new error.MastraError(
1709
+ {
1710
+ id: storage.createStorageErrorId("MSSQL", "BATCH_INSERT", "FAILED"),
1711
+ domain: error.ErrorDomain.STORAGE,
1712
+ category: error.ErrorCategory.THIRD_PARTY,
1713
+ details: {
1714
+ tableName,
1715
+ numberOfRecords: records.length
1716
+ }
1717
+ },
1718
+ error$1
1719
+ );
1720
+ }
1721
+ }
1722
+ async dropTable({ tableName }) {
1723
+ try {
1724
+ const tableNameWithSchema = getTableName({ indexName: tableName, schemaName: getSchemaName(this.schemaName) });
1725
+ await this.pool.request().query(`DROP TABLE IF EXISTS ${tableNameWithSchema}`);
1726
+ } catch (error$1) {
1727
+ throw new error.MastraError(
1728
+ {
1729
+ id: storage.createStorageErrorId("MSSQL", "DROP_TABLE", "FAILED"),
1730
+ domain: error.ErrorDomain.STORAGE,
1731
+ category: error.ErrorCategory.THIRD_PARTY,
1732
+ details: {
1733
+ tableName
1734
+ }
1735
+ },
1736
+ error$1
1737
+ );
1738
+ }
1739
+ }
1740
+ /**
1741
+ * Prepares a value for database operations, handling Date objects and JSON serialization
1742
+ */
1743
+ prepareValue(value, columnName, tableName) {
1744
+ if (value === null || value === void 0) {
1745
+ return value;
1746
+ }
1747
+ if (value instanceof Date) {
1748
+ return value;
1749
+ }
1750
+ const schema = storage.TABLE_SCHEMAS[tableName];
1751
+ const columnSchema = schema?.[columnName];
1752
+ if (columnSchema?.type === "boolean") {
1753
+ return value ? 1 : 0;
1754
+ }
1755
+ if (columnSchema?.type === "jsonb") {
1756
+ if (typeof value === "string") {
1757
+ const trimmed = value.trim();
1758
+ if (trimmed.length > 0) {
1759
+ try {
1760
+ JSON.parse(trimmed);
1761
+ return trimmed;
1762
+ } catch {
1763
+ }
1764
+ }
1765
+ return JSON.stringify(value);
1766
+ }
1767
+ if (typeof value === "bigint") {
1768
+ return value.toString();
1769
+ }
1770
+ return JSON.stringify(value);
1771
+ }
1772
+ if (typeof value === "object") {
1773
+ return JSON.stringify(value);
1774
+ }
1775
+ return value;
1776
+ }
1777
+ /**
1778
+ * Maps TABLE_SCHEMAS types to mssql param types (used when value is null)
1779
+ */
1780
+ getMssqlType(tableName, columnName) {
1781
+ const col = storage.TABLE_SCHEMAS[tableName]?.[columnName];
1782
+ switch (col?.type) {
1783
+ case "text":
1784
+ return sql2__default.default.NVarChar;
1785
+ case "timestamp":
1786
+ return sql2__default.default.DateTime2;
1787
+ case "uuid":
1788
+ return sql2__default.default.UniqueIdentifier;
1789
+ case "jsonb":
1790
+ return sql2__default.default.NVarChar;
1791
+ case "integer":
1792
+ return sql2__default.default.Int;
1793
+ case "bigint":
1794
+ return sql2__default.default.BigInt;
1795
+ case "float":
1796
+ return sql2__default.default.Float;
1797
+ case "boolean":
1798
+ return sql2__default.default.Bit;
1799
+ default:
1800
+ return sql2__default.default.NVarChar;
1801
+ }
1802
+ }
1803
+ /**
1804
+ * Update a single record in the database
1805
+ */
1806
+ async update({
1807
+ tableName,
1808
+ keys,
1809
+ data,
1810
+ transaction
1811
+ }) {
1812
+ try {
1813
+ if (!data || Object.keys(data).length === 0) {
1814
+ throw new error.MastraError({
1815
+ id: storage.createStorageErrorId("MSSQL", "UPDATE", "EMPTY_DATA"),
1816
+ domain: error.ErrorDomain.STORAGE,
1817
+ category: error.ErrorCategory.USER,
1818
+ text: "Cannot update with empty data payload"
1819
+ });
1820
+ }
1821
+ if (!keys || Object.keys(keys).length === 0) {
1822
+ throw new error.MastraError({
1823
+ id: storage.createStorageErrorId("MSSQL", "UPDATE", "EMPTY_KEYS"),
1824
+ domain: error.ErrorDomain.STORAGE,
1825
+ category: error.ErrorCategory.USER,
1826
+ text: "Cannot update without keys to identify records"
1827
+ });
1828
+ }
1829
+ const setClauses = [];
1830
+ const request = transaction ? transaction.request() : this.pool.request();
1831
+ let paramIndex = 0;
1832
+ Object.entries(data).forEach(([key, value]) => {
1833
+ const parsedKey = utils.parseSqlIdentifier(key, "column name");
1834
+ const paramName = `set${paramIndex++}`;
1835
+ setClauses.push(`[${parsedKey}] = @${paramName}`);
1836
+ const preparedValue = this.prepareValue(value, key, tableName);
1837
+ if (preparedValue === null || preparedValue === void 0) {
1838
+ request.input(paramName, this.getMssqlType(tableName, key), null);
1839
+ } else {
1840
+ request.input(paramName, preparedValue);
1841
+ }
1842
+ });
1843
+ const whereConditions = [];
1844
+ Object.entries(keys).forEach(([key, value]) => {
1845
+ const parsedKey = utils.parseSqlIdentifier(key, "column name");
1846
+ const paramName = `where${paramIndex++}`;
1847
+ whereConditions.push(`[${parsedKey}] = @${paramName}`);
1848
+ const preparedValue = this.prepareValue(value, key, tableName);
1849
+ if (preparedValue === null || preparedValue === void 0) {
1850
+ request.input(paramName, this.getMssqlType(tableName, key), null);
1851
+ } else {
1852
+ request.input(paramName, preparedValue);
1853
+ }
1854
+ });
1855
+ const tableName_ = getTableName({
1856
+ indexName: tableName,
1857
+ schemaName: getSchemaName(this.schemaName)
1858
+ });
1859
+ const updateSql = `UPDATE ${tableName_} SET ${setClauses.join(", ")} WHERE ${whereConditions.join(" AND ")}`;
1860
+ await request.query(updateSql);
1861
+ } catch (error$1) {
1862
+ throw new error.MastraError(
1863
+ {
1864
+ id: storage.createStorageErrorId("MSSQL", "UPDATE", "FAILED"),
1865
+ domain: error.ErrorDomain.STORAGE,
1866
+ category: error.ErrorCategory.THIRD_PARTY,
1867
+ details: {
1868
+ tableName
1869
+ }
1870
+ },
1871
+ error$1
1872
+ );
1873
+ }
1874
+ }
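A sketch of the keyed `update` helper (table, key and data values are placeholders). Both `keys` and `data` must be non-empty, otherwise a USER-category MastraError is thrown before any SQL runs, and values pass through the same `prepareValue` serialization as inserts:

await operations.update({
  tableName: 'my_custom_table',
  keys: { id: 'row-1' },                                           // builds the WHERE clause
  data: { content: { status: 'done' }, updatedAt: new Date() },    // builds the SET clause
});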
1875
+ /**
1876
+ * Update multiple records in a single batch transaction
1877
+ */
1878
+ async batchUpdate({
1879
+ tableName,
1880
+ updates
1881
+ }) {
1882
+ const transaction = this.pool.transaction();
1883
+ try {
1884
+ await transaction.begin();
1885
+ for (const { keys, data } of updates) {
1886
+ await this.update({ tableName, keys, data, transaction });
1887
+ }
1888
+ await transaction.commit();
1889
+ } catch (error$1) {
1890
+ await transaction.rollback();
1891
+ throw new error.MastraError(
1892
+ {
1893
+ id: storage.createStorageErrorId("MSSQL", "BATCH_UPDATE", "FAILED"),
1894
+ domain: error.ErrorDomain.STORAGE,
1895
+ category: error.ErrorCategory.THIRD_PARTY,
1896
+ details: {
1897
+ tableName,
1898
+ numberOfRecords: updates.length
1899
+ }
1900
+ },
1901
+ error$1
1902
+ );
1903
+ }
1904
+ }
1905
+ /**
1906
+ * Delete multiple records by keys
1907
+ */
1908
+ async batchDelete({ tableName, keys }) {
1909
+ if (keys.length === 0) {
1910
+ return;
1911
+ }
1912
+ const tableName_ = getTableName({
1913
+ indexName: tableName,
1914
+ schemaName: getSchemaName(this.schemaName)
1915
+ });
1916
+ const transaction = this.pool.transaction();
1917
+ try {
1918
+ await transaction.begin();
1919
+ for (const keySet of keys) {
1920
+ const conditions = [];
1921
+ const request = transaction.request();
1922
+ let paramIndex = 0;
1923
+ Object.entries(keySet).forEach(([key, value]) => {
1924
+ const parsedKey = utils.parseSqlIdentifier(key, "column name");
1925
+ const paramName = `p${paramIndex++}`;
1926
+ conditions.push(`[${parsedKey}] = @${paramName}`);
1927
+ const preparedValue = this.prepareValue(value, key, tableName);
1928
+ if (preparedValue === null || preparedValue === void 0) {
1929
+ request.input(paramName, this.getMssqlType(tableName, key), null);
1930
+ } else {
1931
+ request.input(paramName, preparedValue);
1932
+ }
1933
+ });
1934
+ const deleteSql = `DELETE FROM ${tableName_} WHERE ${conditions.join(" AND ")}`;
1935
+ await request.query(deleteSql);
1936
+ }
1937
+ await transaction.commit();
1938
+ } catch (error$1) {
1939
+ await transaction.rollback();
1940
+ throw new error.MastraError(
1941
+ {
1942
+ id: storage.createStorageErrorId("MSSQL", "BATCH_DELETE", "FAILED"),
1943
+ domain: error.ErrorDomain.STORAGE,
1944
+ category: error.ErrorCategory.THIRD_PARTY,
1945
+ details: {
1946
+ tableName,
1947
+ numberOfRecords: keys.length
1948
+ }
1949
+ },
1950
+ error$1
1951
+ );
1952
+ }
1953
+ }
1954
+ /**
1955
+ * Create a new index on a table
1956
+ */
1957
+ async createIndex(options) {
1958
+ try {
1959
+ const { name, table, columns, unique = false, where } = options;
1960
+ const schemaName = this.schemaName || "dbo";
1961
+ const fullTableName = getTableName({
1962
+ indexName: table,
1963
+ schemaName: getSchemaName(this.schemaName)
1964
+ });
1965
+ const indexNameSafe = utils.parseSqlIdentifier(name, "index name");
1966
+ const checkRequest = this.pool.request();
1967
+ checkRequest.input("indexName", indexNameSafe);
1968
+ checkRequest.input("schemaName", schemaName);
1969
+ checkRequest.input("tableName", table);
1970
+ const indexExists = await checkRequest.query(`
1971
+ SELECT 1 as found
1972
+ FROM sys.indexes i
1973
+ INNER JOIN sys.tables t ON i.object_id = t.object_id
1974
+ INNER JOIN sys.schemas s ON t.schema_id = s.schema_id
1975
+ WHERE i.name = @indexName
1976
+ AND s.name = @schemaName
1977
+ AND t.name = @tableName
1978
+ `);
1979
+ if (indexExists.recordset && indexExists.recordset.length > 0) {
1980
+ return;
1981
+ }
1982
+ const uniqueStr = unique ? "UNIQUE " : "";
1983
+ const columnsStr = columns.map((col) => {
1984
+ if (col.includes(" DESC") || col.includes(" ASC")) {
1985
+ const [colName, ...modifiers] = col.split(" ");
1986
+ if (!colName) {
1987
+ throw new Error(`Invalid column specification: ${col}`);
1988
+ }
1989
+ return `[${utils.parseSqlIdentifier(colName, "column name")}] ${modifiers.join(" ")}`;
1990
+ }
1991
+ return `[${utils.parseSqlIdentifier(col, "column name")}]`;
1992
+ }).join(", ");
1993
+ const whereStr = where ? ` WHERE ${where}` : "";
1994
+ const createIndexSql = `CREATE ${uniqueStr}INDEX [${indexNameSafe}] ON ${fullTableName} (${columnsStr})${whereStr}`;
1995
+ await this.pool.request().query(createIndexSql);
1996
+ } catch (error$1) {
1997
+ throw new error.MastraError(
1998
+ {
1999
+ id: storage.createStorageErrorId("MSSQL", "INDEX_CREATE", "FAILED"),
2000
+ domain: error.ErrorDomain.STORAGE,
2001
+ category: error.ErrorCategory.THIRD_PARTY,
2002
+ details: {
2003
+ indexName: options.name,
2004
+ tableName: options.table
2005
+ }
2006
+ },
2007
+ error$1
2008
+ );
2009
+ }
2010
+ }
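A sketch of `createIndex`; index, table and column names are placeholders. A trailing ` ASC`/` DESC` on a column is preserved, `unique: true` adds UNIQUE, `where` becomes a filtered-index predicate passed through verbatim, and the call is a no-op when an index of that name already exists on the table:

await operations.createIndex({
  name: 'idx_my_custom_table_createdAt',
  table: 'my_custom_table',
  columns: ['createdAt DESC'],
  unique: false,
  where: '[archived] = 0',     // optional filtered-index predicate
});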
2011
+ /**
2012
+ * Drop an existing index
2013
+ */
2014
+ async dropIndex(indexName) {
2015
+ try {
2016
+ const schemaName = this.schemaName || "dbo";
2017
+ const indexNameSafe = utils.parseSqlIdentifier(indexName, "index name");
2018
+ const checkRequest = this.pool.request();
2019
+ checkRequest.input("indexName", indexNameSafe);
2020
+ checkRequest.input("schemaName", schemaName);
2021
+ const result = await checkRequest.query(`
2022
+ SELECT t.name as table_name
2023
+ FROM sys.indexes i
2024
+ INNER JOIN sys.tables t ON i.object_id = t.object_id
2025
+ INNER JOIN sys.schemas s ON t.schema_id = s.schema_id
2026
+ WHERE i.name = @indexName
2027
+ AND s.name = @schemaName
2028
+ `);
2029
+ if (!result.recordset || result.recordset.length === 0) {
2030
+ return;
2031
+ }
2032
+ if (result.recordset.length > 1) {
2033
+ const tables = result.recordset.map((r) => r.table_name).join(", ");
2034
+ throw new error.MastraError({
2035
+ id: storage.createStorageErrorId("MSSQL", "INDEX", "AMBIGUOUS"),
2036
+ domain: error.ErrorDomain.STORAGE,
2037
+ category: error.ErrorCategory.USER,
2038
+ text: `Index "${indexNameSafe}" exists on multiple tables (${tables}) in schema "${schemaName}". Please drop indexes manually or ensure unique index names.`
2039
+ });
2040
+ }
2041
+ const tableName = result.recordset[0].table_name;
2042
+ const fullTableName = getTableName({
2043
+ indexName: tableName,
2044
+ schemaName: getSchemaName(this.schemaName)
2045
+ });
2046
+ const dropSql = `DROP INDEX [${indexNameSafe}] ON ${fullTableName}`;
2047
+ await this.pool.request().query(dropSql);
2048
+ } catch (error$1) {
2049
+ throw new error.MastraError(
2050
+ {
2051
+ id: storage.createStorageErrorId("MSSQL", "INDEX_DROP", "FAILED"),
2052
+ domain: error.ErrorDomain.STORAGE,
2053
+ category: error.ErrorCategory.THIRD_PARTY,
2054
+ details: {
2055
+ indexName
2056
+ }
2057
+ },
2058
+ error$1
2059
+ );
2060
+ }
2061
+ }
2062
+ /**
2063
+ * List indexes for a specific table or all tables
2064
+ */
2065
+ async listIndexes(tableName) {
2066
+ try {
2067
+ const schemaName = this.schemaName || "dbo";
2068
+ let query;
2069
+ const request = this.pool.request();
2070
+ request.input("schemaName", schemaName);
2071
+ if (tableName) {
2072
+ query = `
2073
+ SELECT
2074
+ i.name as name,
2075
+ o.name as [table],
2076
+ i.is_unique as is_unique,
2077
+ CAST(SUM(s.used_page_count) * 8 / 1024.0 AS VARCHAR(50)) + ' MB' as size
2078
+ FROM sys.indexes i
2079
+ INNER JOIN sys.objects o ON i.object_id = o.object_id
2080
+ INNER JOIN sys.schemas sch ON o.schema_id = sch.schema_id
2081
+ LEFT JOIN sys.dm_db_partition_stats s ON i.object_id = s.object_id AND i.index_id = s.index_id
2082
+ WHERE sch.name = @schemaName
2083
+ AND o.name = @tableName
2084
+ AND i.name IS NOT NULL
2085
+ GROUP BY i.name, o.name, i.is_unique
2086
+ `;
2087
+ request.input("tableName", tableName);
2088
+ } else {
2089
+ query = `
2090
+ SELECT
2091
+ i.name as name,
2092
+ o.name as [table],
2093
+ i.is_unique as is_unique,
2094
+ CAST(SUM(s.used_page_count) * 8 / 1024.0 AS VARCHAR(50)) + ' MB' as size
2095
+ FROM sys.indexes i
2096
+ INNER JOIN sys.objects o ON i.object_id = o.object_id
2097
+ INNER JOIN sys.schemas sch ON o.schema_id = sch.schema_id
2098
+ LEFT JOIN sys.dm_db_partition_stats s ON i.object_id = s.object_id AND i.index_id = s.index_id
2099
+ WHERE sch.name = @schemaName
2100
+ AND i.name IS NOT NULL
2101
+ GROUP BY i.name, o.name, i.is_unique
2102
+ `;
2103
+ }
2104
+ const result = await request.query(query);
2105
+ const indexes = [];
2106
+ for (const row of result.recordset) {
2107
+ const colRequest = this.pool.request();
2108
+ colRequest.input("indexName", row.name);
2109
+ colRequest.input("schemaName", schemaName);
2110
+ const colResult = await colRequest.query(`
2111
+ SELECT c.name as column_name
2112
+ FROM sys.indexes i
2113
+ INNER JOIN sys.index_columns ic ON i.object_id = ic.object_id AND i.index_id = ic.index_id
2114
+ INNER JOIN sys.columns c ON ic.object_id = c.object_id AND ic.column_id = c.column_id
2115
+ INNER JOIN sys.objects o ON i.object_id = o.object_id
2116
+ INNER JOIN sys.schemas s ON o.schema_id = s.schema_id
2117
+ WHERE i.name = @indexName
2118
+ AND s.name = @schemaName
2119
+ ORDER BY ic.key_ordinal
2120
+ `);
2121
+ indexes.push({
2122
+ name: row.name,
2123
+ table: row.table,
2124
+ columns: colResult.recordset.map((c) => c.column_name),
2125
+ unique: row.is_unique || false,
2126
+ size: row.size || "0 MB",
2127
+ definition: ""
2128
+ // MSSQL doesn't store definition like PG
2129
+ });
2130
+ }
2131
+ return indexes;
2132
+ } catch (error$1) {
2133
+ throw new error.MastraError(
2134
+ {
2135
+ id: storage.createStorageErrorId("MSSQL", "INDEX_LIST", "FAILED"),
2136
+ domain: error.ErrorDomain.STORAGE,
2137
+ category: error.ErrorCategory.THIRD_PARTY,
2138
+ details: tableName ? {
2139
+ tableName
2140
+ } : {}
2141
+ },
2142
+ error$1
2143
+ );
2144
+ }
2145
+ }
2146
+ /**
2147
+ * Get detailed statistics for a specific index
2148
+ */
2149
+ async describeIndex(indexName) {
2150
+ try {
2151
+ const schemaName = this.schemaName || "dbo";
2152
+ const request = this.pool.request();
2153
+ request.input("indexName", indexName);
2154
+ request.input("schemaName", schemaName);
2155
+ const query = `
2156
+ SELECT
2157
+ i.name as name,
2158
+ o.name as [table],
2159
+ i.is_unique as is_unique,
2160
+ CAST(SUM(s.used_page_count) * 8 / 1024.0 AS VARCHAR(50)) + ' MB' as size,
2161
+ i.type_desc as method,
2162
+ ISNULL(us.user_scans, 0) as scans,
2163
+ ISNULL(us.user_seeks + us.user_scans, 0) as tuples_read,
2164
+ ISNULL(us.user_lookups, 0) as tuples_fetched
2165
+ FROM sys.indexes i
2166
+ INNER JOIN sys.objects o ON i.object_id = o.object_id
2167
+ INNER JOIN sys.schemas sch ON o.schema_id = sch.schema_id
2168
+ LEFT JOIN sys.dm_db_partition_stats s ON i.object_id = s.object_id AND i.index_id = s.index_id
2169
+ LEFT JOIN sys.dm_db_index_usage_stats us ON i.object_id = us.object_id AND i.index_id = us.index_id
2170
+ WHERE i.name = @indexName
2171
+ AND sch.name = @schemaName
2172
+ GROUP BY i.name, o.name, i.is_unique, i.type_desc, us.user_seeks, us.user_scans, us.user_lookups
2173
+ `;
2174
+ const result = await request.query(query);
2175
+ if (!result.recordset || result.recordset.length === 0) {
2176
+ throw new Error(`Index "${indexName}" not found in schema "${schemaName}"`);
2177
+ }
2178
+ const row = result.recordset[0];
2179
+ const colRequest = this.pool.request();
2180
+ colRequest.input("indexName", indexName);
2181
+ colRequest.input("schemaName", schemaName);
2182
+ const colResult = await colRequest.query(`
2183
+ SELECT c.name as column_name
2184
+ FROM sys.indexes i
2185
+ INNER JOIN sys.index_columns ic ON i.object_id = ic.object_id AND i.index_id = ic.index_id
2186
+ INNER JOIN sys.columns c ON ic.object_id = c.object_id AND ic.column_id = c.column_id
2187
+ INNER JOIN sys.objects o ON i.object_id = o.object_id
2188
+ INNER JOIN sys.schemas s ON o.schema_id = s.schema_id
2189
+ WHERE i.name = @indexName
2190
+ AND s.name = @schemaName
2191
+ ORDER BY ic.key_ordinal
2192
+ `);
2193
+ return {
2194
+ name: row.name,
2195
+ table: row.table,
2196
+ columns: colResult.recordset.map((c) => c.column_name),
2197
+ unique: row.is_unique || false,
2198
+ size: row.size || "0 MB",
2199
+ definition: "",
2200
+ method: row.method?.toLowerCase() || "nonclustered",
2201
+ scans: Number(row.scans) || 0,
2202
+ tuples_read: Number(row.tuples_read) || 0,
2203
+ tuples_fetched: Number(row.tuples_fetched) || 0
2204
+ };
2205
+ } catch (error$1) {
2206
+ throw new error.MastraError(
2207
+ {
2208
+ id: storage.createStorageErrorId("MSSQL", "INDEX_DESCRIBE", "FAILED"),
2209
+ domain: error.ErrorDomain.STORAGE,
2210
+ category: error.ErrorCategory.THIRD_PARTY,
2211
+ details: {
2212
+ indexName
2213
+ }
2214
+ },
2215
+ error$1
2216
+ );
2217
+ }
2218
+ }
2219
+ /**
2220
+ * Returns definitions for automatic performance indexes
2221
+ * IMPORTANT: Uses seq_id DESC instead of createdAt DESC for MSSQL due to millisecond accuracy limitations
2222
+ * NOTE: Using NVARCHAR(400) for text columns (800 bytes) leaves room for composite indexes
2223
+ */
2224
+ getAutomaticIndexDefinitions() {
2225
+ const schemaPrefix = this.schemaName ? `${this.schemaName}_` : "";
2226
+ return [
2227
+ // Composite indexes for optimal filtering + sorting performance
2228
+ // NVARCHAR(400) = 800 bytes, plus BIGINT (8 bytes) = 808 bytes total (under 900-byte limit)
2229
+ {
2230
+ name: `${schemaPrefix}mastra_threads_resourceid_seqid_idx`,
2231
+ table: storage.TABLE_THREADS,
2232
+ columns: ["resourceId", "seq_id DESC"]
2233
+ },
2234
+ {
2235
+ name: `${schemaPrefix}mastra_messages_thread_id_seqid_idx`,
2236
+ table: storage.TABLE_MESSAGES,
2237
+ columns: ["thread_id", "seq_id DESC"]
2238
+ },
2239
+ {
2240
+ name: `${schemaPrefix}mastra_traces_name_seqid_idx`,
2241
+ table: storage.TABLE_TRACES,
2242
+ columns: ["name", "seq_id DESC"]
2243
+ },
2244
+ {
2245
+ name: `${schemaPrefix}mastra_scores_trace_id_span_id_seqid_idx`,
2246
+ table: storage.TABLE_SCORERS,
2247
+ columns: ["traceId", "spanId", "seq_id DESC"]
2248
+ },
2249
+ // Spans indexes for optimal trace querying
2250
+ {
2251
+ name: `${schemaPrefix}mastra_ai_spans_traceid_startedat_idx`,
2252
+ table: storage.TABLE_SPANS,
2253
+ columns: ["traceId", "startedAt DESC"]
2254
+ },
2255
+ {
2256
+ name: `${schemaPrefix}mastra_ai_spans_parentspanid_startedat_idx`,
2257
+ table: storage.TABLE_SPANS,
2258
+ columns: ["parentSpanId", "startedAt DESC"]
2259
+ },
2260
+ {
2261
+ name: `${schemaPrefix}mastra_ai_spans_name_idx`,
2262
+ table: storage.TABLE_SPANS,
2263
+ columns: ["name"]
2264
+ },
2265
+ {
2266
+ name: `${schemaPrefix}mastra_ai_spans_spantype_startedat_idx`,
2267
+ table: storage.TABLE_SPANS,
2268
+ columns: ["spanType", "startedAt DESC"]
2269
+ }
2270
+ ];
2271
+ }
2272
+ /**
2273
+ * Creates automatic indexes for optimal query performance
2274
+ * Uses getAutomaticIndexDefinitions() to determine which indexes to create
2275
+ */
2276
+ async createAutomaticIndexes() {
2277
+ try {
2278
+ const indexes = this.getAutomaticIndexDefinitions();
2279
+ for (const indexOptions of indexes) {
2280
+ try {
2281
+ await this.createIndex(indexOptions);
2282
+ } catch (error) {
2283
+ this.logger?.warn?.(`Failed to create index ${indexOptions.name}:`, error);
2284
+ }
2285
+ }
2286
+ } catch (error$1) {
2287
+ throw new error.MastraError(
2288
+ {
2289
+ id: storage.createStorageErrorId("MSSQL", "CREATE_PERFORMANCE_INDEXES", "FAILED"),
2290
+ domain: error.ErrorDomain.STORAGE,
2291
+ category: error.ErrorCategory.THIRD_PARTY
2292
+ },
2293
+ error$1
2294
+ );
2295
+ }
2296
+ }
2297
+ };
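
The class closed above adds index management (createIndex, dropIndex, listIndexes, describeIndex, plus the automatic index helpers) to the store operations layer. A rough usage sketch follows; it is illustrative only and not part of the published bundle, and it assumes `operations` is the store's operations instance (e.g. store.stores.operations) with placeholder table and index names.

// Sketch only; the index and table names are hypothetical placeholders.
await operations.createIndex({
  name: 'mastra_messages_thread_id_custom_idx',  // hypothetical index name
  table: 'mastra_messages',                      // assumed messages table name
  columns: ['thread_id', 'seq_id DESC'],         // ASC/DESC modifiers are parsed per column
  unique: false,
});
const indexes = await operations.listIndexes('mastra_messages');
console.log(indexes.map(ix => `${ix.name} on ${ix.table} (${ix.columns.join(', ')})`));
const stats = await operations.describeIndex('mastra_messages_thread_id_custom_idx');
console.log(stats.size, stats.scans, stats.tuples_read);
await operations.dropIndex('mastra_messages_thread_id_custom_idx');
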
2298
+ function transformScoreRow(row) {
2299
+ return storage.transformScoreRow(row, {
2300
+ convertTimestamps: true
2301
+ });
2302
+ }
2303
+ var ScoresMSSQL = class extends storage.ScoresStorage {
2304
+ pool;
2305
+ operations;
2306
+ schema;
2307
+ constructor({
2308
+ pool,
2309
+ operations,
2310
+ schema
2311
+ }) {
2312
+ super();
2313
+ this.pool = pool;
2314
+ this.operations = operations;
2315
+ this.schema = schema;
2316
+ }
2317
+ async getScoreById({ id }) {
2318
+ try {
2319
+ const request = this.pool.request();
2320
+ request.input("p1", id);
2321
+ const result = await request.query(
2322
+ `SELECT * FROM ${getTableName({ indexName: storage.TABLE_SCORERS, schemaName: getSchemaName(this.schema) })} WHERE id = @p1`
2323
+ );
2324
+ if (result.recordset.length === 0) {
2325
+ return null;
2326
+ }
2327
+ return transformScoreRow(result.recordset[0]);
2328
+ } catch (error$1) {
2329
+ throw new error.MastraError(
2330
+ {
2331
+ id: storage.createStorageErrorId("MSSQL", "GET_SCORE_BY_ID", "FAILED"),
2332
+ domain: error.ErrorDomain.STORAGE,
2333
+ category: error.ErrorCategory.THIRD_PARTY,
2334
+ details: { id }
2335
+ },
2336
+ error$1
2337
+ );
2338
+ }
2339
+ }
2340
+ async saveScore(score) {
2341
+ let validatedScore;
2342
+ try {
2343
+ validatedScore = evals.saveScorePayloadSchema.parse(score);
2344
+ } catch (error$1) {
2345
+ throw new error.MastraError(
2346
+ {
2347
+ id: storage.createStorageErrorId("MSSQL", "SAVE_SCORE", "VALIDATION_FAILED"),
2348
+ domain: error.ErrorDomain.STORAGE,
2349
+ category: error.ErrorCategory.USER,
2350
+ details: {
2351
+ scorer: score.scorer?.id ?? "unknown",
2352
+ entityId: score.entityId ?? "unknown",
2353
+ entityType: score.entityType ?? "unknown",
2354
+ traceId: score.traceId ?? "",
2355
+ spanId: score.spanId ?? ""
2356
+ }
2357
+ },
2358
+ error$1
2359
+ );
2360
+ }
2361
+ try {
2362
+ const scoreId = crypto.randomUUID();
2363
+ const now = /* @__PURE__ */ new Date();
2364
+ const {
2365
+ scorer,
2366
+ preprocessStepResult,
2367
+ analyzeStepResult,
2368
+ metadata,
2369
+ input,
2370
+ output,
2371
+ additionalContext,
2372
+ requestContext,
2373
+ entity,
2374
+ ...rest
2375
+ } = validatedScore;
2376
+ await this.operations.insert({
2377
+ tableName: storage.TABLE_SCORERS,
2378
+ record: {
2379
+ id: scoreId,
2380
+ ...rest,
2381
+ input: input || "",
2382
+ output: output || "",
2383
+ preprocessStepResult: preprocessStepResult || null,
2384
+ analyzeStepResult: analyzeStepResult || null,
2385
+ metadata: metadata || null,
2386
+ additionalContext: additionalContext || null,
2387
+ requestContext: requestContext || null,
2388
+ entity: entity || null,
2389
+ scorer: scorer || null,
2390
+ createdAt: now.toISOString(),
2391
+ updatedAt: now.toISOString()
2392
+ }
1136
2393
  });
2394
+ return { score: { ...validatedScore, id: scoreId, createdAt: now, updatedAt: now } };
2395
+ } catch (error$1) {
2396
+ throw new error.MastraError(
2397
+ {
2398
+ id: storage.createStorageErrorId("MSSQL", "SAVE_SCORE", "FAILED"),
2399
+ domain: error.ErrorDomain.STORAGE,
2400
+ category: error.ErrorCategory.THIRD_PARTY
2401
+ },
2402
+ error$1
2403
+ );
1137
2404
  }
1138
- const thread = await this.getThreadById({ threadId });
1139
- if (!thread) {
1140
- throw new error.MastraError({
1141
- id: "MASTRA_STORAGE_MSSQL_STORE_SAVE_MESSAGES_FAILED",
1142
- domain: error.ErrorDomain.STORAGE,
1143
- category: error.ErrorCategory.THIRD_PARTY,
1144
- text: `Thread ${threadId} not found`,
1145
- details: { threadId }
2405
+ }
2406
+ async listScoresByScorerId({
2407
+ scorerId,
2408
+ pagination,
2409
+ entityId,
2410
+ entityType,
2411
+ source
2412
+ }) {
2413
+ try {
2414
+ const conditions = ["[scorerId] = @p1"];
2415
+ const params = { p1: scorerId };
2416
+ let paramIndex = 2;
2417
+ if (entityId) {
2418
+ conditions.push(`[entityId] = @p${paramIndex}`);
2419
+ params[`p${paramIndex}`] = entityId;
2420
+ paramIndex++;
2421
+ }
2422
+ if (entityType) {
2423
+ conditions.push(`[entityType] = @p${paramIndex}`);
2424
+ params[`p${paramIndex}`] = entityType;
2425
+ paramIndex++;
2426
+ }
2427
+ if (source) {
2428
+ conditions.push(`[source] = @p${paramIndex}`);
2429
+ params[`p${paramIndex}`] = source;
2430
+ paramIndex++;
2431
+ }
2432
+ const whereClause = conditions.join(" AND ");
2433
+ const tableName = getTableName({ indexName: storage.TABLE_SCORERS, schemaName: getSchemaName(this.schema) });
2434
+ const countRequest = this.pool.request();
2435
+ Object.entries(params).forEach(([key, value]) => {
2436
+ countRequest.input(key, value);
2437
+ });
2438
+ const totalResult = await countRequest.query(`SELECT COUNT(*) as count FROM ${tableName} WHERE ${whereClause}`);
2439
+ const total = totalResult.recordset[0]?.count || 0;
2440
+ const { page, perPage: perPageInput } = pagination;
2441
+ if (total === 0) {
2442
+ return {
2443
+ pagination: {
2444
+ total: 0,
2445
+ page,
2446
+ perPage: perPageInput,
2447
+ hasMore: false
2448
+ },
2449
+ scores: []
2450
+ };
2451
+ }
2452
+ const perPage = storage.normalizePerPage(perPageInput, 100);
2453
+ const { offset: start, perPage: perPageForResponse } = storage.calculatePagination(page, perPageInput, perPage);
2454
+ const limitValue = perPageInput === false ? total : perPage;
2455
+ const end = perPageInput === false ? total : start + perPage;
2456
+ const dataRequest = this.pool.request();
2457
+ Object.entries(params).forEach(([key, value]) => {
2458
+ dataRequest.input(key, value);
1146
2459
  });
2460
+ dataRequest.input("perPage", limitValue);
2461
+ dataRequest.input("offset", start);
2462
+ const dataQuery = `SELECT * FROM ${tableName} WHERE ${whereClause} ORDER BY [createdAt] DESC OFFSET @offset ROWS FETCH NEXT @perPage ROWS ONLY`;
2463
+ const result = await dataRequest.query(dataQuery);
2464
+ return {
2465
+ pagination: {
2466
+ total: Number(total),
2467
+ page,
2468
+ perPage: perPageForResponse,
2469
+ hasMore: end < total
2470
+ },
2471
+ scores: result.recordset.map((row) => transformScoreRow(row))
2472
+ };
2473
+ } catch (error$1) {
2474
+ throw new error.MastraError(
2475
+ {
2476
+ id: storage.createStorageErrorId("MSSQL", "LIST_SCORES_BY_SCORER_ID", "FAILED"),
2477
+ domain: error.ErrorDomain.STORAGE,
2478
+ category: error.ErrorCategory.THIRD_PARTY,
2479
+ details: { scorerId }
2480
+ },
2481
+ error$1
2482
+ );
1147
2483
  }
1148
- const tableMessages = this.getTableName(storage.TABLE_MESSAGES);
1149
- const tableThreads = this.getTableName(storage.TABLE_THREADS);
2484
+ }
2485
+ async listScoresByRunId({
2486
+ runId,
2487
+ pagination
2488
+ }) {
2489
+ try {
2490
+ const request = this.pool.request();
2491
+ request.input("p1", runId);
2492
+ const totalResult = await request.query(
2493
+ `SELECT COUNT(*) as count FROM ${getTableName({ indexName: storage.TABLE_SCORERS, schemaName: getSchemaName(this.schema) })} WHERE [runId] = @p1`
2494
+ );
2495
+ const total = totalResult.recordset[0]?.count || 0;
2496
+ const { page, perPage: perPageInput } = pagination;
2497
+ if (total === 0) {
2498
+ return {
2499
+ pagination: {
2500
+ total: 0,
2501
+ page,
2502
+ perPage: perPageInput,
2503
+ hasMore: false
2504
+ },
2505
+ scores: []
2506
+ };
2507
+ }
2508
+ const perPage = storage.normalizePerPage(perPageInput, 100);
2509
+ const { offset: start, perPage: perPageForResponse } = storage.calculatePagination(page, perPageInput, perPage);
2510
+ const limitValue = perPageInput === false ? total : perPage;
2511
+ const end = perPageInput === false ? total : start + perPage;
2512
+ const dataRequest = this.pool.request();
2513
+ dataRequest.input("p1", runId);
2514
+ dataRequest.input("p2", limitValue);
2515
+ dataRequest.input("p3", start);
2516
+ const result = await dataRequest.query(
2517
+ `SELECT * FROM ${getTableName({ indexName: storage.TABLE_SCORERS, schemaName: getSchemaName(this.schema) })} WHERE [runId] = @p1 ORDER BY [createdAt] DESC OFFSET @p3 ROWS FETCH NEXT @p2 ROWS ONLY`
2518
+ );
2519
+ return {
2520
+ pagination: {
2521
+ total: Number(total),
2522
+ page,
2523
+ perPage: perPageForResponse,
2524
+ hasMore: end < total
2525
+ },
2526
+ scores: result.recordset.map((row) => transformScoreRow(row))
2527
+ };
2528
+ } catch (error$1) {
2529
+ throw new error.MastraError(
2530
+ {
2531
+ id: storage.createStorageErrorId("MSSQL", "LIST_SCORES_BY_RUN_ID", "FAILED"),
2532
+ domain: error.ErrorDomain.STORAGE,
2533
+ category: error.ErrorCategory.THIRD_PARTY,
2534
+ details: { runId }
2535
+ },
2536
+ error$1
2537
+ );
2538
+ }
2539
+ }
2540
+ async listScoresByEntityId({
2541
+ entityId,
2542
+ entityType,
2543
+ pagination
2544
+ }) {
2545
+ try {
2546
+ const request = this.pool.request();
2547
+ request.input("p1", entityId);
2548
+ request.input("p2", entityType);
2549
+ const totalResult = await request.query(
2550
+ `SELECT COUNT(*) as count FROM ${getTableName({ indexName: storage.TABLE_SCORERS, schemaName: getSchemaName(this.schema) })} WHERE [entityId] = @p1 AND [entityType] = @p2`
2551
+ );
2552
+ const total = totalResult.recordset[0]?.count || 0;
2553
+ const { page, perPage: perPageInput } = pagination;
2554
+ const perPage = storage.normalizePerPage(perPageInput, 100);
2555
+ const { offset: start, perPage: perPageForResponse } = storage.calculatePagination(page, perPageInput, perPage);
2556
+ if (total === 0) {
2557
+ return {
2558
+ pagination: {
2559
+ total: 0,
2560
+ page,
2561
+ perPage: perPageForResponse,
2562
+ hasMore: false
2563
+ },
2564
+ scores: []
2565
+ };
2566
+ }
2567
+ const limitValue = perPageInput === false ? total : perPage;
2568
+ const end = perPageInput === false ? total : start + perPage;
2569
+ const dataRequest = this.pool.request();
2570
+ dataRequest.input("p1", entityId);
2571
+ dataRequest.input("p2", entityType);
2572
+ dataRequest.input("p3", limitValue);
2573
+ dataRequest.input("p4", start);
2574
+ const result = await dataRequest.query(
2575
+ `SELECT * FROM ${getTableName({ indexName: storage.TABLE_SCORERS, schemaName: getSchemaName(this.schema) })} WHERE [entityId] = @p1 AND [entityType] = @p2 ORDER BY [createdAt] DESC OFFSET @p4 ROWS FETCH NEXT @p3 ROWS ONLY`
2576
+ );
2577
+ return {
2578
+ pagination: {
2579
+ total: Number(total),
2580
+ page,
2581
+ perPage: perPageForResponse,
2582
+ hasMore: end < total
2583
+ },
2584
+ scores: result.recordset.map((row) => transformScoreRow(row))
2585
+ };
2586
+ } catch (error$1) {
2587
+ throw new error.MastraError(
2588
+ {
2589
+ id: storage.createStorageErrorId("MSSQL", "LIST_SCORES_BY_ENTITY_ID", "FAILED"),
2590
+ domain: error.ErrorDomain.STORAGE,
2591
+ category: error.ErrorCategory.THIRD_PARTY,
2592
+ details: { entityId, entityType }
2593
+ },
2594
+ error$1
2595
+ );
2596
+ }
2597
+ }
2598
+ async listScoresBySpan({
2599
+ traceId,
2600
+ spanId,
2601
+ pagination
2602
+ }) {
2603
+ try {
2604
+ const request = this.pool.request();
2605
+ request.input("p1", traceId);
2606
+ request.input("p2", spanId);
2607
+ const totalResult = await request.query(
2608
+ `SELECT COUNT(*) as count FROM ${getTableName({ indexName: storage.TABLE_SCORERS, schemaName: getSchemaName(this.schema) })} WHERE [traceId] = @p1 AND [spanId] = @p2`
2609
+ );
2610
+ const total = totalResult.recordset[0]?.count || 0;
2611
+ const { page, perPage: perPageInput } = pagination;
2612
+ const perPage = storage.normalizePerPage(perPageInput, 100);
2613
+ const { offset: start, perPage: perPageForResponse } = storage.calculatePagination(page, perPageInput, perPage);
2614
+ if (total === 0) {
2615
+ return {
2616
+ pagination: {
2617
+ total: 0,
2618
+ page,
2619
+ perPage: perPageForResponse,
2620
+ hasMore: false
2621
+ },
2622
+ scores: []
2623
+ };
2624
+ }
2625
+ const limitValue = perPageInput === false ? total : perPage;
2626
+ const end = perPageInput === false ? total : start + perPage;
2627
+ const dataRequest = this.pool.request();
2628
+ dataRequest.input("p1", traceId);
2629
+ dataRequest.input("p2", spanId);
2630
+ dataRequest.input("p3", limitValue);
2631
+ dataRequest.input("p4", start);
2632
+ const result = await dataRequest.query(
2633
+ `SELECT * FROM ${getTableName({ indexName: storage.TABLE_SCORERS, schemaName: getSchemaName(this.schema) })} WHERE [traceId] = @p1 AND [spanId] = @p2 ORDER BY [createdAt] DESC OFFSET @p4 ROWS FETCH NEXT @p3 ROWS ONLY`
2634
+ );
2635
+ return {
2636
+ pagination: {
2637
+ total: Number(total),
2638
+ page,
2639
+ perPage: perPageForResponse,
2640
+ hasMore: end < total
2641
+ },
2642
+ scores: result.recordset.map((row) => transformScoreRow(row))
2643
+ };
2644
+ } catch (error$1) {
2645
+ throw new error.MastraError(
2646
+ {
2647
+ id: storage.createStorageErrorId("MSSQL", "LIST_SCORES_BY_SPAN", "FAILED"),
2648
+ domain: error.ErrorDomain.STORAGE,
2649
+ category: error.ErrorCategory.THIRD_PARTY,
2650
+ details: { traceId, spanId }
2651
+ },
2652
+ error$1
2653
+ );
2654
+ }
2655
+ }
2656
+ };
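
ScoresMSSQL, closed above, validates save payloads with saveScorePayloadSchema from @mastra/core/evals and pages every list query with OFFSET ... FETCH NEXT ordered by [createdAt] DESC. A minimal listing sketch, assuming `scores` is the ScoresMSSQL instance (e.g. store.stores.scores) and all IDs are placeholders:

// Sketch only; scorer, trace, and span IDs are hypothetical.
const byScorer = await scores.listScoresByScorerId({
  scorerId: 'answer-relevancy',
  pagination: { page: 0, perPage: 20 },
});
console.log(byScorer.pagination.total, byScorer.pagination.hasMore);

const bySpan = await scores.listScoresBySpan({
  traceId: 'trace-abc',
  spanId: 'span-def',
  pagination: { page: 0, perPage: 10 },
});
console.log(bySpan.scores.length);
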
2657
+ var WorkflowsMSSQL = class extends storage.WorkflowsStorage {
2658
+ pool;
2659
+ operations;
2660
+ schema;
2661
+ constructor({
2662
+ pool,
2663
+ operations,
2664
+ schema
2665
+ }) {
2666
+ super();
2667
+ this.pool = pool;
2668
+ this.operations = operations;
2669
+ this.schema = schema;
2670
+ }
2671
+ parseWorkflowRun(row) {
2672
+ let parsedSnapshot = row.snapshot;
2673
+ if (typeof parsedSnapshot === "string") {
2674
+ try {
2675
+ parsedSnapshot = JSON.parse(row.snapshot);
2676
+ } catch (e) {
2677
+ this.logger?.warn?.(`Failed to parse snapshot for workflow ${row.workflow_name}:`, e);
2678
+ }
2679
+ }
2680
+ return {
2681
+ workflowName: row.workflow_name,
2682
+ runId: row.run_id,
2683
+ snapshot: parsedSnapshot,
2684
+ createdAt: row.createdAt,
2685
+ updatedAt: row.updatedAt,
2686
+ resourceId: row.resourceId
2687
+ };
2688
+ }
2689
+ async updateWorkflowResults({
2690
+ workflowName,
2691
+ runId,
2692
+ stepId,
2693
+ result,
2694
+ requestContext
2695
+ }) {
2696
+ const table = getTableName({ indexName: storage.TABLE_WORKFLOW_SNAPSHOT, schemaName: getSchemaName(this.schema) });
2697
+ const transaction = this.pool.transaction();
1150
2698
  try {
1151
- const transaction = this.pool.transaction();
1152
2699
  await transaction.begin();
2700
+ const selectRequest = new sql2__default.default.Request(transaction);
2701
+ selectRequest.input("workflow_name", workflowName);
2702
+ selectRequest.input("run_id", runId);
2703
+ const existingSnapshotResult = await selectRequest.query(
2704
+ `SELECT snapshot FROM ${table} WITH (UPDLOCK, HOLDLOCK) WHERE workflow_name = @workflow_name AND run_id = @run_id`
2705
+ );
2706
+ let snapshot;
2707
+ if (!existingSnapshotResult.recordset || existingSnapshotResult.recordset.length === 0) {
2708
+ snapshot = {
2709
+ context: {},
2710
+ activePaths: [],
2711
+ activeStepsPath: {},
2712
+ timestamp: Date.now(),
2713
+ suspendedPaths: {},
2714
+ resumeLabels: {},
2715
+ serializedStepGraph: [],
2716
+ status: "pending",
2717
+ value: {},
2718
+ waitingPaths: {},
2719
+ runId,
2720
+ requestContext: {}
2721
+ };
2722
+ } else {
2723
+ const existingSnapshot = existingSnapshotResult.recordset[0].snapshot;
2724
+ snapshot = typeof existingSnapshot === "string" ? JSON.parse(existingSnapshot) : existingSnapshot;
2725
+ }
2726
+ snapshot.context[stepId] = result;
2727
+ snapshot.requestContext = { ...snapshot.requestContext, ...requestContext };
2728
+ const upsertReq = new sql2__default.default.Request(transaction);
2729
+ upsertReq.input("workflow_name", workflowName);
2730
+ upsertReq.input("run_id", runId);
2731
+ upsertReq.input("snapshot", JSON.stringify(snapshot));
2732
+ upsertReq.input("createdAt", sql2__default.default.DateTime2, /* @__PURE__ */ new Date());
2733
+ upsertReq.input("updatedAt", sql2__default.default.DateTime2, /* @__PURE__ */ new Date());
2734
+ await upsertReq.query(
2735
+ `MERGE ${table} AS target
2736
+ USING (SELECT @workflow_name AS workflow_name, @run_id AS run_id) AS src
2737
+ ON target.workflow_name = src.workflow_name AND target.run_id = src.run_id
2738
+ WHEN MATCHED THEN UPDATE SET snapshot = @snapshot, [updatedAt] = @updatedAt
2739
+ WHEN NOT MATCHED THEN INSERT (workflow_name, run_id, snapshot, [createdAt], [updatedAt])
2740
+ VALUES (@workflow_name, @run_id, @snapshot, @createdAt, @updatedAt);`
2741
+ );
2742
+ await transaction.commit();
2743
+ return snapshot.context;
2744
+ } catch (error$1) {
1153
2745
  try {
1154
- for (const message of messages) {
1155
- if (!message.threadId) {
1156
- throw new Error(
1157
- `Expected to find a threadId for message, but couldn't find one. An unexpected error has occurred.`
1158
- );
1159
- }
1160
- if (!message.resourceId) {
1161
- throw new Error(
1162
- `Expected to find a resourceId for message, but couldn't find one. An unexpected error has occurred.`
1163
- );
1164
- }
1165
- const request = transaction.request();
1166
- request.input("id", message.id);
1167
- request.input("thread_id", message.threadId);
1168
- request.input(
1169
- "content",
1170
- typeof message.content === "string" ? message.content : JSON.stringify(message.content)
1171
- );
1172
- request.input("createdAt", message.createdAt.toISOString() || (/* @__PURE__ */ new Date()).toISOString());
1173
- request.input("role", message.role);
1174
- request.input("type", message.type || "v2");
1175
- request.input("resourceId", message.resourceId);
1176
- const mergeSql = `MERGE INTO ${tableMessages} AS target
1177
- USING (SELECT @id AS id) AS src
1178
- ON target.id = src.id
1179
- WHEN MATCHED THEN UPDATE SET
1180
- thread_id = @thread_id,
1181
- content = @content,
1182
- [createdAt] = @createdAt,
1183
- role = @role,
1184
- type = @type,
1185
- resourceId = @resourceId
1186
- WHEN NOT MATCHED THEN INSERT (id, thread_id, content, [createdAt], role, type, resourceId)
1187
- VALUES (@id, @thread_id, @content, @createdAt, @role, @type, @resourceId);`;
1188
- await request.query(mergeSql);
1189
- }
1190
- const threadReq = transaction.request();
1191
- threadReq.input("updatedAt", (/* @__PURE__ */ new Date()).toISOString());
1192
- threadReq.input("id", threadId);
1193
- await threadReq.query(`UPDATE ${tableThreads} SET [updatedAt] = @updatedAt WHERE id = @id`);
1194
- await transaction.commit();
1195
- } catch (error) {
1196
2746
  await transaction.rollback();
1197
- throw error;
2747
+ } catch {
1198
2748
  }
1199
- const messagesWithParsedContent = messages.map((message) => {
1200
- if (typeof message.content === "string") {
1201
- try {
1202
- return { ...message, content: JSON.parse(message.content) };
1203
- } catch {
1204
- return message;
2749
+ throw new error.MastraError(
2750
+ {
2751
+ id: storage.createStorageErrorId("MSSQL", "UPDATE_WORKFLOW_RESULTS", "FAILED"),
2752
+ domain: error.ErrorDomain.STORAGE,
2753
+ category: error.ErrorCategory.THIRD_PARTY,
2754
+ details: {
2755
+ workflowName,
2756
+ runId,
2757
+ stepId
1205
2758
  }
1206
- }
1207
- return message;
1208
- });
1209
- const list = new agent.MessageList().add(messagesWithParsedContent, "memory");
1210
- if (format === "v2") return list.get.all.v2();
1211
- return list.get.all.v1();
2759
+ },
2760
+ error$1
2761
+ );
2762
+ }
2763
+ }
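
updateWorkflowResults above (and updateWorkflowState below) perform a read-modify-write on the snapshot row guarded by UPDLOCK and HOLDLOCK inside a transaction, so concurrent step updates for the same run serialize instead of overwriting each other. A condensed sketch of that pattern; the pool, run id, and table name are assumptions, and the package itself uses MERGE so a missing row is inserted as well:

// Sketch of the lock-then-write pattern; not the package's actual code.
const sql = require('mssql');

async function withLockedSnapshot(pool, runId, mutate) {
  const tx = pool.transaction();
  await tx.begin();
  try {
    const select = new sql.Request(tx);
    select.input('run_id', runId);
    // UPDLOCK + HOLDLOCK keeps the row locked until commit, serializing
    // concurrent read-modify-write cycles on the same snapshot.
    const { recordset } = await select.query(
      `SELECT snapshot FROM [dbo].[workflow_snapshot] WITH (UPDLOCK, HOLDLOCK) WHERE run_id = @run_id`
    );
    const snapshot = recordset.length ? JSON.parse(recordset[0].snapshot) : {};
    const updated = mutate(snapshot);
    const write = new sql.Request(tx);
    write.input('run_id', runId);
    write.input('snapshot', JSON.stringify(updated));
    // The package uses MERGE here so an absent row is inserted too.
    await write.query(`UPDATE [dbo].[workflow_snapshot] SET snapshot = @snapshot WHERE run_id = @run_id`);
    await tx.commit();
    return updated;
  } catch (e) {
    try { await tx.rollback(); } catch {}
    throw e;
  }
}
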
2764
+ async updateWorkflowState({
2765
+ workflowName,
2766
+ runId,
2767
+ opts
2768
+ }) {
2769
+ const table = getTableName({ indexName: storage.TABLE_WORKFLOW_SNAPSHOT, schemaName: getSchemaName(this.schema) });
2770
+ const transaction = this.pool.transaction();
2771
+ try {
2772
+ await transaction.begin();
2773
+ const selectRequest = new sql2__default.default.Request(transaction);
2774
+ selectRequest.input("workflow_name", workflowName);
2775
+ selectRequest.input("run_id", runId);
2776
+ const existingSnapshotResult = await selectRequest.query(
2777
+ `SELECT snapshot FROM ${table} WITH (UPDLOCK, HOLDLOCK) WHERE workflow_name = @workflow_name AND run_id = @run_id`
2778
+ );
2779
+ if (!existingSnapshotResult.recordset || existingSnapshotResult.recordset.length === 0) {
2780
+ await transaction.rollback();
2781
+ return void 0;
2782
+ }
2783
+ const existingSnapshot = existingSnapshotResult.recordset[0].snapshot;
2784
+ const snapshot = typeof existingSnapshot === "string" ? JSON.parse(existingSnapshot) : existingSnapshot;
2785
+ if (!snapshot || !snapshot?.context) {
2786
+ await transaction.rollback();
2787
+ throw new error.MastraError(
2788
+ {
2789
+ id: storage.createStorageErrorId("MSSQL", "UPDATE_WORKFLOW_STATE", "SNAPSHOT_NOT_FOUND"),
2790
+ domain: error.ErrorDomain.STORAGE,
2791
+ category: error.ErrorCategory.SYSTEM,
2792
+ details: {
2793
+ workflowName,
2794
+ runId
2795
+ }
2796
+ },
2797
+ new Error(`Snapshot not found for runId ${runId}`)
2798
+ );
2799
+ }
2800
+ const updatedSnapshot = { ...snapshot, ...opts };
2801
+ const updateRequest = new sql2__default.default.Request(transaction);
2802
+ updateRequest.input("snapshot", JSON.stringify(updatedSnapshot));
2803
+ updateRequest.input("workflow_name", workflowName);
2804
+ updateRequest.input("run_id", runId);
2805
+ updateRequest.input("updatedAt", sql2__default.default.DateTime2, /* @__PURE__ */ new Date());
2806
+ await updateRequest.query(
2807
+ `UPDATE ${table} SET snapshot = @snapshot, [updatedAt] = @updatedAt WHERE workflow_name = @workflow_name AND run_id = @run_id`
2808
+ );
2809
+ await transaction.commit();
2810
+ return updatedSnapshot;
1212
2811
  } catch (error$1) {
2812
+ try {
2813
+ await transaction.rollback();
2814
+ } catch {
2815
+ }
2816
+ if (error$1 instanceof error.MastraError) throw error$1;
1213
2817
  throw new error.MastraError(
1214
2818
  {
1215
- id: "MASTRA_STORAGE_MSSQL_STORE_SAVE_MESSAGES_FAILED",
2819
+ id: storage.createStorageErrorId("MSSQL", "UPDATE_WORKFLOW_STATE", "FAILED"),
1216
2820
  domain: error.ErrorDomain.STORAGE,
1217
2821
  category: error.ErrorCategory.THIRD_PARTY,
1218
- details: { threadId }
2822
+ details: {
2823
+ workflowName,
2824
+ runId
2825
+ }
1219
2826
  },
1220
2827
  error$1
1221
2828
  );
@@ -1224,30 +2831,33 @@ ${columns}
1224
2831
  async persistWorkflowSnapshot({
1225
2832
  workflowName,
1226
2833
  runId,
2834
+ resourceId,
1227
2835
  snapshot
1228
2836
  }) {
1229
- const table = this.getTableName(storage.TABLE_WORKFLOW_SNAPSHOT);
2837
+ const table = getTableName({ indexName: storage.TABLE_WORKFLOW_SNAPSHOT, schemaName: getSchemaName(this.schema) });
1230
2838
  const now = (/* @__PURE__ */ new Date()).toISOString();
1231
2839
  try {
1232
2840
  const request = this.pool.request();
1233
2841
  request.input("workflow_name", workflowName);
1234
2842
  request.input("run_id", runId);
2843
+ request.input("resourceId", resourceId);
1235
2844
  request.input("snapshot", JSON.stringify(snapshot));
1236
- request.input("createdAt", now);
1237
- request.input("updatedAt", now);
2845
+ request.input("createdAt", sql2__default.default.DateTime2, new Date(now));
2846
+ request.input("updatedAt", sql2__default.default.DateTime2, new Date(now));
1238
2847
  const mergeSql = `MERGE INTO ${table} AS target
1239
2848
  USING (SELECT @workflow_name AS workflow_name, @run_id AS run_id) AS src
1240
2849
  ON target.workflow_name = src.workflow_name AND target.run_id = src.run_id
1241
2850
  WHEN MATCHED THEN UPDATE SET
2851
+ resourceId = @resourceId,
1242
2852
  snapshot = @snapshot,
1243
2853
  [updatedAt] = @updatedAt
1244
- WHEN NOT MATCHED THEN INSERT (workflow_name, run_id, snapshot, [createdAt], [updatedAt])
1245
- VALUES (@workflow_name, @run_id, @snapshot, @createdAt, @updatedAt);`;
2854
+ WHEN NOT MATCHED THEN INSERT (workflow_name, run_id, resourceId, snapshot, [createdAt], [updatedAt])
2855
+ VALUES (@workflow_name, @run_id, @resourceId, @snapshot, @createdAt, @updatedAt);`;
1246
2856
  await request.query(mergeSql);
1247
2857
  } catch (error$1) {
1248
2858
  throw new error.MastraError(
1249
2859
  {
1250
- id: "MASTRA_STORAGE_MSSQL_STORE_PERSIST_WORKFLOW_SNAPSHOT_FAILED",
2860
+ id: storage.createStorageErrorId("MSSQL", "PERSIST_WORKFLOW_SNAPSHOT", "FAILED"),
1251
2861
  domain: error.ErrorDomain.STORAGE,
1252
2862
  category: error.ErrorCategory.THIRD_PARTY,
1253
2863
  details: {
@@ -1264,7 +2874,7 @@ ${columns}
1264
2874
  runId
1265
2875
  }) {
1266
2876
  try {
1267
- const result = await this.load({
2877
+ const result = await this.operations.load({
1268
2878
  tableName: storage.TABLE_WORKFLOW_SNAPSHOT,
1269
2879
  keys: {
1270
2880
  workflow_name: workflowName,
@@ -1278,7 +2888,7 @@ ${columns}
1278
2888
  } catch (error$1) {
1279
2889
  throw new error.MastraError(
1280
2890
  {
1281
- id: "MASTRA_STORAGE_MSSQL_STORE_LOAD_WORKFLOW_SNAPSHOT_FAILED",
2891
+ id: storage.createStorageErrorId("MSSQL", "LOAD_WORKFLOW_SNAPSHOT", "FAILED"),
1282
2892
  domain: error.ErrorDomain.STORAGE,
1283
2893
  category: error.ErrorCategory.THIRD_PARTY,
1284
2894
  details: {
@@ -1290,107 +2900,6 @@ ${columns}
1290
2900
  );
1291
2901
  }
1292
2902
  }
1293
- async hasColumn(table, column) {
1294
- const schema = this.schema || "dbo";
1295
- const request = this.pool.request();
1296
- request.input("schema", schema);
1297
- request.input("table", table);
1298
- request.input("column", column);
1299
- request.input("columnLower", column.toLowerCase());
1300
- const result = await request.query(
1301
- `SELECT 1 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = @schema AND TABLE_NAME = @table AND (COLUMN_NAME = @column OR COLUMN_NAME = @columnLower)`
1302
- );
1303
- return result.recordset.length > 0;
1304
- }
1305
- parseWorkflowRun(row) {
1306
- let parsedSnapshot = row.snapshot;
1307
- if (typeof parsedSnapshot === "string") {
1308
- try {
1309
- parsedSnapshot = JSON.parse(row.snapshot);
1310
- } catch (e) {
1311
- console.warn(`Failed to parse snapshot for workflow ${row.workflow_name}: ${e}`);
1312
- }
1313
- }
1314
- return {
1315
- workflowName: row.workflow_name,
1316
- runId: row.run_id,
1317
- snapshot: parsedSnapshot,
1318
- createdAt: row.createdAt,
1319
- updatedAt: row.updatedAt,
1320
- resourceId: row.resourceId
1321
- };
1322
- }
1323
- async getWorkflowRuns({
1324
- workflowName,
1325
- fromDate,
1326
- toDate,
1327
- limit,
1328
- offset,
1329
- resourceId
1330
- } = {}) {
1331
- try {
1332
- const conditions = [];
1333
- const paramMap = {};
1334
- if (workflowName) {
1335
- conditions.push(`[workflow_name] = @workflowName`);
1336
- paramMap["workflowName"] = workflowName;
1337
- }
1338
- if (resourceId) {
1339
- const hasResourceId = await this.hasColumn(storage.TABLE_WORKFLOW_SNAPSHOT, "resourceId");
1340
- if (hasResourceId) {
1341
- conditions.push(`[resourceId] = @resourceId`);
1342
- paramMap["resourceId"] = resourceId;
1343
- } else {
1344
- console.warn(`[${storage.TABLE_WORKFLOW_SNAPSHOT}] resourceId column not found. Skipping resourceId filter.`);
1345
- }
1346
- }
1347
- if (fromDate instanceof Date && !isNaN(fromDate.getTime())) {
1348
- conditions.push(`[createdAt] >= @fromDate`);
1349
- paramMap[`fromDate`] = fromDate.toISOString();
1350
- }
1351
- if (toDate instanceof Date && !isNaN(toDate.getTime())) {
1352
- conditions.push(`[createdAt] <= @toDate`);
1353
- paramMap[`toDate`] = toDate.toISOString();
1354
- }
1355
- const whereClause = conditions.length > 0 ? `WHERE ${conditions.join(" AND ")}` : "";
1356
- let total = 0;
1357
- const tableName = this.getTableName(storage.TABLE_WORKFLOW_SNAPSHOT);
1358
- const request = this.pool.request();
1359
- Object.entries(paramMap).forEach(([key, value]) => {
1360
- if (value instanceof Date) {
1361
- request.input(key, sql__default.default.DateTime, value);
1362
- } else {
1363
- request.input(key, value);
1364
- }
1365
- });
1366
- if (limit !== void 0 && offset !== void 0) {
1367
- const countQuery = `SELECT COUNT(*) as count FROM ${tableName} ${whereClause}`;
1368
- const countResult = await request.query(countQuery);
1369
- total = Number(countResult.recordset[0]?.count || 0);
1370
- }
1371
- let query = `SELECT * FROM ${tableName} ${whereClause} ORDER BY [seq_id] DESC`;
1372
- if (limit !== void 0 && offset !== void 0) {
1373
- query += ` OFFSET @offset ROWS FETCH NEXT @limit ROWS ONLY`;
1374
- request.input("limit", limit);
1375
- request.input("offset", offset);
1376
- }
1377
- const result = await request.query(query);
1378
- const runs = (result.recordset || []).map((row) => this.parseWorkflowRun(row));
1379
- return { runs, total: total || runs.length };
1380
- } catch (error$1) {
1381
- throw new error.MastraError(
1382
- {
1383
- id: "MASTRA_STORAGE_MSSQL_STORE_GET_WORKFLOW_RUNS_FAILED",
1384
- domain: error.ErrorDomain.STORAGE,
1385
- category: error.ErrorCategory.THIRD_PARTY,
1386
- details: {
1387
- workflowName: workflowName || "all"
1388
- }
1389
- },
1390
- error$1
1391
- );
1392
- }
1393
- }
1394
2903
  async getWorkflowRunById({
1395
2904
  runId,
1396
2905
  workflowName
@@ -1407,7 +2916,7 @@ ${columns}
1407
2916
  paramMap["workflowName"] = workflowName;
1408
2917
  }
1409
2918
  const whereClause = conditions.length > 0 ? `WHERE ${conditions.join(" AND ")}` : "";
1410
- const tableName = this.getTableName(storage.TABLE_WORKFLOW_SNAPSHOT);
2919
+ const tableName = getTableName({ indexName: storage.TABLE_WORKFLOW_SNAPSHOT, schemaName: getSchemaName(this.schema) });
1411
2920
  const query = `SELECT * FROM ${tableName} ${whereClause}`;
1412
2921
  const request = this.pool.request();
1413
2922
  Object.entries(paramMap).forEach(([key, value]) => request.input(key, value));
@@ -1419,7 +2928,7 @@ ${columns}
1419
2928
  } catch (error$1) {
1420
2929
  throw new error.MastraError(
1421
2930
  {
1422
- id: "MASTRA_STORAGE_MSSQL_STORE_GET_WORKFLOW_RUN_BY_ID_FAILED",
2931
+ id: storage.createStorageErrorId("MSSQL", "GET_WORKFLOW_RUN_BY_ID", "FAILED"),
1423
2932
  domain: error.ErrorDomain.STORAGE,
1424
2933
  category: error.ErrorCategory.THIRD_PARTY,
1425
2934
  details: {
@@ -1431,390 +2940,407 @@ ${columns}
1431
2940
  );
1432
2941
  }
1433
2942
  }
1434
- async updateMessages({
1435
- messages
1436
- }) {
1437
- if (!messages || messages.length === 0) {
1438
- return [];
1439
- }
1440
- const messageIds = messages.map((m) => m.id);
1441
- const idParams = messageIds.map((_, i) => `@id${i}`).join(", ");
1442
- let selectQuery = `SELECT id, content, role, type, createdAt, thread_id AS threadId, resourceId FROM ${this.getTableName(storage.TABLE_MESSAGES)}`;
1443
- if (idParams.length > 0) {
1444
- selectQuery += ` WHERE id IN (${idParams})`;
1445
- } else {
1446
- return [];
1447
- }
1448
- const selectReq = this.pool.request();
1449
- messageIds.forEach((id, i) => selectReq.input(`id${i}`, id));
1450
- const existingMessagesDb = (await selectReq.query(selectQuery)).recordset;
1451
- if (!existingMessagesDb || existingMessagesDb.length === 0) {
1452
- return [];
1453
- }
1454
- const existingMessages = existingMessagesDb.map((msg) => {
1455
- if (typeof msg.content === "string") {
1456
- try {
1457
- msg.content = JSON.parse(msg.content);
1458
- } catch {
1459
- }
1460
- }
1461
- return msg;
1462
- });
1463
- const threadIdsToUpdate = /* @__PURE__ */ new Set();
1464
- const transaction = this.pool.transaction();
2943
+ async listWorkflowRuns({
2944
+ workflowName,
2945
+ fromDate,
2946
+ toDate,
2947
+ page,
2948
+ perPage,
2949
+ resourceId,
2950
+ status
2951
+ } = {}) {
1465
2952
  try {
1466
- await transaction.begin();
1467
- for (const existingMessage of existingMessages) {
1468
- const updatePayload = messages.find((m) => m.id === existingMessage.id);
1469
- if (!updatePayload) continue;
1470
- const { id, ...fieldsToUpdate } = updatePayload;
1471
- if (Object.keys(fieldsToUpdate).length === 0) continue;
1472
- threadIdsToUpdate.add(existingMessage.threadId);
1473
- if (updatePayload.threadId && updatePayload.threadId !== existingMessage.threadId) {
1474
- threadIdsToUpdate.add(updatePayload.threadId);
1475
- }
1476
- const setClauses = [];
1477
- const req = transaction.request();
1478
- req.input("id", id);
1479
- const columnMapping = { threadId: "thread_id" };
1480
- const updatableFields = { ...fieldsToUpdate };
1481
- if (updatableFields.content) {
1482
- const newContent = {
1483
- ...existingMessage.content,
1484
- ...updatableFields.content,
1485
- ...existingMessage.content?.metadata && updatableFields.content.metadata ? { metadata: { ...existingMessage.content.metadata, ...updatableFields.content.metadata } } : {}
1486
- };
1487
- setClauses.push(`content = @content`);
1488
- req.input("content", JSON.stringify(newContent));
1489
- delete updatableFields.content;
1490
- }
1491
- for (const key in updatableFields) {
1492
- if (Object.prototype.hasOwnProperty.call(updatableFields, key)) {
1493
- const dbColumn = columnMapping[key] || key;
1494
- setClauses.push(`[${dbColumn}] = @${dbColumn}`);
1495
- req.input(dbColumn, updatableFields[key]);
1496
- }
1497
- }
1498
- if (setClauses.length > 0) {
1499
- const updateSql = `UPDATE ${this.getTableName(storage.TABLE_MESSAGES)} SET ${setClauses.join(", ")} WHERE id = @id`;
1500
- await req.query(updateSql);
1501
- }
2953
+ const conditions = [];
2954
+ const paramMap = {};
2955
+ if (workflowName) {
2956
+ conditions.push(`[workflow_name] = @workflowName`);
2957
+ paramMap["workflowName"] = workflowName;
1502
2958
  }
1503
- if (threadIdsToUpdate.size > 0) {
1504
- const threadIdParams = Array.from(threadIdsToUpdate).map((_, i) => `@tid${i}`).join(", ");
1505
- const threadReq = transaction.request();
1506
- Array.from(threadIdsToUpdate).forEach((tid, i) => threadReq.input(`tid${i}`, tid));
1507
- threadReq.input("updatedAt", (/* @__PURE__ */ new Date()).toISOString());
1508
- const threadSql = `UPDATE ${this.getTableName(storage.TABLE_THREADS)} SET updatedAt = @updatedAt WHERE id IN (${threadIdParams})`;
1509
- await threadReq.query(threadSql);
2959
+ if (status) {
2960
+ conditions.push(`JSON_VALUE([snapshot], '$.status') = @status`);
2961
+ paramMap["status"] = status;
1510
2962
  }
1511
- await transaction.commit();
1512
- } catch (error$1) {
1513
- await transaction.rollback();
1514
- throw new error.MastraError(
1515
- {
1516
- id: "MASTRA_STORAGE_MSSQL_UPDATE_MESSAGES_FAILED",
1517
- domain: error.ErrorDomain.STORAGE,
1518
- category: error.ErrorCategory.THIRD_PARTY
1519
- },
1520
- error$1
1521
- );
1522
- }
1523
- const refetchReq = this.pool.request();
1524
- messageIds.forEach((id, i) => refetchReq.input(`id${i}`, id));
1525
- const updatedMessages = (await refetchReq.query(selectQuery)).recordset;
1526
- return (updatedMessages || []).map((message) => {
1527
- if (typeof message.content === "string") {
1528
- try {
1529
- message.content = JSON.parse(message.content);
1530
- } catch {
2963
+ if (resourceId) {
2964
+ const hasResourceId = await this.operations.hasColumn(storage.TABLE_WORKFLOW_SNAPSHOT, "resourceId");
2965
+ if (hasResourceId) {
2966
+ conditions.push(`[resourceId] = @resourceId`);
2967
+ paramMap["resourceId"] = resourceId;
2968
+ } else {
2969
+ this.logger?.warn?.(`[${storage.TABLE_WORKFLOW_SNAPSHOT}] resourceId column not found. Skipping resourceId filter.`);
1531
2970
  }
1532
2971
  }
1533
- return message;
1534
- });
1535
- }
1536
- async close() {
1537
- if (this.pool) {
1538
- try {
1539
- if (this.pool.connected) {
1540
- await this.pool.close();
1541
- } else if (this.pool.connecting) {
1542
- await this.pool.connect();
1543
- await this.pool.close();
1544
- }
1545
- } catch (err) {
1546
- if (err.message && err.message.includes("Cannot close a pool while it is connecting")) ; else {
1547
- throw err;
1548
- }
2972
+ if (fromDate instanceof Date && !isNaN(fromDate.getTime())) {
2973
+ conditions.push(`[createdAt] >= @fromDate`);
2974
+ paramMap[`fromDate`] = fromDate.toISOString();
1549
2975
  }
1550
- }
1551
- }
1552
- async getEvals(options = {}) {
1553
- const { agentName, type, page = 0, perPage = 100, dateRange } = options;
1554
- const fromDate = dateRange?.start;
1555
- const toDate = dateRange?.end;
1556
- const where = [];
1557
- const params = {};
1558
- if (agentName) {
1559
- where.push("agent_name = @agentName");
1560
- params["agentName"] = agentName;
1561
- }
1562
- if (type === "test") {
1563
- where.push("test_info IS NOT NULL AND JSON_VALUE(test_info, '$.testPath') IS NOT NULL");
1564
- } else if (type === "live") {
1565
- where.push("(test_info IS NULL OR JSON_VALUE(test_info, '$.testPath') IS NULL)");
1566
- }
1567
- if (fromDate instanceof Date && !isNaN(fromDate.getTime())) {
1568
- where.push(`[created_at] >= @fromDate`);
1569
- params[`fromDate`] = fromDate.toISOString();
1570
- }
1571
- if (toDate instanceof Date && !isNaN(toDate.getTime())) {
1572
- where.push(`[created_at] <= @toDate`);
1573
- params[`toDate`] = toDate.toISOString();
1574
- }
1575
- const whereClause = where.length > 0 ? `WHERE ${where.join(" AND ")}` : "";
1576
- const tableName = this.getTableName(storage.TABLE_EVALS);
1577
- const offset = page * perPage;
1578
- const countQuery = `SELECT COUNT(*) as total FROM ${tableName} ${whereClause}`;
1579
- const dataQuery = `SELECT * FROM ${tableName} ${whereClause} ORDER BY seq_id DESC OFFSET @offset ROWS FETCH NEXT @perPage ROWS ONLY`;
1580
- try {
1581
- const countReq = this.pool.request();
1582
- Object.entries(params).forEach(([key, value]) => {
1583
- if (value instanceof Date) {
1584
- countReq.input(key, sql__default.default.DateTime, value);
1585
- } else {
1586
- countReq.input(key, value);
1587
- }
1588
- });
1589
- const countResult = await countReq.query(countQuery);
1590
- const total = countResult.recordset[0]?.total || 0;
1591
- if (total === 0) {
1592
- return {
1593
- evals: [],
1594
- total: 0,
1595
- page,
1596
- perPage,
1597
- hasMore: false
1598
- };
2976
+ if (toDate instanceof Date && !isNaN(toDate.getTime())) {
2977
+ conditions.push(`[createdAt] <= @toDate`);
2978
+ paramMap[`toDate`] = toDate.toISOString();
1599
2979
  }
1600
- const req = this.pool.request();
1601
- Object.entries(params).forEach(([key, value]) => {
2980
+ const whereClause = conditions.length > 0 ? `WHERE ${conditions.join(" AND ")}` : "";
2981
+ let total = 0;
2982
+ const tableName = getTableName({ indexName: storage.TABLE_WORKFLOW_SNAPSHOT, schemaName: getSchemaName(this.schema) });
2983
+ const request = this.pool.request();
2984
+ Object.entries(paramMap).forEach(([key, value]) => {
1602
2985
  if (value instanceof Date) {
1603
- req.input(key, sql__default.default.DateTime, value);
2986
+ request.input(key, sql2__default.default.DateTime, value);
1604
2987
  } else {
1605
- req.input(key, value);
2988
+ request.input(key, value);
1606
2989
  }
1607
2990
  });
1608
- req.input("offset", offset);
1609
- req.input("perPage", perPage);
1610
- const result = await req.query(dataQuery);
1611
- const rows = result.recordset;
1612
- return {
1613
- evals: rows?.map((row) => this.transformEvalRow(row)) ?? [],
1614
- total,
1615
- page,
1616
- perPage,
1617
- hasMore: offset + (rows?.length ?? 0) < total
1618
- };
2991
+ const usePagination = typeof perPage === "number" && typeof page === "number";
2992
+ if (usePagination) {
2993
+ const countQuery = `SELECT COUNT(*) as count FROM ${tableName} ${whereClause}`;
2994
+ const countResult = await request.query(countQuery);
2995
+ total = Number(countResult.recordset[0]?.count || 0);
2996
+ }
2997
+ let query = `SELECT * FROM ${tableName} ${whereClause} ORDER BY [seq_id] DESC`;
2998
+ if (usePagination) {
2999
+ const normalizedPerPage = storage.normalizePerPage(perPage, Number.MAX_SAFE_INTEGER);
3000
+ const offset = page * normalizedPerPage;
3001
+ query += ` OFFSET @offset ROWS FETCH NEXT @perPage ROWS ONLY`;
3002
+ request.input("perPage", normalizedPerPage);
3003
+ request.input("offset", offset);
3004
+ }
3005
+ const result = await request.query(query);
3006
+ const runs = (result.recordset || []).map((row) => this.parseWorkflowRun(row));
3007
+ return { runs, total: total || runs.length };
1619
3008
  } catch (error$1) {
1620
- const mastraError = new error.MastraError(
3009
+ throw new error.MastraError(
1621
3010
  {
1622
- id: "MASTRA_STORAGE_MSSQL_STORE_GET_EVALS_FAILED",
3011
+ id: storage.createStorageErrorId("MSSQL", "LIST_WORKFLOW_RUNS", "FAILED"),
1623
3012
  domain: error.ErrorDomain.STORAGE,
1624
3013
  category: error.ErrorCategory.THIRD_PARTY,
1625
3014
  details: {
1626
- agentName: agentName || "all",
1627
- type: type || "all",
1628
- page,
1629
- perPage
3015
+ workflowName: workflowName || "all"
1630
3016
  }
1631
3017
  },
1632
3018
  error$1
1633
3019
  );
1634
- this.logger?.error?.(mastraError.toString());
1635
- this.logger?.trackException(mastraError);
1636
- throw mastraError;
1637
3020
  }
1638
3021
  }
1639
- async saveResource({ resource }) {
1640
- const tableName = this.getTableName(storage.TABLE_RESOURCES);
1641
- try {
1642
- const req = this.pool.request();
1643
- req.input("id", resource.id);
1644
- req.input("workingMemory", resource.workingMemory);
1645
- req.input("metadata", JSON.stringify(resource.metadata));
1646
- req.input("createdAt", resource.createdAt.toISOString());
1647
- req.input("updatedAt", resource.updatedAt.toISOString());
1648
- await req.query(
1649
- `INSERT INTO ${tableName} (id, workingMemory, metadata, createdAt, updatedAt) VALUES (@id, @workingMemory, @metadata, @createdAt, @updatedAt)`
1650
- );
1651
- return resource;
1652
- } catch (error$1) {
1653
- const mastraError = new error.MastraError(
1654
- {
1655
- id: "MASTRA_STORAGE_MSSQL_SAVE_RESOURCE_FAILED",
1656
- domain: error.ErrorDomain.STORAGE,
1657
- category: error.ErrorCategory.THIRD_PARTY,
1658
- details: { resourceId: resource.id }
1659
- },
1660
- error$1
1661
- );
1662
- this.logger?.error?.(mastraError.toString());
1663
- this.logger?.trackException(mastraError);
1664
- throw mastraError;
3022
+ };
3023
+
3024
+ // src/storage/index.ts
3025
+ var MSSQLStore = class extends storage.MastraStorage {
3026
+ pool;
3027
+ schema;
3028
+ isConnected = null;
3029
+ stores;
3030
+ constructor(config) {
3031
+ if (!config.id || typeof config.id !== "string" || config.id.trim() === "") {
3032
+ throw new Error("MSSQLStore: id must be provided and cannot be empty.");
1665
3033
  }
1666
- }
1667
- async updateResource({
1668
- resourceId,
1669
- workingMemory,
1670
- metadata
1671
- }) {
3034
+ super({ id: config.id, name: "MSSQLStore", disableInit: config.disableInit });
1672
3035
  try {
1673
- const existingResource = await this.getResourceById({ resourceId });
1674
- if (!existingResource) {
1675
- const newResource = {
1676
- id: resourceId,
1677
- workingMemory,
1678
- metadata: metadata || {},
1679
- createdAt: /* @__PURE__ */ new Date(),
1680
- updatedAt: /* @__PURE__ */ new Date()
1681
- };
1682
- return this.saveResource({ resource: newResource });
3036
+ if ("connectionString" in config) {
3037
+ if (!config.connectionString || typeof config.connectionString !== "string" || config.connectionString.trim() === "") {
3038
+ throw new Error("MSSQLStore: connectionString must be provided and cannot be empty.");
3039
+ }
3040
+ } else {
3041
+ const required = ["server", "database", "user", "password"];
3042
+ for (const key of required) {
3043
+ if (!(key in config) || typeof config[key] !== "string" || config[key].trim() === "") {
3044
+ throw new Error(`MSSQLStore: ${key} must be provided and cannot be empty.`);
3045
+ }
3046
+ }
1683
3047
  }
1684
- const updatedResource = {
1685
- ...existingResource,
1686
- workingMemory: workingMemory !== void 0 ? workingMemory : existingResource.workingMemory,
1687
- metadata: {
1688
- ...existingResource.metadata,
1689
- ...metadata
1690
- },
1691
- updatedAt: /* @__PURE__ */ new Date()
3048
+ this.schema = config.schemaName || "dbo";
3049
+ this.pool = "connectionString" in config ? new sql2__default.default.ConnectionPool(config.connectionString) : new sql2__default.default.ConnectionPool({
3050
+ server: config.server,
3051
+ database: config.database,
3052
+ user: config.user,
3053
+ password: config.password,
3054
+ port: config.port,
3055
+ options: config.options || { encrypt: true, trustServerCertificate: true }
3056
+ });
3057
+ const operations = new StoreOperationsMSSQL({ pool: this.pool, schemaName: this.schema });
3058
+ const scores = new ScoresMSSQL({ pool: this.pool, operations, schema: this.schema });
3059
+ const workflows = new WorkflowsMSSQL({ pool: this.pool, operations, schema: this.schema });
3060
+ const memory = new MemoryMSSQL({ pool: this.pool, schema: this.schema, operations });
3061
+ const observability = new ObservabilityMSSQL({ pool: this.pool, operations, schema: this.schema });
3062
+ this.stores = {
3063
+ operations,
3064
+ scores,
3065
+ workflows,
3066
+ memory,
3067
+ observability
1692
3068
  };
1693
- const tableName = this.getTableName(storage.TABLE_RESOURCES);
1694
- const updates = [];
1695
- const req = this.pool.request();
1696
- if (workingMemory !== void 0) {
1697
- updates.push("workingMemory = @workingMemory");
1698
- req.input("workingMemory", workingMemory);
1699
- }
1700
- if (metadata) {
1701
- updates.push("metadata = @metadata");
1702
- req.input("metadata", JSON.stringify(updatedResource.metadata));
1703
- }
1704
- updates.push("updatedAt = @updatedAt");
1705
- req.input("updatedAt", updatedResource.updatedAt.toISOString());
1706
- req.input("id", resourceId);
1707
- await req.query(`UPDATE ${tableName} SET ${updates.join(", ")} WHERE id = @id`);
1708
- return updatedResource;
1709
- } catch (error$1) {
1710
- const mastraError = new error.MastraError(
3069
+ } catch (e) {
3070
+ throw new error.MastraError(
1711
3071
  {
1712
- id: "MASTRA_STORAGE_MSSQL_UPDATE_RESOURCE_FAILED",
3072
+ id: storage.createStorageErrorId("MSSQL", "INITIALIZATION", "FAILED"),
1713
3073
  domain: error.ErrorDomain.STORAGE,
1714
- category: error.ErrorCategory.THIRD_PARTY,
1715
- details: { resourceId }
3074
+ category: error.ErrorCategory.USER
1716
3075
  },
1717
- error$1
3076
+ e
1718
3077
  );
1719
- this.logger?.error?.(mastraError.toString());
1720
- this.logger?.trackException(mastraError);
1721
- throw mastraError;
1722
3078
  }
1723
3079
  }
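
The reworked constructor above now requires a non-empty `id`, accepts either a connectionString or discrete server/database/user/password fields, and wires the domain stores (operations, scores, workflows, memory, observability) up front; init() then connects the pool, runs the base initialization, and creates the automatic indexes. A construction sketch with placeholder credentials (the export name is assumed to match the class):

// Sketch only; connection values are placeholders.
const { MSSQLStore } = require('@mastra/mssql');

const store = new MSSQLStore({
  id: 'mssql-storage',            // required and non-empty in this version
  server: 'localhost',
  database: 'mastra',
  user: 'sa',
  password: 'example-password',
  port: 1433,
  schemaName: 'dbo',              // optional, defaults to 'dbo'
  // alternatively: connectionString: 'Server=...;Database=...;...'
});

await store.init();               // connect pool, base init, automatic indexes
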
1724
- async getResourceById({ resourceId }) {
1725
- const tableName = this.getTableName(storage.TABLE_RESOURCES);
3080
+ async init() {
3081
+ if (this.isConnected === null) {
3082
+ this.isConnected = this._performInitializationAndStore();
3083
+ }
1726
3084
  try {
1727
- const req = this.pool.request();
1728
- req.input("resourceId", resourceId);
1729
- const result = (await req.query(`SELECT * FROM ${tableName} WHERE id = @resourceId`)).recordset[0];
1730
- if (!result) {
1731
- return null;
3085
+ await this.isConnected;
3086
+ await super.init();
3087
+ try {
3088
+ await this.stores.operations.createAutomaticIndexes();
3089
+ } catch (indexError) {
3090
+ this.logger?.warn?.("Failed to create indexes:", indexError);
1732
3091
  }
1733
- return {
1734
- ...result,
1735
- workingMemory: typeof result.workingMemory === "object" ? JSON.stringify(result.workingMemory) : result.workingMemory,
1736
- metadata: typeof result.metadata === "string" ? JSON.parse(result.metadata) : result.metadata
1737
- };
1738
3092
  } catch (error$1) {
1739
- const mastraError = new error.MastraError(
3093
+ this.isConnected = null;
3094
+ throw new error.MastraError(
1740
3095
  {
1741
- id: "MASTRA_STORAGE_MSSQL_GET_RESOURCE_BY_ID_FAILED",
3096
+ id: storage.createStorageErrorId("MSSQL", "INIT", "FAILED"),
1742
3097
  domain: error.ErrorDomain.STORAGE,
1743
- category: error.ErrorCategory.THIRD_PARTY,
1744
- details: { resourceId }
3098
+ category: error.ErrorCategory.THIRD_PARTY
1745
3099
  },
1746
3100
  error$1
1747
3101
  );
1748
- this.logger?.error?.(mastraError.toString());
1749
- this.logger?.trackException(mastraError);
1750
- throw mastraError;
1751
3102
  }
1752
3103
  }
1753
- async getScoreById({ id }) {
1754
- throw new error.MastraError({
1755
- id: "STORAGE_MONGODB_STORE_GET_SCORE_BY_ID_FAILED",
1756
- domain: error.ErrorDomain.STORAGE,
1757
- category: error.ErrorCategory.THIRD_PARTY,
1758
- details: { id },
1759
- text: "getScoreById is not implemented yet in MongoDBStore"
1760
- });
3104
+ async _performInitializationAndStore() {
3105
+ try {
3106
+ await this.pool.connect();
3107
+ return true;
3108
+ } catch (err) {
3109
+ throw err;
3110
+ }
1761
3111
  }
- async saveScore(_score) {
- throw new error.MastraError({
- id: "STORAGE_MONGODB_STORE_SAVE_SCORE_FAILED",
- domain: error.ErrorDomain.STORAGE,
- category: error.ErrorCategory.THIRD_PARTY,
- details: {},
- text: "saveScore is not implemented yet in MongoDBStore"
- });
+ get supports() {
+ return {
+ selectByIncludeResourceScope: true,
+ resourceWorkingMemory: true,
+ hasColumn: true,
+ createTable: true,
+ deleteMessages: true,
+ listScoresBySpan: true,
+ observabilityInstance: true,
+ indexManagement: true
+ };
  }
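
The supports getter lets callers feature-detect capabilities instead of invoking a method and catching a "not implemented" error, which is how the removed MongoDB-labelled stubs behaved. A small sketch of that pattern (store is assumed to be an initialized MSSQLStore, as in the earlier sketch):

function availableFeatures(store) {
  // Each flag mirrors an entry of the supports object added above.
  const { indexManagement, listScoresBySpan, resourceWorkingMemory } = store.supports;
  return {
    canManageIndexes: indexManagement,
    canQueryScoresBySpan: listScoresBySpan,
    hasResourceWorkingMemory: resourceWorkingMemory,
  };
}
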
- async getScoresByScorerId({
- scorerId,
+ async createTable({
+ tableName,
+ schema
+ }) {
+ return this.stores.operations.createTable({ tableName, schema });
+ }
+ async alterTable({
+ tableName,
+ schema,
+ ifNotExists
+ }) {
+ return this.stores.operations.alterTable({ tableName, schema, ifNotExists });
+ }
+ async clearTable({ tableName }) {
+ return this.stores.operations.clearTable({ tableName });
+ }
+ async dropTable({ tableName }) {
+ return this.stores.operations.dropTable({ tableName });
+ }
+ async insert({ tableName, record }) {
+ return this.stores.operations.insert({ tableName, record });
+ }
+ async batchInsert({ tableName, records }) {
+ return this.stores.operations.batchInsert({ tableName, records });
+ }
+ async load({ tableName, keys }) {
+ return this.stores.operations.load({ tableName, keys });
+ }
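
Every table-level operation above is a thin delegation to this.stores.operations. A sketch of a create/insert/load round trip; the table name and the column-definition shape used for schema are illustrative assumptions, not a contract taken from this diff:

async function tableRoundTrip(store) {
  const tableName = 'my_custom_table'; // hypothetical table
  await store.createTable({
    tableName,
    schema: {
      // Assumed column-definition shape: { type, primaryKey?, nullable? }.
      id: { type: 'text', primaryKey: true, nullable: false },
      payload: { type: 'jsonb', nullable: true },
    },
  });
  await store.insert({ tableName, record: { id: 'row-1', payload: { hello: 'world' } } });
  return store.load({ tableName, keys: { id: 'row-1' } });
}
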
+ /**
+ * Memory
+ */
+ async getThreadById({ threadId }) {
+ return this.stores.memory.getThreadById({ threadId });
+ }
+ async saveThread({ thread }) {
+ return this.stores.memory.saveThread({ thread });
+ }
+ async updateThread({
+ id,
+ title,
+ metadata
+ }) {
+ return this.stores.memory.updateThread({ id, title, metadata });
+ }
+ async deleteThread({ threadId }) {
+ return this.stores.memory.deleteThread({ threadId });
+ }
+ async listMessagesById({ messageIds }) {
+ return this.stores.memory.listMessagesById({ messageIds });
+ }
+ async saveMessages(args) {
+ return this.stores.memory.saveMessages(args);
+ }
+ async updateMessages({
+ messages
+ }) {
+ return this.stores.memory.updateMessages({ messages });
+ }
+ async deleteMessages(messageIds) {
+ return this.stores.memory.deleteMessages(messageIds);
+ }
+ async getResourceById({ resourceId }) {
+ return this.stores.memory.getResourceById({ resourceId });
+ }
+ async saveResource({ resource }) {
+ return this.stores.memory.saveResource({ resource });
+ }
+ async updateResource({
+ resourceId,
+ workingMemory,
+ metadata
+ }) {
+ return this.stores.memory.updateResource({ resourceId, workingMemory, metadata });
+ }
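
The memory surface (threads, messages, resources) delegates to this.stores.memory in the same way. A sketch of saving and reading back a thread; the field names on the thread and message objects are assumptions for illustration, not a schema confirmed by this diff:

async function startThread(store) {
  const thread = await store.saveThread({
    thread: {
      id: 'thread-1',          // hypothetical identifiers and fields
      resourceId: 'user-1',
      title: 'Support chat',
      metadata: {},
      createdAt: new Date(),
      updatedAt: new Date(),
    },
  });
  await store.saveMessages({
    messages: [
      { id: 'msg-1', threadId: thread.id, role: 'user', content: 'hello', createdAt: new Date() },
    ],
  });
  return store.getThreadById({ threadId: thread.id });
}
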
+ /**
+ * Workflows
+ */
+ async updateWorkflowResults({
+ workflowName,
+ runId,
+ stepId,
+ result,
+ requestContext
+ }) {
+ return this.stores.workflows.updateWorkflowResults({ workflowName, runId, stepId, result, requestContext });
+ }
+ async updateWorkflowState({
+ workflowName,
+ runId,
+ opts
+ }) {
+ return this.stores.workflows.updateWorkflowState({ workflowName, runId, opts });
+ }
+ async persistWorkflowSnapshot({
+ workflowName,
+ runId,
+ resourceId,
+ snapshot
+ }) {
+ return this.stores.workflows.persistWorkflowSnapshot({ workflowName, runId, resourceId, snapshot });
+ }
+ async loadWorkflowSnapshot({
+ workflowName,
+ runId
+ }) {
+ return this.stores.workflows.loadWorkflowSnapshot({ workflowName, runId });
+ }
+ async listWorkflowRuns(args = {}) {
+ return this.stores.workflows.listWorkflowRuns(args);
+ }
+ async getWorkflowRunById({
+ runId,
+ workflowName
+ }) {
+ return this.stores.workflows.getWorkflowRunById({ runId, workflowName });
+ }
+ async close() {
+ await this.pool.close();
+ }
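
Workflow persistence follows the same delegation pattern, and close() shuts down the shared connection pool when the application is done with the store. A sketch of persisting and reloading a snapshot; the workflow name, run id and snapshot payload are placeholders rather than the real workflow state shape:

async function checkpointWorkflow(store) {
  await store.persistWorkflowSnapshot({
    workflowName: 'order-fulfilment',             // hypothetical identifiers
    runId: 'run-42',
    resourceId: 'user-1',
    snapshot: { status: 'running', context: {} }, // stand-in for the real snapshot object
  });
  return store.loadWorkflowSnapshot({ workflowName: 'order-fulfilment', runId: 'run-42' });
}
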
+ /**
+ * Index Management
+ */
+ async createIndex(options) {
+ return this.stores.operations.createIndex(options);
+ }
+ async listIndexes(tableName) {
+ return this.stores.operations.listIndexes(tableName);
+ }
+ async describeIndex(indexName) {
+ return this.stores.operations.describeIndex(indexName);
+ }
+ async dropIndex(indexName) {
+ return this.stores.operations.dropIndex(indexName);
+ }
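
createIndex forwards its options object untouched to this.stores.operations.createIndex, matching the indexManagement: true capability flag. A sketch of the full lifecycle; the { name, table, columns } options shape and the table name are assumptions made for illustration:

async function manageIndexes(store) {
  await store.createIndex({
    name: 'idx_messages_thread_id', // assumed options shape
    table: 'mastra_messages',
    columns: ['thread_id'],
  });
  const indexes = await store.listIndexes('mastra_messages');
  const detail = await store.describeIndex('idx_messages_thread_id');
  await store.dropIndex('idx_messages_thread_id');
  return { indexes, detail };
}
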
+ /**
+ * Tracing / Observability
+ */
+ getObservabilityStore() {
+ if (!this.stores.observability) {
+ throw new error.MastraError({
+ id: storage.createStorageErrorId("MSSQL", "OBSERVABILITY", "NOT_INITIALIZED"),
+ domain: error.ErrorDomain.STORAGE,
+ category: error.ErrorCategory.SYSTEM,
+ text: "Observability storage is not initialized"
+ });
+ }
+ return this.stores.observability;
+ }
+ async createSpan(span) {
+ return this.getObservabilityStore().createSpan(span);
+ }
+ async updateSpan({
+ spanId,
+ traceId,
+ updates
+ }) {
+ return this.getObservabilityStore().updateSpan({ spanId, traceId, updates });
+ }
+ async getTrace(traceId) {
+ return this.getObservabilityStore().getTrace(traceId);
+ }
+ async getTracesPaginated(args) {
+ return this.getObservabilityStore().getTracesPaginated(args);
+ }
+ async batchCreateSpans(args) {
+ return this.getObservabilityStore().batchCreateSpans(args);
+ }
+ async batchUpdateSpans(args) {
+ return this.getObservabilityStore().batchUpdateSpans(args);
+ }
+ async batchDeleteTraces(args) {
+ return this.getObservabilityStore().batchDeleteTraces(args);
+ }
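
All tracing methods go through getObservabilityStore(), which throws the NOT_INITIALIZED error above when the observability sub-store is missing, so callers can guard with the observabilityInstance capability flag rather than handle that error. A sketch; the span fields passed to createSpan are placeholder assumptions:

async function recordSpan(store) {
  if (!store.supports.observabilityInstance) return null;
  await store.createSpan({
    traceId: 'trace-1',       // placeholder span fields
    spanId: 'span-1',
    name: 'llm.generate',
    startedAt: new Date(),
  });
  return store.getTrace('trace-1');
}
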
+ /**
+ * Scorers
+ */
+ async getScoreById({ id: _id }) {
+ return this.stores.scores.getScoreById({ id: _id });
+ }
+ async listScoresByScorerId({
+ scorerId: _scorerId,
  pagination: _pagination,
- entityId,
- entityType
+ entityId: _entityId,
+ entityType: _entityType,
+ source: _source
  }) {
- throw new error.MastraError({
- id: "STORAGE_MONGODB_STORE_GET_SCORES_BY_SCORER_ID_FAILED",
- domain: error.ErrorDomain.STORAGE,
- category: error.ErrorCategory.THIRD_PARTY,
- details: { scorerId, entityId: entityId || "", entityType: entityType || "" },
- text: "getScoresByScorerId is not implemented yet in MongoDBStore"
+ return this.stores.scores.listScoresByScorerId({
+ scorerId: _scorerId,
+ pagination: _pagination,
+ entityId: _entityId,
+ entityType: _entityType,
+ source: _source
  });
  }
- async getScoresByRunId({
- runId,
+ async saveScore(score) {
+ return this.stores.scores.saveScore(score);
+ }
+ async listScoresByRunId({
+ runId: _runId,
  pagination: _pagination
  }) {
- throw new error.MastraError({
- id: "STORAGE_MONGODB_STORE_GET_SCORES_BY_RUN_ID_FAILED",
- domain: error.ErrorDomain.STORAGE,
- category: error.ErrorCategory.THIRD_PARTY,
- details: { runId },
- text: "getScoresByRunId is not implemented yet in MongoDBStore"
- });
+ return this.stores.scores.listScoresByRunId({ runId: _runId, pagination: _pagination });
  }
- async getScoresByEntityId({
- entityId,
- entityType,
+ async listScoresByEntityId({
+ entityId: _entityId,
+ entityType: _entityType,
  pagination: _pagination
  }) {
- throw new error.MastraError({
- id: "STORAGE_MONGODB_STORE_GET_SCORES_BY_ENTITY_ID_FAILED",
- domain: error.ErrorDomain.STORAGE,
- category: error.ErrorCategory.THIRD_PARTY,
- details: { entityId, entityType },
- text: "getScoresByEntityId is not implemented yet in MongoDBStore"
+ return this.stores.scores.listScoresByEntityId({
+ entityId: _entityId,
+ entityType: _entityType,
+ pagination: _pagination
  });
  }
- async dropTable({ tableName }) {
- throw new error.MastraError({
- id: "STORAGE_MONGODB_STORE_DROP_TABLE_FAILED",
- domain: error.ErrorDomain.STORAGE,
- category: error.ErrorCategory.THIRD_PARTY,
- details: { tableName },
- text: "dropTable is not implemented yet in MongoDBStore"
- });
+ async listScoresBySpan({
+ traceId,
+ spanId,
+ pagination: _pagination
+ }) {
+ return this.stores.scores.listScoresBySpan({ traceId, spanId, pagination: _pagination });
  }
  };
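
The scorer methods replace the old MongoDB-labelled "not implemented" stubs with real delegation to this.stores.scores, and listScoresBySpan backs the listScoresBySpan: true flag advertised by supports. A closing sketch; the score payload fields and the { page, perPage } pagination shape are assumptions used only for illustration:

async function scoresForSpan(store) {
  await store.saveScore({
    id: 'score-1',               // placeholder score fields
    scorerId: 'answer-relevancy',
    traceId: 'trace-1',
    spanId: 'span-1',
    score: 0.92,
  });
  return store.listScoresBySpan({
    traceId: 'trace-1',
    spanId: 'span-1',
    pagination: { page: 0, perPage: 20 }, // assumed pagination shape
  });
}
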