@mastra/dynamodb 0.0.0-share-agent-metadata-with-cloud-20250718123411 → 0.0.0-sidebar-window-undefined-fix-20251029233656

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. package/CHANGELOG.md +1074 -0
  2. package/README.md +0 -4
  3. package/dist/entities/eval.d.ts +102 -0
  4. package/dist/entities/eval.d.ts.map +1 -0
  5. package/dist/entities/index.d.ts +761 -0
  6. package/dist/entities/index.d.ts.map +1 -0
  7. package/dist/entities/message.d.ts +100 -0
  8. package/dist/entities/message.d.ts.map +1 -0
  9. package/dist/entities/resource.d.ts +54 -0
  10. package/dist/entities/resource.d.ts.map +1 -0
  11. package/dist/entities/score.d.ts +244 -0
  12. package/dist/entities/score.d.ts.map +1 -0
  13. package/dist/entities/thread.d.ts +69 -0
  14. package/dist/entities/thread.d.ts.map +1 -0
  15. package/dist/entities/trace.d.ts +127 -0
  16. package/dist/entities/trace.d.ts.map +1 -0
  17. package/dist/entities/utils.d.ts +21 -0
  18. package/dist/entities/utils.d.ts.map +1 -0
  19. package/dist/entities/workflow-snapshot.d.ts +74 -0
  20. package/dist/entities/workflow-snapshot.d.ts.map +1 -0
  21. package/dist/index.cjs +1978 -578
  22. package/dist/index.cjs.map +1 -0
  23. package/dist/index.d.ts +2 -2
  24. package/dist/index.d.ts.map +1 -0
  25. package/dist/index.js +1979 -579
  26. package/dist/index.js.map +1 -0
  27. package/dist/storage/domains/legacy-evals/index.d.ts +19 -0
  28. package/dist/storage/domains/legacy-evals/index.d.ts.map +1 -0
  29. package/dist/storage/domains/memory/index.d.ts +89 -0
  30. package/dist/storage/domains/memory/index.d.ts.map +1 -0
  31. package/dist/storage/domains/operations/index.d.ts +69 -0
  32. package/dist/storage/domains/operations/index.d.ts.map +1 -0
  33. package/dist/storage/domains/score/index.d.ts +51 -0
  34. package/dist/storage/domains/score/index.d.ts.map +1 -0
  35. package/dist/storage/domains/workflows/index.d.ts +51 -0
  36. package/dist/storage/domains/workflows/index.d.ts.map +1 -0
  37. package/dist/storage/index.d.ts +244 -0
  38. package/dist/storage/index.d.ts.map +1 -0
  39. package/package.json +24 -14
  40. package/dist/_tsup-dts-rollup.d.cts +0 -1160
  41. package/dist/_tsup-dts-rollup.d.ts +0 -1160
  42. package/dist/index.d.cts +0 -2
  43. package/src/entities/eval.ts +0 -102
  44. package/src/entities/index.ts +0 -23
  45. package/src/entities/message.ts +0 -143
  46. package/src/entities/thread.ts +0 -66
  47. package/src/entities/trace.ts +0 -129
  48. package/src/entities/utils.ts +0 -51
  49. package/src/entities/workflow-snapshot.ts +0 -56
  50. package/src/index.ts +0 -1
  51. package/src/storage/docker-compose.yml +0 -16
  52. package/src/storage/index.test.ts +0 -1483
  53. package/src/storage/index.ts +0 -1383
@@ -1,1483 +0,0 @@
1
- import { spawn } from 'child_process';
2
- import { randomUUID } from 'crypto';
3
- import {
4
- BatchWriteItemCommand,
5
- CreateTableCommand,
6
- DeleteTableCommand,
7
- DescribeTableCommand,
8
- DynamoDBClient,
9
- ListTablesCommand,
10
- ScanCommand,
11
- waitUntilTableExists,
12
- waitUntilTableNotExists,
13
- } from '@aws-sdk/client-dynamodb';
14
- import { createSampleMessageV2, createSampleThread } from '@internal/storage-test-utils';
15
- import type { MastraMessageV1, StorageThreadType, WorkflowRun, WorkflowRunState } from '@mastra/core';
16
- import type { MastraMessageV2 } from '@mastra/core/agent';
17
- import { TABLE_EVALS, TABLE_THREADS, TABLE_WORKFLOW_SNAPSHOT } from '@mastra/core/storage';
18
- import { afterAll, beforeAll, beforeEach, describe, expect, test } from 'vitest';
19
- import { DynamoDBStore } from '..';
20
-
21
// Name of the single DynamoDB table exercised by this suite (single-table design:
// threads, messages, workflow snapshots, etc. all live in this one table).
const TEST_TABLE_NAME = 'mastra-single-table-test'; // Define the single table name
// Endpoint of the DynamoDB Local instance started via docker-compose in beforeAll.
const LOCAL_ENDPOINT = 'http://localhost:8000';
const LOCAL_REGION = 'local-test'; // Use a distinct region for local testing

// Docker process handle — the `docker-compose up -d` child process, killed in afterAll.
let dynamodbProcess: ReturnType<typeof spawn>;

// AWS SDK Client for setup/teardown — used only for table create/delete and
// between-test cleanup, never by the store under test (which builds its own client).
let setupClient: DynamoDBClient;
30
-
31
- // Function to wait for DynamoDB Local to be ready
32
- async function waitForDynamoDBLocal(client: DynamoDBClient, timeoutMs = 90000): Promise<void> {
33
- const startTime = Date.now();
34
- console.log(`Waiting up to ${timeoutMs / 1000}s for DynamoDB Local...`);
35
- while (Date.now() - startTime < timeoutMs) {
36
- try {
37
- await client.send(new ListTablesCommand({}));
38
- console.log('DynamoDB Local is ready.');
39
- return; // Success
40
- } catch (e: unknown) {
41
- let errorName: string | undefined;
42
-
43
- if (e instanceof Error) {
44
- errorName = e.name;
45
- } else if (
46
- typeof e === 'object' &&
47
- e !== null &&
48
- 'name' in e &&
49
- typeof (e as { name: unknown }).name === 'string'
50
- ) {
51
- errorName = (e as { name: string }).name;
52
- }
53
-
54
- if (errorName === 'ECONNREFUSED' || errorName === 'TimeoutError' || errorName === 'ERR_INVALID_PROTOCOL') {
55
- // Expected errors while starting
56
- await new Promise(resolve => setTimeout(resolve, 500)); // Wait before retrying
57
- } else {
58
- console.error('Unexpected error waiting for DynamoDB Local:', e);
59
- throw e; // Rethrow unexpected errors
60
- }
61
- }
62
- }
63
- throw new Error(`DynamoDB Local did not become ready within ${timeoutMs}ms.`);
64
- }
65
-
66
- // Function to clear all items from the single table
67
- async function clearSingleTable(client: DynamoDBClient, tableName: string) {
68
- let ExclusiveStartKey: Record<string, any> | undefined;
69
- let items: Record<string, any>[] = [];
70
-
71
- // Scan all items (handling pagination)
72
- do {
73
- const scanOutput = await client.send(
74
- new ScanCommand({
75
- TableName: tableName,
76
- ExclusiveStartKey,
77
- ProjectionExpression: 'pk, sk', // Only need keys for deletion
78
- }),
79
- );
80
- items = items.concat(scanOutput.Items || []);
81
- ExclusiveStartKey = scanOutput.LastEvaluatedKey;
82
- } while (ExclusiveStartKey);
83
-
84
- if (items.length === 0) {
85
- return; // Nothing to delete
86
- }
87
-
88
- // Batch delete items (handling DynamoDB 25 item limit per batch)
89
- const deleteRequests = items.map(item => ({
90
- DeleteRequest: {
91
- Key: { pk: item.pk, sk: item.sk },
92
- },
93
- }));
94
-
95
- for (let i = 0; i < deleteRequests.length; i += 25) {
96
- const batch = deleteRequests.slice(i, i + 25);
97
- const command = new BatchWriteItemCommand({
98
- RequestItems: {
99
- [tableName]: batch,
100
- },
101
- });
102
- // Handle unprocessed items if necessary (though less likely with local)
103
- let result = await client.send(command);
104
- while (
105
- result.UnprocessedItems &&
106
- result.UnprocessedItems[tableName] &&
107
- result.UnprocessedItems[tableName].length > 0
108
- ) {
109
- console.warn(`Retrying ${result.UnprocessedItems[tableName].length} unprocessed delete items...`);
110
- await new Promise(res => setTimeout(res, 200)); // Simple backoff
111
- const retryCommand = new BatchWriteItemCommand({ RequestItems: result.UnprocessedItems });
112
- result = await client.send(retryCommand);
113
- }
114
- }
115
- // console.log(`Cleared ${items.length} items from ${tableName}`);
116
- }
117
-
118
// Suite-level setup: starts a DynamoDB Local container via docker-compose,
// waits for it to answer, drops any leftover test table, and recreates the
// single table (pk/sk key schema plus two GSIs) used by every test below.
// The statement order matters: the client must exist before the readiness
// poll, and the table must be deleted before it is recreated.
beforeAll(async () => {
  // Initialize client for setup
  setupClient = new DynamoDBClient({
    endpoint: LOCAL_ENDPOINT,
    region: LOCAL_REGION,
    // DynamoDB Local accepts any non-empty credentials.
    credentials: { accessKeyId: 'test', secretAccessKey: 'test' },
    // Increase timeout for setup operations
    requestHandler: { requestTimeout: 10000 },
    // Add retries for setup commands
    maxAttempts: 5,
  });

  // Start DynamoDB Local using docker-compose
  console.log('Starting DynamoDB Local container...');
  dynamodbProcess = spawn('docker-compose', ['up', '-d'], {
    cwd: __dirname, // Ensure docker-compose runs from the test file directory if needed
    stdio: 'pipe', // Use pipe to potentially capture output if needed
  });
  dynamodbProcess.stderr?.on('data', data => console.error(`docker-compose stderr: ${data}`));
  dynamodbProcess.on('error', err => console.error('Failed to start docker-compose:', err));

  // Add a short fixed delay to allow the container process to stabilize before polling
  console.log('Waiting a few seconds for container process to stabilize...');
  await new Promise(resolve => setTimeout(resolve, 3000)); // 3-second delay

  // Wait for DynamoDB to be ready (polls ListTables until it responds)
  try {
    await waitForDynamoDBLocal(setupClient);
  } catch (e) {
    console.error('Failed to connect to DynamoDB Local after startup.', e);
    // Attempt to stop container on failure
    spawn('docker-compose', ['down'], { cwd: __dirname, stdio: 'pipe' });
    throw e; // Re-throw error to fail the test suite
  }

  // Delete the table if it exists from a previous run, so the schema below is
  // always applied fresh. ResourceNotFoundException is the expected clean-run case.
  try {
    console.log(`Checking if table ${TEST_TABLE_NAME} exists...`);
    await setupClient.send(new DescribeTableCommand({ TableName: TEST_TABLE_NAME }));
    console.log(`Table ${TEST_TABLE_NAME} exists, attempting deletion...`);
    await setupClient.send(new DeleteTableCommand({ TableName: TEST_TABLE_NAME }));
    console.log(`Waiting for table ${TEST_TABLE_NAME} to be deleted...`);
    await waitUntilTableNotExists({ client: setupClient, maxWaitTime: 60 }, { TableName: TEST_TABLE_NAME });
    console.log(`Table ${TEST_TABLE_NAME} deleted.`);
  } catch (e: unknown) {
    // Pull a string `name` off the unknown error so we can recognize the
    // "table never existed" case without assuming an Error instance.
    let errorName: string | undefined;

    if (e instanceof Error) {
      errorName = e.name;
    } else if (
      typeof e === 'object' &&
      e !== null &&
      'name' in e &&
      typeof (e as { name: unknown }).name === 'string'
    ) {
      errorName = (e as { name: string }).name;
    }

    if (errorName === 'ResourceNotFoundException') {
      console.log(`Table ${TEST_TABLE_NAME} does not exist, proceeding.`);
    } else {
      console.error(`Error deleting table ${TEST_TABLE_NAME}:`, e);
      throw e; // Rethrow other errors
    }
  }

  // Create the single table with the correct schema: composite pk/sk primary
  // key plus gsi1 and gsi2, both projecting ALL attributes.
  console.log(`Creating table ${TEST_TABLE_NAME}...`);
  try {
    const createTableCommand = new CreateTableCommand({
      TableName: TEST_TABLE_NAME,
      AttributeDefinitions: [
        { AttributeName: 'pk', AttributeType: 'S' },
        { AttributeName: 'sk', AttributeType: 'S' },
        { AttributeName: 'gsi1pk', AttributeType: 'S' },
        { AttributeName: 'gsi1sk', AttributeType: 'S' },
        { AttributeName: 'gsi2pk', AttributeType: 'S' },
        { AttributeName: 'gsi2sk', AttributeType: 'S' },
      ],
      KeySchema: [
        { AttributeName: 'pk', KeyType: 'HASH' },
        { AttributeName: 'sk', KeyType: 'RANGE' },
      ],
      GlobalSecondaryIndexes: [
        {
          IndexName: 'gsi1',
          KeySchema: [
            { AttributeName: 'gsi1pk', KeyType: 'HASH' },
            { AttributeName: 'gsi1sk', KeyType: 'RANGE' },
          ],
          Projection: { ProjectionType: 'ALL' },
        },
        {
          IndexName: 'gsi2',
          KeySchema: [
            { AttributeName: 'gsi2pk', KeyType: 'HASH' },
            { AttributeName: 'gsi2sk', KeyType: 'RANGE' },
          ],
          Projection: { ProjectionType: 'ALL' },
        },
      ],
      BillingMode: 'PAY_PER_REQUEST', // Use PAY_PER_REQUEST for local testing ease
    });
    await setupClient.send(createTableCommand);
    console.log(`Waiting for table ${TEST_TABLE_NAME} to become active...`);
    await waitUntilTableExists({ client: setupClient, maxWaitTime: 60 }, { TableName: TEST_TABLE_NAME });
    console.log(`Table ${TEST_TABLE_NAME} created successfully.`);
  } catch (e) {
    console.error(`Failed to create table ${TEST_TABLE_NAME}:`, e);
    throw e;
  }
}, 60000); // Increase timeout for beforeAll to accommodate Docker startup and table creation
231
-
232
- // Stop DynamoDB Local container
233
- afterAll(async () => {
234
- console.log('Stopping DynamoDB Local container...');
235
- // Optionally delete the table
236
- // try {
237
- // await setupClient.send(new DeleteTableCommand({ TableName: TEST_TABLE_NAME }));
238
- // await waitUntilTableNotExists({ client: setupClient, maxWaitTime: 60 }, { TableName: TEST_TABLE_NAME });
239
- // console.log(`Test table ${TEST_TABLE_NAME} deleted.`);
240
- // } catch (error) {
241
- // console.error(`Error deleting test table ${TEST_TABLE_NAME}:`, error);
242
- // }
243
-
244
- if (setupClient) {
245
- setupClient.destroy();
246
- }
247
-
248
- const stopProcess = spawn('docker-compose', ['down', '--volumes'], {
249
- // Remove volumes too
250
- cwd: __dirname,
251
- stdio: 'pipe',
252
- });
253
- stopProcess.stderr?.on('data', data => console.error(`docker-compose down stderr: ${data}`));
254
- stopProcess.on('error', err => console.error('Failed to stop docker-compose:', err));
255
- await new Promise(resolve => stopProcess.on('close', resolve)); // Wait for compose down
256
-
257
- if (dynamodbProcess && !dynamodbProcess.killed) {
258
- dynamodbProcess.kill();
259
- }
260
- console.log('DynamoDB Local container stopped.');
261
- }, 30000); // Increase timeout for afterAll
262
-
263
- describe('DynamoDBStore Integration Tests', () => {
264
- let store: DynamoDBStore;
265
-
266
- beforeAll(async () => {
267
- // Initialize main store instance used by most tests
268
- store = new DynamoDBStore({
269
- name: 'DynamoDBStoreTest',
270
- config: {
271
- tableName: TEST_TABLE_NAME,
272
- endpoint: LOCAL_ENDPOINT,
273
- region: LOCAL_REGION,
274
- credentials: { accessKeyId: 'test', secretAccessKey: 'test' },
275
- },
276
- });
277
- console.log('Main DynamoDBStore initialized for tests.');
278
- });
279
-
280
- beforeEach(async () => {
281
- // Clear table between tests using the setup client
282
- await clearSingleTable(setupClient, TEST_TABLE_NAME);
283
- });
284
-
285
- afterAll(async () => {
286
- // No client.destroy() needed here as the store manages its internal client
287
- // Or if the store exposes a close/destroy method, call that.
288
- if (store) {
289
- await store.close(); // Assuming store has a close method
290
- }
291
- });
292
-
293
- // DynamoDB-specific tests
294
- describe('DynamoDB-specific operations', () => {
295
- describe('Entity Operations', () => {
296
- test('should persist and retrieve thread metadata', async () => {
297
- const now = new Date();
298
- const threadId = 'metadata-thread';
299
- const metadata = { user: 'test-user', complex: { nested: true, arr: [1, 'a'] } };
300
- const thread: StorageThreadType = {
301
- id: threadId,
302
- resourceId: 'resource-meta',
303
- title: 'Metadata Test Thread',
304
- createdAt: now,
305
- updatedAt: now,
306
- metadata: metadata,
307
- };
308
- await store.saveThread({ thread });
309
- const retrieved = await store.getThreadById({ threadId });
310
- expect(retrieved).toBeDefined();
311
- expect(retrieved?.metadata).toEqual(metadata); // ElectroDB should handle JSON stringify/parse
312
- });
313
-
314
- test('should handle large workflow snapshots near DynamoDB item size limit', async () => {
315
- // Test remains largely the same, relies on clearSingleTable working
316
- const now = Date.now();
317
- const largeSnapshot: WorkflowRunState = {
318
- // ... (rest of the large snapshot definition) ...
319
- value: { state: 'test' },
320
- context: {
321
- input: { source: 'test' },
322
- step1: { status: 'success', output: { data: 'test' } },
323
- } as unknown as WorkflowRunState['context'],
324
- serializedStepGraph: [],
325
- activePaths: [{ stepPath: ['test'], stepId: 'step1', status: 'success' }],
326
- suspendedPaths: { test: [1] },
327
- runId: 'test-run-large', // Use unique runId
328
- timestamp: now,
329
- status: 'success',
330
- };
331
-
332
- await expect(
333
- store.persistWorkflowSnapshot({
334
- workflowName: 'test-workflow-large',
335
- runId: 'test-run-large',
336
- snapshot: largeSnapshot,
337
- }),
338
- ).resolves.not.toThrow();
339
-
340
- const retrieved = await store.loadWorkflowSnapshot({
341
- workflowName: 'test-workflow-large',
342
- runId: 'test-run-large',
343
- });
344
-
345
- expect(retrieved).toEqual(largeSnapshot);
346
- }, 10000); // Increase timeout for potentially large item handling
347
-
348
- test('should handle concurrent thread updates (last writer wins)', async () => {
349
- // Test remains largely the same, verifies final state
350
- const threadId = 'concurrent-thread';
351
- const resourceId = 'resource-123';
352
- const now = new Date();
353
- const thread: StorageThreadType = {
354
- id: threadId,
355
- resourceId,
356
- title: 'Initial Title',
357
- createdAt: now,
358
- updatedAt: now,
359
- metadata: { initial: true },
360
- };
361
- await store.saveThread({ thread });
362
-
363
- // Simulate potential delay between read and write for update 1
364
- const update1 = async () => {
365
- await new Promise(res => setTimeout(res, 50)); // Short delay
366
- await store.updateThread({
367
- id: threadId,
368
- title: 'Updated Thread 1',
369
- metadata: { update: 1, time: Date.now() },
370
- });
371
- };
372
- // Simulate potential delay between read and write for update 2
373
- const update2 = async () => {
374
- await new Promise(res => setTimeout(res, 100)); // Slightly longer delay
375
- await store.updateThread({
376
- id: threadId,
377
- title: 'Updated Thread 2',
378
- metadata: { update: 2, time: Date.now() },
379
- });
380
- };
381
-
382
- await Promise.all([update1(), update2()]);
383
-
384
- const retrieved = await store.getThreadById({ threadId });
385
- expect(retrieved).toBeDefined();
386
- expect(retrieved?.id).toBe(threadId);
387
- // In DynamoDB default (non-conditional) updates, the last writer wins.
388
- // We expect title 2 / metadata 2 because update2 started later.
389
- expect(retrieved?.title).toBe('Updated Thread 2');
390
- expect(retrieved?.metadata?.update).toBe(2);
391
- });
392
-
393
- test('getMessages should return the N most recent messages [v2 storage]', async () => {
394
- const threadId = 'last-selector-thread';
395
- const start = Date.now();
396
-
397
- // Insert 10 messages with increasing timestamps
398
- const messages: MastraMessageV2[] = Array.from({ length: 10 }, (_, i) => ({
399
- id: `m-${i}`,
400
- threadId,
401
- resourceId: 'r',
402
- content: { format: 2, parts: [{ type: 'text', text: `msg-${i}` }] },
403
- createdAt: new Date(start + i), // 0..9 ms apart
404
- role: 'user',
405
- type: 'text',
406
- }));
407
- await store.saveMessages({ messages, format: 'v2' });
408
-
409
- const last3 = await store.getMessages({
410
- format: 'v2',
411
- threadId,
412
- selectBy: { last: 3 },
413
- });
414
-
415
- expect(last3).toHaveLength(3);
416
- expect(last3.map(m => (m.content.parts[0] as { type: string; text: string }).text)).toEqual([
417
- 'msg-7',
418
- 'msg-8',
419
- 'msg-9',
420
- ]);
421
- });
422
-
423
- test('getMessages should return the N most recent messages [v1 storage]', async () => {
424
- const threadId = 'last-selector-thread';
425
- const start = Date.now();
426
-
427
- // Insert 10 messages with increasing timestamps
428
- const messages: MastraMessageV1[] = Array.from({ length: 10 }, (_, i) => ({
429
- id: `m-${i}`,
430
- threadId,
431
- resourceId: 'r',
432
- content: `msg-${i}`,
433
- createdAt: new Date(start + i), // 0..9 ms apart
434
- role: 'user',
435
- type: 'text',
436
- }));
437
- await store.saveMessages({ messages });
438
-
439
- const last3 = await store.getMessages({
440
- threadId,
441
- selectBy: { last: 3 },
442
- });
443
-
444
- expect(last3).toHaveLength(3);
445
- expect(last3.map(m => m.content)).toEqual(['msg-7', 'msg-8', 'msg-9']);
446
- });
447
-
448
- test('should update thread updatedAt when a message is saved to it', async () => {
449
- const thread: StorageThreadType = {
450
- id: 'thread-update-test',
451
- resourceId: 'resource-update',
452
- title: 'Update Test Thread',
453
- createdAt: new Date(),
454
- updatedAt: new Date(),
455
- metadata: { test: true },
456
- };
457
- await store.saveThread({ thread });
458
-
459
- // Get the initial thread to capture the original updatedAt
460
- const initialThread = await store.getThreadById({ threadId: thread.id });
461
- expect(initialThread).toBeDefined();
462
- const originalUpdatedAt = initialThread!.updatedAt;
463
-
464
- // Wait a small amount to ensure different timestamp
465
- await new Promise(resolve => setTimeout(resolve, 100));
466
-
467
- // Create and save a message to the thread
468
- const message: MastraMessageV1 = {
469
- id: 'msg-update-test',
470
- threadId: thread.id,
471
- resourceId: 'resource-update',
472
- content: 'Test message for update',
473
- createdAt: new Date(),
474
- role: 'user',
475
- type: 'text',
476
- };
477
- await store.saveMessages({ messages: [message] });
478
-
479
- // Retrieve the thread again and check that updatedAt was updated
480
- const updatedThread = await store.getThreadById({ threadId: thread.id });
481
- expect(updatedThread).toBeDefined();
482
- expect(updatedThread!.updatedAt.getTime()).toBeGreaterThan(originalUpdatedAt.getTime());
483
- });
484
-
485
- test('saveThread upsert: should create new thread when thread does not exist', async () => {
486
- const threadId = `upsert-new-${randomUUID()}`;
487
- const now = new Date();
488
- const thread: StorageThreadType = {
489
- id: threadId,
490
- resourceId: 'resource-upsert-new',
491
- title: 'New Thread via Upsert',
492
- createdAt: now,
493
- updatedAt: now,
494
- metadata: { operation: 'create', test: true },
495
- };
496
-
497
- // Save the thread (should create new)
498
- await expect(store.saveThread({ thread })).resolves.not.toThrow();
499
-
500
- // Verify the thread was created
501
- const retrieved = await store.getThreadById({ threadId });
502
- expect(retrieved).toBeDefined();
503
- expect(retrieved?.id).toBe(threadId);
504
- expect(retrieved?.title).toBe('New Thread via Upsert');
505
- expect(retrieved?.resourceId).toBe('resource-upsert-new');
506
- expect(retrieved?.metadata).toEqual({ operation: 'create', test: true });
507
- });
508
-
509
- test('saveThread upsert: should update existing thread when thread already exists', async () => {
510
- const threadId = `upsert-update-${randomUUID()}`;
511
- const initialCreatedAt = new Date();
512
-
513
- // Create initial thread
514
- const initialThread: StorageThreadType = {
515
- id: threadId,
516
- resourceId: 'resource-upsert-initial',
517
- title: 'Initial Thread Title',
518
- createdAt: initialCreatedAt,
519
- updatedAt: initialCreatedAt,
520
- metadata: { operation: 'initial', version: 1 },
521
- };
522
- await store.saveThread({ thread: initialThread });
523
-
524
- // Wait a small amount to ensure different timestamp
525
- await new Promise(resolve => setTimeout(resolve, 100));
526
-
527
- // Update the thread with same ID but different data
528
- const updatedThread: StorageThreadType = {
529
- id: threadId,
530
- resourceId: 'resource-upsert-updated',
531
- title: 'Updated Thread Title',
532
- createdAt: initialCreatedAt, // Keep original creation time
533
- updatedAt: new Date(), // New update time
534
- metadata: { operation: 'update', version: 2 },
535
- };
536
- await expect(store.saveThread({ thread: updatedThread })).resolves.not.toThrow();
537
-
538
- // Verify the thread was updated
539
- const retrieved = await store.getThreadById({ threadId });
540
- expect(retrieved).toBeDefined();
541
- expect(retrieved?.id).toBe(threadId);
542
- expect(retrieved?.title).toBe('Updated Thread Title');
543
- expect(retrieved?.resourceId).toBe('resource-upsert-updated');
544
- expect(retrieved?.metadata).toEqual({ operation: 'update', version: 2 });
545
-
546
- // updatedAt should be newer than the initial creation time
547
- expect(retrieved?.updatedAt.getTime()).toBeGreaterThan(initialCreatedAt.getTime());
548
- // createdAt should remain exactly equal to the initial creation time
549
- expect(retrieved?.createdAt.getTime()).toBe(initialCreatedAt.getTime());
550
- });
551
-
552
- test('saveThread upsert: should handle complex metadata updates', async () => {
553
- const threadId = `upsert-metadata-${randomUUID()}`;
554
- const initialMetadata = {
555
- user: 'initial-user',
556
- tags: ['initial', 'test'],
557
- count: 1,
558
- };
559
-
560
- // Create initial thread with complex metadata
561
- const initialThread: StorageThreadType = {
562
- id: threadId,
563
- resourceId: 'resource-metadata-test',
564
- title: 'Metadata Test Thread',
565
- createdAt: new Date(),
566
- updatedAt: new Date(),
567
- metadata: initialMetadata,
568
- };
569
- await store.saveThread({ thread: initialThread });
570
-
571
- // Wait a small amount to ensure different timestamp
572
- await new Promise(resolve => setTimeout(resolve, 100));
573
-
574
- // Update with completely different metadata structure
575
- const updatedMetadata = {
576
- user: 'updated-user',
577
- settings: { theme: 'light', language: 'ja', notifications: true },
578
- tags: ['updated', 'upsert', 'complex'],
579
- count: 5,
580
- newField: { nested: { deeply: 'value' } },
581
- };
582
-
583
- const updatedThread: StorageThreadType = {
584
- id: threadId,
585
- resourceId: 'resource-metadata-test',
586
- title: 'Updated Metadata Thread',
587
- createdAt: initialThread.createdAt,
588
- updatedAt: new Date(),
589
- metadata: updatedMetadata,
590
- };
591
- await expect(store.saveThread({ thread: updatedThread })).resolves.not.toThrow();
592
-
593
- // Verify the metadata was completely replaced
594
- const retrieved = await store.getThreadById({ threadId });
595
- expect(retrieved).toBeDefined();
596
- expect(retrieved?.metadata).toEqual(updatedMetadata);
597
- expect(retrieved?.metadata?.user).toBe('updated-user');
598
- expect(retrieved?.metadata?.tags).toEqual(['updated', 'upsert', 'complex']);
599
- expect(retrieved?.title).toBe('Updated Metadata Thread');
600
- });
601
- });
602
-
603
- describe('Batch Operations', () => {
604
- test('should handle batch message inserts efficiently (up to 25 items) [v1 storage]', async () => {
605
- const startTime = Date.now(); // Get a base time
606
- const threadId = 'batch-thread';
607
- const messages: MastraMessageV1[] = Array.from({ length: 25 }, (_, i) => ({
608
- id: `msg-${i}`,
609
- threadId,
610
- resourceId: 'test-resource',
611
- content: `Message ${i}`,
612
- // Increment timestamp slightly for each message to ensure order
613
- createdAt: new Date(startTime + i),
614
- role: i % 2 === 0 ? 'user' : 'assistant',
615
- type: 'text',
616
- }));
617
-
618
- // Assuming saveMessages uses BatchWriteItem internally
619
- await expect(store.saveMessages({ messages })).resolves.not.toThrow();
620
-
621
- const retrieved = await store.getMessages({ threadId });
622
- expect(retrieved).toHaveLength(25);
623
- // Now the order should be guaranteed by the ascending createdAt timestamp
624
- expect(retrieved[0]?.content).toBe('Message 0');
625
- expect(retrieved[24]?.content).toBe('Message 24');
626
- });
627
-
628
- test('should handle batch message inserts efficiently (up to 25 items) [v2 storage]', async () => {
629
- const startTime = Date.now(); // Get a base time
630
- const threadId = 'batch-thread';
631
- const messages: MastraMessageV2[] = Array.from({ length: 25 }, (_, i) => ({
632
- id: `msg-${i}`,
633
- threadId,
634
- resourceId: 'test-resource',
635
- content: { format: 2, parts: [{ type: 'text', text: `Message ${i}` }] },
636
- // Increment timestamp slightly for each message to ensure order
637
- createdAt: new Date(startTime + i),
638
- role: i % 2 === 0 ? 'user' : 'assistant',
639
- type: 'text',
640
- }));
641
-
642
- // Assuming saveMessages uses BatchWriteItem internally
643
- await expect(store.saveMessages({ messages, format: 'v2' })).resolves.not.toThrow();
644
-
645
- const retrieved = await store.getMessages({ threadId, format: 'v2' });
646
- expect(retrieved).toHaveLength(25);
647
- // Now the order should be guaranteed by the ascending createdAt timestamp
648
- if (retrieved[0]?.content?.parts[0]?.type !== `text`) throw new Error(`Expected text part`);
649
- expect(retrieved[0].content.parts[0].text).toBe('Message 0');
650
- if (retrieved[24]?.content?.parts?.[0]?.type !== `text`) throw new Error(`Expected text part`);
651
- expect(retrieved[24].content.parts[0].text).toBe('Message 24');
652
- });
653
-
654
- test('should handle batch inserts exceeding 25 items (if saveMessages chunks)', async () => {
655
- const startTime = Date.now(); // Get a base time
656
- const threadId = 'batch-thread-large';
657
- const messages: MastraMessageV1[] = Array.from({ length: 30 }, (_, i) => ({
658
- id: `msg-large-${i}`,
659
- threadId,
660
- resourceId: 'test-resource-large',
661
- content: `Large Message ${i}`,
662
- // Increment timestamp slightly for each message to ensure order
663
- createdAt: new Date(startTime + i),
664
- role: 'user',
665
- type: 'text',
666
- }));
667
-
668
- await expect(store.saveMessages({ messages })).resolves.not.toThrow();
669
-
670
- const retrieved = await store.getMessages({ threadId });
671
- expect(retrieved).toHaveLength(30); // Verify all were saved
672
- // Add order check for the > 25 test as well
673
- expect(retrieved[0]?.content).toBe('Large Message 0');
674
- expect(retrieved[29]?.content).toBe('Large Message 29');
675
- });
676
-
677
- test('should upsert messages: duplicate id+threadId results in update, not duplicate row', async () => {
678
- const thread = await createSampleThread();
679
- await store.saveThread({ thread });
680
- const baseMessage = createSampleMessageV2({
681
- threadId: thread.id,
682
- createdAt: new Date(),
683
- content: { content: 'Original' },
684
- resourceId: thread.resourceId,
685
- });
686
-
687
- // Insert the message for the first time
688
- await store.saveMessages({ messages: [baseMessage], format: 'v2' });
689
-
690
- // // Insert again with the same id and threadId but different content
691
- const updatedMessage = {
692
- ...createSampleMessageV2({
693
- threadId: thread.id,
694
- createdAt: new Date(),
695
- content: { content: 'Updated' },
696
- resourceId: thread.resourceId,
697
- }),
698
- id: baseMessage.id,
699
- };
700
-
701
- await store.saveMessages({ messages: [updatedMessage], format: 'v2' });
702
-
703
- // Retrieve messages for the thread
704
- const retrievedMessages = await store.getMessages({ threadId: thread.id, format: 'v2' });
705
-
706
- // Only one message should exist for that id+threadId
707
- expect(retrievedMessages.filter(m => m.id === baseMessage.id)).toHaveLength(1);
708
-
709
- // The content should be the updated one
710
- expect(retrievedMessages.find(m => m.id === baseMessage.id)?.content.content).toBe('Updated');
711
- });
712
-
713
- test('should upsert messages: duplicate id and different threadid', async () => {
714
- const thread1 = await createSampleThread();
715
- const thread2 = await createSampleThread();
716
- await store.saveThread({ thread: thread1 });
717
- await store.saveThread({ thread: thread2 });
718
-
719
- const message = createSampleMessageV2({
720
- threadId: thread1.id,
721
- createdAt: new Date(),
722
- content: { content: 'Thread1 Content' },
723
- resourceId: thread1.resourceId,
724
- });
725
-
726
- // Insert message into thread1
727
- await store.saveMessages({ messages: [message], format: 'v2' });
728
-
729
- // Attempt to insert a message with the same id but different threadId
730
- const conflictingMessage = {
731
- ...createSampleMessageV2({
732
- threadId: thread2.id, // different thread
733
- content: { content: 'Thread2 Content' },
734
- resourceId: thread2.resourceId,
735
- }),
736
- id: message.id,
737
- };
738
-
739
- // Save should save the message to the new thread
740
- await store.saveMessages({ messages: [conflictingMessage], format: 'v2' });
741
-
742
- // Retrieve messages for both threads
743
- const thread1Messages = await store.getMessages({ threadId: thread1.id, format: 'v2' });
744
- const thread2Messages = await store.getMessages({ threadId: thread2.id, format: 'v2' });
745
-
746
- // Thread 1 should NOT have the message with that id
747
- expect(thread1Messages.find(m => m.id === message.id)).toBeUndefined();
748
-
749
- // Thread 2 should have the message with that id
750
- expect(thread2Messages.find(m => m.id === message.id)?.content.content).toBe('Thread2 Content');
751
- });
752
- });
753
-
754
- describe('Single-Table Design', () => {
755
- test('should maintain entity separation in single table', async () => {
756
- // Test remains largely the same
757
- const threadId = 'mixed-thread';
758
- const workflowName = 'mixed-workflow';
759
- const now = new Date();
760
- const thread: StorageThreadType = {
761
- id: threadId,
762
- resourceId: 'mixed-resource',
763
- title: 'Mixed Thread',
764
- createdAt: now,
765
- updatedAt: now,
766
- metadata: { type: 'thread' },
767
- };
768
- await store.saveThread({ thread });
769
-
770
- const workflowSnapshot: WorkflowRunState = {
771
- // ...(snapshot definition)
772
- value: { state: 'test' },
773
- context: {
774
- step1: { status: 'success', output: { data: 'test' } },
775
- input: { source: 'test' },
776
- } as unknown as WorkflowRunState['context'],
777
- serializedStepGraph: [],
778
- activePaths: [{ stepPath: ['test'], stepId: 'step1', status: 'success' }],
779
- suspendedPaths: { test: [1] },
780
- runId: 'mixed-run',
781
- timestamp: Date.now(),
782
- status: 'success',
783
- };
784
- await store.persistWorkflowSnapshot({ workflowName, runId: 'mixed-run', snapshot: workflowSnapshot });
785
-
786
- const retrievedThread = await store.getThreadById({ threadId });
787
- const retrievedWorkflow = await store.loadWorkflowSnapshot({ workflowName, runId: 'mixed-run' });
788
-
789
- expect(retrievedThread?.metadata?.type).toBe('thread');
790
- expect(retrievedWorkflow).toEqual(workflowSnapshot);
791
- });
792
- });
793
-
794
- describe('Error Handling', () => {
795
- test('should handle non-existent IDs gracefully for getById methods', async () => {
796
- const nonExistentId = 'does-not-exist';
797
- // Test getThreadById (already partially covered but good to keep specific)
798
- const thread = await store.getThreadById({ threadId: nonExistentId });
799
- expect(thread).toBeNull();
800
-
801
- // Test loadWorkflowSnapshot (already covered in Workflow tests, technically)
802
- const snapshot = await store.loadWorkflowSnapshot({ workflowName: nonExistentId, runId: nonExistentId });
803
- expect(snapshot).toBeNull();
804
-
805
- // Test getWorkflowRunById (already covered in Workflow tests, technically)
806
- const workflowRun = await store.getWorkflowRunById({ runId: nonExistentId });
807
- expect(workflowRun).toBeNull();
808
- });
809
-
810
- test('getMessages should return empty array for non-existent thread', async () => {
811
- const messages = await store.getMessages({ threadId: 'non-existent-thread' });
812
- expect(messages).toEqual([]);
813
- });
814
-
815
- test('getThreadsByResourceId should return empty array for non-existent resourceId', async () => {
816
- const threads = await store.getThreadsByResourceId({ resourceId: 'non-existent-resource' });
817
- expect(threads).toEqual([]);
818
- });
819
-
820
- test('getTraces should return empty array when no traces match filter', async () => {
821
- const tracesByName = await store.getTraces({ name: 'non-existent-trace', page: 1, perPage: 10 });
822
- expect(tracesByName).toEqual([]);
823
- const tracesByScope = await store.getTraces({ scope: 'non-existent-scope', page: 1, perPage: 10 });
824
- expect(tracesByScope).toEqual([]);
825
- });
826
-
827
- test('getEvalsByAgentName should return empty array for non-existent agent', async () => {
828
- const evals = await store.getEvalsByAgentName('non-existent-agent');
829
- expect(evals).toEqual([]);
830
- });
831
-
832
- test('getWorkflowRuns should return empty result for non-existent filters', async () => {
833
- const { runs: runsByName, total: totalByName } = await store.getWorkflowRuns({
834
- workflowName: 'non-existent-workflow',
835
- });
836
- expect(runsByName).toEqual([]);
837
- expect(totalByName).toBe(0);
838
-
839
- const { runs: runsByResource, total: totalByResource } = await store.getWorkflowRuns({
840
- resourceId: 'non-existent-resource',
841
- });
842
- expect(runsByResource).toEqual([]);
843
- expect(totalByResource).toBe(0);
844
- });
845
- }); // End Error Handling describe
846
- });
847
-
848
- // --- Trace Operations Tests ---
849
- describe('Trace Operations', () => {
850
- const sampleTrace = (name: string, scope: string, startTime = Date.now()) => ({
851
- id: `trace-${randomUUID()}`,
852
- parentSpanId: `span-${randomUUID()}`,
853
- traceId: `traceid-${randomUUID()}`,
854
- name,
855
- scope,
856
- kind: 1, // Example kind
857
- startTime: startTime,
858
- endTime: startTime + 100, // Example duration
859
- status: JSON.stringify({ code: 0 }), // Example status
860
- attributes: JSON.stringify({ key: 'value', scopeAttr: scope }),
861
- events: JSON.stringify([{ name: 'event1', timestamp: startTime + 50 }]),
862
- links: JSON.stringify([]),
863
- createdAt: new Date(startTime).toISOString(),
864
- updatedAt: new Date(startTime).toISOString(),
865
- });
866
-
867
- test('should batch insert and retrieve traces', async () => {
868
- const trace1 = sampleTrace('trace-op-1', 'scope-A');
869
- const trace2 = sampleTrace('trace-op-2', 'scope-A', Date.now() + 10);
870
- const trace3 = sampleTrace('trace-op-3', 'scope-B', Date.now() + 20);
871
- const records = [trace1, trace2, trace3];
872
-
873
- await expect(store.batchTraceInsert({ records })).resolves.not.toThrow();
874
-
875
- // Retrieve all (via scan, assuming low test data volume)
876
- const allTraces = await store.getTraces({ page: 1, perPage: 10 });
877
- expect(allTraces.length).toBe(3);
878
- });
879
-
880
- test('should handle Date objects for createdAt/updatedAt fields in batchTraceInsert', async () => {
881
- // This test specifically verifies the bug from the issue where Date objects
882
- // were passed instead of ISO strings and ElectroDB validation failed
883
- const now = new Date();
884
- const traceWithDateObjects = {
885
- id: `trace-${randomUUID()}`,
886
- parentSpanId: `span-${randomUUID()}`,
887
- traceId: `traceid-${randomUUID()}`,
888
- name: 'test-trace-with-dates',
889
- scope: 'default-tracer',
890
- kind: 1,
891
- startTime: now.getTime(),
892
- endTime: now.getTime() + 100,
893
- status: JSON.stringify({ code: 0 }),
894
- attributes: JSON.stringify({ key: 'value' }),
895
- events: JSON.stringify([]),
896
- links: JSON.stringify([]),
897
- // These are Date objects, not ISO strings - this should be handled by ElectroDB attribute setters
898
- createdAt: now,
899
- updatedAt: now,
900
- };
901
-
902
- // This should not throw a validation error due to Date object type
903
- await expect(store.batchTraceInsert({ records: [traceWithDateObjects] })).resolves.not.toThrow();
904
-
905
- // Verify the trace was saved correctly
906
- const allTraces = await store.getTraces({ name: 'test-trace-with-dates', page: 1, perPage: 10 });
907
- expect(allTraces.length).toBe(1);
908
- expect(allTraces[0].name).toBe('test-trace-with-dates');
909
- });
910
-
911
- test('should retrieve traces filtered by name using GSI', async () => {
912
- const trace1 = sampleTrace('trace-filter-name', 'scope-X');
913
- const trace2 = sampleTrace('trace-filter-name', 'scope-Y', Date.now() + 10);
914
- const trace3 = sampleTrace('other-name', 'scope-X', Date.now() + 20);
915
- await store.batchTraceInsert({ records: [trace1, trace2, trace3] });
916
-
917
- const filteredTraces = await store.getTraces({ name: 'trace-filter-name', page: 1, perPage: 10 });
918
- expect(filteredTraces.length).toBe(2);
919
- expect(filteredTraces.every(t => t.name === 'trace-filter-name')).toBe(true);
920
- // Check if sorted by startTime (GSI SK) - ascending default
921
- expect(filteredTraces[0].scope).toBe('scope-X');
922
- expect(filteredTraces[1].scope).toBe('scope-Y');
923
- });
924
-
925
- test('should retrieve traces filtered by scope using GSI', async () => {
926
- const trace1 = sampleTrace('trace-filter-scope-A', 'scope-TARGET');
927
- const trace2 = sampleTrace('trace-filter-scope-B', 'scope-OTHER', Date.now() + 10);
928
- const trace3 = sampleTrace('trace-filter-scope-C', 'scope-TARGET', Date.now() + 20);
929
- await store.batchTraceInsert({ records: [trace1, trace2, trace3] });
930
-
931
- const filteredTraces = await store.getTraces({ scope: 'scope-TARGET', page: 1, perPage: 10 });
932
- expect(filteredTraces.length).toBe(2);
933
- expect(filteredTraces.every(t => t.scope === 'scope-TARGET')).toBe(true);
934
- // Check if sorted by startTime (GSI SK) - ascending default
935
- expect(filteredTraces[0].name).toBe('trace-filter-scope-A');
936
- expect(filteredTraces[1].name).toBe('trace-filter-scope-C');
937
- });
938
-
939
- test('should handle pagination for getTraces', async () => {
940
- const traceData = Array.from({ length: 5 }, (_, i) =>
941
- sampleTrace('trace-page', `scope-page`, Date.now() + i * 10),
942
- );
943
- await store.batchTraceInsert({ records: traceData });
944
-
945
- // Get page 1 (first 2 items)
946
- const page1 = await store.getTraces({ name: 'trace-page', page: 1, perPage: 2 });
947
- expect(page1.length).toBe(2);
948
- // Use non-null assertion (!) since lengths are verified
949
- expect(page1[0]!.startTime).toBe(traceData[0]!.startTime);
950
- expect(page1[1]!.startTime).toBe(traceData[1]!.startTime);
951
-
952
- // Get page 2 (next 2 items)
953
- const page2 = await store.getTraces({ name: 'trace-page', page: 2, perPage: 2 });
954
- expect(page2.length).toBe(2);
955
- expect(page2[0]!.startTime).toBe(traceData[2]!.startTime);
956
- expect(page2[1]!.startTime).toBe(traceData[3]!.startTime);
957
-
958
- // Get page 3 (last 1 item)
959
- const page3 = await store.getTraces({ name: 'trace-page', page: 3, perPage: 2 });
960
- expect(page3.length).toBe(1);
961
- expect(page3[0]!.startTime).toBe(traceData[4]!.startTime);
962
-
963
- // Get page beyond results
964
- const page4 = await store.getTraces({ name: 'trace-page', page: 4, perPage: 2 });
965
- expect(page4.length).toBe(0);
966
- });
967
- }); // End Trace Operations describe
968
-
969
- // --- Eval Operations Tests ---
970
- describe('Eval Operations', () => {
971
- const sampleEval = (agentName: string, isTest = false, createdAt = new Date()) => {
972
- const testInfo = isTest ? { testPath: 'test/path.ts', testName: 'Test Name' } : undefined;
973
- return {
974
- entity: 'eval', // Important for saving
975
- agent_name: agentName,
976
- input: 'Sample input',
977
- output: 'Sample output',
978
- result: JSON.stringify({ score: Math.random() }), // Random score
979
- metric_name: 'sample-metric',
980
- instructions: 'Sample instructions',
981
- test_info: testInfo ? JSON.stringify(testInfo) : undefined,
982
- global_run_id: `global-${randomUUID()}`,
983
- run_id: `run-${randomUUID()}`,
984
- created_at: createdAt.toISOString(),
985
- // Add core MastraStorage fields
986
- createdAt: createdAt.toISOString(),
987
- updatedAt: createdAt.toISOString(),
988
- metadata: JSON.stringify({ custom: 'eval_meta' }),
989
- };
990
- };
991
-
992
- test('should handle Date objects for createdAt/updatedAt fields in eval batchInsert', async () => {
993
- // Test that eval entity properly handles Date objects in createdAt/updatedAt fields
994
- const now = new Date();
995
- const evalWithDateObjects = {
996
- entity: 'eval',
997
- agent_name: 'test-agent-dates',
998
- input: 'Test input',
999
- output: 'Test output',
1000
- result: JSON.stringify({ score: 0.95 }),
1001
- metric_name: 'test-metric',
1002
- instructions: 'Test instructions',
1003
- global_run_id: `global-${randomUUID()}`,
1004
- run_id: `run-${randomUUID()}`,
1005
- created_at: now, // Date object instead of ISO string
1006
- // These are Date objects, not ISO strings - should be handled by ElectroDB attribute setters
1007
- createdAt: now,
1008
- updatedAt: now,
1009
- metadata: JSON.stringify({ test: 'meta' }),
1010
- };
1011
-
1012
- // This should not throw a validation error due to Date object type
1013
- await expect(
1014
- store.batchInsert({
1015
- tableName: TABLE_EVALS,
1016
- records: [evalWithDateObjects],
1017
- }),
1018
- ).resolves.not.toThrow();
1019
-
1020
- // Verify the eval was saved correctly
1021
- const evals = await store.getEvalsByAgentName('test-agent-dates');
1022
- expect(evals.length).toBe(1);
1023
- expect(evals[0].agentName).toBe('test-agent-dates');
1024
- });
1025
-
1026
- test('should retrieve evals by agent name using GSI and filter by type', async () => {
1027
- const agent1 = 'eval-agent-1';
1028
- const agent2 = 'eval-agent-2';
1029
- const time1 = new Date();
1030
- const time2 = new Date(Date.now() + 1000);
1031
- const time3 = new Date(Date.now() + 2000);
1032
- const time4 = new Date(Date.now() + 3000);
1033
-
1034
- const eval1_live = sampleEval(agent1, false, time1);
1035
- const eval1_test = sampleEval(agent1, true, time2);
1036
- const eval2_live = sampleEval(agent2, false, time3);
1037
- const eval1_live_later = sampleEval(agent1, false, time4);
1038
-
1039
- // Use generic batchInsert (which expects entity prop already set)
1040
- await store.batchInsert({
1041
- tableName: TABLE_EVALS,
1042
- records: [eval1_live, eval1_test, eval2_live, eval1_live_later],
1043
- });
1044
-
1045
- // Get all for agent1 (expecting DESCENDING order now)
1046
- const allAgent1 = await store.getEvalsByAgentName(agent1);
1047
- expect(allAgent1.length).toBe(3);
1048
- // Assert descending order (newest first)
1049
- expect(allAgent1[0]!.runId).toBe(eval1_live_later.run_id); // Newest (time4)
1050
- expect(allAgent1[1]!.runId).toBe(eval1_test.run_id); // Middle (time2)
1051
- expect(allAgent1[2]!.runId).toBe(eval1_live.run_id); // Oldest (time1)
1052
-
1053
- // Get only live for agent1 (should be 2, ordered descending)
1054
- const liveAgent1 = await store.getEvalsByAgentName(agent1, 'live');
1055
- expect(liveAgent1.length).toBe(2);
1056
- // Assert descending order
1057
- expect(liveAgent1[0]!.runId).toBe(eval1_live_later.run_id); // Newest live (time4)
1058
- expect(liveAgent1[1]!.runId).toBe(eval1_live.run_id); // Oldest live (time1)
1059
-
1060
- // Get only test for agent1 (should be 1)
1061
- const testAgent1 = await store.getEvalsByAgentName(agent1, 'test');
1062
- expect(testAgent1.length).toBe(1);
1063
- expect(testAgent1[0]!.runId).toBe(eval1_test.run_id);
1064
- expect(testAgent1[0]!.testInfo).toEqual(JSON.parse(eval1_test.test_info!));
1065
-
1066
- // Get for agent2 (should be 1)
1067
- const allAgent2 = await store.getEvalsByAgentName(agent2);
1068
- expect(allAgent2.length).toBe(1);
1069
- expect(allAgent2[0]!.runId).toBe(eval2_live.run_id);
1070
-
1071
- // Get for non-existent agent
1072
- const none = await store.getEvalsByAgentName('non-existent-agent');
1073
- expect(none.length).toBe(0);
1074
- });
1075
- }); // End Eval Operations describe
1076
-
1077
- // --- Workflow Operations Tests ---
1078
- describe('Workflow Operations', () => {
1079
- const sampleWorkflowSnapshot = (
1080
- workflowName: string,
1081
- runId: string,
1082
- resourceId?: string,
1083
- createdAt = new Date(),
1084
- status = 'running',
1085
- ): { recordData: Record<string, any>; snapshot: WorkflowRunState } => {
1086
- const snapshot: WorkflowRunState = {
1087
- value: { currentState: status },
1088
- context: {
1089
- step1: { status: 'success', output: { data: 'test' } },
1090
- input: { source: 'test' },
1091
- } as unknown as WorkflowRunState['context'],
1092
- serializedStepGraph: [],
1093
- activePaths: [],
1094
- suspendedPaths: {},
1095
- runId: runId,
1096
- timestamp: createdAt.getTime(),
1097
- status: 'success',
1098
- ...(resourceId && { resourceId: resourceId }), // Conditionally add resourceId to snapshot
1099
- };
1100
- return {
1101
- recordData: {
1102
- entity: 'workflow_snapshot',
1103
- workflow_name: workflowName,
1104
- run_id: runId,
1105
- snapshot: JSON.stringify(snapshot),
1106
- createdAt: createdAt.toISOString(),
1107
- updatedAt: createdAt.toISOString(),
1108
- resourceId: resourceId, // Store resourceId directly if available
1109
- metadata: JSON.stringify({ wf: 'meta' }),
1110
- },
1111
- snapshot: snapshot,
1112
- };
1113
- };
1114
-
1115
- test('should persist and load a workflow snapshot', async () => {
1116
- const wfName = 'persist-test-wf';
1117
- const runId = `run-${randomUUID()}`;
1118
- const { snapshot } = sampleWorkflowSnapshot(wfName, runId);
1119
-
1120
- await expect(
1121
- store.persistWorkflowSnapshot({
1122
- workflowName: wfName,
1123
- runId: runId,
1124
- snapshot: snapshot,
1125
- }),
1126
- ).resolves.not.toThrow();
1127
-
1128
- const loadedSnapshot = await store.loadWorkflowSnapshot({
1129
- workflowName: wfName,
1130
- runId: runId,
1131
- });
1132
- // Compare only relevant parts, as persist might add internal fields
1133
- expect(loadedSnapshot?.runId).toEqual(snapshot.runId);
1134
- expect(loadedSnapshot?.value).toEqual(snapshot.value);
1135
- expect(loadedSnapshot?.context).toEqual(snapshot.context);
1136
- });
1137
-
1138
- test('should allow updating an existing workflow snapshot', async () => {
1139
- const wfName = 'update-test-wf';
1140
- const runId = `run-${randomUUID()}`;
1141
-
1142
- // Create initial snapshot
1143
- const { snapshot: initialSnapshot } = sampleWorkflowSnapshot(wfName, runId);
1144
-
1145
- await expect(
1146
- store.persistWorkflowSnapshot({
1147
- workflowName: wfName,
1148
- runId: runId,
1149
- snapshot: initialSnapshot,
1150
- }),
1151
- ).resolves.not.toThrow();
1152
-
1153
- // Create updated snapshot with different data
1154
- const updatedSnapshot: WorkflowRunState = {
1155
- ...initialSnapshot,
1156
- value: { currentState: 'completed' },
1157
- context: {
1158
- step1: { status: 'success', output: { data: 'updated-test' } },
1159
- step2: { status: 'success', output: { data: 'new-step' } },
1160
- input: { source: 'updated-test' },
1161
- } as unknown as WorkflowRunState['context'],
1162
- timestamp: Date.now(),
1163
- };
1164
-
1165
- // This should succeed (update existing snapshot)
1166
- await expect(
1167
- store.persistWorkflowSnapshot({
1168
- workflowName: wfName,
1169
- runId: runId,
1170
- snapshot: updatedSnapshot,
1171
- }),
1172
- ).resolves.not.toThrow();
1173
-
1174
- // Verify the snapshot was updated
1175
- const loadedSnapshot = await store.loadWorkflowSnapshot({
1176
- workflowName: wfName,
1177
- runId: runId,
1178
- });
1179
-
1180
- expect(loadedSnapshot?.runId).toEqual(updatedSnapshot.runId);
1181
- expect(loadedSnapshot?.value).toEqual(updatedSnapshot.value);
1182
- expect(loadedSnapshot?.context).toEqual(updatedSnapshot.context);
1183
- });
1184
-
1185
- test('getWorkflowRunById should retrieve correct run', async () => {
1186
- const wfName = 'get-by-id-wf';
1187
- const runId1 = `run-${randomUUID()}`;
1188
- const runId2 = `run-${randomUUID()}`;
1189
- const wf1 = sampleWorkflowSnapshot(wfName, runId1);
1190
- const wf2 = sampleWorkflowSnapshot(wfName, runId2);
1191
-
1192
- await store.batchInsert({ tableName: TABLE_WORKFLOW_SNAPSHOT, records: [wf1.recordData, wf2.recordData] });
1193
-
1194
- const found = await store.getWorkflowRunById({ runId: runId1, workflowName: wfName });
1195
- expect(found).toBeDefined();
1196
- expect(found!.runId).toBe(runId1);
1197
- expect(found!.workflowName).toBe(wfName);
1198
-
1199
- const notFound = await store.getWorkflowRunById({ runId: 'non-existent', workflowName: wfName });
1200
- expect(notFound).toBeNull();
1201
- });
1202
-
1203
- test('getWorkflowRuns should return all runs when no filters applied', async () => {
1204
- const wfName = 'get-runs-all';
1205
- const runId1 = `run-${randomUUID()}`;
1206
- const runId2 = `run-${randomUUID()}`;
1207
- const wf1 = sampleWorkflowSnapshot(wfName, runId1, undefined, new Date(Date.now() - 1000));
1208
- const wf2 = sampleWorkflowSnapshot(wfName, runId2, undefined, new Date());
1209
-
1210
- await store.batchInsert({ tableName: TABLE_WORKFLOW_SNAPSHOT, records: [wf1.recordData, wf2.recordData] });
1211
-
1212
- const { runs, total } = await store.getWorkflowRuns(); // No filters
1213
- // Note: Scan order is not guaranteed, check for presence and count
1214
- expect(total).toBe(2);
1215
- expect(runs.length).toBe(2);
1216
- expect(runs.map(r => r.runId)).toEqual(expect.arrayContaining([runId1, runId2]));
1217
- });
1218
-
1219
- test('getWorkflowRuns should filter by workflowName', async () => {
1220
- const wfName1 = 'get-runs-filter-name1';
1221
- const wfName2 = 'get-runs-filter-name2';
1222
- const runId1 = `run-${randomUUID()}`;
1223
- const runId2 = `run-${randomUUID()}`;
1224
- const wf1 = sampleWorkflowSnapshot(wfName1, runId1);
1225
- const wf2 = sampleWorkflowSnapshot(wfName2, runId2);
1226
-
1227
- await store.batchInsert({ tableName: TABLE_WORKFLOW_SNAPSHOT, records: [wf1.recordData, wf2.recordData] });
1228
-
1229
- const { runs, total } = await store.getWorkflowRuns({ workflowName: wfName1 });
1230
- expect(total).toBe(1);
1231
- expect(runs.length).toBe(1);
1232
- expect(runs[0]!.runId).toBe(runId1);
1233
- });
1234
-
1235
- test('getWorkflowRuns should filter by resourceId', async () => {
1236
- const wfName = 'get-runs-filter-resource';
1237
- const resource1 = 'resource-filter-1';
1238
- const resource2 = 'resource-filter-2';
1239
- const runId1 = `run-${randomUUID()}`;
1240
- const runId2 = `run-${randomUUID()}`;
1241
- const runId3 = `run-${randomUUID()}`;
1242
- const wf1 = sampleWorkflowSnapshot(wfName, runId1, resource1);
1243
- const wf2 = sampleWorkflowSnapshot(wfName, runId2, resource2);
1244
- const wf3 = sampleWorkflowSnapshot(wfName, runId3, resource1);
1245
-
1246
- await store.batchInsert({
1247
- tableName: TABLE_WORKFLOW_SNAPSHOT,
1248
- records: [wf1.recordData, wf2.recordData, wf3.recordData],
1249
- });
1250
-
1251
- const { runs, total } = await store.getWorkflowRuns({ resourceId: resource1 });
1252
- // Note: Scan order not guaranteed
1253
- expect(total).toBe(2);
1254
- expect(runs.length).toBe(2);
1255
- expect(runs.map(r => r.runId)).toEqual(expect.arrayContaining([runId1, runId3]));
1256
- expect(runs.every(r => r.resourceId === resource1)).toBe(true);
1257
- });
1258
-
1259
- test('getWorkflowRuns should filter by date range', async () => {
1260
- const wfName = 'get-runs-filter-date';
1261
- const time1 = new Date(2024, 0, 10); // Jan 10 2024
1262
- const time2 = new Date(2024, 0, 15); // Jan 15 2024
1263
- const time3 = new Date(2024, 0, 20); // Jan 20 2024
1264
- const runId1 = `run-${randomUUID()}`;
1265
- const runId2 = `run-${randomUUID()}`;
1266
- const runId3 = `run-${randomUUID()}`;
1267
- const wf1 = sampleWorkflowSnapshot(wfName, runId1, undefined, time1);
1268
- const wf2 = sampleWorkflowSnapshot(wfName, runId2, undefined, time2);
1269
- const wf3 = sampleWorkflowSnapshot(wfName, runId3, undefined, time3);
1270
-
1271
- await store.batchInsert({
1272
- tableName: TABLE_WORKFLOW_SNAPSHOT,
1273
- records: [wf1.recordData, wf2.recordData, wf3.recordData],
1274
- });
1275
-
1276
- const { runs, total } = await store.getWorkflowRuns({
1277
- fromDate: new Date(2024, 0, 12), // Jan 12
1278
- toDate: new Date(2024, 0, 18), // Jan 18
1279
- });
1280
- expect(total).toBe(1);
1281
- expect(runs.length).toBe(1);
1282
- expect(runs[0]!.runId).toBe(runId2); // Only wf2 falls within the range
1283
- });
1284
-
1285
- test('getWorkflowRuns should handle pagination (limit/offset)', async () => {
1286
- const wfName = 'get-runs-pagination';
1287
- const snapshots = Array.from({ length: 5 }, (_, i) =>
1288
- sampleWorkflowSnapshot(wfName, `run-page-${i}`, undefined, new Date(Date.now() + i * 1000)),
1289
- );
1290
- await store.batchInsert({ tableName: TABLE_WORKFLOW_SNAPSHOT, records: snapshots.map(s => s.recordData) });
1291
-
1292
- // Get page 1 (limit 2, offset 0)
1293
- const page1 = await store.getWorkflowRuns({ workflowName: wfName, limit: 2, offset: 0 });
1294
- expect(page1.total).toBe(5);
1295
- expect(page1.runs.length).toBe(2);
1296
- // Scan order not guaranteed, check for presence of two expected runs
1297
- const page1Ids = page1.runs.map(r => r.runId);
1298
- expect(snapshots.slice(0, 2).map(s => s!.recordData.run_id)).toEqual(expect.arrayContaining(page1Ids));
1299
-
1300
- // Get page 2 (limit 2, offset 2)
1301
- const page2 = await store.getWorkflowRuns({ workflowName: wfName, limit: 2, offset: 2 });
1302
- expect(page2.total).toBe(5);
1303
- expect(page2.runs.length).toBe(2);
1304
- const page2Ids = page2.runs.map(r => r.runId);
1305
- expect(snapshots.slice(2, 4).map(s => s!.recordData.run_id)).toEqual(expect.arrayContaining(page2Ids));
1306
-
1307
- // Get page 3 (limit 2, offset 4)
1308
- const page3 = await store.getWorkflowRuns({ workflowName: wfName, limit: 2, offset: 4 });
1309
- expect(page3.total).toBe(5);
1310
- expect(page3.runs.length).toBe(1);
1311
- // Use explicit type assertion for runs array access to fix linter error
1312
- expect((page3.runs as WorkflowRun[])[0]!.runId).toBe(snapshots[4]!.recordData.run_id);
1313
-
1314
- // Get page beyond results (offset 5)
1315
- const page4 = await store.getWorkflowRuns({ workflowName: wfName, limit: 2, offset: 5 });
1316
- expect(page4.total).toBe(5);
1317
- expect(page4.runs.length).toBe(0);
1318
- });
1319
- }); // End Workflow Operations describe
1320
-
1321
- // --- Initialization & Configuration Tests ---
1322
- describe('Initialization & Configuration', () => {
1323
- test('should throw error if tableName is missing in config', () => {
1324
- expect(() => {
1325
- new DynamoDBStore({
1326
- name: 'MissingTableStore',
1327
- config: {
1328
- endpoint: LOCAL_ENDPOINT,
1329
- region: LOCAL_REGION,
1330
- credentials: { accessKeyId: 'test', secretAccessKey: 'test' },
1331
- } as any, // Cast to any to bypass compile-time check for this specific test
1332
- });
1333
- }).toThrow(/tableName must be provided/); // Check for specific error message if possible
1334
- });
1335
-
1336
- test('should throw error during operations if table does not exist', async () => {
1337
- // Use a valid but random table name unlikely to exist
1338
- const nonExistentTableName = `non-existent-${randomUUID()}`;
1339
- const storeWithInvalidTable = new DynamoDBStore({
1340
- name: 'InvalidTableStore',
1341
- config: {
1342
- tableName: nonExistentTableName,
1343
- endpoint: LOCAL_ENDPOINT,
1344
- region: LOCAL_REGION,
1345
- credentials: { accessKeyId: 'test', secretAccessKey: 'test' },
1346
- },
1347
- });
1348
-
1349
- await expect(storeWithInvalidTable.getThreadById({ threadId: 'any-id' }))
1350
- .rejects // Update regex to match either DDB error or ElectroDB wrapper
1351
- .toThrow(/ResourceNotFoundException|Table.*does not exist|Cannot do operations on a non-existent table/);
1352
- });
1353
-
1354
- test('init() should throw error if table does not exist', async () => {
1355
- // Use a valid but random table name unlikely to exist
1356
- const nonExistentTableName = `non-existent-init-${randomUUID()}`;
1357
- const storeWithInvalidTable = new DynamoDBStore({
1358
- name: 'InvalidTableStoreInit',
1359
- config: {
1360
- tableName: nonExistentTableName,
1361
- endpoint: LOCAL_ENDPOINT,
1362
- region: LOCAL_REGION,
1363
- credentials: { accessKeyId: 'test', secretAccessKey: 'test' },
1364
- },
1365
- });
1366
-
1367
- await expect(storeWithInvalidTable.init())
1368
- .rejects // Update regex here too for consistency
1369
- .toThrow(/ResourceNotFoundException|Table.*does not exist|Cannot do operations on a non-existent table/);
1370
- });
1371
- }); // End Initialization & Configuration describe
1372
-
1373
- // --- Generic Storage Methods Tests ---
1374
- describe('Generic Storage Methods (`insert`, `load`, `batchInsert`, `clearTable`)', () => {
1375
- // Declare genericStore specific to this block
1376
- let genericStore: DynamoDBStore;
1377
-
1378
- beforeAll(() => {
1379
- // Initialize genericStore using the same config as the main store
1380
- genericStore = new DynamoDBStore({
1381
- name: 'DynamoDBGenericTest',
1382
- config: {
1383
- tableName: TEST_TABLE_NAME, // Ensure this uses the correct test table
1384
- endpoint: LOCAL_ENDPOINT,
1385
- region: LOCAL_REGION,
1386
- credentials: { accessKeyId: 'test', secretAccessKey: 'test' },
1387
- },
1388
- });
1389
- console.log('Generic test store initialized for generic tests.');
1390
- });
1391
-
1392
- const sampleThreadData = (id: string) => ({
1393
- entity: 'thread',
1394
- id: id,
1395
- resourceId: `resource-${randomUUID()}`,
1396
- title: 'Generic Test Thread',
1397
- createdAt: new Date().toISOString(),
1398
- updatedAt: new Date().toISOString(),
1399
- metadata: JSON.stringify({ generic: true }),
1400
- });
1401
-
1402
- test('insert() should save a record', async () => {
1403
- const threadId = `thread-${randomUUID()}`;
1404
- const record = sampleThreadData(threadId);
1405
- // Use the genericStore instance
1406
- await expect(genericStore.insert({ tableName: TABLE_THREADS, record })).resolves.not.toThrow();
1407
- const loaded = await genericStore.load<StorageThreadType>({ tableName: TABLE_THREADS, keys: { id: threadId } });
1408
- expect(loaded).not.toBeNull();
1409
- if (loaded) {
1410
- expect(loaded.id).toBe(threadId);
1411
- expect(loaded.title).toBe('Generic Test Thread');
1412
- expect(loaded.metadata).toEqual({ generic: true });
1413
- }
1414
- });
1415
-
1416
- test('insert() should handle Date objects for createdAt/updatedAt fields', async () => {
1417
- // Test that individual insert method properly handles Date objects in date fields
1418
- const now = new Date();
1419
- const recordWithDates = {
1420
- id: `thread-${randomUUID()}`,
1421
- resourceId: `resource-${randomUUID()}`,
1422
- title: 'Thread with Date Objects',
1423
- // These are Date objects, not ISO strings - should be handled by preprocessing
1424
- createdAt: now,
1425
- updatedAt: now,
1426
- metadata: JSON.stringify({ test: 'with-dates' }),
1427
- };
1428
-
1429
- // This should not throw a validation error due to Date object type
1430
- await expect(genericStore.insert({ tableName: TABLE_THREADS, record: recordWithDates })).resolves.not.toThrow();
1431
-
1432
- // Verify the record was saved correctly
1433
- const loaded = await genericStore.load<StorageThreadType>({
1434
- tableName: TABLE_THREADS,
1435
- keys: { id: recordWithDates.id },
1436
- });
1437
- expect(loaded).not.toBeNull();
1438
- expect(loaded?.id).toBe(recordWithDates.id);
1439
- expect(loaded?.title).toBe('Thread with Date Objects');
1440
- });
1441
-
1442
- test('load() should return null for non-existent record', async () => {
1443
- // Use the genericStore instance
1444
- const loaded = await genericStore.load({ tableName: TABLE_THREADS, keys: { id: 'non-existent-generic' } });
1445
- expect(loaded).toBeNull();
1446
- });
1447
-
1448
- test('batchInsert() should save multiple records', async () => {
1449
- const threadId1 = `thread-batch-${randomUUID()}`;
1450
- const threadId2 = `thread-batch-${randomUUID()}`;
1451
- const records = [sampleThreadData(threadId1), sampleThreadData(threadId2)];
1452
- // Use the genericStore instance
1453
- await expect(genericStore.batchInsert({ tableName: TABLE_THREADS, records })).resolves.not.toThrow();
1454
- const loaded1 = await genericStore.load<StorageThreadType>({ tableName: TABLE_THREADS, keys: { id: threadId1 } });
1455
- const loaded2 = await genericStore.load<StorageThreadType>({ tableName: TABLE_THREADS, keys: { id: threadId2 } });
1456
- expect(loaded1).toBeDefined();
1457
- expect(loaded2).toBeDefined();
1458
- expect(loaded1?.id).toBe(threadId1);
1459
- expect(loaded2?.id).toBe(threadId2);
1460
- });
1461
-
1462
- test('clearTable() should remove all records for the logical table', async () => {
1463
- const threadId1 = `thread-clear-${randomUUID()}`;
1464
- const threadId2 = `thread-clear-${randomUUID()}`;
1465
- const records = [sampleThreadData(threadId1), sampleThreadData(threadId2)];
1466
- // Use the genericStore instance
1467
- await genericStore.batchInsert({ tableName: TABLE_THREADS, records });
1468
- expect(
1469
- await genericStore.load<StorageThreadType>({ tableName: TABLE_THREADS, keys: { id: threadId1 } }),
1470
- ).toBeDefined();
1471
- expect(
1472
- await genericStore.load<StorageThreadType>({ tableName: TABLE_THREADS, keys: { id: threadId2 } }),
1473
- ).toBeDefined();
1474
- await expect(genericStore.clearTable({ tableName: TABLE_THREADS })).resolves.not.toThrow();
1475
- expect(
1476
- await genericStore.load<StorageThreadType>({ tableName: TABLE_THREADS, keys: { id: threadId1 } }),
1477
- ).toBeNull();
1478
- expect(
1479
- await genericStore.load<StorageThreadType>({ tableName: TABLE_THREADS, keys: { id: threadId2 } }),
1480
- ).toBeNull();
1481
- });
1482
- }); // End Generic Storage Methods describe
1483
- });