@mastra/dynamodb 0.0.2-alpha.0

@@ -0,0 +1,1026 @@
+ import { spawn } from 'child_process';
+ import { randomUUID } from 'crypto';
+ import {
+   BatchWriteItemCommand,
+   CreateTableCommand,
+   DeleteTableCommand,
+   DescribeTableCommand,
+   DynamoDBClient,
+   ListTablesCommand,
+   ScanCommand,
+   waitUntilTableExists,
+   waitUntilTableNotExists,
+ } from '@aws-sdk/client-dynamodb';
+ import type { MessageType, StorageThreadType, WorkflowRun, WorkflowRunState } from '@mastra/core';
+ import { TABLE_EVALS, TABLE_THREADS, TABLE_WORKFLOW_SNAPSHOT } from '@mastra/core/storage';
+ import { afterAll, beforeAll, beforeEach, describe, expect, test } from 'vitest';
+ import { DynamoDBStore } from '..';
+
+ const TEST_TABLE_NAME = 'mastra-single-table-test'; // Define the single table name
+ const LOCAL_ENDPOINT = 'http://localhost:8000';
+ const LOCAL_REGION = 'local-test'; // Use a distinct region for local testing
+
+ // Docker process handle
+ let dynamodbProcess: ReturnType<typeof spawn>;
+
+ // AWS SDK client for setup/teardown
+ let setupClient: DynamoDBClient;
+
+ // Function to wait for DynamoDB Local to be ready
+ async function waitForDynamoDBLocal(client: DynamoDBClient, timeoutMs = 90000): Promise<void> {
+   const startTime = Date.now();
+   console.log(`Waiting up to ${timeoutMs / 1000}s for DynamoDB Local...`);
+   while (Date.now() - startTime < timeoutMs) {
+     try {
+       await client.send(new ListTablesCommand({}));
+       console.log('DynamoDB Local is ready.');
+       return; // Success
+     } catch (e: unknown) {
+       let errorName: string | undefined;
+
+       if (e instanceof Error) {
+         errorName = e.name;
+       } else if (
+         typeof e === 'object' &&
+         e !== null &&
+         'name' in e &&
+         typeof (e as { name: unknown }).name === 'string'
+       ) {
+         errorName = (e as { name: string }).name;
+       }
+
+       if (errorName === 'ECONNREFUSED' || errorName === 'TimeoutError' || errorName === 'ERR_INVALID_PROTOCOL') {
+         // Expected errors while starting
+         await new Promise(resolve => setTimeout(resolve, 500)); // Wait before retrying
+       } else {
+         console.error('Unexpected error waiting for DynamoDB Local:', e);
+         throw e; // Rethrow unexpected errors
+       }
+     }
+   }
+   throw new Error(`DynamoDB Local did not become ready within ${timeoutMs}ms.`);
+ }
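+
+ // Editor's note: a sketch, not part of the published file. The unknown-error narrowing above is
+ // duplicated verbatim in the table-deletion catch block below; a shared helper could replace both
+ // copies. It also falls back to the Node.js `code` property, since Node system errors such as
+ // ECONNREFUSED report their identifier in `code` rather than `name`.
+ function getErrorName(e: unknown): string | undefined {
+   if (e instanceof Error) {
+     // Prefer `code` when present (Node system errors), otherwise the error class name
+     return (e as NodeJS.ErrnoException).code ?? e.name;
+   }
+   if (typeof e === 'object' && e !== null && 'name' in e && typeof (e as { name: unknown }).name === 'string') {
+     return (e as { name: string }).name;
+   }
+   return undefined;
+ }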
+
+ // Function to clear all items from the single table
+ async function clearSingleTable(client: DynamoDBClient, tableName: string) {
+   let ExclusiveStartKey: Record<string, any> | undefined;
+   let items: Record<string, any>[] = [];
+
+   // Scan all items (handling pagination)
+   do {
+     const scanOutput = await client.send(
+       new ScanCommand({
+         TableName: tableName,
+         ExclusiveStartKey,
+         ProjectionExpression: 'pk, sk', // Only need keys for deletion
+       }),
+     );
+     items = items.concat(scanOutput.Items || []);
+     ExclusiveStartKey = scanOutput.LastEvaluatedKey;
+   } while (ExclusiveStartKey);
+
+   if (items.length === 0) {
+     return; // Nothing to delete
+   }
+
+   // Batch delete items (handling DynamoDB 25 item limit per batch)
+   const deleteRequests = items.map(item => ({
+     DeleteRequest: {
+       Key: { pk: item.pk, sk: item.sk },
+     },
+   }));
+
+   for (let i = 0; i < deleteRequests.length; i += 25) {
+     const batch = deleteRequests.slice(i, i + 25);
+     const command = new BatchWriteItemCommand({
+       RequestItems: {
+         [tableName]: batch,
+       },
+     });
+     // Handle unprocessed items if necessary (though less likely with local)
+     let result = await client.send(command);
+     while (
+       result.UnprocessedItems &&
+       result.UnprocessedItems[tableName] &&
+       result.UnprocessedItems[tableName].length > 0
+     ) {
+       console.warn(`Retrying ${result.UnprocessedItems[tableName].length} unprocessed delete items...`);
+       await new Promise(res => setTimeout(res, 200)); // Simple backoff
+       const retryCommand = new BatchWriteItemCommand({ RequestItems: result.UnprocessedItems });
+       result = await client.send(retryCommand);
+     }
+   }
+   // console.log(`Cleared ${items.length} items from ${tableName}`);
+ }
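+
+ // Editor's note: BatchWriteItem accepts at most 25 requests (and 16 MB) per call, hence the
+ // slicing above. Against real AWS, retrying unprocessed items with a fixed 200 ms delay can keep
+ // hammering a throttled table; AWS recommends exponential backoff. A sketch (the name
+ // `backoffDelay` is ours, not the package's):
+ const backoffDelay = (attempt: number) => Math.min(200 * 2 ** attempt, 5000); // cap at 5 s
+ // Usage inside the retry loop: await new Promise(res => setTimeout(res, backoffDelay(attempt)));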
+
+ // Start DynamoDB Local container and create table
+ beforeAll(async () => {
+   // Initialize client for setup
+   setupClient = new DynamoDBClient({
+     endpoint: LOCAL_ENDPOINT,
+     region: LOCAL_REGION,
+     credentials: { accessKeyId: 'test', secretAccessKey: 'test' },
+     // Increase timeout for setup operations
+     requestHandler: { requestTimeout: 10000 },
+     // Add retries for setup commands
+     maxAttempts: 5,
+   });
+
+   // Start DynamoDB Local using docker-compose
+   console.log('Starting DynamoDB Local container...');
+   dynamodbProcess = spawn('docker-compose', ['up', '-d'], {
+     cwd: __dirname, // Run docker-compose from the test file's directory
+     stdio: 'pipe', // Pipe output so it can be captured if needed
+   });
+   dynamodbProcess.stderr?.on('data', data => console.error(`docker-compose stderr: ${data}`));
+   dynamodbProcess.on('error', err => console.error('Failed to start docker-compose:', err));
+
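+   // Editor's note: the spawn above assumes a docker-compose.yml sitting next to this test file,
+   // which is not shown in this diff. A minimal compose file that would satisfy the test might
+   // look like the following (hypothetical; the package's actual file may differ):
+   //
+   //   services:
+   //     dynamodb-local:
+   //       image: amazon/dynamodb-local
+   //       ports:
+   //         - '8000:8000'
+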
+   // Add a short fixed delay to allow the container process to stabilize before polling
+   console.log('Waiting a few seconds for container process to stabilize...');
+   await new Promise(resolve => setTimeout(resolve, 3000)); // 3-second delay
+
+   // Wait for DynamoDB to be ready
+   try {
+     await waitForDynamoDBLocal(setupClient);
+   } catch (e) {
+     console.error('Failed to connect to DynamoDB Local after startup.', e);
+     // Attempt to stop container on failure
+     spawn('docker-compose', ['down'], { cwd: __dirname, stdio: 'pipe' });
+     throw e; // Re-throw error to fail the test suite
+   }
+
+   // Delete the table if it exists from a previous run
+   try {
+     console.log(`Checking if table ${TEST_TABLE_NAME} exists...`);
+     await setupClient.send(new DescribeTableCommand({ TableName: TEST_TABLE_NAME }));
+     console.log(`Table ${TEST_TABLE_NAME} exists, attempting deletion...`);
+     await setupClient.send(new DeleteTableCommand({ TableName: TEST_TABLE_NAME }));
+     console.log(`Waiting for table ${TEST_TABLE_NAME} to be deleted...`);
+     await waitUntilTableNotExists({ client: setupClient, maxWaitTime: 60 }, { TableName: TEST_TABLE_NAME });
+     console.log(`Table ${TEST_TABLE_NAME} deleted.`);
+   } catch (e: unknown) {
+     let errorName: string | undefined;
+
+     if (e instanceof Error) {
+       errorName = e.name;
+     } else if (
+       typeof e === 'object' &&
+       e !== null &&
+       'name' in e &&
+       typeof (e as { name: unknown }).name === 'string'
+     ) {
+       errorName = (e as { name: string }).name;
+     }
+
+     if (errorName === 'ResourceNotFoundException') {
+       console.log(`Table ${TEST_TABLE_NAME} does not exist, proceeding.`);
+     } else {
+       console.error(`Error deleting table ${TEST_TABLE_NAME}:`, e);
+       throw e; // Rethrow other errors
+     }
+   }
+
+   // Create the single table with the correct schema
+   console.log(`Creating table ${TEST_TABLE_NAME}...`);
+   try {
+     const createTableCommand = new CreateTableCommand({
+       TableName: TEST_TABLE_NAME,
+       AttributeDefinitions: [
+         { AttributeName: 'pk', AttributeType: 'S' },
+         { AttributeName: 'sk', AttributeType: 'S' },
+         { AttributeName: 'gsi1pk', AttributeType: 'S' },
+         { AttributeName: 'gsi1sk', AttributeType: 'S' },
+         { AttributeName: 'gsi2pk', AttributeType: 'S' },
+         { AttributeName: 'gsi2sk', AttributeType: 'S' },
+       ],
+       KeySchema: [
+         { AttributeName: 'pk', KeyType: 'HASH' },
+         { AttributeName: 'sk', KeyType: 'RANGE' },
+       ],
+       GlobalSecondaryIndexes: [
+         {
+           IndexName: 'gsi1',
+           KeySchema: [
+             { AttributeName: 'gsi1pk', KeyType: 'HASH' },
+             { AttributeName: 'gsi1sk', KeyType: 'RANGE' },
+           ],
+           Projection: { ProjectionType: 'ALL' },
+         },
+         {
+           IndexName: 'gsi2',
+           KeySchema: [
+             { AttributeName: 'gsi2pk', KeyType: 'HASH' },
+             { AttributeName: 'gsi2sk', KeyType: 'RANGE' },
+           ],
+           Projection: { ProjectionType: 'ALL' },
+         },
+       ],
+       BillingMode: 'PAY_PER_REQUEST', // Use PAY_PER_REQUEST for local testing ease
+     });
+     await setupClient.send(createTableCommand);
+     console.log(`Waiting for table ${TEST_TABLE_NAME} to become active...`);
+     await waitUntilTableExists({ client: setupClient, maxWaitTime: 60 }, { TableName: TEST_TABLE_NAME });
+     console.log(`Table ${TEST_TABLE_NAME} created successfully.`);
+   } catch (e) {
+     console.error(`Failed to create table ${TEST_TABLE_NAME}:`, e);
+     throw e;
+   }
+ }, 60000); // Increase timeout for beforeAll to accommodate Docker startup and table creation
+
+ // Stop DynamoDB Local container
+ afterAll(async () => {
+   console.log('Stopping DynamoDB Local container...');
+   // Optionally delete the table
+   // try {
+   //   await setupClient.send(new DeleteTableCommand({ TableName: TEST_TABLE_NAME }));
+   //   await waitUntilTableNotExists({ client: setupClient, maxWaitTime: 60 }, { TableName: TEST_TABLE_NAME });
+   //   console.log(`Test table ${TEST_TABLE_NAME} deleted.`);
+   // } catch (error) {
+   //   console.error(`Error deleting test table ${TEST_TABLE_NAME}:`, error);
+   // }
+
+   if (setupClient) {
+     setupClient.destroy();
+   }
+
+   const stopProcess = spawn('docker-compose', ['down', '--volumes'], {
+     // Remove volumes too
+     cwd: __dirname,
+     stdio: 'pipe',
+   });
+   stopProcess.stderr?.on('data', data => console.error(`docker-compose down stderr: ${data}`));
+   stopProcess.on('error', err => console.error('Failed to stop docker-compose:', err));
+   await new Promise(resolve => stopProcess.on('close', resolve)); // Wait for compose down
+
+   if (dynamodbProcess && !dynamodbProcess.killed) {
+     dynamodbProcess.kill();
+   }
+   console.log('DynamoDB Local container stopped.');
+ }, 30000); // Increase timeout for afterAll
+
+ describe('DynamoDBStore Integration Tests', () => {
+   let store: DynamoDBStore;
+
+   beforeAll(async () => {
+     // Initialize main store instance used by most tests
+     store = new DynamoDBStore({
+       name: 'DynamoDBStoreTest',
+       config: {
+         tableName: TEST_TABLE_NAME,
+         endpoint: LOCAL_ENDPOINT,
+         region: LOCAL_REGION,
+         credentials: { accessKeyId: 'test', secretAccessKey: 'test' },
+       },
+     });
+     console.log('Main DynamoDBStore initialized for tests.');
+   });
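+
+   // Editor's note (hypothetical usage, not exercised by these tests): against real AWS, the
+   // `endpoint` and static `credentials` would normally be omitted so the SDK resolves both
+   // from the environment, e.g.:
+   //
+   //   const prodStore = new DynamoDBStore({
+   //     name: 'DynamoDBStore',
+   //     config: { tableName: 'mastra-single-table', region: 'us-east-1' },
+   //   });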
+
+   beforeEach(async () => {
+     // Clear the table between tests using the setup client
+     await clearSingleTable(setupClient, TEST_TABLE_NAME);
+   });
+
+   afterAll(async () => {
+     // The store manages its internal client; release it through its close method
+     if (store) {
+       await store.close(); // Assuming the store exposes a close method
+     }
+   });
+
+   // DynamoDB-specific tests
+   describe('DynamoDB-specific operations', () => {
+     describe('Entity Operations', () => {
+       test('should persist and retrieve thread metadata', async () => {
+         const now = new Date();
+         const threadId = 'metadata-thread';
+         const metadata = { user: 'test-user', complex: { nested: true, arr: [1, 'a'] } };
+         const thread: StorageThreadType = {
+           id: threadId,
+           resourceId: 'resource-meta',
+           title: 'Metadata Test Thread',
+           createdAt: now,
+           updatedAt: now,
+           metadata: metadata,
+         };
+         await store.saveThread({ thread });
+         const retrieved = await store.getThreadById({ threadId });
+         expect(retrieved).toBeDefined();
+         expect(retrieved?.metadata).toEqual(metadata); // ElectroDB should handle JSON stringify/parse
+       });
+
+       test('should handle large workflow snapshots near DynamoDB item size limit', async () => {
+         // Relies on clearSingleTable isolating this test's data
+         const now = Date.now();
+         const largeSnapshot: WorkflowRunState = {
+           // ... (rest of the large snapshot definition) ...
+           value: { state: 'test' },
+           context: {
+             input: { source: 'test' },
+             step1: { status: 'success', output: { data: 'test' } },
+           } as unknown as WorkflowRunState['context'],
+           activePaths: [{ stepPath: ['test'], stepId: 'step1', status: 'success' }],
+           suspendedPaths: { test: [1] },
+           runId: 'test-run-large', // Use unique runId
+           timestamp: now,
+         };
+
+         await expect(
+           store.persistWorkflowSnapshot({
+             workflowName: 'test-workflow-large',
+             runId: 'test-run-large',
+             snapshot: largeSnapshot,
+           }),
+         ).resolves.not.toThrow();
+
+         const retrieved = await store.loadWorkflowSnapshot({
+           workflowName: 'test-workflow-large',
+           runId: 'test-run-large',
+         });
+
+         expect(retrieved).toEqual(largeSnapshot);
+       }, 10000); // Increase timeout for potentially large item handling
+
+       test('should handle concurrent thread updates (last writer wins)', async () => {
+         // Verifies only the final state after the concurrent updates
+         const threadId = 'concurrent-thread';
+         const resourceId = 'resource-123';
+         const now = new Date();
+         const thread: StorageThreadType = {
+           id: threadId,
+           resourceId,
+           title: 'Initial Title',
+           createdAt: now,
+           updatedAt: now,
+           metadata: { initial: true },
+         };
+         await store.saveThread({ thread });
+
+         // Simulate potential delay between read and write for update 1
+         const update1 = async () => {
+           await new Promise(res => setTimeout(res, 50)); // Short delay
+           await store.updateThread({
+             id: threadId,
+             title: 'Updated Thread 1',
+             metadata: { update: 1, time: Date.now() },
+           });
+         };
+         // Simulate potential delay between read and write for update 2
+         const update2 = async () => {
+           await new Promise(res => setTimeout(res, 100)); // Slightly longer delay
+           await store.updateThread({
+             id: threadId,
+             title: 'Updated Thread 2',
+             metadata: { update: 2, time: Date.now() },
+           });
+         };
+
+         await Promise.all([update1(), update2()]);
+
+         const retrieved = await store.getThreadById({ threadId });
+         expect(retrieved).toBeDefined();
+         expect(retrieved?.id).toBe(threadId);
+         // With DynamoDB's default (non-conditional) updates, the last writer wins.
+         // We expect update2's title and metadata because it wrote last.
+         expect(retrieved?.title).toBe('Updated Thread 2');
+         expect(retrieved?.metadata?.update).toBe(2);
+       });
+     });
+
+     describe('Batch Operations', () => {
+       test('should handle batch message inserts efficiently (up to 25 items)', async () => {
+         const startTime = Date.now(); // Get a base time
+         const threadId = 'batch-thread';
+         const messages: MessageType[] = Array.from({ length: 25 }, (_, i) => ({
+           id: `msg-${i}`,
+           threadId,
+           resourceId: 'test-resource',
+           content: `Message ${i}`,
+           // Increment timestamp slightly for each message to ensure order
+           createdAt: new Date(startTime + i),
+           role: 'user',
+           type: 'text',
+         }));
+
+         // Assuming saveMessages uses BatchWriteItem internally
+         await expect(store.saveMessages({ messages })).resolves.not.toThrow();
+
+         const retrieved = await store.getMessages({ threadId });
+         expect(retrieved).toHaveLength(25);
+         // Order is guaranteed by the ascending createdAt timestamps
+         expect(retrieved[0]?.content).toBe('Message 0');
+         expect(retrieved[24]?.content).toBe('Message 24');
+       });
+
+       test('should handle batch inserts exceeding 25 items (if saveMessages chunks)', async () => {
+         const startTime = Date.now(); // Get a base time
+         const threadId = 'batch-thread-large';
+         const messages: MessageType[] = Array.from({ length: 30 }, (_, i) => ({
+           id: `msg-large-${i}`,
+           threadId,
+           resourceId: 'test-resource-large',
+           content: `Large Message ${i}`,
+           // Increment timestamp slightly for each message to ensure order
+           createdAt: new Date(startTime + i),
+           role: 'user',
+           type: 'text',
+         }));
+
+         await expect(store.saveMessages({ messages })).resolves.not.toThrow();
+
+         const retrieved = await store.getMessages({ threadId });
+         expect(retrieved).toHaveLength(30); // Verify all were saved
+         // Check order for the >25-item case as well
+         expect(retrieved[0]?.content).toBe('Large Message 0');
+         expect(retrieved[29]?.content).toBe('Large Message 29');
+       });
+     });
+
+     describe('Single-Table Design', () => {
+       test('should maintain entity separation in single table', async () => {
+         const threadId = 'mixed-thread';
+         const workflowName = 'mixed-workflow';
+         const now = new Date();
+         const thread: StorageThreadType = {
+           id: threadId,
+           resourceId: 'mixed-resource',
+           title: 'Mixed Thread',
+           createdAt: now,
+           updatedAt: now,
+           metadata: { type: 'thread' },
+         };
+         await store.saveThread({ thread });
+
+         const workflowSnapshot: WorkflowRunState = {
+           // ...(snapshot definition)
+           value: { state: 'test' },
+           context: {
+             step1: { status: 'success', output: { data: 'test' } },
+             input: { source: 'test' },
+           } as unknown as WorkflowRunState['context'],
+           activePaths: [{ stepPath: ['test'], stepId: 'step1', status: 'success' }],
+           suspendedPaths: { test: [1] },
+           runId: 'mixed-run',
+           timestamp: Date.now(),
+         };
+         await store.persistWorkflowSnapshot({ workflowName, runId: 'mixed-run', snapshot: workflowSnapshot });
+
+         const retrievedThread = await store.getThreadById({ threadId });
+         const retrievedWorkflow = await store.loadWorkflowSnapshot({ workflowName, runId: 'mixed-run' });
+
+         expect(retrievedThread?.metadata?.type).toBe('thread');
+         expect(retrievedWorkflow).toEqual(workflowSnapshot);
+       });
+     });
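+
+     // Editor's note: entity separation in one physical table works because each logical entity
+     // is written under its own key prefix, so a query for one entity can never match another.
+     // The exact pk/sk format is defined by the store's ElectroDB model (not part of this file);
+     // purely for intuition, the keys are shaped roughly like (hypothetical values):
+     //   thread item:            pk ~ '...#entity_thread#...'
+     //   workflow_snapshot item: pk ~ '...#entity_workflow_snapshot#...'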
+
+     describe('Error Handling', () => {
+       test('should handle non-existent IDs gracefully for getById methods', async () => {
+         const nonExistentId = 'does-not-exist';
+         // Test getThreadById (already partially covered but good to keep specific)
+         const thread = await store.getThreadById({ threadId: nonExistentId });
+         expect(thread).toBeNull();
+
+         // Test loadWorkflowSnapshot (already covered in Workflow tests, technically)
+         const snapshot = await store.loadWorkflowSnapshot({ workflowName: nonExistentId, runId: nonExistentId });
+         expect(snapshot).toBeNull();
+
+         // Test getWorkflowRunById (already covered in Workflow tests, technically)
+         const workflowRun = await store.getWorkflowRunById({ runId: nonExistentId });
+         expect(workflowRun).toBeNull();
+       });
+
+       test('getMessages should return empty array for non-existent thread', async () => {
+         const messages = await store.getMessages({ threadId: 'non-existent-thread' });
+         expect(messages).toEqual([]);
+       });
+
+       test('getThreadsByResourceId should return empty array for non-existent resourceId', async () => {
+         const threads = await store.getThreadsByResourceId({ resourceId: 'non-existent-resource' });
+         expect(threads).toEqual([]);
+       });
+
+       test('getTraces should return empty array when no traces match filter', async () => {
+         const tracesByName = await store.getTraces({ name: 'non-existent-trace', page: 1, perPage: 10 });
+         expect(tracesByName).toEqual([]);
+         const tracesByScope = await store.getTraces({ scope: 'non-existent-scope', page: 1, perPage: 10 });
+         expect(tracesByScope).toEqual([]);
+       });
+
+       test('getEvalsByAgentName should return empty array for non-existent agent', async () => {
+         const evals = await store.getEvalsByAgentName('non-existent-agent');
+         expect(evals).toEqual([]);
+       });
+
+       test('getWorkflowRuns should return empty result for non-existent filters', async () => {
+         const { runs: runsByName, total: totalByName } = await store.getWorkflowRuns({
+           workflowName: 'non-existent-workflow',
+         });
+         expect(runsByName).toEqual([]);
+         expect(totalByName).toBe(0);
+
+         const { runs: runsByResource, total: totalByResource } = await store.getWorkflowRuns({
+           resourceId: 'non-existent-resource',
+         });
+         expect(runsByResource).toEqual([]);
+         expect(totalByResource).toBe(0);
+       });
+     }); // End Error Handling describe
+   });
+
+   // --- Trace Operations Tests ---
+   describe('Trace Operations', () => {
+     const sampleTrace = (name: string, scope: string, startTime = Date.now()) => ({
+       id: `trace-${randomUUID()}`,
+       parentSpanId: `span-${randomUUID()}`,
+       traceId: `traceid-${randomUUID()}`,
+       name,
+       scope,
+       kind: 1, // Example kind
+       startTime: startTime,
+       endTime: startTime + 100, // Example duration
+       status: JSON.stringify({ code: 0 }), // Example status
+       attributes: JSON.stringify({ key: 'value', scopeAttr: scope }),
+       events: JSON.stringify([{ name: 'event1', timestamp: startTime + 50 }]),
+       links: JSON.stringify([]),
+       createdAt: new Date(startTime).toISOString(),
+       updatedAt: new Date(startTime).toISOString(),
+     });
+
+     test('should batch insert and retrieve traces', async () => {
+       const trace1 = sampleTrace('trace-op-1', 'scope-A');
+       const trace2 = sampleTrace('trace-op-2', 'scope-A', Date.now() + 10);
+       const trace3 = sampleTrace('trace-op-3', 'scope-B', Date.now() + 20);
+       const records = [trace1, trace2, trace3];
+
+       await expect(store.batchTraceInsert({ records })).resolves.not.toThrow();
+
+       // Retrieve all (via scan, assuming low test data volume)
+       const allTraces = await store.getTraces({ page: 1, perPage: 10 });
+       expect(allTraces.length).toBe(3);
+     });
+
+     test('should retrieve traces filtered by name using GSI', async () => {
+       const trace1 = sampleTrace('trace-filter-name', 'scope-X');
+       const trace2 = sampleTrace('trace-filter-name', 'scope-Y', Date.now() + 10);
+       const trace3 = sampleTrace('other-name', 'scope-X', Date.now() + 20);
+       await store.batchTraceInsert({ records: [trace1, trace2, trace3] });
+
+       const filteredTraces = await store.getTraces({ name: 'trace-filter-name', page: 1, perPage: 10 });
+       expect(filteredTraces.length).toBe(2);
+       expect(filteredTraces.every(t => t.name === 'trace-filter-name')).toBe(true);
+       // Sorted by startTime (the GSI sort key), ascending by default
+       expect(filteredTraces[0].scope).toBe('scope-X');
+       expect(filteredTraces[1].scope).toBe('scope-Y');
+     });
+
+     test('should retrieve traces filtered by scope using GSI', async () => {
+       const trace1 = sampleTrace('trace-filter-scope-A', 'scope-TARGET');
+       const trace2 = sampleTrace('trace-filter-scope-B', 'scope-OTHER', Date.now() + 10);
+       const trace3 = sampleTrace('trace-filter-scope-C', 'scope-TARGET', Date.now() + 20);
+       await store.batchTraceInsert({ records: [trace1, trace2, trace3] });
+
+       const filteredTraces = await store.getTraces({ scope: 'scope-TARGET', page: 1, perPage: 10 });
+       expect(filteredTraces.length).toBe(2);
+       expect(filteredTraces.every(t => t.scope === 'scope-TARGET')).toBe(true);
+       // Sorted by startTime (the GSI sort key), ascending by default
+       expect(filteredTraces[0].name).toBe('trace-filter-scope-A');
+       expect(filteredTraces[1].name).toBe('trace-filter-scope-C');
+     });
+
+     test('should handle pagination for getTraces', async () => {
+       const traceData = Array.from({ length: 5 }, (_, i) =>
+         sampleTrace('trace-page', `scope-page`, Date.now() + i * 10),
+       );
+       await store.batchTraceInsert({ records: traceData });
+
+       // Get page 1 (first 2 items)
+       const page1 = await store.getTraces({ name: 'trace-page', page: 1, perPage: 2 });
+       expect(page1.length).toBe(2);
+       // Use non-null assertion (!) since lengths are verified
+       expect(page1[0]!.startTime).toBe(traceData[0]!.startTime);
+       expect(page1[1]!.startTime).toBe(traceData[1]!.startTime);
+
+       // Get page 2 (next 2 items)
+       const page2 = await store.getTraces({ name: 'trace-page', page: 2, perPage: 2 });
+       expect(page2.length).toBe(2);
+       expect(page2[0]!.startTime).toBe(traceData[2]!.startTime);
+       expect(page2[1]!.startTime).toBe(traceData[3]!.startTime);
+
+       // Get page 3 (last item)
+       const page3 = await store.getTraces({ name: 'trace-page', page: 3, perPage: 2 });
+       expect(page3.length).toBe(1);
+       expect(page3[0]!.startTime).toBe(traceData[4]!.startTime);
+
+       // Get page beyond results
+       const page4 = await store.getTraces({ name: 'trace-page', page: 4, perPage: 2 });
+       expect(page4.length).toBe(0);
+     });
+   }); // End Trace Operations describe
+
+   // --- Eval Operations Tests ---
+   describe('Eval Operations', () => {
+     const sampleEval = (agentName: string, isTest = false, createdAt = new Date()) => {
+       const testInfo = isTest ? { testPath: 'test/path.ts', testName: 'Test Name' } : undefined;
+       return {
+         entity: 'eval', // Important for saving
+         agent_name: agentName,
+         input: 'Sample input',
+         output: 'Sample output',
+         result: JSON.stringify({ score: Math.random() }), // Random score
+         metric_name: 'sample-metric',
+         instructions: 'Sample instructions',
+         test_info: testInfo ? JSON.stringify(testInfo) : undefined,
+         global_run_id: `global-${randomUUID()}`,
+         run_id: `run-${randomUUID()}`,
+         created_at: createdAt.toISOString(),
+         // Core MastraStorage fields
+         createdAt: createdAt.toISOString(),
+         updatedAt: createdAt.toISOString(),
+         metadata: JSON.stringify({ custom: 'eval_meta' }),
+       };
+     };
+
+     test('should retrieve evals by agent name using GSI and filter by type', async () => {
+       const agent1 = 'eval-agent-1';
+       const agent2 = 'eval-agent-2';
+       const time1 = new Date();
+       const time2 = new Date(Date.now() + 1000);
+       const time3 = new Date(Date.now() + 2000);
+       const time4 = new Date(Date.now() + 3000);
+
+       const eval1_live = sampleEval(agent1, false, time1);
+       const eval1_test = sampleEval(agent1, true, time2);
+       const eval2_live = sampleEval(agent2, false, time3);
+       const eval1_live_later = sampleEval(agent1, false, time4);
+
+       // Use generic batchInsert (which expects entity prop already set)
+       await store.batchInsert({
+         tableName: TABLE_EVALS,
+         records: [eval1_live, eval1_test, eval2_live, eval1_live_later],
+       });
+
+       // Get all for agent1 (expected in descending order)
+       const allAgent1 = await store.getEvalsByAgentName(agent1);
+       expect(allAgent1.length).toBe(3);
+       // Assert descending order (newest first)
+       expect(allAgent1[0]!.runId).toBe(eval1_live_later.run_id); // Newest (time4)
+       expect(allAgent1[1]!.runId).toBe(eval1_test.run_id); // Middle (time2)
+       expect(allAgent1[2]!.runId).toBe(eval1_live.run_id); // Oldest (time1)
+
+       // Get only live for agent1 (should be 2, ordered descending)
+       const liveAgent1 = await store.getEvalsByAgentName(agent1, 'live');
+       expect(liveAgent1.length).toBe(2);
+       // Assert descending order
+       expect(liveAgent1[0]!.runId).toBe(eval1_live_later.run_id); // Newest live (time4)
+       expect(liveAgent1[1]!.runId).toBe(eval1_live.run_id); // Oldest live (time1)
+
+       // Get only test for agent1 (should be 1)
+       const testAgent1 = await store.getEvalsByAgentName(agent1, 'test');
+       expect(testAgent1.length).toBe(1);
+       expect(testAgent1[0]!.runId).toBe(eval1_test.run_id);
+       expect(testAgent1[0]!.testInfo).toEqual(JSON.parse(eval1_test.test_info!));
+
+       // Get for agent2 (should be 1)
+       const allAgent2 = await store.getEvalsByAgentName(agent2);
+       expect(allAgent2.length).toBe(1);
+       expect(allAgent2[0]!.runId).toBe(eval2_live.run_id);
+
+       // Get for non-existent agent
+       const none = await store.getEvalsByAgentName('non-existent-agent');
+       expect(none.length).toBe(0);
+     });
+   }); // End Eval Operations describe
+
+   // --- Workflow Operations Tests ---
+   describe('Workflow Operations', () => {
+     const sampleWorkflowSnapshot = (
+       workflowName: string,
+       runId: string,
+       resourceId?: string,
+       createdAt = new Date(),
+       status = 'running',
+     ): { recordData: Record<string, any>; snapshot: WorkflowRunState } => {
+       const snapshot: WorkflowRunState = {
+         value: { currentState: status },
+         context: {
+           step1: { status: 'success', output: { data: 'test' } },
+           input: { source: 'test' },
+         } as unknown as WorkflowRunState['context'],
+         activePaths: [],
+         suspendedPaths: {},
+         runId: runId,
+         timestamp: createdAt.getTime(),
+         ...(resourceId && { resourceId: resourceId }), // Conditionally add resourceId to snapshot
+       };
+       return {
+         recordData: {
+           entity: 'workflow_snapshot',
+           workflow_name: workflowName,
+           run_id: runId,
+           snapshot: JSON.stringify(snapshot),
+           createdAt: createdAt.toISOString(),
+           updatedAt: createdAt.toISOString(),
+           resourceId: resourceId, // Store resourceId directly if available
+           metadata: JSON.stringify({ wf: 'meta' }),
+         },
+         snapshot: snapshot,
+       };
+     };
+
+     test('should persist and load a workflow snapshot', async () => {
+       const wfName = 'persist-test-wf';
+       const runId = `run-${randomUUID()}`;
+       const { snapshot } = sampleWorkflowSnapshot(wfName, runId);
+
+       await expect(
+         store.persistWorkflowSnapshot({
+           workflowName: wfName,
+           runId: runId,
+           snapshot: snapshot,
+         }),
+       ).resolves.not.toThrow();
+
+       const loadedSnapshot = await store.loadWorkflowSnapshot({
+         workflowName: wfName,
+         runId: runId,
+       });
+       // Compare only relevant parts, as persist might add internal fields
+       expect(loadedSnapshot?.runId).toEqual(snapshot.runId);
+       expect(loadedSnapshot?.value).toEqual(snapshot.value);
+       expect(loadedSnapshot?.context).toEqual(snapshot.context);
+     });
+
+     test('getWorkflowRunById should retrieve correct run', async () => {
+       const wfName = 'get-by-id-wf';
+       const runId1 = `run-${randomUUID()}`;
+       const runId2 = `run-${randomUUID()}`;
+       const wf1 = sampleWorkflowSnapshot(wfName, runId1);
+       const wf2 = sampleWorkflowSnapshot(wfName, runId2);
+
+       await store.batchInsert({ tableName: TABLE_WORKFLOW_SNAPSHOT, records: [wf1.recordData, wf2.recordData] });
+
+       const found = await store.getWorkflowRunById({ runId: runId1, workflowName: wfName });
+       expect(found).toBeDefined();
+       expect(found!.runId).toBe(runId1);
+       expect(found!.workflowName).toBe(wfName);
+
+       const notFound = await store.getWorkflowRunById({ runId: 'non-existent', workflowName: wfName });
+       expect(notFound).toBeNull();
+     });
+
+     test('getWorkflowRuns should return all runs when no filters applied', async () => {
+       const wfName = 'get-runs-all';
+       const runId1 = `run-${randomUUID()}`;
+       const runId2 = `run-${randomUUID()}`;
+       const wf1 = sampleWorkflowSnapshot(wfName, runId1, undefined, new Date(Date.now() - 1000));
+       const wf2 = sampleWorkflowSnapshot(wfName, runId2, undefined, new Date());
+
+       await store.batchInsert({ tableName: TABLE_WORKFLOW_SNAPSHOT, records: [wf1.recordData, wf2.recordData] });
+
+       const { runs, total } = await store.getWorkflowRuns(); // No filters
+       // Note: scan order is not guaranteed, so check for presence and count
+       expect(total).toBe(2);
+       expect(runs.length).toBe(2);
+       expect(runs.map(r => r.runId)).toEqual(expect.arrayContaining([runId1, runId2]));
+     });
+
+     test('getWorkflowRuns should filter by workflowName', async () => {
+       const wfName1 = 'get-runs-filter-name1';
+       const wfName2 = 'get-runs-filter-name2';
+       const runId1 = `run-${randomUUID()}`;
+       const runId2 = `run-${randomUUID()}`;
+       const wf1 = sampleWorkflowSnapshot(wfName1, runId1);
+       const wf2 = sampleWorkflowSnapshot(wfName2, runId2);
+
+       await store.batchInsert({ tableName: TABLE_WORKFLOW_SNAPSHOT, records: [wf1.recordData, wf2.recordData] });
+
+       const { runs, total } = await store.getWorkflowRuns({ workflowName: wfName1 });
+       expect(total).toBe(1);
+       expect(runs.length).toBe(1);
+       expect(runs[0]!.runId).toBe(runId1);
+     });
+
+     test('getWorkflowRuns should filter by resourceId', async () => {
+       const wfName = 'get-runs-filter-resource';
+       const resource1 = 'resource-filter-1';
+       const resource2 = 'resource-filter-2';
+       const runId1 = `run-${randomUUID()}`;
+       const runId2 = `run-${randomUUID()}`;
+       const runId3 = `run-${randomUUID()}`;
+       const wf1 = sampleWorkflowSnapshot(wfName, runId1, resource1);
+       const wf2 = sampleWorkflowSnapshot(wfName, runId2, resource2);
+       const wf3 = sampleWorkflowSnapshot(wfName, runId3, resource1);
+
+       await store.batchInsert({
+         tableName: TABLE_WORKFLOW_SNAPSHOT,
+         records: [wf1.recordData, wf2.recordData, wf3.recordData],
+       });
+
+       const { runs, total } = await store.getWorkflowRuns({ resourceId: resource1 });
+       // Note: scan order is not guaranteed
+       expect(total).toBe(2);
+       expect(runs.length).toBe(2);
+       expect(runs.map(r => r.runId)).toEqual(expect.arrayContaining([runId1, runId3]));
+       expect(runs.every(r => r.resourceId === resource1)).toBe(true);
+     });
+
+     test('getWorkflowRuns should filter by date range', async () => {
+       const wfName = 'get-runs-filter-date';
+       const time1 = new Date(2024, 0, 10); // Jan 10 2024
+       const time2 = new Date(2024, 0, 15); // Jan 15 2024
+       const time3 = new Date(2024, 0, 20); // Jan 20 2024
+       const runId1 = `run-${randomUUID()}`;
+       const runId2 = `run-${randomUUID()}`;
+       const runId3 = `run-${randomUUID()}`;
+       const wf1 = sampleWorkflowSnapshot(wfName, runId1, undefined, time1);
+       const wf2 = sampleWorkflowSnapshot(wfName, runId2, undefined, time2);
+       const wf3 = sampleWorkflowSnapshot(wfName, runId3, undefined, time3);
+
+       await store.batchInsert({
+         tableName: TABLE_WORKFLOW_SNAPSHOT,
+         records: [wf1.recordData, wf2.recordData, wf3.recordData],
+       });
+
+       const { runs, total } = await store.getWorkflowRuns({
+         fromDate: new Date(2024, 0, 12), // Jan 12
+         toDate: new Date(2024, 0, 18), // Jan 18
+       });
+       expect(total).toBe(1);
+       expect(runs.length).toBe(1);
+       expect(runs[0]!.runId).toBe(runId2); // Only wf2 falls within the range
+     });
+
+     test('getWorkflowRuns should handle pagination (limit/offset)', async () => {
+       const wfName = 'get-runs-pagination';
+       const snapshots = Array.from({ length: 5 }, (_, i) =>
+         sampleWorkflowSnapshot(wfName, `run-page-${i}`, undefined, new Date(Date.now() + i * 1000)),
+       );
+       await store.batchInsert({ tableName: TABLE_WORKFLOW_SNAPSHOT, records: snapshots.map(s => s.recordData) });
+
+       // Get page 1 (limit 2, offset 0)
+       const page1 = await store.getWorkflowRuns({ workflowName: wfName, limit: 2, offset: 0 });
+       expect(page1.total).toBe(5);
+       expect(page1.runs.length).toBe(2);
+       // Scan order is not guaranteed, so check for the presence of the two expected runs
+       const page1Ids = page1.runs.map(r => r.runId);
+       expect(snapshots.slice(0, 2).map(s => s!.recordData.run_id)).toEqual(expect.arrayContaining(page1Ids));
+
+       // Get page 2 (limit 2, offset 2)
+       const page2 = await store.getWorkflowRuns({ workflowName: wfName, limit: 2, offset: 2 });
+       expect(page2.total).toBe(5);
+       expect(page2.runs.length).toBe(2);
+       const page2Ids = page2.runs.map(r => r.runId);
+       expect(snapshots.slice(2, 4).map(s => s!.recordData.run_id)).toEqual(expect.arrayContaining(page2Ids));
+
+       // Get page 3 (limit 2, offset 4)
+       const page3 = await store.getWorkflowRuns({ workflowName: wfName, limit: 2, offset: 4 });
+       expect(page3.total).toBe(5);
+       expect(page3.runs.length).toBe(1);
+       // Assert the element type so indexed access type-checks
+       expect((page3.runs as WorkflowRun[])[0]!.runId).toBe(snapshots[4]!.recordData.run_id);
+
+       // Get page beyond results (offset 5)
+       const page4 = await store.getWorkflowRuns({ workflowName: wfName, limit: 2, offset: 5 });
+       expect(page4.total).toBe(5);
+       expect(page4.runs.length).toBe(0);
+     });
+   }); // End Workflow Operations describe
+
+   // --- Initialization & Configuration Tests ---
+   describe('Initialization & Configuration', () => {
+     test('should throw error if tableName is missing in config', () => {
+       expect(() => {
+         new DynamoDBStore({
+           name: 'MissingTableStore',
+           config: {
+             endpoint: LOCAL_ENDPOINT,
+             region: LOCAL_REGION,
+             credentials: { accessKeyId: 'test', secretAccessKey: 'test' },
+           } as any, // Cast to any to bypass the compile-time check for this specific test
+         });
+       }).toThrow(/tableName must be provided/);
+     });
+
+     test('should throw error during operations if table does not exist', async () => {
+       // Use a valid but random table name unlikely to exist
+       const nonExistentTableName = `non-existent-${randomUUID()}`;
+       const storeWithInvalidTable = new DynamoDBStore({
+         name: 'InvalidTableStore',
+         config: {
+           tableName: nonExistentTableName,
+           endpoint: LOCAL_ENDPOINT,
+           region: LOCAL_REGION,
+           credentials: { accessKeyId: 'test', secretAccessKey: 'test' },
+         },
+       });
+
+       await expect(storeWithInvalidTable.getThreadById({ threadId: 'any-id' }))
+         .rejects // Matches either the raw DynamoDB error or ElectroDB's wrapped message
+         .toThrow(/ResourceNotFoundException|Table.*does not exist|Cannot do operations on a non-existent table/);
+     });
+
+     test('init() should throw error if table does not exist', async () => {
+       // Use a valid but random table name unlikely to exist
+       const nonExistentTableName = `non-existent-init-${randomUUID()}`;
+       const storeWithInvalidTable = new DynamoDBStore({
+         name: 'InvalidTableStoreInit',
+         config: {
+           tableName: nonExistentTableName,
+           endpoint: LOCAL_ENDPOINT,
+           region: LOCAL_REGION,
+           credentials: { accessKeyId: 'test', secretAccessKey: 'test' },
+         },
+       });
+
+       await expect(storeWithInvalidTable.init())
+         .rejects // Same error shapes as the operation-level test above
+         .toThrow(/ResourceNotFoundException|Table.*does not exist|Cannot do operations on a non-existent table/);
+     });
+   }); // End Initialization & Configuration describe
+
+   // --- Generic Storage Methods Tests ---
+   describe('Generic Storage Methods (`insert`, `load`, `batchInsert`, `clearTable`)', () => {
+     // Declare genericStore specific to this block
+     let genericStore: DynamoDBStore;
+
+     beforeAll(() => {
+       // Initialize genericStore using the same config as the main store
+       genericStore = new DynamoDBStore({
+         name: 'DynamoDBGenericTest',
+         config: {
+           tableName: TEST_TABLE_NAME, // Ensure this uses the correct test table
+           endpoint: LOCAL_ENDPOINT,
+           region: LOCAL_REGION,
+           credentials: { accessKeyId: 'test', secretAccessKey: 'test' },
+         },
+       });
+       console.log('Generic test store initialized for generic tests.');
+     });
+
+     const sampleThreadData = (id: string) => ({
+       entity: 'thread',
+       id: id,
+       resourceId: `resource-${randomUUID()}`,
+       title: 'Generic Test Thread',
+       createdAt: new Date().toISOString(),
+       updatedAt: new Date().toISOString(),
+       metadata: JSON.stringify({ generic: true }),
+     });
+
+     test('insert() should save a record', async () => {
+       const threadId = `thread-${randomUUID()}`;
+       const record = sampleThreadData(threadId);
+       // Use the genericStore instance
+       await expect(genericStore.insert({ tableName: TABLE_THREADS, record })).resolves.not.toThrow();
+       const loaded = await genericStore.load<StorageThreadType>({ tableName: TABLE_THREADS, keys: { id: threadId } });
+       expect(loaded).not.toBeNull();
+       if (loaded) {
+         expect(loaded.id).toBe(threadId);
+         expect(loaded.title).toBe('Generic Test Thread');
+         expect(loaded.metadata).toEqual({ generic: true });
+       }
+     });
+
+     test('load() should return null for non-existent record', async () => {
+       // Use the genericStore instance
+       const loaded = await genericStore.load({ tableName: TABLE_THREADS, keys: { id: 'non-existent-generic' } });
+       expect(loaded).toBeNull();
+     });
+
+     test('batchInsert() should save multiple records', async () => {
+       const threadId1 = `thread-batch-${randomUUID()}`;
+       const threadId2 = `thread-batch-${randomUUID()}`;
+       const records = [sampleThreadData(threadId1), sampleThreadData(threadId2)];
+       // Use the genericStore instance
+       await expect(genericStore.batchInsert({ tableName: TABLE_THREADS, records })).resolves.not.toThrow();
+       const loaded1 = await genericStore.load<StorageThreadType>({ tableName: TABLE_THREADS, keys: { id: threadId1 } });
+       const loaded2 = await genericStore.load<StorageThreadType>({ tableName: TABLE_THREADS, keys: { id: threadId2 } });
+       expect(loaded1).toBeDefined();
+       expect(loaded2).toBeDefined();
+       expect(loaded1?.id).toBe(threadId1);
+       expect(loaded2?.id).toBe(threadId2);
+     });
+
+     test('clearTable() should remove all records for the logical table', async () => {
+       const threadId1 = `thread-clear-${randomUUID()}`;
+       const threadId2 = `thread-clear-${randomUUID()}`;
+       const records = [sampleThreadData(threadId1), sampleThreadData(threadId2)];
+       // Use the genericStore instance
+       await genericStore.batchInsert({ tableName: TABLE_THREADS, records });
+       expect(
+         await genericStore.load<StorageThreadType>({ tableName: TABLE_THREADS, keys: { id: threadId1 } }),
+       ).toBeDefined();
+       expect(
+         await genericStore.load<StorageThreadType>({ tableName: TABLE_THREADS, keys: { id: threadId2 } }),
+       ).toBeDefined();
+       await expect(genericStore.clearTable({ tableName: TABLE_THREADS })).resolves.not.toThrow();
+       expect(
+         await genericStore.load<StorageThreadType>({ tableName: TABLE_THREADS, keys: { id: threadId1 } }),
+       ).toBeNull();
+       expect(
+         await genericStore.load<StorageThreadType>({ tableName: TABLE_THREADS, keys: { id: threadId2 } }),
+       ).toBeNull();
+     });
+   }); // End Generic Storage Methods describe
+ }); // End Main Describe