@aj-archipelago/cortex 1.4.6 → 1.4.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. package/helper-apps/cortex-file-handler/package-lock.json +2 -2
  2. package/helper-apps/cortex-file-handler/package.json +1 -1
  3. package/helper-apps/cortex-file-handler/src/index.js +27 -4
  4. package/helper-apps/cortex-file-handler/src/services/storage/AzureStorageProvider.js +74 -10
  5. package/helper-apps/cortex-file-handler/src/services/storage/StorageService.js +23 -2
  6. package/helper-apps/cortex-file-handler/src/start.js +2 -0
  7. package/helper-apps/cortex-file-handler/tests/deleteOperations.test.js +287 -0
  8. package/helper-apps/cortex-file-handler/tests/start.test.js +1 -1
  9. package/lib/entityConstants.js +1 -1
  10. package/lib/fileUtils.js +1481 -0
  11. package/lib/pathwayTools.js +7 -1
  12. package/lib/util.js +2 -313
  13. package/package.json +4 -3
  14. package/pathways/image_qwen.js +1 -1
  15. package/pathways/system/entity/memory/sys_read_memory.js +17 -3
  16. package/pathways/system/entity/memory/sys_save_memory.js +22 -6
  17. package/pathways/system/entity/sys_entity_agent.js +21 -4
  18. package/pathways/system/entity/tools/sys_tool_analyzefile.js +171 -0
  19. package/pathways/system/entity/tools/sys_tool_codingagent.js +38 -4
  20. package/pathways/system/entity/tools/sys_tool_editfile.js +403 -0
  21. package/pathways/system/entity/tools/sys_tool_file_collection.js +433 -0
  22. package/pathways/system/entity/tools/sys_tool_image.js +172 -10
  23. package/pathways/system/entity/tools/sys_tool_image_gemini.js +123 -10
  24. package/pathways/system/entity/tools/sys_tool_readfile.js +217 -124
  25. package/pathways/system/entity/tools/sys_tool_validate_url.js +137 -0
  26. package/pathways/system/entity/tools/sys_tool_writefile.js +211 -0
  27. package/pathways/system/workspaces/run_workspace_prompt.js +4 -3
  28. package/pathways/transcribe_gemini.js +2 -1
  29. package/server/executeWorkspace.js +1 -1
  30. package/server/plugins/neuralSpacePlugin.js +2 -6
  31. package/server/plugins/openAiWhisperPlugin.js +2 -1
  32. package/server/plugins/replicateApiPlugin.js +4 -14
  33. package/server/typeDef.js +10 -1
  34. package/tests/integration/features/tools/fileCollection.test.js +858 -0
  35. package/tests/integration/features/tools/fileOperations.test.js +851 -0
  36. package/tests/integration/features/tools/writefile.test.js +350 -0
  37. package/tests/unit/core/fileCollection.test.js +259 -0
  38. package/tests/unit/core/util.test.js +320 -1
package/helper-apps/cortex-file-handler/package-lock.json
@@ -1,12 +1,12 @@
 {
   "name": "@aj-archipelago/cortex-file-handler",
-  "version": "2.6.2",
+  "version": "2.6.3",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
       "name": "@aj-archipelago/cortex-file-handler",
-      "version": "2.6.2",
+      "version": "2.6.3",
       "dependencies": {
         "@azure/storage-blob": "^12.13.0",
         "@distube/ytdl-core": "^4.14.3",
package/helper-apps/cortex-file-handler/package.json
@@ -1,6 +1,6 @@
 {
   "name": "@aj-archipelago/cortex-file-handler",
-  "version": "2.6.2",
+  "version": "2.6.3",
   "description": "File handling service for Cortex - handles file uploads, media chunking, and document processing",
   "type": "module",
   "main": "src/index.js",
package/helper-apps/cortex-file-handler/src/index.js
@@ -47,6 +47,27 @@ async function cleanupInactive(context) {
 }
 
 async function CortexFileHandler(context, req) {
+  // Parse body if it's a string (Azure Functions sometimes doesn't auto-parse DELETE bodies)
+  let parsedBody = req.body;
+  if (typeof req.body === 'string' && req.body.length > 0) {
+    try {
+      parsedBody = JSON.parse(req.body);
+    } catch (e) {
+      // If parsing fails, treat as empty object
+      parsedBody = {};
+    }
+  }
+
+  // For GET requests, prioritize query string. For other methods, check body first, then query
+  // Also check if parsedBody actually has content (not just empty object)
+  const hasBodyContent = parsedBody && typeof parsedBody === 'object' && Object.keys(parsedBody).length > 0;
+  const bodySource = hasBodyContent ? (parsedBody.params || parsedBody) : {};
+  const querySource = req.query || {};
+
+  // Merge sources: for GET, query takes priority; for others, body takes priority
+  const isGet = req.method?.toLowerCase() === 'get';
+  const source = isGet ? { ...bodySource, ...querySource } : { ...querySource, ...bodySource };
+
   const {
     uri,
     requestId,
@@ -59,7 +80,7 @@ async function CortexFileHandler(context, req) {
     load,
     restore,
     container,
-  } = req.body?.params || req.query;
+  } = source;
 
   // Normalize boolean parameters
   const shouldSave = save === true || save === "true";
@@ -136,8 +157,10 @@ async function CortexFileHandler(context, req) {
   // 1. Delete multiple files by requestId (existing behavior)
   // 2. Delete single file by hash (new behavior)
   if (operation === "delete") {
-    const deleteRequestId = req.query.requestId || requestId;
-    const deleteHash = req.query.hash || hash;
+    // Check both query string and body params for delete parameters
+    // Handle both req.body.params.hash and req.body.hash formats
+    const deleteRequestId = req.query.requestId || parsedBody?.params?.requestId || parsedBody?.requestId || requestId;
+    const deleteHash = req.query.hash || parsedBody?.params?.hash || parsedBody?.hash || hash;
 
     // If only hash is provided, delete single file by hash
     if (deleteHash && !deleteRequestId) {
@@ -164,7 +187,7 @@ async function CortexFileHandler(context, req) {
     if (!deleteRequestId) {
      context.res = {
        status: 400,
-        body: "Please pass either a requestId or hash on the query string",
+        body: "Please pass either a requestId or hash in the query string or request body",
      };
      return;
    }
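
Taken together, the index.js changes make parameter resolution method-aware. A minimal standalone sketch of the precedence rules above (illustrative only; `resolveParams` is a hypothetical helper, not part of the package):

// Hypothetical helper mirroring the merge logic added above (not in the package).
function resolveParams(req) {
  // DELETE bodies may arrive as raw strings; parse defensively.
  let parsedBody = req.body;
  if (typeof parsedBody === "string" && parsedBody.length > 0) {
    try { parsedBody = JSON.parse(parsedBody); } catch { parsedBody = {}; }
  }
  const hasBody = parsedBody && typeof parsedBody === "object" && Object.keys(parsedBody).length > 0;
  const bodySource = hasBody ? (parsedBody.params || parsedBody) : {};
  const querySource = req.query || {};
  // Later spread wins: GET favors the query string, other methods favor the body.
  return req.method?.toLowerCase() === "get"
    ? { ...bodySource, ...querySource }
    : { ...querySource, ...bodySource };
}

// A DELETE whose hash arrives only in the body still resolves it:
resolveParams({ method: "DELETE", query: {}, body: '{"params":{"hash":"abc"}}' });
// => { hash: "abc" }

Note that the delete branch itself still reads req.query first, so when a hash appears in both places the query-string value wins, which is what the priority test added below asserts.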
package/helper-apps/cortex-file-handler/src/services/storage/AzureStorageProvider.js
@@ -119,6 +119,11 @@ export class AzureStorageProvider extends StorageProvider {
       blobName = generateBlobName(requestId, `${shortId}${fileExtension}`);
     }
 
+    // Validate blobName is not empty
+    if (!blobName || blobName.trim().length === 0) {
+      throw new Error(`Invalid blob name generated: blobName="${blobName}", requestId="${requestId}", filename="${filename}"`);
+    }
+
     // Create a read stream for the file
     const fileStream = fs.createReadStream(filePath);
 
@@ -134,8 +139,20 @@ export class AzureStorageProvider extends StorageProvider {
     // Generate SAS token after successful upload
     const sasToken = this.generateSASToken(containerClient, blobName);
 
+    const url = `${blockBlobClient.url}?${sasToken}`;
+
+    // Validate that the URL contains a blob name (not just container)
+    // Azure blob URLs should be: https://account.blob.core.windows.net/container/blobname
+    // Container-only URLs end with /container/ or /container
+    const urlObj = new URL(url);
+    const pathParts = urlObj.pathname.split('/').filter(p => p.length > 0);
+    if (pathParts.length <= 1) {
+      // Only container name, no blob name - this is invalid
+      throw new Error(`Generated invalid Azure URL (container-only): ${url}, blobName: ${blobName}`);
+    }
+
     return {
-      url: `${blockBlobClient.url}?${sasToken}`,
+      url: url,
       blobName: blobName,
     };
   }
@@ -148,6 +165,11 @@ export class AzureStorageProvider extends StorageProvider {
     let blobName = sanitizeFilename(encodedFilename);
     blobName = encodeURIComponent(blobName);
 
+    // Validate blobName is not empty
+    if (!blobName || blobName.trim().length === 0) {
+      throw new Error(`Invalid blob name generated from encodedFilename: "${encodedFilename}"`);
+    }
+
     const options = {
       blobHTTPHeaders: {
         ...(contentType ? { blobContentType: contentType } : {}),
@@ -163,7 +185,16 @@ export class AzureStorageProvider extends StorageProvider {
     await blockBlobClient.uploadStream(stream, undefined, undefined, options);
     const sasToken = this.generateSASToken(containerClient, blobName);
 
-    return `${blockBlobClient.url}?${sasToken}`;
+    const url = `${blockBlobClient.url}?${sasToken}`;
+
+    // Validate that the URL contains a blob name (not just container)
+    const urlObj = new URL(url);
+    const pathParts = urlObj.pathname.split('/').filter(p => p.length > 0);
+    if (pathParts.length <= 1) {
+      throw new Error(`Generated invalid Azure URL (container-only) from uploadStream: ${url}, blobName: ${blobName}`);
+    }
+
+    return url;
   }
 
   async deleteFiles(requestId) {
@@ -204,19 +235,52 @@ export class AzureStorageProvider extends StorageProvider {
     const urlObj = new URL(url);
     let blobName = urlObj.pathname.substring(1); // Remove leading slash
 
-    // Handle Azurite URLs which include account name in path: /devstoreaccount1/container/blob
+    // Handle different URL formats:
+    // 1. Azurite: /devstoreaccount1/container/blobname (3 segments)
+    // 2. Standard Azure: /container/blobname (2 segments)
+    // 3. Container-only: /container or /container/ (invalid)
+
     if (blobName.includes('/')) {
-      const pathSegments = blobName.split('/');
-      if (pathSegments.length >= 2) {
-        // For Azurite: devstoreaccount1/container/blobname -> blobname
+      const pathSegments = blobName.split('/').filter(segment => segment.length > 0);
+
+      if (pathSegments.length === 1) {
+        // Only container name, no blob name - this is invalid
+        console.warn(`Invalid blob URL (container-only): ${url}`);
+        return null;
+      } else if (pathSegments.length === 2) {
+        // Standard Azure format: container/blobname
+        // Check if first segment matches container name
+        if (pathSegments[0] === this.containerName) {
+          blobName = pathSegments[1];
+        } else {
+          // Container name doesn't match, but assume second segment is blob name
+          blobName = pathSegments[1];
+        }
+      } else if (pathSegments.length >= 3) {
+        // Azurite format: devstoreaccount1/container/blobname
         // Skip the account and container segments to get the actual blob name
-        blobName = pathSegments.slice(2).join('/');
+        // Check if second segment matches container name
+        if (pathSegments[1] === this.containerName) {
+          blobName = pathSegments.slice(2).join('/');
+        } else {
+          // Container name doesn't match, but assume remaining segments are blob name
+          blobName = pathSegments.slice(2).join('/');
+        }
+      }
+    } else {
+      // No slashes - could be just container name or just blob name
+      if (blobName === this.containerName || blobName === this.containerName + '/') {
+        // URL is just the container name - invalid blob URL
+        console.warn(`Invalid blob URL (container-only): ${url}`);
+        return null;
      }
+      // Otherwise assume it's a blob name at root level (unlikely but possible)
    }
 
-    // Remove container name prefix if present (for non-Azurite URLs)
-    if (blobName.startsWith(this.containerName + '/')) {
-      blobName = blobName.substring(this.containerName.length + 1);
+    // Validate that we have a non-empty blob name
+    if (!blobName || blobName.trim().length === 0) {
+      console.warn(`Invalid blob URL (empty blob name): ${url}`);
+      return null;
    }
 
     const blockBlobClient = containerClient.getBlockBlobClient(blobName);
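
The net effect of the getBlobNameFromUrl rewrite is a dispatch on the count of non-empty path segments. A condensed sketch of the mapping (the URLs and the `files` container name are illustrative, and this toy version omits the container-name comparisons the real method performs, since both of those branches assign the same value):

// Toy version of the segment-count dispatch above; not the class method itself.
function blobNameFromUrl(url) {
  const segments = new URL(url).pathname.split("/").filter((s) => s.length > 0);
  if (segments.length <= 1) return null;         // container-only URL: invalid
  if (segments.length === 2) return segments[1]; // standard: /container/blob
  return segments.slice(2).join("/");            // Azurite: /account/container/blob
}

blobNameFromUrl("https://account.blob.core.windows.net/files/doc.txt");   // => "doc.txt"
blobNameFromUrl("http://127.0.0.1:10000/devstoreaccount1/files/doc.txt"); // => "doc.txt"
blobNameFromUrl("https://account.blob.core.windows.net/files/");          // => null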
package/helper-apps/cortex-file-handler/src/services/storage/StorageService.js
@@ -191,8 +191,11 @@ export class StorageService {
     const effectiveContainer = containerName || defaultContainerName;
     if (effectiveContainer === defaultContainerName && scopedHash.includes(':')) {
       const [legacyHash] = scopedHash.split(':', 2);
-      // Try to remove legacy key - it's okay if it doesn't exist
-      await removeFromFileStoreMap(legacyHash);
+      // Try to remove legacy key - only attempt if it exists to avoid unnecessary "does not exist" logs
+      const legacyExists = await getFileStoreMap(legacyHash);
+      if (legacyExists) {
+        await removeFromFileStoreMap(legacyHash);
+      }
     }
   }
 
@@ -203,9 +206,16 @@ export class StorageService {
     // Delete from primary storage
     if (hashResult.url) {
       try {
+        // Log the URL being deleted for debugging
+        console.log(`Deleting file from primary storage - hash: ${hash}, url: ${hashResult.url}`);
         const primaryResult = await this.deleteFile(hashResult.url);
         if (primaryResult) {
+          console.log(`Successfully deleted from primary storage - hash: ${hash}, result: ${primaryResult}`);
           results.push({ provider: 'primary', result: primaryResult });
+        } else {
+          // deleteFile returned null, which means the URL was invalid
+          console.warn(`Invalid or empty URL for hash ${hash}: ${hashResult.url}`);
+          results.push({ provider: 'primary', error: 'Invalid URL (container-only or empty blob name)' });
         }
       } catch (error) {
         console.error(`Error deleting file from primary storage:`, error);
@@ -216,14 +226,25 @@ export class StorageService {
     // Delete from backup storage (GCS)
     if (hashResult.gcs && this.backupProvider) {
       try {
+        console.log(`Deleting file from backup storage - hash: ${hash}, gcs: ${hashResult.gcs}`);
         const backupResult = await this.deleteFileFromBackup(hashResult.gcs);
         if (backupResult) {
+          console.log(`Successfully deleted from backup storage - hash: ${hash}, result: ${backupResult}`);
           results.push({ provider: 'backup', result: backupResult });
+        } else {
+          console.warn(`Backup deletion returned null for hash ${hash}: ${hashResult.gcs}`);
+          results.push({ provider: 'backup', error: 'Deletion returned null' });
         }
       } catch (error) {
         console.error(`Error deleting file from backup storage:`, error);
         results.push({ provider: 'backup', error: error.message });
       }
+    } else {
+      if (!hashResult.gcs) {
+        console.log(`No GCS URL found for hash ${hash}, skipping backup deletion`);
+      } else if (!this.backupProvider) {
+        console.log(`Backup provider not configured, skipping backup deletion for hash ${hash}`);
+      }
     }
 
     // Note: Hash was already removed from Redis atomically at the beginning
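
With the added else branches, every hash deletion now records an outcome per provider. The `results` array ends up shaped roughly like this (an illustrative sketch; the values are invented for the example):

// Illustrative shape only - values are made up, not actual return values.
const results = [
  { provider: "primary", result: "deleted" },               // Azure delete succeeded
  { provider: "backup", error: "Deletion returned null" },  // GCS delete was a no-op
];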
package/helper-apps/cortex-file-handler/src/start.js
@@ -48,6 +48,8 @@ const packageJson = JSON.parse(
 const version = packageJson.version;
 
 app.use(cors());
+// Parse JSON bodies for all requests (including DELETE)
+app.use(express.json());
 // Serve static files from the public folder
 app.use("/files", express.static(publicFolder));
 
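The standalone server needs this because Express ships with no default body parser: without `express.json()`, `req.body` stays undefined and a JSON DELETE payload never reaches the handler. A minimal sketch of the behavior the middleware enables (the route path is illustrative, not the app's actual wiring):

import express from "express";

const app = express();
app.use(express.json()); // without this, req.body is undefined for JSON requests

// Illustrative route only:
app.delete("/api/files", (req, res) => {
  // A body of { "params": { "hash": "abc" } } now arrives parsed:
  const hash = req.body?.params?.hash || req.body?.hash || req.query.hash;
  res.json({ hash });
});

app.listen(3000);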
@@ -345,4 +345,291 @@ test.serial("should delete file uploaded with different filename", async (t) =>
345
345
  // Ignore cleanup errors
346
346
  }
347
347
  }
348
+ });
349
+
350
+ // Tests for DELETE with hash in request body
351
+ test.serial("should delete file by hash from request body params", async (t) => {
352
+ const testContent = "test content for body params deletion";
353
+ const testHash = `test-body-${uuidv4()}`;
354
+ const filePath = await createTestFile(testContent, "txt");
355
+ let uploadResponse;
356
+
357
+ try {
358
+ // Upload file with hash
359
+ uploadResponse = await uploadFile(filePath, null, testHash);
360
+ t.is(uploadResponse.status, 200, "Upload should succeed");
361
+
362
+ // Delete file by hash using body params
363
+ const deleteResponse = await axios.delete(baseUrl, {
364
+ data: { params: { hash: testHash } },
365
+ validateStatus: (status) => true,
366
+ timeout: 10000,
367
+ });
368
+
369
+ t.is(deleteResponse.status, 200, "Delete should succeed");
370
+ t.truthy(deleteResponse.data.message, "Should have success message");
371
+ t.true(deleteResponse.data.message.includes(testHash), "Message should include hash");
372
+ t.is(deleteResponse.data.deleted.hash, testHash, "Should include deleted hash");
373
+
374
+ // Verify hash is gone
375
+ const hashCheckAfter = await checkHashExists(testHash);
376
+ t.is(hashCheckAfter.status, 404, "Hash should not exist after deletion");
377
+
378
+ } finally {
379
+ fs.unlinkSync(filePath);
380
+ try {
381
+ await removeFromFileStoreMap(testHash);
382
+ } catch (e) {
383
+ // Ignore cleanup errors
384
+ }
385
+ }
386
+ });
387
+
388
+ test.serial("should delete file by hash from request body (direct)", async (t) => {
389
+ const testContent = "test content for direct body deletion";
390
+ const testHash = `test-direct-body-${uuidv4()}`;
391
+ const filePath = await createTestFile(testContent, "txt");
392
+ let uploadResponse;
393
+
394
+ try {
395
+ // Upload file with hash
396
+ uploadResponse = await uploadFile(filePath, null, testHash);
397
+ t.is(uploadResponse.status, 200, "Upload should succeed");
398
+
399
+ // Delete file by hash using direct body (not in params)
400
+ const deleteResponse = await axios.delete(baseUrl, {
401
+ data: { hash: testHash },
402
+ validateStatus: (status) => true,
403
+ timeout: 10000,
404
+ });
405
+
406
+ t.is(deleteResponse.status, 200, "Delete should succeed");
407
+ t.truthy(deleteResponse.data.message, "Should have success message");
408
+ t.is(deleteResponse.data.deleted.hash, testHash, "Should include deleted hash");
409
+
410
+ // Verify hash is gone
411
+ const hashCheckAfter = await checkHashExists(testHash);
412
+ t.is(hashCheckAfter.status, 404, "Hash should not exist after deletion");
413
+
414
+ } finally {
415
+ fs.unlinkSync(filePath);
416
+ try {
417
+ await removeFromFileStoreMap(testHash);
418
+ } catch (e) {
419
+ // Ignore cleanup errors
420
+ }
421
+ }
422
+ });
423
+
424
+ test.serial("should prioritize query string over body params for hash", async (t) => {
425
+ const testContent = "test content for priority test";
426
+ const queryHash = `test-query-${uuidv4()}`;
427
+ const bodyHash = `test-body-${uuidv4()}`;
428
+ const filePath = await createTestFile(testContent, "txt");
429
+ let uploadResponse;
430
+
431
+ try {
432
+ // Upload file with query hash
433
+ uploadResponse = await uploadFile(filePath, null, queryHash);
434
+ t.is(uploadResponse.status, 200, "Upload should succeed");
435
+
436
+ // Try to delete with hash in both query and body - query should take priority
437
+ const deleteResponse = await axios.delete(`${baseUrl}?hash=${queryHash}`, {
438
+ data: { params: { hash: bodyHash } },
439
+ validateStatus: (status) => true,
440
+ timeout: 10000,
441
+ });
442
+
443
+ t.is(deleteResponse.status, 200, "Delete should succeed");
444
+ t.is(deleteResponse.data.deleted.hash, queryHash, "Should use query hash, not body hash");
445
+
446
+ // Verify query hash is gone
447
+ const queryHashCheck = await checkHashExists(queryHash);
448
+ t.is(queryHashCheck.status, 404, "Query hash should not exist after deletion");
449
+
450
+ } finally {
451
+ fs.unlinkSync(filePath);
452
+ try {
453
+ await removeFromFileStoreMap(queryHash);
454
+ await removeFromFileStoreMap(bodyHash);
455
+ } catch (e) {
456
+ // Ignore cleanup errors
457
+ }
458
+ }
459
+ });
460
+
461
+ test.serial("should delete file by requestId from body params", async (t) => {
462
+ const testContent = "test content for requestId body deletion";
463
+ const requestId = uuidv4();
464
+ const filePath = await createTestFile(testContent, "txt");
465
+ let uploadResponse;
466
+
467
+ try {
468
+ // Upload file with requestId
469
+ uploadResponse = await uploadFile(filePath, requestId, null);
470
+ t.is(uploadResponse.status, 200, "Upload should succeed");
471
+
472
+ // Delete file by requestId using body params
473
+ const deleteResponse = await axios.delete(baseUrl, {
474
+ data: { params: { requestId: requestId } },
475
+ validateStatus: (status) => true,
476
+ timeout: 10000,
477
+ });
478
+
479
+ t.is(deleteResponse.status, 200, "Delete should succeed");
480
+ t.truthy(deleteResponse.data.body, "Should have deletion body");
481
+ t.true(Array.isArray(deleteResponse.data.body), "Deletion body should be array");
482
+
483
+ } finally {
484
+ fs.unlinkSync(filePath);
485
+ }
486
+ });
487
+
488
+ test.serial("should handle standard Azure URL format correctly", async (t) => {
489
+ const testContent = "test content for standard URL format";
490
+ const testHash = `test-standard-url-${uuidv4()}`;
491
+ const filePath = await createTestFile(testContent, "txt");
492
+ let uploadResponse;
493
+
494
+ try {
495
+ // Upload file
496
+ uploadResponse = await uploadFile(filePath, null, testHash);
497
+ t.is(uploadResponse.status, 200, "Upload should succeed");
498
+ t.truthy(uploadResponse.data.url, "Should have file URL");
499
+
500
+ // Verify URL format is standard Azure format (container/blob)
501
+ const url = uploadResponse.data.url;
502
+ const urlObj = new URL(url);
503
+ const pathParts = urlObj.pathname.split('/').filter(p => p.length > 0);
504
+ t.true(pathParts.length >= 2, "URL should have at least container and blob name");
505
+
506
+ // Delete file - should parse URL correctly
507
+ const deleteResponse = await deleteFileByHash(testHash);
508
+ t.is(deleteResponse.status, 200, "Delete should succeed");
509
+
510
+ // Verify deletion was successful
511
+ const hashCheckAfter = await checkHashExists(testHash);
512
+ t.is(hashCheckAfter.status, 404, "Hash should not exist after deletion");
513
+
514
+ } finally {
515
+ fs.unlinkSync(filePath);
516
+ try {
517
+ await removeFromFileStoreMap(testHash);
518
+ } catch (e) {
519
+ // Ignore cleanup errors
520
+ }
521
+ }
522
+ });
523
+
524
+ test.serial("should handle backwards compatibility key removal correctly", async (t) => {
525
+ const testContent = "test content for legacy key test";
526
+ const testHash = `test-legacy-${uuidv4()}`;
527
+ const filePath = await createTestFile(testContent, "txt");
528
+ let uploadResponse;
529
+
530
+ try {
531
+ // Upload file
532
+ uploadResponse = await uploadFile(filePath, null, testHash);
533
+ t.is(uploadResponse.status, 200, "Upload should succeed");
534
+
535
+ // Manually create a legacy unscoped key to test backwards compatibility
536
+ const { setFileStoreMap, getFileStoreMap, getScopedHashKey } = await import("../src/redis.js");
537
+ const { getDefaultContainerName } = await import("../src/constants.js");
538
+ const defaultContainer = getDefaultContainerName();
539
+ const scopedHash = getScopedHashKey(testHash, defaultContainer);
540
+ const hashResult = await getFileStoreMap(scopedHash);
541
+
542
+ if (hashResult) {
543
+ // Create legacy unscoped key
544
+ await setFileStoreMap(testHash, hashResult);
545
+
546
+ // Verify both keys exist
547
+ const scopedExists = await getFileStoreMap(scopedHash);
548
+ const legacyExists = await getFileStoreMap(testHash);
549
+ t.truthy(scopedExists, "Scoped key should exist");
550
+ t.truthy(legacyExists, "Legacy key should exist");
551
+
552
+ // Delete file - should remove both keys
553
+ const deleteResponse = await deleteFileByHash(testHash);
554
+ t.is(deleteResponse.status, 200, "Delete should succeed");
555
+
556
+ // Verify both keys are removed
557
+ const scopedAfter = await getFileStoreMap(scopedHash);
558
+ const legacyAfter = await getFileStoreMap(testHash);
559
+ t.falsy(scopedAfter, "Scoped key should be removed");
560
+ t.falsy(legacyAfter, "Legacy key should be removed");
561
+ }
562
+
563
+ } finally {
564
+ fs.unlinkSync(filePath);
565
+ try {
566
+ await removeFromFileStoreMap(testHash);
567
+ } catch (e) {
568
+ // Ignore cleanup errors
569
+ }
570
+ }
571
+ });
572
+
573
+ test.serial("should not log 'does not exist' when legacy key doesn't exist", async (t) => {
574
+ const testContent = "test content for no legacy key test";
575
+ const testHash = `test-no-legacy-${uuidv4()}`;
576
+ const filePath = await createTestFile(testContent, "txt");
577
+ let uploadResponse;
578
+
579
+ try {
580
+ // Upload file (this creates only the scoped key, no legacy key)
581
+ uploadResponse = await uploadFile(filePath, null, testHash);
582
+ t.is(uploadResponse.status, 200, "Upload should succeed");
583
+
584
+ // Verify only scoped key exists
585
+ const { getFileStoreMap, getScopedHashKey } = await import("../src/redis.js");
586
+ const { getDefaultContainerName } = await import("../src/constants.js");
587
+ const defaultContainer = getDefaultContainerName();
588
+ const scopedHash = getScopedHashKey(testHash, defaultContainer);
589
+ const scopedExists = await getFileStoreMap(scopedHash);
590
+ const legacyExists = await getFileStoreMap(testHash);
591
+ t.truthy(scopedExists, "Scoped key should exist");
592
+ t.falsy(legacyExists, "Legacy key should not exist");
593
+
594
+ // Delete file - should not try to remove non-existent legacy key
595
+ // (This test verifies the fix doesn't log "does not exist" unnecessarily)
596
+ const deleteResponse = await deleteFileByHash(testHash);
597
+ t.is(deleteResponse.status, 200, "Delete should succeed");
598
+
599
+ // Verify scoped key is removed
600
+ const scopedAfter = await getFileStoreMap(scopedHash);
601
+ t.falsy(scopedAfter, "Scoped key should be removed");
602
+
603
+ } finally {
604
+ fs.unlinkSync(filePath);
605
+ try {
606
+ await removeFromFileStoreMap(testHash);
607
+ } catch (e) {
608
+ // Ignore cleanup errors
609
+ }
610
+ }
611
+ });
612
+
613
+ test.serial("should handle error message for missing hash/requestId correctly", async (t) => {
614
+ // Test with no parameters at all
615
+ const deleteResponse1 = await axios.delete(baseUrl, {
616
+ validateStatus: (status) => true,
617
+ timeout: 10000,
618
+ });
619
+
620
+ t.is(deleteResponse1.status, 400, "Should return 400 for missing parameters");
621
+ t.truthy(deleteResponse1.data, "Should have error message");
622
+ t.true(
623
+ deleteResponse1.data.includes("query string or request body"),
624
+ "Error should mention both query string and request body"
625
+ );
626
+
627
+ // Test with empty body
628
+ const deleteResponse2 = await axios.delete(baseUrl, {
629
+ data: {},
630
+ validateStatus: (status) => true,
631
+ timeout: 10000,
632
+ });
633
+
634
+ t.is(deleteResponse2.status, 400, "Should return 400 for missing parameters");
348
635
  });
@@ -361,7 +361,7 @@ test.serial("should validate requestId for delete operation", async (t) => {
361
361
  t.is(response.status, 400, "Should return 400 for missing requestId");
362
362
  t.is(
363
363
  response.data,
364
- "Please pass either a requestId or hash on the query string",
364
+ "Please pass either a requestId or hash in the query string or request body",
365
365
  "Should return proper error message",
366
366
  );
367
367
  });
@@ -99,7 +99,7 @@ term~N (Match terms similar to "term", edit distance N)
99
99
 
100
100
  AI_GROUNDING_INSTRUCTIONS: "# Grounding Responses\n\nIf you base part or all of your response on one or more search results, you MUST cite the source using a custom markdown directive of the form :cd_source[searchResultId]. There is NO other valid way to cite a source and a good UX depends on you using this directive correctly. Do not include other clickable links to the source when using the :cd_source[searchResultId] directive. Every search result has a unique searchResultId. You must include it verbatim, copied directly from the search results. Place the directives at the end of the phrase, sentence or paragraph that is grounded in that particular search result. If you are citing multiple search results, use multiple individual :cd_source[searchResultId] directives (e.g. :cd_source[searchResultId1] :cd_source[searchResultId2] :cd_source[searchResultId3] etc.)",
101
101
 
102
- AI_AVAILABLE_FILES: "# Available Files\n\nThe following files are available for you to use in your tool calls or responses:\n{{{availableFiles}}}\n",
102
+ AI_AVAILABLE_FILES: "# Available Files (Last 10 Most Recently Used)\n\nThe following files are available for you to use in your tool calls or responses. This shows the last 10 most recently used files. More files may be available in your collection - use ListFileCollection or SearchFileCollection to see all files.\n\n{{{availableFiles}}}\n",
103
103
 
104
104
  AI_MEMORY_INSTRUCTIONS: `# Memory Instructions