@aj-archipelago/cortex 1.4.4 → 1.4.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -16,7 +16,16 @@ import {
  generateBlobName,
  } from "./utils/filenameUtils.js";
  import { publicFolder, port, ipAddress } from "./start.js";
- import { CONVERTED_EXTENSIONS, AZURITE_ACCOUNT_NAME } from "./constants.js";
+ import {
+ CONVERTED_EXTENSIONS,
+ AZURITE_ACCOUNT_NAME,
+ parseContainerNames,
+ getCurrentContainerNames,
+ AZURE_STORAGE_CONTAINER_NAMES,
+ getDefaultContainerName,
+ GCS_BUCKETNAME,
+ isValidContainerName
+ } from "./constants.js";
  import { FileConversionService } from "./services/FileConversionService.js";
  import { StorageFactory } from "./services/storage/StorageFactory.js";

@@ -70,28 +79,6 @@ if (!GCP_PROJECT_ID || !GCP_SERVICE_ACCOUNT) {
  }
  }

- // Parse comma-separated container names from environment variable
- const parseContainerNames = () => {
- const containerStr = process.env.AZURE_STORAGE_CONTAINER_NAME || "whispertempfiles";
- return containerStr.split(',').map(name => name.trim());
- };
-
- // Helper function to get current container names at runtime
- export const getCurrentContainerNames = () => {
- return parseContainerNames();
- };
-
- export const AZURE_STORAGE_CONTAINER_NAMES = parseContainerNames();
- export const DEFAULT_AZURE_STORAGE_CONTAINER_NAME = AZURE_STORAGE_CONTAINER_NAMES[0];
- export const GCS_BUCKETNAME = process.env.GCS_BUCKETNAME || "cortextempfiles";
-
- // Validate if a container name is allowed
- export const isValidContainerName = (containerName) => {
- // Read from environment at runtime to support dynamically changing env in tests
- const currentContainerNames = getCurrentContainerNames();
- return currentContainerNames.includes(containerName);
- };
-
  function isEncoded(str) {
  // Checks for any percent-encoded sequence
  return /%[0-9A-Fa-f]{2}/.test(str);
@@ -194,7 +181,7 @@ async function downloadFromGCS(gcsUrl, destinationPath) {

  export const getBlobClient = async (containerName = null) => {
  const connectionString = process.env.AZURE_STORAGE_CONNECTION_STRING;
- const finalContainerName = containerName || DEFAULT_AZURE_STORAGE_CONTAINER_NAME;
+ const finalContainerName = containerName || getDefaultContainerName();

  // Validate container name is in whitelist
  if (!isValidContainerName(finalContainerName)) {
@@ -350,10 +337,12 @@ function uploadBlob(

  if (errorOccurred) return; // Check again after waiting

- await processFile(fieldname, file, info);
+ // Capture containerName value to avoid closure issues
+ const capturedContainerName = containerName;
+ await processFile(fieldname, file, info, capturedContainerName);
  });

- const processFile = async (fieldname, file, info) => {
+ const processFile = async (fieldname, file, info, capturedContainerName) => {
  if (errorOccurred) return;

  // Validate file
@@ -449,7 +438,7 @@ function uploadBlob(
  context,
  uploadName,
  azureStream,
- containerName,
+ capturedContainerName,
  ).catch(async (err) => {
  cloudUploadError = err;
  // Fallback: try from disk if available
@@ -459,7 +448,7 @@ function uploadBlob(
  highWaterMark: 1024 * 1024,
  autoClose: true,
  });
- return saveToAzureStorage(context, uploadName, diskStream, containerName);
+ return saveToAzureStorage(context, uploadName, diskStream, capturedContainerName);
  }
  throw err;
  });
@@ -511,6 +500,7 @@ function uploadBlob(
  }, {}),
  };
  if (hash) result.hash = hash;
+ if (capturedContainerName) result.container = capturedContainerName;

  // If saving locally, wait for disk write to finish and then move to public folder
  if (saveToLocal) {
@@ -582,7 +572,7 @@ function uploadBlob(
  conversion.convertedPath,
  requestId,
  null,
- containerName,
+ capturedContainerName,
  );

  // Optionally save to GCS
@@ -827,6 +817,10 @@ async function uploadFile(
  if (hash) {
  result.hash = hash;
  }
+
+ if (containerName) {
+ result.container = containerName;
+ }

  // Initialize conversion service
  const conversionService = new FileConversionService(context, !saveToLocal);
@@ -1156,4 +1150,10 @@ export {
  gcs,
  uploadChunkToGCS,
  downloadFromGCS,
+ // Re-export container constants for backward compatibility
+ getCurrentContainerNames,
+ AZURE_STORAGE_CONTAINER_NAMES,
+ getDefaultContainerName,
+ GCS_BUCKETNAME,
+ isValidContainerName,
  };
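
Note: because blobHandler.js now re-exports the container helpers it imports from constants.js, existing consumers that imported them from blobHandler.js keep working unchanged. A minimal sketch of that equivalence (the consumer module here is hypothetical):

    // Hypothetical consumer - both paths should resolve to the same live bindings,
    // since blobHandler.js re-exports the names it imports from constants.js.
    import { isValidContainerName as fromBlobHandler } from "./blobHandler.js";
    import { isValidContainerName as fromConstants } from "./constants.js";
    console.log(fromBlobHandler === fromConstants); // true
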
@@ -132,3 +132,33 @@ export const CONVERTED_EXTENSIONS = [

  // Azure Storage constants
  export const AZURITE_ACCOUNT_NAME = "devstoreaccount1";
+
+ // Parse comma-separated container names from environment variable
+ export const parseContainerNames = () => {
+ const containerStr = process.env.AZURE_STORAGE_CONTAINER_NAME || "whispertempfiles";
+ return containerStr.split(',').map(name => name.trim());
+ };
+
+ // Helper function to get current container names at runtime
+ // Useful for runtime validation when env vars might change (e.g., in tests)
+ export const getCurrentContainerNames = () => {
+ return parseContainerNames();
+ };
+
+ export const AZURE_STORAGE_CONTAINER_NAMES = parseContainerNames();
+
+ // Helper function to get the default container name at runtime
+ // This allows tests to change the environment variable and have the correct default
+ export const getDefaultContainerName = () => {
+ return process.env.DEFAULT_AZURE_STORAGE_CONTAINER_NAME || getCurrentContainerNames()[0];
+ };
+
+ export const DEFAULT_AZURE_STORAGE_CONTAINER_NAME = process.env.DEFAULT_AZURE_STORAGE_CONTAINER_NAME || AZURE_STORAGE_CONTAINER_NAMES[0];
+ export const GCS_BUCKETNAME = process.env.GCS_BUCKETNAME || "cortextempfiles";
+
+ // Validate if a container name is allowed
+ export const isValidContainerName = (containerName) => {
+ // Read from environment at runtime to support dynamically changing env in tests
+ const currentContainerNames = getCurrentContainerNames();
+ return currentContainerNames.includes(containerName);
+ };
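
Note: a short sketch of how these helpers resolve container names at runtime; the environment values below are illustrative only:

    process.env.AZURE_STORAGE_CONTAINER_NAME = "whispertempfiles, usercontent,archive";
    parseContainerNames();           // ["whispertempfiles", "usercontent", "archive"] - split on ',' and trimmed
    getDefaultContainerName();       // "whispertempfiles", unless DEFAULT_AZURE_STORAGE_CONTAINER_NAME overrides it
    isValidContainerName("archive"); // true - name is in the parsed whitelist
    isValidContainerName("other");   // false

Because getCurrentContainerNames() re-reads the environment on every call, the function-based helpers track env changes (e.g. in tests), while the AZURE_STORAGE_CONTAINER_NAMES and DEFAULT_AZURE_STORAGE_CONTAINER_NAME constants are frozen at module load.
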
@@ -14,6 +14,7 @@ import {
  removeFromFileStoreMap,
  setFileStoreMap,
  cleanupRedisFileStoreMapAge,
+ getScopedHashKey,
  } from "./redis.js";
  import { FileConversionService } from "./services/FileConversionService.js";
  import { StorageService } from "./services/storage/StorageService.js";
@@ -141,7 +142,7 @@ async function CortexFileHandler(context, req) {
  // If only hash is provided, delete single file by hash
  if (deleteHash && !deleteRequestId) {
  try {
- const deleted = await storageService.deleteFileByHash(deleteHash);
+ const deleted = await storageService.deleteFileByHash(deleteHash, container);
  context.res = {
  status: 200,
  body: {
@@ -170,10 +171,11 @@ async function CortexFileHandler(context, req) {

  // First, get the hash from the map if it exists
  if (deleteHash) {
- const hashResult = await getFileStoreMap(deleteHash);
+ const scopedHash = getScopedHashKey(deleteHash, container);
+ const hashResult = await getFileStoreMap(scopedHash);
  if (hashResult) {
- context.log(`Found hash in map for deletion: ${deleteHash}`);
- await removeFromFileStoreMap(deleteHash);
+ context.log(`Found hash in map for deletion: ${deleteHash} (scoped key: ${scopedHash})`);
+ await removeFromFileStoreMap(scopedHash);
  }
  }

@@ -201,7 +203,8 @@ async function CortexFileHandler(context, req) {
  }

  // Check if file already exists (using hash or URL as the key)
- const cacheKey = hash || remoteUrl;
+ // If hash is provided, scope it by container; otherwise use URL as-is
+ const cacheKey = hash ? getScopedHashKey(hash, container) : remoteUrl;
  const exists = await getFileStoreMap(cacheKey);
  if (exists) {
  context.res = {
@@ -255,9 +258,10 @@ async function CortexFileHandler(context, req) {

  if (hash && clearHash) {
  try {
- const hashValue = await getFileStoreMap(hash);
+ const scopedHash = getScopedHashKey(hash, container);
+ const hashValue = await getFileStoreMap(scopedHash);
  if (hashValue) {
- await removeFromFileStoreMap(hash);
+ await removeFromFileStoreMap(scopedHash);
  context.res = {
  status: 200,
  body: `Hash ${hash} removed`,
@@ -279,10 +283,11 @@ async function CortexFileHandler(context, req) {
  }

  if (hash && checkHash) {
- let hashResult = await getFileStoreMap(hash, true); // Skip lazy cleanup to handle it ourselves
+ const scopedHash = getScopedHashKey(hash, container);
+ let hashResult = await getFileStoreMap(scopedHash, true); // Skip lazy cleanup to handle it ourselves

  if (hashResult) {
- context.log(`File exists in map: ${hash}`);
+ context.log(`File exists in map: ${hash} (scoped key: ${scopedHash})`);

  // Log the URL retrieved from Redis before checking existence
  context.log(`Checking existence of URL from Redis: ${hashResult?.url}`);
@@ -301,7 +306,7 @@ async function CortexFileHandler(context, req) {
  context.log(
  `File not found in any storage. Removing from map: ${hash}`,
  );
- await removeFromFileStoreMap(hash);
+ await removeFromFileStoreMap(scopedHash);
  context.res = {
  status: 404,
  body: `Hash ${hash} not found in storage`,
@@ -320,7 +325,7 @@ async function CortexFileHandler(context, req) {
  } catch (error) {
  context.log(`Error restoring to GCS: ${error}`);
  // If restoration fails, remove the hash from the map
- await removeFromFileStoreMap(hash);
+ await removeFromFileStoreMap(scopedHash);
  context.res = {
  status: 404,
  body: `Hash ${hash} not found`,
@@ -378,7 +383,7 @@ async function CortexFileHandler(context, req) {
  } catch (error) {
  console.error("Error restoring from GCS:", error);
  // If restoration fails, remove the hash from the map
- await removeFromFileStoreMap(hash);
+ await removeFromFileStoreMap(scopedHash);
  context.res = {
  status: 404,
  body: `Hash ${hash} not found`,
@@ -396,7 +401,7 @@ async function CortexFileHandler(context, req) {
  : false;
  if (!finalPrimaryCheck && !finalGCSCheck) {
  context.log(`Failed to restore file. Removing from map: ${hash}`);
- await removeFromFileStoreMap(hash);
+ await removeFromFileStoreMap(scopedHash);
  context.res = {
  status: 404,
  body: `Hash ${hash} not found`,
@@ -498,7 +503,7 @@ async function CortexFileHandler(context, req) {
  }

  //update redis timestamp with current time
- await setFileStoreMap(hash, hashResult);
+ await setFileStoreMap(scopedHash, hashResult);

  context.res = {
  status: 200,
@@ -508,7 +513,7 @@ async function CortexFileHandler(context, req) {
  } catch (error) {
  context.log(`Error checking file existence: ${error}`);
  // If there's an error checking file existence, remove the hash from the map
- await removeFromFileStoreMap(hash);
+ await removeFromFileStoreMap(scopedHash);
  context.res = {
  status: 404,
  body: `Hash ${hash} not found`,
@@ -532,7 +537,8 @@ async function CortexFileHandler(context, req) {
  // Use uploadBlob to handle multipart/form-data
  const result = await uploadBlob(context, req, saveToLocal, null, hash, container);
  if (result?.hash && context?.res?.body) {
- await setFileStoreMap(result.hash, context.res.body);
+ const scopedHash = getScopedHashKey(result.hash, result.container || container);
+ await setFileStoreMap(scopedHash, context.res.body);
  }
  return;
  }
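
Note: the net effect of the handler changes above is that every hash-based Redis key is scoped by container, while URL-based keys are untouched. A sketch of the key selection, with illustrative values:

    const cacheKey = hash ? getScopedHashKey(hash, container) : remoteUrl;
    // hash = "abc123", container = "test2" -> "abc123:test2"
    // hash = "abc123", container = null    -> "abc123:" + default container name
    // no hash, remoteUrl = "https://..."   -> the URL itself, unscoped
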
@@ -1,7 +1,28 @@
  import redis from "ioredis";
+ import { getDefaultContainerName } from "./constants.js";

  const connectionString = process.env["REDIS_CONNECTION_STRING"];

+ /**
+ * Generate a scoped hash key for Redis storage
+ * Always includes the container name in the format hash:container
+ * @param {string} hash - The file hash
+ * @param {string} containerName - The container name (optional, defaults to default container)
+ * @returns {string} The scoped hash key
+ */
+ export const getScopedHashKey = (hash, containerName = null) => {
+ if (!hash) return hash;
+
+ // Get the default container name at runtime to support dynamic env changes in tests
+ const defaultContainerName = getDefaultContainerName();
+
+ // Use default container if not provided
+ const container = containerName || defaultContainerName;
+
+ // Always scope by container
+ return `${hash}:${container}`;
+ };
+
  // Create a mock client for test environment when Redis is not configured
  const createMockClient = () => {
  const store = new Map();
@@ -123,7 +144,28 @@ const setFileStoreMap = async (key, value) => {

  const getFileStoreMap = async (key, skipLazyCleanup = false) => {
  try {
- const value = await client.hget("FileStoreMap", key);
+ let value = await client.hget("FileStoreMap", key);
+
+ // Backwards compatibility: if not found and key is for default container, try legacy key
+ if (!value && key && key.includes(':')) {
+ const [hash, containerName] = key.split(':', 2);
+ const defaultContainerName = getDefaultContainerName();
+
+ // If this is the default container, try the legacy key (hash without container)
+ if (containerName === defaultContainerName) {
+ console.log(`Key ${key} not found, trying legacy key ${hash} for backwards compatibility`);
+ value = await client.hget("FileStoreMap", hash);
+
+ // If found with legacy key, migrate it to the new scoped key
+ if (value) {
+ console.log(`Found value with legacy key ${hash}, migrating to new key ${key}`);
+ await client.hset("FileStoreMap", key, value);
+ // Optionally remove the old key after migration
+ // await client.hdel("FileStoreMap", hash);
+ }
+ }
+ }
+
  if (value) {
  try {
  // parse the value back to an object before returning
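
Note: combining getScopedHashKey with the fallback above gives the following lookup behavior; "whispertempfiles" stands in here for whatever the default container resolves to:

    getScopedHashKey("abc123");          // "abc123:whispertempfiles" (default container appended)
    getScopedHashKey("abc123", "test2"); // "abc123:test2"
    // getFileStoreMap("abc123:whispertempfiles") then tries, in order:
    //   1. HGET FileStoreMap abc123:whispertempfiles
    //   2. HGET FileStoreMap abc123   (legacy unscoped key, default container only)
    //   3. on a legacy hit, HSET FileStoreMap abc123:whispertempfiles <value> (migration)
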
@@ -1,18 +1,10 @@
  import { AzureStorageProvider } from "./AzureStorageProvider.js";
  import { GCSStorageProvider } from "./GCSStorageProvider.js";
  import { LocalStorageProvider } from "./LocalStorageProvider.js";
+ import { getCurrentContainerNames, GCS_BUCKETNAME } from "../../constants.js";
  import path from "path";
  import { fileURLToPath } from "url";

- // Lazy-load blob handler constants to avoid blocking module import
- let blobHandlerConstants = null;
- async function getBlobHandlerConstants() {
- if (!blobHandlerConstants) {
- blobHandlerConstants = await import("../../blobHandler.js");
- }
- return blobHandlerConstants;
- }
-
  // Singleton instance for provider caching across the application
  let storageFactoryInstance = null;

@@ -49,7 +41,6 @@ export class StorageFactory {

  async getAzureProvider(containerName = null) {
  // Read container names from environment directly to get current values
- const { getCurrentContainerNames } = await getBlobHandlerConstants();
  const azureStorageContainerNames = getCurrentContainerNames();
  const defaultAzureStorageContainerName = azureStorageContainerNames[0];

@@ -82,7 +73,7 @@
  }
  }
  const provider = new GCSStorageProvider(
  credentials,
- process.env.GCS_BUCKETNAME || "cortextempfiles",
+ GCS_BUCKETNAME,
  );
  this.providers.set(key, provider);
  }
@@ -163,9 +163,10 @@ export class StorageService {
  /**
  * Delete a single file by its hash from both primary and backup storage
  * @param {string} hash - The hash of the file to delete
+ * @param {string} containerName - Optional container name for scoping the hash
  * @returns {Promise<Object>} Object containing deletion results and file info
  */
- async deleteFileByHash(hash) {
+ async deleteFileByHash(hash, containerName = null) {
  await this._initialize();

  if (!hash) {
@@ -175,11 +176,24 @@ export class StorageService {
  const results = [];

  // Get and remove file information from Redis map (non-atomic operations)
- const { getFileStoreMap, removeFromFileStoreMap } = await import("../../redis.js");
- const hashResult = await getFileStoreMap(hash);
+ const { getFileStoreMap, removeFromFileStoreMap, getScopedHashKey, getDefaultContainerName } = await import("../../redis.js");
+ const { getDefaultContainerName: getDefaultContainerNameFromConstants } = await import("../../constants.js");
+ const scopedHash = getScopedHashKey(hash, containerName);
+ const hashResult = await getFileStoreMap(scopedHash);

  if (hashResult) {
- await removeFromFileStoreMap(hash);
+ // Remove from scoped key
+ await removeFromFileStoreMap(scopedHash);
+
+ // Also check and remove legacy key (unscoped) if this is the default container
+ // This handles backwards compatibility with old entries stored without container scoping
+ const defaultContainerName = getDefaultContainerNameFromConstants();
+ const effectiveContainer = containerName || defaultContainerName;
+ if (effectiveContainer === defaultContainerName && scopedHash.includes(':')) {
+ const [legacyHash] = scopedHash.split(':', 2);
+ // Try to remove legacy key - it's okay if it doesn't exist
+ await removeFromFileStoreMap(legacyHash);
+ }
  }

  if (!hashResult) {
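
Note: a hedged usage sketch of the extended signature (the hash value is hypothetical); omitting the container falls back to the default one, and in that case the legacy unscoped Redis entry is cleaned up too:

    await storageService.deleteFileByHash("abc123", "test2"); // removes "abc123:test2"
    await storageService.deleteFileByHash("abc123");          // removes "abc123:<default>" plus legacy "abc123"
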
@@ -6,6 +6,7 @@ import cors from "cors";
  import { readFileSync } from "fs";

  import { publicIpv4 } from "public-ip";
+ import { AZURE_STORAGE_CONTAINER_NAMES, getDefaultContainerName } from "./blobHandler.js";

  // When running under tests we want all generated URLs to resolve to the
  // locally-running server, otherwise checks like HEAD requests inside the
@@ -93,6 +94,10 @@ if (import.meta.url === `file://${process.argv[1]}`) {
  console.log(
  `Cortex File Handler v${version} running on port ${port} (includes legacy MediaFileChunker endpoint)`,
  );
+
+ // Debug: Show configured container names
+ console.log(`Configured container names: ${AZURE_STORAGE_CONTAINER_NAMES.join(', ')}`);
+ console.log(`Default container name: ${getDefaultContainerName()}`);
  });
  });
  }
@@ -12,7 +12,7 @@ import {
  deleteGCS,
  getBlobClient,
  AZURE_STORAGE_CONTAINER_NAMES,
- DEFAULT_AZURE_STORAGE_CONTAINER_NAME,
+ getDefaultContainerName,
  isValidContainerName,
  } from "../src/blobHandler.js";
  import { urlExists } from "../src/helper.js";
@@ -330,10 +330,11 @@ test("AZURE_STORAGE_CONTAINER_NAMES should be an array with at least one contain
  });
  });

- test("DEFAULT_AZURE_STORAGE_CONTAINER_NAME should be the first container", (t) => {
- t.is(DEFAULT_AZURE_STORAGE_CONTAINER_NAME, AZURE_STORAGE_CONTAINER_NAMES[0]);
- t.truthy(DEFAULT_AZURE_STORAGE_CONTAINER_NAME);
- t.is(typeof DEFAULT_AZURE_STORAGE_CONTAINER_NAME, 'string');
+ test("getDefaultContainerName should return the first container", (t) => {
+ const defaultContainer = getDefaultContainerName();
+ t.is(defaultContainer, AZURE_STORAGE_CONTAINER_NAMES[0]);
+ t.truthy(defaultContainer);
+ t.is(typeof defaultContainer, 'string');
  });

  test("isValidContainerName should validate container names correctly", (t) => {
@@ -1,7 +1,7 @@
  import test from "ava";
  import {
  AZURE_STORAGE_CONTAINER_NAMES,
- DEFAULT_AZURE_STORAGE_CONTAINER_NAME,
+ getDefaultContainerName,
  isValidContainerName,
  getCurrentContainerNames
  } from "../src/blobHandler.js";
@@ -119,15 +119,16 @@ test("parseContainerNames should handle only commas", (t) => {
  t.is(result[3], "");
  });

- test("DEFAULT_AZURE_STORAGE_CONTAINER_NAME should be the first container in the list", (t) => {
+ test("getDefaultContainerName should return the first container in the list", (t) => {
  // Test with current module exports (these are loaded at import time)
  // The default should be the first item in the array
- t.is(DEFAULT_AZURE_STORAGE_CONTAINER_NAME, AZURE_STORAGE_CONTAINER_NAMES[0]);
+ const defaultContainer = getDefaultContainerName();
+ t.is(defaultContainer, getCurrentContainerNames()[0]);

  // Additional validation that it's a non-empty string
- t.truthy(DEFAULT_AZURE_STORAGE_CONTAINER_NAME);
- t.is(typeof DEFAULT_AZURE_STORAGE_CONTAINER_NAME, 'string');
- t.true(DEFAULT_AZURE_STORAGE_CONTAINER_NAME.length > 0);
+ t.truthy(defaultContainer);
+ t.is(typeof defaultContainer, 'string');
+ t.true(defaultContainer.length > 0);
  });

  test("isValidContainerName should return true for valid container names", (t) => {
@@ -11,7 +11,7 @@ import {
  uploadBlob,
  isValidContainerName,
  AZURE_STORAGE_CONTAINER_NAMES,
- DEFAULT_AZURE_STORAGE_CONTAINER_NAME,
+ getDefaultContainerName,
  } from "../src/blobHandler.js";
  import CortexFileHandler from "../src/index.js";
  import {
@@ -315,8 +315,9 @@ test("should use default container when no container specified", async (t) => {
  }

  // Test that default container is first in the list
- t.is(DEFAULT_AZURE_STORAGE_CONTAINER_NAME, AZURE_STORAGE_CONTAINER_NAMES[0]);
- t.true(isValidContainerName(DEFAULT_AZURE_STORAGE_CONTAINER_NAME));
+ const defaultContainer = getDefaultContainerName();
+ t.is(defaultContainer, AZURE_STORAGE_CONTAINER_NAMES[0]);
+ t.true(isValidContainerName(defaultContainer));

  // Create a test file
  const testContent = "test content for default container";
@@ -0,0 +1,415 @@
+ import test from "ava";
+ import axios from "axios";
+ import FormData from "form-data";
+ import fs from "fs";
+ import os from "os";
+ import path from "path";
+ import { v4 as uuidv4 } from "uuid";
+ import { port } from "../src/start.js";
+ import { startTestServer } from "./testUtils.helper.js";
+
+ // Test server setup
+ let baseUrl;
+ let server;
+
+ // Start test server before running tests
+ test.before(async (t) => {
+ baseUrl = `http://localhost:${port}/api/CortexFileHandler`;
+
+ // Start the test server
+ server = await startTestServer();
+ });
+
+ // Clean up server after tests
+ test.after.always(async (t) => {
+ if (server) {
+ await new Promise((resolve, reject) => {
+ server.close((err) => {
+ if (err) reject(err);
+ else resolve();
+ });
+ });
+ }
+ });
+
+ // Helper to create a test file
+ async function createTestFile(content, extension = "txt", filename = null) {
+ const tempDir = os.tmpdir();
+ const actualFilename = filename || `test-${uuidv4()}.${extension}`;
+ const filePath = path.join(tempDir, actualFilename);
+
+ if (extension === "txt") {
+ fs.writeFileSync(filePath, content);
+ } else {
+ throw new Error(`Unsupported file extension: ${extension}`);
+ }
+
+ return filePath;
+ }
+
+ // Helper to upload a file with hash and container
+ async function uploadFileWithHashAndContainer(filePath, hash, containerName) {
+ const form = new FormData();
+ // Append hash and container BEFORE file so they're processed first
+ form.append("hash", hash);
+ if (containerName) {
+ form.append("container", containerName);
+ }
+ form.append("file", fs.createReadStream(filePath));
+
+ const response = await axios.post(baseUrl, form, {
+ headers: {
+ ...form.getHeaders(),
+ "Content-Type": "multipart/form-data",
+ },
+ validateStatus: (status) => true,
+ timeout: 10000,
+ });
+
+ return response;
+ }
+
+ // Helper to check if hash exists with optional container
+ async function checkHashExists(hash, containerName = null) {
+ const params = {
+ hash,
+ checkHash: true,
+ };
+
+ if (containerName) {
+ params.container = containerName;
+ }
+
+ const response = await axios.get(baseUrl, {
+ params,
+ validateStatus: (status) => true,
+ timeout: 10000,
+ });
+
+ return response;
+ }
+
+ // Helper to cleanup hash
+ async function cleanupHash(hash, containerName = null) {
+ const params = {
+ hash,
+ clearHash: true,
+ };
+
+ if (containerName) {
+ params.container = containerName;
+ }
+
+ try {
+ await axios.get(baseUrl, {
+ params,
+ validateStatus: (status) => true,
+ timeout: 5000,
+ });
+ } catch (error) {
+ // Ignore cleanup errors
+ }
+ }
+
+ // Main test: Hash scoping across containers
+ test.serial("should scope hash by container - same hash different containers should be independent", async (t) => {
+ if (!process.env.AZURE_STORAGE_CONNECTION_STRING) {
+ t.pass("Skipping test - Azure not configured");
+ return;
+ }
+
+ const originalEnv = process.env.AZURE_STORAGE_CONTAINER_NAME;
+ process.env.AZURE_STORAGE_CONTAINER_NAME = "test1,test2,test3";
+
+ try {
+ const testHash = `hash-scope-test-${uuidv4()}`;
+ const contentA = "Content for container A";
+ const contentB = "Content for container B";
+ const fileA = await createTestFile(contentA, "txt", "fileA.txt");
+ const fileB = await createTestFile(contentB, "txt", "fileB.txt");
+
+ // Upload file to container test1 with hash
+ const uploadA = await uploadFileWithHashAndContainer(fileA, testHash, "test1");
+ t.is(uploadA.status, 200, "Upload to test1 should succeed");
+ t.truthy(uploadA.data.url, "Upload A should have URL");
+
+ // Wait for Redis to update
+ await new Promise((resolve) => setTimeout(resolve, 1000));
+
+ // Upload file to container test2 with SAME hash
+ const uploadB = await uploadFileWithHashAndContainer(fileB, testHash, "test2");
+ t.is(uploadB.status, 200, "Upload to test2 should succeed");
+ t.truthy(uploadB.data.url, "Upload B should have URL");
+
+ // Wait for Redis to update
+ await new Promise((resolve) => setTimeout(resolve, 1000));
+
+ // Check hash in container test1 - should return file A
+ const checkA = await checkHashExists(testHash, "test1");
+ t.is(checkA.status, 200, "Hash should exist in test1");
+ t.is(checkA.data.url, uploadA.data.url, "Should return URL from container test1");
+
+ // Check hash in container test2 - should return file B
+ const checkB = await checkHashExists(testHash, "test2");
+ t.is(checkB.status, 200, "Hash should exist in test2");
+ t.is(checkB.data.url, uploadB.data.url, "Should return URL from container test2");
+
+ // Verify the URLs are different
+ t.not(checkA.data.url, checkB.data.url, "URLs should be different for same hash in different containers");
+
+ // Verify the file contents are different
+ const fileResponseA = await axios.get(uploadA.data.url, {
+ validateStatus: (status) => true,
+ timeout: 5000,
+ });
+ const fileResponseB = await axios.get(uploadB.data.url, {
+ validateStatus: (status) => true,
+ timeout: 5000,
+ });
+
+ t.is(fileResponseA.data, contentA, "File A should have correct content");
+ t.is(fileResponseB.data, contentB, "File B should have correct content");
+
+ // Cleanup
+ fs.unlinkSync(fileA);
+ fs.unlinkSync(fileB);
+ await cleanupHash(testHash, "test1");
+ await cleanupHash(testHash, "test2");
+
+ // Delete the actual files
+ await axios.delete(baseUrl, {
+ params: {
+ hash: testHash,
+ container: "test1",
+ },
+ validateStatus: (status) => true,
+ });
+ await axios.delete(baseUrl, {
+ params: {
+ hash: testHash,
+ container: "test2",
+ },
+ validateStatus: (status) => true,
+ });
+ } finally {
+ // Restore environment
+ if (originalEnv) {
+ process.env.AZURE_STORAGE_CONTAINER_NAME = originalEnv;
+ } else {
+ delete process.env.AZURE_STORAGE_CONTAINER_NAME;
+ }
+ }
+ });
+
+ // Test: Hash in default container should be scoped with container name
+ test.serial("should scope hash for default container with container name", async (t) => {
+ if (!process.env.AZURE_STORAGE_CONNECTION_STRING) {
+ t.pass("Skipping test - Azure not configured");
+ return;
+ }
+
+ const originalEnv = process.env.AZURE_STORAGE_CONTAINER_NAME;
+ const originalDefaultEnv = process.env.DEFAULT_AZURE_STORAGE_CONTAINER_NAME;
+ process.env.AZURE_STORAGE_CONTAINER_NAME = "test1,test2,test3";
+ // Ensure test1 is the default container
+ delete process.env.DEFAULT_AZURE_STORAGE_CONTAINER_NAME;
+
+ try {
+ const testHash = `hash-default-test-${uuidv4()}`;
+ const content = "Content for default container";
+ const file = await createTestFile(content, "txt", "fileDefault.txt");
+
+ // Upload file to default container (test1) with hash
+ // We upload WITHOUT specifying container, so it should use default
+ // Now it will be stored as hash:test1 (always scoped)
+ const uploadDefault = await uploadFileWithHashAndContainer(file, testHash, null);
+ t.is(uploadDefault.status, 200, "Upload to default should succeed");
+ t.truthy(uploadDefault.data.url, "Upload should have URL");
+
+ // Wait for Redis to update
+ await new Promise((resolve) => setTimeout(resolve, 1000));
+
+ // Check hash without container parameter - should work for default container
+ const checkWithoutContainer = await checkHashExists(testHash, null);
+ t.is(checkWithoutContainer.status, 200, "Hash should exist without container param");
+ t.is(checkWithoutContainer.data.url, uploadDefault.data.url, "Should return URL from default container");
+
+ // Check hash with explicit default container parameter - should also work
+ const checkWithDefaultContainer = await checkHashExists(testHash, "test1");
+ t.is(checkWithDefaultContainer.status, 200, "Hash should exist with default container param");
+ t.is(checkWithDefaultContainer.data.url, uploadDefault.data.url, "Should return same URL with default container param");
+
+ // Cleanup
+ fs.unlinkSync(file);
+ await cleanupHash(testHash, null);
+
+ // Delete the actual file
+ await axios.delete(baseUrl, {
+ params: {
+ hash: testHash,
+ },
+ validateStatus: (status) => true,
+ });
+ } finally {
+ // Restore environment
+ if (originalEnv) {
+ process.env.AZURE_STORAGE_CONTAINER_NAME = originalEnv;
+ } else {
+ delete process.env.AZURE_STORAGE_CONTAINER_NAME;
+ }
+ if (originalDefaultEnv) {
+ process.env.DEFAULT_AZURE_STORAGE_CONTAINER_NAME = originalDefaultEnv;
+ } else {
+ delete process.env.DEFAULT_AZURE_STORAGE_CONTAINER_NAME;
+ }
+ }
+ });
+
+ // Test: Backwards compatibility - legacy hash without container should be found for default container
+ test.serial("should support backwards compatibility for legacy hashes in default container", async (t) => {
+ if (!process.env.AZURE_STORAGE_CONNECTION_STRING) {
+ t.pass("Skipping test - Azure not configured");
+ return;
+ }
+
+ const originalEnv = process.env.AZURE_STORAGE_CONTAINER_NAME;
+ const originalDefaultEnv = process.env.DEFAULT_AZURE_STORAGE_CONTAINER_NAME;
+ process.env.AZURE_STORAGE_CONTAINER_NAME = "test1,test2,test3";
+ // Ensure test1 is the default container for backwards compatibility logic
+ delete process.env.DEFAULT_AZURE_STORAGE_CONTAINER_NAME;
+
+ try {
+ const testHash = `hash-legacy-test-${uuidv4()}`;
+ const content = "Content for legacy test";
+ const file = await createTestFile(content, "txt", "fileLegacy.txt");
+
+ // Upload file to default container (test1) with hash - this creates the scoped entry
+ const upload = await uploadFileWithHashAndContainer(file, testHash, "test1");
+ t.is(upload.status, 200, "Upload to test1 should succeed");
+ t.truthy(upload.data.url, "Upload should have URL");
+
+ // Wait for Redis to update
+ await new Promise((resolve) => setTimeout(resolve, 1000));
+
+ // Now simulate a legacy entry by also storing the hash WITHOUT container scope
+ // This mimics the old behavior before container scoping was added
+ const { client } = await import("../src/redis.js");
+ const legacyData = {
+ url: upload.data.url, // Use the real uploaded URL
+ blobName: upload.data.blobName || upload.data.filename, // Include blobName for proper restoration
+ filename: upload.data.filename,
+ timestamp: new Date().toISOString(),
+ };
+ await client.hset("FileStoreMap", testHash, JSON.stringify(legacyData));
+
+ // Wait for Redis to update
+ await new Promise((resolve) => setTimeout(resolve, 500));
+
+ // Delete the scoped key to simulate only having the legacy entry
+ const { getScopedHashKey } = await import("../src/redis.js");
+ const scopedKey = getScopedHashKey(testHash, "test1");
+ await client.hdel("FileStoreMap", scopedKey);
+
+ // Wait a bit for Redis
+ await new Promise((resolve) => setTimeout(resolve, 500));
+
+ // Check hash with default container parameter - should find the legacy entry
+ const checkWithDefaultContainer = await checkHashExists(testHash, "test1");
+ t.is(checkWithDefaultContainer.status, 200, "Legacy hash should be found with default container param");
+ t.is(checkWithDefaultContainer.data.url, legacyData.url, "Should return URL from legacy entry");
+
+ // Check hash without container parameter - should also find the legacy entry
+ const checkWithoutContainer = await checkHashExists(testHash, null);
+ t.is(checkWithoutContainer.status, 200, "Legacy hash should be found without container param");
+ t.is(checkWithoutContainer.data.url, legacyData.url, "Should return URL from legacy entry");
+
+ // After migration, the new scoped key should exist
+ const { getFileStoreMap } = await import("../src/redis.js");
+ const migratedValue = await getFileStoreMap(scopedKey, true); // Skip lazy cleanup
+ t.truthy(migratedValue, "Migrated value should exist with new scoped key");
+ t.is(migratedValue.url, legacyData.url, "Migrated value should have same URL");
+
+ // Cleanup
+ fs.unlinkSync(file);
+ await cleanupHash(testHash, "test1");
+ await cleanupHash(testHash, null);
+
+ // Delete the actual file
+ await axios.delete(baseUrl, {
+ params: {
+ hash: testHash,
+ container: "test1",
+ },
+ validateStatus: (status) => true,
+ });
+ } finally {
+ // Restore environment
+ if (originalEnv) {
+ process.env.AZURE_STORAGE_CONTAINER_NAME = originalEnv;
+ } else {
+ delete process.env.AZURE_STORAGE_CONTAINER_NAME;
+ }
+ if (originalDefaultEnv) {
+ process.env.DEFAULT_AZURE_STORAGE_CONTAINER_NAME = originalDefaultEnv;
+ } else {
+ delete process.env.DEFAULT_AZURE_STORAGE_CONTAINER_NAME;
+ }
+ }
+ });
+
+ // Test: Hash check with wrong container should return 404
+ test.serial("should return 404 when checking hash with wrong container", async (t) => {
+ if (!process.env.AZURE_STORAGE_CONNECTION_STRING) {
+ t.pass("Skipping test - Azure not configured");
+ return;
+ }
+
+ const originalEnv = process.env.AZURE_STORAGE_CONTAINER_NAME;
+ process.env.AZURE_STORAGE_CONTAINER_NAME = "test1,test2,test3";
+
+ try {
+ const testHash = `hash-wrong-container-test-${uuidv4()}`;
+ const content = "Content for specific container";
+ const file = await createTestFile(content, "txt", "fileWrong.txt");
+
+ // Upload file to container test1 with hash
+ const upload = await uploadFileWithHashAndContainer(file, testHash, "test1");
+ t.is(upload.status, 200, "Upload to test1 should succeed");
+
+ // Wait for Redis to update
+ await new Promise((resolve) => setTimeout(resolve, 1000));
+
+ // Check hash in container test2 (wrong container) - should return 404
+ const checkWrong = await checkHashExists(testHash, "test2");
+ t.is(checkWrong.status, 404, "Hash should not exist in test2");
+
+ // Check hash in container test3 (also wrong) - should return 404
+ const checkWrong2 = await checkHashExists(testHash, "test3");
+ t.is(checkWrong2.status, 404, "Hash should not exist in test3");
+
+ // Check hash in container test1 (correct container) - should return 200
+ const checkCorrect = await checkHashExists(testHash, "test1");
+ t.is(checkCorrect.status, 200, "Hash should exist in test1");
+
+ // Cleanup
+ fs.unlinkSync(file);
+ await cleanupHash(testHash, "test1");
+
+ // Delete the actual file
+ await axios.delete(baseUrl, {
+ params: {
+ hash: testHash,
+ container: "test1",
+ },
+ validateStatus: (status) => true,
+ });
+ } finally {
+ // Restore environment
+ if (originalEnv) {
+ process.env.AZURE_STORAGE_CONTAINER_NAME = originalEnv;
+ } else {
+ delete process.env.AZURE_STORAGE_CONTAINER_NAME;
+ }
+ }
+ });
+
package/lib/crypto.js CHANGED
@@ -2,6 +2,18 @@
  import logger from './logger.js';
  import crypto from 'crypto';

+ // Helper function to generate a preview of a message for logging
+ function getMessagePreview(message, maxLength = 50) {
+ if (typeof message === 'string') {
+ return message.substring(0, maxLength);
+ }
+ try {
+ return JSON.stringify(message).substring(0, maxLength);
+ } catch (e) {
+ return String(message).substring(0, maxLength);
+ }
+ }
+
  // Encryption function
  function encrypt(text, key) {
  if (!key) { return text; }
@@ -22,6 +34,20 @@ function encrypt(text, key) {
  function decrypt(message, key) {
  if (!key) { return message; }
  try {
+ // Quick type check - if not string, convert or skip
+ if (typeof message !== 'string') {
+ if (Buffer.isBuffer(message)) {
+ message = message.toString('utf8');
+ } else if (message === null || message === undefined) {
+ logger.warn(`Decryption skipped: message is ${message === null ? 'null' : 'undefined'}`);
+ return null;
+ } else {
+ const preview = getMessagePreview(message);
+ logger.warn(`Decryption skipped: message is not a string (type: ${typeof message}, preview: ${preview})`);
+ return null;
+ }
+ }
+
  key = tryBufferKey(key);
  let parts = message.split(':');
  let iv = Buffer.from(parts.shift(), 'hex');
@@ -31,7 +57,8 @@ function decrypt(message, key) {
  decrypted += decipher.final('utf8');
  return decrypted;
  } catch (error) {
- logger.error(`Decryption failed: ${error.message}`);
+ const preview = getMessagePreview(message);
+ logger.error(`Decryption failed: ${error.message} (preview: ${preview})`);
  return null;
  }
  }
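
Note: with the guard above, decrypt() degrades gracefully on non-string input instead of throwing from message.split(':'). A behavior sketch (the key value is hypothetical, and decrypt is assumed to be the function shown in this diff):

    const key = "0123456789abcdef0123456789abcdef"; // hypothetical key
    decrypt(null, key);                 // warns "message is null", returns null
    decrypt({ foo: "bar" }, key);       // warns with a JSON preview, returns null
    decrypt(Buffer.from("iv:ct"), key); // coerced to a utf8 string, then decrypted normally
                                        // (still returns null via the catch if the payload is invalid)
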
package/lib/util.js CHANGED
@@ -475,8 +475,12 @@ const uploadImageToCloud = async (base64Data, mimeType, pathwayResolver = null)
  contentType: mimeType
  });

+ // Append requestId parameter (preserving existing query parameters like subscription-key)
+ const separator = fileHandlerUrl.includes('?') ? '&' : '?';
+ const uploadUrl = `${fileHandlerUrl}${separator}requestId=${requestId}`;
+
  // Upload file
- const uploadResponse = await axios.post(`${fileHandlerUrl}?requestId=${requestId}`, formData, {
+ const uploadResponse = await axios.post(uploadUrl, formData, {
  headers: {
  ...formData.getHeaders()
  },
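
Note: this fixes URL construction when fileHandlerUrl already carries a query string; previously a second '?' was appended. An illustrative before/after (the URL is made up):

    // fileHandlerUrl = "https://files.example.com/api?subscription-key=abc"
    // before: ".../api?subscription-key=abc?requestId=123"  (malformed, two '?')
    // after:  ".../api?subscription-key=abc&requestId=123"
    const separator = fileHandlerUrl.includes('?') ? '&' : '?';
    const uploadUrl = `${fileHandlerUrl}${separator}requestId=${requestId}`;
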
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@aj-archipelago/cortex",
- "version": "1.4.4",
+ "version": "1.4.6",
  "description": "Cortex is a GraphQL API for AI. It provides a simple, extensible interface for using AI services from OpenAI, Azure and others.",
  "private": false,
  "repository": {
@@ -10,6 +10,30 @@ import { Prompt } from '../../../server/prompt.js';
  import { getToolsForEntity, loadEntityConfig } from './tools/shared/sys_entity_tools.js';
  import CortexResponse from '../../../lib/cortexResponse.js';

+ // Helper function to generate a smart error response using the agent
+ async function generateErrorResponse(error, args, pathwayResolver) {
+ const errorMessage = error?.message || error?.toString() || String(error);
+
+ // Clear any accumulated errors since we're handling them intelligently
+ pathwayResolver.errors = [];
+
+ // Use sys_generator_error to create a smart response
+ try {
+ const errorResponse = await callPathway('sys_generator_error', {
+ ...args,
+ text: errorMessage,
+ chatHistory: args.chatHistory || [],
+ stream: false
+ }, pathwayResolver);
+
+ return errorResponse;
+ } catch (errorResponseError) {
+ // Fallback if sys_generator_error itself fails
+ logger.error(`Error generating error response: ${errorResponseError.message}`);
+ return `I apologize, but I encountered an error while processing your request: ${errorMessage}. Please try again or contact support if the issue persists.`;
+ }
+ }
+
  export default {
  emulateOpenAIChatModel: 'cortex-agent',
  useInputChunking: false,
@@ -234,11 +258,20 @@ export default {
  // Add a line break to avoid running output together
  await say(pathwayResolver.rootRequestId || pathwayResolver.requestId, `\n`, 1000, false, false);

- return await pathwayResolver.promptAndParse({
- ...args,
- tools: entityToolsOpenAiFormat,
- tool_choice: "auto",
- });
+ try {
+ return await pathwayResolver.promptAndParse({
+ ...args,
+ tools: entityToolsOpenAiFormat,
+ tool_choice: "auto",
+ });
+ } catch (parseError) {
+ // If promptAndParse fails, generate error response instead of re-throwing
+ logger.error(`Error in promptAndParse during tool callback: ${parseError.message}`);
+ const errorResponse = await generateErrorResponse(parseError, args, pathwayResolver);
+ // Ensure errors are cleared before returning
+ pathwayResolver.errors = [];
+ return errorResponse;
+ }
  }
  },

@@ -396,6 +429,11 @@ export default {
  tool_choice: memoryLookupRequired ? "required" : "auto"
  });

+ // Handle null response (can happen when ModelExecutor catches an error)
+ if (!response) {
+ throw new Error('Model execution returned null - the model request likely failed');
+ }
+
  let toolCallback = pathwayResolver.pathway.toolCallback;

  // Handle both CortexResponse objects and plain responses
@@ -403,15 +441,38 @@ export default {
  (response instanceof CortexResponse && response.hasToolCalls()) ||
  (typeof response === 'object' && response.tool_calls)
  )) {
- response = await toolCallback(args, response, pathwayResolver);
+ try {
+ response = await toolCallback(args, response, pathwayResolver);
+
+ // Handle null response from tool callback
+ if (!response) {
+ throw new Error('Tool callback returned null - a model request likely failed');
+ }
+ } catch (toolError) {
+ // Handle errors in tool callback
+ logger.error(`Error in tool callback: ${toolError.message}`);
+ // Generate error response for tool callback errors
+ const errorResponse = await generateErrorResponse(toolError, args, pathwayResolver);
+ // Ensure errors are cleared before returning
+ pathwayResolver.errors = [];
+ return errorResponse;
+ }
  }

  return response;

  } catch (e) {
- pathwayResolver.logError(e);
- const chatResponse = await callPathway('sys_generator_quick', {...args, model: styleModel, stream: false}, pathwayResolver);
- return chatResponse;
+ logger.error(`Error in sys_entity_agent: ${e.message}`);
+
+ // Generate a smart error response instead of throwing
+ // Note: We don't call logError here because generateErrorResponse will clear errors
+ // and we want to handle the error gracefully rather than tracking it
+ const errorResponse = await generateErrorResponse(e, args, pathwayResolver);
+
+ // Ensure errors are cleared before returning (in case any were added during error response generation)
+ pathwayResolver.errors = [];
+
+ return errorResponse;
  }
  }
  };
@@ -7,7 +7,7 @@ export default {
  {"role": "system", "content": `{{renderTemplate AI_MEMORY}}\n\n{{renderTemplate AI_DIRECTIVES}}\n\n{{renderTemplate AI_COMMON_INSTRUCTIONS}}\n\n{{renderTemplate AI_EXPERTISE}}\n\n{{renderTemplate AI_CONVERSATION_HISTORY}}\n\nYou were trying to fulfill the user's last request in the above conversation, but ran into an error. You cannot resolve this error.\n{{renderTemplate AI_DATETIME}}`},
  {
  "role": "user",
- "content": `The model that you were trying to use to fulfill the user's request returned the following error(s): {{{text}}}. Please let them know what happened. Your response should be concise, fit the rest of the conversation, include detail appropriate for the technical level of the user if you can determine it, and be appropriate for the context. You cannot resolve this error.`
+ "content": `The model that you were trying to use to fulfill the user's request returned the following error(s): {{{text}}}. Please let them know what happened. Your response should be concise, fit the rest of the conversation, include detail appropriate for the technical level of the user if you can determine it, and be appropriate for the context. You cannot resolve this error.\n\nIf this error was likely caused by a file in the chat history, you should tell the user they may need to delete the file from the chat history or open a new chat to continue the conversation. Re-uploading the file will not usually work if the problem is the file itself.`
  },
  ]}),
  ],
@@ -18,6 +18,6 @@ export default {
  aiName: "Jarvis",
  language: "English",
  },
- model: 'oai-gpt4o',
+ model: 'oai-gpt5-chat',
  useInputChunking: false,
  }
@@ -10,7 +10,7 @@ export default {
  {
  role: "system",
  content: `You are a UI/UX expert assistant. Your task is to help Al Jazeera employees design and create applets for company use, or discuss the design of such applets.
-
+
  Each applet is a single page application that should be responsive to the screen size, accessible, secure, and performant.

  {{#if currentHtml}}
@@ -20,73 +20,19 @@ export default {
  {{{currentHtml}}}
  </APPLET>

- **IMPORTANT: When modifying an existing applet, you have TWO options:**
-
- 1. **For targeted changes** (adding a feature, fixing a bug, updating styles, etc.):
- - Generate a **unified diff patch** (git-style diff format)
- - Only include the lines that changed
- - Use standard unified diff format with hunk headers (@@)
- - Wrap the diff in <APPLET> tags
-
- 2. **For major changes** (complete redesign, restructure, or when diff would be too complex):
- - Generate the **complete HTML and JavaScript code** with all changes
- - Wrap the complete code in <APPLET> tags
-
- **How to generate a unified diff:**
-
- When making small to moderate changes, use the unified diff format. Example:
-
- <APPLET>
- Index: applet.html
- ===================================================================
- --- applet.html
- +++ applet.html
- @@ -10,7 +10,8 @@
- <button id="myButton">Click me</button>
- + <p>New paragraph added</p>
- </div>
- </body>
- </APPLET>
-
- Or use the minimal format (just the hunks):
-
- <APPLET>
- @@ -10,7 +10,8 @@
- <button id="myButton">Click me</button>
- + <p>New paragraph added</p>
- </div>
- </APPLET>
-
- **Diff format guidelines:**
- - Lines starting with \`-\` indicate deletions
- - Lines starting with \`+\` indicate additions
- - Lines starting with a space are context lines (unchanged)
- - The \`@@\` line shows the hunk header with line numbers
- - Include enough context lines around changes (typically 2-3 lines)
-
- **When to use full HTML vs diff:**
- - Use **diff** for: Adding features, fixing bugs, updating styles, changing text, modifying functions
- - Use **full HTML** for: Complete redesigns, major structural changes, or when the diff would be larger than the original file
-
+ When modifying an existing applet, you should:
+ - Only make the specific changes requested by the user
+ - Preserve all existing structure, classes, and functionality not related to the requested changes (minimize diffs)
+ - Return the complete HTML and JavaScript code including your modifications
  {{/if}}
-
+
  CODING GUIDELINES:
-
- - If you are asked to **create a new applet**, your response must include the complete HTML and JavaScript code in a single block. Only one code block should be returned in your response.
-
- - If you are asked to **modify an existing applet**:
- - For targeted changes: Generate a unified diff patch wrapped in <APPLET> tags
- - For major changes: Generate complete HTML wrapped in <APPLET> tags
-
- - **CRITICAL: The complete applet code OR diff patch MUST be surrounded by <APPLET> and </APPLET> tags. THIS IS MANDATORY** - otherwise the parser will not pick up the code. These are reserved tags and should not be used for any other purpose - there should be exactly one <APPLET> tag and one </APPLET> tag in every coding response.
-
- - In the assistant responses you see in your chat history, the <APPLET> tags have been filtered out so don't take previous assistant responses as an example of how to structure your response - if you want to change code, you MUST include the code or diff in an <APPLET> tag in your response.
-
- - **CRITICAL: Always implement actual functionality** - never use placeholders, mock data, or TODO comments. Every UI component should be fully functional and ready for production use. Where possible, use the internal REST endpoints provided below to accomplish tasks instead of using a third party service.
-
- - When making modifications, preserve all existing structure, classes, and functionality not related to the requested changes. Only modify what is necessary.
-
- After you have provided the code or diff, you should include a brief explanation of the changes you made and why you made them in your response. Keep this very short and concise.
+ - If you are asked to create or make changes to an applet, your response must include a complete rewrite of the HTML and JavaScript code with your changes in a single block (no diffs or partial code). Only one code block should be returned in your response and its contents will completely replace the existing code of the applet.
+ - CRITICAL: The complete applet code MUST be surrounded by <APPLET> and </APPLET> tags. THIS IS MANDATORY - otherwise the parser will not pick up the code. These are reserved tags and should not be used for any other purpose - there should be exactly one <APPLET> tag and one </APPLET> tag in every coding response.
+ - In the assistant responses you see in your chat history, the <APPLET> tags have been filtered out so don't take previous assistant responses as an example of how to structure your response - if you want to change code, you MUST include the complete code in an <APPLET> tag in your response.
+ - CRITICAL: Always implement actual functionality - never use placeholders, mock data, or TODO comments. Every UI component should be fully functional and ready for production use. Where possible, use the internal REST endpoints provided below to accomplish tasks instead of using a third party service.
+
+ After you have provided the complete code, you should include a brief explanation of the changes you made and why you made them in your response. Keep this very short and concise.

  {{#if promptEndpoint}}
  You have access to a REST endpoint at {{promptEndpoint}} that can be used to execute prompts. This endpoint supports both direct prompts and prompts by ID, and can handle multimodal content including files and images.
@@ -707,6 +653,7 @@ export default {
  currentHtml: "",
  promptDetails: "[]",
  },
+ // model: 'oai-gpt41',
  model: 'gemini-pro-25-vision',
  timeout: 600,
  stream: true,