@promptbook/node 0.110.0-4 → 0.110.0-5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/esm/index.es.js CHANGED
@@ -34,7 +34,7 @@ const BOOK_LANGUAGE_VERSION = '2.0.0';
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
- const PROMPTBOOK_ENGINE_VERSION = '0.110.0-4';
+ const PROMPTBOOK_ENGINE_VERSION = '0.110.0-5';
  /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -23256,18 +23256,6 @@ class OpenAiExecutionTools extends OpenAiCompatibleExecutionTools {
  get profile() {
  return OPENAI_PROVIDER_PROFILE;
  }
- /*
- Note: Commenting this out to avoid circular dependency
- /**
- * Create (sub)tools for calling OpenAI API Assistants
- *
- * @param assistantId Which assistant to use
- * @returns Tools for calling OpenAI API Assistants with same token
- * /
- public createAssistantSubtools(assistantId: string_token): OpenAiAssistantExecutionTools {
- return new OpenAiAssistantExecutionTools({ ...this.options, assistantId });
- }
- */
  /**
  * List all available models (non dynamically)
  *
@@ -23302,207 +23290,6 @@ class OpenAiExecutionTools extends OpenAiCompatibleExecutionTools {
23302
23290
  }
23303
23291
  }
23304
23292
 
23305
- /**
23306
- * Execution Tools for calling OpenAI API using the Responses API (Agents)
23307
- *
23308
- * @public exported from `@promptbook/openai`
23309
- */
23310
- class OpenAiAgentExecutionTools extends OpenAiExecutionTools {
23311
- constructor(options) {
23312
- super(options);
23313
- this.vectorStoreId = options.vectorStoreId;
23314
- }
23315
- get title() {
23316
- return 'OpenAI Agent';
23317
- }
23318
- get description() {
23319
- return 'Use OpenAI Responses API (Agentic)';
23320
- }
23321
- /**
23322
- * Calls OpenAI API to use a chat model with streaming.
23323
- */
23324
- async callChatModelStream(prompt, onProgress) {
23325
- if (this.options.isVerbose) {
23326
- console.info('💬 OpenAI Agent callChatModel call', { prompt });
23327
- }
23328
- const { content, parameters, modelRequirements } = prompt;
23329
- const client = await this.getClient();
23330
- if (modelRequirements.modelVariant !== 'CHAT') {
23331
- throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
23332
- }
23333
- const rawPromptContent = templateParameters(content, {
23334
- ...parameters,
23335
- modelName: 'agent',
23336
- });
23337
- // Build input items
23338
- const input = []; // TODO: Type properly when OpenAI types are updated
23339
- // Add previous messages from thread (if any)
23340
- if ('thread' in prompt && Array.isArray(prompt.thread)) {
23341
- const previousMessages = prompt.thread.map((msg) => ({
23342
- role: msg.sender === 'assistant' ? 'assistant' : 'user',
23343
- content: msg.content,
23344
- }));
23345
- input.push(...previousMessages);
23346
- }
23347
- // Add current user message
23348
- input.push({
23349
- role: 'user',
23350
- content: rawPromptContent,
23351
- });
23352
- // Prepare tools
23353
- const tools = modelRequirements.tools ? mapToolsToOpenAi(modelRequirements.tools) : undefined;
23354
- // Add file_search if vector store is present
23355
- const agentTools = tools ? [...tools] : [];
23356
- let toolResources = undefined;
23357
- if (this.vectorStoreId) {
23358
- agentTools.push({ type: 'file_search' });
23359
- toolResources = {
23360
- file_search: {
23361
- vector_store_ids: [this.vectorStoreId],
23362
- },
23363
- };
23364
- }
23365
- // Add file_search also if knowledgeSources are present in the prompt (passed via AgentLlmExecutionTools)
23366
- if (modelRequirements.knowledgeSources &&
23367
- modelRequirements.knowledgeSources.length > 0 &&
23368
- !this.vectorStoreId) {
23369
- // Note: Vector store should have been created by AgentLlmExecutionTools and passed via options.
23370
- // If we are here, it means we have knowledge sources but no vector store ID.
23371
- // We can't easily create one here without persisting it.
23372
- console.warn('Knowledge sources provided but no vector store ID. Creating temporary vector store is not implemented in callChatModelStream.');
23373
- }
23374
- const start = $getCurrentDate();
23375
- // Construct the request
23376
- const rawRequest = {
23377
- // TODO: Type properly as OpenAI.Responses.CreateResponseParams
23378
- model: modelRequirements.modelName || 'gpt-4o',
23379
- input,
23380
- instructions: modelRequirements.systemMessage,
23381
- tools: agentTools.length > 0 ? agentTools : undefined,
23382
- tool_resources: toolResources,
23383
- store: false, // Stateless by default as we pass full history
23384
- };
23385
- if (this.options.isVerbose) {
23386
- console.info(colors.bgWhite('rawRequest (Responses API)'), JSON.stringify(rawRequest, null, 4));
23387
- }
23388
- // Call Responses API
23389
- // Note: Using any cast because types might not be updated yet
23390
- const response = await client.responses.create(rawRequest);
23391
- if (this.options.isVerbose) {
23392
- console.info(colors.bgWhite('rawResponse'), JSON.stringify(response, null, 4));
23393
- }
23394
- const complete = $getCurrentDate();
23395
- let resultContent = '';
23396
- const toolCalls = [];
23397
- // Parse output items
23398
- if (response.output) {
23399
- for (const item of response.output) {
23400
- if (item.type === 'message' && item.role === 'assistant') {
23401
- for (const contentPart of item.content) {
23402
- if (contentPart.type === 'output_text') {
23403
- // "output_text" based on migration guide, or "text"? Guide says "output_text" in example.
23404
- resultContent += contentPart.text;
23405
- }
23406
- else if (contentPart.type === 'text') {
23407
- resultContent += contentPart.text.value || contentPart.text;
23408
- }
23409
- }
23410
- }
23411
- else if (item.type === 'function_call') ;
23412
- }
23413
- }
23414
- // Use output_text helper if available (mentioned in guide)
23415
- if (response.output_text) {
23416
- resultContent = response.output_text;
23417
- }
23418
- // TODO: Handle tool calls properly (Requires clearer docs or experimentation)
23419
- onProgress({
23420
- content: resultContent,
23421
- modelName: response.model || 'agent',
23422
- timing: { start, complete },
23423
- usage: UNCERTAIN_USAGE,
23424
- rawPromptContent,
23425
- rawRequest,
23426
- rawResponse: response,
23427
- });
23428
- return exportJson({
23429
- name: 'promptResult',
23430
- message: `Result of \`OpenAiAgentExecutionTools.callChatModelStream\``,
23431
- order: [],
23432
- value: {
23433
- content: resultContent,
23434
- modelName: response.model || 'agent',
23435
- timing: { start, complete },
23436
- usage: UNCERTAIN_USAGE,
23437
- rawPromptContent,
23438
- rawRequest,
23439
- rawResponse: response,
23440
- toolCalls: toolCalls.length > 0 ? toolCalls : undefined,
23441
- },
23442
- });
23443
- }
23444
- /**
23445
- * Creates a vector store from knowledge sources
23446
- */
23447
- static async createVectorStore(client, name, knowledgeSources) {
23448
- // Create a vector store
23449
- const vectorStore = await client.beta.vectorStores.create({
23450
- name: `${name} Knowledge Base`,
23451
- });
23452
- const vectorStoreId = vectorStore.id;
23453
- // Upload files from knowledge sources to the vector store
23454
- const fileStreams = [];
23455
- for (const source of knowledgeSources) {
23456
- try {
23457
- // Check if it's a URL
23458
- if (source.startsWith('http://') || source.startsWith('https://')) {
23459
- // Download the file
23460
- const response = await fetch(source);
23461
- if (!response.ok) {
23462
- console.error(`Failed to download ${source}: ${response.statusText}`);
23463
- continue;
23464
- }
23465
- const buffer = await response.arrayBuffer();
23466
- const filename = source.split('/').pop() || 'downloaded-file';
23467
- const blob = new Blob([buffer]);
23468
- const file = new File([blob], filename);
23469
- fileStreams.push(file);
23470
- }
23471
- else {
23472
- // Local files not supported in browser env easily, same as before
23473
- }
23474
- }
23475
- catch (error) {
23476
- console.error(`Error processing knowledge source ${source}:`, error);
23477
- }
23478
- }
23479
- // Batch upload files to the vector store
23480
- if (fileStreams.length > 0) {
23481
- try {
23482
- await client.beta.vectorStores.fileBatches.uploadAndPoll(vectorStoreId, {
23483
- files: fileStreams,
23484
- });
23485
- }
23486
- catch (error) {
23487
- console.error('Error uploading files to vector store:', error);
23488
- }
23489
- }
23490
- return vectorStoreId;
23491
- }
23492
- /**
23493
- * Discriminant for type guards
23494
- */
23495
- get discriminant() {
23496
- return 'OPEN_AI_AGENT';
23497
- }
23498
- /**
23499
- * Type guard to check if given `LlmExecutionTools` are instanceof `OpenAiAgentExecutionTools`
23500
- */
23501
- static isOpenAiAgentExecutionTools(llmExecutionTools) {
23502
- return llmExecutionTools.discriminant === 'OPEN_AI_AGENT';
23503
- }
23504
- }
23505
-
23506
23293
  /**
23507
23294
  * Uploads files to OpenAI and returns their IDs
23508
23295
  *
@@ -23524,6 +23311,10 @@ async function uploadFilesToOpenAi(client, files) {
  return fileIds;
  }
 
+ const DEFAULT_KNOWLEDGE_SOURCE_DOWNLOAD_TIMEOUT_MS = 30000;
+ const DEFAULT_KNOWLEDGE_SOURCE_UPLOAD_TIMEOUT_MS = 900000;
+ const VECTOR_STORE_PROGRESS_LOG_INTERVAL_MIN_MS = 15000;
+ const VECTOR_STORE_STALL_LOG_THRESHOLD_MS = 30000;
  /**
  * Execution Tools for calling OpenAI API Assistants
  *
@@ -23537,7 +23328,6 @@ async function uploadFilesToOpenAi(client, files) {
  * - `RemoteAgent` - which is an `Agent` that connects to a Promptbook Agents Server
  *
  * @public exported from `@promptbook/openai`
- * @deprecated Use `OpenAiAgentExecutionTools` instead which uses the new OpenAI Responses API
  */
  class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
  /**
@@ -23974,111 +23764,731 @@ class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
23974
23764
  assistantId,
23975
23765
  });
23976
23766
  }
23977
- async createNewAssistant(options) {
23978
- var _a, _b, _c;
23979
- if (!this.isCreatingNewAssistantsAllowed) {
23980
- throw new NotAllowed(`Creating new assistants is not allowed. Set \`isCreatingNewAssistantsAllowed: true\` in options to enable this feature.`);
23981
- }
23982
- // await this.playground();
23983
- const { name, instructions, knowledgeSources, tools } = options;
23984
- const preparationStartedAtMs = Date.now();
23985
- const knowledgeSourcesCount = (_a = knowledgeSources === null || knowledgeSources === void 0 ? void 0 : knowledgeSources.length) !== null && _a !== void 0 ? _a : 0;
23986
- const toolsCount = (_b = tools === null || tools === void 0 ? void 0 : tools.length) !== null && _b !== void 0 ? _b : 0;
23767
+ /**
23768
+ * Returns the per-knowledge-source download timeout in milliseconds.
23769
+ */
23770
+ getKnowledgeSourceDownloadTimeoutMs() {
23771
+ var _a;
23772
+ return (_a = this.assistantOptions.knowledgeSourceDownloadTimeoutMs) !== null && _a !== void 0 ? _a : DEFAULT_KNOWLEDGE_SOURCE_DOWNLOAD_TIMEOUT_MS;
23773
+ }
23774
+ /**
23775
+ * Returns the max concurrency for knowledge source uploads.
23776
+ */
23777
+ getKnowledgeSourceUploadMaxConcurrency() {
23778
+ var _a;
23779
+ return (_a = this.assistantOptions.knowledgeSourceUploadMaxConcurrency) !== null && _a !== void 0 ? _a : 5;
23780
+ }
23781
+ /**
23782
+ * Returns the polling interval in milliseconds for vector store uploads.
23783
+ */
23784
+ getKnowledgeSourceUploadPollIntervalMs() {
23785
+ var _a;
23786
+ return (_a = this.assistantOptions.knowledgeSourceUploadPollIntervalMs) !== null && _a !== void 0 ? _a : 5000;
23787
+ }
23788
+ /**
23789
+ * Returns the overall upload timeout in milliseconds for vector store uploads.
23790
+ */
23791
+ getKnowledgeSourceUploadTimeoutMs() {
23792
+ var _a;
23793
+ return (_a = this.assistantOptions.knowledgeSourceUploadTimeoutMs) !== null && _a !== void 0 ? _a : DEFAULT_KNOWLEDGE_SOURCE_UPLOAD_TIMEOUT_MS;
23794
+ }
23795
+ /**
23796
+ * Returns true if we should continue even if vector store ingestion stalls.
23797
+ */
23798
+ shouldContinueOnVectorStoreStall() {
23799
+ var _a;
23800
+ return (_a = this.assistantOptions.shouldContinueOnVectorStoreStall) !== null && _a !== void 0 ? _a : true;
23801
+ }
23802
+ /**
23803
+ * Returns assistant-specific options with extended settings.
23804
+ */
23805
+ get assistantOptions() {
23806
+ return this.options;
23807
+ }
23808
+ /**
23809
+ * Downloads a knowledge source URL into a File for vector store upload.
23810
+ */
23811
+ async downloadKnowledgeSourceFile(options) {
23812
+ var _a;
23813
+ const { source, timeoutMs, logLabel } = options;
23814
+ const startedAtMs = Date.now();
23815
+ const controller = new AbortController();
23816
+ const timeoutId = setTimeout(() => controller.abort(), timeoutMs);
23987
23817
  if (this.options.isVerbose) {
23988
- console.info('[🤰]', 'Starting OpenAI assistant creation', {
23989
- name,
23990
- knowledgeSourcesCount,
23991
- toolsCount,
23992
- instructionsLength: instructions.length,
23818
+ console.info('[🤰]', 'Downloading knowledge source', {
23819
+ source,
23820
+ timeoutMs,
23821
+ logLabel,
23993
23822
  });
23994
23823
  }
23995
- const client = await this.getClient();
23996
- let vectorStoreId;
23997
- // If knowledge sources are provided, create a vector store with them
23998
- if (knowledgeSources && knowledgeSources.length > 0) {
23999
- if (this.options.isVerbose) {
24000
- console.info('[🤰]', 'Creating vector store with knowledge sources', {
24001
- name,
24002
- knowledgeSourcesCount,
23824
+ try {
23825
+ const response = await fetch(source, { signal: controller.signal });
23826
+ const contentType = (_a = response.headers.get('content-type')) !== null && _a !== void 0 ? _a : undefined;
23827
+ if (!response.ok) {
23828
+ console.error('[🤰]', 'Failed to download knowledge source', {
23829
+ source,
23830
+ status: response.status,
23831
+ statusText: response.statusText,
23832
+ contentType,
23833
+ elapsedMs: Date.now() - startedAtMs,
23834
+ logLabel,
24003
23835
  });
23836
+ return null;
24004
23837
  }
24005
- // Create a vector store
24006
- const vectorStore = await client.beta.vectorStores.create({
24007
- name: `${name} Knowledge Base`,
24008
- });
24009
- vectorStoreId = vectorStore.id;
23838
+ const buffer = await response.arrayBuffer();
23839
+ let filename = source.split('/').pop() || 'downloaded-file';
23840
+ try {
23841
+ const url = new URL(source);
23842
+ filename = url.pathname.split('/').pop() || filename;
23843
+ }
23844
+ catch (error) {
23845
+ // Keep default filename
23846
+ }
23847
+ const file = new File([buffer], filename, contentType ? { type: contentType } : undefined);
23848
+ const elapsedMs = Date.now() - startedAtMs;
23849
+ const sizeBytes = buffer.byteLength;
24010
23850
  if (this.options.isVerbose) {
24011
- console.info('[🤰]', 'Vector store created', {
24012
- vectorStoreId,
23851
+ console.info('[🤰]', 'Downloaded knowledge source', {
23852
+ source,
23853
+ filename,
23854
+ sizeBytes,
23855
+ contentType,
23856
+ elapsedMs,
23857
+ logLabel,
24013
23858
  });
24014
23859
  }
24015
- // Upload files from knowledge sources to the vector store
24016
- const fileStreams = [];
24017
- for (const [index, source] of knowledgeSources.entries()) {
23860
+ return { file, sizeBytes, filename, elapsedMs };
23861
+ }
23862
+ catch (error) {
23863
+ assertsError(error);
23864
+ console.error('[🤰]', 'Error downloading knowledge source', {
23865
+ source,
23866
+ elapsedMs: Date.now() - startedAtMs,
23867
+ logLabel,
23868
+ error: serializeError(error),
23869
+ });
23870
+ return null;
23871
+ }
23872
+ finally {
23873
+ clearTimeout(timeoutId);
23874
+ }
23875
+ }
23876
+ /**
23877
+ * Logs vector store file batch diagnostics to help trace ingestion stalls or failures.
23878
+ */
23879
+ async logVectorStoreFileBatchDiagnostics(options) {
23880
+ var _a, _b;
23881
+ const { client, vectorStoreId, batchId, uploadedFiles, logLabel, reason } = options;
23882
+ if (reason === 'stalled' && !this.options.isVerbose) {
23883
+ return;
23884
+ }
23885
+ if (!batchId.startsWith('vsfb_')) {
23886
+ console.error('[🤰]', 'Vector store file batch diagnostics skipped (invalid batch id)', {
23887
+ vectorStoreId,
23888
+ batchId,
23889
+ reason,
23890
+ logLabel,
23891
+ });
23892
+ return;
23893
+ }
23894
+ const fileIdToMetadata = new Map();
23895
+ for (const file of uploadedFiles) {
23896
+ fileIdToMetadata.set(file.fileId, file);
23897
+ }
23898
+ try {
23899
+ const limit = Math.min(100, Math.max(10, uploadedFiles.length));
23900
+ const batchFilesPage = await client.beta.vectorStores.fileBatches.listFiles(vectorStoreId, batchId, {
23901
+ limit,
23902
+ });
23903
+ const batchFiles = (_a = batchFilesPage.data) !== null && _a !== void 0 ? _a : [];
23904
+ const statusCounts = {
23905
+ in_progress: 0,
23906
+ completed: 0,
23907
+ failed: 0,
23908
+ cancelled: 0,
23909
+ };
23910
+ const errorSamples = [];
23911
+ const inProgressSamples = [];
23912
+ const batchFileIds = new Set();
23913
+ for (const file of batchFiles) {
23914
+ batchFileIds.add(file.id);
23915
+ statusCounts[file.status] = ((_b = statusCounts[file.status]) !== null && _b !== void 0 ? _b : 0) + 1;
23916
+ const metadata = fileIdToMetadata.get(file.id);
23917
+ if (file.last_error) {
23918
+ errorSamples.push({
23919
+ fileId: file.id,
23920
+ filename: metadata === null || metadata === void 0 ? void 0 : metadata.filename,
23921
+ sizeBytes: metadata === null || metadata === void 0 ? void 0 : metadata.sizeBytes,
23922
+ status: file.status,
23923
+ lastError: file.last_error,
23924
+ });
23925
+ }
23926
+ else if (file.status === 'in_progress' && inProgressSamples.length < 5) {
23927
+ inProgressSamples.push({
23928
+ fileId: file.id,
23929
+ filename: metadata === null || metadata === void 0 ? void 0 : metadata.filename,
23930
+ sizeBytes: metadata === null || metadata === void 0 ? void 0 : metadata.sizeBytes,
23931
+ });
23932
+ }
23933
+ }
23934
+ const missingSamples = uploadedFiles
23935
+ .filter((file) => !batchFileIds.has(file.fileId))
23936
+ .slice(0, 5)
23937
+ .map((file) => ({
23938
+ fileId: file.fileId,
23939
+ filename: file.filename,
23940
+ sizeBytes: file.sizeBytes,
23941
+ }));
23942
+ const vectorStore = await client.beta.vectorStores.retrieve(vectorStoreId);
23943
+ const logPayload = {
23944
+ vectorStoreId,
23945
+ batchId,
23946
+ reason,
23947
+ vectorStoreStatus: vectorStore.status,
23948
+ vectorStoreFileCounts: vectorStore.file_counts,
23949
+ vectorStoreUsageBytes: vectorStore.usage_bytes,
23950
+ batchFileCount: batchFiles.length,
23951
+ statusCounts,
23952
+ errorSamples: errorSamples.slice(0, 5),
23953
+ inProgressSamples,
23954
+ missingFileCount: uploadedFiles.length - batchFileIds.size,
23955
+ missingSamples,
23956
+ logLabel,
23957
+ };
23958
+ const logFunction = reason === 'stalled' ? console.info : console.error;
23959
+ logFunction('[🤰]', 'Vector store file batch diagnostics', logPayload);
23960
+ }
23961
+ catch (error) {
23962
+ assertsError(error);
23963
+ console.error('[🤰]', 'Vector store file batch diagnostics failed', {
23964
+ vectorStoreId,
23965
+ batchId,
23966
+ reason,
23967
+ logLabel,
23968
+ error: serializeError(error),
23969
+ });
23970
+ }
23971
+ }
23972
+ /**
23973
+ * Uploads knowledge source files to the vector store and polls until processing completes.
23974
+ */
23975
+ async uploadKnowledgeSourceFilesToVectorStore(options) {
23976
+ var _a, _b, _c, _d;
23977
+ const { client, vectorStoreId, files, totalBytes, logLabel } = options;
23978
+ const uploadStartedAtMs = Date.now();
23979
+ const maxConcurrency = Math.max(1, this.getKnowledgeSourceUploadMaxConcurrency());
23980
+ const pollIntervalMs = Math.max(1000, this.getKnowledgeSourceUploadPollIntervalMs());
23981
+ const uploadTimeoutMs = Math.max(1000, this.getKnowledgeSourceUploadTimeoutMs());
23982
+ if (this.options.isVerbose) {
23983
+ console.info('[🤰]', 'Uploading knowledge source files to OpenAI', {
23984
+ vectorStoreId,
23985
+ fileCount: files.length,
23986
+ totalBytes,
23987
+ maxConcurrency,
23988
+ pollIntervalMs,
23989
+ uploadTimeoutMs,
23990
+ logLabel,
23991
+ });
23992
+ }
23993
+ const fileTypeSummary = {};
23994
+ for (const file of files) {
23995
+ const filename = (_a = file.name) !== null && _a !== void 0 ? _a : '';
23996
+ const extension = filename.includes('.')
23997
+ ? (_c = (_b = filename.split('.').pop()) === null || _b === void 0 ? void 0 : _b.toLowerCase()) !== null && _c !== void 0 ? _c : 'unknown'
23998
+ : 'unknown';
23999
+ const sizeBytes = typeof file.size === 'number' ? file.size : 0;
24000
+ const summary = (_d = fileTypeSummary[extension]) !== null && _d !== void 0 ? _d : { count: 0, totalBytes: 0 };
24001
+ summary.count += 1;
24002
+ summary.totalBytes += sizeBytes;
24003
+ fileTypeSummary[extension] = summary;
24004
+ }
24005
+ if (this.options.isVerbose) {
24006
+ console.info('[🤰]', 'Knowledge source file summary', {
24007
+ vectorStoreId,
24008
+ fileCount: files.length,
24009
+ totalBytes,
24010
+ fileTypeSummary,
24011
+ logLabel,
24012
+ });
24013
+ }
24014
+ const fileEntries = files.map((file, index) => ({ file, index }));
24015
+ const fileIterator = fileEntries.values();
24016
+ const fileIds = [];
24017
+ const uploadedFiles = [];
24018
+ const failedUploads = [];
24019
+ let uploadedCount = 0;
24020
+ const processFiles = async (iterator) => {
24021
+ var _a, _b;
24022
+ for (const { file, index } of iterator) {
24023
+ const uploadIndex = index + 1;
24024
+ const filename = file.name || `knowledge-source-${uploadIndex}`;
24025
+ const extension = filename.includes('.')
24026
+ ? (_b = (_a = filename.split('.').pop()) === null || _a === void 0 ? void 0 : _a.toLowerCase()) !== null && _b !== void 0 ? _b : 'unknown'
24027
+ : 'unknown';
24028
+ const sizeBytes = typeof file.size === 'number' ? file.size : undefined;
24029
+ const fileUploadStartedAtMs = Date.now();
24030
+ if (this.options.isVerbose) {
24031
+ console.info('[🤰]', 'Uploading knowledge source file', {
24032
+ index: uploadIndex,
24033
+ total: files.length,
24034
+ filename,
24035
+ extension,
24036
+ sizeBytes,
24037
+ logLabel,
24038
+ });
24039
+ }
24018
24040
  try {
24041
+ const uploaded = await client.files.create({ file, purpose: 'assistants' });
24042
+ fileIds.push(uploaded.id);
24043
+ uploadedFiles.push({ fileId: uploaded.id, filename, sizeBytes });
24044
+ uploadedCount += 1;
24019
24045
  if (this.options.isVerbose) {
24020
- console.info('[🤰]', 'Processing knowledge source', {
24021
- index: index + 1,
24022
- total: knowledgeSources.length,
24023
- source,
24024
- sourceType: source.startsWith('http') || source.startsWith('https') ? 'url' : 'file',
24046
+ console.info('[🤰]', 'Uploaded knowledge source file', {
24047
+ index: uploadIndex,
24048
+ total: files.length,
24049
+ filename,
24050
+ sizeBytes,
24051
+ fileId: uploaded.id,
24052
+ elapsedMs: Date.now() - fileUploadStartedAtMs,
24053
+ logLabel,
24025
24054
  });
24026
24055
  }
24027
- // Check if it's a URL
24028
- if (source.startsWith('http://') || source.startsWith('https://')) {
24029
- // Download the file
24030
- const response = await fetch(source);
24031
- if (!response.ok) {
24032
- console.error(`Failed to download ${source}: ${response.statusText}`);
24033
- continue;
24034
- }
24035
- const buffer = await response.arrayBuffer();
24036
- let filename = source.split('/').pop() || 'downloaded-file';
24037
- try {
24038
- const url = new URL(source);
24039
- filename = url.pathname.split('/').pop() || filename;
24040
- }
24041
- catch (error) {
24042
- // Keep default filename
24043
- }
24044
- const blob = new Blob([buffer]);
24045
- const file = new File([blob], filename);
24046
- fileStreams.push(file);
24047
- }
24048
- else {
24049
- /*
24050
- TODO: [🐱‍🚀] Resolve problem with browser environment
24051
- // Assume it's a local file path
24052
- // Note: This will work in Node.js environment
24053
- // For browser environments, this would need different handling
24054
- const fs = await import('fs');
24055
- const fileStream = fs.createReadStream(source);
24056
- fileStreams.push(fileStream);
24057
- */
24058
- }
24059
24056
  }
24060
24057
  catch (error) {
24061
- console.error(`Error processing knowledge source ${source}:`, error);
24058
+ assertsError(error);
24059
+ const serializedError = serializeError(error);
24060
+ failedUploads.push({ index: uploadIndex, filename, error: serializedError });
24061
+ console.error('[🤰]', 'Failed to upload knowledge source file', {
24062
+ index: uploadIndex,
24063
+ total: files.length,
24064
+ filename,
24065
+ sizeBytes,
24066
+ elapsedMs: Date.now() - fileUploadStartedAtMs,
24067
+ logLabel,
24068
+ error: serializedError,
24069
+ });
24062
24070
  }
24063
24071
  }
24064
- // Batch upload files to the vector store
24065
- if (fileStreams.length > 0) {
24066
- try {
24067
- await client.beta.vectorStores.fileBatches.uploadAndPoll(vectorStoreId, {
24068
- files: fileStreams,
24072
+ };
24073
+ const workerCount = Math.min(maxConcurrency, files.length);
24074
+ const workers = Array.from({ length: workerCount }, () => processFiles(fileIterator));
24075
+ await Promise.all(workers);
24076
+ if (this.options.isVerbose) {
24077
+ console.info('[🤰]', 'Finished uploading knowledge source files', {
24078
+ vectorStoreId,
24079
+ fileCount: files.length,
24080
+ uploadedCount,
24081
+ failedCount: failedUploads.length,
24082
+ elapsedMs: Date.now() - uploadStartedAtMs,
24083
+ failedSamples: failedUploads.slice(0, 3),
24084
+ logLabel,
24085
+ });
24086
+ }
24087
+ if (fileIds.length === 0) {
24088
+ console.error('[🤰]', 'No knowledge source files were uploaded', {
24089
+ vectorStoreId,
24090
+ fileCount: files.length,
24091
+ failedCount: failedUploads.length,
24092
+ logLabel,
24093
+ });
24094
+ return null;
24095
+ }
24096
+ const batch = await client.beta.vectorStores.fileBatches.create(vectorStoreId, {
24097
+ file_ids: fileIds,
24098
+ });
24099
+ const expectedBatchId = batch.id;
24100
+ const expectedBatchIdValid = expectedBatchId.startsWith('vsfb_');
24101
+ if (!expectedBatchIdValid) {
24102
+ console.error('[🤰]', 'Vector store file batch id looks invalid', {
24103
+ vectorStoreId,
24104
+ batchId: expectedBatchId,
24105
+ batchVectorStoreId: batch.vector_store_id,
24106
+ logLabel,
24107
+ });
24108
+ }
24109
+ else if (batch.vector_store_id !== vectorStoreId) {
24110
+ console.error('[🤰]', 'Vector store file batch vector store id mismatch', {
24111
+ vectorStoreId,
24112
+ batchId: expectedBatchId,
24113
+ batchVectorStoreId: batch.vector_store_id,
24114
+ logLabel,
24115
+ });
24116
+ }
24117
+ if (this.options.isVerbose) {
24118
+ console.info('[🤰]', 'Created vector store file batch', {
24119
+ vectorStoreId,
24120
+ batchId: expectedBatchId,
24121
+ fileCount: fileIds.length,
24122
+ logLabel,
24123
+ });
24124
+ }
24125
+ const pollStartedAtMs = Date.now();
24126
+ const progressLogIntervalMs = Math.max(VECTOR_STORE_PROGRESS_LOG_INTERVAL_MIN_MS, pollIntervalMs);
24127
+ const diagnosticsIntervalMs = Math.max(60000, pollIntervalMs * 5);
24128
+ let lastStatus;
24129
+ let lastCountsKey = '';
24130
+ let lastProgressKey = '';
24131
+ let lastLogAtMs = 0;
24132
+ let lastProgressAtMs = pollStartedAtMs;
24133
+ let lastDiagnosticsAtMs = pollStartedAtMs;
24134
+ let latestBatch = batch;
24135
+ let loggedBatchIdMismatch = false;
24136
+ let shouldPoll = true;
24137
+ while (shouldPoll) {
24138
+ latestBatch = await client.beta.vectorStores.fileBatches.retrieve(vectorStoreId, expectedBatchId);
24139
+ const counts = latestBatch.file_counts;
24140
+ const countsKey = `${counts.completed}/${counts.failed}/${counts.in_progress}/${counts.cancelled}/${counts.total}`;
24141
+ const nowMs = Date.now();
24142
+ const returnedBatchId = latestBatch.id;
24143
+ // [🤰] Note: Sometimes OpenAI returns Vector Store object instead of Batch object, or IDs get swapped.
24144
+ // We only consider it a mismatch if the returned ID looks like a Batch ID.
24145
+ const batchIdMismatch = returnedBatchId !== expectedBatchId && returnedBatchId.startsWith('vsfb_');
24146
+ const diagnosticsBatchId = batchIdMismatch && returnedBatchId.startsWith('vsfb_') ? returnedBatchId : expectedBatchId;
24147
+ const shouldLog = this.options.isVerbose &&
24148
+ (latestBatch.status !== lastStatus ||
24149
+ countsKey !== lastCountsKey ||
24150
+ nowMs - lastLogAtMs >= progressLogIntervalMs);
24151
+ if (batchIdMismatch && !loggedBatchIdMismatch) {
24152
+ console.error('[🤰]', 'Vector store file batch id mismatch', {
24153
+ vectorStoreId,
24154
+ expectedBatchId,
24155
+ returnedBatchId,
24156
+ status: latestBatch.status,
24157
+ fileCounts: counts,
24158
+ logLabel,
24159
+ });
24160
+ loggedBatchIdMismatch = true;
24161
+ }
24162
+ if (countsKey !== lastProgressKey) {
24163
+ lastProgressKey = countsKey;
24164
+ lastProgressAtMs = nowMs;
24165
+ }
24166
+ if (shouldLog) {
24167
+ console.info('[🤰]', 'Vector store file batch status', {
24168
+ vectorStoreId,
24169
+ batchId: expectedBatchId,
24170
+ ...(batchIdMismatch ? { returnedBatchId } : {}),
24171
+ status: latestBatch.status,
24172
+ fileCounts: counts,
24173
+ elapsedMs: nowMs - pollStartedAtMs,
24174
+ logLabel,
24175
+ });
24176
+ // [🤰] If there are in-progress files for a long time, log their details
24177
+ if (counts.in_progress > 0 && nowMs - lastProgressAtMs > VECTOR_STORE_STALL_LOG_THRESHOLD_MS) {
24178
+ await this.logVectorStoreFileBatchDiagnostics({
24179
+ client,
24180
+ vectorStoreId,
24181
+ batchId: diagnosticsBatchId,
24182
+ uploadedFiles,
24183
+ logLabel,
24184
+ reason: 'stalled',
24185
+ });
24186
+ }
24187
+ lastStatus = latestBatch.status;
24188
+ lastCountsKey = countsKey;
24189
+ lastLogAtMs = nowMs;
24190
+ }
24191
+ if (nowMs - lastProgressAtMs >= diagnosticsIntervalMs &&
24192
+ nowMs - lastDiagnosticsAtMs >= diagnosticsIntervalMs) {
24193
+ lastDiagnosticsAtMs = nowMs;
24194
+ await this.logVectorStoreFileBatchDiagnostics({
24195
+ client,
24196
+ vectorStoreId,
24197
+ batchId: diagnosticsBatchId,
24198
+ uploadedFiles,
24199
+ logLabel,
24200
+ reason: 'stalled',
24201
+ });
24202
+ }
24203
+ if (latestBatch.status === 'completed') {
24204
+ if (this.options.isVerbose) {
24205
+ console.info('[🤰]', 'Vector store file batch completed', {
24206
+ vectorStoreId,
24207
+ batchId: expectedBatchId,
24208
+ ...(batchIdMismatch ? { returnedBatchId } : {}),
24209
+ fileCounts: latestBatch.file_counts,
24210
+ elapsedMs: Date.now() - uploadStartedAtMs,
24211
+ logLabel,
24069
24212
  });
24213
+ }
24214
+ if (latestBatch.file_counts.failed > 0) {
24215
+ console.error('[🤰]', 'Vector store file batch completed with failures', {
24216
+ vectorStoreId,
24217
+ batchId: expectedBatchId,
24218
+ ...(batchIdMismatch ? { returnedBatchId } : {}),
24219
+ fileCounts: latestBatch.file_counts,
24220
+ logLabel,
24221
+ });
24222
+ await this.logVectorStoreFileBatchDiagnostics({
24223
+ client,
24224
+ vectorStoreId,
24225
+ batchId: diagnosticsBatchId,
24226
+ uploadedFiles,
24227
+ logLabel,
24228
+ reason: 'failed',
24229
+ });
24230
+ }
24231
+ shouldPoll = false;
24232
+ continue;
24233
+ }
24234
+ if (latestBatch.status === 'failed' || latestBatch.status === 'cancelled') {
24235
+ console.error('[🤰]', 'Vector store file batch did not complete', {
24236
+ vectorStoreId,
24237
+ batchId: expectedBatchId,
24238
+ ...(batchIdMismatch ? { returnedBatchId } : {}),
24239
+ status: latestBatch.status,
24240
+ fileCounts: latestBatch.file_counts,
24241
+ elapsedMs: Date.now() - uploadStartedAtMs,
24242
+ logLabel,
24243
+ });
24244
+ await this.logVectorStoreFileBatchDiagnostics({
24245
+ client,
24246
+ vectorStoreId,
24247
+ batchId: diagnosticsBatchId,
24248
+ uploadedFiles,
24249
+ logLabel,
24250
+ reason: 'failed',
24251
+ });
24252
+ shouldPoll = false;
24253
+ continue;
24254
+ }
24255
+ if (nowMs - pollStartedAtMs >= uploadTimeoutMs) {
24256
+ console.error('[🤰]', 'Timed out waiting for vector store file batch', {
24257
+ vectorStoreId,
24258
+ batchId: expectedBatchId,
24259
+ ...(batchIdMismatch ? { returnedBatchId } : {}),
24260
+ fileCounts: latestBatch.file_counts,
24261
+ elapsedMs: nowMs - pollStartedAtMs,
24262
+ uploadTimeoutMs,
24263
+ logLabel,
24264
+ });
24265
+ await this.logVectorStoreFileBatchDiagnostics({
24266
+ client,
24267
+ vectorStoreId,
24268
+ batchId: diagnosticsBatchId,
24269
+ uploadedFiles,
24270
+ logLabel,
24271
+ reason: 'timeout',
24272
+ });
24273
+ if (this.shouldContinueOnVectorStoreStall()) {
24274
+ console.warn('[🤰]', 'Continuing despite vector store timeout as requested', {
24275
+ vectorStoreId,
24276
+ logLabel,
24277
+ });
24278
+ shouldPoll = false;
24279
+ continue;
24280
+ }
24281
+ try {
24282
+ const cancelBatchId = batchIdMismatch && returnedBatchId.startsWith('vsfb_') ? returnedBatchId : expectedBatchId;
24283
+ if (!cancelBatchId.startsWith('vsfb_')) {
24284
+ console.error('[🤰]', 'Skipping vector store file batch cancel (invalid batch id)', {
24285
+ vectorStoreId,
24286
+ batchId: cancelBatchId,
24287
+ logLabel,
24288
+ });
24289
+ }
24290
+ else {
24291
+ await client.beta.vectorStores.fileBatches.cancel(vectorStoreId, cancelBatchId);
24292
+ }
24070
24293
  if (this.options.isVerbose) {
24071
- console.info('[🤰]', 'Uploaded files to vector store', {
24294
+ console.info('[🤰]', 'Cancelled vector store file batch after timeout', {
24072
24295
  vectorStoreId,
24073
- fileCount: fileStreams.length,
24296
+ batchId: batchIdMismatch && returnedBatchId.startsWith('vsfb_')
24297
+ ? returnedBatchId
24298
+ : expectedBatchId,
24299
+ ...(batchIdMismatch ? { returnedBatchId } : {}),
24300
+ logLabel,
24074
24301
  });
24075
24302
  }
24076
24303
  }
24077
24304
  catch (error) {
24078
- console.error('Error uploading files to vector store:', error);
24305
+ assertsError(error);
24306
+ console.error('[🤰]', 'Failed to cancel vector store file batch after timeout', {
24307
+ vectorStoreId,
24308
+ batchId: expectedBatchId,
24309
+ ...(batchIdMismatch ? { returnedBatchId } : {}),
24310
+ logLabel,
24311
+ error: serializeError(error),
24312
+ });
24313
+ }
24314
+ shouldPoll = false;
24315
+ continue;
24316
+ }
24317
+ await new Promise((resolve) => setTimeout(resolve, pollIntervalMs));
24318
+ }
24319
+ return latestBatch;
24320
+ }
24321
+ /**
24322
+ * Creates a vector store and uploads knowledge sources, returning its ID.
24323
+ */
24324
+ async createVectorStoreWithKnowledgeSources(options) {
24325
+ const { client, name, knowledgeSources, logLabel } = options;
24326
+ const knowledgeSourcesCount = knowledgeSources.length;
24327
+ const downloadTimeoutMs = this.getKnowledgeSourceDownloadTimeoutMs();
24328
+ if (this.options.isVerbose) {
24329
+ console.info('[🤰]', 'Creating vector store with knowledge sources', {
24330
+ name,
24331
+ knowledgeSourcesCount,
24332
+ downloadTimeoutMs,
24333
+ logLabel,
24334
+ });
24335
+ }
24336
+ const vectorStore = await client.beta.vectorStores.create({
24337
+ name: `${name} Knowledge Base`,
24338
+ });
24339
+ const vectorStoreId = vectorStore.id;
24340
+ if (this.options.isVerbose) {
24341
+ console.info('[🤰]', 'Vector store created', {
24342
+ vectorStoreId,
24343
+ logLabel,
24344
+ });
24345
+ }
24346
+ const fileStreams = [];
24347
+ const skippedSources = [];
24348
+ let totalBytes = 0;
24349
+ const processingStartedAtMs = Date.now();
24350
+ for (const [index, source] of knowledgeSources.entries()) {
24351
+ try {
24352
+ const sourceType = source.startsWith('http') || source.startsWith('https') ? 'url' : 'file';
24353
+ if (this.options.isVerbose) {
24354
+ console.info('[🤰]', 'Processing knowledge source', {
24355
+ index: index + 1,
24356
+ total: knowledgeSourcesCount,
24357
+ source,
24358
+ sourceType,
24359
+ logLabel,
24360
+ });
24079
24361
  }
24362
+ // Check if it's a URL
24363
+ if (source.startsWith('http://') || source.startsWith('https://')) {
24364
+ const downloadResult = await this.downloadKnowledgeSourceFile({
24365
+ source,
24366
+ timeoutMs: downloadTimeoutMs,
24367
+ logLabel,
24368
+ });
24369
+ if (downloadResult) {
24370
+ fileStreams.push(downloadResult.file);
24371
+ totalBytes += downloadResult.sizeBytes;
24372
+ }
24373
+ else {
24374
+ skippedSources.push({ source, reason: 'download_failed' });
24375
+ }
24376
+ }
24377
+ else {
24378
+ skippedSources.push({ source, reason: 'unsupported_source_type' });
24379
+ if (this.options.isVerbose) {
24380
+ console.info('[🤰]', 'Skipping knowledge source (unsupported type)', {
24381
+ source,
24382
+ sourceType,
24383
+ logLabel,
24384
+ });
24385
+ }
24386
+ /*
24387
+ TODO: [🐱‍🚀] Resolve problem with browser environment
24388
+ // Assume it's a local file path
24389
+ // Note: This will work in Node.js environment
24390
+ // For browser environments, this would need different handling
24391
+ const fs = await import('fs');
24392
+ const fileStream = fs.createReadStream(source);
24393
+ fileStreams.push(fileStream);
24394
+ */
24395
+ }
24396
+ }
24397
+ catch (error) {
24398
+ assertsError(error);
24399
+ skippedSources.push({ source, reason: 'processing_error' });
24400
+ console.error('[🤰]', 'Error processing knowledge source', {
24401
+ source,
24402
+ logLabel,
24403
+ error: serializeError(error),
24404
+ });
24405
+ }
24406
+ }
24407
+ if (this.options.isVerbose) {
24408
+ console.info('[🤰]', 'Finished processing knowledge sources', {
24409
+ total: knowledgeSourcesCount,
24410
+ downloadedCount: fileStreams.length,
24411
+ skippedCount: skippedSources.length,
24412
+ totalBytes,
24413
+ elapsedMs: Date.now() - processingStartedAtMs,
24414
+ skippedSamples: skippedSources.slice(0, 3),
24415
+ logLabel,
24416
+ });
24417
+ }
24418
+ if (fileStreams.length > 0) {
24419
+ if (this.options.isVerbose) {
24420
+ console.info('[🤰]', 'Uploading files to vector store', {
24421
+ vectorStoreId,
24422
+ fileCount: fileStreams.length,
24423
+ totalBytes,
24424
+ maxConcurrency: this.getKnowledgeSourceUploadMaxConcurrency(),
24425
+ pollIntervalMs: this.getKnowledgeSourceUploadPollIntervalMs(),
24426
+ uploadTimeoutMs: this.getKnowledgeSourceUploadTimeoutMs(),
24427
+ logLabel,
24428
+ });
24429
+ }
24430
+ try {
24431
+ await this.uploadKnowledgeSourceFilesToVectorStore({
24432
+ client,
24433
+ vectorStoreId,
24434
+ files: fileStreams,
24435
+ totalBytes,
24436
+ logLabel,
24437
+ });
24438
+ }
24439
+ catch (error) {
24440
+ assertsError(error);
24441
+ console.error('[🤰]', 'Error uploading files to vector store', {
24442
+ vectorStoreId,
24443
+ logLabel,
24444
+ error: serializeError(error),
24445
+ });
24080
24446
  }
24081
24447
  }
24448
+ else if (this.options.isVerbose) {
24449
+ console.info('[🤰]', 'No knowledge source files to upload', {
24450
+ vectorStoreId,
24451
+ skippedCount: skippedSources.length,
24452
+ logLabel,
24453
+ });
24454
+ }
24455
+ return {
24456
+ vectorStoreId,
24457
+ uploadedFileCount: fileStreams.length,
24458
+ skippedCount: skippedSources.length,
24459
+ totalBytes,
24460
+ };
24461
+ }
24462
+ async createNewAssistant(options) {
24463
+ var _a, _b, _c;
24464
+ if (!this.isCreatingNewAssistantsAllowed) {
24465
+ throw new NotAllowed(`Creating new assistants is not allowed. Set \`isCreatingNewAssistantsAllowed: true\` in options to enable this feature.`);
24466
+ }
24467
+ // await this.playground();
24468
+ const { name, instructions, knowledgeSources, tools } = options;
24469
+ const preparationStartedAtMs = Date.now();
24470
+ const knowledgeSourcesCount = (_a = knowledgeSources === null || knowledgeSources === void 0 ? void 0 : knowledgeSources.length) !== null && _a !== void 0 ? _a : 0;
24471
+ const toolsCount = (_b = tools === null || tools === void 0 ? void 0 : tools.length) !== null && _b !== void 0 ? _b : 0;
24472
+ if (this.options.isVerbose) {
24473
+ console.info('[🤰]', 'Starting OpenAI assistant creation', {
24474
+ name,
24475
+ knowledgeSourcesCount,
24476
+ toolsCount,
24477
+ instructionsLength: instructions.length,
24478
+ });
24479
+ }
24480
+ const client = await this.getClient();
24481
+ let vectorStoreId;
24482
+ // If knowledge sources are provided, create a vector store with them
24483
+ if (knowledgeSources && knowledgeSources.length > 0) {
24484
+ const vectorStoreResult = await this.createVectorStoreWithKnowledgeSources({
24485
+ client,
24486
+ name,
24487
+ knowledgeSources,
24488
+ logLabel: 'assistant creation',
24489
+ });
24490
+ vectorStoreId = vectorStoreResult.vectorStoreId;
24491
+ }
24082
24492
  // Create assistant with vector store attached
24083
24493
  const assistantConfig = {
24084
24494
  name,
@@ -24144,91 +24554,14 @@ class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
24144
24554
  const client = await this.getClient();
24145
24555
  let vectorStoreId;
24146
24556
  // If knowledge sources are provided, create a vector store with them
24147
- // TODO: [🧠] Reuse vector store creation logic from createNewAssistant
24148
24557
  if (knowledgeSources && knowledgeSources.length > 0) {
24149
- if (this.options.isVerbose) {
24150
- console.info('[🤰]', 'Creating vector store for assistant update', {
24151
- assistantId,
24152
- name,
24153
- knowledgeSourcesCount,
24154
- });
24155
- }
24156
- // Create a vector store
24157
- const vectorStore = await client.beta.vectorStores.create({
24158
- name: `${name} Knowledge Base`,
24558
+ const vectorStoreResult = await this.createVectorStoreWithKnowledgeSources({
24559
+ client,
24560
+ name: name !== null && name !== void 0 ? name : assistantId,
24561
+ knowledgeSources,
24562
+ logLabel: 'assistant update',
24159
24563
  });
24160
- vectorStoreId = vectorStore.id;
24161
- if (this.options.isVerbose) {
24162
- console.info('[🤰]', 'Vector store created for assistant update', {
24163
- vectorStoreId,
24164
- });
24165
- }
24166
- // Upload files from knowledge sources to the vector store
24167
- const fileStreams = [];
24168
- for (const [index, source] of knowledgeSources.entries()) {
24169
- try {
24170
- if (this.options.isVerbose) {
24171
- console.info('[🤰]', 'Processing knowledge source for update', {
24172
- index: index + 1,
24173
- total: knowledgeSources.length,
24174
- source,
24175
- sourceType: source.startsWith('http') || source.startsWith('https') ? 'url' : 'file',
24176
- });
24177
- }
24178
- // Check if it's a URL
24179
- if (source.startsWith('http://') || source.startsWith('https://')) {
24180
- // Download the file
24181
- const response = await fetch(source);
24182
- if (!response.ok) {
24183
- console.error(`Failed to download ${source}: ${response.statusText}`);
24184
- continue;
24185
- }
24186
- const buffer = await response.arrayBuffer();
24187
- let filename = source.split('/').pop() || 'downloaded-file';
24188
- try {
24189
- const url = new URL(source);
24190
- filename = url.pathname.split('/').pop() || filename;
24191
- }
24192
- catch (error) {
24193
- // Keep default filename
24194
- }
24195
- const blob = new Blob([buffer]);
24196
- const file = new File([blob], filename);
24197
- fileStreams.push(file);
24198
- }
24199
- else {
24200
- /*
24201
- TODO: [🐱‍🚀] Resolve problem with browser environment
24202
- // Assume it's a local file path
24203
- // Note: This will work in Node.js environment
24204
- // For browser environments, this would need different handling
24205
- const fs = await import('fs');
24206
- const fileStream = fs.createReadStream(source);
24207
- fileStreams.push(fileStream);
24208
- */
24209
- }
24210
- }
24211
- catch (error) {
24212
- console.error(`Error processing knowledge source ${source}:`, error);
24213
- }
24214
- }
24215
- // Batch upload files to the vector store
24216
- if (fileStreams.length > 0) {
24217
- try {
24218
- await client.beta.vectorStores.fileBatches.uploadAndPoll(vectorStoreId, {
24219
- files: fileStreams,
24220
- });
24221
- if (this.options.isVerbose) {
24222
- console.info('[🤰]', 'Uploaded files to vector store for update', {
24223
- vectorStoreId,
24224
- fileCount: fileStreams.length,
24225
- });
24226
- }
24227
- }
24228
- catch (error) {
24229
- console.error('Error uploading files to vector store:', error);
24230
- }
24231
- }
24564
+ vectorStoreId = vectorStoreResult.vectorStoreId;
24232
24565
  }
24233
24566
  const assistantUpdate = {
24234
24567
  name,
@@ -24332,7 +24665,6 @@ function emitAssistantPreparationProgress(options) {
  * - `Agent` - which represents an AI Agent with its source, memories, actions, etc. Agent is a higher-level abstraction which is internally using:
  * - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
  * - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
- * - `OpenAiAgentExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with agent capabilities (using Responses API), recommended for usage in `Agent` or `AgentLlmExecutionTools`
  * - `OpenAiAssistantExecutionTools` - (Deprecated) which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities
  * - `RemoteAgent` - which is an `Agent` that connects to a Promptbook Agents Server
  *
@@ -24495,65 +24827,7 @@ class AgentLlmExecutionTools {
  }, // Cast to avoid readonly mismatch from spread
  };
  console.log('!!!! promptWithAgentModelRequirements:', promptWithAgentModelRequirements);
- if (OpenAiAgentExecutionTools.isOpenAiAgentExecutionTools(this.options.llmTools)) {
- const requirementsHash = SHA256(JSON.stringify(modelRequirements)).toString();
- const cached = AgentLlmExecutionTools.vectorStoreCache.get(this.title);
- let agentTools;
- if (cached && cached.requirementsHash === requirementsHash) {
- if (this.options.isVerbose) {
- console.log(`1️⃣ Using cached OpenAI Agent Vector Store for agent ${this.title}...`);
- }
- // Create new instance with cached vectorStoreId
- // We need to access options from the original tool.
- // We assume isOpenAiAgentExecutionTools implies it has options we can clone.
- // But protected options are not accessible.
- // We can cast to access options if they were public, or use a method to clone.
- // OpenAiAgentExecutionTools doesn't have a clone method.
- // However, we can just assume the passed tool *might* not have the vector store yet, or we are replacing it.
- // Actually, if the passed tool IS OpenAiAgentExecutionTools, we should use it as a base.
- // TODO: [🧠] This is a bit hacky, accessing protected options or recreating tools.
- // Ideally OpenAiAgentExecutionTools should have a method `withVectorStoreId`.
- agentTools = new OpenAiAgentExecutionTools({
- ...this.options.llmTools.options,
- vectorStoreId: cached.vectorStoreId,
- });
- }
- else {
- if (this.options.isVerbose) {
- console.log(`1️⃣ Creating/Updating OpenAI Agent Vector Store for agent ${this.title}...`);
- }
- let vectorStoreId;
- if (modelRequirements.knowledgeSources && modelRequirements.knowledgeSources.length > 0) {
- const client = await this.options.llmTools.getClient();
- vectorStoreId = await OpenAiAgentExecutionTools.createVectorStore(client, this.title, modelRequirements.knowledgeSources);
- }
- if (vectorStoreId) {
- AgentLlmExecutionTools.vectorStoreCache.set(this.title, {
- vectorStoreId,
- requirementsHash,
- });
- }
- agentTools = new OpenAiAgentExecutionTools({
- ...this.options.llmTools.options,
- vectorStoreId,
- });
- }
- // Create modified chat prompt with agent system message specific to OpenAI Agent
- // Note: Unlike Assistants API, Responses API expects instructions (system message) to be passed in the call.
- // So we use promptWithAgentModelRequirements which has the system message prepended.
- // But we need to make sure we pass knowledgeSources in modelRequirements so OpenAiAgentExecutionTools can fallback to warning if vectorStoreId is missing (though we just handled it).
- const promptForAgent = {
- ...promptWithAgentModelRequirements,
- modelRequirements: {
- ...promptWithAgentModelRequirements.modelRequirements,
- knowledgeSources: modelRequirements.knowledgeSources
- ? [...modelRequirements.knowledgeSources]
- : undefined, // Pass knowledge sources explicitly
- },
- };
- underlyingLlmResult = await agentTools.callChatModelStream(promptForAgent, onProgress);
- }
- else if (OpenAiAssistantExecutionTools.isOpenAiAssistantExecutionTools(this.options.llmTools)) {
+ if (OpenAiAssistantExecutionTools.isOpenAiAssistantExecutionTools(this.options.llmTools)) {
  // ... deprecated path ...
  const requirementsHash = SHA256(JSON.stringify(modelRequirements)).toString();
  const cached = AgentLlmExecutionTools.assistantCache.get(this.title);
@@ -24758,7 +25032,6 @@ function buildTeacherSummary(commitments, used) {
  * - `Agent` - which represents an AI Agent with its source, memories, actions, etc. Agent is a higher-level abstraction which is internally using:
  * - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
  * - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
- * - `OpenAiAgentExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with agent capabilities (using Responses API), recommended for usage in `Agent` or `AgentLlmExecutionTools`
  * - `OpenAiAssistantExecutionTools` - (Deprecated) which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities
  * - `RemoteAgent` - which is an `Agent` that connects to a Promptbook Agents Server
  *